1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2016 Intel Corporation
3  */
4
5 #include <sys/queue.h>
6 #include <stdio.h>
7 #include <errno.h>
8 #include <stdint.h>
9 #include <string.h>
10 #include <unistd.h>
11 #include <stdarg.h>
12 #include <inttypes.h>
13 #include <netinet/in.h>
14 #include <rte_byteorder.h>
15 #include <rte_common.h>
16 #include <rte_cycles.h>
17
18 #include <rte_interrupts.h>
19 #include <rte_log.h>
20 #include <rte_debug.h>
21 #include <rte_pci.h>
22 #include <rte_atomic.h>
23 #include <rte_branch_prediction.h>
24 #include <rte_memory.h>
25 #include <rte_eal.h>
26 #include <rte_alarm.h>
27 #include <rte_ether.h>
28 #include <rte_ethdev_driver.h>
29 #include <rte_malloc.h>
30 #include <rte_random.h>
31 #include <rte_dev.h>
32 #include <rte_hash_crc.h>
33 #include <rte_flow.h>
34 #include <rte_flow_driver.h>
35
36 #include "ixgbe_logs.h"
37 #include "base/ixgbe_api.h"
38 #include "base/ixgbe_vf.h"
39 #include "base/ixgbe_common.h"
40 #include "ixgbe_ethdev.h"
41 #include "ixgbe_bypass.h"
42 #include "ixgbe_rxtx.h"
43 #include "base/ixgbe_type.h"
44 #include "base/ixgbe_phy.h"
45 #include "rte_pmd_ixgbe.h"
46
47
48 #define IXGBE_MIN_N_TUPLE_PRIO 1
49 #define IXGBE_MAX_N_TUPLE_PRIO 7
50 #define IXGBE_MAX_FLX_SOURCE_OFF 62
51
52 /* ntuple filter list structure */
53 struct ixgbe_ntuple_filter_ele {
54         TAILQ_ENTRY(ixgbe_ntuple_filter_ele) entries;
55         struct rte_eth_ntuple_filter filter_info;
56 };
57 /* ethertype filter list structure */
58 struct ixgbe_ethertype_filter_ele {
59         TAILQ_ENTRY(ixgbe_ethertype_filter_ele) entries;
60         struct rte_eth_ethertype_filter filter_info;
61 };
62 /* syn filter list structure */
63 struct ixgbe_eth_syn_filter_ele {
64         TAILQ_ENTRY(ixgbe_eth_syn_filter_ele) entries;
65         struct rte_eth_syn_filter filter_info;
66 };
67 /* fdir filter list structure */
68 struct ixgbe_fdir_rule_ele {
69         TAILQ_ENTRY(ixgbe_fdir_rule_ele) entries;
70         struct ixgbe_fdir_rule filter_info;
71 };
72 /* l2_tunnel filter list structure */
73 struct ixgbe_eth_l2_tunnel_conf_ele {
74         TAILQ_ENTRY(ixgbe_eth_l2_tunnel_conf_ele) entries;
75         struct rte_eth_l2_tunnel_conf filter_info;
76 };
77 /* rss filter list structure */
78 struct ixgbe_rss_conf_ele {
79         TAILQ_ENTRY(ixgbe_rss_conf_ele) entries;
80         struct ixgbe_rte_flow_rss_conf filter_info;
81 };
82 /* ixgbe_flow memory list structure */
83 struct ixgbe_flow_mem {
84         TAILQ_ENTRY(ixgbe_flow_mem) entries;
85         struct rte_flow *flow;
86 };
87
88 TAILQ_HEAD(ixgbe_ntuple_filter_list, ixgbe_ntuple_filter_ele);
89 TAILQ_HEAD(ixgbe_ethertype_filter_list, ixgbe_ethertype_filter_ele);
90 TAILQ_HEAD(ixgbe_syn_filter_list, ixgbe_eth_syn_filter_ele);
91 TAILQ_HEAD(ixgbe_fdir_rule_filter_list, ixgbe_fdir_rule_ele);
92 TAILQ_HEAD(ixgbe_l2_tunnel_filter_list, ixgbe_eth_l2_tunnel_conf_ele);
93 TAILQ_HEAD(ixgbe_rss_filter_list, ixgbe_rss_conf_ele);
94 TAILQ_HEAD(ixgbe_flow_mem_list, ixgbe_flow_mem);
95
96 static struct ixgbe_ntuple_filter_list filter_ntuple_list;
97 static struct ixgbe_ethertype_filter_list filter_ethertype_list;
98 static struct ixgbe_syn_filter_list filter_syn_list;
99 static struct ixgbe_fdir_rule_filter_list filter_fdir_list;
100 static struct ixgbe_l2_tunnel_filter_list filter_l2_tunnel_list;
101 static struct ixgbe_rss_filter_list filter_rss_list;
102 static struct ixgbe_flow_mem_list ixgbe_flow_list;
103
104 /**
105  * An endless loop cannot happen under the assumptions below:
106  * 1. there is at least one non-void item (END);
107  * 2. cur is before END.
108  */
109 static inline
110 const struct rte_flow_item *next_no_void_pattern(
111                 const struct rte_flow_item pattern[],
112                 const struct rte_flow_item *cur)
113 {
114         const struct rte_flow_item *next =
115                 cur ? cur + 1 : &pattern[0];
116         while (1) {
117                 if (next->type != RTE_FLOW_ITEM_TYPE_VOID)
118                         return next;
119                 next++;
120         }
121 }
122
123 static inline
124 const struct rte_flow_action *next_no_void_action(
125                 const struct rte_flow_action actions[],
126                 const struct rte_flow_action *cur)
127 {
128         const struct rte_flow_action *next =
129                 cur ? cur + 1 : &actions[0];
130         while (1) {
131                 if (next->type != RTE_FLOW_ACTION_TYPE_VOID)
132                         return next;
133                 next++;
134         }
135 }
136
137 /**
138  * Please be aware of an assumption shared by all the parsers:
139  * rte_flow_item uses big endian, while rte_flow_attr and
140  * rte_flow_action use CPU order.
141  * This is because the pattern describes packets, and packets
142  * normally use network order.
143  */
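/*
 * Illustration only (not part of the driver): an application building a
 * pattern for these parsers would therefore put item fields in network
 * order and attr/action fields in CPU order, e.g. (values are arbitrary):
 *
 *	struct rte_flow_item_eth eth_spec = {
 *		.type = rte_cpu_to_be_16(0x0807),	(big endian in the item)
 *	};
 *	struct rte_flow_action_queue queue = {
 *		.index = 3,				(CPU order in the action)
 *	};
 */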
144
145 /**
146  * Parse the rule to see if it is an n-tuple rule.
147  * And get the n-tuple filter info along the way.
148  * pattern:
149  * The first not void item can be ETH or IPV4.
150  * The second not void item must be IPV4 if the first one is ETH.
151  * The third not void item must be UDP, TCP or SCTP.
152  * The next not void item must be END.
153  * action:
154  * The first not void action should be QUEUE.
155  * The next not void action should be END.
156  * pattern example:
157  * ITEM         Spec                    Mask
158  * ETH          NULL                    NULL
159  * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
160  *              dst_addr 192.167.3.50   0xFFFFFFFF
161  *              next_proto_id   17      0xFF
162  * UDP/TCP/     src_port        80      0xFFFF
163  * SCTP         dst_port        80      0xFFFF
164  * END
165  * other members in mask and spec should be set to 0x00.
166  * item->last should be NULL.
167  *
168  * Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY.
169  *
170  */
171 static int
172 cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
173                          const struct rte_flow_item pattern[],
174                          const struct rte_flow_action actions[],
175                          struct rte_eth_ntuple_filter *filter,
176                          struct rte_flow_error *error)
177 {
178         const struct rte_flow_item *item;
179         const struct rte_flow_action *act;
180         const struct rte_flow_item_ipv4 *ipv4_spec;
181         const struct rte_flow_item_ipv4 *ipv4_mask;
182         const struct rte_flow_item_tcp *tcp_spec;
183         const struct rte_flow_item_tcp *tcp_mask;
184         const struct rte_flow_item_udp *udp_spec;
185         const struct rte_flow_item_udp *udp_mask;
186         const struct rte_flow_item_sctp *sctp_spec;
187         const struct rte_flow_item_sctp *sctp_mask;
188         const struct rte_flow_item_eth *eth_spec;
189         const struct rte_flow_item_eth *eth_mask;
190         const struct rte_flow_item_vlan *vlan_spec;
191         const struct rte_flow_item_vlan *vlan_mask;
192         struct rte_flow_item_eth eth_null;
193         struct rte_flow_item_vlan vlan_null;
194
195         if (!pattern) {
196                 rte_flow_error_set(error,
197                         EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
198                         NULL, "NULL pattern.");
199                 return -rte_errno;
200         }
201
202         if (!actions) {
203                 rte_flow_error_set(error, EINVAL,
204                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
205                                    NULL, "NULL action.");
206                 return -rte_errno;
207         }
208         if (!attr) {
209                 rte_flow_error_set(error, EINVAL,
210                                    RTE_FLOW_ERROR_TYPE_ATTR,
211                                    NULL, "NULL attribute.");
212                 return -rte_errno;
213         }
214
215         memset(&eth_null, 0, sizeof(struct rte_flow_item_eth));
216         memset(&vlan_null, 0, sizeof(struct rte_flow_item_vlan));
217
218 #ifdef RTE_LIBRTE_SECURITY
219         /**
220          *  Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY
221          */
222         act = next_no_void_action(actions, NULL);
223         if (act->type == RTE_FLOW_ACTION_TYPE_SECURITY) {
224                 const void *conf = act->conf;
225                 /* check if the next not void item is END */
226                 act = next_no_void_action(actions, act);
227                 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
228                         memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
229                         rte_flow_error_set(error, EINVAL,
230                                 RTE_FLOW_ERROR_TYPE_ACTION,
231                                 act, "Not supported action.");
232                         return -rte_errno;
233                 }
234
235                 /* get the IP pattern*/
236                 item = next_no_void_pattern(pattern, NULL);
237                 while (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
238                                 item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
239                         if (item->last ||
240                                         item->type == RTE_FLOW_ITEM_TYPE_END) {
241                                 rte_flow_error_set(error, EINVAL,
242                                         RTE_FLOW_ERROR_TYPE_ITEM,
243                                         item, "IP pattern missing.");
244                                 return -rte_errno;
245                         }
246                         item = next_no_void_pattern(pattern, item);
247                 }
248
249                 filter->proto = IPPROTO_ESP;
250                 return ixgbe_crypto_add_ingress_sa_from_flow(conf, item->spec,
251                                         item->type == RTE_FLOW_ITEM_TYPE_IPV6);
252         }
253 #endif
254
255         /* the first not void item can be MAC or IPv4 */
256         item = next_no_void_pattern(pattern, NULL);
257
258         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
259             item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
260                 rte_flow_error_set(error, EINVAL,
261                         RTE_FLOW_ERROR_TYPE_ITEM,
262                         item, "Not supported by ntuple filter");
263                 return -rte_errno;
264         }
265         /* Skip Ethernet */
266         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
267                 eth_spec = item->spec;
268                 eth_mask = item->mask;
269                 /*Not supported last point for range*/
270                 if (item->last) {
271                         rte_flow_error_set(error,
272                           EINVAL,
273                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
274                           item, "Not supported last point for range");
275                         return -rte_errno;
276
277                 }
278                 /* if the first item is MAC, the content should be NULL */
279                 if ((item->spec || item->mask) &&
280                         (memcmp(eth_spec, &eth_null,
281                                 sizeof(struct rte_flow_item_eth)) ||
282                          memcmp(eth_mask, &eth_null,
283                                 sizeof(struct rte_flow_item_eth)))) {
284                         rte_flow_error_set(error, EINVAL,
285                                 RTE_FLOW_ERROR_TYPE_ITEM,
286                                 item, "Not supported by ntuple filter");
287                         return -rte_errno;
288                 }
289                 /* check if the next not void item is IPv4 or Vlan */
290                 item = next_no_void_pattern(pattern, item);
291                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
292                         item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
293                         rte_flow_error_set(error,
294                           EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
295                           item, "Not supported by ntuple filter");
296                           return -rte_errno;
297                 }
298         }
299
300         if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
301                 vlan_spec = item->spec;
302                 vlan_mask = item->mask;
303                 /*Not supported last point for range*/
304                 if (item->last) {
305                         rte_flow_error_set(error,
306                           EINVAL,
307                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
308                           item, "Not supported last point for range");
309                         return -rte_errno;
310                 }
311                 /* the content should be NULL */
312                 if ((item->spec || item->mask) &&
313                         (memcmp(vlan_spec, &vlan_null,
314                                 sizeof(struct rte_flow_item_vlan)) ||
315                          memcmp(vlan_mask, &vlan_null,
316                                 sizeof(struct rte_flow_item_vlan)))) {
317
318                         rte_flow_error_set(error, EINVAL,
319                                 RTE_FLOW_ERROR_TYPE_ITEM,
320                                 item, "Not supported by ntuple filter");
321                         return -rte_errno;
322                 }
323                 /* check if the next not void item is IPv4 */
324                 item = next_no_void_pattern(pattern, item);
325                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
326                         rte_flow_error_set(error,
327                           EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
328                           item, "Not supported by ntuple filter");
329                         return -rte_errno;
330                 }
331         }
332
333         if (item->mask) {
334                 /* get the IPv4 info */
335                 if (!item->spec || !item->mask) {
336                         rte_flow_error_set(error, EINVAL,
337                                 RTE_FLOW_ERROR_TYPE_ITEM,
338                                 item, "Invalid ntuple mask");
339                         return -rte_errno;
340                 }
341                 /*Not supported last point for range*/
342                 if (item->last) {
343                         rte_flow_error_set(error, EINVAL,
344                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
345                                 item, "Not supported last point for range");
346                         return -rte_errno;
347                 }
348
349                 ipv4_mask = item->mask;
350                 /**
351                  * Only support src & dst addresses, protocol,
352                  * others should be masked.
353                  */
354                 if (ipv4_mask->hdr.version_ihl ||
355                     ipv4_mask->hdr.type_of_service ||
356                     ipv4_mask->hdr.total_length ||
357                     ipv4_mask->hdr.packet_id ||
358                     ipv4_mask->hdr.fragment_offset ||
359                     ipv4_mask->hdr.time_to_live ||
360                     ipv4_mask->hdr.hdr_checksum) {
361                         rte_flow_error_set(error,
362                                 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
363                                 item, "Not supported by ntuple filter");
364                         return -rte_errno;
365                 }
366
367                 filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
368                 filter->src_ip_mask = ipv4_mask->hdr.src_addr;
369                 filter->proto_mask  = ipv4_mask->hdr.next_proto_id;
370
371                 ipv4_spec = item->spec;
372                 filter->dst_ip = ipv4_spec->hdr.dst_addr;
373                 filter->src_ip = ipv4_spec->hdr.src_addr;
374                 filter->proto  = ipv4_spec->hdr.next_proto_id;
375         }
376
377         /* check if the next not void item is TCP, UDP, SCTP or END */
378         item = next_no_void_pattern(pattern, item);
379         if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
380             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
381             item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
382             item->type != RTE_FLOW_ITEM_TYPE_END) {
383                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
384                 rte_flow_error_set(error, EINVAL,
385                         RTE_FLOW_ERROR_TYPE_ITEM,
386                         item, "Not supported by ntuple filter");
387                 return -rte_errno;
388         }
389
390         if ((item->type != RTE_FLOW_ITEM_TYPE_END) &&
391                 (!item->spec && !item->mask)) {
392                 goto action;
393         }
394
395         /* get the TCP/UDP/SCTP info */
396         if (item->type != RTE_FLOW_ITEM_TYPE_END &&
397                 (!item->spec || !item->mask)) {
398                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
399                 rte_flow_error_set(error, EINVAL,
400                         RTE_FLOW_ERROR_TYPE_ITEM,
401                         item, "Invalid ntuple mask");
402                 return -rte_errno;
403         }
404
405         /*Not supported last point for range*/
406         if (item->last) {
407                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
408                 rte_flow_error_set(error, EINVAL,
409                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
410                         item, "Not supported last point for range");
411                 return -rte_errno;
412
413         }
414
415         if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
416                 tcp_mask = item->mask;
417
418                 /**
419                  * Only support src & dst ports, tcp flags,
420                  * others should be masked.
421                  */
422                 if (tcp_mask->hdr.sent_seq ||
423                     tcp_mask->hdr.recv_ack ||
424                     tcp_mask->hdr.data_off ||
425                     tcp_mask->hdr.rx_win ||
426                     tcp_mask->hdr.cksum ||
427                     tcp_mask->hdr.tcp_urp) {
428                         memset(filter, 0,
429                                 sizeof(struct rte_eth_ntuple_filter));
430                         rte_flow_error_set(error, EINVAL,
431                                 RTE_FLOW_ERROR_TYPE_ITEM,
432                                 item, "Not supported by ntuple filter");
433                         return -rte_errno;
434                 }
435
436                 filter->dst_port_mask  = tcp_mask->hdr.dst_port;
437                 filter->src_port_mask  = tcp_mask->hdr.src_port;
438                 if (tcp_mask->hdr.tcp_flags == 0xFF) {
439                         filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
440                 } else if (!tcp_mask->hdr.tcp_flags) {
441                         filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
442                 } else {
443                         memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
444                         rte_flow_error_set(error, EINVAL,
445                                 RTE_FLOW_ERROR_TYPE_ITEM,
446                                 item, "Not supported by ntuple filter");
447                         return -rte_errno;
448                 }
449
450                 tcp_spec = item->spec;
451                 filter->dst_port  = tcp_spec->hdr.dst_port;
452                 filter->src_port  = tcp_spec->hdr.src_port;
453                 filter->tcp_flags = tcp_spec->hdr.tcp_flags;
454         } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
455                 udp_mask = item->mask;
456
457                 /**
458                  * Only support src & dst ports,
459                  * others should be masked.
460                  */
461                 if (udp_mask->hdr.dgram_len ||
462                     udp_mask->hdr.dgram_cksum) {
463                         memset(filter, 0,
464                                 sizeof(struct rte_eth_ntuple_filter));
465                         rte_flow_error_set(error, EINVAL,
466                                 RTE_FLOW_ERROR_TYPE_ITEM,
467                                 item, "Not supported by ntuple filter");
468                         return -rte_errno;
469                 }
470
471                 filter->dst_port_mask = udp_mask->hdr.dst_port;
472                 filter->src_port_mask = udp_mask->hdr.src_port;
473
474                 udp_spec = item->spec;
475                 filter->dst_port = udp_spec->hdr.dst_port;
476                 filter->src_port = udp_spec->hdr.src_port;
477         } else if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
478                 sctp_mask = item->mask;
479
480                 /**
481                  * Only support src & dst ports,
482                  * others should be masked.
483                  */
484                 if (sctp_mask->hdr.tag ||
485                     sctp_mask->hdr.cksum) {
486                         memset(filter, 0,
487                                 sizeof(struct rte_eth_ntuple_filter));
488                         rte_flow_error_set(error, EINVAL,
489                                 RTE_FLOW_ERROR_TYPE_ITEM,
490                                 item, "Not supported by ntuple filter");
491                         return -rte_errno;
492                 }
493
494                 filter->dst_port_mask = sctp_mask->hdr.dst_port;
495                 filter->src_port_mask = sctp_mask->hdr.src_port;
496
497                 sctp_spec = item->spec;
498                 filter->dst_port = sctp_spec->hdr.dst_port;
499                 filter->src_port = sctp_spec->hdr.src_port;
500         } else {
501                 goto action;
502         }
503
504         /* check if the next not void item is END */
505         item = next_no_void_pattern(pattern, item);
506         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
507                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
508                 rte_flow_error_set(error, EINVAL,
509                         RTE_FLOW_ERROR_TYPE_ITEM,
510                         item, "Not supported by ntuple filter");
511                 return -rte_errno;
512         }
513
514 action:
515
516         /**
517          * n-tuple only supports forwarding,
518          * check if the first not void action is QUEUE.
519          */
520         act = next_no_void_action(actions, NULL);
521         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
522                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
523                 rte_flow_error_set(error, EINVAL,
524                         RTE_FLOW_ERROR_TYPE_ACTION,
525                         item, "Not supported action.");
526                 return -rte_errno;
527         }
528         filter->queue =
529                 ((const struct rte_flow_action_queue *)act->conf)->index;
530
531         /* check if the next not void item is END */
532         act = next_no_void_action(actions, act);
533         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
534                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
535                 rte_flow_error_set(error, EINVAL,
536                         RTE_FLOW_ERROR_TYPE_ACTION,
537                         act, "Not supported action.");
538                 return -rte_errno;
539         }
540
541         /* parse attr */
542         /* must be input direction */
543         if (!attr->ingress) {
544                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
545                 rte_flow_error_set(error, EINVAL,
546                                    RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
547                                    attr, "Only support ingress.");
548                 return -rte_errno;
549         }
550
551         /* not supported */
552         if (attr->egress) {
553                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
554                 rte_flow_error_set(error, EINVAL,
555                                    RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
556                                    attr, "Not support egress.");
557                 return -rte_errno;
558         }
559
560         /* not supported */
561         if (attr->transfer) {
562                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
563                 rte_flow_error_set(error, EINVAL,
564                                    RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
565                                    attr, "No support for transfer.");
566                 return -rte_errno;
567         }
568
569         if (attr->priority > 0xFFFF) {
570                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
571                 rte_flow_error_set(error, EINVAL,
572                                    RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
573                                    attr, "Error priority.");
574                 return -rte_errno;
575         }
576         filter->priority = (uint16_t)attr->priority;
577         if (attr->priority < IXGBE_MIN_N_TUPLE_PRIO ||
578             attr->priority > IXGBE_MAX_N_TUPLE_PRIO)
579             filter->priority = 1;
580
581         return 0;
582 }
583
584 /* a specific function for ixgbe because the flags are specific */
585 static int
586 ixgbe_parse_ntuple_filter(struct rte_eth_dev *dev,
587                           const struct rte_flow_attr *attr,
588                           const struct rte_flow_item pattern[],
589                           const struct rte_flow_action actions[],
590                           struct rte_eth_ntuple_filter *filter,
591                           struct rte_flow_error *error)
592 {
593         int ret;
594         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
595
596         MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
597
598         ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);
599
600         if (ret)
601                 return ret;
602
603 #ifdef RTE_LIBRTE_SECURITY
604         /* An ESP flow is not really a flow */
605         if (filter->proto == IPPROTO_ESP)
606                 return 0;
607 #endif
608
609         /* Ixgbe doesn't support tcp flags. */
610         if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
611                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
612                 rte_flow_error_set(error, EINVAL,
613                                    RTE_FLOW_ERROR_TYPE_ITEM,
614                                    NULL, "Not supported by ntuple filter");
615                 return -rte_errno;
616         }
617
618         /* Ixgbe doesn't support many priorities. */
619         if (filter->priority < IXGBE_MIN_N_TUPLE_PRIO ||
620             filter->priority > IXGBE_MAX_N_TUPLE_PRIO) {
621                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
622                 rte_flow_error_set(error, EINVAL,
623                         RTE_FLOW_ERROR_TYPE_ITEM,
624                         NULL, "Priority not supported by ntuple filter");
625                 return -rte_errno;
626         }
627
628         if (filter->queue >= dev->data->nb_rx_queues)
629                 return -rte_errno;
630
631         /* fixed value for ixgbe */
632         filter->flags = RTE_5TUPLE_FLAGS;
633         return 0;
634 }
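/*
 * Illustration only (not part of the driver): a minimal rule accepted by the
 * n-tuple parser above, sketched with the rte_flow item structures. The
 * addresses, ports and protocol are arbitrary example values.
 *
 *	struct rte_flow_item_ipv4 ipv4_spec = {
 *		.hdr = {
 *			.src_addr = rte_cpu_to_be_32(IPv4(192, 168, 1, 20)),
 *			.dst_addr = rte_cpu_to_be_32(IPv4(192, 167, 3, 50)),
 *			.next_proto_id = IPPROTO_UDP,
 *		},
 *	};
 *	struct rte_flow_item_ipv4 ipv4_mask = {
 *		.hdr = {
 *			.src_addr = UINT32_MAX,
 *			.dst_addr = UINT32_MAX,
 *			.next_proto_id = 0xff,
 *		},
 *	};
 *
 * followed by a UDP item whose spec sets src/dst port with 0xffff masks, an
 * END item, and a QUEUE action followed by END. All other spec/mask members
 * stay zero, and item->last is left NULL.
 */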
635
636 /**
637  * Parse the rule to see if it is an ethertype rule.
638  * And get the ethertype filter info along the way.
639  * pattern:
640  * The first not void item can be ETH.
641  * The next not void item must be END.
642  * action:
643  * The first not void action should be QUEUE.
644  * The next not void action should be END.
645  * pattern example:
646  * ITEM         Spec                    Mask
647  * ETH          type    0x0807          0xFFFF
648  * END
649  * other members in mask and spec should be set to 0x00.
650  * item->last should be NULL.
651  */
652 static int
653 cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
654                             const struct rte_flow_item *pattern,
655                             const struct rte_flow_action *actions,
656                             struct rte_eth_ethertype_filter *filter,
657                             struct rte_flow_error *error)
658 {
659         const struct rte_flow_item *item;
660         const struct rte_flow_action *act;
661         const struct rte_flow_item_eth *eth_spec;
662         const struct rte_flow_item_eth *eth_mask;
663         const struct rte_flow_action_queue *act_q;
664
665         if (!pattern) {
666                 rte_flow_error_set(error, EINVAL,
667                                 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
668                                 NULL, "NULL pattern.");
669                 return -rte_errno;
670         }
671
672         if (!actions) {
673                 rte_flow_error_set(error, EINVAL,
674                                 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
675                                 NULL, "NULL action.");
676                 return -rte_errno;
677         }
678
679         if (!attr) {
680                 rte_flow_error_set(error, EINVAL,
681                                    RTE_FLOW_ERROR_TYPE_ATTR,
682                                    NULL, "NULL attribute.");
683                 return -rte_errno;
684         }
685
686         item = next_no_void_pattern(pattern, NULL);
687         /* The first non-void item should be MAC. */
688         if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
689                 rte_flow_error_set(error, EINVAL,
690                         RTE_FLOW_ERROR_TYPE_ITEM,
691                         item, "Not supported by ethertype filter");
692                 return -rte_errno;
693         }
694
695         /*Not supported last point for range*/
696         if (item->last) {
697                 rte_flow_error_set(error, EINVAL,
698                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
699                         item, "Not supported last point for range");
700                 return -rte_errno;
701         }
702
703         /* Get the MAC info. */
704         if (!item->spec || !item->mask) {
705                 rte_flow_error_set(error, EINVAL,
706                                 RTE_FLOW_ERROR_TYPE_ITEM,
707                                 item, "Not supported by ethertype filter");
708                 return -rte_errno;
709         }
710
711         eth_spec = item->spec;
712         eth_mask = item->mask;
713
714         /* Mask bits of source MAC address must be full of 0.
715          * Mask bits of destination MAC address must be full
716          * of 1 or full of 0.
717          */
718         if (!is_zero_ether_addr(&eth_mask->src) ||
719             (!is_zero_ether_addr(&eth_mask->dst) &&
720              !is_broadcast_ether_addr(&eth_mask->dst))) {
721                 rte_flow_error_set(error, EINVAL,
722                                 RTE_FLOW_ERROR_TYPE_ITEM,
723                                 item, "Invalid ether address mask");
724                 return -rte_errno;
725         }
726
727         if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
728                 rte_flow_error_set(error, EINVAL,
729                                 RTE_FLOW_ERROR_TYPE_ITEM,
730                                 item, "Invalid ethertype mask");
731                 return -rte_errno;
732         }
733
734         /* If mask bits of destination MAC address
735          * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
736          */
737         if (is_broadcast_ether_addr(&eth_mask->dst)) {
738                 filter->mac_addr = eth_spec->dst;
739                 filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
740         } else {
741                 filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
742         }
743         filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
744
745         /* Check if the next non-void item is END. */
746         item = next_no_void_pattern(pattern, item);
747         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
748                 rte_flow_error_set(error, EINVAL,
749                                 RTE_FLOW_ERROR_TYPE_ITEM,
750                                 item, "Not supported by ethertype filter.");
751                 return -rte_errno;
752         }
753
754         /* Parse action */
755
756         act = next_no_void_action(actions, NULL);
757         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
758             act->type != RTE_FLOW_ACTION_TYPE_DROP) {
759                 rte_flow_error_set(error, EINVAL,
760                                 RTE_FLOW_ERROR_TYPE_ACTION,
761                                 act, "Not supported action.");
762                 return -rte_errno;
763         }
764
765         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
766                 act_q = (const struct rte_flow_action_queue *)act->conf;
767                 filter->queue = act_q->index;
768         } else {
769                 filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
770         }
771
772         /* Check if the next non-void item is END */
773         act = next_no_void_action(actions, act);
774         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
775                 rte_flow_error_set(error, EINVAL,
776                                 RTE_FLOW_ERROR_TYPE_ACTION,
777                                 act, "Not supported action.");
778                 return -rte_errno;
779         }
780
781         /* Parse attr */
782         /* Must be input direction */
783         if (!attr->ingress) {
784                 rte_flow_error_set(error, EINVAL,
785                                 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
786                                 attr, "Only support ingress.");
787                 return -rte_errno;
788         }
789
790         /* Not supported */
791         if (attr->egress) {
792                 rte_flow_error_set(error, EINVAL,
793                                 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
794                                 attr, "Not support egress.");
795                 return -rte_errno;
796         }
797
798         /* Not supported */
799         if (attr->transfer) {
800                 rte_flow_error_set(error, EINVAL,
801                                 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
802                                 attr, "No support for transfer.");
803                 return -rte_errno;
804         }
805
806         /* Not supported */
807         if (attr->priority) {
808                 rte_flow_error_set(error, EINVAL,
809                                 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
810                                 attr, "Not support priority.");
811                 return -rte_errno;
812         }
813
814         /* Not supported */
815         if (attr->group) {
816                 rte_flow_error_set(error, EINVAL,
817                                 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
818                                 attr, "Not support group.");
819                 return -rte_errno;
820         }
821
822         return 0;
823 }
824
825 static int
826 ixgbe_parse_ethertype_filter(struct rte_eth_dev *dev,
827                                  const struct rte_flow_attr *attr,
828                              const struct rte_flow_item pattern[],
829                              const struct rte_flow_action actions[],
830                              struct rte_eth_ethertype_filter *filter,
831                              struct rte_flow_error *error)
832 {
833         int ret;
834         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
835
836         MAC_TYPE_FILTER_SUP(hw->mac.type);
837
838         ret = cons_parse_ethertype_filter(attr, pattern,
839                                         actions, filter, error);
840
841         if (ret)
842                 return ret;
843
844         /* Ixgbe doesn't support MAC address matching. */
845         if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
846                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
847                 rte_flow_error_set(error, EINVAL,
848                         RTE_FLOW_ERROR_TYPE_ITEM,
849                         NULL, "Not supported by ethertype filter");
850                 return -rte_errno;
851         }
852
853         if (filter->queue >= dev->data->nb_rx_queues) {
854                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
855                 rte_flow_error_set(error, EINVAL,
856                         RTE_FLOW_ERROR_TYPE_ITEM,
857                         NULL, "queue index much too big");
858                 return -rte_errno;
859         }
860
861         if (filter->ether_type == ETHER_TYPE_IPv4 ||
862                 filter->ether_type == ETHER_TYPE_IPv6) {
863                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
864                 rte_flow_error_set(error, EINVAL,
865                         RTE_FLOW_ERROR_TYPE_ITEM,
866                         NULL, "IPv4/IPv6 not supported by ethertype filter");
867                 return -rte_errno;
868         }
869
870         if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
871                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
872                 rte_flow_error_set(error, EINVAL,
873                         RTE_FLOW_ERROR_TYPE_ITEM,
874                         NULL, "mac compare is unsupported");
875                 return -rte_errno;
876         }
877
878         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
879                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
880                 rte_flow_error_set(error, EINVAL,
881                         RTE_FLOW_ERROR_TYPE_ITEM,
882                         NULL, "drop option is unsupported");
883                 return -rte_errno;
884         }
885
886         return 0;
887 }
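/*
 * Illustration only (not part of the driver): an ethertype rule accepted by
 * the parser above matches the EtherType field alone (the example type is
 * arbitrary):
 *
 *	struct rte_flow_item_eth eth_spec = {
 *		.type = rte_cpu_to_be_16(0x0807),
 *	};
 *	struct rte_flow_item_eth eth_mask = {
 *		.type = UINT16_MAX,
 *	};
 *
 * followed by an END item and a single QUEUE action. MAC address matching,
 * the DROP action, IPv4/IPv6 EtherTypes and a non-zero priority are all
 * rejected by the checks above.
 */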
888
889 /**
890  * Parse the rule to see if it is a TCP SYN rule.
891  * And get the TCP SYN filter info along the way.
892  * pattern:
893  * The first not void item must be ETH.
894  * The second not void item must be IPV4 or IPV6.
895  * The third not void item must be TCP.
896  * The next not void item must be END.
897  * action:
898  * The first not void action should be QUEUE.
899  * The next not void action should be END.
900  * pattern example:
901  * ITEM         Spec                    Mask
902  * ETH          NULL                    NULL
903  * IPV4/IPV6    NULL                    NULL
904  * TCP          tcp_flags       0x02    0xFF
905  * END
906  * other members in mask and spec should be set to 0x00.
907  * item->last should be NULL.
908  */
909 static int
910 cons_parse_syn_filter(const struct rte_flow_attr *attr,
911                                 const struct rte_flow_item pattern[],
912                                 const struct rte_flow_action actions[],
913                                 struct rte_eth_syn_filter *filter,
914                                 struct rte_flow_error *error)
915 {
916         const struct rte_flow_item *item;
917         const struct rte_flow_action *act;
918         const struct rte_flow_item_tcp *tcp_spec;
919         const struct rte_flow_item_tcp *tcp_mask;
920         const struct rte_flow_action_queue *act_q;
921
922         if (!pattern) {
923                 rte_flow_error_set(error, EINVAL,
924                                 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
925                                 NULL, "NULL pattern.");
926                 return -rte_errno;
927         }
928
929         if (!actions) {
930                 rte_flow_error_set(error, EINVAL,
931                                 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
932                                 NULL, "NULL action.");
933                 return -rte_errno;
934         }
935
936         if (!attr) {
937                 rte_flow_error_set(error, EINVAL,
938                                    RTE_FLOW_ERROR_TYPE_ATTR,
939                                    NULL, "NULL attribute.");
940                 return -rte_errno;
941         }
942
943
944         /* the first not void item should be MAC or IPv4 or IPv6 or TCP */
945         item = next_no_void_pattern(pattern, NULL);
946         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
947             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
948             item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
949             item->type != RTE_FLOW_ITEM_TYPE_TCP) {
950                 rte_flow_error_set(error, EINVAL,
951                                 RTE_FLOW_ERROR_TYPE_ITEM,
952                                 item, "Not supported by syn filter");
953                 return -rte_errno;
954         }
955         /*Not supported last point for range*/
956         if (item->last) {
957                 rte_flow_error_set(error, EINVAL,
958                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
959                         item, "Not supported last point for range");
960                 return -rte_errno;
961         }
962
963         /* Skip Ethernet */
964         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
965                 /* if the item is MAC, the content should be NULL */
966                 if (item->spec || item->mask) {
967                         rte_flow_error_set(error, EINVAL,
968                                 RTE_FLOW_ERROR_TYPE_ITEM,
969                                 item, "Invalid SYN address mask");
970                         return -rte_errno;
971                 }
972
973                 /* check if the next not void item is IPv4 or IPv6 */
974                 item = next_no_void_pattern(pattern, item);
975                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
976                     item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
977                         rte_flow_error_set(error, EINVAL,
978                                 RTE_FLOW_ERROR_TYPE_ITEM,
979                                 item, "Not supported by syn filter");
980                         return -rte_errno;
981                 }
982         }
983
984         /* Skip IP */
985         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
986             item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
987                 /* if the item is IP, the content should be NULL */
988                 if (item->spec || item->mask) {
989                         rte_flow_error_set(error, EINVAL,
990                                 RTE_FLOW_ERROR_TYPE_ITEM,
991                                 item, "Invalid SYN mask");
992                         return -rte_errno;
993                 }
994
995                 /* check if the next not void item is TCP */
996                 item = next_no_void_pattern(pattern, item);
997                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
998                         rte_flow_error_set(error, EINVAL,
999                                 RTE_FLOW_ERROR_TYPE_ITEM,
1000                                 item, "Not supported by syn filter");
1001                         return -rte_errno;
1002                 }
1003         }
1004
1005         /* Get the TCP info. Only support SYN. */
1006         if (!item->spec || !item->mask) {
1007                 rte_flow_error_set(error, EINVAL,
1008                                 RTE_FLOW_ERROR_TYPE_ITEM,
1009                                 item, "Invalid SYN mask");
1010                 return -rte_errno;
1011         }
1012         /*Not supported last point for range*/
1013         if (item->last) {
1014                 rte_flow_error_set(error, EINVAL,
1015                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1016                         item, "Not supported last point for range");
1017                 return -rte_errno;
1018         }
1019
1020         tcp_spec = item->spec;
1021         tcp_mask = item->mask;
1022         if (!(tcp_spec->hdr.tcp_flags & TCP_SYN_FLAG) ||
1023             tcp_mask->hdr.src_port ||
1024             tcp_mask->hdr.dst_port ||
1025             tcp_mask->hdr.sent_seq ||
1026             tcp_mask->hdr.recv_ack ||
1027             tcp_mask->hdr.data_off ||
1028             tcp_mask->hdr.tcp_flags != TCP_SYN_FLAG ||
1029             tcp_mask->hdr.rx_win ||
1030             tcp_mask->hdr.cksum ||
1031             tcp_mask->hdr.tcp_urp) {
1032                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1033                 rte_flow_error_set(error, EINVAL,
1034                                 RTE_FLOW_ERROR_TYPE_ITEM,
1035                                 item, "Not supported by syn filter");
1036                 return -rte_errno;
1037         }
1038
1039         /* check if the next not void item is END */
1040         item = next_no_void_pattern(pattern, item);
1041         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1042                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1043                 rte_flow_error_set(error, EINVAL,
1044                                 RTE_FLOW_ERROR_TYPE_ITEM,
1045                                 item, "Not supported by syn filter");
1046                 return -rte_errno;
1047         }
1048
1049         /* check if the first not void action is QUEUE. */
1050         act = next_no_void_action(actions, NULL);
1051         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
1052                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1053                 rte_flow_error_set(error, EINVAL,
1054                                 RTE_FLOW_ERROR_TYPE_ACTION,
1055                                 act, "Not supported action.");
1056                 return -rte_errno;
1057         }
1058
1059         act_q = (const struct rte_flow_action_queue *)act->conf;
1060         filter->queue = act_q->index;
1061         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
1062                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1063                 rte_flow_error_set(error, EINVAL,
1064                                 RTE_FLOW_ERROR_TYPE_ACTION,
1065                                 act, "Not supported action.");
1066                 return -rte_errno;
1067         }
1068
1069         /* check if the next not void item is END */
1070         act = next_no_void_action(actions, act);
1071         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1072                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1073                 rte_flow_error_set(error, EINVAL,
1074                                 RTE_FLOW_ERROR_TYPE_ACTION,
1075                                 act, "Not supported action.");
1076                 return -rte_errno;
1077         }
1078
1079         /* parse attr */
1080         /* must be input direction */
1081         if (!attr->ingress) {
1082                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1083                 rte_flow_error_set(error, EINVAL,
1084                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1085                         attr, "Only support ingress.");
1086                 return -rte_errno;
1087         }
1088
1089         /* not supported */
1090         if (attr->egress) {
1091                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1092                 rte_flow_error_set(error, EINVAL,
1093                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1094                         attr, "Not support egress.");
1095                 return -rte_errno;
1096         }
1097
1098         /* not supported */
1099         if (attr->transfer) {
1100                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1101                 rte_flow_error_set(error, EINVAL,
1102                         RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
1103                         attr, "No support for transfer.");
1104                 return -rte_errno;
1105         }
1106
1107         /* Support 2 priorities, the lowest or highest. */
1108         if (!attr->priority) {
1109                 filter->hig_pri = 0;
1110         } else if (attr->priority == (uint32_t)~0U) {
1111                 filter->hig_pri = 1;
1112         } else {
1113                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1114                 rte_flow_error_set(error, EINVAL,
1115                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1116                         attr, "Not support priority.");
1117                 return -rte_errno;
1118         }
1119
1120         return 0;
1121 }
1122
1123 static int
1124 ixgbe_parse_syn_filter(struct rte_eth_dev *dev,
1125                                  const struct rte_flow_attr *attr,
1126                              const struct rte_flow_item pattern[],
1127                              const struct rte_flow_action actions[],
1128                              struct rte_eth_syn_filter *filter,
1129                              struct rte_flow_error *error)
1130 {
1131         int ret;
1132         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1133
1134         MAC_TYPE_FILTER_SUP(hw->mac.type);
1135
1136         ret = cons_parse_syn_filter(attr, pattern,
1137                                         actions, filter, error);
1138
1139         if (filter->queue >= dev->data->nb_rx_queues)
1140                 return -rte_errno;
1141
1142         if (ret)
1143                 return ret;
1144
1145         return 0;
1146 }
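/*
 * Illustration only (not part of the driver): a SYN rule accepted by the
 * parser above masks every TCP field except the SYN flag:
 *
 *	struct rte_flow_item_tcp tcp_spec = {
 *		.hdr = { .tcp_flags = TCP_SYN_FLAG },
 *	};
 *	struct rte_flow_item_tcp tcp_mask = {
 *		.hdr = { .tcp_flags = TCP_SYN_FLAG },
 *	};
 *
 * optionally preceded by ETH and IPV4/IPV6 items with no spec or mask,
 * followed by END, and a single QUEUE action. Attribute priority 0 requests
 * the low priority, UINT32_MAX the high one; any other value is rejected.
 */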
1147
1148 /**
1149  * Parse the rule to see if it is an L2 tunnel rule.
1150  * And get the L2 tunnel filter info along the way.
1151  * Only support E-tag now.
1152  * pattern:
1153  * The first not void item can be E_TAG.
1154  * The next not void item must be END.
1155  * action:
1156  * The first not void action should be VF or PF.
1157  * The next not void action should be END.
1158  * pattern example:
1159  * ITEM         Spec                    Mask
1160  * E_TAG        grp             0x1     0x3
1161  *              e_cid_base      0x309   0xFFF
1162  * END
1163  * other members in mask and spec should be set to 0x00.
1164  * item->last should be NULL.
1165  */
1166 static int
1167 cons_parse_l2_tn_filter(struct rte_eth_dev *dev,
1168                         const struct rte_flow_attr *attr,
1169                         const struct rte_flow_item pattern[],
1170                         const struct rte_flow_action actions[],
1171                         struct rte_eth_l2_tunnel_conf *filter,
1172                         struct rte_flow_error *error)
1173 {
1174         const struct rte_flow_item *item;
1175         const struct rte_flow_item_e_tag *e_tag_spec;
1176         const struct rte_flow_item_e_tag *e_tag_mask;
1177         const struct rte_flow_action *act;
1178         const struct rte_flow_action_vf *act_vf;
1179         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1180
1181         if (!pattern) {
1182                 rte_flow_error_set(error, EINVAL,
1183                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1184                         NULL, "NULL pattern.");
1185                 return -rte_errno;
1186         }
1187
1188         if (!actions) {
1189                 rte_flow_error_set(error, EINVAL,
1190                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1191                                    NULL, "NULL action.");
1192                 return -rte_errno;
1193         }
1194
1195         if (!attr) {
1196                 rte_flow_error_set(error, EINVAL,
1197                                    RTE_FLOW_ERROR_TYPE_ATTR,
1198                                    NULL, "NULL attribute.");
1199                 return -rte_errno;
1200         }
1201
1202         /* The first not void item should be e-tag. */
1203         item = next_no_void_pattern(pattern, NULL);
1204         if (item->type != RTE_FLOW_ITEM_TYPE_E_TAG) {
1205                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1206                 rte_flow_error_set(error, EINVAL,
1207                         RTE_FLOW_ERROR_TYPE_ITEM,
1208                         item, "Not supported by L2 tunnel filter");
1209                 return -rte_errno;
1210         }
1211
1212         if (!item->spec || !item->mask) {
1213                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1214                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
1215                         item, "Not supported by L2 tunnel filter");
1216                 return -rte_errno;
1217         }
1218
1219         /*Not supported last point for range*/
1220         if (item->last) {
1221                 rte_flow_error_set(error, EINVAL,
1222                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1223                         item, "Not supported last point for range");
1224                 return -rte_errno;
1225         }
1226
1227         e_tag_spec = item->spec;
1228         e_tag_mask = item->mask;
1229
1230         /* Only care about GRP and E cid base. */
1231         if (e_tag_mask->epcp_edei_in_ecid_b ||
1232             e_tag_mask->in_ecid_e ||
1233             e_tag_mask->ecid_e ||
1234             e_tag_mask->rsvd_grp_ecid_b != rte_cpu_to_be_16(0x3FFF)) {
1235                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1236                 rte_flow_error_set(error, EINVAL,
1237                         RTE_FLOW_ERROR_TYPE_ITEM,
1238                         item, "Not supported by L2 tunnel filter");
1239                 return -rte_errno;
1240         }
1241
1242         filter->l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
1243         /**
1244          * grp and e_cid_base are bit fields and only use 14 bits.
1245          * e-tag id is taken as little endian by HW.
1246          */
1247         filter->tunnel_id = rte_be_to_cpu_16(e_tag_spec->rsvd_grp_ecid_b);
1248
1249         /* check if the next not void item is END */
1250         item = next_no_void_pattern(pattern, item);
1251         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1252                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1253                 rte_flow_error_set(error, EINVAL,
1254                         RTE_FLOW_ERROR_TYPE_ITEM,
1255                         item, "Not supported by L2 tunnel filter");
1256                 return -rte_errno;
1257         }
1258
1259         /* parse attr */
1260         /* must be input direction */
1261         if (!attr->ingress) {
1262                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1263                 rte_flow_error_set(error, EINVAL,
1264                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1265                         attr, "Only support ingress.");
1266                 return -rte_errno;
1267         }
1268
1269         /* not supported */
1270         if (attr->egress) {
1271                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1272                 rte_flow_error_set(error, EINVAL,
1273                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1274                         attr, "Not support egress.");
1275                 return -rte_errno;
1276         }
1277
1278         /* not supported */
1279         if (attr->transfer) {
1280                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1281                 rte_flow_error_set(error, EINVAL,
1282                         RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
1283                         attr, "No support for transfer.");
1284                 return -rte_errno;
1285         }
1286
1287         /* not supported */
1288         if (attr->priority) {
1289                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1290                 rte_flow_error_set(error, EINVAL,
1291                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1292                         attr, "Not support priority.");
1293                 return -rte_errno;
1294         }
1295
1296         /* check if the first not void action is VF or PF. */
1297         act = next_no_void_action(actions, NULL);
1298         if (act->type != RTE_FLOW_ACTION_TYPE_VF &&
1299                         act->type != RTE_FLOW_ACTION_TYPE_PF) {
1300                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1301                 rte_flow_error_set(error, EINVAL,
1302                         RTE_FLOW_ERROR_TYPE_ACTION,
1303                         act, "Not supported action.");
1304                 return -rte_errno;
1305         }
1306
1307         if (act->type == RTE_FLOW_ACTION_TYPE_VF) {
1308                 act_vf = (const struct rte_flow_action_vf *)act->conf;
1309                 filter->pool = act_vf->id;
1310         } else {
1311                 filter->pool = pci_dev->max_vfs;
1312         }
1313
1314         /* check if the next not void item is END */
1315         act = next_no_void_action(actions, act);
1316         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1317                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1318                 rte_flow_error_set(error, EINVAL,
1319                         RTE_FLOW_ERROR_TYPE_ACTION,
1320                         act, "Not supported action.");
1321                 return -rte_errno;
1322         }
1323
1324         return 0;
1325 }
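
/*
 * Illustrative sketch only, kept under "#if 0" and not compiled with the
 * driver: one way an application could fill the E_TAG item and the action
 * list accepted by cons_parse_l2_tn_filter() above.  The E-CID base and
 * VF id used here are hypothetical values; parsing of the E_TAG mask
 * happens earlier in that function and is not repeated in this sketch.
 */
#if 0
static void
example_fill_e_tag_rule(struct rte_flow_item_e_tag *e_tag_spec,
			struct rte_flow_action actions[2],
			struct rte_flow_action_vf *vf)
{
	memset(e_tag_spec, 0, sizeof(*e_tag_spec));
	memset(actions, 0, 2 * sizeof(actions[0]));
	memset(vf, 0, sizeof(*vf));

	/*
	 * GRP and the E-CID base share 14 bits of rsvd_grp_ecid_b; the
	 * driver reads the field with rte_be_to_cpu_16(), so the
	 * application stores it in network byte order.
	 */
	e_tag_spec->rsvd_grp_ecid_b = rte_cpu_to_be_16(0x0309);

	/* Forward matching packets to VF 2; a PF action selects the PF pool. */
	vf->id = 2;
	actions[0].type = RTE_FLOW_ACTION_TYPE_VF;
	actions[0].conf = vf;
	actions[1].type = RTE_FLOW_ACTION_TYPE_END;
}
#endif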
1326
1327 static int
1328 ixgbe_parse_l2_tn_filter(struct rte_eth_dev *dev,
1329                         const struct rte_flow_attr *attr,
1330                         const struct rte_flow_item pattern[],
1331                         const struct rte_flow_action actions[],
1332                         struct rte_eth_l2_tunnel_conf *l2_tn_filter,
1333                         struct rte_flow_error *error)
1334 {
1335         int ret = 0;
1336         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1337         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1338         uint16_t vf_num;
1339
1340         ret = cons_parse_l2_tn_filter(dev, attr, pattern,
1341                                 actions, l2_tn_filter, error);
1342
1343         if (hw->mac.type != ixgbe_mac_X550 &&
1344                 hw->mac.type != ixgbe_mac_X550EM_x &&
1345                 hw->mac.type != ixgbe_mac_X550EM_a) {
1346                 memset(l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1347                 rte_flow_error_set(error, EINVAL,
1348                         RTE_FLOW_ERROR_TYPE_ITEM,
1349                         NULL, "Not supported by L2 tunnel filter");
1350                 return -rte_errno;
1351         }
1352
1353         vf_num = pci_dev->max_vfs;
1354
1355         if (l2_tn_filter->pool > vf_num)
1356                 return -rte_errno;
1357
1358         return ret;
1359 }
1360
1361 /* Parse to get the attr and action info of flow director rule. */
1362 static int
1363 ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
1364                           const struct rte_flow_action actions[],
1365                           struct ixgbe_fdir_rule *rule,
1366                           struct rte_flow_error *error)
1367 {
1368         const struct rte_flow_action *act;
1369         const struct rte_flow_action_queue *act_q;
1370         const struct rte_flow_action_mark *mark;
1371
1372         /* parse attr */
1373         /* must be input direction */
1374         if (!attr->ingress) {
1375                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1376                 rte_flow_error_set(error, EINVAL,
1377                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1378                         attr, "Only support ingress.");
1379                 return -rte_errno;
1380         }
1381
1382         /* not supported */
1383         if (attr->egress) {
1384                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1385                 rte_flow_error_set(error, EINVAL,
1386                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1387                         attr, "Not support egress.");
1388                 return -rte_errno;
1389         }
1390
1391         /* not supported */
1392         if (attr->transfer) {
1393                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1394                 rte_flow_error_set(error, EINVAL,
1395                         RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
1396                         attr, "No support for transfer.");
1397                 return -rte_errno;
1398         }
1399
1400         /* not supported */
1401         if (attr->priority) {
1402                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1403                 rte_flow_error_set(error, EINVAL,
1404                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1405                         attr, "Not support priority.");
1406                 return -rte_errno;
1407         }
1408
1409         /* check if the first not void action is QUEUE or DROP. */
1410         act = next_no_void_action(actions, NULL);
1411         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
1412             act->type != RTE_FLOW_ACTION_TYPE_DROP) {
1413                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1414                 rte_flow_error_set(error, EINVAL,
1415                         RTE_FLOW_ERROR_TYPE_ACTION,
1416                         act, "Not supported action.");
1417                 return -rte_errno;
1418         }
1419
1420         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
1421                 act_q = (const struct rte_flow_action_queue *)act->conf;
1422                 rule->queue = act_q->index;
1423         } else { /* drop */
1424                 /* signature mode does not support drop action. */
1425                 if (rule->mode == RTE_FDIR_MODE_SIGNATURE) {
1426                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1427                         rte_flow_error_set(error, EINVAL,
1428                                 RTE_FLOW_ERROR_TYPE_ACTION,
1429                                 act, "Not supported action.");
1430                         return -rte_errno;
1431                 }
1432                 rule->fdirflags = IXGBE_FDIRCMD_DROP;
1433         }
1434
1435         /* check if the next not void action is MARK or END */
1436         act = next_no_void_action(actions, act);
1437         if ((act->type != RTE_FLOW_ACTION_TYPE_MARK) &&
1438                 (act->type != RTE_FLOW_ACTION_TYPE_END)) {
1439                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1440                 rte_flow_error_set(error, EINVAL,
1441                         RTE_FLOW_ERROR_TYPE_ACTION,
1442                         act, "Not supported action.");
1443                 return -rte_errno;
1444         }
1445
1446         rule->soft_id = 0;
1447
1448         if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
1449                 mark = (const struct rte_flow_action_mark *)act->conf;
1450                 rule->soft_id = mark->id;
1451                 act = next_no_void_action(actions, act);
1452         }
1453
1454         /* check if the next not void action is END */
1455         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1456                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1457                 rte_flow_error_set(error, EINVAL,
1458                         RTE_FLOW_ERROR_TYPE_ACTION,
1459                         act, "Not supported action.");
1460                 return -rte_errno;
1461         }
1462
1463         return 0;
1464 }
1465
1466 /* Search the next not void pattern item, skipping FUZZY items. */
1467 static inline
1468 const struct rte_flow_item *next_no_fuzzy_pattern(
1469                 const struct rte_flow_item pattern[],
1470                 const struct rte_flow_item *cur)
1471 {
1472         const struct rte_flow_item *next =
1473                 next_no_void_pattern(pattern, cur);
1474         while (1) {
1475                 if (next->type != RTE_FLOW_ITEM_TYPE_FUZZY)
1476                         return next;
1477                 next = next_no_void_pattern(pattern, next);
1478         }
1479 }
1480
1481 static inline uint8_t signature_match(const struct rte_flow_item pattern[])
1482 {
1483         const struct rte_flow_item_fuzzy *spec, *last, *mask;
1484         const struct rte_flow_item *item;
1485         uint32_t sh, lh, mh;
1486         int i = 0;
1487
1488         while (1) {
1489                 item = pattern + i;
1490                 if (item->type == RTE_FLOW_ITEM_TYPE_END)
1491                         break;
1492
1493                 if (item->type == RTE_FLOW_ITEM_TYPE_FUZZY) {
1494                         spec = item->spec;
1495                         last = item->last;
1496                         mask = item->mask;
1497
1498                         if (!spec || !mask)
1499                                 return 0;
1500
1501                         sh = spec->thresh;
1502
1503                         if (!last)
1504                                 lh = sh;
1505                         else
1506                                 lh = last->thresh;
1507
1508                         mh = mask->thresh;
1509                         sh = sh & mh;
1510                         lh = lh & mh;
1511
1512                         if (!sh || sh > lh)
1513                                 return 0;
1514
1515                         return 1;
1516                 }
1517
1518                 i++;
1519         }
1520
1521         return 0;
1522 }
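
/*
 * Illustrative sketch only, kept under "#if 0" and not compiled with the
 * driver: per signature_match() above, an application requests signature
 * mode by placing a FUZZY item anywhere in the pattern with a non-zero
 * masked threshold.  The threshold value and function name here are
 * hypothetical.
 */
#if 0
static void
example_fill_fuzzy_item(struct rte_flow_item *item,
			struct rte_flow_item_fuzzy *spec,
			struct rte_flow_item_fuzzy *mask)
{
	memset(spec, 0, sizeof(*spec));
	memset(mask, 0, sizeof(*mask));

	spec->thresh = 1;
	mask->thresh = UINT32_MAX;

	item->type = RTE_FLOW_ITEM_TYPE_FUZZY;
	item->spec = spec;
	item->last = NULL;	/* with no 'last', lh defaults to sh above */
	item->mask = mask;
}
#endif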
1523
1524 /**
1525  * Parse the rule to see if it is an IP or MAC VLAN flow director rule,
1526  * and get the flow director filter info as well.
1527  * UDP/TCP/SCTP PATTERN:
1528  * The first not void item can be ETH or IPV4 or IPV6
1529  * The second not void item must be IPV4 or IPV6 if the first one is ETH.
1530  * The next not void item could be UDP or TCP or SCTP (optional)
1531  * The next not void item could be RAW (for flexbyte, optional)
1532  * The next not void item must be END.
1533  * A Fuzzy Match pattern can appear at any place before END.
1534  * Fuzzy Match is optional for IPV4 but is required for IPV6
1535  * MAC VLAN PATTERN:
1536  * The first not void item must be ETH.
1537  * The second not void item must be MAC VLAN.
1538  * The next not void item must be END.
1539  * ACTION:
1540  * The first not void action should be QUEUE or DROP.
1541  * The second not void action is optional and should be MARK;
1542  * mark_id is a uint32_t number.
1543  * The next not void action should be END.
1544  * UDP/TCP/SCTP pattern example:
1545  * ITEM         Spec                    Mask
1546  * ETH          NULL                    NULL
1547  * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
1548  *              dst_addr 192.167.3.50   0xFFFFFFFF
1549  * UDP/TCP/SCTP src_port        80      0xFFFF
1550  *              dst_port        80      0xFFFF
1551  * FLEX relative        0       0x1
1552  *              search          0       0x1
1553  *              reserved        0       0
1554  *              offset          12      0xFFFFFFFF
1555  *              limit           0       0xFFFF
1556  *              length          2       0xFFFF
1557  *              pattern[0]      0x86    0xFF
1558  *              pattern[1]      0xDD    0xFF
1559  * END
1560  * MAC VLAN pattern example:
1561  * ITEM         Spec                    Mask
1562  * ETH          dst_addr
1563  *              {0xAC, 0x7B, 0xA1,      {0xFF, 0xFF, 0xFF,
1564  *              0x2C, 0x6D, 0x36}       0xFF, 0xFF, 0xFF}
1565  * MAC VLAN     tci     0x2016          0xEFFF
1566  * END
1567  * Other members in mask and spec should be set to 0x00.
1568  * Item->last should be NULL.
1569  */
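
/*
 * Illustrative sketch only, kept under "#if 0" and not compiled with the
 * driver: one way an application could express the UDP perfect-match
 * pattern from the table above through the generic rte_flow API, assuming
 * the port is configured for perfect-mode flow director.  The port id,
 * queue index, mark id, addresses and ports are hypothetical values.
 */
#if 0
static struct rte_flow *
example_create_fdir_udp_flow(uint16_t port_id, struct rte_flow_error *err)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_ipv4 ip_spec, ip_mask;
	struct rte_flow_item_udp udp_spec, udp_mask;
	struct rte_flow_item pattern[4];
	struct rte_flow_action_queue queue = { .index = 1 };
	struct rte_flow_action_mark mark = { .id = 0x1234 };
	struct rte_flow_action actions[3];

	memset(&ip_spec, 0, sizeof(ip_spec));
	memset(&ip_mask, 0, sizeof(ip_mask));
	memset(&udp_spec, 0, sizeof(udp_spec));
	memset(&udp_mask, 0, sizeof(udp_mask));
	memset(pattern, 0, sizeof(pattern));
	memset(actions, 0, sizeof(actions));

	/* 192.168.1.20 -> 192.167.3.50, both addresses fully masked. */
	ip_spec.hdr.src_addr = rte_cpu_to_be_32(0xC0A80114);
	ip_spec.hdr.dst_addr = rte_cpu_to_be_32(0xC0A70332);
	ip_mask.hdr.src_addr = rte_cpu_to_be_32(0xFFFFFFFF);
	ip_mask.hdr.dst_addr = rte_cpu_to_be_32(0xFFFFFFFF);

	/* UDP source and destination port 80, both fully masked. */
	udp_spec.hdr.src_port = rte_cpu_to_be_16(80);
	udp_spec.hdr.dst_port = rte_cpu_to_be_16(80);
	udp_mask.hdr.src_port = rte_cpu_to_be_16(0xFFFF);
	udp_mask.hdr.dst_port = rte_cpu_to_be_16(0xFFFF);

	pattern[0].type = RTE_FLOW_ITEM_TYPE_ETH;	/* spec/mask left NULL */
	pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV4;
	pattern[1].spec = &ip_spec;
	pattern[1].mask = &ip_mask;
	pattern[2].type = RTE_FLOW_ITEM_TYPE_UDP;
	pattern[2].spec = &udp_spec;
	pattern[2].mask = &udp_mask;
	pattern[3].type = RTE_FLOW_ITEM_TYPE_END;

	actions[0].type = RTE_FLOW_ACTION_TYPE_QUEUE;
	actions[0].conf = &queue;
	actions[1].type = RTE_FLOW_ACTION_TYPE_MARK;	/* optional MARK */
	actions[1].conf = &mark;
	actions[2].type = RTE_FLOW_ACTION_TYPE_END;

	return rte_flow_create(port_id, &attr, pattern, actions, err);
}
#endif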
1570 static int
1571 ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
1572                                const struct rte_flow_attr *attr,
1573                                const struct rte_flow_item pattern[],
1574                                const struct rte_flow_action actions[],
1575                                struct ixgbe_fdir_rule *rule,
1576                                struct rte_flow_error *error)
1577 {
1578         const struct rte_flow_item *item;
1579         const struct rte_flow_item_eth *eth_spec;
1580         const struct rte_flow_item_eth *eth_mask;
1581         const struct rte_flow_item_ipv4 *ipv4_spec;
1582         const struct rte_flow_item_ipv4 *ipv4_mask;
1583         const struct rte_flow_item_ipv6 *ipv6_spec;
1584         const struct rte_flow_item_ipv6 *ipv6_mask;
1585         const struct rte_flow_item_tcp *tcp_spec;
1586         const struct rte_flow_item_tcp *tcp_mask;
1587         const struct rte_flow_item_udp *udp_spec;
1588         const struct rte_flow_item_udp *udp_mask;
1589         const struct rte_flow_item_sctp *sctp_spec;
1590         const struct rte_flow_item_sctp *sctp_mask;
1591         const struct rte_flow_item_vlan *vlan_spec;
1592         const struct rte_flow_item_vlan *vlan_mask;
1593         const struct rte_flow_item_raw *raw_mask;
1594         const struct rte_flow_item_raw *raw_spec;
1595         uint8_t j;
1596
1597         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1598
1599         if (!pattern) {
1600                 rte_flow_error_set(error, EINVAL,
1601                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1602                         NULL, "NULL pattern.");
1603                 return -rte_errno;
1604         }
1605
1606         if (!actions) {
1607                 rte_flow_error_set(error, EINVAL,
1608                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1609                                    NULL, "NULL action.");
1610                 return -rte_errno;
1611         }
1612
1613         if (!attr) {
1614                 rte_flow_error_set(error, EINVAL,
1615                                    RTE_FLOW_ERROR_TYPE_ATTR,
1616                                    NULL, "NULL attribute.");
1617                 return -rte_errno;
1618         }
1619
1620         /**
1621          * Some fields may not be provided. Set spec to 0 and mask to the
1622          * default value, so we need not do anything later for unprovided fields.
1623          */
1624         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1625         memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
1626         rule->mask.vlan_tci_mask = 0;
1627         rule->mask.flex_bytes_mask = 0;
1628
1629         /**
1630          * The first not void item should be
1631          * MAC or IPv4 or IPv6 or TCP or UDP or SCTP.
1632          */
1633         item = next_no_fuzzy_pattern(pattern, NULL);
1634         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
1635             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
1636             item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
1637             item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1638             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1639             item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
1640                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1641                 rte_flow_error_set(error, EINVAL,
1642                         RTE_FLOW_ERROR_TYPE_ITEM,
1643                         item, "Not supported by fdir filter");
1644                 return -rte_errno;
1645         }
1646
1647         if (signature_match(pattern))
1648                 rule->mode = RTE_FDIR_MODE_SIGNATURE;
1649         else
1650                 rule->mode = RTE_FDIR_MODE_PERFECT;
1651
1652         /*Not supported last point for range*/
1653         if (item->last) {
1654                 rte_flow_error_set(error, EINVAL,
1655                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1656                         item, "Not supported last point for range");
1657                 return -rte_errno;
1658         }
1659
1660         /* Get the MAC info. */
1661         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
1662                 /**
1663                  * Only the VLAN and dst MAC address are supported;
1664                  * other fields should be masked.
1665                  */
1666                 if (item->spec && !item->mask) {
1667                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1668                         rte_flow_error_set(error, EINVAL,
1669                                 RTE_FLOW_ERROR_TYPE_ITEM,
1670                                 item, "Not supported by fdir filter");
1671                         return -rte_errno;
1672                 }
1673
1674                 if (item->spec) {
1675                         rule->b_spec = TRUE;
1676                         eth_spec = item->spec;
1677
1678                         /* Get the dst MAC. */
1679                         for (j = 0; j < ETHER_ADDR_LEN; j++) {
1680                                 rule->ixgbe_fdir.formatted.inner_mac[j] =
1681                                         eth_spec->dst.addr_bytes[j];
1682                         }
1683                 }
1684
1685
1686                 if (item->mask) {
1687
1688                         rule->b_mask = TRUE;
1689                         eth_mask = item->mask;
1690
1691                         /* Ether type should be masked. */
1692                         if (eth_mask->type ||
1693                             rule->mode == RTE_FDIR_MODE_SIGNATURE) {
1694                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1695                                 rte_flow_error_set(error, EINVAL,
1696                                         RTE_FLOW_ERROR_TYPE_ITEM,
1697                                         item, "Not supported by fdir filter");
1698                                 return -rte_errno;
1699                         }
1700
1701                         /* If the Ethernet header is meaningful, this is MAC VLAN mode. */
1702                         rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN;
1703
1704                         /**
1705                          * The src MAC address must be fully masked out,
1706                          * and a partial dst MAC address mask is not supported.
1707                          */
1708                         for (j = 0; j < ETHER_ADDR_LEN; j++) {
1709                                 if (eth_mask->src.addr_bytes[j] ||
1710                                         eth_mask->dst.addr_bytes[j] != 0xFF) {
1711                                         memset(rule, 0,
1712                                         sizeof(struct ixgbe_fdir_rule));
1713                                         rte_flow_error_set(error, EINVAL,
1714                                         RTE_FLOW_ERROR_TYPE_ITEM,
1715                                         item, "Not supported by fdir filter");
1716                                         return -rte_errno;
1717                                 }
1718                         }
1719
1720                         /* When there is no VLAN item, the TCI is considered fully masked. */
1721                         rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
1722                 }
1723                 /** If both spec and mask are NULL,
1724                  * it means we don't care about the ETH header.
1725                  * Do nothing.
1726                  */
1727
1728                 /**
1729                  * Check if the next not void item is vlan or ipv4.
1730                  * IPv6 is not supported.
1731                  */
1732                 item = next_no_fuzzy_pattern(pattern, item);
1733                 if (rule->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
1734                         if (item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
1735                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1736                                 rte_flow_error_set(error, EINVAL,
1737                                         RTE_FLOW_ERROR_TYPE_ITEM,
1738                                         item, "Not supported by fdir filter");
1739                                 return -rte_errno;
1740                         }
1741                 } else {
1742                         if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
1743                                         item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
1744                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1745                                 rte_flow_error_set(error, EINVAL,
1746                                         RTE_FLOW_ERROR_TYPE_ITEM,
1747                                         item, "Not supported by fdir filter");
1748                                 return -rte_errno;
1749                         }
1750                 }
1751         }
1752
1753         if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1754                 if (!(item->spec && item->mask)) {
1755                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1756                         rte_flow_error_set(error, EINVAL,
1757                                 RTE_FLOW_ERROR_TYPE_ITEM,
1758                                 item, "Not supported by fdir filter");
1759                         return -rte_errno;
1760                 }
1761
1762                 /*Not supported last point for range*/
1763                 if (item->last) {
1764                         rte_flow_error_set(error, EINVAL,
1765                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1766                                 item, "Not supported last point for range");
1767                         return -rte_errno;
1768                 }
1769
1770                 vlan_spec = item->spec;
1771                 vlan_mask = item->mask;
1772
1773                 rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
1774
1775                 rule->mask.vlan_tci_mask = vlan_mask->tci;
1776                 rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
1777                 /* More than one VLAN tag is not supported. */
1778
1779                 /* Next not void item must be END */
1780                 item = next_no_fuzzy_pattern(pattern, item);
1781                 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1782                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1783                         rte_flow_error_set(error, EINVAL,
1784                                 RTE_FLOW_ERROR_TYPE_ITEM,
1785                                 item, "Not supported by fdir filter");
1786                         return -rte_errno;
1787                 }
1788         }
1789
1790         /* Get the IPV4 info. */
1791         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1792                 /**
1793                  * Set the flow type even if there's no content
1794                  * as we must have a flow type.
1795                  */
1796                 rule->ixgbe_fdir.formatted.flow_type =
1797                         IXGBE_ATR_FLOW_TYPE_IPV4;
1798                 /*Not supported last point for range*/
1799                 if (item->last) {
1800                         rte_flow_error_set(error, EINVAL,
1801                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1802                                 item, "Not supported last point for range");
1803                         return -rte_errno;
1804                 }
1805                 /**
1806                  * Only care about src & dst addresses,
1807                  * others should be masked.
1808                  */
1809                 if (!item->mask) {
1810                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1811                         rte_flow_error_set(error, EINVAL,
1812                                 RTE_FLOW_ERROR_TYPE_ITEM,
1813                                 item, "Not supported by fdir filter");
1814                         return -rte_errno;
1815                 }
1816                 rule->b_mask = TRUE;
1817                 ipv4_mask = item->mask;
1818                 if (ipv4_mask->hdr.version_ihl ||
1819                     ipv4_mask->hdr.type_of_service ||
1820                     ipv4_mask->hdr.total_length ||
1821                     ipv4_mask->hdr.packet_id ||
1822                     ipv4_mask->hdr.fragment_offset ||
1823                     ipv4_mask->hdr.time_to_live ||
1824                     ipv4_mask->hdr.next_proto_id ||
1825                     ipv4_mask->hdr.hdr_checksum) {
1826                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1827                         rte_flow_error_set(error, EINVAL,
1828                                 RTE_FLOW_ERROR_TYPE_ITEM,
1829                                 item, "Not supported by fdir filter");
1830                         return -rte_errno;
1831                 }
1832                 rule->mask.dst_ipv4_mask = ipv4_mask->hdr.dst_addr;
1833                 rule->mask.src_ipv4_mask = ipv4_mask->hdr.src_addr;
1834
1835                 if (item->spec) {
1836                         rule->b_spec = TRUE;
1837                         ipv4_spec = item->spec;
1838                         rule->ixgbe_fdir.formatted.dst_ip[0] =
1839                                 ipv4_spec->hdr.dst_addr;
1840                         rule->ixgbe_fdir.formatted.src_ip[0] =
1841                                 ipv4_spec->hdr.src_addr;
1842                 }
1843
1844                 /**
1845                  * Check if the next not void item is
1846                  * TCP or UDP or SCTP or END.
1847                  */
1848                 item = next_no_fuzzy_pattern(pattern, item);
1849                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1850                     item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1851                     item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
1852                     item->type != RTE_FLOW_ITEM_TYPE_END &&
1853                     item->type != RTE_FLOW_ITEM_TYPE_RAW) {
1854                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1855                         rte_flow_error_set(error, EINVAL,
1856                                 RTE_FLOW_ERROR_TYPE_ITEM,
1857                                 item, "Not supported by fdir filter");
1858                         return -rte_errno;
1859                 }
1860         }
1861
1862         /* Get the IPV6 info. */
1863         if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
1864                 /**
1865                  * Set the flow type even if there's no content
1866                  * as we must have a flow type.
1867                  */
1868                 rule->ixgbe_fdir.formatted.flow_type =
1869                         IXGBE_ATR_FLOW_TYPE_IPV6;
1870
1871                 /**
1872                  * 1. must be a signature match rule
1873                  * 2. 'last' is not supported
1874                  * 3. mask must not be NULL
1875                  */
1876                 if (rule->mode != RTE_FDIR_MODE_SIGNATURE ||
1877                     item->last ||
1878                     !item->mask) {
1879                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1880                         rte_flow_error_set(error, EINVAL,
1881                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1882                                 item, "Not supported last point for range");
1883                         return -rte_errno;
1884                 }
1885
1886                 rule->b_mask = TRUE;
1887                 ipv6_mask = item->mask;
1888                 if (ipv6_mask->hdr.vtc_flow ||
1889                     ipv6_mask->hdr.payload_len ||
1890                     ipv6_mask->hdr.proto ||
1891                     ipv6_mask->hdr.hop_limits) {
1892                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1893                         rte_flow_error_set(error, EINVAL,
1894                                 RTE_FLOW_ERROR_TYPE_ITEM,
1895                                 item, "Not supported by fdir filter");
1896                         return -rte_errno;
1897                 }
1898
1899                 /* check src addr mask */
1900                 for (j = 0; j < 16; j++) {
1901                         if (ipv6_mask->hdr.src_addr[j] == UINT8_MAX) {
1902                                 rule->mask.src_ipv6_mask |= 1 << j;
1903                         } else if (ipv6_mask->hdr.src_addr[j] != 0) {
1904                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1905                                 rte_flow_error_set(error, EINVAL,
1906                                         RTE_FLOW_ERROR_TYPE_ITEM,
1907                                         item, "Not supported by fdir filter");
1908                                 return -rte_errno;
1909                         }
1910                 }
1911
1912                 /* check dst addr mask */
1913                 for (j = 0; j < 16; j++) {
1914                         if (ipv6_mask->hdr.dst_addr[j] == UINT8_MAX) {
1915                                 rule->mask.dst_ipv6_mask |= 1 << j;
1916                         } else if (ipv6_mask->hdr.dst_addr[j] != 0) {
1917                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1918                                 rte_flow_error_set(error, EINVAL,
1919                                         RTE_FLOW_ERROR_TYPE_ITEM,
1920                                         item, "Not supported by fdir filter");
1921                                 return -rte_errno;
1922                         }
1923                 }
1924
1925                 if (item->spec) {
1926                         rule->b_spec = TRUE;
1927                         ipv6_spec = item->spec;
1928                         rte_memcpy(rule->ixgbe_fdir.formatted.src_ip,
1929                                    ipv6_spec->hdr.src_addr, 16);
1930                         rte_memcpy(rule->ixgbe_fdir.formatted.dst_ip,
1931                                    ipv6_spec->hdr.dst_addr, 16);
1932                 }
1933
1934                 /**
1935                  * Check if the next not void item is
1936                  * TCP or UDP or SCTP or END.
1937                  */
1938                 item = next_no_fuzzy_pattern(pattern, item);
1939                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1940                     item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1941                     item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
1942                     item->type != RTE_FLOW_ITEM_TYPE_END &&
1943                     item->type != RTE_FLOW_ITEM_TYPE_RAW) {
1944                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1945                         rte_flow_error_set(error, EINVAL,
1946                                 RTE_FLOW_ERROR_TYPE_ITEM,
1947                                 item, "Not supported by fdir filter");
1948                         return -rte_errno;
1949                 }
1950         }
1951
1952         /* Get the TCP info. */
1953         if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
1954                 /**
1955                  * Set the flow type even if there's no content
1956                  * as we must have a flow type.
1957                  */
1958                 rule->ixgbe_fdir.formatted.flow_type |=
1959                         IXGBE_ATR_L4TYPE_TCP;
1960                 /*Not supported last point for range*/
1961                 if (item->last) {
1962                         rte_flow_error_set(error, EINVAL,
1963                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1964                                 item, "Not supported last point for range");
1965                         return -rte_errno;
1966                 }
1967                 /**
1968                  * Only care about src & dst ports,
1969                  * others should be masked.
1970                  */
1971                 if (!item->mask) {
1972                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1973                         rte_flow_error_set(error, EINVAL,
1974                                 RTE_FLOW_ERROR_TYPE_ITEM,
1975                                 item, "Not supported by fdir filter");
1976                         return -rte_errno;
1977                 }
1978                 rule->b_mask = TRUE;
1979                 tcp_mask = item->mask;
1980                 if (tcp_mask->hdr.sent_seq ||
1981                     tcp_mask->hdr.recv_ack ||
1982                     tcp_mask->hdr.data_off ||
1983                     tcp_mask->hdr.tcp_flags ||
1984                     tcp_mask->hdr.rx_win ||
1985                     tcp_mask->hdr.cksum ||
1986                     tcp_mask->hdr.tcp_urp) {
1987                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1988                         rte_flow_error_set(error, EINVAL,
1989                                 RTE_FLOW_ERROR_TYPE_ITEM,
1990                                 item, "Not supported by fdir filter");
1991                         return -rte_errno;
1992                 }
1993                 rule->mask.src_port_mask = tcp_mask->hdr.src_port;
1994                 rule->mask.dst_port_mask = tcp_mask->hdr.dst_port;
1995
1996                 if (item->spec) {
1997                         rule->b_spec = TRUE;
1998                         tcp_spec = item->spec;
1999                         rule->ixgbe_fdir.formatted.src_port =
2000                                 tcp_spec->hdr.src_port;
2001                         rule->ixgbe_fdir.formatted.dst_port =
2002                                 tcp_spec->hdr.dst_port;
2003                 }
2004
2005                 item = next_no_fuzzy_pattern(pattern, item);
2006                 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
2007                     item->type != RTE_FLOW_ITEM_TYPE_END) {
2008                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2009                         rte_flow_error_set(error, EINVAL,
2010                                 RTE_FLOW_ERROR_TYPE_ITEM,
2011                                 item, "Not supported by fdir filter");
2012                         return -rte_errno;
2013                 }
2014
2015         }
2016
2017         /* Get the UDP info */
2018         if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
2019                 /**
2020                  * Set the flow type even if there's no content
2021                  * as we must have a flow type.
2022                  */
2023                 rule->ixgbe_fdir.formatted.flow_type |=
2024                         IXGBE_ATR_L4TYPE_UDP;
2025                 /*Not supported last point for range*/
2026                 if (item->last) {
2027                         rte_flow_error_set(error, EINVAL,
2028                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2029                                 item, "Not supported last point for range");
2030                         return -rte_errno;
2031                 }
2032                 /**
2033                  * Only care about src & dst ports,
2034                  * others should be masked.
2035                  */
2036                 if (!item->mask) {
2037                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2038                         rte_flow_error_set(error, EINVAL,
2039                                 RTE_FLOW_ERROR_TYPE_ITEM,
2040                                 item, "Not supported by fdir filter");
2041                         return -rte_errno;
2042                 }
2043                 rule->b_mask = TRUE;
2044                 udp_mask = item->mask;
2045                 if (udp_mask->hdr.dgram_len ||
2046                     udp_mask->hdr.dgram_cksum) {
2047                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2048                         rte_flow_error_set(error, EINVAL,
2049                                 RTE_FLOW_ERROR_TYPE_ITEM,
2050                                 item, "Not supported by fdir filter");
2051                         return -rte_errno;
2052                 }
2053                 rule->mask.src_port_mask = udp_mask->hdr.src_port;
2054                 rule->mask.dst_port_mask = udp_mask->hdr.dst_port;
2055
2056                 if (item->spec) {
2057                         rule->b_spec = TRUE;
2058                         udp_spec = item->spec;
2059                         rule->ixgbe_fdir.formatted.src_port =
2060                                 udp_spec->hdr.src_port;
2061                         rule->ixgbe_fdir.formatted.dst_port =
2062                                 udp_spec->hdr.dst_port;
2063                 }
2064
2065                 item = next_no_fuzzy_pattern(pattern, item);
2066                 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
2067                     item->type != RTE_FLOW_ITEM_TYPE_END) {
2068                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2069                         rte_flow_error_set(error, EINVAL,
2070                                 RTE_FLOW_ERROR_TYPE_ITEM,
2071                                 item, "Not supported by fdir filter");
2072                         return -rte_errno;
2073                 }
2074
2075         }
2076
2077         /* Get the SCTP info */
2078         if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
2079                 /**
2080                  * Set the flow type even if there's no content
2081                  * as we must have a flow type.
2082                  */
2083                 rule->ixgbe_fdir.formatted.flow_type |=
2084                         IXGBE_ATR_L4TYPE_SCTP;
2085                 /*Not supported last point for range*/
2086                 if (item->last) {
2087                         rte_flow_error_set(error, EINVAL,
2088                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2089                                 item, "Not supported last point for range");
2090                         return -rte_errno;
2091                 }
2092
2093                 /* Only the x550 family supports the SCTP port. */
2094                 if (hw->mac.type == ixgbe_mac_X550 ||
2095                     hw->mac.type == ixgbe_mac_X550EM_x ||
2096                     hw->mac.type == ixgbe_mac_X550EM_a) {
2097                         /**
2098                          * Only care about src & dst ports,
2099                          * others should be masked.
2100                          */
2101                         if (!item->mask) {
2102                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2103                                 rte_flow_error_set(error, EINVAL,
2104                                         RTE_FLOW_ERROR_TYPE_ITEM,
2105                                         item, "Not supported by fdir filter");
2106                                 return -rte_errno;
2107                         }
2108                         rule->b_mask = TRUE;
2109                         sctp_mask = item->mask;
2110                         if (sctp_mask->hdr.tag ||
2111                                 sctp_mask->hdr.cksum) {
2112                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2113                                 rte_flow_error_set(error, EINVAL,
2114                                         RTE_FLOW_ERROR_TYPE_ITEM,
2115                                         item, "Not supported by fdir filter");
2116                                 return -rte_errno;
2117                         }
2118                         rule->mask.src_port_mask = sctp_mask->hdr.src_port;
2119                         rule->mask.dst_port_mask = sctp_mask->hdr.dst_port;
2120
2121                         if (item->spec) {
2122                                 rule->b_spec = TRUE;
2123                                 sctp_spec = item->spec;
2124                                 rule->ixgbe_fdir.formatted.src_port =
2125                                         sctp_spec->hdr.src_port;
2126                                 rule->ixgbe_fdir.formatted.dst_port =
2127                                         sctp_spec->hdr.dst_port;
2128                         }
2129                 /* On other MAC types, even the SCTP port is not supported. */
2130                 } else {
2131                         sctp_mask = item->mask;
2132                         if (sctp_mask &&
2133                                 (sctp_mask->hdr.src_port ||
2134                                  sctp_mask->hdr.dst_port ||
2135                                  sctp_mask->hdr.tag ||
2136                                  sctp_mask->hdr.cksum)) {
2137                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2138                                 rte_flow_error_set(error, EINVAL,
2139                                         RTE_FLOW_ERROR_TYPE_ITEM,
2140                                         item, "Not supported by fdir filter");
2141                                 return -rte_errno;
2142                         }
2143                 }
2144
2145                 item = next_no_fuzzy_pattern(pattern, item);
2146                 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
2147                         item->type != RTE_FLOW_ITEM_TYPE_END) {
2148                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2149                         rte_flow_error_set(error, EINVAL,
2150                                 RTE_FLOW_ERROR_TYPE_ITEM,
2151                                 item, "Not supported by fdir filter");
2152                         return -rte_errno;
2153                 }
2154         }
2155
2156         /* Get the flex byte info */
2157         if (item->type == RTE_FLOW_ITEM_TYPE_RAW) {
2158                 /* Not supported last point for range*/
2159                 if (item->last) {
2160                         rte_flow_error_set(error, EINVAL,
2161                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2162                                 item, "Not supported last point for range");
2163                         return -rte_errno;
2164                 }
2165                 /* spec and mask should not be NULL */
2166                 if (!item->mask || !item->spec) {
2167                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2168                         rte_flow_error_set(error, EINVAL,
2169                                 RTE_FLOW_ERROR_TYPE_ITEM,
2170                                 item, "Not supported by fdir filter");
2171                         return -rte_errno;
2172                 }
2173
2174                 raw_mask = item->mask;
2175
2176                 /* check mask */
2177                 if (raw_mask->relative != 0x1 ||
2178                     raw_mask->search != 0x1 ||
2179                     raw_mask->reserved != 0x0 ||
2180                     (uint32_t)raw_mask->offset != 0xffffffff ||
2181                     raw_mask->limit != 0xffff ||
2182                     raw_mask->length != 0xffff) {
2183                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2184                         rte_flow_error_set(error, EINVAL,
2185                                 RTE_FLOW_ERROR_TYPE_ITEM,
2186                                 item, "Not supported by fdir filter");
2187                         return -rte_errno;
2188                 }
2189
2190                 raw_spec = item->spec;
2191
2192                 /* check spec */
2193                 if (raw_spec->relative != 0 ||
2194                     raw_spec->search != 0 ||
2195                     raw_spec->reserved != 0 ||
2196                     raw_spec->offset > IXGBE_MAX_FLX_SOURCE_OFF ||
2197                     raw_spec->offset % 2 ||
2198                     raw_spec->limit != 0 ||
2199                     raw_spec->length != 2 ||
2200                     /* pattern can't be 0xffff */
2201                     (raw_spec->pattern[0] == 0xff &&
2202                      raw_spec->pattern[1] == 0xff)) {
2203                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2204                         rte_flow_error_set(error, EINVAL,
2205                                 RTE_FLOW_ERROR_TYPE_ITEM,
2206                                 item, "Not supported by fdir filter");
2207                         return -rte_errno;
2208                 }
2209
2210                 /* check pattern mask */
2211                 if (raw_mask->pattern[0] != 0xff ||
2212                     raw_mask->pattern[1] != 0xff) {
2213                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2214                         rte_flow_error_set(error, EINVAL,
2215                                 RTE_FLOW_ERROR_TYPE_ITEM,
2216                                 item, "Not supported by fdir filter");
2217                         return -rte_errno;
2218                 }
2219
2220                 rule->mask.flex_bytes_mask = 0xffff;
2221                 rule->ixgbe_fdir.formatted.flex_bytes =
2222                         (((uint16_t)raw_spec->pattern[1]) << 8) |
2223                         raw_spec->pattern[0];
2224                 rule->flex_bytes_offset = raw_spec->offset;
2225         }
2226
2227         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2228                 /* check if the next not void item is END */
2229                 item = next_no_fuzzy_pattern(pattern, item);
2230                 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2231                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2232                         rte_flow_error_set(error, EINVAL,
2233                                 RTE_FLOW_ERROR_TYPE_ITEM,
2234                                 item, "Not supported by fdir filter");
2235                         return -rte_errno;
2236                 }
2237         }
2238
2239         return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
2240 }
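
/*
 * Illustrative sketch only, kept under "#if 0" and not compiled with the
 * driver: a RAW item satisfying the flex-byte checks enforced above -
 * relative, search and limit left at zero in the spec, an even offset no
 * larger than IXGBE_MAX_FLX_SOURCE_OFF, a two-byte pattern, and an
 * all-ones mask.  The offset and pattern bytes are hypothetical values
 * taken from the example table; 'pattern' is assumed to be the pointer
 * member of struct rte_flow_item_raw in this DPDK version.
 */
#if 0
static void
example_fill_flex_raw_item(struct rte_flow_item *item,
			   struct rte_flow_item_raw *spec,
			   struct rte_flow_item_raw *mask)
{
	static const uint8_t spec_pattern[2] = { 0x86, 0xDD };
	static const uint8_t mask_pattern[2] = { 0xFF, 0xFF };

	memset(spec, 0, sizeof(*spec));
	memset(mask, 0, sizeof(*mask));

	/* Match two bytes at an even offset of 12 from the packet start. */
	spec->offset = 12;
	spec->length = 2;
	spec->pattern = spec_pattern;

	/* Every field checked above must be fully set in the mask. */
	mask->relative = 1;
	mask->search = 1;
	mask->offset = -1;	/* (uint32_t)-1 == 0xffffffff */
	mask->limit = 0xFFFF;
	mask->length = 0xFFFF;
	mask->pattern = mask_pattern;

	item->type = RTE_FLOW_ITEM_TYPE_RAW;
	item->spec = spec;
	item->last = NULL;
	item->mask = mask;
}
#endif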
2241
2242 #define NVGRE_PROTOCOL 0x6558
2243
2244 /**
2245  * Parse the rule to see if it is a VxLAN or NVGRE flow director rule,
2246  * and get the flow director filter info as well.
2247  * VxLAN PATTERN:
2248  * The first not void item must be ETH.
2249  * The second not void item must be IPV4/ IPV6.
2250  * The third not void item must be UDP and the fourth must be VxLAN.
2251  * The next not void item must be END.
2252  * NVGRE PATTERN:
2253  * The first not void item must be ETH.
2254  * The second not void item must be IPV4/ IPV6.
2255  * The third not void item must be NVGRE.
2256  * The next not void item must be END.
2257  * ACTION:
2258  * The first not void action should be QUEUE or DROP.
2259  * The second not void action is optional and should be MARK;
2260  * mark_id is a uint32_t number.
2261  * The next not void action should be END.
2262  * VxLAN pattern example:
2263  * ITEM         Spec                    Mask
2264  * ETH          NULL                    NULL
2265  * IPV4/IPV6    NULL                    NULL
2266  * UDP          NULL                    NULL
2267  * VxLAN        vni{0x00, 0x32, 0x54}   {0xFF, 0xFF, 0xFF}
2268  * MAC VLAN     tci     0x2016          0xEFFF
2269  * END
2270  * NVGRE pattern example:
2271  * ITEM         Spec                    Mask
2272  * ETH          NULL                    NULL
2273  * IPV4/IPV6    NULL                    NULL
2274  * NVGRE        protocol        0x6558  0xFFFF
2275  *              tni{0x00, 0x32, 0x54}   {0xFF, 0xFF, 0xFF}
2276  * MAC VLAN     tci     0x2016          0xEFFF
2277  * END
2278  * Other members in mask and spec should be set to 0x00.
2279  * Item->last should be NULL.
2280  */
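
/*
 * Illustrative sketch only, kept under "#if 0" and not compiled with the
 * driver: filling the VXLAN item of such a tunnel rule.  Per the checks
 * below, the mask must leave 'flags' at zero and must either fully mask
 * the 24-bit VNI or not mask it at all; the preceding ETH/IPv4/UDP items
 * only describe the protocol stack and keep NULL spec and mask.  The VNI
 * value is the one from the example table; the function name is
 * hypothetical.
 */
#if 0
static void
example_fill_vxlan_item(struct rte_flow_item *item,
			struct rte_flow_item_vxlan *spec,
			struct rte_flow_item_vxlan *mask)
{
	memset(spec, 0, sizeof(*spec));
	memset(mask, 0, sizeof(*mask));

	/* VNI 0x003254, fully masked. */
	spec->vni[0] = 0x00;
	spec->vni[1] = 0x32;
	spec->vni[2] = 0x54;
	mask->vni[0] = 0xFF;
	mask->vni[1] = 0xFF;
	mask->vni[2] = 0xFF;

	item->type = RTE_FLOW_ITEM_TYPE_VXLAN;
	item->spec = spec;
	item->last = NULL;
	item->mask = mask;
}
#endif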
2281 static int
2282 ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
2283                                const struct rte_flow_item pattern[],
2284                                const struct rte_flow_action actions[],
2285                                struct ixgbe_fdir_rule *rule,
2286                                struct rte_flow_error *error)
2287 {
2288         const struct rte_flow_item *item;
2289         const struct rte_flow_item_vxlan *vxlan_spec;
2290         const struct rte_flow_item_vxlan *vxlan_mask;
2291         const struct rte_flow_item_nvgre *nvgre_spec;
2292         const struct rte_flow_item_nvgre *nvgre_mask;
2293         const struct rte_flow_item_eth *eth_spec;
2294         const struct rte_flow_item_eth *eth_mask;
2295         const struct rte_flow_item_vlan *vlan_spec;
2296         const struct rte_flow_item_vlan *vlan_mask;
2297         uint32_t j;
2298
2299         if (!pattern) {
2300                 rte_flow_error_set(error, EINVAL,
2301                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
2302                                    NULL, "NULL pattern.");
2303                 return -rte_errno;
2304         }
2305
2306         if (!actions) {
2307                 rte_flow_error_set(error, EINVAL,
2308                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
2309                                    NULL, "NULL action.");
2310                 return -rte_errno;
2311         }
2312
2313         if (!attr) {
2314                 rte_flow_error_set(error, EINVAL,
2315                                    RTE_FLOW_ERROR_TYPE_ATTR,
2316                                    NULL, "NULL attribute.");
2317                 return -rte_errno;
2318         }
2319
2320         /**
2321          * Some fields may not be provided. Set spec to 0 and mask to the
2322          * default value, so we need not do anything later for unprovided fields.
2323          */
2324         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2325         memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
2326         rule->mask.vlan_tci_mask = 0;
2327
2328         /**
2329          * The first not void item should be
2330          * MAC or IPv4 or IPv6 or UDP or VxLAN or NVGRE.
2331          */
2332         item = next_no_void_pattern(pattern, NULL);
2333         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
2334             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
2335             item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
2336             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
2337             item->type != RTE_FLOW_ITEM_TYPE_VXLAN &&
2338             item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
2339                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2340                 rte_flow_error_set(error, EINVAL,
2341                         RTE_FLOW_ERROR_TYPE_ITEM,
2342                         item, "Not supported by fdir filter");
2343                 return -rte_errno;
2344         }
2345
2346         rule->mode = RTE_FDIR_MODE_PERFECT_TUNNEL;
2347
2348         /* Skip MAC. */
2349         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
2350                 /* Only used to describe the protocol stack. */
2351                 if (item->spec || item->mask) {
2352                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2353                         rte_flow_error_set(error, EINVAL,
2354                                 RTE_FLOW_ERROR_TYPE_ITEM,
2355                                 item, "Not supported by fdir filter");
2356                         return -rte_errno;
2357                 }
2358                 /* Not supported last point for range*/
2359                 if (item->last) {
2360                         rte_flow_error_set(error, EINVAL,
2361                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2362                                 item, "Not supported last point for range");
2363                         return -rte_errno;
2364                 }
2365
2366                 /* Check if the next not void item is IPv4 or IPv6. */
2367                 item = next_no_void_pattern(pattern, item);
2368                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
2369                     item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
2370                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2371                         rte_flow_error_set(error, EINVAL,
2372                                 RTE_FLOW_ERROR_TYPE_ITEM,
2373                                 item, "Not supported by fdir filter");
2374                         return -rte_errno;
2375                 }
2376         }
2377
2378         /* Skip IP. */
2379         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
2380             item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
2381                 /* Only used to describe the protocol stack. */
2382                 if (item->spec || item->mask) {
2383                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2384                         rte_flow_error_set(error, EINVAL,
2385                                 RTE_FLOW_ERROR_TYPE_ITEM,
2386                                 item, "Not supported by fdir filter");
2387                         return -rte_errno;
2388                 }
2389                 /*Not supported last point for range*/
2390                 if (item->last) {
2391                         rte_flow_error_set(error, EINVAL,
2392                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2393                                 item, "Not supported last point for range");
2394                         return -rte_errno;
2395                 }
2396
2397                 /* Check if the next not void item is UDP or NVGRE. */
2398                 item = next_no_void_pattern(pattern, item);
2399                 if (item->type != RTE_FLOW_ITEM_TYPE_UDP &&
2400                     item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
2401                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2402                         rte_flow_error_set(error, EINVAL,
2403                                 RTE_FLOW_ERROR_TYPE_ITEM,
2404                                 item, "Not supported by fdir filter");
2405                         return -rte_errno;
2406                 }
2407         }
2408
2409         /* Skip UDP. */
2410         if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
2411                 /* Only used to describe the protocol stack. */
2412                 if (item->spec || item->mask) {
2413                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2414                         rte_flow_error_set(error, EINVAL,
2415                                 RTE_FLOW_ERROR_TYPE_ITEM,
2416                                 item, "Not supported by fdir filter");
2417                         return -rte_errno;
2418                 }
2419                 /*Not supported last point for range*/
2420                 if (item->last) {
2421                         rte_flow_error_set(error, EINVAL,
2422                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2423                                 item, "Not supported last point for range");
2424                         return -rte_errno;
2425                 }
2426
2427                 /* Check if the next not void item is VxLAN. */
2428                 item = next_no_void_pattern(pattern, item);
2429                 if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
2430                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2431                         rte_flow_error_set(error, EINVAL,
2432                                 RTE_FLOW_ERROR_TYPE_ITEM,
2433                                 item, "Not supported by fdir filter");
2434                         return -rte_errno;
2435                 }
2436         }
2437
2438         /* Get the VxLAN info */
2439         if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
2440                 rule->ixgbe_fdir.formatted.tunnel_type =
2441                                 IXGBE_FDIR_VXLAN_TUNNEL_TYPE;
2442
2443                 /* Only care about VNI, others should be masked. */
2444                 if (!item->mask) {
2445                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2446                         rte_flow_error_set(error, EINVAL,
2447                                 RTE_FLOW_ERROR_TYPE_ITEM,
2448                                 item, "Not supported by fdir filter");
2449                         return -rte_errno;
2450                 }
2451                 /*Not supported last point for range*/
2452                 if (item->last) {
2453                         rte_flow_error_set(error, EINVAL,
2454                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2455                                 item, "Not supported last point for range");
2456                         return -rte_errno;
2457                 }
2458                 rule->b_mask = TRUE;
2459
2460                 /* Tunnel type is always meaningful. */
2461                 rule->mask.tunnel_type_mask = 1;
2462
2463                 vxlan_mask = item->mask;
2464                 if (vxlan_mask->flags) {
2465                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2466                         rte_flow_error_set(error, EINVAL,
2467                                 RTE_FLOW_ERROR_TYPE_ITEM,
2468                                 item, "Not supported by fdir filter");
2469                         return -rte_errno;
2470                 }
2471                 /* VNI must be totally masked or not masked at all. */
2472                 if ((vxlan_mask->vni[0] || vxlan_mask->vni[1] ||
2473                         vxlan_mask->vni[2]) &&
2474                         ((vxlan_mask->vni[0] != 0xFF) ||
2475                         (vxlan_mask->vni[1] != 0xFF) ||
2476                                 (vxlan_mask->vni[2] != 0xFF))) {
2477                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2478                         rte_flow_error_set(error, EINVAL,
2479                                 RTE_FLOW_ERROR_TYPE_ITEM,
2480                                 item, "Not supported by fdir filter");
2481                         return -rte_errno;
2482                 }
2483
2484                 rte_memcpy(&rule->mask.tunnel_id_mask, vxlan_mask->vni,
2485                         RTE_DIM(vxlan_mask->vni));
2486
2487                 if (item->spec) {
2488                         rule->b_spec = TRUE;
2489                         vxlan_spec = item->spec;
2490                         rte_memcpy(((uint8_t *)
2491                                 &rule->ixgbe_fdir.formatted.tni_vni),
2492                                 vxlan_spec->vni, RTE_DIM(vxlan_spec->vni));
2493                 }
2494         }
2495
2496         /* Get the NVGRE info */
2497         if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE) {
2498                 rule->ixgbe_fdir.formatted.tunnel_type =
2499                                 IXGBE_FDIR_NVGRE_TUNNEL_TYPE;
2500
2501                 /**
2502                  * Only the c_k_s_rsvd0_ver word, protocol and TNI matter;
2503                  * all other fields should be masked.
2504                  */
2505                 if (!item->mask) {
2506                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2507                         rte_flow_error_set(error, EINVAL,
2508                                 RTE_FLOW_ERROR_TYPE_ITEM,
2509                                 item, "Not supported by fdir filter");
2510                         return -rte_errno;
2511                 }
2512                 /* Range matching ('last' field) is not supported. */
2513                 if (item->last) {
2514                         rte_flow_error_set(error, EINVAL,
2515                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2516                                 item, "Not supported last point for range");
2517                         return -rte_errno;
2518                 }
2519                 rule->b_mask = TRUE;
2520
2521                 /* Tunnel type is always meaningful. */
2522                 rule->mask.tunnel_type_mask = 1;
2523
2524                 nvgre_mask = item->mask;
2525                 if (nvgre_mask->flow_id) {
2526                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2527                         rte_flow_error_set(error, EINVAL,
2528                                 RTE_FLOW_ERROR_TYPE_ITEM,
2529                                 item, "Not supported by fdir filter");
2530                         return -rte_errno;
2531                 }
2532                 if (nvgre_mask->protocol &&
2533                     nvgre_mask->protocol != 0xFFFF) {
2534                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2535                         rte_flow_error_set(error, EINVAL,
2536                                 RTE_FLOW_ERROR_TYPE_ITEM,
2537                                 item, "Not supported by fdir filter");
2538                         return -rte_errno;
2539                 }
2540                 if (nvgre_mask->c_k_s_rsvd0_ver &&
2541                     nvgre_mask->c_k_s_rsvd0_ver !=
2542                         rte_cpu_to_be_16(0xFFFF)) {
2543                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2544                         rte_flow_error_set(error, EINVAL,
2545                                 RTE_FLOW_ERROR_TYPE_ITEM,
2546                                 item, "Not supported by fdir filter");
2547                         return -rte_errno;
2548                 }
2549                 /* The TNI must be either fully masked or not masked at all. */
2550                 if (nvgre_mask->tni[0] &&
2551                     ((nvgre_mask->tni[0] != 0xFF) ||
2552                     (nvgre_mask->tni[1] != 0xFF) ||
2553                     (nvgre_mask->tni[2] != 0xFF))) {
2554                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2555                         rte_flow_error_set(error, EINVAL,
2556                                 RTE_FLOW_ERROR_TYPE_ITEM,
2557                                 item, "Not supported by fdir filter");
2558                         return -rte_errno;
2559                 }
2560                 /* The TNI is a 24-bit field. */
2561                 rte_memcpy(&rule->mask.tunnel_id_mask, nvgre_mask->tni,
2562                         RTE_DIM(nvgre_mask->tni));
2563                 rule->mask.tunnel_id_mask <<= 8;
2564
2565                 if (item->spec) {
2566                         rule->b_spec = TRUE;
2567                         nvgre_spec = item->spec;
2568                         if (nvgre_spec->c_k_s_rsvd0_ver !=
2569                             rte_cpu_to_be_16(0x2000) &&
2570                                 nvgre_mask->c_k_s_rsvd0_ver) {
2571                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2572                                 rte_flow_error_set(error, EINVAL,
2573                                         RTE_FLOW_ERROR_TYPE_ITEM,
2574                                         item, "Not supported by fdir filter");
2575                                 return -rte_errno;
2576                         }
2577                         if (nvgre_mask->protocol &&
2578                             nvgre_spec->protocol !=
2579                             rte_cpu_to_be_16(NVGRE_PROTOCOL)) {
2580                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2581                                 rte_flow_error_set(error, EINVAL,
2582                                         RTE_FLOW_ERROR_TYPE_ITEM,
2583                                         item, "Not supported by fdir filter");
2584                                 return -rte_errno;
2585                         }
2586                         /* The TNI is a 24-bit field. */
2587                         rte_memcpy(&rule->ixgbe_fdir.formatted.tni_vni,
2588                         nvgre_spec->tni, RTE_DIM(nvgre_spec->tni));
2589                 }
2590         }
2591
2592         /* check if the next not void item is MAC */
2593         item = next_no_void_pattern(pattern, item);
2594         if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
2595                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2596                 rte_flow_error_set(error, EINVAL,
2597                         RTE_FLOW_ERROR_TYPE_ITEM,
2598                         item, "Not supported by fdir filter");
2599                 return -rte_errno;
2600         }
2601
2602         /**
2603          * Only the VLAN and destination MAC address are supported;
2604          * other fields should be masked.
2605          */
2606
2607         if (!item->mask) {
2608                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2609                 rte_flow_error_set(error, EINVAL,
2610                         RTE_FLOW_ERROR_TYPE_ITEM,
2611                         item, "Not supported by fdir filter");
2612                 return -rte_errno;
2613         }
2614         /* Range matching ('last' field) is not supported. */
2615         if (item->last) {
2616                 rte_flow_error_set(error, EINVAL,
2617                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2618                         item, "Not supported last point for range");
2619                 return -rte_errno;
2620         }
2621         rule->b_mask = TRUE;
2622         eth_mask = item->mask;
2623
2624         /* Ether type should be masked. */
2625         if (eth_mask->type) {
2626                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2627                 rte_flow_error_set(error, EINVAL,
2628                         RTE_FLOW_ERROR_TYPE_ITEM,
2629                         item, "Not supported by fdir filter");
2630                 return -rte_errno;
2631         }
2632
2633         /* src MAC address should be masked. */
2634         for (j = 0; j < ETHER_ADDR_LEN; j++) {
2635                 if (eth_mask->src.addr_bytes[j]) {
2636                         memset(rule, 0,
2637                                sizeof(struct ixgbe_fdir_rule));
2638                         rte_flow_error_set(error, EINVAL,
2639                                 RTE_FLOW_ERROR_TYPE_ITEM,
2640                                 item, "Not supported by fdir filter");
2641                         return -rte_errno;
2642                 }
2643         }
2644         rule->mask.mac_addr_byte_mask = 0;
2645         for (j = 0; j < ETHER_ADDR_LEN; j++) {
2646                 /* It's a per-byte mask. */
2647                 if (eth_mask->dst.addr_bytes[j] == 0xFF) {
2648                         rule->mask.mac_addr_byte_mask |= 0x1 << j;
2649                 } else if (eth_mask->dst.addr_bytes[j]) {
2650                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2651                         rte_flow_error_set(error, EINVAL,
2652                                 RTE_FLOW_ERROR_TYPE_ITEM,
2653                                 item, "Not supported by fdir filter");
2654                         return -rte_errno;
2655                 }
2656         }
2657
2658         /* When there is no VLAN item, treat the VLAN TCI as fully masked. */
2659         rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
2660
2661         if (item->spec) {
2662                 rule->b_spec = TRUE;
2663                 eth_spec = item->spec;
2664
2665                 /* Get the dst MAC. */
2666                 for (j = 0; j < ETHER_ADDR_LEN; j++) {
2667                         rule->ixgbe_fdir.formatted.inner_mac[j] =
2668                                 eth_spec->dst.addr_bytes[j];
2669                 }
2670         }
2671
2672         /**
2673          * Check if the next not void item is vlan or ipv4.
2674          * IPv6 is not supported.
2675          */
2676         item = next_no_void_pattern(pattern, item);
2677         if ((item->type != RTE_FLOW_ITEM_TYPE_VLAN) &&
2678                 (item->type != RTE_FLOW_ITEM_TYPE_IPV4)) {
2679                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2680                 rte_flow_error_set(error, EINVAL,
2681                         RTE_FLOW_ERROR_TYPE_ITEM,
2682                         item, "Not supported by fdir filter");
2683                 return -rte_errno;
2684         }
2685         /* Range matching ('last' field) is not supported. */
2686         if (item->last) {
2687                 rte_flow_error_set(error, EINVAL,
2688                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2689                         item, "Not supported last point for range");
2690                 return -rte_errno;
2691         }
2692
2693         if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
2694                 if (!(item->spec && item->mask)) {
2695                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2696                         rte_flow_error_set(error, EINVAL,
2697                                 RTE_FLOW_ERROR_TYPE_ITEM,
2698                                 item, "Not supported by fdir filter");
2699                         return -rte_errno;
2700                 }
2701
2702                 vlan_spec = item->spec;
2703                 vlan_mask = item->mask;
2704
2705                 rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
2706
2707                 rule->mask.vlan_tci_mask = vlan_mask->tci;
2708                 rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
2709                 /* More than one VLAN tag is not supported. */
2710
2711                 /* check if the next not void item is END */
2712                 item = next_no_void_pattern(pattern, item);
2713
2714                 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2715                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2716                         rte_flow_error_set(error, EINVAL,
2717                                 RTE_FLOW_ERROR_TYPE_ITEM,
2718                                 item, "Not supported by fdir filter");
2719                         return -rte_errno;
2720                 }
2721         }
2722
2723         /**
2724          * If the VLAN TCI mask is 0, the VLAN is a don't-care;
2725          * nothing needs to be done.
2726          */
2727
2728         return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
2729 }
2730
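/**
 * Parse an FDIR rule from a generic rte_flow rule.
 * The normal (non-tunnel) parser is tried first, then the tunnel
 * (VxLAN/NVGRE) parser; rules that the MAC type, the configured FDIR mode
 * or the RX queue count cannot accept are rejected with -ENOTSUP.
 */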
2731 static int
2732 ixgbe_parse_fdir_filter(struct rte_eth_dev *dev,
2733                         const struct rte_flow_attr *attr,
2734                         const struct rte_flow_item pattern[],
2735                         const struct rte_flow_action actions[],
2736                         struct ixgbe_fdir_rule *rule,
2737                         struct rte_flow_error *error)
2738 {
2739         int ret;
2740         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2741         enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
2742
2743         if (hw->mac.type != ixgbe_mac_82599EB &&
2744                 hw->mac.type != ixgbe_mac_X540 &&
2745                 hw->mac.type != ixgbe_mac_X550 &&
2746                 hw->mac.type != ixgbe_mac_X550EM_x &&
2747                 hw->mac.type != ixgbe_mac_X550EM_a)
2748                 return -ENOTSUP;
2749
2750         ret = ixgbe_parse_fdir_filter_normal(dev, attr, pattern,
2751                                         actions, rule, error);
2752
2753         if (!ret)
2754                 goto step_next;
2755
2756         ret = ixgbe_parse_fdir_filter_tunnel(attr, pattern,
2757                                         actions, rule, error);
2758
2759         if (ret)
2760                 return ret;
2761
2762 step_next:
2763
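        /* On 82599, an FDIR drop rule that also matches L4 ports is rejected. */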
2764         if (hw->mac.type == ixgbe_mac_82599EB &&
2765                 rule->fdirflags == IXGBE_FDIRCMD_DROP &&
2766                 (rule->ixgbe_fdir.formatted.src_port != 0 ||
2767                 rule->ixgbe_fdir.formatted.dst_port != 0))
2768                 return -ENOTSUP;
2769
2770         if (fdir_mode == RTE_FDIR_MODE_NONE ||
2771             fdir_mode != rule->mode)
2772                 return -ENOTSUP;
2773
2774         if (rule->queue >= dev->data->nb_rx_queues)
2775                 return -ENOTSUP;
2776
2777         return ret;
2778 }
2779
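/**
 * Parse an RSS rule from a generic rte_flow action list.
 * Only a single RSS action followed by END is accepted; the queues, hash
 * function, encapsulation level and key length are validated before the
 * attributes (ingress only, no egress/transfer) are checked.
 */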
2780 static int
2781 ixgbe_parse_rss_filter(struct rte_eth_dev *dev,
2782                         const struct rte_flow_attr *attr,
2783                         const struct rte_flow_action actions[],
2784                         struct ixgbe_rte_flow_rss_conf *rss_conf,
2785                         struct rte_flow_error *error)
2786 {
2787         const struct rte_flow_action *act;
2788         const struct rte_flow_action_rss *rss;
2789         uint16_t n;
2790
2791         /**
2792          * RSS only supports forwarding;
2793          * check if the first not void action is RSS.
2794          */
2795         act = next_no_void_action(actions, NULL);
2796         if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
2797                 memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
2798                 rte_flow_error_set(error, EINVAL,
2799                         RTE_FLOW_ERROR_TYPE_ACTION,
2800                         act, "Not supported action.");
2801                 return -rte_errno;
2802         }
2803
2804         rss = (const struct rte_flow_action_rss *)act->conf;
2805
2806         if (!rss || !rss->queue_num) {
2807                 rte_flow_error_set(error, EINVAL,
2808                                 RTE_FLOW_ERROR_TYPE_ACTION,
2809                                 act,
2810                            "no valid queues");
2811                 return -rte_errno;
2812         }
2813
2814         for (n = 0; n < rss->queue_num; n++) {
2815                 if (rss->queue[n] >= dev->data->nb_rx_queues) {
2816                         rte_flow_error_set(error, EINVAL,
2817                                    RTE_FLOW_ERROR_TYPE_ACTION,
2818                                    act,
2819                                    "queue id > max number of queues");
2820                         return -rte_errno;
2821                 }
2822         }
2823
2824         if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT)
2825                 return rte_flow_error_set
2826                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
2827                          "non-default RSS hash functions are not supported");
2828         if (rss->level)
2829                 return rte_flow_error_set
2830                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
2831                          "a nonzero RSS encapsulation level is not supported");
2832         if (rss->key_len && rss->key_len != RTE_DIM(rss_conf->key))
2833                 return rte_flow_error_set
2834                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
2835                          "RSS hash key must be exactly 40 bytes");
2836         if (rss->queue_num > RTE_DIM(rss_conf->queue))
2837                 return rte_flow_error_set
2838                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
2839                          "too many queues for RSS context");
2840         if (ixgbe_rss_conf_init(rss_conf, rss))
2841                 return rte_flow_error_set
2842                         (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, act,
2843                          "RSS context initialization failure");
2844
2845         /* check if the next not void item is END */
2846         act = next_no_void_action(actions, act);
2847         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
2848                 memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
2849                 rte_flow_error_set(error, EINVAL,
2850                         RTE_FLOW_ERROR_TYPE_ACTION,
2851                         act, "Not supported action.");
2852                 return -rte_errno;
2853         }
2854
2855         /* parse attr */
2856         /* must be input direction */
2857         if (!attr->ingress) {
2858                 memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
2859                 rte_flow_error_set(error, EINVAL,
2860                                    RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
2861                                    attr, "Only support ingress.");
2862                 return -rte_errno;
2863         }
2864
2865         /* not supported */
2866         if (attr->egress) {
2867                 memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
2868                 rte_flow_error_set(error, EINVAL,
2869                                    RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
2870                                    attr, "Not support egress.");
2871                 return -rte_errno;
2872         }
2873
2874         /* not supported */
2875         if (attr->transfer) {
2876                 memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
2877                 rte_flow_error_set(error, EINVAL,
2878                                    RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
2879                                    attr, "No support for transfer.");
2880                 return -rte_errno;
2881         }
2882
2883         if (attr->priority > 0xFFFF) {
2884                 memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
2885                 rte_flow_error_set(error, EINVAL,
2886                                    RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
2887                                    attr, "Error priority.");
2888                 return -rte_errno;
2889         }
2890
2891         return 0;
2892 }
2893
2894 /* Remove the RSS filter if one is currently configured. */
2895 static void
2896 ixgbe_clear_rss_filter(struct rte_eth_dev *dev)
2897 {
2898         struct ixgbe_filter_info *filter_info =
2899                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
2900
2901         if (filter_info->rss_info.conf.queue_num)
2902                 ixgbe_config_rss_filter(dev, &filter_info->rss_info, FALSE);
2903 }
2904
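/* Initialize the global lists that track flow rules created on ixgbe ports. */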
2905 void
2906 ixgbe_filterlist_init(void)
2907 {
2908         TAILQ_INIT(&filter_ntuple_list);
2909         TAILQ_INIT(&filter_ethertype_list);
2910         TAILQ_INIT(&filter_syn_list);
2911         TAILQ_INIT(&filter_fdir_list);
2912         TAILQ_INIT(&filter_l2_tunnel_list);
2913         TAILQ_INIT(&filter_rss_list);
2914         TAILQ_INIT(&ixgbe_flow_list);
2915 }
2916
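/**
 * Free every filter element and flow handle tracked in the global lists.
 * Only the software bookkeeping is released here; the hardware filters
 * are removed separately (see ixgbe_flow_flush).
 */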
2917 void
2918 ixgbe_filterlist_flush(void)
2919 {
2920         struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
2921         struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
2922         struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
2923         struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2924         struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
2925         struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
2926         struct ixgbe_rss_conf_ele *rss_filter_ptr;
2927
2928         while ((ntuple_filter_ptr = TAILQ_FIRST(&filter_ntuple_list))) {
2929                 TAILQ_REMOVE(&filter_ntuple_list,
2930                                  ntuple_filter_ptr,
2931                                  entries);
2932                 rte_free(ntuple_filter_ptr);
2933         }
2934
2935         while ((ethertype_filter_ptr = TAILQ_FIRST(&filter_ethertype_list))) {
2936                 TAILQ_REMOVE(&filter_ethertype_list,
2937                                  ethertype_filter_ptr,
2938                                  entries);
2939                 rte_free(ethertype_filter_ptr);
2940         }
2941
2942         while ((syn_filter_ptr = TAILQ_FIRST(&filter_syn_list))) {
2943                 TAILQ_REMOVE(&filter_syn_list,
2944                                  syn_filter_ptr,
2945                                  entries);
2946                 rte_free(syn_filter_ptr);
2947         }
2948
2949         while ((l2_tn_filter_ptr = TAILQ_FIRST(&filter_l2_tunnel_list))) {
2950                 TAILQ_REMOVE(&filter_l2_tunnel_list,
2951                                  l2_tn_filter_ptr,
2952                                  entries);
2953                 rte_free(l2_tn_filter_ptr);
2954         }
2955
2956         while ((fdir_rule_ptr = TAILQ_FIRST(&filter_fdir_list))) {
2957                 TAILQ_REMOVE(&filter_fdir_list,
2958                                  fdir_rule_ptr,
2959                                  entries);
2960                 rte_free(fdir_rule_ptr);
2961         }
2962
2963         while ((rss_filter_ptr = TAILQ_FIRST(&filter_rss_list))) {
2964                 TAILQ_REMOVE(&filter_rss_list,
2965                                  rss_filter_ptr,
2966                                  entries);
2967                 rte_free(rss_filter_ptr);
2968         }
2969
2970         while ((ixgbe_flow_mem_ptr = TAILQ_FIRST(&ixgbe_flow_list))) {
2971                 TAILQ_REMOVE(&ixgbe_flow_list,
2972                                  ixgbe_flow_mem_ptr,
2973                                  entries);
2974                 rte_free(ixgbe_flow_mem_ptr->flow);
2975                 rte_free(ixgbe_flow_mem_ptr);
2976         }
2977 }
2978
2979 /**
2980  * Create a flow rule.
2981  * Theoretically one rule can match more than one filter type.
2982  * We let it use the first filter type it matches,
2983  * so the parsing sequence matters.
2984  */
2985 static struct rte_flow *
2986 ixgbe_flow_create(struct rte_eth_dev *dev,
2987                   const struct rte_flow_attr *attr,
2988                   const struct rte_flow_item pattern[],
2989                   const struct rte_flow_action actions[],
2990                   struct rte_flow_error *error)
2991 {
2992         int ret;
2993         struct rte_eth_ntuple_filter ntuple_filter;
2994         struct rte_eth_ethertype_filter ethertype_filter;
2995         struct rte_eth_syn_filter syn_filter;
2996         struct ixgbe_fdir_rule fdir_rule;
2997         struct rte_eth_l2_tunnel_conf l2_tn_filter;
2998         struct ixgbe_hw_fdir_info *fdir_info =
2999                 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
3000         struct ixgbe_rte_flow_rss_conf rss_conf;
3001         struct rte_flow *flow = NULL;
3002         struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
3003         struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
3004         struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
3005         struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
3006         struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
3007         struct ixgbe_rss_conf_ele *rss_filter_ptr;
3008         struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
3009         uint8_t first_mask = FALSE;
3010
3011         flow = rte_zmalloc("ixgbe_rte_flow", sizeof(struct rte_flow), 0);
3012         if (!flow) {
3013                 PMD_DRV_LOG(ERR, "failed to allocate memory");
3014                 return (struct rte_flow *)flow;
3015         }
3016         ixgbe_flow_mem_ptr = rte_zmalloc("ixgbe_flow_mem",
3017                         sizeof(struct ixgbe_flow_mem), 0);
3018         if (!ixgbe_flow_mem_ptr) {
3019                 PMD_DRV_LOG(ERR, "failed to allocate memory");
3020                 rte_free(flow);
3021                 return NULL;
3022         }
3023         ixgbe_flow_mem_ptr->flow = flow;
3024         TAILQ_INSERT_TAIL(&ixgbe_flow_list,
3025                                 ixgbe_flow_mem_ptr, entries);
3026
3027         memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
3028         ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
3029                         actions, &ntuple_filter, error);
3030
3031 #ifdef RTE_LIBRTE_SECURITY
3032         /* An ESP flow is not really a flow. */
3033         if (ntuple_filter.proto == IPPROTO_ESP)
3034                 return flow;
3035 #endif
3036
3037         if (!ret) {
3038                 ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
3039                 if (!ret) {
3040                         ntuple_filter_ptr = rte_zmalloc("ixgbe_ntuple_filter",
3041                                 sizeof(struct ixgbe_ntuple_filter_ele), 0);
3042                         if (!ntuple_filter_ptr) {
3043                                 PMD_DRV_LOG(ERR, "failed to allocate memory");
3044                                 goto out;
3045                         }
3046                         rte_memcpy(&ntuple_filter_ptr->filter_info,
3047                                 &ntuple_filter,
3048                                 sizeof(struct rte_eth_ntuple_filter));
3049                         TAILQ_INSERT_TAIL(&filter_ntuple_list,
3050                                 ntuple_filter_ptr, entries);
3051                         flow->rule = ntuple_filter_ptr;
3052                         flow->filter_type = RTE_ETH_FILTER_NTUPLE;
3053                         return flow;
3054                 }
3055                 goto out;
3056         }
3057
3058         memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
3059         ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
3060                                 actions, &ethertype_filter, error);
3061         if (!ret) {
3062                 ret = ixgbe_add_del_ethertype_filter(dev,
3063                                 &ethertype_filter, TRUE);
3064                 if (!ret) {
3065                         ethertype_filter_ptr = rte_zmalloc(
3066                                 "ixgbe_ethertype_filter",
3067                                 sizeof(struct ixgbe_ethertype_filter_ele), 0);
3068                         if (!ethertype_filter_ptr) {
3069                                 PMD_DRV_LOG(ERR, "failed to allocate memory");
3070                                 goto out;
3071                         }
3072                         rte_memcpy(&ethertype_filter_ptr->filter_info,
3073                                 &ethertype_filter,
3074                                 sizeof(struct rte_eth_ethertype_filter));
3075                         TAILQ_INSERT_TAIL(&filter_ethertype_list,
3076                                 ethertype_filter_ptr, entries);
3077                         flow->rule = ethertype_filter_ptr;
3078                         flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
3079                         return flow;
3080                 }
3081                 goto out;
3082         }
3083
3084         memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
3085         ret = ixgbe_parse_syn_filter(dev, attr, pattern,
3086                                 actions, &syn_filter, error);
3087         if (!ret) {
3088                 ret = ixgbe_syn_filter_set(dev, &syn_filter, TRUE);
3089                 if (!ret) {
3090                         syn_filter_ptr = rte_zmalloc("ixgbe_syn_filter",
3091                                 sizeof(struct ixgbe_eth_syn_filter_ele), 0);
3092                         if (!syn_filter_ptr) {
3093                                 PMD_DRV_LOG(ERR, "failed to allocate memory");
3094                                 goto out;
3095                         }
3096                         rte_memcpy(&syn_filter_ptr->filter_info,
3097                                 &syn_filter,
3098                                 sizeof(struct rte_eth_syn_filter));
3099                         TAILQ_INSERT_TAIL(&filter_syn_list,
3100                                 syn_filter_ptr,
3101                                 entries);
3102                         flow->rule = syn_filter_ptr;
3103                         flow->filter_type = RTE_ETH_FILTER_SYN;
3104                         return flow;
3105                 }
3106                 goto out;
3107         }
3108
3109         memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
3110         ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
3111                                 actions, &fdir_rule, error);
3112         if (!ret) {
3113                 /* A mask cannot be deleted. */
3114                 if (fdir_rule.b_mask) {
3115                         if (!fdir_info->mask_added) {
3116                                 /* It's the first time the mask is set. */
3117                                 rte_memcpy(&fdir_info->mask,
3118                                         &fdir_rule.mask,
3119                                         sizeof(struct ixgbe_hw_fdir_mask));
3120                                 fdir_info->flex_bytes_offset =
3121                                         fdir_rule.flex_bytes_offset;
3122
3123                                 if (fdir_rule.mask.flex_bytes_mask)
3124                                         ixgbe_fdir_set_flexbytes_offset(dev,
3125                                                 fdir_rule.flex_bytes_offset);
3126
3127                                 ret = ixgbe_fdir_set_input_mask(dev);
3128                                 if (ret)
3129                                         goto out;
3130
3131                                 fdir_info->mask_added = TRUE;
3132                                 first_mask = TRUE;
3133                         } else {
3134                                 /**
3135                                  * Only one global mask is supported;
3136                                  * all masks must be the same.
3137                                  */
3138                                 ret = memcmp(&fdir_info->mask,
3139                                         &fdir_rule.mask,
3140                                         sizeof(struct ixgbe_hw_fdir_mask));
3141                                 if (ret)
3142                                         goto out;
3143
3144                                 if (fdir_info->flex_bytes_offset !=
3145                                                 fdir_rule.flex_bytes_offset)
3146                                         goto out;
3147                         }
3148                 }
3149
3150                 if (fdir_rule.b_spec) {
3151                         ret = ixgbe_fdir_filter_program(dev, &fdir_rule,
3152                                         FALSE, FALSE);
3153                         if (!ret) {
3154                                 fdir_rule_ptr = rte_zmalloc("ixgbe_fdir_filter",
3155                                         sizeof(struct ixgbe_fdir_rule_ele), 0);
3156                                 if (!fdir_rule_ptr) {
3157                                         PMD_DRV_LOG(ERR, "failed to allocate memory");
3158                                         goto out;
3159                                 }
3160                                 rte_memcpy(&fdir_rule_ptr->filter_info,
3161                                         &fdir_rule,
3162                                         sizeof(struct ixgbe_fdir_rule));
3163                                 TAILQ_INSERT_TAIL(&filter_fdir_list,
3164                                         fdir_rule_ptr, entries);
3165                                 flow->rule = fdir_rule_ptr;
3166                                 flow->filter_type = RTE_ETH_FILTER_FDIR;
3167
3168                                 return flow;
3169                         }
3170
3171                         if (ret) {
3172                                 /**
3173                                  * Clear the mask_added flag if programming
3174                                  * the rule fails.
3175                                  */
3176                                 if (first_mask)
3177                                         fdir_info->mask_added = FALSE;
3178                                 goto out;
3179                         }
3180                 }
3181
3182                 goto out;
3183         }
3184
3185         memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
3186         ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
3187                                         actions, &l2_tn_filter, error);
3188         if (!ret) {
3189                 ret = ixgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_filter, FALSE);
3190                 if (!ret) {
3191                         l2_tn_filter_ptr = rte_zmalloc("ixgbe_l2_tn_filter",
3192                                 sizeof(struct ixgbe_eth_l2_tunnel_conf_ele), 0);
3193                         if (!l2_tn_filter_ptr) {
3194                                 PMD_DRV_LOG(ERR, "failed to allocate memory");
3195                                 goto out;
3196                         }
3197                         rte_memcpy(&l2_tn_filter_ptr->filter_info,
3198                                 &l2_tn_filter,
3199                                 sizeof(struct rte_eth_l2_tunnel_conf));
3200                         TAILQ_INSERT_TAIL(&filter_l2_tunnel_list,
3201                                 l2_tn_filter_ptr, entries);
3202                         flow->rule = l2_tn_filter_ptr;
3203                         flow->filter_type = RTE_ETH_FILTER_L2_TUNNEL;
3204                         return flow;
3205                 }
3206         }
3207
3208         memset(&rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
3209         ret = ixgbe_parse_rss_filter(dev, attr,
3210                                         actions, &rss_conf, error);
3211         if (!ret) {
3212                 ret = ixgbe_config_rss_filter(dev, &rss_conf, TRUE);
3213                 if (!ret) {
3214                         rss_filter_ptr = rte_zmalloc("ixgbe_rss_filter",
3215                                 sizeof(struct ixgbe_rss_conf_ele), 0);
3216                         if (!rss_filter_ptr) {
3217                                 PMD_DRV_LOG(ERR, "failed to allocate memory");
3218                                 goto out;
3219                         }
3220                         ixgbe_rss_conf_init(&rss_filter_ptr->filter_info,
3221                                             &rss_conf.conf);
3222                         TAILQ_INSERT_TAIL(&filter_rss_list,
3223                                 rss_filter_ptr, entries);
3224                         flow->rule = rss_filter_ptr;
3225                         flow->filter_type = RTE_ETH_FILTER_HASH;
3226                         return flow;
3227                 }
3228         }
3229
3230 out:
3231         TAILQ_REMOVE(&ixgbe_flow_list,
3232                 ixgbe_flow_mem_ptr, entries);
3233         rte_flow_error_set(error, -ret,
3234                            RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
3235                            "Failed to create flow.");
3236         rte_free(ixgbe_flow_mem_ptr);
3237         rte_free(flow);
3238         return NULL;
3239 }
3240
3241 /**
3242  * Check if the flow rule is supported by ixgbe.
3243  * It only checks the format; it does not guarantee that the rule can be
3244  * programmed into the HW, because there may not be enough room for it.
3245  */
3246 static int
3247 ixgbe_flow_validate(struct rte_eth_dev *dev,
3248                 const struct rte_flow_attr *attr,
3249                 const struct rte_flow_item pattern[],
3250                 const struct rte_flow_action actions[],
3251                 struct rte_flow_error *error)
3252 {
3253         struct rte_eth_ntuple_filter ntuple_filter;
3254         struct rte_eth_ethertype_filter ethertype_filter;
3255         struct rte_eth_syn_filter syn_filter;
3256         struct rte_eth_l2_tunnel_conf l2_tn_filter;
3257         struct ixgbe_fdir_rule fdir_rule;
3258         struct ixgbe_rte_flow_rss_conf rss_conf;
3259         int ret;
3260
3261         memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
3262         ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
3263                                 actions, &ntuple_filter, error);
3264         if (!ret)
3265                 return 0;
3266
3267         memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
3268         ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
3269                                 actions, &ethertype_filter, error);
3270         if (!ret)
3271                 return 0;
3272
3273         memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
3274         ret = ixgbe_parse_syn_filter(dev, attr, pattern,
3275                                 actions, &syn_filter, error);
3276         if (!ret)
3277                 return 0;
3278
3279         memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
3280         ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
3281                                 actions, &fdir_rule, error);
3282         if (!ret)
3283                 return 0;
3284
3285         memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
3286         ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
3287                                 actions, &l2_tn_filter, error);
3288         if (!ret)
3289                 return 0;
3290
3291         memset(&rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
3292         ret = ixgbe_parse_rss_filter(dev, attr,
3293                                         actions, &rss_conf, error);
3294
3295         return ret;
3296 }
3297
3298 /* Destroy a flow rule on ixgbe. */
3299 static int
3300 ixgbe_flow_destroy(struct rte_eth_dev *dev,
3301                 struct rte_flow *flow,
3302                 struct rte_flow_error *error)
3303 {
3304         int ret;
3305         struct rte_flow *pmd_flow = flow;
3306         enum rte_filter_type filter_type = pmd_flow->filter_type;
3307         struct rte_eth_ntuple_filter ntuple_filter;
3308         struct rte_eth_ethertype_filter ethertype_filter;
3309         struct rte_eth_syn_filter syn_filter;
3310         struct ixgbe_fdir_rule fdir_rule;
3311         struct rte_eth_l2_tunnel_conf l2_tn_filter;
3312         struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
3313         struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
3314         struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
3315         struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
3316         struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
3317         struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
3318         struct ixgbe_hw_fdir_info *fdir_info =
3319                 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
3320         struct ixgbe_rss_conf_ele *rss_filter_ptr;
3321
3322         switch (filter_type) {
3323         case RTE_ETH_FILTER_NTUPLE:
3324                 ntuple_filter_ptr = (struct ixgbe_ntuple_filter_ele *)
3325                                         pmd_flow->rule;
3326                 rte_memcpy(&ntuple_filter,
3327                         &ntuple_filter_ptr->filter_info,
3328                         sizeof(struct rte_eth_ntuple_filter));
3329                 ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, FALSE);
3330                 if (!ret) {
3331                         TAILQ_REMOVE(&filter_ntuple_list,
3332                         ntuple_filter_ptr, entries);
3333                         rte_free(ntuple_filter_ptr);
3334                 }
3335                 break;
3336         case RTE_ETH_FILTER_ETHERTYPE:
3337                 ethertype_filter_ptr = (struct ixgbe_ethertype_filter_ele *)
3338                                         pmd_flow->rule;
3339                 rte_memcpy(&ethertype_filter,
3340                         &ethertype_filter_ptr->filter_info,
3341                         sizeof(struct rte_eth_ethertype_filter));
3342                 ret = ixgbe_add_del_ethertype_filter(dev,
3343                                 &ethertype_filter, FALSE);
3344                 if (!ret) {
3345                         TAILQ_REMOVE(&filter_ethertype_list,
3346                                 ethertype_filter_ptr, entries);
3347                         rte_free(ethertype_filter_ptr);
3348                 }
3349                 break;
3350         case RTE_ETH_FILTER_SYN:
3351                 syn_filter_ptr = (struct ixgbe_eth_syn_filter_ele *)
3352                                 pmd_flow->rule;
3353                 rte_memcpy(&syn_filter,
3354                         &syn_filter_ptr->filter_info,
3355                         sizeof(struct rte_eth_syn_filter));
3356                 ret = ixgbe_syn_filter_set(dev, &syn_filter, FALSE);
3357                 if (!ret) {
3358                         TAILQ_REMOVE(&filter_syn_list,
3359                                 syn_filter_ptr, entries);
3360                         rte_free(syn_filter_ptr);
3361                 }
3362                 break;
3363         case RTE_ETH_FILTER_FDIR:
3364                 fdir_rule_ptr = (struct ixgbe_fdir_rule_ele *)pmd_flow->rule;
3365                 rte_memcpy(&fdir_rule,
3366                         &fdir_rule_ptr->filter_info,
3367                         sizeof(struct ixgbe_fdir_rule));
3368                 ret = ixgbe_fdir_filter_program(dev, &fdir_rule, TRUE, FALSE);
3369                 if (!ret) {
3370                         TAILQ_REMOVE(&filter_fdir_list,
3371                                 fdir_rule_ptr, entries);
3372                         rte_free(fdir_rule_ptr);
3373                         if (TAILQ_EMPTY(&filter_fdir_list))
3374                                 fdir_info->mask_added = false;
3375                 }
3376                 break;
3377         case RTE_ETH_FILTER_L2_TUNNEL:
3378                 l2_tn_filter_ptr = (struct ixgbe_eth_l2_tunnel_conf_ele *)
3379                                 pmd_flow->rule;
3380                 rte_memcpy(&l2_tn_filter, &l2_tn_filter_ptr->filter_info,
3381                         sizeof(struct rte_eth_l2_tunnel_conf));
3382                 ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_filter);
3383                 if (!ret) {
3384                         TAILQ_REMOVE(&filter_l2_tunnel_list,
3385                                 l2_tn_filter_ptr, entries);
3386                         rte_free(l2_tn_filter_ptr);
3387                 }
3388                 break;
3389         case RTE_ETH_FILTER_HASH:
3390                 rss_filter_ptr = (struct ixgbe_rss_conf_ele *)
3391                                 pmd_flow->rule;
3392                 ret = ixgbe_config_rss_filter(dev,
3393                                         &rss_filter_ptr->filter_info, FALSE);
3394                 if (!ret) {
3395                         TAILQ_REMOVE(&filter_rss_list,
3396                                 rss_filter_ptr, entries);
3397                         rte_free(rss_filter_ptr);
3398                 }
3399                 break;
3400         default:
3401                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
3402                             filter_type);
3403                 ret = -EINVAL;
3404                 break;
3405         }
3406
3407         if (ret) {
3408                 rte_flow_error_set(error, EINVAL,
3409                                 RTE_FLOW_ERROR_TYPE_HANDLE,
3410                                 NULL, "Failed to destroy flow");
3411                 return ret;
3412         }
3413
3414         TAILQ_FOREACH(ixgbe_flow_mem_ptr, &ixgbe_flow_list, entries) {
3415                 if (ixgbe_flow_mem_ptr->flow == pmd_flow) {
3416                         TAILQ_REMOVE(&ixgbe_flow_list,
3417                                 ixgbe_flow_mem_ptr, entries);
3418                         rte_free(ixgbe_flow_mem_ptr);
3419                 }
3420         }
3421         rte_free(flow);
3422
3423         return ret;
3424 }
3425
3426 /*  Destroy all flow rules associated with a port on ixgbe. */
3427 static int
3428 ixgbe_flow_flush(struct rte_eth_dev *dev,
3429                 struct rte_flow_error *error)
3430 {
3431         int ret = 0;
3432
3433         ixgbe_clear_all_ntuple_filter(dev);
3434         ixgbe_clear_all_ethertype_filter(dev);
3435         ixgbe_clear_syn_filter(dev);
3436
3437         ret = ixgbe_clear_all_fdir_filter(dev);
3438         if (ret < 0) {
3439                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
3440                                         NULL, "Failed to flush rule");
3441                 return ret;
3442         }
3443
3444         ret = ixgbe_clear_all_l2_tn_filter(dev);
3445         if (ret < 0) {
3446                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
3447                                         NULL, "Failed to flush rule");
3448                 return ret;
3449         }
3450
3451         ixgbe_clear_rss_filter(dev);
3452
3453         ixgbe_filterlist_flush();
3454
3455         return 0;
3456 }
3457
3458 const struct rte_flow_ops ixgbe_flow_ops = {
3459         .validate = ixgbe_flow_validate,
3460         .create = ixgbe_flow_create,
3461         .destroy = ixgbe_flow_destroy,
3462         .flush = ixgbe_flow_flush,
3463 };
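
/*
 * Illustrative sketch (not part of the driver): an application reaches the
 * callbacks above through the generic rte_flow API.  The values below are
 * hypothetical placeholders (port_id, ip_spec/ip_mask, udp_spec/udp_mask)
 * and only show the expected call shape, e.g. steering IPv4/UDP traffic to
 * RX queue 1:
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		  .spec = &ip_spec, .mask = &ip_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
 *		  .spec = &udp_spec, .mask = &udp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *	struct rte_flow *f = rte_flow_create(port_id, &attr, pattern,
 *					     actions, &err);
 */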