1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2016 Intel Corporation
3  */
4
5 #include <sys/queue.h>
6 #include <stdio.h>
7 #include <errno.h>
8 #include <stdint.h>
9 #include <string.h>
10 #include <unistd.h>
11 #include <stdarg.h>
12 #include <inttypes.h>
13 #include <netinet/in.h>
14 #include <rte_byteorder.h>
15 #include <rte_common.h>
16 #include <rte_cycles.h>
17
18 #include <rte_interrupts.h>
19 #include <rte_log.h>
20 #include <rte_debug.h>
21 #include <rte_pci.h>
22 #include <rte_atomic.h>
23 #include <rte_branch_prediction.h>
24 #include <rte_memory.h>
25 #include <rte_eal.h>
26 #include <rte_alarm.h>
27 #include <rte_ether.h>
28 #include <rte_ethdev_driver.h>
29 #include <rte_malloc.h>
30 #include <rte_random.h>
31 #include <rte_dev.h>
32 #include <rte_hash_crc.h>
33 #include <rte_flow.h>
34 #include <rte_flow_driver.h>
35
36 #include "ixgbe_logs.h"
37 #include "base/ixgbe_api.h"
38 #include "base/ixgbe_vf.h"
39 #include "base/ixgbe_common.h"
40 #include "ixgbe_ethdev.h"
41 #include "ixgbe_bypass.h"
42 #include "ixgbe_rxtx.h"
43 #include "base/ixgbe_type.h"
44 #include "base/ixgbe_phy.h"
45 #include "rte_pmd_ixgbe.h"
46
47
48 #define IXGBE_MIN_N_TUPLE_PRIO 1
49 #define IXGBE_MAX_N_TUPLE_PRIO 7
50 #define IXGBE_MAX_FLX_SOURCE_OFF 62
51
52 /* ntuple filter list structure */
53 struct ixgbe_ntuple_filter_ele {
54         TAILQ_ENTRY(ixgbe_ntuple_filter_ele) entries;
55         struct rte_eth_ntuple_filter filter_info;
56 };
57 /* ethertype filter list structure */
58 struct ixgbe_ethertype_filter_ele {
59         TAILQ_ENTRY(ixgbe_ethertype_filter_ele) entries;
60         struct rte_eth_ethertype_filter filter_info;
61 };
62 /* syn filter list structure */
63 struct ixgbe_eth_syn_filter_ele {
64         TAILQ_ENTRY(ixgbe_eth_syn_filter_ele) entries;
65         struct rte_eth_syn_filter filter_info;
66 };
67 /* fdir filter list structure */
68 struct ixgbe_fdir_rule_ele {
69         TAILQ_ENTRY(ixgbe_fdir_rule_ele) entries;
70         struct ixgbe_fdir_rule filter_info;
71 };
72 /* l2_tunnel filter list structure */
73 struct ixgbe_eth_l2_tunnel_conf_ele {
74         TAILQ_ENTRY(ixgbe_eth_l2_tunnel_conf_ele) entries;
75         struct rte_eth_l2_tunnel_conf filter_info;
76 };
77 /* rss filter list structure */
78 struct ixgbe_rss_conf_ele {
79         TAILQ_ENTRY(ixgbe_rss_conf_ele) entries;
80         struct ixgbe_rte_flow_rss_conf filter_info;
81 };
82 /* ixgbe_flow memory list structure */
83 struct ixgbe_flow_mem {
84         TAILQ_ENTRY(ixgbe_flow_mem) entries;
85         struct rte_flow *flow;
86 };
87
88 TAILQ_HEAD(ixgbe_ntuple_filter_list, ixgbe_ntuple_filter_ele);
89 TAILQ_HEAD(ixgbe_ethertype_filter_list, ixgbe_ethertype_filter_ele);
90 TAILQ_HEAD(ixgbe_syn_filter_list, ixgbe_eth_syn_filter_ele);
91 TAILQ_HEAD(ixgbe_fdir_rule_filter_list, ixgbe_fdir_rule_ele);
92 TAILQ_HEAD(ixgbe_l2_tunnel_filter_list, ixgbe_eth_l2_tunnel_conf_ele);
93 TAILQ_HEAD(ixgbe_rss_filter_list, ixgbe_rss_conf_ele);
94 TAILQ_HEAD(ixgbe_flow_mem_list, ixgbe_flow_mem);
95
96 static struct ixgbe_ntuple_filter_list filter_ntuple_list;
97 static struct ixgbe_ethertype_filter_list filter_ethertype_list;
98 static struct ixgbe_syn_filter_list filter_syn_list;
99 static struct ixgbe_fdir_rule_filter_list filter_fdir_list;
100 static struct ixgbe_l2_tunnel_filter_list filter_l2_tunnel_list;
101 static struct ixgbe_rss_filter_list filter_rss_list;
102 static struct ixgbe_flow_mem_list ixgbe_flow_list;
103
104 /**
105  * An endless loop can never happen with the assumptions below:
106  * 1. there is at least one non-void item (END)
107  * 2. cur is before END.
108  */
109 static inline
110 const struct rte_flow_item *next_no_void_pattern(
111                 const struct rte_flow_item pattern[],
112                 const struct rte_flow_item *cur)
113 {
114         const struct rte_flow_item *next =
115                 cur ? cur + 1 : &pattern[0];
116         while (1) {
117                 if (next->type != RTE_FLOW_ITEM_TYPE_VOID)
118                         return next;
119                 next++;
120         }
121 }
122
123 static inline
124 const struct rte_flow_action *next_no_void_action(
125                 const struct rte_flow_action actions[],
126                 const struct rte_flow_action *cur)
127 {
128         const struct rte_flow_action *next =
129                 cur ? cur + 1 : &actions[0];
130         while (1) {
131                 if (next->type != RTE_FLOW_ACTION_TYPE_VOID)
132                         return next;
133                 next++;
134         }
135 }
136
137 /**
138  * Please be aware there is an assumption for all the parsers:
139  * rte_flow_item uses big endian, while rte_flow_attr and
140  * rte_flow_action use CPU order.
141  * Because the pattern is used to describe packets,
142  * the packets normally use network order.
143  */
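/*
 * For instance (illustrative values only): a TCP destination port of 80 in
 * an item spec must be given in network order, e.g. rte_cpu_to_be_16(80),
 * whereas rule attributes stay in CPU order, e.g. attr.priority = 1.
 */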
144
145 /**
146  * Parse the rule to see if it is an n-tuple rule.
147  * It also extracts the n-tuple filter info.
148  * pattern:
149  * The first not void item can be ETH or IPV4.
150  * The second not void item must be IPV4 or VLAN if the first one is ETH.
151  * The third not void item must be UDP, TCP or SCTP.
152  * The next not void item must be END.
153  * action:
154  * The first not void action should be QUEUE.
155  * The next not void action should be END.
156  * pattern example:
157  * ITEM         Spec                    Mask
158  * ETH          NULL                    NULL
159  * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
160  *              dst_addr 192.167.3.50   0xFFFFFFFF
161  *              next_proto_id   17      0xFF
162  * UDP/TCP/     src_port        80      0xFFFF
163  * SCTP         dst_port        80      0xFFFF
164  * END
165  * other members in mask and spec should be set to 0x00.
166  * item->last should be NULL.
167  *
168  * Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY.
169  *
170  */
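/*
 * Illustrative sketch only (not part of the driver): how an application
 * might express the example rule above through the generic rte_flow API
 * before it reaches this parser.  The port id and queue index are made-up
 * values; IPv4() comes from rte_ip.h.
 *
 *   struct rte_flow_attr attr = { .ingress = 1, .priority = 1 };
 *   struct rte_flow_item_ipv4 ip_spec = { .hdr = {
 *           .src_addr = rte_cpu_to_be_32(IPv4(192, 168, 1, 20)),
 *           .dst_addr = rte_cpu_to_be_32(IPv4(192, 167, 3, 50)),
 *           .next_proto_id = IPPROTO_UDP, }, };
 *   struct rte_flow_item_ipv4 ip_mask = { .hdr = {
 *           .src_addr = rte_cpu_to_be_32(UINT32_MAX),
 *           .dst_addr = rte_cpu_to_be_32(UINT32_MAX),
 *           .next_proto_id = UINT8_MAX, }, };
 *   struct rte_flow_item_udp udp_spec = { .hdr = {
 *           .src_port = rte_cpu_to_be_16(80),
 *           .dst_port = rte_cpu_to_be_16(80), }, };
 *   struct rte_flow_item_udp udp_mask = { .hdr = {
 *           .src_port = rte_cpu_to_be_16(UINT16_MAX),
 *           .dst_port = rte_cpu_to_be_16(UINT16_MAX), }, };
 *   struct rte_flow_item pattern[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *           { .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *             .spec = &ip_spec, .mask = &ip_mask },
 *           { .type = RTE_FLOW_ITEM_TYPE_UDP,
 *             .spec = &udp_spec, .mask = &udp_mask },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 *   struct rte_flow_action_queue queue = { .index = 3 };
 *   struct rte_flow_action actions[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 *   struct rte_flow_error err;
 *   struct rte_flow *flow = rte_flow_create(port_id, &attr, pattern,
 *                                           actions, &err);
 */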
171 static int
172 cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
173                          const struct rte_flow_item pattern[],
174                          const struct rte_flow_action actions[],
175                          struct rte_eth_ntuple_filter *filter,
176                          struct rte_flow_error *error)
177 {
178         const struct rte_flow_item *item;
179         const struct rte_flow_action *act;
180         const struct rte_flow_item_ipv4 *ipv4_spec;
181         const struct rte_flow_item_ipv4 *ipv4_mask;
182         const struct rte_flow_item_tcp *tcp_spec;
183         const struct rte_flow_item_tcp *tcp_mask;
184         const struct rte_flow_item_udp *udp_spec;
185         const struct rte_flow_item_udp *udp_mask;
186         const struct rte_flow_item_sctp *sctp_spec;
187         const struct rte_flow_item_sctp *sctp_mask;
188         const struct rte_flow_item_eth *eth_spec;
189         const struct rte_flow_item_eth *eth_mask;
190         const struct rte_flow_item_vlan *vlan_spec;
191         const struct rte_flow_item_vlan *vlan_mask;
192         struct rte_flow_item_eth eth_null;
193         struct rte_flow_item_vlan vlan_null;
194
195         if (!pattern) {
196                 rte_flow_error_set(error,
197                         EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
198                         NULL, "NULL pattern.");
199                 return -rte_errno;
200         }
201
202         if (!actions) {
203                 rte_flow_error_set(error, EINVAL,
204                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
205                                    NULL, "NULL action.");
206                 return -rte_errno;
207         }
208         if (!attr) {
209                 rte_flow_error_set(error, EINVAL,
210                                    RTE_FLOW_ERROR_TYPE_ATTR,
211                                    NULL, "NULL attribute.");
212                 return -rte_errno;
213         }
214
215         memset(&eth_null, 0, sizeof(struct rte_flow_item_eth));
216         memset(&vlan_null, 0, sizeof(struct rte_flow_item_vlan));
217
218 #ifdef RTE_LIBRTE_SECURITY
219         /**
220          *  Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY
221          */
222         act = next_no_void_action(actions, NULL);
223         if (act->type == RTE_FLOW_ACTION_TYPE_SECURITY) {
224                 const void *conf = act->conf;
225                 /* check if the next not void item is END */
226                 act = next_no_void_action(actions, act);
227                 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
228                         memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
229                         rte_flow_error_set(error, EINVAL,
230                                 RTE_FLOW_ERROR_TYPE_ACTION,
231                                 act, "Not supported action.");
232                         return -rte_errno;
233                 }
234
235                 /* get the IP pattern*/
236                 item = next_no_void_pattern(pattern, NULL);
237                 while (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
238                                 item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
239                         if (item->last ||
240                                         item->type == RTE_FLOW_ITEM_TYPE_END) {
241                                 rte_flow_error_set(error, EINVAL,
242                                         RTE_FLOW_ERROR_TYPE_ITEM,
243                                         item, "IP pattern missing.");
244                                 return -rte_errno;
245                         }
246                         item = next_no_void_pattern(pattern, item);
247                 }
248
249                 filter->proto = IPPROTO_ESP;
250                 return ixgbe_crypto_add_ingress_sa_from_flow(conf, item->spec,
251                                         item->type == RTE_FLOW_ITEM_TYPE_IPV6);
252         }
253 #endif
254
255         /* the first not void item can be MAC or IPv4 */
256         item = next_no_void_pattern(pattern, NULL);
257
258         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
259             item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
260                 rte_flow_error_set(error, EINVAL,
261                         RTE_FLOW_ERROR_TYPE_ITEM,
262                         item, "Not supported by ntuple filter");
263                 return -rte_errno;
264         }
265         /* Skip Ethernet */
266         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
267                 eth_spec = (const struct rte_flow_item_eth *)item->spec;
268                 eth_mask = (const struct rte_flow_item_eth *)item->mask;
269                 /*Not supported last point for range*/
270                 if (item->last) {
271                         rte_flow_error_set(error,
272                           EINVAL,
273                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
274                           item, "Not supported last point for range");
275                         return -rte_errno;
276
277                 }
278                 /* if the first item is MAC, the content should be NULL */
279                 if ((item->spec || item->mask) &&
280                         (memcmp(eth_spec, &eth_null,
281                                 sizeof(struct rte_flow_item_eth)) ||
282                          memcmp(eth_mask, &eth_null,
283                                 sizeof(struct rte_flow_item_eth)))) {
284                         rte_flow_error_set(error, EINVAL,
285                                 RTE_FLOW_ERROR_TYPE_ITEM,
286                                 item, "Not supported by ntuple filter");
287                         return -rte_errno;
288                 }
289                 /* check if the next not void item is IPv4 or Vlan */
290                 item = next_no_void_pattern(pattern, item);
291                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
292                         item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
293                         rte_flow_error_set(error,
294                           EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
295                           item, "Not supported by ntuple filter");
296                         return -rte_errno;
297                 }
298         }
299
300         if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
301                 vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
302                 vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
303                 /*Not supported last point for range*/
304                 if (item->last) {
305                         rte_flow_error_set(error,
306                           EINVAL,
307                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
308                           item, "Not supported last point for range");
309                         return -rte_errno;
310                 }
311                 /* the content should be NULL */
312                 if ((item->spec || item->mask) &&
313                         (memcmp(vlan_spec, &vlan_null,
314                                 sizeof(struct rte_flow_item_vlan)) ||
315                          memcmp(vlan_mask, &vlan_null,
316                                 sizeof(struct rte_flow_item_vlan)))) {
317
318                         rte_flow_error_set(error, EINVAL,
319                                 RTE_FLOW_ERROR_TYPE_ITEM,
320                                 item, "Not supported by ntuple filter");
321                         return -rte_errno;
322                 }
323                 /* check if the next not void item is IPv4 */
324                 item = next_no_void_pattern(pattern, item);
325                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
326                         rte_flow_error_set(error,
327                           EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
328                           item, "Not supported by ntuple filter");
329                         return -rte_errno;
330                 }
331         }
332
333         if (item->mask) {
334                 /* get the IPv4 info */
335                 if (!item->spec || !item->mask) {
336                         rte_flow_error_set(error, EINVAL,
337                                 RTE_FLOW_ERROR_TYPE_ITEM,
338                                 item, "Invalid ntuple mask");
339                         return -rte_errno;
340                 }
341                 /*Not supported last point for range*/
342                 if (item->last) {
343                         rte_flow_error_set(error, EINVAL,
344                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
345                                 item, "Not supported last point for range");
346                         return -rte_errno;
347                 }
348
349                 ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
350                 /**
351                  * Only support src & dst addresses, protocol,
352                  * others should be masked.
353                  */
354                 if (ipv4_mask->hdr.version_ihl ||
355                     ipv4_mask->hdr.type_of_service ||
356                     ipv4_mask->hdr.total_length ||
357                     ipv4_mask->hdr.packet_id ||
358                     ipv4_mask->hdr.fragment_offset ||
359                     ipv4_mask->hdr.time_to_live ||
360                     ipv4_mask->hdr.hdr_checksum) {
361                         rte_flow_error_set(error,
362                                 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
363                                 item, "Not supported by ntuple filter");
364                         return -rte_errno;
365                 }
366
367                 filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
368                 filter->src_ip_mask = ipv4_mask->hdr.src_addr;
369                 filter->proto_mask  = ipv4_mask->hdr.next_proto_id;
370
371                 ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
372                 filter->dst_ip = ipv4_spec->hdr.dst_addr;
373                 filter->src_ip = ipv4_spec->hdr.src_addr;
374                 filter->proto  = ipv4_spec->hdr.next_proto_id;
375         }
376
377         /* check if the next not void item is TCP or UDP */
378         item = next_no_void_pattern(pattern, item);
379         if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
380             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
381             item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
382             item->type != RTE_FLOW_ITEM_TYPE_END) {
383                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
384                 rte_flow_error_set(error, EINVAL,
385                         RTE_FLOW_ERROR_TYPE_ITEM,
386                         item, "Not supported by ntuple filter");
387                 return -rte_errno;
388         }
389
390         if ((item->type != RTE_FLOW_ITEM_TYPE_END) &&
391                 (!item->spec && !item->mask)) {
392                 goto action;
393         }
394
395         /* get the TCP/UDP/SCTP info */
396         if (item->type != RTE_FLOW_ITEM_TYPE_END &&
397                 (!item->spec || !item->mask)) {
398                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
399                 rte_flow_error_set(error, EINVAL,
400                         RTE_FLOW_ERROR_TYPE_ITEM,
401                         item, "Invalid ntuple mask");
402                 return -rte_errno;
403         }
404
405         /*Not supported last point for range*/
406         if (item->last) {
407                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
408                 rte_flow_error_set(error, EINVAL,
409                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
410                         item, "Not supported last point for range");
411                 return -rte_errno;
412
413         }
414
415         if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
416                 tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
417
418                 /**
419                  * Only support src & dst ports, tcp flags,
420                  * others should be masked.
421                  */
422                 if (tcp_mask->hdr.sent_seq ||
423                     tcp_mask->hdr.recv_ack ||
424                     tcp_mask->hdr.data_off ||
425                     tcp_mask->hdr.rx_win ||
426                     tcp_mask->hdr.cksum ||
427                     tcp_mask->hdr.tcp_urp) {
428                         memset(filter, 0,
429                                 sizeof(struct rte_eth_ntuple_filter));
430                         rte_flow_error_set(error, EINVAL,
431                                 RTE_FLOW_ERROR_TYPE_ITEM,
432                                 item, "Not supported by ntuple filter");
433                         return -rte_errno;
434                 }
435
436                 filter->dst_port_mask  = tcp_mask->hdr.dst_port;
437                 filter->src_port_mask  = tcp_mask->hdr.src_port;
438                 if (tcp_mask->hdr.tcp_flags == 0xFF) {
439                         filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
440                 } else if (!tcp_mask->hdr.tcp_flags) {
441                         filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
442                 } else {
443                         memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
444                         rte_flow_error_set(error, EINVAL,
445                                 RTE_FLOW_ERROR_TYPE_ITEM,
446                                 item, "Not supported by ntuple filter");
447                         return -rte_errno;
448                 }
449
450                 tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
451                 filter->dst_port  = tcp_spec->hdr.dst_port;
452                 filter->src_port  = tcp_spec->hdr.src_port;
453                 filter->tcp_flags = tcp_spec->hdr.tcp_flags;
454         } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
455                 udp_mask = (const struct rte_flow_item_udp *)item->mask;
456
457                 /**
458                  * Only support src & dst ports,
459                  * others should be masked.
460                  */
461                 if (udp_mask->hdr.dgram_len ||
462                     udp_mask->hdr.dgram_cksum) {
463                         memset(filter, 0,
464                                 sizeof(struct rte_eth_ntuple_filter));
465                         rte_flow_error_set(error, EINVAL,
466                                 RTE_FLOW_ERROR_TYPE_ITEM,
467                                 item, "Not supported by ntuple filter");
468                         return -rte_errno;
469                 }
470
471                 filter->dst_port_mask = udp_mask->hdr.dst_port;
472                 filter->src_port_mask = udp_mask->hdr.src_port;
473
474                 udp_spec = (const struct rte_flow_item_udp *)item->spec;
475                 filter->dst_port = udp_spec->hdr.dst_port;
476                 filter->src_port = udp_spec->hdr.src_port;
477         } else if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
478                 sctp_mask = (const struct rte_flow_item_sctp *)item->mask;
479
480                 /**
481                  * Only support src & dst ports,
482                  * others should be masked.
483                  */
484                 if (sctp_mask->hdr.tag ||
485                     sctp_mask->hdr.cksum) {
486                         memset(filter, 0,
487                                 sizeof(struct rte_eth_ntuple_filter));
488                         rte_flow_error_set(error, EINVAL,
489                                 RTE_FLOW_ERROR_TYPE_ITEM,
490                                 item, "Not supported by ntuple filter");
491                         return -rte_errno;
492                 }
493
494                 filter->dst_port_mask = sctp_mask->hdr.dst_port;
495                 filter->src_port_mask = sctp_mask->hdr.src_port;
496
497                 sctp_spec = (const struct rte_flow_item_sctp *)item->spec;
498                 filter->dst_port = sctp_spec->hdr.dst_port;
499                 filter->src_port = sctp_spec->hdr.src_port;
500         } else {
501                 goto action;
502         }
503
504         /* check if the next not void item is END */
505         item = next_no_void_pattern(pattern, item);
506         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
507                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
508                 rte_flow_error_set(error, EINVAL,
509                         RTE_FLOW_ERROR_TYPE_ITEM,
510                         item, "Not supported by ntuple filter");
511                 return -rte_errno;
512         }
513
514 action:
515
516         /**
517          * n-tuple only supports forwarding,
518          * check if the first not void action is QUEUE.
519          */
520         act = next_no_void_action(actions, NULL);
521         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
522                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
523                 rte_flow_error_set(error, EINVAL,
524                         RTE_FLOW_ERROR_TYPE_ACTION,
525                         item, "Not supported action.");
526                 return -rte_errno;
527         }
528         filter->queue =
529                 ((const struct rte_flow_action_queue *)act->conf)->index;
530
531         /* check if the next not void item is END */
532         act = next_no_void_action(actions, act);
533         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
534                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
535                 rte_flow_error_set(error, EINVAL,
536                         RTE_FLOW_ERROR_TYPE_ACTION,
537                         act, "Not supported action.");
538                 return -rte_errno;
539         }
540
541         /* parse attr */
542         /* must be input direction */
543         if (!attr->ingress) {
544                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
545                 rte_flow_error_set(error, EINVAL,
546                                    RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
547                                    attr, "Only support ingress.");
548                 return -rte_errno;
549         }
550
551         /* not supported */
552         if (attr->egress) {
553                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
554                 rte_flow_error_set(error, EINVAL,
555                                    RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
556                                    attr, "Not support egress.");
557                 return -rte_errno;
558         }
559
560         if (attr->priority > 0xFFFF) {
561                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
562                 rte_flow_error_set(error, EINVAL,
563                                    RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
564                                    attr, "Error priority.");
565                 return -rte_errno;
566         }
567         filter->priority = (uint16_t)attr->priority;
568         if (attr->priority < IXGBE_MIN_N_TUPLE_PRIO ||
569             attr->priority > IXGBE_MAX_N_TUPLE_PRIO)
570             filter->priority = 1;
571
572         return 0;
573 }
574
575 /* a specific function for ixgbe because the flags are specific */
576 static int
577 ixgbe_parse_ntuple_filter(struct rte_eth_dev *dev,
578                           const struct rte_flow_attr *attr,
579                           const struct rte_flow_item pattern[],
580                           const struct rte_flow_action actions[],
581                           struct rte_eth_ntuple_filter *filter,
582                           struct rte_flow_error *error)
583 {
584         int ret;
585         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
586
587         MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
588
589         ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);
590
591         if (ret)
592                 return ret;
593
594 #ifdef RTE_LIBRTE_SECURITY
595         /* An ESP flow is not really a flow */
596         if (filter->proto == IPPROTO_ESP)
597                 return 0;
598 #endif
599
600         /* Ixgbe doesn't support tcp flags. */
601         if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
602                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
603                 rte_flow_error_set(error, EINVAL,
604                                    RTE_FLOW_ERROR_TYPE_ITEM,
605                                    NULL, "Not supported by ntuple filter");
606                 return -rte_errno;
607         }
608
609         /* Ixgbe supports only n-tuple priorities 1 to 7. */
610         if (filter->priority < IXGBE_MIN_N_TUPLE_PRIO ||
611             filter->priority > IXGBE_MAX_N_TUPLE_PRIO) {
612                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
613                 rte_flow_error_set(error, EINVAL,
614                         RTE_FLOW_ERROR_TYPE_ITEM,
615                         NULL, "Priority not supported by ntuple filter");
616                 return -rte_errno;
617         }
618
619         if (filter->queue >= dev->data->nb_rx_queues)
620                 return -rte_errno;
621
622         /* fixed value for ixgbe */
623         filter->flags = RTE_5TUPLE_FLAGS;
624         return 0;
625 }
626
627 /**
628  * Parse the rule to see if it is an ethertype rule.
629  * It also extracts the ethertype filter info.
630  * pattern:
631  * The first not void item must be ETH.
632  * The next not void item must be END.
633  * action:
634  * The first not void action should be QUEUE.
635  * The next not void action should be END.
636  * pattern example:
637  * ITEM         Spec                    Mask
638  * ETH          type    0x0807          0xFFFF
639  * END
640  * other members in mask and spec should be set to 0x00.
641  * item->last should be NULL.
642  */
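/*
 * Illustrative sketch only (not part of the driver): an ethertype rule
 * matching the example above, built through the generic rte_flow API.
 * The port id and queue index are made-up values.
 *
 *   struct rte_flow_attr attr = { .ingress = 1 };
 *   struct rte_flow_item_eth eth_spec = {
 *           .type = rte_cpu_to_be_16(0x0807), };
 *   struct rte_flow_item_eth eth_mask = {
 *           .type = rte_cpu_to_be_16(0xFFFF), };
 *   struct rte_flow_item pattern[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH,
 *             .spec = &eth_spec, .mask = &eth_mask },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 *   struct rte_flow_action_queue queue = { .index = 2 };
 *   struct rte_flow_action actions[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 *   struct rte_flow_error err;
 *   struct rte_flow *flow = rte_flow_create(port_id, &attr, pattern,
 *                                           actions, &err);
 */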
643 static int
644 cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
645                             const struct rte_flow_item *pattern,
646                             const struct rte_flow_action *actions,
647                             struct rte_eth_ethertype_filter *filter,
648                             struct rte_flow_error *error)
649 {
650         const struct rte_flow_item *item;
651         const struct rte_flow_action *act;
652         const struct rte_flow_item_eth *eth_spec;
653         const struct rte_flow_item_eth *eth_mask;
654         const struct rte_flow_action_queue *act_q;
655
656         if (!pattern) {
657                 rte_flow_error_set(error, EINVAL,
658                                 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
659                                 NULL, "NULL pattern.");
660                 return -rte_errno;
661         }
662
663         if (!actions) {
664                 rte_flow_error_set(error, EINVAL,
665                                 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
666                                 NULL, "NULL action.");
667                 return -rte_errno;
668         }
669
670         if (!attr) {
671                 rte_flow_error_set(error, EINVAL,
672                                    RTE_FLOW_ERROR_TYPE_ATTR,
673                                    NULL, "NULL attribute.");
674                 return -rte_errno;
675         }
676
677         item = next_no_void_pattern(pattern, NULL);
678         /* The first non-void item should be MAC. */
679         if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
680                 rte_flow_error_set(error, EINVAL,
681                         RTE_FLOW_ERROR_TYPE_ITEM,
682                         item, "Not supported by ethertype filter");
683                 return -rte_errno;
684         }
685
686         /*Not supported last point for range*/
687         if (item->last) {
688                 rte_flow_error_set(error, EINVAL,
689                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
690                         item, "Not supported last point for range");
691                 return -rte_errno;
692         }
693
694         /* Get the MAC info. */
695         if (!item->spec || !item->mask) {
696                 rte_flow_error_set(error, EINVAL,
697                                 RTE_FLOW_ERROR_TYPE_ITEM,
698                                 item, "Not supported by ethertype filter");
699                 return -rte_errno;
700         }
701
702         eth_spec = (const struct rte_flow_item_eth *)item->spec;
703         eth_mask = (const struct rte_flow_item_eth *)item->mask;
704
705         /* Mask bits of source MAC address must be full of 0.
706          * Mask bits of destination MAC address must be full
707          * of 1 or full of 0.
708          */
709         if (!is_zero_ether_addr(&eth_mask->src) ||
710             (!is_zero_ether_addr(&eth_mask->dst) &&
711              !is_broadcast_ether_addr(&eth_mask->dst))) {
712                 rte_flow_error_set(error, EINVAL,
713                                 RTE_FLOW_ERROR_TYPE_ITEM,
714                                 item, "Invalid ether address mask");
715                 return -rte_errno;
716         }
717
718         if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
719                 rte_flow_error_set(error, EINVAL,
720                                 RTE_FLOW_ERROR_TYPE_ITEM,
721                                 item, "Invalid ethertype mask");
722                 return -rte_errno;
723         }
724
725         /* If mask bits of destination MAC address
726          * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
727          */
728         if (is_broadcast_ether_addr(&eth_mask->dst)) {
729                 filter->mac_addr = eth_spec->dst;
730                 filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
731         } else {
732                 filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
733         }
734         filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
735
736         /* Check if the next non-void item is END. */
737         item = next_no_void_pattern(pattern, item);
738         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
739                 rte_flow_error_set(error, EINVAL,
740                                 RTE_FLOW_ERROR_TYPE_ITEM,
741                                 item, "Not supported by ethertype filter.");
742                 return -rte_errno;
743         }
744
745         /* Parse action */
746
747         act = next_no_void_action(actions, NULL);
748         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
749             act->type != RTE_FLOW_ACTION_TYPE_DROP) {
750                 rte_flow_error_set(error, EINVAL,
751                                 RTE_FLOW_ERROR_TYPE_ACTION,
752                                 act, "Not supported action.");
753                 return -rte_errno;
754         }
755
756         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
757                 act_q = (const struct rte_flow_action_queue *)act->conf;
758                 filter->queue = act_q->index;
759         } else {
760                 filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
761         }
762
763         /* Check if the next non-void item is END */
764         act = next_no_void_action(actions, act);
765         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
766                 rte_flow_error_set(error, EINVAL,
767                                 RTE_FLOW_ERROR_TYPE_ACTION,
768                                 act, "Not supported action.");
769                 return -rte_errno;
770         }
771
772         /* Parse attr */
773         /* Must be input direction */
774         if (!attr->ingress) {
775                 rte_flow_error_set(error, EINVAL,
776                                 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
777                                 attr, "Only support ingress.");
778                 return -rte_errno;
779         }
780
781         /* Not supported */
782         if (attr->egress) {
783                 rte_flow_error_set(error, EINVAL,
784                                 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
785                                 attr, "Not support egress.");
786                 return -rte_errno;
787         }
788
789         /* Not supported */
790         if (attr->priority) {
791                 rte_flow_error_set(error, EINVAL,
792                                 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
793                                 attr, "Not support priority.");
794                 return -rte_errno;
795         }
796
797         /* Not supported */
798         if (attr->group) {
799                 rte_flow_error_set(error, EINVAL,
800                                 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
801                                 attr, "Not support group.");
802                 return -rte_errno;
803         }
804
805         return 0;
806 }
807
808 static int
809 ixgbe_parse_ethertype_filter(struct rte_eth_dev *dev,
810                                  const struct rte_flow_attr *attr,
811                              const struct rte_flow_item pattern[],
812                              const struct rte_flow_action actions[],
813                              struct rte_eth_ethertype_filter *filter,
814                              struct rte_flow_error *error)
815 {
816         int ret;
817         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
818
819         MAC_TYPE_FILTER_SUP(hw->mac.type);
820
821         ret = cons_parse_ethertype_filter(attr, pattern,
822                                         actions, filter, error);
823
824         if (ret)
825                 return ret;
826
827         /* Ixgbe doesn't support MAC address. */
828         if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
829                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
830                 rte_flow_error_set(error, EINVAL,
831                         RTE_FLOW_ERROR_TYPE_ITEM,
832                         NULL, "Not supported by ethertype filter");
833                 return -rte_errno;
834         }
835
836         if (filter->queue >= dev->data->nb_rx_queues) {
837                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
838                 rte_flow_error_set(error, EINVAL,
839                         RTE_FLOW_ERROR_TYPE_ITEM,
840                         NULL, "queue index much too big");
841                 return -rte_errno;
842         }
843
844         if (filter->ether_type == ETHER_TYPE_IPv4 ||
845                 filter->ether_type == ETHER_TYPE_IPv6) {
846                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
847                 rte_flow_error_set(error, EINVAL,
848                         RTE_FLOW_ERROR_TYPE_ITEM,
849                         NULL, "IPv4/IPv6 not supported by ethertype filter");
850                 return -rte_errno;
851         }
852
853         if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
854                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
855                 rte_flow_error_set(error, EINVAL,
856                         RTE_FLOW_ERROR_TYPE_ITEM,
857                         NULL, "mac compare is unsupported");
858                 return -rte_errno;
859         }
860
861         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
862                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
863                 rte_flow_error_set(error, EINVAL,
864                         RTE_FLOW_ERROR_TYPE_ITEM,
865                         NULL, "drop option is unsupported");
866                 return -rte_errno;
867         }
868
869         return 0;
870 }
871
872 /**
873  * Parse the rule to see if it is a TCP SYN rule.
874  * It also extracts the TCP SYN filter info.
875  * pattern:
876  * The first not void item can be ETH, IPV4, IPV6 or TCP.
877  * The ETH and IPV4/IPV6 items, if present, must have NULL spec and mask.
878  * The TCP item must match only the SYN flag.
879  * The next not void item must be END.
880  * action:
881  * The first not void action should be QUEUE.
882  * The next not void action should be END.
883  * pattern example:
884  * ITEM         Spec                    Mask
885  * ETH          NULL                    NULL
886  * IPV4/IPV6    NULL                    NULL
887  * TCP          tcp_flags       0x02    0x02
888  * END
889  * other members in mask and spec should be set to 0x00.
890  * item->last should be NULL.
891  */
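/*
 * Illustrative sketch only (not part of the driver): a TCP SYN rule
 * matching the example above.  The port id and queue index are made-up
 * values; note the TCP mask must cover exactly the SYN flag.
 *
 *   struct rte_flow_attr attr = { .ingress = 1 };
 *   struct rte_flow_item_tcp tcp_spec = {
 *           .hdr = { .tcp_flags = TCP_SYN_FLAG, }, };
 *   struct rte_flow_item_tcp tcp_mask = {
 *           .hdr = { .tcp_flags = TCP_SYN_FLAG, }, };
 *   struct rte_flow_item pattern[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *           { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *           { .type = RTE_FLOW_ITEM_TYPE_TCP,
 *             .spec = &tcp_spec, .mask = &tcp_mask },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 *   struct rte_flow_action_queue queue = { .index = 1 };
 *   struct rte_flow_action actions[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 *   struct rte_flow_error err;
 *   struct rte_flow *flow = rte_flow_create(port_id, &attr, pattern,
 *                                           actions, &err);
 */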
892 static int
893 cons_parse_syn_filter(const struct rte_flow_attr *attr,
894                                 const struct rte_flow_item pattern[],
895                                 const struct rte_flow_action actions[],
896                                 struct rte_eth_syn_filter *filter,
897                                 struct rte_flow_error *error)
898 {
899         const struct rte_flow_item *item;
900         const struct rte_flow_action *act;
901         const struct rte_flow_item_tcp *tcp_spec;
902         const struct rte_flow_item_tcp *tcp_mask;
903         const struct rte_flow_action_queue *act_q;
904
905         if (!pattern) {
906                 rte_flow_error_set(error, EINVAL,
907                                 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
908                                 NULL, "NULL pattern.");
909                 return -rte_errno;
910         }
911
912         if (!actions) {
913                 rte_flow_error_set(error, EINVAL,
914                                 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
915                                 NULL, "NULL action.");
916                 return -rte_errno;
917         }
918
919         if (!attr) {
920                 rte_flow_error_set(error, EINVAL,
921                                    RTE_FLOW_ERROR_TYPE_ATTR,
922                                    NULL, "NULL attribute.");
923                 return -rte_errno;
924         }
925
926
927         /* the first not void item should be MAC or IPv4 or IPv6 or TCP */
928         item = next_no_void_pattern(pattern, NULL);
929         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
930             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
931             item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
932             item->type != RTE_FLOW_ITEM_TYPE_TCP) {
933                 rte_flow_error_set(error, EINVAL,
934                                 RTE_FLOW_ERROR_TYPE_ITEM,
935                                 item, "Not supported by syn filter");
936                 return -rte_errno;
937         }
938         /*Not supported last point for range*/
939         if (item->last) {
940                 rte_flow_error_set(error, EINVAL,
941                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
942                         item, "Not supported last point for range");
943                 return -rte_errno;
944         }
945
946         /* Skip Ethernet */
947         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
948                 /* if the item is MAC, the content should be NULL */
949                 if (item->spec || item->mask) {
950                         rte_flow_error_set(error, EINVAL,
951                                 RTE_FLOW_ERROR_TYPE_ITEM,
952                                 item, "Invalid SYN address mask");
953                         return -rte_errno;
954                 }
955
956                 /* check if the next not void item is IPv4 or IPv6 */
957                 item = next_no_void_pattern(pattern, item);
958                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
959                     item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
960                         rte_flow_error_set(error, EINVAL,
961                                 RTE_FLOW_ERROR_TYPE_ITEM,
962                                 item, "Not supported by syn filter");
963                         return -rte_errno;
964                 }
965         }
966
967         /* Skip IP */
968         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
969             item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
970                 /* if the item is IP, the content should be NULL */
971                 if (item->spec || item->mask) {
972                         rte_flow_error_set(error, EINVAL,
973                                 RTE_FLOW_ERROR_TYPE_ITEM,
974                                 item, "Invalid SYN mask");
975                         return -rte_errno;
976                 }
977
978                 /* check if the next not void item is TCP */
979                 item = next_no_void_pattern(pattern, item);
980                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
981                         rte_flow_error_set(error, EINVAL,
982                                 RTE_FLOW_ERROR_TYPE_ITEM,
983                                 item, "Not supported by syn filter");
984                         return -rte_errno;
985                 }
986         }
987
988         /* Get the TCP info. Only support SYN. */
989         if (!item->spec || !item->mask) {
990                 rte_flow_error_set(error, EINVAL,
991                                 RTE_FLOW_ERROR_TYPE_ITEM,
992                                 item, "Invalid SYN mask");
993                 return -rte_errno;
994         }
995         /*Not supported last point for range*/
996         if (item->last) {
997                 rte_flow_error_set(error, EINVAL,
998                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
999                         item, "Not supported last point for range");
1000                 return -rte_errno;
1001         }
1002
1003         tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
1004         tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
1005         if (!(tcp_spec->hdr.tcp_flags & TCP_SYN_FLAG) ||
1006             tcp_mask->hdr.src_port ||
1007             tcp_mask->hdr.dst_port ||
1008             tcp_mask->hdr.sent_seq ||
1009             tcp_mask->hdr.recv_ack ||
1010             tcp_mask->hdr.data_off ||
1011             tcp_mask->hdr.tcp_flags != TCP_SYN_FLAG ||
1012             tcp_mask->hdr.rx_win ||
1013             tcp_mask->hdr.cksum ||
1014             tcp_mask->hdr.tcp_urp) {
1015                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1016                 rte_flow_error_set(error, EINVAL,
1017                                 RTE_FLOW_ERROR_TYPE_ITEM,
1018                                 item, "Not supported by syn filter");
1019                 return -rte_errno;
1020         }
1021
1022         /* check if the next not void item is END */
1023         item = next_no_void_pattern(pattern, item);
1024         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1025                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1026                 rte_flow_error_set(error, EINVAL,
1027                                 RTE_FLOW_ERROR_TYPE_ITEM,
1028                                 item, "Not supported by syn filter");
1029                 return -rte_errno;
1030         }
1031
1032         /* check if the first not void action is QUEUE. */
1033         act = next_no_void_action(actions, NULL);
1034         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
1035                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1036                 rte_flow_error_set(error, EINVAL,
1037                                 RTE_FLOW_ERROR_TYPE_ACTION,
1038                                 act, "Not supported action.");
1039                 return -rte_errno;
1040         }
1041
1042         act_q = (const struct rte_flow_action_queue *)act->conf;
1043         filter->queue = act_q->index;
1044         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
1045                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1046                 rte_flow_error_set(error, EINVAL,
1047                                 RTE_FLOW_ERROR_TYPE_ACTION,
1048                                 act, "Not supported action.");
1049                 return -rte_errno;
1050         }
1051
1052         /* check if the next not void item is END */
1053         act = next_no_void_action(actions, act);
1054         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1055                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1056                 rte_flow_error_set(error, EINVAL,
1057                                 RTE_FLOW_ERROR_TYPE_ACTION,
1058                                 act, "Not supported action.");
1059                 return -rte_errno;
1060         }
1061
1062         /* parse attr */
1063         /* must be input direction */
1064         if (!attr->ingress) {
1065                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1066                 rte_flow_error_set(error, EINVAL,
1067                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1068                         attr, "Only support ingress.");
1069                 return -rte_errno;
1070         }
1071
1072         /* not supported */
1073         if (attr->egress) {
1074                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1075                 rte_flow_error_set(error, EINVAL,
1076                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1077                         attr, "Not support egress.");
1078                 return -rte_errno;
1079         }
1080
1081         /* Support 2 priorities, the lowest or highest. */
1082         if (!attr->priority) {
1083                 filter->hig_pri = 0;
1084         } else if (attr->priority == (uint32_t)~0U) {
1085                 filter->hig_pri = 1;
1086         } else {
1087                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1088                 rte_flow_error_set(error, EINVAL,
1089                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1090                         attr, "Not support priority.");
1091                 return -rte_errno;
1092         }
1093
1094         return 0;
1095 }
1096
1097 static int
1098 ixgbe_parse_syn_filter(struct rte_eth_dev *dev,
1099                                  const struct rte_flow_attr *attr,
1100                              const struct rte_flow_item pattern[],
1101                              const struct rte_flow_action actions[],
1102                              struct rte_eth_syn_filter *filter,
1103                              struct rte_flow_error *error)
1104 {
1105         int ret;
1106         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1107
1108         MAC_TYPE_FILTER_SUP(hw->mac.type);
1109
1110         ret = cons_parse_syn_filter(attr, pattern,
1111                                         actions, filter, error);
1112
1113         if (ret)
1114                 return ret;
1115
1116         if (filter->queue >= dev->data->nb_rx_queues)
1117                 return -rte_errno;
1118
1119         return 0;
1120 }
1121
1122 /**
1123  * Parse the rule to see if it is an L2 tunnel rule.
1124  * It also extracts the L2 tunnel filter info.
1125  * Only E-tag is supported now.
1126  * pattern:
1127  * The first not void item must be E_TAG.
1128  * The next not void item must be END.
1129  * action:
1130  * The first not void action should be VF or PF.
1131  * The next not void action should be END.
1132  * pattern example:
1133  * ITEM         Spec                    Mask
1134  * E_TAG        grp             0x1     0x3
1135  *              e_cid_base      0x309   0xFFF
1136  * END
1137  * other members in mask and spec should be set to 0x00.
1138  * item->last should be NULL.
1139  */
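/*
 * Illustrative sketch only (not part of the driver): an E-tag rule matching
 * the example above (grp 0x1, e_cid_base 0x309), directed to VF 1.  The
 * port id and VF id are made-up values.
 *
 *   struct rte_flow_attr attr = { .ingress = 1 };
 *   struct rte_flow_item_e_tag e_tag_spec = {
 *           .rsvd_grp_ecid_b = rte_cpu_to_be_16((0x1 << 12) | 0x309), };
 *   struct rte_flow_item_e_tag e_tag_mask = {
 *           .rsvd_grp_ecid_b = rte_cpu_to_be_16(0x3FFF), };
 *   struct rte_flow_item pattern[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_E_TAG,
 *             .spec = &e_tag_spec, .mask = &e_tag_mask },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 *   struct rte_flow_action_vf vf = { .id = 1 };
 *   struct rte_flow_action actions[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_VF, .conf = &vf },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 *   struct rte_flow_error err;
 *   struct rte_flow *flow = rte_flow_create(port_id, &attr, pattern,
 *                                           actions, &err);
 */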
1140 static int
1141 cons_parse_l2_tn_filter(struct rte_eth_dev *dev,
1142                         const struct rte_flow_attr *attr,
1143                         const struct rte_flow_item pattern[],
1144                         const struct rte_flow_action actions[],
1145                         struct rte_eth_l2_tunnel_conf *filter,
1146                         struct rte_flow_error *error)
1147 {
1148         const struct rte_flow_item *item;
1149         const struct rte_flow_item_e_tag *e_tag_spec;
1150         const struct rte_flow_item_e_tag *e_tag_mask;
1151         const struct rte_flow_action *act;
1152         const struct rte_flow_action_vf *act_vf;
1153         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1154
1155         if (!pattern) {
1156                 rte_flow_error_set(error, EINVAL,
1157                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1158                         NULL, "NULL pattern.");
1159                 return -rte_errno;
1160         }
1161
1162         if (!actions) {
1163                 rte_flow_error_set(error, EINVAL,
1164                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1165                                    NULL, "NULL action.");
1166                 return -rte_errno;
1167         }
1168
1169         if (!attr) {
1170                 rte_flow_error_set(error, EINVAL,
1171                                    RTE_FLOW_ERROR_TYPE_ATTR,
1172                                    NULL, "NULL attribute.");
1173                 return -rte_errno;
1174         }
1175
1176         /* The first not void item should be e-tag. */
1177         item = next_no_void_pattern(pattern, NULL);
1178         if (item->type != RTE_FLOW_ITEM_TYPE_E_TAG) {
1179                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1180                 rte_flow_error_set(error, EINVAL,
1181                         RTE_FLOW_ERROR_TYPE_ITEM,
1182                         item, "Not supported by L2 tunnel filter");
1183                 return -rte_errno;
1184         }
1185
1186         if (!item->spec || !item->mask) {
1187                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1188                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
1189                         item, "Not supported by L2 tunnel filter");
1190                 return -rte_errno;
1191         }
1192
1193         /*Not supported last point for range*/
1194         if (item->last) {
1195                 rte_flow_error_set(error, EINVAL,
1196                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1197                         item, "Not supported last point for range");
1198                 return -rte_errno;
1199         }
1200
1201         e_tag_spec = (const struct rte_flow_item_e_tag *)item->spec;
1202         e_tag_mask = (const struct rte_flow_item_e_tag *)item->mask;
1203
1204         /* Only care about GRP and E cid base. */
1205         if (e_tag_mask->epcp_edei_in_ecid_b ||
1206             e_tag_mask->in_ecid_e ||
1207             e_tag_mask->ecid_e ||
1208             e_tag_mask->rsvd_grp_ecid_b != rte_cpu_to_be_16(0x3FFF)) {
1209                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1210                 rte_flow_error_set(error, EINVAL,
1211                         RTE_FLOW_ERROR_TYPE_ITEM,
1212                         item, "Not supported by L2 tunnel filter");
1213                 return -rte_errno;
1214         }
1215
1216         filter->l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
1217         /**
1218          * grp and e_cid_base are bit fields and only use 14 bits.
1219          * e-tag id is taken as little endian by HW.
1220          */
1221         filter->tunnel_id = rte_be_to_cpu_16(e_tag_spec->rsvd_grp_ecid_b);
1222
1223         /* check if the next not void item is END */
1224         item = next_no_void_pattern(pattern, item);
1225         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1226                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1227                 rte_flow_error_set(error, EINVAL,
1228                         RTE_FLOW_ERROR_TYPE_ITEM,
1229                         item, "Not supported by L2 tunnel filter");
1230                 return -rte_errno;
1231         }
1232
1233         /* parse attr */
1234         /* must be input direction */
1235         if (!attr->ingress) {
1236                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1237                 rte_flow_error_set(error, EINVAL,
1238                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1239                         attr, "Only support ingress.");
1240                 return -rte_errno;
1241         }
1242
1243         /* not supported */
1244         if (attr->egress) {
1245                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1246                 rte_flow_error_set(error, EINVAL,
1247                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1248                         attr, "Not support egress.");
1249                 return -rte_errno;
1250         }
1251
1252         /* not supported */
1253         if (attr->priority) {
1254                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1255                 rte_flow_error_set(error, EINVAL,
1256                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1257                         attr, "Not support priority.");
1258                 return -rte_errno;
1259         }
1260
1261         /* check if the first not void action is VF or PF. */
1262         act = next_no_void_action(actions, NULL);
1263         if (act->type != RTE_FLOW_ACTION_TYPE_VF &&
1264                         act->type != RTE_FLOW_ACTION_TYPE_PF) {
1265                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1266                 rte_flow_error_set(error, EINVAL,
1267                         RTE_FLOW_ERROR_TYPE_ACTION,
1268                         act, "Not supported action.");
1269                 return -rte_errno;
1270         }
1271
1272         if (act->type == RTE_FLOW_ACTION_TYPE_VF) {
1273                 act_vf = (const struct rte_flow_action_vf *)act->conf;
1274                 filter->pool = act_vf->id;
1275         } else {
1276                 filter->pool = pci_dev->max_vfs;
1277         }
1278
1279         /* check if the next not void item is END */
1280         act = next_no_void_action(actions, act);
1281         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1282                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1283                 rte_flow_error_set(error, EINVAL,
1284                         RTE_FLOW_ERROR_TYPE_ACTION,
1285                         act, "Not supported action.");
1286                 return -rte_errno;
1287         }
1288
1289         return 0;
1290 }
1291
1292 static int
1293 ixgbe_parse_l2_tn_filter(struct rte_eth_dev *dev,
1294                         const struct rte_flow_attr *attr,
1295                         const struct rte_flow_item pattern[],
1296                         const struct rte_flow_action actions[],
1297                         struct rte_eth_l2_tunnel_conf *l2_tn_filter,
1298                         struct rte_flow_error *error)
1299 {
1300         int ret = 0;
1301         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1302         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1303         uint16_t vf_num;
1304
1305         ret = cons_parse_l2_tn_filter(dev, attr, pattern,
1306                                 actions, l2_tn_filter, error);
1307
1308         if (hw->mac.type != ixgbe_mac_X550 &&
1309                 hw->mac.type != ixgbe_mac_X550EM_x &&
1310                 hw->mac.type != ixgbe_mac_X550EM_a) {
1311                 memset(l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1312                 rte_flow_error_set(error, EINVAL,
1313                         RTE_FLOW_ERROR_TYPE_ITEM,
1314                         NULL, "Not supported by L2 tunnel filter");
1315                 return -rte_errno;
1316         }
1317
1318         vf_num = pci_dev->max_vfs;
1319
1320         if (l2_tn_filter->pool > vf_num)
1321                 return -rte_errno;
1322
1323         return ret;
1324 }
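/*
 * A minimal application-side sketch, under the parser constraints shown
 * above, of the attribute and action layout accepted for an L2 tunnel rule:
 * ingress only, a single VF (or PF) action, then END. The pattern side is
 * assumed to be built elsewhere; "vf_id" is a hypothetical parameter.
 */
static void
example_l2_tn_attr_actions(uint16_t vf_id,
                           struct rte_flow_attr *attr,
                           struct rte_flow_action actions[2],
                           struct rte_flow_action_vf *act_vf)
{
        memset(attr, 0, sizeof(*attr));
        attr->ingress = 1;              /* egress and priority must stay 0 */

        memset(act_vf, 0, sizeof(*act_vf));
        act_vf->id = vf_id;             /* copied into filter->pool */

        actions[0].type = RTE_FLOW_ACTION_TYPE_VF;
        actions[0].conf = act_vf;
        actions[1].type = RTE_FLOW_ACTION_TYPE_END;
        actions[1].conf = NULL;
}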
1325
1326 /* Parse to get the attr and action info of flow director rule. */
1327 static int
1328 ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
1329                           const struct rte_flow_action actions[],
1330                           struct ixgbe_fdir_rule *rule,
1331                           struct rte_flow_error *error)
1332 {
1333         const struct rte_flow_action *act;
1334         const struct rte_flow_action_queue *act_q;
1335         const struct rte_flow_action_mark *mark;
1336
1337         /* parse attr */
1338         /* must be input direction */
1339         if (!attr->ingress) {
1340                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1341                 rte_flow_error_set(error, EINVAL,
1342                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1343                         attr, "Only support ingress.");
1344                 return -rte_errno;
1345         }
1346
1347         /* not supported */
1348         if (attr->egress) {
1349                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1350                 rte_flow_error_set(error, EINVAL,
1351                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1352                         attr, "Not support egress.");
1353                 return -rte_errno;
1354         }
1355
1356         /* not supported */
1357         if (attr->priority) {
1358                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1359                 rte_flow_error_set(error, EINVAL,
1360                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1361                         attr, "Not support priority.");
1362                 return -rte_errno;
1363         }
1364
1365         /* check if the first not void action is QUEUE or DROP. */
1366         act = next_no_void_action(actions, NULL);
1367         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
1368             act->type != RTE_FLOW_ACTION_TYPE_DROP) {
1369                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1370                 rte_flow_error_set(error, EINVAL,
1371                         RTE_FLOW_ERROR_TYPE_ACTION,
1372                         act, "Not supported action.");
1373                 return -rte_errno;
1374         }
1375
1376         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
1377                 act_q = (const struct rte_flow_action_queue *)act->conf;
1378                 rule->queue = act_q->index;
1379         } else { /* drop */
1380                 /* signature mode does not support drop action. */
1381                 if (rule->mode == RTE_FDIR_MODE_SIGNATURE) {
1382                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1383                         rte_flow_error_set(error, EINVAL,
1384                                 RTE_FLOW_ERROR_TYPE_ACTION,
1385                                 act, "Not supported action.");
1386                         return -rte_errno;
1387                 }
1388                 rule->fdirflags = IXGBE_FDIRCMD_DROP;
1389         }
1390
1391         /* check if the next not void action is MARK or END */
1392         act = next_no_void_action(actions, act);
1393         if ((act->type != RTE_FLOW_ACTION_TYPE_MARK) &&
1394                 (act->type != RTE_FLOW_ACTION_TYPE_END)) {
1395                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1396                 rte_flow_error_set(error, EINVAL,
1397                         RTE_FLOW_ERROR_TYPE_ACTION,
1398                         act, "Not supported action.");
1399                 return -rte_errno;
1400         }
1401
1402         rule->soft_id = 0;
1403
1404         if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
1405                 mark = (const struct rte_flow_action_mark *)act->conf;
1406                 rule->soft_id = mark->id;
1407                 act = next_no_void_action(actions, act);
1408         }
1409
1410         /* check if the next not void action is END */
1411         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1412                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1413                 rte_flow_error_set(error, EINVAL,
1414                         RTE_FLOW_ERROR_TYPE_ACTION,
1415                         act, "Not supported action.");
1416                 return -rte_errno;
1417         }
1418
1419         return 0;
1420 }
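/*
 * A minimal application-side sketch of an action list that the parser above
 * accepts for a flow director rule: QUEUE (DROP is also allowed in perfect
 * mode), an optional MARK carrying the soft id, then END. "queue_id" and
 * "mark_id" are hypothetical example values chosen by the caller.
 */
static void
example_fdir_actions(uint16_t queue_id, uint32_t mark_id,
                     struct rte_flow_action actions[3],
                     struct rte_flow_action_queue *act_q,
                     struct rte_flow_action_mark *act_mark)
{
        act_q->index = queue_id;        /* copied into rule->queue */
        act_mark->id = mark_id;         /* copied into rule->soft_id */

        actions[0].type = RTE_FLOW_ACTION_TYPE_QUEUE;
        actions[0].conf = act_q;
        actions[1].type = RTE_FLOW_ACTION_TYPE_MARK;
        actions[1].conf = act_mark;
        actions[2].type = RTE_FLOW_ACTION_TYPE_END;
        actions[2].conf = NULL;
}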
1421
1422 /* Search for the next not void pattern item, skipping any FUZZY items. */
1423 static inline
1424 const struct rte_flow_item *next_no_fuzzy_pattern(
1425                 const struct rte_flow_item pattern[],
1426                 const struct rte_flow_item *cur)
1427 {
1428         const struct rte_flow_item *next =
1429                 next_no_void_pattern(pattern, cur);
1430         while (1) {
1431                 if (next->type != RTE_FLOW_ITEM_TYPE_FUZZY)
1432                         return next;
1433                 next = next_no_void_pattern(pattern, next);
1434         }
1435 }
1436
1437 static inline uint8_t signature_match(const struct rte_flow_item pattern[])
1438 {
1439         const struct rte_flow_item_fuzzy *spec, *last, *mask;
1440         const struct rte_flow_item *item;
1441         uint32_t sh, lh, mh;
1442         int i = 0;
1443
1444         while (1) {
1445                 item = pattern + i;
1446                 if (item->type == RTE_FLOW_ITEM_TYPE_END)
1447                         break;
1448
1449                 if (item->type == RTE_FLOW_ITEM_TYPE_FUZZY) {
1450                         spec =
1451                         (const struct rte_flow_item_fuzzy *)item->spec;
1452                         last =
1453                         (const struct rte_flow_item_fuzzy *)item->last;
1454                         mask =
1455                         (const struct rte_flow_item_fuzzy *)item->mask;
1456
1457                         if (!spec || !mask)
1458                                 return 0;
1459
1460                         sh = spec->thresh;
1461
1462                         if (!last)
1463                                 lh = sh;
1464                         else
1465                                 lh = last->thresh;
1466
1467                         mh = mask->thresh;
1468                         sh = sh & mh;
1469                         lh = lh & mh;
1470
1471                         if (!sh || sh > lh)
1472                                 return 0;
1473
1474                         return 1;
1475                 }
1476
1477                 i++;
1478         }
1479
1480         return 0;
1481 }
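/*
 * A minimal sketch of the FUZZY item that signature_match() above looks for:
 * a non-zero thresh in both spec and mask selects signature mode rather than
 * perfect mode. The threshold values here are arbitrary examples.
 */
static void
example_fuzzy_item(struct rte_flow_item *item,
                   struct rte_flow_item_fuzzy *spec,
                   struct rte_flow_item_fuzzy *mask)
{
        spec->thresh = 1;               /* any non-zero value */
        mask->thresh = UINT32_MAX;      /* thresh is fully significant */

        item->type = RTE_FLOW_ITEM_TYPE_FUZZY;
        item->spec = spec;
        item->last = NULL;              /* or a value >= spec->thresh */
        item->mask = mask;
}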
1482
1483 /**
1484  * Parse the rule to see if it is an IP or MAC VLAN flow director rule,
1485  * and fill in the flow director filter info along the way.
1486  * UDP/TCP/SCTP PATTERN:
1487  * The first not void item can be ETH or IPV4 or IPV6
1488  * The second not void item must be IPV4 or IPV6 if the first one is ETH.
1489  * The next not void item could be UDP or TCP or SCTP (optional)
1490  * The next not void item could be RAW (for flexbyte, optional)
1491  * The next not void item must be END.
1492  * A Fuzzy Match pattern can appear at any place before END.
1493  * Fuzzy Match is optional for IPV4 but is required for IPV6
1494  * MAC VLAN PATTERN:
1495  * The first not void item must be ETH.
1496  * The second not void item must be MAC VLAN.
1497  * The next not void item must be END.
1498  * ACTION:
1499  * The first not void action should be QUEUE or DROP.
1500  * The second not void optional action should be MARK,
1501  * mark_id is a uint32_t number.
1502  * The next not void action should be END.
1503  * UDP/TCP/SCTP pattern example:
1504  * ITEM         Spec                    Mask
1505  * ETH          NULL                    NULL
1506  * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
1507  *              dst_addr 192.167.3.50   0xFFFFFFFF
1508  * UDP/TCP/SCTP src_port        80      0xFFFF
1509  *              dst_port        80      0xFFFF
1510  * FLEX relative        0       0x1
1511  *              search          0       0x1
1512  *              reserved        0       0
1513  *              offset          12      0xFFFFFFFF
1514  *              limit           0       0xFFFF
1515  *              length          2       0xFFFF
1516  *              pattern[0]      0x86    0xFF
1517  *              pattern[1]      0xDD    0xFF
1518  * END
1519  * MAC VLAN pattern example:
1520  * ITEM         Spec                    Mask
1521  * ETH          dst_addr
1522                 {0xAC, 0x7B, 0xA1,      {0xFF, 0xFF, 0xFF,
1523                 0x2C, 0x6D, 0x36}       0xFF, 0xFF, 0xFF}
1524  * MAC VLAN     tci     0x2016          0xEFFF
1525  * END
1526  * Other members in mask and spec should be set to 0x00.
1527  * Item->last should be NULL.
1528  */
1529 static int
1530 ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
1531                                const struct rte_flow_attr *attr,
1532                                const struct rte_flow_item pattern[],
1533                                const struct rte_flow_action actions[],
1534                                struct ixgbe_fdir_rule *rule,
1535                                struct rte_flow_error *error)
1536 {
1537         const struct rte_flow_item *item;
1538         const struct rte_flow_item_eth *eth_spec;
1539         const struct rte_flow_item_eth *eth_mask;
1540         const struct rte_flow_item_ipv4 *ipv4_spec;
1541         const struct rte_flow_item_ipv4 *ipv4_mask;
1542         const struct rte_flow_item_ipv6 *ipv6_spec;
1543         const struct rte_flow_item_ipv6 *ipv6_mask;
1544         const struct rte_flow_item_tcp *tcp_spec;
1545         const struct rte_flow_item_tcp *tcp_mask;
1546         const struct rte_flow_item_udp *udp_spec;
1547         const struct rte_flow_item_udp *udp_mask;
1548         const struct rte_flow_item_sctp *sctp_spec;
1549         const struct rte_flow_item_sctp *sctp_mask;
1550         const struct rte_flow_item_vlan *vlan_spec;
1551         const struct rte_flow_item_vlan *vlan_mask;
1552         const struct rte_flow_item_raw *raw_mask;
1553         const struct rte_flow_item_raw *raw_spec;
1554         uint8_t j;
1555
1556         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1557
1558         if (!pattern) {
1559                 rte_flow_error_set(error, EINVAL,
1560                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1561                         NULL, "NULL pattern.");
1562                 return -rte_errno;
1563         }
1564
1565         if (!actions) {
1566                 rte_flow_error_set(error, EINVAL,
1567                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1568                                    NULL, "NULL action.");
1569                 return -rte_errno;
1570         }
1571
1572         if (!attr) {
1573                 rte_flow_error_set(error, EINVAL,
1574                                    RTE_FLOW_ERROR_TYPE_ATTR,
1575                                    NULL, "NULL attribute.");
1576                 return -rte_errno;
1577         }
1578
1579         /**
1580          * Some fields may not be provided. Set spec to 0 and mask to the
1581          * default value so that unprovided fields need no further handling.
1582          */
1583         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1584         memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
1585         rule->mask.vlan_tci_mask = 0;
1586         rule->mask.flex_bytes_mask = 0;
1587
1588         /**
1589          * The first not void item should be
1590          * ETH or IPv4 or IPv6 or TCP or UDP or SCTP.
1591          */
1592         item = next_no_fuzzy_pattern(pattern, NULL);
1593         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
1594             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
1595             item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
1596             item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1597             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1598             item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
1599                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1600                 rte_flow_error_set(error, EINVAL,
1601                         RTE_FLOW_ERROR_TYPE_ITEM,
1602                         item, "Not supported by fdir filter");
1603                 return -rte_errno;
1604         }
1605
1606         if (signature_match(pattern))
1607                 rule->mode = RTE_FDIR_MODE_SIGNATURE;
1608         else
1609                 rule->mode = RTE_FDIR_MODE_PERFECT;
1610
1611         /*Not supported last point for range*/
1612         if (item->last) {
1613                 rte_flow_error_set(error, EINVAL,
1614                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1615                         item, "Not supported last point for range");
1616                 return -rte_errno;
1617         }
1618
1619         /* Get the MAC info. */
1620         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
1621                 /**
1622                  * Only VLAN and the dst MAC address are supported;
1623                  * others should be masked.
1624                  */
1625                 if (item->spec && !item->mask) {
1626                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1627                         rte_flow_error_set(error, EINVAL,
1628                                 RTE_FLOW_ERROR_TYPE_ITEM,
1629                                 item, "Not supported by fdir filter");
1630                         return -rte_errno;
1631                 }
1632
1633                 if (item->spec) {
1634                         rule->b_spec = TRUE;
1635                         eth_spec = (const struct rte_flow_item_eth *)item->spec;
1636
1637                         /* Get the dst MAC. */
1638                         for (j = 0; j < ETHER_ADDR_LEN; j++) {
1639                                 rule->ixgbe_fdir.formatted.inner_mac[j] =
1640                                         eth_spec->dst.addr_bytes[j];
1641                         }
1642                 }
1643
1644
1645                 if (item->mask) {
1646
1647                         rule->b_mask = TRUE;
1648                         eth_mask = (const struct rte_flow_item_eth *)item->mask;
1649
1650                         /* Ether type should be masked. */
1651                         if (eth_mask->type ||
1652                             rule->mode == RTE_FDIR_MODE_SIGNATURE) {
1653                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1654                                 rte_flow_error_set(error, EINVAL,
1655                                         RTE_FLOW_ERROR_TYPE_ITEM,
1656                                         item, "Not supported by fdir filter");
1657                                 return -rte_errno;
1658                         }
1659
1660                         /* If the Ethernet item has a mask, it means MAC VLAN mode. */
1661                         rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN;
1662
1663                         /**
1664                          * The src MAC address must be fully masked out,
1665                          * and the dst MAC address mask must be all ones.
1666                          */
1667                         for (j = 0; j < ETHER_ADDR_LEN; j++) {
1668                                 if (eth_mask->src.addr_bytes[j] ||
1669                                         eth_mask->dst.addr_bytes[j] != 0xFF) {
1670                                         memset(rule, 0,
1671                                         sizeof(struct ixgbe_fdir_rule));
1672                                         rte_flow_error_set(error, EINVAL,
1673                                         RTE_FLOW_ERROR_TYPE_ITEM,
1674                                         item, "Not supported by fdir filter");
1675                                         return -rte_errno;
1676                                 }
1677                         }
1678
1679                         /* With no VLAN item, the TCI is considered fully masked. */
1680                         rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
1681                 }
1682                 /** If both spec and mask are NULL,
1683                  * it means we don't care about ETH.
1684                  * Do nothing.
1685                  */
1686
1687                 /**
1688                  * Check if the next not void item is vlan or ipv4.
1689                  * IPv6 is not supported.
1690                  */
1691                 item = next_no_fuzzy_pattern(pattern, item);
1692                 if (rule->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
1693                         if (item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
1694                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1695                                 rte_flow_error_set(error, EINVAL,
1696                                         RTE_FLOW_ERROR_TYPE_ITEM,
1697                                         item, "Not supported by fdir filter");
1698                                 return -rte_errno;
1699                         }
1700                 } else {
1701                         if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
1702                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1703                                 rte_flow_error_set(error, EINVAL,
1704                                         RTE_FLOW_ERROR_TYPE_ITEM,
1705                                         item, "Not supported by fdir filter");
1706                                 return -rte_errno;
1707                         }
1708                 }
1709         }
1710
1711         if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1712                 if (!(item->spec && item->mask)) {
1713                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1714                         rte_flow_error_set(error, EINVAL,
1715                                 RTE_FLOW_ERROR_TYPE_ITEM,
1716                                 item, "Not supported by fdir filter");
1717                         return -rte_errno;
1718                 }
1719
1720                 /*Not supported last point for range*/
1721                 if (item->last) {
1722                         rte_flow_error_set(error, EINVAL,
1723                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1724                                 item, "Not supported last point for range");
1725                         return -rte_errno;
1726                 }
1727
1728                 vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
1729                 vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
1730
1731                 rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
1732
1733                 rule->mask.vlan_tci_mask = vlan_mask->tci;
1734                 rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
1735                 /* More than one VLAN tag is not supported. */
1736
1737                 /* Next not void item must be END */
1738                 item = next_no_fuzzy_pattern(pattern, item);
1739                 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1740                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1741                         rte_flow_error_set(error, EINVAL,
1742                                 RTE_FLOW_ERROR_TYPE_ITEM,
1743                                 item, "Not supported by fdir filter");
1744                         return -rte_errno;
1745                 }
1746         }
1747
1748         /* Get the IPV4 info. */
1749         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1750                 /**
1751                  * Set the flow type even if there's no content
1752                  * as we must have a flow type.
1753                  */
1754                 rule->ixgbe_fdir.formatted.flow_type =
1755                         IXGBE_ATR_FLOW_TYPE_IPV4;
1756                 /*Not supported last point for range*/
1757                 if (item->last) {
1758                         rte_flow_error_set(error, EINVAL,
1759                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1760                                 item, "Not supported last point for range");
1761                         return -rte_errno;
1762                 }
1763                 /**
1764                  * Only care about src & dst addresses,
1765                  * others should be masked.
1766                  */
1767                 if (!item->mask) {
1768                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1769                         rte_flow_error_set(error, EINVAL,
1770                                 RTE_FLOW_ERROR_TYPE_ITEM,
1771                                 item, "Not supported by fdir filter");
1772                         return -rte_errno;
1773                 }
1774                 rule->b_mask = TRUE;
1775                 ipv4_mask =
1776                         (const struct rte_flow_item_ipv4 *)item->mask;
1777                 if (ipv4_mask->hdr.version_ihl ||
1778                     ipv4_mask->hdr.type_of_service ||
1779                     ipv4_mask->hdr.total_length ||
1780                     ipv4_mask->hdr.packet_id ||
1781                     ipv4_mask->hdr.fragment_offset ||
1782                     ipv4_mask->hdr.time_to_live ||
1783                     ipv4_mask->hdr.next_proto_id ||
1784                     ipv4_mask->hdr.hdr_checksum) {
1785                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1786                         rte_flow_error_set(error, EINVAL,
1787                                 RTE_FLOW_ERROR_TYPE_ITEM,
1788                                 item, "Not supported by fdir filter");
1789                         return -rte_errno;
1790                 }
1791                 rule->mask.dst_ipv4_mask = ipv4_mask->hdr.dst_addr;
1792                 rule->mask.src_ipv4_mask = ipv4_mask->hdr.src_addr;
1793
1794                 if (item->spec) {
1795                         rule->b_spec = TRUE;
1796                         ipv4_spec =
1797                                 (const struct rte_flow_item_ipv4 *)item->spec;
1798                         rule->ixgbe_fdir.formatted.dst_ip[0] =
1799                                 ipv4_spec->hdr.dst_addr;
1800                         rule->ixgbe_fdir.formatted.src_ip[0] =
1801                                 ipv4_spec->hdr.src_addr;
1802                 }
1803
1804                 /**
1805                  * Check if the next not void item is
1806                  * TCP or UDP or SCTP or END.
1807                  */
1808                 item = next_no_fuzzy_pattern(pattern, item);
1809                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1810                     item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1811                     item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
1812                     item->type != RTE_FLOW_ITEM_TYPE_END &&
1813                     item->type != RTE_FLOW_ITEM_TYPE_RAW) {
1814                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1815                         rte_flow_error_set(error, EINVAL,
1816                                 RTE_FLOW_ERROR_TYPE_ITEM,
1817                                 item, "Not supported by fdir filter");
1818                         return -rte_errno;
1819                 }
1820         }
1821
1822         /* Get the IPV6 info. */
1823         if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
1824                 /**
1825                  * Set the flow type even if there's no content
1826                  * as we must have a flow type.
1827                  */
1828                 rule->ixgbe_fdir.formatted.flow_type =
1829                         IXGBE_ATR_FLOW_TYPE_IPV6;
1830
1831                 /**
1832                  * 1. must be a signature match rule
1833                  * 2. item->last is not supported
1834                  * 3. the mask must not be NULL
1835                  */
1836                 if (rule->mode != RTE_FDIR_MODE_SIGNATURE ||
1837                     item->last ||
1838                     !item->mask) {
1839                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1840                         rte_flow_error_set(error, EINVAL,
1841                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1842                                 item, "Not supported last point for range");
1843                         return -rte_errno;
1844                 }
1845
1846                 rule->b_mask = TRUE;
1847                 ipv6_mask =
1848                         (const struct rte_flow_item_ipv6 *)item->mask;
1849                 if (ipv6_mask->hdr.vtc_flow ||
1850                     ipv6_mask->hdr.payload_len ||
1851                     ipv6_mask->hdr.proto ||
1852                     ipv6_mask->hdr.hop_limits) {
1853                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1854                         rte_flow_error_set(error, EINVAL,
1855                                 RTE_FLOW_ERROR_TYPE_ITEM,
1856                                 item, "Not supported by fdir filter");
1857                         return -rte_errno;
1858                 }
1859
1860                 /* check src addr mask */
1861                 for (j = 0; j < 16; j++) {
1862                         if (ipv6_mask->hdr.src_addr[j] == UINT8_MAX) {
1863                                 rule->mask.src_ipv6_mask |= 1 << j;
1864                         } else if (ipv6_mask->hdr.src_addr[j] != 0) {
1865                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1866                                 rte_flow_error_set(error, EINVAL,
1867                                         RTE_FLOW_ERROR_TYPE_ITEM,
1868                                         item, "Not supported by fdir filter");
1869                                 return -rte_errno;
1870                         }
1871                 }
1872
1873                 /* check dst addr mask */
1874                 for (j = 0; j < 16; j++) {
1875                         if (ipv6_mask->hdr.dst_addr[j] == UINT8_MAX) {
1876                                 rule->mask.dst_ipv6_mask |= 1 << j;
1877                         } else if (ipv6_mask->hdr.dst_addr[j] != 0) {
1878                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1879                                 rte_flow_error_set(error, EINVAL,
1880                                         RTE_FLOW_ERROR_TYPE_ITEM,
1881                                         item, "Not supported by fdir filter");
1882                                 return -rte_errno;
1883                         }
1884                 }
1885
1886                 if (item->spec) {
1887                         rule->b_spec = TRUE;
1888                         ipv6_spec =
1889                                 (const struct rte_flow_item_ipv6 *)item->spec;
1890                         rte_memcpy(rule->ixgbe_fdir.formatted.src_ip,
1891                                    ipv6_spec->hdr.src_addr, 16);
1892                         rte_memcpy(rule->ixgbe_fdir.formatted.dst_ip,
1893                                    ipv6_spec->hdr.dst_addr, 16);
1894                 }
1895
1896                 /**
1897                  * Check if the next not void item is
1898                  * TCP or UDP or SCTP or END.
1899                  */
1900                 item = next_no_fuzzy_pattern(pattern, item);
1901                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1902                     item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1903                     item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
1904                     item->type != RTE_FLOW_ITEM_TYPE_END &&
1905                     item->type != RTE_FLOW_ITEM_TYPE_RAW) {
1906                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1907                         rte_flow_error_set(error, EINVAL,
1908                                 RTE_FLOW_ERROR_TYPE_ITEM,
1909                                 item, "Not supported by fdir filter");
1910                         return -rte_errno;
1911                 }
1912         }
1913
1914         /* Get the TCP info. */
1915         if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
1916                 /**
1917                  * Set the flow type even if there's no content
1918                  * as we must have a flow type.
1919                  */
1920                 rule->ixgbe_fdir.formatted.flow_type |=
1921                         IXGBE_ATR_L4TYPE_TCP;
1922                 /*Not supported last point for range*/
1923                 if (item->last) {
1924                         rte_flow_error_set(error, EINVAL,
1925                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1926                                 item, "Not supported last point for range");
1927                         return -rte_errno;
1928                 }
1929                 /**
1930                  * Only care about src & dst ports,
1931                  * others should be masked.
1932                  */
1933                 if (!item->mask) {
1934                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1935                         rte_flow_error_set(error, EINVAL,
1936                                 RTE_FLOW_ERROR_TYPE_ITEM,
1937                                 item, "Not supported by fdir filter");
1938                         return -rte_errno;
1939                 }
1940                 rule->b_mask = TRUE;
1941                 tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
1942                 if (tcp_mask->hdr.sent_seq ||
1943                     tcp_mask->hdr.recv_ack ||
1944                     tcp_mask->hdr.data_off ||
1945                     tcp_mask->hdr.tcp_flags ||
1946                     tcp_mask->hdr.rx_win ||
1947                     tcp_mask->hdr.cksum ||
1948                     tcp_mask->hdr.tcp_urp) {
1949                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1950                         rte_flow_error_set(error, EINVAL,
1951                                 RTE_FLOW_ERROR_TYPE_ITEM,
1952                                 item, "Not supported by fdir filter");
1953                         return -rte_errno;
1954                 }
1955                 rule->mask.src_port_mask = tcp_mask->hdr.src_port;
1956                 rule->mask.dst_port_mask = tcp_mask->hdr.dst_port;
1957
1958                 if (item->spec) {
1959                         rule->b_spec = TRUE;
1960                         tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
1961                         rule->ixgbe_fdir.formatted.src_port =
1962                                 tcp_spec->hdr.src_port;
1963                         rule->ixgbe_fdir.formatted.dst_port =
1964                                 tcp_spec->hdr.dst_port;
1965                 }
1966
1967                 item = next_no_fuzzy_pattern(pattern, item);
1968                 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
1969                     item->type != RTE_FLOW_ITEM_TYPE_END) {
1970                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1971                         rte_flow_error_set(error, EINVAL,
1972                                 RTE_FLOW_ERROR_TYPE_ITEM,
1973                                 item, "Not supported by fdir filter");
1974                         return -rte_errno;
1975                 }
1976
1977         }
1978
1979         /* Get the UDP info */
1980         if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
1981                 /**
1982                  * Set the flow type even if there's no content
1983                  * as we must have a flow type.
1984                  */
1985                 rule->ixgbe_fdir.formatted.flow_type |=
1986                         IXGBE_ATR_L4TYPE_UDP;
1987                 /*Not supported last point for range*/
1988                 if (item->last) {
1989                         rte_flow_error_set(error, EINVAL,
1990                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1991                                 item, "Not supported last point for range");
1992                         return -rte_errno;
1993                 }
1994                 /**
1995                  * Only care about src & dst ports,
1996                  * others should be masked.
1997                  */
1998                 if (!item->mask) {
1999                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2000                         rte_flow_error_set(error, EINVAL,
2001                                 RTE_FLOW_ERROR_TYPE_ITEM,
2002                                 item, "Not supported by fdir filter");
2003                         return -rte_errno;
2004                 }
2005                 rule->b_mask = TRUE;
2006                 udp_mask = (const struct rte_flow_item_udp *)item->mask;
2007                 if (udp_mask->hdr.dgram_len ||
2008                     udp_mask->hdr.dgram_cksum) {
2009                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2010                         rte_flow_error_set(error, EINVAL,
2011                                 RTE_FLOW_ERROR_TYPE_ITEM,
2012                                 item, "Not supported by fdir filter");
2013                         return -rte_errno;
2014                 }
2015                 rule->mask.src_port_mask = udp_mask->hdr.src_port;
2016                 rule->mask.dst_port_mask = udp_mask->hdr.dst_port;
2017
2018                 if (item->spec) {
2019                         rule->b_spec = TRUE;
2020                         udp_spec = (const struct rte_flow_item_udp *)item->spec;
2021                         rule->ixgbe_fdir.formatted.src_port =
2022                                 udp_spec->hdr.src_port;
2023                         rule->ixgbe_fdir.formatted.dst_port =
2024                                 udp_spec->hdr.dst_port;
2025                 }
2026
2027                 item = next_no_fuzzy_pattern(pattern, item);
2028                 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
2029                     item->type != RTE_FLOW_ITEM_TYPE_END) {
2030                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2031                         rte_flow_error_set(error, EINVAL,
2032                                 RTE_FLOW_ERROR_TYPE_ITEM,
2033                                 item, "Not supported by fdir filter");
2034                         return -rte_errno;
2035                 }
2036
2037         }
2038
2039         /* Get the SCTP info */
2040         if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
2041                 /**
2042                  * Set the flow type even if there's no content
2043                  * as we must have a flow type.
2044                  */
2045                 rule->ixgbe_fdir.formatted.flow_type |=
2046                         IXGBE_ATR_L4TYPE_SCTP;
2047                 /*Not supported last point for range*/
2048                 if (item->last) {
2049                         rte_flow_error_set(error, EINVAL,
2050                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2051                                 item, "Not supported last point for range");
2052                         return -rte_errno;
2053                 }
2054
2055                 /* Only the x550 family supports matching the SCTP ports. */
2056                 if (hw->mac.type == ixgbe_mac_X550 ||
2057                     hw->mac.type == ixgbe_mac_X550EM_x ||
2058                     hw->mac.type == ixgbe_mac_X550EM_a) {
2059                         /**
2060                          * Only care about src & dst ports,
2061                          * others should be masked.
2062                          */
2063                         if (!item->mask) {
2064                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2065                                 rte_flow_error_set(error, EINVAL,
2066                                         RTE_FLOW_ERROR_TYPE_ITEM,
2067                                         item, "Not supported by fdir filter");
2068                                 return -rte_errno;
2069                         }
2070                         rule->b_mask = TRUE;
2071                         sctp_mask =
2072                                 (const struct rte_flow_item_sctp *)item->mask;
2073                         if (sctp_mask->hdr.tag ||
2074                                 sctp_mask->hdr.cksum) {
2075                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2076                                 rte_flow_error_set(error, EINVAL,
2077                                         RTE_FLOW_ERROR_TYPE_ITEM,
2078                                         item, "Not supported by fdir filter");
2079                                 return -rte_errno;
2080                         }
2081                         rule->mask.src_port_mask = sctp_mask->hdr.src_port;
2082                         rule->mask.dst_port_mask = sctp_mask->hdr.dst_port;
2083
2084                         if (item->spec) {
2085                                 rule->b_spec = TRUE;
2086                                 sctp_spec =
2087                                 (const struct rte_flow_item_sctp *)item->spec;
2088                                 rule->ixgbe_fdir.formatted.src_port =
2089                                         sctp_spec->hdr.src_port;
2090                                 rule->ixgbe_fdir.formatted.dst_port =
2091                                         sctp_spec->hdr.dst_port;
2092                         }
2093                 /* On other MAC types, even SCTP port matching is not supported. */
2094                 } else {
2095                         sctp_mask =
2096                                 (const struct rte_flow_item_sctp *)item->mask;
2097                         if (sctp_mask &&
2098                                 (sctp_mask->hdr.src_port ||
2099                                  sctp_mask->hdr.dst_port ||
2100                                  sctp_mask->hdr.tag ||
2101                                  sctp_mask->hdr.cksum)) {
2102                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2103                                 rte_flow_error_set(error, EINVAL,
2104                                         RTE_FLOW_ERROR_TYPE_ITEM,
2105                                         item, "Not supported by fdir filter");
2106                                 return -rte_errno;
2107                         }
2108                 }
2109
2110                 item = next_no_fuzzy_pattern(pattern, item);
2111                 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
2112                         item->type != RTE_FLOW_ITEM_TYPE_END) {
2113                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2114                         rte_flow_error_set(error, EINVAL,
2115                                 RTE_FLOW_ERROR_TYPE_ITEM,
2116                                 item, "Not supported by fdir filter");
2117                         return -rte_errno;
2118                 }
2119         }
2120
2121         /* Get the flex byte info */
2122         if (item->type == RTE_FLOW_ITEM_TYPE_RAW) {
2123                 /* Not supported last point for range*/
2124                 if (item->last) {
2125                         rte_flow_error_set(error, EINVAL,
2126                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2127                                 item, "Not supported last point for range");
2128                         return -rte_errno;
2129                 }
2130                 /* both spec and mask must be provided */
2131                 if (!item->mask || !item->spec) {
2132                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2133                         rte_flow_error_set(error, EINVAL,
2134                                 RTE_FLOW_ERROR_TYPE_ITEM,
2135                                 item, "Not supported by fdir filter");
2136                         return -rte_errno;
2137                 }
2138
2139                 raw_mask = (const struct rte_flow_item_raw *)item->mask;
2140
2141                 /* check mask */
2142                 if (raw_mask->relative != 0x1 ||
2143                     raw_mask->search != 0x1 ||
2144                     raw_mask->reserved != 0x0 ||
2145                     (uint32_t)raw_mask->offset != 0xffffffff ||
2146                     raw_mask->limit != 0xffff ||
2147                     raw_mask->length != 0xffff) {
2148                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2149                         rte_flow_error_set(error, EINVAL,
2150                                 RTE_FLOW_ERROR_TYPE_ITEM,
2151                                 item, "Not supported by fdir filter");
2152                         return -rte_errno;
2153                 }
2154
2155                 raw_spec = (const struct rte_flow_item_raw *)item->spec;
2156
2157                 /* check spec */
2158                 if (raw_spec->relative != 0 ||
2159                     raw_spec->search != 0 ||
2160                     raw_spec->reserved != 0 ||
2161                     raw_spec->offset > IXGBE_MAX_FLX_SOURCE_OFF ||
2162                     raw_spec->offset % 2 ||
2163                     raw_spec->limit != 0 ||
2164                     raw_spec->length != 2 ||
2165                     /* pattern can't be 0xffff */
2166                     (raw_spec->pattern[0] == 0xff &&
2167                      raw_spec->pattern[1] == 0xff)) {
2168                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2169                         rte_flow_error_set(error, EINVAL,
2170                                 RTE_FLOW_ERROR_TYPE_ITEM,
2171                                 item, "Not supported by fdir filter");
2172                         return -rte_errno;
2173                 }
2174
2175                 /* check pattern mask */
2176                 if (raw_mask->pattern[0] != 0xff ||
2177                     raw_mask->pattern[1] != 0xff) {
2178                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2179                         rte_flow_error_set(error, EINVAL,
2180                                 RTE_FLOW_ERROR_TYPE_ITEM,
2181                                 item, "Not supported by fdir filter");
2182                         return -rte_errno;
2183                 }
2184
2185                 rule->mask.flex_bytes_mask = 0xffff;
2186                 rule->ixgbe_fdir.formatted.flex_bytes =
2187                         (((uint16_t)raw_spec->pattern[1]) << 8) |
2188                         raw_spec->pattern[0];
2189                 rule->flex_bytes_offset = raw_spec->offset;
2190         }
2191
2192         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2193                 /* check if the next not void item is END */
2194                 item = next_no_fuzzy_pattern(pattern, item);
2195                 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2196                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2197                         rte_flow_error_set(error, EINVAL,
2198                                 RTE_FLOW_ERROR_TYPE_ITEM,
2199                                 item, "Not supported by fdir filter");
2200                         return -rte_errno;
2201                 }
2202         }
2203
2204         return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
2205 }
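/*
 * A minimal application-side sketch of the UDP/IPv4 pattern documented above
 * ixgbe_parse_fdir_filter_normal(): ETH with no spec/mask, IPv4 with fully
 * masked source/destination addresses, UDP with fully masked ports, then END.
 * The addresses and ports are arbitrary example values.
 */
static void
example_fdir_udp_pattern(struct rte_flow_item pattern[4],
                         struct rte_flow_item_ipv4 *ipv4_spec,
                         struct rte_flow_item_ipv4 *ipv4_mask,
                         struct rte_flow_item_udp *udp_spec,
                         struct rte_flow_item_udp *udp_mask)
{
        memset(pattern, 0, 4 * sizeof(pattern[0]));
        memset(ipv4_spec, 0, sizeof(*ipv4_spec));
        memset(ipv4_mask, 0, sizeof(*ipv4_mask));
        memset(udp_spec, 0, sizeof(*udp_spec));
        memset(udp_mask, 0, sizeof(*udp_mask));

        /* ETH only describes the protocol stack here, so no spec/mask. */
        pattern[0].type = RTE_FLOW_ITEM_TYPE_ETH;

        /* IPv4: only src/dst addresses, all other fields left masked out. */
        ipv4_spec->hdr.src_addr = rte_cpu_to_be_32(0xC0A80114); /* 192.168.1.20 */
        ipv4_spec->hdr.dst_addr = rte_cpu_to_be_32(0xC0A70332); /* 192.167.3.50 */
        ipv4_mask->hdr.src_addr = rte_cpu_to_be_32(UINT32_MAX);
        ipv4_mask->hdr.dst_addr = rte_cpu_to_be_32(UINT32_MAX);
        pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV4;
        pattern[1].spec = ipv4_spec;
        pattern[1].mask = ipv4_mask;

        /* UDP: src/dst ports with full masks. */
        udp_spec->hdr.src_port = rte_cpu_to_be_16(80);
        udp_spec->hdr.dst_port = rte_cpu_to_be_16(80);
        udp_mask->hdr.src_port = rte_cpu_to_be_16(UINT16_MAX);
        udp_mask->hdr.dst_port = rte_cpu_to_be_16(UINT16_MAX);
        pattern[2].type = RTE_FLOW_ITEM_TYPE_UDP;
        pattern[2].spec = udp_spec;
        pattern[2].mask = udp_mask;

        pattern[3].type = RTE_FLOW_ITEM_TYPE_END;
}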
2206
2207 #define NVGRE_PROTOCOL 0x6558
2208
2209 /**
2210  * Parse the rule to see if it is a VxLAN or NVGRE flow director rule,
2211  * and fill in the flow director filter info along the way.
2212  * VxLAN PATTERN:
2213  * The first not void item must be ETH.
2214  * The second not void item must be IPV4 or IPV6.
2215  * The third not void item must be UDP, followed by VXLAN.
2216  * The next not void item must be END.
2217  * NVGRE PATTERN:
2218  * The first not void item must be ETH.
2219  * The second not void item must be IPV4 or IPV6.
2220  * The third not void item must be NVGRE.
2221  * The next not void item must be END.
2222  * ACTION:
2223  * The first not void action should be QUEUE or DROP.
2224  * The second not void optional action should be MARK,
2225  * mark_id is a uint32_t number.
2226  * The next not void action should be END.
2227  * VxLAN pattern example:
2228  * ITEM         Spec                    Mask
2229  * ETH          NULL                    NULL
2230  * IPV4/IPV6    NULL                    NULL
2231  * UDP          NULL                    NULL
2232  * VxLAN        vni{0x00, 0x32, 0x54}   {0xFF, 0xFF, 0xFF}
2233  * MAC VLAN     tci     0x2016          0xEFFF
2234  * END
2235  * NVGRE pattern example:
2236  * ITEM         Spec                    Mask
2237  * ETH          NULL                    NULL
2238  * IPV4/IPV6    NULL                    NULL
2239  * NVGRE        protocol        0x6558  0xFFFF
2240  *              tni{0x00, 0x32, 0x54}   {0xFF, 0xFF, 0xFF}
2241  * MAC VLAN     tci     0x2016          0xEFFF
2242  * END
2243  * Other members in mask and spec should be set to 0x00.
2244  * item->last should be NULL.
2245  */
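/*
 * A minimal application-side sketch of the outer part of the VxLAN pattern
 * documented in the comment above (the parser itself,
 * ixgbe_parse_fdir_filter_tunnel(), follows below): ETH, IPv4 and UDP only
 * describe the protocol stack, and the VXLAN item carries a fully masked
 * VNI.  The inner MAC VLAN items and the final END item from the example
 * above are left to the caller; the VNI bytes are arbitrary example values.
 * Returns the index of the next pattern slot to fill.
 */
static int
example_fdir_vxlan_outer(struct rte_flow_item pattern[],
                         struct rte_flow_item_vxlan *vxlan_spec,
                         struct rte_flow_item_vxlan *vxlan_mask)
{
        int i = 0;

        memset(vxlan_spec, 0, sizeof(*vxlan_spec));
        memset(vxlan_mask, 0, sizeof(*vxlan_mask));

        /* Outer headers: no spec/mask, they only select the stack. */
        memset(&pattern[i], 0, sizeof(pattern[i]));
        pattern[i++].type = RTE_FLOW_ITEM_TYPE_ETH;
        memset(&pattern[i], 0, sizeof(pattern[i]));
        pattern[i++].type = RTE_FLOW_ITEM_TYPE_IPV4;
        memset(&pattern[i], 0, sizeof(pattern[i]));
        pattern[i++].type = RTE_FLOW_ITEM_TYPE_UDP;

        /* VXLAN: the VNI must be fully masked or not masked at all. */
        vxlan_spec->vni[0] = 0x00;
        vxlan_spec->vni[1] = 0x32;
        vxlan_spec->vni[2] = 0x54;
        vxlan_mask->vni[0] = 0xFF;
        vxlan_mask->vni[1] = 0xFF;
        vxlan_mask->vni[2] = 0xFF;
        memset(&pattern[i], 0, sizeof(pattern[i]));
        pattern[i].type = RTE_FLOW_ITEM_TYPE_VXLAN;
        pattern[i].spec = vxlan_spec;
        pattern[i].mask = vxlan_mask;
        i++;

        return i;
}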
2246 static int
2247 ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
2248                                const struct rte_flow_item pattern[],
2249                                const struct rte_flow_action actions[],
2250                                struct ixgbe_fdir_rule *rule,
2251                                struct rte_flow_error *error)
2252 {
2253         const struct rte_flow_item *item;
2254         const struct rte_flow_item_vxlan *vxlan_spec;
2255         const struct rte_flow_item_vxlan *vxlan_mask;
2256         const struct rte_flow_item_nvgre *nvgre_spec;
2257         const struct rte_flow_item_nvgre *nvgre_mask;
2258         const struct rte_flow_item_eth *eth_spec;
2259         const struct rte_flow_item_eth *eth_mask;
2260         const struct rte_flow_item_vlan *vlan_spec;
2261         const struct rte_flow_item_vlan *vlan_mask;
2262         uint32_t j;
2263
2264         if (!pattern) {
2265                 rte_flow_error_set(error, EINVAL,
2266                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
2267                                    NULL, "NULL pattern.");
2268                 return -rte_errno;
2269         }
2270
2271         if (!actions) {
2272                 rte_flow_error_set(error, EINVAL,
2273                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
2274                                    NULL, "NULL action.");
2275                 return -rte_errno;
2276         }
2277
2278         if (!attr) {
2279                 rte_flow_error_set(error, EINVAL,
2280                                    RTE_FLOW_ERROR_TYPE_ATTR,
2281                                    NULL, "NULL attribute.");
2282                 return -rte_errno;
2283         }
2284
2285         /**
2286          * Some fields may not be provided. Set spec to 0 and mask to the
2287          * default value so that unprovided fields need no further handling.
2288          */
2289         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2290         memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
2291         rule->mask.vlan_tci_mask = 0;
2292
2293         /**
2294          * The first not void item should be
2295          * ETH or IPv4 or IPv6 or UDP or VxLAN or NVGRE.
2296          */
2297         item = next_no_void_pattern(pattern, NULL);
2298         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
2299             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
2300             item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
2301             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
2302             item->type != RTE_FLOW_ITEM_TYPE_VXLAN &&
2303             item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
2304                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2305                 rte_flow_error_set(error, EINVAL,
2306                         RTE_FLOW_ERROR_TYPE_ITEM,
2307                         item, "Not supported by fdir filter");
2308                 return -rte_errno;
2309         }
2310
2311         rule->mode = RTE_FDIR_MODE_PERFECT_TUNNEL;
2312
2313         /* Skip MAC. */
2314         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
2315                 /* Only used to describe the protocol stack. */
2316                 if (item->spec || item->mask) {
2317                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2318                         rte_flow_error_set(error, EINVAL,
2319                                 RTE_FLOW_ERROR_TYPE_ITEM,
2320                                 item, "Not supported by fdir filter");
2321                         return -rte_errno;
2322                 }
2323                 /* Not supported last point for range*/
2324                 if (item->last) {
2325                         rte_flow_error_set(error, EINVAL,
2326                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2327                                 item, "Not supported last point for range");
2328                         return -rte_errno;
2329                 }
2330
2331                 /* Check if the next not void item is IPv4 or IPv6. */
2332                 item = next_no_void_pattern(pattern, item);
2333                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
2334                     item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
2335                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2336                         rte_flow_error_set(error, EINVAL,
2337                                 RTE_FLOW_ERROR_TYPE_ITEM,
2338                                 item, "Not supported by fdir filter");
2339                         return -rte_errno;
2340                 }
2341         }
2342
2343         /* Skip IP. */
2344         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
2345             item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
2346                 /* Only used to describe the protocol stack. */
2347                 if (item->spec || item->mask) {
2348                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2349                         rte_flow_error_set(error, EINVAL,
2350                                 RTE_FLOW_ERROR_TYPE_ITEM,
2351                                 item, "Not supported by fdir filter");
2352                         return -rte_errno;
2353                 }
2354                 /*Not supported last point for range*/
2355                 if (item->last) {
2356                         rte_flow_error_set(error, EINVAL,
2357                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2358                                 item, "Not supported last point for range");
2359                         return -rte_errno;
2360                 }
2361
2362                 /* Check if the next not void item is UDP or NVGRE. */
2363                 item = next_no_void_pattern(pattern, item);
2364                 if (item->type != RTE_FLOW_ITEM_TYPE_UDP &&
2365                     item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
2366                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2367                         rte_flow_error_set(error, EINVAL,
2368                                 RTE_FLOW_ERROR_TYPE_ITEM,
2369                                 item, "Not supported by fdir filter");
2370                         return -rte_errno;
2371                 }
2372         }
2373
2374         /* Skip UDP. */
2375         if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
2376                 /* Only used to describe the protocol stack. */
2377                 if (item->spec || item->mask) {
2378                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2379                         rte_flow_error_set(error, EINVAL,
2380                                 RTE_FLOW_ERROR_TYPE_ITEM,
2381                                 item, "Not supported by fdir filter");
2382                         return -rte_errno;
2383                 }
2384                 /*Not supported last point for range*/
2385                 if (item->last) {
2386                         rte_flow_error_set(error, EINVAL,
2387                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2388                                 item, "Not supported last point for range");
2389                         return -rte_errno;
2390                 }
2391
2392                 /* Check if the next not void item is VxLAN. */
2393                 item = next_no_void_pattern(pattern, item);
2394                 if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
2395                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2396                         rte_flow_error_set(error, EINVAL,
2397                                 RTE_FLOW_ERROR_TYPE_ITEM,
2398                                 item, "Not supported by fdir filter");
2399                         return -rte_errno;
2400                 }
2401         }
2402
2403         /* Get the VxLAN info */
2404         if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
2405                 rule->ixgbe_fdir.formatted.tunnel_type =
2406                         RTE_FDIR_TUNNEL_TYPE_VXLAN;
2407
2408                 /* Only care about VNI, others should be masked. */
2409                 if (!item->mask) {
2410                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2411                         rte_flow_error_set(error, EINVAL,
2412                                 RTE_FLOW_ERROR_TYPE_ITEM,
2413                                 item, "Not supported by fdir filter");
2414                         return -rte_errno;
2415                 }
2416                 /*Not supported last point for range*/
2417                 if (item->last) {
2418                         rte_flow_error_set(error, EINVAL,
2419                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2420                                 item, "Not supported last point for range");
2421                         return -rte_errno;
2422                 }
2423                 rule->b_mask = TRUE;
2424
2425                 /* Tunnel type is always meaningful. */
2426                 rule->mask.tunnel_type_mask = 1;
2427
2428                 vxlan_mask =
2429                         (const struct rte_flow_item_vxlan *)item->mask;
2430                 if (vxlan_mask->flags) {
2431                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2432                         rte_flow_error_set(error, EINVAL,
2433                                 RTE_FLOW_ERROR_TYPE_ITEM,
2434                                 item, "Not supported by fdir filter");
2435                         return -rte_errno;
2436                 }
2437                 /* The VNI must be fully masked or not masked at all. */
2438                 if ((vxlan_mask->vni[0] || vxlan_mask->vni[1] ||
2439                         vxlan_mask->vni[2]) &&
2440                         ((vxlan_mask->vni[0] != 0xFF) ||
2441                         (vxlan_mask->vni[1] != 0xFF) ||
2442                                 (vxlan_mask->vni[2] != 0xFF))) {
2443                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2444                         rte_flow_error_set(error, EINVAL,
2445                                 RTE_FLOW_ERROR_TYPE_ITEM,
2446                                 item, "Not supported by fdir filter");
2447                         return -rte_errno;
2448                 }
2449
2450                 rte_memcpy(&rule->mask.tunnel_id_mask, vxlan_mask->vni,
2451                         RTE_DIM(vxlan_mask->vni));
2452
2453                 if (item->spec) {
2454                         rule->b_spec = TRUE;
2455                         vxlan_spec = (const struct rte_flow_item_vxlan *)
2456                                         item->spec;
2457                         rte_memcpy(((uint8_t *)
2458                                 &rule->ixgbe_fdir.formatted.tni_vni + 1),
2459                                 vxlan_spec->vni, RTE_DIM(vxlan_spec->vni));
2460                         rule->ixgbe_fdir.formatted.tni_vni = rte_be_to_cpu_32(
2461                                 rule->ixgbe_fdir.formatted.tni_vni);
2462                 }
2463         }
2464
2465         /* Get the NVGRE info */
2466         if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE) {
2467                 rule->ixgbe_fdir.formatted.tunnel_type =
2468                         RTE_FDIR_TUNNEL_TYPE_NVGRE;
2469
2470                 /**
2471                  * Only care about the c/k/s flags, protocol and TNI;
2472                  * everything else should be masked.
2473                  */
2474                 if (!item->mask) {
2475                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2476                         rte_flow_error_set(error, EINVAL,
2477                                 RTE_FLOW_ERROR_TYPE_ITEM,
2478                                 item, "Not supported by fdir filter");
2479                         return -rte_errno;
2480                 }
2481                 /* Range matching ("last") is not supported. */
2482                 if (item->last) {
2483                         rte_flow_error_set(error, EINVAL,
2484                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2485                                 item, "Not supported last point for range");
2486                         return -rte_errno;
2487                 }
2488                 rule->b_mask = TRUE;
2489
2490                 /* Tunnel type is always meaningful. */
2491                 rule->mask.tunnel_type_mask = 1;
2492
2493                 nvgre_mask =
2494                         (const struct rte_flow_item_nvgre *)item->mask;
2495                 if (nvgre_mask->flow_id) {
2496                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2497                         rte_flow_error_set(error, EINVAL,
2498                                 RTE_FLOW_ERROR_TYPE_ITEM,
2499                                 item, "Not supported by fdir filter");
2500                         return -rte_errno;
2501                 }
2502                 if (nvgre_mask->protocol &&
2503                     nvgre_mask->protocol != 0xFFFF) {
2504                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2505                         rte_flow_error_set(error, EINVAL,
2506                                 RTE_FLOW_ERROR_TYPE_ITEM,
2507                                 item, "Not supported by fdir filter");
2508                         return -rte_errno;
2509                 }
2510                 if (nvgre_mask->c_k_s_rsvd0_ver &&
2511                     nvgre_mask->c_k_s_rsvd0_ver !=
2512                         rte_cpu_to_be_16(0xFFFF)) {
2513                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2514                         rte_flow_error_set(error, EINVAL,
2515                                 RTE_FLOW_ERROR_TYPE_ITEM,
2516                                 item, "Not supported by fdir filter");
2517                         return -rte_errno;
2518                 }
2519                 /* TNI must be totally masked or not. */
2520                 if (nvgre_mask->tni[0] &&
2521                     ((nvgre_mask->tni[0] != 0xFF) ||
2522                     (nvgre_mask->tni[1] != 0xFF) ||
2523                     (nvgre_mask->tni[2] != 0xFF))) {
2524                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2525                         rte_flow_error_set(error, EINVAL,
2526                                 RTE_FLOW_ERROR_TYPE_ITEM,
2527                                 item, "Not supported by fdir filter");
2528                         return -rte_errno;
2529                 }
2530                 /* The TNI is a 24-bit field. */
2531                 rte_memcpy(&rule->mask.tunnel_id_mask, nvgre_mask->tni,
2532                         RTE_DIM(nvgre_mask->tni));
2533                 rule->mask.tunnel_id_mask <<= 8;
2534
2535                 if (item->spec) {
2536                         rule->b_spec = TRUE;
2537                         nvgre_spec =
2538                                 (const struct rte_flow_item_nvgre *)item->spec;
2539                         if (nvgre_spec->c_k_s_rsvd0_ver !=
2540                             rte_cpu_to_be_16(0x2000) &&
2541                                 nvgre_mask->c_k_s_rsvd0_ver) {
2542                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2543                                 rte_flow_error_set(error, EINVAL,
2544                                         RTE_FLOW_ERROR_TYPE_ITEM,
2545                                         item, "Not supported by fdir filter");
2546                                 return -rte_errno;
2547                         }
2548                         if (nvgre_mask->protocol &&
2549                             nvgre_spec->protocol !=
2550                             rte_cpu_to_be_16(NVGRE_PROTOCOL)) {
2551                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2552                                 rte_flow_error_set(error, EINVAL,
2553                                         RTE_FLOW_ERROR_TYPE_ITEM,
2554                                         item, "Not supported by fdir filter");
2555                                 return -rte_errno;
2556                         }
2557                         /* The TNI is a 24-bit field. */
2558                         rte_memcpy(&rule->ixgbe_fdir.formatted.tni_vni,
2559                         nvgre_spec->tni, RTE_DIM(nvgre_spec->tni));
2560                         rule->ixgbe_fdir.formatted.tni_vni <<= 8;
2561                 }
2562         }
2563
2564         /* check if the next not void item is MAC */
2565         item = next_no_void_pattern(pattern, item);
2566         if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
2567                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2568                 rte_flow_error_set(error, EINVAL,
2569                         RTE_FLOW_ERROR_TYPE_ITEM,
2570                         item, "Not supported by fdir filter");
2571                 return -rte_errno;
2572         }
2573
2574         /**
2575          * Only the VLAN tag and destination MAC address are supported;
2576          * everything else should be masked.
2577          */
2578
2579         if (!item->mask) {
2580                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2581                 rte_flow_error_set(error, EINVAL,
2582                         RTE_FLOW_ERROR_TYPE_ITEM,
2583                         item, "Not supported by fdir filter");
2584                 return -rte_errno;
2585         }
2586         /* Range matching ("last") is not supported. */
2587         if (item->last) {
2588                 rte_flow_error_set(error, EINVAL,
2589                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2590                         item, "Not supported last point for range");
2591                 return -rte_errno;
2592         }
2593         rule->b_mask = TRUE;
2594         eth_mask = (const struct rte_flow_item_eth *)item->mask;
2595
2596         /* Ether type should be masked. */
2597         if (eth_mask->type) {
2598                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2599                 rte_flow_error_set(error, EINVAL,
2600                         RTE_FLOW_ERROR_TYPE_ITEM,
2601                         item, "Not supported by fdir filter");
2602                 return -rte_errno;
2603         }
2604
2605         /* src MAC address should be masked. */
2606         for (j = 0; j < ETHER_ADDR_LEN; j++) {
2607                 if (eth_mask->src.addr_bytes[j]) {
2608                         memset(rule, 0,
2609                                sizeof(struct ixgbe_fdir_rule));
2610                         rte_flow_error_set(error, EINVAL,
2611                                 RTE_FLOW_ERROR_TYPE_ITEM,
2612                                 item, "Not supported by fdir filter");
2613                         return -rte_errno;
2614                 }
2615         }
2616         rule->mask.mac_addr_byte_mask = 0;
2617         for (j = 0; j < ETHER_ADDR_LEN; j++) {
2618                 /* It's a per byte mask. */
2619                 if (eth_mask->dst.addr_bytes[j] == 0xFF) {
2620                         rule->mask.mac_addr_byte_mask |= 0x1 << j;
2621                 } else if (eth_mask->dst.addr_bytes[j]) {
2622                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2623                         rte_flow_error_set(error, EINVAL,
2624                                 RTE_FLOW_ERROR_TYPE_ITEM,
2625                                 item, "Not supported by fdir filter");
2626                         return -rte_errno;
2627                 }
2628         }
2629
2630         /* When there is no VLAN item, use the full VLAN TCI mask. */
2631         rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
2632
2633         if (item->spec) {
2634                 rule->b_spec = TRUE;
2635                 eth_spec = (const struct rte_flow_item_eth *)item->spec;
2636
2637                 /* Get the dst MAC. */
2638                 for (j = 0; j < ETHER_ADDR_LEN; j++) {
2639                         rule->ixgbe_fdir.formatted.inner_mac[j] =
2640                                 eth_spec->dst.addr_bytes[j];
2641                 }
2642         }
2643
2644         /**
2645          * Check if the next not void item is vlan or ipv4.
2646          * IPv6 is not supported.
2647          */
2648         item = next_no_void_pattern(pattern, item);
2649         if ((item->type != RTE_FLOW_ITEM_TYPE_VLAN) &&
2650                 (item->type != RTE_FLOW_ITEM_TYPE_IPV4)) {
2651                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2652                 rte_flow_error_set(error, EINVAL,
2653                         RTE_FLOW_ERROR_TYPE_ITEM,
2654                         item, "Not supported by fdir filter");
2655                 return -rte_errno;
2656         }
2657         /* Range matching ("last") is not supported. */
2658         if (item->last) {
2659                 rte_flow_error_set(error, EINVAL,
2660                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2661                         item, "Not supported last point for range");
2662                 return -rte_errno;
2663         }
2664
2665         if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
2666                 if (!(item->spec && item->mask)) {
2667                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2668                         rte_flow_error_set(error, EINVAL,
2669                                 RTE_FLOW_ERROR_TYPE_ITEM,
2670                                 item, "Not supported by fdir filter");
2671                         return -rte_errno;
2672                 }
2673
2674                 vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
2675                 vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
2676
2677                 rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
2678
2679                 rule->mask.vlan_tci_mask = vlan_mask->tci;
2680                 rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
2681                 /* More than one VLAN tag is not supported. */
2682
2683                 /* check if the next not void item is END */
2684                 item = next_no_void_pattern(pattern, item);
2685
2686                 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2687                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2688                         rte_flow_error_set(error, EINVAL,
2689                                 RTE_FLOW_ERROR_TYPE_ITEM,
2690                                 item, "Not supported by fdir filter");
2691                         return -rte_errno;
2692                 }
2693         }
2694
2695         /**
2696          * If there is no VLAN tag, the VLAN is a don't-care;
2697          * do nothing.
2698          */
2699
2700         return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
2701 }
2702
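/**
 * Parse a flow rule into a flow director (fdir) filter.  The normal
 * (non-tunnel) parser is tried first, with the tunnel parser as a fallback.
 * The result is then checked against per-MAC restrictions (82599 cannot
 * drop based on L4 ports), the configured fdir mode and the Rx queue count.
 */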
2703 static int
2704 ixgbe_parse_fdir_filter(struct rte_eth_dev *dev,
2705                         const struct rte_flow_attr *attr,
2706                         const struct rte_flow_item pattern[],
2707                         const struct rte_flow_action actions[],
2708                         struct ixgbe_fdir_rule *rule,
2709                         struct rte_flow_error *error)
2710 {
2711         int ret;
2712         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2713         enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
2714
2715         if (hw->mac.type != ixgbe_mac_82599EB &&
2716                 hw->mac.type != ixgbe_mac_X540 &&
2717                 hw->mac.type != ixgbe_mac_X550 &&
2718                 hw->mac.type != ixgbe_mac_X550EM_x &&
2719                 hw->mac.type != ixgbe_mac_X550EM_a)
2720                 return -ENOTSUP;
2721
2722         ret = ixgbe_parse_fdir_filter_normal(dev, attr, pattern,
2723                                         actions, rule, error);
2724
2725         if (!ret)
2726                 goto step_next;
2727
2728         ret = ixgbe_parse_fdir_filter_tunnel(attr, pattern,
2729                                         actions, rule, error);
2730
2731         if (ret)
2732                 return ret;
2733
2734 step_next:
2735
2736         if (hw->mac.type == ixgbe_mac_82599EB &&
2737                 rule->fdirflags == IXGBE_FDIRCMD_DROP &&
2738                 (rule->ixgbe_fdir.formatted.src_port != 0 ||
2739                 rule->ixgbe_fdir.formatted.dst_port != 0))
2740                 return -ENOTSUP;
2741
2742         if (fdir_mode == RTE_FDIR_MODE_NONE ||
2743             fdir_mode != rule->mode)
2744                 return -ENOTSUP;
2745
2746         if (rule->queue >= dev->data->nb_rx_queues)
2747                 return -ENOTSUP;
2748
2749         return ret;
2750 }
2751
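/**
 * Parse a flow rule into an RSS configuration.  Exactly one RSS action
 * followed by END is accepted, every queue index must be below the number
 * of configured Rx queues, and only the ingress direction is supported.
 */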
2752 static int
2753 ixgbe_parse_rss_filter(struct rte_eth_dev *dev,
2754                         const struct rte_flow_attr *attr,
2755                         const struct rte_flow_action actions[],
2756                         struct ixgbe_rte_flow_rss_conf *rss_conf,
2757                         struct rte_flow_error *error)
2758 {
2759         const struct rte_flow_action *act;
2760         const struct rte_flow_action_rss *rss;
2761         uint16_t n;
2762
2763         /**
2764          * rss only supports forwarding,
2765          * check if the first not void action is RSS.
2766          */
2767         act = next_no_void_action(actions, NULL);
2768         if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
2769                 memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
2770                 rte_flow_error_set(error, EINVAL,
2771                         RTE_FLOW_ERROR_TYPE_ACTION,
2772                         act, "Not supported action.");
2773                 return -rte_errno;
2774         }
2775
2776         rss = (const struct rte_flow_action_rss *)act->conf;
2777
2778         if (!rss || !rss->num) {
2779                 rte_flow_error_set(error, EINVAL,
2780                                 RTE_FLOW_ERROR_TYPE_ACTION,
2781                                 act,
2782                            "no valid queues");
2783                 return -rte_errno;
2784         }
2785
2786         for (n = 0; n < rss->num; n++) {
2787                 if (rss->queue[n] >= dev->data->nb_rx_queues) {
2788                         rte_flow_error_set(error, EINVAL,
2789                                    RTE_FLOW_ERROR_TYPE_ACTION,
2790                                    act,
2791                                    "queue id > max number of queues");
2792                         return -rte_errno;
2793                 }
2794         }
2795         if (rss->rss_conf)
2796                 rss_conf->rss_conf = *rss->rss_conf;
2797         else
2798                 rss_conf->rss_conf.rss_hf = IXGBE_RSS_OFFLOAD_ALL;
2799
2800         for (n = 0; n < rss->num; ++n)
2801                 rss_conf->queue[n] = rss->queue[n];
2802         rss_conf->num = rss->num;
2803
2804         /* check if the next not void action is END */
2805         act = next_no_void_action(actions, act);
2806         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
2807                 memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
2808                 rte_flow_error_set(error, EINVAL,
2809                         RTE_FLOW_ERROR_TYPE_ACTION,
2810                         act, "Not supported action.");
2811                 return -rte_errno;
2812         }
2813
2814         /* parse attr */
2815         /* must be input direction */
2816         if (!attr->ingress) {
2817                 memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
2818                 rte_flow_error_set(error, EINVAL,
2819                                    RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
2820                                    attr, "Only support ingress.");
2821                 return -rte_errno;
2822         }
2823
2824         /* not supported */
2825         if (attr->egress) {
2826                 memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
2827                 rte_flow_error_set(error, EINVAL,
2828                                    RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
2829                                    attr, "Not support egress.");
2830                 return -rte_errno;
2831         }
2832
2833         if (attr->priority > 0xFFFF) {
2834                 memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
2835                 rte_flow_error_set(error, EINVAL,
2836                                    RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
2837                                    attr, "Error priority.");
2838                 return -rte_errno;
2839         }
2840
2841         return 0;
2842 }
2843
2844 /* remove the rss filter */
2845 static void
2846 ixgbe_clear_rss_filter(struct rte_eth_dev *dev)
2847 {
2848         struct ixgbe_filter_info *filter_info =
2849                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
2850
2851         if (filter_info->rss_info.num)
2852                 ixgbe_config_rss_filter(dev, &filter_info->rss_info, FALSE);
2853 }
2854
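/* Initialize the software lists used to track flow rules per filter type. */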
2855 void
2856 ixgbe_filterlist_init(void)
2857 {
2858         TAILQ_INIT(&filter_ntuple_list);
2859         TAILQ_INIT(&filter_ethertype_list);
2860         TAILQ_INIT(&filter_syn_list);
2861         TAILQ_INIT(&filter_fdir_list);
2862         TAILQ_INIT(&filter_l2_tunnel_list);
2863         TAILQ_INIT(&filter_rss_list);
2864         TAILQ_INIT(&ixgbe_flow_list);
2865 }
2866
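/**
 * Free every list entry and every rte_flow handle tracked by the driver.
 * This only releases software state; the hardware filters are removed by
 * the per-filter-type clear helpers (see ixgbe_flow_flush()).
 */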
2867 void
2868 ixgbe_filterlist_flush(void)
2869 {
2870         struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
2871         struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
2872         struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
2873         struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2874         struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
2875         struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
2876         struct ixgbe_rss_conf_ele *rss_filter_ptr;
2877
2878         while ((ntuple_filter_ptr = TAILQ_FIRST(&filter_ntuple_list))) {
2879                 TAILQ_REMOVE(&filter_ntuple_list,
2880                                  ntuple_filter_ptr,
2881                                  entries);
2882                 rte_free(ntuple_filter_ptr);
2883         }
2884
2885         while ((ethertype_filter_ptr = TAILQ_FIRST(&filter_ethertype_list))) {
2886                 TAILQ_REMOVE(&filter_ethertype_list,
2887                                  ethertype_filter_ptr,
2888                                  entries);
2889                 rte_free(ethertype_filter_ptr);
2890         }
2891
2892         while ((syn_filter_ptr = TAILQ_FIRST(&filter_syn_list))) {
2893                 TAILQ_REMOVE(&filter_syn_list,
2894                                  syn_filter_ptr,
2895                                  entries);
2896                 rte_free(syn_filter_ptr);
2897         }
2898
2899         while ((l2_tn_filter_ptr = TAILQ_FIRST(&filter_l2_tunnel_list))) {
2900                 TAILQ_REMOVE(&filter_l2_tunnel_list,
2901                                  l2_tn_filter_ptr,
2902                                  entries);
2903                 rte_free(l2_tn_filter_ptr);
2904         }
2905
2906         while ((fdir_rule_ptr = TAILQ_FIRST(&filter_fdir_list))) {
2907                 TAILQ_REMOVE(&filter_fdir_list,
2908                                  fdir_rule_ptr,
2909                                  entries);
2910                 rte_free(fdir_rule_ptr);
2911         }
2912
2913         while ((rss_filter_ptr = TAILQ_FIRST(&filter_rss_list))) {
2914                 TAILQ_REMOVE(&filter_rss_list,
2915                                  rss_filter_ptr,
2916                                  entries);
2917                 rte_free(rss_filter_ptr);
2918         }
2919
2920         while ((ixgbe_flow_mem_ptr = TAILQ_FIRST(&ixgbe_flow_list))) {
2921                 TAILQ_REMOVE(&ixgbe_flow_list,
2922                                  ixgbe_flow_mem_ptr,
2923                                  entries);
2924                 rte_free(ixgbe_flow_mem_ptr->flow);
2925                 rte_free(ixgbe_flow_mem_ptr);
2926         }
2927 }
2928
2929 /**
2930  * Create a flow rule.
2931  * Theoretically one rule can match more than one filter type.
2932  * We let it use the filter type it hits first,
2933  * so the order in which the parsers are tried matters.
2934  */
2935 static struct rte_flow *
2936 ixgbe_flow_create(struct rte_eth_dev *dev,
2937                   const struct rte_flow_attr *attr,
2938                   const struct rte_flow_item pattern[],
2939                   const struct rte_flow_action actions[],
2940                   struct rte_flow_error *error)
2941 {
2942         int ret;
2943         struct rte_eth_ntuple_filter ntuple_filter;
2944         struct rte_eth_ethertype_filter ethertype_filter;
2945         struct rte_eth_syn_filter syn_filter;
2946         struct ixgbe_fdir_rule fdir_rule;
2947         struct rte_eth_l2_tunnel_conf l2_tn_filter;
2948         struct ixgbe_hw_fdir_info *fdir_info =
2949                 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
2950         struct ixgbe_rte_flow_rss_conf rss_conf;
2951         struct rte_flow *flow = NULL;
2952         struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
2953         struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
2954         struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
2955         struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2956         struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
2957         struct ixgbe_rss_conf_ele *rss_filter_ptr;
2958         struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
2959         uint8_t first_mask = FALSE;
2960
2961         flow = rte_zmalloc("ixgbe_rte_flow", sizeof(struct rte_flow), 0);
2962         if (!flow) {
2963                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2964                 return (struct rte_flow *)flow;
2965         }
2966         ixgbe_flow_mem_ptr = rte_zmalloc("ixgbe_flow_mem",
2967                         sizeof(struct ixgbe_flow_mem), 0);
2968         if (!ixgbe_flow_mem_ptr) {
2969                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2970                 rte_free(flow);
2971                 return NULL;
2972         }
2973         ixgbe_flow_mem_ptr->flow = flow;
2974         TAILQ_INSERT_TAIL(&ixgbe_flow_list,
2975                                 ixgbe_flow_mem_ptr, entries);
2976
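        /*
         * Try the parsers in a fixed order: ntuple, ethertype, SYN, fdir,
         * L2 tunnel and finally RSS.  The first parser that accepts the
         * rule determines the filter type programmed into the hardware.
         */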
2977         memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
2978         ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
2979                         actions, &ntuple_filter, error);
2980
2981 #ifdef RTE_LIBRTE_SECURITY
2982         /* An ESP flow is not really a flow; just return the handle. */
2983         if (ntuple_filter.proto == IPPROTO_ESP)
2984                 return flow;
2985 #endif
2986
2987         if (!ret) {
2988                 ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
2989                 if (!ret) {
2990                         ntuple_filter_ptr = rte_zmalloc("ixgbe_ntuple_filter",
2991                                 sizeof(struct ixgbe_ntuple_filter_ele), 0);
2992                         if (!ntuple_filter_ptr) {
2993                                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2994                                 goto out;
2995                         }
2996                         rte_memcpy(&ntuple_filter_ptr->filter_info,
2997                                 &ntuple_filter,
2998                                 sizeof(struct rte_eth_ntuple_filter));
2999                         TAILQ_INSERT_TAIL(&filter_ntuple_list,
3000                                 ntuple_filter_ptr, entries);
3001                         flow->rule = ntuple_filter_ptr;
3002                         flow->filter_type = RTE_ETH_FILTER_NTUPLE;
3003                         return flow;
3004                 }
3005                 goto out;
3006         }
3007
3008         memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
3009         ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
3010                                 actions, &ethertype_filter, error);
3011         if (!ret) {
3012                 ret = ixgbe_add_del_ethertype_filter(dev,
3013                                 &ethertype_filter, TRUE);
3014                 if (!ret) {
3015                         ethertype_filter_ptr = rte_zmalloc(
3016                                 "ixgbe_ethertype_filter",
3017                                 sizeof(struct ixgbe_ethertype_filter_ele), 0);
3018                         if (!ethertype_filter_ptr) {
3019                                 PMD_DRV_LOG(ERR, "failed to allocate memory");
3020                                 goto out;
3021                         }
3022                         rte_memcpy(&ethertype_filter_ptr->filter_info,
3023                                 &ethertype_filter,
3024                                 sizeof(struct rte_eth_ethertype_filter));
3025                         TAILQ_INSERT_TAIL(&filter_ethertype_list,
3026                                 ethertype_filter_ptr, entries);
3027                         flow->rule = ethertype_filter_ptr;
3028                         flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
3029                         return flow;
3030                 }
3031                 goto out;
3032         }
3033
3034         memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
3035         ret = ixgbe_parse_syn_filter(dev, attr, pattern,
3036                                 actions, &syn_filter, error);
3037         if (!ret) {
3038                 ret = ixgbe_syn_filter_set(dev, &syn_filter, TRUE);
3039                 if (!ret) {
3040                         syn_filter_ptr = rte_zmalloc("ixgbe_syn_filter",
3041                                 sizeof(struct ixgbe_eth_syn_filter_ele), 0);
3042                         if (!syn_filter_ptr) {
3043                                 PMD_DRV_LOG(ERR, "failed to allocate memory");
3044                                 goto out;
3045                         }
3046                         rte_memcpy(&syn_filter_ptr->filter_info,
3047                                 &syn_filter,
3048                                 sizeof(struct rte_eth_syn_filter));
3049                         TAILQ_INSERT_TAIL(&filter_syn_list,
3050                                 syn_filter_ptr,
3051                                 entries);
3052                         flow->rule = syn_filter_ptr;
3053                         flow->filter_type = RTE_ETH_FILTER_SYN;
3054                         return flow;
3055                 }
3056                 goto out;
3057         }
3058
3059         memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
3060         ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
3061                                 actions, &fdir_rule, error);
3062         if (!ret) {
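                /*
                 * Flow director uses a single global input mask shared by
                 * all rules: program it the first time a masked rule is
                 * added and reject any later rule whose mask differs.
                 */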
3063                 /* A mask cannot be deleted. */
3064                 if (fdir_rule.b_mask) {
3065                         if (!fdir_info->mask_added) {
3066                                 /* It's the first time the mask is set. */
3067                                 rte_memcpy(&fdir_info->mask,
3068                                         &fdir_rule.mask,
3069                                         sizeof(struct ixgbe_hw_fdir_mask));
3070                                 fdir_info->flex_bytes_offset =
3071                                         fdir_rule.flex_bytes_offset;
3072
3073                                 if (fdir_rule.mask.flex_bytes_mask)
3074                                         ixgbe_fdir_set_flexbytes_offset(dev,
3075                                                 fdir_rule.flex_bytes_offset);
3076
3077                                 ret = ixgbe_fdir_set_input_mask(dev);
3078                                 if (ret)
3079                                         goto out;
3080
3081                                 fdir_info->mask_added = TRUE;
3082                                 first_mask = TRUE;
3083                         } else {
3084                                 /**
3085                                  * Only one global mask is supported;
3086                                  * every rule must use the same mask.
3087                                  */
3088                                 ret = memcmp(&fdir_info->mask,
3089                                         &fdir_rule.mask,
3090                                         sizeof(struct ixgbe_hw_fdir_mask));
3091                                 if (ret)
3092                                         goto out;
3093
3094                                 if (fdir_info->flex_bytes_offset !=
3095                                                 fdir_rule.flex_bytes_offset)
3096                                         goto out;
3097                         }
3098                 }
3099
3100                 if (fdir_rule.b_spec) {
3101                         ret = ixgbe_fdir_filter_program(dev, &fdir_rule,
3102                                         FALSE, FALSE);
3103                         if (!ret) {
3104                                 fdir_rule_ptr = rte_zmalloc("ixgbe_fdir_filter",
3105                                         sizeof(struct ixgbe_fdir_rule_ele), 0);
3106                                 if (!fdir_rule_ptr) {
3107                                         PMD_DRV_LOG(ERR, "failed to allocate memory");
3108                                         goto out;
3109                                 }
3110                                 rte_memcpy(&fdir_rule_ptr->filter_info,
3111                                         &fdir_rule,
3112                                         sizeof(struct ixgbe_fdir_rule));
3113                                 TAILQ_INSERT_TAIL(&filter_fdir_list,
3114                                         fdir_rule_ptr, entries);
3115                                 flow->rule = fdir_rule_ptr;
3116                                 flow->filter_type = RTE_ETH_FILTER_FDIR;
3117
3118                                 return flow;
3119                         }
3120
3121                         if (ret) {
3122                                 /**
3123                                  * Clear the mask_added flag if programming
3124                                  * the filter fails.
3125                                  */
3126                                 if (first_mask)
3127                                         fdir_info->mask_added = FALSE;
3128                                 goto out;
3129                         }
3130                 }
3131
3132                 goto out;
3133         }
3134
3135         memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
3136         ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
3137                                         actions, &l2_tn_filter, error);
3138         if (!ret) {
3139                 ret = ixgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_filter, FALSE);
3140                 if (!ret) {
3141                         l2_tn_filter_ptr = rte_zmalloc("ixgbe_l2_tn_filter",
3142                                 sizeof(struct ixgbe_eth_l2_tunnel_conf_ele), 0);
3143                         if (!l2_tn_filter_ptr) {
3144                                 PMD_DRV_LOG(ERR, "failed to allocate memory");
3145                                 goto out;
3146                         }
3147                         rte_memcpy(&l2_tn_filter_ptr->filter_info,
3148                                 &l2_tn_filter,
3149                                 sizeof(struct rte_eth_l2_tunnel_conf));
3150                         TAILQ_INSERT_TAIL(&filter_l2_tunnel_list,
3151                                 l2_tn_filter_ptr, entries);
3152                         flow->rule = l2_tn_filter_ptr;
3153                         flow->filter_type = RTE_ETH_FILTER_L2_TUNNEL;
3154                         return flow;
3155                 }
3156         }
3157
3158         memset(&rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
3159         ret = ixgbe_parse_rss_filter(dev, attr,
3160                                         actions, &rss_conf, error);
3161         if (!ret) {
3162                 ret = ixgbe_config_rss_filter(dev, &rss_conf, TRUE);
3163                 if (!ret) {
3164                         rss_filter_ptr = rte_zmalloc("ixgbe_rss_filter",
3165                                 sizeof(struct ixgbe_rss_conf_ele), 0);
3166                         if (!rss_filter_ptr) {
3167                                 PMD_DRV_LOG(ERR, "failed to allocate memory");
3168                                 goto out;
3169                         }
3170                         rte_memcpy(&rss_filter_ptr->filter_info,
3171                                 &rss_conf,
3172                                 sizeof(struct ixgbe_rte_flow_rss_conf));
3173                         TAILQ_INSERT_TAIL(&filter_rss_list,
3174                                 rss_filter_ptr, entries);
3175                         flow->rule = rss_filter_ptr;
3176                         flow->filter_type = RTE_ETH_FILTER_HASH;
3177                         return flow;
3178                 }
3179         }
3180
3181 out:
3182         TAILQ_REMOVE(&ixgbe_flow_list,
3183                 ixgbe_flow_mem_ptr, entries);
3184         rte_flow_error_set(error, -ret,
3185                            RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
3186                            "Failed to create flow.");
3187         rte_free(ixgbe_flow_mem_ptr);
3188         rte_free(flow);
3189         return NULL;
3190 }
3191
3192 /**
3193  * Check if the flow rule is supported by ixgbe.
3194  * It only checks the format; it does not guarantee that the rule can be
3195  * programmed into the HW, because there may not be enough room for it.
3196  */
3197 static int
3198 ixgbe_flow_validate(struct rte_eth_dev *dev,
3199                 const struct rte_flow_attr *attr,
3200                 const struct rte_flow_item pattern[],
3201                 const struct rte_flow_action actions[],
3202                 struct rte_flow_error *error)
3203 {
3204         struct rte_eth_ntuple_filter ntuple_filter;
3205         struct rte_eth_ethertype_filter ethertype_filter;
3206         struct rte_eth_syn_filter syn_filter;
3207         struct rte_eth_l2_tunnel_conf l2_tn_filter;
3208         struct ixgbe_fdir_rule fdir_rule;
3209         struct ixgbe_rte_flow_rss_conf rss_conf;
3210         int ret;
3211
3212         memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
3213         ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
3214                                 actions, &ntuple_filter, error);
3215         if (!ret)
3216                 return 0;
3217
3218         memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
3219         ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
3220                                 actions, &ethertype_filter, error);
3221         if (!ret)
3222                 return 0;
3223
3224         memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
3225         ret = ixgbe_parse_syn_filter(dev, attr, pattern,
3226                                 actions, &syn_filter, error);
3227         if (!ret)
3228                 return 0;
3229
3230         memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
3231         ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
3232                                 actions, &fdir_rule, error);
3233         if (!ret)
3234                 return 0;
3235
3236         memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
3237         ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
3238                                 actions, &l2_tn_filter, error);
3239         if (!ret)
3240                 return 0;
3241
3242         memset(&rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
3243         ret = ixgbe_parse_rss_filter(dev, attr,
3244                                         actions, &rss_conf, error);
3245
3246         return ret;
3247 }
3248
3249 /* Destroy a flow rule on ixgbe. */
3250 static int
3251 ixgbe_flow_destroy(struct rte_eth_dev *dev,
3252                 struct rte_flow *flow,
3253                 struct rte_flow_error *error)
3254 {
3255         int ret;
3256         struct rte_flow *pmd_flow = flow;
3257         enum rte_filter_type filter_type = pmd_flow->filter_type;
3258         struct rte_eth_ntuple_filter ntuple_filter;
3259         struct rte_eth_ethertype_filter ethertype_filter;
3260         struct rte_eth_syn_filter syn_filter;
3261         struct ixgbe_fdir_rule fdir_rule;
3262         struct rte_eth_l2_tunnel_conf l2_tn_filter;
3263         struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
3264         struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
3265         struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
3266         struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
3267         struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
3268         struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
3269         struct ixgbe_hw_fdir_info *fdir_info =
3270                 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
3271         struct ixgbe_rss_conf_ele *rss_filter_ptr;
3272
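        /*
         * Remove the hardware filter first; the software list entry and the
         * flow handle are only released when the hardware removal succeeds.
         */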
3273         switch (filter_type) {
3274         case RTE_ETH_FILTER_NTUPLE:
3275                 ntuple_filter_ptr = (struct ixgbe_ntuple_filter_ele *)
3276                                         pmd_flow->rule;
3277                 rte_memcpy(&ntuple_filter,
3278                         &ntuple_filter_ptr->filter_info,
3279                         sizeof(struct rte_eth_ntuple_filter));
3280                 ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, FALSE);
3281                 if (!ret) {
3282                         TAILQ_REMOVE(&filter_ntuple_list,
3283                         ntuple_filter_ptr, entries);
3284                         rte_free(ntuple_filter_ptr);
3285                 }
3286                 break;
3287         case RTE_ETH_FILTER_ETHERTYPE:
3288                 ethertype_filter_ptr = (struct ixgbe_ethertype_filter_ele *)
3289                                         pmd_flow->rule;
3290                 rte_memcpy(&ethertype_filter,
3291                         &ethertype_filter_ptr->filter_info,
3292                         sizeof(struct rte_eth_ethertype_filter));
3293                 ret = ixgbe_add_del_ethertype_filter(dev,
3294                                 &ethertype_filter, FALSE);
3295                 if (!ret) {
3296                         TAILQ_REMOVE(&filter_ethertype_list,
3297                                 ethertype_filter_ptr, entries);
3298                         rte_free(ethertype_filter_ptr);
3299                 }
3300                 break;
3301         case RTE_ETH_FILTER_SYN:
3302                 syn_filter_ptr = (struct ixgbe_eth_syn_filter_ele *)
3303                                 pmd_flow->rule;
3304                 rte_memcpy(&syn_filter,
3305                         &syn_filter_ptr->filter_info,
3306                         sizeof(struct rte_eth_syn_filter));
3307                 ret = ixgbe_syn_filter_set(dev, &syn_filter, FALSE);
3308                 if (!ret) {
3309                         TAILQ_REMOVE(&filter_syn_list,
3310                                 syn_filter_ptr, entries);
3311                         rte_free(syn_filter_ptr);
3312                 }
3313                 break;
3314         case RTE_ETH_FILTER_FDIR:
3315                 fdir_rule_ptr = (struct ixgbe_fdir_rule_ele *)pmd_flow->rule;
3316                 rte_memcpy(&fdir_rule,
3317                         &fdir_rule_ptr->filter_info,
3318                         sizeof(struct ixgbe_fdir_rule));
3319                 ret = ixgbe_fdir_filter_program(dev, &fdir_rule, TRUE, FALSE);
3320                 if (!ret) {
3321                         TAILQ_REMOVE(&filter_fdir_list,
3322                                 fdir_rule_ptr, entries);
3323                         rte_free(fdir_rule_ptr);
3324                         if (TAILQ_EMPTY(&filter_fdir_list))
3325                                 fdir_info->mask_added = FALSE;
3326                 }
3327                 break;
3328         case RTE_ETH_FILTER_L2_TUNNEL:
3329                 l2_tn_filter_ptr = (struct ixgbe_eth_l2_tunnel_conf_ele *)
3330                                 pmd_flow->rule;
3331                 rte_memcpy(&l2_tn_filter, &l2_tn_filter_ptr->filter_info,
3332                         sizeof(struct rte_eth_l2_tunnel_conf));
3333                 ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_filter);
3334                 if (!ret) {
3335                         TAILQ_REMOVE(&filter_l2_tunnel_list,
3336                                 l2_tn_filter_ptr, entries);
3337                         rte_free(l2_tn_filter_ptr);
3338                 }
3339                 break;
3340         case RTE_ETH_FILTER_HASH:
3341                 rss_filter_ptr = (struct ixgbe_rss_conf_ele *)
3342                                 pmd_flow->rule;
3343                 ret = ixgbe_config_rss_filter(dev,
3344                                         &rss_filter_ptr->filter_info, FALSE);
3345                 if (!ret) {
3346                         TAILQ_REMOVE(&filter_rss_list,
3347                                 rss_filter_ptr, entries);
3348                         rte_free(rss_filter_ptr);
3349                 }
3350                 break;
3351         default:
3352                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
3353                             filter_type);
3354                 ret = -EINVAL;
3355                 break;
3356         }
3357
3358         if (ret) {
3359                 rte_flow_error_set(error, EINVAL,
3360                                 RTE_FLOW_ERROR_TYPE_HANDLE,
3361                                 NULL, "Failed to destroy flow");
3362                 return ret;
3363         }
3364
3365         TAILQ_FOREACH(ixgbe_flow_mem_ptr, &ixgbe_flow_list, entries) {
3366                 if (ixgbe_flow_mem_ptr->flow == pmd_flow) {
3367                         TAILQ_REMOVE(&ixgbe_flow_list,
3368                                 ixgbe_flow_mem_ptr, entries);
3369                         rte_free(ixgbe_flow_mem_ptr);
3370                 }
3371         }
3372         rte_free(flow);
3373
3374         return ret;
3375 }
3376
3377 /*  Destroy all flow rules associated with a port on ixgbe. */
3378 static int
3379 ixgbe_flow_flush(struct rte_eth_dev *dev,
3380                 struct rte_flow_error *error)
3381 {
3382         int ret = 0;
3383
3384         ixgbe_clear_all_ntuple_filter(dev);
3385         ixgbe_clear_all_ethertype_filter(dev);
3386         ixgbe_clear_syn_filter(dev);
3387
3388         ret = ixgbe_clear_all_fdir_filter(dev);
3389         if (ret < 0) {
3390                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
3391                                         NULL, "Failed to flush rule");
3392                 return ret;
3393         }
3394
3395         ret = ixgbe_clear_all_l2_tn_filter(dev);
3396         if (ret < 0) {
3397                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
3398                                         NULL, "Failed to flush rule");
3399                 return ret;
3400         }
3401
3402         ixgbe_clear_rss_filter(dev);
3403
3404         ixgbe_filterlist_flush();
3405
3406         return 0;
3407 }
3408
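/*
 * Flow API operations exported by the ixgbe PMD.  Applications reach these
 * callbacks through the generic rte_flow_validate/create/destroy/flush API.
 */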
3409 const struct rte_flow_ops ixgbe_flow_ops = {
3410         .validate = ixgbe_flow_validate,
3411         .create = ixgbe_flow_create,
3412         .destroy = ixgbe_flow_destroy,
3413         .flush = ixgbe_flow_flush,
3414 };
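
/*
 * Usage sketch (not part of the driver): a minimal, hypothetical example of
 * how an application might steer UDP/IPv4 traffic to Rx queue 1 through the
 * generic rte_flow API, which lands in ixgbe_flow_validate()/ixgbe_flow_create()
 * above.  "port_id" is assumed to be an initialized ixgbe port; error handling
 * is omitted for brevity and the rule may still be rejected by the parsers.
 *
 *     struct rte_flow_attr attr = { .ingress = 1 };
 *     struct rte_flow_item pattern[] = {
 *             { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *             { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *             { .type = RTE_FLOW_ITEM_TYPE_UDP },
 *             { .type = RTE_FLOW_ITEM_TYPE_END },
 *     };
 *     struct rte_flow_action_queue queue = { .index = 1 };
 *     struct rte_flow_action actions[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 *     struct rte_flow_error err;
 *     struct rte_flow *flow = NULL;
 *
 *     if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *             flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
 */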