1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2016 Intel Corporation
3  */
4
5 #include <sys/queue.h>
6 #include <stdio.h>
7 #include <errno.h>
8 #include <stdint.h>
9 #include <string.h>
10 #include <unistd.h>
11 #include <stdarg.h>
12 #include <inttypes.h>
13 #include <netinet/in.h>
14 #include <rte_byteorder.h>
15 #include <rte_common.h>
16 #include <rte_cycles.h>
17
18 #include <rte_interrupts.h>
19 #include <rte_log.h>
20 #include <rte_debug.h>
21 #include <rte_pci.h>
22 #include <rte_atomic.h>
23 #include <rte_branch_prediction.h>
24 #include <rte_memory.h>
25 #include <rte_eal.h>
26 #include <rte_alarm.h>
27 #include <rte_ether.h>
28 #include <rte_ethdev_driver.h>
29 #include <rte_malloc.h>
30 #include <rte_random.h>
31 #include <rte_dev.h>
32 #include <rte_hash_crc.h>
33 #include <rte_flow.h>
34 #include <rte_flow_driver.h>
35
36 #include "ixgbe_logs.h"
37 #include "base/ixgbe_api.h"
38 #include "base/ixgbe_vf.h"
39 #include "base/ixgbe_common.h"
40 #include "ixgbe_ethdev.h"
41 #include "ixgbe_bypass.h"
42 #include "ixgbe_rxtx.h"
43 #include "base/ixgbe_type.h"
44 #include "base/ixgbe_phy.h"
45 #include "rte_pmd_ixgbe.h"
46
47
48 #define IXGBE_MIN_N_TUPLE_PRIO 1
49 #define IXGBE_MAX_N_TUPLE_PRIO 7
50 #define IXGBE_MAX_FLX_SOURCE_OFF 62
51
52 /* ntuple filter list structure */
53 struct ixgbe_ntuple_filter_ele {
54         TAILQ_ENTRY(ixgbe_ntuple_filter_ele) entries;
55         struct rte_eth_ntuple_filter filter_info;
56 };
57 /* ethertype filter list structure */
58 struct ixgbe_ethertype_filter_ele {
59         TAILQ_ENTRY(ixgbe_ethertype_filter_ele) entries;
60         struct rte_eth_ethertype_filter filter_info;
61 };
62 /* syn filter list structure */
63 struct ixgbe_eth_syn_filter_ele {
64         TAILQ_ENTRY(ixgbe_eth_syn_filter_ele) entries;
65         struct rte_eth_syn_filter filter_info;
66 };
67 /* fdir filter list structure */
68 struct ixgbe_fdir_rule_ele {
69         TAILQ_ENTRY(ixgbe_fdir_rule_ele) entries;
70         struct ixgbe_fdir_rule filter_info;
71 };
72 /* l2_tunnel filter list structure */
73 struct ixgbe_eth_l2_tunnel_conf_ele {
74         TAILQ_ENTRY(ixgbe_eth_l2_tunnel_conf_ele) entries;
75         struct rte_eth_l2_tunnel_conf filter_info;
76 };
77 /* rss filter list structure */
78 struct ixgbe_rss_conf_ele {
79         TAILQ_ENTRY(ixgbe_rss_conf_ele) entries;
80         struct ixgbe_rte_flow_rss_conf filter_info;
81 };
82 /* ixgbe_flow memory list structure */
83 struct ixgbe_flow_mem {
84         TAILQ_ENTRY(ixgbe_flow_mem) entries;
85         struct rte_flow *flow;
86 };
87
88 TAILQ_HEAD(ixgbe_ntuple_filter_list, ixgbe_ntuple_filter_ele);
89 TAILQ_HEAD(ixgbe_ethertype_filter_list, ixgbe_ethertype_filter_ele);
90 TAILQ_HEAD(ixgbe_syn_filter_list, ixgbe_eth_syn_filter_ele);
91 TAILQ_HEAD(ixgbe_fdir_rule_filter_list, ixgbe_fdir_rule_ele);
92 TAILQ_HEAD(ixgbe_l2_tunnel_filter_list, ixgbe_eth_l2_tunnel_conf_ele);
93 TAILQ_HEAD(ixgbe_rss_filter_list, ixgbe_rss_conf_ele);
94 TAILQ_HEAD(ixgbe_flow_mem_list, ixgbe_flow_mem);
95
96 static struct ixgbe_ntuple_filter_list filter_ntuple_list;
97 static struct ixgbe_ethertype_filter_list filter_ethertype_list;
98 static struct ixgbe_syn_filter_list filter_syn_list;
99 static struct ixgbe_fdir_rule_filter_list filter_fdir_list;
100 static struct ixgbe_l2_tunnel_filter_list filter_l2_tunnel_list;
101 static struct ixgbe_rss_filter_list filter_rss_list;
102 static struct ixgbe_flow_mem_list ixgbe_flow_list;
103
104 /**
105  * An endless loop will never happen given the assumptions below:
106  * 1. there is at least one not void item (the END item);
107  * 2. cur is before END.
108  */
109 static inline
110 const struct rte_flow_item *next_no_void_pattern(
111                 const struct rte_flow_item pattern[],
112                 const struct rte_flow_item *cur)
113 {
114         const struct rte_flow_item *next =
115                 cur ? cur + 1 : &pattern[0];
116         while (1) {
117                 if (next->type != RTE_FLOW_ITEM_TYPE_VOID)
118                         return next;
119                 next++;
120         }
121 }
122
123 static inline
124 const struct rte_flow_action *next_no_void_action(
125                 const struct rte_flow_action actions[],
126                 const struct rte_flow_action *cur)
127 {
128         const struct rte_flow_action *next =
129                 cur ? cur + 1 : &actions[0];
130         while (1) {
131                 if (next->type != RTE_FLOW_ACTION_TYPE_VOID)
132                         return next;
133                 next++;
134         }
135 }
136
137 /**
138  * Please be aware that there is an assumption for all the parsers:
139  * rte_flow_item uses big endian, while rte_flow_attr and
140  * rte_flow_action use CPU byte order.
141  * Because the pattern is used to describe packets,
142  * they are normally expressed in network byte order.
143  */
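
/*
 * Illustrative note (not part of this driver): since item specs and masks
 * are in network byte order while attr/actions use CPU order, an
 * application filling a spec converts multi-byte fields explicitly.
 * A minimal sketch with placeholder values (0xC0A80114 is 192.168.1.20;
 * next_proto_id is a single byte, so no swap is needed):
 *
 *     struct rte_flow_item_ipv4 ip_spec = {
 *             .hdr = {
 *                     .src_addr = rte_cpu_to_be_32(0xC0A80114),
 *                     .next_proto_id = IPPROTO_UDP,
 *             },
 *     };
 *     struct rte_flow_item_udp udp_spec = {
 *             .hdr = { .dst_port = rte_cpu_to_be_16(80) },
 *     };
 */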
144
145 /**
146  * Parse the rule to see if it is an n-tuple rule.
147  * And get the n-tuple filter info as well.
148  * pattern:
149  * The first not void item can be ETH or IPV4.
150  * The second not void item must be IPV4 if the first one is ETH.
151  * The third not void item must be UDP, TCP or SCTP.
152  * The next not void item must be END.
153  * action:
154  * The first not void action should be QUEUE.
155  * The next not void action should be END.
156  * pattern example:
157  * ITEM         Spec                    Mask
158  * ETH          NULL                    NULL
159  * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
160  *              dst_addr 192.167.3.50   0xFFFFFFFF
161  *              next_proto_id   17      0xFF
162  * UDP/TCP/     src_port        80      0xFFFF
163  * SCTP         dst_port        80      0xFFFF
164  * END
165  * other members in mask and spec should be set to 0x00.
166  * item->last should be NULL.
167  *
168  * Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY.
169  *
170  */
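
/*
 * Illustrative sketch (not part of this driver): roughly how an application
 * could express the example rule above through the generic rte_flow API.
 * The port_id, queue index and addresses below are placeholders.
 *
 *     struct rte_flow_attr attr = { .ingress = 1, .priority = 1 };
 *     struct rte_flow_item_ipv4 ip_spec = { .hdr = {
 *             .src_addr = rte_cpu_to_be_32(0xC0A80114),
 *             .dst_addr = rte_cpu_to_be_32(0xC0A70332),
 *             .next_proto_id = IPPROTO_UDP } };
 *     struct rte_flow_item_ipv4 ip_mask = { .hdr = {
 *             .src_addr = UINT32_MAX, .dst_addr = UINT32_MAX,
 *             .next_proto_id = UINT8_MAX } };
 *     struct rte_flow_item_udp udp_spec = { .hdr = {
 *             .src_port = rte_cpu_to_be_16(80),
 *             .dst_port = rte_cpu_to_be_16(80) } };
 *     struct rte_flow_item_udp udp_mask = { .hdr = {
 *             .src_port = UINT16_MAX, .dst_port = UINT16_MAX } };
 *     struct rte_flow_action_queue queue = { .index = 1 };
 *     struct rte_flow_item pattern[] = {
 *             { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *             { .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *               .spec = &ip_spec, .mask = &ip_mask },
 *             { .type = RTE_FLOW_ITEM_TYPE_UDP,
 *               .spec = &udp_spec, .mask = &udp_mask },
 *             { .type = RTE_FLOW_ITEM_TYPE_END },
 *     };
 *     struct rte_flow_action actions[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 *     struct rte_flow_error err;
 *     struct rte_flow *flow = rte_flow_create(port_id, &attr, pattern,
 *                                             actions, &err);
 */
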
171 static int
172 cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
173                          const struct rte_flow_item pattern[],
174                          const struct rte_flow_action actions[],
175                          struct rte_eth_ntuple_filter *filter,
176                          struct rte_flow_error *error)
177 {
178         const struct rte_flow_item *item;
179         const struct rte_flow_action *act;
180         const struct rte_flow_item_ipv4 *ipv4_spec;
181         const struct rte_flow_item_ipv4 *ipv4_mask;
182         const struct rte_flow_item_tcp *tcp_spec;
183         const struct rte_flow_item_tcp *tcp_mask;
184         const struct rte_flow_item_udp *udp_spec;
185         const struct rte_flow_item_udp *udp_mask;
186         const struct rte_flow_item_sctp *sctp_spec;
187         const struct rte_flow_item_sctp *sctp_mask;
188         const struct rte_flow_item_eth *eth_spec;
189         const struct rte_flow_item_eth *eth_mask;
190         const struct rte_flow_item_vlan *vlan_spec;
191         const struct rte_flow_item_vlan *vlan_mask;
192         struct rte_flow_item_eth eth_null;
193         struct rte_flow_item_vlan vlan_null;
194
195         if (!pattern) {
196                 rte_flow_error_set(error,
197                         EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
198                         NULL, "NULL pattern.");
199                 return -rte_errno;
200         }
201
202         if (!actions) {
203                 rte_flow_error_set(error, EINVAL,
204                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
205                                    NULL, "NULL action.");
206                 return -rte_errno;
207         }
208         if (!attr) {
209                 rte_flow_error_set(error, EINVAL,
210                                    RTE_FLOW_ERROR_TYPE_ATTR,
211                                    NULL, "NULL attribute.");
212                 return -rte_errno;
213         }
214
215         memset(&eth_null, 0, sizeof(struct rte_flow_item_eth));
216         memset(&vlan_null, 0, sizeof(struct rte_flow_item_vlan));
217
218 #ifdef RTE_LIBRTE_SECURITY
219         /**
220          *  Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY
221          */
222         act = next_no_void_action(actions, NULL);
223         if (act->type == RTE_FLOW_ACTION_TYPE_SECURITY) {
224                 const void *conf = act->conf;
225                 /* check if the next not void item is END */
226                 act = next_no_void_action(actions, act);
227                 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
228                         memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
229                         rte_flow_error_set(error, EINVAL,
230                                 RTE_FLOW_ERROR_TYPE_ACTION,
231                                 act, "Not supported action.");
232                         return -rte_errno;
233                 }
234
235                 /* get the IP pattern*/
236                 item = next_no_void_pattern(pattern, NULL);
237                 while (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
238                                 item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
239                         if (item->last ||
240                                         item->type == RTE_FLOW_ITEM_TYPE_END) {
241                                 rte_flow_error_set(error, EINVAL,
242                                         RTE_FLOW_ERROR_TYPE_ITEM,
243                                         item, "IP pattern missing.");
244                                 return -rte_errno;
245                         }
246                         item = next_no_void_pattern(pattern, item);
247                 }
248
249                 filter->proto = IPPROTO_ESP;
250                 return ixgbe_crypto_add_ingress_sa_from_flow(conf, item->spec,
251                                         item->type == RTE_FLOW_ITEM_TYPE_IPV6);
252         }
253 #endif
254
255         /* the first not void item can be MAC or IPv4 */
256         item = next_no_void_pattern(pattern, NULL);
257
258         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
259             item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
260                 rte_flow_error_set(error, EINVAL,
261                         RTE_FLOW_ERROR_TYPE_ITEM,
262                         item, "Not supported by ntuple filter");
263                 return -rte_errno;
264         }
265         /* Skip Ethernet */
266         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
267                 eth_spec = item->spec;
268                 eth_mask = item->mask;
269                 /*Not supported last point for range*/
270                 if (item->last) {
271                         rte_flow_error_set(error,
272                           EINVAL,
273                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
274                           item, "Not supported last point for range");
275                         return -rte_errno;
276
277                 }
278                 /* if the first item is MAC, the content should be NULL */
279                 if ((item->spec || item->mask) &&
280                         (memcmp(eth_spec, &eth_null,
281                                 sizeof(struct rte_flow_item_eth)) ||
282                          memcmp(eth_mask, &eth_null,
283                                 sizeof(struct rte_flow_item_eth)))) {
284                         rte_flow_error_set(error, EINVAL,
285                                 RTE_FLOW_ERROR_TYPE_ITEM,
286                                 item, "Not supported by ntuple filter");
287                         return -rte_errno;
288                 }
289                 /* check if the next not void item is IPv4 or Vlan */
290                 item = next_no_void_pattern(pattern, item);
291                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
292                         item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
293                         rte_flow_error_set(error,
294                           EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
295                           item, "Not supported by ntuple filter");
296                           return -rte_errno;
297                 }
298         }
299
300         if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
301                 vlan_spec = item->spec;
302                 vlan_mask = item->mask;
303                 /*Not supported last point for range*/
304                 if (item->last) {
305                         rte_flow_error_set(error,
306                           EINVAL,
307                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
308                           item, "Not supported last point for range");
309                         return -rte_errno;
310                 }
311                 /* the content should be NULL */
312                 if ((item->spec || item->mask) &&
313                         (memcmp(vlan_spec, &vlan_null,
314                                 sizeof(struct rte_flow_item_vlan)) ||
315                          memcmp(vlan_mask, &vlan_null,
316                                 sizeof(struct rte_flow_item_vlan)))) {
317
318                         rte_flow_error_set(error, EINVAL,
319                                 RTE_FLOW_ERROR_TYPE_ITEM,
320                                 item, "Not supported by ntuple filter");
321                         return -rte_errno;
322                 }
323                 /* check if the next not void item is IPv4 */
324                 item = next_no_void_pattern(pattern, item);
325                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
326                         rte_flow_error_set(error,
327                           EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
328                           item, "Not supported by ntuple filter");
329                         return -rte_errno;
330                 }
331         }
332
333         if (item->mask) {
334                 /* get the IPv4 info */
335                 if (!item->spec || !item->mask) {
336                         rte_flow_error_set(error, EINVAL,
337                                 RTE_FLOW_ERROR_TYPE_ITEM,
338                                 item, "Invalid ntuple mask");
339                         return -rte_errno;
340                 }
341                 /*Not supported last point for range*/
342                 if (item->last) {
343                         rte_flow_error_set(error, EINVAL,
344                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
345                                 item, "Not supported last point for range");
346                         return -rte_errno;
347                 }
348
349                 ipv4_mask = item->mask;
350                 /**
351                  * Only support src & dst addresses, protocol,
352                  * others should be masked.
353                  */
354                 if (ipv4_mask->hdr.version_ihl ||
355                     ipv4_mask->hdr.type_of_service ||
356                     ipv4_mask->hdr.total_length ||
357                     ipv4_mask->hdr.packet_id ||
358                     ipv4_mask->hdr.fragment_offset ||
359                     ipv4_mask->hdr.time_to_live ||
360                     ipv4_mask->hdr.hdr_checksum) {
361                         rte_flow_error_set(error,
362                                 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
363                                 item, "Not supported by ntuple filter");
364                         return -rte_errno;
365                 }
366                 if ((ipv4_mask->hdr.src_addr != 0 &&
367                         ipv4_mask->hdr.src_addr != UINT32_MAX) ||
368                         (ipv4_mask->hdr.dst_addr != 0 &&
369                         ipv4_mask->hdr.dst_addr != UINT32_MAX) ||
370                         (ipv4_mask->hdr.next_proto_id != UINT8_MAX &&
371                         ipv4_mask->hdr.next_proto_id != 0)) {
372                         rte_flow_error_set(error,
373                                 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
374                                 item, "Not supported by ntuple filter");
375                         return -rte_errno;
376                 }
377
378                 filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
379                 filter->src_ip_mask = ipv4_mask->hdr.src_addr;
380                 filter->proto_mask  = ipv4_mask->hdr.next_proto_id;
381
382                 ipv4_spec = item->spec;
383                 filter->dst_ip = ipv4_spec->hdr.dst_addr;
384                 filter->src_ip = ipv4_spec->hdr.src_addr;
385                 filter->proto  = ipv4_spec->hdr.next_proto_id;
386         }
387
388         /* check if the next not void item is TCP or UDP */
389         item = next_no_void_pattern(pattern, item);
390         if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
391             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
392             item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
393             item->type != RTE_FLOW_ITEM_TYPE_END) {
394                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
395                 rte_flow_error_set(error, EINVAL,
396                         RTE_FLOW_ERROR_TYPE_ITEM,
397                         item, "Not supported by ntuple filter");
398                 return -rte_errno;
399         }
400
401         if ((item->type != RTE_FLOW_ITEM_TYPE_END) &&
402                 (!item->spec && !item->mask)) {
403                 goto action;
404         }
405
406         /* get the TCP/UDP/SCTP info */
407         if (item->type != RTE_FLOW_ITEM_TYPE_END &&
408                 (!item->spec || !item->mask)) {
409                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
410                 rte_flow_error_set(error, EINVAL,
411                         RTE_FLOW_ERROR_TYPE_ITEM,
412                         item, "Invalid ntuple mask");
413                 return -rte_errno;
414         }
415
416         /*Not supported last point for range*/
417         if (item->last) {
418                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
419                 rte_flow_error_set(error, EINVAL,
420                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
421                         item, "Not supported last point for range");
422                 return -rte_errno;
423
424         }
425
426         if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
427                 tcp_mask = item->mask;
428
429                 /**
430                  * Only support src & dst ports, tcp flags,
431                  * others should be masked.
432                  */
433                 if (tcp_mask->hdr.sent_seq ||
434                     tcp_mask->hdr.recv_ack ||
435                     tcp_mask->hdr.data_off ||
436                     tcp_mask->hdr.rx_win ||
437                     tcp_mask->hdr.cksum ||
438                     tcp_mask->hdr.tcp_urp) {
439                         memset(filter, 0,
440                                 sizeof(struct rte_eth_ntuple_filter));
441                         rte_flow_error_set(error, EINVAL,
442                                 RTE_FLOW_ERROR_TYPE_ITEM,
443                                 item, "Not supported by ntuple filter");
444                         return -rte_errno;
445                 }
446                 if ((tcp_mask->hdr.src_port != 0 &&
447                         tcp_mask->hdr.src_port != UINT16_MAX) ||
448                         (tcp_mask->hdr.dst_port != 0 &&
449                         tcp_mask->hdr.dst_port != UINT16_MAX)) {
450                         rte_flow_error_set(error,
451                                 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
452                                 item, "Not supported by ntuple filter");
453                         return -rte_errno;
454                 }
455
456                 filter->dst_port_mask  = tcp_mask->hdr.dst_port;
457                 filter->src_port_mask  = tcp_mask->hdr.src_port;
458                 if (tcp_mask->hdr.tcp_flags == 0xFF) {
459                         filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
460                 } else if (!tcp_mask->hdr.tcp_flags) {
461                         filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
462                 } else {
463                         memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
464                         rte_flow_error_set(error, EINVAL,
465                                 RTE_FLOW_ERROR_TYPE_ITEM,
466                                 item, "Not supported by ntuple filter");
467                         return -rte_errno;
468                 }
469
470                 tcp_spec = item->spec;
471                 filter->dst_port  = tcp_spec->hdr.dst_port;
472                 filter->src_port  = tcp_spec->hdr.src_port;
473                 filter->tcp_flags = tcp_spec->hdr.tcp_flags;
474         } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
475                 udp_mask = item->mask;
476
477                 /**
478                  * Only support src & dst ports,
479                  * others should be masked.
480                  */
481                 if (udp_mask->hdr.dgram_len ||
482                     udp_mask->hdr.dgram_cksum) {
483                         memset(filter, 0,
484                                 sizeof(struct rte_eth_ntuple_filter));
485                         rte_flow_error_set(error, EINVAL,
486                                 RTE_FLOW_ERROR_TYPE_ITEM,
487                                 item, "Not supported by ntuple filter");
488                         return -rte_errno;
489                 }
490                 if ((udp_mask->hdr.src_port != 0 &&
491                         udp_mask->hdr.src_port != UINT16_MAX) ||
492                         (udp_mask->hdr.dst_port != 0 &&
493                         udp_mask->hdr.dst_port != UINT16_MAX)) {
494                         rte_flow_error_set(error,
495                                 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
496                                 item, "Not supported by ntuple filter");
497                         return -rte_errno;
498                 }
499
500                 filter->dst_port_mask = udp_mask->hdr.dst_port;
501                 filter->src_port_mask = udp_mask->hdr.src_port;
502
503                 udp_spec = item->spec;
504                 filter->dst_port = udp_spec->hdr.dst_port;
505                 filter->src_port = udp_spec->hdr.src_port;
506         } else if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
507                 sctp_mask = item->mask;
508
509                 /**
510                  * Only support src & dst ports,
511                  * others should be masked.
512                  */
513                 if (sctp_mask->hdr.tag ||
514                     sctp_mask->hdr.cksum) {
515                         memset(filter, 0,
516                                 sizeof(struct rte_eth_ntuple_filter));
517                         rte_flow_error_set(error, EINVAL,
518                                 RTE_FLOW_ERROR_TYPE_ITEM,
519                                 item, "Not supported by ntuple filter");
520                         return -rte_errno;
521                 }
522
523                 filter->dst_port_mask = sctp_mask->hdr.dst_port;
524                 filter->src_port_mask = sctp_mask->hdr.src_port;
525
526                 sctp_spec = item->spec;
527                 filter->dst_port = sctp_spec->hdr.dst_port;
528                 filter->src_port = sctp_spec->hdr.src_port;
529         } else {
530                 goto action;
531         }
532
533         /* check if the next not void item is END */
534         item = next_no_void_pattern(pattern, item);
535         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
536                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
537                 rte_flow_error_set(error, EINVAL,
538                         RTE_FLOW_ERROR_TYPE_ITEM,
539                         item, "Not supported by ntuple filter");
540                 return -rte_errno;
541         }
542
543 action:
544
545         /**
546          * n-tuple only supports forwarding,
547          * check if the first not void action is QUEUE.
548          */
549         act = next_no_void_action(actions, NULL);
550         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
551                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
552                 rte_flow_error_set(error, EINVAL,
553                         RTE_FLOW_ERROR_TYPE_ACTION,
554                         item, "Not supported action.");
555                 return -rte_errno;
556         }
557         filter->queue =
558                 ((const struct rte_flow_action_queue *)act->conf)->index;
559
560         /* check if the next not void item is END */
561         act = next_no_void_action(actions, act);
562         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
563                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
564                 rte_flow_error_set(error, EINVAL,
565                         RTE_FLOW_ERROR_TYPE_ACTION,
566                         act, "Not supported action.");
567                 return -rte_errno;
568         }
569
570         /* parse attr */
571         /* must be input direction */
572         if (!attr->ingress) {
573                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
574                 rte_flow_error_set(error, EINVAL,
575                                    RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
576                                    attr, "Only support ingress.");
577                 return -rte_errno;
578         }
579
580         /* not supported */
581         if (attr->egress) {
582                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
583                 rte_flow_error_set(error, EINVAL,
584                                    RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
585                                    attr, "Not support egress.");
586                 return -rte_errno;
587         }
588
589         /* not supported */
590         if (attr->transfer) {
591                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
592                 rte_flow_error_set(error, EINVAL,
593                                    RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
594                                    attr, "No support for transfer.");
595                 return -rte_errno;
596         }
597
598         if (attr->priority > 0xFFFF) {
599                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
600                 rte_flow_error_set(error, EINVAL,
601                                    RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
602                                    attr, "Error priority.");
603                 return -rte_errno;
604         }
605         filter->priority = (uint16_t)attr->priority;
606         if (attr->priority < IXGBE_MIN_N_TUPLE_PRIO ||
607             attr->priority > IXGBE_MAX_N_TUPLE_PRIO)
608                 filter->priority = 1;
609
610         return 0;
611 }
612
613 /* a specific function for ixgbe because the flags are specific */
614 static int
615 ixgbe_parse_ntuple_filter(struct rte_eth_dev *dev,
616                           const struct rte_flow_attr *attr,
617                           const struct rte_flow_item pattern[],
618                           const struct rte_flow_action actions[],
619                           struct rte_eth_ntuple_filter *filter,
620                           struct rte_flow_error *error)
621 {
622         int ret;
623         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
624
625         MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
626
627         ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);
628
629         if (ret)
630                 return ret;
631
632 #ifdef RTE_LIBRTE_SECURITY
633         /* An ESP flow is not really a flow */
634         if (filter->proto == IPPROTO_ESP)
635                 return 0;
636 #endif
637
638         /* Ixgbe doesn't support tcp flags. */
639         if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
640                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
641                 rte_flow_error_set(error, EINVAL,
642                                    RTE_FLOW_ERROR_TYPE_ITEM,
643                                    NULL, "Not supported by ntuple filter");
644                 return -rte_errno;
645         }
646
647         /* Ixgbe doesn't support many priorities. */
648         if (filter->priority < IXGBE_MIN_N_TUPLE_PRIO ||
649             filter->priority > IXGBE_MAX_N_TUPLE_PRIO) {
650                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
651                 rte_flow_error_set(error, EINVAL,
652                         RTE_FLOW_ERROR_TYPE_ITEM,
653                         NULL, "Priority not supported by ntuple filter");
654                 return -rte_errno;
655         }
656
657         if (filter->queue >= dev->data->nb_rx_queues)
658                 return -rte_errno;
659
660         /* fixed value for ixgbe */
661         filter->flags = RTE_5TUPLE_FLAGS;
662         return 0;
663 }
664
665 /**
666  * Parse the rule to see if it is an ethertype rule.
667  * And get the ethertype filter info as well.
668  * pattern:
669  * The first not void item must be ETH.
670  * The next not void item must be END.
671  * action:
672  * The first not void action should be QUEUE or DROP.
673  * The next not void action should be END.
674  * pattern example:
675  * ITEM         Spec                    Mask
676  * ETH          type    0x0807          0xFFFF
677  * END
678  * other members in mask and spec should be set to 0x00.
679  * item->last should be NULL.
680  */
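
/*
 * Illustrative sketch (not part of this driver): the example above,
 * expressed through the generic rte_flow API; port_id and the queue
 * index are placeholders. Note that src/dst MAC stay unset (all-zero
 * mask) and the ethertype mask is all ones, as required below.
 *
 *     struct rte_flow_attr attr = { .ingress = 1 };
 *     struct rte_flow_item_eth eth_spec = {
 *             .type = rte_cpu_to_be_16(0x0807) };
 *     struct rte_flow_item_eth eth_mask = { .type = UINT16_MAX };
 *     struct rte_flow_action_queue queue = { .index = 1 };
 *     struct rte_flow_item pattern[] = {
 *             { .type = RTE_FLOW_ITEM_TYPE_ETH,
 *               .spec = &eth_spec, .mask = &eth_mask },
 *             { .type = RTE_FLOW_ITEM_TYPE_END },
 *     };
 *     struct rte_flow_action actions[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 *     struct rte_flow_error err;
 *     struct rte_flow *flow = rte_flow_create(port_id, &attr, pattern,
 *                                             actions, &err);
 */
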
681 static int
682 cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
683                             const struct rte_flow_item *pattern,
684                             const struct rte_flow_action *actions,
685                             struct rte_eth_ethertype_filter *filter,
686                             struct rte_flow_error *error)
687 {
688         const struct rte_flow_item *item;
689         const struct rte_flow_action *act;
690         const struct rte_flow_item_eth *eth_spec;
691         const struct rte_flow_item_eth *eth_mask;
692         const struct rte_flow_action_queue *act_q;
693
694         if (!pattern) {
695                 rte_flow_error_set(error, EINVAL,
696                                 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
697                                 NULL, "NULL pattern.");
698                 return -rte_errno;
699         }
700
701         if (!actions) {
702                 rte_flow_error_set(error, EINVAL,
703                                 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
704                                 NULL, "NULL action.");
705                 return -rte_errno;
706         }
707
708         if (!attr) {
709                 rte_flow_error_set(error, EINVAL,
710                                    RTE_FLOW_ERROR_TYPE_ATTR,
711                                    NULL, "NULL attribute.");
712                 return -rte_errno;
713         }
714
715         item = next_no_void_pattern(pattern, NULL);
716         /* The first non-void item should be MAC. */
717         if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
718                 rte_flow_error_set(error, EINVAL,
719                         RTE_FLOW_ERROR_TYPE_ITEM,
720                         item, "Not supported by ethertype filter");
721                 return -rte_errno;
722         }
723
724         /*Not supported last point for range*/
725         if (item->last) {
726                 rte_flow_error_set(error, EINVAL,
727                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
728                         item, "Not supported last point for range");
729                 return -rte_errno;
730         }
731
732         /* Get the MAC info. */
733         if (!item->spec || !item->mask) {
734                 rte_flow_error_set(error, EINVAL,
735                                 RTE_FLOW_ERROR_TYPE_ITEM,
736                                 item, "Not supported by ethertype filter");
737                 return -rte_errno;
738         }
739
740         eth_spec = item->spec;
741         eth_mask = item->mask;
742
743         /* Mask bits of source MAC address must be full of 0.
744          * Mask bits of destination MAC address must be full
745          * of 1 or full of 0.
746          */
747         if (!is_zero_ether_addr(&eth_mask->src) ||
748             (!is_zero_ether_addr(&eth_mask->dst) &&
749              !is_broadcast_ether_addr(&eth_mask->dst))) {
750                 rte_flow_error_set(error, EINVAL,
751                                 RTE_FLOW_ERROR_TYPE_ITEM,
752                                 item, "Invalid ether address mask");
753                 return -rte_errno;
754         }
755
756         if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
757                 rte_flow_error_set(error, EINVAL,
758                                 RTE_FLOW_ERROR_TYPE_ITEM,
759                                 item, "Invalid ethertype mask");
760                 return -rte_errno;
761         }
762
763         /* If mask bits of destination MAC address
764          * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
765          */
766         if (is_broadcast_ether_addr(&eth_mask->dst)) {
767                 filter->mac_addr = eth_spec->dst;
768                 filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
769         } else {
770                 filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
771         }
772         filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
773
774         /* Check if the next non-void item is END. */
775         item = next_no_void_pattern(pattern, item);
776         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
777                 rte_flow_error_set(error, EINVAL,
778                                 RTE_FLOW_ERROR_TYPE_ITEM,
779                                 item, "Not supported by ethertype filter.");
780                 return -rte_errno;
781         }
782
783         /* Parse action */
784
785         act = next_no_void_action(actions, NULL);
786         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
787             act->type != RTE_FLOW_ACTION_TYPE_DROP) {
788                 rte_flow_error_set(error, EINVAL,
789                                 RTE_FLOW_ERROR_TYPE_ACTION,
790                                 act, "Not supported action.");
791                 return -rte_errno;
792         }
793
794         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
795                 act_q = (const struct rte_flow_action_queue *)act->conf;
796                 filter->queue = act_q->index;
797         } else {
798                 filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
799         }
800
801         /* Check if the next non-void item is END */
802         act = next_no_void_action(actions, act);
803         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
804                 rte_flow_error_set(error, EINVAL,
805                                 RTE_FLOW_ERROR_TYPE_ACTION,
806                                 act, "Not supported action.");
807                 return -rte_errno;
808         }
809
810         /* Parse attr */
811         /* Must be input direction */
812         if (!attr->ingress) {
813                 rte_flow_error_set(error, EINVAL,
814                                 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
815                                 attr, "Only support ingress.");
816                 return -rte_errno;
817         }
818
819         /* Not supported */
820         if (attr->egress) {
821                 rte_flow_error_set(error, EINVAL,
822                                 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
823                                 attr, "Not support egress.");
824                 return -rte_errno;
825         }
826
827         /* Not supported */
828         if (attr->transfer) {
829                 rte_flow_error_set(error, EINVAL,
830                                 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
831                                 attr, "No support for transfer.");
832                 return -rte_errno;
833         }
834
835         /* Not supported */
836         if (attr->priority) {
837                 rte_flow_error_set(error, EINVAL,
838                                 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
839                                 attr, "Not support priority.");
840                 return -rte_errno;
841         }
842
843         /* Not supported */
844         if (attr->group) {
845                 rte_flow_error_set(error, EINVAL,
846                                 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
847                                 attr, "Not support group.");
848                 return -rte_errno;
849         }
850
851         return 0;
852 }
853
854 static int
855 ixgbe_parse_ethertype_filter(struct rte_eth_dev *dev,
856                                  const struct rte_flow_attr *attr,
857                              const struct rte_flow_item pattern[],
858                              const struct rte_flow_action actions[],
859                              struct rte_eth_ethertype_filter *filter,
860                              struct rte_flow_error *error)
861 {
862         int ret;
863         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
864
865         MAC_TYPE_FILTER_SUP(hw->mac.type);
866
867         ret = cons_parse_ethertype_filter(attr, pattern,
868                                         actions, filter, error);
869
870         if (ret)
871                 return ret;
872
873         /* Ixgbe doesn't support MAC address. */
874         if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
875                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
876                 rte_flow_error_set(error, EINVAL,
877                         RTE_FLOW_ERROR_TYPE_ITEM,
878                         NULL, "Not supported by ethertype filter");
879                 return -rte_errno;
880         }
881
882         if (filter->queue >= dev->data->nb_rx_queues) {
883                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
884                 rte_flow_error_set(error, EINVAL,
885                         RTE_FLOW_ERROR_TYPE_ITEM,
886                         NULL, "queue index too big");
887                 return -rte_errno;
888         }
889
890         if (filter->ether_type == ETHER_TYPE_IPv4 ||
891                 filter->ether_type == ETHER_TYPE_IPv6) {
892                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
893                 rte_flow_error_set(error, EINVAL,
894                         RTE_FLOW_ERROR_TYPE_ITEM,
895                         NULL, "IPv4/IPv6 not supported by ethertype filter");
896                 return -rte_errno;
897         }
898
899         if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
900                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
901                 rte_flow_error_set(error, EINVAL,
902                         RTE_FLOW_ERROR_TYPE_ITEM,
903                         NULL, "mac compare is unsupported");
904                 return -rte_errno;
905         }
906
907         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
908                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
909                 rte_flow_error_set(error, EINVAL,
910                         RTE_FLOW_ERROR_TYPE_ITEM,
911                         NULL, "drop option is unsupported");
912                 return -rte_errno;
913         }
914
915         return 0;
916 }
917
918 /**
919  * Parse the rule to see if it is a TCP SYN rule.
920  * And get the TCP SYN filter info as well.
921  * pattern:
922  * The first not void item must be ETH.
923  * The second not void item must be IPV4 or IPV6.
924  * The third not void item must be TCP.
925  * The next not void item must be END.
926  * action:
927  * The first not void action should be QUEUE.
928  * The next not void action should be END.
929  * pattern example:
930  * ITEM         Spec                    Mask
931  * ETH          NULL                    NULL
932  * IPV4/IPV6    NULL                    NULL
933  * TCP          tcp_flags       0x02    0x02
934  * END
935  * other members in mask and spec should be set to 0x00.
936  * item->last should be NULL.
937  */
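
/*
 * Illustrative sketch (not part of this driver): the TCP SYN example above
 * via the generic rte_flow API; port_id and the queue index are
 * placeholders. Note that the parser below expects the tcp_flags mask to
 * be exactly the SYN bit, with all other TCP fields left masked out.
 *
 *     struct rte_flow_attr attr = { .ingress = 1 };
 *     struct rte_flow_item_tcp tcp_spec = {
 *             .hdr = { .tcp_flags = TCP_SYN_FLAG } };
 *     struct rte_flow_item_tcp tcp_mask = {
 *             .hdr = { .tcp_flags = TCP_SYN_FLAG } };
 *     struct rte_flow_action_queue queue = { .index = 1 };
 *     struct rte_flow_item pattern[] = {
 *             { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *             { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *             { .type = RTE_FLOW_ITEM_TYPE_TCP,
 *               .spec = &tcp_spec, .mask = &tcp_mask },
 *             { .type = RTE_FLOW_ITEM_TYPE_END },
 *     };
 *     struct rte_flow_action actions[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 *     struct rte_flow_error err;
 *     struct rte_flow *flow = rte_flow_create(port_id, &attr, pattern,
 *                                             actions, &err);
 */
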
938 static int
939 cons_parse_syn_filter(const struct rte_flow_attr *attr,
940                                 const struct rte_flow_item pattern[],
941                                 const struct rte_flow_action actions[],
942                                 struct rte_eth_syn_filter *filter,
943                                 struct rte_flow_error *error)
944 {
945         const struct rte_flow_item *item;
946         const struct rte_flow_action *act;
947         const struct rte_flow_item_tcp *tcp_spec;
948         const struct rte_flow_item_tcp *tcp_mask;
949         const struct rte_flow_action_queue *act_q;
950
951         if (!pattern) {
952                 rte_flow_error_set(error, EINVAL,
953                                 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
954                                 NULL, "NULL pattern.");
955                 return -rte_errno;
956         }
957
958         if (!actions) {
959                 rte_flow_error_set(error, EINVAL,
960                                 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
961                                 NULL, "NULL action.");
962                 return -rte_errno;
963         }
964
965         if (!attr) {
966                 rte_flow_error_set(error, EINVAL,
967                                    RTE_FLOW_ERROR_TYPE_ATTR,
968                                    NULL, "NULL attribute.");
969                 return -rte_errno;
970         }
971
972
973         /* the first not void item should be MAC or IPv4 or IPv6 or TCP */
974         item = next_no_void_pattern(pattern, NULL);
975         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
976             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
977             item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
978             item->type != RTE_FLOW_ITEM_TYPE_TCP) {
979                 rte_flow_error_set(error, EINVAL,
980                                 RTE_FLOW_ERROR_TYPE_ITEM,
981                                 item, "Not supported by syn filter");
982                 return -rte_errno;
983         }
984         /*Not supported last point for range*/
985         if (item->last) {
986                 rte_flow_error_set(error, EINVAL,
987                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
988                         item, "Not supported last point for range");
989                 return -rte_errno;
990         }
991
992         /* Skip Ethernet */
993         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
994                 /* if the item is MAC, the content should be NULL */
995                 if (item->spec || item->mask) {
996                         rte_flow_error_set(error, EINVAL,
997                                 RTE_FLOW_ERROR_TYPE_ITEM,
998                                 item, "Invalid SYN address mask");
999                         return -rte_errno;
1000                 }
1001
1002                 /* check if the next not void item is IPv4 or IPv6 */
1003                 item = next_no_void_pattern(pattern, item);
1004                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
1005                     item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
1006                         rte_flow_error_set(error, EINVAL,
1007                                 RTE_FLOW_ERROR_TYPE_ITEM,
1008                                 item, "Not supported by syn filter");
1009                         return -rte_errno;
1010                 }
1011         }
1012
1013         /* Skip IP */
1014         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
1015             item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
1016                 /* if the item is IP, the content should be NULL */
1017                 if (item->spec || item->mask) {
1018                         rte_flow_error_set(error, EINVAL,
1019                                 RTE_FLOW_ERROR_TYPE_ITEM,
1020                                 item, "Invalid SYN mask");
1021                         return -rte_errno;
1022                 }
1023
1024                 /* check if the next not void item is TCP */
1025                 item = next_no_void_pattern(pattern, item);
1026                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
1027                         rte_flow_error_set(error, EINVAL,
1028                                 RTE_FLOW_ERROR_TYPE_ITEM,
1029                                 item, "Not supported by syn filter");
1030                         return -rte_errno;
1031                 }
1032         }
1033
1034         /* Get the TCP info. Only support SYN. */
1035         if (!item->spec || !item->mask) {
1036                 rte_flow_error_set(error, EINVAL,
1037                                 RTE_FLOW_ERROR_TYPE_ITEM,
1038                                 item, "Invalid SYN mask");
1039                 return -rte_errno;
1040         }
1041         /*Not supported last point for range*/
1042         if (item->last) {
1043                 rte_flow_error_set(error, EINVAL,
1044                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1045                         item, "Not supported last point for range");
1046                 return -rte_errno;
1047         }
1048
1049         tcp_spec = item->spec;
1050         tcp_mask = item->mask;
1051         if (!(tcp_spec->hdr.tcp_flags & TCP_SYN_FLAG) ||
1052             tcp_mask->hdr.src_port ||
1053             tcp_mask->hdr.dst_port ||
1054             tcp_mask->hdr.sent_seq ||
1055             tcp_mask->hdr.recv_ack ||
1056             tcp_mask->hdr.data_off ||
1057             tcp_mask->hdr.tcp_flags != TCP_SYN_FLAG ||
1058             tcp_mask->hdr.rx_win ||
1059             tcp_mask->hdr.cksum ||
1060             tcp_mask->hdr.tcp_urp) {
1061                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1062                 rte_flow_error_set(error, EINVAL,
1063                                 RTE_FLOW_ERROR_TYPE_ITEM,
1064                                 item, "Not supported by syn filter");
1065                 return -rte_errno;
1066         }
1067
1068         /* check if the next not void item is END */
1069         item = next_no_void_pattern(pattern, item);
1070         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1071                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1072                 rte_flow_error_set(error, EINVAL,
1073                                 RTE_FLOW_ERROR_TYPE_ITEM,
1074                                 item, "Not supported by syn filter");
1075                 return -rte_errno;
1076         }
1077
1078         /* check if the first not void action is QUEUE. */
1079         act = next_no_void_action(actions, NULL);
1080         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
1081                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1082                 rte_flow_error_set(error, EINVAL,
1083                                 RTE_FLOW_ERROR_TYPE_ACTION,
1084                                 act, "Not supported action.");
1085                 return -rte_errno;
1086         }
1087
1088         act_q = (const struct rte_flow_action_queue *)act->conf;
1089         filter->queue = act_q->index;
1090         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
1091                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1092                 rte_flow_error_set(error, EINVAL,
1093                                 RTE_FLOW_ERROR_TYPE_ACTION,
1094                                 act, "Not supported action.");
1095                 return -rte_errno;
1096         }
1097
1098         /* check if the next not void item is END */
1099         act = next_no_void_action(actions, act);
1100         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1101                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1102                 rte_flow_error_set(error, EINVAL,
1103                                 RTE_FLOW_ERROR_TYPE_ACTION,
1104                                 act, "Not supported action.");
1105                 return -rte_errno;
1106         }
1107
1108         /* parse attr */
1109         /* must be input direction */
1110         if (!attr->ingress) {
1111                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1112                 rte_flow_error_set(error, EINVAL,
1113                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1114                         attr, "Only support ingress.");
1115                 return -rte_errno;
1116         }
1117
1118         /* not supported */
1119         if (attr->egress) {
1120                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1121                 rte_flow_error_set(error, EINVAL,
1122                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1123                         attr, "Not support egress.");
1124                 return -rte_errno;
1125         }
1126
1127         /* not supported */
1128         if (attr->transfer) {
1129                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1130                 rte_flow_error_set(error, EINVAL,
1131                         RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
1132                         attr, "No support for transfer.");
1133                 return -rte_errno;
1134         }
1135
1136         /* Support 2 priorities, the lowest or highest. */
1137         if (!attr->priority) {
1138                 filter->hig_pri = 0;
1139         } else if (attr->priority == (uint32_t)~0U) {
1140                 filter->hig_pri = 1;
1141         } else {
1142                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1143                 rte_flow_error_set(error, EINVAL,
1144                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1145                         attr, "Not support priority.");
1146                 return -rte_errno;
1147         }
1148
1149         return 0;
1150 }
1151
1152 static int
1153 ixgbe_parse_syn_filter(struct rte_eth_dev *dev,
1154                                  const struct rte_flow_attr *attr,
1155                              const struct rte_flow_item pattern[],
1156                              const struct rte_flow_action actions[],
1157                              struct rte_eth_syn_filter *filter,
1158                              struct rte_flow_error *error)
1159 {
1160         int ret;
1161         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1162
1163         MAC_TYPE_FILTER_SUP(hw->mac.type);
1164
1165         ret = cons_parse_syn_filter(attr, pattern,
1166                                         actions, filter, error);
1167
1168         if (ret)
1169                 return ret;
1170
1171         if (filter->queue >= dev->data->nb_rx_queues)
1172                 return -rte_errno;
1173
1174         return 0;
1175 }
1176
1177 /**
1178  * Parse the rule to see if it is an L2 tunnel rule.
1179  * And get the L2 tunnel filter info as well.
1180  * Only E-tag is supported now.
1181  * pattern:
1182  * The first not void item can be E_TAG.
1183  * The next not void item must be END.
1184  * action:
1185  * The first not void action should be VF or PF.
1186  * The next not void action should be END.
1187  * pattern example:
1188  * ITEM         Spec                    Mask
1189  * E_TAG        grp             0x1     0x3
1190  *              e_cid_base      0x309   0xFFF
1191  * END
1192  * other members in mask and spec should be set to 0x00.
1193  * item->last should be NULL.
1194  */
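
/*
 * Illustrative sketch (not part of this driver): the E-tag example above
 * via the generic rte_flow API, redirecting matched traffic to VF 0;
 * port_id and the VF id are placeholders. grp and e_cid_base share the
 * 14-bit rsvd_grp_ecid_b field (grp sits in the two bits above the
 * 12-bit e_cid_base), which is why the mask is 0x3FFF.
 *
 *     struct rte_flow_attr attr = { .ingress = 1 };
 *     struct rte_flow_item_e_tag e_tag_spec = {
 *             .rsvd_grp_ecid_b = rte_cpu_to_be_16((0x1 << 12) | 0x309) };
 *     struct rte_flow_item_e_tag e_tag_mask = {
 *             .rsvd_grp_ecid_b = rte_cpu_to_be_16(0x3FFF) };
 *     struct rte_flow_action_vf vf = { .id = 0 };
 *     struct rte_flow_item pattern[] = {
 *             { .type = RTE_FLOW_ITEM_TYPE_E_TAG,
 *               .spec = &e_tag_spec, .mask = &e_tag_mask },
 *             { .type = RTE_FLOW_ITEM_TYPE_END },
 *     };
 *     struct rte_flow_action actions[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_VF, .conf = &vf },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 *     struct rte_flow_error err;
 *     struct rte_flow *flow = rte_flow_create(port_id, &attr, pattern,
 *                                             actions, &err);
 */
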
1195 static int
1196 cons_parse_l2_tn_filter(struct rte_eth_dev *dev,
1197                         const struct rte_flow_attr *attr,
1198                         const struct rte_flow_item pattern[],
1199                         const struct rte_flow_action actions[],
1200                         struct rte_eth_l2_tunnel_conf *filter,
1201                         struct rte_flow_error *error)
1202 {
1203         const struct rte_flow_item *item;
1204         const struct rte_flow_item_e_tag *e_tag_spec;
1205         const struct rte_flow_item_e_tag *e_tag_mask;
1206         const struct rte_flow_action *act;
1207         const struct rte_flow_action_vf *act_vf;
1208         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1209
1210         if (!pattern) {
1211                 rte_flow_error_set(error, EINVAL,
1212                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1213                         NULL, "NULL pattern.");
1214                 return -rte_errno;
1215         }
1216
1217         if (!actions) {
1218                 rte_flow_error_set(error, EINVAL,
1219                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1220                                    NULL, "NULL action.");
1221                 return -rte_errno;
1222         }
1223
1224         if (!attr) {
1225                 rte_flow_error_set(error, EINVAL,
1226                                    RTE_FLOW_ERROR_TYPE_ATTR,
1227                                    NULL, "NULL attribute.");
1228                 return -rte_errno;
1229         }
1230
1231         /* The first not void item should be e-tag. */
1232         item = next_no_void_pattern(pattern, NULL);
1233         if (item->type != RTE_FLOW_ITEM_TYPE_E_TAG) {
1234                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1235                 rte_flow_error_set(error, EINVAL,
1236                         RTE_FLOW_ERROR_TYPE_ITEM,
1237                         item, "Not supported by L2 tunnel filter");
1238                 return -rte_errno;
1239         }
1240
1241         if (!item->spec || !item->mask) {
1242                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1243                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
1244                         item, "Not supported by L2 tunnel filter");
1245                 return -rte_errno;
1246         }
1247
1248         /*Not supported last point for range*/
1249         if (item->last) {
1250                 rte_flow_error_set(error, EINVAL,
1251                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1252                         item, "Not supported last point for range");
1253                 return -rte_errno;
1254         }
1255
1256         e_tag_spec = item->spec;
1257         e_tag_mask = item->mask;
1258
1259         /* Only care about GRP and E cid base. */
1260         if (e_tag_mask->epcp_edei_in_ecid_b ||
1261             e_tag_mask->in_ecid_e ||
1262             e_tag_mask->ecid_e ||
1263             e_tag_mask->rsvd_grp_ecid_b != rte_cpu_to_be_16(0x3FFF)) {
1264                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1265                 rte_flow_error_set(error, EINVAL,
1266                         RTE_FLOW_ERROR_TYPE_ITEM,
1267                         item, "Not supported by L2 tunnel filter");
1268                 return -rte_errno;
1269         }
1270
1271         filter->l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
1272         /**
1273          * grp and e_cid_base are bit fields and only use 14 bits.
1274          * e-tag id is taken as little endian by HW.
1275          */
1276         filter->tunnel_id = rte_be_to_cpu_16(e_tag_spec->rsvd_grp_ecid_b);
1277
1278         /* check if the next not void item is END */
1279         item = next_no_void_pattern(pattern, item);
1280         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1281                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1282                 rte_flow_error_set(error, EINVAL,
1283                         RTE_FLOW_ERROR_TYPE_ITEM,
1284                         item, "Not supported by L2 tunnel filter");
1285                 return -rte_errno;
1286         }
1287
1288         /* parse attr */
1289         /* must be input direction */
1290         if (!attr->ingress) {
1291                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1292                 rte_flow_error_set(error, EINVAL,
1293                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1294                         attr, "Only support ingress.");
1295                 return -rte_errno;
1296         }
1297
1298         /* not supported */
1299         if (attr->egress) {
1300                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1301                 rte_flow_error_set(error, EINVAL,
1302                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1303                         attr, "Not support egress.");
1304                 return -rte_errno;
1305         }
1306
1307         /* not supported */
1308         if (attr->transfer) {
1309                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1310                 rte_flow_error_set(error, EINVAL,
1311                         RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
1312                         attr, "No support for transfer.");
1313                 return -rte_errno;
1314         }
1315
1316         /* not supported */
1317         if (attr->priority) {
1318                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1319                 rte_flow_error_set(error, EINVAL,
1320                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1321                         attr, "Not support priority.");
1322                 return -rte_errno;
1323         }
1324
1325         /* check if the first not void action is VF or PF. */
1326         act = next_no_void_action(actions, NULL);
1327         if (act->type != RTE_FLOW_ACTION_TYPE_VF &&
1328                         act->type != RTE_FLOW_ACTION_TYPE_PF) {
1329                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1330                 rte_flow_error_set(error, EINVAL,
1331                         RTE_FLOW_ERROR_TYPE_ACTION,
1332                         act, "Not supported action.");
1333                 return -rte_errno;
1334         }
1335
1336         if (act->type == RTE_FLOW_ACTION_TYPE_VF) {
1337                 act_vf = (const struct rte_flow_action_vf *)act->conf;
1338                 filter->pool = act_vf->id;
1339         } else {
1340                 filter->pool = pci_dev->max_vfs;
1341         }
1342
1343         /* check if the next not void action is END */
1344         act = next_no_void_action(actions, act);
1345         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1346                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1347                 rte_flow_error_set(error, EINVAL,
1348                         RTE_FLOW_ERROR_TYPE_ACTION,
1349                         act, "Not supported action.");
1350                 return -rte_errno;
1351         }
1352
1353         return 0;
1354 }
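
/*
 * A minimal illustrative sketch (not part of the driver), assuming the
 * rte_flow item and action structures of this release: an E-tag rule of
 * the shape accepted by the parser above. The GRP/E-CID value and the
 * VF id are arbitrary example values.
 *
 *   struct rte_flow_attr attr = { .ingress = 1 };
 *   struct rte_flow_item_e_tag e_tag_spec = {
 *           .rsvd_grp_ecid_b = rte_cpu_to_be_16(0x1234),
 *   };
 *   struct rte_flow_item_e_tag e_tag_mask = {
 *           .rsvd_grp_ecid_b = rte_cpu_to_be_16(0x3FFF),
 *   };
 *   struct rte_flow_item pattern[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_E_TAG,
 *             .spec = &e_tag_spec, .mask = &e_tag_mask },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 *   struct rte_flow_action_vf vf = { .id = 1 };
 *   struct rte_flow_action actions[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_VF, .conf = &vf },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 */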
1355
1356 static int
1357 ixgbe_parse_l2_tn_filter(struct rte_eth_dev *dev,
1358                         const struct rte_flow_attr *attr,
1359                         const struct rte_flow_item pattern[],
1360                         const struct rte_flow_action actions[],
1361                         struct rte_eth_l2_tunnel_conf *l2_tn_filter,
1362                         struct rte_flow_error *error)
1363 {
1364         int ret = 0;
1365         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1366         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1367         uint16_t vf_num;
1368
1369         ret = cons_parse_l2_tn_filter(dev, attr, pattern,
1370                                 actions, l2_tn_filter, error);
1371
1372         if (hw->mac.type != ixgbe_mac_X550 &&
1373                 hw->mac.type != ixgbe_mac_X550EM_x &&
1374                 hw->mac.type != ixgbe_mac_X550EM_a) {
1375                 memset(l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1376                 rte_flow_error_set(error, EINVAL,
1377                         RTE_FLOW_ERROR_TYPE_ITEM,
1378                         NULL, "Not supported by L2 tunnel filter");
1379                 return -rte_errno;
1380         }
1381
1382         vf_num = pci_dev->max_vfs;
1383
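        /*
         * l2_tn_filter->pool selects the destination pool: a VF index, or
         * pci_dev->max_vfs when the PF action was chosen, so any larger
         * value is rejected.
         */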
1384         if (l2_tn_filter->pool > vf_num)
1385                 return -rte_errno;
1386
1387         return ret;
1388 }
1389
1390 /* Parse the attributes and actions of a flow director rule. */
1391 static int
1392 ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
1393                           const struct rte_flow_action actions[],
1394                           struct ixgbe_fdir_rule *rule,
1395                           struct rte_flow_error *error)
1396 {
1397         const struct rte_flow_action *act;
1398         const struct rte_flow_action_queue *act_q;
1399         const struct rte_flow_action_mark *mark;
1400
1401         /* parse attr */
1402         /* must be input direction */
1403         if (!attr->ingress) {
1404                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1405                 rte_flow_error_set(error, EINVAL,
1406                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1407                         attr, "Only support ingress.");
1408                 return -rte_errno;
1409         }
1410
1411         /* not supported */
1412         if (attr->egress) {
1413                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1414                 rte_flow_error_set(error, EINVAL,
1415                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1416                         attr, "Not support egress.");
1417                 return -rte_errno;
1418         }
1419
1420         /* not supported */
1421         if (attr->transfer) {
1422                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1423                 rte_flow_error_set(error, EINVAL,
1424                         RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
1425                         attr, "No support for transfer.");
1426                 return -rte_errno;
1427         }
1428
1429         /* not supported */
1430         if (attr->priority) {
1431                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1432                 rte_flow_error_set(error, EINVAL,
1433                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1434                         attr, "Not support priority.");
1435                 return -rte_errno;
1436         }
1437
1438         /* check if the first not void action is QUEUE or DROP. */
1439         act = next_no_void_action(actions, NULL);
1440         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
1441             act->type != RTE_FLOW_ACTION_TYPE_DROP) {
1442                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1443                 rte_flow_error_set(error, EINVAL,
1444                         RTE_FLOW_ERROR_TYPE_ACTION,
1445                         act, "Not supported action.");
1446                 return -rte_errno;
1447         }
1448
1449         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
1450                 act_q = (const struct rte_flow_action_queue *)act->conf;
1451                 rule->queue = act_q->index;
1452         } else { /* drop */
1453                 /* signature mode does not support drop action. */
1454                 if (rule->mode == RTE_FDIR_MODE_SIGNATURE) {
1455                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1456                         rte_flow_error_set(error, EINVAL,
1457                                 RTE_FLOW_ERROR_TYPE_ACTION,
1458                                 act, "Not supported action.");
1459                         return -rte_errno;
1460                 }
1461                 rule->fdirflags = IXGBE_FDIRCMD_DROP;
1462         }
1463
1464         /* check if the next not void action is MARK or END */
1465         act = next_no_void_action(actions, act);
1466         if ((act->type != RTE_FLOW_ACTION_TYPE_MARK) &&
1467                 (act->type != RTE_FLOW_ACTION_TYPE_END)) {
1468                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1469                 rte_flow_error_set(error, EINVAL,
1470                         RTE_FLOW_ERROR_TYPE_ACTION,
1471                         act, "Not supported action.");
1472                 return -rte_errno;
1473         }
1474
1475         rule->soft_id = 0;
1476
1477         if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
1478                 mark = (const struct rte_flow_action_mark *)act->conf;
1479                 rule->soft_id = mark->id;
1480                 act = next_no_void_action(actions, act);
1481         }
1482
1483         /* check if the next not void action is END */
1484         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1485                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1486                 rte_flow_error_set(error, EINVAL,
1487                         RTE_FLOW_ERROR_TYPE_ACTION,
1488                         act, "Not supported action.");
1489                 return -rte_errno;
1490         }
1491
1492         return 0;
1493 }
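
/*
 * A minimal illustrative sketch (not part of the driver), assuming the
 * rte_flow action structures of this release: an action list accepted by
 * the parser above - QUEUE, an optional MARK, then END. The queue index
 * and mark id are arbitrary example values.
 *
 *   struct rte_flow_action_queue queue = { .index = 3 };
 *   struct rte_flow_action_mark mark = { .id = 0x1234 };
 *   struct rte_flow_action actions[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *           { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 */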
1494
1495 /* Return the next non-void pattern item, skipping any FUZZY items. */
1496 static inline
1497 const struct rte_flow_item *next_no_fuzzy_pattern(
1498                 const struct rte_flow_item pattern[],
1499                 const struct rte_flow_item *cur)
1500 {
1501         const struct rte_flow_item *next =
1502                 next_no_void_pattern(pattern, cur);
1503         while (1) {
1504                 if (next->type != RTE_FLOW_ITEM_TYPE_FUZZY)
1505                         return next;
1506                 next = next_no_void_pattern(pattern, next);
1507         }
1508 }
1509
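/*
 * Scan the pattern for a FUZZY item. Return 1 (signature mode) when a
 * FUZZY item is present and its masked threshold is non-zero and within
 * the bounds given by item->last; return 0 (perfect mode) otherwise.
 */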
1510 static inline uint8_t signature_match(const struct rte_flow_item pattern[])
1511 {
1512         const struct rte_flow_item_fuzzy *spec, *last, *mask;
1513         const struct rte_flow_item *item;
1514         uint32_t sh, lh, mh;
1515         int i = 0;
1516
1517         while (1) {
1518                 item = pattern + i;
1519                 if (item->type == RTE_FLOW_ITEM_TYPE_END)
1520                         break;
1521
1522                 if (item->type == RTE_FLOW_ITEM_TYPE_FUZZY) {
1523                         spec = item->spec;
1524                         last = item->last;
1525                         mask = item->mask;
1526
1527                         if (!spec || !mask)
1528                                 return 0;
1529
1530                         sh = spec->thresh;
1531
1532                         if (!last)
1533                                 lh = sh;
1534                         else
1535                                 lh = last->thresh;
1536
1537                         mh = mask->thresh;
1538                         sh = sh & mh;
1539                         lh = lh & mh;
1540
1541                         if (!sh || sh > lh)
1542                                 return 0;
1543
1544                         return 1;
1545                 }
1546
1547                 i++;
1548         }
1549
1550         return 0;
1551 }
1552
1553 /**
1554  * Parse the rule to see if it is an IP or MAC VLAN flow director rule,
1555  * and fill in the flow director filter info along the way.
1556  * UDP/TCP/SCTP PATTERN:
1557  * The first not void item can be ETH or IPV4 or IPV6
1558  * The second not void item must be IPV4 or IPV6 if the first one is ETH.
1559  * The next not void item could be UDP or TCP or SCTP (optional)
1560  * The next not void item could be RAW (for flexbyte, optional)
1561  * The next not void item must be END.
1562  * A Fuzzy Match pattern can appear at any place before END.
1563  * Fuzzy Match is optional for IPV4 but is required for IPV6
1564  * MAC VLAN PATTERN:
1565  * The first not void item must be ETH.
1566  * The second not void item must be MAC VLAN.
1567  * The next not void item must be END.
1568  * ACTION:
1569  * The first not void action should be QUEUE or DROP.
1570  * The second not void optional action should be MARK,
1571  * mark_id is a uint32_t number.
1572  * The next not void action should be END.
1573  * UDP/TCP/SCTP pattern example:
1574  * ITEM         Spec                    Mask
1575  * ETH          NULL                    NULL
1576  * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
1577  *              dst_addr 192.167.3.50   0xFFFFFFFF
1578  * UDP/TCP/SCTP src_port        80      0xFFFF
1579  *              dst_port        80      0xFFFF
1580  * FLEX relative        0       0x1
1581  *              search          0       0x1
1582  *              reserved        0       0
1583  *              offset          12      0xFFFFFFFF
1584  *              limit           0       0xFFFF
1585  *              length          2       0xFFFF
1586  *              pattern[0]      0x86    0xFF
1587  *              pattern[1]      0xDD    0xFF
1588  * END
1589  * MAC VLAN pattern example:
1590  * ITEM         Spec                    Mask
1591  * ETH          dst_addr
1592                 {0xAC, 0x7B, 0xA1,      {0xFF, 0xFF, 0xFF,
1593                 0x2C, 0x6D, 0x36}       0xFF, 0xFF, 0xFF}
1594  * MAC VLAN     tci     0x2016          0xEFFF
1595  * END
1596  * Other members in mask and spec should be set to 0x00.
1597  * Item->last should be NULL.
1598  */
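
/*
 * A minimal illustrative sketch (not part of the driver), assuming the
 * rte_flow structures of this release: the IPv4/UDP perfect-match example
 * from the table above. Addresses, ports and the queue index are
 * arbitrary example values.
 *
 *   struct rte_flow_attr attr = { .ingress = 1 };
 *   struct rte_flow_item_ipv4 ip_spec = {
 *           .hdr.src_addr = rte_cpu_to_be_32(0xC0A80114), // 192.168.1.20
 *           .hdr.dst_addr = rte_cpu_to_be_32(0xC0A70332), // 192.167.3.50
 *   };
 *   struct rte_flow_item_ipv4 ip_mask = {
 *           .hdr.src_addr = rte_cpu_to_be_32(0xFFFFFFFF),
 *           .hdr.dst_addr = rte_cpu_to_be_32(0xFFFFFFFF),
 *   };
 *   struct rte_flow_item_udp udp_spec = {
 *           .hdr.src_port = rte_cpu_to_be_16(80),
 *           .hdr.dst_port = rte_cpu_to_be_16(80),
 *   };
 *   struct rte_flow_item_udp udp_mask = {
 *           .hdr.src_port = rte_cpu_to_be_16(0xFFFF),
 *           .hdr.dst_port = rte_cpu_to_be_16(0xFFFF),
 *   };
 *   struct rte_flow_item pattern[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *           { .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *             .spec = &ip_spec, .mask = &ip_mask },
 *           { .type = RTE_FLOW_ITEM_TYPE_UDP,
 *             .spec = &udp_spec, .mask = &udp_mask },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 *   struct rte_flow_action_queue queue = { .index = 1 };
 *   struct rte_flow_action actions[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 */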
1599 static int
1600 ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
1601                                const struct rte_flow_attr *attr,
1602                                const struct rte_flow_item pattern[],
1603                                const struct rte_flow_action actions[],
1604                                struct ixgbe_fdir_rule *rule,
1605                                struct rte_flow_error *error)
1606 {
1607         const struct rte_flow_item *item;
1608         const struct rte_flow_item_eth *eth_spec;
1609         const struct rte_flow_item_eth *eth_mask;
1610         const struct rte_flow_item_ipv4 *ipv4_spec;
1611         const struct rte_flow_item_ipv4 *ipv4_mask;
1612         const struct rte_flow_item_ipv6 *ipv6_spec;
1613         const struct rte_flow_item_ipv6 *ipv6_mask;
1614         const struct rte_flow_item_tcp *tcp_spec;
1615         const struct rte_flow_item_tcp *tcp_mask;
1616         const struct rte_flow_item_udp *udp_spec;
1617         const struct rte_flow_item_udp *udp_mask;
1618         const struct rte_flow_item_sctp *sctp_spec;
1619         const struct rte_flow_item_sctp *sctp_mask;
1620         const struct rte_flow_item_vlan *vlan_spec;
1621         const struct rte_flow_item_vlan *vlan_mask;
1622         const struct rte_flow_item_raw *raw_mask;
1623         const struct rte_flow_item_raw *raw_spec;
1624         uint8_t j;
1625
1626         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1627
1628         if (!pattern) {
1629                 rte_flow_error_set(error, EINVAL,
1630                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1631                         NULL, "NULL pattern.");
1632                 return -rte_errno;
1633         }
1634
1635         if (!actions) {
1636                 rte_flow_error_set(error, EINVAL,
1637                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1638                                    NULL, "NULL action.");
1639                 return -rte_errno;
1640         }
1641
1642         if (!attr) {
1643                 rte_flow_error_set(error, EINVAL,
1644                                    RTE_FLOW_ERROR_TYPE_ATTR,
1645                                    NULL, "NULL attribute.");
1646                 return -rte_errno;
1647         }
1648
1649         /**
1650          * Some fields may not be provided. Set spec to 0 and mask to the default
1651          * value, so nothing more needs to be done later for fields that are not provided.
1652          */
1653         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1654         memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
1655         rule->mask.vlan_tci_mask = 0;
1656         rule->mask.flex_bytes_mask = 0;
1657
1658         /**
1659          * The first not void item should be
1660          * ETH or IPv4 or IPv6 or TCP or UDP or SCTP.
1661          */
1662         item = next_no_fuzzy_pattern(pattern, NULL);
1663         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
1664             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
1665             item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
1666             item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1667             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1668             item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
1669                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1670                 rte_flow_error_set(error, EINVAL,
1671                         RTE_FLOW_ERROR_TYPE_ITEM,
1672                         item, "Not supported by fdir filter");
1673                 return -rte_errno;
1674         }
1675
1676         if (signature_match(pattern))
1677                 rule->mode = RTE_FDIR_MODE_SIGNATURE;
1678         else
1679                 rule->mode = RTE_FDIR_MODE_PERFECT;
1680
1681         /* Ranges set via item->last are not supported. */
1682         if (item->last) {
1683                 rte_flow_error_set(error, EINVAL,
1684                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1685                         item, "Not supported last point for range");
1686                 return -rte_errno;
1687         }
1688
1689         /* Get the MAC info. */
1690         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
1691                 /**
1692                  * Only VLAN and dst MAC address are supported;
1693                  * all other fields should be masked.
1694                  */
1695                 if (item->spec && !item->mask) {
1696                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1697                         rte_flow_error_set(error, EINVAL,
1698                                 RTE_FLOW_ERROR_TYPE_ITEM,
1699                                 item, "Not supported by fdir filter");
1700                         return -rte_errno;
1701                 }
1702
1703                 if (item->spec) {
1704                         rule->b_spec = TRUE;
1705                         eth_spec = item->spec;
1706
1707                         /* Get the dst MAC. */
1708                         for (j = 0; j < ETHER_ADDR_LEN; j++) {
1709                                 rule->ixgbe_fdir.formatted.inner_mac[j] =
1710                                         eth_spec->dst.addr_bytes[j];
1711                         }
1712                 }
1713
1714
1715                 if (item->mask) {
1716
1717                         rule->b_mask = TRUE;
1718                         eth_mask = item->mask;
1719
1720                         /* Ether type should be masked. */
1721                         if (eth_mask->type ||
1722                             rule->mode == RTE_FDIR_MODE_SIGNATURE) {
1723                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1724                                 rte_flow_error_set(error, EINVAL,
1725                                         RTE_FLOW_ERROR_TYPE_ITEM,
1726                                         item, "Not supported by fdir filter");
1727                                 return -rte_errno;
1728                         }
1729
1730                         /* If the ethernet mask is meaningful, this is MAC VLAN mode. */
1731                         rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN;
1732
1733                         /**
1734                          * The src MAC address must be fully masked out,
1735                          * and the dst MAC address mask must be all 0xFF.
1736                          */
1737                         for (j = 0; j < ETHER_ADDR_LEN; j++) {
1738                                 if (eth_mask->src.addr_bytes[j] ||
1739                                         eth_mask->dst.addr_bytes[j] != 0xFF) {
1740                                         memset(rule, 0,
1741                                         sizeof(struct ixgbe_fdir_rule));
1742                                         rte_flow_error_set(error, EINVAL,
1743                                         RTE_FLOW_ERROR_TYPE_ITEM,
1744                                         item, "Not supported by fdir filter");
1745                                         return -rte_errno;
1746                                 }
1747                         }
1748
1749                         /* With no VLAN item given, the TCI is treated as fully masked. */
1750                         rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
1751                 }
1752                 /* If both spec and mask are NULL,
1753                  * the ETH item is a don't-care.
1754                  * Do nothing.
1755                  */
1756
1757                 /**
1758                  * Check if the next not void item is vlan or ipv4.
1759                  * IPv6 is not supported.
1760                  */
1761                 item = next_no_fuzzy_pattern(pattern, item);
1762                 if (rule->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
1763                         if (item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
1764                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1765                                 rte_flow_error_set(error, EINVAL,
1766                                         RTE_FLOW_ERROR_TYPE_ITEM,
1767                                         item, "Not supported by fdir filter");
1768                                 return -rte_errno;
1769                         }
1770                 } else {
1771                         if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
1772                                         item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
1773                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1774                                 rte_flow_error_set(error, EINVAL,
1775                                         RTE_FLOW_ERROR_TYPE_ITEM,
1776                                         item, "Not supported by fdir filter");
1777                                 return -rte_errno;
1778                         }
1779                 }
1780         }
1781
1782         if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1783                 if (!(item->spec && item->mask)) {
1784                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1785                         rte_flow_error_set(error, EINVAL,
1786                                 RTE_FLOW_ERROR_TYPE_ITEM,
1787                                 item, "Not supported by fdir filter");
1788                         return -rte_errno;
1789                 }
1790
1791                 /* Ranges set via item->last are not supported. */
1792                 if (item->last) {
1793                         rte_flow_error_set(error, EINVAL,
1794                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1795                                 item, "Not supported last point for range");
1796                         return -rte_errno;
1797                 }
1798
1799                 vlan_spec = item->spec;
1800                 vlan_mask = item->mask;
1801
1802                 rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
1803
1804                 rule->mask.vlan_tci_mask = vlan_mask->tci;
1805                 rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
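                /* 0xEFFF keeps the PCP and VLAN ID bits of the TCI and
                 * ignores the DEI/CFI bit.
                 */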
1806                 /* More than one VLAN tag is not supported. */
1807
1808                 /* Next not void item must be END */
1809                 item = next_no_fuzzy_pattern(pattern, item);
1810                 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1811                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1812                         rte_flow_error_set(error, EINVAL,
1813                                 RTE_FLOW_ERROR_TYPE_ITEM,
1814                                 item, "Not supported by fdir filter");
1815                         return -rte_errno;
1816                 }
1817         }
1818
1819         /* Get the IPV4 info. */
1820         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1821                 /**
1822                  * Set the flow type even if there's no content
1823                  * as we must have a flow type.
1824                  */
1825                 rule->ixgbe_fdir.formatted.flow_type =
1826                         IXGBE_ATR_FLOW_TYPE_IPV4;
1827                 /* Ranges set via item->last are not supported. */
1828                 if (item->last) {
1829                         rte_flow_error_set(error, EINVAL,
1830                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1831                                 item, "Not supported last point for range");
1832                         return -rte_errno;
1833                 }
1834                 /**
1835                  * Only care about src & dst addresses,
1836                  * others should be masked.
1837                  */
1838                 if (!item->mask) {
1839                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1840                         rte_flow_error_set(error, EINVAL,
1841                                 RTE_FLOW_ERROR_TYPE_ITEM,
1842                                 item, "Not supported by fdir filter");
1843                         return -rte_errno;
1844                 }
1845                 rule->b_mask = TRUE;
1846                 ipv4_mask = item->mask;
1847                 if (ipv4_mask->hdr.version_ihl ||
1848                     ipv4_mask->hdr.type_of_service ||
1849                     ipv4_mask->hdr.total_length ||
1850                     ipv4_mask->hdr.packet_id ||
1851                     ipv4_mask->hdr.fragment_offset ||
1852                     ipv4_mask->hdr.time_to_live ||
1853                     ipv4_mask->hdr.next_proto_id ||
1854                     ipv4_mask->hdr.hdr_checksum) {
1855                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1856                         rte_flow_error_set(error, EINVAL,
1857                                 RTE_FLOW_ERROR_TYPE_ITEM,
1858                                 item, "Not supported by fdir filter");
1859                         return -rte_errno;
1860                 }
1861                 rule->mask.dst_ipv4_mask = ipv4_mask->hdr.dst_addr;
1862                 rule->mask.src_ipv4_mask = ipv4_mask->hdr.src_addr;
1863
1864                 if (item->spec) {
1865                         rule->b_spec = TRUE;
1866                         ipv4_spec = item->spec;
1867                         rule->ixgbe_fdir.formatted.dst_ip[0] =
1868                                 ipv4_spec->hdr.dst_addr;
1869                         rule->ixgbe_fdir.formatted.src_ip[0] =
1870                                 ipv4_spec->hdr.src_addr;
1871                 }
1872
1873                 /**
1874                  * Check if the next not void item is
1875                  * TCP, UDP, SCTP, RAW or END.
1876                  */
1877                 item = next_no_fuzzy_pattern(pattern, item);
1878                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1879                     item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1880                     item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
1881                     item->type != RTE_FLOW_ITEM_TYPE_END &&
1882                     item->type != RTE_FLOW_ITEM_TYPE_RAW) {
1883                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1884                         rte_flow_error_set(error, EINVAL,
1885                                 RTE_FLOW_ERROR_TYPE_ITEM,
1886                                 item, "Not supported by fdir filter");
1887                         return -rte_errno;
1888                 }
1889         }
1890
1891         /* Get the IPV6 info. */
1892         if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
1893                 /**
1894                  * Set the flow type even if there's no content
1895                  * as we must have a flow type.
1896                  */
1897                 rule->ixgbe_fdir.formatted.flow_type =
1898                         IXGBE_ATR_FLOW_TYPE_IPV6;
1899
1900                 /**
1901                  * 1. it must be a signature-match rule,
1902                  * 2. item->last is not supported,
1903                  * 3. the mask must not be NULL.
1904                  */
1905                 if (rule->mode != RTE_FDIR_MODE_SIGNATURE ||
1906                     item->last ||
1907                     !item->mask) {
1908                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1909                         rte_flow_error_set(error, EINVAL,
1910                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1911                                 item, "Not supported last point for range");
1912                         return -rte_errno;
1913                 }
1914
1915                 rule->b_mask = TRUE;
1916                 ipv6_mask = item->mask;
1917                 if (ipv6_mask->hdr.vtc_flow ||
1918                     ipv6_mask->hdr.payload_len ||
1919                     ipv6_mask->hdr.proto ||
1920                     ipv6_mask->hdr.hop_limits) {
1921                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1922                         rte_flow_error_set(error, EINVAL,
1923                                 RTE_FLOW_ERROR_TYPE_ITEM,
1924                                 item, "Not supported by fdir filter");
1925                         return -rte_errno;
1926                 }
1927
1928                 /* check src addr mask */
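                /*
                 * Each set bit in src_ipv6_mask marks one byte of the
                 * IPv6 source address that must match exactly; per-byte
                 * mask values other than 0x00 or 0xFF are rejected.
                 */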
1929                 for (j = 0; j < 16; j++) {
1930                         if (ipv6_mask->hdr.src_addr[j] == UINT8_MAX) {
1931                                 rule->mask.src_ipv6_mask |= 1 << j;
1932                         } else if (ipv6_mask->hdr.src_addr[j] != 0) {
1933                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1934                                 rte_flow_error_set(error, EINVAL,
1935                                         RTE_FLOW_ERROR_TYPE_ITEM,
1936                                         item, "Not supported by fdir filter");
1937                                 return -rte_errno;
1938                         }
1939                 }
1940
1941                 /* check dst addr mask */
1942                 for (j = 0; j < 16; j++) {
1943                         if (ipv6_mask->hdr.dst_addr[j] == UINT8_MAX) {
1944                                 rule->mask.dst_ipv6_mask |= 1 << j;
1945                         } else if (ipv6_mask->hdr.dst_addr[j] != 0) {
1946                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1947                                 rte_flow_error_set(error, EINVAL,
1948                                         RTE_FLOW_ERROR_TYPE_ITEM,
1949                                         item, "Not supported by fdir filter");
1950                                 return -rte_errno;
1951                         }
1952                 }
1953
1954                 if (item->spec) {
1955                         rule->b_spec = TRUE;
1956                         ipv6_spec = item->spec;
1957                         rte_memcpy(rule->ixgbe_fdir.formatted.src_ip,
1958                                    ipv6_spec->hdr.src_addr, 16);
1959                         rte_memcpy(rule->ixgbe_fdir.formatted.dst_ip,
1960                                    ipv6_spec->hdr.dst_addr, 16);
1961                 }
1962
1963                 /**
1964                  * Check if the next not void item is
1965                  * TCP, UDP, SCTP, RAW or END.
1966                  */
1967                 item = next_no_fuzzy_pattern(pattern, item);
1968                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1969                     item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1970                     item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
1971                     item->type != RTE_FLOW_ITEM_TYPE_END &&
1972                     item->type != RTE_FLOW_ITEM_TYPE_RAW) {
1973                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1974                         rte_flow_error_set(error, EINVAL,
1975                                 RTE_FLOW_ERROR_TYPE_ITEM,
1976                                 item, "Not supported by fdir filter");
1977                         return -rte_errno;
1978                 }
1979         }
1980
1981         /* Get the TCP info. */
1982         if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
1983                 /**
1984                  * Set the flow type even if there's no content
1985                  * as we must have a flow type.
1986                  */
1987                 rule->ixgbe_fdir.formatted.flow_type |=
1988                         IXGBE_ATR_L4TYPE_TCP;
1989                 /* Ranges set via item->last are not supported. */
1990                 if (item->last) {
1991                         rte_flow_error_set(error, EINVAL,
1992                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1993                                 item, "Not supported last point for range");
1994                         return -rte_errno;
1995                 }
1996                 /**
1997                  * Only care about src & dst ports,
1998                  * others should be masked.
1999                  */
2000                 if (!item->mask) {
2001                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2002                         rte_flow_error_set(error, EINVAL,
2003                                 RTE_FLOW_ERROR_TYPE_ITEM,
2004                                 item, "Not supported by fdir filter");
2005                         return -rte_errno;
2006                 }
2007                 rule->b_mask = TRUE;
2008                 tcp_mask = item->mask;
2009                 if (tcp_mask->hdr.sent_seq ||
2010                     tcp_mask->hdr.recv_ack ||
2011                     tcp_mask->hdr.data_off ||
2012                     tcp_mask->hdr.tcp_flags ||
2013                     tcp_mask->hdr.rx_win ||
2014                     tcp_mask->hdr.cksum ||
2015                     tcp_mask->hdr.tcp_urp) {
2016                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2017                         rte_flow_error_set(error, EINVAL,
2018                                 RTE_FLOW_ERROR_TYPE_ITEM,
2019                                 item, "Not supported by fdir filter");
2020                         return -rte_errno;
2021                 }
2022                 rule->mask.src_port_mask = tcp_mask->hdr.src_port;
2023                 rule->mask.dst_port_mask = tcp_mask->hdr.dst_port;
2024
2025                 if (item->spec) {
2026                         rule->b_spec = TRUE;
2027                         tcp_spec = item->spec;
2028                         rule->ixgbe_fdir.formatted.src_port =
2029                                 tcp_spec->hdr.src_port;
2030                         rule->ixgbe_fdir.formatted.dst_port =
2031                                 tcp_spec->hdr.dst_port;
2032                 }
2033
2034                 item = next_no_fuzzy_pattern(pattern, item);
2035                 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
2036                     item->type != RTE_FLOW_ITEM_TYPE_END) {
2037                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2038                         rte_flow_error_set(error, EINVAL,
2039                                 RTE_FLOW_ERROR_TYPE_ITEM,
2040                                 item, "Not supported by fdir filter");
2041                         return -rte_errno;
2042                 }
2043
2044         }
2045
2046         /* Get the UDP info */
2047         if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
2048                 /**
2049                  * Set the flow type even if there's no content
2050                  * as we must have a flow type.
2051                  */
2052                 rule->ixgbe_fdir.formatted.flow_type |=
2053                         IXGBE_ATR_L4TYPE_UDP;
2054                 /* Ranges set via item->last are not supported. */
2055                 if (item->last) {
2056                         rte_flow_error_set(error, EINVAL,
2057                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2058                                 item, "Not supported last point for range");
2059                         return -rte_errno;
2060                 }
2061                 /**
2062                  * Only care about src & dst ports,
2063                  * others should be masked.
2064                  */
2065                 if (!item->mask) {
2066                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2067                         rte_flow_error_set(error, EINVAL,
2068                                 RTE_FLOW_ERROR_TYPE_ITEM,
2069                                 item, "Not supported by fdir filter");
2070                         return -rte_errno;
2071                 }
2072                 rule->b_mask = TRUE;
2073                 udp_mask = item->mask;
2074                 if (udp_mask->hdr.dgram_len ||
2075                     udp_mask->hdr.dgram_cksum) {
2076                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2077                         rte_flow_error_set(error, EINVAL,
2078                                 RTE_FLOW_ERROR_TYPE_ITEM,
2079                                 item, "Not supported by fdir filter");
2080                         return -rte_errno;
2081                 }
2082                 rule->mask.src_port_mask = udp_mask->hdr.src_port;
2083                 rule->mask.dst_port_mask = udp_mask->hdr.dst_port;
2084
2085                 if (item->spec) {
2086                         rule->b_spec = TRUE;
2087                         udp_spec = item->spec;
2088                         rule->ixgbe_fdir.formatted.src_port =
2089                                 udp_spec->hdr.src_port;
2090                         rule->ixgbe_fdir.formatted.dst_port =
2091                                 udp_spec->hdr.dst_port;
2092                 }
2093
2094                 item = next_no_fuzzy_pattern(pattern, item);
2095                 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
2096                     item->type != RTE_FLOW_ITEM_TYPE_END) {
2097                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2098                         rte_flow_error_set(error, EINVAL,
2099                                 RTE_FLOW_ERROR_TYPE_ITEM,
2100                                 item, "Not supported by fdir filter");
2101                         return -rte_errno;
2102                 }
2103
2104         }
2105
2106         /* Get the SCTP info */
2107         if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
2108                 /**
2109                  * Set the flow type even if there's no content
2110                  * as we must have a flow type.
2111                  */
2112                 rule->ixgbe_fdir.formatted.flow_type |=
2113                         IXGBE_ATR_L4TYPE_SCTP;
2114                 /* Ranges set via item->last are not supported. */
2115                 if (item->last) {
2116                         rte_flow_error_set(error, EINVAL,
2117                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2118                                 item, "Not supported last point for range");
2119                         return -rte_errno;
2120                 }
2121
2122                 /* Only the x550 family supports matching on SCTP ports. */
2123                 if (hw->mac.type == ixgbe_mac_X550 ||
2124                     hw->mac.type == ixgbe_mac_X550EM_x ||
2125                     hw->mac.type == ixgbe_mac_X550EM_a) {
2126                         /**
2127                          * Only care about src & dst ports,
2128                          * others should be masked.
2129                          */
2130                         if (!item->mask) {
2131                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2132                                 rte_flow_error_set(error, EINVAL,
2133                                         RTE_FLOW_ERROR_TYPE_ITEM,
2134                                         item, "Not supported by fdir filter");
2135                                 return -rte_errno;
2136                         }
2137                         rule->b_mask = TRUE;
2138                         sctp_mask = item->mask;
2139                         if (sctp_mask->hdr.tag ||
2140                                 sctp_mask->hdr.cksum) {
2141                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2142                                 rte_flow_error_set(error, EINVAL,
2143                                         RTE_FLOW_ERROR_TYPE_ITEM,
2144                                         item, "Not supported by fdir filter");
2145                                 return -rte_errno;
2146                         }
2147                         rule->mask.src_port_mask = sctp_mask->hdr.src_port;
2148                         rule->mask.dst_port_mask = sctp_mask->hdr.dst_port;
2149
2150                         if (item->spec) {
2151                                 rule->b_spec = TRUE;
2152                                 sctp_spec = item->spec;
2153                                 rule->ixgbe_fdir.formatted.src_port =
2154                                         sctp_spec->hdr.src_port;
2155                                 rule->ixgbe_fdir.formatted.dst_port =
2156                                         sctp_spec->hdr.dst_port;
2157                         }
2158                 /* On other MAC types, even SCTP port matching is not supported. */
2159                 } else {
2160                         sctp_mask = item->mask;
2161                         if (sctp_mask &&
2162                                 (sctp_mask->hdr.src_port ||
2163                                  sctp_mask->hdr.dst_port ||
2164                                  sctp_mask->hdr.tag ||
2165                                  sctp_mask->hdr.cksum)) {
2166                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2167                                 rte_flow_error_set(error, EINVAL,
2168                                         RTE_FLOW_ERROR_TYPE_ITEM,
2169                                         item, "Not supported by fdir filter");
2170                                 return -rte_errno;
2171                         }
2172                 }
2173
2174                 item = next_no_fuzzy_pattern(pattern, item);
2175                 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
2176                         item->type != RTE_FLOW_ITEM_TYPE_END) {
2177                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2178                         rte_flow_error_set(error, EINVAL,
2179                                 RTE_FLOW_ERROR_TYPE_ITEM,
2180                                 item, "Not supported by fdir filter");
2181                         return -rte_errno;
2182                 }
2183         }
2184
2185         /* Get the flex byte info */
2186         if (item->type == RTE_FLOW_ITEM_TYPE_RAW) {
2187                 /* Ranges set via item->last are not supported. */
2188                 if (item->last) {
2189                         rte_flow_error_set(error, EINVAL,
2190                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2191                                 item, "Not supported last point for range");
2192                         return -rte_errno;
2193                 }
2194                 /* both spec and mask are required */
2195                 if (!item->mask || !item->spec) {
2196                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2197                         rte_flow_error_set(error, EINVAL,
2198                                 RTE_FLOW_ERROR_TYPE_ITEM,
2199                                 item, "Not supported by fdir filter");
2200                         return -rte_errno;
2201                 }
2202
2203                 raw_mask = item->mask;
2204
2205                 /* check mask */
2206                 if (raw_mask->relative != 0x1 ||
2207                     raw_mask->search != 0x1 ||
2208                     raw_mask->reserved != 0x0 ||
2209                     (uint32_t)raw_mask->offset != 0xffffffff ||
2210                     raw_mask->limit != 0xffff ||
2211                     raw_mask->length != 0xffff) {
2212                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2213                         rte_flow_error_set(error, EINVAL,
2214                                 RTE_FLOW_ERROR_TYPE_ITEM,
2215                                 item, "Not supported by fdir filter");
2216                         return -rte_errno;
2217                 }
2218
2219                 raw_spec = item->spec;
2220
2221                 /* check spec */
2222                 if (raw_spec->relative != 0 ||
2223                     raw_spec->search != 0 ||
2224                     raw_spec->reserved != 0 ||
2225                     raw_spec->offset > IXGBE_MAX_FLX_SOURCE_OFF ||
2226                     raw_spec->offset % 2 ||
2227                     raw_spec->limit != 0 ||
2228                     raw_spec->length != 2 ||
2229                     /* pattern can't be 0xffff */
2230                     (raw_spec->pattern[0] == 0xff &&
2231                      raw_spec->pattern[1] == 0xff)) {
2232                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2233                         rte_flow_error_set(error, EINVAL,
2234                                 RTE_FLOW_ERROR_TYPE_ITEM,
2235                                 item, "Not supported by fdir filter");
2236                         return -rte_errno;
2237                 }
2238
2239                 /* check pattern mask */
2240                 if (raw_mask->pattern[0] != 0xff ||
2241                     raw_mask->pattern[1] != 0xff) {
2242                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2243                         rte_flow_error_set(error, EINVAL,
2244                                 RTE_FLOW_ERROR_TYPE_ITEM,
2245                                 item, "Not supported by fdir filter");
2246                         return -rte_errno;
2247                 }
2248
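                /*
                 * The two matched flex bytes are packed with pattern[1]
                 * in the high byte and pattern[0] in the low byte;
                 * flex_bytes_offset keeps the even byte offset checked
                 * above.
                 */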
2249                 rule->mask.flex_bytes_mask = 0xffff;
2250                 rule->ixgbe_fdir.formatted.flex_bytes =
2251                         (((uint16_t)raw_spec->pattern[1]) << 8) |
2252                         raw_spec->pattern[0];
2253                 rule->flex_bytes_offset = raw_spec->offset;
2254         }
2255
2256         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2257                 /* check if the next not void item is END */
2258                 item = next_no_fuzzy_pattern(pattern, item);
2259                 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2260                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2261                         rte_flow_error_set(error, EINVAL,
2262                                 RTE_FLOW_ERROR_TYPE_ITEM,
2263                                 item, "Not supported by fdir filter");
2264                         return -rte_errno;
2265                 }
2266         }
2267
2268         return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
2269 }
2270
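/*
 * 0x6558 is the Transparent Ethernet Bridging ethertype carried in the
 * GRE protocol field of NVGRE packets.
 */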
2271 #define NVGRE_PROTOCOL 0x6558
2272
2273 /**
2274  * Parse the rule to see if it is a VxLAN or NVGRE flow director rule,
2275  * and fill in the flow director filter info along the way.
2276  * VxLAN PATTERN:
2277  * The first not void item must be ETH.
2278  * The second not void item must be IPV4/ IPV6.
2279  * The third not void item must be UDP and the fourth must be VXLAN.
2280  * The next not void item must be END.
2281  * NVGRE PATTERN:
2282  * The first not void item must be ETH.
2283  * The second not void item must be IPV4/ IPV6.
2284  * The third not void item must be NVGRE.
2285  * The next not void item must be END.
2286  * ACTION:
2287  * The first not void action should be QUEUE or DROP.
2288  * The second not void optional action should be MARK,
2289  * mark_id is a uint32_t number.
2290  * The next not void action should be END.
2291  * VxLAN pattern example:
2292  * ITEM         Spec                    Mask
2293  * ETH          NULL                    NULL
2294  * IPV4/IPV6    NULL                    NULL
2295  * UDP          NULL                    NULL
2296  * VxLAN        vni{0x00, 0x32, 0x54}   {0xFF, 0xFF, 0xFF}
2297  * MAC VLAN     tci     0x2016          0xEFFF
2298  * END
2299  * NVGRE pattern example:
2300  * ITEM         Spec                    Mask
2301  * ETH          NULL                    NULL
2302  * IPV4/IPV6    NULL                    NULL
2303  * NVGRE        protocol        0x6558  0xFFFF
2304  *              tni{0x00, 0x32, 0x54}   {0xFF, 0xFF, 0xFF}
2305  * MAC VLAN     tci     0x2016          0xEFFF
2306  * END
2307  * Other members in mask and spec should be set to 0x00.
2308  * item->last should be NULL.
2309  */
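
/*
 * A minimal illustrative sketch (not part of the driver), assuming the
 * rte_flow item structures of this release: building the VXLAN item from
 * the table above, matching only on the 24-bit VNI. It sits between the
 * outer ETH/IPV4/UDP items and the inner MAC VLAN items of the table.
 *
 *   struct rte_flow_item_vxlan vxlan_spec = {
 *           .vni = { 0x00, 0x32, 0x54 },
 *   };
 *   struct rte_flow_item_vxlan vxlan_mask = {
 *           .vni = { 0xFF, 0xFF, 0xFF },
 *   };
 *   struct rte_flow_item vxlan_item = {
 *           .type = RTE_FLOW_ITEM_TYPE_VXLAN,
 *           .spec = &vxlan_spec,
 *           .mask = &vxlan_mask,
 *   };
 */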
2310 static int
2311 ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
2312                                const struct rte_flow_item pattern[],
2313                                const struct rte_flow_action actions[],
2314                                struct ixgbe_fdir_rule *rule,
2315                                struct rte_flow_error *error)
2316 {
2317         const struct rte_flow_item *item;
2318         const struct rte_flow_item_vxlan *vxlan_spec;
2319         const struct rte_flow_item_vxlan *vxlan_mask;
2320         const struct rte_flow_item_nvgre *nvgre_spec;
2321         const struct rte_flow_item_nvgre *nvgre_mask;
2322         const struct rte_flow_item_eth *eth_spec;
2323         const struct rte_flow_item_eth *eth_mask;
2324         const struct rte_flow_item_vlan *vlan_spec;
2325         const struct rte_flow_item_vlan *vlan_mask;
2326         uint32_t j;
2327
2328         if (!pattern) {
2329                 rte_flow_error_set(error, EINVAL,
2330                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
2331                                    NULL, "NULL pattern.");
2332                 return -rte_errno;
2333         }
2334
2335         if (!actions) {
2336                 rte_flow_error_set(error, EINVAL,
2337                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
2338                                    NULL, "NULL action.");
2339                 return -rte_errno;
2340         }
2341
2342         if (!attr) {
2343                 rte_flow_error_set(error, EINVAL,
2344                                    RTE_FLOW_ERROR_TYPE_ATTR,
2345                                    NULL, "NULL attribute.");
2346                 return -rte_errno;
2347         }
2348
2349         /**
2350          * Some fields may not be provided. Set spec to 0 and mask to the default
2351          * value, so nothing more needs to be done later for fields that are not provided.
2352          */
2353         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2354         memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
2355         rule->mask.vlan_tci_mask = 0;
2356
2357         /**
2358          * The first not void item should be
2359          * ETH or IPv4 or IPv6 or UDP or VXLAN or NVGRE.
2360          */
2361         item = next_no_void_pattern(pattern, NULL);
2362         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
2363             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
2364             item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
2365             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
2366             item->type != RTE_FLOW_ITEM_TYPE_VXLAN &&
2367             item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
2368                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2369                 rte_flow_error_set(error, EINVAL,
2370                         RTE_FLOW_ERROR_TYPE_ITEM,
2371                         item, "Not supported by fdir filter");
2372                 return -rte_errno;
2373         }
2374
2375         rule->mode = RTE_FDIR_MODE_PERFECT_TUNNEL;
2376
2377         /* Skip MAC. */
2378         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
2379                 /* Only used to describe the protocol stack. */
2380                 if (item->spec || item->mask) {
2381                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2382                         rte_flow_error_set(error, EINVAL,
2383                                 RTE_FLOW_ERROR_TYPE_ITEM,
2384                                 item, "Not supported by fdir filter");
2385                         return -rte_errno;
2386                 }
2387                 /* Ranges set via item->last are not supported. */
2388                 if (item->last) {
2389                         rte_flow_error_set(error, EINVAL,
2390                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2391                                 item, "Not supported last point for range");
2392                         return -rte_errno;
2393                 }
2394
2395                 /* Check if the next not void item is IPv4 or IPv6. */
2396                 item = next_no_void_pattern(pattern, item);
2397                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
2398                     item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
2399                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2400                         rte_flow_error_set(error, EINVAL,
2401                                 RTE_FLOW_ERROR_TYPE_ITEM,
2402                                 item, "Not supported by fdir filter");
2403                         return -rte_errno;
2404                 }
2405         }
2406
2407         /* Skip IP. */
2408         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
2409             item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
2410                 /* Only used to describe the protocol stack. */
2411                 if (item->spec || item->mask) {
2412                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2413                         rte_flow_error_set(error, EINVAL,
2414                                 RTE_FLOW_ERROR_TYPE_ITEM,
2415                                 item, "Not supported by fdir filter");
2416                         return -rte_errno;
2417                 }
2418                 /* Ranges set via item->last are not supported. */
2419                 if (item->last) {
2420                         rte_flow_error_set(error, EINVAL,
2421                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2422                                 item, "Not supported last point for range");
2423                         return -rte_errno;
2424                 }
2425
2426                 /* Check if the next not void item is UDP or NVGRE. */
2427                 item = next_no_void_pattern(pattern, item);
2428                 if (item->type != RTE_FLOW_ITEM_TYPE_UDP &&
2429                     item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
2430                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2431                         rte_flow_error_set(error, EINVAL,
2432                                 RTE_FLOW_ERROR_TYPE_ITEM,
2433                                 item, "Not supported by fdir filter");
2434                         return -rte_errno;
2435                 }
2436         }
2437
2438         /* Skip UDP. */
2439         if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
2440                 /* Only used to describe the protocol stack. */
2441                 if (item->spec || item->mask) {
2442                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2443                         rte_flow_error_set(error, EINVAL,
2444                                 RTE_FLOW_ERROR_TYPE_ITEM,
2445                                 item, "Not supported by fdir filter");
2446                         return -rte_errno;
2447                 }
2448                 /* Ranges set via item->last are not supported. */
2449                 if (item->last) {
2450                         rte_flow_error_set(error, EINVAL,
2451                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2452                                 item, "Not supported last point for range");
2453                         return -rte_errno;
2454                 }
2455
2456                 /* Check if the next not void item is VxLAN. */
2457                 item = next_no_void_pattern(pattern, item);
2458                 if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
2459                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2460                         rte_flow_error_set(error, EINVAL,
2461                                 RTE_FLOW_ERROR_TYPE_ITEM,
2462                                 item, "Not supported by fdir filter");
2463                         return -rte_errno;
2464                 }
2465         }
2466
2467         /* Get the VxLAN info */
2468         if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
2469                 rule->ixgbe_fdir.formatted.tunnel_type =
2470                                 IXGBE_FDIR_VXLAN_TUNNEL_TYPE;
2471
2472                 /* Only care about VNI, others should be masked. */
2473                 if (!item->mask) {
2474                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2475                         rte_flow_error_set(error, EINVAL,
2476                                 RTE_FLOW_ERROR_TYPE_ITEM,
2477                                 item, "Not supported by fdir filter");
2478                         return -rte_errno;
2479                 }
2480                 /* Ranges set via item->last are not supported. */
2481                 if (item->last) {
2482                         rte_flow_error_set(error, EINVAL,
2483                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2484                                 item, "Not supported last point for range");
2485                         return -rte_errno;
2486                 }
2487                 rule->b_mask = TRUE;
2488
2489                 /* Tunnel type is always meaningful. */
2490                 rule->mask.tunnel_type_mask = 1;
2491
2492                 vxlan_mask = item->mask;
2493                 if (vxlan_mask->flags) {
2494                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2495                         rte_flow_error_set(error, EINVAL,
2496                                 RTE_FLOW_ERROR_TYPE_ITEM,
2497                                 item, "Not supported by fdir filter");
2498                         return -rte_errno;
2499                 }
2500                 /* The VNI must be either fully masked or not masked at all. */
2501                 if ((vxlan_mask->vni[0] || vxlan_mask->vni[1] ||
2502                         vxlan_mask->vni[2]) &&
2503                         ((vxlan_mask->vni[0] != 0xFF) ||
2504                         (vxlan_mask->vni[1] != 0xFF) ||
2505                                 (vxlan_mask->vni[2] != 0xFF))) {
2506                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2507                         rte_flow_error_set(error, EINVAL,
2508                                 RTE_FLOW_ERROR_TYPE_ITEM,
2509                                 item, "Not supported by fdir filter");
2510                         return -rte_errno;
2511                 }
2512
2513                 rte_memcpy(&rule->mask.tunnel_id_mask, vxlan_mask->vni,
2514                         RTE_DIM(vxlan_mask->vni));
2515
2516                 if (item->spec) {
2517                         rule->b_spec = TRUE;
2518                         vxlan_spec = item->spec;
2519                         rte_memcpy(((uint8_t *)
2520                                 &rule->ixgbe_fdir.formatted.tni_vni),
2521                                 vxlan_spec->vni, RTE_DIM(vxlan_spec->vni));
2522                 }
2523         }
2524
2525         /* Get the NVGRE info */
2526         if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE) {
2527                 rule->ixgbe_fdir.formatted.tunnel_type =
2528                                 IXGBE_FDIR_NVGRE_TUNNEL_TYPE;
2529
2530                 /**
2531                  * Only c_k_s_rsvd0_ver, protocol and TNI are relevant;
2532                  * all other fields must be masked out.
2533                  */
2534                 if (!item->mask) {
2535                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2536                         rte_flow_error_set(error, EINVAL,
2537                                 RTE_FLOW_ERROR_TYPE_ITEM,
2538                                 item, "Not supported by fdir filter");
2539                         return -rte_errno;
2540                 }
2541                 /* Ranges ("last") are not supported. */
2542                 if (item->last) {
2543                         rte_flow_error_set(error, EINVAL,
2544                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2545                                 item, "Not supported last point for range");
2546                         return -rte_errno;
2547                 }
2548                 rule->b_mask = TRUE;
2549
2550                 /* Tunnel type is always meaningful. */
2551                 rule->mask.tunnel_type_mask = 1;
2552
2553                 nvgre_mask = item->mask;
2554                 if (nvgre_mask->flow_id) {
2555                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2556                         rte_flow_error_set(error, EINVAL,
2557                                 RTE_FLOW_ERROR_TYPE_ITEM,
2558                                 item, "Not supported by fdir filter");
2559                         return -rte_errno;
2560                 }
2561                 if (nvgre_mask->protocol &&
2562                     nvgre_mask->protocol != 0xFFFF) {
2563                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2564                         rte_flow_error_set(error, EINVAL,
2565                                 RTE_FLOW_ERROR_TYPE_ITEM,
2566                                 item, "Not supported by fdir filter");
2567                         return -rte_errno;
2568                 }
2569                 if (nvgre_mask->c_k_s_rsvd0_ver &&
2570                     nvgre_mask->c_k_s_rsvd0_ver !=
2571                         rte_cpu_to_be_16(0xFFFF)) {
2572                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2573                         rte_flow_error_set(error, EINVAL,
2574                                 RTE_FLOW_ERROR_TYPE_ITEM,
2575                                 item, "Not supported by fdir filter");
2576                         return -rte_errno;
2577                 }
2578                 /* The TNI must be either fully masked or not masked at all. */
2579                 if (nvgre_mask->tni[0] &&
2580                     ((nvgre_mask->tni[0] != 0xFF) ||
2581                     (nvgre_mask->tni[1] != 0xFF) ||
2582                     (nvgre_mask->tni[2] != 0xFF))) {
2583                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2584                         rte_flow_error_set(error, EINVAL,
2585                                 RTE_FLOW_ERROR_TYPE_ITEM,
2586                                 item, "Not supported by fdir filter");
2587                         return -rte_errno;
2588                 }
2589                 /* The TNI is a 24-bit field. */
2590                 rte_memcpy(&rule->mask.tunnel_id_mask, nvgre_mask->tni,
2591                         RTE_DIM(nvgre_mask->tni));
2592                 rule->mask.tunnel_id_mask <<= 8;
2593
2594                 if (item->spec) {
2595                         rule->b_spec = TRUE;
2596                         nvgre_spec = item->spec;
2597                         if (nvgre_spec->c_k_s_rsvd0_ver !=
2598                             rte_cpu_to_be_16(0x2000) &&
2599                                 nvgre_mask->c_k_s_rsvd0_ver) {
2600                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2601                                 rte_flow_error_set(error, EINVAL,
2602                                         RTE_FLOW_ERROR_TYPE_ITEM,
2603                                         item, "Not supported by fdir filter");
2604                                 return -rte_errno;
2605                         }
2606                         if (nvgre_mask->protocol &&
2607                             nvgre_spec->protocol !=
2608                             rte_cpu_to_be_16(NVGRE_PROTOCOL)) {
2609                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2610                                 rte_flow_error_set(error, EINVAL,
2611                                         RTE_FLOW_ERROR_TYPE_ITEM,
2612                                         item, "Not supported by fdir filter");
2613                                 return -rte_errno;
2614                         }
2615                         /* The TNI is a 24-bit field. */
2616                         rte_memcpy(&rule->ixgbe_fdir.formatted.tni_vni,
2617                         nvgre_spec->tni, RTE_DIM(nvgre_spec->tni));
2618                 }
2619         }
2620
2621         /* check if the next not void item is MAC */
2622         item = next_no_void_pattern(pattern, item);
2623         if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
2624                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2625                 rte_flow_error_set(error, EINVAL,
2626                         RTE_FLOW_ERROR_TYPE_ITEM,
2627                         item, "Not supported by fdir filter");
2628                 return -rte_errno;
2629         }
2630
2631         /**
2632          * Only the VLAN and the destination MAC address are supported;
2633          * all other fields must be masked out.
2634          */
2635
2636         if (!item->mask) {
2637                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2638                 rte_flow_error_set(error, EINVAL,
2639                         RTE_FLOW_ERROR_TYPE_ITEM,
2640                         item, "Not supported by fdir filter");
2641                 return -rte_errno;
2642         }
2643         /* Ranges ("last") are not supported. */
2644         if (item->last) {
2645                 rte_flow_error_set(error, EINVAL,
2646                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2647                         item, "Not supported last point for range");
2648                 return -rte_errno;
2649         }
2650         rule->b_mask = TRUE;
2651         eth_mask = item->mask;
2652
2653         /* The Ethertype must not be matched; its mask must be zero. */
2654         if (eth_mask->type) {
2655                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2656                 rte_flow_error_set(error, EINVAL,
2657                         RTE_FLOW_ERROR_TYPE_ITEM,
2658                         item, "Not supported by fdir filter");
2659                 return -rte_errno;
2660         }
2661
2662         /* The source MAC address must not be matched; its mask must be zero. */
2663         for (j = 0; j < ETHER_ADDR_LEN; j++) {
2664                 if (eth_mask->src.addr_bytes[j]) {
2665                         memset(rule, 0,
2666                                sizeof(struct ixgbe_fdir_rule));
2667                         rte_flow_error_set(error, EINVAL,
2668                                 RTE_FLOW_ERROR_TYPE_ITEM,
2669                                 item, "Not supported by fdir filter");
2670                         return -rte_errno;
2671                 }
2672         }
2673         rule->mask.mac_addr_byte_mask = 0;
2674         for (j = 0; j < ETHER_ADDR_LEN; j++) {
2675                 /* It's a per byte mask. */
2676                 if (eth_mask->dst.addr_bytes[j] == 0xFF) {
2677                         rule->mask.mac_addr_byte_mask |= 0x1 << j;
2678                 } else if (eth_mask->dst.addr_bytes[j]) {
2679                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2680                         rte_flow_error_set(error, EINVAL,
2681                                 RTE_FLOW_ERROR_TYPE_ITEM,
2682                                 item, "Not supported by fdir filter");
2683                         return -rte_errno;
2684                 }
2685         }
2686
2687         /* When no VLAN item is given, treat the VLAN TCI as fully masked. */
2688         rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
2689
2690         if (item->spec) {
2691                 rule->b_spec = TRUE;
2692                 eth_spec = item->spec;
2693
2694                 /* Get the dst MAC. */
2695                 for (j = 0; j < ETHER_ADDR_LEN; j++) {
2696                         rule->ixgbe_fdir.formatted.inner_mac[j] =
2697                                 eth_spec->dst.addr_bytes[j];
2698                 }
2699         }
2700
2701         /**
2702          * Check if the next not void item is vlan or ipv4.
2703          * IPv6 is not supported.
2704          */
2705         item = next_no_void_pattern(pattern, item);
2706         if ((item->type != RTE_FLOW_ITEM_TYPE_VLAN) &&
2707                 (item->type != RTE_FLOW_ITEM_TYPE_IPV4)) {
2708                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2709                 rte_flow_error_set(error, EINVAL,
2710                         RTE_FLOW_ERROR_TYPE_ITEM,
2711                         item, "Not supported by fdir filter");
2712                 return -rte_errno;
2713         }
2714         /* Ranges ("last") are not supported. */
2715         if (item->last) {
2716                 rte_flow_error_set(error, EINVAL,
2717                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2718                         item, "Not supported last point for range");
2719                 return -rte_errno;
2720         }
2721
2722         if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
2723                 if (!(item->spec && item->mask)) {
2724                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2725                         rte_flow_error_set(error, EINVAL,
2726                                 RTE_FLOW_ERROR_TYPE_ITEM,
2727                                 item, "Not supported by fdir filter");
2728                         return -rte_errno;
2729                 }
2730
2731                 vlan_spec = item->spec;
2732                 vlan_mask = item->mask;
2733
2734                 rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
2735
2736                 rule->mask.vlan_tci_mask = vlan_mask->tci;
2737                 rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
2738                 /* More than one VLAN tag is not supported. */
2739
2740                 /* check if the next not void item is END */
2741                 item = next_no_void_pattern(pattern, item);
2742
2743                 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2744                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2745                         rte_flow_error_set(error, EINVAL,
2746                                 RTE_FLOW_ERROR_TYPE_ITEM,
2747                                 item, "Not supported by fdir filter");
2748                         return -rte_errno;
2749                 }
2750         }
2751
2752         /**
2753          * If the VLAN TCI mask is 0, the VLAN is a don't-care.
2754          * Do nothing.
2755          */
2756
2757         return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
2758 }
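
/*
 * Illustrative sketch (not part of the driver): a pattern of the shape the
 * tunnel parser above accepts, matching a fully-masked VXLAN VNI, the inner
 * destination MAC and an inner VLAN TCI. The leading outer ETH/IPv4 items,
 * the addresses and the TCI are example assumptions, not values taken from
 * this file.
 *
 *        struct rte_flow_item_vxlan vxlan_spec = {
 *                .vni = { 0x12, 0x34, 0x56 },
 *        };
 *        struct rte_flow_item_vxlan vxlan_mask = {
 *                .vni = { 0xFF, 0xFF, 0xFF },
 *        };
 *        struct rte_flow_item_eth inner_eth_spec = {
 *                .dst.addr_bytes = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
 *        };
 *        struct rte_flow_item_eth inner_eth_mask = {
 *                .dst.addr_bytes = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
 *        };
 *        struct rte_flow_item_vlan vlan_spec = { .tci = RTE_BE16(0x0123) };
 *        struct rte_flow_item_vlan vlan_mask = { .tci = RTE_BE16(0x0FFF) };
 *        struct rte_flow_item pattern[] = {
 *                { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *                { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *                { .type = RTE_FLOW_ITEM_TYPE_UDP },
 *                { .type = RTE_FLOW_ITEM_TYPE_VXLAN,
 *                  .spec = &vxlan_spec, .mask = &vxlan_mask },
 *                { .type = RTE_FLOW_ITEM_TYPE_ETH,
 *                  .spec = &inner_eth_spec, .mask = &inner_eth_mask },
 *                { .type = RTE_FLOW_ITEM_TYPE_VLAN,
 *                  .spec = &vlan_spec, .mask = &vlan_mask },
 *                { .type = RTE_FLOW_ITEM_TYPE_END },
 *        };
 */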
2759
2760 static int
2761 ixgbe_parse_fdir_filter(struct rte_eth_dev *dev,
2762                         const struct rte_flow_attr *attr,
2763                         const struct rte_flow_item pattern[],
2764                         const struct rte_flow_action actions[],
2765                         struct ixgbe_fdir_rule *rule,
2766                         struct rte_flow_error *error)
2767 {
2768         int ret;
2769         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2770         enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
2771
2772         if (hw->mac.type != ixgbe_mac_82599EB &&
2773                 hw->mac.type != ixgbe_mac_X540 &&
2774                 hw->mac.type != ixgbe_mac_X550 &&
2775                 hw->mac.type != ixgbe_mac_X550EM_x &&
2776                 hw->mac.type != ixgbe_mac_X550EM_a)
2777                 return -ENOTSUP;
2778
2779         ret = ixgbe_parse_fdir_filter_normal(dev, attr, pattern,
2780                                         actions, rule, error);
2781
2782         if (!ret)
2783                 goto step_next;
2784
2785         ret = ixgbe_parse_fdir_filter_tunnel(attr, pattern,
2786                                         actions, rule, error);
2787
2788         if (ret)
2789                 return ret;
2790
2791 step_next:
2792
2793         if (hw->mac.type == ixgbe_mac_82599EB &&
2794                 rule->fdirflags == IXGBE_FDIRCMD_DROP &&
2795                 (rule->ixgbe_fdir.formatted.src_port != 0 ||
2796                 rule->ixgbe_fdir.formatted.dst_port != 0))
2797                 return -ENOTSUP;
2798
2799         if (fdir_mode == RTE_FDIR_MODE_NONE ||
2800             fdir_mode != rule->mode)
2801                 return -ENOTSUP;
2802
2803         if (rule->queue >= dev->data->nb_rx_queues)
2804                 return -ENOTSUP;
2805
2806         return ret;
2807 }
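
/*
 * Illustrative sketch (not part of the driver): the mode check above rejects
 * a rule unless the port was configured with a matching fdir_conf.mode before
 * rte_eth_dev_configure(); tunnel rules likewise require the corresponding
 * tunnel mode. A minimal example for perfect-match rules on port 0; "ret",
 * "nb_rxq" and "nb_txq" are placeholders.
 *
 *        struct rte_eth_conf port_conf = {
 *                .fdir_conf = {
 *                        .mode = RTE_FDIR_MODE_PERFECT,
 *                        .pballoc = RTE_FDIR_PBALLOC_64K,
 *                },
 *        };
 *
 *        ret = rte_eth_dev_configure(0, nb_rxq, nb_txq, &port_conf);
 */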
2808
2809 static int
2810 ixgbe_parse_rss_filter(struct rte_eth_dev *dev,
2811                         const struct rte_flow_attr *attr,
2812                         const struct rte_flow_action actions[],
2813                         struct ixgbe_rte_flow_rss_conf *rss_conf,
2814                         struct rte_flow_error *error)
2815 {
2816         const struct rte_flow_action *act;
2817         const struct rte_flow_action_rss *rss;
2818         uint16_t n;
2819
2820         /**
2821          * RSS only supports forwarding;
2822          * check that the first not void action is RSS.
2823          */
2824         act = next_no_void_action(actions, NULL);
2825         if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
2826                 memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
2827                 rte_flow_error_set(error, EINVAL,
2828                         RTE_FLOW_ERROR_TYPE_ACTION,
2829                         act, "Not supported action.");
2830                 return -rte_errno;
2831         }
2832
2833         rss = (const struct rte_flow_action_rss *)act->conf;
2834
2835         if (!rss || !rss->queue_num) {
2836                 rte_flow_error_set(error, EINVAL,
2837                                 RTE_FLOW_ERROR_TYPE_ACTION,
2838                                 act,
2839                            "no valid queues");
2840                 return -rte_errno;
2841         }
2842
2843         for (n = 0; n < rss->queue_num; n++) {
2844                 if (rss->queue[n] >= dev->data->nb_rx_queues) {
2845                         rte_flow_error_set(error, EINVAL,
2846                                    RTE_FLOW_ERROR_TYPE_ACTION,
2847                                    act,
2848                                    "queue id > max number of queues");
2849                         return -rte_errno;
2850                 }
2851         }
2852
2853         if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT)
2854                 return rte_flow_error_set
2855                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
2856                          "non-default RSS hash functions are not supported");
2857         if (rss->level)
2858                 return rte_flow_error_set
2859                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
2860                          "a nonzero RSS encapsulation level is not supported");
2861         if (rss->key_len && rss->key_len != RTE_DIM(rss_conf->key))
2862                 return rte_flow_error_set
2863                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
2864                          "RSS hash key must be exactly 40 bytes");
2865         if (rss->queue_num > RTE_DIM(rss_conf->queue))
2866                 return rte_flow_error_set
2867                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
2868                          "too many queues for RSS context");
2869         if (ixgbe_rss_conf_init(rss_conf, rss))
2870                 return rte_flow_error_set
2871                         (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, act,
2872                          "RSS context initialization failure");
2873
2874         /* Check that the next not void action is END. */
2875         act = next_no_void_action(actions, act);
2876         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
2877                 memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
2878                 rte_flow_error_set(error, EINVAL,
2879                         RTE_FLOW_ERROR_TYPE_ACTION,
2880                         act, "Not supported action.");
2881                 return -rte_errno;
2882         }
2883
2884         /* parse attr */
2885         /* must be input direction */
2886         if (!attr->ingress) {
2887                 memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
2888                 rte_flow_error_set(error, EINVAL,
2889                                    RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
2890                                    attr, "Only support ingress.");
2891                 return -rte_errno;
2892         }
2893
2894         /* not supported */
2895         if (attr->egress) {
2896                 memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
2897                 rte_flow_error_set(error, EINVAL,
2898                                    RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
2899                                    attr, "Not support egress.");
2900                 return -rte_errno;
2901         }
2902
2903         /* not supported */
2904         if (attr->transfer) {
2905                 memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
2906                 rte_flow_error_set(error, EINVAL,
2907                                    RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
2908                                    attr, "No support for transfer.");
2909                 return -rte_errno;
2910         }
2911
2912         if (attr->priority > 0xFFFF) {
2913                 memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
2914                 rte_flow_error_set(error, EINVAL,
2915                                    RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
2916                                    attr, "Error priority.");
2917                 return -rte_errno;
2918         }
2919
2920         return 0;
2921 }
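
/*
 * Illustrative sketch (not part of the driver): an RSS action that passes the
 * checks above (default hash function, level 0, no explicit key, queue
 * indices below nb_rx_queues). Queue numbers and hash types are example
 * values.
 *
 *        static const uint16_t rss_queues[] = { 0, 1, 2, 3 };
 *        struct rte_flow_action_rss rss = {
 *                .func = RTE_ETH_HASH_FUNCTION_DEFAULT,
 *                .level = 0,
 *                .types = ETH_RSS_IP | ETH_RSS_TCP,
 *                .key_len = 0,
 *                .queue_num = RTE_DIM(rss_queues),
 *                .queue = rss_queues,
 *        };
 *        struct rte_flow_action actions[] = {
 *                { .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss },
 *                { .type = RTE_FLOW_ACTION_TYPE_END },
 *        };
 */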
2922
2923 /* remove the rss filter */
2924 static void
2925 ixgbe_clear_rss_filter(struct rte_eth_dev *dev)
2926 {
2927         struct ixgbe_filter_info *filter_info =
2928                 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
2929
2930         if (filter_info->rss_info.conf.queue_num)
2931                 ixgbe_config_rss_filter(dev, &filter_info->rss_info, FALSE);
2932 }
2933
2934 void
2935 ixgbe_filterlist_init(void)
2936 {
2937         TAILQ_INIT(&filter_ntuple_list);
2938         TAILQ_INIT(&filter_ethertype_list);
2939         TAILQ_INIT(&filter_syn_list);
2940         TAILQ_INIT(&filter_fdir_list);
2941         TAILQ_INIT(&filter_l2_tunnel_list);
2942         TAILQ_INIT(&filter_rss_list);
2943         TAILQ_INIT(&ixgbe_flow_list);
2944 }
2945
2946 void
2947 ixgbe_filterlist_flush(void)
2948 {
2949         struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
2950         struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
2951         struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
2952         struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2953         struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
2954         struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
2955         struct ixgbe_rss_conf_ele *rss_filter_ptr;
2956
2957         while ((ntuple_filter_ptr = TAILQ_FIRST(&filter_ntuple_list))) {
2958                 TAILQ_REMOVE(&filter_ntuple_list,
2959                                  ntuple_filter_ptr,
2960                                  entries);
2961                 rte_free(ntuple_filter_ptr);
2962         }
2963
2964         while ((ethertype_filter_ptr = TAILQ_FIRST(&filter_ethertype_list))) {
2965                 TAILQ_REMOVE(&filter_ethertype_list,
2966                                  ethertype_filter_ptr,
2967                                  entries);
2968                 rte_free(ethertype_filter_ptr);
2969         }
2970
2971         while ((syn_filter_ptr = TAILQ_FIRST(&filter_syn_list))) {
2972                 TAILQ_REMOVE(&filter_syn_list,
2973                                  syn_filter_ptr,
2974                                  entries);
2975                 rte_free(syn_filter_ptr);
2976         }
2977
2978         while ((l2_tn_filter_ptr = TAILQ_FIRST(&filter_l2_tunnel_list))) {
2979                 TAILQ_REMOVE(&filter_l2_tunnel_list,
2980                                  l2_tn_filter_ptr,
2981                                  entries);
2982                 rte_free(l2_tn_filter_ptr);
2983         }
2984
2985         while ((fdir_rule_ptr = TAILQ_FIRST(&filter_fdir_list))) {
2986                 TAILQ_REMOVE(&filter_fdir_list,
2987                                  fdir_rule_ptr,
2988                                  entries);
2989                 rte_free(fdir_rule_ptr);
2990         }
2991
2992         while ((rss_filter_ptr = TAILQ_FIRST(&filter_rss_list))) {
2993                 TAILQ_REMOVE(&filter_rss_list,
2994                                  rss_filter_ptr,
2995                                  entries);
2996                 rte_free(rss_filter_ptr);
2997         }
2998
2999         while ((ixgbe_flow_mem_ptr = TAILQ_FIRST(&ixgbe_flow_list))) {
3000                 TAILQ_REMOVE(&ixgbe_flow_list,
3001                                  ixgbe_flow_mem_ptr,
3002                                  entries);
3003                 rte_free(ixgbe_flow_mem_ptr->flow);
3004                 rte_free(ixgbe_flow_mem_ptr);
3005         }
3006 }
3007
3008 /**
3009  * Create a flow rule.
3010  * Theoretically one rule can match more than one filter type.
3011  * We will let it use the first filter type it hits.
3012  * So, the sequence of the parsers matters.
3013  */
3014 static struct rte_flow *
3015 ixgbe_flow_create(struct rte_eth_dev *dev,
3016                   const struct rte_flow_attr *attr,
3017                   const struct rte_flow_item pattern[],
3018                   const struct rte_flow_action actions[],
3019                   struct rte_flow_error *error)
3020 {
3021         int ret;
3022         struct rte_eth_ntuple_filter ntuple_filter;
3023         struct rte_eth_ethertype_filter ethertype_filter;
3024         struct rte_eth_syn_filter syn_filter;
3025         struct ixgbe_fdir_rule fdir_rule;
3026         struct rte_eth_l2_tunnel_conf l2_tn_filter;
3027         struct ixgbe_hw_fdir_info *fdir_info =
3028                 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
3029         struct ixgbe_rte_flow_rss_conf rss_conf;
3030         struct rte_flow *flow = NULL;
3031         struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
3032         struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
3033         struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
3034         struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
3035         struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
3036         struct ixgbe_rss_conf_ele *rss_filter_ptr;
3037         struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
3038         uint8_t first_mask = FALSE;
3039
3040         flow = rte_zmalloc("ixgbe_rte_flow", sizeof(struct rte_flow), 0);
3041         if (!flow) {
3042                 PMD_DRV_LOG(ERR, "failed to allocate memory");
3043                 return (struct rte_flow *)flow;
3044         }
3045         ixgbe_flow_mem_ptr = rte_zmalloc("ixgbe_flow_mem",
3046                         sizeof(struct ixgbe_flow_mem), 0);
3047         if (!ixgbe_flow_mem_ptr) {
3048                 PMD_DRV_LOG(ERR, "failed to allocate memory");
3049                 rte_free(flow);
3050                 return NULL;
3051         }
3052         ixgbe_flow_mem_ptr->flow = flow;
3053         TAILQ_INSERT_TAIL(&ixgbe_flow_list,
3054                                 ixgbe_flow_mem_ptr, entries);
3055
3056         memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
3057         ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
3058                         actions, &ntuple_filter, error);
3059
3060 #ifdef RTE_LIBRTE_SECURITY
3061         /* An ESP flow is not really a flow. */
3062         if (ntuple_filter.proto == IPPROTO_ESP)
3063                 return flow;
3064 #endif
3065
3066         if (!ret) {
3067                 ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
3068                 if (!ret) {
3069                         ntuple_filter_ptr = rte_zmalloc("ixgbe_ntuple_filter",
3070                                 sizeof(struct ixgbe_ntuple_filter_ele), 0);
3071                         if (!ntuple_filter_ptr) {
3072                                 PMD_DRV_LOG(ERR, "failed to allocate memory");
3073                                 goto out;
3074                         }
3075                         rte_memcpy(&ntuple_filter_ptr->filter_info,
3076                                 &ntuple_filter,
3077                                 sizeof(struct rte_eth_ntuple_filter));
3078                         TAILQ_INSERT_TAIL(&filter_ntuple_list,
3079                                 ntuple_filter_ptr, entries);
3080                         flow->rule = ntuple_filter_ptr;
3081                         flow->filter_type = RTE_ETH_FILTER_NTUPLE;
3082                         return flow;
3083                 }
3084                 goto out;
3085         }
3086
3087         memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
3088         ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
3089                                 actions, &ethertype_filter, error);
3090         if (!ret) {
3091                 ret = ixgbe_add_del_ethertype_filter(dev,
3092                                 &ethertype_filter, TRUE);
3093                 if (!ret) {
3094                         ethertype_filter_ptr = rte_zmalloc(
3095                                 "ixgbe_ethertype_filter",
3096                                 sizeof(struct ixgbe_ethertype_filter_ele), 0);
3097                         if (!ethertype_filter_ptr) {
3098                                 PMD_DRV_LOG(ERR, "failed to allocate memory");
3099                                 goto out;
3100                         }
3101                         rte_memcpy(&ethertype_filter_ptr->filter_info,
3102                                 &ethertype_filter,
3103                                 sizeof(struct rte_eth_ethertype_filter));
3104                         TAILQ_INSERT_TAIL(&filter_ethertype_list,
3105                                 ethertype_filter_ptr, entries);
3106                         flow->rule = ethertype_filter_ptr;
3107                         flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
3108                         return flow;
3109                 }
3110                 goto out;
3111         }
3112
3113         memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
3114         ret = ixgbe_parse_syn_filter(dev, attr, pattern,
3115                                 actions, &syn_filter, error);
3116         if (!ret) {
3117                 ret = ixgbe_syn_filter_set(dev, &syn_filter, TRUE);
3118                 if (!ret) {
3119                         syn_filter_ptr = rte_zmalloc("ixgbe_syn_filter",
3120                                 sizeof(struct ixgbe_eth_syn_filter_ele), 0);
3121                         if (!syn_filter_ptr) {
3122                                 PMD_DRV_LOG(ERR, "failed to allocate memory");
3123                                 goto out;
3124                         }
3125                         rte_memcpy(&syn_filter_ptr->filter_info,
3126                                 &syn_filter,
3127                                 sizeof(struct rte_eth_syn_filter));
3128                         TAILQ_INSERT_TAIL(&filter_syn_list,
3129                                 syn_filter_ptr,
3130                                 entries);
3131                         flow->rule = syn_filter_ptr;
3132                         flow->filter_type = RTE_ETH_FILTER_SYN;
3133                         return flow;
3134                 }
3135                 goto out;
3136         }
3137
3138         memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
3139         ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
3140                                 actions, &fdir_rule, error);
3141         if (!ret) {
3142                 /* A mask cannot be deleted. */
3143                 if (fdir_rule.b_mask) {
3144                         if (!fdir_info->mask_added) {
3145                                 /* It's the first time the mask is set. */
3146                                 rte_memcpy(&fdir_info->mask,
3147                                         &fdir_rule.mask,
3148                                         sizeof(struct ixgbe_hw_fdir_mask));
3149                                 fdir_info->flex_bytes_offset =
3150                                         fdir_rule.flex_bytes_offset;
3151
3152                                 if (fdir_rule.mask.flex_bytes_mask)
3153                                         ixgbe_fdir_set_flexbytes_offset(dev,
3154                                                 fdir_rule.flex_bytes_offset);
3155
3156                                 ret = ixgbe_fdir_set_input_mask(dev);
3157                                 if (ret)
3158                                         goto out;
3159
3160                                 fdir_info->mask_added = TRUE;
3161                                 first_mask = TRUE;
3162                         } else {
3163                                 /**
3164                                  * Only one global mask is supported;
3165                                  * all the masks must be the same.
3166                                  */
3167                                 ret = memcmp(&fdir_info->mask,
3168                                         &fdir_rule.mask,
3169                                         sizeof(struct ixgbe_hw_fdir_mask));
3170                                 if (ret)
3171                                         goto out;
3172
3173                                 if (fdir_info->flex_bytes_offset !=
3174                                                 fdir_rule.flex_bytes_offset)
3175                                         goto out;
3176                         }
3177                 }
3178
3179                 if (fdir_rule.b_spec) {
3180                         ret = ixgbe_fdir_filter_program(dev, &fdir_rule,
3181                                         FALSE, FALSE);
3182                         if (!ret) {
3183                                 fdir_rule_ptr = rte_zmalloc("ixgbe_fdir_filter",
3184                                         sizeof(struct ixgbe_fdir_rule_ele), 0);
3185                                 if (!fdir_rule_ptr) {
3186                                         PMD_DRV_LOG(ERR, "failed to allocate memory");
3187                                         goto out;
3188                                 }
3189                                 rte_memcpy(&fdir_rule_ptr->filter_info,
3190                                         &fdir_rule,
3191                                         sizeof(struct ixgbe_fdir_rule));
3192                                 TAILQ_INSERT_TAIL(&filter_fdir_list,
3193                                         fdir_rule_ptr, entries);
3194                                 flow->rule = fdir_rule_ptr;
3195                                 flow->filter_type = RTE_ETH_FILTER_FDIR;
3196
3197                                 return flow;
3198                         }
3199
3200                         if (ret) {
3201                                 /**
3202                                  * Clear the mask_added flag if programming
3203                                  * fails.
3204                                  */
3205                                 if (first_mask)
3206                                         fdir_info->mask_added = FALSE;
3207                                 goto out;
3208                         }
3209                 }
3210
3211                 goto out;
3212         }
3213
3214         memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
3215         ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
3216                                         actions, &l2_tn_filter, error);
3217         if (!ret) {
3218                 ret = ixgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_filter, FALSE);
3219                 if (!ret) {
3220                         l2_tn_filter_ptr = rte_zmalloc("ixgbe_l2_tn_filter",
3221                                 sizeof(struct ixgbe_eth_l2_tunnel_conf_ele), 0);
3222                         if (!l2_tn_filter_ptr) {
3223                                 PMD_DRV_LOG(ERR, "failed to allocate memory");
3224                                 goto out;
3225                         }
3226                         rte_memcpy(&l2_tn_filter_ptr->filter_info,
3227                                 &l2_tn_filter,
3228                                 sizeof(struct rte_eth_l2_tunnel_conf));
3229                         TAILQ_INSERT_TAIL(&filter_l2_tunnel_list,
3230                                 l2_tn_filter_ptr, entries);
3231                         flow->rule = l2_tn_filter_ptr;
3232                         flow->filter_type = RTE_ETH_FILTER_L2_TUNNEL;
3233                         return flow;
3234                 }
3235         }
3236
3237         memset(&rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
3238         ret = ixgbe_parse_rss_filter(dev, attr,
3239                                         actions, &rss_conf, error);
3240         if (!ret) {
3241                 ret = ixgbe_config_rss_filter(dev, &rss_conf, TRUE);
3242                 if (!ret) {
3243                         rss_filter_ptr = rte_zmalloc("ixgbe_rss_filter",
3244                                 sizeof(struct ixgbe_rss_conf_ele), 0);
3245                         if (!rss_filter_ptr) {
3246                                 PMD_DRV_LOG(ERR, "failed to allocate memory");
3247                                 goto out;
3248                         }
3249                         ixgbe_rss_conf_init(&rss_filter_ptr->filter_info,
3250                                             &rss_conf.conf);
3251                         TAILQ_INSERT_TAIL(&filter_rss_list,
3252                                 rss_filter_ptr, entries);
3253                         flow->rule = rss_filter_ptr;
3254                         flow->filter_type = RTE_ETH_FILTER_HASH;
3255                         return flow;
3256                 }
3257         }
3258
3259 out:
3260         TAILQ_REMOVE(&ixgbe_flow_list,
3261                 ixgbe_flow_mem_ptr, entries);
3262         rte_flow_error_set(error, -ret,
3263                            RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
3264                            "Failed to create flow.");
3265         rte_free(ixgbe_flow_mem_ptr);
3266         rte_free(flow);
3267         return NULL;
3268 }
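
/*
 * Illustrative sketch (not part of the driver): because the parsers above are
 * tried in a fixed order (ntuple, ethertype, SYN, fdir, L2 tunnel, RSS), a
 * plain TCP 5-tuple pattern such as the one below is meant to be claimed by
 * the ntuple parser and never reaches the fdir parser. The addresses and
 * ports are example values.
 *
 *        struct rte_flow_item_ipv4 ip_spec = {
 *                .hdr.dst_addr = RTE_BE32(0xC0A80101),
 *        };
 *        struct rte_flow_item_ipv4 ip_mask = {
 *                .hdr.dst_addr = RTE_BE32(0xFFFFFFFF),
 *        };
 *        struct rte_flow_item_tcp tcp_spec = { .hdr.dst_port = RTE_BE16(80) };
 *        struct rte_flow_item_tcp tcp_mask = { .hdr.dst_port = RTE_BE16(0xFFFF) };
 *        struct rte_flow_item pattern[] = {
 *                { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *                { .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *                  .spec = &ip_spec, .mask = &ip_mask },
 *                { .type = RTE_FLOW_ITEM_TYPE_TCP,
 *                  .spec = &tcp_spec, .mask = &tcp_mask },
 *                { .type = RTE_FLOW_ITEM_TYPE_END },
 *        };
 */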
3269
3270 /**
3271  * Check if the flow rule is supported by ixgbe.
3272  * It only checks the format; it does not guarantee that the rule can be
3273  * programmed into the HW, because there may not be enough room for it.
3274  */
3275 static int
3276 ixgbe_flow_validate(struct rte_eth_dev *dev,
3277                 const struct rte_flow_attr *attr,
3278                 const struct rte_flow_item pattern[],
3279                 const struct rte_flow_action actions[],
3280                 struct rte_flow_error *error)
3281 {
3282         struct rte_eth_ntuple_filter ntuple_filter;
3283         struct rte_eth_ethertype_filter ethertype_filter;
3284         struct rte_eth_syn_filter syn_filter;
3285         struct rte_eth_l2_tunnel_conf l2_tn_filter;
3286         struct ixgbe_fdir_rule fdir_rule;
3287         struct ixgbe_rte_flow_rss_conf rss_conf;
3288         int ret;
3289
3290         memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
3291         ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
3292                                 actions, &ntuple_filter, error);
3293         if (!ret)
3294                 return 0;
3295
3296         memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
3297         ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
3298                                 actions, &ethertype_filter, error);
3299         if (!ret)
3300                 return 0;
3301
3302         memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
3303         ret = ixgbe_parse_syn_filter(dev, attr, pattern,
3304                                 actions, &syn_filter, error);
3305         if (!ret)
3306                 return 0;
3307
3308         memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
3309         ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
3310                                 actions, &fdir_rule, error);
3311         if (!ret)
3312                 return 0;
3313
3314         memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
3315         ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
3316                                 actions, &l2_tn_filter, error);
3317         if (!ret)
3318                 return 0;
3319
3320         memset(&rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
3321         ret = ixgbe_parse_rss_filter(dev, attr,
3322                                         actions, &rss_conf, error);
3323
3324         return ret;
3325 }
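
/*
 * Illustrative sketch (not part of the driver): an application typically
 * validates a rule before creating it, through the generic rte_flow API which
 * dispatches into the callbacks defined in this file. "port_id", "attr",
 * "pattern" and "actions" are placeholders.
 *
 *        struct rte_flow_error err;
 *        struct rte_flow *f;
 *
 *        if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0) {
 *                f = rte_flow_create(port_id, &attr, pattern, actions, &err);
 *                if (f == NULL)
 *                        printf("flow create failed: %s\n",
 *                               err.message ? err.message : "(no message)");
 *        }
 */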
3326
3327 /* Destroy a flow rule on ixgbe. */
3328 static int
3329 ixgbe_flow_destroy(struct rte_eth_dev *dev,
3330                 struct rte_flow *flow,
3331                 struct rte_flow_error *error)
3332 {
3333         int ret;
3334         struct rte_flow *pmd_flow = flow;
3335         enum rte_filter_type filter_type = pmd_flow->filter_type;
3336         struct rte_eth_ntuple_filter ntuple_filter;
3337         struct rte_eth_ethertype_filter ethertype_filter;
3338         struct rte_eth_syn_filter syn_filter;
3339         struct ixgbe_fdir_rule fdir_rule;
3340         struct rte_eth_l2_tunnel_conf l2_tn_filter;
3341         struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
3342         struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
3343         struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
3344         struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
3345         struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
3346         struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
3347         struct ixgbe_hw_fdir_info *fdir_info =
3348                 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
3349         struct ixgbe_rss_conf_ele *rss_filter_ptr;
3350
3351         switch (filter_type) {
3352         case RTE_ETH_FILTER_NTUPLE:
3353                 ntuple_filter_ptr = (struct ixgbe_ntuple_filter_ele *)
3354                                         pmd_flow->rule;
3355                 rte_memcpy(&ntuple_filter,
3356                         &ntuple_filter_ptr->filter_info,
3357                         sizeof(struct rte_eth_ntuple_filter));
3358                 ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, FALSE);
3359                 if (!ret) {
3360                         TAILQ_REMOVE(&filter_ntuple_list,
3361                         ntuple_filter_ptr, entries);
3362                         rte_free(ntuple_filter_ptr);
3363                 }
3364                 break;
3365         case RTE_ETH_FILTER_ETHERTYPE:
3366                 ethertype_filter_ptr = (struct ixgbe_ethertype_filter_ele *)
3367                                         pmd_flow->rule;
3368                 rte_memcpy(&ethertype_filter,
3369                         &ethertype_filter_ptr->filter_info,
3370                         sizeof(struct rte_eth_ethertype_filter));
3371                 ret = ixgbe_add_del_ethertype_filter(dev,
3372                                 &ethertype_filter, FALSE);
3373                 if (!ret) {
3374                         TAILQ_REMOVE(&filter_ethertype_list,
3375                                 ethertype_filter_ptr, entries);
3376                         rte_free(ethertype_filter_ptr);
3377                 }
3378                 break;
3379         case RTE_ETH_FILTER_SYN:
3380                 syn_filter_ptr = (struct ixgbe_eth_syn_filter_ele *)
3381                                 pmd_flow->rule;
3382                 rte_memcpy(&syn_filter,
3383                         &syn_filter_ptr->filter_info,
3384                         sizeof(struct rte_eth_syn_filter));
3385                 ret = ixgbe_syn_filter_set(dev, &syn_filter, FALSE);
3386                 if (!ret) {
3387                         TAILQ_REMOVE(&filter_syn_list,
3388                                 syn_filter_ptr, entries);
3389                         rte_free(syn_filter_ptr);
3390                 }
3391                 break;
3392         case RTE_ETH_FILTER_FDIR:
3393                 fdir_rule_ptr = (struct ixgbe_fdir_rule_ele *)pmd_flow->rule;
3394                 rte_memcpy(&fdir_rule,
3395                         &fdir_rule_ptr->filter_info,
3396                         sizeof(struct ixgbe_fdir_rule));
3397                 ret = ixgbe_fdir_filter_program(dev, &fdir_rule, TRUE, FALSE);
3398                 if (!ret) {
3399                         TAILQ_REMOVE(&filter_fdir_list,
3400                                 fdir_rule_ptr, entries);
3401                         rte_free(fdir_rule_ptr);
3402                         if (TAILQ_EMPTY(&filter_fdir_list))
3403                                 fdir_info->mask_added = false;
3404                 }
3405                 break;
3406         case RTE_ETH_FILTER_L2_TUNNEL:
3407                 l2_tn_filter_ptr = (struct ixgbe_eth_l2_tunnel_conf_ele *)
3408                                 pmd_flow->rule;
3409                 rte_memcpy(&l2_tn_filter, &l2_tn_filter_ptr->filter_info,
3410                         sizeof(struct rte_eth_l2_tunnel_conf));
3411                 ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_filter);
3412                 if (!ret) {
3413                         TAILQ_REMOVE(&filter_l2_tunnel_list,
3414                                 l2_tn_filter_ptr, entries);
3415                         rte_free(l2_tn_filter_ptr);
3416                 }
3417                 break;
3418         case RTE_ETH_FILTER_HASH:
3419                 rss_filter_ptr = (struct ixgbe_rss_conf_ele *)
3420                                 pmd_flow->rule;
3421                 ret = ixgbe_config_rss_filter(dev,
3422                                         &rss_filter_ptr->filter_info, FALSE);
3423                 if (!ret) {
3424                         TAILQ_REMOVE(&filter_rss_list,
3425                                 rss_filter_ptr, entries);
3426                         rte_free(rss_filter_ptr);
3427                 }
3428                 break;
3429         default:
3430                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
3431                             filter_type);
3432                 ret = -EINVAL;
3433                 break;
3434         }
3435
3436         if (ret) {
3437                 rte_flow_error_set(error, EINVAL,
3438                                 RTE_FLOW_ERROR_TYPE_HANDLE,
3439                                 NULL, "Failed to destroy flow");
3440                 return ret;
3441         }
3442
3443         TAILQ_FOREACH(ixgbe_flow_mem_ptr, &ixgbe_flow_list, entries) {
3444                 if (ixgbe_flow_mem_ptr->flow == pmd_flow) {
3445                         TAILQ_REMOVE(&ixgbe_flow_list,
3446                                 ixgbe_flow_mem_ptr, entries);
3447                         rte_free(ixgbe_flow_mem_ptr);
3448                 }
3449         }
3450         rte_free(flow);
3451
3452         return ret;
3453 }
3454
3455 /*  Destroy all flow rules associated with a port on ixgbe. */
3456 static int
3457 ixgbe_flow_flush(struct rte_eth_dev *dev,
3458                 struct rte_flow_error *error)
3459 {
3460         int ret = 0;
3461
3462         ixgbe_clear_all_ntuple_filter(dev);
3463         ixgbe_clear_all_ethertype_filter(dev);
3464         ixgbe_clear_syn_filter(dev);
3465
3466         ret = ixgbe_clear_all_fdir_filter(dev);
3467         if (ret < 0) {
3468                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
3469                                         NULL, "Failed to flush rule");
3470                 return ret;
3471         }
3472
3473         ret = ixgbe_clear_all_l2_tn_filter(dev);
3474         if (ret < 0) {
3475                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
3476                                         NULL, "Failed to flush rule");
3477                 return ret;
3478         }
3479
3480         ixgbe_clear_rss_filter(dev);
3481
3482         ixgbe_filterlist_flush();
3483
3484         return 0;
3485 }
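
/*
 * Illustrative sketch (not part of the driver): application-side teardown of
 * a single rule and of all rules on a port. "port_id" is a placeholder and
 * "f" a flow previously returned by rte_flow_create().
 *
 *        struct rte_flow_error err;
 *
 *        if (rte_flow_destroy(port_id, f, &err) != 0)
 *                printf("destroy failed: %s\n",
 *                       err.message ? err.message : "(no message)");
 *
 *        if (rte_flow_flush(port_id, &err) != 0)
 *                printf("flush failed: %s\n",
 *                       err.message ? err.message : "(no message)");
 */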
3486
3487 const struct rte_flow_ops ixgbe_flow_ops = {
3488         .validate = ixgbe_flow_validate,
3489         .create = ixgbe_flow_create,
3490         .destroy = ixgbe_flow_destroy,
3491         .flush = ixgbe_flow_flush,
3492 };
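
/*
 * Illustrative note (not part of the driver): the generic rte_flow layer is
 * assumed to retrieve this table through the driver's filter_ctrl callback
 * with RTE_ETH_FILTER_GENERIC / RTE_ETH_FILTER_GET, roughly:
 *
 *        case RTE_ETH_FILTER_GENERIC:
 *                if (filter_op != RTE_ETH_FILTER_GET)
 *                        return -EINVAL;
 *                *(const void **)arg = &ixgbe_flow_ops;
 *                break;
 */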