1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <sys/queue.h>
35 #include <stdio.h>
36 #include <errno.h>
37 #include <stdint.h>
38 #include <string.h>
39 #include <unistd.h>
40 #include <stdarg.h>
41 #include <inttypes.h>
42 #include <netinet/in.h>
43 #include <rte_byteorder.h>
44 #include <rte_common.h>
45 #include <rte_cycles.h>
46
47 #include <rte_interrupts.h>
48 #include <rte_log.h>
49 #include <rte_debug.h>
50 #include <rte_pci.h>
51 #include <rte_atomic.h>
52 #include <rte_branch_prediction.h>
53 #include <rte_memory.h>
54 #include <rte_eal.h>
55 #include <rte_alarm.h>
56 #include <rte_ether.h>
57 #include <rte_ethdev.h>
58 #include <rte_malloc.h>
59 #include <rte_random.h>
60 #include <rte_dev.h>
61 #include <rte_hash_crc.h>
62 #include <rte_flow.h>
63 #include <rte_flow_driver.h>
64
65 #include "ixgbe_logs.h"
66 #include "base/ixgbe_api.h"
67 #include "base/ixgbe_vf.h"
68 #include "base/ixgbe_common.h"
69 #include "ixgbe_ethdev.h"
70 #include "ixgbe_bypass.h"
71 #include "ixgbe_rxtx.h"
72 #include "base/ixgbe_type.h"
73 #include "base/ixgbe_phy.h"
74 #include "rte_pmd_ixgbe.h"
75
76
77 #define IXGBE_MIN_N_TUPLE_PRIO 1
78 #define IXGBE_MAX_N_TUPLE_PRIO 7
79 #define IXGBE_MAX_FLX_SOURCE_OFF 62
80
81 /* ntuple filter list structure */
82 struct ixgbe_ntuple_filter_ele {
83         TAILQ_ENTRY(ixgbe_ntuple_filter_ele) entries;
84         struct rte_eth_ntuple_filter filter_info;
85 };
86 /* ethertype filter list structure */
87 struct ixgbe_ethertype_filter_ele {
88         TAILQ_ENTRY(ixgbe_ethertype_filter_ele) entries;
89         struct rte_eth_ethertype_filter filter_info;
90 };
91 /* syn filter list structure */
92 struct ixgbe_eth_syn_filter_ele {
93         TAILQ_ENTRY(ixgbe_eth_syn_filter_ele) entries;
94         struct rte_eth_syn_filter filter_info;
95 };
96 /* fdir filter list structure */
97 struct ixgbe_fdir_rule_ele {
98         TAILQ_ENTRY(ixgbe_fdir_rule_ele) entries;
99         struct ixgbe_fdir_rule filter_info;
100 };
101 /* l2_tunnel filter list structure */
102 struct ixgbe_eth_l2_tunnel_conf_ele {
103         TAILQ_ENTRY(ixgbe_eth_l2_tunnel_conf_ele) entries;
104         struct rte_eth_l2_tunnel_conf filter_info;
105 };
106 /* ixgbe_flow memory list structure */
107 struct ixgbe_flow_mem {
108         TAILQ_ENTRY(ixgbe_flow_mem) entries;
109         struct rte_flow *flow;
110 };
111
112 TAILQ_HEAD(ixgbe_ntuple_filter_list, ixgbe_ntuple_filter_ele);
113 TAILQ_HEAD(ixgbe_ethertype_filter_list, ixgbe_ethertype_filter_ele);
114 TAILQ_HEAD(ixgbe_syn_filter_list, ixgbe_eth_syn_filter_ele);
115 TAILQ_HEAD(ixgbe_fdir_rule_filter_list, ixgbe_fdir_rule_ele);
116 TAILQ_HEAD(ixgbe_l2_tunnel_filter_list, ixgbe_eth_l2_tunnel_conf_ele);
117 TAILQ_HEAD(ixgbe_flow_mem_list, ixgbe_flow_mem);
118
119 static struct ixgbe_ntuple_filter_list filter_ntuple_list;
120 static struct ixgbe_ethertype_filter_list filter_ethertype_list;
121 static struct ixgbe_syn_filter_list filter_syn_list;
122 static struct ixgbe_fdir_rule_filter_list filter_fdir_list;
123 static struct ixgbe_l2_tunnel_filter_list filter_l2_tunnel_list;
124 static struct ixgbe_flow_mem_list ixgbe_flow_list;
125
126 /**
127  * An endless loop cannot happen, given these assumptions:
128  * 1. there is at least one non-void item (END);
129  * 2. cur is before END.
130  */
131 static inline
132 const struct rte_flow_item *next_no_void_pattern(
133                 const struct rte_flow_item pattern[],
134                 const struct rte_flow_item *cur)
135 {
136         const struct rte_flow_item *next =
137                 cur ? cur + 1 : &pattern[0];
138         while (1) {
139                 if (next->type != RTE_FLOW_ITEM_TYPE_VOID)
140                         return next;
141                 next++;
142         }
143 }
144
145 static inline
146 const struct rte_flow_action *next_no_void_action(
147                 const struct rte_flow_action actions[],
148                 const struct rte_flow_action *cur)
149 {
150         const struct rte_flow_action *next =
151                 cur ? cur + 1 : &actions[0];
152         while (1) {
153                 if (next->type != RTE_FLOW_ACTION_TYPE_VOID)
154                         return next;
155                 next++;
156         }
157 }
158
159 /**
160  * Please be aware there is an assumption for all the parsers:
161  * rte_flow_item uses big endian, while rte_flow_attr and
162  * rte_flow_action use CPU order.
163  * Because the pattern is used to describe packets,
164  * the packets normally use network order.
165  */
166
167 /**
168  * Parse the rule to see if it is an n-tuple rule,
169  * and get the n-tuple filter info along the way.
170  * pattern:
171  * The first not void item can be ETH or IPV4.
172  * The second not void item must be IPV4 if the first one is ETH.
173  * The third not void item must be UDP or TCP.
174  * The next not void item must be END.
175  * action:
176  * The first not void action should be QUEUE.
177  * The next not void action should be END.
178  * pattern example:
179  * ITEM         Spec                    Mask
180  * ETH          NULL                    NULL
181  * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
182  *              dst_addr 192.167.3.50   0xFFFFFFFF
183  *              next_proto_id   17      0xFF
184  * UDP/TCP/     src_port        80      0xFFFF
185  * SCTP         dst_port        80      0xFFFF
186  * END
187  * other members in mask and spec should be set to 0x00.
188  * item->last should be NULL.
189  *
190  * Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY.
191  *
192  */
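/*
 * Illustrative sketch (not part of the driver): roughly what an application
 * could pass to rte_flow_create() to express the example above.  The
 * addresses, ports and queue index are hypothetical; IP addresses and L4
 * ports must be supplied in network byte order.
 *
 *   struct rte_flow_attr attr = { .ingress = 1 };
 *   struct rte_flow_item_ipv4 ip_spec = { .hdr = {
 *           .src_addr = rte_cpu_to_be_32(IPv4(192, 168, 1, 20)),
 *           .dst_addr = rte_cpu_to_be_32(IPv4(192, 167, 3, 50)),
 *           .next_proto_id = IPPROTO_UDP } };
 *   struct rte_flow_item_ipv4 ip_mask = { .hdr = {
 *           .src_addr = UINT32_MAX, .dst_addr = UINT32_MAX,
 *           .next_proto_id = UINT8_MAX } };
 *   struct rte_flow_item_udp udp_spec = { .hdr = {
 *           .src_port = rte_cpu_to_be_16(80),
 *           .dst_port = rte_cpu_to_be_16(80) } };
 *   struct rte_flow_item_udp udp_mask = { .hdr = {
 *           .src_port = UINT16_MAX, .dst_port = UINT16_MAX } };
 *   struct rte_flow_action_queue queue = { .index = 1 };
 *   struct rte_flow_item pattern[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *           { .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *             .spec = &ip_spec, .mask = &ip_mask },
 *           { .type = RTE_FLOW_ITEM_TYPE_UDP,
 *             .spec = &udp_spec, .mask = &udp_mask },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 *   struct rte_flow_action actions[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 */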
193 static int
194 cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
195                          const struct rte_flow_item pattern[],
196                          const struct rte_flow_action actions[],
197                          struct rte_eth_ntuple_filter *filter,
198                          struct rte_flow_error *error)
199 {
200         const struct rte_flow_item *item;
201         const struct rte_flow_action *act;
202         const struct rte_flow_item_ipv4 *ipv4_spec;
203         const struct rte_flow_item_ipv4 *ipv4_mask;
204         const struct rte_flow_item_tcp *tcp_spec;
205         const struct rte_flow_item_tcp *tcp_mask;
206         const struct rte_flow_item_udp *udp_spec;
207         const struct rte_flow_item_udp *udp_mask;
208         const struct rte_flow_item_sctp *sctp_spec;
209         const struct rte_flow_item_sctp *sctp_mask;
210
211         if (!pattern) {
212                 rte_flow_error_set(error,
213                         EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
214                         NULL, "NULL pattern.");
215                 return -rte_errno;
216         }
217
218         if (!actions) {
219                 rte_flow_error_set(error, EINVAL,
220                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
221                                    NULL, "NULL action.");
222                 return -rte_errno;
223         }
224         if (!attr) {
225                 rte_flow_error_set(error, EINVAL,
226                                    RTE_FLOW_ERROR_TYPE_ATTR,
227                                    NULL, "NULL attribute.");
228                 return -rte_errno;
229         }
230
231 #ifdef RTE_LIBRTE_SECURITY
232         /**
233          *  Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY
234          */
235         act = next_no_void_action(actions, NULL);
236         if (act->type == RTE_FLOW_ACTION_TYPE_SECURITY) {
237                 const void *conf = act->conf;
238                 /* check if the next not void item is END */
239                 act = next_no_void_action(actions, act);
240                 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
241                         memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
242                         rte_flow_error_set(error, EINVAL,
243                                 RTE_FLOW_ERROR_TYPE_ACTION,
244                                 act, "Not supported action.");
245                         return -rte_errno;
246                 }
247
248                 /* get the IP pattern*/
249                 item = next_no_void_pattern(pattern, NULL);
250                 while (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
251                                 item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
252                         if (item->last ||
253                                         item->type == RTE_FLOW_ITEM_TYPE_END) {
254                                 rte_flow_error_set(error, EINVAL,
255                                         RTE_FLOW_ERROR_TYPE_ITEM,
256                                         item, "IP pattern missing.");
257                                 return -rte_errno;
258                         }
259                         item = next_no_void_pattern(pattern, item);
260                 }
261
262                 filter->proto = IPPROTO_ESP;
263                 return ixgbe_crypto_add_ingress_sa_from_flow(conf, item->spec,
264                                         item->type == RTE_FLOW_ITEM_TYPE_IPV6);
265         }
266 #endif
267
268         /* the first not void item can be MAC or IPv4 */
269         item = next_no_void_pattern(pattern, NULL);
270
271         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
272             item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
273                 rte_flow_error_set(error, EINVAL,
274                         RTE_FLOW_ERROR_TYPE_ITEM,
275                         item, "Not supported by ntuple filter");
276                 return -rte_errno;
277         }
278         /* Skip Ethernet */
279         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
280                 /*Not supported last point for range*/
281                 if (item->last) {
282                         rte_flow_error_set(error,
283                           EINVAL,
284                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
285                           item, "Not supported last point for range");
286                         return -rte_errno;
287
288                 }
289                 /* if the first item is MAC, the content should be NULL */
290                 if (item->spec || item->mask) {
291                         rte_flow_error_set(error, EINVAL,
292                                 RTE_FLOW_ERROR_TYPE_ITEM,
293                                 item, "Not supported by ntuple filter");
294                         return -rte_errno;
295                 }
296                 /* check if the next not void item is IPv4 */
297                 item = next_no_void_pattern(pattern, item);
298                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
299                         rte_flow_error_set(error,
300                           EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
301                           item, "Not supported by ntuple filter");
302                         return -rte_errno;
303                 }
304         }
305
306         /* get the IPv4 info */
307         if (!item->spec || !item->mask) {
308                 rte_flow_error_set(error, EINVAL,
309                         RTE_FLOW_ERROR_TYPE_ITEM,
310                         item, "Invalid ntuple mask");
311                 return -rte_errno;
312         }
313         /*Not supported last point for range*/
314         if (item->last) {
315                 rte_flow_error_set(error, EINVAL,
316                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
317                         item, "Not supported last point for range");
318                 return -rte_errno;
319
320         }
321
322         ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
323         /**
324          * Only support src & dst addresses, protocol,
325          * others should be masked.
326          */
327         if (ipv4_mask->hdr.version_ihl ||
328             ipv4_mask->hdr.type_of_service ||
329             ipv4_mask->hdr.total_length ||
330             ipv4_mask->hdr.packet_id ||
331             ipv4_mask->hdr.fragment_offset ||
332             ipv4_mask->hdr.time_to_live ||
333             ipv4_mask->hdr.hdr_checksum) {
334                 rte_flow_error_set(error, EINVAL,
335                         RTE_FLOW_ERROR_TYPE_ITEM,
336                         item, "Not supported by ntuple filter");
337                 return -rte_errno;
338         }
339
340         filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
341         filter->src_ip_mask = ipv4_mask->hdr.src_addr;
342         filter->proto_mask  = ipv4_mask->hdr.next_proto_id;
343
344         ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
345         filter->dst_ip = ipv4_spec->hdr.dst_addr;
346         filter->src_ip = ipv4_spec->hdr.src_addr;
347         filter->proto  = ipv4_spec->hdr.next_proto_id;
348
349         /* check if the next not void item is TCP or UDP */
350         item = next_no_void_pattern(pattern, item);
351         if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
352             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
353             item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
354             item->type != RTE_FLOW_ITEM_TYPE_END) {
355                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
356                 rte_flow_error_set(error, EINVAL,
357                         RTE_FLOW_ERROR_TYPE_ITEM,
358                         item, "Not supported by ntuple filter");
359                 return -rte_errno;
360         }
361
362         /* get the TCP/UDP info */
363         if ((item->type != RTE_FLOW_ITEM_TYPE_END) &&
364                 (!item->spec || !item->mask)) {
365                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
366                 rte_flow_error_set(error, EINVAL,
367                         RTE_FLOW_ERROR_TYPE_ITEM,
368                         item, "Invalid ntuple mask");
369                 return -rte_errno;
370         }
371
372         /*Not supported last point for range*/
373         if (item->last) {
374                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
375                 rte_flow_error_set(error, EINVAL,
376                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
377                         item, "Not supported last point for range");
378                 return -rte_errno;
379
380         }
381
382         if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
383                 tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
384
385                 /**
386                  * Only support src & dst ports, tcp flags,
387                  * others should be masked.
388                  */
389                 if (tcp_mask->hdr.sent_seq ||
390                     tcp_mask->hdr.recv_ack ||
391                     tcp_mask->hdr.data_off ||
392                     tcp_mask->hdr.rx_win ||
393                     tcp_mask->hdr.cksum ||
394                     tcp_mask->hdr.tcp_urp) {
395                         memset(filter, 0,
396                                 sizeof(struct rte_eth_ntuple_filter));
397                         rte_flow_error_set(error, EINVAL,
398                                 RTE_FLOW_ERROR_TYPE_ITEM,
399                                 item, "Not supported by ntuple filter");
400                         return -rte_errno;
401                 }
402
403                 filter->dst_port_mask  = tcp_mask->hdr.dst_port;
404                 filter->src_port_mask  = tcp_mask->hdr.src_port;
405                 if (tcp_mask->hdr.tcp_flags == 0xFF) {
406                         filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
407                 } else if (!tcp_mask->hdr.tcp_flags) {
408                         filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
409                 } else {
410                         memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
411                         rte_flow_error_set(error, EINVAL,
412                                 RTE_FLOW_ERROR_TYPE_ITEM,
413                                 item, "Not supported by ntuple filter");
414                         return -rte_errno;
415                 }
416
417                 tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
418                 filter->dst_port  = tcp_spec->hdr.dst_port;
419                 filter->src_port  = tcp_spec->hdr.src_port;
420                 filter->tcp_flags = tcp_spec->hdr.tcp_flags;
421         } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
422                 udp_mask = (const struct rte_flow_item_udp *)item->mask;
423
424                 /**
425                  * Only support src & dst ports,
426                  * others should be masked.
427                  */
428                 if (udp_mask->hdr.dgram_len ||
429                     udp_mask->hdr.dgram_cksum) {
430                         memset(filter, 0,
431                                 sizeof(struct rte_eth_ntuple_filter));
432                         rte_flow_error_set(error, EINVAL,
433                                 RTE_FLOW_ERROR_TYPE_ITEM,
434                                 item, "Not supported by ntuple filter");
435                         return -rte_errno;
436                 }
437
438                 filter->dst_port_mask = udp_mask->hdr.dst_port;
439                 filter->src_port_mask = udp_mask->hdr.src_port;
440
441                 udp_spec = (const struct rte_flow_item_udp *)item->spec;
442                 filter->dst_port = udp_spec->hdr.dst_port;
443                 filter->src_port = udp_spec->hdr.src_port;
444         } else if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
445                 sctp_mask = (const struct rte_flow_item_sctp *)item->mask;
446
447                 /**
448                  * Only support src & dst ports,
449                  * others should be masked.
450                  */
451                 if (sctp_mask->hdr.tag ||
452                     sctp_mask->hdr.cksum) {
453                         memset(filter, 0,
454                                 sizeof(struct rte_eth_ntuple_filter));
455                         rte_flow_error_set(error, EINVAL,
456                                 RTE_FLOW_ERROR_TYPE_ITEM,
457                                 item, "Not supported by ntuple filter");
458                         return -rte_errno;
459                 }
460
461                 filter->dst_port_mask = sctp_mask->hdr.dst_port;
462                 filter->src_port_mask = sctp_mask->hdr.src_port;
463
464                 sctp_spec = (const struct rte_flow_item_sctp *)item->spec;
465                 filter->dst_port = sctp_spec->hdr.dst_port;
466                 filter->src_port = sctp_spec->hdr.src_port;
467         } else {
468                 goto action;
469         }
470
471         /* check if the next not void item is END */
472         item = next_no_void_pattern(pattern, item);
473         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
474                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
475                 rte_flow_error_set(error, EINVAL,
476                         RTE_FLOW_ERROR_TYPE_ITEM,
477                         item, "Not supported by ntuple filter");
478                 return -rte_errno;
479         }
480
481 action:
482
483         /**
484          * n-tuple only supports forwarding,
485          * check if the first not void action is QUEUE.
486          */
487         act = next_no_void_action(actions, NULL);
488         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
489                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
490                 rte_flow_error_set(error, EINVAL,
491                         RTE_FLOW_ERROR_TYPE_ACTION,
492                         act, "Not supported action.");
493                 return -rte_errno;
494         }
495         filter->queue =
496                 ((const struct rte_flow_action_queue *)act->conf)->index;
497
498         /* check if the next not void item is END */
499         act = next_no_void_action(actions, act);
500         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
501                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
502                 rte_flow_error_set(error, EINVAL,
503                         RTE_FLOW_ERROR_TYPE_ACTION,
504                         act, "Not supported action.");
505                 return -rte_errno;
506         }
507
508         /* parse attr */
509         /* must be input direction */
510         if (!attr->ingress) {
511                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
512                 rte_flow_error_set(error, EINVAL,
513                                    RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
514                                    attr, "Only support ingress.");
515                 return -rte_errno;
516         }
517
518         /* not supported */
519         if (attr->egress) {
520                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
521                 rte_flow_error_set(error, EINVAL,
522                                    RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
523                                    attr, "Not support egress.");
524                 return -rte_errno;
525         }
526
527         if (attr->priority > 0xFFFF) {
528                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
529                 rte_flow_error_set(error, EINVAL,
530                                    RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
531                                    attr, "Error priority.");
532                 return -rte_errno;
533         }
534         filter->priority = (uint16_t)attr->priority;
535         if (attr->priority < IXGBE_MIN_N_TUPLE_PRIO ||
536             attr->priority > IXGBE_MAX_N_TUPLE_PRIO)
537             filter->priority = 1;
538
539         return 0;
540 }
541
542 /* a function specific to ixgbe because the filter flags are ixgbe-specific */
543 static int
544 ixgbe_parse_ntuple_filter(struct rte_eth_dev *dev,
545                           const struct rte_flow_attr *attr,
546                           const struct rte_flow_item pattern[],
547                           const struct rte_flow_action actions[],
548                           struct rte_eth_ntuple_filter *filter,
549                           struct rte_flow_error *error)
550 {
551         int ret;
552         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
553
554         MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
555
556         ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);
557
558         if (ret)
559                 return ret;
560
561 #ifdef RTE_LIBRTE_SECURITY
562         /* an ESP flow is not really a flow */
563         if (filter->proto == IPPROTO_ESP)
564                 return 0;
565 #endif
566
567         /* Ixgbe doesn't support tcp flags. */
568         if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
569                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
570                 rte_flow_error_set(error, EINVAL,
571                                    RTE_FLOW_ERROR_TYPE_ITEM,
572                                    NULL, "Not supported by ntuple filter");
573                 return -rte_errno;
574         }
575
576         /* Ixgbe doesn't support many priorities. */
577         if (filter->priority < IXGBE_MIN_N_TUPLE_PRIO ||
578             filter->priority > IXGBE_MAX_N_TUPLE_PRIO) {
579                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
580                 rte_flow_error_set(error, EINVAL,
581                         RTE_FLOW_ERROR_TYPE_ITEM,
582                         NULL, "Priority not supported by ntuple filter");
583                 return -rte_errno;
584         }
585
586         if (filter->queue >= dev->data->nb_rx_queues)
587                 return -rte_errno;
588
589         /* fixed value for ixgbe */
590         filter->flags = RTE_5TUPLE_FLAGS;
591         return 0;
592 }
593
594 /**
595  * Parse the rule to see if it is an ethertype rule,
596  * and get the ethertype filter info along the way.
597  * pattern:
598  * The first not void item can be ETH.
599  * The next not void item must be END.
600  * action:
601  * The first not void action should be QUEUE.
602  * The next not void action should be END.
603  * pattern example:
604  * ITEM         Spec                    Mask
605  * ETH          type    0x0807          0xFFFF
606  * END
607  * other members in mask and spec should be set to 0x00.
608  * item->last should be NULL.
609  */
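/*
 * Illustrative sketch (not part of the driver): the ETH item for the example
 * above.  The ether type value is hypothetical and is given in network byte
 * order; the source/destination MAC masks stay all-zero.
 *
 *   struct rte_flow_item_eth eth_spec = { .type = rte_cpu_to_be_16(0x0807) };
 *   struct rte_flow_item_eth eth_mask = { .type = UINT16_MAX };
 *   struct rte_flow_item pattern[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH,
 *             .spec = &eth_spec, .mask = &eth_mask },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 */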
610 static int
611 cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
612                             const struct rte_flow_item *pattern,
613                             const struct rte_flow_action *actions,
614                             struct rte_eth_ethertype_filter *filter,
615                             struct rte_flow_error *error)
616 {
617         const struct rte_flow_item *item;
618         const struct rte_flow_action *act;
619         const struct rte_flow_item_eth *eth_spec;
620         const struct rte_flow_item_eth *eth_mask;
621         const struct rte_flow_action_queue *act_q;
622
623         if (!pattern) {
624                 rte_flow_error_set(error, EINVAL,
625                                 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
626                                 NULL, "NULL pattern.");
627                 return -rte_errno;
628         }
629
630         if (!actions) {
631                 rte_flow_error_set(error, EINVAL,
632                                 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
633                                 NULL, "NULL action.");
634                 return -rte_errno;
635         }
636
637         if (!attr) {
638                 rte_flow_error_set(error, EINVAL,
639                                    RTE_FLOW_ERROR_TYPE_ATTR,
640                                    NULL, "NULL attribute.");
641                 return -rte_errno;
642         }
643
644         item = next_no_void_pattern(pattern, NULL);
645         /* The first non-void item should be MAC. */
646         if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
647                 rte_flow_error_set(error, EINVAL,
648                         RTE_FLOW_ERROR_TYPE_ITEM,
649                         item, "Not supported by ethertype filter");
650                 return -rte_errno;
651         }
652
653         /*Not supported last point for range*/
654         if (item->last) {
655                 rte_flow_error_set(error, EINVAL,
656                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
657                         item, "Not supported last point for range");
658                 return -rte_errno;
659         }
660
661         /* Get the MAC info. */
662         if (!item->spec || !item->mask) {
663                 rte_flow_error_set(error, EINVAL,
664                                 RTE_FLOW_ERROR_TYPE_ITEM,
665                                 item, "Not supported by ethertype filter");
666                 return -rte_errno;
667         }
668
669         eth_spec = (const struct rte_flow_item_eth *)item->spec;
670         eth_mask = (const struct rte_flow_item_eth *)item->mask;
671
672         /* Mask bits of source MAC address must be full of 0.
673          * Mask bits of destination MAC address must be full
674          * of 1 or full of 0.
675          */
676         if (!is_zero_ether_addr(&eth_mask->src) ||
677             (!is_zero_ether_addr(&eth_mask->dst) &&
678              !is_broadcast_ether_addr(&eth_mask->dst))) {
679                 rte_flow_error_set(error, EINVAL,
680                                 RTE_FLOW_ERROR_TYPE_ITEM,
681                                 item, "Invalid ether address mask");
682                 return -rte_errno;
683         }
684
685         if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
686                 rte_flow_error_set(error, EINVAL,
687                                 RTE_FLOW_ERROR_TYPE_ITEM,
688                                 item, "Invalid ethertype mask");
689                 return -rte_errno;
690         }
691
692         /* If mask bits of destination MAC address
693          * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
694          */
695         if (is_broadcast_ether_addr(&eth_mask->dst)) {
696                 filter->mac_addr = eth_spec->dst;
697                 filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
698         } else {
699                 filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
700         }
701         filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
702
703         /* Check if the next non-void item is END. */
704         item = next_no_void_pattern(pattern, item);
705         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
706                 rte_flow_error_set(error, EINVAL,
707                                 RTE_FLOW_ERROR_TYPE_ITEM,
708                                 item, "Not supported by ethertype filter.");
709                 return -rte_errno;
710         }
711
712         /* Parse action */
713
714         act = next_no_void_action(actions, NULL);
715         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
716             act->type != RTE_FLOW_ACTION_TYPE_DROP) {
717                 rte_flow_error_set(error, EINVAL,
718                                 RTE_FLOW_ERROR_TYPE_ACTION,
719                                 act, "Not supported action.");
720                 return -rte_errno;
721         }
722
723         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
724                 act_q = (const struct rte_flow_action_queue *)act->conf;
725                 filter->queue = act_q->index;
726         } else {
727                 filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
728         }
729
730         /* Check if the next non-void item is END */
731         act = next_no_void_action(actions, act);
732         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
733                 rte_flow_error_set(error, EINVAL,
734                                 RTE_FLOW_ERROR_TYPE_ACTION,
735                                 act, "Not supported action.");
736                 return -rte_errno;
737         }
738
739         /* Parse attr */
740         /* Must be input direction */
741         if (!attr->ingress) {
742                 rte_flow_error_set(error, EINVAL,
743                                 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
744                                 attr, "Only support ingress.");
745                 return -rte_errno;
746         }
747
748         /* Not supported */
749         if (attr->egress) {
750                 rte_flow_error_set(error, EINVAL,
751                                 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
752                                 attr, "Not support egress.");
753                 return -rte_errno;
754         }
755
756         /* Not supported */
757         if (attr->priority) {
758                 rte_flow_error_set(error, EINVAL,
759                                 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
760                                 attr, "Not support priority.");
761                 return -rte_errno;
762         }
763
764         /* Not supported */
765         if (attr->group) {
766                 rte_flow_error_set(error, EINVAL,
767                                 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
768                                 attr, "Not support group.");
769                 return -rte_errno;
770         }
771
772         return 0;
773 }
774
775 static int
776 ixgbe_parse_ethertype_filter(struct rte_eth_dev *dev,
777                              const struct rte_flow_attr *attr,
778                              const struct rte_flow_item pattern[],
779                              const struct rte_flow_action actions[],
780                              struct rte_eth_ethertype_filter *filter,
781                              struct rte_flow_error *error)
782 {
783         int ret;
784         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
785
786         MAC_TYPE_FILTER_SUP(hw->mac.type);
787
788         ret = cons_parse_ethertype_filter(attr, pattern,
789                                         actions, filter, error);
790
791         if (ret)
792                 return ret;
793
794         /* Ixgbe doesn't support MAC address. */
795         if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
796                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
797                 rte_flow_error_set(error, EINVAL,
798                         RTE_FLOW_ERROR_TYPE_ITEM,
799                         NULL, "Not supported by ethertype filter");
800                 return -rte_errno;
801         }
802
803         if (filter->queue >= dev->data->nb_rx_queues) {
804                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
805                 rte_flow_error_set(error, EINVAL,
806                         RTE_FLOW_ERROR_TYPE_ITEM,
807                         NULL, "queue index much too big");
808                 return -rte_errno;
809         }
810
811         if (filter->ether_type == ETHER_TYPE_IPv4 ||
812                 filter->ether_type == ETHER_TYPE_IPv6) {
813                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
814                 rte_flow_error_set(error, EINVAL,
815                         RTE_FLOW_ERROR_TYPE_ITEM,
816                         NULL, "IPv4/IPv6 not supported by ethertype filter");
817                 return -rte_errno;
818         }
819
820         if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
821                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
822                 rte_flow_error_set(error, EINVAL,
823                         RTE_FLOW_ERROR_TYPE_ITEM,
824                         NULL, "mac compare is unsupported");
825                 return -rte_errno;
826         }
827
828         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
829                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
830                 rte_flow_error_set(error, EINVAL,
831                         RTE_FLOW_ERROR_TYPE_ITEM,
832                         NULL, "drop option is unsupported");
833                 return -rte_errno;
834         }
835
836         return 0;
837 }
838
839 /**
840  * Parse the rule to see if it is a TCP SYN rule,
841  * and get the TCP SYN filter info along the way.
842  * pattern:
843  * The first not void item must be ETH.
844  * The second not void item must be IPV4 or IPV6.
845  * The third not void item must be TCP.
846  * The next not void item must be END.
847  * action:
848  * The first not void action should be QUEUE.
849  * The next not void action should be END.
850  * pattern example:
851  * ITEM         Spec                    Mask
852  * ETH          NULL                    NULL
853  * IPV4/IPV6    NULL                    NULL
854  * TCP          tcp_flags       0x02    0x02
855  * END
856  * other members in mask and spec should be set to 0x00.
857  * item->last should be NULL.
858  */
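/*
 * Illustrative sketch (not part of the driver): the TCP item for the example
 * above.  Only the SYN bit is specified and masked; every other TCP header
 * field in the mask stays zero, as required by the checks below.
 *
 *   struct rte_flow_item_tcp tcp_spec = { .hdr = { .tcp_flags = 0x02 } };
 *   struct rte_flow_item_tcp tcp_mask = { .hdr = { .tcp_flags = 0x02 } };
 *   struct rte_flow_item pattern[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *           { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *           { .type = RTE_FLOW_ITEM_TYPE_TCP,
 *             .spec = &tcp_spec, .mask = &tcp_mask },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 */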
859 static int
860 cons_parse_syn_filter(const struct rte_flow_attr *attr,
861                                 const struct rte_flow_item pattern[],
862                                 const struct rte_flow_action actions[],
863                                 struct rte_eth_syn_filter *filter,
864                                 struct rte_flow_error *error)
865 {
866         const struct rte_flow_item *item;
867         const struct rte_flow_action *act;
868         const struct rte_flow_item_tcp *tcp_spec;
869         const struct rte_flow_item_tcp *tcp_mask;
870         const struct rte_flow_action_queue *act_q;
871
872         if (!pattern) {
873                 rte_flow_error_set(error, EINVAL,
874                                 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
875                                 NULL, "NULL pattern.");
876                 return -rte_errno;
877         }
878
879         if (!actions) {
880                 rte_flow_error_set(error, EINVAL,
881                                 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
882                                 NULL, "NULL action.");
883                 return -rte_errno;
884         }
885
886         if (!attr) {
887                 rte_flow_error_set(error, EINVAL,
888                                    RTE_FLOW_ERROR_TYPE_ATTR,
889                                    NULL, "NULL attribute.");
890                 return -rte_errno;
891         }
892
893
894         /* the first not void item should be MAC or IPv4 or IPv6 or TCP */
895         item = next_no_void_pattern(pattern, NULL);
896         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
897             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
898             item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
899             item->type != RTE_FLOW_ITEM_TYPE_TCP) {
900                 rte_flow_error_set(error, EINVAL,
901                                 RTE_FLOW_ERROR_TYPE_ITEM,
902                                 item, "Not supported by syn filter");
903                 return -rte_errno;
904         }
905         /*Not supported last point for range*/
906         if (item->last) {
907                 rte_flow_error_set(error, EINVAL,
908                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
909                         item, "Not supported last point for range");
910                 return -rte_errno;
911         }
912
913         /* Skip Ethernet */
914         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
915                 /* if the item is MAC, the content should be NULL */
916                 if (item->spec || item->mask) {
917                         rte_flow_error_set(error, EINVAL,
918                                 RTE_FLOW_ERROR_TYPE_ITEM,
919                                 item, "Invalid SYN address mask");
920                         return -rte_errno;
921                 }
922
923                 /* check if the next not void item is IPv4 or IPv6 */
924                 item = next_no_void_pattern(pattern, item);
925                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
926                     item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
927                         rte_flow_error_set(error, EINVAL,
928                                 RTE_FLOW_ERROR_TYPE_ITEM,
929                                 item, "Not supported by syn filter");
930                         return -rte_errno;
931                 }
932         }
933
934         /* Skip IP */
935         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
936             item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
937                 /* if the item is IP, the content should be NULL */
938                 if (item->spec || item->mask) {
939                         rte_flow_error_set(error, EINVAL,
940                                 RTE_FLOW_ERROR_TYPE_ITEM,
941                                 item, "Invalid SYN mask");
942                         return -rte_errno;
943                 }
944
945                 /* check if the next not void item is TCP */
946                 item = next_no_void_pattern(pattern, item);
947                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
948                         rte_flow_error_set(error, EINVAL,
949                                 RTE_FLOW_ERROR_TYPE_ITEM,
950                                 item, "Not supported by syn filter");
951                         return -rte_errno;
952                 }
953         }
954
955         /* Get the TCP info. Only support SYN. */
956         if (!item->spec || !item->mask) {
957                 rte_flow_error_set(error, EINVAL,
958                                 RTE_FLOW_ERROR_TYPE_ITEM,
959                                 item, "Invalid SYN mask");
960                 return -rte_errno;
961         }
962         /*Not supported last point for range*/
963         if (item->last) {
964                 rte_flow_error_set(error, EINVAL,
965                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
966                         item, "Not supported last point for range");
967                 return -rte_errno;
968         }
969
970         tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
971         tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
972         if (!(tcp_spec->hdr.tcp_flags & TCP_SYN_FLAG) ||
973             tcp_mask->hdr.src_port ||
974             tcp_mask->hdr.dst_port ||
975             tcp_mask->hdr.sent_seq ||
976             tcp_mask->hdr.recv_ack ||
977             tcp_mask->hdr.data_off ||
978             tcp_mask->hdr.tcp_flags != TCP_SYN_FLAG ||
979             tcp_mask->hdr.rx_win ||
980             tcp_mask->hdr.cksum ||
981             tcp_mask->hdr.tcp_urp) {
982                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
983                 rte_flow_error_set(error, EINVAL,
984                                 RTE_FLOW_ERROR_TYPE_ITEM,
985                                 item, "Not supported by syn filter");
986                 return -rte_errno;
987         }
988
989         /* check if the next not void item is END */
990         item = next_no_void_pattern(pattern, item);
991         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
992                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
993                 rte_flow_error_set(error, EINVAL,
994                                 RTE_FLOW_ERROR_TYPE_ITEM,
995                                 item, "Not supported by syn filter");
996                 return -rte_errno;
997         }
998
999         /* check if the first not void action is QUEUE. */
1000         act = next_no_void_action(actions, NULL);
1001         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
1002                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1003                 rte_flow_error_set(error, EINVAL,
1004                                 RTE_FLOW_ERROR_TYPE_ACTION,
1005                                 act, "Not supported action.");
1006                 return -rte_errno;
1007         }
1008
1009         act_q = (const struct rte_flow_action_queue *)act->conf;
1010         filter->queue = act_q->index;
1011         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
1012                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1013                 rte_flow_error_set(error, EINVAL,
1014                                 RTE_FLOW_ERROR_TYPE_ACTION,
1015                                 act, "Not supported action.");
1016                 return -rte_errno;
1017         }
1018
1019         /* check if the next not void item is END */
1020         act = next_no_void_action(actions, act);
1021         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1022                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1023                 rte_flow_error_set(error, EINVAL,
1024                                 RTE_FLOW_ERROR_TYPE_ACTION,
1025                                 act, "Not supported action.");
1026                 return -rte_errno;
1027         }
1028
1029         /* parse attr */
1030         /* must be input direction */
1031         if (!attr->ingress) {
1032                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1033                 rte_flow_error_set(error, EINVAL,
1034                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1035                         attr, "Only support ingress.");
1036                 return -rte_errno;
1037         }
1038
1039         /* not supported */
1040         if (attr->egress) {
1041                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1042                 rte_flow_error_set(error, EINVAL,
1043                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1044                         attr, "Not support egress.");
1045                 return -rte_errno;
1046         }
1047
1048         /* Support 2 priorities, the lowest or highest. */
1049         if (!attr->priority) {
1050                 filter->hig_pri = 0;
1051         } else if (attr->priority == (uint32_t)~0U) {
1052                 filter->hig_pri = 1;
1053         } else {
1054                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1055                 rte_flow_error_set(error, EINVAL,
1056                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1057                         attr, "Not support priority.");
1058                 return -rte_errno;
1059         }
1060
1061         return 0;
1062 }
1063
1064 static int
1065 ixgbe_parse_syn_filter(struct rte_eth_dev *dev,
1066                              const struct rte_flow_attr *attr,
1067                              const struct rte_flow_item pattern[],
1068                              const struct rte_flow_action actions[],
1069                              struct rte_eth_syn_filter *filter,
1070                              struct rte_flow_error *error)
1071 {
1072         int ret;
1073         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1074
1075         MAC_TYPE_FILTER_SUP(hw->mac.type);
1076
1077         ret = cons_parse_syn_filter(attr, pattern,
1078                                         actions, filter, error);
1079
1080         if (ret)
1081                 return ret;
1082
1083         if (filter->queue >= dev->data->nb_rx_queues)
1084                 return -rte_errno;
1085
1086         return 0;
1087 }
1088
1089 /**
1090  * Parse the rule to see if it is an L2 tunnel rule,
1091  * and get the L2 tunnel filter info along the way.
1092  * Only support E-tag now.
1093  * pattern:
1094  * The first not void item can be E_TAG.
1095  * The next not void item must be END.
1096  * action:
1097  * The first not void action should be VF or PF.
1098  * The next not void action should be END.
1099  * pattern example:
1100  * ITEM         Spec                    Mask
1101  * E_TAG        grp             0x1     0x3
1102  *              e_cid_base      0x309   0xFFF
1103  * END
1104  * other members in mask and spec should be set to 0x00.
1105  * item->last should be NULL.
1106  */
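/*
 * Illustrative sketch (not part of the driver): an E-tag item matching the
 * example above.  GRP and the E-CID base share the rsvd_grp_ecid_b field
 * (GRP assumed in bits 13:12, E-CID base in bits 11:0); the VF id is
 * hypothetical.
 *
 *   struct rte_flow_item_e_tag e_tag_spec = {
 *           .rsvd_grp_ecid_b = rte_cpu_to_be_16((0x1 << 12) | 0x309) };
 *   struct rte_flow_item_e_tag e_tag_mask = {
 *           .rsvd_grp_ecid_b = rte_cpu_to_be_16(0x3FFF) };
 *   struct rte_flow_action_vf vf = { .id = 1 };
 *   struct rte_flow_item pattern[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_E_TAG,
 *             .spec = &e_tag_spec, .mask = &e_tag_mask },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 *   struct rte_flow_action actions[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_VF, .conf = &vf },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 */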
1107 static int
1108 cons_parse_l2_tn_filter(struct rte_eth_dev *dev,
1109                         const struct rte_flow_attr *attr,
1110                         const struct rte_flow_item pattern[],
1111                         const struct rte_flow_action actions[],
1112                         struct rte_eth_l2_tunnel_conf *filter,
1113                         struct rte_flow_error *error)
1114 {
1115         const struct rte_flow_item *item;
1116         const struct rte_flow_item_e_tag *e_tag_spec;
1117         const struct rte_flow_item_e_tag *e_tag_mask;
1118         const struct rte_flow_action *act;
1119         const struct rte_flow_action_vf *act_vf;
1120         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1121
1122         if (!pattern) {
1123                 rte_flow_error_set(error, EINVAL,
1124                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1125                         NULL, "NULL pattern.");
1126                 return -rte_errno;
1127         }
1128
1129         if (!actions) {
1130                 rte_flow_error_set(error, EINVAL,
1131                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1132                                    NULL, "NULL action.");
1133                 return -rte_errno;
1134         }
1135
1136         if (!attr) {
1137                 rte_flow_error_set(error, EINVAL,
1138                                    RTE_FLOW_ERROR_TYPE_ATTR,
1139                                    NULL, "NULL attribute.");
1140                 return -rte_errno;
1141         }
1142
1143         /* The first not void item should be e-tag. */
1144         item = next_no_void_pattern(pattern, NULL);
1145         if (item->type != RTE_FLOW_ITEM_TYPE_E_TAG) {
1146                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1147                 rte_flow_error_set(error, EINVAL,
1148                         RTE_FLOW_ERROR_TYPE_ITEM,
1149                         item, "Not supported by L2 tunnel filter");
1150                 return -rte_errno;
1151         }
1152
1153         if (!item->spec || !item->mask) {
1154                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1155                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
1156                         item, "Not supported by L2 tunnel filter");
1157                 return -rte_errno;
1158         }
1159
1160         /*Not supported last point for range*/
1161         if (item->last) {
1162                 rte_flow_error_set(error, EINVAL,
1163                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1164                         item, "Not supported last point for range");
1165                 return -rte_errno;
1166         }
1167
1168         e_tag_spec = (const struct rte_flow_item_e_tag *)item->spec;
1169         e_tag_mask = (const struct rte_flow_item_e_tag *)item->mask;
1170
1171         /* Only care about GRP and E cid base. */
1172         if (e_tag_mask->epcp_edei_in_ecid_b ||
1173             e_tag_mask->in_ecid_e ||
1174             e_tag_mask->ecid_e ||
1175             e_tag_mask->rsvd_grp_ecid_b != rte_cpu_to_be_16(0x3FFF)) {
1176                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1177                 rte_flow_error_set(error, EINVAL,
1178                         RTE_FLOW_ERROR_TYPE_ITEM,
1179                         item, "Not supported by L2 tunnel filter");
1180                 return -rte_errno;
1181         }
1182
1183         filter->l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
1184         /**
1185          * grp and e_cid_base are bit fields and only use 14 bits.
1186          * e-tag id is taken as little endian by HW.
1187          */
1188         filter->tunnel_id = rte_be_to_cpu_16(e_tag_spec->rsvd_grp_ecid_b);
1189
1190         /* check if the next not void item is END */
1191         item = next_no_void_pattern(pattern, item);
1192         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1193                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1194                 rte_flow_error_set(error, EINVAL,
1195                         RTE_FLOW_ERROR_TYPE_ITEM,
1196                         item, "Not supported by L2 tunnel filter");
1197                 return -rte_errno;
1198         }
1199
1200         /* parse attr */
1201         /* must be input direction */
1202         if (!attr->ingress) {
1203                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1204                 rte_flow_error_set(error, EINVAL,
1205                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1206                         attr, "Only support ingress.");
1207                 return -rte_errno;
1208         }
1209
1210         /* not supported */
1211         if (attr->egress) {
1212                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1213                 rte_flow_error_set(error, EINVAL,
1214                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1215                         attr, "Not support egress.");
1216                 return -rte_errno;
1217         }
1218
1219         /* not supported */
1220         if (attr->priority) {
1221                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1222                 rte_flow_error_set(error, EINVAL,
1223                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1224                         attr, "Not support priority.");
1225                 return -rte_errno;
1226         }
1227
1228         /* check if the first not void action is VF or PF. */
1229         act = next_no_void_action(actions, NULL);
1230         if (act->type != RTE_FLOW_ACTION_TYPE_VF &&
1231                         act->type != RTE_FLOW_ACTION_TYPE_PF) {
1232                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1233                 rte_flow_error_set(error, EINVAL,
1234                         RTE_FLOW_ERROR_TYPE_ACTION,
1235                         act, "Not supported action.");
1236                 return -rte_errno;
1237         }
1238
1239         if (act->type == RTE_FLOW_ACTION_TYPE_VF) {
1240                 act_vf = (const struct rte_flow_action_vf *)act->conf;
1241                 filter->pool = act_vf->id;
1242         } else {
1243                 filter->pool = pci_dev->max_vfs;
1244         }
1245
1246         /* check if the next not void item is END */
1247         act = next_no_void_action(actions, act);
1248         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1249                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1250                 rte_flow_error_set(error, EINVAL,
1251                         RTE_FLOW_ERROR_TYPE_ACTION,
1252                         act, "Not supported action.");
1253                 return -rte_errno;
1254         }
1255
1256         return 0;
1257 }
1258
1259 static int
1260 ixgbe_parse_l2_tn_filter(struct rte_eth_dev *dev,
1261                         const struct rte_flow_attr *attr,
1262                         const struct rte_flow_item pattern[],
1263                         const struct rte_flow_action actions[],
1264                         struct rte_eth_l2_tunnel_conf *l2_tn_filter,
1265                         struct rte_flow_error *error)
1266 {
1267         int ret = 0;
1268         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1269         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1270         uint16_t vf_num;
1271
1272         ret = cons_parse_l2_tn_filter(dev, attr, pattern,
1273                                 actions, l2_tn_filter, error);
1274
1275         if (hw->mac.type != ixgbe_mac_X550 &&
1276                 hw->mac.type != ixgbe_mac_X550EM_x &&
1277                 hw->mac.type != ixgbe_mac_X550EM_a) {
1278                 memset(l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1279                 rte_flow_error_set(error, EINVAL,
1280                         RTE_FLOW_ERROR_TYPE_ITEM,
1281                         NULL, "Not supported by L2 tunnel filter");
1282                 return -rte_errno;
1283         }
1284
1285         vf_num = pci_dev->max_vfs;
1286
1287         if (l2_tn_filter->pool > vf_num)
1288                 return -rte_errno;
1289
1290         return ret;
1291 }
1292
1293 /* Parse the attr and action info of a flow director rule. */
1294 static int
1295 ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
1296                           const struct rte_flow_action actions[],
1297                           struct ixgbe_fdir_rule *rule,
1298                           struct rte_flow_error *error)
1299 {
1300         const struct rte_flow_action *act;
1301         const struct rte_flow_action_queue *act_q;
1302         const struct rte_flow_action_mark *mark;
1303
1304         /* parse attr */
1305         /* must be input direction */
1306         if (!attr->ingress) {
1307                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1308                 rte_flow_error_set(error, EINVAL,
1309                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1310                         attr, "Only support ingress.");
1311                 return -rte_errno;
1312         }
1313
1314         /* not supported */
1315         if (attr->egress) {
1316                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1317                 rte_flow_error_set(error, EINVAL,
1318                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1319                         attr, "Not support egress.");
1320                 return -rte_errno;
1321         }
1322
1323         /* not supported */
1324         if (attr->priority) {
1325                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1326                 rte_flow_error_set(error, EINVAL,
1327                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1328                         attr, "Not support priority.");
1329                 return -rte_errno;
1330         }
1331
1332         /* check if the first not void action is QUEUE or DROP. */
1333         act = next_no_void_action(actions, NULL);
1334         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
1335             act->type != RTE_FLOW_ACTION_TYPE_DROP) {
1336                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1337                 rte_flow_error_set(error, EINVAL,
1338                         RTE_FLOW_ERROR_TYPE_ACTION,
1339                         act, "Not supported action.");
1340                 return -rte_errno;
1341         }
1342
1343         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
1344                 act_q = (const struct rte_flow_action_queue *)act->conf;
1345                 rule->queue = act_q->index;
1346         } else { /* drop */
1347                 /* signature mode does not support drop action. */
1348                 if (rule->mode == RTE_FDIR_MODE_SIGNATURE) {
1349                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1350                         rte_flow_error_set(error, EINVAL,
1351                                 RTE_FLOW_ERROR_TYPE_ACTION,
1352                                 act, "Not supported action.");
1353                         return -rte_errno;
1354                 }
1355                 rule->fdirflags = IXGBE_FDIRCMD_DROP;
1356         }
1357
1358         /* check if the next not void action is MARK or END */
1359         act = next_no_void_action(actions, act);
1360         if ((act->type != RTE_FLOW_ACTION_TYPE_MARK) &&
1361                 (act->type != RTE_FLOW_ACTION_TYPE_END)) {
1362                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1363                 rte_flow_error_set(error, EINVAL,
1364                         RTE_FLOW_ERROR_TYPE_ACTION,
1365                         act, "Not supported action.");
1366                 return -rte_errno;
1367         }
1368
1369         rule->soft_id = 0;
1370
1371         if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
1372                 mark = (const struct rte_flow_action_mark *)act->conf;
1373                 rule->soft_id = mark->id;
1374                 act = next_no_void_action(actions, act);
1375         }
1376
1377         /* check if the next not void action is END */
1378         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1379                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1380                 rte_flow_error_set(error, EINVAL,
1381                         RTE_FLOW_ERROR_TYPE_ACTION,
1382                         act, "Not supported action.");
1383                 return -rte_errno;
1384         }
1385
1386         return 0;
1387 }
1388
1389 /* Search the next not void pattern item, skipping any FUZZY items. */
1390 static inline
1391 const struct rte_flow_item *next_no_fuzzy_pattern(
1392                 const struct rte_flow_item pattern[],
1393                 const struct rte_flow_item *cur)
1394 {
1395         const struct rte_flow_item *next =
1396                 next_no_void_pattern(pattern, cur);
1397         while (1) {
1398                 if (next->type != RTE_FLOW_ITEM_TYPE_FUZZY)
1399                         return next;
1400                 next = next_no_void_pattern(pattern, next);
1401         }
1402 }
1403
1404 static inline uint8_t signature_match(const struct rte_flow_item pattern[])
1405 {
1406         const struct rte_flow_item_fuzzy *spec, *last, *mask;
1407         const struct rte_flow_item *item;
1408         uint32_t sh, lh, mh;
1409         int i = 0;
1410
1411         while (1) {
1412                 item = pattern + i;
1413                 if (item->type == RTE_FLOW_ITEM_TYPE_END)
1414                         break;
1415
1416                 if (item->type == RTE_FLOW_ITEM_TYPE_FUZZY) {
1417                         spec =
1418                         (const struct rte_flow_item_fuzzy *)item->spec;
1419                         last =
1420                         (const struct rte_flow_item_fuzzy *)item->last;
1421                         mask =
1422                         (const struct rte_flow_item_fuzzy *)item->mask;
1423
1424                         if (!spec || !mask)
1425                                 return 0;
1426
1427                         sh = spec->thresh;
1428
1429                         if (!last)
1430                                 lh = sh;
1431                         else
1432                                 lh = last->thresh;
1433
1434                         mh = mask->thresh;
1435                         sh = sh & mh;
1436                         lh = lh & mh;
1437
1438                         if (!sh || sh > lh)
1439                                 return 0;
1440
1441                         return 1;
1442                 }
1443
1444                 i++;
1445         }
1446
1447         return 0;
1448 }
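
/*
 * Illustrative sketch (not part of the driver): on the application side, a
 * FUZZY item that signature_match() accepts, i.e. one that selects signature
 * mode, could look roughly like this.  The threshold value is an arbitrary
 * assumption chosen for the example.
 *
 *	struct rte_flow_item_fuzzy fuzzy_spec = { .thresh = 1 };
 *	struct rte_flow_item_fuzzy fuzzy_mask = { .thresh = UINT32_MAX };
 *	struct rte_flow_item fuzzy_item = {
 *		.type = RTE_FLOW_ITEM_TYPE_FUZZY,
 *		.spec = &fuzzy_spec,
 *		.mask = &fuzzy_mask,
 *	};
 *
 * With item->last left NULL, lh defaults to sh above, so any non-zero masked
 * threshold is accepted by the (!sh || sh > lh) check.
 */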
1449
1450 /**
1451  * Parse the rule to see if it is an IP or MAC VLAN flow director rule,
1452  * and fill in the flow director filter info along the way.
1453  * UDP/TCP/SCTP PATTERN:
1454  * The first not void item can be ETH or IPV4 or IPV6
1455  * The second not void item must be IPV4 or IPV6 if the first one is ETH.
1456  * The next not void item could be UDP or TCP or SCTP (optional)
1457  * The next not void item could be RAW (for flexbyte, optional)
1458  * The next not void item must be END.
1459  * A Fuzzy Match pattern can appear at any place before END.
1460  * Fuzzy Match is optional for IPV4 but is required for IPV6
1461  * MAC VLAN PATTERN:
1462  * The first not void item must be ETH.
1463  * The second not void item must be MAC VLAN.
1464  * The next not void item must be END.
1465  * ACTION:
1466  * The first not void action should be QUEUE or DROP.
1467  * The second not void action is an optional MARK;
1468  * its mark_id is a uint32_t number.
1469  * The next not void action should be END.
1470  * UDP/TCP/SCTP pattern example:
1471  * ITEM         Spec                    Mask
1472  * ETH          NULL                    NULL
1473  * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
1474  *              dst_addr 192.167.3.50   0xFFFFFFFF
1475  * UDP/TCP/SCTP src_port        80      0xFFFF
1476  *              dst_port        80      0xFFFF
1477  * FLEX relative        0       0x1
1478  *              search          0       0x1
1479  *              reserved        0       0
1480  *              offset          12      0xFFFFFFFF
1481  *              limit           0       0xFFFF
1482  *              length          2       0xFFFF
1483  *              pattern[0]      0x86    0xFF
1484  *              pattern[1]      0xDD    0xFF
1485  * END
1486  * MAC VLAN pattern example:
1487  * ITEM         Spec                    Mask
1488  * ETH          dst_addr
1489                 {0xAC, 0x7B, 0xA1,      {0xFF, 0xFF, 0xFF,
1490                 0x2C, 0x6D, 0x36}       0xFF, 0xFF, 0xFF}
1491  * MAC VLAN     tci     0x2016          0xEFFF
1492  * END
1493  * Other members in mask and spec should be set to 0x00.
1494  * Item->last should be NULL.
1495  */
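/*
 * Illustrative application-side sketch (not part of the driver): a rule
 * matching the UDP pattern documented above could be built with the generic
 * rte_flow API roughly as follows.  The port_id and the queue index are
 * assumptions chosen for the example.
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item_ipv4 ip_spec = { .hdr = {
 *		.src_addr = rte_cpu_to_be_32(0xC0A80114),  192.168.1.20
 *		.dst_addr = rte_cpu_to_be_32(0xC0A70332),  192.167.3.50
 *	} };
 *	struct rte_flow_item_ipv4 ip_mask = { .hdr = {
 *		.src_addr = rte_cpu_to_be_32(UINT32_MAX),
 *		.dst_addr = rte_cpu_to_be_32(UINT32_MAX),
 *	} };
 *	struct rte_flow_item_udp udp_spec = { .hdr = {
 *		.src_port = rte_cpu_to_be_16(80),
 *		.dst_port = rte_cpu_to_be_16(80),
 *	} };
 *	struct rte_flow_item_udp udp_mask = { .hdr = {
 *		.src_port = rte_cpu_to_be_16(UINT16_MAX),
 *		.dst_port = rte_cpu_to_be_16(UINT16_MAX),
 *	} };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		  .spec = &ip_spec, .mask = &ip_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
 *		  .spec = &udp_spec, .mask = &udp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *	struct rte_flow *flow = rte_flow_create(port_id, &attr,
 *						pattern, actions, &err);
 *
 * For an IPv6 rule a FUZZY item (see signature_match() above) would also be
 * needed before END, since Fuzzy Match is required for IPv6.
 */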
1496 static int
1497 ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
1498                                const struct rte_flow_attr *attr,
1499                                const struct rte_flow_item pattern[],
1500                                const struct rte_flow_action actions[],
1501                                struct ixgbe_fdir_rule *rule,
1502                                struct rte_flow_error *error)
1503 {
1504         const struct rte_flow_item *item;
1505         const struct rte_flow_item_eth *eth_spec;
1506         const struct rte_flow_item_eth *eth_mask;
1507         const struct rte_flow_item_ipv4 *ipv4_spec;
1508         const struct rte_flow_item_ipv4 *ipv4_mask;
1509         const struct rte_flow_item_ipv6 *ipv6_spec;
1510         const struct rte_flow_item_ipv6 *ipv6_mask;
1511         const struct rte_flow_item_tcp *tcp_spec;
1512         const struct rte_flow_item_tcp *tcp_mask;
1513         const struct rte_flow_item_udp *udp_spec;
1514         const struct rte_flow_item_udp *udp_mask;
1515         const struct rte_flow_item_sctp *sctp_spec;
1516         const struct rte_flow_item_sctp *sctp_mask;
1517         const struct rte_flow_item_vlan *vlan_spec;
1518         const struct rte_flow_item_vlan *vlan_mask;
1519         const struct rte_flow_item_raw *raw_mask;
1520         const struct rte_flow_item_raw *raw_spec;
1521         uint8_t j;
1522
1523         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1524
1525         if (!pattern) {
1526                 rte_flow_error_set(error, EINVAL,
1527                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1528                         NULL, "NULL pattern.");
1529                 return -rte_errno;
1530         }
1531
1532         if (!actions) {
1533                 rte_flow_error_set(error, EINVAL,
1534                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1535                                    NULL, "NULL action.");
1536                 return -rte_errno;
1537         }
1538
1539         if (!attr) {
1540                 rte_flow_error_set(error, EINVAL,
1541                                    RTE_FLOW_ERROR_TYPE_ATTR,
1542                                    NULL, "NULL attribute.");
1543                 return -rte_errno;
1544         }
1545
1546         /**
1547          * Some fields may not be provided. Set the spec to 0 and the mask to its
1548          * default value so the fields that are not provided need no handling later.
1549          */
1550         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1551         memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
1552         rule->mask.vlan_tci_mask = 0;
1553         rule->mask.flex_bytes_mask = 0;
1554
1555         /**
1556          * The first not void item should be
1557          * MAC or IPv4 or IPv6 or TCP or UDP or SCTP.
1558          */
1559         item = next_no_fuzzy_pattern(pattern, NULL);
1560         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
1561             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
1562             item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
1563             item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1564             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1565             item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
1566                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1567                 rte_flow_error_set(error, EINVAL,
1568                         RTE_FLOW_ERROR_TYPE_ITEM,
1569                         item, "Not supported by fdir filter");
1570                 return -rte_errno;
1571         }
1572
1573         if (signature_match(pattern))
1574                 rule->mode = RTE_FDIR_MODE_SIGNATURE;
1575         else
1576                 rule->mode = RTE_FDIR_MODE_PERFECT;
1577
1578         /* Range matching via item->last is not supported. */
1579         if (item->last) {
1580                 rte_flow_error_set(error, EINVAL,
1581                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1582                         item, "Not supported last point for range");
1583                 return -rte_errno;
1584         }
1585
1586         /* Get the MAC info. */
1587         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
1588                 /**
1589                  * Only support vlan and dst MAC address,
1590                  * others should be masked.
1591                  */
1592                 if (item->spec && !item->mask) {
1593                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1594                         rte_flow_error_set(error, EINVAL,
1595                                 RTE_FLOW_ERROR_TYPE_ITEM,
1596                                 item, "Not supported by fdir filter");
1597                         return -rte_errno;
1598                 }
1599
1600                 if (item->spec) {
1601                         rule->b_spec = TRUE;
1602                         eth_spec = (const struct rte_flow_item_eth *)item->spec;
1603
1604                         /* Get the dst MAC. */
1605                         for (j = 0; j < ETHER_ADDR_LEN; j++) {
1606                                 rule->ixgbe_fdir.formatted.inner_mac[j] =
1607                                         eth_spec->dst.addr_bytes[j];
1608                         }
1609                 }
1610
1611
1612                 if (item->mask) {
1613
1614                         rule->b_mask = TRUE;
1615                         eth_mask = (const struct rte_flow_item_eth *)item->mask;
1616
1617                         /* Ether type should be masked. */
1618                         if (eth_mask->type ||
1619                             rule->mode == RTE_FDIR_MODE_SIGNATURE) {
1620                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1621                                 rte_flow_error_set(error, EINVAL,
1622                                         RTE_FLOW_ERROR_TYPE_ITEM,
1623                                         item, "Not supported by fdir filter");
1624                                 return -rte_errno;
1625                         }
1626
1627                         /* A meaningful Ethernet mask means MAC VLAN mode. */
1628                         rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN;
1629
1630                         /**
1631                          * The src MAC address mask must be all zeroes,
1632                          * and the dst MAC address must be fully masked (all 0xFF).
1633                          */
1634                         for (j = 0; j < ETHER_ADDR_LEN; j++) {
1635                                 if (eth_mask->src.addr_bytes[j] ||
1636                                         eth_mask->dst.addr_bytes[j] != 0xFF) {
1637                                         memset(rule, 0,
1638                                         sizeof(struct ixgbe_fdir_rule));
1639                                         rte_flow_error_set(error, EINVAL,
1640                                         RTE_FLOW_ERROR_TYPE_ITEM,
1641                                         item, "Not supported by fdir filter");
1642                                         return -rte_errno;
1643                                 }
1644                         }
1645
1646                         /* If no VLAN item follows, treat the VLAN TCI as fully masked. */
1647                         rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
1648                 }
1649                 /* If both spec and mask are NULL,
1650                  * the ETH item is a don't care.
1651                  * Do nothing.
1652                  */
1653
1654                 /**
1655                  * Check if the next not void item is vlan or ipv4.
1656                  * IPv6 is not supported.
1657                  */
1658                 item = next_no_fuzzy_pattern(pattern, item);
1659                 if (rule->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
1660                         if (item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
1661                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1662                                 rte_flow_error_set(error, EINVAL,
1663                                         RTE_FLOW_ERROR_TYPE_ITEM,
1664                                         item, "Not supported by fdir filter");
1665                                 return -rte_errno;
1666                         }
1667                 } else {
1668                         if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
1669                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1670                                 rte_flow_error_set(error, EINVAL,
1671                                         RTE_FLOW_ERROR_TYPE_ITEM,
1672                                         item, "Not supported by fdir filter");
1673                                 return -rte_errno;
1674                         }
1675                 }
1676         }
1677
1678         if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1679                 if (!(item->spec && item->mask)) {
1680                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1681                         rte_flow_error_set(error, EINVAL,
1682                                 RTE_FLOW_ERROR_TYPE_ITEM,
1683                                 item, "Not supported by fdir filter");
1684                         return -rte_errno;
1685                 }
1686
1687                 /* Range matching via item->last is not supported. */
1688                 if (item->last) {
1689                         rte_flow_error_set(error, EINVAL,
1690                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1691                                 item, "Not supported last point for range");
1692                         return -rte_errno;
1693                 }
1694
1695                 vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
1696                 vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
1697
1698                 rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
1699
1700                 rule->mask.vlan_tci_mask = vlan_mask->tci;
1701                 rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
1702                 /* More than one VLAN tag is not supported. */
1703
1704                 /* Next not void item must be END */
1705                 item = next_no_fuzzy_pattern(pattern, item);
1706                 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1707                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1708                         rte_flow_error_set(error, EINVAL,
1709                                 RTE_FLOW_ERROR_TYPE_ITEM,
1710                                 item, "Not supported by fdir filter");
1711                         return -rte_errno;
1712                 }
1713         }
1714
1715         /* Get the IPV4 info. */
1716         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1717                 /**
1718                  * Set the flow type even if there's no content
1719                  * as we must have a flow type.
1720                  */
1721                 rule->ixgbe_fdir.formatted.flow_type =
1722                         IXGBE_ATR_FLOW_TYPE_IPV4;
1723                 /* Range matching via item->last is not supported. */
1724                 if (item->last) {
1725                         rte_flow_error_set(error, EINVAL,
1726                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1727                                 item, "Not supported last point for range");
1728                         return -rte_errno;
1729                 }
1730                 /**
1731                  * Only care about src & dst addresses,
1732                  * others should be masked.
1733                  */
1734                 if (!item->mask) {
1735                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1736                         rte_flow_error_set(error, EINVAL,
1737                                 RTE_FLOW_ERROR_TYPE_ITEM,
1738                                 item, "Not supported by fdir filter");
1739                         return -rte_errno;
1740                 }
1741                 rule->b_mask = TRUE;
1742                 ipv4_mask =
1743                         (const struct rte_flow_item_ipv4 *)item->mask;
1744                 if (ipv4_mask->hdr.version_ihl ||
1745                     ipv4_mask->hdr.type_of_service ||
1746                     ipv4_mask->hdr.total_length ||
1747                     ipv4_mask->hdr.packet_id ||
1748                     ipv4_mask->hdr.fragment_offset ||
1749                     ipv4_mask->hdr.time_to_live ||
1750                     ipv4_mask->hdr.next_proto_id ||
1751                     ipv4_mask->hdr.hdr_checksum) {
1752                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1753                         rte_flow_error_set(error, EINVAL,
1754                                 RTE_FLOW_ERROR_TYPE_ITEM,
1755                                 item, "Not supported by fdir filter");
1756                         return -rte_errno;
1757                 }
1758                 rule->mask.dst_ipv4_mask = ipv4_mask->hdr.dst_addr;
1759                 rule->mask.src_ipv4_mask = ipv4_mask->hdr.src_addr;
1760
1761                 if (item->spec) {
1762                         rule->b_spec = TRUE;
1763                         ipv4_spec =
1764                                 (const struct rte_flow_item_ipv4 *)item->spec;
1765                         rule->ixgbe_fdir.formatted.dst_ip[0] =
1766                                 ipv4_spec->hdr.dst_addr;
1767                         rule->ixgbe_fdir.formatted.src_ip[0] =
1768                                 ipv4_spec->hdr.src_addr;
1769                 }
1770
1771                 /**
1772                  * Check if the next not void item is
1773                  * TCP or UDP or SCTP or END.
1774                  */
1775                 item = next_no_fuzzy_pattern(pattern, item);
1776                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1777                     item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1778                     item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
1779                     item->type != RTE_FLOW_ITEM_TYPE_END &&
1780                     item->type != RTE_FLOW_ITEM_TYPE_RAW) {
1781                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1782                         rte_flow_error_set(error, EINVAL,
1783                                 RTE_FLOW_ERROR_TYPE_ITEM,
1784                                 item, "Not supported by fdir filter");
1785                         return -rte_errno;
1786                 }
1787         }
1788
1789         /* Get the IPV6 info. */
1790         if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
1791                 /**
1792                  * Set the flow type even if there's no content
1793                  * as we must have a flow type.
1794                  */
1795                 rule->ixgbe_fdir.formatted.flow_type =
1796                         IXGBE_ATR_FLOW_TYPE_IPV6;
1797
1798                 /**
1799                  * 1. must be in signature match mode
1800                  * 2. 'last' is not supported
1801                  * 3. mask must not be NULL
1802                  */
1803                 if (rule->mode != RTE_FDIR_MODE_SIGNATURE ||
1804                     item->last ||
1805                     !item->mask) {
1806                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1807                         rte_flow_error_set(error, EINVAL,
1808                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1809                                 item, "Not supported last point for range");
1810                         return -rte_errno;
1811                 }
1812
1813                 rule->b_mask = TRUE;
1814                 ipv6_mask =
1815                         (const struct rte_flow_item_ipv6 *)item->mask;
1816                 if (ipv6_mask->hdr.vtc_flow ||
1817                     ipv6_mask->hdr.payload_len ||
1818                     ipv6_mask->hdr.proto ||
1819                     ipv6_mask->hdr.hop_limits) {
1820                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1821                         rte_flow_error_set(error, EINVAL,
1822                                 RTE_FLOW_ERROR_TYPE_ITEM,
1823                                 item, "Not supported by fdir filter");
1824                         return -rte_errno;
1825                 }
1826
1827                 /* check src addr mask */
1828                 for (j = 0; j < 16; j++) {
1829                         if (ipv6_mask->hdr.src_addr[j] == UINT8_MAX) {
1830                                 rule->mask.src_ipv6_mask |= 1 << j;
1831                         } else if (ipv6_mask->hdr.src_addr[j] != 0) {
1832                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1833                                 rte_flow_error_set(error, EINVAL,
1834                                         RTE_FLOW_ERROR_TYPE_ITEM,
1835                                         item, "Not supported by fdir filter");
1836                                 return -rte_errno;
1837                         }
1838                 }
1839
1840                 /* check dst addr mask */
1841                 for (j = 0; j < 16; j++) {
1842                         if (ipv6_mask->hdr.dst_addr[j] == UINT8_MAX) {
1843                                 rule->mask.dst_ipv6_mask |= 1 << j;
1844                         } else if (ipv6_mask->hdr.dst_addr[j] != 0) {
1845                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1846                                 rte_flow_error_set(error, EINVAL,
1847                                         RTE_FLOW_ERROR_TYPE_ITEM,
1848                                         item, "Not supported by fdir filter");
1849                                 return -rte_errno;
1850                         }
1851                 }
1852
1853                 if (item->spec) {
1854                         rule->b_spec = TRUE;
1855                         ipv6_spec =
1856                                 (const struct rte_flow_item_ipv6 *)item->spec;
1857                         rte_memcpy(rule->ixgbe_fdir.formatted.src_ip,
1858                                    ipv6_spec->hdr.src_addr, 16);
1859                         rte_memcpy(rule->ixgbe_fdir.formatted.dst_ip,
1860                                    ipv6_spec->hdr.dst_addr, 16);
1861                 }
1862
1863                 /**
1864                  * Check if the next not void item is
1865                  * TCP or UDP or SCTP or END.
1866                  */
1867                 item = next_no_fuzzy_pattern(pattern, item);
1868                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1869                     item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1870                     item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
1871                     item->type != RTE_FLOW_ITEM_TYPE_END &&
1872                     item->type != RTE_FLOW_ITEM_TYPE_RAW) {
1873                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1874                         rte_flow_error_set(error, EINVAL,
1875                                 RTE_FLOW_ERROR_TYPE_ITEM,
1876                                 item, "Not supported by fdir filter");
1877                         return -rte_errno;
1878                 }
1879         }
1880
1881         /* Get the TCP info. */
1882         if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
1883                 /**
1884                  * Set the flow type even if there's no content
1885                  * as we must have a flow type.
1886                  */
1887                 rule->ixgbe_fdir.formatted.flow_type |=
1888                         IXGBE_ATR_L4TYPE_TCP;
1889                 /* Range matching via item->last is not supported. */
1890                 if (item->last) {
1891                         rte_flow_error_set(error, EINVAL,
1892                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1893                                 item, "Not supported last point for range");
1894                         return -rte_errno;
1895                 }
1896                 /**
1897                  * Only care about src & dst ports,
1898                  * others should be masked.
1899                  */
1900                 if (!item->mask) {
1901                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1902                         rte_flow_error_set(error, EINVAL,
1903                                 RTE_FLOW_ERROR_TYPE_ITEM,
1904                                 item, "Not supported by fdir filter");
1905                         return -rte_errno;
1906                 }
1907                 rule->b_mask = TRUE;
1908                 tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
1909                 if (tcp_mask->hdr.sent_seq ||
1910                     tcp_mask->hdr.recv_ack ||
1911                     tcp_mask->hdr.data_off ||
1912                     tcp_mask->hdr.tcp_flags ||
1913                     tcp_mask->hdr.rx_win ||
1914                     tcp_mask->hdr.cksum ||
1915                     tcp_mask->hdr.tcp_urp) {
1916                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1917                         rte_flow_error_set(error, EINVAL,
1918                                 RTE_FLOW_ERROR_TYPE_ITEM,
1919                                 item, "Not supported by fdir filter");
1920                         return -rte_errno;
1921                 }
1922                 rule->mask.src_port_mask = tcp_mask->hdr.src_port;
1923                 rule->mask.dst_port_mask = tcp_mask->hdr.dst_port;
1924
1925                 if (item->spec) {
1926                         rule->b_spec = TRUE;
1927                         tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
1928                         rule->ixgbe_fdir.formatted.src_port =
1929                                 tcp_spec->hdr.src_port;
1930                         rule->ixgbe_fdir.formatted.dst_port =
1931                                 tcp_spec->hdr.dst_port;
1932                 }
1933
1934                 item = next_no_fuzzy_pattern(pattern, item);
1935                 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
1936                     item->type != RTE_FLOW_ITEM_TYPE_END) {
1937                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1938                         rte_flow_error_set(error, EINVAL,
1939                                 RTE_FLOW_ERROR_TYPE_ITEM,
1940                                 item, "Not supported by fdir filter");
1941                         return -rte_errno;
1942                 }
1943
1944         }
1945
1946         /* Get the UDP info */
1947         if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
1948                 /**
1949                  * Set the flow type even if there's no content
1950                  * as we must have a flow type.
1951                  */
1952                 rule->ixgbe_fdir.formatted.flow_type |=
1953                         IXGBE_ATR_L4TYPE_UDP;
1954                 /* Range matching via item->last is not supported. */
1955                 if (item->last) {
1956                         rte_flow_error_set(error, EINVAL,
1957                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1958                                 item, "Not supported last point for range");
1959                         return -rte_errno;
1960                 }
1961                 /**
1962                  * Only care about src & dst ports,
1963                  * others should be masked.
1964                  */
1965                 if (!item->mask) {
1966                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1967                         rte_flow_error_set(error, EINVAL,
1968                                 RTE_FLOW_ERROR_TYPE_ITEM,
1969                                 item, "Not supported by fdir filter");
1970                         return -rte_errno;
1971                 }
1972                 rule->b_mask = TRUE;
1973                 udp_mask = (const struct rte_flow_item_udp *)item->mask;
1974                 if (udp_mask->hdr.dgram_len ||
1975                     udp_mask->hdr.dgram_cksum) {
1976                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1977                         rte_flow_error_set(error, EINVAL,
1978                                 RTE_FLOW_ERROR_TYPE_ITEM,
1979                                 item, "Not supported by fdir filter");
1980                         return -rte_errno;
1981                 }
1982                 rule->mask.src_port_mask = udp_mask->hdr.src_port;
1983                 rule->mask.dst_port_mask = udp_mask->hdr.dst_port;
1984
1985                 if (item->spec) {
1986                         rule->b_spec = TRUE;
1987                         udp_spec = (const struct rte_flow_item_udp *)item->spec;
1988                         rule->ixgbe_fdir.formatted.src_port =
1989                                 udp_spec->hdr.src_port;
1990                         rule->ixgbe_fdir.formatted.dst_port =
1991                                 udp_spec->hdr.dst_port;
1992                 }
1993
1994                 item = next_no_fuzzy_pattern(pattern, item);
1995                 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
1996                     item->type != RTE_FLOW_ITEM_TYPE_END) {
1997                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1998                         rte_flow_error_set(error, EINVAL,
1999                                 RTE_FLOW_ERROR_TYPE_ITEM,
2000                                 item, "Not supported by fdir filter");
2001                         return -rte_errno;
2002                 }
2003
2004         }
2005
2006         /* Get the SCTP info */
2007         if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
2008                 /**
2009                  * Set the flow type even if there's no content
2010                  * as we must have a flow type.
2011                  */
2012                 rule->ixgbe_fdir.formatted.flow_type |=
2013                         IXGBE_ATR_L4TYPE_SCTP;
2014                 /* Range matching via item->last is not supported. */
2015                 if (item->last) {
2016                         rte_flow_error_set(error, EINVAL,
2017                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2018                                 item, "Not supported last point for range");
2019                         return -rte_errno;
2020                 }
2021
2022                 /* Only the x550 family supports matching on SCTP ports. */
2023                 if (hw->mac.type == ixgbe_mac_X550 ||
2024                     hw->mac.type == ixgbe_mac_X550EM_x ||
2025                     hw->mac.type == ixgbe_mac_X550EM_a) {
2026                         /**
2027                          * Only care about src & dst ports,
2028                          * others should be masked.
2029                          */
2030                         if (!item->mask) {
2031                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2032                                 rte_flow_error_set(error, EINVAL,
2033                                         RTE_FLOW_ERROR_TYPE_ITEM,
2034                                         item, "Not supported by fdir filter");
2035                                 return -rte_errno;
2036                         }
2037                         rule->b_mask = TRUE;
2038                         sctp_mask =
2039                                 (const struct rte_flow_item_sctp *)item->mask;
2040                         if (sctp_mask->hdr.tag ||
2041                                 sctp_mask->hdr.cksum) {
2042                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2043                                 rte_flow_error_set(error, EINVAL,
2044                                         RTE_FLOW_ERROR_TYPE_ITEM,
2045                                         item, "Not supported by fdir filter");
2046                                 return -rte_errno;
2047                         }
2048                         rule->mask.src_port_mask = sctp_mask->hdr.src_port;
2049                         rule->mask.dst_port_mask = sctp_mask->hdr.dst_port;
2050
2051                         if (item->spec) {
2052                                 rule->b_spec = TRUE;
2053                                 sctp_spec =
2054                                 (const struct rte_flow_item_sctp *)item->spec;
2055                                 rule->ixgbe_fdir.formatted.src_port =
2056                                         sctp_spec->hdr.src_port;
2057                                 rule->ixgbe_fdir.formatted.dst_port =
2058                                         sctp_spec->hdr.dst_port;
2059                         }
2060                 /* On other MAC types, even SCTP port matching is not supported. */
2061                 } else {
2062                         sctp_mask =
2063                                 (const struct rte_flow_item_sctp *)item->mask;
2064                         if (sctp_mask &&
2065                                 (sctp_mask->hdr.src_port ||
2066                                  sctp_mask->hdr.dst_port ||
2067                                  sctp_mask->hdr.tag ||
2068                                  sctp_mask->hdr.cksum)) {
2069                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2070                                 rte_flow_error_set(error, EINVAL,
2071                                         RTE_FLOW_ERROR_TYPE_ITEM,
2072                                         item, "Not supported by fdir filter");
2073                                 return -rte_errno;
2074                         }
2075                 }
2076
2077                 item = next_no_fuzzy_pattern(pattern, item);
2078                 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
2079                         item->type != RTE_FLOW_ITEM_TYPE_END) {
2080                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2081                         rte_flow_error_set(error, EINVAL,
2082                                 RTE_FLOW_ERROR_TYPE_ITEM,
2083                                 item, "Not supported by fdir filter");
2084                         return -rte_errno;
2085                 }
2086         }
2087
2088         /* Get the flex byte info */
2089         if (item->type == RTE_FLOW_ITEM_TYPE_RAW) {
2090                 /* Range matching via item->last is not supported. */
2091                 if (item->last) {
2092                         rte_flow_error_set(error, EINVAL,
2093                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2094                                 item, "Not supported last point for range");
2095                         return -rte_errno;
2096                 }
2097                 /* Neither mask nor spec should be NULL. */
2098                 if (!item->mask || !item->spec) {
2099                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2100                         rte_flow_error_set(error, EINVAL,
2101                                 RTE_FLOW_ERROR_TYPE_ITEM,
2102                                 item, "Not supported by fdir filter");
2103                         return -rte_errno;
2104                 }
2105
2106                 raw_mask = (const struct rte_flow_item_raw *)item->mask;
2107
2108                 /* check mask */
2109                 if (raw_mask->relative != 0x1 ||
2110                     raw_mask->search != 0x1 ||
2111                     raw_mask->reserved != 0x0 ||
2112                     (uint32_t)raw_mask->offset != 0xffffffff ||
2113                     raw_mask->limit != 0xffff ||
2114                     raw_mask->length != 0xffff) {
2115                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2116                         rte_flow_error_set(error, EINVAL,
2117                                 RTE_FLOW_ERROR_TYPE_ITEM,
2118                                 item, "Not supported by fdir filter");
2119                         return -rte_errno;
2120                 }
2121
2122                 raw_spec = (const struct rte_flow_item_raw *)item->spec;
2123
2124                 /* check spec */
2125                 if (raw_spec->relative != 0 ||
2126                     raw_spec->search != 0 ||
2127                     raw_spec->reserved != 0 ||
2128                     raw_spec->offset > IXGBE_MAX_FLX_SOURCE_OFF ||
2129                     raw_spec->offset % 2 ||
2130                     raw_spec->limit != 0 ||
2131                     raw_spec->length != 2 ||
2132                     /* pattern can't be 0xffff */
2133                     (raw_spec->pattern[0] == 0xff &&
2134                      raw_spec->pattern[1] == 0xff)) {
2135                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2136                         rte_flow_error_set(error, EINVAL,
2137                                 RTE_FLOW_ERROR_TYPE_ITEM,
2138                                 item, "Not supported by fdir filter");
2139                         return -rte_errno;
2140                 }
2141
2142                 /* check pattern mask */
2143                 if (raw_mask->pattern[0] != 0xff ||
2144                     raw_mask->pattern[1] != 0xff) {
2145                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2146                         rte_flow_error_set(error, EINVAL,
2147                                 RTE_FLOW_ERROR_TYPE_ITEM,
2148                                 item, "Not supported by fdir filter");
2149                         return -rte_errno;
2150                 }
2151
2152                 rule->mask.flex_bytes_mask = 0xffff;
2153                 rule->ixgbe_fdir.formatted.flex_bytes =
2154                         (((uint16_t)raw_spec->pattern[1]) << 8) |
2155                         raw_spec->pattern[0];
2156                 rule->flex_bytes_offset = raw_spec->offset;
2157         }
2158
2159         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2160                 /* check if the next not void item is END */
2161                 item = next_no_fuzzy_pattern(pattern, item);
2162                 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2163                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2164                         rte_flow_error_set(error, EINVAL,
2165                                 RTE_FLOW_ERROR_TYPE_ITEM,
2166                                 item, "Not supported by fdir filter");
2167                         return -rte_errno;
2168                 }
2169         }
2170
2171         return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
2172 }
2173
2174 #define NVGRE_PROTOCOL 0x6558
2175
2176 /**
2177  * Parse the rule to see if it is a VxLAN or NVGRE flow director rule,
2178  * and fill in the flow director filter info along the way.
2179  * VxLAN PATTERN:
2180  * The first not void item must be ETH.
2181  * The second not void item must be IPV4/ IPV6.
2182  * The third not void item must be UDP and the fourth must be VXLAN.
2183  * The next not void item must be END.
2184  * NVGRE PATTERN:
2185  * The first not void item must be ETH.
2186  * The second not void item must be IPV4/ IPV6.
2187  * The third not void item must be NVGRE.
2188  * The next not void item must be END.
2189  * ACTION:
2190  * The first not void action should be QUEUE or DROP.
2191  * The second not void action is an optional MARK;
2192  * its mark_id is a uint32_t number.
2193  * The next not void action should be END.
2194  * VxLAN pattern example:
2195  * ITEM         Spec                    Mask
2196  * ETH          NULL                    NULL
2197  * IPV4/IPV6    NULL                    NULL
2198  * UDP          NULL                    NULL
2199  * VxLAN        vni{0x00, 0x32, 0x54}   {0xFF, 0xFF, 0xFF}
2200  * MAC VLAN     tci     0x2016          0xEFFF
2201  * END
2202  * NVGRE pattern example:
2203  * ITEM         Spec                    Mask
2204  * ETH          NULL                    NULL
2205  * IPV4/IPV6    NULL                    NULL
2206  * NVGRE        protocol        0x6558  0xFFFF
2207  *              tni{0x00, 0x32, 0x54}   {0xFF, 0xFF, 0xFF}
2208  * MAC VLAN     tci     0x2016          0xEFFF
2209  * END
2210  * Other members in mask and spec should be set to 0x00.
2211  * item->last should be NULL.
2212  */
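/*
 * Illustrative application-side sketch (not part of the driver): the VxLAN
 * pattern documented above could be expressed through the generic rte_flow
 * API roughly as follows.  The port_id, VNI, inner MAC/VLAN values and the
 * queue index are assumptions chosen for the example.
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item_vxlan vxlan_spec = { .vni = { 0x00, 0x32, 0x54 } };
 *	struct rte_flow_item_vxlan vxlan_mask = { .vni = { 0xFF, 0xFF, 0xFF } };
 *	struct rte_flow_item_eth inner_eth_spec = {
 *		.dst.addr_bytes = { 0xAC, 0x7B, 0xA1, 0x2C, 0x6D, 0x36 } };
 *	struct rte_flow_item_eth inner_eth_mask = {
 *		.dst.addr_bytes = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF } };
 *	struct rte_flow_item_vlan vlan_spec = {
 *		.tci = rte_cpu_to_be_16(0x2016) };
 *	struct rte_flow_item_vlan vlan_mask = {
 *		.tci = rte_cpu_to_be_16(0xEFFF) };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
 *		{ .type = RTE_FLOW_ITEM_TYPE_VXLAN,
 *		  .spec = &vxlan_spec, .mask = &vxlan_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
 *		  .spec = &inner_eth_spec, .mask = &inner_eth_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_VLAN,
 *		  .spec = &vlan_spec, .mask = &vlan_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *	struct rte_flow *flow = rte_flow_create(port_id, &attr,
 *						pattern, actions, &err);
 *
 * Note that the outer ETH, IPV4 and UDP items carry no spec/mask: the parser
 * below only uses them to describe the protocol stack.
 */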
2213 static int
2214 ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
2215                                const struct rte_flow_item pattern[],
2216                                const struct rte_flow_action actions[],
2217                                struct ixgbe_fdir_rule *rule,
2218                                struct rte_flow_error *error)
2219 {
2220         const struct rte_flow_item *item;
2221         const struct rte_flow_item_vxlan *vxlan_spec;
2222         const struct rte_flow_item_vxlan *vxlan_mask;
2223         const struct rte_flow_item_nvgre *nvgre_spec;
2224         const struct rte_flow_item_nvgre *nvgre_mask;
2225         const struct rte_flow_item_eth *eth_spec;
2226         const struct rte_flow_item_eth *eth_mask;
2227         const struct rte_flow_item_vlan *vlan_spec;
2228         const struct rte_flow_item_vlan *vlan_mask;
2229         uint32_t j;
2230
2231         if (!pattern) {
2232                 rte_flow_error_set(error, EINVAL,
2233                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
2234                                    NULL, "NULL pattern.");
2235                 return -rte_errno;
2236         }
2237
2238         if (!actions) {
2239                 rte_flow_error_set(error, EINVAL,
2240                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
2241                                    NULL, "NULL action.");
2242                 return -rte_errno;
2243         }
2244
2245         if (!attr) {
2246                 rte_flow_error_set(error, EINVAL,
2247                                    RTE_FLOW_ERROR_TYPE_ATTR,
2248                                    NULL, "NULL attribute.");
2249                 return -rte_errno;
2250         }
2251
2252         /**
2253          * Some fields may not be provided. Set the spec to 0 and the mask to its
2254          * default value so the fields that are not provided need no handling later.
2255          */
2256         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2257         memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
2258         rule->mask.vlan_tci_mask = 0;
2259
2260         /**
2261          * The first not void item should be
2262          * MAC or IPv4 or IPv6 or UDP or VxLAN or NVGRE.
2263          */
2264         item = next_no_void_pattern(pattern, NULL);
2265         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
2266             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
2267             item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
2268             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
2269             item->type != RTE_FLOW_ITEM_TYPE_VXLAN &&
2270             item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
2271                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2272                 rte_flow_error_set(error, EINVAL,
2273                         RTE_FLOW_ERROR_TYPE_ITEM,
2274                         item, "Not supported by fdir filter");
2275                 return -rte_errno;
2276         }
2277
2278         rule->mode = RTE_FDIR_MODE_PERFECT_TUNNEL;
2279
2280         /* Skip MAC. */
2281         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
2282                 /* Only used to describe the protocol stack. */
2283                 if (item->spec || item->mask) {
2284                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2285                         rte_flow_error_set(error, EINVAL,
2286                                 RTE_FLOW_ERROR_TYPE_ITEM,
2287                                 item, "Not supported by fdir filter");
2288                         return -rte_errno;
2289                 }
2290                 /* Range matching via item->last is not supported. */
2291                 if (item->last) {
2292                         rte_flow_error_set(error, EINVAL,
2293                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2294                                 item, "Not supported last point for range");
2295                         return -rte_errno;
2296                 }
2297
2298                 /* Check if the next not void item is IPv4 or IPv6. */
2299                 item = next_no_void_pattern(pattern, item);
2300                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
2301                     item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
2302                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2303                         rte_flow_error_set(error, EINVAL,
2304                                 RTE_FLOW_ERROR_TYPE_ITEM,
2305                                 item, "Not supported by fdir filter");
2306                         return -rte_errno;
2307                 }
2308         }
2309
2310         /* Skip IP. */
2311         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
2312             item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
2313                 /* Only used to describe the protocol stack. */
2314                 if (item->spec || item->mask) {
2315                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2316                         rte_flow_error_set(error, EINVAL,
2317                                 RTE_FLOW_ERROR_TYPE_ITEM,
2318                                 item, "Not supported by fdir filter");
2319                         return -rte_errno;
2320                 }
2321                 /* Range matching via item->last is not supported. */
2322                 if (item->last) {
2323                         rte_flow_error_set(error, EINVAL,
2324                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2325                                 item, "Not supported last point for range");
2326                         return -rte_errno;
2327                 }
2328
2329                 /* Check if the next not void item is UDP or NVGRE. */
2330                 item = next_no_void_pattern(pattern, item);
2331                 if (item->type != RTE_FLOW_ITEM_TYPE_UDP &&
2332                     item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
2333                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2334                         rte_flow_error_set(error, EINVAL,
2335                                 RTE_FLOW_ERROR_TYPE_ITEM,
2336                                 item, "Not supported by fdir filter");
2337                         return -rte_errno;
2338                 }
2339         }
2340
2341         /* Skip UDP. */
2342         if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
2343                 /* Only used to describe the protocol stack. */
2344                 if (item->spec || item->mask) {
2345                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2346                         rte_flow_error_set(error, EINVAL,
2347                                 RTE_FLOW_ERROR_TYPE_ITEM,
2348                                 item, "Not supported by fdir filter");
2349                         return -rte_errno;
2350                 }
2351                 /* Range matching via item->last is not supported. */
2352                 if (item->last) {
2353                         rte_flow_error_set(error, EINVAL,
2354                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2355                                 item, "Not supported last point for range");
2356                         return -rte_errno;
2357                 }
2358
2359                 /* Check if the next not void item is VxLAN. */
2360                 item = next_no_void_pattern(pattern, item);
2361                 if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
2362                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2363                         rte_flow_error_set(error, EINVAL,
2364                                 RTE_FLOW_ERROR_TYPE_ITEM,
2365                                 item, "Not supported by fdir filter");
2366                         return -rte_errno;
2367                 }
2368         }
2369
2370         /* Get the VxLAN info */
2371         if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
2372                 rule->ixgbe_fdir.formatted.tunnel_type =
2373                         RTE_FDIR_TUNNEL_TYPE_VXLAN;
2374
2375                 /* Only care about VNI, others should be masked. */
2376                 if (!item->mask) {
2377                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2378                         rte_flow_error_set(error, EINVAL,
2379                                 RTE_FLOW_ERROR_TYPE_ITEM,
2380                                 item, "Not supported by fdir filter");
2381                         return -rte_errno;
2382                 }
2383                 /* Range matching via item->last is not supported. */
2384                 if (item->last) {
2385                         rte_flow_error_set(error, EINVAL,
2386                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2387                                 item, "Not supported last point for range");
2388                         return -rte_errno;
2389                 }
2390                 rule->b_mask = TRUE;
2391
2392                 /* Tunnel type is always meaningful. */
2393                 rule->mask.tunnel_type_mask = 1;
2394
2395                 vxlan_mask =
2396                         (const struct rte_flow_item_vxlan *)item->mask;
2397                 if (vxlan_mask->flags) {
2398                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2399                         rte_flow_error_set(error, EINVAL,
2400                                 RTE_FLOW_ERROR_TYPE_ITEM,
2401                                 item, "Not supported by fdir filter");
2402                         return -rte_errno;
2403                 }
2404                 /* VNI must be either fully masked or not masked at all. */
2405                 if ((vxlan_mask->vni[0] || vxlan_mask->vni[1] ||
2406                         vxlan_mask->vni[2]) &&
2407                         ((vxlan_mask->vni[0] != 0xFF) ||
2408                         (vxlan_mask->vni[1] != 0xFF) ||
2409                                 (vxlan_mask->vni[2] != 0xFF))) {
2410                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2411                         rte_flow_error_set(error, EINVAL,
2412                                 RTE_FLOW_ERROR_TYPE_ITEM,
2413                                 item, "Not supported by fdir filter");
2414                         return -rte_errno;
2415                 }
2416
2417                 rte_memcpy(&rule->mask.tunnel_id_mask, vxlan_mask->vni,
2418                         RTE_DIM(vxlan_mask->vni));
2419
2420                 if (item->spec) {
2421                         rule->b_spec = TRUE;
2422                         vxlan_spec = (const struct rte_flow_item_vxlan *)
2423                                         item->spec;
2424                         rte_memcpy(((uint8_t *)
2425                                 &rule->ixgbe_fdir.formatted.tni_vni + 1),
2426                                 vxlan_spec->vni, RTE_DIM(vxlan_spec->vni));
2427                         rule->ixgbe_fdir.formatted.tni_vni = rte_be_to_cpu_32(
2428                                 rule->ixgbe_fdir.formatted.tni_vni);
2429                 }
2430         }
2431
2432         /* Get the NVGRE info */
2433         if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE) {
2434                 rule->ixgbe_fdir.formatted.tunnel_type =
2435                         RTE_FDIR_TUNNEL_TYPE_NVGRE;
2436
2437                 /**
2438                  * Only care about flags0, flags1, protocol and TNI,
2439                  * others should be masked.
2440                  */
2441                 if (!item->mask) {
2442                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2443                         rte_flow_error_set(error, EINVAL,
2444                                 RTE_FLOW_ERROR_TYPE_ITEM,
2445                                 item, "Not supported by fdir filter");
2446                         return -rte_errno;
2447                 }
2448                 /* Range matching via item->last is not supported. */
2449                 if (item->last) {
2450                         rte_flow_error_set(error, EINVAL,
2451                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2452                                 item, "Not supported last point for range");
2453                         return -rte_errno;
2454                 }
2455                 rule->b_mask = TRUE;
2456
2457                 /* Tunnel type is always meaningful. */
2458                 rule->mask.tunnel_type_mask = 1;
2459
2460                 nvgre_mask =
2461                         (const struct rte_flow_item_nvgre *)item->mask;
2462                 if (nvgre_mask->flow_id) {
2463                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2464                         rte_flow_error_set(error, EINVAL,
2465                                 RTE_FLOW_ERROR_TYPE_ITEM,
2466                                 item, "Not supported by fdir filter");
2467                         return -rte_errno;
2468                 }
2469                 if (nvgre_mask->c_k_s_rsvd0_ver !=
2470                         rte_cpu_to_be_16(0x3000) ||
2471                     nvgre_mask->protocol != 0xFFFF) {
2472                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2473                         rte_flow_error_set(error, EINVAL,
2474                                 RTE_FLOW_ERROR_TYPE_ITEM,
2475                                 item, "Not supported by fdir filter");
2476                         return -rte_errno;
2477                 }
2478                 /* TNI must be totally masked or not. */
2479                 if ((nvgre_mask->tni[0] || nvgre_mask->tni[1] ||
2480                     nvgre_mask->tni[2]) && ((nvgre_mask->tni[0] != 0xFF) ||
2481                     (nvgre_mask->tni[1] != 0xFF) ||
2482                     (nvgre_mask->tni[2] != 0xFF))) {
2483                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2484                         rte_flow_error_set(error, EINVAL,
2485                                 RTE_FLOW_ERROR_TYPE_ITEM,
2486                                 item, "Not supported by fdir filter");
2487                         return -rte_errno;
2488                 }
2489                 /* TNI is a 24-bit field. */
2490                 rte_memcpy(&rule->mask.tunnel_id_mask, nvgre_mask->tni,
2491                         RTE_DIM(nvgre_mask->tni));
2492                 rule->mask.tunnel_id_mask <<= 8;
2493
2494                 if (item->spec) {
2495                         rule->b_spec = TRUE;
2496                         nvgre_spec =
2497                                 (const struct rte_flow_item_nvgre *)item->spec;
2498                         if (nvgre_spec->c_k_s_rsvd0_ver !=
2499                             rte_cpu_to_be_16(0x2000) ||
2500                             nvgre_spec->protocol !=
2501                             rte_cpu_to_be_16(NVGRE_PROTOCOL)) {
2502                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2503                                 rte_flow_error_set(error, EINVAL,
2504                                         RTE_FLOW_ERROR_TYPE_ITEM,
2505                                         item, "Not supported by fdir filter");
2506                                 return -rte_errno;
2507                         }
2508                         /* TNI is a 24-bit field. */
2509                         rte_memcpy(&rule->ixgbe_fdir.formatted.tni_vni,
2510                         nvgre_spec->tni, RTE_DIM(nvgre_spec->tni));
2511                         rule->ixgbe_fdir.formatted.tni_vni <<= 8;
2512                 }
2513         }
2514
2515         /* check if the next not void item is MAC */
2516         item = next_no_void_pattern(pattern, item);
2517         if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
2518                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2519                 rte_flow_error_set(error, EINVAL,
2520                         RTE_FLOW_ERROR_TYPE_ITEM,
2521                         item, "Not supported by fdir filter");
2522                 return -rte_errno;
2523         }
2524
2525         /**
2526          * Only VLAN and destination MAC address are supported;
2527          * all other fields must be masked.
2528          */
2529
2530         if (!item->mask) {
2531                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2532                 rte_flow_error_set(error, EINVAL,
2533                         RTE_FLOW_ERROR_TYPE_ITEM,
2534                         item, "Not supported by fdir filter");
2535                 return -rte_errno;
2536         }
2537         /* Range matching ("last") is not supported. */
2538         if (item->last) {
2539                 rte_flow_error_set(error, EINVAL,
2540                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2541                         item, "Not supported last point for range");
2542                 return -rte_errno;
2543         }
2544         rule->b_mask = TRUE;
2545         eth_mask = (const struct rte_flow_item_eth *)item->mask;
2546
2547         /* Ether type should be masked. */
2548         if (eth_mask->type) {
2549                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2550                 rte_flow_error_set(error, EINVAL,
2551                         RTE_FLOW_ERROR_TYPE_ITEM,
2552                         item, "Not supported by fdir filter");
2553                 return -rte_errno;
2554         }
2555
2556         /* src MAC address should be masked. */
2557         for (j = 0; j < ETHER_ADDR_LEN; j++) {
2558                 if (eth_mask->src.addr_bytes[j]) {
2559                         memset(rule, 0,
2560                                sizeof(struct ixgbe_fdir_rule));
2561                         rte_flow_error_set(error, EINVAL,
2562                                 RTE_FLOW_ERROR_TYPE_ITEM,
2563                                 item, "Not supported by fdir filter");
2564                         return -rte_errno;
2565                 }
2566         }
2567         rule->mask.mac_addr_byte_mask = 0;
2568         for (j = 0; j < ETHER_ADDR_LEN; j++) {
2569                 /* It's a per byte mask. */
2570                 if (eth_mask->dst.addr_bytes[j] == 0xFF) {
2571                         rule->mask.mac_addr_byte_mask |= 0x1 << j;
2572                 } else if (eth_mask->dst.addr_bytes[j]) {
2573                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2574                         rte_flow_error_set(error, EINVAL,
2575                                 RTE_FLOW_ERROR_TYPE_ITEM,
2576                                 item, "Not supported by fdir filter");
2577                         return -rte_errno;
2578                 }
2579         }
2580
2581         /* When there is no VLAN item, treat the TCI as fully masked. */
2582         rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
2583
2584         if (item->spec) {
2585                 rule->b_spec = TRUE;
2586                 eth_spec = (const struct rte_flow_item_eth *)item->spec;
2587
2588                 /* Get the dst MAC. */
2589                 for (j = 0; j < ETHER_ADDR_LEN; j++) {
2590                         rule->ixgbe_fdir.formatted.inner_mac[j] =
2591                                 eth_spec->dst.addr_bytes[j];
2592                 }
2593         }
2594
2595         /**
2596          * Check if the next not void item is vlan or ipv4.
2597          * IPv6 is not supported.
2598          */
2599         item = next_no_void_pattern(pattern, item);
2600         if ((item->type != RTE_FLOW_ITEM_TYPE_VLAN) &&
2601                 (item->type != RTE_FLOW_ITEM_TYPE_IPV4)) {
2602                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2603                 rte_flow_error_set(error, EINVAL,
2604                         RTE_FLOW_ERROR_TYPE_ITEM,
2605                         item, "Not supported by fdir filter");
2606                 return -rte_errno;
2607         }
2608         /* Range matching ("last") is not supported. */
2609         if (item->last) {
2610                 rte_flow_error_set(error, EINVAL,
2611                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2612                         item, "Not supported last point for range");
2613                 return -rte_errno;
2614         }
2615
2616         if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
2617                 if (!(item->spec && item->mask)) {
2618                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2619                         rte_flow_error_set(error, EINVAL,
2620                                 RTE_FLOW_ERROR_TYPE_ITEM,
2621                                 item, "Not supported by fdir filter");
2622                         return -rte_errno;
2623                 }
2624
2625                 vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
2626                 vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
2627
2628                 rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
2629
2630                 rule->mask.vlan_tci_mask = vlan_mask->tci;
2631                 rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
2632                 /* More than one VLAN tag is not supported. */
2633
2634                 /* check if the next not void item is END */
2635                 item = next_no_void_pattern(pattern, item);
2636
2637                 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2638                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2639                         rte_flow_error_set(error, EINVAL,
2640                                 RTE_FLOW_ERROR_TYPE_ITEM,
2641                                 item, "Not supported by fdir filter");
2642                         return -rte_errno;
2643                 }
2644         }
2645
2646         /**
2647          * If there is no VLAN tag, the VLAN is a don't care.
2648          * Do nothing.
2649          */
2650
2651         return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
2652 }
2653
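/**
 * Parse a flow rule into a flow director filter.
 * The normal (non-tunnel) parser is tried first, then the tunnel parser;
 * device-specific restrictions are checked afterwards.
 */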
2654 static int
2655 ixgbe_parse_fdir_filter(struct rte_eth_dev *dev,
2656                         const struct rte_flow_attr *attr,
2657                         const struct rte_flow_item pattern[],
2658                         const struct rte_flow_action actions[],
2659                         struct ixgbe_fdir_rule *rule,
2660                         struct rte_flow_error *error)
2661 {
2662         int ret;
2663         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2664         enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
2665
2666         if (hw->mac.type != ixgbe_mac_82599EB &&
2667                 hw->mac.type != ixgbe_mac_X540 &&
2668                 hw->mac.type != ixgbe_mac_X550 &&
2669                 hw->mac.type != ixgbe_mac_X550EM_x &&
2670                 hw->mac.type != ixgbe_mac_X550EM_a)
2671                 return -ENOTSUP;
2672
2673         ret = ixgbe_parse_fdir_filter_normal(dev, attr, pattern,
2674                                         actions, rule, error);
2675
2676         if (!ret)
2677                 goto step_next;
2678
2679         ret = ixgbe_parse_fdir_filter_tunnel(attr, pattern,
2680                                         actions, rule, error);
2681
2682         if (ret)
2683                 return ret;
2684
2685 step_next:
2686
2687         if (hw->mac.type == ixgbe_mac_82599EB &&
2688                 rule->fdirflags == IXGBE_FDIRCMD_DROP &&
2689                 (rule->ixgbe_fdir.formatted.src_port != 0 ||
2690                 rule->ixgbe_fdir.formatted.dst_port != 0))
2691                 return -ENOTSUP;
2692
2693         if (fdir_mode == RTE_FDIR_MODE_NONE ||
2694             fdir_mode != rule->mode)
2695                 return -ENOTSUP;
2696
2697         if (rule->queue >= dev->data->nb_rx_queues)
2698                 return -ENOTSUP;
2699
2700         return ret;
2701 }
2702
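/* Initialize the global lists that track filters created through rte_flow. */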
2703 void
2704 ixgbe_filterlist_init(void)
2705 {
2706         TAILQ_INIT(&filter_ntuple_list);
2707         TAILQ_INIT(&filter_ethertype_list);
2708         TAILQ_INIT(&filter_syn_list);
2709         TAILQ_INIT(&filter_fdir_list);
2710         TAILQ_INIT(&filter_l2_tunnel_list);
2711         TAILQ_INIT(&ixgbe_flow_list);
2712 }
2713
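/*
 * Release every element on the global filter lists together with the
 * rte_flow handles that reference them.
 */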
2714 void
2715 ixgbe_filterlist_flush(void)
2716 {
2717         struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
2718         struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
2719         struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
2720         struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2721         struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
2722         struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
2723
2724         while ((ntuple_filter_ptr = TAILQ_FIRST(&filter_ntuple_list))) {
2725                 TAILQ_REMOVE(&filter_ntuple_list,
2726                                  ntuple_filter_ptr,
2727                                  entries);
2728                 rte_free(ntuple_filter_ptr);
2729         }
2730
2731         while ((ethertype_filter_ptr = TAILQ_FIRST(&filter_ethertype_list))) {
2732                 TAILQ_REMOVE(&filter_ethertype_list,
2733                                  ethertype_filter_ptr,
2734                                  entries);
2735                 rte_free(ethertype_filter_ptr);
2736         }
2737
2738         while ((syn_filter_ptr = TAILQ_FIRST(&filter_syn_list))) {
2739                 TAILQ_REMOVE(&filter_syn_list,
2740                                  syn_filter_ptr,
2741                                  entries);
2742                 rte_free(syn_filter_ptr);
2743         }
2744
2745         while ((l2_tn_filter_ptr = TAILQ_FIRST(&filter_l2_tunnel_list))) {
2746                 TAILQ_REMOVE(&filter_l2_tunnel_list,
2747                                  l2_tn_filter_ptr,
2748                                  entries);
2749                 rte_free(l2_tn_filter_ptr);
2750         }
2751
2752         while ((fdir_rule_ptr = TAILQ_FIRST(&filter_fdir_list))) {
2753                 TAILQ_REMOVE(&filter_fdir_list,
2754                                  fdir_rule_ptr,
2755                                  entries);
2756                 rte_free(fdir_rule_ptr);
2757         }
2758
2759         while ((ixgbe_flow_mem_ptr = TAILQ_FIRST(&ixgbe_flow_list))) {
2760                 TAILQ_REMOVE(&ixgbe_flow_list,
2761                                  ixgbe_flow_mem_ptr,
2762                                  entries);
2763                 rte_free(ixgbe_flow_mem_ptr->flow);
2764                 rte_free(ixgbe_flow_mem_ptr);
2765         }
2766 }
2767
2768 /**
2769  * Create or destroy a flow rule.
2770  * Theoretically one rule can match more than one filter type.
2771  * The first filter type that matches is used, so the order of the
2772  * checks (n-tuple, ethertype, SYN, flow director, L2 tunnel) matters.
2773  */
2774 static struct rte_flow *
2775 ixgbe_flow_create(struct rte_eth_dev *dev,
2776                   const struct rte_flow_attr *attr,
2777                   const struct rte_flow_item pattern[],
2778                   const struct rte_flow_action actions[],
2779                   struct rte_flow_error *error)
2780 {
2781         int ret;
2782         struct rte_eth_ntuple_filter ntuple_filter;
2783         struct rte_eth_ethertype_filter ethertype_filter;
2784         struct rte_eth_syn_filter syn_filter;
2785         struct ixgbe_fdir_rule fdir_rule;
2786         struct rte_eth_l2_tunnel_conf l2_tn_filter;
2787         struct ixgbe_hw_fdir_info *fdir_info =
2788                 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
2789         struct rte_flow *flow = NULL;
2790         struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
2791         struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
2792         struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
2793         struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2794         struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
2795         struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
2796         uint8_t first_mask = FALSE;
2797
2798         flow = rte_zmalloc("ixgbe_rte_flow", sizeof(struct rte_flow), 0);
2799         if (!flow) {
2800                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2801                 return (struct rte_flow *)flow;
2802         }
2803         ixgbe_flow_mem_ptr = rte_zmalloc("ixgbe_flow_mem",
2804                         sizeof(struct ixgbe_flow_mem), 0);
2805         if (!ixgbe_flow_mem_ptr) {
2806                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2807                 rte_free(flow);
2808                 return NULL;
2809         }
2810         ixgbe_flow_mem_ptr->flow = flow;
2811         TAILQ_INSERT_TAIL(&ixgbe_flow_list,
2812                                 ixgbe_flow_mem_ptr, entries);
2813
2814         memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
2815         ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
2816                         actions, &ntuple_filter, error);
2817
2818 #ifdef RTE_LIBRTE_SECURITY
2819         /* An ESP flow is not really a flow. */
2820         if (ntuple_filter.proto == IPPROTO_ESP)
2821                 return flow;
2822 #endif
2823
2824         if (!ret) {
2825                 ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
2826                 if (!ret) {
2827                         ntuple_filter_ptr = rte_zmalloc("ixgbe_ntuple_filter",
2828                                 sizeof(struct ixgbe_ntuple_filter_ele), 0);
2829                         if (!ntuple_filter_ptr) {
2830                                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2831                                 goto out;
2832                         }
2833                         rte_memcpy(&ntuple_filter_ptr->filter_info,
2834                                 &ntuple_filter,
2835                                 sizeof(struct rte_eth_ntuple_filter));
2836                         TAILQ_INSERT_TAIL(&filter_ntuple_list,
2837                                 ntuple_filter_ptr, entries);
2838                         flow->rule = ntuple_filter_ptr;
2839                         flow->filter_type = RTE_ETH_FILTER_NTUPLE;
2840                         return flow;
2841                 }
2842                 goto out;
2843         }
2844
2845         memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
2846         ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
2847                                 actions, &ethertype_filter, error);
2848         if (!ret) {
2849                 ret = ixgbe_add_del_ethertype_filter(dev,
2850                                 &ethertype_filter, TRUE);
2851                 if (!ret) {
2852                         ethertype_filter_ptr = rte_zmalloc(
2853                                 "ixgbe_ethertype_filter",
2854                                 sizeof(struct ixgbe_ethertype_filter_ele), 0);
2855                         if (!ethertype_filter_ptr) {
2856                                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2857                                 goto out;
2858                         }
2859                         rte_memcpy(&ethertype_filter_ptr->filter_info,
2860                                 &ethertype_filter,
2861                                 sizeof(struct rte_eth_ethertype_filter));
2862                         TAILQ_INSERT_TAIL(&filter_ethertype_list,
2863                                 ethertype_filter_ptr, entries);
2864                         flow->rule = ethertype_filter_ptr;
2865                         flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
2866                         return flow;
2867                 }
2868                 goto out;
2869         }
2870
2871         memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
2872         ret = ixgbe_parse_syn_filter(dev, attr, pattern,
2873                                 actions, &syn_filter, error);
2874         if (!ret) {
2875                 ret = ixgbe_syn_filter_set(dev, &syn_filter, TRUE);
2876                 if (!ret) {
2877                         syn_filter_ptr = rte_zmalloc("ixgbe_syn_filter",
2878                                 sizeof(struct ixgbe_eth_syn_filter_ele), 0);
2879                         if (!syn_filter_ptr) {
2880                                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2881                                 goto out;
2882                         }
2883                         rte_memcpy(&syn_filter_ptr->filter_info,
2884                                 &syn_filter,
2885                                 sizeof(struct rte_eth_syn_filter));
2886                         TAILQ_INSERT_TAIL(&filter_syn_list,
2887                                 syn_filter_ptr,
2888                                 entries);
2889                         flow->rule = syn_filter_ptr;
2890                         flow->filter_type = RTE_ETH_FILTER_SYN;
2891                         return flow;
2892                 }
2893                 goto out;
2894         }
2895
2896         memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
2897         ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
2898                                 actions, &fdir_rule, error);
2899         if (!ret) {
2900                 /* A mask cannot be deleted. */
2901                 if (fdir_rule.b_mask) {
2902                         if (!fdir_info->mask_added) {
2903                                 /* It's the first time the mask is set. */
2904                                 rte_memcpy(&fdir_info->mask,
2905                                         &fdir_rule.mask,
2906                                         sizeof(struct ixgbe_hw_fdir_mask));
2907                                 fdir_info->flex_bytes_offset =
2908                                         fdir_rule.flex_bytes_offset;
2909
2910                                 if (fdir_rule.mask.flex_bytes_mask)
2911                                         ixgbe_fdir_set_flexbytes_offset(dev,
2912                                                 fdir_rule.flex_bytes_offset);
2913
2914                                 ret = ixgbe_fdir_set_input_mask(dev);
2915                                 if (ret)
2916                                         goto out;
2917
2918                                 fdir_info->mask_added = TRUE;
2919                                 first_mask = TRUE;
2920                         } else {
2921                                 /**
2922                                  * Only one global mask is supported;
2923                                  * all rules must use the same mask.
2924                                  */
2925                                 ret = memcmp(&fdir_info->mask,
2926                                         &fdir_rule.mask,
2927                                         sizeof(struct ixgbe_hw_fdir_mask));
2928                                 if (ret)
2929                                         goto out;
2930
2931                                 if (fdir_info->flex_bytes_offset !=
2932                                                 fdir_rule.flex_bytes_offset)
2933                                         goto out;
2934                         }
2935                 }
2936
2937                 if (fdir_rule.b_spec) {
2938                         ret = ixgbe_fdir_filter_program(dev, &fdir_rule,
2939                                         FALSE, FALSE);
2940                         if (!ret) {
2941                                 fdir_rule_ptr = rte_zmalloc("ixgbe_fdir_filter",
2942                                         sizeof(struct ixgbe_fdir_rule_ele), 0);
2943                                 if (!fdir_rule_ptr) {
2944                                         PMD_DRV_LOG(ERR, "failed to allocate memory");
2945                                         goto out;
2946                                 }
2947                                 rte_memcpy(&fdir_rule_ptr->filter_info,
2948                                         &fdir_rule,
2949                                         sizeof(struct ixgbe_fdir_rule));
2950                                 TAILQ_INSERT_TAIL(&filter_fdir_list,
2951                                         fdir_rule_ptr, entries);
2952                                 flow->rule = fdir_rule_ptr;
2953                                 flow->filter_type = RTE_ETH_FILTER_FDIR;
2954
2955                                 return flow;
2956                         }
2957
2958                         if (ret) {
2959                                 /**
2960                                  * Clear the mask_added flag if programming
2961                                  * the filter fails.
2962                                  */
2963                                 if (first_mask)
2964                                         fdir_info->mask_added = FALSE;
2965                                 goto out;
2966                         }
2967                 }
2968
2969                 goto out;
2970         }
2971
2972         memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
2973         ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
2974                                         actions, &l2_tn_filter, error);
2975         if (!ret) {
2976                 ret = ixgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_filter, FALSE);
2977                 if (!ret) {
2978                         l2_tn_filter_ptr = rte_zmalloc("ixgbe_l2_tn_filter",
2979                                 sizeof(struct ixgbe_eth_l2_tunnel_conf_ele), 0);
2980                         if (!l2_tn_filter_ptr) {
2981                                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2982                                 goto out;
2983                         }
2984                         rte_memcpy(&l2_tn_filter_ptr->filter_info,
2985                                 &l2_tn_filter,
2986                                 sizeof(struct rte_eth_l2_tunnel_conf));
2987                         TAILQ_INSERT_TAIL(&filter_l2_tunnel_list,
2988                                 l2_tn_filter_ptr, entries);
2989                         flow->rule = l2_tn_filter_ptr;
2990                         flow->filter_type = RTE_ETH_FILTER_L2_TUNNEL;
2991                         return flow;
2992                 }
2993         }
2994
2995 out:
2996         TAILQ_REMOVE(&ixgbe_flow_list,
2997                 ixgbe_flow_mem_ptr, entries);
2998         rte_flow_error_set(error, -ret,
2999                            RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
3000                            "Failed to create flow.");
3001         rte_free(ixgbe_flow_mem_ptr);
3002         rte_free(flow);
3003         return NULL;
3004 }
3005
3006 /**
3007  * Check if the flow rule is supported by ixgbe.
3008  * It only checks the format; it does not guarantee that the rule can be
3009  * programmed into the HW, because there may not be enough room for it.
3010  */
3011 static int
3012 ixgbe_flow_validate(struct rte_eth_dev *dev,
3013                 const struct rte_flow_attr *attr,
3014                 const struct rte_flow_item pattern[],
3015                 const struct rte_flow_action actions[],
3016                 struct rte_flow_error *error)
3017 {
3018         struct rte_eth_ntuple_filter ntuple_filter;
3019         struct rte_eth_ethertype_filter ethertype_filter;
3020         struct rte_eth_syn_filter syn_filter;
3021         struct rte_eth_l2_tunnel_conf l2_tn_filter;
3022         struct ixgbe_fdir_rule fdir_rule;
3023         int ret;
3024
3025         memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
3026         ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
3027                                 actions, &ntuple_filter, error);
3028         if (!ret)
3029                 return 0;
3030
3031         memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
3032         ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
3033                                 actions, &ethertype_filter, error);
3034         if (!ret)
3035                 return 0;
3036
3037         memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
3038         ret = ixgbe_parse_syn_filter(dev, attr, pattern,
3039                                 actions, &syn_filter, error);
3040         if (!ret)
3041                 return 0;
3042
3043         memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
3044         ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
3045                                 actions, &fdir_rule, error);
3046         if (!ret)
3047                 return 0;
3048
3049         memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
3050         ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
3051                                 actions, &l2_tn_filter, error);
3052
3053         return ret;
3054 }
3055
3056 /* Destroy a flow rule on ixgbe. */
3057 static int
3058 ixgbe_flow_destroy(struct rte_eth_dev *dev,
3059                 struct rte_flow *flow,
3060                 struct rte_flow_error *error)
3061 {
3062         int ret;
3063         struct rte_flow *pmd_flow = flow;
3064         enum rte_filter_type filter_type = pmd_flow->filter_type;
3065         struct rte_eth_ntuple_filter ntuple_filter;
3066         struct rte_eth_ethertype_filter ethertype_filter;
3067         struct rte_eth_syn_filter syn_filter;
3068         struct ixgbe_fdir_rule fdir_rule;
3069         struct rte_eth_l2_tunnel_conf l2_tn_filter;
3070         struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
3071         struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
3072         struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
3073         struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
3074         struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
3075         struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
3076         struct ixgbe_hw_fdir_info *fdir_info =
3077                 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
3078
3079         switch (filter_type) {
3080         case RTE_ETH_FILTER_NTUPLE:
3081                 ntuple_filter_ptr = (struct ixgbe_ntuple_filter_ele *)
3082                                         pmd_flow->rule;
3083                 rte_memcpy(&ntuple_filter,
3084                         &ntuple_filter_ptr->filter_info,
3085                         sizeof(struct rte_eth_ntuple_filter));
3086                 ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, FALSE);
3087                 if (!ret) {
3088                         TAILQ_REMOVE(&filter_ntuple_list,
3089                         ntuple_filter_ptr, entries);
3090                         rte_free(ntuple_filter_ptr);
3091                 }
3092                 break;
3093         case RTE_ETH_FILTER_ETHERTYPE:
3094                 ethertype_filter_ptr = (struct ixgbe_ethertype_filter_ele *)
3095                                         pmd_flow->rule;
3096                 rte_memcpy(&ethertype_filter,
3097                         &ethertype_filter_ptr->filter_info,
3098                         sizeof(struct rte_eth_ethertype_filter));
3099                 ret = ixgbe_add_del_ethertype_filter(dev,
3100                                 &ethertype_filter, FALSE);
3101                 if (!ret) {
3102                         TAILQ_REMOVE(&filter_ethertype_list,
3103                                 ethertype_filter_ptr, entries);
3104                         rte_free(ethertype_filter_ptr);
3105                 }
3106                 break;
3107         case RTE_ETH_FILTER_SYN:
3108                 syn_filter_ptr = (struct ixgbe_eth_syn_filter_ele *)
3109                                 pmd_flow->rule;
3110                 rte_memcpy(&syn_filter,
3111                         &syn_filter_ptr->filter_info,
3112                         sizeof(struct rte_eth_syn_filter));
3113                 ret = ixgbe_syn_filter_set(dev, &syn_filter, FALSE);
3114                 if (!ret) {
3115                         TAILQ_REMOVE(&filter_syn_list,
3116                                 syn_filter_ptr, entries);
3117                         rte_free(syn_filter_ptr);
3118                 }
3119                 break;
3120         case RTE_ETH_FILTER_FDIR:
3121                 fdir_rule_ptr = (struct ixgbe_fdir_rule_ele *)pmd_flow->rule;
3122                 rte_memcpy(&fdir_rule,
3123                         &fdir_rule_ptr->filter_info,
3124                         sizeof(struct ixgbe_fdir_rule));
3125                 ret = ixgbe_fdir_filter_program(dev, &fdir_rule, TRUE, FALSE);
3126                 if (!ret) {
3127                         TAILQ_REMOVE(&filter_fdir_list,
3128                                 fdir_rule_ptr, entries);
3129                         rte_free(fdir_rule_ptr);
3130                         if (TAILQ_EMPTY(&filter_fdir_list))
3131                                 fdir_info->mask_added = false;
3132                 }
3133                 break;
3134         case RTE_ETH_FILTER_L2_TUNNEL:
3135                 l2_tn_filter_ptr = (struct ixgbe_eth_l2_tunnel_conf_ele *)
3136                                 pmd_flow->rule;
3137                 rte_memcpy(&l2_tn_filter, &l2_tn_filter_ptr->filter_info,
3138                         sizeof(struct rte_eth_l2_tunnel_conf));
3139                 ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_filter);
3140                 if (!ret) {
3141                         TAILQ_REMOVE(&filter_l2_tunnel_list,
3142                                 l2_tn_filter_ptr, entries);
3143                         rte_free(l2_tn_filter_ptr);
3144                 }
3145                 break;
3146         default:
3147                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
3148                             filter_type);
3149                 ret = -EINVAL;
3150                 break;
3151         }
3152
3153         if (ret) {
3154                 rte_flow_error_set(error, EINVAL,
3155                                 RTE_FLOW_ERROR_TYPE_HANDLE,
3156                                 NULL, "Failed to destroy flow");
3157                 return ret;
3158         }
3159
3160         TAILQ_FOREACH(ixgbe_flow_mem_ptr, &ixgbe_flow_list, entries) {
3161                 if (ixgbe_flow_mem_ptr->flow == pmd_flow) {
3162                         TAILQ_REMOVE(&ixgbe_flow_list,
3163                                 ixgbe_flow_mem_ptr, entries);
3164                         rte_free(ixgbe_flow_mem_ptr);
3165                 }
3166         }
3167         rte_free(flow);
3168
3169         return ret;
3170 }
3171
3172 /*  Destroy all flow rules associated with a port on ixgbe. */
3173 static int
3174 ixgbe_flow_flush(struct rte_eth_dev *dev,
3175                 struct rte_flow_error *error)
3176 {
3177         int ret = 0;
3178
3179         ixgbe_clear_all_ntuple_filter(dev);
3180         ixgbe_clear_all_ethertype_filter(dev);
3181         ixgbe_clear_syn_filter(dev);
3182
3183         ret = ixgbe_clear_all_fdir_filter(dev);
3184         if (ret < 0) {
3185                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
3186                                         NULL, "Failed to flush rule");
3187                 return ret;
3188         }
3189
3190         ret = ixgbe_clear_all_l2_tn_filter(dev);
3191         if (ret < 0) {
3192                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
3193                                         NULL, "Failed to flush rule");
3194                 return ret;
3195         }
3196
3197         ixgbe_filterlist_flush();
3198
3199         return 0;
3200 }
3201
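/* rte_flow operations exposed by the ixgbe PMD. */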
3202 const struct rte_flow_ops ixgbe_flow_ops = {
3203         .validate = ixgbe_flow_validate,
3204         .create = ixgbe_flow_create,
3205         .destroy = ixgbe_flow_destroy,
3206         .flush = ixgbe_flow_flush,
3207 };
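
/*
 * Illustrative sketch only (not part of the driver): an application reaches
 * the ops above through the generic rte_flow API. The port_id, the IPv4/TCP
 * spec and mask variables and the queue index are assumptions made for the
 * example; whether a given rule is accepted depends on the parsers above.
 *
 *      struct rte_flow_error err;
 *      struct rte_flow_attr attr = { .ingress = 1 };
 *      struct rte_flow_item pattern[] = {
 *              { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *              { .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *                .spec = &ip_spec, .mask = &ip_mask },
 *              { .type = RTE_FLOW_ITEM_TYPE_TCP,
 *                .spec = &tcp_spec, .mask = &tcp_mask },
 *              { .type = RTE_FLOW_ITEM_TYPE_END },
 *      };
 *      struct rte_flow_action_queue queue = { .index = 1 };
 *      struct rte_flow_action actions[] = {
 *              { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *              { .type = RTE_FLOW_ACTION_TYPE_END },
 *      };
 *      struct rte_flow *flow = NULL;
 *
 *      if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *              flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
 */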