New upstream version 17.11.4
[deb_dpdk.git] / drivers/net/ixgbe/ixgbe_flow.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <sys/queue.h>
35 #include <stdio.h>
36 #include <errno.h>
37 #include <stdint.h>
38 #include <string.h>
39 #include <unistd.h>
40 #include <stdarg.h>
41 #include <inttypes.h>
42 #include <netinet/in.h>
43 #include <rte_byteorder.h>
44 #include <rte_common.h>
45 #include <rte_cycles.h>
46
47 #include <rte_interrupts.h>
48 #include <rte_log.h>
49 #include <rte_debug.h>
50 #include <rte_pci.h>
51 #include <rte_atomic.h>
52 #include <rte_branch_prediction.h>
53 #include <rte_memory.h>
54 #include <rte_eal.h>
55 #include <rte_alarm.h>
56 #include <rte_ether.h>
57 #include <rte_ethdev.h>
58 #include <rte_malloc.h>
59 #include <rte_random.h>
60 #include <rte_dev.h>
61 #include <rte_hash_crc.h>
62 #include <rte_flow.h>
63 #include <rte_flow_driver.h>
64
65 #include "ixgbe_logs.h"
66 #include "base/ixgbe_api.h"
67 #include "base/ixgbe_vf.h"
68 #include "base/ixgbe_common.h"
69 #include "ixgbe_ethdev.h"
70 #include "ixgbe_bypass.h"
71 #include "ixgbe_rxtx.h"
72 #include "base/ixgbe_type.h"
73 #include "base/ixgbe_phy.h"
74 #include "rte_pmd_ixgbe.h"
75
76
77 #define IXGBE_MIN_N_TUPLE_PRIO 1
78 #define IXGBE_MAX_N_TUPLE_PRIO 7
79 #define IXGBE_MAX_FLX_SOURCE_OFF 62
80
81 /* ntuple filter list structure */
82 struct ixgbe_ntuple_filter_ele {
83         TAILQ_ENTRY(ixgbe_ntuple_filter_ele) entries;
84         struct rte_eth_ntuple_filter filter_info;
85 };
86 /* ethertype filter list structure */
87 struct ixgbe_ethertype_filter_ele {
88         TAILQ_ENTRY(ixgbe_ethertype_filter_ele) entries;
89         struct rte_eth_ethertype_filter filter_info;
90 };
91 /* syn filter list structure */
92 struct ixgbe_eth_syn_filter_ele {
93         TAILQ_ENTRY(ixgbe_eth_syn_filter_ele) entries;
94         struct rte_eth_syn_filter filter_info;
95 };
96 /* fdir filter list structure */
97 struct ixgbe_fdir_rule_ele {
98         TAILQ_ENTRY(ixgbe_fdir_rule_ele) entries;
99         struct ixgbe_fdir_rule filter_info;
100 };
101 /* l2_tunnel filter list structure */
102 struct ixgbe_eth_l2_tunnel_conf_ele {
103         TAILQ_ENTRY(ixgbe_eth_l2_tunnel_conf_ele) entries;
104         struct rte_eth_l2_tunnel_conf filter_info;
105 };
106 /* ixgbe_flow memory list structure */
107 struct ixgbe_flow_mem {
108         TAILQ_ENTRY(ixgbe_flow_mem) entries;
109         struct rte_flow *flow;
110 };
111
112 TAILQ_HEAD(ixgbe_ntuple_filter_list, ixgbe_ntuple_filter_ele);
113 TAILQ_HEAD(ixgbe_ethertype_filter_list, ixgbe_ethertype_filter_ele);
114 TAILQ_HEAD(ixgbe_syn_filter_list, ixgbe_eth_syn_filter_ele);
115 TAILQ_HEAD(ixgbe_fdir_rule_filter_list, ixgbe_fdir_rule_ele);
116 TAILQ_HEAD(ixgbe_l2_tunnel_filter_list, ixgbe_eth_l2_tunnel_conf_ele);
117 TAILQ_HEAD(ixgbe_flow_mem_list, ixgbe_flow_mem);
118
119 static struct ixgbe_ntuple_filter_list filter_ntuple_list;
120 static struct ixgbe_ethertype_filter_list filter_ethertype_list;
121 static struct ixgbe_syn_filter_list filter_syn_list;
122 static struct ixgbe_fdir_rule_filter_list filter_fdir_list;
123 static struct ixgbe_l2_tunnel_filter_list filter_l2_tunnel_list;
124 static struct ixgbe_flow_mem_list ixgbe_flow_list;
125
126 /**
127  * An endless loop cannot happen under the following assumptions:
128  * 1. there is at least one non-void item (END);
129  * 2. cur is before END.
130  */
131 static inline
132 const struct rte_flow_item *next_no_void_pattern(
133                 const struct rte_flow_item pattern[],
134                 const struct rte_flow_item *cur)
135 {
136         const struct rte_flow_item *next =
137                 cur ? cur + 1 : &pattern[0];
138         while (1) {
139                 if (next->type != RTE_FLOW_ITEM_TYPE_VOID)
140                         return next;
141                 next++;
142         }
143 }
144
145 static inline
146 const struct rte_flow_action *next_no_void_action(
147                 const struct rte_flow_action actions[],
148                 const struct rte_flow_action *cur)
149 {
150         const struct rte_flow_action *next =
151                 cur ? cur + 1 : &actions[0];
152         while (1) {
153                 if (next->type != RTE_FLOW_ACTION_TYPE_VOID)
154                         return next;
155                 next++;
156         }
157 }
158
159 /**
160  * Please be aware there is an assumption for all the parsers:
161  * rte_flow_item uses big endian, while rte_flow_attr and
162  * rte_flow_action use CPU order.
163  * Because the pattern is used to describe packets,
164  * the packets normally use network order.
165  */
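/*
 * A minimal sketch of that convention (illustrative only; the address
 * and queue index below are made-up example values): an IPv4 address
 * placed in an item spec is converted to network order, while the
 * queue index in a QUEUE action conf stays in CPU order.
 *
 *   ipv4_spec.hdr.dst_addr = rte_cpu_to_be_32(IPv4(192, 167, 3, 50));
 *   queue_conf.index = 3;    (CPU order, no byte swapping)
 */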
166
167 /**
168  * Parse the rule to see if it is an n-tuple rule,
169  * and get the n-tuple filter info as well.
170  * pattern:
171  * The first not void item can be ETH or IPV4.
172  * The second not void item must be IPV4 if the first one is ETH.
173  * The third not void item must be UDP, TCP or SCTP.
174  * The next not void item must be END.
175  * action:
176  * The first not void action should be QUEUE.
177  * The next not void action should be END.
178  * pattern example:
179  * ITEM         Spec                    Mask
180  * ETH          NULL                    NULL
181  * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
182  *              dst_addr 192.167.3.50   0xFFFFFFFF
183  *              next_proto_id   17      0xFF
184  * UDP/TCP/     src_port        80      0xFFFF
185  * SCTP         dst_port        80      0xFFFF
186  * END
187  * other members in mask and spec should be set to 0x00.
188  * item->last should be NULL.
189  *
190  * Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY.
191  *
192  */
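/*
 * A minimal sketch of a matching rule (illustrative only; the
 * addresses, ports and queue index are made-up example values, and the
 * struct layouts are those of rte_flow.h):
 *
 *   struct rte_flow_attr attr = { .ingress = 1, .priority = 1 };
 *
 *   struct rte_flow_item_ipv4 ip_spec = { .hdr = {
 *           .src_addr = rte_cpu_to_be_32(IPv4(192, 168, 1, 20)),
 *           .dst_addr = rte_cpu_to_be_32(IPv4(192, 167, 3, 50)),
 *           .next_proto_id = IPPROTO_UDP } };
 *   struct rte_flow_item_ipv4 ip_mask = { .hdr = {
 *           .src_addr = UINT32_MAX,
 *           .dst_addr = UINT32_MAX,
 *           .next_proto_id = UINT8_MAX } };
 *   struct rte_flow_item_udp udp_spec = { .hdr = {
 *           .src_port = rte_cpu_to_be_16(80),
 *           .dst_port = rte_cpu_to_be_16(80) } };
 *   struct rte_flow_item_udp udp_mask = { .hdr = {
 *           .src_port = UINT16_MAX,
 *           .dst_port = UINT16_MAX } };
 *
 *   struct rte_flow_item pattern[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *           { .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *             .spec = &ip_spec, .mask = &ip_mask },
 *           { .type = RTE_FLOW_ITEM_TYPE_UDP,
 *             .spec = &udp_spec, .mask = &udp_mask },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 *
 *   struct rte_flow_action_queue queue = { .index = 3 };
 *   struct rte_flow_action actions[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 *
 * These arrays would then be passed to rte_flow_validate() or
 * rte_flow_create() together with the port id.
 */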
193 static int
194 cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
195                          const struct rte_flow_item pattern[],
196                          const struct rte_flow_action actions[],
197                          struct rte_eth_ntuple_filter *filter,
198                          struct rte_flow_error *error)
199 {
200         const struct rte_flow_item *item;
201         const struct rte_flow_action *act;
202         const struct rte_flow_item_ipv4 *ipv4_spec;
203         const struct rte_flow_item_ipv4 *ipv4_mask;
204         const struct rte_flow_item_tcp *tcp_spec;
205         const struct rte_flow_item_tcp *tcp_mask;
206         const struct rte_flow_item_udp *udp_spec;
207         const struct rte_flow_item_udp *udp_mask;
208         const struct rte_flow_item_sctp *sctp_spec;
209         const struct rte_flow_item_sctp *sctp_mask;
210
211         if (!pattern) {
212                 rte_flow_error_set(error,
213                         EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
214                         NULL, "NULL pattern.");
215                 return -rte_errno;
216         }
217
218         if (!actions) {
219                 rte_flow_error_set(error, EINVAL,
220                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
221                                    NULL, "NULL action.");
222                 return -rte_errno;
223         }
224         if (!attr) {
225                 rte_flow_error_set(error, EINVAL,
226                                    RTE_FLOW_ERROR_TYPE_ATTR,
227                                    NULL, "NULL attribute.");
228                 return -rte_errno;
229         }
230
231 #ifdef RTE_LIBRTE_SECURITY
232         /**
233          *  Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY
234          */
235         act = next_no_void_action(actions, NULL);
236         if (act->type == RTE_FLOW_ACTION_TYPE_SECURITY) {
237                 const void *conf = act->conf;
238                 /* check if the next not void item is END */
239                 act = next_no_void_action(actions, act);
240                 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
241                         memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
242                         rte_flow_error_set(error, EINVAL,
243                                 RTE_FLOW_ERROR_TYPE_ACTION,
244                                 act, "Not supported action.");
245                         return -rte_errno;
246                 }
247
248                 /* get the IP pattern */
249                 item = next_no_void_pattern(pattern, NULL);
250                 while (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
251                                 item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
252                         if (item->last ||
253                                         item->type == RTE_FLOW_ITEM_TYPE_END) {
254                                 rte_flow_error_set(error, EINVAL,
255                                         RTE_FLOW_ERROR_TYPE_ITEM,
256                                         item, "IP pattern missing.");
257                                 return -rte_errno;
258                         }
259                         item = next_no_void_pattern(pattern, item);
260                 }
261
262                 filter->proto = IPPROTO_ESP;
263                 return ixgbe_crypto_add_ingress_sa_from_flow(conf, item->spec,
264                                         item->type == RTE_FLOW_ITEM_TYPE_IPV6);
265         }
266 #endif
267
268         /* the first not void item can be MAC or IPv4 */
269         item = next_no_void_pattern(pattern, NULL);
270
271         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
272             item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
273                 rte_flow_error_set(error, EINVAL,
274                         RTE_FLOW_ERROR_TYPE_ITEM,
275                         item, "Not supported by ntuple filter");
276                 return -rte_errno;
277         }
278         /* Skip Ethernet */
279         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
280                 /* Not supported last point for range */
281                 if (item->last) {
282                         rte_flow_error_set(error,
283                           EINVAL,
284                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
285                           item, "Not supported last point for range");
286                         return -rte_errno;
287
288                 }
289                 /* if the first item is MAC, the content should be NULL */
290                 if (item->spec || item->mask) {
291                         rte_flow_error_set(error, EINVAL,
292                                 RTE_FLOW_ERROR_TYPE_ITEM,
293                                 item, "Not supported by ntuple filter");
294                         return -rte_errno;
295                 }
296                 /* check if the next not void item is IPv4 */
297                 item = next_no_void_pattern(pattern, item);
298                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
299                         rte_flow_error_set(error,
300                           EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
301                           item, "Not supported by ntuple filter");
302                           return -rte_errno;
303                 }
304         }
305
306         /* get the IPv4 info */
307         if (!item->spec || !item->mask) {
308                 rte_flow_error_set(error, EINVAL,
309                         RTE_FLOW_ERROR_TYPE_ITEM,
310                         item, "Invalid ntuple mask");
311                 return -rte_errno;
312         }
313         /* Not supported last point for range */
314         if (item->last) {
315                 rte_flow_error_set(error, EINVAL,
316                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
317                         item, "Not supported last point for range");
318                 return -rte_errno;
319
320         }
321
322         ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
323         /**
324          * Only support src & dst addresses, protocol,
325          * others should be masked.
326          */
327         if (ipv4_mask->hdr.version_ihl ||
328             ipv4_mask->hdr.type_of_service ||
329             ipv4_mask->hdr.total_length ||
330             ipv4_mask->hdr.packet_id ||
331             ipv4_mask->hdr.fragment_offset ||
332             ipv4_mask->hdr.time_to_live ||
333             ipv4_mask->hdr.hdr_checksum) {
334                         rte_flow_error_set(error,
335                         EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
336                         item, "Not supported by ntuple filter");
337                 return -rte_errno;
338         }
339
340         filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
341         filter->src_ip_mask = ipv4_mask->hdr.src_addr;
342         filter->proto_mask  = ipv4_mask->hdr.next_proto_id;
343
344         ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
345         filter->dst_ip = ipv4_spec->hdr.dst_addr;
346         filter->src_ip = ipv4_spec->hdr.src_addr;
347         filter->proto  = ipv4_spec->hdr.next_proto_id;
348
349         /* check if the next not void item is TCP or UDP */
350         item = next_no_void_pattern(pattern, item);
351         if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
352             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
353             item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
354             item->type != RTE_FLOW_ITEM_TYPE_END) {
355                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
356                 rte_flow_error_set(error, EINVAL,
357                         RTE_FLOW_ERROR_TYPE_ITEM,
358                         item, "Not supported by ntuple filter");
359                 return -rte_errno;
360         }
361
362         /* get the TCP/UDP info */
363         if ((item->type != RTE_FLOW_ITEM_TYPE_END) &&
364                 (!item->spec || !item->mask)) {
365                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
366                 rte_flow_error_set(error, EINVAL,
367                         RTE_FLOW_ERROR_TYPE_ITEM,
368                         item, "Invalid ntuple mask");
369                 return -rte_errno;
370         }
371
372         /* Not supported last point for range */
373         if (item->last) {
374                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
375                 rte_flow_error_set(error, EINVAL,
376                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
377                         item, "Not supported last point for range");
378                 return -rte_errno;
379
380         }
381
382         if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
383                 tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
384
385                 /**
386                  * Only support src & dst ports, tcp flags,
387                  * others should be masked.
388                  */
389                 if (tcp_mask->hdr.sent_seq ||
390                     tcp_mask->hdr.recv_ack ||
391                     tcp_mask->hdr.data_off ||
392                     tcp_mask->hdr.rx_win ||
393                     tcp_mask->hdr.cksum ||
394                     tcp_mask->hdr.tcp_urp) {
395                         memset(filter, 0,
396                                 sizeof(struct rte_eth_ntuple_filter));
397                         rte_flow_error_set(error, EINVAL,
398                                 RTE_FLOW_ERROR_TYPE_ITEM,
399                                 item, "Not supported by ntuple filter");
400                         return -rte_errno;
401                 }
402
403                 filter->dst_port_mask  = tcp_mask->hdr.dst_port;
404                 filter->src_port_mask  = tcp_mask->hdr.src_port;
405                 if (tcp_mask->hdr.tcp_flags == 0xFF) {
406                         filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
407                 } else if (!tcp_mask->hdr.tcp_flags) {
408                         filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
409                 } else {
410                         memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
411                         rte_flow_error_set(error, EINVAL,
412                                 RTE_FLOW_ERROR_TYPE_ITEM,
413                                 item, "Not supported by ntuple filter");
414                         return -rte_errno;
415                 }
416
417                 tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
418                 filter->dst_port  = tcp_spec->hdr.dst_port;
419                 filter->src_port  = tcp_spec->hdr.src_port;
420                 filter->tcp_flags = tcp_spec->hdr.tcp_flags;
421         } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
422                 udp_mask = (const struct rte_flow_item_udp *)item->mask;
423
424                 /**
425                  * Only support src & dst ports,
426                  * others should be masked.
427                  */
428                 if (udp_mask->hdr.dgram_len ||
429                     udp_mask->hdr.dgram_cksum) {
430                         memset(filter, 0,
431                                 sizeof(struct rte_eth_ntuple_filter));
432                         rte_flow_error_set(error, EINVAL,
433                                 RTE_FLOW_ERROR_TYPE_ITEM,
434                                 item, "Not supported by ntuple filter");
435                         return -rte_errno;
436                 }
437
438                 filter->dst_port_mask = udp_mask->hdr.dst_port;
439                 filter->src_port_mask = udp_mask->hdr.src_port;
440
441                 udp_spec = (const struct rte_flow_item_udp *)item->spec;
442                 filter->dst_port = udp_spec->hdr.dst_port;
443                 filter->src_port = udp_spec->hdr.src_port;
444         } else if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
445                 sctp_mask = (const struct rte_flow_item_sctp *)item->mask;
446
447                 /**
448                  * Only support src & dst ports,
449                  * others should be masked.
450                  */
451                 if (sctp_mask->hdr.tag ||
452                     sctp_mask->hdr.cksum) {
453                         memset(filter, 0,
454                                 sizeof(struct rte_eth_ntuple_filter));
455                         rte_flow_error_set(error, EINVAL,
456                                 RTE_FLOW_ERROR_TYPE_ITEM,
457                                 item, "Not supported by ntuple filter");
458                         return -rte_errno;
459                 }
460
461                 filter->dst_port_mask = sctp_mask->hdr.dst_port;
462                 filter->src_port_mask = sctp_mask->hdr.src_port;
463
464                 sctp_spec = (const struct rte_flow_item_sctp *)item->spec;
465                 filter->dst_port = sctp_spec->hdr.dst_port;
466                 filter->src_port = sctp_spec->hdr.src_port;
467         } else {
468                 goto action;
469         }
470
471         /* check if the next not void item is END */
472         item = next_no_void_pattern(pattern, item);
473         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
474                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
475                 rte_flow_error_set(error, EINVAL,
476                         RTE_FLOW_ERROR_TYPE_ITEM,
477                         item, "Not supported by ntuple filter");
478                 return -rte_errno;
479         }
480
481 action:
482
483         /**
484          * n-tuple only supports forwarding,
485          * check if the first not void action is QUEUE.
486          */
487         act = next_no_void_action(actions, NULL);
488         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
489                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
490                 rte_flow_error_set(error, EINVAL,
491                         RTE_FLOW_ERROR_TYPE_ACTION,
492                         item, "Not supported action.");
493                 return -rte_errno;
494         }
495         filter->queue =
496                 ((const struct rte_flow_action_queue *)act->conf)->index;
497
498         /* check if the next not void item is END */
499         act = next_no_void_action(actions, act);
500         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
501                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
502                 rte_flow_error_set(error, EINVAL,
503                         RTE_FLOW_ERROR_TYPE_ACTION,
504                         act, "Not supported action.");
505                 return -rte_errno;
506         }
507
508         /* parse attr */
509         /* must be input direction */
510         if (!attr->ingress) {
511                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
512                 rte_flow_error_set(error, EINVAL,
513                                    RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
514                                    attr, "Only support ingress.");
515                 return -rte_errno;
516         }
517
518         /* not supported */
519         if (attr->egress) {
520                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
521                 rte_flow_error_set(error, EINVAL,
522                                    RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
523                                    attr, "Not support egress.");
524                 return -rte_errno;
525         }
526
527         if (attr->priority > 0xFFFF) {
528                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
529                 rte_flow_error_set(error, EINVAL,
530                                    RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
531                                    attr, "Error priority.");
532                 return -rte_errno;
533         }
534         filter->priority = (uint16_t)attr->priority;
535         if (attr->priority < IXGBE_MIN_N_TUPLE_PRIO ||
536             attr->priority > IXGBE_MAX_N_TUPLE_PRIO)
537             filter->priority = 1;
538
539         return 0;
540 }
541
542 /* a specific function for ixgbe because the flags field is specific */
543 static int
544 ixgbe_parse_ntuple_filter(struct rte_eth_dev *dev,
545                           const struct rte_flow_attr *attr,
546                           const struct rte_flow_item pattern[],
547                           const struct rte_flow_action actions[],
548                           struct rte_eth_ntuple_filter *filter,
549                           struct rte_flow_error *error)
550 {
551         int ret;
552         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
553
554         MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
555
556         ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);
557
558         if (ret)
559                 return ret;
560
561 #ifdef RTE_LIBRTE_SECURITY
562         /* an ESP flow is not really a flow */
563         if (filter->proto == IPPROTO_ESP)
564                 return 0;
565 #endif
566
567         /* Ixgbe doesn't support tcp flags. */
568         if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
569                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
570                 rte_flow_error_set(error, EINVAL,
571                                    RTE_FLOW_ERROR_TYPE_ITEM,
572                                    NULL, "Not supported by ntuple filter");
573                 return -rte_errno;
574         }
575
576         /* Ixgbe only supports priorities 1 to 7. */
577         if (filter->priority < IXGBE_MIN_N_TUPLE_PRIO ||
578             filter->priority > IXGBE_MAX_N_TUPLE_PRIO) {
579                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
580                 rte_flow_error_set(error, EINVAL,
581                         RTE_FLOW_ERROR_TYPE_ITEM,
582                         NULL, "Priority not supported by ntuple filter");
583                 return -rte_errno;
584         }
585
586         if (filter->queue >= dev->data->nb_rx_queues)
587                 return -rte_errno;
588
589         /* fixed value for ixgbe */
590         filter->flags = RTE_5TUPLE_FLAGS;
591         return 0;
592 }
593
594 /**
595  * Parse the rule to see if it is an ethertype rule,
596  * and get the ethertype filter info as well.
597  * pattern:
598  * The first not void item must be ETH.
599  * The next not void item must be END.
600  * action:
601  * The first not void action should be QUEUE.
602  * The next not void action should be END.
603  * pattern example:
604  * ITEM         Spec                    Mask
605  * ETH          type    0x0807          0xFFFF
606  * END
607  * other members in mask and spec should be set to 0x00.
608  * item->last should be NULL.
609  */
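/*
 * A minimal sketch of a matching rule (illustrative only; the
 * ethertype 0x0807 and the queue index are made-up example values):
 *
 *   struct rte_flow_attr attr = { .ingress = 1 };
 *   struct rte_flow_item_eth eth_spec = {
 *           .type = rte_cpu_to_be_16(0x0807) };
 *   struct rte_flow_item_eth eth_mask = { .type = UINT16_MAX };
 *
 *   struct rte_flow_item pattern[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH,
 *             .spec = &eth_spec, .mask = &eth_mask },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 *   struct rte_flow_action_queue queue = { .index = 2 };
 *   struct rte_flow_action actions[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 */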
610 static int
611 cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
612                             const struct rte_flow_item *pattern,
613                             const struct rte_flow_action *actions,
614                             struct rte_eth_ethertype_filter *filter,
615                             struct rte_flow_error *error)
616 {
617         const struct rte_flow_item *item;
618         const struct rte_flow_action *act;
619         const struct rte_flow_item_eth *eth_spec;
620         const struct rte_flow_item_eth *eth_mask;
621         const struct rte_flow_action_queue *act_q;
622
623         if (!pattern) {
624                 rte_flow_error_set(error, EINVAL,
625                                 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
626                                 NULL, "NULL pattern.");
627                 return -rte_errno;
628         }
629
630         if (!actions) {
631                 rte_flow_error_set(error, EINVAL,
632                                 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
633                                 NULL, "NULL action.");
634                 return -rte_errno;
635         }
636
637         if (!attr) {
638                 rte_flow_error_set(error, EINVAL,
639                                    RTE_FLOW_ERROR_TYPE_ATTR,
640                                    NULL, "NULL attribute.");
641                 return -rte_errno;
642         }
643
644         item = next_no_void_pattern(pattern, NULL);
645         /* The first non-void item should be MAC. */
646         if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
647                 rte_flow_error_set(error, EINVAL,
648                         RTE_FLOW_ERROR_TYPE_ITEM,
649                         item, "Not supported by ethertype filter");
650                 return -rte_errno;
651         }
652
653         /* Not supported last point for range */
654         if (item->last) {
655                 rte_flow_error_set(error, EINVAL,
656                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
657                         item, "Not supported last point for range");
658                 return -rte_errno;
659         }
660
661         /* Get the MAC info. */
662         if (!item->spec || !item->mask) {
663                 rte_flow_error_set(error, EINVAL,
664                                 RTE_FLOW_ERROR_TYPE_ITEM,
665                                 item, "Not supported by ethertype filter");
666                 return -rte_errno;
667         }
668
669         eth_spec = (const struct rte_flow_item_eth *)item->spec;
670         eth_mask = (const struct rte_flow_item_eth *)item->mask;
671
672         /* Mask bits of source MAC address must be full of 0.
673          * Mask bits of destination MAC address must be full
674          * of 1 or full of 0.
675          */
676         if (!is_zero_ether_addr(&eth_mask->src) ||
677             (!is_zero_ether_addr(&eth_mask->dst) &&
678              !is_broadcast_ether_addr(&eth_mask->dst))) {
679                 rte_flow_error_set(error, EINVAL,
680                                 RTE_FLOW_ERROR_TYPE_ITEM,
681                                 item, "Invalid ether address mask");
682                 return -rte_errno;
683         }
684
685         if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
686                 rte_flow_error_set(error, EINVAL,
687                                 RTE_FLOW_ERROR_TYPE_ITEM,
688                                 item, "Invalid ethertype mask");
689                 return -rte_errno;
690         }
691
692         /* If mask bits of destination MAC address
693          * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
694          */
695         if (is_broadcast_ether_addr(&eth_mask->dst)) {
696                 filter->mac_addr = eth_spec->dst;
697                 filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
698         } else {
699                 filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
700         }
701         filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
702
703         /* Check if the next non-void item is END. */
704         item = next_no_void_pattern(pattern, item);
705         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
706                 rte_flow_error_set(error, EINVAL,
707                                 RTE_FLOW_ERROR_TYPE_ITEM,
708                                 item, "Not supported by ethertype filter.");
709                 return -rte_errno;
710         }
711
712         /* Parse action */
713
714         act = next_no_void_action(actions, NULL);
715         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
716             act->type != RTE_FLOW_ACTION_TYPE_DROP) {
717                 rte_flow_error_set(error, EINVAL,
718                                 RTE_FLOW_ERROR_TYPE_ACTION,
719                                 act, "Not supported action.");
720                 return -rte_errno;
721         }
722
723         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
724                 act_q = (const struct rte_flow_action_queue *)act->conf;
725                 filter->queue = act_q->index;
726         } else {
727                 filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
728         }
729
730         /* Check if the next non-void item is END */
731         act = next_no_void_action(actions, act);
732         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
733                 rte_flow_error_set(error, EINVAL,
734                                 RTE_FLOW_ERROR_TYPE_ACTION,
735                                 act, "Not supported action.");
736                 return -rte_errno;
737         }
738
739         /* Parse attr */
740         /* Must be input direction */
741         if (!attr->ingress) {
742                 rte_flow_error_set(error, EINVAL,
743                                 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
744                                 attr, "Only support ingress.");
745                 return -rte_errno;
746         }
747
748         /* Not supported */
749         if (attr->egress) {
750                 rte_flow_error_set(error, EINVAL,
751                                 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
752                                 attr, "Not support egress.");
753                 return -rte_errno;
754         }
755
756         /* Not supported */
757         if (attr->priority) {
758                 rte_flow_error_set(error, EINVAL,
759                                 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
760                                 attr, "Not support priority.");
761                 return -rte_errno;
762         }
763
764         /* Not supported */
765         if (attr->group) {
766                 rte_flow_error_set(error, EINVAL,
767                                 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
768                                 attr, "Not support group.");
769                 return -rte_errno;
770         }
771
772         return 0;
773 }
774
775 static int
776 ixgbe_parse_ethertype_filter(struct rte_eth_dev *dev,
777                                  const struct rte_flow_attr *attr,
778                              const struct rte_flow_item pattern[],
779                              const struct rte_flow_action actions[],
780                              struct rte_eth_ethertype_filter *filter,
781                              struct rte_flow_error *error)
782 {
783         int ret;
784         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
785
786         MAC_TYPE_FILTER_SUP(hw->mac.type);
787
788         ret = cons_parse_ethertype_filter(attr, pattern,
789                                         actions, filter, error);
790
791         if (ret)
792                 return ret;
793
794         /* Ixgbe doesn't support MAC address matching. */
795         if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
796                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
797                 rte_flow_error_set(error, EINVAL,
798                         RTE_FLOW_ERROR_TYPE_ITEM,
799                         NULL, "Not supported by ethertype filter");
800                 return -rte_errno;
801         }
802
803         if (filter->queue >= dev->data->nb_rx_queues) {
804                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
805                 rte_flow_error_set(error, EINVAL,
806                         RTE_FLOW_ERROR_TYPE_ITEM,
807                         NULL, "queue index much too big");
808                 return -rte_errno;
809         }
810
811         if (filter->ether_type == ETHER_TYPE_IPv4 ||
812                 filter->ether_type == ETHER_TYPE_IPv6) {
813                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
814                 rte_flow_error_set(error, EINVAL,
815                         RTE_FLOW_ERROR_TYPE_ITEM,
816                         NULL, "IPv4/IPv6 not supported by ethertype filter");
817                 return -rte_errno;
818         }
819
820         if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
821                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
822                 rte_flow_error_set(error, EINVAL,
823                         RTE_FLOW_ERROR_TYPE_ITEM,
824                         NULL, "mac compare is unsupported");
825                 return -rte_errno;
826         }
827
828         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
829                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
830                 rte_flow_error_set(error, EINVAL,
831                         RTE_FLOW_ERROR_TYPE_ITEM,
832                         NULL, "drop option is unsupported");
833                 return -rte_errno;
834         }
835
836         return 0;
837 }
838
839 /**
840  * Parse the rule to see if it is a TCP SYN rule,
841  * and get the TCP SYN filter info as well.
842  * pattern:
843  * The first not void item must be ETH.
844  * The second not void item must be IPV4 or IPV6.
845  * The third not void item must be TCP.
846  * The next not void item must be END.
847  * action:
848  * The first not void action should be QUEUE.
849  * The next not void action should be END.
850  * pattern example:
851  * ITEM         Spec                    Mask
852  * ETH          NULL                    NULL
853  * IPV4/IPV6    NULL                    NULL
854  * TCP          tcp_flags       0x02    0x02
855  * END
856  * other members in mask and spec should be set to 0x00.
857  * item->last should be NULL.
858  */
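/*
 * A minimal sketch of a matching rule (illustrative only; the queue
 * index is a made-up example value; TCP_SYN_FLAG is 0x02):
 *
 *   struct rte_flow_attr attr = { .ingress = 1 };
 *   struct rte_flow_item_tcp tcp_spec = {
 *           .hdr = { .tcp_flags = TCP_SYN_FLAG } };
 *   struct rte_flow_item_tcp tcp_mask = {
 *           .hdr = { .tcp_flags = TCP_SYN_FLAG } };
 *
 *   struct rte_flow_item pattern[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *           { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *           { .type = RTE_FLOW_ITEM_TYPE_TCP,
 *             .spec = &tcp_spec, .mask = &tcp_mask },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 *   struct rte_flow_action_queue queue = { .index = 3 };
 *   struct rte_flow_action actions[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 */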
859 static int
860 cons_parse_syn_filter(const struct rte_flow_attr *attr,
861                                 const struct rte_flow_item pattern[],
862                                 const struct rte_flow_action actions[],
863                                 struct rte_eth_syn_filter *filter,
864                                 struct rte_flow_error *error)
865 {
866         const struct rte_flow_item *item;
867         const struct rte_flow_action *act;
868         const struct rte_flow_item_tcp *tcp_spec;
869         const struct rte_flow_item_tcp *tcp_mask;
870         const struct rte_flow_action_queue *act_q;
871
872         if (!pattern) {
873                 rte_flow_error_set(error, EINVAL,
874                                 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
875                                 NULL, "NULL pattern.");
876                 return -rte_errno;
877         }
878
879         if (!actions) {
880                 rte_flow_error_set(error, EINVAL,
881                                 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
882                                 NULL, "NULL action.");
883                 return -rte_errno;
884         }
885
886         if (!attr) {
887                 rte_flow_error_set(error, EINVAL,
888                                    RTE_FLOW_ERROR_TYPE_ATTR,
889                                    NULL, "NULL attribute.");
890                 return -rte_errno;
891         }
892
893
894         /* the first not void item should be MAC or IPv4 or IPv6 or TCP */
895         item = next_no_void_pattern(pattern, NULL);
896         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
897             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
898             item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
899             item->type != RTE_FLOW_ITEM_TYPE_TCP) {
900                 rte_flow_error_set(error, EINVAL,
901                                 RTE_FLOW_ERROR_TYPE_ITEM,
902                                 item, "Not supported by syn filter");
903                 return -rte_errno;
904         }
905         /* Not supported last point for range */
906         if (item->last) {
907                 rte_flow_error_set(error, EINVAL,
908                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
909                         item, "Not supported last point for range");
910                 return -rte_errno;
911         }
912
913         /* Skip Ethernet */
914         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
915                 /* if the item is MAC, the content should be NULL */
916                 if (item->spec || item->mask) {
917                         rte_flow_error_set(error, EINVAL,
918                                 RTE_FLOW_ERROR_TYPE_ITEM,
919                                 item, "Invalid SYN address mask");
920                         return -rte_errno;
921                 }
922
923                 /* check if the next not void item is IPv4 or IPv6 */
924                 item = next_no_void_pattern(pattern, item);
925                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
926                     item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
927                         rte_flow_error_set(error, EINVAL,
928                                 RTE_FLOW_ERROR_TYPE_ITEM,
929                                 item, "Not supported by syn filter");
930                         return -rte_errno;
931                 }
932         }
933
934         /* Skip IP */
935         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
936             item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
937                 /* if the item is IP, the content should be NULL */
938                 if (item->spec || item->mask) {
939                         rte_flow_error_set(error, EINVAL,
940                                 RTE_FLOW_ERROR_TYPE_ITEM,
941                                 item, "Invalid SYN mask");
942                         return -rte_errno;
943                 }
944
945                 /* check if the next not void item is TCP */
946                 item = next_no_void_pattern(pattern, item);
947                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
948                         rte_flow_error_set(error, EINVAL,
949                                 RTE_FLOW_ERROR_TYPE_ITEM,
950                                 item, "Not supported by syn filter");
951                         return -rte_errno;
952                 }
953         }
954
955         /* Get the TCP info. Only support SYN. */
956         if (!item->spec || !item->mask) {
957                 rte_flow_error_set(error, EINVAL,
958                                 RTE_FLOW_ERROR_TYPE_ITEM,
959                                 item, "Invalid SYN mask");
960                 return -rte_errno;
961         }
962         /* Not supported last point for range */
963         if (item->last) {
964                 rte_flow_error_set(error, EINVAL,
965                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
966                         item, "Not supported last point for range");
967                 return -rte_errno;
968         }
969
970         tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
971         tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
972         if (!(tcp_spec->hdr.tcp_flags & TCP_SYN_FLAG) ||
973             tcp_mask->hdr.src_port ||
974             tcp_mask->hdr.dst_port ||
975             tcp_mask->hdr.sent_seq ||
976             tcp_mask->hdr.recv_ack ||
977             tcp_mask->hdr.data_off ||
978             tcp_mask->hdr.tcp_flags != TCP_SYN_FLAG ||
979             tcp_mask->hdr.rx_win ||
980             tcp_mask->hdr.cksum ||
981             tcp_mask->hdr.tcp_urp) {
982                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
983                 rte_flow_error_set(error, EINVAL,
984                                 RTE_FLOW_ERROR_TYPE_ITEM,
985                                 item, "Not supported by syn filter");
986                 return -rte_errno;
987         }
988
989         /* check if the next not void item is END */
990         item = next_no_void_pattern(pattern, item);
991         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
992                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
993                 rte_flow_error_set(error, EINVAL,
994                                 RTE_FLOW_ERROR_TYPE_ITEM,
995                                 item, "Not supported by syn filter");
996                 return -rte_errno;
997         }
998
999         /* check if the first not void action is QUEUE. */
1000         act = next_no_void_action(actions, NULL);
1001         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
1002                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1003                 rte_flow_error_set(error, EINVAL,
1004                                 RTE_FLOW_ERROR_TYPE_ACTION,
1005                                 act, "Not supported action.");
1006                 return -rte_errno;
1007         }
1008
1009         act_q = (const struct rte_flow_action_queue *)act->conf;
1010         filter->queue = act_q->index;
1011         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
1012                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1013                 rte_flow_error_set(error, EINVAL,
1014                                 RTE_FLOW_ERROR_TYPE_ACTION,
1015                                 act, "Not supported action.");
1016                 return -rte_errno;
1017         }
1018
1019         /* check if the next not void item is END */
1020         act = next_no_void_action(actions, act);
1021         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1022                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1023                 rte_flow_error_set(error, EINVAL,
1024                                 RTE_FLOW_ERROR_TYPE_ACTION,
1025                                 act, "Not supported action.");
1026                 return -rte_errno;
1027         }
1028
1029         /* parse attr */
1030         /* must be input direction */
1031         if (!attr->ingress) {
1032                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1033                 rte_flow_error_set(error, EINVAL,
1034                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1035                         attr, "Only support ingress.");
1036                 return -rte_errno;
1037         }
1038
1039         /* not supported */
1040         if (attr->egress) {
1041                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1042                 rte_flow_error_set(error, EINVAL,
1043                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1044                         attr, "Not support egress.");
1045                 return -rte_errno;
1046         }
1047
1048         /* Support 2 priorities, the lowest or highest. */
1049         if (!attr->priority) {
1050                 filter->hig_pri = 0;
1051         } else if (attr->priority == (uint32_t)~0U) {
1052                 filter->hig_pri = 1;
1053         } else {
1054                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1055                 rte_flow_error_set(error, EINVAL,
1056                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1057                         attr, "Not support priority.");
1058                 return -rte_errno;
1059         }
1060
1061         return 0;
1062 }
1063
1064 static int
1065 ixgbe_parse_syn_filter(struct rte_eth_dev *dev,
1066                                  const struct rte_flow_attr *attr,
1067                              const struct rte_flow_item pattern[],
1068                              const struct rte_flow_action actions[],
1069                              struct rte_eth_syn_filter *filter,
1070                              struct rte_flow_error *error)
1071 {
1072         int ret;
1073         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1074
1075         MAC_TYPE_FILTER_SUP(hw->mac.type);
1076
1077         ret = cons_parse_syn_filter(attr, pattern,
1078                                         actions, filter, error);
1079
1080         if (ret)
1081                 return ret;
1082
1083         if (filter->queue >= dev->data->nb_rx_queues)
1084                 return -rte_errno;
1085
1086         return 0;
1087 }
1088
1089 /**
1090  * Parse the rule to see if it is an L2 tunnel rule,
1091  * and get the L2 tunnel filter info as well.
1092  * Only E-tag is supported now.
1093  * pattern:
1094  * The first not void item must be E_TAG.
1095  * The next not void item must be END.
1096  * action:
1097  * The first not void action should be VF or PF.
1098  * The next not void action should be END.
1099  * pattern example:
1100  * ITEM         Spec                    Mask
1101  * E_TAG        grp             0x1     0x3
1102  *              e_cid_base      0x309   0xFFF
1103  * END
1104  * other members in mask and spec should be set to 0x00.
1105  * item->last should be NULL.
1106  */
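/*
 * A minimal sketch of a matching rule (illustrative only; the GRP and
 * e_cid_base values mirror the example above and the VF id is a
 * made-up example value).  GRP and E-CID base share the big-endian
 * rsvd_grp_ecid_b field, GRP in bits 13:12 and E-CID base in bits 11:0:
 *
 *   struct rte_flow_attr attr = { .ingress = 1 };
 *   struct rte_flow_item_e_tag etag_spec = {
 *           .rsvd_grp_ecid_b = rte_cpu_to_be_16((0x1 << 12) | 0x309) };
 *   struct rte_flow_item_e_tag etag_mask = {
 *           .rsvd_grp_ecid_b = rte_cpu_to_be_16(0x3FFF) };
 *
 *   struct rte_flow_item pattern[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_E_TAG,
 *             .spec = &etag_spec, .mask = &etag_mask },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 *   struct rte_flow_action_vf vf = { .id = 1 };
 *   struct rte_flow_action actions[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_VF, .conf = &vf },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 */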
1107 static int
1108 cons_parse_l2_tn_filter(struct rte_eth_dev *dev,
1109                         const struct rte_flow_attr *attr,
1110                         const struct rte_flow_item pattern[],
1111                         const struct rte_flow_action actions[],
1112                         struct rte_eth_l2_tunnel_conf *filter,
1113                         struct rte_flow_error *error)
1114 {
1115         const struct rte_flow_item *item;
1116         const struct rte_flow_item_e_tag *e_tag_spec;
1117         const struct rte_flow_item_e_tag *e_tag_mask;
1118         const struct rte_flow_action *act;
1119         const struct rte_flow_action_vf *act_vf;
1120         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1121
1122         if (!pattern) {
1123                 rte_flow_error_set(error, EINVAL,
1124                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1125                         NULL, "NULL pattern.");
1126                 return -rte_errno;
1127         }
1128
1129         if (!actions) {
1130                 rte_flow_error_set(error, EINVAL,
1131                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1132                                    NULL, "NULL action.");
1133                 return -rte_errno;
1134         }
1135
1136         if (!attr) {
1137                 rte_flow_error_set(error, EINVAL,
1138                                    RTE_FLOW_ERROR_TYPE_ATTR,
1139                                    NULL, "NULL attribute.");
1140                 return -rte_errno;
1141         }
1142
1143         /* The first not void item should be e-tag. */
1144         item = next_no_void_pattern(pattern, NULL);
1145         if (item->type != RTE_FLOW_ITEM_TYPE_E_TAG) {
1146                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1147                 rte_flow_error_set(error, EINVAL,
1148                         RTE_FLOW_ERROR_TYPE_ITEM,
1149                         item, "Not supported by L2 tunnel filter");
1150                 return -rte_errno;
1151         }
1152
1153         if (!item->spec || !item->mask) {
1154                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1155                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
1156                         item, "Not supported by L2 tunnel filter");
1157                 return -rte_errno;
1158         }
1159
1160         /* Not supported last point for range */
1161         if (item->last) {
1162                 rte_flow_error_set(error, EINVAL,
1163                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1164                         item, "Not supported last point for range");
1165                 return -rte_errno;
1166         }
1167
1168         e_tag_spec = (const struct rte_flow_item_e_tag *)item->spec;
1169         e_tag_mask = (const struct rte_flow_item_e_tag *)item->mask;
1170
1171         /* Only care about GRP and E cid base. */
1172         if (e_tag_mask->epcp_edei_in_ecid_b ||
1173             e_tag_mask->in_ecid_e ||
1174             e_tag_mask->ecid_e ||
1175             e_tag_mask->rsvd_grp_ecid_b != rte_cpu_to_be_16(0x3FFF)) {
1176                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1177                 rte_flow_error_set(error, EINVAL,
1178                         RTE_FLOW_ERROR_TYPE_ITEM,
1179                         item, "Not supported by L2 tunnel filter");
1180                 return -rte_errno;
1181         }
1182
1183         filter->l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
1184         /**
1185          * grp and e_cid_base are bit fields and only use 14 bits.
1186          * e-tag id is taken as little endian by HW.
1187          */
1188         filter->tunnel_id = rte_be_to_cpu_16(e_tag_spec->rsvd_grp_ecid_b);
1189
1190         /* check if the next not void item is END */
1191         item = next_no_void_pattern(pattern, item);
1192         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1193                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1194                 rte_flow_error_set(error, EINVAL,
1195                         RTE_FLOW_ERROR_TYPE_ITEM,
1196                         item, "Not supported by L2 tunnel filter");
1197                 return -rte_errno;
1198         }
1199
1200         /* parse attr */
1201         /* must be input direction */
1202         if (!attr->ingress) {
1203                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1204                 rte_flow_error_set(error, EINVAL,
1205                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1206                         attr, "Only support ingress.");
1207                 return -rte_errno;
1208         }
1209
1210         /* not supported */
1211         if (attr->egress) {
1212                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1213                 rte_flow_error_set(error, EINVAL,
1214                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1215                         attr, "Not support egress.");
1216                 return -rte_errno;
1217         }
1218
1219         /* not supported */
1220         if (attr->priority) {
1221                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1222                 rte_flow_error_set(error, EINVAL,
1223                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1224                         attr, "Not support priority.");
1225                 return -rte_errno;
1226         }
1227
1228         /* check if the first not void action is VF or PF. */
1229         act = next_no_void_action(actions, NULL);
1230         if (act->type != RTE_FLOW_ACTION_TYPE_VF &&
1231                         act->type != RTE_FLOW_ACTION_TYPE_PF) {
1232                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1233                 rte_flow_error_set(error, EINVAL,
1234                         RTE_FLOW_ERROR_TYPE_ACTION,
1235                         act, "Not supported action.");
1236                 return -rte_errno;
1237         }
1238
1239         if (act->type == RTE_FLOW_ACTION_TYPE_VF) {
1240                 act_vf = (const struct rte_flow_action_vf *)act->conf;
1241                 filter->pool = act_vf->id;
1242         } else {
1243                 filter->pool = pci_dev->max_vfs;
1244         }
1245
1246         /* check if the next not void item is END */
1247         act = next_no_void_action(actions, act);
1248         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1249                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1250                 rte_flow_error_set(error, EINVAL,
1251                         RTE_FLOW_ERROR_TYPE_ACTION,
1252                         act, "Not supported action.");
1253                 return -rte_errno;
1254         }
1255
1256         return 0;
1257 }
1258
1259 static int
1260 ixgbe_parse_l2_tn_filter(struct rte_eth_dev *dev,
1261                         const struct rte_flow_attr *attr,
1262                         const struct rte_flow_item pattern[],
1263                         const struct rte_flow_action actions[],
1264                         struct rte_eth_l2_tunnel_conf *l2_tn_filter,
1265                         struct rte_flow_error *error)
1266 {
1267         int ret = 0;
1268         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1269         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1270         uint16_t vf_num;
1271
1272         ret = cons_parse_l2_tn_filter(dev, attr, pattern,
1273                                 actions, l2_tn_filter, error);
1274
1275         if (hw->mac.type != ixgbe_mac_X550 &&
1276                 hw->mac.type != ixgbe_mac_X550EM_x &&
1277                 hw->mac.type != ixgbe_mac_X550EM_a) {
1278                 memset(l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1279                 rte_flow_error_set(error, EINVAL,
1280                         RTE_FLOW_ERROR_TYPE_ITEM,
1281                         NULL, "Not supported by L2 tunnel filter");
1282                 return -rte_errno;
1283         }
1284
1285         vf_num = pci_dev->max_vfs;
1286
1287         if (l2_tn_filter->pool > vf_num)
1288                 return -rte_errno;
1289
1290         return ret;
1291 }
1292
1293 /* Parse to get the attr and action info of a flow director rule. */
1294 static int
1295 ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
1296                           const struct rte_flow_action actions[],
1297                           struct ixgbe_fdir_rule *rule,
1298                           struct rte_flow_error *error)
1299 {
1300         const struct rte_flow_action *act;
1301         const struct rte_flow_action_queue *act_q;
1302         const struct rte_flow_action_mark *mark;
1303
1304         /* parse attr */
1305         /* must be input direction */
1306         if (!attr->ingress) {
1307                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1308                 rte_flow_error_set(error, EINVAL,
1309                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1310                         attr, "Only support ingress.");
1311                 return -rte_errno;
1312         }
1313
1314         /* not supported */
1315         if (attr->egress) {
1316                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1317                 rte_flow_error_set(error, EINVAL,
1318                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1319                         attr, "Not support egress.");
1320                 return -rte_errno;
1321         }
1322
1323         /* not supported */
1324         if (attr->priority) {
1325                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1326                 rte_flow_error_set(error, EINVAL,
1327                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1328                         attr, "Not support priority.");
1329                 return -rte_errno;
1330         }
1331
1332         /* check if the first not void action is QUEUE or DROP. */
1333         act = next_no_void_action(actions, NULL);
1334         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
1335             act->type != RTE_FLOW_ACTION_TYPE_DROP) {
1336                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1337                 rte_flow_error_set(error, EINVAL,
1338                         RTE_FLOW_ERROR_TYPE_ACTION,
1339                         act, "Not supported action.");
1340                 return -rte_errno;
1341         }
1342
1343         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
1344                 act_q = (const struct rte_flow_action_queue *)act->conf;
1345                 rule->queue = act_q->index;
1346         } else { /* drop */
1347                 /* signature mode does not support drop action. */
1348                 if (rule->mode == RTE_FDIR_MODE_SIGNATURE) {
1349                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1350                         rte_flow_error_set(error, EINVAL,
1351                                 RTE_FLOW_ERROR_TYPE_ACTION,
1352                                 act, "Not supported action.");
1353                         return -rte_errno;
1354                 }
1355                 rule->fdirflags = IXGBE_FDIRCMD_DROP;
1356         }
1357
1358         /* check if the next not void action is MARK or END */
1359         act = next_no_void_action(actions, act);
1360         if ((act->type != RTE_FLOW_ACTION_TYPE_MARK) &&
1361                 (act->type != RTE_FLOW_ACTION_TYPE_END)) {
1362                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1363                 rte_flow_error_set(error, EINVAL,
1364                         RTE_FLOW_ERROR_TYPE_ACTION,
1365                         act, "Not supported action.");
1366                 return -rte_errno;
1367         }
1368
1369         rule->soft_id = 0;
1370
1371         if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
1372                 mark = (const struct rte_flow_action_mark *)act->conf;
1373                 rule->soft_id = mark->id;
1374                 act = next_no_void_action(actions, act);
1375         }
1376
1377         /* check if the next not void action is END */
1378         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1379                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1380                 rte_flow_error_set(error, EINVAL,
1381                         RTE_FLOW_ERROR_TYPE_ACTION,
1382                         act, "Not supported action.");
1383                 return -rte_errno;
1384         }
1385
1386         return 0;
1387 }
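/*
 * Illustrative sketch only (not used by the driver): the action list shape
 * accepted by ixgbe_parse_fdir_act_attr() -- QUEUE (or DROP), an optional
 * MARK, then END. The queue index and mark id are arbitrary example values.
 *
 *     struct rte_flow_action_queue queue = { .index = 1 };
 *     struct rte_flow_action_mark mark = { .id = 0x1234 };
 *     struct rte_flow_action actions[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *             { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 */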
1388
1389 /* search the next not-void pattern item, skipping any fuzzy item */
1390 static inline
1391 const struct rte_flow_item *next_no_fuzzy_pattern(
1392                 const struct rte_flow_item pattern[],
1393                 const struct rte_flow_item *cur)
1394 {
1395         const struct rte_flow_item *next =
1396                 next_no_void_pattern(pattern, cur);
1397         while (1) {
1398                 if (next->type != RTE_FLOW_ITEM_TYPE_FUZZY)
1399                         return next;
1400                 next = next_no_void_pattern(pattern, next);
1401         }
1402 }
1403
1404 static inline uint8_t signature_match(const struct rte_flow_item pattern[])
1405 {
1406         const struct rte_flow_item_fuzzy *spec, *last, *mask;
1407         const struct rte_flow_item *item;
1408         uint32_t sh, lh, mh;
1409         int i = 0;
1410
1411         while (1) {
1412                 item = pattern + i;
1413                 if (item->type == RTE_FLOW_ITEM_TYPE_END)
1414                         break;
1415
1416                 if (item->type == RTE_FLOW_ITEM_TYPE_FUZZY) {
1417                         spec =
1418                         (const struct rte_flow_item_fuzzy *)item->spec;
1419                         last =
1420                         (const struct rte_flow_item_fuzzy *)item->last;
1421                         mask =
1422                         (const struct rte_flow_item_fuzzy *)item->mask;
1423
1424                         if (!spec || !mask)
1425                                 return 0;
1426
1427                         sh = spec->thresh;
1428
1429                         if (!last)
1430                                 lh = sh;
1431                         else
1432                                 lh = last->thresh;
1433
1434                         mh = mask->thresh;
1435                         sh = sh & mh;
1436                         lh = lh & mh;
1437
1438                         if (!sh || sh > lh)
1439                                 return 0;
1440
1441                         return 1;
1442                 }
1443
1444                 i++;
1445         }
1446
1447         return 0;
1448 }
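/*
 * Illustrative sketch only (not used by the driver): a FUZZY item that makes
 * signature_match() return 1 and therefore selects signature mode. The
 * threshold value is an arbitrary example; it must stay non-zero after the
 * mask is applied.
 *
 *     struct rte_flow_item_fuzzy fuzzy_spec = { .thresh = 1 };
 *     struct rte_flow_item_fuzzy fuzzy_mask = { .thresh = 0xffffffff };
 *     struct rte_flow_item fuzzy_item = {
 *             .type = RTE_FLOW_ITEM_TYPE_FUZZY,
 *             .spec = &fuzzy_spec,
 *             .mask = &fuzzy_mask,
 *     };
 */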
1449
1450 /**
1451  * Parse the rule to see if it is an IP or MAC VLAN flow director rule,
1452  * and fill in the flow director filter info along the way.
1453  * UDP/TCP/SCTP PATTERN:
1454  * The first not void item can be ETH or IPV4 or IPV6
1455  * The second not void item must be IPV4 or IPV6 if the first one is ETH.
1456  * The next not void item could be UDP or TCP or SCTP (optional)
1457  * The next not void item could be RAW (for flexbyte, optional)
1458  * The next not void item must be END.
1459  * A Fuzzy Match pattern can appear at any place before END.
1460  * Fuzzy Match is optional for IPV4 but is required for IPV6
1461  * MAC VLAN PATTERN:
1462  * The first not void item must be ETH.
1463  * The second not void item must be MAC VLAN.
1464  * The next not void item must be END.
1465  * ACTION:
1466  * The first not void action should be QUEUE or DROP.
1467  * The second not void optional action should be MARK,
1468  * mark_id is a uint32_t number.
1469  * The next not void action should be END.
1470  * UDP/TCP/SCTP pattern example:
1471  * ITEM         Spec                    Mask
1472  * ETH          NULL                    NULL
1473  * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
1474  *              dst_addr 192.167.3.50   0xFFFFFFFF
1475  * UDP/TCP/SCTP src_port        80      0xFFFF
1476  *              dst_port        80      0xFFFF
1477  * FLEX relative        0       0x1
1478  *              search          0       0x1
1479  *              reserved        0       0
1480  *              offset          12      0xFFFFFFFF
1481  *              limit           0       0xFFFF
1482  *              length          2       0xFFFF
1483  *              pattern[0]      0x86    0xFF
1484  *              pattern[1]      0xDD    0xFF
1485  * END
1486  * MAC VLAN pattern example:
1487  * ITEM         Spec                    Mask
1488  * ETH          dst_addr
1489                 {0xAC, 0x7B, 0xA1,      {0xFF, 0xFF, 0xFF,
1490                 0x2C, 0x6D, 0x36}       0xFF, 0xFF, 0xFF}
1491  * MAC VLAN     tci     0x2016          0xEFFF
1492  * END
1493  * Other members in mask and spec should be set to 0x00.
1494  * Item->last should be NULL.
1495  */
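/*
 * Illustrative sketch only (not used by the driver): an application-side
 * flow matching the UDP example table above, assuming <rte_ip.h> for the
 * IPv4() macro. The port id, queue index and addresses are arbitrary
 * example values.
 *
 *     uint16_t port_id = 0;
 *     struct rte_flow_attr attr = { .ingress = 1 };
 *     struct rte_flow_item_ipv4 ip_spec = { .hdr = {
 *             .src_addr = rte_cpu_to_be_32(IPv4(192, 168, 1, 20)),
 *             .dst_addr = rte_cpu_to_be_32(IPv4(192, 167, 3, 50)) } };
 *     struct rte_flow_item_ipv4 ip_mask = { .hdr = {
 *             .src_addr = 0xFFFFFFFF, .dst_addr = 0xFFFFFFFF } };
 *     struct rte_flow_item_udp udp_spec = { .hdr = {
 *             .src_port = rte_cpu_to_be_16(80),
 *             .dst_port = rte_cpu_to_be_16(80) } };
 *     struct rte_flow_item_udp udp_mask = { .hdr = {
 *             .src_port = 0xFFFF, .dst_port = 0xFFFF } };
 *     struct rte_flow_action_queue queue = { .index = 1 };
 *     struct rte_flow_item pattern[] = {
 *             { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *             { .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *               .spec = &ip_spec, .mask = &ip_mask },
 *             { .type = RTE_FLOW_ITEM_TYPE_UDP,
 *               .spec = &udp_spec, .mask = &udp_mask },
 *             { .type = RTE_FLOW_ITEM_TYPE_END },
 *     };
 *     struct rte_flow_action actions[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 *     struct rte_flow_error err;
 *     struct rte_flow *flow = rte_flow_create(port_id, &attr, pattern,
 *                                             actions, &err);
 */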
1496 static int
1497 ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
1498                                const struct rte_flow_attr *attr,
1499                                const struct rte_flow_item pattern[],
1500                                const struct rte_flow_action actions[],
1501                                struct ixgbe_fdir_rule *rule,
1502                                struct rte_flow_error *error)
1503 {
1504         const struct rte_flow_item *item;
1505         const struct rte_flow_item_eth *eth_spec;
1506         const struct rte_flow_item_eth *eth_mask;
1507         const struct rte_flow_item_ipv4 *ipv4_spec;
1508         const struct rte_flow_item_ipv4 *ipv4_mask;
1509         const struct rte_flow_item_ipv6 *ipv6_spec;
1510         const struct rte_flow_item_ipv6 *ipv6_mask;
1511         const struct rte_flow_item_tcp *tcp_spec;
1512         const struct rte_flow_item_tcp *tcp_mask;
1513         const struct rte_flow_item_udp *udp_spec;
1514         const struct rte_flow_item_udp *udp_mask;
1515         const struct rte_flow_item_sctp *sctp_spec;
1516         const struct rte_flow_item_sctp *sctp_mask;
1517         const struct rte_flow_item_vlan *vlan_spec;
1518         const struct rte_flow_item_vlan *vlan_mask;
1519         const struct rte_flow_item_raw *raw_mask;
1520         const struct rte_flow_item_raw *raw_spec;
1521         uint8_t j;
1522
1523         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1524
1525         if (!pattern) {
1526                 rte_flow_error_set(error, EINVAL,
1527                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1528                         NULL, "NULL pattern.");
1529                 return -rte_errno;
1530         }
1531
1532         if (!actions) {
1533                 rte_flow_error_set(error, EINVAL,
1534                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1535                                    NULL, "NULL action.");
1536                 return -rte_errno;
1537         }
1538
1539         if (!attr) {
1540                 rte_flow_error_set(error, EINVAL,
1541                                    RTE_FLOW_ERROR_TYPE_ATTR,
1542                                    NULL, "NULL attribute.");
1543                 return -rte_errno;
1544         }
1545
1546         /**
1547          * Some fields may not be provided. Set spec to 0 and mask to the
1548          * default value, so we need not handle the missing fields later.
1549          */
1550         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1551         memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
1552         rule->mask.vlan_tci_mask = 0;
1553         rule->mask.flex_bytes_mask = 0;
1554
1555         /**
1556          * The first not void item should be
1557          * MAC or IPv4 or IPv6 or TCP or UDP or SCTP.
1558          */
1559         item = next_no_fuzzy_pattern(pattern, NULL);
1560         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
1561             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
1562             item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
1563             item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1564             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1565             item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
1566                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1567                 rte_flow_error_set(error, EINVAL,
1568                         RTE_FLOW_ERROR_TYPE_ITEM,
1569                         item, "Not supported by fdir filter");
1570                 return -rte_errno;
1571         }
1572
1573         if (signature_match(pattern))
1574                 rule->mode = RTE_FDIR_MODE_SIGNATURE;
1575         else
1576                 rule->mode = RTE_FDIR_MODE_PERFECT;
1577
1578         /*Not supported last point for range*/
1579         if (item->last) {
1580                 rte_flow_error_set(error, EINVAL,
1581                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1582                         item, "Not supported last point for range");
1583                 return -rte_errno;
1584         }
1585
1586         /* Get the MAC info. */
1587         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
1588                 /**
1589                  * Only support vlan and dst MAC address,
1590                  * others should be masked.
1591                  */
1592                 if (item->spec && !item->mask) {
1593                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1594                         rte_flow_error_set(error, EINVAL,
1595                                 RTE_FLOW_ERROR_TYPE_ITEM,
1596                                 item, "Not supported by fdir filter");
1597                         return -rte_errno;
1598                 }
1599
1600                 if (item->spec) {
1601                         rule->b_spec = TRUE;
1602                         eth_spec = (const struct rte_flow_item_eth *)item->spec;
1603
1604                         /* Get the dst MAC. */
1605                         for (j = 0; j < ETHER_ADDR_LEN; j++) {
1606                                 rule->ixgbe_fdir.formatted.inner_mac[j] =
1607                                         eth_spec->dst.addr_bytes[j];
1608                         }
1609                 }
1610
1611
1612                 if (item->mask) {
1613
1614                         rule->b_mask = TRUE;
1615                         eth_mask = (const struct rte_flow_item_eth *)item->mask;
1616
1617                         /* Ether type should be masked. */
1618                         if (eth_mask->type ||
1619                             rule->mode == RTE_FDIR_MODE_SIGNATURE) {
1620                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1621                                 rte_flow_error_set(error, EINVAL,
1622                                         RTE_FLOW_ERROR_TYPE_ITEM,
1623                                         item, "Not supported by fdir filter");
1624                                 return -rte_errno;
1625                         }
1626
1627                         /* If the Ethernet item has a mask, it means MAC VLAN mode. */
1628                         rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN;
1629
1630                         /**
1631                          * The src MAC address mask must be all zeroes (src ignored),
1632                          * and the dst MAC address mask must be all 0xFF (exact match).
1633                          */
1634                         for (j = 0; j < ETHER_ADDR_LEN; j++) {
1635                                 if (eth_mask->src.addr_bytes[j] ||
1636                                         eth_mask->dst.addr_bytes[j] != 0xFF) {
1637                                         memset(rule, 0,
1638                                         sizeof(struct ixgbe_fdir_rule));
1639                                         rte_flow_error_set(error, EINVAL,
1640                                         RTE_FLOW_ERROR_TYPE_ITEM,
1641                                         item, "Not supported by fdir filter");
1642                                         return -rte_errno;
1643                                 }
1644                         }
1645
1646                         /* With no VLAN item, treat the VLAN TCI as fully masked. */
1647                         rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
1648                 }
1649                 /**
1650                  * If both spec and mask are NULL, it means
1651                  * don't care about ETH. Do nothing.
1652                  */
1653
1654                 /**
1655                  * Check if the next not void item is vlan or ipv4.
1656                  * IPv6 is not supported.
1657                  */
1658                 item = next_no_fuzzy_pattern(pattern, item);
1659                 if (rule->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
1660                         if (item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
1661                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1662                                 rte_flow_error_set(error, EINVAL,
1663                                         RTE_FLOW_ERROR_TYPE_ITEM,
1664                                         item, "Not supported by fdir filter");
1665                                 return -rte_errno;
1666                         }
1667                 } else {
1668                         if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
1669                                         item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
1670                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1671                                 rte_flow_error_set(error, EINVAL,
1672                                         RTE_FLOW_ERROR_TYPE_ITEM,
1673                                         item, "Not supported by fdir filter");
1674                                 return -rte_errno;
1675                         }
1676                 }
1677         }
1678
1679         if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1680                 if (!(item->spec && item->mask)) {
1681                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1682                         rte_flow_error_set(error, EINVAL,
1683                                 RTE_FLOW_ERROR_TYPE_ITEM,
1684                                 item, "Not supported by fdir filter");
1685                         return -rte_errno;
1686                 }
1687
1688                 /*Not supported last point for range*/
1689                 if (item->last) {
1690                         rte_flow_error_set(error, EINVAL,
1691                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1692                                 item, "Not supported last point for range");
1693                         return -rte_errno;
1694                 }
1695
1696                 vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
1697                 vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
1698
1699                 rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
1700
1701                 rule->mask.vlan_tci_mask = vlan_mask->tci;
1702                 rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
1703                 /* More than one VLAN tag is not supported. */
1704
1705                 /* Next not void item must be END */
1706                 item = next_no_fuzzy_pattern(pattern, item);
1707                 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1708                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1709                         rte_flow_error_set(error, EINVAL,
1710                                 RTE_FLOW_ERROR_TYPE_ITEM,
1711                                 item, "Not supported by fdir filter");
1712                         return -rte_errno;
1713                 }
1714         }
1715
1716         /* Get the IPV4 info. */
1717         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1718                 /**
1719                  * Set the flow type even if there's no content
1720                  * as we must have a flow type.
1721                  */
1722                 rule->ixgbe_fdir.formatted.flow_type =
1723                         IXGBE_ATR_FLOW_TYPE_IPV4;
1724                 /*Not supported last point for range*/
1725                 if (item->last) {
1726                         rte_flow_error_set(error, EINVAL,
1727                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1728                                 item, "Not supported last point for range");
1729                         return -rte_errno;
1730                 }
1731                 /**
1732                  * Only care about src & dst addresses,
1733                  * others should be masked.
1734                  */
1735                 if (!item->mask) {
1736                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1737                         rte_flow_error_set(error, EINVAL,
1738                                 RTE_FLOW_ERROR_TYPE_ITEM,
1739                                 item, "Not supported by fdir filter");
1740                         return -rte_errno;
1741                 }
1742                 rule->b_mask = TRUE;
1743                 ipv4_mask =
1744                         (const struct rte_flow_item_ipv4 *)item->mask;
1745                 if (ipv4_mask->hdr.version_ihl ||
1746                     ipv4_mask->hdr.type_of_service ||
1747                     ipv4_mask->hdr.total_length ||
1748                     ipv4_mask->hdr.packet_id ||
1749                     ipv4_mask->hdr.fragment_offset ||
1750                     ipv4_mask->hdr.time_to_live ||
1751                     ipv4_mask->hdr.next_proto_id ||
1752                     ipv4_mask->hdr.hdr_checksum) {
1753                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1754                         rte_flow_error_set(error, EINVAL,
1755                                 RTE_FLOW_ERROR_TYPE_ITEM,
1756                                 item, "Not supported by fdir filter");
1757                         return -rte_errno;
1758                 }
1759                 rule->mask.dst_ipv4_mask = ipv4_mask->hdr.dst_addr;
1760                 rule->mask.src_ipv4_mask = ipv4_mask->hdr.src_addr;
1761
1762                 if (item->spec) {
1763                         rule->b_spec = TRUE;
1764                         ipv4_spec =
1765                                 (const struct rte_flow_item_ipv4 *)item->spec;
1766                         rule->ixgbe_fdir.formatted.dst_ip[0] =
1767                                 ipv4_spec->hdr.dst_addr;
1768                         rule->ixgbe_fdir.formatted.src_ip[0] =
1769                                 ipv4_spec->hdr.src_addr;
1770                 }
1771
1772                 /**
1773                  * Check if the next not void item is
1774                  * TCP or UDP or SCTP or END.
1775                  */
1776                 item = next_no_fuzzy_pattern(pattern, item);
1777                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1778                     item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1779                     item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
1780                     item->type != RTE_FLOW_ITEM_TYPE_END &&
1781                     item->type != RTE_FLOW_ITEM_TYPE_RAW) {
1782                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1783                         rte_flow_error_set(error, EINVAL,
1784                                 RTE_FLOW_ERROR_TYPE_ITEM,
1785                                 item, "Not supported by fdir filter");
1786                         return -rte_errno;
1787                 }
1788         }
1789
1790         /* Get the IPV6 info. */
1791         if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
1792                 /**
1793                  * Set the flow type even if there's no content
1794                  * as we must have a flow type.
1795                  */
1796                 rule->ixgbe_fdir.formatted.flow_type =
1797                         IXGBE_ATR_FLOW_TYPE_IPV6;
1798
1799                 /**
1800                  * 1. must be a signature match
1801                  * 2. "last" is not supported
1802                  * 3. mask must not be NULL
1803                  */
1804                 if (rule->mode != RTE_FDIR_MODE_SIGNATURE ||
1805                     item->last ||
1806                     !item->mask) {
1807                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1808                         rte_flow_error_set(error, EINVAL,
1809                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1810                                 item, "Not supported last point for range");
1811                         return -rte_errno;
1812                 }
1813
1814                 rule->b_mask = TRUE;
1815                 ipv6_mask =
1816                         (const struct rte_flow_item_ipv6 *)item->mask;
1817                 if (ipv6_mask->hdr.vtc_flow ||
1818                     ipv6_mask->hdr.payload_len ||
1819                     ipv6_mask->hdr.proto ||
1820                     ipv6_mask->hdr.hop_limits) {
1821                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1822                         rte_flow_error_set(error, EINVAL,
1823                                 RTE_FLOW_ERROR_TYPE_ITEM,
1824                                 item, "Not supported by fdir filter");
1825                         return -rte_errno;
1826                 }
1827
1828                 /* check src addr mask */
1829                 for (j = 0; j < 16; j++) {
1830                         if (ipv6_mask->hdr.src_addr[j] == UINT8_MAX) {
1831                                 rule->mask.src_ipv6_mask |= 1 << j;
1832                         } else if (ipv6_mask->hdr.src_addr[j] != 0) {
1833                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1834                                 rte_flow_error_set(error, EINVAL,
1835                                         RTE_FLOW_ERROR_TYPE_ITEM,
1836                                         item, "Not supported by fdir filter");
1837                                 return -rte_errno;
1838                         }
1839                 }
1840
1841                 /* check dst addr mask */
1842                 for (j = 0; j < 16; j++) {
1843                         if (ipv6_mask->hdr.dst_addr[j] == UINT8_MAX) {
1844                                 rule->mask.dst_ipv6_mask |= 1 << j;
1845                         } else if (ipv6_mask->hdr.dst_addr[j] != 0) {
1846                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1847                                 rte_flow_error_set(error, EINVAL,
1848                                         RTE_FLOW_ERROR_TYPE_ITEM,
1849                                         item, "Not supported by fdir filter");
1850                                 return -rte_errno;
1851                         }
1852                 }
1853
1854                 if (item->spec) {
1855                         rule->b_spec = TRUE;
1856                         ipv6_spec =
1857                                 (const struct rte_flow_item_ipv6 *)item->spec;
1858                         rte_memcpy(rule->ixgbe_fdir.formatted.src_ip,
1859                                    ipv6_spec->hdr.src_addr, 16);
1860                         rte_memcpy(rule->ixgbe_fdir.formatted.dst_ip,
1861                                    ipv6_spec->hdr.dst_addr, 16);
1862                 }
1863
1864                 /**
1865                  * Check if the next not void item is
1866                  * TCP or UDP or SCTP or END.
1867                  */
1868                 item = next_no_fuzzy_pattern(pattern, item);
1869                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1870                     item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1871                     item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
1872                     item->type != RTE_FLOW_ITEM_TYPE_END &&
1873                     item->type != RTE_FLOW_ITEM_TYPE_RAW) {
1874                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1875                         rte_flow_error_set(error, EINVAL,
1876                                 RTE_FLOW_ERROR_TYPE_ITEM,
1877                                 item, "Not supported by fdir filter");
1878                         return -rte_errno;
1879                 }
1880         }
1881
1882         /* Get the TCP info. */
1883         if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
1884                 /**
1885                  * Set the flow type even if there's no content
1886                  * as we must have a flow type.
1887                  */
1888                 rule->ixgbe_fdir.formatted.flow_type |=
1889                         IXGBE_ATR_L4TYPE_TCP;
1890                 /*Not supported last point for range*/
1891                 if (item->last) {
1892                         rte_flow_error_set(error, EINVAL,
1893                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1894                                 item, "Not supported last point for range");
1895                         return -rte_errno;
1896                 }
1897                 /**
1898                  * Only care about src & dst ports,
1899                  * others should be masked.
1900                  */
1901                 if (!item->mask) {
1902                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1903                         rte_flow_error_set(error, EINVAL,
1904                                 RTE_FLOW_ERROR_TYPE_ITEM,
1905                                 item, "Not supported by fdir filter");
1906                         return -rte_errno;
1907                 }
1908                 rule->b_mask = TRUE;
1909                 tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
1910                 if (tcp_mask->hdr.sent_seq ||
1911                     tcp_mask->hdr.recv_ack ||
1912                     tcp_mask->hdr.data_off ||
1913                     tcp_mask->hdr.tcp_flags ||
1914                     tcp_mask->hdr.rx_win ||
1915                     tcp_mask->hdr.cksum ||
1916                     tcp_mask->hdr.tcp_urp) {
1917                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1918                         rte_flow_error_set(error, EINVAL,
1919                                 RTE_FLOW_ERROR_TYPE_ITEM,
1920                                 item, "Not supported by fdir filter");
1921                         return -rte_errno;
1922                 }
1923                 rule->mask.src_port_mask = tcp_mask->hdr.src_port;
1924                 rule->mask.dst_port_mask = tcp_mask->hdr.dst_port;
1925
1926                 if (item->spec) {
1927                         rule->b_spec = TRUE;
1928                         tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
1929                         rule->ixgbe_fdir.formatted.src_port =
1930                                 tcp_spec->hdr.src_port;
1931                         rule->ixgbe_fdir.formatted.dst_port =
1932                                 tcp_spec->hdr.dst_port;
1933                 }
1934
1935                 item = next_no_fuzzy_pattern(pattern, item);
1936                 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
1937                     item->type != RTE_FLOW_ITEM_TYPE_END) {
1938                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1939                         rte_flow_error_set(error, EINVAL,
1940                                 RTE_FLOW_ERROR_TYPE_ITEM,
1941                                 item, "Not supported by fdir filter");
1942                         return -rte_errno;
1943                 }
1944
1945         }
1946
1947         /* Get the UDP info */
1948         if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
1949                 /**
1950                  * Set the flow type even if there's no content
1951                  * as we must have a flow type.
1952                  */
1953                 rule->ixgbe_fdir.formatted.flow_type |=
1954                         IXGBE_ATR_L4TYPE_UDP;
1955                 /*Not supported last point for range*/
1956                 if (item->last) {
1957                         rte_flow_error_set(error, EINVAL,
1958                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1959                                 item, "Not supported last point for range");
1960                         return -rte_errno;
1961                 }
1962                 /**
1963                  * Only care about src & dst ports,
1964                  * others should be masked.
1965                  */
1966                 if (!item->mask) {
1967                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1968                         rte_flow_error_set(error, EINVAL,
1969                                 RTE_FLOW_ERROR_TYPE_ITEM,
1970                                 item, "Not supported by fdir filter");
1971                         return -rte_errno;
1972                 }
1973                 rule->b_mask = TRUE;
1974                 udp_mask = (const struct rte_flow_item_udp *)item->mask;
1975                 if (udp_mask->hdr.dgram_len ||
1976                     udp_mask->hdr.dgram_cksum) {
1977                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1978                         rte_flow_error_set(error, EINVAL,
1979                                 RTE_FLOW_ERROR_TYPE_ITEM,
1980                                 item, "Not supported by fdir filter");
1981                         return -rte_errno;
1982                 }
1983                 rule->mask.src_port_mask = udp_mask->hdr.src_port;
1984                 rule->mask.dst_port_mask = udp_mask->hdr.dst_port;
1985
1986                 if (item->spec) {
1987                         rule->b_spec = TRUE;
1988                         udp_spec = (const struct rte_flow_item_udp *)item->spec;
1989                         rule->ixgbe_fdir.formatted.src_port =
1990                                 udp_spec->hdr.src_port;
1991                         rule->ixgbe_fdir.formatted.dst_port =
1992                                 udp_spec->hdr.dst_port;
1993                 }
1994
1995                 item = next_no_fuzzy_pattern(pattern, item);
1996                 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
1997                     item->type != RTE_FLOW_ITEM_TYPE_END) {
1998                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1999                         rte_flow_error_set(error, EINVAL,
2000                                 RTE_FLOW_ERROR_TYPE_ITEM,
2001                                 item, "Not supported by fdir filter");
2002                         return -rte_errno;
2003                 }
2004
2005         }
2006
2007         /* Get the SCTP info */
2008         if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
2009                 /**
2010                  * Set the flow type even if there's no content
2011                  * as we must have a flow type.
2012                  */
2013                 rule->ixgbe_fdir.formatted.flow_type |=
2014                         IXGBE_ATR_L4TYPE_SCTP;
2015                 /*Not supported last point for range*/
2016                 if (item->last) {
2017                         rte_flow_error_set(error, EINVAL,
2018                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2019                                 item, "Not supported last point for range");
2020                         return -rte_errno;
2021                 }
2022
2023                 /* Only the x550 family supports the SCTP port. */
2024                 if (hw->mac.type == ixgbe_mac_X550 ||
2025                     hw->mac.type == ixgbe_mac_X550EM_x ||
2026                     hw->mac.type == ixgbe_mac_X550EM_a) {
2027                         /**
2028                          * Only care about src & dst ports,
2029                          * others should be masked.
2030                          */
2031                         if (!item->mask) {
2032                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2033                                 rte_flow_error_set(error, EINVAL,
2034                                         RTE_FLOW_ERROR_TYPE_ITEM,
2035                                         item, "Not supported by fdir filter");
2036                                 return -rte_errno;
2037                         }
2038                         rule->b_mask = TRUE;
2039                         sctp_mask =
2040                                 (const struct rte_flow_item_sctp *)item->mask;
2041                         if (sctp_mask->hdr.tag ||
2042                                 sctp_mask->hdr.cksum) {
2043                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2044                                 rte_flow_error_set(error, EINVAL,
2045                                         RTE_FLOW_ERROR_TYPE_ITEM,
2046                                         item, "Not supported by fdir filter");
2047                                 return -rte_errno;
2048                         }
2049                         rule->mask.src_port_mask = sctp_mask->hdr.src_port;
2050                         rule->mask.dst_port_mask = sctp_mask->hdr.dst_port;
2051
2052                         if (item->spec) {
2053                                 rule->b_spec = TRUE;
2054                                 sctp_spec =
2055                                 (const struct rte_flow_item_sctp *)item->spec;
2056                                 rule->ixgbe_fdir.formatted.src_port =
2057                                         sctp_spec->hdr.src_port;
2058                                 rule->ixgbe_fdir.formatted.dst_port =
2059                                         sctp_spec->hdr.dst_port;
2060                         }
2061                 /* For other MAC types, even the SCTP port is not supported. */
2062                 } else {
2063                         sctp_mask =
2064                                 (const struct rte_flow_item_sctp *)item->mask;
2065                         if (sctp_mask &&
2066                                 (sctp_mask->hdr.src_port ||
2067                                  sctp_mask->hdr.dst_port ||
2068                                  sctp_mask->hdr.tag ||
2069                                  sctp_mask->hdr.cksum)) {
2070                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2071                                 rte_flow_error_set(error, EINVAL,
2072                                         RTE_FLOW_ERROR_TYPE_ITEM,
2073                                         item, "Not supported by fdir filter");
2074                                 return -rte_errno;
2075                         }
2076                 }
2077
2078                 item = next_no_fuzzy_pattern(pattern, item);
2079                 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
2080                         item->type != RTE_FLOW_ITEM_TYPE_END) {
2081                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2082                         rte_flow_error_set(error, EINVAL,
2083                                 RTE_FLOW_ERROR_TYPE_ITEM,
2084                                 item, "Not supported by fdir filter");
2085                         return -rte_errno;
2086                 }
2087         }
2088
2089         /* Get the flex byte info */
2090         if (item->type == RTE_FLOW_ITEM_TYPE_RAW) {
2091                 /* Not supported last point for range*/
2092                 if (item->last) {
2093                         rte_flow_error_set(error, EINVAL,
2094                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2095                                 item, "Not supported last point for range");
2096                         return -rte_errno;
2097                 }
2098                 /* Neither spec nor mask should be null. */
2099                 if (!item->mask || !item->spec) {
2100                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2101                         rte_flow_error_set(error, EINVAL,
2102                                 RTE_FLOW_ERROR_TYPE_ITEM,
2103                                 item, "Not supported by fdir filter");
2104                         return -rte_errno;
2105                 }
2106
2107                 raw_mask = (const struct rte_flow_item_raw *)item->mask;
2108
2109                 /* check mask */
2110                 if (raw_mask->relative != 0x1 ||
2111                     raw_mask->search != 0x1 ||
2112                     raw_mask->reserved != 0x0 ||
2113                     (uint32_t)raw_mask->offset != 0xffffffff ||
2114                     raw_mask->limit != 0xffff ||
2115                     raw_mask->length != 0xffff) {
2116                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2117                         rte_flow_error_set(error, EINVAL,
2118                                 RTE_FLOW_ERROR_TYPE_ITEM,
2119                                 item, "Not supported by fdir filter");
2120                         return -rte_errno;
2121                 }
2122
2123                 raw_spec = (const struct rte_flow_item_raw *)item->spec;
2124
2125                 /* check spec */
2126                 if (raw_spec->relative != 0 ||
2127                     raw_spec->search != 0 ||
2128                     raw_spec->reserved != 0 ||
2129                     raw_spec->offset > IXGBE_MAX_FLX_SOURCE_OFF ||
2130                     raw_spec->offset % 2 ||
2131                     raw_spec->limit != 0 ||
2132                     raw_spec->length != 2 ||
2133                     /* pattern can't be 0xffff */
2134                     (raw_spec->pattern[0] == 0xff &&
2135                      raw_spec->pattern[1] == 0xff)) {
2136                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2137                         rte_flow_error_set(error, EINVAL,
2138                                 RTE_FLOW_ERROR_TYPE_ITEM,
2139                                 item, "Not supported by fdir filter");
2140                         return -rte_errno;
2141                 }
2142
2143                 /* check pattern mask */
2144                 if (raw_mask->pattern[0] != 0xff ||
2145                     raw_mask->pattern[1] != 0xff) {
2146                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2147                         rte_flow_error_set(error, EINVAL,
2148                                 RTE_FLOW_ERROR_TYPE_ITEM,
2149                                 item, "Not supported by fdir filter");
2150                         return -rte_errno;
2151                 }
2152
2153                 rule->mask.flex_bytes_mask = 0xffff;
2154                 rule->ixgbe_fdir.formatted.flex_bytes =
2155                         (((uint16_t)raw_spec->pattern[1]) << 8) |
2156                         raw_spec->pattern[0];
2157                 rule->flex_bytes_offset = raw_spec->offset;
2158         }
2159
2160         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2161                 /* check if the next not void item is END */
2162                 item = next_no_fuzzy_pattern(pattern, item);
2163                 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2164                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2165                         rte_flow_error_set(error, EINVAL,
2166                                 RTE_FLOW_ERROR_TYPE_ITEM,
2167                                 item, "Not supported by fdir filter");
2168                         return -rte_errno;
2169                 }
2170         }
2171
2172         return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
2173 }
2174
2175 #define NVGRE_PROTOCOL 0x6558
2176
2177 /**
2178  * Parse the rule to see if it is a VxLAN or NVGRE flow director rule,
2179  * and fill in the flow director filter info along the way.
2180  * VxLAN PATTERN:
2181  * The first not void item must be ETH.
2182  * The second not void item must be IPV4/ IPV6.
2183  * The third not void item must be UDP, and the fourth must be VXLAN.
2184  * The next not void item must be END.
2185  * NVGRE PATTERN:
2186  * The first not void item must be ETH.
2187  * The second not void item must be IPV4/ IPV6.
2188  * The third not void item must be NVGRE.
2189  * The next not void item must be END.
2190  * ACTION:
2191  * The first not void action should be QUEUE or DROP.
2192  * The second not void optional action should be MARK,
2193  * mark_id is a uint32_t number.
2194  * The next not void action should be END.
2195  * VxLAN pattern example:
2196  * ITEM         Spec                    Mask
2197  * ETH          NULL                    NULL
2198  * IPV4/IPV6    NULL                    NULL
2199  * UDP          NULL                    NULL
2200  * VxLAN        vni{0x00, 0x32, 0x54}   {0xFF, 0xFF, 0xFF}
2201  * MAC VLAN     tci     0x2016          0xEFFF
2202  * END
2203  * NVGRE pattern example:
2204  * ITEM         Spec                    Mask
2205  * ETH          NULL                    NULL
2206  * IPV4/IPV6    NULL                    NULL
2207  * NVGRE        protocol        0x6558  0xFFFF
2208  *              tni{0x00, 0x32, 0x54}   {0xFF, 0xFF, 0xFF}
2209  * MAC VLAN     tci     0x2016          0xEFFF
2210  * END
2211  * Other members in mask and spec should be set to 0x00.
2212  * item->last should be NULL.
2213  */
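/*
 * Illustrative sketch only (not used by the driver): how the VxLAN item from
 * the table above could be filled in by an application; the surrounding ETH,
 * IPV4/IPV6, UDP, inner MAC VLAN and END items follow the same layout. The
 * VNI value below is an arbitrary example.
 *
 *     struct rte_flow_item_vxlan vxlan_spec = { .vni = { 0x00, 0x32, 0x54 } };
 *     struct rte_flow_item_vxlan vxlan_mask = { .vni = { 0xFF, 0xFF, 0xFF } };
 *     struct rte_flow_item vxlan_item = {
 *             .type = RTE_FLOW_ITEM_TYPE_VXLAN,
 *             .spec = &vxlan_spec,
 *             .mask = &vxlan_mask,
 *     };
 */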
2214 static int
2215 ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
2216                                const struct rte_flow_item pattern[],
2217                                const struct rte_flow_action actions[],
2218                                struct ixgbe_fdir_rule *rule,
2219                                struct rte_flow_error *error)
2220 {
2221         const struct rte_flow_item *item;
2222         const struct rte_flow_item_vxlan *vxlan_spec;
2223         const struct rte_flow_item_vxlan *vxlan_mask;
2224         const struct rte_flow_item_nvgre *nvgre_spec;
2225         const struct rte_flow_item_nvgre *nvgre_mask;
2226         const struct rte_flow_item_eth *eth_spec;
2227         const struct rte_flow_item_eth *eth_mask;
2228         const struct rte_flow_item_vlan *vlan_spec;
2229         const struct rte_flow_item_vlan *vlan_mask;
2230         uint32_t j;
2231
2232         if (!pattern) {
2233                 rte_flow_error_set(error, EINVAL,
2234                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
2235                                    NULL, "NULL pattern.");
2236                 return -rte_errno;
2237         }
2238
2239         if (!actions) {
2240                 rte_flow_error_set(error, EINVAL,
2241                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
2242                                    NULL, "NULL action.");
2243                 return -rte_errno;
2244         }
2245
2246         if (!attr) {
2247                 rte_flow_error_set(error, EINVAL,
2248                                    RTE_FLOW_ERROR_TYPE_ATTR,
2249                                    NULL, "NULL attribute.");
2250                 return -rte_errno;
2251         }
2252
2253         /**
2254          * Some fields may not be provided. Set spec to 0 and mask to the
2255          * default value, so we need not handle the missing fields later.
2256          */
2257         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2258         memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
2259         rule->mask.vlan_tci_mask = 0;
2260
2261         /**
2262          * The first not void item should be
2263          * MAC or IPv4 or IPv6 or UDP or VxLAN or NVGRE.
2264          */
2265         item = next_no_void_pattern(pattern, NULL);
2266         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
2267             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
2268             item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
2269             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
2270             item->type != RTE_FLOW_ITEM_TYPE_VXLAN &&
2271             item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
2272                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2273                 rte_flow_error_set(error, EINVAL,
2274                         RTE_FLOW_ERROR_TYPE_ITEM,
2275                         item, "Not supported by fdir filter");
2276                 return -rte_errno;
2277         }
2278
2279         rule->mode = RTE_FDIR_MODE_PERFECT_TUNNEL;
2280
2281         /* Skip MAC. */
2282         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
2283                 /* Only used to describe the protocol stack. */
2284                 if (item->spec || item->mask) {
2285                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2286                         rte_flow_error_set(error, EINVAL,
2287                                 RTE_FLOW_ERROR_TYPE_ITEM,
2288                                 item, "Not supported by fdir filter");
2289                         return -rte_errno;
2290                 }
2291                 /* Not supported last point for range*/
2292                 if (item->last) {
2293                         rte_flow_error_set(error, EINVAL,
2294                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2295                                 item, "Not supported last point for range");
2296                         return -rte_errno;
2297                 }
2298
2299                 /* Check if the next not void item is IPv4 or IPv6. */
2300                 item = next_no_void_pattern(pattern, item);
2301                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
2302                     item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
2303                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2304                         rte_flow_error_set(error, EINVAL,
2305                                 RTE_FLOW_ERROR_TYPE_ITEM,
2306                                 item, "Not supported by fdir filter");
2307                         return -rte_errno;
2308                 }
2309         }
2310
2311         /* Skip IP. */
2312         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
2313             item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
2314                 /* Only used to describe the protocol stack. */
2315                 if (item->spec || item->mask) {
2316                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2317                         rte_flow_error_set(error, EINVAL,
2318                                 RTE_FLOW_ERROR_TYPE_ITEM,
2319                                 item, "Not supported by fdir filter");
2320                         return -rte_errno;
2321                 }
2322                 /*Not supported last point for range*/
2323                 if (item->last) {
2324                         rte_flow_error_set(error, EINVAL,
2325                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2326                                 item, "Not supported last point for range");
2327                         return -rte_errno;
2328                 }
2329
2330                 /* Check if the next not void item is UDP or NVGRE. */
2331                 item = next_no_void_pattern(pattern, item);
2332                 if (item->type != RTE_FLOW_ITEM_TYPE_UDP &&
2333                     item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
2334                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2335                         rte_flow_error_set(error, EINVAL,
2336                                 RTE_FLOW_ERROR_TYPE_ITEM,
2337                                 item, "Not supported by fdir filter");
2338                         return -rte_errno;
2339                 }
2340         }
2341
2342         /* Skip UDP. */
2343         if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
2344                 /* Only used to describe the protocol stack. */
2345                 if (item->spec || item->mask) {
2346                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2347                         rte_flow_error_set(error, EINVAL,
2348                                 RTE_FLOW_ERROR_TYPE_ITEM,
2349                                 item, "Not supported by fdir filter");
2350                         return -rte_errno;
2351                 }
2352                 /*Not supported last point for range*/
2353                 if (item->last) {
2354                         rte_flow_error_set(error, EINVAL,
2355                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2356                                 item, "Not supported last point for range");
2357                         return -rte_errno;
2358                 }
2359
2360                 /* Check if the next not void item is VxLAN. */
2361                 item = next_no_void_pattern(pattern, item);
2362                 if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
2363                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2364                         rte_flow_error_set(error, EINVAL,
2365                                 RTE_FLOW_ERROR_TYPE_ITEM,
2366                                 item, "Not supported by fdir filter");
2367                         return -rte_errno;
2368                 }
2369         }
2370
2371         /* Get the VxLAN info */
2372         if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
2373                 rule->ixgbe_fdir.formatted.tunnel_type =
2374                                 IXGBE_FDIR_VXLAN_TUNNEL_TYPE;
2375
2376                 /* Only care about VNI, others should be masked. */
2377                 if (!item->mask) {
2378                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2379                         rte_flow_error_set(error, EINVAL,
2380                                 RTE_FLOW_ERROR_TYPE_ITEM,
2381                                 item, "Not supported by fdir filter");
2382                         return -rte_errno;
2383                 }
2384                 /*Not supported last point for range*/
2385                 if (item->last) {
2386                         rte_flow_error_set(error, EINVAL,
2387                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2388                                 item, "Not supported last point for range");
2389                         return -rte_errno;
2390                 }
2391                 rule->b_mask = TRUE;
2392
2393                 /* Tunnel type is always meaningful. */
2394                 rule->mask.tunnel_type_mask = 1;
2395
2396                 vxlan_mask =
2397                         (const struct rte_flow_item_vxlan *)item->mask;
2398                 if (vxlan_mask->flags) {
2399                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2400                         rte_flow_error_set(error, EINVAL,
2401                                 RTE_FLOW_ERROR_TYPE_ITEM,
2402                                 item, "Not supported by fdir filter");
2403                         return -rte_errno;
2404                 }
2405                 /* VNI must be totally masked or totally unmasked. */
2406                 if ((vxlan_mask->vni[0] || vxlan_mask->vni[1] ||
2407                         vxlan_mask->vni[2]) &&
2408                         ((vxlan_mask->vni[0] != 0xFF) ||
2409                         (vxlan_mask->vni[1] != 0xFF) ||
2410                                 (vxlan_mask->vni[2] != 0xFF))) {
2411                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2412                         rte_flow_error_set(error, EINVAL,
2413                                 RTE_FLOW_ERROR_TYPE_ITEM,
2414                                 item, "Not supported by fdir filter");
2415                         return -rte_errno;
2416                 }
2417
2418                 rte_memcpy(&rule->mask.tunnel_id_mask, vxlan_mask->vni,
2419                         RTE_DIM(vxlan_mask->vni));
2420
2421                 if (item->spec) {
2422                         rule->b_spec = TRUE;
2423                         vxlan_spec = (const struct rte_flow_item_vxlan *)
2424                                         item->spec;
2425                         rte_memcpy(((uint8_t *)
2426                                 &rule->ixgbe_fdir.formatted.tni_vni),
2427                                 vxlan_spec->vni, RTE_DIM(vxlan_spec->vni));
2428                 }
2429         }
2430
2431         /* Get the NVGRE info */
2432         if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE) {
2433                 rule->ixgbe_fdir.formatted.tunnel_type =
2434                                 IXGBE_FDIR_NVGRE_TUNNEL_TYPE;
2435
2436                 /**
2437                  * Only care about the c_k_s_rsvd0_ver bits, protocol and TNI,
2438                  * others should be masked.
2439                  */
2440                 if (!item->mask) {
2441                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2442                         rte_flow_error_set(error, EINVAL,
2443                                 RTE_FLOW_ERROR_TYPE_ITEM,
2444                                 item, "Not supported by fdir filter");
2445                         return -rte_errno;
2446                 }
2447                 /*Not supported last point for range*/
2448                 if (item->last) {
2449                         rte_flow_error_set(error, EINVAL,
2450                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2451                                 item, "Not supported last point for range");
2452                         return -rte_errno;
2453                 }
2454                 rule->b_mask = TRUE;
2455
2456                 /* Tunnel type is always meaningful. */
2457                 rule->mask.tunnel_type_mask = 1;
2458
2459                 nvgre_mask =
2460                         (const struct rte_flow_item_nvgre *)item->mask;
2461                 if (nvgre_mask->flow_id) {
2462                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2463                         rte_flow_error_set(error, EINVAL,
2464                                 RTE_FLOW_ERROR_TYPE_ITEM,
2465                                 item, "Not supported by fdir filter");
2466                         return -rte_errno;
2467                 }
2468                 if (nvgre_mask->protocol &&
2469                     nvgre_mask->protocol != 0xFFFF) {
2470                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2471                         rte_flow_error_set(error, EINVAL,
2472                                 RTE_FLOW_ERROR_TYPE_ITEM,
2473                                 item, "Not supported by fdir filter");
2474                         return -rte_errno;
2475                 }
2476                 if (nvgre_mask->c_k_s_rsvd0_ver &&
2477                     nvgre_mask->c_k_s_rsvd0_ver !=
2478                         rte_cpu_to_be_16(0xFFFF)) {
2479                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2480                         rte_flow_error_set(error, EINVAL,
2481                                 RTE_FLOW_ERROR_TYPE_ITEM,
2482                                 item, "Not supported by fdir filter");
2483                         return -rte_errno;
2484                 }
2485                 /* The TNI must be fully masked or not masked at all. */
2486                 if ((nvgre_mask->tni[0] || nvgre_mask->tni[1] ||
2487                     nvgre_mask->tni[2]) &&
2488                     ((nvgre_mask->tni[0] != 0xFF) || (nvgre_mask->tni[1] != 0xFF) ||
2489                     (nvgre_mask->tni[2] != 0xFF))) {
2490                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2491                         rte_flow_error_set(error, EINVAL,
2492                                 RTE_FLOW_ERROR_TYPE_ITEM,
2493                                 item, "Not supported by fdir filter");
2494                         return -rte_errno;
2495                 }
2496                 /* The TNI is a 24-bit field. */
2497                 rte_memcpy(&rule->mask.tunnel_id_mask, nvgre_mask->tni,
2498                         RTE_DIM(nvgre_mask->tni));
2499                 rule->mask.tunnel_id_mask <<= 8;
2500
2501                 if (item->spec) {
2502                         rule->b_spec = TRUE;
2503                         nvgre_spec =
2504                                 (const struct rte_flow_item_nvgre *)item->spec;
2505                         if (nvgre_spec->c_k_s_rsvd0_ver !=
2506                             rte_cpu_to_be_16(0x2000) &&
2507                                 nvgre_mask->c_k_s_rsvd0_ver) {
2508                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2509                                 rte_flow_error_set(error, EINVAL,
2510                                         RTE_FLOW_ERROR_TYPE_ITEM,
2511                                         item, "Not supported by fdir filter");
2512                                 return -rte_errno;
2513                         }
2514                         if (nvgre_mask->protocol &&
2515                             nvgre_spec->protocol !=
2516                             rte_cpu_to_be_16(NVGRE_PROTOCOL)) {
2517                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2518                                 rte_flow_error_set(error, EINVAL,
2519                                         RTE_FLOW_ERROR_TYPE_ITEM,
2520                                         item, "Not supported by fdir filter");
2521                                 return -rte_errno;
2522                         }
2523                         /* The TNI is a 24-bit field. */
2524                         rte_memcpy(&rule->ixgbe_fdir.formatted.tni_vni,
2525                         nvgre_spec->tni, RTE_DIM(nvgre_spec->tni));
2526                 }
2527         }
2528
2529         /* Check that the next non-void item is ETH (the inner MAC). */
2530         item = next_no_void_pattern(pattern, item);
2531         if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
2532                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2533                 rte_flow_error_set(error, EINVAL,
2534                         RTE_FLOW_ERROR_TYPE_ITEM,
2535                         item, "Not supported by fdir filter");
2536                 return -rte_errno;
2537         }
2538
2539         /**
2540          * Only the VLAN tag and the destination MAC address are supported;
2541          * all other fields must be masked out.
2542          */
2543
2544         if (!item->mask) {
2545                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2546                 rte_flow_error_set(error, EINVAL,
2547                         RTE_FLOW_ERROR_TYPE_ITEM,
2548                         item, "Not supported by fdir filter");
2549                 return -rte_errno;
2550         }
2551         /* Range matching (the "last" member) is not supported. */
2552         if (item->last) {
2553                 rte_flow_error_set(error, EINVAL,
2554                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2555                         item, "Not supported last point for range");
2556                 return -rte_errno;
2557         }
2558         rule->b_mask = TRUE;
2559         eth_mask = (const struct rte_flow_item_eth *)item->mask;
2560
2561         /* The Ether type must not be matched; its mask must be zero. */
2562         if (eth_mask->type) {
2563                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2564                 rte_flow_error_set(error, EINVAL,
2565                         RTE_FLOW_ERROR_TYPE_ITEM,
2566                         item, "Not supported by fdir filter");
2567                 return -rte_errno;
2568         }
2569
2570         /* The source MAC address must not be matched; its mask must be zero. */
2571         for (j = 0; j < ETHER_ADDR_LEN; j++) {
2572                 if (eth_mask->src.addr_bytes[j]) {
2573                         memset(rule, 0,
2574                                sizeof(struct ixgbe_fdir_rule));
2575                         rte_flow_error_set(error, EINVAL,
2576                                 RTE_FLOW_ERROR_TYPE_ITEM,
2577                                 item, "Not supported by fdir filter");
2578                         return -rte_errno;
2579                 }
2580         }
2581         rule->mask.mac_addr_byte_mask = 0;
2582         for (j = 0; j < ETHER_ADDR_LEN; j++) {
2583                 /* It's a per-byte mask. */
2584                 if (eth_mask->dst.addr_bytes[j] == 0xFF) {
2585                         rule->mask.mac_addr_byte_mask |= 0x1 << j;
2586                 } else if (eth_mask->dst.addr_bytes[j]) {
2587                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2588                         rte_flow_error_set(error, EINVAL,
2589                                 RTE_FLOW_ERROR_TYPE_ITEM,
2590                                 item, "Not supported by fdir filter");
2591                         return -rte_errno;
2592                 }
2593         }
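     /*
      * For example (hypothetical values): a destination MAC mask of
      * ff:ff:ff:ff:ff:ff sets one bit per byte and gives
      * mac_addr_byte_mask == 0x3F, while an all-zero mask leaves it 0.
      */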
2594
2595         /* No VLAN item: treat the TCI as fully masked (0xEFFF skips the DEI bit). */
2596         rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
2597
2598         if (item->spec) {
2599                 rule->b_spec = TRUE;
2600                 eth_spec = (const struct rte_flow_item_eth *)item->spec;
2601
2602                 /* Get the dst MAC. */
2603                 for (j = 0; j < ETHER_ADDR_LEN; j++) {
2604                         rule->ixgbe_fdir.formatted.inner_mac[j] =
2605                                 eth_spec->dst.addr_bytes[j];
2606                 }
2607         }
2608
2609         /**
2610          * Check that the next non-void item is VLAN or IPv4.
2611          * IPv6 is not supported.
2612          */
2613         item = next_no_void_pattern(pattern, item);
2614         if ((item->type != RTE_FLOW_ITEM_TYPE_VLAN) &&
2615                 (item->type != RTE_FLOW_ITEM_TYPE_IPV4)) {
2616                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2617                 rte_flow_error_set(error, EINVAL,
2618                         RTE_FLOW_ERROR_TYPE_ITEM,
2619                         item, "Not supported by fdir filter");
2620                 return -rte_errno;
2621         }
2622         /* Range matching (the "last" member) is not supported. */
2623         if (item->last) {
2624                 rte_flow_error_set(error, EINVAL,
2625                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2626                         item, "Not supported last point for range");
2627                 return -rte_errno;
2628         }
2629
2630         if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
2631                 if (!(item->spec && item->mask)) {
2632                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2633                         rte_flow_error_set(error, EINVAL,
2634                                 RTE_FLOW_ERROR_TYPE_ITEM,
2635                                 item, "Not supported by fdir filter");
2636                         return -rte_errno;
2637                 }
2638
2639                 vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
2640                 vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
2641
2642                 rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
2643
2644                 rule->mask.vlan_tci_mask = vlan_mask->tci;
2645                 rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
2646                 /* More than one VLAN tag is not supported. */
2647
2648                 /* Check that the next non-void item is END. */
2649                 item = next_no_void_pattern(pattern, item);
2650
2651                 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2652                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2653                         rte_flow_error_set(error, EINVAL,
2654                                 RTE_FLOW_ERROR_TYPE_ITEM,
2655                                 item, "Not supported by fdir filter");
2656                         return -rte_errno;
2657                 }
2658         }
2659
2660         /**
2661          * If the VLAN mask is 0, the VLAN tag is a don't-care.
2662          * Nothing needs to be done.
2663          */
2664
2665         return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
2666 }
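
     /*
      * Illustrative example (an assumption, not taken from the code above):
      * with dev_conf.fdir_conf.mode set to RTE_FDIR_MODE_PERFECT_TUNNEL, a
      * pattern such as
      *   ETH / IPV4 / UDP / VXLAN / ETH / VLAN / END
      * with a fully masked VNI, inner destination MAC and VLAN TCI, plus a
      * QUEUE action, is the kind of rule ixgbe_parse_fdir_filter_tunnel()
      * is meant to accept.
      */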
2667
2668 static int
2669 ixgbe_parse_fdir_filter(struct rte_eth_dev *dev,
2670                         const struct rte_flow_attr *attr,
2671                         const struct rte_flow_item pattern[],
2672                         const struct rte_flow_action actions[],
2673                         struct ixgbe_fdir_rule *rule,
2674                         struct rte_flow_error *error)
2675 {
2676         int ret;
2677         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2678         enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
2679
2680         if (hw->mac.type != ixgbe_mac_82599EB &&
2681                 hw->mac.type != ixgbe_mac_X540 &&
2682                 hw->mac.type != ixgbe_mac_X550 &&
2683                 hw->mac.type != ixgbe_mac_X550EM_x &&
2684                 hw->mac.type != ixgbe_mac_X550EM_a)
2685                 return -ENOTSUP;
2686
2687         ret = ixgbe_parse_fdir_filter_normal(dev, attr, pattern,
2688                                         actions, rule, error);
2689
2690         if (!ret)
2691                 goto step_next;
2692
2693         ret = ixgbe_parse_fdir_filter_tunnel(attr, pattern,
2694                                         actions, rule, error);
2695
2696         if (ret)
2697                 return ret;
2698
2699 step_next:
2700
2701         if (hw->mac.type == ixgbe_mac_82599EB &&
2702                 rule->fdirflags == IXGBE_FDIRCMD_DROP &&
2703                 (rule->ixgbe_fdir.formatted.src_port != 0 ||
2704                 rule->ixgbe_fdir.formatted.dst_port != 0))
2705                 return -ENOTSUP;
2706
2707         if (fdir_mode == RTE_FDIR_MODE_NONE ||
2708             fdir_mode != rule->mode)
2709                 return -ENOTSUP;
2710
2711         if (rule->queue >= dev->data->nb_rx_queues)
2712                 return -ENOTSUP;
2713
2714         return ret;
2715 }
2716
2717 void
2718 ixgbe_filterlist_init(void)
2719 {
2720         TAILQ_INIT(&filter_ntuple_list);
2721         TAILQ_INIT(&filter_ethertype_list);
2722         TAILQ_INIT(&filter_syn_list);
2723         TAILQ_INIT(&filter_fdir_list);
2724         TAILQ_INIT(&filter_l2_tunnel_list);
2725         TAILQ_INIT(&ixgbe_flow_list);
2726 }
2727
2728 void
2729 ixgbe_filterlist_flush(void)
2730 {
2731         struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
2732         struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
2733         struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
2734         struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2735         struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
2736         struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
2737
2738         while ((ntuple_filter_ptr = TAILQ_FIRST(&filter_ntuple_list))) {
2739                 TAILQ_REMOVE(&filter_ntuple_list,
2740                                  ntuple_filter_ptr,
2741                                  entries);
2742                 rte_free(ntuple_filter_ptr);
2743         }
2744
2745         while ((ethertype_filter_ptr = TAILQ_FIRST(&filter_ethertype_list))) {
2746                 TAILQ_REMOVE(&filter_ethertype_list,
2747                                  ethertype_filter_ptr,
2748                                  entries);
2749                 rte_free(ethertype_filter_ptr);
2750         }
2751
2752         while ((syn_filter_ptr = TAILQ_FIRST(&filter_syn_list))) {
2753                 TAILQ_REMOVE(&filter_syn_list,
2754                                  syn_filter_ptr,
2755                                  entries);
2756                 rte_free(syn_filter_ptr);
2757         }
2758
2759         while ((l2_tn_filter_ptr = TAILQ_FIRST(&filter_l2_tunnel_list))) {
2760                 TAILQ_REMOVE(&filter_l2_tunnel_list,
2761                                  l2_tn_filter_ptr,
2762                                  entries);
2763                 rte_free(l2_tn_filter_ptr);
2764         }
2765
2766         while ((fdir_rule_ptr = TAILQ_FIRST(&filter_fdir_list))) {
2767                 TAILQ_REMOVE(&filter_fdir_list,
2768                                  fdir_rule_ptr,
2769                                  entries);
2770                 rte_free(fdir_rule_ptr);
2771         }
2772
2773         while ((ixgbe_flow_mem_ptr = TAILQ_FIRST(&ixgbe_flow_list))) {
2774                 TAILQ_REMOVE(&ixgbe_flow_list,
2775                                  ixgbe_flow_mem_ptr,
2776                                  entries);
2777                 rte_free(ixgbe_flow_mem_ptr->flow);
2778                 rte_free(ixgbe_flow_mem_ptr);
2779         }
2780 }
2781
2782 /**
2783  * Create a flow rule.
2784  * Theoretically one rule can match more than one filter type.
2785  * We let it use the first filter type it hits,
2786  * so the parsing sequence matters.
2787  */
2788 static struct rte_flow *
2789 ixgbe_flow_create(struct rte_eth_dev *dev,
2790                   const struct rte_flow_attr *attr,
2791                   const struct rte_flow_item pattern[],
2792                   const struct rte_flow_action actions[],
2793                   struct rte_flow_error *error)
2794 {
2795         int ret;
2796         struct rte_eth_ntuple_filter ntuple_filter;
2797         struct rte_eth_ethertype_filter ethertype_filter;
2798         struct rte_eth_syn_filter syn_filter;
2799         struct ixgbe_fdir_rule fdir_rule;
2800         struct rte_eth_l2_tunnel_conf l2_tn_filter;
2801         struct ixgbe_hw_fdir_info *fdir_info =
2802                 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
2803         struct rte_flow *flow = NULL;
2804         struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
2805         struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
2806         struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
2807         struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2808         struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
2809         struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
2810         uint8_t first_mask = FALSE;
2811
2812         flow = rte_zmalloc("ixgbe_rte_flow", sizeof(struct rte_flow), 0);
2813         if (!flow) {
2814                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2815                 return NULL;
2816         }
2817         ixgbe_flow_mem_ptr = rte_zmalloc("ixgbe_flow_mem",
2818                         sizeof(struct ixgbe_flow_mem), 0);
2819         if (!ixgbe_flow_mem_ptr) {
2820                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2821                 rte_free(flow);
2822                 return NULL;
2823         }
2824         ixgbe_flow_mem_ptr->flow = flow;
2825         TAILQ_INSERT_TAIL(&ixgbe_flow_list,
2826                                 ixgbe_flow_mem_ptr, entries);
2827
2828         memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
2829         ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
2830                         actions, &ntuple_filter, error);
2831
2832 #ifdef RTE_LIBRTE_SECURITY
2833         /* An ESP flow is not really a flow. */
2834         if (ntuple_filter.proto == IPPROTO_ESP)
2835                 return flow;
2836 #endif
2837
2838         if (!ret) {
2839                 ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
2840                 if (!ret) {
2841                         ntuple_filter_ptr = rte_zmalloc("ixgbe_ntuple_filter",
2842                                 sizeof(struct ixgbe_ntuple_filter_ele), 0);
2843                         if (!ntuple_filter_ptr) {
2844                                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2845                                 goto out;
2846                         }
2847                         rte_memcpy(&ntuple_filter_ptr->filter_info,
2848                                 &ntuple_filter,
2849                                 sizeof(struct rte_eth_ntuple_filter));
2850                         TAILQ_INSERT_TAIL(&filter_ntuple_list,
2851                                 ntuple_filter_ptr, entries);
2852                         flow->rule = ntuple_filter_ptr;
2853                         flow->filter_type = RTE_ETH_FILTER_NTUPLE;
2854                         return flow;
2855                 }
2856                 goto out;
2857         }
2858
2859         memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
2860         ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
2861                                 actions, &ethertype_filter, error);
2862         if (!ret) {
2863                 ret = ixgbe_add_del_ethertype_filter(dev,
2864                                 &ethertype_filter, TRUE);
2865                 if (!ret) {
2866                         ethertype_filter_ptr = rte_zmalloc(
2867                                 "ixgbe_ethertype_filter",
2868                                 sizeof(struct ixgbe_ethertype_filter_ele), 0);
2869                         if (!ethertype_filter_ptr) {
2870                                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2871                                 goto out;
2872                         }
2873                         rte_memcpy(&ethertype_filter_ptr->filter_info,
2874                                 &ethertype_filter,
2875                                 sizeof(struct rte_eth_ethertype_filter));
2876                         TAILQ_INSERT_TAIL(&filter_ethertype_list,
2877                                 ethertype_filter_ptr, entries);
2878                         flow->rule = ethertype_filter_ptr;
2879                         flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
2880                         return flow;
2881                 }
2882                 goto out;
2883         }
2884
2885         memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
2886         ret = ixgbe_parse_syn_filter(dev, attr, pattern,
2887                                 actions, &syn_filter, error);
2888         if (!ret) {
2889                 ret = ixgbe_syn_filter_set(dev, &syn_filter, TRUE);
2890                 if (!ret) {
2891                         syn_filter_ptr = rte_zmalloc("ixgbe_syn_filter",
2892                                 sizeof(struct ixgbe_eth_syn_filter_ele), 0);
2893                         if (!syn_filter_ptr) {
2894                                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2895                                 goto out;
2896                         }
2897                         rte_memcpy(&syn_filter_ptr->filter_info,
2898                                 &syn_filter,
2899                                 sizeof(struct rte_eth_syn_filter));
2900                         TAILQ_INSERT_TAIL(&filter_syn_list,
2901                                 syn_filter_ptr,
2902                                 entries);
2903                         flow->rule = syn_filter_ptr;
2904                         flow->filter_type = RTE_ETH_FILTER_SYN;
2905                         return flow;
2906                 }
2907                 goto out;
2908         }
2909
2910         memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
2911         ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
2912                                 actions, &fdir_rule, error);
2913         if (!ret) {
2914                 /* A mask cannot be deleted. */
2915                 if (fdir_rule.b_mask) {
2916                         if (!fdir_info->mask_added) {
2917                                 /* It's the first time the mask is set. */
2918                                 rte_memcpy(&fdir_info->mask,
2919                                         &fdir_rule.mask,
2920                                         sizeof(struct ixgbe_hw_fdir_mask));
2921                                 fdir_info->flex_bytes_offset =
2922                                         fdir_rule.flex_bytes_offset;
2923
2924                                 if (fdir_rule.mask.flex_bytes_mask)
2925                                         ixgbe_fdir_set_flexbytes_offset(dev,
2926                                                 fdir_rule.flex_bytes_offset);
2927
2928                                 ret = ixgbe_fdir_set_input_mask(dev);
2929                                 if (ret)
2930                                         goto out;
2931
2932                                 fdir_info->mask_added = TRUE;
2933                                 first_mask = TRUE;
2934                         } else {
2935                                 /**
2936                                  * Only one global mask is supported;
2937                                  * all masks must be identical.
2938                                  */
2939                                 ret = memcmp(&fdir_info->mask,
2940                                         &fdir_rule.mask,
2941                                         sizeof(struct ixgbe_hw_fdir_mask));
2942                                 if (ret)
2943                                         goto out;
2944
2945                                 if (fdir_info->flex_bytes_offset !=
2946                                                 fdir_rule.flex_bytes_offset)
2947                                         goto out;
2948                         }
2949                 }
2950
2951                 if (fdir_rule.b_spec) {
2952                         ret = ixgbe_fdir_filter_program(dev, &fdir_rule,
2953                                         FALSE, FALSE);
2954                         if (!ret) {
2955                                 fdir_rule_ptr = rte_zmalloc("ixgbe_fdir_filter",
2956                                         sizeof(struct ixgbe_fdir_rule_ele), 0);
2957                                 if (!fdir_rule_ptr) {
2958                                         PMD_DRV_LOG(ERR, "failed to allocate memory");
2959                                         goto out;
2960                                 }
2961                                 rte_memcpy(&fdir_rule_ptr->filter_info,
2962                                         &fdir_rule,
2963                                         sizeof(struct ixgbe_fdir_rule));
2964                                 TAILQ_INSERT_TAIL(&filter_fdir_list,
2965                                         fdir_rule_ptr, entries);
2966                                 flow->rule = fdir_rule_ptr;
2967                                 flow->filter_type = RTE_ETH_FILTER_FDIR;
2968
2969                                 return flow;
2970                         }
2971
2972                         if (ret) {
2973                                 /**
2974                                  * Clear the mask_added flag if programming
2975                                  * the filter fails.
2976                                  */
2977                                 if (first_mask)
2978                                         fdir_info->mask_added = FALSE;
2979                                 goto out;
2980                         }
2981                 }
2982
2983                 goto out;
2984         }
2985
2986         memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
2987         ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
2988                                         actions, &l2_tn_filter, error);
2989         if (!ret) {
2990                 ret = ixgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_filter, FALSE);
2991                 if (!ret) {
2992                         l2_tn_filter_ptr = rte_zmalloc("ixgbe_l2_tn_filter",
2993                                 sizeof(struct ixgbe_eth_l2_tunnel_conf_ele), 0);
2994                         if (!l2_tn_filter_ptr) {
2995                                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2996                                 goto out;
2997                         }
2998                         rte_memcpy(&l2_tn_filter_ptr->filter_info,
2999                                 &l2_tn_filter,
3000                                 sizeof(struct rte_eth_l2_tunnel_conf));
3001                         TAILQ_INSERT_TAIL(&filter_l2_tunnel_list,
3002                                 l2_tn_filter_ptr, entries);
3003                         flow->rule = l2_tn_filter_ptr;
3004                         flow->filter_type = RTE_ETH_FILTER_L2_TUNNEL;
3005                         return flow;
3006                 }
3007         }
3008
3009 out:
3010         TAILQ_REMOVE(&ixgbe_flow_list,
3011                 ixgbe_flow_mem_ptr, entries);
3012         rte_flow_error_set(error, -ret,
3013                            RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
3014                            "Failed to create flow.");
3015         rte_free(ixgbe_flow_mem_ptr);
3016         rte_free(flow);
3017         return NULL;
3018 }
3019
3020 /**
3021  * Check whether the flow rule is supported by ixgbe.
3022  * Only the format is checked; there is no guarantee that the rule can be
3023  * programmed into the HW, since there may not be enough room for it.
3024  */
3025 static int
3026 ixgbe_flow_validate(struct rte_eth_dev *dev,
3027                 const struct rte_flow_attr *attr,
3028                 const struct rte_flow_item pattern[],
3029                 const struct rte_flow_action actions[],
3030                 struct rte_flow_error *error)
3031 {
3032         struct rte_eth_ntuple_filter ntuple_filter;
3033         struct rte_eth_ethertype_filter ethertype_filter;
3034         struct rte_eth_syn_filter syn_filter;
3035         struct rte_eth_l2_tunnel_conf l2_tn_filter;
3036         struct ixgbe_fdir_rule fdir_rule;
3037         int ret;
3038
3039         memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
3040         ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
3041                                 actions, &ntuple_filter, error);
3042         if (!ret)
3043                 return 0;
3044
3045         memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
3046         ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
3047                                 actions, &ethertype_filter, error);
3048         if (!ret)
3049                 return 0;
3050
3051         memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
3052         ret = ixgbe_parse_syn_filter(dev, attr, pattern,
3053                                 actions, &syn_filter, error);
3054         if (!ret)
3055                 return 0;
3056
3057         memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
3058         ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
3059                                 actions, &fdir_rule, error);
3060         if (!ret)
3061                 return 0;
3062
3063         memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
3064         ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
3065                                 actions, &l2_tn_filter, error);
3066
3067         return ret;
3068 }
3069
3070 /* Destroy a flow rule on ixgbe. */
3071 static int
3072 ixgbe_flow_destroy(struct rte_eth_dev *dev,
3073                 struct rte_flow *flow,
3074                 struct rte_flow_error *error)
3075 {
3076         int ret;
3077         struct rte_flow *pmd_flow = flow;
3078         enum rte_filter_type filter_type = pmd_flow->filter_type;
3079         struct rte_eth_ntuple_filter ntuple_filter;
3080         struct rte_eth_ethertype_filter ethertype_filter;
3081         struct rte_eth_syn_filter syn_filter;
3082         struct ixgbe_fdir_rule fdir_rule;
3083         struct rte_eth_l2_tunnel_conf l2_tn_filter;
3084         struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
3085         struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
3086         struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
3087         struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
3088         struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
3089         struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
3090         struct ixgbe_hw_fdir_info *fdir_info =
3091                 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
3092
3093         switch (filter_type) {
3094         case RTE_ETH_FILTER_NTUPLE:
3095                 ntuple_filter_ptr = (struct ixgbe_ntuple_filter_ele *)
3096                                         pmd_flow->rule;
3097                 rte_memcpy(&ntuple_filter,
3098                         &ntuple_filter_ptr->filter_info,
3099                         sizeof(struct rte_eth_ntuple_filter));
3100                 ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, FALSE);
3101                 if (!ret) {
3102                         TAILQ_REMOVE(&filter_ntuple_list,
3103                         ntuple_filter_ptr, entries);
3104                         rte_free(ntuple_filter_ptr);
3105                 }
3106                 break;
3107         case RTE_ETH_FILTER_ETHERTYPE:
3108                 ethertype_filter_ptr = (struct ixgbe_ethertype_filter_ele *)
3109                                         pmd_flow->rule;
3110                 rte_memcpy(&ethertype_filter,
3111                         &ethertype_filter_ptr->filter_info,
3112                         sizeof(struct rte_eth_ethertype_filter));
3113                 ret = ixgbe_add_del_ethertype_filter(dev,
3114                                 &ethertype_filter, FALSE);
3115                 if (!ret) {
3116                         TAILQ_REMOVE(&filter_ethertype_list,
3117                                 ethertype_filter_ptr, entries);
3118                         rte_free(ethertype_filter_ptr);
3119                 }
3120                 break;
3121         case RTE_ETH_FILTER_SYN:
3122                 syn_filter_ptr = (struct ixgbe_eth_syn_filter_ele *)
3123                                 pmd_flow->rule;
3124                 rte_memcpy(&syn_filter,
3125                         &syn_filter_ptr->filter_info,
3126                         sizeof(struct rte_eth_syn_filter));
3127                 ret = ixgbe_syn_filter_set(dev, &syn_filter, FALSE);
3128                 if (!ret) {
3129                         TAILQ_REMOVE(&filter_syn_list,
3130                                 syn_filter_ptr, entries);
3131                         rte_free(syn_filter_ptr);
3132                 }
3133                 break;
3134         case RTE_ETH_FILTER_FDIR:
3135                 fdir_rule_ptr = (struct ixgbe_fdir_rule_ele *)pmd_flow->rule;
3136                 rte_memcpy(&fdir_rule,
3137                         &fdir_rule_ptr->filter_info,
3138                         sizeof(struct ixgbe_fdir_rule));
3139                 ret = ixgbe_fdir_filter_program(dev, &fdir_rule, TRUE, FALSE);
3140                 if (!ret) {
3141                         TAILQ_REMOVE(&filter_fdir_list,
3142                                 fdir_rule_ptr, entries);
3143                         rte_free(fdir_rule_ptr);
3144                         if (TAILQ_EMPTY(&filter_fdir_list))
3145                                 fdir_info->mask_added = false;
3146                 }
3147                 break;
3148         case RTE_ETH_FILTER_L2_TUNNEL:
3149                 l2_tn_filter_ptr = (struct ixgbe_eth_l2_tunnel_conf_ele *)
3150                                 pmd_flow->rule;
3151                 rte_memcpy(&l2_tn_filter, &l2_tn_filter_ptr->filter_info,
3152                         sizeof(struct rte_eth_l2_tunnel_conf));
3153                 ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_filter);
3154                 if (!ret) {
3155                         TAILQ_REMOVE(&filter_l2_tunnel_list,
3156                                 l2_tn_filter_ptr, entries);
3157                         rte_free(l2_tn_filter_ptr);
3158                 }
3159                 break;
3160         default:
3161                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
3162                             filter_type);
3163                 ret = -EINVAL;
3164                 break;
3165         }
3166
3167         if (ret) {
3168                 rte_flow_error_set(error, EINVAL,
3169                                 RTE_FLOW_ERROR_TYPE_HANDLE,
3170                                 NULL, "Failed to destroy flow");
3171                 return ret;
3172         }
3173
3174         TAILQ_FOREACH(ixgbe_flow_mem_ptr, &ixgbe_flow_list, entries) {
3175                 if (ixgbe_flow_mem_ptr->flow == pmd_flow) {
3176                         TAILQ_REMOVE(&ixgbe_flow_list,
3177                                 ixgbe_flow_mem_ptr, entries);
3178                         rte_free(ixgbe_flow_mem_ptr);
3179                 }
3180         }
3181         rte_free(flow);
3182
3183         return ret;
3184 }
3185
3186 /*  Destroy all flow rules associated with a port on ixgbe. */
3187 static int
3188 ixgbe_flow_flush(struct rte_eth_dev *dev,
3189                 struct rte_flow_error *error)
3190 {
3191         int ret = 0;
3192
3193         ixgbe_clear_all_ntuple_filter(dev);
3194         ixgbe_clear_all_ethertype_filter(dev);
3195         ixgbe_clear_syn_filter(dev);
3196
3197         ret = ixgbe_clear_all_fdir_filter(dev);
3198         if (ret < 0) {
3199                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
3200                                         NULL, "Failed to flush rule");
3201                 return ret;
3202         }
3203
3204         ret = ixgbe_clear_all_l2_tn_filter(dev);
3205         if (ret < 0) {
3206                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
3207                                         NULL, "Failed to flush rule");
3208                 return ret;
3209         }
3210
3211         ixgbe_filterlist_flush();
3212
3213         return 0;
3214 }
3215
3216 const struct rte_flow_ops ixgbe_flow_ops = {
3217         .validate = ixgbe_flow_validate,
3218         .create = ixgbe_flow_create,
3219         .destroy = ixgbe_flow_destroy,
3220         .flush = ixgbe_flow_flush,
3221 };
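
     /*
      * Usage sketch (illustrative only; port_id, the queue index and the
      * pattern skeleton below are assumptions, not part of this driver):
      * applications do not call ixgbe_flow_ops directly.  The callbacks above
      * are reached through the generic rte_flow API, for example:
      *
      *   struct rte_flow_attr attr = { .ingress = 1 };
      *   struct rte_flow_item pattern[] = {
      *           { .type = RTE_FLOW_ITEM_TYPE_ETH },
      *           { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
      *           { .type = RTE_FLOW_ITEM_TYPE_END },
      *   };
      *   struct rte_flow_action_queue queue = { .index = 1 };
      *   struct rte_flow_action actions[] = {
      *           { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
      *           { .type = RTE_FLOW_ACTION_TYPE_END },
      *   };
      *   struct rte_flow_error err;
      *   struct rte_flow *flow;
      *
      *   if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
      *           flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
      *
      * A real rule also needs item spec/mask values that satisfy one of the
      * parsers above; rte_flow_destroy() and rte_flow_flush() reach
      * ixgbe_flow_destroy() and ixgbe_flow_flush() in the same way.
      */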