1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <sys/queue.h>
35 #include <stdio.h>
36 #include <errno.h>
37 #include <stdint.h>
38 #include <string.h>
39 #include <unistd.h>
40 #include <stdarg.h>
41 #include <inttypes.h>
42 #include <netinet/in.h>
43 #include <rte_byteorder.h>
44 #include <rte_common.h>
45 #include <rte_cycles.h>
46
47 #include <rte_interrupts.h>
48 #include <rte_log.h>
49 #include <rte_debug.h>
50 #include <rte_pci.h>
51 #include <rte_atomic.h>
52 #include <rte_branch_prediction.h>
53 #include <rte_memory.h>
54 #include <rte_eal.h>
55 #include <rte_alarm.h>
56 #include <rte_ether.h>
57 #include <rte_ethdev.h>
58 #include <rte_malloc.h>
59 #include <rte_random.h>
60 #include <rte_dev.h>
61 #include <rte_hash_crc.h>
62 #include <rte_flow.h>
63 #include <rte_flow_driver.h>
64
65 #include "ixgbe_logs.h"
66 #include "base/ixgbe_api.h"
67 #include "base/ixgbe_vf.h"
68 #include "base/ixgbe_common.h"
69 #include "ixgbe_ethdev.h"
70 #include "ixgbe_bypass.h"
71 #include "ixgbe_rxtx.h"
72 #include "base/ixgbe_type.h"
73 #include "base/ixgbe_phy.h"
74 #include "rte_pmd_ixgbe.h"
75
76
77 #define IXGBE_MIN_N_TUPLE_PRIO 1
78 #define IXGBE_MAX_N_TUPLE_PRIO 7
79 #define IXGBE_MAX_FLX_SOURCE_OFF 62
80
81 /* ntuple filter list structure */
82 struct ixgbe_ntuple_filter_ele {
83         TAILQ_ENTRY(ixgbe_ntuple_filter_ele) entries;
84         struct rte_eth_ntuple_filter filter_info;
85 };
86 /* ethertype filter list structure */
87 struct ixgbe_ethertype_filter_ele {
88         TAILQ_ENTRY(ixgbe_ethertype_filter_ele) entries;
89         struct rte_eth_ethertype_filter filter_info;
90 };
91 /* syn filter list structure */
92 struct ixgbe_eth_syn_filter_ele {
93         TAILQ_ENTRY(ixgbe_eth_syn_filter_ele) entries;
94         struct rte_eth_syn_filter filter_info;
95 };
96 /* fdir filter list structure */
97 struct ixgbe_fdir_rule_ele {
98         TAILQ_ENTRY(ixgbe_fdir_rule_ele) entries;
99         struct ixgbe_fdir_rule filter_info;
100 };
101 /* l2_tunnel filter list structure */
102 struct ixgbe_eth_l2_tunnel_conf_ele {
103         TAILQ_ENTRY(ixgbe_eth_l2_tunnel_conf_ele) entries;
104         struct rte_eth_l2_tunnel_conf filter_info;
105 };
106 /* ixgbe_flow memory list structure */
107 struct ixgbe_flow_mem {
108         TAILQ_ENTRY(ixgbe_flow_mem) entries;
109         struct rte_flow *flow;
110 };
111
112 TAILQ_HEAD(ixgbe_ntuple_filter_list, ixgbe_ntuple_filter_ele);
113 TAILQ_HEAD(ixgbe_ethertype_filter_list, ixgbe_ethertype_filter_ele);
114 TAILQ_HEAD(ixgbe_syn_filter_list, ixgbe_eth_syn_filter_ele);
115 TAILQ_HEAD(ixgbe_fdir_rule_filter_list, ixgbe_fdir_rule_ele);
116 TAILQ_HEAD(ixgbe_l2_tunnel_filter_list, ixgbe_eth_l2_tunnel_conf_ele);
117 TAILQ_HEAD(ixgbe_flow_mem_list, ixgbe_flow_mem);
118
119 static struct ixgbe_ntuple_filter_list filter_ntuple_list;
120 static struct ixgbe_ethertype_filter_list filter_ethertype_list;
121 static struct ixgbe_syn_filter_list filter_syn_list;
122 static struct ixgbe_fdir_rule_filter_list filter_fdir_list;
123 static struct ixgbe_l2_tunnel_filter_list filter_l2_tunnel_list;
124 static struct ixgbe_flow_mem_list ixgbe_flow_list;
125
126 /**
127  * An endless loop cannot happen given the assumptions below:
128  * 1. there is at least one non-void item (END);
129  * 2. cur is before END.
130  */
131 static inline
132 const struct rte_flow_item *next_no_void_pattern(
133                 const struct rte_flow_item pattern[],
134                 const struct rte_flow_item *cur)
135 {
136         const struct rte_flow_item *next =
137                 cur ? cur + 1 : &pattern[0];
138         while (1) {
139                 if (next->type != RTE_FLOW_ITEM_TYPE_VOID)
140                         return next;
141                 next++;
142         }
143 }
144
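/* Counterpart of next_no_void_pattern() for the action list; it relies on
 * the same assumption that a non-void END entry terminates the array.
 */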
145 static inline
146 const struct rte_flow_action *next_no_void_action(
147                 const struct rte_flow_action actions[],
148                 const struct rte_flow_action *cur)
149 {
150         const struct rte_flow_action *next =
151                 cur ? cur + 1 : &actions[0];
152         while (1) {
153                 if (next->type != RTE_FLOW_ACTION_TYPE_VOID)
154                         return next;
155                 next++;
156         }
157 }
158
159 /**
160  * Please be aware that all the parsers share one assumption:
161  * rte_flow_item uses big endian, while rte_flow_attr and
162  * rte_flow_action use CPU (host) order.
163  * Because the pattern is used to describe packets, the packet
164  * fields normally use network order.
165  */
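/* Illustrative example (an assumption for clarity, not code from this file):
 * an IPv4 src_addr spec of 192.168.1.20 would be supplied as
 * rte_cpu_to_be_32(IPv4(192, 168, 1, 20)), whereas attr->priority is given
 * in plain CPU order.
 */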
166
167 /**
168  * Parse the rule to see if it is an n-tuple rule,
169  * and get the n-tuple filter info if it is.
170  * pattern:
171  * The first not void item can be ETH or IPV4.
172  * The second not void item must be IPV4 if the first one is ETH.
173  * The third not void item must be UDP, TCP or SCTP.
174  * The next not void item must be END.
175  * action:
176  * The first not void action should be QUEUE.
177  * The next not void action should be END.
178  * pattern example:
179  * ITEM         Spec                    Mask
180  * ETH          NULL                    NULL
181  * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
182  *              dst_addr 192.167.3.50   0xFFFFFFFF
183  *              next_proto_id   17      0xFF
184  * UDP/TCP/     src_port        80      0xFFFF
185  * SCTP         dst_port        80      0xFFFF
186  * END
187  * Other members in mask and spec should be set to 0x00.
188  * item->last should be NULL.
189  *
190  * Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY.
191  *
192  */
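/* For reference, a rule matching the example above could be written in
 * testpmd as (illustrative only, queue index assumed):
 *   flow create 0 ingress pattern eth / ipv4 src is 192.168.1.20
 *     dst is 192.167.3.50 proto is 17 / udp src is 80 dst is 80 / end
 *     actions queue index 3 / end
 */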
193 static int
194 cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
195                          const struct rte_flow_item pattern[],
196                          const struct rte_flow_action actions[],
197                          struct rte_eth_ntuple_filter *filter,
198                          struct rte_flow_error *error)
199 {
200         const struct rte_flow_item *item;
201         const struct rte_flow_action *act;
202         const struct rte_flow_item_ipv4 *ipv4_spec;
203         const struct rte_flow_item_ipv4 *ipv4_mask;
204         const struct rte_flow_item_tcp *tcp_spec;
205         const struct rte_flow_item_tcp *tcp_mask;
206         const struct rte_flow_item_udp *udp_spec;
207         const struct rte_flow_item_udp *udp_mask;
208         const struct rte_flow_item_sctp *sctp_spec;
209         const struct rte_flow_item_sctp *sctp_mask;
210
211         if (!pattern) {
212                 rte_flow_error_set(error,
213                         EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
214                         NULL, "NULL pattern.");
215                 return -rte_errno;
216         }
217
218         if (!actions) {
219                 rte_flow_error_set(error, EINVAL,
220                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
221                                    NULL, "NULL action.");
222                 return -rte_errno;
223         }
224         if (!attr) {
225                 rte_flow_error_set(error, EINVAL,
226                                    RTE_FLOW_ERROR_TYPE_ATTR,
227                                    NULL, "NULL attribute.");
228                 return -rte_errno;
229         }
230
231 #ifdef RTE_LIBRTE_SECURITY
232         /**
233          *  Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY
234          */
235         act = next_no_void_action(actions, NULL);
236         if (act->type == RTE_FLOW_ACTION_TYPE_SECURITY) {
237                 const void *conf = act->conf;
238                 /* check if the next not void item is END */
239                 act = next_no_void_action(actions, act);
240                 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
241                         memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
242                         rte_flow_error_set(error, EINVAL,
243                                 RTE_FLOW_ERROR_TYPE_ACTION,
244                                 act, "Not supported action.");
245                         return -rte_errno;
246                 }
247
248                 /* get the IP pattern*/
249                 item = next_no_void_pattern(pattern, NULL);
250                 while (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
251                                 item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
252                         if (item->last ||
253                                         item->type == RTE_FLOW_ITEM_TYPE_END) {
254                                 rte_flow_error_set(error, EINVAL,
255                                         RTE_FLOW_ERROR_TYPE_ITEM,
256                                         item, "IP pattern missing.");
257                                 return -rte_errno;
258                         }
259                         item = next_no_void_pattern(pattern, item);
260                 }
261
262                 filter->proto = IPPROTO_ESP;
263                 return ixgbe_crypto_add_ingress_sa_from_flow(conf, item->spec,
264                                         item->type == RTE_FLOW_ITEM_TYPE_IPV6);
265         }
266 #endif
267
268         /* the first not void item can be MAC or IPv4 */
269         item = next_no_void_pattern(pattern, NULL);
270
271         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
272             item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
273                 rte_flow_error_set(error, EINVAL,
274                         RTE_FLOW_ERROR_TYPE_ITEM,
275                         item, "Not supported by ntuple filter");
276                 return -rte_errno;
277         }
278         /* Skip Ethernet */
279         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
280                 /*Not supported last point for range*/
281                 if (item->last) {
282                         rte_flow_error_set(error,
283                           EINVAL,
284                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
285                           item, "Not supported last point for range");
286                         return -rte_errno;
287
288                 }
289                 /* if the first item is MAC, the content should be NULL */
290                 if (item->spec || item->mask) {
291                         rte_flow_error_set(error, EINVAL,
292                                 RTE_FLOW_ERROR_TYPE_ITEM,
293                                 item, "Not supported by ntuple filter");
294                         return -rte_errno;
295                 }
296                 /* check if the next not void item is IPv4 */
297                 item = next_no_void_pattern(pattern, item);
298                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
299                         rte_flow_error_set(error,
300                           EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
301                           item, "Not supported by ntuple filter");
302                           return -rte_errno;
303                 }
304         }
305
306         /* get the IPv4 info */
307         if (!item->spec || !item->mask) {
308                 rte_flow_error_set(error, EINVAL,
309                         RTE_FLOW_ERROR_TYPE_ITEM,
310                         item, "Invalid ntuple mask");
311                 return -rte_errno;
312         }
313         /*Not supported last point for range*/
314         if (item->last) {
315                 rte_flow_error_set(error, EINVAL,
316                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
317                         item, "Not supported last point for range");
318                 return -rte_errno;
319
320         }
321
322         ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
323         /**
324          * Only support src & dst addresses, protocol,
325          * others should be masked.
326          */
327         if (ipv4_mask->hdr.version_ihl ||
328             ipv4_mask->hdr.type_of_service ||
329             ipv4_mask->hdr.total_length ||
330             ipv4_mask->hdr.packet_id ||
331             ipv4_mask->hdr.fragment_offset ||
332             ipv4_mask->hdr.time_to_live ||
333             ipv4_mask->hdr.hdr_checksum) {
334                         rte_flow_error_set(error,
335                         EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
336                         item, "Not supported by ntuple filter");
337                 return -rte_errno;
338         }
339
340         filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
341         filter->src_ip_mask = ipv4_mask->hdr.src_addr;
342         filter->proto_mask  = ipv4_mask->hdr.next_proto_id;
343
344         ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
345         filter->dst_ip = ipv4_spec->hdr.dst_addr;
346         filter->src_ip = ipv4_spec->hdr.src_addr;
347         filter->proto  = ipv4_spec->hdr.next_proto_id;
348
349         /* check if the next not void item is TCP or UDP */
350         item = next_no_void_pattern(pattern, item);
351         if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
352             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
353             item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
354             item->type != RTE_FLOW_ITEM_TYPE_END) {
355                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
356                 rte_flow_error_set(error, EINVAL,
357                         RTE_FLOW_ERROR_TYPE_ITEM,
358                         item, "Not supported by ntuple filter");
359                 return -rte_errno;
360         }
361
362         /* get the TCP/UDP info */
363         if ((item->type != RTE_FLOW_ITEM_TYPE_END) &&
364                 (!item->spec || !item->mask)) {
365                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
366                 rte_flow_error_set(error, EINVAL,
367                         RTE_FLOW_ERROR_TYPE_ITEM,
368                         item, "Invalid ntuple mask");
369                 return -rte_errno;
370         }
371
372         /*Not supported last point for range*/
373         if (item->last) {
374                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
375                 rte_flow_error_set(error, EINVAL,
376                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
377                         item, "Not supported last point for range");
378                 return -rte_errno;
379
380         }
381
382         if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
383                 tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
384
385                 /**
386                  * Only support src & dst ports, tcp flags,
387                  * others should be masked.
388                  */
389                 if (tcp_mask->hdr.sent_seq ||
390                     tcp_mask->hdr.recv_ack ||
391                     tcp_mask->hdr.data_off ||
392                     tcp_mask->hdr.rx_win ||
393                     tcp_mask->hdr.cksum ||
394                     tcp_mask->hdr.tcp_urp) {
395                         memset(filter, 0,
396                                 sizeof(struct rte_eth_ntuple_filter));
397                         rte_flow_error_set(error, EINVAL,
398                                 RTE_FLOW_ERROR_TYPE_ITEM,
399                                 item, "Not supported by ntuple filter");
400                         return -rte_errno;
401                 }
402
403                 filter->dst_port_mask  = tcp_mask->hdr.dst_port;
404                 filter->src_port_mask  = tcp_mask->hdr.src_port;
405                 if (tcp_mask->hdr.tcp_flags == 0xFF) {
406                         filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
407                 } else if (!tcp_mask->hdr.tcp_flags) {
408                         filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
409                 } else {
410                         memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
411                         rte_flow_error_set(error, EINVAL,
412                                 RTE_FLOW_ERROR_TYPE_ITEM,
413                                 item, "Not supported by ntuple filter");
414                         return -rte_errno;
415                 }
416
417                 tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
418                 filter->dst_port  = tcp_spec->hdr.dst_port;
419                 filter->src_port  = tcp_spec->hdr.src_port;
420                 filter->tcp_flags = tcp_spec->hdr.tcp_flags;
421         } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
422                 udp_mask = (const struct rte_flow_item_udp *)item->mask;
423
424                 /**
425                  * Only support src & dst ports,
426                  * others should be masked.
427                  */
428                 if (udp_mask->hdr.dgram_len ||
429                     udp_mask->hdr.dgram_cksum) {
430                         memset(filter, 0,
431                                 sizeof(struct rte_eth_ntuple_filter));
432                         rte_flow_error_set(error, EINVAL,
433                                 RTE_FLOW_ERROR_TYPE_ITEM,
434                                 item, "Not supported by ntuple filter");
435                         return -rte_errno;
436                 }
437
438                 filter->dst_port_mask = udp_mask->hdr.dst_port;
439                 filter->src_port_mask = udp_mask->hdr.src_port;
440
441                 udp_spec = (const struct rte_flow_item_udp *)item->spec;
442                 filter->dst_port = udp_spec->hdr.dst_port;
443                 filter->src_port = udp_spec->hdr.src_port;
444         } else if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
445                 sctp_mask = (const struct rte_flow_item_sctp *)item->mask;
446
447                 /**
448                  * Only support src & dst ports,
449                  * others should be masked.
450                  */
451                 if (sctp_mask->hdr.tag ||
452                     sctp_mask->hdr.cksum) {
453                         memset(filter, 0,
454                                 sizeof(struct rte_eth_ntuple_filter));
455                         rte_flow_error_set(error, EINVAL,
456                                 RTE_FLOW_ERROR_TYPE_ITEM,
457                                 item, "Not supported by ntuple filter");
458                         return -rte_errno;
459                 }
460
461                 filter->dst_port_mask = sctp_mask->hdr.dst_port;
462                 filter->src_port_mask = sctp_mask->hdr.src_port;
463
464                 sctp_spec = (const struct rte_flow_item_sctp *)item->spec;
465                 filter->dst_port = sctp_spec->hdr.dst_port;
466                 filter->src_port = sctp_spec->hdr.src_port;
467         } else {
468                 goto action;
469         }
470
471         /* check if the next not void item is END */
472         item = next_no_void_pattern(pattern, item);
473         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
474                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
475                 rte_flow_error_set(error, EINVAL,
476                         RTE_FLOW_ERROR_TYPE_ITEM,
477                         item, "Not supported by ntuple filter");
478                 return -rte_errno;
479         }
480
481 action:
482
483         /**
484          * n-tuple only supports forwarding,
485          * check if the first not void action is QUEUE.
486          */
487         act = next_no_void_action(actions, NULL);
488         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
489                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
490                 rte_flow_error_set(error, EINVAL,
491                         RTE_FLOW_ERROR_TYPE_ACTION,
492                         item, "Not supported action.");
493                 return -rte_errno;
494         }
495         filter->queue =
496                 ((const struct rte_flow_action_queue *)act->conf)->index;
497
498         /* check if the next not void item is END */
499         act = next_no_void_action(actions, act);
500         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
501                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
502                 rte_flow_error_set(error, EINVAL,
503                         RTE_FLOW_ERROR_TYPE_ACTION,
504                         act, "Not supported action.");
505                 return -rte_errno;
506         }
507
508         /* parse attr */
509         /* must be input direction */
510         if (!attr->ingress) {
511                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
512                 rte_flow_error_set(error, EINVAL,
513                                    RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
514                                    attr, "Only support ingress.");
515                 return -rte_errno;
516         }
517
518         /* not supported */
519         if (attr->egress) {
520                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
521                 rte_flow_error_set(error, EINVAL,
522                                    RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
523                                    attr, "Not support egress.");
524                 return -rte_errno;
525         }
526
527         if (attr->priority > 0xFFFF) {
528                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
529                 rte_flow_error_set(error, EINVAL,
530                                    RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
531                                    attr, "Error priority.");
532                 return -rte_errno;
533         }
534         filter->priority = (uint16_t)attr->priority;
535         if (attr->priority < IXGBE_MIN_N_TUPLE_PRIO ||
536             attr->priority > IXGBE_MAX_N_TUPLE_PRIO)
537             filter->priority = 1;
538
539         return 0;
540 }
541
542 /* a separate function for ixgbe because the filter flags are ixgbe-specific */
543 static int
544 ixgbe_parse_ntuple_filter(struct rte_eth_dev *dev,
545                           const struct rte_flow_attr *attr,
546                           const struct rte_flow_item pattern[],
547                           const struct rte_flow_action actions[],
548                           struct rte_eth_ntuple_filter *filter,
549                           struct rte_flow_error *error)
550 {
551         int ret;
552         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
553
554         MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
555
556         ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);
557
558         if (ret)
559                 return ret;
560
561 #ifdef RTE_LIBRTE_SECURITY
562         /* An ESP flow is not really an n-tuple flow */
563         if (filter->proto == IPPROTO_ESP)
564                 return 0;
565 #endif
566
567         /* Ixgbe doesn't support tcp flags. */
568         if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
569                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
570                 rte_flow_error_set(error, EINVAL,
571                                    RTE_FLOW_ERROR_TYPE_ITEM,
572                                    NULL, "Not supported by ntuple filter");
573                 return -rte_errno;
574         }
575
576         /* Ixgbe doesn't support many priorities. */
577         if (filter->priority < IXGBE_MIN_N_TUPLE_PRIO ||
578             filter->priority > IXGBE_MAX_N_TUPLE_PRIO) {
579                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
580                 rte_flow_error_set(error, EINVAL,
581                         RTE_FLOW_ERROR_TYPE_ITEM,
582                         NULL, "Priority not supported by ntuple filter");
583                 return -rte_errno;
584         }
585
586         if (filter->queue >= dev->data->nb_rx_queues)
587                 return -rte_errno;
588
589         /* fixed value for ixgbe */
590         filter->flags = RTE_5TUPLE_FLAGS;
591         return 0;
592 }
593
594 /**
595  * Parse the rule to see if it is an ethertype rule,
596  * and get the ethertype filter info if it is.
597  * pattern:
598  * The first not void item can be ETH.
599  * The next not void item must be END.
600  * action:
601  * The first not void action should be QUEUE.
602  * The next not void action should be END.
603  * pattern example:
604  * ITEM         Spec                    Mask
605  * ETH          type    0x0807          0xFFFF
606  * END
607  * Other members in mask and spec should be set to 0x00.
608  * item->last should be NULL.
609  */
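/* An equivalent testpmd command would be (illustrative only, queue index
 * assumed):
 *   flow create 0 ingress pattern eth type is 0x0807 / end
 *     actions queue index 3 / end
 */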
610 static int
611 cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
612                             const struct rte_flow_item *pattern,
613                             const struct rte_flow_action *actions,
614                             struct rte_eth_ethertype_filter *filter,
615                             struct rte_flow_error *error)
616 {
617         const struct rte_flow_item *item;
618         const struct rte_flow_action *act;
619         const struct rte_flow_item_eth *eth_spec;
620         const struct rte_flow_item_eth *eth_mask;
621         const struct rte_flow_action_queue *act_q;
622
623         if (!pattern) {
624                 rte_flow_error_set(error, EINVAL,
625                                 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
626                                 NULL, "NULL pattern.");
627                 return -rte_errno;
628         }
629
630         if (!actions) {
631                 rte_flow_error_set(error, EINVAL,
632                                 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
633                                 NULL, "NULL action.");
634                 return -rte_errno;
635         }
636
637         if (!attr) {
638                 rte_flow_error_set(error, EINVAL,
639                                    RTE_FLOW_ERROR_TYPE_ATTR,
640                                    NULL, "NULL attribute.");
641                 return -rte_errno;
642         }
643
644         item = next_no_void_pattern(pattern, NULL);
645         /* The first non-void item should be MAC. */
646         if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
647                 rte_flow_error_set(error, EINVAL,
648                         RTE_FLOW_ERROR_TYPE_ITEM,
649                         item, "Not supported by ethertype filter");
650                 return -rte_errno;
651         }
652
653         /*Not supported last point for range*/
654         if (item->last) {
655                 rte_flow_error_set(error, EINVAL,
656                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
657                         item, "Not supported last point for range");
658                 return -rte_errno;
659         }
660
661         /* Get the MAC info. */
662         if (!item->spec || !item->mask) {
663                 rte_flow_error_set(error, EINVAL,
664                                 RTE_FLOW_ERROR_TYPE_ITEM,
665                                 item, "Not supported by ethertype filter");
666                 return -rte_errno;
667         }
668
669         eth_spec = (const struct rte_flow_item_eth *)item->spec;
670         eth_mask = (const struct rte_flow_item_eth *)item->mask;
671
672         /* Mask bits of source MAC address must be full of 0.
673          * Mask bits of destination MAC address must be full
674          * of 1 or full of 0.
675          */
676         if (!is_zero_ether_addr(&eth_mask->src) ||
677             (!is_zero_ether_addr(&eth_mask->dst) &&
678              !is_broadcast_ether_addr(&eth_mask->dst))) {
679                 rte_flow_error_set(error, EINVAL,
680                                 RTE_FLOW_ERROR_TYPE_ITEM,
681                                 item, "Invalid ether address mask");
682                 return -rte_errno;
683         }
684
685         if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
686                 rte_flow_error_set(error, EINVAL,
687                                 RTE_FLOW_ERROR_TYPE_ITEM,
688                                 item, "Invalid ethertype mask");
689                 return -rte_errno;
690         }
691
692         /* If mask bits of destination MAC address
693          * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
694          */
695         if (is_broadcast_ether_addr(&eth_mask->dst)) {
696                 filter->mac_addr = eth_spec->dst;
697                 filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
698         } else {
699                 filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
700         }
701         filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
702
703         /* Check if the next non-void item is END. */
704         item = next_no_void_pattern(pattern, item);
705         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
706                 rte_flow_error_set(error, EINVAL,
707                                 RTE_FLOW_ERROR_TYPE_ITEM,
708                                 item, "Not supported by ethertype filter.");
709                 return -rte_errno;
710         }
711
712         /* Parse action */
713
714         act = next_no_void_action(actions, NULL);
715         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
716             act->type != RTE_FLOW_ACTION_TYPE_DROP) {
717                 rte_flow_error_set(error, EINVAL,
718                                 RTE_FLOW_ERROR_TYPE_ACTION,
719                                 act, "Not supported action.");
720                 return -rte_errno;
721         }
722
723         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
724                 act_q = (const struct rte_flow_action_queue *)act->conf;
725                 filter->queue = act_q->index;
726         } else {
727                 filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
728         }
729
730         /* Check if the next non-void item is END */
731         act = next_no_void_action(actions, act);
732         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
733                 rte_flow_error_set(error, EINVAL,
734                                 RTE_FLOW_ERROR_TYPE_ACTION,
735                                 act, "Not supported action.");
736                 return -rte_errno;
737         }
738
739         /* Parse attr */
740         /* Must be input direction */
741         if (!attr->ingress) {
742                 rte_flow_error_set(error, EINVAL,
743                                 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
744                                 attr, "Only support ingress.");
745                 return -rte_errno;
746         }
747
748         /* Not supported */
749         if (attr->egress) {
750                 rte_flow_error_set(error, EINVAL,
751                                 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
752                                 attr, "Not support egress.");
753                 return -rte_errno;
754         }
755
756         /* Not supported */
757         if (attr->priority) {
758                 rte_flow_error_set(error, EINVAL,
759                                 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
760                                 attr, "Not support priority.");
761                 return -rte_errno;
762         }
763
764         /* Not supported */
765         if (attr->group) {
766                 rte_flow_error_set(error, EINVAL,
767                                 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
768                                 attr, "Not support group.");
769                 return -rte_errno;
770         }
771
772         return 0;
773 }
774
775 static int
776 ixgbe_parse_ethertype_filter(struct rte_eth_dev *dev,
777                                  const struct rte_flow_attr *attr,
778                              const struct rte_flow_item pattern[],
779                              const struct rte_flow_action actions[],
780                              struct rte_eth_ethertype_filter *filter,
781                              struct rte_flow_error *error)
782 {
783         int ret;
784         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
785
786         MAC_TYPE_FILTER_SUP(hw->mac.type);
787
788         ret = cons_parse_ethertype_filter(attr, pattern,
789                                         actions, filter, error);
790
791         if (ret)
792                 return ret;
793
794         /* Ixgbe doesn't support MAC address. */
795         if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
796                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
797                 rte_flow_error_set(error, EINVAL,
798                         RTE_FLOW_ERROR_TYPE_ITEM,
799                         NULL, "Not supported by ethertype filter");
800                 return -rte_errno;
801         }
802
803         if (filter->queue >= dev->data->nb_rx_queues) {
804                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
805                 rte_flow_error_set(error, EINVAL,
806                         RTE_FLOW_ERROR_TYPE_ITEM,
807                         NULL, "queue index much too big");
808                 return -rte_errno;
809         }
810
811         if (filter->ether_type == ETHER_TYPE_IPv4 ||
812                 filter->ether_type == ETHER_TYPE_IPv6) {
813                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
814                 rte_flow_error_set(error, EINVAL,
815                         RTE_FLOW_ERROR_TYPE_ITEM,
816                         NULL, "IPv4/IPv6 not supported by ethertype filter");
817                 return -rte_errno;
818         }
819
820         if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
821                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
822                 rte_flow_error_set(error, EINVAL,
823                         RTE_FLOW_ERROR_TYPE_ITEM,
824                         NULL, "mac compare is unsupported");
825                 return -rte_errno;
826         }
827
828         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
829                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
830                 rte_flow_error_set(error, EINVAL,
831                         RTE_FLOW_ERROR_TYPE_ITEM,
832                         NULL, "drop option is unsupported");
833                 return -rte_errno;
834         }
835
836         return 0;
837 }
838
839 /**
840  * Parse the rule to see if it is a TCP SYN rule,
841  * and get the TCP SYN filter info if it is.
842  * pattern:
843  * The first not void item must be ETH.
844  * The second not void item must be IPV4 or IPV6.
845  * The third not void item must be TCP.
846  * The next not void item must be END.
847  * action:
848  * The first not void action should be QUEUE.
849  * The next not void action should be END.
850  * pattern example:
851  * ITEM         Spec                    Mask
852  * ETH          NULL                    NULL
853  * IPV4/IPV6    NULL                    NULL
854  * TCP          tcp_flags       0x02    0xFF
855  * END
856  * Other members in mask and spec should be set to 0x00.
857  * item->last should be NULL.
858  */
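/* Illustrative spec/mask pair accepted by this parser (only the SYN bit of
 * tcp_flags may be set and masked):
 *   struct rte_flow_item_tcp tcp_spec = { .hdr.tcp_flags = TCP_SYN_FLAG };
 *   struct rte_flow_item_tcp tcp_mask = { .hdr.tcp_flags = TCP_SYN_FLAG };
 */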
859 static int
860 cons_parse_syn_filter(const struct rte_flow_attr *attr,
861                                 const struct rte_flow_item pattern[],
862                                 const struct rte_flow_action actions[],
863                                 struct rte_eth_syn_filter *filter,
864                                 struct rte_flow_error *error)
865 {
866         const struct rte_flow_item *item;
867         const struct rte_flow_action *act;
868         const struct rte_flow_item_tcp *tcp_spec;
869         const struct rte_flow_item_tcp *tcp_mask;
870         const struct rte_flow_action_queue *act_q;
871
872         if (!pattern) {
873                 rte_flow_error_set(error, EINVAL,
874                                 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
875                                 NULL, "NULL pattern.");
876                 return -rte_errno;
877         }
878
879         if (!actions) {
880                 rte_flow_error_set(error, EINVAL,
881                                 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
882                                 NULL, "NULL action.");
883                 return -rte_errno;
884         }
885
886         if (!attr) {
887                 rte_flow_error_set(error, EINVAL,
888                                    RTE_FLOW_ERROR_TYPE_ATTR,
889                                    NULL, "NULL attribute.");
890                 return -rte_errno;
891         }
892
893
894         /* the first not void item should be MAC or IPv4 or IPv6 or TCP */
895         item = next_no_void_pattern(pattern, NULL);
896         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
897             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
898             item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
899             item->type != RTE_FLOW_ITEM_TYPE_TCP) {
900                 rte_flow_error_set(error, EINVAL,
901                                 RTE_FLOW_ERROR_TYPE_ITEM,
902                                 item, "Not supported by syn filter");
903                 return -rte_errno;
904         }
905         /* Not supported last point for range */
906         if (item->last) {
907                 rte_flow_error_set(error, EINVAL,
908                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
909                         item, "Not supported last point for range");
910                 return -rte_errno;
911         }
912
913         /* Skip Ethernet */
914         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
915                 /* if the item is MAC, the content should be NULL */
916                 if (item->spec || item->mask) {
917                         rte_flow_error_set(error, EINVAL,
918                                 RTE_FLOW_ERROR_TYPE_ITEM,
919                                 item, "Invalid SYN address mask");
920                         return -rte_errno;
921                 }
922
923                 /* check if the next not void item is IPv4 or IPv6 */
924                 item = next_no_void_pattern(pattern, item);
925                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
926                     item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
927                         rte_flow_error_set(error, EINVAL,
928                                 RTE_FLOW_ERROR_TYPE_ITEM,
929                                 item, "Not supported by syn filter");
930                         return -rte_errno;
931                 }
932         }
933
934         /* Skip IP */
935         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
936             item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
937                 /* if the item is IP, the content should be NULL */
938                 if (item->spec || item->mask) {
939                         rte_flow_error_set(error, EINVAL,
940                                 RTE_FLOW_ERROR_TYPE_ITEM,
941                                 item, "Invalid SYN mask");
942                         return -rte_errno;
943                 }
944
945                 /* check if the next not void item is TCP */
946                 item = next_no_void_pattern(pattern, item);
947                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
948                         rte_flow_error_set(error, EINVAL,
949                                 RTE_FLOW_ERROR_TYPE_ITEM,
950                                 item, "Not supported by syn filter");
951                         return -rte_errno;
952                 }
953         }
954
955         /* Get the TCP info. Only support SYN. */
956         if (!item->spec || !item->mask) {
957                 rte_flow_error_set(error, EINVAL,
958                                 RTE_FLOW_ERROR_TYPE_ITEM,
959                                 item, "Invalid SYN mask");
960                 return -rte_errno;
961         }
962         /*Not supported last point for range*/
963         if (item->last) {
964                 rte_flow_error_set(error, EINVAL,
965                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
966                         item, "Not supported last point for range");
967                 return -rte_errno;
968         }
969
970         tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
971         tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
972         if (!(tcp_spec->hdr.tcp_flags & TCP_SYN_FLAG) ||
973             tcp_mask->hdr.src_port ||
974             tcp_mask->hdr.dst_port ||
975             tcp_mask->hdr.sent_seq ||
976             tcp_mask->hdr.recv_ack ||
977             tcp_mask->hdr.data_off ||
978             tcp_mask->hdr.tcp_flags != TCP_SYN_FLAG ||
979             tcp_mask->hdr.rx_win ||
980             tcp_mask->hdr.cksum ||
981             tcp_mask->hdr.tcp_urp) {
982                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
983                 rte_flow_error_set(error, EINVAL,
984                                 RTE_FLOW_ERROR_TYPE_ITEM,
985                                 item, "Not supported by syn filter");
986                 return -rte_errno;
987         }
988
989         /* check if the next not void item is END */
990         item = next_no_void_pattern(pattern, item);
991         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
992                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
993                 rte_flow_error_set(error, EINVAL,
994                                 RTE_FLOW_ERROR_TYPE_ITEM,
995                                 item, "Not supported by syn filter");
996                 return -rte_errno;
997         }
998
999         /* check if the first not void action is QUEUE. */
1000         act = next_no_void_action(actions, NULL);
1001         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
1002                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1003                 rte_flow_error_set(error, EINVAL,
1004                                 RTE_FLOW_ERROR_TYPE_ACTION,
1005                                 act, "Not supported action.");
1006                 return -rte_errno;
1007         }
1008
1009         act_q = (const struct rte_flow_action_queue *)act->conf;
1010         filter->queue = act_q->index;
1011         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
1012                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1013                 rte_flow_error_set(error, EINVAL,
1014                                 RTE_FLOW_ERROR_TYPE_ACTION,
1015                                 act, "Not supported action.");
1016                 return -rte_errno;
1017         }
1018
1019         /* check if the next not void item is END */
1020         act = next_no_void_action(actions, act);
1021         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1022                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1023                 rte_flow_error_set(error, EINVAL,
1024                                 RTE_FLOW_ERROR_TYPE_ACTION,
1025                                 act, "Not supported action.");
1026                 return -rte_errno;
1027         }
1028
1029         /* parse attr */
1030         /* must be input direction */
1031         if (!attr->ingress) {
1032                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1033                 rte_flow_error_set(error, EINVAL,
1034                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1035                         attr, "Only support ingress.");
1036                 return -rte_errno;
1037         }
1038
1039         /* not supported */
1040         if (attr->egress) {
1041                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1042                 rte_flow_error_set(error, EINVAL,
1043                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1044                         attr, "Not support egress.");
1045                 return -rte_errno;
1046         }
1047
1048         /* Support 2 priorities, the lowest or highest. */
1049         if (!attr->priority) {
1050                 filter->hig_pri = 0;
1051         } else if (attr->priority == (uint32_t)~0U) {
1052                 filter->hig_pri = 1;
1053         } else {
1054                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1055                 rte_flow_error_set(error, EINVAL,
1056                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1057                         attr, "Not support priority.");
1058                 return -rte_errno;
1059         }
1060
1061         return 0;
1062 }
1063
1064 static int
1065 ixgbe_parse_syn_filter(struct rte_eth_dev *dev,
1066                                  const struct rte_flow_attr *attr,
1067                              const struct rte_flow_item pattern[],
1068                              const struct rte_flow_action actions[],
1069                              struct rte_eth_syn_filter *filter,
1070                              struct rte_flow_error *error)
1071 {
1072         int ret;
1073         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1074
1075         MAC_TYPE_FILTER_SUP(hw->mac.type);
1076
1077         ret = cons_parse_syn_filter(attr, pattern,
1078                                         actions, filter, error);
1079
1080         if (ret)
1081                 return ret;
1082
1083         if (filter->queue >= dev->data->nb_rx_queues)
1084                 return -rte_errno;
1085
1086         return 0;
1087 }
1088
1089 /**
1090  * Parse the rule to see if it is an L2 tunnel rule,
1091  * and get the L2 tunnel filter info if it is.
1092  * Only E-tag is supported now.
1093  * pattern:
1094  * The first not void item can be E_TAG.
1095  * The next not void item must be END.
1096  * action:
1097  * The first not void action should be VF or PF.
1098  * The next not void action should be END.
1099  * pattern example:
1100  * ITEM         Spec                    Mask
1101  * E_TAG        grp             0x1     0x3
1102  *              e_cid_base      0x309   0xFFF
1103  * END
1104  * Other members in mask and spec should be set to 0x00.
1105  * item->last should be NULL.
1106  */
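/* Illustrative spec/mask pair matching the example above, with grp 0x1 and
 * e_cid_base 0x309 packed into the 14-bit rsvd_grp_ecid_b field:
 *   struct rte_flow_item_e_tag spec = {
 *           .rsvd_grp_ecid_b = rte_cpu_to_be_16((0x1 << 12) | 0x309) };
 *   struct rte_flow_item_e_tag mask = {
 *           .rsvd_grp_ecid_b = rte_cpu_to_be_16(0x3FFF) };
 */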
1107 static int
1108 cons_parse_l2_tn_filter(struct rte_eth_dev *dev,
1109                         const struct rte_flow_attr *attr,
1110                         const struct rte_flow_item pattern[],
1111                         const struct rte_flow_action actions[],
1112                         struct rte_eth_l2_tunnel_conf *filter,
1113                         struct rte_flow_error *error)
1114 {
1115         const struct rte_flow_item *item;
1116         const struct rte_flow_item_e_tag *e_tag_spec;
1117         const struct rte_flow_item_e_tag *e_tag_mask;
1118         const struct rte_flow_action *act;
1119         const struct rte_flow_action_vf *act_vf;
1120         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1121
1122         if (!pattern) {
1123                 rte_flow_error_set(error, EINVAL,
1124                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1125                         NULL, "NULL pattern.");
1126                 return -rte_errno;
1127         }
1128
1129         if (!actions) {
1130                 rte_flow_error_set(error, EINVAL,
1131                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1132                                    NULL, "NULL action.");
1133                 return -rte_errno;
1134         }
1135
1136         if (!attr) {
1137                 rte_flow_error_set(error, EINVAL,
1138                                    RTE_FLOW_ERROR_TYPE_ATTR,
1139                                    NULL, "NULL attribute.");
1140                 return -rte_errno;
1141         }
1142
1143         /* The first not void item should be e-tag. */
1144         item = next_no_void_pattern(pattern, NULL);
1145         if (item->type != RTE_FLOW_ITEM_TYPE_E_TAG) {
1146                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1147                 rte_flow_error_set(error, EINVAL,
1148                         RTE_FLOW_ERROR_TYPE_ITEM,
1149                         item, "Not supported by L2 tunnel filter");
1150                 return -rte_errno;
1151         }
1152
1153         if (!item->spec || !item->mask) {
1154                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1155                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
1156                         item, "Not supported by L2 tunnel filter");
1157                 return -rte_errno;
1158         }
1159
1160         /*Not supported last point for range*/
1161         if (item->last) {
1162                 rte_flow_error_set(error, EINVAL,
1163                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1164                         item, "Not supported last point for range");
1165                 return -rte_errno;
1166         }
1167
1168         e_tag_spec = (const struct rte_flow_item_e_tag *)item->spec;
1169         e_tag_mask = (const struct rte_flow_item_e_tag *)item->mask;
1170
1171         /* Only care about GRP and E cid base. */
1172         if (e_tag_mask->epcp_edei_in_ecid_b ||
1173             e_tag_mask->in_ecid_e ||
1174             e_tag_mask->ecid_e ||
1175             e_tag_mask->rsvd_grp_ecid_b != rte_cpu_to_be_16(0x3FFF)) {
1176                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1177                 rte_flow_error_set(error, EINVAL,
1178                         RTE_FLOW_ERROR_TYPE_ITEM,
1179                         item, "Not supported by L2 tunnel filter");
1180                 return -rte_errno;
1181         }
1182
1183         filter->l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
1184         /**
1185          * grp and e_cid_base are bit fields and only use 14 bits.
1186          * e-tag id is taken as little endian by HW.
1187          */
1188         filter->tunnel_id = rte_be_to_cpu_16(e_tag_spec->rsvd_grp_ecid_b);
1189
1190         /* check if the next not void item is END */
1191         item = next_no_void_pattern(pattern, item);
1192         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1193                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1194                 rte_flow_error_set(error, EINVAL,
1195                         RTE_FLOW_ERROR_TYPE_ITEM,
1196                         item, "Not supported by L2 tunnel filter");
1197                 return -rte_errno;
1198         }
1199
1200         /* parse attr */
1201         /* must be input direction */
1202         if (!attr->ingress) {
1203                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1204                 rte_flow_error_set(error, EINVAL,
1205                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1206                         attr, "Only support ingress.");
1207                 return -rte_errno;
1208         }
1209
1210         /* not supported */
1211         if (attr->egress) {
1212                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1213                 rte_flow_error_set(error, EINVAL,
1214                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1215                         attr, "Not support egress.");
1216                 return -rte_errno;
1217         }
1218
1219         /* not supported */
1220         if (attr->priority) {
1221                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1222                 rte_flow_error_set(error, EINVAL,
1223                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1224                         attr, "Not support priority.");
1225                 return -rte_errno;
1226         }
1227
1228         /* check if the first not void action is VF or PF. */
1229         act = next_no_void_action(actions, NULL);
1230         if (act->type != RTE_FLOW_ACTION_TYPE_VF &&
1231                         act->type != RTE_FLOW_ACTION_TYPE_PF) {
1232                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1233                 rte_flow_error_set(error, EINVAL,
1234                         RTE_FLOW_ERROR_TYPE_ACTION,
1235                         act, "Not supported action.");
1236                 return -rte_errno;
1237         }
1238
1239         if (act->type == RTE_FLOW_ACTION_TYPE_VF) {
1240                 act_vf = (const struct rte_flow_action_vf *)act->conf;
1241                 filter->pool = act_vf->id;
1242         } else {
1243                 filter->pool = pci_dev->max_vfs;
1244         }
1245
1246         /* check if the next not void item is END */
1247         act = next_no_void_action(actions, act);
1248         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1249                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1250                 rte_flow_error_set(error, EINVAL,
1251                         RTE_FLOW_ERROR_TYPE_ACTION,
1252                         act, "Not supported action.");
1253                 return -rte_errno;
1254         }
1255
1256         return 0;
1257 }
1258
1259 static int
1260 ixgbe_parse_l2_tn_filter(struct rte_eth_dev *dev,
1261                         const struct rte_flow_attr *attr,
1262                         const struct rte_flow_item pattern[],
1263                         const struct rte_flow_action actions[],
1264                         struct rte_eth_l2_tunnel_conf *l2_tn_filter,
1265                         struct rte_flow_error *error)
1266 {
1267         int ret = 0;
1268         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1269         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1270         uint16_t vf_num;
1271
1272         ret = cons_parse_l2_tn_filter(dev, attr, pattern,
1273                                 actions, l2_tn_filter, error);
1274
1275         if (hw->mac.type != ixgbe_mac_X550 &&
1276                 hw->mac.type != ixgbe_mac_X550EM_x &&
1277                 hw->mac.type != ixgbe_mac_X550EM_a) {
1278                 memset(l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1279                 rte_flow_error_set(error, EINVAL,
1280                         RTE_FLOW_ERROR_TYPE_ITEM,
1281                         NULL, "Not supported by L2 tunnel filter");
1282                 return -rte_errno;
1283         }
1284
1285         vf_num = pci_dev->max_vfs;
1286
1287         if (l2_tn_filter->pool > vf_num)
1288                 return -rte_errno;
1289
1290         return ret;
1291 }
1292
1293 /* Parse to get the attr and action info of flow director rule. */
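/* The accepted action list is QUEUE or DROP, optionally followed by MARK,
 * then END; in testpmd terms (illustrative): actions queue index 3 /
 * mark id 1 / end
 */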
1294 static int
1295 ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
1296                           const struct rte_flow_action actions[],
1297                           struct ixgbe_fdir_rule *rule,
1298                           struct rte_flow_error *error)
1299 {
1300         const struct rte_flow_action *act;
1301         const struct rte_flow_action_queue *act_q;
1302         const struct rte_flow_action_mark *mark;
1303
1304         /* parse attr */
1305         /* must be input direction */
1306         if (!attr->ingress) {
1307                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1308                 rte_flow_error_set(error, EINVAL,
1309                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1310                         attr, "Only support ingress.");
1311                 return -rte_errno;
1312         }
1313
1314         /* not supported */
1315         if (attr->egress) {
1316                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1317                 rte_flow_error_set(error, EINVAL,
1318                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1319                         attr, "Not support egress.");
1320                 return -rte_errno;
1321         }
1322
1323         /* not supported */
1324         if (attr->priority) {
1325                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1326                 rte_flow_error_set(error, EINVAL,
1327                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1328                         attr, "Not support priority.");
1329                 return -rte_errno;
1330         }
1331
1332         /* check if the first not void action is QUEUE or DROP. */
1333         act = next_no_void_action(actions, NULL);
1334         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
1335             act->type != RTE_FLOW_ACTION_TYPE_DROP) {
1336                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1337                 rte_flow_error_set(error, EINVAL,
1338                         RTE_FLOW_ERROR_TYPE_ACTION,
1339                         act, "Not supported action.");
1340                 return -rte_errno;
1341         }
1342
1343         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
1344                 act_q = (const struct rte_flow_action_queue *)act->conf;
1345                 rule->queue = act_q->index;
1346         } else { /* drop */
1347                 /* signature mode does not support drop action. */
1348                 if (rule->mode == RTE_FDIR_MODE_SIGNATURE) {
1349                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1350                         rte_flow_error_set(error, EINVAL,
1351                                 RTE_FLOW_ERROR_TYPE_ACTION,
1352                                 act, "Not supported action.");
1353                         return -rte_errno;
1354                 }
1355                 rule->fdirflags = IXGBE_FDIRCMD_DROP;
1356         }
1357
1358         /* check if the next not void action is MARK or END */
1359         act = next_no_void_action(actions, act);
1360         if ((act->type != RTE_FLOW_ACTION_TYPE_MARK) &&
1361                 (act->type != RTE_FLOW_ACTION_TYPE_END)) {
1362                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1363                 rte_flow_error_set(error, EINVAL,
1364                         RTE_FLOW_ERROR_TYPE_ACTION,
1365                         act, "Not supported action.");
1366                 return -rte_errno;
1367         }
1368
1369         rule->soft_id = 0;
1370
1371         if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
1372                 mark = (const struct rte_flow_action_mark *)act->conf;
1373                 rule->soft_id = mark->id;
1374                 act = next_no_void_action(actions, act);
1375         }
1376
1377         /* check if the next not void action is END */
1378         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1379                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1380                 rte_flow_error_set(error, EINVAL,
1381                         RTE_FLOW_ERROR_TYPE_ACTION,
1382                         act, "Not supported action.");
1383                 return -rte_errno;
1384         }
1385
1386         return 0;
1387 }
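
/*
 * A minimal application-side sketch of an action list accepted by
 * ixgbe_parse_fdir_act_attr(): steer matched packets to queue 3 and tag
 * them with mark id 0x1234. Queue index, mark id and variable names are
 * illustrative only.
 *
 *	struct rte_flow_action_queue queue_conf = { .index = 3 };
 *	struct rte_flow_action_mark mark_conf = { .id = 0x1234 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue_conf },
 *		{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark_conf },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *
 * Replacing the QUEUE action with RTE_FLOW_ACTION_TYPE_DROP is also
 * accepted, except in signature mode as checked above.
 */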
1388
1389 /* Search the next not void pattern item, skipping any FUZZY items. */
1390 static inline
1391 const struct rte_flow_item *next_no_fuzzy_pattern(
1392                 const struct rte_flow_item pattern[],
1393                 const struct rte_flow_item *cur)
1394 {
1395         const struct rte_flow_item *next =
1396                 next_no_void_pattern(pattern, cur);
1397         while (1) {
1398                 if (next->type != RTE_FLOW_ITEM_TYPE_FUZZY)
1399                         return next;
1400                 next = next_no_void_pattern(pattern, next);
1401         }
1402 }
1403
1404 static inline uint8_t signature_match(const struct rte_flow_item pattern[])
1405 {
1406         const struct rte_flow_item_fuzzy *spec, *last, *mask;
1407         const struct rte_flow_item *item;
1408         uint32_t sh, lh, mh;
1409         int i = 0;
1410
1411         while (1) {
1412                 item = pattern + i;
1413                 if (item->type == RTE_FLOW_ITEM_TYPE_END)
1414                         break;
1415
1416                 if (item->type == RTE_FLOW_ITEM_TYPE_FUZZY) {
1417                         spec =
1418                         (const struct rte_flow_item_fuzzy *)item->spec;
1419                         last =
1420                         (const struct rte_flow_item_fuzzy *)item->last;
1421                         mask =
1422                         (const struct rte_flow_item_fuzzy *)item->mask;
1423
1424                         if (!spec || !mask)
1425                                 return 0;
1426
1427                         sh = spec->thresh;
1428
1429                         if (!last)
1430                                 lh = sh;
1431                         else
1432                                 lh = last->thresh;
1433
1434                         mh = mask->thresh;
1435                         sh = sh & mh;
1436                         lh = lh & mh;
1437
1438                         if (!sh || sh > lh)
1439                                 return 0;
1440
1441                         return 1;
1442                 }
1443
1444                 i++;
1445         }
1446
1447         return 0;
1448 }
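
/*
 * A minimal sketch of the FUZZY item that signature_match() looks for.
 * A non-zero threshold in both spec and mask selects signature mode rather
 * than perfect mode; the threshold value and names are illustrative.
 *
 *	struct rte_flow_item_fuzzy fuzzy_spec = { .thresh = 1 };
 *	struct rte_flow_item_fuzzy fuzzy_mask = { .thresh = 0xffffffff };
 *	struct rte_flow_item fuzzy_item = {
 *		.type = RTE_FLOW_ITEM_TYPE_FUZZY,
 *		.spec = &fuzzy_spec,
 *		.mask = &fuzzy_mask,
 *	};
 *
 * Such an item may appear anywhere in the pattern before END.
 */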
1449
1450 /**
1451  * Parse the rule to see if it is an IP or MAC VLAN flow director rule,
1452  * and fill in the flow director filter info along the way.
1453  * UDP/TCP/SCTP PATTERN:
1454  * The first not void item can be ETH or IPV4 or IPV6
1455  * The second not void item must be IPV4 or IPV6 if the first one is ETH.
1456  * The next not void item could be UDP or TCP or SCTP (optional)
1457  * The next not void item could be RAW (for flexbyte, optional)
1458  * The next not void item must be END.
1459  * A Fuzzy Match pattern can appear at any place before END.
1460  * Fuzzy Match is optional for IPV4 but is required for IPV6
1461  * MAC VLAN PATTERN:
1462  * The first not void item must be ETH.
1463  * The second not void item must be MAC VLAN.
1464  * The next not void item must be END.
1465  * ACTION:
1466  * The first not void action should be QUEUE or DROP.
1467  * The second not void optional action should be MARK,
1468  * mark_id is a uint32_t number.
1469  * The next not void action should be END.
1470  * UDP/TCP/SCTP pattern example:
1471  * ITEM         Spec                    Mask
1472  * ETH          NULL                    NULL
1473  * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
1474  *              dst_addr 192.167.3.50   0xFFFFFFFF
1475  * UDP/TCP/SCTP src_port        80      0xFFFF
1476  *              dst_port        80      0xFFFF
1477  * FLEX relative        0       0x1
1478  *              search          0       0x1
1479  *              reserved        0       0
1480  *              offset          12      0xFFFFFFFF
1481  *              limit           0       0xFFFF
1482  *              length          2       0xFFFF
1483  *              pattern[0]      0x86    0xFF
1484  *              pattern[1]      0xDD    0xFF
1485  * END
1486  * MAC VLAN pattern example:
1487  * ITEM         Spec                    Mask
1488  * ETH          dst_addr
1489  *              {0xAC, 0x7B, 0xA1,      {0xFF, 0xFF, 0xFF,
1490  *              0x2C, 0x6D, 0x36}       0xFF, 0xFF, 0xFF}
1491  * MAC VLAN     tci     0x2016          0xEFFF
1492  * END
1493  * Other members in mask and spec should be set to 0x00.
1494  * Item->last should be NULL.
1495  */
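/*
 * A minimal application-side sketch of the UDP pattern described above,
 * assuming a perfect-mode rule on 192.168.1.20 (0xC0A80114) ->
 * 192.167.3.50 (0xC0A70332) with UDP source and destination port 80.
 * Variable names are illustrative; addresses and ports are big-endian.
 *
 *	struct rte_flow_item_ipv4 ip_spec = { .hdr = { 0 } };
 *	struct rte_flow_item_ipv4 ip_mask = { .hdr = { 0 } };
 *	struct rte_flow_item_udp udp_spec = { .hdr = { 0 } };
 *	struct rte_flow_item_udp udp_mask = { .hdr = { 0 } };
 *
 *	ip_spec.hdr.src_addr = rte_cpu_to_be_32(0xC0A80114);
 *	ip_spec.hdr.dst_addr = rte_cpu_to_be_32(0xC0A70332);
 *	ip_mask.hdr.src_addr = rte_cpu_to_be_32(0xFFFFFFFF);
 *	ip_mask.hdr.dst_addr = rte_cpu_to_be_32(0xFFFFFFFF);
 *	udp_spec.hdr.src_port = rte_cpu_to_be_16(80);
 *	udp_spec.hdr.dst_port = rte_cpu_to_be_16(80);
 *	udp_mask.hdr.src_port = rte_cpu_to_be_16(0xFFFF);
 *	udp_mask.hdr.dst_port = rte_cpu_to_be_16(0xFFFF);
 *
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		  .spec = &ip_spec, .mask = &ip_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
 *		  .spec = &udp_spec, .mask = &udp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 */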
1496 static int
1497 ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
1498                                const struct rte_flow_attr *attr,
1499                                const struct rte_flow_item pattern[],
1500                                const struct rte_flow_action actions[],
1501                                struct ixgbe_fdir_rule *rule,
1502                                struct rte_flow_error *error)
1503 {
1504         const struct rte_flow_item *item;
1505         const struct rte_flow_item_eth *eth_spec;
1506         const struct rte_flow_item_eth *eth_mask;
1507         const struct rte_flow_item_ipv4 *ipv4_spec;
1508         const struct rte_flow_item_ipv4 *ipv4_mask;
1509         const struct rte_flow_item_ipv6 *ipv6_spec;
1510         const struct rte_flow_item_ipv6 *ipv6_mask;
1511         const struct rte_flow_item_tcp *tcp_spec;
1512         const struct rte_flow_item_tcp *tcp_mask;
1513         const struct rte_flow_item_udp *udp_spec;
1514         const struct rte_flow_item_udp *udp_mask;
1515         const struct rte_flow_item_sctp *sctp_spec;
1516         const struct rte_flow_item_sctp *sctp_mask;
1517         const struct rte_flow_item_vlan *vlan_spec;
1518         const struct rte_flow_item_vlan *vlan_mask;
1519         const struct rte_flow_item_raw *raw_mask;
1520         const struct rte_flow_item_raw *raw_spec;
1521         uint8_t j;
1522
1523         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1524
1525         if (!pattern) {
1526                 rte_flow_error_set(error, EINVAL,
1527                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1528                         NULL, "NULL pattern.");
1529                 return -rte_errno;
1530         }
1531
1532         if (!actions) {
1533                 rte_flow_error_set(error, EINVAL,
1534                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1535                                    NULL, "NULL action.");
1536                 return -rte_errno;
1537         }
1538
1539         if (!attr) {
1540                 rte_flow_error_set(error, EINVAL,
1541                                    RTE_FLOW_ERROR_TYPE_ATTR,
1542                                    NULL, "NULL attribute.");
1543                 return -rte_errno;
1544         }
1545
1546         /**
1547          * Some fields may not be provided. Set spec to 0 and mask to default
1548          * value. So, we need not do anything for the not provided fields later.
1549          */
1550         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1551         memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
1552         rule->mask.vlan_tci_mask = 0;
1553         rule->mask.flex_bytes_mask = 0;
1554
1555         /**
1556          * The first not void item should be
1557          * MAC or IPv4 or IPv6 or TCP or UDP or SCTP.
1558          */
1559         item = next_no_fuzzy_pattern(pattern, NULL);
1560         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
1561             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
1562             item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
1563             item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1564             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1565             item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
1566                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1567                 rte_flow_error_set(error, EINVAL,
1568                         RTE_FLOW_ERROR_TYPE_ITEM,
1569                         item, "Not supported by fdir filter");
1570                 return -rte_errno;
1571         }
1572
1573         if (signature_match(pattern))
1574                 rule->mode = RTE_FDIR_MODE_SIGNATURE;
1575         else
1576                 rule->mode = RTE_FDIR_MODE_PERFECT;
1577
1578         /*Not supported last point for range*/
1579         if (item->last) {
1580                 rte_flow_error_set(error, EINVAL,
1581                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1582                         item, "Not supported last point for range");
1583                 return -rte_errno;
1584         }
1585
1586         /* Get the MAC info. */
1587         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
1588                 /**
1589                  * Only support vlan and dst MAC address,
1590                  * others should be masked.
1591                  */
1592                 if (item->spec && !item->mask) {
1593                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1594                         rte_flow_error_set(error, EINVAL,
1595                                 RTE_FLOW_ERROR_TYPE_ITEM,
1596                                 item, "Not supported by fdir filter");
1597                         return -rte_errno;
1598                 }
1599
1600                 if (item->spec) {
1601                         rule->b_spec = TRUE;
1602                         eth_spec = (const struct rte_flow_item_eth *)item->spec;
1603
1604                         /* Get the dst MAC. */
1605                         for (j = 0; j < ETHER_ADDR_LEN; j++) {
1606                                 rule->ixgbe_fdir.formatted.inner_mac[j] =
1607                                         eth_spec->dst.addr_bytes[j];
1608                         }
1609                 }
1610
1611
1612                 if (item->mask) {
1613
1614                         rule->b_mask = TRUE;
1615                         eth_mask = (const struct rte_flow_item_eth *)item->mask;
1616
1617                         /* Ether type should be masked. */
1618                         if (eth_mask->type ||
1619                             rule->mode == RTE_FDIR_MODE_SIGNATURE) {
1620                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1621                                 rte_flow_error_set(error, EINVAL,
1622                                         RTE_FLOW_ERROR_TYPE_ITEM,
1623                                         item, "Not supported by fdir filter");
1624                                 return -rte_errno;
1625                         }
1626
1627                         /* If the Ethernet item has a mask, it means MAC VLAN mode. */
1628                         rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN;
1629
1630                         /**
1631                          * The src MAC address must be masked out,
1632                          * and the dst MAC address must be fully matched (mask 0xFF).
1633                          */
1634                         for (j = 0; j < ETHER_ADDR_LEN; j++) {
1635                                 if (eth_mask->src.addr_bytes[j] ||
1636                                         eth_mask->dst.addr_bytes[j] != 0xFF) {
1637                                         memset(rule, 0,
1638                                         sizeof(struct ixgbe_fdir_rule));
1639                                         rte_flow_error_set(error, EINVAL,
1640                                         RTE_FLOW_ERROR_TYPE_ITEM,
1641                                         item, "Not supported by fdir filter");
1642                                         return -rte_errno;
1643                                 }
1644                         }
1645
1646                         /* If no VLAN item follows, treat the VLAN TCI as fully masked. */
1647                         rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
1648                 }
1649                 /** If both spec and mask are NULL,
1650                  * it means don't care about ETH.
1651                  * Do nothing.
1652                  */
1653
1654                 /**
1655                  * Check if the next not void item is vlan or ipv4.
1656                  * IPv6 is not supported.
1657                  */
1658                 item = next_no_fuzzy_pattern(pattern, item);
1659                 if (rule->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
1660                         if (item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
1661                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1662                                 rte_flow_error_set(error, EINVAL,
1663                                         RTE_FLOW_ERROR_TYPE_ITEM,
1664                                         item, "Not supported by fdir filter");
1665                                 return -rte_errno;
1666                         }
1667                 } else {
1668                         if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
1669                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1670                                 rte_flow_error_set(error, EINVAL,
1671                                         RTE_FLOW_ERROR_TYPE_ITEM,
1672                                         item, "Not supported by fdir filter");
1673                                 return -rte_errno;
1674                         }
1675                 }
1676         }
1677
1678         if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1679                 if (!(item->spec && item->mask)) {
1680                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1681                         rte_flow_error_set(error, EINVAL,
1682                                 RTE_FLOW_ERROR_TYPE_ITEM,
1683                                 item, "Not supported by fdir filter");
1684                         return -rte_errno;
1685                 }
1686
1687                 /*Not supported last point for range*/
1688                 if (item->last) {
1689                         rte_flow_error_set(error, EINVAL,
1690                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1691                                 item, "Not supported last point for range");
1692                         return -rte_errno;
1693                 }
1694
1695                 vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
1696                 vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
1697
1698                 rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
1699
1700                 rule->mask.vlan_tci_mask = vlan_mask->tci;
1701                 rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
1702                 /* More than one VLAN tag is not supported. */
1703
1704                 /* Next not void item must be END */
1705                 item = next_no_fuzzy_pattern(pattern, item);
1706                 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1707                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1708                         rte_flow_error_set(error, EINVAL,
1709                                 RTE_FLOW_ERROR_TYPE_ITEM,
1710                                 item, "Not supported by fdir filter");
1711                         return -rte_errno;
1712                 }
1713         }
1714
1715         /* Get the IPV4 info. */
1716         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1717                 /**
1718                  * Set the flow type even if there's no content
1719                  * as we must have a flow type.
1720                  */
1721                 rule->ixgbe_fdir.formatted.flow_type =
1722                         IXGBE_ATR_FLOW_TYPE_IPV4;
1723                 /*Not supported last point for range*/
1724                 if (item->last) {
1725                         rte_flow_error_set(error, EINVAL,
1726                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1727                                 item, "Not supported last point for range");
1728                         return -rte_errno;
1729                 }
1730                 /**
1731                  * Only care about src & dst addresses,
1732                  * others should be masked.
1733                  */
1734                 if (!item->mask) {
1735                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1736                         rte_flow_error_set(error, EINVAL,
1737                                 RTE_FLOW_ERROR_TYPE_ITEM,
1738                                 item, "Not supported by fdir filter");
1739                         return -rte_errno;
1740                 }
1741                 rule->b_mask = TRUE;
1742                 ipv4_mask =
1743                         (const struct rte_flow_item_ipv4 *)item->mask;
1744                 if (ipv4_mask->hdr.version_ihl ||
1745                     ipv4_mask->hdr.type_of_service ||
1746                     ipv4_mask->hdr.total_length ||
1747                     ipv4_mask->hdr.packet_id ||
1748                     ipv4_mask->hdr.fragment_offset ||
1749                     ipv4_mask->hdr.time_to_live ||
1750                     ipv4_mask->hdr.next_proto_id ||
1751                     ipv4_mask->hdr.hdr_checksum) {
1752                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1753                         rte_flow_error_set(error, EINVAL,
1754                                 RTE_FLOW_ERROR_TYPE_ITEM,
1755                                 item, "Not supported by fdir filter");
1756                         return -rte_errno;
1757                 }
1758                 rule->mask.dst_ipv4_mask = ipv4_mask->hdr.dst_addr;
1759                 rule->mask.src_ipv4_mask = ipv4_mask->hdr.src_addr;
1760
1761                 if (item->spec) {
1762                         rule->b_spec = TRUE;
1763                         ipv4_spec =
1764                                 (const struct rte_flow_item_ipv4 *)item->spec;
1765                         rule->ixgbe_fdir.formatted.dst_ip[0] =
1766                                 ipv4_spec->hdr.dst_addr;
1767                         rule->ixgbe_fdir.formatted.src_ip[0] =
1768                                 ipv4_spec->hdr.src_addr;
1769                 }
1770
1771                 /**
1772                  * Check if the next not void item is
1773                  * TCP or UDP or SCTP or END.
1774                  */
1775                 item = next_no_fuzzy_pattern(pattern, item);
1776                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1777                     item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1778                     item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
1779                     item->type != RTE_FLOW_ITEM_TYPE_END &&
1780                     item->type != RTE_FLOW_ITEM_TYPE_RAW) {
1781                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1782                         rte_flow_error_set(error, EINVAL,
1783                                 RTE_FLOW_ERROR_TYPE_ITEM,
1784                                 item, "Not supported by fdir filter");
1785                         return -rte_errno;
1786                 }
1787         }
1788
1789         /* Get the IPV6 info. */
1790         if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
1791                 /**
1792                  * Set the flow type even if there's no content
1793                  * as we must have a flow type.
1794                  */
1795                 rule->ixgbe_fdir.formatted.flow_type =
1796                         IXGBE_ATR_FLOW_TYPE_IPV6;
1797
1798                 /**
1799                  * 1. must be in signature match mode
1800                  * 2. 'last' is not supported
1801                  * 3. mask must not be NULL
1802                  */
1803                 if (rule->mode != RTE_FDIR_MODE_SIGNATURE ||
1804                     item->last ||
1805                     !item->mask) {
1806                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1807                         rte_flow_error_set(error, EINVAL,
1808                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1809                                 item, "Not supported last point for range");
1810                         return -rte_errno;
1811                 }
1812
1813                 rule->b_mask = TRUE;
1814                 ipv6_mask =
1815                         (const struct rte_flow_item_ipv6 *)item->mask;
1816                 if (ipv6_mask->hdr.vtc_flow ||
1817                     ipv6_mask->hdr.payload_len ||
1818                     ipv6_mask->hdr.proto ||
1819                     ipv6_mask->hdr.hop_limits) {
1820                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1821                         rte_flow_error_set(error, EINVAL,
1822                                 RTE_FLOW_ERROR_TYPE_ITEM,
1823                                 item, "Not supported by fdir filter");
1824                         return -rte_errno;
1825                 }
1826
1827                 /* check src addr mask */
1828                 for (j = 0; j < 16; j++) {
1829                         if (ipv6_mask->hdr.src_addr[j] == UINT8_MAX) {
1830                                 rule->mask.src_ipv6_mask |= 1 << j;
1831                         } else if (ipv6_mask->hdr.src_addr[j] != 0) {
1832                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1833                                 rte_flow_error_set(error, EINVAL,
1834                                         RTE_FLOW_ERROR_TYPE_ITEM,
1835                                         item, "Not supported by fdir filter");
1836                                 return -rte_errno;
1837                         }
1838                 }
1839
1840                 /* check dst addr mask */
1841                 for (j = 0; j < 16; j++) {
1842                         if (ipv6_mask->hdr.dst_addr[j] == UINT8_MAX) {
1843                                 rule->mask.dst_ipv6_mask |= 1 << j;
1844                         } else if (ipv6_mask->hdr.dst_addr[j] != 0) {
1845                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1846                                 rte_flow_error_set(error, EINVAL,
1847                                         RTE_FLOW_ERROR_TYPE_ITEM,
1848                                         item, "Not supported by fdir filter");
1849                                 return -rte_errno;
1850                         }
1851                 }
1852
1853                 if (item->spec) {
1854                         rule->b_spec = TRUE;
1855                         ipv6_spec =
1856                                 (const struct rte_flow_item_ipv6 *)item->spec;
1857                         rte_memcpy(rule->ixgbe_fdir.formatted.src_ip,
1858                                    ipv6_spec->hdr.src_addr, 16);
1859                         rte_memcpy(rule->ixgbe_fdir.formatted.dst_ip,
1860                                    ipv6_spec->hdr.dst_addr, 16);
1861                 }
1862
1863                 /**
1864                  * Check if the next not void item is
1865                  * TCP or UDP or SCTP or END.
1866                  */
1867                 item = next_no_fuzzy_pattern(pattern, item);
1868                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1869                     item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1870                     item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
1871                     item->type != RTE_FLOW_ITEM_TYPE_END &&
1872                     item->type != RTE_FLOW_ITEM_TYPE_RAW) {
1873                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1874                         rte_flow_error_set(error, EINVAL,
1875                                 RTE_FLOW_ERROR_TYPE_ITEM,
1876                                 item, "Not supported by fdir filter");
1877                         return -rte_errno;
1878                 }
1879         }
1880
1881         /* Get the TCP info. */
1882         if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
1883                 /**
1884                  * Set the flow type even if there's no content
1885                  * as we must have a flow type.
1886                  */
1887                 rule->ixgbe_fdir.formatted.flow_type |=
1888                         IXGBE_ATR_L4TYPE_TCP;
1889                 /*Not supported last point for range*/
1890                 if (item->last) {
1891                         rte_flow_error_set(error, EINVAL,
1892                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1893                                 item, "Not supported last point for range");
1894                         return -rte_errno;
1895                 }
1896                 /**
1897                  * Only care about src & dst ports,
1898                  * others should be masked.
1899                  */
1900                 if (!item->mask) {
1901                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1902                         rte_flow_error_set(error, EINVAL,
1903                                 RTE_FLOW_ERROR_TYPE_ITEM,
1904                                 item, "Not supported by fdir filter");
1905                         return -rte_errno;
1906                 }
1907                 rule->b_mask = TRUE;
1908                 tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
1909                 if (tcp_mask->hdr.sent_seq ||
1910                     tcp_mask->hdr.recv_ack ||
1911                     tcp_mask->hdr.data_off ||
1912                     tcp_mask->hdr.tcp_flags ||
1913                     tcp_mask->hdr.rx_win ||
1914                     tcp_mask->hdr.cksum ||
1915                     tcp_mask->hdr.tcp_urp) {
1916                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1917                         rte_flow_error_set(error, EINVAL,
1918                                 RTE_FLOW_ERROR_TYPE_ITEM,
1919                                 item, "Not supported by fdir filter");
1920                         return -rte_errno;
1921                 }
1922                 rule->mask.src_port_mask = tcp_mask->hdr.src_port;
1923                 rule->mask.dst_port_mask = tcp_mask->hdr.dst_port;
1924
1925                 if (item->spec) {
1926                         rule->b_spec = TRUE;
1927                         tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
1928                         rule->ixgbe_fdir.formatted.src_port =
1929                                 tcp_spec->hdr.src_port;
1930                         rule->ixgbe_fdir.formatted.dst_port =
1931                                 tcp_spec->hdr.dst_port;
1932                 }
1933
1934                 item = next_no_fuzzy_pattern(pattern, item);
1935                 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
1936                     item->type != RTE_FLOW_ITEM_TYPE_END) {
1937                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1938                         rte_flow_error_set(error, EINVAL,
1939                                 RTE_FLOW_ERROR_TYPE_ITEM,
1940                                 item, "Not supported by fdir filter");
1941                         return -rte_errno;
1942                 }
1943
1944         }
1945
1946         /* Get the UDP info */
1947         if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
1948                 /**
1949                  * Set the flow type even if there's no content
1950                  * as we must have a flow type.
1951                  */
1952                 rule->ixgbe_fdir.formatted.flow_type |=
1953                         IXGBE_ATR_L4TYPE_UDP;
1954                 /*Not supported last point for range*/
1955                 if (item->last) {
1956                         rte_flow_error_set(error, EINVAL,
1957                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1958                                 item, "Not supported last point for range");
1959                         return -rte_errno;
1960                 }
1961                 /**
1962                  * Only care about src & dst ports,
1963                  * others should be masked.
1964                  */
1965                 if (!item->mask) {
1966                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1967                         rte_flow_error_set(error, EINVAL,
1968                                 RTE_FLOW_ERROR_TYPE_ITEM,
1969                                 item, "Not supported by fdir filter");
1970                         return -rte_errno;
1971                 }
1972                 rule->b_mask = TRUE;
1973                 udp_mask = (const struct rte_flow_item_udp *)item->mask;
1974                 if (udp_mask->hdr.dgram_len ||
1975                     udp_mask->hdr.dgram_cksum) {
1976                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1977                         rte_flow_error_set(error, EINVAL,
1978                                 RTE_FLOW_ERROR_TYPE_ITEM,
1979                                 item, "Not supported by fdir filter");
1980                         return -rte_errno;
1981                 }
1982                 rule->mask.src_port_mask = udp_mask->hdr.src_port;
1983                 rule->mask.dst_port_mask = udp_mask->hdr.dst_port;
1984
1985                 if (item->spec) {
1986                         rule->b_spec = TRUE;
1987                         udp_spec = (const struct rte_flow_item_udp *)item->spec;
1988                         rule->ixgbe_fdir.formatted.src_port =
1989                                 udp_spec->hdr.src_port;
1990                         rule->ixgbe_fdir.formatted.dst_port =
1991                                 udp_spec->hdr.dst_port;
1992                 }
1993
1994                 item = next_no_fuzzy_pattern(pattern, item);
1995                 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
1996                     item->type != RTE_FLOW_ITEM_TYPE_END) {
1997                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1998                         rte_flow_error_set(error, EINVAL,
1999                                 RTE_FLOW_ERROR_TYPE_ITEM,
2000                                 item, "Not supported by fdir filter");
2001                         return -rte_errno;
2002                 }
2003
2004         }
2005
2006         /* Get the SCTP info */
2007         if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
2008                 /**
2009                  * Set the flow type even if there's no content
2010                  * as we must have a flow type.
2011                  */
2012                 rule->ixgbe_fdir.formatted.flow_type |=
2013                         IXGBE_ATR_L4TYPE_SCTP;
2014                 /*Not supported last point for range*/
2015                 if (item->last) {
2016                         rte_flow_error_set(error, EINVAL,
2017                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2018                                 item, "Not supported last point for range");
2019                         return -rte_errno;
2020                 }
2021
2022                 /* Only the x550 family supports SCTP port matching. */
2023                 if (hw->mac.type == ixgbe_mac_X550 ||
2024                     hw->mac.type == ixgbe_mac_X550EM_x ||
2025                     hw->mac.type == ixgbe_mac_X550EM_a) {
2026                         /**
2027                          * Only care about src & dst ports,
2028                          * others should be masked.
2029                          */
2030                         if (!item->mask) {
2031                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2032                                 rte_flow_error_set(error, EINVAL,
2033                                         RTE_FLOW_ERROR_TYPE_ITEM,
2034                                         item, "Not supported by fdir filter");
2035                                 return -rte_errno;
2036                         }
2037                         rule->b_mask = TRUE;
2038                         sctp_mask =
2039                                 (const struct rte_flow_item_sctp *)item->mask;
2040                         if (sctp_mask->hdr.tag ||
2041                                 sctp_mask->hdr.cksum) {
2042                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2043                                 rte_flow_error_set(error, EINVAL,
2044                                         RTE_FLOW_ERROR_TYPE_ITEM,
2045                                         item, "Not supported by fdir filter");
2046                                 return -rte_errno;
2047                         }
2048                         rule->mask.src_port_mask = sctp_mask->hdr.src_port;
2049                         rule->mask.dst_port_mask = sctp_mask->hdr.dst_port;
2050
2051                         if (item->spec) {
2052                                 rule->b_spec = TRUE;
2053                                 sctp_spec =
2054                                 (const struct rte_flow_item_sctp *)item->spec;
2055                                 rule->ixgbe_fdir.formatted.src_port =
2056                                         sctp_spec->hdr.src_port;
2057                                 rule->ixgbe_fdir.formatted.dst_port =
2058                                         sctp_spec->hdr.dst_port;
2059                         }
2060                 /* On other MAC types, even SCTP port matching is not supported. */
2061                 } else {
2062                         sctp_mask =
2063                                 (const struct rte_flow_item_sctp *)item->mask;
2064                         if (sctp_mask &&
2065                                 (sctp_mask->hdr.src_port ||
2066                                  sctp_mask->hdr.dst_port ||
2067                                  sctp_mask->hdr.tag ||
2068                                  sctp_mask->hdr.cksum)) {
2069                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2070                                 rte_flow_error_set(error, EINVAL,
2071                                         RTE_FLOW_ERROR_TYPE_ITEM,
2072                                         item, "Not supported by fdir filter");
2073                                 return -rte_errno;
2074                         }
2075                 }
2076
2077                 item = next_no_fuzzy_pattern(pattern, item);
2078                 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
2079                         item->type != RTE_FLOW_ITEM_TYPE_END) {
2080                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2081                         rte_flow_error_set(error, EINVAL,
2082                                 RTE_FLOW_ERROR_TYPE_ITEM,
2083                                 item, "Not supported by fdir filter");
2084                         return -rte_errno;
2085                 }
2086         }
2087
2088         /* Get the flex byte info */
2089         if (item->type == RTE_FLOW_ITEM_TYPE_RAW) {
2090                 /* Not supported last point for range*/
2091                 if (item->last) {
2092                         rte_flow_error_set(error, EINVAL,
2093                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2094                                 item, "Not supported last point for range");
2095                         return -rte_errno;
2096                 }
2097                 /* spec and mask must not be NULL */
2098                 if (!item->mask || !item->spec) {
2099                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2100                         rte_flow_error_set(error, EINVAL,
2101                                 RTE_FLOW_ERROR_TYPE_ITEM,
2102                                 item, "Not supported by fdir filter");
2103                         return -rte_errno;
2104                 }
2105
2106                 raw_mask = (const struct rte_flow_item_raw *)item->mask;
2107
2108                 /* check mask */
2109                 if (raw_mask->relative != 0x1 ||
2110                     raw_mask->search != 0x1 ||
2111                     raw_mask->reserved != 0x0 ||
2112                     (uint32_t)raw_mask->offset != 0xffffffff ||
2113                     raw_mask->limit != 0xffff ||
2114                     raw_mask->length != 0xffff) {
2115                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2116                         rte_flow_error_set(error, EINVAL,
2117                                 RTE_FLOW_ERROR_TYPE_ITEM,
2118                                 item, "Not supported by fdir filter");
2119                         return -rte_errno;
2120                 }
2121
2122                 raw_spec = (const struct rte_flow_item_raw *)item->spec;
2123
2124                 /* check spec */
2125                 if (raw_spec->relative != 0 ||
2126                     raw_spec->search != 0 ||
2127                     raw_spec->reserved != 0 ||
2128                     raw_spec->offset > IXGBE_MAX_FLX_SOURCE_OFF ||
2129                     raw_spec->offset % 2 ||
2130                     raw_spec->limit != 0 ||
2131                     raw_spec->length != 2 ||
2132                     /* pattern can't be 0xffff */
2133                     (raw_spec->pattern[0] == 0xff &&
2134                      raw_spec->pattern[1] == 0xff)) {
2135                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2136                         rte_flow_error_set(error, EINVAL,
2137                                 RTE_FLOW_ERROR_TYPE_ITEM,
2138                                 item, "Not supported by fdir filter");
2139                         return -rte_errno;
2140                 }
2141
2142                 /* check pattern mask */
2143                 if (raw_mask->pattern[0] != 0xff ||
2144                     raw_mask->pattern[1] != 0xff) {
2145                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2146                         rte_flow_error_set(error, EINVAL,
2147                                 RTE_FLOW_ERROR_TYPE_ITEM,
2148                                 item, "Not supported by fdir filter");
2149                         return -rte_errno;
2150                 }
2151
2152                 rule->mask.flex_bytes_mask = 0xffff;
2153                 rule->ixgbe_fdir.formatted.flex_bytes =
2154                         (((uint16_t)raw_spec->pattern[1]) << 8) |
2155                         raw_spec->pattern[0];
2156                 rule->flex_bytes_offset = raw_spec->offset;
2157         }
2158
2159         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2160                 /* check if the next not void item is END */
2161                 item = next_no_fuzzy_pattern(pattern, item);
2162                 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2163                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2164                         rte_flow_error_set(error, EINVAL,
2165                                 RTE_FLOW_ERROR_TYPE_ITEM,
2166                                 item, "Not supported by fdir filter");
2167                         return -rte_errno;
2168                 }
2169         }
2170
2171         return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
2172 }
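
/*
 * A minimal application-side sketch of the MAC VLAN pattern described in
 * the comment above ixgbe_parse_fdir_filter_normal(): match a fully
 * specified destination MAC plus a VLAN TCI. The MAC address, TCI value
 * and variable names are illustrative only.
 *
 *	struct rte_flow_item_eth eth_spec = {
 *		.dst.addr_bytes = { 0xAC, 0x7B, 0xA1, 0x2C, 0x6D, 0x36 } };
 *	struct rte_flow_item_eth eth_mask = {
 *		.dst.addr_bytes = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF } };
 *	struct rte_flow_item_vlan vlan_spec = { 0 };
 *	struct rte_flow_item_vlan vlan_mask = { 0 };
 *
 *	vlan_spec.tci = rte_cpu_to_be_16(0x2016);
 *	vlan_mask.tci = rte_cpu_to_be_16(0xEFFF);
 *
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
 *		  .spec = &eth_spec, .mask = &eth_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_VLAN,
 *		  .spec = &vlan_spec, .mask = &vlan_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 */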
2173
2174 #define NVGRE_PROTOCOL 0x6558
2175
2176 /**
2177  * Parse the rule to see if it is a VxLAN or NVGRE flow director rule,
2178  * and fill in the flow director filter info along the way.
2179  * VxLAN PATTERN:
2180  * The first not void item must be ETH.
2181  * The second not void item must be IPV4 or IPV6.
2182  * The third and fourth not void items must be UDP and VxLAN.
2183  * The next not void item must be END.
2184  * NVGRE PATTERN:
2185  * The first not void item must be ETH.
2186  * The second not void item must be IPV4 or IPV6.
2187  * The third not void item must be NVGRE.
2188  * The next not void item must be END.
2189  * ACTION:
2190  * The first not void action should be QUEUE or DROP.
2191  * The second not void optional action should be MARK,
2192  * mark_id is a uint32_t number.
2193  * The next not void action should be END.
2194  * VxLAN pattern example:
2195  * ITEM         Spec                    Mask
2196  * ETH          NULL                    NULL
2197  * IPV4/IPV6    NULL                    NULL
2198  * UDP          NULL                    NULL
2199  * VxLAN        vni{0x00, 0x32, 0x54}   {0xFF, 0xFF, 0xFF}
2200  * MAC VLAN     tci     0x2016          0xEFFF
2201  * END
2202  * NVGRE pattern example:
2203  * ITEM         Spec                    Mask
2204  * ETH          NULL                    NULL
2205  * IPV4/IPV6    NULL                    NULL
2206  * NVGRE        protocol        0x6558  0xFFFF
2207  *              tni{0x00, 0x32, 0x54}   {0xFF, 0xFF, 0xFF}
2208  * MAC VLAN     tci     0x2016          0xEFFF
2209  * END
2210  * Other members in mask and spec should be set to 0x00.
2211  * item->last should be NULL.
2212  */
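/*
 * A minimal sketch of the VxLAN and NVGRE tunnel items handled below,
 * assuming the VNI/TNI values from the examples above; the VNI/TNI mask
 * must be either all 0xFF or all zero. Names and values are illustrative,
 * and the remaining items of the pattern follow the tables above.
 *
 *	struct rte_flow_item_vxlan vxlan_spec = { .vni = { 0x00, 0x32, 0x54 } };
 *	struct rte_flow_item_vxlan vxlan_mask = { .vni = { 0xFF, 0xFF, 0xFF } };
 *	struct rte_flow_item vxlan_item = {
 *		.type = RTE_FLOW_ITEM_TYPE_VXLAN,
 *		.spec = &vxlan_spec, .mask = &vxlan_mask,
 *	};
 *
 *	struct rte_flow_item_nvgre nvgre_spec = { .tni = { 0x00, 0x32, 0x54 } };
 *	struct rte_flow_item_nvgre nvgre_mask = { .tni = { 0xFF, 0xFF, 0xFF } };
 *
 *	nvgre_spec.c_k_s_rsvd0_ver = rte_cpu_to_be_16(0x2000);
 *	nvgre_spec.protocol = rte_cpu_to_be_16(NVGRE_PROTOCOL);
 *	nvgre_mask.c_k_s_rsvd0_ver = rte_cpu_to_be_16(0xFFFF);
 *	nvgre_mask.protocol = rte_cpu_to_be_16(0xFFFF);
 *
 *	struct rte_flow_item nvgre_item = {
 *		.type = RTE_FLOW_ITEM_TYPE_NVGRE,
 *		.spec = &nvgre_spec, .mask = &nvgre_mask,
 *	};
 */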
2213 static int
2214 ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
2215                                const struct rte_flow_item pattern[],
2216                                const struct rte_flow_action actions[],
2217                                struct ixgbe_fdir_rule *rule,
2218                                struct rte_flow_error *error)
2219 {
2220         const struct rte_flow_item *item;
2221         const struct rte_flow_item_vxlan *vxlan_spec;
2222         const struct rte_flow_item_vxlan *vxlan_mask;
2223         const struct rte_flow_item_nvgre *nvgre_spec;
2224         const struct rte_flow_item_nvgre *nvgre_mask;
2225         const struct rte_flow_item_eth *eth_spec;
2226         const struct rte_flow_item_eth *eth_mask;
2227         const struct rte_flow_item_vlan *vlan_spec;
2228         const struct rte_flow_item_vlan *vlan_mask;
2229         uint32_t j;
2230
2231         if (!pattern) {
2232                 rte_flow_error_set(error, EINVAL,
2233                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
2234                                    NULL, "NULL pattern.");
2235                 return -rte_errno;
2236         }
2237
2238         if (!actions) {
2239                 rte_flow_error_set(error, EINVAL,
2240                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
2241                                    NULL, "NULL action.");
2242                 return -rte_errno;
2243         }
2244
2245         if (!attr) {
2246                 rte_flow_error_set(error, EINVAL,
2247                                    RTE_FLOW_ERROR_TYPE_ATTR,
2248                                    NULL, "NULL attribute.");
2249                 return -rte_errno;
2250         }
2251
2252         /**
2253          * Some fields may not be provided. Set spec to 0 and mask to default
2254          * value. So, we need not do anything for the not provided fields later.
2255          */
2256         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2257         memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
2258         rule->mask.vlan_tci_mask = 0;
2259
2260         /**
2261          * The first not void item should be
2262          * MAC or IPv4 or IPv6 or UDP or VxLAN or NVGRE.
2263          */
2264         item = next_no_void_pattern(pattern, NULL);
2265         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
2266             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
2267             item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
2268             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
2269             item->type != RTE_FLOW_ITEM_TYPE_VXLAN &&
2270             item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
2271                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2272                 rte_flow_error_set(error, EINVAL,
2273                         RTE_FLOW_ERROR_TYPE_ITEM,
2274                         item, "Not supported by fdir filter");
2275                 return -rte_errno;
2276         }
2277
2278         rule->mode = RTE_FDIR_MODE_PERFECT_TUNNEL;
2279
2280         /* Skip MAC. */
2281         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
2282                 /* Only used to describe the protocol stack. */
2283                 if (item->spec || item->mask) {
2284                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2285                         rte_flow_error_set(error, EINVAL,
2286                                 RTE_FLOW_ERROR_TYPE_ITEM,
2287                                 item, "Not supported by fdir filter");
2288                         return -rte_errno;
2289                 }
2290                 /* Not supported last point for range*/
2291                 if (item->last) {
2292                         rte_flow_error_set(error, EINVAL,
2293                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2294                                 item, "Not supported last point for range");
2295                         return -rte_errno;
2296                 }
2297
2298                 /* Check if the next not void item is IPv4 or IPv6. */
2299                 item = next_no_void_pattern(pattern, item);
2300                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
2301                     item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
2302                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2303                         rte_flow_error_set(error, EINVAL,
2304                                 RTE_FLOW_ERROR_TYPE_ITEM,
2305                                 item, "Not supported by fdir filter");
2306                         return -rte_errno;
2307                 }
2308         }
2309
2310         /* Skip IP. */
2311         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
2312             item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
2313                 /* Only used to describe the protocol stack. */
2314                 if (item->spec || item->mask) {
2315                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2316                         rte_flow_error_set(error, EINVAL,
2317                                 RTE_FLOW_ERROR_TYPE_ITEM,
2318                                 item, "Not supported by fdir filter");
2319                         return -rte_errno;
2320                 }
2321                 /*Not supported last point for range*/
2322                 if (item->last) {
2323                         rte_flow_error_set(error, EINVAL,
2324                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2325                                 item, "Not supported last point for range");
2326                         return -rte_errno;
2327                 }
2328
2329                 /* Check if the next not void item is UDP or NVGRE. */
2330                 item = next_no_void_pattern(pattern, item);
2331                 if (item->type != RTE_FLOW_ITEM_TYPE_UDP &&
2332                     item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
2333                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2334                         rte_flow_error_set(error, EINVAL,
2335                                 RTE_FLOW_ERROR_TYPE_ITEM,
2336                                 item, "Not supported by fdir filter");
2337                         return -rte_errno;
2338                 }
2339         }
2340
2341         /* Skip UDP. */
2342         if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
2343                 /* Only used to describe the protocol stack. */
2344                 if (item->spec || item->mask) {
2345                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2346                         rte_flow_error_set(error, EINVAL,
2347                                 RTE_FLOW_ERROR_TYPE_ITEM,
2348                                 item, "Not supported by fdir filter");
2349                         return -rte_errno;
2350                 }
2351                 /*Not supported last point for range*/
2352                 if (item->last) {
2353                         rte_flow_error_set(error, EINVAL,
2354                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2355                                 item, "Not supported last point for range");
2356                         return -rte_errno;
2357                 }
2358
2359                 /* Check if the next not void item is VxLAN. */
2360                 item = next_no_void_pattern(pattern, item);
2361                 if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
2362                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2363                         rte_flow_error_set(error, EINVAL,
2364                                 RTE_FLOW_ERROR_TYPE_ITEM,
2365                                 item, "Not supported by fdir filter");
2366                         return -rte_errno;
2367                 }
2368         }
2369
2370         /* Get the VxLAN info */
2371         if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
2372                 rule->ixgbe_fdir.formatted.tunnel_type =
2373                         RTE_FDIR_TUNNEL_TYPE_VXLAN;
2374
2375                 /* Only care about VNI, others should be masked. */
2376                 if (!item->mask) {
2377                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2378                         rte_flow_error_set(error, EINVAL,
2379                                 RTE_FLOW_ERROR_TYPE_ITEM,
2380                                 item, "Not supported by fdir filter");
2381                         return -rte_errno;
2382                 }
2383                 /*Not supported last point for range*/
2384                 if (item->last) {
2385                         rte_flow_error_set(error, EINVAL,
2386                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2387                                 item, "Not supported last point for range");
2388                         return -rte_errno;
2389                 }
2390                 rule->b_mask = TRUE;
2391
2392                 /* Tunnel type is always meaningful. */
2393                 rule->mask.tunnel_type_mask = 1;
2394
2395                 vxlan_mask =
2396                         (const struct rte_flow_item_vxlan *)item->mask;
2397                 if (vxlan_mask->flags) {
2398                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2399                         rte_flow_error_set(error, EINVAL,
2400                                 RTE_FLOW_ERROR_TYPE_ITEM,
2401                                 item, "Not supported by fdir filter");
2402                         return -rte_errno;
2403                 }
2404                 /* VNI must be totally masked or not masked at all. */
2405                 if ((vxlan_mask->vni[0] || vxlan_mask->vni[1] ||
2406                         vxlan_mask->vni[2]) &&
2407                         ((vxlan_mask->vni[0] != 0xFF) ||
2408                         (vxlan_mask->vni[1] != 0xFF) ||
2409                                 (vxlan_mask->vni[2] != 0xFF))) {
2410                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2411                         rte_flow_error_set(error, EINVAL,
2412                                 RTE_FLOW_ERROR_TYPE_ITEM,
2413                                 item, "Not supported by fdir filter");
2414                         return -rte_errno;
2415                 }
2416
2417                 rte_memcpy(&rule->mask.tunnel_id_mask, vxlan_mask->vni,
2418                         RTE_DIM(vxlan_mask->vni));
2419
2420                 if (item->spec) {
2421                         rule->b_spec = TRUE;
2422                         vxlan_spec = (const struct rte_flow_item_vxlan *)
2423                                         item->spec;
2424                         rte_memcpy(((uint8_t *)
2425                                 &rule->ixgbe_fdir.formatted.tni_vni + 1),
2426                                 vxlan_spec->vni, RTE_DIM(vxlan_spec->vni));
2427                         rule->ixgbe_fdir.formatted.tni_vni = rte_be_to_cpu_32(
2428                                 rule->ixgbe_fdir.formatted.tni_vni);
2429                 }
2430         }
2431
2432         /* Get the NVGRE info */
2433         if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE) {
2434                 rule->ixgbe_fdir.formatted.tunnel_type =
2435                         RTE_FDIR_TUNNEL_TYPE_NVGRE;
2436
2437                 /**
2438                  * Only care about c_k_s_rsvd0_ver, protocol and TNI,
2439                  * others should be masked.
2440                  */
2441                 if (!item->mask) {
2442                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2443                         rte_flow_error_set(error, EINVAL,
2444                                 RTE_FLOW_ERROR_TYPE_ITEM,
2445                                 item, "Not supported by fdir filter");
2446                         return -rte_errno;
2447                 }
2448                 /*Not supported last point for range*/
2449                 if (item->last) {
2450                         rte_flow_error_set(error, EINVAL,
2451                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2452                                 item, "Not supported last point for range");
2453                         return -rte_errno;
2454                 }
2455                 rule->b_mask = TRUE;
2456
2457                 /* Tunnel type is always meaningful. */
2458                 rule->mask.tunnel_type_mask = 1;
2459
2460                 nvgre_mask =
2461                         (const struct rte_flow_item_nvgre *)item->mask;
2462                 if (nvgre_mask->flow_id) {
2463                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2464                         rte_flow_error_set(error, EINVAL,
2465                                 RTE_FLOW_ERROR_TYPE_ITEM,
2466                                 item, "Not supported by fdir filter");
2467                         return -rte_errno;
2468                 }
2469                 if (nvgre_mask->protocol &&
2470                     nvgre_mask->protocol != 0xFFFF) {
2471                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2472                         rte_flow_error_set(error, EINVAL,
2473                                 RTE_FLOW_ERROR_TYPE_ITEM,
2474                                 item, "Not supported by fdir filter");
2475                         return -rte_errno;
2476                 }
2477                 if (nvgre_mask->c_k_s_rsvd0_ver &&
2478                     nvgre_mask->c_k_s_rsvd0_ver !=
2479                         rte_cpu_to_be_16(0xFFFF)) {
2480                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2481                         rte_flow_error_set(error, EINVAL,
2482                                 RTE_FLOW_ERROR_TYPE_ITEM,
2483                                 item, "Not supported by fdir filter");
2484                         return -rte_errno;
2485                 }
2486                 /* The TNI must be either fully masked or not masked at all. */
2487                 if (nvgre_mask->tni[0] &&
2488                     ((nvgre_mask->tni[0] != 0xFF) ||
2489                     (nvgre_mask->tni[1] != 0xFF) ||
2490                     (nvgre_mask->tni[2] != 0xFF))) {
2491                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2492                         rte_flow_error_set(error, EINVAL,
2493                                 RTE_FLOW_ERROR_TYPE_ITEM,
2494                                 item, "Not supported by fdir filter");
2495                         return -rte_errno;
2496                 }
2497                 /* The TNI is a 24-bit field. */
2498                 rte_memcpy(&rule->mask.tunnel_id_mask, nvgre_mask->tni,
2499                         RTE_DIM(nvgre_mask->tni));
2500                 rule->mask.tunnel_id_mask <<= 8;
2501
2502                 if (item->spec) {
2503                         rule->b_spec = TRUE;
2504                         nvgre_spec =
2505                                 (const struct rte_flow_item_nvgre *)item->spec;
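                             /*
                              * 0x2000 is the GRE "key present" (K) bit: when the
                              * flags/version field is matched, it must be exactly
                              * this value.
                              */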
2506                         if (nvgre_spec->c_k_s_rsvd0_ver !=
2507                             rte_cpu_to_be_16(0x2000) &&
2508                                 nvgre_mask->c_k_s_rsvd0_ver) {
2509                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2510                                 rte_flow_error_set(error, EINVAL,
2511                                         RTE_FLOW_ERROR_TYPE_ITEM,
2512                                         item, "Not supported by fdir filter");
2513                                 return -rte_errno;
2514                         }
2515                         if (nvgre_mask->protocol &&
2516                             nvgre_spec->protocol !=
2517                             rte_cpu_to_be_16(NVGRE_PROTOCOL)) {
2518                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2519                                 rte_flow_error_set(error, EINVAL,
2520                                         RTE_FLOW_ERROR_TYPE_ITEM,
2521                                         item, "Not supported by fdir filter");
2522                                 return -rte_errno;
2523                         }
2524                         /* The TNI is a 24-bit field. */
2525                         rte_memcpy(&rule->ixgbe_fdir.formatted.tni_vni,
2526                         nvgre_spec->tni, RTE_DIM(nvgre_spec->tni));
2527                         rule->ixgbe_fdir.formatted.tni_vni <<= 8;
2528                 }
2529         }
2530
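             /* After the tunnel header, the pattern describes the inner frame. */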
2531         /* Check if the next not void item is the inner MAC (ETH). */
2532         item = next_no_void_pattern(pattern, item);
2533         if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
2534                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2535                 rte_flow_error_set(error, EINVAL,
2536                         RTE_FLOW_ERROR_TYPE_ITEM,
2537                         item, "Not supported by fdir filter");
2538                 return -rte_errno;
2539         }
2540
2541         /**
2542          * Only the VLAN and the destination MAC address are supported;
2543          * all other fields should be masked.
2544          */
2545
2546         if (!item->mask) {
2547                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2548                 rte_flow_error_set(error, EINVAL,
2549                         RTE_FLOW_ERROR_TYPE_ITEM,
2550                         item, "Not supported by fdir filter");
2551                 return -rte_errno;
2552         }
2553         /* Not supported last point for range */
2554         if (item->last) {
2555                 rte_flow_error_set(error, EINVAL,
2556                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2557                         item, "Not supported last point for range");
2558                 return -rte_errno;
2559         }
2560         rule->b_mask = TRUE;
2561         eth_mask = (const struct rte_flow_item_eth *)item->mask;
2562
2563         /* The Ether type must not be matched (its mask must be zero). */
2564         if (eth_mask->type) {
2565                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2566                 rte_flow_error_set(error, EINVAL,
2567                         RTE_FLOW_ERROR_TYPE_ITEM,
2568                         item, "Not supported by fdir filter");
2569                 return -rte_errno;
2570         }
2571
2572         /* The source MAC address must not be matched (its mask must be all zero). */
2573         for (j = 0; j < ETHER_ADDR_LEN; j++) {
2574                 if (eth_mask->src.addr_bytes[j]) {
2575                         memset(rule, 0,
2576                                sizeof(struct ixgbe_fdir_rule));
2577                         rte_flow_error_set(error, EINVAL,
2578                                 RTE_FLOW_ERROR_TYPE_ITEM,
2579                                 item, "Not supported by fdir filter");
2580                         return -rte_errno;
2581                 }
2582         }
2583         rule->mask.mac_addr_byte_mask = 0;
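             /* Build a per-byte mask: bit j is set when dst MAC byte j is fully masked. */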
2584         for (j = 0; j < ETHER_ADDR_LEN; j++) {
2585                 /* It's a per byte mask. */
2586                 if (eth_mask->dst.addr_bytes[j] == 0xFF) {
2587                         rule->mask.mac_addr_byte_mask |= 0x1 << j;
2588                 } else if (eth_mask->dst.addr_bytes[j]) {
2589                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2590                         rte_flow_error_set(error, EINVAL,
2591                                 RTE_FLOW_ERROR_TYPE_ITEM,
2592                                 item, "Not supported by fdir filter");
2593                         return -rte_errno;
2594                 }
2595         }
2596
2597         /* When there is no VLAN item, treat the TCI as fully masked. */
2598         rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
2599
2600         if (item->spec) {
2601                 rule->b_spec = TRUE;
2602                 eth_spec = (const struct rte_flow_item_eth *)item->spec;
2603
2604                 /* Get the dst MAC. */
2605                 for (j = 0; j < ETHER_ADDR_LEN; j++) {
2606                         rule->ixgbe_fdir.formatted.inner_mac[j] =
2607                                 eth_spec->dst.addr_bytes[j];
2608                 }
2609         }
2610
2611         /**
2612          * Check if the next not void item is vlan or ipv4.
2613          * IPv6 is not supported.
2614          */
2615         item = next_no_void_pattern(pattern, item);
2616         if ((item->type != RTE_FLOW_ITEM_TYPE_VLAN) &&
2617                 (item->type != RTE_FLOW_ITEM_TYPE_IPV4)) {
2618                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2619                 rte_flow_error_set(error, EINVAL,
2620                         RTE_FLOW_ERROR_TYPE_ITEM,
2621                         item, "Not supported by fdir filter");
2622                 return -rte_errno;
2623         }
2624         /* Not supported last point for range */
2625         if (item->last) {
2626                 rte_flow_error_set(error, EINVAL,
2627                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2628                         item, "Not supported last point for range");
2629                 return -rte_errno;
2630         }
2631
2632         if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
2633                 if (!(item->spec && item->mask)) {
2634                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2635                         rte_flow_error_set(error, EINVAL,
2636                                 RTE_FLOW_ERROR_TYPE_ITEM,
2637                                 item, "Not supported by fdir filter");
2638                         return -rte_errno;
2639                 }
2640
2641                 vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
2642                 vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
2643
2644                 rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
2645
2646                 rule->mask.vlan_tci_mask = vlan_mask->tci;
2647                 rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
2648                 /* More than one VLAN tag is not supported. */
2649
2650                 /* check if the next not void item is END */
2651                 item = next_no_void_pattern(pattern, item);
2652
2653                 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2654                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2655                         rte_flow_error_set(error, EINVAL,
2656                                 RTE_FLOW_ERROR_TYPE_ITEM,
2657                                 item, "Not supported by fdir filter");
2658                         return -rte_errno;
2659                 }
2660         }
2661
2662         /**
2663          * If the TCI mask is 0, the VLAN is a don't care.
2664          * Do nothing.
2665          */
2666
2667         return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
2668 }
2669
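     /**
      * Parse an FDIR rule: check that the MAC type supports FDIR, try the
      * normal (non-tunnel) parser and then the tunnel parser, and validate
      * the result against the configured FDIR mode and the number of
      * Rx queues.
      */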
2670 static int
2671 ixgbe_parse_fdir_filter(struct rte_eth_dev *dev,
2672                         const struct rte_flow_attr *attr,
2673                         const struct rte_flow_item pattern[],
2674                         const struct rte_flow_action actions[],
2675                         struct ixgbe_fdir_rule *rule,
2676                         struct rte_flow_error *error)
2677 {
2678         int ret;
2679         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2680         enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
2681
2682         if (hw->mac.type != ixgbe_mac_82599EB &&
2683                 hw->mac.type != ixgbe_mac_X540 &&
2684                 hw->mac.type != ixgbe_mac_X550 &&
2685                 hw->mac.type != ixgbe_mac_X550EM_x &&
2686                 hw->mac.type != ixgbe_mac_X550EM_a)
2687                 return -ENOTSUP;
2688
2689         ret = ixgbe_parse_fdir_filter_normal(dev, attr, pattern,
2690                                         actions, rule, error);
2691
2692         if (!ret)
2693                 goto step_next;
2694
2695         ret = ixgbe_parse_fdir_filter_tunnel(attr, pattern,
2696                                         actions, rule, error);
2697
2698         if (ret)
2699                 return ret;
2700
2701 step_next:
2702
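             /* An 82599 cannot use the drop action for a rule that matches L4 ports. */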
2703         if (hw->mac.type == ixgbe_mac_82599EB &&
2704                 rule->fdirflags == IXGBE_FDIRCMD_DROP &&
2705                 (rule->ixgbe_fdir.formatted.src_port != 0 ||
2706                 rule->ixgbe_fdir.formatted.dst_port != 0))
2707                 return -ENOTSUP;
2708
2709         if (fdir_mode == RTE_FDIR_MODE_NONE ||
2710             fdir_mode != rule->mode)
2711                 return -ENOTSUP;
2712
2713         if (rule->queue >= dev->data->nb_rx_queues)
2714                 return -ENOTSUP;
2715
2716         return ret;
2717 }
2718
2719 void
2720 ixgbe_filterlist_init(void)
2721 {
2722         TAILQ_INIT(&filter_ntuple_list);
2723         TAILQ_INIT(&filter_ethertype_list);
2724         TAILQ_INIT(&filter_syn_list);
2725         TAILQ_INIT(&filter_fdir_list);
2726         TAILQ_INIT(&filter_l2_tunnel_list);
2727         TAILQ_INIT(&ixgbe_flow_list);
2728 }
2729
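     /* Free every filter entry and every rte_flow object tracked in the lists above. */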
2730 void
2731 ixgbe_filterlist_flush(void)
2732 {
2733         struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
2734         struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
2735         struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
2736         struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2737         struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
2738         struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
2739
2740         while ((ntuple_filter_ptr = TAILQ_FIRST(&filter_ntuple_list))) {
2741                 TAILQ_REMOVE(&filter_ntuple_list,
2742                                  ntuple_filter_ptr,
2743                                  entries);
2744                 rte_free(ntuple_filter_ptr);
2745         }
2746
2747         while ((ethertype_filter_ptr = TAILQ_FIRST(&filter_ethertype_list))) {
2748                 TAILQ_REMOVE(&filter_ethertype_list,
2749                                  ethertype_filter_ptr,
2750                                  entries);
2751                 rte_free(ethertype_filter_ptr);
2752         }
2753
2754         while ((syn_filter_ptr = TAILQ_FIRST(&filter_syn_list))) {
2755                 TAILQ_REMOVE(&filter_syn_list,
2756                                  syn_filter_ptr,
2757                                  entries);
2758                 rte_free(syn_filter_ptr);
2759         }
2760
2761         while ((l2_tn_filter_ptr = TAILQ_FIRST(&filter_l2_tunnel_list))) {
2762                 TAILQ_REMOVE(&filter_l2_tunnel_list,
2763                                  l2_tn_filter_ptr,
2764                                  entries);
2765                 rte_free(l2_tn_filter_ptr);
2766         }
2767
2768         while ((fdir_rule_ptr = TAILQ_FIRST(&filter_fdir_list))) {
2769                 TAILQ_REMOVE(&filter_fdir_list,
2770                                  fdir_rule_ptr,
2771                                  entries);
2772                 rte_free(fdir_rule_ptr);
2773         }
2774
2775         while ((ixgbe_flow_mem_ptr = TAILQ_FIRST(&ixgbe_flow_list))) {
2776                 TAILQ_REMOVE(&ixgbe_flow_list,
2777                                  ixgbe_flow_mem_ptr,
2778                                  entries);
2779                 rte_free(ixgbe_flow_mem_ptr->flow);
2780                 rte_free(ixgbe_flow_mem_ptr);
2781         }
2782 }
2783
2784 /**
2785  * Create a flow rule.
2786  * Theoretically one rule can match more than one kind of filter.
2787  * We let it use the first filter type it hits.
2788  * So, the order of the parsers below matters.
2789  */
2790 static struct rte_flow *
2791 ixgbe_flow_create(struct rte_eth_dev *dev,
2792                   const struct rte_flow_attr *attr,
2793                   const struct rte_flow_item pattern[],
2794                   const struct rte_flow_action actions[],
2795                   struct rte_flow_error *error)
2796 {
2797         int ret;
2798         struct rte_eth_ntuple_filter ntuple_filter;
2799         struct rte_eth_ethertype_filter ethertype_filter;
2800         struct rte_eth_syn_filter syn_filter;
2801         struct ixgbe_fdir_rule fdir_rule;
2802         struct rte_eth_l2_tunnel_conf l2_tn_filter;
2803         struct ixgbe_hw_fdir_info *fdir_info =
2804                 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
2805         struct rte_flow *flow = NULL;
2806         struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
2807         struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
2808         struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
2809         struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2810         struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
2811         struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
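             /* Set when this call programs the global FDIR mask, so it can
              * be rolled back if programming the rule fails.
              */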
2812         uint8_t first_mask = FALSE;
2813
2814         flow = rte_zmalloc("ixgbe_rte_flow", sizeof(struct rte_flow), 0);
2815         if (!flow) {
2816                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2817                 return (struct rte_flow *)flow;
2818         }
2819         ixgbe_flow_mem_ptr = rte_zmalloc("ixgbe_flow_mem",
2820                         sizeof(struct ixgbe_flow_mem), 0);
2821         if (!ixgbe_flow_mem_ptr) {
2822                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2823                 rte_free(flow);
2824                 return NULL;
2825         }
2826         ixgbe_flow_mem_ptr->flow = flow;
2827         TAILQ_INSERT_TAIL(&ixgbe_flow_list,
2828                                 ixgbe_flow_mem_ptr, entries);
2829
2830         memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
2831         ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
2832                         actions, &ntuple_filter, error);
2833
2834 #ifdef RTE_LIBRTE_SECURITY
2835         /* An ESP flow is not really a flow. */
2836         if (ntuple_filter.proto == IPPROTO_ESP)
2837                 return flow;
2838 #endif
2839
2840         if (!ret) {
2841                 ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
2842                 if (!ret) {
2843                         ntuple_filter_ptr = rte_zmalloc("ixgbe_ntuple_filter",
2844                                 sizeof(struct ixgbe_ntuple_filter_ele), 0);
2845                         if (!ntuple_filter_ptr) {
2846                                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2847                                 goto out;
2848                         }
2849                         rte_memcpy(&ntuple_filter_ptr->filter_info,
2850                                 &ntuple_filter,
2851                                 sizeof(struct rte_eth_ntuple_filter));
2852                         TAILQ_INSERT_TAIL(&filter_ntuple_list,
2853                                 ntuple_filter_ptr, entries);
2854                         flow->rule = ntuple_filter_ptr;
2855                         flow->filter_type = RTE_ETH_FILTER_NTUPLE;
2856                         return flow;
2857                 }
2858                 goto out;
2859         }
2860
2861         memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
2862         ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
2863                                 actions, &ethertype_filter, error);
2864         if (!ret) {
2865                 ret = ixgbe_add_del_ethertype_filter(dev,
2866                                 &ethertype_filter, TRUE);
2867                 if (!ret) {
2868                         ethertype_filter_ptr = rte_zmalloc(
2869                                 "ixgbe_ethertype_filter",
2870                                 sizeof(struct ixgbe_ethertype_filter_ele), 0);
2871                         if (!ethertype_filter_ptr) {
2872                                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2873                                 goto out;
2874                         }
2875                         rte_memcpy(&ethertype_filter_ptr->filter_info,
2876                                 &ethertype_filter,
2877                                 sizeof(struct rte_eth_ethertype_filter));
2878                         TAILQ_INSERT_TAIL(&filter_ethertype_list,
2879                                 ethertype_filter_ptr, entries);
2880                         flow->rule = ethertype_filter_ptr;
2881                         flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
2882                         return flow;
2883                 }
2884                 goto out;
2885         }
2886
2887         memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
2888         ret = ixgbe_parse_syn_filter(dev, attr, pattern,
2889                                 actions, &syn_filter, error);
2890         if (!ret) {
2891                 ret = ixgbe_syn_filter_set(dev, &syn_filter, TRUE);
2892                 if (!ret) {
2893                         syn_filter_ptr = rte_zmalloc("ixgbe_syn_filter",
2894                                 sizeof(struct ixgbe_eth_syn_filter_ele), 0);
2895                         if (!syn_filter_ptr) {
2896                                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2897                                 goto out;
2898                         }
2899                         rte_memcpy(&syn_filter_ptr->filter_info,
2900                                 &syn_filter,
2901                                 sizeof(struct rte_eth_syn_filter));
2902                         TAILQ_INSERT_TAIL(&filter_syn_list,
2903                                 syn_filter_ptr,
2904                                 entries);
2905                         flow->rule = syn_filter_ptr;
2906                         flow->filter_type = RTE_ETH_FILTER_SYN;
2907                         return flow;
2908                 }
2909                 goto out;
2910         }
2911
2912         memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
2913         ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
2914                                 actions, &fdir_rule, error);
2915         if (!ret) {
2916                 /* The FDIR input mask is global and cannot be deleted. */
2917                 if (fdir_rule.b_mask) {
2918                         if (!fdir_info->mask_added) {
2919                                 /* It's the first time the mask is set. */
2920                                 rte_memcpy(&fdir_info->mask,
2921                                         &fdir_rule.mask,
2922                                         sizeof(struct ixgbe_hw_fdir_mask));
2923                                 fdir_info->flex_bytes_offset =
2924                                         fdir_rule.flex_bytes_offset;
2925
2926                                 if (fdir_rule.mask.flex_bytes_mask)
2927                                         ixgbe_fdir_set_flexbytes_offset(dev,
2928                                                 fdir_rule.flex_bytes_offset);
2929
2930                                 ret = ixgbe_fdir_set_input_mask(dev);
2931                                 if (ret)
2932                                         goto out;
2933
2934                                 fdir_info->mask_added = TRUE;
2935                                 first_mask = TRUE;
2936                         } else {
2937                                 /**
2938                                  * Only one global mask is supported;
2939                                  * all FDIR rules must use the same mask.
2940                                  */
2941                                 ret = memcmp(&fdir_info->mask,
2942                                         &fdir_rule.mask,
2943                                         sizeof(struct ixgbe_hw_fdir_mask));
2944                                 if (ret)
2945                                         goto out;
2946
2947                                 if (fdir_info->flex_bytes_offset !=
2948                                                 fdir_rule.flex_bytes_offset)
2949                                         goto out;
2950                         }
2951                 }
2952
2953                 if (fdir_rule.b_spec) {
2954                         ret = ixgbe_fdir_filter_program(dev, &fdir_rule,
2955                                         FALSE, FALSE);
2956                         if (!ret) {
2957                                 fdir_rule_ptr = rte_zmalloc("ixgbe_fdir_filter",
2958                                         sizeof(struct ixgbe_fdir_rule_ele), 0);
2959                                 if (!fdir_rule_ptr) {
2960                                         PMD_DRV_LOG(ERR, "failed to allocate memory");
2961                                         goto out;
2962                                 }
2963                                 rte_memcpy(&fdir_rule_ptr->filter_info,
2964                                         &fdir_rule,
2965                                         sizeof(struct ixgbe_fdir_rule));
2966                                 TAILQ_INSERT_TAIL(&filter_fdir_list,
2967                                         fdir_rule_ptr, entries);
2968                                 flow->rule = fdir_rule_ptr;
2969                                 flow->filter_type = RTE_ETH_FILTER_FDIR;
2970
2971                                 return flow;
2972                         }
2973
2974                         if (ret) {
2975                                 /**
2976                                  * Clear the mask_added flag if programming
2977                                  * the rule fails.
2978                                  */
2979                                 if (first_mask)
2980                                         fdir_info->mask_added = FALSE;
2981                                 goto out;
2982                         }
2983                 }
2984
2985                 goto out;
2986         }
2987
2988         memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
2989         ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
2990                                         actions, &l2_tn_filter, error);
2991         if (!ret) {
2992                 ret = ixgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_filter, FALSE);
2993                 if (!ret) {
2994                         l2_tn_filter_ptr = rte_zmalloc("ixgbe_l2_tn_filter",
2995                                 sizeof(struct ixgbe_eth_l2_tunnel_conf_ele), 0);
2996                         if (!l2_tn_filter_ptr) {
2997                                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2998                                 goto out;
2999                         }
3000                         rte_memcpy(&l2_tn_filter_ptr->filter_info,
3001                                 &l2_tn_filter,
3002                                 sizeof(struct rte_eth_l2_tunnel_conf));
3003                         TAILQ_INSERT_TAIL(&filter_l2_tunnel_list,
3004                                 l2_tn_filter_ptr, entries);
3005                         flow->rule = l2_tn_filter_ptr;
3006                         flow->filter_type = RTE_ETH_FILTER_L2_TUNNEL;
3007                         return flow;
3008                 }
3009         }
3010
3011 out:
3012         TAILQ_REMOVE(&ixgbe_flow_list,
3013                 ixgbe_flow_mem_ptr, entries);
3014         rte_flow_error_set(error, -ret,
3015                            RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
3016                            "Failed to create flow.");
3017         rte_free(ixgbe_flow_mem_ptr);
3018         rte_free(flow);
3019         return NULL;
3020 }
3021
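     /*
      * Illustrative sketch (not part of the driver): a rule that passes
      * validation can still fail at creation time for lack of resources.
      * Assuming port_id is bound to ixgbe and attr/pattern/actions describe
      * a supported rule:
      *
      *	struct rte_flow_error err;
      *	struct rte_flow *f;
      *
      *	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0) {
      *		f = rte_flow_create(port_id, &attr, pattern, actions, &err);
      *		if (f == NULL)
      *			printf("valid rule not programmed: %s\n",
      *			       err.message ? err.message : "no room");
      *	}
      */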
3022 /**
3023  * Check if the flow rule is supported by ixgbe.
3024  * It only checks the format. It does not guarantee that the rule can be
3025  * programmed into the HW, because there may not be enough room for it.
3026  */
3027 static int
3028 ixgbe_flow_validate(struct rte_eth_dev *dev,
3029                 const struct rte_flow_attr *attr,
3030                 const struct rte_flow_item pattern[],
3031                 const struct rte_flow_action actions[],
3032                 struct rte_flow_error *error)
3033 {
3034         struct rte_eth_ntuple_filter ntuple_filter;
3035         struct rte_eth_ethertype_filter ethertype_filter;
3036         struct rte_eth_syn_filter syn_filter;
3037         struct rte_eth_l2_tunnel_conf l2_tn_filter;
3038         struct ixgbe_fdir_rule fdir_rule;
3039         int ret;
3040
3041         memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
3042         ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
3043                                 actions, &ntuple_filter, error);
3044         if (!ret)
3045                 return 0;
3046
3047         memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
3048         ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
3049                                 actions, &ethertype_filter, error);
3050         if (!ret)
3051                 return 0;
3052
3053         memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
3054         ret = ixgbe_parse_syn_filter(dev, attr, pattern,
3055                                 actions, &syn_filter, error);
3056         if (!ret)
3057                 return 0;
3058
3059         memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
3060         ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
3061                                 actions, &fdir_rule, error);
3062         if (!ret)
3063                 return 0;
3064
3065         memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
3066         ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
3067                                 actions, &l2_tn_filter, error);
3068
3069         return ret;
3070 }
3071
3072 /* Destroy a flow rule on ixgbe. */
3073 static int
3074 ixgbe_flow_destroy(struct rte_eth_dev *dev,
3075                 struct rte_flow *flow,
3076                 struct rte_flow_error *error)
3077 {
3078         int ret;
3079         struct rte_flow *pmd_flow = flow;
3080         enum rte_filter_type filter_type = pmd_flow->filter_type;
3081         struct rte_eth_ntuple_filter ntuple_filter;
3082         struct rte_eth_ethertype_filter ethertype_filter;
3083         struct rte_eth_syn_filter syn_filter;
3084         struct ixgbe_fdir_rule fdir_rule;
3085         struct rte_eth_l2_tunnel_conf l2_tn_filter;
3086         struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
3087         struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
3088         struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
3089         struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
3090         struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
3091         struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
3092         struct ixgbe_hw_fdir_info *fdir_info =
3093                 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
3094
3095         switch (filter_type) {
3096         case RTE_ETH_FILTER_NTUPLE:
3097                 ntuple_filter_ptr = (struct ixgbe_ntuple_filter_ele *)
3098                                         pmd_flow->rule;
3099                 rte_memcpy(&ntuple_filter,
3100                         &ntuple_filter_ptr->filter_info,
3101                         sizeof(struct rte_eth_ntuple_filter));
3102                 ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, FALSE);
3103                 if (!ret) {
3104                         TAILQ_REMOVE(&filter_ntuple_list,
3105                         ntuple_filter_ptr, entries);
3106                         rte_free(ntuple_filter_ptr);
3107                 }
3108                 break;
3109         case RTE_ETH_FILTER_ETHERTYPE:
3110                 ethertype_filter_ptr = (struct ixgbe_ethertype_filter_ele *)
3111                                         pmd_flow->rule;
3112                 rte_memcpy(&ethertype_filter,
3113                         &ethertype_filter_ptr->filter_info,
3114                         sizeof(struct rte_eth_ethertype_filter));
3115                 ret = ixgbe_add_del_ethertype_filter(dev,
3116                                 &ethertype_filter, FALSE);
3117                 if (!ret) {
3118                         TAILQ_REMOVE(&filter_ethertype_list,
3119                                 ethertype_filter_ptr, entries);
3120                         rte_free(ethertype_filter_ptr);
3121                 }
3122                 break;
3123         case RTE_ETH_FILTER_SYN:
3124                 syn_filter_ptr = (struct ixgbe_eth_syn_filter_ele *)
3125                                 pmd_flow->rule;
3126                 rte_memcpy(&syn_filter,
3127                         &syn_filter_ptr->filter_info,
3128                         sizeof(struct rte_eth_syn_filter));
3129                 ret = ixgbe_syn_filter_set(dev, &syn_filter, FALSE);
3130                 if (!ret) {
3131                         TAILQ_REMOVE(&filter_syn_list,
3132                                 syn_filter_ptr, entries);
3133                         rte_free(syn_filter_ptr);
3134                 }
3135                 break;
3136         case RTE_ETH_FILTER_FDIR:
3137                 fdir_rule_ptr = (struct ixgbe_fdir_rule_ele *)pmd_flow->rule;
3138                 rte_memcpy(&fdir_rule,
3139                         &fdir_rule_ptr->filter_info,
3140                         sizeof(struct ixgbe_fdir_rule));
3141                 ret = ixgbe_fdir_filter_program(dev, &fdir_rule, TRUE, FALSE);
3142                 if (!ret) {
3143                         TAILQ_REMOVE(&filter_fdir_list,
3144                                 fdir_rule_ptr, entries);
3145                         rte_free(fdir_rule_ptr);
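                             /* Allow a new global mask once the last FDIR rule is gone. */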
3146                         if (TAILQ_EMPTY(&filter_fdir_list))
3147                                 fdir_info->mask_added = false;
3148                 }
3149                 break;
3150         case RTE_ETH_FILTER_L2_TUNNEL:
3151                 l2_tn_filter_ptr = (struct ixgbe_eth_l2_tunnel_conf_ele *)
3152                                 pmd_flow->rule;
3153                 rte_memcpy(&l2_tn_filter, &l2_tn_filter_ptr->filter_info,
3154                         sizeof(struct rte_eth_l2_tunnel_conf));
3155                 ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_filter);
3156                 if (!ret) {
3157                         TAILQ_REMOVE(&filter_l2_tunnel_list,
3158                                 l2_tn_filter_ptr, entries);
3159                         rte_free(l2_tn_filter_ptr);
3160                 }
3161                 break;
3162         default:
3163                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
3164                             filter_type);
3165                 ret = -EINVAL;
3166                 break;
3167         }
3168
3169         if (ret) {
3170                 rte_flow_error_set(error, EINVAL,
3171                                 RTE_FLOW_ERROR_TYPE_HANDLE,
3172                                 NULL, "Failed to destroy flow");
3173                 return ret;
3174         }
3175
3176         TAILQ_FOREACH(ixgbe_flow_mem_ptr, &ixgbe_flow_list, entries) {
3177                 if (ixgbe_flow_mem_ptr->flow == pmd_flow) {
3178                         TAILQ_REMOVE(&ixgbe_flow_list,
3179                                 ixgbe_flow_mem_ptr, entries);
3180                         rte_free(ixgbe_flow_mem_ptr);
3181                 }
3182         }
3183         rte_free(flow);
3184
3185         return ret;
3186 }
3187
3188 /*  Destroy all flow rules associated with a port on ixgbe. */
3189 static int
3190 ixgbe_flow_flush(struct rte_eth_dev *dev,
3191                 struct rte_flow_error *error)
3192 {
3193         int ret = 0;
3194
3195         ixgbe_clear_all_ntuple_filter(dev);
3196         ixgbe_clear_all_ethertype_filter(dev);
3197         ixgbe_clear_syn_filter(dev);
3198
3199         ret = ixgbe_clear_all_fdir_filter(dev);
3200         if (ret < 0) {
3201                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
3202                                         NULL, "Failed to flush rule");
3203                 return ret;
3204         }
3205
3206         ret = ixgbe_clear_all_l2_tn_filter(dev);
3207         if (ret < 0) {
3208                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
3209                                         NULL, "Failed to flush rule");
3210                 return ret;
3211         }
3212
3213         ixgbe_filterlist_flush();
3214
3215         return 0;
3216 }
3217
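     /*
      * Exposed to applications through the generic rte_flow API
      * (rte_flow_validate/create/destroy/flush); the ethdev layer looks these
      * ops up via the driver's filter_ctrl callback with RTE_ETH_FILTER_GENERIC.
      */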
3218 const struct rte_flow_ops ixgbe_flow_ops = {
3219         .validate = ixgbe_flow_validate,
3220         .create = ixgbe_flow_create,
3221         .destroy = ixgbe_flow_destroy,
3222         .flush = ixgbe_flow_flush,
3223 };