deb_dpdk.git: drivers/net/ixgbe/ixgbe_flow.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <sys/queue.h>
35 #include <stdio.h>
36 #include <errno.h>
37 #include <stdint.h>
38 #include <string.h>
39 #include <unistd.h>
40 #include <stdarg.h>
41 #include <inttypes.h>
42 #include <netinet/in.h>
43 #include <rte_byteorder.h>
44 #include <rte_common.h>
45 #include <rte_cycles.h>
46
47 #include <rte_interrupts.h>
48 #include <rte_log.h>
49 #include <rte_debug.h>
50 #include <rte_pci.h>
51 #include <rte_atomic.h>
52 #include <rte_branch_prediction.h>
53 #include <rte_memory.h>
54 #include <rte_memzone.h>
55 #include <rte_eal.h>
56 #include <rte_alarm.h>
57 #include <rte_ether.h>
58 #include <rte_ethdev.h>
59 #include <rte_malloc.h>
60 #include <rte_random.h>
61 #include <rte_dev.h>
62 #include <rte_hash_crc.h>
63 #include <rte_flow.h>
64 #include <rte_flow_driver.h>
65
66 #include "ixgbe_logs.h"
67 #include "base/ixgbe_api.h"
68 #include "base/ixgbe_vf.h"
69 #include "base/ixgbe_common.h"
70 #include "ixgbe_ethdev.h"
71 #include "ixgbe_bypass.h"
72 #include "ixgbe_rxtx.h"
73 #include "base/ixgbe_type.h"
74 #include "base/ixgbe_phy.h"
75 #include "rte_pmd_ixgbe.h"
76
77
78 #define IXGBE_MIN_N_TUPLE_PRIO 1
79 #define IXGBE_MAX_N_TUPLE_PRIO 7
80 #define IXGBE_MAX_FLX_SOURCE_OFF 62
81
82 /**
83  * An endless loop cannot occur, given the assumptions below:
84  * 1. there is at least one non-void item (END).
85  * 2. cur is located before END.
86  */
87 static inline
88 const struct rte_flow_item *next_no_void_pattern(
89                 const struct rte_flow_item pattern[],
90                 const struct rte_flow_item *cur)
91 {
92         const struct rte_flow_item *next =
93                 cur ? cur + 1 : &pattern[0];
94         while (1) {
95                 if (next->type != RTE_FLOW_ITEM_TYPE_VOID)
96                         return next;
97                 next++;
98         }
99 }
100
101 static inline
102 const struct rte_flow_action *next_no_void_action(
103                 const struct rte_flow_action actions[],
104                 const struct rte_flow_action *cur)
105 {
106         const struct rte_flow_action *next =
107                 cur ? cur + 1 : &actions[0];
108         while (1) {
109                 if (next->type != RTE_FLOW_ACTION_TYPE_VOID)
110                         return next;
111                 next++;
112         }
113 }
114
115 /**
116  * Please be aware that there is an assumption shared by all the parsers:
117  * rte_flow_item uses big-endian (network) order, while rte_flow_attr
118  * and rte_flow_action use CPU order.
119  * This is because the pattern describes packets, and packets are
120  * normally carried in network byte order (see the sketch below).
121  */
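
/**
 * Illustrative sketch (not part of the driver): how an application is
 * expected to honour the byte-order convention described above when it
 * fills in a rule. The function and variable names below are hypothetical
 * and the values are only examples.
 */
static __rte_unused void
example_byte_order_sketch(struct rte_flow_item_ipv4 *ipv4_spec,
                          struct rte_flow_action_queue *queue_conf)
{
        /* Pattern (rte_flow_item) data is big endian, i.e. network order. */
        ipv4_spec->hdr.src_addr = rte_cpu_to_be_32(0xC0A80114); /* 192.168.1.20 */
        ipv4_spec->hdr.next_proto_id = IPPROTO_UDP; /* one byte, no swapping */

        /* Attribute and action (rte_flow_action) data stay in CPU order. */
        queue_conf->index = 3;
}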
122
123 /**
124  * Parse the rule to see if it is an n-tuple rule,
125  * and extract the n-tuple filter info along the way (a sketch follows this comment).
126  * pattern:
127  * The first not void item can be ETH or IPV4.
128  * The second not void item must be IPV4 if the first one is ETH.
129  * The third not void item must be UDP or TCP.
130  * The next not void item must be END.
131  * action:
132  * The first not void action should be QUEUE.
133  * The next not void action should be END.
134  * pattern example:
135  * ITEM         Spec                    Mask
136  * ETH          NULL                    NULL
137  * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
138  *              dst_addr 192.167.3.50   0xFFFFFFFF
139  *              next_proto_id   17      0xFF
140  * UDP/TCP/     src_port        80      0xFFFF
141  * SCTP         dst_port        80      0xFFFF
142  * END
143  * Other members in mask and spec should be set to 0x00.
144  * item->last should be NULL.
145  */
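
/**
 * Illustrative sketch (not part of the driver): roughly how an application
 * could express the n-tuple example documented above through the generic
 * rte_flow API. The function name, the port_id parameter and the chosen
 * values are hypothetical; error handling is omitted for brevity.
 */
static __rte_unused int
example_ntuple_rule_sketch(uint16_t port_id)
{
        struct rte_flow_attr attr = { .ingress = 1, .priority = 1 };
        struct rte_flow_item_ipv4 ip_spec, ip_mask;
        struct rte_flow_item_udp udp_spec, udp_mask;
        struct rte_flow_action_queue queue = { .index = 1 };
        struct rte_flow_item pattern[4];
        struct rte_flow_action actions[2];
        struct rte_flow_error err;

        memset(&ip_spec, 0, sizeof(ip_spec));
        memset(&ip_mask, 0, sizeof(ip_mask));
        ip_spec.hdr.src_addr = rte_cpu_to_be_32(0xC0A80114); /* 192.168.1.20 */
        ip_spec.hdr.dst_addr = rte_cpu_to_be_32(0xC0A70332); /* 192.167.3.50 */
        ip_spec.hdr.next_proto_id = IPPROTO_UDP;
        ip_mask.hdr.src_addr = UINT32_MAX;
        ip_mask.hdr.dst_addr = UINT32_MAX;
        ip_mask.hdr.next_proto_id = UINT8_MAX;

        memset(&udp_spec, 0, sizeof(udp_spec));
        memset(&udp_mask, 0, sizeof(udp_mask));
        udp_spec.hdr.src_port = rte_cpu_to_be_16(80);
        udp_spec.hdr.dst_port = rte_cpu_to_be_16(80);
        udp_mask.hdr.src_port = UINT16_MAX;
        udp_mask.hdr.dst_port = UINT16_MAX;

        memset(pattern, 0, sizeof(pattern));
        pattern[0].type = RTE_FLOW_ITEM_TYPE_ETH;  /* spec/mask stay NULL */
        pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV4;
        pattern[1].spec = &ip_spec;
        pattern[1].mask = &ip_mask;
        pattern[2].type = RTE_FLOW_ITEM_TYPE_UDP;
        pattern[2].spec = &udp_spec;
        pattern[2].mask = &udp_mask;
        pattern[3].type = RTE_FLOW_ITEM_TYPE_END;

        memset(actions, 0, sizeof(actions));
        actions[0].type = RTE_FLOW_ACTION_TYPE_QUEUE;
        actions[0].conf = &queue;
        actions[1].type = RTE_FLOW_ACTION_TYPE_END;

        return rte_flow_validate(port_id, &attr, pattern, actions, &err);
}
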
146 static int
147 cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
148                          const struct rte_flow_item pattern[],
149                          const struct rte_flow_action actions[],
150                          struct rte_eth_ntuple_filter *filter,
151                          struct rte_flow_error *error)
152 {
153         const struct rte_flow_item *item;
154         const struct rte_flow_action *act;
155         const struct rte_flow_item_ipv4 *ipv4_spec;
156         const struct rte_flow_item_ipv4 *ipv4_mask;
157         const struct rte_flow_item_tcp *tcp_spec;
158         const struct rte_flow_item_tcp *tcp_mask;
159         const struct rte_flow_item_udp *udp_spec;
160         const struct rte_flow_item_udp *udp_mask;
161         const struct rte_flow_item_sctp *sctp_spec;
162         const struct rte_flow_item_sctp *sctp_mask;
163
164         if (!pattern) {
165                 rte_flow_error_set(error,
166                         EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
167                         NULL, "NULL pattern.");
168                 return -rte_errno;
169         }
170
171         if (!actions) {
172                 rte_flow_error_set(error, EINVAL,
173                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
174                                    NULL, "NULL action.");
175                 return -rte_errno;
176         }
177         if (!attr) {
178                 rte_flow_error_set(error, EINVAL,
179                                    RTE_FLOW_ERROR_TYPE_ATTR,
180                                    NULL, "NULL attribute.");
181                 return -rte_errno;
182         }
183
184         /* the first not void item can be MAC or IPv4 */
185         item = next_no_void_pattern(pattern, NULL);
186
187         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
188             item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
189                 rte_flow_error_set(error, EINVAL,
190                         RTE_FLOW_ERROR_TYPE_ITEM,
191                         item, "Not supported by ntuple filter");
192                 return -rte_errno;
193         }
194         /* Skip Ethernet */
195         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
196                 /*Not supported last point for range*/
197                 if (item->last) {
198                         rte_flow_error_set(error,
199                           EINVAL,
200                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
201                           item, "Not supported last point for range");
202                         return -rte_errno;
203
204                 }
205                 /* if the first item is MAC, the content should be NULL */
206                 if (item->spec || item->mask) {
207                         rte_flow_error_set(error, EINVAL,
208                                 RTE_FLOW_ERROR_TYPE_ITEM,
209                                 item, "Not supported by ntuple filter");
210                         return -rte_errno;
211                 }
212                 /* check if the next not void item is IPv4 */
213                 item = next_no_void_pattern(pattern, item);
214                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
215                         rte_flow_error_set(error,
216                           EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
217                           item, "Not supported by ntuple filter");
218                         return -rte_errno;
219                 }
220         }
221
222         /* get the IPv4 info */
223         if (!item->spec || !item->mask) {
224                 rte_flow_error_set(error, EINVAL,
225                         RTE_FLOW_ERROR_TYPE_ITEM,
226                         item, "Invalid ntuple mask");
227                 return -rte_errno;
228         }
229         /*Not supported last point for range*/
230         if (item->last) {
231                 rte_flow_error_set(error, EINVAL,
232                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
233                         item, "Not supported last point for range");
234                 return -rte_errno;
235
236         }
237
238         ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
239         /**
240          * Only support src & dst addresses, protocol,
241          * others should be masked.
242          */
243         if (ipv4_mask->hdr.version_ihl ||
244             ipv4_mask->hdr.type_of_service ||
245             ipv4_mask->hdr.total_length ||
246             ipv4_mask->hdr.packet_id ||
247             ipv4_mask->hdr.fragment_offset ||
248             ipv4_mask->hdr.time_to_live ||
249             ipv4_mask->hdr.hdr_checksum) {
250                 rte_flow_error_set(error, EINVAL,
251                         RTE_FLOW_ERROR_TYPE_ITEM,
252                         item, "Not supported by ntuple filter");
253                 return -rte_errno;
254         }
255
256         filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
257         filter->src_ip_mask = ipv4_mask->hdr.src_addr;
258         filter->proto_mask  = ipv4_mask->hdr.next_proto_id;
259
260         ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
261         filter->dst_ip = ipv4_spec->hdr.dst_addr;
262         filter->src_ip = ipv4_spec->hdr.src_addr;
263         filter->proto  = ipv4_spec->hdr.next_proto_id;
264
265         /* check if the next not void item is TCP or UDP */
266         item = next_no_void_pattern(pattern, item);
267         if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
268             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
269             item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
270             item->type != RTE_FLOW_ITEM_TYPE_END) {
271                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
272                 rte_flow_error_set(error, EINVAL,
273                         RTE_FLOW_ERROR_TYPE_ITEM,
274                         item, "Not supported by ntuple filter");
275                 return -rte_errno;
276         }
277
278         /* get the TCP/UDP info */
279         if ((item->type != RTE_FLOW_ITEM_TYPE_END) &&
280                 (!item->spec || !item->mask)) {
281                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
282                 rte_flow_error_set(error, EINVAL,
283                         RTE_FLOW_ERROR_TYPE_ITEM,
284                         item, "Invalid ntuple mask");
285                 return -rte_errno;
286         }
287
288         /*Not supported last point for range*/
289         if (item->last) {
290                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
291                 rte_flow_error_set(error, EINVAL,
292                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
293                         item, "Not supported last point for range");
294                 return -rte_errno;
295
296         }
297
298         if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
299                 tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
300
301                 /**
302                  * Only support src & dst ports, tcp flags,
303                  * others should be masked.
304                  */
305                 if (tcp_mask->hdr.sent_seq ||
306                     tcp_mask->hdr.recv_ack ||
307                     tcp_mask->hdr.data_off ||
308                     tcp_mask->hdr.rx_win ||
309                     tcp_mask->hdr.cksum ||
310                     tcp_mask->hdr.tcp_urp) {
311                         memset(filter, 0,
312                                 sizeof(struct rte_eth_ntuple_filter));
313                         rte_flow_error_set(error, EINVAL,
314                                 RTE_FLOW_ERROR_TYPE_ITEM,
315                                 item, "Not supported by ntuple filter");
316                         return -rte_errno;
317                 }
318
319                 filter->dst_port_mask  = tcp_mask->hdr.dst_port;
320                 filter->src_port_mask  = tcp_mask->hdr.src_port;
321                 if (tcp_mask->hdr.tcp_flags == 0xFF) {
322                         filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
323                 } else if (!tcp_mask->hdr.tcp_flags) {
324                         filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
325                 } else {
326                         memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
327                         rte_flow_error_set(error, EINVAL,
328                                 RTE_FLOW_ERROR_TYPE_ITEM,
329                                 item, "Not supported by ntuple filter");
330                         return -rte_errno;
331                 }
332
333                 tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
334                 filter->dst_port  = tcp_spec->hdr.dst_port;
335                 filter->src_port  = tcp_spec->hdr.src_port;
336                 filter->tcp_flags = tcp_spec->hdr.tcp_flags;
337         } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
338                 udp_mask = (const struct rte_flow_item_udp *)item->mask;
339
340                 /**
341                  * Only support src & dst ports,
342                  * others should be masked.
343                  */
344                 if (udp_mask->hdr.dgram_len ||
345                     udp_mask->hdr.dgram_cksum) {
346                         memset(filter, 0,
347                                 sizeof(struct rte_eth_ntuple_filter));
348                         rte_flow_error_set(error, EINVAL,
349                                 RTE_FLOW_ERROR_TYPE_ITEM,
350                                 item, "Not supported by ntuple filter");
351                         return -rte_errno;
352                 }
353
354                 filter->dst_port_mask = udp_mask->hdr.dst_port;
355                 filter->src_port_mask = udp_mask->hdr.src_port;
356
357                 udp_spec = (const struct rte_flow_item_udp *)item->spec;
358                 filter->dst_port = udp_spec->hdr.dst_port;
359                 filter->src_port = udp_spec->hdr.src_port;
360         } else if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
361                 sctp_mask = (const struct rte_flow_item_sctp *)item->mask;
362
363                 /**
364                  * Only support src & dst ports,
365                  * others should be masked.
366                  */
367                 if (sctp_mask->hdr.tag ||
368                     sctp_mask->hdr.cksum) {
369                         memset(filter, 0,
370                                 sizeof(struct rte_eth_ntuple_filter));
371                         rte_flow_error_set(error, EINVAL,
372                                 RTE_FLOW_ERROR_TYPE_ITEM,
373                                 item, "Not supported by ntuple filter");
374                         return -rte_errno;
375                 }
376
377                 filter->dst_port_mask = sctp_mask->hdr.dst_port;
378                 filter->src_port_mask = sctp_mask->hdr.src_port;
379
380                 sctp_spec = (const struct rte_flow_item_sctp *)item->spec;
381                 filter->dst_port = sctp_spec->hdr.dst_port;
382                 filter->src_port = sctp_spec->hdr.src_port;
383         } else {
384                 goto action;
385         }
386
387         /* check if the next not void item is END */
388         item = next_no_void_pattern(pattern, item);
389         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
390                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
391                 rte_flow_error_set(error, EINVAL,
392                         RTE_FLOW_ERROR_TYPE_ITEM,
393                         item, "Not supported by ntuple filter");
394                 return -rte_errno;
395         }
396
397 action:
398
399         /**
400          * n-tuple only supports forwarding,
401          * check if the first not void action is QUEUE.
402          */
403         act = next_no_void_action(actions, NULL);
404         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
405                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
406                 rte_flow_error_set(error, EINVAL,
407                         RTE_FLOW_ERROR_TYPE_ACTION,
408                         item, "Not supported action.");
409                 return -rte_errno;
410         }
411         filter->queue =
412                 ((const struct rte_flow_action_queue *)act->conf)->index;
413
414         /* check if the next not void action is END */
415         act = next_no_void_action(actions, act);
416         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
417                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
418                 rte_flow_error_set(error, EINVAL,
419                         RTE_FLOW_ERROR_TYPE_ACTION,
420                         act, "Not supported action.");
421                 return -rte_errno;
422         }
423
424         /* parse attr */
425         /* must be input direction */
426         if (!attr->ingress) {
427                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
428                 rte_flow_error_set(error, EINVAL,
429                                    RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
430                                    attr, "Only support ingress.");
431                 return -rte_errno;
432         }
433
434         /* not supported */
435         if (attr->egress) {
436                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
437                 rte_flow_error_set(error, EINVAL,
438                                    RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
439                                    attr, "Not support egress.");
440                 return -rte_errno;
441         }
442
443         if (attr->priority > 0xFFFF) {
444                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
445                 rte_flow_error_set(error, EINVAL,
446                                    RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
447                                    attr, "Error priority.");
448                 return -rte_errno;
449         }
450         filter->priority = (uint16_t)attr->priority;
451         if (attr->priority < IXGBE_MIN_N_TUPLE_PRIO ||
452             attr->priority > IXGBE_MAX_N_TUPLE_PRIO)
453             filter->priority = 1;
454
455         return 0;
456 }
457
458 /* a wrapper specific to ixgbe, because the filter flags are device-specific */
459 static int
460 ixgbe_parse_ntuple_filter(struct rte_eth_dev *dev,
461                           const struct rte_flow_attr *attr,
462                           const struct rte_flow_item pattern[],
463                           const struct rte_flow_action actions[],
464                           struct rte_eth_ntuple_filter *filter,
465                           struct rte_flow_error *error)
466 {
467         int ret;
468         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
469
470         MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
471
472         ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);
473
474         if (ret)
475                 return ret;
476
477         /* Ixgbe doesn't support tcp flags. */
478         if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
479                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
480                 rte_flow_error_set(error, EINVAL,
481                                    RTE_FLOW_ERROR_TYPE_ITEM,
482                                    NULL, "Not supported by ntuple filter");
483                 return -rte_errno;
484         }
485
486         /* Ixgbe only supports priorities 1..7 (IXGBE_MIN/MAX_N_TUPLE_PRIO). */
487         if (filter->priority < IXGBE_MIN_N_TUPLE_PRIO ||
488             filter->priority > IXGBE_MAX_N_TUPLE_PRIO) {
489                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
490                 rte_flow_error_set(error, EINVAL,
491                         RTE_FLOW_ERROR_TYPE_ITEM,
492                         NULL, "Priority not supported by ntuple filter");
493                 return -rte_errno;
494         }
495
496         if (filter->queue >= dev->data->nb_rx_queues)
497                 return -rte_errno;
498
499         /* fixed value for ixgbe */
500         filter->flags = RTE_5TUPLE_FLAGS;
501         return 0;
502 }
503
504 /**
505  * Parse the rule to see if it is an ethertype rule,
506  * and extract the ethertype filter info along the way (a sketch follows this comment).
507  * pattern:
508  * The first not void item can be ETH.
509  * The next not void item must be END.
510  * action:
511  * The first not void action should be QUEUE.
512  * The next not void action should be END.
513  * pattern example:
514  * ITEM         Spec                    Mask
515  * ETH          type    0x0807          0xFFFF
516  * END
517  * Other members in mask and spec should be set to 0x00.
518  * item->last should be NULL.
519  */
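
/**
 * Illustrative sketch (not part of the driver): roughly how an application
 * could express the ethertype example documented above through the generic
 * rte_flow API. The function name, port_id and queue index are hypothetical.
 */
static __rte_unused int
example_ethertype_rule_sketch(uint16_t port_id)
{
        struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_item_eth eth_spec, eth_mask;
        struct rte_flow_action_queue queue = { .index = 0 };
        struct rte_flow_item pattern[2];
        struct rte_flow_action actions[2];
        struct rte_flow_error err;

        memset(&eth_spec, 0, sizeof(eth_spec));
        memset(&eth_mask, 0, sizeof(eth_mask));
        /* The EtherType must be fully masked; MAC addresses stay all-zero. */
        eth_spec.type = rte_cpu_to_be_16(0x0807);
        eth_mask.type = UINT16_MAX;

        memset(pattern, 0, sizeof(pattern));
        pattern[0].type = RTE_FLOW_ITEM_TYPE_ETH;
        pattern[0].spec = &eth_spec;
        pattern[0].mask = &eth_mask;
        pattern[1].type = RTE_FLOW_ITEM_TYPE_END;

        memset(actions, 0, sizeof(actions));
        actions[0].type = RTE_FLOW_ACTION_TYPE_QUEUE;
        actions[0].conf = &queue;
        actions[1].type = RTE_FLOW_ACTION_TYPE_END;

        return rte_flow_validate(port_id, &attr, pattern, actions, &err);
}
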
520 static int
521 cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
522                             const struct rte_flow_item *pattern,
523                             const struct rte_flow_action *actions,
524                             struct rte_eth_ethertype_filter *filter,
525                             struct rte_flow_error *error)
526 {
527         const struct rte_flow_item *item;
528         const struct rte_flow_action *act;
529         const struct rte_flow_item_eth *eth_spec;
530         const struct rte_flow_item_eth *eth_mask;
531         const struct rte_flow_action_queue *act_q;
532
533         if (!pattern) {
534                 rte_flow_error_set(error, EINVAL,
535                                 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
536                                 NULL, "NULL pattern.");
537                 return -rte_errno;
538         }
539
540         if (!actions) {
541                 rte_flow_error_set(error, EINVAL,
542                                 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
543                                 NULL, "NULL action.");
544                 return -rte_errno;
545         }
546
547         if (!attr) {
548                 rte_flow_error_set(error, EINVAL,
549                                    RTE_FLOW_ERROR_TYPE_ATTR,
550                                    NULL, "NULL attribute.");
551                 return -rte_errno;
552         }
553
554         item = next_no_void_pattern(pattern, NULL);
555         /* The first non-void item should be MAC. */
556         if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
557                 rte_flow_error_set(error, EINVAL,
558                         RTE_FLOW_ERROR_TYPE_ITEM,
559                         item, "Not supported by ethertype filter");
560                 return -rte_errno;
561         }
562
563         /*Not supported last point for range*/
564         if (item->last) {
565                 rte_flow_error_set(error, EINVAL,
566                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
567                         item, "Not supported last point for range");
568                 return -rte_errno;
569         }
570
571         /* Get the MAC info. */
572         if (!item->spec || !item->mask) {
573                 rte_flow_error_set(error, EINVAL,
574                                 RTE_FLOW_ERROR_TYPE_ITEM,
575                                 item, "Not supported by ethertype filter");
576                 return -rte_errno;
577         }
578
579         eth_spec = (const struct rte_flow_item_eth *)item->spec;
580         eth_mask = (const struct rte_flow_item_eth *)item->mask;
581
582         /* Mask bits of source MAC address must be full of 0.
583          * Mask bits of destination MAC address must be full
584          * of 1 or full of 0.
585          */
586         if (!is_zero_ether_addr(&eth_mask->src) ||
587             (!is_zero_ether_addr(&eth_mask->dst) &&
588              !is_broadcast_ether_addr(&eth_mask->dst))) {
589                 rte_flow_error_set(error, EINVAL,
590                                 RTE_FLOW_ERROR_TYPE_ITEM,
591                                 item, "Invalid ether address mask");
592                 return -rte_errno;
593         }
594
595         if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
596                 rte_flow_error_set(error, EINVAL,
597                                 RTE_FLOW_ERROR_TYPE_ITEM,
598                                 item, "Invalid ethertype mask");
599                 return -rte_errno;
600         }
601
602         /* If mask bits of destination MAC address
603          * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
604          */
605         if (is_broadcast_ether_addr(&eth_mask->dst)) {
606                 filter->mac_addr = eth_spec->dst;
607                 filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
608         } else {
609                 filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
610         }
611         filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
612
613         /* Check if the next non-void item is END. */
614         item = next_no_void_pattern(pattern, item);
615         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
616                 rte_flow_error_set(error, EINVAL,
617                                 RTE_FLOW_ERROR_TYPE_ITEM,
618                                 item, "Not supported by ethertype filter.");
619                 return -rte_errno;
620         }
621
622         /* Parse action */
623
624         act = next_no_void_action(actions, NULL);
625         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
626             act->type != RTE_FLOW_ACTION_TYPE_DROP) {
627                 rte_flow_error_set(error, EINVAL,
628                                 RTE_FLOW_ERROR_TYPE_ACTION,
629                                 act, "Not supported action.");
630                 return -rte_errno;
631         }
632
633         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
634                 act_q = (const struct rte_flow_action_queue *)act->conf;
635                 filter->queue = act_q->index;
636         } else {
637                 filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
638         }
639
640         /* Check if the next non-void action is END */
641         act = next_no_void_action(actions, act);
642         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
643                 rte_flow_error_set(error, EINVAL,
644                                 RTE_FLOW_ERROR_TYPE_ACTION,
645                                 act, "Not supported action.");
646                 return -rte_errno;
647         }
648
649         /* Parse attr */
650         /* Must be input direction */
651         if (!attr->ingress) {
652                 rte_flow_error_set(error, EINVAL,
653                                 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
654                                 attr, "Only support ingress.");
655                 return -rte_errno;
656         }
657
658         /* Not supported */
659         if (attr->egress) {
660                 rte_flow_error_set(error, EINVAL,
661                                 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
662                                 attr, "Not support egress.");
663                 return -rte_errno;
664         }
665
666         /* Not supported */
667         if (attr->priority) {
668                 rte_flow_error_set(error, EINVAL,
669                                 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
670                                 attr, "Not support priority.");
671                 return -rte_errno;
672         }
673
674         /* Not supported */
675         if (attr->group) {
676                 rte_flow_error_set(error, EINVAL,
677                                 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
678                                 attr, "Not support group.");
679                 return -rte_errno;
680         }
681
682         return 0;
683 }
684
685 static int
686 ixgbe_parse_ethertype_filter(struct rte_eth_dev *dev,
687                                  const struct rte_flow_attr *attr,
688                              const struct rte_flow_item pattern[],
689                              const struct rte_flow_action actions[],
690                              struct rte_eth_ethertype_filter *filter,
691                              struct rte_flow_error *error)
692 {
693         int ret;
694         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
695
696         MAC_TYPE_FILTER_SUP(hw->mac.type);
697
698         ret = cons_parse_ethertype_filter(attr, pattern,
699                                         actions, filter, error);
700
701         if (ret)
702                 return ret;
703
704         /* Ixgbe doesn't support matching on MAC address. */
705         if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
706                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
707                 rte_flow_error_set(error, EINVAL,
708                         RTE_FLOW_ERROR_TYPE_ITEM,
709                         NULL, "Not supported by ethertype filter");
710                 return -rte_errno;
711         }
712
713         if (filter->queue >= dev->data->nb_rx_queues) {
714                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
715                 rte_flow_error_set(error, EINVAL,
716                         RTE_FLOW_ERROR_TYPE_ITEM,
717                         NULL, "queue index much too big");
718                 return -rte_errno;
719         }
720
721         if (filter->ether_type == ETHER_TYPE_IPv4 ||
722                 filter->ether_type == ETHER_TYPE_IPv6) {
723                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
724                 rte_flow_error_set(error, EINVAL,
725                         RTE_FLOW_ERROR_TYPE_ITEM,
726                         NULL, "IPv4/IPv6 not supported by ethertype filter");
727                 return -rte_errno;
728         }
729
730         if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
731                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
732                 rte_flow_error_set(error, EINVAL,
733                         RTE_FLOW_ERROR_TYPE_ITEM,
734                         NULL, "mac compare is unsupported");
735                 return -rte_errno;
736         }
737
738         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
739                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
740                 rte_flow_error_set(error, EINVAL,
741                         RTE_FLOW_ERROR_TYPE_ITEM,
742                         NULL, "drop option is unsupported");
743                 return -rte_errno;
744         }
745
746         return 0;
747 }
748
749 /**
750  * Parse the rule to see if it is a TCP SYN rule,
751  * and extract the TCP SYN filter info along the way (a sketch follows this comment).
752  * pattern:
753  * The first not void item must be ETH.
754  * The second not void item must be IPV4 or IPV6.
755  * The third not void item must be TCP.
756  * The next not void item must be END.
757  * action:
758  * The first not void action should be QUEUE.
759  * The next not void action should be END.
760  * pattern example:
761  * ITEM         Spec                    Mask
762  * ETH          NULL                    NULL
763  * IPV4/IPV6    NULL                    NULL
764  * TCP          tcp_flags       0x02    0xFF
765  * END
766  * other members in mask and spec should set to 0x00.
767  * item->last should be NULL.
768  */
769 static int
770 cons_parse_syn_filter(const struct rte_flow_attr *attr,
771                                 const struct rte_flow_item pattern[],
772                                 const struct rte_flow_action actions[],
773                                 struct rte_eth_syn_filter *filter,
774                                 struct rte_flow_error *error)
775 {
776         const struct rte_flow_item *item;
777         const struct rte_flow_action *act;
778         const struct rte_flow_item_tcp *tcp_spec;
779         const struct rte_flow_item_tcp *tcp_mask;
780         const struct rte_flow_action_queue *act_q;
781
782         if (!pattern) {
783                 rte_flow_error_set(error, EINVAL,
784                                 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
785                                 NULL, "NULL pattern.");
786                 return -rte_errno;
787         }
788
789         if (!actions) {
790                 rte_flow_error_set(error, EINVAL,
791                                 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
792                                 NULL, "NULL action.");
793                 return -rte_errno;
794         }
795
796         if (!attr) {
797                 rte_flow_error_set(error, EINVAL,
798                                    RTE_FLOW_ERROR_TYPE_ATTR,
799                                    NULL, "NULL attribute.");
800                 return -rte_errno;
801         }
802
803
804         /* the first not void item should be MAC or IPv4 or IPv6 or TCP */
805         item = next_no_void_pattern(pattern, NULL);
806         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
807             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
808             item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
809             item->type != RTE_FLOW_ITEM_TYPE_TCP) {
810                 rte_flow_error_set(error, EINVAL,
811                                 RTE_FLOW_ERROR_TYPE_ITEM,
812                                 item, "Not supported by syn filter");
813                 return -rte_errno;
814         }
815         /*Not supported last point for range*/
816         if (item->last) {
817                 rte_flow_error_set(error, EINVAL,
818                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
819                         item, "Not supported last point for range");
820                 return -rte_errno;
821         }
822
823         /* Skip Ethernet */
824         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
825                 /* if the item is MAC, the content should be NULL */
826                 if (item->spec || item->mask) {
827                         rte_flow_error_set(error, EINVAL,
828                                 RTE_FLOW_ERROR_TYPE_ITEM,
829                                 item, "Invalid SYN address mask");
830                         return -rte_errno;
831                 }
832
833                 /* check if the next not void item is IPv4 or IPv6 */
834                 item = next_no_void_pattern(pattern, item);
835                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
836                     item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
837                         rte_flow_error_set(error, EINVAL,
838                                 RTE_FLOW_ERROR_TYPE_ITEM,
839                                 item, "Not supported by syn filter");
840                         return -rte_errno;
841                 }
842         }
843
844         /* Skip IP */
845         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
846             item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
847                 /* if the item is IP, the content should be NULL */
848                 if (item->spec || item->mask) {
849                         rte_flow_error_set(error, EINVAL,
850                                 RTE_FLOW_ERROR_TYPE_ITEM,
851                                 item, "Invalid SYN mask");
852                         return -rte_errno;
853                 }
854
855                 /* check if the next not void item is TCP */
856                 item = next_no_void_pattern(pattern, item);
857                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
858                         rte_flow_error_set(error, EINVAL,
859                                 RTE_FLOW_ERROR_TYPE_ITEM,
860                                 item, "Not supported by syn filter");
861                         return -rte_errno;
862                 }
863         }
864
865         /* Get the TCP info. Only support SYN. */
866         if (!item->spec || !item->mask) {
867                 rte_flow_error_set(error, EINVAL,
868                                 RTE_FLOW_ERROR_TYPE_ITEM,
869                                 item, "Invalid SYN mask");
870                 return -rte_errno;
871         }
872         /*Not supported last point for range*/
873         if (item->last) {
874                 rte_flow_error_set(error, EINVAL,
875                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
876                         item, "Not supported last point for range");
877                 return -rte_errno;
878         }
879
880         tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
881         tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
882         if (!(tcp_spec->hdr.tcp_flags & TCP_SYN_FLAG) ||
883             tcp_mask->hdr.src_port ||
884             tcp_mask->hdr.dst_port ||
885             tcp_mask->hdr.sent_seq ||
886             tcp_mask->hdr.recv_ack ||
887             tcp_mask->hdr.data_off ||
888             tcp_mask->hdr.tcp_flags != TCP_SYN_FLAG ||
889             tcp_mask->hdr.rx_win ||
890             tcp_mask->hdr.cksum ||
891             tcp_mask->hdr.tcp_urp) {
892                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
893                 rte_flow_error_set(error, EINVAL,
894                                 RTE_FLOW_ERROR_TYPE_ITEM,
895                                 item, "Not supported by syn filter");
896                 return -rte_errno;
897         }
898
899         /* check if the next not void item is END */
900         item = next_no_void_pattern(pattern, item);
901         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
902                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
903                 rte_flow_error_set(error, EINVAL,
904                                 RTE_FLOW_ERROR_TYPE_ITEM,
905                                 item, "Not supported by syn filter");
906                 return -rte_errno;
907         }
908
909         /* check if the first not void action is QUEUE. */
910         act = next_no_void_action(actions, NULL);
911         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
912                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
913                 rte_flow_error_set(error, EINVAL,
914                                 RTE_FLOW_ERROR_TYPE_ACTION,
915                                 act, "Not supported action.");
916                 return -rte_errno;
917         }
918
919         act_q = (const struct rte_flow_action_queue *)act->conf;
920         filter->queue = act_q->index;
921         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
922                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
923                 rte_flow_error_set(error, EINVAL,
924                                 RTE_FLOW_ERROR_TYPE_ACTION,
925                                 act, "Not supported action.");
926                 return -rte_errno;
927         }
928
929         /* check if the next not void action is END */
930         act = next_no_void_action(actions, act);
931         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
932                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
933                 rte_flow_error_set(error, EINVAL,
934                                 RTE_FLOW_ERROR_TYPE_ACTION,
935                                 act, "Not supported action.");
936                 return -rte_errno;
937         }
938
939         /* parse attr */
940         /* must be input direction */
941         if (!attr->ingress) {
942                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
943                 rte_flow_error_set(error, EINVAL,
944                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
945                         attr, "Only support ingress.");
946                 return -rte_errno;
947         }
948
949         /* not supported */
950         if (attr->egress) {
951                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
952                 rte_flow_error_set(error, EINVAL,
953                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
954                         attr, "Not support egress.");
955                 return -rte_errno;
956         }
957
958         /* Support 2 priorities, the lowest or highest. */
959         if (!attr->priority) {
960                 filter->hig_pri = 0;
961         } else if (attr->priority == (uint32_t)~0U) {
962                 filter->hig_pri = 1;
963         } else {
964                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
965                 rte_flow_error_set(error, EINVAL,
966                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
967                         attr, "Not support priority.");
968                 return -rte_errno;
969         }
970
971         return 0;
972 }
973
974 static int
975 ixgbe_parse_syn_filter(struct rte_eth_dev *dev,
976                                  const struct rte_flow_attr *attr,
977                              const struct rte_flow_item pattern[],
978                              const struct rte_flow_action actions[],
979                              struct rte_eth_syn_filter *filter,
980                              struct rte_flow_error *error)
981 {
982         int ret;
983         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
984
985         MAC_TYPE_FILTER_SUP(hw->mac.type);
986
987         ret = cons_parse_syn_filter(attr, pattern,
988                                         actions, filter, error);
989
990         if (ret)
991                 return ret;
992
993         if (filter->queue >= dev->data->nb_rx_queues)
994                 return -rte_errno;
995
996         return 0;
997 }
998
999 /**
1000  * Parse the rule to see if it is an L2 tunnel rule,
1001  * and extract the L2 tunnel filter info along the way (a sketch follows this comment).
1002  * Only E-tag is supported for now.
1003  * pattern:
1004  * The first not void item can be E_TAG.
1005  * The next not void item must be END.
1006  * action:
1007  * The first not void action should be QUEUE.
1008  * The next not void action should be END.
1009  * pattern example:
1010  * ITEM         Spec                    Mask
1011  * E_TAG        grp             0x1     0x3
1012  *              e_cid_base      0x309   0xFFF
1013  * END
1014  * Other members in mask and spec should be set to 0x00.
1015  * item->last should be NULL.
1016  */
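
/**
 * Illustrative sketch (not part of the driver): an E-tag (L2 tunnel) rule as
 * documented above, with GRP 0x1 and e_cid_base 0x309 packed into
 * rsvd_grp_ecid_b. Names, port_id and the queue index are hypothetical.
 */
static __rte_unused int
example_l2_tunnel_rule_sketch(uint16_t port_id)
{
        struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_item_e_tag e_tag_spec, e_tag_mask;
        struct rte_flow_action_queue queue = { .index = 0 };
        struct rte_flow_item pattern[2];
        struct rte_flow_action actions[2];
        struct rte_flow_error err;

        memset(&e_tag_spec, 0, sizeof(e_tag_spec));
        memset(&e_tag_mask, 0, sizeof(e_tag_mask));
        /* GRP (2 bits) and e_cid_base (12 bits) share rsvd_grp_ecid_b. */
        e_tag_spec.rsvd_grp_ecid_b = rte_cpu_to_be_16((0x1 << 12) | 0x309);
        e_tag_mask.rsvd_grp_ecid_b = rte_cpu_to_be_16(0x3FFF);

        memset(pattern, 0, sizeof(pattern));
        pattern[0].type = RTE_FLOW_ITEM_TYPE_E_TAG;
        pattern[0].spec = &e_tag_spec;
        pattern[0].mask = &e_tag_mask;
        pattern[1].type = RTE_FLOW_ITEM_TYPE_END;

        memset(actions, 0, sizeof(actions));
        actions[0].type = RTE_FLOW_ACTION_TYPE_QUEUE;
        actions[0].conf = &queue;
        actions[1].type = RTE_FLOW_ACTION_TYPE_END;

        return rte_flow_validate(port_id, &attr, pattern, actions, &err);
}
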
1017 static int
1018 cons_parse_l2_tn_filter(const struct rte_flow_attr *attr,
1019                         const struct rte_flow_item pattern[],
1020                         const struct rte_flow_action actions[],
1021                         struct rte_eth_l2_tunnel_conf *filter,
1022                         struct rte_flow_error *error)
1023 {
1024         const struct rte_flow_item *item;
1025         const struct rte_flow_item_e_tag *e_tag_spec;
1026         const struct rte_flow_item_e_tag *e_tag_mask;
1027         const struct rte_flow_action *act;
1028         const struct rte_flow_action_queue *act_q;
1029
1030         if (!pattern) {
1031                 rte_flow_error_set(error, EINVAL,
1032                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1033                         NULL, "NULL pattern.");
1034                 return -rte_errno;
1035         }
1036
1037         if (!actions) {
1038                 rte_flow_error_set(error, EINVAL,
1039                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1040                                    NULL, "NULL action.");
1041                 return -rte_errno;
1042         }
1043
1044         if (!attr) {
1045                 rte_flow_error_set(error, EINVAL,
1046                                    RTE_FLOW_ERROR_TYPE_ATTR,
1047                                    NULL, "NULL attribute.");
1048                 return -rte_errno;
1049         }
1050
1051         /* The first not void item should be e-tag. */
1052         item = next_no_void_pattern(pattern, NULL);
1053         if (item->type != RTE_FLOW_ITEM_TYPE_E_TAG) {
1054                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1055                 rte_flow_error_set(error, EINVAL,
1056                         RTE_FLOW_ERROR_TYPE_ITEM,
1057                         item, "Not supported by L2 tunnel filter");
1058                 return -rte_errno;
1059         }
1060
1061         if (!item->spec || !item->mask) {
1062                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1063                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
1064                         item, "Not supported by L2 tunnel filter");
1065                 return -rte_errno;
1066         }
1067
1068         /*Not supported last point for range*/
1069         if (item->last) {
1070                 rte_flow_error_set(error, EINVAL,
1071                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1072                         item, "Not supported last point for range");
1073                 return -rte_errno;
1074         }
1075
1076         e_tag_spec = (const struct rte_flow_item_e_tag *)item->spec;
1077         e_tag_mask = (const struct rte_flow_item_e_tag *)item->mask;
1078
1079         /* Only care about GRP and E cid base. */
1080         if (e_tag_mask->epcp_edei_in_ecid_b ||
1081             e_tag_mask->in_ecid_e ||
1082             e_tag_mask->ecid_e ||
1083             e_tag_mask->rsvd_grp_ecid_b != rte_cpu_to_be_16(0x3FFF)) {
1084                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1085                 rte_flow_error_set(error, EINVAL,
1086                         RTE_FLOW_ERROR_TYPE_ITEM,
1087                         item, "Not supported by L2 tunnel filter");
1088                 return -rte_errno;
1089         }
1090
1091         filter->l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
1092         /**
1093          * grp and e_cid_base are bit fields and only use 14 bits.
1094          * e-tag id is taken as little endian by HW.
1095          */
1096         filter->tunnel_id = rte_be_to_cpu_16(e_tag_spec->rsvd_grp_ecid_b);
1097
1098         /* check if the next not void item is END */
1099         item = next_no_void_pattern(pattern, item);
1100         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1101                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1102                 rte_flow_error_set(error, EINVAL,
1103                         RTE_FLOW_ERROR_TYPE_ITEM,
1104                         item, "Not supported by L2 tunnel filter");
1105                 return -rte_errno;
1106         }
1107
1108         /* parse attr */
1109         /* must be input direction */
1110         if (!attr->ingress) {
1111                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1112                 rte_flow_error_set(error, EINVAL,
1113                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1114                         attr, "Only support ingress.");
1115                 return -rte_errno;
1116         }
1117
1118         /* not supported */
1119         if (attr->egress) {
1120                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1121                 rte_flow_error_set(error, EINVAL,
1122                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1123                         attr, "Not support egress.");
1124                 return -rte_errno;
1125         }
1126
1127         /* not supported */
1128         if (attr->priority) {
1129                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1130                 rte_flow_error_set(error, EINVAL,
1131                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1132                         attr, "Not support priority.");
1133                 return -rte_errno;
1134         }
1135
1136         /* check if the first not void action is QUEUE. */
1137         act = next_no_void_action(actions, NULL);
1138         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
1139                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1140                 rte_flow_error_set(error, EINVAL,
1141                         RTE_FLOW_ERROR_TYPE_ACTION,
1142                         act, "Not supported action.");
1143                 return -rte_errno;
1144         }
1145
1146         act_q = (const struct rte_flow_action_queue *)act->conf;
1147         filter->pool = act_q->index;
1148
1149         /* check if the next not void action is END */
1150         act = next_no_void_action(actions, act);
1151         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1152                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1153                 rte_flow_error_set(error, EINVAL,
1154                         RTE_FLOW_ERROR_TYPE_ACTION,
1155                         act, "Not supported action.");
1156                 return -rte_errno;
1157         }
1158
1159         return 0;
1160 }
1161
1162 static int
1163 ixgbe_parse_l2_tn_filter(struct rte_eth_dev *dev,
1164                         const struct rte_flow_attr *attr,
1165                         const struct rte_flow_item pattern[],
1166                         const struct rte_flow_action actions[],
1167                         struct rte_eth_l2_tunnel_conf *l2_tn_filter,
1168                         struct rte_flow_error *error)
1169 {
1170         int ret = 0;
1171         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1172
1173         ret = cons_parse_l2_tn_filter(attr, pattern,
1174                                 actions, l2_tn_filter, error);
1175
1176         if (hw->mac.type != ixgbe_mac_X550 &&
1177                 hw->mac.type != ixgbe_mac_X550EM_x &&
1178                 hw->mac.type != ixgbe_mac_X550EM_a) {
1179                 memset(l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1180                 rte_flow_error_set(error, EINVAL,
1181                         RTE_FLOW_ERROR_TYPE_ITEM,
1182                         NULL, "Not supported by L2 tunnel filter");
1183                 return -rte_errno;
1184         }
1185
1186         if (l2_tn_filter->pool >= dev->data->nb_rx_queues)
1187                 return -rte_errno;
1188
1189         return ret;
1190 }
1191
1192 /* Parse the attr and action info of a flow director rule. */
1193 static int
1194 ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
1195                           const struct rte_flow_action actions[],
1196                           struct ixgbe_fdir_rule *rule,
1197                           struct rte_flow_error *error)
1198 {
1199         const struct rte_flow_action *act;
1200         const struct rte_flow_action_queue *act_q;
1201         const struct rte_flow_action_mark *mark;
1202
1203         /* parse attr */
1204         /* must be input direction */
1205         if (!attr->ingress) {
1206                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1207                 rte_flow_error_set(error, EINVAL,
1208                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1209                         attr, "Only support ingress.");
1210                 return -rte_errno;
1211         }
1212
1213         /* not supported */
1214         if (attr->egress) {
1215                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1216                 rte_flow_error_set(error, EINVAL,
1217                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1218                         attr, "Not support egress.");
1219                 return -rte_errno;
1220         }
1221
1222         /* not supported */
1223         if (attr->priority) {
1224                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1225                 rte_flow_error_set(error, EINVAL,
1226                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1227                         attr, "Not support priority.");
1228                 return -rte_errno;
1229         }
1230
1231         /* check if the first not void action is QUEUE or DROP. */
1232         act = next_no_void_action(actions, NULL);
1233         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
1234             act->type != RTE_FLOW_ACTION_TYPE_DROP) {
1235                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1236                 rte_flow_error_set(error, EINVAL,
1237                         RTE_FLOW_ERROR_TYPE_ACTION,
1238                         act, "Not supported action.");
1239                 return -rte_errno;
1240         }
1241
1242         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
1243                 act_q = (const struct rte_flow_action_queue *)act->conf;
1244                 rule->queue = act_q->index;
1245         } else { /* drop */
1246                 /* signature mode does not support drop action. */
1247                 if (rule->mode == RTE_FDIR_MODE_SIGNATURE) {
1248                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1249                         rte_flow_error_set(error, EINVAL,
1250                                 RTE_FLOW_ERROR_TYPE_ACTION,
1251                                 act, "Not supported action.");
1252                         return -rte_errno;
1253                 }
1254                 rule->fdirflags = IXGBE_FDIRCMD_DROP;
1255         }
1256
1257         /* check if the next not void action is MARK or END */
1258         act = next_no_void_action(actions, act);
1259         if ((act->type != RTE_FLOW_ACTION_TYPE_MARK) &&
1260                 (act->type != RTE_FLOW_ACTION_TYPE_END)) {
1261                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1262                 rte_flow_error_set(error, EINVAL,
1263                         RTE_FLOW_ERROR_TYPE_ACTION,
1264                         act, "Not supported action.");
1265                 return -rte_errno;
1266         }
1267
1268         rule->soft_id = 0;
1269
1270         if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
1271                 mark = (const struct rte_flow_action_mark *)act->conf;
1272                 rule->soft_id = mark->id;
1273                 act = next_no_void_action(actions, act);
1274         }
1275
1276         /* check if the next not void action is END */
1277         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1278                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1279                 rte_flow_error_set(error, EINVAL,
1280                         RTE_FLOW_ERROR_TYPE_ACTION,
1281                         act, "Not supported action.");
1282                 return -rte_errno;
1283         }
1284
1285         return 0;
1286 }
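
/**
 * Illustrative sketch (not part of the driver): the QUEUE + optional MARK +
 * END action list that ixgbe_parse_fdir_act_attr() above accepts. The
 * pattern is assumed to be built by the caller; names and values are
 * hypothetical.
 */
static __rte_unused int
example_fdir_actions_sketch(uint16_t port_id,
                            const struct rte_flow_item pattern[])
{
        struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_action_queue queue = { .index = 4 };
        struct rte_flow_action_mark mark = { .id = 0x1234 };    /* soft_id */
        struct rte_flow_action actions[3];
        struct rte_flow_error err;

        memset(actions, 0, sizeof(actions));
        actions[0].type = RTE_FLOW_ACTION_TYPE_QUEUE;
        actions[0].conf = &queue;
        actions[1].type = RTE_FLOW_ACTION_TYPE_MARK;
        actions[1].conf = &mark;
        actions[2].type = RTE_FLOW_ACTION_TYPE_END;

        return rte_flow_validate(port_id, &attr, pattern, actions, &err);
}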
1287
1288 /* Search the next non-void pattern item, skipping any FUZZY items. */
1289 static inline
1290 const struct rte_flow_item *next_no_fuzzy_pattern(
1291                 const struct rte_flow_item pattern[],
1292                 const struct rte_flow_item *cur)
1293 {
1294         const struct rte_flow_item *next =
1295                 next_no_void_pattern(pattern, cur);
1296         while (1) {
1297                 if (next->type != RTE_FLOW_ITEM_TYPE_FUZZY)
1298                         return next;
1299                 next = next_no_void_pattern(pattern, next);
1300         }
1301 }
1302
1303 static inline uint8_t signature_match(const struct rte_flow_item pattern[])
1304 {
1305         const struct rte_flow_item_fuzzy *spec, *last, *mask;
1306         const struct rte_flow_item *item;
1307         uint32_t sh, lh, mh;
1308         int i = 0;
1309
1310         while (1) {
1311                 item = pattern + i;
1312                 if (item->type == RTE_FLOW_ITEM_TYPE_END)
1313                         break;
1314
1315                 if (item->type == RTE_FLOW_ITEM_TYPE_FUZZY) {
1316                         spec =
1317                         (const struct rte_flow_item_fuzzy *)item->spec;
1318                         last =
1319                         (const struct rte_flow_item_fuzzy *)item->last;
1320                         mask =
1321                         (const struct rte_flow_item_fuzzy *)item->mask;
1322
1323                         if (!spec || !mask)
1324                                 return 0;
1325
1326                         sh = spec->thresh;
1327
1328                         if (!last)
1329                                 lh = sh;
1330                         else
1331                                 lh = last->thresh;
1332
1333                         mh = mask->thresh;
1334                         sh = sh & mh;
1335                         lh = lh & mh;
1336
1337                         if (!sh || sh > lh)
1338                                 return 0;
1339
1340                         return 1;
1341                 }
1342
1343                 i++;
1344         }
1345
1346         return 0;
1347 }
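
/*
 * Illustrative sketch (compiled out): a FUZZY item that signature_match()
 * above accepts, switching the flow director rule to signature mode. Any
 * non-zero threshold under a full mask works; 1 and 0xffffffff below are
 * arbitrary sample values.
 */
#if 0
static const struct rte_flow_item_fuzzy sample_fuzzy_spec = { .thresh = 1 };
static const struct rte_flow_item_fuzzy sample_fuzzy_mask = {
        .thresh = 0xffffffff,
};
static const struct rte_flow_item sample_fuzzy_item = {
        .type = RTE_FLOW_ITEM_TYPE_FUZZY,
        .spec = &sample_fuzzy_spec,
        .last = NULL,
        .mask = &sample_fuzzy_mask,
};
#endif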
1348
1349 /**
1350  * Parse the rule to see if it is an IP or MAC VLAN flow director rule,
1351  * and get the flow director filter info as well.
1352  * UDP/TCP/SCTP PATTERN:
1353  * The first not void item can be ETH or IPV4 or IPV6
1354  * The second not void item must be IPV4 or IPV6 if the first one is ETH.
1355  * The next not void item could be UDP or TCP or SCTP (optional)
1356  * The next not void item could be RAW (for flexbyte, optional)
1357  * The next not void item must be END.
1358  * A Fuzzy Match pattern can appear at any place before END.
1359  * Fuzzy Match is optional for IPV4 but is required for IPV6
1360  * MAC VLAN PATTERN:
1361  * The first not void item must be ETH.
1362  * The second not void item must be MAC VLAN.
1363  * The next not void item must be END.
1364  * ACTION:
1365  * The first not void action should be QUEUE or DROP.
1366  * The second not void action (optional) should be MARK;
1367  * mark_id is a uint32_t number.
1368  * The next not void action should be END.
1369  * UDP/TCP/SCTP pattern example:
1370  * ITEM         Spec                    Mask
1371  * ETH          NULL                    NULL
1372  * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
1373  *              dst_addr 192.167.3.50   0xFFFFFFFF
1374  * UDP/TCP/SCTP src_port        80      0xFFFF
1375  *              dst_port        80      0xFFFF
1376  * FLEX relative        0       0x1
1377  *              search          0       0x1
1378  *              reserved        0       0
1379  *              offset          12      0xFFFFFFFF
1380  *              limit           0       0xFFFF
1381  *              length          2       0xFFFF
1382  *              pattern[0]      0x86    0xFF
1383  *              pattern[1]      0xDD    0xFF
1384  * END
1385  * MAC VLAN pattern example:
1386  * ITEM         Spec                    Mask
1387  * ETH          dst_addr
1388                 {0xAC, 0x7B, 0xA1,      {0xFF, 0xFF, 0xFF,
1389                 0x2C, 0x6D, 0x36}       0xFF, 0xFF, 0xFF}
1390  * MAC VLAN     tci     0x2016          0xEFFF
1391  * END
1392  * Other members in mask and spec should be set to 0x00.
1393  * Item->last should be NULL.
1394  */
1395 static int
1396 ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
1397                                const struct rte_flow_attr *attr,
1398                                const struct rte_flow_item pattern[],
1399                                const struct rte_flow_action actions[],
1400                                struct ixgbe_fdir_rule *rule,
1401                                struct rte_flow_error *error)
1402 {
1403         const struct rte_flow_item *item;
1404         const struct rte_flow_item_eth *eth_spec;
1405         const struct rte_flow_item_eth *eth_mask;
1406         const struct rte_flow_item_ipv4 *ipv4_spec;
1407         const struct rte_flow_item_ipv4 *ipv4_mask;
1408         const struct rte_flow_item_ipv6 *ipv6_spec;
1409         const struct rte_flow_item_ipv6 *ipv6_mask;
1410         const struct rte_flow_item_tcp *tcp_spec;
1411         const struct rte_flow_item_tcp *tcp_mask;
1412         const struct rte_flow_item_udp *udp_spec;
1413         const struct rte_flow_item_udp *udp_mask;
1414         const struct rte_flow_item_sctp *sctp_spec;
1415         const struct rte_flow_item_sctp *sctp_mask;
1416         const struct rte_flow_item_vlan *vlan_spec;
1417         const struct rte_flow_item_vlan *vlan_mask;
1418         const struct rte_flow_item_raw *raw_mask;
1419         const struct rte_flow_item_raw *raw_spec;
1420         uint8_t j;
1421
1422         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1423
1424         if (!pattern) {
1425                 rte_flow_error_set(error, EINVAL,
1426                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1427                         NULL, "NULL pattern.");
1428                 return -rte_errno;
1429         }
1430
1431         if (!actions) {
1432                 rte_flow_error_set(error, EINVAL,
1433                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1434                                    NULL, "NULL action.");
1435                 return -rte_errno;
1436         }
1437
1438         if (!attr) {
1439                 rte_flow_error_set(error, EINVAL,
1440                                    RTE_FLOW_ERROR_TYPE_ATTR,
1441                                    NULL, "NULL attribute.");
1442                 return -rte_errno;
1443         }
1444
1445         /**
1446          * Some fields may not be provided. Set spec to 0 and mask to default
1447          * value, so nothing needs to be done later for the fields not provided.
1448          */
1449         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1450         memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
1451         rule->mask.vlan_tci_mask = 0;
1452         rule->mask.flex_bytes_mask = 0;
1453
1454         /**
1455          * The first not void item should be
1456          * MAC or IPv4 or IPv6 or TCP or UDP or SCTP.
1457          */
1458         item = next_no_fuzzy_pattern(pattern, NULL);
1459         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
1460             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
1461             item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
1462             item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1463             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1464             item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
1465                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1466                 rte_flow_error_set(error, EINVAL,
1467                         RTE_FLOW_ERROR_TYPE_ITEM,
1468                         item, "Not supported by fdir filter");
1469                 return -rte_errno;
1470         }
1471
1472         if (signature_match(pattern))
1473                 rule->mode = RTE_FDIR_MODE_SIGNATURE;
1474         else
1475                 rule->mode = RTE_FDIR_MODE_PERFECT;
1476
1477         /*Not supported last point for range*/
1478         if (item->last) {
1479                 rte_flow_error_set(error, EINVAL,
1480                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1481                         item, "Not supported last point for range");
1482                 return -rte_errno;
1483         }
1484
1485         /* Get the MAC info. */
1486         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
1487                 /**
1488                  * Only support vlan and dst MAC address,
1489                  * others should be masked.
1490                  */
1491                 if (item->spec && !item->mask) {
1492                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1493                         rte_flow_error_set(error, EINVAL,
1494                                 RTE_FLOW_ERROR_TYPE_ITEM,
1495                                 item, "Not supported by fdir filter");
1496                         return -rte_errno;
1497                 }
1498
1499                 if (item->spec) {
1500                         rule->b_spec = TRUE;
1501                         eth_spec = (const struct rte_flow_item_eth *)item->spec;
1502
1503                         /* Get the dst MAC. */
1504                         for (j = 0; j < ETHER_ADDR_LEN; j++) {
1505                                 rule->ixgbe_fdir.formatted.inner_mac[j] =
1506                                         eth_spec->dst.addr_bytes[j];
1507                         }
1508                 }
1509
1510
1511                 if (item->mask) {
1512
1513                         rule->b_mask = TRUE;
1514                         eth_mask = (const struct rte_flow_item_eth *)item->mask;
1515
1516                         /* Ether type should be masked. */
1517                         if (eth_mask->type ||
1518                             rule->mode == RTE_FDIR_MODE_SIGNATURE) {
1519                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1520                                 rte_flow_error_set(error, EINVAL,
1521                                         RTE_FLOW_ERROR_TYPE_ITEM,
1522                                         item, "Not supported by fdir filter");
1523                                 return -rte_errno;
1524                         }
1525
1526                         /* If the ethernet item has a meaningful mask, it is MAC VLAN mode. */
1527                         rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN;
1528
1529                         /**
1530                          * src MAC address must be masked,
1531                          * and don't support dst MAC address mask.
1532                          */
1533                         for (j = 0; j < ETHER_ADDR_LEN; j++) {
1534                                 if (eth_mask->src.addr_bytes[j] ||
1535                                         eth_mask->dst.addr_bytes[j] != 0xFF) {
1536                                         memset(rule, 0,
1537                                         sizeof(struct ixgbe_fdir_rule));
1538                                         rte_flow_error_set(error, EINVAL,
1539                                         RTE_FLOW_ERROR_TYPE_ITEM,
1540                                         item, "Not supported by fdir filter");
1541                                         return -rte_errno;
1542                                 }
1543                         }
1544
1545                         /* When there is no VLAN item, treat the TCI as fully masked. */
1546                         rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
1547                 }
1548                 /* If both spec and mask are NULL,
1549                  * it means we don't care about the ETH item.
1550                  * Do nothing.
1551                  */
1552
1553                 /**
1554                  * Check if the next not void item is vlan or ipv4.
1555                  * IPv6 is not supported.
1556                  */
1557                 item = next_no_fuzzy_pattern(pattern, item);
1558                 if (rule->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
1559                         if (item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
1560                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1561                                 rte_flow_error_set(error, EINVAL,
1562                                         RTE_FLOW_ERROR_TYPE_ITEM,
1563                                         item, "Not supported by fdir filter");
1564                                 return -rte_errno;
1565                         }
1566                 } else {
1567                         if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
1568                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1569                                 rte_flow_error_set(error, EINVAL,
1570                                         RTE_FLOW_ERROR_TYPE_ITEM,
1571                                         item, "Not supported by fdir filter");
1572                                 return -rte_errno;
1573                         }
1574                 }
1575         }
1576
1577         if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1578                 if (!(item->spec && item->mask)) {
1579                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1580                         rte_flow_error_set(error, EINVAL,
1581                                 RTE_FLOW_ERROR_TYPE_ITEM,
1582                                 item, "Not supported by fdir filter");
1583                         return -rte_errno;
1584                 }
1585
1586                 /*Not supported last point for range*/
1587                 if (item->last) {
1588                         rte_flow_error_set(error, EINVAL,
1589                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1590                                 item, "Not supported last point for range");
1591                         return -rte_errno;
1592                 }
1593
1594                 vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
1595                 vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
1596
1597                 rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
1598
1599                 rule->mask.vlan_tci_mask = vlan_mask->tci;
1600                 rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
1601                 /* More than one VLAN tag is not supported. */
1602
1603                 /* Next not void item must be END */
1604                 item = next_no_fuzzy_pattern(pattern, item);
1605                 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1606                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1607                         rte_flow_error_set(error, EINVAL,
1608                                 RTE_FLOW_ERROR_TYPE_ITEM,
1609                                 item, "Not supported by fdir filter");
1610                         return -rte_errno;
1611                 }
1612         }
1613
1614         /* Get the IPV4 info. */
1615         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1616                 /**
1617                  * Set the flow type even if there's no content
1618                  * as we must have a flow type.
1619                  */
1620                 rule->ixgbe_fdir.formatted.flow_type =
1621                         IXGBE_ATR_FLOW_TYPE_IPV4;
1622                 /*Not supported last point for range*/
1623                 if (item->last) {
1624                         rte_flow_error_set(error, EINVAL,
1625                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1626                                 item, "Not supported last point for range");
1627                         return -rte_errno;
1628                 }
1629                 /**
1630                  * Only care about src & dst addresses,
1631                  * others should be masked.
1632                  */
1633                 if (!item->mask) {
1634                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1635                         rte_flow_error_set(error, EINVAL,
1636                                 RTE_FLOW_ERROR_TYPE_ITEM,
1637                                 item, "Not supported by fdir filter");
1638                         return -rte_errno;
1639                 }
1640                 rule->b_mask = TRUE;
1641                 ipv4_mask =
1642                         (const struct rte_flow_item_ipv4 *)item->mask;
1643                 if (ipv4_mask->hdr.version_ihl ||
1644                     ipv4_mask->hdr.type_of_service ||
1645                     ipv4_mask->hdr.total_length ||
1646                     ipv4_mask->hdr.packet_id ||
1647                     ipv4_mask->hdr.fragment_offset ||
1648                     ipv4_mask->hdr.time_to_live ||
1649                     ipv4_mask->hdr.next_proto_id ||
1650                     ipv4_mask->hdr.hdr_checksum) {
1651                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1652                         rte_flow_error_set(error, EINVAL,
1653                                 RTE_FLOW_ERROR_TYPE_ITEM,
1654                                 item, "Not supported by fdir filter");
1655                         return -rte_errno;
1656                 }
1657                 rule->mask.dst_ipv4_mask = ipv4_mask->hdr.dst_addr;
1658                 rule->mask.src_ipv4_mask = ipv4_mask->hdr.src_addr;
1659
1660                 if (item->spec) {
1661                         rule->b_spec = TRUE;
1662                         ipv4_spec =
1663                                 (const struct rte_flow_item_ipv4 *)item->spec;
1664                         rule->ixgbe_fdir.formatted.dst_ip[0] =
1665                                 ipv4_spec->hdr.dst_addr;
1666                         rule->ixgbe_fdir.formatted.src_ip[0] =
1667                                 ipv4_spec->hdr.src_addr;
1668                 }
1669
1670                 /**
1671                  * Check if the next not void item is
1672                  * TCP or UDP or SCTP or END.
1673                  */
1674                 item = next_no_fuzzy_pattern(pattern, item);
1675                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1676                     item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1677                     item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
1678                     item->type != RTE_FLOW_ITEM_TYPE_END &&
1679                     item->type != RTE_FLOW_ITEM_TYPE_RAW) {
1680                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1681                         rte_flow_error_set(error, EINVAL,
1682                                 RTE_FLOW_ERROR_TYPE_ITEM,
1683                                 item, "Not supported by fdir filter");
1684                         return -rte_errno;
1685                 }
1686         }
1687
1688         /* Get the IPV6 info. */
1689         if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
1690                 /**
1691                  * Set the flow type even if there's no content
1692                  * as we must have a flow type.
1693                  */
1694                 rule->ixgbe_fdir.formatted.flow_type =
1695                         IXGBE_ATR_FLOW_TYPE_IPV6;
1696
1697                 /**
1698                  * 1. it must be a signature match rule,
1699                  * 2. item->last is not supported,
1700                  * 3. the mask must not be NULL.
1701                  */
1702                 if (rule->mode != RTE_FDIR_MODE_SIGNATURE ||
1703                     item->last ||
1704                     !item->mask) {
1705                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1706                         rte_flow_error_set(error, EINVAL,
1707                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1708                                 item, "Not supported last point for range");
1709                         return -rte_errno;
1710                 }
1711
1712                 rule->b_mask = TRUE;
1713                 ipv6_mask =
1714                         (const struct rte_flow_item_ipv6 *)item->mask;
1715                 if (ipv6_mask->hdr.vtc_flow ||
1716                     ipv6_mask->hdr.payload_len ||
1717                     ipv6_mask->hdr.proto ||
1718                     ipv6_mask->hdr.hop_limits) {
1719                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1720                         rte_flow_error_set(error, EINVAL,
1721                                 RTE_FLOW_ERROR_TYPE_ITEM,
1722                                 item, "Not supported by fdir filter");
1723                         return -rte_errno;
1724                 }
1725
1726                 /* check src addr mask */
1727                 for (j = 0; j < 16; j++) {
1728                         if (ipv6_mask->hdr.src_addr[j] == UINT8_MAX) {
1729                                 rule->mask.src_ipv6_mask |= 1 << j;
1730                         } else if (ipv6_mask->hdr.src_addr[j] != 0) {
1731                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1732                                 rte_flow_error_set(error, EINVAL,
1733                                         RTE_FLOW_ERROR_TYPE_ITEM,
1734                                         item, "Not supported by fdir filter");
1735                                 return -rte_errno;
1736                         }
1737                 }
1738
1739                 /* check dst addr mask */
1740                 for (j = 0; j < 16; j++) {
1741                         if (ipv6_mask->hdr.dst_addr[j] == UINT8_MAX) {
1742                                 rule->mask.dst_ipv6_mask |= 1 << j;
1743                         } else if (ipv6_mask->hdr.dst_addr[j] != 0) {
1744                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1745                                 rte_flow_error_set(error, EINVAL,
1746                                         RTE_FLOW_ERROR_TYPE_ITEM,
1747                                         item, "Not supported by fdir filter");
1748                                 return -rte_errno;
1749                         }
1750                 }
1751
1752                 if (item->spec) {
1753                         rule->b_spec = TRUE;
1754                         ipv6_spec =
1755                                 (const struct rte_flow_item_ipv6 *)item->spec;
1756                         rte_memcpy(rule->ixgbe_fdir.formatted.src_ip,
1757                                    ipv6_spec->hdr.src_addr, 16);
1758                         rte_memcpy(rule->ixgbe_fdir.formatted.dst_ip,
1759                                    ipv6_spec->hdr.dst_addr, 16);
1760                 }
1761
1762                 /**
1763                  * Check if the next not void item is
1764                  * TCP or UDP or SCTP or END.
1765                  */
1766                 item = next_no_fuzzy_pattern(pattern, item);
1767                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1768                     item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1769                     item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
1770                     item->type != RTE_FLOW_ITEM_TYPE_END &&
1771                     item->type != RTE_FLOW_ITEM_TYPE_RAW) {
1772                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1773                         rte_flow_error_set(error, EINVAL,
1774                                 RTE_FLOW_ERROR_TYPE_ITEM,
1775                                 item, "Not supported by fdir filter");
1776                         return -rte_errno;
1777                 }
1778         }
1779
1780         /* Get the TCP info. */
1781         if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
1782                 /**
1783                  * Set the flow type even if there's no content
1784                  * as we must have a flow type.
1785                  */
1786                 rule->ixgbe_fdir.formatted.flow_type |=
1787                         IXGBE_ATR_L4TYPE_TCP;
1788                 /*Not supported last point for range*/
1789                 if (item->last) {
1790                         rte_flow_error_set(error, EINVAL,
1791                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1792                                 item, "Not supported last point for range");
1793                         return -rte_errno;
1794                 }
1795                 /**
1796                  * Only care about src & dst ports,
1797                  * others should be masked.
1798                  */
1799                 if (!item->mask) {
1800                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1801                         rte_flow_error_set(error, EINVAL,
1802                                 RTE_FLOW_ERROR_TYPE_ITEM,
1803                                 item, "Not supported by fdir filter");
1804                         return -rte_errno;
1805                 }
1806                 rule->b_mask = TRUE;
1807                 tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
1808                 if (tcp_mask->hdr.sent_seq ||
1809                     tcp_mask->hdr.recv_ack ||
1810                     tcp_mask->hdr.data_off ||
1811                     tcp_mask->hdr.tcp_flags ||
1812                     tcp_mask->hdr.rx_win ||
1813                     tcp_mask->hdr.cksum ||
1814                     tcp_mask->hdr.tcp_urp) {
1815                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1816                         rte_flow_error_set(error, EINVAL,
1817                                 RTE_FLOW_ERROR_TYPE_ITEM,
1818                                 item, "Not supported by fdir filter");
1819                         return -rte_errno;
1820                 }
1821                 rule->mask.src_port_mask = tcp_mask->hdr.src_port;
1822                 rule->mask.dst_port_mask = tcp_mask->hdr.dst_port;
1823
1824                 if (item->spec) {
1825                         rule->b_spec = TRUE;
1826                         tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
1827                         rule->ixgbe_fdir.formatted.src_port =
1828                                 tcp_spec->hdr.src_port;
1829                         rule->ixgbe_fdir.formatted.dst_port =
1830                                 tcp_spec->hdr.dst_port;
1831                 }
1832
1833                 item = next_no_fuzzy_pattern(pattern, item);
1834                 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
1835                     item->type != RTE_FLOW_ITEM_TYPE_END) {
1836                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1837                         rte_flow_error_set(error, EINVAL,
1838                                 RTE_FLOW_ERROR_TYPE_ITEM,
1839                                 item, "Not supported by fdir filter");
1840                         return -rte_errno;
1841                 }
1842
1843         }
1844
1845         /* Get the UDP info */
1846         if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
1847                 /**
1848                  * Set the flow type even if there's no content
1849                  * as we must have a flow type.
1850                  */
1851                 rule->ixgbe_fdir.formatted.flow_type |=
1852                         IXGBE_ATR_L4TYPE_UDP;
1853                 /*Not supported last point for range*/
1854                 if (item->last) {
1855                         rte_flow_error_set(error, EINVAL,
1856                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1857                                 item, "Not supported last point for range");
1858                         return -rte_errno;
1859                 }
1860                 /**
1861                  * Only care about src & dst ports,
1862                  * others should be masked.
1863                  */
1864                 if (!item->mask) {
1865                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1866                         rte_flow_error_set(error, EINVAL,
1867                                 RTE_FLOW_ERROR_TYPE_ITEM,
1868                                 item, "Not supported by fdir filter");
1869                         return -rte_errno;
1870                 }
1871                 rule->b_mask = TRUE;
1872                 udp_mask = (const struct rte_flow_item_udp *)item->mask;
1873                 if (udp_mask->hdr.dgram_len ||
1874                     udp_mask->hdr.dgram_cksum) {
1875                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1876                         rte_flow_error_set(error, EINVAL,
1877                                 RTE_FLOW_ERROR_TYPE_ITEM,
1878                                 item, "Not supported by fdir filter");
1879                         return -rte_errno;
1880                 }
1881                 rule->mask.src_port_mask = udp_mask->hdr.src_port;
1882                 rule->mask.dst_port_mask = udp_mask->hdr.dst_port;
1883
1884                 if (item->spec) {
1885                         rule->b_spec = TRUE;
1886                         udp_spec = (const struct rte_flow_item_udp *)item->spec;
1887                         rule->ixgbe_fdir.formatted.src_port =
1888                                 udp_spec->hdr.src_port;
1889                         rule->ixgbe_fdir.formatted.dst_port =
1890                                 udp_spec->hdr.dst_port;
1891                 }
1892
1893                 item = next_no_fuzzy_pattern(pattern, item);
1894                 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
1895                     item->type != RTE_FLOW_ITEM_TYPE_END) {
1896                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1897                         rte_flow_error_set(error, EINVAL,
1898                                 RTE_FLOW_ERROR_TYPE_ITEM,
1899                                 item, "Not supported by fdir filter");
1900                         return -rte_errno;
1901                 }
1902
1903         }
1904
1905         /* Get the SCTP info */
1906         if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
1907                 /**
1908                  * Set the flow type even if there's no content
1909                  * as we must have a flow type.
1910                  */
1911                 rule->ixgbe_fdir.formatted.flow_type |=
1912                         IXGBE_ATR_L4TYPE_SCTP;
1913                 /*Not supported last point for range*/
1914                 if (item->last) {
1915                         rte_flow_error_set(error, EINVAL,
1916                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1917                                 item, "Not supported last point for range");
1918                         return -rte_errno;
1919                 }
1920
1921                 /* Only the x550 family supports matching on the SCTP ports. */
1922                 if (hw->mac.type == ixgbe_mac_X550 ||
1923                     hw->mac.type == ixgbe_mac_X550EM_x ||
1924                     hw->mac.type == ixgbe_mac_X550EM_a) {
1925                         /**
1926                          * Only care about src & dst ports,
1927                          * others should be masked.
1928                          */
1929                         if (!item->mask) {
1930                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1931                                 rte_flow_error_set(error, EINVAL,
1932                                         RTE_FLOW_ERROR_TYPE_ITEM,
1933                                         item, "Not supported by fdir filter");
1934                                 return -rte_errno;
1935                         }
1936                         rule->b_mask = TRUE;
1937                         sctp_mask =
1938                                 (const struct rte_flow_item_sctp *)item->mask;
1939                         if (sctp_mask->hdr.tag ||
1940                                 sctp_mask->hdr.cksum) {
1941                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1942                                 rte_flow_error_set(error, EINVAL,
1943                                         RTE_FLOW_ERROR_TYPE_ITEM,
1944                                         item, "Not supported by fdir filter");
1945                                 return -rte_errno;
1946                         }
1947                         rule->mask.src_port_mask = sctp_mask->hdr.src_port;
1948                         rule->mask.dst_port_mask = sctp_mask->hdr.dst_port;
1949
1950                         if (item->spec) {
1951                                 rule->b_spec = TRUE;
1952                                 sctp_spec =
1953                                 (const struct rte_flow_item_sctp *)item->spec;
1954                                 rule->ixgbe_fdir.formatted.src_port =
1955                                         sctp_spec->hdr.src_port;
1956                                 rule->ixgbe_fdir.formatted.dst_port =
1957                                         sctp_spec->hdr.dst_port;
1958                         }
1959                 /* On other MAC types, even the SCTP ports are not supported. */
1960                 } else {
1961                         sctp_mask =
1962                                 (const struct rte_flow_item_sctp *)item->mask;
1963                         if (sctp_mask &&
1964                                 (sctp_mask->hdr.src_port ||
1965                                  sctp_mask->hdr.dst_port ||
1966                                  sctp_mask->hdr.tag ||
1967                                  sctp_mask->hdr.cksum)) {
1968                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1969                                 rte_flow_error_set(error, EINVAL,
1970                                         RTE_FLOW_ERROR_TYPE_ITEM,
1971                                         item, "Not supported by fdir filter");
1972                                 return -rte_errno;
1973                         }
1974                 }
1975
1976                 item = next_no_fuzzy_pattern(pattern, item);
1977                 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
1978                         item->type != RTE_FLOW_ITEM_TYPE_END) {
1979                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1980                         rte_flow_error_set(error, EINVAL,
1981                                 RTE_FLOW_ERROR_TYPE_ITEM,
1982                                 item, "Not supported by fdir filter");
1983                         return -rte_errno;
1984                 }
1985         }
1986
1987         /* Get the flex byte info */
1988         if (item->type == RTE_FLOW_ITEM_TYPE_RAW) {
1989                 /* Not supported last point for range*/
1990                 if (item->last) {
1991                         rte_flow_error_set(error, EINVAL,
1992                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1993                                 item, "Not supported last point for range");
1994                         return -rte_errno;
1995                 }
1996                 /* Neither mask nor spec should be NULL. */
1997                 if (!item->mask || !item->spec) {
1998                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1999                         rte_flow_error_set(error, EINVAL,
2000                                 RTE_FLOW_ERROR_TYPE_ITEM,
2001                                 item, "Not supported by fdir filter");
2002                         return -rte_errno;
2003                 }
2004
2005                 raw_mask = (const struct rte_flow_item_raw *)item->mask;
2006
2007                 /* check mask */
2008                 if (raw_mask->relative != 0x1 ||
2009                     raw_mask->search != 0x1 ||
2010                     raw_mask->reserved != 0x0 ||
2011                     (uint32_t)raw_mask->offset != 0xffffffff ||
2012                     raw_mask->limit != 0xffff ||
2013                     raw_mask->length != 0xffff) {
2014                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2015                         rte_flow_error_set(error, EINVAL,
2016                                 RTE_FLOW_ERROR_TYPE_ITEM,
2017                                 item, "Not supported by fdir filter");
2018                         return -rte_errno;
2019                 }
2020
2021                 raw_spec = (const struct rte_flow_item_raw *)item->spec;
2022
2023                 /* check spec */
2024                 if (raw_spec->relative != 0 ||
2025                     raw_spec->search != 0 ||
2026                     raw_spec->reserved != 0 ||
2027                     raw_spec->offset > IXGBE_MAX_FLX_SOURCE_OFF ||
2028                     raw_spec->offset % 2 ||
2029                     raw_spec->limit != 0 ||
2030                     raw_spec->length != 2 ||
2031                     /* pattern can't be 0xffff */
2032                     (raw_spec->pattern[0] == 0xff &&
2033                      raw_spec->pattern[1] == 0xff)) {
2034                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2035                         rte_flow_error_set(error, EINVAL,
2036                                 RTE_FLOW_ERROR_TYPE_ITEM,
2037                                 item, "Not supported by fdir filter");
2038                         return -rte_errno;
2039                 }
2040
2041                 /* check pattern mask */
2042                 if (raw_mask->pattern[0] != 0xff ||
2043                     raw_mask->pattern[1] != 0xff) {
2044                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2045                         rte_flow_error_set(error, EINVAL,
2046                                 RTE_FLOW_ERROR_TYPE_ITEM,
2047                                 item, "Not supported by fdir filter");
2048                         return -rte_errno;
2049                 }
2050
2051                 rule->mask.flex_bytes_mask = 0xffff;
2052                 rule->ixgbe_fdir.formatted.flex_bytes =
2053                         (((uint16_t)raw_spec->pattern[1]) << 8) |
2054                         raw_spec->pattern[0];
2055                 rule->flex_bytes_offset = raw_spec->offset;
2056         }
2057
2058         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2059                 /* check if the next not void item is END */
2060                 item = next_no_fuzzy_pattern(pattern, item);
2061                 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2062                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2063                         rte_flow_error_set(error, EINVAL,
2064                                 RTE_FLOW_ERROR_TYPE_ITEM,
2065                                 item, "Not supported by fdir filter");
2066                         return -rte_errno;
2067                 }
2068         }
2069
2070         return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
2071 }
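
/*
 * Illustrative sketch (compiled out, not part of the driver): how an
 * application might build the perfect-match IPv4/UDP pattern documented
 * above ixgbe_parse_fdir_filter_normal(). Addresses and ports are the
 * arbitrary sample values from that comment; error handling is omitted.
 */
#if 0
static void
sample_build_fdir_udp_pattern(struct rte_flow_item pattern[4])
{
        static struct rte_flow_item_ipv4 ip_spec, ip_mask;
        static struct rte_flow_item_udp udp_spec, udp_mask;

        memset(&ip_spec, 0, sizeof(ip_spec));
        memset(&ip_mask, 0, sizeof(ip_mask));
        ip_spec.hdr.src_addr = rte_cpu_to_be_32(0xC0A80114); /* 192.168.1.20 */
        ip_spec.hdr.dst_addr = rte_cpu_to_be_32(0xC0A70332); /* 192.167.3.50 */
        ip_mask.hdr.src_addr = UINT32_MAX;
        ip_mask.hdr.dst_addr = UINT32_MAX;

        memset(&udp_spec, 0, sizeof(udp_spec));
        memset(&udp_mask, 0, sizeof(udp_mask));
        udp_spec.hdr.src_port = rte_cpu_to_be_16(80);
        udp_spec.hdr.dst_port = rte_cpu_to_be_16(80);
        udp_mask.hdr.src_port = UINT16_MAX;
        udp_mask.hdr.dst_port = UINT16_MAX;

        /* An ETH item without spec/mask only describes the protocol stack. */
        pattern[0] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_ETH };
        pattern[1] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_IPV4,
                                             .spec = &ip_spec, .mask = &ip_mask };
        pattern[2] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_UDP,
                                             .spec = &udp_spec, .mask = &udp_mask };
        pattern[3] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_END };
}
#endif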
2072
2073 #define NVGRE_PROTOCOL 0x6558
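
/*
 * Illustrative sketch (compiled out): an NVGRE item spec/mask pair that
 * satisfies the checks in the tunnel parser below -- c_k_s_rsvd0_ver spec
 * 0x2000 under a 0x3000 mask, the NVGRE protocol, and a fully masked TNI.
 * The TNI bytes are arbitrary sample values.
 */
#if 0
static void
sample_fill_nvgre_item(struct rte_flow_item_nvgre *spec,
                       struct rte_flow_item_nvgre *mask)
{
        memset(spec, 0, sizeof(*spec));
        memset(mask, 0, sizeof(*mask));
        spec->c_k_s_rsvd0_ver = rte_cpu_to_be_16(0x2000);
        spec->protocol = rte_cpu_to_be_16(NVGRE_PROTOCOL);
        spec->tni[0] = 0x00;
        spec->tni[1] = 0x32;
        spec->tni[2] = 0x54;
        mask->c_k_s_rsvd0_ver = rte_cpu_to_be_16(0x3000);
        mask->protocol = 0xFFFF;
        mask->tni[0] = 0xFF;
        mask->tni[1] = 0xFF;
        mask->tni[2] = 0xFF;
}
#endif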
2074
2075 /**
2076  * Parse the rule to see if it is a VxLAN or NVGRE flow director rule,
2077  * and get the flow director filter info as well.
2078  * VxLAN PATTERN:
2079  * The first not void item must be ETH.
2080  * The second not void item must be IPV4 or IPV6.
2081  * The third not void item must be UDP, followed by VXLAN.
2082  * The next not void item must be END.
2083  * NVGRE PATTERN:
2084  * The first not void item must be ETH.
2085  * The second not void item must be IPV4 or IPV6.
2086  * The third not void item must be NVGRE.
2087  * The next not void item must be END.
2088  * ACTION:
2089  * The first not void action should be QUEUE or DROP.
2090  * The second not void action (optional) should be MARK;
2091  * mark_id is a uint32_t number.
2092  * The next not void action should be END.
2093  * VxLAN pattern example:
2094  * ITEM         Spec                    Mask
2095  * ETH          NULL                    NULL
2096  * IPV4/IPV6    NULL                    NULL
2097  * UDP          NULL                    NULL
2098  * VxLAN        vni{0x00, 0x32, 0x54}   {0xFF, 0xFF, 0xFF}
2099  * MAC VLAN     tci     0x2016          0xEFFF
2100  * END
2101  * NVGRE pattern example:
2102  * ITEM         Spec                    Mask
2103  * ETH          NULL                    NULL
2104  * IPV4/IPV6    NULL                    NULL
2105  * NVGRE        protocol        0x6558  0xFFFF
2106  *              tni{0x00, 0x32, 0x54}   {0xFF, 0xFF, 0xFF}
2107  * MAC VLAN     tci     0x2016          0xEFFF
2108  * END
2109  * Other members in mask and spec should be set to 0x00.
2110  * Item->last should be NULL.
2111  */
2112 static int
2113 ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
2114                                const struct rte_flow_item pattern[],
2115                                const struct rte_flow_action actions[],
2116                                struct ixgbe_fdir_rule *rule,
2117                                struct rte_flow_error *error)
2118 {
2119         const struct rte_flow_item *item;
2120         const struct rte_flow_item_vxlan *vxlan_spec;
2121         const struct rte_flow_item_vxlan *vxlan_mask;
2122         const struct rte_flow_item_nvgre *nvgre_spec;
2123         const struct rte_flow_item_nvgre *nvgre_mask;
2124         const struct rte_flow_item_eth *eth_spec;
2125         const struct rte_flow_item_eth *eth_mask;
2126         const struct rte_flow_item_vlan *vlan_spec;
2127         const struct rte_flow_item_vlan *vlan_mask;
2128         uint32_t j;
2129
2130         if (!pattern) {
2131                 rte_flow_error_set(error, EINVAL,
2132                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
2133                                    NULL, "NULL pattern.");
2134                 return -rte_errno;
2135         }
2136
2137         if (!actions) {
2138                 rte_flow_error_set(error, EINVAL,
2139                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
2140                                    NULL, "NULL action.");
2141                 return -rte_errno;
2142         }
2143
2144         if (!attr) {
2145                 rte_flow_error_set(error, EINVAL,
2146                                    RTE_FLOW_ERROR_TYPE_ATTR,
2147                                    NULL, "NULL attribute.");
2148                 return -rte_errno;
2149         }
2150
2151         /**
2152          * Some fields may not be provided. Set spec to 0 and mask to default
2153          * value, so nothing needs to be done later for the fields not provided.
2154          */
2155         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2156         memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
2157         rule->mask.vlan_tci_mask = 0;
2158
2159         /**
2160          * The first not void item should be
2161          * MAC or IPv4 or IPv6 or UDP or VxLAN or NVGRE.
2162          */
2163         item = next_no_void_pattern(pattern, NULL);
2164         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
2165             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
2166             item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
2167             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
2168             item->type != RTE_FLOW_ITEM_TYPE_VXLAN &&
2169             item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
2170                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2171                 rte_flow_error_set(error, EINVAL,
2172                         RTE_FLOW_ERROR_TYPE_ITEM,
2173                         item, "Not supported by fdir filter");
2174                 return -rte_errno;
2175         }
2176
2177         rule->mode = RTE_FDIR_MODE_PERFECT_TUNNEL;
2178
2179         /* Skip MAC. */
2180         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
2181                 /* Only used to describe the protocol stack. */
2182                 if (item->spec || item->mask) {
2183                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2184                         rte_flow_error_set(error, EINVAL,
2185                                 RTE_FLOW_ERROR_TYPE_ITEM,
2186                                 item, "Not supported by fdir filter");
2187                         return -rte_errno;
2188                 }
2189                 /* Not supported last point for range*/
2190                 if (item->last) {
2191                         rte_flow_error_set(error, EINVAL,
2192                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2193                                 item, "Not supported last point for range");
2194                         return -rte_errno;
2195                 }
2196
2197                 /* Check if the next not void item is IPv4 or IPv6. */
2198                 item = next_no_void_pattern(pattern, item);
2199                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
2200                     item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
2201                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2202                         rte_flow_error_set(error, EINVAL,
2203                                 RTE_FLOW_ERROR_TYPE_ITEM,
2204                                 item, "Not supported by fdir filter");
2205                         return -rte_errno;
2206                 }
2207         }
2208
2209         /* Skip IP. */
2210         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
2211             item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
2212                 /* Only used to describe the protocol stack. */
2213                 if (item->spec || item->mask) {
2214                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2215                         rte_flow_error_set(error, EINVAL,
2216                                 RTE_FLOW_ERROR_TYPE_ITEM,
2217                                 item, "Not supported by fdir filter");
2218                         return -rte_errno;
2219                 }
2220                 /*Not supported last point for range*/
2221                 if (item->last) {
2222                         rte_flow_error_set(error, EINVAL,
2223                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2224                                 item, "Not supported last point for range");
2225                         return -rte_errno;
2226                 }
2227
2228                 /* Check if the next not void item is UDP or NVGRE. */
2229                 item = next_no_void_pattern(pattern, item);
2230                 if (item->type != RTE_FLOW_ITEM_TYPE_UDP &&
2231                     item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
2232                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2233                         rte_flow_error_set(error, EINVAL,
2234                                 RTE_FLOW_ERROR_TYPE_ITEM,
2235                                 item, "Not supported by fdir filter");
2236                         return -rte_errno;
2237                 }
2238         }
2239
2240         /* Skip UDP. */
2241         if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
2242                 /* Only used to describe the protocol stack. */
2243                 if (item->spec || item->mask) {
2244                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2245                         rte_flow_error_set(error, EINVAL,
2246                                 RTE_FLOW_ERROR_TYPE_ITEM,
2247                                 item, "Not supported by fdir filter");
2248                         return -rte_errno;
2249                 }
2250                 /*Not supported last point for range*/
2251                 if (item->last) {
2252                         rte_flow_error_set(error, EINVAL,
2253                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2254                                 item, "Not supported last point for range");
2255                         return -rte_errno;
2256                 }
2257
2258                 /* Check if the next not void item is VxLAN. */
2259                 item = next_no_void_pattern(pattern, item);
2260                 if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
2261                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2262                         rte_flow_error_set(error, EINVAL,
2263                                 RTE_FLOW_ERROR_TYPE_ITEM,
2264                                 item, "Not supported by fdir filter");
2265                         return -rte_errno;
2266                 }
2267         }
2268
2269         /* Get the VxLAN info */
2270         if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
2271                 rule->ixgbe_fdir.formatted.tunnel_type =
2272                         RTE_FDIR_TUNNEL_TYPE_VXLAN;
2273
2274                 /* Only care about VNI, others should be masked. */
2275                 if (!item->mask) {
2276                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2277                         rte_flow_error_set(error, EINVAL,
2278                                 RTE_FLOW_ERROR_TYPE_ITEM,
2279                                 item, "Not supported by fdir filter");
2280                         return -rte_errno;
2281                 }
2282                 /*Not supported last point for range*/
2283                 if (item->last) {
2284                         rte_flow_error_set(error, EINVAL,
2285                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2286                                 item, "Not supported last point for range");
2287                         return -rte_errno;
2288                 }
2289                 rule->b_mask = TRUE;
2290
2291                 /* Tunnel type is always meaningful. */
2292                 rule->mask.tunnel_type_mask = 1;
2293
2294                 vxlan_mask =
2295                         (const struct rte_flow_item_vxlan *)item->mask;
2296                 if (vxlan_mask->flags) {
2297                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2298                         rte_flow_error_set(error, EINVAL,
2299                                 RTE_FLOW_ERROR_TYPE_ITEM,
2300                                 item, "Not supported by fdir filter");
2301                         return -rte_errno;
2302                 }
2303                 /* VNI must be fully masked or not masked at all. */
2304                 if ((vxlan_mask->vni[0] || vxlan_mask->vni[1] ||
2305                         vxlan_mask->vni[2]) &&
2306                         ((vxlan_mask->vni[0] != 0xFF) ||
2307                         (vxlan_mask->vni[1] != 0xFF) ||
2308                                 (vxlan_mask->vni[2] != 0xFF))) {
2309                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2310                         rte_flow_error_set(error, EINVAL,
2311                                 RTE_FLOW_ERROR_TYPE_ITEM,
2312                                 item, "Not supported by fdir filter");
2313                         return -rte_errno;
2314                 }
2315
2316                 rte_memcpy(&rule->mask.tunnel_id_mask, vxlan_mask->vni,
2317                         RTE_DIM(vxlan_mask->vni));
2318
2319                 if (item->spec) {
2320                         rule->b_spec = TRUE;
2321                         vxlan_spec = (const struct rte_flow_item_vxlan *)
2322                                         item->spec;
2323                         rte_memcpy(((uint8_t *)
2324                                 &rule->ixgbe_fdir.formatted.tni_vni + 1),
2325                                 vxlan_spec->vni, RTE_DIM(vxlan_spec->vni));
2326                         rule->ixgbe_fdir.formatted.tni_vni = rte_be_to_cpu_32(
2327                                 rule->ixgbe_fdir.formatted.tni_vni);
2328                 }
2329         }
2330
2331         /* Get the NVGRE info */
2332         if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE) {
2333                 rule->ixgbe_fdir.formatted.tunnel_type =
2334                         RTE_FDIR_TUNNEL_TYPE_NVGRE;
2335
2336                 /**
2337                  * Only care about the GRE flag bits, the protocol and the TNI,
2338                  * others should be masked.
2339                  */
2340                 if (!item->mask) {
2341                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2342                         rte_flow_error_set(error, EINVAL,
2343                                 RTE_FLOW_ERROR_TYPE_ITEM,
2344                                 item, "Not supported by fdir filter");
2345                         return -rte_errno;
2346                 }
2347                 /*Not supported last point for range*/
2348                 if (item->last) {
2349                         rte_flow_error_set(error, EINVAL,
2350                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2351                                 item, "Not supported last point for range");
2352                         return -rte_errno;
2353                 }
2354                 rule->b_mask = TRUE;
2355
2356                 /* Tunnel type is always meaningful. */
2357                 rule->mask.tunnel_type_mask = 1;
2358
2359                 nvgre_mask =
2360                         (const struct rte_flow_item_nvgre *)item->mask;
2361                 if (nvgre_mask->flow_id) {
2362                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2363                         rte_flow_error_set(error, EINVAL,
2364                                 RTE_FLOW_ERROR_TYPE_ITEM,
2365                                 item, "Not supported by fdir filter");
2366                         return -rte_errno;
2367                 }
2368                 if (nvgre_mask->c_k_s_rsvd0_ver !=
2369                         rte_cpu_to_be_16(0x3000) ||
2370                     nvgre_mask->protocol != 0xFFFF) {
2371                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2372                         rte_flow_error_set(error, EINVAL,
2373                                 RTE_FLOW_ERROR_TYPE_ITEM,
2374                                 item, "Not supported by fdir filter");
2375                         return -rte_errno;
2376                 }
2377                 /* TNI must be fully masked or not masked at all. */
2378                 if (nvgre_mask->tni[0] &&
2379                     ((nvgre_mask->tni[0] != 0xFF) ||
2380                     (nvgre_mask->tni[1] != 0xFF) ||
2381                     (nvgre_mask->tni[2] != 0xFF))) {
2382                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2383                         rte_flow_error_set(error, EINVAL,
2384                                 RTE_FLOW_ERROR_TYPE_ITEM,
2385                                 item, "Not supported by fdir filter");
2386                         return -rte_errno;
2387                 }
2388                 /* TNI is a 24-bit field. */
2389                 rte_memcpy(&rule->mask.tunnel_id_mask, nvgre_mask->tni,
2390                         RTE_DIM(nvgre_mask->tni));
2391                 rule->mask.tunnel_id_mask <<= 8;
2392
2393                 if (item->spec) {
2394                         rule->b_spec = TRUE;
2395                         nvgre_spec =
2396                                 (const struct rte_flow_item_nvgre *)item->spec;
2397                         if (nvgre_spec->c_k_s_rsvd0_ver !=
2398                             rte_cpu_to_be_16(0x2000) ||
2399                             nvgre_spec->protocol !=
2400                             rte_cpu_to_be_16(NVGRE_PROTOCOL)) {
2401                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2402                                 rte_flow_error_set(error, EINVAL,
2403                                         RTE_FLOW_ERROR_TYPE_ITEM,
2404                                         item, "Not supported by fdir filter");
2405                                 return -rte_errno;
2406                         }
2407                         /* TNI is a 24-bit field. */
2408                         rte_memcpy(&rule->ixgbe_fdir.formatted.tni_vni,
2409                         nvgre_spec->tni, RTE_DIM(nvgre_spec->tni));
2410                         rule->ixgbe_fdir.formatted.tni_vni <<= 8;
2411                 }
2412         }
2413
2414         /* check if the next not void item is MAC */
2415         item = next_no_void_pattern(pattern, item);
2416         if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
2417                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2418                 rte_flow_error_set(error, EINVAL,
2419                         RTE_FLOW_ERROR_TYPE_ITEM,
2420                         item, "Not supported by fdir filter");
2421                 return -rte_errno;
2422         }
2423
2424         /**
2425          * Only support vlan and dst MAC address,
2426          * others should be masked.
2427          */
2428
2429         if (!item->mask) {
2430                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2431                 rte_flow_error_set(error, EINVAL,
2432                         RTE_FLOW_ERROR_TYPE_ITEM,
2433                         item, "Not supported by fdir filter");
2434                 return -rte_errno;
2435         }
2436         /*Not supported last point for range*/
2437         if (item->last) {
2438                 rte_flow_error_set(error, EINVAL,
2439                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2440                         item, "Not supported last point for range");
2441                 return -rte_errno;
2442         }
2443         rule->b_mask = TRUE;
2444         eth_mask = (const struct rte_flow_item_eth *)item->mask;
2445
2446         /* Ether type should be masked. */
2447         if (eth_mask->type) {
2448                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2449                 rte_flow_error_set(error, EINVAL,
2450                         RTE_FLOW_ERROR_TYPE_ITEM,
2451                         item, "Not supported by fdir filter");
2452                 return -rte_errno;
2453         }
2454
2455         /* src MAC address should be masked. */
2456         for (j = 0; j < ETHER_ADDR_LEN; j++) {
2457                 if (eth_mask->src.addr_bytes[j]) {
2458                         memset(rule, 0,
2459                                sizeof(struct ixgbe_fdir_rule));
2460                         rte_flow_error_set(error, EINVAL,
2461                                 RTE_FLOW_ERROR_TYPE_ITEM,
2462                                 item, "Not supported by fdir filter");
2463                         return -rte_errno;
2464                 }
2465         }
2466         rule->mask.mac_addr_byte_mask = 0;
2467         for (j = 0; j < ETHER_ADDR_LEN; j++) {
2468                 /* It's a per-byte mask. */
2469                 if (eth_mask->dst.addr_bytes[j] == 0xFF) {
2470                         rule->mask.mac_addr_byte_mask |= 0x1 << j;
2471                 } else if (eth_mask->dst.addr_bytes[j]) {
2472                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2473                         rte_flow_error_set(error, EINVAL,
2474                                 RTE_FLOW_ERROR_TYPE_ITEM,
2475                                 item, "Not supported by fdir filter");
2476                         return -rte_errno;
2477                 }
2478         }
2479
2480         /* If there is no VLAN item, default to the full mask. */
2481         rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
2482
2483         if (item->spec) {
2484                 rule->b_spec = TRUE;
2485                 eth_spec = (const struct rte_flow_item_eth *)item->spec;
2486
2487                 /* Get the dst MAC. */
2488                 for (j = 0; j < ETHER_ADDR_LEN; j++) {
2489                         rule->ixgbe_fdir.formatted.inner_mac[j] =
2490                                 eth_spec->dst.addr_bytes[j];
2491                 }
2492         }
2493
2494         /**
2495          * Check if the next not void item is vlan or ipv4.
2496          * IPv6 is not supported.
2497          */
2498         item = next_no_void_pattern(pattern, item);
2499         if ((item->type != RTE_FLOW_ITEM_TYPE_VLAN) &&
2500                 (item->type != RTE_FLOW_ITEM_TYPE_IPV4)) {
2501                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2502                 rte_flow_error_set(error, EINVAL,
2503                         RTE_FLOW_ERROR_TYPE_ITEM,
2504                         item, "Not supported by fdir filter");
2505                 return -rte_errno;
2506         }
2507         /* Ranges (the 'last' field) are not supported */
2508         if (item->last) {
2509                 rte_flow_error_set(error, EINVAL,
2510                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2511                         item, "Not supported last point for range");
2512                 return -rte_errno;
2513         }
2514
2515         if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
2516                 if (!(item->spec && item->mask)) {
2517                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2518                         rte_flow_error_set(error, EINVAL,
2519                                 RTE_FLOW_ERROR_TYPE_ITEM,
2520                                 item, "Not supported by fdir filter");
2521                         return -rte_errno;
2522                 }
2523
2524                 vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
2525                 vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
2526
2527                 rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
2528
2529                 rule->mask.vlan_tci_mask = vlan_mask->tci;
2530                 rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
2531                 /* More than one VLAN tag is not supported. */
2532
2533                 /* check if the next not void item is END */
2534                 item = next_no_void_pattern(pattern, item);
2535
2536                 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2537                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2538                         rte_flow_error_set(error, EINVAL,
2539                                 RTE_FLOW_ERROR_TYPE_ITEM,
2540                                 item, "Not supported by fdir filter");
2541                         return -rte_errno;
2542                 }
2543         }
2544
2545         /**
2546          * If the tag is 0, it means we don't care about the VLAN.
2547          * Do nothing.
2548          */
2549
2550         return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
2551 }
2552
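     /*
      * Parse a flow director rule: try the normal (non-tunnel) parser first,
      * fall back to the tunnel parser, then reject rules that the current MAC
      * type, flow director mode or queue configuration cannot support.
      */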
2553 static int
2554 ixgbe_parse_fdir_filter(struct rte_eth_dev *dev,
2555                         const struct rte_flow_attr *attr,
2556                         const struct rte_flow_item pattern[],
2557                         const struct rte_flow_action actions[],
2558                         struct ixgbe_fdir_rule *rule,
2559                         struct rte_flow_error *error)
2560 {
2561         int ret;
2562         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2563         enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
2564
2565         if (hw->mac.type != ixgbe_mac_82599EB &&
2566                 hw->mac.type != ixgbe_mac_X540 &&
2567                 hw->mac.type != ixgbe_mac_X550 &&
2568                 hw->mac.type != ixgbe_mac_X550EM_x &&
2569                 hw->mac.type != ixgbe_mac_X550EM_a)
2570                 return -ENOTSUP;
2571
2572         ret = ixgbe_parse_fdir_filter_normal(dev, attr, pattern,
2573                                         actions, rule, error);
2574
2575         if (!ret)
2576                 goto step_next;
2577
2578         ret = ixgbe_parse_fdir_filter_tunnel(attr, pattern,
2579                                         actions, rule, error);
2580
2581         if (ret)
2582                 return ret;
2583
2584 step_next:
2585
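             /*
              * On 82599 the drop action is not supported when the rule also
              * matches on L4 source or destination ports.
              */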
2586         if (hw->mac.type == ixgbe_mac_82599EB &&
2587                 rule->fdirflags == IXGBE_FDIRCMD_DROP &&
2588                 (rule->ixgbe_fdir.formatted.src_port != 0 ||
2589                 rule->ixgbe_fdir.formatted.dst_port != 0))
2590                 return -ENOTSUP;
2591
2592         if (fdir_mode == RTE_FDIR_MODE_NONE ||
2593             fdir_mode != rule->mode)
2594                 return -ENOTSUP;
2595
2596         if (rule->queue >= dev->data->nb_rx_queues)
2597                 return -ENOTSUP;
2598
2599         return ret;
2600 }
2601
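     /*
      * Release every element kept on the per-port filter lists (ntuple,
      * ethertype, SYN, L2 tunnel, flow director) as well as the flow
      * handles tracked in ixgbe_flow_list.
      */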
2602 void
2603 ixgbe_filterlist_flush(void)
2604 {
2605         struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
2606         struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
2607         struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
2608         struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2609         struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
2610         struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
2611
2612         while ((ntuple_filter_ptr = TAILQ_FIRST(&filter_ntuple_list))) {
2613                 TAILQ_REMOVE(&filter_ntuple_list,
2614                                  ntuple_filter_ptr,
2615                                  entries);
2616                 rte_free(ntuple_filter_ptr);
2617         }
2618
2619         while ((ethertype_filter_ptr = TAILQ_FIRST(&filter_ethertype_list))) {
2620                 TAILQ_REMOVE(&filter_ethertype_list,
2621                                  ethertype_filter_ptr,
2622                                  entries);
2623                 rte_free(ethertype_filter_ptr);
2624         }
2625
2626         while ((syn_filter_ptr = TAILQ_FIRST(&filter_syn_list))) {
2627                 TAILQ_REMOVE(&filter_syn_list,
2628                                  syn_filter_ptr,
2629                                  entries);
2630                 rte_free(syn_filter_ptr);
2631         }
2632
2633         while ((l2_tn_filter_ptr = TAILQ_FIRST(&filter_l2_tunnel_list))) {
2634                 TAILQ_REMOVE(&filter_l2_tunnel_list,
2635                                  l2_tn_filter_ptr,
2636                                  entries);
2637                 rte_free(l2_tn_filter_ptr);
2638         }
2639
2640         while ((fdir_rule_ptr = TAILQ_FIRST(&filter_fdir_list))) {
2641                 TAILQ_REMOVE(&filter_fdir_list,
2642                                  fdir_rule_ptr,
2643                                  entries);
2644                 rte_free(fdir_rule_ptr);
2645         }
2646
2647         while ((ixgbe_flow_mem_ptr = TAILQ_FIRST(&ixgbe_flow_list))) {
2648                 TAILQ_REMOVE(&ixgbe_flow_list,
2649                                  ixgbe_flow_mem_ptr,
2650                                  entries);
2651                 rte_free(ixgbe_flow_mem_ptr->flow);
2652                 rte_free(ixgbe_flow_mem_ptr);
2653         }
2654 }
2655
2656 /**
2657  * Create or destroy a flow rule.
2658  * Theoretically one rule can match more than one filter type.
2659  * The first filter type that matches is used,
2660  * so the order of the checks below matters.
2661  */
2662 static struct rte_flow *
2663 ixgbe_flow_create(struct rte_eth_dev *dev,
2664                   const struct rte_flow_attr *attr,
2665                   const struct rte_flow_item pattern[],
2666                   const struct rte_flow_action actions[],
2667                   struct rte_flow_error *error)
2668 {
2669         int ret;
2670         struct rte_eth_ntuple_filter ntuple_filter;
2671         struct rte_eth_ethertype_filter ethertype_filter;
2672         struct rte_eth_syn_filter syn_filter;
2673         struct ixgbe_fdir_rule fdir_rule;
2674         struct rte_eth_l2_tunnel_conf l2_tn_filter;
2675         struct ixgbe_hw_fdir_info *fdir_info =
2676                 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
2677         struct rte_flow *flow = NULL;
2678         struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
2679         struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
2680         struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
2681         struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2682         struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
2683         struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
2684         uint8_t first_mask = FALSE;
2685
2686         flow = rte_zmalloc("ixgbe_rte_flow", sizeof(struct rte_flow), 0);
2687         if (!flow) {
2688                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2689                 return NULL;
2690         }
2691         ixgbe_flow_mem_ptr = rte_zmalloc("ixgbe_flow_mem",
2692                         sizeof(struct ixgbe_flow_mem), 0);
2693         if (!ixgbe_flow_mem_ptr) {
2694                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2695                 rte_free(flow);
2696                 return NULL;
2697         }
2698         ixgbe_flow_mem_ptr->flow = flow;
2699         TAILQ_INSERT_TAIL(&ixgbe_flow_list,
2700                                 ixgbe_flow_mem_ptr, entries);
2701
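             /* Try the parsers in priority order; the n-tuple filter first. */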
2702         memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
2703         ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
2704                         actions, &ntuple_filter, error);
2705         if (!ret) {
2706                 ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
2707                 if (!ret) {
2708                         ntuple_filter_ptr = rte_zmalloc("ixgbe_ntuple_filter",
2709                                 sizeof(struct ixgbe_ntuple_filter_ele), 0);
2710                         (void)rte_memcpy(&ntuple_filter_ptr->filter_info,
2711                                 &ntuple_filter,
2712                                 sizeof(struct rte_eth_ntuple_filter));
2713                         TAILQ_INSERT_TAIL(&filter_ntuple_list,
2714                                 ntuple_filter_ptr, entries);
2715                         flow->rule = ntuple_filter_ptr;
2716                         flow->filter_type = RTE_ETH_FILTER_NTUPLE;
2717                         return flow;
2718                 }
2719                 goto out;
2720         }
2721
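             /* Not an n-tuple rule; try the ethertype filter next. */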
2722         memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
2723         ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
2724                                 actions, &ethertype_filter, error);
2725         if (!ret) {
2726                 ret = ixgbe_add_del_ethertype_filter(dev,
2727                                 &ethertype_filter, TRUE);
2728                 if (!ret) {
2729                         ethertype_filter_ptr = rte_zmalloc(
2730                                 "ixgbe_ethertype_filter",
2731                                 sizeof(struct ixgbe_ethertype_filter_ele), 0);
2732                         (void)rte_memcpy(&ethertype_filter_ptr->filter_info,
2733                                 &ethertype_filter,
2734                                 sizeof(struct rte_eth_ethertype_filter));
2735                         TAILQ_INSERT_TAIL(&filter_ethertype_list,
2736                                 ethertype_filter_ptr, entries);
2737                         flow->rule = ethertype_filter_ptr;
2738                         flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
2739                         return flow;
2740                 }
2741                 goto out;
2742         }
2743
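             /* Then the TCP SYN filter. */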
2744         memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
2745         ret = ixgbe_parse_syn_filter(dev, attr, pattern,
2746                                 actions, &syn_filter, error);
2747         if (!ret) {
2748                 ret = ixgbe_syn_filter_set(dev, &syn_filter, TRUE);
2749                 if (!ret) {
2750                         syn_filter_ptr = rte_zmalloc("ixgbe_syn_filter",
2751                                 sizeof(struct ixgbe_eth_syn_filter_ele), 0);
2752                         (void)rte_memcpy(&syn_filter_ptr->filter_info,
2753                                 &syn_filter,
2754                                 sizeof(struct rte_eth_syn_filter));
2755                         TAILQ_INSERT_TAIL(&filter_syn_list,
2756                                 syn_filter_ptr,
2757                                 entries);
2758                         flow->rule = syn_filter_ptr;
2759                         flow->filter_type = RTE_ETH_FILTER_SYN;
2760                         return flow;
2761                 }
2762                 goto out;
2763         }
2764
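             /* Then the flow director filter. */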
2765         memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
2766         ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
2767                                 actions, &fdir_rule, error);
2768         if (!ret) {
2769                 /* A mask cannot be deleted. */
2770                 if (fdir_rule.b_mask) {
2771                         if (!fdir_info->mask_added) {
2772                                 /* It's the first time the mask is set. */
2773                                 rte_memcpy(&fdir_info->mask,
2774                                         &fdir_rule.mask,
2775                                         sizeof(struct ixgbe_hw_fdir_mask));
2776                                 fdir_info->flex_bytes_offset =
2777                                         fdir_rule.flex_bytes_offset;
2778
2779                                 if (fdir_rule.mask.flex_bytes_mask)
2780                                         ixgbe_fdir_set_flexbytes_offset(dev,
2781                                                 fdir_rule.flex_bytes_offset);
2782
2783                                 ret = ixgbe_fdir_set_input_mask(dev);
2784                                 if (ret)
2785                                         goto out;
2786
2787                                 fdir_info->mask_added = TRUE;
2788                                 first_mask = TRUE;
2789                         } else {
2790                                 /**
2791                                  * Only one global mask is supported;
2792                                  * every rule must use the same mask.
2793                                  */
2794                                 ret = memcmp(&fdir_info->mask,
2795                                         &fdir_rule.mask,
2796                                         sizeof(struct ixgbe_hw_fdir_mask));
2797                                 if (ret)
2798                                         goto out;
2799
2800                                 if (fdir_info->flex_bytes_offset !=
2801                                                 fdir_rule.flex_bytes_offset)
2802                                         goto out;
2803                         }
2804                 }
2805
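                     /* Program the rule itself into the flow director table. */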
2806                 if (fdir_rule.b_spec) {
2807                         ret = ixgbe_fdir_filter_program(dev, &fdir_rule,
2808                                         FALSE, FALSE);
2809                         if (!ret) {
2810                                 fdir_rule_ptr = rte_zmalloc("ixgbe_fdir_filter",
2811                                         sizeof(struct ixgbe_fdir_rule_ele), 0);
2812                                 (void)rte_memcpy(&fdir_rule_ptr->filter_info,
2813                                         &fdir_rule,
2814                                         sizeof(struct ixgbe_fdir_rule));
2815                                 TAILQ_INSERT_TAIL(&filter_fdir_list,
2816                                         fdir_rule_ptr, entries);
2817                                 flow->rule = fdir_rule_ptr;
2818                                 flow->filter_type = RTE_ETH_FILTER_FDIR;
2819
2820                                 return flow;
2821                         }
2822
2823                         if (ret) {
2824                                 /**
2825                                  * Clear the mask_added flag if programming
2826                                  * the rule fails.
2827                                  */
2828                                 if (first_mask)
2829                                         fdir_info->mask_added = FALSE;
2830                                 goto out;
2831                         }
2832                 }
2833
2834                 goto out;
2835         }
2836
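             /* Finally the L2 tunnel filter. */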
2837         memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
2838         ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
2839                                         actions, &l2_tn_filter, error);
2840         if (!ret) {
2841                 ret = ixgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_filter, FALSE);
2842                 if (!ret) {
2843                         l2_tn_filter_ptr = rte_zmalloc("ixgbe_l2_tn_filter",
2844                                 sizeof(struct ixgbe_eth_l2_tunnel_conf_ele), 0);
2845                         (void)rte_memcpy(&l2_tn_filter_ptr->filter_info,
2846                                 &l2_tn_filter,
2847                                 sizeof(struct rte_eth_l2_tunnel_conf));
2848                         TAILQ_INSERT_TAIL(&filter_l2_tunnel_list,
2849                                 l2_tn_filter_ptr, entries);
2850                         flow->rule = l2_tn_filter_ptr;
2851                         flow->filter_type = RTE_ETH_FILTER_L2_TUNNEL;
2852                         return flow;
2853                 }
2854         }
2855
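     /* Creation failed: unlink the bookkeeping entry and free the memory. */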
2856 out:
2857         TAILQ_REMOVE(&ixgbe_flow_list,
2858                 ixgbe_flow_mem_ptr, entries);
2859         rte_flow_error_set(error, -ret,
2860                            RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
2861                            "Failed to create flow.");
2862         rte_free(ixgbe_flow_mem_ptr);
2863         rte_free(flow);
2864         return NULL;
2865 }
2866
2867 /**
2868  * Check if the flow rule is supported by ixgbe.
2869  * It only checks the format; it does not guarantee that the rule can be
2870  * programmed into the HW, because there may not be enough room for it.
2871  */
2872 static int
2873 ixgbe_flow_validate(struct rte_eth_dev *dev,
2874                 const struct rte_flow_attr *attr,
2875                 const struct rte_flow_item pattern[],
2876                 const struct rte_flow_action actions[],
2877                 struct rte_flow_error *error)
2878 {
2879         struct rte_eth_ntuple_filter ntuple_filter;
2880         struct rte_eth_ethertype_filter ethertype_filter;
2881         struct rte_eth_syn_filter syn_filter;
2882         struct rte_eth_l2_tunnel_conf l2_tn_filter;
2883         struct ixgbe_fdir_rule fdir_rule;
2884         int ret;
2885
2886         memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
2887         ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
2888                                 actions, &ntuple_filter, error);
2889         if (!ret)
2890                 return 0;
2891
2892         memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
2893         ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
2894                                 actions, &ethertype_filter, error);
2895         if (!ret)
2896                 return 0;
2897
2898         memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
2899         ret = ixgbe_parse_syn_filter(dev, attr, pattern,
2900                                 actions, &syn_filter, error);
2901         if (!ret)
2902                 return 0;
2903
2904         memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
2905         ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
2906                                 actions, &fdir_rule, error);
2907         if (!ret)
2908                 return 0;
2909
2910         memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
2911         ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
2912                                 actions, &l2_tn_filter, error);
2913
2914         return ret;
2915 }
2916
2917 /* Destroy a flow rule on ixgbe. */
2918 static int
2919 ixgbe_flow_destroy(struct rte_eth_dev *dev,
2920                 struct rte_flow *flow,
2921                 struct rte_flow_error *error)
2922 {
2923         int ret;
2924         struct rte_flow *pmd_flow = flow;
2925         enum rte_filter_type filter_type = pmd_flow->filter_type;
2926         struct rte_eth_ntuple_filter ntuple_filter;
2927         struct rte_eth_ethertype_filter ethertype_filter;
2928         struct rte_eth_syn_filter syn_filter;
2929         struct ixgbe_fdir_rule fdir_rule;
2930         struct rte_eth_l2_tunnel_conf l2_tn_filter;
2931         struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
2932         struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
2933         struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
2934         struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2935         struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
2936         struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
2937         struct ixgbe_hw_fdir_info *fdir_info =
2938                 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
2939
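             /*
              * Remove the rule from the hardware and from the matching
              * software list, based on the filter type recorded at creation.
              */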
2940         switch (filter_type) {
2941         case RTE_ETH_FILTER_NTUPLE:
2942                 ntuple_filter_ptr = (struct ixgbe_ntuple_filter_ele *)
2943                                         pmd_flow->rule;
2944                 (void)rte_memcpy(&ntuple_filter,
2945                         &ntuple_filter_ptr->filter_info,
2946                         sizeof(struct rte_eth_ntuple_filter));
2947                 ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, FALSE);
2948                 if (!ret) {
2949                         TAILQ_REMOVE(&filter_ntuple_list,
2950                         ntuple_filter_ptr, entries);
2951                         rte_free(ntuple_filter_ptr);
2952                 }
2953                 break;
2954         case RTE_ETH_FILTER_ETHERTYPE:
2955                 ethertype_filter_ptr = (struct ixgbe_ethertype_filter_ele *)
2956                                         pmd_flow->rule;
2957                 (void)rte_memcpy(&ethertype_filter,
2958                         &ethertype_filter_ptr->filter_info,
2959                         sizeof(struct rte_eth_ethertype_filter));
2960                 ret = ixgbe_add_del_ethertype_filter(dev,
2961                                 &ethertype_filter, FALSE);
2962                 if (!ret) {
2963                         TAILQ_REMOVE(&filter_ethertype_list,
2964                                 ethertype_filter_ptr, entries);
2965                         rte_free(ethertype_filter_ptr);
2966                 }
2967                 break;
2968         case RTE_ETH_FILTER_SYN:
2969                 syn_filter_ptr = (struct ixgbe_eth_syn_filter_ele *)
2970                                 pmd_flow->rule;
2971                 (void)rte_memcpy(&syn_filter,
2972                         &syn_filter_ptr->filter_info,
2973                         sizeof(struct rte_eth_syn_filter));
2974                 ret = ixgbe_syn_filter_set(dev, &syn_filter, FALSE);
2975                 if (!ret) {
2976                         TAILQ_REMOVE(&filter_syn_list,
2977                                 syn_filter_ptr, entries);
2978                         rte_free(syn_filter_ptr);
2979                 }
2980                 break;
2981         case RTE_ETH_FILTER_FDIR:
2982                 fdir_rule_ptr = (struct ixgbe_fdir_rule_ele *)pmd_flow->rule;
2983                 (void)rte_memcpy(&fdir_rule,
2984                         &fdir_rule_ptr->filter_info,
2985                         sizeof(struct ixgbe_fdir_rule));
2986                 ret = ixgbe_fdir_filter_program(dev, &fdir_rule, TRUE, FALSE);
2987                 if (!ret) {
2988                         TAILQ_REMOVE(&filter_fdir_list,
2989                                 fdir_rule_ptr, entries);
2990                         rte_free(fdir_rule_ptr);
2991                         if (TAILQ_EMPTY(&filter_fdir_list))
2992                                 fdir_info->mask_added = false;
2993                 }
2994                 break;
2995         case RTE_ETH_FILTER_L2_TUNNEL:
2996                 l2_tn_filter_ptr = (struct ixgbe_eth_l2_tunnel_conf_ele *)
2997                                 pmd_flow->rule;
2998                 (void)rte_memcpy(&l2_tn_filter, &l2_tn_filter_ptr->filter_info,
2999                         sizeof(struct rte_eth_l2_tunnel_conf));
3000                 ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_filter);
3001                 if (!ret) {
3002                         TAILQ_REMOVE(&filter_l2_tunnel_list,
3003                                 l2_tn_filter_ptr, entries);
3004                         rte_free(l2_tn_filter_ptr);
3005                 }
3006                 break;
3007         default:
3008                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
3009                             filter_type);
3010                 ret = -EINVAL;
3011                 break;
3012         }
3013
3014         if (ret) {
3015                 rte_flow_error_set(error, EINVAL,
3016                                 RTE_FLOW_ERROR_TYPE_HANDLE,
3017                                 NULL, "Failed to destroy flow");
3018                 return ret;
3019         }
3020
3021         TAILQ_FOREACH(ixgbe_flow_mem_ptr, &ixgbe_flow_list, entries) {
3022                 if (ixgbe_flow_mem_ptr->flow == pmd_flow) {
3023                         TAILQ_REMOVE(&ixgbe_flow_list,
3024                                 ixgbe_flow_mem_ptr, entries);
3025                         rte_free(ixgbe_flow_mem_ptr);
                             /* The entry has just been freed; stop iterating. */
                             break;
3026                 }
3027         }
3028         rte_free(flow);
3029
3030         return ret;
3031 }
3032
3033 /* Destroy all flow rules associated with a port on ixgbe. */
3034 static int
3035 ixgbe_flow_flush(struct rte_eth_dev *dev,
3036                 struct rte_flow_error *error)
3037 {
3038         int ret = 0;
3039
3040         ixgbe_clear_all_ntuple_filter(dev);
3041         ixgbe_clear_all_ethertype_filter(dev);
3042         ixgbe_clear_syn_filter(dev);
3043
3044         ret = ixgbe_clear_all_fdir_filter(dev);
3045         if (ret < 0) {
3046                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
3047                                         NULL, "Failed to flush rule");
3048                 return ret;
3049         }
3050
3051         ret = ixgbe_clear_all_l2_tn_filter(dev);
3052         if (ret < 0) {
3053                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
3054                                         NULL, "Failed to flush rule");
3055                 return ret;
3056         }
3057
3058         ixgbe_filterlist_flush();
3059
3060         return 0;
3061 }
3062
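     /*
      * Flow API callbacks registered with the generic rte_flow layer;
      * rte_flow_validate()/create()/destroy()/flush() on an ixgbe port are
      * dispatched to these handlers. A minimal usage sketch from an
      * application (error handling omitted, port_id assumed valid):
      *
      *   struct rte_flow_error err;
      *   struct rte_flow *flow = NULL;
      *
      *   if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
      *           flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
      */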
3063 const struct rte_flow_ops ixgbe_flow_ops = {
3064         .validate = ixgbe_flow_validate,
3065         .create = ixgbe_flow_create,
3066         .destroy = ixgbe_flow_destroy,
3067         .flush = ixgbe_flow_flush,
3068 };