deb_dpdk.git: drivers/net/ixgbe/ixgbe_flow.c (commit da7b1cc812daf567d955a8af6c4f042d543297e1)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <sys/queue.h>
35 #include <stdio.h>
36 #include <errno.h>
37 #include <stdint.h>
38 #include <string.h>
39 #include <unistd.h>
40 #include <stdarg.h>
41 #include <inttypes.h>
42 #include <netinet/in.h>
43 #include <rte_byteorder.h>
44 #include <rte_common.h>
45 #include <rte_cycles.h>
46
47 #include <rte_interrupts.h>
48 #include <rte_log.h>
49 #include <rte_debug.h>
50 #include <rte_pci.h>
51 #include <rte_atomic.h>
52 #include <rte_branch_prediction.h>
53 #include <rte_memory.h>
54 #include <rte_memzone.h>
55 #include <rte_eal.h>
56 #include <rte_alarm.h>
57 #include <rte_ether.h>
58 #include <rte_ethdev.h>
59 #include <rte_atomic.h>
60 #include <rte_malloc.h>
61 #include <rte_random.h>
62 #include <rte_dev.h>
63 #include <rte_hash_crc.h>
64 #include <rte_flow.h>
65 #include <rte_flow_driver.h>
66
67 #include "ixgbe_logs.h"
68 #include "base/ixgbe_api.h"
69 #include "base/ixgbe_vf.h"
70 #include "base/ixgbe_common.h"
71 #include "ixgbe_ethdev.h"
72 #include "ixgbe_bypass.h"
73 #include "ixgbe_rxtx.h"
74 #include "base/ixgbe_type.h"
75 #include "base/ixgbe_phy.h"
76 #include "rte_pmd_ixgbe.h"
77
78
79 #define IXGBE_MIN_N_TUPLE_PRIO 1
80 #define IXGBE_MAX_N_TUPLE_PRIO 7
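/*
 * Both helper macros below skip any VOID entries: they advance the
 * caller's "index" and leave "item"/"act" pointing at the first
 * pattern item or action whose type is not VOID.
 */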
81 #define NEXT_ITEM_OF_PATTERN(item, pattern, index)\
82         do {                                            \
83                 item = pattern + index;                 \
84                 while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {\
85                         index++;                        \
86                         item = pattern + index;         \
87                 }                                       \
88         } while (0)
89
90 #define NEXT_ITEM_OF_ACTION(act, actions, index)\
91         do {                                            \
92                 act = actions + index;                  \
93                 while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {\
94                         index++;                        \
95                         act = actions + index;          \
96                 }                                       \
97         } while (0)
98
99 /**
100  * Please be aware there is an assumption shared by all the parsers:
101  * rte_flow_item uses big endian, while rte_flow_attr and
102  * rte_flow_action use CPU order.
103  * Because the pattern is used to describe packets, the packets
104  * normally use network order.
105  */
106
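/*
 * A minimal illustrative sketch of the convention above; it is not
 * part of the driver and the function/variable names are hypothetical.
 * Values placed in item spec/mask structures are big endian, while
 * rte_flow_attr fields stay in CPU order.
 */
static __rte_unused void
ixgbe_flow_byte_order_sketch(void)
{
        struct rte_flow_item_ipv4 ipv4_spec = {
                /* pattern data mirrors the packet: network byte order */
                .hdr = {
                        .dst_addr = rte_cpu_to_be_32(0xC0A70332), /* 192.167.3.50 */
                },
        };
        struct rte_flow_attr attr = {
                /* attributes are interpreted in CPU byte order */
                .ingress = 1,
                .priority = 1,
        };

        RTE_SET_USED(ipv4_spec);
        RTE_SET_USED(attr);
}
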
107 /**
108  * Parse the rule to see if it is an n-tuple rule,
109  * and get the n-tuple filter info as well.
110  * pattern:
111  * The first not void item can be ETH or IPV4.
112  * The second not void item must be IPV4 if the first one is ETH.
113  * The third not void item must be UDP, TCP or SCTP.
114  * The next not void item must be END.
115  * action:
116  * The first not void action should be QUEUE.
117  * The next not void action should be END.
118  * pattern example:
119  * ITEM         Spec                    Mask
120  * ETH          NULL                    NULL
121  * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
122  *              dst_addr 192.167.3.50   0xFFFFFFFF
123  *              next_proto_id   17      0xFF
124  * UDP/TCP/     src_port        80      0xFFFF
125  * SCTP         dst_port        80      0xFFFF
126  * END
127  * other members in mask and spec should be set to 0x00.
128  * item->last should be NULL.
129  */
130 static int
131 cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
132                          const struct rte_flow_item pattern[],
133                          const struct rte_flow_action actions[],
134                          struct rte_eth_ntuple_filter *filter,
135                          struct rte_flow_error *error)
136 {
137         const struct rte_flow_item *item;
138         const struct rte_flow_action *act;
139         const struct rte_flow_item_ipv4 *ipv4_spec;
140         const struct rte_flow_item_ipv4 *ipv4_mask;
141         const struct rte_flow_item_tcp *tcp_spec;
142         const struct rte_flow_item_tcp *tcp_mask;
143         const struct rte_flow_item_udp *udp_spec;
144         const struct rte_flow_item_udp *udp_mask;
145         const struct rte_flow_item_sctp *sctp_spec;
146         const struct rte_flow_item_sctp *sctp_mask;
147         uint32_t index;
148
149         if (!pattern) {
150                 rte_flow_error_set(error,
151                         EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
152                         NULL, "NULL pattern.");
153                 return -rte_errno;
154         }
155
156         if (!actions) {
157                 rte_flow_error_set(error, EINVAL,
158                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
159                                    NULL, "NULL action.");
160                 return -rte_errno;
161         }
162         if (!attr) {
163                 rte_flow_error_set(error, EINVAL,
164                                    RTE_FLOW_ERROR_TYPE_ATTR,
165                                    NULL, "NULL attribute.");
166                 return -rte_errno;
167         }
168
169         /* parse pattern */
170         index = 0;
171
172         /* the first not void item can be MAC or IPv4 */
173         NEXT_ITEM_OF_PATTERN(item, pattern, index);
174
175         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
176             item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
177                 rte_flow_error_set(error, EINVAL,
178                         RTE_FLOW_ERROR_TYPE_ITEM,
179                         item, "Not supported by ntuple filter");
180                 return -rte_errno;
181         }
182         /* Skip Ethernet */
183         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
184                 /*Not supported last point for range*/
185                 if (item->last) {
186                         rte_flow_error_set(error,
187                           EINVAL,
188                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
189                           item, "Not supported last point for range");
190                         return -rte_errno;
191
192                 }
193                 /* if the first item is MAC, the content should be NULL */
194                 if (item->spec || item->mask) {
195                         rte_flow_error_set(error, EINVAL,
196                                 RTE_FLOW_ERROR_TYPE_ITEM,
197                                 item, "Not supported by ntuple filter");
198                         return -rte_errno;
199                 }
200                 /* check if the next not void item is IPv4 */
201                 index++;
202                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
203                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
204                         rte_flow_error_set(error,
205                           EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
206                           item, "Not supported by ntuple filter");
207                         return -rte_errno;
208                 }
209         }
210
211         /* get the IPv4 info */
212         if (!item->spec || !item->mask) {
213                 rte_flow_error_set(error, EINVAL,
214                         RTE_FLOW_ERROR_TYPE_ITEM,
215                         item, "Invalid ntuple mask");
216                 return -rte_errno;
217         }
218         /*Not supported last point for range*/
219         if (item->last) {
220                 rte_flow_error_set(error, EINVAL,
221                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
222                         item, "Not supported last point for range");
223                 return -rte_errno;
224
225         }
226
227         ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
228         /**
229          * Only support src & dst addresses, protocol,
230          * others should be masked.
231          */
232         if (ipv4_mask->hdr.version_ihl ||
233             ipv4_mask->hdr.type_of_service ||
234             ipv4_mask->hdr.total_length ||
235             ipv4_mask->hdr.packet_id ||
236             ipv4_mask->hdr.fragment_offset ||
237             ipv4_mask->hdr.time_to_live ||
238             ipv4_mask->hdr.hdr_checksum) {
239                 rte_flow_error_set(error,
240                         EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
241                         item, "Not supported by ntuple filter");
242                 return -rte_errno;
243         }
244
245         filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
246         filter->src_ip_mask = ipv4_mask->hdr.src_addr;
247         filter->proto_mask  = ipv4_mask->hdr.next_proto_id;
248
249         ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
250         filter->dst_ip = ipv4_spec->hdr.dst_addr;
251         filter->src_ip = ipv4_spec->hdr.src_addr;
252         filter->proto  = ipv4_spec->hdr.next_proto_id;
253
254         /* check if the next not void item is TCP or UDP */
255         index++;
256         NEXT_ITEM_OF_PATTERN(item, pattern, index);
257         if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
258             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
259             item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
260                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
261                 rte_flow_error_set(error, EINVAL,
262                         RTE_FLOW_ERROR_TYPE_ITEM,
263                         item, "Not supported by ntuple filter");
264                 return -rte_errno;
265         }
266
267         /* get the TCP/UDP info */
268         if (!item->spec || !item->mask) {
269                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
270                 rte_flow_error_set(error, EINVAL,
271                         RTE_FLOW_ERROR_TYPE_ITEM,
272                         item, "Invalid ntuple mask");
273                 return -rte_errno;
274         }
275
276         /*Not supported last point for range*/
277         if (item->last) {
278                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
279                 rte_flow_error_set(error, EINVAL,
280                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
281                         item, "Not supported last point for range");
282                 return -rte_errno;
283
284         }
285
286         if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
287                 tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
288
289                 /**
290                  * Only support src & dst ports, tcp flags,
291                  * others should be masked.
292                  */
293                 if (tcp_mask->hdr.sent_seq ||
294                     tcp_mask->hdr.recv_ack ||
295                     tcp_mask->hdr.data_off ||
296                     tcp_mask->hdr.rx_win ||
297                     tcp_mask->hdr.cksum ||
298                     tcp_mask->hdr.tcp_urp) {
299                         memset(filter, 0,
300                                 sizeof(struct rte_eth_ntuple_filter));
301                         rte_flow_error_set(error, EINVAL,
302                                 RTE_FLOW_ERROR_TYPE_ITEM,
303                                 item, "Not supported by ntuple filter");
304                         return -rte_errno;
305                 }
306
307                 filter->dst_port_mask  = tcp_mask->hdr.dst_port;
308                 filter->src_port_mask  = tcp_mask->hdr.src_port;
309                 if (tcp_mask->hdr.tcp_flags == 0xFF) {
310                         filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
311                 } else if (!tcp_mask->hdr.tcp_flags) {
312                         filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
313                 } else {
314                         memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
315                         rte_flow_error_set(error, EINVAL,
316                                 RTE_FLOW_ERROR_TYPE_ITEM,
317                                 item, "Not supported by ntuple filter");
318                         return -rte_errno;
319                 }
320
321                 tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
322                 filter->dst_port  = tcp_spec->hdr.dst_port;
323                 filter->src_port  = tcp_spec->hdr.src_port;
324                 filter->tcp_flags = tcp_spec->hdr.tcp_flags;
325         } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
326                 udp_mask = (const struct rte_flow_item_udp *)item->mask;
327
328                 /**
329                  * Only support src & dst ports,
330                  * others should be masked.
331                  */
332                 if (udp_mask->hdr.dgram_len ||
333                     udp_mask->hdr.dgram_cksum) {
334                         memset(filter, 0,
335                                 sizeof(struct rte_eth_ntuple_filter));
336                         rte_flow_error_set(error, EINVAL,
337                                 RTE_FLOW_ERROR_TYPE_ITEM,
338                                 item, "Not supported by ntuple filter");
339                         return -rte_errno;
340                 }
341
342                 filter->dst_port_mask = udp_mask->hdr.dst_port;
343                 filter->src_port_mask = udp_mask->hdr.src_port;
344
345                 udp_spec = (const struct rte_flow_item_udp *)item->spec;
346                 filter->dst_port = udp_spec->hdr.dst_port;
347                 filter->src_port = udp_spec->hdr.src_port;
348         } else {
349                 sctp_mask = (const struct rte_flow_item_sctp *)item->mask;
350
351                 /**
352                  * Only support src & dst ports,
353                  * others should be masked.
354                  */
355                 if (sctp_mask->hdr.tag ||
356                     sctp_mask->hdr.cksum) {
357                         memset(filter, 0,
358                                 sizeof(struct rte_eth_ntuple_filter));
359                         rte_flow_error_set(error, EINVAL,
360                                 RTE_FLOW_ERROR_TYPE_ITEM,
361                                 item, "Not supported by ntuple filter");
362                         return -rte_errno;
363                 }
364
365                 filter->dst_port_mask = sctp_mask->hdr.dst_port;
366                 filter->src_port_mask = sctp_mask->hdr.src_port;
367
368                 sctp_spec = (const struct rte_flow_item_sctp *)item->spec;
369                 filter->dst_port = sctp_spec->hdr.dst_port;
370                 filter->src_port = sctp_spec->hdr.src_port;
371         }
372
373         /* check if the next not void item is END */
374         index++;
375         NEXT_ITEM_OF_PATTERN(item, pattern, index);
376         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
377                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
378                 rte_flow_error_set(error, EINVAL,
379                         RTE_FLOW_ERROR_TYPE_ITEM,
380                         item, "Not supported by ntuple filter");
381                 return -rte_errno;
382         }
383
384         /* parse action */
385         index = 0;
386
387         /**
388          * n-tuple only supports forwarding,
389          * check if the first not void action is QUEUE.
390          */
391         NEXT_ITEM_OF_ACTION(act, actions, index);
392         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
393                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
394                 rte_flow_error_set(error, EINVAL,
395                         RTE_FLOW_ERROR_TYPE_ACTION,
396                         act, "Not supported action.");
397                 return -rte_errno;
398         }
399         filter->queue =
400                 ((const struct rte_flow_action_queue *)act->conf)->index;
401
402         /* check if the next not void item is END */
403         index++;
404         NEXT_ITEM_OF_ACTION(act, actions, index);
405         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
406                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
407                 rte_flow_error_set(error, EINVAL,
408                         RTE_FLOW_ERROR_TYPE_ACTION,
409                         act, "Not supported action.");
410                 return -rte_errno;
411         }
412
413         /* parse attr */
414         /* must be input direction */
415         if (!attr->ingress) {
416                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
417                 rte_flow_error_set(error, EINVAL,
418                                    RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
419                                    attr, "Only support ingress.");
420                 return -rte_errno;
421         }
422
423         /* not supported */
424         if (attr->egress) {
425                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
426                 rte_flow_error_set(error, EINVAL,
427                                    RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
428                                    attr, "Not support egress.");
429                 return -rte_errno;
430         }
431
432         if (attr->priority > 0xFFFF) {
433                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
434                 rte_flow_error_set(error, EINVAL,
435                                    RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
436                                    attr, "Error priority.");
437                 return -rte_errno;
438         }
439         filter->priority = (uint16_t)attr->priority;
440         if (attr->priority < IXGBE_MIN_N_TUPLE_PRIO ||
441             attr->priority > IXGBE_MAX_N_TUPLE_PRIO)
442                 filter->priority = 1;
443
444         return 0;
445 }
446
447 /* a specific function for ixgbe because the flags are specific */
448 static int
449 ixgbe_parse_ntuple_filter(struct rte_eth_dev *dev,
450                           const struct rte_flow_attr *attr,
451                           const struct rte_flow_item pattern[],
452                           const struct rte_flow_action actions[],
453                           struct rte_eth_ntuple_filter *filter,
454                           struct rte_flow_error *error)
455 {
456         int ret;
457         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
458
459         MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
460
461         ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);
462
463         if (ret)
464                 return ret;
465
466         /* Ixgbe doesn't support tcp flags. */
467         if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
468                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
469                 rte_flow_error_set(error, EINVAL,
470                                    RTE_FLOW_ERROR_TYPE_ITEM,
471                                    NULL, "Not supported by ntuple filter");
472                 return -rte_errno;
473         }
474
475         /* Ixgbe doesn't support many priorities. */
476         if (filter->priority < IXGBE_MIN_N_TUPLE_PRIO ||
477             filter->priority > IXGBE_MAX_N_TUPLE_PRIO) {
478                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
479                 rte_flow_error_set(error, EINVAL,
480                         RTE_FLOW_ERROR_TYPE_ITEM,
481                         NULL, "Priority not supported by ntuple filter");
482                 return -rte_errno;
483         }
484
485         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM ||
486                 filter->priority > IXGBE_5TUPLE_MAX_PRI ||
487                 filter->priority < IXGBE_5TUPLE_MIN_PRI)
488                 return -rte_errno;
489
490         /* fixed value for ixgbe */
491         filter->flags = RTE_5TUPLE_FLAGS;
492         return 0;
493 }
494
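/*
 * Illustrative sketch only, not part of the driver: one way an
 * application could build a pattern/action list that the n-tuple
 * parsers above accept (ETH / IPV4 / UDP + QUEUE).  The helper name,
 * addresses, ports and queue index are hypothetical.
 */
static __rte_unused int
ixgbe_flow_ntuple_sketch(void)
{
        struct rte_flow_attr attr = {
                .ingress = 1,
                .priority = 1,  /* ixgbe only accepts priorities 1..7 */
        };
        struct rte_flow_item_ipv4 ip_spec = {
                .hdr = {
                        .src_addr = rte_cpu_to_be_32(0xC0A80114), /* 192.168.1.20 */
                        .dst_addr = rte_cpu_to_be_32(0xC0A70332), /* 192.167.3.50 */
                        .next_proto_id = IPPROTO_UDP,
                },
        };
        struct rte_flow_item_ipv4 ip_mask = {
                .hdr = {
                        .src_addr = UINT32_MAX,
                        .dst_addr = UINT32_MAX,
                        .next_proto_id = UINT8_MAX,
                },
        };
        struct rte_flow_item_udp udp_spec = {
                .hdr = {
                        .src_port = rte_cpu_to_be_16(80),
                        .dst_port = rte_cpu_to_be_16(80),
                },
        };
        struct rte_flow_item_udp udp_mask = {
                .hdr = {
                        .src_port = UINT16_MAX,
                        .dst_port = UINT16_MAX,
                },
        };
        struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH }, /* spec/mask stay NULL */
                { .type = RTE_FLOW_ITEM_TYPE_IPV4,
                  .spec = &ip_spec, .mask = &ip_mask },
                { .type = RTE_FLOW_ITEM_TYPE_UDP,
                  .spec = &udp_spec, .mask = &udp_mask },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action_queue queue = { .index = 1 };
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        struct rte_flow_error err;

        /* port 0 is assumed to be an ixgbe port that is already started */
        return rte_flow_validate(0, &attr, pattern, actions, &err);
}
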
495 /**
496  * Parse the rule to see if it is an ethertype rule,
497  * and get the ethertype filter info as well.
498  * pattern:
499  * The first not void item must be ETH.
500  * The next not void item must be END.
501  * action:
502  * The first not void action should be QUEUE.
503  * The next not void action should be END.
504  * pattern example:
505  * ITEM         Spec                    Mask
506  * ETH          type    0x0807          0xFFFF
507  * END
508  * other members in mask and spec should be set to 0x00.
509  * item->last should be NULL.
510  */
511 static int
512 cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
513                             const struct rte_flow_item *pattern,
514                             const struct rte_flow_action *actions,
515                             struct rte_eth_ethertype_filter *filter,
516                             struct rte_flow_error *error)
517 {
518         const struct rte_flow_item *item;
519         const struct rte_flow_action *act;
520         const struct rte_flow_item_eth *eth_spec;
521         const struct rte_flow_item_eth *eth_mask;
522         const struct rte_flow_action_queue *act_q;
523         uint32_t index;
524
525         if (!pattern) {
526                 rte_flow_error_set(error, EINVAL,
527                                 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
528                                 NULL, "NULL pattern.");
529                 return -rte_errno;
530         }
531
532         if (!actions) {
533                 rte_flow_error_set(error, EINVAL,
534                                 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
535                                 NULL, "NULL action.");
536                 return -rte_errno;
537         }
538
539         if (!attr) {
540                 rte_flow_error_set(error, EINVAL,
541                                    RTE_FLOW_ERROR_TYPE_ATTR,
542                                    NULL, "NULL attribute.");
543                 return -rte_errno;
544         }
545
546         /* Parse pattern */
547         index = 0;
548
549         /* The first non-void item should be MAC. */
550         item = pattern + index;
551         while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {
552                 index++;
553                 item = pattern + index;
554         }
555         if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
556                 rte_flow_error_set(error, EINVAL,
557                         RTE_FLOW_ERROR_TYPE_ITEM,
558                         item, "Not supported by ethertype filter");
559                 return -rte_errno;
560         }
561
562         /*Not supported last point for range*/
563         if (item->last) {
564                 rte_flow_error_set(error, EINVAL,
565                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
566                         item, "Not supported last point for range");
567                 return -rte_errno;
568         }
569
570         /* Get the MAC info. */
571         if (!item->spec || !item->mask) {
572                 rte_flow_error_set(error, EINVAL,
573                                 RTE_FLOW_ERROR_TYPE_ITEM,
574                                 item, "Not supported by ethertype filter");
575                 return -rte_errno;
576         }
577
578         eth_spec = (const struct rte_flow_item_eth *)item->spec;
579         eth_mask = (const struct rte_flow_item_eth *)item->mask;
580
581         /* Mask bits of source MAC address must be full of 0.
582          * Mask bits of destination MAC address must be full
583          * of 1 or full of 0.
584          */
585         if (!is_zero_ether_addr(&eth_mask->src) ||
586             (!is_zero_ether_addr(&eth_mask->dst) &&
587              !is_broadcast_ether_addr(&eth_mask->dst))) {
588                 rte_flow_error_set(error, EINVAL,
589                                 RTE_FLOW_ERROR_TYPE_ITEM,
590                                 item, "Invalid ether address mask");
591                 return -rte_errno;
592         }
593
594         if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
595                 rte_flow_error_set(error, EINVAL,
596                                 RTE_FLOW_ERROR_TYPE_ITEM,
597                                 item, "Invalid ethertype mask");
598                 return -rte_errno;
599         }
600
601         /* If mask bits of destination MAC address
602          * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
603          */
604         if (is_broadcast_ether_addr(&eth_mask->dst)) {
605                 filter->mac_addr = eth_spec->dst;
606                 filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
607         } else {
608                 filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
609         }
610         filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
611
612         /* Check if the next non-void item is END. */
613         index++;
614         item = pattern + index;
615         while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {
616                 index++;
617                 item = pattern + index;
618         }
619         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
620                 rte_flow_error_set(error, EINVAL,
621                                 RTE_FLOW_ERROR_TYPE_ITEM,
622                                 item, "Not supported by ethertype filter.");
623                 return -rte_errno;
624         }
625
626         /* Parse action */
627
628         index = 0;
629         /* Check if the first non-void action is QUEUE or DROP. */
630         act = actions + index;
631         while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {
632                 index++;
633                 act = actions + index;
634         }
635         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
636             act->type != RTE_FLOW_ACTION_TYPE_DROP) {
637                 rte_flow_error_set(error, EINVAL,
638                                 RTE_FLOW_ERROR_TYPE_ACTION,
639                                 act, "Not supported action.");
640                 return -rte_errno;
641         }
642
643         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
644                 act_q = (const struct rte_flow_action_queue *)act->conf;
645                 filter->queue = act_q->index;
646         } else {
647                 filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
648         }
649
650         /* Check if the next non-void item is END */
651         index++;
652         act = actions + index;
653         while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {
654                 index++;
655                 act = actions + index;
656         }
657         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
658                 rte_flow_error_set(error, EINVAL,
659                                 RTE_FLOW_ERROR_TYPE_ACTION,
660                                 act, "Not supported action.");
661                 return -rte_errno;
662         }
663
664         /* Parse attr */
665         /* Must be input direction */
666         if (!attr->ingress) {
667                 rte_flow_error_set(error, EINVAL,
668                                 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
669                                 attr, "Only support ingress.");
670                 return -rte_errno;
671         }
672
673         /* Not supported */
674         if (attr->egress) {
675                 rte_flow_error_set(error, EINVAL,
676                                 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
677                                 attr, "Not support egress.");
678                 return -rte_errno;
679         }
680
681         /* Not supported */
682         if (attr->priority) {
683                 rte_flow_error_set(error, EINVAL,
684                                 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
685                                 attr, "Not support priority.");
686                 return -rte_errno;
687         }
688
689         /* Not supported */
690         if (attr->group) {
691                 rte_flow_error_set(error, EINVAL,
692                                 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
693                                 attr, "Not support group.");
694                 return -rte_errno;
695         }
696
697         return 0;
698 }
699
700 static int
701 ixgbe_parse_ethertype_filter(struct rte_eth_dev *dev,
702                                  const struct rte_flow_attr *attr,
703                              const struct rte_flow_item pattern[],
704                              const struct rte_flow_action actions[],
705                              struct rte_eth_ethertype_filter *filter,
706                              struct rte_flow_error *error)
707 {
708         int ret;
709         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
710
711         MAC_TYPE_FILTER_SUP(hw->mac.type);
712
713         ret = cons_parse_ethertype_filter(attr, pattern,
714                                         actions, filter, error);
715
716         if (ret)
717                 return ret;
718
719         /* Ixgbe doesn't support MAC address. */
720         if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
721                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
722                 rte_flow_error_set(error, EINVAL,
723                         RTE_FLOW_ERROR_TYPE_ITEM,
724                         NULL, "Not supported by ethertype filter");
725                 return -rte_errno;
726         }
727
728         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
729                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
730                 rte_flow_error_set(error, EINVAL,
731                         RTE_FLOW_ERROR_TYPE_ITEM,
732                         NULL, "queue index much too big");
733                 return -rte_errno;
734         }
735
736         if (filter->ether_type == ETHER_TYPE_IPv4 ||
737                 filter->ether_type == ETHER_TYPE_IPv6) {
738                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
739                 rte_flow_error_set(error, EINVAL,
740                         RTE_FLOW_ERROR_TYPE_ITEM,
741                         NULL, "IPv4/IPv6 not supported by ethertype filter");
742                 return -rte_errno;
743         }
744
745         if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
746                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
747                 rte_flow_error_set(error, EINVAL,
748                         RTE_FLOW_ERROR_TYPE_ITEM,
749                         NULL, "mac compare is unsupported");
750                 return -rte_errno;
751         }
752
753         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
754                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
755                 rte_flow_error_set(error, EINVAL,
756                         RTE_FLOW_ERROR_TYPE_ITEM,
757                         NULL, "drop option is unsupported");
758                 return -rte_errno;
759         }
760
761         return 0;
762 }
763
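/*
 * Illustrative sketch only, not part of the driver: an ethertype rule
 * that the parsers above accept -- match ARP frames (0x0806) and steer
 * them to a queue.  The helper name, ethertype and queue index are
 * hypothetical; note that IPv4/IPv6 ethertypes, MAC matching and the
 * DROP action would be rejected by ixgbe_parse_ethertype_filter().
 */
static __rte_unused int
ixgbe_flow_ethertype_sketch(void)
{
        struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_item_eth eth_spec = {
                .type = rte_cpu_to_be_16(0x0806), /* ARP */
        };
        struct rte_flow_item_eth eth_mask = {
                /* src/dst MAC masks stay all-zero: no MAC comparison */
                .type = UINT16_MAX, /* ethertype fully masked */
        };
        struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH,
                  .spec = &eth_spec, .mask = &eth_mask },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action_queue queue = { .index = 2 };
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        struct rte_flow_error err;

        return rte_flow_validate(0, &attr, pattern, actions, &err);
}
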
764 /**
765  * Parse the rule to see if it is a TCP SYN rule,
766  * and get the TCP SYN filter info as well.
767  * pattern:
768  * The first not void item can be ETH, IPV4, IPV6 or TCP.
769  * The second not void item must be IPV4 or IPV6 if the first one is ETH.
770  * The third not void item must be TCP.
771  * The next not void item must be END.
772  * action:
773  * The first not void action should be QUEUE.
774  * The next not void action should be END.
775  * pattern example:
776  * ITEM         Spec                    Mask
777  * ETH          NULL                    NULL
778  * IPV4/IPV6    NULL                    NULL
779  * TCP          tcp_flags       0x02    0xFF
780  * END
781  * other members in mask and spec should be set to 0x00.
782  * item->last should be NULL.
783  */
784 static int
785 cons_parse_syn_filter(const struct rte_flow_attr *attr,
786                                 const struct rte_flow_item pattern[],
787                                 const struct rte_flow_action actions[],
788                                 struct rte_eth_syn_filter *filter,
789                                 struct rte_flow_error *error)
790 {
791         const struct rte_flow_item *item;
792         const struct rte_flow_action *act;
793         const struct rte_flow_item_tcp *tcp_spec;
794         const struct rte_flow_item_tcp *tcp_mask;
795         const struct rte_flow_action_queue *act_q;
796         uint32_t index;
797
798         if (!pattern) {
799                 rte_flow_error_set(error, EINVAL,
800                                 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
801                                 NULL, "NULL pattern.");
802                 return -rte_errno;
803         }
804
805         if (!actions) {
806                 rte_flow_error_set(error, EINVAL,
807                                 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
808                                 NULL, "NULL action.");
809                 return -rte_errno;
810         }
811
812         if (!attr) {
813                 rte_flow_error_set(error, EINVAL,
814                                    RTE_FLOW_ERROR_TYPE_ATTR,
815                                    NULL, "NULL attribute.");
816                 return -rte_errno;
817         }
818
819         /* parse pattern */
820         index = 0;
821
822         /* the first not void item should be MAC or IPv4 or IPv6 or TCP */
823         NEXT_ITEM_OF_PATTERN(item, pattern, index);
824         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
825             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
826             item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
827             item->type != RTE_FLOW_ITEM_TYPE_TCP) {
828                 rte_flow_error_set(error, EINVAL,
829                                 RTE_FLOW_ERROR_TYPE_ITEM,
830                                 item, "Not supported by syn filter");
831                 return -rte_errno;
832         }
833         /*Not supported last point for range*/
834         if (item->last) {
835                 rte_flow_error_set(error, EINVAL,
836                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
837                         item, "Not supported last point for range");
838                 return -rte_errno;
839         }
840
841         /* Skip Ethernet */
842         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
843                 /* if the item is MAC, the content should be NULL */
844                 if (item->spec || item->mask) {
845                         rte_flow_error_set(error, EINVAL,
846                                 RTE_FLOW_ERROR_TYPE_ITEM,
847                                 item, "Invalid SYN address mask");
848                         return -rte_errno;
849                 }
850
851                 /* check if the next not void item is IPv4 or IPv6 */
852                 index++;
853                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
854                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
855                     item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
856                         rte_flow_error_set(error, EINVAL,
857                                 RTE_FLOW_ERROR_TYPE_ITEM,
858                                 item, "Not supported by syn filter");
859                         return -rte_errno;
860                 }
861         }
862
863         /* Skip IP */
864         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
865             item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
866                 /* if the item is IP, the content should be NULL */
867                 if (item->spec || item->mask) {
868                         rte_flow_error_set(error, EINVAL,
869                                 RTE_FLOW_ERROR_TYPE_ITEM,
870                                 item, "Invalid SYN mask");
871                         return -rte_errno;
872                 }
873
874                 /* check if the next not void item is TCP */
875                 index++;
876                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
877                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
878                         rte_flow_error_set(error, EINVAL,
879                                 RTE_FLOW_ERROR_TYPE_ITEM,
880                                 item, "Not supported by syn filter");
881                         return -rte_errno;
882                 }
883         }
884
885         /* Get the TCP info. Only support SYN. */
886         if (!item->spec || !item->mask) {
887                 rte_flow_error_set(error, EINVAL,
888                                 RTE_FLOW_ERROR_TYPE_ITEM,
889                                 item, "Invalid SYN mask");
890                 return -rte_errno;
891         }
892         /*Not supported last point for range*/
893         if (item->last) {
894                 rte_flow_error_set(error, EINVAL,
895                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
896                         item, "Not supported last point for range");
897                 return -rte_errno;
898         }
899
900         tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
901         tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
902         if (!(tcp_spec->hdr.tcp_flags & TCP_SYN_FLAG) ||
903             tcp_mask->hdr.src_port ||
904             tcp_mask->hdr.dst_port ||
905             tcp_mask->hdr.sent_seq ||
906             tcp_mask->hdr.recv_ack ||
907             tcp_mask->hdr.data_off ||
908             tcp_mask->hdr.tcp_flags != TCP_SYN_FLAG ||
909             tcp_mask->hdr.rx_win ||
910             tcp_mask->hdr.cksum ||
911             tcp_mask->hdr.tcp_urp) {
912                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
913                 rte_flow_error_set(error, EINVAL,
914                                 RTE_FLOW_ERROR_TYPE_ITEM,
915                                 item, "Not supported by syn filter");
916                 return -rte_errno;
917         }
918
919         /* check if the next not void item is END */
920         index++;
921         NEXT_ITEM_OF_PATTERN(item, pattern, index);
922         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
923                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
924                 rte_flow_error_set(error, EINVAL,
925                                 RTE_FLOW_ERROR_TYPE_ITEM,
926                                 item, "Not supported by syn filter");
927                 return -rte_errno;
928         }
929
930         /* parse action */
931         index = 0;
932
933         /* check if the first not void action is QUEUE. */
934         NEXT_ITEM_OF_ACTION(act, actions, index);
935         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
936                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
937                 rte_flow_error_set(error, EINVAL,
938                                 RTE_FLOW_ERROR_TYPE_ACTION,
939                                 act, "Not supported action.");
940                 return -rte_errno;
941         }
942
943         act_q = (const struct rte_flow_action_queue *)act->conf;
944         filter->queue = act_q->index;
945         if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
946                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
947                 rte_flow_error_set(error, EINVAL,
948                                 RTE_FLOW_ERROR_TYPE_ACTION,
949                                 act, "Not supported action.");
950                 return -rte_errno;
951         }
952
953         /* check if the next not void item is END */
954         index++;
955         NEXT_ITEM_OF_ACTION(act, actions, index);
956         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
957                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
958                 rte_flow_error_set(error, EINVAL,
959                                 RTE_FLOW_ERROR_TYPE_ACTION,
960                                 act, "Not supported action.");
961                 return -rte_errno;
962         }
963
964         /* parse attr */
965         /* must be input direction */
966         if (!attr->ingress) {
967                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
968                 rte_flow_error_set(error, EINVAL,
969                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
970                         attr, "Only support ingress.");
971                 return -rte_errno;
972         }
973
974         /* not supported */
975         if (attr->egress) {
976                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
977                 rte_flow_error_set(error, EINVAL,
978                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
979                         attr, "Not support egress.");
980                 return -rte_errno;
981         }
982
983         /* Support 2 priorities, the lowest or highest. */
984         if (!attr->priority) {
985                 filter->hig_pri = 0;
986         } else if (attr->priority == (uint32_t)~0U) {
987                 filter->hig_pri = 1;
988         } else {
989                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
990                 rte_flow_error_set(error, EINVAL,
991                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
992                         attr, "Not support priority.");
993                 return -rte_errno;
994         }
995
996         return 0;
997 }
998
999 static int
1000 ixgbe_parse_syn_filter(struct rte_eth_dev *dev,
1001                                  const struct rte_flow_attr *attr,
1002                              const struct rte_flow_item pattern[],
1003                              const struct rte_flow_action actions[],
1004                              struct rte_eth_syn_filter *filter,
1005                              struct rte_flow_error *error)
1006 {
1007         int ret;
1008         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1009
1010         MAC_TYPE_FILTER_SUP(hw->mac.type);
1011
1012         ret = cons_parse_syn_filter(attr, pattern,
1013                                         actions, filter, error);
1014
1015         if (ret)
1016                 return ret;
1017
1018         return 0;
1019 }
1020
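/*
 * Illustrative sketch only, not part of the driver: a TCP SYN rule in
 * the shape the parsers above expect.  The helper name, queue index
 * and priority are hypothetical; priority may only be 0 (low) or
 * UINT32_MAX (high).
 */
static __rte_unused int
ixgbe_flow_syn_sketch(void)
{
        struct rte_flow_attr attr = { .ingress = 1, .priority = 0 };
        struct rte_flow_item_tcp tcp_spec = {
                .hdr = { .tcp_flags = TCP_SYN_FLAG },
        };
        struct rte_flow_item_tcp tcp_mask = {
                /* only the SYN bit is masked; all other fields stay 0 */
                .hdr = { .tcp_flags = TCP_SYN_FLAG },
        };
        struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },  /* spec/mask stay NULL */
                { .type = RTE_FLOW_ITEM_TYPE_IPV4 }, /* spec/mask stay NULL */
                { .type = RTE_FLOW_ITEM_TYPE_TCP,
                  .spec = &tcp_spec, .mask = &tcp_mask },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action_queue queue = { .index = 3 };
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        struct rte_flow_error err;

        return rte_flow_validate(0, &attr, pattern, actions, &err);
}
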
1021 /**
1022  * Parse the rule to see if it is an L2 tunnel rule,
1023  * and get the L2 tunnel filter info as well.
1024  * Only E-tag is supported now.
1025  * pattern:
1026  * The first not void item must be E_TAG.
1027  * The next not void item must be END.
1028  * action:
1029  * The first not void action should be QUEUE.
1030  * The next not void action should be END.
1031  * pattern example:
1032  * ITEM         Spec                    Mask
1033  * E_TAG        grp             0x1     0x3
1034  *              e_cid_base      0x309   0xFFF
1035  * END
1036  * other members in mask and spec should be set to 0x00.
1037  * item->last should be NULL.
1038  */
1039 static int
1040 cons_parse_l2_tn_filter(const struct rte_flow_attr *attr,
1041                         const struct rte_flow_item pattern[],
1042                         const struct rte_flow_action actions[],
1043                         struct rte_eth_l2_tunnel_conf *filter,
1044                         struct rte_flow_error *error)
1045 {
1046         const struct rte_flow_item *item;
1047         const struct rte_flow_item_e_tag *e_tag_spec;
1048         const struct rte_flow_item_e_tag *e_tag_mask;
1049         const struct rte_flow_action *act;
1050         const struct rte_flow_action_queue *act_q;
1051         uint32_t index;
1052
1053         if (!pattern) {
1054                 rte_flow_error_set(error, EINVAL,
1055                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1056                         NULL, "NULL pattern.");
1057                 return -rte_errno;
1058         }
1059
1060         if (!actions) {
1061                 rte_flow_error_set(error, EINVAL,
1062                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1063                                    NULL, "NULL action.");
1064                 return -rte_errno;
1065         }
1066
1067         if (!attr) {
1068                 rte_flow_error_set(error, EINVAL,
1069                                    RTE_FLOW_ERROR_TYPE_ATTR,
1070                                    NULL, "NULL attribute.");
1071                 return -rte_errno;
1072         }
1073         /* parse pattern */
1074         index = 0;
1075
1076         /* The first not void item should be e-tag. */
1077         NEXT_ITEM_OF_PATTERN(item, pattern, index);
1078         if (item->type != RTE_FLOW_ITEM_TYPE_E_TAG) {
1079                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1080                 rte_flow_error_set(error, EINVAL,
1081                         RTE_FLOW_ERROR_TYPE_ITEM,
1082                         item, "Not supported by L2 tunnel filter");
1083                 return -rte_errno;
1084         }
1085
1086         if (!item->spec || !item->mask) {
1087                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1088                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
1089                         item, "Not supported by L2 tunnel filter");
1090                 return -rte_errno;
1091         }
1092
1093         /*Not supported last point for range*/
1094         if (item->last) {
1095                 rte_flow_error_set(error, EINVAL,
1096                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1097                         item, "Not supported last point for range");
1098                 return -rte_errno;
1099         }
1100
1101         e_tag_spec = (const struct rte_flow_item_e_tag *)item->spec;
1102         e_tag_mask = (const struct rte_flow_item_e_tag *)item->mask;
1103
1104         /* Only care about GRP and E cid base. */
1105         if (e_tag_mask->epcp_edei_in_ecid_b ||
1106             e_tag_mask->in_ecid_e ||
1107             e_tag_mask->ecid_e ||
1108             e_tag_mask->rsvd_grp_ecid_b != rte_cpu_to_be_16(0x3FFF)) {
1109                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1110                 rte_flow_error_set(error, EINVAL,
1111                         RTE_FLOW_ERROR_TYPE_ITEM,
1112                         item, "Not supported by L2 tunnel filter");
1113                 return -rte_errno;
1114         }
1115
1116         filter->l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
1117         /**
1118          * grp and e_cid_base are bit fields and only use 14 bits.
1119          * e-tag id is taken as little endian by HW.
1120          */
1121         filter->tunnel_id = rte_be_to_cpu_16(e_tag_spec->rsvd_grp_ecid_b);
1122
1123         /* check if the next not void item is END */
1124         index++;
1125         NEXT_ITEM_OF_PATTERN(item, pattern, index);
1126         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1127                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1128                 rte_flow_error_set(error, EINVAL,
1129                         RTE_FLOW_ERROR_TYPE_ITEM,
1130                         item, "Not supported by L2 tunnel filter");
1131                 return -rte_errno;
1132         }
1133
1134         /* parse attr */
1135         /* must be input direction */
1136         if (!attr->ingress) {
1137                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1138                 rte_flow_error_set(error, EINVAL,
1139                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1140                         attr, "Only support ingress.");
1141                 return -rte_errno;
1142         }
1143
1144         /* not supported */
1145         if (attr->egress) {
1146                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1147                 rte_flow_error_set(error, EINVAL,
1148                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1149                         attr, "Not support egress.");
1150                 return -rte_errno;
1151         }
1152
1153         /* not supported */
1154         if (attr->priority) {
1155                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1156                 rte_flow_error_set(error, EINVAL,
1157                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1158                         attr, "Not support priority.");
1159                 return -rte_errno;
1160         }
1161
1162         /* parse action */
1163         index = 0;
1164
1165         /* check if the first not void action is QUEUE. */
1166         NEXT_ITEM_OF_ACTION(act, actions, index);
1167         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
1168                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1169                 rte_flow_error_set(error, EINVAL,
1170                         RTE_FLOW_ERROR_TYPE_ACTION,
1171                         act, "Not supported action.");
1172                 return -rte_errno;
1173         }
1174
1175         act_q = (const struct rte_flow_action_queue *)act->conf;
1176         filter->pool = act_q->index;
1177
1178         /* check if the next not void item is END */
1179         index++;
1180         NEXT_ITEM_OF_ACTION(act, actions, index);
1181         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1182                 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1183                 rte_flow_error_set(error, EINVAL,
1184                         RTE_FLOW_ERROR_TYPE_ACTION,
1185                         act, "Not supported action.");
1186                 return -rte_errno;
1187         }
1188
1189         return 0;
1190 }
1191
1192 static int
1193 ixgbe_parse_l2_tn_filter(struct rte_eth_dev *dev,
1194                         const struct rte_flow_attr *attr,
1195                         const struct rte_flow_item pattern[],
1196                         const struct rte_flow_action actions[],
1197                         struct rte_eth_l2_tunnel_conf *l2_tn_filter,
1198                         struct rte_flow_error *error)
1199 {
1200         int ret = 0;
1201         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1202
1203         ret = cons_parse_l2_tn_filter(attr, pattern,
1204                                 actions, l2_tn_filter, error);
1205
1206         if (hw->mac.type != ixgbe_mac_X550 &&
1207                 hw->mac.type != ixgbe_mac_X550EM_x &&
1208                 hw->mac.type != ixgbe_mac_X550EM_a) {
1209                 memset(l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1210                 rte_flow_error_set(error, EINVAL,
1211                         RTE_FLOW_ERROR_TYPE_ITEM,
1212                         NULL, "Not supported by L2 tunnel filter");
1213                 return -rte_errno;
1214         }
1215
1216         return ret;
1217 }
1218
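/*
 * Illustrative sketch only, not part of the driver: an E-tag rule for
 * the L2 tunnel parsers above (x550 family only), using the GRP and
 * E-CID base values from the comment block.  The helper name and pool
 * index are hypothetical.
 */
static __rte_unused int
ixgbe_flow_l2_tunnel_sketch(void)
{
        struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_item_e_tag e_tag_spec = {
                /* GRP = 0x1, E-CID base = 0x309, kept big endian */
                .rsvd_grp_ecid_b = rte_cpu_to_be_16((0x1 << 12) | 0x309),
        };
        struct rte_flow_item_e_tag e_tag_mask = {
                /* GRP (2 bits) and E-CID base (12 bits) fully masked */
                .rsvd_grp_ecid_b = rte_cpu_to_be_16(0x3FFF),
        };
        struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_E_TAG,
                  .spec = &e_tag_spec, .mask = &e_tag_mask },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action_queue pool = { .index = 0 }; /* pool 0 */
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &pool },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        struct rte_flow_error err;

        return rte_flow_validate(0, &attr, pattern, actions, &err);
}
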
1219 /* Parse to get the attr and action info of flow director rule. */
1220 static int
1221 ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
1222                           const struct rte_flow_action actions[],
1223                           struct ixgbe_fdir_rule *rule,
1224                           struct rte_flow_error *error)
1225 {
1226         const struct rte_flow_action *act;
1227         const struct rte_flow_action_queue *act_q;
1228         const struct rte_flow_action_mark *mark;
1229         uint32_t index;
1230
1231         /* parse attr */
1232         /* must be input direction */
1233         if (!attr->ingress) {
1234                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1235                 rte_flow_error_set(error, EINVAL,
1236                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1237                         attr, "Only support ingress.");
1238                 return -rte_errno;
1239         }
1240
1241         /* not supported */
1242         if (attr->egress) {
1243                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1244                 rte_flow_error_set(error, EINVAL,
1245                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1246                         attr, "Not support egress.");
1247                 return -rte_errno;
1248         }
1249
1250         /* not supported */
1251         if (attr->priority) {
1252                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1253                 rte_flow_error_set(error, EINVAL,
1254                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1255                         attr, "Not support priority.");
1256                 return -rte_errno;
1257         }
1258
1259         /* parse action */
1260         index = 0;
1261
1262         /* check if the first not void action is QUEUE or DROP. */
1263         NEXT_ITEM_OF_ACTION(act, actions, index);
1264         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
1265             act->type != RTE_FLOW_ACTION_TYPE_DROP) {
1266                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1267                 rte_flow_error_set(error, EINVAL,
1268                         RTE_FLOW_ERROR_TYPE_ACTION,
1269                         act, "Not supported action.");
1270                 return -rte_errno;
1271         }
1272
1273         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
1274                 act_q = (const struct rte_flow_action_queue *)act->conf;
1275                 rule->queue = act_q->index;
1276         } else { /* drop */
1277                 rule->fdirflags = IXGBE_FDIRCMD_DROP;
1278         }
1279
1280         /* check if the next not void item is MARK */
1281         index++;
1282         NEXT_ITEM_OF_ACTION(act, actions, index);
1283         if ((act->type != RTE_FLOW_ACTION_TYPE_MARK) &&
1284                 (act->type != RTE_FLOW_ACTION_TYPE_END)) {
1285                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1286                 rte_flow_error_set(error, EINVAL,
1287                         RTE_FLOW_ERROR_TYPE_ACTION,
1288                         act, "Not supported action.");
1289                 return -rte_errno;
1290         }
1291
1292         rule->soft_id = 0;
1293
1294         if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
1295                 mark = (const struct rte_flow_action_mark *)act->conf;
1296                 rule->soft_id = mark->id;
1297                 index++;
1298                 NEXT_ITEM_OF_ACTION(act, actions, index);
1299         }
1300
1301         /* check if the next not void item is END */
1302         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1303                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1304                 rte_flow_error_set(error, EINVAL,
1305                         RTE_FLOW_ERROR_TYPE_ACTION,
1306                         act, "Not supported action.");
1307                 return -rte_errno;
1308         }
1309
1310         return 0;
1311 }
1312
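/*
 * Illustrative sketch only, not part of the driver: an action list in
 * the shape ixgbe_parse_fdir_act_attr() above accepts -- QUEUE (or
 * DROP), an optional MARK carrying a software id, then END.  The
 * helper name, queue index and mark id are hypothetical.
 */
static __rte_unused void
ixgbe_flow_fdir_actions_sketch(void)
{
        struct rte_flow_action_queue queue = { .index = 5 };
        struct rte_flow_action_mark mark = { .id = 0x1234 }; /* becomes rule->soft_id */
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        RTE_SET_USED(actions);
}
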
1313 /**
1314  * Parse the rule to see if it is a IP or MAC VLAN flow director rule.
1315  * And get the flow director filter info BTW.
1316  * UDP/TCP/SCTP PATTERN:
1317  * The first not void item can be ETH or IPV4.
1318  * The second not void item must be IPV4 if the first one is ETH.
1319  * The third not void item must be UDP or TCP or SCTP.
1320  * The next not void item must be END.
1321  * MAC VLAN PATTERN:
1322  * The first not void item must be ETH.
1323  * The second not void item must be MAC VLAN.
1324  * The next not void item must be END.
1325  * ACTION:
1326  * The first not void action should be QUEUE or DROP.
1327  * The second not void optional action should be MARK,
1328  * mark_id is a uint32_t number.
1329  * The next not void action should be END.
1330  * UDP/TCP/SCTP pattern example:
1331  * ITEM         Spec                    Mask
1332  * ETH          NULL                    NULL
1333  * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
1334  *              dst_addr 192.167.3.50   0xFFFFFFFF
1335  * UDP/TCP/SCTP src_port        80      0xFFFF
1336  *              dst_port        80      0xFFFF
1337  * END
1338  * MAC VLAN pattern example:
1339  * ITEM         Spec                    Mask
1340  * ETH          dst_addr
1341  *              {0xAC, 0x7B, 0xA1,      {0xFF, 0xFF, 0xFF,
1342  *              0x2C, 0x6D, 0x36}       0xFF, 0xFF, 0xFF}
1343  * MAC VLAN     tci     0x2016          0xEFFF
1344  * END
1345  * Other members in mask and spec should be set to 0x00.
1346  * Item->last should be NULL.
1347  */
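/*
 * Illustrative sketch only (not part of the driver): assuming the standard
 * testpmd "flow" command syntax, the UDP/TCP/SCTP example above could be
 * exercised with something like the following single command; port id,
 * queue index and mark id are hypothetical.
 *
 * testpmd> flow create 0 ingress
 *          pattern eth / ipv4 src is 192.168.1.20 dst is 192.167.3.50 /
 *          udp src is 80 dst is 80 / end
 *          actions queue index 1 / mark id 4660 / end
 */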
1348 static int
1349 ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
1350                                const struct rte_flow_item pattern[],
1351                                const struct rte_flow_action actions[],
1352                                struct ixgbe_fdir_rule *rule,
1353                                struct rte_flow_error *error)
1354 {
1355         const struct rte_flow_item *item;
1356         const struct rte_flow_item_eth *eth_spec;
1357         const struct rte_flow_item_eth *eth_mask;
1358         const struct rte_flow_item_ipv4 *ipv4_spec;
1359         const struct rte_flow_item_ipv4 *ipv4_mask;
1360         const struct rte_flow_item_tcp *tcp_spec;
1361         const struct rte_flow_item_tcp *tcp_mask;
1362         const struct rte_flow_item_udp *udp_spec;
1363         const struct rte_flow_item_udp *udp_mask;
1364         const struct rte_flow_item_sctp *sctp_spec;
1365         const struct rte_flow_item_sctp *sctp_mask;
1366         const struct rte_flow_item_vlan *vlan_spec;
1367         const struct rte_flow_item_vlan *vlan_mask;
1368
1369         uint32_t index, j;
1370
1371         if (!pattern) {
1372                 rte_flow_error_set(error, EINVAL,
1373                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1374                         NULL, "NULL pattern.");
1375                 return -rte_errno;
1376         }
1377
1378         if (!actions) {
1379                 rte_flow_error_set(error, EINVAL,
1380                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1381                                    NULL, "NULL action.");
1382                 return -rte_errno;
1383         }
1384
1385         if (!attr) {
1386                 rte_flow_error_set(error, EINVAL,
1387                                    RTE_FLOW_ERROR_TYPE_ATTR,
1388                                    NULL, "NULL attribute.");
1389                 return -rte_errno;
1390         }
1391
1392         /**
1393          * Some fields may not be provided. Set spec to 0 and mask to default
1394          * value, so we do not need to handle the omitted fields later.
1395          */
1396         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1397         memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
1398         rule->mask.vlan_tci_mask = 0;
1399
1400         /* parse pattern */
1401         index = 0;
1402
1403         /**
1404          * The first not void item should be
1405          * MAC or IPv4 or TCP or UDP or SCTP.
1406          */
1407         NEXT_ITEM_OF_PATTERN(item, pattern, index);
1408         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
1409             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
1410             item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1411             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1412             item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
1413                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1414                 rte_flow_error_set(error, EINVAL,
1415                         RTE_FLOW_ERROR_TYPE_ITEM,
1416                         item, "Not supported by fdir filter");
1417                 return -rte_errno;
1418         }
1419
1420         rule->mode = RTE_FDIR_MODE_PERFECT;
1421
1422         /*Not supported last point for range*/
1423         if (item->last) {
1424                 rte_flow_error_set(error, EINVAL,
1425                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1426                         item, "Not supported last point for range");
1427                 return -rte_errno;
1428         }
1429
1430         /* Get the MAC info. */
1431         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
1432                 /**
1433                  * Only support vlan and dst MAC address,
1434                  * others should be masked.
1435                  */
1436                 if (item->spec && !item->mask) {
1437                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1438                         rte_flow_error_set(error, EINVAL,
1439                                 RTE_FLOW_ERROR_TYPE_ITEM,
1440                                 item, "Not supported by fdir filter");
1441                         return -rte_errno;
1442                 }
1443
1444                 if (item->spec) {
1445                         rule->b_spec = TRUE;
1446                         eth_spec = (const struct rte_flow_item_eth *)item->spec;
1447
1448                         /* Get the dst MAC. */
1449                         for (j = 0; j < ETHER_ADDR_LEN; j++) {
1450                                 rule->ixgbe_fdir.formatted.inner_mac[j] =
1451                                         eth_spec->dst.addr_bytes[j];
1452                         }
1453                 }
1454
1455
1456                 if (item->mask) {
1457                         /* If an ethernet mask is given, it means MAC VLAN mode. */
1458                         rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN;
1459
1460                         rule->b_mask = TRUE;
1461                         eth_mask = (const struct rte_flow_item_eth *)item->mask;
1462
1463                         /* Ether type should be masked. */
1464                         if (eth_mask->type) {
1465                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1466                                 rte_flow_error_set(error, EINVAL,
1467                                         RTE_FLOW_ERROR_TYPE_ITEM,
1468                                         item, "Not supported by fdir filter");
1469                                 return -rte_errno;
1470                         }
1471
1472                         /**
1473                          * The src MAC address must be fully masked out,
1474                          * and the dst MAC address mask must be all 0xFF.
1475                          */
1476                         for (j = 0; j < ETHER_ADDR_LEN; j++) {
1477                                 if (eth_mask->src.addr_bytes[j] ||
1478                                         eth_mask->dst.addr_bytes[j] != 0xFF) {
1479                                         memset(rule, 0,
1480                                         sizeof(struct ixgbe_fdir_rule));
1481                                         rte_flow_error_set(error, EINVAL,
1482                                         RTE_FLOW_ERROR_TYPE_ITEM,
1483                                         item, "Not supported by fdir filter");
1484                                         return -rte_errno;
1485                                 }
1486                         }
1487
1488                         /* When no VLAN, considered as full mask. */
1489                         rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
1490                 }
1491                 /**
1492                  * If both spec and mask are NULL, it means
1493                  * don't care about ETH. Do nothing.
1494                  */
1495
1496                 /**
1497                  * Check if the next not void item is vlan or ipv4.
1498                  * IPv6 is not supported.
1499                  */
1500                 index++;
1501                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
1502                 if (rule->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
1503                         if (item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
1504                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1505                                 rte_flow_error_set(error, EINVAL,
1506                                         RTE_FLOW_ERROR_TYPE_ITEM,
1507                                         item, "Not supported by fdir filter");
1508                                 return -rte_errno;
1509                         }
1510                 } else {
1511                         if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
1512                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1513                                 rte_flow_error_set(error, EINVAL,
1514                                         RTE_FLOW_ERROR_TYPE_ITEM,
1515                                         item, "Not supported by fdir filter");
1516                                 return -rte_errno;
1517                         }
1518                 }
1519         }
1520
1521         if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1522                 if (!(item->spec && item->mask)) {
1523                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1524                         rte_flow_error_set(error, EINVAL,
1525                                 RTE_FLOW_ERROR_TYPE_ITEM,
1526                                 item, "Not supported by fdir filter");
1527                         return -rte_errno;
1528                 }
1529
1530                 /*Not supported last point for range*/
1531                 if (item->last) {
1532                         rte_flow_error_set(error, EINVAL,
1533                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1534                                 item, "Not supported last point for range");
1535                         return -rte_errno;
1536                 }
1537
1538                 vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
1539                 vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
1540
1541                 rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
1542
1543                 rule->mask.vlan_tci_mask = vlan_mask->tci;
1544                 rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
1545                 /* More than one VLAN tag is not supported. */
1546
1547                 /**
1548                  * The next not void item must be END; a second VLAN is rejected.
1549                  */
1550                 index++;
1551                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
1552                 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1553                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1554                         rte_flow_error_set(error, EINVAL,
1555                                 RTE_FLOW_ERROR_TYPE_ITEM,
1556                                 item, "Not supported by fdir filter");
1557                         return -rte_errno;
1558                 } else if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1559                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1560                         rte_flow_error_set(error, EINVAL,
1561                                 RTE_FLOW_ERROR_TYPE_ITEM,
1562                                 item, "Not supported by fdir filter");
1563                         return -rte_errno;
1564                 }
1565         }
1566
1567         /* Get the IP info. */
1568         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1569                 /**
1570                  * Set the flow type even if there's no content
1571                  * as we must have a flow type.
1572                  */
1573                 rule->ixgbe_fdir.formatted.flow_type =
1574                         IXGBE_ATR_FLOW_TYPE_IPV4;
1575                 /*Not supported last point for range*/
1576                 if (item->last) {
1577                         rte_flow_error_set(error, EINVAL,
1578                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1579                                 item, "Not supported last point for range");
1580                         return -rte_errno;
1581                 }
1582                 /**
1583                  * Only care about src & dst addresses,
1584                  * others should be masked.
1585                  */
1586                 if (!item->mask) {
1587                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1588                         rte_flow_error_set(error, EINVAL,
1589                                 RTE_FLOW_ERROR_TYPE_ITEM,
1590                                 item, "Not supported by fdir filter");
1591                         return -rte_errno;
1592                 }
1593                 rule->b_mask = TRUE;
1594                 ipv4_mask =
1595                         (const struct rte_flow_item_ipv4 *)item->mask;
1596                 if (ipv4_mask->hdr.version_ihl ||
1597                     ipv4_mask->hdr.type_of_service ||
1598                     ipv4_mask->hdr.total_length ||
1599                     ipv4_mask->hdr.packet_id ||
1600                     ipv4_mask->hdr.fragment_offset ||
1601                     ipv4_mask->hdr.time_to_live ||
1602                     ipv4_mask->hdr.next_proto_id ||
1603                     ipv4_mask->hdr.hdr_checksum) {
1604                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1605                         rte_flow_error_set(error, EINVAL,
1606                                 RTE_FLOW_ERROR_TYPE_ITEM,
1607                                 item, "Not supported by fdir filter");
1608                         return -rte_errno;
1609                 }
1610                 rule->mask.dst_ipv4_mask = ipv4_mask->hdr.dst_addr;
1611                 rule->mask.src_ipv4_mask = ipv4_mask->hdr.src_addr;
1612
1613                 if (item->spec) {
1614                         rule->b_spec = TRUE;
1615                         ipv4_spec =
1616                                 (const struct rte_flow_item_ipv4 *)item->spec;
1617                         rule->ixgbe_fdir.formatted.dst_ip[0] =
1618                                 ipv4_spec->hdr.dst_addr;
1619                         rule->ixgbe_fdir.formatted.src_ip[0] =
1620                                 ipv4_spec->hdr.src_addr;
1621                 }
1622
1623                 /**
1624                  * Check if the next not void item is
1625                  * TCP or UDP or SCTP or END.
1626                  */
1627                 index++;
1628                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
1629                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1630                     item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1631                     item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
1632                     item->type != RTE_FLOW_ITEM_TYPE_END) {
1633                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1634                         rte_flow_error_set(error, EINVAL,
1635                                 RTE_FLOW_ERROR_TYPE_ITEM,
1636                                 item, "Not supported by fdir filter");
1637                         return -rte_errno;
1638                 }
1639         }
1640
1641         /* Get the TCP info. */
1642         if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
1643                 /**
1644                  * Set the flow type even if there's no content
1645                  * as we must have a flow type.
1646                  */
1647                 rule->ixgbe_fdir.formatted.flow_type =
1648                         IXGBE_ATR_FLOW_TYPE_TCPV4;
1649                 /*Not supported last point for range*/
1650                 if (item->last) {
1651                         rte_flow_error_set(error, EINVAL,
1652                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1653                                 item, "Not supported last point for range");
1654                         return -rte_errno;
1655                 }
1656                 /**
1657                  * Only care about src & dst ports,
1658                  * others should be masked.
1659                  */
1660                 if (!item->mask) {
1661                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1662                         rte_flow_error_set(error, EINVAL,
1663                                 RTE_FLOW_ERROR_TYPE_ITEM,
1664                                 item, "Not supported by fdir filter");
1665                         return -rte_errno;
1666                 }
1667                 rule->b_mask = TRUE;
1668                 tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
1669                 if (tcp_mask->hdr.sent_seq ||
1670                     tcp_mask->hdr.recv_ack ||
1671                     tcp_mask->hdr.data_off ||
1672                     tcp_mask->hdr.tcp_flags ||
1673                     tcp_mask->hdr.rx_win ||
1674                     tcp_mask->hdr.cksum ||
1675                     tcp_mask->hdr.tcp_urp) {
1676                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1677                         rte_flow_error_set(error, EINVAL,
1678                                 RTE_FLOW_ERROR_TYPE_ITEM,
1679                                 item, "Not supported by fdir filter");
1680                         return -rte_errno;
1681                 }
1682                 rule->mask.src_port_mask = tcp_mask->hdr.src_port;
1683                 rule->mask.dst_port_mask = tcp_mask->hdr.dst_port;
1684
1685                 if (item->spec) {
1686                         rule->b_spec = TRUE;
1687                         tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
1688                         rule->ixgbe_fdir.formatted.src_port =
1689                                 tcp_spec->hdr.src_port;
1690                         rule->ixgbe_fdir.formatted.dst_port =
1691                                 tcp_spec->hdr.dst_port;
1692                 }
1693         }
1694
1695         /* Get the UDP info */
1696         if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
1697                 /**
1698                  * Set the flow type even if there's no content
1699                  * as we must have a flow type.
1700                  */
1701                 rule->ixgbe_fdir.formatted.flow_type =
1702                         IXGBE_ATR_FLOW_TYPE_UDPV4;
1703                 /*Not supported last point for range*/
1704                 if (item->last) {
1705                         rte_flow_error_set(error, EINVAL,
1706                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1707                                 item, "Not supported last point for range");
1708                         return -rte_errno;
1709                 }
1710                 /**
1711                  * Only care about src & dst ports,
1712                  * others should be masked.
1713                  */
1714                 if (!item->mask) {
1715                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1716                         rte_flow_error_set(error, EINVAL,
1717                                 RTE_FLOW_ERROR_TYPE_ITEM,
1718                                 item, "Not supported by fdir filter");
1719                         return -rte_errno;
1720                 }
1721                 rule->b_mask = TRUE;
1722                 udp_mask = (const struct rte_flow_item_udp *)item->mask;
1723                 if (udp_mask->hdr.dgram_len ||
1724                     udp_mask->hdr.dgram_cksum) {
1725                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1726                         rte_flow_error_set(error, EINVAL,
1727                                 RTE_FLOW_ERROR_TYPE_ITEM,
1728                                 item, "Not supported by fdir filter");
1729                         return -rte_errno;
1730                 }
1731                 rule->mask.src_port_mask = udp_mask->hdr.src_port;
1732                 rule->mask.dst_port_mask = udp_mask->hdr.dst_port;
1733
1734                 if (item->spec) {
1735                         rule->b_spec = TRUE;
1736                         udp_spec = (const struct rte_flow_item_udp *)item->spec;
1737                         rule->ixgbe_fdir.formatted.src_port =
1738                                 udp_spec->hdr.src_port;
1739                         rule->ixgbe_fdir.formatted.dst_port =
1740                                 udp_spec->hdr.dst_port;
1741                 }
1742         }
1743
1744         /* Get the SCTP info */
1745         if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
1746                 /**
1747                  * Set the flow type even if there's no content
1748                  * as we must have a flow type.
1749                  */
1750                 rule->ixgbe_fdir.formatted.flow_type =
1751                         IXGBE_ATR_FLOW_TYPE_SCTPV4;
1752                 /*Not supported last point for range*/
1753                 if (item->last) {
1754                         rte_flow_error_set(error, EINVAL,
1755                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1756                                 item, "Not supported last point for range");
1757                         return -rte_errno;
1758                 }
1759                 /**
1760                  * Only care about src & dst ports,
1761                  * others should be masked.
1762                  */
1763                 if (!item->mask) {
1764                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1765                         rte_flow_error_set(error, EINVAL,
1766                                 RTE_FLOW_ERROR_TYPE_ITEM,
1767                                 item, "Not supported by fdir filter");
1768                         return -rte_errno;
1769                 }
1770                 rule->b_mask = TRUE;
1771                 sctp_mask =
1772                         (const struct rte_flow_item_sctp *)item->mask;
1773                 if (sctp_mask->hdr.tag ||
1774                     sctp_mask->hdr.cksum) {
1775                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1776                         rte_flow_error_set(error, EINVAL,
1777                                 RTE_FLOW_ERROR_TYPE_ITEM,
1778                                 item, "Not supported by fdir filter");
1779                         return -rte_errno;
1780                 }
1781                 rule->mask.src_port_mask = sctp_mask->hdr.src_port;
1782                 rule->mask.dst_port_mask = sctp_mask->hdr.dst_port;
1783
1784                 if (item->spec) {
1785                         rule->b_spec = TRUE;
1786                         sctp_spec =
1787                                 (const struct rte_flow_item_sctp *)item->spec;
1788                         rule->ixgbe_fdir.formatted.src_port =
1789                                 sctp_spec->hdr.src_port;
1790                         rule->ixgbe_fdir.formatted.dst_port =
1791                                 sctp_spec->hdr.dst_port;
1792                 }
1793         }
1794
1795         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1796                 /* check if the next not void item is END */
1797                 index++;
1798                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
1799                 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1800                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1801                         rte_flow_error_set(error, EINVAL,
1802                                 RTE_FLOW_ERROR_TYPE_ITEM,
1803                                 item, "Not supported by fdir filter");
1804                         return -rte_errno;
1805                 }
1806         }
1807
1808         return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
1809 }
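/*
 * Illustrative sketch only (not part of the driver): a minimal way for an
 * application to express the UDP perfect-filter example from the comment
 * above through the generic rte_flow API. The addresses, ports, queue index
 * and the documentation-only guard macro are hypothetical; only fields the
 * parser above accepts are filled in, and the port is assumed to have been
 * configured with flow director mode RTE_FDIR_MODE_PERFECT (see the sketch
 * after ixgbe_parse_fdir_filter() below).
 */
#ifdef IXGBE_FLOW_DOC_EXAMPLES
static int
example_validate_fdir_udp_rule(uint8_t port_id, struct rte_flow_error *error)
{
        struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_item_ipv4 ip_spec, ip_mask;
        struct rte_flow_item_udp udp_spec, udp_mask;
        struct rte_flow_action_queue queue = { .index = 1 };
        struct rte_flow_item pattern[3];
        struct rte_flow_action actions[2];

        memset(&ip_spec, 0, sizeof(ip_spec));
        memset(&ip_mask, 0, sizeof(ip_mask));
        ip_spec.hdr.src_addr = rte_cpu_to_be_32(0xC0A80114); /* 192.168.1.20 */
        ip_spec.hdr.dst_addr = rte_cpu_to_be_32(0xC0A70332); /* 192.167.3.50 */
        ip_mask.hdr.src_addr = rte_cpu_to_be_32(UINT32_MAX);
        ip_mask.hdr.dst_addr = rte_cpu_to_be_32(UINT32_MAX);

        memset(&udp_spec, 0, sizeof(udp_spec));
        memset(&udp_mask, 0, sizeof(udp_mask));
        udp_spec.hdr.src_port = rte_cpu_to_be_16(80);
        udp_spec.hdr.dst_port = rte_cpu_to_be_16(80);
        udp_mask.hdr.src_port = rte_cpu_to_be_16(UINT16_MAX);
        udp_mask.hdr.dst_port = rte_cpu_to_be_16(UINT16_MAX);

        memset(pattern, 0, sizeof(pattern));
        pattern[0].type = RTE_FLOW_ITEM_TYPE_IPV4;
        pattern[0].spec = &ip_spec;
        pattern[0].mask = &ip_mask;
        pattern[1].type = RTE_FLOW_ITEM_TYPE_UDP;
        pattern[1].spec = &udp_spec;
        pattern[1].mask = &udp_mask;
        pattern[2].type = RTE_FLOW_ITEM_TYPE_END;

        memset(actions, 0, sizeof(actions));
        actions[0].type = RTE_FLOW_ACTION_TYPE_QUEUE;
        actions[0].conf = &queue;
        actions[1].type = RTE_FLOW_ACTION_TYPE_END;

        return rte_flow_validate(port_id, &attr, pattern, actions, error);
}
#endif /* IXGBE_FLOW_DOC_EXAMPLES */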
1810
1811 #define NVGRE_PROTOCOL 0x6558
1812
1813 /**
1814  * Parse the rule to see if it is a VxLAN or NVGRE flow director rule,
1815  * and fill in the flow director filter info along the way.
1816  * VxLAN PATTERN:
1817  * The first not void item must be ETH.
1818  * The second not void item must be IPV4/ IPV6.
1819  * The third not void item must be UDP, followed by VxLAN.
1820  * The next not void item must be END.
1821  * NVGRE PATTERN:
1822  * The first not void item must be ETH.
1823  * The second not void item must be IPV4/ IPV6.
1824  * The third not void item must be NVGRE.
1825  * The next not void item must be END.
1826  * ACTION:
1827  * The first not void action should be QUEUE or DROP.
1828  * The second not void optional action should be MARK,
1829  * mark_id is a uint32_t number.
1830  * The next not void action should be END.
1831  * VxLAN pattern example:
1832  * ITEM         Spec                    Mask
1833  * ETH          NULL                    NULL
1834  * IPV4/IPV6    NULL                    NULL
1835  * UDP          NULL                    NULL
1836  * VxLAN        vni{0x00, 0x32, 0x54}   {0xFF, 0xFF, 0xFF}
1837  * MAC VLAN     tci     0x2016          0xEFFF
1838  * END
1839  * NVGRE pattern example:
1840  * ITEM         Spec                    Mask
1841  * ETH          NULL                    NULL
1842  * IPV4/IPV6    NULL                    NULL
1843  * NVGRE        protocol        0x6558  0xFFFF
1844  *              tni{0x00, 0x32, 0x54}   {0xFF, 0xFF, 0xFF}
1845  * MAC VLAN     tci     0x2016          0xEFFF
1846  * END
1847  * Other members in mask and spec should be set to 0x00.
1848  * Item->last should be NULL.
1849  */
1850 static int
1851 ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
1852                                const struct rte_flow_item pattern[],
1853                                const struct rte_flow_action actions[],
1854                                struct ixgbe_fdir_rule *rule,
1855                                struct rte_flow_error *error)
1856 {
1857         const struct rte_flow_item *item;
1858         const struct rte_flow_item_vxlan *vxlan_spec;
1859         const struct rte_flow_item_vxlan *vxlan_mask;
1860         const struct rte_flow_item_nvgre *nvgre_spec;
1861         const struct rte_flow_item_nvgre *nvgre_mask;
1862         const struct rte_flow_item_eth *eth_spec;
1863         const struct rte_flow_item_eth *eth_mask;
1864         const struct rte_flow_item_vlan *vlan_spec;
1865         const struct rte_flow_item_vlan *vlan_mask;
1866         uint32_t index, j;
1867
1868         if (!pattern) {
1869                 rte_flow_error_set(error, EINVAL,
1870                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1871                                    NULL, "NULL pattern.");
1872                 return -rte_errno;
1873         }
1874
1875         if (!actions) {
1876                 rte_flow_error_set(error, EINVAL,
1877                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1878                                    NULL, "NULL action.");
1879                 return -rte_errno;
1880         }
1881
1882         if (!attr) {
1883                 rte_flow_error_set(error, EINVAL,
1884                                    RTE_FLOW_ERROR_TYPE_ATTR,
1885                                    NULL, "NULL attribute.");
1886                 return -rte_errno;
1887         }
1888
1889         /**
1890          * Some fields may not be provided. Set spec to 0 and mask to default
1891          * value, so we do not need to handle the omitted fields later.
1892          */
1893         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1894         memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
1895         rule->mask.vlan_tci_mask = 0;
1896
1897         /* parse pattern */
1898         index = 0;
1899
1900         /**
1901          * The first not void item should be
1902          * MAC or IPv4 or IPv6 or UDP or VxLAN.
1903          */
1904         NEXT_ITEM_OF_PATTERN(item, pattern, index);
1905         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
1906             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
1907             item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
1908             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1909             item->type != RTE_FLOW_ITEM_TYPE_VXLAN &&
1910             item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
1911                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1912                 rte_flow_error_set(error, EINVAL,
1913                         RTE_FLOW_ERROR_TYPE_ITEM,
1914                         item, "Not supported by fdir filter");
1915                 return -rte_errno;
1916         }
1917
1918         rule->mode = RTE_FDIR_MODE_PERFECT_TUNNEL;
1919
1920         /* Skip MAC. */
1921         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
1922                 /* Only used to describe the protocol stack. */
1923                 if (item->spec || item->mask) {
1924                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1925                         rte_flow_error_set(error, EINVAL,
1926                                 RTE_FLOW_ERROR_TYPE_ITEM,
1927                                 item, "Not supported by fdir filter");
1928                         return -rte_errno;
1929                 }
1930                 /*Not supported last point for range*/
1931                 if (item->last) {
1932                         rte_flow_error_set(error, EINVAL,
1933                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1934                                 item, "Not supported last point for range");
1935                         return -rte_errno;
1936                 }
1937
1938                 /* Check if the next not void item is IPv4 or IPv6. */
1939                 index++;
1940                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
1941                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
1942                     item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
1943                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1944                         rte_flow_error_set(error, EINVAL,
1945                                 RTE_FLOW_ERROR_TYPE_ITEM,
1946                                 item, "Not supported by fdir filter");
1947                         return -rte_errno;
1948                 }
1949         }
1950
1951         /* Skip IP. */
1952         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
1953             item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
1954                 /* Only used to describe the protocol stack. */
1955                 if (item->spec || item->mask) {
1956                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1957                         rte_flow_error_set(error, EINVAL,
1958                                 RTE_FLOW_ERROR_TYPE_ITEM,
1959                                 item, "Not supported by fdir filter");
1960                         return -rte_errno;
1961                 }
1962                 /*Not supported last point for range*/
1963                 if (item->last) {
1964                         rte_flow_error_set(error, EINVAL,
1965                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1966                                 item, "Not supported last point for range");
1967                         return -rte_errno;
1968                 }
1969
1970                 /* Check if the next not void item is UDP or NVGRE. */
1971                 index++;
1972                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
1973                 if (item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1974                     item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
1975                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1976                         rte_flow_error_set(error, EINVAL,
1977                                 RTE_FLOW_ERROR_TYPE_ITEM,
1978                                 item, "Not supported by fdir filter");
1979                         return -rte_errno;
1980                 }
1981         }
1982
1983         /* Skip UDP. */
1984         if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
1985                 /* Only used to describe the protocol stack. */
1986                 if (item->spec || item->mask) {
1987                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1988                         rte_flow_error_set(error, EINVAL,
1989                                 RTE_FLOW_ERROR_TYPE_ITEM,
1990                                 item, "Not supported by fdir filter");
1991                         return -rte_errno;
1992                 }
1993                 /*Not supported last point for range*/
1994                 if (item->last) {
1995                         rte_flow_error_set(error, EINVAL,
1996                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1997                                 item, "Not supported last point for range");
1998                         return -rte_errno;
1999                 }
2000
2001                 /* Check if the next not void item is VxLAN. */
2002                 index++;
2003                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
2004                 if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
2005                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2006                         rte_flow_error_set(error, EINVAL,
2007                                 RTE_FLOW_ERROR_TYPE_ITEM,
2008                                 item, "Not supported by fdir filter");
2009                         return -rte_errno;
2010                 }
2011         }
2012
2013         /* Get the VxLAN info */
2014         if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
2015                 rule->ixgbe_fdir.formatted.tunnel_type =
2016                         RTE_FDIR_TUNNEL_TYPE_VXLAN;
2017
2018                 /* Only care about VNI, others should be masked. */
2019                 if (!item->mask) {
2020                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2021                         rte_flow_error_set(error, EINVAL,
2022                                 RTE_FLOW_ERROR_TYPE_ITEM,
2023                                 item, "Not supported by fdir filter");
2024                         return -rte_errno;
2025                 }
2026                 /*Not supported last point for range*/
2027                 if (item->last) {
2028                         rte_flow_error_set(error, EINVAL,
2029                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2030                                 item, "Not supported last point for range");
2031                         return -rte_errno;
2032                 }
2033                 rule->b_mask = TRUE;
2034
2035                 /* Tunnel type is always meaningful. */
2036                 rule->mask.tunnel_type_mask = 1;
2037
2038                 vxlan_mask =
2039                         (const struct rte_flow_item_vxlan *)item->mask;
2040                 if (vxlan_mask->flags) {
2041                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2042                         rte_flow_error_set(error, EINVAL,
2043                                 RTE_FLOW_ERROR_TYPE_ITEM,
2044                                 item, "Not supported by fdir filter");
2045                         return -rte_errno;
2046                 }
2047                 /* VNI mask must be all 0xFF or all zero. */
2048                 if ((vxlan_mask->vni[0] || vxlan_mask->vni[1] ||
2049                         vxlan_mask->vni[2]) &&
2050                         ((vxlan_mask->vni[0] != 0xFF) ||
2051                         (vxlan_mask->vni[1] != 0xFF) ||
2052                                 (vxlan_mask->vni[2] != 0xFF))) {
2053                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2054                         rte_flow_error_set(error, EINVAL,
2055                                 RTE_FLOW_ERROR_TYPE_ITEM,
2056                                 item, "Not supported by fdir filter");
2057                         return -rte_errno;
2058                 }
2059
2060                 rte_memcpy(&rule->mask.tunnel_id_mask, vxlan_mask->vni,
2061                         RTE_DIM(vxlan_mask->vni));
2062
2063                 if (item->spec) {
2064                         rule->b_spec = TRUE;
2065                         vxlan_spec = (const struct rte_flow_item_vxlan *)
2066                                         item->spec;
2067                         rte_memcpy(((uint8_t *)
2068                                 &rule->ixgbe_fdir.formatted.tni_vni + 1),
2069                                 vxlan_spec->vni, RTE_DIM(vxlan_spec->vni));
2070                         rule->ixgbe_fdir.formatted.tni_vni = rte_be_to_cpu_32(
2071                                 rule->ixgbe_fdir.formatted.tni_vni);
2072                 }
2073         }
2074
2075         /* Get the NVGRE info */
2076         if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE) {
2077                 rule->ixgbe_fdir.formatted.tunnel_type =
2078                         RTE_FDIR_TUNNEL_TYPE_NVGRE;
2079
2080                 /**
2081                  * Only care about flags0, flags1, protocol and TNI,
2082                  * others should be masked.
2083                  */
2084                 if (!item->mask) {
2085                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2086                         rte_flow_error_set(error, EINVAL,
2087                                 RTE_FLOW_ERROR_TYPE_ITEM,
2088                                 item, "Not supported by fdir filter");
2089                         return -rte_errno;
2090                 }
2091                 /*Not supported last point for range*/
2092                 if (item->last) {
2093                         rte_flow_error_set(error, EINVAL,
2094                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2095                                 item, "Not supported last point for range");
2096                         return -rte_errno;
2097                 }
2098                 rule->b_mask = TRUE;
2099
2100                 /* Tunnel type is always meaningful. */
2101                 rule->mask.tunnel_type_mask = 1;
2102
2103                 nvgre_mask =
2104                         (const struct rte_flow_item_nvgre *)item->mask;
2105                 if (nvgre_mask->flow_id) {
2106                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2107                         rte_flow_error_set(error, EINVAL,
2108                                 RTE_FLOW_ERROR_TYPE_ITEM,
2109                                 item, "Not supported by fdir filter");
2110                         return -rte_errno;
2111                 }
2112                 if (nvgre_mask->c_k_s_rsvd0_ver !=
2113                         rte_cpu_to_be_16(0x3000) ||
2114                     nvgre_mask->protocol != 0xFFFF) {
2115                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2116                         rte_flow_error_set(error, EINVAL,
2117                                 RTE_FLOW_ERROR_TYPE_ITEM,
2118                                 item, "Not supported by fdir filter");
2119                         return -rte_errno;
2120                 }
2121                 /* TNI mask must be all 0xFF or all zero. */
2122                 if (nvgre_mask->tni[0] &&
2123                     ((nvgre_mask->tni[0] != 0xFF) ||
2124                     (nvgre_mask->tni[1] != 0xFF) ||
2125                     (nvgre_mask->tni[2] != 0xFF))) {
2126                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2127                         rte_flow_error_set(error, EINVAL,
2128                                 RTE_FLOW_ERROR_TYPE_ITEM,
2129                                 item, "Not supported by fdir filter");
2130                         return -rte_errno;
2131                 }
2132                 /* tni is a 24-bit field */
2133                 rte_memcpy(&rule->mask.tunnel_id_mask, nvgre_mask->tni,
2134                         RTE_DIM(nvgre_mask->tni));
2135                 rule->mask.tunnel_id_mask <<= 8;
2136
2137                 if (item->spec) {
2138                         rule->b_spec = TRUE;
2139                         nvgre_spec =
2140                                 (const struct rte_flow_item_nvgre *)item->spec;
2141                         if (nvgre_spec->c_k_s_rsvd0_ver !=
2142                             rte_cpu_to_be_16(0x2000) ||
2143                             nvgre_spec->protocol !=
2144                             rte_cpu_to_be_16(NVGRE_PROTOCOL)) {
2145                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2146                                 rte_flow_error_set(error, EINVAL,
2147                                         RTE_FLOW_ERROR_TYPE_ITEM,
2148                                         item, "Not supported by fdir filter");
2149                                 return -rte_errno;
2150                         }
2151                         /* tni is a 24-bit field */
2152                         rte_memcpy(&rule->ixgbe_fdir.formatted.tni_vni,
2153                         nvgre_spec->tni, RTE_DIM(nvgre_spec->tni));
2154                         rule->ixgbe_fdir.formatted.tni_vni <<= 8;
2155                 }
2156         }
2157
2158         /* check if the next not void item is MAC */
2159         index++;
2160         NEXT_ITEM_OF_PATTERN(item, pattern, index);
2161         if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
2162                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2163                 rte_flow_error_set(error, EINVAL,
2164                         RTE_FLOW_ERROR_TYPE_ITEM,
2165                         item, "Not supported by fdir filter");
2166                 return -rte_errno;
2167         }
2168
2169         /**
2170          * Only support vlan and dst MAC address,
2171          * others should be masked.
2172          */
2173
2174         if (!item->mask) {
2175                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2176                 rte_flow_error_set(error, EINVAL,
2177                         RTE_FLOW_ERROR_TYPE_ITEM,
2178                         item, "Not supported by fdir filter");
2179                 return -rte_errno;
2180         }
2181         /*Not supported last point for range*/
2182         if (item->last) {
2183                 rte_flow_error_set(error, EINVAL,
2184                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2185                         item, "Not supported last point for range");
2186                 return -rte_errno;
2187         }
2188         rule->b_mask = TRUE;
2189         eth_mask = (const struct rte_flow_item_eth *)item->mask;
2190
2191         /* Ether type should be masked. */
2192         if (eth_mask->type) {
2193                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2194                 rte_flow_error_set(error, EINVAL,
2195                         RTE_FLOW_ERROR_TYPE_ITEM,
2196                         item, "Not supported by fdir filter");
2197                 return -rte_errno;
2198         }
2199
2200         /* src MAC address should be masked. */
2201         for (j = 0; j < ETHER_ADDR_LEN; j++) {
2202                 if (eth_mask->src.addr_bytes[j]) {
2203                         memset(rule, 0,
2204                                sizeof(struct ixgbe_fdir_rule));
2205                         rte_flow_error_set(error, EINVAL,
2206                                 RTE_FLOW_ERROR_TYPE_ITEM,
2207                                 item, "Not supported by fdir filter");
2208                         return -rte_errno;
2209                 }
2210         }
2211         rule->mask.mac_addr_byte_mask = 0;
2212         for (j = 0; j < ETHER_ADDR_LEN; j++) {
2213                 /* It's a per byte mask. */
2214                 if (eth_mask->dst.addr_bytes[j] == 0xFF) {
2215                         rule->mask.mac_addr_byte_mask |= 0x1 << j;
2216                 } else if (eth_mask->dst.addr_bytes[j]) {
2217                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2218                         rte_flow_error_set(error, EINVAL,
2219                                 RTE_FLOW_ERROR_TYPE_ITEM,
2220                                 item, "Not supported by fdir filter");
2221                         return -rte_errno;
2222                 }
2223         }
2224
2225         /* When no vlan, considered as full mask. */
2226         rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
2227
2228         if (item->spec) {
2229                 rule->b_spec = TRUE;
2230                 eth_spec = (const struct rte_flow_item_eth *)item->spec;
2231
2232                 /* Get the dst MAC. */
2233                 for (j = 0; j < ETHER_ADDR_LEN; j++) {
2234                         rule->ixgbe_fdir.formatted.inner_mac[j] =
2235                                 eth_spec->dst.addr_bytes[j];
2236                 }
2237         }
2238
2239         /**
2240          * Check if the next not void item is vlan or ipv4.
2241          * IPv6 is not supported.
2242          */
2243         index++;
2244         NEXT_ITEM_OF_PATTERN(item, pattern, index);
2245         if ((item->type != RTE_FLOW_ITEM_TYPE_VLAN) &&
2246                 (item->type != RTE_FLOW_ITEM_TYPE_IPV4)) {
2247                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2248                 rte_flow_error_set(error, EINVAL,
2249                         RTE_FLOW_ERROR_TYPE_ITEM,
2250                         item, "Not supported by fdir filter");
2251                 return -rte_errno;
2252         }
2253         /*Not supported last point for range*/
2254         if (item->last) {
2255                 rte_flow_error_set(error, EINVAL,
2256                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2257                         item, "Not supported last point for range");
2258                 return -rte_errno;
2259         }
2260
2261         if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
2262                 if (!(item->spec && item->mask)) {
2263                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2264                         rte_flow_error_set(error, EINVAL,
2265                                 RTE_FLOW_ERROR_TYPE_ITEM,
2266                                 item, "Not supported by fdir filter");
2267                         return -rte_errno;
2268                 }
2269
2270                 vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
2271                 vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
2272
2273                 rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
2274
2275                 rule->mask.vlan_tci_mask = vlan_mask->tci;
2276                 rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
2277                 /* More than one VLAN tag is not supported. */
2278
2279                 /* check if the next not void item is END */
2280                 index++;
2281                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
2282
2283                 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2284                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2285                         rte_flow_error_set(error, EINVAL,
2286                                 RTE_FLOW_ERROR_TYPE_ITEM,
2287                                 item, "Not supported by fdir filter");
2288                         return -rte_errno;
2289                 }
2290         }
2291
2292         /**
2293          * If no VLAN tag is given, it means don't care about the VLAN.
2294          * Do nothing.
2295          */
2296
2297         return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
2298 }
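/*
 * Illustrative sketch only (not part of the driver): the item sequence the
 * VxLAN tunnel parser above expects. The outer ETH/IPV4/UDP items carry no
 * spec or mask, the VXLAN item matches a fully masked VNI, and the inner ETH
 * item matches the destination MAC with a per-byte full mask. The VNI, the
 * MAC address and the documentation-only guard macro are hypothetical.
 */
#ifdef IXGBE_FLOW_DOC_EXAMPLES
static void
example_build_fdir_vxlan_pattern(struct rte_flow_item pattern[6],
                                 struct rte_flow_item_vxlan *vxlan_spec,
                                 struct rte_flow_item_vxlan *vxlan_mask,
                                 struct rte_flow_item_eth *inner_eth_spec,
                                 struct rte_flow_item_eth *inner_eth_mask)
{
        static const uint8_t vni[3] = { 0x00, 0x32, 0x54 };
        static const uint8_t inner_dmac[ETHER_ADDR_LEN] = {
                0xAC, 0x7B, 0xA1, 0x2C, 0x6D, 0x36 };

        memset(vxlan_spec, 0, sizeof(*vxlan_spec));
        memset(vxlan_mask, 0, sizeof(*vxlan_mask));
        memcpy(vxlan_spec->vni, vni, sizeof(vni));
        memset(vxlan_mask->vni, 0xFF, sizeof(vxlan_mask->vni));

        memset(inner_eth_spec, 0, sizeof(*inner_eth_spec));
        memset(inner_eth_mask, 0, sizeof(*inner_eth_mask));
        memcpy(inner_eth_spec->dst.addr_bytes, inner_dmac, ETHER_ADDR_LEN);
        memset(inner_eth_mask->dst.addr_bytes, 0xFF, ETHER_ADDR_LEN);

        memset(pattern, 0, 6 * sizeof(pattern[0]));
        pattern[0].type = RTE_FLOW_ITEM_TYPE_ETH;   /* outer, no spec/mask */
        pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV4;  /* outer, no spec/mask */
        pattern[2].type = RTE_FLOW_ITEM_TYPE_UDP;   /* outer, no spec/mask */
        pattern[3].type = RTE_FLOW_ITEM_TYPE_VXLAN;
        pattern[3].spec = vxlan_spec;
        pattern[3].mask = vxlan_mask;
        pattern[4].type = RTE_FLOW_ITEM_TYPE_ETH;   /* inner dst MAC */
        pattern[4].spec = inner_eth_spec;
        pattern[4].mask = inner_eth_mask;
        pattern[5].type = RTE_FLOW_ITEM_TYPE_END;
}
#endif /* IXGBE_FLOW_DOC_EXAMPLES */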
2299
2300 static int
2301 ixgbe_parse_fdir_filter(struct rte_eth_dev *dev,
2302                         const struct rte_flow_attr *attr,
2303                         const struct rte_flow_item pattern[],
2304                         const struct rte_flow_action actions[],
2305                         struct ixgbe_fdir_rule *rule,
2306                         struct rte_flow_error *error)
2307 {
2308         int ret;
2309         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2310         enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
2311
2312         if (hw->mac.type != ixgbe_mac_82599EB &&
2313                 hw->mac.type != ixgbe_mac_X540 &&
2314                 hw->mac.type != ixgbe_mac_X550 &&
2315                 hw->mac.type != ixgbe_mac_X550EM_x &&
2316                 hw->mac.type != ixgbe_mac_X550EM_a)
2317                 return -ENOTSUP;
2318
2319         ret = ixgbe_parse_fdir_filter_normal(attr, pattern,
2320                                         actions, rule, error);
2321
2322         if (!ret)
2323                 goto step_next;
2324
2325         ret = ixgbe_parse_fdir_filter_tunnel(attr, pattern,
2326                                         actions, rule, error);
2327
2328 step_next:
2329         if (fdir_mode == RTE_FDIR_MODE_NONE ||
2330             fdir_mode != rule->mode)
2331                 return -ENOTSUP;
2332         return ret;
2333 }
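/*
 * Illustrative sketch only (not part of the driver): ixgbe_parse_fdir_filter()
 * above rejects a rule whose mode does not match the port's configured flow
 * director mode, so the application has to select that mode before
 * rte_eth_dev_configure(). The port id, queue counts and the
 * documentation-only guard macro are hypothetical.
 */
#ifdef IXGBE_FLOW_DOC_EXAMPLES
static int
example_configure_fdir_perfect(uint8_t port_id)
{
        struct rte_eth_conf port_conf;

        memset(&port_conf, 0, sizeof(port_conf));
        port_conf.fdir_conf.mode = RTE_FDIR_MODE_PERFECT;
        port_conf.fdir_conf.pballoc = RTE_FDIR_PBALLOC_64K;

        /* One RX and one TX queue are enough for this sketch. */
        return rte_eth_dev_configure(port_id, 1, 1, &port_conf);
}
#endif /* IXGBE_FLOW_DOC_EXAMPLES */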
2334
2335 void
2336 ixgbe_filterlist_flush(void)
2337 {
2338         struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
2339         struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
2340         struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
2341         struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2342         struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
2343         struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
2344
2345         while ((ntuple_filter_ptr = TAILQ_FIRST(&filter_ntuple_list))) {
2346                 TAILQ_REMOVE(&filter_ntuple_list,
2347                                  ntuple_filter_ptr,
2348                                  entries);
2349                 rte_free(ntuple_filter_ptr);
2350         }
2351
2352         while ((ethertype_filter_ptr = TAILQ_FIRST(&filter_ethertype_list))) {
2353                 TAILQ_REMOVE(&filter_ethertype_list,
2354                                  ethertype_filter_ptr,
2355                                  entries);
2356                 rte_free(ethertype_filter_ptr);
2357         }
2358
2359         while ((syn_filter_ptr = TAILQ_FIRST(&filter_syn_list))) {
2360                 TAILQ_REMOVE(&filter_syn_list,
2361                                  syn_filter_ptr,
2362                                  entries);
2363                 rte_free(syn_filter_ptr);
2364         }
2365
2366         while ((l2_tn_filter_ptr = TAILQ_FIRST(&filter_l2_tunnel_list))) {
2367                 TAILQ_REMOVE(&filter_l2_tunnel_list,
2368                                  l2_tn_filter_ptr,
2369                                  entries);
2370                 rte_free(l2_tn_filter_ptr);
2371         }
2372
2373         while ((fdir_rule_ptr = TAILQ_FIRST(&filter_fdir_list))) {
2374                 TAILQ_REMOVE(&filter_fdir_list,
2375                                  fdir_rule_ptr,
2376                                  entries);
2377                 rte_free(fdir_rule_ptr);
2378         }
2379
2380         while ((ixgbe_flow_mem_ptr = TAILQ_FIRST(&ixgbe_flow_list))) {
2381                 TAILQ_REMOVE(&ixgbe_flow_list,
2382                                  ixgbe_flow_mem_ptr,
2383                                  entries);
2384                 rte_free(ixgbe_flow_mem_ptr->flow);
2385                 rte_free(ixgbe_flow_mem_ptr);
2386         }
2387 }
2388
2389 /**
2390  * Create or destroy a flow rule.
2391  * Theoretically one rule can match more than one filter type.
2392  * We will let it use the first filter type it hits,
2393  * so the sequence of the checks below matters.
2394  */
2395 static struct rte_flow *
2396 ixgbe_flow_create(struct rte_eth_dev *dev,
2397                   const struct rte_flow_attr *attr,
2398                   const struct rte_flow_item pattern[],
2399                   const struct rte_flow_action actions[],
2400                   struct rte_flow_error *error)
2401 {
2402         int ret;
2403         struct rte_eth_ntuple_filter ntuple_filter;
2404         struct rte_eth_ethertype_filter ethertype_filter;
2405         struct rte_eth_syn_filter syn_filter;
2406         struct ixgbe_fdir_rule fdir_rule;
2407         struct rte_eth_l2_tunnel_conf l2_tn_filter;
2408         struct ixgbe_hw_fdir_info *fdir_info =
2409                 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
2410         struct rte_flow *flow = NULL;
2411         struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
2412         struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
2413         struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
2414         struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2415         struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
2416         struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
2417
2418         flow = rte_zmalloc("ixgbe_rte_flow", sizeof(struct rte_flow), 0);
2419         if (!flow) {
2420                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2421                 return NULL;
2422         }
2423         ixgbe_flow_mem_ptr = rte_zmalloc("ixgbe_flow_mem",
2424                         sizeof(struct ixgbe_flow_mem), 0);
2425         if (!ixgbe_flow_mem_ptr) {
2426                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2427                 rte_free(flow);
2428                 return NULL;
2429         }
2430         ixgbe_flow_mem_ptr->flow = flow;
2431         TAILQ_INSERT_TAIL(&ixgbe_flow_list,
2432                                 ixgbe_flow_mem_ptr, entries);
2433
2434         memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
2435         ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
2436                         actions, &ntuple_filter, error);
2437         if (!ret) {
2438                 ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
2439                 if (!ret) {
2440                         ntuple_filter_ptr = rte_zmalloc("ixgbe_ntuple_filter",
2441                                 sizeof(struct ixgbe_ntuple_filter_ele), 0);
2442                         (void)rte_memcpy(&ntuple_filter_ptr->filter_info,
2443                                 &ntuple_filter,
2444                                 sizeof(struct rte_eth_ntuple_filter));
2445                         TAILQ_INSERT_TAIL(&filter_ntuple_list,
2446                                 ntuple_filter_ptr, entries);
2447                         flow->rule = ntuple_filter_ptr;
2448                         flow->filter_type = RTE_ETH_FILTER_NTUPLE;
2449                         return flow;
2450                 }
2451                 goto out;
2452         }
2453
2454         memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
2455         ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
2456                                 actions, &ethertype_filter, error);
2457         if (!ret) {
2458                 ret = ixgbe_add_del_ethertype_filter(dev,
2459                                 &ethertype_filter, TRUE);
2460                 if (!ret) {
2461                         ethertype_filter_ptr = rte_zmalloc(
2462                                 "ixgbe_ethertype_filter",
2463                                 sizeof(struct ixgbe_ethertype_filter_ele), 0);
2464                         (void)rte_memcpy(&ethertype_filter_ptr->filter_info,
2465                                 &ethertype_filter,
2466                                 sizeof(struct rte_eth_ethertype_filter));
2467                         TAILQ_INSERT_TAIL(&filter_ethertype_list,
2468                                 ethertype_filter_ptr, entries);
2469                         flow->rule = ethertype_filter_ptr;
2470                         flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
2471                         return flow;
2472                 }
2473                 goto out;
2474         }
2475
2476         memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
2477         ret = ixgbe_parse_syn_filter(dev, attr, pattern,
2478                                 actions, &syn_filter, error);
2479         if (!ret) {
2480                 ret = ixgbe_syn_filter_set(dev, &syn_filter, TRUE);
2481                 if (!ret) {
2482                         syn_filter_ptr = rte_zmalloc("ixgbe_syn_filter",
2483                                 sizeof(struct ixgbe_eth_syn_filter_ele), 0);
2484                         (void)rte_memcpy(&syn_filter_ptr->filter_info,
2485                                 &syn_filter,
2486                                 sizeof(struct rte_eth_syn_filter));
2487                         TAILQ_INSERT_TAIL(&filter_syn_list,
2488                                 syn_filter_ptr,
2489                                 entries);
2490                         flow->rule = syn_filter_ptr;
2491                         flow->filter_type = RTE_ETH_FILTER_SYN;
2492                         return flow;
2493                 }
2494                 goto out;
2495         }
2496
2497         memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
2498         ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
2499                                 actions, &fdir_rule, error);
2500         if (!ret) {
2501                 /* A mask cannot be deleted. */
2502                 if (fdir_rule.b_mask) {
2503                         if (!fdir_info->mask_added) {
2504                                 /* It's the first time the mask is set. */
2505                                 rte_memcpy(&fdir_info->mask,
2506                                         &fdir_rule.mask,
2507                                         sizeof(struct ixgbe_hw_fdir_mask));
2508                                 ret = ixgbe_fdir_set_input_mask(dev);
2509                                 if (ret)
2510                                         goto out;
2511
2512                                 fdir_info->mask_added = TRUE;
2513                         } else {
2514                                 /**
2515                                  * Only one global mask is supported;
2516                                  * all rules must use the same mask.
2517                                  */
2518                                 ret = memcmp(&fdir_info->mask,
2519                                         &fdir_rule.mask,
2520                                         sizeof(struct ixgbe_hw_fdir_mask));
2521                                 if (ret)
2522                                         goto out;
2523                         }
2524                 }
2525
2526                 if (fdir_rule.b_spec) {
2527                         ret = ixgbe_fdir_filter_program(dev, &fdir_rule,
2528                                         FALSE, FALSE);
2529                         if (!ret) {
2530                                 fdir_rule_ptr = rte_zmalloc("ixgbe_fdir_filter",
2531                                         sizeof(struct ixgbe_fdir_rule_ele), 0);
                                     if (!fdir_rule_ptr) {
                                             PMD_DRV_LOG(ERR, "failed to allocate memory");
                                             ret = -ENOMEM;
                                             goto out;
                                     }
2532                                 (void)rte_memcpy(&fdir_rule_ptr->filter_info,
2533                                         &fdir_rule,
2534                                         sizeof(struct ixgbe_fdir_rule));
2535                                 TAILQ_INSERT_TAIL(&filter_fdir_list,
2536                                         fdir_rule_ptr, entries);
2537                                 flow->rule = fdir_rule_ptr;
2538                                 flow->filter_type = RTE_ETH_FILTER_FDIR;
2539
2540                                 return flow;
2541                         }
2545                 }
2546
2547                 goto out;
2548         }
2549
2550         memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
2551         ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
2552                                         actions, &l2_tn_filter, error);
2553         if (!ret) {
2554                 ret = ixgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_filter, FALSE);
2555                 if (!ret) {
2556                         l2_tn_filter_ptr = rte_zmalloc("ixgbe_l2_tn_filter",
2557                                 sizeof(struct ixgbe_eth_l2_tunnel_conf_ele), 0);
                             if (!l2_tn_filter_ptr) {
                                     PMD_DRV_LOG(ERR, "failed to allocate memory");
                                     ret = -ENOMEM;
                                     goto out;
                             }
2558                         (void)rte_memcpy(&l2_tn_filter_ptr->filter_info,
2559                                 &l2_tn_filter,
2560                                 sizeof(struct rte_eth_l2_tunnel_conf));
2561                         TAILQ_INSERT_TAIL(&filter_l2_tunnel_list,
2562                                 l2_tn_filter_ptr, entries);
2563                         flow->rule = l2_tn_filter_ptr;
2564                         flow->filter_type = RTE_ETH_FILTER_L2_TUNNEL;
2565                         return flow;
2566                 }
2567         }
2568
2569 out:
2570         TAILQ_REMOVE(&ixgbe_flow_list,
2571                 ixgbe_flow_mem_ptr, entries);
2572         rte_flow_error_set(error, -ret,
2573                            RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
2574                            "Failed to create flow.");
2575         rte_free(ixgbe_flow_mem_ptr);
2576         rte_free(flow);
2577         return NULL;
2578 }
2579
2580 /**
2581  * Check if the flow rule is supported by ixgbe.
2582  * It only checks the format. It doesn't guarantee that the rule can be
2583  * programmed into the HW, because there may not be enough room for it.
2584  */
2585 static int
2586 ixgbe_flow_validate(struct rte_eth_dev *dev,
2587                 const struct rte_flow_attr *attr,
2588                 const struct rte_flow_item pattern[],
2589                 const struct rte_flow_action actions[],
2590                 struct rte_flow_error *error)
2591 {
2592         struct rte_eth_ntuple_filter ntuple_filter;
2593         struct rte_eth_ethertype_filter ethertype_filter;
2594         struct rte_eth_syn_filter syn_filter;
2595         struct rte_eth_l2_tunnel_conf l2_tn_filter;
2596         struct ixgbe_fdir_rule fdir_rule;
2597         int ret;
2598
2599         memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
2600         ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
2601                                 actions, &ntuple_filter, error);
2602         if (!ret)
2603                 return 0;
2604
2605         memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
2606         ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
2607                                 actions, &ethertype_filter, error);
2608         if (!ret)
2609                 return 0;
2610
2611         memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
2612         ret = ixgbe_parse_syn_filter(dev, attr, pattern,
2613                                 actions, &syn_filter, error);
2614         if (!ret)
2615                 return 0;
2616
2617         memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
2618         ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
2619                                 actions, &fdir_rule, error);
2620         if (!ret)
2621                 return 0;
2622
2623         memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
2624         ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
2625                                 actions, &l2_tn_filter, error);
2626
2627         return ret;
2628 }
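
/*
 * Usage sketch (application side, illustrative only): a rule is usually
 * validated before it is created, so that malformed patterns are rejected
 * without touching the hardware.  The port_id, attr, pattern and actions
 * below are assumed to be set up by the caller.
 *
 *	struct rte_flow_error err;
 *	struct rte_flow *f = NULL;
 *
 *	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *		f = rte_flow_create(port_id, &attr, pattern, actions, &err);
 *	if (f == NULL)
 *		printf("flow not created: %s\n",
 *		       err.message ? err.message : "(no message)");
 */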
2629
2630 /* Destroy a flow rule on ixgbe. */
2631 static int
2632 ixgbe_flow_destroy(struct rte_eth_dev *dev,
2633                 struct rte_flow *flow,
2634                 struct rte_flow_error *error)
2635 {
2636         int ret;
2637         struct rte_flow *pmd_flow = flow;
2638         enum rte_filter_type filter_type = pmd_flow->filter_type;
2639         struct rte_eth_ntuple_filter ntuple_filter;
2640         struct rte_eth_ethertype_filter ethertype_filter;
2641         struct rte_eth_syn_filter syn_filter;
2642         struct ixgbe_fdir_rule fdir_rule;
2643         struct rte_eth_l2_tunnel_conf l2_tn_filter;
2644         struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
2645         struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
2646         struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
2647         struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2648         struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
2649         struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
2650
2651         switch (filter_type) {
2652         case RTE_ETH_FILTER_NTUPLE:
2653                 ntuple_filter_ptr = (struct ixgbe_ntuple_filter_ele *)
2654                                         pmd_flow->rule;
2655                 (void)rte_memcpy(&ntuple_filter,
2656                         &ntuple_filter_ptr->filter_info,
2657                         sizeof(struct rte_eth_ntuple_filter));
2658                 ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, FALSE);
2659                 if (!ret) {
2660                         TAILQ_REMOVE(&filter_ntuple_list,
2661                         ntuple_filter_ptr, entries);
2662                         rte_free(ntuple_filter_ptr);
2663                 }
2664                 break;
2665         case RTE_ETH_FILTER_ETHERTYPE:
2666                 ethertype_filter_ptr = (struct ixgbe_ethertype_filter_ele *)
2667                                         pmd_flow->rule;
2668                 (void)rte_memcpy(&ethertype_filter,
2669                         &ethertype_filter_ptr->filter_info,
2670                         sizeof(struct rte_eth_ethertype_filter));
2671                 ret = ixgbe_add_del_ethertype_filter(dev,
2672                                 &ethertype_filter, FALSE);
2673                 if (!ret) {
2674                         TAILQ_REMOVE(&filter_ethertype_list,
2675                                 ethertype_filter_ptr, entries);
2676                         rte_free(ethertype_filter_ptr);
2677                 }
2678                 break;
2679         case RTE_ETH_FILTER_SYN:
2680                 syn_filter_ptr = (struct ixgbe_eth_syn_filter_ele *)
2681                                 pmd_flow->rule;
2682                 (void)rte_memcpy(&syn_filter,
2683                         &syn_filter_ptr->filter_info,
2684                         sizeof(struct rte_eth_syn_filter));
2685                 ret = ixgbe_syn_filter_set(dev, &syn_filter, FALSE);
2686                 if (!ret) {
2687                         TAILQ_REMOVE(&filter_syn_list,
2688                                 syn_filter_ptr, entries);
2689                         rte_free(syn_filter_ptr);
2690                 }
2691                 break;
2692         case RTE_ETH_FILTER_FDIR:
2693                 fdir_rule_ptr = (struct ixgbe_fdir_rule_ele *)pmd_flow->rule;
2694                 (void)rte_memcpy(&fdir_rule,
2695                         &fdir_rule_ptr->filter_info,
2696                         sizeof(struct ixgbe_fdir_rule));
2697                 ret = ixgbe_fdir_filter_program(dev, &fdir_rule, TRUE, FALSE);
2698                 if (!ret) {
2699                         TAILQ_REMOVE(&filter_fdir_list,
2700                                 fdir_rule_ptr, entries);
2701                         rte_free(fdir_rule_ptr);
2702                 }
2703                 break;
2704         case RTE_ETH_FILTER_L2_TUNNEL:
2705                 l2_tn_filter_ptr = (struct ixgbe_eth_l2_tunnel_conf_ele *)
2706                                 pmd_flow->rule;
2707                 (void)rte_memcpy(&l2_tn_filter, &l2_tn_filter_ptr->filter_info,
2708                         sizeof(struct rte_eth_l2_tunnel_conf));
2709                 ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_filter);
2710                 if (!ret) {
2711                         TAILQ_REMOVE(&filter_l2_tunnel_list,
2712                                 l2_tn_filter_ptr, entries);
2713                         rte_free(l2_tn_filter_ptr);
2714                 }
2715                 break;
2716         default:
2717                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
2718                             filter_type);
2719                 ret = -EINVAL;
2720                 break;
2721         }
2722
2723         if (ret) {
2724                 rte_flow_error_set(error, EINVAL,
2725                                 RTE_FLOW_ERROR_TYPE_HANDLE,
2726                                 NULL, "Failed to destroy flow");
2727                 return ret;
2728         }
2729
2730         TAILQ_FOREACH(ixgbe_flow_mem_ptr, &ixgbe_flow_list, entries) {
2731                 if (ixgbe_flow_mem_ptr->flow == pmd_flow) {
2732                         TAILQ_REMOVE(&ixgbe_flow_list,
2733                                 ixgbe_flow_mem_ptr, entries);
2734                         rte_free(ixgbe_flow_mem_ptr);
                             /* Each flow is tracked by exactly one list entry. */
                             break;
2735                 }
2736         }
2737         rte_free(flow);
2738
2739         return ret;
2740 }
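
/*
 * Usage sketch (application side, illustrative only): the handle returned
 * by rte_flow_create() is what an application hands back to tear down a
 * single rule; f and port_id are assumed to come from the caller.
 *
 *	struct rte_flow_error err;
 *
 *	if (rte_flow_destroy(port_id, f, &err) != 0)
 *		printf("destroy failed: %s\n",
 *		       err.message ? err.message : "(no message)");
 */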
2741
2742 /*  Destroy all flow rules associated with a port on ixgbe. */
2743 static int
2744 ixgbe_flow_flush(struct rte_eth_dev *dev,
2745                 struct rte_flow_error *error)
2746 {
2747         int ret = 0;
2748
2749         ixgbe_clear_all_ntuple_filter(dev);
2750         ixgbe_clear_all_ethertype_filter(dev);
2751         ixgbe_clear_syn_filter(dev);
2752
2753         ret = ixgbe_clear_all_fdir_filter(dev);
2754         if (ret < 0) {
2755                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
2756                                         NULL, "Failed to flush rule");
2757                 return ret;
2758         }
2759
2760         ret = ixgbe_clear_all_l2_tn_filter(dev);
2761         if (ret < 0) {
2762                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
2763                                         NULL, "Failed to flush rule");
2764                 return ret;
2765         }
2766
2767         ixgbe_filterlist_flush();
2768
2769         return 0;
2770 }
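
/*
 * Usage sketch (application side, illustrative only): flushing removes
 * every rule on the port in one call, so handles previously returned by
 * rte_flow_create() must not be used afterwards.
 *
 *	struct rte_flow_error err;
 *
 *	if (rte_flow_flush(port_id, &err) != 0)
 *		printf("flush failed: %s\n",
 *		       err.message ? err.message : "(no message)");
 */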
2771
2772 const struct rte_flow_ops ixgbe_flow_ops = {
2773         ixgbe_flow_validate,
2774         ixgbe_flow_create,
2775         ixgbe_flow_destroy,
2776         ixgbe_flow_flush,
2777         NULL, /* query is not supported */
2778 };
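
/*
 * Note: applications never reference this table directly.  It is expected
 * to be handed out through the generic filter control path, roughly as
 * sketched below (see ixgbe_dev_filter_ctrl() in ixgbe_ethdev.c; the exact
 * code there may differ):
 *
 *	case RTE_ETH_FILTER_GENERIC:
 *		if (filter_op != RTE_ETH_FILTER_GET)
 *			return -EINVAL;
 *		*(const void **)arg = &ixgbe_flow_ops;
 *		break;
 */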