drivers/net/e1000/igb_flow.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <sys/queue.h>
35 #include <stdio.h>
36 #include <errno.h>
37 #include <stdint.h>
38 #include <stdarg.h>
39
40 #include <rte_common.h>
41 #include <rte_interrupts.h>
42 #include <rte_byteorder.h>
43 #include <rte_log.h>
44 #include <rte_debug.h>
45 #include <rte_pci.h>
46 #include <rte_ether.h>
47 #include <rte_ethdev.h>
48 #include <rte_ethdev_pci.h>
49 #include <rte_memory.h>
50 #include <rte_memzone.h>
51 #include <rte_eal.h>
52 #include <rte_atomic.h>
53 #include <rte_malloc.h>
54 #include <rte_dev.h>
55 #include <rte_flow.h>
56 #include <rte_flow_driver.h>
57
58 #include "e1000_logs.h"
59 #include "base/e1000_api.h"
60 #include "e1000_ethdev.h"
61
62 #define NEXT_ITEM_OF_PATTERN(item, pattern, index)              \
63         do {                                                    \
64                 item = (pattern) + (index);                     \
65                 while (item->type == RTE_FLOW_ITEM_TYPE_VOID) { \
66                         (index)++;                              \
67                         item = (pattern) + (index);             \
68                 }                                               \
69         } while (0)
70
71 #define NEXT_ITEM_OF_ACTION(act, actions, index)                \
72         do {                                                    \
73                 act = (actions) + (index);                      \
74                 while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {\
75                         (index)++;                              \
76                         act = (actions) + (index);              \
77                 }                                               \
78         } while (0)
79
80 #define IGB_FLEX_RAW_NUM        12
81
82 /**
83  * Please be aware there is an assumption for all the parsers:
84  * rte_flow_item uses big endian, while rte_flow_attr and
85  * rte_flow_action use CPU order.
86  * Because the pattern is used to describe the packets,
87  * the packets should normally use network order.
88  */
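
/*
 * Illustrative sketch (not part of the driver): an application filling
 * these structures keeps packet fields in the pattern items in network
 * order, while attribute and action values stay in CPU order.
 *
 *     struct rte_flow_item_udp udp_spec = {
 *             .hdr.dst_port = rte_cpu_to_be_16(80),          // big endian
 *     };
 *     struct rte_flow_action_queue queue = { .index = 1 };   // CPU order
 *     struct rte_flow_attr attr = { .ingress = 1, .priority = 0 };
 */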
89
90 /**
91  * Parse the rule to see if it is an n-tuple rule,
92  * and get the n-tuple filter info along the way.
93  * pattern:
94  * The first not void item can be ETH or IPV4.
95  * The second not void item must be IPV4 if the first one is ETH.
96  * The third not void item must be UDP or TCP or SCTP
97  * The next not void item must be END.
98  * action:
99  * The first not void action should be QUEUE.
100  * The next not void action should be END.
101  * pattern example:
102  * ITEM         Spec                    Mask
103  * ETH          NULL                    NULL
104  * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
105  *                      dst_addr 192.167.3.50   0xFFFFFFFF
106  *                      next_proto_id   17      0xFF
107  * UDP/TCP/     src_port        80      0xFFFF
108  * SCTP         dst_port        80      0xFFFF
109  * END
110  * other members in mask and spec should be set to 0x00.
111  * item->last should be NULL.
112  */
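
/*
 * Illustrative sketch (not part of the driver), matching the pattern
 * described above: a UDP 5-tuple rule steering to queue 1 on port 0,
 * assuming an 82576 (the only mac type for which the 5-tuple form is
 * accepted). Addresses, ports and queue/port numbers are placeholders.
 *
 *     struct rte_flow_item_ipv4 ip_spec = {
 *             .hdr.src_addr = rte_cpu_to_be_32(0xc0a80114), // 192.168.1.20
 *             .hdr.dst_addr = rte_cpu_to_be_32(0xc0a70332), // 192.167.3.50
 *             .hdr.next_proto_id = 17,                      // UDP
 *     };
 *     struct rte_flow_item_ipv4 ip_mask = {
 *             .hdr.src_addr = rte_cpu_to_be_32(0xffffffff),
 *             .hdr.dst_addr = rte_cpu_to_be_32(0xffffffff),
 *             .hdr.next_proto_id = 0xff,
 *     };
 *     struct rte_flow_item_udp udp_spec = {
 *             .hdr.src_port = rte_cpu_to_be_16(80),
 *             .hdr.dst_port = rte_cpu_to_be_16(80),
 *     };
 *     struct rte_flow_item_udp udp_mask = {
 *             .hdr.src_port = rte_cpu_to_be_16(0xffff),
 *             .hdr.dst_port = rte_cpu_to_be_16(0xffff),
 *     };
 *     struct rte_flow_item pattern[] = {
 *             { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *             { .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *               .spec = &ip_spec, .mask = &ip_mask },
 *             { .type = RTE_FLOW_ITEM_TYPE_UDP,
 *               .spec = &udp_spec, .mask = &udp_mask },
 *             { .type = RTE_FLOW_ITEM_TYPE_END },
 *     };
 *     struct rte_flow_action_queue queue = { .index = 1 };
 *     struct rte_flow_action actions[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 *     struct rte_flow_attr attr = { .ingress = 1 };
 *     struct rte_flow_error err;
 *     struct rte_flow *f = rte_flow_create(0, &attr, pattern, actions, &err);
 */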
113 static int
114 cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
115                          const struct rte_flow_item pattern[],
116                          const struct rte_flow_action actions[],
117                          struct rte_eth_ntuple_filter *filter,
118                          struct rte_flow_error *error)
119 {
120         const struct rte_flow_item *item;
121         const struct rte_flow_action *act;
122         const struct rte_flow_item_ipv4 *ipv4_spec;
123         const struct rte_flow_item_ipv4 *ipv4_mask;
124         const struct rte_flow_item_tcp *tcp_spec;
125         const struct rte_flow_item_tcp *tcp_mask;
126         const struct rte_flow_item_udp *udp_spec;
127         const struct rte_flow_item_udp *udp_mask;
128         const struct rte_flow_item_sctp *sctp_spec;
129         const struct rte_flow_item_sctp *sctp_mask;
130         uint32_t index;
131
132         if (!pattern) {
133                 rte_flow_error_set(error,
134                         EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
135                         NULL, "NULL pattern.");
136                 return -rte_errno;
137         }
138
139         if (!actions) {
140                 rte_flow_error_set(error, EINVAL,
141                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
142                                    NULL, "NULL action.");
143                 return -rte_errno;
144         }
145         if (!attr) {
146                 rte_flow_error_set(error, EINVAL,
147                                    RTE_FLOW_ERROR_TYPE_ATTR,
148                                    NULL, "NULL attribute.");
149                 return -rte_errno;
150         }
151
152         /* parse pattern */
153         index = 0;
154
155         /* the first not void item can be MAC or IPv4 */
156         NEXT_ITEM_OF_PATTERN(item, pattern, index);
157
158         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
159             item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
160                 rte_flow_error_set(error, EINVAL,
161                         RTE_FLOW_ERROR_TYPE_ITEM,
162                         item, "Not supported by ntuple filter");
163                 return -rte_errno;
164         }
165         /* Skip Ethernet */
166         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
167                 /*Not supported last point for range*/
168                 if (item->last) {
169                         rte_flow_error_set(error,
170                           EINVAL,
171                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
172                           item, "Not supported last point for range");
173                         return -rte_errno;
174                 }
175                 /* if the first item is MAC, the content should be NULL */
176                 if (item->spec || item->mask) {
177                         rte_flow_error_set(error, EINVAL,
178                                 RTE_FLOW_ERROR_TYPE_ITEM,
179                                 item, "Not supported by ntuple filter");
180                         return -rte_errno;
181                 }
182                 /* check if the next not void item is IPv4 */
183                 index++;
184                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
185                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
186                         rte_flow_error_set(error,
187                           EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
188                           item, "Not supported by ntuple filter");
189                         return -rte_errno;
190                 }
191         }
192
193         /* get the IPv4 info */
194         if (!item->spec || !item->mask) {
195                 rte_flow_error_set(error, EINVAL,
196                         RTE_FLOW_ERROR_TYPE_ITEM,
197                         item, "Invalid ntuple mask");
198                 return -rte_errno;
199         }
200         /* Not supported last point for range */
201         if (item->last) {
202                 rte_flow_error_set(error, EINVAL,
203                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
204                         item, "Not supported last point for range");
205                 return -rte_errno;
206         }
207
208         ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
209         /**
210          * Only support src & dst addresses, protocol,
211          * others should be masked.
212          */
213
214         if (ipv4_mask->hdr.version_ihl ||
215                 ipv4_mask->hdr.type_of_service ||
216                 ipv4_mask->hdr.total_length ||
217                 ipv4_mask->hdr.packet_id ||
218                 ipv4_mask->hdr.fragment_offset ||
219                 ipv4_mask->hdr.time_to_live ||
220                 ipv4_mask->hdr.hdr_checksum) {
221                 rte_flow_error_set(error,
222                         EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
223                         item, "Not supported by ntuple filter");
224                 return -rte_errno;
225         }
226
227         filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
228         filter->src_ip_mask = ipv4_mask->hdr.src_addr;
229         filter->proto_mask  = ipv4_mask->hdr.next_proto_id;
230
231         ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
232         filter->dst_ip = ipv4_spec->hdr.dst_addr;
233         filter->src_ip = ipv4_spec->hdr.src_addr;
234         filter->proto  = ipv4_spec->hdr.next_proto_id;
235
236         /* check if the next not void item is TCP or UDP or SCTP */
237         index++;
238         NEXT_ITEM_OF_PATTERN(item, pattern, index);
239         if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
240             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
241             item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
242                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
243                 rte_flow_error_set(error, EINVAL,
244                         RTE_FLOW_ERROR_TYPE_ITEM,
245                         item, "Not supported by ntuple filter");
246                 return -rte_errno;
247         }
248
249         /* Not supported last point for range */
250         if (item->last) {
251                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
252                 rte_flow_error_set(error, EINVAL,
253                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
254                         item, "Not supported last point for range");
255                 return -rte_errno;
256         }
257
258         /* get the TCP/UDP/SCTP info */
259         if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
260                 if (item->spec && item->mask) {
261                         tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
262
263                         /**
264                          * Only support src & dst ports, tcp flags,
265                          * others should be masked.
266                          */
267                         if (tcp_mask->hdr.sent_seq ||
268                                 tcp_mask->hdr.recv_ack ||
269                                 tcp_mask->hdr.data_off ||
270                                 tcp_mask->hdr.rx_win ||
271                                 tcp_mask->hdr.cksum ||
272                                 tcp_mask->hdr.tcp_urp) {
273                                 memset(filter, 0,
274                                         sizeof(struct rte_eth_ntuple_filter));
275                                 rte_flow_error_set(error, EINVAL,
276                                         RTE_FLOW_ERROR_TYPE_ITEM,
277                                         item, "Not supported by ntuple filter");
278                                 return -rte_errno;
279                         }
280
281                         filter->dst_port_mask  = tcp_mask->hdr.dst_port;
282                         filter->src_port_mask  = tcp_mask->hdr.src_port;
283                         if (tcp_mask->hdr.tcp_flags == 0xFF) {
284                                 filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
285                         } else if (!tcp_mask->hdr.tcp_flags) {
286                                 filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
287                         } else {
288                                 memset(filter, 0,
289                                         sizeof(struct rte_eth_ntuple_filter));
290                                 rte_flow_error_set(error, EINVAL,
291                                         RTE_FLOW_ERROR_TYPE_ITEM,
292                                         item, "Not supported by ntuple filter");
293                                 return -rte_errno;
294                         }
295
296                         tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
297                         filter->dst_port  = tcp_spec->hdr.dst_port;
298                         filter->src_port  = tcp_spec->hdr.src_port;
299                         filter->tcp_flags = tcp_spec->hdr.tcp_flags;
300                 }
301         } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
302                 if (item->spec && item->mask) {
303                         udp_mask = (const struct rte_flow_item_udp *)item->mask;
304
305                         /**
306                          * Only support src & dst ports,
307                          * others should be masked.
308                          */
309                         if (udp_mask->hdr.dgram_len ||
310                             udp_mask->hdr.dgram_cksum) {
311                                 memset(filter, 0,
312                                         sizeof(struct rte_eth_ntuple_filter));
313                                 rte_flow_error_set(error, EINVAL,
314                                         RTE_FLOW_ERROR_TYPE_ITEM,
315                                         item, "Not supported by ntuple filter");
316                                 return -rte_errno;
317                         }
318
319                         filter->dst_port_mask = udp_mask->hdr.dst_port;
320                         filter->src_port_mask = udp_mask->hdr.src_port;
321
322                         udp_spec = (const struct rte_flow_item_udp *)item->spec;
323                         filter->dst_port = udp_spec->hdr.dst_port;
324                         filter->src_port = udp_spec->hdr.src_port;
325                 }
326         } else {
327                 if (item->spec && item->mask) {
328                         sctp_mask = (const struct rte_flow_item_sctp *)
329                                         item->mask;
330
331                         /**
332                          * Only support src & dst ports,
333                          * others should be masked.
334                          */
335                         if (sctp_mask->hdr.tag ||
336                             sctp_mask->hdr.cksum) {
337                                 memset(filter, 0,
338                                         sizeof(struct rte_eth_ntuple_filter));
339                                 rte_flow_error_set(error, EINVAL,
340                                         RTE_FLOW_ERROR_TYPE_ITEM,
341                                         item, "Not supported by ntuple filter");
342                                 return -rte_errno;
343                         }
344
345                         filter->dst_port_mask = sctp_mask->hdr.dst_port;
346                         filter->src_port_mask = sctp_mask->hdr.src_port;
347
348                         sctp_spec = (const struct rte_flow_item_sctp *)
349                                         item->spec;
350                         filter->dst_port = sctp_spec->hdr.dst_port;
351                         filter->src_port = sctp_spec->hdr.src_port;
352                 }
353         }
354         /* check if the next not void item is END */
355         index++;
356         NEXT_ITEM_OF_PATTERN(item, pattern, index);
357         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
358                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
359                 rte_flow_error_set(error, EINVAL,
360                         RTE_FLOW_ERROR_TYPE_ITEM,
361                         item, "Not supported by ntuple filter");
362                 return -rte_errno;
363         }
364
365         /* parse action */
366         index = 0;
367
368         /**
369          * n-tuple only supports forwarding,
370          * check if the first not void action is QUEUE.
371          */
372         NEXT_ITEM_OF_ACTION(act, actions, index);
373         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
374                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
375                 rte_flow_error_set(error, EINVAL,
376                         RTE_FLOW_ERROR_TYPE_ACTION,
377                         act, "Not supported action.");
378                 return -rte_errno;
379         }
380         filter->queue =
381                 ((const struct rte_flow_action_queue *)act->conf)->index;
382
383         /* check if the next not void item is END */
384         index++;
385         NEXT_ITEM_OF_ACTION(act, actions, index);
386         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
387                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
388                 rte_flow_error_set(error, EINVAL,
389                         RTE_FLOW_ERROR_TYPE_ACTION,
390                         act, "Not supported action.");
391                 return -rte_errno;
392         }
393
394         /* parse attr */
395         /* must be input direction */
396         if (!attr->ingress) {
397                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
398                 rte_flow_error_set(error, EINVAL,
399                                    RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
400                                    attr, "Only support ingress.");
401                 return -rte_errno;
402         }
403
404         /* not supported */
405         if (attr->egress) {
406                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
407                 rte_flow_error_set(error, EINVAL,
408                                    RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
409                                    attr, "Not support egress.");
410                 return -rte_errno;
411         }
412
413         if (attr->priority > 0xFFFF) {
414                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
415                 rte_flow_error_set(error, EINVAL,
416                                    RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
417                                    attr, "Error priority.");
418                 return -rte_errno;
419         }
420         filter->priority = (uint16_t)attr->priority;
421
422         return 0;
423 }
424
425 /* a specific function for igb because the flags are specific */
426 static int
427 igb_parse_ntuple_filter(struct rte_eth_dev *dev,
428                           const struct rte_flow_attr *attr,
429                           const struct rte_flow_item pattern[],
430                           const struct rte_flow_action actions[],
431                           struct rte_eth_ntuple_filter *filter,
432                           struct rte_flow_error *error)
433 {
434         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
435         int ret;
436
437         MAC_TYPE_FILTER_SUP(hw->mac.type);
438
439         ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);
440
441         if (ret)
442                 return ret;
443
444         /* Igb doesn't support many priorities. */
445         if (filter->priority > E1000_2TUPLE_MAX_PRI) {
446                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
447                 rte_flow_error_set(error, EINVAL,
448                         RTE_FLOW_ERROR_TYPE_ITEM,
449                         NULL, "Priority not supported by ntuple filter");
450                 return -rte_errno;
451         }
452
453         if (hw->mac.type == e1000_82576) {
454                 if (filter->queue >= IGB_MAX_RX_QUEUE_NUM_82576) {
455                         memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
456                         rte_flow_error_set(error, EINVAL,
457                                 RTE_FLOW_ERROR_TYPE_ITEM,
458                                 NULL, "queue number not "
459                                 "supported by ntuple filter");
460                         return -rte_errno;
461                 }
462                 filter->flags |= RTE_5TUPLE_FLAGS;
463         } else {
464                 if (filter->src_ip_mask || filter->dst_ip_mask ||
465                         filter->src_port_mask) {
466                         memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
467                         rte_flow_error_set(error, EINVAL,
468                                 RTE_FLOW_ERROR_TYPE_ITEM,
469                                 NULL, "only two tuple are "
470                                 "supported by this filter");
471                         return -rte_errno;
472                 }
473                 if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) {
474                         memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
475                         rte_flow_error_set(error, EINVAL,
476                                 RTE_FLOW_ERROR_TYPE_ITEM,
477                                 NULL, "queue number not "
478                                 "supported by ntuple filter");
479                         return -rte_errno;
480                 }
481                 filter->flags |= RTE_2TUPLE_FLAGS;
482         }
483
484         return 0;
485 }
486
487 /**
488  * Parse the rule to see if it is an ethertype rule,
489  * and get the ethertype filter info along the way.
490  * pattern:
491  * The first not void item can be ETH.
492  * The next not void item must be END.
493  * action:
494  * The first not void action should be QUEUE.
495  * The next not void action should be END.
496  * pattern example:
497  * ITEM         Spec                    Mask
498  * ETH          type    0x0807          0xFFFF
499  * END
500  * other members in mask and spec should be set to 0x00.
501  * item->last should be NULL.
502  */
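
/*
 * Illustrative sketch (not part of the driver), matching the pattern
 * described above: steer a non-IP ethertype (0x0807 here, a placeholder)
 * to queue 1. Only the type field is filled because MAC compare, the DROP
 * action and IPv4/IPv6 ethertypes are rejected by the igb wrapper below.
 *
 *     struct rte_flow_item_eth eth_spec = {
 *             .type = rte_cpu_to_be_16(0x0807),
 *     };
 *     struct rte_flow_item_eth eth_mask = {
 *             .type = rte_cpu_to_be_16(0xffff),
 *     };
 *     struct rte_flow_item pattern[] = {
 *             { .type = RTE_FLOW_ITEM_TYPE_ETH,
 *               .spec = &eth_spec, .mask = &eth_mask },
 *             { .type = RTE_FLOW_ITEM_TYPE_END },
 *     };
 *     struct rte_flow_action_queue queue = { .index = 1 };
 *     struct rte_flow_action actions[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 *     struct rte_flow_attr attr = { .ingress = 1 };
 */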
503 static int
504 cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
505                             const struct rte_flow_item *pattern,
506                             const struct rte_flow_action *actions,
507                             struct rte_eth_ethertype_filter *filter,
508                             struct rte_flow_error *error)
509 {
510         const struct rte_flow_item *item;
511         const struct rte_flow_action *act;
512         const struct rte_flow_item_eth *eth_spec;
513         const struct rte_flow_item_eth *eth_mask;
514         const struct rte_flow_action_queue *act_q;
515         uint32_t index;
516
517         if (!pattern) {
518                 rte_flow_error_set(error, EINVAL,
519                                 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
520                                 NULL, "NULL pattern.");
521                 return -rte_errno;
522         }
523
524         if (!actions) {
525                 rte_flow_error_set(error, EINVAL,
526                                 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
527                                 NULL, "NULL action.");
528                 return -rte_errno;
529         }
530
531         if (!attr) {
532                 rte_flow_error_set(error, EINVAL,
533                                    RTE_FLOW_ERROR_TYPE_ATTR,
534                                    NULL, "NULL attribute.");
535                 return -rte_errno;
536         }
537
538         /* Parse pattern */
539         index = 0;
540
541         /* The first non-void item should be MAC. */
542         NEXT_ITEM_OF_PATTERN(item, pattern, index);
543         if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
544                 rte_flow_error_set(error, EINVAL,
545                         RTE_FLOW_ERROR_TYPE_ITEM,
546                         item, "Not supported by ethertype filter");
547                 return -rte_errno;
548         }
549
550         /*Not supported last point for range*/
551         if (item->last) {
552                 rte_flow_error_set(error, EINVAL,
553                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
554                         item, "Not supported last point for range");
555                 return -rte_errno;
556         }
557
558         /* Get the MAC info. */
559         if (!item->spec || !item->mask) {
560                 rte_flow_error_set(error, EINVAL,
561                                 RTE_FLOW_ERROR_TYPE_ITEM,
562                                 item, "Not supported by ethertype filter");
563                 return -rte_errno;
564         }
565
566         eth_spec = (const struct rte_flow_item_eth *)item->spec;
567         eth_mask = (const struct rte_flow_item_eth *)item->mask;
568
569         /* Mask bits of source MAC address must be full of 0.
570          * Mask bits of destination MAC address must be full
571          * of 1 or full of 0.
572          */
573         if (!is_zero_ether_addr(&eth_mask->src) ||
574             (!is_zero_ether_addr(&eth_mask->dst) &&
575              !is_broadcast_ether_addr(&eth_mask->dst))) {
576                 rte_flow_error_set(error, EINVAL,
577                                 RTE_FLOW_ERROR_TYPE_ITEM,
578                                 item, "Invalid ether address mask");
579                 return -rte_errno;
580         }
581
582         if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
583                 rte_flow_error_set(error, EINVAL,
584                                 RTE_FLOW_ERROR_TYPE_ITEM,
585                                 item, "Invalid ethertype mask");
586                 return -rte_errno;
587         }
588
589         /* If mask bits of destination MAC address
590          * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
591          */
592         if (is_broadcast_ether_addr(&eth_mask->dst)) {
593                 filter->mac_addr = eth_spec->dst;
594                 filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
595         } else {
596                 filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
597         }
598         filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
599
600         /* Check if the next non-void item is END. */
601         index++;
602         NEXT_ITEM_OF_PATTERN(item, pattern, index);
603         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
604                 rte_flow_error_set(error, EINVAL,
605                                 RTE_FLOW_ERROR_TYPE_ITEM,
606                                 item, "Not supported by ethertype filter.");
607                 return -rte_errno;
608         }
609
610         /* Parse action */
611
612         index = 0;
613         /* Check if the first non-void action is QUEUE or DROP. */
614         NEXT_ITEM_OF_ACTION(act, actions, index);
615         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
616             act->type != RTE_FLOW_ACTION_TYPE_DROP) {
617                 rte_flow_error_set(error, EINVAL,
618                                 RTE_FLOW_ERROR_TYPE_ACTION,
619                                 act, "Not supported action.");
620                 return -rte_errno;
621         }
622
623         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
624                 act_q = (const struct rte_flow_action_queue *)act->conf;
625                 filter->queue = act_q->index;
626         } else {
627                 filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
628         }
629
630         /* Check if the next non-void item is END */
631         index++;
632         NEXT_ITEM_OF_ACTION(act, actions, index);
633         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
634                 rte_flow_error_set(error, EINVAL,
635                                 RTE_FLOW_ERROR_TYPE_ACTION,
636                                 act, "Not supported action.");
637                 return -rte_errno;
638         }
639
640         /* Parse attr */
641         /* Must be input direction */
642         if (!attr->ingress) {
643                 rte_flow_error_set(error, EINVAL,
644                                 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
645                                 attr, "Only support ingress.");
646                 return -rte_errno;
647         }
648
649         /* Not supported */
650         if (attr->egress) {
651                 rte_flow_error_set(error, EINVAL,
652                                 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
653                                 attr, "Not support egress.");
654                 return -rte_errno;
655         }
656
657         /* Not supported */
658         if (attr->priority) {
659                 rte_flow_error_set(error, EINVAL,
660                                 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
661                                 attr, "Not support priority.");
662                 return -rte_errno;
663         }
664
665         /* Not supported */
666         if (attr->group) {
667                 rte_flow_error_set(error, EINVAL,
668                                 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
669                                 attr, "Not support group.");
670                 return -rte_errno;
671         }
672
673         return 0;
674 }
675
676 static int
677 igb_parse_ethertype_filter(struct rte_eth_dev *dev,
678                                  const struct rte_flow_attr *attr,
679                              const struct rte_flow_item pattern[],
680                              const struct rte_flow_action actions[],
681                              struct rte_eth_ethertype_filter *filter,
682                              struct rte_flow_error *error)
683 {
684         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
685         int ret;
686
687         MAC_TYPE_FILTER_SUP(hw->mac.type);
688
689         ret = cons_parse_ethertype_filter(attr, pattern,
690                                         actions, filter, error);
691
692         if (ret)
693                 return ret;
694
695         if (hw->mac.type == e1000_82576) {
696                 if (filter->queue >= IGB_MAX_RX_QUEUE_NUM_82576) {
697                         memset(filter, 0, sizeof(
698                                         struct rte_eth_ethertype_filter));
699                         rte_flow_error_set(error, EINVAL,
700                                 RTE_FLOW_ERROR_TYPE_ITEM,
701                                 NULL, "queue number not supported "
702                                         "by ethertype filter");
703                         return -rte_errno;
704                 }
705         } else {
706                 if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) {
707                         memset(filter, 0, sizeof(
708                                         struct rte_eth_ethertype_filter));
709                         rte_flow_error_set(error, EINVAL,
710                                 RTE_FLOW_ERROR_TYPE_ITEM,
711                                 NULL, "queue number not supported "
712                                         "by ethertype filter");
713                         return -rte_errno;
714                 }
715         }
716
717         if (filter->ether_type == ETHER_TYPE_IPv4 ||
718                 filter->ether_type == ETHER_TYPE_IPv6) {
719                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
720                 rte_flow_error_set(error, EINVAL,
721                         RTE_FLOW_ERROR_TYPE_ITEM,
722                         NULL, "IPv4/IPv6 not supported by ethertype filter");
723                 return -rte_errno;
724         }
725
726         if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
727                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
728                 rte_flow_error_set(error, EINVAL,
729                         RTE_FLOW_ERROR_TYPE_ITEM,
730                         NULL, "mac compare is unsupported");
731                 return -rte_errno;
732         }
733
734         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
735                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
736                 rte_flow_error_set(error, EINVAL,
737                         RTE_FLOW_ERROR_TYPE_ITEM,
738                         NULL, "drop option is unsupported");
739                 return -rte_errno;
740         }
741
742         return 0;
743 }
744
745 /**
746  * Parse the rule to see if it is a TCP SYN rule,
747  * and get the TCP SYN filter info along the way.
748  * pattern:
749  * The first not void item must be ETH.
750  * The second not void item must be IPV4 or IPV6.
751  * The third not void item must be TCP.
752  * The next not void item must be END.
753  * action:
754  * The first not void action should be QUEUE.
755  * The next not void action should be END.
756  * pattern example:
757  * ITEM         Spec                    Mask
758  * ETH          NULL                    NULL
759  * IPV4/IPV6    NULL                    NULL
760  * TCP          tcp_flags       0x02    0xFF
761  * END
762  * other members in mask and spec should be set to 0x00.
763  * item->last should be NULL.
764  */
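
/*
 * Illustrative sketch (not part of the driver), matching the pattern
 * described above: only the TCP flags are specified; the ETH and IPV4
 * items stay empty. Note the parser below accepts only a mask whose
 * tcp_flags equals TCP_SYN_FLAG (0x02). Queue 1 is a placeholder.
 *
 *     struct rte_flow_item_tcp tcp_spec = { .hdr.tcp_flags = 0x02 };
 *     struct rte_flow_item_tcp tcp_mask = { .hdr.tcp_flags = 0x02 };
 *     struct rte_flow_item pattern[] = {
 *             { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *             { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *             { .type = RTE_FLOW_ITEM_TYPE_TCP,
 *               .spec = &tcp_spec, .mask = &tcp_mask },
 *             { .type = RTE_FLOW_ITEM_TYPE_END },
 *     };
 *     struct rte_flow_action_queue queue = { .index = 1 };
 *     struct rte_flow_action actions[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 *     struct rte_flow_attr attr = { .ingress = 1 }; // priority 0 = low, ~0 = high
 */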
765 static int
766 cons_parse_syn_filter(const struct rte_flow_attr *attr,
767                                 const struct rte_flow_item pattern[],
768                                 const struct rte_flow_action actions[],
769                                 struct rte_eth_syn_filter *filter,
770                                 struct rte_flow_error *error)
771 {
772         const struct rte_flow_item *item;
773         const struct rte_flow_action *act;
774         const struct rte_flow_item_tcp *tcp_spec;
775         const struct rte_flow_item_tcp *tcp_mask;
776         const struct rte_flow_action_queue *act_q;
777         uint32_t index;
778
779         if (!pattern) {
780                 rte_flow_error_set(error, EINVAL,
781                                 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
782                                 NULL, "NULL pattern.");
783                 return -rte_errno;
784         }
785
786         if (!actions) {
787                 rte_flow_error_set(error, EINVAL,
788                                 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
789                                 NULL, "NULL action.");
790                 return -rte_errno;
791         }
792
793         if (!attr) {
794                 rte_flow_error_set(error, EINVAL,
795                                    RTE_FLOW_ERROR_TYPE_ATTR,
796                                    NULL, "NULL attribute.");
797                 return -rte_errno;
798         }
799
800         /* parse pattern */
801         index = 0;
802
803         /* the first not void item should be MAC or IPv4 or IPv6 or TCP */
804         NEXT_ITEM_OF_PATTERN(item, pattern, index);
805         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
806             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
807             item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
808             item->type != RTE_FLOW_ITEM_TYPE_TCP) {
809                 rte_flow_error_set(error, EINVAL,
810                                 RTE_FLOW_ERROR_TYPE_ITEM,
811                                 item, "Not supported by syn filter");
812                 return -rte_errno;
813         }
814         /* Not supported last point for range */
815         if (item->last) {
816                 rte_flow_error_set(error, EINVAL,
817                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
818                         item, "Not supported last point for range");
819                 return -rte_errno;
820         }
821
822         /* Skip Ethernet */
823         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
824                 /* if the item is MAC, the content should be NULL */
825                 if (item->spec || item->mask) {
826                         rte_flow_error_set(error, EINVAL,
827                                 RTE_FLOW_ERROR_TYPE_ITEM,
828                                 item, "Invalid SYN address mask");
829                         return -rte_errno;
830                 }
831
832                 /* check if the next not void item is IPv4 or IPv6 */
833                 index++;
834                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
835                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
836                     item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
837                         rte_flow_error_set(error, EINVAL,
838                                 RTE_FLOW_ERROR_TYPE_ITEM,
839                                 item, "Not supported by syn filter");
840                         return -rte_errno;
841                 }
842         }
843
844         /* Skip IP */
845         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
846             item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
847                 /* if the item is IP, the content should be NULL */
848                 if (item->spec || item->mask) {
849                         rte_flow_error_set(error, EINVAL,
850                                 RTE_FLOW_ERROR_TYPE_ITEM,
851                                 item, "Invalid SYN mask");
852                         return -rte_errno;
853                 }
854
855                 /* check if the next not void item is TCP */
856                 index++;
857                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
858                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
859                         rte_flow_error_set(error, EINVAL,
860                                 RTE_FLOW_ERROR_TYPE_ITEM,
861                                 item, "Not supported by syn filter");
862                         return -rte_errno;
863                 }
864         }
865
866         /* Get the TCP info. Only support SYN. */
867         if (!item->spec || !item->mask) {
868                 rte_flow_error_set(error, EINVAL,
869                                 RTE_FLOW_ERROR_TYPE_ITEM,
870                                 item, "Invalid SYN mask");
871                 return -rte_errno;
872         }
873         /*Not supported last point for range*/
874         if (item->last) {
875                 rte_flow_error_set(error, EINVAL,
876                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
877                         item, "Not supported last point for range");
878                 return -rte_errno;
879         }
880
881         tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
882         tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
883         if (!(tcp_spec->hdr.tcp_flags & TCP_SYN_FLAG) ||
884             tcp_mask->hdr.src_port ||
885             tcp_mask->hdr.dst_port ||
886             tcp_mask->hdr.sent_seq ||
887             tcp_mask->hdr.recv_ack ||
888             tcp_mask->hdr.data_off ||
889             tcp_mask->hdr.tcp_flags != TCP_SYN_FLAG ||
890             tcp_mask->hdr.rx_win ||
891             tcp_mask->hdr.cksum ||
892             tcp_mask->hdr.tcp_urp) {
893                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
894                 rte_flow_error_set(error, EINVAL,
895                                 RTE_FLOW_ERROR_TYPE_ITEM,
896                                 item, "Not supported by syn filter");
897                 return -rte_errno;
898         }
899
900         /* check if the next not void item is END */
901         index++;
902         NEXT_ITEM_OF_PATTERN(item, pattern, index);
903         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
904                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
905                 rte_flow_error_set(error, EINVAL,
906                                 RTE_FLOW_ERROR_TYPE_ITEM,
907                                 item, "Not supported by syn filter");
908                 return -rte_errno;
909         }
910
911         /* parse action */
912         index = 0;
913
914         /* check if the first not void action is QUEUE. */
915         NEXT_ITEM_OF_ACTION(act, actions, index);
916         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
917                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
918                 rte_flow_error_set(error, EINVAL,
919                                 RTE_FLOW_ERROR_TYPE_ACTION,
920                                 act, "Not supported action.");
921                 return -rte_errno;
922         }
923
924         act_q = (const struct rte_flow_action_queue *)act->conf;
925         filter->queue = act_q->index;
926
927         /* check if the next not void item is END */
928         index++;
929         NEXT_ITEM_OF_ACTION(act, actions, index);
930         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
931                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
932                 rte_flow_error_set(error, EINVAL,
933                                 RTE_FLOW_ERROR_TYPE_ACTION,
934                                 act, "Not supported action.");
935                 return -rte_errno;
936         }
937
938         /* parse attr */
939         /* must be input direction */
940         if (!attr->ingress) {
941                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
942                 rte_flow_error_set(error, EINVAL,
943                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
944                         attr, "Only support ingress.");
945                 return -rte_errno;
946         }
947
948         /* not supported */
949         if (attr->egress) {
950                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
951                 rte_flow_error_set(error, EINVAL,
952                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
953                         attr, "Not support egress.");
954                 return -rte_errno;
955         }
956
957         /* Support 2 priorities, the lowest or highest. */
958         if (!attr->priority) {
959                 filter->hig_pri = 0;
960         } else if (attr->priority == (uint32_t)~0U) {
961                 filter->hig_pri = 1;
962         } else {
963                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
964                 rte_flow_error_set(error, EINVAL,
965                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
966                         attr, "Not support priority.");
967                 return -rte_errno;
968         }
969
970         return 0;
971 }
972
973 static int
974 igb_parse_syn_filter(struct rte_eth_dev *dev,
975                                  const struct rte_flow_attr *attr,
976                              const struct rte_flow_item pattern[],
977                              const struct rte_flow_action actions[],
978                              struct rte_eth_syn_filter *filter,
979                              struct rte_flow_error *error)
980 {
981         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
982         int ret;
983
984         MAC_TYPE_FILTER_SUP(hw->mac.type);
985
986         ret = cons_parse_syn_filter(attr, pattern,
987                                         actions, filter, error);
988
989         if (ret)
990                 return ret;
991
992         if (hw->mac.type == e1000_82576) {
993                 if (filter->queue >= IGB_MAX_RX_QUEUE_NUM_82576) {
994                         memset(filter, 0, sizeof(struct rte_eth_syn_filter));
995                         rte_flow_error_set(error, EINVAL,
996                                 RTE_FLOW_ERROR_TYPE_ITEM,
997                                 NULL, "queue number not "
998                                         "supported by syn filter");
999                         return -rte_errno;
1000                 }
1001         } else {
1002                 if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) {
1003                         memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1004                         rte_flow_error_set(error, EINVAL,
1005                                 RTE_FLOW_ERROR_TYPE_ITEM,
1006                                 NULL, "queue number not "
1007                                         "supported by syn filter");
1008                         return -rte_errno;
1009                 }
1010         }
1011
1012         return 0;
1013 }
1014
1015 /**
1016  * Parse the rule to see if it is a flex byte rule,
1017  * and get the flex byte filter info along the way.
1018  * pattern:
1019  * The first not void item must be RAW.
1020  * The second not void item can be RAW or END.
1021  * The third not void item can be RAW or END.
1022  * The last not void item must be END.
1023  * action:
1024  * The first not void action should be QUEUE.
1025  * The next not void action should be END.
1026  * pattern example:
1027  * ITEM         Spec                    Mask
1028  * RAW          relative        0               0x1
1029  *                      offset  0               0xFFFFFFFF
1030  *                      pattern {0x08, 0x06}            {0xFF, 0xFF}
1031  * RAW          relative        1               0x1
1032  *                      offset  100             0xFFFFFFFF
1033  *                      pattern {0x11, 0x22, 0x33}      {0xFF, 0xFF, 0xFF}
1034  * END
1035  * other members in mask and spec should be set to 0x00.
1036  * item->last should be NULL.
1037  */
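
/*
 * Illustrative sketch (not part of the driver), matching the first RAW
 * item of the pattern described above. It assumes the rte_flow_item_raw
 * layout of this release, where "pattern" is a flexible array member, so
 * spec and mask are built in raw byte buffers. Queue 1 is a placeholder.
 *
 *     uint8_t spec_buf[sizeof(struct rte_flow_item_raw) + 2] = { 0 };
 *     uint8_t mask_buf[sizeof(struct rte_flow_item_raw) + 2] = { 0 };
 *     struct rte_flow_item_raw *raw_spec = (void *)spec_buf;
 *     struct rte_flow_item_raw *raw_mask = (void *)mask_buf;
 *
 *     raw_spec->relative = 0;
 *     raw_spec->offset = 0;
 *     raw_spec->length = 2;
 *     raw_spec->pattern[0] = 0x08;
 *     raw_spec->pattern[1] = 0x06;
 *
 *     raw_mask->relative = 1;            // "relative" field is matched
 *     raw_mask->offset = 0xffffffff;     // "offset" field is matched
 *     raw_mask->length = 0xffff;
 *     raw_mask->pattern[0] = 0xff;       // every pattern byte fully masked
 *     raw_mask->pattern[1] = 0xff;
 *
 *     struct rte_flow_item pattern[] = {
 *             { .type = RTE_FLOW_ITEM_TYPE_RAW,
 *               .spec = raw_spec, .mask = raw_mask },
 *             { .type = RTE_FLOW_ITEM_TYPE_END },
 *     };
 *     struct rte_flow_action_queue queue = { .index = 1 };
 *     struct rte_flow_action actions[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 *     struct rte_flow_attr attr = { .ingress = 1 };
 */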
1038 static int
1039 cons_parse_flex_filter(const struct rte_flow_attr *attr,
1040                                 const struct rte_flow_item pattern[],
1041                                 const struct rte_flow_action actions[],
1042                                 struct rte_eth_flex_filter *filter,
1043                                 struct rte_flow_error *error)
1044 {
1045         const struct rte_flow_item *item;
1046         const struct rte_flow_action *act;
1047         const struct rte_flow_item_raw *raw_spec;
1048         const struct rte_flow_item_raw *raw_mask;
1049         const struct rte_flow_action_queue *act_q;
1050         uint32_t index, i, offset, total_offset;
1051         uint32_t max_offset = 0;
1052         int32_t shift, j, raw_index = 0;
1053         int32_t relative[IGB_FLEX_RAW_NUM] = {0};
1054         int32_t raw_offset[IGB_FLEX_RAW_NUM] = {0};
1055
1056         if (!pattern) {
1057                 rte_flow_error_set(error, EINVAL,
1058                                 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1059                                 NULL, "NULL pattern.");
1060                 return -rte_errno;
1061         }
1062
1063         if (!actions) {
1064                 rte_flow_error_set(error, EINVAL,
1065                                 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1066                                 NULL, "NULL action.");
1067                 return -rte_errno;
1068         }
1069
1070         if (!attr) {
1071                 rte_flow_error_set(error, EINVAL,
1072                                    RTE_FLOW_ERROR_TYPE_ATTR,
1073                                    NULL, "NULL attribute.");
1074                 return -rte_errno;
1075         }
1076
1077         /* parse pattern */
1078         index = 0;
1079
1080 item_loop:
1081
1082         /* the first not void item should be RAW */
1083         NEXT_ITEM_OF_PATTERN(item, pattern, index);
1084         if (item->type != RTE_FLOW_ITEM_TYPE_RAW) {
1085                 rte_flow_error_set(error, EINVAL,
1086                                 RTE_FLOW_ERROR_TYPE_ITEM,
1087                                 item, "Not supported by flex filter");
1088                 return -rte_errno;
1089         }
1090         /* Not supported last point for range */
1091         if (item->last) {
1092                 rte_flow_error_set(error, EINVAL,
1093                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1094                         item, "Not supported last point for range");
1095                 return -rte_errno;
1096         }
1097
1098         raw_spec = (const struct rte_flow_item_raw *)item->spec;
1099         raw_mask = (const struct rte_flow_item_raw *)item->mask;
1100
1101         if (!raw_mask->length ||
1102             !raw_mask->relative) {
1103                 memset(filter, 0, sizeof(struct rte_eth_flex_filter));
1104                 rte_flow_error_set(error, EINVAL,
1105                                 RTE_FLOW_ERROR_TYPE_ITEM,
1106                                 item, "Not supported by flex filter");
1107                 return -rte_errno;
1108         }
1109
1110         if (raw_mask->offset)
1111                 offset = raw_spec->offset;
1112         else
1113                 offset = 0;
1114
1115         for (j = 0; j < raw_spec->length; j++) {
1116                 if (raw_mask->pattern[j] != 0xFF) {
1117                         memset(filter, 0, sizeof(struct rte_eth_flex_filter));
1118                         rte_flow_error_set(error, EINVAL,
1119                                         RTE_FLOW_ERROR_TYPE_ITEM,
1120                                         item, "Not supported by flex filter");
1121                         return -rte_errno;
1122                 }
1123         }
1124
1125         total_offset = 0;
1126
1127         if (raw_spec->relative) {
1128                 for (j = raw_index; j > 0; j--) {
1129                         total_offset += raw_offset[j - 1];
1130                         if (!relative[j - 1])
1131                                 break;
1132                 }
1133                 if (total_offset + raw_spec->length + offset > max_offset)
1134                         max_offset = total_offset + raw_spec->length + offset;
1135         } else {
1136                 if (raw_spec->length + offset > max_offset)
1137                         max_offset = raw_spec->length + offset;
1138         }
1139
1140         if ((raw_spec->length + offset + total_offset) >
1141                         RTE_FLEX_FILTER_MAXLEN) {
1142                 memset(filter, 0, sizeof(struct rte_eth_flex_filter));
1143                 rte_flow_error_set(error, EINVAL,
1144                                 RTE_FLOW_ERROR_TYPE_ITEM,
1145                                 item, "Not supported by flex filter");
1146                 return -rte_errno;
1147         }
1148
1149         if (raw_spec->relative == 0) {
1150                 for (j = 0; j < raw_spec->length; j++)
1151                         filter->bytes[offset + j] =
1152                         raw_spec->pattern[j];
1153                 j = offset / CHAR_BIT;
1154                 shift = offset % CHAR_BIT;
1155         } else {
1156                 for (j = 0; j < raw_spec->length; j++)
1157                         filter->bytes[total_offset + offset + j] =
1158                                 raw_spec->pattern[j];
1159                 j = (total_offset + offset) / CHAR_BIT;
1160                 shift = (total_offset + offset) % CHAR_BIT;
1161         }
1162
1163         i = 0;
1164
1165         for ( ; shift < CHAR_BIT; shift++) {
1166                 filter->mask[j] |= (0x80 >> shift);
1167                 i++;
1168                 if (i == raw_spec->length)
1169                         break;
1170                 if (shift == (CHAR_BIT - 1)) {
1171                         j++;
1172                         shift = -1;
1173                 }
1174         }
1175
1176         relative[raw_index] = raw_spec->relative;
1177         raw_offset[raw_index] = offset + raw_spec->length;
1178         raw_index++;
1179
1180         /* check if the next not void item is RAW */
1181         index++;
1182         NEXT_ITEM_OF_PATTERN(item, pattern, index);
1183         if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
1184                 item->type != RTE_FLOW_ITEM_TYPE_END) {
1185                 rte_flow_error_set(error, EINVAL,
1186                                 RTE_FLOW_ERROR_TYPE_ITEM,
1187                                 item, "Not supported by flex filter");
1188                 return -rte_errno;
1189         }
1190
1191         /* go back to parser */
1192         if (item->type == RTE_FLOW_ITEM_TYPE_RAW) {
1193                 /* if the item is RAW, the content should be parse */
1194                 /* if the item is RAW, the content should be parsed */
1195         }
1196
1197         filter->len = RTE_ALIGN(max_offset, 8);
1198
1199         /* parse action */
1200         index = 0;
1201
1202         /* check if the first not void action is QUEUE. */
1203         NEXT_ITEM_OF_ACTION(act, actions, index);
1204         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
1205                 memset(filter, 0, sizeof(struct rte_eth_flex_filter));
1206                 rte_flow_error_set(error, EINVAL,
1207                                 RTE_FLOW_ERROR_TYPE_ACTION,
1208                                 act, "Not supported action.");
1209                 return -rte_errno;
1210         }
1211
1212         act_q = (const struct rte_flow_action_queue *)act->conf;
1213         filter->queue = act_q->index;
1214
1215         /* check if the next not void item is END */
1216         index++;
1217         NEXT_ITEM_OF_ACTION(act, actions, index);
1218         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1219                 memset(filter, 0, sizeof(struct rte_eth_flex_filter));
1220                 rte_flow_error_set(error, EINVAL,
1221                                 RTE_FLOW_ERROR_TYPE_ACTION,
1222                                 act, "Not supported action.");
1223                 return -rte_errno;
1224         }
1225
1226         /* parse attr */
1227         /* must be input direction */
1228         if (!attr->ingress) {
1229                 memset(filter, 0, sizeof(struct rte_eth_flex_filter));
1230                 rte_flow_error_set(error, EINVAL,
1231                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1232                         attr, "Only support ingress.");
1233                 return -rte_errno;
1234         }
1235
1236         /* not supported */
1237         if (attr->egress) {
1238                 memset(filter, 0, sizeof(struct rte_eth_flex_filter));
1239                 rte_flow_error_set(error, EINVAL,
1240                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1241                         attr, "Not support egress.");
1242                 return -rte_errno;
1243         }
1244
1245         if (attr->priority > 0xFFFF) {
1246                 memset(filter, 0, sizeof(struct rte_eth_flex_filter));
1247                 rte_flow_error_set(error, EINVAL,
1248                                    RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1249                                    attr, "Error priority.");
1250                 return -rte_errno;
1251         }
1252
1253         filter->priority = (uint16_t)attr->priority;
1254
1255         return 0;
1256 }
1257
1258 static int
1259 igb_parse_flex_filter(struct rte_eth_dev *dev,
1260                                  const struct rte_flow_attr *attr,
1261                              const struct rte_flow_item pattern[],
1262                              const struct rte_flow_action actions[],
1263                              struct rte_eth_flex_filter *filter,
1264                              struct rte_flow_error *error)
1265 {
1266         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1267         int ret;
1268
1269         MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
1270
1271         ret = cons_parse_flex_filter(attr, pattern,
1272                                         actions, filter, error);
1273
1274         if (ret)
1275                 return ret;
1276
1277         if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) {
1278                 memset(filter, 0, sizeof(struct rte_eth_flex_filter));
1279                 rte_flow_error_set(error, EINVAL,
1280                         RTE_FLOW_ERROR_TYPE_ITEM,
1281                         NULL, "queue number not supported by flex filter");
1282                 return -rte_errno;
1283         }
1284
1285         if (filter->len == 0 || filter->len > E1000_MAX_FLEX_FILTER_LEN ||
1286                 filter->len % sizeof(uint64_t) != 0) {
1287                 PMD_DRV_LOG(ERR, "filter's length is out of range");
1288                 return -EINVAL;
1289         }
1290
1291         if (filter->priority > E1000_MAX_FLEX_FILTER_PRI) {
1292                 PMD_DRV_LOG(ERR, "filter's priority is out of range");
1293                 return -EINVAL;
1294         }
1295
1296         return 0;
1297 }
1298
1299 /**
1300  * Create a flow rule.
1301  * Theoretically one rule can match more than one filter.
1302  * We will let it use the filter which it hits first.
1303  * So, the sequence matters.
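 *
 * The parsers below are tried in a fixed order: n-tuple, ethertype,
 * SYN and finally flex; the rule is programmed with the first filter
 * type whose parser accepts it.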
1304  */
1305 static struct rte_flow *
1306 igb_flow_create(struct rte_eth_dev *dev,
1307                   const struct rte_flow_attr *attr,
1308                   const struct rte_flow_item pattern[],
1309                   const struct rte_flow_action actions[],
1310                   struct rte_flow_error *error)
1311 {
1312         int ret;
1313         struct rte_eth_ntuple_filter ntuple_filter;
1314         struct rte_eth_ethertype_filter ethertype_filter;
1315         struct rte_eth_syn_filter syn_filter;
1316         struct rte_eth_flex_filter flex_filter;
1317         struct rte_flow *flow = NULL;
1318         struct igb_ntuple_filter_ele *ntuple_filter_ptr;
1319         struct igb_ethertype_filter_ele *ethertype_filter_ptr;
1320         struct igb_eth_syn_filter_ele *syn_filter_ptr;
1321         struct igb_flex_filter_ele *flex_filter_ptr;
1322         struct igb_flow_mem *igb_flow_mem_ptr;
1323
1324         flow = rte_zmalloc("igb_rte_flow", sizeof(struct rte_flow), 0);
1325         if (!flow) {
1326                 PMD_DRV_LOG(ERR, "failed to allocate memory");
1327                 return NULL;
1328         }
1329         igb_flow_mem_ptr = rte_zmalloc("igb_flow_mem",
1330                         sizeof(struct igb_flow_mem), 0);
1331         if (!igb_flow_mem_ptr) {
1332                 PMD_DRV_LOG(ERR, "failed to allocate memory");
1333                 rte_free(flow);
1334                 return NULL;
1335         }
1336         igb_flow_mem_ptr->flow = flow;
1337         igb_flow_mem_ptr->dev = dev;
1338         TAILQ_INSERT_TAIL(&igb_flow_list,
1339                                 igb_flow_mem_ptr, entries);
1340
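	/* Try the rule as an n-tuple filter first. */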
1341         memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
1342         ret = igb_parse_ntuple_filter(dev, attr, pattern,
1343                         actions, &ntuple_filter, error);
1344         if (!ret) {
1345                 ret = igb_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
1346                 if (!ret) {
1347                         ntuple_filter_ptr = rte_zmalloc("igb_ntuple_filter",
1348                                 sizeof(struct igb_ntuple_filter_ele), 0);
1349                         (void)rte_memcpy(&ntuple_filter_ptr->filter_info,
1350                                 &ntuple_filter,
1351                                 sizeof(struct rte_eth_ntuple_filter));
1352                         TAILQ_INSERT_TAIL(&igb_filter_ntuple_list,
1353                                 ntuple_filter_ptr, entries);
1354                         flow->rule = ntuple_filter_ptr;
1355                         flow->filter_type = RTE_ETH_FILTER_NTUPLE;
1356                         return flow;
1357                 }
1358                 goto out;
1359         }
1360
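	/* Next, try it as an ethertype filter. */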
1361         memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
1362         ret = igb_parse_ethertype_filter(dev, attr, pattern,
1363                                 actions, &ethertype_filter, error);
1364         if (!ret) {
1365                 ret = igb_add_del_ethertype_filter(dev,
1366                                 &ethertype_filter, TRUE);
1367                 if (!ret) {
1368                         ethertype_filter_ptr = rte_zmalloc(
1369                                 "igb_ethertype_filter",
1370                                 sizeof(struct igb_ethertype_filter_ele), 0);
1371                         (void)rte_memcpy(&ethertype_filter_ptr->filter_info,
1372                                 &ethertype_filter,
1373                                 sizeof(struct rte_eth_ethertype_filter));
1374                         TAILQ_INSERT_TAIL(&igb_filter_ethertype_list,
1375                                 ethertype_filter_ptr, entries);
1376                         flow->rule = ethertype_filter_ptr;
1377                         flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
1378                         return flow;
1379                 }
1380                 goto out;
1381         }
1382
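	/* Next, try it as a TCP SYN filter. */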
1383         memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
1384         ret = igb_parse_syn_filter(dev, attr, pattern,
1385                                 actions, &syn_filter, error);
1386         if (!ret) {
1387                 ret = eth_igb_syn_filter_set(dev, &syn_filter, TRUE);
1388                 if (!ret) {
1389                         syn_filter_ptr = rte_zmalloc("igb_syn_filter",
1390                                 sizeof(struct igb_eth_syn_filter_ele), 0);
1391                         (void)rte_memcpy(&syn_filter_ptr->filter_info,
1392                                 &syn_filter,
1393                                 sizeof(struct rte_eth_syn_filter));
1394                         TAILQ_INSERT_TAIL(&igb_filter_syn_list,
1395                                 syn_filter_ptr,
1396                                 entries);
1397                         flow->rule = syn_filter_ptr;
1398                         flow->filter_type = RTE_ETH_FILTER_SYN;
1399                         return flow;
1400                 }
1401                 goto out;
1402         }
1403
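	/* Finally, try it as a flex filter. */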
1404         memset(&flex_filter, 0, sizeof(struct rte_eth_flex_filter));
1405         ret = igb_parse_flex_filter(dev, attr, pattern,
1406                                         actions, &flex_filter, error);
1407         if (!ret) {
1408                 ret = eth_igb_add_del_flex_filter(dev, &flex_filter, TRUE);
1409                 if (!ret) {
1410                         flex_filter_ptr = rte_zmalloc("igb_flex_filter",
1411                                 sizeof(struct igb_flex_filter_ele), 0);
1412                         (void)rte_memcpy(&flex_filter_ptr->filter_info,
1413                                 &flex_filter,
1414                                 sizeof(struct rte_eth_flex_filter));
1415                         TAILQ_INSERT_TAIL(&igb_filter_flex_list,
1416                                 flex_filter_ptr, entries);
1417                         flow->rule = flex_filter_ptr;
1418                         flow->filter_type = RTE_ETH_FILTER_FLEXIBLE;
1419                         return flow;
1420                 }
1421         }
1422
1423 out:
1424         TAILQ_REMOVE(&igb_flow_list,
1425                 igb_flow_mem_ptr, entries);
1426         rte_flow_error_set(error, -ret,
1427                            RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1428                            "Failed to create flow.");
1429         rte_free(igb_flow_mem_ptr);
1430         rte_free(flow);
1431         return NULL;
1432 }
1433
1434 /**
1435  * Check if the flow rule is supported by igb.
1436  * It only checks the format. It doesn't guarantee that the rule can be
1437  * programmed into the HW, because there may not be enough room for it.
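 *
 * The same parsers as igb_flow_create() are used, but nothing is
 * programmed into the hardware and no state is allocated.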
1438  */
1439 static int
1440 igb_flow_validate(struct rte_eth_dev *dev,
1441                 const struct rte_flow_attr *attr,
1442                 const struct rte_flow_item pattern[],
1443                 const struct rte_flow_action actions[],
1444                 struct rte_flow_error *error)
1445 {
1446         struct rte_eth_ntuple_filter ntuple_filter;
1447         struct rte_eth_ethertype_filter ethertype_filter;
1448         struct rte_eth_syn_filter syn_filter;
1449         struct rte_eth_flex_filter flex_filter;
1450         int ret;
1451
1452         memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
1453         ret = igb_parse_ntuple_filter(dev, attr, pattern,
1454                                 actions, &ntuple_filter, error);
1455         if (!ret)
1456                 return 0;
1457
1458         memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
1459         ret = igb_parse_ethertype_filter(dev, attr, pattern,
1460                                 actions, &ethertype_filter, error);
1461         if (!ret)
1462                 return 0;
1463
1464         memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
1465         ret = igb_parse_syn_filter(dev, attr, pattern,
1466                                 actions, &syn_filter, error);
1467         if (!ret)
1468                 return 0;
1469
1470         memset(&flex_filter, 0, sizeof(struct rte_eth_flex_filter));
1471         ret = igb_parse_flex_filter(dev, attr, pattern,
1472                                 actions, &flex_filter, error);
1473
1474         return ret;
1475 }
1476
1477 /* Destroy a flow rule on igb. */
1478 static int
1479 igb_flow_destroy(struct rte_eth_dev *dev,
1480                 struct rte_flow *flow,
1481                 struct rte_flow_error *error)
1482 {
1483         int ret;
1484         struct rte_flow *pmd_flow = flow;
1485         enum rte_filter_type filter_type = pmd_flow->filter_type;
1486         struct igb_ntuple_filter_ele *ntuple_filter_ptr;
1487         struct igb_ethertype_filter_ele *ethertype_filter_ptr;
1488         struct igb_eth_syn_filter_ele *syn_filter_ptr;
1489         struct igb_flex_filter_ele *flex_filter_ptr;
1490         struct igb_flow_mem *igb_flow_mem_ptr;
1491
1492         switch (filter_type) {
1493         case RTE_ETH_FILTER_NTUPLE:
1494                 ntuple_filter_ptr = (struct igb_ntuple_filter_ele *)
1495                                         pmd_flow->rule;
1496                 ret = igb_add_del_ntuple_filter(dev,
1497                                 &ntuple_filter_ptr->filter_info, FALSE);
1498                 if (!ret) {
1499                         TAILQ_REMOVE(&igb_filter_ntuple_list,
1500                         ntuple_filter_ptr, entries);
1501                         rte_free(ntuple_filter_ptr);
1502                 }
1503                 break;
1504         case RTE_ETH_FILTER_ETHERTYPE:
1505                 ethertype_filter_ptr = (struct igb_ethertype_filter_ele *)
1506                                         pmd_flow->rule;
1507                 ret = igb_add_del_ethertype_filter(dev,
1508                                 &ethertype_filter_ptr->filter_info, FALSE);
1509                 if (!ret) {
1510                         TAILQ_REMOVE(&igb_filter_ethertype_list,
1511                                 ethertype_filter_ptr, entries);
1512                         rte_free(ethertype_filter_ptr);
1513                 }
1514                 break;
1515         case RTE_ETH_FILTER_SYN:
1516                 syn_filter_ptr = (struct igb_eth_syn_filter_ele *)
1517                                 pmd_flow->rule;
1518                 ret = eth_igb_syn_filter_set(dev,
1519                                 &syn_filter_ptr->filter_info, FALSE);
1520                 if (!ret) {
1521                         TAILQ_REMOVE(&igb_filter_syn_list,
1522                                 syn_filter_ptr, entries);
1523                         rte_free(syn_filter_ptr);
1524                 }
1525                 break;
1526         case RTE_ETH_FILTER_FLEXIBLE:
1527                 flex_filter_ptr = (struct igb_flex_filter_ele *)
1528                                 pmd_flow->rule;
1529                 ret = eth_igb_add_del_flex_filter(dev,
1530                                 &flex_filter_ptr->filter_info, FALSE);
1531                 if (!ret) {
1532                         TAILQ_REMOVE(&igb_filter_flex_list,
1533                                 flex_filter_ptr, entries);
1534                         rte_free(flex_filter_ptr);
1535                 }
1536                 break;
1537         default:
1538                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
1539                             filter_type);
1540                 ret = -EINVAL;
1541                 break;
1542         }
1543
1544         if (ret) {
1545                 rte_flow_error_set(error, EINVAL,
1546                                 RTE_FLOW_ERROR_TYPE_HANDLE,
1547                                 NULL, "Failed to destroy flow");
1548                 return ret;
1549         }
1550
1551         TAILQ_FOREACH(igb_flow_mem_ptr, &igb_flow_list, entries) {
1552                 if (igb_flow_mem_ptr->flow == pmd_flow) {
1553                         TAILQ_REMOVE(&igb_flow_list,
1554                                 igb_flow_mem_ptr, entries);
1555                         rte_free(igb_flow_mem_ptr);
1556                 }
1557         }
1558         rte_free(flow);
1559
1560         return ret;
1561 }
1562
1563 /* remove all the n-tuple filters */
1564 static void
1565 igb_clear_all_ntuple_filter(struct rte_eth_dev *dev)
1566 {
1567         struct e1000_filter_info *filter_info =
1568                 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
1569         struct e1000_5tuple_filter *p_5tuple;
1570         struct e1000_2tuple_filter *p_2tuple;
1571
1572         while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list)))
1573                 igb_delete_5tuple_filter_82576(dev, p_5tuple);
1574
1575         while ((p_2tuple = TAILQ_FIRST(&filter_info->twotuple_list)))
1576                 igb_delete_2tuple_filter(dev, p_2tuple);
1577 }
1578
1579 /* remove all the ether type filters */
1580 static void
1581 igb_clear_all_ethertype_filter(struct rte_eth_dev *dev)
1582 {
1583         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1584         struct e1000_filter_info *filter_info =
1585                 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
1586         int i;
1587
1588         for (i = 0; i < E1000_MAX_ETQF_FILTERS; i++) {
1589                 if (filter_info->ethertype_mask & (1 << i)) {
1590                         (void)igb_ethertype_filter_remove(filter_info,
1591                                                             (uint8_t)i);
1592                         E1000_WRITE_REG(hw, E1000_ETQF(i), 0);
1593                         E1000_WRITE_FLUSH(hw);
1594                 }
1595         }
1596 }
1597
1598 /* remove the SYN filter */
1599 static void
1600 igb_clear_syn_filter(struct rte_eth_dev *dev)
1601 {
1602         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1603         struct e1000_filter_info *filter_info =
1604                 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
1605
1606         if (filter_info->syn_info & E1000_SYN_FILTER_ENABLE) {
1607                 filter_info->syn_info = 0;
1608                 E1000_WRITE_REG(hw, E1000_SYNQF(0), 0);
1609                 E1000_WRITE_FLUSH(hw);
1610         }
1611 }
1612
1613 /* remove all the flex filters */
1614 static void
1615 igb_clear_all_flex_filter(struct rte_eth_dev *dev)
1616 {
1617         struct e1000_filter_info *filter_info =
1618                 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
1619         struct e1000_flex_filter *flex_filter;
1620
1621         while ((flex_filter = TAILQ_FIRST(&filter_info->flex_list)))
1622                 igb_remove_flex_filter(dev, flex_filter);
1623 }
1624
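/*
 * Remove all flows created on this port from the per-type filter lists and
 * from the global flow list, and free the associated memory. The hardware
 * filters themselves are cleared separately by the igb_clear_* helpers.
 */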
1625 void
1626 igb_filterlist_flush(struct rte_eth_dev *dev)
1627 {
1628         struct igb_ntuple_filter_ele *ntuple_filter_ptr;
1629         struct igb_ethertype_filter_ele *ethertype_filter_ptr;
1630         struct igb_eth_syn_filter_ele *syn_filter_ptr;
1631         struct igb_flex_filter_ele *flex_filter_ptr;
1632         struct igb_flow_mem *igb_flow_mem_ptr;
1633         enum rte_filter_type filter_type;
1634         struct rte_flow *pmd_flow;
1635
1636         TAILQ_FOREACH(igb_flow_mem_ptr, &igb_flow_list, entries) {
1637                 if (igb_flow_mem_ptr->dev == dev) {
1638                         pmd_flow = igb_flow_mem_ptr->flow;
1639                         filter_type = pmd_flow->filter_type;
1640
1641                         switch (filter_type) {
1642                         case RTE_ETH_FILTER_NTUPLE:
1643                                 ntuple_filter_ptr =
1644                                 (struct igb_ntuple_filter_ele *)
1645                                         pmd_flow->rule;
1646                                 TAILQ_REMOVE(&igb_filter_ntuple_list,
1647                                                 ntuple_filter_ptr, entries);
1648                                 rte_free(ntuple_filter_ptr);
1649                                 break;
1650                         case RTE_ETH_FILTER_ETHERTYPE:
1651                                 ethertype_filter_ptr =
1652                                 (struct igb_ethertype_filter_ele *)
1653                                         pmd_flow->rule;
1654                                 TAILQ_REMOVE(&igb_filter_ethertype_list,
1655                                                 ethertype_filter_ptr, entries);
1656                                 rte_free(ethertype_filter_ptr);
1657                                 break;
1658                         case RTE_ETH_FILTER_SYN:
1659                                 syn_filter_ptr =
1660                                         (struct igb_eth_syn_filter_ele *)
1661                                                 pmd_flow->rule;
1662                                 TAILQ_REMOVE(&igb_filter_syn_list,
1663                                                 syn_filter_ptr, entries);
1664                                 rte_free(syn_filter_ptr);
1665                                 break;
1666                         case RTE_ETH_FILTER_FLEXIBLE:
1667                                 flex_filter_ptr =
1668                                         (struct igb_flex_filter_ele *)
1669                                                 pmd_flow->rule;
1670                                 TAILQ_REMOVE(&igb_filter_flex_list,
1671                                                 flex_filter_ptr, entries);
1672                                 rte_free(flex_filter_ptr);
1673                                 break;
1674                         default:
1675                                 PMD_DRV_LOG(WARNING, "Filter type"
1676                                         " (%d) not supported", filter_type);
1677                                 break;
1678                         }
1679                         TAILQ_REMOVE(&igb_flow_list,
1680                                  igb_flow_mem_ptr,
1681                                  entries);
1682                         rte_free(igb_flow_mem_ptr->flow);
1683                         rte_free(igb_flow_mem_ptr);
1684                 }
1685         }
1686 }
1687
1688 /* Destroy all flow rules associated with a port on igb. */
1689 static int
1690 igb_flow_flush(struct rte_eth_dev *dev,
1691                 __rte_unused struct rte_flow_error *error)
1692 {
1693         igb_clear_all_ntuple_filter(dev);
1694         igb_clear_all_ethertype_filter(dev);
1695         igb_clear_syn_filter(dev);
1696         igb_clear_all_flex_filter(dev);
1697         igb_filterlist_flush(dev);
1698
1699         return 0;
1700 }
1701
1702 const struct rte_flow_ops igb_flow_ops = {
1703         .validate = igb_flow_validate,
1704         .create = igb_flow_create,
1705         .destroy = igb_flow_destroy,
1706         .flush = igb_flow_flush,
1707 };
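/*
 * Usage sketch (illustrative only, not part of the driver): an application
 * reaches the ops above through the generic rte_flow API. The port_id,
 * pattern and queue index below are assumptions for the example; whether a
 * given rule is accepted depends on the parsers in this file.
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 0 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *	struct rte_flow *flow = NULL;
 *
 *	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *		flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
 */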