/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
34 #include <sys/queue.h>
42 #include <netinet/in.h>
43 #include <rte_byteorder.h>
44 #include <rte_common.h>
45 #include <rte_cycles.h>
47 #include <rte_interrupts.h>
49 #include <rte_debug.h>
51 #include <rte_atomic.h>
52 #include <rte_branch_prediction.h>
53 #include <rte_memory.h>
55 #include <rte_alarm.h>
56 #include <rte_ether.h>
57 #include <rte_ethdev.h>
58 #include <rte_malloc.h>
59 #include <rte_random.h>
61 #include <rte_hash_crc.h>
63 #include <rte_flow_driver.h>
65 #include "ixgbe_logs.h"
66 #include "base/ixgbe_api.h"
67 #include "base/ixgbe_vf.h"
68 #include "base/ixgbe_common.h"
69 #include "ixgbe_ethdev.h"
70 #include "ixgbe_bypass.h"
71 #include "ixgbe_rxtx.h"
72 #include "base/ixgbe_type.h"
73 #include "base/ixgbe_phy.h"
74 #include "rte_pmd_ixgbe.h"
/* Valid priority range accepted for an n-tuple rule on ixgbe. */
#define IXGBE_MIN_N_TUPLE_PRIO	1
#define IXGBE_MAX_N_TUPLE_PRIO	7
/* Maximum flex-byte source offset supported by the flex filter. */
#define IXGBE_MAX_FLX_SOURCE_OFF	62
81 /* ntuple filter list structure */
82 struct ixgbe_ntuple_filter_ele {
83 TAILQ_ENTRY(ixgbe_ntuple_filter_ele) entries;
84 struct rte_eth_ntuple_filter filter_info;
86 /* ethertype filter list structure */
87 struct ixgbe_ethertype_filter_ele {
88 TAILQ_ENTRY(ixgbe_ethertype_filter_ele) entries;
89 struct rte_eth_ethertype_filter filter_info;
91 /* syn filter list structure */
92 struct ixgbe_eth_syn_filter_ele {
93 TAILQ_ENTRY(ixgbe_eth_syn_filter_ele) entries;
94 struct rte_eth_syn_filter filter_info;
96 /* fdir filter list structure */
97 struct ixgbe_fdir_rule_ele {
98 TAILQ_ENTRY(ixgbe_fdir_rule_ele) entries;
99 struct ixgbe_fdir_rule filter_info;
101 /* l2_tunnel filter list structure */
102 struct ixgbe_eth_l2_tunnel_conf_ele {
103 TAILQ_ENTRY(ixgbe_eth_l2_tunnel_conf_ele) entries;
104 struct rte_eth_l2_tunnel_conf filter_info;
/* ixgbe_flow memory list structure; tracks each rte_flow allocation
 * so all rules can be freed on flush/uninit.
 */
struct ixgbe_flow_mem {
	TAILQ_ENTRY(ixgbe_flow_mem) entries;
	struct rte_flow *flow;
};
/* List head types, one per kind of stored flow rule. */
TAILQ_HEAD(ixgbe_ntuple_filter_list, ixgbe_ntuple_filter_ele);
TAILQ_HEAD(ixgbe_ethertype_filter_list, ixgbe_ethertype_filter_ele);
TAILQ_HEAD(ixgbe_syn_filter_list, ixgbe_eth_syn_filter_ele);
TAILQ_HEAD(ixgbe_fdir_rule_filter_list, ixgbe_fdir_rule_ele);
TAILQ_HEAD(ixgbe_l2_tunnel_filter_list, ixgbe_eth_l2_tunnel_conf_ele);
TAILQ_HEAD(ixgbe_flow_mem_list, ixgbe_flow_mem);
119 static struct ixgbe_ntuple_filter_list filter_ntuple_list;
120 static struct ixgbe_ethertype_filter_list filter_ethertype_list;
121 static struct ixgbe_syn_filter_list filter_syn_list;
122 static struct ixgbe_fdir_rule_filter_list filter_fdir_list;
123 static struct ixgbe_l2_tunnel_filter_list filter_l2_tunnel_list;
124 static struct ixgbe_flow_mem_list ixgbe_flow_list;
127 * Endless loop will never happen with below assumption
128 * 1. there is at least one no-void item(END)
129 * 2. cur is before END.
132 const struct rte_flow_item *next_no_void_pattern(
133 const struct rte_flow_item pattern[],
134 const struct rte_flow_item *cur)
136 const struct rte_flow_item *next =
137 cur ? cur + 1 : &pattern[0];
139 if (next->type != RTE_FLOW_ITEM_TYPE_VOID)
146 const struct rte_flow_action *next_no_void_action(
147 const struct rte_flow_action actions[],
148 const struct rte_flow_action *cur)
150 const struct rte_flow_action *next =
151 cur ? cur + 1 : &actions[0];
153 if (next->type != RTE_FLOW_ACTION_TYPE_VOID)
160 * Please aware there's an asumption for all the parsers.
161 * rte_flow_item is using big endian, rte_flow_attr and
162 * rte_flow_action are using CPU order.
163 * Because the pattern is used to describe the packets,
164 * normally the packets should use network order.
168 * Parse the rule to see if it is a n-tuple rule.
169 * And get the n-tuple filter info BTW.
171 * The first not void item can be ETH or IPV4.
172 * The second not void item must be IPV4 if the first one is ETH.
173 * The third not void item must be UDP or TCP.
174 * The next not void item must be END.
176 * The first not void action should be QUEUE.
177 * The next not void action should be END.
181 * IPV4 src_addr 192.168.1.20 0xFFFFFFFF
182 * dst_addr 192.167.3.50 0xFFFFFFFF
183 * next_proto_id 17 0xFF
184 * UDP/TCP/ src_port 80 0xFFFF
185 * SCTP dst_port 80 0xFFFF
187 * other members in mask and spec should set to 0x00.
188 * item->last should be NULL.
190 * Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY.
194 cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
195 const struct rte_flow_item pattern[],
196 const struct rte_flow_action actions[],
197 struct rte_eth_ntuple_filter *filter,
198 struct rte_flow_error *error)
200 const struct rte_flow_item *item;
201 const struct rte_flow_action *act;
202 const struct rte_flow_item_ipv4 *ipv4_spec;
203 const struct rte_flow_item_ipv4 *ipv4_mask;
204 const struct rte_flow_item_tcp *tcp_spec;
205 const struct rte_flow_item_tcp *tcp_mask;
206 const struct rte_flow_item_udp *udp_spec;
207 const struct rte_flow_item_udp *udp_mask;
208 const struct rte_flow_item_sctp *sctp_spec;
209 const struct rte_flow_item_sctp *sctp_mask;
212 rte_flow_error_set(error,
213 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
214 NULL, "NULL pattern.");
219 rte_flow_error_set(error, EINVAL,
220 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
221 NULL, "NULL action.");
225 rte_flow_error_set(error, EINVAL,
226 RTE_FLOW_ERROR_TYPE_ATTR,
227 NULL, "NULL attribute.");
231 #ifdef RTE_LIBRTE_SECURITY
233 * Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY
235 act = next_no_void_action(actions, NULL);
236 if (act->type == RTE_FLOW_ACTION_TYPE_SECURITY) {
237 const void *conf = act->conf;
238 /* check if the next not void item is END */
239 act = next_no_void_action(actions, act);
240 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
241 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
242 rte_flow_error_set(error, EINVAL,
243 RTE_FLOW_ERROR_TYPE_ACTION,
244 act, "Not supported action.");
248 /* get the IP pattern*/
249 item = next_no_void_pattern(pattern, NULL);
250 while (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
251 item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
253 item->type == RTE_FLOW_ITEM_TYPE_END) {
254 rte_flow_error_set(error, EINVAL,
255 RTE_FLOW_ERROR_TYPE_ITEM,
256 item, "IP pattern missing.");
259 item = next_no_void_pattern(pattern, item);
262 filter->proto = IPPROTO_ESP;
263 return ixgbe_crypto_add_ingress_sa_from_flow(conf, item->spec,
264 item->type == RTE_FLOW_ITEM_TYPE_IPV6);
268 /* the first not void item can be MAC or IPv4 */
269 item = next_no_void_pattern(pattern, NULL);
271 if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
272 item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
273 rte_flow_error_set(error, EINVAL,
274 RTE_FLOW_ERROR_TYPE_ITEM,
275 item, "Not supported by ntuple filter");
279 if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
280 /*Not supported last point for range*/
282 rte_flow_error_set(error,
284 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
285 item, "Not supported last point for range");
289 /* if the first item is MAC, the content should be NULL */
290 if (item->spec || item->mask) {
291 rte_flow_error_set(error, EINVAL,
292 RTE_FLOW_ERROR_TYPE_ITEM,
293 item, "Not supported by ntuple filter");
296 /* check if the next not void item is IPv4 */
297 item = next_no_void_pattern(pattern, item);
298 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
299 rte_flow_error_set(error,
300 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
301 item, "Not supported by ntuple filter");
306 /* get the IPv4 info */
307 if (!item->spec || !item->mask) {
308 rte_flow_error_set(error, EINVAL,
309 RTE_FLOW_ERROR_TYPE_ITEM,
310 item, "Invalid ntuple mask");
313 /*Not supported last point for range*/
315 rte_flow_error_set(error, EINVAL,
316 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
317 item, "Not supported last point for range");
322 ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
324 * Only support src & dst addresses, protocol,
325 * others should be masked.
327 if (ipv4_mask->hdr.version_ihl ||
328 ipv4_mask->hdr.type_of_service ||
329 ipv4_mask->hdr.total_length ||
330 ipv4_mask->hdr.packet_id ||
331 ipv4_mask->hdr.fragment_offset ||
332 ipv4_mask->hdr.time_to_live ||
333 ipv4_mask->hdr.hdr_checksum) {
334 rte_flow_error_set(error,
335 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
336 item, "Not supported by ntuple filter");
340 filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
341 filter->src_ip_mask = ipv4_mask->hdr.src_addr;
342 filter->proto_mask = ipv4_mask->hdr.next_proto_id;
344 ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
345 filter->dst_ip = ipv4_spec->hdr.dst_addr;
346 filter->src_ip = ipv4_spec->hdr.src_addr;
347 filter->proto = ipv4_spec->hdr.next_proto_id;
349 /* check if the next not void item is TCP or UDP */
350 item = next_no_void_pattern(pattern, item);
351 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
352 item->type != RTE_FLOW_ITEM_TYPE_UDP &&
353 item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
354 item->type != RTE_FLOW_ITEM_TYPE_END) {
355 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
356 rte_flow_error_set(error, EINVAL,
357 RTE_FLOW_ERROR_TYPE_ITEM,
358 item, "Not supported by ntuple filter");
362 /* get the TCP/UDP info */
363 if ((item->type != RTE_FLOW_ITEM_TYPE_END) &&
364 (!item->spec || !item->mask)) {
365 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
366 rte_flow_error_set(error, EINVAL,
367 RTE_FLOW_ERROR_TYPE_ITEM,
368 item, "Invalid ntuple mask");
372 /*Not supported last point for range*/
374 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
375 rte_flow_error_set(error, EINVAL,
376 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
377 item, "Not supported last point for range");
382 if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
383 tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
386 * Only support src & dst ports, tcp flags,
387 * others should be masked.
389 if (tcp_mask->hdr.sent_seq ||
390 tcp_mask->hdr.recv_ack ||
391 tcp_mask->hdr.data_off ||
392 tcp_mask->hdr.rx_win ||
393 tcp_mask->hdr.cksum ||
394 tcp_mask->hdr.tcp_urp) {
396 sizeof(struct rte_eth_ntuple_filter));
397 rte_flow_error_set(error, EINVAL,
398 RTE_FLOW_ERROR_TYPE_ITEM,
399 item, "Not supported by ntuple filter");
403 filter->dst_port_mask = tcp_mask->hdr.dst_port;
404 filter->src_port_mask = tcp_mask->hdr.src_port;
405 if (tcp_mask->hdr.tcp_flags == 0xFF) {
406 filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
407 } else if (!tcp_mask->hdr.tcp_flags) {
408 filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
410 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
411 rte_flow_error_set(error, EINVAL,
412 RTE_FLOW_ERROR_TYPE_ITEM,
413 item, "Not supported by ntuple filter");
417 tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
418 filter->dst_port = tcp_spec->hdr.dst_port;
419 filter->src_port = tcp_spec->hdr.src_port;
420 filter->tcp_flags = tcp_spec->hdr.tcp_flags;
421 } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
422 udp_mask = (const struct rte_flow_item_udp *)item->mask;
425 * Only support src & dst ports,
426 * others should be masked.
428 if (udp_mask->hdr.dgram_len ||
429 udp_mask->hdr.dgram_cksum) {
431 sizeof(struct rte_eth_ntuple_filter));
432 rte_flow_error_set(error, EINVAL,
433 RTE_FLOW_ERROR_TYPE_ITEM,
434 item, "Not supported by ntuple filter");
438 filter->dst_port_mask = udp_mask->hdr.dst_port;
439 filter->src_port_mask = udp_mask->hdr.src_port;
441 udp_spec = (const struct rte_flow_item_udp *)item->spec;
442 filter->dst_port = udp_spec->hdr.dst_port;
443 filter->src_port = udp_spec->hdr.src_port;
444 } else if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
445 sctp_mask = (const struct rte_flow_item_sctp *)item->mask;
448 * Only support src & dst ports,
449 * others should be masked.
451 if (sctp_mask->hdr.tag ||
452 sctp_mask->hdr.cksum) {
454 sizeof(struct rte_eth_ntuple_filter));
455 rte_flow_error_set(error, EINVAL,
456 RTE_FLOW_ERROR_TYPE_ITEM,
457 item, "Not supported by ntuple filter");
461 filter->dst_port_mask = sctp_mask->hdr.dst_port;
462 filter->src_port_mask = sctp_mask->hdr.src_port;
464 sctp_spec = (const struct rte_flow_item_sctp *)item->spec;
465 filter->dst_port = sctp_spec->hdr.dst_port;
466 filter->src_port = sctp_spec->hdr.src_port;
471 /* check if the next not void item is END */
472 item = next_no_void_pattern(pattern, item);
473 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
474 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
475 rte_flow_error_set(error, EINVAL,
476 RTE_FLOW_ERROR_TYPE_ITEM,
477 item, "Not supported by ntuple filter");
484 * n-tuple only supports forwarding,
485 * check if the first not void action is QUEUE.
487 act = next_no_void_action(actions, NULL);
488 if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
489 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
490 rte_flow_error_set(error, EINVAL,
491 RTE_FLOW_ERROR_TYPE_ACTION,
492 item, "Not supported action.");
496 ((const struct rte_flow_action_queue *)act->conf)->index;
498 /* check if the next not void item is END */
499 act = next_no_void_action(actions, act);
500 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
501 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
502 rte_flow_error_set(error, EINVAL,
503 RTE_FLOW_ERROR_TYPE_ACTION,
504 act, "Not supported action.");
509 /* must be input direction */
510 if (!attr->ingress) {
511 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
512 rte_flow_error_set(error, EINVAL,
513 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
514 attr, "Only support ingress.");
520 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
521 rte_flow_error_set(error, EINVAL,
522 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
523 attr, "Not support egress.");
527 if (attr->priority > 0xFFFF) {
528 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
529 rte_flow_error_set(error, EINVAL,
530 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
531 attr, "Error priority.");
534 filter->priority = (uint16_t)attr->priority;
535 if (attr->priority < IXGBE_MIN_N_TUPLE_PRIO ||
536 attr->priority > IXGBE_MAX_N_TUPLE_PRIO)
537 filter->priority = 1;
542 /* a specific function for ixgbe because the flags is specific */
544 ixgbe_parse_ntuple_filter(struct rte_eth_dev *dev,
545 const struct rte_flow_attr *attr,
546 const struct rte_flow_item pattern[],
547 const struct rte_flow_action actions[],
548 struct rte_eth_ntuple_filter *filter,
549 struct rte_flow_error *error)
552 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
554 MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
556 ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);
561 #ifdef RTE_LIBRTE_SECURITY
562 /* ESP flow not really a flow*/
563 if (filter->proto == IPPROTO_ESP)
567 /* Ixgbe doesn't support tcp flags. */
568 if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
569 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
570 rte_flow_error_set(error, EINVAL,
571 RTE_FLOW_ERROR_TYPE_ITEM,
572 NULL, "Not supported by ntuple filter");
576 /* Ixgbe doesn't support many priorities. */
577 if (filter->priority < IXGBE_MIN_N_TUPLE_PRIO ||
578 filter->priority > IXGBE_MAX_N_TUPLE_PRIO) {
579 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
580 rte_flow_error_set(error, EINVAL,
581 RTE_FLOW_ERROR_TYPE_ITEM,
582 NULL, "Priority not supported by ntuple filter");
586 if (filter->queue >= dev->data->nb_rx_queues)
589 /* fixed value for ixgbe */
590 filter->flags = RTE_5TUPLE_FLAGS;
595 * Parse the rule to see if it is a ethertype rule.
596 * And get the ethertype filter info BTW.
598 * The first not void item can be ETH.
599 * The next not void item must be END.
601 * The first not void action should be QUEUE.
602 * The next not void action should be END.
605 * ETH type 0x0807 0xFFFF
607 * other members in mask and spec should set to 0x00.
608 * item->last should be NULL.
611 cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
612 const struct rte_flow_item *pattern,
613 const struct rte_flow_action *actions,
614 struct rte_eth_ethertype_filter *filter,
615 struct rte_flow_error *error)
617 const struct rte_flow_item *item;
618 const struct rte_flow_action *act;
619 const struct rte_flow_item_eth *eth_spec;
620 const struct rte_flow_item_eth *eth_mask;
621 const struct rte_flow_action_queue *act_q;
624 rte_flow_error_set(error, EINVAL,
625 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
626 NULL, "NULL pattern.");
631 rte_flow_error_set(error, EINVAL,
632 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
633 NULL, "NULL action.");
638 rte_flow_error_set(error, EINVAL,
639 RTE_FLOW_ERROR_TYPE_ATTR,
640 NULL, "NULL attribute.");
644 item = next_no_void_pattern(pattern, NULL);
645 /* The first non-void item should be MAC. */
646 if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
647 rte_flow_error_set(error, EINVAL,
648 RTE_FLOW_ERROR_TYPE_ITEM,
649 item, "Not supported by ethertype filter");
653 /*Not supported last point for range*/
655 rte_flow_error_set(error, EINVAL,
656 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
657 item, "Not supported last point for range");
661 /* Get the MAC info. */
662 if (!item->spec || !item->mask) {
663 rte_flow_error_set(error, EINVAL,
664 RTE_FLOW_ERROR_TYPE_ITEM,
665 item, "Not supported by ethertype filter");
669 eth_spec = (const struct rte_flow_item_eth *)item->spec;
670 eth_mask = (const struct rte_flow_item_eth *)item->mask;
672 /* Mask bits of source MAC address must be full of 0.
673 * Mask bits of destination MAC address must be full
676 if (!is_zero_ether_addr(ð_mask->src) ||
677 (!is_zero_ether_addr(ð_mask->dst) &&
678 !is_broadcast_ether_addr(ð_mask->dst))) {
679 rte_flow_error_set(error, EINVAL,
680 RTE_FLOW_ERROR_TYPE_ITEM,
681 item, "Invalid ether address mask");
685 if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
686 rte_flow_error_set(error, EINVAL,
687 RTE_FLOW_ERROR_TYPE_ITEM,
688 item, "Invalid ethertype mask");
692 /* If mask bits of destination MAC address
693 * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
695 if (is_broadcast_ether_addr(ð_mask->dst)) {
696 filter->mac_addr = eth_spec->dst;
697 filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
699 filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
701 filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
703 /* Check if the next non-void item is END. */
704 item = next_no_void_pattern(pattern, item);
705 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
706 rte_flow_error_set(error, EINVAL,
707 RTE_FLOW_ERROR_TYPE_ITEM,
708 item, "Not supported by ethertype filter.");
714 act = next_no_void_action(actions, NULL);
715 if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
716 act->type != RTE_FLOW_ACTION_TYPE_DROP) {
717 rte_flow_error_set(error, EINVAL,
718 RTE_FLOW_ERROR_TYPE_ACTION,
719 act, "Not supported action.");
723 if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
724 act_q = (const struct rte_flow_action_queue *)act->conf;
725 filter->queue = act_q->index;
727 filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
730 /* Check if the next non-void item is END */
731 act = next_no_void_action(actions, act);
732 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
733 rte_flow_error_set(error, EINVAL,
734 RTE_FLOW_ERROR_TYPE_ACTION,
735 act, "Not supported action.");
740 /* Must be input direction */
741 if (!attr->ingress) {
742 rte_flow_error_set(error, EINVAL,
743 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
744 attr, "Only support ingress.");
750 rte_flow_error_set(error, EINVAL,
751 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
752 attr, "Not support egress.");
757 if (attr->priority) {
758 rte_flow_error_set(error, EINVAL,
759 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
760 attr, "Not support priority.");
766 rte_flow_error_set(error, EINVAL,
767 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
768 attr, "Not support group.");
776 ixgbe_parse_ethertype_filter(struct rte_eth_dev *dev,
777 const struct rte_flow_attr *attr,
778 const struct rte_flow_item pattern[],
779 const struct rte_flow_action actions[],
780 struct rte_eth_ethertype_filter *filter,
781 struct rte_flow_error *error)
784 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
786 MAC_TYPE_FILTER_SUP(hw->mac.type);
788 ret = cons_parse_ethertype_filter(attr, pattern,
789 actions, filter, error);
794 /* Ixgbe doesn't support MAC address. */
795 if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
796 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
797 rte_flow_error_set(error, EINVAL,
798 RTE_FLOW_ERROR_TYPE_ITEM,
799 NULL, "Not supported by ethertype filter");
803 if (filter->queue >= dev->data->nb_rx_queues) {
804 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
805 rte_flow_error_set(error, EINVAL,
806 RTE_FLOW_ERROR_TYPE_ITEM,
807 NULL, "queue index much too big");
811 if (filter->ether_type == ETHER_TYPE_IPv4 ||
812 filter->ether_type == ETHER_TYPE_IPv6) {
813 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
814 rte_flow_error_set(error, EINVAL,
815 RTE_FLOW_ERROR_TYPE_ITEM,
816 NULL, "IPv4/IPv6 not supported by ethertype filter");
820 if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
821 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
822 rte_flow_error_set(error, EINVAL,
823 RTE_FLOW_ERROR_TYPE_ITEM,
824 NULL, "mac compare is unsupported");
828 if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
829 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
830 rte_flow_error_set(error, EINVAL,
831 RTE_FLOW_ERROR_TYPE_ITEM,
832 NULL, "drop option is unsupported");
840 * Parse the rule to see if it is a TCP SYN rule.
841 * And get the TCP SYN filter info BTW.
843 * The first not void item must be ETH.
844 * The second not void item must be IPV4 or IPV6.
845 * The third not void item must be TCP.
846 * The next not void item must be END.
848 * The first not void action should be QUEUE.
849 * The next not void action should be END.
853 * IPV4/IPV6 NULL NULL
854 * TCP tcp_flags 0x02 0xFF
856 * other members in mask and spec should set to 0x00.
857 * item->last should be NULL.
860 cons_parse_syn_filter(const struct rte_flow_attr *attr,
861 const struct rte_flow_item pattern[],
862 const struct rte_flow_action actions[],
863 struct rte_eth_syn_filter *filter,
864 struct rte_flow_error *error)
866 const struct rte_flow_item *item;
867 const struct rte_flow_action *act;
868 const struct rte_flow_item_tcp *tcp_spec;
869 const struct rte_flow_item_tcp *tcp_mask;
870 const struct rte_flow_action_queue *act_q;
873 rte_flow_error_set(error, EINVAL,
874 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
875 NULL, "NULL pattern.");
880 rte_flow_error_set(error, EINVAL,
881 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
882 NULL, "NULL action.");
887 rte_flow_error_set(error, EINVAL,
888 RTE_FLOW_ERROR_TYPE_ATTR,
889 NULL, "NULL attribute.");
894 /* the first not void item should be MAC or IPv4 or IPv6 or TCP */
895 item = next_no_void_pattern(pattern, NULL);
896 if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
897 item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
898 item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
899 item->type != RTE_FLOW_ITEM_TYPE_TCP) {
900 rte_flow_error_set(error, EINVAL,
901 RTE_FLOW_ERROR_TYPE_ITEM,
902 item, "Not supported by syn filter");
905 /*Not supported last point for range*/
907 rte_flow_error_set(error, EINVAL,
908 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
909 item, "Not supported last point for range");
914 if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
915 /* if the item is MAC, the content should be NULL */
916 if (item->spec || item->mask) {
917 rte_flow_error_set(error, EINVAL,
918 RTE_FLOW_ERROR_TYPE_ITEM,
919 item, "Invalid SYN address mask");
923 /* check if the next not void item is IPv4 or IPv6 */
924 item = next_no_void_pattern(pattern, item);
925 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
926 item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
927 rte_flow_error_set(error, EINVAL,
928 RTE_FLOW_ERROR_TYPE_ITEM,
929 item, "Not supported by syn filter");
935 if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
936 item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
937 /* if the item is IP, the content should be NULL */
938 if (item->spec || item->mask) {
939 rte_flow_error_set(error, EINVAL,
940 RTE_FLOW_ERROR_TYPE_ITEM,
941 item, "Invalid SYN mask");
945 /* check if the next not void item is TCP */
946 item = next_no_void_pattern(pattern, item);
947 if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
948 rte_flow_error_set(error, EINVAL,
949 RTE_FLOW_ERROR_TYPE_ITEM,
950 item, "Not supported by syn filter");
955 /* Get the TCP info. Only support SYN. */
956 if (!item->spec || !item->mask) {
957 rte_flow_error_set(error, EINVAL,
958 RTE_FLOW_ERROR_TYPE_ITEM,
959 item, "Invalid SYN mask");
962 /*Not supported last point for range*/
964 rte_flow_error_set(error, EINVAL,
965 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
966 item, "Not supported last point for range");
970 tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
971 tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
972 if (!(tcp_spec->hdr.tcp_flags & TCP_SYN_FLAG) ||
973 tcp_mask->hdr.src_port ||
974 tcp_mask->hdr.dst_port ||
975 tcp_mask->hdr.sent_seq ||
976 tcp_mask->hdr.recv_ack ||
977 tcp_mask->hdr.data_off ||
978 tcp_mask->hdr.tcp_flags != TCP_SYN_FLAG ||
979 tcp_mask->hdr.rx_win ||
980 tcp_mask->hdr.cksum ||
981 tcp_mask->hdr.tcp_urp) {
982 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
983 rte_flow_error_set(error, EINVAL,
984 RTE_FLOW_ERROR_TYPE_ITEM,
985 item, "Not supported by syn filter");
989 /* check if the next not void item is END */
990 item = next_no_void_pattern(pattern, item);
991 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
992 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
993 rte_flow_error_set(error, EINVAL,
994 RTE_FLOW_ERROR_TYPE_ITEM,
995 item, "Not supported by syn filter");
999 /* check if the first not void action is QUEUE. */
1000 act = next_no_void_action(actions, NULL);
1001 if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
1002 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1003 rte_flow_error_set(error, EINVAL,
1004 RTE_FLOW_ERROR_TYPE_ACTION,
1005 act, "Not supported action.");
1009 act_q = (const struct rte_flow_action_queue *)act->conf;
1010 filter->queue = act_q->index;
1011 if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
1012 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1013 rte_flow_error_set(error, EINVAL,
1014 RTE_FLOW_ERROR_TYPE_ACTION,
1015 act, "Not supported action.");
1019 /* check if the next not void item is END */
1020 act = next_no_void_action(actions, act);
1021 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1022 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1023 rte_flow_error_set(error, EINVAL,
1024 RTE_FLOW_ERROR_TYPE_ACTION,
1025 act, "Not supported action.");
1030 /* must be input direction */
1031 if (!attr->ingress) {
1032 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1033 rte_flow_error_set(error, EINVAL,
1034 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1035 attr, "Only support ingress.");
1041 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1042 rte_flow_error_set(error, EINVAL,
1043 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1044 attr, "Not support egress.");
1048 /* Support 2 priorities, the lowest or highest. */
1049 if (!attr->priority) {
1050 filter->hig_pri = 0;
1051 } else if (attr->priority == (uint32_t)~0U) {
1052 filter->hig_pri = 1;
1054 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1055 rte_flow_error_set(error, EINVAL,
1056 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1057 attr, "Not support priority.");
1065 ixgbe_parse_syn_filter(struct rte_eth_dev *dev,
1066 const struct rte_flow_attr *attr,
1067 const struct rte_flow_item pattern[],
1068 const struct rte_flow_action actions[],
1069 struct rte_eth_syn_filter *filter,
1070 struct rte_flow_error *error)
1073 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1075 MAC_TYPE_FILTER_SUP(hw->mac.type);
1077 ret = cons_parse_syn_filter(attr, pattern,
1078 actions, filter, error);
1080 if (filter->queue >= dev->data->nb_rx_queues)
1090 * Parse the rule to see if it is a L2 tunnel rule.
1091 * And get the L2 tunnel filter info BTW.
1092 * Only support E-tag now.
1094 * The first not void item can be E_TAG.
1095 * The next not void item must be END.
1097 * The first not void action should be VF or PF.
1098 * The next not void action should be END.
1102 e_cid_base 0x309 0xFFF
1104 * other members in mask and spec should set to 0x00.
1105 * item->last should be NULL.
1108 cons_parse_l2_tn_filter(struct rte_eth_dev *dev,
1109 const struct rte_flow_attr *attr,
1110 const struct rte_flow_item pattern[],
1111 const struct rte_flow_action actions[],
1112 struct rte_eth_l2_tunnel_conf *filter,
1113 struct rte_flow_error *error)
1115 const struct rte_flow_item *item;
1116 const struct rte_flow_item_e_tag *e_tag_spec;
1117 const struct rte_flow_item_e_tag *e_tag_mask;
1118 const struct rte_flow_action *act;
1119 const struct rte_flow_action_vf *act_vf;
1120 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1123 rte_flow_error_set(error, EINVAL,
1124 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1125 NULL, "NULL pattern.");
1130 rte_flow_error_set(error, EINVAL,
1131 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1132 NULL, "NULL action.");
1137 rte_flow_error_set(error, EINVAL,
1138 RTE_FLOW_ERROR_TYPE_ATTR,
1139 NULL, "NULL attribute.");
1143 /* The first not void item should be e-tag. */
1144 item = next_no_void_pattern(pattern, NULL);
1145 if (item->type != RTE_FLOW_ITEM_TYPE_E_TAG) {
1146 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1147 rte_flow_error_set(error, EINVAL,
1148 RTE_FLOW_ERROR_TYPE_ITEM,
1149 item, "Not supported by L2 tunnel filter");
1153 if (!item->spec || !item->mask) {
1154 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1155 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
1156 item, "Not supported by L2 tunnel filter");
1160 /*Not supported last point for range*/
1162 rte_flow_error_set(error, EINVAL,
1163 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1164 item, "Not supported last point for range");
1168 e_tag_spec = (const struct rte_flow_item_e_tag *)item->spec;
1169 e_tag_mask = (const struct rte_flow_item_e_tag *)item->mask;
1171 /* Only care about GRP and E cid base. */
1172 if (e_tag_mask->epcp_edei_in_ecid_b ||
1173 e_tag_mask->in_ecid_e ||
1174 e_tag_mask->ecid_e ||
1175 e_tag_mask->rsvd_grp_ecid_b != rte_cpu_to_be_16(0x3FFF)) {
1176 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1177 rte_flow_error_set(error, EINVAL,
1178 RTE_FLOW_ERROR_TYPE_ITEM,
1179 item, "Not supported by L2 tunnel filter");
1183 filter->l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
1185 * grp and e_cid_base are bit fields and only use 14 bits.
1186 * e-tag id is taken as little endian by HW.
1188 filter->tunnel_id = rte_be_to_cpu_16(e_tag_spec->rsvd_grp_ecid_b);
1190 /* check if the next not void item is END */
1191 item = next_no_void_pattern(pattern, item);
1192 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1193 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1194 rte_flow_error_set(error, EINVAL,
1195 RTE_FLOW_ERROR_TYPE_ITEM,
1196 item, "Not supported by L2 tunnel filter");
1201 /* must be input direction */
1202 if (!attr->ingress) {
1203 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1204 rte_flow_error_set(error, EINVAL,
1205 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1206 attr, "Only support ingress.");
1212 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1213 rte_flow_error_set(error, EINVAL,
1214 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1215 attr, "Not support egress.");
1220 if (attr->priority) {
1221 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1222 rte_flow_error_set(error, EINVAL,
1223 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1224 attr, "Not support priority.");
1228 /* check if the first not void action is VF or PF. */
1229 act = next_no_void_action(actions, NULL);
1230 if (act->type != RTE_FLOW_ACTION_TYPE_VF &&
1231 act->type != RTE_FLOW_ACTION_TYPE_PF) {
1232 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1233 rte_flow_error_set(error, EINVAL,
1234 RTE_FLOW_ERROR_TYPE_ACTION,
1235 act, "Not supported action.");
1239 if (act->type == RTE_FLOW_ACTION_TYPE_VF) {
1240 act_vf = (const struct rte_flow_action_vf *)act->conf;
1241 filter->pool = act_vf->id;
1243 filter->pool = pci_dev->max_vfs;
1246 /* check if the next not void item is END */
1247 act = next_no_void_action(actions, act);
1248 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1249 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1250 rte_flow_error_set(error, EINVAL,
1251 RTE_FLOW_ERROR_TYPE_ACTION,
1252 act, "Not supported action.");
/*
 * Parse an rte_flow rule as an E-tag L2 tunnel filter, then apply
 * device-level restrictions on top of the generic parse.
 *
 * dev          - ethdev; its MAC type and PCI max_vfs gate the feature
 * attr/pattern/actions - the rte_flow rule to validate
 * l2_tn_filter - output filter; zeroed again whenever the rule is rejected
 * error        - rte_flow error detail filled on failure
 *
 * NOTE(review): this chunk is an extract — the leading numbers are the
 * original file's line numbers and some intermediate lines (returns,
 * closing braces) are elided; do not treat the visible text as complete.
 */
1260 ixgbe_parse_l2_tn_filter(struct rte_eth_dev *dev,
1261 const struct rte_flow_attr *attr,
1262 const struct rte_flow_item pattern[],
1263 const struct rte_flow_action actions[],
1264 struct rte_eth_l2_tunnel_conf *l2_tn_filter,
1265 struct rte_flow_error *error)
1268 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1269 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
/* Device-independent parse of the E-tag pattern/actions first. */
1272 ret = cons_parse_l2_tn_filter(dev, attr, pattern,
1273 actions, l2_tn_filter, error);
/* E-tag L2 tunnel filtering is only implemented on the X550 family. */
1275 if (hw->mac.type != ixgbe_mac_X550 &&
1276 hw->mac.type != ixgbe_mac_X550EM_x &&
1277 hw->mac.type != ixgbe_mac_X550EM_a) {
1278 memset(l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1279 rte_flow_error_set(error, EINVAL,
1280 RTE_FLOW_ERROR_TYPE_ITEM,
1281 NULL, "Not supported by L2 tunnel filter");
/*
 * pool was set to a VF id or to max_vfs (the PF) by the generic parse;
 * anything above max_vfs cannot be a valid destination.
 */
1285 vf_num = pci_dev->max_vfs;
1287 if (l2_tn_filter->pool > vf_num)
1293 /* Parse to get the attr and action info of flow director rule. */
/*
 * Validates the rule attributes (ingress only, no egress, no priority)
 * and parses the action list: first action must be QUEUE or DROP, an
 * optional MARK may follow, then END. Fills rule->queue / rule->fdirflags
 * / rule->soft_id accordingly; zeroes *rule and sets *error on rejection.
 *
 * NOTE(review): extracted chunk — leading numbers are original file line
 * numbers; some lines (returns, closing braces, `if` headers) are elided.
 */
1295 ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
1296 const struct rte_flow_action actions[],
1297 struct ixgbe_fdir_rule *rule,
1298 struct rte_flow_error *error)
1300 const struct rte_flow_action *act;
1301 const struct rte_flow_action_queue *act_q;
1302 const struct rte_flow_action_mark *mark;
1305 /* must be input direction */
1306 if (!attr->ingress) {
1307 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1308 rte_flow_error_set(error, EINVAL,
1309 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1310 attr, "Only support ingress.");
/* Egress rules are rejected (the egress check's `if` line is elided here). */
1316 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1317 rte_flow_error_set(error, EINVAL,
1318 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1319 attr, "Not support egress.");
/* Flow director has no priority concept; reject any non-zero priority. */
1324 if (attr->priority) {
1325 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1326 rte_flow_error_set(error, EINVAL,
1327 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1328 attr, "Not support priority.");
1332 /* check if the first not void action is QUEUE or DROP. */
1333 act = next_no_void_action(actions, NULL);
1334 if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
1335 act->type != RTE_FLOW_ACTION_TYPE_DROP) {
1336 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1337 rte_flow_error_set(error, EINVAL,
1338 RTE_FLOW_ERROR_TYPE_ACTION,
1339 act, "Not supported action.");
1343 if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
1344 act_q = (const struct rte_flow_action_queue *)act->conf;
1345 rule->queue = act_q->index;
1347 /* signature mode does not support drop action. */
1348 if (rule->mode == RTE_FDIR_MODE_SIGNATURE) {
1349 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1350 rte_flow_error_set(error, EINVAL,
1351 RTE_FLOW_ERROR_TYPE_ACTION,
1352 act, "Not supported action.");
1355 rule->fdirflags = IXGBE_FDIRCMD_DROP;
1358 /* check if the next not void item is MARK */
1359 act = next_no_void_action(actions, act);
1360 if ((act->type != RTE_FLOW_ACTION_TYPE_MARK) &&
1361 (act->type != RTE_FLOW_ACTION_TYPE_END)) {
1362 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1363 rte_flow_error_set(error, EINVAL,
1364 RTE_FLOW_ERROR_TYPE_ACTION,
1365 act, "Not supported action.");
/* MARK is optional: record soft_id and advance to the expected END. */
1371 if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
1372 mark = (const struct rte_flow_action_mark *)act->conf;
1373 rule->soft_id = mark->id;
1374 act = next_no_void_action(actions, act);
1377 /* check if the next not void item is END */
1378 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1379 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1380 rte_flow_error_set(error, EINVAL,
1381 RTE_FLOW_ERROR_TYPE_ACTION,
1382 act, "Not supported action.");
1389 /* search next no void pattern and skip fuzzy */
/*
 * Like next_no_void_pattern() but additionally skips FUZZY items, so
 * callers iterate the "real" protocol items only. Returns the first item
 * after `cur` that is neither VOID nor FUZZY.
 * NOTE(review): extracted chunk — the loop/return framing lines are elided.
 */
1391 const struct rte_flow_item *next_no_fuzzy_pattern(
1392 const struct rte_flow_item pattern[],
1393 const struct rte_flow_item *cur)
1395 const struct rte_flow_item *next =
1396 next_no_void_pattern(pattern, cur);
1398 if (next->type != RTE_FLOW_ITEM_TYPE_FUZZY)
1400 next = next_no_void_pattern(pattern, next);
/*
 * Scan the pattern for a FUZZY item to decide whether the rule requests
 * signature-match mode. The visible code walks items until END and, on a
 * FUZZY item, loads its spec/last/mask; the decision logic that consumes
 * sh/lh/mh and the return statements are elided from this extract —
 * presumably it returns non-zero when a fuzzy threshold is set (TODO:
 * confirm against the full source).
 */
1404 static inline uint8_t signature_match(const struct rte_flow_item pattern[])
1406 const struct rte_flow_item_fuzzy *spec, *last, *mask;
1407 const struct rte_flow_item *item;
1408 uint32_t sh, lh, mh;
1413 if (item->type == RTE_FLOW_ITEM_TYPE_END)
1416 if (item->type == RTE_FLOW_ITEM_TYPE_FUZZY) {
1418 (const struct rte_flow_item_fuzzy *)item->spec;
1420 (const struct rte_flow_item_fuzzy *)item->last;
1422 (const struct rte_flow_item_fuzzy *)item->mask;
1451 * Parse the rule to see if it is a IP or MAC VLAN flow director rule.
1452 * And get the flow director filter info BTW.
1453 * UDP/TCP/SCTP PATTERN:
1454 * The first not void item can be ETH or IPV4 or IPV6
1455 * The second not void item must be IPV4 or IPV6 if the first one is ETH.
1456 * The next not void item could be UDP or TCP or SCTP (optional)
1457 * The next not void item could be RAW (for flexbyte, optional)
1458 * The next not void item must be END.
1459 * A Fuzzy Match pattern can appear at any place before END.
1460 * Fuzzy Match is optional for IPV4 but is required for IPV6
1462 * The first not void item must be ETH.
1463 * The second not void item must be MAC VLAN.
1464 * The next not void item must be END.
1466 * The first not void action should be QUEUE or DROP.
1467 * The second not void optional action should be MARK,
1468 * mark_id is a uint32_t number.
1469 * The next not void action should be END.
1470 * UDP/TCP/SCTP pattern example:
1473 * IPV4 src_addr 192.168.1.20 0xFFFFFFFF
1474 * dst_addr 192.167.3.50 0xFFFFFFFF
1475 * UDP/TCP/SCTP src_port 80 0xFFFF
1476 * dst_port 80 0xFFFF
1477 * FLEX relative 0 0x1
1480 * offset 12 0xFFFFFFFF
1483 * pattern[0] 0x86 0xFF
1484 * pattern[1] 0xDD 0xFF
1486 * MAC VLAN pattern example:
1489 {0xAC, 0x7B, 0xA1, {0xFF, 0xFF, 0xFF,
1490 0x2C, 0x6D, 0x36} 0xFF, 0xFF, 0xFF}
1491 * MAC VLAN tci 0x2016 0xEFFF
1493 * Other members in mask and spec should set to 0x00.
1494 * Item->last should be NULL.
/*
 * Parse an rte_flow rule as a "normal" (non-tunnel) flow director rule:
 * ETH / VLAN / IPv4 / IPv6 / TCP / UDP / SCTP plus an optional RAW
 * flexbyte item, as documented in the comment block above. Fills *rule
 * (mode, masks, formatted fields); on any rejection the rule is zeroed
 * and *error is set. Finishes by delegating attr/action parsing to
 * ixgbe_parse_fdir_act_attr().
 *
 * NOTE(review): extracted chunk — the leading numbers are the original
 * file's line numbers; some lines (returns, closing braces, several `if`
 * headers) are elided, so the visible text is not the complete body.
 */
1497 ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
1498 const struct rte_flow_attr *attr,
1499 const struct rte_flow_item pattern[],
1500 const struct rte_flow_action actions[],
1501 struct ixgbe_fdir_rule *rule,
1502 struct rte_flow_error *error)
1504 const struct rte_flow_item *item;
1505 const struct rte_flow_item_eth *eth_spec;
1506 const struct rte_flow_item_eth *eth_mask;
1507 const struct rte_flow_item_ipv4 *ipv4_spec;
1508 const struct rte_flow_item_ipv4 *ipv4_mask;
1509 const struct rte_flow_item_ipv6 *ipv6_spec;
1510 const struct rte_flow_item_ipv6 *ipv6_mask;
1511 const struct rte_flow_item_tcp *tcp_spec;
1512 const struct rte_flow_item_tcp *tcp_mask;
1513 const struct rte_flow_item_udp *udp_spec;
1514 const struct rte_flow_item_udp *udp_mask;
1515 const struct rte_flow_item_sctp *sctp_spec;
1516 const struct rte_flow_item_sctp *sctp_mask;
1517 const struct rte_flow_item_vlan *vlan_spec;
1518 const struct rte_flow_item_vlan *vlan_mask;
1519 const struct rte_flow_item_raw *raw_mask;
1520 const struct rte_flow_item_raw *raw_spec;
1523 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
/* NULL-argument guards (the `if (!pattern)` etc. headers are elided). */
1526 rte_flow_error_set(error, EINVAL,
1527 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1528 NULL, "NULL pattern.");
1533 rte_flow_error_set(error, EINVAL,
1534 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1535 NULL, "NULL action.");
1540 rte_flow_error_set(error, EINVAL,
1541 RTE_FLOW_ERROR_TYPE_ATTR,
1542 NULL, "NULL attribute.");
1547 * Some fields may not be provided. Set spec to 0 and mask to default
1548 * value. So, we need not do anything for the not provided fields later.
1550 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1551 memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
1552 rule->mask.vlan_tci_mask = 0;
1553 rule->mask.flex_bytes_mask = 0;
1556 * The first not void item should be
1557 * MAC or IPv4 or TCP or UDP or SCTP.
1559 item = next_no_fuzzy_pattern(pattern, NULL);
1560 if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
1561 item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
1562 item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
1563 item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1564 item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1565 item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
1566 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1567 rte_flow_error_set(error, EINVAL,
1568 RTE_FLOW_ERROR_TYPE_ITEM,
1569 item, "Not supported by fdir filter");
/* A FUZZY item anywhere in the pattern selects signature mode. */
1573 if (signature_match(pattern))
1574 rule->mode = RTE_FDIR_MODE_SIGNATURE;
1576 rule->mode = RTE_FDIR_MODE_PERFECT;
1578 /*Not supported last point for range*/
1580 rte_flow_error_set(error, EINVAL,
1581 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1582 item, "Not supported last point for range");
1586 /* Get the MAC info. */
1587 if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
1589 * Only support vlan and dst MAC address,
1590 * others should be masked.
1592 if (item->spec && !item->mask) {
1593 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1594 rte_flow_error_set(error, EINVAL,
1595 RTE_FLOW_ERROR_TYPE_ITEM,
1596 item, "Not supported by fdir filter");
1601 rule->b_spec = TRUE;
1602 eth_spec = (const struct rte_flow_item_eth *)item->spec;
1604 /* Get the dst MAC. */
1605 for (j = 0; j < ETHER_ADDR_LEN; j++) {
1606 rule->ixgbe_fdir.formatted.inner_mac[j] =
1607 eth_spec->dst.addr_bytes[j];
1614 rule->b_mask = TRUE;
1615 eth_mask = (const struct rte_flow_item_eth *)item->mask;
1617 /* Ether type should be masked. */
1618 if (eth_mask->type ||
1619 rule->mode == RTE_FDIR_MODE_SIGNATURE) {
1620 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1621 rte_flow_error_set(error, EINVAL,
1622 RTE_FLOW_ERROR_TYPE_ITEM,
1623 item, "Not supported by fdir filter");
1627 /* If ethernet has meaning, it means MAC VLAN mode. */
1628 rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN;
1631 * src MAC address must be masked,
1632 * and don't support dst MAC address mask.
1634 for (j = 0; j < ETHER_ADDR_LEN; j++) {
1635 if (eth_mask->src.addr_bytes[j] ||
1636 eth_mask->dst.addr_bytes[j] != 0xFF) {
1638 sizeof(struct ixgbe_fdir_rule));
1639 rte_flow_error_set(error, EINVAL,
1640 RTE_FLOW_ERROR_TYPE_ITEM,
1641 item, "Not supported by fdir filter");
1646 /* When no VLAN, considered as full mask. */
/* 0xEFFF masks TCI except bit 12 — presumably the CFI/DEI bit; TODO confirm. */
1647 rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
1649 /*** If both spec and mask are item,
1650 * it means don't care about ETH.
1655 * Check if the next not void item is vlan or ipv4.
1656 * IPv6 is not supported.
1658 item = next_no_fuzzy_pattern(pattern, item);
1659 if (rule->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
1660 if (item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
1661 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1662 rte_flow_error_set(error, EINVAL,
1663 RTE_FLOW_ERROR_TYPE_ITEM,
1664 item, "Not supported by fdir filter");
1668 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
1669 item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
1670 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1671 rte_flow_error_set(error, EINVAL,
1672 RTE_FLOW_ERROR_TYPE_ITEM,
1673 item, "Not supported by fdir filter");
/* VLAN item: spec and mask are both mandatory here. */
1679 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1680 if (!(item->spec && item->mask)) {
1681 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1682 rte_flow_error_set(error, EINVAL,
1683 RTE_FLOW_ERROR_TYPE_ITEM,
1684 item, "Not supported by fdir filter");
1688 /*Not supported last point for range*/
1690 rte_flow_error_set(error, EINVAL,
1691 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1692 item, "Not supported last point for range");
1696 vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
1697 vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
1699 rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
1701 rule->mask.vlan_tci_mask = vlan_mask->tci;
1702 rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
1703 /* More than one tags are not supported. */
1705 /* Next not void item must be END */
1706 item = next_no_fuzzy_pattern(pattern, item);
1707 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1708 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1709 rte_flow_error_set(error, EINVAL,
1710 RTE_FLOW_ERROR_TYPE_ITEM,
1711 item, "Not supported by fdir filter");
1716 /* Get the IPV4 info. */
1717 if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1719 * Set the flow type even if there's no content
1720 * as we must have a flow type.
1722 rule->ixgbe_fdir.formatted.flow_type =
1723 IXGBE_ATR_FLOW_TYPE_IPV4;
1724 /*Not supported last point for range*/
1726 rte_flow_error_set(error, EINVAL,
1727 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1728 item, "Not supported last point for range");
1732 * Only care about src & dst addresses,
1733 * others should be masked.
1736 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1737 rte_flow_error_set(error, EINVAL,
1738 RTE_FLOW_ERROR_TYPE_ITEM,
1739 item, "Not supported by fdir filter");
1742 rule->b_mask = TRUE;
1744 (const struct rte_flow_item_ipv4 *)item->mask;
/* Any masked field other than src/dst address makes the rule unsupportable. */
1745 if (ipv4_mask->hdr.version_ihl ||
1746 ipv4_mask->hdr.type_of_service ||
1747 ipv4_mask->hdr.total_length ||
1748 ipv4_mask->hdr.packet_id ||
1749 ipv4_mask->hdr.fragment_offset ||
1750 ipv4_mask->hdr.time_to_live ||
1751 ipv4_mask->hdr.next_proto_id ||
1752 ipv4_mask->hdr.hdr_checksum) {
1753 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1754 rte_flow_error_set(error, EINVAL,
1755 RTE_FLOW_ERROR_TYPE_ITEM,
1756 item, "Not supported by fdir filter");
1759 rule->mask.dst_ipv4_mask = ipv4_mask->hdr.dst_addr;
1760 rule->mask.src_ipv4_mask = ipv4_mask->hdr.src_addr;
1763 rule->b_spec = TRUE;
1765 (const struct rte_flow_item_ipv4 *)item->spec;
1766 rule->ixgbe_fdir.formatted.dst_ip[0] =
1767 ipv4_spec->hdr.dst_addr;
1768 rule->ixgbe_fdir.formatted.src_ip[0] =
1769 ipv4_spec->hdr.src_addr;
1773 * Check if the next not void item is
1774 * TCP or UDP or SCTP or END.
1776 item = next_no_fuzzy_pattern(pattern, item);
1777 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1778 item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1779 item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
1780 item->type != RTE_FLOW_ITEM_TYPE_END &&
1781 item->type != RTE_FLOW_ITEM_TYPE_RAW) {
1782 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1783 rte_flow_error_set(error, EINVAL,
1784 RTE_FLOW_ERROR_TYPE_ITEM,
1785 item, "Not supported by fdir filter");
1790 /* Get the IPV6 info. */
1791 if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
1793 * Set the flow type even if there's no content
1794 * as we must have a flow type.
1796 rule->ixgbe_fdir.formatted.flow_type =
1797 IXGBE_ATR_FLOW_TYPE_IPV6;
1800 * 1. must signature match
1801 * 2. not support last
1802 * 3. mask must not null
1804 if (rule->mode != RTE_FDIR_MODE_SIGNATURE ||
1807 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1808 rte_flow_error_set(error, EINVAL,
1809 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1810 item, "Not supported last point for range");
1814 rule->b_mask = TRUE;
1816 (const struct rte_flow_item_ipv6 *)item->mask;
1817 if (ipv6_mask->hdr.vtc_flow ||
1818 ipv6_mask->hdr.payload_len ||
1819 ipv6_mask->hdr.proto ||
1820 ipv6_mask->hdr.hop_limits) {
1821 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1822 rte_flow_error_set(error, EINVAL,
1823 RTE_FLOW_ERROR_TYPE_ITEM,
1824 item, "Not supported by fdir filter");
1828 /* check src addr mask */
/*
 * IPv6 address masks are stored as per-byte bitmaps: bit j set means
 * address byte j is fully (0xFF) masked; partial byte masks are rejected.
 */
1829 for (j = 0; j < 16; j++) {
1830 if (ipv6_mask->hdr.src_addr[j] == UINT8_MAX) {
1831 rule->mask.src_ipv6_mask |= 1 << j;
1832 } else if (ipv6_mask->hdr.src_addr[j] != 0) {
1833 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1834 rte_flow_error_set(error, EINVAL,
1835 RTE_FLOW_ERROR_TYPE_ITEM,
1836 item, "Not supported by fdir filter");
1841 /* check dst addr mask */
1842 for (j = 0; j < 16; j++) {
1843 if (ipv6_mask->hdr.dst_addr[j] == UINT8_MAX) {
1844 rule->mask.dst_ipv6_mask |= 1 << j;
1845 } else if (ipv6_mask->hdr.dst_addr[j] != 0) {
1846 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1847 rte_flow_error_set(error, EINVAL,
1848 RTE_FLOW_ERROR_TYPE_ITEM,
1849 item, "Not supported by fdir filter");
1855 rule->b_spec = TRUE;
1857 (const struct rte_flow_item_ipv6 *)item->spec;
1858 rte_memcpy(rule->ixgbe_fdir.formatted.src_ip,
1859 ipv6_spec->hdr.src_addr, 16);
1860 rte_memcpy(rule->ixgbe_fdir.formatted.dst_ip,
1861 ipv6_spec->hdr.dst_addr, 16);
1865 * Check if the next not void item is
1866 * TCP or UDP or SCTP or END.
1868 item = next_no_fuzzy_pattern(pattern, item);
1869 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1870 item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1871 item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
1872 item->type != RTE_FLOW_ITEM_TYPE_END &&
1873 item->type != RTE_FLOW_ITEM_TYPE_RAW) {
1874 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1875 rte_flow_error_set(error, EINVAL,
1876 RTE_FLOW_ERROR_TYPE_ITEM,
1877 item, "Not supported by fdir filter");
1882 /* Get the TCP info. */
1883 if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
1885 * Set the flow type even if there's no content
1886 * as we must have a flow type.
1888 rule->ixgbe_fdir.formatted.flow_type |=
1889 IXGBE_ATR_L4TYPE_TCP;
1890 /*Not supported last point for range*/
1892 rte_flow_error_set(error, EINVAL,
1893 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1894 item, "Not supported last point for range");
1898 * Only care about src & dst ports,
1899 * others should be masked.
1902 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1903 rte_flow_error_set(error, EINVAL,
1904 RTE_FLOW_ERROR_TYPE_ITEM,
1905 item, "Not supported by fdir filter");
1908 rule->b_mask = TRUE;
1909 tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
1910 if (tcp_mask->hdr.sent_seq ||
1911 tcp_mask->hdr.recv_ack ||
1912 tcp_mask->hdr.data_off ||
1913 tcp_mask->hdr.tcp_flags ||
1914 tcp_mask->hdr.rx_win ||
1915 tcp_mask->hdr.cksum ||
1916 tcp_mask->hdr.tcp_urp) {
1917 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1918 rte_flow_error_set(error, EINVAL,
1919 RTE_FLOW_ERROR_TYPE_ITEM,
1920 item, "Not supported by fdir filter");
1923 rule->mask.src_port_mask = tcp_mask->hdr.src_port;
1924 rule->mask.dst_port_mask = tcp_mask->hdr.dst_port;
1927 rule->b_spec = TRUE;
1928 tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
1929 rule->ixgbe_fdir.formatted.src_port =
1930 tcp_spec->hdr.src_port;
1931 rule->ixgbe_fdir.formatted.dst_port =
1932 tcp_spec->hdr.dst_port;
/* After an L4 item only RAW (flexbytes) or END may follow. */
1935 item = next_no_fuzzy_pattern(pattern, item);
1936 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
1937 item->type != RTE_FLOW_ITEM_TYPE_END) {
1938 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1939 rte_flow_error_set(error, EINVAL,
1940 RTE_FLOW_ERROR_TYPE_ITEM,
1941 item, "Not supported by fdir filter");
1947 /* Get the UDP info */
1948 if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
1950 * Set the flow type even if there's no content
1951 * as we must have a flow type.
1953 rule->ixgbe_fdir.formatted.flow_type |=
1954 IXGBE_ATR_L4TYPE_UDP;
1955 /*Not supported last point for range*/
1957 rte_flow_error_set(error, EINVAL,
1958 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1959 item, "Not supported last point for range");
1963 * Only care about src & dst ports,
1964 * others should be masked.
1967 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1968 rte_flow_error_set(error, EINVAL,
1969 RTE_FLOW_ERROR_TYPE_ITEM,
1970 item, "Not supported by fdir filter");
1973 rule->b_mask = TRUE;
1974 udp_mask = (const struct rte_flow_item_udp *)item->mask;
1975 if (udp_mask->hdr.dgram_len ||
1976 udp_mask->hdr.dgram_cksum) {
1977 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1978 rte_flow_error_set(error, EINVAL,
1979 RTE_FLOW_ERROR_TYPE_ITEM,
1980 item, "Not supported by fdir filter");
1983 rule->mask.src_port_mask = udp_mask->hdr.src_port;
1984 rule->mask.dst_port_mask = udp_mask->hdr.dst_port;
1987 rule->b_spec = TRUE;
1988 udp_spec = (const struct rte_flow_item_udp *)item->spec;
1989 rule->ixgbe_fdir.formatted.src_port =
1990 udp_spec->hdr.src_port;
1991 rule->ixgbe_fdir.formatted.dst_port =
1992 udp_spec->hdr.dst_port;
1995 item = next_no_fuzzy_pattern(pattern, item);
1996 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
1997 item->type != RTE_FLOW_ITEM_TYPE_END) {
1998 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1999 rte_flow_error_set(error, EINVAL,
2000 RTE_FLOW_ERROR_TYPE_ITEM,
2001 item, "Not supported by fdir filter");
2007 /* Get the SCTP info */
2008 if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
2010 * Set the flow type even if there's no content
2011 * as we must have a flow type.
2013 rule->ixgbe_fdir.formatted.flow_type |=
2014 IXGBE_ATR_L4TYPE_SCTP;
2015 /*Not supported last point for range*/
2017 rte_flow_error_set(error, EINVAL,
2018 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2019 item, "Not supported last point for range");
2023 /* Only the X550 family supports matching on SCTP ports. */
2024 if (hw->mac.type == ixgbe_mac_X550 ||
2025 hw->mac.type == ixgbe_mac_X550EM_x ||
2026 hw->mac.type == ixgbe_mac_X550EM_a) {
2028 * Only care about src & dst ports,
2029 * others should be masked.
2032 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2033 rte_flow_error_set(error, EINVAL,
2034 RTE_FLOW_ERROR_TYPE_ITEM,
2035 item, "Not supported by fdir filter");
2038 rule->b_mask = TRUE;
2040 (const struct rte_flow_item_sctp *)item->mask;
2041 if (sctp_mask->hdr.tag ||
2042 sctp_mask->hdr.cksum) {
2043 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2044 rte_flow_error_set(error, EINVAL,
2045 RTE_FLOW_ERROR_TYPE_ITEM,
2046 item, "Not supported by fdir filter");
2049 rule->mask.src_port_mask = sctp_mask->hdr.src_port;
2050 rule->mask.dst_port_mask = sctp_mask->hdr.dst_port;
2053 rule->b_spec = TRUE;
2055 (const struct rte_flow_item_sctp *)item->spec;
2056 rule->ixgbe_fdir.formatted.src_port =
2057 sctp_spec->hdr.src_port;
2058 rule->ixgbe_fdir.formatted.dst_port =
2059 sctp_spec->hdr.dst_port;
2061 /* others even sctp port is not supported */
/* Non-X550: any non-zero SCTP mask field (incl. ports) is rejected. */
2064 (const struct rte_flow_item_sctp *)item->mask;
2066 (sctp_mask->hdr.src_port ||
2067 sctp_mask->hdr.dst_port ||
2068 sctp_mask->hdr.tag ||
2069 sctp_mask->hdr.cksum)) {
2070 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2071 rte_flow_error_set(error, EINVAL,
2072 RTE_FLOW_ERROR_TYPE_ITEM,
2073 item, "Not supported by fdir filter");
2078 item = next_no_fuzzy_pattern(pattern, item);
2079 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
2080 item->type != RTE_FLOW_ITEM_TYPE_END) {
2081 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2082 rte_flow_error_set(error, EINVAL,
2083 RTE_FLOW_ERROR_TYPE_ITEM,
2084 item, "Not supported by fdir filter");
2089 /* Get the flex byte info */
2090 if (item->type == RTE_FLOW_ITEM_TYPE_RAW) {
2091 /* Not supported last point for range*/
2093 rte_flow_error_set(error, EINVAL,
2094 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2095 item, "Not supported last point for range");
2098 /* mask should not be null */
2099 if (!item->mask || !item->spec) {
2100 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2101 rte_flow_error_set(error, EINVAL,
2102 RTE_FLOW_ERROR_TYPE_ITEM,
2103 item, "Not supported by fdir filter");
2107 raw_mask = (const struct rte_flow_item_raw *)item->mask;
/* RAW mask must be exact: all control fields fully specified. */
2110 if (raw_mask->relative != 0x1 ||
2111 raw_mask->search != 0x1 ||
2112 raw_mask->reserved != 0x0 ||
2113 (uint32_t)raw_mask->offset != 0xffffffff ||
2114 raw_mask->limit != 0xffff ||
2115 raw_mask->length != 0xffff) {
2116 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2117 rte_flow_error_set(error, EINVAL,
2118 RTE_FLOW_ERROR_TYPE_ITEM,
2119 item, "Not supported by fdir filter");
2123 raw_spec = (const struct rte_flow_item_raw *)item->spec;
/* HW supports exactly 2 flex bytes at an even, bounded offset. */
2126 if (raw_spec->relative != 0 ||
2127 raw_spec->search != 0 ||
2128 raw_spec->reserved != 0 ||
2129 raw_spec->offset > IXGBE_MAX_FLX_SOURCE_OFF ||
2130 raw_spec->offset % 2 ||
2131 raw_spec->limit != 0 ||
2132 raw_spec->length != 2 ||
2133 /* pattern can't be 0xffff */
2134 (raw_spec->pattern[0] == 0xff &&
2135 raw_spec->pattern[1] == 0xff)) {
2136 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2137 rte_flow_error_set(error, EINVAL,
2138 RTE_FLOW_ERROR_TYPE_ITEM,
2139 item, "Not supported by fdir filter");
2143 /* check pattern mask */
2144 if (raw_mask->pattern[0] != 0xff ||
2145 raw_mask->pattern[1] != 0xff) {
2146 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2147 rte_flow_error_set(error, EINVAL,
2148 RTE_FLOW_ERROR_TYPE_ITEM,
2149 item, "Not supported by fdir filter");
2153 rule->mask.flex_bytes_mask = 0xffff;
/* Store the two flex bytes with pattern[1] in the high byte. */
2154 rule->ixgbe_fdir.formatted.flex_bytes =
2155 (((uint16_t)raw_spec->pattern[1]) << 8) |
2156 raw_spec->pattern[0];
2157 rule->flex_bytes_offset = raw_spec->offset;
2160 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2161 /* check if the next not void item is END */
2162 item = next_no_fuzzy_pattern(pattern, item);
2163 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2164 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2165 rte_flow_error_set(error, EINVAL,
2166 RTE_FLOW_ERROR_TYPE_ITEM,
2167 item, "Not supported by fdir filter");
/* Pattern accepted — hand off attr/action validation. */
2172 return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
2175 #define NVGRE_PROTOCOL 0x6558
2178 * Parse the rule to see if it is a VxLAN or NVGRE flow director rule.
2179 * And get the flow director filter info BTW.
2181 * The first not void item must be ETH.
2182 * The second not void item must be IPV4/ IPV6.
2183 * The third not void item must be NVGRE.
2184 * The next not void item must be END.
2186 * The first not void item must be ETH.
2187 * The second not void item must be IPV4/ IPV6.
2188 * The third not void item must be NVGRE.
2189 * The next not void item must be END.
2191 * The first not void action should be QUEUE or DROP.
2192 * The second not void optional action should be MARK,
2193 * mark_id is a uint32_t number.
2194 * The next not void action should be END.
2195 * VxLAN pattern example:
2198 * IPV4/IPV6 NULL NULL
2200 * VxLAN vni{0x00, 0x32, 0x54} {0xFF, 0xFF, 0xFF}
2201 * MAC VLAN tci 0x2016 0xEFFF
2203 * NVGRE pattern example:
2206 * IPV4/IPV6 NULL NULL
2207 * NVGRE protocol 0x6558 0xFFFF
2208 * tni{0x00, 0x32, 0x54} {0xFF, 0xFF, 0xFF}
2209 * MAC VLAN tci 0x2016 0xEFFF
2211 * other members in mask and spec should set to 0x00.
2212 * item->last should be NULL.
2215 ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
2216 const struct rte_flow_item pattern[],
2217 const struct rte_flow_action actions[],
2218 struct ixgbe_fdir_rule *rule,
2219 struct rte_flow_error *error)
2221 const struct rte_flow_item *item;
2222 const struct rte_flow_item_vxlan *vxlan_spec;
2223 const struct rte_flow_item_vxlan *vxlan_mask;
2224 const struct rte_flow_item_nvgre *nvgre_spec;
2225 const struct rte_flow_item_nvgre *nvgre_mask;
2226 const struct rte_flow_item_eth *eth_spec;
2227 const struct rte_flow_item_eth *eth_mask;
2228 const struct rte_flow_item_vlan *vlan_spec;
2229 const struct rte_flow_item_vlan *vlan_mask;
2233 rte_flow_error_set(error, EINVAL,
2234 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
2235 NULL, "NULL pattern.");
2240 rte_flow_error_set(error, EINVAL,
2241 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
2242 NULL, "NULL action.");
2247 rte_flow_error_set(error, EINVAL,
2248 RTE_FLOW_ERROR_TYPE_ATTR,
2249 NULL, "NULL attribute.");
2254 * Some fields may not be provided. Set spec to 0 and mask to default
2255 * value. So, we need not do anything for the not provided fields later.
2257 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2258 memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
2259 rule->mask.vlan_tci_mask = 0;
2262 * The first not void item should be
2263 * MAC or IPv4 or IPv6 or UDP or VxLAN.
2265 item = next_no_void_pattern(pattern, NULL);
2266 if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
2267 item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
2268 item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
2269 item->type != RTE_FLOW_ITEM_TYPE_UDP &&
2270 item->type != RTE_FLOW_ITEM_TYPE_VXLAN &&
2271 item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
2272 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2273 rte_flow_error_set(error, EINVAL,
2274 RTE_FLOW_ERROR_TYPE_ITEM,
2275 item, "Not supported by fdir filter");
2279 rule->mode = RTE_FDIR_MODE_PERFECT_TUNNEL;
2282 if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
2283 /* Only used to describe the protocol stack. */
2284 if (item->spec || item->mask) {
2285 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2286 rte_flow_error_set(error, EINVAL,
2287 RTE_FLOW_ERROR_TYPE_ITEM,
2288 item, "Not supported by fdir filter");
2291 /* Not supported last point for range*/
2293 rte_flow_error_set(error, EINVAL,
2294 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2295 item, "Not supported last point for range");
2299 /* Check if the next not void item is IPv4 or IPv6. */
2300 item = next_no_void_pattern(pattern, item);
2301 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
2302 item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
2303 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2304 rte_flow_error_set(error, EINVAL,
2305 RTE_FLOW_ERROR_TYPE_ITEM,
2306 item, "Not supported by fdir filter");
2312 if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
2313 item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
2314 /* Only used to describe the protocol stack. */
2315 if (item->spec || item->mask) {
2316 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2317 rte_flow_error_set(error, EINVAL,
2318 RTE_FLOW_ERROR_TYPE_ITEM,
2319 item, "Not supported by fdir filter");
2322 /*Not supported last point for range*/
2324 rte_flow_error_set(error, EINVAL,
2325 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2326 item, "Not supported last point for range");
2330 /* Check if the next not void item is UDP or NVGRE. */
2331 item = next_no_void_pattern(pattern, item);
2332 if (item->type != RTE_FLOW_ITEM_TYPE_UDP &&
2333 item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
2334 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2335 rte_flow_error_set(error, EINVAL,
2336 RTE_FLOW_ERROR_TYPE_ITEM,
2337 item, "Not supported by fdir filter");
2343 if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
2344 /* Only used to describe the protocol stack. */
2345 if (item->spec || item->mask) {
2346 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2347 rte_flow_error_set(error, EINVAL,
2348 RTE_FLOW_ERROR_TYPE_ITEM,
2349 item, "Not supported by fdir filter");
2352 /*Not supported last point for range*/
2354 rte_flow_error_set(error, EINVAL,
2355 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2356 item, "Not supported last point for range");
2360 /* Check if the next not void item is VxLAN. */
2361 item = next_no_void_pattern(pattern, item);
2362 if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
2363 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2364 rte_flow_error_set(error, EINVAL,
2365 RTE_FLOW_ERROR_TYPE_ITEM,
2366 item, "Not supported by fdir filter");
2371 /* Get the VxLAN info */
2372 if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
2373 rule->ixgbe_fdir.formatted.tunnel_type =
2374 IXGBE_FDIR_VXLAN_TUNNEL_TYPE;
2376 /* Only care about VNI, others should be masked. */
2378 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2379 rte_flow_error_set(error, EINVAL,
2380 RTE_FLOW_ERROR_TYPE_ITEM,
2381 item, "Not supported by fdir filter");
2384 /*Not supported last point for range*/
2386 rte_flow_error_set(error, EINVAL,
2387 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2388 item, "Not supported last point for range");
2391 rule->b_mask = TRUE;
2393 /* Tunnel type is always meaningful. */
2394 rule->mask.tunnel_type_mask = 1;
2397 (const struct rte_flow_item_vxlan *)item->mask;
2398 if (vxlan_mask->flags) {
2399 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2400 rte_flow_error_set(error, EINVAL,
2401 RTE_FLOW_ERROR_TYPE_ITEM,
2402 item, "Not supported by fdir filter");
2405 /* VNI must be totally masked or not. */
2406 if ((vxlan_mask->vni[0] || vxlan_mask->vni[1] ||
2407 vxlan_mask->vni[2]) &&
2408 ((vxlan_mask->vni[0] != 0xFF) ||
2409 (vxlan_mask->vni[1] != 0xFF) ||
2410 (vxlan_mask->vni[2] != 0xFF))) {
2411 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2412 rte_flow_error_set(error, EINVAL,
2413 RTE_FLOW_ERROR_TYPE_ITEM,
2414 item, "Not supported by fdir filter");
2418 rte_memcpy(&rule->mask.tunnel_id_mask, vxlan_mask->vni,
2419 RTE_DIM(vxlan_mask->vni));
2422 rule->b_spec = TRUE;
2423 vxlan_spec = (const struct rte_flow_item_vxlan *)
2425 rte_memcpy(((uint8_t *)
2426 &rule->ixgbe_fdir.formatted.tni_vni),
2427 vxlan_spec->vni, RTE_DIM(vxlan_spec->vni));
2431 /* Get the NVGRE info */
2432 if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE) {
2433 rule->ixgbe_fdir.formatted.tunnel_type =
2434 IXGBE_FDIR_NVGRE_TUNNEL_TYPE;
2437 * Only care about flags0, flags1, protocol and TNI,
2438 * others should be masked.
2441 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2442 rte_flow_error_set(error, EINVAL,
2443 RTE_FLOW_ERROR_TYPE_ITEM,
2444 item, "Not supported by fdir filter");
2447 /*Not supported last point for range*/
2449 rte_flow_error_set(error, EINVAL,
2450 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2451 item, "Not supported last point for range");
2454 rule->b_mask = TRUE;
2456 /* Tunnel type is always meaningful. */
2457 rule->mask.tunnel_type_mask = 1;
2460 (const struct rte_flow_item_nvgre *)item->mask;
2461 if (nvgre_mask->flow_id) {
2462 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2463 rte_flow_error_set(error, EINVAL,
2464 RTE_FLOW_ERROR_TYPE_ITEM,
2465 item, "Not supported by fdir filter");
2468 if (nvgre_mask->protocol &&
2469 nvgre_mask->protocol != 0xFFFF) {
2470 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2471 rte_flow_error_set(error, EINVAL,
2472 RTE_FLOW_ERROR_TYPE_ITEM,
2473 item, "Not supported by fdir filter");
2476 if (nvgre_mask->c_k_s_rsvd0_ver &&
2477 nvgre_mask->c_k_s_rsvd0_ver !=
2478 rte_cpu_to_be_16(0xFFFF)) {
2479 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2480 rte_flow_error_set(error, EINVAL,
2481 RTE_FLOW_ERROR_TYPE_ITEM,
2482 item, "Not supported by fdir filter");
2485 /* TNI must be totally masked or not. */
2486 if (nvgre_mask->tni[0] &&
2487 ((nvgre_mask->tni[0] != 0xFF) ||
2488 (nvgre_mask->tni[1] != 0xFF) ||
2489 (nvgre_mask->tni[2] != 0xFF))) {
2490 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2491 rte_flow_error_set(error, EINVAL,
2492 RTE_FLOW_ERROR_TYPE_ITEM,
2493 item, "Not supported by fdir filter");
2496 /* tni is a 24-bits bit field */
2497 rte_memcpy(&rule->mask.tunnel_id_mask, nvgre_mask->tni,
2498 RTE_DIM(nvgre_mask->tni));
2499 rule->mask.tunnel_id_mask <<= 8;
2502 rule->b_spec = TRUE;
2504 (const struct rte_flow_item_nvgre *)item->spec;
2505 if (nvgre_spec->c_k_s_rsvd0_ver !=
2506 rte_cpu_to_be_16(0x2000) &&
2507 nvgre_mask->c_k_s_rsvd0_ver) {
2508 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2509 rte_flow_error_set(error, EINVAL,
2510 RTE_FLOW_ERROR_TYPE_ITEM,
2511 item, "Not supported by fdir filter");
2514 if (nvgre_mask->protocol &&
2515 nvgre_spec->protocol !=
2516 rte_cpu_to_be_16(NVGRE_PROTOCOL)) {
2517 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2518 rte_flow_error_set(error, EINVAL,
2519 RTE_FLOW_ERROR_TYPE_ITEM,
2520 item, "Not supported by fdir filter");
2523 /* tni is a 24-bits bit field */
2524 rte_memcpy(&rule->ixgbe_fdir.formatted.tni_vni,
2525 nvgre_spec->tni, RTE_DIM(nvgre_spec->tni));
2529 /* check if the next not void item is MAC */
2530 item = next_no_void_pattern(pattern, item);
2531 if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
2532 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2533 rte_flow_error_set(error, EINVAL,
2534 RTE_FLOW_ERROR_TYPE_ITEM,
2535 item, "Not supported by fdir filter");
2540 * Only support vlan and dst MAC address,
2541 * others should be masked.
2545 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2546 rte_flow_error_set(error, EINVAL,
2547 RTE_FLOW_ERROR_TYPE_ITEM,
2548 item, "Not supported by fdir filter");
2551 /*Not supported last point for range*/
2553 rte_flow_error_set(error, EINVAL,
2554 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2555 item, "Not supported last point for range");
2558 rule->b_mask = TRUE;
2559 eth_mask = (const struct rte_flow_item_eth *)item->mask;
2561 /* Ether type should be masked. */
2562 if (eth_mask->type) {
2563 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2564 rte_flow_error_set(error, EINVAL,
2565 RTE_FLOW_ERROR_TYPE_ITEM,
2566 item, "Not supported by fdir filter");
2570 /* src MAC address should be masked. */
2571 for (j = 0; j < ETHER_ADDR_LEN; j++) {
2572 if (eth_mask->src.addr_bytes[j]) {
2574 sizeof(struct ixgbe_fdir_rule));
2575 rte_flow_error_set(error, EINVAL,
2576 RTE_FLOW_ERROR_TYPE_ITEM,
2577 item, "Not supported by fdir filter");
2581 rule->mask.mac_addr_byte_mask = 0;
2582 for (j = 0; j < ETHER_ADDR_LEN; j++) {
2583 /* It's a per byte mask. */
2584 if (eth_mask->dst.addr_bytes[j] == 0xFF) {
2585 rule->mask.mac_addr_byte_mask |= 0x1 << j;
2586 } else if (eth_mask->dst.addr_bytes[j]) {
2587 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2588 rte_flow_error_set(error, EINVAL,
2589 RTE_FLOW_ERROR_TYPE_ITEM,
2590 item, "Not supported by fdir filter");
2595 /* When no vlan, considered as full mask. */
2596 rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
2599 rule->b_spec = TRUE;
2600 eth_spec = (const struct rte_flow_item_eth *)item->spec;
2602 /* Get the dst MAC. */
2603 for (j = 0; j < ETHER_ADDR_LEN; j++) {
2604 rule->ixgbe_fdir.formatted.inner_mac[j] =
2605 eth_spec->dst.addr_bytes[j];
2610 * Check if the next not void item is vlan or ipv4.
2611 * IPv6 is not supported.
2613 item = next_no_void_pattern(pattern, item);
2614 if ((item->type != RTE_FLOW_ITEM_TYPE_VLAN) &&
2615 (item->type != RTE_FLOW_ITEM_TYPE_IPV4)) {
2616 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2617 rte_flow_error_set(error, EINVAL,
2618 RTE_FLOW_ERROR_TYPE_ITEM,
2619 item, "Not supported by fdir filter");
2622 /*Not supported last point for range*/
2624 rte_flow_error_set(error, EINVAL,
2625 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2626 item, "Not supported last point for range");
2630 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
2631 if (!(item->spec && item->mask)) {
2632 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2633 rte_flow_error_set(error, EINVAL,
2634 RTE_FLOW_ERROR_TYPE_ITEM,
2635 item, "Not supported by fdir filter");
2639 vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
2640 vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
2642 rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
2644 rule->mask.vlan_tci_mask = vlan_mask->tci;
2645 rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
2646 /* More than one tags are not supported. */
2648 /* check if the next not void item is END */
2649 item = next_no_void_pattern(pattern, item);
2651 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2652 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2653 rte_flow_error_set(error, EINVAL,
2654 RTE_FLOW_ERROR_TYPE_ITEM,
2655 item, "Not supported by fdir filter");
2661 * If the tags is 0, it means don't care about the VLAN.
2665 return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
/*
 * Parse an rte_flow rule into an ixgbe FDIR (flow director) rule.
 * Tries the normal (non-tunnel) parser first, then falls back to the
 * tunnel (VXLAN/NVGRE) parser, and finally applies device-level
 * restrictions to the parsed result.
 * NOTE(review): this extract is missing several physical lines of the
 * original file (early returns, braces); comments describe only the
 * visible logic.
 */
2669 ixgbe_parse_fdir_filter(struct rte_eth_dev *dev,
2670 const struct rte_flow_attr *attr,
2671 const struct rte_flow_item pattern[],
2672 const struct rte_flow_action actions[],
2673 struct ixgbe_fdir_rule *rule,
2674 struct rte_flow_error *error)
2677 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2678 enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
/* FDIR is only supported on these MAC generations. */
2680 if (hw->mac.type != ixgbe_mac_82599EB &&
2681 hw->mac.type != ixgbe_mac_X540 &&
2682 hw->mac.type != ixgbe_mac_X550 &&
2683 hw->mac.type != ixgbe_mac_X550EM_x &&
2684 hw->mac.type != ixgbe_mac_X550EM_a)
/* First try to parse the pattern as a plain L3/L4 FDIR rule... */
2687 ret = ixgbe_parse_fdir_filter_normal(dev, attr, pattern,
2688 actions, rule, error);
/* ...then fall back to the tunnel (VXLAN/NVGRE) parser. */
2693 ret = ixgbe_parse_fdir_filter_tunnel(attr, pattern,
2694 actions, rule, error);
/* 82599 rejects a DROP rule that also matches non-zero L4 ports
 * (presumably a hardware limitation -- TODO confirm). */
2701 if (hw->mac.type == ixgbe_mac_82599EB &&
2702 rule->fdirflags == IXGBE_FDIRCMD_DROP &&
2703 (rule->ixgbe_fdir.formatted.src_port != 0 ||
2704 rule->ixgbe_fdir.formatted.dst_port != 0))
/* The rule's mode must match the device's configured FDIR mode,
 * and FDIR must be enabled at all. */
2707 if (fdir_mode == RTE_FDIR_MODE_NONE ||
2708 fdir_mode != rule->mode)
/* The target RX queue must exist on this port. */
2711 if (rule->queue >= dev->data->nb_rx_queues)
/*
 * One-time initialization of the file-scope TAILQ lists that track
 * filters and flows created through the rte_flow API.
 */
2718 ixgbe_filterlist_init(void)
2720 TAILQ_INIT(&filter_ntuple_list);
2721 TAILQ_INIT(&filter_ethertype_list);
2722 TAILQ_INIT(&filter_syn_list);
2723 TAILQ_INIT(&filter_fdir_list);
2724 TAILQ_INIT(&filter_l2_tunnel_list);
2725 TAILQ_INIT(&ixgbe_flow_list);
/*
 * Release all software bookkeeping for rte_flow filters: pop and free
 * every element of each per-type list, then free the tracked rte_flow
 * objects themselves.  Touches lists only, not hardware state.
 * NOTE(review): this extract is missing some physical lines (second
 * TAILQ_REMOVE arguments, closing braces); comments describe only the
 * visible logic.
 */
2729 ixgbe_filterlist_flush(void)
2731 struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
2732 struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
2733 struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
2734 struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2735 struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
2736 struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
/* Drain each list head-first; TAILQ_FIRST returns NULL when empty. */
2738 while ((ntuple_filter_ptr = TAILQ_FIRST(&filter_ntuple_list))) {
2739 TAILQ_REMOVE(&filter_ntuple_list,
2742 rte_free(ntuple_filter_ptr);
2745 while ((ethertype_filter_ptr = TAILQ_FIRST(&filter_ethertype_list))) {
2746 TAILQ_REMOVE(&filter_ethertype_list,
2747 ethertype_filter_ptr,
2749 rte_free(ethertype_filter_ptr);
2752 while ((syn_filter_ptr = TAILQ_FIRST(&filter_syn_list))) {
2753 TAILQ_REMOVE(&filter_syn_list,
2756 rte_free(syn_filter_ptr);
2759 while ((l2_tn_filter_ptr = TAILQ_FIRST(&filter_l2_tunnel_list))) {
2760 TAILQ_REMOVE(&filter_l2_tunnel_list,
2763 rte_free(l2_tn_filter_ptr);
2766 while ((fdir_rule_ptr = TAILQ_FIRST(&filter_fdir_list))) {
2767 TAILQ_REMOVE(&filter_fdir_list,
2770 rte_free(fdir_rule_ptr);
/* The flow list owns the rte_flow object as well as its wrapper. */
2773 while ((ixgbe_flow_mem_ptr = TAILQ_FIRST(&ixgbe_flow_list))) {
2774 TAILQ_REMOVE(&ixgbe_flow_list,
2777 rte_free(ixgbe_flow_mem_ptr->flow);
2778 rte_free(ixgbe_flow_mem_ptr);
2783  * Create or destroy a flow rule.
2784  * Theoretically one rule can match more than one filter type.
2785  * We will let it use the filter which it hits first.
2786  * So, the sequence matters.
/*
 * Create a flow rule: try each filter parser in a fixed priority order
 * (ntuple, ethertype, SYN, FDIR, L2 tunnel); the first parser that
 * accepts the pattern programs the corresponding hardware filter and
 * records the flow on the matching bookkeeping list.
 * Returns the allocated rte_flow on success, NULL on failure (visible
 * from the NULL-flow early return and the trailing error path).
 * NOTE(review): this extract is missing many physical lines (labels,
 * gotos, returns, braces), and "ð" below appears to be a mis-encoded
 * "&eth" (i.e. "&ethertype_filter") -- verify against the original file.
 */
2788 static struct rte_flow *
2789 ixgbe_flow_create(struct rte_eth_dev *dev,
2790 const struct rte_flow_attr *attr,
2791 const struct rte_flow_item pattern[],
2792 const struct rte_flow_action actions[],
2793 struct rte_flow_error *error)
2796 struct rte_eth_ntuple_filter ntuple_filter;
2797 struct rte_eth_ethertype_filter ethertype_filter;
2798 struct rte_eth_syn_filter syn_filter;
2799 struct ixgbe_fdir_rule fdir_rule;
2800 struct rte_eth_l2_tunnel_conf l2_tn_filter;
2801 struct ixgbe_hw_fdir_info *fdir_info =
2802 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
2803 struct rte_flow *flow = NULL;
2804 struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
2805 struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
2806 struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
2807 struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2808 struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
2809 struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
/* Tracks whether this call set the global FDIR mask for the first
 * time, so the error path can roll mask_added back. */
2810 uint8_t first_mask = FALSE;
/* Allocate the flow handle; on failure return the NULL pointer. */
2812 flow = rte_zmalloc("ixgbe_rte_flow", sizeof(struct rte_flow), 0);
2814 PMD_DRV_LOG(ERR, "failed to allocate memory");
2815 return (struct rte_flow *)flow;
/* Wrapper element that links the flow onto ixgbe_flow_list. */
2817 ixgbe_flow_mem_ptr = rte_zmalloc("ixgbe_flow_mem",
2818 sizeof(struct ixgbe_flow_mem), 0);
2819 if (!ixgbe_flow_mem_ptr) {
2820 PMD_DRV_LOG(ERR, "failed to allocate memory");
2824 ixgbe_flow_mem_ptr->flow = flow;
2825 TAILQ_INSERT_TAIL(&ixgbe_flow_list,
2826 ixgbe_flow_mem_ptr, entries);
/* 1) ntuple filter. */
2828 memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
2829 ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
2830 actions, &ntuple_filter, error);
2832 #ifdef RTE_LIBRTE_SECURITY
/* An ESP pattern is handled by the security path, not as a flow. */
2833 /* ESP flow not really a flow*/
2834 if (ntuple_filter.proto == IPPROTO_ESP)
2839 ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
2841 ntuple_filter_ptr = rte_zmalloc("ixgbe_ntuple_filter",
2842 sizeof(struct ixgbe_ntuple_filter_ele), 0);
2843 if (!ntuple_filter_ptr) {
2844 PMD_DRV_LOG(ERR, "failed to allocate memory");
2847 rte_memcpy(&ntuple_filter_ptr->filter_info,
2849 sizeof(struct rte_eth_ntuple_filter));
2850 TAILQ_INSERT_TAIL(&filter_ntuple_list,
2851 ntuple_filter_ptr, entries);
2852 flow->rule = ntuple_filter_ptr;
2853 flow->filter_type = RTE_ETH_FILTER_NTUPLE;
/* 2) ethertype filter. */
2859 memset(ðertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
2860 ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
2861 actions, ðertype_filter, error);
2863 ret = ixgbe_add_del_ethertype_filter(dev,
2864 ðertype_filter, TRUE);
2866 ethertype_filter_ptr = rte_zmalloc(
2867 "ixgbe_ethertype_filter",
2868 sizeof(struct ixgbe_ethertype_filter_ele), 0);
2869 if (!ethertype_filter_ptr) {
2870 PMD_DRV_LOG(ERR, "failed to allocate memory");
2873 rte_memcpy(ðertype_filter_ptr->filter_info,
2875 sizeof(struct rte_eth_ethertype_filter));
2876 TAILQ_INSERT_TAIL(&filter_ethertype_list,
2877 ethertype_filter_ptr, entries);
2878 flow->rule = ethertype_filter_ptr;
2879 flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
/* 3) TCP SYN filter. */
2885 memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
2886 ret = ixgbe_parse_syn_filter(dev, attr, pattern,
2887 actions, &syn_filter, error);
2889 ret = ixgbe_syn_filter_set(dev, &syn_filter, TRUE);
2891 syn_filter_ptr = rte_zmalloc("ixgbe_syn_filter",
2892 sizeof(struct ixgbe_eth_syn_filter_ele), 0);
2893 if (!syn_filter_ptr) {
2894 PMD_DRV_LOG(ERR, "failed to allocate memory");
2897 rte_memcpy(&syn_filter_ptr->filter_info,
2899 sizeof(struct rte_eth_syn_filter));
2900 TAILQ_INSERT_TAIL(&filter_syn_list,
2903 flow->rule = syn_filter_ptr;
2904 flow->filter_type = RTE_ETH_FILTER_SYN;
/* 4) flow director (FDIR) filter. */
2910 memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
2911 ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
2912 actions, &fdir_rule, error);
/* FDIR has a single global input mask; once set it cannot be
 * removed, only matched by subsequent rules. */
2914 /* A mask cannot be deleted. */
2915 if (fdir_rule.b_mask) {
2916 if (!fdir_info->mask_added) {
2917 /* It's the first time the mask is set. */
2918 rte_memcpy(&fdir_info->mask,
2920 sizeof(struct ixgbe_hw_fdir_mask));
2921 fdir_info->flex_bytes_offset =
2922 fdir_rule.flex_bytes_offset;
2924 if (fdir_rule.mask.flex_bytes_mask)
2925 ixgbe_fdir_set_flexbytes_offset(dev,
2926 fdir_rule.flex_bytes_offset);
2928 ret = ixgbe_fdir_set_input_mask(dev);
2932 fdir_info->mask_added = TRUE;
/* Subsequent rules must use the identical global mask. */
2936 * Only support one global mask,
2937 * all the masks should be the same.
2939 ret = memcmp(&fdir_info->mask,
2941 sizeof(struct ixgbe_hw_fdir_mask));
2945 if (fdir_info->flex_bytes_offset !=
2946 fdir_rule.flex_bytes_offset)
/* Program the concrete match (spec) into hardware. */
2951 if (fdir_rule.b_spec) {
2952 ret = ixgbe_fdir_filter_program(dev, &fdir_rule,
2955 fdir_rule_ptr = rte_zmalloc("ixgbe_fdir_filter",
2956 sizeof(struct ixgbe_fdir_rule_ele), 0);
2957 if (!fdir_rule_ptr) {
2958 PMD_DRV_LOG(ERR, "failed to allocate memory");
2961 rte_memcpy(&fdir_rule_ptr->filter_info,
2963 sizeof(struct ixgbe_fdir_rule));
2964 TAILQ_INSERT_TAIL(&filter_fdir_list,
2965 fdir_rule_ptr, entries);
2966 flow->rule = fdir_rule_ptr;
2967 flow->filter_type = RTE_ETH_FILTER_FDIR;
/* Roll back mask_added if this call set it but programming failed. */
2974 * clean the mask_added flag if fail to
2978 fdir_info->mask_added = FALSE;
/* 5) L2 tunnel filter (last resort). */
2986 memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
2987 ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
2988 actions, &l2_tn_filter, error);
2990 ret = ixgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_filter, FALSE);
2992 l2_tn_filter_ptr = rte_zmalloc("ixgbe_l2_tn_filter",
2993 sizeof(struct ixgbe_eth_l2_tunnel_conf_ele), 0);
2994 if (!l2_tn_filter_ptr) {
2995 PMD_DRV_LOG(ERR, "failed to allocate memory");
2998 rte_memcpy(&l2_tn_filter_ptr->filter_info,
3000 sizeof(struct rte_eth_l2_tunnel_conf));
3001 TAILQ_INSERT_TAIL(&filter_l2_tunnel_list,
3002 l2_tn_filter_ptr, entries);
3003 flow->rule = l2_tn_filter_ptr;
3004 flow->filter_type = RTE_ETH_FILTER_L2_TUNNEL;
/* Common error path: unlink and free the bookkeeping, report the
 * failure through the rte_flow error object. */
3010 TAILQ_REMOVE(&ixgbe_flow_list,
3011 ixgbe_flow_mem_ptr, entries);
3012 rte_flow_error_set(error, -ret,
3013 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
3014 "Failed to create flow.");
3015 rte_free(ixgbe_flow_mem_ptr);
3021  * Check if the flow rule is supported by ixgbe.
3022  * It only checks the format. It does not guarantee the rule can be
3023  * programmed into the HW, because there may not be enough room for it.
/*
 * Validate a flow rule's format by running it through each parser in
 * the same priority order as ixgbe_flow_create, without programming
 * any hardware state.
 * NOTE(review): this extract is missing the early-return lines after
 * each parse attempt, and "ð" below appears to be a mis-encoded
 * "&eth" (i.e. "&ethertype_filter") -- verify against the original.
 */
3026 ixgbe_flow_validate(struct rte_eth_dev *dev,
3027 const struct rte_flow_attr *attr,
3028 const struct rte_flow_item pattern[],
3029 const struct rte_flow_action actions[],
3030 struct rte_flow_error *error)
3032 struct rte_eth_ntuple_filter ntuple_filter;
3033 struct rte_eth_ethertype_filter ethertype_filter;
3034 struct rte_eth_syn_filter syn_filter;
3035 struct rte_eth_l2_tunnel_conf l2_tn_filter;
3036 struct ixgbe_fdir_rule fdir_rule;
/* Try each filter type in turn; same order as flow creation. */
3039 memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
3040 ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
3041 actions, &ntuple_filter, error);
3045 memset(ðertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
3046 ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
3047 actions, ðertype_filter, error);
3051 memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
3052 ret = ixgbe_parse_syn_filter(dev, attr, pattern,
3053 actions, &syn_filter, error);
3057 memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
3058 ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
3059 actions, &fdir_rule, error);
3063 memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
3064 ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
3065 actions, &l2_tn_filter, error);
3070 /* Destroy a flow rule on ixgbe. */
/*
 * Destroy one flow rule created earlier through the rte_flow API.
 * Dispatches on the flow's recorded filter type, removes the filter
 * from hardware via the matching helper, unlinks its element from the
 * per-type list, and finally drops the flow from ixgbe_flow_list.
 * NOTE(review): this extract is missing some physical lines (returns,
 * braces, pmd_flow->rule casts), and "ð" below appears to be a
 * mis-encoded "&eth" -- verify against the original file.
 */
3072 ixgbe_flow_destroy(struct rte_eth_dev *dev,
3073 struct rte_flow *flow,
3074 struct rte_flow_error *error)
3077 struct rte_flow *pmd_flow = flow;
3078 enum rte_filter_type filter_type = pmd_flow->filter_type;
3079 struct rte_eth_ntuple_filter ntuple_filter;
3080 struct rte_eth_ethertype_filter ethertype_filter;
3081 struct rte_eth_syn_filter syn_filter;
3082 struct ixgbe_fdir_rule fdir_rule;
3083 struct rte_eth_l2_tunnel_conf l2_tn_filter;
3084 struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
3085 struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
3086 struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
3087 struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
3088 struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
3089 struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
3090 struct ixgbe_hw_fdir_info *fdir_info =
3091 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
/* Each case: copy saved filter info, ask hardware to delete it
 * (final FALSE/TRUE+FALSE argument = delete), then free the list
 * element on success. */
3093 switch (filter_type) {
3094 case RTE_ETH_FILTER_NTUPLE:
3095 ntuple_filter_ptr = (struct ixgbe_ntuple_filter_ele *)
3097 rte_memcpy(&ntuple_filter,
3098 &ntuple_filter_ptr->filter_info,
3099 sizeof(struct rte_eth_ntuple_filter));
3100 ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, FALSE);
3102 TAILQ_REMOVE(&filter_ntuple_list,
3103 ntuple_filter_ptr, entries);
3104 rte_free(ntuple_filter_ptr);
3107 case RTE_ETH_FILTER_ETHERTYPE:
3108 ethertype_filter_ptr = (struct ixgbe_ethertype_filter_ele *)
3110 rte_memcpy(ðertype_filter,
3111 ðertype_filter_ptr->filter_info,
3112 sizeof(struct rte_eth_ethertype_filter));
3113 ret = ixgbe_add_del_ethertype_filter(dev,
3114 ðertype_filter, FALSE);
3116 TAILQ_REMOVE(&filter_ethertype_list,
3117 ethertype_filter_ptr, entries);
3118 rte_free(ethertype_filter_ptr);
3121 case RTE_ETH_FILTER_SYN:
3122 syn_filter_ptr = (struct ixgbe_eth_syn_filter_ele *)
3124 rte_memcpy(&syn_filter,
3125 &syn_filter_ptr->filter_info,
3126 sizeof(struct rte_eth_syn_filter));
3127 ret = ixgbe_syn_filter_set(dev, &syn_filter, FALSE);
3129 TAILQ_REMOVE(&filter_syn_list,
3130 syn_filter_ptr, entries);
3131 rte_free(syn_filter_ptr);
3134 case RTE_ETH_FILTER_FDIR:
3135 fdir_rule_ptr = (struct ixgbe_fdir_rule_ele *)pmd_flow->rule;
3136 rte_memcpy(&fdir_rule,
3137 &fdir_rule_ptr->filter_info,
3138 sizeof(struct ixgbe_fdir_rule));
3139 ret = ixgbe_fdir_filter_program(dev, &fdir_rule, TRUE, FALSE);
3141 TAILQ_REMOVE(&filter_fdir_list,
3142 fdir_rule_ptr, entries);
3143 rte_free(fdir_rule_ptr);
/* Once the last FDIR rule is gone, the global input mask may
 * be re-programmed by the next created rule. */
3144 if (TAILQ_EMPTY(&filter_fdir_list))
3145 fdir_info->mask_added = false;
3148 case RTE_ETH_FILTER_L2_TUNNEL:
3149 l2_tn_filter_ptr = (struct ixgbe_eth_l2_tunnel_conf_ele *)
3151 rte_memcpy(&l2_tn_filter, &l2_tn_filter_ptr->filter_info,
3152 sizeof(struct rte_eth_l2_tunnel_conf))
3153 ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_filter);
3155 TAILQ_REMOVE(&filter_l2_tunnel_list,
3156 l2_tn_filter_ptr, entries);
3157 rte_free(l2_tn_filter_ptr);
3161 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
/* Report failure via the rte_flow error object. */
3168 rte_flow_error_set(error, EINVAL,
3169 RTE_FLOW_ERROR_TYPE_HANDLE,
3170 NULL, "Failed to destroy flow");
/* Unlink and free the flow's wrapper from the master flow list. */
3174 TAILQ_FOREACH(ixgbe_flow_mem_ptr, &ixgbe_flow_list, entries) {
3175 if (ixgbe_flow_mem_ptr->flow == pmd_flow) {
3176 TAILQ_REMOVE(&ixgbe_flow_list,
3177 ixgbe_flow_mem_ptr, entries);
3178 rte_free(ixgbe_flow_mem_ptr);
3186 /* Destroy all flow rules associated with a port on ixgbe. */
/*
 * Destroy all flow rules on the port: clear every hardware filter
 * class, then drop the software bookkeeping via
 * ixgbe_filterlist_flush().
 * NOTE(review): this extract is missing the return statements after
 * the error branches; comments describe only the visible logic.
 */
3188 ixgbe_flow_flush(struct rte_eth_dev *dev,
3189 struct rte_flow_error *error)
/* These three clears report no error and are done unconditionally. */
3193 ixgbe_clear_all_ntuple_filter(dev);
3194 ixgbe_clear_all_ethertype_filter(dev);
3195 ixgbe_clear_syn_filter(dev);
/* FDIR and L2-tunnel clears can fail; surface that via error. */
3197 ret = ixgbe_clear_all_fdir_filter(dev);
3199 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
3200 NULL, "Failed to flush rule");
3204 ret = ixgbe_clear_all_l2_tn_filter(dev);
3206 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
3207 NULL, "Failed to flush rule");
3211 ixgbe_filterlist_flush();
3216 const struct rte_flow_ops ixgbe_flow_ops = {
3217 .validate = ixgbe_flow_validate,
3218 .create = ixgbe_flow_create,
3219 .destroy = ixgbe_flow_destroy,
3220 .flush = ixgbe_flow_flush,