1 From 813dc1da330eb21cf5ed399dfcff8ee7bde6aafd Mon Sep 17 00:00:00 2001
2 From: Chenmin Sun <chenmin.sun@intel.com>
3 Date: Fri, 17 Apr 2020 05:46:45 +0800
4 Subject: [DPDK 17/17] net/iavf: add support for FDIR basic rule
6 This patch adds the FDIR create/destroy/validate functions in AVF.
7 Common patterns and the queue/qgroup/passthru/drop actions are supported.
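
For illustration, an application could install such a rule through the
generic rte_flow API roughly as in the sketch below. This is a minimal
usage example, not part of the patch; the port id, addresses, UDP port
and queue index are placeholder values.

    #include <rte_errno.h>
    #include <rte_flow.h>
    #include <rte_ip.h>

    /* Steer IPv4/UDP packets for 192.168.0.1:4789 to Rx queue 3. */
    static int
    add_fdir_udp_rule(uint16_t port_id, struct rte_flow **flow)
    {
            struct rte_flow_attr attr = { .ingress = 1 };
            struct rte_flow_item_ipv4 ip_spec = {
                    .hdr.dst_addr = RTE_BE32(RTE_IPV4(192, 168, 0, 1)),
            };
            struct rte_flow_item_ipv4 ip_mask = {
                    .hdr.dst_addr = RTE_BE32(0xffffffff),
            };
            struct rte_flow_item_udp udp_spec = {
                    .hdr.dst_port = RTE_BE16(4789),
            };
            struct rte_flow_item_udp udp_mask = {
                    .hdr.dst_port = RTE_BE16(0xffff),
            };
            struct rte_flow_item pattern[] = {
                    { .type = RTE_FLOW_ITEM_TYPE_ETH },
                    { .type = RTE_FLOW_ITEM_TYPE_IPV4,
                      .spec = &ip_spec, .mask = &ip_mask },
                    { .type = RTE_FLOW_ITEM_TYPE_UDP,
                      .spec = &udp_spec, .mask = &udp_mask },
                    { .type = RTE_FLOW_ITEM_TYPE_END },
            };
            struct rte_flow_action_queue queue = { .index = 3 };
            struct rte_flow_action actions[] = {
                    { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                    { .type = RTE_FLOW_ACTION_TYPE_END },
            };
            struct rte_flow_error err;

            *flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
            return *flow ? 0 : -rte_errno;
    }

The FDIR engine added here translates the pattern and actions into a
VIRTCHNL_OP_ADD_FDIR_FILTER request that is sent to the PF.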
9 Signed-off-by: Simei Su <simei.su@intel.com>
10 Signed-off-by: Chenmin Sun <chenmin.sun@intel.com>
12 drivers/net/iavf/Makefile | 1 +
13 drivers/net/iavf/iavf.h | 18 +
14 drivers/net/iavf/iavf_fdir.c | 949 ++++++++++++++++++++++++++++++++++
15 drivers/net/iavf/iavf_vchnl.c | 154 +++++-
16 drivers/net/iavf/meson.build | 1 +
17 5 files changed, 1122 insertions(+), 1 deletion(-)
18 create mode 100644 drivers/net/iavf/iavf_fdir.c
20 diff --git a/drivers/net/iavf/Makefile b/drivers/net/iavf/Makefile
21 index 1bf0f26b5..193bc55a7 100644
22 --- a/drivers/net/iavf/Makefile
23 +++ b/drivers/net/iavf/Makefile
24 @@ -24,6 +24,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_ethdev.c
25 SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_vchnl.c
26 SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_rxtx.c
27 SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_generic_flow.c
28 +SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_fdir.c
29 ifeq ($(CONFIG_RTE_ARCH_X86), y)
30 SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_rxtx_vec_sse.c
32 diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
33 index 78bdaff20..5fb7881c9 100644
34 --- a/drivers/net/iavf/iavf.h
35 +++ b/drivers/net/iavf/iavf.h
36 @@ -92,6 +92,18 @@ TAILQ_HEAD(iavf_flow_list, rte_flow);
37 struct iavf_flow_parser_node;
38 TAILQ_HEAD(iavf_parser_list, iavf_flow_parser_node);
40 +struct iavf_fdir_conf {
41 + struct virtchnl_fdir_add add_fltr;
42 + struct virtchnl_fdir_del del_fltr;
48 +struct iavf_fdir_info {
49 + struct iavf_fdir_conf conf;
52 /* TODO: is that correct to assume the max number to be 16 ?*/
53 #define IAVF_MAX_MSIX_VECTORS 16
55 @@ -131,6 +143,8 @@ struct iavf_info {
56 rte_spinlock_t flow_ops_lock;
57 struct iavf_parser_list rss_parser_list;
58 struct iavf_parser_list dist_parser_list;
60 + struct iavf_fdir_info fdir; /* flow director info */
63 #define IAVF_MAX_PKT_TYPE 1024
64 @@ -252,4 +266,8 @@ int iavf_config_promisc(struct iavf_adapter *adapter, bool enable_unicast,
65 int iavf_add_del_eth_addr(struct iavf_adapter *adapter,
66 struct rte_ether_addr *addr, bool add);
67 int iavf_add_del_vlan(struct iavf_adapter *adapter, uint16_t vlanid, bool add);
68 +int iavf_fdir_add(struct iavf_adapter *adapter, struct iavf_fdir_conf *filter);
69 +int iavf_fdir_del(struct iavf_adapter *adapter, struct iavf_fdir_conf *filter);
70 +int iavf_fdir_check(struct iavf_adapter *adapter,
71 + struct iavf_fdir_conf *filter);
72 #endif /* _IAVF_ETHDEV_H_ */
73 diff --git a/drivers/net/iavf/iavf_fdir.c b/drivers/net/iavf/iavf_fdir.c
75 index 000000000..fc1a4f817
77 +++ b/drivers/net/iavf/iavf_fdir.c
79 +/* SPDX-License-Identifier: BSD-3-Clause
80 + * Copyright(c) 2019 Intel Corporation
83 +#include <sys/queue.h>
91 +#include <rte_ether.h>
92 +#include <rte_ethdev_driver.h>
93 +#include <rte_malloc.h>
94 +#include <rte_tailq.h>
97 +#include "iavf_generic_flow.h"
98 +#include "virtchnl.h"
99 +#include "iavf_rxtx.h"
101 +#define IAVF_FDIR_MAX_QREGION_SIZE 128
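+/* The IPv6 Traffic Class (TC) field occupies bits 20-27 of vtc_flow */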
103 +#define IAVF_FDIR_IPV6_TC_OFFSET 20
104 +#define IAVF_IPV6_TC_MASK (0xFF << IAVF_FDIR_IPV6_TC_OFFSET)
106 +#define IAVF_FDIR_INSET_ETH (\
107 + IAVF_INSET_ETHERTYPE)
109 +#define IAVF_FDIR_INSET_ETH_IPV4 (\
110 + IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
111 + IAVF_INSET_IPV4_PROTO | IAVF_INSET_IPV4_TOS | \
112 + IAVF_INSET_IPV4_TTL)
114 +#define IAVF_FDIR_INSET_ETH_IPV4_UDP (\
115 + IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
116 + IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
117 + IAVF_INSET_UDP_SRC_PORT | IAVF_INSET_UDP_DST_PORT)
119 +#define IAVF_FDIR_INSET_ETH_IPV4_TCP (\
120 + IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
121 + IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
122 + IAVF_INSET_TCP_SRC_PORT | IAVF_INSET_TCP_DST_PORT)
124 +#define IAVF_FDIR_INSET_ETH_IPV4_SCTP (\
125 + IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
126 + IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
127 + IAVF_INSET_SCTP_SRC_PORT | IAVF_INSET_SCTP_DST_PORT)
129 +#define IAVF_FDIR_INSET_ETH_IPV6 (\
130 + IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
131 + IAVF_INSET_IPV6_NEXT_HDR | IAVF_INSET_IPV6_TC | \
132 + IAVF_INSET_IPV6_HOP_LIMIT)
134 +#define IAVF_FDIR_INSET_ETH_IPV6_UDP (\
135 + IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
136 + IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
137 + IAVF_INSET_UDP_SRC_PORT | IAVF_INSET_UDP_DST_PORT)
139 +#define IAVF_FDIR_INSET_ETH_IPV6_TCP (\
140 + IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
141 + IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
142 + IAVF_INSET_TCP_SRC_PORT | IAVF_INSET_TCP_DST_PORT)
144 +#define IAVF_FDIR_INSET_ETH_IPV6_SCTP (\
145 + IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
146 + IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
147 + IAVF_INSET_SCTP_SRC_PORT | IAVF_INSET_SCTP_DST_PORT)
149 +#define IAVF_FDIR_INSET_GTPU (\
150 + IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
151 + IAVF_INSET_GTPU_TEID)
153 +#define IAVF_FDIR_INSET_GTPU_EH (\
154 + IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
155 + IAVF_INSET_GTPU_TEID | IAVF_INSET_GTPU_QFI)
157 +#define IAVF_FDIR_INSET_L2TPV3OIP (\
158 + IAVF_L2TPV3OIP_SESSION_ID)
160 +#define IAVF_FDIR_INSET_ESP (\
161 + IAVF_INSET_ESP_SPI)
163 +#define IAVF_FDIR_INSET_AH (\
166 +#define IAVF_FDIR_INSET_IPV4_NATT_ESP (\
167 + IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
168 + IAVF_INSET_ESP_SPI)
170 +#define IAVF_FDIR_INSET_IPV6_NATT_ESP (\
171 + IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
172 + IAVF_INSET_ESP_SPI)
174 +#define IAVF_FDIR_INSET_PFCP (\
175 + IAVF_INSET_PFCP_S_FIELD)
177 +static struct iavf_pattern_match_item iavf_fdir_pattern[] = {
178 + {iavf_pattern_ethertype, IAVF_FDIR_INSET_ETH, IAVF_INSET_NONE},
179 + {iavf_pattern_eth_ipv4, IAVF_FDIR_INSET_ETH_IPV4, IAVF_INSET_NONE},
180 + {iavf_pattern_eth_ipv4_udp, IAVF_FDIR_INSET_ETH_IPV4_UDP, IAVF_INSET_NONE},
181 + {iavf_pattern_eth_ipv4_tcp, IAVF_FDIR_INSET_ETH_IPV4_TCP, IAVF_INSET_NONE},
182 + {iavf_pattern_eth_ipv4_sctp, IAVF_FDIR_INSET_ETH_IPV4_SCTP, IAVF_INSET_NONE},
183 + {iavf_pattern_eth_ipv6, IAVF_FDIR_INSET_ETH_IPV6, IAVF_INSET_NONE},
184 + {iavf_pattern_eth_ipv6_udp, IAVF_FDIR_INSET_ETH_IPV6_UDP, IAVF_INSET_NONE},
185 + {iavf_pattern_eth_ipv6_tcp, IAVF_FDIR_INSET_ETH_IPV6_TCP, IAVF_INSET_NONE},
186 + {iavf_pattern_eth_ipv6_sctp, IAVF_FDIR_INSET_ETH_IPV6_SCTP, IAVF_INSET_NONE},
187 + {iavf_pattern_eth_ipv4_gtpu, IAVF_FDIR_INSET_GTPU, IAVF_INSET_NONE},
188 + {iavf_pattern_eth_ipv4_gtpu_eh, IAVF_FDIR_INSET_GTPU_EH, IAVF_INSET_NONE},
189 + {iavf_pattern_eth_ipv4_l2tpv3, IAVF_FDIR_INSET_L2TPV3OIP, IAVF_INSET_NONE},
190 + {iavf_pattern_eth_ipv6_l2tpv3, IAVF_FDIR_INSET_L2TPV3OIP, IAVF_INSET_NONE},
191 + {iavf_pattern_eth_ipv4_esp, IAVF_FDIR_INSET_ESP, IAVF_INSET_NONE},
192 + {iavf_pattern_eth_ipv6_esp, IAVF_FDIR_INSET_ESP, IAVF_INSET_NONE},
193 + {iavf_pattern_eth_ipv4_ah, IAVF_FDIR_INSET_AH, IAVF_INSET_NONE},
194 + {iavf_pattern_eth_ipv6_ah, IAVF_FDIR_INSET_AH, IAVF_INSET_NONE},
195 + {iavf_pattern_eth_ipv4_udp_esp, IAVF_FDIR_INSET_IPV4_NATT_ESP, IAVF_INSET_NONE},
196 + {iavf_pattern_eth_ipv6_udp_esp, IAVF_FDIR_INSET_IPV6_NATT_ESP, IAVF_INSET_NONE},
199 +static struct iavf_flow_parser iavf_fdir_parser;
202 +iavf_fdir_init(struct iavf_adapter *ad)
204 + struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
205 + struct iavf_flow_parser *parser;
207 + if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_FDIR_PF)
208 + parser = &iavf_fdir_parser;
212 + return iavf_register_parser(parser, ad);
216 +iavf_fdir_uninit(struct iavf_adapter *ad)
218 + struct iavf_flow_parser *parser;
220 + parser = &iavf_fdir_parser;
222 + iavf_unregister_parser(parser, ad);
226 +iavf_fdir_create(struct iavf_adapter *ad,
227 + struct rte_flow *flow,
229 + struct rte_flow_error *error)
231 + struct iavf_fdir_conf *filter = meta;
232 + struct iavf_fdir_conf *rule;
235 + rule = rte_zmalloc("fdir_entry", sizeof(*rule), 0);
237 + rte_flow_error_set(error, ENOMEM,
238 + RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
239 + "Failed to allocate memory");
243 + ret = iavf_fdir_add(ad, filter);
245 + rte_flow_error_set(error, -ret,
246 + RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
247 + "Add filter rule failed.");
251 + if (filter->mark_flag == 1)
252 + iavf_fdir_rx_proc_enable(ad, 1);
254 + rte_memcpy(rule, filter, sizeof(*rule));
265 +iavf_fdir_destroy(struct iavf_adapter *ad,
266 + struct rte_flow *flow,
267 + struct rte_flow_error *error)
269 + struct iavf_fdir_conf *filter;
272 + filter = (struct iavf_fdir_conf *)flow->rule;
274 + ret = iavf_fdir_del(ad, filter);
276 + rte_flow_error_set(error, -ret,
277 + RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
278 + "Del filter rule failed.");
282 + if (filter->mark_flag == 1)
283 + iavf_fdir_rx_proc_enable(ad, 0);
292 +iavf_fdir_validation(struct iavf_adapter *ad,
293 + __rte_unused struct rte_flow *flow,
295 + struct rte_flow_error *error)
297 + struct iavf_fdir_conf *filter = meta;
300 + ret = iavf_fdir_check(ad, filter);
302 + rte_flow_error_set(error, -ret,
303 + RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
304 + "Validate filter rule failed.");
311 +static struct iavf_flow_engine iavf_fdir_engine = {
312 + .init = iavf_fdir_init,
313 + .uninit = iavf_fdir_uninit,
314 + .create = iavf_fdir_create,
315 + .destroy = iavf_fdir_destroy,
316 + .validation = iavf_fdir_validation,
317 + .type = IAVF_FLOW_ENGINE_FDIR,
321 +iavf_fdir_parse_action_qregion(struct iavf_adapter *ad,
322 + struct rte_flow_error *error,
323 + const struct rte_flow_action *act,
324 + struct virtchnl_filter_action *filter_action)
326 + const struct rte_flow_action_rss *rss = act->conf;
329 + if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
330 + rte_flow_error_set(error, EINVAL,
331 + RTE_FLOW_ERROR_TYPE_ACTION, act,
332 + "Invalid action.");
336 + if (rss->queue_num <= 1) {
337 + rte_flow_error_set(error, EINVAL,
338 + RTE_FLOW_ERROR_TYPE_ACTION, act,
339 + "Queue region size can't be 0 or 1.");
343 + /* check if queue index for queue region is continuous */
344 + for (i = 0; i < rss->queue_num - 1; i++) {
345 + if (rss->queue[i + 1] != rss->queue[i] + 1) {
346 + rte_flow_error_set(error, EINVAL,
347 + RTE_FLOW_ERROR_TYPE_ACTION, act,
348 + "Discontinuous queue region");
353 + if (rss->queue[rss->queue_num - 1] >= ad->eth_dev->data->nb_rx_queues) {
354 + rte_flow_error_set(error, EINVAL,
355 + RTE_FLOW_ERROR_TYPE_ACTION, act,
356 + "Invalid queue region indexes.");
360 + if (!(rte_is_power_of_2(rss->queue_num) &&
361 + (rss->queue_num <= IAVF_FDIR_MAX_QREGION_SIZE))) {
362 + rte_flow_error_set(error, EINVAL,
363 + RTE_FLOW_ERROR_TYPE_ACTION, act,
364 + "The region size should be any of the following values:"
365 + "1, 2, 4, 8, 16, 32, 64, 128 as long as the total number "
366 + "of queues do not exceed the VSI allocation.");
370 + filter_action->act_conf.queue.index = rss->queue[0];
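+ /* queue_num is a power of 2 here, so region = log2(queue_num) */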
371 + filter_action->act_conf.queue.region = rte_fls_u32(rss->queue_num) - 1;
377 +iavf_fdir_parse_action(struct iavf_adapter *ad,
378 + const struct rte_flow_action actions[],
379 + struct rte_flow_error *error,
380 + struct iavf_fdir_conf *filter)
382 + const struct rte_flow_action_queue *act_q;
383 + const struct rte_flow_action_mark *mark_spec = NULL;
384 + uint32_t dest_num = 0;
385 + uint32_t mark_num = 0;
389 + struct virtchnl_filter_action *filter_action;
391 + for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
392 + switch (actions->type) {
393 + case RTE_FLOW_ACTION_TYPE_VOID:
396 + case RTE_FLOW_ACTION_TYPE_PASSTHRU:
399 + filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
401 + filter_action->type = VIRTCHNL_ACTION_PASSTHRU;
403 + filter->add_fltr.rule_cfg.action_set.count = ++number;
406 + case RTE_FLOW_ACTION_TYPE_DROP:
409 + filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
411 + filter_action->type = VIRTCHNL_ACTION_DROP;
413 + filter->add_fltr.rule_cfg.action_set.count = ++number;
416 + case RTE_FLOW_ACTION_TYPE_QUEUE:
419 + act_q = actions->conf;
420 + filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
422 + filter_action->type = VIRTCHNL_ACTION_QUEUE;
423 + filter_action->act_conf.queue.index = act_q->index;
425 + if (filter_action->act_conf.queue.index >=
426 + ad->eth_dev->data->nb_rx_queues) {
427 + rte_flow_error_set(error, EINVAL,
428 + RTE_FLOW_ERROR_TYPE_ACTION,
429 + actions, "Invalid queue for FDIR.");
433 + filter->add_fltr.rule_cfg.action_set.count = ++number;
436 + case RTE_FLOW_ACTION_TYPE_RSS:
439 + filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
441 + filter_action->type = VIRTCHNL_ACTION_Q_REGION;
443 + ret = iavf_fdir_parse_action_qregion(ad,
444 + error, actions, filter_action);
448 + filter->add_fltr.rule_cfg.action_set.count = ++number;
451 + case RTE_FLOW_ACTION_TYPE_MARK:
454 + filter->mark_flag = 1;
455 + mark_spec = actions->conf;
456 + filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
458 + filter_action->type = VIRTCHNL_ACTION_MARK;
459 + filter_action->act_conf.mark_id = mark_spec->id;
461 + filter->add_fltr.rule_cfg.action_set.count = ++number;
465 + rte_flow_error_set(error, EINVAL,
466 + RTE_FLOW_ERROR_TYPE_ACTION, actions,
467 + "Invalid action.");
472 + if (number > VIRTCHNL_MAX_NUM_ACTIONS) {
473 + rte_flow_error_set(error, EINVAL,
474 + RTE_FLOW_ERROR_TYPE_ACTION, actions,
475 + "Action numbers exceed the maximum value");
479 + if (dest_num >= 2) {
480 + rte_flow_error_set(error, EINVAL,
481 + RTE_FLOW_ERROR_TYPE_ACTION, actions,
482 + "Unsupported action combination");
486 + if (mark_num >= 2) {
487 + rte_flow_error_set(error, EINVAL,
488 + RTE_FLOW_ERROR_TYPE_ACTION, actions,
489 + "Too many mark actions");
493 + if (dest_num + mark_num == 0) {
494 + rte_flow_error_set(error, EINVAL,
495 + RTE_FLOW_ERROR_TYPE_ACTION, actions,
500 + /* Mark only is equal to mark + passthru. */
501 + if (dest_num == 0) {
502 + filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
503 + filter_action->type = VIRTCHNL_ACTION_PASSTHRU;
504 + filter->add_fltr.rule_cfg.action_set.count = ++number;
511 +iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
512 + const struct rte_flow_item pattern[],
513 + struct rte_flow_error *error,
514 + struct iavf_fdir_conf *filter)
516 + const struct rte_flow_item *item = pattern;
517 + enum rte_flow_item_type item_type;
518 + enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
519 + const struct rte_flow_item_eth *eth_spec, *eth_mask;
520 + const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
521 + const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
522 + const struct rte_flow_item_udp *udp_spec, *udp_mask;
523 + const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
524 + const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
525 + const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
526 + const struct rte_flow_item_gtp_psc *gtp_psc_spec, *gtp_psc_mask;
527 + const struct rte_flow_item_l2tpv3oip *l2tpv3oip_spec, *l2tpv3oip_mask;
528 + const struct rte_flow_item_esp *esp_spec, *esp_mask;
529 + const struct rte_flow_item_ah *ah_spec, *ah_mask;
530 + uint64_t input_set = IAVF_INSET_NONE;
532 + enum rte_flow_item_type next_type;
533 + uint16_t ether_type;
536 + struct virtchnl_proto_hdr *hdr;
538 + uint8_t ipv6_addr_mask[16] = {
539 + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
540 + 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
543 + for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
545 + rte_flow_error_set(error, EINVAL,
546 + RTE_FLOW_ERROR_TYPE_ITEM, item,
547 + "Not support range");
550 + item_type = item->type;
552 + switch (item_type) {
553 + case RTE_FLOW_ITEM_TYPE_ETH:
554 + eth_spec = item->spec;
555 + eth_mask = item->mask;
556 + next_type = (item + 1)->type;
558 + hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
560 + VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ETH);
562 + if (next_type == RTE_FLOW_ITEM_TYPE_END &&
563 + (!eth_spec || !eth_mask)) {
564 + rte_flow_error_set(error, EINVAL,
565 + RTE_FLOW_ERROR_TYPE_ITEM,
566 + item, "NULL eth spec/mask.");
570 + if (eth_spec && eth_mask) {
571 + if (!rte_is_zero_ether_addr(ð_mask->src) ||
572 + !rte_is_zero_ether_addr(ð_mask->dst)) {
573 + rte_flow_error_set(error, EINVAL,
574 + RTE_FLOW_ERROR_TYPE_ITEM, item,
575 + "Invalid MAC_addr mask.");
580 + if (eth_spec && eth_mask && eth_mask->type) {
581 + if (eth_mask->type != RTE_BE16(0xffff)) {
582 + rte_flow_error_set(error, EINVAL,
583 + RTE_FLOW_ERROR_TYPE_ITEM,
584 + item, "Invalid type mask.");
588 + ether_type = rte_be_to_cpu_16(eth_spec->type);
589 + if (ether_type == RTE_ETHER_TYPE_IPV4 ||
590 + ether_type == RTE_ETHER_TYPE_IPV6) {
591 + rte_flow_error_set(error, EINVAL,
592 + RTE_FLOW_ERROR_TYPE_ITEM,
594 + "Unsupported ether_type.");
598 + input_set |= IAVF_INSET_ETHERTYPE;
599 + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, ETH, ETHERTYPE);
601 + rte_memcpy(hdr->buffer,
602 + eth_spec, sizeof(*eth_spec));
605 + filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
608 + case RTE_FLOW_ITEM_TYPE_IPV4:
609 + l3 = RTE_FLOW_ITEM_TYPE_IPV4;
610 + ipv4_spec = item->spec;
611 + ipv4_mask = item->mask;
613 + hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
615 + VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV4);
617 + if (ipv4_spec && ipv4_mask) {
618 + if (ipv4_mask->hdr.version_ihl ||
619 + ipv4_mask->hdr.total_length ||
620 + ipv4_mask->hdr.packet_id ||
621 + ipv4_mask->hdr.fragment_offset ||
622 + ipv4_mask->hdr.hdr_checksum) {
623 + rte_flow_error_set(error, EINVAL,
624 + RTE_FLOW_ERROR_TYPE_ITEM,
625 + item, "Invalid IPv4 mask.");
629 + if (ipv4_mask->hdr.type_of_service ==
631 + input_set |= IAVF_INSET_IPV4_TOS;
632 + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DSCP);
634 + if (ipv4_mask->hdr.next_proto_id == UINT8_MAX) {
635 + input_set |= IAVF_INSET_IPV4_PROTO;
636 + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, PROT);
638 + if (ipv4_mask->hdr.time_to_live == UINT8_MAX) {
639 + input_set |= IAVF_INSET_IPV4_TTL;
640 + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, TTL);
642 + if (ipv4_mask->hdr.src_addr == UINT32_MAX) {
643 + input_set |= IAVF_INSET_IPV4_SRC;
644 + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, SRC);
646 + if (ipv4_mask->hdr.dst_addr == UINT32_MAX) {
647 + input_set |= IAVF_INSET_IPV4_DST;
648 + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DST);
651 + rte_memcpy(hdr->buffer,
653 + sizeof(ipv4_spec->hdr));
656 + filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
659 + case RTE_FLOW_ITEM_TYPE_IPV6:
660 + l3 = RTE_FLOW_ITEM_TYPE_IPV6;
661 + ipv6_spec = item->spec;
662 + ipv6_mask = item->mask;
664 + hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
666 + VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV6);
668 + if (ipv6_spec && ipv6_mask) {
669 + if (ipv6_mask->hdr.payload_len) {
670 + rte_flow_error_set(error, EINVAL,
671 + RTE_FLOW_ERROR_TYPE_ITEM,
672 + item, "Invalid IPv6 mask");
676 + if ((ipv6_mask->hdr.vtc_flow &
677 + rte_cpu_to_be_32(IAVF_IPV6_TC_MASK))
678 + == rte_cpu_to_be_32(
679 + IAVF_IPV6_TC_MASK)) {
680 + input_set |= IAVF_INSET_IPV6_TC;
681 + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, TC);
683 + if (ipv6_mask->hdr.proto == UINT8_MAX) {
684 + input_set |= IAVF_INSET_IPV6_NEXT_HDR;
685 + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, PROT);
687 + if (ipv6_mask->hdr.hop_limits == UINT8_MAX) {
688 + input_set |= IAVF_INSET_IPV6_HOP_LIMIT;
689 + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, HOP_LIMIT);
691 + if (!memcmp(ipv6_mask->hdr.src_addr,
693 + RTE_DIM(ipv6_mask->hdr.src_addr))) {
694 + input_set |= IAVF_INSET_IPV6_SRC;
695 + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, SRC);
697 + if (!memcmp(ipv6_mask->hdr.dst_addr,
699 + RTE_DIM(ipv6_mask->hdr.dst_addr))) {
700 + input_set |= IAVF_INSET_IPV6_DST;
701 + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, DST);
704 + rte_memcpy(hdr->buffer,
706 + sizeof(ipv6_spec->hdr));
709 + filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
712 + case RTE_FLOW_ITEM_TYPE_UDP:
713 + udp_spec = item->spec;
714 + udp_mask = item->mask;
716 + hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
718 + VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, UDP);
720 + if (udp_spec && udp_mask) {
721 + if (udp_mask->hdr.dgram_len ||
722 + udp_mask->hdr.dgram_cksum) {
723 + rte_flow_error_set(error, EINVAL,
724 + RTE_FLOW_ERROR_TYPE_ITEM, item,
725 + "Invalid UDP mask");
729 + if (udp_mask->hdr.src_port == UINT16_MAX) {
730 + input_set |= IAVF_INSET_UDP_SRC_PORT;
731 + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, SRC_PORT);
733 + if (udp_mask->hdr.dst_port == UINT16_MAX) {
734 + input_set |= IAVF_INSET_UDP_DST_PORT;
735 + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, DST_PORT);
738 + if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
739 + rte_memcpy(hdr->buffer,
741 + sizeof(udp_spec->hdr));
742 + else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
743 + rte_memcpy(hdr->buffer,
745 + sizeof(udp_spec->hdr));
748 + filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
751 + case RTE_FLOW_ITEM_TYPE_TCP:
752 + tcp_spec = item->spec;
753 + tcp_mask = item->mask;
755 + hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
757 + VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, TCP);
759 + if (tcp_spec && tcp_mask) {
760 + if (tcp_mask->hdr.sent_seq ||
761 + tcp_mask->hdr.recv_ack ||
762 + tcp_mask->hdr.data_off ||
763 + tcp_mask->hdr.tcp_flags ||
764 + tcp_mask->hdr.rx_win ||
765 + tcp_mask->hdr.cksum ||
766 + tcp_mask->hdr.tcp_urp) {
767 + rte_flow_error_set(error, EINVAL,
768 + RTE_FLOW_ERROR_TYPE_ITEM, item,
769 + "Invalid TCP mask");
773 + if (tcp_mask->hdr.src_port == UINT16_MAX) {
774 + input_set |= IAVF_INSET_TCP_SRC_PORT;
775 + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, SRC_PORT);
777 + if (tcp_mask->hdr.dst_port == UINT16_MAX) {
778 + input_set |= IAVF_INSET_TCP_DST_PORT;
779 + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, DST_PORT);
782 + if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
783 + rte_memcpy(hdr->buffer,
785 + sizeof(tcp_spec->hdr));
786 + else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
787 + rte_memcpy(hdr->buffer,
789 + sizeof(tcp_spec->hdr));
792 + filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
795 + case RTE_FLOW_ITEM_TYPE_SCTP:
796 + sctp_spec = item->spec;
797 + sctp_mask = item->mask;
799 + hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
801 + VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, SCTP);
803 + if (sctp_spec && sctp_mask) {
804 + if (sctp_mask->hdr.cksum) {
805 + rte_flow_error_set(error, EINVAL,
806 + RTE_FLOW_ERROR_TYPE_ITEM, item,
807 + "Invalid UDP mask");
811 + if (sctp_mask->hdr.src_port == UINT16_MAX) {
812 + input_set |= IAVF_INSET_SCTP_SRC_PORT;
813 + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, SRC_PORT);
815 + if (sctp_mask->hdr.dst_port == UINT16_MAX) {
816 + input_set |= IAVF_INSET_SCTP_DST_PORT;
817 + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, DST_PORT);
820 + if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
821 + rte_memcpy(hdr->buffer,
823 + sizeof(sctp_spec->hdr));
824 + else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
825 + rte_memcpy(hdr->buffer,
827 + sizeof(sctp_spec->hdr));
830 + filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
833 + case RTE_FLOW_ITEM_TYPE_GTPU:
834 + gtp_spec = item->spec;
835 + gtp_mask = item->mask;
837 + hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
839 + VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_IP);
841 + if (gtp_spec && gtp_mask) {
842 + if (gtp_mask->v_pt_rsv_flags ||
843 + gtp_mask->msg_type ||
844 + gtp_mask->msg_len) {
845 + rte_flow_error_set(error, EINVAL,
846 + RTE_FLOW_ERROR_TYPE_ITEM,
847 + item, "Invalid GTP mask");
851 + if (gtp_mask->teid == UINT32_MAX) {
852 + input_set |= IAVF_INSET_GTPU_TEID;
853 + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, GTPU_IP, TEID);
856 + rte_memcpy(hdr->buffer,
857 + gtp_spec, sizeof(*gtp_spec));
860 + filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
863 + case RTE_FLOW_ITEM_TYPE_GTP_PSC:
864 + gtp_psc_spec = item->spec;
865 + gtp_psc_mask = item->mask;
867 + hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
869 + VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_EH);
871 + if (gtp_psc_spec && gtp_psc_mask) {
872 + if (gtp_psc_mask->qfi == UINT8_MAX) {
873 + input_set |= IAVF_INSET_GTPU_QFI;
874 + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, GTPU_EH, QFI);
877 + rte_memcpy(hdr->buffer, gtp_psc_spec,
878 + sizeof(*gtp_psc_spec));
881 + filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
884 + case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
885 + l2tpv3oip_spec = item->spec;
886 + l2tpv3oip_mask = item->mask;
888 + hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
890 + VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, L2TPV3);
892 + if (l2tpv3oip_spec && l2tpv3oip_mask) {
893 + if (l2tpv3oip_mask->session_id == UINT32_MAX) {
894 + input_set |= IAVF_L2TPV3OIP_SESSION_ID;
895 + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, L2TPV3, SESS_ID);
898 + rte_memcpy(hdr->buffer, l2tpv3oip_spec,
899 + sizeof(*l2tpv3oip_spec));
902 + filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
905 + case RTE_FLOW_ITEM_TYPE_ESP:
906 + esp_spec = item->spec;
907 + esp_mask = item->mask;
909 + hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
911 + VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ESP);
913 + if (esp_spec && esp_mask) {
914 + if (esp_mask->hdr.spi == UINT32_MAX) {
915 + input_set |= IAVF_INSET_ESP_SPI;
916 + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, ESP, SPI);
919 + rte_memcpy(hdr->buffer, &esp_spec->hdr,
920 + sizeof(esp_spec->hdr));
923 + filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
926 + case RTE_FLOW_ITEM_TYPE_AH:
927 + ah_spec = item->spec;
928 + ah_mask = item->mask;
930 + hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
932 + VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, AH);
934 + if (ah_spec && ah_mask) {
935 + if (ah_mask->spi == UINT32_MAX) {
936 + input_set |= IAVF_INSET_AH_SPI;
937 + VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, AH, SPI);
940 + rte_memcpy(hdr->buffer, ah_spec,
944 + filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
947 + case RTE_FLOW_ITEM_TYPE_VOID:
951 + rte_flow_error_set(error, EINVAL,
952 + RTE_FLOW_ERROR_TYPE_ITEM, item,
953 + "Invalid pattern item.");
958 + if (layer > VIRTCHNL_MAX_NUM_PROTO_HDRS) {
959 + rte_flow_error_set(error, EINVAL,
960 + RTE_FLOW_ERROR_TYPE_ITEM, item,
961 + "Protocol header layers exceed the maximum value");
965 + filter->input_set = input_set;
971 +iavf_fdir_parse(struct iavf_adapter *ad,
972 + struct iavf_pattern_match_item *array,
973 + uint32_t array_len,
974 + const struct rte_flow_item pattern[],
975 + const struct rte_flow_action actions[],
977 + struct rte_flow_error *error)
979 + struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
980 + struct iavf_fdir_conf *filter = &vf->fdir.conf;
981 + struct iavf_pattern_match_item *item = NULL;
982 + uint64_t input_set;
985 + memset(filter, 0, sizeof(*filter));
987 + item = iavf_search_pattern_match_item(pattern, array, array_len, error);
991 + ret = iavf_fdir_parse_pattern(ad, pattern, error, filter);
995 + input_set = filter->input_set;
996 + if (!input_set || input_set & ~item->input_set_mask) {
997 + rte_flow_error_set(error, EINVAL,
998 + RTE_FLOW_ERROR_TYPE_ITEM_SPEC, pattern,
999 + "Invalid input set");
1004 + ret = iavf_fdir_parse_action(ad, actions, error, filter);
1016 +static struct iavf_flow_parser iavf_fdir_parser = {
1017 + .engine = &iavf_fdir_engine,
1018 + .array = iavf_fdir_pattern,
1019 + .array_len = RTE_DIM(iavf_fdir_pattern),
1020 + .parse_pattern_action = iavf_fdir_parse,
1021 + .stage = IAVF_FLOW_STAGE_DISTRIBUTOR,
1024 +RTE_INIT(iavf_fdir_engine_register)
1026 + iavf_register_flow_engine(&iavf_fdir_engine);
1028 diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
1029 index 3f0d23a92..25e490bc4 100644
1030 --- a/drivers/net/iavf/iavf_vchnl.c
1031 +++ b/drivers/net/iavf/iavf_vchnl.c
1032 @@ -340,7 +340,8 @@ iavf_get_vf_resource(struct iavf_adapter *adapter)
1035 caps = IAVF_BASIC_OFFLOAD_CAPS | VIRTCHNL_VF_CAP_ADV_LINK_SPEED |
1036 - VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC;
1037 + VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC |
1038 + VIRTCHNL_VF_OFFLOAD_FDIR_PF;
1040 + args.in_args = (uint8_t *)&caps;
1041 args.in_args_size = sizeof(caps);
1042 @@ -842,3 +843,154 @@ iavf_add_del_vlan(struct iavf_adapter *adapter, uint16_t vlanid, bool add)
1048 +iavf_fdir_add(struct iavf_adapter *adapter,
1049 + struct iavf_fdir_conf *filter)
1051 + struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1052 + struct virtchnl_fdir_add *fdir_ret;
1054 + struct iavf_cmd_info args;
1057 + filter->add_fltr.vsi_id = vf->vsi_res->vsi_id;
1058 + filter->add_fltr.validate_only = 0;
1060 + args.ops = VIRTCHNL_OP_ADD_FDIR_FILTER;
1061 + args.in_args = (uint8_t *)(&filter->add_fltr);
1062 + args.in_args_size = sizeof(filter->add_fltr);
1063 + args.out_buffer = vf->aq_resp;
1064 + args.out_size = IAVF_AQ_BUF_SZ;
1066 + err = iavf_execute_vf_cmd(adapter, &args);
1068 + PMD_DRV_LOG(ERR, "fail to execute command OP_ADD_FDIR_FILTER");
1072 + fdir_ret = (struct virtchnl_fdir_add *)args.out_buffer;
1073 + filter->flow_id = fdir_ret->flow_id;
1075 + if (fdir_ret->status == VIRTCHNL_FDIR_SUCCESS) {
1077 + "add rule request is successfully done by PF");
1078 + } else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE) {
1080 + "add rule request is failed due to no hw resource");
1082 + } else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_EXIST) {
1084 + "add rule request is failed due to the rule is already existed");
1086 + } else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT) {
1088 + "add rule request is failed due to the rule is conflict with existing rule");
1090 + } else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_INVALID) {
1092 + "add rule request is failed due to the hw doesn't support");
1094 + } else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT) {
1096 + "add rule request is failed due to time out for programming");
1100 + "add rule request is failed due to other reasons");
1108 +iavf_fdir_del(struct iavf_adapter *adapter,
1109 + struct iavf_fdir_conf *filter)
1111 + struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1112 + struct virtchnl_fdir_del *fdir_ret;
1114 + struct iavf_cmd_info args;
1117 + filter->del_fltr.vsi_id = vf->vsi_res->vsi_id;
1118 + filter->del_fltr.flow_id = filter->flow_id;
1120 + args.ops = VIRTCHNL_OP_DEL_FDIR_FILTER;
1121 + args.in_args = (uint8_t *)(&filter->del_fltr);
1122 + args.in_args_size = sizeof(filter->del_fltr);
1123 + args.out_buffer = vf->aq_resp;
1124 + args.out_size = IAVF_AQ_BUF_SZ;
1126 + err = iavf_execute_vf_cmd(adapter, &args);
1128 + PMD_DRV_LOG(ERR, "fail to execute command OP_DEL_FDIR_FILTER");
1132 + fdir_ret = (struct virtchnl_fdir_del *)args.out_buffer;
1134 + if (fdir_ret->status == VIRTCHNL_FDIR_SUCCESS) {
1136 + "delete rule request is successfully done by PF");
1137 + } else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST) {
1139 + "delete rule request is failed due to this rule doesn't exist");
1141 + } else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT) {
1143 + "delete rule request is failed due to time out for programming");
1147 + "delete rule request is failed due to other reasons");
1155 +iavf_fdir_check(struct iavf_adapter *adapter,
1156 + struct iavf_fdir_conf *filter)
1158 + struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
1159 + struct virtchnl_fdir_add *fdir_ret;
1161 + struct iavf_cmd_info args;
1164 + filter->add_fltr.vsi_id = vf->vsi_res->vsi_id;
1165 + filter->add_fltr.validate_only = 1;
1167 + args.ops = VIRTCHNL_OP_ADD_FDIR_FILTER;
1168 + args.in_args = (uint8_t *)(&filter->add_fltr);
1169 + args.in_args_size = sizeof(filter->add_fltr);
1170 + args.out_buffer = vf->aq_resp;
1171 + args.out_size = IAVF_AQ_BUF_SZ;
1173 + err = iavf_execute_vf_cmd(adapter, &args);
1175 + PMD_DRV_LOG(ERR, "fail to check flow direcotor rule");
1179 + fdir_ret = (struct virtchnl_fdir_add *)args.out_buffer;
1181 + if (fdir_ret->status == VIRTCHNL_FDIR_SUCCESS) {
1183 + "check rule request is successfully done by PF");
1184 + } else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_INVALID) {
1186 + "check rule request is failed due to parameters validation"
1187 + " or HW doesn't support");
1191 + "check rule request is failed due to other reasons");
1197 diff --git a/drivers/net/iavf/meson.build b/drivers/net/iavf/meson.build
1198 index 32eabca4b..ce71054fb 100644
1199 --- a/drivers/net/iavf/meson.build
1200 +++ b/drivers/net/iavf/meson.build
1201 @@ -13,6 +13,7 @@ sources = files(
1204 'iavf_generic_flow.c',
1208 if arch_subdir == 'x86'