dpdk: backport of the DPDK 20.05 iavf flow director to DPDK 20.02
[vpp.git] build/external/patches/dpdk_20.02/0017-net-iavf-add-support-for-FDIR-basic-rule.patch
From 813dc1da330eb21cf5ed399dfcff8ee7bde6aafd Mon Sep 17 00:00:00 2001
From: Chenmin Sun <chenmin.sun@intel.com>
Date: Fri, 17 Apr 2020 05:46:45 +0800
Subject: [DPDK 17/17] net/iavf: add support for FDIR basic rule

This patch adds the FDIR create/destroy/validate functions in iAVF.
Common patterns and the queue/qgroup/passthru/drop actions are supported.

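For reference, a rule like the one sketched below exercises the new engine
end to end through the public rte_flow API (illustrative only, not part of
the upstream change; the port id, queue index and UDP port are arbitrary,
and error handling is abbreviated):

    #include <rte_byteorder.h>
    #include <rte_flow.h>

    /* Steer ingress IPv4/UDP packets with dst port 4789 to one Rx queue. */
    static struct rte_flow *
    fdir_queue_rule_example(uint16_t port_id, uint16_t rx_queue)
    {
            struct rte_flow_attr attr = { .ingress = 1 };
            /* FDIR needs exact-match fields, hence the all-ones port mask. */
            struct rte_flow_item_udp udp_spec = {
                    .hdr = { .dst_port = RTE_BE16(4789) },
            };
            struct rte_flow_item_udp udp_mask = {
                    .hdr = { .dst_port = RTE_BE16(0xffff) },
            };
            struct rte_flow_item pattern[] = {
                    { .type = RTE_FLOW_ITEM_TYPE_ETH },
                    { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
                    { .type = RTE_FLOW_ITEM_TYPE_UDP,
                      .spec = &udp_spec, .mask = &udp_mask },
                    { .type = RTE_FLOW_ITEM_TYPE_END },
            };
            struct rte_flow_action_queue queue = { .index = rx_queue };
            struct rte_flow_action actions[] = {
                    { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                    { .type = RTE_FLOW_ACTION_TYPE_END },
            };
            struct rte_flow_error err;

            /* Validation goes through the new validate_only virtchnl path. */
            if (rte_flow_validate(port_id, &attr, pattern, actions, &err))
                    return NULL;
            return rte_flow_create(port_id, &attr, pattern, actions, &err);
    }
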
Signed-off-by: Simei Su <simei.su@intel.com>
Signed-off-by: Chenmin Sun <chenmin.sun@intel.com>
---
 drivers/net/iavf/Makefile     |   1 +
 drivers/net/iavf/iavf.h       |  18 +
 drivers/net/iavf/iavf_fdir.c  | 949 ++++++++++++++++++++++++++++++++++
 drivers/net/iavf/iavf_vchnl.c | 154 +++++-
 drivers/net/iavf/meson.build  |   1 +
 5 files changed, 1122 insertions(+), 1 deletion(-)
 create mode 100644 drivers/net/iavf/iavf_fdir.c

diff --git a/drivers/net/iavf/Makefile b/drivers/net/iavf/Makefile
index 1bf0f26b5..193bc55a7 100644
--- a/drivers/net/iavf/Makefile
+++ b/drivers/net/iavf/Makefile
@@ -24,6 +24,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_ethdev.c
 SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_vchnl.c
 SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_rxtx.c
 SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_generic_flow.c
+SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_fdir.c
 ifeq ($(CONFIG_RTE_ARCH_X86), y)
 SRCS-$(CONFIG_RTE_LIBRTE_IAVF_PMD) += iavf_rxtx_vec_sse.c
 endif
diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 78bdaff20..5fb7881c9 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -92,6 +92,18 @@ TAILQ_HEAD(iavf_flow_list, rte_flow);
 struct iavf_flow_parser_node;
 TAILQ_HEAD(iavf_parser_list, iavf_flow_parser_node);
 
+struct iavf_fdir_conf {
+       struct virtchnl_fdir_add add_fltr;
+       struct virtchnl_fdir_del del_fltr;
+       uint64_t input_set;
+       uint32_t flow_id;
+       uint32_t mark_flag;
+};
+
+struct iavf_fdir_info {
+       struct iavf_fdir_conf conf;
+};
+
 /* TODO: is that correct to assume the max number to be 16 ?*/
 #define IAVF_MAX_MSIX_VECTORS   16
 
@@ -131,6 +143,8 @@ struct iavf_info {
        rte_spinlock_t flow_ops_lock;
        struct iavf_parser_list rss_parser_list;
        struct iavf_parser_list dist_parser_list;
+
+       struct iavf_fdir_info fdir; /* flow director info */
 };
 
 #define IAVF_MAX_PKT_TYPE 1024
@@ -252,4 +266,8 @@ int iavf_config_promisc(struct iavf_adapter *adapter, bool enable_unicast,
 int iavf_add_del_eth_addr(struct iavf_adapter *adapter,
                         struct rte_ether_addr *addr, bool add);
 int iavf_add_del_vlan(struct iavf_adapter *adapter, uint16_t vlanid, bool add);
+int iavf_fdir_add(struct iavf_adapter *adapter, struct iavf_fdir_conf *filter);
+int iavf_fdir_del(struct iavf_adapter *adapter, struct iavf_fdir_conf *filter);
+int iavf_fdir_check(struct iavf_adapter *adapter,
+               struct iavf_fdir_conf *filter);
 #endif /* _IAVF_ETHDEV_H_ */
diff --git a/drivers/net/iavf/iavf_fdir.c b/drivers/net/iavf/iavf_fdir.c
new file mode 100644
index 000000000..fc1a4f817
--- /dev/null
+++ b/drivers/net/iavf/iavf_fdir.c
@@ -0,0 +1,949 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2019 Intel Corporation
+ */
+
+#include <sys/queue.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdarg.h>
+
+#include <rte_ether.h>
+#include <rte_ethdev_driver.h>
+#include <rte_malloc.h>
+#include <rte_tailq.h>
+
+#include "iavf.h"
+#include "iavf_generic_flow.h"
+#include "virtchnl.h"
+#include "iavf_rxtx.h"
+
+#define IAVF_FDIR_MAX_QREGION_SIZE 128
+
+#define IAVF_FDIR_IPV6_TC_OFFSET 20
+#define IAVF_IPV6_TC_MASK  (0xFF << IAVF_FDIR_IPV6_TC_OFFSET)
+
+#define IAVF_FDIR_INSET_ETH (\
+       IAVF_INSET_ETHERTYPE)
+
+#define IAVF_FDIR_INSET_ETH_IPV4 (\
+       IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
+       IAVF_INSET_IPV4_PROTO | IAVF_INSET_IPV4_TOS | \
+       IAVF_INSET_IPV4_TTL)
+
+#define IAVF_FDIR_INSET_ETH_IPV4_UDP (\
+       IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
+       IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
+       IAVF_INSET_UDP_SRC_PORT | IAVF_INSET_UDP_DST_PORT)
+
+#define IAVF_FDIR_INSET_ETH_IPV4_TCP (\
+       IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
+       IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
+       IAVF_INSET_TCP_SRC_PORT | IAVF_INSET_TCP_DST_PORT)
+
+#define IAVF_FDIR_INSET_ETH_IPV4_SCTP (\
+       IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
+       IAVF_INSET_IPV4_TOS | IAVF_INSET_IPV4_TTL | \
+       IAVF_INSET_SCTP_SRC_PORT | IAVF_INSET_SCTP_DST_PORT)
+
+#define IAVF_FDIR_INSET_ETH_IPV6 (\
+       IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
+       IAVF_INSET_IPV6_NEXT_HDR | IAVF_INSET_IPV6_TC | \
+       IAVF_INSET_IPV6_HOP_LIMIT)
+
+#define IAVF_FDIR_INSET_ETH_IPV6_UDP (\
+       IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
+       IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
+       IAVF_INSET_UDP_SRC_PORT | IAVF_INSET_UDP_DST_PORT)
+
+#define IAVF_FDIR_INSET_ETH_IPV6_TCP (\
+       IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
+       IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
+       IAVF_INSET_TCP_SRC_PORT | IAVF_INSET_TCP_DST_PORT)
+
+#define IAVF_FDIR_INSET_ETH_IPV6_SCTP (\
+       IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
+       IAVF_INSET_IPV6_TC | IAVF_INSET_IPV6_HOP_LIMIT | \
+       IAVF_INSET_SCTP_SRC_PORT | IAVF_INSET_SCTP_DST_PORT)
+
+#define IAVF_FDIR_INSET_GTPU (\
+       IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
+       IAVF_INSET_GTPU_TEID)
+
+#define IAVF_FDIR_INSET_GTPU_EH (\
+       IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
+       IAVF_INSET_GTPU_TEID | IAVF_INSET_GTPU_QFI)
+
+#define IAVF_FDIR_INSET_L2TPV3OIP (\
+       IAVF_L2TPV3OIP_SESSION_ID)
+
+#define IAVF_FDIR_INSET_ESP (\
+       IAVF_INSET_ESP_SPI)
+
+#define IAVF_FDIR_INSET_AH (\
+       IAVF_INSET_AH_SPI)
+
+#define IAVF_FDIR_INSET_IPV4_NATT_ESP (\
+       IAVF_INSET_IPV4_SRC | IAVF_INSET_IPV4_DST | \
+       IAVF_INSET_ESP_SPI)
+
+#define IAVF_FDIR_INSET_IPV6_NATT_ESP (\
+       IAVF_INSET_IPV6_SRC | IAVF_INSET_IPV6_DST | \
+       IAVF_INSET_ESP_SPI)
+
+#define IAVF_FDIR_INSET_PFCP (\
+       IAVF_INSET_PFCP_S_FIELD)
+
+static struct iavf_pattern_match_item iavf_fdir_pattern[] = {
+       {iavf_pattern_ethertype,                IAVF_FDIR_INSET_ETH,                    IAVF_INSET_NONE},
+       {iavf_pattern_eth_ipv4,                 IAVF_FDIR_INSET_ETH_IPV4,               IAVF_INSET_NONE},
+       {iavf_pattern_eth_ipv4_udp,             IAVF_FDIR_INSET_ETH_IPV4_UDP,           IAVF_INSET_NONE},
+       {iavf_pattern_eth_ipv4_tcp,             IAVF_FDIR_INSET_ETH_IPV4_TCP,           IAVF_INSET_NONE},
+       {iavf_pattern_eth_ipv4_sctp,            IAVF_FDIR_INSET_ETH_IPV4_SCTP,          IAVF_INSET_NONE},
+       {iavf_pattern_eth_ipv6,                 IAVF_FDIR_INSET_ETH_IPV6,               IAVF_INSET_NONE},
+       {iavf_pattern_eth_ipv6_udp,             IAVF_FDIR_INSET_ETH_IPV6_UDP,           IAVF_INSET_NONE},
+       {iavf_pattern_eth_ipv6_tcp,             IAVF_FDIR_INSET_ETH_IPV6_TCP,           IAVF_INSET_NONE},
+       {iavf_pattern_eth_ipv6_sctp,            IAVF_FDIR_INSET_ETH_IPV6_SCTP,          IAVF_INSET_NONE},
+       {iavf_pattern_eth_ipv4_gtpu,            IAVF_FDIR_INSET_GTPU,                   IAVF_INSET_NONE},
+       {iavf_pattern_eth_ipv4_gtpu_eh,         IAVF_FDIR_INSET_GTPU_EH,                IAVF_INSET_NONE},
+       {iavf_pattern_eth_ipv4_l2tpv3,          IAVF_FDIR_INSET_L2TPV3OIP,              IAVF_INSET_NONE},
+       {iavf_pattern_eth_ipv6_l2tpv3,          IAVF_FDIR_INSET_L2TPV3OIP,              IAVF_INSET_NONE},
+       {iavf_pattern_eth_ipv4_esp,             IAVF_FDIR_INSET_ESP,                    IAVF_INSET_NONE},
+       {iavf_pattern_eth_ipv6_esp,             IAVF_FDIR_INSET_ESP,                    IAVF_INSET_NONE},
+       {iavf_pattern_eth_ipv4_ah,              IAVF_FDIR_INSET_AH,                     IAVF_INSET_NONE},
+       {iavf_pattern_eth_ipv6_ah,              IAVF_FDIR_INSET_AH,                     IAVF_INSET_NONE},
+       {iavf_pattern_eth_ipv4_udp_esp,         IAVF_FDIR_INSET_IPV4_NATT_ESP,          IAVF_INSET_NONE},
+       {iavf_pattern_eth_ipv6_udp_esp,         IAVF_FDIR_INSET_IPV6_NATT_ESP,          IAVF_INSET_NONE},
+};
+
+static struct iavf_flow_parser iavf_fdir_parser;
+
+static int
+iavf_fdir_init(struct iavf_adapter *ad)
+{
+       struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
+       struct iavf_flow_parser *parser;
+
+       if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_FDIR_PF)
+               parser = &iavf_fdir_parser;
+       else
+               return -ENOTSUP;
+
+       return iavf_register_parser(parser, ad);
+}
+
+static void
+iavf_fdir_uninit(struct iavf_adapter *ad)
+{
+       struct iavf_flow_parser *parser;
+
+       parser = &iavf_fdir_parser;
+
+       iavf_unregister_parser(parser, ad);
+}
+
+static int
+iavf_fdir_create(struct iavf_adapter *ad,
+               struct rte_flow *flow,
+               void *meta,
+               struct rte_flow_error *error)
+{
+       struct iavf_fdir_conf *filter = meta;
+       struct iavf_fdir_conf *rule;
+       int ret;
+
+       rule = rte_zmalloc("fdir_entry", sizeof(*rule), 0);
+       if (!rule) {
+               rte_flow_error_set(error, ENOMEM,
+                               RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+                               "Failed to allocate memory");
+               return -rte_errno;
+       }
+
+       ret = iavf_fdir_add(ad, filter);
+       if (ret) {
+               rte_flow_error_set(error, -ret,
+                               RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+                               "Add filter rule failed.");
+               goto free_entry;
+       }
+
+       if (filter->mark_flag == 1)
+               iavf_fdir_rx_proc_enable(ad, 1);
+
+       rte_memcpy(rule, filter, sizeof(*rule));
+       flow->rule = rule;
+
+       return 0;
+
+free_entry:
+       rte_free(rule);
+       return -rte_errno;
+}
+
+static int
+iavf_fdir_destroy(struct iavf_adapter *ad,
+               struct rte_flow *flow,
+               struct rte_flow_error *error)
+{
+       struct iavf_fdir_conf *filter;
+       int ret;
+
+       filter = (struct iavf_fdir_conf *)flow->rule;
+
+       ret = iavf_fdir_del(ad, filter);
+       if (ret) {
+               rte_flow_error_set(error, -ret,
+                               RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+                               "Del filter rule failed.");
+               return -rte_errno;
+       }
+
+       if (filter->mark_flag == 1)
+               iavf_fdir_rx_proc_enable(ad, 0);
+
+       flow->rule = NULL;
+       rte_free(filter);
+
+       return 0;
+}
+
+static int
+iavf_fdir_validation(struct iavf_adapter *ad,
+               __rte_unused struct rte_flow *flow,
+               void *meta,
+               struct rte_flow_error *error)
+{
+       struct iavf_fdir_conf *filter = meta;
+       int ret;
+
+       ret = iavf_fdir_check(ad, filter);
+       if (ret) {
+               rte_flow_error_set(error, -ret,
+                               RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
+                               "Validate filter rule failed.");
+               return -rte_errno;
+       }
+
+       return 0;
+}
+
+static struct iavf_flow_engine iavf_fdir_engine = {
+       .init = iavf_fdir_init,
+       .uninit = iavf_fdir_uninit,
+       .create = iavf_fdir_create,
+       .destroy = iavf_fdir_destroy,
+       .validation = iavf_fdir_validation,
+       .type = IAVF_FLOW_ENGINE_FDIR,
+};
+
+static int
+iavf_fdir_parse_action_qregion(struct iavf_adapter *ad,
+                       struct rte_flow_error *error,
+                       const struct rte_flow_action *act,
+                       struct virtchnl_filter_action *filter_action)
+{
+       const struct rte_flow_action_rss *rss = act->conf;
+       uint32_t i;
+
+       if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
+               rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_ACTION, act,
+                               "Invalid action.");
+               return -rte_errno;
+       }
+
+       if (rss->queue_num <= 1) {
+               rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_ACTION, act,
+                               "Queue region size can't be 0 or 1.");
+               return -rte_errno;
+       }
+
+       /* check if queue index for queue region is continuous */
+       for (i = 0; i < rss->queue_num - 1; i++) {
+               if (rss->queue[i + 1] != rss->queue[i] + 1) {
+                       rte_flow_error_set(error, EINVAL,
+                                       RTE_FLOW_ERROR_TYPE_ACTION, act,
+                                       "Discontinuous queue region");
+                       return -rte_errno;
+               }
+       }
+
+       if (rss->queue[rss->queue_num - 1] >= ad->eth_dev->data->nb_rx_queues) {
+               rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_ACTION, act,
+                               "Invalid queue region indexes.");
+               return -rte_errno;
+       }
+
+       if (!(rte_is_power_of_2(rss->queue_num) &&
+               (rss->queue_num <= IAVF_FDIR_MAX_QREGION_SIZE))) {
+               rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_ACTION, act,
+                               "The region size should be any of the following values: "
+                               "1, 2, 4, 8, 16, 32, 64, 128, as long as the total number "
+                               "of queues does not exceed the VSI allocation.");
+               return -rte_errno;
+       }
+
+       filter_action->act_conf.queue.index = rss->queue[0];
+       filter_action->act_conf.queue.region = rte_fls_u32(rss->queue_num) - 1;
+
+       return 0;
+}
+
+static int
+iavf_fdir_parse_action(struct iavf_adapter *ad,
+                       const struct rte_flow_action actions[],
+                       struct rte_flow_error *error,
+                       struct iavf_fdir_conf *filter)
+{
+       const struct rte_flow_action_queue *act_q;
+       const struct rte_flow_action_mark *mark_spec = NULL;
+       uint32_t dest_num = 0;
+       uint32_t mark_num = 0;
+       int ret;
+
+       int number = 0;
+       struct virtchnl_filter_action *filter_action;
+
+       for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+               switch (actions->type) {
+               case RTE_FLOW_ACTION_TYPE_VOID:
+                       break;
+
+               case RTE_FLOW_ACTION_TYPE_PASSTHRU:
+                       dest_num++;
+
+                       filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
+
+                       filter_action->type = VIRTCHNL_ACTION_PASSTHRU;
+
+                       filter->add_fltr.rule_cfg.action_set.count = ++number;
+                       break;
+
+               case RTE_FLOW_ACTION_TYPE_DROP:
+                       dest_num++;
+
+                       filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
+
+                       filter_action->type = VIRTCHNL_ACTION_DROP;
+
+                       filter->add_fltr.rule_cfg.action_set.count = ++number;
+                       break;
+
+               case RTE_FLOW_ACTION_TYPE_QUEUE:
+                       dest_num++;
+
+                       act_q = actions->conf;
+                       filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
+
+                       filter_action->type = VIRTCHNL_ACTION_QUEUE;
+                       filter_action->act_conf.queue.index = act_q->index;
+
+                       if (filter_action->act_conf.queue.index >=
+                               ad->eth_dev->data->nb_rx_queues) {
+                               rte_flow_error_set(error, EINVAL,
+                                       RTE_FLOW_ERROR_TYPE_ACTION,
+                                       actions, "Invalid queue for FDIR.");
+                               return -rte_errno;
+                       }
+
+                       filter->add_fltr.rule_cfg.action_set.count = ++number;
+                       break;
+
+               case RTE_FLOW_ACTION_TYPE_RSS:
+                       dest_num++;
+
+                       filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
+
+                       filter_action->type = VIRTCHNL_ACTION_Q_REGION;
+
+                       ret = iavf_fdir_parse_action_qregion(ad,
+                                               error, actions, filter_action);
+                       if (ret)
+                               return ret;
+
+                       filter->add_fltr.rule_cfg.action_set.count = ++number;
+                       break;
+
+               case RTE_FLOW_ACTION_TYPE_MARK:
+                       mark_num++;
+
+                       filter->mark_flag = 1;
+                       mark_spec = actions->conf;
+                       filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
+
+                       filter_action->type = VIRTCHNL_ACTION_MARK;
+                       filter_action->act_conf.mark_id = mark_spec->id;
+
+                       filter->add_fltr.rule_cfg.action_set.count = ++number;
+                       break;
+
+               default:
+                       rte_flow_error_set(error, EINVAL,
+                                       RTE_FLOW_ERROR_TYPE_ACTION, actions,
+                                       "Invalid action.");
+                       return -rte_errno;
+               }
+       }
+
+       if (number > VIRTCHNL_MAX_NUM_ACTIONS) {
+               rte_flow_error_set(error, EINVAL,
+                       RTE_FLOW_ERROR_TYPE_ACTION, actions,
+                       "Action numbers exceed the maximum value");
+               return -rte_errno;
+       }
+
+       if (dest_num >= 2) {
+               rte_flow_error_set(error, EINVAL,
+                       RTE_FLOW_ERROR_TYPE_ACTION, actions,
+                       "Unsupported action combination");
+               return -rte_errno;
+       }
+
+       if (mark_num >= 2) {
+               rte_flow_error_set(error, EINVAL,
+                       RTE_FLOW_ERROR_TYPE_ACTION, actions,
+                       "Too many mark actions");
+               return -rte_errno;
+       }
+
+       if (dest_num + mark_num == 0) {
+               rte_flow_error_set(error, EINVAL,
+                       RTE_FLOW_ERROR_TYPE_ACTION, actions,
+                       "Empty action");
+               return -rte_errno;
+       }
+
+       /* Mark only is equal to mark + passthru. */
+       if (dest_num == 0) {
+               filter_action = &filter->add_fltr.rule_cfg.action_set.actions[number];
+               filter_action->type = VIRTCHNL_ACTION_PASSTHRU;
+               filter->add_fltr.rule_cfg.action_set.count = ++number;
+       }
+
+       return 0;
+}
+
+static int
+iavf_fdir_parse_pattern(__rte_unused struct iavf_adapter *ad,
+                       const struct rte_flow_item pattern[],
+                       struct rte_flow_error *error,
+                       struct iavf_fdir_conf *filter)
+{
+       const struct rte_flow_item *item = pattern;
+       enum rte_flow_item_type item_type;
+       enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
+       const struct rte_flow_item_eth *eth_spec, *eth_mask;
+       const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
+       const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
+       const struct rte_flow_item_udp *udp_spec, *udp_mask;
+       const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
+       const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
+       const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
+       const struct rte_flow_item_gtp_psc *gtp_psc_spec, *gtp_psc_mask;
+       const struct rte_flow_item_l2tpv3oip *l2tpv3oip_spec, *l2tpv3oip_mask;
+       const struct rte_flow_item_esp *esp_spec, *esp_mask;
+       const struct rte_flow_item_ah *ah_spec, *ah_mask;
+       uint64_t input_set = IAVF_INSET_NONE;
+
+       enum rte_flow_item_type next_type;
+       uint16_t ether_type;
+
+       int layer = 0;
+       struct virtchnl_proto_hdr *hdr;
+
+       uint8_t  ipv6_addr_mask[16] = {
+               0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+               0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF
+       };
+
+       for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+               if (item->last) {
+                       rte_flow_error_set(error, EINVAL,
+                                       RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                       "Range not supported");
+               }
+
+               item_type = item->type;
+
+               switch (item_type) {
+               case RTE_FLOW_ITEM_TYPE_ETH:
+                       eth_spec = item->spec;
+                       eth_mask = item->mask;
+                       next_type = (item + 1)->type;
+
+                       hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
+
+                       VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ETH);
+
+                       if (next_type == RTE_FLOW_ITEM_TYPE_END &&
+                               (!eth_spec || !eth_mask)) {
+                               rte_flow_error_set(error, EINVAL,
+                                               RTE_FLOW_ERROR_TYPE_ITEM,
+                                               item, "NULL eth spec/mask.");
+                               return -rte_errno;
+                       }
+
+                       if (eth_spec && eth_mask) {
+                               if (!rte_is_zero_ether_addr(&eth_mask->src) ||
+                                   !rte_is_zero_ether_addr(&eth_mask->dst)) {
+                                       rte_flow_error_set(error, EINVAL,
+                                               RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                               "Invalid MAC_addr mask.");
+                                       return -rte_errno;
+                               }
+                       }
+
+                       if (eth_spec && eth_mask && eth_mask->type) {
+                               if (eth_mask->type != RTE_BE16(0xffff)) {
+                                       rte_flow_error_set(error, EINVAL,
+                                               RTE_FLOW_ERROR_TYPE_ITEM,
+                                               item, "Invalid type mask.");
+                                       return -rte_errno;
+                               }
+
+                               ether_type = rte_be_to_cpu_16(eth_spec->type);
+                               if (ether_type == RTE_ETHER_TYPE_IPV4 ||
+                                       ether_type == RTE_ETHER_TYPE_IPV6) {
+                                       rte_flow_error_set(error, EINVAL,
+                                               RTE_FLOW_ERROR_TYPE_ITEM,
+                                               item,
+                                               "Unsupported ether_type.");
+                                       return -rte_errno;
+                               }
+
+                               input_set |= IAVF_INSET_ETHERTYPE;
+                               VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, ETH, ETHERTYPE);
+
+                               rte_memcpy(hdr->buffer,
+                                       eth_spec, sizeof(*eth_spec));
+                       }
+
+                       filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
+                       break;
+
+               case RTE_FLOW_ITEM_TYPE_IPV4:
+                       l3 = RTE_FLOW_ITEM_TYPE_IPV4;
+                       ipv4_spec = item->spec;
+                       ipv4_mask = item->mask;
+
+                       hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
+
+                       VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV4);
+
+                       if (ipv4_spec && ipv4_mask) {
+                               if (ipv4_mask->hdr.version_ihl ||
+                                       ipv4_mask->hdr.total_length ||
+                                       ipv4_mask->hdr.packet_id ||
+                                       ipv4_mask->hdr.fragment_offset ||
+                                       ipv4_mask->hdr.hdr_checksum) {
+                                       rte_flow_error_set(error, EINVAL,
+                                               RTE_FLOW_ERROR_TYPE_ITEM,
+                                               item, "Invalid IPv4 mask.");
+                                       return -rte_errno;
+                               }
+
+                               if (ipv4_mask->hdr.type_of_service ==
+                                                               UINT8_MAX) {
+                                       input_set |= IAVF_INSET_IPV4_TOS;
+                                       VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DSCP);
+                               }
+                               if (ipv4_mask->hdr.next_proto_id == UINT8_MAX) {
+                                       input_set |= IAVF_INSET_IPV4_PROTO;
+                                       VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, PROT);
+                               }
+                               if (ipv4_mask->hdr.time_to_live == UINT8_MAX) {
+                                       input_set |= IAVF_INSET_IPV4_TTL;
+                                       VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, TTL);
+                               }
+                               if (ipv4_mask->hdr.src_addr == UINT32_MAX) {
+                                       input_set |= IAVF_INSET_IPV4_SRC;
+                                       VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, SRC);
+                               }
+                               if (ipv4_mask->hdr.dst_addr == UINT32_MAX) {
+                                       input_set |= IAVF_INSET_IPV4_DST;
+                                       VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DST);
+                               }
+
+                               rte_memcpy(hdr->buffer,
+                                       &ipv4_spec->hdr,
+                                       sizeof(ipv4_spec->hdr));
+                       }
+
+                       filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
+                       break;
+
+               case RTE_FLOW_ITEM_TYPE_IPV6:
+                       l3 = RTE_FLOW_ITEM_TYPE_IPV6;
+                       ipv6_spec = item->spec;
+                       ipv6_mask = item->mask;
+
+                       hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
+
+                       VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, IPV6);
+
+                       if (ipv6_spec && ipv6_mask) {
+                               if (ipv6_mask->hdr.payload_len) {
+                                       rte_flow_error_set(error, EINVAL,
+                                               RTE_FLOW_ERROR_TYPE_ITEM,
+                                               item, "Invalid IPv6 mask");
+                                       return -rte_errno;
+                               }
+
+                               if ((ipv6_mask->hdr.vtc_flow &
+                                       rte_cpu_to_be_32(IAVF_IPV6_TC_MASK))
+                                       == rte_cpu_to_be_32(
+                                                       IAVF_IPV6_TC_MASK)) {
+                                       input_set |= IAVF_INSET_IPV6_TC;
+                                       VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, TC);
+                               }
+                               if (ipv6_mask->hdr.proto == UINT8_MAX) {
+                                       input_set |= IAVF_INSET_IPV6_NEXT_HDR;
+                                       VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, PROT);
+                               }
+                               if (ipv6_mask->hdr.hop_limits == UINT8_MAX) {
+                                       input_set |= IAVF_INSET_IPV6_HOP_LIMIT;
+                                       VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, HOP_LIMIT);
+                               }
+                               if (!memcmp(ipv6_mask->hdr.src_addr,
+                                       ipv6_addr_mask,
+                                       RTE_DIM(ipv6_mask->hdr.src_addr))) {
+                                       input_set |= IAVF_INSET_IPV6_SRC;
+                                       VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, SRC);
+                               }
+                               if (!memcmp(ipv6_mask->hdr.dst_addr,
+                                       ipv6_addr_mask,
+                                       RTE_DIM(ipv6_mask->hdr.dst_addr))) {
+                                       input_set |= IAVF_INSET_IPV6_DST;
+                                       VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, DST);
+                               }
+
+                               rte_memcpy(hdr->buffer,
+                                       &ipv6_spec->hdr,
+                                       sizeof(ipv6_spec->hdr));
+                       }
+
+                       filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
+                       break;
+
+               case RTE_FLOW_ITEM_TYPE_UDP:
+                       udp_spec = item->spec;
+                       udp_mask = item->mask;
+
+                       hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
+
+                       VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, UDP);
+
+                       if (udp_spec && udp_mask) {
+                               if (udp_mask->hdr.dgram_len ||
+                                       udp_mask->hdr.dgram_cksum) {
+                                       rte_flow_error_set(error, EINVAL,
+                                               RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                               "Invalid UDP mask");
+                                       return -rte_errno;
+                               }
+
+                               if (udp_mask->hdr.src_port == UINT16_MAX) {
+                                       input_set |= IAVF_INSET_UDP_SRC_PORT;
+                                       VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, SRC_PORT);
+                               }
+                               if (udp_mask->hdr.dst_port == UINT16_MAX) {
+                                       input_set |= IAVF_INSET_UDP_DST_PORT;
+                                       VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, UDP, DST_PORT);
+                               }
+
+                               if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+                                       rte_memcpy(hdr->buffer,
+                                               &udp_spec->hdr,
+                                               sizeof(udp_spec->hdr));
+                               else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
+                                       rte_memcpy(hdr->buffer,
+                                               &udp_spec->hdr,
+                                               sizeof(udp_spec->hdr));
+                       }
+
+                       filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
+                       break;
+
+               case RTE_FLOW_ITEM_TYPE_TCP:
+                       tcp_spec = item->spec;
+                       tcp_mask = item->mask;
+
+                       hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
+
+                       VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, TCP);
+
+                       if (tcp_spec && tcp_mask) {
+                               if (tcp_mask->hdr.sent_seq ||
+                                       tcp_mask->hdr.recv_ack ||
+                                       tcp_mask->hdr.data_off ||
+                                       tcp_mask->hdr.tcp_flags ||
+                                       tcp_mask->hdr.rx_win ||
+                                       tcp_mask->hdr.cksum ||
+                                       tcp_mask->hdr.tcp_urp) {
+                                       rte_flow_error_set(error, EINVAL,
+                                               RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                               "Invalid TCP mask");
+                                       return -rte_errno;
+                               }
+
+                               if (tcp_mask->hdr.src_port == UINT16_MAX) {
+                                       input_set |= IAVF_INSET_TCP_SRC_PORT;
+                                       VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, SRC_PORT);
+                               }
+                               if (tcp_mask->hdr.dst_port == UINT16_MAX) {
+                                       input_set |= IAVF_INSET_TCP_DST_PORT;
+                                       VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, TCP, DST_PORT);
+                               }
+
+                               if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+                                       rte_memcpy(hdr->buffer,
+                                               &tcp_spec->hdr,
+                                               sizeof(tcp_spec->hdr));
+                               else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
+                                       rte_memcpy(hdr->buffer,
+                                               &tcp_spec->hdr,
+                                               sizeof(tcp_spec->hdr));
+                       }
+
+                       filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
+                       break;
+
+               case RTE_FLOW_ITEM_TYPE_SCTP:
+                       sctp_spec = item->spec;
+                       sctp_mask = item->mask;
+
+                       hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
+
+                       VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, SCTP);
+
+                       if (sctp_spec && sctp_mask) {
+                               if (sctp_mask->hdr.cksum) {
+                                       rte_flow_error_set(error, EINVAL,
+                                               RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                               "Invalid SCTP mask");
+                                       return -rte_errno;
+                               }
+
+                               if (sctp_mask->hdr.src_port == UINT16_MAX) {
+                                       input_set |= IAVF_INSET_SCTP_SRC_PORT;
+                                       VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, SRC_PORT);
+                               }
+                               if (sctp_mask->hdr.dst_port == UINT16_MAX) {
+                                       input_set |= IAVF_INSET_SCTP_DST_PORT;
+                                       VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, SCTP, DST_PORT);
+                               }
+
+                               if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
+                                       rte_memcpy(hdr->buffer,
+                                               &sctp_spec->hdr,
+                                               sizeof(sctp_spec->hdr));
+                               else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
+                                       rte_memcpy(hdr->buffer,
+                                               &sctp_spec->hdr,
+                                               sizeof(sctp_spec->hdr));
+                       }
+
+                       filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
+                       break;
+
+               case RTE_FLOW_ITEM_TYPE_GTPU:
+                       gtp_spec = item->spec;
+                       gtp_mask = item->mask;
+
+                       hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
+
+                       VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_IP);
+
+                       if (gtp_spec && gtp_mask) {
+                               if (gtp_mask->v_pt_rsv_flags ||
+                                       gtp_mask->msg_type ||
+                                       gtp_mask->msg_len) {
+                                       rte_flow_error_set(error, EINVAL,
+                                               RTE_FLOW_ERROR_TYPE_ITEM,
+                                               item, "Invalid GTP mask");
+                                       return -rte_errno;
+                               }
+
+                               if (gtp_mask->teid == UINT32_MAX) {
+                                       input_set |= IAVF_INSET_GTPU_TEID;
+                                       VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, GTPU_IP, TEID);
+                               }
+
+                               rte_memcpy(hdr->buffer,
+                                       gtp_spec, sizeof(*gtp_spec));
+                       }
+
+                       filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
+                       break;
+
+               case RTE_FLOW_ITEM_TYPE_GTP_PSC:
+                       gtp_psc_spec = item->spec;
+                       gtp_psc_mask = item->mask;
+
+                       hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
+
+                       VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, GTPU_EH);
+
+                       if (gtp_psc_spec && gtp_psc_mask) {
+                               if (gtp_psc_mask->qfi == UINT8_MAX) {
+                                       input_set |= IAVF_INSET_GTPU_QFI;
+                                       VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, GTPU_EH, QFI);
+                               }
+
+                               rte_memcpy(hdr->buffer, gtp_psc_spec,
+                                       sizeof(*gtp_psc_spec));
+                       }
+
+                       filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
+                       break;
+
+               case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
+                       l2tpv3oip_spec = item->spec;
+                       l2tpv3oip_mask = item->mask;
+
+                       hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
+
+                       VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, L2TPV3);
+
+                       if (l2tpv3oip_spec && l2tpv3oip_mask) {
+                               if (l2tpv3oip_mask->session_id == UINT32_MAX) {
+                                       input_set |= IAVF_L2TPV3OIP_SESSION_ID;
+                                       VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, L2TPV3, SESS_ID);
+                               }
+
+                               rte_memcpy(hdr->buffer, l2tpv3oip_spec,
+                                       sizeof(*l2tpv3oip_spec));
+                       }
+
+                       filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
+                       break;
+
+               case RTE_FLOW_ITEM_TYPE_ESP:
+                       esp_spec = item->spec;
+                       esp_mask = item->mask;
+
+                       hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
+
+                       VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, ESP);
+
+                       if (esp_spec && esp_mask) {
+                               if (esp_mask->hdr.spi == UINT32_MAX) {
+                                       input_set |= IAVF_INSET_ESP_SPI;
+                                       VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, ESP, SPI);
+                               }
+
+                               rte_memcpy(hdr->buffer, &esp_spec->hdr,
+                                       sizeof(esp_spec->hdr));
+                       }
+
+                       filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
+                       break;
+
+               case RTE_FLOW_ITEM_TYPE_AH:
+                       ah_spec = item->spec;
+                       ah_mask = item->mask;
+
+                       hdr = &filter->add_fltr.rule_cfg.proto_hdrs.proto_hdr[layer];
+
+                       VIRTCHNL_SET_PROTO_HDR_TYPE(hdr, AH);
+
+                       if (ah_spec && ah_mask) {
+                               if (ah_mask->spi == UINT32_MAX) {
+                                       input_set |= IAVF_INSET_AH_SPI;
+                                       VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, AH, SPI);
+                               }
+
+                               rte_memcpy(hdr->buffer, ah_spec,
+                                       sizeof(*ah_spec));
+                       }
+
+                       filter->add_fltr.rule_cfg.proto_hdrs.count = ++layer;
+                       break;
+
+               case RTE_FLOW_ITEM_TYPE_VOID:
+                       break;
+
+               default:
+                       rte_flow_error_set(error, EINVAL,
+                                       RTE_FLOW_ERROR_TYPE_ITEM, item,
+                                       "Invalid pattern item.");
+                       return -rte_errno;
+               }
+       }
+
+       if (layer > VIRTCHNL_MAX_NUM_PROTO_HDRS) {
+               rte_flow_error_set(error, EINVAL,
+                       RTE_FLOW_ERROR_TYPE_ITEM, item,
+                       "Protocol header layers exceed the maximum value");
+               return -rte_errno;
+       }
+
+       filter->input_set = input_set;
+
+       return 0;
+}
+
+static int
+iavf_fdir_parse(struct iavf_adapter *ad,
+               struct iavf_pattern_match_item *array,
+               uint32_t array_len,
+               const struct rte_flow_item pattern[],
+               const struct rte_flow_action actions[],
+               void **meta,
+               struct rte_flow_error *error)
+{
+       struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
+       struct iavf_fdir_conf *filter = &vf->fdir.conf;
+       struct iavf_pattern_match_item *item = NULL;
+       uint64_t input_set;
+       int ret;
+
+       memset(filter, 0, sizeof(*filter));
+
+       item = iavf_search_pattern_match_item(pattern, array, array_len, error);
+       if (!item)
+               return -rte_errno;
+
+       ret = iavf_fdir_parse_pattern(ad, pattern, error, filter);
+       if (ret)
+               goto error;
+
+       input_set = filter->input_set;
+       if (!input_set || input_set & ~item->input_set_mask) {
+               rte_flow_error_set(error, EINVAL,
+                               RTE_FLOW_ERROR_TYPE_ITEM_SPEC, pattern,
+                               "Invalid input set");
+               ret = -rte_errno;
+               goto error;
+       }
+
+       ret = iavf_fdir_parse_action(ad, actions, error, filter);
+       if (ret)
+               goto error;
+
+       if (meta)
+               *meta = filter;
+
+error:
+       rte_free(item);
+       return ret;
+}
+
+static struct iavf_flow_parser iavf_fdir_parser = {
+       .engine = &iavf_fdir_engine,
+       .array = iavf_fdir_pattern,
+       .array_len = RTE_DIM(iavf_fdir_pattern),
+       .parse_pattern_action = iavf_fdir_parse,
+       .stage = IAVF_FLOW_STAGE_DISTRIBUTOR,
+};
+
+RTE_INIT(iavf_fdir_engine_register)
+{
+       iavf_register_flow_engine(&iavf_fdir_engine);
+}
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index 3f0d23a92..25e490bc4 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -340,7 +340,8 @@ iavf_get_vf_resource(struct iavf_adapter *adapter)
         */
 
        caps = IAVF_BASIC_OFFLOAD_CAPS | VIRTCHNL_VF_CAP_ADV_LINK_SPEED |
-               VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC;
+               VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC |
+               VIRTCHNL_VF_OFFLOAD_FDIR_PF;
 
        args.in_args = (uint8_t *)&caps;
        args.in_args_size = sizeof(caps);
@@ -842,3 +843,154 @@ iavf_add_del_vlan(struct iavf_adapter *adapter, uint16_t vlanid, bool add)
 
        return err;
 }
+
+int
+iavf_fdir_add(struct iavf_adapter *adapter,
+       struct iavf_fdir_conf *filter)
+{
+       struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+       struct virtchnl_fdir_add *fdir_ret;
+
+       struct iavf_cmd_info args;
+       int err;
+
+       filter->add_fltr.vsi_id = vf->vsi_res->vsi_id;
+       filter->add_fltr.validate_only = 0;
+
+       args.ops = VIRTCHNL_OP_ADD_FDIR_FILTER;
+       args.in_args = (uint8_t *)(&filter->add_fltr);
+       args.in_args_size = sizeof(filter->add_fltr);
+       args.out_buffer = vf->aq_resp;
+       args.out_size = IAVF_AQ_BUF_SZ;
+
+       err = iavf_execute_vf_cmd(adapter, &args);
+       if (err) {
+               PMD_DRV_LOG(ERR, "failed to execute command OP_ADD_FDIR_FILTER");
+               return err;
+       }
+
+       fdir_ret = (struct virtchnl_fdir_add *)args.out_buffer;
+       filter->flow_id = fdir_ret->flow_id;
+
+       if (fdir_ret->status == VIRTCHNL_FDIR_SUCCESS) {
+               PMD_DRV_LOG(INFO,
+                       "add rule request successfully done by PF");
+       } else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_NORESOURCE) {
+               PMD_DRV_LOG(ERR,
+                       "add rule request failed due to no hw resource");
+               return -1;
+       } else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_EXIST) {
+               PMD_DRV_LOG(ERR,
+                       "add rule request failed because the rule already exists");
+               return -1;
+       } else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_CONFLICT) {
+               PMD_DRV_LOG(ERR,
+                       "add rule request failed because the rule conflicts with an existing rule");
+               return -1;
+       } else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_INVALID) {
+               PMD_DRV_LOG(ERR,
+                       "add rule request failed because the hw doesn't support it");
+               return -1;
+       } else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT) {
+               PMD_DRV_LOG(ERR,
+                       "add rule request failed due to a programming timeout");
+               return -1;
+       } else {
+               PMD_DRV_LOG(ERR,
+                       "add rule request failed due to other reasons");
+               return -1;
+       }
+
+       return 0;
+}
+
+int
+iavf_fdir_del(struct iavf_adapter *adapter,
+       struct iavf_fdir_conf *filter)
+{
+       struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+       struct virtchnl_fdir_del *fdir_ret;
+
+       struct iavf_cmd_info args;
+       int err;
+
+       filter->del_fltr.vsi_id = vf->vsi_res->vsi_id;
+       filter->del_fltr.flow_id = filter->flow_id;
+
+       args.ops = VIRTCHNL_OP_DEL_FDIR_FILTER;
+       args.in_args = (uint8_t *)(&filter->del_fltr);
+       args.in_args_size = sizeof(filter->del_fltr);
+       args.out_buffer = vf->aq_resp;
+       args.out_size = IAVF_AQ_BUF_SZ;
+
+       err = iavf_execute_vf_cmd(adapter, &args);
+       if (err) {
+               PMD_DRV_LOG(ERR, "failed to execute command OP_DEL_FDIR_FILTER");
+               return err;
+       }
+
+       fdir_ret = (struct virtchnl_fdir_del *)args.out_buffer;
+
+       if (fdir_ret->status == VIRTCHNL_FDIR_SUCCESS) {
+               PMD_DRV_LOG(INFO,
+                       "delete rule request successfully done by PF");
+       } else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_NONEXIST) {
+               PMD_DRV_LOG(ERR,
+                       "delete rule request failed because this rule doesn't exist");
+               return -1;
+       } else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_TIMEOUT) {
+               PMD_DRV_LOG(ERR,
+                       "delete rule request failed due to a programming timeout");
+               return -1;
+       } else {
+               PMD_DRV_LOG(ERR,
+                       "delete rule request failed due to other reasons");
+               return -1;
+       }
+
+       return 0;
+}
+
+int
+iavf_fdir_check(struct iavf_adapter *adapter,
+               struct iavf_fdir_conf *filter)
+{
+       struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+       struct virtchnl_fdir_add *fdir_ret;
+
+       struct iavf_cmd_info args;
+       int err;
+
+       filter->add_fltr.vsi_id = vf->vsi_res->vsi_id;
+       filter->add_fltr.validate_only = 1;
+
+       args.ops = VIRTCHNL_OP_ADD_FDIR_FILTER;
+       args.in_args = (uint8_t *)(&filter->add_fltr);
+       args.in_args_size = sizeof(filter->add_fltr);
+       args.out_buffer = vf->aq_resp;
+       args.out_size = IAVF_AQ_BUF_SZ;
+
+       err = iavf_execute_vf_cmd(adapter, &args);
+       if (err) {
+               PMD_DRV_LOG(ERR, "failed to check flow director rule");
+               return err;
+       }
+
+       fdir_ret = (struct virtchnl_fdir_add *)args.out_buffer;
+
+       if (fdir_ret->status == VIRTCHNL_FDIR_SUCCESS) {
+               PMD_DRV_LOG(INFO,
+                       "check rule request successfully done by PF");
+       } else if (fdir_ret->status == VIRTCHNL_FDIR_FAILURE_RULE_INVALID) {
+               PMD_DRV_LOG(ERR,
+                       "check rule request failed due to parameter validation"
+                       " or unsupported HW features");
+               return -1;
+       } else {
+               PMD_DRV_LOG(ERR,
+                       "check rule request failed due to other reasons");
+               return -1;
+       }
+
+       return 0;
+}
diff --git a/drivers/net/iavf/meson.build b/drivers/net/iavf/meson.build
index 32eabca4b..ce71054fb 100644
--- a/drivers/net/iavf/meson.build
+++ b/drivers/net/iavf/meson.build
@@ -13,6 +13,7 @@ sources = files(
        'iavf_rxtx.c',
        'iavf_vchnl.c',
        'iavf_generic_flow.c',
+       'iavf_fdir.c',
 )
 
 if arch_subdir == 'x86'
-- 
2.17.1
