/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2008-2017 Cisco Systems, Inc.  All rights reserved.
 */

#include <errno.h>
#include <stdint.h>
#include <rte_log.h>
#include <rte_ethdev_driver.h>
#include <rte_flow_driver.h>
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_udp.h>

#include "enic_compat.h"
#include "enic.h"
#include "vnic_dev.h"
#include "vnic_nic.h"

#define FLOW_TRACE() \
        rte_log(RTE_LOG_DEBUG, enicpmd_logtype_flow, \
                "%s()\n", __func__)
#define FLOW_LOG(level, fmt, args...) \
        rte_log(RTE_LOG_ ## level, enicpmd_logtype_flow, \
                fmt "\n", ##args)

/*
 * Common arguments passed to copy_item functions. Use this structure
 * so we can easily add new arguments.
 * item: Item specification.
 * filter: Partially filled in NIC filter structure.
 * inner_ofst: If zero, this is an outer header. If non-zero, this is
 *   the offset into L5 where the header begins.
 * l2_proto_off: offset to the EtherType field in the eth or vlan header.
 * l3_proto_off: offset to the next-protocol field in the IPv4 or IPv6 header.
 */
struct copy_item_args {
        const struct rte_flow_item *item;
        struct filter_v2 *filter;
        uint8_t *inner_ofst;
        uint8_t l2_proto_off;
        uint8_t l3_proto_off;
        struct enic *enic;
};

/* functions for copying items into enic filters */
typedef int (enic_copy_item_fn)(struct copy_item_args *arg);

/** Info about how to copy items into enic filters. */
struct enic_items {
        /** Function for copying and validating an item. */
        enic_copy_item_fn *copy_item;
        /** List of valid previous items. */
        const enum rte_flow_item_type * const prev_items;
        /** True if it's OK for this item to be the first item. For some NIC
         * versions, it's invalid to start the stack above layer 3.
         */
        const u8 valid_start_item;
        /* Inner packet version of copy_item. */
        enic_copy_item_fn *inner_copy_item;
};

/** Filtering capabilities for various NIC and firmware versions. */
struct enic_filter_cap {
        /** list of valid items and their handlers and attributes. */
        const struct enic_items *item_info;
        /* Max type in the above list, used to detect unsupported types */
        enum rte_flow_item_type max_item_type;
};

/* functions for copying flow actions into enic actions */
typedef int (copy_action_fn)(const struct rte_flow_action actions[],
                             struct filter_action_v2 *enic_action);

/** Action capabilities for various NICs. */
struct enic_action_cap {
        /** list of valid actions */
        const enum rte_flow_action_type *actions;
        /** copy function for a particular NIC */
        int (*copy_fn)(const struct rte_flow_action actions[],
                       struct filter_action_v2 *enic_action);
};

/* Forward declarations */
static enic_copy_item_fn enic_copy_item_ipv4_v1;
static enic_copy_item_fn enic_copy_item_udp_v1;
static enic_copy_item_fn enic_copy_item_tcp_v1;
static enic_copy_item_fn enic_copy_item_eth_v2;
static enic_copy_item_fn enic_copy_item_vlan_v2;
static enic_copy_item_fn enic_copy_item_ipv4_v2;
static enic_copy_item_fn enic_copy_item_ipv6_v2;
static enic_copy_item_fn enic_copy_item_udp_v2;
static enic_copy_item_fn enic_copy_item_tcp_v2;
static enic_copy_item_fn enic_copy_item_sctp_v2;
static enic_copy_item_fn enic_copy_item_vxlan_v2;
static enic_copy_item_fn enic_copy_item_inner_eth_v2;
static enic_copy_item_fn enic_copy_item_inner_vlan_v2;
static enic_copy_item_fn enic_copy_item_inner_ipv4_v2;
static enic_copy_item_fn enic_copy_item_inner_ipv6_v2;
static enic_copy_item_fn enic_copy_item_inner_udp_v2;
static enic_copy_item_fn enic_copy_item_inner_tcp_v2;
static copy_action_fn enic_copy_action_v1;
static copy_action_fn enic_copy_action_v2;

/**
 * Legacy NICs or NICs with outdated firmware. Only 5-tuple perfect match
 * is supported.
 */
static const struct enic_items enic_items_v1[] = {
        [RTE_FLOW_ITEM_TYPE_IPV4] = {
                .copy_item = enic_copy_item_ipv4_v1,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_END,
                },
                .inner_copy_item = NULL,
        },
        [RTE_FLOW_ITEM_TYPE_UDP] = {
                .copy_item = enic_copy_item_udp_v1,
                .valid_start_item = 0,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_END,
                },
                .inner_copy_item = NULL,
        },
        [RTE_FLOW_ITEM_TYPE_TCP] = {
                .copy_item = enic_copy_item_tcp_v1,
                .valid_start_item = 0,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_END,
                },
                .inner_copy_item = NULL,
        },
};
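
/*
 * Illustrative sketch, not part of the driver: the v1 table above only
 * accepts an exact-match IPv4 5-tuple, so an application's pattern would
 * look roughly like the following. The function name and all addresses
 * and ports are hypothetical.
 */
static __rte_unused void
enic_example_v1_pattern(void)
{
        static const struct rte_flow_item_ipv4 ip_spec = {
                .hdr = {
                        .src_addr = RTE_BE32(0x0a000001), /* 10.0.0.1 */
                        .dst_addr = RTE_BE32(0x0a000002), /* 10.0.0.2 */
                },
        };
        static const struct rte_flow_item_udp udp_spec = {
                .hdr = {
                        .src_port = RTE_BE16(1000),
                        .dst_port = RTE_BE16(2000),
                },
        };
        /*
         * IPv4 may start the pattern (valid_start_item = 1); UDP must
         * follow IPv4 (prev_items). Masks left NULL default to the
         * rte_flow item masks, which match the v1 capability exactly.
         */
        static const struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &ip_spec },
                { .type = RTE_FLOW_ITEM_TYPE_UDP, .spec = &udp_spec },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        RTE_SET_USED(pattern);
}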

/**
 * NICs with the Advanced Filters capability, but with it disabled. Layer 3
 * must be specified in this mode.
 */
static const struct enic_items enic_items_v2[] = {
        [RTE_FLOW_ITEM_TYPE_ETH] = {
                .copy_item = enic_copy_item_eth_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_VXLAN,
                               RTE_FLOW_ITEM_TYPE_END,
                },
                .inner_copy_item = enic_copy_item_inner_eth_v2,
        },
        [RTE_FLOW_ITEM_TYPE_VLAN] = {
                .copy_item = enic_copy_item_vlan_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_ETH,
                               RTE_FLOW_ITEM_TYPE_END,
                },
                .inner_copy_item = enic_copy_item_inner_vlan_v2,
        },
        [RTE_FLOW_ITEM_TYPE_IPV4] = {
                .copy_item = enic_copy_item_ipv4_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_ETH,
                               RTE_FLOW_ITEM_TYPE_VLAN,
                               RTE_FLOW_ITEM_TYPE_END,
                },
                .inner_copy_item = enic_copy_item_inner_ipv4_v2,
        },
        [RTE_FLOW_ITEM_TYPE_IPV6] = {
                .copy_item = enic_copy_item_ipv6_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_ETH,
                               RTE_FLOW_ITEM_TYPE_VLAN,
                               RTE_FLOW_ITEM_TYPE_END,
                },
                .inner_copy_item = enic_copy_item_inner_ipv6_v2,
        },
        [RTE_FLOW_ITEM_TYPE_UDP] = {
                .copy_item = enic_copy_item_udp_v2,
                .valid_start_item = 0,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_IPV6,
                               RTE_FLOW_ITEM_TYPE_END,
                },
                .inner_copy_item = enic_copy_item_inner_udp_v2,
        },
        [RTE_FLOW_ITEM_TYPE_TCP] = {
                .copy_item = enic_copy_item_tcp_v2,
                .valid_start_item = 0,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_IPV6,
                               RTE_FLOW_ITEM_TYPE_END,
                },
                .inner_copy_item = enic_copy_item_inner_tcp_v2,
        },
        [RTE_FLOW_ITEM_TYPE_SCTP] = {
                .copy_item = enic_copy_item_sctp_v2,
                .valid_start_item = 0,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_IPV6,
                               RTE_FLOW_ITEM_TYPE_END,
                },
                .inner_copy_item = NULL,
        },
        [RTE_FLOW_ITEM_TYPE_VXLAN] = {
                .copy_item = enic_copy_item_vxlan_v2,
                .valid_start_item = 0,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_UDP,
                               RTE_FLOW_ITEM_TYPE_END,
                },
                .inner_copy_item = NULL,
        },
};

/** NICs with Advanced filters enabled */
static const struct enic_items enic_items_v3[] = {
        [RTE_FLOW_ITEM_TYPE_ETH] = {
                .copy_item = enic_copy_item_eth_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_VXLAN,
                               RTE_FLOW_ITEM_TYPE_END,
                },
                .inner_copy_item = enic_copy_item_inner_eth_v2,
        },
        [RTE_FLOW_ITEM_TYPE_VLAN] = {
                .copy_item = enic_copy_item_vlan_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_ETH,
                               RTE_FLOW_ITEM_TYPE_END,
                },
                .inner_copy_item = enic_copy_item_inner_vlan_v2,
        },
        [RTE_FLOW_ITEM_TYPE_IPV4] = {
                .copy_item = enic_copy_item_ipv4_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_ETH,
                               RTE_FLOW_ITEM_TYPE_VLAN,
                               RTE_FLOW_ITEM_TYPE_END,
                },
                .inner_copy_item = enic_copy_item_inner_ipv4_v2,
        },
        [RTE_FLOW_ITEM_TYPE_IPV6] = {
                .copy_item = enic_copy_item_ipv6_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_ETH,
                               RTE_FLOW_ITEM_TYPE_VLAN,
                               RTE_FLOW_ITEM_TYPE_END,
                },
                .inner_copy_item = enic_copy_item_inner_ipv6_v2,
        },
        [RTE_FLOW_ITEM_TYPE_UDP] = {
                .copy_item = enic_copy_item_udp_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_IPV6,
                               RTE_FLOW_ITEM_TYPE_END,
                },
                .inner_copy_item = enic_copy_item_inner_udp_v2,
        },
        [RTE_FLOW_ITEM_TYPE_TCP] = {
                .copy_item = enic_copy_item_tcp_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_IPV6,
                               RTE_FLOW_ITEM_TYPE_END,
                },
                .inner_copy_item = enic_copy_item_inner_tcp_v2,
        },
        [RTE_FLOW_ITEM_TYPE_SCTP] = {
                .copy_item = enic_copy_item_sctp_v2,
                .valid_start_item = 0,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_IPV6,
                               RTE_FLOW_ITEM_TYPE_END,
                },
                .inner_copy_item = NULL,
        },
        [RTE_FLOW_ITEM_TYPE_VXLAN] = {
                .copy_item = enic_copy_item_vxlan_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_UDP,
                               RTE_FLOW_ITEM_TYPE_END,
                },
                .inner_copy_item = NULL,
        },
};

/** Filtering capabilities indexed by the NIC's supported filter type. */
static const struct enic_filter_cap enic_filter_cap[] = {
        [FILTER_IPV4_5TUPLE] = {
                .item_info = enic_items_v1,
                .max_item_type = RTE_FLOW_ITEM_TYPE_TCP,
        },
        [FILTER_USNIC_IP] = {
                .item_info = enic_items_v2,
                .max_item_type = RTE_FLOW_ITEM_TYPE_VXLAN,
        },
        [FILTER_DPDK_1] = {
                .item_info = enic_items_v3,
                .max_item_type = RTE_FLOW_ITEM_TYPE_VXLAN,
        },
};

/** Supported actions for older NICs */
static const enum rte_flow_action_type enic_supported_actions_v1[] = {
        RTE_FLOW_ACTION_TYPE_QUEUE,
        RTE_FLOW_ACTION_TYPE_END,
};

/** Supported actions for newer NICs */
static const enum rte_flow_action_type enic_supported_actions_v2_id[] = {
        RTE_FLOW_ACTION_TYPE_QUEUE,
        RTE_FLOW_ACTION_TYPE_MARK,
        RTE_FLOW_ACTION_TYPE_FLAG,
        RTE_FLOW_ACTION_TYPE_END,
};

static const enum rte_flow_action_type enic_supported_actions_v2_drop[] = {
        RTE_FLOW_ACTION_TYPE_QUEUE,
        RTE_FLOW_ACTION_TYPE_MARK,
        RTE_FLOW_ACTION_TYPE_FLAG,
        RTE_FLOW_ACTION_TYPE_DROP,
        RTE_FLOW_ACTION_TYPE_END,
};

static const enum rte_flow_action_type enic_supported_actions_v2_count[] = {
        RTE_FLOW_ACTION_TYPE_QUEUE,
        RTE_FLOW_ACTION_TYPE_MARK,
        RTE_FLOW_ACTION_TYPE_FLAG,
        RTE_FLOW_ACTION_TYPE_DROP,
        RTE_FLOW_ACTION_TYPE_COUNT,
        RTE_FLOW_ACTION_TYPE_END,
};

/** Action capabilities indexed by NIC version information */
static const struct enic_action_cap enic_action_cap[] = {
        [FILTER_ACTION_RQ_STEERING_FLAG] = {
                .actions = enic_supported_actions_v1,
                .copy_fn = enic_copy_action_v1,
        },
        [FILTER_ACTION_FILTER_ID_FLAG] = {
                .actions = enic_supported_actions_v2_id,
                .copy_fn = enic_copy_action_v2,
        },
        [FILTER_ACTION_DROP_FLAG] = {
                .actions = enic_supported_actions_v2_drop,
                .copy_fn = enic_copy_action_v2,
        },
        [FILTER_ACTION_COUNTER_FLAG] = {
                .actions = enic_supported_actions_v2_count,
                .copy_fn = enic_copy_action_v2,
        },
};
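
/*
 * Illustrative sketch, not part of the driver: with the v2_count action
 * set above, one rule can steer, mark, and count at the same time. The
 * function name and all values are hypothetical.
 */
static __rte_unused void
enic_example_v2_actions(void)
{
        static const struct rte_flow_action_queue queue = { .index = 3 };
        static const struct rte_flow_action_mark mark = { .id = 42 };
        static const struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
                { .type = RTE_FLOW_ACTION_TYPE_COUNT },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        RTE_SET_USED(actions);
}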

static int
mask_exact_match(const u8 *supported, const u8 *supplied,
                 unsigned int size)
{
        unsigned int i;
        for (i = 0; i < size; i++) {
                if (supported[i] != supplied[i])
                        return 0;
        }
        return 1;
}

static int
enic_copy_item_ipv4_v1(struct copy_item_args *arg)
{
        const struct rte_flow_item *item = arg->item;
        struct filter_v2 *enic_filter = arg->filter;
        const struct rte_flow_item_ipv4 *spec = item->spec;
        const struct rte_flow_item_ipv4 *mask = item->mask;
        struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4;
        struct ipv4_hdr supported_mask = {
                .src_addr = 0xffffffff,
                .dst_addr = 0xffffffff,
        };

        FLOW_TRACE();

        if (!mask)
                mask = &rte_flow_item_ipv4_mask;

        /* This is an exact match filter; both fields must be set */
        if (!spec || !spec->hdr.src_addr || !spec->hdr.dst_addr) {
                FLOW_LOG(ERR, "IPv4 exact match src/dst addr");
                return ENOTSUP;
        }

        /* check that the supplied mask exactly matches the capability */
        if (!mask_exact_match((const u8 *)&supported_mask,
                              (const u8 *)mask, sizeof(*mask))) {
                FLOW_LOG(ERR, "IPv4 exact match mask");
                return ENOTSUP;
        }

        enic_filter->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
        enic_5tup->src_addr = spec->hdr.src_addr;
        enic_5tup->dst_addr = spec->hdr.dst_addr;

        return 0;
}

static int
enic_copy_item_udp_v1(struct copy_item_args *arg)
{
        const struct rte_flow_item *item = arg->item;
        struct filter_v2 *enic_filter = arg->filter;
        const struct rte_flow_item_udp *spec = item->spec;
        const struct rte_flow_item_udp *mask = item->mask;
        struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4;
        struct udp_hdr supported_mask = {
                .src_port = 0xffff,
                .dst_port = 0xffff,
        };

        FLOW_TRACE();

        if (!mask)
                mask = &rte_flow_item_udp_mask;

        /* This is an exact match filter; both ports must be set */
        if (!spec || !spec->hdr.src_port || !spec->hdr.dst_port) {
                FLOW_LOG(ERR, "UDP exact match src/dst port");
                return ENOTSUP;
        }

        /* check that the supplied mask exactly matches the capability */
        if (!mask_exact_match((const u8 *)&supported_mask,
                              (const u8 *)mask, sizeof(*mask))) {
                FLOW_LOG(ERR, "UDP exact match mask");
                return ENOTSUP;
        }

        enic_filter->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
        enic_5tup->src_port = spec->hdr.src_port;
        enic_5tup->dst_port = spec->hdr.dst_port;
        enic_5tup->protocol = PROTO_UDP;

        return 0;
}

static int
enic_copy_item_tcp_v1(struct copy_item_args *arg)
{
        const struct rte_flow_item *item = arg->item;
        struct filter_v2 *enic_filter = arg->filter;
        const struct rte_flow_item_tcp *spec = item->spec;
        const struct rte_flow_item_tcp *mask = item->mask;
        struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4;
        struct tcp_hdr supported_mask = {
                .src_port = 0xffff,
                .dst_port = 0xffff,
        };

        FLOW_TRACE();

        if (!mask)
                mask = &rte_flow_item_tcp_mask;

        /* This is an exact match filter; both ports must be set */
        if (!spec || !spec->hdr.src_port || !spec->hdr.dst_port) {
                FLOW_LOG(ERR, "TCP exact match src/dst port");
                return ENOTSUP;
        }

        /* check that the supplied mask exactly matches the capability */
        if (!mask_exact_match((const u8 *)&supported_mask,
                             (const u8 *)mask, sizeof(*mask))) {
                FLOW_LOG(ERR, "TCP exact match mask");
                return ENOTSUP;
        }

        enic_filter->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
        enic_5tup->src_port = spec->hdr.src_port;
        enic_5tup->dst_port = spec->hdr.dst_port;
        enic_5tup->protocol = PROTO_TCP;

        return 0;
}

/*
 * The common 'copy' function for all inner packet patterns. Patterns are
 * first appended to the L5 pattern buffer. Then, since the NIC filter
 * API has no special support for inner packet matching at the moment,
 * we set EtherType and IP proto as necessary.
 */
static int
copy_inner_common(struct filter_generic_1 *gp, uint8_t *inner_ofst,
                  const void *val, const void *mask, uint8_t val_size,
                  uint8_t proto_off, uint16_t proto_val, uint8_t proto_size)
{
        uint8_t *l5_mask, *l5_val;
        uint8_t start_off;

        /* No space left in the L5 pattern buffer. */
        start_off = *inner_ofst;
        if ((start_off + val_size) > FILTER_GENERIC_1_KEY_LEN)
                return ENOTSUP;
        l5_mask = gp->layer[FILTER_GENERIC_1_L5].mask;
        l5_val = gp->layer[FILTER_GENERIC_1_L5].val;
        /* Copy the pattern into the L5 buffer. */
        if (val) {
                memcpy(l5_mask + start_off, mask, val_size);
                memcpy(l5_val + start_off, val, val_size);
        }
        /* Set the protocol field in the previous header. */
        if (proto_off) {
                void *m, *v;

                m = l5_mask + proto_off;
                v = l5_val + proto_off;
                if (proto_size == 1) {
                        *(uint8_t *)m = 0xff;
                        *(uint8_t *)v = (uint8_t)proto_val;
                } else if (proto_size == 2) {
                        *(uint16_t *)m = 0xffff;
                        *(uint16_t *)v = proto_val;
                }
        }
        /* All inner headers land in L5 buffer even if their spec is null. */
        *inner_ofst += val_size;
        return 0;
}
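
/*
 * Worked example with hypothetical values: for an inner eth + ipv4 match
 * behind vxlan, the vxlan item first sets *inner_ofst to 8 (the vxlan
 * header size). copy_inner_common() then places the inner eth header at
 * L5 offset 8 and advances *inner_ofst to 22; the inner EtherType field
 * ends up at offset 8 + 12 = 20 (l2_proto_off). The inner ipv4 header is
 * placed at offset 22, the EtherType at offset 20 is forced to 0x0800
 * with an all-ones mask, and *inner_ofst advances to 42.
 */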

static int
enic_copy_item_inner_eth_v2(struct copy_item_args *arg)
{
        const void *mask = arg->item->mask;
        uint8_t *off = arg->inner_ofst;

        FLOW_TRACE();
        if (!mask)
                mask = &rte_flow_item_eth_mask;
        arg->l2_proto_off = *off + offsetof(struct ether_hdr, ether_type);
        return copy_inner_common(&arg->filter->u.generic_1, off,
                arg->item->spec, mask, sizeof(struct ether_hdr),
                0 /* no previous protocol */, 0, 0);
}

static int
enic_copy_item_inner_vlan_v2(struct copy_item_args *arg)
{
        const void *mask = arg->item->mask;
        uint8_t *off = arg->inner_ofst;
        uint8_t eth_type_off;

        FLOW_TRACE();
        if (!mask)
                mask = &rte_flow_item_vlan_mask;
        /* Append vlan header to L5 and set ether type = TPID */
        eth_type_off = arg->l2_proto_off;
        arg->l2_proto_off = *off + offsetof(struct vlan_hdr, eth_proto);
        return copy_inner_common(&arg->filter->u.generic_1, off,
                arg->item->spec, mask, sizeof(struct vlan_hdr),
                eth_type_off, rte_cpu_to_be_16(ETHER_TYPE_VLAN), 2);
}

static int
enic_copy_item_inner_ipv4_v2(struct copy_item_args *arg)
{
        const void *mask = arg->item->mask;
        uint8_t *off = arg->inner_ofst;

        FLOW_TRACE();
        if (!mask)
                mask = &rte_flow_item_ipv4_mask;
        /* Append ipv4 header to L5 and set ether type = ipv4 */
        arg->l3_proto_off = *off + offsetof(struct ipv4_hdr, next_proto_id);
        return copy_inner_common(&arg->filter->u.generic_1, off,
                arg->item->spec, mask, sizeof(struct ipv4_hdr),
                arg->l2_proto_off, rte_cpu_to_be_16(ETHER_TYPE_IPv4), 2);
}

static int
enic_copy_item_inner_ipv6_v2(struct copy_item_args *arg)
{
        const void *mask = arg->item->mask;
        uint8_t *off = arg->inner_ofst;

        FLOW_TRACE();
        if (!mask)
                mask = &rte_flow_item_ipv6_mask;
        /* Append ipv6 header to L5 and set ether type = ipv6 */
        arg->l3_proto_off = *off + offsetof(struct ipv6_hdr, proto);
        return copy_inner_common(&arg->filter->u.generic_1, off,
                arg->item->spec, mask, sizeof(struct ipv6_hdr),
                arg->l2_proto_off, rte_cpu_to_be_16(ETHER_TYPE_IPv6), 2);
}

static int
enic_copy_item_inner_udp_v2(struct copy_item_args *arg)
{
        const void *mask = arg->item->mask;
        uint8_t *off = arg->inner_ofst;

        FLOW_TRACE();
        if (!mask)
                mask = &rte_flow_item_udp_mask;
        /* Append udp header to L5 and set ip proto = udp */
        return copy_inner_common(&arg->filter->u.generic_1, off,
                arg->item->spec, mask, sizeof(struct udp_hdr),
                arg->l3_proto_off, IPPROTO_UDP, 1);
}

static int
enic_copy_item_inner_tcp_v2(struct copy_item_args *arg)
{
        const void *mask = arg->item->mask;
        uint8_t *off = arg->inner_ofst;

        FLOW_TRACE();
        if (!mask)
                mask = &rte_flow_item_tcp_mask;
        /* Append tcp header to L5 and set ip proto = tcp */
        return copy_inner_common(&arg->filter->u.generic_1, off,
                arg->item->spec, mask, sizeof(struct tcp_hdr),
                arg->l3_proto_off, IPPROTO_TCP, 1);
}

static int
enic_copy_item_eth_v2(struct copy_item_args *arg)
{
        const struct rte_flow_item *item = arg->item;
        struct filter_v2 *enic_filter = arg->filter;
        struct ether_hdr enic_spec;
        struct ether_hdr enic_mask;
        const struct rte_flow_item_eth *spec = item->spec;
        const struct rte_flow_item_eth *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;

        FLOW_TRACE();

        /* Match all if no spec */
        if (!spec)
                return 0;

        if (!mask)
                mask = &rte_flow_item_eth_mask;

        memcpy(enic_spec.d_addr.addr_bytes, spec->dst.addr_bytes,
               ETHER_ADDR_LEN);
        memcpy(enic_spec.s_addr.addr_bytes, spec->src.addr_bytes,
               ETHER_ADDR_LEN);

        memcpy(enic_mask.d_addr.addr_bytes, mask->dst.addr_bytes,
               ETHER_ADDR_LEN);
        memcpy(enic_mask.s_addr.addr_bytes, mask->src.addr_bytes,
               ETHER_ADDR_LEN);
        enic_spec.ether_type = spec->type;
        enic_mask.ether_type = mask->type;

        /* outer header */
        memcpy(gp->layer[FILTER_GENERIC_1_L2].mask, &enic_mask,
               sizeof(struct ether_hdr));
        memcpy(gp->layer[FILTER_GENERIC_1_L2].val, &enic_spec,
               sizeof(struct ether_hdr));
        return 0;
}

static int
enic_copy_item_vlan_v2(struct copy_item_args *arg)
{
        const struct rte_flow_item *item = arg->item;
        struct filter_v2 *enic_filter = arg->filter;
        const struct rte_flow_item_vlan *spec = item->spec;
        const struct rte_flow_item_vlan *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;
        struct ether_hdr *eth_mask;
        struct ether_hdr *eth_val;

        FLOW_TRACE();

        /* Match all if no spec */
        if (!spec)
                return 0;

        if (!mask)
                mask = &rte_flow_item_vlan_mask;

        eth_mask = (void *)gp->layer[FILTER_GENERIC_1_L2].mask;
        eth_val = (void *)gp->layer[FILTER_GENERIC_1_L2].val;
        /* Outer TPID cannot be matched */
        if (eth_mask->ether_type)
                return ENOTSUP;
        /*
         * For recent models:
         * When packet matching, the VIC always compares vlan-stripped
         * L2, regardless of vlan stripping settings. So, the inner type
         * from vlan becomes the ether type of the eth header.
         *
         * Older models w/o hardware vxlan parser have a different
         * behavior when vlan stripping is disabled. In this case,
         * vlan tag remains in the L2 buffer.
         */
        if (!arg->enic->vxlan && !arg->enic->ig_vlan_strip_en) {
                struct vlan_hdr *vlan;

                vlan = (struct vlan_hdr *)(eth_mask + 1);
                vlan->eth_proto = mask->inner_type;
                vlan = (struct vlan_hdr *)(eth_val + 1);
                vlan->eth_proto = spec->inner_type;
        } else {
                eth_mask->ether_type = mask->inner_type;
                eth_val->ether_type = spec->inner_type;
        }
        /* For TCI, use the vlan mask/val fields (little endian). */
        gp->mask_vlan = rte_be_to_cpu_16(mask->tci);
        gp->val_vlan = rte_be_to_cpu_16(spec->tci);
        return 0;
}

static int
enic_copy_item_ipv4_v2(struct copy_item_args *arg)
{
        const struct rte_flow_item *item = arg->item;
        struct filter_v2 *enic_filter = arg->filter;
        const struct rte_flow_item_ipv4 *spec = item->spec;
        const struct rte_flow_item_ipv4 *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;

        FLOW_TRACE();

        /* Match IPv4 */
        gp->mask_flags |= FILTER_GENERIC_1_IPV4;
        gp->val_flags |= FILTER_GENERIC_1_IPV4;

        /* Match all if no spec */
        if (!spec)
                return 0;

        if (!mask)
                mask = &rte_flow_item_ipv4_mask;

        memcpy(gp->layer[FILTER_GENERIC_1_L3].mask, &mask->hdr,
               sizeof(struct ipv4_hdr));
        memcpy(gp->layer[FILTER_GENERIC_1_L3].val, &spec->hdr,
               sizeof(struct ipv4_hdr));
        return 0;
}

static int
enic_copy_item_ipv6_v2(struct copy_item_args *arg)
{
        const struct rte_flow_item *item = arg->item;
        struct filter_v2 *enic_filter = arg->filter;
        const struct rte_flow_item_ipv6 *spec = item->spec;
        const struct rte_flow_item_ipv6 *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;

        FLOW_TRACE();

        /* Match IPv6 */
        gp->mask_flags |= FILTER_GENERIC_1_IPV6;
        gp->val_flags |= FILTER_GENERIC_1_IPV6;

        /* Match all if no spec */
        if (!spec)
                return 0;

        if (!mask)
                mask = &rte_flow_item_ipv6_mask;

        memcpy(gp->layer[FILTER_GENERIC_1_L3].mask, &mask->hdr,
               sizeof(struct ipv6_hdr));
        memcpy(gp->layer[FILTER_GENERIC_1_L3].val, &spec->hdr,
               sizeof(struct ipv6_hdr));
        return 0;
}

static int
enic_copy_item_udp_v2(struct copy_item_args *arg)
{
        const struct rte_flow_item *item = arg->item;
        struct filter_v2 *enic_filter = arg->filter;
        const struct rte_flow_item_udp *spec = item->spec;
        const struct rte_flow_item_udp *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;

        FLOW_TRACE();

        /* Match UDP */
        gp->mask_flags |= FILTER_GENERIC_1_UDP;
        gp->val_flags |= FILTER_GENERIC_1_UDP;

        /* Match all if no spec */
        if (!spec)
                return 0;

        if (!mask)
                mask = &rte_flow_item_udp_mask;

        memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,
               sizeof(struct udp_hdr));
        memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,
               sizeof(struct udp_hdr));
        return 0;
}

static int
enic_copy_item_tcp_v2(struct copy_item_args *arg)
{
        const struct rte_flow_item *item = arg->item;
        struct filter_v2 *enic_filter = arg->filter;
        const struct rte_flow_item_tcp *spec = item->spec;
        const struct rte_flow_item_tcp *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;

        FLOW_TRACE();

        /* Match TCP */
        gp->mask_flags |= FILTER_GENERIC_1_TCP;
        gp->val_flags |= FILTER_GENERIC_1_TCP;

        /* Match all if no spec */
        if (!spec)
                return 0;

        if (!mask)
                return ENOTSUP;

        memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,
               sizeof(struct tcp_hdr));
        memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,
               sizeof(struct tcp_hdr));
        return 0;
}

static int
enic_copy_item_sctp_v2(struct copy_item_args *arg)
{
        const struct rte_flow_item *item = arg->item;
        struct filter_v2 *enic_filter = arg->filter;
        const struct rte_flow_item_sctp *spec = item->spec;
        const struct rte_flow_item_sctp *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;
        uint8_t *ip_proto_mask = NULL;
        uint8_t *ip_proto = NULL;

        FLOW_TRACE();

        /*
         * The NIC filter API has no flags for "match sctp", so explicitly set
         * the protocol number in the IP pattern.
         */
        if (gp->val_flags & FILTER_GENERIC_1_IPV4) {
                struct ipv4_hdr *ip;
                ip = (struct ipv4_hdr *)gp->layer[FILTER_GENERIC_1_L3].mask;
                ip_proto_mask = &ip->next_proto_id;
                ip = (struct ipv4_hdr *)gp->layer[FILTER_GENERIC_1_L3].val;
                ip_proto = &ip->next_proto_id;
        } else if (gp->val_flags & FILTER_GENERIC_1_IPV6) {
                struct ipv6_hdr *ip;
                ip = (struct ipv6_hdr *)gp->layer[FILTER_GENERIC_1_L3].mask;
                ip_proto_mask = &ip->proto;
                ip = (struct ipv6_hdr *)gp->layer[FILTER_GENERIC_1_L3].val;
                ip_proto = &ip->proto;
        } else {
                /* Need IPv4/IPv6 pattern first */
                return EINVAL;
        }
        *ip_proto = IPPROTO_SCTP;
        *ip_proto_mask = 0xff;

        /* Match all if no spec */
        if (!spec)
                return 0;

        if (!mask)
                mask = &rte_flow_item_sctp_mask;

        memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,
               sizeof(struct sctp_hdr));
        memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,
               sizeof(struct sctp_hdr));
        return 0;
}

static int
enic_copy_item_vxlan_v2(struct copy_item_args *arg)
{
        const struct rte_flow_item *item = arg->item;
        struct filter_v2 *enic_filter = arg->filter;
        uint8_t *inner_ofst = arg->inner_ofst;
        const struct rte_flow_item_vxlan *spec = item->spec;
        const struct rte_flow_item_vxlan *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;
        struct udp_hdr *udp;

        FLOW_TRACE();

        /*
         * The NIC filter API has no flags for "match vxlan". Set UDP port to
         * avoid false positives.
         */
        gp->mask_flags |= FILTER_GENERIC_1_UDP;
        gp->val_flags |= FILTER_GENERIC_1_UDP;
        udp = (struct udp_hdr *)gp->layer[FILTER_GENERIC_1_L4].mask;
        udp->dst_port = 0xffff;
        udp = (struct udp_hdr *)gp->layer[FILTER_GENERIC_1_L4].val;
        udp->dst_port = RTE_BE16(4789);
        /* Match all if no spec */
        if (!spec)
                return 0;

        if (!mask)
                mask = &rte_flow_item_vxlan_mask;

        memcpy(gp->layer[FILTER_GENERIC_1_L5].mask, mask,
               sizeof(struct vxlan_hdr));
        memcpy(gp->layer[FILTER_GENERIC_1_L5].val, spec,
               sizeof(struct vxlan_hdr));

        *inner_ofst = sizeof(struct vxlan_hdr);
        return 0;
}
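
/*
 * Example mapping, for illustration: given the pattern
 * eth / ipv4 / udp / vxlan / eth / ipv4, the outer eth lands in the L2
 * buffer, the outer ipv4 in L3, the udp header in L4, and the vxlan
 * header plus both inner headers accumulate in L5 via the inner copy
 * functions above.
 */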

/**
 * Return 1 if current item is valid on top of the previous one.
 *
 * @param prev_item[in]
 *   The item before this one in the pattern or RTE_FLOW_ITEM_TYPE_END if this
 *   is the first item.
 * @param item_info[in]
 *   Info about this item, like valid previous items.
 * @param is_first_item[in]
 *   True if this is the first item in the pattern.
 */
static int
item_stacking_valid(enum rte_flow_item_type prev_item,
                    const struct enic_items *item_info, u8 is_first_item)
{
        enum rte_flow_item_type const *allowed_items = item_info->prev_items;

        FLOW_TRACE();

        for (; *allowed_items != RTE_FLOW_ITEM_TYPE_END; allowed_items++) {
                if (prev_item == *allowed_items)
                        return 1;
        }

        /* This is the first item in the stack. Check if that's cool */
        if (is_first_item && item_info->valid_start_item)
                return 1;

        return 0;
}
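
/*
 * Examples against the v3 table, for illustration: the pattern
 * eth / ipv4 / udp / vxlan / eth stacks validly because each item names
 * its predecessor in prev_items, while udp / ipv4 fails because IPV4
 * does not list UDP as a valid previous item.
 */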

/*
 * Fix up the L5 layer: HW vxlan parsing removes the vxlan header from L5.
 * Instead it is in L4, following the UDP header. Append the vxlan
 * pattern to L4 (udp) and shift any inner packet pattern in L5.
 */
static void
fixup_l5_layer(struct enic *enic, struct filter_generic_1 *gp,
               uint8_t inner_ofst)
{
        uint8_t layer[FILTER_GENERIC_1_KEY_LEN];
        uint8_t inner;
        uint8_t vxlan;

        if (!(inner_ofst > 0 && enic->vxlan))
                return;
        FLOW_TRACE();
        vxlan = sizeof(struct vxlan_hdr);
        memcpy(gp->layer[FILTER_GENERIC_1_L4].mask + sizeof(struct udp_hdr),
               gp->layer[FILTER_GENERIC_1_L5].mask, vxlan);
        memcpy(gp->layer[FILTER_GENERIC_1_L4].val + sizeof(struct udp_hdr),
               gp->layer[FILTER_GENERIC_1_L5].val, vxlan);
        inner = inner_ofst - vxlan;
        memset(layer, 0, sizeof(layer));
        memcpy(layer, gp->layer[FILTER_GENERIC_1_L5].mask + vxlan, inner);
        memcpy(gp->layer[FILTER_GENERIC_1_L5].mask, layer, sizeof(layer));
        memset(layer, 0, sizeof(layer));
        memcpy(layer, gp->layer[FILTER_GENERIC_1_L5].val + vxlan, inner);
        memcpy(gp->layer[FILTER_GENERIC_1_L5].val, layer, sizeof(layer));
}
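
/*
 * Before/after sketch with hypothetical sizes (inner eth + ipv4 match):
 * the copy functions build L5 as [vxlan 8][eth 14][ipv4 20]; after this
 * fixup the NIC sees L4 = [udp 8][vxlan 8] and L5 = [eth 14][ipv4 20].
 */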

/**
 * Build the internal enic filter structure from the provided pattern. The
 * pattern is validated as the items are copied.
 *
 * @param pattern[in]
 * @param cap[in]
 *   Info about this NIC's item support, like valid previous items.
 * @param enic_filter[out]
 *   NIC specific filters derived from the pattern.
 * @param error[out]
 */
static int
enic_copy_filter(const struct rte_flow_item pattern[],
                 const struct enic_filter_cap *cap,
                 struct enic *enic,
                 struct filter_v2 *enic_filter,
                 struct rte_flow_error *error)
{
        int ret;
        const struct rte_flow_item *item = pattern;
        u8 inner_ofst = 0; /* If encapsulated, ofst into L5 */
        enum rte_flow_item_type prev_item;
        const struct enic_items *item_info;
        struct copy_item_args args;
        enic_copy_item_fn *copy_fn;
        u8 is_first_item = 1;

        FLOW_TRACE();

        prev_item = 0;

        args.filter = enic_filter;
        args.inner_ofst = &inner_ofst;
        args.enic = enic;
        for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                /* Get info about how to validate and copy the item. If NULL
                 * is returned the NIC does not support the item.
                 */
                if (item->type == RTE_FLOW_ITEM_TYPE_VOID)
                        continue;

                item_info = &cap->item_info[item->type];
                if (item->type > cap->max_item_type ||
                    item_info->copy_item == NULL ||
                    (inner_ofst > 0 && item_info->inner_copy_item == NULL)) {
                        rte_flow_error_set(error, ENOTSUP,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                NULL, "Unsupported item.");
                        return -rte_errno;
                }

                /* check to see if item stacking is valid */
                if (!item_stacking_valid(prev_item, item_info, is_first_item))
                        goto stacking_error;

                args.item = item;
                copy_fn = inner_ofst > 0 ? item_info->inner_copy_item :
                        item_info->copy_item;
                ret = copy_fn(&args);
                if (ret)
                        goto item_not_supported;
                prev_item = item->type;
                is_first_item = 0;
        }
        fixup_l5_layer(enic, &enic_filter->u.generic_1, inner_ofst);

        return 0;

item_not_supported:
        rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_ITEM,
                           NULL, "enic type error");
        return -rte_errno;

stacking_error:
        rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
                           item, "stacking error");
        return -rte_errno;
}

/**
 * Build the internal version 1 NIC action structure from the provided
 * actions. The actions are validated as they are copied.
 *
 * @param actions[in]
 * @param enic_action[out]
 *   NIC specific actions derived from the actions.
 */
static int
enic_copy_action_v1(const struct rte_flow_action actions[],
                    struct filter_action_v2 *enic_action)
{
        enum { FATE = 1, };
        uint32_t overlap = 0;

        FLOW_TRACE();

        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                if (actions->type == RTE_FLOW_ACTION_TYPE_VOID)
                        continue;

                switch (actions->type) {
                case RTE_FLOW_ACTION_TYPE_QUEUE: {
                        const struct rte_flow_action_queue *queue =
                                (const struct rte_flow_action_queue *)
                                actions->conf;

                        if (overlap & FATE)
                                return ENOTSUP;
                        overlap |= FATE;
                        enic_action->rq_idx =
                                enic_rte_rq_idx_to_sop_idx(queue->index);
                        break;
                }
                default:
                        RTE_ASSERT(0);
                        break;
                }
        }
        if (!(overlap & FATE))
                return ENOTSUP;
        enic_action->type = FILTER_ACTION_RQ_STEERING;
        return 0;
}

/**
 * Build the internal version 2 NIC action structure from the provided
 * actions. The actions are validated as they are copied.
 *
 * @param actions[in]
 * @param enic_action[out]
 *   NIC specific actions derived from the actions.
 */
static int
enic_copy_action_v2(const struct rte_flow_action actions[],
                    struct filter_action_v2 *enic_action)
{
        enum { FATE = 1, MARK = 2, };
        uint32_t overlap = 0;

        FLOW_TRACE();

        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                switch (actions->type) {
                case RTE_FLOW_ACTION_TYPE_QUEUE: {
                        const struct rte_flow_action_queue *queue =
                                (const struct rte_flow_action_queue *)
                                actions->conf;

                        if (overlap & FATE)
                                return ENOTSUP;
                        overlap |= FATE;
                        enic_action->rq_idx =
                                enic_rte_rq_idx_to_sop_idx(queue->index);
                        enic_action->flags |= FILTER_ACTION_RQ_STEERING_FLAG;
                        break;
                }
                case RTE_FLOW_ACTION_TYPE_MARK: {
                        const struct rte_flow_action_mark *mark =
                                (const struct rte_flow_action_mark *)
                                actions->conf;

                        if (overlap & MARK)
                                return ENOTSUP;
                        overlap |= MARK;
                        /*
                         * Map mark ID (32-bit) to filter ID (16-bit):
                         * - Reject values > 16 bits
                         * - Filter ID 0 is reserved for filters that steer
                         *   but not mark. So add 1 to the mark ID to avoid
                         *   using 0.
                         * - Filter ID (ENIC_MAGIC_FILTER_ID = 0xffff) is
                         *   reserved for the "flag" action below.
                         */
                        if (mark->id >= ENIC_MAGIC_FILTER_ID - 1)
                                return EINVAL;
                        enic_action->filter_id = mark->id + 1;
                        enic_action->flags |= FILTER_ACTION_FILTER_ID_FLAG;
                        break;
                }
                case RTE_FLOW_ACTION_TYPE_FLAG: {
                        if (overlap & MARK)
                                return ENOTSUP;
                        overlap |= MARK;
                        /* ENIC_MAGIC_FILTER_ID is reserved for flagging */
                        enic_action->filter_id = ENIC_MAGIC_FILTER_ID;
                        enic_action->flags |= FILTER_ACTION_FILTER_ID_FLAG;
                        break;
                }
                case RTE_FLOW_ACTION_TYPE_DROP: {
                        if (overlap & FATE)
                                return ENOTSUP;
                        overlap |= FATE;
                        enic_action->flags |= FILTER_ACTION_DROP_FLAG;
                        break;
                }
                case RTE_FLOW_ACTION_TYPE_COUNT: {
                        enic_action->flags |= FILTER_ACTION_COUNTER_FLAG;
                        break;
                }
                case RTE_FLOW_ACTION_TYPE_VOID:
                        continue;
                default:
                        RTE_ASSERT(0);
                        break;
                }
        }
        if (!(overlap & FATE))
                return ENOTSUP;
        enic_action->type = FILTER_ACTION_V2;
        return 0;
}
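
/*
 * Mapping example, for illustration: MARK with id 5 yields filter_id 6
 * (id + 1, since 0 means "steer only"), and FLAG always yields filter_id
 * 0xffff (ENIC_MAGIC_FILTER_ID). Mark ids of 0xfffe and above are
 * rejected so both reserved values stay free.
 */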

/** Check if the action is supported */
static int
enic_match_action(const struct rte_flow_action *action,
                  const enum rte_flow_action_type *supported_actions)
{
        for (; *supported_actions != RTE_FLOW_ACTION_TYPE_END;
             supported_actions++) {
                if (action->type == *supported_actions)
                        return 1;
        }
        return 0;
}

/** Get the NIC filter capabilities structure */
static const struct enic_filter_cap *
enic_get_filter_cap(struct enic *enic)
{
        if (enic->flow_filter_mode)
                return &enic_filter_cap[enic->flow_filter_mode];

        return NULL;
}

/** Get the actions for this NIC version. */
static const struct enic_action_cap *
enic_get_action_cap(struct enic *enic)
{
        const struct enic_action_cap *ea;
        uint8_t actions;

        actions = enic->filter_actions;
        if (actions & FILTER_ACTION_COUNTER_FLAG)
                ea = &enic_action_cap[FILTER_ACTION_COUNTER_FLAG];
        else if (actions & FILTER_ACTION_DROP_FLAG)
                ea = &enic_action_cap[FILTER_ACTION_DROP_FLAG];
        else if (actions & FILTER_ACTION_FILTER_ID_FLAG)
                ea = &enic_action_cap[FILTER_ACTION_FILTER_ID_FLAG];
        else
                ea = &enic_action_cap[FILTER_ACTION_RQ_STEERING_FLAG];
        return ea;
}

/* Debug function to dump internal NIC action structure. */
static void
enic_dump_actions(const struct filter_action_v2 *ea)
{
        if (ea->type == FILTER_ACTION_RQ_STEERING) {
                FLOW_LOG(INFO, "Action(V1), queue: %u\n", ea->rq_idx);
        } else if (ea->type == FILTER_ACTION_V2) {
                FLOW_LOG(INFO, "Actions(V2)\n");
                if (ea->flags & FILTER_ACTION_RQ_STEERING_FLAG)
                        FLOW_LOG(INFO, "\tqueue: %u\n",
                               enic_sop_rq_idx_to_rte_idx(ea->rq_idx));
                if (ea->flags & FILTER_ACTION_FILTER_ID_FLAG)
                        FLOW_LOG(INFO, "\tfilter_id: %u\n", ea->filter_id);
        }
}

/* Debug function to dump internal NIC filter structure. */
static void
enic_dump_filter(const struct filter_v2 *filt)
{
        const struct filter_generic_1 *gp;
        int i, j, mbyte;
        char buf[128], *bp;
        char ip4[16], ip6[16], udp[16], tcp[16], tcpudp[16], ip4csum[16];
        char l4csum[16], ipfrag[16];

        switch (filt->type) {
        case FILTER_IPV4_5TUPLE:
                FLOW_LOG(INFO, "FILTER_IPV4_5TUPLE\n");
                break;
        case FILTER_USNIC_IP:
        case FILTER_DPDK_1:
                /* FIXME: this should be a loop */
                gp = &filt->u.generic_1;
                FLOW_LOG(INFO, "Filter: vlan: 0x%04x, mask: 0x%04x\n",
                       gp->val_vlan, gp->mask_vlan);

                if (gp->mask_flags & FILTER_GENERIC_1_IPV4)
                        sprintf(ip4, "%s ",
                                (gp->val_flags & FILTER_GENERIC_1_IPV4)
                                 ? "ip4(y)" : "ip4(n)");
                else
                        sprintf(ip4, "%s ", "ip4(x)");

                if (gp->mask_flags & FILTER_GENERIC_1_IPV6)
                        sprintf(ip6, "%s ",
                                (gp->val_flags & FILTER_GENERIC_1_IPV6)
                                 ? "ip6(y)" : "ip6(n)");
                else
                        sprintf(ip6, "%s ", "ip6(x)");

                if (gp->mask_flags & FILTER_GENERIC_1_UDP)
                        sprintf(udp, "%s ",
                                (gp->val_flags & FILTER_GENERIC_1_UDP)
                                 ? "udp(y)" : "udp(n)");
                else
                        sprintf(udp, "%s ", "udp(x)");

                if (gp->mask_flags & FILTER_GENERIC_1_TCP)
                        sprintf(tcp, "%s ",
                                (gp->val_flags & FILTER_GENERIC_1_TCP)
                                 ? "tcp(y)" : "tcp(n)");
                else
                        sprintf(tcp, "%s ", "tcp(x)");

                if (gp->mask_flags & FILTER_GENERIC_1_TCP_OR_UDP)
                        sprintf(tcpudp, "%s ",
                                (gp->val_flags & FILTER_GENERIC_1_TCP_OR_UDP)
                                 ? "tcpudp(y)" : "tcpudp(n)");
                else
                        sprintf(tcpudp, "%s ", "tcpudp(x)");

                if (gp->mask_flags & FILTER_GENERIC_1_IP4SUM_OK)
                        sprintf(ip4csum, "%s ",
                                (gp->val_flags & FILTER_GENERIC_1_IP4SUM_OK)
                                 ? "ip4csum(y)" : "ip4csum(n)");
                else
                        sprintf(ip4csum, "%s ", "ip4csum(x)");

                if (gp->mask_flags & FILTER_GENERIC_1_L4SUM_OK)
                        sprintf(l4csum, "%s ",
                                (gp->val_flags & FILTER_GENERIC_1_L4SUM_OK)
                                 ? "l4csum(y)" : "l4csum(n)");
                else
                        sprintf(l4csum, "%s ", "l4csum(x)");

                if (gp->mask_flags & FILTER_GENERIC_1_IPFRAG)
                        sprintf(ipfrag, "%s ",
                                (gp->val_flags & FILTER_GENERIC_1_IPFRAG)
                                 ? "ipfrag(y)" : "ipfrag(n)");
                else
                        sprintf(ipfrag, "%s ", "ipfrag(x)");
                FLOW_LOG(INFO, "\tFlags: %s%s%s%s%s%s%s%s\n", ip4, ip6, udp,
                         tcp, tcpudp, ip4csum, l4csum, ipfrag);

                for (i = 0; i < FILTER_GENERIC_1_NUM_LAYERS; i++) {
                        mbyte = FILTER_GENERIC_1_KEY_LEN - 1;
                        while (mbyte && !gp->layer[i].mask[mbyte])
                                mbyte--;
                        if (mbyte == 0)
                                continue;

                        bp = buf;
                        for (j = 0; j <= mbyte; j++) {
                                sprintf(bp, "%02x",
                                        gp->layer[i].mask[j]);
                                bp += 2;
                        }
                        *bp = '\0';
                        FLOW_LOG(INFO, "\tL%u mask: %s\n", i + 2, buf);
                        bp = buf;
                        for (j = 0; j <= mbyte; j++) {
                                sprintf(bp, "%02x",
                                        gp->layer[i].val[j]);
                                bp += 2;
                        }
                        *bp = '\0';
                        FLOW_LOG(INFO, "\tL%u  val: %s\n", i + 2, buf);
                }
                break;
        default:
                FLOW_LOG(INFO, "FILTER UNKNOWN\n");
                break;
        }
}

/* Debug function to dump internal NIC flow structures. */
static void
enic_dump_flow(const struct filter_action_v2 *ea, const struct filter_v2 *filt)
{
        enic_dump_filter(filt);
        enic_dump_actions(ea);
}
1383
1384
/**
 * Internal flow parse/validate function.
 *
 * @param dev[in]
 *   This device pointer.
 * @param attrs[in]
 *   Flow attributes (only ingress, without group/priority, is supported).
 * @param pattern[in]
 *   Flow item pattern to convert.
 * @param actions[in]
 *   Flow actions to convert.
 * @param error[out]
 *   rte_flow error structure to fill on failure.
 * @param enic_filter[out]
 *   Internal NIC filter structure pointer.
 * @param enic_action[out]
 *   Internal NIC action structure pointer.
 * @return 0 on success, negative errno (with rte_errno set) otherwise.
 */
static int
enic_flow_parse(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attrs,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error,
		struct filter_v2 *enic_filter,
		struct filter_action_v2 *enic_action)
{
	int ret = 0;
	struct enic *enic = pmd_priv(dev);
	const struct enic_filter_cap *enic_filter_cap;
	const struct enic_action_cap *enic_action_cap;
	const struct rte_flow_action *action;

	FLOW_TRACE();

	memset(enic_filter, 0, sizeof(*enic_filter));
	memset(enic_action, 0, sizeof(*enic_action));

	if (!pattern) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
				   NULL, "No pattern specified");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
				   NULL, "No action specified");
		return -rte_errno;
	}

	if (attrs) {
		if (attrs->group) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
					   NULL,
					   "priority groups are not supported");
			return -rte_errno;
		} else if (attrs->priority) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
					   NULL,
					   "priorities are not supported");
			return -rte_errno;
		} else if (attrs->egress) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
					   NULL,
					   "egress is not supported");
			return -rte_errno;
		} else if (attrs->transfer) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
					   NULL,
					   "transfer is not supported");
			return -rte_errno;
		} else if (!attrs->ingress) {
			rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
					   NULL,
					   "only ingress is supported");
			return -rte_errno;
		}
	} else {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, "No attribute specified");
		return -rte_errno;
	}

	/* Verify actions. */
	enic_action_cap = enic_get_action_cap(enic);
	for (action = &actions[0]; action->type != RTE_FLOW_ACTION_TYPE_END;
	     action++) {
		if (action->type == RTE_FLOW_ACTION_TYPE_VOID)
			continue;
		else if (!enic_match_action(action, enic_action_cap->actions))
			break;
	}
	if (action->type != RTE_FLOW_ACTION_TYPE_END) {
		rte_flow_error_set(error, EPERM, RTE_FLOW_ERROR_TYPE_ACTION,
				   action, "Invalid action.");
		return -rte_errno;
	}
	ret = enic_action_cap->copy_fn(actions, enic_action);
	if (ret) {
		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "Unsupported action.");
		return -rte_errno;
	}

	/* Verify flow items. If copying the filter from flow format to enic
	 * format fails, the flow is not supported.
	 */
	enic_filter_cap = enic_get_filter_cap(enic);
	if (enic_filter_cap == NULL) {
		rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "Flow API not available");
		return -rte_errno;
	}
	enic_filter->type = enic->flow_filter_mode;
	ret = enic_copy_filter(pattern, enic_filter_cap, enic,
			       enic_filter, error);
	return ret;
}
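
/*
 * Illustrative sketch (not part of the driver) of a pattern/action list of
 * the shape this parser accepts: outer eth -> ipv4 -> udp steered to a
 * receive queue. The queue index and IPv4 address below are arbitrary
 * placeholder values chosen for the example.
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item_ipv4 ip_spec = {
 *		.hdr.dst_addr = RTE_BE32(IPv4(10, 0, 0, 1)),
 *	};
 *	struct rte_flow_item_ipv4 ip_mask = {
 *		.hdr.dst_addr = RTE_BE32(0xffffffff),
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		  .spec = &ip_spec, .mask = &ip_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 0 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */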

/**
 * Push filter/action to the NIC.
 *
 * @param enic[in]
 *   Device structure pointer.
 * @param enic_filter[in]
 *   Internal NIC filter structure pointer.
 * @param enic_action[in]
 *   Internal NIC action structure pointer.
 * @param error[out]
 *   rte_flow error structure to fill on failure.
 * @return
 *   Flow handle on success, NULL on failure (with error set).
 */
static struct rte_flow *
enic_flow_add_filter(struct enic *enic, struct filter_v2 *enic_filter,
		     struct filter_action_v2 *enic_action,
		     struct rte_flow_error *error)
{
	struct rte_flow *flow;
	int err;
	uint16_t entry;
	int ctr_idx;
	int last_max_flow_ctr;

	FLOW_TRACE();

	flow = rte_calloc(__func__, 1, sizeof(*flow), 0);
	if (!flow) {
		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "cannot allocate flow memory");
		return NULL;
	}

	flow->counter_idx = -1;
	last_max_flow_ctr = -1;
	if (enic_action->flags & FILTER_ACTION_COUNTER_FLAG) {
		if (!vnic_dev_counter_alloc(enic->vdev, (uint32_t *)&ctr_idx)) {
			rte_flow_error_set(error, ENOMEM,
					   RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					   NULL, "cannot allocate counter");
			goto unwind_flow_alloc;
		}
		flow->counter_idx = ctr_idx;
		enic_action->counter_index = ctr_idx;

		/* If index is the largest, increase the counter DMA size */
		if (ctr_idx > enic->max_flow_counter) {
			err = vnic_dev_counter_dma_cfg(enic->vdev,
						 VNIC_FLOW_COUNTER_UPDATE_MSECS,
						 ctr_idx + 1);
			if (err) {
				rte_flow_error_set(error, -err,
					   RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					   NULL, "counter DMA config failed");
				goto unwind_ctr_alloc;
			}
			last_max_flow_ctr = enic->max_flow_counter;
			enic->max_flow_counter = ctr_idx;
		}
	}

	/* entry[in] is the queue index, entry[out] is the filter ID used for
	 * delete
	 */
	entry = enic_action->rq_idx;
	err = vnic_dev_classifier(enic->vdev, CLSF_ADD, &entry, enic_filter,
				  enic_action);
	if (err) {
		rte_flow_error_set(error, -err, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "vnic_dev_classifier error");
		goto unwind_ctr_dma_cfg;
	}

	flow->enic_filter_id = entry;
	flow->enic_filter = *enic_filter;

	return flow;

/* unwind if there are errors */
unwind_ctr_dma_cfg:
	if (last_max_flow_ctr != -1) {
		/* reduce counter DMA size */
		vnic_dev_counter_dma_cfg(enic->vdev,
					 VNIC_FLOW_COUNTER_UPDATE_MSECS,
					 last_max_flow_ctr + 1);
		enic->max_flow_counter = last_max_flow_ctr;
	}
unwind_ctr_alloc:
	if (flow->counter_idx != -1)
		vnic_dev_counter_free(enic->vdev, ctr_idx);
unwind_flow_alloc:
	rte_free(flow);
	return NULL;
}

/**
 * Remove filter/action from the NIC.
 *
 * @param enic[in]
 *   Device structure pointer.
 * @param flow[in]
 *   Flow handle holding the ID of the NIC filter to delete.
 * @param error[out]
 *   rte_flow error structure to fill on failure.
 */
static int
enic_flow_del_filter(struct enic *enic, struct rte_flow *flow,
		     struct rte_flow_error *error)
{
	u16 filter_id;
	int err;

	FLOW_TRACE();

	filter_id = flow->enic_filter_id;
	err = vnic_dev_classifier(enic->vdev, CLSF_DEL, &filter_id, NULL, NULL);
	if (err) {
		rte_flow_error_set(error, -err, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "vnic_dev_classifier failed");
		return -err;
	}

	if (flow->counter_idx != -1) {
		if (!vnic_dev_counter_free(enic->vdev, flow->counter_idx))
			dev_err(enic, "counter free failed, idx: %d\n",
				flow->counter_idx);
		flow->counter_idx = -1;
	}
	return 0;
}

/*
 * The following functions are callbacks for the generic flow API.
 */

/**
 * Validate a flow supported by the NIC.
 *
 * @see rte_flow_validate()
 * @see rte_flow_ops
 */
static int
enic_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attrs,
		   const struct rte_flow_item pattern[],
		   const struct rte_flow_action actions[],
		   struct rte_flow_error *error)
{
	struct filter_v2 enic_filter;
	struct filter_action_v2 enic_action;
	int ret;

	FLOW_TRACE();

	ret = enic_flow_parse(dev, attrs, pattern, actions, error,
			      &enic_filter, &enic_action);
	if (!ret)
		enic_dump_flow(&enic_action, &enic_filter);
	return ret;
}

/**
 * Create a flow supported by the NIC.
 *
 * @see rte_flow_create()
 * @see rte_flow_ops
 */
static struct rte_flow *
enic_flow_create(struct rte_eth_dev *dev,
		 const struct rte_flow_attr *attrs,
		 const struct rte_flow_item pattern[],
		 const struct rte_flow_action actions[],
		 struct rte_flow_error *error)
{
	int ret;
	struct filter_v2 enic_filter;
	struct filter_action_v2 enic_action;
	struct rte_flow *flow;
	struct enic *enic = pmd_priv(dev);

	FLOW_TRACE();

	ret = enic_flow_parse(dev, attrs, pattern, actions, error, &enic_filter,
			      &enic_action);
	if (ret < 0)
		return NULL;

	rte_spinlock_lock(&enic->flows_lock);
	flow = enic_flow_add_filter(enic, &enic_filter, &enic_action,
				    error);
	if (flow)
		LIST_INSERT_HEAD(&enic->flows, flow, next);
	rte_spinlock_unlock(&enic->flows_lock);

	return flow;
}

/**
 * Destroy a flow supported by the NIC.
 *
 * @see rte_flow_destroy()
 * @see rte_flow_ops
 */
static int
enic_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
		  struct rte_flow_error *error)
{
	struct enic *enic = pmd_priv(dev);

	FLOW_TRACE();

	rte_spinlock_lock(&enic->flows_lock);
	enic_flow_del_filter(enic, flow, error);
	LIST_REMOVE(flow, next);
	rte_spinlock_unlock(&enic->flows_lock);
	rte_free(flow);
	return 0;
}

/**
 * Flush all flows on the device.
 *
 * @see rte_flow_flush()
 * @see rte_flow_ops
 */
static int
enic_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
{
	struct rte_flow *flow;
	struct enic *enic = pmd_priv(dev);

	FLOW_TRACE();

	rte_spinlock_lock(&enic->flows_lock);

	while (!LIST_EMPTY(&enic->flows)) {
		flow = LIST_FIRST(&enic->flows);
		enic_flow_del_filter(enic, flow, error);
		LIST_REMOVE(flow, next);
		rte_free(flow);
	}
	rte_spinlock_unlock(&enic->flows_lock);
	return 0;
}

/**
 * Query the hit/byte counter attached to a flow.
 *
 * Helper for enic_flow_query(); fills in a struct rte_flow_query_count
 * and optionally resets the counter.
 */
static int
enic_flow_query_count(struct rte_eth_dev *dev,
		      struct rte_flow *flow, void *data,
		      struct rte_flow_error *error)
{
	struct enic *enic = pmd_priv(dev);
	struct rte_flow_query_count *query;
	uint64_t packets, bytes;

	FLOW_TRACE();

	if (flow->counter_idx == -1) {
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "flow does not have counter");
	}
	query = (struct rte_flow_query_count *)data;
	if (!vnic_dev_counter_query(enic->vdev, flow->counter_idx,
				    !!query->reset, &packets, &bytes)) {
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, "cannot read counter");
	}
	query->hits_set = 1;
	query->bytes_set = 1;
	query->hits = packets;
	query->bytes = bytes;
	return 0;
}

/**
 * Query a flow (only the COUNT action is supported).
 *
 * @see rte_flow_query()
 * @see rte_flow_ops
 */
static int
enic_flow_query(struct rte_eth_dev *dev,
		struct rte_flow *flow,
		const struct rte_flow_action *actions,
		void *data,
		struct rte_flow_error *error)
{
	int ret = 0;

	FLOW_TRACE();

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
			ret = enic_flow_query_count(dev, flow, data, error);
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  actions,
						  "action not supported");
		}
		if (ret < 0)
			return ret;
	}
	return 0;
}
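
/*
 * Illustrative application-side sketch (not part of the driver): querying
 * the counter of a flow that was created with a COUNT action. "port_id"
 * and "flow" are assumed to come from the application's own setup.
 *
 *	struct rte_flow_query_count count = { .reset = 1 };
 *	struct rte_flow_action count_action = {
 *		.type = RTE_FLOW_ACTION_TYPE_COUNT,
 *	};
 *	struct rte_flow_error error;
 *
 *	if (rte_flow_query(port_id, flow, &count_action, &count,
 *			   &error) == 0 && count.hits_set)
 *		printf("hits=%" PRIu64 " bytes=%" PRIu64 "\n",
 *		       count.hits, count.bytes);
 */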

/**
 * Flow callback registration.
 *
 * @see rte_flow_ops
 */
const struct rte_flow_ops enic_flow_ops = {
	.validate = enic_flow_validate,
	.create = enic_flow_create,
	.destroy = enic_flow_destroy,
	.flush = enic_flow_flush,
	.query = enic_flow_query,
};
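
/*
 * For context, a minimal application-side sketch (not verbatim from this
 * file) of how these callbacks are reached: the ethdev layer obtains the
 * ops table from the PMD and dispatches rte_flow calls to it. "port_id",
 * "attr", "pattern", and "actions" are assumed to be set up as in the
 * parse example above.
 *
 *	struct rte_flow_error error;
 *	struct rte_flow *flow;
 *
 *	if (rte_flow_validate(port_id, &attr, pattern, actions, &error) == 0)
 *		flow = rte_flow_create(port_id, &attr, pattern, actions,
 *				       &error);
 */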