1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2008-2017 Cisco Systems, Inc. All rights reserved.
8 #include <rte_ethdev_driver.h>
9 #include <rte_flow_driver.h>
10 #include <rte_ether.h>
14 #include "enic_compat.h"
19 #define FLOW_TRACE() \
20 rte_log(RTE_LOG_DEBUG, enicpmd_logtype_flow, \
22 #define FLOW_LOG(level, fmt, args...) \
23 rte_log(RTE_LOG_ ## level, enicpmd_logtype_flow, \
27 * Common arguments passed to copy_item functions. Use this structure
28 * so we can easily add new arguments.
29 * item: Item specification.
30 * filter: Partially filled in NIC filter structure.
31 * inner_ofst: If zero, this is an outer header. If non-zero, this is
32 * the offset into L5 where the header begins.
33 * l2_proto_off: offset of the EtherType field in the eth or vlan header.
34 * l3_proto_off: offset of the next-protocol field in the IPv4 or IPv6 header.
36 struct copy_item_args {
37 const struct rte_flow_item *item;
38 struct filter_v2 *filter;
45 /* functions for copying items into enic filters */
46 typedef int (enic_copy_item_fn)(struct copy_item_args *arg);
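/*
 * Illustrative sketch (not driver code): enic_copy_filter() below
 * dispatches through this typedef roughly as follows; names are the
 * ones defined in this file, and the flow is condensed:
 *
 *	struct copy_item_args args;
 *	args.item = item;                  // current rte_flow item
 *	args.filter = enic_filter;         // NIC filter being built
 *	args.inner_ofst = &inner_ofst;     // 0 while copying outer headers
 *	ret = item_info->copy_item(&args); // validate and copy one item
 */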
48 /** Info about how to copy items into enic filters. */
50 /** Function for copying and validating an item. */
51 enic_copy_item_fn *copy_item;
52 /** List of valid previous items. */
53 const enum rte_flow_item_type * const prev_items;
54 /** True if it's OK for this item to be the first item. For some NIC
55 * versions, it's invalid to start the stack above layer 3.
57 const u8 valid_start_item;
58 /* Inner packet version of copy_item. */
59 enic_copy_item_fn *inner_copy_item;
62 /** Filtering capabilities for various NIC and firmware versions. */
63 struct enic_filter_cap {
64 /** list of valid items and their handlers and attributes. */
65 const struct enic_items *item_info;
66 /* Max type in the above list, used to detect unsupported types */
67 enum rte_flow_item_type max_item_type;
70 /* functions for copying flow actions into enic actions */
71 typedef int (copy_action_fn)(const struct rte_flow_action actions[],
72 struct filter_action_v2 *enic_action);
74 /** Action capabilities for various NICs. */
75 struct enic_action_cap {
76 /** list of valid actions */
77 const enum rte_flow_action_type *actions;
78 /** copy function for a particular NIC */
79 int (*copy_fn)(const struct rte_flow_action actions[],
80 struct filter_action_v2 *enic_action);
83 /* Forward declarations */
84 static enic_copy_item_fn enic_copy_item_ipv4_v1;
85 static enic_copy_item_fn enic_copy_item_udp_v1;
86 static enic_copy_item_fn enic_copy_item_tcp_v1;
87 static enic_copy_item_fn enic_copy_item_eth_v2;
88 static enic_copy_item_fn enic_copy_item_vlan_v2;
89 static enic_copy_item_fn enic_copy_item_ipv4_v2;
90 static enic_copy_item_fn enic_copy_item_ipv6_v2;
91 static enic_copy_item_fn enic_copy_item_udp_v2;
92 static enic_copy_item_fn enic_copy_item_tcp_v2;
93 static enic_copy_item_fn enic_copy_item_sctp_v2;
94 static enic_copy_item_fn enic_copy_item_vxlan_v2;
95 static enic_copy_item_fn enic_copy_item_inner_eth_v2;
96 static enic_copy_item_fn enic_copy_item_inner_vlan_v2;
97 static enic_copy_item_fn enic_copy_item_inner_ipv4_v2;
98 static enic_copy_item_fn enic_copy_item_inner_ipv6_v2;
99 static enic_copy_item_fn enic_copy_item_inner_udp_v2;
100 static enic_copy_item_fn enic_copy_item_inner_tcp_v2;
101 static copy_action_fn enic_copy_action_v1;
102 static copy_action_fn enic_copy_action_v2;
105 * Legacy NICs or NICs with outdated firmware. Only 5-tuple perfect match
108 static const struct enic_items enic_items_v1[] = {
109 [RTE_FLOW_ITEM_TYPE_IPV4] = {
110 .copy_item = enic_copy_item_ipv4_v1,
111 .valid_start_item = 1,
112 .prev_items = (const enum rte_flow_item_type[]) {
113 RTE_FLOW_ITEM_TYPE_END,
115 .inner_copy_item = NULL,
117 [RTE_FLOW_ITEM_TYPE_UDP] = {
118 .copy_item = enic_copy_item_udp_v1,
119 .valid_start_item = 0,
120 .prev_items = (const enum rte_flow_item_type[]) {
121 RTE_FLOW_ITEM_TYPE_IPV4,
122 RTE_FLOW_ITEM_TYPE_END,
124 .inner_copy_item = NULL,
126 [RTE_FLOW_ITEM_TYPE_TCP] = {
127 .copy_item = enic_copy_item_tcp_v1,
128 .valid_start_item = 0,
129 .prev_items = (const enum rte_flow_item_type[]) {
130 RTE_FLOW_ITEM_TYPE_IPV4,
131 RTE_FLOW_ITEM_TYPE_END,
133 .inner_copy_item = NULL,
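/*
 * Example (hypothetical application pattern): with the v1 table above, a
 * valid stack is IPV4 followed by UDP or TCP, and IPV4 must come first.
 * A minimal sketch, assuming app-side specs ip_spec and udp_spec:
 *
 *	const struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &ip_spec,
 *		  .mask = &rte_flow_item_ipv4_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP, .spec = &udp_spec,
 *		  .mask = &rte_flow_item_udp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *
 * The v1 copy functions below require both addresses and both ports to be
 * set, with exact-match masks (5-tuple perfect match).
 */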
138 * NICs have the Advanced Filters capability, but it is disabled. This means
139 * that layer 3 must be specified.
141 static const struct enic_items enic_items_v2[] = {
142 [RTE_FLOW_ITEM_TYPE_ETH] = {
143 .copy_item = enic_copy_item_eth_v2,
144 .valid_start_item = 1,
145 .prev_items = (const enum rte_flow_item_type[]) {
146 RTE_FLOW_ITEM_TYPE_VXLAN,
147 RTE_FLOW_ITEM_TYPE_END,
149 .inner_copy_item = enic_copy_item_inner_eth_v2,
151 [RTE_FLOW_ITEM_TYPE_VLAN] = {
152 .copy_item = enic_copy_item_vlan_v2,
153 .valid_start_item = 1,
154 .prev_items = (const enum rte_flow_item_type[]) {
155 RTE_FLOW_ITEM_TYPE_ETH,
156 RTE_FLOW_ITEM_TYPE_END,
158 .inner_copy_item = enic_copy_item_inner_vlan_v2,
160 [RTE_FLOW_ITEM_TYPE_IPV4] = {
161 .copy_item = enic_copy_item_ipv4_v2,
162 .valid_start_item = 1,
163 .prev_items = (const enum rte_flow_item_type[]) {
164 RTE_FLOW_ITEM_TYPE_ETH,
165 RTE_FLOW_ITEM_TYPE_VLAN,
166 RTE_FLOW_ITEM_TYPE_END,
168 .inner_copy_item = enic_copy_item_inner_ipv4_v2,
170 [RTE_FLOW_ITEM_TYPE_IPV6] = {
171 .copy_item = enic_copy_item_ipv6_v2,
172 .valid_start_item = 1,
173 .prev_items = (const enum rte_flow_item_type[]) {
174 RTE_FLOW_ITEM_TYPE_ETH,
175 RTE_FLOW_ITEM_TYPE_VLAN,
176 RTE_FLOW_ITEM_TYPE_END,
178 .inner_copy_item = enic_copy_item_inner_ipv6_v2,
180 [RTE_FLOW_ITEM_TYPE_UDP] = {
181 .copy_item = enic_copy_item_udp_v2,
182 .valid_start_item = 0,
183 .prev_items = (const enum rte_flow_item_type[]) {
184 RTE_FLOW_ITEM_TYPE_IPV4,
185 RTE_FLOW_ITEM_TYPE_IPV6,
186 RTE_FLOW_ITEM_TYPE_END,
188 .inner_copy_item = enic_copy_item_inner_udp_v2,
190 [RTE_FLOW_ITEM_TYPE_TCP] = {
191 .copy_item = enic_copy_item_tcp_v2,
192 .valid_start_item = 0,
193 .prev_items = (const enum rte_flow_item_type[]) {
194 RTE_FLOW_ITEM_TYPE_IPV4,
195 RTE_FLOW_ITEM_TYPE_IPV6,
196 RTE_FLOW_ITEM_TYPE_END,
198 .inner_copy_item = enic_copy_item_inner_tcp_v2,
200 [RTE_FLOW_ITEM_TYPE_SCTP] = {
201 .copy_item = enic_copy_item_sctp_v2,
202 .valid_start_item = 0,
203 .prev_items = (const enum rte_flow_item_type[]) {
204 RTE_FLOW_ITEM_TYPE_IPV4,
205 RTE_FLOW_ITEM_TYPE_IPV6,
206 RTE_FLOW_ITEM_TYPE_END,
208 .inner_copy_item = NULL,
210 [RTE_FLOW_ITEM_TYPE_VXLAN] = {
211 .copy_item = enic_copy_item_vxlan_v2,
212 .valid_start_item = 0,
213 .prev_items = (const enum rte_flow_item_type[]) {
214 RTE_FLOW_ITEM_TYPE_UDP,
215 RTE_FLOW_ITEM_TYPE_END,
217 .inner_copy_item = NULL,
221 /** NICs with Advanced filters enabled */
222 static const struct enic_items enic_items_v3[] = {
223 [RTE_FLOW_ITEM_TYPE_ETH] = {
224 .copy_item = enic_copy_item_eth_v2,
225 .valid_start_item = 1,
226 .prev_items = (const enum rte_flow_item_type[]) {
227 RTE_FLOW_ITEM_TYPE_VXLAN,
228 RTE_FLOW_ITEM_TYPE_END,
230 .inner_copy_item = enic_copy_item_inner_eth_v2,
232 [RTE_FLOW_ITEM_TYPE_VLAN] = {
233 .copy_item = enic_copy_item_vlan_v2,
234 .valid_start_item = 1,
235 .prev_items = (const enum rte_flow_item_type[]) {
236 RTE_FLOW_ITEM_TYPE_ETH,
237 RTE_FLOW_ITEM_TYPE_END,
239 .inner_copy_item = enic_copy_item_inner_vlan_v2,
241 [RTE_FLOW_ITEM_TYPE_IPV4] = {
242 .copy_item = enic_copy_item_ipv4_v2,
243 .valid_start_item = 1,
244 .prev_items = (const enum rte_flow_item_type[]) {
245 RTE_FLOW_ITEM_TYPE_ETH,
246 RTE_FLOW_ITEM_TYPE_VLAN,
247 RTE_FLOW_ITEM_TYPE_END,
249 .inner_copy_item = enic_copy_item_inner_ipv4_v2,
251 [RTE_FLOW_ITEM_TYPE_IPV6] = {
252 .copy_item = enic_copy_item_ipv6_v2,
253 .valid_start_item = 1,
254 .prev_items = (const enum rte_flow_item_type[]) {
255 RTE_FLOW_ITEM_TYPE_ETH,
256 RTE_FLOW_ITEM_TYPE_VLAN,
257 RTE_FLOW_ITEM_TYPE_END,
259 .inner_copy_item = enic_copy_item_inner_ipv6_v2,
261 [RTE_FLOW_ITEM_TYPE_UDP] = {
262 .copy_item = enic_copy_item_udp_v2,
263 .valid_start_item = 1,
264 .prev_items = (const enum rte_flow_item_type[]) {
265 RTE_FLOW_ITEM_TYPE_IPV4,
266 RTE_FLOW_ITEM_TYPE_IPV6,
267 RTE_FLOW_ITEM_TYPE_END,
269 .inner_copy_item = enic_copy_item_inner_udp_v2,
271 [RTE_FLOW_ITEM_TYPE_TCP] = {
272 .copy_item = enic_copy_item_tcp_v2,
273 .valid_start_item = 1,
274 .prev_items = (const enum rte_flow_item_type[]) {
275 RTE_FLOW_ITEM_TYPE_IPV4,
276 RTE_FLOW_ITEM_TYPE_IPV6,
277 RTE_FLOW_ITEM_TYPE_END,
279 .inner_copy_item = enic_copy_item_inner_tcp_v2,
281 [RTE_FLOW_ITEM_TYPE_SCTP] = {
282 .copy_item = enic_copy_item_sctp_v2,
283 .valid_start_item = 0,
284 .prev_items = (const enum rte_flow_item_type[]) {
285 RTE_FLOW_ITEM_TYPE_IPV4,
286 RTE_FLOW_ITEM_TYPE_IPV6,
287 RTE_FLOW_ITEM_TYPE_END,
289 .inner_copy_item = NULL,
291 [RTE_FLOW_ITEM_TYPE_VXLAN] = {
292 .copy_item = enic_copy_item_vxlan_v2,
293 .valid_start_item = 1,
294 .prev_items = (const enum rte_flow_item_type[]) {
295 RTE_FLOW_ITEM_TYPE_UDP,
296 RTE_FLOW_ITEM_TYPE_END,
298 .inner_copy_item = NULL,
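/*
 * Example (hypothetical application pattern): with Advanced Filters
 * enabled, inner packet matching is available. An outer UDP/VXLAN
 * pattern may be followed by inner headers, which the inner_copy_item
 * handlers append to the L5 key buffer:
 *
 *	const struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
 *		{ .type = RTE_FLOW_ITEM_TYPE_VXLAN },
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },   // inner eth
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },  // inner ipv4
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *
 * A null spec/mask matches all packets with that header.
 */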
302 /** Filtering capabilities indexed by the NIC's supported filter type. */
303 static const struct enic_filter_cap enic_filter_cap[] = {
304 [FILTER_IPV4_5TUPLE] = {
305 .item_info = enic_items_v1,
306 .max_item_type = RTE_FLOW_ITEM_TYPE_TCP,
308 [FILTER_USNIC_IP] = {
309 .item_info = enic_items_v2,
310 .max_item_type = RTE_FLOW_ITEM_TYPE_VXLAN,
313 .item_info = enic_items_v3,
314 .max_item_type = RTE_FLOW_ITEM_TYPE_VXLAN,
318 /** Supported actions for older NICs */
319 static const enum rte_flow_action_type enic_supported_actions_v1[] = {
320 RTE_FLOW_ACTION_TYPE_QUEUE,
321 RTE_FLOW_ACTION_TYPE_END,
324 /** Supported actions for newer NICs */
325 static const enum rte_flow_action_type enic_supported_actions_v2_id[] = {
326 RTE_FLOW_ACTION_TYPE_QUEUE,
327 RTE_FLOW_ACTION_TYPE_MARK,
328 RTE_FLOW_ACTION_TYPE_FLAG,
329 RTE_FLOW_ACTION_TYPE_END,
332 static const enum rte_flow_action_type enic_supported_actions_v2_drop[] = {
333 RTE_FLOW_ACTION_TYPE_QUEUE,
334 RTE_FLOW_ACTION_TYPE_MARK,
335 RTE_FLOW_ACTION_TYPE_FLAG,
336 RTE_FLOW_ACTION_TYPE_DROP,
337 RTE_FLOW_ACTION_TYPE_END,
340 static const enum rte_flow_action_type enic_supported_actions_v2_count[] = {
341 RTE_FLOW_ACTION_TYPE_QUEUE,
342 RTE_FLOW_ACTION_TYPE_MARK,
343 RTE_FLOW_ACTION_TYPE_FLAG,
344 RTE_FLOW_ACTION_TYPE_DROP,
345 RTE_FLOW_ACTION_TYPE_COUNT,
346 RTE_FLOW_ACTION_TYPE_END,
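/*
 * Example (hypothetical application actions): a typical v2 action list
 * that steers and marks, sketched as:
 *
 *	const struct rte_flow_action_mark mark = { .id = 42 };
 *	const struct rte_flow_action_queue queue = { .index = 0 };
 *	const struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *
 * enic_copy_action_v2() maps the 32-bit mark ID to the NIC's 16-bit
 * filter ID, so IDs >= 0xfffe (ENIC_MAGIC_FILTER_ID - 1) are rejected.
 */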
349 /** Action capabilities indexed by NIC version information */
350 static const struct enic_action_cap enic_action_cap[] = {
351 [FILTER_ACTION_RQ_STEERING_FLAG] = {
352 .actions = enic_supported_actions_v1,
353 .copy_fn = enic_copy_action_v1,
355 [FILTER_ACTION_FILTER_ID_FLAG] = {
356 .actions = enic_supported_actions_v2_id,
357 .copy_fn = enic_copy_action_v2,
359 [FILTER_ACTION_DROP_FLAG] = {
360 .actions = enic_supported_actions_v2_drop,
361 .copy_fn = enic_copy_action_v2,
363 [FILTER_ACTION_COUNTER_FLAG] = {
364 .actions = enic_supported_actions_v2_count,
365 .copy_fn = enic_copy_action_v2,
370 mask_exact_match(const u8 *supported, const u8 *supplied,
374 for (i = 0; i < size; i++) {
375 if (supported[i] != supplied[i])
382 enic_copy_item_ipv4_v1(struct copy_item_args *arg)
384 const struct rte_flow_item *item = arg->item;
385 struct filter_v2 *enic_filter = arg->filter;
386 const struct rte_flow_item_ipv4 *spec = item->spec;
387 const struct rte_flow_item_ipv4 *mask = item->mask;
388 struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4;
389 struct ipv4_hdr supported_mask = {
390 .src_addr = 0xffffffff,
391 .dst_addr = 0xffffffff,
397 mask = &rte_flow_item_ipv4_mask;
399 /* This is an exact match filter, both fields must be set */
400 if (!spec || !spec->hdr.src_addr || !spec->hdr.dst_addr) {
401 FLOW_LOG(ERR, "IPv4 exact match src/dst addr");
405 /* Check that the supplied mask exactly matches the capability. */
406 if (!mask_exact_match((const u8 *)&supported_mask,
407 (const u8 *)item->mask, sizeof(*mask))) {
408 FLOW_LOG(ERR, "IPv4 exact match mask");
412 enic_filter->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
413 enic_5tup->src_addr = spec->hdr.src_addr;
414 enic_5tup->dst_addr = spec->hdr.dst_addr;
420 enic_copy_item_udp_v1(struct copy_item_args *arg)
422 const struct rte_flow_item *item = arg->item;
423 struct filter_v2 *enic_filter = arg->filter;
424 const struct rte_flow_item_udp *spec = item->spec;
425 const struct rte_flow_item_udp *mask = item->mask;
426 struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4;
427 struct udp_hdr supported_mask = {
435 mask = &rte_flow_item_udp_mask;
437 /* This is an exact match filter, both ports must be set */
438 if (!spec || !spec->hdr.src_port || !spec->hdr.dst_port) {
439 FLOW_LOG(ERR, "UDP exact match src/dst port");
443 /* Check that the supplied mask exactly matches the capability. */
444 if (!mask_exact_match((const u8 *)&supported_mask,
445 (const u8 *)item->mask, sizeof(*mask))) {
446 FLOW_LOG(ERR, "UDP exact match mask");
450 enic_filter->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
451 enic_5tup->src_port = spec->hdr.src_port;
452 enic_5tup->dst_port = spec->hdr.dst_port;
453 enic_5tup->protocol = PROTO_UDP;
459 enic_copy_item_tcp_v1(struct copy_item_args *arg)
461 const struct rte_flow_item *item = arg->item;
462 struct filter_v2 *enic_filter = arg->filter;
463 const struct rte_flow_item_tcp *spec = item->spec;
464 const struct rte_flow_item_tcp *mask = item->mask;
465 struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4;
466 struct tcp_hdr supported_mask = {
474 mask = &rte_flow_item_tcp_mask;
476 /* This is an exact match filter, both ports must be set */
477 if (!spec || !spec->hdr.src_port || !spec->hdr.dst_port) {
478 FLOW_LOG(ERR, "TCP exact match src/dst port");
482 /* Check that the supplied mask exactly matches the capability. */
483 if (!mask_exact_match((const u8 *)&supported_mask,
484 (const u8 *)item->mask, sizeof(*mask))) {
485 FLOW_LOG(ERR, "TCP exact match mask");
489 enic_filter->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
490 enic_5tup->src_port = spec->hdr.src_port;
491 enic_5tup->dst_port = spec->hdr.dst_port;
492 enic_5tup->protocol = PROTO_TCP;
498 * The common 'copy' function for all inner packet patterns. Patterns are
499 * first appended to the L5 pattern buffer. Then, since the NIC filter
500 * API has no special support for inner packet matching at the moment,
501 * we set EtherType and IP proto as necessary.
504 copy_inner_common(struct filter_generic_1 *gp, uint8_t *inner_ofst,
505 const void *val, const void *mask, uint8_t val_size,
506 uint8_t proto_off, uint16_t proto_val, uint8_t proto_size)
508 uint8_t *l5_mask, *l5_val;
511 /* Bail if no space is left in the L5 pattern buffer. */
512 start_off = *inner_ofst;
513 if ((start_off + val_size) > FILTER_GENERIC_1_KEY_LEN)
515 l5_mask = gp->layer[FILTER_GENERIC_1_L5].mask;
516 l5_val = gp->layer[FILTER_GENERIC_1_L5].val;
517 /* Copy the pattern into the L5 buffer. */
519 memcpy(l5_mask + start_off, mask, val_size);
520 memcpy(l5_val + start_off, val, val_size);
522 /* Set the protocol field in the previous header. */
526 m = l5_mask + proto_off;
527 v = l5_val + proto_off;
528 if (proto_size == 1) {
529 *(uint8_t *)m = 0xff;
530 *(uint8_t *)v = (uint8_t)proto_val;
531 } else if (proto_size == 2) {
532 *(uint16_t *)m = 0xffff;
533 *(uint16_t *)v = proto_val;
536 /* All inner headers land in the L5 buffer, even if their spec is null. */
537 *inner_ofst += val_size;
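/*
 * Illustration: after inner ETH and inner IPV4 items, the L5 buffer
 * holds both headers back to back, and *inner_ofst has advanced by
 * sizeof(struct ether_hdr) + sizeof(struct ipv4_hdr) (14 + 20 bytes);
 * the total must stay within FILTER_GENERIC_1_KEY_LEN.
 */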
542 enic_copy_item_inner_eth_v2(struct copy_item_args *arg)
544 const void *mask = arg->item->mask;
545 uint8_t *off = arg->inner_ofst;
549 mask = &rte_flow_item_eth_mask;
550 arg->l2_proto_off = *off + offsetof(struct ether_hdr, ether_type);
551 return copy_inner_common(&arg->filter->u.generic_1, off,
552 arg->item->spec, mask, sizeof(struct ether_hdr),
553 0 /* no previous protocol */, 0, 0);
557 enic_copy_item_inner_vlan_v2(struct copy_item_args *arg)
559 const void *mask = arg->item->mask;
560 uint8_t *off = arg->inner_ofst;
561 uint8_t eth_type_off;
565 mask = &rte_flow_item_vlan_mask;
566 /* Append vlan header to L5 and set ether type = TPID */
567 eth_type_off = arg->l2_proto_off;
568 arg->l2_proto_off = *off + offsetof(struct vlan_hdr, eth_proto);
569 return copy_inner_common(&arg->filter->u.generic_1, off,
570 arg->item->spec, mask, sizeof(struct vlan_hdr),
571 eth_type_off, rte_cpu_to_be_16(ETHER_TYPE_VLAN), 2);
575 enic_copy_item_inner_ipv4_v2(struct copy_item_args *arg)
577 const void *mask = arg->item->mask;
578 uint8_t *off = arg->inner_ofst;
582 mask = &rte_flow_item_ipv4_mask;
583 /* Append ipv4 header to L5 and set ether type = ipv4 */
584 arg->l3_proto_off = *off + offsetof(struct ipv4_hdr, next_proto_id);
585 return copy_inner_common(&arg->filter->u.generic_1, off,
586 arg->item->spec, mask, sizeof(struct ipv4_hdr),
587 arg->l2_proto_off, rte_cpu_to_be_16(ETHER_TYPE_IPv4), 2);
591 enic_copy_item_inner_ipv6_v2(struct copy_item_args *arg)
593 const void *mask = arg->item->mask;
594 uint8_t *off = arg->inner_ofst;
598 mask = &rte_flow_item_ipv6_mask;
599 /* Append ipv6 header to L5 and set ether type = ipv6 */
600 arg->l3_proto_off = *off + offsetof(struct ipv6_hdr, proto);
601 return copy_inner_common(&arg->filter->u.generic_1, off,
602 arg->item->spec, mask, sizeof(struct ipv6_hdr),
603 arg->l2_proto_off, rte_cpu_to_be_16(ETHER_TYPE_IPv6), 2);
607 enic_copy_item_inner_udp_v2(struct copy_item_args *arg)
609 const void *mask = arg->item->mask;
610 uint8_t *off = arg->inner_ofst;
614 mask = &rte_flow_item_udp_mask;
615 /* Append udp header to L5 and set ip proto = udp */
616 return copy_inner_common(&arg->filter->u.generic_1, off,
617 arg->item->spec, mask, sizeof(struct udp_hdr),
618 arg->l3_proto_off, IPPROTO_UDP, 1);
622 enic_copy_item_inner_tcp_v2(struct copy_item_args *arg)
624 const void *mask = arg->item->mask;
625 uint8_t *off = arg->inner_ofst;
629 mask = &rte_flow_item_tcp_mask;
630 /* Append tcp header to L5 and set ip proto = tcp */
631 return copy_inner_common(&arg->filter->u.generic_1, off,
632 arg->item->spec, mask, sizeof(struct tcp_hdr),
633 arg->l3_proto_off, IPPROTO_TCP, 1);
637 enic_copy_item_eth_v2(struct copy_item_args *arg)
639 const struct rte_flow_item *item = arg->item;
640 struct filter_v2 *enic_filter = arg->filter;
641 struct ether_hdr enic_spec;
642 struct ether_hdr enic_mask;
643 const struct rte_flow_item_eth *spec = item->spec;
644 const struct rte_flow_item_eth *mask = item->mask;
645 struct filter_generic_1 *gp = &enic_filter->u.generic_1;
649 /* Match all if no spec */
654 mask = &rte_flow_item_eth_mask;
656 memcpy(enic_spec.d_addr.addr_bytes, spec->dst.addr_bytes,
658 memcpy(enic_spec.s_addr.addr_bytes, spec->src.addr_bytes,
661 memcpy(enic_mask.d_addr.addr_bytes, mask->dst.addr_bytes,
663 memcpy(enic_mask.s_addr.addr_bytes, mask->src.addr_bytes,
665 enic_spec.ether_type = spec->type;
666 enic_mask.ether_type = mask->type;
669 memcpy(gp->layer[FILTER_GENERIC_1_L2].mask, &enic_mask,
670 sizeof(struct ether_hdr));
671 memcpy(gp->layer[FILTER_GENERIC_1_L2].val, &enic_spec,
672 sizeof(struct ether_hdr));
677 enic_copy_item_vlan_v2(struct copy_item_args *arg)
679 const struct rte_flow_item *item = arg->item;
680 struct filter_v2 *enic_filter = arg->filter;
681 const struct rte_flow_item_vlan *spec = item->spec;
682 const struct rte_flow_item_vlan *mask = item->mask;
683 struct filter_generic_1 *gp = &enic_filter->u.generic_1;
684 struct ether_hdr *eth_mask;
685 struct ether_hdr *eth_val;
689 /* Match all if no spec */
694 mask = &rte_flow_item_vlan_mask;
696 eth_mask = (void *)gp->layer[FILTER_GENERIC_1_L2].mask;
697 eth_val = (void *)gp->layer[FILTER_GENERIC_1_L2].val;
698 /* Outer TPID cannot be matched */
699 if (eth_mask->ether_type)
703 * When packet matching, the VIC always compares vlan-stripped
704 * L2, regardless of vlan stripping settings. So, the inner type
705 * from vlan becomes the ether type of the eth header.
707 * Older models w/o hardware vxlan parser have a different
708 * behavior when vlan stripping is disabled. In this case,
709 * the vlan tag remains in the L2 buffer.
711 if (!arg->enic->vxlan && !arg->enic->ig_vlan_strip_en) {
712 struct vlan_hdr *vlan;
714 vlan = (struct vlan_hdr *)(eth_mask + 1);
715 vlan->eth_proto = mask->inner_type;
716 vlan = (struct vlan_hdr *)(eth_val + 1);
717 vlan->eth_proto = spec->inner_type;
719 eth_mask->ether_type = mask->inner_type;
720 eth_val->ether_type = spec->inner_type;
722 /* For TCI, use the vlan mask/val fields (little endian). */
723 gp->mask_vlan = rte_be_to_cpu_16(mask->tci);
724 gp->val_vlan = rte_be_to_cpu_16(spec->tci);
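/*
 * E.g. a spec TCI of 0x2064 on the wire becomes val_vlan = 0x2064 in
 * CPU order: PCP 1, DEI 0, VLAN ID 100 (hypothetical value, for
 * illustration only).
 */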
729 enic_copy_item_ipv4_v2(struct copy_item_args *arg)
731 const struct rte_flow_item *item = arg->item;
732 struct filter_v2 *enic_filter = arg->filter;
733 const struct rte_flow_item_ipv4 *spec = item->spec;
734 const struct rte_flow_item_ipv4 *mask = item->mask;
735 struct filter_generic_1 *gp = &enic_filter->u.generic_1;
740 gp->mask_flags |= FILTER_GENERIC_1_IPV4;
741 gp->val_flags |= FILTER_GENERIC_1_IPV4;
743 /* Match all if no spec */
748 mask = &rte_flow_item_ipv4_mask;
750 memcpy(gp->layer[FILTER_GENERIC_1_L3].mask, &mask->hdr,
751 sizeof(struct ipv4_hdr));
752 memcpy(gp->layer[FILTER_GENERIC_1_L3].val, &spec->hdr,
753 sizeof(struct ipv4_hdr));
758 enic_copy_item_ipv6_v2(struct copy_item_args *arg)
760 const struct rte_flow_item *item = arg->item;
761 struct filter_v2 *enic_filter = arg->filter;
762 const struct rte_flow_item_ipv6 *spec = item->spec;
763 const struct rte_flow_item_ipv6 *mask = item->mask;
764 struct filter_generic_1 *gp = &enic_filter->u.generic_1;
769 gp->mask_flags |= FILTER_GENERIC_1_IPV6;
770 gp->val_flags |= FILTER_GENERIC_1_IPV6;
772 /* Match all if no spec */
777 mask = &rte_flow_item_ipv6_mask;
779 memcpy(gp->layer[FILTER_GENERIC_1_L3].mask, &mask->hdr,
780 sizeof(struct ipv6_hdr));
781 memcpy(gp->layer[FILTER_GENERIC_1_L3].val, &spec->hdr,
782 sizeof(struct ipv6_hdr));
787 enic_copy_item_udp_v2(struct copy_item_args *arg)
789 const struct rte_flow_item *item = arg->item;
790 struct filter_v2 *enic_filter = arg->filter;
791 const struct rte_flow_item_udp *spec = item->spec;
792 const struct rte_flow_item_udp *mask = item->mask;
793 struct filter_generic_1 *gp = &enic_filter->u.generic_1;
798 gp->mask_flags |= FILTER_GENERIC_1_UDP;
799 gp->val_flags |= FILTER_GENERIC_1_UDP;
801 /* Match all if no spec */
806 mask = &rte_flow_item_udp_mask;
808 memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,
809 sizeof(struct udp_hdr));
810 memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,
811 sizeof(struct udp_hdr));
816 enic_copy_item_tcp_v2(struct copy_item_args *arg)
818 const struct rte_flow_item *item = arg->item;
819 struct filter_v2 *enic_filter = arg->filter;
820 const struct rte_flow_item_tcp *spec = item->spec;
821 const struct rte_flow_item_tcp *mask = item->mask;
822 struct filter_generic_1 *gp = &enic_filter->u.generic_1;
827 gp->mask_flags |= FILTER_GENERIC_1_TCP;
828 gp->val_flags |= FILTER_GENERIC_1_TCP;
830 /* Match all if no spec */
837 memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,
838 sizeof(struct tcp_hdr));
839 memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,
840 sizeof(struct tcp_hdr));
845 enic_copy_item_sctp_v2(struct copy_item_args *arg)
847 const struct rte_flow_item *item = arg->item;
848 struct filter_v2 *enic_filter = arg->filter;
849 const struct rte_flow_item_sctp *spec = item->spec;
850 const struct rte_flow_item_sctp *mask = item->mask;
851 struct filter_generic_1 *gp = &enic_filter->u.generic_1;
852 uint8_t *ip_proto_mask = NULL;
853 uint8_t *ip_proto = NULL;
858 * The NIC filter API has no flags for "match sctp", so explicitly set
859 * the protocol number in the IP pattern.
861 if (gp->val_flags & FILTER_GENERIC_1_IPV4) {
863 ip = (struct ipv4_hdr *)gp->layer[FILTER_GENERIC_1_L3].mask;
864 ip_proto_mask = &ip->next_proto_id;
865 ip = (struct ipv4_hdr *)gp->layer[FILTER_GENERIC_1_L3].val;
866 ip_proto = &ip->next_proto_id;
867 } else if (gp->val_flags & FILTER_GENERIC_1_IPV6) {
869 ip = (struct ipv6_hdr *)gp->layer[FILTER_GENERIC_1_L3].mask;
870 ip_proto_mask = &ip->proto;
871 ip = (struct ipv6_hdr *)gp->layer[FILTER_GENERIC_1_L3].val;
872 ip_proto = &ip->proto;
874 /* Need IPv4/IPv6 pattern first */
877 *ip_proto = IPPROTO_SCTP;
878 *ip_proto_mask = 0xff;
880 /* Match all if no spec */
885 mask = &rte_flow_item_sctp_mask;
887 memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,
888 sizeof(struct sctp_hdr));
889 memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,
890 sizeof(struct sctp_hdr));
895 enic_copy_item_vxlan_v2(struct copy_item_args *arg)
897 const struct rte_flow_item *item = arg->item;
898 struct filter_v2 *enic_filter = arg->filter;
899 uint8_t *inner_ofst = arg->inner_ofst;
900 const struct rte_flow_item_vxlan *spec = item->spec;
901 const struct rte_flow_item_vxlan *mask = item->mask;
902 struct filter_generic_1 *gp = &enic_filter->u.generic_1;
908 * The NIC filter API has no flags for "match vxlan". Set UDP port to
909 * avoid false positives.
911 gp->mask_flags |= FILTER_GENERIC_1_UDP;
912 gp->val_flags |= FILTER_GENERIC_1_UDP;
913 udp = (struct udp_hdr *)gp->layer[FILTER_GENERIC_1_L4].mask;
914 udp->dst_port = 0xffff;
915 udp = (struct udp_hdr *)gp->layer[FILTER_GENERIC_1_L4].val;
916 udp->dst_port = RTE_BE16(4789);
917 /* Match all if no spec */
922 mask = &rte_flow_item_vxlan_mask;
924 memcpy(gp->layer[FILTER_GENERIC_1_L5].mask, mask,
925 sizeof(struct vxlan_hdr));
926 memcpy(gp->layer[FILTER_GENERIC_1_L5].val, spec,
927 sizeof(struct vxlan_hdr));
929 *inner_ofst = sizeof(struct vxlan_hdr);
934 * Return 1 if the current item is valid on top of the previous one.
936 * @param prev_item[in]
937 * The item before this one in the pattern or RTE_FLOW_ITEM_TYPE_END if this
939 * @param item_info[in]
940 * Info about this item, like valid previous items.
941 * @param is_first[in]
942 * True if this is the first item in the pattern.
945 item_stacking_valid(enum rte_flow_item_type prev_item,
946 const struct enic_items *item_info, u8 is_first_item)
948 enum rte_flow_item_type const *allowed_items = item_info->prev_items;
952 for (; *allowed_items != RTE_FLOW_ITEM_TYPE_END; allowed_items++) {
953 if (prev_item == *allowed_items)
957 /* This is the first item in the stack. Check whether that is allowed. */
958 if (is_first_item && item_info->valid_start_item)
965 * Fix up the L5 layer. HW vxlan parsing removes the vxlan header from L5.
966 * Instead it is in L4, following the UDP header. Append the vxlan
967 * pattern to L4 (udp) and shift any inner packet pattern in L5.
970 fixup_l5_layer(struct enic *enic, struct filter_generic_1 *gp,
973 uint8_t layer[FILTER_GENERIC_1_KEY_LEN];
977 if (!(inner_ofst > 0 && enic->vxlan))
980 vxlan = sizeof(struct vxlan_hdr);
981 memcpy(gp->layer[FILTER_GENERIC_1_L4].mask + sizeof(struct udp_hdr),
982 gp->layer[FILTER_GENERIC_1_L5].mask, vxlan);
983 memcpy(gp->layer[FILTER_GENERIC_1_L4].val + sizeof(struct udp_hdr),
984 gp->layer[FILTER_GENERIC_1_L5].val, vxlan);
985 inner = inner_ofst - vxlan;
986 memset(layer, 0, sizeof(layer));
987 memcpy(layer, gp->layer[FILTER_GENERIC_1_L5].mask + vxlan, inner);
988 memcpy(gp->layer[FILTER_GENERIC_1_L5].mask, layer, sizeof(layer));
989 memset(layer, 0, sizeof(layer));
990 memcpy(layer, gp->layer[FILTER_GENERIC_1_L5].val + vxlan, inner);
991 memcpy(gp->layer[FILTER_GENERIC_1_L5].val, layer, sizeof(layer));
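/*
 * Illustration: for a UDP / VXLAN / inner ETH pattern, the 8-byte vxlan
 * header moves from the start of L5 into L4, right behind the 8-byte udp
 * header, and the inner eth pattern shifts down to offset 0 of L5.
 */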
995 * Build the internal enic filter structure from the provided pattern. The
996 * pattern is validated as the items are copied.
999 * @param items_info[in]
1000 * Info about this NIC's item support, like valid previous items.
1001 * @param enic_filter[out]
1002 * NIC-specific filter derived from the pattern.
1006 enic_copy_filter(const struct rte_flow_item pattern[],
1007 const struct enic_filter_cap *cap,
1009 struct filter_v2 *enic_filter,
1010 struct rte_flow_error *error)
1013 const struct rte_flow_item *item = pattern;
1014 u8 inner_ofst = 0; /* If encapsulated, ofst into L5 */
1015 enum rte_flow_item_type prev_item;
1016 const struct enic_items *item_info;
1017 struct copy_item_args args;
1018 enic_copy_item_fn *copy_fn;
1019 u8 is_first_item = 1;
1025 args.filter = enic_filter;
1026 args.inner_ofst = &inner_ofst;
1028 for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
1029 /* Get info about how to validate and copy the item. If NULL
1030 * is returned, the NIC does not support the item.
1032 if (item->type == RTE_FLOW_ITEM_TYPE_VOID)
1035 item_info = &cap->item_info[item->type];
1036 if (item->type > cap->max_item_type ||
1037 item_info->copy_item == NULL ||
1038 (inner_ofst > 0 && item_info->inner_copy_item == NULL)) {
1039 rte_flow_error_set(error, ENOTSUP,
1040 RTE_FLOW_ERROR_TYPE_ITEM,
1041 NULL, "Unsupported item.");
1045 /* check to see if item stacking is valid */
1046 if (!item_stacking_valid(prev_item, item_info, is_first_item))
1047 goto stacking_error;
1050 copy_fn = inner_ofst > 0 ? item_info->inner_copy_item :
1051 item_info->copy_item;
1052 ret = copy_fn(&args);
1054 goto item_not_supported;
1055 prev_item = item->type;
1058 fixup_l5_layer(enic, &enic_filter->u.generic_1, inner_ofst);
1063 rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_ITEM,
1064 NULL, "enic type error");
1068 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
1069 item, "stacking error");
1074 * Build the internal version 1 NIC action structure from the provided actions.
1075 * The action list is validated as it is copied.
1077 * @param actions[in]
1078 * @param enic_action[out]
1079 * NIC-specific actions derived from the actions.
1083 enic_copy_action_v1(const struct rte_flow_action actions[],
1084 struct filter_action_v2 *enic_action)
1087 uint32_t overlap = 0;
1091 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1092 if (actions->type == RTE_FLOW_ACTION_TYPE_VOID)
1095 switch (actions->type) {
1096 case RTE_FLOW_ACTION_TYPE_QUEUE: {
1097 const struct rte_flow_action_queue *queue =
1098 (const struct rte_flow_action_queue *)
1104 enic_action->rq_idx =
1105 enic_rte_rq_idx_to_sop_idx(queue->index);
1113 if (!(overlap & FATE))
1115 enic_action->type = FILTER_ACTION_RQ_STEERING;
1120 * Build the internal version 2 NIC action structure from the provided actions.
1121 * The action list is validated as it is copied.
1123 * @param actions[in]
1124 * @param enic_action[out]
1125 * NIC-specific actions derived from the actions.
1129 enic_copy_action_v2(const struct rte_flow_action actions[],
1130 struct filter_action_v2 *enic_action)
1132 enum { FATE = 1, MARK = 2, };
1133 uint32_t overlap = 0;
1137 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1138 switch (actions->type) {
1139 case RTE_FLOW_ACTION_TYPE_QUEUE: {
1140 const struct rte_flow_action_queue *queue =
1141 (const struct rte_flow_action_queue *)
1147 enic_action->rq_idx =
1148 enic_rte_rq_idx_to_sop_idx(queue->index);
1149 enic_action->flags |= FILTER_ACTION_RQ_STEERING_FLAG;
1152 case RTE_FLOW_ACTION_TYPE_MARK: {
1153 const struct rte_flow_action_mark *mark =
1154 (const struct rte_flow_action_mark *)
1161 * Map mark ID (32-bit) to filter ID (16-bit):
1162 * - Reject values > 16 bits
1163 * - Filter ID 0 is reserved for filters that steer
1164 * but not mark. So add 1 to the mark ID to avoid
1166 * - Filter ID (ENIC_MAGIC_FILTER_ID = 0xffff) is
1167 * reserved for the "flag" action below.
1169 if (mark->id >= ENIC_MAGIC_FILTER_ID - 1)
1171 enic_action->filter_id = mark->id + 1;
1172 enic_action->flags |= FILTER_ACTION_FILTER_ID_FLAG;
1175 case RTE_FLOW_ACTION_TYPE_FLAG: {
1179 /* ENIC_MAGIC_FILTER_ID is reserved for flagging */
1180 enic_action->filter_id = ENIC_MAGIC_FILTER_ID;
1181 enic_action->flags |= FILTER_ACTION_FILTER_ID_FLAG;
1184 case RTE_FLOW_ACTION_TYPE_DROP: {
1188 enic_action->flags |= FILTER_ACTION_DROP_FLAG;
1191 case RTE_FLOW_ACTION_TYPE_COUNT: {
1192 enic_action->flags |= FILTER_ACTION_COUNTER_FLAG;
1195 case RTE_FLOW_ACTION_TYPE_VOID:
1202 if (!(overlap & FATE))
1204 enic_action->type = FILTER_ACTION_V2;
1208 /** Check if the action is supported */
1210 enic_match_action(const struct rte_flow_action *action,
1211 const enum rte_flow_action_type *supported_actions)
1213 for (; *supported_actions != RTE_FLOW_ACTION_TYPE_END;
1214 supported_actions++) {
1215 if (action->type == *supported_actions)
1221 /** Get the NIC filter capabilities structure */
1222 static const struct enic_filter_cap *
1223 enic_get_filter_cap(struct enic *enic)
1225 if (enic->flow_filter_mode)
1226 return &enic_filter_cap[enic->flow_filter_mode];
1231 /** Get the actions for this NIC version. */
1232 static const struct enic_action_cap *
1233 enic_get_action_cap(struct enic *enic)
1235 const struct enic_action_cap *ea;
1238 actions = enic->filter_actions;
1239 if (actions & FILTER_ACTION_COUNTER_FLAG)
1240 ea = &enic_action_cap[FILTER_ACTION_COUNTER_FLAG];
1241 else if (actions & FILTER_ACTION_DROP_FLAG)
1242 ea = &enic_action_cap[FILTER_ACTION_DROP_FLAG];
1243 else if (actions & FILTER_ACTION_FILTER_ID_FLAG)
1244 ea = &enic_action_cap[FILTER_ACTION_FILTER_ID_FLAG];
1246 ea = &enic_action_cap[FILTER_ACTION_RQ_STEERING_FLAG];
1250 /* Debug function to dump internal NIC action structure. */
1252 enic_dump_actions(const struct filter_action_v2 *ea)
1254 if (ea->type == FILTER_ACTION_RQ_STEERING) {
1255 FLOW_LOG(INFO, "Action(V1), queue: %u\n", ea->rq_idx);
1256 } else if (ea->type == FILTER_ACTION_V2) {
1257 FLOW_LOG(INFO, "Actions(V2)\n");
1258 if (ea->flags & FILTER_ACTION_RQ_STEERING_FLAG)
1259 FLOW_LOG(INFO, "\tqueue: %u\n",
1260 enic_sop_rq_idx_to_rte_idx(ea->rq_idx));
1261 if (ea->flags & FILTER_ACTION_FILTER_ID_FLAG)
1262 FLOW_LOG(INFO, "\tfilter_id: %u\n", ea->filter_id);
1266 /* Debug function to dump internal NIC filter structure. */
1268 enic_dump_filter(const struct filter_v2 *filt)
1270 const struct filter_generic_1 *gp;
1273 char ip4[16], ip6[16], udp[16], tcp[16], tcpudp[16], ip4csum[16];
1274 char l4csum[16], ipfrag[16];
1276 switch (filt->type) {
1277 case FILTER_IPV4_5TUPLE:
1278 FLOW_LOG(INFO, "FILTER_IPV4_5TUPLE\n");
1280 case FILTER_USNIC_IP:
1282 /* FIXME: this should be a loop */
1283 gp = &filt->u.generic_1;
1284 FLOW_LOG(INFO, "Filter: vlan: 0x%04x, mask: 0x%04x\n",
1285 gp->val_vlan, gp->mask_vlan);
1287 if (gp->mask_flags & FILTER_GENERIC_1_IPV4)
1289 (gp->val_flags & FILTER_GENERIC_1_IPV4)
1290 ? "ip4(y)" : "ip4(n)");
1292 sprintf(ip4, "%s ", "ip4(x)");
1294 if (gp->mask_flags & FILTER_GENERIC_1_IPV6)
1296 (gp->val_flags & FILTER_GENERIC_1_IPV6)
1297 ? "ip6(y)" : "ip6(n)");
1299 sprintf(ip6, "%s ", "ip6(x)");
1301 if (gp->mask_flags & FILTER_GENERIC_1_UDP)
1303 (gp->val_flags & FILTER_GENERIC_1_UDP)
1304 ? "udp(y)" : "udp(n)");
1306 sprintf(udp, "%s ", "udp(x)");
1308 if (gp->mask_flags & FILTER_GENERIC_1_TCP)
1310 (gp->val_flags & FILTER_GENERIC_1_TCP)
1311 ? "tcp(y)" : "tcp(n)");
1313 sprintf(tcp, "%s ", "tcp(x)");
1315 if (gp->mask_flags & FILTER_GENERIC_1_TCP_OR_UDP)
1316 sprintf(tcpudp, "%s ",
1317 (gp->val_flags & FILTER_GENERIC_1_TCP_OR_UDP)
1318 ? "tcpudp(y)" : "tcpudp(n)");
1320 sprintf(tcpudp, "%s ", "tcpudp(x)");
1322 if (gp->mask_flags & FILTER_GENERIC_1_IP4SUM_OK)
1323 sprintf(ip4csum, "%s ",
1324 (gp->val_flags & FILTER_GENERIC_1_IP4SUM_OK)
1325 ? "ip4csum(y)" : "ip4csum(n)");
1327 sprintf(ip4csum, "%s ", "ip4csum(x)");
1329 if (gp->mask_flags & FILTER_GENERIC_1_L4SUM_OK)
1330 sprintf(l4csum, "%s ",
1331 (gp->val_flags & FILTER_GENERIC_1_L4SUM_OK)
1332 ? "l4csum(y)" : "l4csum(n)");
1334 sprintf(l4csum, "%s ", "l4csum(x)");
1336 if (gp->mask_flags & FILTER_GENERIC_1_IPFRAG)
1337 sprintf(ipfrag, "%s ",
1338 (gp->val_flags & FILTER_GENERIC_1_IPFRAG)
1339 ? "ipfrag(y)" : "ipfrag(n)");
1341 sprintf(ipfrag, "%s ", "ipfrag(x)");
1342 FLOW_LOG(INFO, "\tFlags: %s%s%s%s%s%s%s%s\n", ip4, ip6, udp,
1343 tcp, tcpudp, ip4csum, l4csum, ipfrag);
1345 for (i = 0; i < FILTER_GENERIC_1_NUM_LAYERS; i++) {
1346 mbyte = FILTER_GENERIC_1_KEY_LEN - 1;
1347 while (mbyte && !gp->layer[i].mask[mbyte])
1353 for (j = 0; j <= mbyte; j++) {
1355 gp->layer[i].mask[j]);
1359 FLOW_LOG(INFO, "\tL%u mask: %s\n", i + 2, buf);
1361 for (j = 0; j <= mbyte; j++) {
1363 gp->layer[i].val[j]);
1367 FLOW_LOG(INFO, "\tL%u val: %s\n", i + 2, buf);
1371 FLOW_LOG(INFO, "FILTER UNKNOWN\n");
1376 /* Debug function to dump internal NIC flow structures. */
1378 enic_dump_flow(const struct filter_action_v2 *ea, const struct filter_v2 *filt)
1380 enic_dump_filter(filt);
1381 enic_dump_actions(ea);
1386 * Internal flow parse/validate function.
1389 * This device pointer.
1390 * @param pattern[in]
1391 * @param actions[in]
1393 * @param enic_filter[out]
1394 * Internal NIC filter structure pointer.
1395 * @param enic_action[out]
1396 * Internal NIC action structure pointer.
1399 enic_flow_parse(struct rte_eth_dev *dev,
1400 const struct rte_flow_attr *attrs,
1401 const struct rte_flow_item pattern[],
1402 const struct rte_flow_action actions[],
1403 struct rte_flow_error *error,
1404 struct filter_v2 *enic_filter,
1405 struct filter_action_v2 *enic_action)
1407 unsigned int ret = 0;
1408 struct enic *enic = pmd_priv(dev);
1409 const struct enic_filter_cap *enic_filter_cap;
1410 const struct enic_action_cap *enic_action_cap;
1411 const struct rte_flow_action *action;
1415 memset(enic_filter, 0, sizeof(*enic_filter));
1416 memset(enic_action, 0, sizeof(*enic_action));
1419 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1420 NULL, "No pattern specified");
1425 rte_flow_error_set(error, EINVAL,
1426 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1427 NULL, "No action specified");
1433 rte_flow_error_set(error, ENOTSUP,
1434 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
1436 "priority groups are not supported");
1438 } else if (attrs->priority) {
1439 rte_flow_error_set(error, ENOTSUP,
1440 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1442 "priorities are not supported");
1444 } else if (attrs->egress) {
1445 rte_flow_error_set(error, ENOTSUP,
1446 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1448 "egress is not supported");
1450 } else if (attrs->transfer) {
1451 rte_flow_error_set(error, ENOTSUP,
1452 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
1454 "transfer is not supported");
1456 } else if (!attrs->ingress) {
1457 rte_flow_error_set(error, ENOTSUP,
1458 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1460 "only ingress is supported");
1465 rte_flow_error_set(error, EINVAL,
1466 RTE_FLOW_ERROR_TYPE_ATTR,
1467 NULL, "No attribute specified");
1471 /* Verify Actions. */
1472 enic_action_cap = enic_get_action_cap(enic);
1473 for (action = &actions[0]; action->type != RTE_FLOW_ACTION_TYPE_END;
1475 if (action->type == RTE_FLOW_ACTION_TYPE_VOID)
1477 else if (!enic_match_action(action, enic_action_cap->actions))
1480 if (action->type != RTE_FLOW_ACTION_TYPE_END) {
1481 rte_flow_error_set(error, EPERM, RTE_FLOW_ERROR_TYPE_ACTION,
1482 action, "Invalid action.");
1485 ret = enic_action_cap->copy_fn(actions, enic_action);
1487 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
1488 NULL, "Unsupported action.");
1492 /* Verify flow items. If copying the filter from flow format to enic
1493 * format fails, the flow is not supported
1495 enic_filter_cap = enic_get_filter_cap(enic);
1496 if (enic_filter_cap == NULL) {
1497 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
1498 NULL, "Flow API not available");
1501 enic_filter->type = enic->flow_filter_mode;
1502 ret = enic_copy_filter(pattern, enic_filter_cap, enic,
1503 enic_filter, error);
1508 * Push filter/action to the NIC.
1511 * Device structure pointer.
1512 * @param enic_filter[in]
1513 * Internal NIC filter structure pointer.
1514 * @param enic_action[in]
1515 * Internal NIC action structure pointer.
1518 static struct rte_flow *
1519 enic_flow_add_filter(struct enic *enic, struct filter_v2 *enic_filter,
1520 struct filter_action_v2 *enic_action,
1521 struct rte_flow_error *error)
1523 struct rte_flow *flow;
1527 int last_max_flow_ctr;
1531 flow = rte_calloc(__func__, 1, sizeof(*flow), 0);
1533 rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
1534 NULL, "cannot allocate flow memory");
1538 flow->counter_idx = -1;
1539 last_max_flow_ctr = -1;
1540 if (enic_action->flags & FILTER_ACTION_COUNTER_FLAG) {
1541 if (!vnic_dev_counter_alloc(enic->vdev, (uint32_t *)&ctr_idx)) {
1542 rte_flow_error_set(error, ENOMEM,
1543 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1544 NULL, "cannot allocate counter");
1545 goto unwind_flow_alloc;
1547 flow->counter_idx = ctr_idx;
1548 enic_action->counter_index = ctr_idx;
1550 /* If index is the largest, increase the counter DMA size */
1551 if (ctr_idx > enic->max_flow_counter) {
1552 err = vnic_dev_counter_dma_cfg(enic->vdev,
1553 VNIC_FLOW_COUNTER_UPDATE_MSECS,
1556 rte_flow_error_set(error, -err,
1557 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1558 NULL, "counter DMA config failed");
1559 goto unwind_ctr_alloc;
1561 last_max_flow_ctr = enic->max_flow_counter;
1562 enic->max_flow_counter = ctr_idx;
1566 /* entry[in] is the queue ID, entry[out] is the filter ID for delete. */
1567 entry = enic_action->rq_idx;
1568 err = vnic_dev_classifier(enic->vdev, CLSF_ADD, &entry, enic_filter,
1571 rte_flow_error_set(error, -err, RTE_FLOW_ERROR_TYPE_HANDLE,
1572 NULL, "vnic_dev_classifier error");
1573 goto unwind_ctr_dma_cfg;
1576 flow->enic_filter_id = entry;
1577 flow->enic_filter = *enic_filter;
1581 /* unwind if there are errors */
1583 if (last_max_flow_ctr != -1) {
1584 /* reduce counter DMA size */
1585 vnic_dev_counter_dma_cfg(enic->vdev,
1586 VNIC_FLOW_COUNTER_UPDATE_MSECS,
1587 last_max_flow_ctr + 1);
1588 enic->max_flow_counter = last_max_flow_ctr;
1591 if (flow->counter_idx != -1)
1592 vnic_dev_counter_free(enic->vdev, ctr_idx);
1599 * Remove filter/action from the NIC.
1602 * Device structure pointer.
1603 * @param filter_id[in]
1605 * @param enic_action[in]
1606 * Internal NIC action structure pointer.
1610 enic_flow_del_filter(struct enic *enic, struct rte_flow *flow,
1611 struct rte_flow_error *error)
1618 filter_id = flow->enic_filter_id;
1619 err = vnic_dev_classifier(enic->vdev, CLSF_DEL, &filter_id, NULL, NULL);
1621 rte_flow_error_set(error, -err, RTE_FLOW_ERROR_TYPE_HANDLE,
1622 NULL, "vnic_dev_classifier failed");
1626 if (flow->counter_idx != -1) {
1627 if (!vnic_dev_counter_free(enic->vdev, flow->counter_idx))
1628 dev_err(enic, "counter free failed, idx: %d\n",
1630 flow->counter_idx = -1;
1636 * The following functions are callbacks for the generic flow API.
1640 * Validate a flow supported by the NIC.
1642 * @see rte_flow_validate()
1646 enic_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attrs,
1647 const struct rte_flow_item pattern[],
1648 const struct rte_flow_action actions[],
1649 struct rte_flow_error *error)
1651 struct filter_v2 enic_filter;
1652 struct filter_action_v2 enic_action;
1657 ret = enic_flow_parse(dev, attrs, pattern, actions, error,
1658 &enic_filter, &enic_action);
1660 enic_dump_flow(&enic_action, &enic_filter);
1665 * Create a flow supported by the NIC.
1667 * @see rte_flow_create()
1670 static struct rte_flow *
1671 enic_flow_create(struct rte_eth_dev *dev,
1672 const struct rte_flow_attr *attrs,
1673 const struct rte_flow_item pattern[],
1674 const struct rte_flow_action actions[],
1675 struct rte_flow_error *error)
1678 struct filter_v2 enic_filter;
1679 struct filter_action_v2 enic_action;
1680 struct rte_flow *flow;
1681 struct enic *enic = pmd_priv(dev);
1685 ret = enic_flow_parse(dev, attrs, pattern, actions, error, &enic_filter,
1690 rte_spinlock_lock(&enic->flows_lock);
1691 flow = enic_flow_add_filter(enic, &enic_filter, &enic_action,
1694 LIST_INSERT_HEAD(&enic->flows, flow, next);
1695 rte_spinlock_unlock(&enic->flows_lock);
1701 * Destroy a flow supported by the NIC.
1703 * @see rte_flow_destroy()
1707 enic_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
1708 __rte_unused struct rte_flow_error *error)
1710 struct enic *enic = pmd_priv(dev);
1714 rte_spinlock_lock(&enic->flows_lock);
1715 enic_flow_del_filter(enic, flow, error);
1716 LIST_REMOVE(flow, next);
1717 rte_spinlock_unlock(&enic->flows_lock);
1723 * Flush all flows on the device.
1725 * @see rte_flow_flush()
1729 enic_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
1731 struct rte_flow *flow;
1732 struct enic *enic = pmd_priv(dev);
1736 rte_spinlock_lock(&enic->flows_lock);
1738 while (!LIST_EMPTY(&enic->flows)) {
1739 flow = LIST_FIRST(&enic->flows);
1740 enic_flow_del_filter(enic, flow, error);
1741 LIST_REMOVE(flow, next);
1744 rte_spinlock_unlock(&enic->flows_lock);
1749 enic_flow_query_count(struct rte_eth_dev *dev,
1750 struct rte_flow *flow, void *data,
1751 struct rte_flow_error *error)
1753 struct enic *enic = pmd_priv(dev);
1754 struct rte_flow_query_count *query;
1755 uint64_t packets, bytes;
1759 if (flow->counter_idx == -1) {
1760 return rte_flow_error_set(error, ENOTSUP,
1761 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1763 "flow does not have counter");
1765 query = (struct rte_flow_query_count *)data;
1766 if (!vnic_dev_counter_query(enic->vdev, flow->counter_idx,
1767 !!query->reset, &packets, &bytes)) {
1768 return rte_flow_error_set
1770 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1772 "cannot read counter");
1774 query->hits_set = 1;
1775 query->bytes_set = 1;
1776 query->hits = packets;
1777 query->bytes = bytes;
1782 enic_flow_query(struct rte_eth_dev *dev,
1783 struct rte_flow *flow,
1784 const struct rte_flow_action *actions,
1786 struct rte_flow_error *error)
1792 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1793 switch (actions->type) {
1794 case RTE_FLOW_ACTION_TYPE_VOID:
1796 case RTE_FLOW_ACTION_TYPE_COUNT:
1797 ret = enic_flow_query_count(dev, flow, data, error);
1800 return rte_flow_error_set(error, ENOTSUP,
1801 RTE_FLOW_ERROR_TYPE_ACTION,
1803 "action not supported");
1812 * Flow callback registration.
1816 const struct rte_flow_ops enic_flow_ops = {
1817 .validate = enic_flow_validate,
1818 .create = enic_flow_create,
1819 .destroy = enic_flow_destroy,
1820 .flush = enic_flow_flush,
1821 .query = enic_flow_query,
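/*
 * Example (application side, illustrative): these callbacks are not
 * called directly; they are reached through the generic flow API, e.g.
 *
 *	struct rte_flow_error err;
 *	struct rte_flow *f = NULL;
 *
 *	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *		f = rte_flow_create(port_id, &attr, pattern, actions, &err);
 *
 * attr, pattern, and actions are hypothetical app-provided definitions;
 * enic_flow_parse() accepts only ingress attributes with group 0 and
 * priority 0.
 */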