/*
 * Copyright (c) 2019 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vnet/vnet.h>
#include <vppinfra/vec.h>
#include <vppinfra/format.h>

#include <vnet/ip/ip.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/ethernet/arp_packet.h>
#include <vnet/vxlan/vxlan.h>
#include <dpdk/device/dpdk.h>
#include <dpdk/device/dpdk_priv.h>
#include <vppinfra/error.h>
#define FLOW_IS_ETHERNET_CLASS(f) \
  (f->type == VNET_FLOW_TYPE_ETHERNET)

#define FLOW_IS_IPV4_CLASS(f) \
  ((f->type == VNET_FLOW_TYPE_IP4) || \
   (f->type == VNET_FLOW_TYPE_IP4_N_TUPLE) || \
   (f->type == VNET_FLOW_TYPE_IP4_N_TUPLE_TAGGED) || \
   (f->type == VNET_FLOW_TYPE_IP4_VXLAN) || \
   (f->type == VNET_FLOW_TYPE_IP4_GTPC) || \
   (f->type == VNET_FLOW_TYPE_IP4_GTPU) || \
   (f->type == VNET_FLOW_TYPE_IP4_L2TPV3OIP) || \
   (f->type == VNET_FLOW_TYPE_IP4_IPSEC_ESP) || \
   (f->type == VNET_FLOW_TYPE_IP4_IPSEC_AH))

#define FLOW_IS_IPV6_CLASS(f) \
  ((f->type == VNET_FLOW_TYPE_IP6) || \
   (f->type == VNET_FLOW_TYPE_IP6_N_TUPLE) || \
   (f->type == VNET_FLOW_TYPE_IP6_N_TUPLE_TAGGED) || \
   (f->type == VNET_FLOW_TYPE_IP6_VXLAN))

/* check if flow is VLAN sensitive */
#define FLOW_HAS_VLAN_TAG(f) \
  ((f->type == VNET_FLOW_TYPE_IP4_N_TUPLE_TAGGED) || \
   (f->type == VNET_FLOW_TYPE_IP6_N_TUPLE_TAGGED))

/* check if flow is L3 type */
#define FLOW_IS_L3_TYPE(f) \
  ((f->type == VNET_FLOW_TYPE_IP4) || \
   (f->type == VNET_FLOW_TYPE_IP6))

/* check if flow is L4 type */
#define FLOW_IS_L4_TYPE(f) \
  ((f->type == VNET_FLOW_TYPE_IP4_N_TUPLE) || \
   (f->type == VNET_FLOW_TYPE_IP6_N_TUPLE) || \
   (f->type == VNET_FLOW_TYPE_IP4_N_TUPLE_TAGGED) || \
   (f->type == VNET_FLOW_TYPE_IP6_N_TUPLE_TAGGED))

/* check if flow is L4 tunnel type */
#define FLOW_IS_L4_TUNNEL_TYPE(f) \
  ((f->type == VNET_FLOW_TYPE_IP4_VXLAN) || \
   (f->type == VNET_FLOW_TYPE_IP6_VXLAN) || \
   (f->type == VNET_FLOW_TYPE_IP4_GTPC) || \
   (f->type == VNET_FLOW_TYPE_IP4_GTPU))
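
/*
 * Note: the class/type tests above mirror the vnet_flow_type_t values
 * defined in vnet/flow/flow.h; they decide which rte_flow items get
 * appended to the match pattern in dpdk_flow_add () below.
 */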
/* constant structs */
static const struct rte_flow_attr ingress = {.ingress = 1 };

static inline bool
mac_address_is_all_zero (const u8 addr[6])
{
  int i;

  for (i = 0; i < 6; i++)
    if (addr[i] != 0)
      return false;

  return true;
}
static inline void
dpdk_flow_convert_rss_types (u64 type, u64 * dpdk_rss_type)
{
#define BIT_IS_SET(v, b) \
  ((v) & (u64)1<<(b))

  *dpdk_rss_type = 0;

#undef _
#define _(n, f, s) \
      if (n != -1 && BIT_IS_SET(type, n)) \
        *dpdk_rss_type |= f;

  foreach_dpdk_rss_hf
#undef _
}
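
/*
 * Illustrative example (names per vnet/flow/flow.h and DPDK's rte_ethdev.h):
 * passing type = 1ULL << VNET_FLOW_RSS_TYPES_ESP sets the matching
 * RTE_ETH_RSS_ESP bit in *dpdk_rss_type via the foreach_dpdk_rss_hf table.
 */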
/** Maximum number of queue indices in struct rte_flow_action_rss. */
#define ACTION_RSS_QUEUE_NUM 128

static inline void
dpdk_flow_convert_rss_queues (u32 queue_index, u32 queue_num,
			      struct rte_flow_action_rss *rss)
{
  /* caller must ensure queue_num <= ACTION_RSS_QUEUE_NUM */
  u16 *queues = clib_mem_alloc (sizeof (*queues) * ACTION_RSS_QUEUE_NUM);
  int i;

  for (i = 0; i < queue_num; i++)
    queues[i] = queue_index++;

  rss->queue_num = queue_num;
  rss->queue = queues;
}
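
/*
 * Example: queue_index = 4, queue_num = 3 yields queues[] = { 4, 5, 6 },
 * i.e. RSS spreads matching packets across rx queues 4..6.  The array is
 * heap-allocated so it stays valid for as long as rss points at it.
 */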
static inline enum rte_eth_hash_function
dpdk_flow_convert_rss_func (vnet_rss_function_t func)
{
  enum rte_eth_hash_function rss_func;

  switch (func)
    {
    case VNET_RSS_FUNC_DEFAULT:
      rss_func = RTE_ETH_HASH_FUNCTION_DEFAULT;
      break;
    case VNET_RSS_FUNC_TOEPLITZ:
      rss_func = RTE_ETH_HASH_FUNCTION_TOEPLITZ;
      break;
    case VNET_RSS_FUNC_SIMPLE_XOR:
      rss_func = RTE_ETH_HASH_FUNCTION_SIMPLE_XOR;
      break;
    case VNET_RSS_FUNC_SYMMETRIC_TOEPLITZ:
      rss_func = RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ;
      break;
    default:
      rss_func = RTE_ETH_HASH_FUNCTION_MAX;
      break;
    }

  return rss_func;
}
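
/*
 * RTE_ETH_HASH_FUNCTION_MAX doubles as an "unsupported" sentinel here;
 * the caller in dpdk_flow_add () turns it into VNET_FLOW_ERROR_NOT_SUPPORTED.
 */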
static int
dpdk_flow_add (dpdk_device_t * xd, vnet_flow_t * f, dpdk_flow_entry_t * fe)
{
  struct rte_flow_item_eth eth[2] = { };
  struct rte_flow_item_ipv4 ip4[2] = { };
  struct rte_flow_item_ipv6 ip6[2] = { };
  struct rte_flow_item_udp udp[2] = { };
  struct rte_flow_item_tcp tcp[2] = { };
  struct rte_flow_item_gtp gtp[2] = { };
  struct rte_flow_item_l2tpv3oip l2tp[2] = { };
  struct rte_flow_item_esp esp[2] = { };
  struct rte_flow_item_ah ah[2] = { };
  struct rte_flow_item_raw generic[2] = { };
  struct rte_flow_action_mark mark = { 0 };
  struct rte_flow_action_queue queue = { 0 };
  struct rte_flow_action_rss rss = { 0 };
  struct rte_flow_item *item, *items = 0;
  struct rte_flow_action *action, *actions = 0;
  bool fate = false;

  enum
  {
    vxlan_hdr_sz = sizeof (vxlan_header_t),
    raw_sz = sizeof (struct rte_flow_item_raw)
  };

  union
  {
    struct rte_flow_item_raw item;
    u8 val[raw_sz + vxlan_hdr_sz];
  } raw[2];

  u16 src_port = 0, dst_port = 0, src_port_mask = 0, dst_port_mask = 0;
  u8 protocol = IP_PROTOCOL_RESERVED;
  int rv = 0;
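
  /*
   * Each rte_flow header struct above is declared as a two-element array:
   * element [0] is the spec (values to match), element [1] the mask
   * (which bits of the spec are significant), following the
   * item->spec / item->mask convention of rte_flow.
   */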
  /* Handle generic flow first */
  if (f->type == VNET_FLOW_TYPE_GENERIC)
    {
      generic[0].pattern = f->generic.pattern.spec;
      generic[1].pattern = f->generic.pattern.mask;

      vec_add2 (items, item, 1);
      item->type = RTE_FLOW_ITEM_TYPE_RAW;
      item->spec = generic;
      item->mask = generic + 1;

      goto pattern_end;
    }

  enum
  {
    FLOW_UNKNOWN_CLASS,
    FLOW_ETHERNET_CLASS,
    FLOW_IPV4_CLASS,
    FLOW_IPV6_CLASS,
  } flow_class = FLOW_UNKNOWN_CLASS;
  if (FLOW_IS_ETHERNET_CLASS (f))
    flow_class = FLOW_ETHERNET_CLASS;
  else if (FLOW_IS_IPV4_CLASS (f))
    flow_class = FLOW_IPV4_CLASS;
  else if (FLOW_IS_IPV6_CLASS (f))
    flow_class = FLOW_IPV6_CLASS;
  else
    return VNET_FLOW_ERROR_NOT_SUPPORTED;

  if (f->actions & (~xd->supported_flow_actions))
    return VNET_FLOW_ERROR_NOT_SUPPORTED;
  /* Match items */
  /* Layer 2, Ethernet */
  vec_add2 (items, item, 1);
  item->type = RTE_FLOW_ITEM_TYPE_ETH;

  if (flow_class == FLOW_ETHERNET_CLASS)
    {
      vnet_flow_ethernet_t *te = &f->ethernet;

      clib_memset (&eth[0], 0, sizeof (eth[0]));
      clib_memset (&eth[1], 0, sizeof (eth[1]));

      /* check if SMAC/DMAC/Ether_type assigned */
      if (!mac_address_is_all_zero (te->eth_hdr.dst_address))
	{
	  clib_memcpy_fast (&eth[0].dst, &te->eth_hdr.dst_address,
			    sizeof (eth[0].dst));
	  clib_memset (&eth[1].dst, 0xFF, sizeof (eth[1].dst));
	}

      if (!mac_address_is_all_zero (te->eth_hdr.src_address))
	{
	  clib_memcpy_fast (&eth[0].src, &te->eth_hdr.src_address,
			    sizeof (eth[0].src));
	  clib_memset (&eth[1].src, 0xFF, sizeof (eth[1].src));
	}

      if (te->eth_hdr.type)
	{
	  eth[0].type = clib_host_to_net_u16 (te->eth_hdr.type);
	  eth[1].type = clib_host_to_net_u16 (0xFFFF);
	}

      item->spec = eth;
      item->mask = eth + 1;
    }
  else
    {
      item->spec = NULL;
      item->mask = NULL;
    }
  /* currently only single empty vlan tag is supported */
  if (FLOW_HAS_VLAN_TAG (f))
    {
      vec_add2 (items, item, 1);
      item->type = RTE_FLOW_ITEM_TYPE_VLAN;
      item->spec = NULL;
      item->mask = NULL;
    }

  if (FLOW_IS_ETHERNET_CLASS (f))
    goto pattern_end;
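
  /*
   * From here on items are appended outermost-in: ETH (and optional VLAN)
   * above, then IP, then L4, then the tunnel header, since rte_flow expects
   * the pattern in header order.
   */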
  /* Layer 3, IP */
  vec_add2 (items, item, 1);
  if (flow_class == FLOW_IPV4_CLASS)
    {
      vnet_flow_ip4_t *ip4_ptr = &f->ip4;

      item->type = RTE_FLOW_ITEM_TYPE_IPV4;
      if ((!ip4_ptr->src_addr.mask.as_u32) &&
	  (!ip4_ptr->dst_addr.mask.as_u32) && (!ip4_ptr->protocol.mask))
	{
	  item->spec = NULL;
	  item->mask = NULL;
	}
      else
	{
	  ip4[0].hdr.src_addr = ip4_ptr->src_addr.addr.as_u32;
	  ip4[1].hdr.src_addr = ip4_ptr->src_addr.mask.as_u32;
	  ip4[0].hdr.dst_addr = ip4_ptr->dst_addr.addr.as_u32;
	  ip4[1].hdr.dst_addr = ip4_ptr->dst_addr.mask.as_u32;
	  ip4[0].hdr.next_proto_id = ip4_ptr->protocol.prot;
	  ip4[1].hdr.next_proto_id = ip4_ptr->protocol.mask;

	  item->spec = ip4;
	  item->mask = ip4 + 1;
	}

      if (FLOW_IS_L4_TYPE (f) || FLOW_IS_L4_TUNNEL_TYPE (f))
	{
	  vnet_flow_ip4_n_tuple_t *ip4_n_ptr = &f->ip4_n_tuple;

	  src_port = ip4_n_ptr->src_port.port;
	  dst_port = ip4_n_ptr->dst_port.port;
	  src_port_mask = ip4_n_ptr->src_port.mask;
	  dst_port_mask = ip4_n_ptr->dst_port.mask;
	}

      protocol = ip4_ptr->protocol.prot;
    }
  else if (flow_class == FLOW_IPV6_CLASS)
    {
      vnet_flow_ip6_t *ip6_ptr = &f->ip6;

      item->type = RTE_FLOW_ITEM_TYPE_IPV6;

      if ((ip6_ptr->src_addr.mask.as_u64[0] == 0) &&
	  (ip6_ptr->src_addr.mask.as_u64[1] == 0) &&
	  (ip6_ptr->dst_addr.mask.as_u64[0] == 0) &&
	  (ip6_ptr->dst_addr.mask.as_u64[1] == 0) && (!ip6_ptr->protocol.mask))
	{
	  item->spec = NULL;
	  item->mask = NULL;
	}
      else
	{
	  clib_memcpy (ip6[0].hdr.src_addr, &ip6_ptr->src_addr.addr,
		       ARRAY_LEN (ip6_ptr->src_addr.addr.as_u8));
	  clib_memcpy (ip6[1].hdr.src_addr, &ip6_ptr->src_addr.mask,
		       ARRAY_LEN (ip6_ptr->src_addr.mask.as_u8));
	  clib_memcpy (ip6[0].hdr.dst_addr, &ip6_ptr->dst_addr.addr,
		       ARRAY_LEN (ip6_ptr->dst_addr.addr.as_u8));
	  clib_memcpy (ip6[1].hdr.dst_addr, &ip6_ptr->dst_addr.mask,
		       ARRAY_LEN (ip6_ptr->dst_addr.mask.as_u8));
	  ip6[0].hdr.proto = ip6_ptr->protocol.prot;
	  ip6[1].hdr.proto = ip6_ptr->protocol.mask;

	  item->spec = ip6;
	  item->mask = ip6 + 1;
	}

      if (FLOW_IS_L4_TYPE (f) || FLOW_IS_L4_TUNNEL_TYPE (f))
	{
	  vnet_flow_ip6_n_tuple_t *ip6_n_ptr = &f->ip6_n_tuple;

	  src_port = ip6_n_ptr->src_port.port;
	  dst_port = ip6_n_ptr->dst_port.port;
	  src_port_mask = ip6_n_ptr->src_port.mask;
	  dst_port_mask = ip6_n_ptr->dst_port.mask;
	}

      protocol = ip6_ptr->protocol.prot;
    }
  if (FLOW_IS_L3_TYPE (f))
    goto pattern_end;

  /* Layer 4 */
  vec_add2 (items, item, 1);
  switch (protocol)
    {
    case IP_PROTOCOL_L2TP:
      item->type = RTE_FLOW_ITEM_TYPE_L2TPV3OIP;
      l2tp[0].session_id = clib_host_to_net_u32 (f->ip4_l2tpv3oip.session_id);
      l2tp[1].session_id = ~0;

      item->spec = l2tp;
      item->mask = l2tp + 1;
      break;

    case IP_PROTOCOL_IPSEC_ESP:
      item->type = RTE_FLOW_ITEM_TYPE_ESP;
      esp[0].hdr.spi = clib_host_to_net_u32 (f->ip4_ipsec_esp.spi);
      esp[1].hdr.spi = ~0;

      item->spec = esp;
      item->mask = esp + 1;
      break;

    case IP_PROTOCOL_IPSEC_AH:
      item->type = RTE_FLOW_ITEM_TYPE_AH;
      ah[0].spi = clib_host_to_net_u32 (f->ip4_ipsec_ah.spi);
      ah[1].spi = ~0;

      item->spec = ah;
      item->mask = ah + 1;
      break;
    case IP_PROTOCOL_TCP:
      item->type = RTE_FLOW_ITEM_TYPE_TCP;
      if ((src_port_mask == 0) && (dst_port_mask == 0))
	{
	  item->spec = NULL;
	  item->mask = NULL;
	}
      else
	{
	  tcp[0].hdr.src_port = clib_host_to_net_u16 (src_port);
	  tcp[1].hdr.src_port = clib_host_to_net_u16 (src_port_mask);
	  tcp[0].hdr.dst_port = clib_host_to_net_u16 (dst_port);
	  tcp[1].hdr.dst_port = clib_host_to_net_u16 (dst_port_mask);

	  item->spec = tcp;
	  item->mask = tcp + 1;
	}
      break;

    case IP_PROTOCOL_UDP:
      item->type = RTE_FLOW_ITEM_TYPE_UDP;
      if ((src_port_mask == 0) && (dst_port_mask == 0))
	{
	  item->spec = NULL;
	  item->mask = NULL;
	}
      else
	{
	  udp[0].hdr.src_port = clib_host_to_net_u16 (src_port);
	  udp[1].hdr.src_port = clib_host_to_net_u16 (src_port_mask);
	  udp[0].hdr.dst_port = clib_host_to_net_u16 (dst_port);
	  udp[1].hdr.dst_port = clib_host_to_net_u16 (dst_port_mask);

	  item->spec = udp;
	  item->mask = udp + 1;
	}
      /* handle the UDP tunnels */
      if (f->type == VNET_FLOW_TYPE_IP4_GTPC)
	{
	  gtp[0].teid = clib_host_to_net_u32 (f->ip4_gtpc.teid);
	  gtp[1].teid = ~0;

	  vec_add2 (items, item, 1);
	  item->type = RTE_FLOW_ITEM_TYPE_GTPC;
	  item->spec = gtp;
	  item->mask = gtp + 1;
	}
      else if (f->type == VNET_FLOW_TYPE_IP4_GTPU)
	{
	  gtp[0].teid = clib_host_to_net_u32 (f->ip4_gtpu.teid);
	  gtp[1].teid = ~0;

	  vec_add2 (items, item, 1);
	  item->type = RTE_FLOW_ITEM_TYPE_GTPU;
	  item->spec = gtp;
	  item->mask = gtp + 1;
	}
      else if (f->type == VNET_FLOW_TYPE_IP4_VXLAN)
	{
	  u32 vni = f->ip4_vxlan.vni;

	  vxlan_header_t spec_hdr = {
	    .flags = VXLAN_FLAGS_I,
	    .vni_reserved = clib_host_to_net_u32 (vni << 8)
	  };
	  vxlan_header_t mask_hdr = {
	    .flags = 0xff,
	    .vni_reserved = clib_host_to_net_u32 (((u32) - 1) << 8)
	  };

	  clib_memset (raw, 0, sizeof raw);
	  raw[0].item.relative = 1;
	  raw[0].item.length = vxlan_hdr_sz;

	  clib_memcpy_fast (raw[0].val + raw_sz, &spec_hdr, vxlan_hdr_sz);
	  raw[0].item.pattern = raw[0].val + raw_sz;
	  clib_memcpy_fast (raw[1].val + raw_sz, &mask_hdr, vxlan_hdr_sz);
	  raw[1].item.pattern = raw[1].val + raw_sz;

	  vec_add2 (items, item, 1);
	  item->type = RTE_FLOW_ITEM_TYPE_RAW;
	  item->spec = raw;
	  item->mask = raw + 1;
	}
      break;

    default:
      rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
      goto done;
    }
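
  /*
   * Note on the VXLAN case above: the VXLAN header is matched with a RAW
   * item relative to the end of the UDP header; the spec carries the I flag
   * plus VNI and the mask covers the flags and VNI bytes.  This is
   * presumably preferred over RTE_FLOW_ITEM_TYPE_VXLAN for broader PMD
   * compatibility.
   */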
pattern_end:
  if ((f->actions & VNET_FLOW_ACTION_RSS) &&
      (f->rss_types & (1ULL << VNET_FLOW_RSS_TYPES_ESP)))
    {
      vec_add2 (items, item, 1);
      item->type = RTE_FLOW_ITEM_TYPE_ESP;
    }

  vec_add2 (items, item, 1);
  item->type = RTE_FLOW_ITEM_TYPE_END;
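
  /*
   * The pattern is now complete and terminated by RTE_FLOW_ITEM_TYPE_END;
   * the extra ESP item above lets the PMD apply ESP-aware RSS when
   * VNET_FLOW_RSS_TYPES_ESP was requested.
   */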
  /* Actions */
  /* Only one 'fate' can be assigned */
  if (f->actions & VNET_FLOW_ACTION_REDIRECT_TO_QUEUE)
    {
      vec_add2 (actions, action, 1);
      queue.index = f->redirect_queue;
      action->type = RTE_FLOW_ACTION_TYPE_QUEUE;
      action->conf = &queue;
      fate = true;
    }

  if (f->actions & VNET_FLOW_ACTION_DROP)
    {
      vec_add2 (actions, action, 1);
      action->type = RTE_FLOW_ACTION_TYPE_DROP;
      if (fate == true)
	{
	  rv = VNET_FLOW_ERROR_INTERNAL;
	  goto done;
	}
      else
	fate = true;
    }
  if (f->actions & VNET_FLOW_ACTION_RSS)
    {
      u64 rss_type = 0;

      vec_add2 (actions, action, 1);
      action->type = RTE_FLOW_ACTION_TYPE_RSS;
      action->conf = &rss;

      /* convert types to DPDK rss bitmask */
      dpdk_flow_convert_rss_types (f->rss_types, &rss_type);

      if (f->queue_num)
	/* convert rss queues to array */
	dpdk_flow_convert_rss_queues (f->queue_index, f->queue_num, &rss);

      rss.types = rss_type;
      if ((rss.func = dpdk_flow_convert_rss_func (f->rss_fun)) ==
	  RTE_ETH_HASH_FUNCTION_MAX)
	{
	  rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
	  goto done;
	}

      if (fate == true)
	{
	  rv = VNET_FLOW_ERROR_INTERNAL;
	  goto done;
	}
      else
	fate = true;
    }

  if (fate == false)
    {
      vec_add2 (actions, action, 1);
      action->type = RTE_FLOW_ACTION_TYPE_PASSTHRU;
    }
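
  /*
   * When no fate action (queue/drop/rss) was requested, PASSTHRU keeps the
   * rule valid so that mark-only flows still match.
   */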
  if (f->actions & VNET_FLOW_ACTION_MARK)
    {
      vec_add2 (actions, action, 1);
      mark.id = fe->mark;
      action->type = RTE_FLOW_ACTION_TYPE_MARK;
      action->conf = &mark;
    }

  vec_add2 (actions, action, 1);
  action->type = RTE_FLOW_ACTION_TYPE_END;
  rv = rte_flow_validate (xd->device_index, &ingress, items, actions,
			  &xd->last_flow_error);

  if (rv)
    {
      if (rv == -EINVAL)
	rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
      else if (rv == -EEXIST)
	rv = VNET_FLOW_ERROR_ALREADY_EXISTS;
      else
	rv = VNET_FLOW_ERROR_INTERNAL;

      goto done;
    }

  fe->handle = rte_flow_create (xd->device_index, &ingress, items, actions,
				&xd->last_flow_error);

  if (!fe->handle)
    rv = VNET_FLOW_ERROR_NOT_SUPPORTED;

done:
  vec_free (items);
  vec_free (actions);
  return rv;
}
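
/*
 * Sketch of the call flow (assuming the standard vnet flow path): a user
 * adds a flow via vnet_flow_add ()/vnet_flow_enable (), which lands in
 * dpdk_flow_ops_fn () below; that in turn calls dpdk_flow_add () to
 * validate and program the rule through rte_flow_validate () /
 * rte_flow_create ().
 */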
int
dpdk_flow_ops_fn (vnet_main_t * vnm, vnet_flow_dev_op_t op, u32 dev_instance,
		  u32 flow_index, uword * private_data)
{
  vlib_main_t *vm = vlib_get_main ();
  dpdk_main_t *dm = &dpdk_main;
  vnet_flow_t *flow = vnet_get_flow (flow_index);
  dpdk_device_t *xd = vec_elt_at_index (dm->devices, dev_instance);
  dpdk_flow_entry_t *fe;
  dpdk_flow_lookup_entry_t *fle = 0;
  int rv;

  /* recycle old flow lookup entries only after the main loop counter
     increases - i.e. previously DMA'ed packets were handled */
  if (vec_len (xd->parked_lookup_indexes) > 0 &&
      xd->parked_loop_count != vm->main_loop_count)
    {
      u32 *fl_index;

      vec_foreach (fl_index, xd->parked_lookup_indexes)
	pool_put_index (xd->flow_lookup_entries, *fl_index);
      vec_reset_length (xd->parked_lookup_indexes);
    }
  if (op == VNET_FLOW_DEV_OP_DEL_FLOW)
    {
      fe = vec_elt_at_index (xd->flow_entries, *private_data);

      if ((rv = rte_flow_destroy (xd->device_index, fe->handle,
				  &xd->last_flow_error)))
	return VNET_FLOW_ERROR_INTERNAL;

      if (fe->mark)
	{
	  /* make sure no action is taken for in-flight (marked) packets */
	  fle = pool_elt_at_index (xd->flow_lookup_entries, fe->mark);
	  clib_memset (fle, -1, sizeof (*fle));
	  vec_add1 (xd->parked_lookup_indexes, fe->mark);
	  xd->parked_loop_count = vm->main_loop_count;
	}

      clib_memset (fe, 0, sizeof (*fe));
      pool_put (xd->flow_entries, fe);

      goto disable_rx_offload;
    }
  if (op != VNET_FLOW_DEV_OP_ADD_FLOW)
    return VNET_FLOW_ERROR_NOT_SUPPORTED;

  pool_get (xd->flow_entries, fe);
  fe->flow_index = flow->index;

  if (flow->actions == 0)
    {
      rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
      goto done;
    }

  /* if we need to mark packets, assign one mark */
  if (flow->actions & (VNET_FLOW_ACTION_MARK |
		       VNET_FLOW_ACTION_REDIRECT_TO_NODE |
		       VNET_FLOW_ACTION_BUFFER_ADVANCE))
    {
      /* reserve slot 0 */
      if (xd->flow_lookup_entries == 0)
	pool_get_aligned (xd->flow_lookup_entries, fle,
			  CLIB_CACHE_LINE_BYTES);
      pool_get_aligned (xd->flow_lookup_entries, fle, CLIB_CACHE_LINE_BYTES);
      fe->mark = fle - xd->flow_lookup_entries;

      /* install entry in the lookup table */
      clib_memset (fle, -1, sizeof (*fle));
      if (flow->actions & VNET_FLOW_ACTION_MARK)
	fle->flow_id = flow->mark_flow_id;
      if (flow->actions & VNET_FLOW_ACTION_REDIRECT_TO_NODE)
	fle->next_index = flow->redirect_device_input_next_index;
      if (flow->actions & VNET_FLOW_ACTION_BUFFER_ADVANCE)
	fle->buffer_advance = flow->buffer_advance;
    }
  else
    fe->mark = 0;
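
  /*
   * fe->mark is the pool index of the lookup entry; the dpdk input node
   * uses the mark returned by the NIC to find this entry and apply the
   * flow_id / next_index / buffer_advance actions recorded here.
   */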
  if ((xd->flags & DPDK_DEVICE_FLAG_RX_FLOW_OFFLOAD) == 0)
    {
      xd->flags |= DPDK_DEVICE_FLAG_RX_FLOW_OFFLOAD;
      dpdk_device_setup (xd);
    }

  switch (flow->type)
    {
    case VNET_FLOW_TYPE_ETHERNET:
    case VNET_FLOW_TYPE_IP4:
    case VNET_FLOW_TYPE_IP6:
    case VNET_FLOW_TYPE_IP4_N_TUPLE:
    case VNET_FLOW_TYPE_IP6_N_TUPLE:
    case VNET_FLOW_TYPE_IP4_VXLAN:
    case VNET_FLOW_TYPE_IP4_GTPC:
    case VNET_FLOW_TYPE_IP4_GTPU:
    case VNET_FLOW_TYPE_IP4_L2TPV3OIP:
    case VNET_FLOW_TYPE_IP4_IPSEC_ESP:
    case VNET_FLOW_TYPE_IP4_IPSEC_AH:
    case VNET_FLOW_TYPE_GENERIC:
      if ((rv = dpdk_flow_add (xd, flow, fe)))
	goto done;
      break;
    default:
      rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
      goto done;
    }

  *private_data = fe - xd->flow_entries;

done:
  if (rv)
    {
      clib_memset (fe, 0, sizeof (*fe));
      pool_put (xd->flow_entries, fe);
      if (fle)
	{
	  clib_memset (fle, -1, sizeof (*fle));
	  pool_put (xd->flow_lookup_entries, fle);
	}
    }

disable_rx_offload:
  if ((xd->flags & DPDK_DEVICE_FLAG_RX_FLOW_OFFLOAD) != 0
      && pool_elts (xd->flow_entries) == 0)
    {
      xd->flags &= ~DPDK_DEVICE_FLAG_RX_FLOW_OFFLOAD;
      dpdk_device_setup (xd);
    }

  return rv;
}
u8 *
format_dpdk_flow (u8 * s, va_list * args)
{
  u32 dev_instance = va_arg (*args, u32);
  u32 flow_index = va_arg (*args, u32);
  uword private_data = va_arg (*args, uword);
  dpdk_main_t *dm = &dpdk_main;
  dpdk_device_t *xd = vec_elt_at_index (dm->devices, dev_instance);
  dpdk_flow_entry_t *fe;

  if (flow_index == ~0)
    {
      s = format (s, "%-25s: %U\n", "supported flow actions",
		  format_flow_actions, xd->supported_flow_actions);
      s = format (s, "%-25s: %d\n", "last DPDK error type",
		  xd->last_flow_error.type);
      s = format (s, "%-25s: %s\n", "last DPDK error message",
		  xd->last_flow_error.message ? xd->last_flow_error.message :
		  "n/a");
      return s;
    }

  if (private_data >= vec_len (xd->flow_entries))
    return format (s, "unknown flow");

  fe = vec_elt_at_index (xd->flow_entries, private_data);
  s = format (s, "mark %u", fe->mark);

  return s;
}
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */