/*
 * Copyright (c) 2019 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vnet/vnet.h>
#include <vppinfra/vec.h>
#include <vppinfra/format.h>
#include <vlib/unix/cj.h>

#include <vnet/ip/ip.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/ethernet/arp_packet.h>
#include <vnet/vxlan/vxlan.h>
#include <dpdk/device/dpdk.h>

#include <dpdk/device/dpdk_priv.h>
#include <vppinfra/error.h>
/* check if flow is L2 flow */
#define FLOW_IS_L2_LAYER(f) \
  (f->type == VNET_FLOW_TYPE_ETHERNET)

/* check if flow is VLAN sensitive */
#define FLOW_IS_VLAN_TAGGED(f) \
  ((f->type == VNET_FLOW_TYPE_IP4_N_TUPLE_TAGGED) || \
   (f->type == VNET_FLOW_TYPE_IP6_N_TUPLE_TAGGED))
/* check if flow is L4 type */
#define FLOW_IS_L4_LAYER(f) \
  ((f->type == VNET_FLOW_TYPE_IP4_N_TUPLE) || \
   (f->type == VNET_FLOW_TYPE_IP6_N_TUPLE))
/* check if flow is an L4 tunnel type, i.e. falls in the
   [IP4_VXLAN .. IP6_GTPU_IP6] range of vnet_flow_type_t */
#define FLOW_IS_L4_TUNNEL_LAYER(f) \
  ((f->type >= VNET_FLOW_TYPE_IP4_VXLAN) && \
   (f->type <= VNET_FLOW_TYPE_IP6_GTPU_IP6))
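/* Note: the range check above assumes every tunnel flow type is listed
   contiguously between IP4_VXLAN and IP6_GTPU_IP6 in vnet_flow_type_t;
   a tunnel type added outside that range would not be recognized here. */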
/* constant structs */
static const struct rte_flow_attr ingress = {.ingress = 1 };

static inline bool
mac_address_is_all_zero (const u8 addr[6])
{
  int i;

  for (i = 0; i < 6; i++)
    if (addr[i] != 0)
      return false;

  return true;
}
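/* Pattern-building convention used throughout dpdk_flow_add(): each
   two-element array below (eth[2], ip4[2], udp[2], ...) holds the rte_flow
   "spec" in element [0] (header values to match) and the "mask" in
   element [1] (which bits of the spec are significant). */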
static int
dpdk_flow_add (dpdk_device_t * xd, vnet_flow_t * f, dpdk_flow_entry_t * fe)
{
  struct rte_flow_item_eth eth[2] = { };
  struct rte_flow_item_ipv4 ip4[2] = { };
  struct rte_flow_item_ipv4 inner_ip4[2] = { };
  struct rte_flow_item_ipv6 ip6[2] = { };
  struct rte_flow_item_ipv6 inner_ip6[2] = { };
  struct rte_flow_item_udp udp[2] = { };
  struct rte_flow_item_tcp tcp[2] = { };
  struct rte_flow_item_gtp gtp[2] = { };
  struct rte_flow_action_mark mark = { 0 };
  struct rte_flow_action_queue queue = { 0 };
  struct rte_flow_item *item, *items = 0;
  struct rte_flow_action *action, *actions = 0;
  bool fate = false;

  enum
  {
    vxlan_hdr_sz = sizeof (vxlan_header_t),
    raw_sz = sizeof (struct rte_flow_item_raw)
  };

  union
  {
    struct rte_flow_item_raw item;
    u8 val[raw_sz + vxlan_hdr_sz];
  } raw[2];

  u16 src_port, dst_port, src_port_mask, dst_port_mask;
  u8 protocol;
  int rv = 0;
  if (f->actions & (~xd->supported_flow_actions))
    return VNET_FLOW_ERROR_NOT_SUPPORTED;
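  /* The pattern and action lists are built up as vppinfra vectors of
     rte_flow_item / rte_flow_action, and each must be terminated with an
     ..._END entry before being handed to rte_flow_validate()/create(). */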
  /* Match items */
  /* Ethernet */
  vec_add2 (items, item, 1);
  item->type = RTE_FLOW_ITEM_TYPE_ETH;
  if (f->type == VNET_FLOW_TYPE_ETHERNET)
    {
      vnet_flow_ethernet_t *te = &f->ethernet;

      clib_memset (&eth[0], 0, sizeof (eth[0]));
      clib_memset (&eth[1], 0, sizeof (eth[1]));

      /* check if SMAC/DMAC/Ether_type assigned */
      if (!mac_address_is_all_zero (te->eth_hdr.dst_address))
        {
          clib_memcpy_fast (&eth[0].dst, &te->eth_hdr.dst_address,
                            sizeof (eth[0].dst));
          clib_memset (&eth[1].dst, 0xFF, sizeof (eth[1].dst));
        }

      if (!mac_address_is_all_zero (te->eth_hdr.src_address))
        {
          clib_memcpy_fast (&eth[0].src, &te->eth_hdr.src_address,
                            sizeof (eth[0].src));
          clib_memset (&eth[1].src, 0xFF, sizeof (eth[1].src));
        }

      if (te->eth_hdr.type)
        {
          eth[0].type = clib_host_to_net_u16 (te->eth_hdr.type);
          eth[1].type = clib_host_to_net_u16 (0xFFFF);
        }

      item->spec = eth;
      item->mask = eth + 1;
    }
  else
    {
      item->spec = NULL;
      item->mask = NULL;
    }
  if (FLOW_IS_VLAN_TAGGED (f))
    {
      vec_add2 (items, item, 1);
      item->type = RTE_FLOW_ITEM_TYPE_VLAN;
      item->spec = NULL;
      item->mask = NULL;
    }
  if (FLOW_IS_L2_LAYER (f))
    goto pattern_end;

  /* IP */
  vec_add2 (items, item, 1);
  if ((f->type == VNET_FLOW_TYPE_IP6_N_TUPLE) ||
      (f->type == VNET_FLOW_TYPE_IP6_GTPC) ||
      (f->type == VNET_FLOW_TYPE_IP6_GTPU) ||
      (f->type == VNET_FLOW_TYPE_IP6_GTPU_IP4) ||
      (f->type == VNET_FLOW_TYPE_IP6_GTPU_IP6))
    {
      ip6_address_t zero_addr;
      vnet_flow_ip6_n_tuple_t *t6 = &f->ip6_n_tuple;
      item->type = RTE_FLOW_ITEM_TYPE_IPV6;

      clib_memset (&zero_addr, 0, sizeof (ip6_address_t));
      if (!clib_memcmp (&t6->src_addr.mask, &zero_addr, 16) &&
          !clib_memcmp (&t6->dst_addr.mask, &zero_addr, 16))
        {
          item->spec = NULL;
          item->mask = NULL;
        }
      else
        {
          clib_memcpy_fast (ip6[0].hdr.src_addr, &t6->src_addr.addr, 16);
          clib_memcpy_fast (ip6[1].hdr.src_addr, &t6->src_addr.mask, 16);
          clib_memcpy_fast (ip6[0].hdr.dst_addr, &t6->dst_addr.addr, 16);
          clib_memcpy_fast (ip6[1].hdr.dst_addr, &t6->dst_addr.mask, 16);
          item->spec = ip6;
          item->mask = ip6 + 1;
        }

      src_port = t6->src_port.port;
      dst_port = t6->dst_port.port;
      src_port_mask = t6->src_port.mask;
      dst_port_mask = t6->dst_port.mask;
      protocol = t6->protocol;
    }
  else if ((f->type == VNET_FLOW_TYPE_IP4_N_TUPLE) ||
           (f->type == VNET_FLOW_TYPE_IP4_GTPC) ||
           (f->type == VNET_FLOW_TYPE_IP4_GTPU) ||
           (f->type == VNET_FLOW_TYPE_IP4_GTPU_IP4) ||
           (f->type == VNET_FLOW_TYPE_IP4_GTPU_IP6))
    {
      vnet_flow_ip4_n_tuple_t *t4 = &f->ip4_n_tuple;
      item->type = RTE_FLOW_ITEM_TYPE_IPV4;

      if (!t4->src_addr.mask.as_u32 && !t4->dst_addr.mask.as_u32)
        {
          item->spec = NULL;
          item->mask = NULL;
        }
      else
        {
          ip4[0].hdr.src_addr = t4->src_addr.addr.as_u32;
          ip4[1].hdr.src_addr = t4->src_addr.mask.as_u32;
          ip4[0].hdr.dst_addr = t4->dst_addr.addr.as_u32;
          ip4[1].hdr.dst_addr = t4->dst_addr.mask.as_u32;
          item->spec = ip4;
          item->mask = ip4 + 1;
        }

      src_port = t4->src_port.port;
      dst_port = t4->dst_port.port;
      src_port_mask = t4->src_port.mask;
      dst_port_mask = t4->dst_port.mask;
      protocol = t4->protocol;
    }
  else if (f->type == VNET_FLOW_TYPE_IP4_VXLAN)
    {
      vnet_flow_ip4_vxlan_t *v4 = &f->ip4_vxlan;
      ip4[0].hdr.src_addr = v4->src_addr.as_u32;
      ip4[1].hdr.src_addr = -1;
      ip4[0].hdr.dst_addr = v4->dst_addr.as_u32;
      ip4[1].hdr.dst_addr = -1;
      item->type = RTE_FLOW_ITEM_TYPE_IPV4;
      item->spec = ip4;
      item->mask = ip4 + 1;

      dst_port = v4->dst_port;
      dst_port_mask = -1;
      src_port = 0;
      src_port_mask = 0;
      protocol = IP_PROTOCOL_UDP;
    }
  else
    {
      rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
      goto done;
    }
  /* Layer 4 */
  vec_add2 (items, item, 1);
  if (protocol == IP_PROTOCOL_UDP)
    {
      item->type = RTE_FLOW_ITEM_TYPE_UDP;

      if ((src_port_mask == 0) && (dst_port_mask == 0))
        {
          item->spec = NULL;
          item->mask = NULL;
        }
      else
        {
          udp[0].hdr.src_port = clib_host_to_net_u16 (src_port);
          udp[1].hdr.src_port = clib_host_to_net_u16 (src_port_mask);
          udp[0].hdr.dst_port = clib_host_to_net_u16 (dst_port);
          udp[1].hdr.dst_port = clib_host_to_net_u16 (dst_port_mask);
          item->spec = udp;
          item->mask = udp + 1;
        }
    }
  else if (protocol == IP_PROTOCOL_TCP)
    {
      item->type = RTE_FLOW_ITEM_TYPE_TCP;

      if ((src_port_mask == 0) && (dst_port_mask == 0))
        {
          item->spec = NULL;
          item->mask = NULL;
        }
      else
        {
          tcp[0].hdr.src_port = clib_host_to_net_u16 (src_port);
          tcp[1].hdr.src_port = clib_host_to_net_u16 (src_port_mask);
          tcp[0].hdr.dst_port = clib_host_to_net_u16 (dst_port);
          tcp[1].hdr.dst_port = clib_host_to_net_u16 (dst_port_mask);
          item->spec = tcp;
          item->mask = tcp + 1;
        }
    }
  else
    {
      rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
      goto done;
    }
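  /* For VXLAN the VNI is matched with a RAW item: the 8-byte VXLAN header
     is copied after the rte_flow_item_raw header inside the raw[] union and
     matched as an opaque byte pattern relative to the end of the preceding
     UDP item, rather than with RTE_FLOW_ITEM_TYPE_VXLAN. */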
  /* Tunnel header match */
  if (f->type == VNET_FLOW_TYPE_IP4_VXLAN)
    {
      u32 vni = f->ip4_vxlan.vni;
      vxlan_header_t spec_hdr = {
        .flags = VXLAN_FLAGS_I,
        .vni_reserved = clib_host_to_net_u32 (vni << 8)
      };
      vxlan_header_t mask_hdr = {
        .flags = 0xff,
        .vni_reserved = clib_host_to_net_u32 (((u32) - 1) << 8)
      };

      clib_memset (raw, 0, sizeof raw);
      raw[0].item.relative = 1;
      raw[0].item.length = vxlan_hdr_sz;

      clib_memcpy_fast (raw[0].val + raw_sz, &spec_hdr, vxlan_hdr_sz);
      raw[0].item.pattern = raw[0].val + raw_sz;
      clib_memcpy_fast (raw[1].val + raw_sz, &mask_hdr, vxlan_hdr_sz);
      raw[1].item.pattern = raw[1].val + raw_sz;

      vec_add2 (items, item, 1);
      item->type = RTE_FLOW_ITEM_TYPE_RAW;
      item->spec = raw;
      item->mask = raw + 1;
    }
  else if (f->type == VNET_FLOW_TYPE_IP4_GTPC)
    {
      vnet_flow_ip4_gtpc_t *gc = &f->ip4_gtpc;
      gtp[0].teid = clib_host_to_net_u32 (gc->teid);
      gtp[1].teid = ~0;

      vec_add2 (items, item, 1);
      item->type = RTE_FLOW_ITEM_TYPE_GTPC;
      item->spec = gtp;
      item->mask = gtp + 1;
    }
  else if (f->type == VNET_FLOW_TYPE_IP4_GTPU)
    {
      vnet_flow_ip4_gtpu_t *gu = &f->ip4_gtpu;
      gtp[0].teid = clib_host_to_net_u32 (gu->teid);
      gtp[1].teid = ~0;

      vec_add2 (items, item, 1);
      item->type = RTE_FLOW_ITEM_TYPE_GTPU;
      item->spec = gtp;
      item->mask = gtp + 1;
    }
  else if ((f->type == VNET_FLOW_TYPE_IP4_GTPU_IP4) ||
           (f->type == VNET_FLOW_TYPE_IP4_GTPU_IP6))
    {
      vnet_flow_ip4_gtpu_t *gu = &f->ip4_gtpu;
      gtp[0].teid = clib_host_to_net_u32 (gu->teid);
      gtp[1].teid = ~0;

      vec_add2 (items, item, 1);
      item->type = RTE_FLOW_ITEM_TYPE_GTPU;
      item->spec = gtp;
      item->mask = gtp + 1;

      /* inner IP4 header */
      if (f->type == VNET_FLOW_TYPE_IP4_GTPU_IP4)
        {
          vec_add2 (items, item, 1);
          item->type = RTE_FLOW_ITEM_TYPE_IPV4;

          vnet_flow_ip4_gtpu_ip4_t *gu4 = &f->ip4_gtpu_ip4;
          if (!gu4->inner_src_addr.mask.as_u32 &&
              !gu4->inner_dst_addr.mask.as_u32)
            {
              item->spec = NULL;
              item->mask = NULL;
            }
          else
            {
              inner_ip4[0].hdr.src_addr = gu4->inner_src_addr.addr.as_u32;
              inner_ip4[1].hdr.src_addr = gu4->inner_src_addr.mask.as_u32;
              inner_ip4[0].hdr.dst_addr = gu4->inner_dst_addr.addr.as_u32;
              inner_ip4[1].hdr.dst_addr = gu4->inner_dst_addr.mask.as_u32;
              item->spec = inner_ip4;
              item->mask = inner_ip4 + 1;
            }
        }
      else if (f->type == VNET_FLOW_TYPE_IP4_GTPU_IP6)
        {
          ip6_address_t zero_addr;
          vnet_flow_ip4_gtpu_ip6_t *gu6 = &f->ip4_gtpu_ip6;

          clib_memset (&zero_addr, 0, sizeof (ip6_address_t));

          vec_add2 (items, item, 1);
          item->type = RTE_FLOW_ITEM_TYPE_IPV6;

          if (!clib_memcmp (&gu6->inner_src_addr.mask, &zero_addr, 16) &&
              !clib_memcmp (&gu6->inner_dst_addr.mask, &zero_addr, 16))
            {
              item->spec = NULL;
              item->mask = NULL;
            }
          else
            {
              clib_memcpy_fast (inner_ip6[0].hdr.src_addr,
                                &gu6->inner_src_addr.addr, 16);
              clib_memcpy_fast (inner_ip6[1].hdr.src_addr,
                                &gu6->inner_src_addr.mask, 16);
              clib_memcpy_fast (inner_ip6[0].hdr.dst_addr,
                                &gu6->inner_dst_addr.addr, 16);
              clib_memcpy_fast (inner_ip6[1].hdr.dst_addr,
                                &gu6->inner_dst_addr.mask, 16);
              item->spec = inner_ip6;
              item->mask = inner_ip6 + 1;
            }
        }
    }
  else if (f->type == VNET_FLOW_TYPE_IP6_GTPC)
    {
      vnet_flow_ip6_gtpc_t *gc = &f->ip6_gtpc;
      gtp[0].teid = clib_host_to_net_u32 (gc->teid);
      gtp[1].teid = ~0;

      vec_add2 (items, item, 1);
      item->type = RTE_FLOW_ITEM_TYPE_GTPC;
      item->spec = gtp;
      item->mask = gtp + 1;
    }
  else if (f->type == VNET_FLOW_TYPE_IP6_GTPU)
    {
      vnet_flow_ip6_gtpu_t *gu = &f->ip6_gtpu;
      gtp[0].teid = clib_host_to_net_u32 (gu->teid);
      gtp[1].teid = ~0;

      vec_add2 (items, item, 1);
      item->type = RTE_FLOW_ITEM_TYPE_GTPU;
      item->spec = gtp;
      item->mask = gtp + 1;
    }
  else if ((f->type == VNET_FLOW_TYPE_IP6_GTPU_IP4) ||
           (f->type == VNET_FLOW_TYPE_IP6_GTPU_IP6))
    {
      vnet_flow_ip6_gtpu_t *gu = &f->ip6_gtpu;
      gtp[0].teid = clib_host_to_net_u32 (gu->teid);
      gtp[1].teid = ~0;

      vec_add2 (items, item, 1);
      item->type = RTE_FLOW_ITEM_TYPE_GTPU;
      item->spec = gtp;
      item->mask = gtp + 1;

      /* inner IP4 header */
      if (f->type == VNET_FLOW_TYPE_IP6_GTPU_IP4)
        {
          vec_add2 (items, item, 1);
          item->type = RTE_FLOW_ITEM_TYPE_IPV4;

          vnet_flow_ip6_gtpu_ip4_t *gu4 = &f->ip6_gtpu_ip4;

          if (!gu4->inner_src_addr.mask.as_u32 &&
              !gu4->inner_dst_addr.mask.as_u32)
            {
              item->spec = NULL;
              item->mask = NULL;
            }
          else
            {
              inner_ip4[0].hdr.src_addr = gu4->inner_src_addr.addr.as_u32;
              inner_ip4[1].hdr.src_addr = gu4->inner_src_addr.mask.as_u32;
              inner_ip4[0].hdr.dst_addr = gu4->inner_dst_addr.addr.as_u32;
              inner_ip4[1].hdr.dst_addr = gu4->inner_dst_addr.mask.as_u32;
              item->spec = inner_ip4;
              item->mask = inner_ip4 + 1;
            }
        }

      if (f->type == VNET_FLOW_TYPE_IP6_GTPU_IP6)
        {
          ip6_address_t zero_addr;
          vnet_flow_ip6_gtpu_ip6_t *gu6 = &f->ip6_gtpu_ip6;

          clib_memset (&zero_addr, 0, sizeof (ip6_address_t));

          vec_add2 (items, item, 1);
          item->type = RTE_FLOW_ITEM_TYPE_IPV6;

          if (!clib_memcmp (&gu6->inner_src_addr.mask, &zero_addr, 16) &&
              !clib_memcmp (&gu6->inner_dst_addr.mask, &zero_addr, 16))
            {
              item->spec = NULL;
              item->mask = NULL;
            }
          else
            {
              clib_memcpy_fast (inner_ip6[0].hdr.src_addr,
                                &gu6->inner_src_addr.addr, 16);
              clib_memcpy_fast (inner_ip6[1].hdr.src_addr,
                                &gu6->inner_src_addr.mask, 16);
              clib_memcpy_fast (inner_ip6[0].hdr.dst_addr,
                                &gu6->inner_dst_addr.addr, 16);
              clib_memcpy_fast (inner_ip6[1].hdr.dst_addr,
                                &gu6->inner_dst_addr.mask, 16);
              item->spec = inner_ip6;
              item->mask = inner_ip6 + 1;
            }
        }
    }
pattern_end:
  vec_add2 (items, item, 1);
  item->type = RTE_FLOW_ITEM_TYPE_END;
  /* Actions */
  /* Only one 'fate' can be assigned */
  if (f->actions & VNET_FLOW_ACTION_REDIRECT_TO_QUEUE)
    {
      vec_add2 (actions, action, 1);
      queue.index = f->redirect_queue;
      action->type = RTE_FLOW_ACTION_TYPE_QUEUE;
      action->conf = &queue;
      fate = true;
    }
  if (f->actions & VNET_FLOW_ACTION_DROP)
    {
      vec_add2 (actions, action, 1);
      action->type = RTE_FLOW_ACTION_TYPE_DROP;
      if (fate == true)
        {
          rv = VNET_FLOW_ERROR_INTERNAL;
          goto done;
        }
      else
        fate = true;
    }
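  /* if no fate action was requested, add PASSTHRU so the flow still
     matches but the packet continues down the normal input path
     (the common case for mark-only flows) */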
  if (fate == false)
    {
      vec_add2 (actions, action, 1);
      action->type = RTE_FLOW_ACTION_TYPE_PASSTHRU;
    }
  if (f->actions & VNET_FLOW_ACTION_MARK)
    {
      vec_add2 (actions, action, 1);
      mark.id = fe->mark;
      action->type = RTE_FLOW_ACTION_TYPE_MARK;
      action->conf = &mark;
    }

  vec_add2 (actions, action, 1);
  action->type = RTE_FLOW_ACTION_TYPE_END;
  rv = rte_flow_validate (xd->device_index, &ingress, items, actions,
                          &xd->last_flow_error);

  if (rv)
    {
      if (rv == -EINVAL)
        rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
      else if (rv == -EEXIST)
        rv = VNET_FLOW_ERROR_ALREADY_EXISTS;
      else
        rv = VNET_FLOW_ERROR_INTERNAL;
      goto done;
    }

  fe->handle = rte_flow_create (xd->device_index, &ingress, items, actions,
                                &xd->last_flow_error);

  if (!fe->handle)
    rv = VNET_FLOW_ERROR_NOT_SUPPORTED;

done:
  vec_free (items);
  vec_free (actions);
  return rv;
}
int
dpdk_flow_ops_fn (vnet_main_t * vnm, vnet_flow_dev_op_t op, u32 dev_instance,
                  u32 flow_index, uword * private_data)
{
  dpdk_main_t *dm = &dpdk_main;
  vnet_flow_t *flow = vnet_get_flow (flow_index);
  dpdk_device_t *xd = vec_elt_at_index (dm->devices, dev_instance);
  dpdk_flow_entry_t *fe;
  dpdk_flow_lookup_entry_t *fle = 0;
  int rv;
  /* recycle old flow lookup entries only after the main loop counter
     increases - i.e. previously DMA'ed packets were handled and no
     in-flight packet can still carry a stale mark */
  if (vec_len (xd->parked_lookup_indexes) > 0 &&
      xd->parked_loop_count != dm->vlib_main->main_loop_count)
    {
      u32 *fl_index;

      vec_foreach (fl_index, xd->parked_lookup_indexes)
        pool_put_index (xd->flow_lookup_entries, *fl_index);
      vec_reset_length (xd->parked_lookup_indexes);
    }
  if (op == VNET_FLOW_DEV_OP_DEL_FLOW)
    {
      fe = vec_elt_at_index (xd->flow_entries, *private_data);

      if ((rv = rte_flow_destroy (xd->device_index, fe->handle,
                                  &xd->last_flow_error)))
        return VNET_FLOW_ERROR_INTERNAL;

      if (fe->mark)
        {
          /* make sure no action is taken for in-flight (marked) packets */
          fle = pool_elt_at_index (xd->flow_lookup_entries, fe->mark);
          clib_memset (fle, -1, sizeof (*fle));
          vec_add1 (xd->parked_lookup_indexes, fe->mark);
          xd->parked_loop_count = dm->vlib_main->main_loop_count;
        }

      clib_memset (fe, 0, sizeof (*fe));
      pool_put (xd->flow_entries, fe);

      goto disable_rx_offload;
    }
  if (op != VNET_FLOW_DEV_OP_ADD_FLOW)
    return VNET_FLOW_ERROR_NOT_SUPPORTED;

  pool_get (xd->flow_entries, fe);
  fe->flow_index = flow->index;

  if (flow->actions == 0)
    {
      rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
      goto done;
    }
  /* if we need to mark packets, assign one mark */
  if (flow->actions & (VNET_FLOW_ACTION_MARK |
                       VNET_FLOW_ACTION_REDIRECT_TO_NODE |
                       VNET_FLOW_ACTION_BUFFER_ADVANCE))
    {
      /* reserve slot 0 so a valid mark is never zero */
      if (xd->flow_lookup_entries == 0)
        pool_get_aligned (xd->flow_lookup_entries, fle,
                          CLIB_CACHE_LINE_BYTES);
      pool_get_aligned (xd->flow_lookup_entries, fle, CLIB_CACHE_LINE_BYTES);
      fe->mark = fle - xd->flow_lookup_entries;

      /* install entry in the lookup table */
      clib_memset (fle, -1, sizeof (*fle));
      if (flow->actions & VNET_FLOW_ACTION_MARK)
        fle->flow_id = flow->mark_flow_id;
      if (flow->actions & VNET_FLOW_ACTION_REDIRECT_TO_NODE)
        fle->next_index = flow->redirect_device_input_next_index;
      if (flow->actions & VNET_FLOW_ACTION_BUFFER_ADVANCE)
        fle->buffer_advance = flow->buffer_advance;
    }
  else
    fe->mark = 0;
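  /* fe->mark is the id the NIC writes into the mbuf via the MARK action;
     on receive, the dpdk-input node uses it to index flow_lookup_entries
     and pick up flow_id / next_index / buffer_advance for the packet. */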
  if ((xd->flags & DPDK_DEVICE_FLAG_RX_FLOW_OFFLOAD) == 0)
    {
      xd->flags |= DPDK_DEVICE_FLAG_RX_FLOW_OFFLOAD;
      dpdk_device_setup (xd);
    }
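  /* flow types this backend can translate into an rte_flow pattern;
     anything else is rejected up front */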
  switch (flow->type)
    {
    case VNET_FLOW_TYPE_ETHERNET:
    case VNET_FLOW_TYPE_IP4_N_TUPLE:
    case VNET_FLOW_TYPE_IP6_N_TUPLE:
    case VNET_FLOW_TYPE_IP4_VXLAN:
    case VNET_FLOW_TYPE_IP4_GTPC:
    case VNET_FLOW_TYPE_IP4_GTPU:
    case VNET_FLOW_TYPE_IP4_GTPU_IP4:
    case VNET_FLOW_TYPE_IP4_GTPU_IP6:
    case VNET_FLOW_TYPE_IP6_GTPC:
    case VNET_FLOW_TYPE_IP6_GTPU:
    case VNET_FLOW_TYPE_IP6_GTPU_IP4:
    case VNET_FLOW_TYPE_IP6_GTPU_IP6:
      if ((rv = dpdk_flow_add (xd, flow, fe)))
        goto done;
      break;
    default:
      rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
      goto done;
    }
  *private_data = fe - xd->flow_entries;

done:
  if (rv)
    {
      clib_memset (fe, 0, sizeof (*fe));
      pool_put (xd->flow_entries, fe);
      if (fle)
        {
          clib_memset (fle, -1, sizeof (*fle));
          pool_put (xd->flow_lookup_entries, fle);
        }
    }
disable_rx_offload:
  if ((xd->flags & DPDK_DEVICE_FLAG_RX_FLOW_OFFLOAD) != 0
      && pool_elts (xd->flow_entries) == 0)
    {
      xd->flags &= ~DPDK_DEVICE_FLAG_RX_FLOW_OFFLOAD;
      dpdk_device_setup (xd);
    }

  return rv;
}
u8 *
format_dpdk_flow (u8 * s, va_list * args)
{
  u32 dev_instance = va_arg (*args, u32);
  u32 flow_index = va_arg (*args, u32);
  uword private_data = va_arg (*args, uword);
  dpdk_main_t *dm = &dpdk_main;
  dpdk_device_t *xd = vec_elt_at_index (dm->devices, dev_instance);
  dpdk_flow_entry_t *fe;
  if (flow_index == ~0)
    {
      s = format (s, "%-25s: %U\n", "supported flow actions",
                  format_flow_actions, xd->supported_flow_actions);
      s = format (s, "%-25s: %d\n", "last DPDK error type",
                  xd->last_flow_error.type);
      s = format (s, "%-25s: %s\n", "last DPDK error message",
                  xd->last_flow_error.message ? xd->last_flow_error.message :
                  "n/a");
      return s;
    }

  if (private_data >= vec_len (xd->flow_entries))
    return format (s, "unknown flow");
  fe = vec_elt_at_index (xd->flow_entries, private_data);
  s = format (s, "mark %u", fe->mark);
  return s;
}
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */