/*
 * Copyright (c) 2019 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vnet/vnet.h>
#include <vppinfra/vec.h>
#include <vppinfra/format.h>

#include <vnet/ip/ip.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/ethernet/arp_packet.h>
#include <vnet/vxlan/vxlan.h>
#include <dpdk/device/dpdk.h>

#include <dpdk/device/dpdk_priv.h>
#include <vppinfra/error.h>
/* check if flow is L2 flow */
#define FLOW_IS_L2_LAYER(f) \
  (f->type == VNET_FLOW_TYPE_ETHERNET)

/* check if flow is VLAN sensitive */
#define FLOW_IS_VLAN_TAGGED(f) \
  ((f->type == VNET_FLOW_TYPE_IP4_N_TUPLE_TAGGED) || \
   (f->type == VNET_FLOW_TYPE_IP6_N_TUPLE_TAGGED))

/* check if flow is L4 type */
#define FLOW_IS_L4_LAYER(f) \
  ((f->type == VNET_FLOW_TYPE_IP4_N_TUPLE) || \
   (f->type == VNET_FLOW_TYPE_IP6_N_TUPLE))
/* check if flow is L4 tunnel type */
#define FLOW_IS_L4_TUNNEL_LAYER(f) \
  ((f->type >= VNET_FLOW_TYPE_IP4_VXLAN) && \
   (f->type <= VNET_FLOW_TYPE_IP6_GTPU_IP6))
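/* Note: this is a range check, so both comparisons must hold (&&); it relies
   on VNET_FLOW_TYPE_IP4_VXLAN .. VNET_FLOW_TYPE_IP6_GTPU_IP6 being a
   contiguous run of tunnel types in the vnet flow type enum. */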
/* constant structs */
static const struct rte_flow_attr ingress = {.ingress = 1 };
static inline bool
mac_address_is_all_zero (const u8 addr[6])
{
  int i;

  for (i = 0; i < 6; i++)
    if (addr[i] != 0)
      return false;

  return true;
}
static inline void
dpdk_flow_convert_rss_types (u64 type, u64 * dpdk_rss_type)
{
#define BIT_IS_SET(v, b) \
  ((v) & (u64) 1 << (b))

  *dpdk_rss_type = 0;

  /* walk the RSS hash-function table: (vnet bit, DPDK ETH_RSS_* flag, name) */
#define _(n, f, s) \
  if (n != -1 && BIT_IS_SET(type, n)) \
    *dpdk_rss_type |= f;

  foreach_dpdk_rss_hf
#undef _
}
static int
dpdk_flow_add (dpdk_device_t * xd, vnet_flow_t * f, dpdk_flow_entry_t * fe)
{
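  /* Each header[2] array below is a spec/mask pair: element 0 holds the
     values the packet must match (item->spec), element 1 the mask applied
     to them (item->mask). */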
  struct rte_flow_item_eth eth[2] = { };
  struct rte_flow_item_ipv4 ip4[2] = { };
  struct rte_flow_item_ipv4 inner_ip4[2] = { };
  struct rte_flow_item_ipv6 ip6[2] = { };
  struct rte_flow_item_ipv6 inner_ip6[2] = { };
  struct rte_flow_item_udp udp[2] = { };
  struct rte_flow_item_tcp tcp[2] = { };
  struct rte_flow_item_gtp gtp[2] = { };
  struct rte_flow_action_mark mark = { 0 };
  struct rte_flow_action_queue queue = { 0 };
  struct rte_flow_action_rss rss = { 0 };
  struct rte_flow_item *item, *items = 0;
  struct rte_flow_action *action, *actions = 0;
  enum
  {
    vxlan_hdr_sz = sizeof (vxlan_header_t),
    raw_sz = sizeof (struct rte_flow_item_raw)
  };
  union
  {
    struct rte_flow_item_raw item;
    u8 val[raw_sz + vxlan_hdr_sz];
  } raw[2];
  u16 src_port, dst_port, src_port_mask, dst_port_mask;
  u8 protocol;
  bool fate = false;
  ip6_address_t zero_addr = { };
  int rv = 0;
  if (f->actions & (~xd->supported_flow_actions))
    return VNET_FLOW_ERROR_NOT_SUPPORTED;
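  /* Build the match pattern: items are appended in protocol order
     (ETH -> [VLAN] -> IP -> UDP/TCP -> [tunnel] -> END). */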
  vec_add2 (items, item, 1);
  item->type = RTE_FLOW_ITEM_TYPE_ETH;
  if (f->type == VNET_FLOW_TYPE_ETHERNET)
    {
      vnet_flow_ethernet_t *te = &f->ethernet;

      clib_memset (&eth[0], 0, sizeof (eth[0]));
      clib_memset (&eth[1], 0, sizeof (eth[1]));

      /* check if SMAC/DMAC/Ether_type assigned */
      if (!mac_address_is_all_zero (te->eth_hdr.dst_address))
	{
	  clib_memcpy_fast (&eth[0].dst, &te->eth_hdr.dst_address,
			    sizeof (eth[0].dst));
	  clib_memset (&eth[1].dst, 0xFF, sizeof (eth[1].dst));
	}

      if (!mac_address_is_all_zero (te->eth_hdr.src_address))
	{
	  clib_memcpy_fast (&eth[0].src, &te->eth_hdr.src_address,
			    sizeof (eth[0].src));
	  clib_memset (&eth[1].src, 0xFF, sizeof (eth[1].src));
	}

      if (te->eth_hdr.type)
	{
	  eth[0].type = clib_host_to_net_u16 (te->eth_hdr.type);
	  eth[1].type = clib_host_to_net_u16 (0xFFFF);
	}

      item->spec = eth;
      item->mask = eth + 1;
    }
  else
    {
      item->spec = NULL;
      item->mask = NULL;
    }
  if (FLOW_IS_VLAN_TAGGED (f))
    {
      vec_add2 (items, item, 1);
      item->type = RTE_FLOW_ITEM_TYPE_VLAN;
      item->spec = NULL;
      item->mask = NULL;
    }

  if (FLOW_IS_L2_LAYER (f))
    goto pattern_end;
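  /* L3 header match: pick an IPv4 or IPv6 item based on the flow type and
     fill its address spec/mask pair. */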
  vec_add2 (items, item, 1);
  if ((f->type == VNET_FLOW_TYPE_IP6_N_TUPLE) ||
      (f->type == VNET_FLOW_TYPE_IP6_GTPC) ||
      (f->type == VNET_FLOW_TYPE_IP6_GTPU) ||
      (f->type == VNET_FLOW_TYPE_IP6_GTPU_IP4) ||
      (f->type == VNET_FLOW_TYPE_IP6_GTPU_IP6))
    {
      vnet_flow_ip6_n_tuple_t *t6 = &f->ip6_n_tuple;
      item->type = RTE_FLOW_ITEM_TYPE_IPV6;

      if (!clib_memcmp (&t6->src_addr.mask, &zero_addr, 16) &&
	  !clib_memcmp (&t6->dst_addr.mask, &zero_addr, 16))
	{
	  item->spec = NULL;
	  item->mask = NULL;
	}
      else
	{
	  clib_memcpy_fast (ip6[0].hdr.src_addr, &t6->src_addr.addr, 16);
	  clib_memcpy_fast (ip6[1].hdr.src_addr, &t6->src_addr.mask, 16);
	  clib_memcpy_fast (ip6[0].hdr.dst_addr, &t6->dst_addr.addr, 16);
	  clib_memcpy_fast (ip6[1].hdr.dst_addr, &t6->dst_addr.mask, 16);
	  item->spec = ip6;
	  item->mask = ip6 + 1;
	}

      src_port = t6->src_port.port;
      dst_port = t6->dst_port.port;
      src_port_mask = t6->src_port.mask;
      dst_port_mask = t6->dst_port.mask;
      protocol = t6->protocol;
    }
  else if ((f->type == VNET_FLOW_TYPE_IP4_N_TUPLE) ||
	   (f->type == VNET_FLOW_TYPE_IP4_GTPC) ||
	   (f->type == VNET_FLOW_TYPE_IP4_GTPU) ||
	   (f->type == VNET_FLOW_TYPE_IP4_GTPU_IP4) ||
	   (f->type == VNET_FLOW_TYPE_IP4_GTPU_IP6))
    {
      vnet_flow_ip4_n_tuple_t *t4 = &f->ip4_n_tuple;
      item->type = RTE_FLOW_ITEM_TYPE_IPV4;

      if (!t4->src_addr.mask.as_u32 && !t4->dst_addr.mask.as_u32)
	{
	  item->spec = NULL;
	  item->mask = NULL;
	}
      else
	{
	  ip4[0].hdr.src_addr = t4->src_addr.addr.as_u32;
	  ip4[1].hdr.src_addr = t4->src_addr.mask.as_u32;
	  ip4[0].hdr.dst_addr = t4->dst_addr.addr.as_u32;
	  ip4[1].hdr.dst_addr = t4->dst_addr.mask.as_u32;
	  item->spec = ip4;
	  item->mask = ip4 + 1;
	}

      src_port = t4->src_port.port;
      dst_port = t4->dst_port.port;
      src_port_mask = t4->src_port.mask;
      dst_port_mask = t4->dst_port.mask;
      protocol = t4->protocol;
    }
  else if (f->type == VNET_FLOW_TYPE_IP4_VXLAN)
    {
      vnet_flow_ip4_vxlan_t *v4 = &f->ip4_vxlan;
      ip4[0].hdr.src_addr = v4->src_addr.as_u32;
      ip4[1].hdr.src_addr = -1;
      ip4[0].hdr.dst_addr = v4->dst_addr.as_u32;
      ip4[1].hdr.dst_addr = -1;
      item->type = RTE_FLOW_ITEM_TYPE_IPV4;
      item->spec = ip4;
      item->mask = ip4 + 1;

      dst_port = v4->dst_port;
      dst_port_mask = -1;
      src_port = 0;
      src_port_mask = 0;
      protocol = IP_PROTOCOL_UDP;
    }
  else
    {
      rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
      goto done;
    }
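  /* L4 header match: the ports, port masks and protocol captured above
     select and fill a UDP or TCP item. */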
  vec_add2 (items, item, 1);
  if (protocol == IP_PROTOCOL_UDP)
    {
      item->type = RTE_FLOW_ITEM_TYPE_UDP;

      if ((src_port_mask == 0) && (dst_port_mask == 0))
	{
	  item->spec = NULL;
	  item->mask = NULL;
	}
      else
	{
	  udp[0].hdr.src_port = clib_host_to_net_u16 (src_port);
	  udp[1].hdr.src_port = clib_host_to_net_u16 (src_port_mask);
	  udp[0].hdr.dst_port = clib_host_to_net_u16 (dst_port);
	  udp[1].hdr.dst_port = clib_host_to_net_u16 (dst_port_mask);
	  item->spec = udp;
	  item->mask = udp + 1;
	}
    }
  else if (protocol == IP_PROTOCOL_TCP)
    {
      item->type = RTE_FLOW_ITEM_TYPE_TCP;

      if ((src_port_mask == 0) && (dst_port_mask == 0))
	{
	  item->spec = NULL;
	  item->mask = NULL;
	}
      else
	{
	  tcp[0].hdr.src_port = clib_host_to_net_u16 (src_port);
	  tcp[1].hdr.src_port = clib_host_to_net_u16 (src_port_mask);
	  tcp[0].hdr.dst_port = clib_host_to_net_u16 (dst_port);
	  tcp[1].hdr.dst_port = clib_host_to_net_u16 (dst_port_mask);
	  item->spec = tcp;
	  item->mask = tcp + 1;
	}
    }
  else
    {
      rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
      goto done;
    }
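  /* For tunneled flow types an additional tunnel-header item follows the
     L4 item: VXLAN is matched with a relative RAW item placed right after
     the UDP header, GTP-C/GTP-U with GTP items keyed on the TEID. */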
  /* Tunnel header match */
  if (f->type == VNET_FLOW_TYPE_IP4_VXLAN)
    {
      u32 vni = f->ip4_vxlan.vni;
      vxlan_header_t spec_hdr = {
	.flags = VXLAN_FLAGS_I,
	.vni_reserved = clib_host_to_net_u32 (vni << 8)
      };
      vxlan_header_t mask_hdr = {
	.flags = 0xff,
	.vni_reserved = clib_host_to_net_u32 (((u32) - 1) << 8)
      };

      clib_memset (raw, 0, sizeof raw);
      raw[0].item.relative = 1;
      raw[0].item.length = vxlan_hdr_sz;

      clib_memcpy_fast (raw[0].val + raw_sz, &spec_hdr, vxlan_hdr_sz);
      raw[0].item.pattern = raw[0].val + raw_sz;
      clib_memcpy_fast (raw[1].val + raw_sz, &mask_hdr, vxlan_hdr_sz);
      raw[1].item.pattern = raw[1].val + raw_sz;

      vec_add2 (items, item, 1);
      item->type = RTE_FLOW_ITEM_TYPE_RAW;
      item->spec = raw;
      item->mask = raw + 1;
    }
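  /* GTP-C / GTP-U flows match on the TEID carried in the GTP header. */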
  else if (f->type == VNET_FLOW_TYPE_IP4_GTPC)
    {
      vnet_flow_ip4_gtpc_t *gc = &f->ip4_gtpc;
      gtp[0].teid = clib_host_to_net_u32 (gc->teid);
      gtp[1].teid = ~0;

      vec_add2 (items, item, 1);
      item->type = RTE_FLOW_ITEM_TYPE_GTPC;
      item->spec = gtp;
      item->mask = gtp + 1;
    }
  else if (f->type == VNET_FLOW_TYPE_IP4_GTPU)
    {
      vnet_flow_ip4_gtpu_t *gu = &f->ip4_gtpu;
      gtp[0].teid = clib_host_to_net_u32 (gu->teid);
      gtp[1].teid = ~0;

      vec_add2 (items, item, 1);
      item->type = RTE_FLOW_ITEM_TYPE_GTPU;
      item->spec = gtp;
      item->mask = gtp + 1;
    }
  else if ((f->type == VNET_FLOW_TYPE_IP4_GTPU_IP4) ||
	   (f->type == VNET_FLOW_TYPE_IP4_GTPU_IP6))
    {
      vnet_flow_ip4_gtpu_t *gu = &f->ip4_gtpu;
      gtp[0].teid = clib_host_to_net_u32 (gu->teid);
      gtp[1].teid = ~0;

      vec_add2 (items, item, 1);
      item->type = RTE_FLOW_ITEM_TYPE_GTPU;
      item->spec = gtp;
      item->mask = gtp + 1;

      /* inner IP4 header */
      if (f->type == VNET_FLOW_TYPE_IP4_GTPU_IP4)
	{
	  vec_add2 (items, item, 1);
	  item->type = RTE_FLOW_ITEM_TYPE_IPV4;

	  vnet_flow_ip4_gtpu_ip4_t *gu4 = &f->ip4_gtpu_ip4;
	  if (!gu4->inner_src_addr.mask.as_u32 &&
	      !gu4->inner_dst_addr.mask.as_u32)
	    {
	      item->spec = NULL;
	      item->mask = NULL;
	    }
	  else
	    {
	      inner_ip4[0].hdr.src_addr = gu4->inner_src_addr.addr.as_u32;
	      inner_ip4[1].hdr.src_addr = gu4->inner_src_addr.mask.as_u32;
	      inner_ip4[0].hdr.dst_addr = gu4->inner_dst_addr.addr.as_u32;
	      inner_ip4[1].hdr.dst_addr = gu4->inner_dst_addr.mask.as_u32;
	      item->spec = inner_ip4;
	      item->mask = inner_ip4 + 1;
	    }
	}
      else if (f->type == VNET_FLOW_TYPE_IP4_GTPU_IP6)
	{
	  ip6_address_t zero_addr;
	  vnet_flow_ip4_gtpu_ip6_t *gu6 = &f->ip4_gtpu_ip6;

	  clib_memset (&zero_addr, 0, sizeof (ip6_address_t));

	  vec_add2 (items, item, 1);
	  item->type = RTE_FLOW_ITEM_TYPE_IPV6;

	  if (!clib_memcmp (&gu6->inner_src_addr.mask, &zero_addr, 16) &&
	      !clib_memcmp (&gu6->inner_dst_addr.mask, &zero_addr, 16))
	    {
	      item->spec = NULL;
	      item->mask = NULL;
	    }
	  else
	    {
	      clib_memcpy_fast (inner_ip6[0].hdr.src_addr,
				&gu6->inner_src_addr.addr, 16);
	      clib_memcpy_fast (inner_ip6[1].hdr.src_addr,
				&gu6->inner_src_addr.mask, 16);
	      clib_memcpy_fast (inner_ip6[0].hdr.dst_addr,
				&gu6->inner_dst_addr.addr, 16);
	      clib_memcpy_fast (inner_ip6[1].hdr.dst_addr,
				&gu6->inner_dst_addr.mask, 16);
	      item->spec = inner_ip6;
	      item->mask = inner_ip6 + 1;
	    }
	}
    }
  else if (f->type == VNET_FLOW_TYPE_IP6_GTPC)
    {
      vnet_flow_ip6_gtpc_t *gc = &f->ip6_gtpc;
      gtp[0].teid = clib_host_to_net_u32 (gc->teid);
      gtp[1].teid = ~0;

      vec_add2 (items, item, 1);
      item->type = RTE_FLOW_ITEM_TYPE_GTPC;
      item->spec = gtp;
      item->mask = gtp + 1;
    }
  else if (f->type == VNET_FLOW_TYPE_IP6_GTPU)
    {
      vnet_flow_ip6_gtpu_t *gu = &f->ip6_gtpu;
      gtp[0].teid = clib_host_to_net_u32 (gu->teid);
      gtp[1].teid = ~0;

      vec_add2 (items, item, 1);
      item->type = RTE_FLOW_ITEM_TYPE_GTPU;
      item->spec = gtp;
      item->mask = gtp + 1;
    }
  else if ((f->type == VNET_FLOW_TYPE_IP6_GTPU_IP4) ||
	   (f->type == VNET_FLOW_TYPE_IP6_GTPU_IP6))
    {
      vnet_flow_ip6_gtpu_t *gu = &f->ip6_gtpu;
      gtp[0].teid = clib_host_to_net_u32 (gu->teid);
      gtp[1].teid = ~0;

      vec_add2 (items, item, 1);
      item->type = RTE_FLOW_ITEM_TYPE_GTPU;
      item->spec = gtp;
      item->mask = gtp + 1;

      /* inner IP4 header */
      if (f->type == VNET_FLOW_TYPE_IP6_GTPU_IP4)
	{
	  vec_add2 (items, item, 1);
	  item->type = RTE_FLOW_ITEM_TYPE_IPV4;

	  vnet_flow_ip6_gtpu_ip4_t *gu4 = &f->ip6_gtpu_ip4;

	  if (!gu4->inner_src_addr.mask.as_u32 &&
	      !gu4->inner_dst_addr.mask.as_u32)
	    {
	      item->spec = NULL;
	      item->mask = NULL;
	    }
	  else
	    {
	      inner_ip4[0].hdr.src_addr = gu4->inner_src_addr.addr.as_u32;
	      inner_ip4[1].hdr.src_addr = gu4->inner_src_addr.mask.as_u32;
	      inner_ip4[0].hdr.dst_addr = gu4->inner_dst_addr.addr.as_u32;
	      inner_ip4[1].hdr.dst_addr = gu4->inner_dst_addr.mask.as_u32;
	      item->spec = inner_ip4;
	      item->mask = inner_ip4 + 1;
	    }
	}
      if (f->type == VNET_FLOW_TYPE_IP6_GTPU_IP6)
	{
	  ip6_address_t zero_addr;
	  vnet_flow_ip6_gtpu_ip6_t *gu6 = &f->ip6_gtpu_ip6;

	  clib_memset (&zero_addr, 0, sizeof (ip6_address_t));

	  vec_add2 (items, item, 1);
	  item->type = RTE_FLOW_ITEM_TYPE_IPV6;

	  if (!clib_memcmp (&gu6->inner_src_addr.mask, &zero_addr, 16) &&
	      !clib_memcmp (&gu6->inner_dst_addr.mask, &zero_addr, 16))
	    {
	      item->spec = NULL;
	      item->mask = NULL;
	    }
	  else
	    {
	      clib_memcpy_fast (inner_ip6[0].hdr.src_addr,
				&gu6->inner_src_addr.addr, 16);
	      clib_memcpy_fast (inner_ip6[1].hdr.src_addr,
				&gu6->inner_src_addr.mask, 16);
	      clib_memcpy_fast (inner_ip6[0].hdr.dst_addr,
				&gu6->inner_dst_addr.addr, 16);
	      clib_memcpy_fast (inner_ip6[1].hdr.dst_addr,
				&gu6->inner_dst_addr.mask, 16);
	      item->spec = inner_ip6;
	      item->mask = inner_ip6 + 1;
	    }
	}
    }
pattern_end:
  vec_add2 (items, item, 1);
  item->type = RTE_FLOW_ITEM_TYPE_END;
  /* Actions */
  /* Only one 'fate' can be assigned */
  if (f->actions & VNET_FLOW_ACTION_REDIRECT_TO_QUEUE)
    {
      vec_add2 (actions, action, 1);
      queue.index = f->redirect_queue;
      action->type = RTE_FLOW_ACTION_TYPE_QUEUE;
      action->conf = &queue;
      fate = true;
    }

  if (f->actions & VNET_FLOW_ACTION_DROP)
    {
      vec_add2 (actions, action, 1);
      action->type = RTE_FLOW_ACTION_TYPE_DROP;
      if (fate == true)
	{
	  rv = VNET_FLOW_ERROR_INTERNAL;
	  goto done;
	}
      else
	fate = true;
    }
  if (f->actions & VNET_FLOW_ACTION_RSS)
    {
      u64 rss_type = 0;

      vec_add2 (actions, action, 1);
      action->type = RTE_FLOW_ACTION_TYPE_RSS;
      action->conf = &rss;

      /* convert types to DPDK rss bitmask */
      dpdk_flow_convert_rss_types (f->rss_types, &rss_type);

      rss.types = rss_type;
      rss.func = (enum rte_eth_hash_function) f->rss_fun;

      if (fate == true)
	{
	  rv = VNET_FLOW_ERROR_INTERNAL;
	  goto done;
	}
      else
	fate = true;
    }

  if (fate == false)
    {
      vec_add2 (actions, action, 1);
      action->type = RTE_FLOW_ACTION_TYPE_PASSTHRU;
    }
  if (f->actions & VNET_FLOW_ACTION_MARK)
    {
      vec_add2 (actions, action, 1);
      mark.id = fe->mark;
      action->type = RTE_FLOW_ACTION_TYPE_MARK;
      action->conf = &mark;
    }

  vec_add2 (actions, action, 1);
  action->type = RTE_FLOW_ACTION_TYPE_END;
  /* validate the pattern/action lists before programming the device */
  rv = rte_flow_validate (xd->device_index, &ingress, items, actions,
			  &xd->last_flow_error);
  if (rv)
    {
      if (rv == -EINVAL)
	rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
      else if (rv == -EEXIST)
	rv = VNET_FLOW_ERROR_ALREADY_EXISTS;
      else
	rv = VNET_FLOW_ERROR_INTERNAL;
      goto done;
    }

  fe->handle = rte_flow_create (xd->device_index, &ingress, items, actions,
				&xd->last_flow_error);

  if (!fe->handle)
    rv = VNET_FLOW_ERROR_NOT_SUPPORTED;

done:
  vec_free (items);
  vec_free (actions);
  return rv;
}
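/*
 * Example: for an IP4 VXLAN flow with redirect-to-queue and mark actions,
 * dpdk_flow_add () ends up building
 *
 *   pattern: ETH -> IPV4 (outer addresses) -> UDP (dst port) ->
 *            RAW (VXLAN header, relative) -> END
 *   actions: QUEUE -> MARK -> END
 *
 * which is validated and then programmed with rte_flow_create ().
 */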
int
dpdk_flow_ops_fn (vnet_main_t * vnm, vnet_flow_dev_op_t op, u32 dev_instance,
		  u32 flow_index, uword * private_data)
{
  dpdk_main_t *dm = &dpdk_main;
  vnet_flow_t *flow = vnet_get_flow (flow_index);
  dpdk_device_t *xd = vec_elt_at_index (dm->devices, dev_instance);
  dpdk_flow_entry_t *fe;
  dpdk_flow_lookup_entry_t *fle = 0;
  int rv = 0;
  /* recycle old flow lookup entries only after the main loop counter
     increases - i.e. previously DMA'ed packets were handled */
  if (vec_len (xd->parked_lookup_indexes) > 0 &&
      xd->parked_loop_count != dm->vlib_main->main_loop_count)
    {
      u32 *fl_index;

      vec_foreach (fl_index, xd->parked_lookup_indexes)
	pool_put_index (xd->flow_lookup_entries, *fl_index);
      vec_reset_length (xd->parked_lookup_indexes);
    }
  if (op == VNET_FLOW_DEV_OP_DEL_FLOW)
    {
      fe = vec_elt_at_index (xd->flow_entries, *private_data);

      if ((rv = rte_flow_destroy (xd->device_index, fe->handle,
				  &xd->last_flow_error)))
	return VNET_FLOW_ERROR_INTERNAL;

      if (fe->mark)
	{
	  /* make sure no action is taken for in-flight (marked) packets */
	  fle = pool_elt_at_index (xd->flow_lookup_entries, fe->mark);
	  clib_memset (fle, -1, sizeof (*fle));
	  vec_add1 (xd->parked_lookup_indexes, fe->mark);
	  xd->parked_loop_count = dm->vlib_main->main_loop_count;
	}

      clib_memset (fe, 0, sizeof (*fe));
      pool_put (xd->flow_entries, fe);

      goto disable_rx_offload;
    }
  if (op != VNET_FLOW_DEV_OP_ADD_FLOW)
    return VNET_FLOW_ERROR_NOT_SUPPORTED;
  pool_get (xd->flow_entries, fe);
  fe->flow_index = flow->index;

  if (flow->actions == 0)
    {
      rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
      goto done;
    }
  /* if we need to mark packets, assign one mark */
  if (flow->actions & (VNET_FLOW_ACTION_MARK |
		       VNET_FLOW_ACTION_REDIRECT_TO_NODE |
		       VNET_FLOW_ACTION_BUFFER_ADVANCE))
    {
      /* the first allocation reserves pool index 0 so a valid entry never
         gets mark 0 */
      if (xd->flow_lookup_entries == 0)
	pool_get_aligned (xd->flow_lookup_entries, fle,
			  CLIB_CACHE_LINE_BYTES);
      pool_get_aligned (xd->flow_lookup_entries, fle, CLIB_CACHE_LINE_BYTES);
      fe->mark = fle - xd->flow_lookup_entries;

      /* install entry in the lookup table */
      clib_memset (fle, -1, sizeof (*fle));
      if (flow->actions & VNET_FLOW_ACTION_MARK)
	fle->flow_id = flow->mark_flow_id;
      if (flow->actions & VNET_FLOW_ACTION_REDIRECT_TO_NODE)
	fle->next_index = flow->redirect_device_input_next_index;
      if (flow->actions & VNET_FLOW_ACTION_BUFFER_ADVANCE)
	fle->buffer_advance = flow->buffer_advance;
    }
  else
    fe->mark = 0;
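  /* The mark programmed into the NIC is the index of this lookup entry, so
     the dpdk input path can map a received mark back to flow_id, next_index
     and buffer_advance. */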
  if ((xd->flags & DPDK_DEVICE_FLAG_RX_FLOW_OFFLOAD) == 0)
    {
      xd->flags |= DPDK_DEVICE_FLAG_RX_FLOW_OFFLOAD;
      dpdk_device_setup (xd);
    }
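  /* With RX flow offload enabled on the device, program the flow according
     to its type. */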
  switch (flow->type)
    {
    case VNET_FLOW_TYPE_ETHERNET:
    case VNET_FLOW_TYPE_IP4_N_TUPLE:
    case VNET_FLOW_TYPE_IP6_N_TUPLE:
    case VNET_FLOW_TYPE_IP4_VXLAN:
    case VNET_FLOW_TYPE_IP4_GTPC:
    case VNET_FLOW_TYPE_IP4_GTPU:
    case VNET_FLOW_TYPE_IP4_GTPU_IP4:
    case VNET_FLOW_TYPE_IP4_GTPU_IP6:
    case VNET_FLOW_TYPE_IP6_GTPC:
    case VNET_FLOW_TYPE_IP6_GTPU:
    case VNET_FLOW_TYPE_IP6_GTPU_IP4:
    case VNET_FLOW_TYPE_IP6_GTPU_IP6:
      if ((rv = dpdk_flow_add (xd, flow, fe)))
	goto done;
      break;
    default:
      rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
      goto done;
    }
  *private_data = fe - xd->flow_entries;

done:
  if (rv)
    {
      clib_memset (fe, 0, sizeof (*fe));
      pool_put (xd->flow_entries, fe);
      if (fle)
	{
	  clib_memset (fle, -1, sizeof (*fle));
	  pool_put (xd->flow_lookup_entries, fle);
	}
    }

disable_rx_offload:
  if ((xd->flags & DPDK_DEVICE_FLAG_RX_FLOW_OFFLOAD) != 0
      && pool_elts (xd->flow_entries) == 0)
    {
      xd->flags &= ~DPDK_DEVICE_FLAG_RX_FLOW_OFFLOAD;
      dpdk_device_setup (xd);
    }

  return rv;
}
u8 *
format_dpdk_flow (u8 * s, va_list * args)
{
  u32 dev_instance = va_arg (*args, u32);
  u32 flow_index = va_arg (*args, u32);
  uword private_data = va_arg (*args, uword);
  dpdk_main_t *dm = &dpdk_main;
  dpdk_device_t *xd = vec_elt_at_index (dm->devices, dev_instance);
  dpdk_flow_entry_t *fe;

  if (flow_index == ~0)
    {
      s = format (s, "%-25s: %U\n", "supported flow actions",
		  format_flow_actions, xd->supported_flow_actions);
      s = format (s, "%-25s: %d\n", "last DPDK error type",
		  xd->last_flow_error.type);
      s = format (s, "%-25s: %s\n", "last DPDK error message",
		  xd->last_flow_error.message ? xd->last_flow_error.message :
		  "n/a");
      return s;
    }

  if (private_data >= vec_len (xd->flow_entries))
    return format (s, "unknown flow");

  fe = vec_elt_at_index (xd->flow_entries, private_data);
  s = format (s, "mark %u", fe->mark);

  return s;
}
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */