/*
 * Copyright (c) 2019 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vnet/vnet.h>
#include <vppinfra/vec.h>
#include <vppinfra/format.h>

#include <vnet/ip/ip.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/ethernet/arp_packet.h>
#include <vnet/vxlan/vxlan.h>
#include <dpdk/device/dpdk.h>

#include <dpdk/device/dpdk_priv.h>
#include <vppinfra/error.h>
/* check if flow is L2 flow */
#define FLOW_IS_L2_LAYER(f) \
  (f->type == VNET_FLOW_TYPE_ETHERNET)

/* check if flow is VLAN sensitive */
#define FLOW_IS_VLAN_TAGGED(f) \
  ((f->type == VNET_FLOW_TYPE_IP4_N_TUPLE_TAGGED) || \
   (f->type == VNET_FLOW_TYPE_IP6_N_TUPLE_TAGGED))

/* check if flow is L4 type */
#define FLOW_IS_L4_LAYER(f) \
  ((f->type == VNET_FLOW_TYPE_IP4_N_TUPLE) || \
   (f->type == VNET_FLOW_TYPE_IP6_N_TUPLE))
/* check if flow is L4 tunnel type */
#define FLOW_IS_L4_TUNNEL_LAYER(f) \
  ((f->type >= VNET_FLOW_TYPE_IP4_VXLAN) && \
   (f->type <= VNET_FLOW_TYPE_IP6_GTPU_IP6))
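/* Note: the range check above assumes the tunnel flow types (VXLAN,
   GTPC, GTPU, ...) are laid out contiguously between
   VNET_FLOW_TYPE_IP4_VXLAN and VNET_FLOW_TYPE_IP6_GTPU_IP6 in the
   vnet_flow_type_t enum; joining the two comparisons with '||' instead
   of '&&' would make the predicate true for every flow type. */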
/* constant structs */
static const struct rte_flow_attr ingress = {.ingress = 1 };
static inline bool
mac_address_is_all_zero (const u8 addr[6])
{
  int i = 0;

  for (i = 0; i < 6; i++)
    if (addr[i] != 0)
      return false;

  return true;
}
static inline void
dpdk_flow_convert_rss_types (u64 type, u64 * dpdk_rss_type)
{
#define BIT_IS_SET(v, b) \
  ((v) & (u64)1 << (b))

  *dpdk_rss_type = 0;

#undef _
#define _(n, f, s) \
      if (n != -1 && BIT_IS_SET(type, n)) \
        *dpdk_rss_type |= f;

  foreach_dpdk_rss_hf
#undef _
    return;
}
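/* Usage sketch (assuming the foreach_dpdk_rss_hf x-macro expands to
   (vnet RSS bit, DPDK ETH_RSS_* flag, name) tuples):

     u64 dpdk_types;
     dpdk_flow_convert_rss_types (f->rss_types, &dpdk_types);

   Every vnet RSS type bit set in f->rss_types is OR-ed into dpdk_types
   as the corresponding DPDK hash-function flag. */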
static int
dpdk_flow_add (dpdk_device_t * xd, vnet_flow_t * f, dpdk_flow_entry_t * fe)
{
  struct rte_flow_item_eth eth[2] = { };
  struct rte_flow_item_ipv4 ip4[2] = { };
  struct rte_flow_item_ipv4 inner_ip4[2] = { };
  struct rte_flow_item_ipv6 ip6[2] = { };
  struct rte_flow_item_ipv6 inner_ip6[2] = { };
  struct rte_flow_item_udp udp[2] = { };
  struct rte_flow_item_tcp tcp[2] = { };
  struct rte_flow_item_gtp gtp[2] = { };
  struct rte_flow_item_l2tpv3oip l2tp[2] = { };
  struct rte_flow_action_mark mark = { 0 };
  struct rte_flow_action_queue queue = { 0 };
  struct rte_flow_action_rss rss = { 0 };
  struct rte_flow_item *item, *items = 0;
  struct rte_flow_action *action, *actions = 0;
  bool fate = false;

  enum
  {
    vxlan_hdr_sz = sizeof (vxlan_header_t),
    raw_sz = sizeof (struct rte_flow_item_raw)
  };

  union
  {
    struct rte_flow_item_raw item;
    u8 val[raw_sz + vxlan_hdr_sz];
  } raw[2];

  u16 src_port = 0, dst_port = 0, src_port_mask = 0, dst_port_mask = 0;
  u8 protocol = IP_PROTOCOL_RESERVED;
  int rv = 0;
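  /* Each match item below uses a two-element array: element [0] is the
     spec (values to match) and element [1] is the mask (which bits of
     the spec are significant), per the rte_flow spec/mask convention. */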
  if (f->actions & (~xd->supported_flow_actions))
    return VNET_FLOW_ERROR_NOT_SUPPORTED;
  /* Match items */
  /* Ethernet */
  vec_add2 (items, item, 1);
  item->type = RTE_FLOW_ITEM_TYPE_ETH;
  if (f->type == VNET_FLOW_TYPE_ETHERNET)
    {
      vnet_flow_ethernet_t *te = &f->ethernet;

      clib_memset (&eth[0], 0, sizeof (eth[0]));
      clib_memset (&eth[1], 0, sizeof (eth[1]));

      /* check if SMAC/DMAC/Ether_type assigned */
      if (!mac_address_is_all_zero (te->eth_hdr.dst_address))
	{
	  clib_memcpy_fast (&eth[0].dst, &te->eth_hdr.dst_address,
			    sizeof (eth[0].dst));
	  clib_memset (&eth[1].dst, 0xFF, sizeof (eth[1].dst));
	}

      if (!mac_address_is_all_zero (te->eth_hdr.src_address))
	{
	  clib_memcpy_fast (&eth[0].src, &te->eth_hdr.src_address,
			    sizeof (eth[0].src));
	  clib_memset (&eth[1].src, 0xFF, sizeof (eth[1].src));
	}

      if (te->eth_hdr.type)
	{
	  eth[0].type = clib_host_to_net_u16 (te->eth_hdr.type);
	  eth[1].type = clib_host_to_net_u16 (0xFFFF);
	}

      item->spec = eth;
      item->mask = eth + 1;
    }
  else
    {
      item->spec = NULL;
      item->mask = NULL;
    }
  if (FLOW_IS_VLAN_TAGGED (f))
    {
      vec_add2 (items, item, 1);
      item->type = RTE_FLOW_ITEM_TYPE_VLAN;
      item->spec = NULL;
      item->mask = NULL;
    }

  if (FLOW_IS_L2_LAYER (f))
    goto pattern_end;
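  /* For VLAN-sensitive flow types an empty VLAN item is appended so the
     device matches tagged traffic; a pure L2 (Ethernet) flow is already
     complete at this point and jumps straight to the END pattern item. */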
  /* IP */
  vec_add2 (items, item, 1);
  if (f->type == VNET_FLOW_TYPE_IP4_L2TPV3OIP)
    {
      vnet_flow_ip4_l2tpv3oip_t *l2tp = &f->ip4_l2tpv3oip;
      item->type = RTE_FLOW_ITEM_TYPE_IPV4;

      if (!l2tp->src_addr.mask.as_u32 && !l2tp->dst_addr.mask.as_u32)
	{
	  item->spec = NULL;
	  item->mask = NULL;
	}
      else
	{
	  ip4[0].hdr.src_addr = l2tp->src_addr.addr.as_u32;
	  ip4[1].hdr.src_addr = l2tp->src_addr.mask.as_u32;
	  ip4[0].hdr.dst_addr = l2tp->dst_addr.addr.as_u32;
	  ip4[1].hdr.dst_addr = l2tp->dst_addr.mask.as_u32;
	  item->spec = ip4;
	  item->mask = ip4 + 1;
	}
      protocol = l2tp->protocol;
    }
  else if ((f->type == VNET_FLOW_TYPE_IP6_N_TUPLE) ||
	   (f->type == VNET_FLOW_TYPE_IP6_GTPC) ||
	   (f->type == VNET_FLOW_TYPE_IP6_GTPU) ||
	   (f->type == VNET_FLOW_TYPE_IP6_GTPU_IP4) ||
	   (f->type == VNET_FLOW_TYPE_IP6_GTPU_IP6))
    {
      vnet_flow_ip6_n_tuple_t *t6 = &f->ip6_n_tuple;
      ip6_address_t zero_addr;

      clib_memset (&zero_addr, 0, sizeof (ip6_address_t));

      item->type = RTE_FLOW_ITEM_TYPE_IPV6;

      if (!clib_memcmp (&t6->src_addr.mask, &zero_addr, 16) &&
	  !clib_memcmp (&t6->dst_addr.mask, &zero_addr, 16))
	{
	  item->spec = NULL;
	  item->mask = NULL;
	}
      else
	{
	  clib_memcpy_fast (ip6[0].hdr.src_addr, &t6->src_addr.addr, 16);
	  clib_memcpy_fast (ip6[1].hdr.src_addr, &t6->src_addr.mask, 16);
	  clib_memcpy_fast (ip6[0].hdr.dst_addr, &t6->dst_addr.addr, 16);
	  clib_memcpy_fast (ip6[1].hdr.dst_addr, &t6->dst_addr.mask, 16);
	  item->spec = ip6;
	  item->mask = ip6 + 1;
	}

      src_port = t6->src_port.port;
      dst_port = t6->dst_port.port;
      src_port_mask = t6->src_port.mask;
      dst_port_mask = t6->dst_port.mask;
      protocol = t6->protocol;
    }
  else if ((f->type == VNET_FLOW_TYPE_IP4_N_TUPLE) ||
	   (f->type == VNET_FLOW_TYPE_IP4_GTPC) ||
	   (f->type == VNET_FLOW_TYPE_IP4_GTPU) ||
	   (f->type == VNET_FLOW_TYPE_IP4_GTPU_IP4) ||
	   (f->type == VNET_FLOW_TYPE_IP4_GTPU_IP6))
    {
      vnet_flow_ip4_n_tuple_t *t4 = &f->ip4_n_tuple;
      item->type = RTE_FLOW_ITEM_TYPE_IPV4;

      if (!t4->src_addr.mask.as_u32 && !t4->dst_addr.mask.as_u32)
	{
	  item->spec = NULL;
	  item->mask = NULL;
	}
      else
	{
	  ip4[0].hdr.src_addr = t4->src_addr.addr.as_u32;
	  ip4[1].hdr.src_addr = t4->src_addr.mask.as_u32;
	  ip4[0].hdr.dst_addr = t4->dst_addr.addr.as_u32;
	  ip4[1].hdr.dst_addr = t4->dst_addr.mask.as_u32;
	  item->spec = ip4;
	  item->mask = ip4 + 1;
	}

      src_port = t4->src_port.port;
      dst_port = t4->dst_port.port;
      src_port_mask = t4->src_port.mask;
      dst_port_mask = t4->dst_port.mask;
      protocol = t4->protocol;
    }
  else if (f->type == VNET_FLOW_TYPE_IP4_VXLAN)
    {
      vnet_flow_ip4_vxlan_t *v4 = &f->ip4_vxlan;
      ip4[0].hdr.src_addr = v4->src_addr.as_u32;
      ip4[1].hdr.src_addr = -1;
      ip4[0].hdr.dst_addr = v4->dst_addr.as_u32;
      ip4[1].hdr.dst_addr = -1;
      item->type = RTE_FLOW_ITEM_TYPE_IPV4;
      item->spec = ip4;
      item->mask = ip4 + 1;

      dst_port = v4->dst_port;
      dst_port_mask = -1;
      src_port = 0;
      src_port_mask = 0;
      protocol = IP_PROTOCOL_UDP;
    }
  else
    {
      rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
      goto done;
    }
  /* Layer 4 */
  if (protocol == IP_PROTOCOL_UDP)
    {
      vec_add2 (items, item, 1);
      item->type = RTE_FLOW_ITEM_TYPE_UDP;

      if ((src_port_mask == 0) && (dst_port_mask == 0))
	{
	  item->spec = NULL;
	  item->mask = NULL;
	}
      else
	{
	  udp[0].hdr.src_port = clib_host_to_net_u16 (src_port);
	  udp[1].hdr.src_port = clib_host_to_net_u16 (src_port_mask);
	  udp[0].hdr.dst_port = clib_host_to_net_u16 (dst_port);
	  udp[1].hdr.dst_port = clib_host_to_net_u16 (dst_port_mask);
	  item->spec = udp;
	  item->mask = udp + 1;
	}
    }
  else if (protocol == IP_PROTOCOL_TCP)
    {
      vec_add2 (items, item, 1);
      item->type = RTE_FLOW_ITEM_TYPE_TCP;

      if ((src_port_mask == 0) && (dst_port_mask == 0))
	{
	  item->spec = NULL;
	  item->mask = NULL;
	}
      else
	{
	  tcp[0].hdr.src_port = clib_host_to_net_u16 (src_port);
	  tcp[1].hdr.src_port = clib_host_to_net_u16 (src_port_mask);
	  tcp[0].hdr.dst_port = clib_host_to_net_u16 (dst_port);
	  tcp[1].hdr.dst_port = clib_host_to_net_u16 (dst_port_mask);
	  item->spec = tcp;
	  item->mask = tcp + 1;
	}
    }
  else if (protocol == IP_PROTOCOL_RESERVED)
    {
      rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
      goto done;
    }
  /* Tunnel header match */
  if (f->type == VNET_FLOW_TYPE_IP4_L2TPV3OIP)
    {
      vec_add2 (items, item, 1);
      item->type = RTE_FLOW_ITEM_TYPE_L2TPV3OIP;

      vnet_flow_ip4_l2tpv3oip_t *tl2tp = &f->ip4_l2tpv3oip;
      l2tp[0].session_id = clib_host_to_net_u32 (tl2tp->session_id);
      l2tp[1].session_id = ~0;

      item->spec = l2tp;
      item->mask = l2tp + 1;
    }
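  /* For VXLAN the VNI is matched with a RAW pattern item laid out
     relative to the end of the preceding UDP item, rather than with a
     dedicated VXLAN item: the raw union above reserves room for the
     rte_flow_item_raw header plus an 8-byte VXLAN header, and the mask
     keeps only the flags byte and the 24-bit VNI significant. */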
  if (f->type == VNET_FLOW_TYPE_IP4_VXLAN)
    {
      u32 vni = f->ip4_vxlan.vni;
      vxlan_header_t spec_hdr = {
	.flags = VXLAN_FLAGS_I,
	.vni_reserved = clib_host_to_net_u32 (vni << 8)
      };
      vxlan_header_t mask_hdr = {
	.flags = 0xff,
	.vni_reserved = clib_host_to_net_u32 (((u32) - 1) << 8)
      };

      clib_memset (raw, 0, sizeof raw);
      raw[0].item.relative = 1;
      raw[0].item.length = vxlan_hdr_sz;

      clib_memcpy_fast (raw[0].val + raw_sz, &spec_hdr, vxlan_hdr_sz);
      raw[0].item.pattern = raw[0].val + raw_sz;
      clib_memcpy_fast (raw[1].val + raw_sz, &mask_hdr, vxlan_hdr_sz);
      raw[1].item.pattern = raw[1].val + raw_sz;

      vec_add2 (items, item, 1);
      item->type = RTE_FLOW_ITEM_TYPE_RAW;
      item->spec = raw;
      item->mask = raw + 1;
    }
  else if (f->type == VNET_FLOW_TYPE_IP4_GTPC)
    {
      vnet_flow_ip4_gtpc_t *gc = &f->ip4_gtpc;
      gtp[0].teid = clib_host_to_net_u32 (gc->teid);
      gtp[1].teid = ~0;

      vec_add2 (items, item, 1);
      item->type = RTE_FLOW_ITEM_TYPE_GTPC;
      item->spec = gtp;
      item->mask = gtp + 1;
    }
  else if (f->type == VNET_FLOW_TYPE_IP4_GTPU)
    {
      vnet_flow_ip4_gtpu_t *gu = &f->ip4_gtpu;
      gtp[0].teid = clib_host_to_net_u32 (gu->teid);
      gtp[1].teid = ~0;

      vec_add2 (items, item, 1);
      item->type = RTE_FLOW_ITEM_TYPE_GTPU;
      item->spec = gtp;
      item->mask = gtp + 1;
    }
  else if ((f->type == VNET_FLOW_TYPE_IP4_GTPU_IP4) ||
	   (f->type == VNET_FLOW_TYPE_IP4_GTPU_IP6))
    {
      vnet_flow_ip4_gtpu_t *gu = &f->ip4_gtpu;
      gtp[0].teid = clib_host_to_net_u32 (gu->teid);
      gtp[1].teid = ~0;

      vec_add2 (items, item, 1);
      item->type = RTE_FLOW_ITEM_TYPE_GTPU;
      item->spec = gtp;
      item->mask = gtp + 1;
      /* inner IP4 header */
      if (f->type == VNET_FLOW_TYPE_IP4_GTPU_IP4)
	{
	  vec_add2 (items, item, 1);
	  item->type = RTE_FLOW_ITEM_TYPE_IPV4;

	  vnet_flow_ip4_gtpu_ip4_t *gu4 = &f->ip4_gtpu_ip4;
	  if (!gu4->inner_src_addr.mask.as_u32 &&
	      !gu4->inner_dst_addr.mask.as_u32)
	    {
	      item->spec = NULL;
	      item->mask = NULL;
	    }
	  else
	    {
	      inner_ip4[0].hdr.src_addr = gu4->inner_src_addr.addr.as_u32;
	      inner_ip4[1].hdr.src_addr = gu4->inner_src_addr.mask.as_u32;
	      inner_ip4[0].hdr.dst_addr = gu4->inner_dst_addr.addr.as_u32;
	      inner_ip4[1].hdr.dst_addr = gu4->inner_dst_addr.mask.as_u32;
	      item->spec = inner_ip4;
	      item->mask = inner_ip4 + 1;
	    }
	}
      /* inner IP6 header */
      else if (f->type == VNET_FLOW_TYPE_IP4_GTPU_IP6)
	{
	  ip6_address_t zero_addr;
	  vnet_flow_ip4_gtpu_ip6_t *gu6 = &f->ip4_gtpu_ip6;

	  clib_memset (&zero_addr, 0, sizeof (ip6_address_t));

	  vec_add2 (items, item, 1);
	  item->type = RTE_FLOW_ITEM_TYPE_IPV6;

	  if (!clib_memcmp (&gu6->inner_src_addr.mask, &zero_addr, 16) &&
	      !clib_memcmp (&gu6->inner_dst_addr.mask, &zero_addr, 16))
	    {
	      item->spec = NULL;
	      item->mask = NULL;
	    }
	  else
	    {
	      clib_memcpy_fast (inner_ip6[0].hdr.src_addr,
				&gu6->inner_src_addr.addr, 16);
	      clib_memcpy_fast (inner_ip6[1].hdr.src_addr,
				&gu6->inner_src_addr.mask, 16);
	      clib_memcpy_fast (inner_ip6[0].hdr.dst_addr,
				&gu6->inner_dst_addr.addr, 16);
	      clib_memcpy_fast (inner_ip6[1].hdr.dst_addr,
				&gu6->inner_dst_addr.mask, 16);
	      item->spec = inner_ip6;
	      item->mask = inner_ip6 + 1;
	    }
	}
    }
  else if (f->type == VNET_FLOW_TYPE_IP6_GTPC)
    {
      vnet_flow_ip6_gtpc_t *gc = &f->ip6_gtpc;
      gtp[0].teid = clib_host_to_net_u32 (gc->teid);
      gtp[1].teid = ~0;

      vec_add2 (items, item, 1);
      item->type = RTE_FLOW_ITEM_TYPE_GTPC;
      item->spec = gtp;
      item->mask = gtp + 1;
    }
  else if (f->type == VNET_FLOW_TYPE_IP6_GTPU)
    {
      vnet_flow_ip6_gtpu_t *gu = &f->ip6_gtpu;
      gtp[0].teid = clib_host_to_net_u32 (gu->teid);
      gtp[1].teid = ~0;

      vec_add2 (items, item, 1);
      item->type = RTE_FLOW_ITEM_TYPE_GTPU;
      item->spec = gtp;
      item->mask = gtp + 1;
    }
  else if ((f->type == VNET_FLOW_TYPE_IP6_GTPU_IP4) ||
	   (f->type == VNET_FLOW_TYPE_IP6_GTPU_IP6))
    {
      vnet_flow_ip6_gtpu_t *gu = &f->ip6_gtpu;
      gtp[0].teid = clib_host_to_net_u32 (gu->teid);
      gtp[1].teid = ~0;

      vec_add2 (items, item, 1);
      item->type = RTE_FLOW_ITEM_TYPE_GTPU;
      item->spec = gtp;
      item->mask = gtp + 1;
      /* inner IP4 header */
      if (f->type == VNET_FLOW_TYPE_IP6_GTPU_IP4)
	{
	  vec_add2 (items, item, 1);
	  item->type = RTE_FLOW_ITEM_TYPE_IPV4;

	  vnet_flow_ip6_gtpu_ip4_t *gu4 = &f->ip6_gtpu_ip4;

	  if (!gu4->inner_src_addr.mask.as_u32 &&
	      !gu4->inner_dst_addr.mask.as_u32)
	    {
	      item->spec = NULL;
	      item->mask = NULL;
	    }
	  else
	    {
	      inner_ip4[0].hdr.src_addr = gu4->inner_src_addr.addr.as_u32;
	      inner_ip4[1].hdr.src_addr = gu4->inner_src_addr.mask.as_u32;
	      inner_ip4[0].hdr.dst_addr = gu4->inner_dst_addr.addr.as_u32;
	      inner_ip4[1].hdr.dst_addr = gu4->inner_dst_addr.mask.as_u32;
	      item->spec = inner_ip4;
	      item->mask = inner_ip4 + 1;
	    }
	}
      /* inner IP6 header */
      if (f->type == VNET_FLOW_TYPE_IP6_GTPU_IP6)
	{
	  ip6_address_t zero_addr;
	  vnet_flow_ip6_gtpu_ip6_t *gu6 = &f->ip6_gtpu_ip6;

	  clib_memset (&zero_addr, 0, sizeof (ip6_address_t));

	  vec_add2 (items, item, 1);
	  item->type = RTE_FLOW_ITEM_TYPE_IPV6;

	  if (!clib_memcmp (&gu6->inner_src_addr.mask, &zero_addr, 16) &&
	      !clib_memcmp (&gu6->inner_dst_addr.mask, &zero_addr, 16))
	    {
	      item->spec = NULL;
	      item->mask = NULL;
	    }
	  else
	    {
	      clib_memcpy_fast (inner_ip6[0].hdr.src_addr,
				&gu6->inner_src_addr.addr, 16);
	      clib_memcpy_fast (inner_ip6[1].hdr.src_addr,
				&gu6->inner_src_addr.mask, 16);
	      clib_memcpy_fast (inner_ip6[0].hdr.dst_addr,
				&gu6->inner_dst_addr.addr, 16);
	      clib_memcpy_fast (inner_ip6[1].hdr.dst_addr,
				&gu6->inner_dst_addr.mask, 16);
	      item->spec = inner_ip6;
	      item->mask = inner_ip6 + 1;
	    }
	}
    }
pattern_end:
  vec_add2 (items, item, 1);
  item->type = RTE_FLOW_ITEM_TYPE_END;
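  /* Example: for an IP4 5-tuple UDP flow the pattern built above is
     ETH -> IPV4 -> UDP -> END; for an IP4 VXLAN flow it is
     ETH -> IPV4 -> UDP -> RAW (VXLAN header) -> END. */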
  /* Actions */
  /* Only one 'fate' can be assigned */
  if (f->actions & VNET_FLOW_ACTION_REDIRECT_TO_QUEUE)
    {
      vec_add2 (actions, action, 1);
      queue.index = f->redirect_queue;
      action->type = RTE_FLOW_ACTION_TYPE_QUEUE;
      action->conf = &queue;
      fate = true;
    }
  if (f->actions & VNET_FLOW_ACTION_DROP)
    {
      vec_add2 (actions, action, 1);
      action->type = RTE_FLOW_ACTION_TYPE_DROP;
      if (fate == true)
	{
	  rv = VNET_FLOW_ERROR_INTERNAL;
	  goto done;
	}
      else
	fate = true;
    }
  if (f->actions & VNET_FLOW_ACTION_RSS)
    {
      u64 rss_type = 0;

      vec_add2 (actions, action, 1);
      action->type = RTE_FLOW_ACTION_TYPE_RSS;
      action->conf = &rss;

      /* convert types to DPDK rss bitmask */
      dpdk_flow_convert_rss_types (f->rss_types, &rss_type);

      rss.types = rss_type;
      rss.func = (enum rte_eth_hash_function) f->rss_fun;

      if (fate == true)
	{
	  rv = VNET_FLOW_ERROR_INTERNAL;
	  goto done;
	}
      else
	fate = true;
    }
  if (fate == false)
    {
      vec_add2 (actions, action, 1);
      action->type = RTE_FLOW_ACTION_TYPE_PASSTHRU;
    }
  if (f->actions & VNET_FLOW_ACTION_MARK)
    {
      vec_add2 (actions, action, 1);
      mark.id = fe->mark;
      action->type = RTE_FLOW_ACTION_TYPE_MARK;
      action->conf = &mark;
    }
  vec_add2 (actions, action, 1);
  action->type = RTE_FLOW_ACTION_TYPE_END;
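  /* Example: a flow with REDIRECT_TO_QUEUE plus MARK yields the action
     chain QUEUE -> MARK -> END; at most one fate action (QUEUE, DROP or
     RSS) is present, with PASSTHRU substituted when none was requested. */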
  rv = rte_flow_validate (xd->device_index, &ingress, items, actions,
			  &xd->last_flow_error);

  if (rv)
    {
      if (rv == -EINVAL)
	rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
      else if (rv == -EEXIST)
	rv = VNET_FLOW_ERROR_ALREADY_EXISTS;
      else
	rv = VNET_FLOW_ERROR_INTERNAL;
      goto done;
    }

  fe->handle = rte_flow_create (xd->device_index, &ingress, items, actions,
				&xd->last_flow_error);

  if (!fe->handle)
    rv = VNET_FLOW_ERROR_NOT_SUPPORTED;

done:
  vec_free (items);
  vec_free (actions);
  return rv;
}
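/* Device-level flow op handler. The vnet flow layer calls it (in the
   dpdk plugin it is registered as the device class's flow_ops_function)
   with VNET_FLOW_DEV_OP_ADD_FLOW or VNET_FLOW_DEV_OP_DEL_FLOW;
   *private_data carries the dpdk_flow_entry_t index between the add and
   the matching delete. */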
int
dpdk_flow_ops_fn (vnet_main_t * vnm, vnet_flow_dev_op_t op, u32 dev_instance,
		  u32 flow_index, uword * private_data)
{
  dpdk_main_t *dm = &dpdk_main;
  vnet_flow_t *flow = vnet_get_flow (flow_index);
  dpdk_device_t *xd = vec_elt_at_index (dm->devices, dev_instance);
  dpdk_flow_entry_t *fe;
  dpdk_flow_lookup_entry_t *fle = 0;
  int rv = 0;
  /* recycle old flow lookup entries only after the main loop counter
     increases - i.e. previously DMA'ed packets were handled */
  if (vec_len (xd->parked_lookup_indexes) > 0 &&
      xd->parked_loop_count != dm->vlib_main->main_loop_count)
    {
      u32 *fl_index;

      vec_foreach (fl_index, xd->parked_lookup_indexes)
	pool_put_index (xd->flow_lookup_entries, *fl_index);
      vec_reset_length (xd->parked_lookup_indexes);
    }
  if (op == VNET_FLOW_DEV_OP_DEL_FLOW)
    {
      fe = vec_elt_at_index (xd->flow_entries, *private_data);

      if ((rv = rte_flow_destroy (xd->device_index, fe->handle,
				  &xd->last_flow_error)))
	return VNET_FLOW_ERROR_INTERNAL;

      if (fe->mark)
	{
	  /* make sure no action is taken for in-flight (marked) packets */
	  fle = pool_elt_at_index (xd->flow_lookup_entries, fe->mark);
	  clib_memset (fle, -1, sizeof (*fle));
	  vec_add1 (xd->parked_lookup_indexes, fe->mark);
	  xd->parked_loop_count = dm->vlib_main->main_loop_count;
	}

      clib_memset (fe, 0, sizeof (*fe));
      pool_put (xd->flow_entries, fe);

      goto disable_rx_offload;
    }
  if (op != VNET_FLOW_DEV_OP_ADD_FLOW)
    return VNET_FLOW_ERROR_NOT_SUPPORTED;

  pool_get (xd->flow_entries, fe);
  fe->flow_index = flow->index;

  if (flow->actions == 0)
    {
      rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
      goto done;
    }
  /* if we need to mark packets, assign one mark */
  if (flow->actions & (VNET_FLOW_ACTION_MARK |
		       VNET_FLOW_ACTION_REDIRECT_TO_NODE |
		       VNET_FLOW_ACTION_BUFFER_ADVANCE))
    {
      /* reserve slot 0 so that a mark of 0 can mean 'no flow' */
      if (xd->flow_lookup_entries == 0)
	pool_get_aligned (xd->flow_lookup_entries, fle,
			  CLIB_CACHE_LINE_BYTES);
      pool_get_aligned (xd->flow_lookup_entries, fle, CLIB_CACHE_LINE_BYTES);
      fe->mark = fle - xd->flow_lookup_entries;

      /* install entry in the lookup table */
      clib_memset (fle, -1, sizeof (*fle));
      if (flow->actions & VNET_FLOW_ACTION_MARK)
	fle->flow_id = flow->mark_flow_id;
      if (flow->actions & VNET_FLOW_ACTION_REDIRECT_TO_NODE)
	fle->next_index = flow->redirect_device_input_next_index;
      if (flow->actions & VNET_FLOW_ACTION_BUFFER_ADVANCE)
	fle->buffer_advance = flow->buffer_advance;
    }
  else
    fe->mark = 0;
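  /* On receive, the dpdk input node reads the mark from the mbuf and
     uses it as an index into flow_lookup_entries to recover flow_id,
     next_index and buffer_advance for the matched packet - hence the
     reserved slot 0 above, so an unmarked packet never aliases a flow. */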
  if ((xd->flags & DPDK_DEVICE_FLAG_RX_FLOW_OFFLOAD) == 0)
    {
      xd->flags |= DPDK_DEVICE_FLAG_RX_FLOW_OFFLOAD;
      dpdk_device_setup (xd);
    }
  switch (flow->type)
    {
    case VNET_FLOW_TYPE_ETHERNET:
    case VNET_FLOW_TYPE_IP4_N_TUPLE:
    case VNET_FLOW_TYPE_IP6_N_TUPLE:
    case VNET_FLOW_TYPE_IP4_VXLAN:
    case VNET_FLOW_TYPE_IP4_GTPC:
    case VNET_FLOW_TYPE_IP4_GTPU:
    case VNET_FLOW_TYPE_IP4_GTPU_IP4:
    case VNET_FLOW_TYPE_IP4_GTPU_IP6:
    case VNET_FLOW_TYPE_IP6_GTPC:
    case VNET_FLOW_TYPE_IP6_GTPU:
    case VNET_FLOW_TYPE_IP6_GTPU_IP4:
    case VNET_FLOW_TYPE_IP6_GTPU_IP6:
    case VNET_FLOW_TYPE_IP4_L2TPV3OIP:
      if ((rv = dpdk_flow_add (xd, flow, fe)))
	goto done;
      break;
    default:
      rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
      goto done;
    }
  *private_data = fe - xd->flow_entries;

done:
  if (rv)
    {
      clib_memset (fe, 0, sizeof (*fe));
      pool_put (xd->flow_entries, fe);
      if (fle)
	{
	  clib_memset (fle, -1, sizeof (*fle));
	  pool_put (xd->flow_lookup_entries, fle);
	}
    }
disable_rx_offload:
  if ((xd->flags & DPDK_DEVICE_FLAG_RX_FLOW_OFFLOAD) != 0
      && pool_elts (xd->flow_entries) == 0)
    {
      xd->flags &= ~DPDK_DEVICE_FLAG_RX_FLOW_OFFLOAD;
      dpdk_device_setup (xd);
    }

  return rv;
}
u8 *
format_dpdk_flow (u8 * s, va_list * args)
{
  u32 dev_instance = va_arg (*args, u32);
  u32 flow_index = va_arg (*args, u32);
  uword private_data = va_arg (*args, uword);
  dpdk_main_t *dm = &dpdk_main;
  dpdk_device_t *xd = vec_elt_at_index (dm->devices, dev_instance);
  dpdk_flow_entry_t *fe;

  if (flow_index == ~0)
    {
      s = format (s, "%-25s: %U\n", "supported flow actions",
		  format_flow_actions, xd->supported_flow_actions);
      s = format (s, "%-25s: %d\n", "last DPDK error type",
		  xd->last_flow_error.type);
      s = format (s, "%-25s: %s\n", "last DPDK error message",
		  xd->last_flow_error.message ? xd->last_flow_error.message :
		  "n/a");
      return s;
    }

  if (private_data >= vec_len (xd->flow_entries))
    return format (s, "unknown flow");

  fe = vec_elt_at_index (xd->flow_entries, private_data);
  s = format (s, "mark %u", fe->mark);

  return s;
}
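/* Typical use, e.g. from a 'show flow' style CLI path (a sketch):

     s = format (s, "%U", format_dpdk_flow, dev_instance, flow_index,
		 private_data);

   Passing flow_index == ~0 prints per-device flow state (supported
   actions and the last DPDK error) instead of a single entry. */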
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */