/*
 * Copyright (c) 2019 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vnet/vnet.h>
#include <vppinfra/vec.h>
#include <vppinfra/format.h>

#include <vnet/ip/ip.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/ethernet/arp_packet.h>
#include <vnet/vxlan/vxlan.h>
#include <dpdk/device/dpdk.h>

#include <dpdk/device/dpdk_priv.h>
#include <vppinfra/error.h>
/* check if flow is L2 flow */
#define FLOW_IS_L2_LAYER(f) \
  (f->type == VNET_FLOW_TYPE_ETHERNET)

/* check if flow is VLAN sensitive */
#define FLOW_IS_VLAN_TAGGED(f) \
  ((f->type == VNET_FLOW_TYPE_IP4_N_TUPLE_TAGGED) || \
   (f->type == VNET_FLOW_TYPE_IP6_N_TUPLE_TAGGED))

/* check if flow is L4 type */
#define FLOW_IS_L4_LAYER(f) \
  ((f->type == VNET_FLOW_TYPE_IP4_N_TUPLE) || \
   (f->type == VNET_FLOW_TYPE_IP6_N_TUPLE))
/* check if flow is L4 tunnel type (range check, so both bounds must hold) */
#define FLOW_IS_L4_TUNNEL_LAYER(f) \
  ((f->type >= VNET_FLOW_TYPE_IP4_VXLAN) && \
   (f->type <= VNET_FLOW_TYPE_IP6_GTPU_IP6))
/* constant structs */
static const struct rte_flow_attr ingress = {.ingress = 1 };
static inline bool
mac_address_is_all_zero (const u8 addr[6])
{
  int i;

  for (i = 0; i < 6; i++)
    if (addr[i] != 0)
      return false;

  return true;
}
static inline void
dpdk_flow_convert_rss_types (u64 type, u64 * dpdk_rss_type)
{
#define BIT_IS_SET(v, b) \
  ((v) & (u64)1 << (b))

  *dpdk_rss_type = 0;

#undef _
#define _(n, f, s) \
      if (n != -1 && BIT_IS_SET(type, n)) \
        *dpdk_rss_type |= f;

  foreach_dpdk_rss_hf
#undef _
  return;
}
static inline enum rte_eth_hash_function
dpdk_flow_convert_rss_func (vnet_rss_function_t func)
{
  enum rte_eth_hash_function rss_func;

  switch (func)
    {
    case VNET_RSS_FUNC_DEFAULT:
      rss_func = RTE_ETH_HASH_FUNCTION_DEFAULT;
      break;
    case VNET_RSS_FUNC_TOEPLITZ:
      rss_func = RTE_ETH_HASH_FUNCTION_TOEPLITZ;
      break;
    case VNET_RSS_FUNC_SIMPLE_XOR:
      rss_func = RTE_ETH_HASH_FUNCTION_SIMPLE_XOR;
      break;
    case VNET_RSS_FUNC_SYMMETRIC_TOEPLITZ:
      rss_func = RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ;
      break;
    default:
      rss_func = RTE_ETH_HASH_FUNCTION_MAX;
      break;
    }

  return rss_func;
}
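
/* Build an rte_flow pattern/action list from a vnet_flow_t and program it on
 * the device.  By convention below, element [0] of each item array is the
 * spec and element [1] is the mask. */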
static int
dpdk_flow_add (dpdk_device_t * xd, vnet_flow_t * f, dpdk_flow_entry_t * fe)
{
  struct rte_flow_item_eth eth[2] = { };
  struct rte_flow_item_ipv4 ip4[2] = { };
  struct rte_flow_item_ipv4 inner_ip4[2] = { };
  struct rte_flow_item_ipv6 ip6[2] = { };
  struct rte_flow_item_ipv6 inner_ip6[2] = { };
  struct rte_flow_item_udp udp[2] = { };
  struct rte_flow_item_tcp tcp[2] = { };
  struct rte_flow_item_gtp gtp[2] = { };
  struct rte_flow_item_l2tpv3oip l2tp[2] = { };
  struct rte_flow_action_mark mark = { 0 };
  struct rte_flow_action_queue queue = { 0 };
  struct rte_flow_action_rss rss = { 0 };
  struct rte_flow_item *item, *items = 0;
  struct rte_flow_action *action, *actions = 0;
  bool fate = false;

  enum
  {
    vxlan_hdr_sz = sizeof (vxlan_header_t),
    raw_sz = sizeof (struct rte_flow_item_raw)
  };

  union
  {
    struct rte_flow_item_raw item;
    u8 val[raw_sz + vxlan_hdr_sz];
  } raw[2];

  u16 src_port = 0, dst_port = 0, src_port_mask = 0, dst_port_mask = 0;
  u8 protocol = IP_PROTOCOL_RESERVED;
  int rv = 0;
  if (f->actions & (~xd->supported_flow_actions))
    return VNET_FLOW_ERROR_NOT_SUPPORTED;
  /* Match items */
  /* Ethernet */
  vec_add2 (items, item, 1);
  item->type = RTE_FLOW_ITEM_TYPE_ETH;
  if (f->type == VNET_FLOW_TYPE_ETHERNET)
    {
      vnet_flow_ethernet_t *te = &f->ethernet;

      clib_memset (&eth[0], 0, sizeof (eth[0]));
      clib_memset (&eth[1], 0, sizeof (eth[1]));

      /* check if SMAC/DMAC/Ether_type assigned */
      if (!mac_address_is_all_zero (te->eth_hdr.dst_address))
        {
          clib_memcpy_fast (&eth[0].dst, &te->eth_hdr.dst_address,
                            sizeof (eth[0].dst));
          clib_memset (&eth[1].dst, 0xFF, sizeof (eth[1].dst));
        }

      if (!mac_address_is_all_zero (te->eth_hdr.src_address))
        {
          clib_memcpy_fast (&eth[0].src, &te->eth_hdr.src_address,
                            sizeof (eth[0].src));
          clib_memset (&eth[1].src, 0xFF, sizeof (eth[1].src));
        }

      if (te->eth_hdr.type)
        {
          eth[0].type = clib_host_to_net_u16 (te->eth_hdr.type);
          eth[1].type = clib_host_to_net_u16 (0xFFFF);
        }

      item->spec = eth;
      item->mask = eth + 1;
    }
  else
    {
      item->spec = NULL;
      item->mask = NULL;
    }
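
  /* For tagged n-tuple flows an empty VLAN item (no spec/mask) is appended
   * below, which only asserts the presence of a VLAN tag without matching
   * any particular TCI value. */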
  if (FLOW_IS_VLAN_TAGGED (f))
    {
      vec_add2 (items, item, 1);
      item->type = RTE_FLOW_ITEM_TYPE_VLAN;
      item->spec = NULL;
      item->mask = NULL;
    }

  if (FLOW_IS_L2_LAYER (f))
    goto pattern_end;
  /* IP */
  vec_add2 (items, item, 1);
  if (f->type == VNET_FLOW_TYPE_IP4_L2TPV3OIP)
    {
      vnet_flow_ip4_l2tpv3oip_t *l2tp = &f->ip4_l2tpv3oip;
      item->type = RTE_FLOW_ITEM_TYPE_IPV4;

      if (!l2tp->src_addr.mask.as_u32 && !l2tp->dst_addr.mask.as_u32)
        {
          item->spec = NULL;
          item->mask = NULL;
        }
      else
        {
          ip4[0].hdr.src_addr = l2tp->src_addr.addr.as_u32;
          ip4[1].hdr.src_addr = l2tp->src_addr.mask.as_u32;
          ip4[0].hdr.dst_addr = l2tp->dst_addr.addr.as_u32;
          ip4[1].hdr.dst_addr = l2tp->dst_addr.mask.as_u32;
          item->spec = ip4;
          item->mask = ip4 + 1;
        }
      protocol = l2tp->protocol;
    }
  else if ((f->type == VNET_FLOW_TYPE_IP6_N_TUPLE) ||
           (f->type == VNET_FLOW_TYPE_IP6_GTPC) ||
           (f->type == VNET_FLOW_TYPE_IP6_GTPU) ||
           (f->type == VNET_FLOW_TYPE_IP6_GTPU_IP4) ||
           (f->type == VNET_FLOW_TYPE_IP6_GTPU_IP6))
    {
      vnet_flow_ip6_n_tuple_t *t6 = &f->ip6_n_tuple;
      item->type = RTE_FLOW_ITEM_TYPE_IPV6;

      if (!clib_memcmp (&t6->src_addr.mask, &zero_addr, 16) &&
          !clib_memcmp (&t6->dst_addr.mask, &zero_addr, 16))
        {
          item->spec = NULL;
          item->mask = NULL;
        }
      else
        {
          clib_memcpy_fast (ip6[0].hdr.src_addr, &t6->src_addr.addr, 16);
          clib_memcpy_fast (ip6[1].hdr.src_addr, &t6->src_addr.mask, 16);
          clib_memcpy_fast (ip6[0].hdr.dst_addr, &t6->dst_addr.addr, 16);
          clib_memcpy_fast (ip6[1].hdr.dst_addr, &t6->dst_addr.mask, 16);
          item->spec = ip6;
          item->mask = ip6 + 1;
        }

      src_port = t6->src_port.port;
      dst_port = t6->dst_port.port;
      src_port_mask = t6->src_port.mask;
      dst_port_mask = t6->dst_port.mask;
      protocol = t6->protocol;
    }
  else if ((f->type == VNET_FLOW_TYPE_IP4_N_TUPLE) ||
           (f->type == VNET_FLOW_TYPE_IP4_GTPC) ||
           (f->type == VNET_FLOW_TYPE_IP4_GTPU) ||
           (f->type == VNET_FLOW_TYPE_IP4_GTPU_IP4) ||
           (f->type == VNET_FLOW_TYPE_IP4_GTPU_IP6))
    {
      vnet_flow_ip4_n_tuple_t *t4 = &f->ip4_n_tuple;
      item->type = RTE_FLOW_ITEM_TYPE_IPV4;

      if (!t4->src_addr.mask.as_u32 && !t4->dst_addr.mask.as_u32)
        {
          item->spec = NULL;
          item->mask = NULL;
        }
      else
        {
          ip4[0].hdr.src_addr = t4->src_addr.addr.as_u32;
          ip4[1].hdr.src_addr = t4->src_addr.mask.as_u32;
          ip4[0].hdr.dst_addr = t4->dst_addr.addr.as_u32;
          ip4[1].hdr.dst_addr = t4->dst_addr.mask.as_u32;
          item->spec = ip4;
          item->mask = ip4 + 1;
        }

      src_port = t4->src_port.port;
      dst_port = t4->dst_port.port;
      src_port_mask = t4->src_port.mask;
      dst_port_mask = t4->dst_port.mask;
      protocol = t4->protocol;
    }
  else if (f->type == VNET_FLOW_TYPE_IP4_VXLAN)
    {
      vnet_flow_ip4_vxlan_t *v4 = &f->ip4_vxlan;
      ip4[0].hdr.src_addr = v4->src_addr.as_u32;
      ip4[1].hdr.src_addr = -1;
      ip4[0].hdr.dst_addr = v4->dst_addr.as_u32;
      ip4[1].hdr.dst_addr = -1;
      item->type = RTE_FLOW_ITEM_TYPE_IPV4;
      item->spec = ip4;
      item->mask = ip4 + 1;

      dst_port = v4->dst_port;
      dst_port_mask = -1;
      src_port = 0;
      src_port_mask = 0;
      protocol = IP_PROTOCOL_UDP;
    }
  else
    {
      rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
      goto done;
    }
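
  /* L4 match: protocol and the port/mask values were filled in by the L3
   * branch above; IP_PROTOCOL_RESERVED means no L4 protocol was requested
   * and the flow is rejected below. */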
  if (protocol == IP_PROTOCOL_UDP)
    {
      vec_add2 (items, item, 1);
      item->type = RTE_FLOW_ITEM_TYPE_UDP;

      if ((src_port_mask == 0) && (dst_port_mask == 0))
        {
          item->spec = NULL;
          item->mask = NULL;
        }
      else
        {
          udp[0].hdr.src_port = clib_host_to_net_u16 (src_port);
          udp[1].hdr.src_port = clib_host_to_net_u16 (src_port_mask);
          udp[0].hdr.dst_port = clib_host_to_net_u16 (dst_port);
          udp[1].hdr.dst_port = clib_host_to_net_u16 (dst_port_mask);
          item->spec = udp;
          item->mask = udp + 1;
        }
    }
  else if (protocol == IP_PROTOCOL_TCP)
    {
      vec_add2 (items, item, 1);
      item->type = RTE_FLOW_ITEM_TYPE_TCP;

      if ((src_port_mask == 0) && (dst_port_mask == 0))
        {
          item->spec = NULL;
          item->mask = NULL;
        }
      else
        {
          tcp[0].hdr.src_port = clib_host_to_net_u16 (src_port);
          tcp[1].hdr.src_port = clib_host_to_net_u16 (src_port_mask);
          tcp[0].hdr.dst_port = clib_host_to_net_u16 (dst_port);
          tcp[1].hdr.dst_port = clib_host_to_net_u16 (dst_port_mask);
          item->spec = tcp;
          item->mask = tcp + 1;
        }
    }
  else if (protocol == IP_PROTOCOL_RESERVED)
    {
      rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
      goto done;
    }
  /* Tunnel header match */
  if (f->type == VNET_FLOW_TYPE_IP4_L2TPV3OIP)
    {
      vec_add2 (items, item, 1);
      item->type = RTE_FLOW_ITEM_TYPE_L2TPV3OIP;

      vnet_flow_ip4_l2tpv3oip_t *tl2tp = &f->ip4_l2tpv3oip;
      l2tp[0].session_id = clib_host_to_net_u32 (tl2tp->session_id);
      l2tp[1].session_id = ~0;

      item->spec = l2tp;
      item->mask = l2tp + 1;
    }
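
  /* The VXLAN VNI is matched with a RAW item: the VXLAN header bytes are
   * stored after the rte_flow_item_raw header in raw[0] (spec) and raw[1]
   * (mask) and matched relative to the end of the preceding UDP item. */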
  if (f->type == VNET_FLOW_TYPE_IP4_VXLAN)
    {
      u32 vni = f->ip4_vxlan.vni;
      vxlan_header_t spec_hdr = {
        .flags = VXLAN_FLAGS_I,
        .vni_reserved = clib_host_to_net_u32 (vni << 8)
      };
      vxlan_header_t mask_hdr = {
        .flags = 0xff,
        .vni_reserved = clib_host_to_net_u32 (((u32) - 1) << 8)
      };

      clib_memset (raw, 0, sizeof raw);
      raw[0].item.relative = 1;
      raw[0].item.length = vxlan_hdr_sz;

      clib_memcpy_fast (raw[0].val + raw_sz, &spec_hdr, vxlan_hdr_sz);
      raw[0].item.pattern = raw[0].val + raw_sz;
      clib_memcpy_fast (raw[1].val + raw_sz, &mask_hdr, vxlan_hdr_sz);
      raw[1].item.pattern = raw[1].val + raw_sz;

      vec_add2 (items, item, 1);
      item->type = RTE_FLOW_ITEM_TYPE_RAW;
      item->spec = raw;
      item->mask = raw + 1;
    }
  else if (f->type == VNET_FLOW_TYPE_IP4_GTPC)
    {
      vnet_flow_ip4_gtpc_t *gc = &f->ip4_gtpc;
      gtp[0].teid = clib_host_to_net_u32 (gc->teid);
      gtp[1].teid = ~0;

      vec_add2 (items, item, 1);
      item->type = RTE_FLOW_ITEM_TYPE_GTPC;
      item->spec = gtp;
      item->mask = gtp + 1;
    }
  else if (f->type == VNET_FLOW_TYPE_IP4_GTPU)
    {
      vnet_flow_ip4_gtpu_t *gu = &f->ip4_gtpu;
      gtp[0].teid = clib_host_to_net_u32 (gu->teid);
      gtp[1].teid = ~0;

      vec_add2 (items, item, 1);
      item->type = RTE_FLOW_ITEM_TYPE_GTPU;
      item->spec = gtp;
      item->mask = gtp + 1;
    }
  else if ((f->type == VNET_FLOW_TYPE_IP4_GTPU_IP4) ||
           (f->type == VNET_FLOW_TYPE_IP4_GTPU_IP6))
    {
      vnet_flow_ip4_gtpu_t *gu = &f->ip4_gtpu;
      gtp[0].teid = clib_host_to_net_u32 (gu->teid);
      gtp[1].teid = ~0;

      vec_add2 (items, item, 1);
      item->type = RTE_FLOW_ITEM_TYPE_GTPU;
      item->spec = gtp;
      item->mask = gtp + 1;
      /* inner IP4 header */
      if (f->type == VNET_FLOW_TYPE_IP4_GTPU_IP4)
        {
          vec_add2 (items, item, 1);
          item->type = RTE_FLOW_ITEM_TYPE_IPV4;

          vnet_flow_ip4_gtpu_ip4_t *gu4 = &f->ip4_gtpu_ip4;
          if (!gu4->inner_src_addr.mask.as_u32 &&
              !gu4->inner_dst_addr.mask.as_u32)
            {
              item->spec = NULL;
              item->mask = NULL;
            }
          else
            {
              inner_ip4[0].hdr.src_addr = gu4->inner_src_addr.addr.as_u32;
              inner_ip4[1].hdr.src_addr = gu4->inner_src_addr.mask.as_u32;
              inner_ip4[0].hdr.dst_addr = gu4->inner_dst_addr.addr.as_u32;
              inner_ip4[1].hdr.dst_addr = gu4->inner_dst_addr.mask.as_u32;
              item->spec = inner_ip4;
              item->mask = inner_ip4 + 1;
            }
        }
      else if (f->type == VNET_FLOW_TYPE_IP4_GTPU_IP6)
        {
          ip6_address_t zero_addr;
          vnet_flow_ip4_gtpu_ip6_t *gu6 = &f->ip4_gtpu_ip6;

          clib_memset (&zero_addr, 0, sizeof (ip6_address_t));

          vec_add2 (items, item, 1);
          item->type = RTE_FLOW_ITEM_TYPE_IPV6;

          if (!clib_memcmp (&gu6->inner_src_addr.mask, &zero_addr, 16) &&
              !clib_memcmp (&gu6->inner_dst_addr.mask, &zero_addr, 16))
            {
              item->spec = NULL;
              item->mask = NULL;
            }
          else
            {
              clib_memcpy_fast (inner_ip6[0].hdr.src_addr,
                                &gu6->inner_src_addr.addr, 16);
              clib_memcpy_fast (inner_ip6[1].hdr.src_addr,
                                &gu6->inner_src_addr.mask, 16);
              clib_memcpy_fast (inner_ip6[0].hdr.dst_addr,
                                &gu6->inner_dst_addr.addr, 16);
              clib_memcpy_fast (inner_ip6[1].hdr.dst_addr,
                                &gu6->inner_dst_addr.mask, 16);
              item->spec = inner_ip6;
              item->mask = inner_ip6 + 1;
            }
        }
    }
  else if (f->type == VNET_FLOW_TYPE_IP6_GTPC)
    {
      vnet_flow_ip6_gtpc_t *gc = &f->ip6_gtpc;
      gtp[0].teid = clib_host_to_net_u32 (gc->teid);
      gtp[1].teid = ~0;

      vec_add2 (items, item, 1);
      item->type = RTE_FLOW_ITEM_TYPE_GTPC;
      item->spec = gtp;
      item->mask = gtp + 1;
    }
  else if (f->type == VNET_FLOW_TYPE_IP6_GTPU)
    {
      vnet_flow_ip6_gtpu_t *gu = &f->ip6_gtpu;
      gtp[0].teid = clib_host_to_net_u32 (gu->teid);
      gtp[1].teid = ~0;

      vec_add2 (items, item, 1);
      item->type = RTE_FLOW_ITEM_TYPE_GTPU;
      item->spec = gtp;
      item->mask = gtp + 1;
    }
  else if ((f->type == VNET_FLOW_TYPE_IP6_GTPU_IP4) ||
           (f->type == VNET_FLOW_TYPE_IP6_GTPU_IP6))
    {
      vnet_flow_ip6_gtpu_t *gu = &f->ip6_gtpu;
      gtp[0].teid = clib_host_to_net_u32 (gu->teid);
      gtp[1].teid = ~0;

      vec_add2 (items, item, 1);
      item->type = RTE_FLOW_ITEM_TYPE_GTPU;
      item->spec = gtp;
      item->mask = gtp + 1;
      /* inner IP4 header */
      if (f->type == VNET_FLOW_TYPE_IP6_GTPU_IP4)
        {
          vec_add2 (items, item, 1);
          item->type = RTE_FLOW_ITEM_TYPE_IPV4;

          vnet_flow_ip6_gtpu_ip4_t *gu4 = &f->ip6_gtpu_ip4;

          if (!gu4->inner_src_addr.mask.as_u32 &&
              !gu4->inner_dst_addr.mask.as_u32)
            {
              item->spec = NULL;
              item->mask = NULL;
            }
          else
            {
              inner_ip4[0].hdr.src_addr = gu4->inner_src_addr.addr.as_u32;
              inner_ip4[1].hdr.src_addr = gu4->inner_src_addr.mask.as_u32;
              inner_ip4[0].hdr.dst_addr = gu4->inner_dst_addr.addr.as_u32;
              inner_ip4[1].hdr.dst_addr = gu4->inner_dst_addr.mask.as_u32;
              item->spec = inner_ip4;
              item->mask = inner_ip4 + 1;
            }
        }
      if (f->type == VNET_FLOW_TYPE_IP6_GTPU_IP6)
        {
          ip6_address_t zero_addr;
          vnet_flow_ip6_gtpu_ip6_t *gu6 = &f->ip6_gtpu_ip6;

          clib_memset (&zero_addr, 0, sizeof (ip6_address_t));

          vec_add2 (items, item, 1);
          item->type = RTE_FLOW_ITEM_TYPE_IPV6;

          if (!clib_memcmp (&gu6->inner_src_addr.mask, &zero_addr, 16) &&
              !clib_memcmp (&gu6->inner_dst_addr.mask, &zero_addr, 16))
            {
              item->spec = NULL;
              item->mask = NULL;
            }
          else
            {
              clib_memcpy_fast (inner_ip6[0].hdr.src_addr,
                                &gu6->inner_src_addr.addr, 16);
              clib_memcpy_fast (inner_ip6[1].hdr.src_addr,
                                &gu6->inner_src_addr.mask, 16);
              clib_memcpy_fast (inner_ip6[0].hdr.dst_addr,
                                &gu6->inner_dst_addr.addr, 16);
              clib_memcpy_fast (inner_ip6[1].hdr.dst_addr,
                                &gu6->inner_dst_addr.mask, 16);
              item->spec = inner_ip6;
              item->mask = inner_ip6 + 1;
            }
        }
    }
pattern_end:
  vec_add2 (items, item, 1);
  item->type = RTE_FLOW_ITEM_TYPE_END;
  /* Actions */
  /* Only one 'fate' can be assigned */
  if (f->actions & VNET_FLOW_ACTION_REDIRECT_TO_QUEUE)
    {
      vec_add2 (actions, action, 1);
      queue.index = f->redirect_queue;
      action->type = RTE_FLOW_ACTION_TYPE_QUEUE;
      action->conf = &queue;
      fate = true;
    }
  if (f->actions & VNET_FLOW_ACTION_DROP)
    {
      vec_add2 (actions, action, 1);
      action->type = RTE_FLOW_ACTION_TYPE_DROP;
      if (fate == true)
        {
          rv = VNET_FLOW_ERROR_INTERNAL;
          goto done;
        }
      else
        fate = true;
    }
  if (f->actions & VNET_FLOW_ACTION_RSS)
    {
      u64 rss_type = 0;

      vec_add2 (actions, action, 1);
      action->type = RTE_FLOW_ACTION_TYPE_RSS;
      action->conf = &rss;

      /* convert types to DPDK rss bitmask */
      dpdk_flow_convert_rss_types (f->rss_types, &rss_type);

      rss.types = rss_type;
      if ((rss.func = dpdk_flow_convert_rss_func (f->rss_fun)) ==
          RTE_ETH_HASH_FUNCTION_MAX)
        {
          rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
          goto done;
        }

      if (fate == true)
        {
          rv = VNET_FLOW_ERROR_INTERNAL;
          goto done;
        }
      else
        fate = true;
    }

  if (fate == false)
    {
      vec_add2 (actions, action, 1);
      action->type = RTE_FLOW_ACTION_TYPE_PASSTHRU;
    }
  if (f->actions & VNET_FLOW_ACTION_MARK)
    {
      vec_add2 (actions, action, 1);
      mark.id = fe->mark;
      action->type = RTE_FLOW_ACTION_TYPE_MARK;
      action->conf = &mark;
    }

  vec_add2 (actions, action, 1);
  action->type = RTE_FLOW_ACTION_TYPE_END;
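
  /* Validate the pattern/action list first so that unsupported rules are
   * reported through xd->last_flow_error before anything is programmed. */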
  rv = rte_flow_validate (xd->device_index, &ingress, items, actions,
                          &xd->last_flow_error);

  if (rv)
    {
      if (rv == -EINVAL)
        rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
      else if (rv == -EEXIST)
        rv = VNET_FLOW_ERROR_ALREADY_EXISTS;
      else
        rv = VNET_FLOW_ERROR_INTERNAL;
      goto done;
    }

  fe->handle = rte_flow_create (xd->device_index, &ingress, items, actions,
                                &xd->last_flow_error);

  if (!fe->handle)
    rv = VNET_FLOW_ERROR_NOT_SUPPORTED;

done:
  vec_free (items);
  vec_free (actions);
  return rv;
}
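
/* vnet flow device op handler: adds or deletes hardware flows on a DPDK
 * device.  *private_data carries the dpdk_flow_entry_t index between the
 * add and delete calls for a given flow. */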
int
dpdk_flow_ops_fn (vnet_main_t * vnm, vnet_flow_dev_op_t op, u32 dev_instance,
                  u32 flow_index, uword * private_data)
{
  dpdk_main_t *dm = &dpdk_main;
  vnet_flow_t *flow = vnet_get_flow (flow_index);
  dpdk_device_t *xd = vec_elt_at_index (dm->devices, dev_instance);
  dpdk_flow_entry_t *fe;
  dpdk_flow_lookup_entry_t *fle = 0;
  int rv;
  /* recycle old flow lookup entries only after the main loop counter
     increases - i.e. previously DMA'ed packets were handled */
  if (vec_len (xd->parked_lookup_indexes) > 0 &&
      xd->parked_loop_count != dm->vlib_main->main_loop_count)
    {
      u32 *fl_index;

      vec_foreach (fl_index, xd->parked_lookup_indexes)
        pool_put_index (xd->flow_lookup_entries, *fl_index);
      vec_reset_length (xd->parked_lookup_indexes);
    }
  if (op == VNET_FLOW_DEV_OP_DEL_FLOW)
    {
      fe = vec_elt_at_index (xd->flow_entries, *private_data);

      if ((rv = rte_flow_destroy (xd->device_index, fe->handle,
                                  &xd->last_flow_error)))
        return VNET_FLOW_ERROR_INTERNAL;

      if (fe->mark)
        {
          /* make sure no action is taken for in-flight (marked) packets */
          fle = pool_elt_at_index (xd->flow_lookup_entries, fe->mark);
          clib_memset (fle, -1, sizeof (*fle));
          vec_add1 (xd->parked_lookup_indexes, fe->mark);
          xd->parked_loop_count = dm->vlib_main->main_loop_count;
        }

      clib_memset (fe, 0, sizeof (*fe));
      pool_put (xd->flow_entries, fe);

      goto disable_rx_offload;
    }
  if (op != VNET_FLOW_DEV_OP_ADD_FLOW)
    return VNET_FLOW_ERROR_NOT_SUPPORTED;

  pool_get (xd->flow_entries, fe);
  fe->flow_index = flow->index;

  if (flow->actions == 0)
    {
      rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
      goto done;
    }
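
  /* A non-zero mark written by the NIC into the mbuf lets the dpdk-input
   * node map a received packet back to this flow via
   * xd->flow_lookup_entries. */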
  /* if we need to mark packets, assign one mark */
  if (flow->actions & (VNET_FLOW_ACTION_MARK |
                       VNET_FLOW_ACTION_REDIRECT_TO_NODE |
                       VNET_FLOW_ACTION_BUFFER_ADVANCE))
    {
      /* reserve slot 0 */
      if (xd->flow_lookup_entries == 0)
        pool_get_aligned (xd->flow_lookup_entries, fle,
                          CLIB_CACHE_LINE_BYTES);
      pool_get_aligned (xd->flow_lookup_entries, fle, CLIB_CACHE_LINE_BYTES);
      fe->mark = fle - xd->flow_lookup_entries;

      /* install entry in the lookup table */
      clib_memset (fle, -1, sizeof (*fle));
      if (flow->actions & VNET_FLOW_ACTION_MARK)
        fle->flow_id = flow->mark_flow_id;
      if (flow->actions & VNET_FLOW_ACTION_REDIRECT_TO_NODE)
        fle->next_index = flow->redirect_device_input_next_index;
      if (flow->actions & VNET_FLOW_ACTION_BUFFER_ADVANCE)
        fle->buffer_advance = flow->buffer_advance;
    }
  else
    fe->mark = 0;
  if ((xd->flags & DPDK_DEVICE_FLAG_RX_FLOW_OFFLOAD) == 0)
    {
      xd->flags |= DPDK_DEVICE_FLAG_RX_FLOW_OFFLOAD;
      dpdk_device_setup (xd);
    }
  switch (flow->type)
    {
    case VNET_FLOW_TYPE_ETHERNET:
    case VNET_FLOW_TYPE_IP4_N_TUPLE:
    case VNET_FLOW_TYPE_IP6_N_TUPLE:
    case VNET_FLOW_TYPE_IP4_VXLAN:
    case VNET_FLOW_TYPE_IP4_GTPC:
    case VNET_FLOW_TYPE_IP4_GTPU:
    case VNET_FLOW_TYPE_IP4_GTPU_IP4:
    case VNET_FLOW_TYPE_IP4_GTPU_IP6:
    case VNET_FLOW_TYPE_IP6_GTPC:
    case VNET_FLOW_TYPE_IP6_GTPU:
    case VNET_FLOW_TYPE_IP6_GTPU_IP4:
    case VNET_FLOW_TYPE_IP6_GTPU_IP6:
    case VNET_FLOW_TYPE_IP4_L2TPV3OIP:
      if ((rv = dpdk_flow_add (xd, flow, fe)))
        goto done;
      break;
    default:
      rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
      goto done;
    }

  *private_data = fe - xd->flow_entries;
done:
  if (rv)
    {
      clib_memset (fe, 0, sizeof (*fe));
      pool_put (xd->flow_entries, fe);
      if (fle)
        {
          clib_memset (fle, -1, sizeof (*fle));
          pool_put (xd->flow_lookup_entries, fle);
        }
    }
disable_rx_offload:
  if ((xd->flags & DPDK_DEVICE_FLAG_RX_FLOW_OFFLOAD) != 0
      && pool_elts (xd->flow_entries) == 0)
    {
      xd->flags &= ~DPDK_DEVICE_FLAG_RX_FLOW_OFFLOAD;
      dpdk_device_setup (xd);
    }

  return rv;
}
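
/* Debug CLI formatter: with flow_index == ~0 it prints the device's
 * supported flow actions and the last rte_flow error, otherwise the mark
 * assigned to the given flow entry. */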
u8 *
format_dpdk_flow (u8 * s, va_list * args)
{
  u32 dev_instance = va_arg (*args, u32);
  u32 flow_index = va_arg (*args, u32);
  uword private_data = va_arg (*args, uword);
  dpdk_main_t *dm = &dpdk_main;
  dpdk_device_t *xd = vec_elt_at_index (dm->devices, dev_instance);
  dpdk_flow_entry_t *fe;

  if (flow_index == ~0)
    {
      s = format (s, "%-25s: %U\n", "supported flow actions",
                  format_flow_actions, xd->supported_flow_actions);
      s = format (s, "%-25s: %d\n", "last DPDK error type",
                  xd->last_flow_error.type);
      s = format (s, "%-25s: %s\n", "last DPDK error message",
                  xd->last_flow_error.message ? xd->last_flow_error.message :
                  "n/a");
      return s;
    }

  if (private_data >= vec_len (xd->flow_entries))
    return format (s, "unknown flow");

  fe = vec_elt_at_index (xd->flow_entries, private_data);
  s = format (s, "mark %u", fe->mark);

  return s;
}
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */