/*
 * Copyright (c) 2019 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vnet/vnet.h>
#include <vppinfra/vec.h>
#include <vppinfra/format.h>

#include <vnet/ip/ip.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/ethernet/arp_packet.h>
#include <vnet/vxlan/vxlan.h>
#include <dpdk/device/dpdk.h>

#include <dpdk/device/dpdk_priv.h>
#include <vppinfra/error.h>
/* check if flow is L2 flow */
#define FLOW_IS_L2_LAYER(f) \
  (f->type == VNET_FLOW_TYPE_ETHERNET)

/* check if flow is VLAN sensitive */
#define FLOW_IS_VLAN_TAGGED(f) \
  ((f->type == VNET_FLOW_TYPE_IP4_N_TUPLE_TAGGED) || \
   (f->type == VNET_FLOW_TYPE_IP6_N_TUPLE_TAGGED))

/* check if flow is L4 type */
#define FLOW_IS_L4_LAYER(f) \
  ((f->type == VNET_FLOW_TYPE_IP4_N_TUPLE) || \
   (f->type == VNET_FLOW_TYPE_IP6_N_TUPLE))
/* check if flow is L4 tunnel type */
#define FLOW_IS_L4_TUNNEL_LAYER(f) \
  ((f->type >= VNET_FLOW_TYPE_IP4_VXLAN) && \
   (f->type <= VNET_FLOW_TYPE_IP6_GTPU_IP6))
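/* note: the range test above assumes the tunnel flow types are laid out
 * contiguously in the vnet_flow_type_t enum, from IP4_VXLAN up to
 * IP6_GTPU_IP6 */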
/* constant structs */
static const struct rte_flow_attr ingress = {.ingress = 1 };
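/* return true if all 6 bytes of the MAC address are zero, i.e. the
 * field was left unset in the flow spec */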
static inline bool
mac_address_is_all_zero (const u8 addr[6])
{
  int i;

  for (i = 0; i < 6; i++)
    if (addr[i] != 0)
      return false;

  return true;
}
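/* translate the vnet RSS type bitmap into the DPDK ETH_RSS_* bitmask;
 * driven by the foreach_dpdk_rss_hf x-macro from the DPDK plugin headers */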
static inline void
dpdk_flow_convert_rss_types (u64 type, u64 * dpdk_rss_type)
{
#define BIT_IS_SET(v, b) \
  ((v) & (u64)1<<(b))

  *dpdk_rss_type = 0;

#undef _
#define _(n, f, s) \
      if (n != -1 && BIT_IS_SET(type, n)) \
        *dpdk_rss_type |= f;

  foreach_dpdk_rss_hf
#undef _
}
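/* map a vnet RSS hash function onto its DPDK equivalent;
 * RTE_ETH_HASH_FUNCTION_MAX signals an unsupported function */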
static inline enum rte_eth_hash_function
dpdk_flow_convert_rss_func (vnet_rss_function_t func)
{
  enum rte_eth_hash_function rss_func;

  switch (func)
    {
    case VNET_RSS_FUNC_DEFAULT:
      rss_func = RTE_ETH_HASH_FUNCTION_DEFAULT;
      break;
    case VNET_RSS_FUNC_TOEPLITZ:
      rss_func = RTE_ETH_HASH_FUNCTION_TOEPLITZ;
      break;
    case VNET_RSS_FUNC_SIMPLE_XOR:
      rss_func = RTE_ETH_HASH_FUNCTION_SIMPLE_XOR;
      break;
    case VNET_RSS_FUNC_SYMMETRIC_TOEPLITZ:
      rss_func = RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ;
      break;
    default:
      rss_func = RTE_ETH_HASH_FUNCTION_MAX;
      break;
    }

  return rss_func;
}
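/*
 * Translate a vnet_flow_t into an rte_flow and program it on the device.
 * Every match item below is built as a spec/mask pair: element [0] of
 * each two-element array holds the header values to match, element [1]
 * the mask selecting the significant bits, e.g.
 *
 *   ip4[0].hdr.src_addr = addr->as_u32;  ip4[1].hdr.src_addr = mask->as_u32;
 *
 * On success the created flow handle is stored in fe->handle.
 */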
static int
dpdk_flow_add (dpdk_device_t * xd, vnet_flow_t * f, dpdk_flow_entry_t * fe)
{
  struct rte_flow_item_eth eth[2] = { };
  struct rte_flow_item_ipv4 ip4[2] = { };
  struct rte_flow_item_ipv4 inner_ip4[2] = { };
  struct rte_flow_item_ipv6 ip6[2] = { };
  struct rte_flow_item_ipv6 inner_ip6[2] = { };
  struct rte_flow_item_udp udp[2] = { };
  struct rte_flow_item_tcp tcp[2] = { };
  struct rte_flow_item_gtp gtp[2] = { };
  struct rte_flow_item_l2tpv3oip l2tp[2] = { };
  struct rte_flow_item_esp esp[2] = { };
  struct rte_flow_item_ah ah[2] = { };
  struct rte_flow_action_mark mark = { 0 };
  struct rte_flow_action_queue queue = { 0 };
  struct rte_flow_action_rss rss = { 0 };
  struct rte_flow_item *item, *items = 0;
  struct rte_flow_action *action, *actions = 0;
  bool fate = false;

  enum
  {
    vxlan_hdr_sz = sizeof (vxlan_header_t),
    raw_sz = sizeof (struct rte_flow_item_raw)
  };

  union
  {
    struct rte_flow_item_raw item;
    u8 val[raw_sz + vxlan_hdr_sz];
  } raw[2];

  u16 src_port = 0, dst_port = 0, src_port_mask = 0, dst_port_mask = 0;
  u8 protocol = IP_PROTOCOL_RESERVED;
  int rv = 0;

  if (f->actions & (~xd->supported_flow_actions))
    return VNET_FLOW_ERROR_NOT_SUPPORTED;
  /* Match items */
  /* Ethernet */
  vec_add2 (items, item, 1);
  item->type = RTE_FLOW_ITEM_TYPE_ETH;
  if (f->type == VNET_FLOW_TYPE_ETHERNET)
    {
      vnet_flow_ethernet_t *te = &f->ethernet;

      clib_memset (&eth[0], 0, sizeof (eth[0]));
      clib_memset (&eth[1], 0, sizeof (eth[1]));

      /* check if SMAC/DMAC/Ether_type assigned */
      if (!mac_address_is_all_zero (te->eth_hdr.dst_address))
        {
          clib_memcpy_fast (&eth[0].dst, &te->eth_hdr.dst_address,
                            sizeof (eth[0].dst));
          clib_memset (&eth[1].dst, 0xFF, sizeof (eth[1].dst));
        }

      if (!mac_address_is_all_zero (te->eth_hdr.src_address))
        {
          clib_memcpy_fast (&eth[0].src, &te->eth_hdr.src_address,
                            sizeof (eth[0].src));
          clib_memset (&eth[1].src, 0xFF, sizeof (eth[1].src));
        }

      if (te->eth_hdr.type)
        {
          eth[0].type = clib_host_to_net_u16 (te->eth_hdr.type);
          eth[1].type = clib_host_to_net_u16 (0xFFFF);
        }

      item->spec = eth;
      item->mask = eth + 1;
    }
  else
    {
      item->spec = NULL;
      item->mask = NULL;
    }
  if (FLOW_IS_VLAN_TAGGED (f))
    {
      vec_add2 (items, item, 1);
      item->type = RTE_FLOW_ITEM_TYPE_VLAN;
      item->spec = NULL;
      item->mask = NULL;
    }
  if (FLOW_IS_L2_LAYER (f))
    goto pattern_end;

  /* IP */
  vec_add2 (items, item, 1);
  if (f->type == VNET_FLOW_TYPE_IP4_L2TPV3OIP)
    {
      vnet_flow_ip4_l2tpv3oip_t *l2tp = &f->ip4_l2tpv3oip;
      item->type = RTE_FLOW_ITEM_TYPE_IPV4;

      if (!l2tp->src_addr.mask.as_u32 && !l2tp->dst_addr.mask.as_u32)
        {
          item->spec = NULL;
          item->mask = NULL;
        }
      else
        {
          ip4[0].hdr.src_addr = l2tp->src_addr.addr.as_u32;
          ip4[1].hdr.src_addr = l2tp->src_addr.mask.as_u32;
          ip4[0].hdr.dst_addr = l2tp->dst_addr.addr.as_u32;
          ip4[1].hdr.dst_addr = l2tp->dst_addr.mask.as_u32;
          item->spec = ip4;
          item->mask = ip4 + 1;
        }
      protocol = l2tp->protocol;
    }
  else if (f->type == VNET_FLOW_TYPE_IP4_IPSEC_ESP)
    {
      vnet_flow_ip4_ipsec_esp_t *tesp = &f->ip4_ipsec_esp;
      item->type = RTE_FLOW_ITEM_TYPE_IPV4;

      if (!tesp->src_addr.mask.as_u32 && !tesp->dst_addr.mask.as_u32)
        {
          item->spec = NULL;
          item->mask = NULL;
        }
      else
        {
          ip4[0].hdr.src_addr = tesp->src_addr.addr.as_u32;
          ip4[1].hdr.src_addr = tesp->src_addr.mask.as_u32;
          ip4[0].hdr.dst_addr = tesp->dst_addr.addr.as_u32;
          ip4[1].hdr.dst_addr = tesp->dst_addr.mask.as_u32;
          item->spec = ip4;
          item->mask = ip4 + 1;
        }
      protocol = tesp->protocol;
    }
  else if (f->type == VNET_FLOW_TYPE_IP4_IPSEC_AH)
    {
      vnet_flow_ip4_ipsec_ah_t *tah = &f->ip4_ipsec_ah;
      item->type = RTE_FLOW_ITEM_TYPE_IPV4;

      if (!tah->src_addr.mask.as_u32 && !tah->dst_addr.mask.as_u32)
        {
          item->spec = NULL;
          item->mask = NULL;
        }
      else
        {
          ip4[0].hdr.src_addr = tah->src_addr.addr.as_u32;
          ip4[1].hdr.src_addr = tah->src_addr.mask.as_u32;
          ip4[0].hdr.dst_addr = tah->dst_addr.addr.as_u32;
          ip4[1].hdr.dst_addr = tah->dst_addr.mask.as_u32;
          item->spec = ip4;
          item->mask = ip4 + 1;
        }
      protocol = tah->protocol;
    }
  else if ((f->type == VNET_FLOW_TYPE_IP6_N_TUPLE) ||
           (f->type == VNET_FLOW_TYPE_IP6_GTPC) ||
           (f->type == VNET_FLOW_TYPE_IP6_GTPU) ||
           (f->type == VNET_FLOW_TYPE_IP6_GTPU_IP4) ||
           (f->type == VNET_FLOW_TYPE_IP6_GTPU_IP6))
    {
      vnet_flow_ip6_n_tuple_t *t6 = &f->ip6_n_tuple;
      ip6_address_t zero_addr = { };
      item->type = RTE_FLOW_ITEM_TYPE_IPV6;

      if (!clib_memcmp (&t6->src_addr.mask, &zero_addr, 16) &&
          !clib_memcmp (&t6->dst_addr.mask, &zero_addr, 16))
        {
          item->spec = NULL;
          item->mask = NULL;
        }
      else
        {
          clib_memcpy_fast (ip6[0].hdr.src_addr, &t6->src_addr.addr, 16);
          clib_memcpy_fast (ip6[1].hdr.src_addr, &t6->src_addr.mask, 16);
          clib_memcpy_fast (ip6[0].hdr.dst_addr, &t6->dst_addr.addr, 16);
          clib_memcpy_fast (ip6[1].hdr.dst_addr, &t6->dst_addr.mask, 16);
          item->spec = ip6;
          item->mask = ip6 + 1;
        }

      src_port = t6->src_port.port;
      dst_port = t6->dst_port.port;
      src_port_mask = t6->src_port.mask;
      dst_port_mask = t6->dst_port.mask;
      protocol = t6->protocol;
    }
  else if ((f->type == VNET_FLOW_TYPE_IP4_N_TUPLE) ||
           (f->type == VNET_FLOW_TYPE_IP4_GTPC) ||
           (f->type == VNET_FLOW_TYPE_IP4_GTPU) ||
           (f->type == VNET_FLOW_TYPE_IP4_GTPU_IP4) ||
           (f->type == VNET_FLOW_TYPE_IP4_GTPU_IP6))
    {
      vnet_flow_ip4_n_tuple_t *t4 = &f->ip4_n_tuple;
      item->type = RTE_FLOW_ITEM_TYPE_IPV4;

      if (!t4->src_addr.mask.as_u32 && !t4->dst_addr.mask.as_u32)
        {
          item->spec = NULL;
          item->mask = NULL;
        }
      else
        {
          ip4[0].hdr.src_addr = t4->src_addr.addr.as_u32;
          ip4[1].hdr.src_addr = t4->src_addr.mask.as_u32;
          ip4[0].hdr.dst_addr = t4->dst_addr.addr.as_u32;
          ip4[1].hdr.dst_addr = t4->dst_addr.mask.as_u32;
          item->spec = ip4;
          item->mask = ip4 + 1;
        }

      src_port = t4->src_port.port;
      dst_port = t4->dst_port.port;
      src_port_mask = t4->src_port.mask;
      dst_port_mask = t4->dst_port.mask;
      protocol = t4->protocol;
    }
  else if (f->type == VNET_FLOW_TYPE_IP4_VXLAN)
    {
      vnet_flow_ip4_vxlan_t *v4 = &f->ip4_vxlan;
      ip4[0].hdr.src_addr = v4->src_addr.as_u32;
      ip4[1].hdr.src_addr = -1;
      ip4[0].hdr.dst_addr = v4->dst_addr.as_u32;
      ip4[1].hdr.dst_addr = -1;
      item->type = RTE_FLOW_ITEM_TYPE_IPV4;
      item->spec = ip4;
      item->mask = ip4 + 1;

      dst_port = v4->dst_port;
      dst_port_mask = -1;
      protocol = IP_PROTOCOL_UDP;
    }
  else
    {
      rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
      goto done;
    }
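  /* L4 header match, keyed on the protocol resolved above */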
  if (protocol == IP_PROTOCOL_UDP)
    {
      vec_add2 (items, item, 1);
      item->type = RTE_FLOW_ITEM_TYPE_UDP;

      if ((src_port_mask == 0) && (dst_port_mask == 0))
        {
          item->spec = NULL;
          item->mask = NULL;
        }
      else
        {
          udp[0].hdr.src_port = clib_host_to_net_u16 (src_port);
          udp[1].hdr.src_port = clib_host_to_net_u16 (src_port_mask);
          udp[0].hdr.dst_port = clib_host_to_net_u16 (dst_port);
          udp[1].hdr.dst_port = clib_host_to_net_u16 (dst_port_mask);
          item->spec = udp;
          item->mask = udp + 1;
        }
    }
  else if (protocol == IP_PROTOCOL_TCP)
    {
      vec_add2 (items, item, 1);
      item->type = RTE_FLOW_ITEM_TYPE_TCP;

      if ((src_port_mask == 0) && (dst_port_mask == 0))
        {
          item->spec = NULL;
          item->mask = NULL;
        }
      else
        {
          tcp[0].hdr.src_port = clib_host_to_net_u16 (src_port);
          tcp[1].hdr.src_port = clib_host_to_net_u16 (src_port_mask);
          tcp[0].hdr.dst_port = clib_host_to_net_u16 (dst_port);
          tcp[1].hdr.dst_port = clib_host_to_net_u16 (dst_port_mask);
          item->spec = tcp;
          item->mask = tcp + 1;
        }
    }
  else if (protocol == IP_PROTOCOL_IPSEC_ESP)
    {
      vec_add2 (items, item, 1);
      item->type = RTE_FLOW_ITEM_TYPE_ESP;

      vnet_flow_ip4_ipsec_esp_t *tesp = &f->ip4_ipsec_esp;
      esp[0].hdr.spi = clib_host_to_net_u32 (tesp->spi);
      esp[1].hdr.spi = ~0;

      item->spec = esp;
      item->mask = esp + 1;
    }
  else if (protocol == IP_PROTOCOL_IPSEC_AH)
    {
      vec_add2 (items, item, 1);
      item->type = RTE_FLOW_ITEM_TYPE_AH;

      vnet_flow_ip4_ipsec_ah_t *tah = &f->ip4_ipsec_ah;
      ah[0].spi = clib_host_to_net_u32 (tah->spi);
      ah[1].spi = ~0;

      item->spec = ah;
      item->mask = ah + 1;
    }
  else if (protocol == IP_PROTOCOL_RESERVED)
    {
      rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
      goto done;
    }
  /* Tunnel header match */
  if (f->type == VNET_FLOW_TYPE_IP4_L2TPV3OIP)
    {
      vec_add2 (items, item, 1);
      item->type = RTE_FLOW_ITEM_TYPE_L2TPV3OIP;

      vnet_flow_ip4_l2tpv3oip_t *tl2tp = &f->ip4_l2tpv3oip;
      l2tp[0].session_id = clib_host_to_net_u32 (tl2tp->session_id);
      l2tp[1].session_id = ~0;

      item->spec = l2tp;
      item->mask = l2tp + 1;
    }
  if (f->type == VNET_FLOW_TYPE_IP4_VXLAN)
    {
      u32 vni = f->ip4_vxlan.vni;
      vxlan_header_t spec_hdr = {
        .flags = VXLAN_FLAGS_I,
        .vni_reserved = clib_host_to_net_u32 (vni << 8)
      };
      vxlan_header_t mask_hdr = {
        .flags = 0xff,
        .vni_reserved = clib_host_to_net_u32 (((u32) - 1) << 8)
      };

      clib_memset (raw, 0, sizeof raw);
      raw[0].item.relative = 1;
      raw[0].item.length = vxlan_hdr_sz;

      clib_memcpy_fast (raw[0].val + raw_sz, &spec_hdr, vxlan_hdr_sz);
      raw[0].item.pattern = raw[0].val + raw_sz;
      clib_memcpy_fast (raw[1].val + raw_sz, &mask_hdr, vxlan_hdr_sz);
      raw[1].item.pattern = raw[1].val + raw_sz;

      vec_add2 (items, item, 1);
      item->type = RTE_FLOW_ITEM_TYPE_RAW;
      item->spec = raw;
      item->mask = raw + 1;
    }
  else if (f->type == VNET_FLOW_TYPE_IP4_GTPC)
    {
      vnet_flow_ip4_gtpc_t *gc = &f->ip4_gtpc;
      gtp[0].teid = clib_host_to_net_u32 (gc->teid);
      gtp[1].teid = ~0;

      vec_add2 (items, item, 1);
      item->type = RTE_FLOW_ITEM_TYPE_GTPC;
      item->spec = gtp;
      item->mask = gtp + 1;
    }
  else if (f->type == VNET_FLOW_TYPE_IP4_GTPU)
    {
      vnet_flow_ip4_gtpu_t *gu = &f->ip4_gtpu;
      gtp[0].teid = clib_host_to_net_u32 (gu->teid);
      gtp[1].teid = ~0;

      vec_add2 (items, item, 1);
      item->type = RTE_FLOW_ITEM_TYPE_GTPU;
      item->spec = gtp;
      item->mask = gtp + 1;
    }
  else if ((f->type == VNET_FLOW_TYPE_IP4_GTPU_IP4) ||
           (f->type == VNET_FLOW_TYPE_IP4_GTPU_IP6))
    {
      vnet_flow_ip4_gtpu_t *gu = &f->ip4_gtpu;
      gtp[0].teid = clib_host_to_net_u32 (gu->teid);
      gtp[1].teid = ~0;

      vec_add2 (items, item, 1);
      item->type = RTE_FLOW_ITEM_TYPE_GTPU;
      item->spec = gtp;
      item->mask = gtp + 1;

      /* inner IP4 header */
      if (f->type == VNET_FLOW_TYPE_IP4_GTPU_IP4)
        {
          vec_add2 (items, item, 1);
          item->type = RTE_FLOW_ITEM_TYPE_IPV4;

          vnet_flow_ip4_gtpu_ip4_t *gu4 = &f->ip4_gtpu_ip4;
          if (!gu4->inner_src_addr.mask.as_u32 &&
              !gu4->inner_dst_addr.mask.as_u32)
            {
              item->spec = NULL;
              item->mask = NULL;
            }
          else
            {
              inner_ip4[0].hdr.src_addr = gu4->inner_src_addr.addr.as_u32;
              inner_ip4[1].hdr.src_addr = gu4->inner_src_addr.mask.as_u32;
              inner_ip4[0].hdr.dst_addr = gu4->inner_dst_addr.addr.as_u32;
              inner_ip4[1].hdr.dst_addr = gu4->inner_dst_addr.mask.as_u32;
              item->spec = inner_ip4;
              item->mask = inner_ip4 + 1;
            }
        }
      /* inner IP6 header */
      else if (f->type == VNET_FLOW_TYPE_IP4_GTPU_IP6)
        {
          ip6_address_t zero_addr;
          vnet_flow_ip4_gtpu_ip6_t *gu6 = &f->ip4_gtpu_ip6;

          clib_memset (&zero_addr, 0, sizeof (ip6_address_t));

          vec_add2 (items, item, 1);
          item->type = RTE_FLOW_ITEM_TYPE_IPV6;

          if (!clib_memcmp (&gu6->inner_src_addr.mask, &zero_addr, 16) &&
              !clib_memcmp (&gu6->inner_dst_addr.mask, &zero_addr, 16))
            {
              item->spec = NULL;
              item->mask = NULL;
            }
          else
            {
              clib_memcpy_fast (inner_ip6[0].hdr.src_addr,
                                &gu6->inner_src_addr.addr, 16);
              clib_memcpy_fast (inner_ip6[1].hdr.src_addr,
                                &gu6->inner_src_addr.mask, 16);
              clib_memcpy_fast (inner_ip6[0].hdr.dst_addr,
                                &gu6->inner_dst_addr.addr, 16);
              clib_memcpy_fast (inner_ip6[1].hdr.dst_addr,
                                &gu6->inner_dst_addr.mask, 16);
              item->spec = inner_ip6;
              item->mask = inner_ip6 + 1;
            }
        }
    }
  else if (f->type == VNET_FLOW_TYPE_IP6_GTPC)
    {
      vnet_flow_ip6_gtpc_t *gc = &f->ip6_gtpc;
      gtp[0].teid = clib_host_to_net_u32 (gc->teid);
      gtp[1].teid = ~0;

      vec_add2 (items, item, 1);
      item->type = RTE_FLOW_ITEM_TYPE_GTPC;
      item->spec = gtp;
      item->mask = gtp + 1;
    }
  else if (f->type == VNET_FLOW_TYPE_IP6_GTPU)
    {
      vnet_flow_ip6_gtpu_t *gu = &f->ip6_gtpu;
      gtp[0].teid = clib_host_to_net_u32 (gu->teid);
      gtp[1].teid = ~0;

      vec_add2 (items, item, 1);
      item->type = RTE_FLOW_ITEM_TYPE_GTPU;
      item->spec = gtp;
      item->mask = gtp + 1;
    }
  else if ((f->type == VNET_FLOW_TYPE_IP6_GTPU_IP4) ||
           (f->type == VNET_FLOW_TYPE_IP6_GTPU_IP6))
    {
      vnet_flow_ip6_gtpu_t *gu = &f->ip6_gtpu;
      gtp[0].teid = clib_host_to_net_u32 (gu->teid);
      gtp[1].teid = ~0;

      vec_add2 (items, item, 1);
      item->type = RTE_FLOW_ITEM_TYPE_GTPU;
      item->spec = gtp;
      item->mask = gtp + 1;

      /* inner IP4 header */
      if (f->type == VNET_FLOW_TYPE_IP6_GTPU_IP4)
        {
          vec_add2 (items, item, 1);
          item->type = RTE_FLOW_ITEM_TYPE_IPV4;

          vnet_flow_ip6_gtpu_ip4_t *gu4 = &f->ip6_gtpu_ip4;

          if (!gu4->inner_src_addr.mask.as_u32 &&
              !gu4->inner_dst_addr.mask.as_u32)
            {
              item->spec = NULL;
              item->mask = NULL;
            }
          else
            {
              inner_ip4[0].hdr.src_addr = gu4->inner_src_addr.addr.as_u32;
              inner_ip4[1].hdr.src_addr = gu4->inner_src_addr.mask.as_u32;
              inner_ip4[0].hdr.dst_addr = gu4->inner_dst_addr.addr.as_u32;
              inner_ip4[1].hdr.dst_addr = gu4->inner_dst_addr.mask.as_u32;
              item->spec = inner_ip4;
              item->mask = inner_ip4 + 1;
            }
        }

      /* inner IP6 header */
      if (f->type == VNET_FLOW_TYPE_IP6_GTPU_IP6)
        {
          ip6_address_t zero_addr;
          vnet_flow_ip6_gtpu_ip6_t *gu6 = &f->ip6_gtpu_ip6;

          clib_memset (&zero_addr, 0, sizeof (ip6_address_t));

          vec_add2 (items, item, 1);
          item->type = RTE_FLOW_ITEM_TYPE_IPV6;

          if (!clib_memcmp (&gu6->inner_src_addr.mask, &zero_addr, 16) &&
              !clib_memcmp (&gu6->inner_dst_addr.mask, &zero_addr, 16))
            {
              item->spec = NULL;
              item->mask = NULL;
            }
          else
            {
              clib_memcpy_fast (inner_ip6[0].hdr.src_addr,
                                &gu6->inner_src_addr.addr, 16);
              clib_memcpy_fast (inner_ip6[1].hdr.src_addr,
                                &gu6->inner_src_addr.mask, 16);
              clib_memcpy_fast (inner_ip6[0].hdr.dst_addr,
                                &gu6->inner_dst_addr.addr, 16);
              clib_memcpy_fast (inner_ip6[1].hdr.dst_addr,
                                &gu6->inner_dst_addr.mask, 16);
              item->spec = inner_ip6;
              item->mask = inner_ip6 + 1;
            }
        }
    }
pattern_end:
  vec_add2 (items, item, 1);
  item->type = RTE_FLOW_ITEM_TYPE_END;
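  /* Actions: translate the vnet action bitmap into an rte_flow action list */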
  /* Only one 'fate' can be assigned */
  if (f->actions & VNET_FLOW_ACTION_REDIRECT_TO_QUEUE)
    {
      vec_add2 (actions, action, 1);
      queue.index = f->redirect_queue;
      action->type = RTE_FLOW_ACTION_TYPE_QUEUE;
      action->conf = &queue;
      fate = true;
    }
  if (f->actions & VNET_FLOW_ACTION_DROP)
    {
      vec_add2 (actions, action, 1);
      action->type = RTE_FLOW_ACTION_TYPE_DROP;
      if (fate == true)
        {
          rv = VNET_FLOW_ERROR_INTERNAL;
          goto done;
        }
      else
        fate = true;
    }
  if (f->actions & VNET_FLOW_ACTION_RSS)
    {
      u64 rss_type = 0;

      vec_add2 (actions, action, 1);
      action->type = RTE_FLOW_ACTION_TYPE_RSS;
      action->conf = &rss;

      /* convert types to DPDK rss bitmask */
      dpdk_flow_convert_rss_types (f->rss_types, &rss_type);

      rss.types = rss_type;
      if ((rss.func = dpdk_flow_convert_rss_func (f->rss_fun)) ==
          RTE_ETH_HASH_FUNCTION_MAX)
        {
          rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
          goto done;
        }

      if (fate == true)
        {
          rv = VNET_FLOW_ERROR_INTERNAL;
          goto done;
        }
      else
        fate = true;
    }

  if (fate == false)
    {
      vec_add2 (actions, action, 1);
      action->type = RTE_FLOW_ACTION_TYPE_PASSTHRU;
    }
  if (f->actions & VNET_FLOW_ACTION_MARK)
    {
      vec_add2 (actions, action, 1);
      mark.id = fe->mark;
      action->type = RTE_FLOW_ACTION_TYPE_MARK;
      action->conf = &mark;
    }

  vec_add2 (actions, action, 1);
  action->type = RTE_FLOW_ACTION_TYPE_END;
  rv = rte_flow_validate (xd->device_index, &ingress, items, actions,
                          &xd->last_flow_error);

  if (rv)
    {
      if (rv == -EINVAL)
        rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
      else if (rv == -EEXIST)
        rv = VNET_FLOW_ERROR_ALREADY_EXISTS;
      else
        rv = VNET_FLOW_ERROR_INTERNAL;
      goto done;
    }

  fe->handle = rte_flow_create (xd->device_index, &ingress, items, actions,
                                &xd->last_flow_error);

  if (!fe->handle)
    rv = VNET_FLOW_ERROR_NOT_SUPPORTED;

done:
  vec_free (items);
  vec_free (actions);
  return rv;
}
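/*
 * Device-level flow ops entry point, called by the vnet flow layer.
 * Handles VNET_FLOW_DEV_OP_ADD_FLOW and VNET_FLOW_DEV_OP_DEL_FLOW;
 * *private_data carries the dpdk_flow_entry_t pool index between calls.
 */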
int
dpdk_flow_ops_fn (vnet_main_t * vnm, vnet_flow_dev_op_t op, u32 dev_instance,
                  u32 flow_index, uword * private_data)
{
  dpdk_main_t *dm = &dpdk_main;
  vnet_flow_t *flow = vnet_get_flow (flow_index);
  dpdk_device_t *xd = vec_elt_at_index (dm->devices, dev_instance);
  dpdk_flow_entry_t *fe;
  dpdk_flow_lookup_entry_t *fle = 0;
  int rv = 0;

  /* recycle old flow lookup entries only after the main loop counter
     increases - i.e. previously DMA'ed packets were handled */
  if (vec_len (xd->parked_lookup_indexes) > 0 &&
      xd->parked_loop_count != dm->vlib_main->main_loop_count)
    {
      u32 *fl_index;

      vec_foreach (fl_index, xd->parked_lookup_indexes)
        pool_put_index (xd->flow_lookup_entries, *fl_index);
      vec_reset_length (xd->parked_lookup_indexes);
    }
  if (op == VNET_FLOW_DEV_OP_DEL_FLOW)
    {
      fe = vec_elt_at_index (xd->flow_entries, *private_data);

      if ((rv = rte_flow_destroy (xd->device_index, fe->handle,
                                  &xd->last_flow_error)))
        return VNET_FLOW_ERROR_INTERNAL;

      if (fe->mark)
        {
          /* make sure no action is taken for in-flight (marked) packets */
          fle = pool_elt_at_index (xd->flow_lookup_entries, fe->mark);
          clib_memset (fle, -1, sizeof (*fle));
          vec_add1 (xd->parked_lookup_indexes, fe->mark);
          xd->parked_loop_count = dm->vlib_main->main_loop_count;
        }

      clib_memset (fe, 0, sizeof (*fe));
      pool_put (xd->flow_entries, fe);

      goto disable_rx_offload;
    }
  if (op != VNET_FLOW_DEV_OP_ADD_FLOW)
    return VNET_FLOW_ERROR_NOT_SUPPORTED;

  pool_get (xd->flow_entries, fe);
  fe->flow_index = flow->index;

  if (flow->actions == 0)
    {
      rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
      goto done;
    }

  /* if we need to mark packets, assign one mark */
  if (flow->actions & (VNET_FLOW_ACTION_MARK |
                       VNET_FLOW_ACTION_REDIRECT_TO_NODE |
                       VNET_FLOW_ACTION_BUFFER_ADVANCE))
    {
      /* reserve slot 0 of the lookup table, mark 0 is used for
         "no mark" */
      if (xd->flow_lookup_entries == 0)
        pool_get_aligned (xd->flow_lookup_entries, fle,
                          CLIB_CACHE_LINE_BYTES);
      pool_get_aligned (xd->flow_lookup_entries, fle, CLIB_CACHE_LINE_BYTES);
      fe->mark = fle - xd->flow_lookup_entries;

      /* install entry in the lookup table */
      clib_memset (fle, -1, sizeof (*fle));
      if (flow->actions & VNET_FLOW_ACTION_MARK)
        fle->flow_id = flow->mark_flow_id;
      if (flow->actions & VNET_FLOW_ACTION_REDIRECT_TO_NODE)
        fle->next_index = flow->redirect_device_input_next_index;
      if (flow->actions & VNET_FLOW_ACTION_BUFFER_ADVANCE)
        fle->buffer_advance = flow->buffer_advance;
    }
  else
    fe->mark = 0;
  if ((xd->flags & DPDK_DEVICE_FLAG_RX_FLOW_OFFLOAD) == 0)
    {
      xd->flags |= DPDK_DEVICE_FLAG_RX_FLOW_OFFLOAD;
      dpdk_device_setup (xd);
    }
  switch (flow->type)
    {
    case VNET_FLOW_TYPE_ETHERNET:
    case VNET_FLOW_TYPE_IP4_N_TUPLE:
    case VNET_FLOW_TYPE_IP6_N_TUPLE:
    case VNET_FLOW_TYPE_IP4_VXLAN:
    case VNET_FLOW_TYPE_IP4_GTPC:
    case VNET_FLOW_TYPE_IP4_GTPU:
    case VNET_FLOW_TYPE_IP4_GTPU_IP4:
    case VNET_FLOW_TYPE_IP4_GTPU_IP6:
    case VNET_FLOW_TYPE_IP6_GTPC:
    case VNET_FLOW_TYPE_IP6_GTPU:
    case VNET_FLOW_TYPE_IP6_GTPU_IP4:
    case VNET_FLOW_TYPE_IP6_GTPU_IP6:
    case VNET_FLOW_TYPE_IP4_L2TPV3OIP:
    case VNET_FLOW_TYPE_IP4_IPSEC_ESP:
    case VNET_FLOW_TYPE_IP4_IPSEC_AH:
      if ((rv = dpdk_flow_add (xd, flow, fe)))
        goto done;
      break;
    default:
      rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
      goto done;
    }

  *private_data = fe - xd->flow_entries;
done:
  if (rv)
    {
      clib_memset (fe, 0, sizeof (*fe));
      pool_put (xd->flow_entries, fe);
      if (fle)
        {
          clib_memset (fle, -1, sizeof (*fle));
          pool_put (xd->flow_lookup_entries, fle);
        }
    }
disable_rx_offload:
  if ((xd->flags & DPDK_DEVICE_FLAG_RX_FLOW_OFFLOAD) != 0
      && pool_elts (xd->flow_entries) == 0)
    {
      xd->flags &= ~DPDK_DEVICE_FLAG_RX_FLOW_OFFLOAD;
      dpdk_device_setup (xd);
    }

  return rv;
}
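/*
 * Format helper for flow state on this device: with flow_index == ~0 it
 * prints device-wide state (supported actions, last DPDK error), otherwise
 * the mark assigned to the selected flow entry. Typically reached via
 * VPP's flow debug CLI.
 */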
u8 *
format_dpdk_flow (u8 * s, va_list * args)
{
  u32 dev_instance = va_arg (*args, u32);
  u32 flow_index = va_arg (*args, u32);
  uword private_data = va_arg (*args, uword);
  dpdk_main_t *dm = &dpdk_main;
  dpdk_device_t *xd = vec_elt_at_index (dm->devices, dev_instance);
  dpdk_flow_entry_t *fe;

  if (flow_index == ~0)
    {
      s = format (s, "%-25s: %U\n", "supported flow actions",
                  format_flow_actions, xd->supported_flow_actions);
      s = format (s, "%-25s: %d\n", "last DPDK error type",
                  xd->last_flow_error.type);
      s = format (s, "%-25s: %s\n", "last DPDK error message",
                  xd->last_flow_error.message ? xd->last_flow_error.message :
                  "n/a");
      return s;
    }

  if (private_data >= vec_len (xd->flow_entries))
    return format (s, "unknown flow");

  fe = vec_elt_at_index (xd->flow_entries, private_data);
  s = format (s, "mark %u", fe->mark);

  return s;
}
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */