X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=src%2Fplugins%2Fdpdk%2Fdevice%2Fflow.c;h=9f765a6f8450c62c5a9881d1b0d0fc0c478bd4cd;hb=b95e6d4e7;hp=63f04b8323db91e74f9e340c7c975f71ff2ce2b0;hpb=178cf493d009995b28fdf220f04c98860ff79a9b;p=vpp.git diff --git a/src/plugins/dpdk/device/flow.c b/src/plugins/dpdk/device/flow.c index 63f04b8323d..9f765a6f845 100644 --- a/src/plugins/dpdk/device/flow.c +++ b/src/plugins/dpdk/device/flow.c @@ -16,7 +16,6 @@ #include #include #include -#include #include #include @@ -24,25 +23,151 @@ #include #include #include - #include #include +#define FLOW_IS_ETHERNET_CLASS(f) \ + (f->type == VNET_FLOW_TYPE_ETHERNET) + +#define FLOW_IS_IPV4_CLASS(f) \ + ((f->type == VNET_FLOW_TYPE_IP4) || \ + (f->type == VNET_FLOW_TYPE_IP4_N_TUPLE) || \ + (f->type == VNET_FLOW_TYPE_IP4_N_TUPLE_TAGGED) || \ + (f->type == VNET_FLOW_TYPE_IP4_VXLAN) || \ + (f->type == VNET_FLOW_TYPE_IP4_GTPC) || \ + (f->type == VNET_FLOW_TYPE_IP4_GTPU) || \ + (f->type == VNET_FLOW_TYPE_IP4_L2TPV3OIP) || \ + (f->type == VNET_FLOW_TYPE_IP4_IPSEC_ESP) || \ + (f->type == VNET_FLOW_TYPE_IP4_IPSEC_AH)) + +#define FLOW_IS_IPV6_CLASS(f) \ + ((f->type == VNET_FLOW_TYPE_IP6) || \ + (f->type == VNET_FLOW_TYPE_IP6_N_TUPLE) || \ + (f->type == VNET_FLOW_TYPE_IP6_N_TUPLE_TAGGED) || \ + (f->type == VNET_FLOW_TYPE_IP6_VXLAN)) + +/* check if flow is VLAN sensitive */ +#define FLOW_HAS_VLAN_TAG(f) \ + ((f->type == VNET_FLOW_TYPE_IP4_N_TUPLE_TAGGED) || \ + (f->type == VNET_FLOW_TYPE_IP6_N_TUPLE_TAGGED)) + +/* check if flow is L3 type */ +#define FLOW_IS_L3_TYPE(f) \ + ((f->type == VNET_FLOW_TYPE_IP4) || \ + (f->type == VNET_FLOW_TYPE_IP6)) + +/* check if flow is L4 type */ +#define FLOW_IS_L4_TYPE(f) \ + ((f->type == VNET_FLOW_TYPE_IP4_N_TUPLE) || \ + (f->type == VNET_FLOW_TYPE_IP6_N_TUPLE) || \ + (f->type == VNET_FLOW_TYPE_IP4_N_TUPLE_TAGGED) || \ + (f->type == VNET_FLOW_TYPE_IP6_N_TUPLE_TAGGED)) + +/* check if flow is L4 tunnel type */ +#define FLOW_IS_L4_TUNNEL_TYPE(f) \ + ((f->type == VNET_FLOW_TYPE_IP4_VXLAN) || \ + (f->type == VNET_FLOW_TYPE_IP6_VXLAN) || \ + (f->type == VNET_FLOW_TYPE_IP4_GTPC) || \ + (f->type == VNET_FLOW_TYPE_IP4_GTPU)) + /* constant structs */ static const struct rte_flow_attr ingress = {.ingress = 1 }; -static const struct rte_flow_item_eth any_eth[2] = { }; -static const struct rte_flow_item_vlan any_vlan[2] = { }; + +static inline bool +mac_address_is_all_zero (const u8 addr[6]) +{ + int i = 0; + + for (i = 0; i < 6; i++) + if (addr[i] != 0) + return false; + + return true; +} + +static inline void +dpdk_flow_convert_rss_types (u64 type, u64 * dpdk_rss_type) +{ +#define BIT_IS_SET(v, b) \ + ((v) & (u64)1<<(b)) + + *dpdk_rss_type = 0; + +#undef _ +#define _(n, f, s) \ + if (n != -1 && BIT_IS_SET(type, n)) \ + *dpdk_rss_type |= f; + + foreach_dpdk_rss_hf +#undef _ + return; +} + +/** Maximum number of queue indices in struct rte_flow_action_rss. 
*/
+#define ACTION_RSS_QUEUE_NUM 128
+
+static inline void
+dpdk_flow_convert_rss_queues (u32 queue_index, u32 queue_num,
+			      struct rte_flow_action_rss *rss)
+{
+  u16 *queues = clib_mem_alloc (sizeof (*queues) * ACTION_RSS_QUEUE_NUM);
+  int i;
+
+  for (i = 0; i < queue_num; i++)
+    queues[i] = queue_index++;
+
+  rss->queue_num = queue_num;
+  rss->queue = queues;
+
+  return;
+}
+
+static inline enum rte_eth_hash_function
+dpdk_flow_convert_rss_func (vnet_rss_function_t func)
+{
+  enum rte_eth_hash_function rss_func;
+
+  switch (func)
+    {
+    case VNET_RSS_FUNC_DEFAULT:
+      rss_func = RTE_ETH_HASH_FUNCTION_DEFAULT;
+      break;
+    case VNET_RSS_FUNC_TOEPLITZ:
+      rss_func = RTE_ETH_HASH_FUNCTION_TOEPLITZ;
+      break;
+    case VNET_RSS_FUNC_SIMPLE_XOR:
+      rss_func = RTE_ETH_HASH_FUNCTION_SIMPLE_XOR;
+      break;
+    case VNET_RSS_FUNC_SYMMETRIC_TOEPLITZ:
+      rss_func = RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ;
+      break;
+    default:
+      rss_func = RTE_ETH_HASH_FUNCTION_MAX;
+      break;
+    }
+
+  return rss_func;
+}
 
 static int
 dpdk_flow_add (dpdk_device_t * xd, vnet_flow_t * f, dpdk_flow_entry_t * fe)
 {
+  struct rte_flow_item_eth eth[2] = { };
   struct rte_flow_item_ipv4 ip4[2] = { };
   struct rte_flow_item_ipv6 ip6[2] = { };
   struct rte_flow_item_udp udp[2] = { };
   struct rte_flow_item_tcp tcp[2] = { };
+  struct rte_flow_item_gtp gtp[2] = { };
+  struct rte_flow_item_l2tpv3oip l2tp[2] = { };
+  struct rte_flow_item_esp esp[2] = { };
+  struct rte_flow_item_ah ah[2] = { };
+  struct rte_flow_item_raw generic[2] = {};
   struct rte_flow_action_mark mark = { 0 };
+  struct rte_flow_action_queue queue = { 0 };
+  struct rte_flow_action_rss rss = { 0 };
   struct rte_flow_item *item, *items = 0;
   struct rte_flow_action *action, *actions = 0;
+  bool fate = false;
 
   enum
@@ -56,159 +181,402 @@ dpdk_flow_add (dpdk_device_t * xd, vnet_flow_t * f, dpdk_flow_entry_t * fe)
     u8 val[raw_sz + vxlan_hdr_sz];
   } raw[2];
 
-  u16 src_port, dst_port, src_port_mask, dst_port_mask;
-  u8 protocol;
+  u16 src_port = 0, dst_port = 0, src_port_mask = 0, dst_port_mask = 0;
+  u8 protocol = IP_PROTOCOL_RESERVED;
   int rv = 0;
 
+  /* Handle generic flow first */
+  if (f->type == VNET_FLOW_TYPE_GENERIC)
+    {
+      generic[0].pattern = f->generic.pattern.spec;
+      generic[1].pattern = f->generic.pattern.mask;
+
+      vec_add2 (items, item, 1);
+      item->type = RTE_FLOW_ITEM_TYPE_RAW;
+      item->spec = generic;
+      item->mask = generic + 1;
+
+      goto pattern_end;
+    }
+
+  enum
+  {
+    FLOW_UNKNOWN_CLASS,
+    FLOW_ETHERNET_CLASS,
+    FLOW_IPV4_CLASS,
+    FLOW_IPV6_CLASS,
+  } flow_class = FLOW_UNKNOWN_CLASS;
+
+  if (FLOW_IS_ETHERNET_CLASS (f))
+    flow_class = FLOW_ETHERNET_CLASS;
+  else if (FLOW_IS_IPV4_CLASS (f))
+    flow_class = FLOW_IPV4_CLASS;
+  else if (FLOW_IS_IPV6_CLASS (f))
+    flow_class = FLOW_IPV6_CLASS;
+  else
+    return VNET_FLOW_ERROR_NOT_SUPPORTED;
+
   if (f->actions & (~xd->supported_flow_actions))
     return VNET_FLOW_ERROR_NOT_SUPPORTED;
 
   /* Match items */
-  /* Ethernet */
+  /* Layer 2, Ethernet */
   vec_add2 (items, item, 1);
   item->type = RTE_FLOW_ITEM_TYPE_ETH;
-  item->spec = any_eth;
-  item->mask = any_eth + 1;
 
-  /* VLAN */
-  if (f->type != VNET_FLOW_TYPE_IP4_VXLAN)
+  if (flow_class == FLOW_ETHERNET_CLASS)
     {
-      vec_add2 (items, item, 1);
-      item->type = RTE_FLOW_ITEM_TYPE_VLAN;
-      item->spec = any_vlan;
-      item->mask = any_vlan + 1;
-    }
+      vnet_flow_ethernet_t *te = &f->ethernet;
 
-  /* IP */
-  vec_add2 (items, item, 1);
-  if (f->type == VNET_FLOW_TYPE_IP6_N_TUPLE)
+      clib_memset (&eth[0], 0, sizeof (eth[0]));
+      clib_memset (&eth[1], 0, sizeof (eth[1]));
+
+      /* check if SMAC/DMAC/Ether_type assigned */
+      if 
(!mac_address_is_all_zero (te->eth_hdr.dst_address))
+	{
+	  clib_memcpy_fast (&eth[0].dst, &te->eth_hdr.dst_address,
+			    sizeof (eth[0].dst));
+	  clib_memset (&eth[1].dst, 0xFF, sizeof (eth[1].dst));
+	}
+
+      if (!mac_address_is_all_zero (te->eth_hdr.src_address))
+	{
+	  clib_memcpy_fast (&eth[0].src, &te->eth_hdr.src_address,
+			    sizeof (eth[0].src));
+	  clib_memset (&eth[1].src, 0xFF, sizeof (eth[1].src));
+	}
+
+      if (te->eth_hdr.type)
+	{
+	  eth[0].type = clib_host_to_net_u16 (te->eth_hdr.type);
+	  eth[1].type = clib_host_to_net_u16 (0xFFFF);
+	}
+
+      item->spec = eth;
+      item->mask = eth + 1;
+    }
+  else
     {
-      vnet_flow_ip6_n_tuple_t *t6 = &f->ip6_n_tuple;
-      clib_memcpy_fast (ip6[0].hdr.src_addr, &t6->src_addr.addr, 16);
-      clib_memcpy_fast (ip6[1].hdr.src_addr, &t6->src_addr.mask, 16);
-      clib_memcpy_fast (ip6[0].hdr.dst_addr, &t6->dst_addr.addr, 16);
-      clib_memcpy_fast (ip6[1].hdr.dst_addr, &t6->dst_addr.mask, 16);
-      item->type = RTE_FLOW_ITEM_TYPE_IPV6;
-      item->spec = ip6;
-      item->mask = ip6 + 1;
-
-      src_port = t6->src_port.port;
-      dst_port = t6->dst_port.port;
-      src_port_mask = t6->src_port.mask;
-      dst_port_mask = t6->dst_port.mask;
-      protocol = t6->protocol;
+      item->spec = NULL;
+      item->mask = NULL;
     }
-  else if (f->type == VNET_FLOW_TYPE_IP4_N_TUPLE)
+
+  /* currently only single empty vlan tag is supported */
+  if (FLOW_HAS_VLAN_TAG (f))
     {
-      vnet_flow_ip4_n_tuple_t *t4 = &f->ip4_n_tuple;
-      ip4[0].hdr.src_addr = t4->src_addr.addr.as_u32;
-      ip4[1].hdr.src_addr = t4->src_addr.mask.as_u32;
-      ip4[0].hdr.dst_addr = t4->dst_addr.addr.as_u32;
-      ip4[1].hdr.dst_addr = t4->dst_addr.mask.as_u32;
-      item->type = RTE_FLOW_ITEM_TYPE_IPV4;
-      item->spec = ip4;
-      item->mask = ip4 + 1;
-
-      src_port = t4->src_port.port;
-      dst_port = t4->dst_port.port;
-      src_port_mask = t4->src_port.mask;
-      dst_port_mask = t4->dst_port.mask;
-      protocol = t4->protocol;
+      vec_add2 (items, item, 1);
+      item->type = RTE_FLOW_ITEM_TYPE_VLAN;
+      item->spec = NULL;
+      item->mask = NULL;
     }
-  else if (f->type == VNET_FLOW_TYPE_IP4_VXLAN)
+
+  if (FLOW_IS_ETHERNET_CLASS (f))
+    goto pattern_end;
+
+  /* Layer 3, IP */
+  vec_add2 (items, item, 1);
+  if (flow_class == FLOW_IPV4_CLASS)
     {
-      vnet_flow_ip4_vxlan_t *v4 = &f->ip4_vxlan;
-      ip4[0].hdr.src_addr = v4->src_addr.as_u32;
-      ip4[1].hdr.src_addr = -1;
-      ip4[0].hdr.dst_addr = v4->dst_addr.as_u32;
-      ip4[1].hdr.dst_addr = -1;
+      vnet_flow_ip4_t *ip4_ptr = &f->ip4;
+
       item->type = RTE_FLOW_ITEM_TYPE_IPV4;
-      item->spec = ip4;
-      item->mask = ip4 + 1;
-
-      dst_port = v4->dst_port;
-      dst_port_mask = -1;
-      src_port = 0;
-      src_port_mask = 0;
-      protocol = IP_PROTOCOL_UDP;
+      if ((!ip4_ptr->src_addr.mask.as_u32) &&
+	  (!ip4_ptr->dst_addr.mask.as_u32) && (!ip4_ptr->protocol.mask))
+	{
+	  item->spec = NULL;
+	  item->mask = NULL;
+	}
+      else
+	{
+	  ip4[0].hdr.src_addr = ip4_ptr->src_addr.addr.as_u32;
+	  ip4[1].hdr.src_addr = ip4_ptr->src_addr.mask.as_u32;
+	  ip4[0].hdr.dst_addr = ip4_ptr->dst_addr.addr.as_u32;
+	  ip4[1].hdr.dst_addr = ip4_ptr->dst_addr.mask.as_u32;
+	  ip4[0].hdr.next_proto_id = ip4_ptr->protocol.prot;
+	  ip4[1].hdr.next_proto_id = ip4_ptr->protocol.mask;
+
+	  item->spec = ip4;
+	  item->mask = ip4 + 1;
+	}
+
+      if (FLOW_IS_L4_TYPE (f) || FLOW_IS_L4_TUNNEL_TYPE (f))
+	{
+	  vnet_flow_ip4_n_tuple_t *ip4_n_ptr = &f->ip4_n_tuple;
+
+	  src_port = ip4_n_ptr->src_port.port;
+	  dst_port = ip4_n_ptr->dst_port.port;
+	  src_port_mask = ip4_n_ptr->src_port.mask;
+	  dst_port_mask = ip4_n_ptr->dst_port.mask;
+	}
+
+      protocol = ip4_ptr->protocol.prot;
     }
-  else
+  else if (flow_class == FLOW_IPV6_CLASS)
     {
-      rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
-      goto 
done;
+      vnet_flow_ip6_t *ip6_ptr = &f->ip6;
+
+      item->type = RTE_FLOW_ITEM_TYPE_IPV6;
+
+      if ((ip6_ptr->src_addr.mask.as_u64[0] == 0) &&
+	  (ip6_ptr->src_addr.mask.as_u64[1] == 0) &&
+	  (!ip6_ptr->protocol.mask))
+	{
+	  item->spec = NULL;
+	  item->mask = NULL;
+	}
+      else
+	{
+	  clib_memcpy (ip6[0].hdr.src_addr, &ip6_ptr->src_addr.addr,
+		       ARRAY_LEN (ip6_ptr->src_addr.addr.as_u8));
+	  clib_memcpy (ip6[1].hdr.src_addr, &ip6_ptr->src_addr.mask,
+		       ARRAY_LEN (ip6_ptr->src_addr.mask.as_u8));
+	  clib_memcpy (ip6[0].hdr.dst_addr, &ip6_ptr->dst_addr.addr,
+		       ARRAY_LEN (ip6_ptr->dst_addr.addr.as_u8));
+	  clib_memcpy (ip6[1].hdr.dst_addr, &ip6_ptr->dst_addr.mask,
+		       ARRAY_LEN (ip6_ptr->dst_addr.mask.as_u8));
+	  ip6[0].hdr.proto = ip6_ptr->protocol.prot;
+	  ip6[1].hdr.proto = ip6_ptr->protocol.mask;
+
+	  item->spec = ip6;
+	  item->mask = ip6 + 1;
+	}
+
+      if (FLOW_IS_L4_TYPE (f) || FLOW_IS_L4_TUNNEL_TYPE (f))
+	{
+	  vnet_flow_ip6_n_tuple_t *ip6_n_ptr = &f->ip6_n_tuple;
+
+	  src_port = ip6_n_ptr->src_port.port;
+	  dst_port = ip6_n_ptr->dst_port.port;
+	  src_port_mask = ip6_n_ptr->src_port.mask;
+	  dst_port_mask = ip6_n_ptr->dst_port.mask;
+	}
+
+      protocol = ip6_ptr->protocol.prot;
     }
 
-  /* Layer 4 */
+  if (FLOW_IS_L3_TYPE (f))
+    goto pattern_end;
+
+  /* Layer 4 */
   vec_add2 (items, item, 1);
-  if (protocol == IP_PROTOCOL_UDP)
+  switch (protocol)
     {
-      udp[0].hdr.src_port = clib_host_to_net_u16 (src_port);
-      udp[1].hdr.src_port = clib_host_to_net_u16 (src_port_mask);
-      udp[0].hdr.dst_port = clib_host_to_net_u16 (dst_port);
-      udp[1].hdr.dst_port = clib_host_to_net_u16 (dst_port_mask);
-      item->type = RTE_FLOW_ITEM_TYPE_UDP;
-      item->spec = udp;
-      item->mask = udp + 1;
-    }
-  else if (protocol == IP_PROTOCOL_TCP)
-    {
-      tcp[0].hdr.src_port = clib_host_to_net_u16 (src_port);
-      tcp[1].hdr.src_port = clib_host_to_net_u16 (src_port_mask);
-      tcp[0].hdr.dst_port = clib_host_to_net_u16 (dst_port);
-      tcp[1].hdr.dst_port = clib_host_to_net_u16 (dst_port_mask);
+    case IP_PROTOCOL_L2TP:
+      item->type = RTE_FLOW_ITEM_TYPE_L2TPV3OIP;
+      l2tp[0].session_id = clib_host_to_net_u32 (f->ip4_l2tpv3oip.session_id);
+      l2tp[1].session_id = ~0;
+
+      item->spec = l2tp;
+      item->mask = l2tp + 1;
+      break;
+
+    case IP_PROTOCOL_IPSEC_ESP:
+      item->type = RTE_FLOW_ITEM_TYPE_ESP;
+      esp[0].hdr.spi = clib_host_to_net_u32 (f->ip4_ipsec_esp.spi);
+      esp[1].hdr.spi = ~0;
+
+      item->spec = esp;
+      item->mask = esp + 1;
+      break;
+
+    case IP_PROTOCOL_IPSEC_AH:
+      item->type = RTE_FLOW_ITEM_TYPE_AH;
+      ah[0].spi = clib_host_to_net_u32 (f->ip4_ipsec_ah.spi);
+      ah[1].spi = ~0;
+
+      item->spec = ah;
+      item->mask = ah + 1;
+      break;
+    case IP_PROTOCOL_TCP:
       item->type = RTE_FLOW_ITEM_TYPE_TCP;
-      item->spec = tcp;
-      item->mask = tcp + 1;
-    }
-  else
-    {
+      if ((src_port_mask == 0) && (dst_port_mask == 0))
+	{
+	  item->spec = NULL;
+	  item->mask = NULL;
+	}
+      else
+	{
+	  tcp[0].hdr.src_port = clib_host_to_net_u16 (src_port);
+	  tcp[1].hdr.src_port = clib_host_to_net_u16 (src_port_mask);
+	  tcp[0].hdr.dst_port = clib_host_to_net_u16 (dst_port);
+	  tcp[1].hdr.dst_port = clib_host_to_net_u16 (dst_port_mask);
+	  item->spec = tcp;
+	  item->mask = tcp + 1;
+	}
+      break;
+
+    case IP_PROTOCOL_UDP:
+      item->type = RTE_FLOW_ITEM_TYPE_UDP;
+      if ((src_port_mask == 0) && (dst_port_mask == 0))
+	{
+	  item->spec = NULL;
+	  item->mask = NULL;
+	}
+      else
+	{
+	  udp[0].hdr.src_port = clib_host_to_net_u16 (src_port);
+	  udp[1].hdr.src_port = clib_host_to_net_u16 (src_port_mask);
+	  udp[0].hdr.dst_port = clib_host_to_net_u16 (dst_port);
+	  udp[1].hdr.dst_port = clib_host_to_net_u16 (dst_port_mask);
+	  item->spec = udp;
+	  
item->mask = udp + 1; + } + + /* handle the UDP tunnels */ + if (f->type == VNET_FLOW_TYPE_IP4_GTPC) + { + gtp[0].teid = clib_host_to_net_u32 (f->ip4_gtpc.teid); + gtp[1].teid = ~0; + + vec_add2 (items, item, 1); + item->type = RTE_FLOW_ITEM_TYPE_GTPC; + item->spec = gtp; + item->mask = gtp + 1; + } + else if (f->type == VNET_FLOW_TYPE_IP4_GTPU) + { + gtp[0].teid = clib_host_to_net_u32 (f->ip4_gtpu.teid); + gtp[1].teid = ~0; + + vec_add2 (items, item, 1); + item->type = RTE_FLOW_ITEM_TYPE_GTPU; + item->spec = gtp; + item->mask = gtp + 1; + } + else if (f->type == VNET_FLOW_TYPE_IP4_VXLAN) + { + u32 vni = f->ip4_vxlan.vni; + + vxlan_header_t spec_hdr = { + .flags = VXLAN_FLAGS_I, + .vni_reserved = clib_host_to_net_u32 (vni << 8) + }; + vxlan_header_t mask_hdr = { + .flags = 0xff, + .vni_reserved = clib_host_to_net_u32 (((u32) - 1) << 8) + }; + + clib_memset (raw, 0, sizeof raw); + raw[0].item.relative = 1; + raw[0].item.length = vxlan_hdr_sz; + + clib_memcpy_fast (raw[0].val + raw_sz, &spec_hdr, vxlan_hdr_sz); + raw[0].item.pattern = raw[0].val + raw_sz; + clib_memcpy_fast (raw[1].val + raw_sz, &mask_hdr, vxlan_hdr_sz); + raw[1].item.pattern = raw[1].val + raw_sz; + + vec_add2 (items, item, 1); + item->type = RTE_FLOW_ITEM_TYPE_RAW; + item->spec = raw; + item->mask = raw + 1; + } + break; + + default: rv = VNET_FLOW_ERROR_NOT_SUPPORTED; goto done; } - /* Tunnel header match */ - if (f->type == VNET_FLOW_TYPE_IP4_VXLAN) +pattern_end: + if ((f->actions & VNET_FLOW_ACTION_RSS) && + (f->rss_types & (1ULL << VNET_FLOW_RSS_TYPES_ESP))) { - u32 vni = f->ip4_vxlan.vni; - vxlan_header_t spec_hdr = { - .flags = VXLAN_FLAGS_I, - .vni_reserved = clib_host_to_net_u32 (vni << 8) - }; - vxlan_header_t mask_hdr = { - .flags = 0xff, - .vni_reserved = clib_host_to_net_u32 (((u32) - 1) << 8) - }; - - clib_memset (raw, 0, sizeof raw); - raw[0].item.relative = 1; - raw[0].item.length = vxlan_hdr_sz; - - clib_memcpy_fast (raw[0].val + raw_sz, &spec_hdr, vxlan_hdr_sz); - raw[0].item.pattern = raw[0].val + raw_sz; - clib_memcpy_fast (raw[1].val + raw_sz, &mask_hdr, vxlan_hdr_sz); - raw[1].item.pattern = raw[1].val + raw_sz; vec_add2 (items, item, 1); - item->type = RTE_FLOW_ITEM_TYPE_RAW; - item->spec = raw; - item->mask = raw + 1; + item->type = RTE_FLOW_ITEM_TYPE_ESP; } vec_add2 (items, item, 1); item->type = RTE_FLOW_ITEM_TYPE_END; /* Actions */ - vec_add2 (actions, action, 1); - action->type = RTE_FLOW_ACTION_TYPE_PASSTHRU; + /* Only one 'fate' can be assigned */ + if (f->actions & VNET_FLOW_ACTION_REDIRECT_TO_QUEUE) + { + vec_add2 (actions, action, 1); + queue.index = f->redirect_queue; + action->type = RTE_FLOW_ACTION_TYPE_QUEUE; + action->conf = &queue; + fate = true; + } - vec_add2 (actions, action, 1); - mark.id = fe->mark; - action->type = RTE_FLOW_ACTION_TYPE_MARK; - action->conf = &mark; + if (f->actions & VNET_FLOW_ACTION_DROP) + { + vec_add2 (actions, action, 1); + action->type = RTE_FLOW_ACTION_TYPE_DROP; + if (fate == true) + { + rv = VNET_FLOW_ERROR_INTERNAL; + goto done; + } + else + fate = true; + } + + if (f->actions & VNET_FLOW_ACTION_RSS) + { + u64 rss_type = 0; + + vec_add2 (actions, action, 1); + action->type = RTE_FLOW_ACTION_TYPE_RSS; + action->conf = &rss; + + /* convert types to DPDK rss bitmask */ + dpdk_flow_convert_rss_types (f->rss_types, &rss_type); + + if (f->queue_num) + /* convert rss queues to array */ + dpdk_flow_convert_rss_queues (f->queue_index, f->queue_num, &rss); + + rss.types = rss_type; + if ((rss.func = dpdk_flow_convert_rss_func (f->rss_fun)) == + 
RTE_ETH_HASH_FUNCTION_MAX) + { + rv = VNET_FLOW_ERROR_NOT_SUPPORTED; + goto done; + } + + if (fate == true) + { + rv = VNET_FLOW_ERROR_INTERNAL; + goto done; + } + else + fate = true; + } + + if (fate == false) + { + vec_add2 (actions, action, 1); + action->type = RTE_FLOW_ACTION_TYPE_PASSTHRU; + } + + if (f->actions & VNET_FLOW_ACTION_MARK) + { + vec_add2 (actions, action, 1); + mark.id = fe->mark; + action->type = RTE_FLOW_ACTION_TYPE_MARK; + action->conf = &mark; + } vec_add2 (actions, action, 1); action->type = RTE_FLOW_ACTION_TYPE_END; + rv = rte_flow_validate (xd->device_index, &ingress, items, actions, + &xd->last_flow_error); + + if (rv) + { + if (rv == -EINVAL) + rv = VNET_FLOW_ERROR_NOT_SUPPORTED; + else if (rv == -EEXIST) + rv = VNET_FLOW_ERROR_ALREADY_EXISTS; + else + rv = VNET_FLOW_ERROR_INTERNAL; + + goto done; + } + fe->handle = rte_flow_create (xd->device_index, &ingress, items, actions, &xd->last_flow_error); @@ -225,6 +593,7 @@ int dpdk_flow_ops_fn (vnet_main_t * vnm, vnet_flow_dev_op_t op, u32 dev_instance, u32 flow_index, uword * private_data) { + vlib_main_t *vm = vlib_get_main (); dpdk_main_t *dm = &dpdk_main; vnet_flow_t *flow = vnet_get_flow (flow_index); dpdk_device_t *xd = vec_elt_at_index (dm->devices, dev_instance); @@ -235,19 +604,17 @@ dpdk_flow_ops_fn (vnet_main_t * vnm, vnet_flow_dev_op_t op, u32 dev_instance, /* recycle old flow lookup entries only after the main loop counter increases - i.e. previously DMA'ed packets were handled */ if (vec_len (xd->parked_lookup_indexes) > 0 && - xd->parked_loop_count != dm->vlib_main->main_loop_count) + xd->parked_loop_count != vm->main_loop_count) { u32 *fl_index; vec_foreach (fl_index, xd->parked_lookup_indexes) pool_put_index (xd->flow_lookup_entries, *fl_index); - vec_reset_length (xd->flow_lookup_entries); + vec_reset_length (xd->parked_lookup_indexes); } if (op == VNET_FLOW_DEV_OP_DEL_FLOW) { - ASSERT (*private_data >= vec_len (xd->flow_entries)); - fe = vec_elt_at_index (xd->flow_entries, *private_data); if ((rv = rte_flow_destroy (xd->device_index, fe->handle, @@ -260,7 +627,7 @@ dpdk_flow_ops_fn (vnet_main_t * vnm, vnet_flow_dev_op_t op, u32 dev_instance, fle = pool_elt_at_index (xd->flow_lookup_entries, fe->mark); clib_memset (fle, -1, sizeof (*fle)); vec_add1 (xd->parked_lookup_indexes, fe->mark); - xd->parked_loop_count = dm->vlib_main->main_loop_count; + xd->parked_loop_count = vm->main_loop_count; } clib_memset (fe, 0, sizeof (*fe)); @@ -313,9 +680,18 @@ dpdk_flow_ops_fn (vnet_main_t * vnm, vnet_flow_dev_op_t op, u32 dev_instance, switch (flow->type) { + case VNET_FLOW_TYPE_ETHERNET: + case VNET_FLOW_TYPE_IP4: + case VNET_FLOW_TYPE_IP6: case VNET_FLOW_TYPE_IP4_N_TUPLE: case VNET_FLOW_TYPE_IP6_N_TUPLE: case VNET_FLOW_TYPE_IP4_VXLAN: + case VNET_FLOW_TYPE_IP4_GTPC: + case VNET_FLOW_TYPE_IP4_GTPU: + case VNET_FLOW_TYPE_IP4_L2TPV3OIP: + case VNET_FLOW_TYPE_IP4_IPSEC_ESP: + case VNET_FLOW_TYPE_IP4_IPSEC_AH: + case VNET_FLOW_TYPE_GENERIC: if ((rv = dpdk_flow_add (xd, flow, fe))) goto done; break;
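
For readers following the new RSS plumbing: dpdk_flow_convert_rss_types walks the foreach_dpdk_rss_hf mapping table (defined elsewhere in the DPDK plugin headers) and ORs in the DPDK hash-field flag for every vnet RSS type bit that is set. A minimal, self-contained sketch of the same X-macro expansion pattern, using a hypothetical two-entry table (DEMO_RSS_* and foreach_demo_rss_hf are illustrative stand-ins, not the real definitions):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;

/* hypothetical stand-ins for two DPDK hash-field flags */
#define DEMO_RSS_IPV4     (1ULL << 2)
#define DEMO_RSS_IPV4_UDP (1ULL << 5)

/* hypothetical two-entry version of the foreach_dpdk_rss_hf table:
   _(vnet bit number, DPDK flag, name) */
#define foreach_demo_rss_hf              \
  _ (0, DEMO_RSS_IPV4, "ipv4")           \
  _ (1, DEMO_RSS_IPV4_UDP, "ipv4-udp")

#define BIT_IS_SET(v, b) ((v) & ((u64) 1 << (b)))

static u64
convert_rss_types (u64 type)
{
  u64 dpdk_rss_type = 0;

  /* same expansion trick as dpdk_flow_convert_rss_types above:
     each table row becomes one conditional OR */
#define _(n, f, s) \
  if (n != -1 && BIT_IS_SET (type, n)) \
    dpdk_rss_type |= f;

  foreach_demo_rss_hf
#undef _

  return dpdk_rss_type;
}

int
main (void)
{
  /* vnet bit 1 set -> the ipv4-udp flag comes back (0x20) */
  printf ("0x%llx\n", (unsigned long long) convert_rss_types (1ULL << 1));
  return 0;
}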
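
The queue list attached to struct rte_flow_action_rss has a lifetime worth spelling out: dpdk_flow_convert_rss_queues allocates a fixed ACTION_RSS_QUEUE_NUM-entry array with clib_mem_alloc and fills only the first queue_num slots. The rte_flow API allows the application to release pattern and action data once rte_flow_create has returned, so the array remains the caller's responsibility to free; nothing in this change frees it. A standalone sketch of the allocate/consume/free pattern, with malloc/free standing in for the vppinfra allocator:

#include <stdint.h>
#include <stdlib.h>

#define ACTION_RSS_QUEUE_NUM 128

/* mirrors dpdk_flow_convert_rss_queues: expand (first index, count)
   into the explicit queue list that struct rte_flow_action_rss wants */
static uint16_t *
convert_rss_queues (uint32_t queue_index, uint32_t queue_num)
{
  uint16_t *queues = malloc (sizeof (*queues) * ACTION_RSS_QUEUE_NUM);
  uint32_t i;

  for (i = 0; i < queue_num; i++)
    queues[i] = (uint16_t) queue_index++;
  return queues;
}

int
main (void)
{
  uint16_t *q = convert_rss_queues (4, 8);	/* queues 4..11 */

  /* ... point rte_flow_action_rss.queue at q, call rte_flow_create ... */

  /* the created flow does not reference the array afterwards,
     so it can be released here */
  free (q);
  return 0;
}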
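
For the VXLAN case, the outer headers are matched with the ETH/IPV4/UDP items and the VXLAN header itself with a raw item: relative = 1 anchors the pattern immediately after the preceding UDP item, and because the 24-bit VNI occupies the upper bits of the vni_reserved word, both spec and mask shift the value left by 8. A small sketch of how one VNI is encoded into that spec/mask pair (the struct below is a minimal stand-in for VPP's vxlan_header_t, and htonl stands in for clib_host_to_net_u32):

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

/* minimal stand-in for the VXLAN header layout (RFC 7348) */
typedef struct
{
  uint8_t flags;		/* 0x08 (VXLAN_FLAGS_I) = VNI is valid */
  uint8_t reserved[3];
  uint32_t vni_reserved;	/* 24-bit VNI in the upper bits */
} vxlan_header_t;

#define VXLAN_FLAGS_I 0x08

int
main (void)
{
  uint32_t vni = 42;

  vxlan_header_t spec = {
    .flags = VXLAN_FLAGS_I,
    .vni_reserved = htonl (vni << 8),	/* match this VNI ... */
  };
  vxlan_header_t mask = {
    .flags = 0xff,
    .vni_reserved = htonl (((uint32_t) - 1) << 8),	/* ... exactly,
							   low byte ignored */
  };

  printf ("spec 0x%08x mask 0x%08x\n",
	  ntohl (spec.vni_reserved), ntohl (mask.vni_reserved));
  return 0;
}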
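
On the action side, the change enforces that at most one fate action is attached per flow: REDIRECT_TO_QUEUE, DROP and RSS each set the fate flag and return VNET_FLOW_ERROR_INTERNAL if it is already set, PASSTHRU is appended only when no fate was chosen, and MARK rides along with whichever fate applies. A compact sketch of that accumulation logic over a plain bitmask (the ACTION_* flags here are illustrative, mirroring the vnet flags used above):

#include <stdbool.h>
#include <stdio.h>

/* illustrative action flags */
#define ACTION_REDIRECT_TO_QUEUE (1 << 0)
#define ACTION_DROP              (1 << 1)
#define ACTION_RSS               (1 << 2)
#define ACTION_MARK              (1 << 3)

/* returns 0 on success, -1 when two fate actions collide */
static int
check_fate (unsigned actions)
{
  bool fate = false;

  if (actions & ACTION_REDIRECT_TO_QUEUE)
    fate = true;

  if (actions & ACTION_DROP)
    {
      if (fate)
	return -1;		/* second fate action: reject */
      fate = true;
    }

  if (actions & ACTION_RSS)
    {
      if (fate)
	return -1;
      fate = true;
    }

  if (!fate)
    printf ("no fate action: PASSTHRU is appended\n");

  /* MARK is not a fate action; it stacks on top of any of the above */
  return 0;
}

int
main (void)
{
  printf ("%d\n", check_fate (ACTION_REDIRECT_TO_QUEUE | ACTION_MARK)); /* 0 */
  printf ("%d\n", check_fate (ACTION_DROP | ACTION_RSS));	/* -1 */
  return 0;
}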
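
Finally, the dpdk_flow_ops_fn hunk fixes which vector is reset when parked flow-lookup entries are recycled: the old code reset xd->flow_lookup_entries (the live pool) rather than xd->parked_lookup_indexes (the indices awaiting recycling). Entries parked at flow deletion are returned to the pool only after vm->main_loop_count has advanced, so packets already DMA'd with the old mark are handled first. A self-contained sketch of that deferred-recycle pattern:

#include <stdint.h>
#include <stdio.h>

#define PARKED_MAX 8

typedef struct
{
  uint32_t parked[PARKED_MAX];	/* indices waiting to be recycled */
  int n_parked;
  uint32_t parked_loop_count;	/* loop counter when they were parked */
} recycler_t;

static void
park (recycler_t * r, uint32_t index, uint32_t main_loop_count)
{
  r->parked[r->n_parked++] = index;
  r->parked_loop_count = main_loop_count;
}

static void
recycle_if_safe (recycler_t * r, uint32_t main_loop_count)
{
  int i;

  /* recycle only after the main loop counter has advanced, i.e. all
     packets DMA'd with the old mark have already been handled */
  if (r->n_parked == 0 || r->parked_loop_count == main_loop_count)
    return;

  for (i = 0; i < r->n_parked; i++)
    printf ("recycling lookup entry %u\n", r->parked[i]);
  r->n_parked = 0;
}

int
main (void)
{
  recycler_t r = { 0 };

  park (&r, 5, 100);
  recycle_if_safe (&r, 100);	/* same iteration: deferred */
  recycle_if_safe (&r, 101);	/* counter advanced: entry 5 recycled */
  return 0;
}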