#include <vnet/vnet.h>
#include <vppinfra/vec.h>
#include <vppinfra/format.h>
-#include <vlib/unix/cj.h>
#include <assert.h>
#include <vnet/ip/ip.h>
return true;
}
+static inline void
+dpdk_flow_convert_rss_types (u64 type, u64 * dpdk_rss_type) /* translate VNET rss-type bitmap into DPDK ETH_RSS_* flag mask */
+{
+#define BIT_IS_SET(v, b) \
+ ((v) & (u64)1<<(b))
+
+ *dpdk_rss_type = 0; /* start from an empty DPDK mask; unset bits simply stay 0 */
+
+#undef _
+#define _(n, f, s) \
+ if (n != -1 && BIT_IS_SET(type, n)) \
+ *dpdk_rss_type |= f;
+
+ foreach_dpdk_rss_hf /* expands one bit-test per (vnet bit n, dpdk flag f) pair; n == -1 marks entries with no VNET equivalent */
+#undef _
+ return;
+}
+
+static inline enum rte_eth_hash_function
+dpdk_flow_convert_rss_func (vnet_rss_function_t func) /* map VNET rss function id to the DPDK rte_eth hash-function enum */
+{
+ enum rte_eth_hash_function rss_func;
+
+ switch (func)
+ {
+ case VNET_RSS_FUNC_DEFAULT:
+ rss_func = RTE_ETH_HASH_FUNCTION_DEFAULT;
+ break;
+ case VNET_RSS_FUNC_TOEPLITZ:
+ rss_func = RTE_ETH_HASH_FUNCTION_TOEPLITZ;
+ break;
+ case VNET_RSS_FUNC_SIMPLE_XOR:
+ rss_func = RTE_ETH_HASH_FUNCTION_SIMPLE_XOR;
+ break;
+ case VNET_RSS_FUNC_SYMMETRIC_TOEPLITZ:
+ rss_func = RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ;
+ break;
+ default:
+ rss_func = RTE_ETH_HASH_FUNCTION_MAX; /* sentinel: caller treats MAX as "unsupported" and fails the flow add */
+ break;
+ }
+
+ return rss_func;
+}
+
static int
dpdk_flow_add (dpdk_device_t * xd, vnet_flow_t * f, dpdk_flow_entry_t * fe)
{
struct rte_flow_item_udp udp[2] = { };
struct rte_flow_item_tcp tcp[2] = { };
struct rte_flow_item_gtp gtp[2] = { };
+ struct rte_flow_item_l2tpv3oip l2tp[2] = { };
+ struct rte_flow_item_esp esp[2] = { };
+ struct rte_flow_item_ah ah[2] = { };
struct rte_flow_action_mark mark = { 0 };
struct rte_flow_action_queue queue = { 0 };
+ struct rte_flow_action_rss rss = { 0 };
struct rte_flow_item *item, *items = 0;
struct rte_flow_action *action, *actions = 0;
bool fate = false;
u8 val[raw_sz + vxlan_hdr_sz];
} raw[2];
- u16 src_port, dst_port, src_port_mask, dst_port_mask;
- u8 protocol;
+ u16 src_port = 0, dst_port = 0, src_port_mask = 0, dst_port_mask = 0;
+ u8 protocol = IP_PROTOCOL_RESERVED;
int rv = 0;
if (f->actions & (~xd->supported_flow_actions))
/* IP */
vec_add2 (items, item, 1);
- if ((f->type == VNET_FLOW_TYPE_IP6_N_TUPLE) ||
- (f->type == VNET_FLOW_TYPE_IP6_GTPC) ||
- (f->type == VNET_FLOW_TYPE_IP6_GTPU) ||
- (f->type == VNET_FLOW_TYPE_IP6_GTPU_IP4) ||
- (f->type == VNET_FLOW_TYPE_IP6_GTPU_IP6))
+ if (f->type == VNET_FLOW_TYPE_IP4_L2TPV3OIP)
+ {
+ vnet_flow_ip4_l2tpv3oip_t *l2tp = &f->ip4_l2tpv3oip;
+ item->type = RTE_FLOW_ITEM_TYPE_IPV4;
+
+ if (!l2tp->src_addr.mask.as_u32 && !l2tp->dst_addr.mask.as_u32)
+ {
+ item->spec = NULL;
+ item->mask = NULL;
+ }
+ else
+ {
+ ip4[0].hdr.src_addr = l2tp->src_addr.addr.as_u32;
+ ip4[1].hdr.src_addr = l2tp->src_addr.mask.as_u32;
+ ip4[0].hdr.dst_addr = l2tp->dst_addr.addr.as_u32;
+ ip4[1].hdr.dst_addr = l2tp->dst_addr.mask.as_u32;
+ item->spec = ip4;
+ item->mask = ip4 + 1;
+ }
+ protocol = l2tp->protocol;
+ }
+ if (f->type == VNET_FLOW_TYPE_IP4_IPSEC_ESP)
+ {
+ vnet_flow_ip4_ipsec_esp_t *tesp = &f->ip4_ipsec_esp;
+ item->type = RTE_FLOW_ITEM_TYPE_IPV4;
+
+ if (!tesp->src_addr.mask.as_u32 && !tesp->dst_addr.mask.as_u32)
+ {
+ item->spec = NULL;
+ item->mask = NULL;
+ }
+ else
+ {
+ ip4[0].hdr.src_addr = tesp->src_addr.addr.as_u32;
+ ip4[1].hdr.src_addr = tesp->src_addr.mask.as_u32;
+ ip4[0].hdr.dst_addr = tesp->dst_addr.addr.as_u32;
+ ip4[1].hdr.dst_addr = tesp->dst_addr.mask.as_u32;
+ item->spec = ip4;
+ item->mask = ip4 + 1;
+ }
+ protocol = tesp->protocol;
+ }
+ else if (f->type == VNET_FLOW_TYPE_IP4_IPSEC_AH)
+ {
+ vnet_flow_ip4_ipsec_ah_t *tah = &f->ip4_ipsec_ah;
+ item->type = RTE_FLOW_ITEM_TYPE_IPV4;
+
+ if (!tah->src_addr.mask.as_u32 && !tah->dst_addr.mask.as_u32)
+ {
+ item->spec = NULL;
+ item->mask = NULL;
+ }
+ else
+ {
+ ip4[0].hdr.src_addr = tah->src_addr.addr.as_u32;
+ ip4[1].hdr.src_addr = tah->src_addr.mask.as_u32;
+ ip4[0].hdr.dst_addr = tah->dst_addr.addr.as_u32;
+ ip4[1].hdr.dst_addr = tah->dst_addr.mask.as_u32;
+ item->spec = ip4;
+ item->mask = ip4 + 1;
+ }
+ protocol = tah->protocol;
+ }
+ else if ((f->type == VNET_FLOW_TYPE_IP6_N_TUPLE) ||
+ (f->type == VNET_FLOW_TYPE_IP6_GTPC) ||
+ (f->type == VNET_FLOW_TYPE_IP6_GTPU) ||
+ (f->type == VNET_FLOW_TYPE_IP6_GTPU_IP4) ||
+ (f->type == VNET_FLOW_TYPE_IP6_GTPU_IP6))
{
vnet_flow_ip6_n_tuple_t *t6 = &f->ip6_n_tuple;
item->type = RTE_FLOW_ITEM_TYPE_IPV6;
}
/* Layer 4 */
- vec_add2 (items, item, 1);
if (protocol == IP_PROTOCOL_UDP)
{
+ vec_add2 (items, item, 1);
item->type = RTE_FLOW_ITEM_TYPE_UDP;
if ((src_port_mask == 0) && (dst_port_mask == 0))
}
else if (protocol == IP_PROTOCOL_TCP)
{
+ vec_add2 (items, item, 1);
item->type = RTE_FLOW_ITEM_TYPE_TCP;
if ((src_port_mask == 0) && (dst_port_mask == 0))
item->spec = NULL;
item->mask = NULL;
}
+ else
+ {
+ tcp[0].hdr.src_port = clib_host_to_net_u16 (src_port);
+ tcp[1].hdr.src_port = clib_host_to_net_u16 (src_port_mask);
+ tcp[0].hdr.dst_port = clib_host_to_net_u16 (dst_port);
+ tcp[1].hdr.dst_port = clib_host_to_net_u16 (dst_port_mask);
+ item->spec = tcp;
+ item->mask = tcp + 1;
+ }
+ }
+ else if (protocol == IP_PROTOCOL_IPSEC_ESP)
+ {
+ vec_add2 (items, item, 1);
+ item->type = RTE_FLOW_ITEM_TYPE_ESP;
- tcp[0].hdr.src_port = clib_host_to_net_u16 (src_port);
- tcp[1].hdr.src_port = clib_host_to_net_u16 (src_port_mask);
- tcp[0].hdr.dst_port = clib_host_to_net_u16 (dst_port);
- tcp[1].hdr.dst_port = clib_host_to_net_u16 (dst_port_mask);
- item->spec = tcp;
- item->mask = tcp + 1;
+ vnet_flow_ip4_ipsec_esp_t *tesp = &f->ip4_ipsec_esp;
+ esp[0].hdr.spi = clib_host_to_net_u32 (tesp->spi);
+ esp[1].hdr.spi = ~0;
+
+ item->spec = esp;
+ item->mask = esp + 1;
}
- else
+ else if (protocol == IP_PROTOCOL_IPSEC_AH)
+ {
+ vec_add2 (items, item, 1);
+ item->type = RTE_FLOW_ITEM_TYPE_AH;
+
+ vnet_flow_ip4_ipsec_ah_t *tah = &f->ip4_ipsec_ah;
+ ah[0].spi = clib_host_to_net_u32 (tah->spi);
+ ah[1].spi = ~0;
+
+ item->spec = ah;
+ item->mask = ah + 1;
+ }
+ else if (protocol == IP_PROTOCOL_RESERVED)
{
rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
goto done;
}
/* Tunnel header match */
+ if (f->type == VNET_FLOW_TYPE_IP4_L2TPV3OIP)
+ {
+ vec_add2 (items, item, 1);
+ item->type = RTE_FLOW_ITEM_TYPE_L2TPV3OIP;
+
+ vnet_flow_ip4_l2tpv3oip_t *tl2tp = &f->ip4_l2tpv3oip;
+ l2tp[0].session_id = clib_host_to_net_u32 (tl2tp->session_id);
+ l2tp[1].session_id = ~0;
+
+ item->spec = l2tp;
+ item->mask = l2tp + 1;
+ }
+
if (f->type == VNET_FLOW_TYPE_IP4_VXLAN)
{
u32 vni = f->ip4_vxlan.vni;
else
fate = true;
}
+ if (f->actions & VNET_FLOW_ACTION_RSS)
+ {
+ u64 rss_type = 0;
+
+ vec_add2 (actions, action, 1);
+ action->type = RTE_FLOW_ACTION_TYPE_RSS;
+ action->conf = &rss;
+
+ /* convert types to DPDK rss bitmask */
+ dpdk_flow_convert_rss_types (f->rss_types, &rss_type);
+
+ rss.types = rss_type;
+ if ((rss.func = dpdk_flow_convert_rss_func (f->rss_fun)) ==
+ RTE_ETH_HASH_FUNCTION_MAX)
+ {
+ rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
+ goto done;
+ }
+
+ if (fate == true)
+ {
+ rv = VNET_FLOW_ERROR_INTERNAL;
+ goto done;
+ }
+ else
+ fate = true;
+ }
if (fate == false)
{
vec_add2 (actions, action, 1);
case VNET_FLOW_TYPE_IP6_GTPU:
case VNET_FLOW_TYPE_IP6_GTPU_IP4:
case VNET_FLOW_TYPE_IP6_GTPU_IP6:
+ case VNET_FLOW_TYPE_IP4_L2TPV3OIP:
+ case VNET_FLOW_TYPE_IP4_IPSEC_ESP:
+ case VNET_FLOW_TYPE_IP4_IPSEC_AH:
if ((rv = dpdk_flow_add (xd, flow, fe)))
goto done;
break;