return;
}
+/** Maximum number of queue indices in struct rte_flow_action_rss. */
+#define ACTION_RSS_QUEUE_NUM 128
+
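+/** Convert a contiguous range of queue_num rx queues, starting at
+ queue_index, into the u16 array expected by struct rte_flow_action_rss. */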
+static inline void
+dpdk_flow_convert_rss_queues (u32 queue_index, u32 queue_num,
+ struct rte_flow_action_rss *rss)
+{
+ u16 *queues = clib_mem_alloc (sizeof (*queues) * ACTION_RSS_QUEUE_NUM);
+ u32 i;
+
+ /* clamp to the fixed-size allocation above to avoid overflowing it */
+ if (queue_num > ACTION_RSS_QUEUE_NUM)
+ queue_num = ACTION_RSS_QUEUE_NUM;
+
+ for (i = 0; i < queue_num; i++)
+ queues[i] = queue_index++;
+
+ rss->queue_num = queue_num;
+ rss->queue = queues;
+
+ return;
+}
+
static inline enum rte_eth_hash_function
dpdk_flow_convert_rss_func (vnet_rss_function_t func)
{
struct rte_flow_item_l2tpv3oip l2tp[2] = { };
struct rte_flow_item_esp esp[2] = { };
struct rte_flow_item_ah ah[2] = { };
+ struct rte_flow_item_raw generic[2] = { };
struct rte_flow_action_mark mark = { 0 };
struct rte_flow_action_queue queue = { 0 };
struct rte_flow_action_rss rss = { 0 };
u8 protocol = IP_PROTOCOL_RESERVED;
int rv = 0;
+ /* Handle generic flows first: they bypass the typed pattern parsing below */
+ if (f->type == VNET_FLOW_TYPE_GENERIC)
+ {
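+ /* generic[0] carries the raw spec bytes, generic[1] the mask */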
+ generic[0].pattern = f->generic.pattern.spec;
+ generic[1].pattern = f->generic.pattern.mask;
+
+ vec_add2 (items, item, 1);
+ item->type = RTE_FLOW_ITEM_TYPE_RAW;
+ item->spec = generic;
+ item->mask = generic + 1;
+
+ goto pattern_end;
+ }
+
enum
{
FLOW_UNKNOWN_CLASS,
}
pattern_end:
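+ /* RSS over ESP requires an explicit ESP item in the pattern so the
+ device can match and hash on the ESP header */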
+ if ((f->actions & VNET_FLOW_ACTION_RSS) &&
+ (f->rss_types & (1ULL << VNET_FLOW_RSS_TYPES_ESP)))
+ {
+ vec_add2 (items, item, 1);
+ item->type = RTE_FLOW_ITEM_TYPE_ESP;
+ }
+
vec_add2 (items, item, 1);
item->type = RTE_FLOW_ITEM_TYPE_END;
/* convert types to DPDK rss bitmask */
dpdk_flow_convert_rss_types (f->rss_types, &rss_type);
+ if (f->queue_num)
+ /* convert rss queues to array */
+ dpdk_flow_convert_rss_queues (f->queue_index, f->queue_num, &rss);
+
rss.types = rss_type;
if ((rss.func = dpdk_flow_convert_rss_func (f->rss_fun)) ==
RTE_ETH_HASH_FUNCTION_MAX)
dpdk_flow_ops_fn (vnet_main_t * vnm, vnet_flow_dev_op_t op, u32 dev_instance,
u32 flow_index, uword * private_data)
{
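+ /* vm->main_loop_count gates recycling of parked lookup entries below */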
+ vlib_main_t *vm = vlib_get_main ();
dpdk_main_t *dm = &dpdk_main;
vnet_flow_t *flow = vnet_get_flow (flow_index);
dpdk_device_t *xd = vec_elt_at_index (dm->devices, dev_instance);
/* recycle old flow lookup entries only after the main loop counter
increases - i.e. previously DMA'ed packets were handled */
if (vec_len (xd->parked_lookup_indexes) > 0 &&
- xd->parked_loop_count != dm->vlib_main->main_loop_count)
+ xd->parked_loop_count != vm->main_loop_count)
{
u32 *fl_index;
fle = pool_elt_at_index (xd->flow_lookup_entries, fe->mark);
clib_memset (fle, -1, sizeof (*fle));
vec_add1 (xd->parked_lookup_indexes, fe->mark);
- xd->parked_loop_count = dm->vlib_main->main_loop_count;
+ xd->parked_loop_count = vm->main_loop_count;
}
clib_memset (fe, 0, sizeof (*fe));
case VNET_FLOW_TYPE_IP4_L2TPV3OIP:
case VNET_FLOW_TYPE_IP4_IPSEC_ESP:
case VNET_FLOW_TYPE_IP4_IPSEC_AH:
+ case VNET_FLOW_TYPE_GENERIC:
if ((rv = dpdk_flow_add (xd, flow, fe)))
goto done;
break;