diff --git a/src/plugins/dpdk/device/flow.c b/src/plugins/dpdk/device/flow.c
index 5080a9c83bc..351390b6d34 100644
--- a/src/plugins/dpdk/device/flow.c
+++ b/src/plugins/dpdk/device/flow.c
@@ -22,6 +22,7 @@
 #include <vnet/vnet.h>
 #include <vnet/ip/ip.h>
 #include <vnet/ethernet/ethernet.h>
+#include <vnet/vxlan/vxlan.h>
 #include <dpdk/device/dpdk.h>
 
 #include <dpdk/device/dpdk_priv.h>
@@ -33,8 +34,7 @@
 static const struct rte_flow_item_eth any_eth[2] = { };
 static const struct rte_flow_item_vlan any_vlan[2] = { };
 static int
-dpdk_flow_add_n_touple (dpdk_device_t * xd, vnet_flow_t * f,
-			dpdk_flow_entry_t * fe)
+dpdk_flow_add (dpdk_device_t * xd, vnet_flow_t * f, dpdk_flow_entry_t * fe)
 {
   struct rte_flow_item_ipv4 ip4[2] = { };
   struct rte_flow_item_ipv6 ip6[2] = { };
@@ -43,6 +43,19 @@ dpdk_flow_add_n_touple (dpdk_device_t * xd, vnet_flow_t * f,
   struct rte_flow_action_mark mark = { 0 };
   struct rte_flow_item *item, *items = 0;
   struct rte_flow_action *action, *actions = 0;
+
+  enum
+  {
+    vxlan_hdr_sz = sizeof (vxlan_header_t),
+    raw_sz = sizeof (struct rte_flow_item_raw)
+  };
+
+  union
+  {
+    struct rte_flow_item_raw item;
+    u8 val[raw_sz + vxlan_hdr_sz];
+  } raw[2];
+
   u16 src_port, dst_port, src_port_mask, dst_port_mask;
   u8 protocol;
   int rv = 0;
@@ -50,6 +63,7 @@ dpdk_flow_add_n_touple (dpdk_device_t * xd, vnet_flow_t * f,
   if (f->actions & (~xd->supported_flow_actions))
     return VNET_FLOW_ERROR_NOT_SUPPORTED;
 
+  /* Match items */
   /* Ethernet */
   vec_add2 (items, item, 1);
   item->type = RTE_FLOW_ITEM_TYPE_ETH;
@@ -57,10 +71,13 @@ dpdk_flow_add_n_touple (dpdk_device_t * xd, vnet_flow_t * f,
   item->mask = any_eth + 1;
 
   /* VLAN */
-  vec_add2 (items, item, 1);
-  item->type = RTE_FLOW_ITEM_TYPE_VLAN;
-  item->spec = any_vlan;
-  item->mask = any_vlan + 1;
+  if (f->type != VNET_FLOW_TYPE_IP4_VXLAN)
+    {
+      vec_add2 (items, item, 1);
+      item->type = RTE_FLOW_ITEM_TYPE_VLAN;
+      item->spec = any_vlan;
+      item->mask = any_vlan + 1;
+    }
 
   /* IP */
   vec_add2 (items, item, 1);
@@ -81,11 +98,10 @@ dpdk_flow_add_n_touple (dpdk_device_t * xd, vnet_flow_t * f,
       dst_port_mask = t6->dst_port.mask;
       protocol = t6->protocol;
     }
-  else
+  else if (f->type == VNET_FLOW_TYPE_IP4_N_TUPLE)
     {
       vnet_flow_ip4_n_tuple_t *t4 = &f->ip4_n_tuple;
-      ASSERT (f->type == VNET_FLOW_TYPE_IP4_N_TUPLE);
-      ip4[0].hdr.src_addr = t4->src_addr.mask.as_u32;
+      ip4[0].hdr.src_addr = t4->src_addr.addr.as_u32;
       ip4[1].hdr.src_addr = t4->src_addr.mask.as_u32;
       ip4[0].hdr.dst_addr = t4->dst_addr.addr.as_u32;
       ip4[1].hdr.dst_addr = t4->dst_addr.mask.as_u32;
@@ -94,11 +110,33 @@ dpdk_flow_add_n_touple (dpdk_device_t * xd, vnet_flow_t * f,
       item->mask = ip4 + 1;
 
       src_port = t4->src_port.port;
-      dst_port = t4->dst_port.mask;
+      dst_port = t4->dst_port.port;
       src_port_mask = t4->src_port.mask;
       dst_port_mask = t4->dst_port.mask;
       protocol = t4->protocol;
     }
+  else if (f->type == VNET_FLOW_TYPE_IP4_VXLAN)
+    {
+      vnet_flow_ip4_vxlan_t *v4 = &f->ip4_vxlan;
+      ip4[0].hdr.src_addr = v4->src_addr.as_u32;
+      ip4[1].hdr.src_addr = -1;
+      ip4[0].hdr.dst_addr = v4->dst_addr.as_u32;
+      ip4[1].hdr.dst_addr = -1;
+      item->type = RTE_FLOW_ITEM_TYPE_IPV4;
+      item->spec = ip4;
+      item->mask = ip4 + 1;
+
+      dst_port = v4->dst_port;
+      dst_port_mask = -1;
+      src_port = 0;
+      src_port_mask = 0;
+      protocol = IP_PROTOCOL_UDP;
+    }
+  else
+    {
+      rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
+      goto done;
+    }
 
   /* Layer 4 */
   vec_add2 (items, item, 1);
@@ -128,10 +166,36 @@ dpdk_flow_add_n_touple (dpdk_device_t * xd, vnet_flow_t * f,
       goto done;
     }
 
-  /* The End */
+  /* Tunnel header match */
+  if (f->type == VNET_FLOW_TYPE_IP4_VXLAN)
+    {
+      u32 vni = f->ip4_vxlan.vni;
+      vxlan_header_t spec_hdr = {
+	.flags = VXLAN_FLAGS_I,
+	.vni_reserved = clib_host_to_net_u32 (vni << 8)
+      };
+      vxlan_header_t mask_hdr = {
+	.flags = 0xff,
+	.vni_reserved = clib_host_to_net_u32 (((u32) - 1) << 8)
+      };
+
+      memset (raw, 0, sizeof raw);
+      raw[0].item.relative = 1;
+      raw[0].item.length = vxlan_hdr_sz;
+
+      clib_memcpy (raw[0].val + raw_sz, &spec_hdr, vxlan_hdr_sz);
+      clib_memcpy (raw[1].val + raw_sz, &mask_hdr, vxlan_hdr_sz);
+
+      vec_add2 (items, item, 1);
+      item->type = RTE_FLOW_ITEM_TYPE_RAW;
+      item->spec = raw;
+      item->mask = raw + 1;
+    }
+
   vec_add2 (items, item, 1);
   item->type = RTE_FLOW_ITEM_TYPE_END;
 
+  /* Actions */
   vec_add2 (actions, action, 1);
   action->type = RTE_FLOW_ACTION_TYPE_PASSTHRU;
 
@@ -166,6 +230,18 @@ dpdk_flow_ops_fn (vnet_main_t * vnm, vnet_flow_dev_op_t op, u32 dev_instance,
   dpdk_flow_lookup_entry_t *fle = 0;
   int rv;
 
+  /* recycle old flow lookup entries only after the main loop counter
+     increases - i.e. previously DMA'ed packets were handled */
+  if (vec_len (xd->parked_lookup_indexes) > 0 &&
+      xd->parked_loop_count != dm->vlib_main->main_loop_count)
+    {
+      u32 *fl_index;
+
+      vec_foreach (fl_index, xd->parked_lookup_indexes)
+	pool_put_index (xd->flow_lookup_entries, *fl_index);
+      vec_reset_length (xd->parked_lookup_indexes);
+    }
+
   if (op == VNET_FLOW_DEV_OP_DEL_FLOW)
     {
       ASSERT (*private_data < vec_len (xd->flow_entries));
@@ -176,6 +252,15 @@ dpdk_flow_ops_fn (vnet_main_t * vnm, vnet_flow_dev_op_t op, u32 dev_instance,
 					 &xd->last_flow_error)))
 	return VNET_FLOW_ERROR_INTERNAL;
 
+      if (fe->mark)
+	{
+	  /* make sure no action is taken for in-flight (marked) packets */
+	  fle = pool_elt_at_index (xd->flow_lookup_entries, fe->mark);
+	  memset (fle, -1, sizeof (*fle));
+	  vec_add1 (xd->parked_lookup_indexes, fe->mark);
+	  xd->parked_loop_count = dm->vlib_main->main_loop_count;
+	}
+
       memset (fe, 0, sizeof (*fe));
       pool_put (xd->flow_entries, fe);
 
@@ -205,6 +290,15 @@ dpdk_flow_ops_fn (vnet_main_t * vnm, vnet_flow_dev_op_t op, u32 dev_instance,
 			 CLIB_CACHE_LINE_BYTES);
       pool_get_aligned (xd->flow_lookup_entries, fle, CLIB_CACHE_LINE_BYTES);
       fe->mark = fle - xd->flow_lookup_entries;
+
+      /* install entry in the lookup table */
+      memset (fle, -1, sizeof (*fle));
+      if (flow->actions & VNET_FLOW_ACTION_MARK)
+	fle->flow_id = flow->mark_flow_id;
+      if (flow->actions & VNET_FLOW_ACTION_REDIRECT_TO_NODE)
+	fle->next_index = flow->redirect_device_input_next_index;
+      if (flow->actions & VNET_FLOW_ACTION_BUFFER_ADVANCE)
+	fle->buffer_advance = flow->buffer_advance;
     }
   else
     fe->mark = 0;
@@ -219,7 +313,8 @@ dpdk_flow_ops_fn (vnet_main_t * vnm, vnet_flow_dev_op_t op, u32 dev_instance,
     {
     case VNET_FLOW_TYPE_IP4_N_TUPLE:
    case VNET_FLOW_TYPE_IP6_N_TUPLE:
-      if ((rv = dpdk_flow_add_n_touple (xd, flow, fe)))
+    case VNET_FLOW_TYPE_IP4_VXLAN:
+      if ((rv = dpdk_flow_add (xd, flow, fe)))
 	goto done;
       break;
     default:
@@ -229,15 +324,6 @@ dpdk_flow_ops_fn (vnet_main_t * vnm, vnet_flow_dev_op_t op, u32 dev_instance,
 
   *private_data = fe - xd->flow_entries;
 
-  /* install entry in the lookup table */
-  memset (fle, -1, sizeof (*fle));
-  if (flow->actions & VNET_FLOW_ACTION_MARK)
-    fle->flow_id = flow->mark_flow_id;
-  if (flow->actions & VNET_FLOW_ACTION_REDIRECT_TO_NODE)
-    fle->next_index = flow->redirect_device_input_next_index;
-  if (flow->actions & VNET_FLOW_ACTION_BUFFER_ADVANCE)
-    fle->buffer_advance = flow->buffer_advance;
-
 done:
   if (rv)
     {
@@ -245,7 +331,7 @@ done:
       pool_put (xd->flow_entries, fe);
       if (fle)
 	{
-	  memset (fle, 0, sizeof (*fle));
+	  memset (fle, -1, sizeof (*fle));
 	  pool_put (xd->flow_lookup_entries, fle);
 	}
     }
@@ -282,11 +368,10 @@ format_dpdk_flow (u8 * s, va_list * args)
       return s;
     }
 
-  fe = vec_elt_at_index (xd->flow_entries, private_data);
-
-  if (!fe)
+  if (private_data >= vec_len (xd->flow_entries))
    return format (s, "unknown flow");
 
+  fe = vec_elt_at_index (xd->flow_entries, private_data);
   s = format (s, "mark %u", fe->mark);
   return s;
 }
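
dpdk_flow_add () now builds one rte_flow specification for the n-tuple and VXLAN cases alike: an END-terminated array of match items plus an END-terminated array of actions, handed to the PMD in a single rte_flow_create () call. For readers new to the API, here is a minimal standalone sketch of that shape. It is not VPP code: mark_udp_flows () and the mark id 42 are invented for the example, and a real caller must supply a valid port_id and check the returned handle.

#include <rte_flow.h>

/* Sketch of the rte_flow call shape the function above builds up to:
 * match every IPv4/UDP packet on the port and mark it with id 42. */
static struct rte_flow *
mark_udp_flows (uint16_t port_id, struct rte_flow_error *err)
{
  struct rte_flow_attr attr = { .ingress = 1 };

  /* An item with NULL spec/mask matches any value at that layer */
  struct rte_flow_item items[] = {
    { .type = RTE_FLOW_ITEM_TYPE_ETH },
    { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
    { .type = RTE_FLOW_ITEM_TYPE_UDP },
    { .type = RTE_FLOW_ITEM_TYPE_END },	/* items must be END-terminated */
  };

  struct rte_flow_action_mark mark = { .id = 42 };
  struct rte_flow_action actions[] = {
    { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
    { .type = RTE_FLOW_ACTION_TYPE_END },	/* ... and so must actions */
  };

  return rte_flow_create (port_id, &attr, items, actions, err);
}

The patch does the same with vectors (vec_add2) instead of fixed arrays, and wildcards Ethernet and VLAN with the zeroed any_eth/any_vlan spec/mask pairs rather than NULL pointers; both spellings mean "match anything at this layer".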
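The RAW item deserves a closer look. relative = 1 anchors the pattern immediately after the preceding UDP item, and length covers exactly one VXLAN header. In the DPDK releases this code targets, struct rte_flow_item_raw ends in a flexible pattern[] array, which is why the union over-allocates raw_sz + vxlan_hdr_sz bytes and copies the header bytes in at offset raw_sz. The standalone sketch below shows only the byte-level spec/mask encoding; vxlan_header_t and VXLAN_FLAGS_I are re-declared locally for illustration (VPP gets them from vnet/vxlan/vxlan.h), so the field layout here is an assumption of the example, not a VPP definition.

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>		/* htonl */

/* Local stand-in for VPP's vxlan_header_t; both fields are carried in
 * network byte order on the wire. */
typedef struct
{
  uint8_t flags;		/* 0x08 = I bit: the VNI field is valid */
  uint8_t reserved[3];
  uint32_t vni_reserved;	/* 24-bit VNI in the upper bits */
} vxlan_header_t;

#define VXLAN_FLAGS_I 0x08

int
main (void)
{
  uint32_t vni = 1234;

  /* spec: the exact bytes a packet must carry after its UDP header */
  vxlan_header_t spec = {
    .flags = VXLAN_FLAGS_I,
    .vni_reserved = htonl (vni << 8),	/* VNI occupies bits 31..8 */
  };

  /* mask: all flag bits and all 24 VNI bits are significant */
  vxlan_header_t mask = {
    .flags = 0xff,
    .vni_reserved = htonl (((uint32_t) ~0) << 8),
  };

  const uint8_t *p = (const uint8_t *) &spec;
  for (unsigned int i = 0; i < sizeof (spec); i++)
    printf ("%02x ", p[i]);	/* 08 00 00 00 00 04 d2 00 for VNI 1234 */
  printf ("\n");
  (void) mask;
  return 0;
}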
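The parked-index dance in dpdk_flow_ops_fn () is the subtle part of the change. Deleting a marked flow cannot return its lookup entry to the pool at once: packets carrying that mark may already have been DMA'ed into the RX ring during the current dispatch iteration. So the entry is first poisoned (memset to -1, which per the comment in the change ensures no action is taken for in-flight packets) and its index parked; the index is only freed once main_loop_count has advanced, at which point everything received before the delete has been handled. A hypothetical miniature of that ordering rule follows, with invented names (entry_free, flow_del, recycle_if_safe) that exist in neither VPP nor DPDK.

#include <stdint.h>
#include <stdio.h>

#define POOL_SIZE 8

static int entry_free[POOL_SIZE];	/* 1 = slot returned to the pool */
static uint32_t parked[POOL_SIZE];
static int n_parked;
static uint64_t parked_loop_count;
static uint64_t main_loop_count;	/* bumped once per dispatch loop */

static void
flow_del (uint32_t index)
{
  /* Park instead of freeing: packets marked with this index may still
   * be in flight from the current loop iteration. */
  parked[n_parked++] = index;
  parked_loop_count = main_loop_count;
}

static void
recycle_if_safe (void)
{
  /* Freeing is safe only once the loop counter has moved past the
   * deletion point: every packet DMA'ed before the delete is done. */
  if (n_parked > 0 && parked_loop_count != main_loop_count)
    {
      for (int i = 0; i < n_parked; i++)
	entry_free[parked[i]] = 1;
      n_parked = 0;
    }
}

int
main (void)
{
  flow_del (3);
  recycle_if_safe ();		/* same iteration: nothing is freed */
  printf ("freed in same loop: %d\n", entry_free[3]);	/* prints 0 */

  main_loop_count++;		/* next dispatch iteration */
  recycle_if_safe ();
  printf ("freed after loop advance: %d\n", entry_free[3]);	/* prints 1 */
  return 0;
}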