X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=src%2Fplugins%2Fdpdk%2Fdevice%2Fnode.c;h=256c399ea32ed953f21bd9e2ec8fd41a3fa12f51;hb=1d104c5ecdce37301fdfea0e62a533a2e5342ee0;hp=b8fe834755caf91614675347e7fcfe3039d6b25f;hpb=aa682a39b76ee043f65313f23e134bf18fe7a47e;p=vpp.git

diff --git a/src/plugins/dpdk/device/node.c b/src/plugins/dpdk/device/node.c
index b8fe834755c..256c399ea32 100644
--- a/src/plugins/dpdk/device/node.c
+++ b/src/plugins/dpdk/device/node.c
@@ -19,6 +19,7 @@
 #include
 #include
+#include
 #include
 #include
 #include
@@ -28,54 +29,19 @@
 #include

-#ifndef CLIB_MULTIARCH_VARIANT
 static char *dpdk_error_strings[] = {
 #define _(n,s) s,
   foreach_dpdk_error
 #undef _
 };
-#endif

-STATIC_ASSERT (VNET_DEVICE_INPUT_NEXT_IP4_INPUT - 1 ==
-	       VNET_DEVICE_INPUT_NEXT_IP4_NCS_INPUT,
-	       "IP4_INPUT must follow IP4_NCS_INPUT");
-
-enum
-{
-  DPDK_RX_F_CKSUM_GOOD = 7,
-  DPDK_RX_F_CKSUM_BAD = 4,
-  DPDK_RX_F_FDIR = 2,
-};
-
-/* currently we are just copying bit positions from DPDK, but that
-   might change in future, in case we strart to be interested in something
-   stored in upper bytes. Curently we store only lower byte for perf reasons */
-STATIC_ASSERT (1 << DPDK_RX_F_CKSUM_GOOD == PKT_RX_IP_CKSUM_GOOD, "");
-STATIC_ASSERT (1 << DPDK_RX_F_CKSUM_BAD == PKT_RX_IP_CKSUM_BAD, "");
-STATIC_ASSERT (1 << DPDK_RX_F_FDIR == PKT_RX_FDIR, "");
-STATIC_ASSERT ((PKT_RX_IP_CKSUM_GOOD | PKT_RX_IP_CKSUM_BAD | PKT_RX_FDIR) <
+/* make sure all flags we need are stored in lower 8 bits */
+STATIC_ASSERT ((PKT_RX_IP_CKSUM_BAD | PKT_RX_FDIR) <
	       256, "dpdk flags not in lower byte, fix needed");

-always_inline u32
-dpdk_rx_next (vlib_node_runtime_t * node, u16 etype, u8 flags)
-{
-  if (PREDICT_TRUE (etype == clib_host_to_net_u16 (ETHERNET_TYPE_IP4)))
-    {
-      /* keep it branchless */
-      u32 is_good = (flags >> DPDK_RX_F_CKSUM_GOOD) & 1;
-      return VNET_DEVICE_INPUT_NEXT_IP4_INPUT - is_good;
-    }
-  else if (PREDICT_TRUE (etype == clib_host_to_net_u16 (ETHERNET_TYPE_IP6)))
-    return VNET_DEVICE_INPUT_NEXT_IP6_INPUT;
-  else if (PREDICT_TRUE (etype == clib_host_to_net_u16 (ETHERNET_TYPE_MPLS)))
-    return VNET_DEVICE_INPUT_NEXT_MPLS_INPUT;
-  else
-    return VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
-}
-
 static_always_inline uword
 dpdk_process_subseq_segs (vlib_main_t * vm, vlib_buffer_t * b,
-			  struct rte_mbuf * mb, vlib_buffer_free_list_t * fl)
+			  struct rte_mbuf *mb, vlib_buffer_t * bt)
 {
   u8 nb_seg = 1;
   struct rte_mbuf *mb_seg = 0;
@@ -94,10 +60,7 @@ dpdk_process_subseq_segs (vlib_main_t * vm, vlib_buffer_t * b,
       ASSERT (mb_seg != 0);

       b_seg = vlib_buffer_from_rte_mbuf (mb_seg);
-      vlib_buffer_init_for_free_list (b_seg, fl);
-
-      ASSERT ((b_seg->flags & VLIB_BUFFER_NEXT_PRESENT) == 0);
-      ASSERT (b_seg->current_data == 0);
+      vlib_buffer_copy_template (b_seg, bt);

       /*
       * The driver (e.g. virtio) may not put the packet data at the start
@@ -142,38 +105,6 @@ dpdk_prefetch_buffer_x4 (struct rte_mbuf *mb[])
   CLIB_PREFETCH (b, CLIB_CACHE_LINE_BYTES, LOAD);
 }

-static_always_inline void
-dpdk_prefetch_buffer_data_x4 (struct rte_mbuf *mb[])
-{
-  vlib_buffer_t *b;
-  b = vlib_buffer_from_rte_mbuf (mb[0]);
-  CLIB_PREFETCH (b->data, CLIB_CACHE_LINE_BYTES, LOAD);
-  b = vlib_buffer_from_rte_mbuf (mb[1]);
-  CLIB_PREFETCH (b->data, CLIB_CACHE_LINE_BYTES, LOAD);
-  b = vlib_buffer_from_rte_mbuf (mb[2]);
-  CLIB_PREFETCH (b->data, CLIB_CACHE_LINE_BYTES, LOAD);
-  b = vlib_buffer_from_rte_mbuf (mb[3]);
-  CLIB_PREFETCH (b->data, CLIB_CACHE_LINE_BYTES, LOAD);
-}
-
-static inline void
-poll_rate_limit (dpdk_main_t * dm)
-{
-  /* Limit the poll rate by sleeping for N msec between polls */
-  if (PREDICT_FALSE (dm->poll_sleep_usec != 0))
-    {
-      struct timespec ts, tsrem;
-
-      ts.tv_sec = 0;
-      ts.tv_nsec = 1000 * dm->poll_sleep_usec;
-
-      while (nanosleep (&ts, &tsrem) < 0)
-	{
-	  ts = tsrem;
-	}
-    }
-}
-
 /** \brief Main DPDK input node
     @node dpdk-input
@@ -213,50 +144,16 @@ poll_rate_limit (dpdk_main_t * dm)
     xd->per_interface_next_index
 */

-static_always_inline void
-dpdk_mbuf_to_buffer_index_x4 (vlib_main_t * vm, struct rte_mbuf **mb,
-			      u32 * buffers)
-{
-#ifdef CLIB_HAVE_VEC256
-  vlib_buffer_main_t *bm = &buffer_main;
-  u64x4 v = *(u64x4 *) mb;
-  u32x8 v2, mask = { 0, 2, 4, 6, 1, 3, 5, 7 };
-
-  /* load 4 pointers into 256-bit register */
-  v = u64x4_load_unaligned (mb);
-
-  /* vlib_buffer_t is straight after rte_mbuf so advance all 4
-     pointers for size of rte_mbuf */
-  v += u64x4_splat (sizeof (struct rte_mbuf));
-
-  /* calculate 4 buffer indices in paralled */
-  v = (v - u64x4_splat (bm->buffer_mem_start)) >> CLIB_LOG2_CACHE_LINE_BYTES;
-
-  /* permute 256-bit register so lower u32s of each buffer index are
-   * placed into lower 128-bits */
-  v2 = u32x8_permute ((u32x8) v, mask);
-
-  /* extract lower 128-bits and save them to the array of buffer indices */
-  u32x4_store_unaligned (u32x8_extract_lo (v2), buffers);
-#else
-  /* equivalent non-nector implementation */
-  buffers[0] = vlib_get_buffer_index (vm, vlib_buffer_from_rte_mbuf (mb[0]));
-  buffers[1] = vlib_get_buffer_index (vm, vlib_buffer_from_rte_mbuf (mb[1]));
-  buffers[2] = vlib_get_buffer_index (vm, vlib_buffer_from_rte_mbuf (mb[2]));
-  buffers[3] = vlib_get_buffer_index (vm, vlib_buffer_from_rte_mbuf (mb[3]));
-#endif
-}
-
-static_always_inline u8
-dpdk_ol_flags_extract (struct rte_mbuf **mb, u8 * flags, int count)
+static_always_inline u16
+dpdk_ol_flags_extract (struct rte_mbuf **mb, u16 * flags, int count)
 {
-  u8 rv = 0;
+  u16 rv = 0;
   int i;
   for (i = 0; i < count; i++)
     {
       /* all flags we are interested in are in lower 8 bits but
          that might change */
-      flags[i] = (u8) mb[i]->ol_flags;
+      flags[i] = (u16) mb[i]->ol_flags;
       rv |= flags[i];
     }
   return rv;
@@ -264,27 +161,23 @@

 static_always_inline uword
 dpdk_process_rx_burst (vlib_main_t * vm, dpdk_per_thread_data_t * ptd,
-		       uword n_rx_packets, int maybe_multiseg, u8 * or_flagsp)
+		       uword n_rx_packets, int maybe_multiseg,
+		       u16 * or_flagsp)
 {
   u32 n_left = n_rx_packets;
   vlib_buffer_t *b[4];
-  vlib_buffer_free_list_t *fl;
   struct rte_mbuf **mb = ptd->mbufs;
   uword n_bytes = 0;
-  i16 off;
-  u8 *flags, or_flags = 0;
-  u16 *next;
-
-  fl = vlib_buffer_get_free_list (vm, VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);
+  u16 *flags, or_flags = 0;
+  vlib_buffer_t bt;

   mb = ptd->mbufs;
   flags = ptd->flags;
-  next = ptd->next;

+  /* copy template into local variable - will save per packet load */
+  vlib_buffer_copy_template (&bt, &ptd->buffer_template);

   while (n_left >= 8)
     {
-      CLIB_PREFETCH (mb + 8, CLIB_CACHE_LINE_BYTES, LOAD);
-
       dpdk_prefetch_buffer_x4 (mb + 4);

       b[0] = vlib_buffer_from_rte_mbuf (mb[0]);
@@ -292,56 +185,34 @@ dpdk_process_rx_burst (vlib_main_t * vm, dpdk_per_thread_data_t * ptd,
       b[2] = vlib_buffer_from_rte_mbuf (mb[2]);
       b[3] = vlib_buffer_from_rte_mbuf (mb[3]);

-      clib_memcpy64_x4 (b[0], b[1], b[2], b[3], &ptd->buffer_template);
+      vlib_buffer_copy_template (b[0], &bt);
+      vlib_buffer_copy_template (b[1], &bt);
+      vlib_buffer_copy_template (b[2], &bt);
+      vlib_buffer_copy_template (b[3], &bt);

       dpdk_prefetch_mbuf_x4 (mb + 4);

       or_flags |= dpdk_ol_flags_extract (mb, flags, 4);
       flags += 4;

-      /* we temporary store relative offset of ethertype into next[x]
-         so we can prefetch and get it faster later */
-
-      off = mb[0]->data_off;
-      next[0] = off + STRUCT_OFFSET_OF (ethernet_header_t, type);
-      off -= RTE_PKTMBUF_HEADROOM;
-      vnet_buffer (b[0])->l2_hdr_offset = off;
-      b[0]->current_data = off;
-
-      off = mb[1]->data_off;
-      next[1] = off + STRUCT_OFFSET_OF (ethernet_header_t, type);
-      off -= RTE_PKTMBUF_HEADROOM;
-      vnet_buffer (b[1])->l2_hdr_offset = off;
-      b[1]->current_data = off;
-
-      off = mb[2]->data_off;
-      next[2] = off + STRUCT_OFFSET_OF (ethernet_header_t, type);
-      off -= RTE_PKTMBUF_HEADROOM;
-      vnet_buffer (b[2])->l2_hdr_offset = off;
-      b[2]->current_data = off;
-
-      off = mb[3]->data_off;
-      next[3] = off + STRUCT_OFFSET_OF (ethernet_header_t, type);
-      off -= RTE_PKTMBUF_HEADROOM;
-      vnet_buffer (b[3])->l2_hdr_offset = off;
-      b[3]->current_data = off;
-
-      b[0]->current_length = mb[0]->data_len;
-      b[1]->current_length = mb[1]->data_len;
-      b[2]->current_length = mb[2]->data_len;
-      b[3]->current_length = mb[3]->data_len;
-
-      n_bytes += mb[0]->data_len;
-      n_bytes += mb[1]->data_len;
-      n_bytes += mb[2]->data_len;
-      n_bytes += mb[3]->data_len;
+      b[0]->current_data = mb[0]->data_off - RTE_PKTMBUF_HEADROOM;
+      n_bytes += b[0]->current_length = mb[0]->data_len;
+
+      b[1]->current_data = mb[1]->data_off - RTE_PKTMBUF_HEADROOM;
+      n_bytes += b[1]->current_length = mb[1]->data_len;
+
+      b[2]->current_data = mb[2]->data_off - RTE_PKTMBUF_HEADROOM;
+      n_bytes += b[2]->current_length = mb[2]->data_len;
+
+      b[3]->current_data = mb[3]->data_off - RTE_PKTMBUF_HEADROOM;
+      n_bytes += b[3]->current_length = mb[3]->data_len;

       if (maybe_multiseg)
	{
-	  n_bytes += dpdk_process_subseq_segs (vm, b[0], mb[0], fl);
-	  n_bytes += dpdk_process_subseq_segs (vm, b[1], mb[1], fl);
-	  n_bytes += dpdk_process_subseq_segs (vm, b[2], mb[2], fl);
-	  n_bytes += dpdk_process_subseq_segs (vm, b[3], mb[3], fl);
+	  n_bytes += dpdk_process_subseq_segs (vm, b[0], mb[0], &bt);
+	  n_bytes += dpdk_process_subseq_segs (vm, b[1], mb[1], &bt);
+	  n_bytes += dpdk_process_subseq_segs (vm, b[2], mb[2], &bt);
+	  n_bytes += dpdk_process_subseq_segs (vm, b[3], mb[3], &bt);
	}

       VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[0]);
@@ -352,31 +223,25 @@ dpdk_process_rx_burst (vlib_main_t * vm, dpdk_per_thread_data_t * ptd,

       /* next */
       mb += 4;
       n_left -= 4;
-      next += 4;
     }

   while (n_left)
     {
       b[0] = vlib_buffer_from_rte_mbuf (mb[0]);
-      clib_memcpy (b[0], &ptd->buffer_template, 64);
+      vlib_buffer_copy_template (b[0], &bt);
       or_flags |= dpdk_ol_flags_extract (mb, flags, 1);
       flags += 1;

-      off = mb[0]->data_off;
-      next[0] = off + STRUCT_OFFSET_OF (ethernet_header_t, type);
-      off -= RTE_PKTMBUF_HEADROOM;
-      vnet_buffer (b[0])->l2_hdr_offset = off;
-      b[0]->current_data = off;
-      b[0]->current_length = mb[0]->data_len;
-      n_bytes += mb[0]->data_len;
+      b[0]->current_data = mb[0]->data_off - RTE_PKTMBUF_HEADROOM;
+      n_bytes += b[0]->current_length = mb[0]->data_len;
+
       if (maybe_multiseg)
-	n_bytes += dpdk_process_subseq_segs (vm, b[0], mb[0], fl);
+	n_bytes += dpdk_process_subseq_segs (vm, b[0], mb[0], &bt);

       VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[0]);

       /* next */
       mb += 1;
       n_left -= 1;
-      next += 1;
     }

   *or_flagsp = or_flags;
@@ -384,68 +249,36 @@
 }

 static_always_inline void
-dpdk_set_next_from_etype (vlib_main_t * vm, vlib_node_runtime_t * node,
-			  dpdk_per_thread_data_t * ptd, uword n_rx_packets)
+dpdk_process_flow_offload (dpdk_device_t * xd, dpdk_per_thread_data_t * ptd,
+			   uword n_rx_packets)
 {
-  vlib_buffer_t *b[4];
-  i16 adv[4];
-  u16 etype[4];
-  struct rte_mbuf **mb = ptd->mbufs;
-  u8 *flags = ptd->flags;
-  u16 *next = ptd->next;
-  u32 n_left = n_rx_packets;
+  uword n;
+  dpdk_flow_lookup_entry_t *fle;
+  vlib_buffer_t *b0;

-  while (n_left >= 12)
+  /* TODO prefetch and quad-loop */
+  for (n = 0; n < n_rx_packets; n++)
     {
-      dpdk_prefetch_buffer_data_x4 (mb + 8);
-      dpdk_prefetch_buffer_x4 (mb + 8);
+      if ((ptd->flags[n] & PKT_RX_FDIR_ID) == 0)
+	continue;

-      b[0] = vlib_buffer_from_rte_mbuf (mb[0]);
-      b[1] = vlib_buffer_from_rte_mbuf (mb[1]);
-      b[2] = vlib_buffer_from_rte_mbuf (mb[2]);
-      b[3] = vlib_buffer_from_rte_mbuf (mb[3]);
-      etype[0] = *(u16 *) ((u8 *) mb[0] + next[0] + sizeof (vlib_buffer_t));
-      etype[1] = *(u16 *) ((u8 *) mb[1] + next[1] + sizeof (vlib_buffer_t));
-      etype[2] = *(u16 *) ((u8 *) mb[2] + next[2] + sizeof (vlib_buffer_t));
-      etype[3] = *(u16 *) ((u8 *) mb[3] + next[3] + sizeof (vlib_buffer_t));
-      next[0] = dpdk_rx_next (node, etype[0], flags[0]);
-      next[1] = dpdk_rx_next (node, etype[1], flags[1]);
-      next[2] = dpdk_rx_next (node, etype[2], flags[2]);
-      next[3] = dpdk_rx_next (node, etype[3], flags[3]);
-      adv[0] = device_input_next_node_advance[next[0]];
-      adv[1] = device_input_next_node_advance[next[1]];
-      adv[2] = device_input_next_node_advance[next[2]];
-      adv[3] = device_input_next_node_advance[next[3]];
-      b[0]->current_data += adv[0];
-      b[1]->current_data += adv[1];
-      b[2]->current_data += adv[2];
-      b[3]->current_data += adv[3];
-      b[0]->current_length -= adv[0];
-      b[1]->current_length -= adv[1];
-      b[2]->current_length -= adv[2];
-      b[3]->current_length -= adv[3];
+      fle = pool_elt_at_index (xd->flow_lookup_entries,
+			       ptd->mbufs[n]->hash.fdir.hi);

-      /* next */
-      next += 4;
-      mb += 4;
-      n_left -= 4;
-      flags += 4;
-    }
+      if (fle->next_index != (u16) ~ 0)
+	ptd->next[n] = fle->next_index;

-  while (n_left)
-    {
-      b[0] = vlib_buffer_from_rte_mbuf (mb[0]);
-      next[0] = *(u16 *) ((u8 *) mb[0] + next[0] + sizeof (vlib_buffer_t));
-      next[0] = dpdk_rx_next (node, next[0], flags[0]);
-      adv[0] = device_input_next_node_advance[next[0]];
-      b[0]->current_data += adv[0];
-      b[0]->current_length -= adv[0];
+      if (fle->flow_id != ~0)
+	{
+	  b0 = vlib_buffer_from_rte_mbuf (ptd->mbufs[n]);
+	  b0->flow_id = fle->flow_id;
+	}

-      /* next */
-      next += 1;
-      mb += 1;
-      n_left -= 1;
-      flags += 1;
+      if (fle->buffer_advance != ~0)
+	{
+	  b0 = vlib_buffer_from_rte_mbuf (ptd->mbufs[n]);
+	  vlib_buffer_advance (b0, fle->buffer_advance);
+	}
     }
 }

@@ -454,15 +287,16 @@ dpdk_device_input (vlib_main_t * vm, dpdk_main_t * dm, dpdk_device_t * xd,
			  vlib_node_runtime_t * node, u32 thread_index,
			  u16 queue_id)
 {
   uword n_rx_packets = 0, n_rx_bytes;
+  dpdk_rx_queue_t *rxq = vec_elt_at_index (xd->rx_queues, queue_id);
   u32 n_left, n_trace;
   u32 *buffers;
   u32 next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
   struct rte_mbuf **mb;
   vlib_buffer_t *b0;
-  int known_next = 0;
   u16 *next;
-  u8 or_flags;
+  u16 or_flags;
   u32 n;
+  int single_next = 0;

   dpdk_per_thread_data_t *ptd = vec_elt_at_index (dm->per_thread_data,
						   thread_index);
@@ -474,7 +308,7 @@ dpdk_device_input (vlib_main_t * vm, dpdk_main_t * dm, dpdk_device_t * xd,
   /* get up to DPDK_RX_BURST_SZ buffers from PMD */
   while (n_rx_packets < DPDK_RX_BURST_SZ)
     {
-      n = rte_eth_rx_burst (xd->device_index, queue_id,
+      n = rte_eth_rx_burst (xd->port_id, queue_id,
			     ptd->mbufs + n_rx_packets,
			     DPDK_RX_BURST_SZ - n_rx_packets);
       n_rx_packets += n;
@@ -491,160 +325,112 @@ dpdk_device_input (vlib_main_t * vm, dpdk_main_t * dm, dpdk_device_t * xd,
   bt->error = node->errors[DPDK_ERROR_NONE];

   /* as DPDK is allocating empty buffers from mempool provided before
      interface start for each queue, it is safe to store this in the
      template */
-  bt->buffer_pool_index = xd->buffer_pool_for_queue[queue_id];
+  bt->buffer_pool_index = rxq->buffer_pool_index;
+  bt->ref_count = 1;
+  vnet_buffer (bt)->feature_arc_index = 0;
+  bt->current_config_index = 0;

   /* receive burst of packets from DPDK PMD */
   if (PREDICT_FALSE (xd->per_interface_next_index != ~0))
-    {
-      known_next = 1;
-      next_index = xd->per_interface_next_index;
-    }
+    next_index = xd->per_interface_next_index;

-  /* as all packets belong to thr same interface feature arc lookup
+  /* as all packets belong to the same interface feature arc lookup
      can be done once and result stored in the buffer template */
   if (PREDICT_FALSE (vnet_device_input_have_features (xd->sw_if_index)))
-    {
-      vnet_feature_start_device_input_x1 (xd->sw_if_index, &next_index, bt);
-      known_next = 1;
-    }
+    vnet_feature_start_device_input_x1 (xd->sw_if_index, &next_index, bt);

   if (xd->flags & DPDK_DEVICE_FLAG_MAYBE_MULTISEG)
     n_rx_bytes = dpdk_process_rx_burst (vm, ptd, n_rx_packets, 1, &or_flags);
   else
     n_rx_bytes = dpdk_process_rx_burst (vm, ptd, n_rx_packets, 0, &or_flags);

-  if (PREDICT_FALSE (known_next))
+  if (PREDICT_FALSE (or_flags & PKT_RX_FDIR))
     {
+      /* some packets will need to go to different next nodes */
       for (n = 0; n < n_rx_packets; n++)
	ptd->next[n] = next_index;

-      vnet_buffer (bt)->feature_arc_index = 0;
-      bt->current_config_index = 0;
+      /* flow offload - process if rx flow offload enabled and at least one
+         packet is marked */
+      if (PREDICT_FALSE ((xd->flags & DPDK_DEVICE_FLAG_RX_FLOW_OFFLOAD) &&
+			 (or_flags & PKT_RX_FDIR)))
+	dpdk_process_flow_offload (xd, ptd, n_rx_packets);
+
+      /* enqueue buffers to the next node */
+      vlib_get_buffer_indices_with_offset (vm, (void **) ptd->mbufs,
+					   ptd->buffers, n_rx_packets,
+					   sizeof (struct rte_mbuf));
+
+      vlib_buffer_enqueue_to_next (vm, node, ptd->buffers, ptd->next,
+				   n_rx_packets);
     }
   else
-    dpdk_set_next_from_etype (vm, node, ptd, n_rx_packets);
-
-  /* is at least one packet marked as ip4 checksum bad? */
-  if (PREDICT_FALSE (or_flags & (1 << DPDK_RX_F_CKSUM_BAD)))
-    for (n = 0; n < n_rx_packets; n++)
-      {
-	if ((ptd->flags[n] & (1 << DPDK_RX_F_CKSUM_BAD)) == 0)
-	  continue;
-	if (ptd->next[n] != VNET_DEVICE_INPUT_NEXT_IP4_INPUT)
-	  continue;
-
-	b0 = vlib_buffer_from_rte_mbuf (ptd->mbufs[n]);
-	b0->error = node->errors[DPDK_ERROR_IP_CHECKSUM_ERROR];
-	ptd->next[n] = VNET_DEVICE_INPUT_NEXT_DROP;
-      }
-
-  /* enqueue buffers to the next node */
-  n_left = n_rx_packets;
-  next = ptd->next;
-  buffers = ptd->buffers;
-  mb = ptd->mbufs;
-  while (n_left)
     {
-      u32 n_left_to_next;
-      u32 *to_next;
-      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
-#ifdef CLIB_HAVE_VEC256
-      while (n_left >= 16 && n_left_to_next >= 16)
-	{
-	  u16x16 next16 = u16x16_load_unaligned (next);
-	  if (u16x16_is_all_equal (next16, next_index))
-	    {
-	      dpdk_mbuf_to_buffer_index_x4 (vm, mb, buffers);
-	      dpdk_mbuf_to_buffer_index_x4 (vm, mb + 4, buffers + 4);
-	      dpdk_mbuf_to_buffer_index_x4 (vm, mb + 8, buffers + 8);
-	      dpdk_mbuf_to_buffer_index_x4 (vm, mb + 12, buffers + 12);
-	      clib_memcpy (to_next, buffers, 16 * sizeof (u32));
-	      to_next += 16;
-	      n_left_to_next -= 16;
-	      buffers += 16;
-	      n_left -= 16;
-	      next += 16;
-	      mb += 16;
-	    }
-	  else
-	    {
-	      dpdk_mbuf_to_buffer_index_x4 (vm, mb, buffers);
-	      clib_memcpy (to_next, buffers, 4 * sizeof (u32));
-	      to_next += 4;
-	      n_left_to_next -= 4;
-
-	      vlib_validate_buffer_enqueue_x4 (vm, node, next_index, to_next,
-					       n_left_to_next, buffers[0],
-					       buffers[1], buffers[2],
-					       buffers[3], next[0], next[1],
-					       next[2], next[3]);
-	      /* next */
-	      buffers += 4;
-	      n_left -= 4;
-	      next += 4;
-	      mb += 4;
-	    }
-	}
-#endif
-      while (n_left >= 4 && n_left_to_next >= 4)
-	{
-	  dpdk_mbuf_to_buffer_index_x4 (vm, mb, buffers);
-	  clib_memcpy (to_next, buffers, 4 * sizeof (u32));
-	  to_next += 4;
-	  n_left_to_next -= 4;
-
-	  vlib_validate_buffer_enqueue_x4 (vm, node, next_index, to_next,
-					   n_left_to_next, buffers[0],
-					   buffers[1], buffers[2], buffers[3],
-					   next[0], next[1], next[2],
-					   next[3]);
-	  /* next */
-	  buffers += 4;
-	  n_left -= 4;
-	  next += 4;
-	  mb += 4;
-	}
-      while (n_left && n_left_to_next)
+      u32 *to_next, n_left_to_next;
+
+      vlib_get_new_next_frame (vm, node, next_index, to_next, n_left_to_next);
+      vlib_get_buffer_indices_with_offset (vm, (void **) ptd->mbufs, to_next,
+					   n_rx_packets,
+					   sizeof (struct rte_mbuf));
+
+      if (PREDICT_TRUE (next_index == VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT))
	{
-	  to_next[0] = buffers[0] =
-	    vlib_get_buffer_index (vm, vlib_buffer_from_rte_mbuf (mb[0]));
-	  to_next += 1;
-	  n_left_to_next -= 1;
-	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
-					   n_left_to_next, buffers[0],
-					   next[0]);
-	  /* next */
-	  buffers += 1;
-	  n_left -= 1;
-	  next += 1;
-	  mb += 1;
+	  vlib_next_frame_t *nf;
+	  vlib_frame_t *f;
+	  ethernet_input_frame_t *ef;
+	  nf = vlib_node_runtime_get_next_frame (vm, node, next_index);
+	  f = vlib_get_frame (vm, nf->frame);
+	  f->flags = ETH_INPUT_FRAME_F_SINGLE_SW_IF_IDX;
+
+	  ef = vlib_frame_scalar_args (f);
+	  ef->sw_if_index = xd->sw_if_index;
+	  ef->hw_if_index = xd->hw_if_index;
+
+	  /* if PMD supports ip4 checksum check and there are no packets
+	     marked as ip4 checksum bad we can notify ethernet input so it
+	     can send packets to ip4-input-no-checksum node */
+	  if (xd->flags & DPDK_DEVICE_FLAG_RX_IP4_CKSUM &&
+	      (or_flags & PKT_RX_IP_CKSUM_BAD) == 0)
+	    f->flags |= ETH_INPUT_FRAME_F_IP4_CKSUM_OK;
+	  vlib_frame_no_append (f);
	}
+      n_left_to_next -= n_rx_packets;
       vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+      single_next = 1;
     }

   /* packet trace if enabled */
-  if ((n_trace = vlib_get_trace_count (vm, node)))
+  if (PREDICT_FALSE ((n_trace = vlib_get_trace_count (vm, node))))
     {
+      if (single_next)
+	vlib_get_buffer_indices_with_offset (vm, (void **) ptd->mbufs,
+					     ptd->buffers, n_rx_packets,
+					     sizeof (struct rte_mbuf));
+
       n_left = n_rx_packets;
       buffers = ptd->buffers;
       mb = ptd->mbufs;
       next = ptd->next;
+
       while (n_trace && n_left)
	{
	  b0 = vlib_get_buffer (vm, buffers[0]);
-	  vlib_trace_buffer (vm, node, next[0], b0, /* follow_chain */ 0);
+	  if (single_next == 0)
+	    next_index = next[0];
+	  vlib_trace_buffer (vm, node, next_index, b0, /* follow_chain */ 0);

	  dpdk_rx_trace_t *t0 = vlib_add_trace (vm, node, b0, sizeof t0[0]);
	  t0->queue_index = queue_id;
	  t0->device_index = xd->device_index;
	  t0->buffer_index = vlib_get_buffer_index (vm, b0);

-	  clib_memcpy (&t0->mb, mb[0], sizeof t0->mb);
-	  clib_memcpy (&t0->buffer, b0, sizeof b0[0] - sizeof b0->pre_data);
-	  clib_memcpy (t0->buffer.pre_data, b0->data,
-		       sizeof t0->buffer.pre_data);
-	  clib_memcpy (&t0->data, mb[0]->buf_addr + mb[0]->data_off,
-		       sizeof t0->data);
+	  clib_memcpy_fast (&t0->mb, mb[0], sizeof t0->mb);
+	  clib_memcpy_fast (&t0->buffer, b0,
+			    sizeof b0[0] - sizeof b0->pre_data);
+	  clib_memcpy_fast (t0->buffer.pre_data, b0->data,
+			    sizeof t0->buffer.pre_data);
+	  clib_memcpy_fast (&t0->data, mb[0]->buf_addr + mb[0]->data_off,
+			    sizeof t0->data);

	  n_trace--;
	  n_left--;
	  buffers++;
@@ -664,8 +450,7 @@ dpdk_device_input (vlib_main_t * vm, dpdk_main_t * dm, dpdk_device_t * xd,
   return n_rx_packets;
 }

-uword CLIB_CPU_OPTIMIZED
-CLIB_MULTIARCH_FN (dpdk_input) (vlib_main_t * vm, vlib_node_runtime_t * node,
+VLIB_NODE_FN (dpdk_input_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
				vlib_frame_t * f)
 {
   dpdk_main_t *dm = &dpdk_main;
@@ -682,25 +467,19 @@ CLIB_MULTIARCH_FN (dpdk_input) (vlib_main_t * vm, vlib_node_runtime_t * node,
   foreach_device_and_queue (dq, rt->devices_and_queues)
     {
       xd = vec_elt_at_index(dm->devices, dq->dev_instance);
-      if (PREDICT_FALSE (xd->flags & DPDK_DEVICE_FLAG_BOND_SLAVE))
-	continue;	/* Do not poll slave to a bonded interface */
       n_rx_packets += dpdk_device_input (vm, dm, xd, node, thread_index,
					 dq->queue_id);
     }
   /* *INDENT-ON* */
-
-  poll_rate_limit (dm);
-
   return n_rx_packets;
 }

-#ifndef CLIB_MULTIARCH_VARIANT
 /* *INDENT-OFF* */
 VLIB_REGISTER_NODE (dpdk_input_node) = {
-  .function = dpdk_input,
   .type = VLIB_NODE_TYPE_INPUT,
   .name = "dpdk-input",
   .sibling_of = "device-input",
+  .flags = VLIB_NODE_FLAG_TRACE_SUPPORTED,

   /* Will be enabled if/when hardware is detected. */
   .state = VLIB_NODE_STATE_DISABLED,
@@ -713,21 +492,6 @@ VLIB_REGISTER_NODE (dpdk_input_node) = {
 };
 /* *INDENT-ON* */

-vlib_node_function_t __clib_weak dpdk_input_avx512;
-vlib_node_function_t __clib_weak dpdk_input_avx2;
-
-#if __x86_64__
-static void __clib_constructor
-dpdk_input_multiarch_select (void)
-{
-  if (dpdk_input_avx512 && clib_cpu_supports_avx512f ())
-    dpdk_input_node.function = dpdk_input_avx512;
-  else if (dpdk_input_avx2 && clib_cpu_supports_avx2 ())
-    dpdk_input_node.function = dpdk_input_avx2;
-}
-#endif
-#endif
-
 /*
 * fd.io coding-style-patch-verification: ON
 *
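
For readers following the enqueue change above: both the removed dpdk_mbuf_to_buffer_index_x4() and the vlib_get_buffer_indices_with_offset() call that replaces it rely on the same arithmetic, visible in the removed AVX2 code. A vlib_buffer_t sits straight after each rte_mbuf, and a buffer index is the cache-line offset of that vlib_buffer_t from the start of buffer memory. The standalone sketch below models the scalar version of this computation; the pool base address, mbuf size, and cache-line constants are illustrative assumptions, not values taken from VPP or a real mempool.

/* Hedged sketch of the mbuf-pointer to buffer-index conversion.
   All constants and addresses here are hypothetical. */
#include <stdint.h>
#include <stdio.h>

#define LOG2_CACHE_LINE_BYTES 6	  /* assume 64-byte cache lines */
#define SIZEOF_RTE_MBUF 128	  /* assume sizeof (struct rte_mbuf) */

/* vlib_buffer_t is laid out immediately after the rte_mbuf, so the
   buffer index is the cache-line offset of the vlib_buffer_t from the
   base of buffer memory */
static uint32_t
mbuf_to_buffer_index (uintptr_t mbuf, uintptr_t buffer_mem_start)
{
  uintptr_t b = mbuf + SIZEOF_RTE_MBUF;	/* advance past the rte_mbuf */
  return (uint32_t) ((b - buffer_mem_start) >> LOG2_CACHE_LINE_BYTES);
}

int
main (void)
{
  uintptr_t base = 0x7f0000000000;  /* hypothetical pool base address */
  uintptr_t mbuf = base + 5 * 2048; /* hypothetical mbuf in the 6th
				       2048-byte slot of the pool */
  printf ("buffer index: %u\n", mbuf_to_buffer_index (mbuf, base));
  return 0;
}

The removed vector variant performed this same computation on four pointers at once with u64x4 arithmetic; vlib_get_buffer_indices_with_offset() generalizes it for any array of pointers sitting at a fixed offset from their vlib_buffer_t, which is why the diff passes sizeof (struct rte_mbuf) as the offset argument.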