X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=src%2Fplugins%2Fmemif%2Fnode.c;h=3cb79541c17d84024ca9cc11ebc2172e2bb5150b;hb=41684c2d78d5cefa94dcb4e19bcd8eb7feb00bd0;hp=e1ee63b47bb4ccfa7df2990c45133a6b748a50a1;hpb=cef1db9c13f57a1fc49c9e500adffafa0b9ca728;p=vpp.git diff --git a/src/plugins/memif/node.c b/src/plugins/memif/node.c index e1ee63b47bb..3cb79541c17 100644 --- a/src/plugins/memif/node.c +++ b/src/plugins/memif/node.c @@ -181,13 +181,13 @@ memif_device_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node, memif_ring_t *ring; memif_queue_t *mq; u16 buffer_size = VLIB_BUFFER_DEFAULT_FREE_LIST_BYTES; - u32 next_index; uword n_trace = vlib_get_trace_count (vm, node); + u16 nexts[MEMIF_RX_VECTOR_SZ], *next = nexts; + u32 _to_next_bufs[MEMIF_RX_VECTOR_SZ], *to_next_bufs = _to_next_bufs, *bi; u32 n_rx_packets = 0, n_rx_bytes = 0; - u32 n_left, *to_next = 0; - u32 bi0, bi1, bi2, bi3; + u32 n_left, n_left_to_next, next_index; vlib_buffer_t *b0, *b1, *b2, *b3; - u32 thread_index = vlib_get_thread_index (); + u32 thread_index = vm->thread_index; memif_per_thread_data_t *ptd = vec_elt_at_index (mm->per_thread_data, thread_index); vlib_buffer_t *bt = &ptd->buffer_template; @@ -204,9 +204,6 @@ memif_device_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node, ring_size = 1 << mq->log2_ring_size; mask = ring_size - 1; - next_index = (mode == MEMIF_INTERFACE_MODE_IP) ? - VNET_DEVICE_INPUT_NEXT_IP6_INPUT : VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT; - /* asume that somebody will want to add ethernet header on the packet so start with IP header at offset 14 */ start_offset = (mode == MEMIF_INTERFACE_MODE_IP) ? 14 : 0; @@ -308,14 +305,14 @@ memif_device_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node, b2 = vlib_get_buffer (vm, ptd->buffers[co[2].buffer_vec_index]); b3 = vlib_get_buffer (vm, ptd->buffers[co[3].buffer_vec_index]); - clib_memcpy (b0->data + co[0].buffer_offset, co[0].data, - co[0].data_len); - clib_memcpy (b1->data + co[1].buffer_offset, co[1].data, - co[1].data_len); - clib_memcpy (b2->data + co[2].buffer_offset, co[2].data, - co[2].data_len); - clib_memcpy (b3->data + co[3].buffer_offset, co[3].data, - co[3].data_len); + clib_memcpy_fast (b0->data + co[0].buffer_offset, co[0].data, + co[0].data_len); + clib_memcpy_fast (b1->data + co[1].buffer_offset, co[1].data, + co[1].data_len); + clib_memcpy_fast (b2->data + co[2].buffer_offset, co[2].data, + co[2].data_len); + clib_memcpy_fast (b3->data + co[3].buffer_offset, co[3].data, + co[3].data_len); co += 4; n_left -= 4; @@ -323,8 +320,8 @@ memif_device_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node, while (n_left) { b0 = vlib_get_buffer (vm, ptd->buffers[co[0].buffer_vec_index]); - clib_memcpy (b0->data + co[0].buffer_offset, co[0].data, - co[0].data_len); + clib_memcpy_fast (b0->data + co[0].buffer_offset, co[0].data, + co[0].data_len); co += 1; n_left -= 1; } @@ -340,156 +337,162 @@ memif_device_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node, mq->last_tail = cur_slot; } - u32 n_from = n_rx_packets; - po = ptd->packet_ops; - + /* prepare buffer template and next indices */ vnet_buffer (bt)->sw_if_index[VLIB_RX] = mif->sw_if_index; + vnet_buffer (bt)->feature_arc_index = 0; bt->current_data = start_offset; + bt->current_config_index = 0; - while (n_from) + if (mode == MEMIF_INTERFACE_MODE_ETHERNET) { - u32 n_left_to_next; - u32 next0, next1, next2, next3; - - vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); - while (n_from >= 8 && n_left_to_next >= 4) + next_index = 
VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT; + if (mif->per_interface_next_index != ~0) + next_index = mif->per_interface_next_index; + else + vnet_feature_start_device_input_x1 (mif->sw_if_index, &next_index, + bt); + + vlib_get_new_next_frame (vm, node, next_index, to_next_bufs, + n_left_to_next); + if (PREDICT_TRUE (next_index == VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT)) { - b0 = vlib_get_buffer (vm, po[4].first_buffer_vec_index); - b1 = vlib_get_buffer (vm, po[5].first_buffer_vec_index); - b2 = vlib_get_buffer (vm, po[6].first_buffer_vec_index); - b3 = vlib_get_buffer (vm, po[7].first_buffer_vec_index); - vlib_prefetch_buffer_header (b0, STORE); - vlib_prefetch_buffer_header (b1, STORE); - vlib_prefetch_buffer_header (b2, STORE); - vlib_prefetch_buffer_header (b3, STORE); - - /* enqueue buffer */ - u32 fbvi0 = po[0].first_buffer_vec_index; - u32 fbvi1 = po[1].first_buffer_vec_index; - u32 fbvi2 = po[2].first_buffer_vec_index; - u32 fbvi3 = po[3].first_buffer_vec_index; - to_next[0] = bi0 = ptd->buffers[fbvi0]; - to_next[1] = bi1 = ptd->buffers[fbvi1]; - to_next[2] = bi2 = ptd->buffers[fbvi2]; - to_next[3] = bi3 = ptd->buffers[fbvi3]; - to_next += 4; - n_left_to_next -= 4; - - b0 = vlib_get_buffer (vm, bi0); - b1 = vlib_get_buffer (vm, bi1); - b2 = vlib_get_buffer (vm, bi2); - b3 = vlib_get_buffer (vm, bi3); - - clib_memcpy64_x4 (b0, b1, b2, b3, bt); - - b0->current_length = po[0].packet_len; - b1->current_length = po[1].packet_len; - b2->current_length = po[2].packet_len; - b3->current_length = po[3].packet_len; - - memif_add_to_chain (vm, b0, ptd->buffers + fbvi0 + 1, buffer_size); - memif_add_to_chain (vm, b1, ptd->buffers + fbvi1 + 1, buffer_size); - memif_add_to_chain (vm, b2, ptd->buffers + fbvi2 + 1, buffer_size); - memif_add_to_chain (vm, b3, ptd->buffers + fbvi3 + 1, buffer_size); - - if (mode == MEMIF_INTERFACE_MODE_IP) - { - next0 = memif_next_from_ip_hdr (node, b0); - next1 = memif_next_from_ip_hdr (node, b1); - next2 = memif_next_from_ip_hdr (node, b2); - next3 = memif_next_from_ip_hdr (node, b3); - } - else if (mode == MEMIF_INTERFACE_MODE_ETHERNET) - { - if (PREDICT_FALSE (mif->per_interface_next_index != ~0)) - { - next0 = mif->per_interface_next_index; - next1 = mif->per_interface_next_index; - next2 = mif->per_interface_next_index; - next3 = mif->per_interface_next_index; - } - else - { - next0 = next1 = next2 = next3 = next_index; - /* redirect if feature path enabled */ - vnet_feature_start_device_input_x1 (mif->sw_if_index, - &next0, b0); - vnet_feature_start_device_input_x1 (mif->sw_if_index, - &next1, b1); - vnet_feature_start_device_input_x1 (mif->sw_if_index, - &next2, b2); - vnet_feature_start_device_input_x1 (mif->sw_if_index, - &next3, b3); - } - } - - /* trace */ - if (PREDICT_FALSE (n_trace > 0)) - { - memif_trace_buffer (vm, node, mif, b0, next0, qid, &n_trace); - if (PREDICT_FALSE (n_trace > 0)) - memif_trace_buffer (vm, node, mif, b1, next1, qid, &n_trace); - if (PREDICT_FALSE (n_trace > 0)) - memif_trace_buffer (vm, node, mif, b2, next2, qid, &n_trace); - if (PREDICT_FALSE (n_trace > 0)) - memif_trace_buffer (vm, node, mif, b3, next3, qid, &n_trace); - } - - /* enqueue */ - vlib_validate_buffer_enqueue_x4 (vm, node, next_index, to_next, - n_left_to_next, bi0, bi1, bi2, bi3, - next0, next1, next2, next3); - - /* next */ - n_from -= 4; - po += 4; + vlib_next_frame_t *nf; + vlib_frame_t *f; + ethernet_input_frame_t *ef; + nf = vlib_node_runtime_get_next_frame (vm, node, next_index); + f = vlib_get_frame (vm, nf->frame_index); + f->flags = 
ETH_INPUT_FRAME_F_SINGLE_SW_IF_IDX; + + ef = vlib_frame_scalar_args (f); + ef->sw_if_index = mif->sw_if_index; + ef->hw_if_index = mif->hw_if_index; } - while (n_from && n_left_to_next) - { - /* enqueue buffer */ - u32 fbvi0 = po->first_buffer_vec_index; - to_next[0] = bi0 = ptd->buffers[fbvi0]; - to_next += 1; - n_left_to_next--; - - b0 = vlib_get_buffer (vm, bi0); - clib_memcpy (b0, bt, 64); - b0->current_length = po->packet_len; + } - memif_add_to_chain (vm, b0, ptd->buffers + fbvi0 + 1, buffer_size); + /* process buffer metadata */ + u32 n_from = n_rx_packets; + po = ptd->packet_ops; + bi = to_next_bufs; - if (mode == MEMIF_INTERFACE_MODE_IP) - { - next0 = memif_next_from_ip_hdr (node, b0); - } - else if (mode == MEMIF_INTERFACE_MODE_ETHERNET) - { - if (PREDICT_FALSE (mif->per_interface_next_index != ~0)) - next0 = mif->per_interface_next_index; - else - { - next0 = next_index; - /* redirect if feature path enabled */ - vnet_feature_start_device_input_x1 (mif->sw_if_index, - &next0, b0); - } + while (n_from >= 8) + { + b0 = vlib_get_buffer (vm, po[4].first_buffer_vec_index); + b1 = vlib_get_buffer (vm, po[5].first_buffer_vec_index); + b2 = vlib_get_buffer (vm, po[6].first_buffer_vec_index); + b3 = vlib_get_buffer (vm, po[7].first_buffer_vec_index); + vlib_prefetch_buffer_header (b0, STORE); + vlib_prefetch_buffer_header (b1, STORE); + vlib_prefetch_buffer_header (b2, STORE); + vlib_prefetch_buffer_header (b3, STORE); + + /* enqueue buffer */ + u32 fbvi[4]; + fbvi[0] = po[0].first_buffer_vec_index; + fbvi[1] = po[1].first_buffer_vec_index; + fbvi[2] = po[2].first_buffer_vec_index; + fbvi[3] = po[3].first_buffer_vec_index; + + bi[0] = ptd->buffers[fbvi[0]]; + bi[1] = ptd->buffers[fbvi[1]]; + bi[2] = ptd->buffers[fbvi[2]]; + bi[3] = ptd->buffers[fbvi[3]]; + + b0 = vlib_get_buffer (vm, bi[0]); + b1 = vlib_get_buffer (vm, bi[1]); + b2 = vlib_get_buffer (vm, bi[2]); + b3 = vlib_get_buffer (vm, bi[3]); + + clib_memcpy64_x4 (b0, b1, b2, b3, bt); + + b0->current_length = po[0].packet_len; + n_rx_bytes += b0->current_length; + b1->current_length = po[1].packet_len; + n_rx_bytes += b1->current_length; + b2->current_length = po[2].packet_len; + n_rx_bytes += b2->current_length; + b3->current_length = po[3].packet_len; + n_rx_bytes += b3->current_length; + + memif_add_to_chain (vm, b0, ptd->buffers + fbvi[0] + 1, buffer_size); + memif_add_to_chain (vm, b1, ptd->buffers + fbvi[1] + 1, buffer_size); + memif_add_to_chain (vm, b2, ptd->buffers + fbvi[2] + 1, buffer_size); + memif_add_to_chain (vm, b3, ptd->buffers + fbvi[3] + 1, buffer_size); + + if (mode == MEMIF_INTERFACE_MODE_IP) + { + next[0] = memif_next_from_ip_hdr (node, b0); + next[1] = memif_next_from_ip_hdr (node, b1); + next[2] = memif_next_from_ip_hdr (node, b2); + next[3] = memif_next_from_ip_hdr (node, b3); + } - } + /* next */ + n_from -= 4; + po += 4; + bi += 4; + next += 4; + } + while (n_from) + { + u32 fbvi[4]; + /* enqueue buffer */ + fbvi[0] = po[0].first_buffer_vec_index; + bi[0] = ptd->buffers[fbvi[0]]; + b0 = vlib_get_buffer (vm, bi[0]); + clib_memcpy_fast (b0, bt, 64); + b0->current_length = po->packet_len; + n_rx_bytes += b0->current_length; + + memif_add_to_chain (vm, b0, ptd->buffers + fbvi[0] + 1, buffer_size); + + if (mode == MEMIF_INTERFACE_MODE_IP) + { + next[0] = memif_next_from_ip_hdr (node, b0); + } - /* trace */ - if (PREDICT_FALSE (n_trace > 0)) - memif_trace_buffer (vm, node, mif, b0, next0, qid, &n_trace); + /* next */ + n_from -= 1; + po += 1; + bi += 1; + next += 1; + } - /* enqueue */ - 
vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next, - n_left_to_next, bi0, next0); + /* packet trace if enabled */ + if (PREDICT_FALSE ((n_trace = vlib_get_trace_count (vm, node)))) + { + u32 n_left = n_rx_packets; + bi = to_next_bufs; + next = nexts; + u32 ni = next_index; + while (n_trace && n_left) + { + vlib_buffer_t *b; + memif_input_trace_t *tr; + if (mode != MEMIF_INTERFACE_MODE_ETHERNET) + ni = next[0]; + b = vlib_get_buffer (vm, bi[0]); + vlib_trace_buffer (vm, node, ni, b, /* follow_chain */ 0); + tr = vlib_add_trace (vm, node, b, sizeof (*tr)); + tr->next_index = ni; + tr->hw_if_index = mif->hw_if_index; + tr->ring = qid; /* next */ - n_from--; - po++; + n_trace--; + n_left--; + bi++; + next++; } + vlib_set_trace_count (vm, node, n_trace); + } + + if (mode == MEMIF_INTERFACE_MODE_ETHERNET) + { + n_left_to_next -= n_rx_packets; vlib_put_next_frame (vm, node, next_index, n_left_to_next); } + else + vlib_buffer_enqueue_to_next (vm, node, to_next_bufs, nexts, n_rx_packets); vlib_increment_combined_counter (vnm->interface_main.combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX, thread_index, @@ -534,15 +537,16 @@ memif_device_input_zc_inline (vlib_main_t * vm, vlib_node_runtime_t * node, u32 n_rx_packets = 0, n_rx_bytes = 0; u32 *to_next = 0, *buffers; u32 bi0, bi1, bi2, bi3; + u16 s0, s1, s2, s3; + memif_desc_t *d0, *d1, *d2, *d3; vlib_buffer_t *b0, *b1, *b2, *b3; - u32 thread_index = vlib_get_thread_index (); + u32 thread_index = vm->thread_index; memif_per_thread_data_t *ptd = vec_elt_at_index (mm->per_thread_data, thread_index); - vlib_buffer_t *bt = &ptd->buffer_template; u16 cur_slot, last_slot, ring_size, n_slots, mask, head; i16 start_offset; u32 buffer_length; - u16 n_alloc; + u16 n_alloc, n_from; mq = vec_elt_at_index (mif->rx_queues, qid); ring = mq->ring; @@ -568,8 +572,6 @@ memif_device_input_zc_inline (vlib_main_t * vm, vlib_node_runtime_t * node, CLIB_CACHE_LINE_BYTES); while (n_slots && n_rx_packets < MEMIF_RX_VECTOR_SZ) { - u16 s0; - memif_desc_t *d0; vlib_buffer_t *hb; s0 = cur_slot & mask; @@ -582,7 +584,7 @@ memif_device_input_zc_inline (vlib_main_t * vm, vlib_node_runtime_t * node, hb = b0 = vlib_get_buffer (vm, bi0); b0->current_data = start_offset; b0->current_length = start_offset + d0->length; - + n_rx_bytes += d0->length; if (0 && memif_desc_is_invalid (mif, d0, buffer_length)) return 0; @@ -597,7 +599,7 @@ memif_device_input_zc_inline (vlib_main_t * vm, vlib_node_runtime_t * node, d0 = &ring->desc[s0]; bi0 = mq->buffers[s0]; - /*previous buffer */ + /* previous buffer */ b0->next_buffer = bi0; b0->flags |= VLIB_BUFFER_NEXT_PRESENT; @@ -606,6 +608,7 @@ memif_device_input_zc_inline (vlib_main_t * vm, vlib_node_runtime_t * node, b0->current_data = start_offset; b0->current_length = start_offset + d0->length; hb->total_length_not_including_first_buffer += d0->length; + n_rx_bytes += d0->length; cur_slot++; n_slots--; @@ -617,10 +620,7 @@ memif_device_input_zc_inline (vlib_main_t * vm, vlib_node_runtime_t * node, /* release slots from the ring */ mq->last_tail = cur_slot; - u32 n_from = n_rx_packets; - - vnet_buffer (bt)->sw_if_index[VLIB_RX] = mif->sw_if_index; - + n_from = n_rx_packets; buffers = ptd->buffers; while (n_from) @@ -769,12 +769,12 @@ refill: head = ring->head; n_slots = ring_size - head + mq->last_tail; - if (n_slots < 8) + if (n_slots < 32) goto done; memif_desc_t *dt = &ptd->desc_template; - memset (dt, 0, sizeof (memif_desc_t)); - dt->length = VLIB_BUFFER_DEFAULT_FREE_LIST_BYTES - start_offset; + clib_memset (dt, 0, sizeof 
(memif_desc_t)); + dt->length = buffer_length; n_alloc = vlib_buffer_alloc_to_ring (vm, mq->buffers, head & mask, ring_size, n_slots); @@ -785,15 +785,64 @@ refill: MEMIF_INPUT_ERROR_BUFFER_ALLOC_FAIL, 1); } - while (n_alloc--) + while (n_alloc >= 32) + { + bi0 = mq->buffers[(head + 4) & mask]; + vlib_prefetch_buffer_with_index (vm, bi0, LOAD); + bi1 = mq->buffers[(head + 5) & mask]; + vlib_prefetch_buffer_with_index (vm, bi1, LOAD); + bi2 = mq->buffers[(head + 6) & mask]; + vlib_prefetch_buffer_with_index (vm, bi2, LOAD); + bi3 = mq->buffers[(head + 7) & mask]; + vlib_prefetch_buffer_with_index (vm, bi3, LOAD); + + s0 = head++ & mask; + s1 = head++ & mask; + s2 = head++ & mask; + s3 = head++ & mask; + + d0 = &ring->desc[s0]; + d1 = &ring->desc[s1]; + d2 = &ring->desc[s2]; + d3 = &ring->desc[s3]; + + clib_memcpy_fast (d0, dt, sizeof (memif_desc_t)); + clib_memcpy_fast (d1, dt, sizeof (memif_desc_t)); + clib_memcpy_fast (d2, dt, sizeof (memif_desc_t)); + clib_memcpy_fast (d3, dt, sizeof (memif_desc_t)); + + b0 = vlib_get_buffer (vm, mq->buffers[s0]); + b1 = vlib_get_buffer (vm, mq->buffers[s1]); + b2 = vlib_get_buffer (vm, mq->buffers[s2]); + b3 = vlib_get_buffer (vm, mq->buffers[s3]); + + d0->region = b0->buffer_pool_index + 1; + d1->region = b1->buffer_pool_index + 1; + d2->region = b2->buffer_pool_index + 1; + d3->region = b3->buffer_pool_index + 1; + + d0->offset = + (void *) b0->data - mif->regions[d0->region].shm + start_offset; + d1->offset = + (void *) b1->data - mif->regions[d1->region].shm + start_offset; + d2->offset = + (void *) b2->data - mif->regions[d2->region].shm + start_offset; + d3->offset = + (void *) b3->data - mif->regions[d3->region].shm + start_offset; + + n_alloc -= 4; + } + while (n_alloc) { - u16 s = head++ & mask; - memif_desc_t *d = &ring->desc[s]; - clib_memcpy (d, dt, sizeof (memif_desc_t)); - b0 = vlib_get_buffer (vm, mq->buffers[s]); - d->region = b0->buffer_pool_index + 1; - d->offset = - (void *) b0->data - mif->regions[d->region].shm + start_offset; + s0 = head++ & mask; + d0 = &ring->desc[s0]; + clib_memcpy_fast (d0, dt, sizeof (memif_desc_t)); + b0 = vlib_get_buffer (vm, mq->buffers[s0]); + d0->region = b0->buffer_pool_index + 1; + d0->offset = + (void *) b0->data - mif->regions[d0->region].shm + start_offset; + + n_alloc -= 1; } CLIB_MEMORY_STORE_BARRIER (); @@ -803,10 +852,10 @@ done: return n_rx_packets; } -uword -CLIB_MULTIARCH_FN (memif_input_fn) (vlib_main_t * vm, - vlib_node_runtime_t * node, - vlib_frame_t * frame) + +VLIB_NODE_FN (memif_input_node) (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * frame) { u32 n_rx = 0; memif_main_t *mm = &memif_main; @@ -831,7 +880,7 @@ CLIB_MULTIARCH_FN (memif_input_fn) (vlib_main_t * vm, n_rx += memif_device_input_zc_inline (vm, node, frame, mif, dq->queue_id, mode_eth); } - if (mif->flags & MEMIF_IF_FLAG_IS_SLAVE) + else if (mif->flags & MEMIF_IF_FLAG_IS_SLAVE) { if (mif->mode == MEMIF_INTERFACE_MODE_IP) n_rx += memif_device_input_inline (vm, node, frame, mif, @@ -859,10 +908,8 @@ CLIB_MULTIARCH_FN (memif_input_fn) (vlib_main_t * vm, return n_rx; } -#ifndef CLIB_MULTIARCH_VARIANT /* *INDENT-OFF* */ VLIB_REGISTER_NODE (memif_input_node) = { - .function = memif_input_fn, .name = "memif-input", .sibling_of = "device-input", .format_trace = format_memif_input_trace, @@ -872,21 +919,6 @@ VLIB_REGISTER_NODE (memif_input_node) = { .error_strings = memif_input_error_strings, }; -vlib_node_function_t __clib_weak memif_input_fn_avx512; -vlib_node_function_t __clib_weak memif_input_fn_avx2; - -#if 
__x86_64__ -static void __clib_constructor -memif_input_multiarch_select (void) -{ - if (memif_input_fn_avx512 && clib_cpu_supports_avx512f ()) - memif_input_node.function = memif_input_fn_avx512; - else if (memif_input_fn_avx2 && clib_cpu_supports_avx2 ()) - memif_input_node.function = memif_input_fn_avx2; -} -#endif -#endif - /* *INDENT-ON* */
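
A minimal standalone sketch of the descriptor-refill pattern introduced in the zero-copy path above: one zeroed template descriptor is prepared once (clib_memset, then a length assignment) and copied into each free ring slot, with the copy loop unrolled four slots at a time plus a scalar tail. The types and names below (my_desc_t, RING_SIZE, refill_ring) are hypothetical simplified stand-ins, not the memif/VPP API, and the per-slot region/offset bookkeeping from the patch is omitted.

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define RING_SIZE 16            /* power of two, so (index & mask) wraps */

typedef struct
{
  uint16_t flags;
  uint16_t region;
  uint32_t length;
  uint32_t offset;
} my_desc_t;                    /* hypothetical simplified descriptor */

static void
refill_ring (my_desc_t *ring, uint16_t head, uint16_t n_slots,
	     uint32_t buffer_len)
{
  const uint16_t mask = RING_SIZE - 1;
  my_desc_t dt;                 /* template shared by every new slot */

  memset (&dt, 0, sizeof (dt));
  dt.length = buffer_len;

  /* unrolled by four, mirroring the shape of the patched loop */
  while (n_slots >= 4)
    {
      ring[head++ & mask] = dt;
      ring[head++ & mask] = dt;
      ring[head++ & mask] = dt;
      ring[head++ & mask] = dt;
      n_slots -= 4;
    }
  /* scalar tail for the remaining slots */
  while (n_slots)
    {
      ring[head++ & mask] = dt;
      n_slots -= 1;
    }
}

int
main (void)
{
  my_desc_t ring[RING_SIZE];

  /* refill 10 slots starting at head index 3 with 2048-byte buffers */
  refill_ring (ring, 3, 10, 2048);
  printf ("slot 5 length = %u\n", (unsigned) ring[5].length);
  return 0;
}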