diff --git a/src/vlib/buffer_node.h b/src/vlib/buffer_node.h
index cfdb0567a0c..bd82b1037a9 100644
--- a/src/vlib/buffer_node.h
+++ b/src/vlib/buffer_node.h
@@ -350,26 +350,31 @@ vlib_buffer_enqueue_to_next (vlib_main_t * vm, vlib_node_runtime_t * node,
 	  max = clib_min (n_left_to_next, count);
 	}
 #if defined(CLIB_HAVE_VEC512)
-      u16x32 next32 = u16x32_load_unaligned (nexts);
+      u16x32 next32 = CLIB_MEM_OVERFLOW_LOAD (u16x32_load_unaligned, nexts);
       next32 = (next32 == u16x32_splat (next32[0]));
       u64 bitmap = u16x32_msb_mask (next32);
       n_enqueued = count_trailing_zeros (~bitmap);
 #elif defined(CLIB_HAVE_VEC256)
-      u16x16 next16 = u16x16_load_unaligned (nexts);
+      u16x16 next16 = CLIB_MEM_OVERFLOW_LOAD (u16x16_load_unaligned, nexts);
       next16 = (next16 == u16x16_splat (next16[0]));
       u64 bitmap = u8x32_msb_mask ((u8x32) next16);
       n_enqueued = count_trailing_zeros (~bitmap) / 2;
 #elif defined(CLIB_HAVE_VEC128) && defined(CLIB_HAVE_VEC128_MSB_MASK)
-      u16x8 next8 = u16x8_load_unaligned (nexts);
+      u16x8 next8 = CLIB_MEM_OVERFLOW_LOAD (u16x8_load_unaligned, nexts);
       next8 = (next8 == u16x8_splat (next8[0]));
       u64 bitmap = u8x16_msb_mask ((u8x16) next8);
       n_enqueued = count_trailing_zeros (~bitmap) / 2;
 #else
       u16 x = 0;
-      x |= next_index ^ nexts[1];
-      x |= next_index ^ nexts[2];
-      x |= next_index ^ nexts[3];
-      n_enqueued = (x == 0) ? 4 : 1;
+      if (count + 3 < max)
+	{
+	  x |= next_index ^ nexts[1];
+	  x |= next_index ^ nexts[2];
+	  x |= next_index ^ nexts[3];
+	  n_enqueued = (x == 0) ? 4 : 1;
+	}
+      else
+	n_enqueued = 1;
 #endif
 
       if (PREDICT_FALSE (n_enqueued > max))
@@ -378,7 +383,7 @@ vlib_buffer_enqueue_to_next (vlib_main_t * vm, vlib_node_runtime_t * node,
 #ifdef CLIB_HAVE_VEC512
       if (n_enqueued >= 32)
 	{
-	  clib_memcpy (to_next, buffers, 32 * sizeof (u32));
+	  vlib_buffer_copy_indices (to_next, buffers, 32);
 	  nexts += 32;
 	  to_next += 32;
 	  buffers += 32;
@@ -392,7 +397,7 @@ vlib_buffer_enqueue_to_next (vlib_main_t * vm, vlib_node_runtime_t * node,
 #ifdef CLIB_HAVE_VEC256
       if (n_enqueued >= 16)
 	{
-	  clib_memcpy (to_next, buffers, 16 * sizeof (u32));
+	  vlib_buffer_copy_indices (to_next, buffers, 16);
 	  nexts += 16;
 	  to_next += 16;
 	  buffers += 16;
@@ -406,7 +411,7 @@ vlib_buffer_enqueue_to_next (vlib_main_t * vm, vlib_node_runtime_t * node,
 #ifdef CLIB_HAVE_VEC128
       if (n_enqueued >= 8)
 	{
-	  clib_memcpy (to_next, buffers, 8 * sizeof (u32));
+	  vlib_buffer_copy_indices (to_next, buffers, 8);
 	  nexts += 8;
 	  to_next += 8;
 	  buffers += 8;
@@ -419,7 +424,7 @@ vlib_buffer_enqueue_to_next (vlib_main_t * vm, vlib_node_runtime_t * node,
 
       if (n_enqueued >= 4)
 	{
-	  clib_memcpy (to_next, buffers, 4 * sizeof (u32));
+	  vlib_buffer_copy_indices (to_next, buffers, 4);
 	  nexts += 4;
 	  to_next += 4;
 	  buffers += 4;
@@ -444,27 +449,57 @@ vlib_buffer_enqueue_to_next (vlib_main_t * vm, vlib_node_runtime_t * node,
 }
 
 static_always_inline void
+vlib_buffer_enqueue_to_single_next (vlib_main_t * vm,
+				    vlib_node_runtime_t * node, u32 * buffers,
+				    u16 next_index, u32 count)
+{
+  u32 *to_next, n_left_to_next, n_enq;
+
+  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+  if (PREDICT_TRUE (n_left_to_next >= count))
+    {
+      vlib_buffer_copy_indices (to_next, buffers, count);
+      n_left_to_next -= count;
+      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+      return;
+    }
+
+  n_enq = n_left_to_next;
+next:
+  vlib_buffer_copy_indices (to_next, buffers, n_enq);
+  n_left_to_next -= n_enq;
+
+  if (PREDICT_FALSE (count > n_enq))
+    {
+      count -= n_enq;
+      buffers += n_enq;
+
+      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+      n_enq = clib_min (n_left_to_next, count);
+      goto next;
+    }
+  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+}
+
+static_always_inline u32
 vlib_buffer_enqueue_to_thread (vlib_main_t * vm, u32 frame_queue_index,
 			       u32 * buffer_indices, u16 * thread_indices,
-			       u32 n_left)
+			       u32 n_packets, int drop_on_congestion)
 {
   vlib_thread_main_t *tm = vlib_get_thread_main ();
-  static __thread vlib_frame_queue_elt_t **handoff_queue_elt_by_thread_index =
-    0;
-  static __thread vlib_frame_queue_t **congested_handoff_queue_by_thread_index
-    = 0;
+  vlib_frame_queue_main_t *fqm;
+  vlib_frame_queue_per_thread_data_t *ptd;
+  u32 n_left = n_packets;
+  u32 drop_list[VLIB_FRAME_SIZE], *dbi = drop_list, n_drop = 0;
   vlib_frame_queue_elt_t *hf = 0;
   u32 n_left_to_next_thread = 0, *to_next_thread = 0;
   u32 next_thread_index, current_thread_index = ~0;
   int i;
 
-  if (PREDICT_FALSE (handoff_queue_elt_by_thread_index == 0))
-    {
-      vec_validate (handoff_queue_elt_by_thread_index, tm->n_vlib_mains - 1);
-      vec_validate_init_empty (congested_handoff_queue_by_thread_index,
-			       tm->n_vlib_mains - 1,
-			       (vlib_frame_queue_t *) (~0));
-    }
+  fqm = vec_elt_at_index (tm->frame_queue_mains, frame_queue_index);
+  ptd = vec_elt_at_index (fqm->per_thread_data, vm->thread_index);
 
   while (n_left)
     {
@@ -472,12 +507,23 @@ vlib_buffer_enqueue_to_thread (vlib_main_t * vm, u32 frame_queue_index,
 
       if (next_thread_index != current_thread_index)
 	{
+	  if (drop_on_congestion &&
+	      is_vlib_frame_queue_congested
+	      (frame_queue_index, next_thread_index, fqm->queue_hi_thresh,
+	       ptd->congested_handoff_queue_by_thread_index))
+	    {
+	      dbi[0] = buffer_indices[0];
+	      dbi++;
+	      n_drop++;
+	      goto next;
+	    }
+
 	  if (hf)
 	    hf->n_vectors = VLIB_FRAME_SIZE - n_left_to_next_thread;
 
 	  hf = vlib_get_worker_handoff_queue_elt (frame_queue_index,
 						  next_thread_index,
-						  handoff_queue_elt_by_thread_index);
+						  ptd->handoff_queue_elt_by_thread_index);
 
 	  n_left_to_next_thread = VLIB_FRAME_SIZE - hf->n_vectors;
 	  to_next_thread = &hf->buffer_index[hf->n_vectors];
@@ -492,12 +538,14 @@ vlib_buffer_enqueue_to_thread (vlib_main_t * vm, u32 frame_queue_index,
 	{
 	  hf->n_vectors = VLIB_FRAME_SIZE;
 	  vlib_put_frame_queue_elt (hf);
+	  vlib_mains[current_thread_index]->check_frame_queues = 1;
 	  current_thread_index = ~0;
-	  handoff_queue_elt_by_thread_index[next_thread_index] = 0;
+	  ptd->handoff_queue_elt_by_thread_index[next_thread_index] = 0;
 	  hf = 0;
 	}
 
       /* next */
+    next:
       thread_indices += 1;
       buffer_indices += 1;
       n_left -= 1;
@@ -507,11 +555,11 @@ vlib_buffer_enqueue_to_thread (vlib_main_t * vm, u32 frame_queue_index,
     hf->n_vectors = VLIB_FRAME_SIZE - n_left_to_next_thread;
 
   /* Ship frames to the thread nodes */
-  for (i = 0; i < vec_len (handoff_queue_elt_by_thread_index); i++)
+  for (i = 0; i < vec_len (ptd->handoff_queue_elt_by_thread_index); i++)
     {
-      if (handoff_queue_elt_by_thread_index[i])
+      if (ptd->handoff_queue_elt_by_thread_index[i])
 	{
-	  hf = handoff_queue_elt_by_thread_index[i];
+	  hf = ptd->handoff_queue_elt_by_thread_index[i];
 	  /*
 	   * It works better to let the handoff node
 	   * rate-adapt, always ship the handoff queue element.
@@ -519,14 +567,20 @@ vlib_buffer_enqueue_to_thread (vlib_main_t * vm, u32 frame_queue_index,
 	  if (1 || hf->n_vectors == hf->last_n_vectors)
 	    {
 	      vlib_put_frame_queue_elt (hf);
-	      handoff_queue_elt_by_thread_index[i] = 0;
+	      vlib_mains[i]->check_frame_queues = 1;
+	      ptd->handoff_queue_elt_by_thread_index[i] = 0;
 	    }
 	  else
 	    hf->last_n_vectors = hf->n_vectors;
 	}
-      congested_handoff_queue_by_thread_index[i] =
+      ptd->congested_handoff_queue_by_thread_index[i] =
 	(vlib_frame_queue_t *) (~0);
     }
+
+  if (drop_on_congestion && n_drop)
+    vlib_buffer_free (vm, drop_list, n_drop);
+
+  return n_packets - n_drop;
 }
 
 #endif /* included_vlib_buffer_node_h */
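
Note on the first hunk (not part of the patch): the vector paths detect how many
leading entries of nexts[] share the same next index by comparing the whole
vector against its first lane, taking an MSB mask, and counting trailing ones.
Below is a minimal scalar sketch of that trick, written for illustration only;
it abstracts away the byte-level masks (hence the "/ 2" in the AVX paths) and
the CLIB_MEM_OVERFLOW_LOAD annotation that tolerates reading past the array end.

#include <stdint.h>

/* Illustrative stand-in for the u16x16 compare + msb_mask sequence:
   bit i of `bitmap' is set when nexts[i] == nexts[0], so the count of
   trailing zeros of ~bitmap is the length of the leading run of equal
   next indices -- the n_enqueued value the hunk computes. */
static inline unsigned
count_equal_leading (const uint16_t *nexts)
{
  uint64_t bitmap = 0;
  unsigned i;

  for (i = 0; i < 16; i++)
    bitmap |= (uint64_t) (nexts[i] == nexts[0]) << i;

  /* Upper bits of ~bitmap are always set, so the result is <= 16. */
  return __builtin_ctzll (~bitmap);
}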
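
Usage sketch for the new vlib_buffer_enqueue_to_single_next() helper, again
hypothetical: only the helper's signature comes from the diff above; the node
function name and EXAMPLE_NEXT_INDEX are placeholders. A node whose packets all
go to one next node can skip building a per-packet nexts[] array entirely.

/* Hypothetical caller; not part of the patch. */
static uword
example_single_next_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
			     vlib_frame_t * frame)
{
  u32 *from = vlib_frame_vector_args (frame);

  /* EXAMPLE_NEXT_INDEX stands in for the node's single next index.
     Per the hunk above, the helper copies the buffer indices with
     vlib_buffer_copy_indices() and spills into additional next frames
     when the current one cannot hold all `count' entries. */
  vlib_buffer_enqueue_to_single_next (vm, node, from, EXAMPLE_NEXT_INDEX,
				      frame->n_vectors);
  return frame->n_vectors;
}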
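
A last sketch, likewise hypothetical, of a handoff node using the new
drop_on_congestion argument and the new u32 return value (packets actually
enqueued). example_fq_index, example_select_thread() and
EXAMPLE_ERROR_CONGESTION_DROP are assumptions; the freeing of dropped buffers
inside the helper and the n_packets - n_drop return come from the diff.

/* Hypothetical handoff node; the enqueue call and its return-value
   semantics come from this patch, everything else is illustrative. */
static uword
example_handoff_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
			 vlib_frame_t * frame)
{
  u32 *from = vlib_frame_vector_args (frame);
  u32 n_packets = frame->n_vectors;
  u16 thread_indices[VLIB_FRAME_SIZE];
  u32 n_enq, i;

  /* A real node would derive the destination thread from a flow hash;
     example_select_thread() is a placeholder. */
  for (i = 0; i < n_packets; i++)
    thread_indices[i] = example_select_thread (vm, from[i]);

  /* drop_on_congestion = 1: buffers aimed at a congested frame queue
     are freed inside the helper and excluded from the return value. */
  n_enq = vlib_buffer_enqueue_to_thread (vm, example_fq_index, from,
					 thread_indices, n_packets, 1);

  if (PREDICT_FALSE (n_enq < n_packets))
    vlib_node_increment_counter (vm, node->node_index,
				 EXAMPLE_ERROR_CONGESTION_DROP,
				 n_packets - n_enq);
  return n_packets;
}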