X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=src%2Fvlib%2Fbuffer_node.h;h=c0268b215627800133dfbe84cd3536dc1d511a6c;hb=refs%2Fchanges%2F52%2F40352%2F2;hp=c9f4895c73973f2db1b0aad51fbfb353c98010c0;hpb=398fdc1a874564c6d31a90fc58ec117fcf58af40;p=vpp.git

diff --git a/src/vlib/buffer_node.h b/src/vlib/buffer_node.h
index c9f4895c739..c0268b21562 100644
--- a/src/vlib/buffer_node.h
+++ b/src/vlib/buffer_node.h
@@ -69,6 +69,8 @@
 #define vlib_validate_buffer_enqueue_x2(vm,node,next_index,to_next,n_left_to_next,bi0,bi1,next0,next1) \
 do { \
+  ASSERT (bi0 != 0); \
+  ASSERT (bi1 != 0); \
   int enqueue_code = (next0 != next_index) + 2*(next1 != next_index); \
 \
   if (PREDICT_FALSE (enqueue_code != 0)) \
@@ -137,6 +139,10 @@ do { \
 #define vlib_validate_buffer_enqueue_x4(vm,node,next_index,to_next,n_left_to_next,bi0,bi1,bi2,bi3,next0,next1,next2,next3) \
 do { \
+  ASSERT (bi0 != 0); \
+  ASSERT (bi1 != 0); \
+  ASSERT (bi2 != 0); \
+  ASSERT (bi3 != 0); \
   /* After the fact: check the [speculative] enqueue to "next" */ \
   u32 fix_speculation = (next_index ^ next0) | (next_index ^ next1) \
     | (next_index ^ next2) | (next_index ^ next3); \
@@ -217,6 +223,7 @@ do { \
  */
 #define vlib_validate_buffer_enqueue_x1(vm,node,next_index,to_next,n_left_to_next,bi0,next0) \
 do { \
+  ASSERT (bi0 != 0); \
   if (PREDICT_FALSE (next0 != next_index)) \
     { \
       vlib_put_next_frame (vm, node, next_index, n_left_to_next + 1); \
@@ -229,6 +236,53 @@ do { \
     } \
 } while (0)
 
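The ASSERTs added above catch a common bug: enqueueing buffer index 0, which in practice means an uninitialized index (vlib does not hand out buffer index 0, so a zero here almost always means the variable was never written). For context, a minimal sketch of the single-loop boilerplate these macros close out; example_node_fn, example_next_t and EXAMPLE_NEXT_DROP are illustrative placeholders, not part of this header:

typedef enum
{
  EXAMPLE_NEXT_DROP,
  EXAMPLE_N_NEXT,
} example_next_t;

static uword
example_node_fn (vlib_main_t *vm, vlib_node_runtime_t *node,
                 vlib_frame_t *frame)
{
  u32 *from = vlib_frame_vector_args (frame);
  u32 n_left_from = frame->n_vectors;
  u32 next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      u32 *to_next, n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0 = from[0];
          u32 next0 = EXAMPLE_NEXT_DROP; /* per-packet decision goes here */

          /* Speculative enqueue: assume next0 == next_index. */
          to_next[0] = bi0;
          to_next += 1;
          n_left_to_next -= 1;
          from += 1;
          n_left_from -= 1;

          /* Repair the enqueue when the speculation was wrong; the new
             ASSERT fires here if bi0 is 0. */
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
                                           n_left_to_next, bi0, next0);
        }
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  return frame->n_vectors;
}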
+/** \brief Finish enqueueing one buffer forward in the graph, along with its
+    aux_data if possible. Standard single loop boilerplate element. This is a
+    MACRO, with MULTIPLE SIDE EFFECTS. In the ideal case, next_index ==
+    next0, which means that the speculative enqueue at the top of the
+    single loop has correctly dealt with the packet in hand. In that case,
+    the macro does nothing at all. The macro MAY leave to_next_aux set to
+    NULL if next_index does not support aux data.
+
+    @param vm vlib_main_t pointer, varies by thread
+    @param node current node vlib_node_runtime_t pointer
+    @param next_index speculated next index used for the packet
+    @param to_next speculated vector pointer used for the packet
+    @param to_next_aux speculated aux_data pointer used for the packet
+    @param n_left_to_next number of slots left in speculated vector
+    @param bi0 buffer index
+    @param aux0 aux_data for the buffer
+    @param next0 actual next index to be used for the packet
+
+    @return @c next_index -- speculative next index to be used for future packets
+    @return @c to_next -- speculative frame to be used for future packets
+    @return @c n_left_to_next -- number of slots left in speculative frame
+*/
+#define vlib_validate_buffer_enqueue_with_aux_x1( \
+  vm, node, next_index, to_next, to_next_aux, n_left_to_next, bi0, aux0, \
+  next0) \
+  do \
+    { \
+      ASSERT (bi0 != 0); \
+      if (PREDICT_FALSE (next0 != next_index)) \
+	{ \
+	  vlib_put_next_frame (vm, node, next_index, n_left_to_next + 1); \
+	  next_index = next0; \
+	  vlib_get_next_frame_with_aux_safe (vm, node, next_index, to_next, \
+					     to_next_aux, n_left_to_next); \
+ \
+	  to_next[0] = bi0; \
+	  to_next += 1; \
+	  if (to_next_aux) \
+	    { \
+	      to_next_aux[0] = aux0; \
+	      to_next_aux += 1; \
+	    } \
+	  n_left_to_next -= 1; \
+	} \
+    } \
+  while (0)
+
 always_inline uword
 generic_buffer_node_inline (vlib_main_t * vm,
 			    vlib_node_runtime_t * node,
@@ -280,8 +334,8 @@ generic_buffer_node_inline (vlib_main_t * vm,
 	  vlib_prefetch_buffer_header (p2, LOAD);
 	  vlib_prefetch_buffer_header (p3, LOAD);
 
-	  CLIB_PREFETCH (p2->data, 64, LOAD);
-	  CLIB_PREFETCH (p3->data, 64, LOAD);
+	  clib_prefetch_load (p2->data);
+	  clib_prefetch_load (p3->data);
 	}
 
       pi0 = to_next[0] = from[0];
@@ -328,119 +382,95 @@ generic_buffer_node_inline (vlib_main_t * vm,
   return frame->n_vectors;
 }
 
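vlib_validate_buffer_enqueue_with_aux_x1 above follows the same speculative pattern as the plain x1 macro, with one twist: the frame's aux-data pointer may legitimately be NULL when the next node did not request aux data, so every store through to_next_aux has to be guarded. A sketch of the matching caller-side steps, assuming bi0, aux0 and next0 were computed earlier in the same kind of single loop shown before:

  u32 *to_next, n_left_to_next;
  u32 *to_next_aux;

  vlib_get_next_frame_with_aux_safe (vm, node, next_index, to_next,
                                     to_next_aux, n_left_to_next);

  /* Speculative enqueue of the buffer and, when supported, its aux data. */
  to_next[0] = bi0;
  to_next += 1;
  if (to_next_aux)
    {
      to_next_aux[0] = aux0;
      to_next_aux += 1;
    }
  n_left_to_next -= 1;

  /* Repair the enqueue if next0 turned out to differ from next_index. */
  vlib_validate_buffer_enqueue_with_aux_x1 (vm, node, next_index, to_next,
                                            to_next_aux, n_left_to_next,
                                            bi0, aux0, next0);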
+/* Minimum size for the 'buffers' and 'nexts' arrays to be used when calling
+ * vlib_buffer_enqueue_to_next().
+ * Because of optimizations, vlib_buffer_enqueue_to_next() will access
+ * past 'count' elements in the 'buffers' and 'nexts' arrays, in other
+ * words, it will read past the end of both arrays.
+ * Those overflow elements are ignored in the final result so they do not
+ * need to be properly initialized; however, if an array is allocated right
+ * before the end of a page and the next page is not mapped, accessing the
+ * overflow elements will trigger a segfault. */
+#define VLIB_BUFFER_ENQUEUE_MIN_SIZE(n) round_pow2 ((n), 64)
+
 static_always_inline void
 vlib_buffer_enqueue_to_next (vlib_main_t * vm, vlib_node_runtime_t * node,
 			     u32 * buffers, u16 * nexts, uword count)
 {
-  u32 *to_next, n_left_to_next, max;
-  u16 next_index;
-
-  next_index = nexts[0];
-  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
-  max = clib_min (n_left_to_next, count);
+  vlib_buffer_enqueue_to_next_fn_t *fn;
+  fn = vlib_buffer_func_main.buffer_enqueue_to_next_fn;
+  (fn) (vm, node, buffers, nexts, count);
+}
 
-  while (count)
-    {
-      u32 n_enqueued;
-      if ((nexts[0] != next_index) || n_left_to_next == 0)
-	{
-	  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
-	  next_index = nexts[0];
-	  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
-	  max = clib_min (n_left_to_next, count);
-	}
-#if defined(CLIB_HAVE_VEC512)
-      u16x32 next32 = u16x32_load_unaligned (nexts);
-      next32 = (next32 == u16x32_splat (next32[0]));
-      u64 bitmap = u16x32_msb_mask (next32);
-      n_enqueued = count_trailing_zeros (~bitmap);
-#elif defined(CLIB_HAVE_VEC256)
-      u16x16 next16 = u16x16_load_unaligned (nexts);
-      next16 = (next16 == u16x16_splat (next16[0]));
-      u64 bitmap = u8x32_msb_mask ((u8x32) next16);
-      n_enqueued = count_trailing_zeros (~bitmap) / 2;
-#elif defined(CLIB_HAVE_VEC128) && defined(CLIB_HAVE_VEC128_MSB_MASK)
-      u16x8 next8 = u16x8_load_unaligned (nexts);
-      next8 = (next8 == u16x8_splat (next8[0]));
-      u64 bitmap = u8x16_msb_mask ((u8x16) next8);
-      n_enqueued = count_trailing_zeros (~bitmap) / 2;
-#else
-      u16 x = 0;
-      x |= next_index ^ nexts[1];
-      x |= next_index ^ nexts[2];
-      x |= next_index ^ nexts[3];
-      n_enqueued = (x == 0) ? 4 : 1;
-#endif
-
-      if (PREDICT_FALSE (n_enqueued > max))
-	n_enqueued = max;
-
-#ifdef CLIB_HAVE_VEC512
-      if (n_enqueued >= 32)
-	{
-	  clib_memcpy (to_next, buffers, 32 * sizeof (u32));
-	  nexts += 32;
-	  to_next += 32;
-	  buffers += 32;
-	  n_left_to_next -= 32;
-	  count -= 32;
-	  max -= 32;
-	  continue;
-	}
-#endif
+static_always_inline void
+vlib_buffer_enqueue_to_next_with_aux (vlib_main_t *vm,
+				      vlib_node_runtime_t *node, u32 *buffers,
+				      u32 *aux_data, u16 *nexts, uword count)
+{
+  vlib_buffer_enqueue_to_next_with_aux_fn_t *fn;
+  fn = vlib_buffer_func_main.buffer_enqueue_to_next_with_aux_fn;
+  (fn) (vm, node, buffers, aux_data, nexts, count);
+}
 
-#ifdef CLIB_HAVE_VEC256
-      if (n_enqueued >= 16)
-	{
-	  clib_memcpy (to_next, buffers, 16 * sizeof (u32));
-	  nexts += 16;
-	  to_next += 16;
-	  buffers += 16;
-	  n_left_to_next -= 16;
-	  count -= 16;
-	  max -= 16;
-	  continue;
-	}
-#endif
+static_always_inline void
+vlib_buffer_enqueue_to_next_vec (vlib_main_t *vm, vlib_node_runtime_t *node,
+				 u32 **buffers, u16 **nexts, uword count)
+{
+  const u32 bl = vec_len (*buffers), nl = vec_len (*nexts);
+  const u32 c = VLIB_BUFFER_ENQUEUE_MIN_SIZE (count);
+  ASSERT (bl >= count && nl >= count);
+  vec_validate (*buffers, c);
+  vec_validate (*nexts, c);
+  vlib_buffer_enqueue_to_next (vm, node, *buffers, *nexts, count);
+  vec_set_len (*buffers, bl);
+  vec_set_len (*nexts, nl);
+}
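Callers that pass their own arrays to vlib_buffer_enqueue_to_next() must size them with this overflow rule in mind; vlib_buffer_enqueue_to_next_vec() above performs the equivalent padding automatically for vec-allocated arrays. A sketch of the conventional static sizing inside a node function (illustrative; the per-packet classification that fills nexts[] is elided):

  u32 buffer_indices[VLIB_BUFFER_ENQUEUE_MIN_SIZE (VLIB_FRAME_SIZE)];
  u16 nexts[VLIB_BUFFER_ENQUEUE_MIN_SIZE (VLIB_FRAME_SIZE)];
  uword n = frame->n_vectors; /* never more than VLIB_FRAME_SIZE */

  /* ... fill buffer_indices[0..n-1] and nexts[0..n-1] ... */

  /* The implementation may read up to the rounded-up size, but entries
     past 'n' are ignored and never reach a frame. */
  vlib_buffer_enqueue_to_next (vm, node, buffer_indices, nexts, n);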
-#ifdef CLIB_HAVE_VEC128
-      if (n_enqueued >= 8)
-	{
-	  clib_memcpy (to_next, buffers, 8 * sizeof (u32));
-	  nexts += 8;
-	  to_next += 8;
-	  buffers += 8;
-	  n_left_to_next -= 8;
-	  count -= 8;
-	  max -= 8;
-	  continue;
-	}
-#endif
 
+static_always_inline void
+vlib_buffer_enqueue_to_single_next (vlib_main_t * vm,
+				    vlib_node_runtime_t * node, u32 * buffers,
+				    u16 next_index, u32 count)
+{
+  vlib_buffer_enqueue_to_single_next_fn_t *fn;
+  fn = vlib_buffer_func_main.buffer_enqueue_to_single_next_fn;
+  (fn) (vm, node, buffers, next_index, count);
+}
 
-      if (n_enqueued >= 4)
-	{
-	  clib_memcpy (to_next, buffers, 4 * sizeof (u32));
-	  nexts += 4;
-	  to_next += 4;
-	  buffers += 4;
-	  n_left_to_next -= 4;
-	  count -= 4;
-	  max -= 4;
-	  continue;
-	}
+static_always_inline void
+vlib_buffer_enqueue_to_single_next_with_aux (vlib_main_t *vm,
+					     vlib_node_runtime_t *node,
+					     u32 *buffers, u32 *aux_data,
+					     u16 next_index, u32 count)
+{
+  vlib_buffer_enqueue_to_single_next_with_aux_fn_t *fn;
+  fn = vlib_buffer_func_main.buffer_enqueue_to_single_next_with_aux_fn;
+  (fn) (vm, node, buffers, aux_data, next_index, count);
+}
 
-      /* copy */
-      to_next[0] = buffers[0];
+static_always_inline u32
+vlib_buffer_enqueue_to_thread (vlib_main_t *vm, vlib_node_runtime_t *node,
+			       u32 frame_queue_index, u32 *buffer_indices,
+			       u16 *thread_indices, u32 n_packets,
+			       int drop_on_congestion)
+{
+  vlib_buffer_enqueue_to_thread_fn_t *fn;
+  fn = vlib_buffer_func_main.buffer_enqueue_to_thread_fn;
+  return (fn) (vm, node, frame_queue_index, buffer_indices, thread_indices,
+	       n_packets, drop_on_congestion);
+}
 
-      /* next */
-      nexts += 1;
-      to_next += 1;
-      buffers += 1;
-      n_left_to_next -= 1;
-      count -= 1;
-      max -= 1;
-    }
-  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+static_always_inline u32
+vlib_buffer_enqueue_to_thread_with_aux (vlib_main_t *vm,
+					vlib_node_runtime_t *node,
+					u32 frame_queue_index,
+					u32 *buffer_indices, u32 *aux,
+					u16 *thread_indices, u32 n_packets,
+					int drop_on_congestion)
+{
+  vlib_buffer_enqueue_to_thread_with_aux_fn_t *fn;
+  fn = vlib_buffer_func_main.buffer_enqueue_to_thread_with_aux_fn;
+  return (fn) (vm, node, frame_queue_index, buffer_indices, aux,
+	       thread_indices, n_packets, drop_on_congestion);
 }
 
 #endif /* included_vlib_buffer_node_h */
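All of these wrappers dispatch through function pointers in vlib_buffer_func_main, so the implementation actually used can be selected at runtime (for example, a CPU-specific variant). The enqueue-to-thread entry points additionally return the number of packets actually enqueued, which is how congestion drops surface to the caller. A minimal handoff sketch, assuming fq_index was obtained earlier from vlib_frame_queue_main_init() and EXAMPLE_ERROR_CONGESTION is a placeholder error counter:

  u32 n_enq;

  n_enq = vlib_buffer_enqueue_to_thread (vm, node, fq_index, buffer_indices,
                                         thread_indices, n_packets,
                                         1 /* drop_on_congestion */);
  if (n_enq < n_packets)
    vlib_node_increment_counter (vm, node->node_index,
                                 EXAMPLE_ERROR_CONGESTION,
                                 n_packets - n_enq);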