X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=src%2Fvlib%2Fbuffer_funcs.h;h=95b622c202c102576f4ea4105856735b7b44f80f;hb=90d05bc7fb834b5cf25bdd9bb6d92bb35e602494;hp=2ba9f1cb894903291668e341c30d2e387b52485a;hpb=b6e8b1a7c8bf9f9fbd05cdc3c90111d9e7a6897b;p=vpp.git

diff --git a/src/vlib/buffer_funcs.h b/src/vlib/buffer_funcs.h
index 2ba9f1cb894..95b622c202c 100644
--- a/src/vlib/buffer_funcs.h
+++ b/src/vlib/buffer_funcs.h
@@ -140,6 +140,42 @@ vlib_buffer_copy_indices (u32 * dst, u32 * src, u32 n_indices)
     }
 }
 
+always_inline void
+vlib_buffer_copy_indices_from_ring (u32 * dst, u32 * ring, u32 start,
+				    u32 ring_size, u32 n_buffers)
+{
+  ASSERT (n_buffers <= ring_size);
+
+  if (PREDICT_TRUE (start + n_buffers <= ring_size))
+    {
+      vlib_buffer_copy_indices (dst, ring + start, n_buffers);
+    }
+  else
+    {
+      u32 n = ring_size - start;
+      vlib_buffer_copy_indices (dst, ring + start, n);
+      vlib_buffer_copy_indices (dst + n, ring, n_buffers - n);
+    }
+}
+
+always_inline void
+vlib_buffer_copy_indices_to_ring (u32 * ring, u32 * src, u32 start,
+				  u32 ring_size, u32 n_buffers)
+{
+  ASSERT (n_buffers <= ring_size);
+
+  if (PREDICT_TRUE (start + n_buffers <= ring_size))
+    {
+      vlib_buffer_copy_indices (ring + start, src, n_buffers);
+    }
+  else
+    {
+      u32 n = ring_size - start;
+      vlib_buffer_copy_indices (ring + start, src, n);
+      vlib_buffer_copy_indices (ring, src + n, n_buffers - n);
+    }
+}
+
 STATIC_ASSERT_OFFSET_OF (vlib_buffer_t, template_end, 64);
 static_always_inline void
 vlib_buffer_copy_template (vlib_buffer_t * b, vlib_buffer_t * bt)
@@ -185,8 +221,8 @@ vlib_get_buffers_with_offset (vlib_main_t * vm, u32 * bi, void **b, int count,
      se we maintain two-in-parallel variant */
   while (count >= 8)
     {
-      u64x4 b0 = u32x4_extend_to_u64x4 (u32x4_load_unaligned (bi));
-      u64x4 b1 = u32x4_extend_to_u64x4 (u32x4_load_unaligned (bi + 4));
+      u64x4 b0 = u64x4_from_u32x4 (u32x4_load_unaligned (bi));
+      u64x4 b1 = u64x4_from_u32x4 (u32x4_load_unaligned (bi + 4));
       /* shift and add to get vlib_buffer_t pointer */
       u64x4_store_unaligned ((b0 << CLIB_LOG2_CACHE_LINE_BYTES) + off, b);
       u64x4_store_unaligned ((b1 << CLIB_LOG2_CACHE_LINE_BYTES) + off, b + 4);
@@ -198,18 +234,18 @@ vlib_get_buffers_with_offset (vlib_main_t * vm, u32 * bi, void **b, int count,
   while (count >= 4)
     {
 #ifdef CLIB_HAVE_VEC256
-      u64x4 b0 = u32x4_extend_to_u64x4 (u32x4_load_unaligned (bi));
+      u64x4 b0 = u64x4_from_u32x4 (u32x4_load_unaligned (bi));
       /* shift and add to get vlib_buffer_t pointer */
       u64x4_store_unaligned ((b0 << CLIB_LOG2_CACHE_LINE_BYTES) + off, b);
 #elif defined (CLIB_HAVE_VEC128)
       u64x2 off = u64x2_splat (buffer_mem_start + offset);
       u32x4 bi4 = u32x4_load_unaligned (bi);
-      u64x2 b0 = u32x4_extend_to_u64x2 ((u32x4) bi4);
+      u64x2 b0 = u64x2_from_u32x4 ((u32x4) bi4);
 #if defined (__aarch64__)
-      u64x2 b1 = u32x4_extend_to_u64x2_high ((u32x4) bi4);
+      u64x2 b1 = u64x2_from_u32x4_high ((u32x4) bi4);
 #else
       bi4 = u32x4_shuffle (bi4, 2, 3, 0, 1);
-      u64x2 b1 = u32x4_extend_to_u64x2 ((u32x4) bi4);
+      u64x2 b1 = u64x2_from_u32x4 ((u32x4) bi4);
 #endif
       u64x2_store_unaligned ((b0 << CLIB_LOG2_CACHE_LINE_BYTES) + off, b);
       u64x2_store_unaligned ((b1 << CLIB_LOG2_CACHE_LINE_BYTES) + off, b + 2);
@@ -474,6 +510,13 @@ vlib_buffer_is_known (vlib_main_t * vm, u32 buffer_index)
 u8 *vlib_validate_buffer (vlib_main_t * vm, u32 buffer_index,
                           uword follow_chain);
 
+u8 *vlib_validate_buffers (vlib_main_t * vm,
+                           u32 * buffers,
+                           uword next_buffer_stride,
+                           uword n_buffers,
+                           vlib_buffer_known_state_t known_state,
+                           uword follow_buffer_next);
+
 static_always_inline vlib_buffer_pool_t *
 vlib_get_buffer_pool (vlib_main_t * vm, u8 buffer_pool_index)
 {
@@ -481,7 +524,7 @@ vlib_get_buffer_pool (vlib_main_t * vm, u8 buffer_pool_index)
   return vec_elt_at_index (bm->buffer_pools, buffer_pool_index);
 }
 
-static_always_inline uword
+static_always_inline __clib_warn_unused_result uword
 vlib_buffer_pool_get (vlib_main_t * vm, u8 buffer_pool_index, u32 * buffers,
                       u32 n_buffers)
 {
@@ -519,7 +562,7 @@ vlib_buffer_pool_get (vlib_main_t * vm, u8 buffer_pool_index, u32 * buffers,
    less than the number requested or zero
 */
 
-always_inline u32
+always_inline __clib_warn_unused_result u32
 vlib_buffer_alloc_from_pool (vlib_main_t * vm, u32 * buffers, u32 n_buffers,
                              u8 buffer_pool_index)
 {
@@ -528,6 +571,17 @@ vlib_buffer_alloc_from_pool (vlib_main_t * vm, u32 * buffers, u32 n_buffers,
   vlib_buffer_pool_thread_t *bpt;
   u32 *src, *dst, len, n_left;
 
+  /* If buffer allocation fault injection is configured */
+  if (VLIB_BUFFER_ALLOC_FAULT_INJECTOR > 0)
+    {
+      u32 vlib_buffer_alloc_may_fail (vlib_main_t *, u32);
+
+      /* See how many buffers we're willing to allocate */
+      n_buffers = vlib_buffer_alloc_may_fail (vm, n_buffers);
+      if (n_buffers == 0)
+	return (n_buffers);
+    }
+
   bp = vec_elt_at_index (bm->buffer_pools, buffer_pool_index);
   bpt = vec_elt_at_index (bp->threads, vm->thread_index);
 
@@ -602,7 +656,7 @@ vlib_buffer_alloc_from_pool (vlib_main_t * vm, u32 * buffers, u32 n_buffers,
    @return - (u32) number of buffers actually allocated, may be
    less than the number requested or zero
 */
-always_inline u32
+always_inline __clib_warn_unused_result u32
 vlib_buffer_alloc_on_numa (vlib_main_t * vm, u32 * buffers, u32 n_buffers,
                            u32 numa_node)
 {
@@ -619,7 +673,7 @@ vlib_buffer_alloc_on_numa (vlib_main_t * vm, u32 * buffers, u32 n_buffers,
    less than the number requested or zero
 */
 
-always_inline u32
+always_inline __clib_warn_unused_result u32
 vlib_buffer_alloc (vlib_main_t * vm, u32 * buffers, u32 n_buffers)
 {
   return vlib_buffer_alloc_on_numa (vm, buffers, n_buffers, vm->numa_node);
@@ -635,7 +689,7 @@ vlib_buffer_alloc (vlib_main_t * vm, u32 * buffers, u32 n_buffers)
   @return - (u32) number of buffers actually allocated, may be
   less than the number requested or zero
 */
-always_inline u32
+always_inline __clib_warn_unused_result u32
 vlib_buffer_alloc_to_ring (vlib_main_t * vm, u32 * ring, u32 start,
                            u32 ring_size, u32 n_buffers)
 {
@@ -664,7 +718,7 @@ vlib_buffer_alloc_to_ring (vlib_main_t * vm, u32 * ring, u32 start,
   @return - (u32) number of buffers actually allocated, may be
   less than the number requested or zero
 */
-always_inline u32
+always_inline __clib_warn_unused_result u32
 vlib_buffer_alloc_to_ring_from_pool (vlib_main_t * vm, u32 * ring, u32 start,
                                      u32 ring_size, u32 n_buffers,
                                      u8 buffer_pool_index)
@@ -1165,11 +1219,11 @@ vlib_buffer_clone_256 (vlib_main_t * vm, u32 src_buffer, u32 * buffers,
       d->next_buffer = src_buffer;
     }
   vlib_buffer_advance (s, head_end_offset);
-  s->ref_count = n_buffers;
+  s->ref_count = n_buffers ? n_buffers : s->ref_count;
   while (s->flags & VLIB_BUFFER_NEXT_PRESENT)
     {
       s = vlib_get_buffer (vm, s->next_buffer);
-      s->ref_count = n_buffers;
+      s->ref_count = n_buffers ? n_buffers : s->ref_count;
     }
 
   return n_buffers;
@@ -1332,7 +1386,7 @@ vlib_buffer_chain_append_data_with_alloc (vlib_main_t * vm,
 void vlib_buffer_chain_validate (vlib_main_t * vm, vlib_buffer_t * first);
 
 format_function_t format_vlib_buffer, format_vlib_buffer_and_data,
-  format_vlib_buffer_contents;
+  format_vlib_buffer_contents, format_vlib_buffer_no_chain;
 
 typedef struct
 {
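
The ring helpers introduced above pair naturally with the ring-aware allocators, and the new __clib_warn_unused_result annotations mean callers are expected to consume the returned buffer count. Below is a minimal producer-side sketch, not part of the patch: the my_rxq_t type and its fields (ring, size, n_enq, next) are illustrative names for a hypothetical descriptor ring, and <vlib/vlib.h> is assumed to pull in buffer_funcs.h as usual.

#include <vlib/vlib.h>

/* Hypothetical descriptor-ring state; all names here are illustrative
   and do not exist in buffer_funcs.h. */
typedef struct
{
  u32 *ring;			/* buffer index ring holding 'size' entries */
  u32 size;			/* total number of slots */
  u32 n_enq;			/* buffers currently sitting on the ring */
  u32 next;			/* first slot to refill */
} my_rxq_t;

static_always_inline void
my_rxq_refill (vlib_main_t * vm, my_rxq_t * rxq)
{
  u32 n_wanted = rxq->size - rxq->n_enq;
  u32 n_alloc;

  if (n_wanted == 0)
    return;

  /* The allocator may return anything from 0 to n_wanted buffers and is
     now marked __clib_warn_unused_result, so the count must be consumed;
     vlib_buffer_alloc_to_ring places the indices with wrap-around. */
  n_alloc = vlib_buffer_alloc_to_ring (vm, rxq->ring, rxq->next,
				       rxq->size, n_wanted);
  rxq->n_enq += n_alloc;
  rxq->next = (rxq->next + n_alloc) % rxq->size;
}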
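
On the consumer side, vlib_buffer_copy_indices_from_ring gathers a possibly wrapped run of indices into a linear array so the whole batch can be handed to an existing API in one call. Another small sketch with illustrative names only; 'slot' and 'n_done' would normally come from a completion index read elsewhere.

/* Free a completed run of buffers that starts at 'slot' on the ring. */
static_always_inline void
my_free_from_ring (vlib_main_t * vm, u32 * ring, u32 ring_size,
		   u32 slot, u32 n_done)
{
  u32 bi[VLIB_FRAME_SIZE];

  ASSERT (n_done <= VLIB_FRAME_SIZE);
  ASSERT (n_done <= ring_size && slot < ring_size);

  /* Linearize the wrapped segment, then free the whole batch. */
  vlib_buffer_copy_indices_from_ring (bi, ring, slot, ring_size, n_done);
  vlib_buffer_free (vm, bi, n_done);
}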