+ vlib_buffer_main_t *bm = vm->buffer_main;
+ vlib_buffer_t *b;
+
+ b = vlib_buffer_ptr_from_index (bm->buffer_mem_start, buffer_index, 0);
+ vlib_buffer_validate (vm, b);
+ return b;
+}
+
+static_always_inline u32
+vlib_buffer_get_default_data_size (vlib_main_t * vm)
+{
+ return vm->buffer_main->default_data_size;
+}
+
+static_always_inline void
+vlib_buffer_copy_indices (u32 * dst, u32 * src, u32 n_indices)
+{
+#if defined(CLIB_HAVE_VEC512)
+ while (n_indices >= 16)
+ {
+ u32x16_store_unaligned (u32x16_load_unaligned (src), dst);
+ dst += 16;
+ src += 16;
+ n_indices -= 16;
+ }
+#endif
+
+#if defined(CLIB_HAVE_VEC256)
+ while (n_indices >= 8)
+ {
+ u32x8_store_unaligned (u32x8_load_unaligned (src), dst);
+ dst += 8;
+ src += 8;
+ n_indices -= 8;
+ }
+#endif
+
+#if defined(CLIB_HAVE_VEC128)
+ while (n_indices >= 4)
+ {
+ u32x4_store_unaligned (u32x4_load_unaligned (src), dst);
+ dst += 4;
+ src += 4;
+ n_indices -= 4;
+ }
+#endif
+
+ while (n_indices)
+ {
+ dst[0] = src[0];
+ dst += 1;
+ src += 1;
+ n_indices -= 1;
+ }
+}
+
+always_inline void
+vlib_buffer_copy_indices_from_ring (u32 * dst, u32 * ring, u32 start,
+ u32 ring_size, u32 n_buffers)
+{
+ ASSERT (n_buffers <= ring_size);
+
+ if (PREDICT_TRUE (start + n_buffers <= ring_size))
+ {
+ vlib_buffer_copy_indices (dst, ring + start, n_buffers);
+ }
+ else
+ {
+ u32 n = ring_size - start;
+ vlib_buffer_copy_indices (dst, ring + start, n);
+ vlib_buffer_copy_indices (dst + n, ring, n_buffers - n);
+ }
+}
+
+always_inline void
+vlib_buffer_copy_indices_to_ring (u32 * ring, u32 * src, u32 start,
+ u32 ring_size, u32 n_buffers)
+{
+ ASSERT (n_buffers <= ring_size);
+
+ if (PREDICT_TRUE (start + n_buffers <= ring_size))
+ {
+ vlib_buffer_copy_indices (ring + start, src, n_buffers);
+ }
+ else
+ {
+ u32 n = ring_size - start;
+ vlib_buffer_copy_indices (ring + start, src, n);
+ vlib_buffer_copy_indices (ring, src + n, n_buffers - n);
+ }
+}
+
+STATIC_ASSERT_OFFSET_OF (vlib_buffer_t, template_end, 64);
+static_always_inline void
+vlib_buffer_copy_template (vlib_buffer_t * b, vlib_buffer_t * bt)
+{
+#if defined CLIB_HAVE_VEC512
+ b->as_u8x64[0] = bt->as_u8x64[0];
+#elif defined (CLIB_HAVE_VEC256)
+ b->as_u8x32[0] = bt->as_u8x32[0];
+ b->as_u8x32[1] = bt->as_u8x32[1];
+#elif defined (CLIB_HAVE_VEC128)
+ b->as_u8x16[0] = bt->as_u8x16[0];
+ b->as_u8x16[1] = bt->as_u8x16[1];
+ b->as_u8x16[2] = bt->as_u8x16[2];
+ b->as_u8x16[3] = bt->as_u8x16[3];
+#else
+ clib_memcpy_fast (b, bt, 64);
+#endif
+}
+
+always_inline u8
+vlib_buffer_pool_get_default_for_numa (vlib_main_t * vm, u32 numa_node)
+{
+ ASSERT (numa_node < VLIB_BUFFER_MAX_NUMA_NODES);
+ return vm->buffer_main->default_buffer_pool_index_for_numa[numa_node];
+}
+
+/** \brief Translate array of buffer indices into buffer pointers with offset
+
+ @param vm - (vlib_main_t *) vlib main data structure pointer
+ @param bi - (u32 *) array of buffer indices
+ @param b - (void **) array to store buffer pointers
    @param count - (int) number of elements
+ @param offset - (i32) offset applied to each pointer
+*/
static_always_inline void
vlib_get_buffers_with_offset (vlib_main_t * vm, u32 * bi, void **b, int count,
			      i32 offset)
{
  uword buffer_mem_start = vm->buffer_main->buffer_mem_start;
#ifdef CLIB_HAVE_VEC256
  /* base address and caller offset fold into a single splatted constant;
     each pointer is then (index << log2 cache-line size) + that constant */
  u64x4 off = u64x4_splat (buffer_mem_start + offset);
  /* if count is not const, compiler will not unroll while loop
     so we maintain two-in-parallel variant */
  while (count >= 8)
    {
      /* widen 4 u32 indices to 4 u64 lanes, twice per iteration */
      u64x4 b0 = u32x4_extend_to_u64x4 (u32x4_load_unaligned (bi));
      u64x4 b1 = u32x4_extend_to_u64x4 (u32x4_load_unaligned (bi + 4));
      /* shift and add to get vlib_buffer_t pointer */
      u64x4_store_unaligned ((b0 << CLIB_LOG2_CACHE_LINE_BYTES) + off, b);
      u64x4_store_unaligned ((b1 << CLIB_LOG2_CACHE_LINE_BYTES) + off, b + 4);
      b += 8;
      bi += 8;
      count -= 8;
    }
#endif
  while (count >= 4)
    {
#ifdef CLIB_HAVE_VEC256
      u64x4 b0 = u32x4_extend_to_u64x4 (u32x4_load_unaligned (bi));
      /* shift and add to get vlib_buffer_t pointer */
      u64x4_store_unaligned ((b0 << CLIB_LOG2_CACHE_LINE_BYTES) + off, b);
#elif defined (CLIB_HAVE_VEC128)
      u64x2 off = u64x2_splat (buffer_mem_start + offset);
      u32x4 bi4 = u32x4_load_unaligned (bi);
      /* the low two indices widen directly to two u64 lanes */
      u64x2 b0 = u32x4_extend_to_u64x2 ((u32x4) bi4);
#if defined (__aarch64__)
      u64x2 b1 = u32x4_extend_to_u64x2_high ((u32x4) bi4);
#else
      /* no high-half extend here: rotate the upper two lanes down first,
         then widen those */
      bi4 = u32x4_shuffle (bi4, 2, 3, 0, 1);
      u64x2 b1 = u32x4_extend_to_u64x2 ((u32x4) bi4);
#endif
      u64x2_store_unaligned ((b0 << CLIB_LOG2_CACHE_LINE_BYTES) + off, b);
      u64x2_store_unaligned ((b1 << CLIB_LOG2_CACHE_LINE_BYTES) + off, b + 2);
#else
      /* scalar fallback, four translations per iteration */
      b[0] = vlib_buffer_ptr_from_index (buffer_mem_start, bi[0], offset);
      b[1] = vlib_buffer_ptr_from_index (buffer_mem_start, bi[1], offset);
      b[2] = vlib_buffer_ptr_from_index (buffer_mem_start, bi[2], offset);
      b[3] = vlib_buffer_ptr_from_index (buffer_mem_start, bi[3], offset);
#endif
      b += 4;
      bi += 4;
      count -= 4;
    }
  /* remainder, one at a time */
  while (count)
    {
      b[0] = vlib_buffer_ptr_from_index (buffer_mem_start, bi[0], offset);
      b += 1;
      bi += 1;
      count -= 1;
    }
}
+
+/** \brief Translate array of buffer indices into buffer pointers
+
+ @param vm - (vlib_main_t *) vlib main data structure pointer
+ @param bi - (u32 *) array of buffer indices
+ @param b - (vlib_buffer_t **) array to store buffer pointers
    @param count - (int) number of elements
+*/
+
+static_always_inline void
+vlib_get_buffers (vlib_main_t * vm, u32 * bi, vlib_buffer_t ** b, int count)
+{
+ vlib_get_buffers_with_offset (vm, bi, (void **) b, count, 0);