+#if defined(CLIB_HAVE_VEC512)
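+  /* copy 16 u32 buffer indices per iteration using 512-bit unaligned load/store */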
+  while (n_indices >= 16)
+    {
+      u32x16_store_unaligned (u32x16_load_unaligned (src), dst);
+      dst += 16;
+      src += 16;
+      n_indices -= 16;
+    }
+#endif
+
+#if defined(CLIB_HAVE_VEC256)
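+  /* copy 8 indices per iteration using 256-bit unaligned load/store */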
+  while (n_indices >= 8)
+    {
+      u32x8_store_unaligned (u32x8_load_unaligned (src), dst);
+      dst += 8;
+      src += 8;
+      n_indices -= 8;
+    }
+#endif
+
+#if defined(CLIB_HAVE_VEC128)
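+  /* copy 4 indices per iteration using 128-bit unaligned load/store */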
+  while (n_indices >= 4)
+    {
+      u32x4_store_unaligned (u32x4_load_unaligned (src), dst);
+      dst += 4;
+      src += 4;
+      n_indices -= 4;
+    }
+#endif
+
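+  /* scalar tail: copy any remaining indices one at a time */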
+  while (n_indices)
+    {
+      dst[0] = src[0];
+      dst += 1;
+      src += 1;
+      n_indices -= 1;
+    }
+}
+
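+/* copy n_buffers indices out of a circular ring starting at 'start',
+   splitting the copy in two when it wraps past the end of the ring */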
+always_inline void
+vlib_buffer_copy_indices_from_ring (u32 * dst, u32 * ring, u32 start,
+                                    u32 ring_size, u32 n_buffers)
+{
+  ASSERT (n_buffers <= ring_size);
+
+  if (PREDICT_TRUE (start + n_buffers <= ring_size))
+    {
+      vlib_buffer_copy_indices (dst, ring + start, n_buffers);
+    }
+  else
+    {
+      u32 n = ring_size - start;
+      vlib_buffer_copy_indices (dst, ring + start, n);
+      vlib_buffer_copy_indices (dst + n, ring, n_buffers - n);
+    }
+}
+
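+/* copy n_buffers indices into a circular ring starting at 'start',
+   splitting the copy in two when it wraps past the end of the ring */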
+always_inline void
+vlib_buffer_copy_indices_to_ring (u32 * ring, u32 * src, u32 start,
+                                  u32 ring_size, u32 n_buffers)
+{
+  ASSERT (n_buffers <= ring_size);
+
+  if (PREDICT_TRUE (start + n_buffers <= ring_size))
+    {
+      vlib_buffer_copy_indices (ring + start, src, n_buffers);
+    }
+  else
+    {
+      u32 n = ring_size - start;
+      vlib_buffer_copy_indices (ring + start, src, n);
+      vlib_buffer_copy_indices (ring, src + n, n_buffers - n);
+    }