}
}
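+
+/** \brief Copy buffer indices out of a circular ring, handling wrap-around
+
+    @param dst - (u32 *) destination array of buffer indices
+    @param ring - (u32 *) source ring of buffer indices
+    @param start - (u32) first ring slot to copy from
+    @param ring_size - (u32) number of slots in the ring
+    @param n_buffers - (u32) number of buffer indices to copy
+*/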
+always_inline void
+vlib_buffer_copy_indices_from_ring (u32 * dst, u32 * ring, u32 start,
+                                    u32 ring_size, u32 n_buffers)
+{
+  ASSERT (n_buffers <= ring_size);
+
+  if (PREDICT_TRUE (start + n_buffers <= ring_size))
+    {
+      vlib_buffer_copy_indices (dst, ring + start, n_buffers);
+    }
+  else
+    {
+      u32 n = ring_size - start;
+      vlib_buffer_copy_indices (dst, ring + start, n);
+      vlib_buffer_copy_indices (dst + n, ring, n_buffers - n);
+    }
+}
+
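+/** \brief Copy buffer indices into a circular ring, handling wrap-around
+
+    @param ring - (u32 *) destination ring of buffer indices
+    @param src - (u32 *) source array of buffer indices
+    @param start - (u32) first ring slot to copy into
+    @param ring_size - (u32) number of slots in the ring
+    @param n_buffers - (u32) number of buffer indices to copy
+*/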
+always_inline void
+vlib_buffer_copy_indices_to_ring (u32 * ring, u32 * src, u32 start,
+                                  u32 ring_size, u32 n_buffers)
+{
+  ASSERT (n_buffers <= ring_size);
+
+  if (PREDICT_TRUE (start + n_buffers <= ring_size))
+    {
+      vlib_buffer_copy_indices (ring + start, src, n_buffers);
+    }
+  else
+    {
+      u32 n = ring_size - start;
+      vlib_buffer_copy_indices (ring + start, src, n);
+      vlib_buffer_copy_indices (ring, src + n, n_buffers - n);
+    }
+}
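+
+/*
+ * Illustrative sketch only, not part of this change: a driver with a
+ * wrapping descriptor ring could stash and later recover buffer indices
+ * like this, where "txq", "bufs", "size", "from", "to", "slot" and "n"
+ * are hypothetical names:
+ *
+ *   vlib_buffer_copy_indices_to_ring (txq->bufs, from, slot, txq->size, n);
+ *   ...
+ *   vlib_buffer_copy_indices_from_ring (to, txq->bufs, slot, txq->size, n);
+ */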
+
STATIC_ASSERT_OFFSET_OF (vlib_buffer_t, template_end, 64);
static_always_inline void
vlib_buffer_copy_template (vlib_buffer_t * b, vlib_buffer_t * bt)
  vlib_buffer_pool_thread_t *bpt;
  u32 *src, *dst, len, n_left;
+  /* If buffer allocation fault injection is configured */
+  if (VLIB_BUFFER_ALLOC_FAULT_INJECTOR > 0)
+    {
+      u32 vlib_buffer_alloc_may_fail (vlib_main_t *, u32);
+
+      /* See how many buffers we're willing to allocate */
+      n_buffers = vlib_buffer_alloc_may_fail (vm, n_buffers);
+      if (n_buffers == 0)
+        return (n_buffers);
+    }
+
  bp = vec_elt_at_index (bm->buffer_pools, buffer_pool_index);
  bpt = vec_elt_at_index (bp->threads, vm->thread_index);
      d->next_buffer = src_buffer;
    }
  vlib_buffer_advance (s, head_end_offset);
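+  /* Set the ref count on the head and chained buffers; when n_buffers is
+   * zero, keep the existing ref_count instead of overwriting it */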
-  s->ref_count = n_buffers;
+  s->ref_count = n_buffers ? n_buffers : s->ref_count;
  while (s->flags & VLIB_BUFFER_NEXT_PRESENT)
    {
      s = vlib_get_buffer (vm, s->next_buffer);
-      s->ref_count = n_buffers;
+      s->ref_count = n_buffers ? n_buffers : s->ref_count;
    }
  return n_buffers;