#include <vppinfra/hash.h>
#include <vppinfra/fifo.h>
+#include <vlib/buffer.h>
+#include <vlib/physmem_funcs.h>
+#include <vlib/main.h>
+#include <vlib/node.h>
/** \file
vlib buffer access methods. */
/* reference count in an allocated buffer must always be 1 or higher */
ASSERT (b->ref_count > 0);
- /* verify that buffer pointer is from buffer memory range */
- ASSERT (pointer_to_uword (b) >= bm->buffer_mem_start);
- ASSERT (pointer_to_uword (b) < bm->buffer_mem_start + bm->buffer_mem_size -
- VLIB_BUFFER_DATA_SIZE);
-
/* verify that buffer pool index is valid */
bp = vec_elt_at_index (bm->buffer_pools, b->buffer_pool_index);
ASSERT (pointer_to_uword (b) >= bp->start);
ASSERT (pointer_to_uword (b) < bp->start + bp->size -
- VLIB_BUFFER_DATA_SIZE);
+ (bp->data_size + sizeof (vlib_buffer_t)));
}
always_inline void *
return b;
}
+static_always_inline u32
+vlib_buffer_get_default_data_size (vlib_main_t * vm)
+{
+ return vm->buffer_main->default_data_size;
+}
+
static_always_inline void
vlib_buffer_copy_indices (u32 * dst, u32 * src, u32 n_indices)
{
- clib_memcpy_fast (dst, src, n_indices * sizeof (u32));
+#if defined(CLIB_HAVE_VEC512)
+ while (n_indices >= 16)
+ {
+ u32x16_store_unaligned (u32x16_load_unaligned (src), dst);
+ dst += 16;
+ src += 16;
+ n_indices -= 16;
+ }
+#endif
+
+#if defined(CLIB_HAVE_VEC256)
+ while (n_indices >= 8)
+ {
+ u32x8_store_unaligned (u32x8_load_unaligned (src), dst);
+ dst += 8;
+ src += 8;
+ n_indices -= 8;
+ }
+#endif
+
+#if defined(CLIB_HAVE_VEC128)
+ while (n_indices >= 4)
+ {
+ u32x4_store_unaligned (u32x4_load_unaligned (src), dst);
+ dst += 4;
+ src += 4;
+ n_indices -= 4;
+ }
+#endif
+
+ while (n_indices)
+ {
+ dst[0] = src[0];
+ dst += 1;
+ src += 1;
+ n_indices -= 1;
+ }
+}
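+
+/* Usage sketch (illustrative; "frame" is assumed to be a vlib_frame_t *):
+   stage a frame's buffer indices into a local array, letting the widest
+   available vector unit do the bulk of the copy:
+
+     u32 local[VLIB_FRAME_SIZE];
+     u32 *from = vlib_frame_vector_args (frame);
+     vlib_buffer_copy_indices (local, from, frame->n_vectors);
+*/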
+
+always_inline void
+vlib_buffer_copy_indices_from_ring (u32 * dst, u32 * ring, u32 start,
+ u32 ring_size, u32 n_buffers)
+{
+ ASSERT (n_buffers <= ring_size);
+
+ if (PREDICT_TRUE (start + n_buffers <= ring_size))
+ {
+ vlib_buffer_copy_indices (dst, ring + start, n_buffers);
+ }
+ else
+ {
+ u32 n = ring_size - start;
+ vlib_buffer_copy_indices (dst, ring + start, n);
+ vlib_buffer_copy_indices (dst + n, ring, n_buffers - n);
+ }
+}
+
+always_inline void
+vlib_buffer_copy_indices_to_ring (u32 * ring, u32 * src, u32 start,
+ u32 ring_size, u32 n_buffers)
+{
+ ASSERT (n_buffers <= ring_size);
+
+ if (PREDICT_TRUE (start + n_buffers <= ring_size))
+ {
+ vlib_buffer_copy_indices (ring + start, src, n_buffers);
+ }
+ else
+ {
+ u32 n = ring_size - start;
+ vlib_buffer_copy_indices (ring + start, src, n);
+ vlib_buffer_copy_indices (ring, src + n, n_buffers - n);
+ }
}
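
/* Usage sketch (illustrative): drain 64 entries from a circular ring of
   buffer indices; "ring", "head" and "ring_size" are hypothetical ring
   state, and the wrap-around is handled by the helper itself:

     u32 bi[64];
     vlib_buffer_copy_indices_from_ring (bi, ring, head, ring_size, 64);
     head = (head + 64) % ring_size;
*/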
STATIC_ASSERT_OFFSET_OF (vlib_buffer_t, template_end, 64);
always_inline u8
vlib_buffer_pool_get_default_for_numa (vlib_main_t * vm, u32 numa_node)
{
- ASSERT (numa_node < vm->buffer_main->n_numa_nodes);
- return numa_node;
+ ASSERT (numa_node < VLIB_BUFFER_MAX_NUMA_NODES);
+ return vm->buffer_main->default_buffer_pool_index_for_numa[numa_node];
}
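
/* Usage sketch (illustrative): allocate from the default pool of the
   calling thread's numa node; assumes vm->numa_node holds that node:

     u32 bi[32];
     u8 bpi = vlib_buffer_pool_get_default_for_numa (vm, vm->numa_node);
     u32 n_alloc = vlib_buffer_alloc_from_pool (vm, bi, 32, bpi);
*/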
/** \brief Translate array of buffer indices into buffer pointers with offset
u8 *vlib_validate_buffer (vlib_main_t * vm, u32 buffer_index,
uword follow_chain);
+u8 *vlib_validate_buffers (vlib_main_t * vm,
+ u32 * buffers,
+ uword next_buffer_stride,
+ uword n_buffers,
+ vlib_buffer_known_state_t known_state,
+ uword follow_buffer_next);
+
static_always_inline vlib_buffer_pool_t *
vlib_get_buffer_pool (vlib_main_t * vm, u8 buffer_pool_index)
{
ASSERT (bp->buffers);
clib_spinlock_lock (&bp->lock);
- len = vec_len (bp->buffers);
+ len = bp->n_avail;
if (PREDICT_TRUE (n_buffers < len))
{
len -= n_buffers;
vlib_buffer_copy_indices (buffers, bp->buffers + len, n_buffers);
- _vec_len (bp->buffers) = len;
+ bp->n_avail = len;
clib_spinlock_unlock (&bp->lock);
return n_buffers;
}
else
{
vlib_buffer_copy_indices (buffers, bp->buffers, len);
- _vec_len (bp->buffers) = 0;
+ bp->n_avail = 0;
clib_spinlock_unlock (&bp->lock);
return len;
}
dst = buffers;
n_left = n_buffers;
- len = vec_len (bpt->cached_buffers);
+ len = bpt->n_cached;
/* per-thread cache contains enough buffers */
if (len >= n_buffers)
{
src = bpt->cached_buffers + len - n_buffers;
vlib_buffer_copy_indices (dst, src, n_buffers);
- _vec_len (bpt->cached_buffers) -= n_buffers;
+ bpt->n_cached -= n_buffers;
+
+ if (CLIB_DEBUG > 0)
+ vlib_buffer_validate_alloc_free (vm, buffers, n_buffers,
+ VLIB_BUFFER_KNOWN_FREE);
+ return n_buffers;
+ }
+
+ /* allocation request is bigger than the cache - take buffers directly
+    from the main pool */
+ if (n_buffers >= VLIB_BUFFER_POOL_PER_THREAD_CACHE_SZ)
+ {
+ n_buffers = vlib_buffer_pool_get (vm, buffer_pool_index, buffers,
+ n_buffers);
if (CLIB_DEBUG > 0)
vlib_buffer_validate_alloc_free (vm, buffers, n_buffers,
if (len)
{
vlib_buffer_copy_indices (dst, bpt->cached_buffers, len);
- _vec_len (bpt->cached_buffers) = 0;
+ bpt->n_cached = 0;
dst += len;
n_left -= len;
}
len = round_pow2 (n_left, 32);
- vec_validate_aligned (bpt->cached_buffers, len - 1, CLIB_CACHE_LINE_BYTES);
len = vlib_buffer_pool_get (vm, buffer_pool_index, bpt->cached_buffers,
len);
- _vec_len (bpt->cached_buffers) = len;
+ bpt->n_cached = len;
if (len)
{
u32 n_copy = clib_min (len, n_left);
src = bpt->cached_buffers + len - n_copy;
vlib_buffer_copy_indices (dst, src, n_copy);
- _vec_len (bpt->cached_buffers) -= n_copy;
+ bpt->n_cached -= n_copy;
n_left -= n_copy;
}
return n_alloc;
}
-static void
+static_always_inline void
vlib_buffer_pool_put (vlib_main_t * vm, u8 buffer_pool_index,
u32 * buffers, u32 n_buffers)
{
vlib_buffer_pool_t *bp = vlib_get_buffer_pool (vm, buffer_pool_index);
- vlib_buffer_pool_thread_t *bpt =
- vec_elt_at_index (bp->threads, vm->thread_index);
+ vlib_buffer_pool_thread_t *bpt = vec_elt_at_index (bp->threads,
+ vm->thread_index);
+ u32 n_cached, n_empty;
- vec_add_aligned (bpt->cached_buffers, buffers, n_buffers,
- CLIB_CACHE_LINE_BYTES);
+ if (CLIB_DEBUG > 0)
+ vlib_buffer_validate_alloc_free (vm, buffers, n_buffers,
+ VLIB_BUFFER_KNOWN_ALLOCATED);
- if (vec_len (bpt->cached_buffers) > 4 * VLIB_FRAME_SIZE)
+ n_cached = bpt->n_cached;
+ n_empty = VLIB_BUFFER_POOL_PER_THREAD_CACHE_SZ - n_cached;
+ if (n_buffers <= n_empty)
{
- clib_spinlock_lock (&bp->lock);
- /* keep last stored buffers, as they are more likely hot in the cache */
- vec_add_aligned (bp->buffers, bpt->cached_buffers, VLIB_FRAME_SIZE,
- CLIB_CACHE_LINE_BYTES);
- vec_delete (bpt->cached_buffers, VLIB_FRAME_SIZE, 0);
- bpt->n_alloc -= VLIB_FRAME_SIZE;
- clib_spinlock_unlock (&bp->lock);
+ vlib_buffer_copy_indices (bpt->cached_buffers + n_cached,
+ buffers, n_buffers);
+ bpt->n_cached = n_cached + n_buffers;
+ return;
}
+
+ vlib_buffer_copy_indices (bpt->cached_buffers + n_cached,
+ buffers + n_buffers - n_empty, n_empty);
+ bpt->n_cached = VLIB_BUFFER_POOL_PER_THREAD_CACHE_SZ;
+
+ clib_spinlock_lock (&bp->lock);
+ vlib_buffer_copy_indices (bp->buffers + bp->n_avail, buffers,
+ n_buffers - n_empty);
+ bp->n_avail += n_buffers - n_empty;
+ clib_spinlock_unlock (&bp->lock);
}
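
/* Worked example (illustrative numbers): assuming
   VLIB_BUFFER_POOL_PER_THREAD_CACHE_SZ is 512 and the per-thread cache
   already holds n_cached = 480 indices, freeing n_buffers = 100 stores the
   last n_empty = 32 indices into the cache and returns the first 68 to the
   global pool, so the spinlock is taken only on cache overflow. */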
static_always_inline void
u8 buffer_pool_index = ~0;
u32 n_queue = 0, queue[queue_size + 4];
vlib_buffer_t bt = { };
-#if defined(CLIB_HAVE_VEC128) && !__aarch64__
+#if defined(CLIB_HAVE_VEC128)
vlib_buffer_t bpi_mask = {.buffer_pool_index = ~0 };
vlib_buffer_t bpi_vec = {.buffer_pool_index = ~0 };
vlib_buffer_t flags_refs_mask = {
.flags = VLIB_BUFFER_NEXT_PRESENT,
- .ref_count = ~0
+ .ref_count = ~1
};
#endif
vlib_prefetch_buffer_header (b[6], LOAD);
vlib_prefetch_buffer_header (b[7], LOAD);
-#if defined(CLIB_HAVE_VEC128) && !__aarch64__
+#if defined(CLIB_HAVE_VEC128)
u8x16 p0, p1, p2, p3, r;
p0 = u8x16_load_unaligned (b[0]);
p1 = u8x16_load_unaligned (b[1]);
vlib_buffer_copy_template (b[3], &bt);
n_queue += 4;
- if (CLIB_DEBUG > 0)
- vlib_buffer_validate_alloc_free (vm, buffers, 4,
- VLIB_BUFFER_KNOWN_ALLOCATED);
-
vlib_buffer_validate (vm, b[0]);
vlib_buffer_validate (vm, b[1]);
vlib_buffer_validate (vm, b[2]);
if (PREDICT_FALSE (buffer_pool_index != b[0]->buffer_pool_index))
{
- buffer_pool_index = b[0]->buffer_pool_index;
-#if defined(CLIB_HAVE_VEC128) && !__aarch64__
- bpi_vec.buffer_pool_index = buffer_pool_index;
-#endif
- bp = vlib_get_buffer_pool (vm, buffer_pool_index);
- vlib_buffer_copy_template (&bt, &bp->buffer_template);
if (n_queue)
{
vlib_buffer_pool_put (vm, buffer_pool_index, queue, n_queue);
n_queue = 0;
}
+
+ buffer_pool_index = b[0]->buffer_pool_index;
+#if defined(CLIB_HAVE_VEC128)
+ bpi_vec.buffer_pool_index = buffer_pool_index;
+#endif
+ bp = vlib_get_buffer_pool (vm, buffer_pool_index);
+ vlib_buffer_copy_template (&bt, &bp->buffer_template);
}
vlib_buffer_validate (vm, b[0]);
if (clib_atomic_sub_fetch (&b[0]->ref_count, 1) == 0)
{
- if (CLIB_DEBUG > 0)
- vlib_buffer_validate_alloc_free (vm, &bi, 1,
- VLIB_BUFFER_KNOWN_ALLOCATED);
vlib_buffer_copy_template (b[0], &bt);
queue[n_queue++] = bi;
}
n_queue = 0;
}
- if (flags & VLIB_BUFFER_NEXT_PRESENT)
+ if (maybe_next && (flags & VLIB_BUFFER_NEXT_PRESENT))
{
bi = next;
goto next_in_chain;
int vlib_buffer_add_data (vlib_main_t * vm, u32 * buffer_index, void *data,
u32 n_data_bytes);
+/* Define vlib_buffer and vnet_buffer flag bits preserved for copy/clone.
+   ~VLIB_BUFFER_FLAGS_ALL keeps every bit outside the vlib-owned flag range,
+   so user (vnet) flags stored there survive the copy. */
+#define VLIB_BUFFER_COPY_CLONE_FLAGS_MASK \
+ (VLIB_BUFFER_NEXT_PRESENT | VLIB_BUFFER_TOTAL_LENGTH_VALID | \
+ VLIB_BUFFER_IS_TRACED | ~VLIB_BUFFER_FLAGS_ALL)
+
/* duplicate all buffers in chain */
always_inline vlib_buffer_t *
vlib_buffer_copy (vlib_main_t * vm, vlib_buffer_t * b)
{
vlib_buffer_t *s, *d, *fd;
uword n_alloc, n_buffers = 1;
- u32 flag_mask = VLIB_BUFFER_NEXT_PRESENT | VLIB_BUFFER_TOTAL_LENGTH_VALID;
+ u32 flag_mask = VLIB_BUFFER_COPY_CLONE_FLAGS_MASK;
int i;
s = b;
d->current_data = s->current_data;
d->current_length = s->current_length;
d->flags = s->flags & flag_mask;
+ d->trace_handle = s->trace_handle;
d->total_length_not_including_first_buffer =
s->total_length_not_including_first_buffer;
clib_memcpy_fast (d->opaque, s->opaque, sizeof (s->opaque));
return fd;
}
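
/* Usage sketch (illustrative): duplicate a packet so the original can be
   forwarded while the copy is punted; "bi0"/"ci0" are hypothetical indices:

     vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0);
     vlib_buffer_t *c0 = vlib_buffer_copy (vm, b0);
     if (c0 != 0)
       ci0 = vlib_get_buffer_index (vm, c0);
*/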
+/* duplicate first buffer in chain */
+always_inline vlib_buffer_t *
+vlib_buffer_copy_no_chain (vlib_main_t * vm, vlib_buffer_t * b, u32 * di)
+{
+ vlib_buffer_t *d;
+
+ if ((vlib_buffer_alloc (vm, di, 1)) != 1)
+ return 0;
+
+ d = vlib_get_buffer (vm, *di);
+ /* 1st segment */
+ d->current_data = b->current_data;
+ d->current_length = b->current_length;
+ clib_memcpy_fast (d->opaque, b->opaque, sizeof (b->opaque));
+ clib_memcpy_fast (d->opaque2, b->opaque2, sizeof (b->opaque2));
+ clib_memcpy_fast (vlib_buffer_get_current (d),
+ vlib_buffer_get_current (b), b->current_length);
+
+ return d;
+}
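+
+/* Usage sketch (illustrative): copy only the first segment, e.g. to hand
+   the packet headers to a slow path; returns 0 on allocation failure:
+
+     u32 di;
+     vlib_buffer_t *d = vlib_buffer_copy_no_chain (vm, b, &di);
+*/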
+
+/** \brief Move packet from current position to offset position in buffer.
+   Only works for small packets using a single buffer with room to fit the move
+ @param vm - (vlib_main_t *) vlib main data structure pointer
+ @param b - (vlib_buffer_t *) pointer to buffer
+ @param offset - (i16) position to move the packet in buffer
+ */
+always_inline void
+vlib_buffer_move (vlib_main_t * vm, vlib_buffer_t * b, i16 offset)
+{
+ ASSERT ((b->flags & VLIB_BUFFER_NEXT_PRESENT) == 0);
+ ASSERT (offset + VLIB_BUFFER_PRE_DATA_SIZE >= 0);
+ ASSERT (offset + b->current_length <
+ vlib_buffer_get_default_data_size (vm));
+
+ u8 *source = vlib_buffer_get_current (b);
+ b->current_data = offset;
+ u8 *destination = vlib_buffer_get_current (b);
+ u16 length = b->current_length;
+
+ if (source + length <= destination) /* no overlap */
+ clib_memcpy_fast (destination, source, length);
+ else
+ memmove (destination, source, length);
+}
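+
+/* Usage sketch (illustrative): move the packet to current_data == 0 to
+   maximize the room left at the end of the buffer before appending a
+   trailer; assumes a single-segment packet:
+
+     vlib_buffer_move (vm, b, 0);
+*/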
+
/** \brief Create a maximum of 256 clones of buffer and store them
in the supplied array
@param n_buffers - (u16) number of buffer clones requested (<=256)
@param head_end_offset - (u16) offset relative to current position
where packet head ends
+ @param offset - (i16) copy packet head at the current position if 0,
+    else at the given offset position, changing the headroom as specified
@return - (u16) number of buffers actually cloned, may be
less than the number requested or zero
*/
always_inline u16
vlib_buffer_clone_256 (vlib_main_t * vm, u32 src_buffer, u32 * buffers,
- u16 n_buffers, u16 head_end_offset)
+ u16 n_buffers, u16 head_end_offset, i16 offset)
{
u16 i;
vlib_buffer_t *s = vlib_get_buffer (vm, src_buffer);
ASSERT (s->ref_count == 1);
ASSERT (n_buffers);
ASSERT (n_buffers <= 256);
+ ASSERT (offset + VLIB_BUFFER_PRE_DATA_SIZE >= 0);
+ ASSERT ((offset + head_end_offset) <
+ vlib_buffer_get_default_data_size (vm));
if (s->current_length <= head_end_offset + CLIB_CACHE_LINE_BYTES * 2)
{
buffers[0] = src_buffer;
+ if (offset)
+ vlib_buffer_move (vm, s, offset);
+
for (i = 1; i < n_buffers; i++)
{
vlib_buffer_t *d;
return n_buffers;
}
- if (PREDICT_FALSE (n_buffers == 1))
+ if (PREDICT_FALSE ((n_buffers == 1) && (offset == 0)))
{
buffers[0] = src_buffer;
return 1;
for (i = 0; i < n_buffers; i++)
{
vlib_buffer_t *d = vlib_get_buffer (vm, buffers[i]);
- d->current_data = s->current_data;
+ if (offset)
+ d->current_data = offset;
+ else
+ d->current_data = s->current_data;
+
d->current_length = head_end_offset;
ASSERT (d->buffer_pool_index == s->buffer_pool_index);
d->total_length_not_including_first_buffer +=
s->total_length_not_including_first_buffer;
}
- d->flags = s->flags | VLIB_BUFFER_NEXT_PRESENT;
- d->flags &= ~VLIB_BUFFER_EXT_HDR_VALID;
+ d->flags = (s->flags & VLIB_BUFFER_COPY_CLONE_FLAGS_MASK) |
+ VLIB_BUFFER_NEXT_PRESENT;
+ d->trace_handle = s->trace_handle;
clib_memcpy_fast (d->opaque, s->opaque, sizeof (s->opaque));
clib_memcpy_fast (d->opaque2, s->opaque2, sizeof (s->opaque2));
clib_memcpy_fast (vlib_buffer_get_current (d),
@param n_buffers - (u16) number of buffer clones requested (<=256)
@param head_end_offset - (u16) offset relative to current position
where packet head ends
+ @param offset - (i16) copy packet head at the current position if 0,
+    else at the given offset position, changing the headroom as specified
@return - (u16) number of buffers actually cloned, may be
less than the number requested or zero
*/
always_inline u16
-vlib_buffer_clone (vlib_main_t * vm, u32 src_buffer, u32 * buffers,
- u16 n_buffers, u16 head_end_offset)
+vlib_buffer_clone_at_offset (vlib_main_t * vm, u32 src_buffer, u32 * buffers,
+ u16 n_buffers, u16 head_end_offset, i16 offset)
{
vlib_buffer_t *s = vlib_get_buffer (vm, src_buffer);
u16 n_cloned = 0;
n_cloned += vlib_buffer_clone_256 (vm,
vlib_get_buffer_index (vm, copy),
(buffers + n_cloned),
- 256, head_end_offset);
+ 256, head_end_offset, offset);
n_buffers -= 256;
}
n_cloned += vlib_buffer_clone_256 (vm, src_buffer,
buffers + n_cloned,
- n_buffers, head_end_offset);
+ n_buffers, head_end_offset, offset);
return n_cloned;
}
+/** \brief Create multiple clones of buffer and store them
+ in the supplied array
+
+ @param vm - (vlib_main_t *) vlib main data structure pointer
+ @param src_buffer - (u32) source buffer index
+ @param buffers - (u32 * ) buffer index array
+ @param n_buffers - (u16) number of buffer clones requested (<=256)
+ @param head_end_offset - (u16) offset relative to current position
+ where packet head ends
+ @return - (u16) number of buffers actually cloned, may be
+ less than the number requested or zero
+*/
+always_inline u16
+vlib_buffer_clone (vlib_main_t * vm, u32 src_buffer, u32 * buffers,
+ u16 n_buffers, u16 head_end_offset)
+{
+ return vlib_buffer_clone_at_offset (vm, src_buffer, buffers, n_buffers,
+ head_end_offset, 0);
+}
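+
+/* Usage sketch (illustrative): fan a packet out to two destinations,
+   sharing the payload and giving each clone a private 64-byte head;
+   "bi0" is a hypothetical source buffer index:
+
+     u32 clones[2];
+     u16 n = vlib_buffer_clone (vm, bi0, clones, 2, 64);
+     // n may be less than 2; fall back to vlib_buffer_copy () in that case
+*/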
+
/** \brief Attach cloned tail to the buffer
@param vm - (vlib_main_t *) vlib main data structure pointer
vlib_buffer_t * first,
vlib_buffer_t * last, void *data, u16 data_len)
{
- u32 n_buffer_bytes = VLIB_BUFFER_DATA_SIZE;
+ u32 n_buffer_bytes = vlib_buffer_get_default_data_size (vm);
ASSERT (n_buffer_bytes >= last->current_length + last->current_data);
u16 len = clib_min (data_len,
n_buffer_bytes - last->current_length -
void vlib_buffer_chain_validate (vlib_main_t * vm, vlib_buffer_t * first);
format_function_t format_vlib_buffer, format_vlib_buffer_and_data,
- format_vlib_buffer_contents;
+ format_vlib_buffer_contents, format_vlib_buffer_no_chain;
typedef struct
{
vec_free (t->packet_data);
}
-/**
- * @brief compress buffer chain in a way where the first buffer is at least
- * VLIB_BUFFER_CLONE_HEAD_SIZE long
- *
- * @param[in] vm - vlib_main
- * @param[in,out] first - first buffer in chain
- * @param[in,out] discard_vector - vector of buffer indexes which were removed
- * from the chain
- */
-always_inline void
-vlib_buffer_chain_compress (vlib_main_t * vm,
- vlib_buffer_t * first, u32 ** discard_vector)
+always_inline u32
+vlib_buffer_space_left_at_end (vlib_main_t * vm, vlib_buffer_t * b)
{
- if (first->current_length >= VLIB_BUFFER_CLONE_HEAD_SIZE ||
- !(first->flags & VLIB_BUFFER_NEXT_PRESENT))
+ return b->data + vlib_buffer_get_default_data_size (vm) -
+ ((u8 *) vlib_buffer_get_current (b) + b->current_length);
+}
+
+always_inline u32
+vlib_buffer_chain_linearize (vlib_main_t * vm, vlib_buffer_t * b)
+{
+ vlib_buffer_t *db = b, *sb, *first = b;
+ int is_cloned = 0;
+ u32 bytes_left = 0, data_size;
+ u16 src_left, dst_left, n_buffers = 1;
+ u8 *dp, *sp;
+ u32 to_free = 0;
+
+ if (PREDICT_TRUE ((b->flags & VLIB_BUFFER_NEXT_PRESENT) == 0))
+ return 1;
+
+ data_size = vlib_buffer_get_default_data_size (vm);
+
+ dst_left = vlib_buffer_space_left_at_end (vm, b);
+
+ while (b->flags & VLIB_BUFFER_NEXT_PRESENT)
{
- /* this is already big enough or not a chain */
- return;
+ b = vlib_get_buffer (vm, b->next_buffer);
+ if (b->ref_count > 1)
+ is_cloned = 1;
+ bytes_left += b->current_length;
+ n_buffers++;
}
- u32 want_first_size = clib_min (VLIB_BUFFER_CLONE_HEAD_SIZE,
- VLIB_BUFFER_DATA_SIZE -
- first->current_data);
- do
+ /* if buffer is cloned, create completely new chain - unless everything fits
+ * into one buffer */
+ if (is_cloned && bytes_left >= dst_left)
{
- vlib_buffer_t *second = vlib_get_buffer (vm, first->next_buffer);
- u32 need = want_first_size - first->current_length;
- u32 amount_to_copy = clib_min (need, second->current_length);
- clib_memcpy_fast (((u8 *) vlib_buffer_get_current (first)) +
- first->current_length,
- vlib_buffer_get_current (second), amount_to_copy);
- first->current_length += amount_to_copy;
- second->current_data += amount_to_copy;
- second->current_length -= amount_to_copy;
- if (first->flags & VLIB_BUFFER_TOTAL_LENGTH_VALID)
+ u32 len = 0;
+ u32 space_needed = bytes_left - dst_left;
+ u32 tail;
+
+ if (vlib_buffer_alloc (vm, &tail, 1) == 0)
+ return 0;
+
+ ++n_buffers;
+ len += data_size;
+ b = vlib_get_buffer (vm, tail);
+
+ while (len < space_needed)
{
- first->total_length_not_including_first_buffer -= amount_to_copy;
+ u32 bi;
+ if (vlib_buffer_alloc (vm, &bi, 1) == 0)
+ {
+ vlib_buffer_free_one (vm, tail);
+ return 0;
+ }
+ b->flags = VLIB_BUFFER_NEXT_PRESENT;
+ b->next_buffer = bi;
+ b = vlib_get_buffer (vm, bi);
+ len += data_size;
+ n_buffers++;
}
- if (!second->current_length)
+ sb = vlib_get_buffer (vm, first->next_buffer);
+ to_free = first->next_buffer;
+ first->next_buffer = tail;
+ }
+ else
+ sb = vlib_get_buffer (vm, first->next_buffer);
+
+ src_left = sb->current_length;
+ sp = vlib_buffer_get_current (sb);
+ dp = vlib_buffer_get_tail (db);
+
+ while (bytes_left)
+ {
+ u16 bytes_to_copy;
+
+ if (dst_left == 0)
{
- vec_add1 (*discard_vector, first->next_buffer);
- if (second->flags & VLIB_BUFFER_NEXT_PRESENT)
+ db->current_length = dp - (u8 *) vlib_buffer_get_current (db);
+ ASSERT (db->flags & VLIB_BUFFER_NEXT_PRESENT);
+ db = vlib_get_buffer (vm, db->next_buffer);
+ dst_left = data_size;
+ if (db->current_data > 0)
{
- first->next_buffer = second->next_buffer;
+ db->current_data = 0;
}
else
{
- first->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
+ dst_left += -db->current_data;
}
- second->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
+ dp = vlib_buffer_get_current (db);
}
- }
- while ((first->current_length < want_first_size) &&
- (first->flags & VLIB_BUFFER_NEXT_PRESENT));
-}
-/**
- * @brief linearize buffer chain - the first buffer is filled, if needed,
- * buffers are allocated and filled, returns free space in last buffer or
- * negative on failure
- *
- * @param[in] vm - vlib_main
- * @param[in,out] first - first buffer in chain
- */
-always_inline int
-vlib_buffer_chain_linearize (vlib_main_t * vm, vlib_buffer_t * first)
-{
- vlib_buffer_t *b = first;
- u32 buf_len = VLIB_BUFFER_DATA_SIZE;
- // free buffer chain starting from the second buffer
- int free_count = (b->flags & VLIB_BUFFER_NEXT_PRESENT) != 0;
- u32 chain_to_free = b->next_buffer;
-
- u32 len = vlib_buffer_length_in_chain (vm, b);
- u32 free_len = buf_len - b->current_data - b->current_length;
- int alloc_len = clib_max (len - free_len, 0); //use the free len in the first buffer
- int n_buffers = (alloc_len + buf_len - 1) / buf_len;
- u32 new_buffers[n_buffers];
+ while (src_left == 0)
+ {
+ ASSERT (sb->flags & VLIB_BUFFER_NEXT_PRESENT);
+ sb = vlib_get_buffer (vm, sb->next_buffer);
+ src_left = sb->current_length;
+ sp = vlib_buffer_get_current (sb);
+ }
- u32 n_alloc = vlib_buffer_alloc (vm, new_buffers, n_buffers);
- if (n_alloc != n_buffers)
- {
- vlib_buffer_free_no_next (vm, new_buffers, n_alloc);
- return -1;
+ bytes_to_copy = clib_min (dst_left, src_left);
+
+ if (dp != sp)
+ {
+ if (sb == db)
+ bytes_to_copy = clib_min (bytes_to_copy, sp - dp);
+
+ clib_memcpy_fast (dp, sp, bytes_to_copy);
+ }
+
+ src_left -= bytes_to_copy;
+ dst_left -= bytes_to_copy;
+ dp += bytes_to_copy;
+ sp += bytes_to_copy;
+ bytes_left -= bytes_to_copy;
}
+ if (db != first)
+ db->current_data = 0;
+ db->current_length = dp - (u8 *) vlib_buffer_get_current (db);
- vlib_buffer_t *s = b;
- while (s->flags & VLIB_BUFFER_NEXT_PRESENT)
+ if (is_cloned && to_free)
+ vlib_buffer_free_one (vm, to_free);
+ else
{
- s = vlib_get_buffer (vm, s->next_buffer);
- int d_free_len = buf_len - b->current_data - b->current_length;
- ASSERT (d_free_len >= 0);
- // chain buf and split write
- u32 copy_len = clib_min (d_free_len, s->current_length);
- u8 *d = vlib_buffer_put_uninit (b, copy_len);
- clib_memcpy (d, vlib_buffer_get_current (s), copy_len);
- int rest = s->current_length - copy_len;
- if (rest > 0)
+ if (db->flags & VLIB_BUFFER_NEXT_PRESENT)
+ vlib_buffer_free_one (vm, db->next_buffer);
+ db->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
+ b = first;
+ n_buffers = 1;
+ while (b->flags & VLIB_BUFFER_NEXT_PRESENT)
{
- //prev buf is full
- ASSERT (vlib_buffer_get_tail (b) == b->data + buf_len);
- ASSERT (n_buffers > 0);
- b = vlib_buffer_chain_buffer (vm, b, new_buffers[--n_buffers]);
- //make full use of the new buffers
- b->current_data = 0;
- d = vlib_buffer_put_uninit (b, rest);
- clib_memcpy (d, vlib_buffer_get_current (s) + copy_len, rest);
+ b = vlib_get_buffer (vm, b->next_buffer);
+ ++n_buffers;
}
}
- vlib_buffer_free (vm, &chain_to_free, free_count);
- b->flags &= ~VLIB_BUFFER_TOTAL_LENGTH_VALID;
- if (b == first) /* no buffers addeed */
- b->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
- ASSERT (len == vlib_buffer_length_in_chain (vm, first));
- ASSERT (n_buffers == 0);
- return buf_len - b->current_data - b->current_length;
+
+ first->flags &= ~VLIB_BUFFER_TOTAL_LENGTH_VALID;
+
+ return n_buffers;
}
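
/* Usage sketch (illustrative): linearize before handing the packet to code
   that cannot walk buffer chains; the return value is the resulting number
   of buffers in the chain, 0 on allocation failure:

     if (vlib_buffer_chain_linearize (vm, b0) == 0)
       /* allocation failed - drop the packet */ ;
*/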
#endif /* included_vlib_buffer_funcs_h */