X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=src%2Fvlib%2Fbuffer_funcs.h;h=8091d83729c5fb1a7cf2206c0d2bbb9a8121e837;hb=e5584965b7bf4e196bbddebc8a663fafb87c1f86;hp=ce8d1ef9078f17106bf07159c26cc222fe0f092f;hpb=910d3694e8b22c9d14e5f2913d14ae149e184620;p=vpp.git

diff --git a/src/vlib/buffer_funcs.h b/src/vlib/buffer_funcs.h
index ce8d1ef9078..8091d83729c 100644
--- a/src/vlib/buffer_funcs.h
+++ b/src/vlib/buffer_funcs.h
@@ -42,11 +42,38 @@
 #include <vppinfra/hash.h>
 #include <vppinfra/fifo.h>
+#include <vlib/buffer.h>
+#include <vlib/physmem_funcs.h>
+#include <vlib/main.h>
+#include <vlib/node.h>

 /** \file
     vlib buffer access methods.
 */

+always_inline void
+vlib_buffer_validate (vlib_main_t * vm, vlib_buffer_t * b)
+{
+  vlib_buffer_main_t *bm = vm->buffer_main;
+  vlib_buffer_pool_t *bp;
+
+  /* reference count in allocated buffer always must be 1 or higher */
+  ASSERT (b->ref_count > 0);
+
+  /* verify that buffer pool index is valid */
+  bp = vec_elt_at_index (bm->buffer_pools, b->buffer_pool_index);
+  ASSERT (pointer_to_uword (b) >= bp->start);
+  ASSERT (pointer_to_uword (b) < bp->start + bp->size -
+          (bp->data_size + sizeof (vlib_buffer_t)));
+}
+
+always_inline void *
+vlib_buffer_ptr_from_index (uword buffer_mem_start, u32 buffer_index,
+                            uword offset)
+{
+  offset += ((uword) buffer_index) << CLIB_LOG2_CACHE_LINE_BYTES;
+  return uword_to_pointer (buffer_mem_start + offset, vlib_buffer_t *);
+}

 /** \brief Translate buffer index into buffer pointer
@@ -58,16 +85,59 @@ always_inline vlib_buffer_t *
 vlib_get_buffer (vlib_main_t * vm, u32 buffer_index)
 {
   vlib_buffer_main_t *bm = vm->buffer_main;
-  uword offset = ((uword) buffer_index) << CLIB_LOG2_CACHE_LINE_BYTES;
-  ASSERT (offset < bm->buffer_mem_size);
+  vlib_buffer_t *b;
+
+  b = vlib_buffer_ptr_from_index (bm->buffer_mem_start, buffer_index, 0);
+  vlib_buffer_validate (vm, b);
+  return b;
+}

-  return uword_to_pointer (bm->buffer_mem_start + offset, void *);
+static_always_inline u32
+vlib_buffer_get_default_data_size (vlib_main_t * vm)
+{
+  return vm->buffer_main->default_data_size;
 }

 static_always_inline void
 vlib_buffer_copy_indices (u32 * dst, u32 * src, u32 n_indices)
 {
-  clib_memcpy_fast (dst, src, n_indices * sizeof (u32));
+#if defined(CLIB_HAVE_VEC512)
+  while (n_indices >= 16)
+    {
+      u32x16_store_unaligned (u32x16_load_unaligned (src), dst);
+      dst += 16;
+      src += 16;
+      n_indices -= 16;
+    }
+#endif
+
+#if defined(CLIB_HAVE_VEC256)
+  while (n_indices >= 8)
+    {
+      u32x8_store_unaligned (u32x8_load_unaligned (src), dst);
+      dst += 8;
+      src += 8;
+      n_indices -= 8;
+    }
+#endif
+
+#if defined(CLIB_HAVE_VEC128)
+  while (n_indices >= 4)
+    {
+      u32x4_store_unaligned (u32x4_load_unaligned (src), dst);
+      dst += 4;
+      src += 4;
+      n_indices -= 4;
+    }
+#endif
+
+  while (n_indices)
+    {
+      dst[0] = src[0];
+      dst += 1;
+      src += 1;
+      n_indices -= 1;
+    }
 }

 STATIC_ASSERT_OFFSET_OF (vlib_buffer_t, template_end, 64);
@@ -92,8 +162,8 @@ vlib_buffer_copy_template (vlib_buffer_t * b, vlib_buffer_t * bt)
 always_inline u8
 vlib_buffer_pool_get_default_for_numa (vlib_main_t * vm, u32 numa_node)
 {
-  ASSERT (numa_node < vm->buffer_main->n_numa_nodes);
-  return numa_node;
+  ASSERT (numa_node < VLIB_BUFFER_MAX_NUMA_NODES);
+  return vm->buffer_main->default_buffer_pool_index_for_numa[numa_node];
 }

 /** \brief Translate array of buffer indices into buffer pointers with offset
@@ -108,9 +178,7 @@ static_always_inline void
 vlib_get_buffers_with_offset (vlib_main_t * vm, u32 * bi, void **b, int count,
                               i32 offset)
 {
-#if defined (CLIB_HAVE_VEC256) || defined (CLIB_HAVE_VEC128)
   uword buffer_mem_start = vm->buffer_main->buffer_mem_start;
-#endif
 #ifdef CLIB_HAVE_VEC256
   u64x4 off = u64x4_splat (buffer_mem_start + offset);
   /* if count is not const, compiler will not unroll while loop
@@ -146,10 +214,10 @@ vlib_get_buffers_with_offset (vlib_main_t * vm, u32 * bi, void **b, int count,
       u64x2_store_unaligned ((b0 << CLIB_LOG2_CACHE_LINE_BYTES) + off, b);
       u64x2_store_unaligned ((b1 << CLIB_LOG2_CACHE_LINE_BYTES) + off, b + 2);
 #else
-      b[0] = ((u8 *) vlib_get_buffer (vm, bi[0])) + offset;
-      b[1] = ((u8 *) vlib_get_buffer (vm, bi[1])) + offset;
-      b[2] = ((u8 *) vlib_get_buffer (vm, bi[2])) + offset;
-      b[3] = ((u8 *) vlib_get_buffer (vm, bi[3])) + offset;
+      b[0] = vlib_buffer_ptr_from_index (buffer_mem_start, bi[0], offset);
+      b[1] = vlib_buffer_ptr_from_index (buffer_mem_start, bi[1], offset);
+      b[2] = vlib_buffer_ptr_from_index (buffer_mem_start, bi[2], offset);
+      b[3] = vlib_buffer_ptr_from_index (buffer_mem_start, bi[3], offset);
 #endif
       b += 4;
       bi += 4;
@@ -157,7 +225,7 @@ vlib_get_buffers_with_offset (vlib_main_t * vm, u32 * bi, void **b, int count,
     }
   while (count)
     {
-      b[0] = ((u8 *) vlib_get_buffer (vm, bi[0])) + offset;
+      b[0] = vlib_buffer_ptr_from_index (buffer_mem_start, bi[0], offset);
       b += 1;
       bi += 1;
       count -= 1;
@@ -608,7 +676,7 @@ vlib_buffer_alloc_to_ring_from_pool (vlib_main_t * vm, u32 * ring, u32 start,
   return n_alloc;
 }

-static void
+static_always_inline void
 vlib_buffer_pool_put (vlib_main_t * vm, u8 buffer_pool_index,
                       u32 * buffers, u32 n_buffers)
 {
@@ -616,6 +684,10 @@ vlib_buffer_pool_put (vlib_main_t * vm, u8 buffer_pool_index,
   vlib_buffer_pool_thread_t *bpt =
     vec_elt_at_index (bp->threads, vm->thread_index);

+  if (CLIB_DEBUG > 0)
+    vlib_buffer_validate_alloc_free (vm, buffers, n_buffers,
+                                     VLIB_BUFFER_KNOWN_ALLOCATED);
+
   vec_add_aligned (bpt->cached_buffers, buffers, n_buffers,
                    CLIB_CACHE_LINE_BYTES);

@@ -640,12 +712,12 @@ vlib_buffer_free_inline (vlib_main_t * vm, u32 * buffers, u32 n_buffers,
   u8 buffer_pool_index = ~0;
   u32 n_queue = 0, queue[queue_size + 4];
   vlib_buffer_t bt = { };
-#if defined(CLIB_HAVE_VEC128) && !__aarch64__
+#if defined(CLIB_HAVE_VEC128)
   vlib_buffer_t bpi_mask = {.buffer_pool_index = ~0 };
   vlib_buffer_t bpi_vec = {.buffer_pool_index = ~0 };
   vlib_buffer_t flags_refs_mask = {
     .flags = VLIB_BUFFER_NEXT_PRESENT,
-    .ref_count = ~0
+    .ref_count = ~1
   };
 #endif

@@ -665,7 +737,7 @@ vlib_buffer_free_inline (vlib_main_t * vm, u32 * buffers, u32 n_buffers,
          vlib_prefetch_buffer_header (b[6], LOAD);
          vlib_prefetch_buffer_header (b[7], LOAD);

-#if defined(CLIB_HAVE_VEC128) && !__aarch64__
+#if defined(CLIB_HAVE_VEC128)
          u8x16 p0, p1, p2, p3, r;
          p0 = u8x16_load_unaligned (b[0]);
          p1 = u8x16_load_unaligned (b[1]);
@@ -706,9 +778,10 @@ vlib_buffer_free_inline (vlib_main_t * vm, u32 * buffers, u32 n_buffers,
          vlib_buffer_copy_template (b[3], &bt);
          n_queue += 4;

-         if (CLIB_DEBUG > 0)
-           vlib_buffer_validate_alloc_free (vm, buffers, 4,
-                                            VLIB_BUFFER_KNOWN_ALLOCATED);
+         vlib_buffer_validate (vm, b[0]);
+         vlib_buffer_validate (vm, b[1]);
+         vlib_buffer_validate (vm, b[2]);
+         vlib_buffer_validate (vm, b[3]);

          VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[0]);
          VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[1]);
@@ -734,31 +807,27 @@ vlib_buffer_free_inline (vlib_main_t * vm, u32 * buffers, u32 n_buffers,
       if (PREDICT_FALSE (buffer_pool_index != b[0]->buffer_pool_index))
        {
-         buffer_pool_index = b[0]->buffer_pool_index;
-#if defined(CLIB_HAVE_VEC128) && !__aarch64__
-         bpi_vec.buffer_pool_index = buffer_pool_index;
-#endif
-         bp = vlib_get_buffer_pool (vm, buffer_pool_index);
-         vlib_buffer_copy_template (&bt, &bp->buffer_template);

          if (n_queue)
            {
              vlib_buffer_pool_put (vm, buffer_pool_index, queue, n_queue);
              n_queue = 0;
            }
+
+         buffer_pool_index = b[0]->buffer_pool_index;
+#if defined(CLIB_HAVE_VEC128)
+         bpi_vec.buffer_pool_index = buffer_pool_index;
+#endif
+         bp = vlib_get_buffer_pool (vm, buffer_pool_index);
+         vlib_buffer_copy_template (&bt, &bp->buffer_template);
        }

-      ASSERT (pointer_to_uword (b[0]) >= bp->start &&
-             pointer_to_uword (b[0]) <
-             bp->start + bp->size - (bp->data_size + sizeof (*b[0])));
+      vlib_buffer_validate (vm, b[0]);

       VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b[0]);

       if (clib_atomic_sub_fetch (&b[0]->ref_count, 1) == 0)
        {
-         if (CLIB_DEBUG > 0)
-           vlib_buffer_validate_alloc_free (vm, &bi, 1,
-                                            VLIB_BUFFER_KNOWN_ALLOCATED);
          vlib_buffer_copy_template (b[0], &bt);
          queue[n_queue++] = bi;
        }
@@ -769,7 +838,7 @@ vlib_buffer_free_inline (vlib_main_t * vm, u32 * buffers, u32 n_buffers,
          n_queue = 0;
        }

-      if (flags & VLIB_BUFFER_NEXT_PRESENT)
+      if (maybe_next && (flags & VLIB_BUFFER_NEXT_PRESENT))
        {
          bi = next;
          goto next_in_chain;
@@ -943,6 +1012,52 @@ vlib_buffer_copy (vlib_main_t * vm, vlib_buffer_t * b)
   return fd;
 }

+/* duplicate first buffer in chain */
+always_inline vlib_buffer_t *
+vlib_buffer_copy_no_chain (vlib_main_t * vm, vlib_buffer_t * b, u32 * di)
+{
+  vlib_buffer_t *d;
+
+  if ((vlib_buffer_alloc (vm, di, 1)) != 1)
+    return 0;
+
+  d = vlib_get_buffer (vm, *di);
+  /* 1st segment */
+  d->current_data = b->current_data;
+  d->current_length = b->current_length;
+  clib_memcpy_fast (d->opaque, b->opaque, sizeof (b->opaque));
+  clib_memcpy_fast (d->opaque2, b->opaque2, sizeof (b->opaque2));
+  clib_memcpy_fast (vlib_buffer_get_current (d),
+                    vlib_buffer_get_current (b), b->current_length);
+
+  return d;
+}
+
+/** \brief Move packet from current position to offset position in buffer.
+    Only works for small packets using one buffer with room to fit the move
+    @param vm - (vlib_main_t *) vlib main data structure pointer
+    @param b -  (vlib_buffer_t *) pointer to buffer
+    @param offset - (i16) position to move the packet in buffer
+ */
+always_inline void
+vlib_buffer_move (vlib_main_t * vm, vlib_buffer_t * b, i16 offset)
+{
+  ASSERT ((b->flags & VLIB_BUFFER_NEXT_PRESENT) == 0);
+  ASSERT (offset + VLIB_BUFFER_PRE_DATA_SIZE >= 0);
+  ASSERT (offset + b->current_length <
+          vlib_buffer_get_default_data_size (vm));
+
+  u8 *source = vlib_buffer_get_current (b);
+  b->current_data = offset;
+  u8 *destination = vlib_buffer_get_current (b);
+  u16 length = b->current_length;
+
+  if (source + length <= destination)   /* no overlap */
+    clib_memcpy_fast (destination, source, length);
+  else
+    memmove (destination, source, length);
+}
+
 /** \brief Create a maximum of 256 clones of buffer and store them
     in the supplied array
@@ -952,12 +1067,14 @@ vlib_buffer_copy (vlib_main_t * vm, vlib_buffer_t * b)
     @param n_buffers - (u16) number of buffer clones requested (<=256)
     @param head_end_offset - (u16) offset relative to current position
                              where packet head ends
+    @param offset - (i16) copy packet head at current position if 0,
+                    else at offset position to change headroom space as specified
     @return - (u16) number of buffers actually cloned, may be
     less than the number requested or zero
 */
 always_inline u16
 vlib_buffer_clone_256 (vlib_main_t * vm, u32 src_buffer, u32 * buffers,
-                      u16 n_buffers, u16 head_end_offset)
+                      u16 n_buffers, u16 head_end_offset, i16 offset)
 {
   u16 i;
   vlib_buffer_t *s = vlib_get_buffer (vm, src_buffer);
@@ -965,10 +1082,16 @@ vlib_buffer_clone_256 (vlib_main_t * vm, u32 src_buffer, u32 * buffers,
   ASSERT (s->ref_count == 1);
   ASSERT (n_buffers);
   ASSERT (n_buffers <= 256);
+  ASSERT (offset + VLIB_BUFFER_PRE_DATA_SIZE >= 0);
+  ASSERT ((offset + head_end_offset) <
+          vlib_buffer_get_default_data_size (vm));

   if (s->current_length <= head_end_offset + CLIB_CACHE_LINE_BYTES * 2)
     {
       buffers[0] = src_buffer;
+      if (offset)
+       vlib_buffer_move (vm, s, offset);
+
       for (i = 1; i < n_buffers; i++)
        {
          vlib_buffer_t *d;
@@ -981,7 +1104,7 @@
       return n_buffers;
     }

-  if (PREDICT_FALSE (n_buffers == 1))
+  if (PREDICT_FALSE ((n_buffers == 1) && (offset == 0)))
     {
       buffers[0] = src_buffer;
       return 1;
@@ -993,7 +1116,11 @@
   for (i = 0; i < n_buffers; i++)
     {
       vlib_buffer_t *d = vlib_get_buffer (vm, buffers[i]);
-      d->current_data = s->current_data;
+      if (offset)
+       d->current_data = offset;
+      else
+       d->current_data = s->current_data;
+
       d->current_length = head_end_offset;
       ASSERT (d->buffer_pool_index == s->buffer_pool_index);
@@ -1032,12 +1159,14 @@
     @param n_buffers - (u16) number of buffer clones requested (<=256)
     @param head_end_offset - (u16) offset relative to current position
                              where packet head ends
+    @param offset - (i16) copy packet head at current position if 0,
+                    else at offset position to change headroom space as specified
     @return - (u16) number of buffers actually cloned, may be
     less than the number requested or zero
 */
 always_inline u16
-vlib_buffer_clone (vlib_main_t * vm, u32 src_buffer, u32 * buffers,
-                  u16 n_buffers, u16 head_end_offset)
+vlib_buffer_clone_at_offset (vlib_main_t * vm, u32 src_buffer, u32 * buffers,
+                            u16 n_buffers, u16 head_end_offset, i16 offset)
 {
   vlib_buffer_t *s = vlib_get_buffer (vm, src_buffer);
   u16 n_cloned = 0;
@@ -1049,16 +1178,36 @@
       n_cloned += vlib_buffer_clone_256 (vm,
                                         vlib_get_buffer_index (vm, copy),
                                         (buffers + n_cloned),
-                                        256, head_end_offset);
+                                        256, head_end_offset, offset);
       n_buffers -= 256;
     }
   n_cloned += vlib_buffer_clone_256 (vm, src_buffer,
                                     buffers + n_cloned,
-                                    n_buffers, head_end_offset);
+                                    n_buffers, head_end_offset, offset);

   return n_cloned;
 }

+/** \brief Create multiple clones of buffer and store them
+    in the supplied array
+
+    @param vm - (vlib_main_t *) vlib main data structure pointer
+    @param src_buffer - (u32) source buffer index
+    @param buffers - (u32 * ) buffer index array
+    @param n_buffers - (u16) number of buffer clones requested (<=256)
+    @param head_end_offset - (u16) offset relative to current position
+                             where packet head ends
+    @return - (u16) number of buffers actually cloned, may be
+    less than the number requested or zero
+*/
+always_inline u16
+vlib_buffer_clone (vlib_main_t * vm, u32 src_buffer, u32 * buffers,
+                  u16 n_buffers, u16 head_end_offset)
+{
+  return vlib_buffer_clone_at_offset (vm, src_buffer, buffers, n_buffers,
+                                     head_end_offset, 0);
+}
+
 /** \brief Attach cloned tail to the buffer

     @param vm - (vlib_main_t *) vlib main data structure pointer
@@ -1134,7 +1283,7 @@ vlib_buffer_chain_append_data (vlib_main_t * vm,
                               vlib_buffer_t * first,
                               vlib_buffer_t * last, void *data, u16 data_len)
 {
-  u32 n_buffer_bytes = VLIB_BUFFER_DATA_SIZE;
+  u32 n_buffer_bytes = vlib_buffer_get_default_data_size (vm);
   ASSERT (n_buffer_bytes >= last->current_length + last->current_data);
   u16 len = clib_min (data_len,
                      n_buffer_bytes - last->current_length -
@@ -1188,122 +1337,147 @@ vlib_packet_template_free (vlib_main_t * vm, vlib_packet_template_t * t)
   vec_free (t->packet_data);
 }

-/**
- * @brief compress buffer chain in a way where the first buffer is at least
- * VLIB_BUFFER_CLONE_HEAD_SIZE long
- *
- * @param[in] vm - vlib_main
- * @param[in,out] first - first buffer in chain
- * @param[in,out] discard_vector - vector of buffer indexes which were removed
- * from the chain
- */
-always_inline void
-vlib_buffer_chain_compress (vlib_main_t * vm,
-                           vlib_buffer_t * first, u32 ** discard_vector)
+always_inline u32
+vlib_buffer_space_left_at_end (vlib_main_t * vm, vlib_buffer_t * b)
+{
+  return b->data + vlib_buffer_get_default_data_size (vm) -
+    ((u8 *) vlib_buffer_get_current (b) + b->current_length);
+}
+
+always_inline u32
+vlib_buffer_chain_linearize (vlib_main_t * vm, vlib_buffer_t * b)
 {
-  if (first->current_length >= VLIB_BUFFER_CLONE_HEAD_SIZE ||
-      !(first->flags & VLIB_BUFFER_NEXT_PRESENT))
+  vlib_buffer_t *db = b, *sb, *first = b;
+  int is_cloned = 0;
+  u32 bytes_left = 0, data_size;
+  u16 src_left, dst_left, n_buffers = 1;
+  u8 *dp, *sp;
+  u32 to_free = 0;
+
+  if (PREDICT_TRUE ((b->flags & VLIB_BUFFER_NEXT_PRESENT) == 0))
+    return 1;
+
+  data_size = vlib_buffer_get_default_data_size (vm);
+
+  dst_left = vlib_buffer_space_left_at_end (vm, b);
+
+  while (b->flags & VLIB_BUFFER_NEXT_PRESENT)
     {
-      /* this is already big enough or not a chain */
-      return;
+      b = vlib_get_buffer (vm, b->next_buffer);
+      if (b->ref_count > 1)
+       is_cloned = 1;
+      bytes_left += b->current_length;
+      n_buffers++;
     }
-  u32 want_first_size = clib_min (VLIB_BUFFER_CLONE_HEAD_SIZE,
-                                 VLIB_BUFFER_DATA_SIZE -
-                                 first->current_data);
-  do
+
+  /* if buffer is cloned, create completely new chain - unless everything fits
+   * into one buffer */
+  if (is_cloned && bytes_left >= dst_left)
     {
-      vlib_buffer_t *second = vlib_get_buffer (vm, first->next_buffer);
-      u32 need = want_first_size - first->current_length;
-      u32 amount_to_copy = clib_min (need, second->current_length);
-      clib_memcpy_fast (((u8 *) vlib_buffer_get_current (first)) +
-                       first->current_length,
-                       vlib_buffer_get_current (second), amount_to_copy);
-      first->current_length += amount_to_copy;
-      second->current_data += amount_to_copy;
-      second->current_length -= amount_to_copy;
-      if (first->flags & VLIB_BUFFER_TOTAL_LENGTH_VALID)
+      u32 len = 0;
+      u32 space_needed = bytes_left - dst_left;
+      u32 tail;
+
+      if (vlib_buffer_alloc (vm, &tail, 1) == 0)
+       return 0;
+
+      ++n_buffers;
+      len += data_size;
+      b = vlib_get_buffer (vm, tail);
+
+      while (len < space_needed)
        {
-         first->total_length_not_including_first_buffer -= amount_to_copy;
+         u32 bi;
+         if (vlib_buffer_alloc (vm, &bi, 1) == 0)
+           {
+             vlib_buffer_free_one (vm, tail);
+             return 0;
+           }
+         b->flags = VLIB_BUFFER_NEXT_PRESENT;
+         b->next_buffer = bi;
+         b = vlib_get_buffer (vm, bi);
+         len += data_size;
+         n_buffers++;
        }
-      if (!second->current_length)
+      sb = vlib_get_buffer (vm, first->next_buffer);
+      to_free = first->next_buffer;
+      first->next_buffer = tail;
+    }
+  else
+    sb = vlib_get_buffer (vm, first->next_buffer);
+
+  src_left = sb->current_length;
+  sp = vlib_buffer_get_current (sb);
+  dp = vlib_buffer_get_tail (db);
+
+  while (bytes_left)
+    {
+      u16 bytes_to_copy;
+
+      if (dst_left == 0)
        {
-         vec_add1 (*discard_vector, first->next_buffer);
-         if (second->flags & VLIB_BUFFER_NEXT_PRESENT)
+         db->current_length = dp - (u8 *) vlib_buffer_get_current (db);
+         ASSERT (db->flags & VLIB_BUFFER_NEXT_PRESENT);
+         db = vlib_get_buffer (vm, db->next_buffer);
+         dst_left = data_size;
+         if (db->current_data > 0)
            {
-             first->next_buffer = second->next_buffer;
+             db->current_data = 0;
            }
          else
            {
-             first->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
+             dst_left += -db->current_data;
            }
-         second->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
+         dp = vlib_buffer_get_current (db);
        }
-    }
-  while ((first->current_length < want_first_size) &&
-        (first->flags & VLIB_BUFFER_NEXT_PRESENT));
-}

-/**
- * @brief linearize buffer chain - the first buffer is filled, if needed,
- * buffers are allocated and filled, returns free space in last buffer or
- * negative on failure
- *
- * @param[in] vm - vlib_main
- * @param[in,out] first - first buffer in chain
- */
-always_inline int
-vlib_buffer_chain_linearize (vlib_main_t * vm, vlib_buffer_t * first)
-{
-  vlib_buffer_t *b = first;
-  u32 buf_len = VLIB_BUFFER_DATA_SIZE;
-  // free buffer chain starting from the second buffer
-  int free_count = (b->flags & VLIB_BUFFER_NEXT_PRESENT) != 0;
-  u32 chain_to_free = b->next_buffer;
-
-  u32 len = vlib_buffer_length_in_chain (vm, b);
-  u32 free_len = buf_len - b->current_data - b->current_length;
-  int alloc_len = clib_max (len - free_len, 0); //use the free len in the first buffer
-  int n_buffers = (alloc_len + buf_len - 1) / buf_len;
-  u32 new_buffers[n_buffers];
+      while (src_left == 0)
+       {
+         ASSERT (sb->flags & VLIB_BUFFER_NEXT_PRESENT);
+         sb = vlib_get_buffer (vm, sb->next_buffer);
+         src_left = sb->current_length;
+         sp = vlib_buffer_get_current (sb);
+       }

-  u32 n_alloc = vlib_buffer_alloc (vm, new_buffers, n_buffers);
-  if (n_alloc != n_buffers)
-    {
-      vlib_buffer_free_no_next (vm, new_buffers, n_alloc);
-      return -1;
+      bytes_to_copy = clib_min (dst_left, src_left);
+
+      if (dp != sp)
+       {
+         if (sb == db)
+           bytes_to_copy = clib_min (bytes_to_copy, sp - dp);
+
+         clib_memcpy_fast (dp, sp, bytes_to_copy);
+       }
+
+      src_left -= bytes_to_copy;
+      dst_left -= bytes_to_copy;
+      dp += bytes_to_copy;
+      sp += bytes_to_copy;
+      bytes_left -= bytes_to_copy;
     }
+  if (db != first)
+    db->current_data = 0;
+  db->current_length = dp - (u8 *) vlib_buffer_get_current (db);

-  vlib_buffer_t *s = b;
-  while (s->flags & VLIB_BUFFER_NEXT_PRESENT)
+  if (is_cloned && to_free)
+    vlib_buffer_free_one (vm, to_free);
+  else
     {
-      s = vlib_get_buffer (vm, s->next_buffer);
-      int d_free_len = buf_len - b->current_data - b->current_length;
-      ASSERT (d_free_len >= 0);
-      // chain buf and split write
-      u32 copy_len = clib_min (d_free_len, s->current_length);
-      u8 *d = vlib_buffer_put_uninit (b, copy_len);
-      clib_memcpy (d, vlib_buffer_get_current (s), copy_len);
-      int rest = s->current_length - copy_len;
-      if (rest > 0)
+      if (db->flags & VLIB_BUFFER_NEXT_PRESENT)
+       vlib_buffer_free_one (vm, db->next_buffer);
+      db->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
+      b = first;
+      n_buffers = 1;
+      while (b->flags & VLIB_BUFFER_NEXT_PRESENT)
        {
-         //prev buf is full
-         ASSERT (vlib_buffer_get_tail (b) == b->data + buf_len);
-         ASSERT (n_buffers > 0);
-         b = vlib_buffer_chain_buffer (vm, b, new_buffers[--n_buffers]);
-         //make full use of the new buffers
-         b->current_data = 0;
-         d = vlib_buffer_put_uninit (b, rest);
-         clib_memcpy (d, vlib_buffer_get_current (s) + copy_len, rest);
+         b = vlib_get_buffer (vm, b->next_buffer);
+         ++n_buffers;
        }
     }
-  vlib_buffer_free (vm, &chain_to_free, free_count);
-  b->flags &= ~VLIB_BUFFER_TOTAL_LENGTH_VALID;
-  if (b == first)              /* no buffers addeed */
-    b->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
-  ASSERT (len == vlib_buffer_length_in_chain (vm, first));
-  ASSERT (n_buffers == 0);
-  return buf_len - b->current_data - b->current_length;
+
+  first->flags &= ~VLIB_BUFFER_TOTAL_LENGTH_VALID;
+
+  return n_buffers;
 }

 #endif /* included_vlib_buffer_funcs_h */
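
Reviewer note (illustrative, not part of the patch): the new offset parameter
on the clone path lets a replication node place each clone's private copy of
the packet head at an explicit current_data, e.g. to reserve extra headroom
for a header that will be pushed into each clone later. A minimal sketch of
the calling convention, assuming `bi0` is a valid buffer index inside a node
function and that the per-clone head ends 64 bytes past current_data; the
variable names are hypothetical:

  u32 clones[8];
  u16 n_cloned;

  /* place each clone's head at current_data == -32, i.e. 32 bytes into
     the pre-data area; the offset must satisfy
     offset + VLIB_BUFFER_PRE_DATA_SIZE >= 0 and
     offset + head_end_offset < vlib_buffer_get_default_data_size (vm) */
  n_cloned = vlib_buffer_clone_at_offset (vm, bi0, clones, 8,
                                          64 /* head_end_offset */,
                                          -32 /* offset */);
  if (n_cloned < 8)
    ; /* allocation shortfall: fewer clones than requested, possibly zero */

vlib_buffer_clone () keeps its old signature by delegating to
vlib_buffer_clone_at_offset () with offset == 0, so existing callers are
unaffected.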
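
Similarly, vlib_buffer_chain_linearize () changes its contract: instead of
returning the free space left in the last buffer (negative on failure), it
now returns the number of buffers in the resulting chain, with 0 signalling
an allocation failure. A hedged sketch of the new calling convention,
assuming `bi0` is the index of the head of a possibly-chained packet:

  vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0);
  u32 n_bufs = vlib_buffer_chain_linearize (vm, b0);

  if (n_bufs == 0)
    ; /* buffer allocation failed; drop or otherwise handle the packet */
  else if (n_bufs == 1)
    ; /* packet now occupies a single buffer */

Callers that previously relied on the returned free space can derive it from
the companion helper vlib_buffer_space_left_at_end (), which the patch
introduces alongside the rewrite.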