diff --git a/src/vlib/buffer_funcs.h b/src/vlib/buffer_funcs.h
index 483a990731f..7103227a965 100644
--- a/src/vlib/buffer_funcs.h
+++ b/src/vlib/buffer_funcs.h
@@ -101,7 +101,79 @@ vlib_buffer_get_default_data_size (vlib_main_t * vm)
 static_always_inline void
 vlib_buffer_copy_indices (u32 * dst, u32 * src, u32 n_indices)
 {
-  clib_memcpy_fast (dst, src, n_indices * sizeof (u32));
+#if defined(CLIB_HAVE_VEC512)
+  while (n_indices >= 16)
+    {
+      u32x16_store_unaligned (u32x16_load_unaligned (src), dst);
+      dst += 16;
+      src += 16;
+      n_indices -= 16;
+    }
+#endif
+
+#if defined(CLIB_HAVE_VEC256)
+  while (n_indices >= 8)
+    {
+      u32x8_store_unaligned (u32x8_load_unaligned (src), dst);
+      dst += 8;
+      src += 8;
+      n_indices -= 8;
+    }
+#endif
+
+#if defined(CLIB_HAVE_VEC128)
+  while (n_indices >= 4)
+    {
+      u32x4_store_unaligned (u32x4_load_unaligned (src), dst);
+      dst += 4;
+      src += 4;
+      n_indices -= 4;
+    }
+#endif
+
+  while (n_indices)
+    {
+      dst[0] = src[0];
+      dst += 1;
+      src += 1;
+      n_indices -= 1;
+    }
+}
+
+always_inline void
+vlib_buffer_copy_indices_from_ring (u32 * dst, u32 * ring, u32 start,
+                                    u32 ring_size, u32 n_buffers)
+{
+  ASSERT (n_buffers <= ring_size);
+
+  if (PREDICT_TRUE (start + n_buffers <= ring_size))
+    {
+      vlib_buffer_copy_indices (dst, ring + start, n_buffers);
+    }
+  else
+    {
+      u32 n = ring_size - start;
+      vlib_buffer_copy_indices (dst, ring + start, n);
+      vlib_buffer_copy_indices (dst + n, ring, n_buffers - n);
+    }
+}
+
+always_inline void
+vlib_buffer_copy_indices_to_ring (u32 * ring, u32 * src, u32 start,
+                                  u32 ring_size, u32 n_buffers)
+{
+  ASSERT (n_buffers <= ring_size);
+
+  if (PREDICT_TRUE (start + n_buffers <= ring_size))
+    {
+      vlib_buffer_copy_indices (ring + start, src, n_buffers);
+    }
+  else
+    {
+      u32 n = ring_size - start;
+      vlib_buffer_copy_indices (ring + start, src, n);
+      vlib_buffer_copy_indices (ring, src + n, n_buffers - n);
+    }
 }
 
 STATIC_ASSERT_OFFSET_OF (vlib_buffer_t, template_end, 64);
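The ring helpers above split a wrapping copy into at most two contiguous
vlib_buffer_copy_indices calls, so the SIMD loops never have to deal with
the wrap themselves. A minimal usage sketch (not part of the patch,
assuming this header is included and `ring` holds valid buffer indices):

    /* 16-slot ring, reading 4 entries starting at slot 14: the helper
       copies ring[14..15] first, then wraps around and copies ring[0..1]. */
    u32 ring[16], dst[4];
    vlib_buffer_copy_indices_from_ring (dst, ring, 14 /* start */,
                                        16 /* ring_size */,
                                        4 /* n_buffers */);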
@@ -438,6 +510,13 @@ vlib_buffer_is_known (vlib_main_t * vm, u32 buffer_index)
 u8 *vlib_validate_buffer (vlib_main_t * vm, u32 buffer_index,
                           uword follow_chain);
+u8 *vlib_validate_buffers (vlib_main_t * vm,
+                           u32 * buffers,
+                           uword next_buffer_stride,
+                           uword n_buffers,
+                           vlib_buffer_known_state_t known_state,
+                           uword follow_buffer_next);
+
 static_always_inline vlib_buffer_pool_t *
 vlib_get_buffer_pool (vlib_main_t * vm, u8 buffer_pool_index)
 {
@@ -455,19 +534,19 @@ vlib_buffer_pool_get (vlib_main_t * vm, u8 buffer_pool_index, u32 * buffers,
   ASSERT (bp->buffers);
 
   clib_spinlock_lock (&bp->lock);
-  len = vec_len (bp->buffers);
+  len = bp->n_avail;
   if (PREDICT_TRUE (n_buffers < len))
     {
       len -= n_buffers;
       vlib_buffer_copy_indices (buffers, bp->buffers + len, n_buffers);
-      _vec_len (bp->buffers) = len;
+      bp->n_avail = len;
       clib_spinlock_unlock (&bp->lock);
       return n_buffers;
     }
   else
     {
       vlib_buffer_copy_indices (buffers, bp->buffers, len);
-      _vec_len (bp->buffers) = 0;
+      bp->n_avail = 0;
       clib_spinlock_unlock (&bp->lock);
       return len;
     }
@@ -497,14 +576,26 @@ vlib_buffer_alloc_from_pool (vlib_main_t * vm, u32 * buffers, u32 n_buffers,
   dst = buffers;
   n_left = n_buffers;
-  len = vec_len (bpt->cached_buffers);
+  len = bpt->n_cached;
 
   /* per-thread cache contains enough buffers */
   if (len >= n_buffers)
     {
       src = bpt->cached_buffers + len - n_buffers;
       vlib_buffer_copy_indices (dst, src, n_buffers);
-      _vec_len (bpt->cached_buffers) -= n_buffers;
+      bpt->n_cached -= n_buffers;
+
+      if (CLIB_DEBUG > 0)
+        vlib_buffer_validate_alloc_free (vm, buffers, n_buffers,
+                                         VLIB_BUFFER_KNOWN_FREE);
+      return n_buffers;
+    }
+
+  /* alloc bigger than cache - take buffers directly from main pool */
+  if (n_buffers >= VLIB_BUFFER_POOL_PER_THREAD_CACHE_SZ)
+    {
+      n_buffers = vlib_buffer_pool_get (vm, buffer_pool_index, buffers,
+                                        n_buffers);
 
       if (CLIB_DEBUG > 0)
         vlib_buffer_validate_alloc_free (vm, buffers, n_buffers,
@@ -516,23 +607,22 @@ vlib_buffer_alloc_from_pool (vlib_main_t * vm, u32 * buffers, u32 n_buffers,
   if (len)
     {
       vlib_buffer_copy_indices (dst, bpt->cached_buffers, len);
-      _vec_len (bpt->cached_buffers) = 0;
+      bpt->n_cached = 0;
       dst += len;
       n_left -= len;
     }
 
   len = round_pow2 (n_left, 32);
-  vec_validate_aligned (bpt->cached_buffers, len - 1, CLIB_CACHE_LINE_BYTES);
   len = vlib_buffer_pool_get (vm, buffer_pool_index, bpt->cached_buffers,
                               len);
-  _vec_len (bpt->cached_buffers) = len;
+  bpt->n_cached = len;
 
   if (len)
     {
       u32 n_copy = clib_min (len, n_left);
       src = bpt->cached_buffers + len - n_copy;
       vlib_buffer_copy_indices (dst, src, n_copy);
-      _vec_len (bpt->cached_buffers) -= n_copy;
+      bpt->n_cached -= n_copy;
       n_left -= n_copy;
     }
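Allocation is now two-level: a request is served from the per-thread cache
when possible; a request of VLIB_BUFFER_POOL_PER_THREAD_CACHE_SZ or more
bypasses the cache and hits the spinlock-protected pool directly; anything
else drains the cache, refills it from the pool in a multiple of 32 indices
(round_pow2), and completes the request from the refill. A caller-side
sketch (not part of the patch; `vm` is the usual vlib_main_t pointer):

    u32 bi[VLIB_FRAME_SIZE];
    u32 n = vlib_buffer_alloc (vm, bi, VLIB_FRAME_SIZE);
    if (PREDICT_FALSE (n < VLIB_FRAME_SIZE))
      vlib_buffer_free (vm, bi, n);  /* pool exhausted - return partial alloc */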
@@ -645,26 +735,33 @@ vlib_buffer_pool_put (vlib_main_t * vm, u8 buffer_pool_index,
                       u32 * buffers, u32 n_buffers)
 {
   vlib_buffer_pool_t *bp = vlib_get_buffer_pool (vm, buffer_pool_index);
-  vlib_buffer_pool_thread_t *bpt =
-    vec_elt_at_index (bp->threads, vm->thread_index);
+  vlib_buffer_pool_thread_t *bpt = vec_elt_at_index (bp->threads,
+                                                     vm->thread_index);
+  u32 n_cached, n_empty;
 
   if (CLIB_DEBUG > 0)
     vlib_buffer_validate_alloc_free (vm, buffers, n_buffers,
                                      VLIB_BUFFER_KNOWN_ALLOCATED);
 
-  vec_add_aligned (bpt->cached_buffers, buffers, n_buffers,
-                   CLIB_CACHE_LINE_BYTES);
-
-  if (vec_len (bpt->cached_buffers) > 4 * VLIB_FRAME_SIZE)
+  n_cached = bpt->n_cached;
+  n_empty = VLIB_BUFFER_POOL_PER_THREAD_CACHE_SZ - n_cached;
+  if (n_buffers <= n_empty)
     {
-      clib_spinlock_lock (&bp->lock);
-      /* keep last stored buffers, as they are more likely hot in the cache */
-      vec_add_aligned (bp->buffers, bpt->cached_buffers, VLIB_FRAME_SIZE,
-                       CLIB_CACHE_LINE_BYTES);
-      vec_delete (bpt->cached_buffers, VLIB_FRAME_SIZE, 0);
-      bpt->n_alloc -= VLIB_FRAME_SIZE;
-      clib_spinlock_unlock (&bp->lock);
+      vlib_buffer_copy_indices (bpt->cached_buffers + n_cached,
+                                buffers, n_buffers);
+      bpt->n_cached = n_cached + n_buffers;
+      return;
     }
+
+  vlib_buffer_copy_indices (bpt->cached_buffers + n_cached,
+                            buffers + n_buffers - n_empty, n_empty);
+  bpt->n_cached = VLIB_BUFFER_POOL_PER_THREAD_CACHE_SZ;
+
+  clib_spinlock_lock (&bp->lock);
+  vlib_buffer_copy_indices (bp->buffers + bp->n_avail, buffers,
+                            n_buffers - n_empty);
+  bp->n_avail += n_buffers - n_empty;
+  clib_spinlock_unlock (&bp->lock);
 }
 
 static_always_inline void
@@ -676,12 +773,12 @@ vlib_buffer_free_inline (vlib_main_t * vm, u32 * buffers, u32 n_buffers,
   u8 buffer_pool_index = ~0;
   u32 n_queue = 0, queue[queue_size + 4];
   vlib_buffer_t bt = { };
-#if defined(CLIB_HAVE_VEC128) && !__aarch64__
+#if defined(CLIB_HAVE_VEC128)
   vlib_buffer_t bpi_mask = {.buffer_pool_index = ~0 };
   vlib_buffer_t bpi_vec = {.buffer_pool_index = ~0 };
   vlib_buffer_t flags_refs_mask = {
     .flags = VLIB_BUFFER_NEXT_PRESENT,
-    .ref_count = ~0
+    .ref_count = ~1
   };
 #endif
 
@@ -701,7 +798,7 @@ vlib_buffer_free_inline (vlib_main_t * vm, u32 * buffers, u32 n_buffers,
          vlib_prefetch_buffer_header (b[6], LOAD);
          vlib_prefetch_buffer_header (b[7], LOAD);
 
-#if defined(CLIB_HAVE_VEC128) && !__aarch64__
+#if defined(CLIB_HAVE_VEC128)
          u8x16 p0, p1, p2, p3, r;
          p0 = u8x16_load_unaligned (b[0]);
          p1 = u8x16_load_unaligned (b[1]);
          p2 = u8x16_load_unaligned (b[2]);
          p3 = u8x16_load_unaligned (b[3]);
@@ -779,7 +876,7 @@ vlib_buffer_free_inline (vlib_main_t * vm, u32 * buffers, u32 n_buffers,
        }
 
       buffer_pool_index = b[0]->buffer_pool_index;
-#if defined(CLIB_HAVE_VEC128) && !__aarch64__
+#if defined(CLIB_HAVE_VEC128)
       bpi_vec.buffer_pool_index = buffer_pool_index;
 #endif
       bp = vlib_get_buffer_pool (vm, buffer_pool_index);
@@ -802,7 +899,7 @@ vlib_buffer_free_inline (vlib_main_t * vm, u32 * buffers, u32 n_buffers,
          n_queue = 0;
        }
 
-      if (flags & VLIB_BUFFER_NEXT_PRESENT)
+      if (maybe_next && (flags & VLIB_BUFFER_NEXT_PRESENT))
        {
          bi = next;
          goto next_in_chain;
        }
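On free, the per-thread cache absorbs buffers until it is full; the overflow
is flushed to the locked pool while the most recently freed, cache-hot
indices stay in the thread cache. The vector fast path batches per-buffer
checks into 16-byte header compares using the masks above. A scalar model of
what must hold for a buffer to qualify (not part of the patch; can_free_fast
is a hypothetical name):

    /* A buffer may take the templated fast path only if it belongs to the
       current pool, is not chained, and has exactly one reference. For an
       allocated buffer ref_count >= 1, so (ref_count & ~1) == 0 implies
       ref_count == 1 - which is why the mask uses ~1 rather than ~0. */
    static_always_inline int
    can_free_fast (vlib_buffer_t * b, u8 pool_index)
    {
      return (b->ref_count & ~1) == 0
        && (b->flags & VLIB_BUFFER_NEXT_PRESENT) == 0
        && b->buffer_pool_index == pool_index;
    }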
@@ -918,13 +1015,18 @@ vlib_buffer_free_from_ring_no_next (vlib_main_t * vm, u32 * ring, u32 start,
 int vlib_buffer_add_data (vlib_main_t * vm, u32 * buffer_index, void *data,
                           u32 n_data_bytes);
 
+/* Define vlib_buffer and vnet_buffer flags bits preserved for copy/clone */
+#define VLIB_BUFFER_COPY_CLONE_FLAGS_MASK                       \
+  (VLIB_BUFFER_NEXT_PRESENT | VLIB_BUFFER_TOTAL_LENGTH_VALID |  \
+   VLIB_BUFFER_IS_TRACED | ~VLIB_BUFFER_FLAGS_ALL)
+
 /* duplicate all buffers in chain */
 always_inline vlib_buffer_t *
 vlib_buffer_copy (vlib_main_t * vm, vlib_buffer_t * b)
 {
   vlib_buffer_t *s, *d, *fd;
   uword n_alloc, n_buffers = 1;
-  u32 flag_mask = VLIB_BUFFER_NEXT_PRESENT | VLIB_BUFFER_TOTAL_LENGTH_VALID;
+  u32 flag_mask = VLIB_BUFFER_COPY_CLONE_FLAGS_MASK;
   int i;
 
   s = b;
@@ -951,6 +1053,7 @@ vlib_buffer_copy (vlib_main_t * vm, vlib_buffer_t * b)
       d->current_data = s->current_data;
       d->current_length = s->current_length;
       d->flags = s->flags & flag_mask;
+      d->trace_handle = s->trace_handle;
       d->total_length_not_including_first_buffer =
        s->total_length_not_including_first_buffer;
       clib_memcpy_fast (d->opaque, s->opaque, sizeof (s->opaque));
@@ -997,6 +1100,31 @@ vlib_buffer_copy_no_chain (vlib_main_t * vm, vlib_buffer_t * b, u32 * di)
   return d;
 }
 
+/** \brief Move packet from current position to offset position in buffer.
+   Only works for small packets using a single buffer with room to fit the move
+   @param vm - (vlib_main_t *) vlib main data structure pointer
+   @param b - (vlib_buffer_t *) pointer to buffer
+   @param offset - (i16) position to move the packet in buffer
+ */
+always_inline void
+vlib_buffer_move (vlib_main_t * vm, vlib_buffer_t * b, i16 offset)
+{
+  ASSERT ((b->flags & VLIB_BUFFER_NEXT_PRESENT) == 0);
+  ASSERT (offset + VLIB_BUFFER_PRE_DATA_SIZE >= 0);
+  ASSERT (offset + b->current_length <
+          vlib_buffer_get_default_data_size (vm));
+
+  u8 *source = vlib_buffer_get_current (b);
+  b->current_data = offset;
+  u8 *destination = vlib_buffer_get_current (b);
+  u16 length = b->current_length;
+
+  if (source + length <= destination)   /* no overlap */
+    clib_memcpy_fast (destination, source, length);
+  else
+    memmove (destination, source, length);
+}
+
 /** \brief Create a maximum of 256 clones of buffer and store them
     in the supplied array
 
@@ -1006,12 +1134,14 @@ vlib_buffer_copy_no_chain (vlib_main_t * vm, vlib_buffer_t * b, u32 * di)
   @param n_buffers - (u16) number of buffer clones requested (<=256)
   @param head_end_offset - (u16) offset relative to current position
   where packet head ends
+  @param offset - (i16) copy packet head at current position if 0,
+             else at the given offset position to change the headroom space
   @return - (u16) number of buffers actually cloned, may be
   less than the number requested or zero
 */
 always_inline u16
 vlib_buffer_clone_256 (vlib_main_t * vm, u32 src_buffer, u32 * buffers,
-                      u16 n_buffers, u16 head_end_offset)
+                      u16 n_buffers, u16 head_end_offset, i16 offset)
 {
   u16 i;
   vlib_buffer_t *s = vlib_get_buffer (vm, src_buffer);
@@ -1019,10 +1149,16 @@ vlib_buffer_clone_256 (vlib_main_t * vm, u32 src_buffer, u32 * buffers,
   ASSERT (s->ref_count == 1);
   ASSERT (n_buffers);
   ASSERT (n_buffers <= 256);
+  ASSERT (offset + VLIB_BUFFER_PRE_DATA_SIZE >= 0);
+  ASSERT ((offset + head_end_offset) <
+          vlib_buffer_get_default_data_size (vm));
 
   if (s->current_length <= head_end_offset + CLIB_CACHE_LINE_BYTES * 2)
     {
       buffers[0] = src_buffer;
+      if (offset)
+       vlib_buffer_move (vm, s, offset);
+
       for (i = 1; i < n_buffers; i++)
        {
          vlib_buffer_t *d;
@@ -1035,7 +1171,7 @@ vlib_buffer_clone_256 (vlib_main_t * vm, u32 src_buffer, u32 * buffers,
       return n_buffers;
     }
 
-  if (PREDICT_FALSE (n_buffers == 1))
+  if (PREDICT_FALSE ((n_buffers == 1) && (offset == 0)))
     {
       buffers[0] = src_buffer;
       return 1;
@@ -1047,7 +1183,11 @@ vlib_buffer_clone_256 (vlib_main_t * vm, u32 src_buffer, u32 * buffers,
   for (i = 0; i < n_buffers; i++)
     {
       vlib_buffer_t *d = vlib_get_buffer (vm, buffers[i]);
-      d->current_data = s->current_data;
+      if (offset)
+       d->current_data = offset;
+      else
+       d->current_data = s->current_data;
+
       d->current_length = head_end_offset;
       ASSERT (d->buffer_pool_index == s->buffer_pool_index);
 
@@ -1058,8 +1198,9 @@ vlib_buffer_clone_256 (vlib_main_t * vm, u32 src_buffer, u32 * buffers,
          d->total_length_not_including_first_buffer +=
            s->total_length_not_including_first_buffer;
        }
-      d->flags = s->flags | VLIB_BUFFER_NEXT_PRESENT;
-      d->flags &= ~VLIB_BUFFER_EXT_HDR_VALID;
+      d->flags = (s->flags & VLIB_BUFFER_COPY_CLONE_FLAGS_MASK) |
+       VLIB_BUFFER_NEXT_PRESENT;
+      d->trace_handle = s->trace_handle;
       clib_memcpy_fast (d->opaque, s->opaque, sizeof (s->opaque));
       clib_memcpy_fast (d->opaque2, s->opaque2, sizeof (s->opaque2));
       clib_memcpy_fast (vlib_buffer_get_current (d),
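vlib_buffer_move rewrites current_data and relocates the payload, picking
clib_memcpy_fast when the regions cannot overlap and memmove otherwise;
vlib_buffer_clone_256 uses it to re-seat the shared head when a non-zero
offset is requested. A usage sketch (not part of the patch, assuming `b` is
a single-buffer packet currently at current_data == 0):

    /* Shift the packet 8 bytes to the right, then claim the freed-up
       space in front of it for a new 8-byte header. */
    vlib_buffer_move (vm, b, 8);
    vlib_buffer_advance (b, -8);
    /* vlib_buffer_get_current (b) now points at 8 writable header bytes */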
@@ -1086,12 +1227,14 @@
   @param n_buffers - (u16) number of buffer clones requested (<=256)
   @param head_end_offset - (u16) offset relative to current position
   where packet head ends
+  @param offset - (i16) copy packet head at current position if 0,
+             else at the given offset position to change the headroom space
   @return - (u16) number of buffers actually cloned, may be
   less than the number requested or zero
 */
 always_inline u16
-vlib_buffer_clone (vlib_main_t * vm, u32 src_buffer, u32 * buffers,
-                  u16 n_buffers, u16 head_end_offset)
+vlib_buffer_clone_at_offset (vlib_main_t * vm, u32 src_buffer, u32 * buffers,
+                            u16 n_buffers, u16 head_end_offset, i16 offset)
 {
   vlib_buffer_t *s = vlib_get_buffer (vm, src_buffer);
   u16 n_cloned = 0;
@@ -1103,16 +1246,36 @@
       n_cloned += vlib_buffer_clone_256 (vm,
                                          vlib_get_buffer_index (vm, copy),
                                          (buffers + n_cloned),
-                                         256, head_end_offset);
+                                         256, head_end_offset, offset);
       n_buffers -= 256;
     }
   n_cloned += vlib_buffer_clone_256 (vm, src_buffer,
                                      buffers + n_cloned,
-                                     n_buffers, head_end_offset);
+                                     n_buffers, head_end_offset, offset);
 
   return n_cloned;
 }
 
+/** \brief Create multiple clones of buffer and store them
+    in the supplied array
+
+    @param vm - (vlib_main_t *) vlib main data structure pointer
+    @param src_buffer - (u32) source buffer index
+    @param buffers - (u32 * ) buffer index array
+    @param n_buffers - (u16) number of buffer clones requested (<=256)
+    @param head_end_offset - (u16) offset relative to current position
+    where packet head ends
+    @return - (u16) number of buffers actually cloned, may be
+    less than the number requested or zero
+*/
+always_inline u16
+vlib_buffer_clone (vlib_main_t * vm, u32 src_buffer, u32 * buffers,
+                  u16 n_buffers, u16 head_end_offset)
+{
+  return vlib_buffer_clone_at_offset (vm, src_buffer, buffers, n_buffers,
+                                      head_end_offset, 0);
+}
+
 /** \brief Attach cloned tail to the buffer
 
     @param vm - (vlib_main_t *) vlib main data structure pointer
@@ -1212,7 +1375,7 @@ vlib_buffer_chain_append_data_with_alloc (vlib_main_t * vm,
 void vlib_buffer_chain_validate (vlib_main_t * vm, vlib_buffer_t * first);
 
 format_function_t format_vlib_buffer, format_vlib_buffer_and_data,
-  format_vlib_buffer_contents;
+  format_vlib_buffer_contents, format_vlib_buffer_no_chain;
 
 typedef struct
 {
@@ -1321,13 +1484,19 @@ vlib_buffer_chain_linearize (vlib_main_t * vm, vlib_buffer_t * b)
 
       if (dst_left == 0)
        {
-         if (db != first)
-           db->current_data = 0;
          db->current_length = dp - (u8 *) vlib_buffer_get_current (db);
          ASSERT (db->flags & VLIB_BUFFER_NEXT_PRESENT);
          db = vlib_get_buffer (vm, db->next_buffer);
          dst_left = data_size;
-         dp = db->data;
+         if (db->current_data > 0)
+           {
+             db->current_data = 0;
+           }
+         else
+           {
+             dst_left += -db->current_data;
+           }
+         dp = vlib_buffer_get_current (db);
        }
 
       while (src_left == 0)
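The rename keeps the old vlib_buffer_clone behavior as the offset == 0
wrapper above, while vlib_buffer_clone_at_offset lets a caller give each
clone a different headroom. A closing sketch (not part of the patch; bi0 and
the chosen offsets are illustrative values only, with the source packet
assumed to sit at current_data == 0):

    /* Fan one packet out to two paths that need different encapsulations:
       each clone shares the original tail via ref_count but owns a private
       64-byte head copy, written 32 bytes deeper into the buffer, so each
       clone has 32 extra bytes of headroom for a prepended header. */
    u32 clones[2];
    u16 n = vlib_buffer_clone_at_offset (vm, bi0, clones, 2,
                                         64 /* head_end_offset */,
                                         32 /* offset */);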