* Allocate/free network buffers.
*/
+#include <vppinfra/linux/sysfs.h>
#include <vlib/vlib.h>
#include <vlib/unix/unix.h>
+#include <vpp/stats/stat_segment.h>
-vlib_buffer_callbacks_t *vlib_buffer_callbacks = 0;
+#define VLIB_BUFFER_DEFAULT_BUFFERS_PER_NUMA 16384
+#define VLIB_BUFFER_DEFAULT_BUFFERS_PER_NUMA_UNPRIV 8192
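+
+/* The smaller _UNPRIV default applies when a pool ends up backed by regular
+ * (non-hugepage) pages, i.e. the unprivileged fallback path in
+ * vlib_buffer_main_init_numa_node () below. */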
-/* when running unpriviledged we are limited by RLIMIT_MEMLOCK which is
- typically set to 16MB so setting default size for buffer memory to 14MB
- */
-static u32 vlib_buffer_physmem_sz = 14 << 20;
+#ifdef CLIB_HAVE_VEC128
+/* Assumptions by vlib_buffer_free_inline: */
+STATIC_ASSERT_FITS_IN (vlib_buffer_t, flags, 16);
+STATIC_ASSERT_FITS_IN (vlib_buffer_t, ref_count, 16);
+STATIC_ASSERT_FITS_IN (vlib_buffer_t, buffer_pool_index, 16);
+#endif
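+
+/* These three fields are presumably inspected via a single 16-byte vector
+ * load in vlib_buffer_free_inline, hence the requirement that flags,
+ * ref_count and buffer_pool_index all sit within the first 16 bytes of
+ * vlib_buffer_t. */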
-vlib_buffer_main_t buffer_main;
+/* Make sure that buffer template size is not accidentally changed */
+STATIC_ASSERT_OFFSET_OF (vlib_buffer_t, template_end, 64);
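+
+/* The first 64 bytes (up to template_end) are what
+ * vlib_buffer_copy_template () stamps onto each buffer from the per-pool
+ * template initialized in vlib_buffer_pool_create () below. */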
-/* logging */
-static vlib_log_class_t buffer_log_default;
+u16 __vlib_buffer_external_hdr_size = 0;
+
+static void
+buffer_gauges_update_cached_fn (stat_segment_directory_entry_t * e,
+ u32 index);
+
+static void
+buffer_gauges_update_available_fn (stat_segment_directory_entry_t * e,
+ u32 index);
+
+static void
+buffer_gauges_update_used_fn (stat_segment_directory_entry_t * e, u32 index);
uword
vlib_buffer_length_in_chain_slow_path (vlib_main_t * vm,
}
u8 *
-format_vlib_buffer (u8 * s, va_list * args)
+format_vlib_buffer_no_chain (u8 * s, va_list * args)
{
vlib_buffer_t *b = va_arg (*args, vlib_buffer_t *);
u32 indent = format_get_indent (s);
a = format (a, "%s ", v);
foreach_vlib_buffer_flag
#undef _
- s = format (s, "current data %d, length %d, free-list %d, clone-count %u",
- b->current_data, b->current_length,
- vlib_buffer_get_free_list_index (b), b->n_add_refs);
+ s = format (s, "current data %d, length %d, buffer-pool %d, "
+ "ref-count %u", b->current_data, b->current_length,
+ b->buffer_pool_index, b->ref_count);
if (b->flags & VLIB_BUFFER_TOTAL_LENGTH_VALID)
s = format (s, ", totlen-nifb %d",
b->total_length_not_including_first_buffer);
if (b->flags & VLIB_BUFFER_IS_TRACED)
- s = format (s, ", trace 0x%x", b->trace_index);
+ s = format (s, ", trace handle 0x%x", b->trace_handle);
if (a)
s = format (s, "\n%U%v", format_white_space, indent, a);
vec_free (a);
+ return s;
+}
+
+u8 *
+format_vlib_buffer (u8 * s, va_list * args)
+{
+ vlib_main_t *vm = vlib_get_main ();
+ vlib_buffer_t *b = va_arg (*args, vlib_buffer_t *);
+ u32 indent = format_get_indent (s);
+
+ s = format (s, "%U", format_vlib_buffer_no_chain, b);
+
while (b->flags & VLIB_BUFFER_NEXT_PRESENT)
{
- vlib_main_t *vm = vlib_get_main ();
u32 next_buffer = b->next_buffer;
b = vlib_get_buffer (vm, next_buffer);
s =
- format (s, "\n%Unext-buffer 0x%x, segment length %d, clone-count %u",
+ format (s, "\n%Unext-buffer 0x%x, segment length %d, ref-count %u",
format_white_space, indent, next_buffer, b->current_length,
- b->n_add_refs);
+ b->ref_count);
}
return s;
u32 bi,
uword follow_buffer_next, uword ** unique_hash)
{
+ vlib_buffer_main_t *bm = vm->buffer_main;
vlib_buffer_t *b = vlib_get_buffer (vm, bi);
- vlib_buffer_free_list_t *fl;
-
- if (pool_is_free_index
- (vm->buffer_free_list_pool, vlib_buffer_get_free_list_index (b)))
- return format (0, "unknown free list 0x%x",
- vlib_buffer_get_free_list_index (b));
- fl =
- pool_elt_at_index (vm->buffer_free_list_pool,
- vlib_buffer_get_free_list_index (b));
+ if (vec_len (bm->buffer_pools) <= b->buffer_pool_index)
+ return format (0, "unknown buffer pool 0x%x", b->buffer_pool_index);
if ((signed) b->current_data < (signed) -VLIB_BUFFER_PRE_DATA_SIZE)
return format (0, "current data %d before pre-data", b->current_data);
- if (b->current_data + b->current_length > fl->n_data_bytes)
- return format (0, "%d-%d beyond end of buffer %d",
- b->current_data, b->current_length, fl->n_data_bytes);
+ if (b->current_data + b->current_length >
+ vlib_buffer_get_default_data_size (vm))
+ return format (0, "%d-%d beyond end of buffer %d", b->current_data,
+ b->current_length, vlib_buffer_get_default_data_size (vm));
if (follow_buffer_next && (b->flags & VLIB_BUFFER_NEXT_PRESENT))
{
vlib_buffer_known_state_t k;
u8 *msg, *result;
- k = vlib_buffer_is_known (b->next_buffer);
+ k = vlib_buffer_is_known (vm, b->next_buffer);
if (k != VLIB_BUFFER_KNOWN_ALLOCATED)
return format (0, "next 0x%x: %U",
b->next_buffer, format_vlib_buffer_known_state, k);
goto done;
}
- k = vlib_buffer_is_known (bi);
+ k = vlib_buffer_is_known (vm, bi);
if (k != known_state)
{
msg = format (0, "is %U; expected %U",
return result;
}
-/*
- * Hand-craft a static vector w/ length 1, so vec_len(vlib_mains) =1
- * and vlib_mains[0] = &vlib_global_main from the beginning of time.
- *
- * The only place which should ever expand vlib_mains is start_workers()
- * in threads.c. It knows about the bootstrap vector.
- */
-/* *INDENT-OFF* */
-static struct
-{
- vec_header_t h;
- vlib_main_t *vm;
-} __attribute__ ((packed)) __bootstrap_vlib_main_vector
- __attribute__ ((aligned (CLIB_CACHE_LINE_BYTES))) =
-{
- .h.len = 1,
- .vm = &vlib_global_main,
-};
-/* *INDENT-ON* */
-
-vlib_main_t **vlib_mains = &__bootstrap_vlib_main_vector.vm;
-
-
-/* When dubugging validate that given buffers are either known allocated
+/* When debugging, validate that the given buffers are either known allocated
or known free. */
void
vlib_buffer_validate_alloc_free (vlib_main_t * vm,
uword n_buffers,
vlib_buffer_known_state_t expected_state)
{
+ vlib_buffer_main_t *bm = vm->buffer_main;
u32 *b;
uword i, bi, is_free;
if (CLIB_DEBUG == 0)
return;
- if (vlib_buffer_callbacks)
- return;
-
is_free = expected_state == VLIB_BUFFER_KNOWN_ALLOCATED;
b = buffers;
for (i = 0; i < n_buffers; i++)
bi = b[0];
b += 1;
- known = vlib_buffer_is_known (bi);
- if (known != expected_state)
- {
- ASSERT (0);
- vlib_panic_with_msg
- (vm, "%s %U buffer 0x%x",
- is_free ? "freeing" : "allocating",
- format_vlib_buffer_known_state, known, bi);
- }
-
- vlib_buffer_set_known_state
- (bi, is_free ? VLIB_BUFFER_KNOWN_FREE : VLIB_BUFFER_KNOWN_ALLOCATED);
- }
-}
-
-/* Add buffer free list. */
-static vlib_buffer_free_list_index_t
-vlib_buffer_create_free_list_helper (vlib_main_t * vm,
- u32 n_data_bytes,
- u32 is_public, u32 is_default, u8 * name)
-{
- vlib_buffer_main_t *bm = &buffer_main;
- vlib_buffer_free_list_t *f;
- int i;
-
- ASSERT (vlib_get_thread_index () == 0);
-
- if (!is_default && pool_elts (vm->buffer_free_list_pool) == 0)
- {
- vlib_buffer_free_list_index_t default_free_free_list_index;
-
- /* *INDENT-OFF* */
- default_free_free_list_index =
- vlib_buffer_create_free_list_helper
- (vm,
- /* default buffer size */ VLIB_BUFFER_DEFAULT_FREE_LIST_BYTES,
- /* is_public */ 1,
- /* is_default */ 1,
- (u8 *) "default");
- /* *INDENT-ON* */
- ASSERT (default_free_free_list_index ==
- VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);
-
- if (n_data_bytes == VLIB_BUFFER_DEFAULT_FREE_LIST_BYTES && is_public)
- return default_free_free_list_index;
- }
-
- pool_get_aligned (vm->buffer_free_list_pool, f, CLIB_CACHE_LINE_BYTES);
+ known = vlib_buffer_is_known (vm, bi);
- clib_memset (f, 0, sizeof (f[0]));
- f->index = f - vm->buffer_free_list_pool;
- vec_validate (f->buffers, 0);
- vec_reset_length (f->buffers);
- f->n_data_bytes = vlib_buffer_round_size (n_data_bytes);
- f->min_n_buffers_each_alloc = VLIB_FRAME_SIZE;
- f->buffer_pool_index = 0;
- f->name = clib_mem_is_vec (name) ? name : format (0, "%s", name);
-
- /* Setup free buffer template. */
- vlib_buffer_set_free_list_index (&f->buffer_init_template, f->index);
- f->buffer_init_template.n_add_refs = 0;
-
- if (is_public)
- {
- uword *p = hash_get (bm->free_list_by_size, f->n_data_bytes);
- if (!p)
- hash_set (bm->free_list_by_size, f->n_data_bytes, f->index);
- }
-
- for (i = 1; i < vec_len (vlib_mains); i++)
- {
- vlib_main_t *wvm = vlib_mains[i];
- vlib_buffer_free_list_t *wf;
- pool_get_aligned (wvm->buffer_free_list_pool,
- wf, CLIB_CACHE_LINE_BYTES);
- ASSERT (f - vm->buffer_free_list_pool ==
- wf - wvm->buffer_free_list_pool);
- wf[0] = f[0];
- wf->buffers = 0;
- vec_validate (wf->buffers, 0);
- vec_reset_length (wf->buffers);
- wf->n_alloc = 0;
- }
-
- return f->index;
-}
-
-vlib_buffer_free_list_index_t
-vlib_buffer_create_free_list (vlib_main_t * vm, u32 n_data_bytes,
- char *fmt, ...)
-{
- va_list va;
- u8 *name;
-
- va_start (va, fmt);
- name = va_format (0, fmt, &va);
- va_end (va);
-
- return vlib_buffer_create_free_list_helper (vm, n_data_bytes,
- /* is_public */ 0,
- /* is_default */ 0,
- name);
-}
-
-static void
-del_free_list (vlib_main_t * vm, vlib_buffer_free_list_t * f)
-{
- vlib_buffer_pool_t *bp = vlib_buffer_pool_get (f->buffer_pool_index);
-
- vec_add_aligned (bp->buffers, f->buffers, vec_len (f->buffers),
- CLIB_CACHE_LINE_BYTES);
- vec_free (f->name);
- vec_free (f->buffers);
-
- /* Poison it. */
- clib_memset (f, 0xab, sizeof (f[0]));
-}
-
-/* Add buffer free list. */
-void
-vlib_buffer_delete_free_list_internal (vlib_main_t * vm,
- vlib_buffer_free_list_index_t index)
-{
- vlib_buffer_free_list_t *f;
- int i;
+ if (known == VLIB_BUFFER_UNKNOWN &&
+ expected_state == VLIB_BUFFER_KNOWN_FREE)
+ known = VLIB_BUFFER_KNOWN_FREE;
- ASSERT (vlib_get_thread_index () == 0);
-
- f = vlib_buffer_get_free_list (vm, index);
-
- ASSERT (vec_len (f->buffers) == f->n_alloc);
-
- del_free_list (vm, f);
-
- pool_put (vm->buffer_free_list_pool, f);
-
- for (i = 1; i < vec_len (vlib_mains); i++)
- {
- vlib_main_t *wvm = vlib_mains[i];
- f = vlib_buffer_get_free_list (vlib_mains[i], index);
- del_free_list (wvm, f);
- pool_put (wvm->buffer_free_list_pool, f);
- }
-}
-
-static_always_inline void *
-vlib_buffer_pool_get_buffer (vlib_main_t * vm, vlib_buffer_pool_t * bp)
-{
- return vlib_physmem_alloc_from_map (vm, bp->physmem_map_index,
- bp->buffer_size, CLIB_CACHE_LINE_BYTES);
-}
-
-/* Make sure free list has at least given number of free buffers. */
-static uword
-vlib_buffer_fill_free_list_internal (vlib_main_t * vm,
- vlib_buffer_free_list_t * fl,
- uword min_free_buffers)
-{
- vlib_buffer_t *b;
- vlib_buffer_pool_t *bp = vlib_buffer_pool_get (fl->buffer_pool_index);
- int n;
- u32 *bi;
- u32 n_alloc = 0;
-
- /* Already have enough free buffers on free list? */
- n = min_free_buffers - vec_len (fl->buffers);
- if (n <= 0)
- return min_free_buffers;
-
- if (vec_len (bp->buffers) > 0)
- {
- int n_copy, n_left;
- clib_spinlock_lock (&bp->lock);
- n_copy = clib_min (vec_len (bp->buffers), n);
- n_left = vec_len (bp->buffers) - n_copy;
- vec_add_aligned (fl->buffers, bp->buffers + n_left, n_copy,
- CLIB_CACHE_LINE_BYTES);
- _vec_len (bp->buffers) = n_left;
- clib_spinlock_unlock (&bp->lock);
- n = min_free_buffers - vec_len (fl->buffers);
- if (n <= 0)
- return min_free_buffers;
- }
-
- /* Always allocate round number of buffers. */
- n = round_pow2 (n, CLIB_CACHE_LINE_BYTES / sizeof (u32));
-
- /* Always allocate new buffers in reasonably large sized chunks. */
- n = clib_max (n, fl->min_n_buffers_each_alloc);
-
- clib_spinlock_lock (&bp->lock);
- while (n_alloc < n)
- {
- if ((b = vlib_buffer_pool_get_buffer (vm, bp)) == 0)
- goto done;
-
- n_alloc += 1;
-
- vec_add2_aligned (fl->buffers, bi, 1, CLIB_CACHE_LINE_BYTES);
- bi[0] = vlib_get_buffer_index (vm, b);
-
- if (CLIB_DEBUG > 0)
- vlib_buffer_set_known_state (bi[0], VLIB_BUFFER_KNOWN_FREE);
-
- clib_memset (b, 0, sizeof (vlib_buffer_t));
- vlib_buffer_init_for_free_list (b, fl);
-
- if (fl->buffer_init_function)
- fl->buffer_init_function (vm, fl, bi, 1);
- }
-
-done:
- clib_spinlock_unlock (&bp->lock);
- fl->n_alloc += n_alloc;
- return n_alloc;
-}
-
-void *
-vlib_set_buffer_free_callback (vlib_main_t * vm, void *fp)
-{
- vlib_buffer_main_t *bm = &buffer_main;
- void *rv = bm->buffer_free_callback;
-
- bm->buffer_free_callback = fp;
- return rv;
-}
-
-static_always_inline void
-recycle_or_free (vlib_main_t * vm, vlib_buffer_main_t * bm, u32 bi,
- vlib_buffer_t * b, u32 follow_buffer_next)
-{
- vlib_buffer_free_list_t *fl;
- vlib_buffer_free_list_index_t fi;
- u32 flags, next;
-
- fl = vlib_buffer_get_buffer_free_list (vm, b, &fi);
-
- do
- {
- vlib_buffer_t *nb = vlib_get_buffer (vm, bi);
- flags = nb->flags;
- next = nb->next_buffer;
- if (nb->n_add_refs)
- nb->n_add_refs--;
- else
+ if (known != expected_state)
{
- vlib_buffer_validate_alloc_free (vm, &bi, 1,
- VLIB_BUFFER_KNOWN_ALLOCATED);
- vlib_buffer_add_to_free_list (vm, fl, bi, 1);
+ clib_panic ("%s %U buffer 0x%x", is_free ? "freeing" : "allocating",
+ format_vlib_buffer_known_state, known, bi);
}
- bi = next;
- }
- while (follow_buffer_next && (flags & VLIB_BUFFER_NEXT_PRESENT));
-}
-
-static_always_inline void
-vlib_buffer_free_inline (vlib_main_t * vm,
- u32 * buffers, u32 n_buffers, u32 follow_buffer_next)
-{
- vlib_buffer_main_t *bm = &buffer_main;
- vlib_buffer_t *p, *b0, *b1, *b2, *b3;
- int i = 0;
- u32 (*cb) (vlib_main_t * vm, u32 * buffers, u32 n_buffers,
- u32 follow_buffer_next);
-
- cb = bm->buffer_free_callback;
-
- if (PREDICT_FALSE (cb != 0))
- n_buffers = (*cb) (vm, buffers, n_buffers, follow_buffer_next);
-
- if (!n_buffers)
- return;
- while (i + 11 < n_buffers)
- {
- p = vlib_get_buffer (vm, buffers[i + 8]);
- vlib_prefetch_buffer_header (p, LOAD);
- p = vlib_get_buffer (vm, buffers[i + 9]);
- vlib_prefetch_buffer_header (p, LOAD);
- p = vlib_get_buffer (vm, buffers[i + 10]);
- vlib_prefetch_buffer_header (p, LOAD);
- p = vlib_get_buffer (vm, buffers[i + 11]);
- vlib_prefetch_buffer_header (p, LOAD);
-
- b0 = vlib_get_buffer (vm, buffers[i]);
- b1 = vlib_get_buffer (vm, buffers[i + 1]);
- b2 = vlib_get_buffer (vm, buffers[i + 2]);
- b3 = vlib_get_buffer (vm, buffers[i + 3]);
-
- VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0);
- VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b1);
- VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b2);
- VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b3);
-
- recycle_or_free (vm, bm, buffers[i], b0, follow_buffer_next);
- recycle_or_free (vm, bm, buffers[i + 1], b1, follow_buffer_next);
- recycle_or_free (vm, bm, buffers[i + 2], b2, follow_buffer_next);
- recycle_or_free (vm, bm, buffers[i + 3], b3, follow_buffer_next);
-
- i += 4;
- }
-
- while (i < n_buffers)
- {
- b0 = vlib_get_buffer (vm, buffers[i]);
- VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0);
- recycle_or_free (vm, bm, buffers[i], b0, follow_buffer_next);
- i++;
+ clib_spinlock_lock (&bm->buffer_known_hash_lockp);
+ hash_set (bm->buffer_known_hash, bi, is_free ? VLIB_BUFFER_KNOWN_FREE :
+ VLIB_BUFFER_KNOWN_ALLOCATED);
+ clib_spinlock_unlock (&bm->buffer_known_hash_lockp);
}
}
-static void
-vlib_buffer_free_internal (vlib_main_t * vm, u32 * buffers, u32 n_buffers)
-{
- vlib_buffer_free_inline (vm, buffers, n_buffers, /* follow_buffer_next */
- 1);
-}
-
-static void
-vlib_buffer_free_no_next_internal (vlib_main_t * vm, u32 * buffers,
- u32 n_buffers)
-{
- vlib_buffer_free_inline (vm, buffers, n_buffers, /* follow_buffer_next */
- 0);
-}
-
void
vlib_packet_template_init (vlib_main_t * vm,
vlib_packet_template_t * t,
vec_add (t->packet_data, packet_data, n_packet_data_bytes);
t->min_n_buffers_each_alloc = min_n_buffers_each_alloc;
-
vlib_worker_thread_barrier_release (vm);
}
/* Append given data to end of buffer, possibly allocating new buffers. */
int
-vlib_buffer_add_data (vlib_main_t * vm,
- vlib_buffer_free_list_index_t free_list_index,
- u32 * buffer_index, void *data, u32 n_data_bytes)
+vlib_buffer_add_data (vlib_main_t * vm, u32 * buffer_index, void *data,
+ u32 n_data_bytes)
{
u32 n_buffer_bytes, n_left, n_left_this_buffer, bi;
vlib_buffer_t *b;
void *d;
bi = *buffer_index;
- if (bi == ~0
- && 1 != vlib_buffer_alloc_from_free_list (vm, &bi, 1, free_list_index))
+ if (bi == ~0 && 1 != vlib_buffer_alloc (vm, &bi, 1))
goto out_of_buffers;
d = data;
n_left = n_data_bytes;
- n_buffer_bytes = vlib_buffer_free_list_buffer_size (vm, free_list_index);
+ n_buffer_bytes = vlib_buffer_get_default_data_size (vm);
b = vlib_get_buffer (vm, bi);
b->flags &= ~VLIB_BUFFER_TOTAL_LENGTH_VALID;
break;
d += n;
- if (1 !=
- vlib_buffer_alloc_from_free_list (vm, &b->next_buffer, 1,
- free_list_index))
+ if (1 != vlib_buffer_alloc (vm, &b->next_buffer, 1))
goto out_of_buffers;
b->flags |= VLIB_BUFFER_NEXT_PRESENT;
u16
vlib_buffer_chain_append_data_with_alloc (vlib_main_t * vm,
- vlib_buffer_free_list_index_t
- free_list_index,
vlib_buffer_t * first,
vlib_buffer_t ** last, void *data,
u16 data_len)
{
vlib_buffer_t *l = *last;
- u32 n_buffer_bytes =
- vlib_buffer_free_list_buffer_size (vm, free_list_index);
+ u32 n_buffer_bytes = vlib_buffer_get_default_data_size (vm);
u16 copied = 0;
ASSERT (n_buffer_bytes >= l->current_length + l->current_data);
while (data_len)
u16 max = n_buffer_bytes - l->current_length - l->current_data;
if (max == 0)
{
- if (1 !=
- vlib_buffer_alloc_from_free_list (vm, &l->next_buffer, 1,
- free_list_index))
+ if (1 != vlib_buffer_alloc_from_pool (vm, &l->next_buffer, 1,
+ first->buffer_pool_index))
return copied;
*last = l = vlib_buffer_chain_buffer (vm, l, l->next_buffer);
max = n_buffer_bytes - l->current_length - l->current_data;
return copied;
}
+static uword
+vlib_buffer_alloc_size (uword ext_hdr_size, uword data_size)
+{
+ uword alloc_size = ext_hdr_size + sizeof (vlib_buffer_t) + data_size;
+ alloc_size = CLIB_CACHE_LINE_ROUND (alloc_size);
+
+  /* in case we end up with an even number of cache lines, add one more for
+   * better cache occupancy */
+ alloc_size |= CLIB_CACHE_LINE_BYTES;
+
+ return alloc_size;
+}
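+
+/* Worked example (illustrative, assuming 64-byte cache lines, no external
+ * header and sizeof (vlib_buffer_t) == 128): a 2048-byte data size yields
+ * 128 + 2048 = 2176 bytes, i.e. 34 cache lines; 34 is even, so the OR with
+ * CLIB_CACHE_LINE_BYTES bumps the allocation to 2240 bytes (35 lines). */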
+
u8
-vlib_buffer_register_physmem_map (vlib_main_t * vm, u32 physmem_map_index)
+vlib_buffer_pool_create (vlib_main_t * vm, char *name, u32 data_size,
+ u32 physmem_map_index)
{
- vlib_buffer_main_t *bm = &buffer_main;
- vlib_buffer_pool_t *p;
+ vlib_buffer_main_t *bm = vm->buffer_main;
+ vlib_buffer_pool_t *bp;
vlib_physmem_map_t *m = vlib_physmem_get_map (vm, physmem_map_index);
uword start = pointer_to_uword (m->base);
uword size = (uword) m->n_pages << m->log2_page_size;
+ uword i, j;
+ u32 alloc_size, n_alloc_per_page;
+
+ if (vec_len (bm->buffer_pools) >= 255)
+ return ~0;
+
+ vec_add2_aligned (bm->buffer_pools, bp, 1, CLIB_CACHE_LINE_BYTES);
if (bm->buffer_mem_size == 0)
{
clib_panic ("buffer memory size out of range!");
}
- vec_add2 (bm->buffer_pools, p, 1);
- p->start = start;
- p->size = size;
- p->physmem_map_index = physmem_map_index;
+ bp->start = start;
+ bp->size = size;
+ bp->index = bp - bm->buffer_pools;
+ bp->buffer_template.buffer_pool_index = bp->index;
+ bp->buffer_template.ref_count = 1;
+ bp->physmem_map_index = physmem_map_index;
+ bp->name = format (0, "%s%c", name, 0);
+ bp->data_size = data_size;
+ bp->numa_node = m->numa_node;
- ASSERT (p - bm->buffer_pools < 256);
- return p - bm->buffer_pools;
-}
+ vec_validate_aligned (bp->threads, vlib_get_n_threads () - 1,
+ CLIB_CACHE_LINE_BYTES);
-static u8 *
-format_vlib_buffer_free_list (u8 * s, va_list * va)
-{
- vlib_buffer_free_list_t *f = va_arg (*va, vlib_buffer_free_list_t *);
- u32 threadnum = va_arg (*va, u32);
- uword bytes_alloc, bytes_free, n_free, size;
+ alloc_size = vlib_buffer_alloc_size (bm->ext_hdr_size, data_size);
+ n_alloc_per_page = (1ULL << m->log2_page_size) / alloc_size;
- if (!f)
- return format (s, "%=7s%=30s%=12s%=12s%=12s%=12s%=12s%=12s",
- "Thread", "Name", "Index", "Size", "Alloc", "Free",
- "#Alloc", "#Free");
+ /* preallocate buffer indices memory */
+ bp->n_buffers = m->n_pages * n_alloc_per_page;
+ bp->buffers = clib_mem_alloc_aligned (bp->n_buffers * sizeof (u32),
+ CLIB_CACHE_LINE_BYTES);
- size = sizeof (vlib_buffer_t) + f->n_data_bytes;
- n_free = vec_len (f->buffers);
- bytes_alloc = size * f->n_alloc;
- bytes_free = size * n_free;
+ clib_spinlock_init (&bp->lock);
- s = format (s, "%7d%30v%12d%12d%=12U%=12U%=12d%=12d", threadnum,
- f->name, f->index, f->n_data_bytes,
- format_memory_size, bytes_alloc,
- format_memory_size, bytes_free, f->n_alloc, n_free);
+ for (j = 0; j < m->n_pages; j++)
+ for (i = 0; i < n_alloc_per_page; i++)
+ {
+ u8 *p;
+ u32 bi;
- return s;
+ p = m->base + (j << m->log2_page_size) + i * alloc_size;
+ p += bm->ext_hdr_size;
+
+ /*
+ * Waste 1 buffer (maximum) so that 0 is never a valid buffer index.
+ * Allows various places to ASSERT (bi != 0). Much easier
+ * than debugging downstream crashes in successor nodes.
+ */
+ if (p == m->base)
+ continue;
+
+ vlib_buffer_copy_template ((vlib_buffer_t *) p, &bp->buffer_template);
+
+ bi = vlib_get_buffer_index (vm, (vlib_buffer_t *) p);
+
+ bp->buffers[bp->n_avail++] = bi;
+
+ vlib_get_buffer (vm, bi);
+ }
+
+ return bp->index;
}
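+
+/*
+ * Usage sketch (not part of this change; names are hypothetical): a
+ * component wanting its own pool would typically create a physmem map
+ * first and then register the pool, e.g.
+ *
+ *   u32 map_index;
+ *   clib_error_t *err = vlib_physmem_shared_map_create
+ *     (vm, "my-pool", 32 << 20, 0, CLIB_PMALLOC_NUMA_LOCAL, &map_index);
+ *   if (err == 0)
+ *     my_pool_index = vlib_buffer_pool_create (vm, "my-pool", 2048,
+ *                                              map_index);
+ */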
-static clib_error_t *
-show_buffers (vlib_main_t * vm,
- unformat_input_t * input, vlib_cli_command_t * cmd)
+static u8 *
+format_vlib_buffer_pool (u8 * s, va_list * va)
{
- vlib_buffer_free_list_t *f;
- vlib_main_t *curr_vm;
- u32 vm_index = 0;
+ vlib_main_t *vm = va_arg (*va, vlib_main_t *);
+ vlib_buffer_pool_t *bp = va_arg (*va, vlib_buffer_pool_t *);
+ vlib_buffer_pool_thread_t *bpt;
+ u32 cached = 0;
+
+ if (!bp)
+ return format (s, "%-20s%=6s%=6s%=6s%=11s%=6s%=8s%=8s%=8s",
+ "Pool Name", "Index", "NUMA", "Size", "Data Size",
+ "Total", "Avail", "Cached", "Used");
+
+ /* *INDENT-OFF* */
+ vec_foreach (bpt, bp->threads)
+ cached += bpt->n_cached;
+ /* *INDENT-ON* */
+
+ s = format (s, "%-20s%=6d%=6d%=6u%=11u%=6u%=8u%=8u%=8u",
+ bp->name, bp->index, bp->numa_node, bp->data_size +
+ sizeof (vlib_buffer_t) + vm->buffer_main->ext_hdr_size,
+ bp->data_size, bp->n_buffers, bp->n_avail, cached,
+ bp->n_buffers - bp->n_avail - cached);
- vlib_cli_output (vm, "%U", format_vlib_buffer_free_list, 0, 0);
+ return s;
+}
- do
- {
- curr_vm = vlib_mains[vm_index];
+u8 *
+format_vlib_buffer_pool_all (u8 *s, va_list *va)
+{
+ vlib_main_t *vm = va_arg (*va, vlib_main_t *);
+ vlib_buffer_main_t *bm = vm->buffer_main;
+ vlib_buffer_pool_t *bp;
- /* *INDENT-OFF* */
- pool_foreach (f, curr_vm->buffer_free_list_pool, ({
- vlib_cli_output (vm, "%U", format_vlib_buffer_free_list, f, vm_index);
- }));
- /* *INDENT-ON* */
+ s = format (s, "%U", format_vlib_buffer_pool, vm, 0);
- vm_index++;
- }
- while (vm_index < vec_len (vlib_mains));
+ vec_foreach (bp, bm->buffer_pools)
+ s = format (s, "\n%U", format_vlib_buffer_pool, vm, bp);
+
+ return s;
+}
+static clib_error_t *
+show_buffers (vlib_main_t *vm, unformat_input_t *input,
+ vlib_cli_command_t *cmd)
+{
+ vlib_cli_output (vm, "%U", format_vlib_buffer_pool_all, vm);
return 0;
}
/* *INDENT-ON* */
clib_error_t *
-vlib_buffer_main_init (struct vlib_main_t * vm)
+vlib_buffer_worker_init (vlib_main_t * vm)
+{
+ vlib_buffer_main_t *bm = vm->buffer_main;
+ vlib_buffer_pool_t *bp;
+
+ /* *INDENT-OFF* */
+ vec_foreach (bp, bm->buffer_pools)
+ {
+ clib_spinlock_lock (&bp->lock);
+ vec_validate_aligned (bp->threads, vlib_get_n_threads () - 1,
+ CLIB_CACHE_LINE_BYTES);
+ clib_spinlock_unlock (&bp->lock);
+ }
+ /* *INDENT-ON* */
+
+ return 0;
+}
+
+VLIB_WORKER_INIT_FUNCTION (vlib_buffer_worker_init);
+
+static clib_error_t *
+vlib_buffer_main_init_numa_alloc (struct vlib_main_t *vm, u32 numa_node,
+ u32 * physmem_map_index,
+ clib_mem_page_sz_t log2_page_size,
+ u8 unpriv)
{
- vlib_buffer_main_t *bm = &buffer_main;
+ vlib_buffer_main_t *bm = vm->buffer_main;
+ u32 buffers_per_numa = bm->buffers_per_numa;
clib_error_t *error;
- u32 physmem_map_index;
- u8 pool_index;
- int log2_page_size = 0;
+ u32 buffer_size;
+ uword n_pages, pagesize;
+ u8 *name = 0;
+
+ ASSERT (log2_page_size != CLIB_MEM_PAGE_SZ_UNKNOWN);
+
+ pagesize = clib_mem_page_bytes (log2_page_size);
+ buffer_size = vlib_buffer_alloc_size (bm->ext_hdr_size,
+ vlib_buffer_get_default_data_size
+ (vm));
+ if (buffer_size > pagesize)
+ return clib_error_return (0, "buffer size (%llu) is greater than page "
+ "size (%llu)", buffer_size, pagesize);
+
+ if (buffers_per_numa == 0)
+ buffers_per_numa = unpriv ? VLIB_BUFFER_DEFAULT_BUFFERS_PER_NUMA_UNPRIV :
+ VLIB_BUFFER_DEFAULT_BUFFERS_PER_NUMA;
+
+ name = format (0, "buffers-numa-%d%c", numa_node, 0);
+ n_pages = (buffers_per_numa - 1) / (pagesize / buffer_size) + 1;
+ error = vlib_physmem_shared_map_create (vm, (char *) name,
+ n_pages * pagesize,
+ min_log2 (pagesize), numa_node,
+ physmem_map_index);
+ vec_free (name);
+ return error;
+}
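+
+/* Sizing example (illustrative): with 2 MB pages and a 2240-byte per-buffer
+ * allocation, one page holds 2097152 / 2240 = 936 buffers, so the default
+ * of 16384 buffers per numa node maps to (16384 - 1) / 936 + 1 = 18 pages,
+ * i.e. a 36 MB shared map. */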
- buffer_log_default = vlib_log_register_class ("buffer", 0);
+static clib_error_t *
+vlib_buffer_main_init_numa_node (struct vlib_main_t *vm, u32 numa_node,
+ u8 * index)
+{
+ vlib_buffer_main_t *bm = vm->buffer_main;
+ u32 physmem_map_index;
+ clib_error_t *error;
+ u8 *name = 0;
- if (vlib_buffer_callbacks)
+ if (bm->log2_page_size == CLIB_MEM_PAGE_SZ_UNKNOWN)
{
- /* external plugin has registered own buffer callbacks
- so we just copy them and quit */
- clib_memcpy_fast (&bm->cb, vlib_buffer_callbacks,
- sizeof (vlib_buffer_callbacks_t));
- bm->callbacks_registered = 1;
- return 0;
+ error = vlib_buffer_main_init_numa_alloc (vm, numa_node,
+ &physmem_map_index,
+ CLIB_MEM_PAGE_SZ_DEFAULT_HUGE,
+ 0 /* unpriv */ );
+ if (!error)
+ goto buffer_pool_create;
+
+ /* If alloc failed, retry without hugepages */
+ vlib_log_warn (bm->log_default,
+ "numa[%u] falling back to non-hugepage backed "
+ "buffer pool (%U)", numa_node, format_clib_error, error);
+ clib_error_free (error);
+
+ error = vlib_buffer_main_init_numa_alloc (vm, numa_node,
+ &physmem_map_index,
+ CLIB_MEM_PAGE_SZ_DEFAULT,
+ 1 /* unpriv */ );
}
+ else
+ error = vlib_buffer_main_init_numa_alloc (vm, numa_node,
+ &physmem_map_index,
+ bm->log2_page_size,
+ 0 /* unpriv */ );
+ if (error)
+ return error;
+
+buffer_pool_create:
+ name = format (name, "default-numa-%d%c", numa_node, 0);
+ *index = vlib_buffer_pool_create (vm, (char *) name,
+ vlib_buffer_get_default_data_size (vm),
+ physmem_map_index);
+
+ if (*index == (u8) ~ 0)
+ error = clib_error_return (0, "maximum number of buffer pools reached");
+ vec_free (name);
+
+ return error;
+}
+
+void
+vlib_buffer_main_alloc (vlib_main_t * vm)
+{
+ vlib_buffer_main_t *bm;
+
+ if (vm->buffer_main)
+ return;
+
+ vm->buffer_main = bm = clib_mem_alloc (sizeof (bm[0]));
+ clib_memset (vm->buffer_main, 0, sizeof (bm[0]));
+ bm->default_data_size = VLIB_BUFFER_DEFAULT_DATA_SIZE;
+}
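+
+/* Both vlib_buffer_main_init () and the "buffers" config handler call this,
+ * so whichever runs first allocates the buffer main; later calls are no-ops
+ * thanks to the early return above. */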
+
+static u32
+buffer_get_cached (vlib_buffer_pool_t * bp)
+{
+ u32 cached = 0;
+ vlib_buffer_pool_thread_t *bpt;
+
+ clib_spinlock_lock (&bp->lock);
+
+ /* *INDENT-OFF* */
+ vec_foreach (bpt, bp->threads)
+ cached += bpt->n_cached;
+ /* *INDENT-ON* */
+
+ clib_spinlock_unlock (&bp->lock);
+
+ return cached;
+}
+
+static vlib_buffer_pool_t *
+buffer_get_by_index (vlib_buffer_main_t * bm, u32 index)
+{
+ vlib_buffer_pool_t *bp;
+  if (!bm->buffer_pools || vec_len (bm->buffer_pools) <= index)
+ return 0;
+ bp = vec_elt_at_index (bm->buffer_pools, index);
+
+ return bp;
+}
+
+static void
+buffer_gauges_update_used_fn (stat_segment_directory_entry_t * e, u32 index)
+{
+ vlib_main_t *vm = vlib_get_main ();
+ vlib_buffer_pool_t *bp = buffer_get_by_index (vm->buffer_main, index);
+ if (!bp)
+ return;
+
+ e->value = bp->n_buffers - bp->n_avail - buffer_get_cached (bp);
+}
+
+static void
+buffer_gauges_update_available_fn (stat_segment_directory_entry_t * e,
+ u32 index)
+{
+ vlib_main_t *vm = vlib_get_main ();
+ vlib_buffer_pool_t *bp = buffer_get_by_index (vm->buffer_main, index);
+ if (!bp)
+ return;
+
+ e->value = bp->n_avail;
+}
+
+static void
+buffer_gauges_update_cached_fn (stat_segment_directory_entry_t * e, u32 index)
+{
+ vlib_main_t *vm = vlib_get_main ();
+ vlib_buffer_pool_t *bp = buffer_get_by_index (vm->buffer_main, index);
+ if (!bp)
+ return;
+
+ e->value = buffer_get_cached (bp);
+}
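+
+/* For any pool the three gauges partition n_buffers: available (n_avail) +
+ * cached (per-thread n_cached summed) + used, which is how
+ * buffer_gauges_update_used_fn computes the "used" value. */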
+
+clib_error_t *
+vlib_buffer_main_init (struct vlib_main_t * vm)
+{
+ vlib_buffer_main_t *bm;
+ clib_error_t *err;
+ clib_bitmap_t *bmp = 0, *bmp_has_memory = 0;
+ u32 numa_node;
+ vlib_buffer_pool_t *bp;
+ u8 *name = 0, first_valid_buffer_pool_index = ~0;
+
+ vlib_buffer_main_alloc (vm);
+
+ bm = vm->buffer_main;
+ bm->log_default = vlib_log_register_class ("buffer", 0);
+ bm->ext_hdr_size = __vlib_buffer_external_hdr_size;
- bm->cb.vlib_buffer_fill_free_list_cb = &vlib_buffer_fill_free_list_internal;
- bm->cb.vlib_buffer_free_cb = &vlib_buffer_free_internal;
- bm->cb.vlib_buffer_free_no_next_cb = &vlib_buffer_free_no_next_internal;
- bm->cb.vlib_buffer_delete_free_list_cb =
- &vlib_buffer_delete_free_list_internal;
clib_spinlock_init (&bm->buffer_known_hash_lockp);
-retry:
- error = vlib_physmem_shared_map_create (vm, "buffers",
- vlib_buffer_physmem_sz,
- log2_page_size,
- CLIB_PMALLOC_NUMA_LOCAL,
- &physmem_map_index);
+ if ((err = clib_sysfs_read ("/sys/devices/system/node/online", "%U",
+ unformat_bitmap_list, &bmp)))
+ clib_error_free (err);
+
+ if ((err = clib_sysfs_read ("/sys/devices/system/node/has_memory", "%U",
+ unformat_bitmap_list, &bmp_has_memory)))
+ clib_error_free (err);
+
+ if (bmp && bmp_has_memory)
+ bmp = clib_bitmap_and (bmp, bmp_has_memory);
- if (error && log2_page_size == 0)
+ /* no info from sysfs, assuming that only numa 0 exists */
+ if (bmp == 0)
+ bmp = clib_bitmap_set (bmp, 0, 1);
+
+ if (clib_bitmap_last_set (bmp) >= VLIB_BUFFER_MAX_NUMA_NODES)
+    clib_panic ("system has more than %u NUMA nodes",
+ VLIB_BUFFER_MAX_NUMA_NODES);
+
+ /* *INDENT-OFF* */
+ clib_bitmap_foreach (numa_node, bmp)
{
- vlib_log_warn (buffer_log_default, "%U", format_clib_error, error);
- clib_error_free (error);
- vlib_log_warn (buffer_log_default, "falling back to non-hugepage "
- "backed buffer pool");
- log2_page_size = min_log2 (clib_mem_get_page_size ());
- goto retry;
+ u8 *index = bm->default_buffer_pool_index_for_numa + numa_node;
+ index[0] = ~0;
+ if ((err = vlib_buffer_main_init_numa_node (vm, numa_node, index)))
+ {
+ clib_error_report (err);
+ clib_error_free (err);
+ continue;
+ }
+
+ if (first_valid_buffer_pool_index == 0xff)
+ first_valid_buffer_pool_index = index[0];
}
+ /* *INDENT-ON* */
- if (error)
- return error;
+ if (first_valid_buffer_pool_index == (u8) ~ 0)
+ {
+ err = clib_error_return (0, "failed to allocate buffer pool(s)");
+ goto done;
+ }
- pool_index = vlib_buffer_register_physmem_map (vm, physmem_map_index);
- vlib_buffer_pool_t *bp = vlib_buffer_pool_get (pool_index);
- clib_spinlock_init (&bp->lock);
- bp->buffer_size = VLIB_BUFFER_DEFAULT_FREE_LIST_BYTES +
- sizeof (vlib_buffer_t);
+ /* *INDENT-OFF* */
+ clib_bitmap_foreach (numa_node, bmp)
+ {
+ if (bm->default_buffer_pool_index_for_numa[numa_node] == (u8) ~0)
+ bm->default_buffer_pool_index_for_numa[numa_node] =
+ first_valid_buffer_pool_index;
+ }
+ /* *INDENT-ON* */
- return 0;
+ vec_foreach (bp, bm->buffer_pools)
+ {
+ if (bp->n_buffers == 0)
+ continue;
+
+ vec_reset_length (name);
+ name = format (name, "/buffer-pools/%s/cached%c", bp->name, 0);
+ stat_segment_register_gauge (name, buffer_gauges_update_cached_fn,
+ bp - bm->buffer_pools);
+
+ vec_reset_length (name);
+ name = format (name, "/buffer-pools/%s/used%c", bp->name, 0);
+ stat_segment_register_gauge (name, buffer_gauges_update_used_fn,
+ bp - bm->buffer_pools);
+
+ vec_reset_length (name);
+ name = format (name, "/buffer-pools/%s/available%c", bp->name, 0);
+ stat_segment_register_gauge (name, buffer_gauges_update_available_fn,
+ bp - bm->buffer_pools);
+ }
+
+done:
+ vec_free (bmp);
+ vec_free (bmp_has_memory);
+ vec_free (name);
+ return err;
}
static clib_error_t *
vlib_buffers_configure (vlib_main_t * vm, unformat_input_t * input)
{
- u32 size_in_mb;
+ vlib_buffer_main_t *bm;
+
+ vlib_buffer_main_alloc (vm);
+
+ bm = vm->buffer_main;
+ bm->log2_page_size = CLIB_MEM_PAGE_SZ_UNKNOWN;
while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
{
- if (unformat (input, "memory-size-in-mb %d", &size_in_mb))
- vlib_buffer_physmem_sz = size_in_mb << 20;
+ if (unformat (input, "buffers-per-numa %u", &bm->buffers_per_numa))
+ ;
+ else if (unformat (input, "page-size %U", unformat_log2_page_size,
+ &bm->log2_page_size))
+ ;
+ else if (unformat (input, "default data-size %u",
+ &bm->default_data_size))
+ ;
else
return unformat_parse_error (input);
}
VLIB_EARLY_CONFIG_FUNCTION (vlib_buffers_configure, "buffers");
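+
+/* Example startup.conf stanza consumed by the parser above (values are
+ * illustrative; "page-size" takes whatever unformat_log2_page_size
+ * accepts):
+ *
+ *   buffers {
+ *     buffers-per-numa 128000
+ *     default data-size 2048
+ *     page-size default-hugepage
+ *   }
+ */
+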
+#if VLIB_BUFFER_ALLOC_FAULT_INJECTOR > 0
+u32
+vlib_buffer_alloc_may_fail (vlib_main_t * vm, u32 n_buffers)
+{
+ f64 r;
+
+ r = random_f64 (&vm->buffer_alloc_success_seed);
+
+ /* Fail this request? */
+ if (r > vm->buffer_alloc_success_rate)
+ n_buffers--;
+ /* 5% chance of returning nothing at all */
+ if (r > vm->buffer_alloc_success_rate && r > 0.95)
+ n_buffers = 0;
+
+ return n_buffers;
+}
+#endif
+
+__clib_export int
+vlib_buffer_set_alloc_free_callback (
+ vlib_main_t *vm, vlib_buffer_alloc_free_callback_t *alloc_callback_fn,
+ vlib_buffer_alloc_free_callback_t *free_callback_fn)
+{
+ vlib_buffer_main_t *bm = vm->buffer_main;
+ if ((alloc_callback_fn && bm->alloc_callback_fn) ||
+ (free_callback_fn && bm->free_callback_fn))
+ return 1;
+ bm->alloc_callback_fn = alloc_callback_fn;
+ bm->free_callback_fn = free_callback_fn;
+ return 0;
+}
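+
+/* Only a single alloc and a single free callback can be registered at a
+ * time: the function returns 1 without changing anything if a non-NULL
+ * callback would overwrite an existing one; passing NULL clears the
+ * corresponding slot. */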
/** @endcond */
/*