/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * buffer.c: allocate/free network buffers.
 *
 * Copyright (c) 2008 Eliot Dresselhaus
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

/**
 * @file
 *
 * Allocate/free network buffers.
 */
#include <vppinfra/linux/sysfs.h>
#include <vlib/vlib.h>
#include <vlib/unix/unix.h>
#include <vpp/stats/stat_segment.h>
#define VLIB_BUFFER_DEFAULT_BUFFERS_PER_NUMA 16384
#define VLIB_BUFFER_DEFAULT_BUFFERS_PER_NUMA_UNPRIV 8192
#ifdef CLIB_HAVE_VEC128
/* Assumptions by vlib_buffer_free_inline: */
STATIC_ASSERT_FITS_IN (vlib_buffer_t, flags, 16);
STATIC_ASSERT_FITS_IN (vlib_buffer_t, ref_count, 16);
STATIC_ASSERT_FITS_IN (vlib_buffer_t, buffer_pool_index, 16);
#endif
/* Make sure that buffer template size is not accidentally changed */
STATIC_ASSERT_OFFSET_OF (vlib_buffer_t, template_end, 64);
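/*
 * Note (added; not part of the original file): 64 bytes is one cache line
 * on most supported targets.  vlib_buffer_copy_template () in
 * vlib_buffer_pool_create () below initializes every buffer from
 * bp->buffer_template, so the template must continue to fit in this
 * fixed-size prefix of vlib_buffer_t.
 */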
u16 __vlib_buffer_external_hdr_size = 0;
static void
buffer_gauges_update_cached_fn (stat_segment_directory_entry_t * e,
                                u32 index);

static void
buffer_gauges_update_available_fn (stat_segment_directory_entry_t * e,
                                   u32 index);

static void
buffer_gauges_update_used_fn (stat_segment_directory_entry_t * e, u32 index);
uword
vlib_buffer_length_in_chain_slow_path (vlib_main_t * vm,
                                       vlib_buffer_t * b_first)
{
  vlib_buffer_t *b = b_first;
  uword l_first = b_first->current_length;
  uword l = 0;

  while (b->flags & VLIB_BUFFER_NEXT_PRESENT)
    {
      b = vlib_get_buffer (vm, b->next_buffer);
      l += b->current_length;
    }
  b_first->total_length_not_including_first_buffer = l;
  b_first->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
  return l + l_first;
}
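/*
 * Usage note (added; not part of the original file): callers normally go
 * through the inline vlib_buffer_length_in_chain (), which returns the
 * cached total_length_not_including_first_buffer when
 * VLIB_BUFFER_TOTAL_LENGTH_VALID is set and only falls back to this slow
 * path to walk the chain and repopulate that cache.
 */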
u8 *
format_vlib_buffer_no_chain (u8 * s, va_list * args)
{
  vlib_buffer_t *b = va_arg (*args, vlib_buffer_t *);
  u32 indent = format_get_indent (s);
  u8 *a = 0;

#define _(bit, name, v) \
  if (v && (b->flags & VLIB_BUFFER_##name)) \
    a = format (a, "%s ", v);
  foreach_vlib_buffer_flag
#undef _
    s = format (s, "current data %d, length %d, buffer-pool %d, "
                "ref-count %u", b->current_data, b->current_length,
                b->buffer_pool_index, b->ref_count);

  if (b->flags & VLIB_BUFFER_TOTAL_LENGTH_VALID)
    s = format (s, ", totlen-nifb %d",
                b->total_length_not_including_first_buffer);

  if (b->flags & VLIB_BUFFER_IS_TRACED)
    s = format (s, ", trace handle 0x%x", b->trace_handle);

  if (a)
    s = format (s, "\n%U%v", format_white_space, indent, a);
  vec_free (a);

  return s;
}
u8 *
format_vlib_buffer (u8 * s, va_list * args)
{
  vlib_main_t *vm = vlib_get_main ();
  vlib_buffer_t *b = va_arg (*args, vlib_buffer_t *);
  u32 indent = format_get_indent (s);

  s = format (s, "%U", format_vlib_buffer_no_chain, b);

  while (b->flags & VLIB_BUFFER_NEXT_PRESENT)
    {
      u32 next_buffer = b->next_buffer;
      b = vlib_get_buffer (vm, next_buffer);

      s =
        format (s, "\n%Unext-buffer 0x%x, segment length %d, ref-count %u",
                format_white_space, indent, next_buffer, b->current_length,
                b->ref_count);
    }

  return s;
}
u8 *
format_vlib_buffer_and_data (u8 * s, va_list * args)
{
  vlib_buffer_t *b = va_arg (*args, vlib_buffer_t *);

  s = format (s, "%U, %U",
              format_vlib_buffer, b,
              format_hex_bytes, vlib_buffer_get_current (b), 64);

  return s;
}
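/*
 * Usage sketch (added; not part of the original file): the format functions
 * above follow the vppinfra "%U" convention, where the caller passes the
 * format function pointer followed by its arguments, e.g.
 *
 *   vlib_cli_output (vm, "%U", format_vlib_buffer_and_data,
 *                    vlib_get_buffer (vm, bi));
 *
 * where `bi' is a hypothetical buffer index owned by the caller.
 */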
u8 *
format_vlib_buffer_known_state (u8 * s, va_list * args)
{
  vlib_buffer_known_state_t state = va_arg (*args, vlib_buffer_known_state_t);
  char *t = 0;

  switch (state)
    {
    case VLIB_BUFFER_UNKNOWN:
      t = "unknown";
      break;
    case VLIB_BUFFER_KNOWN_ALLOCATED:
      t = "known-allocated";
      break;
    case VLIB_BUFFER_KNOWN_FREE:
      t = "known-free";
      break;
    default:
      t = "invalid";
      break;
    }

  return format (s, "%s", t);
}
u8 *
format_vlib_buffer_contents (u8 * s, va_list * va)
{
  vlib_main_t *vm = va_arg (*va, vlib_main_t *);
  vlib_buffer_t *b = va_arg (*va, vlib_buffer_t *);

  while (1)
    {
      vec_add (s, vlib_buffer_get_current (b), b->current_length);
      if (!(b->flags & VLIB_BUFFER_NEXT_PRESENT))
        break;
      b = vlib_get_buffer (vm, b->next_buffer);
    }

  return s;
}
static u8 *
vlib_validate_buffer_helper (vlib_main_t * vm,
                             u32 bi,
                             uword follow_buffer_next, uword ** unique_hash)
{
  vlib_buffer_main_t *bm = vm->buffer_main;
  vlib_buffer_t *b = vlib_get_buffer (vm, bi);

  if (vec_len (bm->buffer_pools) <= b->buffer_pool_index)
    return format (0, "unknown buffer pool 0x%x", b->buffer_pool_index);

  if ((signed) b->current_data < (signed) -VLIB_BUFFER_PRE_DATA_SIZE)
    return format (0, "current data %d before pre-data", b->current_data);

  if (b->current_data + b->current_length >
      vlib_buffer_get_default_data_size (vm))
    return format (0, "%d-%d beyond end of buffer %d", b->current_data,
                   b->current_length, vlib_buffer_get_default_data_size (vm));

  if (follow_buffer_next && (b->flags & VLIB_BUFFER_NEXT_PRESENT))
    {
      vlib_buffer_known_state_t k;
      u8 *msg, *result;

      k = vlib_buffer_is_known (vm, b->next_buffer);
      if (k != VLIB_BUFFER_KNOWN_ALLOCATED)
        return format (0, "next 0x%x: %U",
                       b->next_buffer, format_vlib_buffer_known_state, k);

      if (unique_hash)
        {
          if (hash_get (*unique_hash, b->next_buffer))
            return format (0, "duplicate buffer 0x%x", b->next_buffer);

          hash_set1 (*unique_hash, b->next_buffer);
        }

      msg = vlib_validate_buffer (vm, b->next_buffer, follow_buffer_next);
      if (msg)
        {
          result = format (0, "next 0x%x: %v", b->next_buffer, msg);
          vec_free (msg);
          return result;
        }
    }

  return 0;
}
u8 *
vlib_validate_buffer (vlib_main_t * vm, u32 bi, uword follow_buffer_next)
{
  return vlib_validate_buffer_helper (vm, bi, follow_buffer_next,
                                      /* unique_hash */ 0);
}
u8 *
vlib_validate_buffers (vlib_main_t * vm,
                       u32 * buffers,
                       uword next_buffer_stride,
                       uword n_buffers,
                       vlib_buffer_known_state_t known_state,
                       uword follow_buffer_next)
{
  uword i, *hash;
  u32 bi, *b = buffers;
  vlib_buffer_known_state_t k;
  u8 *msg = 0, *result = 0;

  hash = hash_create (0, 0);
  for (i = 0; i < n_buffers; i++)
    {
      bi = b[0];
      b += next_buffer_stride;

      /* Buffer is not unique. */
      if (hash_get (hash, bi))
        {
          msg = format (0, "not unique");
          goto done;
        }

      k = vlib_buffer_is_known (vm, bi);
      if (k != known_state)
        {
          msg = format (0, "is %U; expected %U",
                        format_vlib_buffer_known_state, k,
                        format_vlib_buffer_known_state, known_state);
          goto done;
        }

      msg = vlib_validate_buffer_helper (vm, bi, follow_buffer_next, &hash);
      if (msg)
        goto done;

      hash_set1 (hash, bi);
    }

done:
  if (msg)
    {
      result = format (0, "0x%x: %v", bi, msg);
      vec_free (msg);
    }
  hash_free (hash);
  return result;
}
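/*
 * Hedged usage sketch (added; not part of the original file): a caller that
 * expects every index in a hypothetical vector `from' of `n' buffers to be
 * known-allocated could check them with
 *
 *   u8 *err = vlib_validate_buffers (vm, from, 1, n,
 *                                    VLIB_BUFFER_KNOWN_ALLOCATED, 1);
 *   if (err)
 *     clib_warning ("%v", err);
 *
 * where the stride of 1 walks consecutive indices and the final argument
 * asks the helper to follow buffer chains.
 */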
/* When debugging, validate that given buffers are either known allocated
   or known free. */
void
vlib_buffer_validate_alloc_free (vlib_main_t * vm,
                                 u32 * buffers,
                                 uword n_buffers,
                                 vlib_buffer_known_state_t expected_state)
{
  vlib_buffer_main_t *bm = vm->buffer_main;
  u32 *b;
  uword i, bi, is_free;

  if (CLIB_DEBUG == 0)
    return;

  /* when freeing, the buffers handed in must currently be known allocated */
  is_free = expected_state == VLIB_BUFFER_KNOWN_ALLOCATED;
  b = buffers;
  for (i = 0; i < n_buffers; i++)
    {
      vlib_buffer_known_state_t known;

      bi = b[0];
      b += 1;
      known = vlib_buffer_is_known (vm, bi);

      if (known == VLIB_BUFFER_UNKNOWN &&
          expected_state == VLIB_BUFFER_KNOWN_FREE)
        known = VLIB_BUFFER_KNOWN_FREE;

      if (known != expected_state)
        clib_panic ("%s %U buffer 0x%x", is_free ? "freeing" : "allocating",
                    format_vlib_buffer_known_state, known, bi);

      clib_spinlock_lock (&bm->buffer_known_hash_lockp);
      hash_set (bm->buffer_known_hash, bi, is_free ? VLIB_BUFFER_KNOWN_FREE :
                VLIB_BUFFER_KNOWN_ALLOCATED);
      clib_spinlock_unlock (&bm->buffer_known_hash_lockp);
    }
}
void
vlib_packet_template_init (vlib_main_t * vm,
                           vlib_packet_template_t * t,
                           void *packet_data,
                           uword n_packet_data_bytes,
                           uword min_n_buffers_each_alloc, char *fmt, ...)
{
  va_list va;
  u8 *name;

  va_start (va, fmt);
  name = va_format (0, fmt, &va);
  va_end (va);

  vlib_worker_thread_barrier_sync (vm);

  clib_memset (t, 0, sizeof (t[0]));
  /* assign the name after the memset so it is not wiped out */
  t->name = name;

  vec_add (t->packet_data, packet_data, n_packet_data_bytes);
  t->min_n_buffers_each_alloc = min_n_buffers_each_alloc;
  vlib_worker_thread_barrier_release (vm);
}
void *
vlib_packet_template_get_packet (vlib_main_t * vm,
                                 vlib_packet_template_t * t, u32 * bi_result)
{
  u32 bi;
  vlib_buffer_t *b;

  if (vlib_buffer_alloc (vm, &bi, 1) != 1)
    return 0;

  *bi_result = bi;

  b = vlib_get_buffer (vm, bi);
  clib_memcpy_fast (vlib_buffer_get_current (b),
                    t->packet_data, vec_len (t->packet_data));
  b->current_length = vec_len (t->packet_data);

  return b->data;
}
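/*
 * Hedged usage sketch (added; not part of the original file): a node that
 * repeatedly emits the same packet might build it once and stamp out
 * copies; `hdr' and the counts below are hypothetical.
 *
 *   static vlib_packet_template_t tpl;
 *   u8 hdr[42];
 *   u32 bi;
 *
 *   vlib_packet_template_init (vm, &tpl, hdr, sizeof (hdr),
 *                              16, "example-template");
 *   if (vlib_packet_template_get_packet (vm, &tpl, &bi))
 *     ;  // bi now names a fresh buffer whose payload is a copy of hdr
 */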
/* Append given data to end of buffer, possibly allocating new buffers. */
int
vlib_buffer_add_data (vlib_main_t * vm, u32 * buffer_index, void *data,
                      u32 n_data_bytes)
{
  u32 n_buffer_bytes, n_left, n_left_this_buffer, bi;
  vlib_buffer_t *b;
  void *d;

  bi = *buffer_index;
  if (bi == ~0 && 1 != vlib_buffer_alloc (vm, &bi, 1))
    goto out_of_buffers;

  d = data;
  n_left = n_data_bytes;
  n_buffer_bytes = vlib_buffer_get_default_data_size (vm);

  b = vlib_get_buffer (vm, bi);
  b->flags &= ~VLIB_BUFFER_TOTAL_LENGTH_VALID;

  /* Get to the end of the chain before we try to append data... */
  while (b->flags & VLIB_BUFFER_NEXT_PRESENT)
    b = vlib_get_buffer (vm, b->next_buffer);

  while (1)
    {
      u32 n;

      ASSERT (n_buffer_bytes >= b->current_length);
      n_left_this_buffer =
        n_buffer_bytes - (b->current_data + b->current_length);
      n = clib_min (n_left_this_buffer, n_left);
      clib_memcpy_fast (vlib_buffer_get_current (b) + b->current_length, d,
                        n);
      b->current_length += n;
      n_left -= n;
      if (n_left == 0)
        break;

      d += n;
      if (1 != vlib_buffer_alloc (vm, &b->next_buffer, 1))
        goto out_of_buffers;

      b->flags |= VLIB_BUFFER_NEXT_PRESENT;
      b = vlib_get_buffer (vm, b->next_buffer);
    }

  *buffer_index = bi;
  return 0;

out_of_buffers:
  clib_warning ("out of buffers");
  return 1;
}
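/*
 * Hedged usage sketch (added; not part of the original file): building a
 * possibly chained buffer from an arbitrary byte string `payload' of
 * length `len' (both hypothetical):
 *
 *   u32 bi = ~0;
 *   if (vlib_buffer_add_data (vm, &bi, payload, len))
 *     ;  // allocation failed (assuming a non-zero return signals failure)
 *
 * Passing ~0 asks the function to allocate the first buffer itself;
 * further buffers are chained on as the data outgrows each one.
 */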
u16
vlib_buffer_chain_append_data_with_alloc (vlib_main_t * vm,
                                          vlib_buffer_t * first,
                                          vlib_buffer_t ** last, void *data,
                                          u16 data_len)
{
  vlib_buffer_t *l = *last;
  u32 n_buffer_bytes = vlib_buffer_get_default_data_size (vm);
  u16 copied = 0;

  ASSERT (n_buffer_bytes >= l->current_length + l->current_data);

  while (data_len)
    {
      u16 max = n_buffer_bytes - l->current_length - l->current_data;
      if (max == 0)
        {
          if (1 != vlib_buffer_alloc_from_pool (vm, &l->next_buffer, 1,
                                                first->buffer_pool_index))
            return copied;
          *last = l = vlib_buffer_chain_buffer (vm, l, l->next_buffer);
          max = n_buffer_bytes - l->current_length - l->current_data;
        }

      u16 len = (data_len > max) ? max : data_len;
      clib_memcpy_fast (vlib_buffer_get_current (l) + l->current_length,
                        data + copied, len);
      vlib_buffer_chain_increase_length (first, l, len);
      data_len -= len;
      copied += len;
    }

  return copied;
}
static uword
vlib_buffer_alloc_size (uword ext_hdr_size, uword data_size)
{
  uword alloc_size = ext_hdr_size + sizeof (vlib_buffer_t) + data_size;
  alloc_size = CLIB_CACHE_LINE_ROUND (alloc_size);

  /* in case when we have even number of cachelines, we add one more for
   * better cache occupancy */
  alloc_size |= CLIB_CACHE_LINE_BYTES;

  return alloc_size;
}
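/*
 * Worked example (added; not part of the original file), assuming 64-byte
 * cache lines: if ext_hdr_size + sizeof (vlib_buffer_t) + data_size rounds
 * up to 2176 bytes, that is 34 cache lines (an even count).  Bit 6 of 2176
 * is clear, so the OR above yields 2176 | 64 = 2240 bytes, i.e. 35 lines.
 * The odd line count staggers consecutive buffers across cache sets, which
 * is the "better cache occupancy" referred to in the comment.
 */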
u8
vlib_buffer_pool_create (vlib_main_t * vm, char *name, u32 data_size,
                         u32 physmem_map_index)
{
  vlib_buffer_main_t *bm = vm->buffer_main;
  vlib_buffer_pool_t *bp;
  vlib_physmem_map_t *m = vlib_physmem_get_map (vm, physmem_map_index);
  uword start = pointer_to_uword (m->base);
  uword size = (uword) m->n_pages << m->log2_page_size;
  uword i, j;
  u32 alloc_size, n_alloc_per_page;

  if (vec_len (bm->buffer_pools) >= 255)
    return ~0;

  vec_add2_aligned (bm->buffer_pools, bp, 1, CLIB_LOG2_CACHE_LINE_BYTES);

  if (bm->buffer_mem_size == 0)
    {
      bm->buffer_mem_start = start;
      bm->buffer_mem_size = size;
    }
  else if (start < bm->buffer_mem_start)
    {
      bm->buffer_mem_size += bm->buffer_mem_start - start;
      bm->buffer_mem_start = start;
      if (size > bm->buffer_mem_size)
        bm->buffer_mem_size = size;
    }
  else if (start > bm->buffer_mem_start)
    {
      uword new_size = start - bm->buffer_mem_start + size;
      if (new_size > bm->buffer_mem_size)
        bm->buffer_mem_size = new_size;
    }

  if ((u64) bm->buffer_mem_size >
      ((u64) 1 << (32 + CLIB_LOG2_CACHE_LINE_BYTES)))
    {
      clib_panic ("buffer memory size out of range!");
    }

  bp->index = bp - bm->buffer_pools;
  bp->buffer_template.buffer_pool_index = bp->index;
  bp->buffer_template.ref_count = 1;
  bp->physmem_map_index = physmem_map_index;
  bp->name = format (0, "%s%c", name, 0);
  bp->data_size = data_size;
  bp->numa_node = m->numa_node;

  vec_validate_aligned (bp->threads, vlib_get_n_threads () - 1,
                        CLIB_CACHE_LINE_BYTES);

  alloc_size = vlib_buffer_alloc_size (bm->ext_hdr_size, data_size);
  n_alloc_per_page = (1ULL << m->log2_page_size) / alloc_size;

  /* preallocate buffer indices memory */
  bp->n_buffers = m->n_pages * n_alloc_per_page;
  bp->buffers = clib_mem_alloc_aligned (bp->n_buffers * sizeof (u32),
                                        CLIB_CACHE_LINE_BYTES);

  clib_spinlock_init (&bp->lock);

  for (j = 0; j < m->n_pages; j++)
    for (i = 0; i < n_alloc_per_page; i++)
      {
        u8 *p;
        u32 bi;

        p = m->base + (j << m->log2_page_size) + i * alloc_size;
        p += bm->ext_hdr_size;

        /*
         * Waste 1 buffer (maximum) so that 0 is never a valid buffer index.
         * Allows various places to ASSERT (bi != 0). Much easier
         * than debugging downstream crashes in successor nodes.
         */
        if (p == m->base)
          continue;

        vlib_buffer_copy_template ((vlib_buffer_t *) p, &bp->buffer_template);

        bi = vlib_get_buffer_index (vm, (vlib_buffer_t *) p);
        bp->buffers[bp->n_avail++] = bi;

        vlib_get_buffer (vm, bi);
      }

  return bp->index;
}
u8 *
format_vlib_buffer_pool (u8 * s, va_list * va)
{
  vlib_main_t *vm = va_arg (*va, vlib_main_t *);
  vlib_buffer_pool_t *bp = va_arg (*va, vlib_buffer_pool_t *);
  vlib_buffer_pool_thread_t *bpt;
  u32 cached = 0;

  if (!bp)
    return format (s, "%-20s%=6s%=6s%=6s%=11s%=6s%=8s%=8s%=8s",
                   "Pool Name", "Index", "NUMA", "Size", "Data Size",
                   "Total", "Avail", "Cached", "Used");

  vec_foreach (bpt, bp->threads)
    cached += bpt->n_cached;

  s = format (s, "%-20s%=6d%=6d%=6u%=11u%=6u%=8u%=8u%=8u",
              bp->name, bp->index, bp->numa_node, bp->data_size +
              sizeof (vlib_buffer_t) + vm->buffer_main->ext_hdr_size,
              bp->data_size, bp->n_buffers, bp->n_avail, cached,
              bp->n_buffers - bp->n_avail - cached);

  return s;
}
static clib_error_t *
show_buffers (vlib_main_t * vm,
              unformat_input_t * input, vlib_cli_command_t * cmd)
{
  vlib_buffer_main_t *bm = vm->buffer_main;
  vlib_buffer_pool_t *bp;

  vlib_cli_output (vm, "%U", format_vlib_buffer_pool, vm, 0);

  vec_foreach (bp, bm->buffer_pools)
    vlib_cli_output (vm, "%U", format_vlib_buffer_pool, vm, bp);

  return 0;
}
VLIB_CLI_COMMAND (show_buffers_command, static) = {
  .path = "show buffers",
  .short_help = "Show packet buffer allocation",
  .function = show_buffers,
};
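/*
 * CLI usage note (added; not part of the original file): at the VPP debug
 * CLI the command registered above is invoked as
 *
 *   vpp# show buffers
 *
 * and prints one header row plus one row per pool using the columns
 * produced by format_vlib_buffer_pool (Pool Name, Index, NUMA, Size,
 * Data Size, Total, Avail, Cached, Used).
 */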
clib_error_t *
vlib_buffer_worker_init (vlib_main_t * vm)
{
  vlib_buffer_main_t *bm = vm->buffer_main;
  vlib_buffer_pool_t *bp;

  vec_foreach (bp, bm->buffer_pools)
    {
      clib_spinlock_lock (&bp->lock);
      vec_validate_aligned (bp->threads, vlib_get_n_threads () - 1,
                            CLIB_CACHE_LINE_BYTES);
      clib_spinlock_unlock (&bp->lock);
    }

  return 0;
}

VLIB_WORKER_INIT_FUNCTION (vlib_buffer_worker_init);
static clib_error_t *
vlib_buffer_main_init_numa_alloc (struct vlib_main_t *vm, u32 numa_node,
                                  u32 * physmem_map_index,
                                  clib_mem_page_sz_t log2_page_size,
                                  u8 unpriv)
{
  vlib_buffer_main_t *bm = vm->buffer_main;
  u32 buffers_per_numa = bm->buffers_per_numa;
  clib_error_t *error;
  u32 buffer_size;
  uword n_pages, pagesize;
  u8 *name = 0;

  ASSERT (log2_page_size != CLIB_MEM_PAGE_SZ_UNKNOWN);

  pagesize = clib_mem_page_bytes (log2_page_size);
  buffer_size = vlib_buffer_alloc_size (bm->ext_hdr_size,
                                        vlib_buffer_get_default_data_size
                                        (vm));
  if (buffer_size > pagesize)
    return clib_error_return (0, "buffer size (%llu) is greater than page "
                              "size (%llu)", buffer_size, pagesize);

  if (buffers_per_numa == 0)
    buffers_per_numa = unpriv ? VLIB_BUFFER_DEFAULT_BUFFERS_PER_NUMA_UNPRIV :
      VLIB_BUFFER_DEFAULT_BUFFERS_PER_NUMA;

  name = format (0, "buffers-numa-%d%c", numa_node, 0);
  n_pages = (buffers_per_numa - 1) / (pagesize / buffer_size) + 1;
  error = vlib_physmem_shared_map_create (vm, (char *) name,
                                          n_pages * pagesize,
                                          min_log2 (pagesize), numa_node,
                                          physmem_map_index);
  vec_free (name);
  return error;
}
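/*
 * Worked example (added; not part of the original file): with 2 MB pages
 * and a hypothetical per-buffer footprint of 2240 bytes, one page holds
 * 2097152 / 2240 = 936 buffers, so the default of 16384 buffers per NUMA
 * node requests (16384 - 1) / 936 + 1 = 18 pages, i.e. 36 MB of physmem.
 */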
static clib_error_t *
vlib_buffer_main_init_numa_node (struct vlib_main_t *vm, u32 numa_node,
                                 u8 * index)
{
  vlib_buffer_main_t *bm = vm->buffer_main;
  u32 physmem_map_index;
  clib_error_t *error;
  u8 *name = 0;

  if (bm->log2_page_size == CLIB_MEM_PAGE_SZ_UNKNOWN)
    {
      error = vlib_buffer_main_init_numa_alloc (vm, numa_node,
                                                &physmem_map_index,
                                                CLIB_MEM_PAGE_SZ_DEFAULT_HUGE,
                                                0 /* unpriv */ );
      if (!error)
        goto buffer_pool_create;

      /* If alloc failed, retry without hugepages */
      vlib_log_warn (bm->log_default,
                     "numa[%u] falling back to non-hugepage backed "
                     "buffer pool (%U)", numa_node, format_clib_error, error);
      clib_error_free (error);

      error = vlib_buffer_main_init_numa_alloc (vm, numa_node,
                                                &physmem_map_index,
                                                CLIB_MEM_PAGE_SZ_DEFAULT,
                                                1 /* unpriv */ );
    }
  else
    error = vlib_buffer_main_init_numa_alloc (vm, numa_node,
                                              &physmem_map_index,
                                              bm->log2_page_size,
                                              0 /* unpriv */ );
  if (error)
    return error;

buffer_pool_create:
  name = format (name, "default-numa-%d%c", numa_node, 0);
  *index = vlib_buffer_pool_create (vm, (char *) name,
                                    vlib_buffer_get_default_data_size (vm),
                                    physmem_map_index);

  if (*index == (u8) ~ 0)
    error = clib_error_return (0, "maximum number of buffer pools reached");
  vec_free (name);

  return error;
}
static void
vlib_buffer_main_alloc (vlib_main_t * vm)
{
  vlib_buffer_main_t *bm;

  if (vm->buffer_main)
    return;

  vm->buffer_main = bm = clib_mem_alloc (sizeof (bm[0]));
  clib_memset (vm->buffer_main, 0, sizeof (bm[0]));
  bm->default_data_size = VLIB_BUFFER_DEFAULT_DATA_SIZE;
}
static u32
buffer_get_cached (vlib_buffer_pool_t * bp)
{
  u32 cached = 0;
  vlib_buffer_pool_thread_t *bpt;

  clib_spinlock_lock (&bp->lock);

  vec_foreach (bpt, bp->threads)
    cached += bpt->n_cached;

  clib_spinlock_unlock (&bp->lock);

  return cached;
}
static vlib_buffer_pool_t *
buffer_get_by_index (vlib_buffer_main_t * bm, u32 index)
{
  vlib_buffer_pool_t *bp;
  if (!bm->buffer_pools || vec_len (bm->buffer_pools) < index)
    return 0;
  bp = vec_elt_at_index (bm->buffer_pools, index);
  return bp;
}
static void
buffer_gauges_update_used_fn (stat_segment_directory_entry_t * e, u32 index)
{
  vlib_main_t *vm = vlib_get_main ();
  vlib_buffer_pool_t *bp = buffer_get_by_index (vm->buffer_main, index);
  if (!bp)
    return;

  e->value = bp->n_buffers - bp->n_avail - buffer_get_cached (bp);
}

static void
buffer_gauges_update_available_fn (stat_segment_directory_entry_t * e,
                                   u32 index)
{
  vlib_main_t *vm = vlib_get_main ();
  vlib_buffer_pool_t *bp = buffer_get_by_index (vm->buffer_main, index);
  if (!bp)
    return;

  e->value = bp->n_avail;
}

static void
buffer_gauges_update_cached_fn (stat_segment_directory_entry_t * e, u32 index)
{
  vlib_main_t *vm = vlib_get_main ();
  vlib_buffer_pool_t *bp = buffer_get_by_index (vm->buffer_main, index);
  if (!bp)
    return;

  e->value = buffer_get_cached (bp);
}
clib_error_t *
vlib_buffer_main_init (struct vlib_main_t * vm)
{
  vlib_buffer_main_t *bm;
  clib_error_t *err;
  clib_bitmap_t *bmp = 0, *bmp_has_memory = 0;
  u32 numa_node;
  vlib_buffer_pool_t *bp;
  u8 *name = 0, first_valid_buffer_pool_index = ~0;

  vlib_buffer_main_alloc (vm);

  bm = vm->buffer_main;
  bm->log_default = vlib_log_register_class ("buffer", 0);
  bm->ext_hdr_size = __vlib_buffer_external_hdr_size;

  clib_spinlock_init (&bm->buffer_known_hash_lockp);

  if ((err = clib_sysfs_read ("/sys/devices/system/node/online", "%U",
                              unformat_bitmap_list, &bmp)))
    clib_error_free (err);

  if ((err = clib_sysfs_read ("/sys/devices/system/node/has_memory", "%U",
                              unformat_bitmap_list, &bmp_has_memory)))
    clib_error_free (err);

  if (bmp && bmp_has_memory)
    bmp = clib_bitmap_and (bmp, bmp_has_memory);

  /* no info from sysfs, assuming that only numa 0 exists */
  if (bmp == 0)
    bmp = clib_bitmap_set (bmp, 0, 1);

  if (clib_bitmap_last_set (bmp) >= VLIB_BUFFER_MAX_NUMA_NODES)
    clib_panic ("system has more than %u NUMA nodes",
                VLIB_BUFFER_MAX_NUMA_NODES);

  clib_bitmap_foreach (numa_node, bmp)
    {
      u8 *index = bm->default_buffer_pool_index_for_numa + numa_node;

      if ((err = vlib_buffer_main_init_numa_node (vm, numa_node, index)))
        {
          clib_error_report (err);
          clib_error_free (err);
          continue;
        }

      if (first_valid_buffer_pool_index == 0xff)
        first_valid_buffer_pool_index = index[0];
    }

  if (first_valid_buffer_pool_index == (u8) ~ 0)
    {
      err = clib_error_return (0, "failed to allocate buffer pool(s)");
      goto done;
    }

  clib_bitmap_foreach (numa_node, bmp)
    {
      if (bm->default_buffer_pool_index_for_numa[numa_node] == (u8) ~0)
        bm->default_buffer_pool_index_for_numa[numa_node] =
          first_valid_buffer_pool_index;
    }

  vec_foreach (bp, bm->buffer_pools)
    {
      if (bp->n_buffers == 0)
        continue;

      vec_reset_length (name);
      name = format (name, "/buffer-pools/%s/cached%c", bp->name, 0);
      stat_segment_register_gauge (name, buffer_gauges_update_cached_fn,
                                   bp - bm->buffer_pools);

      vec_reset_length (name);
      name = format (name, "/buffer-pools/%s/used%c", bp->name, 0);
      stat_segment_register_gauge (name, buffer_gauges_update_used_fn,
                                   bp - bm->buffer_pools);

      vec_reset_length (name);
      name = format (name, "/buffer-pools/%s/available%c", bp->name, 0);
      stat_segment_register_gauge (name, buffer_gauges_update_available_fn,
                                   bp - bm->buffer_pools);
    }

done:
  vec_free (bmp);
  vec_free (bmp_has_memory);
  vec_free (name);
  return err;
}
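/*
 * Illustrative note (added; not part of the original file): for the default
 * pool on NUMA node 0 the gauges registered above show up in the stat
 * segment under names such as
 *
 *   /buffer-pools/default-numa-0/cached
 *   /buffer-pools/default-numa-0/used
 *   /buffer-pools/default-numa-0/available
 *
 * and can be read with a stat segment client such as vpp_get_stats.
 */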
static clib_error_t *
vlib_buffers_configure (vlib_main_t * vm, unformat_input_t * input)
{
  vlib_buffer_main_t *bm;

  vlib_buffer_main_alloc (vm);

  bm = vm->buffer_main;
  bm->log2_page_size = CLIB_MEM_PAGE_SZ_UNKNOWN;

  while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
    {
      if (unformat (input, "buffers-per-numa %u", &bm->buffers_per_numa))
        ;
      else if (unformat (input, "page-size %U", unformat_log2_page_size,
                         &bm->log2_page_size))
        ;
      else if (unformat (input, "default data-size %u",
                         &bm->default_data_size))
        ;
      else
        return unformat_parse_error (input);
    }

  unformat_free (input);
  return 0;
}

VLIB_EARLY_CONFIG_FUNCTION (vlib_buffers_configure, "buffers");
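/*
 * Configuration sketch (added; not part of the original file): the
 * parameters parsed above correspond to a startup.conf stanza such as
 *
 *   buffers {
 *     buffers-per-numa 131072
 *     default data-size 2048
 *     page-size default-hugepage
 *   }
 *
 * where the accepted page-size spellings are whatever
 * unformat_log2_page_size understands (e.g. "4k", "2m", "default-hugepage").
 */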
#if VLIB_BUFFER_ALLOC_FAULT_INJECTOR > 0
u32
vlib_buffer_alloc_may_fail (vlib_main_t * vm, u32 n_buffers)
{
  f64 r;

  r = random_f64 (&vm->buffer_alloc_success_seed);

  /* Fail this request? */
  if (r > vm->buffer_alloc_success_rate)
    n_buffers--;

  /* 5% chance of returning nothing at all */
  if (r > vm->buffer_alloc_success_rate && r > 0.95)
    return 0;

  return n_buffers;
}
#endif
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */