/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/*
 * buffer.c: allocate/free network buffers.
 *
 * Copyright (c) 2008 Eliot Dresselhaus
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include <rte_config.h>

#include <rte_common.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_tailq.h>
#include <rte_per_lcore.h>
#include <rte_launch.h>
#include <rte_atomic.h>
#include <rte_cycles.h>
#include <rte_prefetch.h>
#include <rte_lcore.h>
#include <rte_per_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>		/* struct rte_mbuf, rte_mbuf_refcnt_*, rte_pktmbuf_free */

#include <vlib/vlib.h>
phys_addr_t __attribute__ ((weak)) rte_mem_virt2phy();
int __attribute__ ((weak)) rte_eal_has_hugepages();
unsigned __attribute__ ((weak)) rte_socket_id();
struct rte_mempool * __attribute__ ((weak)) rte_mempool_create();
void __attribute__ ((weak)) rte_pktmbuf_init();
void __attribute__ ((weak)) rte_pktmbuf_pool_init();
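
/* These DPDK symbols are declared weak so that this file also links into
   images built without DPDK; the code tests them at run time before use
   (e.g. rte_socket_id in fill_free_list and rte_mempool_create in
   vlib_buffer_pool_create below). */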
uword vlib_buffer_length_in_chain_slow_path (vlib_main_t * vm, vlib_buffer_t * b_first)
{
  vlib_buffer_t * b = b_first;
  uword l_first = b_first->current_length;
  uword l = 0;

  while (b->flags & VLIB_BUFFER_NEXT_PRESENT)
    {
      b = vlib_get_buffer (vm, b->next_buffer);
      l += b->current_length;
    }

  b_first->total_length_not_including_first_buffer = l;
  b_first->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
  return l + l_first;
}
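
/* Once computed, the chain length is cached in
   total_length_not_including_first_buffer and marked with
   VLIB_BUFFER_TOTAL_LENGTH_VALID, so later length queries on the same
   chain can skip the walk above. */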
u8 * format_vlib_buffer (u8 * s, va_list * args)
{
  vlib_buffer_t * b = va_arg (*args, vlib_buffer_t *);
  uword indent = format_get_indent (s);

  s = format (s, "current data %d, length %d, free-list %d",
              b->current_data, b->current_length, b->free_list_index);

  if (b->flags & VLIB_BUFFER_TOTAL_LENGTH_VALID)
    s = format (s, ", totlen-nifb %d",
                b->total_length_not_including_first_buffer);

  if (b->flags & VLIB_BUFFER_IS_TRACED)
    s = format (s, ", trace 0x%x", b->trace_index);

  while (b->flags & VLIB_BUFFER_NEXT_PRESENT)
    {
      vlib_main_t * vm = vlib_get_main();
      u32 next_buffer = b->next_buffer;
      b = vlib_get_buffer (vm, next_buffer);

      s = format (s, "\n%Unext-buffer 0x%x, segment length %d",
                  format_white_space, indent, next_buffer, b->current_length);
    }

  return s;
}
u8 * format_vlib_buffer_and_data (u8 * s, va_list * args)
{
  vlib_buffer_t * b = va_arg (*args, vlib_buffer_t *);

  s = format (s, "%U, %U",
              format_vlib_buffer, b,
              format_hex_bytes, vlib_buffer_get_current (b), 64);

  return s;
}
u8 * format_vlib_buffer_contents (u8 * s, va_list * va)
{
  vlib_main_t * vm = va_arg (*va, vlib_main_t *);
  vlib_buffer_t * b = va_arg (*va, vlib_buffer_t *);

  while (1)
    {
      vec_add (s, vlib_buffer_get_current (b), b->current_length);
      if (! (b->flags & VLIB_BUFFER_NEXT_PRESENT))
        break;
      b = vlib_get_buffer (vm, b->next_buffer);
    }

  return s;
}
vlib_main_t **vlib_mains;
/* Aligned copy routine. */
void
vlib_aligned_memcpy (void * _dst, void * _src, int n_bytes)
{
  vlib_copy_unit_t * dst = _dst;
  vlib_copy_unit_t * src = _src;

  /* Arguments must be naturally aligned. */
  ASSERT (pointer_to_uword (dst) % sizeof (dst[0]) == 0);
  ASSERT (pointer_to_uword (src) % sizeof (src[0]) == 0);
  ASSERT (n_bytes % sizeof (dst[0]) == 0);

  if (4 * sizeof (dst[0]) == CLIB_CACHE_LINE_BYTES)
    {
      CLIB_PREFETCH (dst + 0, 4 * sizeof (dst[0]), WRITE);
      CLIB_PREFETCH (src + 0, 4 * sizeof (src[0]), READ);

      while (n_bytes >= 4 * sizeof (dst[0]))
        {
          dst += 4;
          src += 4;
          n_bytes -= 4 * sizeof (dst[0]);
          CLIB_PREFETCH (dst, 4 * sizeof (dst[0]), WRITE);
          CLIB_PREFETCH (src, 4 * sizeof (src[0]), READ);
          dst[-4] = src[-4]; dst[-3] = src[-3];
          dst[-2] = src[-2]; dst[-1] = src[-1];
        }
    }
  else if (8 * sizeof (dst[0]) == CLIB_CACHE_LINE_BYTES)
    {
      CLIB_PREFETCH (dst + 0, 8 * sizeof (dst[0]), WRITE);
      CLIB_PREFETCH (src + 0, 8 * sizeof (src[0]), READ);

      while (n_bytes >= 8 * sizeof (dst[0]))
        {
          dst += 8;
          src += 8;
          n_bytes -= 8 * sizeof (dst[0]);
          CLIB_PREFETCH (dst, 8 * sizeof (dst[0]), WRITE);
          CLIB_PREFETCH (src, 8 * sizeof (src[0]), READ);
          dst[-8] = src[-8]; dst[-7] = src[-7];
          dst[-6] = src[-6]; dst[-5] = src[-5];
          dst[-4] = src[-4]; dst[-3] = src[-3];
          dst[-2] = src[-2]; dst[-1] = src[-1];
        }
    }
  else
    /* Cache line size unknown: fall back to slow version. */;

  while (n_bytes > 0)
    {
      *dst++ = *src++;
      n_bytes -= 1 * sizeof (dst[0]);
    }
}
#define BUFFERS_PER_COPY (sizeof (vlib_copy_unit_t) / sizeof (u32))
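
/* Free lists keep buffer indices in two vectors: aligned_buffers holds a
   multiple of BUFFERS_PER_COPY indices at vlib_copy_unit_t alignment so bulk
   alloc/free can move them with vlib_aligned_memcpy, while unaligned_buffers
   absorbs the odd leftovers that are handled one index at a time. */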
/* Make sure we have at least given number of unaligned buffers. */
static void
fill_unaligned (vlib_main_t * vm,
                vlib_buffer_free_list_t * free_list,
                uword n_unaligned_buffers)
{
  word la = vec_len (free_list->aligned_buffers);
  word lu = vec_len (free_list->unaligned_buffers);

  /* Aligned come in aligned copy-sized chunks. */
  ASSERT (la % BUFFERS_PER_COPY == 0);

  ASSERT (la >= n_unaligned_buffers);

  while (lu < n_unaligned_buffers)
    {
      /* Copy 4 buffers from end of aligned vector to unaligned vector. */
      vec_add (free_list->unaligned_buffers,
               free_list->aligned_buffers + la - BUFFERS_PER_COPY,
               BUFFERS_PER_COPY);
      la -= BUFFERS_PER_COPY;
      lu += BUFFERS_PER_COPY;
    }
  _vec_len (free_list->aligned_buffers) = la;
}
/* After free aligned buffers may not contain even sized chunks. */
static void
trim_aligned (vlib_buffer_free_list_t * f)
{
  uword l, n_trim;

  /* Add unaligned to aligned before trim. */
  l = vec_len (f->unaligned_buffers);
  if (l > 0)
    {
      vec_add_aligned (f->aligned_buffers, f->unaligned_buffers, l,
                       /* align */ sizeof (vlib_copy_unit_t));
      _vec_len (f->unaligned_buffers) = 0;
    }

  /* Remove unaligned buffers from end of aligned vector and save for next trim. */
  l = vec_len (f->aligned_buffers);
  n_trim = l % BUFFERS_PER_COPY;
  if (n_trim)
    {
      /* Trim aligned -> unaligned. */
      vec_add (f->unaligned_buffers, f->aligned_buffers + l - n_trim, n_trim);

      /* Remove from aligned. */
      _vec_len (f->aligned_buffers) = l - n_trim;
    }
}
static void
merge_free_lists (vlib_buffer_free_list_t * dst,
                  vlib_buffer_free_list_t * src)
{
  uword l;
  u32 * d;

  trim_aligned (src);
  trim_aligned (dst);

  l = vec_len (src->aligned_buffers);
  if (l > 0)
    {
      vec_add2_aligned (dst->aligned_buffers, d, l,
                        /* align */ sizeof (vlib_copy_unit_t));
      vlib_aligned_memcpy (d, src->aligned_buffers, l * sizeof (d[0]));
      vec_free (src->aligned_buffers);
    }

  l = vec_len (src->unaligned_buffers);
  if (l > 0)
    {
      vec_add (dst->unaligned_buffers, src->unaligned_buffers, l);
      vec_free (src->unaligned_buffers);
    }
}
always_inline u32
vlib_buffer_get_free_list_with_size (vlib_main_t * vm, u32 size)
{
  vlib_buffer_main_t * bm = vm->buffer_main;

  size = vlib_buffer_round_size (size);
  uword * p = hash_get (bm->free_list_by_size, size);
  return p ? p[0] : ~0;
}
/* Add buffer free list. */
static u32
vlib_buffer_create_free_list_helper (vlib_main_t * vm,
                                     u32 n_data_bytes,
                                     u32 is_public,
                                     u32 is_default,
                                     u8 * name)
{
  vlib_buffer_main_t * bm = vm->buffer_main;
  vlib_buffer_free_list_t * f;

  if (! is_default && pool_elts (bm->buffer_free_list_pool) == 0)
    {
      u32 default_free_free_list_index;

      default_free_free_list_index =
        vlib_buffer_create_free_list_helper (vm,
                                             /* default buffer size */ VLIB_BUFFER_DEFAULT_FREE_LIST_BYTES,
                                             /* is_public */ 1,
                                             /* is_default */ 1,
                                             (u8 *) "default");
      ASSERT (default_free_free_list_index == VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);

      if (n_data_bytes == VLIB_BUFFER_DEFAULT_FREE_LIST_BYTES && is_public)
        return default_free_free_list_index;
    }

  pool_get_aligned (bm->buffer_free_list_pool, f, CLIB_CACHE_LINE_BYTES);

  memset (f, 0, sizeof (f[0]));
  f->index = f - bm->buffer_free_list_pool;
  f->n_data_bytes = vlib_buffer_round_size (n_data_bytes);
  f->min_n_buffers_each_physmem_alloc = 16;
  f->name = clib_mem_is_heap_object (name) ? name : format (0, "%s", name);

  /* Setup free buffer template. */
  f->buffer_init_template.free_list_index = f->index;

  if (is_public)
    {
      uword * p = hash_get (bm->free_list_by_size, f->n_data_bytes);
      if (! p)
        hash_set (bm->free_list_by_size, f->n_data_bytes, f->index);
    }

  return f->index;
}
u32 vlib_buffer_create_free_list (vlib_main_t * vm, u32 n_data_bytes,
                                  char * fmt, ...)
{
  va_list va;
  u8 * name;

  va_start (va, fmt);
  name = va_format (0, fmt, &va);
  va_end (va);

  return vlib_buffer_create_free_list_helper (vm, n_data_bytes,
                                              /* is_public */ 0,
                                              /* is_default */ 0,
                                              name);
}
u32 vlib_buffer_get_or_create_free_list (vlib_main_t * vm, u32 n_data_bytes,
                                         char * fmt, ...)
{
  u32 i = vlib_buffer_get_free_list_with_size (vm, n_data_bytes);

  if (i == ~0)
    {
      va_list va;
      u8 * name;

      va_start (va, fmt);
      name = va_format (0, fmt, &va);
      va_end (va);

      i = vlib_buffer_create_free_list_helper (vm, n_data_bytes,
                                               /* is_public */ 1,
                                               /* is_default */ 0,
                                               name);
    }

  return i;
}
static void
del_free_list (vlib_main_t * vm, vlib_buffer_free_list_t * f)
{
  u32 i;
  struct rte_mbuf * mb;
  vlib_buffer_t * b;

  for (i = 0; i < vec_len (f->unaligned_buffers); i++) {
      b = vlib_get_buffer (vm, f->unaligned_buffers[i]);
      mb = ((struct rte_mbuf *)b)-1;
      ASSERT(rte_mbuf_refcnt_read(mb) == 1);
      rte_pktmbuf_free (mb);
  }
  for (i = 0; i < vec_len (f->aligned_buffers); i++) {
      b = vlib_get_buffer (vm, f->aligned_buffers[i]);
      mb = ((struct rte_mbuf *)b)-1;
      ASSERT(rte_mbuf_refcnt_read(mb) == 1);
      rte_pktmbuf_free (mb);
  }
  vec_free (f->unaligned_buffers);
  vec_free (f->aligned_buffers);
}
/* Delete buffer free list. */
void vlib_buffer_delete_free_list (vlib_main_t * vm, u32 free_list_index)
{
  vlib_buffer_main_t * bm = vm->buffer_main;
  vlib_buffer_free_list_t * f;
  u32 merge_index;

  f = vlib_buffer_get_free_list (vm, free_list_index);

  merge_index = vlib_buffer_get_free_list_with_size (vm, f->n_data_bytes);
  if (merge_index != ~0 && merge_index != free_list_index)
    {
      merge_free_lists (pool_elt_at_index (bm->buffer_free_list_pool,
                                           merge_index), f);
    }

  del_free_list (vm, f);

  /* Poison it. */
  memset (f, 0xab, sizeof (f[0]));

  pool_put (bm->buffer_free_list_pool, f);
}
/* Make sure free list has at least given number of free buffers. */
static uword
fill_free_list (vlib_main_t * vm,
                vlib_buffer_free_list_t * fl,
                uword min_free_buffers)
{
  vlib_buffer_t * b;
  struct rte_mbuf * mb;
  int n, i;
  u32 bi;
  u32 n_remaining = 0, n_alloc = 0;
  unsigned socket_id = rte_socket_id ? rte_socket_id() : 0;
  struct rte_mempool *rmp = vm->buffer_main->pktmbuf_pools[socket_id];

  /* Too early, or no mempool for this socket? */
  if (PREDICT_FALSE(rmp == 0))
    return 0;

  trim_aligned (fl);

  /* Already have enough free buffers on free list? */
  n = min_free_buffers - vec_len (fl->aligned_buffers);
  if (n <= 0)
    return min_free_buffers;

  /* Always allocate round number of buffers. */
  n = round_pow2 (n, BUFFERS_PER_COPY);

  /* Always allocate new buffers in reasonably large sized chunks. */
  n = clib_max (n, fl->min_n_buffers_each_physmem_alloc);

  vec_validate (vm->mbuf_alloc_list, n-1);

  if (rte_mempool_get_bulk (rmp, vm->mbuf_alloc_list, n) < 0)
    return 0;

  _vec_len (vm->mbuf_alloc_list) = n;

  for (i = 0; i < n; i++)
    {
      mb = vm->mbuf_alloc_list[i];

      ASSERT(rte_mbuf_refcnt_read(mb) == 0);
      rte_mbuf_refcnt_set(mb, 1);

      mb->data_off = RTE_PKTMBUF_HEADROOM;

      b = (vlib_buffer_t *)(mb+1);
      bi = vlib_get_buffer_index (vm, b);

      vec_add1_aligned (fl->aligned_buffers, bi, sizeof (vlib_copy_unit_t));

      vlib_buffer_init_for_free_list (b, fl);

      if (fl->buffer_init_function)
        fl->buffer_init_function (vm, fl, &bi, 1);
    }

  fl->n_alloc += n;

  return n;
}
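
/* Layout note: each element carved out of the DPDK mempool is an rte_mbuf
   header immediately followed by the vlib_buffer_t, so the two headers
   convert with simple pointer arithmetic: b = (vlib_buffer_t *)(mb + 1)
   above, and mb = ((struct rte_mbuf *)b) - 1 wherever an mbuf must be
   recovered from a vlib buffer. */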
always_inline uword
copy_alignment (u32 * x)
{ return (pointer_to_uword (x) / sizeof (x[0])) % BUFFERS_PER_COPY; }
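
/* alloc_from_free_list fills the caller's index array in three phases: a few
   leading indices come one at a time from the unaligned vector until dst
   reaches vlib_copy_unit_t alignment, the bulk is moved in BUFFERS_PER_COPY
   chunks straight off the aligned vector, and any trailing remainder again
   comes from the unaligned vector. */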
static u32
alloc_from_free_list (vlib_main_t * vm,
                      vlib_buffer_free_list_t * free_list,
                      u32 * alloc_buffers,
                      u32 n_alloc_buffers)
{
  u32 * dst, * u_src;
  uword u_len, n_left;
  uword n_unaligned_start, n_unaligned_end, n_filled;

  n_left = n_alloc_buffers;
  dst = alloc_buffers;
  n_unaligned_start = ((BUFFERS_PER_COPY - copy_alignment (dst))
                       & (BUFFERS_PER_COPY - 1));

  n_filled = fill_free_list (vm, free_list, n_alloc_buffers);
  if (n_filled == 0)
    return 0;

  n_left = n_filled < n_left ? n_filled : n_left;
  n_alloc_buffers = n_left;

  if (n_unaligned_start >= n_left)
    {
      n_unaligned_start = n_left;
      n_unaligned_end = 0;
    }
  else
    n_unaligned_end = copy_alignment (dst + n_alloc_buffers);

  fill_unaligned (vm, free_list, n_unaligned_start + n_unaligned_end);

  u_len = vec_len (free_list->unaligned_buffers);
  u_src = free_list->unaligned_buffers + u_len - 1;

  if (n_unaligned_start)
    {
      uword n_copy = n_unaligned_start;
      if (n_copy > n_left)
        n_copy = n_left;
      n_left -= n_copy;

      while (n_copy > 0)
        { *dst++ = *u_src--; n_copy--; u_len--; }

      /* Now dst should be aligned. */
      if (n_left > 0)
        ASSERT (pointer_to_uword (dst) % sizeof (vlib_copy_unit_t) == 0);
    }

  /* Aligned copy. */
  {
    vlib_copy_unit_t * d, * s;
    uword n_copy;

    if (vec_len(free_list->aligned_buffers) < ((n_left/BUFFERS_PER_COPY)*BUFFERS_PER_COPY))
      abort();

    n_copy = n_left / BUFFERS_PER_COPY;
    n_left = n_left % BUFFERS_PER_COPY;

    /* Remove buffers from aligned free list. */
    _vec_len (free_list->aligned_buffers) -= n_copy * BUFFERS_PER_COPY;

    s = (vlib_copy_unit_t *) vec_end (free_list->aligned_buffers);
    d = (vlib_copy_unit_t *) dst;

    /* Fast path loop. */
    while (n_copy >= 4)
      {
        d[0] = s[0]; d[1] = s[1]; d[2] = s[2]; d[3] = s[3];
        n_copy -= 4; s += 4; d += 4;
      }
    while (n_copy >= 1)
      {
        d[0] = s[0];
        n_copy -= 1; s += 1; d += 1;
      }

    dst += n_alloc_buffers - n_left;
  }

  /* Unaligned copy. */
  ASSERT (n_unaligned_end == n_left);
  while (n_left > 0)
    { *dst++ = *u_src--; n_left--; u_len--; }

  if (! free_list->unaligned_buffers)
    ASSERT (u_len == 0);
  else
    _vec_len (free_list->unaligned_buffers) = u_len;

  return n_alloc_buffers;
}
/* Allocate a given number of buffers into given array.
   Returns number actually allocated which will be either zero or
   number requested. */
u32 vlib_buffer_alloc (vlib_main_t * vm, u32 * buffers, u32 n_buffers)
{
  vlib_buffer_main_t * bm = vm->buffer_main;

  return alloc_from_free_list
    (vm,
     pool_elt_at_index (bm->buffer_free_list_pool,
                        VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX),
     buffers, n_buffers);
}
u32 vlib_buffer_alloc_from_free_list (vlib_main_t * vm,
                                      u32 * buffers,
                                      u32 n_buffers,
                                      u32 free_list_index)
{
  vlib_buffer_main_t * bm = vm->buffer_main;
  vlib_buffer_free_list_t * f;
  f = pool_elt_at_index (bm->buffer_free_list_pool, free_list_index);
  return alloc_from_free_list (vm, f, buffers, n_buffers);
}
always_inline void
add_buffer_to_free_list (vlib_main_t * vm,
                         vlib_buffer_free_list_t * f,
                         u32 buffer_index, u8 do_init)
{
  vlib_buffer_t * b;
  b = vlib_get_buffer (vm, buffer_index);
  if (PREDICT_TRUE(do_init))
    vlib_buffer_init_for_free_list (b, f);
  vec_add1_aligned (f->aligned_buffers, buffer_index, sizeof (vlib_copy_unit_t));
}
always_inline vlib_buffer_free_list_t *
buffer_get_free_list (vlib_main_t * vm, vlib_buffer_t * b, u32 * index)
{
  vlib_buffer_main_t * bm = vm->buffer_main;
  u32 i;

  *index = i = b->free_list_index;
  return pool_elt_at_index (bm->buffer_free_list_pool, i);
}
void *vlib_set_buffer_free_callback (vlib_main_t *vm, void *fp)
{
  vlib_buffer_main_t * bm = vm->buffer_main;
  void * rv = bm->buffer_free_callback;

  bm->buffer_free_callback = fp;
  return rv;
}
static_always_inline void
vlib_buffer_free_inline (vlib_main_t * vm,
                         u32 * buffers,
                         u32 n_buffers,
                         u32 follow_buffer_next)
{
  vlib_buffer_main_t * bm = vm->buffer_main;
  vlib_buffer_free_list_t * fl;
  u32 fi;
  int i;
  u32 (*cb)(vlib_main_t * vm, u32 * buffers, u32 n_buffers,
            u32 follow_buffer_next);

  cb = bm->buffer_free_callback;

  if (PREDICT_FALSE (cb != 0))
    n_buffers = (*cb)(vm, buffers, n_buffers, follow_buffer_next);

  if (! n_buffers)
    return;

  for (i = 0; i < n_buffers; i++)
    {
      vlib_buffer_t * b;
      struct rte_mbuf * mb;

      b = vlib_get_buffer (vm, buffers[i]);

      fl = buffer_get_free_list (vm, b, &fi);

      /* The only current use of this callback: multicast recycle */
      if (PREDICT_FALSE (fl->buffers_added_to_freelist_function != 0))
        {
          int j;

          add_buffer_to_free_list (vm, fl, buffers[i], b->clone_count == 0);

          for (j = 0; j < vec_len (bm->announce_list); j++)
            {
              if (fl == bm->announce_list[j])
                goto already_announced;
            }
          vec_add1 (bm->announce_list, fl);
        already_announced:
          ;
        }
      else
        {
          if (PREDICT_TRUE (b->clone_count == 0))
            {
              mb = ((struct rte_mbuf *)b)-1;
              ASSERT(rte_mbuf_refcnt_read(mb) == 1);
              rte_pktmbuf_free (mb);
            }
        }
    }

  if (vec_len(bm->announce_list))
    {
      vlib_buffer_free_list_t * fl;
      for (i = 0; i < vec_len (bm->announce_list); i++)
        {
          fl = bm->announce_list[i];
          fl->buffers_added_to_freelist_function (vm, fl);
        }
      _vec_len(bm->announce_list) = 0;
    }
}
void vlib_buffer_free (vlib_main_t * vm,
                       u32 * buffers,
                       u32 n_buffers)
{
  vlib_buffer_free_inline (vm, buffers, n_buffers, /* follow_buffer_next */ 1);
}

void vlib_buffer_free_no_next (vlib_main_t * vm,
                               u32 * buffers,
                               u32 n_buffers)
{
  vlib_buffer_free_inline (vm, buffers, n_buffers, /* follow_buffer_next */ 0);
}
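
/* A minimal usage sketch (hypothetical caller, not part of this file):
 *
 *   u32 bi[4];
 *   if (vlib_buffer_alloc (vm, bi, 4) == 4)
 *     {
 *       vlib_buffer_t * b0 = vlib_get_buffer (vm, bi[0]);
 *       // ... set b0->current_data / b0->current_length, fill in packet data ...
 *       vlib_buffer_free (vm, bi, 4);   // follows next_buffer chains
 *     }
 *
 * vlib_buffer_free_no_next is identical except that it frees only the buffers
 * named in the array and does not walk next_buffer chains. */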
/* Copy template packet data into buffers as they are allocated. */
__attribute__((unused)) static void
vlib_packet_template_buffer_init (vlib_main_t * vm,
                                  vlib_buffer_free_list_t * fl,
                                  u32 * buffers,
                                  u32 n_buffers)
{
  vlib_packet_template_t * t = uword_to_pointer (fl->buffer_init_function_opaque,
                                                 vlib_packet_template_t *);
  uword i;

  for (i = 0; i < n_buffers; i++)
    {
      vlib_buffer_t * b = vlib_get_buffer (vm, buffers[i]);
      ASSERT (b->current_length == vec_len (t->packet_data));
      memcpy (vlib_buffer_get_current (b), t->packet_data, b->current_length);
    }
}
void vlib_packet_template_init (vlib_main_t * vm,
                                vlib_packet_template_t * t,
                                void * packet_data,
                                uword n_packet_data_bytes,
                                uword min_n_buffers_each_physmem_alloc,
                                char * fmt, ...)
{
  va_list va;
  __attribute__((unused)) u8 * name;

  va_start (va, fmt);
  name = va_format (0, fmt, &va);
  va_end (va);

  vlib_worker_thread_barrier_sync(vm);
  memset (t, 0, sizeof (t[0]));

  vec_add (t->packet_data, packet_data, n_packet_data_bytes);

  vlib_worker_thread_barrier_release(vm);
}
void *
vlib_packet_template_get_packet (vlib_main_t * vm,
                                 vlib_packet_template_t * t,
                                 u32 * bi_result)
{
  u32 bi;
  vlib_buffer_t * b;

  if (vlib_buffer_alloc (vm, &bi, 1) != 1)
    return 0;

  *bi_result = bi;

  b = vlib_get_buffer (vm, bi);
  memcpy (vlib_buffer_get_current (b),
          t->packet_data, vec_len(t->packet_data));
  b->current_length = vec_len(t->packet_data);

  /* Fix up mbuf header length fields */
  struct rte_mbuf * mb;
  mb = ((struct rte_mbuf *)b) - 1;
  mb->data_len = b->current_length;
  mb->pkt_len = b->current_length;

  return b->data;
}
/* Append given data to end of buffer, possibly allocating new buffers. */
u32 vlib_buffer_add_data (vlib_main_t * vm,
                          u32 free_list_index,
                          u32 buffer_index,
                          void * data, u32 n_data_bytes)
{
  u32 n_buffer_bytes, n_left, n_left_this_buffer, bi;
  vlib_buffer_t * b;
  void * d;

  bi = buffer_index;
  if (bi == 0
      && 1 != vlib_buffer_alloc_from_free_list (vm, &bi, 1, free_list_index))
    goto out_of_buffers;

  d = data;
  n_left = n_data_bytes;
  n_buffer_bytes = vlib_buffer_free_list_buffer_size (vm, free_list_index);

  b = vlib_get_buffer (vm, bi);
  b->flags &= ~VLIB_BUFFER_TOTAL_LENGTH_VALID;

  /* Get to the end of the chain before we try to append data...*/
  while (b->flags & VLIB_BUFFER_NEXT_PRESENT)
    b = vlib_get_buffer (vm, b->next_buffer);

  while (1)
    {
      u32 n;

      ASSERT (n_buffer_bytes >= b->current_length);
      n_left_this_buffer = n_buffer_bytes - (b->current_data + b->current_length);
      n = clib_min (n_left_this_buffer, n_left);
      memcpy (vlib_buffer_get_current (b) + b->current_length, d, n);
      b->current_length += n;
      n_left -= n;
      if (n_left == 0)
        break;

      d += n;
      if (1 != vlib_buffer_alloc_from_free_list (vm, &b->next_buffer, 1, free_list_index))
        goto out_of_buffers;

      b->flags |= VLIB_BUFFER_NEXT_PRESENT;

      b = vlib_get_buffer (vm, b->next_buffer);
    }

  return bi;

 out_of_buffers:
  clib_error ("out of buffers");
  return bi;
}
u16
vlib_buffer_chain_append_data_with_alloc(vlib_main_t *vm,
                                         u32 free_list_index,
                                         vlib_buffer_t *first,
                                         vlib_buffer_t **last,
                                         void * data, u16 data_len) {
  vlib_buffer_t *l = *last;
  u32 n_buffer_bytes = vlib_buffer_free_list_buffer_size (vm, free_list_index);
  u16 copied = 0;
  ASSERT(n_buffer_bytes >= l->current_length + l->current_data);
  while (data_len) {
    u16 max = n_buffer_bytes - l->current_length - l->current_data;
    if (max == 0) {
      if (1 != vlib_buffer_alloc_from_free_list (vm, &l->next_buffer, 1, free_list_index))
        return copied;
      *last = l = vlib_buffer_chain_buffer(vm, first, l, l->next_buffer);
      max = n_buffer_bytes - l->current_length - l->current_data;
    }

    u16 len = (data_len > max) ? max : data_len;
    rte_memcpy(vlib_buffer_get_current (l) + l->current_length, data + copied, len);
    vlib_buffer_chain_increase_length(first, l, len);
    data_len -= len;
    copied += len;
  }
  return copied;
}
/*
 * Fills in the required rte_mbuf fields for chained buffers given a VLIB chain.
 */
void vlib_buffer_chain_validate (vlib_main_t * vm, vlib_buffer_t * b_first)
{
  vlib_buffer_t *b = b_first, *prev = b_first;
  struct rte_mbuf *mb_first = ((struct rte_mbuf *) b) - 1;

  mb_first->pkt_len = mb_first->data_len = b_first->current_length;
  while (b->flags & VLIB_BUFFER_NEXT_PRESENT) {
    b = vlib_get_buffer(vm, b->next_buffer);
    mb_first->nb_segs++;
    mb_first->pkt_len += b->current_length;
    (((struct rte_mbuf *) prev) - 1)->next = (((struct rte_mbuf *) b) - 1);
    (((struct rte_mbuf *) b) - 1)->data_len = b->current_length;
    prev = b;
  }
}
clib_error_t *
vlib_buffer_pool_create(vlib_main_t * vm, unsigned num_mbufs,
                        unsigned mbuf_size, unsigned socket_id)
{
  vlib_buffer_main_t * bm = vm->buffer_main;
  vlib_physmem_main_t * vpm = &vm->physmem_main;
  struct rte_mempool * rmp;
  uword new_start, new_size;
  int i;

  if (!rte_mempool_create)
    return clib_error_return (0, "not linked with DPDK");

  vec_validate_aligned(bm->pktmbuf_pools, socket_id, CLIB_CACHE_LINE_BYTES);

  /* pool already exists, nothing to do */
  if (bm->pktmbuf_pools[socket_id])
    return 0;

  u8 * pool_name = format(0, "mbuf_pool_socket%u%c", socket_id, 0);

  rmp = rte_mempool_create((char *) pool_name,
                           num_mbufs, mbuf_size, 512,
                           sizeof(struct rte_pktmbuf_pool_private),
                           rte_pktmbuf_pool_init, NULL,
                           rte_pktmbuf_init, NULL,
                           socket_id, 0);
  vec_free(pool_name);

  if (rmp)
    {
      new_start = pointer_to_uword(rmp);
      new_size = rmp->elt_va_end - new_start;

      if (vpm->virtual.size > 0)
        {
          ASSERT(new_start != vpm->virtual.start);
          if (new_start < vpm->virtual.start)
            {
              new_size = vpm->virtual.size + vpm->virtual.start - new_start;
            }
          else
            {
              new_size += new_start - vpm->virtual.start;
              new_start = vpm->virtual.start;
            }

          /* check if fits into buffer index range */
          if (new_size > ((uword) 1 << (32 + CLIB_LOG2_CACHE_LINE_BYTES)))
            rmp = 0;
        }
    }

  if (rmp)
    {
      bm->pktmbuf_pools[socket_id] = rmp;
      vpm->virtual.start = new_start;
      vpm->virtual.size = new_size;
      vpm->virtual.end = new_start + new_size;
      return 0;
    }

  /* no usable pool for this socket, try to use pool from another one */
  for (i = 0; i < vec_len(bm->pktmbuf_pools); i++)
    {
      if (bm->pktmbuf_pools[i])
        {
          clib_warning("WARNING: Failed to allocate mempool for CPU socket %u. "
                       "Threads running on socket %u will use socket %u mempool.",
                       socket_id, socket_id, i);
          bm->pktmbuf_pools[socket_id] = bm->pktmbuf_pools[i];
          return 0;
        }
    }

  return clib_error_return (0, "failed to allocate mempool on socket %u",
                            socket_id);
}
static void vlib_serialize_tx (serialize_main_header_t * m, serialize_stream_t * s)
{
  vlib_main_t * vm;
  vlib_serialize_buffer_main_t * sm;
  uword n, n_bytes_to_write;
  vlib_buffer_t * last;

  n_bytes_to_write = s->current_buffer_index;
  sm = uword_to_pointer (s->data_function_opaque, vlib_serialize_buffer_main_t *);
  vm = sm->vlib_main;

  ASSERT (sm->tx.max_n_data_bytes_per_chain > 0);
  if (serialize_stream_is_end_of_stream (s)
      || sm->tx.n_total_data_bytes + n_bytes_to_write > sm->tx.max_n_data_bytes_per_chain)
    {
      vlib_process_t * p = vlib_get_current_process (vm);

      last = vlib_get_buffer (vm, sm->last_buffer);
      last->current_length = n_bytes_to_write;

      vlib_set_next_frame_buffer (vm, &p->node_runtime, sm->tx.next_index, sm->first_buffer);

      sm->first_buffer = sm->last_buffer = ~0;
      sm->tx.n_total_data_bytes = 0;
    }

  else if (n_bytes_to_write == 0 && s->n_buffer_bytes == 0)
    {
      ASSERT (sm->first_buffer == ~0);
      ASSERT (sm->last_buffer == ~0);
      n = vlib_buffer_alloc_from_free_list (vm, &sm->first_buffer, 1, sm->tx.free_list_index);
      if (n != 1)
        serialize_error (m, clib_error_create ("vlib_buffer_alloc_from_free_list fails"));
      sm->last_buffer = sm->first_buffer;
      s->n_buffer_bytes = vlib_buffer_free_list_buffer_size (vm, sm->tx.free_list_index);
    }

  if (n_bytes_to_write > 0)
    {
      vlib_buffer_t * prev = vlib_get_buffer (vm, sm->last_buffer);
      n = vlib_buffer_alloc_from_free_list (vm, &sm->last_buffer, 1, sm->tx.free_list_index);
      if (n != 1)
        serialize_error (m, clib_error_create ("vlib_buffer_alloc_from_free_list fails"));
      sm->tx.n_total_data_bytes += n_bytes_to_write;
      prev->current_length = n_bytes_to_write;
      prev->next_buffer = sm->last_buffer;
      prev->flags |= VLIB_BUFFER_NEXT_PRESENT;
    }

  if (sm->last_buffer != ~0)
    {
      last = vlib_get_buffer (vm, sm->last_buffer);
      s->buffer = vlib_buffer_get_current (last);
      s->current_buffer_index = 0;
      ASSERT (last->current_data == s->current_buffer_index);
    }
}
static void vlib_serialize_rx (serialize_main_header_t * m, serialize_stream_t * s)
{
  vlib_main_t * vm;
  vlib_serialize_buffer_main_t * sm;
  vlib_buffer_t * last;

  sm = uword_to_pointer (s->data_function_opaque, vlib_serialize_buffer_main_t *);
  vm = sm->vlib_main;

  if (serialize_stream_is_end_of_stream (s))
    return;

  if (sm->last_buffer != ~0)
    {
      last = vlib_get_buffer (vm, sm->last_buffer);

      if (last->flags & VLIB_BUFFER_NEXT_PRESENT)
        sm->last_buffer = last->next_buffer;
      else
        {
          vlib_buffer_free (vm, &sm->first_buffer, /* count */ 1);
          sm->first_buffer = sm->last_buffer = ~0;
        }
    }

  if (sm->last_buffer == ~0)
    {
      while (clib_fifo_elts (sm->rx.buffer_fifo) == 0)
        {
          sm->rx.ready_one_time_event = vlib_process_create_one_time_event (vm, vlib_current_process (vm), ~0);
          vlib_process_wait_for_one_time_event (vm, /* no event data */ 0, sm->rx.ready_one_time_event);
        }

      clib_fifo_sub1 (sm->rx.buffer_fifo, sm->first_buffer);
      sm->last_buffer = sm->first_buffer;
    }

  ASSERT (sm->last_buffer != ~0);

  last = vlib_get_buffer (vm, sm->last_buffer);
  s->current_buffer_index = 0;
  s->buffer = vlib_buffer_get_current (last);
  s->n_buffer_bytes = last->current_length;
}
static void
serialize_open_vlib_helper (serialize_main_t * m,
                            vlib_main_t * vm,
                            vlib_serialize_buffer_main_t * sm,
                            uword is_read)
{
  /* Initialize serialize main but save overflow buffer for re-use between calls. */
  {
    u8 * save = m->stream.overflow_buffer;
    memset (m, 0, sizeof (m[0]));
    m->stream.overflow_buffer = save;
    if (save)
      _vec_len (save) = 0;
  }

  sm->first_buffer = sm->last_buffer = ~0;
  if (is_read)
    clib_fifo_reset (sm->rx.buffer_fifo);
  else
    sm->tx.n_total_data_bytes = 0;

  m->header.data_function = is_read ? vlib_serialize_rx : vlib_serialize_tx;
  m->stream.data_function_opaque = pointer_to_uword (sm);
}
void serialize_open_vlib_buffer (serialize_main_t * m, vlib_main_t * vm, vlib_serialize_buffer_main_t * sm)
{ serialize_open_vlib_helper (m, vm, sm, /* is_read */ 0); }

void unserialize_open_vlib_buffer (serialize_main_t * m, vlib_main_t * vm, vlib_serialize_buffer_main_t * sm)
{ serialize_open_vlib_helper (m, vm, sm, /* is_read */ 1); }
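
/* The two entry points above differ only in the is_read flag: the write-side
   (serialize) stream is wired to vlib_serialize_tx and builds a buffer chain,
   while the read-side (unserialize) stream is wired to vlib_serialize_rx and
   consumes buffers queued on sm->rx.buffer_fifo. */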
u32 serialize_close_vlib_buffer (serialize_main_t * m)
{
  vlib_serialize_buffer_main_t * sm
    = uword_to_pointer (m->stream.data_function_opaque, vlib_serialize_buffer_main_t *);
  vlib_buffer_t * last;
  serialize_stream_t * s = &m->stream;

  last = vlib_get_buffer (sm->vlib_main, sm->last_buffer);
  last->current_length = s->current_buffer_index;

  if (vec_len (s->overflow_buffer) > 0)
    {
      sm->last_buffer
        = vlib_buffer_add_data (sm->vlib_main, sm->tx.free_list_index,
                                sm->last_buffer == ~0 ? 0 : sm->last_buffer,
                                s->overflow_buffer,
                                vec_len (s->overflow_buffer));
      _vec_len (s->overflow_buffer) = 0;
    }

  return sm->first_buffer;
}
void unserialize_close_vlib_buffer (serialize_main_t * m)
{
  vlib_serialize_buffer_main_t * sm
    = uword_to_pointer (m->stream.data_function_opaque, vlib_serialize_buffer_main_t *);

  if (sm->first_buffer != ~0)
    vlib_buffer_free_one (sm->vlib_main, sm->first_buffer);
  clib_fifo_reset (sm->rx.buffer_fifo);
  if (m->stream.overflow_buffer)
    _vec_len (m->stream.overflow_buffer) = 0;
}
static u8 * format_vlib_buffer_free_list (u8 * s, va_list * va)
{
  vlib_buffer_free_list_t * f = va_arg (*va, vlib_buffer_free_list_t *);
  u32 threadnum = va_arg (*va, u32);
  uword bytes_alloc, bytes_free, n_free, size;

  if (! f)
    return format (s, "%=7s%=30s%=12s%=12s%=12s%=12s%=12s%=12s",
                   "Thread", "Name", "Index", "Size", "Alloc", "Free", "#Alloc", "#Free");

  size = sizeof (vlib_buffer_t) + f->n_data_bytes;
  n_free = vec_len (f->aligned_buffers) + vec_len (f->unaligned_buffers);
  bytes_alloc = size * f->n_alloc;
  bytes_free = size * n_free;

  s = format (s, "%7d%30s%12d%12d%=12U%=12U%=12d%=12d",
              threadnum,
              f->name, f->index, f->n_data_bytes,
              format_memory_size, bytes_alloc,
              format_memory_size, bytes_free,
              f->n_alloc, n_free);

  return s;
}
static clib_error_t *
show_buffers (vlib_main_t * vm,
              unformat_input_t * input,
              vlib_cli_command_t * cmd)
{
  vlib_buffer_main_t * bm;
  vlib_buffer_free_list_t * f;
  vlib_main_t * curr_vm;
  u32 vm_index = 0;

  vlib_cli_output (vm, "%U", format_vlib_buffer_free_list, 0, 0);

  do {
    curr_vm = vec_len(vlib_mains) ? vlib_mains[vm_index] : vm;
    bm = curr_vm->buffer_main;

    pool_foreach (f, bm->buffer_free_list_pool, ({
      vlib_cli_output (vm, "%U", format_vlib_buffer_free_list, f, vm_index);
    }));

    vm_index++;
  } while (vm_index < vec_len(vlib_mains));

  return 0;
}
VLIB_CLI_COMMAND (show_buffers_command, static) = {
  .path = "show buffers",
  .short_help = "Show packet buffer allocation",
  .function = show_buffers,
};
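
/* "show buffers" prints one header row (Thread, Name, Index, Size, Alloc,
   Free, #Alloc, #Free) followed by one row per free list per thread, using
   format_vlib_buffer_free_list above. */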
u32 * vlib_buffer_state_validation_lock;
uword * vlib_buffer_state_validation_hash;
void * vlib_buffer_state_heap;
static clib_error_t *
buffer_state_validation_init (vlib_main_t * vm)
{
  void * oldheap;

  vlib_buffer_state_heap = mheap_alloc (0, 10<<20);

  oldheap = clib_mem_set_heap (vlib_buffer_state_heap);

  vlib_buffer_state_validation_hash = hash_create (0, sizeof(uword));
  vec_validate_aligned (vlib_buffer_state_validation_lock, 0,
                        CLIB_CACHE_LINE_BYTES);
  clib_mem_set_heap (oldheap);

  return 0;
}
VLIB_INIT_FUNCTION (buffer_state_validation_init);