2 * Copyright (c) 2015 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
16 * buffer_funcs.h: VLIB buffer related functions/inlines
18 * Copyright (c) 2008 Eliot Dresselhaus
20 * Permission is hereby granted, free of charge, to any person obtaining
21 * a copy of this software and associated documentation files (the
22 * "Software"), to deal in the Software without restriction, including
23 * without limitation the rights to use, copy, modify, merge, publish,
24 * distribute, sublicense, and/or sell copies of the Software, and to
25 * permit persons to whom the Software is furnished to do so, subject to
26 * the following conditions:
28 * The above copyright notice and this permission notice shall be
29 * included in all copies or substantial portions of the Software.
31 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
32 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
33 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
34 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
35 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
36 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
37 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
40 #ifndef included_vlib_buffer_funcs_h
41 #define included_vlib_buffer_funcs_h
43 #include <vppinfra/hash.h>
46 #undef always_inline // dpdk and clib use conflicting always_inline macros.
47 #include <rte_config.h>
49 #include <rte_memcpy.h>
52 #define always_inline static inline
54 #define always_inline static inline __attribute__ ((__always_inline__))
59 vlib buffer access methods.
63 /** \brief Translate buffer index into buffer pointer
65 @param vm - (vlib_main_t *) vlib main data structure pointer
66 @param buffer_index - (u32) buffer index
67 @return - (vlib_buffer_t *) buffer pointer
69 always_inline vlib_buffer_t *
70 vlib_get_buffer (vlib_main_t * vm, u32 buffer_index)
72 return vlib_physmem_at_offset (&vm->physmem_main, ((uword)buffer_index)
73 << CLIB_LOG2_CACHE_LINE_BYTES);
76 /** \brief Translate buffer pointer into buffer index
78 @param vm - (vlib_main_t *) vlib main data structure pointer
79 @param b - (void *) buffer pointer
80 @return - (u32) buffer index
83 vlib_get_buffer_index (vlib_main_t * vm, void * p)
85 uword offset = vlib_physmem_offset_of (&vm->physmem_main, p);
86 ASSERT((offset % (1<<CLIB_LOG2_CACHE_LINE_BYTES)) == 0);
87 return offset >> CLIB_LOG2_CACHE_LINE_BYTES;
90 /** \brief Get next buffer in buffer linklist, or zero for end of list.
92 @param vm - (vlib_main_t *) vlib main data structure pointer
93 @param b - (void *) buffer pointer
94 @return - (vlib_buffer_t *) next buffer, or NULL
96 always_inline vlib_buffer_t *
97 vlib_get_next_buffer (vlib_main_t * vm, vlib_buffer_t * b)
99 return (b->flags & VLIB_BUFFER_NEXT_PRESENT
100 ? vlib_get_buffer (vm, b->next_buffer)
104 uword vlib_buffer_length_in_chain_slow_path (vlib_main_t * vm, vlib_buffer_t * b_first);
106 /** \brief Get length in bytes of the buffer chain
108 @param vm - (vlib_main_t *) vlib main data structure pointer
109 @param b - (void *) buffer pointer
110 @return - (uword) length of buffer chain
113 vlib_buffer_length_in_chain (vlib_main_t * vm, vlib_buffer_t * b)
115 uword l = b->current_length + b->total_length_not_including_first_buffer;
116 if (PREDICT_FALSE ((b->flags & (VLIB_BUFFER_NEXT_PRESENT
117 | VLIB_BUFFER_TOTAL_LENGTH_VALID))
118 == VLIB_BUFFER_NEXT_PRESENT))
119 return vlib_buffer_length_in_chain_slow_path (vm, b);
123 /** \brief Get length in bytes of the buffer index buffer chain
125 @param vm - (vlib_main_t *) vlib main data structure pointer
126 @param bi - (u32) buffer index
127 @return - (uword) length of buffer chain
130 vlib_buffer_index_length_in_chain (vlib_main_t * vm, u32 bi)
132 vlib_buffer_t * b = vlib_get_buffer (vm, bi);
133 return vlib_buffer_length_in_chain (vm, b);
136 /** \brief Copy buffer contents to memory
138 @param vm - (vlib_main_t *) vlib main data structure pointer
139 @param bi - (u32) buffer index
140 @param contents - (u8 *) memory, <strong>must be large enough</strong>
141 @return - (uword) length of buffer chain
144 vlib_buffer_contents (vlib_main_t * vm, u32 buffer_index, u8 * contents)
146 uword content_len = 0;
152 b = vlib_get_buffer (vm, buffer_index);
153 l = b->current_length;
154 memcpy (contents + content_len, b->data + b->current_data, l);
156 if (! (b->flags & VLIB_BUFFER_NEXT_PRESENT))
158 buffer_index = b->next_buffer;
164 /* Return physical address of buffer->data start. */
166 vlib_get_buffer_data_physical_address (vlib_main_t * vm, u32 buffer_index)
168 return vlib_physmem_offset_to_physical (&vm->physmem_main,
169 (((uword)buffer_index) <<
170 CLIB_LOG2_CACHE_LINE_BYTES) +
171 STRUCT_OFFSET_OF (vlib_buffer_t, data));
/** \brief Prefetch buffer metadata by buffer index
    The first 64 bytes of buffer contains most header information

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param bi - (u32) buffer index
    @param type - LOAD, STORE. In most cases, STORE is the right answer
*/
/* Prefetch buffer header given index. */
#define vlib_prefetch_buffer_with_index(vm,bi,type)	\
  do {							\
    vlib_buffer_t * _b = vlib_get_buffer (vm, bi);	\
    vlib_prefetch_buffer_header (_b, type);		\
  } while (0)
/* Iterate over known allocated vlib bufs. You probably do not want
 * to do this!
 @param vm the vlib_main_t
 @param bi found allocated buffer index
 @param body operation to perform on buffer index
 function executes body for each allocated buffer index
*/
#define vlib_buffer_foreach_allocated(vm,bi,body)                \
do {                                                             \
  vlib_main_t * _vmain = (vm);                                   \
  vlib_buffer_main_t * _bmain = &_vmain->buffer_main;            \
  hash_pair_t * _vbpair;                                         \
  hash_foreach_pair(_vbpair, _bmain->buffer_known_hash, ({       \
    if (VLIB_BUFFER_KNOWN_ALLOCATED == _vbpair->value[0]) {      \
      (bi) = _vbpair->key;                                       \
      body;                                                      \
    }                                                            \
  }));                                                           \
} while (0)
/* Bookkeeping states for buffer indices; the known-buffer hash in
   vlib_buffer_main_t maps buffer index -> one of these values.
   NOTE: the extract was missing the 'typedef enum {' opener and the
   VLIB_BUFFER_UNKNOWN enumerator that vlib_buffer_is_known() returns;
   restored here. */
typedef enum {
  /* Index is unknown. */
  VLIB_BUFFER_UNKNOWN,

  /* Index is known and free/allocated. */
  VLIB_BUFFER_KNOWN_FREE,
  VLIB_BUFFER_KNOWN_ALLOCATED,
} vlib_buffer_known_state_t;
221 always_inline vlib_buffer_known_state_t
222 vlib_buffer_is_known (vlib_main_t * vm, u32 buffer_index)
224 vlib_buffer_main_t * bm = vm->buffer_main;
225 ASSERT(os_get_cpu_number() == 0);
227 uword * p = hash_get (bm->buffer_known_hash, buffer_index);
228 return p ? p[0] : VLIB_BUFFER_UNKNOWN;
232 vlib_buffer_set_known_state (vlib_main_t * vm,
234 vlib_buffer_known_state_t state)
236 vlib_buffer_main_t * bm = vm->buffer_main;
237 ASSERT(os_get_cpu_number() == 0);
238 hash_set (bm->buffer_known_hash, buffer_index, state);
241 /* Validates sanity of a single buffer.
242 Returns format'ed vector with error message if any. */
243 u8 * vlib_validate_buffer (vlib_main_t * vm, u32 buffer_index, uword follow_chain);
245 /* Validate an array of buffers. As above. */
246 u8 * vlib_validate_buffers (vlib_main_t * vm,
248 uword next_buffer_stride,
250 vlib_buffer_known_state_t known_state,
253 #endif /* DPDK == 0 */
256 vlib_buffer_pool_create(vlib_main_t * vm, unsigned num_mbufs,
257 unsigned mbuf_size, unsigned socket_id);
259 /** \brief Allocate buffers into supplied array
261 @param vm - (vlib_main_t *) vlib main data structure pointer
262 @param buffers - (u32 * ) buffer index array
263 @param n_buffers - (u32) number of buffers requested
264 @return - (u32) number of buffers actually allocated, may be
265 less than the number requested or zero
267 u32 vlib_buffer_alloc (vlib_main_t * vm, u32 * buffers, u32 n_buffers);
270 vlib_buffer_round_size (u32 size)
271 { return round_pow2 (size, sizeof (vlib_buffer_t)); }
273 /** \brief Allocate buffers from specific freelist into supplied array
275 @param vm - (vlib_main_t *) vlib main data structure pointer
276 @param buffers - (u32 * ) buffer index array
277 @param n_buffers - (u32) number of buffers requested
278 @return - (u32) number of buffers actually allocated, may be
279 less than the number requested or zero
281 u32 vlib_buffer_alloc_from_free_list (vlib_main_t * vm,
284 u32 free_list_index);
286 /** \brief Free buffers
287 Frees the entire buffer chain for each buffer
289 @param vm - (vlib_main_t *) vlib main data structure pointer
290 @param buffers - (u32 * ) buffer index array
291 @param n_buffers - (u32) number of buffers to free
294 void vlib_buffer_free (vlib_main_t * vm,
295 /* pointer to first buffer */
297 /* number of buffers to free */
300 /** \brief Free buffers, does not free the buffer chain for each buffer
302 @param vm - (vlib_main_t *) vlib main data structure pointer
303 @param buffers - (u32 * ) buffer index array
304 @param n_buffers - (u32) number of buffers to free
307 void vlib_buffer_free_no_next (vlib_main_t * vm,
308 /* pointer to first buffer */
310 /* number of buffers to free */
313 /** \brief Free one buffer
314 Shorthand to free a single buffer chain.
316 @param vm - (vlib_main_t *) vlib main data structure pointer
317 @param buffer_index - (u32) buffer index to free
320 vlib_buffer_free_one (vlib_main_t * vm, u32 buffer_index)
322 vlib_buffer_free (vm, &buffer_index, /* n_buffers */ 1);
325 /* Add/delete buffer free lists. */
326 u32 vlib_buffer_create_free_list (vlib_main_t * vm, u32 n_data_bytes, char * fmt, ...);
327 void vlib_buffer_delete_free_list (vlib_main_t * vm, u32 free_list_index);
329 /* Find already existing public free list with given size or create one. */
330 u32 vlib_buffer_get_or_create_free_list (vlib_main_t * vm, u32 n_data_bytes, char * fmt, ...);
332 always_inline vlib_buffer_free_list_t *
333 vlib_buffer_get_free_list (vlib_main_t * vm, u32 free_list_index)
335 vlib_buffer_main_t * bm = vm->buffer_main;
336 vlib_buffer_free_list_t * f;
338 f = pool_elt_at_index (bm->buffer_free_list_pool, free_list_index);
340 /* Sanity: indices must match. */
341 ASSERT (f->index == free_list_index);
347 vlib_buffer_free_list_buffer_size (vlib_main_t * vm, u32 free_list_index)
349 vlib_buffer_free_list_t * f = vlib_buffer_get_free_list (vm, free_list_index);
350 return f->n_data_bytes;
354 vlib_aligned_memcpy (void * _dst, void * _src, int n_bytes);
356 /* Reasonably fast buffer copy routine. */
358 vlib_copy_buffers (u32 * dst, u32 * src, u32 n)
380 vlib_physmem_alloc_aligned (vlib_main_t * vm, clib_error_t ** error,
381 uword n_bytes, uword alignment)
383 void * r = vm->os_physmem_alloc_aligned (&vm->physmem_main, n_bytes, alignment);
385 *error = clib_error_return (0, "failed to allocate %wd bytes of I/O memory", n_bytes);
391 /* By default allocate I/O memory with cache line alignment. */
393 vlib_physmem_alloc (vlib_main_t * vm, clib_error_t ** error, uword n_bytes)
394 { return vlib_physmem_alloc_aligned (vm, error, n_bytes, CLIB_CACHE_LINE_BYTES); }
397 vlib_physmem_free (vlib_main_t * vm, void * mem)
398 { return vm->os_physmem_free (mem); }
401 vlib_physmem_virtual_to_physical (vlib_main_t * vm, void * mem)
403 vlib_physmem_main_t * pm = &vm->physmem_main;
404 uword o = pointer_to_uword (mem) - pm->virtual.start;
405 return vlib_physmem_offset_to_physical (pm, o);
408 /* Append given data to end of buffer, possibly allocating new buffers. */
409 u32 vlib_buffer_add_data (vlib_main_t * vm,
412 void * data, u32 n_data_bytes);
415 * vlib_buffer_chain_* functions provide a way to create long buffers.
416 * When DPDK is enabled, the 'hidden' DPDK header is taken care of transparently.
419 /* Initializes the buffer as an empty packet with no chained buffers. */
421 vlib_buffer_chain_init(vlib_buffer_t *first)
423 first->total_length_not_including_first_buffer = 0;
424 first->current_length = 0;
425 first->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
426 first->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
428 (((struct rte_mbuf *) first) - 1)->nb_segs = 1;
429 (((struct rte_mbuf *) first) - 1)->next = 0;
430 (((struct rte_mbuf *) first) - 1)->pkt_len = 0;
431 (((struct rte_mbuf *) first) - 1)->data_len = 0;
432 (((struct rte_mbuf *) first) - 1)->data_off = RTE_PKTMBUF_HEADROOM + first->current_data;
436 /* The provided next_bi buffer index is appended to the end of the packet. */
437 always_inline vlib_buffer_t *
438 vlib_buffer_chain_buffer(vlib_main_t *vm,
439 vlib_buffer_t *first,
443 vlib_buffer_t *next_buffer = vlib_get_buffer(vm, next_bi);
444 last->next_buffer = next_bi;
445 last->flags |= VLIB_BUFFER_NEXT_PRESENT;
446 next_buffer->current_length = 0;
447 next_buffer->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
449 (((struct rte_mbuf *) first) - 1)->nb_segs++;
450 (((struct rte_mbuf *) last) - 1)->next = (((struct rte_mbuf *) next_buffer) - 1);
451 (((struct rte_mbuf *) next_buffer) - 1)->data_len = 0;
452 (((struct rte_mbuf *) next_buffer) - 1)->data_off = RTE_PKTMBUF_HEADROOM + next_buffer->current_data;
453 (((struct rte_mbuf *) next_buffer) - 1)->next = 0;
458 /* Increases or decreases the packet length.
459 * It does not allocate or deallocate new buffers.
460 * Therefore, the added length must be compatible
461 * with the last buffer. */
463 vlib_buffer_chain_increase_length(vlib_buffer_t *first,
467 last->current_length += len;
469 first->total_length_not_including_first_buffer += len;
471 (((struct rte_mbuf *) first) - 1)->pkt_len += len;
472 (((struct rte_mbuf *) last) - 1)->data_len += len;
476 /* Copy data to the end of the packet and increases its length.
477 * It does not allocate new buffers.
478 * Returns the number of copied bytes. */
480 vlib_buffer_chain_append_data(vlib_main_t *vm,
482 vlib_buffer_t *first,
484 void *data, u16 data_len)
486 u32 n_buffer_bytes = vlib_buffer_free_list_buffer_size (vm, free_list_index);
487 ASSERT(n_buffer_bytes >= last->current_length + last->current_data);
488 u16 len = clib_min(data_len, n_buffer_bytes - last->current_length - last->current_data);
490 rte_memcpy(vlib_buffer_get_current (last) + last->current_length, data, len);
492 memcpy(vlib_buffer_get_current (last) + last->current_length, data, len);
494 vlib_buffer_chain_increase_length(first, last, len);
498 /* Copy data to the end of the packet and increases its length.
499 * Allocates additional buffers from the free list if necessary.
500 * Returns the number of copied bytes.
501 * 'last' value is modified whenever new buffers are allocated and
502 * chained and points to the last buffer in the chain. */
504 vlib_buffer_chain_append_data_with_alloc(vlib_main_t *vm,
506 vlib_buffer_t *first,
507 vlib_buffer_t **last,
508 void * data, u16 data_len);
509 void vlib_buffer_chain_validate(vlib_main_t *vm, vlib_buffer_t *first);
511 format_function_t format_vlib_buffer, format_vlib_buffer_and_data, format_vlib_buffer_contents;
/* Template describing a pre-built packet (see vlib_packet_template_init).
   NOTE(review): this extract is missing the 'typedef struct {' opener and
   at least the packet-data vector field declaration that the comment below
   refers to (vlib_packet_template_free frees t->packet_data) — restore the
   full struct from upstream before compiling. */
514 /* Vector of packet data. */
518 /* Number of buffers to allocate in each call to physmem
520 u32 min_n_buffers_each_physmem_alloc;
522 /* Buffer free list for this template. */
527 } vlib_packet_template_t;
529 void vlib_packet_template_get_packet_helper (vlib_main_t * vm,
530 vlib_packet_template_t * t);
532 void vlib_packet_template_init (vlib_main_t * vm,
533 vlib_packet_template_t * t,
535 uword n_packet_data_bytes,
536 uword min_n_buffers_each_physmem_alloc,
540 vlib_packet_template_get_packet (vlib_main_t * vm,
541 vlib_packet_template_t * t,
545 vlib_packet_template_free (vlib_main_t * vm, vlib_packet_template_t * t)
547 vec_free (t->packet_data);
/* Total bytes still available to unserialize from a vlib buffer stream:
   the unread remainder of the current stream buffer, plus the rest of the
   chain hanging off sm->last_buffer (if any), plus every chain queued in
   the rx buffer fifo.
   NOTE(review): this extract is missing the return-type line, the
   declarations of 'n' and 'f', the braces of the if/while bodies and the
   final 'return n;' — restore from upstream before compiling. */
551 unserialize_vlib_buffer_n_bytes (serialize_main_t * m)
553 serialize_stream_t * s = &m->stream;
554 vlib_serialize_buffer_main_t * sm
555 = uword_to_pointer (m->stream.data_function_opaque, vlib_serialize_buffer_main_t *);
556 vlib_main_t * vm = sm->vlib_main;
/* Bytes not yet consumed from the stream's current buffer. */
559 n = s->n_buffer_bytes - s->current_buffer_index;
/* ~0 is the "no last buffer" sentinel. */
560 if (sm->last_buffer != ~0)
562 vlib_buffer_t * b = vlib_get_buffer (vm, sm->last_buffer);
563 while (b->flags & VLIB_BUFFER_NEXT_PRESENT)
565 b = vlib_get_buffer (vm, b->next_buffer)
566 n += b->current_length;
/* Add every queued-but-unprocessed rx chain. */
570 clib_fifo_foreach (f, sm->rx.buffer_fifo, ({
571 n += vlib_buffer_index_length_in_chain (vm, f[0]);
579 vlib_copy_unit_t i[sizeof (vlib_buffer_t) / sizeof (vlib_copy_unit_t)];
580 } vlib_buffer_union_t;
582 /* Set a buffer quickly into "uninitialized" state. We want this to
583 be extremely cheap and arrange for all fields that need to be
584 initialized to be in the first 128 bits of the buffer. */
/* NOTE(review): the 'always_inline void' return-type line and the
   function braces are missing from this extract. */
586 vlib_buffer_init_for_free_list (vlib_buffer_t * _dst,
587 vlib_buffer_free_list_t * fl)
589 vlib_buffer_union_t * dst = (vlib_buffer_union_t *) _dst;
590 vlib_buffer_union_t * src = (vlib_buffer_union_t *) &fl->buffer_init_template;
592 /* Make sure buffer template is sane. */
593 ASSERT (fl->index == fl->buffer_init_template.free_list_index);
595 /* Copy template from src->current_data thru src->free_list_index */
596 dst->i[0] = src->i[0];
/* Copy additional copy-units only while still within the first 16 bytes;
   how many are needed depends on sizeof (vlib_copy_unit_t). */
597 if (1 * sizeof (dst->i[0]) < 16)
598 dst->i[1] = src->i[1];
599 if (2 * sizeof (dst->i[0]) < 16)
600 dst->i[2] = src->i[2];
602 /* Make sure it really worked. */
603 #define _(f) ASSERT (dst->b.f == src->b.f)
/* NOTE(review): the per-field '_ (field);' invocations and the matching
   '#undef _' (original lines 604-608) are missing from this extract —
   restore them, otherwise the '_' macro leaks past this function. */
609 ASSERT (dst->b.total_length_not_including_first_buffer == 0);
/* Two-buffer variant of vlib_buffer_init_for_free_list: stamp the free
   list's init template onto both _dst0 and _dst1 in one pass.
   NOTE(review): the 'always_inline void' return-type line and the
   function braces are missing from this extract. */
613 vlib_buffer_init_two_for_free_list (vlib_buffer_t * _dst0,
614 vlib_buffer_t * _dst1,
615 vlib_buffer_free_list_t * fl)
617 vlib_buffer_union_t * dst0 = (vlib_buffer_union_t *) _dst0;
618 vlib_buffer_union_t * dst1 = (vlib_buffer_union_t *) _dst1;
619 vlib_buffer_union_t * src = (vlib_buffer_union_t *) &fl->buffer_init_template;
621 /* Make sure buffer template is sane. */
622 ASSERT (fl->index == fl->buffer_init_template.free_list_index);
624 /* Copy template from src->current_data thru src->free_list_index */
625 dst0->i[0] = dst1->i[0] = src->i[0];
/* Copy additional copy-units only while still within the first 16 bytes. */
626 if (1 * sizeof (dst0->i[0]) < 16)
627 dst0->i[1] = dst1->i[1] = src->i[1];
628 if (2 * sizeof (dst0->i[0]) < 16)
629 dst0->i[2] = dst1->i[2] = src->i[2];
631 /* Make sure it really worked. */
632 #define _(f) ASSERT (dst0->b.f == src->b.f && dst1->b.f == src->b.f)
/* NOTE(review): the per-field '_ (field);' invocations and the matching
   '#undef _' (original lines 633-637) are missing from this extract —
   restore them, otherwise the '_' macro leaks past this function. */
638 ASSERT (dst0->b.total_length_not_including_first_buffer == 0);
639 ASSERT (dst1->b.total_length_not_including_first_buffer == 0);
/* Shared state for the debug-only buffer busy/free validation machinery.
   NOTE(review): as written these are tentative *definitions* in a header —
   every includer gets a copy. Upstream presumably declares them 'extern'
   (and guards them with a debug #if whose lines are missing from this
   extract); confirm before use. */
643 u32 * vlib_buffer_state_validation_lock;
644 uword * vlib_buffer_state_validation_hash;
645 void * vlib_buffer_state_heap;
/* Debug check: assert that buffer 'b' is in the 'expected' busy/free state
   according to the validation hash; unseen buffers are recorded as being
   in the expected state. Runs under the validation spinlock on the
   dedicated validation heap.
   NOTE(review): the return-type line, local declarations ('p', 'oldheap',
   'bi'), braces, and the failure-path tail (original lines around 650-678
   and 684-686) are missing from this extract. */
649 vlib_validate_buffer_in_use (vlib_buffer_t * b, u32 expected)
/* All validation state lives on its own heap. */
655 oldheap = clib_mem_set_heap (vlib_buffer_state_heap);
/* Spin until we own the validation lock. */
657 while (__sync_lock_test_and_set (vlib_buffer_state_validation_lock, 1))
660 p = hash_get (vlib_buffer_state_validation_hash, b);
662 /* If we don't know about b, declare it to be in the expected state */
665 hash_set (vlib_buffer_state_validation_hash, b, expected);
669 if (p[0] != expected)
673 vlib_main_t * vm = &vlib_global_main;
677 bi = vlib_get_buffer_index (vm, b);
679 clib_mem_set_heap (oldheap);
/* NOTE(review): format/argument mismatch — the format string has five
   conversions (%.6f %llx %d %s %s) but only four arguments are passed
   here, so '%d' consumes the first "busy"/"free" string (undefined
   behavior). Also 'bi' is a u32 passed for '%llx'. Likely a missing
   'p[0],' argument; fix the call. */
680 clib_warning ("%.6f buffer %llx (%d): %s, not %s",
681 vlib_time_now(vm), bi,
682 p[0] ? "busy" : "free",
683 expected ? "busy" : "free");
/* Publish the hash update, then release the spinlock and restore heap. */
687 CLIB_MEMORY_BARRIER();
688 *vlib_buffer_state_validation_lock = 0;
689 clib_mem_set_heap (oldheap);
694 vlib_validate_buffer_set_in_use (vlib_buffer_t * b, u32 expected)
699 oldheap = clib_mem_set_heap (vlib_buffer_state_heap);
701 while (__sync_lock_test_and_set (vlib_buffer_state_validation_lock, 1))
704 hash_set (vlib_buffer_state_validation_hash, b, expected);
706 CLIB_MEMORY_BARRIER();
707 *vlib_buffer_state_validation_lock = 0;
708 clib_mem_set_heap (oldheap);
712 #endif /* included_vlib_buffer_funcs_h */