#define VLIB_BUFFER_DATA_SIZE (2048)
#define VLIB_BUFFER_PRE_DATA_SIZE __PRE_DATA_SIZE
+/* Minimum buffer chain segment size. Does not apply to last buffer in chain.
+   Dataplane code can safely assume that specified amount of data is not split
+ into 2 chained buffers */
+#define VLIB_BUFFER_MIN_CHAIN_SEG_SIZE (128)
+
+/* Amount of head buffer data copied to each replica head buffer */
+#define VLIB_BUFFER_CLONE_HEAD_SIZE (256)
+
typedef u8 vlib_buffer_free_list_index_t;
/** \file
_( 0, NON_DEFAULT_FREELIST, "non-default-fl") \
_( 1, IS_TRACED, 0) \
_( 2, NEXT_PRESENT, 0) \
- _( 3, IS_RECYCLED, "is-recycled") \
- _( 4, TOTAL_LENGTH_VALID, 0) \
- _( 5, REPL_FAIL, "repl-fail") \
- _( 6, RECYCLE, "recycle") \
- _( 7, EXT_HDR_VALID, "ext-hdr-valid")
+ _( 3, TOTAL_LENGTH_VALID, 0) \
+ _( 4, EXT_HDR_VALID, "ext-hdr-valid")
/* NOTE: only buffer generic flags should be defined here, please consider
using user flags. i.e. src/vnet/buffer.h */
<br> VLIB_BUFFER_IS_TRACED: trace this buffer.
<br> VLIB_BUFFER_NEXT_PRESENT: this is a multi-chunk buffer.
<br> VLIB_BUFFER_TOTAL_LENGTH_VALID: as it says
- <br> VLIB_BUFFER_REPL_FAIL: packet replication failure
- <br> VLIB_BUFFER_RECYCLE: as it says
<br> VLIB_BUFFER_EXT_HDR_VALID: buffer contains valid external buffer manager header,
set to avoid adding it to a flow report
<br> VLIB_BUFFER_FLAG_USER(n): user-defined bit N
*/
+ u32 flow_id; /**< Generic flow identifier */
- STRUCT_MARK (template_end);
u32 next_buffer; /**< Next buffer for this linked-list of buffers.
Only valid if VLIB_BUFFER_NEXT_PRESENT flag is set.
*/
- vlib_error_t error; /**< Error code for buffers to be enqueued
- to error handler.
- */
+ STRUCT_MARK (template_end);
+
u32 current_config_index; /**< Used by feature subgraph arcs to
visit enabled feature nodes
*/
-
- u8 feature_arc_index; /**< Used to identify feature arcs by intermediate
- feature node
+ vlib_error_t error; /**< Error code for buffers to be enqueued
+ to error handler.
*/
-
u8 n_add_refs; /**< Number of additional references to this buffer. */
u8 buffer_pool_index; /**< index of buffer pool this buffer belongs. */
- u8 dont_waste_me[1]; /**< Available space in the (precious)
- first 32 octets of buffer metadata
- Before allocating any of it, discussion required!
- */
u32 opaque[10]; /**< Opaque data used by sub-graphs for their own purposes.
See .../vnet/vnet/buffer.h
#define vlib_prefetch_buffer_header(b,type) CLIB_PREFETCH (b, 64, type)
-always_inline vlib_buffer_t *
-vlib_buffer_next_contiguous (vlib_buffer_t * b, u32 buffer_bytes)
-{
- return (void *) (b + 1) + buffer_bytes;
-}
-
/** \brief Assert that the vlib_buffer_t layout is sane: the pre_data
    (headroom) array must end exactly where the data area begins, i.e.
    pre_data + VLIB_BUFFER_PRE_DATA_SIZE == data. */
always_inline void
vlib_buffer_struct_is_sane (vlib_buffer_t * b)
{
  ASSERT (b->pre_data + VLIB_BUFFER_PRE_DATA_SIZE == b->data);
}
+/** \brief Get the address of the buffer data area as an integer
+    @param b - (vlib_buffer_t *) pointer to the buffer
+    @return - (uword) address of b->data */
+always_inline uword
+vlib_buffer_get_va (vlib_buffer_t * b)
+{
+  return pointer_to_uword (b->data);
+}
+
/** \brief Get pointer to current data to process
@param b - (vlib_buffer_t *) pointer to the buffer
return b->data + b->current_data;
}
+/** \brief Get the address of the current data to process, as an integer
+    (b->data address offset by b->current_data)
+    @param b - (vlib_buffer_t *) pointer to the buffer
+    @return - (uword) address of the current data */
+always_inline uword
+vlib_buffer_get_current_va (vlib_buffer_t * b)
+{
+  return vlib_buffer_get_va (b) + b->current_data;
+}
+
/** \brief Advance current data pointer by the supplied (signed!) amount
@param b - (vlib_buffer_t *) pointer to the buffer
ASSERT (b->current_length >= l);
b->current_data += l;
b->current_length -= l;
+
+ ASSERT ((b->flags & VLIB_BUFFER_NEXT_PRESENT) == 0 ||
+ b->current_length >= VLIB_BUFFER_MIN_CHAIN_SEG_SIZE);
}
/** \brief Check if there is enough space in buffer to advance
/* Number of data bytes for buffers in this free list. */
u32 n_data_bytes;
- /* Number of buffers to allocate when we need to allocate new buffers
- from physmem heap. */
- u32 min_n_buffers_each_physmem_alloc;
+ /* Number of buffers to allocate when we need to allocate new buffers */
+ u32 min_n_buffers_each_alloc;
/* Total number of buffers allocated from this free list. */
u32 n_alloc;
/* Vector of free buffers. Each element is a byte offset into I/O heap. */
u32 *buffers;
- /* global vector of free buffers, used only on main thread.
- Bufers are returned to global buffers only in case when number of
- buffers on free buffers list grows about threshold */
- u32 *global_buffers;
- clib_spinlock_t global_buffers_lock;
-
- /* Memory chunks allocated for this free list
- recorded here so they can be freed when free list
- is deleted. */
- void **buffer_memory_allocated;
+ /* index of buffer pool used to get / put buffers */
+ u8 buffer_pool_index;
/* Free list name. */
u8 *name;
struct vlib_buffer_free_list_t * fl,
u32 * buffers, u32 n_buffers);
- /* Callback function to announce that buffers have been
- added to the freelist */
- void (*buffers_added_to_freelist_function)
- (struct vlib_main_t * vm, struct vlib_buffer_free_list_t * fl);
-
uword buffer_init_function_opaque;
} __attribute__ ((aligned (16))) vlib_buffer_free_list_t;
CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
uword start;
uword size;
- vlib_physmem_region_index_t physmem_region;
- int alloc_chunk_size;
+ uword log2_page_size;
+ u32 physmem_map_index;
+ u32 buffer_size;
+ u32 *buffers;
+ clib_spinlock_t lock;
} vlib_buffer_pool_t;
typedef struct
u32 (*buffer_free_callback) (struct vlib_main_t * vm,
u32 * buffers,
u32 n_buffers, u32 follow_buffer_next);
- /* Pool of buffer free lists.
- Multiple free lists exist for packet generator which uses
- separate free lists for each packet stream --- so as to avoid
- initializing static data for each packet generated. */
- vlib_buffer_free_list_t *buffer_free_list_pool;
#define VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX (0)
#define VLIB_BUFFER_DEFAULT_FREE_LIST_BYTES VLIB_BUFFER_DATA_SIZE
uword *buffer_known_hash;
clib_spinlock_t buffer_known_hash_lockp;
- /* List of free-lists needing Blue Light Special announcements */
- vlib_buffer_free_list_t **announce_list;
-
/* Callbacks */
vlib_buffer_callbacks_t cb;
int callbacks_registered;
} vlib_buffer_main_t;
-u8 vlib_buffer_add_physmem_region (struct vlib_main_t *vm,
- vlib_physmem_region_index_t region);
+extern vlib_buffer_main_t buffer_main;
+
+/** \brief Look up a buffer pool by index in the global buffer_main
+    pool vector
+    @param buffer_pool_index - (u8) index of the pool
+    @return - (vlib_buffer_pool_t *) pointer to the pool element;
+    vec_elt_at_index bounds-checks the index in debug images */
+static_always_inline vlib_buffer_pool_t *
+vlib_buffer_pool_get (u8 buffer_pool_index)
+{
+  vlib_buffer_main_t *bm = &buffer_main;
+  return vec_elt_at_index (bm->buffer_pools, buffer_pool_index);
+}
+
+u8 vlib_buffer_register_physmem_map (struct vlib_main_t * vm,
+ u32 physmem_map_index);
clib_error_t *vlib_buffer_main_init (struct vlib_main_t *vm);
clib_panic ("vlib buffer callbacks already registered"); \
vlib_buffer_callbacks = &__##x##_buffer_callbacks; \
} \
+static void __vlib_rm_buffer_callbacks_t_##x (void) \
+ __attribute__((__destructor__)) ; \
+static void __vlib_rm_buffer_callbacks_t_##x (void) \
+{ vlib_buffer_callbacks = 0; } \
__VA_ARGS__ vlib_buffer_callbacks_t __##x##_buffer_callbacks
/*