always_inline u64
vlib_get_buffer_data_physical_address (vlib_main_t * vm, u32 buffer_index)
{
- return vlib_physmem_offset_to_physical (&vm->physmem_main,
+ vlib_physmem_region_index_t pri;
+ vlib_buffer_t *b = vlib_get_buffer (vm, buffer_index);
+ pri = vm->buffer_main->buffer_pools[b->buffer_pool_index].physmem_region;
+ return vlib_physmem_offset_to_physical (vm, pri,
(((uword) buffer_index) <<
CLIB_LOG2_CACHE_LINE_BYTES) +
					  STRUCT_OFFSET_OF (vlib_buffer_t,
							    data));
}

typedef enum
{
  /* Index is unknown. */
  VLIB_BUFFER_UNKNOWN,

  /* Index is known and free/allocated. */
  VLIB_BUFFER_KNOWN_FREE,
  VLIB_BUFFER_KNOWN_ALLOCATED,
} vlib_buffer_known_state_t;
+void vlib_buffer_validate_alloc_free (vlib_main_t * vm, u32 * buffers,
+ uword n_buffers,
+ vlib_buffer_known_state_t
+ expected_state);
+
always_inline vlib_buffer_known_state_t
vlib_buffer_is_known (vlib_main_t * vm, u32 buffer_index)
{
  vlib_buffer_main_t *bm = vm->buffer_main;
  uword *p = hash_get (bm->buffer_known_hash, buffer_index);
  return p ? p[0] : VLIB_BUFFER_UNKNOWN;
}

u8 *vlib_validate_buffer (vlib_main_t * vm, u32 buffer_index,
			  uword follow_chain);
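Together, the known-state enum, vlib_buffer_validate_alloc_free and vlib_buffer_is_known give debug builds a way to catch double-frees and leaked indices: the allocator below asserts VLIB_BUFFER_KNOWN_FREE on every index it pulls off a free list. A sketch of the free-side counterpart (the actual call site is outside this diff):

  /* indices being returned must currently be known-allocated */
  vlib_buffer_validate_alloc_free (vm, buffers, n_buffers,
				   VLIB_BUFFER_KNOWN_ALLOCATED);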
-/** \brief Allocate buffers into supplied array
-
- @param vm - (vlib_main_t *) vlib main data structure pointer
- @param buffers - (u32 * ) buffer index array
- @param n_buffers - (u32) number of buffers requested
- @return - (u32) number of buffers actually allocated, may be
- less than the number requested or zero
-*/
-always_inline u32
-vlib_buffer_alloc (vlib_main_t * vm, u32 * buffers, u32 n_buffers)
-{
- vlib_buffer_main_t *bm = vm->buffer_main;
-
- ASSERT (bm->cb.vlib_buffer_alloc_cb);
-
- return bm->cb.vlib_buffer_alloc_cb (vm, buffers, n_buffers);
-}
-
always_inline u32
vlib_buffer_round_size (u32 size)
{
  return round_pow2 (size, sizeof (vlib_buffer_t));
}

/** \brief Allocate buffers from specific freelist into supplied array

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param buffers - (u32 * ) buffer index array
    @param n_buffers - (u32) number of buffers requested
    @param free_list_index - (u32) index of free list to allocate from
    @return - (u32) number of buffers actually allocated, may be
    less than the number requested or zero
*/
always_inline u32
vlib_buffer_alloc_from_free_list (vlib_main_t * vm,
				  u32 * buffers,
				  u32 n_buffers, u32 free_list_index)
{
vlib_buffer_main_t *bm = vm->buffer_main;
+ vlib_buffer_free_list_t *fl;
+ u32 *src;
+ uword len;
+
+ ASSERT (bm->cb.vlib_buffer_fill_free_list_cb);
+
+ fl = pool_elt_at_index (bm->buffer_free_list_pool, free_list_index);
+
+ len = vec_len (fl->buffers);
+
+ if (PREDICT_FALSE (len < n_buffers))
+ {
+ bm->cb.vlib_buffer_fill_free_list_cb (vm, fl, n_buffers);
+ len = vec_len (fl->buffers);
+
+      /* even if the fill callback failed to fully refill the free list,
+         hand out whatever buffers we do have */
+ n_buffers = clib_min (len, n_buffers);
+
+      /* the following code is intentionally duplicated to allow the
+         compiler to optimize the fast path when n_buffers is a
+         constant value */
+ src = fl->buffers + len - n_buffers;
+ clib_memcpy (buffers, src, n_buffers * sizeof (u32));
+ _vec_len (fl->buffers) -= n_buffers;
+
+ /* Verify that buffers are known free. */
+ vlib_buffer_validate_alloc_free (vm, buffers, n_buffers,
+ VLIB_BUFFER_KNOWN_FREE);
+
+ return n_buffers;
+ }
+
+ src = fl->buffers + len - n_buffers;
+ clib_memcpy (buffers, src, n_buffers * sizeof (u32));
+ _vec_len (fl->buffers) -= n_buffers;
- ASSERT (bm->cb.vlib_buffer_alloc_from_free_list_cb);
+ /* Verify that buffers are known free. */
+ vlib_buffer_validate_alloc_free (vm, buffers, n_buffers,
+ VLIB_BUFFER_KNOWN_FREE);
- return bm->cb.vlib_buffer_alloc_from_free_list_cb (vm, buffers, n_buffers,
- free_list_index);
+ return n_buffers;
+}
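Note that both the slow and fast paths copy from the tail of fl->buffers (src = fl->buffers + len - n_buffers), so the most recently freed indices, which are the most likely to still be cache-hot, are handed out first; the free-list spill logic at the end of this diff preserves that property by evicting from the head of the vector.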
+
+/** \brief Allocate buffers into supplied array
+
+ @param vm - (vlib_main_t *) vlib main data structure pointer
+ @param buffers - (u32 * ) buffer index array
+ @param n_buffers - (u32) number of buffers requested
+ @return - (u32) number of buffers actually allocated, may be
+ less than the number requested or zero
+*/
+always_inline u32
+vlib_buffer_alloc (vlib_main_t * vm, u32 * buffers, u32 n_buffers)
+{
+ return vlib_buffer_alloc_from_free_list (vm, buffers, n_buffers,
+ VLIB_BUFFER_DEFAULT_FREE_LIST_INDEX);
}
/** \brief Free buffers
    Frees the entire buffer chain for each buffer

    @param vm - (vlib_main_t *) vlib main data structure pointer
    @param buffers - (u32 * ) buffer index array
    @param n_buffers - (u32) number of buffers to free
*/
always_inline void
vlib_buffer_free (vlib_main_t * vm, u32 * buffers, u32 n_buffers)
{
  vlib_buffer_main_t *bm = vm->buffer_main;

  ASSERT (bm->cb.vlib_buffer_free_cb);

  return bm->cb.vlib_buffer_free_cb (vm, buffers, n_buffers);
}
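A minimal usage sketch of the alloc/free pair above; the wrapper function and its error handling are illustrative, not part of this patch:

static void
example_alloc_and_free (vlib_main_t * vm)
{
  u32 bi[32];
  /* may return fewer indices than requested, or zero */
  u32 n_alloc = vlib_buffer_alloc (vm, bi, ARRAY_LEN (bi));
  if (n_alloc == 0)
    return;			/* free list empty and refill failed */
  /* ... use the buffers via vlib_get_buffer (vm, bi[i]) ... */
  vlib_buffer_free (vm, bi, n_alloc);
}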
-always_inline void *
-vlib_physmem_alloc_aligned (vlib_main_t * vm, clib_error_t ** error,
- uword n_bytes, uword alignment)
-{
- void *r =
- vm->os_physmem_alloc_aligned (&vm->physmem_main, n_bytes, alignment);
- if (!r)
- *error =
- clib_error_return (0, "failed to allocate %wd bytes of I/O memory",
- n_bytes);
- else
- *error = 0;
- return r;
-}
-
-/* By default allocate I/O memory with cache line alignment. */
-always_inline void *
-vlib_physmem_alloc (vlib_main_t * vm, clib_error_t ** error, uword n_bytes)
-{
- return vlib_physmem_alloc_aligned (vm, error, n_bytes,
- CLIB_CACHE_LINE_BYTES);
-}
-
-always_inline void
-vlib_physmem_free (vlib_main_t * vm, void *mem)
-{
- return vm->os_physmem_free (mem);
-}
-
-always_inline u64
-vlib_physmem_virtual_to_physical (vlib_main_t * vm, void *mem)
-{
- vlib_physmem_main_t *pm = &vm->physmem_main;
- uword o = pointer_to_uword (mem) - pm->virtual.start;
- return vlib_physmem_offset_to_physical (pm, o);
-}
-
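The helpers removed above took a vlib_physmem_main_t pointer and assumed a single physmem heap. After this rework physical memory is split into regions, and callers pass the vlib_main_t plus a region index instead, as the first hunk of this diff shows. A sketch of the new call shape, with offset standing in for a region-relative byte offset:

  vlib_physmem_region_index_t pri;
  pri = vm->buffer_main->buffer_pools[b->buffer_pool_index].physmem_region;
  u64 pa = vlib_physmem_offset_to_physical (vm, pri, offset);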
/* Append given data to end of buffer, possibly allocating new buffers. */
u32 vlib_buffer_add_data (vlib_main_t * vm,
			  u32 free_list_index,
			  u32 buffer_index, void *data, u32 n_data_bytes);
_(current_length);
_(flags);
#undef _
- ASSERT (dst->total_length_not_including_first_buffer == 0);
+ /* ASSERT (dst->total_length_not_including_first_buffer == 0); */
+  /* total_length_not_including_first_buffer is no longer part of the
+   * template, so it may not actually be zeroed for some buffers. One
+   * option is to uncomment the assignment below (at a small per-copy
+   * cost); the other is to simply not care. */
+ /* dst->total_length_not_including_first_buffer = 0; */
ASSERT (dst->n_add_refs == 0);
}
static_always_inline void
vlib_buffer_add_to_free_list (vlib_main_t * vm,
			      vlib_buffer_free_list_t * f,
			      u32 buffer_index, u8 do_init)
{
vlib_buffer_t *b;
- u32 i;
b = vlib_get_buffer (vm, buffer_index);
if (PREDICT_TRUE (do_init))
vlib_buffer_init_for_free_list (b, f);
vec_add1_aligned (f->buffers, buffer_index, CLIB_CACHE_LINE_BYTES);
- if (vec_len (f->buffers) > 3 * VLIB_FRAME_SIZE)
+ if (vec_len (f->buffers) > 4 * VLIB_FRAME_SIZE)
{
+ vlib_buffer_free_list_t *mf;
+ mf = vlib_buffer_get_free_list (vlib_mains[0], f->index);
+ clib_spinlock_lock (&mf->global_buffers_lock);
/* keep last stored buffers, as they are more likely hot in the cache */
- for (i = 0; i < VLIB_FRAME_SIZE; i++)
- vm->os_physmem_free (vlib_get_buffer (vm, i));
+ vec_add_aligned (mf->global_buffers, f->buffers, VLIB_FRAME_SIZE,
+ CLIB_CACHE_LINE_BYTES);
vec_delete (f->buffers, VLIB_FRAME_SIZE, 0);
+ f->n_alloc -= VLIB_FRAME_SIZE;
+ clib_spinlock_unlock (&mf->global_buffers_lock);
}
}
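Design note: with the default VLIB_FRAME_SIZE of 256, a per-thread free list now caches up to 4 * 256 = 1024 indices before spilling, up from the previous 3 * VLIB_FRAME_SIZE threshold. The spill hands one frame's worth of the oldest indices (the head of the vector) to the main thread's global list under its spinlock, so recently freed, likely cache-hot buffers stay local to the freeing thread and the lock is taken less often.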