/* Always allocate new buffers in reasonably large sized chunks. */
n = clib_max (n, fl->min_n_buffers_each_physmem_alloc);
- vec_validate (vm->mbuf_alloc_list, n - 1);
+ vec_validate_aligned (vm->mbuf_alloc_list, n - 1, CLIB_CACHE_LINE_BYTES);
if (rte_mempool_get_bulk (rmp, vm->mbuf_alloc_list, n) < 0)
return 0;
+ dpdk_mempool_private_t *privp = rte_mempool_get_priv (rmp);
+
_vec_len (vm->mbuf_alloc_list) = n;
i = 0;
mb2 = vm->mbuf_alloc_list[i + 2];
mb3 = vm->mbuf_alloc_list[i + 3];
- ASSERT (rte_mbuf_refcnt_read (mb0) == 0);
- ASSERT (rte_mbuf_refcnt_read (mb1) == 0);
- ASSERT (rte_mbuf_refcnt_read (mb2) == 0);
- ASSERT (rte_mbuf_refcnt_read (mb3) == 0);
-
- rte_mbuf_refcnt_set (mb0, 1);
- rte_mbuf_refcnt_set (mb1, 1);
- rte_mbuf_refcnt_set (mb2, 1);
- rte_mbuf_refcnt_set (mb3, 1);
-
b0 = vlib_buffer_from_rte_mbuf (mb0);
b1 = vlib_buffer_from_rte_mbuf (mb1);
b2 = vlib_buffer_from_rte_mbuf (mb2);
vlib_buffer_init_for_free_list (b2, fl);
vlib_buffer_init_for_free_list (b3, fl);
+ b0->buffer_pool_index = privp->buffer_pool_index;
+ b1->buffer_pool_index = privp->buffer_pool_index;
+ b2->buffer_pool_index = privp->buffer_pool_index;
+ b3->buffer_pool_index = privp->buffer_pool_index;
+
if (fl->buffer_init_function)
{
fl->buffer_init_function (vm, fl, &bi0, 1);
{
mb0 = vm->mbuf_alloc_list[i];
- ASSERT (rte_mbuf_refcnt_read (mb0) == 0);
- rte_mbuf_refcnt_set (mb0, 1);
-
b0 = vlib_buffer_from_rte_mbuf (mb0);
bi0 = vlib_get_buffer_index (vm, b0);
vec_add1_aligned (fl->buffers, bi0, CLIB_CACHE_LINE_BYTES);
vlib_buffer_init_for_free_list (b0, fl);
+ b0->buffer_pool_index = privp->buffer_pool_index;
if (fl->buffer_init_function)
fl->buffer_init_function (vm, fl, &bi0, 1);
vlib_buffer_t *b;
b = vlib_get_buffer (vm, buffers[i]);
-
+ VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b);
fl = vlib_buffer_get_buffer_free_list (vm, b, &fi);
/* The only current use of this callback: multicast recycle */
}
clib_error_t *
-vlib_buffer_pool_create (vlib_main_t * vm, unsigned num_mbufs,
+dpdk_buffer_pool_create (vlib_main_t * vm, unsigned num_mbufs,
unsigned socket_id)
{
dpdk_main_t *dm = &dpdk_main;
- vlib_physmem_main_t *vpm = &vm->physmem_main;
struct rte_mempool *rmp;
- int i;
+ dpdk_mempool_private_t priv;
+ vlib_physmem_region_t *pr;
+ vlib_physmem_region_index_t pri;
+ u8 *pool_name;
+ unsigned elt_size;
+ u32 size, obj_size;
+ i32 i, ret;
vec_validate_aligned (dm->pktmbuf_pools, socket_id, CLIB_CACHE_LINE_BYTES);
if (dm->pktmbuf_pools[socket_id])
return 0;
- u8 *pool_name = format (0, "mbuf_pool_socket%u%c", socket_id, 0);
+ pool_name = format (0, "dpdk_mbuf_pool_socket%u%c", socket_id, 0);
+
+ elt_size = sizeof (struct rte_mbuf) +
+ VLIB_BUFFER_HDR_SIZE /* priv size */ +
+ VLIB_BUFFER_PRE_DATA_SIZE + VLIB_BUFFER_DATA_SIZE; /*data room size */
- rmp = rte_pktmbuf_pool_create ((char *) pool_name, /* pool name */
- num_mbufs, /* number of mbufs */
- 512, /* cache size */
- VLIB_BUFFER_HDR_SIZE, /* priv size */
- VLIB_BUFFER_PRE_DATA_SIZE + VLIB_BUFFER_DATA_SIZE, /* dataroom size */
- socket_id); /* cpu socket */
+ obj_size = rte_mempool_calc_obj_size (elt_size, 0, 0);
+ size = rte_mempool_xmem_size (num_mbufs, obj_size, 21);
+ clib_error_t *error = 0;
+ error = vlib_physmem_region_alloc (vm, (char *) pool_name, size, socket_id,
+ 0, &pri);
+ if (error)
+ clib_error_report (error);
+
+ pr = vlib_physmem_get_region (vm, pri);
+
+ priv.mbp_priv.mbuf_data_room_size = VLIB_BUFFER_PRE_DATA_SIZE +
+ VLIB_BUFFER_DATA_SIZE;
+ priv.mbp_priv.mbuf_priv_size = VLIB_BUFFER_HDR_SIZE;
+
+#if 0
+ /* Check that pg_shift parameter is valid. */
+ if (pg_shift > MEMPOOL_PG_SHIFT_MAX)
+ {
+ rte_errno = EINVAL;
+ return NULL;
+ }
+#endif
+ rmp = rte_mempool_create_empty ((char *) pool_name, /* pool name */
+ num_mbufs, /* number of mbufs */
+ elt_size, 512, /* cache size */
+ sizeof (dpdk_mempool_private_t), /* private data size */
+ socket_id, 0); /* flags */
if (rmp)
{
- {
- uword this_pool_end;
- uword this_pool_start;
- uword this_pool_size;
- uword save_vpm_start, save_vpm_end, save_vpm_size;
- struct rte_mempool_memhdr *memhdr;
+ rte_mempool_set_ops_byname (rmp, RTE_MBUF_DEFAULT_MEMPOOL_OPS, NULL);
- this_pool_start = ~0;
- this_pool_end = 0;
+ /* call the mempool priv initializer */
+ rte_pktmbuf_pool_init (rmp, &priv);
- STAILQ_FOREACH (memhdr, &rmp->mem_list, next)
- {
- if (((uword) (memhdr->addr + memhdr->len)) > this_pool_end)
- this_pool_end = (uword) (memhdr->addr + memhdr->len);
- if (((uword) memhdr->addr) < this_pool_start)
- this_pool_start = (uword) (memhdr->addr);
- }
- ASSERT (this_pool_start < ~0 && this_pool_end > 0);
- this_pool_size = this_pool_end - this_pool_start;
-
- if (CLIB_DEBUG > 1)
- {
- clib_warning ("%s: pool start %llx pool end %llx pool size %lld",
- pool_name, this_pool_start, this_pool_end,
- this_pool_size);
- clib_warning
- ("before: virtual.start %llx virtual.end %llx virtual.size %lld",
- vpm->virtual.start, vpm->virtual.end, vpm->virtual.size);
- }
-
- save_vpm_start = vpm->virtual.start;
- save_vpm_end = vpm->virtual.end;
- save_vpm_size = vpm->virtual.size;
-
- if ((this_pool_start < vpm->virtual.start) || vpm->virtual.start == 0)
- vpm->virtual.start = this_pool_start;
- if (this_pool_end > vpm->virtual.end)
- vpm->virtual.end = this_pool_end;
-
- vpm->virtual.size = vpm->virtual.end - vpm->virtual.start;
-
- if (CLIB_DEBUG > 1)
- {
- clib_warning
- ("after: virtual.start %llx virtual.end %llx virtual.size %lld",
- vpm->virtual.start, vpm->virtual.end, vpm->virtual.size);
- }
-
- /* check if fits into buffer index range */
- if ((u64) vpm->virtual.size >
- ((u64) 1 << (32 + CLIB_LOG2_CACHE_LINE_BYTES)))
- {
- clib_warning ("physmem: virtual size out of range!");
- vpm->virtual.start = save_vpm_start;
- vpm->virtual.end = save_vpm_end;
- vpm->virtual.size = save_vpm_size;
- rmp = 0;
- }
- }
- if (rmp)
+ ret = rte_mempool_populate_phys_tab (rmp, pr->mem, pr->page_table,
+ pr->n_pages, pr->log2_page_size,
+ NULL, NULL);
+ if (ret == (i32) rmp->size)
{
+ /* call the object initializers */
+ rte_mempool_obj_iter (rmp, rte_pktmbuf_init, 0);
+
+ dpdk_mempool_private_t *privp = rte_mempool_get_priv (rmp);
+ privp->buffer_pool_index = vlib_buffer_add_physmem_region (vm, pri);
+
dm->pktmbuf_pools[socket_id] = rmp;
- vec_free (pool_name);
+
return 0;
}
+
+ rte_mempool_free (rmp);
}
vec_free (pool_name);
{
if (dm->pktmbuf_pools[i])
{
- clib_warning
- ("WARNING: Failed to allocate mempool for CPU socket %u. "
- "Threads running on socket %u will use socket %u mempool.",
- socket_id, socket_id, i);
+ clib_warning ("WARNING: Failed to allocate mempool for CPU socket "
+ "%u. Threads running on socket %u will use socket %u "
+ "mempool.", socket_id, socket_id, i);
dm->pktmbuf_pools[socket_id] = dm->pktmbuf_pools[i];
return 0;
}
VLIB_INIT_FUNCTION (buffer_state_validation_init);
#endif
-static vlib_buffer_callbacks_t callbacks = {
+#if CLI_DEBUG
+/* Counters accumulated by dpdk_buffer_validate_trajectory() while
+ * rte_mempool_obj_iter() walks every mbuf in a pool. */
+struct dpdk_validate_buf_result
+{
+  u32 invalid;		/* pre_data[0] set to an unexpected value */
+  u32 uninitialized;	/* pre_data[0] still holds the poison marker */
+};
+
+/* Sentinel written into b->pre_data[0] by the poison pass; a buffer
+ * still carrying it at validation time never took the init path. */
+#define DPDK_TRAJECTORY_POISON 31
+
+/* rte_mempool_obj_iter() callback: classify one mbuf's trajectory
+ * marker.  opaque points at a struct dpdk_validate_buf_result whose
+ * counters are bumped for any buffer with a non-zero pre_data[0]
+ * (0 = clean, DPDK_TRAJECTORY_POISON = uninitialized, other = invalid). */
+static void
+dpdk_buffer_validate_trajectory (struct rte_mempool *mp, void *opaque,
+				 void *obj, unsigned obj_idx)
+{
+  vlib_buffer_t *b;
+  struct dpdk_validate_buf_result *counter = opaque;
+  b = vlib_buffer_from_rte_mbuf ((struct rte_mbuf *) obj);
+  if (b->pre_data[0] != 0)
+    {
+      if (b->pre_data[0] == DPDK_TRAJECTORY_POISON)
+	counter->uninitialized++;
+      else
+	counter->invalid++;
+    }
+}
+
+/* Walk every mbuf in every per-socket pktmbuf pool and count buffers
+ * whose trajectory marker is not in the expected state.
+ *
+ * @param uninitialized if non-NULL, receives the count of buffers
+ *                      still carrying DPDK_TRAJECTORY_POISON
+ * @return number of buffers with an invalid (corrupted) marker
+ */
+int
+dpdk_buffer_validate_trajectory_all (u32 * uninitialized)
+{
+  dpdk_main_t *dm = &dpdk_main;
+  struct dpdk_validate_buf_result counter = { 0 };
+  int i;
+
+  for (i = 0; i < vec_len (dm->pktmbuf_pools); i++)
+    if (dm->pktmbuf_pools[i])	/* per-socket slot may be empty (NULL) */
+      rte_mempool_obj_iter (dm->pktmbuf_pools[i],
+			    dpdk_buffer_validate_trajectory, &counter);
+  if (uninitialized)
+    *uninitialized = counter.uninitialized;
+  return counter.invalid;
+}
+
+/* rte_mempool_obj_iter() callback: stamp one mbuf's vlib buffer with
+ * the trajectory poison marker so later validation can detect buffers
+ * that never passed through the init path.  opaque is unused. */
+static void
+dpdk_buffer_poison_trajectory (struct rte_mempool *mp, void *opaque,
+			       void *obj, unsigned obj_idx)
+{
+  vlib_buffer_t *b;
+  b = vlib_buffer_from_rte_mbuf ((struct rte_mbuf *) obj);
+  b->pre_data[0] = DPDK_TRAJECTORY_POISON;
+}
+
+/* Stamp every mbuf in every per-socket pktmbuf pool with the
+ * trajectory poison value; dpdk_buffer_validate_trajectory_all()
+ * later reports any buffer still carrying it. */
+void
+dpdk_buffer_poison_trajectory_all (void)
+{
+  dpdk_main_t *dm = &dpdk_main;
+  int i;
+
+  for (i = 0; i < vec_len (dm->pktmbuf_pools); i++)
+    if (dm->pktmbuf_pools[i])	/* per-socket slot may be empty (NULL) */
+      rte_mempool_obj_iter (dm->pktmbuf_pools[i],
+			    dpdk_buffer_poison_trajectory, 0);
+}
+#endif
+
+/* *INDENT-OFF* */
+/* Register the DPDK implementations of the vlib buffer-manager
+ * callbacks at load time (supersedes the explicit dpdk_buffer_init()
+ * init function removed by this change). */
+VLIB_BUFFER_REGISTER_CALLBACKS (dpdk, static) = {
  .vlib_buffer_alloc_cb = &dpdk_buffer_alloc,
  .vlib_buffer_alloc_from_free_list_cb = &dpdk_buffer_alloc_from_free_list,
  .vlib_buffer_free_cb = &dpdk_buffer_free,
  .vlib_packet_template_init_cb = &dpdk_packet_template_init,
  .vlib_buffer_delete_free_list_cb = &dpdk_buffer_delete_free_list,
};
-
-static clib_error_t *
-dpdk_buffer_init (vlib_main_t * vm)
-{
- vlib_buffer_cb_register (vm, &callbacks);
- return 0;
-}
-
-VLIB_INIT_FUNCTION (dpdk_buffer_init);
+/* *INDENT-ON* */
/** @endcond */
/*