#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
-#include <rte_ring.h>
#include <rte_errno.h>
#include <rte_string_fns.h>
#include <rte_spinlock.h>
sz->header_size = RTE_ALIGN_CEIL(sz->header_size,
RTE_MEMPOOL_ALIGN);
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
sz->trailer_size = sizeof(struct rte_mempool_objtlr);
+#else
+ sz->trailer_size = 0;
+#endif
/* element size is at least 8-byte aligned */
sz->elt_size = RTE_ALIGN_CEIL(elt_size, sizeof(uint64_t));
}
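/*
 * Illustrative sketch (not part of the patch): RTE_ALIGN_CEIL rounds a value
 * up to the next multiple of a power-of-two alignment, which is how
 * header_size and elt_size are padded above. A minimal stand-in, assuming a
 * power-of-two alignment:
 */
#include <stdint.h>

static inline uintptr_t
align_ceil(uintptr_t v, uintptr_t align)
{
	/* round v up to the next multiple of align (align: power of two) */
	return (v + align - 1) & ~(align - 1);
}
/* e.g. align_ceil(elt_size, sizeof(uint64_t)) pads to an 8-byte boundary */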
/* not enough room to store one object */
- if (i == 0)
- return -EINVAL;
+ if (i == 0) {
+ ret = -EINVAL;
+ goto fail;
+ }
STAILQ_INSERT_TAIL(&mp->mem_list, memhdr, next);
mp->nb_mem_chunks++;
return i;
+
+fail:
+ rte_free(memhdr);
+ return ret;
}
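/*
 * Illustrative sketch (not part of the patch): the change above replaces an
 * early "return -EINVAL" with a goto so the freshly allocated memhdr is
 * released on every error path. The general idiom, with hypothetical names:
 */
#include <stdlib.h>
#include <errno.h>

static int
populate_sketch(size_t n)
{
	int ret;
	void *hdr = malloc(64);	/* stands in for the memhdr allocation */

	if (hdr == NULL)
		return -ENOMEM;
	if (n == 0) {		/* "not enough room for one object" */
		ret = -EINVAL;
		goto fail;
	}
	/* ... link hdr into the list on success ... */
	return (int)n;

fail:
	free(hdr);		/* single cleanup point for all error paths */
	return ret;
}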
/* Add objects in the pool, using a table of physical pages. Return the
 * number of objects added, or a negative value on error.
 */
/* populate with the largest group of contiguous pages */
for (n = 1; (i + n) < pg_num &&
- paddr[i] + pg_sz == paddr[i+n]; n++)
+ paddr[i + n - 1] + pg_sz == paddr[i + n]; n++)
;
ret = rte_mempool_populate_phys(mp, vaddr + i * pg_sz,
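/*
 * Illustrative sketch (not part of the patch): the old condition compared
 * every page against paddr[i], so paddr[i] + pg_sz == paddr[i+n] can only
 * hold for n == 1 and the detected run was capped at two pages. Comparing
 * adjacent pairs extends the run across any number of contiguous pages:
 */
#include <stdint.h>
#include <stddef.h>

/* count how many pages starting at index i are physically contiguous */
static unsigned
contig_run(const uint64_t *paddr, size_t pg_num, size_t i, uint64_t pg_sz)
{
	unsigned n;

	for (n = 1; i + n < pg_num &&
	     paddr[i + n - 1] + pg_sz == paddr[i + n]; n++)
		;
	return n;
}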
size_t off, phys_len;
int ret, cnt = 0;
- /* mempool must not be populated */
- if (mp->nb_mem_chunks != 0)
- return -EEXIST;
/* address and len must be page-aligned */
if (RTE_PTR_ALIGN_CEIL(addr, pg_sz) != addr)
return -EINVAL;
if (mp->nb_mem_chunks != 0)
return -EEXIST;
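/*
 * Illustrative sketch (not part of the patch): for a power-of-two page size,
 * the RTE_PTR_ALIGN_CEIL() round-trip test above is equivalent to a mask
 * test on the low bits of the address:
 */
#include <stdint.h>

static inline int
is_page_aligned(const void *addr, uintptr_t pg_sz)
{
	/* pg_sz must be a power of two */
	return ((uintptr_t)addr & (pg_sz - 1)) == 0;
}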
- if (rte_eal_has_hugepages()) {
+ if (rte_xen_dom0_supported()) {
+ pg_sz = RTE_PGSIZE_2M;
+ pg_shift = rte_bsf32(pg_sz);
+ align = pg_sz;
+ } else if (rte_eal_has_hugepages()) {
pg_shift = 0; /* not needed, zone is physically contiguous */
pg_sz = 0;
align = RTE_CACHE_LINE_SIZE;
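/*
 * Illustrative sketch (not part of the patch): rte_bsf32() returns the index
 * of the least significant set bit, so for a power-of-two page size it is
 * log2(pg_sz). A stand-in for the 2M pages forced in the Xen dom0 branch:
 */
#include <stdint.h>

static inline uint32_t
bsf32_sketch(uint32_t v)	/* stand-in for rte_bsf32(); v must be nonzero */
{
	return (uint32_t)__builtin_ctz(v);
}
/* bsf32_sketch(1 << 21) == 21, i.e. the page shift for 2M pages */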
mz->len, pg_sz,
rte_mempool_memchunk_mz_free,
(void *)(uintptr_t)mz);
- if (ret < 0)
+ if (ret < 0) {
+ rte_memzone_free(mz);
goto fail;
+ }
}
return mp->size;
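/*
 * Illustrative sketch (not part of the patch): the added rte_memzone_free()
 * prevents leaking the reserved zone when population fails. The surrounding
 * pattern, with populate_zone() as a hypothetical stand-in for the populate
 * step:
 */
#include <rte_memzone.h>
#include <rte_errno.h>

static int
populate_zone(const struct rte_memzone *mz)	/* hypothetical stand-in */
{
	(void)mz;
	return 0;
}

static int
reserve_and_populate(void)
{
	const struct rte_memzone *mz;
	int ret;

	mz = rte_memzone_reserve_aligned("sketch_mz", 1 << 20, SOCKET_ID_ANY,
					 0, RTE_CACHE_LINE_SIZE);
	if (mz == NULL)
		return -rte_errno;

	ret = populate_zone(mz);
	if (ret < 0) {
		rte_memzone_free(mz);	/* do not leak the zone on failure */
		return ret;
	}
	return 0;
}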
char *addr;
/* mempool is already populated, error */
- if (!STAILQ_EMPTY(&mp->mem_list)) {
+ if ((!STAILQ_EMPTY(&mp->mem_list)) || mp->nb_mem_chunks != 0) {
rte_errno = EINVAL;
return 0;
}
 * Since we have 4 combinations of SP/SC/MP/MC, examine the flags to
* set the correct index into the table of ops structs.
*/
- if (flags & (MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET))
+ if ((flags & MEMPOOL_F_SP_PUT) && (flags & MEMPOOL_F_SC_GET))
rte_mempool_set_ops_byname(mp, "ring_sp_sc", NULL);
else if (flags & MEMPOOL_F_SP_PUT)
rte_mempool_set_ops_byname(mp, "ring_sp_mc", NULL);
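/*
 * Illustrative sketch (not part of the patch): the old test
 * "flags & (MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET)" is true when EITHER flag
 * is set, so a pool asking only for single-producer puts was wrongly given
 * single-consumer gets as well. Requiring both flags yields the intended
 * four-way mapping, summarized by this hypothetical helper:
 */
static const char *
ops_name_for_flags(unsigned flags)
{
	int sp = (flags & MEMPOOL_F_SP_PUT) != 0;
	int sc = (flags & MEMPOOL_F_SC_GET) != 0;

	if (sp && sc)
		return "ring_sp_sc";
	if (sp)
		return "ring_sp_mc";
	if (sc)
		return "ring_mp_sc";
	return "ring_mp_mc";
}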
/*
 * Create the mempool over an already allocated chunk of memory.
 * That external memory buffer can consist of physically disjoint pages.
- * Setting vaddr to NULL, makes mempool to fallback to original behaviour
- * and allocate space for mempool and it's elements as one big chunk of
- * physically continuos memory.
+ * Setting vaddr to NULL makes the mempool fall back to rte_mempool_create()
+ * behavior.
*/
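/*
 * Illustrative sketch (not part of the patch): per the comment above,
 * passing vaddr == NULL makes the xmem variant behave like
 * rte_mempool_create(). Assuming the argument order used in this tree; all
 * names below are placeholders:
 */
/*
 *	mp = rte_mempool_xmem_create(name, n, elt_size,
 *				     cache_size, private_data_size,
 *				     mp_init, mp_init_arg,
 *				     obj_init, obj_init_arg,
 *				     socket_id, flags,
 *				     NULL, NULL, 0, 0);
 */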
struct rte_mempool *
rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
{
struct rte_tailq_entry *te = NULL;
struct rte_mempool_list *mempool_list;
+ void *tmp_te;
mempool_list = RTE_TAILQ_CAST(rte_mempool_tailq.head, rte_mempool_list);
rte_rwlock_read_lock(RTE_EAL_MEMPOOL_RWLOCK);
- TAILQ_FOREACH(te, mempool_list, next) {
+ TAILQ_FOREACH_SAFE(te, mempool_list, next, tmp_te) {
(*func)((struct rte_mempool *) te->data, arg);
}
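/*
 * Illustrative sketch (not part of the patch): TAILQ_FOREACH_SAFE saves the
 * next pointer before invoking the callback, so (*func) may free or unlink
 * the current entry without derailing the walk. Expanded, the loop above is
 * roughly:
 */
for (te = TAILQ_FIRST(mempool_list);
     te != NULL && ((tmp_te = TAILQ_NEXT(te, next)), 1);
     te = tmp_te)
	(*func)((struct rte_mempool *)te->data, arg);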