diff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c
index d78d02b7..3cb9e42c 100644
--- a/lib/librte_mempool/rte_mempool.c
+++ b/lib/librte_mempool/rte_mempool.c
@@ -55,7 +55,6 @@
 #include <rte_per_lcore.h>
 #include <rte_lcore.h>
 #include <rte_branch_prediction.h>
-#include <rte_ring.h>
 #include <rte_errno.h>
 #include <rte_string_fns.h>
 #include <rte_spinlock.h>
@@ -199,7 +198,11 @@ rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags,
 	sz->header_size = RTE_ALIGN_CEIL(sz->header_size,
 		RTE_MEMPOOL_ALIGN);
 
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
 	sz->trailer_size = sizeof(struct rte_mempool_objtlr);
+#else
+	sz->trailer_size = 0;
+#endif
 
 	/* element size is 8 bytes-aligned at least */
 	sz->elt_size = RTE_ALIGN_CEIL(elt_size, sizeof(uint64_t));
@@ -393,12 +396,18 @@ rte_mempool_populate_phys(struct rte_mempool *mp, char *vaddr,
 	}
 
 	/* not enough room to store one object */
-	if (i == 0)
-		return -EINVAL;
+	if (i == 0) {
+		ret = -EINVAL;
+		goto fail;
+	}
 
 	STAILQ_INSERT_TAIL(&mp->mem_list, memhdr, next);
 	mp->nb_mem_chunks++;
 	return i;
+
+fail:
+	rte_free(memhdr);
+	return ret;
 }
 
 /* Add objects in the pool, using a table of physical pages. Return the
@@ -425,7 +434,7 @@ rte_mempool_populate_phys_tab(struct rte_mempool *mp, char *vaddr,
 
 	/* populate with the largest group of contiguous pages */
 	for (n = 1; (i + n) < pg_num &&
-			paddr[i] + pg_sz == paddr[i+n]; n++)
+			paddr[i + n - 1] + pg_sz == paddr[i + n]; n++)
 		;
 
 	ret = rte_mempool_populate_phys(mp, vaddr + i * pg_sz,
@@ -453,9 +462,6 @@ rte_mempool_populate_virt(struct rte_mempool *mp, char *addr,
 	size_t off, phys_len;
 	int ret, cnt = 0;
 
-	/* mempool must not be populated */
-	if (mp->nb_mem_chunks != 0)
-		return -EEXIST;
 	/* address and len must be page-aligned */
 	if (RTE_PTR_ALIGN_CEIL(addr, pg_sz) != addr)
 		return -EINVAL;
@@ -524,7 +530,11 @@ rte_mempool_populate_default(struct rte_mempool *mp)
 	if (mp->nb_mem_chunks != 0)
 		return -EEXIST;
 
-	if (rte_eal_has_hugepages()) {
+	if (rte_xen_dom0_supported()) {
+		pg_sz = RTE_PGSIZE_2M;
+		pg_shift = rte_bsf32(pg_sz);
+		align = pg_sz;
+	} else if (rte_eal_has_hugepages()) {
 		pg_shift = 0; /* not needed, zone is physically contiguous */
 		pg_sz = 0;
 		align = RTE_CACHE_LINE_SIZE;
@@ -571,8 +581,10 @@ rte_mempool_populate_default(struct rte_mempool *mp)
 			mz->len, pg_sz,
 			rte_mempool_memchunk_mz_free,
 			(void *)(uintptr_t)mz);
-		if (ret < 0)
+		if (ret < 0) {
+			rte_memzone_free(mz);
 			goto fail;
+		}
 	}
 
 	return mp->size;
@@ -613,7 +625,7 @@ rte_mempool_populate_anon(struct rte_mempool *mp)
 	char *addr;
 
 	/* mempool is already populated, error */
-	if (!STAILQ_EMPTY(&mp->mem_list)) {
+	if ((!STAILQ_EMPTY(&mp->mem_list)) || mp->nb_mem_chunks != 0) {
 		rte_errno = EINVAL;
 		return 0;
 	}
@@ -871,7 +883,7 @@ rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
 	 * Since we have 4 combinations of the SP/SC/MP/MC examine the flags to
 	 * set the correct index into the table of ops structs.
 	 */
-	if (flags & (MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET))
+	if ((flags & MEMPOOL_F_SP_PUT) && (flags & MEMPOOL_F_SC_GET))
		rte_mempool_set_ops_byname(mp, "ring_sp_sc", NULL);
 	else if (flags & MEMPOOL_F_SP_PUT)
 		rte_mempool_set_ops_byname(mp, "ring_sp_mc", NULL);
@@ -901,9 +913,8 @@ rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
 /*
  * Create the mempool over already allocated chunk of memory.
  * That external memory buffer can consist of physically disjoint pages.
- * Setting vaddr to NULL, makes mempool to fallback to original behaviour
- * and allocate space for mempool and it's elements as one big chunk of
- * physically continuos memory.
+ * Setting vaddr to NULL makes the mempool fall back to rte_mempool_create()
+ * behavior.
  */
 struct rte_mempool *
 rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
@@ -1275,12 +1286,13 @@ void rte_mempool_walk(void (*func)(struct rte_mempool *, void *),
 {
 	struct rte_tailq_entry *te = NULL;
 	struct rte_mempool_list *mempool_list;
+	void *tmp_te;
 
 	mempool_list = RTE_TAILQ_CAST(rte_mempool_tailq.head, rte_mempool_list);
 
 	rte_rwlock_read_lock(RTE_EAL_MEMPOOL_RWLOCK);
 
-	TAILQ_FOREACH(te, mempool_list, next) {
+	TAILQ_FOREACH_SAFE(te, mempool_list, next, tmp_te) {
 		(*func)((struct rte_mempool *) te->data, arg);
 	}
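
Two of the changes above are easy to misread, so here are two standalone C sketches. They are illustration only, not DPDK code; the helper names and sample addresses are stand-ins invented for the example.

First, the one-line change in rte_mempool_populate_phys_tab(): the old predicate compared every candidate page against the first page of the run, so a contiguous run could never grow past two pages; the fixed predicate compares each page with its immediate predecessor and accepts the whole run.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

typedef uint64_t phys_addr_t;	/* stand-in for DPDK's phys_addr_t */

#define PG_SZ 0x1000		/* assume 4 KiB pages for the example */

/* Length of the contiguous page group starting at index i, using either
 * the old predicate (anchored on page i) or the fixed one (neighbours). */
static size_t
group_len(const phys_addr_t *paddr, size_t i, size_t pg_num, int fixed)
{
	size_t n;

	for (n = 1; (i + n) < pg_num &&
			(fixed ? paddr[i + n - 1] + PG_SZ == paddr[i + n] :
				 paddr[i] + PG_SZ == paddr[i + n]); n++)
		;
	return n;
}

int
main(void)
{
	/* four physically contiguous pages, then a disjoint one */
	const phys_addr_t paddr[] = {
		0x100000, 0x101000, 0x102000, 0x103000, 0x200000,
	};
	size_t pg_num = sizeof(paddr) / sizeof(paddr[0]);

	printf("old predicate groups %zu pages\n",
		group_len(paddr, 0, pg_num, 0));	/* prints 2 */
	printf("fixed predicate groups %zu pages\n",
		group_len(paddr, 0, pg_num, 1));	/* prints 4 */
	return 0;
}

With the old test, the populate loop split every physically contiguous region into two-page chunks, one memory chunk header per chunk, and an object spanning more than two pages could never be placed.

Second, the ops-selection change in rte_mempool_create(): with a bitwise OR mask, flags & (MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET) is non-zero when either flag is set, so a pool created with only MEMPOOL_F_SP_PUT was wrongly bound to the single-producer/single-consumer "ring_sp_sc" ops. Testing each bit separately selects "ring_sp_sc" only when both flags are present. A minimal sketch of the fixed selection, with flag values mirroring rte_mempool.h:

#include <stdio.h>

#define MEMPOOL_F_SP_PUT 0x0004
#define MEMPOOL_F_SC_GET 0x0008

/* Sketch of the fixed ops-selection logic. */
static const char *
pick_ops(unsigned flags)
{
	if ((flags & MEMPOOL_F_SP_PUT) && (flags & MEMPOOL_F_SC_GET))
		return "ring_sp_sc";
	else if (flags & MEMPOOL_F_SP_PUT)
		return "ring_sp_mc";
	else if (flags & MEMPOOL_F_SC_GET)
		return "ring_mp_sc";
	else
		return "ring_mp_mc";
}

int
main(void)
{
	unsigned flags = MEMPOOL_F_SP_PUT;	/* single producer only */

	/* old test: non-zero although MEMPOOL_F_SC_GET is not set */
	printf("old test matches: %d\n",
		!!(flags & (MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET)));	/* 1 */
	printf("selected ops: %s\n", pick_ops(flags));	/* ring_sp_mc */
	return 0;
}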