Imported Upstream version 17.05.2
[deb_dpdk.git] / lib / librte_mempool / rte_mempool.c
diff --git a/lib/librte_mempool/rte_mempool.c b/lib/librte_mempool/rte_mempool.c
index d78d02b..6fc3c9c 100644
--- a/lib/librte_mempool/rte_mempool.c
+++ b/lib/librte_mempool/rte_mempool.c
@@ -55,7 +55,6 @@
 #include <rte_per_lcore.h>
 #include <rte_lcore.h>
 #include <rte_branch_prediction.h>
-#include <rte_ring.h>
 #include <rte_errno.h>
 #include <rte_string_fns.h>
 #include <rte_spinlock.h>
@@ -199,7 +198,11 @@ rte_mempool_calc_obj_size(uint32_t elt_size, uint32_t flags,
 	sz->header_size = RTE_ALIGN_CEIL(sz->header_size,
 		RTE_MEMPOOL_ALIGN);
 
+#ifdef RTE_LIBRTE_MEMPOOL_DEBUG
 	sz->trailer_size = sizeof(struct rte_mempool_objtlr);
+#else
+	sz->trailer_size = 0;
+#endif
 
 	/* element size is 8 bytes-aligned at least */
 	sz->elt_size = RTE_ALIGN_CEIL(elt_size, sizeof(uint64_t));
@@ -425,7 +428,7 @@ rte_mempool_populate_phys_tab(struct rte_mempool *mp, char *vaddr,
 	/* populate with the largest group of contiguous pages */
 	for (n = 1; (i + n) < pg_num &&
-			paddr[i] + pg_sz == paddr[i+n]; n++)
+			paddr[i + n - 1] + pg_sz == paddr[i + n]; n++)
 		;
 
 	ret = rte_mempool_populate_phys(mp, vaddr + i * pg_sz,
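
The hunk above fixes the contiguity test used when grouping pages: the old expression compared every candidate page against paddr[i], so it could never extend the run past the second page, while the new expression compares each page with its immediate predecessor. A rough standalone sketch of the corrected check (plain C, not the DPDK source; contig_run and the sample addresses are made up for illustration):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t phys_addr_t;

/* Count how many pages starting at index i are physically contiguous,
 * using the corrected pairwise comparison from the hunk above. */
static unsigned
contig_run(const phys_addr_t *paddr, unsigned i, unsigned pg_num, size_t pg_sz)
{
	unsigned n;

	for (n = 1; (i + n) < pg_num &&
		     paddr[i + n - 1] + pg_sz == paddr[i + n]; n++)
		;
	return n;
}

int main(void)
{
	/* Three physically contiguous 4 KiB pages followed by a gap. */
	const size_t pg_sz = 4096;
	phys_addr_t paddr[] = { 0x1000, 0x2000, 0x3000, 0x9000 };

	/* The corrected check groups 3 pages; the old check
	 * (paddr[i] + pg_sz == paddr[i + n]) would stop at 2,
	 * because paddr[0] + pg_sz never equals paddr[2]. */
	printf("contiguous pages: %u\n", contig_run(paddr, 0, 4, pg_sz));
	return 0;
}
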
@@ -473,7 +476,7 @@ rte_mempool_populate_virt(struct rte_mempool *mp, char *addr,
 		/* required for xen_dom0 to get the machine address */
 		paddr = rte_mem_phy2mch(-1, paddr);
 
-		if (paddr == RTE_BAD_PHYS_ADDR) {
+		if (paddr == RTE_BAD_PHYS_ADDR && rte_eal_has_hugepages()) {
 			ret = -EINVAL;
 			goto fail;
 		}
@@ -524,7 +527,11 @@ rte_mempool_populate_default(struct rte_mempool *mp)
 	if (mp->nb_mem_chunks != 0)
 		return -EEXIST;
 
-	if (rte_eal_has_hugepages()) {
+	if (rte_xen_dom0_supported()) {
+		pg_sz = RTE_PGSIZE_2M;
+		pg_shift = rte_bsf32(pg_sz);
+		align = pg_sz;
+	} else if (rte_eal_has_hugepages()) {
 		pg_shift = 0; /* not needed, zone is physically contiguous */
 		pg_sz = 0;
 		align = RTE_CACHE_LINE_SIZE;
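
In the rte_xen_dom0_supported() branch added above, the pool is populated in 2 MB chunks and pg_shift is derived from the page size with rte_bsf32(). A small self-contained check of that arithmetic (plain C, not DPDK; bsf32 here is a local stand-in assumed to behave like rte_bsf32):

#include <stdint.h>
#include <assert.h>

/* Index of the least-significant set bit, the value rte_bsf32() is
 * expected to return for a power-of-two page size. */
static uint32_t bsf32(uint32_t v)
{
	return (uint32_t)__builtin_ctz(v);
}

int main(void)
{
	const uint32_t pg_sz = 1u << 21;	/* 2 MB, i.e. RTE_PGSIZE_2M */
	uint32_t pg_shift = bsf32(pg_sz);	/* 21 */

	/* Shifting by pg_shift recovers the page size, so objects can be
	 * laid out and iterated in pg_sz-aligned, pg_sz-sized chunks. */
	assert(pg_shift == 21 && (1u << pg_shift) == pg_sz);
	return 0;
}
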
@@ -571,8 +578,10 @@ rte_mempool_populate_default(struct rte_mempool *mp)
 			mz->len, pg_sz,
 			rte_mempool_memchunk_mz_free,
 			(void *)(uintptr_t)mz);
-		if (ret < 0)
+		if (ret < 0) {
+			rte_memzone_free(mz);
 			goto fail;
+		}
 	}
 
 	return mp->size;
@@ -809,7 +818,6 @@ rte_mempool_create_empty(const char *name, unsigned n, unsigned elt_size,
 		goto exit_unlock;
 	}
 	mp->mz = mz;
-	mp->socket_id = socket_id;
 	mp->size = n;
 	mp->flags = flags;
 	mp->socket_id = socket_id;
@@ -860,6 +868,7 @@ rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
 	rte_mempool_obj_cb_t *obj_init, void *obj_init_arg,
 	int socket_id, unsigned flags)
 {
+	int ret;
 	struct rte_mempool *mp;
 
 	mp = rte_mempool_create_empty(name, n, elt_size, cache_size,
@@ -871,14 +880,17 @@ rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
 	 * Since we have 4 combinations of the SP/SC/MP/MC examine the flags to
 	 * set the correct index into the table of ops structs.
 	 */
-	if (flags & (MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET))
-		rte_mempool_set_ops_byname(mp, "ring_sp_sc", NULL);
+	if ((flags & MEMPOOL_F_SP_PUT) && (flags & MEMPOOL_F_SC_GET))
+		ret = rte_mempool_set_ops_byname(mp, "ring_sp_sc", NULL);
 	else if (flags & MEMPOOL_F_SP_PUT)
-		rte_mempool_set_ops_byname(mp, "ring_sp_mc", NULL);
+		ret = rte_mempool_set_ops_byname(mp, "ring_sp_mc", NULL);
 	else if (flags & MEMPOOL_F_SC_GET)
-		rte_mempool_set_ops_byname(mp, "ring_mp_sc", NULL);
+		ret = rte_mempool_set_ops_byname(mp, "ring_mp_sc", NULL);
 	else
-		rte_mempool_set_ops_byname(mp, "ring_mp_mc", NULL);
+		ret = rte_mempool_set_ops_byname(mp, "ring_mp_mc", NULL);
+
+	if (ret)
+		goto fail;
 
 	/* call the mempool priv initializer */
 	if (mp_init)
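
The hunk above changes the ops selection so that the single-producer/single-consumer ring is chosen only when both flags are present, and the return value of rte_mempool_set_ops_byname() is now checked. A hedged caller-side sketch of how those flags are passed in (the pool name, element size, element count and cache size are arbitrary example values, and EAL initialization is assumed to have already happened):

#include <stdio.h>

#include <rte_mempool.h>
#include <rte_errno.h>

static struct rte_mempool *
create_spsc_pool(void)
{
	/* With both flags set, the fixed code selects the "ring_sp_sc" ops;
	 * previously either flag alone was enough to pick sp_sc. */
	struct rte_mempool *mp = rte_mempool_create("example_pool",
			4096,			/* number of elements */
			2048,			/* element size in bytes */
			256,			/* per-lcore cache size */
			0,			/* private data size */
			NULL, NULL,		/* no pool init callback */
			NULL, NULL,		/* no per-object init callback */
			SOCKET_ID_ANY,
			MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET);

	if (mp == NULL)
		printf("mempool creation failed: %s\n",
			rte_strerror(rte_errno));
	return mp;
}
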
@@ -901,9 +913,8 @@ rte_mempool_create(const char *name, unsigned n, unsigned elt_size,
 /*
  * Create the mempool over already allocated chunk of memory.
  * That external memory buffer can consists of physically disjoint pages.
- * Setting vaddr to NULL, makes mempool to fallback to original behaviour
- * and allocate space for mempool and it's elements as one big chunk of
- * physically continuos memory.
+ * Setting vaddr to NULL, makes mempool to fallback to rte_mempool_create()
+ * behavior.
  */
 struct rte_mempool *
 rte_mempool_xmem_create(const char *name, unsigned n, unsigned elt_size,
@@ -990,12 +1001,6 @@ rte_mempool_in_use_count(const struct rte_mempool *mp)
 	return mp->size - rte_mempool_avail_count(mp);
 }
 
-unsigned int
-rte_mempool_count(const struct rte_mempool *mp)
-{
-	return rte_mempool_avail_count(mp);
-}
-
 /* dump the cache status */
 static unsigned
 rte_mempool_dump_cache(FILE *f, const struct rte_mempool *mp)
@@ -1039,7 +1044,7 @@ void rte_mempool_check_cookies(const struct rte_mempool *mp,
 	/* Force to drop the "const" attribute. This is done only when
 	 * DEBUG is enabled */
 	tmp = (void *) obj_table_const;
-	obj_table = (void **) tmp;
+	obj_table = tmp;
 
 	while (n--) {
 		obj = obj_table[n];
@@ -1275,12 +1280,13 @@ void rte_mempool_walk(void (*func)(struct rte_mempool *, void *),
 {
 	struct rte_tailq_entry *te = NULL;
 	struct rte_mempool_list *mempool_list;
+	void *tmp_te;
 
 	mempool_list = RTE_TAILQ_CAST(rte_mempool_tailq.head, rte_mempool_list);
 
 	rte_rwlock_read_lock(RTE_EAL_MEMPOOL_RWLOCK);
 
-	TAILQ_FOREACH(te, mempool_list, next) {
+	TAILQ_FOREACH_SAFE(te, mempool_list, next, tmp_te) {
 		(*func)((struct rte_mempool *) te->data, arg);
 	}
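
The final hunk switches rte_mempool_walk() to safe iteration so the callback may unlink the entry it is currently visiting. A minimal standalone illustration of the pattern (ordinary sys/queue.h lists rather than the DPDK tailq; TAILQ_FOREACH_SAFE is defined locally since not every libc provides it):

#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>

#ifndef TAILQ_FOREACH_SAFE
#define TAILQ_FOREACH_SAFE(var, head, field, tvar)		\
	for ((var) = TAILQ_FIRST((head));			\
	     (var) && ((tvar) = TAILQ_NEXT((var), field), 1);	\
	     (var) = (tvar))
#endif

struct node {
	int val;
	TAILQ_ENTRY(node) next;
};
TAILQ_HEAD(node_list, node);

int main(void)
{
	struct node_list list = TAILQ_HEAD_INITIALIZER(list);
	struct node *n, *tmp;
	int i;

	for (i = 0; i < 4; i++) {
		n = malloc(sizeof(*n));
		n->val = i;
		TAILQ_INSERT_TAIL(&list, n, next);
	}

	/* Safe variant: the current element can be unlinked and freed
	 * because the next pointer was saved in tmp before the body ran.
	 * Plain TAILQ_FOREACH would read freed memory here. */
	TAILQ_FOREACH_SAFE(n, &list, next, tmp) {
		printf("visiting %d\n", n->val);
		TAILQ_REMOVE(&list, n, next);
		free(n);
	}
	return 0;
}
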