static int
mr_insert_dev_cache(struct rte_eth_dev *dev, struct mlx4_mr *mr)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx4_priv *priv = dev->data->dev_private;
unsigned int n;
DEBUG("port %u inserting MR(%p) to global cache",
dev->data->port_id, (void *)mr);
for (n = 0; n < mr->ms_bmp_n; ) {
- struct mlx4_mr_cache entry = { 0, };
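+ /* Zero-fill the whole entry; memset also clears any padding bytes. */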
+ struct mlx4_mr_cache entry;
+ memset(&entry, 0, sizeof(entry));
/* Find a contiguous chunk and advance the index. */
n = mr_find_next_chunk(mr, &entry, n);
if (!entry.end)
mr_lookup_dev_list(struct rte_eth_dev *dev, struct mlx4_mr_cache *entry,
uintptr_t addr)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx4_priv *priv = dev->data->dev_private;
struct mlx4_mr *mr;
/* Iterate all the existing MRs. */
if (mr->ms_n == 0)
continue;
for (n = 0; n < mr->ms_bmp_n; ) {
- struct mlx4_mr_cache ret = { 0, };
+ struct mlx4_mr_cache ret;
+ memset(&ret, 0, sizeof(ret));
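+ /* Fetch the next contiguous chunk and check whether addr falls in it. */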
n = mr_find_next_chunk(mr, &ret, n);
if (addr >= ret.start && addr < ret.end) {
/* Found. */
mr_lookup_dev(struct rte_eth_dev *dev, struct mlx4_mr_cache *entry,
uintptr_t addr)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx4_priv *priv = dev->data->dev_private;
uint16_t idx;
uint32_t lkey = UINT32_MAX;
struct mlx4_mr *mr;
}
/**
- * Releass resources of detached MR having no online entry.
+ * Release resources of a detached MR that has no online entry.
*
* @param dev
* Pointer to Ethernet device.
static void
mlx4_mr_garbage_collect(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx4_priv *priv = dev->data->dev_private;
struct mlx4_mr *mr_next;
struct mlx4_mr_list free_list = LIST_HEAD_INITIALIZER(free_list);
}
/**
- * Create a new global Memroy Region (MR) for a missing virtual address.
+ * Create a new global Memory Region (MR) for a missing virtual address.
* Register entire virtually contiguous memory chunk around the address.
*
* @param dev
mlx4_mr_create(struct rte_eth_dev *dev, struct mlx4_mr_cache *entry,
uintptr_t addr)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx4_priv *priv = dev->data->dev_private;
struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
const struct rte_memseg_list *msl;
const struct rte_memseg *ms;
* Find out a contiguous virtual address chunk in use, to which the
* given address belongs, in order to register maximum range. In the
* best case where mempools are not dynamically recreated and
- * '--socket-mem' is speicified as an EAL option, it is very likely to
+ * '--socket-mem' is specified as an EAL option, it is very likely to
- * have only one MR(LKey) per a socket and per a hugepage-size even
+ * have only one MR(LKey) per socket and per hugepage size even
* though the system memory is highly fragmented.
*/
bmp_mem = RTE_PTR_ALIGN_CEIL(mr + 1, RTE_CACHE_LINE_SIZE);
mr->ms_bmp = rte_bitmap_init(ms_n, bmp_mem, bmp_size);
if (mr->ms_bmp == NULL) {
- WARN("port %u unable to initialize bitamp for a new MR of"
+ WARN("port %u unable to initialize bitmap for a new MR of"
" address (%p).",
dev->data->port_id, (void *)addr);
rte_errno = EINVAL;
*/
for (n = 0; n < ms_n; ++n) {
uintptr_t start;
- struct mlx4_mr_cache ret = { 0, };
+ struct mlx4_mr_cache ret;
+ memset(&ret, 0, sizeof(ret));
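+ /* Each iteration inspects one page of the candidate range. */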
start = data_re.start + n * msl->page_sz;
/* Exclude memsegs already registered by other MRs. */
if (mr_lookup_dev(dev, &ret, start) == UINT32_MAX) {
static void
mr_rebuild_dev_cache(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx4_priv *priv = dev->data->dev_private;
struct mlx4_mr *mr;
DEBUG("port %u rebuild dev cache[]", dev->data->port_id);
static void
mlx4_mr_mem_event_free_cb(struct rte_eth_dev *dev, const void *addr, size_t len)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx4_priv *priv = dev->data->dev_private;
const struct rte_memseg_list *msl;
struct mlx4_mr *mr;
int ms_n;
mlx4_mr_mem_event_cb(enum rte_mem_event event_type, const void *addr,
size_t len, void *arg __rte_unused)
{
- struct priv *priv;
+ struct mlx4_priv *priv;
switch (event_type) {
case RTE_MEM_EVENT_FREE:
rte_rwlock_read_lock(&mlx4_mem_event_rwlock);
/* Iterate all the existing mlx4 devices. */
LIST_FOREACH(priv, &mlx4_mem_event_cb_list, mem_event_cb)
- mlx4_mr_mem_event_free_cb(priv->dev, addr, len);
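+ /* ETH_DEV() maps the private data back to its rte_eth_dev. */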
+ mlx4_mr_mem_event_free_cb(ETH_DEV(priv), addr, len);
rte_rwlock_read_unlock(&mlx4_mem_event_rwlock);
break;
case RTE_MEM_EVENT_ALLOC:
mlx4_mr_lookup_dev(struct rte_eth_dev *dev, struct mlx4_mr_ctrl *mr_ctrl,
struct mlx4_mr_cache *entry, uintptr_t addr)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx4_priv *priv = dev->data->dev_private;
struct mlx4_mr_btree *bt = &mr_ctrl->cache_bh;
uint16_t idx;
uint32_t lkey;
mlx4_rx_addr2mr_bh(struct rxq *rxq, uintptr_t addr)
{
struct mlx4_mr_ctrl *mr_ctrl = &rxq->mr_ctrl;
- struct priv *priv = rxq->priv;
+ struct mlx4_priv *priv = rxq->priv;
- DEBUG("Rx queue %u: miss on top-half, mru=%u, head=%u, addr=%p",
- rxq->stats.idx, mr_ctrl->mru, mr_ctrl->head, (void *)addr);
- return mlx4_mr_addr2mr_bh(priv->dev, mr_ctrl, addr);
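+ /* Fall back to the device-level MR cache lookup. */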
+ return mlx4_mr_addr2mr_bh(ETH_DEV(priv), mr_ctrl, addr);
}
/**
* @return
* Searched LKey on success, UINT32_MAX on no match.
*/
-uint32_t
+static uint32_t
mlx4_tx_addr2mr_bh(struct txq *txq, uintptr_t addr)
{
struct mlx4_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
- struct priv *priv = txq->priv;
+ struct mlx4_priv *priv = txq->priv;
- DEBUG("Tx queue %u: miss on top-half, mru=%u, head=%u, addr=%p",
- txq->stats.idx, mr_ctrl->mru, mr_ctrl->head, (void *)addr);
- return mlx4_mr_addr2mr_bh(priv->dev, mr_ctrl, addr);
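+ /* Same device-level fallback as on the Rx side. */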
+ return mlx4_mr_addr2mr_bh(ETH_DEV(priv), mr_ctrl, addr);
+}
+
+/**
+ * Bottom-half of LKey search on Tx. If the address can't be found in the
+ * memseg list, register the mbuf's mempool as externally allocated memory.
+ *
+ * @param txq
+ * Pointer to Tx queue structure.
+ * @param mb
+ * Pointer to mbuf.
+ *
+ * @return
+ * Searched LKey on success, UINT32_MAX on no match.
+ */
+uint32_t
+mlx4_tx_mb2mr_bh(struct txq *txq, struct rte_mbuf *mb)
+{
+ uintptr_t addr = (uintptr_t)mb->buf_addr;
+ uint32_t lkey;
+
+ lkey = mlx4_tx_addr2mr_bh(txq, addr);
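+ /* ENXIO means the address is not backed by any registered memseg. */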
+ if (lkey == UINT32_MAX && rte_errno == ENXIO) {
+ /* Mempool may have externally allocated memory. */
+ return mlx4_tx_update_ext_mp(txq, addr, mlx4_mb2mp(mb));
+ }
+ return lkey;
}
/**
{
struct mr_update_mp_data *data = opaque;
struct rte_eth_dev *dev = data->dev;
- struct priv *priv = dev->data->dev_private;
+ struct mlx4_priv *priv = dev->data->dev_private;
struct mlx4_mr_ctrl *mr_ctrl = data->mr_ctrl;
struct mlx4_mr *mr = NULL;
uintptr_t addr = (uintptr_t)memhdr->addr;
mlx4_tx_update_ext_mp(struct txq *txq, uintptr_t addr, struct rte_mempool *mp)
{
struct mlx4_mr_ctrl *mr_ctrl = &txq->mr_ctrl;
- struct priv *priv = txq->priv;
+ struct mlx4_priv *priv = txq->priv;
- mlx4_mr_update_ext_mp(priv->dev, mr_ctrl, mp);
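+ /* Register the mempool's chunks as an external MR, then retry the lookup. */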
+ mlx4_mr_update_ext_mp(ETH_DEV(priv), mr_ctrl, mp);
return mlx4_tx_addr2mr_bh(txq, addr);
}
void
mlx4_mr_dump_dev(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
+ struct mlx4_priv *priv = dev->data->dev_private;
struct mlx4_mr *mr;
int mr_n = 0;
int chunk_n = 0;
if (mr->ms_n == 0)
continue;
for (n = 0; n < mr->ms_bmp_n; ) {
- struct mlx4_mr_cache ret = { 0, };
+ struct mlx4_mr_cache ret;
+ memset(&ret, 0, sizeof(ret));
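+ /* Walk every contiguous chunk of this MR. */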
n = mr_find_next_chunk(mr, &ret, n);
if (!ret.end)
break;
void
mlx4_mr_release(struct rte_eth_dev *dev)
{
- struct priv *priv = dev->data->dev_private;
- struct mlx4_mr *mr_next = LIST_FIRST(&priv->mr.mr_list);
+ struct mlx4_priv *priv = dev->data->dev_private;
+ struct mlx4_mr *mr_next;
/* Remove from memory callback device list. */
rte_rwlock_write_lock(&mlx4_mem_event_rwlock);
#endif
rte_rwlock_write_lock(&priv->mr.rwlock);
/* Detach from MR list and move to free list. */
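+ /* Take the list head only while holding the write lock. */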
+ mr_next = LIST_FIRST(&priv->mr.mr_list);
while (mr_next != NULL) {
struct mlx4_mr *mr = mr_next;