New upstream version 16.11.7
diff --git a/drivers/net/vmxnet3/vmxnet3_rxtx.c b/drivers/net/vmxnet3/vmxnet3_rxtx.c
index 4ac0456..0a69588 100644
--- a/drivers/net/vmxnet3/vmxnet3_rxtx.c
+++ b/drivers/net/vmxnet3/vmxnet3_rxtx.c
@@ -57,7 +57,6 @@
 #include <rte_lcore.h>
 #include <rte_atomic.h>
 #include <rte_branch_prediction.h>
-#include <rte_ring.h>
 #include <rte_mempool.h>
 #include <rte_malloc.h>
 #include <rte_mbuf.h>
@@ -86,16 +85,6 @@ static void vmxnet3_rxq_dump(struct vmxnet3_rx_queue *);
 static void vmxnet3_txq_dump(struct vmxnet3_tx_queue *);
 #endif
 
-static struct rte_mbuf *
-rte_rxmbuf_alloc(struct rte_mempool *mp)
-{
-       struct rte_mbuf *m;
-
-       m = __rte_mbuf_raw_alloc(mp);
-       __rte_mbuf_sanity_check_raw(m, 0);
-       return m;
-}
-
 #ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER_NOT_USED
 static void
 vmxnet3_rxq_dump(struct vmxnet3_rx_queue *rxq)
@@ -106,7 +95,7 @@ vmxnet3_rxq_dump(struct vmxnet3_rx_queue *rxq)
                return;
 
        PMD_RX_LOG(DEBUG,
-                  "RXQ: cmd0 base : 0x%p cmd1 base : 0x%p comp ring base : 0x%p.",
+                  "RXQ: cmd0 base : %p cmd1 base : %p comp ring base : %p.",
                   rxq->cmd_ring[0].base, rxq->cmd_ring[1].base, rxq->comp_ring.base);
        PMD_RX_LOG(DEBUG,
                   "RXQ: cmd0 basePA : 0x%lx cmd1 basePA : 0x%lx comp ring basePA : 0x%lx.",
@@ -136,7 +125,7 @@ vmxnet3_txq_dump(struct vmxnet3_tx_queue *txq)
        if (txq == NULL)
                return;
 
-       PMD_TX_LOG(DEBUG, "TXQ: cmd base : 0x%p comp ring base : 0x%p data ring base : 0x%p.",
+       PMD_TX_LOG(DEBUG, "TXQ: cmd base : %p comp ring base : %p data ring base : %p.",
                   txq->cmd_ring.base, txq->comp_ring.base, txq->data_ring.base);
        PMD_TX_LOG(DEBUG, "TXQ: cmd basePA : 0x%lx comp ring basePA : 0x%lx data ring basePA : 0x%lx.",
                   (unsigned long)txq->cmd_ring.basePA,
@@ -151,10 +140,10 @@ vmxnet3_txq_dump(struct vmxnet3_tx_queue *txq)
 #endif
 
 static void
-vmxnet3_cmd_ring_release_mbufs(vmxnet3_cmd_ring_t *ring)
+vmxnet3_tx_cmd_ring_release_mbufs(vmxnet3_cmd_ring_t *ring)
 {
        while (ring->next2comp != ring->next2fill) {
-               /* No need to worry about tx desc ownership, device is quiesced by now. */
+               /* No need to worry about desc ownership, device is quiesced by now. */
                vmxnet3_buf_info_t *buf_info = ring->buf_info + ring->next2comp;
 
                if (buf_info->m) {
@@ -167,23 +156,46 @@ vmxnet3_cmd_ring_release_mbufs(vmxnet3_cmd_ring_t *ring)
        }
 }
 
+static void
+vmxnet3_rx_cmd_ring_release_mbufs(vmxnet3_cmd_ring_t *ring)
+{
+       uint32_t i;
+
+       for (i = 0; i < ring->size; i++) {
+               /* No need to worry about desc ownership, device is quiesced by now. */
+               vmxnet3_buf_info_t *buf_info = &ring->buf_info[i];
+
+               if (buf_info->m) {
+                       rte_pktmbuf_free_seg(buf_info->m);
+                       buf_info->m = NULL;
+                       buf_info->bufPA = 0;
+                       buf_info->len = 0;
+               }
+               vmxnet3_cmd_ring_adv_next2comp(ring);
+       }
+}
+
 static void
 vmxnet3_cmd_ring_release(vmxnet3_cmd_ring_t *ring)
 {
-       vmxnet3_cmd_ring_release_mbufs(ring);
        rte_free(ring->buf_info);
        ring->buf_info = NULL;
 }
 
-
 void
 vmxnet3_dev_tx_queue_release(void *txq)
 {
        vmxnet3_tx_queue_t *tq = txq;
 
        if (tq != NULL) {
+               /* Release mbufs */
+               vmxnet3_tx_cmd_ring_release_mbufs(&tq->cmd_ring);
                /* Release the cmd_ring */
                vmxnet3_cmd_ring_release(&tq->cmd_ring);
+               /* Release the memzone */
+               rte_memzone_free(tq->mz);
+               /* Release the queue */
+               rte_free(tq);
        }
 }
 
@@ -194,9 +206,19 @@ vmxnet3_dev_rx_queue_release(void *rxq)
        vmxnet3_rx_queue_t *rq = rxq;
 
        if (rq != NULL) {
+               /* Release mbufs */
+               for (i = 0; i < VMXNET3_RX_CMDRING_SIZE; i++)
+                       vmxnet3_rx_cmd_ring_release_mbufs(&rq->cmd_ring[i]);
+
                /* Release both the cmd_rings */
                for (i = 0; i < VMXNET3_RX_CMDRING_SIZE; i++)
                        vmxnet3_cmd_ring_release(&rq->cmd_ring[i]);
+
+               /* Release the memzone */
+               rte_memzone_free(rq->mz);
+
+               /* Release the queue */
+               rte_free(rq);
        }
 }
 
@@ -211,7 +233,7 @@ vmxnet3_dev_tx_queue_reset(void *txq)
 
        if (tq != NULL) {
                /* Release the cmd_ring mbufs */
-               vmxnet3_cmd_ring_release_mbufs(&tq->cmd_ring);
+               vmxnet3_tx_cmd_ring_release_mbufs(&tq->cmd_ring);
        }
 
        /* Tx vmxnet rings structure initialization*/
@@ -237,11 +259,9 @@ vmxnet3_dev_rx_queue_reset(void *rxq)
        struct vmxnet3_comp_ring *comp_ring;
        int size;
 
-       if (rq != NULL) {
-               /* Release both the cmd_rings mbufs */
-               for (i = 0; i < VMXNET3_RX_CMDRING_SIZE; i++)
-                       vmxnet3_cmd_ring_release_mbufs(&rq->cmd_ring[i]);
-       }
+       /* Release both the cmd_rings mbufs */
+       for (i = 0; i < VMXNET3_RX_CMDRING_SIZE; i++)
+               vmxnet3_rx_cmd_ring_release_mbufs(&rq->cmd_ring[i]);
 
        ring0 = &rq->cmd_ring[0];
        ring1 = &rq->cmd_ring[1];
@@ -296,7 +316,7 @@ vmxnet3_unmap_pkt(uint16_t eop_idx, vmxnet3_tx_queue_t *txq)
        struct rte_mbuf *mbuf;
 
        /* Release cmd_ring descriptor and free mbuf */
-       VMXNET3_ASSERT(txq->cmd_ring.base[eop_idx].txd.eop == 1);
+       RTE_ASSERT(txq->cmd_ring.base[eop_idx].txd.eop == 1);
 
        mbuf = txq->cmd_ring.buf_info[eop_idx].m;
        if (mbuf == NULL)
@@ -307,7 +327,7 @@ vmxnet3_unmap_pkt(uint16_t eop_idx, vmxnet3_tx_queue_t *txq)
 
        while (txq->cmd_ring.next2comp != eop_idx) {
                /* no out-of-order completion */
-               VMXNET3_ASSERT(txq->cmd_ring.base[txq->cmd_ring.next2comp].txd.cq == 0);
+               RTE_ASSERT(txq->cmd_ring.base[txq->cmd_ring.next2comp].txd.cq == 0);
                vmxnet3_cmd_ring_adv_next2comp(&txq->cmd_ring);
                completed++;
        }
@@ -402,7 +422,8 @@ vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                        continue;
                }
 
-               if (txm->nb_segs == 1 && rte_pktmbuf_pkt_len(txm) <= VMXNET3_HDR_COPY_SIZE) {
+               if (txm->nb_segs == 1 &&
+                   rte_pktmbuf_pkt_len(txm) <= VMXNET3_HDR_COPY_SIZE) {
                        struct Vmxnet3_TxDataDesc *tdd;
 
                        tdd = txq->data_ring.base + txq->cmd_ring.next2fill;
@@ -424,8 +445,8 @@ vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                        gdesc = txq->cmd_ring.base + txq->cmd_ring.next2fill;
                        if (copy_size)
                                gdesc->txd.addr = rte_cpu_to_le_64(txq->data_ring.basePA +
-                                                               txq->cmd_ring.next2fill *
-                                                               sizeof(struct Vmxnet3_TxDataDesc));
+                                                                  txq->cmd_ring.next2fill *
+                                                                  sizeof(struct Vmxnet3_TxDataDesc));
                        else
                                gdesc->txd.addr = rte_mbuf_data_dma_addr(m_seg);
 
@@ -454,7 +475,7 @@ vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                if (tso) {
                        uint16_t mss = txm->tso_segsz;
 
-                       VMXNET3_ASSERT(mss > 0);
+                       RTE_ASSERT(mss > 0);
 
                        gdesc->txd.hlen = txm->l2_len + txm->l3_len + txm->l4_len;
                        gdesc->txd.om = VMXNET3_OM_TSO;
@@ -505,16 +526,41 @@ vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
        return nb_tx;
 }
 
+static inline void
+vmxnet3_renew_desc(vmxnet3_rx_queue_t *rxq, uint8_t ring_id,
+                  struct rte_mbuf *mbuf)
+{
+       uint32_t val = 0;
+       struct vmxnet3_cmd_ring *ring = &rxq->cmd_ring[ring_id];
+       struct Vmxnet3_RxDesc *rxd =
+               (struct Vmxnet3_RxDesc *)(ring->base + ring->next2fill);
+       vmxnet3_buf_info_t *buf_info = &ring->buf_info[ring->next2fill];
+
+       if (ring_id == 0)
+               val = VMXNET3_RXD_BTYPE_HEAD;
+       else
+               val = VMXNET3_RXD_BTYPE_BODY;
+
+       buf_info->m = mbuf;
+       buf_info->len = (uint16_t)(mbuf->buf_len - RTE_PKTMBUF_HEADROOM);
+       buf_info->bufPA = rte_mbuf_data_dma_addr_default(mbuf);
+
+       rxd->addr = buf_info->bufPA;
+       rxd->btype = val;
+       rxd->len = buf_info->len;
+       rxd->gen = ring->gen;
+
+       vmxnet3_cmd_ring_adv_next2fill(ring);
+}
 /*
  *  Allocates mbufs and clusters. Post rx descriptors with buffer details
  *  so that device can receive packets in those buffers.
- *     Ring layout:
- *      Among the two rings, 1st ring contains buffers of type 0 and type1.
+ *  Ring layout:
+ *      Among the two rings, 1st ring contains buffers of type 0 and type 1.
  *      bufs_per_pkt is set such that for non-LRO cases all the buffers required
  *      by a frame will fit in 1st ring (1st buf of type0 and rest of type1).
  *      2nd ring contains buffers of type 1 alone. Second ring mostly be used
  *      only for LRO.
- *
  */
 static int
 vmxnet3_post_rx_bufs(vmxnet3_rx_queue_t *rxq, uint8_t ring_id)
@@ -544,7 +590,7 @@ vmxnet3_post_rx_bufs(vmxnet3_rx_queue_t *rxq, uint8_t ring_id)
                rxd = (struct Vmxnet3_RxDesc *)(ring->base + ring->next2fill);
 
                /* Allocate blank mbuf for the current Rx Descriptor */
-               mbuf = rte_rxmbuf_alloc(rxq->mp);
+               mbuf = rte_mbuf_raw_alloc(rxq->mp);
                if (unlikely(mbuf == NULL)) {
                        PMD_RX_LOG(ERR, "Error allocating mbuf");
                        rxq->stats.rx_buf_alloc_failure++;
@@ -559,8 +605,7 @@ vmxnet3_post_rx_bufs(vmxnet3_rx_queue_t *rxq, uint8_t ring_id)
                buf_info->m = mbuf;
                buf_info->len = (uint16_t)(mbuf->buf_len -
                                           RTE_PKTMBUF_HEADROOM);
-               buf_info->bufPA =
-                       rte_mbuf_data_dma_addr_default(mbuf);
+               buf_info->bufPA = rte_mbuf_data_dma_addr_default(mbuf);
 
                /* Load Rx Descriptor with the buffer's GPA */
                rxd->addr = buf_info->bufPA;
@@ -587,12 +632,6 @@ vmxnet3_post_rx_bufs(vmxnet3_rx_queue_t *rxq, uint8_t ring_id)
 static void
 vmxnet3_rx_offload(const Vmxnet3_RxCompDesc *rcd, struct rte_mbuf *rxm)
 {
-       /* Check for hardware stripped VLAN tag */
-       if (rcd->ts) {
-               rxm->ol_flags |= PKT_RX_VLAN_PKT;
-               rxm->vlan_tci = rte_le_to_cpu_16((uint16_t)rcd->tci);
-       }
-
        /* Check for RSS */
        if (rcd->rssType != VMXNET3_RCD_RSS_TYPE_NONE) {
                rxm->ol_flags |= PKT_RX_RSS_HASH;
@@ -652,18 +691,28 @@ vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
        }
 
        while (rcd->gen == rxq->comp_ring.gen) {
+               struct rte_mbuf *newm;
+
                if (nb_rx >= nb_pkts)
                        break;
 
+               newm = rte_mbuf_raw_alloc(rxq->mp);
+               if (unlikely(newm == NULL)) {
+                       PMD_RX_LOG(ERR, "Error allocating mbuf");
+                       rxq->stats.rx_buf_alloc_failure++;
+                       break;
+               }
+
                idx = rcd->rxdIdx;
                ring_idx = (uint8_t)((rcd->rqID == rxq->qid1) ? 0 : 1);
                rxd = (Vmxnet3_RxDesc *)rxq->cmd_ring[ring_idx].base + idx;
+               RTE_SET_USED(rxd); /* used only for assert when enabled */
                rbi = rxq->cmd_ring[ring_idx].buf_info + idx;
 
                PMD_RX_LOG(DEBUG, "rxd idx: %d ring idx: %d.", idx, ring_idx);
 
-               VMXNET3_ASSERT(rcd->len <= rxd->len);
-               VMXNET3_ASSERT(rbi->m);
+               RTE_ASSERT(rcd->len <= rxd->len);
+               RTE_ASSERT(rbi->m);
 
                /* Get the packet buffer pointer from buf_info */
                rxm = rbi->m;
@@ -688,10 +737,15 @@ vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
                                   (int)(rcd - (struct Vmxnet3_RxCompDesc *)
                                         rxq->comp_ring.base), rcd->rxdIdx);
                        rte_pktmbuf_free_seg(rxm);
+                       if (rxq->start_seg) {
+                               struct rte_mbuf *start = rxq->start_seg;
+
+                               rxq->start_seg = NULL;
+                               rte_pktmbuf_free(start);
+                       }
                        goto rcd_done;
                }
 
-
                /* Initialize newly received packet buffer */
                rxm->port = rxq->port_id;
                rxm->nb_segs = 1;
@@ -710,10 +764,10 @@ vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
                 * the last mbuf of the current packet.
                 */
                if (rcd->sop) {
-                       VMXNET3_ASSERT(rxd->btype == VMXNET3_RXD_BTYPE_HEAD);
+                       RTE_ASSERT(rxd->btype == VMXNET3_RXD_BTYPE_HEAD);
 
                        if (unlikely(rcd->len == 0)) {
-                               VMXNET3_ASSERT(rcd->eop);
+                               RTE_ASSERT(rcd->eop);
 
                                PMD_RX_LOG(DEBUG,
                                           "Rx buf was skipped. rxring[%d][%d])",
@@ -727,7 +781,7 @@ vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
                } else {
                        struct rte_mbuf *start = rxq->start_seg;
 
-                       VMXNET3_ASSERT(rxd->btype == VMXNET3_RXD_BTYPE_BODY);
+                       RTE_ASSERT(rxd->btype == VMXNET3_RXD_BTYPE_BODY);
 
                        start->pkt_len += rxm->data_len;
                        start->nb_segs++;
@@ -737,16 +791,25 @@ vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
                rxq->last_seg = rxm;
 
                if (rcd->eop) {
-                       rx_pkts[nb_rx++] = rxq->start_seg;
+                       struct rte_mbuf *start = rxq->start_seg;
+
+                       /* Check for hardware stripped VLAN tag */
+                       if (rcd->ts) {
+                               start->ol_flags |= (PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED);
+                               start->vlan_tci = rte_le_to_cpu_16((uint16_t)rcd->tci);
+                       }
+
+                       rx_pkts[nb_rx++] = start;
                        rxq->start_seg = NULL;
                }
 
 rcd_done:
                rxq->cmd_ring[ring_idx].next2comp = idx;
-               VMXNET3_INC_RING_IDX_ONLY(rxq->cmd_ring[ring_idx].next2comp, rxq->cmd_ring[ring_idx].size);
+               VMXNET3_INC_RING_IDX_ONLY(rxq->cmd_ring[ring_idx].next2comp,
+                                         rxq->cmd_ring[ring_idx].size);
 
-               /* It's time to allocate some new buf and renew descriptors */
-               vmxnet3_post_rx_bufs(rxq, ring_idx);
+               /* It's time to renew descriptors */
+               vmxnet3_renew_desc(rxq, ring_idx, newm);
                if (unlikely(rxq->shared->ctrl.updateRxProd)) {
                        VMXNET3_WRITE_BAR0_REG(hw, rxprod_reg[ring_idx] + (rxq->queue_id * VMXNET3_REG_ALIGN),
                                               rxq->cmd_ring[ring_idx].next2fill);
@@ -758,8 +821,7 @@ rcd_done:
                rcd = &rxq->comp_ring.base[rxq->comp_ring.next2proc].rcd;
                nb_rxd++;
                if (nb_rxd > rxq->cmd_ring[0].size) {
-                       PMD_RX_LOG(ERR,
-                                  "Used up quota of receiving packets,"
+                       PMD_RX_LOG(ERR, "Used up quota of receiving packets,"
                                   " relinquish control.");
                        break;
                }
@@ -768,36 +830,12 @@ rcd_done:
        return nb_rx;
 }
 
-/*
- * Create memzone for device rings. malloc can't be used as the physical address is
- * needed. If the memzone is already created, then this function returns a ptr
- * to the old one.
- */
-static const struct rte_memzone *
-ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
-                     uint16_t queue_id, uint32_t ring_size, int socket_id)
-{
-       char z_name[RTE_MEMZONE_NAMESIZE];
-       const struct rte_memzone *mz;
-
-       snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
-                       dev->driver->pci_drv.name, ring_name,
-                       dev->data->port_id, queue_id);
-
-       mz = rte_memzone_lookup(z_name);
-       if (mz)
-               return mz;
-
-       return rte_memzone_reserve_aligned(z_name, ring_size,
-                       socket_id, 0, VMXNET3_RING_BA_ALIGN);
-}
-
 int
 vmxnet3_dev_tx_queue_setup(struct rte_eth_dev *dev,
                           uint16_t queue_idx,
                           uint16_t nb_desc,
                           unsigned int socket_id,
-                          __attribute__((unused)) const struct rte_eth_txconf *tx_conf)
+                          __rte_unused const struct rte_eth_txconf *tx_conf)
 {
        struct vmxnet3_hw *hw = dev->data->dev_private;
        const struct rte_memzone *mz;
@@ -815,7 +853,8 @@ vmxnet3_dev_tx_queue_setup(struct rte_eth_dev *dev,
                return -EINVAL;
        }
 
-       txq = rte_zmalloc("ethdev_tx_queue", sizeof(struct vmxnet3_tx_queue), RTE_CACHE_LINE_SIZE);
+       txq = rte_zmalloc("ethdev_tx_queue", sizeof(struct vmxnet3_tx_queue),
+                         RTE_CACHE_LINE_SIZE);
        if (txq == NULL) {
                PMD_INIT_LOG(ERR, "Can not allocate tx queue structure");
                return -ENOMEM;
@@ -823,7 +862,7 @@ vmxnet3_dev_tx_queue_setup(struct rte_eth_dev *dev,
 
        txq->queue_id = queue_idx;
        txq->port_id = dev->data->port_id;
-       txq->shared = &hw->tqd_start[queue_idx];
+       txq->shared = NULL; /* set in vmxnet3_setup_driver_shared() */
        txq->hw = hw;
        txq->qid = queue_idx;
        txq->stopped = TRUE;
@@ -858,11 +897,13 @@ vmxnet3_dev_tx_queue_setup(struct rte_eth_dev *dev,
        size += sizeof(struct Vmxnet3_TxCompDesc) * comp_ring->size;
        size += sizeof(struct Vmxnet3_TxDataDesc) * data_ring->size;
 
-       mz = ring_dma_zone_reserve(dev, "txdesc", queue_idx, size, socket_id);
+       mz = rte_eth_dma_zone_reserve(dev, "txdesc", queue_idx, size,
+                                     VMXNET3_RING_BA_ALIGN, socket_id);
        if (mz == NULL) {
                PMD_INIT_LOG(ERR, "ERROR: Creating queue descriptors zone");
                return -ENOMEM;
        }
+       txq->mz = mz;
        memset(mz->addr, 0, mz->len);
 
        /* cmd_ring initialization */
@@ -898,12 +939,12 @@ vmxnet3_dev_rx_queue_setup(struct rte_eth_dev *dev,
                           uint16_t queue_idx,
                           uint16_t nb_desc,
                           unsigned int socket_id,
-                          __attribute__((unused)) const struct rte_eth_rxconf *rx_conf,
+                          __rte_unused const struct rte_eth_rxconf *rx_conf,
                           struct rte_mempool *mp)
 {
        const struct rte_memzone *mz;
        struct vmxnet3_rx_queue *rxq;
-       struct vmxnet3_hw     *hw = dev->data->dev_private;
+       struct vmxnet3_hw *hw = dev->data->dev_private;
        struct vmxnet3_cmd_ring *ring0, *ring1, *ring;
        struct vmxnet3_comp_ring *comp_ring;
        int size;
@@ -912,7 +953,8 @@ vmxnet3_dev_rx_queue_setup(struct rte_eth_dev *dev,
 
        PMD_INIT_FUNC_TRACE();
 
-       rxq = rte_zmalloc("ethdev_rx_queue", sizeof(struct vmxnet3_rx_queue), RTE_CACHE_LINE_SIZE);
+       rxq = rte_zmalloc("ethdev_rx_queue", sizeof(struct vmxnet3_rx_queue),
+                         RTE_CACHE_LINE_SIZE);
        if (rxq == NULL) {
                PMD_INIT_LOG(ERR, "Can not allocate rx queue structure");
                return -ENOMEM;
@@ -921,7 +963,7 @@ vmxnet3_dev_rx_queue_setup(struct rte_eth_dev *dev,
        rxq->mp = mp;
        rxq->queue_id = queue_idx;
        rxq->port_id = dev->data->port_id;
-       rxq->shared = &hw->rqd_start[queue_idx];
+       rxq->shared = NULL; /* set in vmxnet3_setup_driver_shared() */
        rxq->hw = hw;
        rxq->qid1 = queue_idx;
        rxq->qid2 = queue_idx + hw->num_rx_queues;
@@ -959,11 +1001,13 @@ vmxnet3_dev_rx_queue_setup(struct rte_eth_dev *dev,
        size = sizeof(struct Vmxnet3_RxDesc) * (ring0->size + ring1->size);
        size += sizeof(struct Vmxnet3_RxCompDesc) * comp_ring->size;
 
-       mz = ring_dma_zone_reserve(dev, "rxdesc", queue_idx, size, socket_id);
+       mz = rte_eth_dma_zone_reserve(dev, "rxdesc", queue_idx, size,
+                                     VMXNET3_RING_BA_ALIGN, socket_id);
        if (mz == NULL) {
                PMD_INIT_LOG(ERR, "ERROR: Creating queue descriptors zone");
                return -ENOMEM;
        }
+       rxq->mz = mz;
        memset(mz->addr, 0, mz->len);
 
        /* cmd_ring0 initialization */
@@ -986,7 +1030,9 @@ vmxnet3_dev_rx_queue_setup(struct rte_eth_dev *dev,
                ring->rid = i;
                snprintf(mem_name, sizeof(mem_name), "rx_ring_%d_buf_info", i);
 
-               ring->buf_info = rte_zmalloc(mem_name, ring->size * sizeof(vmxnet3_buf_info_t), RTE_CACHE_LINE_SIZE);
+               ring->buf_info = rte_zmalloc(mem_name,
+                                            ring->size * sizeof(vmxnet3_buf_info_t),
+                                            RTE_CACHE_LINE_SIZE);
                if (ring->buf_info == NULL) {
                        PMD_INIT_LOG(ERR, "ERROR: Creating rx_buf_info structure");
                        return -ENOMEM;
@@ -1020,10 +1066,15 @@ vmxnet3_dev_rxtx_init(struct rte_eth_dev *dev)
                        /* Passing 0 as alloc_num will allocate full ring */
                        ret = vmxnet3_post_rx_bufs(rxq, j);
                        if (ret <= 0) {
-                               PMD_INIT_LOG(ERR, "ERROR: Posting Rxq: %d buffers ring: %d", i, j);
+                               PMD_INIT_LOG(ERR,
+                                            "ERROR: Posting Rxq: %d buffers ring: %d",
+                                            i, j);
                                return -ret;
                        }
-                       /* Updating device with the index:next2fill to fill the mbufs for coming packets */
+                       /*
+                        * Updating device with the index:next2fill to fill the
+                        * mbufs for coming packets.
+                        */
                        if (unlikely(rxq->shared->ctrl.updateRxProd)) {
                                VMXNET3_WRITE_BAR0_REG(hw, rxprod_reg[j] + (rxq->queue_id * VMXNET3_REG_ALIGN),
                                                       rxq->cmd_ring[j].next2fill);
@@ -1071,7 +1122,7 @@ vmxnet3_rss_configure(struct rte_eth_dev *dev)
        dev_rss_conf->hashFunc = VMXNET3_RSS_HASH_FUNC_TOEPLITZ;
        /* loading hashKeySize */
        dev_rss_conf->hashKeySize = VMXNET3_RSS_MAX_KEY_SIZE;
-       /* loading indTableSize : Must not exceed VMXNET3_RSS_MAX_IND_TABLE_SIZE (128)*/
+       /* loading indTableSize: Must not exceed VMXNET3_RSS_MAX_IND_TABLE_SIZE (128)*/
        dev_rss_conf->indTableSize = (uint16_t)(hw->num_rx_queues * 4);
 
        if (port_rss_conf->rss_key == NULL) {
@@ -1080,7 +1131,8 @@ vmxnet3_rss_configure(struct rte_eth_dev *dev)
        }
 
        /* loading hashKey */
-       memcpy(&dev_rss_conf->hashKey[0], port_rss_conf->rss_key, dev_rss_conf->hashKeySize);
+       memcpy(&dev_rss_conf->hashKey[0], port_rss_conf->rss_key,
+              dev_rss_conf->hashKeySize);
 
        /* loading indTable */
        for (i = 0, j = 0; i < dev_rss_conf->indTableSize; i++, j++) {