Imported Upstream version 16.11
[deb_dpdk.git] / drivers/net/vmxnet3/vmxnet3_rxtx.c
index 88df576..b109168 100644
@@ -57,7 +57,6 @@
 #include <rte_lcore.h>
 #include <rte_atomic.h>
 #include <rte_branch_prediction.h>
-#include <rte_ring.h>
 #include <rte_mempool.h>
 #include <rte_malloc.h>
 #include <rte_mbuf.h>
@@ -183,7 +182,6 @@ vmxnet3_cmd_ring_release(vmxnet3_cmd_ring_t *ring)
        ring->buf_info = NULL;
 }
 
-
 void
 vmxnet3_dev_tx_queue_release(void *txq)
 {
@@ -416,7 +414,8 @@ vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                        continue;
                }
 
-               if (txm->nb_segs == 1 && rte_pktmbuf_pkt_len(txm) <= VMXNET3_HDR_COPY_SIZE) {
+               if (txm->nb_segs == 1 &&
+                   rte_pktmbuf_pkt_len(txm) <= VMXNET3_HDR_COPY_SIZE) {
                        struct Vmxnet3_TxDataDesc *tdd;
 
                        tdd = txq->data_ring.base + txq->cmd_ring.next2fill;
@@ -438,8 +437,8 @@ vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
                        gdesc = txq->cmd_ring.base + txq->cmd_ring.next2fill;
                        if (copy_size)
                                gdesc->txd.addr = rte_cpu_to_le_64(txq->data_ring.basePA +
-                                                               txq->cmd_ring.next2fill *
-                                                               sizeof(struct Vmxnet3_TxDataDesc));
+                                                                  txq->cmd_ring.next2fill *
+                                                                  sizeof(struct Vmxnet3_TxDataDesc));
                        else
                                gdesc->txd.addr = rte_mbuf_data_dma_addr(m_seg);
 
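
A minimal standalone sketch of the small-packet TX path in the hunk above: a short single-segment frame is copied into the data ring, and the descriptor address is derived from the slot index. HDR_COPY_SIZE and DATA_DESC_SIZE are assumed stand-ins; the real constants (VMXNET3_HDR_COPY_SIZE, sizeof(struct Vmxnet3_TxDataDesc)) live in the vmxnet3 headers.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define HDR_COPY_SIZE  128u   /* stand-in for VMXNET3_HDR_COPY_SIZE */
#define DATA_DESC_SIZE 128u   /* stand-in for sizeof(struct Vmxnet3_TxDataDesc) */

int main(void)
{
        uint64_t data_ring_base_pa = 0x1000000; /* hypothetical data_ring.basePA */
        uint32_t next2fill = 5;                 /* hypothetical cmd_ring.next2fill */
        uint32_t nb_segs = 1, pkt_len = 60;     /* a small single-segment frame */

        if (nb_segs == 1 && pkt_len <= HDR_COPY_SIZE) {
                /* Each data-ring slot is one fixed-size descriptor, so the
                 * device-visible address of slot N is base + N * slot size. */
                uint64_t addr = data_ring_base_pa +
                                (uint64_t)next2fill * DATA_DESC_SIZE;
                printf("copy %u bytes into slot %u at 0x%" PRIx64 "\n",
                       pkt_len, next2fill, addr);
        }
        return 0;
}
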
@@ -522,13 +521,12 @@ vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 /*
  *  Allocates mbufs and clusters. Post rx descriptors with buffer details
  *  so that device can receive packets in those buffers.
- *     Ring layout:
- *      Among the two rings, 1st ring contains buffers of type 0 and type1.
+ *  Ring layout:
+ *      Among the two rings, 1st ring contains buffers of type 0 and type 1.
  *      bufs_per_pkt is set such that for non-LRO cases all the buffers required
  *      by a frame will fit in 1st ring (1st buf of type0 and rest of type1).
 *      2nd ring contains buffers of type 1 alone. The second ring is mostly
 *      used only for LRO.
- *
  */
 static int
 vmxnet3_post_rx_bufs(vmxnet3_rx_queue_t *rxq, uint8_t ring_id)
@@ -573,8 +571,7 @@ vmxnet3_post_rx_bufs(vmxnet3_rx_queue_t *rxq, uint8_t ring_id)
                buf_info->m = mbuf;
                buf_info->len = (uint16_t)(mbuf->buf_len -
                                           RTE_PKTMBUF_HEADROOM);
-               buf_info->bufPA =
-                       rte_mbuf_data_dma_addr_default(mbuf);
+               buf_info->bufPA = rte_mbuf_data_dma_addr_default(mbuf);
 
                /* Load Rx Descriptor with the buffer's GPA */
                rxd->addr = buf_info->bufPA;
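
To make the ring-layout comment above concrete, here is a hedged standalone sketch of how buffer types could be assigned per slot, taking type 0 as HEAD and type 1 as BODY descriptors. The modulo rule mirrors the "1st buf of type0 and rest of type1" description and is illustrative, not the driver's verbatim logic.

#include <stdio.h>

enum btype { BTYPE_HEAD = 0, BTYPE_BODY = 1 };  /* type 0 / type 1 */

static enum btype slot_type(unsigned ring_id, unsigned slot,
                            unsigned bufs_per_pkt)
{
        if (ring_id == 1)       /* 2nd ring: type 1 only, used mostly for LRO */
                return BTYPE_BODY;
        /* 1st ring: first buffer of each frame is HEAD, the rest BODY */
        return (slot % bufs_per_pkt == 0) ? BTYPE_HEAD : BTYPE_BODY;
}

int main(void)
{
        unsigned bufs_per_pkt = 2;      /* hypothetical non-LRO value */

        for (unsigned i = 0; i < 6; i++)
                printf("ring0 slot %u: %s\n", i,
                       slot_type(0, i, bufs_per_pkt) == BTYPE_HEAD
                       ? "HEAD" : "BODY");
        return 0;
}
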
@@ -700,7 +697,6 @@ vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
                        goto rcd_done;
                }
 
-
                /* Initialize newly received packet buffer */
                rxm->port = rxq->port_id;
                rxm->nb_segs = 1;
@@ -760,7 +756,8 @@ vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 
 rcd_done:
                rxq->cmd_ring[ring_idx].next2comp = idx;
-               VMXNET3_INC_RING_IDX_ONLY(rxq->cmd_ring[ring_idx].next2comp, rxq->cmd_ring[ring_idx].size);
+               VMXNET3_INC_RING_IDX_ONLY(rxq->cmd_ring[ring_idx].next2comp,
+                                         rxq->cmd_ring[ring_idx].size);
 
                /* It's time to allocate some new buf and renew descriptors */
                vmxnet3_post_rx_bufs(rxq, ring_idx);
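
VMXNET3_INC_RING_IDX_ONLY above advances a ring index with wraparound. A hedged equivalent helper (the actual macro is defined in the driver's ring header) would look like:

#include <stdint.h>

/* Stand-in for VMXNET3_INC_RING_IDX_ONLY: bump the index and wrap to 0
 * when it reaches the ring size. */
static inline void inc_ring_idx_only(uint32_t *idx, uint32_t size)
{
        if (++(*idx) == size)
                *idx = 0;
}
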
@@ -775,8 +772,7 @@ rcd_done:
                rcd = &rxq->comp_ring.base[rxq->comp_ring.next2proc].rcd;
                nb_rxd++;
                if (nb_rxd > rxq->cmd_ring[0].size) {
-                       PMD_RX_LOG(ERR,
-                                  "Used up quota of receiving packets,"
+                       PMD_RX_LOG(ERR, "Used up quota of receiving packets,"
                                   " relinquishing control.");
                        break;
                }
@@ -798,15 +794,15 @@ ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
        const struct rte_memzone *mz;
 
        snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
-                       dev->driver->pci_drv.name, ring_name,
-                       dev->data->port_id, queue_id);
+                dev->driver->pci_drv.driver.name, ring_name,
+                dev->data->port_id, queue_id);
 
        mz = rte_memzone_lookup(z_name);
        if (mz)
                return mz;
 
        return rte_memzone_reserve_aligned(z_name, ring_size,
-                       socket_id, 0, VMXNET3_RING_BA_ALIGN);
+                                          socket_id, 0, VMXNET3_RING_BA_ALIGN);
 }
 
 int
@@ -814,7 +810,7 @@ vmxnet3_dev_tx_queue_setup(struct rte_eth_dev *dev,
                           uint16_t queue_idx,
                           uint16_t nb_desc,
                           unsigned int socket_id,
-                          __attribute__((unused)) const struct rte_eth_txconf *tx_conf)
+                          __rte_unused const struct rte_eth_txconf *tx_conf)
 {
        struct vmxnet3_hw *hw = dev->data->dev_private;
        const struct rte_memzone *mz;
@@ -832,7 +828,8 @@ vmxnet3_dev_tx_queue_setup(struct rte_eth_dev *dev,
                return -EINVAL;
        }
 
-       txq = rte_zmalloc("ethdev_tx_queue", sizeof(struct vmxnet3_tx_queue), RTE_CACHE_LINE_SIZE);
+       txq = rte_zmalloc("ethdev_tx_queue", sizeof(struct vmxnet3_tx_queue),
+                         RTE_CACHE_LINE_SIZE);
        if (txq == NULL) {
                PMD_INIT_LOG(ERR, "Can not allocate tx queue structure");
                return -ENOMEM;
@@ -915,12 +912,12 @@ vmxnet3_dev_rx_queue_setup(struct rte_eth_dev *dev,
                           uint16_t queue_idx,
                           uint16_t nb_desc,
                           unsigned int socket_id,
-                          __attribute__((unused)) const struct rte_eth_rxconf *rx_conf,
+                          __rte_unused const struct rte_eth_rxconf *rx_conf,
                           struct rte_mempool *mp)
 {
        const struct rte_memzone *mz;
        struct vmxnet3_rx_queue *rxq;
-       struct vmxnet3_hw     *hw = dev->data->dev_private;
+       struct vmxnet3_hw *hw = dev->data->dev_private;
        struct vmxnet3_cmd_ring *ring0, *ring1, *ring;
        struct vmxnet3_comp_ring *comp_ring;
        int size;
@@ -929,7 +926,8 @@ vmxnet3_dev_rx_queue_setup(struct rte_eth_dev *dev,
 
        PMD_INIT_FUNC_TRACE();
 
-       rxq = rte_zmalloc("ethdev_rx_queue", sizeof(struct vmxnet3_rx_queue), RTE_CACHE_LINE_SIZE);
+       rxq = rte_zmalloc("ethdev_rx_queue", sizeof(struct vmxnet3_rx_queue),
+                         RTE_CACHE_LINE_SIZE);
        if (rxq == NULL) {
                PMD_INIT_LOG(ERR, "Can not allocate rx queue structure");
                return -ENOMEM;
@@ -1003,7 +1001,9 @@ vmxnet3_dev_rx_queue_setup(struct rte_eth_dev *dev,
                ring->rid = i;
                snprintf(mem_name, sizeof(mem_name), "rx_ring_%d_buf_info", i);
 
-               ring->buf_info = rte_zmalloc(mem_name, ring->size * sizeof(vmxnet3_buf_info_t), RTE_CACHE_LINE_SIZE);
+               ring->buf_info = rte_zmalloc(mem_name,
+                                            ring->size * sizeof(vmxnet3_buf_info_t),
+                                            RTE_CACHE_LINE_SIZE);
                if (ring->buf_info == NULL) {
                        PMD_INIT_LOG(ERR, "ERROR: Creating rx_buf_info structure");
                        return -ENOMEM;
@@ -1037,10 +1037,15 @@ vmxnet3_dev_rxtx_init(struct rte_eth_dev *dev)
                        /* Passing 0 as alloc_num will allocate full ring */
                        ret = vmxnet3_post_rx_bufs(rxq, j);
                        if (ret <= 0) {
-                               PMD_INIT_LOG(ERR, "ERROR: Posting Rxq: %d buffers ring: %d", i, j);
+                               PMD_INIT_LOG(ERR,
+                                            "ERROR: Posting Rxq: %d buffers ring: %d",
+                                            i, j);
                                return -ret;
                        }
-                       /* Updating device with the index:next2fill to fill the mbufs for coming packets */
+                       /*
+                        * Update the device with next2fill so it starts using
+                        * the newly posted mbufs for incoming packets.
+                        */
                        if (unlikely(rxq->shared->ctrl.updateRxProd)) {
                                VMXNET3_WRITE_BAR0_REG(hw, rxprod_reg[j] + (rxq->queue_id * VMXNET3_REG_ALIGN),
                                                       rxq->cmd_ring[j].next2fill);
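
The updateRxProd write above is a producer-index doorbell: after refilling, next2fill is written to a per-queue BAR0 register so the device sees the new buffers. A hedged sketch of that MMIO pattern, with REG_STRIDE standing in for VMXNET3_REG_ALIGN and an illustrative, not exact, register layout:

#include <stdint.h>

#define REG_STRIDE 8u   /* stand-in for VMXNET3_REG_ALIGN */

/* Write the new producer index for (ring, queue) to its doorbell
 * register; bar0 is the mapped BAR0 base, rxprod_off the ring's
 * register offset. */
static inline void kick_rx_prod(volatile uint8_t *bar0, uint32_t rxprod_off,
                                uint32_t queue_id, uint32_t next2fill)
{
        volatile uint32_t *reg = (volatile uint32_t *)
                (bar0 + rxprod_off + queue_id * REG_STRIDE);
        *reg = next2fill;
}
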
@@ -1088,7 +1093,7 @@ vmxnet3_rss_configure(struct rte_eth_dev *dev)
        dev_rss_conf->hashFunc = VMXNET3_RSS_HASH_FUNC_TOEPLITZ;
        /* loading hashKeySize */
        dev_rss_conf->hashKeySize = VMXNET3_RSS_MAX_KEY_SIZE;
-       /* loading indTableSize : Must not exceed VMXNET3_RSS_MAX_IND_TABLE_SIZE (128)*/
+       /* loading indTableSize: Must not exceed VMXNET3_RSS_MAX_IND_TABLE_SIZE (128) */
        dev_rss_conf->indTableSize = (uint16_t)(hw->num_rx_queues * 4);
 
        if (port_rss_conf->rss_key == NULL) {
@@ -1097,7 +1102,8 @@ vmxnet3_rss_configure(struct rte_eth_dev *dev)
        }
 
        /* loading hashKey */
-       memcpy(&dev_rss_conf->hashKey[0], port_rss_conf->rss_key, dev_rss_conf->hashKeySize);
+       memcpy(&dev_rss_conf->hashKey[0], port_rss_conf->rss_key,
+              dev_rss_conf->hashKeySize);
 
        /* loading indTable */
        for (i = 0, j = 0; i < dev_rss_conf->indTableSize; i++, j++) {
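
The loop that begins here round-robins queue ids into the indirection table (its body is cut off by this hunk). A hedged standalone sketch of that common pattern, matching the indTableSize = num_rx_queues * 4 sizing above; this is an illustration, not the driver's verbatim loop body:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint16_t num_rx_queues = 4;                   /* hypothetical */
        uint16_t ind_table_size = num_rx_queues * 4;  /* as sized above */
        uint8_t ind_table[128];                       /* max table size */

        /* Spread hash buckets across rx queues round-robin */
        for (uint16_t i = 0, j = 0; i < ind_table_size; i++, j++) {
                if (j == num_rx_queues)
                        j = 0;
                ind_table[i] = (uint8_t)j;
        }

        for (uint16_t i = 0; i < ind_table_size; i++)
                printf("bucket %2u -> queue %u\n", i, (unsigned)ind_table[i]);
        return 0;
}
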