New upstream version 18.08
[deb_dpdk.git] / drivers / net / bnxt / bnxt_rxq.c
index d49f354..832fc9e 100644
@@ -1,34 +1,6 @@
-/*-
- *   BSD LICENSE
- *
- *   Copyright(c) Broadcom Limited.
- *   All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Broadcom Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2014-2018 Broadcom
+ * All rights reserved.
  */
 
 #include <inttypes.h>
 
 void bnxt_free_rxq_stats(struct bnxt_rx_queue *rxq)
 {
-       struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
-
-       if (cpr->hw_stats)
-               cpr->hw_stats = NULL;
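+       /* hw_stats points into the queue's ring memzone; just drop the
+        * reference here, the memory itself is released with the memzone.
+        */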
+       if (rxq && rxq->cp_ring && rxq->cp_ring->hw_stats)
+               rxq->cp_ring->hw_stats = NULL;
 }
 
 int bnxt_mq_rx_configure(struct bnxt *bp)
@@ -107,6 +77,7 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
                switch (dev_conf->rxmode.mq_mode) {
                case ETH_MQ_RX_VMDQ_RSS:
                case ETH_MQ_RX_VMDQ_ONLY:
+                       /* FALLTHROUGH */
                        /* ETH_8/64_POOLs */
                        pools = conf->nb_queue_pools;
                        /* For each pool, allocate MACVLAN CFA rule & VNIC */
@@ -228,16 +199,19 @@ err_out:
        return rc;
 }
 
-static void bnxt_rx_queue_release_mbufs(struct bnxt_rx_queue *rxq)
+void bnxt_rx_queue_release_mbufs(struct bnxt_rx_queue *rxq)
 {
        struct bnxt_sw_rx_bd *sw_ring;
        struct bnxt_tpa_info *tpa_info;
        uint16_t i;
 
+       if (!rxq)
+               return;
+
+       rte_spinlock_lock(&rxq->lock);
+
        if (rxq) {
                sw_ring = rxq->rx_ring->rx_buf_ring;
                if (sw_ring) {
-                       for (i = 0; i < rxq->nb_rx_desc; i++) {
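+                       /*
+                        * ring_size is the allocated ring size (nb_rx_desc
+                        * rounded up to a power of two), so walk the full
+                        * ring rather than nb_rx_desc entries.
+                        */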
+                       for (i = 0;
+                            i < rxq->rx_ring->rx_ring_struct->ring_size; i++) {
                                if (sw_ring[i].mbuf) {
                                        rte_pktmbuf_free_seg(sw_ring[i].mbuf);
                                        sw_ring[i].mbuf = NULL;
@@ -247,7 +221,8 @@ static void bnxt_rx_queue_release_mbufs(struct bnxt_rx_queue *rxq)
                /* Free up mbufs in Agg ring */
                sw_ring = rxq->rx_ring->ag_buf_ring;
                if (sw_ring) {
-                       for (i = 0; i < rxq->nb_rx_desc; i++) {
+                       for (i = 0;
+                            i < rxq->rx_ring->ag_ring_struct->ring_size; i++) {
                                if (sw_ring[i].mbuf) {
                                        rte_pktmbuf_free_seg(sw_ring[i].mbuf);
                                        sw_ring[i].mbuf = NULL;
@@ -266,6 +241,8 @@ static void bnxt_rx_queue_release_mbufs(struct bnxt_rx_queue *rxq)
                        }
                }
        }
+
+       rte_spinlock_unlock(&rxq->lock);
 }
 
 void bnxt_free_rx_mbufs(struct bnxt *bp)
@@ -295,6 +272,8 @@ void bnxt_rx_queue_release_op(void *rx_queue)
                bnxt_free_ring(rxq->cp_ring->cp_ring_struct);
 
                bnxt_free_rxq_stats(rxq);
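+               /*
+                * Ring descriptors and stats are carved from one per-queue
+                * memzone (saved in rxq->mz by bnxt_alloc_rings); free it now
+                * that the rings are gone.
+                */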
+               rte_memzone_free(rxq->mz);
+               rxq->mz = NULL;
 
                rte_free(rxq);
        }
@@ -308,14 +287,16 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
                               struct rte_mempool *mp)
 {
        struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+       uint64_t rx_offloads = eth_dev->data->dev_conf.rxmode.offloads;
        struct bnxt_rx_queue *rxq;
        int rc = 0;
+       uint8_t queue_state;
 
        if (queue_idx >= bp->max_rx_rings) {
                PMD_DRV_LOG(ERR,
                        "Cannot create Rx ring %d. Only %d rings available\n",
                        queue_idx, bp->max_rx_rings);
-               return -ENOSPC;
+               return -EINVAL;
        }
 
        if (!nb_desc || nb_desc > MAX_RX_DESC_CNT) {
@@ -350,12 +331,12 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
 
        rxq->queue_id = queue_idx;
        rxq->port_id = eth_dev->data->port_id;
-       rxq->crc_len = (uint8_t)((eth_dev->data->dev_conf.rxmode.hw_strip_crc) ?
-                               0 : ETHER_CRC_LEN);
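+       /* rte_eth_dev_must_keep_crc() inspects the Rx offload flags and
+        * returns true when the CRC is to remain on received frames.
+        */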
+       rxq->crc_len = rte_eth_dev_must_keep_crc(rx_offloads) ?
+               ETHER_CRC_LEN : 0;
 
        eth_dev->data->rx_queues[queue_idx] = rxq;
        /* Allocate RX ring hardware descriptors */
-       if (bnxt_alloc_rings(bp, queue_idx, NULL, rxq->rx_ring, rxq->cp_ring,
+       if (bnxt_alloc_rings(bp, queue_idx, NULL, rxq, rxq->cp_ring,
                        "rxr")) {
                PMD_DRV_LOG(ERR,
                        "ring_dma_zone_reserve for rx_ring failed!\n");
@@ -363,7 +344,13 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
                rc = -ENOMEM;
                goto out;
        }
+       rte_atomic64_init(&rxq->rx_mbuf_alloc_fail);
 
+       rxq->rx_deferred_start = rx_conf->rx_deferred_start;
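+       /* A deferred-start queue is reported as stopped until
+        * bnxt_rx_queue_start() is called on it.
+        */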
+       queue_state = rxq->rx_deferred_start ? RTE_ETH_QUEUE_STATE_STOPPED :
+                                               RTE_ETH_QUEUE_STATE_STARTED;
+       eth_dev->data->rx_queue_state[queue_idx] = queue_state;
+       rte_spinlock_init(&rxq->lock);
 out:
        return rc;
 }
@@ -412,6 +399,7 @@ int bnxt_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
        struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
        struct bnxt_rx_queue *rxq = bp->rx_queues[rx_queue_id];
        struct bnxt_vnic_info *vnic = NULL;
+       int rc = 0;
 
        if (rxq == NULL) {
                PMD_DRV_LOG(ERR, "Invalid Rx queue %d\n", rx_queue_id);
@@ -419,28 +407,47 @@ int bnxt_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
        }
 
        dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
-       rxq->rx_deferred_start = false;
+
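+       /*
+        * Tear down and recreate the HWRM Rx ring so the queue restarts with
+        * clean ring state (e.g. for a queue created with deferred start).
+        */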
+       bnxt_free_hwrm_rx_ring(bp, rx_queue_id);
+       bnxt_alloc_hwrm_rx_ring(bp, rx_queue_id);
        PMD_DRV_LOG(INFO, "Rx queue started %d\n", rx_queue_id);
+
        if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
                vnic = rxq->vnic;
+
                if (vnic->fw_grp_ids[rx_queue_id] != INVALID_HW_RING_ID)
                        return 0;
-               PMD_DRV_LOG(DEBUG, "vnic = %p fw_grp_id = %d\n",
-                       vnic, bp->grp_info[rx_queue_id + 1].fw_grp_id);
+
+               PMD_DRV_LOG(DEBUG,
+                           "vnic = %p fw_grp_id = %d\n",
+                           vnic, bp->grp_info[rx_queue_id].fw_grp_id);
+
                vnic->fw_grp_ids[rx_queue_id] =
-                                       bp->grp_info[rx_queue_id + 1].fw_grp_id;
-               return bnxt_vnic_rss_configure(bp, vnic);
+                                       bp->grp_info[rx_queue_id].fw_grp_id;
+               rc = bnxt_vnic_rss_configure(bp, vnic);
        }
 
-       return 0;
+       if (rc == 0)
+               rxq->rx_deferred_start = false;
+
+       return rc;
 }
 
 int bnxt_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 {
        struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
        struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
-       struct bnxt_rx_queue *rxq = bp->rx_queues[rx_queue_id];
        struct bnxt_vnic_info *vnic = NULL;
+       struct bnxt_rx_queue *rxq = NULL;
+       int rc = 0;
+
+       /* Rx CQ 0 also works as Default CQ for async notifications */
+       if (!rx_queue_id) {
+               PMD_DRV_LOG(ERR, "Cannot stop Rx queue id %d\n", rx_queue_id);
+               return -EINVAL;
+       }
+
+       rxq = bp->rx_queues[rx_queue_id];
 
        if (rxq == NULL) {
                PMD_DRV_LOG(ERR, "Invalid Rx queue %d\n", rx_queue_id);
@@ -454,7 +461,11 @@ int bnxt_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
        if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) {
                vnic = rxq->vnic;
                vnic->fw_grp_ids[rx_queue_id] = INVALID_HW_RING_ID;
-               return bnxt_vnic_rss_configure(bp, vnic);
+               rc = bnxt_vnic_rss_configure(bp, vnic);
        }
-       return 0;
+
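+       /* The queue is stopped; release any mbufs still posted to its rings. */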
+       if (rc == 0)
+               bnxt_rx_queue_release_mbufs(rxq);
+
+       return rc;
 }