New upstream version 17.08

diff --git a/drivers/net/bnxt/bnxt_rxq.c b/drivers/net/bnxt/bnxt_rxq.c
index cddf17d..0793820 100644
--- a/drivers/net/bnxt/bnxt_rxq.c
+++ b/drivers/net/bnxt/bnxt_rxq.c
@@ -76,6 +76,7 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
                        rc = -ENOMEM;
                        goto err_out;
                }
+               vnic->flags |= BNXT_VNIC_INFO_BCAST;
                STAILQ_INSERT_TAIL(&bp->ff_pool[0], vnic, next);
                bp->nr_vnics++;
 
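Note: every VNIC this function allocates is now tagged with BNXT_VNIC_INFO_BCAST, so broadcast frames are accepted once the VNIC's receive mask is programmed over HWRM. Below is a minimal, self-contained sketch of the consuming side; the flag and mask values are stand-ins, not the driver's real constants (those live in bnxt_vnic.h and the HWRM headers).

#include <stdint.h>
#include <stdio.h>

/* Stand-in values; the real definitions live in bnxt_vnic.h and
 * hsi_struct_def_dpdk.h. */
#define BNXT_VNIC_INFO_BCAST	(1 << 0)
#define HWRM_RX_MASK_BCAST	(1 << 3)

struct bnxt_vnic_info { uint32_t flags; };

/* Models how the L2 RX-mask setup would translate the VNIC flag
 * into the HWRM receive-mask bit. */
static uint32_t rx_mask_from_vnic(const struct bnxt_vnic_info *vnic)
{
	uint32_t mask = 0;

	if (vnic->flags & BNXT_VNIC_INFO_BCAST)
		mask |= HWRM_RX_MASK_BCAST;
	return mask;
}

int main(void)
{
	struct bnxt_vnic_info vnic = { .flags = BNXT_VNIC_INFO_BCAST };

	printf("rx mask = 0x%x\n", rx_mask_from_vnic(&vnic));
	return 0;
}
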
@@ -84,9 +85,8 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
 
                vnic->func_default = true;
                vnic->ff_pool_idx = 0;
-               vnic->start_grp_id = 1;
-               vnic->end_grp_id = vnic->start_grp_id +
-                                  bp->rx_cp_nr_rings - 1;
+               vnic->start_grp_id = 0;
+               vnic->end_grp_id = vnic->start_grp_id;
                filter = bnxt_alloc_filter(bp);
                if (!filter) {
                        RTE_LOG(ERR, PMD, "L2 filter alloc failed\n");
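Note: this hunk and the ones below renumber ring groups from 1-based to 0-based. A small standalone illustration of the per-pool arithmetic in the VMDq path follows, assuming the end-exclusive reading that the new start_grp_id = 0 / end_grp_id = nb_q_per_grp pair suggests, and that groups map 1:1 to RX rings and divide evenly across pools.

#include <stdio.h>

int main(void)
{
	/* Illustrative values only. */
	unsigned int rx_rings = 8, pools = 4;
	unsigned int nb_q_per_grp = rx_rings / pools;
	unsigned int p;

	/* 0-based, end-exclusive group ranges, one per pool. */
	for (p = 0; p < pools; p++)
		printf("pool %u: groups [%u, %u)\n",
		       p, p * nb_q_per_grp, (p + 1) * nb_q_per_grp);
	return 0;
}
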
@@ -121,13 +121,16 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
                }
                /* For each pool, allocate MACVLAN CFA rule & VNIC */
                if (!pools) {
+                       pools = RTE_MIN(bp->max_vnics,
+                           RTE_MIN(bp->max_l2_ctx,
+                            RTE_MIN(bp->max_rsscos_ctx, ETH_64_POOLS)));
                        RTE_LOG(ERR, PMD,
                                "VMDq pool not set, defaulted to 64\n");
                        pools = ETH_64_POOLS;
                }
                nb_q_per_grp = bp->rx_cp_nr_rings / pools;
-               start_grp_id = 1;
-               end_grp_id = start_grp_id + nb_q_per_grp - 1;
+               start_grp_id = 0;
+               end_grp_id = nb_q_per_grp;
 
                ring_idx = 0;
                for (i = 0; i < pools; i++) {
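Note: the new default clamps the pool count to the device's actual VNIC, L2-context, and RSS-context limits instead of assuming 64. As the hunk stands, though, the computed value is immediately overwritten by the pre-existing pools = ETH_64_POOLS; line that was left in place. A standalone sketch of the nested-minimum arithmetic, with MIN standing in for RTE_MIN and invented limit values:

#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))	/* stand-in for RTE_MIN */
#define ETH_64_POOLS 64

int main(void)
{
	/* Invented device limits, for illustration. */
	unsigned int max_vnics = 32, max_l2_ctx = 128, max_rsscos_ctx = 16;
	unsigned int pools;

	pools = MIN(max_vnics,
	    MIN(max_l2_ctx,
	     MIN(max_rsscos_ctx, ETH_64_POOLS)));

	printf("default pools = %u\n", pools);	/* 16: tightest limit wins */
	return 0;
}
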
@@ -138,6 +141,7 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
                                rc = -ENOMEM;
                                goto err_out;
                        }
+                       vnic->flags |= BNXT_VNIC_INFO_BCAST;
                        STAILQ_INSERT_TAIL(&bp->ff_pool[i], vnic, next);
                        bp->nr_vnics++;
 
@@ -178,6 +182,7 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
                rc = -ENOMEM;
                goto err_out;
        }
+       vnic->flags |= BNXT_VNIC_INFO_BCAST;
        /* Partition the rx queues for the single pool */
        for (i = 0; i < bp->rx_cp_nr_rings; i++) {
                rxq = bp->eth_dev->data->rx_queues[i];
@@ -188,9 +193,8 @@ int bnxt_mq_rx_configure(struct bnxt *bp)
 
        vnic->func_default = true;
        vnic->ff_pool_idx = 0;
-       vnic->start_grp_id = 1;
-       vnic->end_grp_id = vnic->start_grp_id +
-                          bp->rx_cp_nr_rings - 1;
+       vnic->start_grp_id = 0;
+       vnic->end_grp_id = bp->rx_cp_nr_rings;
        filter = bnxt_alloc_filter(bp);
        if (!filter) {
                RTE_LOG(ERR, PMD, "L2 filter alloc failed\n");
@@ -213,9 +217,10 @@ err_out:
        return rc;
 }
 
-static void bnxt_rx_queue_release_mbufs(struct bnxt_rx_queue *rxq __rte_unused)
+static void bnxt_rx_queue_release_mbufs(struct bnxt_rx_queue *rxq)
 {
        struct bnxt_sw_rx_bd *sw_ring;
+       struct bnxt_tpa_info *tpa_info;
        uint16_t i;
 
        if (rxq) {
@@ -228,6 +233,27 @@ static void bnxt_rx_queue_release_mbufs(struct bnxt_rx_queue *rxq __rte_unused)
                                }
                        }
                }
+               /* Free up mbufs in Agg ring */
+               sw_ring = rxq->rx_ring->ag_buf_ring;
+               if (sw_ring) {
+                       for (i = 0; i < rxq->nb_rx_desc; i++) {
+                               if (sw_ring[i].mbuf) {
+                                       rte_pktmbuf_free_seg(sw_ring[i].mbuf);
+                                       sw_ring[i].mbuf = NULL;
+                               }
+                       }
+               }
+
+               /* Free up mbufs in TPA */
+               tpa_info = rxq->rx_ring->tpa_info;
+               if (tpa_info) {
+                       for (i = 0; i < BNXT_TPA_MAX; i++) {
+                               if (tpa_info[i].mbuf) {
+                                       rte_pktmbuf_free_seg(tpa_info[i].mbuf);
+                                       tpa_info[i].mbuf = NULL;
+                               }
+                       }
+               }
        }
 }
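Note: the release helper now also drains the aggregation ring and the TPA (LRO) slots. rte_pktmbuf_free_seg() is used rather than rte_pktmbuf_free() because each slot holds a single, unchained segment, so only that segment should return to its mempool. A minimal standalone demonstration of the same idiom (pool name and sizes are arbitrary):

#include <rte_eal.h>
#include <rte_mbuf.h>

#define RING_SIZE 4

int main(int argc, char **argv)
{
	struct rte_mbuf *sw_ring[RING_SIZE] = { NULL };
	struct rte_mempool *mp;
	int i, ret;

	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		return -1;

	/* Arbitrary pool parameters, for illustration only. */
	mp = rte_pktmbuf_pool_create("demo_pool", 512, 0, 0,
				     RTE_MBUF_DEFAULT_BUF_SIZE,
				     rte_socket_id());
	if (mp == NULL)
		return -1;

	/* Park a few standalone segments, as an RX/agg ring would. */
	for (i = 0; i < RING_SIZE; i++)
		sw_ring[i] = rte_pktmbuf_alloc(mp);

	/* Release them one segment at a time and clear the slot, the
	 * same idiom the driver uses for RX, agg, and TPA buffers. */
	for (i = 0; i < RING_SIZE; i++) {
		if (sw_ring[i]) {
			rte_pktmbuf_free_seg(sw_ring[i]);
			sw_ring[i] = NULL;
		}
	}
	return 0;
}
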
 
@@ -251,6 +277,8 @@ void bnxt_rx_queue_release_op(void *rx_queue)
 
                /* Free RX ring hardware descriptors */
                bnxt_free_ring(rxq->rx_ring->rx_ring_struct);
+               /* Free RX Agg ring hardware descriptors */
+               bnxt_free_ring(rxq->rx_ring->ag_ring_struct);
 
                /* Free RX completion ring hardware descriptors */
                bnxt_free_ring(rxq->cp_ring->cp_ring_struct);
@@ -273,7 +301,7 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
        int rc = 0;
 
        if (!nb_desc || nb_desc > MAX_RX_DESC_CNT) {
-               RTE_LOG(ERR, PMD, "nb_desc %d is invalid", nb_desc);
+               RTE_LOG(ERR, PMD, "nb_desc %d is invalid\n", nb_desc);
                rc = -EINVAL;
                goto out;
        }
@@ -286,7 +314,7 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
        rxq = rte_zmalloc_socket("bnxt_rx_queue", sizeof(struct bnxt_rx_queue),
                                 RTE_CACHE_LINE_SIZE, socket_id);
        if (!rxq) {
-               RTE_LOG(ERR, PMD, "bnxt_rx_queue allocation failed!");
+               RTE_LOG(ERR, PMD, "bnxt_rx_queue allocation failed!\n");
                rc = -ENOMEM;
                goto out;
        }
@@ -295,6 +323,9 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
        rxq->nb_rx_desc = nb_desc;
        rxq->rx_free_thresh = rx_conf->rx_free_thresh;
 
+       RTE_LOG(DEBUG, PMD, "RX Buf size is %d\n", rxq->rx_buf_use_size);
+       RTE_LOG(DEBUG, PMD, "RX Buf MTU %d\n", eth_dev->data->mtu);
+
        rc = bnxt_init_rx_ring_struct(rxq, socket_id);
        if (rc)
                goto out;
@@ -308,7 +339,8 @@ int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev,
        /* Allocate RX ring hardware descriptors */
        if (bnxt_alloc_rings(bp, queue_idx, NULL, rxq->rx_ring, rxq->cp_ring,
                        "rxr")) {
-               RTE_LOG(ERR, PMD, "ring_dma_zone_reserve for rx_ring failed!");
+               RTE_LOG(ERR, PMD,
+                       "ring_dma_zone_reserve for rx_ring failed!\n");
                bnxt_rx_queue_release_op(rxq);
                rc = -ENOMEM;
                goto out;
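
Note: for context, rte_eth_rx_queue_setup() dispatches to bnxt_rx_queue_setup_op() through eth_dev_ops, so the -EINVAL and -ENOMEM paths above surface directly as its return value. The helper below is hypothetical; port and mempool setup are assumed to happen elsewhere (port IDs are uint8_t in 17.08-era headers).

#include <rte_ethdev.h>

/* Hypothetical helper: set up one RX queue on an already-configured
 * port. nb_rxd must satisfy the driver's MAX_RX_DESC_CNT check. */
static int setup_one_rxq(uint8_t port_id, uint16_t queue_id,
			 uint16_t nb_rxd, struct rte_mempool *mbuf_pool)
{
	/* NULL rx_conf selects the driver's defaults. */
	return rte_eth_rx_queue_setup(port_id, queue_id, nb_rxd,
				      rte_eth_dev_socket_id(port_id),
				      NULL, mbuf_pool);
}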