New upstream version 18.08
diff --git a/drivers/net/qede/qede_rxtx.c b/drivers/net/qede/qede_rxtx.c
index 0de7c6b..0f157de 100644
@@ -1,9 +1,7 @@
-/*
- * Copyright (c) 2016 QLogic Corporation.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
  * All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
+ * www.cavium.com
  */
 
 #include <rte_net.h>
@@ -87,7 +85,7 @@ qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 
        /* Fix up RX buffer size */
        bufsz = (uint16_t)rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
-       if ((rxmode->enable_scatter)                    ||
+       if ((rxmode->offloads & DEV_RX_OFFLOAD_SCATTER) ||
            (max_rx_pkt_len + QEDE_ETH_OVERHEAD) > bufsz) {
                if (!dev->data->scattered_rx) {
                        DP_INFO(edev, "Forcing scatter-gather mode\n");
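The forced scatter-gather above triggers either when the application asks for DEV_RX_OFFLOAD_SCATTER explicitly or when the largest accepted frame plus the driver's per-packet overhead no longer fits one mbuf. A worked example under assumed defaults (the 2048-byte data room and the jumbo max_rx_pkt_len are illustrative, not taken from the driver):

	/* Sketch: a typical 2048-byte mempool data room minus the default
	 * 128-byte RTE_PKTMBUF_HEADROOM leaves 1920 usable bytes per mbuf. */
	uint16_t bufsz = 2048 - RTE_PKTMBUF_HEADROOM;	/* 1920 */
	/* With max_rx_pkt_len = 9000, 9000 + QEDE_ETH_OVERHEAD > 1920, so
	 * each frame spans several mbufs and scattered_rx is forced on. */
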
@@ -192,9 +190,15 @@ static void qede_rx_queue_release_mbufs(struct qede_rx_queue *rxq)
 void qede_rx_queue_release(void *rx_queue)
 {
        struct qede_rx_queue *rxq = rx_queue;
 
        if (rxq) {
+               struct qede_dev *qdev = rxq->qdev;
+               struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+
+               PMD_INIT_FUNC_TRACE(edev);
                qede_rx_queue_release_mbufs(rxq);
+               qdev->ops->common->chain_free(edev, &rxq->rx_bd_ring);
+               qdev->ops->common->chain_free(edev, &rxq->rx_comp_ring);
                rte_free(rxq->sw_rx_ring);
                rte_free(rxq);
        }
@@ -350,9 +354,14 @@ static void qede_tx_queue_release_mbufs(struct qede_tx_queue *txq)
 void qede_tx_queue_release(void *tx_queue)
 {
        struct qede_tx_queue *txq = tx_queue;
 
        if (txq) {
+               struct qede_dev *qdev = txq->qdev;
+               struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+
+               PMD_INIT_FUNC_TRACE(edev);
                qede_tx_queue_release_mbufs(txq);
+               qdev->ops->common->chain_free(edev, &txq->tx_pbl);
                rte_free(txq->sw_tx_ring);
                rte_free(txq);
        }
@@ -441,8 +450,6 @@ void qede_dealloc_fp_resc(struct rte_eth_dev *eth_dev)
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
        struct qede_fastpath *fp;
-       struct qede_rx_queue *rxq;
-       struct qede_tx_queue *txq;
        uint16_t sb_idx;
        uint8_t i;
 
@@ -467,21 +474,13 @@ void qede_dealloc_fp_resc(struct rte_eth_dev *eth_dev)
        for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
                if (eth_dev->data->rx_queues[i]) {
                        qede_rx_queue_release(eth_dev->data->rx_queues[i]);
-                       rxq = eth_dev->data->rx_queues[i];
-                       qdev->ops->common->chain_free(edev,
-                                                     &rxq->rx_bd_ring);
-                       qdev->ops->common->chain_free(edev,
-                                                     &rxq->rx_comp_ring);
                        eth_dev->data->rx_queues[i] = NULL;
                }
        }
 
        for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
                if (eth_dev->data->tx_queues[i]) {
-                       txq = eth_dev->data->tx_queues[i];
                        qede_tx_queue_release(eth_dev->data->tx_queues[i]);
-                       qdev->ops->common->chain_free(edev,
-                                                     &txq->tx_pbl);
                        eth_dev->data->tx_queues[i] = NULL;
                }
        }
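With chain_free moved into qede_rx_queue_release()/qede_tx_queue_release(), the release callbacks now free everything the queue owns, so this loop shrinks to release-and-clear. That self-containment matters because the ethdev layer also calls the release op directly, e.g. when a queue is set up a second time; a sketch from the application side (port_id, rxconf and mp are assumed to be set up elsewhere):

	/* Re-running setup on the same queue index makes ethdev invoke the
	 * PMD's rx_queue_release on the old queue first; the HW chains must
	 * be freed there or they would leak on every reconfiguration. */
	rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(), &rxconf, mp);
	rte_eth_rx_queue_setup(port_id, 0, 1024, rte_socket_id(), &rxconf, mp);
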
@@ -1466,6 +1465,8 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
                         */
                        rx_mb->data_off = offset + RTE_PKTMBUF_HEADROOM;
                        packet_type |= qede_rx_cqe_to_pkt_type_outer(rx_mb);
+               } else {
+                       packet_type |= qede_rx_cqe_to_pkt_type(parse_flag);
                }
 
                /* Common handling for non-tunnel packets and for inner
@@ -1487,7 +1488,6 @@ qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
                        ol_flags |= PKT_RX_IP_CKSUM_BAD;
                } else {
                        ol_flags |= PKT_RX_IP_CKSUM_GOOD;
-                       packet_type |= qede_rx_cqe_to_pkt_type(parse_flag);
                }
 
                if (CQE_HAS_VLAN(parse_flag) ||
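Together with the hunk above, this moves packet-type classification out of the checksum-good branch: pkt_type is now derived from the parse flags for every non-tunneled frame, even when PKT_RX_IP_CKSUM_BAD is set. A hedged application-side sketch of what that enables (port_id and handle_pkt are placeholders):

	struct rte_mbuf *pkts[32];
	uint16_t i, n = rte_eth_rx_burst(port_id, 0, pkts, 32);

	for (i = 0; i < n; i++) {
		/* packet_type is valid even for frames with a bad IP csum */
		if ((pkts[i]->packet_type & RTE_PTYPE_L3_MASK) ==
		     RTE_PTYPE_L3_IPV4 &&
		    (pkts[i]->ol_flags & PKT_RX_IP_CKSUM_BAD))
			rte_pktmbuf_free(pkts[i]);	/* drop corrupt IPv4 */
		else
			handle_pkt(pkts[i]);
	}
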
@@ -1631,6 +1631,7 @@ qede_encode_sg_bd(struct qede_tx_queue *p_txq, struct rte_mbuf *m_seg,
                        QEDE_BD_SET_ADDR_LEN(tx_bd, mapping, m_seg->data_len);
                        PMD_TX_LOG(DEBUG, txq, "BD len %04x", m_seg->data_len);
                }
+               start_seg++;
                m_seg = m_seg->next;
        }
 
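The added start_seg++ advances the segment counter on every iteration of the chain walk, keeping it in step with m_seg. The walk itself is the standard chained-mbuf pattern; a minimal standalone sketch (m is a placeholder for the chain head):

	/* Generic chained-mbuf traversal, as the BD loop above performs; for
	 * a well-formed chain the summed data_len equals m->pkt_len. */
	struct rte_mbuf *seg;
	uint32_t total = 0;

	for (seg = m; seg != NULL; seg = seg->next)
		total += seg->data_len;
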
@@ -1837,17 +1838,14 @@ qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                 * offloads. Don't rely on pkt_type marked by Rx, instead use
                 * tx_ol_flags to decide.
                 */
-               if (((tx_ol_flags & PKT_TX_TUNNEL_MASK) ==
-                                               PKT_TX_TUNNEL_VXLAN) ||
-                   ((tx_ol_flags & PKT_TX_TUNNEL_MASK) ==
-                                               PKT_TX_TUNNEL_MPLSINUDP) ||
-                   ((tx_ol_flags & PKT_TX_TUNNEL_MASK) ==
-                                               PKT_TX_TUNNEL_GENEVE)) {
+               tunn_flg = !!(tx_ol_flags & PKT_TX_TUNNEL_MASK);
+
+               if (tunn_flg) {
                        /* Check against max which is Tunnel IPv6 + ext */
                        if (unlikely(txq->nb_tx_avail <
                                ETH_TX_MIN_BDS_PER_TUNN_IPV6_WITH_EXT_PKT))
                                        break;
-                       tunn_flg = true;
+
                        /* First indicate its a tunnel pkt */
                        bd1_bf |= ETH_TX_DATA_1ST_BD_TUNN_FLAG_MASK <<
                                  ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
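Replacing the per-type whitelist with a single mask test means any tunnel type requested through tx_ol_flags, including GRE, which the old comparisons missed, now takes the tunnel path. From the application side the path is selected by marking the mbuf before rte_eth_tx_burst(); a sketch for VXLAN (the checksum flags chosen here are illustrative):

	/* Request tunnel Tx offload on an encapsulated packet; any of the
	 * PKT_TX_TUNNEL_* types now sets tunn_flg in the PMD. */
	m->ol_flags |= PKT_TX_TUNNEL_VXLAN |
		       PKT_TX_IP_CKSUM |	/* inner IPv4 header csum */
		       PKT_TX_UDP_CKSUM;	/* inner L4 csum */
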
@@ -1971,7 +1969,7 @@ qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                }
 
                /* Descriptor based VLAN insertion */
-               if (tx_ol_flags & (PKT_TX_VLAN_PKT | PKT_TX_QINQ_PKT)) {
+               if (tx_ol_flags & PKT_TX_VLAN_PKT) {
                        vlan = rte_cpu_to_le_16(mbuf->vlan_tci);
                        bd1_bd_flags_bf |=
                            1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT;
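Dropping PKT_TX_QINQ_PKT from the test reflects that the descriptor inserts a single tag only, taken from mbuf->vlan_tci; a double-tag request would need the second tag written into the frame by software. Usage sketch:

	m->vlan_tci = 100;		/* TCI of the tag to insert */
	m->ol_flags |= PKT_TX_VLAN_PKT;	/* PKT_TX_QINQ_PKT is not honored */
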
@@ -1986,7 +1984,8 @@ qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                         * csum offload is requested then we need to force
                         * recalculation of L4 tunnel header csum also.
                         */
-                       if (tunn_flg) {
+                       if (tunn_flg && ((tx_ol_flags & PKT_TX_TUNNEL_MASK) !=
+                                                       PKT_TX_TUNNEL_GRE)) {
                                bd1_bd_flags_bf |=
                                        ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK <<
                                        ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT;
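The GRE exclusion works because the UDP-based encapsulations this PMD supports (VXLAN, GENEVE, MPLS-in-UDP) carry an outer UDP header whose checksum must be recomputed after inner-csum offload, while GRE has no outer L4 header at all. An equivalent, more explicit form of the test, assuming only these tunnel types reach this path:

	uint64_t tunn_type = tx_ol_flags & PKT_TX_TUNNEL_MASK;
	int outer_l4_csum = (tunn_type == PKT_TX_TUNNEL_VXLAN) ||
			    (tunn_type == PKT_TX_TUNNEL_GENEVE) ||
			    (tunn_type == PKT_TX_TUNNEL_MPLSINUDP);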