New upstream version 18.02
[deb_dpdk.git] / drivers / net / thunderx / nicvf_ethdev.c
index d65d3ce..a65361f 100644 (file)
@@ -1,33 +1,5 @@
-/*
- *   BSD LICENSE
- *
- *   Copyright (C) Cavium, Inc. 2016.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Cavium, Inc nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 Cavium, Inc
  */
 
 #include <assert.h>
@@ -52,7 +24,7 @@
 #include <rte_dev.h>
 #include <rte_eal.h>
 #include <rte_ether.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
 #include <rte_ethdev_pci.h>
 #include <rte_interrupts.h>
 #include <rte_log.h>
 #include "nicvf_svf.h"
 #include "nicvf_logs.h"
 
+int nicvf_logtype_mbox;
+int nicvf_logtype_init;
+int nicvf_logtype_driver;
+
 static void nicvf_dev_stop(struct rte_eth_dev *dev);
 static void nicvf_dev_stop_cleanup(struct rte_eth_dev *dev, bool cleanup);
 static void nicvf_vf_stop(struct rte_eth_dev *dev, struct nicvf *nic,
                          bool cleanup);
 
+/* Constructor hook (runs at shared-object load time via RTE_INIT, before
+ * main()) that registers this PMD's three dynamic log types — mbox, init
+ * and driver — and defaults each to NOTICE verbosity.
+ * rte_log_register() returns a negative value on failure, hence the
+ * ">= 0" guard before each rte_log_set_level() call; on failure the
+ * logtype variable keeps the error code and the level is left untouched.
+ */
+RTE_INIT(nicvf_init_log);
+static void
+nicvf_init_log(void)
+{
+       nicvf_logtype_mbox = rte_log_register("pmd.net.thunderx.mbox");
+       if (nicvf_logtype_mbox >= 0)
+               rte_log_set_level(nicvf_logtype_mbox, RTE_LOG_NOTICE);
+
+       nicvf_logtype_init = rte_log_register("pmd.net.thunderx.init");
+       if (nicvf_logtype_init >= 0)
+               rte_log_set_level(nicvf_logtype_init, RTE_LOG_NOTICE);
+
+       nicvf_logtype_driver = rte_log_register("pmd.net.thunderx.driver");
+       if (nicvf_logtype_driver >= 0)
+               rte_log_set_level(nicvf_logtype_driver, RTE_LOG_NOTICE);
+}
+
 static inline int
 nicvf_atomic_write_link_status(struct rte_eth_dev *dev,
                               struct rte_eth_link *link)
@@ -100,7 +93,7 @@ nicvf_set_eth_link_status(struct nicvf *nic, struct rte_eth_link *link)
        else if (nic->duplex == NICVF_FULL_DUPLEX)
                link->link_duplex = ETH_LINK_FULL_DUPLEX;
        link->link_speed = nic->speed;
-       link->link_autoneg = ETH_LINK_SPEED_AUTONEG;
+       link->link_autoneg = ETH_LINK_AUTONEG;
 }
 
 static void
@@ -113,7 +106,7 @@ nicvf_interrupt(void *arg)
                if (dev->data->dev_conf.intr_conf.lsc)
                        nicvf_set_eth_link_status(nic, &dev->data->dev_link);
                _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
-                                             NULL, NULL);
+                                             NULL);
        }
 
        rte_eal_alarm_set(NICVF_INTR_POLL_INTERVAL_MS * 1000,
@@ -179,6 +172,7 @@ nicvf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
        struct nicvf *nic = nicvf_pmd_priv(dev);
        uint32_t buffsz, frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
        size_t i;
+       struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
 
        PMD_INIT_FUNC_TRACE();
 
@@ -204,15 +198,15 @@ nicvf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
                return -EINVAL;
 
        if (frame_size > ETHER_MAX_LEN)
-               dev->data->dev_conf.rxmode.jumbo_frame = 1;
+               rxmode->offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
        else
-               dev->data->dev_conf.rxmode.jumbo_frame = 0;
+               rxmode->offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
 
        if (nicvf_mbox_update_hw_max_frs(nic, frame_size))
                return -EINVAL;
 
        /* Update max frame size */
-       dev->data->dev_conf.rxmode.max_rx_pkt_len = (uint32_t)frame_size;
+       rxmode->max_rx_pkt_len = (uint32_t)frame_size;
        nic->mtu = mtu;
 
        for (i = 0; i < nic->sqs_count; i++)
@@ -903,7 +897,7 @@ nicvf_set_tx_function(struct rte_eth_dev *dev)
 
        for (i = 0; i < dev->data->nb_tx_queues; i++) {
                txq = dev->data->tx_queues[i];
-               if ((txq->txq_flags & ETH_TXQ_FLAGS_NOMULTSEGS) == 0) {
+               if (txq->offloads & DEV_TX_OFFLOAD_MULTI_SEGS) {
                        multiseg = true;
                        break;
                }
@@ -942,9 +936,10 @@ nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
                         const struct rte_eth_txconf *tx_conf)
 {
        uint16_t tx_free_thresh;
-       uint8_t is_single_pool;
+       bool is_single_pool;
        struct nicvf_txq *txq;
        struct nicvf *nic = nicvf_pmd_priv(dev);
+       uint64_t conf_offloads, offload_capa, unsupported_offloads;
 
        PMD_INIT_FUNC_TRACE();
 
@@ -958,6 +953,17 @@ nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
                PMD_DRV_LOG(WARNING, "socket_id expected %d, configured %d",
                socket_id, nic->node);
 
+       conf_offloads = tx_conf->offloads;
+       offload_capa = NICVF_TX_OFFLOAD_CAPA;
+
+       unsupported_offloads = conf_offloads & ~offload_capa;
+       if (unsupported_offloads) {
+               PMD_INIT_LOG(ERR, "Tx offloads 0x%" PRIx64 " are not supported."
+                     "Requested 0x%" PRIx64 " supported 0x%" PRIx64 ".\n",
+                     unsupported_offloads, conf_offloads, offload_capa);
+               return -ENOTSUP;
+       }
+
        /* Tx deferred start is not supported */
        if (tx_conf->tx_deferred_start) {
                PMD_INIT_LOG(ERR, "Tx deferred start not supported");
@@ -1007,11 +1013,11 @@ nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
        txq->nic = nic;
        txq->queue_id = qidx;
        txq->tx_free_thresh = tx_free_thresh;
-       txq->txq_flags = tx_conf->txq_flags;
        txq->sq_head = nicvf_qset_base(nic, qidx) + NIC_QSET_SQ_0_7_HEAD;
        txq->sq_door = nicvf_qset_base(nic, qidx) + NIC_QSET_SQ_0_7_DOOR;
-       is_single_pool = (txq->txq_flags & ETH_TXQ_FLAGS_NOREFCOUNT &&
-                               txq->txq_flags & ETH_TXQ_FLAGS_NOMULTMEMP);
+       txq->offloads = conf_offloads;
+
+       is_single_pool = !!(conf_offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE);
 
        /* Choose optimum free threshold value for multipool case */
        if (!is_single_pool) {
@@ -1042,9 +1048,10 @@ nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 
        nicvf_tx_queue_reset(txq);
 
-       PMD_TX_LOG(DEBUG, "[%d] txq=%p nb_desc=%d desc=%p phys=0x%" PRIx64,
+       PMD_INIT_LOG(DEBUG, "[%d] txq=%p nb_desc=%d desc=%p"
+                       " phys=0x%" PRIx64 " offloads=0x%" PRIx64,
                        nicvf_netdev_qidx(nic, qidx), txq, nb_desc, txq->desc,
-                       txq->phys);
+                       txq->phys, txq->offloads);
 
        dev->data->tx_queues[nicvf_netdev_qidx(nic, qidx)] = txq;
        dev->data->tx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
@@ -1270,6 +1277,7 @@ nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
        uint16_t rx_free_thresh;
        struct nicvf_rxq *rxq;
        struct nicvf *nic = nicvf_pmd_priv(dev);
+       uint64_t conf_offloads, offload_capa, unsupported_offloads;
 
        PMD_INIT_FUNC_TRACE();
 
@@ -1283,6 +1291,24 @@ nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
                PMD_DRV_LOG(WARNING, "socket_id expected %d, configured %d",
                socket_id, nic->node);
 
+
+       conf_offloads = rx_conf->offloads;
+
+       if (conf_offloads & DEV_RX_OFFLOAD_CHECKSUM) {
+               PMD_INIT_LOG(NOTICE, "Rx checksum not supported");
+               conf_offloads &= ~DEV_RX_OFFLOAD_CHECKSUM;
+       }
+
+       offload_capa = NICVF_RX_OFFLOAD_CAPA;
+       unsupported_offloads = conf_offloads & ~offload_capa;
+
+       if (unsupported_offloads) {
+               PMD_INIT_LOG(ERR, "Rx offloads 0x%" PRIx64 " are not supported. "
+                     "Requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
+                     unsupported_offloads, conf_offloads, offload_capa);
+               return -ENOTSUP;
+       }
+
        /* Mempool memory must be contiguous, so must be one memory segment*/
        if (mp->nb_mem_chunks != 1) {
                PMD_INIT_LOG(ERR, "Non-contiguous mempool, add more huge pages");
@@ -1363,9 +1389,10 @@ nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 
        nicvf_rx_queue_reset(rxq);
 
-       PMD_RX_LOG(DEBUG, "[%d] rxq=%p pool=%s nb_desc=(%d/%d) phy=%" PRIx64,
+       PMD_INIT_LOG(DEBUG, "[%d] rxq=%p pool=%s nb_desc=(%d/%d)"
+                       " phy=0x%" PRIx64 " offloads=0x%" PRIx64,
                        nicvf_netdev_qidx(nic, qidx), rxq, mp->name, nb_desc,
-                       rte_mempool_avail_count(mp), rxq->phys);
+                       rte_mempool_avail_count(mp), rxq->phys, conf_offloads);
 
        dev->data->rx_queues[nicvf_netdev_qidx(nic, qidx)] = rxq;
        dev->data->rx_queue_state[nicvf_netdev_qidx(nic, qidx)] =
@@ -1399,13 +1426,10 @@ nicvf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
        dev_info->max_mac_addrs = 1;
        dev_info->max_vfs = pci_dev->max_vfs;
 
-       dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
-       dev_info->tx_offload_capa =
-               DEV_TX_OFFLOAD_IPV4_CKSUM  |
-               DEV_TX_OFFLOAD_UDP_CKSUM   |
-               DEV_TX_OFFLOAD_TCP_CKSUM   |
-               DEV_TX_OFFLOAD_TCP_TSO     |
-               DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
+       dev_info->rx_offload_capa = NICVF_RX_OFFLOAD_CAPA;
+       dev_info->tx_offload_capa = NICVF_TX_OFFLOAD_CAPA;
+       dev_info->rx_queue_offload_capa = NICVF_RX_OFFLOAD_CAPA;
+       dev_info->tx_queue_offload_capa = NICVF_TX_OFFLOAD_CAPA;
 
        dev_info->reta_size = nic->rss_info.rss_size;
        dev_info->hash_key_size = RSS_HASH_KEY_BYTE_SIZE;
@@ -1416,6 +1440,7 @@ nicvf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
        dev_info->default_rxconf = (struct rte_eth_rxconf) {
                .rx_free_thresh = NICVF_DEFAULT_RX_FREE_THRESH,
                .rx_drop_en = 0,
+               .offloads = DEV_RX_OFFLOAD_CRC_STRIP,
        };
 
        dev_info->default_txconf = (struct rte_eth_txconf) {
@@ -1426,6 +1451,10 @@ nicvf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
                        ETH_TXQ_FLAGS_NOMULTMEMP  |
                        ETH_TXQ_FLAGS_NOVLANOFFL  |
                        ETH_TXQ_FLAGS_NOXSUMSCTP,
+               .offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE |
+                       DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM   |
+                       DEV_TX_OFFLOAD_UDP_CKSUM          |
+                       DEV_TX_OFFLOAD_TCP_CKSUM,
        };
 }
 
@@ -1466,6 +1495,7 @@ nicvf_vf_start(struct rte_eth_dev *dev, struct nicvf *nic, uint32_t rbdrsz)
        struct rte_mbuf *mbuf;
        uint16_t rx_start, rx_end;
        uint16_t tx_start, tx_end;
+       bool vlan_strip;
 
        PMD_INIT_FUNC_TRACE();
 
@@ -1585,7 +1615,9 @@ nicvf_vf_start(struct rte_eth_dev *dev, struct nicvf *nic, uint32_t rbdrsz)
                     nic->rbdr->tail, nb_rbdr_desc, nic->vf_id);
 
        /* Configure VLAN Strip */
-       nicvf_vlan_hw_strip(nic, dev->data->dev_conf.rxmode.hw_vlan_strip);
+       vlan_strip = !!(dev->data->dev_conf.rxmode.offloads &
+                       DEV_RX_OFFLOAD_VLAN_STRIP);
+       nicvf_vlan_hw_strip(nic, vlan_strip);
 
        /* Based on the packet type(IPv4 or IPv6), the nicvf HW aligns L3 data
         * to the 64bit memory address.
@@ -1713,11 +1745,11 @@ nicvf_dev_start(struct rte_eth_dev *dev)
        if (dev->data->dev_conf.rxmode.max_rx_pkt_len +
                                            2 * VLAN_TAG_SIZE > buffsz)
                dev->data->scattered_rx = 1;
-       if (rx_conf->enable_scatter)
+       if ((rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER) != 0)
                dev->data->scattered_rx = 1;
 
        /* Setup MTU based on max_rx_pkt_len or default */
-       mtu = dev->data->dev_conf.rxmode.jumbo_frame ?
+       mtu = dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME ?
                dev->data->dev_conf.rxmode.max_rx_pkt_len
                        -  ETHER_HDR_LEN - ETHER_CRC_LEN
                : ETHER_MTU;
@@ -1891,6 +1923,8 @@ nicvf_dev_configure(struct rte_eth_dev *dev)
        struct rte_eth_txmode *txmode = &conf->txmode;
        struct nicvf *nic = nicvf_pmd_priv(dev);
        uint8_t cqcount;
+       uint64_t conf_rx_offloads, rx_offload_capa;
+       uint64_t conf_tx_offloads, tx_offload_capa;
 
        PMD_INIT_FUNC_TRACE();
 
@@ -1899,44 +1933,49 @@ nicvf_dev_configure(struct rte_eth_dev *dev)
                return -EINVAL;
        }
 
-       if (txmode->mq_mode) {
-               PMD_INIT_LOG(INFO, "Tx mq_mode DCB or VMDq not supported");
-               return -EINVAL;
-       }
+       conf_tx_offloads = dev->data->dev_conf.txmode.offloads;
+       tx_offload_capa = NICVF_TX_OFFLOAD_CAPA;
 
-       if (rxmode->mq_mode != ETH_MQ_RX_NONE &&
-               rxmode->mq_mode != ETH_MQ_RX_RSS) {
-               PMD_INIT_LOG(INFO, "Unsupported rx qmode %d", rxmode->mq_mode);
-               return -EINVAL;
+       if ((conf_tx_offloads & tx_offload_capa) != conf_tx_offloads) {
+               PMD_INIT_LOG(ERR, "Some Tx offloads are not supported "
+                     "requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
+                     conf_tx_offloads, tx_offload_capa);
+               return -ENOTSUP;
        }
 
-       if (!rxmode->hw_strip_crc) {
-               PMD_INIT_LOG(NOTICE, "Can't disable hw crc strip");
-               rxmode->hw_strip_crc = 1;
+       if (rxmode->offloads & DEV_RX_OFFLOAD_CHECKSUM) {
+               PMD_INIT_LOG(NOTICE, "Rx checksum not supported");
+               rxmode->offloads &= ~DEV_RX_OFFLOAD_CHECKSUM;
        }
 
-       if (rxmode->hw_ip_checksum) {
-               PMD_INIT_LOG(NOTICE, "Rxcksum not supported");
-               rxmode->hw_ip_checksum = 0;
+       conf_rx_offloads = rxmode->offloads;
+       rx_offload_capa = NICVF_RX_OFFLOAD_CAPA;
+
+       if ((conf_rx_offloads & rx_offload_capa) != conf_rx_offloads) {
+               PMD_INIT_LOG(ERR, "Some Rx offloads are not supported "
+                     "requested 0x%" PRIx64 " supported 0x%" PRIx64 "\n",
+                     conf_rx_offloads, rx_offload_capa);
+               return -ENOTSUP;
        }
 
-       if (rxmode->split_hdr_size) {
-               PMD_INIT_LOG(INFO, "Rxmode does not support split header");
-               return -EINVAL;
+       if ((conf_rx_offloads & DEV_RX_OFFLOAD_CRC_STRIP) == 0) {
+               PMD_INIT_LOG(NOTICE, "Can't disable hw crc strip");
+               rxmode->offloads |= DEV_RX_OFFLOAD_CRC_STRIP;
        }
 
-       if (rxmode->hw_vlan_filter) {
-               PMD_INIT_LOG(INFO, "VLAN filter not supported");
+       if (txmode->mq_mode) {
+               PMD_INIT_LOG(INFO, "Tx mq_mode DCB or VMDq not supported");
                return -EINVAL;
        }
 
-       if (rxmode->hw_vlan_extend) {
-               PMD_INIT_LOG(INFO, "VLAN extended not supported");
+       if (rxmode->mq_mode != ETH_MQ_RX_NONE &&
+               rxmode->mq_mode != ETH_MQ_RX_RSS) {
+               PMD_INIT_LOG(INFO, "Unsupported rx qmode %d", rxmode->mq_mode);
                return -EINVAL;
        }
 
-       if (rxmode->enable_lro) {
-               PMD_INIT_LOG(INFO, "LRO not supported");
+       if (rxmode->split_hdr_size) {
+               PMD_INIT_LOG(INFO, "Rxmode does not support split header");
                return -EINVAL;
        }