New upstream version 16.11.8
[deb_dpdk.git] drivers/net/nfp/nfp_net.c
index bcf5fa9..0f0589e 100644
--- a/drivers/net/nfp/nfp_net.c
+++ b/drivers/net/nfp/nfp_net.c
@@ -54,6 +54,7 @@
 #include <rte_version.h>
 #include <rte_string_fns.h>
 #include <rte_alarm.h>
+#include <rte_spinlock.h>
 
 #include "nfp_net_pmd.h"
 #include "nfp_net_logs.h"
@@ -213,7 +214,7 @@ ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
        const struct rte_memzone *mz;
 
        snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
-                dev->driver->pci_drv.name,
+                dev->driver->pci_drv.driver.name,
                 ring_name, dev->data->port_id, queue_id);
 
        mz = rte_memzone_lookup(z_name);
@@ -407,6 +408,8 @@ nfp_net_reconfig(struct nfp_net_hw *hw, uint32_t ctrl, uint32_t update)
        PMD_DRV_LOG(DEBUG, "nfp_net_reconfig: ctrl=%08x update=%08x\n",
                    ctrl, update);
 
+       rte_spinlock_lock(&hw->reconfig_lock);
+
        nn_cfg_writel(hw, NFP_NET_CFG_CTRL, ctrl);
        nn_cfg_writel(hw, NFP_NET_CFG_UPDATE, update);
 
@@ -414,6 +417,8 @@ nfp_net_reconfig(struct nfp_net_hw *hw, uint32_t ctrl, uint32_t update)
 
        err = __nfp_net_reconfig(hw, update);
 
+       rte_spinlock_unlock(&hw->reconfig_lock);
+
        if (!err)
                return 0;
 
@@ -512,12 +517,10 @@ nfp_net_configure(struct rte_eth_dev *dev)
                new_ctrl |= NFP_NET_CFG_CTRL_TXVLAN;
 
        if (rxmode->jumbo_frame)
-               /* this is handled in rte_eth_dev_configure */
+               hw->mtu = rxmode->max_rx_pkt_len;
 
-       if (rxmode->hw_strip_crc) {
-               PMD_INIT_LOG(INFO, "strip CRC not supported\n");
-               return -EINVAL;
-       }
+       if (!rxmode->hw_strip_crc)
+               PMD_INIT_LOG(INFO, "HW does strip CRC and it is not configurable\n");
 
        if (rxmode->enable_scatter) {
                PMD_INIT_LOG(INFO, "Scatter not supported\n");
@@ -602,18 +605,8 @@ nfp_net_rx_freelist_setup(struct rte_eth_dev *dev)
 static void
 nfp_net_params_setup(struct nfp_net_hw *hw)
 {
-       uint32_t *mac_address;
-
        nn_cfg_writel(hw, NFP_NET_CFG_MTU, hw->mtu);
        nn_cfg_writel(hw, NFP_NET_CFG_FLBUFSZ, hw->flbufsz);
-
-       /* A MAC address is 8 bytes long */
-       mac_address = (uint32_t *)(hw->mac_addr);
-
-       nn_cfg_writel(hw, NFP_NET_CFG_MACADDR,
-                     rte_cpu_to_be_32(*mac_address));
-       nn_cfg_writel(hw, NFP_NET_CFG_MACADDR + 4,
-                     rte_cpu_to_be_32(*(mac_address + 4)));
 }
 
 static void
@@ -622,6 +615,17 @@ nfp_net_cfg_queue_setup(struct nfp_net_hw *hw)
        hw->qcp_cfg = hw->tx_bar + NFP_QCP_QUEUE_ADDR_SZ;
 }
 
+static void nfp_net_read_mac(struct nfp_net_hw *hw)
+{
+       uint32_t tmp;
+
+       tmp = rte_be_to_cpu_32(nn_cfg_readl(hw, NFP_NET_CFG_MACADDR));
+       memcpy(&hw->mac_addr[0], &tmp, 4);
+
+       tmp = rte_be_to_cpu_32(nn_cfg_readl(hw, NFP_NET_CFG_MACADDR + 4));
+       memcpy(&hw->mac_addr[4], &tmp, 2);
+}
+
 static int
 nfp_net_start(struct rte_eth_dev *dev)
 {
@@ -727,6 +731,11 @@ nfp_net_close(struct rte_eth_dev *dev)
        rte_intr_disable(&dev->pci_dev->intr_handle);
        nn_cfg_writeb(hw, NFP_NET_CFG_LSC, 0xff);
 
+       /* unregister callback func from eal lib */
+       rte_intr_callback_unregister(&dev->pci_dev->intr_handle,
+                                    nfp_net_dev_interrupt_handler,
+                                    (void *)dev);
+
        /*
         * The ixgbe PMD driver disables the pcie master on the
         * device. The i40e does not...
@@ -846,6 +855,8 @@ nfp_net_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 
        /* RTE_ETHDEV_QUEUE_STAT_CNTRS default value is 16 */
 
+       memset(&nfp_dev_stats, 0, sizeof(nfp_dev_stats));
+
        /* reading per RX ring stats */
        for (i = 0; i < dev->data->nb_rx_queues; i++) {
                if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
@@ -902,11 +913,6 @@ nfp_net_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 
        nfp_dev_stats.obytes -= hw->eth_stats_base.obytes;
 
-       nfp_dev_stats.imcasts =
-               nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_MC_FRAMES);
-
-       nfp_dev_stats.imcasts -= hw->eth_stats_base.imcasts;
-
        /* reading general device stats */
        nfp_dev_stats.ierrors =
                nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_ERRORS);
@@ -918,12 +924,6 @@ nfp_net_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 
        nfp_dev_stats.oerrors -= hw->eth_stats_base.oerrors;
 
-       /* Multicast frames received */
-       nfp_dev_stats.imcasts =
-               nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_MC_FRAMES);
-
-       nfp_dev_stats.imcasts -= hw->eth_stats_base.imcasts;
-
        /* RX ring mbuf allocation failures */
        nfp_dev_stats.rx_nombuf = dev->data->rx_mbuf_alloc_failed;
 
@@ -985,9 +985,6 @@ nfp_net_stats_reset(struct rte_eth_dev *dev)
        hw->eth_stats_base.obytes =
                nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_OCTETS);
 
-       hw->eth_stats_base.imcasts =
-               nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_MC_FRAMES);
-
        /* reading general device stats */
        hw->eth_stats_base.ierrors =
                nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_ERRORS);
@@ -995,10 +992,6 @@ nfp_net_stats_reset(struct rte_eth_dev *dev)
        hw->eth_stats_base.oerrors =
                nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_ERRORS);
 
-       /* Multicast frames received */
-       hw->eth_stats_base.imcasts =
-               nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_MC_FRAMES);
-
        /* RX ring mbuf allocation failures */
        dev->data->rx_mbuf_alloc_failed = 0;
 
@@ -1013,11 +1006,11 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 
        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-       dev_info->driver_name = dev->driver->pci_drv.name;
+       dev_info->driver_name = dev->driver->pci_drv.driver.name;
        dev_info->max_rx_queues = (uint16_t)hw->max_rx_queues;
        dev_info->max_tx_queues = (uint16_t)hw->max_tx_queues;
        dev_info->min_rx_bufsize = ETHER_MIN_MTU;
-       dev_info->max_rx_pktlen = hw->mtu;
+       dev_info->max_rx_pktlen = hw->max_mtu;
        /* Next should change when PF support is implemented */
        dev_info->max_mac_addrs = 1;
 
@@ -1034,8 +1027,8 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 
        if (hw->cap & NFP_NET_CFG_CTRL_TXCSUM)
                dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_IPV4_CKSUM |
-                                            DEV_RX_OFFLOAD_UDP_CKSUM |
-                                            DEV_RX_OFFLOAD_TCP_CKSUM;
+                                            DEV_TX_OFFLOAD_UDP_CKSUM |
+                                            DEV_TX_OFFLOAD_TCP_CKSUM;
 
        dev_info->default_rxconf = (struct rte_eth_rxconf) {
                .rx_thresh = {
@@ -1226,7 +1219,7 @@ nfp_net_dev_interrupt_delayed_handler(void *param)
        struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
 
        nfp_net_link_update(dev, 0);
-       _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
+       _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
 
        nfp_net_dev_link_status_print(dev);
 
@@ -1245,6 +1238,13 @@ nfp_net_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
        if ((mtu < ETHER_MIN_MTU) || ((uint32_t)mtu > hw->max_mtu))
                return -EINVAL;
 
+       /* mtu setting is forbidden if port is started */
+       if (dev->data->dev_started) {
+               PMD_DRV_LOG(ERR, "port %d must be stopped before configuration",
+                           dev->data->port_id);
+               return -EBUSY;
+       }
+
        /* switch to jumbo mode if needed */
        if ((uint32_t)mtu > ETHER_MAX_LEN)
                dev->data->dev_conf.rxmode.jumbo_frame = 1;
@@ -1731,16 +1731,16 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
                        break;
                }
 
+               rxds = &rxq->rxds[idx];
+               if ((rxds->rxd.meta_len_dd & PCIE_DESC_RX_DD) == 0)
+                       break;
+
                /*
                 * Memory barrier to ensure that we won't do other
                 * reads before the DD bit.
                 */
                rte_rmb();
 
-               rxds = &rxq->rxds[idx];
-               if ((rxds->rxd.meta_len_dd & PCIE_DESC_RX_DD) == 0)
-                       break;
-
                /*
                 * We got a packet. Let's alloc a new mbuf for refilling the
                 * free descriptor ring as soon as possible
@@ -1801,6 +1801,8 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
                mb->nb_segs = 1;
                mb->next = NULL;
 
+               mb->port = rxq->port_id;
+
                /* Checking the RSS flag */
                nfp_net_set_hash(rxq, rxds, mb);
 
@@ -1813,7 +1815,7 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
                if ((rxds->rxd.flags & PCIE_DESC_RX_VLAN) &&
                    (hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN)) {
                        mb->vlan_tci = rte_cpu_to_le_32(rxds->rxd.vlan);
-                       mb->ol_flags |= PKT_RX_VLAN_PKT;
+                       mb->ol_flags |= PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED;
                }
 
                /* Add the mbuf to the mbuf array passed by the app */
@@ -1923,7 +1925,7 @@ nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
        struct nfp_net_tx_desc *txds;
        struct rte_mbuf *pkt;
        uint64_t dma_addr;
-       int pkt_size, dma_size;
+       int pkt_size, pkt_len, dma_size;
        uint16_t free_descs, issued_descs;
        struct rte_mbuf **lmbuf;
        int i;
@@ -1971,6 +1973,8 @@ nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                 * Checksum and VLAN flags just in the first descriptor for a
                 * multisegment packet
                 */
+
+               txds->data_len = pkt->pkt_len;
                nfp_net_tx_cksum(txq, txds, pkt);
 
                if ((pkt->ol_flags & PKT_TX_VLAN_PKT) &&
@@ -1988,12 +1992,18 @@ nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
                 * then data_len = pkt_len
                 */
                pkt_size = pkt->pkt_len;
+               pkt_len = pkt->pkt_len;
 
-               while (pkt_size) {
-                       /* Releasing mbuf which was prefetched above */
-                       if (*lmbuf)
-                               rte_pktmbuf_free_seg(*lmbuf);
+               /* Releasing mbuf which was prefetched above */
+               if (*lmbuf)
+                       rte_pktmbuf_free(*lmbuf);
+               /*
+                * Link the mbuf to this descriptor so it is freed
+                * the next time the descriptor is used
+                */
+               *lmbuf = pkt;
 
+               while (pkt_size) {
                        dma_size = pkt->data_len;
                        dma_addr = rte_mbuf_data_dma_addr(pkt);
                        PMD_TX_LOG(DEBUG, "Working with mbuf at dma address:"
@@ -2001,29 +2011,27 @@ nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 
                        /* Filling descriptors fields */
                        txds->dma_len = dma_size;
-                       txds->data_len = pkt->pkt_len;
+                       txds->data_len = pkt_len;
                        txds->dma_addr_hi = (dma_addr >> 32) & 0xff;
                        txds->dma_addr_lo = (dma_addr & 0xffffffff);
                        ASSERT(free_descs > 0);
                        free_descs--;
 
-                       /*
-                        * Linking mbuf with descriptor for being released
-                        * next time descriptor is used
-                        */
-                       *lmbuf = pkt;
-
                        txq->wr_p++;
                        txq->tail++;
                        if (unlikely(txq->tail == txq->tx_count)) /* wrapping?*/
                                txq->tail = 0;
 
                        pkt_size -= dma_size;
-                       if (!pkt_size) {
-                               /* End of packet */
-                               txds->offset_eop |= PCIE_DESC_TX_EOP;
-                       } else {
-                               txds->offset_eop &= PCIE_DESC_TX_OFFSET_MASK;
+
+                       /*
+                        * Set EOP. Packets with just one segment are
+                        * the common case, hence the likely() hint
+                        */
+                       if (likely(!pkt_size))
+                               txds->offset_eop = PCIE_DESC_TX_EOP;
+                       else {
+                               txds->offset_eop = 0;
                                pkt = pkt->next;
                        }
                        /* Referencing next free TX descriptor */
@@ -2051,9 +2059,9 @@ nfp_net_vlan_offload_set(struct rte_eth_dev *dev, int mask)
        new_ctrl = 0;
 
        if ((mask & ETH_VLAN_FILTER_OFFLOAD) ||
-           (mask & ETH_VLAN_FILTER_OFFLOAD))
-               RTE_LOG(INFO, PMD, "Not support for ETH_VLAN_FILTER_OFFLOAD or"
-                       " ETH_VLAN_FILTER_EXTEND");
+           (mask & ETH_VLAN_EXTEND_OFFLOAD))
+               RTE_LOG(INFO, PMD, "No support for ETH_VLAN_FILTER_OFFLOAD or"
+                       " ETH_VLAN_EXTEND_OFFLOAD");
 
        /* Enable vlan strip if it is not configured yet */
        if ((mask & ETH_VLAN_STRIP_OFFLOAD) &&
@@ -2125,7 +2133,8 @@ nfp_net_reta_update(struct rte_eth_dev *dev,
                                reta &= ~(0xFF << (8 * j));
                        reta |= reta_conf[idx].reta[shift + j] << (8 * j);
                }
-               nn_cfg_writel(hw, NFP_NET_CFG_RSS_ITBL + shift, reta);
+               nn_cfg_writel(hw, NFP_NET_CFG_RSS_ITBL + (idx * 64) + shift,
+                             reta);
        }
 
        update = NFP_NET_CFG_UPDATE_RSS;
@@ -2172,7 +2181,8 @@ nfp_net_reta_query(struct rte_eth_dev *dev,
                if (!mask)
                        continue;
 
-               reta = nn_cfg_readl(hw, NFP_NET_CFG_RSS_ITBL + shift);
+               reta = nn_cfg_readl(hw, NFP_NET_CFG_RSS_ITBL + (idx * 64) +
+                                   shift);
                for (j = 0; j < 4; j++) {
                        if (!(mask & (0x1 << j)))
                                continue;
@@ -2222,6 +2232,9 @@ nfp_net_rss_hash_update(struct rte_eth_dev *dev,
                                NFP_NET_CFG_RSS_IPV6_TCP |
                                NFP_NET_CFG_RSS_IPV6_UDP;
 
+       cfg_rss_ctrl |= NFP_NET_CFG_RSS_MASK;
+       cfg_rss_ctrl |= NFP_NET_CFG_RSS_TOEPLITZ;
+
        /* configuring where to apply the RSS hash */
        nn_cfg_writel(hw, NFP_NET_CFG_RSS_CTRL, cfg_rss_ctrl);
 
@@ -2388,7 +2401,7 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
        hw->ver = nn_cfg_readl(hw, NFP_NET_CFG_VERSION);
        hw->cap = nn_cfg_readl(hw, NFP_NET_CFG_CAP);
        hw->max_mtu = nn_cfg_readl(hw, NFP_NET_CFG_MAX_MTU);
-       hw->mtu = hw->max_mtu;
+       hw->mtu = ETHER_MTU;
 
        if (NFD_CFG_MAJOR_VERSION_of(hw->ver) < 2)
                hw->rx_offset = NFP_NET_RX_OFFSET;
@@ -2417,6 +2430,9 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
        PMD_INIT_LOG(INFO, "max_rx_queues: %u, max_tx_queues: %u\n",
                     hw->max_rx_queues, hw->max_tx_queues);
 
+       /* Initializing spinlock for reconfigs */
+       rte_spinlock_init(&hw->reconfig_lock);
+
        /* Allocating memory for mac addr */
        eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", ETHER_ADDR_LEN, 0);
        if (eth_dev->data->mac_addrs == NULL) {
@@ -2424,12 +2440,15 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
                return -ENOMEM;
        }
 
-       /* Using random mac addresses for VFs */
-       eth_random_addr(&hw->mac_addr[0]);
+       nfp_net_read_mac(hw);
+
+       if (!is_valid_assigned_ether_addr((struct ether_addr *)&hw->mac_addr))
+               /* Using random mac addresses for VFs */
+               eth_random_addr(&hw->mac_addr[0]);
 
        /* Copying mac address to DPDK eth_dev struct */
-       ether_addr_copy(&eth_dev->data->mac_addrs[0],
-                       (struct ether_addr *)hw->mac_addr);
+       ether_addr_copy((struct ether_addr *)hw->mac_addr,
+                       &eth_dev->data->mac_addrs[0]);
 
        PMD_INIT_LOG(INFO, "port %d VendorID=0x%x DeviceID=0x%x "
                     "mac=%02x:%02x:%02x:%02x:%02x:%02x",
@@ -2457,16 +2476,12 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
 
 static struct rte_pci_id pci_id_nfp_net_map[] = {
        {
-               .vendor_id = PCI_VENDOR_ID_NETRONOME,
-               .device_id = PCI_DEVICE_ID_NFP6000_PF_NIC,
-               .subsystem_vendor_id = PCI_ANY_ID,
-               .subsystem_device_id = PCI_ANY_ID,
+               RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
+                              PCI_DEVICE_ID_NFP6000_PF_NIC)
        },
        {
-               .vendor_id = PCI_VENDOR_ID_NETRONOME,
-               .device_id = PCI_DEVICE_ID_NFP6000_VF_NIC,
-               .subsystem_vendor_id = PCI_ANY_ID,
-               .subsystem_device_id = PCI_ANY_ID,
+               RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
+                              PCI_DEVICE_ID_NFP6000_VF_NIC)
        },
        {
                .vendor_id = 0,
@@ -2474,33 +2489,19 @@ static struct rte_pci_id pci_id_nfp_net_map[] = {
 };
 
 static struct eth_driver rte_nfp_net_pmd = {
-       {
-               .name = "rte_nfp_net_pmd",
+       .pci_drv = {
                .id_table = pci_id_nfp_net_map,
-               .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+               .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
+                            RTE_PCI_DRV_DETACHABLE,
+               .probe = rte_eth_dev_pci_probe,
+               .remove = rte_eth_dev_pci_remove,
        },
        .eth_dev_init = nfp_net_init,
        .dev_private_size = sizeof(struct nfp_net_adapter),
 };
 
-static int
-nfp_net_pmd_init(const char *name __rte_unused,
-                const char *params __rte_unused)
-{
-       PMD_INIT_FUNC_TRACE();
-       PMD_INIT_LOG(INFO, "librte_pmd_nfp_net version %s\n",
-                    NFP_NET_PMD_VERSION);
-
-       rte_eth_driver_register(&rte_nfp_net_pmd);
-       return 0;
-}
-
-static struct rte_driver rte_nfp_net_driver = {
-       .type = PMD_PDEV,
-       .init = nfp_net_pmd_init,
-};
-
-PMD_REGISTER_DRIVER(rte_nfp_net_driver);
+RTE_PMD_REGISTER_PCI(net_nfp, rte_nfp_net_pmd.pci_drv);
+RTE_PMD_REGISTER_PCI_TABLE(net_nfp, pci_id_nfp_net_map);
 
 /*
  * Local variables: