New upstream version 17.08
[deb_dpdk.git] / drivers/net/nfp/nfp_net.c
index 5479fb3..92b03c4 100644
@@ -39,8 +39,6 @@
  * Netronome vNIC DPDK Poll-Mode Driver: Main entry point
  */
 
-#include <math.h>
-
 #include <rte_byteorder.h>
 #include <rte_common.h>
 #include <rte_log.h>
@@ -647,7 +645,7 @@ nfp_configure_rx_interrupt(struct rte_eth_dev *dev,
 static int
 nfp_net_start(struct rte_eth_dev *dev)
 {
-       struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
+       struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
        uint32_t new_ctrl, update = 0;
        struct nfp_net_hw *hw;
@@ -772,7 +770,7 @@ nfp_net_close(struct rte_eth_dev *dev)
        PMD_INIT_LOG(DEBUG, "Close");
 
        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       pci_dev = RTE_DEV_TO_PCI(dev->device);
+       pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 
        /*
         * We assume that the DPDK application is stopping all the
@@ -1081,7 +1079,7 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 
        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 
-       dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device);
+       dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        dev_info->max_rx_queues = (uint16_t)hw->max_rx_queues;
        dev_info->max_tx_queues = (uint16_t)hw->max_tx_queues;
        dev_info->min_rx_bufsize = ETHER_MIN_MTU;
@@ -1173,7 +1171,7 @@ nfp_net_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_idx)
         * Other PMDs are just checking the DD bit in intervals of 4
         * descriptors and counting all four if the first has the DD
         * bit on. Of course, this is not accurate but can be good for
-        * perfomance. But ideally that should be done in descriptors
+        * performance. But ideally that should be done in descriptors
         * chunks belonging to the same cache line
         */
 
@@ -1201,7 +1199,7 @@ nfp_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
        int base = 0;
 
        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       pci_dev = RTE_DEV_TO_PCI(dev->device);
+       pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 
        if (pci_dev->intr_handle.type != RTE_INTR_HANDLE_UIO)
                base = 1;
@@ -1221,7 +1219,7 @@ nfp_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
        int base = 0;
 
        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       pci_dev = RTE_DEV_TO_PCI(dev->device);
+       pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 
        if (pci_dev->intr_handle.type != RTE_INTR_HANDLE_UIO)
                base = 1;
@@ -1235,7 +1233,7 @@ nfp_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
 static void
 nfp_net_dev_link_status_print(struct rte_eth_dev *dev)
 {
-       struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
+       struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
        struct rte_eth_link link;
 
        memset(&link, 0, sizeof(link));
@@ -1269,7 +1267,7 @@ nfp_net_irq_unmask(struct rte_eth_dev *dev)
        struct rte_pci_device *pci_dev;
 
        hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       pci_dev = RTE_DEV_TO_PCI(dev->device);
+       pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 
        if (hw->ctrl & NFP_NET_CFG_CTRL_MSIXAUTO) {
                /* If MSI-X auto-masking is used, clear the entry */
@@ -1334,7 +1332,7 @@ nfp_net_dev_interrupt_delayed_handler(void *param)
        struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
 
        nfp_net_link_update(dev, 0);
-       _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
+       _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL, NULL);
 
        nfp_net_dev_link_status_print(dev);
 
@@ -1473,7 +1471,7 @@ nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
         * of descriptors in log2 format
         */
        nn_cfg_writeq(hw, NFP_NET_CFG_RXR_ADDR(queue_idx), rxq->dma);
-       nn_cfg_writeb(hw, NFP_NET_CFG_RXR_SZ(queue_idx), log2(nb_desc));
+       nn_cfg_writeb(hw, NFP_NET_CFG_RXR_SZ(queue_idx), rte_log2_u32(nb_desc));
 
        return 0;
 }
@@ -1628,7 +1626,7 @@ nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
         * of descriptors in log2 format
         */
        nn_cfg_writeq(hw, NFP_NET_CFG_TXR_ADDR(queue_idx), txq->dma);
-       nn_cfg_writeb(hw, NFP_NET_CFG_TXR_SZ(queue_idx), log2(nb_desc));
+       nn_cfg_writeb(hw, NFP_NET_CFG_TXR_SZ(queue_idx), rte_log2_u32(nb_desc));
 
        return 0;
 }
@@ -2450,7 +2448,7 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;
 
-       pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
+       pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
        rte_eth_copy_pci_info(eth_dev, pci_dev);
        eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
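
Note: below is a minimal sketch of the API pattern this diff migrates the NFP PMD to, assuming the DPDK 17.08 headers rte_ethdev_pci.h (RTE_ETH_DEV_TO_PCI) and rte_common.h (rte_log2_u32). The helper function and its name are hypothetical and purely illustrative; it is not part of the patch.

#include <rte_ethdev_pci.h>  /* RTE_ETH_DEV_TO_PCI, replaces RTE_DEV_TO_PCI(dev->device) */
#include <rte_common.h>      /* rte_log2_u32, replaces log2() from <math.h> */

/* Hypothetical helper showing the migrated pattern: resolve the PCI
 * device directly from the ethdev and compute a ring size in log2
 * format without pulling in libm. */
static void
example_ring_cfg(struct rte_eth_dev *dev, uint16_t nb_desc)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	uint32_t sz_log2 = rte_log2_u32(nb_desc);

	/* A real queue-setup path would program the ring address and
	 * sz_log2 into the vNIC config BAR, as the hunks above do. */
	RTE_SET_USED(pci_dev);
	RTE_SET_USED(sz_log2);
}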