New upstream version 16.11.5
drivers/net/e1000/igb_ethdev.c
index 5067d20..5108ff3 100644
@@ -129,7 +129,7 @@ static int  eth_igb_flow_ctrl_get(struct rte_eth_dev *dev,
                                struct rte_eth_fc_conf *fc_conf);
 static int  eth_igb_flow_ctrl_set(struct rte_eth_dev *dev,
                                struct rte_eth_fc_conf *fc_conf);
-static int eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev);
+static int eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on);
 static int eth_igb_rxq_interrupt_setup(struct rte_eth_dev *dev);
 static int eth_igb_interrupt_get_status(struct rte_eth_dev *dev);
 static int eth_igb_interrupt_action(struct rte_eth_dev *dev);
@@ -306,22 +306,57 @@ static enum e1000_fc_mode igb_fc_setting = e1000_fc_full;
  * The set of PCI devices this driver supports
  */
 static const struct rte_pci_id pci_id_igb_map[] = {
-
-#define RTE_PCI_DEV_ID_DECL_IGB(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
-#include "rte_pci_dev_ids.h"
-
-{0},
+       { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576) },
+       { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_FIBER) },
+       { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_SERDES) },
+       { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_QUAD_COPPER) },
+       { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_QUAD_COPPER_ET2) },
+       { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_NS) },
+       { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_NS_SERDES) },
+       { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_SERDES_QUAD) },
+
+       { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82575EB_COPPER) },
+       { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82575EB_FIBER_SERDES) },
+       { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82575GB_QUAD_COPPER) },
+
+       { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82580_COPPER) },
+       { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82580_FIBER) },
+       { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82580_SERDES) },
+       { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82580_SGMII) },
+       { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82580_COPPER_DUAL) },
+       { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82580_QUAD_FIBER) },
+
+       { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_COPPER) },
+       { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_FIBER) },
+       { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_SERDES) },
+       { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_SGMII) },
+       { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_DA4) },
+       { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_COPPER) },
+       { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_COPPER_OEM1) },
+       { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_COPPER_IT) },
+       { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_FIBER) },
+       { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_SERDES) },
+       { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I210_SGMII) },
+       { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I211_COPPER) },
+       { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I354_BACKPLANE_1GBPS) },
+       { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I354_SGMII) },
+       { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I354_BACKPLANE_2_5GBPS) },
+       { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_DH89XXCC_SGMII) },
+       { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_DH89XXCC_SERDES) },
+       { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_DH89XXCC_BACKPLANE) },
+       { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_DH89XXCC_SFP) },
+       { .vendor_id = 0, /* sentinel */ },
 };
 
 /*
  * The set of PCI devices this driver supports (for 82576&I350 VF)
  */
 static const struct rte_pci_id pci_id_igbvf_map[] = {
-
-#define RTE_PCI_DEV_ID_DECL_IGBVF(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
-#include "rte_pci_dev_ids.h"
-
-{0},
+       { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_VF) },
+       { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_82576_VF_HV) },
+       { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_VF) },
+       { RTE_PCI_DEVICE(E1000_INTEL_VENDOR_ID, E1000_DEV_ID_I350_VF_HV) },
+       { .vendor_id = 0, /* sentinel */ },
 };
 
 static const struct rte_eth_desc_lim rx_desc_lim = {
@@ -386,7 +421,6 @@ static const struct eth_dev_ops eth_igb_ops = {
        .timesync_disable     = igb_timesync_disable,
        .timesync_read_rx_timestamp = igb_timesync_read_rx_timestamp,
        .timesync_read_tx_timestamp = igb_timesync_read_tx_timestamp,
-       .get_reg_length       = eth_igb_get_reg_length,
        .get_reg              = eth_igb_get_regs,
        .get_eeprom_length    = eth_igb_get_eeprom_length,
        .get_eeprom           = eth_igb_get_eeprom,
@@ -426,7 +460,6 @@ static const struct eth_dev_ops igbvf_eth_dev_ops = {
        .rxq_info_get         = igb_rxq_info_get,
        .txq_info_get         = igb_txq_info_get,
        .mac_addr_set         = igbvf_default_mac_addr_set,
-       .get_reg_length       = igbvf_get_reg_length,
        .get_reg              = igbvf_get_regs,
 };
 
@@ -979,12 +1012,6 @@ eth_igbvf_dev_init(struct rte_eth_dev *eth_dev)
        /* Generate a random MAC address, if none was assigned by PF. */
        if (is_zero_ether_addr(perm_addr)) {
                eth_random_addr(perm_addr->addr_bytes);
-               diag = e1000_rar_set(hw, perm_addr->addr_bytes, 0);
-               if (diag) {
-                       rte_free(eth_dev->data->mac_addrs);
-                       eth_dev->data->mac_addrs = NULL;
-                       return diag;
-               }
                PMD_INIT_LOG(INFO, "\tVF MAC address not assigned by Host PF");
                PMD_INIT_LOG(INFO, "\tAssign randomly generated MAC address "
                             "%02x:%02x:%02x:%02x:%02x:%02x",
@@ -996,6 +1023,12 @@ eth_igbvf_dev_init(struct rte_eth_dev *eth_dev)
                             perm_addr->addr_bytes[5]);
        }
 
+       diag = e1000_rar_set(hw, perm_addr->addr_bytes, 0);
+       if (diag) {
+               rte_free(eth_dev->data->mac_addrs);
+               eth_dev->data->mac_addrs = NULL;
+               return diag;
+       }
        /* Copy the permanent MAC address */
        ether_addr_copy((struct ether_addr *) hw->mac.perm_addr,
                        &eth_dev->data->mac_addrs[0]);
@@ -1045,10 +1078,11 @@ eth_igbvf_dev_uninit(struct rte_eth_dev *eth_dev)
 
 static struct eth_driver rte_igb_pmd = {
        .pci_drv = {
-               .name = "rte_igb_pmd",
                .id_table = pci_id_igb_map,
                .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
                        RTE_PCI_DRV_DETACHABLE,
+               .probe = rte_eth_dev_pci_probe,
+               .remove = rte_eth_dev_pci_remove,
        },
        .eth_dev_init = eth_igb_dev_init,
        .eth_dev_uninit = eth_igb_dev_uninit,
@@ -1060,22 +1094,16 @@ static struct eth_driver rte_igb_pmd = {
  */
 static struct eth_driver rte_igbvf_pmd = {
        .pci_drv = {
-               .name = "rte_igbvf_pmd",
                .id_table = pci_id_igbvf_map,
                .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_DETACHABLE,
+               .probe = rte_eth_dev_pci_probe,
+               .remove = rte_eth_dev_pci_remove,
        },
        .eth_dev_init = eth_igbvf_dev_init,
        .eth_dev_uninit = eth_igbvf_dev_uninit,
        .dev_private_size = sizeof(struct e1000_adapter),
 };
 
-static int
-rte_igb_pmd_init(const char *name __rte_unused, const char *params __rte_unused)
-{
-       rte_eth_driver_register(&rte_igb_pmd);
-       return 0;
-}
-
 static void
 igb_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
 {
@@ -1087,27 +1115,13 @@ igb_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev)
        E1000_WRITE_REG(hw, E1000_RCTL, rctl);
 }
 
-/*
- * VF Driver initialization routine.
- * Invoked one at EAL init time.
- * Register itself as the [Virtual Poll Mode] Driver of PCI IGB devices.
- */
-static int
-rte_igbvf_pmd_init(const char *name __rte_unused, const char *params __rte_unused)
-{
-       PMD_INIT_FUNC_TRACE();
-
-       rte_eth_driver_register(&rte_igbvf_pmd);
-       return 0;
-}
-
 static int
 igb_check_mq_mode(struct rte_eth_dev *dev)
 {
        enum rte_eth_rx_mq_mode rx_mq_mode = dev->data->dev_conf.rxmode.mq_mode;
        enum rte_eth_tx_mq_mode tx_mq_mode = dev->data->dev_conf.txmode.mq_mode;
        uint16_t nb_rx_q = dev->data->nb_rx_queues;
-       uint16_t nb_tx_q = dev->data->nb_rx_queues;
+       uint16_t nb_tx_q = dev->data->nb_tx_queues;
 
        if ((rx_mq_mode & ETH_MQ_RX_DCB_FLAG) ||
            tx_mq_mode == ETH_MQ_TX_DCB ||
@@ -1313,6 +1327,7 @@ eth_igb_start(struct rte_eth_dev *dev)
        speeds = &dev->data->dev_conf.link_speeds;
        if (*speeds == ETH_LINK_SPEED_AUTONEG) {
                hw->phy.autoneg_advertised = E1000_ALL_SPEED_DUPLEX;
+               hw->mac.autoneg = 1;
        } else {
                num_speeds = 0;
                autoneg = (*speeds & ETH_LINK_SPEED_FIXED) == 0;
@@ -1348,6 +1363,17 @@ eth_igb_start(struct rte_eth_dev *dev)
                }
                if (num_speeds == 0 || (!autoneg && (num_speeds > 1)))
                        goto error_invalid_config;
+
+               /* Set/reset the mac.autoneg based on the link speed,
+                * fixed or not
+                */
+               if (!autoneg) {
+                       hw->mac.autoneg = 0;
+                       hw->mac.forced_speed_duplex =
+                                       hw->phy.autoneg_advertised;
+               } else {
+                       hw->mac.autoneg = 1;
+               }
        }
 
        e1000_setup_link(hw);
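
For context, a worked example of the forced-speed handling added in this hunk (editorial sketch, not part of the patch; the per-speed parsing loop it refers to sits just above and is only partly visible in the diff):

	/* Fixed 1 Gb/s, no autonegotiation:
	 *     dev_conf.link_speeds = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_FIXED;
	 * The parsing loop yields num_speeds == 1 and autoneg == 0, so the new
	 * block forces the link instead of autonegotiating:
	 *     hw->mac.autoneg             = 0;
	 *     hw->mac.forced_speed_duplex = hw->phy.autoneg_advertised;
	 * A plain ETH_LINK_SPEED_AUTONEG request keeps hw->mac.autoneg == 1 and
	 * advertises E1000_ALL_SPEED_DUPLEX, as in the earlier hunk.
	 */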
@@ -1355,7 +1381,9 @@ eth_igb_start(struct rte_eth_dev *dev)
        if (rte_intr_allow_others(intr_handle)) {
                /* check if lsc interrupt is enabled */
                if (dev->data->dev_conf.intr_conf.lsc != 0)
-                       eth_igb_lsc_interrupt_setup(dev);
+                       eth_igb_lsc_interrupt_setup(dev, TRUE);
+               else
+                       eth_igb_lsc_interrupt_setup(dev, FALSE);
        } else {
                rte_intr_callback_unregister(intr_handle,
                                             eth_igb_interrupt_handler,
@@ -2198,7 +2226,7 @@ eth_igb_link_update(struct rte_eth_dev *dev, int wait_to_complete)
                link.link_speed = 0;
                link.link_duplex = ETH_LINK_HALF_DUPLEX;
                link.link_status = ETH_LINK_DOWN;
-               link.link_autoneg = ETH_LINK_SPEED_FIXED;
+               link.link_autoneg = ETH_LINK_FIXED;
        }
        rte_igb_dev_atomic_write_link_status(dev, &link);
 
@@ -2517,18 +2545,23 @@ eth_igb_vlan_offload_set(struct rte_eth_dev *dev, int mask)
  *
  * @param dev
  *  Pointer to struct rte_eth_dev.
+ * @param on
+ *  Enable or Disable
  *
  * @return
  *  - On success, zero.
  *  - On failure, a negative value.
  */
 static int
-eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev)
+eth_igb_lsc_interrupt_setup(struct rte_eth_dev *dev, uint8_t on)
 {
        struct e1000_interrupt *intr =
                E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
 
-       intr->mask |= E1000_ICR_LSC;
+       if (on)
+               intr->mask |= E1000_ICR_LSC;
+       else
+               intr->mask &= ~E1000_ICR_LSC;
 
        return 0;
 }
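
For readers unfamiliar with the consumer side of this interrupt, a minimal application-side sketch (editorial illustration only, assuming the DPDK 16.11 ethdev API with uint8_t port ids and a void-returning link-state callback; lsc_event_cb and enable_lsc_notifications are hypothetical names):

	#include <stdio.h>
	#include <rte_ethdev.h>

	/* Link-state-change callback: re-read the link and log it. */
	static void
	lsc_event_cb(uint8_t port_id, enum rte_eth_event_type event, void *cb_arg)
	{
		struct rte_eth_link link;

		(void)event;
		(void)cb_arg;
		rte_eth_link_get_nowait(port_id, &link);
		printf("port %u link %s, %u Mbps\n", (unsigned)port_id,
		       link.link_status ? "up" : "down",
		       (unsigned)link.link_speed);
	}

	/* Request LSC interrupts (before configure/start) and hook the callback;
	 * if intr_conf.lsc stays 0, eth_igb_start() now calls the setup above
	 * with FALSE and E1000_ICR_LSC is cleared from the interrupt mask.
	 */
	static int
	enable_lsc_notifications(uint8_t port_id, struct rte_eth_conf *conf)
	{
		conf->intr_conf.lsc = 1;
		return rte_eth_dev_callback_register(port_id, RTE_ETH_EVENT_INTR_LSC,
						      lsc_event_cb, NULL);
	}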
@@ -2669,7 +2702,7 @@ eth_igb_interrupt_action(struct rte_eth_dev *dev)
                E1000_WRITE_REG(hw, E1000_TCTL, tctl);
                E1000_WRITE_REG(hw, E1000_RCTL, rctl);
                E1000_WRITE_FLUSH(hw);
-               _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
+               _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
        }
 
        return 0;
@@ -2724,12 +2757,17 @@ void igbvf_mbx_process(struct rte_eth_dev *dev)
        struct e1000_mbx_info *mbx = &hw->mbx;
        u32 in_msg = 0;
 
-       if (mbx->ops.read(hw, &in_msg, 1, 0))
-               return;
+       /* peek the message first */
+       in_msg = E1000_READ_REG(hw, E1000_VMBMEM(0));
 
        /* PF reset VF event */
-       if (in_msg == E1000_PF_CONTROL_MSG)
-               _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET);
+       if (in_msg == E1000_PF_CONTROL_MSG) {
+               /* dummy mbx read to ack pf */
+               if (mbx->ops.read(hw, &in_msg, 1, 0))
+                       return;
+               _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET,
+                                             NULL);
+       }
 }
 
 static int
@@ -3052,7 +3090,8 @@ igbvf_dev_start(struct rte_eth_dev *dev)
        }
 
        /* check and configure queue intr-vector mapping */
-       if (dev->data->dev_conf.intr_conf.rxq != 0) {
+       if (rte_intr_cap_multiple(intr_handle) &&
+           dev->data->dev_conf.intr_conf.rxq) {
                intr_vector = dev->data->nb_rx_queues;
                ret = rte_intr_efd_enable(intr_handle, intr_vector);
                if (ret)
@@ -3710,10 +3749,6 @@ eth_igb_add_del_flex_filter(struct rte_eth_dev *dev,
        }
 
        wufc = E1000_READ_REG(hw, E1000_WUFC);
-       if (flex_filter->index < E1000_MAX_FHFT)
-               reg_off = E1000_FHFT(flex_filter->index);
-       else
-               reg_off = E1000_FHFT_EXT(flex_filter->index - E1000_MAX_FHFT);
 
        if (add) {
                if (eth_igb_flex_filter_lookup(&filter_info->flex_list,
@@ -3743,6 +3778,11 @@ eth_igb_add_del_flex_filter(struct rte_eth_dev *dev,
                        return -ENOSYS;
                }
 
+               if (flex_filter->index < E1000_MAX_FHFT)
+                       reg_off = E1000_FHFT(flex_filter->index);
+               else
+                       reg_off = E1000_FHFT_EXT(flex_filter->index - E1000_MAX_FHFT);
+
                E1000_WRITE_REG(hw, E1000_WUFC, wufc | E1000_WUFC_FLEX_HQ |
                                (E1000_WUFC_FLX0 << flex_filter->index));
                queueing = filter->len |
@@ -3771,6 +3811,11 @@ eth_igb_add_del_flex_filter(struct rte_eth_dev *dev,
                        return -ENOENT;
                }
 
+               if (it->index < E1000_MAX_FHFT)
+                       reg_off = E1000_FHFT(it->index);
+               else
+                       reg_off = E1000_FHFT_EXT(it->index - E1000_MAX_FHFT);
+
                for (i = 0; i < E1000_FHFT_SIZE_IN_DWD; i++)
                        E1000_WRITE_REG(hw, reg_off + i * sizeof(uint32_t), 0);
                E1000_WRITE_REG(hw, E1000_WUFC, wufc &
@@ -3800,7 +3845,7 @@ eth_igb_get_flex_filter(struct rte_eth_dev *dev,
        flex_filter.filter_info.priority = filter->priority;
        memcpy(flex_filter.filter_info.dwords, filter->bytes, filter->len);
        memcpy(flex_filter.filter_info.mask, filter->mask,
-                       RTE_ALIGN(filter->len, sizeof(char)) / sizeof(char));
+                       RTE_ALIGN(filter->len, CHAR_BIT) / CHAR_BIT);
 
        it = eth_igb_flex_filter_lookup(&filter_info->flex_list,
                                &flex_filter.filter_info);
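
A quick worked example of the mask-length arithmetic corrected above (editorial note; the 128-byte pattern limit and 16-byte mask size are taken from the generic flex-filter definitions and are assumptions here, not shown in this diff):

	/* The flex-filter mask holds one bit per pattern byte, so its length in
	 * bytes is the pattern length divided by CHAR_BIT, rounded up.  With
	 * filter->len == 48:
	 *
	 *     RTE_ALIGN(48, CHAR_BIT) / CHAR_BIT           ==  6   bytes (fixed)
	 *     RTE_ALIGN(48, sizeof(char)) / sizeof(char)   == 48   bytes (old)
	 *
	 * Assuming mask arrays of RTE_FLEX_FILTER_MASK_SIZE bytes
	 * (RTE_ALIGN(128, CHAR_BIT) / CHAR_BIT == 16), the old expression could
	 * read and write well past the end of both masks.
	 */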
@@ -4945,6 +4990,12 @@ eth_igb_get_regs(struct rte_eth_dev *dev,
        int count = 0;
        const struct reg_info *reg_group;
 
+       if (data == NULL) {
+               regs->length = eth_igb_get_reg_length(dev);
+               regs->width = sizeof(uint32_t);
+               return 0;
+       }
+
        /* Support only full register dump */
        if ((regs->length == 0) ||
            (regs->length == (uint32_t)eth_igb_get_reg_length(dev))) {
@@ -4969,6 +5020,12 @@ igbvf_get_regs(struct rte_eth_dev *dev,
        int count = 0;
        const struct reg_info *reg_group;
 
+       if (data == NULL) {
+               regs->length = igbvf_get_reg_length(dev);
+               regs->width = sizeof(uint32_t);
+               return 0;
+       }
+
        /* Support only full register dump */
        if ((regs->length == 0) ||
            (regs->length == (uint32_t)igbvf_get_reg_length(dev))) {
@@ -5039,22 +5096,18 @@ eth_igb_set_eeprom(struct rte_eth_dev *dev,
        return nvm->ops.write(hw,  first, length, data);
 }
 
-static struct rte_driver pmd_igb_drv = {
-       .type = PMD_PDEV,
-       .init = rte_igb_pmd_init,
-};
-
-static struct rte_driver pmd_igbvf_drv = {
-       .type = PMD_PDEV,
-       .init = rte_igbvf_pmd_init,
-};
-
 static int
 eth_igb_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
 {
        struct e1000_hw *hw =
                E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       uint32_t mask = 1 << queue_id;
+       struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+       uint32_t vec = E1000_MISC_VEC_ID;
+
+       if (rte_intr_allow_others(intr_handle))
+               vec = E1000_RX_VEC_START;
+
+       uint32_t mask = 1 << (queue_id + vec);
 
        E1000_WRITE_REG(hw, E1000_EIMC, mask);
        E1000_WRITE_FLUSH(hw);
@@ -5067,7 +5120,13 @@ eth_igb_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
 {
        struct e1000_hw *hw =
                E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
-       uint32_t mask = 1 << queue_id;
+       struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle;
+       uint32_t vec = E1000_MISC_VEC_ID;
+
+       if (rte_intr_allow_others(intr_handle))
+               vec = E1000_RX_VEC_START;
+
+       uint32_t mask = 1 << (queue_id + vec);
        uint32_t regval;
 
        regval = E1000_READ_REG(hw, E1000_EIMS);
@@ -5210,5 +5269,7 @@ eth_igb_configure_msix_intr(struct rte_eth_dev *dev)
        E1000_WRITE_FLUSH(hw);
 }
 
-PMD_REGISTER_DRIVER(pmd_igb_drv);
-PMD_REGISTER_DRIVER(pmd_igbvf_drv);
+RTE_PMD_REGISTER_PCI(net_e1000_igb, rte_igb_pmd.pci_drv);
+RTE_PMD_REGISTER_PCI_TABLE(net_e1000_igb, pci_id_igb_map);
+RTE_PMD_REGISTER_PCI(net_e1000_igb_vf, rte_igbvf_pmd.pci_drv);
+RTE_PMD_REGISTER_PCI_TABLE(net_e1000_igb_vf, pci_id_igbvf_map);