X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=drivers%2Fnet%2Fixgbe%2Fixgbe_ethdev.c;h=f047db83c38e7a57ecb6e045e4c6c8488de8c1dd;hb=refs%2Fheads%2Fupstream-17.11-stable;hp=22171d866bf10bf6fad9cd1868c9330e6e640e42;hpb=f239aed5e674965691846e8ce3f187dd47523689;p=deb_dpdk.git

diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index 22171d86..f047db83 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -48,10 +48,10 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
-#include 
 #include 
 #include 
 #include 
@@ -61,6 +61,9 @@
 #include 
 #include 
 #include 
+#ifdef RTE_LIBRTE_SECURITY
+#include 
+#endif
 
 #include "ixgbe_logs.h"
 #include "base/ixgbe_api.h"
@@ -92,6 +95,9 @@
 /* Timer value included in XOFF frames. */
 #define IXGBE_FC_PAUSE 0x680
 
+/*Default value of Max Rx Queue*/
+#define IXGBE_MAX_RX_QUEUE_NUM 128
+
 #define IXGBE_LINK_DOWN_CHECK_TIMEOUT 4000 /* ms */
 #define IXGBE_LINK_UP_CHECK_TIMEOUT   1000 /* ms */
 #define IXGBE_VMDQ_NUM_UC_MAC         4096 /* Maximum nb. of UC MAC addr. */
@@ -169,13 +175,14 @@ static void ixgbe_dev_stop(struct rte_eth_dev *dev);
 static int  ixgbe_dev_set_link_up(struct rte_eth_dev *dev);
 static int  ixgbe_dev_set_link_down(struct rte_eth_dev *dev);
 static void ixgbe_dev_close(struct rte_eth_dev *dev);
+static int  ixgbe_dev_reset(struct rte_eth_dev *dev);
 static void ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev);
 static void ixgbe_dev_promiscuous_disable(struct rte_eth_dev *dev);
 static void ixgbe_dev_allmulticast_enable(struct rte_eth_dev *dev);
 static void ixgbe_dev_allmulticast_disable(struct rte_eth_dev *dev);
 static int ixgbe_dev_link_update(struct rte_eth_dev *dev,
 				int wait_to_complete);
-static void ixgbe_dev_stats_get(struct rte_eth_dev *dev,
+static int ixgbe_dev_stats_get(struct rte_eth_dev *dev,
 				struct rte_eth_stats *stats);
 static int ixgbe_dev_xstats_get(struct rte_eth_dev *dev,
 				struct rte_eth_xstat *xstats, unsigned n);
@@ -218,7 +225,7 @@ static void ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev,
 		uint16_t queue, bool on);
 static void ixgbe_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue,
 		int on);
-static void ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask);
+static int ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask);
 static void ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue);
 static void ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue);
 static void ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev);
@@ -247,6 +254,8 @@ static int ixgbe_dev_interrupt_action(struct rte_eth_dev *dev,
 		struct rte_intr_handle *handle);
 static void ixgbe_dev_interrupt_handler(void *param);
 static void ixgbe_dev_interrupt_delayed_handler(void *param);
+static void ixgbe_dev_setup_link_alarm_handler(void *param);
+
 static int ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
 		uint32_t index, uint32_t pool);
 static void ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index);
@@ -265,16 +274,17 @@ static int ixgbevf_dev_link_update(struct rte_eth_dev *dev,
 				   int wait_to_complete);
 static void ixgbevf_dev_stop(struct rte_eth_dev *dev);
 static void ixgbevf_dev_close(struct rte_eth_dev *dev);
+static int  ixgbevf_dev_reset(struct rte_eth_dev *dev);
 static void ixgbevf_intr_disable(struct ixgbe_hw *hw);
 static void ixgbevf_intr_enable(struct ixgbe_hw *hw);
-static void ixgbevf_dev_stats_get(struct rte_eth_dev *dev,
+static int ixgbevf_dev_stats_get(struct rte_eth_dev *dev,
 		struct rte_eth_stats *stats);
 static void ixgbevf_dev_stats_reset(struct rte_eth_dev *dev);
 static int  ixgbevf_vlan_filter_set(struct rte_eth_dev *dev,
 		uint16_t vlan_id, int on);
 static void ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev,
 		uint16_t queue, int on);
-static void ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
+static int ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask);
 static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on);
 static int ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev,
 					    uint16_t queue_id);
@@ -518,6 +528,7 @@ static const struct eth_dev_ops ixgbe_eth_dev_ops = {
 	.dev_set_link_up      = ixgbe_dev_set_link_up,
 	.dev_set_link_down    = ixgbe_dev_set_link_down,
 	.dev_close            = ixgbe_dev_close,
+	.dev_reset            = ixgbe_dev_reset,
 	.promiscuous_enable   = ixgbe_dev_promiscuous_enable,
 	.promiscuous_disable  = ixgbe_dev_promiscuous_disable,
 	.allmulticast_enable  = ixgbe_dev_allmulticast_enable,
@@ -608,6 +619,7 @@ static const struct eth_dev_ops ixgbevf_eth_dev_ops = {
 	.xstats_reset         = ixgbevf_dev_stats_reset,
 	.xstats_get_names     = ixgbevf_dev_xstats_get_names,
 	.dev_close            = ixgbevf_dev_close,
+	.dev_reset	      = ixgbevf_dev_reset,
 	.allmulticast_enable  = ixgbevf_dev_allmulticast_enable,
 	.allmulticast_disable = ixgbevf_dev_allmulticast_disable,
 	.dev_infos_get        = ixgbevf_dev_info_get,
@@ -1163,8 +1175,14 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
 		return 0;
 	}
 
+#ifdef RTE_LIBRTE_SECURITY
+	/* Initialize security_ctx only for primary process*/
+	eth_dev->security_ctx = ixgbe_ipsec_ctx_create(eth_dev);
+	if (eth_dev->security_ctx == NULL)
+		return -ENOMEM;
+#endif
+
 	rte_eth_copy_pci_info(eth_dev, pci_dev);
-	eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
 
 	/* Vendor and Device ID need to be set before init of shared code */
 	hw->device_id = pci_dev->id.device_id;
@@ -1332,12 +1350,8 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev)
 	/* initialize l2 tunnel filter list & hash */
 	ixgbe_l2_tn_filter_init(eth_dev);
 
-	TAILQ_INIT(&filter_ntuple_list);
-	TAILQ_INIT(&filter_ethertype_list);
-	TAILQ_INIT(&filter_syn_list);
-	TAILQ_INIT(&filter_fdir_list);
-	TAILQ_INIT(&filter_l2_tunnel_list);
-	TAILQ_INIT(&ixgbe_flow_list);
+	/* initialize flow filter lists */
+	ixgbe_filterlist_init();
 
 	/* initialize bandwidth configuration info */
 	memset(bw_conf, 0, sizeof(struct ixgbe_bw_conf));
@@ -1354,6 +1368,8 @@ eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev)
 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
 	struct ixgbe_hw *hw;
+	int retries = 0;
+	int ret;
 
 	PMD_INIT_FUNC_TRACE();
 
@@ -1374,8 +1390,20 @@ eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev)
 
 	/* disable uio intr before callback unregister */
 	rte_intr_disable(intr_handle);
-	rte_intr_callback_unregister(intr_handle,
-				     ixgbe_dev_interrupt_handler, eth_dev);
+
+	do {
+		ret = rte_intr_callback_unregister(intr_handle,
+				ixgbe_dev_interrupt_handler, eth_dev);
+		if (ret >= 0) {
+			break;
+		} else if (ret != -EAGAIN) {
+			PMD_INIT_LOG(ERR,
+				"intr callback unregister failed: %d",
+				ret);
+			return ret;
+		}
+		rte_delay_ms(100);
+	} while (retries++ < (10 + IXGBE_LINK_UP_TIME));
 
 	/* uninitialize PF if max_vfs not zero */
 	ixgbe_pf_host_uninit(eth_dev);
@@ -1401,6 +1429,10 @@ eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev)
 	/* Remove all Traffic Manager configuration */
 	ixgbe_tm_conf_uninit(eth_dev);
 
+#ifdef RTE_LIBRTE_SECURITY
+	rte_free(eth_dev->security_ctx);
+#endif
+
 	return 0;
 }
 
@@ -1627,7 +1659,6 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev)
 	}
 
 	rte_eth_copy_pci_info(eth_dev, pci_dev);
-	eth_dev->data->dev_flags |= RTE_ETH_DEV_DETACHABLE;
 
 	hw->device_id = pci_dev->id.device_id;
 	hw->vendor_id = pci_dev->id.vendor_id;
@@ -1781,7 +1812,8 @@ static int eth_ixgbe_pci_remove(struct rte_pci_device *pci_dev)
 
 static struct rte_pci_driver rte_ixgbe_pmd = {
 	.id_table = pci_id_ixgbe_map,
-	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
+		     RTE_PCI_DRV_IOVA_AS_VA,
 	.probe = eth_ixgbe_pci_probe,
 	.remove = eth_ixgbe_pci_remove,
 };
@@ -1803,7 +1835,7 @@ static int eth_ixgbevf_pci_remove(struct rte_pci_device *pci_dev)
  */
 static struct rte_pci_driver rte_ixgbevf_pmd = {
 	.id_table = pci_id_ixgbevf_map,
-	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_IOVA_AS_VA,
 	.probe = eth_ixgbevf_pci_probe,
 	.remove = eth_ixgbevf_pci_remove,
 };
@@ -1959,9 +1991,9 @@ ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on)
 	rxq = dev->data->rx_queues[queue];
 
 	if (on)
-		rxq->vlan_flags = PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED;
+		rxq->vlan_flags = PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
 	else
-		rxq->vlan_flags = PKT_RX_VLAN_PKT;
+		rxq->vlan_flags = PKT_RX_VLAN;
 }
 
 static void
@@ -2125,7 +2157,7 @@ ixgbe_vlan_hw_extend_enable(struct rte_eth_dev *dev)
 	 */
 }
 
-static void
+static int
 ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 {
 	if (mask & ETH_VLAN_STRIP_MASK) {
@@ -2148,6 +2180,8 @@ ixgbe_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 		else
 			ixgbe_vlan_hw_extend_disable(dev);
 	}
+
+	return 0;
 }
 
 static void
@@ -2179,9 +2213,10 @@ ixgbe_check_vf_rss_rxq_num(struct rte_eth_dev *dev, uint16_t nb_rx_q)
 		return -EINVAL;
 	}
 
-	RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = nb_rx_q;
-	RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx = pci_dev->max_vfs * nb_rx_q;
-
+	RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool =
+		IXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
+	RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx =
+		pci_dev->max_vfs * RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
 	return 0;
 }
 
@@ -2221,8 +2256,6 @@ ixgbe_check_mq_mode(struct rte_eth_dev *dev)
 		case ETH_MQ_RX_NONE:
 			/* if nothing mq mode configure, use default scheme */
 			dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
-			if (RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool > 1)
-				RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1;
 			break;
 		default: /* ETH_MQ_RX_DCB, ETH_MQ_RX_DCB_RSS or ETH_MQ_TX_DCB*/
 			/* SRIOV only works in VMDq enable mode */
@@ -2299,11 +2332,6 @@ ixgbe_check_mq_mode(struct rte_eth_dev *dev)
 		if (dev_conf->rxmode.mq_mode == ETH_MQ_RX_DCB) {
 			const struct rte_eth_dcb_rx_conf *conf;
 
-			if (nb_rx_q != IXGBE_DCB_NB_QUEUES) {
-				PMD_INIT_LOG(ERR, "DCB selected, nb_rx_q != %d.",
-						 IXGBE_DCB_NB_QUEUES);
-				return -EINVAL;
-			}
 			conf = &dev_conf->rx_adv_conf.dcb_rx_conf;
 			if (!(conf->nb_tcs == ETH_4_TCS ||
 			       conf->nb_tcs == ETH_8_TCS)) {
@@ -2317,11 +2345,6 @@ ixgbe_check_mq_mode(struct rte_eth_dev *dev)
 		if (dev_conf->txmode.mq_mode == ETH_MQ_TX_DCB) {
 			const struct rte_eth_dcb_tx_conf *conf;
 
-			if (nb_tx_q != IXGBE_DCB_NB_QUEUES) {
-				PMD_INIT_LOG(ERR, "DCB, nb_tx_q != %d.",
-						 IXGBE_DCB_NB_QUEUES);
-				return -EINVAL;
-			}
 			conf = &dev_conf->tx_adv_conf.dcb_tx_conf;
 			if (!(conf->nb_tcs == ETH_4_TCS ||
 			       conf->nb_tcs == ETH_8_TCS)) {
@@ -2504,11 +2527,15 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
 	 *    - fixed speed: TODO implement
 	 */
 	if (dev->data->dev_conf.link_speeds & ETH_LINK_SPEED_FIXED) {
-		PMD_INIT_LOG(ERR, "Invalid link_speeds for port %hhu; fix speed not supported",
-			     dev->data->port_id);
+		PMD_INIT_LOG(ERR,
+		"Invalid
 link_speeds for port %u, fix speed not supported",
+				dev->data->port_id);
 		return -EINVAL;
 	}
 
+	/* Stop the link setup handler before resetting the HW. */
+	rte_eal_alarm_cancel(ixgbe_dev_setup_link_alarm_handler, dev);
+
 	/* disable uio/vfio intr/eventfd mapping */
 	rte_intr_disable(intr_handle);
 
@@ -2568,9 +2595,13 @@ ixgbe_dev_start(struct rte_eth_dev *dev)
 		goto error;
 	}
 
-	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | \
+	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
 		ETH_VLAN_EXTEND_MASK;
-	ixgbe_vlan_offload_set(dev, mask);
+	err = ixgbe_vlan_offload_set(dev, mask);
+	if (err) {
+		PMD_INIT_LOG(ERR, "Unable to set VLAN offload");
+		goto error;
+	}
 
 	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
 		/* Enable vlan filtering for VMDq */
@@ -2706,6 +2737,12 @@ skip_link_setup:
 			    "please call hierarchy_commit() "
 			    "before starting the port");
 
+	/*
+	 * Update link status right before return, because it may
+	 * start link configuration process in a separate thread.
+	 */
+	ixgbe_dev_link_update(dev, 0);
+
 	return 0;
 
 error:
@@ -2733,6 +2770,8 @@ ixgbe_dev_stop(struct rte_eth_dev *dev)
 
 	PMD_INIT_FUNC_TRACE();
 
+	rte_eal_alarm_cancel(ixgbe_dev_setup_link_alarm_handler, dev);
+
 	/* disable interrupts */
 	ixgbe_disable_intr(hw);
 
@@ -2842,7 +2881,7 @@ ixgbe_dev_set_link_down(struct rte_eth_dev *dev)
 }
 
 /*
- * Reest and stop device.
+ * Reset and stop device.
  */
 static void
 ixgbe_dev_close(struct rte_eth_dev *dev)
@@ -2865,6 +2904,32 @@ ixgbe_dev_close(struct rte_eth_dev *dev)
 	ixgbe_set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV);
 }
 
+/*
+ * Reset PF device.
+ */
+static int
+ixgbe_dev_reset(struct rte_eth_dev *dev)
+{
+	int ret;
+
+	/* When a DPDK PMD PF begin to reset PF port, it should notify all
+	 * its VF to make them align with it. The detailed notification
+	 * mechanism is PMD specific. As to ixgbe PF, it is rather complex.
+	 * To avoid unexpected behavior in VF, currently reset of PF with
+	 * SR-IOV activation is not supported. It might be supported later.
+	 */
+	if (dev->data->sriov.active)
+		return -ENOTSUP;
+
+	ret = eth_ixgbe_dev_uninit(dev);
+	if (ret)
+		return ret;
+
+	ret = eth_ixgbe_dev_init(dev);
+
+	return ret;
+}
+
 static void
 ixgbe_read_stats_registers(struct ixgbe_hw *hw,
 			   struct ixgbe_hw_stats *hw_stats,
@@ -3077,7 +3142,7 @@ ixgbe_read_stats_registers(struct ixgbe_hw *hw,
 /*
  * This function is based on ixgbe_update_stats_counters() in ixgbe/ixgbe.c
  */
-static void
+static int
 ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 {
 	struct ixgbe_hw *hw =
@@ -3099,7 +3164,7 @@ ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 			&total_qbrc, &total_qprc, &total_qprdc);
 
 	if (stats == NULL)
-		return;
+		return -EINVAL;
 
 	/* Fill out the rte_eth_stats statistics structure */
 	stats->ipackets = total_qprc;
@@ -3130,6 +3195,7 @@ ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 
 	/* Tx Errors */
 	stats->oerrors  = 0;
+	return 0;
 }
 
 static void
@@ -3541,7 +3607,7 @@ ixgbevf_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
 	return IXGBEVF_NB_XSTATS;
 }
 
-static void
+static int
 ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 {
 	struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
@@ -3550,12 +3616,13 @@ ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 	ixgbevf_update_stats(dev);
 
 	if (stats == NULL)
-		return;
+		return -EINVAL;
 
 	stats->ipackets = hw_stats->vfgprc;
 	stats->ibytes = hw_stats->vfgorc;
 	stats->opackets = hw_stats->vfgptc;
 	stats->obytes = hw_stats->vfgotc;
+	return 0;
 }
 
 static void
@@ -3665,6 +3732,11 @@ ixgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	    hw->mac.type == ixgbe_mac_X550EM_a)
 		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
 
+#ifdef RTE_LIBRTE_SECURITY
+	dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_SECURITY;
+	dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_SECURITY;
+#endif
+
 	dev_info->default_rxconf = (struct rte_eth_rxconf) {
 		.rx_thresh = {
 			.pthresh = IXGBE_DEFAULT_RX_PTHRESH,
@@ -3806,11 +3878,6 @@ static int
 ixgbevf_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
 		   int *link_up, int wait_to_complete)
 {
-	/**
-	 * for a quick link status checking, wait_to_compelet == 0,
-	 * skip PF link status checking
-	 */
-	bool no_pflink_check = wait_to_complete == 0;
 	struct ixgbe_mbx_info *mbx = &hw->mbx;
 	struct ixgbe_mac_info *mac = &hw->mac;
 	uint32_t links_reg, in_msg;
@@ -3831,7 +3898,7 @@ ixgbevf_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
 	/* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
 	 * before the link status is correct
 	 */
-	if (mac->type == ixgbe_mac_82599_vf) {
+	if (mac->type == ixgbe_mac_82599_vf && wait_to_complete) {
 		int i;
 
 		for (i = 0; i < 5; i++) {
@@ -3871,14 +3938,6 @@ ixgbevf_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
 		*speed = IXGBE_LINK_SPEED_UNKNOWN;
 	}
 
-	if (no_pflink_check) {
-		if (*speed == IXGBE_LINK_SPEED_UNKNOWN)
-			mac->get_link_status = true;
-		else
-			mac->get_link_status = false;
-
-		goto out;
-	}
 	/* if the read failed it could just be a mailbox collision, best wait
 	 * until we are called again and don't report an error
 	 */
@@ -3888,7 +3947,7 @@ ixgbevf_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
 	if (!(in_msg & IXGBE_VT_MSGTYPE_CTS)) {
 		/* msg is not CTS and is NACK we must have lost CTS status */
 		if (in_msg & IXGBE_VT_MSGTYPE_NACK)
-			ret_val = -1;
+			mac->get_link_status = false;
 		goto out;
 	}
 
@@ -3908,6 +3967,25 @@ out:
 	return ret_val;
 }
 
+static void
+ixgbe_dev_setup_link_alarm_handler(void *param)
+{
+	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct ixgbe_interrupt *intr =
+		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+	u32 speed;
+	bool autoneg = false;
+
+	speed = hw->phy.autoneg_advertised;
+	if (!speed)
+		ixgbe_get_link_capabilities(hw, &speed, &autoneg);
+
+	ixgbe_setup_link(hw, speed, true);
+
+	intr->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
+}
+
 /* return 0 means link status changed, -1 means not changed */
 static int
 ixgbe_dev_link_update_share(struct rte_eth_dev *dev,
@@ -3920,24 +3998,22 @@ ixgbe_dev_link_update_share(struct rte_eth_dev *dev,
 		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
 	int link_up;
 	int diag;
-	u32 speed = 0;
 	int wait = 1;
-	bool autoneg = false;
 
 	link.link_status = ETH_LINK_DOWN;
 	link.link_speed = 0;
 	link.link_duplex = ETH_LINK_HALF_DUPLEX;
+	link.link_autoneg = ETH_LINK_AUTONEG;
 	memset(&old, 0, sizeof(old));
 	rte_ixgbe_dev_atomic_read_link_status(dev, &old);
 
 	hw->mac.get_link_status = true;
 
-	if ((intr->flags & IXGBE_FLAG_NEED_LINK_CONFIG) &&
-		ixgbe_get_media_type(hw) == ixgbe_media_type_fiber) {
-		speed = hw->phy.autoneg_advertised;
-		if (!speed)
-			ixgbe_get_link_capabilities(hw, &speed, &autoneg);
-		ixgbe_setup_link(hw, speed, true);
+	if (intr->flags & IXGBE_FLAG_NEED_LINK_CONFIG) {
+		rte_ixgbe_dev_atomic_write_link_status(dev, &link);
+		if (link.link_status == old.link_status)
+			return -1;
+		return 0;
 	}
 
 	/* check if it needs to wait to complete, if lsc interrupt is enabled */
@@ -3960,12 +4036,16 @@ ixgbe_dev_link_update_share(struct rte_eth_dev *dev,
 
 	if (link_up == 0) {
 		rte_ixgbe_dev_atomic_write_link_status(dev, &link);
-		intr->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
+		if (ixgbe_get_media_type(hw) == ixgbe_media_type_fiber) {
+			intr->flags |= IXGBE_FLAG_NEED_LINK_CONFIG;
+			rte_eal_alarm_set(10,
+				ixgbe_dev_setup_link_alarm_handler, dev);
+		}
 		if (link.link_status == old.link_status)
 			return -1;
 		return 0;
 	}
-	intr->flags &= ~IXGBE_FLAG_NEED_LINK_CONFIG;
+
 	link.link_status = ETH_LINK_UP;
 	link.link_duplex = ETH_LINK_FULL_DUPLEX;
@@ -4971,7 +5051,14 @@ ixgbevf_dev_start(struct rte_eth_dev *dev)
 
 	PMD_INIT_FUNC_TRACE();
 
-	hw->mac.ops.reset_hw(hw);
+	/* Stop the link setup handler before resetting the HW. */
+	rte_eal_alarm_cancel(ixgbe_dev_setup_link_alarm_handler, dev);
+
+	err = hw->mac.ops.reset_hw(hw);
+	if (err) {
+		PMD_INIT_LOG(ERR, "Unable to reset vf hardware (%d)", err);
+		return err;
+	}
 	hw->mac.get_link_status = true;
 
 	/* negotiate mailbox API version to use with the PF. */
@@ -4993,13 +5080,22 @@ ixgbevf_dev_start(struct rte_eth_dev *dev)
 	/* Set HW strip */
 	mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
 		ETH_VLAN_EXTEND_MASK;
-	ixgbevf_vlan_offload_set(dev, mask);
+	err = ixgbevf_vlan_offload_set(dev, mask);
+	if (err) {
+		PMD_INIT_LOG(ERR, "Unable to set VLAN offload (%d)", err);
+		ixgbe_dev_clear_queues(dev);
+		return err;
+	}
 
 	ixgbevf_dev_rxtx_start(dev);
 
 	/* check and configure queue intr-vector mapping */
-	if (dev->data->dev_conf.intr_conf.rxq != 0) {
-		intr_vector = dev->data->nb_rx_queues;
+	if (rte_intr_cap_multiple(intr_handle) &&
+	    dev->data->dev_conf.intr_conf.rxq) {
+		/* According to datasheet, only vector 0/1/2 can be used,
+		 * now only one vector is used for Rx queue
+		 */
+		intr_vector = 1;
 		if (rte_intr_efd_enable(intr_handle, intr_vector))
 			return -1;
 	}
@@ -5016,11 +5112,26 @@ ixgbevf_dev_start(struct rte_eth_dev *dev)
 	}
 	ixgbevf_configure_msix(dev);
 
+	/* When a VF port is bound to VFIO-PCI, only miscellaneous interrupt
+	 * is mapped to VFIO vector 0 in eth_ixgbevf_dev_init( ).
+	 * If previous VFIO interrupt mapping setting in eth_ixgbevf_dev_init( )
+	 * is not cleared, it will fail when following rte_intr_enable( ) tries
+	 * to map Rx queue interrupt to other VFIO vectors.
+	 * So clear uio/vfio intr/evevnfd first to avoid failure.
+	 */
+	rte_intr_disable(intr_handle);
+	rte_intr_enable(intr_handle);
+
 	/* Re-enable interrupt for VF */
 	ixgbevf_intr_enable(hw);
 
+	/*
+	 * Update link status right before return, because it may
+	 * start link configuration process in a separate thread.
+	 */
+	ixgbevf_dev_link_update(dev, 0);
+
 	return 0;
 }
 
@@ -5033,6 +5144,8 @@ ixgbevf_dev_stop(struct rte_eth_dev *dev)
 
 	PMD_INIT_FUNC_TRACE();
 
+	rte_eal_alarm_cancel(ixgbe_dev_setup_link_alarm_handler, dev);
+
 	ixgbevf_intr_disable(hw);
 
 	hw->adapter_stopped = 1;
@@ -5078,6 +5191,23 @@ ixgbevf_dev_close(struct rte_eth_dev *dev)
 	ixgbevf_remove_mac_addr(dev, 0);
 }
 
+/*
+ * Reset VF device
+ */
+static int
+ixgbevf_dev_reset(struct rte_eth_dev *dev)
+{
+	int ret;
+
+	ret = eth_ixgbevf_dev_uninit(dev);
+	if (ret)
+		return ret;
+
+	ret = eth_ixgbevf_dev_init(dev);
+
+	return ret;
+}
+
 static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on)
 {
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
@@ -5153,7 +5283,7 @@ ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on)
 	ixgbe_vlan_hw_strip_bitmap_set(dev, queue, on);
 }
 
-static void
+static int
 ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 {
 	struct ixgbe_hw *hw =
@@ -5168,6 +5298,8 @@ ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 		for (i = 0; i < hw->mac.max_rx_queues; i++)
 			ixgbevf_vlan_strip_queue_set(dev, i, on);
 	}
+
+	return 0;
 }
 
 int
@@ -5450,13 +5582,13 @@ ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
 	IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl);
 
 	/* write pool mirrror control  register */
-	if (mirror_conf->rule_type == ETH_MIRROR_VIRTUAL_POOL_UP) {
+	if (mirror_conf->rule_type & ETH_MIRROR_VIRTUAL_POOL_UP) {
 		IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), mp_lsb);
 		IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset),
 				mp_msb);
 	}
 	/* write VLAN mirrror control  register */
-	if (mirror_conf->rule_type == ETH_MIRROR_VLAN) {
+	if (mirror_conf->rule_type & ETH_MIRROR_VLAN) {
 		IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), mv_lsb);
 		IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset),
 				mv_msb);
@@ -5509,9 +5641,12 @@ ixgbevf_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
 	uint32_t mask;
 	struct ixgbe_hw *hw =
 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	uint32_t vec = IXGBE_MISC_VEC_ID;
 
 	mask = IXGBE_READ_REG(hw, IXGBE_VTEIMS);
-	mask |= (1 << IXGBE_MISC_VEC_ID);
+	if (rte_intr_allow_others(intr_handle))
+		vec = IXGBE_RX_VEC_START;
+	mask |= (1 << vec);
 	RTE_SET_USED(queue_id);
 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
 
@@ -5526,9 +5661,14 @@ ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
 	uint32_t mask;
 	struct ixgbe_hw *hw =
 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+	uint32_t vec = IXGBE_MISC_VEC_ID;
 
 	mask = IXGBE_READ_REG(hw, IXGBE_VTEIMS);
-	mask &= ~(1 << IXGBE_MISC_VEC_ID);
+	if (rte_intr_allow_others(intr_handle))
+		vec = IXGBE_RX_VEC_START;
+	mask &= ~(1 << vec);
 	RTE_SET_USED(queue_id);
 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
 
@@ -5670,6 +5810,7 @@ ixgbevf_configure_msix(struct rte_eth_dev *dev)
 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	uint32_t q_idx;
 	uint32_t vector_idx = IXGBE_MISC_VEC_ID;
+	uint32_t base = IXGBE_MISC_VEC_ID;
 
 	/* Configure VF other cause ivar */
 	ixgbevf_set_ivar_map(hw, -1, 1, vector_idx);
@@ -5680,6 +5821,11 @@ ixgbevf_configure_msix(struct rte_eth_dev *dev)
 	if (!rte_intr_dp_is_en(intr_handle))
 		return;
 
+	if (rte_intr_allow_others(intr_handle)) {
+		base = IXGBE_RX_VEC_START;
+		vector_idx = IXGBE_RX_VEC_START;
+	}
+
 	/* Configure all RX queues of VF */
 	for (q_idx = 0; q_idx < dev->data->nb_rx_queues; q_idx++) {
 		/* Force all queue use vector 0,
@@ -5687,6 +5833,8 @@ ixgbevf_configure_msix(struct rte_eth_dev *dev)
 		 */
 		ixgbevf_set_ivar_map(hw, 0, q_idx, vector_idx);
 		intr_handle->intr_vec[q_idx] = vector_idx;
+		if (vector_idx < base + intr_handle->nb_efd - 1)
+			vector_idx++;
 	}
 }
 
@@ -5709,8 +5857,12 @@ ixgbe_configure_msix(struct rte_eth_dev *dev)
 
 	/* won't configure msix register if no mapping is done
 	 * between intr vector and event fd
+	 * but if misx has been enabled already, need to configure
+	 * auto clean, auto mask and throttling.
 	 */
-	if (!rte_intr_dp_is_en(intr_handle))
+	gpie = IXGBE_READ_REG(hw, IXGBE_GPIE);
+	if (!rte_intr_dp_is_en(intr_handle) &&
+	    !(gpie & (IXGBE_GPIE_MSIX_MODE | IXGBE_GPIE_PBA_SUPPORT)))
 		return;
 
 	if (rte_intr_allow_others(intr_handle))
@@ -5734,27 +5886,30 @@ ixgbe_configure_msix(struct rte_eth_dev *dev)
 	/* Populate the IVAR table and set the ITR values to the
 	 * corresponding register.
 	 */
-	for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
-	     queue_id++) {
-		/* by default, 1:1 mapping */
-		ixgbe_set_ivar_map(hw, 0, queue_id, vec);
-		intr_handle->intr_vec[queue_id] = vec;
-		if (vec < base + intr_handle->nb_efd - 1)
-			vec++;
-	}
+	if (rte_intr_dp_is_en(intr_handle)) {
+		for (queue_id = 0; queue_id < dev->data->nb_rx_queues;
+			queue_id++) {
+			/* by default, 1:1 mapping */
+			ixgbe_set_ivar_map(hw, 0, queue_id, vec);
+			intr_handle->intr_vec[queue_id] = vec;
+			if (vec < base + intr_handle->nb_efd - 1)
+				vec++;
+		}
 
-	switch (hw->mac.type) {
-	case ixgbe_mac_82598EB:
-		ixgbe_set_ivar_map(hw, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
-				   IXGBE_MISC_VEC_ID);
-		break;
-	case ixgbe_mac_82599EB:
-	case ixgbe_mac_X540:
-	case ixgbe_mac_X550:
-		ixgbe_set_ivar_map(hw, -1, 1, IXGBE_MISC_VEC_ID);
-		break;
-	default:
-		break;
+		switch (hw->mac.type) {
+		case ixgbe_mac_82598EB:
+			ixgbe_set_ivar_map(hw, -1,
+					   IXGBE_IVAR_OTHER_CAUSES_INDEX,
+					   IXGBE_MISC_VEC_ID);
+			break;
+		case ixgbe_mac_82599EB:
+		case ixgbe_mac_X540:
+		case ixgbe_mac_X550:
+			ixgbe_set_ivar_map(hw, -1, 1, IXGBE_MISC_VEC_ID);
+			break;
+		default:
+			break;
+		}
 	}
 	IXGBE_WRITE_REG(hw, IXGBE_EITR(IXGBE_MISC_VEC_ID),
 			IXGBE_MIN_INTER_INTERRUPT_INTERVAL_DEFAULT & 0xFFF);
@@ -6313,7 +6468,7 @@ ixgbe_add_del_ntuple_filter(struct rte_eth_dev *dev,
 				sizeof(struct ixgbe_5tuple_filter), 0);
 		if (filter == NULL)
 			return -ENOMEM;
-		(void)rte_memcpy(&filter->filter_info,
+		rte_memcpy(&filter->filter_info,
 			&filter_5tuple,
 			sizeof(struct ixgbe_5tuple_filter_info));
 		filter->queue = ntuple_filter->queue;
@@ -7153,6 +7308,8 @@ ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
 	struct ixgbe_dcb_config *dcb_config =
 			IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
 	struct ixgbe_dcb_tc_config *tc;
+	struct rte_eth_dcb_tc_queue_mapping *tc_queue;
+	uint8_t nb_tcs;
 	uint8_t i, j;
 
 	if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
@@ -7160,19 +7317,31 @@ ixgbe_dev_get_dcb_info(struct rte_eth_dev *dev,
 	else
 		dcb_info->nb_tcs = 1;
 
+	tc_queue = &dcb_info->tc_queue;
+	nb_tcs = dcb_info->nb_tcs;
+
 	if (dcb_config->vt_mode) { /* vt is enabled*/
 		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
 				&dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
 			dcb_info->prio_tc[i] = vmdq_rx_conf->dcb_tc[i];
-		for (i = 0; i < vmdq_rx_conf->nb_queue_pools; i++) {
-			for (j = 0; j < dcb_info->nb_tcs; j++) {
-				dcb_info->tc_queue.tc_rxq[i][j].base =
-						i * dcb_info->nb_tcs + j;
-				dcb_info->tc_queue.tc_rxq[i][j].nb_queue = 1;
-				dcb_info->tc_queue.tc_txq[i][j].base =
-						i * dcb_info->nb_tcs + j;
-				dcb_info->tc_queue.tc_txq[i][j].nb_queue = 1;
+		if (RTE_ETH_DEV_SRIOV(dev).active > 0) {
+			for (j = 0; j < nb_tcs; j++) {
+				tc_queue->tc_rxq[0][j].base = j;
+				tc_queue->tc_rxq[0][j].nb_queue = 1;
+				tc_queue->tc_txq[0][j].base = j;
+				tc_queue->tc_txq[0][j].nb_queue = 1;
+			}
+		} else {
+			for (i = 0; i < vmdq_rx_conf->nb_queue_pools; i++) {
+				for (j = 0; j < nb_tcs; j++) {
+					tc_queue->tc_rxq[i][j].base =
+						i * nb_tcs + j;
+					tc_queue->tc_rxq[i][j].nb_queue = 1;
+					tc_queue->tc_txq[i][j].base =
+						i * nb_tcs + j;
+					tc_queue->tc_txq[i][j].nb_queue = 1;
+				}
 			}
 		}
 	} else { /* vt is disabled*/
@@ -7529,7 +7698,7 @@ ixgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev,
 	if (!node)
 		return -ENOMEM;
 
-	(void)rte_memcpy(&node->key,
+	rte_memcpy(&node->key,
 		 &key,
 		 sizeof(struct ixgbe_l2_tn_key));
 	node->pool = l2_tunnel->pool;