diff --git a/drivers/net/nfp/nfp_net.c b/drivers/net/nfp/nfp_net.c
index 83dec061..8ab28dde 100644
--- a/drivers/net/nfp/nfp_net.c
+++ b/drivers/net/nfp/nfp_net.c
@@ -301,7 +301,7 @@ nfp_net_tx_queue_release_mbufs(struct nfp_net_txq *txq)
 
 	for (i = 0; i < txq->tx_count; i++) {
 		if (txq->txbufs[i].mbuf) {
-			rte_pktmbuf_free(txq->txbufs[i].mbuf);
+			rte_pktmbuf_free_seg(txq->txbufs[i].mbuf);
 			txq->txbufs[i].mbuf = NULL;
 		}
 	}
@@ -489,12 +489,10 @@ nfp_net_configure(struct rte_eth_dev *dev)
 	}
 
 	if (rxmode->jumbo_frame)
-		/* this is handled in rte_eth_dev_configure */
+		hw->mtu = rxmode->max_rx_pkt_len;
 
-	if (rxmode->hw_strip_crc) {
-		PMD_INIT_LOG(INFO, "strip CRC not supported");
-		return -EINVAL;
-	}
+	if (!rxmode->hw_strip_crc)
+		PMD_INIT_LOG(INFO, "HW does strip CRC and it is not configurable");
 
 	if (rxmode->enable_scatter) {
 		PMD_INIT_LOG(INFO, "Scatter not supported");
@@ -1038,6 +1036,8 @@ nfp_net_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 
 	/* RTE_ETHDEV_QUEUE_STAT_CNTRS default value is 16 */
 
+	memset(&nfp_dev_stats, 0, sizeof(nfp_dev_stats));
+
 	/* reading per RX ring stats */
 	for (i = 0; i < dev->data->nb_rx_queues; i++) {
 		if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
@@ -1194,7 +1194,7 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->max_rx_queues = (uint16_t)hw->max_rx_queues;
 	dev_info->max_tx_queues = (uint16_t)hw->max_tx_queues;
 	dev_info->min_rx_bufsize = ETHER_MIN_MTU;
-	dev_info->max_rx_pktlen = hw->mtu;
+	dev_info->max_rx_pktlen = hw->max_mtu;
 	/* Next should change when PF support is implemented */
 	dev_info->max_mac_addrs = 1;
@@ -1244,9 +1244,9 @@ nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
 	dev_info->reta_size = NFP_NET_CFG_RSS_ITBL_SZ;
 	dev_info->hash_key_size = NFP_NET_CFG_RSS_KEY_SZ;
 
-	dev_info->speed_capa = ETH_SPEED_NUM_1G | ETH_LINK_SPEED_10G |
-			       ETH_SPEED_NUM_25G | ETH_SPEED_NUM_40G |
-			       ETH_SPEED_NUM_50G | ETH_LINK_SPEED_100G;
+	dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
+			       ETH_LINK_SPEED_25G | ETH_LINK_SPEED_40G |
+			       ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G;
 
 	if (hw->cap & NFP_NET_CFG_CTRL_LSO)
 		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;
@@ -1467,6 +1467,13 @@ nfp_net_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
 	if ((mtu < ETHER_MIN_MTU) || ((uint32_t)mtu > hw->max_mtu))
 		return -EINVAL;
 
+	/* MTU setting is forbidden if the port is started */
+	if (dev->data->dev_started) {
+		PMD_DRV_LOG(ERR, "port %d must be stopped before configuration",
+			    dev->data->port_id);
+		return -EBUSY;
+	}
+
 	/* switch to jumbo mode if needed */
 	if ((uint32_t)mtu > ETHER_MAX_LEN)
 		dev->data->dev_conf.rxmode.jumbo_frame = 1;
@@ -1988,16 +1995,16 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 			break;
 		}
 
+		rxds = &rxq->rxds[rxq->rd_p];
+		if ((rxds->rxd.meta_len_dd & PCIE_DESC_RX_DD) == 0)
+			break;
+
 		/*
 		 * Memory barrier to ensure that we won't do other
 		 * reads before the DD bit.
 		 */
 		rte_rmb();
 
-		rxds = &rxq->rxds[rxq->rd_p];
-		if ((rxds->rxd.meta_len_dd & PCIE_DESC_RX_DD) == 0)
-			break;
-
 		/*
 		 * We got a packet. Let's alloc a new mbuf for refilling the
		 * free descriptor ring as soon as possible
@@ -2058,6 +2065,8 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 		mb->nb_segs = 1;
 		mb->next = NULL;
 
+		mb->port = rxq->port_id;
+
 		/* Checking the RSS flag */
 		nfp_net_set_hash(rxq, rxds, mb);
 
@@ -2283,11 +2292,15 @@ nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 				txq->wr_p = 0;
 
 			pkt_size -= dma_size;
-			if (!pkt_size)
-				/* End of packet */
-				txds->offset_eop |= PCIE_DESC_TX_EOP;
+
+			/*
+			 * Set EOP. Single-segment packets are the common
+			 * case, so treat them as the likely branch.
+			 */
+			if (likely(!pkt_size))
+				txds->offset_eop = PCIE_DESC_TX_EOP;
 			else
-				txds->offset_eop &= PCIE_DESC_TX_OFFSET_MASK;
+				txds->offset_eop = 0;
 
 			pkt = pkt->next;
 			/* Referencing next free TX descriptor */
@@ -2640,6 +2653,14 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
 
 	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
 
+	/* NFP cannot handle DMA addresses requiring more than 40 bits */
+	if (rte_eal_check_dma_mask(40) < 0) {
+		RTE_LOG(INFO, PMD, "device %s can not be used:",
+			pci_dev->device.name);
+		RTE_LOG(INFO, PMD, "\trestricted dma mask to 40 bits!\n");
+		return -ENODEV;
+	}
+
 	if ((pci_dev->id.device_id == PCI_DEVICE_ID_NFP4000_PF_NIC) ||
 	    (pci_dev->id.device_id == PCI_DEVICE_ID_NFP6000_PF_NIC)) {
 		port = get_pf_port_number(eth_dev->data->name);
@@ -2772,7 +2793,7 @@ nfp_net_init(struct rte_eth_dev *eth_dev)
 	hw->ver = nn_cfg_readl(hw, NFP_NET_CFG_VERSION);
 	hw->cap = nn_cfg_readl(hw, NFP_NET_CFG_CAP);
 	hw->max_mtu = nn_cfg_readl(hw, NFP_NET_CFG_MAX_MTU);
-	hw->mtu = hw->max_mtu;
+	hw->mtu = ETHER_MTU;
 
 	if (NFD_CFG_MAJOR_VERSION_of(hw->ver) < 2)
 		hw->rx_offset = NFP_NET_RX_OFFSET;
@@ -3036,14 +3057,16 @@ static int eth_nfp_pci_remove(struct rte_pci_device *pci_dev)
 
 static struct rte_pci_driver rte_nfp_net_pf_pmd = {
 	.id_table = pci_id_nfp_pf_net_map,
-	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
+		     RTE_PCI_DRV_IOVA_AS_VA,
 	.probe = nfp_pf_pci_probe,
 	.remove = eth_nfp_pci_remove,
 };
 
 static struct rte_pci_driver rte_nfp_net_vf_pmd = {
 	.id_table = pci_id_nfp_vf_net_map,
-	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
+		     RTE_PCI_DRV_IOVA_AS_VA,
 	.probe = eth_nfp_pci_probe,
 	.remove = eth_nfp_pci_remove,
 };
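
A note on the first hunk (@@ -301): rte_pktmbuf_free() frees an entire segment chain, while rte_pktmbuf_free_seg() frees only the segment it is given. The TX path stores one mbuf pointer per descriptor, so a multi-segment packet occupies several txbufs[] slots, and freeing the whole chain at the first slot would double-free the segments owned by the later slots. Below is a minimal sketch of that ownership rule, using a hypothetical toy_ring_release() helper rather than the driver's own teardown code:

#include <stdint.h>
#include <rte_mbuf.h>

/*
 * Hypothetical teardown helper mirroring the fixed hunk: each ring
 * slot owns exactly one segment of a possibly chained packet.
 */
static void
toy_ring_release(struct rte_mbuf **slots, uint16_t count)
{
	uint16_t i;

	for (i = 0; i < count; i++) {
		if (slots[i] == NULL)
			continue;
		/*
		 * rte_pktmbuf_free() here would also free the chained
		 * segments sitting in later slots, which this loop
		 * would then free a second time. Free one segment only.
		 */
		rte_pktmbuf_free_seg(slots[i]);
		slots[i] = NULL;
	}
}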
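The RX hunk at @@ -1988 reorders the descriptor-done test relative to the read barrier: the DD bit must be checked first, and rte_rmb() must sit between that check and any further reads of the descriptor, so the CPU cannot load fields the hardware has not finished writing. A minimal sketch of that pattern follows, with an invented toy_rx_desc in place of the real NFP descriptor layout:

#include <stdint.h>
#include <rte_atomic.h>

#define TOY_DESC_DD (1u << 0)	/* invented "descriptor done" flag */

struct toy_rx_desc {		/* illustrative, not the NFP layout */
	volatile uint32_t flags;
	uint32_t len;
	uint64_t addr;
};

static inline int
toy_desc_ready(struct toy_rx_desc *d, uint32_t *len)
{
	/* 1. Test DD first: is the descriptor complete at all? */
	if ((d->flags & TOY_DESC_DD) == 0)
		return 0;

	/* 2. Barrier: no later descriptor read may pass the DD read. */
	rte_rmb();

	/* 3. Only now are the remaining fields safe to consume. */
	*len = d->len;
	return 1;
}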
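On the speed_capa hunk (@@ -1244): the two macro families live in different value spaces. ETH_SPEED_NUM_* are plain numbers intended for rte_eth_link.link_speed (ETH_SPEED_NUM_10G is 10000), whereas speed_capa is a bitmap built from ETH_LINK_SPEED_* flags (ETH_LINK_SPEED_10G is 1 << 8), so OR-ing the numeric macros together yields a meaningless capability mask. A quick check, assuming the pre-18.11 macro names this tree uses:

#include <stdio.h>
#include <rte_ethdev.h>

int
main(void)
{
	/* Numeric link speed vs. capability bit: not interchangeable. */
	printf("ETH_SPEED_NUM_10G  = %d\n", ETH_SPEED_NUM_10G);	/* 10000 */
	printf("ETH_LINK_SPEED_10G = %d\n", ETH_LINK_SPEED_10G);	/* 256, i.e. 1 << 8 */
	return 0;
}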