for (i = 0; i < txq->tx_count; i++) {
if (txq->txbufs[i].mbuf) {
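/* txbufs[] holds one mbuf segment per TX descriptor */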
- rte_pktmbuf_free(txq->txbufs[i].mbuf);
+ rte_pktmbuf_free_seg(txq->txbufs[i].mbuf);
txq->txbufs[i].mbuf = NULL;
}
}
}
if (rxmode->jumbo_frame)
- /* this is handled in rte_eth_dev_configure */
+ hw->mtu = rxmode->max_rx_pkt_len;
- if (rxmode->hw_strip_crc) {
- PMD_INIT_LOG(INFO, "strip CRC not supported");
- return -EINVAL;
- }
+ if (!rxmode->hw_strip_crc)
+ PMD_INIT_LOG(INFO, "HW always strips CRC; this is not configurable");
if (rxmode->enable_scatter) {
PMD_INIT_LOG(INFO, "Scatter not supported");
/* RTE_ETHDEV_QUEUE_STAT_CNTRS default value is 16 */
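+ /* Clear the local copy so fields not written below are reported as zero */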
+ memset(&nfp_dev_stats, 0, sizeof(nfp_dev_stats));
+
/* reading per RX ring stats */
for (i = 0; i < dev->data->nb_rx_queues; i++) {
if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
dev_info->max_rx_queues = (uint16_t)hw->max_rx_queues;
dev_info->max_tx_queues = (uint16_t)hw->max_tx_queues;
dev_info->min_rx_bufsize = ETHER_MIN_MTU;
- dev_info->max_rx_pktlen = hw->mtu;
+ dev_info->max_rx_pktlen = hw->max_mtu;
/* Next field should change when PF support is implemented */
dev_info->max_mac_addrs = 1;
dev_info->reta_size = NFP_NET_CFG_RSS_ITBL_SZ;
dev_info->hash_key_size = NFP_NET_CFG_RSS_KEY_SZ;
- dev_info->speed_capa = ETH_SPEED_NUM_1G | ETH_LINK_SPEED_10G |
- ETH_SPEED_NUM_25G | ETH_SPEED_NUM_40G |
- ETH_SPEED_NUM_50G | ETH_LINK_SPEED_100G;
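+ /* speed_capa is a bitmap of ETH_LINK_SPEED_* capability flags, not ETH_SPEED_NUM_* values */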
+ dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
+ ETH_LINK_SPEED_25G | ETH_LINK_SPEED_40G |
+ ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G;
if (hw->cap & NFP_NET_CFG_CTRL_LSO)
dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;
if ((mtu < ETHER_MIN_MTU) || ((uint32_t)mtu > hw->max_mtu))
return -EINVAL;
+ /* The MTU cannot be changed while the port is started */
+ if (dev->data->dev_started) {
+ PMD_DRV_LOG(ERR, "port %d must be stopped before configuration",
+ dev->data->port_id);
+ return -EBUSY;
+ }
+
/* switch to jumbo mode if needed */
if ((uint32_t)mtu > ETHER_MAX_LEN)
dev->data->dev_conf.rxmode.jumbo_frame = 1;
break;
}
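+ /* DD is set by the hardware once the descriptor holds a received packet */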
+ rxds = &rxq->rxds[rxq->rd_p];
+ if ((rxds->rxd.meta_len_dd & PCIE_DESC_RX_DD) == 0)
+ break;
+
/*
* Memory barrier to ensure that no other reads are
* reordered before the DD bit check above.
*/
rte_rmb();
- rxds = &rxq->rxds[rxq->rd_p];
- if ((rxds->rxd.meta_len_dd & PCIE_DESC_RX_DD) == 0)
- break;
-
/*
* We got a packet. Allocate a new mbuf for refilling the
* free descriptor ring as soon as possible
mb->nb_segs = 1;
mb->next = NULL;
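+ /* Record the receiving port in the mbuf */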
+ mb->port = rxq->port_id;
+
/* Checking the RSS flag */
nfp_net_set_hash(rxq, rxds, mb);
txq->wr_p = 0;
pkt_size -= dma_size;
- if (!pkt_size)
- /* End of packet */
- txds->offset_eop |= PCIE_DESC_TX_EOP;
+
+ /*
+ * Set EOP, giving priority to the common case of
+ * packets with just one segment
+ */
+ if (likely(!pkt_size))
+ txds->offset_eop = PCIE_DESC_TX_EOP;
else
- txds->offset_eop &= PCIE_DESC_TX_OFFSET_MASK;
+ txds->offset_eop = 0;
pkt = pkt->next;
/* Referencing next free TX descriptor */
pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
+ /* NFP cannot handle DMA addresses requiring more than 40 bits */
+ if (rte_eal_check_dma_mask(40) < 0) {
+ RTE_LOG(INFO, PMD, "device %s cannot be used:",
+ pci_dev->device.name);
+ RTE_LOG(INFO, PMD, "\trestricted dma mask to 40 bits!\n");
+ return -ENODEV;
+ }
+
if ((pci_dev->id.device_id == PCI_DEVICE_ID_NFP4000_PF_NIC) ||
(pci_dev->id.device_id == PCI_DEVICE_ID_NFP6000_PF_NIC)) {
port = get_pf_port_number(eth_dev->data->name);
hw->ver = nn_cfg_readl(hw, NFP_NET_CFG_VERSION);
hw->cap = nn_cfg_readl(hw, NFP_NET_CFG_CAP);
hw->max_mtu = nn_cfg_readl(hw, NFP_NET_CFG_MAX_MTU);
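+ /* Default to the standard Ethernet MTU; hw->max_mtu keeps the device limit */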
- hw->mtu = hw->max_mtu;
+ hw->mtu = ETHER_MTU;
if (NFD_CFG_MAJOR_VERSION_of(hw->ver) < 2)
hw->rx_offset = NFP_NET_RX_OFFSET;
static struct rte_pci_driver rte_nfp_net_pf_pmd = {
.id_table = pci_id_nfp_pf_net_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
+ RTE_PCI_DRV_IOVA_AS_VA,
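+ /* The PMD supports running with IOVA as VA (see the 40-bit DMA mask check at init) */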
.probe = nfp_pf_pci_probe,
.remove = eth_nfp_pci_remove,
};
static struct rte_pci_driver rte_nfp_net_vf_pmd = {
.id_table = pci_id_nfp_vf_net_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
+ RTE_PCI_DRV_IOVA_AS_VA,
.probe = eth_nfp_pci_probe,
.remove = eth_nfp_pci_remove,
};