/*
 * Copyright (c) 2014-2018 Netronome Systems, Inc.
 * All rights reserved.
 *
 * Small portions derived from code Copyright(c) 2010-2015 Intel Corporation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *  this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *  notice, this list of conditions and the following disclaimer in the
 *  documentation and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its
 *  contributors may be used to endorse or promote products derived from this
 *  software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * vim:shiftwidth=8:noexpandtab
 *
 * @file dpdk/pmd/nfp_net.c
 *
 * Netronome vNIC DPDK Poll-Mode Driver: Main entry point
 */
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_dev.h>
#include <rte_ether.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_mempool.h>
#include <rte_version.h>
#include <rte_string_fns.h>
#include <rte_alarm.h>
#include <rte_spinlock.h>

#include "nfpcore/nfp_cpp.h"
#include "nfpcore/nfp_nffw.h"
#include "nfpcore/nfp_hwinfo.h"
#include "nfpcore/nfp_mip.h"
#include "nfpcore/nfp_rtsym.h"
#include "nfpcore/nfp_nsp.h"

#include "nfp_net_pmd.h"
#include "nfp_net_logs.h"
#include "nfp_net_ctrl.h"
/* Prototypes */
static void nfp_net_close(struct rte_eth_dev *dev);
static int nfp_net_configure(struct rte_eth_dev *dev);
static void nfp_net_dev_interrupt_handler(void *param);
static void nfp_net_dev_interrupt_delayed_handler(void *param);
static int nfp_net_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static void nfp_net_infos_get(struct rte_eth_dev *dev,
			      struct rte_eth_dev_info *dev_info);
static int nfp_net_init(struct rte_eth_dev *eth_dev);
static int nfp_net_link_update(struct rte_eth_dev *dev, int wait_to_complete);
static void nfp_net_promisc_enable(struct rte_eth_dev *dev);
static void nfp_net_promisc_disable(struct rte_eth_dev *dev);
static int nfp_net_rx_fill_freelist(struct nfp_net_rxq *rxq);
static uint32_t nfp_net_rx_queue_count(struct rte_eth_dev *dev,
				       uint16_t queue_idx);
static uint16_t nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
				  uint16_t nb_pkts);
static void nfp_net_rx_queue_release(void *rxq);
static int nfp_net_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
				  uint16_t nb_desc, unsigned int socket_id,
				  const struct rte_eth_rxconf *rx_conf,
				  struct rte_mempool *mp);
static int nfp_net_tx_free_bufs(struct nfp_net_txq *txq);
static void nfp_net_tx_queue_release(void *txq);
static int nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
				  uint16_t nb_desc, unsigned int socket_id,
				  const struct rte_eth_txconf *tx_conf);
static int nfp_net_start(struct rte_eth_dev *dev);
static int nfp_net_stats_get(struct rte_eth_dev *dev,
			     struct rte_eth_stats *stats);
static void nfp_net_stats_reset(struct rte_eth_dev *dev);
static void nfp_net_stop(struct rte_eth_dev *dev);
static uint16_t nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
				  uint16_t nb_pkts);

static int nfp_net_rss_config_default(struct rte_eth_dev *dev);
static int nfp_net_rss_hash_update(struct rte_eth_dev *dev,
				   struct rte_eth_rss_conf *rss_conf);
static int nfp_net_rss_reta_write(struct rte_eth_dev *dev,
				  struct rte_eth_rss_reta_entry64 *reta_conf,
				  uint16_t reta_size);
static int nfp_net_rss_hash_write(struct rte_eth_dev *dev,
				  struct rte_eth_rss_conf *rss_conf);
static int nfp_set_mac_addr(struct rte_eth_dev *dev,
			    struct ether_addr *mac_addr);
/* The offset of the queue controller queues in the PCIe Target */
#define NFP_PCIE_QUEUE(_q) (0x80000 + (NFP_QCP_QUEUE_ADDR_SZ * ((_q) & 0xff)))

/* Maximum value which can be added to a queue with one transaction */
#define NFP_QCP_MAX_ADD	0x7f

#define RTE_MBUF_DMA_ADDR_DEFAULT(mb) \
	(uint64_t)((mb)->buf_iova + RTE_PKTMBUF_HEADROOM)

/* nfp_qcp_ptr - Read or Write Pointer of a queue */
enum nfp_qcp_ptr {
	NFP_QCP_READ_PTR = 0,
	NFP_QCP_WRITE_PTR,
};
/*
 * nfp_qcp_ptr_add - Add the value to the selected pointer of a queue
 * @q: Base address for queue structure
 * @ptr: Add to the Read or Write pointer
 * @val: Value to add to the queue pointer
 *
 * If @val is greater than @NFP_QCP_MAX_ADD multiple writes are performed.
 */
static inline void
nfp_qcp_ptr_add(uint8_t *q, enum nfp_qcp_ptr ptr, uint32_t val)
{
	uint32_t off;

	if (ptr == NFP_QCP_READ_PTR)
		off = NFP_QCP_QUEUE_ADD_RPTR;
	else
		off = NFP_QCP_QUEUE_ADD_WPTR;

	while (val > NFP_QCP_MAX_ADD) {
		nn_writel(rte_cpu_to_le_32(NFP_QCP_MAX_ADD), q + off);
		val -= NFP_QCP_MAX_ADD;
	}

	nn_writel(rte_cpu_to_le_32(val), q + off);
}
/*
 * nfp_qcp_read - Read the current Read/Write pointer value for a queue
 * @q: Base address for queue structure
 * @ptr: Read or Write pointer
 */
static inline uint32_t
nfp_qcp_read(uint8_t *q, enum nfp_qcp_ptr ptr)
{
	uint32_t off;
	uint32_t val;

	if (ptr == NFP_QCP_READ_PTR)
		off = NFP_QCP_QUEUE_STS_LO;
	else
		off = NFP_QCP_QUEUE_STS_HI;

	val = rte_cpu_to_le_32(nn_readl(q + off));

	if (ptr == NFP_QCP_READ_PTR)
		return val & NFP_QCP_QUEUE_STS_LO_READPTR_mask;
	else
		return val & NFP_QCP_QUEUE_STS_HI_WRITEPTR_mask;
}
/*
 * Functions to read/write from/to Config BAR
 * Performs any endian conversion necessary.
 */
static inline uint8_t
nn_cfg_readb(struct nfp_net_hw *hw, int off)
{
	return nn_readb(hw->ctrl_bar + off);
}

static inline void
nn_cfg_writeb(struct nfp_net_hw *hw, int off, uint8_t val)
{
	nn_writeb(val, hw->ctrl_bar + off);
}

static inline uint32_t
nn_cfg_readl(struct nfp_net_hw *hw, int off)
{
	return rte_le_to_cpu_32(nn_readl(hw->ctrl_bar + off));
}

static inline void
nn_cfg_writel(struct nfp_net_hw *hw, int off, uint32_t val)
{
	nn_writel(rte_cpu_to_le_32(val), hw->ctrl_bar + off);
}

static inline uint64_t
nn_cfg_readq(struct nfp_net_hw *hw, int off)
{
	return rte_le_to_cpu_64(nn_readq(hw->ctrl_bar + off));
}

static inline void
nn_cfg_writeq(struct nfp_net_hw *hw, int off, uint64_t val)
{
	nn_writeq(rte_cpu_to_le_64(val), hw->ctrl_bar + off);
}

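/*
 * Queue cleanup helpers: release the mbufs attached to an RX/TX ring, free
 * the ring structures, and reset the software ring pointers.
 */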
static void
nfp_net_rx_queue_release_mbufs(struct nfp_net_rxq *rxq)
{
	unsigned i;

	if (rxq->rxbufs == NULL)
		return;

	for (i = 0; i < rxq->rx_count; i++) {
		if (rxq->rxbufs[i].mbuf) {
			rte_pktmbuf_free_seg(rxq->rxbufs[i].mbuf);
			rxq->rxbufs[i].mbuf = NULL;
		}
	}
}

static void
nfp_net_rx_queue_release(void *rx_queue)
{
	struct nfp_net_rxq *rxq = rx_queue;

	if (rxq) {
		nfp_net_rx_queue_release_mbufs(rxq);
		rte_free(rxq->rxbufs);
		rte_free(rxq);
	}
}

static void
nfp_net_reset_rx_queue(struct nfp_net_rxq *rxq)
{
	nfp_net_rx_queue_release_mbufs(rxq);
	rxq->rd_p = 0;
	rxq->nb_rx_hold = 0;
}

static void
nfp_net_tx_queue_release_mbufs(struct nfp_net_txq *txq)
{
	unsigned i;

	if (txq->txbufs == NULL)
		return;

	for (i = 0; i < txq->tx_count; i++) {
		if (txq->txbufs[i].mbuf) {
			rte_pktmbuf_free_seg(txq->txbufs[i].mbuf);
			txq->txbufs[i].mbuf = NULL;
		}
	}
}

static void
nfp_net_tx_queue_release(void *tx_queue)
{
	struct nfp_net_txq *txq = tx_queue;

	if (txq) {
		nfp_net_tx_queue_release_mbufs(txq);
		rte_free(txq->txbufs);
		rte_free(txq);
	}
}

static void
nfp_net_reset_tx_queue(struct nfp_net_txq *txq)
{
	nfp_net_tx_queue_release_mbufs(txq);
	txq->wr_p = 0;
	txq->rd_p = 0;
}

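/*
 * __nfp_net_reconfig - Kick the configuration queue and poll the UPDATE word
 * until the firmware acknowledges (zeroes) it, or until an error/timeout is
 * reported. Caller must hold hw->reconfig_lock.
 */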
static int
__nfp_net_reconfig(struct nfp_net_hw *hw, uint32_t update)
{
	int cnt;
	uint32_t new;
	struct timespec wait;

	PMD_DRV_LOG(DEBUG, "Writing to the configuration queue (%p)...",
		    hw->qcp_cfg);

	if (hw->qcp_cfg == NULL)
		rte_panic("Bad configuration queue pointer\n");

	nfp_qcp_ptr_add(hw->qcp_cfg, NFP_QCP_WRITE_PTR, 1);

	wait.tv_sec = 0;
	wait.tv_nsec = 1000000;

	PMD_DRV_LOG(DEBUG, "Polling for update ack...");

	/* Poll update field, waiting for NFP to ack the config */
	for (cnt = 0; ; cnt++) {
		new = nn_cfg_readl(hw, NFP_NET_CFG_UPDATE);
		if (new == 0)
			break;
		if (new & NFP_NET_CFG_UPDATE_ERR) {
			PMD_INIT_LOG(ERR, "Reconfig error: 0x%08x", new);
			return -1;
		}
		if (cnt >= NFP_NET_POLL_TIMEOUT) {
			PMD_INIT_LOG(ERR, "Reconfig timeout for 0x%08x after"
					  " %dms", update, cnt);
			rte_panic("Exiting\n");
		}
		nanosleep(&wait, 0); /* waiting for a 1ms */
	}

	PMD_DRV_LOG(DEBUG, "Ack DONE");
	return 0;
}

/*
 * Reconfigure the NIC
 * @nn:    device to reconfigure
 * @ctrl:    The value for the ctrl field in the BAR config
 * @update:  The value for the update field in the BAR config
 *
 * Write the update word to the BAR and ping the reconfig queue. Then poll
 * until the firmware has acknowledged the update by zeroing the update word.
 */
static int
nfp_net_reconfig(struct nfp_net_hw *hw, uint32_t ctrl, uint32_t update)
{
	uint32_t err;

	PMD_DRV_LOG(DEBUG, "nfp_net_reconfig: ctrl=%08x update=%08x",
		    ctrl, update);

	rte_spinlock_lock(&hw->reconfig_lock);

	nn_cfg_writel(hw, NFP_NET_CFG_CTRL, ctrl);
	nn_cfg_writel(hw, NFP_NET_CFG_UPDATE, update);

	rte_wmb();

	err = __nfp_net_reconfig(hw, update);

	rte_spinlock_unlock(&hw->reconfig_lock);

	if (!err)
		return 0;

	/*
	 * Reconfig errors returned here are ones the caller can handle.
	 * Otherwise, rte_panic is called inside __nfp_net_reconfig.
	 */
	PMD_INIT_LOG(ERR, "Error nfp_net reconfig for ctrl: %x update: %x",
		     ctrl, update);
	return -EIO;
}

/*
 * Configure an Ethernet device. This function must be invoked first
 * before any other function in the Ethernet API. This function can
 * also be re-invoked when a device is in the stopped state.
 */
static int
nfp_net_configure(struct rte_eth_dev *dev)
{
	struct rte_eth_conf *dev_conf;
	struct rte_eth_rxmode *rxmode;
	struct rte_eth_txmode *txmode;
	struct nfp_net_hw *hw;

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/*
	 * A DPDK app sends info about how many queues to use and how
	 * those queues need to be configured. This is used by the
	 * DPDK core and it makes sure no more queues than those
	 * advertised by the driver are requested. This function is
	 * called after that internal process.
	 */

	PMD_INIT_LOG(DEBUG, "Configure");

	dev_conf = &dev->data->dev_conf;
	rxmode = &dev_conf->rxmode;
	txmode = &dev_conf->txmode;

	/* Checking TX mode */
	if (txmode->mq_mode) {
		PMD_INIT_LOG(INFO, "TX mq_mode DCB and VMDq not supported");
		return -EINVAL;
	}

	/* Checking RX mode */
	if (rxmode->mq_mode & ETH_MQ_RX_RSS &&
	    !(hw->cap & NFP_NET_CFG_CTRL_RSS)) {
		PMD_INIT_LOG(INFO, "RSS not supported");
		return -EINVAL;
	}

	return 0;
}

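/*
 * Enable/disable the TX and RX ring masks in the device. The disable path
 * also clears the ENABLE bit through a firmware reconfig.
 */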
static void
nfp_net_enable_queues(struct rte_eth_dev *dev)
{
	struct nfp_net_hw *hw;
	uint64_t enabled_queues = 0;
	int i;

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* Enabling the required TX queues in the device */
	for (i = 0; i < dev->data->nb_tx_queues; i++)
		enabled_queues |= (1 << i);

	nn_cfg_writeq(hw, NFP_NET_CFG_TXRS_ENABLE, enabled_queues);

	enabled_queues = 0;

	/* Enabling the required RX queues in the device */
	for (i = 0; i < dev->data->nb_rx_queues; i++)
		enabled_queues |= (1 << i);

	nn_cfg_writeq(hw, NFP_NET_CFG_RXRS_ENABLE, enabled_queues);
}

static void
nfp_net_disable_queues(struct rte_eth_dev *dev)
{
	struct nfp_net_hw *hw;
	uint32_t new_ctrl, update = 0;

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	nn_cfg_writeq(hw, NFP_NET_CFG_TXRS_ENABLE, 0);
	nn_cfg_writeq(hw, NFP_NET_CFG_RXRS_ENABLE, 0);

	new_ctrl = hw->ctrl & ~NFP_NET_CFG_CTRL_ENABLE;
	update = NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING |
		 NFP_NET_CFG_UPDATE_MSIX;

	if (hw->cap & NFP_NET_CFG_CTRL_RINGCFG)
		new_ctrl &= ~NFP_NET_CFG_CTRL_RINGCFG;

	/* If the reconfig fails, avoid changing the cached hw state */
	if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
		return;

	hw->ctrl = new_ctrl;
}

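/* Populate the free lists of all configured RX queues with mbufs. */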
static int
nfp_net_rx_freelist_setup(struct rte_eth_dev *dev)
{
	int i;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		if (nfp_net_rx_fill_freelist(dev->data->rx_queues[i]) < 0)
			return -1;
	}
	return 0;
}

static void
nfp_net_params_setup(struct nfp_net_hw *hw)
{
	nn_cfg_writel(hw, NFP_NET_CFG_MTU, hw->mtu);
	nn_cfg_writel(hw, NFP_NET_CFG_FLBUFSZ, hw->flbufsz);
}

static void
nfp_net_cfg_queue_setup(struct nfp_net_hw *hw)
{
	hw->qcp_cfg = hw->tx_bar + NFP_QCP_QUEUE_ADDR_SZ;
}

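/*
 * MAC address helpers: read the port MAC from the NFP eth table (PF) or from
 * the config BAR (VF), and write a MAC back to the per-port BAR address.
 */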
#define ETH_ADDR_LEN	6

static void
nfp_eth_copy_mac(uint8_t *dst, const uint8_t *src)
{
	int i;

	for (i = 0; i < ETH_ADDR_LEN; i++)
		dst[i] = src[i];
}

static int
nfp_net_pf_read_mac(struct nfp_net_hw *hw, int port)
{
	struct nfp_eth_table *nfp_eth_table;

	nfp_eth_table = nfp_eth_read_ports(hw->cpp);
	/*
	 * hw points to port0 private data. We need hw now pointing to
	 * right port private data.
	 */
	hw += port;
	nfp_eth_copy_mac((uint8_t *)&hw->mac_addr,
			 (uint8_t *)&nfp_eth_table->ports[port].mac_addr);

	free(nfp_eth_table);
	return 0;
}

static void
nfp_net_vf_read_mac(struct nfp_net_hw *hw)
{
	uint32_t tmp;

	tmp = rte_be_to_cpu_32(nn_cfg_readl(hw, NFP_NET_CFG_MACADDR));
	memcpy(&hw->mac_addr[0], &tmp, 4);

	tmp = rte_be_to_cpu_32(nn_cfg_readl(hw, NFP_NET_CFG_MACADDR + 4));
	memcpy(&hw->mac_addr[4], &tmp, 2);
}

static void
nfp_net_write_mac(struct nfp_net_hw *hw, uint8_t *mac)
{
	uint32_t mac0 = *(uint32_t *)mac;
	uint16_t mac1;

	nn_writel(rte_cpu_to_be_32(mac0), hw->ctrl_bar + NFP_NET_CFG_MACADDR);

	mac += 4;
	mac1 = *(uint16_t *)mac;
	nn_writew(rte_cpu_to_be_16(mac1),
		  hw->ctrl_bar + NFP_NET_CFG_MACADDR + 6);
}

static int
nfp_set_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
{
	struct nfp_net_hw *hw;
	uint32_t update, ctrl;

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	if ((hw->ctrl & NFP_NET_CFG_CTRL_ENABLE) &&
	    !(hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR)) {
		PMD_INIT_LOG(INFO, "MAC address unable to change when"
				   " port enabled");
		return -EBUSY;
	}

	if ((hw->ctrl & NFP_NET_CFG_CTRL_ENABLE) &&
	    !(hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR))
		return -EBUSY;

	/* Writing new MAC to the specific port BAR address */
	nfp_net_write_mac(hw, (uint8_t *)mac_addr);

	/* Signal the NIC about the change */
	update = NFP_NET_CFG_UPDATE_MACADDR;
	ctrl = hw->ctrl;
	if ((hw->ctrl & NFP_NET_CFG_CTRL_ENABLE) &&
	    (hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR))
		ctrl |= NFP_NET_CFG_CTRL_LIVE_ADDR;
	if (nfp_net_reconfig(hw, ctrl, update) < 0) {
		PMD_INIT_LOG(INFO, "MAC address update failed");
		return -EIO;
	}

	return 0;
}

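/*
 * Map RX queues to interrupt vectors. With UIO only one queue and no LSC are
 * supported; with VFIO vector 0 is kept for non-efd interrupts and queue i
 * uses vector i + 1.
 */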
static int
nfp_configure_rx_interrupt(struct rte_eth_dev *dev,
			   struct rte_intr_handle *intr_handle)
{
	struct nfp_net_hw *hw;
	int i;

	if (!intr_handle->intr_vec) {
		intr_handle->intr_vec =
			rte_zmalloc("intr_vec",
				    dev->data->nb_rx_queues * sizeof(int), 0);
		if (!intr_handle->intr_vec) {
			PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
				     " intr_vec", dev->data->nb_rx_queues);
			return -ENOMEM;
		}
	}

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (intr_handle->type == RTE_INTR_HANDLE_UIO) {
		PMD_INIT_LOG(INFO, "VF: enabling RX interrupt with UIO");
		/* UIO just supports one queue and no LSC */
		nn_cfg_writeb(hw, NFP_NET_CFG_RXR_VEC(0), 0);
		intr_handle->intr_vec[0] = 0;
	} else {
		PMD_INIT_LOG(INFO, "VF: enabling RX interrupt with VFIO");
		for (i = 0; i < dev->data->nb_rx_queues; i++) {
			/*
			 * The first msix vector is reserved for non
			 * efd interrupts
			 */
			nn_cfg_writeb(hw, NFP_NET_CFG_RXR_VEC(i), i + 1);
			intr_handle->intr_vec[i] = i + 1;
			PMD_INIT_LOG(DEBUG, "intr_vec[%d]= %d", i,
					    intr_handle->intr_vec[i]);
		}
	}

	/* Avoiding TX interrupts */
	hw->ctrl |= NFP_NET_CFG_CTRL_MSIX_TX_OFF;
	return 0;
}

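/* Translate the requested rte_eth offloads into NFP_NET_CFG_CTRL_* bits. */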
static uint32_t
nfp_check_offloads(struct rte_eth_dev *dev)
{
	struct nfp_net_hw *hw;
	struct rte_eth_conf *dev_conf;
	struct rte_eth_rxmode *rxmode;
	struct rte_eth_txmode *txmode;
	uint32_t ctrl = 0;

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	dev_conf = &dev->data->dev_conf;
	rxmode = &dev_conf->rxmode;
	txmode = &dev_conf->txmode;

	if (rxmode->offloads & DEV_RX_OFFLOAD_IPV4_CKSUM) {
		if (hw->cap & NFP_NET_CFG_CTRL_RXCSUM)
			ctrl |= NFP_NET_CFG_CTRL_RXCSUM;
	}

	if (rxmode->offloads & DEV_RX_OFFLOAD_VLAN_STRIP) {
		if (hw->cap & NFP_NET_CFG_CTRL_RXVLAN)
			ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
	}

	if (rxmode->offloads & DEV_RX_OFFLOAD_JUMBO_FRAME)
		hw->mtu = rxmode->max_rx_pkt_len;

	if (txmode->offloads & DEV_TX_OFFLOAD_VLAN_INSERT)
		ctrl |= NFP_NET_CFG_CTRL_TXVLAN;

	/* L2 broadcast */
	if (hw->cap & NFP_NET_CFG_CTRL_L2BC)
		ctrl |= NFP_NET_CFG_CTRL_L2BC;

	/* L2 multicast */
	if (hw->cap & NFP_NET_CFG_CTRL_L2MC)
		ctrl |= NFP_NET_CFG_CTRL_L2MC;

	/* TX checksum offload */
	if (txmode->offloads & DEV_TX_OFFLOAD_IPV4_CKSUM ||
	    txmode->offloads & DEV_TX_OFFLOAD_UDP_CKSUM ||
	    txmode->offloads & DEV_TX_OFFLOAD_TCP_CKSUM)
		ctrl |= NFP_NET_CFG_CTRL_TXCSUM;

	/* LSO offload */
	if (txmode->offloads & DEV_TX_OFFLOAD_TCP_TSO) {
		if (hw->cap & NFP_NET_CFG_CTRL_LSO)
			ctrl |= NFP_NET_CFG_CTRL_LSO;
		else
			ctrl |= NFP_NET_CFG_CTRL_LSO2;
	}

	/* RX gather */
	if (txmode->offloads & DEV_TX_OFFLOAD_MULTI_SEGS)
		ctrl |= NFP_NET_CFG_CTRL_GATHER;

	return ctrl;
}

static int
nfp_net_start(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
	uint32_t new_ctrl, update = 0;
	struct nfp_net_hw *hw;
	struct rte_eth_conf *dev_conf;
	struct rte_eth_rxmode *rxmode;
	uint32_t intr_vector;
	int ret;

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_LOG(DEBUG, "Start");

	/* Disabling queues just in case... */
	nfp_net_disable_queues(dev);

	/* Enabling the required queues in the device */
	nfp_net_enable_queues(dev);

	/* check and configure queue intr-vector mapping */
	if (dev->data->dev_conf.intr_conf.rxq != 0) {
		if (hw->pf_multiport_enabled) {
			PMD_INIT_LOG(ERR, "PMD rx interrupt is not supported "
					  "with NFP multiport PF");
			return -EINVAL;
		}
		if (intr_handle->type == RTE_INTR_HANDLE_UIO) {
			/*
			 * Better not to share LSC with RX interrupts.
			 * Unregistering LSC interrupt handler
			 */
			rte_intr_callback_unregister(&pci_dev->intr_handle,
				nfp_net_dev_interrupt_handler, (void *)dev);

			if (dev->data->nb_rx_queues > 1) {
				PMD_INIT_LOG(ERR, "PMD rx interrupt only "
					     "supports 1 queue with UIO");
				return -EIO;
			}
		}
		intr_vector = dev->data->nb_rx_queues;
		if (rte_intr_efd_enable(intr_handle, intr_vector))
			return -1;

		nfp_configure_rx_interrupt(dev, intr_handle);
		update = NFP_NET_CFG_UPDATE_MSIX;
	}

	rte_intr_enable(intr_handle);

	new_ctrl = nfp_check_offloads(dev);

	/* Writing configuration parameters in the device */
	nfp_net_params_setup(hw);

	dev_conf = &dev->data->dev_conf;
	rxmode = &dev_conf->rxmode;

	if (rxmode->mq_mode & ETH_MQ_RX_RSS) {
		nfp_net_rss_config_default(dev);
		update |= NFP_NET_CFG_UPDATE_RSS;
		new_ctrl |= NFP_NET_CFG_CTRL_RSS;
	}

	/* Enable device */
	new_ctrl |= NFP_NET_CFG_CTRL_ENABLE;

	update |= NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING;

	if (hw->cap & NFP_NET_CFG_CTRL_RINGCFG)
		new_ctrl |= NFP_NET_CFG_CTRL_RINGCFG;

	nn_cfg_writel(hw, NFP_NET_CFG_CTRL, new_ctrl);
	if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
		return -EIO;

	/*
	 * Allocating rte mbufs for configured rx queues.
	 * This requires queues being enabled before.
	 */
	if (nfp_net_rx_freelist_setup(dev) < 0) {
		ret = -ENOMEM;
		goto error;
	}

	if (hw->is_pf)
		/* Configure the physical port up */
		nfp_eth_set_configured(hw->cpp, hw->pf_port_idx, 1);

	hw->ctrl = new_ctrl;

	return 0;

error:
	/*
	 * An error returned by this function should mean the app
	 * exiting and then the system releasing all the memory
	 * allocated even memory coming from hugepages.
	 *
	 * The device could be enabled at this point with some queues
	 * ready for getting packets. This is true if the call to
	 * nfp_net_rx_freelist_setup() succeeds for some queues but
	 * fails for subsequent queues.
	 *
	 * This should make the app exit but better if we tell the
	 * device first.
	 */
	nfp_net_disable_queues(dev);

	return ret;
}

/* Stop device: disable rx and tx functions to allow for reconfiguring. */
static void
nfp_net_stop(struct rte_eth_dev *dev)
{
	int i;
	struct nfp_net_hw *hw;

	PMD_INIT_LOG(DEBUG, "Stop");

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	nfp_net_disable_queues(dev);

	/* Clear queues */
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		nfp_net_reset_tx_queue(
			(struct nfp_net_txq *)dev->data->tx_queues[i]);
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		nfp_net_reset_rx_queue(
			(struct nfp_net_rxq *)dev->data->rx_queues[i]);
	}

	if (hw->is_pf)
		/* Configure the physical port down */
		nfp_eth_set_configured(hw->cpp, hw->pf_port_idx, 0);
}

/* Reset and stop device. The device can not be restarted. */
static void
nfp_net_close(struct rte_eth_dev *dev)
{
	struct nfp_net_hw *hw;
	struct rte_pci_device *pci_dev;
	int i;

	PMD_INIT_LOG(DEBUG, "Close");

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	/*
	 * We assume that the DPDK application is stopping all the
	 * threads/queues before calling the device close function.
	 */

	nfp_net_disable_queues(dev);

	/* Clear queues */
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		nfp_net_reset_tx_queue(
			(struct nfp_net_txq *)dev->data->tx_queues[i]);
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		nfp_net_reset_rx_queue(
			(struct nfp_net_rxq *)dev->data->rx_queues[i]);
	}

	rte_intr_disable(&pci_dev->intr_handle);
	nn_cfg_writeb(hw, NFP_NET_CFG_LSC, 0xff);

	/* unregister callback func from eal lib */
	rte_intr_callback_unregister(&pci_dev->intr_handle,
				     nfp_net_dev_interrupt_handler,
				     (void *)dev);

	/*
	 * The ixgbe PMD driver disables the pcie master on the
	 * device. The i40e does not...
	 */
}

static void
nfp_net_promisc_enable(struct rte_eth_dev *dev)
{
	uint32_t new_ctrl, update = 0;
	struct nfp_net_hw *hw;

	PMD_DRV_LOG(DEBUG, "Promiscuous mode enable");

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (!(hw->cap & NFP_NET_CFG_CTRL_PROMISC)) {
		PMD_INIT_LOG(INFO, "Promiscuous mode not supported");
		return;
	}

	if (hw->ctrl & NFP_NET_CFG_CTRL_PROMISC) {
		PMD_DRV_LOG(INFO, "Promiscuous mode already enabled");
		return;
	}

	new_ctrl = hw->ctrl | NFP_NET_CFG_CTRL_PROMISC;
	update = NFP_NET_CFG_UPDATE_GEN;

	/*
	 * DPDK sets promiscuous mode on just after this call assuming
	 * it can not fail ...
	 */
	if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
		return;

	hw->ctrl = new_ctrl;
}

static void
nfp_net_promisc_disable(struct rte_eth_dev *dev)
{
	uint32_t new_ctrl, update = 0;
	struct nfp_net_hw *hw;

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if ((hw->ctrl & NFP_NET_CFG_CTRL_PROMISC) == 0) {
		PMD_DRV_LOG(INFO, "Promiscuous mode already disabled");
		return;
	}

	new_ctrl = hw->ctrl & ~NFP_NET_CFG_CTRL_PROMISC;
	update = NFP_NET_CFG_UPDATE_GEN;

	/*
	 * DPDK sets promiscuous mode off just before this call
	 * assuming it can not fail ...
	 */
	if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
		return;

	hw->ctrl = new_ctrl;
}

/*
 * return 0 means link status changed, -1 means not changed
 *
 * Wait to complete is needed as it can take up to 9 seconds to get the Link
 * status.
 */
static int
nfp_net_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
{
	struct nfp_net_hw *hw;
	struct rte_eth_link link;
	uint32_t nn_link_status;
	int ret;

	static const uint32_t ls_to_ethtool[] = {
		[NFP_NET_CFG_STS_LINK_RATE_UNSUPPORTED] = ETH_SPEED_NUM_NONE,
		[NFP_NET_CFG_STS_LINK_RATE_UNKNOWN]     = ETH_SPEED_NUM_NONE,
		[NFP_NET_CFG_STS_LINK_RATE_1G]          = ETH_SPEED_NUM_1G,
		[NFP_NET_CFG_STS_LINK_RATE_10G]         = ETH_SPEED_NUM_10G,
		[NFP_NET_CFG_STS_LINK_RATE_25G]         = ETH_SPEED_NUM_25G,
		[NFP_NET_CFG_STS_LINK_RATE_40G]         = ETH_SPEED_NUM_40G,
		[NFP_NET_CFG_STS_LINK_RATE_50G]         = ETH_SPEED_NUM_50G,
		[NFP_NET_CFG_STS_LINK_RATE_100G]        = ETH_SPEED_NUM_100G,
	};

	PMD_DRV_LOG(DEBUG, "Link update");

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	nn_link_status = nn_cfg_readl(hw, NFP_NET_CFG_STS);

	memset(&link, 0, sizeof(struct rte_eth_link));

	if (nn_link_status & NFP_NET_CFG_STS_LINK)
		link.link_status = ETH_LINK_UP;

	link.link_duplex = ETH_LINK_FULL_DUPLEX;

	nn_link_status = (nn_link_status >> NFP_NET_CFG_STS_LINK_RATE_SHIFT) &
			 NFP_NET_CFG_STS_LINK_RATE_MASK;

	if (nn_link_status >= RTE_DIM(ls_to_ethtool))
		link.link_speed = ETH_SPEED_NUM_NONE;
	else
		link.link_speed = ls_to_ethtool[nn_link_status];

	ret = rte_eth_linkstatus_set(dev, &link);
	if (ret == 0) {
		if (link.link_status)
			PMD_DRV_LOG(INFO, "NIC Link is Up");
		else
			PMD_DRV_LOG(INFO, "NIC Link is Down");
	}
	return ret;
}

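/*
 * Read device and per-queue counters from the config BAR and report them
 * relative to the snapshot taken at the last stats reset.
 */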
static int
nfp_net_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	int i;
	struct nfp_net_hw *hw;
	struct rte_eth_stats nfp_dev_stats;

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* RTE_ETHDEV_QUEUE_STAT_CNTRS default value is 16 */

	memset(&nfp_dev_stats, 0, sizeof(nfp_dev_stats));

	/* reading per RX ring stats */
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
			break;

		nfp_dev_stats.q_ipackets[i] =
			nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i));
		nfp_dev_stats.q_ipackets[i] -=
			hw->eth_stats_base.q_ipackets[i];

		nfp_dev_stats.q_ibytes[i] =
			nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i) + 0x8);
		nfp_dev_stats.q_ibytes[i] -=
			hw->eth_stats_base.q_ibytes[i];
	}

	/* reading per TX ring stats */
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
			break;

		nfp_dev_stats.q_opackets[i] =
			nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i));
		nfp_dev_stats.q_opackets[i] -=
			hw->eth_stats_base.q_opackets[i];

		nfp_dev_stats.q_obytes[i] =
			nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i) + 0x8);
		nfp_dev_stats.q_obytes[i] -=
			hw->eth_stats_base.q_obytes[i];
	}

	nfp_dev_stats.ipackets =
		nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_FRAMES);
	nfp_dev_stats.ipackets -= hw->eth_stats_base.ipackets;

	nfp_dev_stats.ibytes =
		nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_OCTETS);
	nfp_dev_stats.ibytes -= hw->eth_stats_base.ibytes;

	nfp_dev_stats.opackets =
		nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_FRAMES);
	nfp_dev_stats.opackets -= hw->eth_stats_base.opackets;

	nfp_dev_stats.obytes =
		nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_OCTETS);
	nfp_dev_stats.obytes -= hw->eth_stats_base.obytes;

	/* reading general device stats */
	nfp_dev_stats.ierrors =
		nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_ERRORS);
	nfp_dev_stats.ierrors -= hw->eth_stats_base.ierrors;

	nfp_dev_stats.oerrors =
		nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_ERRORS);
	nfp_dev_stats.oerrors -= hw->eth_stats_base.oerrors;

	/* RX ring mbuf allocation failures */
	nfp_dev_stats.rx_nombuf = dev->data->rx_mbuf_alloc_failed;

	nfp_dev_stats.imissed =
		nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_DISCARDS);
	nfp_dev_stats.imissed -= hw->eth_stats_base.imissed;

	if (stats) {
		memcpy(stats, &nfp_dev_stats, sizeof(*stats));
		return 0;
	}
	return -EINVAL;
}

static void
nfp_net_stats_reset(struct rte_eth_dev *dev)
{
	int i;
	struct nfp_net_hw *hw;

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/*
	 * hw->eth_stats_base records the per counter starting point.
	 * Lets update it now.
	 */

	/* reading per RX ring stats */
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
			break;

		hw->eth_stats_base.q_ipackets[i] =
			nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i));

		hw->eth_stats_base.q_ibytes[i] =
			nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i) + 0x8);
	}

	/* reading per TX ring stats */
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
			break;

		hw->eth_stats_base.q_opackets[i] =
			nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i));

		hw->eth_stats_base.q_obytes[i] =
			nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i) + 0x8);
	}

	hw->eth_stats_base.ipackets =
		nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_FRAMES);

	hw->eth_stats_base.ibytes =
		nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_OCTETS);

	hw->eth_stats_base.opackets =
		nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_FRAMES);

	hw->eth_stats_base.obytes =
		nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_OCTETS);

	/* reading general device stats */
	hw->eth_stats_base.ierrors =
		nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_ERRORS);

	hw->eth_stats_base.oerrors =
		nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_ERRORS);

	/* RX ring mbuf allocation failures */
	dev->data->rx_mbuf_alloc_failed = 0;

	hw->eth_stats_base.imissed =
		nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_DISCARDS);
}

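/* Report device limits, default thresholds and supported offloads. */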
static void
nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct nfp_net_hw *hw;

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	dev_info->max_rx_queues = (uint16_t)hw->max_rx_queues;
	dev_info->max_tx_queues = (uint16_t)hw->max_tx_queues;
	dev_info->min_rx_bufsize = ETHER_MIN_MTU;
	dev_info->max_rx_pktlen = hw->max_mtu;
	/* Next should change when PF support is implemented */
	dev_info->max_mac_addrs = 1;

	if (hw->cap & NFP_NET_CFG_CTRL_RXVLAN)
		dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;

	if (hw->cap & NFP_NET_CFG_CTRL_RXCSUM)
		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_IPV4_CKSUM |
					     DEV_RX_OFFLOAD_UDP_CKSUM |
					     DEV_RX_OFFLOAD_TCP_CKSUM;

	dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_JUMBO_FRAME;

	if (hw->cap & NFP_NET_CFG_CTRL_TXVLAN)
		dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT;

	if (hw->cap & NFP_NET_CFG_CTRL_TXCSUM)
		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_IPV4_CKSUM |
					     DEV_TX_OFFLOAD_UDP_CKSUM |
					     DEV_TX_OFFLOAD_TCP_CKSUM;

	if (hw->cap & NFP_NET_CFG_CTRL_LSO_ANY)
		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;

	if (hw->cap & NFP_NET_CFG_CTRL_GATHER)
		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_MULTI_SEGS;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_thresh = {
			.pthresh = DEFAULT_RX_PTHRESH,
			.hthresh = DEFAULT_RX_HTHRESH,
			.wthresh = DEFAULT_RX_WTHRESH,
		},
		.rx_free_thresh = DEFAULT_RX_FREE_THRESH,
		.rx_drop_en = 0,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_thresh = {
			.pthresh = DEFAULT_TX_PTHRESH,
			.hthresh = DEFAULT_TX_HTHRESH,
			.wthresh = DEFAULT_TX_WTHRESH,
		},
		.tx_free_thresh = DEFAULT_TX_FREE_THRESH,
		.tx_rs_thresh = DEFAULT_TX_RSBIT_THRESH,
	};

	dev_info->flow_type_rss_offloads = ETH_RSS_IPV4 |
					   ETH_RSS_NONFRAG_IPV4_TCP |
					   ETH_RSS_NONFRAG_IPV4_UDP |
					   ETH_RSS_IPV6 |
					   ETH_RSS_NONFRAG_IPV6_TCP |
					   ETH_RSS_NONFRAG_IPV6_UDP;

	dev_info->reta_size = NFP_NET_CFG_RSS_ITBL_SZ;
	dev_info->hash_key_size = NFP_NET_CFG_RSS_KEY_SZ;

	dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_10G |
			       ETH_LINK_SPEED_25G | ETH_LINK_SPEED_40G |
			       ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G;
}

static const uint32_t *
nfp_net_supported_ptypes_get(struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		/* refers to nfp_net_set_hash() */
		RTE_PTYPE_INNER_L3_IPV4,
		RTE_PTYPE_INNER_L3_IPV6,
		RTE_PTYPE_INNER_L3_IPV6_EXT,
		RTE_PTYPE_INNER_L4_MASK,
		RTE_PTYPE_UNKNOWN
	};

	if (dev->rx_pkt_burst == nfp_net_recv_pkts)
		return ptypes;
	return NULL;
}

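/* Count RX descriptors with the DD bit set, i.e. packets ready to be read. */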
static uint32_t
nfp_net_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_idx)
{
	struct nfp_net_rxq *rxq;
	struct nfp_net_rx_desc *rxds;
	uint32_t idx;
	uint32_t count;

	rxq = (struct nfp_net_rxq *)dev->data->rx_queues[queue_idx];

	idx = rxq->rd_p;

	count = 0;

	/*
	 * Other PMDs are just checking the DD bit in intervals of 4
	 * descriptors and counting all four if the first has the DD
	 * bit on. Of course, this is not accurate but can be good for
	 * performance. But ideally that should be done in descriptors
	 * chunks belonging to the same cache line.
	 */

	while (count < rxq->rx_count) {
		rxds = &rxq->rxds[idx];
		if ((rxds->rxd.meta_len_dd & PCIE_DESC_RX_DD) == 0)
			break;

		count++;
		idx++;

		/* Wrapping? */
		if ((idx) == rxq->rx_count)
			idx = 0;
	}

	return count;
}

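/*
 * Enable/disable the RX queue interrupt by writing the corresponding ICR
 * entry. Without UIO, entry 0 is reserved for LSC, so data queues start at
 * entry 1.
 */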
static int
nfp_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
	struct rte_pci_device *pci_dev;
	struct nfp_net_hw *hw;
	int base = 0;

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	if (pci_dev->intr_handle.type != RTE_INTR_HANDLE_UIO)
		base = 1;

	/* Make sure all updates are written before un-masking */
	rte_wmb();
	nn_cfg_writeb(hw, NFP_NET_CFG_ICR(base + queue_id),
		      NFP_NET_CFG_ICR_UNMASKED);
	return 0;
}

static int
nfp_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
{
	struct rte_pci_device *pci_dev;
	struct nfp_net_hw *hw;
	int base = 0;

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	if (pci_dev->intr_handle.type != RTE_INTR_HANDLE_UIO)
		base = 1;

	/* Make sure all updates are written before masking */
	rte_wmb();
	nn_cfg_writeb(hw, NFP_NET_CFG_ICR(base + queue_id), 0x1);
	return 0;
}

static void
nfp_net_dev_link_status_print(struct rte_eth_dev *dev)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_eth_link link;

	rte_eth_linkstatus_get(dev, &link);
	if (link.link_status)
		PMD_DRV_LOG(INFO, "Port %d: Link Up - speed %u Mbps - %s",
			    dev->data->port_id, link.link_speed,
			    link.link_duplex == ETH_LINK_FULL_DUPLEX
			    ? "full-duplex" : "half-duplex");
	else
		PMD_DRV_LOG(INFO, " Port %d: Link Down",
			    dev->data->port_id);

	PMD_DRV_LOG(INFO, "PCI Address: %04d:%02d:%02d:%d",
		    pci_dev->addr.domain, pci_dev->addr.bus,
		    pci_dev->addr.devid, pci_dev->addr.function);
}

/* Interrupt configuration and handling */

/*
 * nfp_net_irq_unmask - Unmask an interrupt
 *
 * If MSI-X auto-masking is enabled clear the mask bit, otherwise
 * clear the ICR for the entry.
 */
static void
nfp_net_irq_unmask(struct rte_eth_dev *dev)
{
	struct nfp_net_hw *hw;
	struct rte_pci_device *pci_dev;

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	if (hw->ctrl & NFP_NET_CFG_CTRL_MSIXAUTO) {
		/* If MSI-X auto-masking is used, clear the entry */
		rte_wmb();
		rte_intr_enable(&pci_dev->intr_handle);
	} else {
		/* Make sure all updates are written before un-masking */
		rte_wmb();
		nn_cfg_writeb(hw, NFP_NET_CFG_ICR(NFP_NET_IRQ_LSC_IDX),
			      NFP_NET_CFG_ICR_UNMASKED);
	}
}

static void
nfp_net_dev_interrupt_handler(void *param)
{
	int64_t timeout;
	struct rte_eth_link link;
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;

	PMD_DRV_LOG(DEBUG, "We got a LSC interrupt!!!");

	rte_eth_linkstatus_get(dev, &link);

	nfp_net_link_update(dev, 0);

	/* likely to up */
	if (!link.link_status) {
		/* handle it 1 sec later, wait it being stable */
		timeout = NFP_NET_LINK_UP_CHECK_TIMEOUT;
	/* likely to down */
	} else {
		/* handle it 4 sec later, wait it being stable */
		timeout = NFP_NET_LINK_DOWN_CHECK_TIMEOUT;
	}

	if (rte_eal_alarm_set(timeout * 1000,
			      nfp_net_dev_interrupt_delayed_handler,
			      (void *)dev) < 0) {
		PMD_INIT_LOG(ERR, "Error setting alarm");
		/* Unmasking */
		nfp_net_irq_unmask(dev);
	}
}

/*
 * Interrupt handler which shall be registered as alarm callback for delayed
 * handling of a specific interrupt, waiting for a stable NIC state. As the
 * NIC interrupt state is not stable for nfp right after the link goes down,
 * it needs to wait 4 seconds to get a stable status.
 *
 * @param handle   Pointer to interrupt handle.
 * @param param    The address of parameter (struct rte_eth_dev *)
 *
 * @return  void
 */
static void
nfp_net_dev_interrupt_delayed_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;

	nfp_net_link_update(dev, 0);
	_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);

	nfp_net_dev_link_status_print(dev);

	/* Unmasking */
	nfp_net_irq_unmask(dev);
}

static int
nfp_net_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct nfp_net_hw *hw;

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* check that mtu is within the allowed range */
	if ((mtu < ETHER_MIN_MTU) || ((uint32_t)mtu > hw->max_mtu))
		return -EINVAL;

	/* mtu setting is forbidden if port is started */
	if (dev->data->dev_started) {
		PMD_DRV_LOG(ERR, "port %d must be stopped before configuration",
			    dev->data->port_id);
		return -EBUSY;
	}

	/* switch to jumbo mode if needed */
	if ((uint32_t)mtu > ETHER_MAX_LEN)
		dev->data->dev_conf.rxmode.offloads |=
						DEV_RX_OFFLOAD_JUMBO_FRAME;
	else
		dev->data->dev_conf.rxmode.offloads &=
						~DEV_RX_OFFLOAD_JUMBO_FRAME;

	/* update max frame size */
	dev->data->dev_conf.rxmode.max_rx_pkt_len = (uint32_t)mtu;

	/* writing to configuration space */
	nn_cfg_writel(hw, NFP_NET_CFG_MTU, (uint32_t)mtu);

	hw->mtu = mtu;

	return 0;
}

static int
nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
		       uint16_t queue_idx, uint16_t nb_desc,
		       unsigned int socket_id,
		       const struct rte_eth_rxconf *rx_conf,
		       struct rte_mempool *mp)
{
	const struct rte_memzone *tz;
	struct nfp_net_rxq *rxq;
	struct nfp_net_hw *hw;

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	/* Validating number of descriptors */
	if (((nb_desc * sizeof(struct nfp_net_rx_desc)) % 128) != 0 ||
	    (nb_desc > NFP_NET_MAX_RX_DESC) ||
	    (nb_desc < NFP_NET_MIN_RX_DESC)) {
		PMD_DRV_LOG(ERR, "Wrong nb_desc value");
		return -EINVAL;
	}

	/*
	 * Free memory prior to re-allocation if needed. This is the case after
	 * calling nfp_net_stop.
	 */
	if (dev->data->rx_queues[queue_idx]) {
		nfp_net_rx_queue_release(dev->data->rx_queues[queue_idx]);
		dev->data->rx_queues[queue_idx] = NULL;
	}

	/* Allocating rx queue data structure */
	rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct nfp_net_rxq),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (rxq == NULL)
		return -ENOMEM;

	/* Hw queues mapping based on firmware configuration */
	rxq->qidx = queue_idx;
	rxq->fl_qcidx = queue_idx * hw->stride_rx;
	rxq->rx_qcidx = rxq->fl_qcidx + (hw->stride_rx - 1);
	rxq->qcp_fl = hw->rx_bar + NFP_QCP_QUEUE_OFF(rxq->fl_qcidx);
	rxq->qcp_rx = hw->rx_bar + NFP_QCP_QUEUE_OFF(rxq->rx_qcidx);

	/*
	 * Tracking mbuf size for detecting a potential mbuf overflow due to
	 * RX offset.
	 */
	rxq->mem_pool = mp;
	rxq->mbuf_size = rxq->mem_pool->elt_size;
	rxq->mbuf_size -= (sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM);
	hw->flbufsz = rxq->mbuf_size;

	rxq->rx_count = nb_desc;
	rxq->port_id = dev->data->port_id;
	rxq->rx_free_thresh = rx_conf->rx_free_thresh;
	rxq->drop_en = rx_conf->rx_drop_en;

	/*
	 * Allocate RX ring hardware descriptors. A memzone large enough to
	 * handle the maximum ring size is allocated in order to allow for
	 * resizing in later calls to the queue setup function.
	 */
	tz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx,
				      sizeof(struct nfp_net_rx_desc) *
				      NFP_NET_MAX_RX_DESC, NFP_MEMZONE_ALIGN,
				      socket_id);

	if (tz == NULL) {
		PMD_DRV_LOG(ERR, "Error allocating rx dma");
		nfp_net_rx_queue_release(rxq);
		return -ENOMEM;
	}

	/* Saving physical and virtual addresses for the RX ring */
	rxq->dma = (uint64_t)tz->iova;
	rxq->rxds = (struct nfp_net_rx_desc *)tz->addr;

	/* mbuf pointers array for referencing mbufs linked to RX descriptors */
	rxq->rxbufs = rte_zmalloc_socket("rxq->rxbufs",
					 sizeof(*rxq->rxbufs) * nb_desc,
					 RTE_CACHE_LINE_SIZE, socket_id);
	if (rxq->rxbufs == NULL) {
		nfp_net_rx_queue_release(rxq);
		return -ENOMEM;
	}

	PMD_RX_LOG(DEBUG, "rxbufs=%p hw_ring=%p dma_addr=0x%" PRIx64,
		   rxq->rxbufs, rxq->rxds, (unsigned long int)rxq->dma);

	nfp_net_reset_rx_queue(rxq);

	dev->data->rx_queues[queue_idx] = rxq;
	rxq->hw = hw;

	/*
	 * Telling the HW about the physical address of the RX ring and number
	 * of descriptors in log2 format.
	 */
	nn_cfg_writeq(hw, NFP_NET_CFG_RXR_ADDR(queue_idx), rxq->dma);
	nn_cfg_writeb(hw, NFP_NET_CFG_RXR_SZ(queue_idx), rte_log2_u32(nb_desc));

	return 0;
}

static int
nfp_net_rx_fill_freelist(struct nfp_net_rxq *rxq)
{
	struct nfp_net_rx_buff *rxe = rxq->rxbufs;
	uint64_t dma_addr;
	unsigned i;

	PMD_RX_LOG(DEBUG, "nfp_net_rx_fill_freelist for %u descriptors",
		   rxq->rx_count);

	for (i = 0; i < rxq->rx_count; i++) {
		struct nfp_net_rx_desc *rxd;
		struct rte_mbuf *mbuf = rte_pktmbuf_alloc(rxq->mem_pool);

		if (mbuf == NULL) {
			PMD_DRV_LOG(ERR, "RX mbuf alloc failed queue_id=%u",
				(unsigned)rxq->qidx);
			return -ENOMEM;
		}

		dma_addr = rte_cpu_to_le_64(RTE_MBUF_DMA_ADDR_DEFAULT(mbuf));

		rxd = &rxq->rxds[i];
		rxd->fld.dd = 0;
		rxd->fld.dma_addr_hi = (dma_addr >> 32) & 0xff;
		rxd->fld.dma_addr_lo = dma_addr & 0xffffffff;
		rxe[i].mbuf = mbuf;
		PMD_RX_LOG(DEBUG, "[%d]: %" PRIx64, i, dma_addr);
	}

	/* Make sure all writes are flushed before telling the hardware */
	rte_wmb();

	/* Not advertising the whole ring as the firmware gets confused if so */
	PMD_RX_LOG(DEBUG, "Increment FL write pointer in %u",
		   rxq->rx_count - 1);

	nfp_qcp_ptr_add(rxq->qcp_fl, NFP_QCP_WRITE_PTR, rxq->rx_count - 1);

	return 0;
}

static int
nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
		       uint16_t nb_desc, unsigned int socket_id,
		       const struct rte_eth_txconf *tx_conf)
{
	const struct rte_memzone *tz;
	struct nfp_net_txq *txq;
	uint16_t tx_free_thresh;
	struct nfp_net_hw *hw;

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	/* Validating number of descriptors */
	if (((nb_desc * sizeof(struct nfp_net_tx_desc)) % 128) != 0 ||
	    (nb_desc > NFP_NET_MAX_TX_DESC) ||
	    (nb_desc < NFP_NET_MIN_TX_DESC)) {
		PMD_DRV_LOG(ERR, "Wrong nb_desc value");
		return -EINVAL;
	}

	tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
				    tx_conf->tx_free_thresh :
				    DEFAULT_TX_FREE_THRESH);

	if (tx_free_thresh > (nb_desc)) {
		PMD_DRV_LOG(ERR,
			"tx_free_thresh must be less than the number of TX "
			"descriptors. (tx_free_thresh=%u port=%d "
			"queue=%d)", (unsigned int)tx_free_thresh,
			dev->data->port_id, (int)queue_idx);
		return -(EINVAL);
	}

	/*
	 * Free memory prior to re-allocation if needed. This is the case after
	 * calling nfp_net_stop.
	 */
	if (dev->data->tx_queues[queue_idx]) {
		PMD_TX_LOG(DEBUG, "Freeing memory prior to re-allocation %d",
			   queue_idx);
		nfp_net_tx_queue_release(dev->data->tx_queues[queue_idx]);
		dev->data->tx_queues[queue_idx] = NULL;
	}

	/* Allocating tx queue data structure */
	txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct nfp_net_txq),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (txq == NULL) {
		PMD_DRV_LOG(ERR, "Error allocating tx dma");
		return -ENOMEM;
	}

	/*
	 * Allocate TX ring hardware descriptors. A memzone large enough to
	 * handle the maximum ring size is allocated in order to allow for
	 * resizing in later calls to the queue setup function.
	 */
	tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
				      sizeof(struct nfp_net_tx_desc) *
				      NFP_NET_MAX_TX_DESC, NFP_MEMZONE_ALIGN,
				      socket_id);
	if (tz == NULL) {
		PMD_DRV_LOG(ERR, "Error allocating tx dma");
		nfp_net_tx_queue_release(txq);
		return -ENOMEM;
	}

	txq->tx_count = nb_desc;
	txq->tx_free_thresh = tx_free_thresh;
	txq->tx_pthresh = tx_conf->tx_thresh.pthresh;
	txq->tx_hthresh = tx_conf->tx_thresh.hthresh;
	txq->tx_wthresh = tx_conf->tx_thresh.wthresh;

	/* queue mapping based on firmware configuration */
	txq->qidx = queue_idx;
	txq->tx_qcidx = queue_idx * hw->stride_tx;
	txq->qcp_q = hw->tx_bar + NFP_QCP_QUEUE_OFF(txq->tx_qcidx);

	txq->port_id = dev->data->port_id;

	/* Saving physical and virtual addresses for the TX ring */
	txq->dma = (uint64_t)tz->iova;
	txq->txds = (struct nfp_net_tx_desc *)tz->addr;

	/* mbuf pointers array for referencing mbufs linked to TX descriptors */
	txq->txbufs = rte_zmalloc_socket("txq->txbufs",
					 sizeof(*txq->txbufs) * nb_desc,
					 RTE_CACHE_LINE_SIZE, socket_id);
	if (txq->txbufs == NULL) {
		nfp_net_tx_queue_release(txq);
		return -ENOMEM;
	}
	PMD_TX_LOG(DEBUG, "txbufs=%p hw_ring=%p dma_addr=0x%" PRIx64,
		   txq->txbufs, txq->txds, (unsigned long int)txq->dma);

	nfp_net_reset_tx_queue(txq);

	dev->data->tx_queues[queue_idx] = txq;
	txq->hw = hw;

	/*
	 * Telling the HW about the physical address of the TX ring and number
	 * of descriptors in log2 format.
	 */
	nn_cfg_writeq(hw, NFP_NET_CFG_TXR_ADDR(queue_idx), txq->dma);
	nn_cfg_writeb(hw, NFP_NET_CFG_TXR_SZ(queue_idx), rte_log2_u32(nb_desc));

	return 0;
}

/* nfp_net_tx_tso - Set TX descriptor for TSO */
static inline void
nfp_net_tx_tso(struct nfp_net_txq *txq, struct nfp_net_tx_desc *txd,
	       struct rte_mbuf *mb)
{
	uint64_t ol_flags;
	struct nfp_net_hw *hw = txq->hw;

	if (!(hw->cap & NFP_NET_CFG_CTRL_LSO_ANY))
		goto clean_txd;

	ol_flags = mb->ol_flags;

	if (!(ol_flags & PKT_TX_TCP_SEG))
		goto clean_txd;

	txd->l3_offset = mb->l2_len;
	txd->l4_offset = mb->l2_len + mb->l3_len;
	txd->lso_hdrlen = mb->l2_len + mb->l3_len + mb->l4_len;
	txd->mss = rte_cpu_to_le_16(mb->tso_segsz);
	txd->flags = PCIE_DESC_TX_LSO;
	return;

clean_txd:
	txd->flags = 0;
	txd->l3_offset = 0;
	txd->l4_offset = 0;
	txd->lso_hdrlen = 0;
	txd->mss = 0;
}

/* nfp_net_tx_cksum - Set TX CSUM offload flags in TX descriptor */
static inline void
nfp_net_tx_cksum(struct nfp_net_txq *txq, struct nfp_net_tx_desc *txd,
		 struct rte_mbuf *mb)
{
	uint64_t ol_flags;
	struct nfp_net_hw *hw = txq->hw;

	if (!(hw->cap & NFP_NET_CFG_CTRL_TXCSUM))
		return;

	ol_flags = mb->ol_flags;

	/* IPv6 does not need checksum */
	if (ol_flags & PKT_TX_IP_CKSUM)
		txd->flags |= PCIE_DESC_TX_IP4_CSUM;

	switch (ol_flags & PKT_TX_L4_MASK) {
	case PKT_TX_UDP_CKSUM:
		txd->flags |= PCIE_DESC_TX_UDP_CSUM;
		break;
	case PKT_TX_TCP_CKSUM:
		txd->flags |= PCIE_DESC_TX_TCP_CSUM;
		break;
	}

	if (ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK))
		txd->flags |= PCIE_DESC_TX_CSUM;
}

/* nfp_net_rx_cksum - set mbuf checksum flags based on RX descriptor flags */
static inline void
nfp_net_rx_cksum(struct nfp_net_rxq *rxq, struct nfp_net_rx_desc *rxd,
		 struct rte_mbuf *mb)
{
	struct nfp_net_hw *hw = rxq->hw;

	if (!(hw->ctrl & NFP_NET_CFG_CTRL_RXCSUM))
		return;

	/* If IPv4 and IP checksum error, fail */
	if (unlikely((rxd->rxd.flags & PCIE_DESC_RX_IP4_CSUM) &&
	    !(rxd->rxd.flags & PCIE_DESC_RX_IP4_CSUM_OK)))
		mb->ol_flags |= PKT_RX_IP_CKSUM_BAD;
	else
		mb->ol_flags |= PKT_RX_IP_CKSUM_GOOD;

	/* If neither UDP nor TCP return */
	if (!(rxd->rxd.flags & PCIE_DESC_RX_TCP_CSUM) &&
	    !(rxd->rxd.flags & PCIE_DESC_RX_UDP_CSUM))
		return;

	if (likely(rxd->rxd.flags & PCIE_DESC_RX_L4_CSUM_OK))
		mb->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
	else
		mb->ol_flags |= PKT_RX_L4_CKSUM_BAD;
}

#define NFP_HASH_OFFSET      ((uint8_t *)mbuf->buf_addr + mbuf->data_off - 4)
#define NFP_HASH_TYPE_OFFSET ((uint8_t *)mbuf->buf_addr + mbuf->data_off - 8)

#define NFP_DESC_META_LEN(d) (d->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK)

/*
 * nfp_net_set_hash - Set mbuf hash data
 *
 * The RSS hash and hash-type are pre-pended to the packet data.
 * Extract and decode it and set the mbuf fields.
 */
static inline void
nfp_net_set_hash(struct nfp_net_rxq *rxq, struct nfp_net_rx_desc *rxd,
		 struct rte_mbuf *mbuf)
{
	struct nfp_net_hw *hw = rxq->hw;
	uint8_t *meta_offset;
	uint32_t meta_info;
	uint32_t hash = 0;
	uint32_t hash_type = 0;

	if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS))
		return;

	/* this is true for new firmwares */
	if (likely(((hw->cap & NFP_NET_CFG_CTRL_RSS2) ||
	    (NFD_CFG_MAJOR_VERSION_of(hw->ver) == 4)) &&
	     NFP_DESC_META_LEN(rxd))) {
		/*
		 * new metadata api:
		 * <----  32 bit  ----->
		 * m    field type word
		 * e     data field #2
		 * t     data field #1
		 * a     data field #0
		 * ====================
		 *    packet data
		 *
		 * Field type word contains up to 8 4bit field types
		 * A 4bit field type refers to a data field word
		 * A data field word can have several 4bit field types
		 */
		meta_offset = rte_pktmbuf_mtod(mbuf, uint8_t *);
		meta_offset -= NFP_DESC_META_LEN(rxd);
		meta_info = rte_be_to_cpu_32(*(uint32_t *)meta_offset);
		meta_offset += 4;
		/* NFP PMD just supports metadata for hashing */
		switch (meta_info & NFP_NET_META_FIELD_MASK) {
		case NFP_NET_META_HASH:
			/* next field type is about the hash type */
			meta_info >>= NFP_NET_META_FIELD_SIZE;
			/* hash value is in the data field */
			hash = rte_be_to_cpu_32(*(uint32_t *)meta_offset);
			hash_type = meta_info & NFP_NET_META_FIELD_MASK;
			break;
		default:
			/* Unsupported metadata can be a performance issue */
			return;
		}
	} else {
		if (!(rxd->rxd.flags & PCIE_DESC_RX_RSS))
			return;

		hash = rte_be_to_cpu_32(*(uint32_t *)NFP_HASH_OFFSET);
		hash_type = rte_be_to_cpu_32(*(uint32_t *)NFP_HASH_TYPE_OFFSET);
	}

	mbuf->hash.rss = hash;
	mbuf->ol_flags |= PKT_RX_RSS_HASH;

	switch (hash_type) {
	case NFP_NET_RSS_IPV4:
		mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV4;
		break;
	case NFP_NET_RSS_IPV6:
		mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV6;
		break;
	case NFP_NET_RSS_IPV6_EX:
		mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV6_EXT;
		break;
	case NFP_NET_RSS_IPV4_TCP:
		mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV6_EXT;
		break;
	case NFP_NET_RSS_IPV6_TCP:
		mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV6_EXT;
		break;
	case NFP_NET_RSS_IPV4_UDP:
		mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV6_EXT;
		break;
	case NFP_NET_RSS_IPV6_UDP:
		mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV6_EXT;
		break;
	default:
		mbuf->packet_type |= RTE_PTYPE_INNER_L4_MASK;
	}
}

static inline void
nfp_net_mbuf_alloc_failed(struct nfp_net_rxq *rxq)
{
	rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
}

/*
 * RX path design:
 *
 * There are some decisions to take:
 * 1) How to check DD RX descriptors bit
 * 2) How and when to allocate new mbufs
 *
 * Current implementation checks just one single DD bit each loop. As each
 * descriptor is 8 bytes, it is likely a good idea to check descriptors in
 * a single cache line instead. Tests with this change have not shown any
 * performance improvement but it requires further investigation. For example,
 * depending on which descriptor is next, the number of descriptors could be
 * less than 8 for just checking those in the same cache line. This implies
 * extra work which could be counterproductive by itself. Indeed, last firmware
 * changes are just doing this: writing several descriptors with the DD bit
 * for saving PCIe bandwidth and DMA operations from the NFP.
 *
 * Mbuf allocation is done when a new packet is received. Then the descriptor
 * is automatically linked with the new mbuf and the old one is given to the
 * user. The main drawback with this design is mbuf allocation is heavier than
 * using bulk allocations allowed by DPDK with rte_mempool_get_bulk. From the
 * cache point of view it does not seem allocating the mbuf early on as we are
 * doing now has any benefit at all. Again, tests with this change have not
 * shown any improvement. Also, rte_mempool_get_bulk returns all or nothing
 * so looking at the implications of this type of allocation should be studied
 * deeply.
 */
static uint16_t
nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	struct nfp_net_rxq *rxq;
	struct nfp_net_rx_desc *rxds;
	struct nfp_net_rx_buff *rxb;
	struct nfp_net_hw *hw;
	struct rte_mbuf *mb;
	struct rte_mbuf *new_mb;
	uint16_t nb_hold;
	uint64_t dma_addr;
	int avail;

	rxq = rx_queue;
	if (unlikely(rxq == NULL)) {
		/*
		 * DPDK just checks the queue is lower than max queues
		 * enabled. But the queue needs to be configured.
		 */
		RTE_LOG_DP(ERR, PMD, "RX Bad queue\n");
		return -EINVAL;
	}

	hw = rxq->hw;
	avail = 0;
	nb_hold = 0;

	while (avail < nb_pkts) {
		rxb = &rxq->rxbufs[rxq->rd_p];
		if (unlikely(rxb == NULL)) {
			RTE_LOG_DP(ERR, PMD, "rxb does not exist!\n");
			break;
		}

		rxds = &rxq->rxds[rxq->rd_p];
		if ((rxds->rxd.meta_len_dd & PCIE_DESC_RX_DD) == 0)
			break;

		/*
		 * Memory barrier to ensure that we won't do other
		 * reads before the DD bit.
		 */
		rte_rmb();

		/*
		 * We got a packet. Let's alloc a new mbuf for refilling the
		 * free descriptor ring as soon as possible.
		 */
		new_mb = rte_pktmbuf_alloc(rxq->mem_pool);
		if (unlikely(new_mb == NULL)) {
			RTE_LOG_DP(DEBUG, PMD,
			"RX mbuf alloc failed port_id=%u queue_id=%u\n",
				rxq->port_id, (unsigned int)rxq->qidx);
			nfp_net_mbuf_alloc_failed(rxq);
			break;
		}

		nb_hold++;

		/*
		 * Grab the mbuf and refill the descriptor with the
		 * previously allocated mbuf.
		 */
		mb = rxb->mbuf;
		rxb->mbuf = new_mb;

		PMD_RX_LOG(DEBUG, "Packet len: %u, mbuf_size: %u",
			   rxds->rxd.data_len, rxq->mbuf_size);

		/* Size of this segment */
		mb->data_len = rxds->rxd.data_len - NFP_DESC_META_LEN(rxds);
		/* Size of the whole packet. We just support 1 segment */
		mb->pkt_len = rxds->rxd.data_len - NFP_DESC_META_LEN(rxds);

		if (unlikely((mb->data_len + hw->rx_offset) >
			     rxq->mbuf_size)) {
			/*
			 * This should not happen and the user has the
			 * responsibility of avoiding it. But we have
			 * to give some info about the error.
			 */
			RTE_LOG_DP(ERR, PMD,
				"mbuf overflow likely due to the RX offset.\n"
				"\t\tYour mbuf size should have extra space for"
				" RX offset=%u bytes.\n"
				"\t\tCurrently you just have %u bytes available"
				" but the received packet is %u bytes long",
				hw->rx_offset,
				rxq->mbuf_size - hw->rx_offset,
				mb->data_len);
			return -EINVAL;
		}

		/* Filling the received mbuf with packet info */
		if (hw->rx_offset)
			mb->data_off = RTE_PKTMBUF_HEADROOM + hw->rx_offset;
		else
			mb->data_off = RTE_PKTMBUF_HEADROOM +
				       NFP_DESC_META_LEN(rxds);

		/* No scatter mode supported */
		mb->nb_segs = 1;
		mb->next = NULL;

		mb->port = rxq->port_id;

		/* Checking the RSS flag */
		nfp_net_set_hash(rxq, rxds, mb);

		/* Checking the checksum flag */
		nfp_net_rx_cksum(rxq, rxds, mb);

		if ((rxds->rxd.flags & PCIE_DESC_RX_VLAN) &&
		    (hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN)) {
			mb->vlan_tci = rte_cpu_to_le_32(rxds->rxd.vlan);
			mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
		}

		/* Adding the mbuf to the mbuf array passed by the app */
		rx_pkts[avail++] = mb;

		/* Now resetting and updating the descriptor */
		rxds->vals[0] = 0;
		rxds->vals[1] = 0;
		dma_addr = rte_cpu_to_le_64(RTE_MBUF_DMA_ADDR_DEFAULT(new_mb));
		rxds->fld.dd = 0;
		rxds->fld.dma_addr_hi = (dma_addr >> 32) & 0xff;
		rxds->fld.dma_addr_lo = dma_addr & 0xffffffff;

		rxq->rd_p++;
		if (unlikely(rxq->rd_p == rxq->rx_count)) /* wrapping?*/
			rxq->rd_p = 0;
	}

	if (nb_hold == 0)
		return nb_hold;

	PMD_RX_LOG(DEBUG, "RX  port_id=%u queue_id=%u, %d packets received",
		   rxq->port_id, (unsigned int)rxq->qidx, nb_hold);

	nb_hold += rxq->nb_rx_hold;

	/*
	 * FL descriptors need to be written before incrementing the
	 * FL queue WR pointer.
	 */
	rte_wmb();
	if (nb_hold > rxq->rx_free_thresh) {
		PMD_RX_LOG(DEBUG, "port=%u queue=%u nb_hold=%u avail=%u",
			   rxq->port_id, (unsigned int)rxq->qidx,
			   (unsigned)nb_hold, (unsigned)avail);
		nfp_qcp_ptr_add(rxq->qcp_fl, NFP_QCP_WRITE_PTR, nb_hold);
		nb_hold = 0;
	}
	rxq->nb_rx_hold = nb_hold;

	return avail;
}

/*
 * nfp_net_tx_free_bufs - Check for descriptors with a complete
 * status
 * @txq: TX queue to work with
 * Returns number of descriptors freed
 */
static int
nfp_net_tx_free_bufs(struct nfp_net_txq *txq)
{
	uint32_t qcp_rd_p;
	int todo;

	PMD_TX_LOG(DEBUG, "queue %u. Check for descriptor with a complete"
		   " status", txq->qidx);

	/* Work out how many packets have been sent */
	qcp_rd_p = nfp_qcp_read(txq->qcp_q, NFP_QCP_READ_PTR);

	if (qcp_rd_p == txq->rd_p) {
		PMD_TX_LOG(DEBUG, "queue %u: It seems harrier is not sending "
			   "packets (%u, %u)", txq->qidx,
			   qcp_rd_p, txq->rd_p);
		return 0;
	}

	if (qcp_rd_p > txq->rd_p)
		todo = qcp_rd_p - txq->rd_p;
	else
		todo = qcp_rd_p + txq->tx_count - txq->rd_p;

	PMD_TX_LOG(DEBUG, "qcp_rd_p %u, txq->rd_p: %u, qcp->rd_p: %u",
		   qcp_rd_p, txq->rd_p, txq->rd_p);

	if (todo == 0)
		return todo;

	txq->rd_p += todo;
	if (unlikely(txq->rd_p >= txq->tx_count))
		txq->rd_p -= txq->tx_count;

	return todo;
}

/* Leaving always free descriptors for avoiding wrapping confusion */
static inline
uint32_t nfp_free_tx_desc(struct nfp_net_txq *txq)
{
	if (txq->wr_p >= txq->rd_p)
		return txq->tx_count - (txq->wr_p - txq->rd_p) - 8;
	else
		return txq->rd_p - txq->wr_p - 8;
}

/*
 * nfp_net_txq_full - Check if the TX queue free descriptors
 * is below tx_free_threshold
 *
 * @txq: TX queue to check
 *
 * This function uses the host copy* of read/write pointers
 */
static inline
uint32_t nfp_net_txq_full(struct nfp_net_txq *txq)
{
	return (nfp_free_tx_desc(txq) < txq->tx_free_thresh);
}

2164 nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
2166 struct nfp_net_txq *txq;
2167 struct nfp_net_hw *hw;
2168 struct nfp_net_tx_desc *txds, txd;
2169 struct rte_mbuf *pkt;
2171 int pkt_size, dma_size;
2172 uint16_t free_descs, issued_descs;
2173 struct rte_mbuf **lmbuf;
2178 txds = &txq->txds[txq->wr_p];
2180 PMD_TX_LOG(DEBUG, "working for queue %u at pos %d and %u packets",
2181 txq->qidx, txq->wr_p, nb_pkts);
2183 if ((nfp_free_tx_desc(txq) < nb_pkts) || (nfp_net_txq_full(txq)))
2184 nfp_net_tx_free_bufs(txq);
2186 free_descs = (uint16_t)nfp_free_tx_desc(txq);
2187 if (unlikely(free_descs == 0))
2194 PMD_TX_LOG(DEBUG, "queue: %u. Sending %u packets",
2195 txq->qidx, nb_pkts);
2196 /* Sending packets */
2197 while ((i < nb_pkts) && free_descs) {
2198 /* Grabbing the mbuf linked to the current descriptor */
2199 lmbuf = &txq->txbufs[txq->wr_p].mbuf;
2200 /* Warming the cache for releasing the mbuf later on */
2201 RTE_MBUF_PREFETCH_TO_FREE(*lmbuf);
2203 pkt = *(tx_pkts + i);
2205 if (unlikely((pkt->nb_segs > 1) &&
2206 !(hw->cap & NFP_NET_CFG_CTRL_GATHER))) {
2207 PMD_INIT_LOG(INFO, "NFP_NET_CFG_CTRL_GATHER not set");
2208 rte_panic("Multisegment packet unsupported\n");
2211 /* Checking if we have enough descriptors */
2212 if (unlikely(pkt->nb_segs > free_descs))
2216 * Checksum and VLAN flags just in the first descriptor for a
2217 * multisegment packet, but TSO info needs to be in all of them.
2219 txd.data_len = pkt->pkt_len;
2220 nfp_net_tx_tso(txq, &txd, pkt);
2221 nfp_net_tx_cksum(txq, &txd, pkt);
2223 if ((pkt->ol_flags & PKT_TX_VLAN_PKT) &&
2224 (hw->cap & NFP_NET_CFG_CTRL_TXVLAN)) {
2225 txd.flags |= PCIE_DESC_TX_VLAN;
2226 txd.vlan = pkt->vlan_tci;
2230 * mbuf data_len is the data in one segment and pkt_len data
2231 * in the whole packet. When the packet is just one segment,
2232 * then data_len = pkt_len
2234 pkt_size = pkt->pkt_len;
2237 /* Copying TSO, VLAN and cksum info */
2240 /* Releasing mbuf used by this descriptor previously*/
2242 rte_pktmbuf_free_seg(*lmbuf);
2245 * Linking mbuf with descriptor for being released
2246 * next time descriptor is used
2250 dma_size = pkt->data_len;
2251 dma_addr = rte_mbuf_data_iova(pkt);
2252 PMD_TX_LOG(DEBUG, "Working with mbuf at dma address:"
2253 "%" PRIx64 "", dma_addr);
2255 /* Filling descriptor fields */
2256 txds->dma_len = dma_size;
2257 txds->data_len = txd.data_len;
2258 txds->dma_addr_hi = (dma_addr >> 32) & 0xff;
2259 txds->dma_addr_lo = (dma_addr & 0xffffffff);
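/*
 * Only 8 high bits plus 32 low bits of the address fit in the descriptor,
 * i.e. DMA addresses are limited to 40 bits; this is why nfp_net_init()
 * rejects devices whose IOVAs cannot be constrained to a 40-bit mask.
 */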
2260 ASSERT(free_descs > 0);
2264 if (unlikely(txq->wr_p == txq->tx_count)) /* wrapping?*/
2267 pkt_size -= dma_size;
2270 * Making the EOP, packets with just one segment
2273 if (likely(!pkt_size))
2274 txds->offset_eop = PCIE_DESC_TX_EOP;
2276 txds->offset_eop = 0;
2279 /* Referencing next free TX descriptor */
2280 txds = &txq->txds[txq->wr_p];
2281 lmbuf = &txq->txbufs[txq->wr_p].mbuf;
2288 /* Increment write pointers. Force memory write before we let HW know */
2290 nfp_qcp_ptr_add(txq->qcp_q, NFP_QCP_WRITE_PTR, issued_descs);
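/*
 * After this QCP write-pointer update the hardware owns the issued_descs
 * descriptors written above; they are reclaimed later by
 * nfp_net_tx_free_bufs() once the QCP read pointer has moved past them.
 */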
2296 nfp_net_vlan_offload_set(struct rte_eth_dev *dev, int mask)
2298 uint32_t new_ctrl, update;
2299 struct nfp_net_hw *hw;
2302 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2305 if ((mask & ETH_VLAN_FILTER_OFFLOAD) ||
2306 (mask & ETH_VLAN_EXTEND_OFFLOAD))
2307 PMD_DRV_LOG(INFO, "No support for ETH_VLAN_FILTER_OFFLOAD or"
2308 " ETH_VLAN_EXTEND_OFFLOAD");
2310 /* Enable vlan strip if it is not configured yet */
2311 if ((mask & ETH_VLAN_STRIP_OFFLOAD) &&
2312 !(hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN))
2313 new_ctrl = hw->ctrl | NFP_NET_CFG_CTRL_RXVLAN;
2315 /* Disable vlan strip only if it is currently configured */
2316 if (!(mask & ETH_VLAN_STRIP_OFFLOAD) &&
2317 (hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN))
2318 new_ctrl = hw->ctrl & ~NFP_NET_CFG_CTRL_RXVLAN;
2323 update = NFP_NET_CFG_UPDATE_GEN;
2325 ret = nfp_net_reconfig(hw, new_ctrl, update);
2327 hw->ctrl = new_ctrl;
2333 nfp_net_rss_reta_write(struct rte_eth_dev *dev,
2334 struct rte_eth_rss_reta_entry64 *reta_conf,
2337 uint32_t reta, mask;
2340 struct nfp_net_hw *hw =
2341 NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2343 if (reta_size != NFP_NET_CFG_RSS_ITBL_SZ) {
2344 PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
2345 "(%d) doesn't match the number hardware can supported "
2346 "(%d)", reta_size, NFP_NET_CFG_RSS_ITBL_SZ);
2351 * Update Redirection Table. There are 128 8bit-entries which can be
2352 * managed as 32 32bit-entries
2354 for (i = 0; i < reta_size; i += 4) {
2355 /* Handling 4 RSS entries per loop */
2356 idx = i / RTE_RETA_GROUP_SIZE;
2357 shift = i % RTE_RETA_GROUP_SIZE;
2358 mask = (uint8_t)((reta_conf[idx].mask >> shift) & 0xF);
2364 /* If all 4 entries were set, there is no need to read the RETA register */
2366 reta = nn_cfg_readl(hw, NFP_NET_CFG_RSS_ITBL + i);
2368 for (j = 0; j < 4; j++) {
2369 if (!(mask & (0x1 << j)))
2372 /* Clearing the entry bits */
2373 reta &= ~(0xFF << (8 * j));
2374 reta |= reta_conf[idx].reta[shift + j] << (8 * j);
2376 nn_cfg_writel(hw, NFP_NET_CFG_RSS_ITBL + (idx * 64) + shift,
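/*
 * Worked example (illustrative index): RETA entry 37 lives in 32-bit word
 * 37 / 4 = 9, byte 37 % 4 = 1, i.e. bits [15:8] of the register at
 * NFP_NET_CFG_RSS_ITBL + 36; the loop above reaches it with idx = 0,
 * shift = 36 and j = 1.
 */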
2382 /* Update Redirection Table(RETA) of Receive Side Scaling of Ethernet device */
2384 nfp_net_reta_update(struct rte_eth_dev *dev,
2385 struct rte_eth_rss_reta_entry64 *reta_conf,
2388 struct nfp_net_hw *hw =
2389 NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2393 if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS))
2396 ret = nfp_net_rss_reta_write(dev, reta_conf, reta_size);
2400 update = NFP_NET_CFG_UPDATE_RSS;
2402 if (nfp_net_reconfig(hw, hw->ctrl, update) < 0)
2408 /* Query Redirection Table(RETA) of Receive Side Scaling of Ethernet device. */
2410 nfp_net_reta_query(struct rte_eth_dev *dev,
2411 struct rte_eth_rss_reta_entry64 *reta_conf,
2417 struct nfp_net_hw *hw;
2419 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2421 if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS))
2424 if (reta_size != NFP_NET_CFG_RSS_ITBL_SZ) {
2425 PMD_DRV_LOG(ERR, "The size of hash lookup table configured "
2426 "(%d) doesn't match the number hardware can supported "
2427 "(%d)", reta_size, NFP_NET_CFG_RSS_ITBL_SZ);
2432 * Reading Redirection Table. There are 128 8bit-entries which can be
2433 * managed as 32 32bit-entries
2435 for (i = 0; i < reta_size; i += 4) {
2436 /* Handling 4 RSS entries per loop */
2437 idx = i / RTE_RETA_GROUP_SIZE;
2438 shift = i % RTE_RETA_GROUP_SIZE;
2439 mask = (uint8_t)((reta_conf[idx].mask >> shift) & 0xF);
2444 reta = nn_cfg_readl(hw, NFP_NET_CFG_RSS_ITBL + (idx * 64) +
2446 for (j = 0; j < 4; j++) {
2447 if (!(mask & (0x1 << j)))
2449 reta_conf[idx].reta[shift + j] =
2450 (uint8_t)((reta >> (8 * j)) & 0xFF);
2457 nfp_net_rss_hash_write(struct rte_eth_dev *dev,
2458 struct rte_eth_rss_conf *rss_conf)
2460 struct nfp_net_hw *hw;
2462 uint32_t cfg_rss_ctrl = 0;
2466 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2468 /* Writing the key byte by byte */
2469 for (i = 0; i < rss_conf->rss_key_len; i++) {
2470 memcpy(&key, &rss_conf->rss_key[i], 1);
2471 nn_cfg_writeb(hw, NFP_NET_CFG_RSS_KEY + i, key);
2474 rss_hf = rss_conf->rss_hf;
2476 if (rss_hf & ETH_RSS_IPV4)
2477 cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4;
2479 if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
2480 cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4_TCP;
2482 if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
2483 cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4_UDP;
2485 if (rss_hf & ETH_RSS_IPV6)
2486 cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6;
2488 if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
2489 cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6_TCP;
2491 if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
2492 cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6_UDP;
2494 cfg_rss_ctrl |= NFP_NET_CFG_RSS_MASK;
2495 cfg_rss_ctrl |= NFP_NET_CFG_RSS_TOEPLITZ;
2497 /* configuring where to apply the RSS hash */
2498 nn_cfg_writel(hw, NFP_NET_CFG_RSS_CTRL, cfg_rss_ctrl);
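/*
 * Illustrative example: rss_hf = ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_TCP
 * results in NFP_NET_CFG_RSS_IPV4 | NFP_NET_CFG_RSS_IPV4_TCP being written
 * above, together with NFP_NET_CFG_RSS_MASK and the Toeplitz selection.
 */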
2500 /* Writing the key size */
2501 nn_cfg_writeb(hw, NFP_NET_CFG_RSS_KEY_SZ, rss_conf->rss_key_len);
2507 nfp_net_rss_hash_update(struct rte_eth_dev *dev,
2508 struct rte_eth_rss_conf *rss_conf)
2512 struct nfp_net_hw *hw;
2514 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2516 rss_hf = rss_conf->rss_hf;
2518 /* Checking if RSS is enabled */
2519 if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS)) {
2520 if (rss_hf != 0) { /* Enable RSS? */
2521 PMD_DRV_LOG(ERR, "RSS unsupported");
2524 return 0; /* Nothing to do */
2527 if (rss_conf->rss_key_len > NFP_NET_CFG_RSS_KEY_SZ) {
2528 PMD_DRV_LOG(ERR, "hash key too long");
2532 nfp_net_rss_hash_write(dev, rss_conf);
2534 update = NFP_NET_CFG_UPDATE_RSS;
2536 if (nfp_net_reconfig(hw, hw->ctrl, update) < 0)
2543 nfp_net_rss_hash_conf_get(struct rte_eth_dev *dev,
2544 struct rte_eth_rss_conf *rss_conf)
2547 uint32_t cfg_rss_ctrl;
2550 struct nfp_net_hw *hw;
2552 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2554 if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS))
2557 rss_hf = rss_conf->rss_hf;
2558 cfg_rss_ctrl = nn_cfg_readl(hw, NFP_NET_CFG_RSS_CTRL);
2560 if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4)
2561 rss_hf |= ETH_RSS_IPV4;
2563 if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4_TCP)
2564 rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;
2566 if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6_TCP)
2567 rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;
2569 if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4_UDP)
2570 rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;
2572 if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6_UDP)
2573 rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;
2575 if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6)
2576 rss_hf |= ETH_RSS_IPV6;
2578 /* Reading the key size */
2579 rss_conf->rss_key_len = nn_cfg_readl(hw, NFP_NET_CFG_RSS_KEY_SZ);
2581 /* Reading the key byte by byte */
2582 for (i = 0; i < rss_conf->rss_key_len; i++) {
2583 key = nn_cfg_readb(hw, NFP_NET_CFG_RSS_KEY + i);
2584 memcpy(&rss_conf->rss_key[i], &key, 1);
2591 nfp_net_rss_config_default(struct rte_eth_dev *dev)
2593 struct rte_eth_conf *dev_conf;
2594 struct rte_eth_rss_conf rss_conf;
2595 struct rte_eth_rss_reta_entry64 nfp_reta_conf[2];
2596 uint16_t rx_queues = dev->data->nb_rx_queues;
2600 PMD_DRV_LOG(INFO, "setting default RSS conf for %u queues",
2603 nfp_reta_conf[0].mask = ~0x0;
2604 nfp_reta_conf[1].mask = ~0x0;
2607 for (i = 0; i < 0x40; i += 8) {
2608 for (j = i; j < (i + 8); j++) {
2609 nfp_reta_conf[0].reta[j] = queue;
2610 nfp_reta_conf[1].reta[j] = queue++;
2614 ret = nfp_net_rss_reta_write(dev, nfp_reta_conf, 0x80);
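/*
 * Illustrative default layout (assuming the queue counter above wraps back
 * to 0 at rx_queues): with 4 RX queues the 128 RETA entries are filled
 * round-robin as 0,1,2,3,0,1,2,3,... so flows are spread evenly across
 * all configured queues.
 */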
2618 dev_conf = &dev->data->dev_conf;
2620 PMD_DRV_LOG(INFO, "wrong rss conf");
2623 rss_conf = dev_conf->rx_adv_conf.rss_conf;
2625 ret = nfp_net_rss_hash_write(dev, &rss_conf);
2631 /* Initialise and register driver with DPDK Application */
2632 static const struct eth_dev_ops nfp_net_eth_dev_ops = {
2633 .dev_configure = nfp_net_configure,
2634 .dev_start = nfp_net_start,
2635 .dev_stop = nfp_net_stop,
2636 .dev_close = nfp_net_close,
2637 .promiscuous_enable = nfp_net_promisc_enable,
2638 .promiscuous_disable = nfp_net_promisc_disable,
2639 .link_update = nfp_net_link_update,
2640 .stats_get = nfp_net_stats_get,
2641 .stats_reset = nfp_net_stats_reset,
2642 .dev_infos_get = nfp_net_infos_get,
2643 .dev_supported_ptypes_get = nfp_net_supported_ptypes_get,
2644 .mtu_set = nfp_net_dev_mtu_set,
2645 .mac_addr_set = nfp_set_mac_addr,
2646 .vlan_offload_set = nfp_net_vlan_offload_set,
2647 .reta_update = nfp_net_reta_update,
2648 .reta_query = nfp_net_reta_query,
2649 .rss_hash_update = nfp_net_rss_hash_update,
2650 .rss_hash_conf_get = nfp_net_rss_hash_conf_get,
2651 .rx_queue_setup = nfp_net_rx_queue_setup,
2652 .rx_queue_release = nfp_net_rx_queue_release,
2653 .rx_queue_count = nfp_net_rx_queue_count,
2654 .tx_queue_setup = nfp_net_tx_queue_setup,
2655 .tx_queue_release = nfp_net_tx_queue_release,
2656 .rx_queue_intr_enable = nfp_rx_queue_intr_enable,
2657 .rx_queue_intr_disable = nfp_rx_queue_intr_disable,
2661 * Every eth_dev created gets its own private data, but before nfp_net_init
2662 * that private data references the private data for all the PF ports. This is
2663 * due to how the vNIC BARs are mapped based on the first port, so all ports
2664 * need info about port 0 private data. Inside nfp_net_init the private data
2665 * pointer is changed to the right address for each port once the BARs have been mapped.
2667 * This function helps to find out which port, and therefore which offset
2668 * inside the private data array, to use.
2671 get_pf_port_number(char *name)
2673 char *pf_str = name;
2676 while ((*pf_str != '_') && (*pf_str != '\0') && (size++ < 30))
2681 * This should not happen at all, and it would mean a major
2682 * implementation fault.
2684 rte_panic("nfp_net: problem with pf device name\n");
2686 /* Expecting _portX with X within [0,7] */
2689 return (int)strtol(pf_str, NULL, 10);
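/*
 * Example (hypothetical device name): "0000:04:00.0_port3", built with the
 * "%s_port%d" format in nfp_pf_create_dev() below, makes this function
 * return 3, which is then used to index the per-port private data array.
 */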
2693 nfp_net_init(struct rte_eth_dev *eth_dev)
2695 struct rte_pci_device *pci_dev;
2696 struct nfp_net_hw *hw, *hwport0;
2698 uint64_t tx_bar_off = 0, rx_bar_off = 0;
2704 PMD_INIT_FUNC_TRACE();
2706 pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
2708 /* NFP cannot handle DMA addresses requiring more than 40 bits */
2709 if (rte_mem_check_dma_mask(40)) {
2710 RTE_LOG(ERR, PMD, "device %s cannot be used:",
2711 pci_dev->device.name);
2712 RTE_LOG(ERR, PMD, "\trestricted dma mask to 40 bits!\n");
2716 if ((pci_dev->id.device_id == PCI_DEVICE_ID_NFP4000_PF_NIC) ||
2717 (pci_dev->id.device_id == PCI_DEVICE_ID_NFP6000_PF_NIC)) {
2718 port = get_pf_port_number(eth_dev->data->name);
2719 if (port < 0 || port > 7) {
2720 PMD_DRV_LOG(ERR, "Port value is wrong");
2724 PMD_INIT_LOG(DEBUG, "Working with PF port value %d", port);
2726 /* This points to port 0 private data */
2727 hwport0 = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
2729 /* This points to the specific port private data */
2730 hw = &hwport0[port];
2732 hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
2736 eth_dev->dev_ops = &nfp_net_eth_dev_ops;
2737 eth_dev->rx_pkt_burst = &nfp_net_recv_pkts;
2738 eth_dev->tx_pkt_burst = &nfp_net_xmit_pkts;
2740 /* For secondary processes, the primary has done all the work */
2741 if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2744 rte_eth_copy_pci_info(eth_dev, pci_dev);
2746 hw->device_id = pci_dev->id.device_id;
2747 hw->vendor_id = pci_dev->id.vendor_id;
2748 hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
2749 hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;
2751 PMD_INIT_LOG(DEBUG, "nfp_net: device (%u:%u) %u:%u:%u:%u",
2752 pci_dev->id.vendor_id, pci_dev->id.device_id,
2753 pci_dev->addr.domain, pci_dev->addr.bus,
2754 pci_dev->addr.devid, pci_dev->addr.function);
2756 hw->ctrl_bar = (uint8_t *)pci_dev->mem_resource[0].addr;
2757 if (hw->ctrl_bar == NULL) {
2759 "hw->ctrl_bar is NULL. BAR0 not configured");
2763 if (hw->is_pf && port == 0) {
2764 hw->ctrl_bar = nfp_rtsym_map(hw->sym_tbl, "_pf0_net_bar0",
2765 hw->total_ports * 32768,
2767 if (!hw->ctrl_bar) {
2768 printf("nfp_rtsym_map fails for _pf0_net_ctrl_bar");
2772 PMD_INIT_LOG(DEBUG, "ctrl bar: %p", hw->ctrl_bar);
2776 if (!hwport0->ctrl_bar)
2779 /* address based on port0 offset */
2780 hw->ctrl_bar = hwport0->ctrl_bar +
2781 (port * NFP_PF_CSR_SLICE_SIZE);
2784 PMD_INIT_LOG(DEBUG, "ctrl bar: %p", hw->ctrl_bar);
2786 hw->max_rx_queues = nn_cfg_readl(hw, NFP_NET_CFG_MAX_RXRINGS);
2787 hw->max_tx_queues = nn_cfg_readl(hw, NFP_NET_CFG_MAX_TXRINGS);
2789 /* Work out where in the BAR the queues start. */
2790 switch (pci_dev->id.device_id) {
2791 case PCI_DEVICE_ID_NFP4000_PF_NIC:
2792 case PCI_DEVICE_ID_NFP6000_PF_NIC:
2793 case PCI_DEVICE_ID_NFP6000_VF_NIC:
2794 start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_TXQ);
2795 tx_bar_off = (uint64_t)start_q * NFP_QCP_QUEUE_ADDR_SZ;
2796 start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_RXQ);
2797 rx_bar_off = (uint64_t)start_q * NFP_QCP_QUEUE_ADDR_SZ;
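/*
 * Illustrative offset (assuming NFP_QCP_QUEUE_ADDR_SZ is 0x800, i.e. 2 KB
 * per queue): a start_q of 64 gives tx_bar_off = 0x20000, the byte offset
 * of this vNIC's first TX queue inside the queue controller BAR.
 */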
2800 PMD_DRV_LOG(ERR, "nfp_net: no matching device ID");
2802 goto dev_err_ctrl_map;
2805 PMD_INIT_LOG(DEBUG, "tx_bar_off: 0x%" PRIx64 "", tx_bar_off);
2806 PMD_INIT_LOG(DEBUG, "rx_bar_off: 0x%" PRIx64 "", rx_bar_off);
2808 if (hw->is_pf && port == 0) {
2809 /* configure access to tx/rx vNIC BARs */
2810 hwport0->hw_queues = nfp_cpp_map_area(hw->cpp, 0, 0,
2812 NFP_QCP_QUEUE_AREA_SZ,
2813 &hw->hwqueues_area);
2815 if (!hwport0->hw_queues) {
2816 printf("nfp_rtsym_map fails for net.qc");
2818 goto dev_err_ctrl_map;
2821 PMD_INIT_LOG(DEBUG, "tx/rx bar address: 0x%p",
2822 hwport0->hw_queues);
2826 hw->tx_bar = hwport0->hw_queues + tx_bar_off;
2827 hw->rx_bar = hwport0->hw_queues + rx_bar_off;
2828 eth_dev->data->dev_private = hw;
2830 hw->tx_bar = (uint8_t *)pci_dev->mem_resource[2].addr +
2832 hw->rx_bar = (uint8_t *)pci_dev->mem_resource[2].addr +
2836 PMD_INIT_LOG(DEBUG, "ctrl_bar: %p, tx_bar: %p, rx_bar: %p",
2837 hw->ctrl_bar, hw->tx_bar, hw->rx_bar);
2839 nfp_net_cfg_queue_setup(hw);
2841 /* Get some of the read-only fields from the config BAR */
2842 hw->ver = nn_cfg_readl(hw, NFP_NET_CFG_VERSION);
2843 hw->cap = nn_cfg_readl(hw, NFP_NET_CFG_CAP);
2844 hw->max_mtu = nn_cfg_readl(hw, NFP_NET_CFG_MAX_MTU);
2845 hw->mtu = ETHER_MTU;
2847 /* VLAN insertion is incompatible with LSOv2 */
2848 if (hw->cap & NFP_NET_CFG_CTRL_LSO2)
2849 hw->cap &= ~NFP_NET_CFG_CTRL_TXVLAN;
2851 if (NFD_CFG_MAJOR_VERSION_of(hw->ver) < 2)
2852 hw->rx_offset = NFP_NET_RX_OFFSET;
2854 hw->rx_offset = nn_cfg_readl(hw, NFP_NET_CFG_RX_OFFSET_ADDR);
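/*
 * NFD 2 and later advertise the RX data offset in the config BAR (read
 * above); older firmware always uses the fixed NFP_NET_RX_OFFSET.
 */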
2856 PMD_INIT_LOG(INFO, "VER: %u.%u, Maximum supported MTU: %d",
2857 NFD_CFG_MAJOR_VERSION_of(hw->ver),
2858 NFD_CFG_MINOR_VERSION_of(hw->ver), hw->max_mtu);
2860 PMD_INIT_LOG(INFO, "CAP: %#x, %s%s%s%s%s%s%s%s%s%s%s%s%s%s", hw->cap,
2861 hw->cap & NFP_NET_CFG_CTRL_PROMISC ? "PROMISC " : "",
2862 hw->cap & NFP_NET_CFG_CTRL_L2BC ? "L2BCFILT " : "",
2863 hw->cap & NFP_NET_CFG_CTRL_L2MC ? "L2MCFILT " : "",
2864 hw->cap & NFP_NET_CFG_CTRL_RXCSUM ? "RXCSUM " : "",
2865 hw->cap & NFP_NET_CFG_CTRL_TXCSUM ? "TXCSUM " : "",
2866 hw->cap & NFP_NET_CFG_CTRL_RXVLAN ? "RXVLAN " : "",
2867 hw->cap & NFP_NET_CFG_CTRL_TXVLAN ? "TXVLAN " : "",
2868 hw->cap & NFP_NET_CFG_CTRL_SCATTER ? "SCATTER " : "",
2869 hw->cap & NFP_NET_CFG_CTRL_GATHER ? "GATHER " : "",
2870 hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR ? "LIVE_ADDR " : "",
2871 hw->cap & NFP_NET_CFG_CTRL_LSO ? "TSO " : "",
2872 hw->cap & NFP_NET_CFG_CTRL_LSO2 ? "TSOv2 " : "",
2873 hw->cap & NFP_NET_CFG_CTRL_RSS ? "RSS " : "",
2874 hw->cap & NFP_NET_CFG_CTRL_RSS2 ? "RSSv2 " : "");
2878 hw->stride_rx = stride;
2879 hw->stride_tx = stride;
2881 PMD_INIT_LOG(INFO, "max_rx_queues: %u, max_tx_queues: %u",
2882 hw->max_rx_queues, hw->max_tx_queues);
2884 /* Initializing spinlock for reconfigs */
2885 rte_spinlock_init(&hw->reconfig_lock);
2887 /* Allocating memory for mac addr */
2888 eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", ETHER_ADDR_LEN, 0);
2889 if (eth_dev->data->mac_addrs == NULL) {
2890 PMD_INIT_LOG(ERR, "Failed to allocate space for MAC address");
2892 goto dev_err_queues_map;
2896 nfp_net_pf_read_mac(hwport0, port);
2897 nfp_net_write_mac(hw, (uint8_t *)&hw->mac_addr);
2899 nfp_net_vf_read_mac(hw);
2902 if (!is_valid_assigned_ether_addr((struct ether_addr *)&hw->mac_addr)) {
2903 PMD_INIT_LOG(INFO, "Using random mac address for port %d",
2905 /* Using random mac addresses for VFs */
2906 eth_random_addr(&hw->mac_addr[0]);
2907 nfp_net_write_mac(hw, (uint8_t *)&hw->mac_addr);
2910 /* Copying mac address to DPDK eth_dev struct */
2911 ether_addr_copy((struct ether_addr *)hw->mac_addr,
2912 &eth_dev->data->mac_addrs[0]);
2914 if (!(hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR))
2915 eth_dev->data->dev_flags |= RTE_ETH_DEV_NOLIVE_MAC_ADDR;
2917 PMD_INIT_LOG(INFO, "port %d VendorID=0x%x DeviceID=0x%x "
2918 "mac=%02x:%02x:%02x:%02x:%02x:%02x",
2919 eth_dev->data->port_id, pci_dev->id.vendor_id,
2920 pci_dev->id.device_id,
2921 hw->mac_addr[0], hw->mac_addr[1], hw->mac_addr[2],
2922 hw->mac_addr[3], hw->mac_addr[4], hw->mac_addr[5]);
2924 /* Registering LSC interrupt handler */
2925 rte_intr_callback_register(&pci_dev->intr_handle,
2926 nfp_net_dev_interrupt_handler,
2929 /* Telling the firmware about the LSC interrupt entry */
2930 nn_cfg_writeb(hw, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);
2932 /* Recording current stats counter values */
2933 nfp_net_stats_reset(eth_dev);
2938 nfp_cpp_area_free(hw->hwqueues_area);
2940 nfp_cpp_area_free(hw->ctrl_area);
2946 nfp_pf_create_dev(struct rte_pci_device *dev, int port, int ports,
2947 struct nfp_cpp *cpp, struct nfp_hwinfo *hwinfo,
2948 int phys_port, struct nfp_rtsym_table *sym_tbl, void **priv)
2950 struct rte_eth_dev *eth_dev;
2951 struct nfp_net_hw *hw;
2955 port_name = rte_zmalloc("nfp_pf_port_name", 100, 0);
2960 snprintf(port_name, 100, "%s_port%d", dev->device.name, port);
2962 strlcat(port_name, dev->device.name, 100);
2964 eth_dev = rte_eth_dev_allocate(port_name);
2969 *priv = rte_zmalloc(port_name,
2970 sizeof(struct nfp_net_adapter) * ports,
2971 RTE_CACHE_LINE_SIZE);
2973 rte_eth_dev_release_port(eth_dev);
2978 eth_dev->data->dev_private = *priv;
2981 * dev_private points to port0's dev_private because we need
2982 * to configure the vNIC BARs based on port 0 at nfp_net_init.
2983 * Then dev_private is adjusted per port.
2985 hw = (struct nfp_net_hw *)(eth_dev->data->dev_private) + port;
2987 hw->hwinfo = hwinfo;
2988 hw->sym_tbl = sym_tbl;
2989 hw->pf_port_idx = phys_port;
2992 hw->pf_multiport_enabled = 1;
2994 hw->total_ports = ports;
2996 eth_dev->device = &dev->device;
2997 rte_eth_copy_pci_info(eth_dev, dev);
2999 ret = nfp_net_init(eth_dev);
3002 rte_eth_dev_release_port(eth_dev);
3004 rte_eth_dev_probing_finish(eth_dev);
3006 rte_free(port_name);
3011 #define DEFAULT_FW_PATH "/lib/firmware/netronome"
3014 nfp_fw_upload(struct rte_pci_device *dev, struct nfp_nsp *nsp, char *card)
3016 struct nfp_cpp *cpp = nsp->cpp;
3021 struct stat file_stat;
3024 /* Looking for firmware file in order of priority */
3026 /* First try to find a firmware image specific for this device */
3027 snprintf(serial, sizeof(serial),
3028 "serial-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x",
3029 cpp->serial[0], cpp->serial[1], cpp->serial[2], cpp->serial[3],
3030 cpp->serial[4], cpp->serial[5], cpp->interface >> 8,
3031 cpp->interface & 0xff);
3033 snprintf(fw_name, sizeof(fw_name), "%s/%s.nffw", DEFAULT_FW_PATH,
3036 PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name);
3037 fw_f = open(fw_name, O_RDONLY);
3041 /* Then try the PCI name */
3042 snprintf(fw_name, sizeof(fw_name), "%s/pci-%s.nffw", DEFAULT_FW_PATH,
3045 PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name);
3046 fw_f = open(fw_name, O_RDONLY);
3050 /* Finally try the card type and media */
3051 snprintf(fw_name, sizeof(fw_name), "%s/%s", DEFAULT_FW_PATH, card);
3052 PMD_DRV_LOG(DEBUG, "Trying with fw file: %s", fw_name);
3053 fw_f = open(fw_name, O_RDONLY);
3055 PMD_DRV_LOG(INFO, "Firmware file %s not found.", fw_name);
3060 if (fstat(fw_f, &file_stat) < 0) {
3061 PMD_DRV_LOG(INFO, "Firmware file %s size is unknown", fw_name);
3066 fsize = file_stat.st_size;
3067 PMD_DRV_LOG(INFO, "Firmware file found at %s with size: %" PRIu64 "",
3068 fw_name, (uint64_t)fsize);
3070 fw_buf = malloc((size_t)fsize);
3072 PMD_DRV_LOG(INFO, "malloc failed for fw buffer");
3076 memset(fw_buf, 0, fsize);
3078 bytes = read(fw_f, fw_buf, fsize);
3079 if (bytes != fsize) {
3080 PMD_DRV_LOG(INFO, "Reading fw to buffer failed."
3081 "Just %" PRIu64 " of %" PRIu64 " bytes read",
3082 (uint64_t)bytes, (uint64_t)fsize);
3088 PMD_DRV_LOG(INFO, "Uploading the firmware ...");
3089 nfp_nsp_load_fw(nsp, fw_buf, bytes);
3090 PMD_DRV_LOG(INFO, "Done");
3099 nfp_fw_setup(struct rte_pci_device *dev, struct nfp_cpp *cpp,
3100 struct nfp_eth_table *nfp_eth_table, struct nfp_hwinfo *hwinfo)
3102 struct nfp_nsp *nsp;
3103 const char *nfp_fw_model;
3104 char card_desc[100];
3107 nfp_fw_model = nfp_hwinfo_lookup(hwinfo, "assembly.partno");
3110 PMD_DRV_LOG(INFO, "firmware model found: %s", nfp_fw_model);
3112 PMD_DRV_LOG(ERR, "firmware model NOT found");
3116 if (nfp_eth_table->count == 0 || nfp_eth_table->count > 8) {
3117 PMD_DRV_LOG(ERR, "NFP ethernet table reports a wrong number of ports: %u",
3118 nfp_eth_table->count);
3122 PMD_DRV_LOG(INFO, "NFP ethernet port table reports %u ports",
3123 nfp_eth_table->count);
3125 PMD_DRV_LOG(INFO, "Port speed: %u", nfp_eth_table->ports[0].speed);
3127 snprintf(card_desc, sizeof(card_desc), "nic_%s_%dx%d.nffw",
3128 nfp_fw_model, nfp_eth_table->count,
3129 nfp_eth_table->ports[0].speed / 1000);
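/*
 * Illustrative name (hypothetical part number): an "AMDA0099-0001" assembly
 * with 2 ports at 25G yields card_desc = "nic_AMDA0099-0001_2x25.nffw",
 * which nfp_fw_upload() tries under DEFAULT_FW_PATH as the last fallback
 * after the serial-number and PCI-name specific images.
 */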
3131 nsp = nfp_nsp_open(cpp);
3133 PMD_DRV_LOG(ERR, "NFP error when obtaining NSP handle");
3137 nfp_nsp_device_soft_reset(nsp);
3138 err = nfp_fw_upload(dev, nsp, card_desc);
3144 static int nfp_pf_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
3145 struct rte_pci_device *dev)
3147 struct nfp_cpp *cpp;
3148 struct nfp_hwinfo *hwinfo;
3149 struct nfp_rtsym_table *sym_tbl;
3150 struct nfp_eth_table *nfp_eth_table = NULL;
3161 * When the device is bound to UIO it could be used, by mistake,
3162 * by two DPDK apps, and the UIO driver does not prevent it. This
3163 * could lead to a serious problem when configuring the NFP CPP
3164 * interface. Here we avoid this by telling the CPP init code to
3165 * use a lock file if UIO is being used.
3167 if (dev->kdrv == RTE_KDRV_VFIO)
3168 cpp = nfp_cpp_from_device_name(dev, 0);
3170 cpp = nfp_cpp_from_device_name(dev, 1);
3173 PMD_DRV_LOG(ERR, "A CPP handle can not be obtained");
3178 hwinfo = nfp_hwinfo_read(cpp);
3180 PMD_DRV_LOG(ERR, "Error reading hwinfo table");
3184 nfp_eth_table = nfp_eth_read_ports(cpp);
3185 if (!nfp_eth_table) {
3186 PMD_DRV_LOG(ERR, "Error reading NFP ethernet table");
3190 if (nfp_fw_setup(dev, cpp, nfp_eth_table, hwinfo)) {
3191 PMD_DRV_LOG(INFO, "Error when uploading firmware");
3196 /* Now the symbol table should be there */
3197 sym_tbl = nfp_rtsym_table_read(cpp);
3199 PMD_DRV_LOG(ERR, "Something is wrong with the firmware"
3205 total_ports = nfp_rtsym_read_le(sym_tbl, "nfd_cfg_pf0_num_ports", &err);
3206 if (total_ports != (int)nfp_eth_table->count) {
3207 PMD_DRV_LOG(ERR, "Inconsistent number of ports");
3211 PMD_INIT_LOG(INFO, "Total pf ports: %d", total_ports);
3213 if (total_ports <= 0 || total_ports > 8) {
3214 PMD_DRV_LOG(ERR, "nfd_cfg_pf0_num_ports symbol with wrong value");
3219 for (i = 0; i < total_ports; i++) {
3220 ret = nfp_pf_create_dev(dev, i, total_ports, cpp, hwinfo,
3221 nfp_eth_table->ports[i].index,
3228 free(nfp_eth_table);
3232 int nfp_logtype_init;
3233 int nfp_logtype_driver;
3235 static const struct rte_pci_id pci_id_nfp_pf_net_map[] = {
3237 RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
3238 PCI_DEVICE_ID_NFP4000_PF_NIC)
3241 RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
3242 PCI_DEVICE_ID_NFP6000_PF_NIC)
3249 static const struct rte_pci_id pci_id_nfp_vf_net_map[] = {
3251 RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
3252 PCI_DEVICE_ID_NFP6000_VF_NIC)
3259 static int eth_nfp_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
3260 struct rte_pci_device *pci_dev)
3262 return rte_eth_dev_pci_generic_probe(pci_dev,
3263 sizeof(struct nfp_net_adapter), nfp_net_init);
3266 static int eth_nfp_pci_remove(struct rte_pci_device *pci_dev)
3268 struct rte_eth_dev *eth_dev;
3269 struct nfp_net_hw *hw, *hwport0;
3272 eth_dev = rte_eth_dev_allocated(pci_dev->device.name);
3273 if ((pci_dev->id.device_id == PCI_DEVICE_ID_NFP4000_PF_NIC) ||
3274 (pci_dev->id.device_id == PCI_DEVICE_ID_NFP6000_PF_NIC)) {
3275 port = get_pf_port_number(eth_dev->data->name);
3277 * hotplug is not possible with a multiport PF, although freeing
3278 * data structures can be done for the first port.
3282 hwport0 = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
3283 hw = &hwport0[port];
3284 nfp_cpp_area_free(hw->ctrl_area);
3285 nfp_cpp_area_free(hw->hwqueues_area);
3288 nfp_cpp_free(hw->cpp);
3290 hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
3292 /* hotplug is not possible with multiport PF */
3293 if (hw->pf_multiport_enabled)
3295 return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
3298 static struct rte_pci_driver rte_nfp_net_pf_pmd = {
3299 .id_table = pci_id_nfp_pf_net_map,
3300 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
3301 RTE_PCI_DRV_IOVA_AS_VA,
3302 .probe = nfp_pf_pci_probe,
3303 .remove = eth_nfp_pci_remove,
3306 static struct rte_pci_driver rte_nfp_net_vf_pmd = {
3307 .id_table = pci_id_nfp_vf_net_map,
3308 .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
3309 RTE_PCI_DRV_IOVA_AS_VA,
3310 .probe = eth_nfp_pci_probe,
3311 .remove = eth_nfp_pci_remove,
3314 RTE_PMD_REGISTER_PCI(net_nfp_pf, rte_nfp_net_pf_pmd);
3315 RTE_PMD_REGISTER_PCI(net_nfp_vf, rte_nfp_net_vf_pmd);
3316 RTE_PMD_REGISTER_PCI_TABLE(net_nfp_pf, pci_id_nfp_pf_net_map);
3317 RTE_PMD_REGISTER_PCI_TABLE(net_nfp_vf, pci_id_nfp_vf_net_map);
3318 RTE_PMD_REGISTER_KMOD_DEP(net_nfp_pf, "* igb_uio | uio_pci_generic | vfio");
3319 RTE_PMD_REGISTER_KMOD_DEP(net_nfp_vf, "* igb_uio | uio_pci_generic | vfio");
3321 RTE_INIT(nfp_init_log)
3323 nfp_logtype_init = rte_log_register("pmd.net.nfp.init");
3324 if (nfp_logtype_init >= 0)
3325 rte_log_set_level(nfp_logtype_init, RTE_LOG_NOTICE);
3326 nfp_logtype_driver = rte_log_register("pmd.net.nfp.driver");
3327 if (nfp_logtype_driver >= 0)
3328 rte_log_set_level(nfp_logtype_driver, RTE_LOG_NOTICE);
3332 * c-file-style: "Linux"
3333 * indent-tabs-mode: t