/*
 * Copyright (c) 2014, 2015 Netronome Systems, Inc.
 *
 * Small portions derived from code Copyright(c) 2010-2015 Intel Corporation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *  this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *  notice, this list of conditions and the following disclaimer in the
 *  documentation and/or other materials provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its
 *  contributors may be used to endorse or promote products derived from this
 *  software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * vim:shiftwidth=8:noexpandtab
 *
 * @file dpdk/pmd/nfp_net.c
 *
 * Netronome vNIC DPDK Poll-Mode Driver: Main entry point
 */

#include <math.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_ethdev.h>
#include <rte_dev.h>
#include <rte_ether.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_mempool.h>
#include <rte_version.h>
#include <rte_string_fns.h>
#include <rte_alarm.h>
#include <rte_spinlock.h>

#include "nfp_net_pmd.h"
#include "nfp_net_logs.h"
#include "nfp_net_ctrl.h"

/* Prototypes */
static void nfp_net_close(struct rte_eth_dev *dev);
static int nfp_net_configure(struct rte_eth_dev *dev);
static void nfp_net_dev_interrupt_handler(struct rte_intr_handle *handle,
					  void *param);
static void nfp_net_dev_interrupt_delayed_handler(void *param);
static int nfp_net_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
static void nfp_net_infos_get(struct rte_eth_dev *dev,
			      struct rte_eth_dev_info *dev_info);
static int nfp_net_init(struct rte_eth_dev *eth_dev);
static int nfp_net_link_update(struct rte_eth_dev *dev, int wait_to_complete);
static void nfp_net_promisc_enable(struct rte_eth_dev *dev);
static void nfp_net_promisc_disable(struct rte_eth_dev *dev);
static int nfp_net_rx_fill_freelist(struct nfp_net_rxq *rxq);
static uint32_t nfp_net_rx_queue_count(struct rte_eth_dev *dev,
				       uint16_t queue_idx);
static uint16_t nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
				  uint16_t nb_pkts);
static void nfp_net_rx_queue_release(void *rxq);
static int nfp_net_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
				  uint16_t nb_desc, unsigned int socket_id,
				  const struct rte_eth_rxconf *rx_conf,
				  struct rte_mempool *mp);
static int nfp_net_tx_free_bufs(struct nfp_net_txq *txq);
static void nfp_net_tx_queue_release(void *txq);
static int nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
				  uint16_t nb_desc, unsigned int socket_id,
				  const struct rte_eth_txconf *tx_conf);
static int nfp_net_start(struct rte_eth_dev *dev);
static void nfp_net_stats_get(struct rte_eth_dev *dev,
			      struct rte_eth_stats *stats);
static void nfp_net_stats_reset(struct rte_eth_dev *dev);
static void nfp_net_stop(struct rte_eth_dev *dev);
static uint16_t nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
				  uint16_t nb_pkts);

/*
 * The offset of the queue controller queues in the PCIe Target. These
 * happen to be at the same offset on the NFP6000 and the NFP3200 so
 * we use a single macro here.
 */
#define NFP_PCIE_QUEUE(_q) (0x80000 + (0x800 * ((_q) & 0xff)))
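
/*
 * Worked example (illustrative only): queue 3 maps to PCIe target offset
 * 0x80000 + 0x800 * 3 = 0x81800, and the "& 0xff" keeps queue numbers
 * within the 256 queues the window can address.
 */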

/* Maximum value which can be added to a queue with one transaction */
#define NFP_QCP_MAX_ADD	0x7f

#define RTE_MBUF_DMA_ADDR_DEFAULT(mb) \
	(uint64_t)((mb)->buf_physaddr + RTE_PKTMBUF_HEADROOM)
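
/*
 * Worked example (illustrative only): with the default 128-byte
 * RTE_PKTMBUF_HEADROOM, an mbuf whose buffer sits at physical address
 * 0x100000 gets DMA address 0x100080, so the NIC writes packet data
 * exactly where the mbuf data area starts.
 */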

/* nfp_qcp_ptr - Read or Write Pointer of a queue */
enum nfp_qcp_ptr {
	NFP_QCP_READ_PTR = 0,
	NFP_QCP_WRITE_PTR,
};

/*
 * nfp_qcp_ptr_add - Add the value to the selected pointer of a queue
 * @q: Base address for queue structure
 * @ptr: Add to the Read or Write pointer
 * @val: Value to add to the queue pointer
 *
 * If @val is greater than @NFP_QCP_MAX_ADD multiple writes are performed.
 */
static inline void
nfp_qcp_ptr_add(uint8_t *q, enum nfp_qcp_ptr ptr, uint32_t val)
{
	uint32_t off;

	if (ptr == NFP_QCP_READ_PTR)
		off = NFP_QCP_QUEUE_ADD_RPTR;
	else
		off = NFP_QCP_QUEUE_ADD_WPTR;

	while (val > NFP_QCP_MAX_ADD) {
		nn_writel(rte_cpu_to_le_32(NFP_QCP_MAX_ADD), q + off);
		val -= NFP_QCP_MAX_ADD;
	}

	nn_writel(rte_cpu_to_le_32(val), q + off);
}
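
/*
 * Usage sketch (hypothetical values): advancing the freelist write pointer
 * of an RX queue by 200 entries splits into two transactions, 127 + 73,
 * since NFP_QCP_MAX_ADD is 0x7f (127):
 *
 *	nfp_qcp_ptr_add(rxq->qcp_fl, NFP_QCP_WRITE_PTR, 200);
 */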

/*
 * nfp_qcp_read - Read the current Read/Write pointer value for a queue
 * @q: Base address for queue structure
 * @ptr: Read or Write pointer
 */
static inline uint32_t
nfp_qcp_read(uint8_t *q, enum nfp_qcp_ptr ptr)
{
	uint32_t off;
	uint32_t val;

	if (ptr == NFP_QCP_READ_PTR)
		off = NFP_QCP_QUEUE_STS_LO;
	else
		off = NFP_QCP_QUEUE_STS_HI;

	val = rte_cpu_to_le_32(nn_readl(q + off));

	if (ptr == NFP_QCP_READ_PTR)
		return val & NFP_QCP_QUEUE_STS_LO_READPTR_mask;
	else
		return val & NFP_QCP_QUEUE_STS_HI_WRITEPTR_mask;
}

/*
 * Functions to read/write from/to Config BAR
 * Performs any endian conversion necessary.
 */
static inline uint8_t
nn_cfg_readb(struct nfp_net_hw *hw, int off)
{
	return nn_readb(hw->ctrl_bar + off);
}

static inline void
nn_cfg_writeb(struct nfp_net_hw *hw, int off, uint8_t val)
{
	nn_writeb(val, hw->ctrl_bar + off);
}

static inline uint32_t
nn_cfg_readl(struct nfp_net_hw *hw, int off)
{
	return rte_le_to_cpu_32(nn_readl(hw->ctrl_bar + off));
}

static inline void
nn_cfg_writel(struct nfp_net_hw *hw, int off, uint32_t val)
{
	nn_writel(rte_cpu_to_le_32(val), hw->ctrl_bar + off);
}

static inline uint64_t
nn_cfg_readq(struct nfp_net_hw *hw, int off)
{
	return rte_le_to_cpu_64(nn_readq(hw->ctrl_bar + off));
}

static inline void
nn_cfg_writeq(struct nfp_net_hw *hw, int off, uint64_t val)
{
	nn_writeq(rte_cpu_to_le_64(val), hw->ctrl_bar + off);
}
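
/*
 * Usage sketch (illustrative only): the helpers take offsets from
 * nfp_net_ctrl.h and hide the little-endian conversion, e.g. checking a
 * capability bit before requesting it:
 *
 *	uint32_t cap = nn_cfg_readl(hw, NFP_NET_CFG_CAP);
 *
 *	if (cap & NFP_NET_CFG_CTRL_PROMISC)
 *		nn_cfg_writel(hw, NFP_NET_CFG_CTRL,
 *			      hw->ctrl | NFP_NET_CFG_CTRL_PROMISC);
 */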

/* Creating memzone for hardware rings. */
static const struct rte_memzone *
ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name,
		      uint16_t queue_id, uint32_t ring_size, int socket_id)
{
	char z_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;

	snprintf(z_name, sizeof(z_name), "%s_%s_%d_%d",
		 dev->driver->pci_drv.driver.name,
		 ring_name, dev->data->port_id, queue_id);

	mz = rte_memzone_lookup(z_name);
	if (mz)
		return mz;

	return rte_memzone_reserve_aligned(z_name, ring_size, socket_id, 0,
					   NFP_MEMZONE_ALIGN);
}

/*
 * Atomically reads link status information from global structure rte_eth_dev.
 *
 * @param dev
 *   - Pointer to the structure rte_eth_dev to read from.
 *   - Pointer to the buffer to be saved with the link status.
 *
 * @return
 *   - On success, zero.
 *   - On failure, negative value.
 */
static inline int
nfp_net_dev_atomic_read_link_status(struct rte_eth_dev *dev,
				    struct rte_eth_link *link)
{
	struct rte_eth_link *dst = link;
	struct rte_eth_link *src = &dev->data->dev_link;

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
				*(uint64_t *)src) == 0)
		return -1;

	return 0;
}

/*
 * Atomically writes the link status information into global
 * structure rte_eth_dev.
 *
 * @param dev
 *   - Pointer to the structure rte_eth_dev to write to.
 *   - Pointer to the buffer holding the link status to write.
 *
 * @return
 *   - On success, zero.
 *   - On failure, negative value.
 */
static inline int
nfp_net_dev_atomic_write_link_status(struct rte_eth_dev *dev,
				     struct rte_eth_link *link)
{
	struct rte_eth_link *dst = &dev->data->dev_link;
	struct rte_eth_link *src = link;

	if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
				*(uint64_t *)src) == 0)
		return -1;

	return 0;
}

static void
nfp_net_rx_queue_release_mbufs(struct nfp_net_rxq *rxq)
{
	unsigned i;

	if (rxq->rxbufs == NULL)
		return;

	for (i = 0; i < rxq->rx_count; i++) {
		if (rxq->rxbufs[i].mbuf) {
			rte_pktmbuf_free_seg(rxq->rxbufs[i].mbuf);
			rxq->rxbufs[i].mbuf = NULL;
		}
	}
}

static void
nfp_net_rx_queue_release(void *rx_queue)
{
	struct nfp_net_rxq *rxq = rx_queue;

	if (rxq) {
		nfp_net_rx_queue_release_mbufs(rxq);
		rte_free(rxq->rxbufs);
		rte_free(rxq);
	}
}

static void
nfp_net_reset_rx_queue(struct nfp_net_rxq *rxq)
{
	nfp_net_rx_queue_release_mbufs(rxq);
	rxq->rd_p = 0;
	rxq->nb_rx_hold = 0;
}

static void
nfp_net_tx_queue_release_mbufs(struct nfp_net_txq *txq)
{
	unsigned i;

	if (txq->txbufs == NULL)
		return;

	for (i = 0; i < txq->tx_count; i++) {
		if (txq->txbufs[i].mbuf) {
			rte_pktmbuf_free_seg(txq->txbufs[i].mbuf);
			txq->txbufs[i].mbuf = NULL;
		}
	}
}

static void
nfp_net_tx_queue_release(void *tx_queue)
{
	struct nfp_net_txq *txq = tx_queue;

	if (txq) {
		nfp_net_tx_queue_release_mbufs(txq);
		rte_free(txq->txbufs);
		rte_free(txq);
	}
}

static void
nfp_net_reset_tx_queue(struct nfp_net_txq *txq)
{
	nfp_net_tx_queue_release_mbufs(txq);
	txq->wr_p = 0;
	txq->rd_p = 0;
	txq->tail = 0;
	txq->qcp_rd_p = 0;
}

static int
__nfp_net_reconfig(struct nfp_net_hw *hw, uint32_t update)
{
	int cnt;
	uint32_t new;
	struct timespec wait;

	PMD_DRV_LOG(DEBUG, "Writing to the configuration queue (%p)...\n",
		    hw->qcp_cfg);

	if (hw->qcp_cfg == NULL)
		rte_panic("Bad configuration queue pointer\n");

	nfp_qcp_ptr_add(hw->qcp_cfg, NFP_QCP_WRITE_PTR, 1);

	wait.tv_sec = 0;
	wait.tv_nsec = 1000000;

	PMD_DRV_LOG(DEBUG, "Polling for update ack...\n");

	/* Poll update field, waiting for NFP to ack the config */
	for (cnt = 0; ; cnt++) {
		new = nn_cfg_readl(hw, NFP_NET_CFG_UPDATE);
		if (new == 0)
			break;
		if (new & NFP_NET_CFG_UPDATE_ERR) {
			PMD_INIT_LOG(ERR, "Reconfig error: 0x%08x\n", new);
			return -1;
		}
		if (cnt >= NFP_NET_POLL_TIMEOUT) {
			PMD_INIT_LOG(ERR, "Reconfig timeout for 0x%08x after"
					  " %dms\n", update, cnt);
			rte_panic("Exiting\n");
		}
		nanosleep(&wait, 0); /* wait for 1ms */
	}

	PMD_DRV_LOG(DEBUG, "Ack DONE\n");
	return 0;
}

/*
 * Reconfigure the NIC
 * @hw: device to reconfigure
 * @ctrl: The value for the ctrl field in the BAR config
 * @update: The value for the update field in the BAR config
 *
 * Write the update word to the BAR and ping the reconfig queue. Then poll
 * until the firmware has acknowledged the update by zeroing the update word.
 */
static int
nfp_net_reconfig(struct nfp_net_hw *hw, uint32_t ctrl, uint32_t update)
{
	uint32_t err;

	PMD_DRV_LOG(DEBUG, "nfp_net_reconfig: ctrl=%08x update=%08x\n",
		    ctrl, update);

	rte_spinlock_lock(&hw->reconfig_lock);

	nn_cfg_writel(hw, NFP_NET_CFG_CTRL, ctrl);
	nn_cfg_writel(hw, NFP_NET_CFG_UPDATE, update);

	rte_wmb();

	err = __nfp_net_reconfig(hw, update);

	rte_spinlock_unlock(&hw->reconfig_lock);

	if (!err)
		return 0;

	/*
	 * Reconfig errors returned here are ones the caller can handle;
	 * otherwise __nfp_net_reconfig has already called rte_panic.
	 */
	PMD_INIT_LOG(ERR, "Error nfp_net reconfig for ctrl: %x update: %x\n",
		     ctrl, update);

	return -EIO;
}
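
/*
 * Usage sketch (illustrative only): callers commit hw->ctrl only after a
 * successful reconfig, as the promiscuous helpers below do:
 *
 *	new_ctrl = hw->ctrl | NFP_NET_CFG_CTRL_PROMISC;
 *	if (nfp_net_reconfig(hw, new_ctrl, NFP_NET_CFG_UPDATE_GEN) < 0)
 *		return;
 *	hw->ctrl = new_ctrl;
 */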

/*
 * Configure an Ethernet device. This function must be invoked first
 * before any other function in the Ethernet API. This function can
 * also be re-invoked when a device is in the stopped state.
 */
static int
nfp_net_configure(struct rte_eth_dev *dev)
{
	struct rte_eth_conf *dev_conf;
	struct rte_eth_rxmode *rxmode;
	struct rte_eth_txmode *txmode;
	uint32_t new_ctrl = 0;
	uint32_t update = 0;
	struct nfp_net_hw *hw;

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/*
	 * A DPDK app sends info about how many queues to use and how
	 * those queues need to be configured. This is used by the
	 * DPDK core and it makes sure no more queues than those
	 * advertised by the driver are requested. This function is
	 * called after that internal process.
	 */

	PMD_INIT_LOG(DEBUG, "Configure\n");

	dev_conf = &dev->data->dev_conf;
	rxmode = &dev_conf->rxmode;
	txmode = &dev_conf->txmode;

	/* Checking TX mode */
	if (txmode->mq_mode) {
		PMD_INIT_LOG(INFO, "TX mq_mode DCB and VMDq not supported\n");
		return -EINVAL;
	}

	/* Checking RX mode */
	if (rxmode->mq_mode & ETH_MQ_RX_RSS) {
		if (hw->cap & NFP_NET_CFG_CTRL_RSS) {
			update = NFP_NET_CFG_UPDATE_RSS;
			new_ctrl = NFP_NET_CFG_CTRL_RSS;
		} else {
			PMD_INIT_LOG(INFO, "RSS not supported\n");
			return -EINVAL;
		}
	}

	if (rxmode->split_hdr_size) {
		PMD_INIT_LOG(INFO, "rxmode does not support split header\n");
		return -EINVAL;
	}

	if (rxmode->hw_ip_checksum) {
		if (hw->cap & NFP_NET_CFG_CTRL_RXCSUM) {
			new_ctrl |= NFP_NET_CFG_CTRL_RXCSUM;
		} else {
			PMD_INIT_LOG(INFO, "RXCSUM not supported\n");
			return -EINVAL;
		}
	}

	if (rxmode->hw_vlan_filter) {
		PMD_INIT_LOG(INFO, "VLAN filter not supported\n");
		return -EINVAL;
	}

	if (rxmode->hw_vlan_strip) {
		if (hw->cap & NFP_NET_CFG_CTRL_RXVLAN) {
			new_ctrl |= NFP_NET_CFG_CTRL_RXVLAN;
		} else {
			PMD_INIT_LOG(INFO, "hw vlan strip not supported\n");
			return -EINVAL;
		}
	}

	if (rxmode->hw_vlan_extend) {
		PMD_INIT_LOG(INFO, "VLAN extended not supported\n");
		return -EINVAL;
	}

	/* Supporting VLAN insertion by default */
	if (hw->cap & NFP_NET_CFG_CTRL_TXVLAN)
		new_ctrl |= NFP_NET_CFG_CTRL_TXVLAN;

	if (rxmode->jumbo_frame)
		hw->mtu = rxmode->max_rx_pkt_len;

	if (!rxmode->hw_strip_crc)
		PMD_INIT_LOG(INFO, "HW does strip CRC and it is not configurable\n");

	if (rxmode->enable_scatter) {
		PMD_INIT_LOG(INFO, "Scatter not supported\n");
		return -EINVAL;
	}

	if (!new_ctrl)
		return 0;

	update |= NFP_NET_CFG_UPDATE_GEN;

	nn_cfg_writel(hw, NFP_NET_CFG_CTRL, new_ctrl);
	if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
		return -EIO;

	hw->ctrl = new_ctrl;

	return 0;
}
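
/*
 * Application-side sketch (hypothetical values): a minimal rte_eth_conf
 * this function accepts, requesting RSS and RX checksum offload:
 *
 *	struct rte_eth_conf conf = {
 *		.rxmode = {
 *			.mq_mode = ETH_MQ_RX_RSS,
 *			.hw_ip_checksum = 1,
 *		},
 *	};
 *	rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 */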

static void
nfp_net_enable_queues(struct rte_eth_dev *dev)
{
	struct nfp_net_hw *hw;
	uint64_t enabled_queues = 0;
	int i;

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* Enabling the required TX queues in the device */
	for (i = 0; i < dev->data->nb_tx_queues; i++)
		enabled_queues |= (1 << i);

	nn_cfg_writeq(hw, NFP_NET_CFG_TXRS_ENABLE, enabled_queues);

	enabled_queues = 0;

	/* Enabling the required RX queues in the device */
	for (i = 0; i < dev->data->nb_rx_queues; i++)
		enabled_queues |= (1 << i);

	nn_cfg_writeq(hw, NFP_NET_CFG_RXRS_ENABLE, enabled_queues);
}

static void
nfp_net_disable_queues(struct rte_eth_dev *dev)
{
	struct nfp_net_hw *hw;
	uint32_t new_ctrl, update = 0;

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	nn_cfg_writeq(hw, NFP_NET_CFG_TXRS_ENABLE, 0);
	nn_cfg_writeq(hw, NFP_NET_CFG_RXRS_ENABLE, 0);

	new_ctrl = hw->ctrl & ~NFP_NET_CFG_CTRL_ENABLE;
	update = NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING |
		 NFP_NET_CFG_UPDATE_MSIX;

	if (hw->cap & NFP_NET_CFG_CTRL_RINGCFG)
		new_ctrl &= ~NFP_NET_CFG_CTRL_RINGCFG;

	/* If the reconfig fails, leave the hw state unchanged */
	if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
		return;

	hw->ctrl = new_ctrl;
}

static int
nfp_net_rx_freelist_setup(struct rte_eth_dev *dev)
{
	int i;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		if (nfp_net_rx_fill_freelist(dev->data->rx_queues[i]) < 0)
			return -1;
	}
	return 0;
}

static void
nfp_net_params_setup(struct nfp_net_hw *hw)
{
	nn_cfg_writel(hw, NFP_NET_CFG_MTU, hw->mtu);
	nn_cfg_writel(hw, NFP_NET_CFG_FLBUFSZ, hw->flbufsz);
}

static void
nfp_net_cfg_queue_setup(struct nfp_net_hw *hw)
{
	hw->qcp_cfg = hw->tx_bar + NFP_QCP_QUEUE_ADDR_SZ;
}

static void nfp_net_read_mac(struct nfp_net_hw *hw)
{
	uint32_t tmp;

	tmp = rte_be_to_cpu_32(nn_cfg_readl(hw, NFP_NET_CFG_MACADDR));
	memcpy(&hw->mac_addr[0], &tmp, 4);

	tmp = rte_be_to_cpu_32(nn_cfg_readl(hw, NFP_NET_CFG_MACADDR + 4));
	memcpy(&hw->mac_addr[4], &tmp, 2);
}

static int
nfp_net_start(struct rte_eth_dev *dev)
{
	uint32_t new_ctrl, update = 0;
	struct nfp_net_hw *hw;
	int ret;

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_LOG(DEBUG, "Start\n");

	/* Disabling queues just in case... */
	nfp_net_disable_queues(dev);

	/* Writing configuration parameters in the device */
	nfp_net_params_setup(hw);

	/* Enabling the required queues in the device */
	nfp_net_enable_queues(dev);

	/* Enable device */
	new_ctrl = hw->ctrl | NFP_NET_CFG_CTRL_ENABLE | NFP_NET_CFG_UPDATE_MSIX;
	update = NFP_NET_CFG_UPDATE_GEN | NFP_NET_CFG_UPDATE_RING;

	if (hw->cap & NFP_NET_CFG_CTRL_RINGCFG)
		new_ctrl |= NFP_NET_CFG_CTRL_RINGCFG;

	nn_cfg_writel(hw, NFP_NET_CFG_CTRL, new_ctrl);
	if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
		return -EIO;

	/*
	 * Allocating rte mbufs for configured rx queues.
	 * This requires queues being enabled before.
	 */
	if (nfp_net_rx_freelist_setup(dev) < 0) {
		ret = -ENOMEM;
		goto error;
	}

	hw->ctrl = new_ctrl;

	return 0;

error:
	/*
	 * An error returned by this function should mean the app
	 * exiting and then the system releasing all the memory
	 * allocated even memory coming from hugepages.
	 *
	 * The device could be enabled at this point with some queues
	 * ready for getting packets. This is true if the call to
	 * nfp_net_rx_freelist_setup() succeeds for some queues but
	 * fails for subsequent queues.
	 *
	 * This should make the app exit, but it is better to tell the
	 * device first.
	 */
	nfp_net_disable_queues(dev);

	return ret;
}

/* Stop device: disable rx and tx functions to allow for reconfiguring. */
static void
nfp_net_stop(struct rte_eth_dev *dev)
{
	int i;

	PMD_INIT_LOG(DEBUG, "Stop\n");

	nfp_net_disable_queues(dev);

	/* Clear queues */
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		nfp_net_reset_tx_queue(
			(struct nfp_net_txq *)dev->data->tx_queues[i]);
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		nfp_net_reset_rx_queue(
			(struct nfp_net_rxq *)dev->data->rx_queues[i]);
	}
}

/* Reset and stop device. The device can not be restarted. */
static void
nfp_net_close(struct rte_eth_dev *dev)
{
	struct nfp_net_hw *hw;

	PMD_INIT_LOG(DEBUG, "Close\n");

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/*
	 * We assume that the DPDK application is stopping all the
	 * threads/queues before calling the device close function.
	 */

	nfp_net_stop(dev);

	rte_intr_disable(&dev->pci_dev->intr_handle);
	nn_cfg_writeb(hw, NFP_NET_CFG_LSC, 0xff);

	/* unregister callback func from eal lib */
	rte_intr_callback_unregister(&dev->pci_dev->intr_handle,
				     nfp_net_dev_interrupt_handler,
				     (void *)dev);

	/*
	 * The ixgbe PMD driver disables the pcie master on the
	 * device. The i40e does not...
	 */
}

static void
nfp_net_promisc_enable(struct rte_eth_dev *dev)
{
	uint32_t new_ctrl, update = 0;
	struct nfp_net_hw *hw;

	PMD_DRV_LOG(DEBUG, "Promiscuous mode enable\n");

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (!(hw->cap & NFP_NET_CFG_CTRL_PROMISC)) {
		PMD_INIT_LOG(INFO, "Promiscuous mode not supported\n");
		return;
	}

	if (hw->ctrl & NFP_NET_CFG_CTRL_PROMISC) {
		PMD_DRV_LOG(INFO, "Promiscuous mode already enabled\n");
		return;
	}

	new_ctrl = hw->ctrl | NFP_NET_CFG_CTRL_PROMISC;
	update = NFP_NET_CFG_UPDATE_GEN;

	/*
	 * DPDK sets promiscuous mode on just after this call assuming
	 * it can not fail ...
	 */
	if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
		return;

	hw->ctrl = new_ctrl;
}

static void
nfp_net_promisc_disable(struct rte_eth_dev *dev)
{
	uint32_t new_ctrl, update = 0;
	struct nfp_net_hw *hw;

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if ((hw->ctrl & NFP_NET_CFG_CTRL_PROMISC) == 0) {
		PMD_DRV_LOG(INFO, "Promiscuous mode already disabled\n");
		return;
	}

	new_ctrl = hw->ctrl & ~NFP_NET_CFG_CTRL_PROMISC;
	update = NFP_NET_CFG_UPDATE_GEN;

	/*
	 * DPDK sets promiscuous mode off just before this call
	 * assuming it can not fail ...
	 */
	if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
		return;

	hw->ctrl = new_ctrl;
}

/*
 * return 0 means link status changed, -1 means not changed
 *
 * Wait to complete is needed as it can take up to 9 seconds to get the Link
 * status.
 */
static int
nfp_net_link_update(struct rte_eth_dev *dev, __rte_unused int wait_to_complete)
{
	struct nfp_net_hw *hw;
	struct rte_eth_link link, old;
	uint32_t nn_link_status;

	PMD_DRV_LOG(DEBUG, "Link update\n");

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	memset(&old, 0, sizeof(old));
	nfp_net_dev_atomic_read_link_status(dev, &old);

	nn_link_status = nn_cfg_readl(hw, NFP_NET_CFG_STS);

	memset(&link, 0, sizeof(struct rte_eth_link));

	if (nn_link_status & NFP_NET_CFG_STS_LINK)
		link.link_status = ETH_LINK_UP;

	link.link_duplex = ETH_LINK_FULL_DUPLEX;
	/* Other cards can limit the tx and rx rate per VF */
	link.link_speed = ETH_SPEED_NUM_40G;

	if (old.link_status != link.link_status) {
		nfp_net_dev_atomic_write_link_status(dev, &link);
		if (link.link_status)
			PMD_DRV_LOG(INFO, "NIC Link is Up\n");
		else
			PMD_DRV_LOG(INFO, "NIC Link is Down\n");
		return 0;
	}

	return -1;
}

static void
nfp_net_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	int i;
	struct nfp_net_hw *hw;
	struct rte_eth_stats nfp_dev_stats;

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* RTE_ETHDEV_QUEUE_STAT_CNTRS default value is 16 */

	memset(&nfp_dev_stats, 0, sizeof(nfp_dev_stats));

	/* reading per RX ring stats */
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
			break;

		nfp_dev_stats.q_ipackets[i] =
			nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i));

		nfp_dev_stats.q_ipackets[i] -=
			hw->eth_stats_base.q_ipackets[i];

		nfp_dev_stats.q_ibytes[i] =
			nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i) + 0x8);

		nfp_dev_stats.q_ibytes[i] -=
			hw->eth_stats_base.q_ibytes[i];
	}

	/* reading per TX ring stats */
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
			break;

		nfp_dev_stats.q_opackets[i] =
			nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i));

		nfp_dev_stats.q_opackets[i] -=
			hw->eth_stats_base.q_opackets[i];

		nfp_dev_stats.q_obytes[i] =
			nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i) + 0x8);

		nfp_dev_stats.q_obytes[i] -=
			hw->eth_stats_base.q_obytes[i];
	}

	nfp_dev_stats.ipackets =
		nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_FRAMES);

	nfp_dev_stats.ipackets -= hw->eth_stats_base.ipackets;

	nfp_dev_stats.ibytes =
		nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_OCTETS);

	nfp_dev_stats.ibytes -= hw->eth_stats_base.ibytes;

	nfp_dev_stats.opackets =
		nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_FRAMES);

	nfp_dev_stats.opackets -= hw->eth_stats_base.opackets;

	nfp_dev_stats.obytes =
		nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_OCTETS);

	nfp_dev_stats.obytes -= hw->eth_stats_base.obytes;

	/* reading general device stats */
	nfp_dev_stats.ierrors =
		nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_ERRORS);

	nfp_dev_stats.ierrors -= hw->eth_stats_base.ierrors;

	nfp_dev_stats.oerrors =
		nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_ERRORS);

	nfp_dev_stats.oerrors -= hw->eth_stats_base.oerrors;

	/* RX ring mbuf allocation failures */
	nfp_dev_stats.rx_nombuf = dev->data->rx_mbuf_alloc_failed;

	nfp_dev_stats.imissed =
		nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_DISCARDS);

	nfp_dev_stats.imissed -= hw->eth_stats_base.imissed;

	if (stats)
		memcpy(stats, &nfp_dev_stats, sizeof(*stats));
}

static void
nfp_net_stats_reset(struct rte_eth_dev *dev)
{
	int i;
	struct nfp_net_hw *hw;

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/*
	 * hw->eth_stats_base records the per counter starting point.
	 * Let's update it now.
	 */

	/* reading per RX ring stats */
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
			break;

		hw->eth_stats_base.q_ipackets[i] =
			nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i));

		hw->eth_stats_base.q_ibytes[i] =
			nn_cfg_readq(hw, NFP_NET_CFG_RXR_STATS(i) + 0x8);
	}

	/* reading per TX ring stats */
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		if (i == RTE_ETHDEV_QUEUE_STAT_CNTRS)
			break;

		hw->eth_stats_base.q_opackets[i] =
			nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i));

		hw->eth_stats_base.q_obytes[i] =
			nn_cfg_readq(hw, NFP_NET_CFG_TXR_STATS(i) + 0x8);
	}

	hw->eth_stats_base.ipackets =
		nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_FRAMES);

	hw->eth_stats_base.ibytes =
		nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_OCTETS);

	hw->eth_stats_base.opackets =
		nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_FRAMES);

	hw->eth_stats_base.obytes =
		nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_OCTETS);

	/* reading general device stats */
	hw->eth_stats_base.ierrors =
		nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_ERRORS);

	hw->eth_stats_base.oerrors =
		nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_ERRORS);

	/* RX ring mbuf allocation failures */
	dev->data->rx_mbuf_alloc_failed = 0;

	hw->eth_stats_base.imissed =
		nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_DISCARDS);
}

static void
nfp_net_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct nfp_net_hw *hw;

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	dev_info->driver_name = dev->driver->pci_drv.driver.name;
	dev_info->max_rx_queues = (uint16_t)hw->max_rx_queues;
	dev_info->max_tx_queues = (uint16_t)hw->max_tx_queues;
	dev_info->min_rx_bufsize = ETHER_MIN_MTU;
	dev_info->max_rx_pktlen = hw->max_mtu;
	/* Next should change when PF support is implemented */
	dev_info->max_mac_addrs = 1;

	if (hw->cap & NFP_NET_CFG_CTRL_RXVLAN)
		dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;

	if (hw->cap & NFP_NET_CFG_CTRL_RXCSUM)
		dev_info->rx_offload_capa |= DEV_RX_OFFLOAD_IPV4_CKSUM |
					     DEV_RX_OFFLOAD_UDP_CKSUM |
					     DEV_RX_OFFLOAD_TCP_CKSUM;

	if (hw->cap & NFP_NET_CFG_CTRL_TXVLAN)
		dev_info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT;

	if (hw->cap & NFP_NET_CFG_CTRL_TXCSUM)
		dev_info->tx_offload_capa |= DEV_TX_OFFLOAD_IPV4_CKSUM |
					     DEV_TX_OFFLOAD_UDP_CKSUM |
					     DEV_TX_OFFLOAD_TCP_CKSUM;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_thresh = {
			.pthresh = DEFAULT_RX_PTHRESH,
			.hthresh = DEFAULT_RX_HTHRESH,
			.wthresh = DEFAULT_RX_WTHRESH,
		},
		.rx_free_thresh = DEFAULT_RX_FREE_THRESH,
		.rx_drop_en = 0,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_thresh = {
			.pthresh = DEFAULT_TX_PTHRESH,
			.hthresh = DEFAULT_TX_HTHRESH,
			.wthresh = DEFAULT_TX_WTHRESH,
		},
		.tx_free_thresh = DEFAULT_TX_FREE_THRESH,
		.tx_rs_thresh = DEFAULT_TX_RSBIT_THRESH,
		.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
			     ETH_TXQ_FLAGS_NOOFFLOADS,
	};

	dev_info->reta_size = NFP_NET_CFG_RSS_ITBL_SZ;
	dev_info->hash_key_size = NFP_NET_CFG_RSS_KEY_SZ;

	dev_info->speed_capa = ETH_LINK_SPEED_40G | ETH_LINK_SPEED_100G;
}

static const uint32_t *
nfp_net_supported_ptypes_get(struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		/* refers to nfp_net_set_hash() */
		RTE_PTYPE_INNER_L3_IPV4,
		RTE_PTYPE_INNER_L3_IPV6,
		RTE_PTYPE_INNER_L3_IPV6_EXT,
		RTE_PTYPE_INNER_L4_MASK,
		RTE_PTYPE_UNKNOWN
	};

	if (dev->rx_pkt_burst == nfp_net_recv_pkts)
		return ptypes;
	return NULL;
}

static uint32_t
nfp_net_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_idx)
{
	struct nfp_net_rxq *rxq;
	struct nfp_net_rx_desc *rxds;
	uint32_t idx;
	uint32_t count;

	rxq = (struct nfp_net_rxq *)dev->data->rx_queues[queue_idx];

	if (rxq == NULL) {
		PMD_INIT_LOG(ERR, "Bad queue: %u\n", queue_idx);
		return 0;
	}

	idx = rxq->rd_p % rxq->rx_count;
	rxds = &rxq->rxds[idx];

	count = 0;

	/*
	 * Other PMDs are just checking the DD bit in intervals of 4
	 * descriptors and counting all four if the first has the DD
	 * bit on. Of course, this is not accurate but can be good for
	 * performance. But ideally that should be done in descriptor
	 * chunks belonging to the same cache line.
	 */

	while (count < rxq->rx_count) {
		rxds = &rxq->rxds[idx];
		if ((rxds->rxd.meta_len_dd & PCIE_DESC_RX_DD) == 0)
			break;

		count++;
		idx++;

		/* Wrapping? */
		if ((idx) == rxq->rx_count)
			idx = 0;
	}

	return count;
}

static void
nfp_net_dev_link_status_print(struct rte_eth_dev *dev)
{
	struct rte_eth_link link;

	memset(&link, 0, sizeof(link));
	nfp_net_dev_atomic_read_link_status(dev, &link);
	if (link.link_status)
		RTE_LOG(INFO, PMD, "Port %d: Link Up - speed %u Mbps - %s\n",
			(int)(dev->data->port_id), (unsigned)link.link_speed,
			link.link_duplex == ETH_LINK_FULL_DUPLEX
			? "full-duplex" : "half-duplex");
	else
		RTE_LOG(INFO, PMD, " Port %d: Link Down\n",
			(int)(dev->data->port_id));

	RTE_LOG(INFO, PMD, "PCI Address: %04d:%02d:%02d:%d\n",
		dev->pci_dev->addr.domain, dev->pci_dev->addr.bus,
		dev->pci_dev->addr.devid, dev->pci_dev->addr.function);
}

/* Interrupt configuration and handling */

/*
 * nfp_net_irq_unmask - Unmask an interrupt
 *
 * If MSI-X auto-masking is enabled clear the mask bit, otherwise
 * clear the ICR for the entry.
 */
static void
nfp_net_irq_unmask(struct rte_eth_dev *dev)
{
	struct nfp_net_hw *hw;

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (hw->ctrl & NFP_NET_CFG_CTRL_MSIXAUTO) {
		/* If MSI-X auto-masking is used, clear the entry */
		rte_wmb();
		rte_intr_enable(&dev->pci_dev->intr_handle);
	} else {
		/* Make sure all updates are written before un-masking */
		rte_wmb();
		nn_cfg_writeb(hw, NFP_NET_CFG_ICR(NFP_NET_IRQ_LSC_IDX),
			      NFP_NET_CFG_ICR_UNMASKED);
	}
}

static void
nfp_net_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
			      void *param)
{
	int64_t timeout;
	struct rte_eth_link link;
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;

	PMD_DRV_LOG(DEBUG, "We got a LSC interrupt!!!\n");

	/* get the link status */
	memset(&link, 0, sizeof(link));
	nfp_net_dev_atomic_read_link_status(dev, &link);

	nfp_net_link_update(dev, 0);

	/* likely to up */
	if (!link.link_status) {
		/* handle it 1 sec later, wait for it to be stable */
		timeout = NFP_NET_LINK_UP_CHECK_TIMEOUT;
	/* likely to down */
	} else {
		/* handle it 4 sec later, wait for it to be stable */
		timeout = NFP_NET_LINK_DOWN_CHECK_TIMEOUT;
	}

	if (rte_eal_alarm_set(timeout * 1000,
			      nfp_net_dev_interrupt_delayed_handler,
			      (void *)dev) < 0) {
		RTE_LOG(ERR, PMD, "Error setting alarm");
		/* Unmasking */
		nfp_net_irq_unmask(dev);
	}
}

/*
 * Interrupt handler registered as an alarm callback so a specific interrupt
 * can be handled with a delay, once the NIC state is stable. As the nfp
 * interrupt state is not stable right after the link goes down, it needs
 * to wait 4 seconds to get the stable status.
 *
 * @param handle   Pointer to interrupt handle.
 * @param param    The address of parameter (struct rte_eth_dev *)
 *
 * @return void
 */
static void
nfp_net_dev_interrupt_delayed_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;

	nfp_net_link_update(dev, 0);
	_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);

	nfp_net_dev_link_status_print(dev);

	/* Unmasking */
	nfp_net_irq_unmask(dev);
}

static int
nfp_net_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
	struct nfp_net_hw *hw;

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* check that mtu is within the allowed range */
	if ((mtu < ETHER_MIN_MTU) || ((uint32_t)mtu > hw->max_mtu))
		return -EINVAL;

	/* mtu setting is forbidden if port is started */
	if (dev->data->dev_started) {
		PMD_DRV_LOG(ERR, "port %d must be stopped before configuration",
			    dev->data->port_id);
		return -EBUSY;
	}

	/* switch to jumbo mode if needed */
	if ((uint32_t)mtu > ETHER_MAX_LEN)
		dev->data->dev_conf.rxmode.jumbo_frame = 1;
	else
		dev->data->dev_conf.rxmode.jumbo_frame = 0;

	/* update max frame size */
	dev->data->dev_conf.rxmode.max_rx_pkt_len = (uint32_t)mtu;

	/* writing to configuration space */
	nn_cfg_writel(hw, NFP_NET_CFG_MTU, (uint32_t)mtu);

	hw->mtu = mtu;

	return 0;
}

static int
nfp_net_rx_queue_setup(struct rte_eth_dev *dev,
		       uint16_t queue_idx, uint16_t nb_desc,
		       unsigned int socket_id,
		       const struct rte_eth_rxconf *rx_conf,
		       struct rte_mempool *mp)
{
	const struct rte_memzone *tz;
	struct nfp_net_rxq *rxq;
	struct nfp_net_hw *hw;

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	/* Validating number of descriptors */
	if (((nb_desc * sizeof(struct nfp_net_rx_desc)) % 128) != 0 ||
	    (nb_desc > NFP_NET_MAX_RX_DESC) ||
	    (nb_desc < NFP_NET_MIN_RX_DESC)) {
		RTE_LOG(ERR, PMD, "Wrong nb_desc value\n");
		return -EINVAL;
	}

	/*
	 * Free memory prior to re-allocation if needed. This is the case after
	 * calling nfp_net_stop
	 */
	if (dev->data->rx_queues[queue_idx]) {
		nfp_net_rx_queue_release(dev->data->rx_queues[queue_idx]);
		dev->data->rx_queues[queue_idx] = NULL;
	}

	/* Allocating rx queue data structure */
	rxq = rte_zmalloc_socket("ethdev RX queue", sizeof(struct nfp_net_rxq),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (rxq == NULL)
		return -ENOMEM;

	/* Hw queues mapping based on firmware configuration */
	rxq->qidx = queue_idx;
	rxq->fl_qcidx = queue_idx * hw->stride_rx;
	rxq->rx_qcidx = rxq->fl_qcidx + (hw->stride_rx - 1);
	rxq->qcp_fl = hw->rx_bar + NFP_QCP_QUEUE_OFF(rxq->fl_qcidx);
	rxq->qcp_rx = hw->rx_bar + NFP_QCP_QUEUE_OFF(rxq->rx_qcidx);

	/*
	 * Tracking mbuf size for detecting a potential mbuf overflow due to
	 * RX offset
	 */
	rxq->mem_pool = mp;
	rxq->mbuf_size = rxq->mem_pool->elt_size;
	rxq->mbuf_size -= (sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM);
	hw->flbufsz = rxq->mbuf_size;

	rxq->rx_count = nb_desc;
	rxq->port_id = dev->data->port_id;
	rxq->rx_free_thresh = rx_conf->rx_free_thresh;
	rxq->crc_len = (uint8_t)((dev->data->dev_conf.rxmode.hw_strip_crc) ? 0
				 : ETHER_CRC_LEN);
	rxq->drop_en = rx_conf->rx_drop_en;

	/*
	 * Allocate RX ring hardware descriptors. A memzone large enough to
	 * handle the maximum ring size is allocated in order to allow for
	 * resizing in later calls to the queue setup function.
	 */
	tz = ring_dma_zone_reserve(dev, "rx_ring", queue_idx,
				   sizeof(struct nfp_net_rx_desc) *
				   NFP_NET_MAX_RX_DESC, socket_id);
	if (tz == NULL) {
		RTE_LOG(ERR, PMD, "Error allocating rx dma\n");
		nfp_net_rx_queue_release(rxq);
		return -ENOMEM;
	}

	/* Saving physical and virtual addresses for the RX ring */
	rxq->dma = (uint64_t)tz->phys_addr;
	rxq->rxds = (struct nfp_net_rx_desc *)tz->addr;

	/* mbuf pointers array for referencing mbufs linked to RX descriptors */
	rxq->rxbufs = rte_zmalloc_socket("rxq->rxbufs",
					 sizeof(*rxq->rxbufs) * nb_desc,
					 RTE_CACHE_LINE_SIZE, socket_id);
	if (rxq->rxbufs == NULL) {
		nfp_net_rx_queue_release(rxq);
		return -ENOMEM;
	}

	PMD_RX_LOG(DEBUG, "rxbufs=%p hw_ring=%p dma_addr=0x%" PRIx64 "\n",
		   rxq->rxbufs, rxq->rxds, (unsigned long int)rxq->dma);

	nfp_net_reset_rx_queue(rxq);

	dev->data->rx_queues[queue_idx] = rxq;
	rxq->hw = hw;

	/*
	 * Telling the HW about the physical address of the RX ring and number
	 * of descriptors in log2 format
	 */
	nn_cfg_writeq(hw, NFP_NET_CFG_RXR_ADDR(queue_idx), rxq->dma);
	nn_cfg_writeb(hw, NFP_NET_CFG_RXR_SZ(queue_idx), log2(nb_desc));

	return 0;
}
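
/*
 * Application-side sketch (hypothetical values): setting up RX queue 0
 * with 1024 descriptors on socket 0, default queue config and a mempool
 * "mb_pool", through the ethdev API that lands here:
 *
 *	rte_eth_rx_queue_setup(port_id, 0, 1024, 0, NULL, mb_pool);
 */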

static int
nfp_net_rx_fill_freelist(struct nfp_net_rxq *rxq)
{
	struct nfp_net_rx_buff *rxe = rxq->rxbufs;
	uint64_t dma_addr;
	unsigned i;

	PMD_RX_LOG(DEBUG, "nfp_net_rx_fill_freelist for %u descriptors\n",
		   rxq->rx_count);

	for (i = 0; i < rxq->rx_count; i++) {
		struct nfp_net_rx_desc *rxd;
		struct rte_mbuf *mbuf = rte_pktmbuf_alloc(rxq->mem_pool);

		if (mbuf == NULL) {
			RTE_LOG(ERR, PMD, "RX mbuf alloc failed queue_id=%u\n",
				(unsigned)rxq->qidx);
			return -ENOMEM;
		}

		dma_addr = rte_cpu_to_le_64(RTE_MBUF_DMA_ADDR_DEFAULT(mbuf));

		rxd = &rxq->rxds[i];
		rxd->fld.dd = 0;
		rxd->fld.dma_addr_hi = (dma_addr >> 32) & 0xff;
		rxd->fld.dma_addr_lo = dma_addr & 0xffffffff;
		rxe[i].mbuf = mbuf;
		PMD_RX_LOG(DEBUG, "[%d]: %" PRIx64 "\n", i, dma_addr);
	}

	/* Make sure all writes are flushed before telling the hardware */
	rte_wmb();

	/* Not advertising the whole ring as the firmware gets confused if so */
	PMD_RX_LOG(DEBUG, "Increment FL write pointer in %u\n",
		   rxq->rx_count - 1);

	nfp_qcp_ptr_add(rxq->qcp_fl, NFP_QCP_WRITE_PTR, rxq->rx_count - 1);

	return 0;
}

static int
nfp_net_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
		       uint16_t nb_desc, unsigned int socket_id,
		       const struct rte_eth_txconf *tx_conf)
{
	const struct rte_memzone *tz;
	struct nfp_net_txq *txq;
	uint16_t tx_free_thresh;
	struct nfp_net_hw *hw;

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	/* Validating number of descriptors */
	if (((nb_desc * sizeof(struct nfp_net_tx_desc)) % 128) != 0 ||
	    (nb_desc > NFP_NET_MAX_TX_DESC) ||
	    (nb_desc < NFP_NET_MIN_TX_DESC)) {
		RTE_LOG(ERR, PMD, "Wrong nb_desc value\n");
		return -EINVAL;
	}

	tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ?
				    tx_conf->tx_free_thresh :
				    DEFAULT_TX_FREE_THRESH);

	if (tx_free_thresh > (nb_desc)) {
		RTE_LOG(ERR, PMD,
			"tx_free_thresh must be less than the number of TX "
			"descriptors. (tx_free_thresh=%u port=%d "
			"queue=%d)\n", (unsigned int)tx_free_thresh,
			(int)dev->data->port_id, (int)queue_idx);
		return -EINVAL;
	}

	/*
	 * Free memory prior to re-allocation if needed. This is the case after
	 * calling nfp_net_stop
	 */
	if (dev->data->tx_queues[queue_idx]) {
		PMD_TX_LOG(DEBUG, "Freeing memory prior to re-allocation %d\n",
			   queue_idx);
		nfp_net_tx_queue_release(dev->data->tx_queues[queue_idx]);
		dev->data->tx_queues[queue_idx] = NULL;
	}

	/* Allocating tx queue data structure */
	txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct nfp_net_txq),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (txq == NULL) {
		RTE_LOG(ERR, PMD, "Error allocating tx dma\n");
		return -ENOMEM;
	}

	/*
	 * Allocate TX ring hardware descriptors. A memzone large enough to
	 * handle the maximum ring size is allocated in order to allow for
	 * resizing in later calls to the queue setup function.
	 */
	tz = ring_dma_zone_reserve(dev, "tx_ring", queue_idx,
				   sizeof(struct nfp_net_tx_desc) *
				   NFP_NET_MAX_TX_DESC, socket_id);
	if (tz == NULL) {
		RTE_LOG(ERR, PMD, "Error allocating tx dma\n");
		nfp_net_tx_queue_release(txq);
		return -ENOMEM;
	}

	txq->tx_count = nb_desc;
	txq->tx_free_thresh = tx_free_thresh;
	txq->tx_pthresh = tx_conf->tx_thresh.pthresh;
	txq->tx_hthresh = tx_conf->tx_thresh.hthresh;
	txq->tx_wthresh = tx_conf->tx_thresh.wthresh;

	/* queue mapping based on firmware configuration */
	txq->qidx = queue_idx;
	txq->tx_qcidx = queue_idx * hw->stride_tx;
	txq->qcp_q = hw->tx_bar + NFP_QCP_QUEUE_OFF(txq->tx_qcidx);

	txq->port_id = dev->data->port_id;
	txq->txq_flags = tx_conf->txq_flags;

	/* Saving physical and virtual addresses for the TX ring */
	txq->dma = (uint64_t)tz->phys_addr;
	txq->txds = (struct nfp_net_tx_desc *)tz->addr;

	/* mbuf pointers array for referencing mbufs linked to TX descriptors */
	txq->txbufs = rte_zmalloc_socket("txq->txbufs",
					 sizeof(*txq->txbufs) * nb_desc,
					 RTE_CACHE_LINE_SIZE, socket_id);
	if (txq->txbufs == NULL) {
		nfp_net_tx_queue_release(txq);
		return -ENOMEM;
	}

	PMD_TX_LOG(DEBUG, "txbufs=%p hw_ring=%p dma_addr=0x%" PRIx64 "\n",
		   txq->txbufs, txq->txds, (unsigned long int)txq->dma);

	nfp_net_reset_tx_queue(txq);

	dev->data->tx_queues[queue_idx] = txq;
	txq->hw = hw;

	/*
	 * Telling the HW about the physical address of the TX ring and number
	 * of descriptors in log2 format
	 */
	nn_cfg_writeq(hw, NFP_NET_CFG_TXR_ADDR(queue_idx), txq->dma);
	nn_cfg_writeb(hw, NFP_NET_CFG_TXR_SZ(queue_idx), log2(nb_desc));

	return 0;
}

/* nfp_net_tx_cksum - Set TX CSUM offload flags in TX descriptor */
static inline void
nfp_net_tx_cksum(struct nfp_net_txq *txq, struct nfp_net_tx_desc *txd,
		 struct rte_mbuf *mb)
{
	uint64_t ol_flags;
	struct nfp_net_hw *hw = txq->hw;

	if (!(hw->cap & NFP_NET_CFG_CTRL_TXCSUM))
		return;

	ol_flags = mb->ol_flags;

	/* IPv6 does not need checksum */
	if (ol_flags & PKT_TX_IP_CKSUM)
		txd->flags |= PCIE_DESC_TX_IP4_CSUM;

	switch (ol_flags & PKT_TX_L4_MASK) {
	case PKT_TX_UDP_CKSUM:
		txd->flags |= PCIE_DESC_TX_UDP_CSUM;
		break;
	case PKT_TX_TCP_CKSUM:
		txd->flags |= PCIE_DESC_TX_TCP_CSUM;
		break;
	}

	if (ol_flags & (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK))
		txd->flags |= PCIE_DESC_TX_CSUM;
}

/* nfp_net_rx_cksum - set mbuf checksum flags based on RX descriptor flags */
static inline void
nfp_net_rx_cksum(struct nfp_net_rxq *rxq, struct nfp_net_rx_desc *rxd,
		 struct rte_mbuf *mb)
{
	struct nfp_net_hw *hw = rxq->hw;

	if (!(hw->ctrl & NFP_NET_CFG_CTRL_RXCSUM))
		return;

	/* If IPv4 and IP checksum error, fail */
	if (unlikely((rxd->rxd.flags & PCIE_DESC_RX_IP4_CSUM) &&
		     !(rxd->rxd.flags & PCIE_DESC_RX_IP4_CSUM_OK)))
		mb->ol_flags |= PKT_RX_IP_CKSUM_BAD;
	else
		mb->ol_flags |= PKT_RX_IP_CKSUM_GOOD;

	/* If neither UDP nor TCP return */
	if (!(rxd->rxd.flags & PCIE_DESC_RX_TCP_CSUM) &&
	    !(rxd->rxd.flags & PCIE_DESC_RX_UDP_CSUM))
		return;

	if (likely(rxd->rxd.flags & PCIE_DESC_RX_L4_CSUM_OK))
		mb->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
	else
		mb->ol_flags |= PKT_RX_L4_CKSUM_BAD;
}

#define NFP_HASH_OFFSET      ((uint8_t *)mbuf->buf_addr + mbuf->data_off - 4)
#define NFP_HASH_TYPE_OFFSET ((uint8_t *)mbuf->buf_addr + mbuf->data_off - 8)

/*
 * nfp_net_set_hash - Set mbuf hash data
 *
 * The RSS hash and hash-type are pre-pended to the packet data.
 * Extract and decode it and set the mbuf fields.
 */
static inline void
nfp_net_set_hash(struct nfp_net_rxq *rxq, struct nfp_net_rx_desc *rxd,
		 struct rte_mbuf *mbuf)
{
	uint32_t hash;
	uint32_t hash_type;
	struct nfp_net_hw *hw = rxq->hw;

	if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS))
		return;

	if (!(rxd->rxd.flags & PCIE_DESC_RX_RSS))
		return;

	hash = rte_be_to_cpu_32(*(uint32_t *)NFP_HASH_OFFSET);
	hash_type = rte_be_to_cpu_32(*(uint32_t *)NFP_HASH_TYPE_OFFSET);

	/*
	 * hash type is sharing the same word with input port info
	 * 31-8: input port
	 * 7:0: hash type
	 */
	hash_type &= 0xff;

	mbuf->hash.rss = hash;
	mbuf->ol_flags |= PKT_RX_RSS_HASH;

	switch (hash_type) {
	case NFP_NET_RSS_IPV4:
		mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV4;
		break;
	case NFP_NET_RSS_IPV6:
		mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV6;
		break;
	case NFP_NET_RSS_IPV6_EX:
		mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV6_EXT;
		break;
	case NFP_NET_RSS_IPV4_TCP:
		mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV6_EXT;
		break;
	case NFP_NET_RSS_IPV6_TCP:
		mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV6_EXT;
		break;
	case NFP_NET_RSS_IPV4_UDP:
		mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV6_EXT;
		break;
	case NFP_NET_RSS_IPV6_UDP:
		mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV6_EXT;
		break;
	default:
		mbuf->packet_type |= RTE_PTYPE_INNER_L4_MASK;
	}
}

/* nfp_net_check_port - Set mbuf in_port field */
static void
nfp_net_check_port(struct nfp_net_rx_desc *rxd, struct rte_mbuf *mbuf)
{
	uint32_t port;

	if (!(rxd->rxd.flags & PCIE_DESC_RX_INGRESS_PORT)) {
		mbuf->port = 0;
		return;
	}

	port = rte_be_to_cpu_32(*(uint32_t *)((uint8_t *)mbuf->buf_addr +
					      mbuf->data_off - 8));

	/*
	 * hash type is sharing the same word with input port info
	 * 31-8: input port
	 * 7:0: hash type
	 */
	port = (uint8_t)(port >> 8);
	mbuf->port = port;
}

static inline void
nfp_net_mbuf_alloc_failed(struct nfp_net_rxq *rxq)
{
	rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
}

#define NFP_DESC_META_LEN(d) ((d)->rxd.meta_len_dd & PCIE_DESC_RX_META_LEN_MASK)

/*
 * RX path design:
 *
 * There are some decisions to take:
 * 1) How to check DD RX descriptors bit
 * 2) How and when to allocate new mbufs
 *
 * Current implementation checks just one single DD bit each loop. As each
 * descriptor is 8 bytes, it is likely a good idea to check descriptors in
 * a single cache line instead. Tests with this change have not shown any
 * performance improvement but it requires further investigation. For example,
 * depending on which descriptor is next, the number of descriptors could be
 * less than 8 for just checking those in the same cache line. This implies
 * extra work which could be counterproductive by itself. Indeed, last firmware
 * changes are just doing this: writing several descriptors with the DD bit
 * for saving PCIe bandwidth and DMA operations from the NFP.
 *
 * Mbuf allocation is done when a new packet is received. Then the descriptor
 * is automatically linked with the new mbuf and the old one is given to the
 * user. The main drawback with this design is mbuf allocation is heavier than
 * using bulk allocations allowed by DPDK with rte_mempool_get_bulk. From the
 * cache point of view, allocating the mbuf early, as we do now, does not seem
 * to have any benefit at all. Again, tests with this change have not shown
 * any improvement. Also, rte_mempool_get_bulk returns all or nothing, so the
 * implications of this type of allocation should be studied more deeply.
 */

static uint16_t
nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	struct nfp_net_rxq *rxq;
	struct nfp_net_rx_desc *rxds;
	struct nfp_net_rx_buff *rxb;
	struct nfp_net_hw *hw;
	struct rte_mbuf *mb;
	struct rte_mbuf *new_mb;
	int idx;
	uint16_t nb_hold;
	uint64_t dma_addr;
	int avail;

	rxq = rx_queue;

	if (unlikely(rxq == NULL)) {
		/*
		 * DPDK just checks the queue is lower than max queues
		 * enabled. But the queue needs to be configured
		 */
		RTE_LOG(ERR, PMD, "RX Bad queue\n");
		return -EINVAL;
	}

	hw = rxq->hw;
	avail = 0;
	nb_hold = 0;

	while (avail < nb_pkts) {
		idx = rxq->rd_p % rxq->rx_count;

		rxb = &rxq->rxbufs[idx];
		if (unlikely(rxb == NULL)) {
			RTE_LOG(ERR, PMD, "rxb does not exist!\n");
			break;
		}

		rxds = &rxq->rxds[idx];
		if ((rxds->rxd.meta_len_dd & PCIE_DESC_RX_DD) == 0)
			break;

		/*
		 * Memory barrier to ensure that we won't do other
		 * reads before the DD bit.
		 */
		rte_rmb();

		/*
		 * We got a packet. Let's alloc a new mbuf for refilling the
		 * free descriptor ring as soon as possible
		 */
		new_mb = rte_pktmbuf_alloc(rxq->mem_pool);
		if (unlikely(new_mb == NULL)) {
			RTE_LOG(DEBUG, PMD, "RX mbuf alloc failed port_id=%u "
				"queue_id=%u\n", (unsigned)rxq->port_id,
				(unsigned)rxq->qidx);
			nfp_net_mbuf_alloc_failed(rxq);
			break;
		}

		nb_hold++;

		/*
		 * Grab the mbuf and refill the descriptor with the
		 * previously allocated mbuf
		 */
		mb = rxb->mbuf;
		rxb->mbuf = new_mb;

		PMD_RX_LOG(DEBUG, "Packet len: %u, mbuf_size: %u\n",
			   rxds->rxd.data_len, rxq->mbuf_size);

		/* Size of this segment */
		mb->data_len = rxds->rxd.data_len - NFP_DESC_META_LEN(rxds);
		/* Size of the whole packet. We just support 1 segment */
		mb->pkt_len = rxds->rxd.data_len - NFP_DESC_META_LEN(rxds);

		if (unlikely((mb->data_len + hw->rx_offset) >
			     rxq->mbuf_size)) {
			/*
			 * This should not happen and the user has the
			 * responsibility of avoiding it. But we have
			 * to give some info about the error
			 */
			RTE_LOG(ERR, PMD,
				"mbuf overflow likely due to the RX offset.\n"
				"\t\tYour mbuf size should have extra space for"
				" RX offset=%u bytes.\n"
				"\t\tCurrently you just have %u bytes available"
				" but the received packet is %u bytes long",
				hw->rx_offset,
				rxq->mbuf_size - hw->rx_offset,
				mb->data_len);
			return -EINVAL;
		}

		/* Filling the received mbuf with packet info */
		if (hw->rx_offset)
			mb->data_off = RTE_PKTMBUF_HEADROOM + hw->rx_offset;
		else
			mb->data_off = RTE_PKTMBUF_HEADROOM +
				       NFP_DESC_META_LEN(rxds);

		/* No scatter mode supported */
		mb->nb_segs = 1;
		mb->next = NULL;

		mb->port = rxq->port_id;

		/* Checking the RSS flag */
		nfp_net_set_hash(rxq, rxds, mb);

		/* Checking the checksum flag */
		nfp_net_rx_cksum(rxq, rxds, mb);

		/* Checking the port flag */
		nfp_net_check_port(rxds, mb);

		if ((rxds->rxd.flags & PCIE_DESC_RX_VLAN) &&
		    (hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN)) {
			mb->vlan_tci = rte_cpu_to_le_32(rxds->rxd.vlan);
			mb->ol_flags |= PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED;
		}

		/* Adding the mbuf to the mbuf array passed by the app */
		rx_pkts[avail++] = mb;

		/* Now resetting and updating the descriptor */
		rxds->vals[0] = 0;
		rxds->vals[1] = 0;
		dma_addr = rte_cpu_to_le_64(RTE_MBUF_DMA_ADDR_DEFAULT(new_mb));
		rxds->fld.dd = 0;
		rxds->fld.dma_addr_hi = (dma_addr >> 32) & 0xff;
		rxds->fld.dma_addr_lo = dma_addr & 0xffffffff;

		rxq->rd_p++;
	}

	if (nb_hold == 0)
		return nb_hold;

	PMD_RX_LOG(DEBUG, "RX port_id=%u queue_id=%u, %d packets received\n",
		   (unsigned)rxq->port_id, (unsigned)rxq->qidx, nb_hold);

	nb_hold += rxq->nb_rx_hold;

	/*
	 * FL descriptors needs to be written before incrementing the
	 * FL queue WR pointer
	 */
	rte_wmb();
	if (nb_hold > rxq->rx_free_thresh) {
		PMD_RX_LOG(DEBUG, "port=%u queue=%u nb_hold=%u avail=%u\n",
			   (unsigned)rxq->port_id, (unsigned)rxq->qidx,
			   (unsigned)nb_hold, (unsigned)avail);
		nfp_qcp_ptr_add(rxq->qcp_fl, NFP_QCP_WRITE_PTR, nb_hold);
		nb_hold = 0;
	}
	rxq->nb_rx_hold = nb_hold;

	return avail;
}

/*
 * nfp_net_tx_free_bufs - Check for descriptors with a complete
 * status
 * @txq: TX queue to work with
 * Returns number of descriptors freed
 */
static int
nfp_net_tx_free_bufs(struct nfp_net_txq *txq)
{
	uint32_t qcp_rd_p;
	int todo;

	PMD_TX_LOG(DEBUG, "queue %u. Check for descriptor with a complete"
		   " status\n", txq->qidx);

	/* Work out how many packets have been sent */
	qcp_rd_p = nfp_qcp_read(txq->qcp_q, NFP_QCP_READ_PTR);

	if (qcp_rd_p == txq->qcp_rd_p) {
		PMD_TX_LOG(DEBUG, "queue %u: It seems harrier is not sending "
			   "packets (%u, %u)\n", txq->qidx,
			   qcp_rd_p, txq->qcp_rd_p);
		return 0;
	}

	if (qcp_rd_p > txq->qcp_rd_p)
		todo = qcp_rd_p - txq->qcp_rd_p;
	else
		todo = qcp_rd_p + txq->tx_count - txq->qcp_rd_p;

	PMD_TX_LOG(DEBUG, "qcp_rd_p %u, txq->qcp_rd_p: %u, qcp->rd_p: %u\n",
		   qcp_rd_p, txq->qcp_rd_p, txq->rd_p);

	if (todo == 0)
		return todo;

	txq->qcp_rd_p += todo;
	txq->qcp_rd_p %= txq->tx_count;
	txq->rd_p += todo;

	return todo;
}
1915 #define NFP_FREE_TX_DESC(t) (t->tx_count - (t->wr_p - t->rd_p) - 8)
1918 * nfp_net_txq_full - Check if the TX queue free descriptors
1919 * is below tx_free_threshold
1921 * @txq: TX queue to check
1923 * This function uses the host copy* of read/write pointers
1926 int nfp_net_txq_full(struct nfp_net_txq *txq)
1928 return NFP_FREE_TX_DESC(txq) < txq->tx_free_thresh;
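
/*
 * Worked example (illustrative values): with tx_count = 1024, wr_p = 100
 * and rd_p = 40, NFP_FREE_TX_DESC() yields 1024 - 60 - 8 = 956 free
 * descriptors. The 8 descriptors held back avoid the wr_p == rd_p
 * ambiguity between a completely full and a completely empty ring.
 */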

static uint16_t
nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct nfp_net_txq *txq;
	struct nfp_net_hw *hw;
	struct nfp_net_tx_desc *txds;
	struct rte_mbuf *pkt;
	uint64_t dma_addr;
	int pkt_size, pkt_len, dma_size;
	uint16_t free_descs, issued_descs;
	struct rte_mbuf **lmbuf;
	int i;

	txq = tx_queue;
	hw = txq->hw;
	txds = &txq->txds[txq->tail];

	PMD_TX_LOG(DEBUG, "working for queue %u at pos %d and %u packets\n",
		   txq->qidx, txq->tail, nb_pkts);

	if ((NFP_FREE_TX_DESC(txq) < nb_pkts) || (nfp_net_txq_full(txq)))
		nfp_net_tx_free_bufs(txq);

	free_descs = (uint16_t)NFP_FREE_TX_DESC(txq);
	if (unlikely(free_descs == 0))
		return 0;

	pkt = *tx_pkts;

	i = 0;
	issued_descs = 0;
	PMD_TX_LOG(DEBUG, "queue: %u. Sending %u packets\n",
		   txq->qidx, nb_pkts);
	/* Sending packets */
	while ((i < nb_pkts) && free_descs) {
		/* Grabbing the mbuf linked to the current descriptor */
		lmbuf = &txq->txbufs[txq->tail].mbuf;
		/* Warming the cache for releasing the mbuf later on */
		RTE_MBUF_PREFETCH_TO_FREE(*lmbuf);

		pkt = *(tx_pkts + i);

		if (unlikely((pkt->nb_segs > 1) &&
			     !(hw->cap & NFP_NET_CFG_CTRL_GATHER))) {
			PMD_INIT_LOG(INFO, "NFP_NET_CFG_CTRL_GATHER not set\n");
			rte_panic("Multisegment packet unsupported\n");
		}

		/* Checking if we have enough descriptors */
		if (unlikely(pkt->nb_segs > free_descs))
			goto xmit_end;

		/*
		 * Checksum and VLAN flags just in the first descriptor for a
		 * multisegment packet
		 */
		txds->data_len = pkt->pkt_len;
		nfp_net_tx_cksum(txq, txds, pkt);

		if ((pkt->ol_flags & PKT_TX_VLAN_PKT) &&
		    (hw->cap & NFP_NET_CFG_CTRL_TXVLAN)) {
			txds->flags |= PCIE_DESC_TX_VLAN;
			txds->vlan = pkt->vlan_tci;
		}

		if (pkt->ol_flags & PKT_TX_TCP_SEG)
			rte_panic("TSO is not supported\n");

		/*
		 * mbuf data_len is the data in one segment and pkt_len data
		 * in the whole packet. When the packet is just one segment,
		 * then data_len = pkt_len
		 */
		pkt_size = pkt->pkt_len;
		pkt_len = pkt->pkt_len;

		/* Releasing mbuf which was prefetched above */
		if (*lmbuf)
			rte_pktmbuf_free(*lmbuf);
		/*
		 * Linking mbuf with descriptor for being released
		 * next time descriptor is used
		 */
		*lmbuf = pkt;

		while (pkt_size) {
			dma_size = pkt->data_len;
			dma_addr = rte_mbuf_data_dma_addr(pkt);
			PMD_TX_LOG(DEBUG, "Working with mbuf at dma address:"
				   "%" PRIx64 "\n", dma_addr);

			/* Filling descriptors fields */
			txds->dma_len = dma_size;
			txds->data_len = pkt_len;
			txds->dma_addr_hi = (dma_addr >> 32) & 0xff;
			txds->dma_addr_lo = (dma_addr & 0xffffffff);
			ASSERT(free_descs > 0);
			free_descs--;

			txq->wr_p++;
			txq->tail++;
			if (unlikely(txq->tail == txq->tx_count)) /* wrapping?*/
				txq->tail = 0;

			pkt_size -= dma_size;

			/*
			 * Making the EOP, packets with just one segment
			 * the priority
			 */
			if (likely(!pkt_size))
				txds->offset_eop = PCIE_DESC_TX_EOP;
			else
				txds->offset_eop = 0;

			pkt = pkt->next;
			/* Referencing next free TX descriptor */
			txds = &txq->txds[txq->tail];
			issued_descs++;
		}
		i++;
	}

xmit_end:
	/* Increment write pointers. Force memory write before we let HW know */
	rte_wmb();
	nfp_qcp_ptr_add(txq->qcp_q, NFP_QCP_WRITE_PTR, issued_descs);

	return i;
}

static void
nfp_net_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	uint32_t new_ctrl, update;
	struct nfp_net_hw *hw;

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	new_ctrl = 0;

	if ((mask & ETH_VLAN_FILTER_OFFLOAD) ||
	    (mask & ETH_VLAN_EXTEND_OFFLOAD))
		RTE_LOG(INFO, PMD, "No support for ETH_VLAN_FILTER_OFFLOAD or"
			" ETH_VLAN_EXTEND_OFFLOAD");

	/* Enable vlan strip if it is not configured yet */
	if ((mask & ETH_VLAN_STRIP_OFFLOAD) &&
	    !(hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN))
		new_ctrl = hw->ctrl | NFP_NET_CFG_CTRL_RXVLAN;

	/* Disable vlan strip just if it is configured */
	if (!(mask & ETH_VLAN_STRIP_OFFLOAD) &&
	    (hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN))
		new_ctrl = hw->ctrl & ~NFP_NET_CFG_CTRL_RXVLAN;

	if (new_ctrl == 0)
		return;

	update = NFP_NET_CFG_UPDATE_GEN;

	if (nfp_net_reconfig(hw, new_ctrl, update) < 0)
		return;

	hw->ctrl = new_ctrl;
}

/* Update Redirection Table(RETA) of Receive Side Scaling of Ethernet device */
static int
nfp_net_reta_update(struct rte_eth_dev *dev,
		    struct rte_eth_rss_reta_entry64 *reta_conf,
		    uint16_t reta_size)
{
	uint32_t reta, mask;
	int i, j;
	int idx, shift;
	uint32_t update;
	struct nfp_net_hw *hw =
		NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS))
		return -EINVAL;

	if (reta_size != NFP_NET_CFG_RSS_ITBL_SZ) {
		RTE_LOG(ERR, PMD, "The size of hash lookup table configured "
			"(%d) doesn't match the number hardware can support "
			"(%d)\n", reta_size, NFP_NET_CFG_RSS_ITBL_SZ);
		return -EINVAL;
	}

	/*
	 * Update Redirection Table. There are 128 8bit-entries which can be
	 * managed as 32 32bit-entries
	 */
	for (i = 0; i < reta_size; i += 4) {
		/* Handling 4 RSS entries per loop */
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		mask = (uint8_t)((reta_conf[idx].mask >> shift) & 0xF);

		if (!mask)
			continue;

		reta = 0;
		/* If all 4 entries were set, don't need read RETA register */
		if (mask != 0xF)
			reta = nn_cfg_readl(hw, NFP_NET_CFG_RSS_ITBL + i);

		for (j = 0; j < 4; j++) {
			if (!(mask & (0x1 << j)))
				continue;
			if (mask != 0xF)
				/* Clearing the entry bits */
				reta &= ~(0xFF << (8 * j));
			reta |= reta_conf[idx].reta[shift + j] << (8 * j);
		}
		nn_cfg_writel(hw, NFP_NET_CFG_RSS_ITBL + (idx * 64) + shift,
			      reta);
	}

	update = NFP_NET_CFG_UPDATE_RSS;

	if (nfp_net_reconfig(hw, hw->ctrl, update) < 0)
		return -EIO;

	return 0;
}
2161 nfp_net_reta_query(struct rte_eth_dev *dev,
2162 struct rte_eth_rss_reta_entry64 *reta_conf,
2168 struct nfp_net_hw *hw;
2170 hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2172 if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS))
2175 if (reta_size != NFP_NET_CFG_RSS_ITBL_SZ) {
2176 RTE_LOG(ERR, PMD, "The size of hash lookup table configured "
2177 "(%d) doesn't match the number hardware can supported "
2178 "(%d)\n", reta_size, NFP_NET_CFG_RSS_ITBL_SZ);
2183 * Reading Redirection Table. There are 128 8bit-entries which can be
2184 * manage as 32 32bit-entries
2186 for (i = 0; i < reta_size; i += 4) {
2187 /* Handling 4 RSS entries per loop */
2188 idx = i / RTE_RETA_GROUP_SIZE;
2189 shift = i % RTE_RETA_GROUP_SIZE;
2190 mask = (uint8_t)((reta_conf[idx].mask >> shift) & 0xF);
2195 reta = nn_cfg_readl(hw, NFP_NET_CFG_RSS_ITBL + (idx * 64) +
2197 for (j = 0; j < 4; j++) {
2198 if (!(mask & (0x1 << j)))
2200 reta_conf->reta[shift + j] =
2201 (uint8_t)((reta >> (8 * j)) & 0xF);

static int
nfp_net_rss_hash_update(struct rte_eth_dev *dev,
			struct rte_eth_rss_conf *rss_conf)
{
	uint32_t update;
	uint32_t cfg_rss_ctrl = 0;
	uint8_t key;
	uint64_t rss_hf;
	int i;
	struct nfp_net_hw *hw;

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	rss_hf = rss_conf->rss_hf;

	/* Checking if RSS is enabled */
	if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS)) {
		if (rss_hf != 0) { /* Enable RSS? */
			RTE_LOG(ERR, PMD, "RSS unsupported\n");
			return -EINVAL;
		}
		return 0; /* Nothing to do */
	}

	if (rss_conf->rss_key_len > NFP_NET_CFG_RSS_KEY_SZ) {
		RTE_LOG(ERR, PMD, "hash key too long\n");
		return -EINVAL;
	}

	if (rss_hf & ETH_RSS_IPV4)
		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4;

	if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4_TCP;

	if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4_UDP;

	if (rss_hf & ETH_RSS_IPV6)
		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6;

	if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6_TCP;

	if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
		cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6_UDP;

	cfg_rss_ctrl |= NFP_NET_CFG_RSS_MASK;
	cfg_rss_ctrl |= NFP_NET_CFG_RSS_TOEPLITZ;

	/* configuring where to apply the RSS hash */
	nn_cfg_writel(hw, NFP_NET_CFG_RSS_CTRL, cfg_rss_ctrl);

	/* Writing the key byte by byte */
	for (i = 0; i < rss_conf->rss_key_len; i++) {
		memcpy(&key, &rss_conf->rss_key[i], 1);
		nn_cfg_writeb(hw, NFP_NET_CFG_RSS_KEY + i, key);
	}

	/* Writing the key size */
	nn_cfg_writeb(hw, NFP_NET_CFG_RSS_KEY_SZ, rss_conf->rss_key_len);

	update = NFP_NET_CFG_UPDATE_RSS;

	if (nfp_net_reconfig(hw, hw->ctrl, update) < 0)
		return -EIO;

	return 0;
}
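
/*
 * Application-side sketch (hypothetical values): selecting a Toeplitz key
 * of NFP_NET_CFG_RSS_KEY_SZ bytes and IPv4 TCP/UDP hashing through the
 * ethdev API that lands here:
 *
 *	struct rte_eth_rss_conf conf = {
 *		.rss_key = rss_key,
 *		.rss_key_len = NFP_NET_CFG_RSS_KEY_SZ,
 *		.rss_hf = ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV4_UDP,
 *	};
 *	rte_eth_dev_rss_hash_update(port_id, &conf);
 */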

static int
nfp_net_rss_hash_conf_get(struct rte_eth_dev *dev,
			  struct rte_eth_rss_conf *rss_conf)
{
	uint64_t rss_hf;
	uint32_t cfg_rss_ctrl;
	uint8_t key;
	int i;
	struct nfp_net_hw *hw;

	hw = NFP_NET_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (!(hw->ctrl & NFP_NET_CFG_CTRL_RSS))
		return -EINVAL;

	rss_hf = rss_conf->rss_hf;
	cfg_rss_ctrl = nn_cfg_readl(hw, NFP_NET_CFG_RSS_CTRL);

	if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4)
		rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV4_UDP;

	if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4_TCP)
		rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;

	if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6_TCP)
		rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP;

	if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV4_UDP)
		rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP;

	if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6_UDP)
		rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP;

	if (cfg_rss_ctrl & NFP_NET_CFG_RSS_IPV6)
		rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_NONFRAG_IPV6_UDP;

	/* Reading the key size */
	rss_conf->rss_key_len = nn_cfg_readl(hw, NFP_NET_CFG_RSS_KEY_SZ);

	/* Reading the key byte by byte */
	for (i = 0; i < rss_conf->rss_key_len; i++) {
		key = nn_cfg_readb(hw, NFP_NET_CFG_RSS_KEY + i);
		memcpy(&rss_conf->rss_key[i], &key, 1);
	}

	return 0;
}

/* Initialise and register driver with DPDK Application */
static const struct eth_dev_ops nfp_net_eth_dev_ops = {
	.dev_configure		= nfp_net_configure,
	.dev_start		= nfp_net_start,
	.dev_stop		= nfp_net_stop,
	.dev_close		= nfp_net_close,
	.promiscuous_enable	= nfp_net_promisc_enable,
	.promiscuous_disable	= nfp_net_promisc_disable,
	.link_update		= nfp_net_link_update,
	.stats_get		= nfp_net_stats_get,
	.stats_reset		= nfp_net_stats_reset,
	.dev_infos_get		= nfp_net_infos_get,
	.dev_supported_ptypes_get = nfp_net_supported_ptypes_get,
	.mtu_set		= nfp_net_dev_mtu_set,
	.vlan_offload_set	= nfp_net_vlan_offload_set,
	.reta_update		= nfp_net_reta_update,
	.reta_query		= nfp_net_reta_query,
	.rss_hash_update	= nfp_net_rss_hash_update,
	.rss_hash_conf_get	= nfp_net_rss_hash_conf_get,
	.rx_queue_setup		= nfp_net_rx_queue_setup,
	.rx_queue_release	= nfp_net_rx_queue_release,
	.rx_queue_count		= nfp_net_rx_queue_count,
	.tx_queue_setup		= nfp_net_tx_queue_setup,
	.tx_queue_release	= nfp_net_tx_queue_release,
};

static int
nfp_net_init(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev;
	struct nfp_net_hw *hw;

	uint32_t tx_bar_off, rx_bar_off;
	uint32_t start_q;
	int stride = 4;

	PMD_INIT_FUNC_TRACE();

	hw = NFP_NET_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);

	eth_dev->dev_ops = &nfp_net_eth_dev_ops;
	eth_dev->rx_pkt_burst = &nfp_net_recv_pkts;
	eth_dev->tx_pkt_burst = &nfp_net_xmit_pkts;

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	pci_dev = eth_dev->pci_dev;
	rte_eth_copy_pci_info(eth_dev, pci_dev);

	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->subsystem_device_id = pci_dev->id.subsystem_device_id;
	hw->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id;

	PMD_INIT_LOG(DEBUG, "nfp_net: device (%u:%u) %u:%u:%u:%u\n",
		     pci_dev->id.vendor_id, pci_dev->id.device_id,
		     pci_dev->addr.domain, pci_dev->addr.bus,
		     pci_dev->addr.devid, pci_dev->addr.function);

	hw->ctrl_bar = (uint8_t *)pci_dev->mem_resource[0].addr;
	if (hw->ctrl_bar == NULL) {
		RTE_LOG(ERR, PMD,
			"hw->ctrl_bar is NULL. BAR0 not configured\n");
		return -ENODEV;
	}
	hw->max_rx_queues = nn_cfg_readl(hw, NFP_NET_CFG_MAX_RXRINGS);
	hw->max_tx_queues = nn_cfg_readl(hw, NFP_NET_CFG_MAX_TXRINGS);

	/* Work out where in the BAR the queues start. */
	switch (pci_dev->id.device_id) {
	case PCI_DEVICE_ID_NFP6000_VF_NIC:
		start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_TXQ);
		tx_bar_off = NFP_PCIE_QUEUE(start_q);
		start_q = nn_cfg_readl(hw, NFP_NET_CFG_START_RXQ);
		rx_bar_off = NFP_PCIE_QUEUE(start_q);
		break;
	default:
		RTE_LOG(ERR, PMD, "nfp_net: no device ID matching\n");
		return -ENODEV;
	}

	PMD_INIT_LOG(DEBUG, "tx_bar_off: 0x%08x\n", tx_bar_off);
	PMD_INIT_LOG(DEBUG, "rx_bar_off: 0x%08x\n", rx_bar_off);

	hw->tx_bar = (uint8_t *)pci_dev->mem_resource[2].addr + tx_bar_off;
	hw->rx_bar = (uint8_t *)pci_dev->mem_resource[2].addr + rx_bar_off;

	PMD_INIT_LOG(DEBUG, "ctrl_bar: %p, tx_bar: %p, rx_bar: %p\n",
		     hw->ctrl_bar, hw->tx_bar, hw->rx_bar);

	nfp_net_cfg_queue_setup(hw);

	/* Get some of the read-only fields from the config BAR */
	hw->ver = nn_cfg_readl(hw, NFP_NET_CFG_VERSION);
	hw->cap = nn_cfg_readl(hw, NFP_NET_CFG_CAP);
	hw->max_mtu = nn_cfg_readl(hw, NFP_NET_CFG_MAX_MTU);
	hw->mtu = ETHER_MTU;

	if (NFD_CFG_MAJOR_VERSION_of(hw->ver) < 2)
		hw->rx_offset = NFP_NET_RX_OFFSET;
	else
		hw->rx_offset = nn_cfg_readl(hw, NFP_NET_CFG_RX_OFFSET_ADDR);

	PMD_INIT_LOG(INFO, "VER: %#x, Maximum supported MTU: %d\n",
		     hw->ver, hw->max_mtu);
	PMD_INIT_LOG(INFO, "CAP: %#x, %s%s%s%s%s%s%s%s%s\n", hw->cap,
		     hw->cap & NFP_NET_CFG_CTRL_PROMISC ? "PROMISC " : "",
		     hw->cap & NFP_NET_CFG_CTRL_RXCSUM  ? "RXCSUM "  : "",
		     hw->cap & NFP_NET_CFG_CTRL_TXCSUM  ? "TXCSUM "  : "",
		     hw->cap & NFP_NET_CFG_CTRL_RXVLAN  ? "RXVLAN "  : "",
		     hw->cap & NFP_NET_CFG_CTRL_TXVLAN  ? "TXVLAN "  : "",
		     hw->cap & NFP_NET_CFG_CTRL_SCATTER ? "SCATTER " : "",
		     hw->cap & NFP_NET_CFG_CTRL_GATHER  ? "GATHER "  : "",
		     hw->cap & NFP_NET_CFG_CTRL_LSO     ? "TSO "     : "",
		     hw->cap & NFP_NET_CFG_CTRL_RSS     ? "RSS "     : "");

	pci_dev = eth_dev->pci_dev;
	hw->ctrl = 0;

	hw->stride_rx = stride;
	hw->stride_tx = stride;

	PMD_INIT_LOG(INFO, "max_rx_queues: %u, max_tx_queues: %u\n",
		     hw->max_rx_queues, hw->max_tx_queues);

	/* Initializing spinlock for reconfigs */
	rte_spinlock_init(&hw->reconfig_lock);

	/* Allocating memory for mac addr */
	eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", ETHER_ADDR_LEN, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "Failed to allocate space for MAC address");
		return -ENOMEM;
	}

	nfp_net_read_mac(hw);

	if (!is_valid_assigned_ether_addr((struct ether_addr *)&hw->mac_addr))
		/* Using random mac addresses for VFs */
		eth_random_addr(&hw->mac_addr[0]);

	/* Copying mac address to DPDK eth_dev struct */
	ether_addr_copy((struct ether_addr *)hw->mac_addr,
			&eth_dev->data->mac_addrs[0]);

	if (!(hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR))
		eth_dev->data->dev_flags |= RTE_ETH_DEV_NOLIVE_MAC_ADDR;

	PMD_INIT_LOG(INFO, "port %d VendorID=0x%x DeviceID=0x%x "
		     "mac=%02x:%02x:%02x:%02x:%02x:%02x",
		     eth_dev->data->port_id, pci_dev->id.vendor_id,
		     pci_dev->id.device_id,
		     hw->mac_addr[0], hw->mac_addr[1], hw->mac_addr[2],
		     hw->mac_addr[3], hw->mac_addr[4], hw->mac_addr[5]);

	/* Registering LSC interrupt handler */
	rte_intr_callback_register(&pci_dev->intr_handle,
				   nfp_net_dev_interrupt_handler,
				   (void *)eth_dev);

	/* enable uio intr after callback register */
	rte_intr_enable(&pci_dev->intr_handle);

	/* Telling the firmware about the LSC interrupt entry */
	nn_cfg_writeb(hw, NFP_NET_CFG_LSC, NFP_NET_IRQ_LSC_IDX);

	/* Recording current stats counters values */
	nfp_net_stats_reset(eth_dev);

	return 0;
}

static struct rte_pci_id pci_id_nfp_net_map[] = {
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
			       PCI_DEVICE_ID_NFP6000_PF_NIC)
	},
	{
		RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME,
			       PCI_DEVICE_ID_NFP6000_VF_NIC)
	},
	{
		.vendor_id = 0,
	},
};

static struct eth_driver rte_nfp_net_pmd = {
	.pci_drv = {
		.id_table = pci_id_nfp_net_map,
		.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
			     RTE_PCI_DRV_DETACHABLE,
		.probe = rte_eth_dev_pci_probe,
		.remove = rte_eth_dev_pci_remove,
	},
	.eth_dev_init = nfp_net_init,
	.dev_private_size = sizeof(struct nfp_net_adapter),
};

RTE_PMD_REGISTER_PCI(net_nfp, rte_nfp_net_pmd.pci_drv);
RTE_PMD_REGISTER_PCI_TABLE(net_nfp, pci_id_nfp_net_map);

/*
 * Local variables:
 * c-file-style: "Linux"
 * indent-tabs-mode: t
 * End:
 */