4 * Copyright(c) 2013-2016 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 #include <rte_ethdev.h>
35 #include <rte_ethdev_pci.h>
36 #include <rte_malloc.h>
37 #include <rte_memzone.h>
38 #include <rte_string_fns.h>
40 #include <rte_spinlock.h>
41 #include <rte_kvargs.h>
44 #include "base/fm10k_api.h"
46 /* Default delay to acquire mailbox lock */
47 #define FM10K_MBXLOCK_DELAY_US 20
48 #define UINT64_LOWER_32BITS_MASK 0x00000000ffffffffULL
50 #define MAIN_VSI_POOL_NUMBER 0
52 /* Maximum number of attempts to acquire switch status */
53 #define MAX_QUERY_SWITCH_STATE_TIMES 10
54 /* Wait interval to get switch status */
55 #define WAIT_SWITCH_MSG_US 100000
56 /* A period of quiescence for switch */
57 #define FM10K_SWITCH_QUIESCE_US 100000
58 /* Number of bytes (chars) in a uint32_t */
59 #define CHARS_PER_UINT32 (sizeof(uint32_t))
60 #define BIT_MASK_PER_UINT32 ((1 << CHARS_PER_UINT32) - 1)
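/*
 * Illustrative expansion (editor's note): CHARS_PER_UINT32 evaluates to 4,
 * the number of bytes in a uint32_t, so BIT_MASK_PER_UINT32 is
 * (1 << 4) - 1 == 0xF: one mask bit per byte-wide RETA entry packed into
 * a 32-bit register.
 */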
62 /* default 1:1 map from queue ID to interrupt vector ID */
63 #define Q2V(pci_dev, queue_id) ((pci_dev)->intr_handle.intr_vec[queue_id])
65 /* First 64 Logical ports for PF/VMDQ, second 64 for Flow director */
66 #define MAX_LPORT_NUM 128
67 #define GLORT_FD_Q_BASE 0x40
68 #define GLORT_PF_MASK 0xFFC0
69 #define GLORT_FD_MASK GLORT_PF_MASK
70 #define GLORT_FD_INDEX GLORT_FD_Q_BASE
72 static void fm10k_close_mbx_service(struct fm10k_hw *hw);
73 static void fm10k_dev_promiscuous_enable(struct rte_eth_dev *dev);
74 static void fm10k_dev_promiscuous_disable(struct rte_eth_dev *dev);
75 static void fm10k_dev_allmulticast_enable(struct rte_eth_dev *dev);
76 static void fm10k_dev_allmulticast_disable(struct rte_eth_dev *dev);
77 static inline int fm10k_glort_valid(struct fm10k_hw *hw);
79 fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on);
80 static void fm10k_MAC_filter_set(struct rte_eth_dev *dev,
81 const u8 *mac, bool add, uint32_t pool);
82 static void fm10k_tx_queue_release(void *queue);
83 static void fm10k_rx_queue_release(void *queue);
84 static void fm10k_set_rx_function(struct rte_eth_dev *dev);
85 static void fm10k_set_tx_function(struct rte_eth_dev *dev);
86 static int fm10k_check_ftag(struct rte_devargs *devargs);
87 static int fm10k_link_update(struct rte_eth_dev *dev, int wait_to_complete);
89 struct fm10k_xstats_name_off {
90 char name[RTE_ETH_XSTATS_NAME_SIZE];
94 struct fm10k_xstats_name_off fm10k_hw_stats_strings[] = {
95 {"completion_timeout_count", offsetof(struct fm10k_hw_stats, timeout)},
96 {"unsupported_requests_count", offsetof(struct fm10k_hw_stats, ur)},
97 {"completer_abort_count", offsetof(struct fm10k_hw_stats, ca)},
98 {"unsupported_message_count", offsetof(struct fm10k_hw_stats, um)},
99 {"checksum_error_count", offsetof(struct fm10k_hw_stats, xec)},
100 {"vlan_dropped", offsetof(struct fm10k_hw_stats, vlan_drop)},
101 {"loopback_dropped", offsetof(struct fm10k_hw_stats, loopback_drop)},
102 {"rx_mbuf_allocation_errors", offsetof(struct fm10k_hw_stats,
106 #define FM10K_NB_HW_XSTATS (sizeof(fm10k_hw_stats_strings) / \
107 sizeof(fm10k_hw_stats_strings[0]))
109 struct fm10k_xstats_name_off fm10k_hw_stats_rx_q_strings[] = {
110 {"packets", offsetof(struct fm10k_hw_stats_q, rx_packets)},
111 {"bytes", offsetof(struct fm10k_hw_stats_q, rx_bytes)},
112 {"dropped", offsetof(struct fm10k_hw_stats_q, rx_drops)},
115 #define FM10K_NB_RX_Q_XSTATS (sizeof(fm10k_hw_stats_rx_q_strings) / \
116 sizeof(fm10k_hw_stats_rx_q_strings[0]))
118 struct fm10k_xstats_name_off fm10k_hw_stats_tx_q_strings[] = {
119 {"packets", offsetof(struct fm10k_hw_stats_q, tx_packets)},
120 {"bytes", offsetof(struct fm10k_hw_stats_q, tx_bytes)},
123 #define FM10K_NB_TX_Q_XSTATS (sizeof(fm10k_hw_stats_tx_q_strings) / \
124 sizeof(fm10k_hw_stats_tx_q_strings[0]))
126 #define FM10K_NB_XSTATS (FM10K_NB_HW_XSTATS + FM10K_MAX_QUEUES_PF * \
127 (FM10K_NB_RX_Q_XSTATS + FM10K_NB_TX_Q_XSTATS))
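/*
 * Worked count (illustrative; a FM10K_MAX_QUEUES_PF of 128 is assumed
 * here, not taken from this file): 8 device-level stats plus
 * 128 * (3 Rx + 2 Tx) per-queue stats yields 648 xstats entries.
 */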
129 fm10k_dev_rxq_interrupt_setup(struct rte_eth_dev *dev);
132 fm10k_mbx_initlock(struct fm10k_hw *hw)
134 rte_spinlock_init(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back));
138 fm10k_mbx_lock(struct fm10k_hw *hw)
140 while (!rte_spinlock_trylock(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back)))
141 rte_delay_us(FM10K_MBXLOCK_DELAY_US);
145 fm10k_mbx_unlock(struct fm10k_hw *hw)
147 rte_spinlock_unlock(FM10K_DEV_PRIVATE_TO_MBXLOCK(hw->back));
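/*
 * Usage sketch (editorial, not driver code): every mailbox transaction in
 * this file brackets the shared-code call with the helpers above, spinning
 * in FM10K_MBXLOCK_DELAY_US steps until the lock is acquired:
 *
 *	fm10k_mbx_lock(hw);
 *	status = hw->mac.ops.update_xcast_mode(hw, glort, mode);
 *	fm10k_mbx_unlock(hw);
 */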
150 /* Stubs needed for linkage when vPMD is disabled */
151 int __attribute__((weak))
152 fm10k_rx_vec_condition_check(__rte_unused struct rte_eth_dev *dev)
157 uint16_t __attribute__((weak))
159 __rte_unused void *rx_queue,
160 __rte_unused struct rte_mbuf **rx_pkts,
161 __rte_unused uint16_t nb_pkts)
166 uint16_t __attribute__((weak))
167 fm10k_recv_scattered_pkts_vec(
168 __rte_unused void *rx_queue,
169 __rte_unused struct rte_mbuf **rx_pkts,
170 __rte_unused uint16_t nb_pkts)
175 int __attribute__((weak))
176 fm10k_rxq_vec_setup(__rte_unused struct fm10k_rx_queue *rxq)
182 void __attribute__((weak))
183 fm10k_rx_queue_release_mbufs_vec(
184 __rte_unused struct fm10k_rx_queue *rxq)
189 void __attribute__((weak))
190 fm10k_txq_vec_setup(__rte_unused struct fm10k_tx_queue *txq)
195 int __attribute__((weak))
196 fm10k_tx_vec_condition_check(__rte_unused struct fm10k_tx_queue *txq)
201 uint16_t __attribute__((weak))
202 fm10k_xmit_fixed_burst_vec(__rte_unused void *tx_queue,
203 __rte_unused struct rte_mbuf **tx_pkts,
204 __rte_unused uint16_t nb_pkts)
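/*
 * Editorial note: the weak stubs above only satisfy the linker when the
 * vector objects are left out of the build. When the SSE path is compiled
 * (fm10k_rxtx_vec.c in the DPDK tree), its strong definitions of the same
 * symbols, e.g. fm10k_recv_pkts_vec(), silently replace these stubs.
 */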
210 * reset queue to initial state, allocate software buffers used when starting
212 * return 0 on success
213 * return -ENOMEM if buffers cannot be allocated
214 * return -EINVAL if buffers do not satisfy alignment condition
217 rx_queue_reset(struct fm10k_rx_queue *q)
219 static const union fm10k_rx_desc zero = {{0} };
222 PMD_INIT_FUNC_TRACE();
224 diag = rte_mempool_get_bulk(q->mp, (void **)q->sw_ring, q->nb_desc);
228 for (i = 0; i < q->nb_desc; ++i) {
229 fm10k_pktmbuf_reset(q->sw_ring[i], q->port_id);
230 if (!fm10k_addr_alignment_valid(q->sw_ring[i])) {
231 rte_mempool_put_bulk(q->mp, (void **)q->sw_ring,
235 dma_addr = MBUF_DMA_ADDR_DEFAULT(q->sw_ring[i]);
236 q->hw_ring[i].q.pkt_addr = dma_addr;
237 q->hw_ring[i].q.hdr_addr = dma_addr;
240 /* initialize extra software ring entries. Space for these extra
241 * entries is always allocated.
243 memset(&q->fake_mbuf, 0x0, sizeof(q->fake_mbuf));
244 for (i = 0; i < q->nb_fake_desc; ++i) {
245 q->sw_ring[q->nb_desc + i] = &q->fake_mbuf;
246 q->hw_ring[q->nb_desc + i] = zero;
251 q->next_trigger = q->alloc_thresh - 1;
252 FM10K_PCI_REG_WRITE(q->tail_ptr, q->nb_desc - 1);
253 q->rxrearm_start = 0;
260 * clean queue, descriptor rings, free software buffers used when stopping
264 rx_queue_clean(struct fm10k_rx_queue *q)
266 union fm10k_rx_desc zero = {.q = {0, 0, 0, 0} };
268 PMD_INIT_FUNC_TRACE();
270 /* zero descriptor rings */
271 for (i = 0; i < q->nb_desc; ++i)
272 q->hw_ring[i] = zero;
274 /* zero faked descriptors */
275 for (i = 0; i < q->nb_fake_desc; ++i)
276 q->hw_ring[q->nb_desc + i] = zero;
278 /* vPMD driver has a different way of releasing mbufs. */
279 if (q->rx_using_sse) {
280 fm10k_rx_queue_release_mbufs_vec(q);
284 /* free software buffers */
285 for (i = 0; i < q->nb_desc; ++i) {
287 rte_pktmbuf_free_seg(q->sw_ring[i]);
288 q->sw_ring[i] = NULL;
294 * free all queue memory used when releasing the queue (i.e. from the configure path)
297 rx_queue_free(struct fm10k_rx_queue *q)
299 PMD_INIT_FUNC_TRACE();
301 PMD_INIT_LOG(DEBUG, "Freeing rx queue %p", q);
304 rte_free(q->sw_ring);
313 * disable RX queue, wait until HW finishes the necessary flush operation
316 rx_queue_disable(struct fm10k_hw *hw, uint16_t qnum)
320 reg = FM10K_READ_REG(hw, FM10K_RXQCTL(qnum));
321 FM10K_WRITE_REG(hw, FM10K_RXQCTL(qnum),
322 reg & ~FM10K_RXQCTL_ENABLE);
324 /* Wait 100us at most */
325 for (i = 0; i < FM10K_QUEUE_DISABLE_TIMEOUT; i++) {
327 reg = FM10K_READ_REG(hw, FM10K_RXQCTL(qnum));
328 if (!(reg & FM10K_RXQCTL_ENABLE))
332 if (i == FM10K_QUEUE_DISABLE_TIMEOUT)
339 * reset queue to initial state, allocate software buffers used when starting
343 tx_queue_reset(struct fm10k_tx_queue *q)
345 PMD_INIT_FUNC_TRACE();
349 q->nb_free = q->nb_desc - 1;
350 fifo_reset(&q->rs_tracker, (q->nb_desc + 1) / q->rs_thresh);
351 FM10K_PCI_REG_WRITE(q->tail_ptr, 0);
355 * clean queue, descriptor rings, free software buffers used when stopping
359 tx_queue_clean(struct fm10k_tx_queue *q)
361 struct fm10k_tx_desc zero = {0, 0, 0, 0, 0, 0};
363 PMD_INIT_FUNC_TRACE();
365 /* zero descriptor rings */
366 for (i = 0; i < q->nb_desc; ++i)
367 q->hw_ring[i] = zero;
369 /* free software buffers */
370 for (i = 0; i < q->nb_desc; ++i) {
372 rte_pktmbuf_free_seg(q->sw_ring[i]);
373 q->sw_ring[i] = NULL;
379 * free all queue memory used when releasing the queue (i.e. from the configure path)
382 tx_queue_free(struct fm10k_tx_queue *q)
384 PMD_INIT_FUNC_TRACE();
386 PMD_INIT_LOG(DEBUG, "Freeing tx queue %p", q);
388 if (q->rs_tracker.list) {
389 rte_free(q->rs_tracker.list);
390 q->rs_tracker.list = NULL;
393 rte_free(q->sw_ring);
402 * disable TX queue, wait until HW finishes the necessary flush operation
405 tx_queue_disable(struct fm10k_hw *hw, uint16_t qnum)
409 reg = FM10K_READ_REG(hw, FM10K_TXDCTL(qnum));
410 FM10K_WRITE_REG(hw, FM10K_TXDCTL(qnum),
411 reg & ~FM10K_TXDCTL_ENABLE);
413 /* Wait 100us at most */
414 for (i = 0; i < FM10K_QUEUE_DISABLE_TIMEOUT; i++) {
416 reg = FM10K_READ_REG(hw, FM10K_TXDCTL(qnum));
417 if (!(reg & FM10K_TXDCTL_ENABLE))
421 if (i == FM10K_QUEUE_DISABLE_TIMEOUT)
428 fm10k_check_mq_mode(struct rte_eth_dev *dev)
430 enum rte_eth_rx_mq_mode rx_mq_mode = dev->data->dev_conf.rxmode.mq_mode;
431 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
432 struct rte_eth_vmdq_rx_conf *vmdq_conf;
433 uint16_t nb_rx_q = dev->data->nb_rx_queues;
435 vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
437 if (rx_mq_mode & ETH_MQ_RX_DCB_FLAG) {
438 PMD_INIT_LOG(ERR, "DCB mode is not supported.");
442 if (!(rx_mq_mode & ETH_MQ_RX_VMDQ_FLAG))
445 if (hw->mac.type == fm10k_mac_vf) {
446 PMD_INIT_LOG(ERR, "VMDQ mode is not supported in VF.");
450 /* Check VMDQ queue pool number */
451 if (vmdq_conf->nb_queue_pools >
452 sizeof(vmdq_conf->pool_map[0].pools) * CHAR_BIT ||
453 vmdq_conf->nb_queue_pools > nb_rx_q) {
454 PMD_INIT_LOG(ERR, "Too many of queue pools: %d",
455 vmdq_conf->nb_queue_pools);
462 static const struct fm10k_txq_ops def_txq_ops = {
463 .reset = tx_queue_reset,
467 fm10k_dev_configure(struct rte_eth_dev *dev)
471 PMD_INIT_FUNC_TRACE();
473 if (dev->data->dev_conf.rxmode.hw_strip_crc == 0)
474 PMD_INIT_LOG(WARNING, "fm10k always strips CRC");
475 /* multiple queue mode checking */
476 ret = fm10k_check_mq_mode(dev);
478 PMD_DRV_LOG(ERR, "fm10k_check_mq_mode fails with %d.",
487 fm10k_dev_vmdq_rx_configure(struct rte_eth_dev *dev)
489 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
490 struct rte_eth_vmdq_rx_conf *vmdq_conf;
493 vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
495 for (i = 0; i < vmdq_conf->nb_pool_maps; i++) {
496 if (!vmdq_conf->pool_map[i].pools)
499 fm10k_update_vlan(hw, vmdq_conf->pool_map[i].vlan_id, 0, true);
500 fm10k_mbx_unlock(hw);
505 fm10k_dev_pf_main_vsi_reset(struct rte_eth_dev *dev)
507 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
509 /* Add default mac address */
510 fm10k_MAC_filter_set(dev, hw->mac.addr, true,
511 MAIN_VSI_POOL_NUMBER);
515 fm10k_dev_rss_configure(struct rte_eth_dev *dev)
517 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
518 struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
519 uint32_t mrqc, *key, i, reta, j;
522 #define RSS_KEY_SIZE 40
523 static uint8_t rss_intel_key[RSS_KEY_SIZE] = {
524 0x6D, 0x5A, 0x56, 0xDA, 0x25, 0x5B, 0x0E, 0xC2,
525 0x41, 0x67, 0x25, 0x3D, 0x43, 0xA3, 0x8F, 0xB0,
526 0xD0, 0xCA, 0x2B, 0xCB, 0xAE, 0x7B, 0x30, 0xB4,
527 0x77, 0xCB, 0x2D, 0xA3, 0x80, 0x30, 0xF2, 0x0C,
528 0x6A, 0x42, 0xB7, 0x3B, 0xBE, 0xAC, 0x01, 0xFA,
531 if (dev->data->nb_rx_queues == 1 ||
532 dev_conf->rxmode.mq_mode != ETH_MQ_RX_RSS ||
533 dev_conf->rx_adv_conf.rss_conf.rss_hf == 0) {
534 FM10K_WRITE_REG(hw, FM10K_MRQC(0), 0);
538 /* hash key is rss_intel_key (default) or user-provided (rss_key) */
539 if (dev_conf->rx_adv_conf.rss_conf.rss_key == NULL)
540 key = (uint32_t *)rss_intel_key;
542 key = (uint32_t *)dev_conf->rx_adv_conf.rss_conf.rss_key;
544 /* Now fill our hash function seeds, 4 bytes at a time */
545 for (i = 0; i < RSS_KEY_SIZE / sizeof(*key); ++i)
546 FM10K_WRITE_REG(hw, FM10K_RSSRK(0, i), key[i]);
549 * Fill in redirection table
550 * The byte-swap is needed because NIC registers are in
551 * little-endian order.
554 for (i = 0, j = 0; i < FM10K_MAX_RSS_INDICES; i++, j++) {
555 if (j == dev->data->nb_rx_queues)
557 reta = (reta << CHAR_BIT) | j;
559 FM10K_WRITE_REG(hw, FM10K_RETA(0, i >> 2), reta);
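/*
 * Worked example (illustrative): with 4 Rx queues, the first redirection
 * register packs entries 0..3 one byte at a time; reta accumulates
 * 0x0 -> 0x1 -> 0x102 -> 0x00010203 before being written, i.e. queue
 * indices 0,1,2,3 from most- to least-significant byte.
 */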
564 * Generate RSS hash based on packet types, TCP/UDP
565 * port numbers and/or IPv4/v6 src and dst addresses
567 hf = dev_conf->rx_adv_conf.rss_conf.rss_hf;
569 mrqc |= (hf & ETH_RSS_IPV4) ? FM10K_MRQC_IPV4 : 0;
570 mrqc |= (hf & ETH_RSS_IPV6) ? FM10K_MRQC_IPV6 : 0;
571 mrqc |= (hf & ETH_RSS_IPV6_EX) ? FM10K_MRQC_IPV6 : 0;
572 mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? FM10K_MRQC_TCP_IPV4 : 0;
573 mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? FM10K_MRQC_TCP_IPV6 : 0;
574 mrqc |= (hf & ETH_RSS_IPV6_TCP_EX) ? FM10K_MRQC_TCP_IPV6 : 0;
575 mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_UDP) ? FM10K_MRQC_UDP_IPV4 : 0;
576 mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_UDP) ? FM10K_MRQC_UDP_IPV6 : 0;
577 mrqc |= (hf & ETH_RSS_IPV6_UDP_EX) ? FM10K_MRQC_UDP_IPV6 : 0;
580 PMD_INIT_LOG(ERR, "Specified RSS mode 0x%"PRIx64"is not"
585 FM10K_WRITE_REG(hw, FM10K_MRQC(0), mrqc);
589 fm10k_dev_logic_port_update(struct rte_eth_dev *dev, uint16_t nb_lport_new)
591 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
594 for (i = 0; i < nb_lport_new; i++) {
595 /* Set unicast mode by default. Applications can change
596 * to another mode via other API functions.
599 hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map + i,
600 FM10K_XCAST_MODE_NONE);
601 fm10k_mbx_unlock(hw);
606 fm10k_dev_mq_rx_configure(struct rte_eth_dev *dev)
608 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
609 struct rte_eth_vmdq_rx_conf *vmdq_conf;
610 struct rte_eth_conf *dev_conf = &dev->data->dev_conf;
611 struct fm10k_macvlan_filter_info *macvlan;
612 uint16_t nb_queue_pools = 0; /* pool number in configuration */
613 uint16_t nb_lport_new;
615 macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
616 vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
618 fm10k_dev_rss_configure(dev);
620 /* only PF supports VMDQ */
621 if (hw->mac.type != fm10k_mac_pf)
624 if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG)
625 nb_queue_pools = vmdq_conf->nb_queue_pools;
627 /* no pool number change, no need to update logic port and VLAN/MAC */
628 if (macvlan->nb_queue_pools == nb_queue_pools)
631 nb_lport_new = nb_queue_pools ? nb_queue_pools : 1;
632 fm10k_dev_logic_port_update(dev, nb_lport_new);
634 /* reset MAC/VLAN as it's based on VMDQ or PF main VSI */
635 memset(dev->data->mac_addrs, 0,
636 ETHER_ADDR_LEN * FM10K_MAX_MACADDR_NUM);
637 ether_addr_copy((const struct ether_addr *)hw->mac.addr,
638 &dev->data->mac_addrs[0]);
639 memset(macvlan, 0, sizeof(*macvlan));
640 macvlan->nb_queue_pools = nb_queue_pools;
643 fm10k_dev_vmdq_rx_configure(dev);
645 fm10k_dev_pf_main_vsi_reset(dev);
649 fm10k_dev_tx_init(struct rte_eth_dev *dev)
651 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
653 struct fm10k_tx_queue *txq;
657 /* Disable TXINT to avoid possible interrupt */
658 for (i = 0; i < hw->mac.max_queues; i++)
659 FM10K_WRITE_REG(hw, FM10K_TXINT(i),
660 3 << FM10K_TXINT_TIMER_SHIFT);
663 for (i = 0; i < dev->data->nb_tx_queues; ++i) {
664 txq = dev->data->tx_queues[i];
665 base_addr = txq->hw_ring_phys_addr;
666 size = txq->nb_desc * sizeof(struct fm10k_tx_desc);
668 /* disable queue to avoid issues while updating state */
669 ret = tx_queue_disable(hw, i);
671 PMD_INIT_LOG(ERR, "failed to disable queue %d", i);
674 /* Enable use of FTAG bit in TX descriptor, PFVTCTL
675 * register is read-only for VF.
677 if (fm10k_check_ftag(dev->device->devargs)) {
678 if (hw->mac.type == fm10k_mac_pf) {
679 FM10K_WRITE_REG(hw, FM10K_PFVTCTL(i),
680 FM10K_PFVTCTL_FTAG_DESC_ENABLE);
681 PMD_INIT_LOG(DEBUG, "FTAG mode is enabled");
683 PMD_INIT_LOG(ERR, "VF FTAG is not supported.");
688 /* set location and size for descriptor ring */
689 FM10K_WRITE_REG(hw, FM10K_TDBAL(i),
690 base_addr & UINT64_LOWER_32BITS_MASK);
691 FM10K_WRITE_REG(hw, FM10K_TDBAH(i),
692 base_addr >> (CHAR_BIT * sizeof(uint32_t)));
693 FM10K_WRITE_REG(hw, FM10K_TDLEN(i), size);
695 /* assign default SGLORT for each TX queue by PF */
696 if (hw->mac.type == fm10k_mac_pf)
697 FM10K_WRITE_REG(hw, FM10K_TX_SGLORT(i), hw->mac.dglort_map);
700 /* set up vector or scalar TX function as appropriate */
701 fm10k_set_tx_function(dev);
707 fm10k_dev_rx_init(struct rte_eth_dev *dev)
709 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
710 struct fm10k_macvlan_filter_info *macvlan;
711 struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
712 struct rte_intr_handle *intr_handle = &pdev->intr_handle;
714 struct fm10k_rx_queue *rxq;
717 uint32_t rxdctl = FM10K_RXDCTL_WRITE_BACK_MIN_DELAY;
718 uint32_t logic_port = hw->mac.dglort_map;
720 uint16_t queue_stride = 0;
722 /* enable RXINT for interrupt mode */
724 if (rte_intr_dp_is_en(intr_handle)) {
725 for (; i < dev->data->nb_rx_queues; i++) {
726 FM10K_WRITE_REG(hw, FM10K_RXINT(i), Q2V(pdev, i));
727 if (hw->mac.type == fm10k_mac_pf)
728 FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(pdev, i)),
730 FM10K_ITR_MASK_CLEAR);
732 FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(pdev, i)),
734 FM10K_ITR_MASK_CLEAR);
737 /* Disable other RXINT to avoid possible interrupt */
738 for (; i < hw->mac.max_queues; i++)
739 FM10K_WRITE_REG(hw, FM10K_RXINT(i),
740 3 << FM10K_RXINT_TIMER_SHIFT);
742 /* Setup RX queues */
743 for (i = 0; i < dev->data->nb_rx_queues; ++i) {
744 rxq = dev->data->rx_queues[i];
745 base_addr = rxq->hw_ring_phys_addr;
746 size = rxq->nb_desc * sizeof(union fm10k_rx_desc);
748 /* disable queue to avoid issues while updating state */
749 ret = rx_queue_disable(hw, i);
751 PMD_INIT_LOG(ERR, "failed to disable queue %d", i);
755 /* Setup the Base and Length of the Rx Descriptor Ring */
756 FM10K_WRITE_REG(hw, FM10K_RDBAL(i),
757 base_addr & UINT64_LOWER_32BITS_MASK);
758 FM10K_WRITE_REG(hw, FM10K_RDBAH(i),
759 base_addr >> (CHAR_BIT * sizeof(uint32_t)));
760 FM10K_WRITE_REG(hw, FM10K_RDLEN(i), size);
762 /* Configure the Rx buffer size for one buff without split */
763 buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
764 RTE_PKTMBUF_HEADROOM);
765 /* As RX buffer is aligned to 512B within mbuf, some bytes are
766 * reserved for this purpose, and the worst case could be 511B.
767 * But SRR reg assumes all buffers have the same size. In order
768 * to fill the gap, we'll have to consider the worst case and
769 * assume 512B is reserved. If we don't do so, it's possible
770 * for HW to overwrite data to next mbuf.
772 buf_size -= FM10K_RX_DATABUF_ALIGN;
774 FM10K_WRITE_REG(hw, FM10K_SRRCTL(i),
775 (buf_size >> FM10K_SRRCTL_BSIZEPKT_SHIFT) |
776 FM10K_SRRCTL_LOOPBACK_SUPPRESS);
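/*
 * Worked sizing (assumes the common RTE_MBUF_DEFAULT_BUF_SIZE mempool, not
 * a value from this file): a data room of 2176B minus 128B of headroom
 * gives buf_size = 2048B; subtracting the 512B alignment reserve leaves
 * 1536B, which is what SRRCTL advertises after the BSIZEPKT shift.
 */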
778 /* Account for dual VLAN tag length to support QinQ (dual VLAN) */
779 if ((dev->data->dev_conf.rxmode.max_rx_pkt_len +
780 2 * FM10K_VLAN_TAG_SIZE) > buf_size ||
781 dev->data->dev_conf.rxmode.enable_scatter) {
783 dev->data->scattered_rx = 1;
784 reg = FM10K_READ_REG(hw, FM10K_SRRCTL(i));
785 reg |= FM10K_SRRCTL_BUFFER_CHAINING_EN;
786 FM10K_WRITE_REG(hw, FM10K_SRRCTL(i), reg);
789 /* Enable drop on empty, it's RO for VF */
790 if (hw->mac.type == fm10k_mac_pf && rxq->drop_en)
791 rxdctl |= FM10K_RXDCTL_DROP_ON_EMPTY;
793 FM10K_WRITE_REG(hw, FM10K_RXDCTL(i), rxdctl);
794 FM10K_WRITE_FLUSH(hw);
797 /* Configure VMDQ/RSS if applicable */
798 fm10k_dev_mq_rx_configure(dev);
800 /* Decide the best RX function */
801 fm10k_set_rx_function(dev);
803 /* update RX_SGLORT for loopback suppression */
804 if (hw->mac.type != fm10k_mac_pf)
806 macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
807 if (macvlan->nb_queue_pools)
808 queue_stride = dev->data->nb_rx_queues / macvlan->nb_queue_pools;
809 for (i = 0; i < dev->data->nb_rx_queues; ++i) {
810 if (i && queue_stride && !(i % queue_stride))
812 FM10K_WRITE_REG(hw, FM10K_RX_SGLORT(i), logic_port);
819 fm10k_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
821 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
824 struct fm10k_rx_queue *rxq;
826 PMD_INIT_FUNC_TRACE();
828 if (rx_queue_id < dev->data->nb_rx_queues) {
829 rxq = dev->data->rx_queues[rx_queue_id];
830 err = rx_queue_reset(rxq);
831 if (err == -ENOMEM) {
832 PMD_INIT_LOG(ERR, "Failed to alloc memory : %d", err);
834 } else if (err == -EINVAL) {
835 PMD_INIT_LOG(ERR, "Invalid buffer address alignment :"
840 /* Setup the HW Rx Head and Tail Descriptor Pointers
841 * Note: this must be done AFTER the queue is enabled on real
842 * hardware, but BEFORE the queue is enabled when using the
843 * emulation platform. Do it in both places for now and remove
844 * this comment and the following two register writes when the
845 * emulation platform is no longer being used.
847 FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
848 FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);
850 /* Set PF ownership flag for PF devices */
851 reg = FM10K_READ_REG(hw, FM10K_RXQCTL(rx_queue_id));
852 if (hw->mac.type == fm10k_mac_pf)
853 reg |= FM10K_RXQCTL_PF;
854 reg |= FM10K_RXQCTL_ENABLE;
855 /* enable RX queue */
856 FM10K_WRITE_REG(hw, FM10K_RXQCTL(rx_queue_id), reg);
857 FM10K_WRITE_FLUSH(hw);
859 /* Setup the HW Rx Head and Tail Descriptor Pointers
860 * Note: this must be done AFTER the queue is enabled
862 FM10K_WRITE_REG(hw, FM10K_RDH(rx_queue_id), 0);
863 FM10K_WRITE_REG(hw, FM10K_RDT(rx_queue_id), rxq->nb_desc - 1);
864 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
871 fm10k_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
873 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
875 PMD_INIT_FUNC_TRACE();
877 if (rx_queue_id < dev->data->nb_rx_queues) {
878 /* Disable RX queue */
879 rx_queue_disable(hw, rx_queue_id);
881 /* Free mbuf and clean HW ring */
882 rx_queue_clean(dev->data->rx_queues[rx_queue_id]);
883 dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
890 fm10k_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
892 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
893 /** @todo - this should be defined in the shared code */
894 #define FM10K_TXDCTL_WRITE_BACK_MIN_DELAY 0x00010000
895 uint32_t txdctl = FM10K_TXDCTL_WRITE_BACK_MIN_DELAY;
898 PMD_INIT_FUNC_TRACE();
900 if (tx_queue_id < dev->data->nb_tx_queues) {
901 struct fm10k_tx_queue *q = dev->data->tx_queues[tx_queue_id];
905 /* reset head and tail pointers */
906 FM10K_WRITE_REG(hw, FM10K_TDH(tx_queue_id), 0);
907 FM10K_WRITE_REG(hw, FM10K_TDT(tx_queue_id), 0);
909 /* enable TX queue */
910 FM10K_WRITE_REG(hw, FM10K_TXDCTL(tx_queue_id),
911 FM10K_TXDCTL_ENABLE | txdctl);
912 FM10K_WRITE_FLUSH(hw);
913 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
921 fm10k_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
923 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
925 PMD_INIT_FUNC_TRACE();
927 if (tx_queue_id < dev->data->nb_tx_queues) {
928 tx_queue_disable(hw, tx_queue_id);
929 tx_queue_clean(dev->data->tx_queues[tx_queue_id]);
930 dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
936 static inline int fm10k_glort_valid(struct fm10k_hw *hw)
938 return ((hw->mac.dglort_map & FM10K_DGLORTMAP_NONE)
939 != FM10K_DGLORTMAP_NONE);
943 fm10k_dev_promiscuous_enable(struct rte_eth_dev *dev)
945 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
948 PMD_INIT_FUNC_TRACE();
950 /* Return if a valid glort range was not acquired */
951 if ((hw->mac.type == fm10k_mac_pf) && !fm10k_glort_valid(hw))
955 status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
956 FM10K_XCAST_MODE_PROMISC);
957 fm10k_mbx_unlock(hw);
959 if (status != FM10K_SUCCESS)
960 PMD_INIT_LOG(ERR, "Failed to enable promiscuous mode");
964 fm10k_dev_promiscuous_disable(struct rte_eth_dev *dev)
966 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
970 PMD_INIT_FUNC_TRACE();
972 /* Return if a valid glort range was not acquired */
973 if ((hw->mac.type == fm10k_mac_pf) && !fm10k_glort_valid(hw))
976 if (dev->data->all_multicast == 1)
977 mode = FM10K_XCAST_MODE_ALLMULTI;
979 mode = FM10K_XCAST_MODE_NONE;
982 status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
984 fm10k_mbx_unlock(hw);
986 if (status != FM10K_SUCCESS)
987 PMD_INIT_LOG(ERR, "Failed to disable promiscuous mode");
991 fm10k_dev_allmulticast_enable(struct rte_eth_dev *dev)
993 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
996 PMD_INIT_FUNC_TRACE();
998 /* Return if a valid glort range was not acquired */
999 if ((hw->mac.type == fm10k_mac_pf) && !fm10k_glort_valid(hw))
1002 /* If promiscuous mode is enabled, it doesn't make sense to enable
1003 * allmulticast and disable promiscuous since fm10k can only select
 * one of the modes.
 */
1006 if (dev->data->promiscuous) {
1007 PMD_INIT_LOG(INFO, "Promiscuous mode is enabled, "\
1008 "needn't enable allmulticast");
1013 status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
1014 FM10K_XCAST_MODE_ALLMULTI);
1015 fm10k_mbx_unlock(hw);
1017 if (status != FM10K_SUCCESS)
1018 PMD_INIT_LOG(ERR, "Failed to enable allmulticast mode");
1022 fm10k_dev_allmulticast_disable(struct rte_eth_dev *dev)
1024 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1027 PMD_INIT_FUNC_TRACE();
1029 /* Return if a valid glort range was not acquired */
1030 if ((hw->mac.type == fm10k_mac_pf) && !fm10k_glort_valid(hw))
1033 if (dev->data->promiscuous) {
1034 PMD_INIT_LOG(ERR, "Failed to disable allmulticast mode "\
1035 "since promisc mode is enabled");
1040 /* Change to unicast mode */
1041 status = hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
1042 FM10K_XCAST_MODE_NONE);
1043 fm10k_mbx_unlock(hw);
1045 if (status != FM10K_SUCCESS)
1046 PMD_INIT_LOG(ERR, "Failed to disable allmulticast mode");
1050 fm10k_dev_dglort_map_configure(struct rte_eth_dev *dev)
1052 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1053 uint32_t dglortdec, pool_len, rss_len, i, dglortmask;
1054 uint16_t nb_queue_pools;
1055 struct fm10k_macvlan_filter_info *macvlan;
1057 macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1058 nb_queue_pools = macvlan->nb_queue_pools;
1059 pool_len = nb_queue_pools ? rte_fls_u32(nb_queue_pools - 1) : 0;
1060 rss_len = rte_fls_u32(dev->data->nb_rx_queues - 1) - pool_len;
1062 /* GLORT 0x0-0x3F are used by PF and VMDQ, 0x40-0x7F used by FD */
1063 dglortdec = (rss_len << FM10K_DGLORTDEC_RSSLENGTH_SHIFT) | pool_len;
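/*
 * Worked decode (illustrative): with 4 VMDQ pools over 8 Rx queues,
 * pool_len = rte_fls_u32(3) = 2 and rss_len = rte_fls_u32(7) - 2 = 1,
 * so each pool owns 2^1 = 2 RSS queues and dglortdec is programmed as
 * (1 << FM10K_DGLORTDEC_RSSLENGTH_SHIFT) | 2.
 */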
1064 dglortmask = (GLORT_PF_MASK << FM10K_DGLORTMAP_MASK_SHIFT) |
1066 FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(0), dglortmask);
1067 /* Configure VMDQ/RSS DGlort Decoder */
1068 FM10K_WRITE_REG(hw, FM10K_DGLORTDEC(0), dglortdec);
1070 /* Flow Director configurations, only queue number is valid. */
1071 dglortdec = rte_fls_u32(dev->data->nb_rx_queues - 1);
1072 dglortmask = (GLORT_FD_MASK << FM10K_DGLORTMAP_MASK_SHIFT) |
1073 (hw->mac.dglort_map + GLORT_FD_Q_BASE);
1074 FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(1), dglortmask);
1075 FM10K_WRITE_REG(hw, FM10K_DGLORTDEC(1), dglortdec);
1077 /* Invalidate all other GLORT entries */
1078 for (i = 2; i < FM10K_DGLORT_COUNT; i++)
1079 FM10K_WRITE_REG(hw, FM10K_DGLORTMAP(i),
1080 FM10K_DGLORTMAP_NONE);
1083 #define BSIZEPKT_ROUNDUP ((1 << FM10K_SRRCTL_BSIZEPKT_SHIFT) - 1)
1085 fm10k_dev_start(struct rte_eth_dev *dev)
1087 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1090 PMD_INIT_FUNC_TRACE();
1092 /* stop, init, then start the hw */
1093 diag = fm10k_stop_hw(hw);
1094 if (diag != FM10K_SUCCESS) {
1095 PMD_INIT_LOG(ERR, "Hardware stop failed: %d", diag);
1099 diag = fm10k_init_hw(hw);
1100 if (diag != FM10K_SUCCESS) {
1101 PMD_INIT_LOG(ERR, "Hardware init failed: %d", diag);
1105 diag = fm10k_start_hw(hw);
1106 if (diag != FM10K_SUCCESS) {
1107 PMD_INIT_LOG(ERR, "Hardware start failed: %d", diag);
1111 diag = fm10k_dev_tx_init(dev);
1113 PMD_INIT_LOG(ERR, "TX init failed: %d", diag);
1117 if (fm10k_dev_rxq_interrupt_setup(dev))
1120 diag = fm10k_dev_rx_init(dev);
1122 PMD_INIT_LOG(ERR, "RX init failed: %d", diag);
1126 if (hw->mac.type == fm10k_mac_pf)
1127 fm10k_dev_dglort_map_configure(dev);
1129 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1130 struct fm10k_rx_queue *rxq;
1131 rxq = dev->data->rx_queues[i];
1133 if (rxq->rx_deferred_start)
1135 diag = fm10k_dev_rx_queue_start(dev, i);
1138 for (j = 0; j < i; ++j)
1139 rx_queue_clean(dev->data->rx_queues[j]);
1144 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1145 struct fm10k_tx_queue *txq;
1146 txq = dev->data->tx_queues[i];
1148 if (txq->tx_deferred_start)
1150 diag = fm10k_dev_tx_queue_start(dev, i);
1153 for (j = 0; j < i; ++j)
1154 tx_queue_clean(dev->data->tx_queues[j]);
1155 for (j = 0; j < dev->data->nb_rx_queues; ++j)
1156 rx_queue_clean(dev->data->rx_queues[j]);
1161 /* Update default vlan when not in VMDQ mode */
1162 if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG))
1163 fm10k_vlan_filter_set(dev, hw->mac.default_vid, true);
1165 fm10k_link_update(dev, 0);
1171 fm10k_dev_stop(struct rte_eth_dev *dev)
1173 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1174 struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
1175 struct rte_intr_handle *intr_handle = &pdev->intr_handle;
1178 PMD_INIT_FUNC_TRACE();
1180 if (dev->data->tx_queues)
1181 for (i = 0; i < dev->data->nb_tx_queues; i++)
1182 fm10k_dev_tx_queue_stop(dev, i);
1184 if (dev->data->rx_queues)
1185 for (i = 0; i < dev->data->nb_rx_queues; i++)
1186 fm10k_dev_rx_queue_stop(dev, i);
1188 /* Disable datapath event */
1189 if (rte_intr_dp_is_en(intr_handle)) {
1190 for (i = 0; i < dev->data->nb_rx_queues; i++) {
1191 FM10K_WRITE_REG(hw, FM10K_RXINT(i),
1192 3 << FM10K_RXINT_TIMER_SHIFT);
1193 if (hw->mac.type == fm10k_mac_pf)
1194 FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(pdev, i)),
1195 FM10K_ITR_MASK_SET);
1197 FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(pdev, i)),
1198 FM10K_ITR_MASK_SET);
1201 /* Clean datapath event and queue/vec mapping */
1202 rte_intr_efd_disable(intr_handle);
1203 rte_free(intr_handle->intr_vec);
1204 intr_handle->intr_vec = NULL;
1208 fm10k_dev_queue_release(struct rte_eth_dev *dev)
1212 PMD_INIT_FUNC_TRACE();
1214 if (dev->data->tx_queues) {
1215 for (i = 0; i < dev->data->nb_tx_queues; i++) {
1216 struct fm10k_tx_queue *txq = dev->data->tx_queues[i];
1222 if (dev->data->rx_queues) {
1223 for (i = 0; i < dev->data->nb_rx_queues; i++)
1224 fm10k_rx_queue_release(dev->data->rx_queues[i]);
1229 fm10k_dev_close(struct rte_eth_dev *dev)
1231 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1233 PMD_INIT_FUNC_TRACE();
1236 hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map,
1237 MAX_LPORT_NUM, false);
1238 fm10k_mbx_unlock(hw);
1240 /* allow 100ms for device to quiesce */
1241 rte_delay_us(FM10K_SWITCH_QUIESCE_US);
1243 /* Stop mailbox service first */
1244 fm10k_close_mbx_service(hw);
1245 fm10k_dev_stop(dev);
1246 fm10k_dev_queue_release(dev);
1251 fm10k_link_update(struct rte_eth_dev *dev,
1252 __rte_unused int wait_to_complete)
1254 struct fm10k_dev_info *dev_info =
1255 FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
1256 PMD_INIT_FUNC_TRACE();
1258 /* The speed is ~50Gbps per Gen3 x8 PCIe interface. For now, we
1259 * leave the speed undefined since there is no 50Gbps Ethernet.
1261 dev->data->dev_link.link_speed = 0;
1262 dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
1263 dev->data->dev_link.link_status =
1264 dev_info->sm_down ? ETH_LINK_DOWN : ETH_LINK_UP;
1269 static int fm10k_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
1270 struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned limit)
1275 if (xstats_names != NULL) {
1276 /* Note: limit checked in rte_eth_xstats_names() */
1279 for (i = 0; i < FM10K_NB_HW_XSTATS; i++) {
1280 snprintf(xstats_names[count].name,
1281 sizeof(xstats_names[count].name),
1282 "%s", fm10k_hw_stats_strings[count].name);
1286 /* PF queue stats */
1287 for (q = 0; q < FM10K_MAX_QUEUES_PF; q++) {
1288 for (i = 0; i < FM10K_NB_RX_Q_XSTATS; i++) {
1289 snprintf(xstats_names[count].name,
1290 sizeof(xstats_names[count].name),
1292 fm10k_hw_stats_rx_q_strings[i].name);
1295 for (i = 0; i < FM10K_NB_TX_Q_XSTATS; i++) {
1296 snprintf(xstats_names[count].name,
1297 sizeof(xstats_names[count].name),
1299 fm10k_hw_stats_tx_q_strings[i].name);
1304 return FM10K_NB_XSTATS;
1308 fm10k_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
1311 struct fm10k_hw_stats *hw_stats =
1312 FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
1313 unsigned i, q, count = 0;
1315 if (n < FM10K_NB_XSTATS)
1316 return FM10K_NB_XSTATS;
1319 for (i = 0; i < FM10K_NB_HW_XSTATS; i++) {
1320 xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
1321 fm10k_hw_stats_strings[count].offset);
1322 xstats[count].id = count;
1326 /* PF queue stats */
1327 for (q = 0; q < FM10K_MAX_QUEUES_PF; q++) {
1328 for (i = 0; i < FM10K_NB_RX_Q_XSTATS; i++) {
1329 xstats[count].value =
1330 *(uint64_t *)(((char *)&hw_stats->q[q]) +
1331 fm10k_hw_stats_rx_q_strings[i].offset);
1332 xstats[count].id = count;
1335 for (i = 0; i < FM10K_NB_TX_Q_XSTATS; i++) {
1336 xstats[count].value =
1337 *(uint64_t *)(((char *)&hw_stats->q[q]) +
1338 fm10k_hw_stats_tx_q_strings[i].offset);
1339 xstats[count].id = count;
1344 return FM10K_NB_XSTATS;
1348 fm10k_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
1350 uint64_t ipackets, opackets, ibytes, obytes;
1351 struct fm10k_hw *hw =
1352 FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1353 struct fm10k_hw_stats *hw_stats =
1354 FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
1357 PMD_INIT_FUNC_TRACE();
1359 fm10k_update_hw_stats(hw, hw_stats);
1361 ipackets = opackets = ibytes = obytes = 0;
1362 for (i = 0; (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) &&
1363 (i < hw->mac.max_queues); ++i) {
1364 stats->q_ipackets[i] = hw_stats->q[i].rx_packets.count;
1365 stats->q_opackets[i] = hw_stats->q[i].tx_packets.count;
1366 stats->q_ibytes[i] = hw_stats->q[i].rx_bytes.count;
1367 stats->q_obytes[i] = hw_stats->q[i].tx_bytes.count;
1368 ipackets += stats->q_ipackets[i];
1369 opackets += stats->q_opackets[i];
1370 ibytes += stats->q_ibytes[i];
1371 obytes += stats->q_obytes[i];
1373 stats->ipackets = ipackets;
1374 stats->opackets = opackets;
1375 stats->ibytes = ibytes;
1376 stats->obytes = obytes;
1381 fm10k_stats_reset(struct rte_eth_dev *dev)
1383 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1384 struct fm10k_hw_stats *hw_stats =
1385 FM10K_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
1387 PMD_INIT_FUNC_TRACE();
1389 memset(hw_stats, 0, sizeof(*hw_stats));
1390 fm10k_rebind_hw_stats(hw, hw_stats);
1394 fm10k_dev_infos_get(struct rte_eth_dev *dev,
1395 struct rte_eth_dev_info *dev_info)
1397 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1398 struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
1400 PMD_INIT_FUNC_TRACE();
1402 dev_info->pci_dev = pdev;
1403 dev_info->min_rx_bufsize = FM10K_MIN_RX_BUF_SIZE;
1404 dev_info->max_rx_pktlen = FM10K_MAX_PKT_SIZE;
1405 dev_info->max_rx_queues = hw->mac.max_queues;
1406 dev_info->max_tx_queues = hw->mac.max_queues;
1407 dev_info->max_mac_addrs = FM10K_MAX_MACADDR_NUM;
1408 dev_info->max_hash_mac_addrs = 0;
1409 dev_info->max_vfs = pdev->max_vfs;
1410 dev_info->vmdq_pool_base = 0;
1411 dev_info->vmdq_queue_base = 0;
1412 dev_info->max_vmdq_pools = ETH_32_POOLS;
1413 dev_info->vmdq_queue_num = FM10K_MAX_QUEUES_PF;
1414 dev_info->rx_offload_capa =
1415 DEV_RX_OFFLOAD_VLAN_STRIP |
1416 DEV_RX_OFFLOAD_IPV4_CKSUM |
1417 DEV_RX_OFFLOAD_UDP_CKSUM |
1418 DEV_RX_OFFLOAD_TCP_CKSUM;
1419 dev_info->tx_offload_capa =
1420 DEV_TX_OFFLOAD_VLAN_INSERT |
1421 DEV_TX_OFFLOAD_IPV4_CKSUM |
1422 DEV_TX_OFFLOAD_UDP_CKSUM |
1423 DEV_TX_OFFLOAD_TCP_CKSUM |
1424 DEV_TX_OFFLOAD_TCP_TSO;
1426 dev_info->hash_key_size = FM10K_RSSRK_SIZE * sizeof(uint32_t);
1427 dev_info->reta_size = FM10K_MAX_RSS_INDICES;
1429 dev_info->default_rxconf = (struct rte_eth_rxconf) {
1431 .pthresh = FM10K_DEFAULT_RX_PTHRESH,
1432 .hthresh = FM10K_DEFAULT_RX_HTHRESH,
1433 .wthresh = FM10K_DEFAULT_RX_WTHRESH,
1435 .rx_free_thresh = FM10K_RX_FREE_THRESH_DEFAULT(0),
1439 dev_info->default_txconf = (struct rte_eth_txconf) {
1441 .pthresh = FM10K_DEFAULT_TX_PTHRESH,
1442 .hthresh = FM10K_DEFAULT_TX_HTHRESH,
1443 .wthresh = FM10K_DEFAULT_TX_WTHRESH,
1445 .tx_free_thresh = FM10K_TX_FREE_THRESH_DEFAULT(0),
1446 .tx_rs_thresh = FM10K_TX_RS_THRESH_DEFAULT(0),
1447 .txq_flags = FM10K_SIMPLE_TX_FLAG,
1450 dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
1451 .nb_max = FM10K_MAX_RX_DESC,
1452 .nb_min = FM10K_MIN_RX_DESC,
1453 .nb_align = FM10K_MULT_RX_DESC,
1456 dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
1457 .nb_max = FM10K_MAX_TX_DESC,
1458 .nb_min = FM10K_MIN_TX_DESC,
1459 .nb_align = FM10K_MULT_TX_DESC,
1460 .nb_seg_max = FM10K_TX_MAX_SEG,
1461 .nb_mtu_seg_max = FM10K_TX_MAX_MTU_SEG,
1464 dev_info->speed_capa = ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G |
1465 ETH_LINK_SPEED_10G | ETH_LINK_SPEED_25G |
1466 ETH_LINK_SPEED_40G | ETH_LINK_SPEED_100G;
1469 #ifdef RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE
1470 static const uint32_t *
1471 fm10k_dev_supported_ptypes_get(struct rte_eth_dev *dev)
1473 if (dev->rx_pkt_burst == fm10k_recv_pkts ||
1474 dev->rx_pkt_burst == fm10k_recv_scattered_pkts) {
1475 static uint32_t ptypes[] = {
1476 /* refers to rx_desc_to_ol_flags() */
1479 RTE_PTYPE_L3_IPV4_EXT,
1481 RTE_PTYPE_L3_IPV6_EXT,
1488 } else if (dev->rx_pkt_burst == fm10k_recv_pkts_vec ||
1489 dev->rx_pkt_burst == fm10k_recv_scattered_pkts_vec) {
1490 static uint32_t ptypes_vec[] = {
1491 /* refers to fm10k_desc_to_pktype_v() */
1493 RTE_PTYPE_L3_IPV4_EXT,
1495 RTE_PTYPE_L3_IPV6_EXT,
1498 RTE_PTYPE_TUNNEL_GENEVE,
1499 RTE_PTYPE_TUNNEL_NVGRE,
1500 RTE_PTYPE_TUNNEL_VXLAN,
1501 RTE_PTYPE_TUNNEL_GRE,
1511 static const uint32_t *
1512 fm10k_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
1519 fm10k_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
1522 uint16_t mac_num = 0;
1523 uint32_t vid_idx, vid_bit, mac_index;
1524 struct fm10k_hw *hw;
1525 struct fm10k_macvlan_filter_info *macvlan;
1526 struct rte_eth_dev_data *data = dev->data;
1528 hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1529 macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1531 if (macvlan->nb_queue_pools > 0) { /* VMDQ mode */
1532 PMD_INIT_LOG(ERR, "Cannot change VLAN filter in VMDQ mode");
1536 if (vlan_id > ETH_VLAN_ID_MAX) {
1537 PMD_INIT_LOG(ERR, "Invalid vlan_id: must be < 4096");
1541 vid_idx = FM10K_VFTA_IDX(vlan_id);
1542 vid_bit = FM10K_VFTA_BIT(vlan_id);
1543 /* this VLAN ID is already in the VLAN filter table, return SUCCESS */
1544 if (on && (macvlan->vfta[vid_idx] & vid_bit))
1546 /* this VLAN ID is NOT in the VLAN filter table, cannot remove */
1547 if (!on && !(macvlan->vfta[vid_idx] & vid_bit)) {
1548 PMD_INIT_LOG(ERR, "Invalid vlan_id: not existing "
1549 "in the VLAN filter table");
1554 result = fm10k_update_vlan(hw, vlan_id, 0, on);
1555 fm10k_mbx_unlock(hw);
1556 if (result != FM10K_SUCCESS) {
1557 PMD_INIT_LOG(ERR, "VLAN update failed: %d", result);
1561 for (mac_index = 0; (mac_index < FM10K_MAX_MACADDR_NUM) &&
1562 (result == FM10K_SUCCESS); mac_index++) {
1563 if (is_zero_ether_addr(&data->mac_addrs[mac_index]))
1565 if (mac_num > macvlan->mac_num - 1) {
1566 PMD_INIT_LOG(ERR, "MAC address number "
1571 result = fm10k_update_uc_addr(hw, hw->mac.dglort_map,
1572 data->mac_addrs[mac_index].addr_bytes,
1574 fm10k_mbx_unlock(hw);
1577 if (result != FM10K_SUCCESS) {
1578 PMD_INIT_LOG(ERR, "MAC address update failed: %d", result);
1583 macvlan->vlan_num++;
1584 macvlan->vfta[vid_idx] |= vid_bit;
1586 macvlan->vlan_num--;
1587 macvlan->vfta[vid_idx] &= ~vid_bit;
1593 fm10k_vlan_offload_set(struct rte_eth_dev *dev, int mask)
1595 if (mask & ETH_VLAN_STRIP_MASK) {
1596 if (!dev->data->dev_conf.rxmode.hw_vlan_strip)
1597 PMD_INIT_LOG(ERR, "VLAN stripping is "
1598 "always on in fm10k");
1601 if (mask & ETH_VLAN_EXTEND_MASK) {
1602 if (dev->data->dev_conf.rxmode.hw_vlan_extend)
1603 PMD_INIT_LOG(ERR, "VLAN QinQ is not "
1604 "supported in fm10k");
1607 if (mask & ETH_VLAN_FILTER_MASK) {
1608 if (!dev->data->dev_conf.rxmode.hw_vlan_filter)
1609 PMD_INIT_LOG(ERR, "VLAN filter is always on in fm10k");
1615 /* Add/Remove a MAC address, and update filters to main VSI */
1616 static void fm10k_MAC_filter_set_main_vsi(struct rte_eth_dev *dev,
1617 const u8 *mac, bool add, uint32_t pool)
1619 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1620 struct fm10k_macvlan_filter_info *macvlan;
1623 macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1625 if (pool != MAIN_VSI_POOL_NUMBER) {
1626 PMD_DRV_LOG(ERR, "VMDQ not enabled, can't set "
1627 "mac to pool %u", pool);
1630 for (i = 0, j = 0; j < FM10K_VFTA_SIZE; j++) {
1631 if (!macvlan->vfta[j])
1633 for (k = 0; k < FM10K_UINT32_BIT_SIZE; k++) {
1634 if (!(macvlan->vfta[j] & (1 << k)))
1636 if (i + 1 > macvlan->vlan_num) {
1637 PMD_INIT_LOG(ERR, "vlan number not match");
1641 fm10k_update_uc_addr(hw, hw->mac.dglort_map, mac,
1642 j * FM10K_UINT32_BIT_SIZE + k, add, 0);
1643 fm10k_mbx_unlock(hw);
1649 /* Add/Remove a MAC address, and update filters to VMDQ */
1650 static void fm10k_MAC_filter_set_vmdq(struct rte_eth_dev *dev,
1651 const u8 *mac, bool add, uint32_t pool)
1653 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1654 struct fm10k_macvlan_filter_info *macvlan;
1655 struct rte_eth_vmdq_rx_conf *vmdq_conf;
1658 macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1659 vmdq_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_rx_conf;
1661 if (pool > macvlan->nb_queue_pools) {
1662 PMD_DRV_LOG(ERR, "Pool number %u invalid."
1664 pool, macvlan->nb_queue_pools);
1667 for (i = 0; i < vmdq_conf->nb_pool_maps; i++) {
1668 if (!(vmdq_conf->pool_map[i].pools & (1UL << pool)))
1671 fm10k_update_uc_addr(hw, hw->mac.dglort_map + pool, mac,
1672 vmdq_conf->pool_map[i].vlan_id, add, 0);
1673 fm10k_mbx_unlock(hw);
1677 /* Add/Remove a MAC address, and update filters */
1678 static void fm10k_MAC_filter_set(struct rte_eth_dev *dev,
1679 const u8 *mac, bool add, uint32_t pool)
1681 struct fm10k_macvlan_filter_info *macvlan;
1683 macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1685 if (macvlan->nb_queue_pools > 0) /* VMDQ mode */
1686 fm10k_MAC_filter_set_vmdq(dev, mac, add, pool);
1688 fm10k_MAC_filter_set_main_vsi(dev, mac, add, pool);
1696 /* Add a MAC address, and update filters */
1698 fm10k_macaddr_add(struct rte_eth_dev *dev,
1699 struct ether_addr *mac_addr,
1703 struct fm10k_macvlan_filter_info *macvlan;
1705 macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1706 fm10k_MAC_filter_set(dev, mac_addr->addr_bytes, TRUE, pool);
1707 macvlan->mac_vmdq_id[index] = pool;
1711 /* Remove a MAC address, and update filters */
1713 fm10k_macaddr_remove(struct rte_eth_dev *dev, uint32_t index)
1715 struct rte_eth_dev_data *data = dev->data;
1716 struct fm10k_macvlan_filter_info *macvlan;
1718 macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
1719 fm10k_MAC_filter_set(dev, data->mac_addrs[index].addr_bytes,
1720 FALSE, macvlan->mac_vmdq_id[index]);
1721 macvlan->mac_vmdq_id[index] = 0;
1725 check_nb_desc(uint16_t min, uint16_t max, uint16_t mult, uint16_t request)
1727 if ((request < min) || (request > max) || ((request % mult) != 0))
1735 check_thresh(uint16_t min, uint16_t max, uint16_t div, uint16_t request)
1737 if ((request < min) || (request > max) || ((div % request) != 0))
1744 handle_rxconf(struct fm10k_rx_queue *q, const struct rte_eth_rxconf *conf)
1746 uint16_t rx_free_thresh;
1748 if (conf->rx_free_thresh == 0)
1749 rx_free_thresh = FM10K_RX_FREE_THRESH_DEFAULT(q);
1751 rx_free_thresh = conf->rx_free_thresh;
1753 /* make sure the requested threshold satisfies the constraints */
1754 if (check_thresh(FM10K_RX_FREE_THRESH_MIN(q),
1755 FM10K_RX_FREE_THRESH_MAX(q),
1756 FM10K_RX_FREE_THRESH_DIV(q),
1758 PMD_INIT_LOG(ERR, "rx_free_thresh (%u) must be "
1759 "less than or equal to %u, "
1760 "greater than or equal to %u, "
1761 "and a divisor of %u",
1762 rx_free_thresh, FM10K_RX_FREE_THRESH_MAX(q),
1763 FM10K_RX_FREE_THRESH_MIN(q),
1764 FM10K_RX_FREE_THRESH_DIV(q));
1768 q->alloc_thresh = rx_free_thresh;
1769 q->drop_en = conf->rx_drop_en;
1770 q->rx_deferred_start = conf->rx_deferred_start;
1776 * Hardware requires specific alignment for Rx packet buffers. At
1777 * least one of the following two conditions must be satisfied.
1778 * 1. Address is 512B aligned
1779 * 2. Address is 8B aligned and buffer does not cross 4K boundary.
1781 * As such, the driver may need to adjust the DMA address within the
1782 * buffer by up to 512B.
1784 * return 1 if the element size is valid, otherwise return 0.
1787 mempool_element_size_valid(struct rte_mempool *mp)
1791 /* elt_size includes mbuf header and headroom */
1792 min_size = mp->elt_size - sizeof(struct rte_mbuf) -
1793 RTE_PKTMBUF_HEADROOM;
1795 /* account for up to 512B of alignment */
1796 min_size -= FM10K_RX_DATABUF_ALIGN;
1798 /* sanity check for overflow */
1799 if (min_size > mp->elt_size)
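/*
 * Worked example (assumes a pool created with the default
 * RTE_MBUF_DEFAULT_BUF_SIZE): elt_size minus the mbuf header and 128B of
 * headroom leaves 2048B of data space; reserving the 512B worst-case
 * alignment still leaves 1536B and the unsigned subtraction does not
 * wrap, so the element size is accepted.
 */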
1807 fm10k_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
1808 uint16_t nb_desc, unsigned int socket_id,
1809 const struct rte_eth_rxconf *conf, struct rte_mempool *mp)
1811 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1812 struct fm10k_dev_info *dev_info =
1813 FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
1814 struct fm10k_rx_queue *q;
1815 const struct rte_memzone *mz;
1817 PMD_INIT_FUNC_TRACE();
1819 /* make sure the mempool element size can account for alignment. */
1820 if (!mempool_element_size_valid(mp)) {
1821 PMD_INIT_LOG(ERR, "Error : Mempool element size is too small");
1825 /* make sure a valid number of descriptors have been requested */
1826 if (check_nb_desc(FM10K_MIN_RX_DESC, FM10K_MAX_RX_DESC,
1827 FM10K_MULT_RX_DESC, nb_desc)) {
1828 PMD_INIT_LOG(ERR, "Number of Rx descriptors (%u) must be "
1829 "less than or equal to %"PRIu32", "
1830 "greater than or equal to %u, "
1831 "and a multiple of %u",
1832 nb_desc, (uint32_t)FM10K_MAX_RX_DESC, FM10K_MIN_RX_DESC,
1833 FM10K_MULT_RX_DESC);
1838 * if this queue existed already, free the associated memory. The
1839 * queue cannot be reused in case we need to allocate memory on
1840 * a different socket than was previously used.
1842 if (dev->data->rx_queues[queue_id] != NULL) {
1843 rx_queue_free(dev->data->rx_queues[queue_id]);
1844 dev->data->rx_queues[queue_id] = NULL;
1847 /* allocate memory for the queue structure */
1848 q = rte_zmalloc_socket("fm10k", sizeof(*q), RTE_CACHE_LINE_SIZE,
1851 PMD_INIT_LOG(ERR, "Cannot allocate queue structure");
1857 q->nb_desc = nb_desc;
1858 q->nb_fake_desc = FM10K_MULT_RX_DESC;
1859 q->port_id = dev->data->port_id;
1860 q->queue_id = queue_id;
1861 q->tail_ptr = (volatile uint32_t *)
1862 &((uint32_t *)hw->hw_addr)[FM10K_RDT(queue_id)];
1863 if (handle_rxconf(q, conf))
1866 /* allocate memory for the software ring */
1867 q->sw_ring = rte_zmalloc_socket("fm10k sw ring",
1868 (nb_desc + q->nb_fake_desc) * sizeof(struct rte_mbuf *),
1869 RTE_CACHE_LINE_SIZE, socket_id);
1870 if (q->sw_ring == NULL) {
1871 PMD_INIT_LOG(ERR, "Cannot allocate software ring");
1877 * allocate memory for the hardware descriptor ring. A memzone large
1878 * enough to hold the maximum ring size is requested to allow for
1879 * resizing in later calls to the queue setup function.
1881 mz = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_id,
1882 FM10K_MAX_RX_RING_SZ, FM10K_ALIGN_RX_DESC,
1885 PMD_INIT_LOG(ERR, "Cannot allocate hardware ring");
1886 rte_free(q->sw_ring);
1890 q->hw_ring = mz->addr;
1891 q->hw_ring_phys_addr = mz->iova;
1893 /* Check if the number of descriptors satisfies the vector Rx requirement */
1894 if (!rte_is_power_of_2(nb_desc)) {
1895 PMD_INIT_LOG(DEBUG, "queue[%d] doesn't meet Vector Rx "
1896 "preconditions - canceling the feature for "
1897 "the whole port[%d]",
1898 q->queue_id, q->port_id);
1899 dev_info->rx_vec_allowed = false;
1901 fm10k_rxq_vec_setup(q);
1903 dev->data->rx_queues[queue_id] = q;
1908 fm10k_rx_queue_release(void *queue)
1910 PMD_INIT_FUNC_TRACE();
1912 rx_queue_free(queue);
1916 handle_txconf(struct fm10k_tx_queue *q, const struct rte_eth_txconf *conf)
1918 uint16_t tx_free_thresh;
1919 uint16_t tx_rs_thresh;
1921 /* constraint macros require that tx_free_thresh is configured
1922 * before tx_rs_thresh */
1923 if (conf->tx_free_thresh == 0)
1924 tx_free_thresh = FM10K_TX_FREE_THRESH_DEFAULT(q);
1926 tx_free_thresh = conf->tx_free_thresh;
1928 /* make sure the requested threshold satisfies the constraints */
1929 if (check_thresh(FM10K_TX_FREE_THRESH_MIN(q),
1930 FM10K_TX_FREE_THRESH_MAX(q),
1931 FM10K_TX_FREE_THRESH_DIV(q),
1933 PMD_INIT_LOG(ERR, "tx_free_thresh (%u) must be "
1934 "less than or equal to %u, "
1935 "greater than or equal to %u, "
1936 "and a divisor of %u",
1937 tx_free_thresh, FM10K_TX_FREE_THRESH_MAX(q),
1938 FM10K_TX_FREE_THRESH_MIN(q),
1939 FM10K_TX_FREE_THRESH_DIV(q));
1943 q->free_thresh = tx_free_thresh;
1945 if (conf->tx_rs_thresh == 0)
1946 tx_rs_thresh = FM10K_TX_RS_THRESH_DEFAULT(q);
1948 tx_rs_thresh = conf->tx_rs_thresh;
1950 q->tx_deferred_start = conf->tx_deferred_start;
1952 /* make sure the requested threshold satisfies the constraints */
1953 if (check_thresh(FM10K_TX_RS_THRESH_MIN(q),
1954 FM10K_TX_RS_THRESH_MAX(q),
1955 FM10K_TX_RS_THRESH_DIV(q),
1957 PMD_INIT_LOG(ERR, "tx_rs_thresh (%u) must be "
1958 "less than or equal to %u, "
1959 "greater than or equal to %u, "
1960 "and a divisor of %u",
1961 tx_rs_thresh, FM10K_TX_RS_THRESH_MAX(q),
1962 FM10K_TX_RS_THRESH_MIN(q),
1963 FM10K_TX_RS_THRESH_DIV(q));
1967 q->rs_thresh = tx_rs_thresh;
1973 fm10k_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_id,
1974 uint16_t nb_desc, unsigned int socket_id,
1975 const struct rte_eth_txconf *conf)
1977 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1978 struct fm10k_tx_queue *q;
1979 const struct rte_memzone *mz;
1981 PMD_INIT_FUNC_TRACE();
1983 /* make sure a valid number of descriptors have been requested */
1984 if (check_nb_desc(FM10K_MIN_TX_DESC, FM10K_MAX_TX_DESC,
1985 FM10K_MULT_TX_DESC, nb_desc)) {
1986 PMD_INIT_LOG(ERR, "Number of Tx descriptors (%u) must be "
1987 "less than or equal to %"PRIu32", "
1988 "greater than or equal to %u, "
1989 "and a multiple of %u",
1990 nb_desc, (uint32_t)FM10K_MAX_TX_DESC, FM10K_MIN_TX_DESC,
1991 FM10K_MULT_TX_DESC);
1996 * if this queue existed already, free the associated memory. The
1997 * queue cannot be reused in case we need to allocate memory on
1998 * a different socket than was previously used.
2000 if (dev->data->tx_queues[queue_id] != NULL) {
2001 struct fm10k_tx_queue *txq = dev->data->tx_queues[queue_id];
2004 dev->data->tx_queues[queue_id] = NULL;
2007 /* allocate memory for the queue structure */
2008 q = rte_zmalloc_socket("fm10k", sizeof(*q), RTE_CACHE_LINE_SIZE,
2011 PMD_INIT_LOG(ERR, "Cannot allocate queue structure");
2016 q->nb_desc = nb_desc;
2017 q->port_id = dev->data->port_id;
2018 q->queue_id = queue_id;
2019 q->txq_flags = conf->txq_flags;
2020 q->ops = &def_txq_ops;
2021 q->tail_ptr = (volatile uint32_t *)
2022 &((uint32_t *)hw->hw_addr)[FM10K_TDT(queue_id)];
2023 if (handle_txconf(q, conf))
2026 /* allocate memory for the software ring */
2027 q->sw_ring = rte_zmalloc_socket("fm10k sw ring",
2028 nb_desc * sizeof(struct rte_mbuf *),
2029 RTE_CACHE_LINE_SIZE, socket_id);
2030 if (q->sw_ring == NULL) {
2031 PMD_INIT_LOG(ERR, "Cannot allocate software ring");
2037 * allocate memory for the hardware descriptor ring. A memzone large
2038 * enough to hold the maximum ring size is requested to allow for
2039 * resizing in later calls to the queue setup function.
2041 mz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_id,
2042 FM10K_MAX_TX_RING_SZ, FM10K_ALIGN_TX_DESC,
2045 PMD_INIT_LOG(ERR, "Cannot allocate hardware ring");
2046 rte_free(q->sw_ring);
2050 q->hw_ring = mz->addr;
2051 q->hw_ring_phys_addr = mz->iova;
2054 * allocate memory for the RS bit tracker. Enough slots are required
2055 * to hold the descriptor index for each RS bit that needs to be set.
2057 q->rs_tracker.list = rte_zmalloc_socket("fm10k rs tracker",
2058 ((nb_desc + 1) / q->rs_thresh) *
2060 RTE_CACHE_LINE_SIZE, socket_id);
2061 if (q->rs_tracker.list == NULL) {
2062 PMD_INIT_LOG(ERR, "Cannot allocate RS bit tracker");
2063 rte_free(q->sw_ring);
2068 dev->data->tx_queues[queue_id] = q;
2073 fm10k_tx_queue_release(void *queue)
2075 struct fm10k_tx_queue *q = queue;
2076 PMD_INIT_FUNC_TRACE();
2082 fm10k_reta_update(struct rte_eth_dev *dev,
2083 struct rte_eth_rss_reta_entry64 *reta_conf,
2086 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2087 uint16_t i, j, idx, shift;
2091 PMD_INIT_FUNC_TRACE();
2093 if (reta_size > FM10K_MAX_RSS_INDICES) {
2094 PMD_INIT_LOG(ERR, "The size of hash lookup table configured "
2095 "(%d) doesn't match the number hardware can supported "
2096 "(%d)", reta_size, FM10K_MAX_RSS_INDICES);
2101 * Update Redirection Table RETA[n], n=0..31. The redirection table has
2102 * 128 entries in 32 registers
2104 for (i = 0; i < FM10K_MAX_RSS_INDICES; i += CHARS_PER_UINT32) {
2105 idx = i / RTE_RETA_GROUP_SIZE;
2106 shift = i % RTE_RETA_GROUP_SIZE;
2107 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
2108 BIT_MASK_PER_UINT32);
2113 if (mask != BIT_MASK_PER_UINT32)
2114 reta = FM10K_READ_REG(hw, FM10K_RETA(0, i >> 2));
2116 for (j = 0; j < CHARS_PER_UINT32; j++) {
2117 if (mask & (0x1 << j)) {
2119 reta &= ~(UINT8_MAX << CHAR_BIT * j);
2120 reta |= reta_conf[idx].reta[shift + j] <<
2124 FM10K_WRITE_REG(hw, FM10K_RETA(0, i >> 2), reta);
2131 fm10k_reta_query(struct rte_eth_dev *dev,
2132 struct rte_eth_rss_reta_entry64 *reta_conf,
2135 struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2136 uint16_t i, j, idx, shift;
2140 PMD_INIT_FUNC_TRACE();
2142 if (reta_size < FM10K_MAX_RSS_INDICES) {
2143 PMD_INIT_LOG(ERR, "The size of hash lookup table configured "
2144 "(%d) doesn't match the number hardware can supported "
2145 "(%d)", reta_size, FM10K_MAX_RSS_INDICES);
2150 * Read Redirection Table RETA[n], n=0..31. The redirection table has
2151 * 128 entries in 32 registers
2153 for (i = 0; i < FM10K_MAX_RSS_INDICES; i += CHARS_PER_UINT32) {
2154 idx = i / RTE_RETA_GROUP_SIZE;
2155 shift = i % RTE_RETA_GROUP_SIZE;
2156 mask = (uint8_t)((reta_conf[idx].mask >> shift) &
2157 BIT_MASK_PER_UINT32);
2161 reta = FM10K_READ_REG(hw, FM10K_RETA(0, i >> 2));
2162 for (j = 0; j < CHARS_PER_UINT32; j++) {
2163 if (mask & (0x1 << j))
2164 reta_conf[idx].reta[shift + j] = ((reta >>
2165 CHAR_BIT * j) & UINT8_MAX);

static int
fm10k_rss_hash_update(struct rte_eth_dev *dev,
	struct rte_eth_rss_conf *rss_conf)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t *key = (uint32_t *)rss_conf->rss_key;
	uint32_t mrqc;
	uint64_t hf = rss_conf->rss_hf;
	int i;

	PMD_INIT_FUNC_TRACE();

	if (key && (rss_conf->rss_key_len < FM10K_RSSRK_SIZE *
		FM10K_RSSRK_ENTRIES_PER_REG))
		return -EINVAL;

	if (hf == 0)
		return -EINVAL;

	mrqc = 0;
	mrqc |= (hf & ETH_RSS_IPV4)              ? FM10K_MRQC_IPV4     : 0;
	mrqc |= (hf & ETH_RSS_IPV6)              ? FM10K_MRQC_IPV6     : 0;
	mrqc |= (hf & ETH_RSS_IPV6_EX)           ? FM10K_MRQC_IPV6     : 0;
	mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_TCP)  ? FM10K_MRQC_TCP_IPV4 : 0;
	mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_TCP)  ? FM10K_MRQC_TCP_IPV6 : 0;
	mrqc |= (hf & ETH_RSS_IPV6_TCP_EX)       ? FM10K_MRQC_TCP_IPV6 : 0;
	mrqc |= (hf & ETH_RSS_NONFRAG_IPV4_UDP)  ? FM10K_MRQC_UDP_IPV4 : 0;
	mrqc |= (hf & ETH_RSS_NONFRAG_IPV6_UDP)  ? FM10K_MRQC_UDP_IPV6 : 0;
	mrqc |= (hf & ETH_RSS_IPV6_UDP_EX)       ? FM10K_MRQC_UDP_IPV6 : 0;

	/* If the mapping doesn't fit any supported hash function, return */
	if (mrqc == 0)
		return -EINVAL;

	if (key != NULL)
		for (i = 0; i < FM10K_RSSRK_SIZE; ++i)
			FM10K_WRITE_REG(hw, FM10K_RSSRK(0, i), key[i]);

	FM10K_WRITE_REG(hw, FM10K_MRQC(0), mrqc);

	return 0;
}
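
/*
 * Example (illustrative): requesting rss_hf = ETH_RSS_IPV4 |
 * ETH_RSS_NONFRAG_IPV4_TCP programs MRQC with FM10K_MRQC_IPV4 |
 * FM10K_MRQC_TCP_IPV4. Note that several ETH_RSS_* flags fold onto the
 * same MRQC bit (e.g. ETH_RSS_IPV6 and ETH_RSS_IPV6_EX), so the mapping
 * is not one-to-one.
 */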

static int
fm10k_rss_hash_conf_get(struct rte_eth_dev *dev,
	struct rte_eth_rss_conf *rss_conf)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t *key = (uint32_t *)rss_conf->rss_key;
	uint32_t mrqc;
	uint64_t hf;
	int i;

	PMD_INIT_FUNC_TRACE();

	if (key && (rss_conf->rss_key_len < FM10K_RSSRK_SIZE *
		FM10K_RSSRK_ENTRIES_PER_REG))
		return -EINVAL;

	if (key != NULL)
		for (i = 0; i < FM10K_RSSRK_SIZE; ++i)
			key[i] = FM10K_READ_REG(hw, FM10K_RSSRK(0, i));

	mrqc = FM10K_READ_REG(hw, FM10K_MRQC(0));
	hf = 0;
	hf |= (mrqc & FM10K_MRQC_IPV4)     ? ETH_RSS_IPV4             : 0;
	hf |= (mrqc & FM10K_MRQC_IPV6)     ? ETH_RSS_IPV6             : 0;
	hf |= (mrqc & FM10K_MRQC_IPV6)     ? ETH_RSS_IPV6_EX          : 0;
	hf |= (mrqc & FM10K_MRQC_TCP_IPV4) ? ETH_RSS_NONFRAG_IPV4_TCP : 0;
	hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? ETH_RSS_NONFRAG_IPV6_TCP : 0;
	hf |= (mrqc & FM10K_MRQC_TCP_IPV6) ? ETH_RSS_IPV6_TCP_EX      : 0;
	hf |= (mrqc & FM10K_MRQC_UDP_IPV4) ? ETH_RSS_NONFRAG_IPV4_UDP : 0;
	hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? ETH_RSS_NONFRAG_IPV6_UDP : 0;
	hf |= (mrqc & FM10K_MRQC_UDP_IPV6) ? ETH_RSS_IPV6_UDP_EX      : 0;

	rss_conf->rss_hf = hf;

	return 0;
}

static void
fm10k_dev_enable_intr_pf(struct rte_eth_dev *dev)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t int_map = FM10K_INT_MAP_IMMEDIATE;

	/* Bind all local non-queue interrupts to vector 0 */
	int_map |= FM10K_MISC_VEC_ID;

	FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_mailbox), int_map);
	FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_pcie_fault), int_map);
	FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_switch_up_down), int_map);
	FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_switch_event), int_map);
	FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_sram), int_map);
	FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_vflr), int_map);

	/* Enable misc causes */
	FM10K_WRITE_REG(hw, FM10K_EIMR, FM10K_EIMR_ENABLE(PCA_FAULT) |
				FM10K_EIMR_ENABLE(THI_FAULT) |
				FM10K_EIMR_ENABLE(FUM_FAULT) |
				FM10K_EIMR_ENABLE(MAILBOX) |
				FM10K_EIMR_ENABLE(SWITCHREADY) |
				FM10K_EIMR_ENABLE(SWITCHNOTREADY) |
				FM10K_EIMR_ENABLE(SRAMERROR) |
				FM10K_EIMR_ENABLE(VFLR));

	/* Enable ITR 0 */
	FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_AUTOMASK |
					FM10K_ITR_MASK_CLEAR);
	FM10K_WRITE_FLUSH(hw);
}
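
/*
 * ITR note (inferred from usage in this file, not from the datasheet):
 * FM10K_ITR_AUTOMASK makes the hardware mask a vector automatically when
 * it fires, and writing FM10K_ITR_MASK_CLEAR unmasks it again, which is
 * why every handler below ends by rewriting the ITR register to re-arm
 * the interrupt.
 */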

static void
fm10k_dev_disable_intr_pf(struct rte_eth_dev *dev)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t int_map = FM10K_INT_MAP_DISABLE;

	int_map |= FM10K_MISC_VEC_ID;

	FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_mailbox), int_map);
	FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_pcie_fault), int_map);
	FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_switch_up_down), int_map);
	FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_switch_event), int_map);
	FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_sram), int_map);
	FM10K_WRITE_REG(hw, FM10K_INT_MAP(fm10k_int_vflr), int_map);

	/* Disable misc causes */
	FM10K_WRITE_REG(hw, FM10K_EIMR, FM10K_EIMR_DISABLE(PCA_FAULT) |
				FM10K_EIMR_DISABLE(THI_FAULT) |
				FM10K_EIMR_DISABLE(FUM_FAULT) |
				FM10K_EIMR_DISABLE(MAILBOX) |
				FM10K_EIMR_DISABLE(SWITCHREADY) |
				FM10K_EIMR_DISABLE(SWITCHNOTREADY) |
				FM10K_EIMR_DISABLE(SRAMERROR) |
				FM10K_EIMR_DISABLE(VFLR));

	/* Disable ITR 0 */
	FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_MASK_SET);
	FM10K_WRITE_FLUSH(hw);
}

static void
fm10k_dev_enable_intr_vf(struct rte_eth_dev *dev)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t int_map = FM10K_INT_MAP_IMMEDIATE;

	/* Bind all local non-queue interrupts to vector 0 */
	int_map |= FM10K_MISC_VEC_ID;

	/* Only INT 0 is available; the other 15 are reserved. */
	FM10K_WRITE_REG(hw, FM10K_VFINT_MAP, int_map);

	/* Enable ITR 0 */
	FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_AUTOMASK |
					FM10K_ITR_MASK_CLEAR);
	FM10K_WRITE_FLUSH(hw);
}

static void
fm10k_dev_disable_intr_vf(struct rte_eth_dev *dev)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t int_map = FM10K_INT_MAP_DISABLE;

	int_map |= FM10K_MISC_VEC_ID;

	/* Only INT 0 is available; the other 15 are reserved. */
	FM10K_WRITE_REG(hw, FM10K_VFINT_MAP, int_map);

	/* Disable ITR 0 */
	FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_MASK_SET);
	FM10K_WRITE_FLUSH(hw);
}

static int
fm10k_dev_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);

	/* Enable ITR */
	if (hw->mac.type == fm10k_mac_pf)
		FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(pdev, queue_id)),
			FM10K_ITR_AUTOMASK | FM10K_ITR_MASK_CLEAR);
	else
		FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(pdev, queue_id)),
			FM10K_ITR_AUTOMASK | FM10K_ITR_MASK_CLEAR);
	rte_intr_enable(&pdev->intr_handle);
	return 0;
}
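
/*
 * Q2V() resolves a queue ID to its interrupt vector via the intr_vec
 * table built in fm10k_dev_rxq_interrupt_setup() below. For example,
 * with the default 1:1 mapping, Rx queue 3 re-arms the ITR register of
 * vector intr_vec[3].
 */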

static int
fm10k_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);

	/* Disable ITR */
	if (hw->mac.type == fm10k_mac_pf)
		FM10K_WRITE_REG(hw, FM10K_ITR(Q2V(pdev, queue_id)),
			FM10K_ITR_MASK_SET);
	else
		FM10K_WRITE_REG(hw, FM10K_VFITR(Q2V(pdev, queue_id)),
			FM10K_ITR_MASK_SET);
	return 0;
}

static int
fm10k_dev_rxq_interrupt_setup(struct rte_eth_dev *dev)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pdev->intr_handle;
	uint32_t intr_vector, vec;
	uint16_t queue_id;
	int result = 0;

	/* fm10k needs one separate interrupt for the mailbox,
	 * so only drivers that support multiple interrupt vectors
	 * (e.g. vfio-pci) can work in fm10k interrupt mode.
	 */
	if (!rte_intr_cap_multiple(intr_handle) ||
			dev->data->dev_conf.intr_conf.rxq == 0)
		return result;

	intr_vector = dev->data->nb_rx_queues;

	/* disable interrupt first */
	rte_intr_disable(intr_handle);
	if (hw->mac.type == fm10k_mac_pf)
		fm10k_dev_disable_intr_pf(dev);
	else
		fm10k_dev_disable_intr_vf(dev);

	if (rte_intr_efd_enable(intr_handle, intr_vector)) {
		PMD_INIT_LOG(ERR, "Failed to init event fd");
		result = -EIO;
	}

	if (rte_intr_dp_is_en(intr_handle) && !result) {
		intr_handle->intr_vec = rte_zmalloc("intr_vec",
			dev->data->nb_rx_queues * sizeof(int), 0);
		if (intr_handle->intr_vec) {
			for (queue_id = 0, vec = FM10K_RX_VEC_START;
					queue_id < dev->data->nb_rx_queues;
					queue_id++) {
				intr_handle->intr_vec[queue_id] = vec;
				if (vec < intr_handle->nb_efd - 1
						+ FM10K_RX_VEC_START)
					vec++;
			}
		} else {
			PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues"
				" intr_vec", dev->data->nb_rx_queues);
			rte_intr_efd_disable(intr_handle);
			result = -ENOMEM;
		}
	}

	if (hw->mac.type == fm10k_mac_pf)
		fm10k_dev_enable_intr_pf(dev);
	else
		fm10k_dev_enable_intr_vf(dev);
	rte_intr_enable(intr_handle);
	hw->mac.ops.update_int_moderator(hw);
	return result;
}
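
/*
 * Vector layout example (illustrative): the mailbox and other misc causes
 * stay on vector FM10K_MISC_VEC_ID, while Rx queues are numbered upward
 * from FM10K_RX_VEC_START. If fewer event fds than queues are granted
 * (nb_efd < nb_rx_queues), the loop above parks every remaining queue on
 * the last available vector instead of failing.
 */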

static inline int
fm10k_dev_handle_fault(struct fm10k_hw *hw, uint32_t eicr)
{
	struct fm10k_fault fault;
	int err;
	const char *estr = "Unknown error";

	/* Process PCA fault */
	if (eicr & FM10K_EICR_PCA_FAULT) {
		err = fm10k_get_fault(hw, FM10K_PCA_FAULT, &fault);
		if (err)
			goto error;
		switch (fault.type) {
		case PCA_NO_FAULT:
			estr = "PCA_NO_FAULT"; break;
		case PCA_UNMAPPED_ADDR:
			estr = "PCA_UNMAPPED_ADDR"; break;
		case PCA_BAD_QACCESS_PF:
			estr = "PCA_BAD_QACCESS_PF"; break;
		case PCA_BAD_QACCESS_VF:
			estr = "PCA_BAD_QACCESS_VF"; break;
		case PCA_MALICIOUS_REQ:
			estr = "PCA_MALICIOUS_REQ"; break;
		case PCA_POISONED_TLP:
			estr = "PCA_POISONED_TLP"; break;
		case PCA_TLP_ABORT:
			estr = "PCA_TLP_ABORT"; break;
		default:
			goto error;
		}
		PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
			estr, fault.func ? "VF" : "PF", fault.func,
			fault.address, fault.specinfo);
	}

	/* Process THI fault */
	if (eicr & FM10K_EICR_THI_FAULT) {
		err = fm10k_get_fault(hw, FM10K_THI_FAULT, &fault);
		if (err)
			goto error;
		switch (fault.type) {
		case THI_NO_FAULT:
			estr = "THI_NO_FAULT"; break;
		case THI_MAL_DIS_Q_FAULT:
			estr = "THI_MAL_DIS_Q_FAULT"; break;
		default:
			goto error;
		}
		PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
			estr, fault.func ? "VF" : "PF", fault.func,
			fault.address, fault.specinfo);
	}

	/* Process FUM fault */
	if (eicr & FM10K_EICR_FUM_FAULT) {
		err = fm10k_get_fault(hw, FM10K_FUM_FAULT, &fault);
		if (err)
			goto error;
		switch (fault.type) {
		case FUM_NO_FAULT:
			estr = "FUM_NO_FAULT"; break;
		case FUM_UNMAPPED_ADDR:
			estr = "FUM_UNMAPPED_ADDR"; break;
		case FUM_POISONED_TLP:
			estr = "FUM_POISONED_TLP"; break;
		case FUM_BAD_VF_QACCESS:
			estr = "FUM_BAD_VF_QACCESS"; break;
		case FUM_ADD_DECODE_ERR:
			estr = "FUM_ADD_DECODE_ERR"; break;
		case FUM_RO_ERROR:
			estr = "FUM_RO_ERROR"; break;
		case FUM_QPRC_CRC_ERROR:
			estr = "FUM_QPRC_CRC_ERROR"; break;
		case FUM_CSR_TIMEOUT:
			estr = "FUM_CSR_TIMEOUT"; break;
		case FUM_INVALID_TYPE:
			estr = "FUM_INVALID_TYPE"; break;
		case FUM_INVALID_LENGTH:
			estr = "FUM_INVALID_LENGTH"; break;
		case FUM_INVALID_BE:
			estr = "FUM_INVALID_BE"; break;
		case FUM_INVALID_ALIGN:
			estr = "FUM_INVALID_ALIGN"; break;
		default:
			goto error;
		}
		PMD_INIT_LOG(ERR, "%s: %s(%d) Addr:0x%"PRIx64" Spec: 0x%x",
			estr, fault.func ? "VF" : "PF", fault.func,
			fault.address, fault.specinfo);
	}

	return 0;
error:
	PMD_INIT_LOG(ERR, "Failed to handle fault event.");
	return err;
}

/**
 * PF interrupt handler triggered by NIC for handling a specific interrupt.
 *
 * @param handle
 *  Pointer to interrupt handle.
 * @param param
 *  The address of the parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
fm10k_dev_interrupt_handler_pf(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	uint32_t cause, status;
	struct fm10k_dev_info *dev_info =
		FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
	int status_mbx;
	s32 err;

	if (hw->mac.type != fm10k_mac_pf)
		return;

	cause = FM10K_READ_REG(hw, FM10K_EICR);

	/* Handle PCI fault cases */
	if (cause & FM10K_EICR_FAULT_MASK) {
		PMD_INIT_LOG(ERR, "INT: fault detected!");
		fm10k_dev_handle_fault(hw, cause);
	}

	/* Handle switch up/down */
	if (cause & FM10K_EICR_SWITCHNOTREADY)
		PMD_INIT_LOG(ERR, "INT: Switch is not ready");

	if (cause & FM10K_EICR_SWITCHREADY) {
		PMD_INIT_LOG(INFO, "INT: Switch is ready");
		if (dev_info->sm_down == 1) {
			fm10k_mbx_lock(hw);

			/* For recreating logical ports */
			status_mbx = hw->mac.ops.update_lport_state(hw,
					hw->mac.dglort_map, MAX_LPORT_NUM, 1);
			if (status_mbx == FM10K_SUCCESS)
				PMD_INIT_LOG(INFO,
					"INT: Recreated Logical port");
			else
				PMD_INIT_LOG(INFO,
					"INT: Logical ports weren't recreated");

			status_mbx = hw->mac.ops.update_xcast_mode(hw,
				hw->mac.dglort_map, FM10K_XCAST_MODE_NONE);
			if (status_mbx != FM10K_SUCCESS)
				PMD_INIT_LOG(ERR, "Failed to set XCAST mode");

			fm10k_mbx_unlock(hw);

			/* first clear the internal SW recording structure */
			if (!(dev->data->dev_conf.rxmode.mq_mode &
						ETH_MQ_RX_VMDQ_FLAG))
				fm10k_vlan_filter_set(dev, hw->mac.default_vid,
					false);

			fm10k_MAC_filter_set(dev, hw->mac.addr, false,
					MAIN_VSI_POOL_NUMBER);

			/*
			 * Add default mac address and vlan for the logical
			 * ports that have been created, leave to the
			 * application to fully recover Rx filtering.
			 */
			fm10k_MAC_filter_set(dev, hw->mac.addr, true,
					MAIN_VSI_POOL_NUMBER);

			if (!(dev->data->dev_conf.rxmode.mq_mode &
						ETH_MQ_RX_VMDQ_FLAG))
				fm10k_vlan_filter_set(dev, hw->mac.default_vid,
					true);

			dev_info->sm_down = 0;
			_rte_eth_dev_callback_process(dev,
					RTE_ETH_EVENT_INTR_LSC,
					NULL, NULL);
		}
	}

	/* Handle mailbox message */
	fm10k_mbx_lock(hw);
	err = hw->mbx.ops.process(hw, &hw->mbx);
	fm10k_mbx_unlock(hw);

	if (err == FM10K_ERR_RESET_REQUESTED) {
		PMD_INIT_LOG(INFO, "INT: Switch is down");
		dev_info->sm_down = 1;
		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
				NULL, NULL);
	}

	/* Handle SRAM error */
	if (cause & FM10K_EICR_SRAMERROR) {
		PMD_INIT_LOG(ERR, "INT: SRAM error on PEP");

		status = FM10K_READ_REG(hw, FM10K_SRAM_IP);
		/* Write to clear pending bits */
		FM10K_WRITE_REG(hw, FM10K_SRAM_IP, status);

		/* Todo: print out error message after shared code updates */
	}

	/* Clear these 3 events if having any */
	cause &= FM10K_EICR_SWITCHNOTREADY | FM10K_EICR_MAILBOX |
		FM10K_EICR_SWITCHREADY;
	if (cause)
		FM10K_WRITE_REG(hw, FM10K_EICR, cause);

	/* Re-enable interrupt from device side */
	FM10K_WRITE_REG(hw, FM10K_ITR(0), FM10K_ITR_AUTOMASK |
					FM10K_ITR_MASK_CLEAR);
	/* Re-enable interrupt from host side */
	rte_intr_enable(dev->intr_handle);
}

/**
 * VF interrupt handler triggered by NIC for handling a specific interrupt.
 *
 * @param handle
 *  Pointer to interrupt handle.
 * @param param
 *  The address of the parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
fm10k_dev_interrupt_handler_vf(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct fm10k_mbx_info *mbx = &hw->mbx;
	struct fm10k_dev_info *dev_info =
		FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
	const enum fm10k_mbx_state state = mbx->state;
	int status_mbx;

	if (hw->mac.type != fm10k_mac_vf)
		return;

	/* Handle mailbox message if lock is acquired */
	fm10k_mbx_lock(hw);
	hw->mbx.ops.process(hw, &hw->mbx);
	fm10k_mbx_unlock(hw);

	if (state == FM10K_STATE_OPEN && mbx->state == FM10K_STATE_CONNECT) {
		PMD_INIT_LOG(INFO, "INT: Switch has gone down");

		fm10k_mbx_lock(hw);
		hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map,
				MAX_LPORT_NUM, 1);
		fm10k_mbx_unlock(hw);

		/* Setting reset flag */
		dev_info->sm_down = 1;
		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
				NULL, NULL);
	}

	if (dev_info->sm_down == 1 &&
			hw->mac.dglort_map == FM10K_DGLORTMAP_ZERO) {
		PMD_INIT_LOG(INFO, "INT: Switch has gone up");
		fm10k_mbx_lock(hw);
		status_mbx = hw->mac.ops.update_xcast_mode(hw,
				hw->mac.dglort_map, FM10K_XCAST_MODE_NONE);
		if (status_mbx != FM10K_SUCCESS)
			PMD_INIT_LOG(ERR, "Failed to set XCAST mode");
		fm10k_mbx_unlock(hw);

		/* first clear the internal SW recording structure */
		fm10k_vlan_filter_set(dev, hw->mac.default_vid, false);
		fm10k_MAC_filter_set(dev, hw->mac.addr, false,
				MAIN_VSI_POOL_NUMBER);

		/*
		 * Add default mac address and vlan for the logical ports that
		 * have been created, leave to the application to fully recover
		 * Rx filtering.
		 */
		fm10k_MAC_filter_set(dev, hw->mac.addr, true,
				MAIN_VSI_POOL_NUMBER);
		fm10k_vlan_filter_set(dev, hw->mac.default_vid, true);

		dev_info->sm_down = 0;
		_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC,
				NULL, NULL);
	}

	/* Re-enable interrupt from device side */
	FM10K_WRITE_REG(hw, FM10K_VFITR(0), FM10K_ITR_AUTOMASK |
					FM10K_ITR_MASK_CLEAR);
	/* Re-enable interrupt from host side */
	rte_intr_enable(dev->intr_handle);
}

/* Mailbox message handlers in VF */
static const struct fm10k_msg_data fm10k_msgdata_vf[] = {
	FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test),
	FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_msg_mac_vlan_vf),
	FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_msg_lport_state_vf),
	FM10K_TLV_MSG_ERROR_HANDLER(fm10k_tlv_msg_error),
};

static int
fm10k_setup_mbx_service(struct fm10k_hw *hw)
{
	int err = 0;

	/* Initialize mailbox lock */
	fm10k_mbx_initlock(hw);

	/* Replace the default message handlers with new ones */
	if (hw->mac.type == fm10k_mac_vf)
		err = hw->mbx.ops.register_handlers(&hw->mbx, fm10k_msgdata_vf);

	if (err) {
		PMD_INIT_LOG(ERR, "Failed to register mailbox handler, err: %d",
			err);
		return err;
	}
	/* Connect to SM for PF device or PF for VF device */
	return hw->mbx.ops.connect(hw, &hw->mbx);
}
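
/*
 * Note (inferred from the code above): only the VF swaps in its own
 * handler table; the PF keeps the message handlers that the base driver
 * registered when the mailbox was created.
 */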

static void
fm10k_close_mbx_service(struct fm10k_hw *hw)
{
	/* Disconnect from SM for PF device or PF for VF device */
	hw->mbx.ops.disconnect(hw, &hw->mbx);
}

static const struct eth_dev_ops fm10k_eth_dev_ops = {
	.dev_configure		= fm10k_dev_configure,
	.dev_start		= fm10k_dev_start,
	.dev_stop		= fm10k_dev_stop,
	.dev_close		= fm10k_dev_close,
	.promiscuous_enable	= fm10k_dev_promiscuous_enable,
	.promiscuous_disable	= fm10k_dev_promiscuous_disable,
	.allmulticast_enable	= fm10k_dev_allmulticast_enable,
	.allmulticast_disable	= fm10k_dev_allmulticast_disable,
	.stats_get		= fm10k_stats_get,
	.xstats_get		= fm10k_xstats_get,
	.xstats_get_names	= fm10k_xstats_get_names,
	.stats_reset		= fm10k_stats_reset,
	.xstats_reset		= fm10k_stats_reset,
	.link_update		= fm10k_link_update,
	.dev_infos_get		= fm10k_dev_infos_get,
	.dev_supported_ptypes_get = fm10k_dev_supported_ptypes_get,
	.vlan_filter_set	= fm10k_vlan_filter_set,
	.vlan_offload_set	= fm10k_vlan_offload_set,
	.mac_addr_add		= fm10k_macaddr_add,
	.mac_addr_remove	= fm10k_macaddr_remove,
	.rx_queue_start		= fm10k_dev_rx_queue_start,
	.rx_queue_stop		= fm10k_dev_rx_queue_stop,
	.tx_queue_start		= fm10k_dev_tx_queue_start,
	.tx_queue_stop		= fm10k_dev_tx_queue_stop,
	.rx_queue_setup		= fm10k_rx_queue_setup,
	.rx_queue_release	= fm10k_rx_queue_release,
	.tx_queue_setup		= fm10k_tx_queue_setup,
	.tx_queue_release	= fm10k_tx_queue_release,
	.rx_descriptor_done	= fm10k_dev_rx_descriptor_done,
	.rx_queue_intr_enable	= fm10k_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable	= fm10k_dev_rx_queue_intr_disable,
	.reta_update		= fm10k_reta_update,
	.reta_query		= fm10k_reta_query,
	.rss_hash_update	= fm10k_rss_hash_update,
	.rss_hash_conf_get	= fm10k_rss_hash_conf_get,
};

static int ftag_check_handler(__rte_unused const char *key,
		const char *value, __rte_unused void *opaque)
{
	if (strcmp(value, "1"))
		return -1;

	return 0;
}

static int
fm10k_check_ftag(struct rte_devargs *devargs)
{
	struct rte_kvargs *kvlist;
	const char *ftag_key = "enable_ftag";

	if (devargs == NULL)
		return 0;

	kvlist = rte_kvargs_parse(devargs->args, NULL);
	if (kvlist == NULL)
		return 0;

	if (!rte_kvargs_count(kvlist, ftag_key)) {
		rte_kvargs_free(kvlist);
		return 0;
	}
	/* FTAG is enabled only when there's a key-value pair: enable_ftag=1 */
	if (rte_kvargs_process(kvlist, ftag_key,
		ftag_check_handler, NULL) < 0) {
		rte_kvargs_free(kvlist);
		return 0;
	}
	rte_kvargs_free(kvlist);

	return 1;
}
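
/*
 * Usage sketch (assumed, based on the devargs key above): FTAG support is
 * requested per device from the EAL command line, e.g.
 *
 *   testpmd -w 0000:84:00.0,enable_ftag=1 -- -i
 *
 * Any value other than "1" makes ftag_check_handler() fail, so the flag
 * stays off.
 */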

static uint16_t
fm10k_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
		uint16_t nb_pkts)
{
	uint16_t nb_tx = 0;
	struct fm10k_tx_queue *txq = (struct fm10k_tx_queue *)tx_queue;

	while (nb_pkts) {
		uint16_t ret, num;

		num = (uint16_t)RTE_MIN(nb_pkts, txq->rs_thresh);
		ret = fm10k_xmit_fixed_burst_vec(tx_queue, &tx_pkts[nb_tx],
						 num);
		nb_tx += ret;
		nb_pkts -= ret;
		if (ret < num)
			break;
	}

	return nb_tx;
}
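
/*
 * Example (illustrative): with rs_thresh = 32 and nb_pkts = 100, the loop
 * above issues fixed bursts of 32, 32, 32 and 4 packets, and stops early
 * if the ring fills up (ret < num), returning the count actually
 * transmitted.
 */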

static void __attribute__((cold))
fm10k_set_tx_function(struct rte_eth_dev *dev)
{
	struct fm10k_tx_queue *txq;
	int i;
	int use_sse = 1;
	uint16_t tx_ftag_en = 0;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		/* primary process has set the ftag flag and txq_flags */
		txq = dev->data->tx_queues[0];
		if (fm10k_tx_vec_condition_check(txq)) {
			dev->tx_pkt_burst = fm10k_xmit_pkts;
			dev->tx_pkt_prepare = fm10k_prep_pkts;
			PMD_INIT_LOG(DEBUG, "Use regular Tx func");
		} else {
			PMD_INIT_LOG(DEBUG, "Use vector Tx func");
			dev->tx_pkt_burst = fm10k_xmit_pkts_vec;
			dev->tx_pkt_prepare = NULL;
		}
		return;
	}

	if (fm10k_check_ftag(dev->device->devargs))
		tx_ftag_en = 1;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		txq->tx_ftag_en = tx_ftag_en;
		/* Check if Vector Tx is satisfied */
		if (fm10k_tx_vec_condition_check(txq))
			use_sse = 0;
	}

	if (use_sse) {
		PMD_INIT_LOG(DEBUG, "Use vector Tx func");
		for (i = 0; i < dev->data->nb_tx_queues; i++) {
			txq = dev->data->tx_queues[i];
			fm10k_txq_vec_setup(txq);
		}
		dev->tx_pkt_burst = fm10k_xmit_pkts_vec;
		dev->tx_pkt_prepare = NULL;
	} else {
		dev->tx_pkt_burst = fm10k_xmit_pkts;
		dev->tx_pkt_prepare = fm10k_prep_pkts;
		PMD_INIT_LOG(DEBUG, "Use regular Tx func");
	}
}
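
/*
 * Note (inferred): a secondary process only probes tx_queues[0] because
 * the primary process configured all queues identically; the vector
 * condition therefore holds for either all queues or none.
 */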

static void __attribute__((cold))
fm10k_set_rx_function(struct rte_eth_dev *dev)
{
	struct fm10k_dev_info *dev_info =
		FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);
	uint16_t i, rx_using_sse;
	uint16_t rx_ftag_en = 0;

	if (fm10k_check_ftag(dev->device->devargs))
		rx_ftag_en = 1;

	/* In order to allow Vector Rx there are a few configuration
	 * conditions to be met.
	 */
	if (!fm10k_rx_vec_condition_check(dev) &&
			dev_info->rx_vec_allowed && !rx_ftag_en) {
		if (dev->data->scattered_rx)
			dev->rx_pkt_burst = fm10k_recv_scattered_pkts_vec;
		else
			dev->rx_pkt_burst = fm10k_recv_pkts_vec;
	} else if (dev->data->scattered_rx)
		dev->rx_pkt_burst = fm10k_recv_scattered_pkts;
	else
		dev->rx_pkt_burst = fm10k_recv_pkts;

	rx_using_sse =
		(dev->rx_pkt_burst == fm10k_recv_scattered_pkts_vec ||
		dev->rx_pkt_burst == fm10k_recv_pkts_vec);

	if (rx_using_sse)
		PMD_INIT_LOG(DEBUG, "Use vector Rx func");
	else
		PMD_INIT_LOG(DEBUG, "Use regular Rx func");

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		struct fm10k_rx_queue *rxq = dev->data->rx_queues[i];

		rxq->rx_using_sse = rx_using_sse;
		rxq->rx_ftag_en = rx_ftag_en;
	}
}
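
/*
 * Note: rx_using_sse and rx_ftag_en are mirrored into each queue so the
 * per-queue receive code can branch on them without having to inspect
 * dev->rx_pkt_burst.
 */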

static void
fm10k_params_init(struct rte_eth_dev *dev)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct fm10k_dev_info *info =
		FM10K_DEV_PRIVATE_TO_INFO(dev->data->dev_private);

	/* Initialize bus info. Normally we would call fm10k_get_bus_info(),
	 * but there is no way to get link status without reading BAR4. Until
	 * this works, assume we have maximum bandwidth.
	 * @todo - fix bus info
	 */
	hw->bus_caps.speed = fm10k_bus_speed_8000;
	hw->bus_caps.width = fm10k_bus_width_pcie_x8;
	hw->bus_caps.payload = fm10k_bus_payload_512;
	hw->bus.speed = fm10k_bus_speed_8000;
	hw->bus.width = fm10k_bus_width_pcie_x8;
	hw->bus.payload = fm10k_bus_payload_256;

	info->rx_vec_allowed = true;
}

static int
eth_fm10k_dev_init(struct rte_eth_dev *dev)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pdev->intr_handle;
	int diag, i;
	struct fm10k_macvlan_filter_info *macvlan;

	PMD_INIT_FUNC_TRACE();

	dev->dev_ops = &fm10k_eth_dev_ops;
	dev->rx_pkt_burst = &fm10k_recv_pkts;
	dev->tx_pkt_burst = &fm10k_xmit_pkts;
	dev->tx_pkt_prepare = &fm10k_prep_pkts;

	/*
	 * Primary process does the whole initialization; secondary processes
	 * just select the same Rx and Tx functions as the primary.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		fm10k_set_rx_function(dev);
		fm10k_set_tx_function(dev);
		return 0;
	}

	rte_eth_copy_pci_info(dev, pdev);

	macvlan = FM10K_DEV_PRIVATE_TO_MACVLAN(dev->data->dev_private);
	memset(macvlan, 0, sizeof(*macvlan));
	/* Vendor and Device ID need to be set before init of shared code */
	memset(hw, 0, sizeof(*hw));
	hw->device_id = pdev->id.device_id;
	hw->vendor_id = pdev->id.vendor_id;
	hw->subsystem_device_id = pdev->id.subsystem_device_id;
	hw->subsystem_vendor_id = pdev->id.subsystem_vendor_id;
	hw->revision_id = 0;
	hw->hw_addr = (void *)pdev->mem_resource[0].addr;
	if (hw->hw_addr == NULL) {
		PMD_INIT_LOG(ERR, "Bad mem resource."
			" Try to blacklist unused devices.");
		return -EIO;
	}

	/* Store fm10k_adapter pointer */
	hw->back = dev->data->dev_private;

	/* Initialize the shared code */
	diag = fm10k_init_shared_code(hw);
	if (diag != FM10K_SUCCESS) {
		PMD_INIT_LOG(ERR, "Shared code init failed: %d", diag);
		return -EIO;
	}

	/* Initialize parameters */
	fm10k_params_init(dev);

	/* Initialize the hw */
	diag = fm10k_init_hw(hw);
	if (diag != FM10K_SUCCESS) {
		PMD_INIT_LOG(ERR, "Hardware init failed: %d", diag);
		return -EIO;
	}

	/* Initialize MAC address(es) */
	dev->data->mac_addrs = rte_zmalloc("fm10k",
			ETHER_ADDR_LEN * FM10K_MAX_MACADDR_NUM, 0);
	if (dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR, "Cannot allocate memory for MAC addresses");
		return -ENOMEM;
	}

	diag = fm10k_read_mac_addr(hw);

	ether_addr_copy((const struct ether_addr *)hw->mac.addr,
			&dev->data->mac_addrs[0]);

	if (diag != FM10K_SUCCESS ||
		!is_valid_assigned_ether_addr(dev->data->mac_addrs)) {

		/* Generate a random addr */
		eth_random_addr(hw->mac.addr);
		memcpy(hw->mac.perm_addr, hw->mac.addr, ETH_ALEN);
		ether_addr_copy((const struct ether_addr *)hw->mac.addr,
			&dev->data->mac_addrs[0]);
	}

	/* Reset the hw statistics */
	fm10k_stats_reset(dev);

	/* Reset the hw */
	diag = fm10k_reset_hw(hw);
	if (diag != FM10K_SUCCESS) {
		PMD_INIT_LOG(ERR, "Hardware reset failed: %d", diag);
		return -EIO;
	}

	/* Setup mailbox service */
	diag = fm10k_setup_mbx_service(hw);
	if (diag != FM10K_SUCCESS) {
		PMD_INIT_LOG(ERR, "Failed to setup mailbox: %d", diag);
		return -EIO;
	}

	/* PF and VF have different interrupt handling mechanisms */
	if (hw->mac.type == fm10k_mac_pf) {
		/* register callback func to eal lib */
		rte_intr_callback_register(intr_handle,
			fm10k_dev_interrupt_handler_pf, (void *)dev);

		/* enable MISC interrupt */
		fm10k_dev_enable_intr_pf(dev);
	} else { /* VF */
		rte_intr_callback_register(intr_handle,
			fm10k_dev_interrupt_handler_vf, (void *)dev);

		fm10k_dev_enable_intr_vf(dev);
	}

	/* Enable intr after callback registered */
	rte_intr_enable(intr_handle);

	hw->mac.ops.update_int_moderator(hw);

	/* Make sure the Switch Manager is ready before going forward. */
	if (hw->mac.type == fm10k_mac_pf) {
		int switch_ready = 0;

		for (i = 0; i < MAX_QUERY_SWITCH_STATE_TIMES; i++) {
			fm10k_mbx_lock(hw);
			hw->mac.ops.get_host_state(hw, &switch_ready);
			fm10k_mbx_unlock(hw);
			if (switch_ready)
				break;
			/* Delay some time to acquire async LPORT_MAP info. */
			rte_delay_us(WAIT_SWITCH_MSG_US);
		}

		if (switch_ready == 0) {
			PMD_INIT_LOG(ERR, "switch is not ready");
			return -1;
		}
	}
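
	/*
	 * Timing note: with MAX_QUERY_SWITCH_STATE_TIMES (10) polls spaced
	 * WAIT_SWITCH_MSG_US (100000 us) apart, the switch manager gets
	 * roughly one second to report ready before init fails.
	 */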

	/*
	 * The function below triggers operations on the mailbox; acquire the
	 * lock to avoid a race with the interrupt handler. Operations on the
	 * mailbox FIFO trigger an interrupt to the PF/SM, whose handler in
	 * turn generates an interrupt to our side, where the mailbox FIFO is
	 * touched again.
	 */
	fm10k_mbx_lock(hw);
	/* Enable port first */
	hw->mac.ops.update_lport_state(hw, hw->mac.dglort_map,
					MAX_LPORT_NUM, 1);

	/* Set unicast mode by default. App can change to other modes in other
	 * API functions.
	 */
	hw->mac.ops.update_xcast_mode(hw, hw->mac.dglort_map,
					FM10K_XCAST_MODE_NONE);

	fm10k_mbx_unlock(hw);

	/* Make sure the default VID is ready before going forward. */
	if (hw->mac.type == fm10k_mac_pf) {
		for (i = 0; i < MAX_QUERY_SWITCH_STATE_TIMES; i++) {
			if (hw->mac.default_vid)
				break;
			/* Delay some time to acquire async port VLAN info. */
			rte_delay_us(WAIT_SWITCH_MSG_US);
		}

		if (!hw->mac.default_vid) {
			PMD_INIT_LOG(ERR, "default VID is not ready");
			return -1;
		}
	}

	/* Add default mac address */
	fm10k_MAC_filter_set(dev, hw->mac.addr, true,
		MAIN_VSI_POOL_NUMBER);

	return 0;
}

static int
eth_fm10k_dev_uninit(struct rte_eth_dev *dev)
{
	struct fm10k_hw *hw = FM10K_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pdev = RTE_ETH_DEV_TO_PCI(dev);
	struct rte_intr_handle *intr_handle = &pdev->intr_handle;
	PMD_INIT_FUNC_TRACE();

	/* only uninitialize in the primary process */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	/* safe to close dev here */
	fm10k_dev_close(dev);

	dev->dev_ops = NULL;
	dev->rx_pkt_burst = NULL;
	dev->tx_pkt_burst = NULL;

	/* disable uio/vfio intr */
	rte_intr_disable(intr_handle);

	/* PF and VF have different interrupt handling mechanisms */
	if (hw->mac.type == fm10k_mac_pf) {
		/* disable interrupt */
		fm10k_dev_disable_intr_pf(dev);

		/* unregister callback func from eal lib */
		rte_intr_callback_unregister(intr_handle,
			fm10k_dev_interrupt_handler_pf, (void *)dev);
	} else {
		/* disable interrupt */
		fm10k_dev_disable_intr_vf(dev);

		rte_intr_callback_unregister(intr_handle,
			fm10k_dev_interrupt_handler_vf, (void *)dev);
	}

	/* free mac memory */
	if (dev->data->mac_addrs) {
		rte_free(dev->data->mac_addrs);
		dev->data->mac_addrs = NULL;
	}

	memset(hw, 0, sizeof(*hw));

	return 0;
}

static int eth_fm10k_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct fm10k_adapter), eth_fm10k_dev_init);
}

static int eth_fm10k_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, eth_fm10k_dev_uninit);
}

/*
 * The set of PCI devices this driver supports. This driver will enable both
 * PF and SR-IOV VF devices.
 */
static const struct rte_pci_id pci_id_fm10k_map[] = {
	{ RTE_PCI_DEVICE(FM10K_INTEL_VENDOR_ID, FM10K_DEV_ID_PF) },
	{ RTE_PCI_DEVICE(FM10K_INTEL_VENDOR_ID, FM10K_DEV_ID_SDI_FM10420_QDA2) },
	{ RTE_PCI_DEVICE(FM10K_INTEL_VENDOR_ID, FM10K_DEV_ID_VF) },
	{ .vendor_id = 0, /* sentinel */ },
};

static struct rte_pci_driver rte_pmd_fm10k = {
	.id_table = pci_id_fm10k_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
		     RTE_PCI_DRV_IOVA_AS_VA,
	.probe = eth_fm10k_pci_probe,
	.remove = eth_fm10k_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_fm10k, rte_pmd_fm10k);
RTE_PMD_REGISTER_PCI_TABLE(net_fm10k, pci_id_fm10k_map);
RTE_PMD_REGISTER_KMOD_DEP(net_fm10k, "* igb_uio | uio_pci_generic | vfio-pci");