/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2015 Intel Corporation
 */

#include <sys/types.h>
#include <sys/queue.h>
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <inttypes.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>

#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_string_fns.h>
#include <rte_malloc.h>
#include <rte_dev.h>

#include "base/vmxnet3_defs.h"

#include "vmxnet3_ring.h"
#include "vmxnet3_logs.h"
#include "vmxnet3_ethdev.h"

#define PROCESS_SYS_EVENTS 0

#define VMXNET3_TX_MAX_SEG UINT8_MAX

#define VMXNET3_TX_OFFLOAD_CAP		\
	(DEV_TX_OFFLOAD_VLAN_INSERT |	\
	 DEV_TX_OFFLOAD_IPV4_CKSUM |	\
	 DEV_TX_OFFLOAD_TCP_CKSUM |	\
	 DEV_TX_OFFLOAD_UDP_CKSUM |	\
	 DEV_TX_OFFLOAD_TCP_TSO |	\
	 DEV_TX_OFFLOAD_MULTI_SEGS)

#define VMXNET3_RX_OFFLOAD_CAP		\
	(DEV_RX_OFFLOAD_VLAN_STRIP |	\
	 DEV_RX_OFFLOAD_VLAN_FILTER |	\
	 DEV_RX_OFFLOAD_SCATTER |	\
	 DEV_RX_OFFLOAD_IPV4_CKSUM |	\
	 DEV_RX_OFFLOAD_UDP_CKSUM |	\
	 DEV_RX_OFFLOAD_TCP_CKSUM |	\
	 DEV_RX_OFFLOAD_TCP_LRO |	\
	 DEV_RX_OFFLOAD_JUMBO_FRAME)
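
/*
 * These two masks are what the PMD advertises through
 * vmxnet3_dev_info_get() as rx/tx_offload_capa. Per-queue offloads are
 * not supported; the queue capability fields are reported as 0 there.
 */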
static int eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev);
static int eth_vmxnet3_dev_uninit(struct rte_eth_dev *eth_dev);
static int vmxnet3_dev_configure(struct rte_eth_dev *dev);
static int vmxnet3_dev_start(struct rte_eth_dev *dev);
static void vmxnet3_dev_stop(struct rte_eth_dev *dev);
static void vmxnet3_dev_close(struct rte_eth_dev *dev);
static void vmxnet3_dev_set_rxmode(struct vmxnet3_hw *hw, uint32_t feature, int set);
static void vmxnet3_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void vmxnet3_dev_promiscuous_disable(struct rte_eth_dev *dev);
static void vmxnet3_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void vmxnet3_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int __vmxnet3_dev_link_update(struct rte_eth_dev *dev,
				     int wait_to_complete);
static int vmxnet3_dev_link_update(struct rte_eth_dev *dev,
				   int wait_to_complete);
static void vmxnet3_hw_stats_save(struct vmxnet3_hw *hw);
static int vmxnet3_dev_stats_get(struct rte_eth_dev *dev,
				 struct rte_eth_stats *stats);
static void vmxnet3_dev_stats_reset(struct rte_eth_dev *dev);
static int vmxnet3_dev_xstats_get_names(struct rte_eth_dev *dev,
					struct rte_eth_xstat_name *xstats,
					unsigned int n);
static int vmxnet3_dev_xstats_get(struct rte_eth_dev *dev,
				  struct rte_eth_xstat *xstats, unsigned int n);
static void vmxnet3_dev_info_get(struct rte_eth_dev *dev,
				 struct rte_eth_dev_info *dev_info);
static const uint32_t *
vmxnet3_dev_supported_ptypes_get(struct rte_eth_dev *dev);
static int vmxnet3_dev_vlan_filter_set(struct rte_eth_dev *dev,
				       uint16_t vid, int on);
static int vmxnet3_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask);
static int vmxnet3_mac_addr_set(struct rte_eth_dev *dev,
				struct ether_addr *mac_addr);
static void vmxnet3_interrupt_handler(void *param);

int vmxnet3_logtype_init;
int vmxnet3_logtype_driver;

/*
 * The set of PCI devices this driver supports
 */
#define VMWARE_PCI_VENDOR_ID 0x15AD
#define VMWARE_DEV_ID_VMXNET3 0x07B0
static const struct rte_pci_id pci_id_vmxnet3_map[] = {
	{ RTE_PCI_DEVICE(VMWARE_PCI_VENDOR_ID, VMWARE_DEV_ID_VMXNET3) },
	{ .vendor_id = 0, /* sentinel */ },
};

static const struct eth_dev_ops vmxnet3_eth_dev_ops = {
	.dev_configure            = vmxnet3_dev_configure,
	.dev_start                = vmxnet3_dev_start,
	.dev_stop                 = vmxnet3_dev_stop,
	.dev_close                = vmxnet3_dev_close,
	.promiscuous_enable       = vmxnet3_dev_promiscuous_enable,
	.promiscuous_disable      = vmxnet3_dev_promiscuous_disable,
	.allmulticast_enable      = vmxnet3_dev_allmulticast_enable,
	.allmulticast_disable     = vmxnet3_dev_allmulticast_disable,
	.link_update              = vmxnet3_dev_link_update,
	.stats_get                = vmxnet3_dev_stats_get,
	.xstats_get_names         = vmxnet3_dev_xstats_get_names,
	.xstats_get               = vmxnet3_dev_xstats_get,
	.stats_reset              = vmxnet3_dev_stats_reset,
	.mac_addr_set             = vmxnet3_mac_addr_set,
	.dev_infos_get            = vmxnet3_dev_info_get,
	.dev_supported_ptypes_get = vmxnet3_dev_supported_ptypes_get,
	.vlan_filter_set          = vmxnet3_dev_vlan_filter_set,
	.vlan_offload_set         = vmxnet3_dev_vlan_offload_set,
	.rx_queue_setup           = vmxnet3_dev_rx_queue_setup,
	.rx_queue_release         = vmxnet3_dev_rx_queue_release,
	.tx_queue_setup           = vmxnet3_dev_tx_queue_setup,
	.tx_queue_release         = vmxnet3_dev_tx_queue_release,
};

struct vmxnet3_xstats_name_off {
	char name[RTE_ETH_XSTATS_NAME_SIZE];
	unsigned int offset;
};

/* tx_qX_ is prepended to the name string here */
static const struct vmxnet3_xstats_name_off vmxnet3_txq_stat_strings[] = {
	{"drop_total", offsetof(struct vmxnet3_txq_stats, drop_total)},
	{"drop_too_many_segs", offsetof(struct vmxnet3_txq_stats, drop_too_many_segs)},
	{"drop_tso", offsetof(struct vmxnet3_txq_stats, drop_tso)},
	{"tx_ring_full", offsetof(struct vmxnet3_txq_stats, tx_ring_full)},
};

/* rx_qX_ is prepended to the name string here */
static const struct vmxnet3_xstats_name_off vmxnet3_rxq_stat_strings[] = {
	{"drop_total", offsetof(struct vmxnet3_rxq_stats, drop_total)},
	{"drop_err", offsetof(struct vmxnet3_rxq_stats, drop_err)},
	{"drop_fcs", offsetof(struct vmxnet3_rxq_stats, drop_fcs)},
	{"rx_buf_alloc_failure", offsetof(struct vmxnet3_rxq_stats, rx_buf_alloc_failure)},
};
static const struct rte_memzone *
gpa_zone_reserve(struct rte_eth_dev *dev, uint32_t size,
		 const char *post_string, int socket_id,
		 uint16_t align, bool reuse)
{
	char z_name[RTE_MEMZONE_NAMESIZE];
	const struct rte_memzone *mz;

	snprintf(z_name, sizeof(z_name), "eth_p%d_%s",
		 dev->data->port_id, post_string);

	mz = rte_memzone_lookup(z_name);
	if (!reuse) {
		if (mz)
			rte_memzone_free(mz);
		return rte_memzone_reserve_aligned(z_name, size, socket_id,
				RTE_MEMZONE_IOVA_CONTIG, align);
	}

	if (mz)
		return mz;

	return rte_memzone_reserve_aligned(z_name, size, socket_id,
			RTE_MEMZONE_IOVA_CONTIG, align);
}

/*
 * This function is based on vmxnet3_disable_intr()
 */
static void
vmxnet3_disable_intr(struct vmxnet3_hw *hw)
{
	int i;

	PMD_INIT_FUNC_TRACE();

	hw->shared->devRead.intrConf.intrCtrl |= VMXNET3_IC_DISABLE_ALL;
	for (i = 0; i < hw->num_intrs; i++)
		VMXNET3_WRITE_BAR0_REG(hw, VMXNET3_REG_IMR + i * 8, 1);
}

static void
vmxnet3_enable_intr(struct vmxnet3_hw *hw)
{
	int i;

	PMD_INIT_FUNC_TRACE();

	hw->shared->devRead.intrConf.intrCtrl &= ~VMXNET3_IC_DISABLE_ALL;
	for (i = 0; i < hw->num_intrs; i++)
		VMXNET3_WRITE_BAR0_REG(hw, VMXNET3_REG_IMR + i * 8, 0);
}

/*
 * Gets tx data ring descriptor size. Falls back to the default size when
 * the device reports a value that is out of range or misaligned.
 */
static uint16_t
eth_vmxnet3_txdata_get(struct vmxnet3_hw *hw)
{
	uint16 txdata_desc_size;

	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
			       VMXNET3_CMD_GET_TXDATA_DESC_SIZE);
	txdata_desc_size = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);

	return (txdata_desc_size < VMXNET3_TXDATA_DESC_MIN_SIZE ||
		txdata_desc_size > VMXNET3_TXDATA_DESC_MAX_SIZE ||
		txdata_desc_size & VMXNET3_TXDATA_DESC_SIZE_MASK) ?
		sizeof(struct Vmxnet3_TxDataDesc) : txdata_desc_size;
}

/*
 * It returns 0 on success.
 */
static int
eth_vmxnet3_dev_init(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev;
	struct vmxnet3_hw *hw = eth_dev->data->dev_private;
	uint32_t mac_hi, mac_lo, ver;
	struct rte_eth_link link;

	PMD_INIT_FUNC_TRACE();

	eth_dev->dev_ops = &vmxnet3_eth_dev_ops;
	eth_dev->rx_pkt_burst = &vmxnet3_recv_pkts;
	eth_dev->tx_pkt_burst = &vmxnet3_xmit_pkts;
	eth_dev->tx_pkt_prepare = vmxnet3_prep_pkts;
	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);

	/*
	 * for secondary processes, we don't initialize any further as primary
	 * has already done this work.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	rte_eth_copy_pci_info(eth_dev, pci_dev);

	/* Vendor and Device ID need to be set before init of shared code */
	hw->device_id = pci_dev->id.device_id;
	hw->vendor_id = pci_dev->id.vendor_id;
	hw->hw_addr0 = (void *)pci_dev->mem_resource[0].addr;
	hw->hw_addr1 = (void *)pci_dev->mem_resource[1].addr;

	hw->num_rx_queues = 1;
	hw->num_tx_queues = 1;
	hw->bufs_per_pkt = 1;
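
	/*
	 * VRRS is a bitmask of the device revisions the emulation supports;
	 * the driver picks the highest revision it knows (3, then 2, then 1)
	 * and writes that single bit back to commit the selection. Note that
	 * hw->version stores the revision number itself (REV_x + 1).
	 */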
	/* Check h/w version compatibility with driver. */
	ver = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_VRRS);
	PMD_INIT_LOG(DEBUG, "Hardware version : %d", ver);

	if (ver & (1 << VMXNET3_REV_3)) {
		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_VRRS,
				       1 << VMXNET3_REV_3);
		hw->version = VMXNET3_REV_3 + 1;
	} else if (ver & (1 << VMXNET3_REV_2)) {
		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_VRRS,
				       1 << VMXNET3_REV_2);
		hw->version = VMXNET3_REV_2 + 1;
	} else if (ver & (1 << VMXNET3_REV_1)) {
		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_VRRS,
				       1 << VMXNET3_REV_1);
		hw->version = VMXNET3_REV_1 + 1;
	} else {
		PMD_INIT_LOG(ERR, "Incompatible hardware version: %d", ver);
		return -EIO;
	}

	PMD_INIT_LOG(DEBUG, "Using device version %d", hw->version);

	/* Check UPT version compatibility with driver. */
	ver = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_UVRS);
	PMD_INIT_LOG(DEBUG, "UPT hardware version : %d", ver);
	if (ver & 0x1)
		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_UVRS, 1);
	else {
		PMD_INIT_LOG(ERR, "Incompatible UPT version.");
		return -EIO;
	}

	/* Getting MAC Address */
	mac_lo = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_MACL);
	mac_hi = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_MACH);
	memcpy(hw->perm_addr, &mac_lo, 4);
	memcpy(hw->perm_addr + 4, &mac_hi, 2);

	/* Allocate memory for storing MAC addresses */
	eth_dev->data->mac_addrs = rte_zmalloc("vmxnet3", ETHER_ADDR_LEN *
					       VMXNET3_MAX_MAC_ADDRS, 0);
	if (eth_dev->data->mac_addrs == NULL) {
		PMD_INIT_LOG(ERR,
			     "Failed to allocate %d bytes needed to store MAC addresses",
			     ETHER_ADDR_LEN * VMXNET3_MAX_MAC_ADDRS);
		return -ENOMEM;
	}

	/* Copy the permanent MAC address */
	ether_addr_copy((struct ether_addr *)hw->perm_addr,
			&eth_dev->data->mac_addrs[0]);

	PMD_INIT_LOG(DEBUG, "MAC Address : %02x:%02x:%02x:%02x:%02x:%02x",
		     hw->perm_addr[0], hw->perm_addr[1], hw->perm_addr[2],
		     hw->perm_addr[3], hw->perm_addr[4], hw->perm_addr[5]);

	/* Flag to call rte_eth_dev_release_port() in rte_eth_dev_close(). */
	eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;

	/* Put device in Quiesce Mode */
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_QUIESCE_DEV);

	/* allow untagged pkts */
	VMXNET3_SET_VFTABLE_ENTRY(hw->shadow_vfta, 0);

	hw->txdata_desc_size = VMXNET3_VERSION_GE_3(hw) ?
		eth_vmxnet3_txdata_get(hw) : sizeof(struct Vmxnet3_TxDataDesc);

	hw->rxdata_desc_size = VMXNET3_VERSION_GE_3(hw) ?
		VMXNET3_DEF_RXDATA_DESC_SIZE : 0;
	RTE_ASSERT((hw->rxdata_desc_size & ~VMXNET3_RXDATA_DESC_SIZE_MASK) ==
		   hw->rxdata_desc_size);

	/* clear shadow stats */
	memset(hw->saved_tx_stats, 0, sizeof(hw->saved_tx_stats));
	memset(hw->saved_rx_stats, 0, sizeof(hw->saved_rx_stats));

	/* clear snapshot stats */
	memset(hw->snapshot_tx_stats, 0, sizeof(hw->snapshot_tx_stats));
	memset(hw->snapshot_rx_stats, 0, sizeof(hw->snapshot_rx_stats));

	/* set the initial link status */
	memset(&link, 0, sizeof(link));
	link.link_duplex = ETH_LINK_FULL_DUPLEX;
	link.link_speed = ETH_SPEED_NUM_10G;
	link.link_autoneg = ETH_LINK_FIXED;
	rte_eth_linkstatus_set(eth_dev, &link);

	return 0;
}
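
/*
 * Undo eth_vmxnet3_dev_init(). Only the primary process tears the port
 * down, and only after vmxnet3_dev_close() has run (adapter_stopped set);
 * otherwise the call is refused with -EBUSY.
 */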
static int
eth_vmxnet3_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct vmxnet3_hw *hw = eth_dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	if (hw->adapter_stopped == 0) {
		PMD_INIT_LOG(DEBUG, "Device has not been closed.");
		return -EBUSY;
	}

	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;
	eth_dev->tx_pkt_prepare = NULL;

	return 0;
}

static int eth_vmxnet3_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
				 struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct vmxnet3_hw), eth_vmxnet3_dev_init);
}

static int eth_vmxnet3_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, eth_vmxnet3_dev_uninit);
}

static struct rte_pci_driver rte_vmxnet3_pmd = {
	.id_table = pci_id_vmxnet3_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
	.probe = eth_vmxnet3_pci_probe,
	.remove = eth_vmxnet3_pci_remove,
};
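
/*
 * dev_configure validates the requested queue counts, then carves out the
 * DMA-able zones the device reads at activation time: the
 * Vmxnet3_DriverShared page, the per-queue Tx/Rx descriptor block and,
 * when RSS is requested, the UPT1_RSSConf structure.
 */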
static int
vmxnet3_dev_configure(struct rte_eth_dev *dev)
{
	const struct rte_memzone *mz;
	struct vmxnet3_hw *hw = dev->data->dev_private;
	size_t size;

	PMD_INIT_FUNC_TRACE();

	if (dev->data->nb_tx_queues > VMXNET3_MAX_TX_QUEUES ||
	    dev->data->nb_rx_queues > VMXNET3_MAX_RX_QUEUES) {
		PMD_INIT_LOG(ERR, "ERROR: Number of queues not supported");
		return -EINVAL;
	}

	if (!rte_is_power_of_2(dev->data->nb_rx_queues)) {
		PMD_INIT_LOG(ERR, "ERROR: Number of rx queues not power of 2");
		return -EINVAL;
	}

	size = dev->data->nb_tx_queues * sizeof(struct Vmxnet3_TxQueueDesc) +
		dev->data->nb_rx_queues * sizeof(struct Vmxnet3_RxQueueDesc);

	if (size > UINT16_MAX)
		return -EINVAL;

	hw->num_rx_queues = (uint8_t)dev->data->nb_rx_queues;
	hw->num_tx_queues = (uint8_t)dev->data->nb_tx_queues;

	/*
	 * Allocate a memzone for Vmxnet3_DriverShared - Vmxnet3_DSDevRead
	 * on current socket
	 */
	mz = gpa_zone_reserve(dev, sizeof(struct Vmxnet3_DriverShared),
			      "shared", rte_socket_id(), 8, 1);

	if (mz == NULL) {
		PMD_INIT_LOG(ERR, "ERROR: Creating shared zone");
		return -ENOMEM;
	}
	memset(mz->addr, 0, mz->len);

	hw->shared = mz->addr;
	hw->sharedPA = mz->iova;

	/*
	 * Allocate a memzone for Vmxnet3_RxQueueDesc - Vmxnet3_TxQueueDesc
	 * on current socket.
	 *
	 * We cannot reuse this memzone from previous allocation as its size
	 * depends on the number of tx and rx queues, which could be different
	 * from one config to another.
	 */
	mz = gpa_zone_reserve(dev, size, "queuedesc", rte_socket_id(),
			      VMXNET3_QUEUE_DESC_ALIGN, 0);
	if (mz == NULL) {
		PMD_INIT_LOG(ERR, "ERROR: Creating queue descriptors zone");
		return -ENOMEM;
	}
	memset(mz->addr, 0, mz->len);

	hw->tqd_start = (Vmxnet3_TxQueueDesc *)mz->addr;
	hw->rqd_start = (Vmxnet3_RxQueueDesc *)(hw->tqd_start + hw->num_tx_queues);

	hw->queueDescPA = mz->iova;
	hw->queue_desc_len = (uint16_t)size;

	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
		/* Allocate memory structure for UPT1_RSSConf and configure */
		mz = gpa_zone_reserve(dev, sizeof(struct VMXNET3_RSSConf),
				      "rss_conf", rte_socket_id(),
				      RTE_CACHE_LINE_SIZE, 1);
		if (mz == NULL) {
			PMD_INIT_LOG(ERR,
				     "ERROR: Creating rss_conf structure zone");
			return -ENOMEM;
		}
		memset(mz->addr, 0, mz->len);

		hw->rss_conf = mz->addr;
		hw->rss_confPA = mz->iova;
	}

	return 0;
}
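
/*
 * Program the unicast filter from a 6-byte MAC address: the low four bytes
 * go to the MACL register and the remaining two to MACH, matching the split
 * used when the address is read back in eth_vmxnet3_dev_init().
 */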
static void
vmxnet3_write_mac(struct vmxnet3_hw *hw, const uint8_t *addr)
{
	uint32_t val;

	PMD_INIT_LOG(DEBUG,
		     "Writing MAC Address : %02x:%02x:%02x:%02x:%02x:%02x",
		     addr[0], addr[1], addr[2],
		     addr[3], addr[4], addr[5]);

	memcpy(&val, addr, 4);
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_MACL, val);

	memcpy(&val, addr + 4, 2);
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_MACH, val);
}
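
/*
 * Register the Rx mempool memory with a version 3 device so it can
 * translate buffer addresses up front. Each distinct mempool becomes one
 * Vmxnet3_MemoryRegion; queues sharing a mempool are folded into a single
 * entry whose tx/rxQueueBits bitmasks record which queues use it.
 */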
static int
vmxnet3_dev_setup_memreg(struct rte_eth_dev *dev)
{
	struct vmxnet3_hw *hw = dev->data->dev_private;
	Vmxnet3_DriverShared *shared = hw->shared;
	Vmxnet3_CmdInfo *cmdInfo;
	struct rte_mempool *mp[VMXNET3_MAX_RX_QUEUES];
	uint8_t index[VMXNET3_MAX_RX_QUEUES + VMXNET3_MAX_TX_QUEUES];
	uint32_t num, i, j, size;

	if (hw->memRegsPA == 0) {
		const struct rte_memzone *mz;

		size = sizeof(Vmxnet3_MemRegs) +
			(VMXNET3_MAX_RX_QUEUES + VMXNET3_MAX_TX_QUEUES) *
			sizeof(Vmxnet3_MemoryRegion);

		mz = gpa_zone_reserve(dev, size, "memRegs", rte_socket_id(), 8,
				      1);
		if (mz == NULL) {
			PMD_INIT_LOG(ERR, "ERROR: Creating memRegs zone");
			return -ENOMEM;
		}
		memset(mz->addr, 0, mz->len);
		hw->memRegs = mz->addr;
		hw->memRegsPA = mz->iova;
	}

	num = hw->num_rx_queues;

	for (i = 0; i < num; i++) {
		vmxnet3_rx_queue_t *rxq = dev->data->rx_queues[i];

		mp[i] = rxq->mp;
		index[i] = 1 << i;
	}

	/*
	 * The same mempool could be used by multiple queues. In such a case,
	 * remove duplicate mempool entries. Only one entry is kept with
	 * bitmask indicating queues that are using this mempool.
	 */
	for (i = 1; i < num; i++) {
		for (j = 0; j < i; j++) {
			if (mp[i] == mp[j]) {
				mp[i] = NULL;
				index[j] |= 1 << i;
				break;
			}
		}
	}

	j = 0;
	for (i = 0; i < num; i++) {
		if (mp[i] == NULL)
			continue;

		Vmxnet3_MemoryRegion *mr = &hw->memRegs->memRegs[j];

		mr->startPA =
			(uintptr_t)STAILQ_FIRST(&mp[i]->mem_list)->iova;
		mr->length = STAILQ_FIRST(&mp[i]->mem_list)->len <= INT32_MAX ?
			STAILQ_FIRST(&mp[i]->mem_list)->len : INT32_MAX;
		mr->txQueueBits = index[i];
		mr->rxQueueBits = index[i];

		PMD_INIT_LOG(INFO,
			     "index: %u startPA: %" PRIu64 " length: %u, "
			     "rxBits: %x",
			     j, mr->startPA, mr->length, mr->rxQueueBits);
		j++;
	}
	hw->memRegs->numRegs = j;
	PMD_INIT_LOG(INFO, "numRegs: %u", j);

	size = sizeof(Vmxnet3_MemRegs) +
		(j - 1) * sizeof(Vmxnet3_MemoryRegion);

	cmdInfo = &shared->cu.cmdInfo;
	cmdInfo->varConf.confVer = 1;
	cmdInfo->varConf.confLen = size;
	cmdInfo->varConf.confPA = hw->memRegsPA;

	return 0;
}
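
/*
 * Fill in the Vmxnet3_DriverShared area the device reads on
 * VMXNET3_CMD_ACTIVATE_DEV: guest info, MTU, per-queue ring addresses and
 * sizes, interrupt configuration, offload feature bits and, when RSS is
 * enabled, the RSS configuration descriptor.
 */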
static int
vmxnet3_setup_driver_shared(struct rte_eth_dev *dev)
{
	struct rte_eth_conf port_conf = dev->data->dev_conf;
	struct vmxnet3_hw *hw = dev->data->dev_private;
	uint32_t mtu = dev->data->mtu;
	Vmxnet3_DriverShared *shared = hw->shared;
	Vmxnet3_DSDevRead *devRead = &shared->devRead;
	uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;
	uint32_t i;
	int ret;

	shared->magic = VMXNET3_REV1_MAGIC;
	devRead->misc.driverInfo.version = VMXNET3_DRIVER_VERSION_NUM;

	/* Setting up Guest OS information */
	devRead->misc.driverInfo.gos.gosBits = sizeof(void *) == 4 ?
		VMXNET3_GOS_BITS_32 : VMXNET3_GOS_BITS_64;
	devRead->misc.driverInfo.gos.gosType = VMXNET3_GOS_TYPE_LINUX;
	devRead->misc.driverInfo.vmxnet3RevSpt = 1;
	devRead->misc.driverInfo.uptVerSpt = 1;

	devRead->misc.mtu = rte_le_to_cpu_32(mtu);
	devRead->misc.queueDescPA = hw->queueDescPA;
	devRead->misc.queueDescLen = hw->queue_desc_len;
	devRead->misc.numTxQueues = hw->num_tx_queues;
	devRead->misc.numRxQueues = hw->num_rx_queues;

	/*
	 * Set number of interrupts to 1.
	 * The PMD disables all the interrupts by default, but at least one
	 * interrupt must be configured to activate the device, and it is
	 * needed for link change events in any case.
	 */
	hw->num_intrs = devRead->intrConf.numIntrs = 1;
	devRead->intrConf.intrCtrl |= VMXNET3_IC_DISABLE_ALL;

	for (i = 0; i < hw->num_tx_queues; i++) {
		Vmxnet3_TxQueueDesc *tqd = &hw->tqd_start[i];
		vmxnet3_tx_queue_t *txq = dev->data->tx_queues[i];

		txq->shared = &hw->tqd_start[i];

		tqd->ctrl.txNumDeferred = 0;
		tqd->ctrl.txThreshold = 1;
		tqd->conf.txRingBasePA = txq->cmd_ring.basePA;
		tqd->conf.compRingBasePA = txq->comp_ring.basePA;
		tqd->conf.dataRingBasePA = txq->data_ring.basePA;

		tqd->conf.txRingSize = txq->cmd_ring.size;
		tqd->conf.compRingSize = txq->comp_ring.size;
		tqd->conf.dataRingSize = txq->data_ring.size;
		tqd->conf.txDataRingDescSize = txq->txdata_desc_size;
		tqd->conf.intrIdx = txq->comp_ring.intr_idx;
		tqd->status.stopped = TRUE;
		tqd->status.error = 0;
		memset(&tqd->stats, 0, sizeof(tqd->stats));
	}

	for (i = 0; i < hw->num_rx_queues; i++) {
		Vmxnet3_RxQueueDesc *rqd = &hw->rqd_start[i];
		vmxnet3_rx_queue_t *rxq = dev->data->rx_queues[i];

		rxq->shared = &hw->rqd_start[i];

		rqd->conf.rxRingBasePA[0] = rxq->cmd_ring[0].basePA;
		rqd->conf.rxRingBasePA[1] = rxq->cmd_ring[1].basePA;
		rqd->conf.compRingBasePA = rxq->comp_ring.basePA;

		rqd->conf.rxRingSize[0] = rxq->cmd_ring[0].size;
		rqd->conf.rxRingSize[1] = rxq->cmd_ring[1].size;
		rqd->conf.compRingSize = rxq->comp_ring.size;
		rqd->conf.intrIdx = rxq->comp_ring.intr_idx;
		if (VMXNET3_VERSION_GE_3(hw)) {
			rqd->conf.rxDataRingBasePA = rxq->data_ring.basePA;
			rqd->conf.rxDataRingDescSize = rxq->data_desc_size;
		}
		rqd->status.stopped = TRUE;
		rqd->status.error = 0;
		memset(&rqd->stats, 0, sizeof(rqd->stats));
	}

	/* RxMode starts with no VMXNET3_RXM_xxx bits set */
	devRead->rxFilterConf.rxMode = 0;

	/* Setting up feature flags */
	if (rx_offloads & DEV_RX_OFFLOAD_CHECKSUM)
		devRead->misc.uptFeatures |= VMXNET3_F_RXCSUM;

	if (rx_offloads & DEV_RX_OFFLOAD_TCP_LRO) {
		devRead->misc.uptFeatures |= VMXNET3_F_LRO;
		devRead->misc.maxNumRxSG = 0;
	}

	if (port_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) {
		ret = vmxnet3_rss_configure(dev);
		if (ret != VMXNET3_SUCCESS)
			return ret;

		devRead->misc.uptFeatures |= VMXNET3_F_RSS;
		devRead->rssConfDesc.confVer = 1;
		devRead->rssConfDesc.confLen = sizeof(struct VMXNET3_RSSConf);
		devRead->rssConfDesc.confPA = hw->rss_confPA;
	}

	ret = vmxnet3_dev_vlan_offload_set(dev,
			ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK);
	if (ret)
		return ret;

	vmxnet3_write_mac(hw, dev->data->mac_addrs->addr_bytes);

	return VMXNET3_SUCCESS;
}

/*
 * Configure device link speed and setup link.
 * Must be called after eth_vmxnet3_dev_init(), otherwise it might fail.
 * It returns 0 on success.
 */
static int
vmxnet3_dev_start(struct rte_eth_dev *dev)
{
	int ret;
	struct vmxnet3_hw *hw = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	/* Save stats before it is reset by CMD_ACTIVATE */
	vmxnet3_hw_stats_save(hw);

	ret = vmxnet3_setup_driver_shared(dev);
	if (ret != VMXNET3_SUCCESS)
		return ret;

	/* check if lsc interrupt feature is enabled */
	if (dev->data->dev_conf.intr_conf.lsc) {
		struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);

		/* Setup interrupt callback */
		rte_intr_callback_register(&pci_dev->intr_handle,
					   vmxnet3_interrupt_handler, dev);

		if (rte_intr_enable(&pci_dev->intr_handle) < 0) {
			PMD_INIT_LOG(ERR, "interrupt enable failed");
			return -EIO;
		}
	}

	/* Exchange shared data with device */
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAL,
			       VMXNET3_GET_ADDR_LO(hw->sharedPA));
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAH,
			       VMXNET3_GET_ADDR_HI(hw->sharedPA));

	/* Activate device by register write */
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_ACTIVATE_DEV);
	ret = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);

	if (ret != 0) {
		PMD_INIT_LOG(ERR, "Device activation: UNSUCCESSFUL");
		return -EINVAL;
	}

	/* Setup memory region for rx buffers; failure here is non-fatal */
	ret = vmxnet3_dev_setup_memreg(dev);
	if (ret == 0) {
		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
				       VMXNET3_CMD_REGISTER_MEMREGS);
		ret = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);
		if (ret != 0)
			PMD_INIT_LOG(DEBUG,
				     "Failed in setup memory region cmd");
		ret = 0;
	} else {
		PMD_INIT_LOG(DEBUG, "Failed to setup memory region");
	}

	/* Disable interrupts */
	vmxnet3_disable_intr(hw);

	/*
	 * Load RX queues with blank mbufs and update next2fill index for device
	 * Update RxMode of the device
	 */
	ret = vmxnet3_dev_rxtx_init(dev);
	if (ret != VMXNET3_SUCCESS) {
		PMD_INIT_LOG(ERR, "Device queue init: UNSUCCESSFUL");
		return ret;
	}

	hw->adapter_stopped = FALSE;

	/* Setting proper Rx Mode and issue Rx Mode Update command */
	vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_UCAST | VMXNET3_RXM_BCAST, 1);

	if (dev->data->dev_conf.intr_conf.lsc) {
		vmxnet3_enable_intr(hw);

		/*
		 * Update link state from device since this won't be
		 * done upon starting with lsc in use. This is done
		 * only after enabling interrupts to avoid any race
		 * where the link state could change without an
		 * interrupt being fired.
		 */
		__vmxnet3_dev_link_update(dev, 0);
	}

	return VMXNET3_SUCCESS;
}

/*
 * Stop device: disable rx and tx functions to allow for reconfiguring.
 */
static void
vmxnet3_dev_stop(struct rte_eth_dev *dev)
{
	struct rte_eth_link link;
	struct vmxnet3_hw *hw = dev->data->dev_private;

	PMD_INIT_FUNC_TRACE();

	if (hw->adapter_stopped == 1) {
		PMD_INIT_LOG(DEBUG, "Device already stopped.");
		return;
	}

	/* disable interrupts */
	vmxnet3_disable_intr(hw);

	if (dev->data->dev_conf.intr_conf.lsc) {
		struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);

		rte_intr_disable(&pci_dev->intr_handle);

		rte_intr_callback_unregister(&pci_dev->intr_handle,
					     vmxnet3_interrupt_handler, dev);
	}

	/* quiesce the device first */
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_QUIESCE_DEV);
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAL, 0);
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_DSAH, 0);

	/* reset the device */
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV);
	PMD_INIT_LOG(DEBUG, "Device reset.");

	vmxnet3_dev_clear_queues(dev);

	/* Clear recorded link status */
	memset(&link, 0, sizeof(link));
	link.link_duplex = ETH_LINK_FULL_DUPLEX;
	link.link_speed = ETH_SPEED_NUM_10G;
	link.link_autoneg = ETH_LINK_FIXED;
	rte_eth_linkstatus_set(dev, &link);

	hw->adapter_stopped = 1;
}

static void
vmxnet3_free_queues(struct rte_eth_dev *dev)
{
	int i;

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		void *rxq = dev->data->rx_queues[i];

		vmxnet3_dev_rx_queue_release(rxq);
	}
	dev->data->nb_rx_queues = 0;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		void *txq = dev->data->tx_queues[i];

		vmxnet3_dev_tx_queue_release(txq);
	}
	dev->data->nb_tx_queues = 0;
}

/*
 * Reset and stop device.
 */
static void
vmxnet3_dev_close(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();

	vmxnet3_dev_stop(dev);
	vmxnet3_free_queues(dev);
}
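
/*
 * Statistics bookkeeping: the device clears its counters on
 * VMXNET3_CMD_ACTIVATE_DEV, so vmxnet3_hw_stats_save() accumulates them
 * into saved_*_stats before every (re)start, and the vmxnet3_hw_*_stats_get()
 * helpers below always report device counters plus that saved base.
 * snapshot_*_stats additionally records the totals seen at the last
 * stats_reset so vmxnet3_tx/rx_stats_get() can subtract them.
 */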
static void
vmxnet3_hw_tx_stats_get(struct vmxnet3_hw *hw, unsigned int q,
			struct UPT1_TxStats *res)
{
#define VMXNET3_UPDATE_TX_STAT(h, i, f, r)		\
		((r)->f = (h)->tqd_start[(i)].stats.f +	\
			(h)->saved_tx_stats[(i)].f)

	VMXNET3_UPDATE_TX_STAT(hw, q, ucastPktsTxOK, res);
	VMXNET3_UPDATE_TX_STAT(hw, q, mcastPktsTxOK, res);
	VMXNET3_UPDATE_TX_STAT(hw, q, bcastPktsTxOK, res);
	VMXNET3_UPDATE_TX_STAT(hw, q, ucastBytesTxOK, res);
	VMXNET3_UPDATE_TX_STAT(hw, q, mcastBytesTxOK, res);
	VMXNET3_UPDATE_TX_STAT(hw, q, bcastBytesTxOK, res);
	VMXNET3_UPDATE_TX_STAT(hw, q, pktsTxError, res);
	VMXNET3_UPDATE_TX_STAT(hw, q, pktsTxDiscard, res);

#undef VMXNET3_UPDATE_TX_STAT
}

static void
vmxnet3_hw_rx_stats_get(struct vmxnet3_hw *hw, unsigned int q,
			struct UPT1_RxStats *res)
{
#define VMXNET3_UPDATE_RX_STAT(h, i, f, r)		\
		((r)->f = (h)->rqd_start[(i)].stats.f +	\
			(h)->saved_rx_stats[(i)].f)

	VMXNET3_UPDATE_RX_STAT(hw, q, ucastPktsRxOK, res);
	VMXNET3_UPDATE_RX_STAT(hw, q, mcastPktsRxOK, res);
	VMXNET3_UPDATE_RX_STAT(hw, q, bcastPktsRxOK, res);
	VMXNET3_UPDATE_RX_STAT(hw, q, ucastBytesRxOK, res);
	VMXNET3_UPDATE_RX_STAT(hw, q, mcastBytesRxOK, res);
	VMXNET3_UPDATE_RX_STAT(hw, q, bcastBytesRxOK, res);
	VMXNET3_UPDATE_RX_STAT(hw, q, pktsRxError, res);
	VMXNET3_UPDATE_RX_STAT(hw, q, pktsRxOutOfBuf, res);

#undef VMXNET3_UPDATE_RX_STAT
}

static void
vmxnet3_tx_stats_get(struct vmxnet3_hw *hw, unsigned int q,
		     struct UPT1_TxStats *res)
{
	vmxnet3_hw_tx_stats_get(hw, q, res);

#define VMXNET3_REDUCE_SNAPSHOT_TX_STAT(h, i, f, r)	\
		((r)->f -= (h)->snapshot_tx_stats[(i)].f)

	VMXNET3_REDUCE_SNAPSHOT_TX_STAT(hw, q, ucastPktsTxOK, res);
	VMXNET3_REDUCE_SNAPSHOT_TX_STAT(hw, q, mcastPktsTxOK, res);
	VMXNET3_REDUCE_SNAPSHOT_TX_STAT(hw, q, bcastPktsTxOK, res);
	VMXNET3_REDUCE_SNAPSHOT_TX_STAT(hw, q, ucastBytesTxOK, res);
	VMXNET3_REDUCE_SNAPSHOT_TX_STAT(hw, q, mcastBytesTxOK, res);
	VMXNET3_REDUCE_SNAPSHOT_TX_STAT(hw, q, bcastBytesTxOK, res);
	VMXNET3_REDUCE_SNAPSHOT_TX_STAT(hw, q, pktsTxError, res);
	VMXNET3_REDUCE_SNAPSHOT_TX_STAT(hw, q, pktsTxDiscard, res);

#undef VMXNET3_REDUCE_SNAPSHOT_TX_STAT
}

static void
vmxnet3_rx_stats_get(struct vmxnet3_hw *hw, unsigned int q,
		     struct UPT1_RxStats *res)
{
	vmxnet3_hw_rx_stats_get(hw, q, res);

#define VMXNET3_REDUCE_SNAPSHOT_RX_STAT(h, i, f, r)	\
		((r)->f -= (h)->snapshot_rx_stats[(i)].f)

	VMXNET3_REDUCE_SNAPSHOT_RX_STAT(hw, q, ucastPktsRxOK, res);
	VMXNET3_REDUCE_SNAPSHOT_RX_STAT(hw, q, mcastPktsRxOK, res);
	VMXNET3_REDUCE_SNAPSHOT_RX_STAT(hw, q, bcastPktsRxOK, res);
	VMXNET3_REDUCE_SNAPSHOT_RX_STAT(hw, q, ucastBytesRxOK, res);
	VMXNET3_REDUCE_SNAPSHOT_RX_STAT(hw, q, mcastBytesRxOK, res);
	VMXNET3_REDUCE_SNAPSHOT_RX_STAT(hw, q, bcastBytesRxOK, res);
	VMXNET3_REDUCE_SNAPSHOT_RX_STAT(hw, q, pktsRxError, res);
	VMXNET3_REDUCE_SNAPSHOT_RX_STAT(hw, q, pktsRxOutOfBuf, res);

#undef VMXNET3_REDUCE_SNAPSHOT_RX_STAT
}

static void
vmxnet3_hw_stats_save(struct vmxnet3_hw *hw)
{
	unsigned int i;

	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);

	RTE_BUILD_BUG_ON(RTE_ETHDEV_QUEUE_STAT_CNTRS < VMXNET3_MAX_TX_QUEUES);

	for (i = 0; i < hw->num_tx_queues; i++)
		vmxnet3_hw_tx_stats_get(hw, i, &hw->saved_tx_stats[i]);
	for (i = 0; i < hw->num_rx_queues; i++)
		vmxnet3_hw_rx_stats_get(hw, i, &hw->saved_rx_stats[i]);
}
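
/*
 * xstats follow the usual two-pass ethdev contract: when the caller's
 * array is missing or too small, return the required count so the caller
 * can resize and retry; otherwise fill one entry per queue counter.
 */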
static int
vmxnet3_dev_xstats_get_names(struct rte_eth_dev *dev,
			     struct rte_eth_xstat_name *xstats_names,
			     unsigned int n)
{
	unsigned int i, t, count = 0;
	unsigned int nstats =
		dev->data->nb_tx_queues * RTE_DIM(vmxnet3_txq_stat_strings) +
		dev->data->nb_rx_queues * RTE_DIM(vmxnet3_rxq_stat_strings);

	if (!xstats_names || n < nstats)
		return nstats;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		if (!dev->data->rx_queues[i])
			continue;

		for (t = 0; t < RTE_DIM(vmxnet3_rxq_stat_strings); t++) {
			snprintf(xstats_names[count].name,
				 sizeof(xstats_names[count].name),
				 "rx_q%u_%s", i,
				 vmxnet3_rxq_stat_strings[t].name);
			count++;
		}
	}

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		if (!dev->data->tx_queues[i])
			continue;

		for (t = 0; t < RTE_DIM(vmxnet3_txq_stat_strings); t++) {
			snprintf(xstats_names[count].name,
				 sizeof(xstats_names[count].name),
				 "tx_q%u_%s", i,
				 vmxnet3_txq_stat_strings[t].name);
			count++;
		}
	}

	return count;
}

static int
vmxnet3_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
		       unsigned int n)
{
	unsigned int i, t, count = 0;
	unsigned int nstats =
		dev->data->nb_tx_queues * RTE_DIM(vmxnet3_txq_stat_strings) +
		dev->data->nb_rx_queues * RTE_DIM(vmxnet3_rxq_stat_strings);

	if (n < nstats)
		return nstats;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		struct vmxnet3_rx_queue *rxq = dev->data->rx_queues[i];

		if (rxq == NULL)
			continue;

		for (t = 0; t < RTE_DIM(vmxnet3_rxq_stat_strings); t++) {
			xstats[count].value = *(uint64_t *)(((char *)&rxq->stats) +
				vmxnet3_rxq_stat_strings[t].offset);
			xstats[count].id = count;
			count++;
		}
	}

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		struct vmxnet3_tx_queue *txq = dev->data->tx_queues[i];

		if (txq == NULL)
			continue;

		for (t = 0; t < RTE_DIM(vmxnet3_txq_stat_strings); t++) {
			xstats[count].value = *(uint64_t *)(((char *)&txq->stats) +
				vmxnet3_txq_stat_strings[t].offset);
			xstats[count].id = count;
			count++;
		}
	}

	return count;
}

static int
vmxnet3_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	unsigned int i;
	struct vmxnet3_hw *hw = dev->data->dev_private;
	struct UPT1_TxStats txStats;
	struct UPT1_RxStats rxStats;

	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);

	RTE_BUILD_BUG_ON(RTE_ETHDEV_QUEUE_STAT_CNTRS < VMXNET3_MAX_TX_QUEUES);
	for (i = 0; i < hw->num_tx_queues; i++) {
		vmxnet3_tx_stats_get(hw, i, &txStats);

		stats->q_opackets[i] = txStats.ucastPktsTxOK +
			txStats.mcastPktsTxOK +
			txStats.bcastPktsTxOK;

		stats->q_obytes[i] = txStats.ucastBytesTxOK +
			txStats.mcastBytesTxOK +
			txStats.bcastBytesTxOK;

		stats->opackets += stats->q_opackets[i];
		stats->obytes += stats->q_obytes[i];
		stats->oerrors += txStats.pktsTxError + txStats.pktsTxDiscard;
	}

	RTE_BUILD_BUG_ON(RTE_ETHDEV_QUEUE_STAT_CNTRS < VMXNET3_MAX_RX_QUEUES);
	for (i = 0; i < hw->num_rx_queues; i++) {
		vmxnet3_rx_stats_get(hw, i, &rxStats);

		stats->q_ipackets[i] = rxStats.ucastPktsRxOK +
			rxStats.mcastPktsRxOK +
			rxStats.bcastPktsRxOK;

		stats->q_ibytes[i] = rxStats.ucastBytesRxOK +
			rxStats.mcastBytesRxOK +
			rxStats.bcastBytesRxOK;

		stats->ipackets += stats->q_ipackets[i];
		stats->ibytes += stats->q_ibytes[i];

		stats->q_errors[i] = rxStats.pktsRxError;
		stats->ierrors += rxStats.pktsRxError;
		stats->imissed += rxStats.pktsRxOutOfBuf;
	}

	return 0;
}
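
/*
 * Stats reset is emulated in software: snapshot the cumulative per-queue
 * counters and let the vmxnet3_tx/rx_stats_get() helpers subtract the
 * snapshot from subsequent reads.
 */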
static void
vmxnet3_dev_stats_reset(struct rte_eth_dev *dev)
{
	unsigned int i;
	struct vmxnet3_hw *hw = dev->data->dev_private;
	struct UPT1_TxStats txStats;
	struct UPT1_RxStats rxStats;

	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);

	RTE_BUILD_BUG_ON(RTE_ETHDEV_QUEUE_STAT_CNTRS < VMXNET3_MAX_TX_QUEUES);

	for (i = 0; i < hw->num_tx_queues; i++) {
		vmxnet3_hw_tx_stats_get(hw, i, &txStats);
		memcpy(&hw->snapshot_tx_stats[i], &txStats,
		       sizeof(hw->snapshot_tx_stats[0]));
	}
	for (i = 0; i < hw->num_rx_queues; i++) {
		vmxnet3_hw_rx_stats_get(hw, i, &rxStats);
		memcpy(&hw->snapshot_rx_stats[i], &rxStats,
		       sizeof(hw->snapshot_rx_stats[0]));
	}
}

static void
vmxnet3_dev_info_get(struct rte_eth_dev *dev __rte_unused,
		     struct rte_eth_dev_info *dev_info)
{
	dev_info->max_rx_queues = VMXNET3_MAX_RX_QUEUES;
	dev_info->max_tx_queues = VMXNET3_MAX_TX_QUEUES;
	dev_info->min_rx_bufsize = 1518 + RTE_PKTMBUF_HEADROOM;
	dev_info->max_rx_pktlen = 16384; /* includes CRC, cf MAXFRS register */
	dev_info->speed_capa = ETH_LINK_SPEED_10G;
	dev_info->max_mac_addrs = VMXNET3_MAX_MAC_ADDRS;

	dev_info->flow_type_rss_offloads = VMXNET3_RSS_OFFLOAD_ALL;

	dev_info->rx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = VMXNET3_RX_RING_MAX_SIZE,
		.nb_min = VMXNET3_DEF_RX_RING_SIZE,
		.nb_align = 1,
	};

	dev_info->tx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = VMXNET3_TX_RING_MAX_SIZE,
		.nb_min = VMXNET3_DEF_TX_RING_SIZE,
		.nb_align = 1,
		.nb_seg_max = VMXNET3_TX_MAX_SEG,
		.nb_mtu_seg_max = VMXNET3_MAX_TXD_PER_PKT,
	};

	dev_info->rx_offload_capa = VMXNET3_RX_OFFLOAD_CAP;
	dev_info->rx_queue_offload_capa = 0;
	dev_info->tx_offload_capa = VMXNET3_TX_OFFLOAD_CAP;
	dev_info->tx_queue_offload_capa = 0;
}

static const uint32_t *
vmxnet3_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L3_IPV4_EXT,
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_UNKNOWN
	};

	if (dev->rx_pkt_burst == vmxnet3_recv_pkts)
		return ptypes;
	return NULL;
}

static int
vmxnet3_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
{
	struct vmxnet3_hw *hw = dev->data->dev_private;

	ether_addr_copy(mac_addr, (struct ether_addr *)(hw->perm_addr));
	vmxnet3_write_mac(hw, mac_addr->addr_bytes);

	return 0;
}

/* return 0 means link status changed, -1 means not changed */
static int
__vmxnet3_dev_link_update(struct rte_eth_dev *dev,
			  __rte_unused int wait_to_complete)
{
	struct vmxnet3_hw *hw = dev->data->dev_private;
	struct rte_eth_link link;
	uint32_t ret;

	memset(&link, 0, sizeof(link));

	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
	ret = VMXNET3_READ_BAR1_REG(hw, VMXNET3_REG_CMD);

	if (ret & 0x1)
		link.link_status = ETH_LINK_UP;
	link.link_duplex = ETH_LINK_FULL_DUPLEX;
	link.link_speed = ETH_SPEED_NUM_10G;
	link.link_autoneg = ETH_LINK_FIXED;

	return rte_eth_linkstatus_set(dev, &link);
}

static int
vmxnet3_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
	/* Link status doesn't change for stopped dev */
	if (dev->data->dev_started == 0)
		return -1;

	return __vmxnet3_dev_link_update(dev, wait_to_complete);
}

/* Updating rxmode through Vmxnet3_DriverShared structure in adapter */
static void
vmxnet3_dev_set_rxmode(struct vmxnet3_hw *hw, uint32_t feature, int set)
{
	struct Vmxnet3_RxFilterConf *rxConf = &hw->shared->devRead.rxFilterConf;

	if (set)
		rxConf->rxMode = rxConf->rxMode | feature;
	else
		rxConf->rxMode = rxConf->rxMode & (~feature);

	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD, VMXNET3_CMD_UPDATE_RX_MODE);
}

/* Promiscuous supported only if Vmxnet3_DriverShared is initialized in adapter */
static void
vmxnet3_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
	struct vmxnet3_hw *hw = dev->data->dev_private;
	uint32_t *vf_table = hw->shared->devRead.rxFilterConf.vfTable;

	memset(vf_table, 0, VMXNET3_VFT_TABLE_SIZE);
	vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_PROMISC, 1);

	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
			       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
}

/* Promiscuous supported only if Vmxnet3_DriverShared is initialized in adapter */
static void
vmxnet3_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
	struct vmxnet3_hw *hw = dev->data->dev_private;
	uint32_t *vf_table = hw->shared->devRead.rxFilterConf.vfTable;
	uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;

	if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
		memcpy(vf_table, hw->shadow_vfta, VMXNET3_VFT_TABLE_SIZE);
	else
		memset(vf_table, 0xff, VMXNET3_VFT_TABLE_SIZE);
	vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_PROMISC, 0);
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
			       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
}

/* Allmulticast supported only if Vmxnet3_DriverShared is initialized in adapter */
static void
vmxnet3_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
	struct vmxnet3_hw *hw = dev->data->dev_private;

	vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_ALL_MULTI, 1);
}

/* Allmulticast supported only if Vmxnet3_DriverShared is initialized in adapter */
static void
vmxnet3_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
	struct vmxnet3_hw *hw = dev->data->dev_private;

	vmxnet3_dev_set_rxmode(hw, VMXNET3_RXM_ALL_MULTI, 0);
}
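
/*
 * The VLAN filter table (vfTable) is a 4096-bit bitmap stored as an array
 * of uint32_t, one bit per VLAN id. Assuming the usual bitmap layout of
 * the VMXNET3_SET/CLEAR_VFTABLE_ENTRY() macros, e.g. vid 100 lives in
 * word 100 / 32 = 3, bit 100 % 32 = 4. shadow_vfta keeps a copy so the
 * table can be restored after promiscuous mode clobbers it.
 */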
/* Enable/disable filter on vlan */
static int
vmxnet3_dev_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vid, int on)
{
	struct vmxnet3_hw *hw = dev->data->dev_private;
	struct Vmxnet3_RxFilterConf *rxConf = &hw->shared->devRead.rxFilterConf;
	uint32_t *vf_table = rxConf->vfTable;

	/* save state for restore */
	if (on)
		VMXNET3_SET_VFTABLE_ENTRY(hw->shadow_vfta, vid);
	else
		VMXNET3_CLEAR_VFTABLE_ENTRY(hw->shadow_vfta, vid);

	/* don't change active filter if in promiscuous mode */
	if (rxConf->rxMode & VMXNET3_RXM_PROMISC)
		return 0;

	/* set in hardware */
	if (on)
		VMXNET3_SET_VFTABLE_ENTRY(vf_table, vid);
	else
		VMXNET3_CLEAR_VFTABLE_ENTRY(vf_table, vid);

	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
			       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
	return 0;
}

static int
vmxnet3_dev_vlan_offload_set(struct rte_eth_dev *dev, int mask)
{
	struct vmxnet3_hw *hw = dev->data->dev_private;
	Vmxnet3_DSDevRead *devRead = &hw->shared->devRead;
	uint32_t *vf_table = devRead->rxFilterConf.vfTable;
	uint64_t rx_offloads = dev->data->dev_conf.rxmode.offloads;

	if (mask & ETH_VLAN_STRIP_MASK) {
		if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
			devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
		else
			devRead->misc.uptFeatures &= ~UPT1_F_RXVLAN;

		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
				       VMXNET3_CMD_UPDATE_FEATURE);
	}

	if (mask & ETH_VLAN_FILTER_MASK) {
		if (rx_offloads & DEV_RX_OFFLOAD_VLAN_FILTER)
			memcpy(vf_table, hw->shadow_vfta, VMXNET3_VFT_TABLE_SIZE);
		else
			memset(vf_table, 0xff, VMXNET3_VFT_TABLE_SIZE);

		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
				       VMXNET3_CMD_UPDATE_VLAN_FILTERS);
	}

	return 0;
}

static void
vmxnet3_process_events(struct rte_eth_dev *dev)
{
	struct vmxnet3_hw *hw = dev->data->dev_private;
	uint32_t events = hw->shared->ecr;

	if (!events)
		return;

	/*
	 * ECR bits when written with 1b are cleared. Hence write
	 * events back to ECR so that the bits which were set will be reset.
	 */
	VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_ECR, events);

	/* Check if link state has changed */
	if (events & VMXNET3_ECR_LINK) {
		PMD_DRV_LOG(DEBUG, "Process events: VMXNET3_ECR_LINK event");
		if (vmxnet3_dev_link_update(dev, 0) == 0)
			_rte_eth_dev_callback_process(dev,
						      RTE_ETH_EVENT_INTR_LSC,
						      NULL);
	}

	/* Check if there is an error on xmit/recv queues */
	if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) {
		VMXNET3_WRITE_BAR1_REG(hw, VMXNET3_REG_CMD,
				       VMXNET3_CMD_GET_QUEUE_STATUS);

		if (hw->tqd_start->status.stopped)
			PMD_DRV_LOG(ERR, "tq error 0x%x",
				    hw->tqd_start->status.error);

		if (hw->rqd_start->status.stopped)
			PMD_DRV_LOG(ERR, "rq error 0x%x",
				    hw->rqd_start->status.error);

		/* Have to reset the device */
	}

	if (events & VMXNET3_ECR_DIC)
		PMD_DRV_LOG(DEBUG, "Device implementation change event.");

	if (events & VMXNET3_ECR_DEBUG)
		PMD_DRV_LOG(DEBUG, "Debug event generated by device.");
}

static void
vmxnet3_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = param;
	struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);

	vmxnet3_process_events(dev);

	if (rte_intr_enable(&pci_dev->intr_handle) < 0)
		PMD_DRV_LOG(ERR, "interrupt enable failed");
}

RTE_PMD_REGISTER_PCI(net_vmxnet3, rte_vmxnet3_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_vmxnet3, pci_id_vmxnet3_map);
RTE_PMD_REGISTER_KMOD_DEP(net_vmxnet3, "* igb_uio | uio_pci_generic | vfio-pci");

RTE_INIT(vmxnet3_init_log)
{
	vmxnet3_logtype_init = rte_log_register("pmd.net.vmxnet3.init");
	if (vmxnet3_logtype_init >= 0)
		rte_log_set_level(vmxnet3_logtype_init, RTE_LOG_NOTICE);
	vmxnet3_logtype_driver = rte_log_register("pmd.net.vmxnet3.driver");
	if (vmxnet3_logtype_driver >= 0)
		rte_log_set_level(vmxnet3_logtype_driver, RTE_LOG_NOTICE);
}