2 * Copyright (c) 2018 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
16 #include <vppinfra/types.h>
17 #include <vlib/vlib.h>
18 #include <vlib/pci/pci.h>
19 #include <vnet/ethernet/ethernet.h>
20 #include <vnet/plugin/plugin.h>
21 #include <vpp/app/version.h>
23 #include <vmxnet3/vmxnet3.h>
/* PCI vendor/device IDs used to match VMware vmxnet3 virtual NICs. */
25 #define PCI_VENDOR_ID_VMWARE 0x15ad
26 #define PCI_DEVICE_ID_VMWARE_VMXNET3 0x07b0
/* Global plugin state (device pool, log class, ...). Single instance. */
28 vmxnet3_main_t vmxnet3_main;
/* Match table passed to vlib_pci_device_open() below.
 * NOTE(review): the initializer's opening/terminator lines are not visible
 * in this listing — presumably ends with a zero sentinel entry; confirm. */
30 static pci_device_id_t vmxnet3_pci_device_ids[] = {
32 .vendor_id = PCI_VENDOR_ID_VMWARE,
33 .device_id = PCI_DEVICE_ID_VMWARE_VMXNET3},
/*
 * Device-class admin up/down callback (wired into VNET_DEVICE_CLASS below).
 * Refuses the change when the device is in the error state; otherwise sets
 * or clears the hw link flag and the ADMIN_UP bit in vd->flags.
 * NOTE(review): several interior lines (the is_up branch structure, return)
 * are not visible in this listing.
 */
38 vmxnet3_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index,
41 vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, hw_if_index);
42 vmxnet3_main_t *vmxm = &vmxnet3_main;
/* NOTE(review): vmxm->devices is a pool (pool_get/pool_put elsewhere in this
 * file); every other lookup uses pool_elt_at_index — vec_elt_at_index here
 * skips the pool free-element check. Consider making this consistent. */
43 vmxnet3_device_t *vd = vec_elt_at_index (vmxm->devices, hi->dev_instance);
44 uword is_up = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) != 0;
/* A device that hit a fatal error cannot be brought up or down. */
46 if (vd->flags & VMXNET3_DEVICE_F_ERROR)
47 return clib_error_return (0, "device is in error state");
/* Admin up path: report link up and remember admin state. */
51 vnet_hw_interface_set_flags (vnm, vd->hw_if_index,
52 VNET_HW_INTERFACE_FLAG_LINK_UP);
53 vd->flags |= VMXNET3_DEVICE_F_ADMIN_UP;
/* Admin down path: clear hw flags and the admin-up bit. */
57 vnet_hw_interface_set_flags (vnm, vd->hw_if_index, 0);
58 vd->flags &= ~VMXNET3_DEVICE_F_ADMIN_UP;
/*
 * Per-queue rx mode (polling vs interrupt) change callback.
 * Looks up the device and the affected rx queue, then (in lines not visible
 * here) presumably toggles rxq->int_mode based on the requested mode —
 * confirm against the full source.
 */
64 vmxnet3_interface_rx_mode_change (vnet_main_t * vnm, u32 hw_if_index, u32 qid,
65 vnet_hw_interface_rx_mode mode)
67 vmxnet3_main_t *vmxm = &vmxnet3_main;
68 vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
69 vmxnet3_device_t *vd = pool_elt_at_index (vmxm->devices, hw->dev_instance);
70 vmxnet3_rxq_t *rxq = vec_elt_at_index (vd->rxqs, qid);
72 if (mode == VNET_HW_INTERFACE_RX_MODE_POLLING)
/*
 * rx_redirect_to_node callback: redirect packets from this interface to an
 * arbitrary graph node, or restore default next-node dispatch.
 * NOTE(review): the branch structure around "shut off redirection" vs
 * "redirect to node_index" is partially missing from this listing.
 */
81 vmxnet3_set_interface_next_node (vnet_main_t * vnm, u32 hw_if_index,
84 vmxnet3_main_t *vmxm = &vmxnet3_main;
85 vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
86 vmxnet3_device_t *vd = pool_elt_at_index (vmxm->devices, hw->dev_instance);
88 /* Shut off redirection */
91 vd->per_interface_next_index = node_index;
/* Register the target node as a next of vmxnet3-input and remember the
 * resulting next index for the rx path. */
95 vd->per_interface_next_index =
96 vlib_node_add_next (vlib_get_main (), vmxnet3_input_node.index,
/*
 * clear_counters callback: snapshot current device stats into the per-queue
 * tx_stats/rx_stats vectors so subsequent displays show deltas from now
 * (counters are not actually reset in hardware).
 */
101 vmxnet3_clear_hw_interface_counters (u32 instance)
103 vmxnet3_main_t *vmxm = &vmxnet3_main;
104 vmxnet3_device_t *vd = pool_elt_at_index (vmxm->devices, instance);
105 vmxnet3_tx_queue *tx = VMXNET3_TX_START (vd);
106 vmxnet3_rx_queue *rx = VMXNET3_RX_START (vd);
110 * Set the "last_cleared_stats" to the current stats, so that
111 * things appear to clear from a display perspective.
/* Ask the device to refresh the stats blocks before copying them. */
113 vmxnet3_reg_write (vd, 1, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
/* Copy each queue's live stats into the "baseline" vectors.
 * NOTE(review): the per-iteration tx++/rx++ advance (if any) is not visible
 * in this listing — confirm tx/rx step through the queue array. */
115 vec_foreach_index (qid, vd->txqs)
117 vmxnet3_tx_stats *txs = vec_elt_at_index (vd->tx_stats, qid);
118 clib_memcpy (txs, &tx->stats, sizeof (*txs));
121 vec_foreach_index (qid, vd->rxqs)
123 vmxnet3_rx_stats *rxs = vec_elt_at_index (vd->rx_stats, qid);
124 clib_memcpy (rxs, &rx->stats, sizeof (*rxs));
/* Human-readable strings for the tx error counters, generated from the
 * foreach_vmxnet3_tx_func_error X-macro (defined in vmxnet3.h). */
129 static char *vmxnet3_tx_func_error_strings[] = {
131 foreach_vmxnet3_tx_func_error
/* Device-class registration: wires the callbacks defined above into the
 * vnet interface infrastructure for all vmxnet3 interfaces. */
136 VNET_DEVICE_CLASS (vmxnet3_device_class,) =
138 .name = "VMXNET3 interface",
139 .format_device = format_vmxnet3_device,
140 .format_device_name = format_vmxnet3_device_name,
141 .admin_up_down_function = vmxnet3_interface_admin_up_down,
142 .clear_counters = vmxnet3_clear_hw_interface_counters,
143 .rx_mode_change_function = vmxnet3_interface_rx_mode_change,
144 .rx_redirect_to_node = vmxnet3_set_interface_next_node,
145 .tx_function_n_errors = VMXNET3_TX_N_ERROR,
146 .tx_function_error_strings = vmxnet3_tx_func_error_strings,
151 vmxnet3_flag_change (vnet_main_t * vnm, vnet_hw_interface_t * hw, u32 flags)
/*
 * Program the device's MAC address registers from vd->mac_addr:
 * low 4 bytes into MACL, high 2 bytes into MACH.
 * NOTE(review): bare memcpy here vs clib_memcpy elsewhere in this file —
 * consider making it consistent.
 */
157 vmxnet3_write_mac (vmxnet3_device_t * vd)
161 memcpy (&val, vd->mac_addr, 4);
162 vmxnet3_reg_write (vd, 1, VMXNET3_REG_MACL, val);
165 memcpy (&val, vd->mac_addr + 4, 2);
166 vmxnet3_reg_write (vd, 1, VMXNET3_REG_MACH, val);
169 static clib_error_t *
/*
 * Build the "driver shared" region the vmxnet3 device reads at activation:
 * allocates DMA-able physmem, fills in per-queue descriptor/completion ring
 * addresses and sizes, global capability/feature bits, interrupt layout and
 * the rx filter mode, then programs the region's DMA address into DSAL/DSAH.
 * Returns a clib error on physmem allocation failure, 0 on success.
 * NOTE(review): loop braces and some statements are missing from this
 * listing; tx/rx are assumed to step through VMXNET3_TX_START/RX_START
 * queue arrays — confirm against the full source.
 */
170 vmxnet3_provision_driver_shared (vlib_main_t * vm, vmxnet3_device_t * vd)
172 vmxnet3_shared *shared;
175 vmxnet3_tx_queue *tx = VMXNET3_TX_START (vd);
176 vmxnet3_rx_queue *rx = VMXNET3_RX_START (vd);
/* 512-byte alignment is required for the shared region's DMA address. */
179 vlib_physmem_alloc_aligned_on_numa (vm, sizeof (*vd->driver_shared), 512,
181 if (vd->driver_shared == 0)
182 return vlib_physmem_last_error (vm);
184 clib_memset (vd->driver_shared, 0, sizeof (*vd->driver_shared));
/* Publish each tx queue's descriptor/completion rings to the device. */
186 vec_foreach_index (qid, vd->txqs)
188 vmxnet3_txq_t *txq = vec_elt_at_index (vd->txqs, qid);
190 tx->cfg.desc_address = vmxnet3_dma_addr (vm, vd, txq->tx_desc);
191 tx->cfg.comp_address = vmxnet3_dma_addr (vm, vd, txq->tx_comp);
192 tx->cfg.num_desc = txq->size;
193 tx->cfg.num_comp = txq->size;
/* Publish each rx queue: one descriptor ring per rid (ring 0/1) plus a
 * single completion ring; each queue gets its own interrupt index. */
197 vec_foreach_index (qid, vd->rxqs)
199 vmxnet3_rxq_t *rxq = vec_elt_at_index (vd->rxqs, qid);
201 for (rid = 0; rid < VMXNET3_RX_RING_SIZE; rid++)
203 rx->cfg.desc_address[rid] = vmxnet3_dma_addr (vm, vd,
205 rx->cfg.num_desc[rid] = rxq->size;
207 rx->cfg.comp_address = vmxnet3_dma_addr (vm, vd, rxq->rx_comp);
208 rx->cfg.num_comp = rxq->size;
209 rx->cfg.intr_index = qid;
/* Global shared-region fields: magic, guest OS info, feature bits. */
213 shared = vd->driver_shared;
214 shared->magic = VMXNET3_SHARED_MAGIC;
215 shared->misc.version = VMXNET3_VERSION_MAGIC;
216 if (sizeof (void *) == 4)
217 shared->misc.guest_info = VMXNET3_GOS_BITS_32;
219 shared->misc.guest_info = VMXNET3_GOS_BITS_64;
220 shared->misc.guest_info |= VMXNET3_GOS_TYPE_LINUX;
221 shared->misc.version_support = VMXNET3_VERSION_SELECT;
222 shared->misc.upt_features = VMXNET3_F_RXCSUM;
224 shared->misc.upt_features |= VMXNET3_F_LRO;
/* RSS is only enabled with more than one rx queue; vd->rss was set up by
 * vmxnet3_rss_init in that case. */
225 if (vd->num_rx_queues > 1)
227 shared->misc.upt_features |= VMXNET3_F_RSS;
228 shared->rss.version = 1;
229 shared->rss.address = vmxnet3_dma_addr (vm, vd, vd->rss);
230 shared->rss.length = sizeof (*vd->rss);
232 shared->misc.max_num_rx_sg = 0;
233 shared->misc.upt_version_support = VMXNET3_UPT_VERSION_SELECT;
234 shared->misc.queue_desc_address = vmxnet3_dma_addr (vm, vd, vd->queues);
235 shared->misc.queue_desc_len = sizeof (*tx) * vd->num_tx_queues +
236 sizeof (*rx) * vd->num_rx_queues;
237 shared->misc.mtu = VMXNET3_MTU;
238 shared->misc.num_tx_queues = vd->num_tx_queues;
239 shared->misc.num_rx_queues = vd->num_rx_queues;
/* Event interrupt sits after the per-rx-queue interrupts (index = nrxq). */
240 shared->interrupt.num_intrs = vd->num_intrs;
241 shared->interrupt.event_intr_index = vd->num_rx_queues;
242 shared->interrupt.control = VMXNET3_IC_DISABLE_ALL;
/* Accept everything; actual filtering is done upstream in VPP. */
243 shared->rx_filter.mode = VMXNET3_RXMODE_UCAST | VMXNET3_RXMODE_BCAST |
244 VMXNET3_RXMODE_ALL_MULTI | VMXNET3_RXMODE_PROMISC;
245 shared_dma = vmxnet3_dma_addr (vm, vd, shared);
/* Hand the shared region's 64-bit DMA address to the device. */
247 vmxnet3_reg_write (vd, 1, VMXNET3_REG_DSAL, shared_dma);
248 vmxnet3_reg_write (vd, 1, VMXNET3_REG_DSAH, shared_dma >> 32);
/*
 * Unmask all device interrupts: clear the global disable bit in the shared
 * region, then write 0 (unmask) to each per-vector IMR register.
 */
254 vmxnet3_enable_interrupt (vmxnet3_device_t * vd)
257 vmxnet3_shared *shared = vd->driver_shared;
259 shared->interrupt.control &= ~VMXNET3_IC_DISABLE_ALL;
260 for (i = 0; i < vd->num_intrs; i++)
261 vmxnet3_reg_write (vd, 0, VMXNET3_REG_IMR + i * 8, 0);
/*
 * Mask all device interrupts: set the global disable bit in the shared
 * region and write 1 (mask) to each per-vector IMR register. Mirror image
 * of vmxnet3_enable_interrupt above.
 */
265 vmxnet3_disable_interrupt (vmxnet3_device_t * vd)
268 vmxnet3_shared *shared = vd->driver_shared;
270 shared->interrupt.control |= VMXNET3_IC_DISABLE_ALL;
271 for (i = 0; i < vd->num_intrs; i++)
272 vmxnet3_reg_write (vd, 0, VMXNET3_REG_IMR + i * 8, 1);
275 static clib_error_t *
/*
 * Allocate and zero one rx queue (index qid, qsz descriptors per ring):
 * per-queue stats slot, the rxq structure itself, one DMA descriptor ring
 * per rid (VMXNET3_RX_RING_SIZE rings), and one completion ring. Also
 * initializes each ring's generation bit and the buffer-index vectors.
 * Returns a clib error on physmem allocation failure, 0 on success.
 */
276 vmxnet3_rxq_init (vlib_main_t * vm, vmxnet3_device_t * vd, u16 qid, u16 qsz)
279 vmxnet3_rx_stats *rxs;
282 vec_validate (vd->rx_stats, qid);
283 rxs = vec_elt_at_index (vd->rx_stats, qid);
284 clib_memset (rxs, 0, sizeof (*rxs));
286 vec_validate_aligned (vd->rxqs, qid, CLIB_CACHE_LINE_BYTES);
287 rxq = vec_elt_at_index (vd->rxqs, qid);
288 clib_memset (rxq, 0, sizeof (*rxq));
/* One descriptor ring per rid; 512-byte aligned DMA memory on the
 * device's NUMA node. */
290 for (rid = 0; rid < VMXNET3_RX_RING_SIZE; rid++)
292 rxq->rx_desc[rid] = vlib_physmem_alloc_aligned_on_numa
293 (vm, qsz * sizeof (*rxq->rx_desc[rid]), 512, vd->numa_node);
295 if (rxq->rx_desc[rid] == 0)
296 return vlib_physmem_last_error (vm);
298 clib_memset (rxq->rx_desc[rid], 0, qsz * sizeof (*rxq->rx_desc[rid]));
/* Single completion ring shared by both descriptor rings. */
301 vlib_physmem_alloc_aligned_on_numa (vm, qsz * sizeof (*rxq->rx_comp), 512,
303 if (rxq->rx_comp == 0)
304 return vlib_physmem_last_error (vm);
306 clib_memset (rxq->rx_comp, 0, qsz * sizeof (*rxq->rx_comp));
/* Seed each ring's generation bit; the device flips it on wrap. */
307 for (rid = 0; rid < VMXNET3_RX_RING_SIZE; rid++)
309 vmxnet3_rx_ring *ring;
311 ring = &rxq->rx_ring[rid];
312 ring->gen = VMXNET3_RXF_GEN;
314 vec_validate_aligned (ring->bufs, rxq->size, CLIB_CACHE_LINE_BYTES);
316 rxq->rx_comp_ring.gen = VMXNET3_RXCF_GEN;
321 static clib_error_t *
/*
 * Allocate and zero one tx queue. Called once per VPP worker thread; when
 * there are more threads than tx queues, threads beyond num_tx_queues share
 * an existing queue (qid wraps modulo num_tx_queues) and that queue gets a
 * spinlock plus the SHARED_TXQ_LOCK device flag.
 * Allocates per-queue stats, the DMA tx descriptor and completion rings,
 * and initializes ring generation bits and the TXPROD doorbell offset.
 * Returns a clib error on physmem allocation failure, 0 on success.
 */
322 vmxnet3_txq_init (vlib_main_t * vm, vmxnet3_device_t * vd, u16 qid, u16 qsz)
325 vmxnet3_tx_stats *txs;
/* Thread shares an already-initialized queue: just add locking. */
328 if (qid >= vd->num_tx_queues)
330 qid = qid % vd->num_tx_queues;
331 txq = vec_elt_at_index (vd->txqs, qid);
333 clib_spinlock_init (&txq->lock);
334 vd->flags |= VMXNET3_DEVICE_F_SHARED_TXQ_LOCK;
338 vec_validate (vd->tx_stats, qid);
339 txs = vec_elt_at_index (vd->tx_stats, qid);
340 clib_memset (txs, 0, sizeof (*txs));
342 vec_validate_aligned (vd->txqs, qid, CLIB_CACHE_LINE_BYTES);
343 txq = vec_elt_at_index (vd->txqs, qid);
344 clib_memset (txq, 0, sizeof (*txq));
/* Each queue's producer doorbell register is 8 bytes apart. */
346 txq->reg_txprod = qid * 8 + VMXNET3_REG_TXPROD;
348 size = qsz * sizeof (*txq->tx_desc);
350 vlib_physmem_alloc_aligned_on_numa (vm, size, 512, vd->numa_node);
351 if (txq->tx_desc == 0)
352 return vlib_physmem_last_error (vm);
/* NOTE(review): bare memset here vs clib_memset everywhere else in this
 * file — consider making it consistent. */
354 memset (txq->tx_desc, 0, size);
356 size = qsz * sizeof (*txq->tx_comp);
358 vlib_physmem_alloc_aligned_on_numa (vm, size, 512, vd->numa_node);
359 if (txq->tx_comp == 0)
360 return vlib_physmem_last_error (vm);
362 clib_memset (txq->tx_comp, 0, size);
363 vec_validate_aligned (txq->tx_ring.bufs, txq->size, CLIB_CACHE_LINE_BYTES);
/* Seed generation bits; the device flips them on ring wrap. */
364 txq->tx_ring.gen = VMXNET3_TXF_GEN;
365 txq->tx_comp_ring.gen = VMXNET3_TXCF_GEN;
/* Fixed Toeplitz RSS hash key copied into the device's RSS config.
 * A fixed key makes flow-to-queue mapping deterministic across restarts. */
370 static const u8 vmxnet3_rss_key[VMXNET3_RSS_MAX_KEY_SZ] = {
371 0x3b, 0x56, 0xd1, 0x56, 0x13, 0x4a, 0xe7, 0xac,
372 0xe8, 0x79, 0x09, 0x75, 0xe8, 0x65, 0x79, 0x28,
373 0x35, 0x12, 0xb9, 0x56, 0x7c, 0x76, 0x4b, 0x70,
374 0xd8, 0x56, 0xa3, 0x18, 0x9b, 0x0a, 0xee, 0xf3,
375 0x96, 0xa6, 0x9f, 0x8f, 0x9e, 0x8c, 0x90, 0xc9,
378 static clib_error_t *
/*
 * Allocate and populate the device's RSS shared config in DMA memory:
 * Toeplitz hash over IPv4/IPv6 and TCP, the fixed key above, and an
 * indirection table that round-robins flows across the rx queues.
 * Only called when num_rx_queues > 1. Returns clib error on alloc failure.
 */
379 vmxnet3_rss_init (vlib_main_t * vm, vmxnet3_device_t * vd)
381 vmxnet3_rss_shared *rss;
382 size_t size = sizeof (*rss);
385 vd->rss = vlib_physmem_alloc_aligned_on_numa (vm, size, 512, vd->numa_node);
387 return vlib_physmem_last_error (vm);
389 clib_memset (vd->rss, 0, size);
392 VMXNET3_RSS_HASH_TYPE_IPV4 | VMXNET3_RSS_HASH_TYPE_TCP_IPV4 |
393 VMXNET3_RSS_HASH_TYPE_IPV6 | VMXNET3_RSS_HASH_TYPE_TCP_IPV6;
394 rss->hash_func = VMXNET3_RSS_HASH_FUNC_TOEPLITZ;
395 rss->hash_key_sz = VMXNET3_RSS_MAX_KEY_SZ;
396 rss->ind_table_sz = VMXNET3_RSS_MAX_IND_TABLE_SZ;
397 clib_memcpy (rss->hash_key, vmxnet3_rss_key, VMXNET3_RSS_MAX_KEY_SZ);
/* Spread table entries evenly over the configured rx queues. */
398 for (i = 0; i < rss->ind_table_sz; i++)
399 rss->ind_table[i] = i % vd->num_rx_queues;
404 static clib_error_t *
/*
 * Bring up the device end-to-end: quiesce + reset, negotiate hardware and
 * UPT versions, read link state and MAC, allocate the queue descriptor
 * block, initialize RSS (if multi-queue), all rx queues and one tx queue
 * per VPP thread, provision the driver-shared region, program the MAC and
 * finally activate the device. Returns 0 on success, clib error otherwise.
 * NOTE(review): several condition/branch lines are missing from this
 * listing (e.g. the checks on `ret` after each register command).
 */
405 vmxnet3_device_init (vlib_main_t * vm, vmxnet3_device_t * vd,
406 vmxnet3_create_if_args_t * args)
408 clib_error_t *error = 0;
410 vlib_thread_main_t *tm = vlib_get_thread_main ();
412 /* Quiesce the device */
413 vmxnet3_reg_write (vd, 1, VMXNET3_REG_CMD, VMXNET3_CMD_QUIESCE_DEV);
414 ret = vmxnet3_reg_read (vd, 1, VMXNET3_REG_CMD);
417 error = clib_error_return (0, "error on quiescing device rc (%u)", ret);
421 /* Reset the device */
422 vmxnet3_reg_write (vd, 1, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV);
423 ret = vmxnet3_reg_read (vd, 1, VMXNET3_REG_CMD);
426 error = clib_error_return (0, "error on resetting device rc (%u)", ret);
/* VRRS is a bitmask of supported versions; highest set bit wins. */
430 ret = vmxnet3_reg_read (vd, 1, VMXNET3_REG_VRRS);
431 vd->version = count_leading_zeros (ret);
432 vd->version = uword_bits - vd->version;
434 if (vd->version == 0)
436 error = clib_error_return (0, "unsupported hardware version %u",
441 /* cap support version to 3 */
442 vmxnet3_reg_write (vd, 1, VMXNET3_REG_VRRS,
443 1 << (clib_min (3, vd->version) - 1));
/* Negotiate the UPT (userspace passthrough) version the same way. */
445 ret = vmxnet3_reg_read (vd, 1, VMXNET3_REG_UVRS);
447 vmxnet3_reg_write (vd, 1, VMXNET3_REG_UVRS, 1);
450 error = clib_error_return (0, "unsupported upt version %u", ret);
454 /* GSO is only supported for version >= 3 */
455 if (args->enable_gso && (vd->version >= 3))
/* GET_LINK returns link state in bit 0 and speed (Mbps) in the top 16. */
460 vmxnet3_reg_write (vd, 1, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
461 ret = vmxnet3_reg_read (vd, 1, VMXNET3_REG_CMD);
464 vd->flags |= VMXNET3_DEVICE_F_LINK_UP;
465 vd->link_speed = ret >> 16;
468 vd->flags &= ~VMXNET3_DEVICE_F_LINK_UP;
470 /* Get the mac address */
471 ret = vmxnet3_reg_read (vd, 1, VMXNET3_REG_MACL);
472 clib_memcpy (vd->mac_addr, &ret, 4);
473 ret = vmxnet3_reg_read (vd, 1, VMXNET3_REG_MACH);
474 clib_memcpy (vd->mac_addr + 4, &ret, 2);
/* Contiguous block of rx + tx queue descriptors handed to the device via
 * shared->misc.queue_desc_address in vmxnet3_provision_driver_shared. */
476 size = sizeof (vmxnet3_rx_queue) * vd->num_rx_queues +
477 sizeof (vmxnet3_tx_queue) * vd->num_tx_queues;
480 vlib_physmem_alloc_aligned_on_numa (vm, size, 512, vd->numa_node);
482 return vlib_physmem_last_error (vm);
484 clib_memset (vd->queues, 0, size);
486 if (vd->num_rx_queues > 1)
488 error = vmxnet3_rss_init (vm, vd);
493 for (i = 0; i < vd->num_rx_queues; i++)
495 error = vmxnet3_rxq_init (vm, vd, i, args->rxq_size);
/* One txq-init call per VPP thread; vmxnet3_txq_init wraps qid when
 * threads outnumber tx queues. */
500 for (i = 0; i < tm->n_vlib_mains; i++)
502 error = vmxnet3_txq_init (vm, vd, i, args->txq_size);
507 error = vmxnet3_provision_driver_shared (vm, vd);
511 vmxnet3_write_mac (vd);
513 /* Activate device */
514 vmxnet3_reg_write (vd, 1, VMXNET3_REG_CMD, VMXNET3_CMD_ACTIVATE_DEV);
515 ret = vmxnet3_reg_read (vd, 1, VMXNET3_REG_CMD);
519 clib_error_return (0, "error on activating device rc (%u)", ret);
/*
 * MSI-X handler for rx-queue vectors (registered per-queue below).
 * Recovers the device from the PCI private data and, if the queue is in
 * interrupt mode, marks the device input node pending on that queue.
 * NOTE(review): the derivation of qid from `line` is not visible here.
 */
527 vmxnet3_rxq_irq_handler (vlib_main_t * vm, vlib_pci_dev_handle_t h, u16 line)
529 vnet_main_t *vnm = vnet_get_main ();
530 vmxnet3_main_t *vmxm = &vmxnet3_main;
531 uword pd = vlib_pci_get_private_data (vm, h);
532 vmxnet3_device_t *vd = pool_elt_at_index (vmxm->devices, pd);
535 if (vec_len (vd->rxqs) > qid && vd->rxqs[qid].int_mode != 0)
536 vnet_device_input_set_interrupt_pending (vnm, vd->hw_if_index, qid);
/*
 * MSI-X handler for the event vector (last interrupt; index = num_rx_queues).
 * Re-reads link state via GET_LINK and propagates it to the vnet interface:
 * link flag plus link speed (device reports Mbps; multiplied by 1000 here,
 * matching the interface-creation path below).
 * NOTE(review): the branch lines selecting up vs down are not visible.
 */
540 vmxnet3_event_irq_handler (vlib_main_t * vm, vlib_pci_dev_handle_t h,
543 vnet_main_t *vnm = vnet_get_main ();
544 vmxnet3_main_t *vmxm = &vmxnet3_main;
545 uword pd = vlib_pci_get_private_data (vm, h);
546 vmxnet3_device_t *vd = pool_elt_at_index (vmxm->devices, pd);
549 vmxnet3_reg_write (vd, 1, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
550 ret = vmxnet3_reg_read (vd, 1, VMXNET3_REG_CMD);
553 vd->flags |= VMXNET3_DEVICE_F_LINK_UP;
554 vd->link_speed = ret >> 16;
555 vnet_hw_interface_set_link_speed (vnm, vd->hw_if_index,
556 vd->link_speed * 1000);
557 vnet_hw_interface_set_flags (vnm, vd->hw_if_index,
558 VNET_HW_INTERFACE_FLAG_LINK_UP);
562 vd->flags &= ~VMXNET3_DEVICE_F_LINK_UP;
563 vnet_hw_interface_set_flags (vnm, vd->hw_if_index, 0);
/* Validate a requested queue size: must be in [64, 4096]. The error text
 * in vmxnet3_create_if also requires multiples of 64 — that check is
 * presumably in the lines not visible here; confirm. */
568 vmxnet3_queue_size_valid (u16 qsz)
570 if (qsz < 64 || qsz > 4096)
/* Validate requested tx queue count: must not exceed VMXNET3_TXQ_MAX nor
 * the number of VPP threads (each thread needs at most one queue). */
578 vmxnet3_tx_queue_num_valid (u16 num)
580 vlib_thread_main_t *tm = vlib_get_thread_main ();
582 if ((num > VMXNET3_TXQ_MAX) || (num > tm->n_vlib_mains))
/* Validate requested rx queue count: must not exceed VMXNET3_RXQ_MAX. */
588 vmxnet3_rx_queue_num_valid (u16 num)
590 if (num > VMXNET3_RXQ_MAX)
/*
 * Public entry point: create a vmxnet3 interface from CLI/API args.
 * Flow: validate/default queue counts and sizes -> reject duplicate PCI
 * address -> bind to UIO and open the PCI device -> allocate a device slot
 * from the pool -> map BARs, set up MSI-X (one vector per rx queue + one
 * event vector) -> vmxnet3_device_init -> register the ethernet interface,
 * assign rx threads, refill rx rings and enable interrupts.
 * Errors are reported via args->rv/args->error and logged; the failure
 * path at the bottom tears down via vmxnet3_delete_if.
 * NOTE(review): many structural lines (braces, gotos/returns, some
 * assignments) are missing from this listing.
 */
596 vmxnet3_create_if (vlib_main_t * vm, vmxnet3_create_if_args_t * args)
598 vnet_main_t *vnm = vnet_get_main ();
599 vmxnet3_main_t *vmxm = &vmxnet3_main;
600 vmxnet3_device_t *vd;
601 vlib_pci_dev_handle_t h;
602 clib_error_t *error = 0;
/* Default queue counts when the caller passed 0. */
606 if (args->txq_num == 0)
608 if (args->rxq_num == 0)
610 if (!vmxnet3_rx_queue_num_valid (args->rxq_num))
612 args->rv = VNET_API_ERROR_INVALID_VALUE;
614 clib_error_return (error, "number of rx queues must be <= %u",
616 vlib_log (VLIB_LOG_LEVEL_ERR, vmxm->log_default, "%U: %s",
617 format_vlib_pci_addr, &args->addr,
618 "number of rx queues must be <= %u", VMXNET3_RXQ_MAX);
622 if (!vmxnet3_tx_queue_num_valid (args->txq_num))
624 args->rv = VNET_API_ERROR_INVALID_VALUE;
626 clib_error_return (error,
627 "number of tx queues must be <= %u and <= number of "
628 "CPU's assigned to VPP", VMXNET3_TXQ_MAX);
629 vlib_log (VLIB_LOG_LEVEL_ERR, vmxm->log_default, "%U: %s",
630 format_vlib_pci_addr, &args->addr,
631 "number of tx queues must be <= %u and <= number of "
632 "CPU's assigned to VPP", VMXNET3_TXQ_MAX);
/* Default and validate ring sizes. */
635 if (args->rxq_size == 0)
636 args->rxq_size = VMXNET3_NUM_RX_DESC;
637 if (args->txq_size == 0)
638 args->txq_size = VMXNET3_NUM_TX_DESC;
640 if (!vmxnet3_queue_size_valid (args->rxq_size) ||
641 !vmxnet3_queue_size_valid (args->txq_size))
643 args->rv = VNET_API_ERROR_INVALID_VALUE;
645 clib_error_return (error,
646 "queue size must be <= 4096, >= 64, "
647 "and multiples of 64");
648 vlib_log (VLIB_LOG_LEVEL_ERR, vmxm->log_default, "%U: %s",
649 format_vlib_pci_addr, &args->addr,
650 "queue size must be <= 4096, >= 64, and multiples of 64");
/* Reject a PCI address already claimed by another vmxnet3 device. */
655 pool_foreach (vd, vmxm->devices, ({
656 if (vd->pci_addr.as_u32 == args->addr.as_u32)
658 args->rv = VNET_API_ERROR_ADDRESS_IN_USE;
660 clib_error_return (error, "%U: %s", format_vlib_pci_addr,
661 &args->addr, "pci address in use");
662 vlib_log (VLIB_LOG_LEVEL_ERR, vmxm->log_default, "%U: %s",
663 format_vlib_pci_addr, &args->addr, "pci address in use");
/* Bind the device to a UIO driver so userspace can drive it. */
671 error = vlib_pci_bind_to_uio (vm, &args->addr, (char *) "auto");
674 args->rv = VNET_API_ERROR_INVALID_INTERFACE;
676 clib_error_return (error, "%U: %s", format_vlib_pci_addr,
678 "error encountered on binding pci device");
679 vlib_log (VLIB_LOG_LEVEL_ERR, vmxm->log_default, "%U: %s",
680 format_vlib_pci_addr, &args->addr,
/* NOTE(review): typo "devicee" in this log string. */
681 "error encountered on binding pci devicee");
687 vlib_pci_device_open (vm, &args->addr, vmxnet3_pci_device_ids, &h)))
689 args->rv = VNET_API_ERROR_INVALID_INTERFACE;
691 clib_error_return (error, "%U: %s", format_vlib_pci_addr,
693 "error encountered on pci device open");
694 vlib_log (VLIB_LOG_LEVEL_ERR, vmxm->log_default, "%U: %s",
695 format_vlib_pci_addr, &args->addr,
696 "error encountered on pci device open");
701 * Do not use vmxnet3_log_error prior to this line since the macro
702 * references vd->pci_dev_handle
/* Allocate the device slot and record identifying state. */
704 pool_get (vmxm->devices, vd);
705 vd->num_tx_queues = args->txq_num;
706 vd->num_rx_queues = args->rxq_num;
707 vd->dev_instance = vd - vmxm->devices;
708 vd->per_interface_next_index = ~0;
709 vd->pci_addr = args->addr;
711 if (args->enable_elog)
712 vd->flags |= VMXNET3_DEVICE_F_ELOG;
714 vd->pci_dev_handle = h;
715 vd->numa_node = vlib_pci_get_numa_node (vm, h);
716 vd->num_intrs = vd->num_rx_queues + 1; // +1 for the event interrupt
/* Let the irq handlers map handle -> device pool index. */
718 vlib_pci_set_private_data (vm, h, vd->dev_instance);
720 if ((error = vlib_pci_bus_master_enable (vm, h)))
722 vmxnet3_log_error (vd, "error encountered on pci bus master enable");
/* BAR 0: interrupt mask registers; BAR 1: device command registers. */
726 if ((error = vlib_pci_map_region (vm, h, 0, (void **) &vd->bar[0])))
728 vmxnet3_log_error (vd, "error encountered on pci map region for bar 0");
732 if ((error = vlib_pci_map_region (vm, h, 1, (void **) &vd->bar[1])))
734 vmxnet3_log_error (vd, "error encountered on pci map region for bar 1");
/* Need one MSI-X vector per rx queue plus the event vector. */
738 num_intr = vlib_pci_get_num_msix_interrupts (vm, h);
739 if (num_intr < vd->num_rx_queues + 1)
741 vmxnet3_log_error (vd,
742 "No sufficient interrupt lines (%u) for rx queues",
746 if ((error = vlib_pci_register_msix_handler (vm, h, 0, vd->num_rx_queues,
747 &vmxnet3_rxq_irq_handler)))
749 vmxnet3_log_error (vd,
750 "error encountered on pci register msix handler 0");
754 if ((error = vlib_pci_register_msix_handler (vm, h, vd->num_rx_queues, 1,
755 &vmxnet3_event_irq_handler)))
757 vmxnet3_log_error (vd,
758 "error encountered on pci register msix handler 1");
762 if ((error = vlib_pci_enable_msix_irq (vm, h, 0, vd->num_rx_queues + 1)))
764 vmxnet3_log_error (vd, "error encountered on pci enable msix irq");
768 if ((error = vlib_pci_intr_enable (vm, h)))
770 vmxnet3_log_error (vd, "error encountered on pci interrupt enable");
774 if ((error = vmxnet3_device_init (vm, vd, args)))
776 vmxnet3_log_error (vd, "error encountered on device init");
780 /* create interface */
781 error = ethernet_register_interface (vnm, vmxnet3_device_class.index,
782 vd->dev_instance, vd->mac_addr,
783 &vd->hw_if_index, vmxnet3_flag_change);
787 vmxnet3_log_error (vd,
788 "error encountered on ethernet register interface");
792 vnet_sw_interface_t *sw = vnet_get_hw_sw_interface (vnm, vd->hw_if_index);
793 vd->sw_if_index = sw->sw_if_index;
794 args->sw_if_index = sw->sw_if_index;
796 vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, vd->hw_if_index);
797 hw->flags |= VNET_HW_INTERFACE_FLAG_SUPPORTS_INT_MODE;
799 hw->flags |= VNET_HW_INTERFACE_FLAG_SUPPORTS_GSO;
801 vnet_hw_interface_set_input_node (vnm, vd->hw_if_index,
802 vmxnet3_input_node.index);
803 /* Disable interrupts */
804 vmxnet3_disable_interrupt (vd);
/* Assign each rx queue to an input thread and pre-fill its rings with
 * buffers from that thread's NUMA-local pool. */
805 vec_foreach_index (qid, vd->rxqs)
807 vmxnet3_rxq_t *rxq = vec_elt_at_index (vd->rxqs, qid);
811 vnet_hw_interface_assign_rx_thread (vnm, vd->hw_if_index, qid, ~0);
812 thread_index = vnet_get_device_input_thread_index (vnm, vd->hw_if_index,
814 numa_node = vlib_mains[thread_index]->numa_node;
815 rxq->buffer_pool_index =
816 vlib_buffer_pool_get_default_for_numa (vm, numa_node);
817 vmxnet3_rxq_refill_ring0 (vm, vd, rxq);
818 vmxnet3_rxq_refill_ring1 (vm, vd, rxq);
820 vd->flags |= VMXNET3_DEVICE_F_INITIALIZED;
821 vmxnet3_enable_interrupt (vd);
/* link_speed is in Mbps; vnet expects kbps. */
823 vnet_hw_interface_set_link_speed (vnm, vd->hw_if_index,
824 vd->link_speed * 1000);
825 if (vd->flags & VMXNET3_DEVICE_F_LINK_UP)
826 vnet_hw_interface_set_flags (vnm, vd->hw_if_index,
827 VNET_HW_INTERFACE_FLAG_LINK_UP);
829 vnet_hw_interface_set_flags (vnm, vd->hw_if_index, 0);
/* Error path: tear everything down. */
833 vmxnet3_delete_if (vm, vd);
834 args->rv = VNET_API_ERROR_INVALID_INTERFACE;
/*
 * Tear down a vmxnet3 interface: quiesce+reset the device, unwind the vnet
 * interface and rx-thread assignments, close the PCI device, free all rx/tx
 * ring buffers and DMA memory, free stats vectors and the shared regions,
 * then return the device slot to the pool.
 * Also used as the error-unwind path of vmxnet3_create_if, so each step is
 * presumably guarded for partially-initialized state — confirm against the
 * full source (guard lines are not visible in this listing).
 */
839 vmxnet3_delete_if (vlib_main_t * vm, vmxnet3_device_t * vd)
841 vnet_main_t *vnm = vnet_get_main ();
842 vmxnet3_main_t *vmxm = &vmxnet3_main;
846 /* Quiesce the device */
847 vmxnet3_reg_write (vd, 1, VMXNET3_REG_CMD, VMXNET3_CMD_QUIESCE_DEV);
849 /* Reset the device */
850 vmxnet3_reg_write (vd, 1, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV);
/* Unwind the vnet interface: flags down, rx threads unassigned, delete. */
854 vnet_hw_interface_set_flags (vnm, vd->hw_if_index, 0);
855 vec_foreach_index (qid, vd->rxqs)
856 vnet_hw_interface_unassign_rx_thread (vnm, vd->hw_if_index, qid);
857 ethernet_delete_interface (vnm, vd->hw_if_index);
860 vlib_pci_device_close (vm, vd->pci_dev_handle);
/* Free rx rings: return outstanding buffers, then descriptor DMA memory. */
863 vec_foreach_index (i, vd->rxqs)
865 vmxnet3_rxq_t *rxq = vec_elt_at_index (vd->rxqs, i);
866 u16 mask = rxq->size - 1;
869 for (rid = 0; rid < VMXNET3_RX_RING_SIZE; rid++)
871 vmxnet3_rx_ring *ring;
873 ring = &rxq->rx_ring[rid];
/* Buffers live from consume+1 around the ring; free them in bulk. */
874 desc_idx = (ring->consume + 1) & mask;
875 vlib_buffer_free_from_ring (vm, ring->bufs, desc_idx, rxq->size,
877 vec_free (ring->bufs);
878 vlib_physmem_free (vm, rxq->rx_desc[rid]);
880 vlib_physmem_free (vm, rxq->rx_comp);
884 vec_free (vd->rx_stats);
/* Free tx rings: walk consume..produce freeing in-flight buffers. */
887 vec_foreach_index (i, vd->txqs)
889 vmxnet3_txq_t *txq = vec_elt_at_index (vd->txqs, i);
890 u16 mask = txq->size - 1;
893 desc_idx = txq->tx_ring.consume;
894 end_idx = txq->tx_ring.produce;
895 while (desc_idx != end_idx)
897 bi = txq->tx_ring.bufs[desc_idx];
898 vlib_buffer_free_no_next (vm, &bi, 1);
/* clib_spinlock_free is a no-op on an uninitialized lock. */
902 clib_spinlock_free (&txq->lock);
903 vec_free (txq->tx_ring.bufs);
904 vlib_physmem_free (vm, txq->tx_desc);
905 vlib_physmem_free (vm, txq->tx_comp);
909 vec_free (vd->tx_stats);
/* vlib_physmem_free tolerates NULL, so partially-init'd state is safe. */
911 vlib_physmem_free (vm, vd->driver_shared);
912 vlib_physmem_free (vm, vd->queues);
913 vlib_physmem_free (vm, vd->rss);
915 clib_error_free (vd->error);
916 clib_memset (vd, 0, sizeof (*vd));
917 pool_put (vmxm->devices, vd);
922 * fd.io coding-style-patch-verification: ON
925 * eval: (c-set-style "gnu")