/*
 * Copyright (c) 2018 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vppinfra/types.h>
#include <vlib/vlib.h>
#include <vlib/pci/pci.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/plugin/plugin.h>
#include <vpp/app/version.h>
#include <vnet/interface/rx_queue_funcs.h>
#include <vnet/interface/tx_queue_funcs.h>
#include <vmxnet3/vmxnet3.h>
26 #define PCI_VENDOR_ID_VMWARE 0x15ad
27 #define PCI_DEVICE_ID_VMWARE_VMXNET3 0x07b0
29 vmxnet3_main_t vmxnet3_main;
31 static pci_device_id_t vmxnet3_pci_device_ids[] = {
33 .vendor_id = PCI_VENDOR_ID_VMWARE,
34 .device_id = PCI_DEVICE_ID_VMWARE_VMXNET3},
39 vmxnet3_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index,
42 vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, hw_if_index);
43 vmxnet3_main_t *vmxm = &vmxnet3_main;
44 vmxnet3_device_t *vd = vec_elt_at_index (vmxm->devices, hi->dev_instance);
45 uword is_up = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) != 0;
47 if (vd->flags & VMXNET3_DEVICE_F_ERROR)
48 return clib_error_return (0, "device is in error state");
52 vnet_hw_interface_set_flags (vnm, vd->hw_if_index,
53 VNET_HW_INTERFACE_FLAG_LINK_UP);
54 vd->flags |= VMXNET3_DEVICE_F_ADMIN_UP;
58 vnet_hw_interface_set_flags (vnm, vd->hw_if_index, 0);
59 vd->flags &= ~VMXNET3_DEVICE_F_ADMIN_UP;
65 vmxnet3_interface_rx_mode_change (vnet_main_t * vnm, u32 hw_if_index, u32 qid,
66 vnet_hw_if_rx_mode mode)
68 vmxnet3_main_t *vmxm = &vmxnet3_main;
69 vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
70 vmxnet3_device_t *vd = pool_elt_at_index (vmxm->devices, hw->dev_instance);
71 vmxnet3_rxq_t *rxq = vec_elt_at_index (vd->rxqs, qid);
73 if (mode == VNET_HW_IF_RX_MODE_POLLING)
82 vmxnet3_set_interface_next_node (vnet_main_t * vnm, u32 hw_if_index,
85 vmxnet3_main_t *vmxm = &vmxnet3_main;
86 vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
87 vmxnet3_device_t *vd = pool_elt_at_index (vmxm->devices, hw->dev_instance);
89 /* Shut off redirection */
92 vd->per_interface_next_index = node_index;
96 vd->per_interface_next_index =
97 vlib_node_add_next (vlib_get_main (), vmxnet3_input_node.index,
102 vmxnet3_clear_hw_interface_counters (u32 instance)
104 vmxnet3_main_t *vmxm = &vmxnet3_main;
105 vmxnet3_device_t *vd = pool_elt_at_index (vmxm->devices, instance);
106 vmxnet3_tx_queue *tx = VMXNET3_TX_START (vd);
107 vmxnet3_rx_queue *rx = VMXNET3_RX_START (vd);
111 * Set the "last_cleared_stats" to the current stats, so that
112 * things appear to clear from a display perspective.
114 vmxnet3_reg_write (vd, 1, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
116 vec_foreach_index (qid, vd->txqs)
118 vmxnet3_tx_stats *txs = vec_elt_at_index (vd->tx_stats, qid);
119 clib_memcpy (txs, &tx->stats, sizeof (*txs));
122 vec_foreach_index (qid, vd->rxqs)
124 vmxnet3_rx_stats *rxs = vec_elt_at_index (vd->rx_stats, qid);
125 clib_memcpy (rxs, &rx->stats, sizeof (*rxs));
130 static char *vmxnet3_tx_func_error_strings[] = {
132 foreach_vmxnet3_tx_func_error
137 VNET_DEVICE_CLASS (vmxnet3_device_class,) =
139 .name = "VMXNET3 interface",
140 .format_device = format_vmxnet3_device,
141 .format_device_name = format_vmxnet3_device_name,
142 .admin_up_down_function = vmxnet3_interface_admin_up_down,
143 .clear_counters = vmxnet3_clear_hw_interface_counters,
144 .rx_mode_change_function = vmxnet3_interface_rx_mode_change,
145 .rx_redirect_to_node = vmxnet3_set_interface_next_node,
146 .tx_function_n_errors = VMXNET3_TX_N_ERROR,
147 .tx_function_error_strings = vmxnet3_tx_func_error_strings,
152 vmxnet3_flag_change (vnet_main_t * vnm, vnet_hw_interface_t * hw, u32 flags)
158 vmxnet3_write_mac (vmxnet3_device_t * vd)
162 memcpy (&val, vd->mac_addr, 4);
163 vmxnet3_reg_write (vd, 1, VMXNET3_REG_MACL, val);
166 memcpy (&val, vd->mac_addr + 4, 2);
167 vmxnet3_reg_write (vd, 1, VMXNET3_REG_MACH, val);
170 static clib_error_t *
171 vmxnet3_provision_driver_shared (vlib_main_t * vm, vmxnet3_device_t * vd)
173 vmxnet3_shared *shared;
176 vmxnet3_tx_queue *tx = VMXNET3_TX_START (vd);
177 vmxnet3_rx_queue *rx = VMXNET3_RX_START (vd);
180 vlib_physmem_alloc_aligned_on_numa (vm, sizeof (*vd->driver_shared), 512,
182 if (vd->driver_shared == 0)
183 return vlib_physmem_last_error (vm);
185 clib_memset (vd->driver_shared, 0, sizeof (*vd->driver_shared));
187 vec_foreach_index (qid, vd->txqs)
189 vmxnet3_txq_t *txq = vec_elt_at_index (vd->txqs, qid);
191 tx->cfg.desc_address = vmxnet3_dma_addr (vm, vd, txq->tx_desc);
192 tx->cfg.comp_address = vmxnet3_dma_addr (vm, vd, txq->tx_comp);
193 tx->cfg.num_desc = txq->size;
194 tx->cfg.num_comp = txq->size;
198 vec_foreach_index (qid, vd->rxqs)
200 vmxnet3_rxq_t *rxq = vec_elt_at_index (vd->rxqs, qid);
202 for (rid = 0; rid < VMXNET3_RX_RING_SIZE; rid++)
204 rx->cfg.desc_address[rid] = vmxnet3_dma_addr (vm, vd,
206 rx->cfg.num_desc[rid] = rxq->size;
208 rx->cfg.comp_address = vmxnet3_dma_addr (vm, vd, rxq->rx_comp);
209 rx->cfg.num_comp = rxq->size;
210 rx->cfg.intr_index = qid;
214 shared = vd->driver_shared;
215 shared->magic = VMXNET3_SHARED_MAGIC;
216 shared->misc.version = VMXNET3_VERSION_MAGIC;
217 if (sizeof (void *) == 4)
218 shared->misc.guest_info = VMXNET3_GOS_BITS_32;
220 shared->misc.guest_info = VMXNET3_GOS_BITS_64;
221 shared->misc.guest_info |= VMXNET3_GOS_TYPE_LINUX;
222 shared->misc.version_support = VMXNET3_VERSION_SELECT;
223 shared->misc.upt_features = VMXNET3_F_RXCSUM;
225 shared->misc.upt_features |= VMXNET3_F_LRO;
226 if (vd->num_rx_queues > 1)
228 shared->misc.upt_features |= VMXNET3_F_RSS;
229 shared->rss.version = 1;
230 shared->rss.address = vmxnet3_dma_addr (vm, vd, vd->rss);
231 shared->rss.length = sizeof (*vd->rss);
233 shared->misc.max_num_rx_sg = 0;
234 shared->misc.upt_version_support = VMXNET3_UPT_VERSION_SELECT;
235 shared->misc.queue_desc_address = vmxnet3_dma_addr (vm, vd, vd->queues);
236 shared->misc.queue_desc_len = sizeof (*tx) * vd->num_tx_queues +
237 sizeof (*rx) * vd->num_rx_queues;
238 shared->misc.mtu = VMXNET3_MTU;
239 shared->misc.num_tx_queues = vd->num_tx_queues;
240 shared->misc.num_rx_queues = vd->num_rx_queues;
241 shared->interrupt.num_intrs = vd->num_intrs;
242 shared->interrupt.event_intr_index = vd->num_rx_queues;
243 shared->interrupt.control = VMXNET3_IC_DISABLE_ALL;
244 shared->rx_filter.mode = VMXNET3_RXMODE_UCAST | VMXNET3_RXMODE_BCAST |
245 VMXNET3_RXMODE_ALL_MULTI | VMXNET3_RXMODE_PROMISC;
246 shared_dma = vmxnet3_dma_addr (vm, vd, shared);
248 vmxnet3_reg_write (vd, 1, VMXNET3_REG_DSAL, shared_dma);
249 vmxnet3_reg_write (vd, 1, VMXNET3_REG_DSAH, shared_dma >> 32);
255 vmxnet3_enable_interrupt (vmxnet3_device_t * vd)
258 vmxnet3_shared *shared = vd->driver_shared;
260 shared->interrupt.control &= ~VMXNET3_IC_DISABLE_ALL;
261 for (i = 0; i < vd->num_intrs; i++)
262 vmxnet3_reg_write (vd, 0, VMXNET3_REG_IMR + i * 8, 0);
266 vmxnet3_disable_interrupt (vmxnet3_device_t * vd)
269 vmxnet3_shared *shared = vd->driver_shared;
271 shared->interrupt.control |= VMXNET3_IC_DISABLE_ALL;
272 for (i = 0; i < vd->num_intrs; i++)
273 vmxnet3_reg_write (vd, 0, VMXNET3_REG_IMR + i * 8, 1);
276 static clib_error_t *
277 vmxnet3_rxq_init (vlib_main_t * vm, vmxnet3_device_t * vd, u16 qid, u16 qsz)
280 vmxnet3_rx_stats *rxs;
283 vec_validate (vd->rx_stats, qid);
284 rxs = vec_elt_at_index (vd->rx_stats, qid);
285 clib_memset (rxs, 0, sizeof (*rxs));
287 vec_validate_aligned (vd->rxqs, qid, CLIB_CACHE_LINE_BYTES);
288 rxq = vec_elt_at_index (vd->rxqs, qid);
289 clib_memset (rxq, 0, sizeof (*rxq));
291 for (rid = 0; rid < VMXNET3_RX_RING_SIZE; rid++)
293 rxq->rx_desc[rid] = vlib_physmem_alloc_aligned_on_numa
294 (vm, qsz * sizeof (*rxq->rx_desc[rid]), 512, vd->numa_node);
296 if (rxq->rx_desc[rid] == 0)
297 return vlib_physmem_last_error (vm);
299 clib_memset (rxq->rx_desc[rid], 0, qsz * sizeof (*rxq->rx_desc[rid]));
302 vlib_physmem_alloc_aligned_on_numa (vm, qsz * sizeof (*rxq->rx_comp), 512,
304 if (rxq->rx_comp == 0)
305 return vlib_physmem_last_error (vm);
307 clib_memset (rxq->rx_comp, 0, qsz * sizeof (*rxq->rx_comp));
308 for (rid = 0; rid < VMXNET3_RX_RING_SIZE; rid++)
310 vmxnet3_rx_ring *ring;
312 ring = &rxq->rx_ring[rid];
313 ring->gen = VMXNET3_RXF_GEN;
315 vec_validate_aligned (ring->bufs, rxq->size, CLIB_CACHE_LINE_BYTES);
317 rxq->rx_comp_ring.gen = VMXNET3_RXCF_GEN;
322 static clib_error_t *
323 vmxnet3_txq_init (vlib_main_t * vm, vmxnet3_device_t * vd, u16 qid, u16 qsz)
326 vmxnet3_tx_stats *txs;
329 vec_validate_aligned (vd->txqs, qid, CLIB_CACHE_LINE_BYTES);
330 txq = vec_elt_at_index (vd->txqs, qid);
331 clib_memset (txq, 0, sizeof (*txq));
332 clib_spinlock_init (&txq->lock);
334 vec_validate (vd->tx_stats, qid);
335 txs = vec_elt_at_index (vd->tx_stats, qid);
336 clib_memset (txs, 0, sizeof (*txs));
339 txq->reg_txprod = qid * 8 + VMXNET3_REG_TXPROD;
341 size = qsz * sizeof (*txq->tx_desc);
343 vlib_physmem_alloc_aligned_on_numa (vm, size, 512, vd->numa_node);
344 if (txq->tx_desc == 0)
345 return vlib_physmem_last_error (vm);
347 clib_memset (txq->tx_desc, 0, size);
349 size = qsz * sizeof (*txq->tx_comp);
351 vlib_physmem_alloc_aligned_on_numa (vm, size, 512, vd->numa_node);
352 if (txq->tx_comp == 0)
353 return vlib_physmem_last_error (vm);
355 clib_memset (txq->tx_comp, 0, size);
356 vec_validate_aligned (txq->tx_ring.bufs, txq->size, CLIB_CACHE_LINE_BYTES);
357 txq->tx_ring.gen = VMXNET3_TXF_GEN;
358 txq->tx_comp_ring.gen = VMXNET3_TXCF_GEN;
363 static const u8 vmxnet3_rss_key[VMXNET3_RSS_MAX_KEY_SZ] = {
364 0x3b, 0x56, 0xd1, 0x56, 0x13, 0x4a, 0xe7, 0xac,
365 0xe8, 0x79, 0x09, 0x75, 0xe8, 0x65, 0x79, 0x28,
366 0x35, 0x12, 0xb9, 0x56, 0x7c, 0x76, 0x4b, 0x70,
367 0xd8, 0x56, 0xa3, 0x18, 0x9b, 0x0a, 0xee, 0xf3,
368 0x96, 0xa6, 0x9f, 0x8f, 0x9e, 0x8c, 0x90, 0xc9,
371 static clib_error_t *
372 vmxnet3_rss_init (vlib_main_t * vm, vmxnet3_device_t * vd)
374 vmxnet3_rss_shared *rss;
375 size_t size = sizeof (*rss);
378 vd->rss = vlib_physmem_alloc_aligned_on_numa (vm, size, 512, vd->numa_node);
380 return vlib_physmem_last_error (vm);
382 clib_memset (vd->rss, 0, size);
385 VMXNET3_RSS_HASH_TYPE_IPV4 | VMXNET3_RSS_HASH_TYPE_TCP_IPV4 |
386 VMXNET3_RSS_HASH_TYPE_IPV6 | VMXNET3_RSS_HASH_TYPE_TCP_IPV6;
387 rss->hash_func = VMXNET3_RSS_HASH_FUNC_TOEPLITZ;
388 rss->hash_key_sz = VMXNET3_RSS_MAX_KEY_SZ;
389 rss->ind_table_sz = VMXNET3_RSS_MAX_IND_TABLE_SZ;
390 clib_memcpy (rss->hash_key, vmxnet3_rss_key, VMXNET3_RSS_MAX_KEY_SZ);
391 for (i = 0; i < rss->ind_table_sz; i++)
392 rss->ind_table[i] = i % vd->num_rx_queues;
397 static clib_error_t *
398 vmxnet3_device_init (vlib_main_t * vm, vmxnet3_device_t * vd,
399 vmxnet3_create_if_args_t * args)
401 clib_error_t *error = 0;
404 /* Quiesce the device */
405 vmxnet3_reg_write (vd, 1, VMXNET3_REG_CMD, VMXNET3_CMD_QUIESCE_DEV);
406 ret = vmxnet3_reg_read (vd, 1, VMXNET3_REG_CMD);
409 error = clib_error_return (0, "error on quiescing device rc (%u)", ret);
413 /* Reset the device */
414 vmxnet3_reg_write (vd, 1, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV);
415 ret = vmxnet3_reg_read (vd, 1, VMXNET3_REG_CMD);
418 error = clib_error_return (0, "error on resetting device rc (%u)", ret);
422 ret = vmxnet3_reg_read (vd, 1, VMXNET3_REG_VRRS);
423 vd->version = count_leading_zeros (ret);
424 vd->version = uword_bits - vd->version;
426 if (vd->version == 0)
428 error = clib_error_return (0, "unsupported hardware version %u",
433 /* cap support version to 3 */
434 vmxnet3_reg_write (vd, 1, VMXNET3_REG_VRRS,
435 1 << (clib_min (3, vd->version) - 1));
437 ret = vmxnet3_reg_read (vd, 1, VMXNET3_REG_UVRS);
439 vmxnet3_reg_write (vd, 1, VMXNET3_REG_UVRS, 1);
442 error = clib_error_return (0, "unsupported upt version %u", ret);
446 /* GSO is only supported for version >= 3 */
447 if (args->enable_gso)
449 if (vd->version >= 3)
454 clib_error_return (0,
455 "GSO is not supported because hardware version"
456 " is %u. It must be >= 3", vd->version);
461 vmxnet3_reg_write (vd, 1, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
462 ret = vmxnet3_reg_read (vd, 1, VMXNET3_REG_CMD);
465 vd->flags |= VMXNET3_DEVICE_F_LINK_UP;
466 vd->link_speed = ret >> 16;
469 vd->flags &= ~VMXNET3_DEVICE_F_LINK_UP;
471 /* Get the mac address */
472 ret = vmxnet3_reg_read (vd, 1, VMXNET3_REG_MACL);
473 clib_memcpy (vd->mac_addr, &ret, 4);
474 ret = vmxnet3_reg_read (vd, 1, VMXNET3_REG_MACH);
475 clib_memcpy (vd->mac_addr + 4, &ret, 2);
477 size = sizeof (vmxnet3_rx_queue) * vd->num_rx_queues +
478 sizeof (vmxnet3_tx_queue) * vd->num_tx_queues;
481 vlib_physmem_alloc_aligned_on_numa (vm, size, 512, vd->numa_node);
483 return vlib_physmem_last_error (vm);
485 clib_memset (vd->queues, 0, size);
487 if (vd->num_rx_queues > 1)
489 error = vmxnet3_rss_init (vm, vd);
494 for (i = 0; i < vd->num_rx_queues; i++)
496 error = vmxnet3_rxq_init (vm, vd, i, args->rxq_size);
501 for (i = 0; i < vd->num_tx_queues; i++)
503 error = vmxnet3_txq_init (vm, vd, i, args->txq_size);
508 error = vmxnet3_provision_driver_shared (vm, vd);
512 vmxnet3_write_mac (vd);
514 /* Activate device */
515 vmxnet3_reg_write (vd, 1, VMXNET3_REG_CMD, VMXNET3_CMD_ACTIVATE_DEV);
516 ret = vmxnet3_reg_read (vd, 1, VMXNET3_REG_CMD);
520 clib_error_return (0, "error on activating device rc (%u)", ret);
528 vmxnet3_rxq_irq_handler (vlib_main_t * vm, vlib_pci_dev_handle_t h, u16 line)
530 vnet_main_t *vnm = vnet_get_main ();
531 vmxnet3_main_t *vmxm = &vmxnet3_main;
532 uword pd = vlib_pci_get_private_data (vm, h);
533 vmxnet3_device_t *vd = pool_elt_at_index (vmxm->devices, pd);
535 vmxnet3_rxq_t *rxq = vec_elt_at_index (vd->rxqs, qid);
537 if (vec_len (vd->rxqs) > qid && vd->rxqs[qid].int_mode != 0)
538 vnet_hw_if_rx_queue_set_int_pending (vnm, rxq->queue_index);
542 vmxnet3_event_irq_handler (vlib_main_t * vm, vlib_pci_dev_handle_t h,
545 vnet_main_t *vnm = vnet_get_main ();
546 vmxnet3_main_t *vmxm = &vmxnet3_main;
547 uword pd = vlib_pci_get_private_data (vm, h);
548 vmxnet3_device_t *vd = pool_elt_at_index (vmxm->devices, pd);
551 vmxnet3_reg_write (vd, 1, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
552 ret = vmxnet3_reg_read (vd, 1, VMXNET3_REG_CMD);
555 vd->flags |= VMXNET3_DEVICE_F_LINK_UP;
556 vd->link_speed = ret >> 16;
557 vnet_hw_interface_set_link_speed (vnm, vd->hw_if_index,
558 vd->link_speed * 1000);
559 vnet_hw_interface_set_flags (vnm, vd->hw_if_index,
560 VNET_HW_INTERFACE_FLAG_LINK_UP);
564 vd->flags &= ~VMXNET3_DEVICE_F_LINK_UP;
565 vnet_hw_interface_set_flags (vnm, vd->hw_if_index, 0);
570 vmxnet3_queue_size_valid (u16 qsz)
572 if (qsz < 64 || qsz > 4096)
580 vmxnet3_tx_queue_num_valid (u16 num)
582 vlib_thread_main_t *tm = vlib_get_thread_main ();
584 if ((num > VMXNET3_TXQ_MAX) || (num > tm->n_vlib_mains))
590 vmxnet3_rx_queue_num_valid (u16 num)
592 if (num > VMXNET3_RXQ_MAX)
598 vmxnet3_create_if (vlib_main_t * vm, vmxnet3_create_if_args_t * args)
600 vnet_main_t *vnm = vnet_get_main ();
601 vmxnet3_main_t *vmxm = &vmxnet3_main;
602 vmxnet3_device_t *vd;
603 vlib_pci_dev_handle_t h;
604 clib_error_t *error = 0;
608 if (args->txq_num == 0)
610 if (args->rxq_num == 0)
612 if (!vmxnet3_rx_queue_num_valid (args->rxq_num))
614 args->rv = VNET_API_ERROR_INVALID_VALUE;
616 clib_error_return (error, "number of rx queues must be <= %u",
618 vlib_log (VLIB_LOG_LEVEL_ERR, vmxm->log_default, "%U: %s",
619 format_vlib_pci_addr, &args->addr,
620 "number of rx queues must be <= %u", VMXNET3_RXQ_MAX);
624 if (!vmxnet3_tx_queue_num_valid (args->txq_num))
626 args->rv = VNET_API_ERROR_INVALID_VALUE;
628 clib_error_return (error,
629 "number of tx queues must be <= %u and <= number of "
630 "CPU's assigned to VPP", VMXNET3_TXQ_MAX);
631 vlib_log (VLIB_LOG_LEVEL_ERR, vmxm->log_default, "%U: %s",
632 format_vlib_pci_addr, &args->addr,
633 "number of tx queues must be <= %u and <= number of "
634 "CPU's assigned to VPP", VMXNET3_TXQ_MAX);
637 if (args->rxq_size == 0)
638 args->rxq_size = VMXNET3_NUM_RX_DESC;
639 if (args->txq_size == 0)
640 args->txq_size = VMXNET3_NUM_TX_DESC;
642 if (!vmxnet3_queue_size_valid (args->rxq_size) ||
643 !vmxnet3_queue_size_valid (args->txq_size))
645 args->rv = VNET_API_ERROR_INVALID_VALUE;
647 clib_error_return (error,
648 "queue size must be <= 4096, >= 64, "
649 "and multiples of 64");
650 vlib_log (VLIB_LOG_LEVEL_ERR, vmxm->log_default, "%U: %s",
651 format_vlib_pci_addr, &args->addr,
652 "queue size must be <= 4096, >= 64, and multiples of 64");
657 pool_foreach (vd, vmxm->devices) {
658 if (vd->pci_addr.as_u32 == args->addr.as_u32)
660 args->rv = VNET_API_ERROR_ADDRESS_IN_USE;
662 clib_error_return (error, "%U: %s", format_vlib_pci_addr,
663 &args->addr, "pci address in use");
664 vlib_log (VLIB_LOG_LEVEL_ERR, vmxm->log_default, "%U: %s",
665 format_vlib_pci_addr, &args->addr, "pci address in use");
673 error = vlib_pci_bind_to_uio (vm, &args->addr, (char *) "auto");
676 args->rv = VNET_API_ERROR_INVALID_INTERFACE;
678 clib_error_return (error, "%U: %s", format_vlib_pci_addr,
680 "error encountered on binding pci device");
681 vlib_log (VLIB_LOG_LEVEL_ERR, vmxm->log_default, "%U: %s",
682 format_vlib_pci_addr, &args->addr,
683 "error encountered on binding pci devicee");
689 vlib_pci_device_open (vm, &args->addr, vmxnet3_pci_device_ids, &h)))
691 args->rv = VNET_API_ERROR_INVALID_INTERFACE;
693 clib_error_return (error, "%U: %s", format_vlib_pci_addr,
695 "error encountered on pci device open");
696 vlib_log (VLIB_LOG_LEVEL_ERR, vmxm->log_default, "%U: %s",
697 format_vlib_pci_addr, &args->addr,
698 "error encountered on pci device open");
703 * Do not use vmxnet3_log_error prior to this line since the macro
704 * references vd->pci_dev_handle
706 pool_get (vmxm->devices, vd);
707 vd->num_tx_queues = args->txq_num;
708 vd->num_rx_queues = args->rxq_num;
709 vd->dev_instance = vd - vmxm->devices;
710 vd->per_interface_next_index = ~0;
711 vd->pci_addr = args->addr;
713 if (args->enable_elog)
714 vd->flags |= VMXNET3_DEVICE_F_ELOG;
716 vd->pci_dev_handle = h;
717 vd->numa_node = vlib_pci_get_numa_node (vm, h);
718 vd->num_intrs = vd->num_rx_queues + 1; // +1 for the event interrupt
720 vlib_pci_set_private_data (vm, h, vd->dev_instance);
722 if ((error = vlib_pci_bus_master_enable (vm, h)))
724 vmxnet3_log_error (vd, "error encountered on pci bus master enable");
728 if ((error = vlib_pci_map_region (vm, h, 0, (void **) &vd->bar[0])))
730 vmxnet3_log_error (vd, "error encountered on pci map region for bar 0");
734 if ((error = vlib_pci_map_region (vm, h, 1, (void **) &vd->bar[1])))
736 vmxnet3_log_error (vd, "error encountered on pci map region for bar 1");
740 num_intr = vlib_pci_get_num_msix_interrupts (vm, h);
741 if (num_intr < vd->num_rx_queues + 1)
743 vmxnet3_log_error (vd,
744 "No sufficient interrupt lines (%u) for rx queues",
747 clib_error_return (0,
748 "No sufficient interrupt lines (%u) for rx queues",
752 if ((error = vlib_pci_register_msix_handler (vm, h, 0, vd->num_rx_queues,
753 &vmxnet3_rxq_irq_handler)))
755 vmxnet3_log_error (vd,
756 "error encountered on pci register msix handler 0");
760 if ((error = vlib_pci_register_msix_handler (vm, h, vd->num_rx_queues, 1,
761 &vmxnet3_event_irq_handler)))
763 vmxnet3_log_error (vd,
764 "error encountered on pci register msix handler 1");
768 if ((error = vlib_pci_enable_msix_irq (vm, h, 0, vd->num_rx_queues + 1)))
770 vmxnet3_log_error (vd, "error encountered on pci enable msix irq");
774 if ((error = vlib_pci_intr_enable (vm, h)))
776 vmxnet3_log_error (vd, "error encountered on pci interrupt enable");
780 if ((error = vmxnet3_device_init (vm, vd, args)))
782 vmxnet3_log_error (vd, "error encountered on device init");
786 /* create interface */
787 error = ethernet_register_interface (vnm, vmxnet3_device_class.index,
788 vd->dev_instance, vd->mac_addr,
789 &vd->hw_if_index, vmxnet3_flag_change);
793 vmxnet3_log_error (vd,
794 "error encountered on ethernet register interface");
798 vnet_sw_interface_t *sw = vnet_get_hw_sw_interface (vnm, vd->hw_if_index);
799 vd->sw_if_index = sw->sw_if_index;
800 args->sw_if_index = sw->sw_if_index;
802 vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, vd->hw_if_index);
803 hw->caps |= VNET_HW_INTERFACE_CAP_SUPPORTS_INT_MODE;
806 hw->caps |= (VNET_HW_INTERFACE_CAP_SUPPORTS_TCP_GSO |
807 VNET_HW_INTERFACE_CAP_SUPPORTS_TX_TCP_CKSUM |
808 VNET_HW_INTERFACE_CAP_SUPPORTS_TX_UDP_CKSUM);
811 vnet_hw_if_set_input_node (vnm, vd->hw_if_index, vmxnet3_input_node.index);
812 /* Disable interrupts */
813 vmxnet3_disable_interrupt (vd);
814 vec_foreach_index (qid, vd->rxqs)
816 vmxnet3_rxq_t *rxq = vec_elt_at_index (vd->rxqs, qid);
819 qi = vnet_hw_if_register_rx_queue (vnm, vd->hw_if_index, qid,
820 VNET_HW_IF_RXQ_THREAD_ANY);
821 fi = vlib_pci_get_msix_file_index (vm, vd->pci_dev_handle, qid);
822 vnet_hw_if_set_rx_queue_file_index (vnm, qi, fi);
823 rxq->queue_index = qi;
824 rxq->buffer_pool_index =
825 vnet_hw_if_get_rx_queue_numa_node (vnm, rxq->queue_index);
826 vmxnet3_rxq_refill_ring0 (vm, vd, rxq);
827 vmxnet3_rxq_refill_ring1 (vm, vd, rxq);
830 vec_foreach_index (qid, vd->txqs)
832 vmxnet3_txq_t *txq = vec_elt_at_index (vd->txqs, qid);
834 vnet_hw_if_register_tx_queue (vnm, vd->hw_if_index, qid);
836 for (u32 i = 0; i < vlib_get_n_threads (); i++)
838 u32 qi = vd->txqs[i % vd->num_tx_queues].queue_index;
839 vnet_hw_if_tx_queue_assign_thread (vnm, qi, i);
841 vnet_hw_if_update_runtime_data (vnm, vd->hw_if_index);
843 vd->flags |= VMXNET3_DEVICE_F_INITIALIZED;
844 vmxnet3_enable_interrupt (vd);
846 vnet_hw_interface_set_link_speed (vnm, vd->hw_if_index,
847 vd->link_speed * 1000);
848 if (vd->flags & VMXNET3_DEVICE_F_LINK_UP)
849 vnet_hw_interface_set_flags (vnm, vd->hw_if_index,
850 VNET_HW_INTERFACE_FLAG_LINK_UP);
852 vnet_hw_interface_set_flags (vnm, vd->hw_if_index, 0);
856 vmxnet3_delete_if (vm, vd);
857 args->rv = VNET_API_ERROR_INVALID_INTERFACE;
862 vmxnet3_delete_if (vlib_main_t * vm, vmxnet3_device_t * vd)
864 vnet_main_t *vnm = vnet_get_main ();
865 vmxnet3_main_t *vmxm = &vmxnet3_main;
869 /* Quiesce the device */
870 vmxnet3_reg_write (vd, 1, VMXNET3_REG_CMD, VMXNET3_CMD_QUIESCE_DEV);
872 /* Reset the device */
873 vmxnet3_reg_write (vd, 1, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV);
877 vnet_hw_interface_set_flags (vnm, vd->hw_if_index, 0);
878 ethernet_delete_interface (vnm, vd->hw_if_index);
881 vlib_pci_device_close (vm, vd->pci_dev_handle);
884 vec_foreach_index (i, vd->rxqs)
886 vmxnet3_rxq_t *rxq = vec_elt_at_index (vd->rxqs, i);
887 u16 mask = rxq->size - 1;
890 for (rid = 0; rid < VMXNET3_RX_RING_SIZE; rid++)
892 vmxnet3_rx_ring *ring;
894 ring = &rxq->rx_ring[rid];
895 desc_idx = (ring->consume + 1) & mask;
896 vlib_buffer_free_from_ring (vm, ring->bufs, desc_idx, rxq->size,
898 vec_free (ring->bufs);
899 vlib_physmem_free (vm, rxq->rx_desc[rid]);
901 vlib_physmem_free (vm, rxq->rx_comp);
905 vec_free (vd->rx_stats);
908 vec_foreach_index (i, vd->txqs)
910 vmxnet3_txq_t *txq = vec_elt_at_index (vd->txqs, i);
911 u16 mask = txq->size - 1;
914 desc_idx = txq->tx_ring.consume;
915 end_idx = txq->tx_ring.produce;
916 while (desc_idx != end_idx)
918 bi = txq->tx_ring.bufs[desc_idx];
919 vlib_buffer_free_no_next (vm, &bi, 1);
923 clib_spinlock_free (&txq->lock);
924 vec_free (txq->tx_ring.bufs);
925 vlib_physmem_free (vm, txq->tx_desc);
926 vlib_physmem_free (vm, txq->tx_comp);
930 vec_free (vd->tx_stats);
932 vlib_physmem_free (vm, vd->driver_shared);
933 vlib_physmem_free (vm, vd->queues);
934 vlib_physmem_free (vm, vd->rss);
936 clib_error_free (vd->error);
937 clib_memset (vd, 0, sizeof (*vd));
938 pool_put (vmxm->devices, vd);
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */