2 * Copyright (c) 2018 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
16 #include <vppinfra/types.h>
17 #include <vlib/vlib.h>
18 #include <vlib/pci/pci.h>
19 #include <vnet/ethernet/ethernet.h>
20 #include <vnet/plugin/plugin.h>
21 #include <vpp/app/version.h>
22 #include <vnet/interface/rx_queue_funcs.h>
23 #include <vmxnet3/vmxnet3.h>
/* PCI vendor/device IDs used to match VMware vmxnet3 virtual NICs at open. */
25 #define PCI_VENDOR_ID_VMWARE 0x15ad
26 #define PCI_DEVICE_ID_VMWARE_VMXNET3 0x07b0
/* Plugin-global state: device pool, per-queue stats vectors, log class. */
28 vmxnet3_main_t vmxnet3_main;
/* Match table handed to vlib_pci_device_open(): only the VMware vmxnet3
 * vendor/device pair is claimed by this driver. */
30 static pci_device_id_t vmxnet3_pci_device_ids[] = {
32 .vendor_id = PCI_VENDOR_ID_VMWARE,
33 .device_id = PCI_DEVICE_ID_VMWARE_VMXNET3},
/* Admin up/down callback registered on the device class.
 * Mirrors the requested admin state into the hw link flags and the
 * device's ADMIN_UP flag.  Refuses the change while the device is in
 * the ERROR state. */
38 vmxnet3_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index,
41 vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, hw_if_index);
42 vmxnet3_main_t *vmxm = &vmxnet3_main;
43 vmxnet3_device_t *vd = vec_elt_at_index (vmxm->devices, hi->dev_instance);
44 uword is_up = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) != 0;
46 if (vd->flags & VMXNET3_DEVICE_F_ERROR)
47 return clib_error_return (0, "device is in error state");
/* Bring-up path: report link up and remember the admin state. */
51 vnet_hw_interface_set_flags (vnm, vd->hw_if_index,
52 VNET_HW_INTERFACE_FLAG_LINK_UP);
53 vd->flags |= VMXNET3_DEVICE_F_ADMIN_UP;
/* Bring-down path: clear hw flags and the ADMIN_UP bit. */
57 vnet_hw_interface_set_flags (vnm, vd->hw_if_index, 0);
58 vd->flags &= ~VMXNET3_DEVICE_F_ADMIN_UP;
/* Per-queue rx-mode change callback (polling vs interrupt).
 * Looks up the device and the affected rx queue, then records the
 * selected mode (the mode-specific handling is outside this view). */
64 vmxnet3_interface_rx_mode_change (vnet_main_t * vnm, u32 hw_if_index, u32 qid,
65 vnet_hw_if_rx_mode mode)
67 vmxnet3_main_t *vmxm = &vmxnet3_main;
68 vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
69 vmxnet3_device_t *vd = pool_elt_at_index (vmxm->devices, hw->dev_instance);
70 vmxnet3_rxq_t *rxq = vec_elt_at_index (vd->rxqs, qid);
72 if (mode == VNET_HW_IF_RX_MODE_POLLING)
/* rx_redirect_to_node callback: redirect packets from this interface to
 * an arbitrary graph node, or restore default dispatch.
 * per_interface_next_index is consumed by the input node. */
84 vmxnet3_main_t *vmxm = &vmxnet3_main;
85 vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
86 vmxnet3_device_t *vd = pool_elt_at_index (vmxm->devices, hw->dev_instance);
88 /* Shut off redirection */
91 vd->per_interface_next_index = node_index;
/* Otherwise register node_index as a next of the vmxnet3 input node and
 * remember the resulting next-slot. */
95 vd->per_interface_next_index =
96 vlib_node_add_next (vlib_get_main (), vmxnet3_input_node.index,
/* clear_counters callback: snapshot the device's current hw stats into
 * the per-queue tx/rx stats baselines so subsequent displays show deltas
 * relative to "now" (counters are not actually reset in hardware). */
101 vmxnet3_clear_hw_interface_counters (u32 instance)
103 vmxnet3_main_t *vmxm = &vmxnet3_main;
104 vmxnet3_device_t *vd = pool_elt_at_index (vmxm->devices, instance);
105 vmxnet3_tx_queue *tx = VMXNET3_TX_START (vd);
106 vmxnet3_rx_queue *rx = VMXNET3_RX_START (vd);
110 * Set the "last_cleared_stats" to the current stats, so that
111 * things appear to clear from a display perspective.
/* Ask the device to refresh its stats block before we snapshot it. */
113 vmxnet3_reg_write (vd, 1, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
115 vec_foreach_index (qid, vd->txqs)
117 vmxnet3_tx_stats *txs = vec_elt_at_index (vd->tx_stats, qid);
118 clib_memcpy (txs, &tx->stats, sizeof (*txs));
121 vec_foreach_index (qid, vd->rxqs)
123 vmxnet3_rx_stats *rxs = vec_elt_at_index (vd->rx_stats, qid);
124 clib_memcpy (rxs, &rx->stats, sizeof (*rxs));
/* Human-readable strings for the tx error counters, generated from the
 * foreach_vmxnet3_tx_func_error x-macro in the plugin header. */
129 static char *vmxnet3_tx_func_error_strings[] = {
131 foreach_vmxnet3_tx_func_error
/* Device class registration: wires the callbacks defined in this file
 * (admin up/down, counter clear, rx-mode change, rx redirect) and the
 * tx error string table into vnet's device framework. */
136 VNET_DEVICE_CLASS (vmxnet3_device_class,) =
138 .name = "VMXNET3 interface",
139 .format_device = format_vmxnet3_device,
140 .format_device_name = format_vmxnet3_device_name,
141 .admin_up_down_function = vmxnet3_interface_admin_up_down,
142 .clear_counters = vmxnet3_clear_hw_interface_counters,
143 .rx_mode_change_function = vmxnet3_interface_rx_mode_change,
144 .rx_redirect_to_node = vmxnet3_set_interface_next_node,
145 .tx_function_n_errors = VMXNET3_TX_N_ERROR,
146 .tx_function_error_strings = vmxnet3_tx_func_error_strings,
/* Flag-change callback passed to ethernet_register_interface() below;
 * body not visible in this view. */
151 vmxnet3_flag_change (vnet_main_t * vnm, vnet_hw_interface_t * hw, u32 flags)
/* Program the device's MAC address registers from vd->mac_addr:
 * low 4 bytes into MACL, remaining 2 bytes into MACH. */
157 vmxnet3_write_mac (vmxnet3_device_t * vd)
161 memcpy (&val, vd->mac_addr, 4);
162 vmxnet3_reg_write (vd, 1, VMXNET3_REG_MACL, val);
165 memcpy (&val, vd->mac_addr + 4, 2);
166 vmxnet3_reg_write (vd, 1, VMXNET3_REG_MACH, val);
169 static clib_error_t *
/* Allocate and populate the driver_shared region that the vmxnet3 device
 * reads via the DSAL/DSAH registers: per-queue descriptor ring addresses,
 * feature bits (RXCSUM/LRO/RSS), interrupt layout, rx filter mode.
 * Returns a clib error on physmem allocation failure, 0 on success. */
170 vmxnet3_provision_driver_shared (vlib_main_t * vm, vmxnet3_device_t * vd)
172 vmxnet3_shared *shared;
175 vmxnet3_tx_queue *tx = VMXNET3_TX_START (vd);
176 vmxnet3_rx_queue *rx = VMXNET3_RX_START (vd);
/* 512-byte alignment on the device's NUMA node, as the device requires
 * a DMA-able shared region. */
179 vlib_physmem_alloc_aligned_on_numa (vm, sizeof (*vd->driver_shared), 512,
181 if (vd->driver_shared == 0)
182 return vlib_physmem_last_error (vm);
184 clib_memset (vd->driver_shared, 0, sizeof (*vd->driver_shared));
/* Fill per-tx-queue config: DMA addresses and sizes of desc/comp rings. */
186 vec_foreach_index (qid, vd->txqs)
188 vmxnet3_txq_t *txq = vec_elt_at_index (vd->txqs, qid);
190 tx->cfg.desc_address = vmxnet3_dma_addr (vm, vd, txq->tx_desc);
191 tx->cfg.comp_address = vmxnet3_dma_addr (vm, vd, txq->tx_comp);
192 tx->cfg.num_desc = txq->size;
193 tx->cfg.num_comp = txq->size;
/* Fill per-rx-queue config: one desc ring per ring id, plus comp ring;
 * each rx queue is bound to interrupt vector qid. */
197 vec_foreach_index (qid, vd->rxqs)
199 vmxnet3_rxq_t *rxq = vec_elt_at_index (vd->rxqs, qid);
201 for (rid = 0; rid < VMXNET3_RX_RING_SIZE; rid++)
203 rx->cfg.desc_address[rid] = vmxnet3_dma_addr (vm, vd,
205 rx->cfg.num_desc[rid] = rxq->size;
207 rx->cfg.comp_address = vmxnet3_dma_addr (vm, vd, rxq->rx_comp);
208 rx->cfg.num_comp = rxq->size;
209 rx->cfg.intr_index = qid;
/* Global shared-region header: magic, guest info, feature negotiation. */
213 shared = vd->driver_shared;
214 shared->magic = VMXNET3_SHARED_MAGIC;
215 shared->misc.version = VMXNET3_VERSION_MAGIC;
216 if (sizeof (void *) == 4)
217 shared->misc.guest_info = VMXNET3_GOS_BITS_32;
219 shared->misc.guest_info = VMXNET3_GOS_BITS_64;
220 shared->misc.guest_info |= VMXNET3_GOS_TYPE_LINUX;
221 shared->misc.version_support = VMXNET3_VERSION_SELECT;
222 shared->misc.upt_features = VMXNET3_F_RXCSUM;
224 shared->misc.upt_features |= VMXNET3_F_LRO;
/* RSS is only enabled with more than one rx queue. */
225 if (vd->num_rx_queues > 1)
227 shared->misc.upt_features |= VMXNET3_F_RSS;
228 shared->rss.version = 1;
229 shared->rss.address = vmxnet3_dma_addr (vm, vd, vd->rss);
230 shared->rss.length = sizeof (*vd->rss);
232 shared->misc.max_num_rx_sg = 0;
233 shared->misc.upt_version_support = VMXNET3_UPT_VERSION_SELECT;
234 shared->misc.queue_desc_address = vmxnet3_dma_addr (vm, vd, vd->queues);
235 shared->misc.queue_desc_len = sizeof (*tx) * vd->num_tx_queues +
236 sizeof (*rx) * vd->num_rx_queues;
237 shared->misc.mtu = VMXNET3_MTU;
238 shared->misc.num_tx_queues = vd->num_tx_queues;
239 shared->misc.num_rx_queues = vd->num_rx_queues;
240 shared->interrupt.num_intrs = vd->num_intrs;
/* The event interrupt uses the vector after the per-rx-queue vectors. */
241 shared->interrupt.event_intr_index = vd->num_rx_queues;
242 shared->interrupt.control = VMXNET3_IC_DISABLE_ALL;
243 shared->rx_filter.mode = VMXNET3_RXMODE_UCAST | VMXNET3_RXMODE_BCAST |
244 VMXNET3_RXMODE_ALL_MULTI | VMXNET3_RXMODE_PROMISC;
/* Hand the shared region's DMA address to the device (low/high halves). */
245 shared_dma = vmxnet3_dma_addr (vm, vd, shared);
247 vmxnet3_reg_write (vd, 1, VMXNET3_REG_DSAL, shared_dma);
248 vmxnet3_reg_write (vd, 1, VMXNET3_REG_DSAH, shared_dma >> 32);
/* Unmask all device interrupts: clear the global disable bit in the
 * shared region, then write 0 (unmask) to each per-vector IMR register. */
254 vmxnet3_enable_interrupt (vmxnet3_device_t * vd)
257 vmxnet3_shared *shared = vd->driver_shared;
259 shared->interrupt.control &= ~VMXNET3_IC_DISABLE_ALL;
260 for (i = 0; i < vd->num_intrs; i++)
261 vmxnet3_reg_write (vd, 0, VMXNET3_REG_IMR + i * 8, 0);
/* Mask all device interrupts: set the global disable bit in the shared
 * region, then write 1 (mask) to each per-vector IMR register. */
265 vmxnet3_disable_interrupt (vmxnet3_device_t * vd)
268 vmxnet3_shared *shared = vd->driver_shared;
270 shared->interrupt.control |= VMXNET3_IC_DISABLE_ALL;
271 for (i = 0; i < vd->num_intrs; i++)
272 vmxnet3_reg_write (vd, 0, VMXNET3_REG_IMR + i * 8, 1);
275 static clib_error_t *
/* Initialize rx queue `qid` with `qsz` descriptors per ring:
 * zero the stats slot, allocate and zero the per-ring descriptor arrays
 * and the completion ring from DMA-able physmem, and set the initial
 * generation bits.  Returns a clib error on allocation failure. */
276 vmxnet3_rxq_init (vlib_main_t * vm, vmxnet3_device_t * vd, u16 qid, u16 qsz)
279 vmxnet3_rx_stats *rxs;
282 vec_validate (vd->rx_stats, qid);
283 rxs = vec_elt_at_index (vd->rx_stats, qid);
284 clib_memset (rxs, 0, sizeof (*rxs));
286 vec_validate_aligned (vd->rxqs, qid, CLIB_CACHE_LINE_BYTES);
287 rxq = vec_elt_at_index (vd->rxqs, qid);
288 clib_memset (rxq, 0, sizeof (*rxq));
/* One descriptor ring per hardware ring id (vmxnet3 has two rx rings). */
290 for (rid = 0; rid < VMXNET3_RX_RING_SIZE; rid++)
292 rxq->rx_desc[rid] = vlib_physmem_alloc_aligned_on_numa
293 (vm, qsz * sizeof (*rxq->rx_desc[rid]), 512, vd->numa_node);
295 if (rxq->rx_desc[rid] == 0)
296 return vlib_physmem_last_error (vm);
298 clib_memset (rxq->rx_desc[rid], 0, qsz * sizeof (*rxq->rx_desc[rid]));
/* Completion ring shared by both rx rings. */
301 vlib_physmem_alloc_aligned_on_numa (vm, qsz * sizeof (*rxq->rx_comp), 512,
303 if (rxq->rx_comp == 0)
304 return vlib_physmem_last_error (vm);
306 clib_memset (rxq->rx_comp, 0, qsz * sizeof (*rxq->rx_comp));
/* Per-ring software state: buffer index vector and generation flag. */
307 for (rid = 0; rid < VMXNET3_RX_RING_SIZE; rid++)
309 vmxnet3_rx_ring *ring;
311 ring = &rxq->rx_ring[rid];
312 ring->gen = VMXNET3_RXF_GEN;
314 vec_validate_aligned (ring->bufs, rxq->size, CLIB_CACHE_LINE_BYTES);
316 rxq->rx_comp_ring.gen = VMXNET3_RXCF_GEN;
321 static clib_error_t *
/* Initialize tx queue `qid` with `qsz` descriptors.  When there are more
 * vlib threads than tx queues, threads share queues (qid is folded modulo
 * num_tx_queues) and a spinlock is installed on the shared queue.
 * Allocates desc/comp rings from DMA-able physmem; returns a clib error
 * on allocation failure. */
322 vmxnet3_txq_init (vlib_main_t * vm, vmxnet3_device_t * vd, u16 qid, u16 qsz)
325 vmxnet3_tx_stats *txs;
/* Shared-queue case: reuse an already-initialized queue under a lock. */
328 if (qid >= vd->num_tx_queues)
330 qid = qid % vd->num_tx_queues;
331 txq = vec_elt_at_index (vd->txqs, qid);
333 clib_spinlock_init (&txq->lock);
334 vd->flags |= VMXNET3_DEVICE_F_SHARED_TXQ_LOCK;
338 vec_validate (vd->tx_stats, qid);
339 txs = vec_elt_at_index (vd->tx_stats, qid);
340 clib_memset (txs, 0, sizeof (*txs));
342 vec_validate_aligned (vd->txqs, qid, CLIB_CACHE_LINE_BYTES);
343 txq = vec_elt_at_index (vd->txqs, qid);
344 clib_memset (txq, 0, sizeof (*txq));
/* Each tx queue has its own TXPROD doorbell register, 8 bytes apart. */
346 txq->reg_txprod = qid * 8 + VMXNET3_REG_TXPROD;
348 size = qsz * sizeof (*txq->tx_desc);
350 vlib_physmem_alloc_aligned_on_numa (vm, size, 512, vd->numa_node);
351 if (txq->tx_desc == 0)
352 return vlib_physmem_last_error (vm);
354 memset (txq->tx_desc, 0, size);
356 size = qsz * sizeof (*txq->tx_comp);
358 vlib_physmem_alloc_aligned_on_numa (vm, size, 512, vd->numa_node);
359 if (txq->tx_comp == 0)
360 return vlib_physmem_last_error (vm);
362 clib_memset (txq->tx_comp, 0, size);
363 vec_validate_aligned (txq->tx_ring.bufs, txq->size, CLIB_CACHE_LINE_BYTES);
364 txq->tx_ring.gen = VMXNET3_TXF_GEN;
365 txq->tx_comp_ring.gen = VMXNET3_TXCF_GEN;
/* Fixed Toeplitz RSS hash key copied into the device's RSS shared region
 * by vmxnet3_rss_init(). */
370 static const u8 vmxnet3_rss_key[VMXNET3_RSS_MAX_KEY_SZ] = {
371 0x3b, 0x56, 0xd1, 0x56, 0x13, 0x4a, 0xe7, 0xac,
372 0xe8, 0x79, 0x09, 0x75, 0xe8, 0x65, 0x79, 0x28,
373 0x35, 0x12, 0xb9, 0x56, 0x7c, 0x76, 0x4b, 0x70,
374 0xd8, 0x56, 0xa3, 0x18, 0x9b, 0x0a, 0xee, 0xf3,
375 0x96, 0xa6, 0x9f, 0x8f, 0x9e, 0x8c, 0x90, 0xc9,
378 static clib_error_t *
/* Allocate and fill the RSS shared region: Toeplitz hashing over
 * IPv4/IPv6 and TCP over both, fixed key, and an indirection table that
 * round-robins over the configured rx queues.  Returns a clib error on
 * physmem allocation failure. */
379 vmxnet3_rss_init (vlib_main_t * vm, vmxnet3_device_t * vd)
381 vmxnet3_rss_shared *rss;
382 size_t size = sizeof (*rss);
385 vd->rss = vlib_physmem_alloc_aligned_on_numa (vm, size, 512, vd->numa_node);
387 return vlib_physmem_last_error (vm);
389 clib_memset (vd->rss, 0, size);
392 VMXNET3_RSS_HASH_TYPE_IPV4 | VMXNET3_RSS_HASH_TYPE_TCP_IPV4 |
393 VMXNET3_RSS_HASH_TYPE_IPV6 | VMXNET3_RSS_HASH_TYPE_TCP_IPV6;
394 rss->hash_func = VMXNET3_RSS_HASH_FUNC_TOEPLITZ;
395 rss->hash_key_sz = VMXNET3_RSS_MAX_KEY_SZ;
396 rss->ind_table_sz = VMXNET3_RSS_MAX_IND_TABLE_SZ;
397 clib_memcpy (rss->hash_key, vmxnet3_rss_key, VMXNET3_RSS_MAX_KEY_SZ);
/* Spread table entries evenly across all rx queues. */
398 for (i = 0; i < rss->ind_table_sz; i++)
399 rss->ind_table[i] = i % vd->num_rx_queues;
404 static clib_error_t *
/* Bring the device from reset to ACTIVATE:
 * quiesce + reset, negotiate hardware/UPT versions, read link state and
 * MAC, allocate the queue descriptor area, init RSS (if >1 rx queue),
 * init all rx and tx queues, provision the driver-shared region, program
 * the MAC, and finally activate.  Returns a clib error on any failure. */
405 vmxnet3_device_init (vlib_main_t * vm, vmxnet3_device_t * vd,
406 vmxnet3_create_if_args_t * args)
408 clib_error_t *error = 0;
410 vlib_thread_main_t *tm = vlib_get_thread_main ();
412 /* Quiesce the device */
413 vmxnet3_reg_write (vd, 1, VMXNET3_REG_CMD, VMXNET3_CMD_QUIESCE_DEV);
414 ret = vmxnet3_reg_read (vd, 1, VMXNET3_REG_CMD);
417 error = clib_error_return (0, "error on quiescing device rc (%u)", ret);
421 /* Reset the device */
422 vmxnet3_reg_write (vd, 1, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV);
423 ret = vmxnet3_reg_read (vd, 1, VMXNET3_REG_CMD);
426 error = clib_error_return (0, "error on resetting device rc (%u)", ret);
/* VRRS is a bitmask of supported revisions; the highest set bit gives
 * the newest hardware version the device supports. */
430 ret = vmxnet3_reg_read (vd, 1, VMXNET3_REG_VRRS);
431 vd->version = count_leading_zeros (ret);
432 vd->version = uword_bits - vd->version;
434 if (vd->version == 0)
436 error = clib_error_return (0, "unsupported hardware version %u",
441 /* cap support version to 3 */
442 vmxnet3_reg_write (vd, 1, VMXNET3_REG_VRRS,
443 1 << (clib_min (3, vd->version) - 1));
/* UPT (passthrough) version negotiation. */
445 ret = vmxnet3_reg_read (vd, 1, VMXNET3_REG_UVRS);
447 vmxnet3_reg_write (vd, 1, VMXNET3_REG_UVRS, 1);
450 error = clib_error_return (0, "unsupported upt version %u", ret);
454 /* GSO is only supported for version >= 3 */
455 if (args->enable_gso)
457 if (vd->version >= 3)
462 clib_error_return (0,
463 "GSO is not supported because hardware version"
464 " is %u. It must be >= 3", vd->version);
/* Query initial link state; the upper 16 bits of the result carry the
 * link speed in Mbps. */
469 vmxnet3_reg_write (vd, 1, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
470 ret = vmxnet3_reg_read (vd, 1, VMXNET3_REG_CMD);
473 vd->flags |= VMXNET3_DEVICE_F_LINK_UP;
474 vd->link_speed = ret >> 16;
477 vd->flags &= ~VMXNET3_DEVICE_F_LINK_UP;
479 /* Get the mac address */
480 ret = vmxnet3_reg_read (vd, 1, VMXNET3_REG_MACL);
481 clib_memcpy (vd->mac_addr, &ret, 4);
482 ret = vmxnet3_reg_read (vd, 1, VMXNET3_REG_MACH);
483 clib_memcpy (vd->mac_addr + 4, &ret, 2);
/* Contiguous queue descriptor area: all rx queue structs follow all tx
 * queue structs (see VMXNET3_TX_START / VMXNET3_RX_START). */
485 size = sizeof (vmxnet3_rx_queue) * vd->num_rx_queues +
486 sizeof (vmxnet3_tx_queue) * vd->num_tx_queues;
489 vlib_physmem_alloc_aligned_on_numa (vm, size, 512, vd->numa_node);
491 return vlib_physmem_last_error (vm);
493 clib_memset (vd->queues, 0, size);
495 if (vd->num_rx_queues > 1)
497 error = vmxnet3_rss_init (vm, vd);
502 for (i = 0; i < vd->num_rx_queues; i++)
504 error = vmxnet3_rxq_init (vm, vd, i, args->rxq_size);
/* One txq_init call per vlib thread; vmxnet3_txq_init folds excess
 * threads onto shared queues. */
509 for (i = 0; i < tm->n_vlib_mains; i++)
511 error = vmxnet3_txq_init (vm, vd, i, args->txq_size);
516 error = vmxnet3_provision_driver_shared (vm, vd);
520 vmxnet3_write_mac (vd);
522 /* Activate device */
523 vmxnet3_reg_write (vd, 1, VMXNET3_REG_CMD, VMXNET3_CMD_ACTIVATE_DEV);
524 ret = vmxnet3_reg_read (vd, 1, VMXNET3_REG_CMD);
528 clib_error_return (0, "error on activating device rc (%u)", ret);
/* MSI-X handler for rx-queue vectors: mark the matching vnet rx queue as
 * interrupt-pending so the input node runs.
 * NOTE(review): rxq is computed via vec_elt_at_index before the
 * vec_len(vd->rxqs) > qid bounds check below — confirm qid can never
 * exceed the rxqs vector here (vec_elt_at_index asserts in debug). */
536 vmxnet3_rxq_irq_handler (vlib_main_t * vm, vlib_pci_dev_handle_t h, u16 line)
538 vnet_main_t *vnm = vnet_get_main ();
539 vmxnet3_main_t *vmxm = &vmxnet3_main;
540 uword pd = vlib_pci_get_private_data (vm, h);
541 vmxnet3_device_t *vd = pool_elt_at_index (vmxm->devices, pd);
543 vmxnet3_rxq_t *rxq = vec_elt_at_index (vd->rxqs, qid);
545 if (vec_len (vd->rxqs) > qid && vd->rxqs[qid].int_mode != 0)
546 vnet_hw_if_rx_queue_set_int_pending (vnm, rxq->queue_index);
/* MSI-X handler for the event vector: re-query link state from the
 * device and propagate speed/up-down to the vnet hw interface. */
550 vmxnet3_event_irq_handler (vlib_main_t * vm, vlib_pci_dev_handle_t h,
553 vnet_main_t *vnm = vnet_get_main ();
554 vmxnet3_main_t *vmxm = &vmxnet3_main;
555 uword pd = vlib_pci_get_private_data (vm, h);
556 vmxnet3_device_t *vd = pool_elt_at_index (vmxm->devices, pd);
559 vmxnet3_reg_write (vd, 1, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
560 ret = vmxnet3_reg_read (vd, 1, VMXNET3_REG_CMD);
/* Link up: upper 16 bits of ret are speed in Mbps; vnet wants kbps. */
563 vd->flags |= VMXNET3_DEVICE_F_LINK_UP;
564 vd->link_speed = ret >> 16;
565 vnet_hw_interface_set_link_speed (vnm, vd->hw_if_index,
566 vd->link_speed * 1000);
567 vnet_hw_interface_set_flags (vnm, vd->hw_if_index,
568 VNET_HW_INTERFACE_FLAG_LINK_UP);
/* Link down path. */
572 vd->flags &= ~VMXNET3_DEVICE_F_LINK_UP;
573 vnet_hw_interface_set_flags (vnm, vd->hw_if_index, 0);
/* Validate a rx/tx queue size: must be within [64, 4096] (error messages
 * elsewhere also require a multiple of 64; that check is not visible in
 * this view). */
578 vmxnet3_queue_size_valid (u16 qsz)
580 if (qsz < 64 || qsz > 4096)
/* Validate requested tx queue count: must not exceed VMXNET3_TXQ_MAX nor
 * the number of vlib threads. */
588 vmxnet3_tx_queue_num_valid (u16 num)
590 vlib_thread_main_t *tm = vlib_get_thread_main ();
592 if ((num > VMXNET3_TXQ_MAX) || (num > tm->n_vlib_mains))
/* Validate requested rx queue count: must not exceed VMXNET3_RXQ_MAX. */
598 vmxnet3_rx_queue_num_valid (u16 num)
600 if (num > VMXNET3_RXQ_MAX)
/* Public entry point: create a vmxnet3 interface from user args.
 * Validates queue counts/sizes, rejects duplicate PCI addresses, binds
 * and opens the PCI device, maps BARs, wires MSI-X handlers, runs
 * vmxnet3_device_init(), registers the ethernet interface, and registers
 * + refills all rx queues.  On any failure, rolls back via
 * vmxnet3_delete_if() and reports through args->rv / args->error. */
606 vmxnet3_create_if (vlib_main_t * vm, vmxnet3_create_if_args_t * args)
608 vnet_main_t *vnm = vnet_get_main ();
609 vmxnet3_main_t *vmxm = &vmxnet3_main;
610 vmxnet3_device_t *vd;
611 vlib_pci_dev_handle_t h;
612 clib_error_t *error = 0;
/* Zero queue counts/sizes mean "use defaults". */
616 if (args->txq_num == 0)
618 if (args->rxq_num == 0)
620 if (!vmxnet3_rx_queue_num_valid (args->rxq_num))
622 args->rv = VNET_API_ERROR_INVALID_VALUE;
624 clib_error_return (error, "number of rx queues must be <= %u",
626 vlib_log (VLIB_LOG_LEVEL_ERR, vmxm->log_default, "%U: %s",
627 format_vlib_pci_addr, &args->addr,
628 "number of rx queues must be <= %u", VMXNET3_RXQ_MAX);
632 if (!vmxnet3_tx_queue_num_valid (args->txq_num))
634 args->rv = VNET_API_ERROR_INVALID_VALUE;
636 clib_error_return (error,
637 "number of tx queues must be <= %u and <= number of "
638 "CPU's assigned to VPP", VMXNET3_TXQ_MAX);
639 vlib_log (VLIB_LOG_LEVEL_ERR, vmxm->log_default, "%U: %s",
640 format_vlib_pci_addr, &args->addr,
641 "number of tx queues must be <= %u and <= number of "
642 "CPU's assigned to VPP", VMXNET3_TXQ_MAX);
645 if (args->rxq_size == 0)
646 args->rxq_size = VMXNET3_NUM_RX_DESC;
647 if (args->txq_size == 0)
648 args->txq_size = VMXNET3_NUM_TX_DESC;
650 if (!vmxnet3_queue_size_valid (args->rxq_size) ||
651 !vmxnet3_queue_size_valid (args->txq_size))
653 args->rv = VNET_API_ERROR_INVALID_VALUE;
655 clib_error_return (error,
656 "queue size must be <= 4096, >= 64, "
657 "and multiples of 64");
658 vlib_log (VLIB_LOG_LEVEL_ERR, vmxm->log_default, "%U: %s",
659 format_vlib_pci_addr, &args->addr,
660 "queue size must be <= 4096, >= 64, and multiples of 64");
/* Reject a PCI address already claimed by another vmxnet3 device. */
665 pool_foreach (vd, vmxm->devices) {
666 if (vd->pci_addr.as_u32 == args->addr.as_u32)
668 args->rv = VNET_API_ERROR_ADDRESS_IN_USE;
670 clib_error_return (error, "%U: %s", format_vlib_pci_addr,
671 &args->addr, "pci address in use");
672 vlib_log (VLIB_LOG_LEVEL_ERR, vmxm->log_default, "%U: %s",
673 format_vlib_pci_addr, &args->addr, "pci address in use");
/* Bind the device to a uio driver so userspace can drive it. */
681 error = vlib_pci_bind_to_uio (vm, &args->addr, (char *) "auto");
684 args->rv = VNET_API_ERROR_INVALID_INTERFACE;
686 clib_error_return (error, "%U: %s", format_vlib_pci_addr,
688 "error encountered on binding pci device");
689 vlib_log (VLIB_LOG_LEVEL_ERR, vmxm->log_default, "%U: %s",
690 format_vlib_pci_addr, &args->addr,
691 "error encountered on binding pci devicee");
697 vlib_pci_device_open (vm, &args->addr, vmxnet3_pci_device_ids, &h)))
699 args->rv = VNET_API_ERROR_INVALID_INTERFACE;
701 clib_error_return (error, "%U: %s", format_vlib_pci_addr,
703 "error encountered on pci device open");
704 vlib_log (VLIB_LOG_LEVEL_ERR, vmxm->log_default, "%U: %s",
705 format_vlib_pci_addr, &args->addr,
706 "error encountered on pci device open");
711 * Do not use vmxnet3_log_error prior to this line since the macro
712 * references vd->pci_dev_handle
714 pool_get (vmxm->devices, vd);
715 vd->num_tx_queues = args->txq_num;
716 vd->num_rx_queues = args->rxq_num;
717 vd->dev_instance = vd - vmxm->devices;
718 vd->per_interface_next_index = ~0;
719 vd->pci_addr = args->addr;
721 if (args->enable_elog)
722 vd->flags |= VMXNET3_DEVICE_F_ELOG;
724 vd->pci_dev_handle = h;
725 vd->numa_node = vlib_pci_get_numa_node (vm, h);
726 vd->num_intrs = vd->num_rx_queues + 1; // +1 for the event interrupt
728 vlib_pci_set_private_data (vm, h, vd->dev_instance);
730 if ((error = vlib_pci_bus_master_enable (vm, h)))
732 vmxnet3_log_error (vd, "error encountered on pci bus master enable");
/* BAR0: interrupt mask registers; BAR1: command/config registers. */
736 if ((error = vlib_pci_map_region (vm, h, 0, (void **) &vd->bar[0])))
738 vmxnet3_log_error (vd, "error encountered on pci map region for bar 0");
742 if ((error = vlib_pci_map_region (vm, h, 1, (void **) &vd->bar[1])))
744 vmxnet3_log_error (vd, "error encountered on pci map region for bar 1");
/* Need one MSI-X vector per rx queue plus one for events. */
748 num_intr = vlib_pci_get_num_msix_interrupts (vm, h);
749 if (num_intr < vd->num_rx_queues + 1)
751 vmxnet3_log_error (vd,
752 "No sufficient interrupt lines (%u) for rx queues",
755 clib_error_return (0,
756 "No sufficient interrupt lines (%u) for rx queues",
760 if ((error = vlib_pci_register_msix_handler (vm, h, 0, vd->num_rx_queues,
761 &vmxnet3_rxq_irq_handler)))
763 vmxnet3_log_error (vd,
764 "error encountered on pci register msix handler 0");
768 if ((error = vlib_pci_register_msix_handler (vm, h, vd->num_rx_queues, 1,
769 &vmxnet3_event_irq_handler)))
771 vmxnet3_log_error (vd,
772 "error encountered on pci register msix handler 1");
776 if ((error = vlib_pci_enable_msix_irq (vm, h, 0, vd->num_rx_queues + 1)))
778 vmxnet3_log_error (vd, "error encountered on pci enable msix irq");
782 if ((error = vlib_pci_intr_enable (vm, h)))
784 vmxnet3_log_error (vd, "error encountered on pci interrupt enable");
788 if ((error = vmxnet3_device_init (vm, vd, args)))
790 vmxnet3_log_error (vd, "error encountered on device init");
794 /* create interface */
795 error = ethernet_register_interface (vnm, vmxnet3_device_class.index,
796 vd->dev_instance, vd->mac_addr,
797 &vd->hw_if_index, vmxnet3_flag_change);
801 vmxnet3_log_error (vd,
802 "error encountered on ethernet register interface");
806 vnet_sw_interface_t *sw = vnet_get_hw_sw_interface (vnm, vd->hw_if_index);
807 vd->sw_if_index = sw->sw_if_index;
808 args->sw_if_index = sw->sw_if_index;
810 vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, vd->hw_if_index);
811 hw->flags |= VNET_HW_INTERFACE_FLAG_SUPPORTS_INT_MODE;
813 hw->flags |= (VNET_HW_INTERFACE_FLAG_SUPPORTS_GSO |
814 VNET_HW_INTERFACE_FLAG_SUPPORTS_TX_L4_CKSUM_OFFLOAD);
816 vnet_hw_if_set_input_node (vnm, vd->hw_if_index, vmxnet3_input_node.index);
817 /* Disable interrupts */
818 vmxnet3_disable_interrupt (vd);
/* Register each rx queue with vnet, tie it to its MSI-X file index for
 * interrupt mode, and pre-fill both hardware rings with buffers. */
819 vec_foreach_index (qid, vd->rxqs)
821 vmxnet3_rxq_t *rxq = vec_elt_at_index (vd->rxqs, qid);
824 qi = vnet_hw_if_register_rx_queue (vnm, vd->hw_if_index, qid,
825 VNET_HW_IF_RXQ_THREAD_ANY);
826 fi = vlib_pci_get_msix_file_index (vm, vd->pci_dev_handle, qid);
827 vnet_hw_if_set_rx_queue_file_index (vnm, qi, fi);
828 rxq->queue_index = qi;
/* NOTE(review): buffer_pool_index is assigned from
 * vnet_hw_if_get_rx_queue_numa_node() — looks like a numa node, not a
 * pool index; confirm against the header's intended semantics. */
829 rxq->buffer_pool_index =
830 vnet_hw_if_get_rx_queue_numa_node (vnm, rxq->queue_index);
831 vmxnet3_rxq_refill_ring0 (vm, vd, rxq);
832 vmxnet3_rxq_refill_ring1 (vm, vd, rxq);
834 vnet_hw_if_update_runtime_data (vnm, vd->hw_if_index);
836 vd->flags |= VMXNET3_DEVICE_F_INITIALIZED;
837 vmxnet3_enable_interrupt (vd);
/* Publish initial link speed (Mbps -> kbps) and state. */
839 vnet_hw_interface_set_link_speed (vnm, vd->hw_if_index,
840 vd->link_speed * 1000);
841 if (vd->flags & VMXNET3_DEVICE_F_LINK_UP)
842 vnet_hw_interface_set_flags (vnm, vd->hw_if_index,
843 VNET_HW_INTERFACE_FLAG_LINK_UP);
845 vnet_hw_interface_set_flags (vnm, vd->hw_if_index, 0);
/* Error path: tear down everything allocated so far. */
849 vmxnet3_delete_if (vm, vd);
850 args->rv = VNET_API_ERROR_INVALID_INTERFACE;
/* Tear down a vmxnet3 interface: quiesce and reset the device, remove
 * the ethernet interface, close the PCI handle, free all rx/tx ring
 * buffers and physmem, free shared regions, and return the device slot
 * to the pool.  Also used as the error-rollback path of create_if. */
855 vmxnet3_delete_if (vlib_main_t * vm, vmxnet3_device_t * vd)
857 vnet_main_t *vnm = vnet_get_main ();
858 vmxnet3_main_t *vmxm = &vmxnet3_main;
862 /* Quiesce the device */
863 vmxnet3_reg_write (vd, 1, VMXNET3_REG_CMD, VMXNET3_CMD_QUIESCE_DEV);
865 /* Reset the device */
866 vmxnet3_reg_write (vd, 1, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV);
870 vnet_hw_interface_set_flags (vnm, vd->hw_if_index, 0);
871 ethernet_delete_interface (vnm, vd->hw_if_index);
874 vlib_pci_device_close (vm, vd->pci_dev_handle);
/* Free rx side: outstanding buffers in each ring, the buffer index
 * vectors, and the DMA descriptor/completion rings. */
877 vec_foreach_index (i, vd->rxqs)
879 vmxnet3_rxq_t *rxq = vec_elt_at_index (vd->rxqs, i);
880 u16 mask = rxq->size - 1;
883 for (rid = 0; rid < VMXNET3_RX_RING_SIZE; rid++)
885 vmxnet3_rx_ring *ring;
887 ring = &rxq->rx_ring[rid];
888 desc_idx = (ring->consume + 1) & mask;
889 vlib_buffer_free_from_ring (vm, ring->bufs, desc_idx, rxq->size,
891 vec_free (ring->bufs);
892 vlib_physmem_free (vm, rxq->rx_desc[rid]);
894 vlib_physmem_free (vm, rxq->rx_comp);
898 vec_free (vd->rx_stats);
/* Free tx side: walk consume..produce to release in-flight buffers,
 * then the lock, index vector, and DMA rings. */
901 vec_foreach_index (i, vd->txqs)
903 vmxnet3_txq_t *txq = vec_elt_at_index (vd->txqs, i);
904 u16 mask = txq->size - 1;
907 desc_idx = txq->tx_ring.consume;
908 end_idx = txq->tx_ring.produce;
909 while (desc_idx != end_idx)
911 bi = txq->tx_ring.bufs[desc_idx];
912 vlib_buffer_free_no_next (vm, &bi, 1);
916 clib_spinlock_free (&txq->lock);
917 vec_free (txq->tx_ring.bufs);
918 vlib_physmem_free (vm, txq->tx_desc);
919 vlib_physmem_free (vm, txq->tx_comp);
923 vec_free (vd->tx_stats);
/* vlib_physmem_free tolerates NULL, so unconditional frees are safe. */
925 vlib_physmem_free (vm, vd->driver_shared);
926 vlib_physmem_free (vm, vd->queues);
927 vlib_physmem_free (vm, vd->rss);
929 clib_error_free (vd->error);
930 clib_memset (vd, 0, sizeof (*vd));
931 pool_put (vmxm->devices, vd);
936 * fd.io coding-style-patch-verification: ON
939 * eval: (c-set-style "gnu")