diff --git a/src/plugins/vmxnet3/vmxnet3.c b/src/plugins/vmxnet3/vmxnet3.c
index ec0ab2b6863..16d7d86fb3a 100644
--- a/src/plugins/vmxnet3/vmxnet3.c
+++ b/src/plugins/vmxnet3/vmxnet3.c
@@ -19,7 +19,8 @@
 #include <vlib/vlib.h>
 #include <vlib/unix/unix.h>
 #include <vlib/pci/pci.h>
-
+#include <vnet/interface/rx_queue_funcs.h>
+#include <vnet/interface/tx_queue_funcs.h>
 #include <vmxnet3/vmxnet3.h>
 
 #define PCI_VENDOR_ID_VMWARE 0x15ad
@@ -68,11 +69,23 @@ vmxnet3_interface_rx_mode_change (vnet_main_t * vnm, u32 hw_if_index, u32 qid,
   vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
   vmxnet3_device_t *vd = pool_elt_at_index (vmxm->devices, hw->dev_instance);
   vmxnet3_rxq_t *rxq = vec_elt_at_index (vd->rxqs, qid);
+  vmxnet3_per_thread_data_t *ptd;
 
-  if (mode == VNET_HW_IF_RX_MODE_POLLING)
-    rxq->int_mode = 0;
+  if (mode == rxq->mode)
+    return 0;
+  if ((mode != VNET_HW_IF_RX_MODE_POLLING) &&
+      (mode != VNET_HW_IF_RX_MODE_INTERRUPT))
+    return clib_error_return (0, "Rx mode %U not supported",
+                              format_vnet_hw_if_rx_mode, mode);
+  rxq->mode = mode;
+  ptd = vec_elt_at_index (vmxm->per_thread_data, rxq->thread_index);
+  if (rxq->mode == VNET_HW_IF_RX_MODE_POLLING)
+    ptd->polling_q_count++;
   else
-    rxq->int_mode = 1;
+    {
+      ASSERT (ptd->polling_q_count != 0);
+      ptd->polling_q_count--;
+    }
 
   return 0;
 }
@@ -287,6 +300,7 @@ vmxnet3_rxq_init (vlib_main_t * vm, vmxnet3_device_t * vd, u16 qid, u16 qsz)
   rxq = vec_elt_at_index (vd->rxqs, qid);
   clib_memset (rxq, 0, sizeof (*rxq));
   rxq->size = qsz;
+  rxq->mode = VNET_HW_IF_RX_MODE_POLLING;
   for (rid = 0; rid < VMXNET3_RX_RING_SIZE; rid++)
     {
       rxq->rx_desc[rid] = vlib_physmem_alloc_aligned_on_numa
@@ -325,23 +339,15 @@ vmxnet3_txq_init (vlib_main_t * vm, vmxnet3_device_t * vd, u16 qid, u16 qsz)
   vmxnet3_tx_stats *txs;
   u32 size;
 
-  if (qid >= vd->num_tx_queues)
-    {
-      qid = qid % vd->num_tx_queues;
-      txq = vec_elt_at_index (vd->txqs, qid);
-      if (txq->lock == 0)
-        clib_spinlock_init (&txq->lock);
-      vd->flags |= VMXNET3_DEVICE_F_SHARED_TXQ_LOCK;
-      return 0;
-    }
+  vec_validate_aligned (vd->txqs, qid, CLIB_CACHE_LINE_BYTES);
+  txq = vec_elt_at_index (vd->txqs, qid);
+  clib_memset (txq, 0, sizeof (*txq));
+  clib_spinlock_init (&txq->lock);
 
   vec_validate (vd->tx_stats, qid);
   txs = vec_elt_at_index (vd->tx_stats, qid);
   clib_memset (txs, 0, sizeof (*txs));
 
-  vec_validate_aligned (vd->txqs, qid, CLIB_CACHE_LINE_BYTES);
-  txq = vec_elt_at_index (vd->txqs, qid);
-  clib_memset (txq, 0, sizeof (*txq));
   txq->size = qsz;
   txq->reg_txprod = qid * 8 + VMXNET3_REG_TXPROD;
@@ -351,7 +357,7 @@ vmxnet3_txq_init (vlib_main_t * vm, vmxnet3_device_t * vd, u16 qid, u16 qsz)
   if (txq->tx_desc == 0)
     return vlib_physmem_last_error (vm);
 
-  memset (txq->tx_desc, 0, size);
+  clib_memset (txq->tx_desc, 0, size);
 
   size = qsz * sizeof (*txq->tx_comp);
   txq->tx_comp =
@@ -407,7 +413,6 @@ vmxnet3_device_init (vlib_main_t * vm, vmxnet3_device_t * vd,
 {
   clib_error_t *error = 0;
   u32 ret, i, size;
-  vlib_thread_main_t *tm = vlib_get_thread_main ();
 
   /* Quiesce the device */
   vmxnet3_reg_write (vd, 1, VMXNET3_REG_CMD, VMXNET3_CMD_QUIESCE_DEV);
@@ -506,7 +511,7 @@ vmxnet3_device_init (vlib_main_t * vm, vmxnet3_device_t * vd,
       return error;
     }
 
-  for (i = 0; i < tm->n_vlib_mains; i++)
+  for (i = 0; i < vd->num_tx_queues; i++)
    {
      error = vmxnet3_txq_init (vm, vd, i, args->txq_size);
      if (error)
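Note on the rx-mode hunk above: the per-queue int_mode flag is replaced by per-thread bookkeeping. Each worker thread counts how many of its rx queues are in polling mode, and a queue only touches that counter on a real mode transition. Below is a minimal standalone model of just that bookkeeping; rx_mode_t, per_thread_data_t and rxq_t are simplified stand-ins for illustration, not the real VPP definitions.

#include <assert.h>
#include <stdio.h>

typedef enum { RX_MODE_POLLING, RX_MODE_INTERRUPT } rx_mode_t;

typedef struct { unsigned polling_q_count; } per_thread_data_t;
typedef struct { rx_mode_t mode; unsigned thread_index; } rxq_t;

static void
rx_mode_change (per_thread_data_t *ptd, rxq_t *rxq, rx_mode_t mode)
{
  if (mode == rxq->mode)
    return;			/* no transition, no bookkeeping */
  rxq->mode = mode;
  if (mode == RX_MODE_POLLING)
    ptd[rxq->thread_index].polling_q_count++;
  else
    {
      /* every switch away from polling must undo exactly one count */
      assert (ptd[rxq->thread_index].polling_q_count != 0);
      ptd[rxq->thread_index].polling_q_count--;
    }
}

int
main (void)
{
  per_thread_data_t threads[2] = { { 0 }, { 0 } };
  rxq_t q0 = { RX_MODE_POLLING, 0 };	/* queues start out polling */

  threads[0].polling_q_count = 1;	/* counted once at create time */
  rx_mode_change (threads, &q0, RX_MODE_INTERRUPT);
  printf ("thread 0 polling queues: %u\n", threads[0].polling_q_count);
  return 0;
}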
@@ -540,9 +545,15 @@ vmxnet3_rxq_irq_handler (vlib_main_t * vm, vlib_pci_dev_handle_t h, u16 line)
   uword pd = vlib_pci_get_private_data (vm, h);
   vmxnet3_device_t *vd = pool_elt_at_index (vmxm->devices, pd);
   u16 qid = line;
+  vmxnet3_rxq_t *rxq = vec_elt_at_index (vd->rxqs, qid);
 
-  if (vec_len (vd->rxqs) > qid && vd->rxqs[qid].int_mode != 0)
-    vnet_device_input_set_interrupt_pending (vnm, vd->hw_if_index, qid);
+  if (vec_len (vd->rxqs) > qid && (rxq->mode != VNET_HW_IF_RX_MODE_POLLING))
+    {
+      vmxnet3_per_thread_data_t *ptd =
+        vec_elt_at_index (vmxm->per_thread_data, rxq->thread_index);
+      if (ptd->polling_q_count == 0)
+        vnet_hw_if_rx_queue_set_int_pending (vnm, rxq->queue_index);
+    }
 }
 
 static void
@@ -661,7 +672,7 @@ vmxnet3_create_if (vlib_main_t * vm, vmxnet3_create_if_args_t * args)
     }
 
   /* *INDENT-OFF* */
-  pool_foreach (vd, vmxm->devices, ({
+  pool_foreach (vd, vmxm->devices) {
     if (vd->pci_addr.as_u32 == args->addr.as_u32)
       {
        args->rv = VNET_API_ERROR_ADDRESS_IN_USE;
@@ -672,7 +683,7 @@ vmxnet3_create_if (vlib_main_t * vm, vmxnet3_create_if_args_t * args)
          format_vlib_pci_addr, &args->addr, "pci address in use");
       return;
      }
-  }));
+  }
   /* *INDENT-ON* */
 
   if (args->bind)
@@ -807,30 +818,54 @@ vmxnet3_create_if (vlib_main_t * vm, vmxnet3_create_if_args_t * args)
   args->sw_if_index = sw->sw_if_index;
 
   vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, vd->hw_if_index);
-  hw->flags |= VNET_HW_INTERFACE_FLAG_SUPPORTS_INT_MODE;
+  hw->caps |= VNET_HW_INTERFACE_CAP_SUPPORTS_INT_MODE;
   if (vd->gso_enable)
-    hw->flags |= (VNET_HW_INTERFACE_FLAG_SUPPORTS_GSO |
-                  VNET_HW_INTERFACE_FLAG_SUPPORTS_TX_L4_CKSUM_OFFLOAD);
+    {
+      hw->caps |= (VNET_HW_INTERFACE_CAP_SUPPORTS_TCP_GSO |
                   VNET_HW_INTERFACE_CAP_SUPPORTS_TX_TCP_CKSUM |
+                   VNET_HW_INTERFACE_CAP_SUPPORTS_TX_UDP_CKSUM);
+    }
 
-  vnet_hw_interface_set_input_node (vnm, vd->hw_if_index,
-                                    vmxnet3_input_node.index);
+  vnet_hw_if_set_input_node (vnm, vd->hw_if_index, vmxnet3_input_node.index);
   /* Disable interrupts */
   vmxnet3_disable_interrupt (vd);
   vec_foreach_index (qid, vd->rxqs)
     {
       vmxnet3_rxq_t *rxq = vec_elt_at_index (vd->rxqs, qid);
-      u32 thread_index;
-      u32 numa_node;
-
-      vnet_hw_interface_assign_rx_thread (vnm, vd->hw_if_index, qid, ~0);
-      thread_index = vnet_get_device_input_thread_index (vnm, vd->hw_if_index,
-                                                         qid);
-      numa_node = vlib_mains[thread_index]->numa_node;
+      u32 qi, fi;
+      vmxnet3_per_thread_data_t *ptd;
+
+      qi = vnet_hw_if_register_rx_queue (vnm, vd->hw_if_index, qid,
+                                         VNET_HW_IF_RXQ_THREAD_ANY);
+      fi = vlib_pci_get_msix_file_index (vm, vd->pci_dev_handle, qid);
+      vnet_hw_if_set_rx_queue_file_index (vnm, qi, fi);
+      rxq->queue_index = qi;
+      rxq->thread_index =
+        vnet_hw_if_get_rx_queue_thread_index (vnm, rxq->queue_index);
+      if (rxq->mode == VNET_HW_IF_RX_MODE_POLLING)
+        {
+          ptd = vec_elt_at_index (vmxm->per_thread_data, rxq->thread_index);
+          ptd->polling_q_count++;
+        }
       rxq->buffer_pool_index =
-        vlib_buffer_pool_get_default_for_numa (vm, numa_node);
+        vlib_buffer_pool_get_default_for_numa (
+          vm, vnet_hw_if_get_rx_queue_numa_node (vnm, rxq->queue_index));
       vmxnet3_rxq_refill_ring0 (vm, vd, rxq);
       vmxnet3_rxq_refill_ring1 (vm, vd, rxq);
     }
+
+  vec_foreach_index (qid, vd->txqs)
+    {
+      vmxnet3_txq_t *txq = vec_elt_at_index (vd->txqs, qid);
+      txq->queue_index =
+        vnet_hw_if_register_tx_queue (vnm, vd->hw_if_index, qid);
+    }
+  for (u32 i = 0; i < vlib_get_n_threads (); i++)
+    {
+      u32 qi = vd->txqs[i % vd->num_tx_queues].queue_index;
+      vnet_hw_if_tx_queue_assign_thread (vnm, qi, i);
+    }
+  vnet_hw_if_update_runtime_data (vnm, vd->hw_if_index);
+
   vd->flags |= VMXNET3_DEVICE_F_INITIALIZED;
   vmxnet3_enable_interrupt (vd);
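The create-path hunk above registers every rx and tx queue with the new queue infra, then places tx queues round-robin across threads. With more threads than tx queues several threads end up sharing one queue, which is why vmxnet3_txq_init now puts a spinlock on every queue instead of the old VMXNET3_DEVICE_F_SHARED_TXQ_LOCK special case. A tiny self-contained model of the modulo placement follows; the thread and queue counts are made-up example values, not anything the patch mandates.

#include <stdio.h>

int
main (void)
{
  const unsigned n_threads = 5;	/* e.g. main thread + 4 workers */
  const unsigned n_txq = 2;	/* tx queues configured on the device */

  /* same placement as vd->txqs[i % vd->num_tx_queues] in the hunk above;
   * when n_threads > n_txq several threads share a queue, hence the
   * unconditional per-queue spinlock */
  for (unsigned i = 0; i < n_threads; i++)
    printf ("thread %u -> txq %u\n", i, i % n_txq);
  return 0;
}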
@@ -855,7 +890,7 @@ vmxnet3_delete_if (vlib_main_t * vm, vmxnet3_device_t * vd)
   vnet_main_t *vnm = vnet_get_main ();
   vmxnet3_main_t *vmxm = &vmxnet3_main;
   u32 i, bi;
-  u16 desc_idx, qid;
+  u16 desc_idx;
 
   /* Quiesce the device */
   vmxnet3_reg_write (vd, 1, VMXNET3_REG_CMD, VMXNET3_CMD_QUIESCE_DEV);
@@ -866,8 +901,6 @@ vmxnet3_delete_if (vlib_main_t * vm, vmxnet3_device_t * vd)
   if (vd->hw_if_index)
     {
       vnet_hw_interface_set_flags (vnm, vd->hw_if_index, 0);
-      vec_foreach_index (qid, vd->rxqs)
-        vnet_hw_interface_unassign_rx_thread (vnm, vd->hw_if_index, qid);
       ethernet_delete_interface (vnm, vd->hw_if_index);
     }
 
@@ -879,7 +912,14 @@ vmxnet3_delete_if (vlib_main_t * vm, vmxnet3_device_t * vd)
       vmxnet3_rxq_t *rxq = vec_elt_at_index (vd->rxqs, i);
       u16 mask = rxq->size - 1;
       u16 rid;
+      vmxnet3_per_thread_data_t *ptd =
+        vec_elt_at_index (vmxm->per_thread_data, rxq->thread_index);
 
+      if (rxq->mode == VNET_HW_IF_RX_MODE_POLLING)
+        {
+          ASSERT (ptd->polling_q_count != 0);
+          ptd->polling_q_count--;
+        }
       for (rid = 0; rid < VMXNET3_RX_RING_SIZE; rid++)
        {
          vmxnet3_rx_ring *ring;
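Taken together, the irq-handler and delete-path hunks maintain one invariant: an MSI-X interrupt is forwarded to the interrupt-mode input path only when the target queue is not polled and no queue on that thread is polling (a polling thread will pick the packets up anyway), and every queue counted as polling at create time is uncounted at delete time. A compact standalone model of the handler's gate, reusing the stand-in types from the first sketch (again illustrative, not the VPP API):

#include <stdbool.h>
#include <stdio.h>

typedef enum { RX_MODE_POLLING, RX_MODE_INTERRUPT } rx_mode_t;
typedef struct { unsigned polling_q_count; } per_thread_data_t;
typedef struct { rx_mode_t mode; unsigned thread_index; } rxq_t;

static bool
irq_should_set_pending (const per_thread_data_t *ptd, const rxq_t *rxq)
{
  if (rxq->mode == RX_MODE_POLLING)
    return false;		/* polled queues ignore their interrupt */
  /* any polling queue on this thread keeps the thread in its poll loop,
   * so scheduling an interrupt dispatch would be redundant */
  return ptd[rxq->thread_index].polling_q_count == 0;
}

int
main (void)
{
  per_thread_data_t threads[1] = { { 1 } };  /* one polled queue on thread 0 */
  rxq_t q1 = { RX_MODE_INTERRUPT, 0 };

  printf ("forward irq: %s\n",
          irq_should_set_pending (threads, &q1) ? "yes" : "no");
  threads[0].polling_q_count = 0;  /* last polled queue switched away */
  printf ("forward irq: %s\n",
          irq_should_set_pending (threads, &q1) ? "yes" : "no");
  return 0;
}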