When an RX thread handles more than one RX queue and those queues are
a mix of interrupt mode and polling mode, the RX input routine naturally
runs in polling mode. In that case, there is no need to set RX interrupt
pending when a descriptor becomes available on a queue that is in
interrupt mode.
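
For illustration, the gating logic this change introduces can be condensed
into the sketch below. The helper name vmxnet3_set_int_pending_maybe is
hypothetical and used only for exposition; the patch itself performs the
same check inline in the device input path. All types, fields, and
functions referenced are either existing VPP APIs or introduced by the
hunks that follow.

  /* Sketch only, not part of the patch: an interrupt-mode queue raises
     interrupt-pending only if no queue on the same thread is polled. */
  static_always_inline void
  vmxnet3_set_int_pending_maybe (vnet_main_t *vnm, vmxnet3_main_t *vmxm,
                                 vmxnet3_rxq_t *rxq)
  {
    /* A queue that is itself polled never takes the interrupt path. */
    if (rxq->mode == VNET_HW_IF_RX_MODE_POLLING)
      return;

    vmxnet3_per_thread_data_t *ptd =
      vec_elt_at_index (vmxm->per_thread_data, rxq->thread_index);

    /* If the owning thread polls any queue, its input node already runs
       in polling mode and will drain this queue's descriptors anyway. */
    if (ptd->polling_q_count == 0)
      vnet_hw_if_rx_queue_set_int_pending (vnm, rxq->queue_index);
  }

The per-thread polling_q_count is kept consistent on every rx-mode change,
queue setup, and queue teardown, as the hunks below show.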
Type: fix
Signed-off-by: Steven Luong <sluong@cisco.com>
Change-Id: Iedbe57941eca3152c0e8ab9096cc81f315e0a915
vmxnet3_tx_comp *tx_comp;
u16 qid;
+ vlib_cli_output (vm, "Global:");
+ for (u32 tid = 0; tid <= vlib_num_workers (); tid++)
+ {
+ vmxnet3_per_thread_data_t *ptd =
+ vec_elt_at_index (vmxm->per_thread_data, tid);
+ vlib_cli_output (vm, " Thread %u: polling queue count %u", tid,
+ ptd->polling_q_count);
+ }
+
if (!hw_if_indices)
return;
vmxnet3_cli_init (vlib_main_t * vm)
{
vmxnet3_main_t *vmxm = &vmxnet3_main;
+ vlib_thread_main_t *tm = vlib_get_thread_main ();
/* initialize binary API */
vmxnet3_plugin_api_hookup (vm);
vmxm->log_default = vlib_log_register_class ("vmxnet3", 0);
+
+ vec_validate (vmxm->per_thread_data, tm->n_vlib_mains - 1);
vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
vmxnet3_device_t *vd = pool_elt_at_index (vmxm->devices, hw->dev_instance);
vmxnet3_rxq_t *rxq = vec_elt_at_index (vd->rxqs, qid);
+ vmxnet3_per_thread_data_t *ptd;
- if (mode == VNET_HW_IF_RX_MODE_POLLING)
- rxq->int_mode = 0;
+ if (mode == rxq->mode)
+ return 0;
+ if ((mode != VNET_HW_IF_RX_MODE_POLLING) &&
+ (mode != VNET_HW_IF_RX_MODE_INTERRUPT))
+ return clib_error_return (0, "Rx mode %U not supported",
+ format_vnet_hw_if_rx_mode, mode);
+ rxq->mode = mode;
+ ptd = vec_elt_at_index (vmxm->per_thread_data, rxq->thread_index);
+ if (rxq->mode == VNET_HW_IF_RX_MODE_POLLING)
+ ptd->polling_q_count++;
+ else
+ {
+ ASSERT (ptd->polling_q_count != 0);
+ ptd->polling_q_count--;
+ }
rxq = vec_elt_at_index (vd->rxqs, qid);
clib_memset (rxq, 0, sizeof (*rxq));
rxq->size = qsz;
+ rxq->mode = VNET_HW_IF_RX_MODE_POLLING;
for (rid = 0; rid < VMXNET3_RX_RING_SIZE; rid++)
{
rxq->rx_desc[rid] = vlib_physmem_alloc_aligned_on_numa
u16 qid = line;
vmxnet3_rxq_t *rxq = vec_elt_at_index (vd->rxqs, qid);
- if (vec_len (vd->rxqs) > qid && vd->rxqs[qid].int_mode != 0)
- vnet_hw_if_rx_queue_set_int_pending (vnm, rxq->queue_index);
+ if (vec_len (vd->rxqs) > qid && (rxq->mode != VNET_HW_IF_RX_MODE_POLLING))
+ {
+ vmxnet3_per_thread_data_t *ptd =
+ vec_elt_at_index (vmxm->per_thread_data, rxq->thread_index);
+ if (ptd->polling_q_count == 0)
+ vnet_hw_if_rx_queue_set_int_pending (vnm, rxq->queue_index);
+ }
{
vmxnet3_rxq_t *rxq = vec_elt_at_index (vd->rxqs, qid);
u32 qi, fi;
+ vmxnet3_per_thread_data_t *ptd;
qi = vnet_hw_if_register_rx_queue (vnm, vd->hw_if_index, qid,
VNET_HW_IF_RXQ_THREAD_ANY);
fi = vlib_pci_get_msix_file_index (vm, vd->pci_dev_handle, qid);
vnet_hw_if_set_rx_queue_file_index (vnm, qi, fi);
rxq->queue_index = qi;
+ rxq->thread_index =
+ vnet_hw_if_get_rx_queue_thread_index (vnm, rxq->queue_index);
+ if (rxq->mode == VNET_HW_IF_RX_MODE_POLLING)
+ {
+ ptd = vec_elt_at_index (vmxm->per_thread_data, rxq->thread_index);
+ ptd->polling_q_count++;
+ }
rxq->buffer_pool_index =
vnet_hw_if_get_rx_queue_numa_node (vnm, rxq->queue_index);
vmxnet3_rxq_refill_ring0 (vm, vd, rxq);
vmxnet3_rxq_t *rxq = vec_elt_at_index (vd->rxqs, i);
u16 mask = rxq->size - 1;
u16 rid;
+ vmxnet3_per_thread_data_t *ptd =
+ vec_elt_at_index (vmxm->per_thread_data, rxq->thread_index);
+ if (rxq->mode == VNET_HW_IF_RX_MODE_POLLING)
+ {
+ ASSERT (ptd->polling_q_count != 0);
+ ptd->polling_q_count--;
+ }
for (rid = 0; rid < VMXNET3_RX_RING_SIZE; rid++)
{
vmxnet3_rx_ring *ring;
u16 next;
} vmxnet3_rx_comp_ring;
+typedef struct
+{
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
+ u32 polling_q_count;
+} vmxnet3_per_thread_data_t;
+
typedef struct
{
CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
u16 size;
u8 buffer_pool_index;
u32 queue_index;
+ u32 thread_index;
+ vnet_hw_if_rx_mode mode;
vmxnet3_rx_ring rx_ring[VMXNET3_RX_RING_SIZE];
vmxnet3_rx_desc *rx_desc[VMXNET3_RX_RING_SIZE];
vmxnet3_rx_comp *rx_comp;
vmxnet3_device_t *devices;
u16 msg_id_base;
vlib_log_class_t log_default;
+ vmxnet3_per_thread_data_t *per_thread_data;
} vmxnet3_main_t;
extern vmxnet3_main_t vmxnet3_main;