u32 queue_index);
void vnet_hw_if_set_rx_queue_thread_index (vnet_main_t *vnm, u32 queue_index,
					   u32 thread_index);
/* NOTE(review): this declaration carried a stray '-' diff marker in the
 * checked-in file. The marker is removed here; if the intent of the patch
 * was to delete the prototype entirely, confirm against the .c definition
 * before dropping it. */
void vnet_hw_if_update_runtime_data (vnet_main_t *vnm, u32 hw_if_index);
void vnet_hw_if_generate_rxq_int_poll_vector (vlib_main_t *vm,
					      vlib_node_runtime_t *node);
/* NOTE(review): the signature of this function is not visible in this chunk
 * and the body contained unresolved unified-diff residue ('-'/'+' prefixed
 * lines). The residue is resolved below by keeping the '+' side (the
 * vlib_get_main_by_index() lookup and the mode guard). Given the final
 * statement returns rxq->thread_index, the function is presumed to return
 * u32 — TODO confirm against the real signature. */
{
  vnet_hw_if_rx_queue_t *rxq = vnet_hw_if_get_rx_queue (vnm, queue_index);
  vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, rxq->hw_if_index);
  /* resolve the vlib_main_t of the worker thread this queue is pinned to */
  vlib_main_t *vm = vlib_get_main_by_index (rxq->thread_index);
  vnet_hw_if_rx_node_runtime_t *rt;

  /* only interrupt/adaptive mode queues have an interrupt source to raise;
   * pure polling queues bail out early.
   * NOTE(review): the original early-exit was a bare 'return;', which is
   * inconsistent with the value-returning tail below — unified here to
   * return the queue's thread index in both paths; verify intent. */
  if (PREDICT_FALSE (rxq->mode != VNET_HW_IF_RX_MODE_INTERRUPT &&
		     rxq->mode != VNET_HW_IF_RX_MODE_ADAPTIVE))
    return rxq->thread_index;

  rt = vlib_node_get_runtime_data (vm, hi->input_node_index);
  /* NOTE(review): the interrupt is only set when the queue's thread is the
   * calling thread; the cross-thread path (atomic set + pending-interrupt
   * notification on the owning thread) appears to be missing — confirm
   * against the upstream implementation. */
  if (vm == vlib_get_main ())
    clib_interrupt_set (rt->rxq_interrupts, queue_index);
  return rxq->thread_index;
}
+static_always_inline int
+vnet_hw_if_rxq_cmp_cli_api (vnet_hw_if_rx_queue_t **a,
+ vnet_hw_if_rx_queue_t **b)
+{
+ vnet_main_t *vnm;
+ vnet_hw_interface_t *hif_a;
+ vnet_hw_interface_t *hif_b;
+
+ if (*a == *b)
+ return 0;
+
+ if (a[0]->thread_index != b[0]->thread_index)
+ return 2 * (a[0]->thread_index > b[0]->thread_index) - 1;
+
+ vnm = vnet_get_main ();
+ hif_a = vnet_get_hw_interface (vnm, a[0]->hw_if_index);
+ hif_b = vnet_get_hw_interface (vnm, b[0]->hw_if_index);
+
+ if (hif_a->input_node_index != hif_b->input_node_index)
+ return 2 * (hif_a->input_node_index > hif_b->input_node_index) - 1;
+
+ if (a[0]->hw_if_index != b[0]->hw_if_index)
+ return 2 * (a[0]->hw_if_index > b[0]->hw_if_index) - 1;
+
+ if (a[0]->queue_id != b[0]->queue_id)
+ return 2 * (a[0]->queue_id > b[0]->queue_id) - 1;
+
+ ASSERT (0);
+ return ~0;
+}
+
/*
* fd.io coding-style-patch-verification: ON
*