vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, hw_if_index);
u32 node_index = hi->input_node_index;
vnet_hw_if_rx_queue_t *rxq;
- vnet_hw_if_rxq_poll_vector_t *pv, **d = 0;
+ vnet_hw_if_rxq_poll_vector_t *pv, **d = 0, **a = 0;
vnet_hw_if_output_node_runtime_t *new_out_runtimes = 0;
vlib_node_state_t *per_thread_node_state = 0;
u32 n_threads = vlib_get_n_threads ();
format_vlib_node_name, vm, node_index, hi->name);
vec_validate (d, n_threads - 1);
+ vec_validate (a, n_threads - 1);
vec_validate_init_empty (per_thread_node_state, n_threads - 1,
VLIB_NODE_STATE_DISABLED);
vec_validate_init_empty (per_thread_node_adaptive, n_threads - 1, 0);
rxq->mode == VNET_HW_IF_RX_MODE_ADAPTIVE)
last_int = clib_max (last_int, rxq - im->hw_if_rx_queues);
+ if (per_thread_node_adaptive[ti])
+ {
+ vec_add2_aligned (a[ti], pv, 1, CLIB_CACHE_LINE_BYTES);
+ pv->dev_instance = rxq->dev_instance;
+ pv->queue_id = rxq->queue_id;
+ }
+
if (per_thread_node_state[ti] != VLIB_NODE_STATE_POLLING)
continue;
{
vnet_hw_if_rx_node_runtime_t *rt;
rt = vlib_node_get_runtime_data (ovm, node_index);
- if (vec_len (rt->rxq_poll_vector) != vec_len (d[i]))
+ if (vec_len (rt->rxq_vector_int) != vec_len (d[i]))
something_changed_on_rx = 1;
- else if (memcmp (d[i], rt->rxq_poll_vector,
+ else if (memcmp (d[i], rt->rxq_vector_int,
vec_len (d[i]) * sizeof (**d)))
something_changed_on_rx = 1;
if (clib_interrupt_get_n_int (rt->rxq_interrupts) != last_int + 1)
something_changed_on_rx = 1;
+
+ if (something_changed_on_rx == 0 && per_thread_node_adaptive[i])
+ {
+ if (vec_len (rt->rxq_vector_poll) != vec_len (a[i]))
+ something_changed_on_rx = 1;
+ else if (memcmp (a[i], rt->rxq_vector_poll,
+ vec_len (a[i]) * sizeof (**a)))
+ something_changed_on_rx = 1;
+ }
}
}
- new_out_runtimes =
- vec_dup_aligned (hi->output_node_thread_runtimes, CLIB_CACHE_LINE_BYTES);
- vec_validate_aligned (new_out_runtimes, n_threads - 1,
- CLIB_CACHE_LINE_BYTES);
-
- if (vec_len (hi->output_node_thread_runtimes) != vec_len (new_out_runtimes))
- something_changed_on_tx = 1;
-
- for (int i = 0; i < vec_len (hi->tx_queue_indices); i++)
+ if (vec_len (hi->tx_queue_indices) > 0)
{
- u32 thread_index;
- u32 queue_index = hi->tx_queue_indices[i];
- vnet_hw_if_tx_queue_t *txq = vnet_hw_if_get_tx_queue (vnm, queue_index);
- uword n_threads = clib_bitmap_count_set_bits (txq->threads);
+ new_out_runtimes = vec_dup_aligned (hi->output_node_thread_runtimes,
+ CLIB_CACHE_LINE_BYTES);
+ vec_validate_aligned (new_out_runtimes, n_threads - 1,
+ CLIB_CACHE_LINE_BYTES);
- clib_bitmap_foreach (thread_index, txq->threads)
+ for (u32 i = 0; i < vec_len (new_out_runtimes); i++)
{
vnet_hw_if_output_node_runtime_t *rt;
- rt = vec_elt_at_index (new_out_runtimes, thread_index);
- if ((rt->frame.queue_id != txq->queue_id) ||
- (rt->n_threads != n_threads))
+ rt = vec_elt_at_index (new_out_runtimes, i);
+ u32 n_queues = 0, total_queues = vec_len (hi->tx_queue_indices);
+ rt->frame = 0;
+ rt->lookup_table = 0;
+
+ for (u32 j = 0; j < total_queues; j++)
{
+ u32 queue_index = hi->tx_queue_indices[j];
+ vnet_hw_if_tx_frame_t frame = { .shared_queue = 0,
+ .hints = 7,
+ .queue_id = ~0 };
+ vnet_hw_if_tx_queue_t *txq =
+ vnet_hw_if_get_tx_queue (vnm, queue_index);
+ if (!clib_bitmap_get (txq->threads, i))
+ continue;
+
log_debug ("tx queue data changed for interface %v, thread %u "
- "(queue_id %u -> %u, n_threads %u -> %u)",
- hi->name, thread_index, rt->frame.queue_id,
- txq->queue_id, rt->n_threads, n_threads);
+ "(queue_id %u)",
+ hi->name, i, txq->queue_id);
+ something_changed_on_tx = 1;
+
+ frame.queue_id = txq->queue_id;
+ frame.shared_queue = txq->shared_queue;
+ vec_add1 (rt->frame, frame);
+ n_queues++;
+ }
+
+ // rt->n_queues is deliberately NOT reset together with rt->frame above:
+ // the stale value is compared against the freshly counted n_queues to
+ // detect a change before it is overwritten below
+ if (rt->n_queues != n_queues)
+ {
something_changed_on_tx = 1;
- rt->frame.queue_id = txq->queue_id;
- rt->frame.shared_queue = txq->shared_queue;
- rt->n_threads = n_threads;
+ rt->n_queues = n_queues;
+ }
+ /*
+ * The lookup table (sized up to the next power of 2) maps an index to a
+ * queue_id; it only matters when the interface has multiple tx queues.
+ */
+ if (rt->n_queues > 0)
+ {
+ if (!is_pow2 (n_queues))
+ n_queues = max_pow2 (n_queues);
+
+ vec_validate_aligned (rt->lookup_table, n_queues - 1,
+ CLIB_CACHE_LINE_BYTES);
+
+ for (u32 k = 0; k < vec_len (rt->lookup_table); k++)
+ {
+ rt->lookup_table[k] = rt->frame[k % rt->n_queues].queue_id;
+ log_debug ("tx queue lookup table changed for interface %v, "
+ "(lookup table [%u]=%u)",
+ hi->name, k, rt->lookup_table[k]);
+ }
}
}
}
+ else
+ /* interface deleted */
+ something_changed_on_tx = 1;
if (something_changed_on_rx || something_changed_on_tx)
{
vlib_main_t *vm = vlib_get_main_by_index (i);
vnet_hw_if_rx_node_runtime_t *rt;
rt = vlib_node_get_runtime_data (vm, node_index);
- pv = rt->rxq_poll_vector;
- rt->rxq_poll_vector = d[i];
+ pv = rt->rxq_vector_int;
+ rt->rxq_vector_int = d[i];
d[i] = pv;
+ if (per_thread_node_adaptive[i])
+ {
+ pv = rt->rxq_vector_poll;
+ rt->rxq_vector_poll = a[i];
+ a[i] = pv;
+ }
+
if (rt->rxq_interrupts)
{
void *in = rt->rxq_interrupts;
}
for (int i = 0; i < n_threads; i++)
- vec_free (d[i]);
+ {
+ vec_free (d[i]);
+ vec_free (a[i]);
+ if (new_out_runtimes)
+ {
+ vec_free (new_out_runtimes[i].frame);
+ vec_free (new_out_runtimes[i].lookup_table);
+ }
+ }
vec_free (d);
+ vec_free (a);
vec_free (per_thread_node_state);
vec_free (per_thread_node_adaptive);
vec_free (new_out_runtimes);