n_enqueued = count_trailing_zeros (~bitmap) / 2;
#else
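/* Scalar fallback: speculatively check whether the next 3 nexts[]
entries match next_index, so 4 buffers can be enqueued per iteration
instead of 1. */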
u16 x = 0;
- x |= next_index ^ nexts[1];
- x |= next_index ^ nexts[2];
- x |= next_index ^ nexts[3];
- n_enqueued = (x == 0) ? 4 : 1;
+ if (count + 3 < max)
+ {
+ x |= next_index ^ nexts[1];
+ x |= next_index ^ nexts[2];
+ x |= next_index ^ nexts[3];
+ n_enqueued = (x == 0) ? 4 : 1;
+ }
+ else
+ n_enqueued = 1;
#endif
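/* Clamp n_enqueued so we never enqueue past the space left in the
current next frame. */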
if (PREDICT_FALSE (n_enqueued > max))
n_enqueued = max;
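/* Copy buffer indices into the frame in the widest chunks available,
falling through from 32 to 16 to 8 to 4 at a time. */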
#ifdef CLIB_HAVE_VEC512
if (n_enqueued >= 32)
{
- clib_memcpy (to_next, buffers, 32 * sizeof (u32));
+ clib_memcpy_fast (to_next, buffers, 32 * sizeof (u32));
nexts += 32;
to_next += 32;
buffers += 32;
n_left_to_next -= 32;
count -= 32;
max -= 32;
continue;
}
#endif

#ifdef CLIB_HAVE_VEC256
if (n_enqueued >= 16)
{
- clib_memcpy (to_next, buffers, 16 * sizeof (u32));
+ clib_memcpy_fast (to_next, buffers, 16 * sizeof (u32));
nexts += 16;
to_next += 16;
buffers += 16;
n_left_to_next -= 16;
count -= 16;
max -= 16;
continue;
}
#endif

#ifdef CLIB_HAVE_VEC128
if (n_enqueued >= 8)
{
- clib_memcpy (to_next, buffers, 8 * sizeof (u32));
+ clib_memcpy_fast (to_next, buffers, 8 * sizeof (u32));
nexts += 8;
to_next += 8;
buffers += 8;
n_left_to_next -= 8;
count -= 8;
max -= 8;
continue;
}
#endif

if (n_enqueued >= 4)
{
- clib_memcpy (to_next, buffers, 4 * sizeof (u32));
+ clib_memcpy_fast (to_next, buffers, 4 * sizeof (u32));
nexts += 4;
to_next += 4;
buffers += 4;
n_left_to_next -= 4;
count -= 4;
max -= 4;
continue;
}
vlib_put_next_frame (vm, node, next_index, n_left_to_next);
}
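/*
* Illustrative sketch (not part of this patch): a typical caller fills a
* parallel nexts[] array, one entry per buffer, and hands the whole frame
* to vlib_buffer_enqueue_to_next. The node function and next-index names
* below are hypothetical.
*
* static uword
* example_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
* vlib_frame_t * frame)
* {
* u32 *buffers = vlib_frame_vector_args (frame);
* u16 nexts[VLIB_FRAME_SIZE];
* u32 i, n = frame->n_vectors;
*
* for (i = 0; i < n; i++)
* nexts[i] = EXAMPLE_NEXT_DROP; // per-packet next selection
*
* vlib_buffer_enqueue_to_next (vm, node, buffers, nexts, n);
* return n;
* }
*/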
-static_always_inline void
+static_always_inline u32
vlib_buffer_enqueue_to_thread (vlib_main_t * vm, u32 frame_queue_index,
u32 * buffer_indices, u16 * thread_indices,
- u32 n_left)
+ u32 n_packets, int drop_on_congestion)
{
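/* With drop_on_congestion set, buffers aimed at congested handoff
queues are collected in drop_list and freed instead of blocking;
the return value is the number of buffers actually handed off. */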
vlib_thread_main_t *tm = vlib_get_thread_main ();
- static __thread vlib_frame_queue_elt_t **handoff_queue_elt_by_thread_index =
- 0;
- static __thread vlib_frame_queue_t **congested_handoff_queue_by_thread_index
- = 0;
+ vlib_frame_queue_main_t *fqm;
+ vlib_frame_queue_per_thread_data_t *ptd;
+ u32 n_left = n_packets;
+ u32 drop_list[VLIB_FRAME_SIZE], *dbi = drop_list, n_drop = 0;
vlib_frame_queue_elt_t *hf = 0;
u32 n_left_to_next_thread = 0, *to_next_thread = 0;
u32 next_thread_index, current_thread_index = ~0;
int i;
- if (PREDICT_FALSE (handoff_queue_elt_by_thread_index == 0))
- {
- vec_validate (handoff_queue_elt_by_thread_index, tm->n_vlib_mains - 1);
- vec_validate_init_empty (congested_handoff_queue_by_thread_index,
- tm->n_vlib_mains - 1,
- (vlib_frame_queue_t *) (~0));
- }
+ fqm = vec_elt_at_index (tm->frame_queue_mains, frame_queue_index);
+ ptd = vec_elt_at_index (fqm->per_thread_data, vm->thread_index);
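+ /* This thread's handoff state for the given frame queue. */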
while (n_left)
{
next_thread_index = thread_indices[0];

if (next_thread_index != current_thread_index)
{
+
+ if (drop_on_congestion &&
+ is_vlib_frame_queue_congested
+ (frame_queue_index, next_thread_index, fqm->queue_hi_thresh,
+ ptd->congested_handoff_queue_by_thread_index))
+ {
+ dbi[0] = buffer_indices[0];
+ dbi++;
+ n_drop++;
+ goto next;
+ }
+
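/* Switching target threads: record how many slots were used in the
previous thread's frame before fetching a new queue element. */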
if (hf)
hf->n_vectors = VLIB_FRAME_SIZE - n_left_to_next_thread;
hf = vlib_get_worker_handoff_queue_elt (frame_queue_index,
next_thread_index,
- handoff_queue_elt_by_thread_index);
+ ptd->handoff_queue_elt_by_thread_index);
n_left_to_next_thread = VLIB_FRAME_SIZE - hf->n_vectors;
to_next_thread = &hf->buffer_index[hf->n_vectors];
current_thread_index = next_thread_index;
}

to_next_thread[0] = buffer_indices[0];
to_next_thread++;
n_left_to_next_thread--;

if (n_left_to_next_thread == 0)
{
hf->n_vectors = VLIB_FRAME_SIZE;
vlib_put_frame_queue_elt (hf);
current_thread_index = ~0;
- handoff_queue_elt_by_thread_index[next_thread_index] = 0;
+ ptd->handoff_queue_elt_by_thread_index[next_thread_index] = 0;
hf = 0;
}
/* next */
+ next:
thread_indices += 1;
buffer_indices += 1;
n_left -= 1;
}

if (hf)
hf->n_vectors = VLIB_FRAME_SIZE - n_left_to_next_thread;
/* Ship frames to the thread nodes */
- for (i = 0; i < vec_len (handoff_queue_elt_by_thread_index); i++)
+ for (i = 0; i < vec_len (ptd->handoff_queue_elt_by_thread_index); i++)
{
- if (handoff_queue_elt_by_thread_index[i])
+ if (ptd->handoff_queue_elt_by_thread_index[i])
{
- hf = handoff_queue_elt_by_thread_index[i];
+ hf = ptd->handoff_queue_elt_by_thread_index[i];
/*
* It works better to let the handoff node
* rate-adapt, always ship the handoff queue element.
*/
if (1 || hf->n_vectors == hf->last_n_vectors)
{
vlib_put_frame_queue_elt (hf);
- handoff_queue_elt_by_thread_index[i] = 0;
+ ptd->handoff_queue_elt_by_thread_index[i] = 0;
}
else
hf->last_n_vectors = hf->n_vectors;
}
- congested_handoff_queue_by_thread_index[i] =
+ ptd->congested_handoff_queue_by_thread_index[i] =
(vlib_frame_queue_t *) (~0);
}
+
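+ /* Free all buffers dropped due to congestion in one batch. */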
+ if (drop_on_congestion && n_drop)
+ vlib_buffer_free (vm, drop_list, n_drop);
+
+ return n_packets - n_drop;
}
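/*
* Illustrative sketch (not part of this patch): how a handoff node might
* use the new drop_on_congestion mode and return value. The names
* fq_index, thread_indices and EXAMPLE_ERROR_CONGESTION_DROP are
* hypothetical.
*
* u32 n_enq;
* n_enq = vlib_buffer_enqueue_to_thread (vm, fq_index, buffer_indices,
* thread_indices, frame->n_vectors,
* 1); // drop_on_congestion
* if (n_enq < frame->n_vectors)
* vlib_node_increment_counter (vm, node->node_index,
* EXAMPLE_ERROR_CONGESTION_DROP,
* frame->n_vectors - n_enq);
*/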
#endif /* included_vlib_buffer_node_h */