+static_always_inline void
+vlib_buffer_enqueue_to_next (vlib_main_t * vm, vlib_node_runtime_t * node,
+                             u32 * buffers, u16 * nexts, uword count)
+{
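+  /* Speculatively enqueue buffers[0..count-1] to the per-buffer next
+   * nodes given in nexts[]: detect runs of identical next indices,
+   * copy each run into the current next frame in progressively
+   * narrower chunks, and refill the frame whenever it fills up or
+   * the next index changes. */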
+  u32 *to_next, n_left_to_next, max;
+  u16 next_index;
+
+  next_index = nexts[0];
+  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+  max = clib_min (n_left_to_next, count);
+
+  while (count)
+    {
+      u32 n_enqueued;
+      if ((nexts[0] != next_index) || n_left_to_next == 0)
+        {
+          vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+          next_index = nexts[0];
+          vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+          max = clib_min (n_left_to_next, count);
+        }
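+      /* Measure the run of entries equal to nexts[0]: compare the
+       * next 32/16/8 next indices against a splat of the first lane,
+       * take the MSB mask, and count its trailing ones via
+       * count_trailing_zeros of the inverted bitmap.  Without SIMD
+       * support, fall back to checking whether the following three
+       * entries match. */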
+#if defined(CLIB_HAVE_VEC512)
+      u16x32 next32 = u16x32_load_unaligned (nexts);
+      next32 = (next32 == u16x32_splat (next32[0]));
+      u64 bitmap = u16x32_msb_mask (next32);
+      n_enqueued = count_trailing_zeros (~bitmap);
+#elif defined(CLIB_HAVE_VEC256)
+      u16x16 next16 = u16x16_load_unaligned (nexts);
+      next16 = (next16 == u16x16_splat (next16[0]));
+      u64 bitmap = u8x32_msb_mask ((u8x32) next16);
+      n_enqueued = count_trailing_zeros (~bitmap) / 2;
+#elif defined(CLIB_HAVE_VEC128) && defined(CLIB_HAVE_VEC128_MSB_MASK)
+      u16x8 next8 = u16x8_load_unaligned (nexts);
+      next8 = (next8 == u16x8_splat (next8[0]));
+      u64 bitmap = u8x16_msb_mask ((u8x16) next8);
+      n_enqueued = count_trailing_zeros (~bitmap) / 2;
+#else
+      u16 x = 0;
+      x |= next_index ^ nexts[1];
+      x |= next_index ^ nexts[2];
+      x |= next_index ^ nexts[3];
+      n_enqueued = (x == 0) ? 4 : 1;
+#endif
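+      /* The loads above may look past the end of the vector, and the
+       * detected run may exceed the space left in the frame; clamping
+       * to max keeps the enqueue within both limits. */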
+
+      if (PREDICT_FALSE (n_enqueued > max))
+        n_enqueued = max;
+
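+      /* Copy the run using the widest chunk that fits; a shorter run
+       * or tail falls through to the narrower cases, and each chunk
+       * loops back to re-scan from the new position. */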
+#ifdef CLIB_HAVE_VEC512
+      if (n_enqueued >= 32)
+        {
+          clib_memcpy (to_next, buffers, 32 * sizeof (u32));
+          nexts += 32;
+          to_next += 32;
+          buffers += 32;
+          n_left_to_next -= 32;
+          count -= 32;
+          max -= 32;
+          continue;
+        }
+#endif
+
+#ifdef CLIB_HAVE_VEC256
+      if (n_enqueued >= 16)
+        {
+          clib_memcpy (to_next, buffers, 16 * sizeof (u32));
+          nexts += 16;
+          to_next += 16;
+          buffers += 16;
+          n_left_to_next -= 16;
+          count -= 16;
+          max -= 16;
+          continue;
+        }
+#endif
+
+#ifdef CLIB_HAVE_VEC128
+      if (n_enqueued >= 8)
+        {
+          clib_memcpy (to_next, buffers, 8 * sizeof (u32));
+          nexts += 8;
+          to_next += 8;
+          buffers += 8;
+          n_left_to_next -= 8;
+          count -= 8;
+          max -= 8;
+          continue;
+        }
+#endif
+
+      if (n_enqueued >= 4)
+        {
+          clib_memcpy (to_next, buffers, 4 * sizeof (u32));
+          nexts += 4;
+          to_next += 4;
+          buffers += 4;
+          n_left_to_next -= 4;
+          count -= 4;
+          max -= 4;
+          continue;
+        }
+
+      /* copy */
+      to_next[0] = buffers[0];
+
+      /* next */
+      nexts += 1;
+      to_next += 1;
+      buffers += 1;
+      n_left_to_next -= 1;
+      count -= 1;
+      max -= 1;
+    }
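+  /* Flush the last, possibly partially filled, frame. */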
+  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+}
+
+static_always_inline void
+vlib_buffer_enqueue_to_thread (vlib_main_t * vm, u32 frame_queue_index,
+                               u32 * buffer_indices, u16 * thread_indices,
+                               u32 n_left)
+{
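+  /* Hand each buffer off to the worker thread named in
+   * thread_indices[], batching buffers bound for the same thread into
+   * a shared frame queue element which the target thread's handoff
+   * node later drains. */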
+  vlib_thread_main_t *tm = vlib_get_thread_main ();
+  static __thread vlib_frame_queue_elt_t **handoff_queue_elt_by_thread_index =
+    0;
+  static __thread vlib_frame_queue_t **congested_handoff_queue_by_thread_index
+    = 0;
+  vlib_frame_queue_elt_t *hf = 0;
+  u32 n_left_to_next_thread = 0, *to_next_thread = 0;
+  u32 next_thread_index, current_thread_index = ~0;
+  int i;
+
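+  /* First call on this thread: allocate the per-target-thread element
+   * and congestion tracking vectors. */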
+  if (PREDICT_FALSE (handoff_queue_elt_by_thread_index == 0))
+    {
+      vec_validate (handoff_queue_elt_by_thread_index, tm->n_vlib_mains - 1);
+      vec_validate_init_empty (congested_handoff_queue_by_thread_index,
+                               tm->n_vlib_mains - 1,
+                               (vlib_frame_queue_t *) (~0));
+    }
+
+  while (n_left)
+    {
+      next_thread_index = thread_indices[0];
+
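+      /* Target thread changed: record how full the previous element
+       * is, then pick up (or allocate) an element for the new target
+       * and continue filling from its current position. */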
+      if (next_thread_index != current_thread_index)
+        {
+          if (hf)
+            hf->n_vectors = VLIB_FRAME_SIZE - n_left_to_next_thread;
+
+          hf = vlib_get_worker_handoff_queue_elt (frame_queue_index,
+                                                  next_thread_index,
+                                                  handoff_queue_elt_by_thread_index);
+
+          n_left_to_next_thread = VLIB_FRAME_SIZE - hf->n_vectors;
+          to_next_thread = &hf->buffer_index[hf->n_vectors];
+          current_thread_index = next_thread_index;
+        }
+
+      to_next_thread[0] = buffer_indices[0];
+      to_next_thread++;
+      n_left_to_next_thread--;
+
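+      /* Element is full: ship it and force a fresh one to be picked
+       * up on the next iteration. */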
+      if (n_left_to_next_thread == 0)
+        {
+          hf->n_vectors = VLIB_FRAME_SIZE;
+          vlib_put_frame_queue_elt (hf);
+          current_thread_index = ~0;
+          handoff_queue_elt_by_thread_index[next_thread_index] = 0;
+          hf = 0;
+        }
+
+      /* next */
+      thread_indices += 1;
+      buffer_indices += 1;
+      n_left -= 1;
+    }
+
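+  /* Record the last element's final fill level before shipping. */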
+  if (hf)
+    hf->n_vectors = VLIB_FRAME_SIZE - n_left_to_next_thread;
+
+  /* Ship frames to the thread nodes */
+  for (i = 0; i < vec_len (handoff_queue_elt_by_thread_index); i++)
+    {
+      if (handoff_queue_elt_by_thread_index[i])
+        {
+          hf = handoff_queue_elt_by_thread_index[i];
+          /*
+           * It works better to let the handoff node
+           * rate-adapt, always ship the handoff queue element.
+           */
+          if (1 || hf->n_vectors == hf->last_n_vectors)
+            {
+              vlib_put_frame_queue_elt (hf);
+              handoff_queue_elt_by_thread_index[i] = 0;
+            }
+          else
+            hf->last_n_vectors = hf->n_vectors;
+        }
+      congested_handoff_queue_by_thread_index[i] =
+        (vlib_frame_queue_t *) (~0);
+    }
+}
+