#define vlib_validate_buffer_enqueue_x2(vm,node,next_index,to_next,n_left_to_next,bi0,bi1,next0,next1) \
do { \
+ ASSERT (bi0 != 0); \
+ ASSERT (bi1 != 0); \
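+ /* two-bit code: bit 0 set if next0 differs from next_index, bit 1 if next1 does */ \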
int enqueue_code = (next0 != next_index) + 2*(next1 != next_index); \
\
if (PREDICT_FALSE (enqueue_code != 0)) \
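For context (not part of this patch): the validate macros back up a speculative enqueue after the fact. A typical dual-buffer loop body looks roughly like the sketch below; EXAMPLE_NEXT_OUTPUT is a hypothetical next-index enum value.

/* Illustrative dual-loop use of the x2 validate macro (hypothetical node body). */
while (n_left_from >= 2 && n_left_to_next >= 2)
  {
    u32 bi0 = from[0], bi1 = from[1];
    u16 next0 = EXAMPLE_NEXT_OUTPUT, next1 = EXAMPLE_NEXT_OUTPUT;

    /* speculative enqueue: assume both buffers follow next_index */
    to_next[0] = bi0;
    to_next[1] = bi1;
    from += 2; to_next += 2;
    n_left_from -= 2; n_left_to_next -= 2;

    /* ... per-packet processing may rewrite next0 / next1 ... */

    /* back up and re-enqueue if either speculation was wrong */
    vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
                                     n_left_to_next, bi0, bi1, next0, next1);
  }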
#define vlib_validate_buffer_enqueue_x4(vm,node,next_index,to_next,n_left_to_next,bi0,bi1,bi2,bi3,next0,next1,next2,next3) \
do { \
+ ASSERT (bi0 != 0); \
+ ASSERT (bi1 != 0); \
+ ASSERT (bi2 != 0); \
+ ASSERT (bi3 != 0); \
/* After the fact: check the [speculative] enqueue to "next" */ \
u32 fix_speculation = (next_index ^ next0) | (next_index ^ next1) \
| (next_index ^ next2) | (next_index ^ next3); \
#define vlib_validate_buffer_enqueue_x1(vm,node,next_index,to_next,n_left_to_next,bi0,next0) \
do { \
+ ASSERT (bi0 != 0); \
if (PREDICT_FALSE (next0 != next_index)) \
{ \
vlib_put_next_frame (vm, node, next_index, n_left_to_next + 1); \
max = clib_min (n_left_to_next, count);
}
#if defined(CLIB_HAVE_VEC512)
- u16x32 next32 = u16x32_load_unaligned (nexts);
+ u16x32 next32 = CLIB_MEM_OVERFLOW_LOAD (u16x32_load_unaligned, nexts);
next32 = (next32 == u16x32_splat (next32[0]));
u64 bitmap = u16x32_msb_mask (next32);
n_enqueued = count_trailing_zeros (~bitmap);
#elif defined(CLIB_HAVE_VEC256)
- u16x16 next16 = u16x16_load_unaligned (nexts);
+ u16x16 next16 = CLIB_MEM_OVERFLOW_LOAD (u16x16_load_unaligned, nexts);
next16 = (next16 == u16x16_splat (next16[0]));
u64 bitmap = u8x32_msb_mask ((u8x32) next16);
n_enqueued = count_trailing_zeros (~bitmap) / 2;
-#elif defined(CLIB_HAVE_VEC128)
- u16x8 next8 = u16x8_load_unaligned (nexts);
+#elif defined(CLIB_HAVE_VEC128) && defined(CLIB_HAVE_VEC128_MSB_MASK)
+ u16x8 next8 = CLIB_MEM_OVERFLOW_LOAD (u16x8_load_unaligned, nexts);
next8 = (next8 == u16x8_splat (next8[0]));
u64 bitmap = u8x16_msb_mask ((u8x16) next8);
n_enqueued = count_trailing_zeros (~bitmap) / 2;
#else
u16 x = 0;
- x |= next_index ^ nexts[1];
- x |= next_index ^ nexts[2];
- x |= next_index ^ nexts[3];
- n_enqueued = (x == 0) ? 4 : 1;
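+ /* scalar fallback: check whether the next three entries match nexts[0] */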
+ if (count + 3 < max)
+ {
+ x |= next_index ^ nexts[1];
+ x |= next_index ^ nexts[2];
+ x |= next_index ^ nexts[3];
+ n_enqueued = (x == 0) ? 4 : 1;
+ }
+ else
+ n_enqueued = 1;
#endif
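+ /* n_enqueued is the length of the leading run equal to nexts[0]; clamp it to the room left in the frame */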
if (PREDICT_FALSE (n_enqueued > max))
#ifdef CLIB_HAVE_VEC512
if (n_enqueued >= 32)
{
- clib_memcpy (to_next, buffers, 32 * sizeof (u32));
+ vlib_buffer_copy_indices (to_next, buffers, 32);
nexts += 32;
to_next += 32;
buffers += 32;
#ifdef CLIB_HAVE_VEC256
if (n_enqueued >= 16)
{
- clib_memcpy (to_next, buffers, 16 * sizeof (u32));
+ vlib_buffer_copy_indices (to_next, buffers, 16);
nexts += 16;
to_next += 16;
buffers += 16;
#ifdef CLIB_HAVE_VEC128
if (n_enqueued >= 8)
{
- clib_memcpy (to_next, buffers, 8 * sizeof (u32));
+ vlib_buffer_copy_indices (to_next, buffers, 8);
nexts += 8;
to_next += 8;
buffers += 8;
if (n_enqueued >= 4)
{
- clib_memcpy (to_next, buffers, 4 * sizeof (u32));
+ vlib_buffer_copy_indices (to_next, buffers, 4);
nexts += 4;
to_next += 4;
buffers += 4;
vlib_put_next_frame (vm, node, next_index, n_left_to_next);
}
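The hunks above are inside vlib_buffer_enqueue_to_next(), whose callers build parallel buffer-index and next-index arrays and hand them off in one bulk call, roughly:

/* Illustrative caller of vlib_buffer_enqueue_to_next() */
u32 buffers[VLIB_FRAME_SIZE];
u16 nexts[VLIB_FRAME_SIZE];
/* ... fill buffers[i] and nexts[i] for each of n_packets ... */
vlib_buffer_enqueue_to_next (vm, node, buffers, nexts, n_packets);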
+static_always_inline void
+vlib_buffer_enqueue_to_single_next (vlib_main_t * vm,
+ vlib_node_runtime_t * node, u32 * buffers,
+ u16 next_index, u32 count)
+{
+ u32 *to_next, n_left_to_next, n_enq;
+
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
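+ /* fast path: the current frame has room for the whole vector */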
+ if (PREDICT_TRUE (n_left_to_next >= count))
+ {
+ vlib_buffer_copy_indices (to_next, buffers, count);
+ n_left_to_next -= count;
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ return;
+ }
+
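+ /* slow path: fill this frame, then keep requesting fresh frames */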
+ n_enq = n_left_to_next;
+next:
+ vlib_buffer_copy_indices (to_next, buffers, n_enq);
+ n_left_to_next -= n_enq;
+
+ if (PREDICT_FALSE (count > n_enq))
+ {
+ count -= n_enq;
+ buffers += n_enq;
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+ n_enq = clib_min (n_left_to_next, count);
+ goto next;
+ }
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+}
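A minimal usage sketch (not part of the patch): a node whose packets all take the same next can enqueue its whole vector with one call. example_node_fn and EXAMPLE_NEXT_OUTPUT are hypothetical names.

static uword
example_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
                 vlib_frame_t * frame)
{
  u32 *from = vlib_frame_vector_args (frame);

  /* ... per-packet work that never changes the next node ... */

  vlib_buffer_enqueue_to_single_next (vm, node, from, EXAMPLE_NEXT_OUTPUT,
                                      frame->n_vectors);
  return frame->n_vectors;
}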
+
+static_always_inline u32
+vlib_buffer_enqueue_to_thread (vlib_main_t * vm, u32 frame_queue_index,
+ u32 * buffer_indices, u16 * thread_indices,
+ u32 n_packets, int drop_on_congestion)
+{
+ vlib_thread_main_t *tm = vlib_get_thread_main ();
+ vlib_frame_queue_main_t *fqm;
+ vlib_frame_queue_per_thread_data_t *ptd;
+ u32 n_left = n_packets;
+ u32 drop_list[VLIB_FRAME_SIZE], *dbi = drop_list, n_drop = 0;
+ vlib_frame_queue_elt_t *hf = 0;
+ u32 n_left_to_next_thread = 0, *to_next_thread = 0;
+ u32 next_thread_index, current_thread_index = ~0;
+ int i;
+
+ fqm = vec_elt_at_index (tm->frame_queue_mains, frame_queue_index);
+ ptd = vec_elt_at_index (fqm->per_thread_data, vm->thread_index);
+
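+ /* walk the vector, batching consecutive packets bound for the same thread */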
+ while (n_left)
+ {
+ next_thread_index = thread_indices[0];
+
+ if (next_thread_index != current_thread_index)
+ {
+ if (drop_on_congestion &&
+ is_vlib_frame_queue_congested
+ (frame_queue_index, next_thread_index, fqm->queue_hi_thresh,
+ ptd->congested_handoff_queue_by_thread_index))
+ {
+ dbi[0] = buffer_indices[0];
+ dbi++;
+ n_drop++;
+ goto next;
+ }
+
+ if (hf)
+ hf->n_vectors = VLIB_FRAME_SIZE - n_left_to_next_thread;
+
+ hf = vlib_get_worker_handoff_queue_elt (frame_queue_index,
+ next_thread_index,
+ ptd->handoff_queue_elt_by_thread_index);
+
+ n_left_to_next_thread = VLIB_FRAME_SIZE - hf->n_vectors;
+ to_next_thread = &hf->buffer_index[hf->n_vectors];
+ current_thread_index = next_thread_index;
+ }
+
+ to_next_thread[0] = buffer_indices[0];
+ to_next_thread++;
+ n_left_to_next_thread--;
+
+ if (n_left_to_next_thread == 0)
+ {
+ hf->n_vectors = VLIB_FRAME_SIZE;
+ vlib_put_frame_queue_elt (hf);
+ vlib_mains[current_thread_index]->check_frame_queues = 1;
+ current_thread_index = ~0;
+ ptd->handoff_queue_elt_by_thread_index[next_thread_index] = 0;
+ hf = 0;
+ }
+
+ /* next */
+ next:
+ thread_indices += 1;
+ buffer_indices += 1;
+ n_left -= 1;
+ }
+
+ if (hf)
+ hf->n_vectors = VLIB_FRAME_SIZE - n_left_to_next_thread;
+
+ /* Ship frames to the thread nodes */
+ for (i = 0; i < vec_len (ptd->handoff_queue_elt_by_thread_index); i++)
+ {
+ if (ptd->handoff_queue_elt_by_thread_index[i])
+ {
+ hf = ptd->handoff_queue_elt_by_thread_index[i];
+ /*
+ * It works better to let the handoff node
+ * rate-adapt, always ship the handoff queue element.
+ */
+ if (1 || hf->n_vectors == hf->last_n_vectors)
+ {
+ vlib_put_frame_queue_elt (hf);
+ vlib_mains[i]->check_frame_queues = 1;
+ ptd->handoff_queue_elt_by_thread_index[i] = 0;
+ }
+ else
+ hf->last_n_vectors = hf->n_vectors;
+ }
+ ptd->congested_handoff_queue_by_thread_index[i] =
+ (vlib_frame_queue_t *) (~0);
+ }
+
+ if (drop_on_congestion && n_drop)
+ vlib_buffer_free (vm, drop_list, n_drop);
+
+ return n_packets - n_drop;
+}
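A hedged sketch of a caller: a handoff node computes one destination thread per buffer and lets the helper coalesce and ship the frames. example_handoff_fn, example_frame_queue_index (assumed to come from vlib_frame_queue_main_init at node init time), the distribution key, and EXAMPLE_ERROR_CONGESTION are assumptions for illustration; at least one worker thread is assumed.

static uword
example_handoff_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
                    vlib_frame_t * frame)
{
  u32 *from = vlib_frame_vector_args (frame);
  u32 n_packets = frame->n_vectors;
  u16 thread_indices[VLIB_FRAME_SIZE];
  u32 n_enq, i;

  for (i = 0; i < n_packets; i++)
    /* assumed distribution key; any stable per-packet value works */
    thread_indices[i] = 1 + (from[i] % vlib_num_workers ());

  n_enq = vlib_buffer_enqueue_to_thread (vm, example_frame_queue_index,
                                         from, thread_indices, n_packets,
                                         1 /* drop_on_congestion */);
  if (n_enq < n_packets)
    vlib_node_increment_counter (vm, node->node_index,
                                 EXAMPLE_ERROR_CONGESTION,
                                 n_packets - n_enq);
  return n_packets;
}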
+
#endif /* included_vlib_buffer_node_h */
/*