tap: add support for persistence
[vpp.git] / src / vlib / buffer_node.h
index 1c4f4e7..bd82b10 100644
@@ -350,26 +350,31 @@ vlib_buffer_enqueue_to_next (vlib_main_t * vm, vlib_node_runtime_t * node,
          max = clib_min (n_left_to_next, count);
        }
 #if defined(CLIB_HAVE_VEC512)
-      u16x32 next32 = u16x32_load_unaligned (nexts);
+      u16x32 next32 = CLIB_MEM_OVERFLOW_LOAD (u16x32_load_unaligned, nexts);
       next32 = (next32 == u16x32_splat (next32[0]));
       u64 bitmap = u16x32_msb_mask (next32);
       n_enqueued = count_trailing_zeros (~bitmap);
 #elif defined(CLIB_HAVE_VEC256)
-      u16x16 next16 = u16x16_load_unaligned (nexts);
+      u16x16 next16 = CLIB_MEM_OVERFLOW_LOAD (u16x16_load_unaligned, nexts);
       next16 = (next16 == u16x16_splat (next16[0]));
       u64 bitmap = u8x32_msb_mask ((u8x32) next16);
       n_enqueued = count_trailing_zeros (~bitmap) / 2;
-#elif defined(CLIB_HAVE_VEC128)
-      u16x8 next8 = u16x8_load_unaligned (nexts);
+#elif defined(CLIB_HAVE_VEC128) && defined(CLIB_HAVE_VEC128_MSB_MASK)
+      u16x8 next8 = CLIB_MEM_OVERFLOW_LOAD (u16x8_load_unaligned, nexts);
       next8 = (next8 == u16x8_splat (next8[0]));
       u64 bitmap = u8x16_msb_mask ((u8x16) next8);
       n_enqueued = count_trailing_zeros (~bitmap) / 2;
 #else
       u16 x = 0;
-      x |= next_index ^ nexts[1];
-      x |= next_index ^ nexts[2];
-      x |= next_index ^ nexts[3];
-      n_enqueued = (x == 0) ? 4 : 1;
+      if (count + 3 < max)
+       {
+         x |= next_index ^ nexts[1];
+         x |= next_index ^ nexts[2];
+         x |= next_index ^ nexts[3];
+         n_enqueued = (x == 0) ? 4 : 1;
+       }
+      else
+       n_enqueued = 1;
 #endif
 
       if (PREDICT_FALSE (n_enqueued > max))
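
The three SIMD branches above all implement the same idea: compare every entry of nexts against nexts[0], collect the per-lane MSBs into a bitmap, and take count_trailing_zeros of the inverted bitmap to get the length of the leading run of equal next indices. CLIB_MEM_OVERFLOW_LOAD marks the vector load as an intentional read past the live entries; anything counted beyond the valid range is clamped by the "n_enqueued > max" check below. A standalone scalar sketch of the same bitmap trick (illustrative only, not part of the patch; it assumes at least 16 readable entries, matching the VEC256 branch):

#include <stdint.h>

/* Bit i of 'bitmap' is set when nexts[i] == nexts[0]; the first zero
   bit, found via ctz of the inverted bitmap, is the first mismatch. */
static inline uint32_t
count_leading_equal_sketch (const uint16_t nexts[16])
{
  uint64_t bitmap = 0;
  for (int i = 0; i < 16; i++)
    bitmap |= (uint64_t) (nexts[i] == nexts[0]) << i;
  /* If all 16 entries match, bit 16 of ~bitmap is the lowest set bit,
     so the result is naturally capped at 16. */
  return (uint32_t) __builtin_ctzll (~bitmap);
}
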
@@ -378,7 +383,7 @@ vlib_buffer_enqueue_to_next (vlib_main_t * vm, vlib_node_runtime_t * node,
 #ifdef CLIB_HAVE_VEC512
       if (n_enqueued >= 32)
        {
-         clib_memcpy (to_next, buffers, 32 * sizeof (u32));
+         vlib_buffer_copy_indices (to_next, buffers, 32);
          nexts += 32;
          to_next += 32;
          buffers += 32;
@@ -392,7 +397,7 @@ vlib_buffer_enqueue_to_next (vlib_main_t * vm, vlib_node_runtime_t * node,
 #ifdef CLIB_HAVE_VEC256
       if (n_enqueued >= 16)
        {
-         clib_memcpy (to_next, buffers, 16 * sizeof (u32));
+         vlib_buffer_copy_indices (to_next, buffers, 16);
          nexts += 16;
          to_next += 16;
          buffers += 16;
@@ -406,7 +411,7 @@ vlib_buffer_enqueue_to_next (vlib_main_t * vm, vlib_node_runtime_t * node,
 #ifdef CLIB_HAVE_VEC128
       if (n_enqueued >= 8)
        {
-         clib_memcpy (to_next, buffers, 8 * sizeof (u32));
+         vlib_buffer_copy_indices (to_next, buffers, 8);
          nexts += 8;
          to_next += 8;
          buffers += 8;
@@ -419,7 +424,7 @@ vlib_buffer_enqueue_to_next (vlib_main_t * vm, vlib_node_runtime_t * node,
 
       if (n_enqueued >= 4)
        {
-         clib_memcpy (to_next, buffers, 4 * sizeof (u32));
+         vlib_buffer_copy_indices (to_next, buffers, 4);
          nexts += 4;
          to_next += 4;
          buffers += 4;
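
The four size-specialized copies above switch from clib_memcpy to vlib_buffer_copy_indices, a helper dedicated to copying u32 buffer indices. A minimal stand-in for the semantics these call sites rely on (illustrative only; the real helper may use wide vector loads/stores):

#include <stdint.h>
#include <string.h>

static inline void
buffer_copy_indices_sketch (uint32_t * dst, const uint32_t * src, uint32_t n)
{
  /* copy n 32-bit buffer indices; equivalent to the clib_memcpy calls
     this patch replaces, but with the element type made explicit */
  memcpy (dst, src, n * sizeof (uint32_t));
}
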
@@ -443,6 +448,141 @@ vlib_buffer_enqueue_to_next (vlib_main_t * vm, vlib_node_runtime_t * node,
   vlib_put_next_frame (vm, node, next_index, n_left_to_next);
 }
 
+static_always_inline void
+vlib_buffer_enqueue_to_single_next (vlib_main_t * vm,
+                                   vlib_node_runtime_t * node, u32 * buffers,
+                                   u16 next_index, u32 count)
+{
+  u32 *to_next, n_left_to_next, n_enq;
+
+  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+  if (PREDICT_TRUE (n_left_to_next >= count))
+    {
+      vlib_buffer_copy_indices (to_next, buffers, count);
+      n_left_to_next -= count;
+      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+      return;
+    }
+
+  n_enq = n_left_to_next;
+next:
+  vlib_buffer_copy_indices (to_next, buffers, n_enq);
+  n_left_to_next -= n_enq;
+
+  if (PREDICT_FALSE (count > n_enq))
+    {
+      count -= n_enq;
+      buffers += n_enq;
+
+      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+      n_enq = clib_min (n_left_to_next, count);
+      goto next;
+    }
+  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+}
+
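vlib_buffer_enqueue_to_single_next above copies a whole vector of buffer indices into frames for one fixed next node, grabbing a fresh frame whenever the current one fills up. A hedged usage sketch (the node function and EXAMPLE_NEXT_OUTPUT below are hypothetical, not from this patch; only the vlib calls are real API):

#include <vlib/vlib.h>
#include <vlib/buffer_node.h>

/* hypothetical next index for this example node */
#define EXAMPLE_NEXT_OUTPUT 0

static uword
example_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
                 vlib_frame_t * frame)
{
  u32 *from = vlib_frame_vector_args (frame);

  /* ... per-buffer processing on the indices in 'from' ... */

  /* every buffer goes to the same next node, so a single call suffices */
  vlib_buffer_enqueue_to_single_next (vm, node, from, EXAMPLE_NEXT_OUTPUT,
                                      frame->n_vectors);
  return frame->n_vectors;
}
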
+static_always_inline u32
+vlib_buffer_enqueue_to_thread (vlib_main_t * vm, u32 frame_queue_index,
+                              u32 * buffer_indices, u16 * thread_indices,
+                              u32 n_packets, int drop_on_congestion)
+{
+  vlib_thread_main_t *tm = vlib_get_thread_main ();
+  vlib_frame_queue_main_t *fqm;
+  vlib_frame_queue_per_thread_data_t *ptd;
+  u32 n_left = n_packets;
+  u32 drop_list[VLIB_FRAME_SIZE], *dbi = drop_list, n_drop = 0;
+  vlib_frame_queue_elt_t *hf = 0;
+  u32 n_left_to_next_thread = 0, *to_next_thread = 0;
+  u32 next_thread_index, current_thread_index = ~0;
+  int i;
+
+  fqm = vec_elt_at_index (tm->frame_queue_mains, frame_queue_index);
+  ptd = vec_elt_at_index (fqm->per_thread_data, vm->thread_index);
+
+  while (n_left)
+    {
+      next_thread_index = thread_indices[0];
+
+      if (next_thread_index != current_thread_index)
+       {
+         if (drop_on_congestion &&
+             is_vlib_frame_queue_congested
+             (frame_queue_index, next_thread_index, fqm->queue_hi_thresh,
+              ptd->congested_handoff_queue_by_thread_index))
+           {
+             dbi[0] = buffer_indices[0];
+             dbi++;
+             n_drop++;
+             goto next;
+           }
+
+         if (hf)
+           hf->n_vectors = VLIB_FRAME_SIZE - n_left_to_next_thread;
+
+         hf = vlib_get_worker_handoff_queue_elt (frame_queue_index,
+                                                 next_thread_index,
+                                                 ptd->handoff_queue_elt_by_thread_index);
+
+         n_left_to_next_thread = VLIB_FRAME_SIZE - hf->n_vectors;
+         to_next_thread = &hf->buffer_index[hf->n_vectors];
+         current_thread_index = next_thread_index;
+       }
+
+      to_next_thread[0] = buffer_indices[0];
+      to_next_thread++;
+      n_left_to_next_thread--;
+
+      if (n_left_to_next_thread == 0)
+       {
+         hf->n_vectors = VLIB_FRAME_SIZE;
+         vlib_put_frame_queue_elt (hf);
+         vlib_mains[current_thread_index]->check_frame_queues = 1;
+         current_thread_index = ~0;
+         ptd->handoff_queue_elt_by_thread_index[next_thread_index] = 0;
+         hf = 0;
+       }
+
+      /* next */
+    next:
+      thread_indices += 1;
+      buffer_indices += 1;
+      n_left -= 1;
+    }
+
+  if (hf)
+    hf->n_vectors = VLIB_FRAME_SIZE - n_left_to_next_thread;
+
+  /* Ship frames to the thread nodes */
+  for (i = 0; i < vec_len (ptd->handoff_queue_elt_by_thread_index); i++)
+    {
+      if (ptd->handoff_queue_elt_by_thread_index[i])
+       {
+         hf = ptd->handoff_queue_elt_by_thread_index[i];
+         /*
+          * It works better to let the handoff node
+          * rate-adapt, always ship the handoff queue element.
+          */
+         if (1 || hf->n_vectors == hf->last_n_vectors)
+           {
+             vlib_put_frame_queue_elt (hf);
+             vlib_mains[i]->check_frame_queues = 1;
+             ptd->handoff_queue_elt_by_thread_index[i] = 0;
+           }
+         else
+           hf->last_n_vectors = hf->n_vectors;
+       }
+      ptd->congested_handoff_queue_by_thread_index[i] =
+       (vlib_frame_queue_t *) (~0);
+    }
+
+  if (drop_on_congestion && n_drop)
+    vlib_buffer_free (vm, drop_list, n_drop);
+
+  return n_packets - n_drop;
+}
+
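vlib_buffer_enqueue_to_thread above distributes a vector of buffers across per-worker frame queues, optionally dropping on congestion, and returns how many buffers were actually enqueued. A hedged usage sketch of a handoff-style caller (the helper, its parameters and the thread selection are hypothetical; only the vlib calls themselves are real API):

#include <vlib/vlib.h>
#include <vlib/buffer_node.h>

/* Hypothetical helper called from a handoff node's dispatch function. */
static u32
example_handoff_sketch (vlib_main_t * vm, u32 frame_queue_index,
                        u32 * from, u32 n_packets, u32 n_workers)
{
  u16 thread_indices[VLIB_FRAME_SIZE];
  u32 i;

  for (i = 0; i < n_packets; i++)
    {
      vlib_buffer_t *b = vlib_get_buffer (vm, from[i]);
      /* placeholder thread selection: real code would hash the flow;
         worker thread indices start at 1 (0 is the main thread) */
      thread_indices[i] =
        1 + ((u8 *) vlib_buffer_get_current (b))[0] % n_workers;
    }

  /* drop_on_congestion = 1: buffers that cannot be handed off are freed
     inside the call; the return value says how many made it through */
  return vlib_buffer_enqueue_to_thread (vm, frame_queue_index, from,
                                        thread_indices, n_packets,
                                        1 /* drop_on_congestion */);
}
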
 #endif /* included_vlib_buffer_node_h */
 
 /*