vcl: add api to retrieve num bytes for tx
diff --git a/src/vlib/buffer_node.h b/src/vlib/buffer_node.h
index 2163b72..c0268b2 100644
--- a/src/vlib/buffer_node.h
+++ b/src/vlib/buffer_node.h
@@ -69,6 +69,8 @@
 
 #define vlib_validate_buffer_enqueue_x2(vm,node,next_index,to_next,n_left_to_next,bi0,bi1,next0,next1) \
 do {                                                                   \
+  ASSERT (bi0 != 0);                                                   \
+  ASSERT (bi1 != 0);                                                   \
   int enqueue_code = (next0 != next_index) + 2*(next1 != next_index);  \
                                                                        \
   if (PREDICT_FALSE (enqueue_code != 0))                               \
@@ -137,6 +139,10 @@ do {                                                                       \
 
 #define vlib_validate_buffer_enqueue_x4(vm,node,next_index,to_next,n_left_to_next,bi0,bi1,bi2,bi3,next0,next1,next2,next3) \
 do {                                                                    \
+  ASSERT (bi0 != 0);                                                   \
+  ASSERT (bi1 != 0);                                                   \
+  ASSERT (bi2 != 0);                                                   \
+  ASSERT (bi3 != 0);                                                   \
   /* After the fact: check the [speculative] enqueue to "next" */       \
   u32 fix_speculation = (next_index ^ next0) | (next_index ^ next1)     \
     | (next_index ^ next2) | (next_index ^ next3);                      \
@@ -217,6 +223,7 @@ do {                                                                    \
 */
 #define vlib_validate_buffer_enqueue_x1(vm,node,next_index,to_next,n_left_to_next,bi0,next0) \
 do {                                                                   \
+  ASSERT (bi0 != 0);                                                   \
   if (PREDICT_FALSE (next0 != next_index))                             \
     {                                                                  \
       vlib_put_next_frame (vm, node, next_index, n_left_to_next + 1);  \
@@ -229,6 +236,53 @@ do {                                                                       \
     }                                                                  \
 } while (0)
 
+/** \brief Finish enqueueing one buffer forward in the graph, along with its
+ aux_data if possible. Standard single loop boilerplate element. This is a
+ MACRO, with MULTIPLE SIDE EFFECTS. In the ideal case, <code>next_index ==
+ next0</code>, which means that the speculative enqueue at the top of the
+ single loop has correctly dealt with the packet in hand. In that case, the
+ macro does nothing at all. Note that this macro may set to_next_aux to
+ NULL if next_index does not support aux data.
+
+ @param vm vlib_main_t pointer, varies by thread
+ @param node current node vlib_node_runtime_t pointer
+ @param next_index speculated next index used for this packet
+ @param to_next speculated vector pointer used for this packet
+ @param to_next_aux speculated aux_data pointer used for this packet
+ @param n_left_to_next number of slots left in speculated vector
+ @param bi0 buffer index
+ @param aux0 aux_data for the buffer
+ @param next0 actual next index to be used for the packet
+
+ @return @c next_index -- speculative next index to be used for future packets
+ @return @c to_next -- speculative frame to be used for future packets
+ @return @c n_left_to_next -- number of slots left in speculative frame
+*/
+#define vlib_validate_buffer_enqueue_with_aux_x1(                             \
+  vm, node, next_index, to_next, to_next_aux, n_left_to_next, bi0, aux0,      \
+  next0)                                                                      \
+  do                                                                          \
+    {                                                                         \
+      ASSERT (bi0 != 0);                                                      \
+      if (PREDICT_FALSE (next0 != next_index))                                \
+       {                                                                     \
+         vlib_put_next_frame (vm, node, next_index, n_left_to_next + 1);     \
+         next_index = next0;                                                 \
+         vlib_get_next_frame_with_aux_safe (vm, node, next_index, to_next,   \
+                                            to_next_aux, n_left_to_next);    \
+                                                                              \
+         to_next[0] = bi0;                                                   \
+         to_next += 1;                                                       \
+         if (to_next_aux)                                                    \
+           {                                                                 \
+             to_next_aux[0] = aux0;                                          \
+             to_next_aux += 1;                                               \
+           }                                                                 \
+         n_left_to_next -= 1;                                                \
+       }                                                                     \
+    }                                                                         \
+  while (0)
+
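For context, a minimal sketch of how this macro slots into the standard single-loop speculative-enqueue boilerplate, assuming the usual vlib includes; my_aux() and my_classify() are hypothetical stand-ins for per-packet logic, not part of this API:

/* hypothetical per-packet helpers, not part of this patch */
static u32 my_aux (vlib_main_t *vm, u32 bi);
static u16 my_classify (vlib_main_t *vm, u32 bi);

static uword
my_node_fn (vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
{
  u32 n_left_from, *from = vlib_frame_vector_args (frame);
  u16 next_index = node->cached_next_index;

  n_left_from = frame->n_vectors;

  while (n_left_from > 0)
    {
      u32 *to_next, *to_next_aux, n_left_to_next;

      /* to_next_aux may come back NULL if next_index has no aux support */
      vlib_get_next_frame_with_aux_safe (vm, node, next_index, to_next,
					 to_next_aux, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
	{
	  u32 bi0 = from[0], aux0;
	  u16 next0;

	  from += 1;
	  n_left_from -= 1;

	  /* speculative enqueue: assume next0 == next_index */
	  to_next[0] = bi0;
	  to_next += 1;
	  n_left_to_next -= 1;

	  aux0 = my_aux (vm, bi0);	 /* hypothetical */
	  if (to_next_aux)
	    {
	      to_next_aux[0] = aux0;
	      to_next_aux += 1;
	    }

	  next0 = my_classify (vm, bi0); /* hypothetical */

	  /* fix up the speculation if next0 differs from next_index */
	  vlib_validate_buffer_enqueue_with_aux_x1 (
	    vm, node, next_index, to_next, to_next_aux, n_left_to_next, bi0,
	    aux0, next0);
	}
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  return frame->n_vectors;
}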
 always_inline uword
 generic_buffer_node_inline (vlib_main_t * vm,
                            vlib_node_runtime_t * node,
@@ -280,8 +334,8 @@ generic_buffer_node_inline (vlib_main_t * vm,
            vlib_prefetch_buffer_header (p2, LOAD);
            vlib_prefetch_buffer_header (p3, LOAD);
 
-           CLIB_PREFETCH (p2->data, 64, LOAD);
-           CLIB_PREFETCH (p3->data, 64, LOAD);
+           clib_prefetch_load (p2->data);
+           clib_prefetch_load (p3->data);
          }
 
          pi0 = to_next[0] = from[0];
@@ -328,124 +382,48 @@ generic_buffer_node_inline (vlib_main_t * vm,
   return frame->n_vectors;
 }
 
+/* Minimum size for the 'buffers' and 'nexts' arrays to be used when calling
+ * vlib_buffer_enqueue_to_next().
+ * Because of optimizations, vlib_buffer_enqueue_to_next() will access
+ * elements past the first 'count' entries of the 'buffers' and 'nexts'
+ * arrays; in other words, it will read beyond their end.
+ * Those overflow elements are ignored in the final result, so they do not
+ * need to be initialized. However, if an array is allocated right before
+ * the end of a page and the next page is not mapped, accessing the
+ * overflow elements will trigger a segfault. */
+#define VLIB_BUFFER_ENQUEUE_MIN_SIZE(n) round_pow2 ((n), 64)
+
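To make the sizing rule concrete, here is a small caller sketch (an assumption, not code from this patch) that obeys it with fixed arrays:

static void
my_enqueue_all (vlib_main_t *vm, vlib_node_runtime_t *node, u32 *src_bi,
		u16 *src_next, u32 n_pkts) /* n_pkts <= VLIB_FRAME_SIZE */
{
  /* arrays sized with the macro so the overflow reads stay in bounds;
   * elements past n_pkts are never interpreted */
  u32 buffers[VLIB_BUFFER_ENQUEUE_MIN_SIZE (VLIB_FRAME_SIZE)];
  u16 nexts[VLIB_BUFFER_ENQUEUE_MIN_SIZE (VLIB_FRAME_SIZE)];

  clib_memcpy_fast (buffers, src_bi, n_pkts * sizeof (u32));
  clib_memcpy_fast (nexts, src_next, n_pkts * sizeof (u16));
  vlib_buffer_enqueue_to_next (vm, node, buffers, nexts, n_pkts);
}

For heap vectors, vlib_buffer_enqueue_to_next_vec() further down takes pointers to the vectors, temporarily grows them to VLIB_BUFFER_ENQUEUE_MIN_SIZE (count), and restores the original lengths afterwards, so callers need no manual padding.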
 static_always_inline void
 vlib_buffer_enqueue_to_next (vlib_main_t * vm, vlib_node_runtime_t * node,
                             u32 * buffers, u16 * nexts, uword count)
 {
-  u32 *to_next, n_left_to_next, max;
-  u16 next_index;
-
-  next_index = nexts[0];
-  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
-  max = clib_min (n_left_to_next, count);
-
-  while (count)
-    {
-      u32 n_enqueued;
-      if ((nexts[0] != next_index) || n_left_to_next == 0)
-       {
-         vlib_put_next_frame (vm, node, next_index, n_left_to_next);
-         next_index = nexts[0];
-         vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
-         max = clib_min (n_left_to_next, count);
-       }
-#if defined(CLIB_HAVE_VEC512)
-      u16x32 next32 = u16x32_load_unaligned (nexts);
-      next32 = (next32 == u16x32_splat (next32[0]));
-      u64 bitmap = u16x32_msb_mask (next32);
-      n_enqueued = count_trailing_zeros (~bitmap);
-#elif defined(CLIB_HAVE_VEC256)
-      u16x16 next16 = u16x16_load_unaligned (nexts);
-      next16 = (next16 == u16x16_splat (next16[0]));
-      u64 bitmap = u8x32_msb_mask ((u8x32) next16);
-      n_enqueued = count_trailing_zeros (~bitmap) / 2;
-#elif defined(CLIB_HAVE_VEC128) && defined(CLIB_HAVE_VEC128_MSB_MASK)
-      u16x8 next8 = u16x8_load_unaligned (nexts);
-      next8 = (next8 == u16x8_splat (next8[0]));
-      u64 bitmap = u8x16_msb_mask ((u8x16) next8);
-      n_enqueued = count_trailing_zeros (~bitmap) / 2;
-#else
-      u16 x = 0;
-      if (count + 3 < max)
-       {
-         x |= next_index ^ nexts[1];
-         x |= next_index ^ nexts[2];
-         x |= next_index ^ nexts[3];
-         n_enqueued = (x == 0) ? 4 : 1;
-       }
-      else
-       n_enqueued = 1;
-#endif
-
-      if (PREDICT_FALSE (n_enqueued > max))
-       n_enqueued = max;
-
-#ifdef CLIB_HAVE_VEC512
-      if (n_enqueued >= 32)
-       {
-         clib_memcpy_fast (to_next, buffers, 32 * sizeof (u32));
-         nexts += 32;
-         to_next += 32;
-         buffers += 32;
-         n_left_to_next -= 32;
-         count -= 32;
-         max -= 32;
-         continue;
-       }
-#endif
-
-#ifdef CLIB_HAVE_VEC256
-      if (n_enqueued >= 16)
-       {
-         clib_memcpy_fast (to_next, buffers, 16 * sizeof (u32));
-         nexts += 16;
-         to_next += 16;
-         buffers += 16;
-         n_left_to_next -= 16;
-         count -= 16;
-         max -= 16;
-         continue;
-       }
-#endif
-
-#ifdef CLIB_HAVE_VEC128
-      if (n_enqueued >= 8)
-       {
-         clib_memcpy_fast (to_next, buffers, 8 * sizeof (u32));
-         nexts += 8;
-         to_next += 8;
-         buffers += 8;
-         n_left_to_next -= 8;
-         count -= 8;
-         max -= 8;
-         continue;
-       }
-#endif
-
-      if (n_enqueued >= 4)
-       {
-         clib_memcpy_fast (to_next, buffers, 4 * sizeof (u32));
-         nexts += 4;
-         to_next += 4;
-         buffers += 4;
-         n_left_to_next -= 4;
-         count -= 4;
-         max -= 4;
-         continue;
-       }
+  vlib_buffer_enqueue_to_next_fn_t *fn;
+  fn = vlib_buffer_func_main.buffer_enqueue_to_next_fn;
+  (fn) (vm, node, buffers, nexts, count);
+}
 
-      /* copy */
-      to_next[0] = buffers[0];
+static_always_inline void
+vlib_buffer_enqueue_to_next_with_aux (vlib_main_t *vm,
+                                     vlib_node_runtime_t *node, u32 *buffers,
+                                     u32 *aux_data, u16 *nexts, uword count)
+{
+  vlib_buffer_enqueue_to_next_with_aux_fn_t *fn;
+  fn = vlib_buffer_func_main.buffer_enqueue_to_next_with_aux_fn;
+  (fn) (vm, node, buffers, aux_data, nexts, count);
+}
 
-      /* next */
-      nexts += 1;
-      to_next += 1;
-      buffers += 1;
-      n_left_to_next -= 1;
-      count -= 1;
-      max -= 1;
-    }
-  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+static_always_inline void
+vlib_buffer_enqueue_to_next_vec (vlib_main_t *vm, vlib_node_runtime_t *node,
+                                u32 **buffers, u16 **nexts, uword count)
+{
+  const u32 bl = vec_len (*buffers), nl = vec_len (*nexts);
+  const u32 c = VLIB_BUFFER_ENQUEUE_MIN_SIZE (count);
+  ASSERT (bl >= count && nl >= count);
+  vec_validate (*buffers, c);
+  vec_validate (*nexts, c);
+  vlib_buffer_enqueue_to_next (vm, node, *buffers, *nexts, count);
+  vec_set_len (*buffers, bl);
+  vec_set_len (*nexts, nl);
 }
 
 static_always_inline void
@@ -453,133 +431,46 @@ vlib_buffer_enqueue_to_single_next (vlib_main_t * vm,
                                    vlib_node_runtime_t * node, u32 * buffers,
                                    u16 next_index, u32 count)
 {
-  u32 *to_next, n_left_to_next, n_enq;
-
-  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
-
-  if (PREDICT_TRUE (n_left_to_next >= count))
-    {
-      clib_memcpy_fast (to_next, buffers, count * sizeof (u32));
-      n_left_to_next -= count;
-      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
-      return;
-    }
-
-  n_enq = n_left_to_next;
-next:
-  clib_memcpy_fast (to_next, buffers, n_enq * sizeof (u32));
-  n_left_to_next -= n_enq;
-
-  if (PREDICT_FALSE (count > n_enq))
-    {
-      count -= n_enq;
-      buffers += n_enq;
+  vlib_buffer_enqueue_to_single_next_fn_t *fn;
+  fn = vlib_buffer_func_main.buffer_enqueue_to_single_next_fn;
+  (fn) (vm, node, buffers, next_index, count);
+}
 
-      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
-      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
-      n_enq = clib_min (n_left_to_next, count);
-      goto next;
-    }
-  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+static_always_inline void
+vlib_buffer_enqueue_to_single_next_with_aux (vlib_main_t *vm,
+                                            vlib_node_runtime_t *node,
+                                            u32 *buffers, u32 *aux_data,
+                                            u16 next_index, u32 count)
+{
+  vlib_buffer_enqueue_to_single_next_with_aux_fn_t *fn;
+  fn = vlib_buffer_func_main.buffer_enqueue_to_single_next_with_aux_fn;
+  (fn) (vm, node, buffers, aux_data, next_index, count);
 }
 
 static_always_inline u32
-vlib_buffer_enqueue_to_thread (vlib_main_t * vm, u32 frame_queue_index,
-                              u32 * buffer_indices, u16 * thread_indices,
-                              u32 n_packets, int drop_on_congestion)
+vlib_buffer_enqueue_to_thread (vlib_main_t *vm, vlib_node_runtime_t *node,
+                              u32 frame_queue_index, u32 *buffer_indices,
+                              u16 *thread_indices, u32 n_packets,
+                              int drop_on_congestion)
 {
-  vlib_thread_main_t *tm = vlib_get_thread_main ();
-  vlib_frame_queue_main_t *fqm;
-  vlib_frame_queue_per_thread_data_t *ptd;
-  u32 n_left = n_packets;
-  u32 drop_list[VLIB_FRAME_SIZE], *dbi = drop_list, n_drop = 0;
-  vlib_frame_queue_elt_t *hf = 0;
-  u32 n_left_to_next_thread = 0, *to_next_thread = 0;
-  u32 next_thread_index, current_thread_index = ~0;
-  int i;
-
-  fqm = vec_elt_at_index (tm->frame_queue_mains, frame_queue_index);
-  ptd = vec_elt_at_index (fqm->per_thread_data, vm->thread_index);
-
-  while (n_left)
-    {
-      next_thread_index = thread_indices[0];
-
-      if (next_thread_index != current_thread_index)
-       {
-
-         if (drop_on_congestion &&
-             is_vlib_frame_queue_congested
-             (frame_queue_index, next_thread_index, fqm->queue_hi_thresh,
-              ptd->congested_handoff_queue_by_thread_index))
-           {
-             dbi[0] = buffer_indices[0];
-             dbi++;
-             n_drop++;
-             goto next;
-           }
-
-         if (hf)
-           hf->n_vectors = VLIB_FRAME_SIZE - n_left_to_next_thread;
-
-         hf = vlib_get_worker_handoff_queue_elt (frame_queue_index,
-                                                 next_thread_index,
-                                                 ptd->handoff_queue_elt_by_thread_index);
-
-         n_left_to_next_thread = VLIB_FRAME_SIZE - hf->n_vectors;
-         to_next_thread = &hf->buffer_index[hf->n_vectors];
-         current_thread_index = next_thread_index;
-       }
-
-      to_next_thread[0] = buffer_indices[0];
-      to_next_thread++;
-      n_left_to_next_thread--;
-
-      if (n_left_to_next_thread == 0)
-       {
-         hf->n_vectors = VLIB_FRAME_SIZE;
-         vlib_put_frame_queue_elt (hf);
-         current_thread_index = ~0;
-         ptd->handoff_queue_elt_by_thread_index[next_thread_index] = 0;
-         hf = 0;
-       }
-
-      /* next */
-    next:
-      thread_indices += 1;
-      buffer_indices += 1;
-      n_left -= 1;
-    }
-
-  if (hf)
-    hf->n_vectors = VLIB_FRAME_SIZE - n_left_to_next_thread;
-
-  /* Ship frames to the thread nodes */
-  for (i = 0; i < vec_len (ptd->handoff_queue_elt_by_thread_index); i++)
-    {
-      if (ptd->handoff_queue_elt_by_thread_index[i])
-       {
-         hf = ptd->handoff_queue_elt_by_thread_index[i];
-         /*
-          * It works better to let the handoff node
-          * rate-adapt, always ship the handoff queue element.
-          */
-         if (1 || hf->n_vectors == hf->last_n_vectors)
-           {
-             vlib_put_frame_queue_elt (hf);
-             ptd->handoff_queue_elt_by_thread_index[i] = 0;
-           }
-         else
-           hf->last_n_vectors = hf->n_vectors;
-       }
-      ptd->congested_handoff_queue_by_thread_index[i] =
-       (vlib_frame_queue_t *) (~0);
-    }
-
-  if (drop_on_congestion && n_drop)
-    vlib_buffer_free (vm, drop_list, n_drop);
+  vlib_buffer_enqueue_to_thread_fn_t *fn;
+  fn = vlib_buffer_func_main.buffer_enqueue_to_thread_fn;
+  return (fn) (vm, node, frame_queue_index, buffer_indices, thread_indices,
+              n_packets, drop_on_congestion);
+}
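A usage sketch for the handoff path (an assumption, not code from this patch): per the inline implementation being replaced, the return value is the number of packets actually enqueued, i.e. n_packets minus any congestion drops when drop_on_congestion is set. frame_queue_index is assumed to come from an earlier vlib_frame_queue_main_init() call, and MY_ERROR_CONGESTION_DROP is a hypothetical node error counter:

u32 n_enq;

n_enq = vlib_buffer_enqueue_to_thread (vm, node, frame_queue_index,
				       buffer_indices, thread_indices,
				       n_packets, 1 /* drop_on_congestion */);
if (PREDICT_FALSE (n_enq < n_packets))
  vlib_node_increment_counter (vm, node->node_index,
			       MY_ERROR_CONGESTION_DROP /* hypothetical */,
			       n_packets - n_enq);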
 
-  return n_packets - n_drop;
+static_always_inline u32
+vlib_buffer_enqueue_to_thread_with_aux (vlib_main_t *vm,
+                                       vlib_node_runtime_t *node,
+                                       u32 frame_queue_index,
+                                       u32 *buffer_indices, u32 *aux,
+                                       u16 *thread_indices, u32 n_packets,
+                                       int drop_on_congestion)
+{
+  vlib_buffer_enqueue_to_thread_with_aux_fn_t *fn;
+  fn = vlib_buffer_func_main.buffer_enqueue_to_thread_with_aux_fn;
+  return (fn) (vm, node, frame_queue_index, buffer_indices, aux,
+              thread_indices, n_packets, drop_on_congestion);
 }
 
 #endif /* included_vlib_buffer_node_h */