misc: silence -Wmaybe-uninitialized warnings
diff --git a/src/vlib/buffer_funcs.c b/src/vlib/buffer_funcs.c
index 32c2d1b..d910b25 100644
--- a/src/vlib/buffer_funcs.c
+++ b/src/vlib/buffer_funcs.c
 static_always_inline u32
 enqueue_one (vlib_main_t *vm, vlib_node_runtime_t *node,
             vlib_frame_bitmap_t used_elt_bmp, u16 next_index, u32 *buffers,
-            u16 *nexts, u32 n_buffers, u32 n_left, u32 *tmp)
+            u16 *nexts, u32 n_buffers, u32 n_left, u32 *tmp, u8 maybe_aux,
+            u32 *aux_data, u32 *tmp_aux)
 {
   vlib_frame_bitmap_t match_bmp;
   vlib_frame_t *f;
   u32 n_extracted, n_free;
-  u32 *to;
+  u32 *to, *to_aux = 0;
 
   f = vlib_get_next_frame_internal (vm, node, next_index, 0);
 
+  maybe_aux = maybe_aux && f->aux_offset;
+
   n_free = VLIB_FRAME_SIZE - f->n_vectors;
 
   /* if frame contains enough space for worst case scenario, we can avoid
    * use of tmp */
   if (n_free >= n_left)
-    to = (u32 *) vlib_frame_vector_args (f) + f->n_vectors;
+    {
+      to = (u32 *) vlib_frame_vector_args (f) + f->n_vectors;
+      if (maybe_aux)
+       to_aux = (u32 *) vlib_frame_aux_args (f) + f->n_vectors;
+    }
   else
-    to = tmp;
-
+    {
+      to = tmp;
+      if (maybe_aux)
+       to_aux = tmp_aux;
+    }
   clib_mask_compare_u16 (next_index, nexts, match_bmp, n_buffers);
   n_extracted = clib_compress_u32 (to, buffers, match_bmp, n_buffers);
+  if (maybe_aux)
+    clib_compress_u32 (to_aux, aux_data, match_bmp, n_buffers);
   vlib_frame_bitmap_or (used_elt_bmp, match_bmp);
 
   if (to != tmp)
@@ -42,6 +54,11 @@ enqueue_one (vlib_main_t *vm, vlib_node_runtime_t *node,
       /* enough space in the existing frame */
       to = (u32 *) vlib_frame_vector_args (f) + f->n_vectors;
       vlib_buffer_copy_indices (to, tmp, n_extracted);
+      if (maybe_aux)
+       {
+         to_aux = (u32 *) vlib_frame_aux_args (f) + f->n_vectors;
+         vlib_buffer_copy_indices (to_aux, tmp_aux, n_extracted);
+       }
       vlib_put_next_frame (vm, node, next_index, n_free - n_extracted);
     }
   else
@@ -49,6 +66,11 @@ enqueue_one (vlib_main_t *vm, vlib_node_runtime_t *node,
       /* full frame */
       to = (u32 *) vlib_frame_vector_args (f) + f->n_vectors;
       vlib_buffer_copy_indices (to, tmp, n_free);
+      if (maybe_aux)
+       {
+         to_aux = (u32 *) vlib_frame_aux_args (f) + f->n_vectors;
+         vlib_buffer_copy_indices (to_aux, tmp_aux, n_free);
+       }
       vlib_put_next_frame (vm, node, next_index, 0);
 
       /* second frame */
@@ -56,6 +78,11 @@ enqueue_one (vlib_main_t *vm, vlib_node_runtime_t *node,
       f = vlib_get_next_frame_internal (vm, node, next_index, 1);
       to = vlib_frame_vector_args (f);
       vlib_buffer_copy_indices (to, tmp + n_free, n_2nd_frame);
+      if (maybe_aux)
+       {
+         to_aux = vlib_frame_aux_args (f);
+         vlib_buffer_copy_indices (to_aux, tmp_aux + n_free, n_2nd_frame);
+       }
       vlib_put_next_frame (vm, node, next_index,
                           VLIB_FRAME_SIZE - n_2nd_frame);
     }
@@ -63,12 +90,14 @@ enqueue_one (vlib_main_t *vm, vlib_node_runtime_t *node,
   return n_left - n_extracted;
 }
 
-void __clib_section (".vlib_buffer_enqueue_to_next_fn")
-CLIB_MULTIARCH_FN (vlib_buffer_enqueue_to_next_fn)
-(vlib_main_t *vm, vlib_node_runtime_t *node, u32 *buffers, u16 *nexts,
- uword count)
+static_always_inline void
+vlib_buffer_enqueue_to_next_fn_inline (vlib_main_t *vm,
+                                      vlib_node_runtime_t *node, u32 *buffers,
+                                      u32 *aux_data, u16 *nexts, uword count,
+                                      u8 maybe_aux)
 {
   u32 tmp[VLIB_FRAME_SIZE];
+  u32 tmp_aux[VLIB_FRAME_SIZE];
   u32 n_left;
   u16 next_index;
 
@@ -80,7 +109,8 @@ CLIB_MULTIARCH_FN (vlib_buffer_enqueue_to_next_fn)
 
       next_index = nexts[0];
       n_left = enqueue_one (vm, node, used_elt_bmp, next_index, buffers, nexts,
-                           VLIB_FRAME_SIZE, n_left, tmp);
+                           VLIB_FRAME_SIZE, n_left, tmp, maybe_aux, aux_data,
+                           tmp_aux);
 
       while (n_left)
        {
@@ -93,10 +123,13 @@ CLIB_MULTIARCH_FN (vlib_buffer_enqueue_to_next_fn)
          next_index =
            nexts[off * 64 + count_trailing_zeros (~used_elt_bmp[off])];
          n_left = enqueue_one (vm, node, used_elt_bmp, next_index, buffers,
-                               nexts, VLIB_FRAME_SIZE, n_left, tmp);
+                               nexts, VLIB_FRAME_SIZE, n_left, tmp, maybe_aux,
+                               aux_data, tmp_aux);
        }
 
       buffers += VLIB_FRAME_SIZE;
+      if (maybe_aux)
+       aux_data += VLIB_FRAME_SIZE;
       nexts += VLIB_FRAME_SIZE;
       count -= VLIB_FRAME_SIZE;
     }
@@ -109,7 +142,7 @@ CLIB_MULTIARCH_FN (vlib_buffer_enqueue_to_next_fn)
       u32 off = 0;
 
       n_left = enqueue_one (vm, node, used_elt_bmp, next_index, buffers, nexts,
-                           count, n_left, tmp);
+                           count, n_left, tmp, maybe_aux, aux_data, tmp_aux);
 
       while (n_left)
        {
@@ -121,26 +154,55 @@ CLIB_MULTIARCH_FN (vlib_buffer_enqueue_to_next_fn)
 
          next_index =
            nexts[off * 64 + count_trailing_zeros (~used_elt_bmp[off])];
-         n_left = enqueue_one (vm, node, used_elt_bmp, next_index, buffers,
-                               nexts, count, n_left, tmp);
+         n_left =
+           enqueue_one (vm, node, used_elt_bmp, next_index, buffers, nexts,
+                        count, n_left, tmp, maybe_aux, aux_data, tmp_aux);
        }
     }
 }
 
+void __clib_section (".vlib_buffer_enqueue_to_next_fn")
+CLIB_MULTIARCH_FN (vlib_buffer_enqueue_to_next_fn)
+(vlib_main_t *vm, vlib_node_runtime_t *node, u32 *buffers, u16 *nexts,
+ uword count)
+{
+  vlib_buffer_enqueue_to_next_fn_inline (vm, node, buffers, NULL, nexts, count,
+                                        0 /* maybe_aux */);
+}
+
 CLIB_MARCH_FN_REGISTRATION (vlib_buffer_enqueue_to_next_fn);
 
-void __clib_section (".vlib_buffer_enqueue_to_single_next_fn")
-CLIB_MULTIARCH_FN (vlib_buffer_enqueue_to_single_next_fn)
-(vlib_main_t *vm, vlib_node_runtime_t *node, u32 *buffers, u16 next_index,
- u32 count)
+void __clib_section (".vlib_buffer_enqueue_to_next_with_aux_fn")
+CLIB_MULTIARCH_FN (vlib_buffer_enqueue_to_next_with_aux_fn)
+(vlib_main_t *vm, vlib_node_runtime_t *node, u32 *buffers, u32 *aux_data,
+ u16 *nexts, uword count)
 {
-  u32 *to_next, n_left_to_next, n_enq;
+  vlib_buffer_enqueue_to_next_fn_inline (vm, node, buffers, aux_data, nexts,
+                                        count, 1 /* maybe_aux */);
+}
+
+CLIB_MARCH_FN_REGISTRATION (vlib_buffer_enqueue_to_next_with_aux_fn);
 
-  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+static_always_inline void
+vlib_buffer_enqueue_to_single_next_fn_inline (vlib_main_t *vm,
+                                             vlib_node_runtime_t *node,
+                                             u32 *buffers, u32 *aux_data,
+                                             u16 next_index, u32 count,
+                                             u8 with_aux)
+{
+  u32 *to_next, *to_next_aux, n_left_to_next, n_enq;
+
+  if (with_aux)
+    vlib_get_next_frame_with_aux (vm, node, next_index, to_next, to_next_aux,
+                                 n_left_to_next);
+  else
+    vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
 
   if (PREDICT_TRUE (n_left_to_next >= count))
     {
       vlib_buffer_copy_indices (to_next, buffers, count);
+      if (with_aux)
+       vlib_buffer_copy_indices (to_next_aux, aux_data, count);
       n_left_to_next -= count;
       vlib_put_next_frame (vm, node, next_index, n_left_to_next);
       return;
@@ -149,22 +211,49 @@ CLIB_MULTIARCH_FN (vlib_buffer_enqueue_to_single_next_fn)
   n_enq = n_left_to_next;
 next:
   vlib_buffer_copy_indices (to_next, buffers, n_enq);
+  if (with_aux)
+    vlib_buffer_copy_indices (to_next_aux, aux_data, n_enq);
   n_left_to_next -= n_enq;
 
   if (PREDICT_FALSE (count > n_enq))
     {
       count -= n_enq;
       buffers += n_enq;
+      if (with_aux)
+       aux_data += n_enq;
 
       vlib_put_next_frame (vm, node, next_index, n_left_to_next);
-      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+      if (with_aux)
+       vlib_get_next_frame_with_aux (vm, node, next_index, to_next,
+                                     to_next_aux, n_left_to_next);
+      else
+       vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
       n_enq = clib_min (n_left_to_next, count);
       goto next;
     }
   vlib_put_next_frame (vm, node, next_index, n_left_to_next);
 }
+
+void __clib_section (".vlib_buffer_enqueue_to_single_next_fn")
+CLIB_MULTIARCH_FN (vlib_buffer_enqueue_to_single_next_fn)
+(vlib_main_t *vm, vlib_node_runtime_t *node, u32 *buffers, u16 next_index,
+ u32 count)
+{
+  vlib_buffer_enqueue_to_single_next_fn_inline (
+    vm, node, buffers, NULL, next_index, count, 0 /* with_aux */);
+}
 CLIB_MARCH_FN_REGISTRATION (vlib_buffer_enqueue_to_single_next_fn);
 
+void __clib_section (".vlib_buffer_enqueue_to_single_next_with_aux_fn")
+CLIB_MULTIARCH_FN (vlib_buffer_enqueue_to_single_next_with_aux_fn)
+(vlib_main_t *vm, vlib_node_runtime_t *node, u32 *buffers, u32 *aux_data,
+ u16 next_index, u32 count)
+{
+  vlib_buffer_enqueue_to_single_next_fn_inline (
+    vm, node, buffers, aux_data, next_index, count, 1 /* with_aux */);
+}
+CLIB_MARCH_FN_REGISTRATION (vlib_buffer_enqueue_to_single_next_with_aux_fn);
+
 static inline vlib_frame_queue_elt_t *
 vlib_get_frame_queue_elt (vlib_frame_queue_main_t *fqm, u32 index,
                          int dont_wait)
@@ -172,7 +261,7 @@ vlib_get_frame_queue_elt (vlib_frame_queue_main_t *fqm, u32 index,
   vlib_frame_queue_t *fq;
   u64 nelts, tail, new_tail;
 
-  fq = fqm->vlib_frame_queues[index];
+  fq = vec_elt (fqm->vlib_frame_queues, index);
   ASSERT (fq);
   nelts = fq->nelts;
 
@@ -202,7 +291,8 @@ vlib_buffer_enqueue_to_thread_inline (vlib_main_t *vm,
                                      vlib_node_runtime_t *node,
                                      vlib_frame_queue_main_t *fqm,
                                      u32 *buffer_indices, u16 *thread_indices,
-                                     u32 n_packets, int drop_on_congestion)
+                                     u32 n_packets, int drop_on_congestion,
+                                     int with_aux, u32 *aux_data)
 {
   u32 drop_list[VLIB_FRAME_SIZE], n_drop = 0;
   vlib_frame_bitmap_t mask, used_elts = {};
@@ -218,6 +308,9 @@ more:
 
   n_comp = clib_compress_u32 (hf ? hf->buffer_index : drop_list + n_drop,
                              buffer_indices, mask, n_packets);
+  if (with_aux)
+    clib_compress_u32 (hf ? hf->aux_data : drop_list + n_drop, aux_data, mask,
+                      n_packets);
 
   if (hf)
     {
@@ -269,7 +362,7 @@ CLIB_MULTIARCH_FN (vlib_buffer_enqueue_to_thread_fn)
     {
       n_enq += vlib_buffer_enqueue_to_thread_inline (
        vm, node, fqm, buffer_indices, thread_indices, VLIB_FRAME_SIZE,
-       drop_on_congestion);
+       drop_on_congestion, 0 /* with_aux */, NULL);
       buffer_indices += VLIB_FRAME_SIZE;
       thread_indices += VLIB_FRAME_SIZE;
       n_packets -= VLIB_FRAME_SIZE;
@@ -278,24 +371,58 @@ CLIB_MULTIARCH_FN (vlib_buffer_enqueue_to_thread_fn)
   if (n_packets == 0)
     return n_enq;
 
-  n_enq += vlib_buffer_enqueue_to_thread_inline (vm, node, fqm, buffer_indices,
-                                                thread_indices, n_packets,
-                                                drop_on_congestion);
+  n_enq += vlib_buffer_enqueue_to_thread_inline (
+    vm, node, fqm, buffer_indices, thread_indices, n_packets,
+    drop_on_congestion, 0 /* with_aux */, NULL);
+
+  return n_enq;
+}
+
+u32 __clib_section (".vlib_buffer_enqueue_to_thread_with_aux_fn")
+CLIB_MULTIARCH_FN (vlib_buffer_enqueue_to_thread_with_aux_fn)
+(vlib_main_t *vm, vlib_node_runtime_t *node, u32 frame_queue_index,
+ u32 *buffer_indices, u32 *aux, u16 *thread_indices, u32 n_packets,
+ int drop_on_congestion)
+{
+  vlib_thread_main_t *tm = vlib_get_thread_main ();
+  vlib_frame_queue_main_t *fqm;
+  u32 n_enq = 0;
+
+  fqm = vec_elt_at_index (tm->frame_queue_mains, frame_queue_index);
+
+  while (n_packets >= VLIB_FRAME_SIZE)
+    {
+      n_enq += vlib_buffer_enqueue_to_thread_inline (
+       vm, node, fqm, buffer_indices, thread_indices, VLIB_FRAME_SIZE,
+       drop_on_congestion, 1 /* with_aux */, aux);
+      buffer_indices += VLIB_FRAME_SIZE;
+      thread_indices += VLIB_FRAME_SIZE;
+      n_packets -= VLIB_FRAME_SIZE;
+    }
+
+  if (n_packets == 0)
+    return n_enq;
+
+  n_enq += vlib_buffer_enqueue_to_thread_inline (
+    vm, node, fqm, buffer_indices, thread_indices, n_packets,
+    drop_on_congestion, 1 /* with_aux */, aux);
 
   return n_enq;
 }
 
 CLIB_MARCH_FN_REGISTRATION (vlib_buffer_enqueue_to_thread_fn);
+CLIB_MARCH_FN_REGISTRATION (vlib_buffer_enqueue_to_thread_with_aux_fn);
 
-u32 __clib_section (".vlib_frame_queue_dequeue_fn")
-CLIB_MULTIARCH_FN (vlib_frame_queue_dequeue_fn)
-(vlib_main_t *vm, vlib_frame_queue_main_t *fqm)
+static_always_inline u32
+vlib_frame_queue_dequeue_inline (vlib_main_t *vm, vlib_frame_queue_main_t *fqm,
+                                u8 with_aux)
 {
   u32 thread_id = vm->thread_index;
   vlib_frame_queue_t *fq = fqm->vlib_frame_queues[thread_id];
   u32 mask = fq->nelts - 1;
   vlib_frame_queue_elt_t *elt;
-  u32 n_free, n_copy, *from, *to = 0, processed = 0, vectors = 0;
+  u32 n_free, n_copy, *from, *from_aux, *to = 0, *to_aux = 0, processed = 0,
+                                       vectors = 0;
   vlib_frame_t *f = 0;
 
   ASSERT (fq);
@@ -352,13 +479,16 @@ CLIB_MULTIARCH_FN (vlib_frame_queue_dequeue_fn)
        break;
 
       from = elt->buffer_index + elt->offset;
-
+      if (with_aux)
+       from_aux = elt->aux_data + elt->offset;
       ASSERT (elt->offset + elt->n_vectors <= VLIB_FRAME_SIZE);
 
       if (f == 0)
        {
          f = vlib_get_frame_to_node (vm, fqm->node_index);
          to = vlib_frame_vector_args (f);
+         if (with_aux)
+           to_aux = vlib_frame_aux_args (f);
          n_free = VLIB_FRAME_SIZE;
        }
 
@@ -369,6 +499,12 @@ CLIB_MULTIARCH_FN (vlib_frame_queue_dequeue_fn)
 
       vlib_buffer_copy_indices (to, from, n_copy);
       to += n_copy;
+      if (with_aux)
+       {
+         vlib_buffer_copy_indices (to_aux, from_aux, n_copy);
+         to_aux += n_copy;
+       }
+
       n_free -= n_copy;
       vectors += n_copy;
 
@@ -408,8 +544,24 @@ CLIB_MULTIARCH_FN (vlib_frame_queue_dequeue_fn)
   return processed;
 }
 
+u32 __clib_section (".vlib_frame_queue_dequeue_fn")
+CLIB_MULTIARCH_FN (vlib_frame_queue_dequeue_fn)
+(vlib_main_t *vm, vlib_frame_queue_main_t *fqm)
+{
+  return vlib_frame_queue_dequeue_inline (vm, fqm, 0 /* with_aux */);
+}
+
 CLIB_MARCH_FN_REGISTRATION (vlib_frame_queue_dequeue_fn);
 
+u32 __clib_section (".vlib_frame_queue_dequeue_with_aux_fn")
+CLIB_MULTIARCH_FN (vlib_frame_queue_dequeue_with_aux_fn)
+(vlib_main_t *vm, vlib_frame_queue_main_t *fqm)
+{
+  return vlib_frame_queue_dequeue_inline (vm, fqm, 1 /* with_aux */);
+}
+
+CLIB_MARCH_FN_REGISTRATION (vlib_frame_queue_dequeue_with_aux_fn);
+
 #ifndef CLIB_MARCH_VARIANT
 vlib_buffer_func_main_t vlib_buffer_func_main;
 
@@ -419,12 +571,16 @@ vlib_buffer_funcs_init (vlib_main_t *vm)
   vlib_buffer_func_main_t *bfm = &vlib_buffer_func_main;
   bfm->buffer_enqueue_to_next_fn =
     CLIB_MARCH_FN_POINTER (vlib_buffer_enqueue_to_next_fn);
+  bfm->buffer_enqueue_to_next_with_aux_fn =
+    CLIB_MARCH_FN_POINTER (vlib_buffer_enqueue_to_next_with_aux_fn);
   bfm->buffer_enqueue_to_single_next_fn =
     CLIB_MARCH_FN_POINTER (vlib_buffer_enqueue_to_single_next_fn);
+  bfm->buffer_enqueue_to_single_next_with_aux_fn =
+    CLIB_MARCH_FN_POINTER (vlib_buffer_enqueue_to_single_next_with_aux_fn);
   bfm->buffer_enqueue_to_thread_fn =
     CLIB_MARCH_FN_POINTER (vlib_buffer_enqueue_to_thread_fn);
-  bfm->frame_queue_dequeue_fn =
-    CLIB_MARCH_FN_POINTER (vlib_frame_queue_dequeue_fn);
+  bfm->buffer_enqueue_to_thread_with_aux_fn =
+    CLIB_MARCH_FN_POINTER (vlib_buffer_enqueue_to_thread_with_aux_fn);
   return 0;
 }
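
For reference, a minimal sketch of how a caller might exercise the new aux-aware entry point through the function pointer installed by vlib_buffer_funcs_init() above. The wrapper name example_enqueue_with_aux and the idea of calling the pointer directly (rather than through the inline helpers in vlib/buffer_node.h) are illustrative assumptions, not part of this change:

    /* Illustrative sketch only -- not part of this patch. */
    static_always_inline void
    example_enqueue_with_aux (vlib_main_t *vm, vlib_node_runtime_t *node,
                              u32 *buffers, u32 *aux_data, u16 *nexts, uword n)
    {
      vlib_buffer_func_main_t *bfm = &vlib_buffer_func_main;
      /* Same signature as vlib_buffer_enqueue_to_next_with_aux_fn above:
       * (vm, node, buffers, aux_data, nexts, count). Aux values are only
       * copied into frames that carry aux space (f->aux_offset != 0),
       * as enqueue_one checks. */
      bfm->buffer_enqueue_to_next_with_aux_fn (vm, node, buffers, aux_data,
                                               nexts, n);
    }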