ethernet: add sanity checks to p2p_ethernet_add/del

diff --git a/src/vlib/buffer_node.h b/src/vlib/buffer_node.h
index 35e15a5..0fa18d6 100644
--- a/src/vlib/buffer_node.h
+++ b/src/vlib/buffer_node.h
@@ -69,6 +69,8 @@
 
 #define vlib_validate_buffer_enqueue_x2(vm,node,next_index,to_next,n_left_to_next,bi0,bi1,next0,next1) \
 do {                                                                   \
+  ASSERT (bi0 != 0);                                                   \
+  ASSERT (bi1 != 0);                                                   \
   int enqueue_code = (next0 != next_index) + 2*(next1 != next_index);  \
                                                                        \
   if (PREDICT_FALSE (enqueue_code != 0))                               \
@@ -137,6 +139,10 @@ do {                                                                       \
 
 #define vlib_validate_buffer_enqueue_x4(vm,node,next_index,to_next,n_left_to_next,bi0,bi1,bi2,bi3,next0,next1,next2,next3) \
 do {                                                                    \
+  ASSERT (bi0 != 0);                                                   \
+  ASSERT (bi1 != 0);                                                   \
+  ASSERT (bi2 != 0);                                                   \
+  ASSERT (bi3 != 0);                                                   \
   /* After the fact: check the [speculative] enqueue to "next" */       \
   u32 fix_speculation = (next_index ^ next0) | (next_index ^ next1)     \
     | (next_index ^ next2) | (next_index ^ next3);                      \
@@ -217,6 +223,7 @@ do {                                                                    \
 */
 #define vlib_validate_buffer_enqueue_x1(vm,node,next_index,to_next,n_left_to_next,bi0,next0) \
 do {                                                                   \
+  ASSERT (bi0 != 0);                                                   \
   if (PREDICT_FALSE (next0 != next_index))                             \
     {                                                                  \
       vlib_put_next_frame (vm, node, next_index, n_left_to_next + 1);  \
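
Note: the three hunks above add the same guard to the x2, x4 and x1 validate-enqueue macros. Buffer index 0 is never a valid index to hand to a next node, so an ASSERT (active in debug builds) catches uninitialized or already-freed indices at the enqueue point instead of much later. A sketch of the standard single-buffer node loop that feeds these macros (the next index name is illustrative, not part of this change):

  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

  while (n_left_from > 0 && n_left_to_next > 0)
    {
      u32 bi0 = from[0];
      u32 next0 = MY_NODE_NEXT_OUTPUT;	/* illustrative next index */

      from += 1;
      n_left_from -= 1;
      to_next[0] = bi0;
      to_next += 1;
      n_left_to_next -= 1;

      /* ... per-packet work that may change next0 ... */

      /* with this change, passing bi0 == 0 here trips an ASSERT */
      vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
				       to_next, n_left_to_next,
				       bi0, next0);
    }

  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
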
@@ -350,17 +357,17 @@ vlib_buffer_enqueue_to_next (vlib_main_t * vm, vlib_node_runtime_t * node,
          max = clib_min (n_left_to_next, count);
        }
 #if defined(CLIB_HAVE_VEC512)
-      u16x32 next32 = u16x32_load_unaligned (nexts);
+      u16x32 next32 = CLIB_MEM_OVERFLOW_LOAD (u16x32_load_unaligned, nexts);
       next32 = (next32 == u16x32_splat (next32[0]));
       u64 bitmap = u16x32_msb_mask (next32);
       n_enqueued = count_trailing_zeros (~bitmap);
 #elif defined(CLIB_HAVE_VEC256)
-      u16x16 next16 = u16x16_load_unaligned (nexts);
+      u16x16 next16 = CLIB_MEM_OVERFLOW_LOAD (u16x16_load_unaligned, nexts);
       next16 = (next16 == u16x16_splat (next16[0]));
       u64 bitmap = u8x32_msb_mask ((u8x32) next16);
       n_enqueued = count_trailing_zeros (~bitmap) / 2;
 #elif defined(CLIB_HAVE_VEC128) && defined(CLIB_HAVE_VEC128_MSB_MASK)
-      u16x8 next8 = u16x8_load_unaligned (nexts);
+      u16x8 next8 = CLIB_MEM_OVERFLOW_LOAD (u16x8_load_unaligned, nexts);
       next8 = (next8 == u16x8_splat (next8[0]));
       u64 bitmap = u8x16_msb_mask ((u8x16) next8);
       n_enqueued = count_trailing_zeros (~bitmap) / 2;
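
Note: each vector load above reads a full vector's worth of `nexts` entries even when fewer live entries remain; wrapping the load in CLIB_MEM_OVERFLOW_LOAD marks that over-read as deliberate so sanitizer-instrumented builds do not report it. The computation itself just counts how many leading entries share the same next index; a scalar equivalent, for reference (helper name is illustrative):

static_always_inline u32
count_leading_same_next (u16 * nexts, u32 max)
{
  /* element 0 always matches itself, so the run length is at least 1 */
  u32 n = 1;
  while (n < max && nexts[n] == nexts[0])
    n++;
  return n;
}
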
@@ -383,7 +390,7 @@ vlib_buffer_enqueue_to_next (vlib_main_t * vm, vlib_node_runtime_t * node,
 #ifdef CLIB_HAVE_VEC512
       if (n_enqueued >= 32)
        {
-         clib_memcpy (to_next, buffers, 32 * sizeof (u32));
+         vlib_buffer_copy_indices (to_next, buffers, 32);
          nexts += 32;
          to_next += 32;
          buffers += 32;
@@ -397,7 +404,7 @@ vlib_buffer_enqueue_to_next (vlib_main_t * vm, vlib_node_runtime_t * node,
 #ifdef CLIB_HAVE_VEC256
       if (n_enqueued >= 16)
        {
-         clib_memcpy (to_next, buffers, 16 * sizeof (u32));
+         vlib_buffer_copy_indices (to_next, buffers, 16);
          nexts += 16;
          to_next += 16;
          buffers += 16;
@@ -411,7 +418,7 @@ vlib_buffer_enqueue_to_next (vlib_main_t * vm, vlib_node_runtime_t * node,
 #ifdef CLIB_HAVE_VEC128
       if (n_enqueued >= 8)
        {
-         clib_memcpy (to_next, buffers, 8 * sizeof (u32));
+         vlib_buffer_copy_indices (to_next, buffers, 8);
          nexts += 8;
          to_next += 8;
          buffers += 8;
@@ -424,7 +431,7 @@ vlib_buffer_enqueue_to_next (vlib_main_t * vm, vlib_node_runtime_t * node,
 
       if (n_enqueued >= 4)
        {
-         clib_memcpy (to_next, buffers, 4 * sizeof (u32));
+         vlib_buffer_copy_indices (to_next, buffers, 4);
          nexts += 4;
          to_next += 4;
          buffers += 4;
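
Note: the clib_memcpy calls above become vlib_buffer_copy_indices, which makes the intent explicit (copying u32 buffer indices, not arbitrary bytes) and gives buffer_funcs.h one place to optimize the copy. In effect it behaves like the following sketch; the real helper may use wider vector stores:

static_always_inline void
buffer_copy_indices_sketch (u32 * dst, u32 * src, u32 n_indices)
{
  /* equivalent in effect to the clib_memcpy calls it replaces */
  clib_memcpy_fast (dst, src, n_indices * sizeof (u32));
}
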
@@ -448,6 +455,41 @@ vlib_buffer_enqueue_to_next (vlib_main_t * vm, vlib_node_runtime_t * node,
   vlib_put_next_frame (vm, node, next_index, n_left_to_next);
 }
 
+static_always_inline void
+vlib_buffer_enqueue_to_single_next (vlib_main_t * vm,
+                                   vlib_node_runtime_t * node, u32 * buffers,
+                                   u16 next_index, u32 count)
+{
+  u32 *to_next, n_left_to_next, n_enq;
+
+  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+  if (PREDICT_TRUE (n_left_to_next >= count))
+    {
+      vlib_buffer_copy_indices (to_next, buffers, count);
+      n_left_to_next -= count;
+      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+      return;
+    }
+
+  n_enq = n_left_to_next;
+next:
+  vlib_buffer_copy_indices (to_next, buffers, n_enq);
+  n_left_to_next -= n_enq;
+
+  if (PREDICT_FALSE (count > n_enq))
+    {
+      count -= n_enq;
+      buffers += n_enq;
+
+      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+      n_enq = clib_min (n_left_to_next, count);
+      goto next;
+    }
+  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+}
+
 static_always_inline u32
 vlib_buffer_enqueue_to_thread (vlib_main_t * vm, u32 frame_queue_index,
                               u32 * buffer_indices, u16 * thread_indices,
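
Note: the new vlib_buffer_enqueue_to_single_next helper covers the common case where every buffer in a frame goes to the same next node: it copies indices in bulk and only falls back to repeated get/put calls when the target frame cannot hold the whole batch. A hedged usage sketch (node and next names are illustrative):

static uword
my_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
	    vlib_frame_t * frame)
{
  u32 *from = vlib_frame_vector_args (frame);

  /* ... per-buffer work that never changes the next node ... */

  vlib_buffer_enqueue_to_single_next (vm, node, from,
				      MY_NODE_NEXT_OUTPUT,
				      frame->n_vectors);
  return frame->n_vectors;
}
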
@@ -472,7 +514,6 @@ vlib_buffer_enqueue_to_thread (vlib_main_t * vm, u32 frame_queue_index,
 
       if (next_thread_index != current_thread_index)
        {
-
          if (drop_on_congestion &&
              is_vlib_frame_queue_congested
              (frame_queue_index, next_thread_index, fqm->queue_hi_thresh,
@@ -504,6 +545,7 @@ vlib_buffer_enqueue_to_thread (vlib_main_t * vm, u32 frame_queue_index,
        {
          hf->n_vectors = VLIB_FRAME_SIZE;
          vlib_put_frame_queue_elt (hf);
+         vlib_mains[current_thread_index]->check_frame_queues = 1;
          current_thread_index = ~0;
          ptd->handoff_queue_elt_by_thread_index[next_thread_index] = 0;
          hf = 0;
@@ -532,6 +574,7 @@ vlib_buffer_enqueue_to_thread (vlib_main_t * vm, u32 frame_queue_index,
          if (1 || hf->n_vectors == hf->last_n_vectors)
            {
              vlib_put_frame_queue_elt (hf);
+             vlib_mains[i]->check_frame_queues = 1;
              ptd->handoff_queue_elt_by_thread_index[i] = 0;
            }
          else
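
Note: both handoff paths now set check_frame_queues on the destination thread's vlib_main_t after a frame queue element is posted, flagging to that thread that handoff work is pending. A hypothetical consumer-side check (illustrative only; the dequeue side is not part of this diff):

  /* in the destination thread's dispatch loop (sketch only) */
  if (vm->check_frame_queues)
    {
      vm->check_frame_queues = 0;
      /* ... scan this thread's handoff frame queues and dequeue ... */
    }
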