vppinfra: numa vector placement support
diff --git a/src/vlib/threads.h b/src/vlib/threads.h
index 572ce77..c1188ce 100644
@@ -102,9 +102,15 @@ typedef struct
   vlib_thread_registration_t *registration;
   u8 *name;
   u64 barrier_sync_count;
+  u8 barrier_elog_enabled;
+  const char *barrier_caller;
+  const char *barrier_context;
+  volatile u32 *node_reforks_required;
 
   long lwp;
-  int lcore_id;
+  int cpu_id;
+  int core_id;
+  int numa_id;
   pthread_t thread_id;
 } vlib_worker_thread_t;
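
The single lcore_id is replaced by the cpu_id/core_id/numa_id triplet, so each worker records where it actually runs. A minimal sketch (not part of this patch; the function and its output format are illustrative) of how that placement could be inspected from the main thread:

/* Sketch only: walk the worker vector and report each worker's placement.
 * vlib_worker_threads is the global vector of vlib_worker_thread_t. */
static void
dump_worker_placement (vlib_main_t *vm)
{
  vlib_worker_thread_t *w;
  vec_foreach (w, vlib_worker_threads)
    vlib_cli_output (vm, "%v: cpu %d core %d numa %d",
                     w->name, w->cpu_id, w->core_id, w->numa_id);
}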
 
@@ -140,16 +146,33 @@ typedef struct
 }
 vlib_frame_queue_t;
 
+typedef struct
+{
+  vlib_frame_queue_elt_t **handoff_queue_elt_by_thread_index;
+  vlib_frame_queue_t **congested_handoff_queue_by_thread_index;
+} vlib_frame_queue_per_thread_data_t;
+
 typedef struct
 {
   u32 node_index;
+  u32 frame_queue_nelts;
+  u32 queue_hi_thresh;
+
   vlib_frame_queue_t **vlib_frame_queues;
+  vlib_frame_queue_per_thread_data_t *per_thread_data;
 
   /* for frame queue tracing */
   frame_queue_trace_t *frame_queue_traces;
   frame_queue_nelt_counter_t *frame_queue_histogram;
 } vlib_frame_queue_main_t;
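
The frame-queue main now carries per-thread congestion state (queue_hi_thresh plus vlib_frame_queue_per_thread_data_t). A handoff feature still allocates it once at init time through vlib_frame_queue_main_init(); a hedged sketch, where my_handoff_main and my_handoff_node are placeholder names:

/* Sketch, hypothetical names: allocate a frame-queue main for a handoff
 * node and remember the returned index for later
 * vlib_get_worker_handoff_queue_elt() calls. */
static clib_error_t *
my_handoff_init (vlib_main_t *vm)
{
  /* 64 ring slots per worker queue; sizing is illustrative only */
  my_handoff_main.fq_index =
    vlib_frame_queue_main_init (my_handoff_node.index, 64);
  return 0;
}

VLIB_INIT_FUNCTION (my_handoff_init);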
 
+typedef struct
+{
+  uword node_index;
+  uword type_opaque;
+  uword data;
+} vlib_process_signal_event_mt_args_t;
+
 /* Called early, in thread 0's context */
 clib_error_t *vlib_thread_init (vlib_main_t * vm);
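
vlib_process_signal_event_mt_args_t packages the arguments needed to signal a process event from a worker thread; together with vlib_process_signal_event_mt_helper() and vlib_rpc_call_main_thread(), declared near the end of this diff, it gives workers an mt-safe path to the main thread. A sketch of how the pieces are expected to fit together (the wrapper function name is illustrative):

/* Sketch: marshal the event into the args struct and hand it to the main
 * thread over the RPC path; the helper runs there and performs the actual
 * process signal. */
static void
signal_event_from_worker (u32 process_node_index, uword type, uword data)
{
  vlib_process_signal_event_mt_args_t args = {
    .node_index = process_node_index,
    .type_opaque = type,
    .data = data,
  };
  vlib_rpc_call_main_thread (vlib_process_signal_event_mt_helper,
                             (u8 *) &args, sizeof (args));
}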
 
@@ -178,8 +201,13 @@ u32 vlib_frame_queue_main_init (u32 node_index, u32 frame_queue_nelts);
 #define BARRIER_SYNC_TIMEOUT (1.0)
 #endif
 
-void vlib_worker_thread_barrier_sync (vlib_main_t * vm);
+#define vlib_worker_thread_barrier_sync(X) {vlib_worker_thread_barrier_sync_int(X, __FUNCTION__);}
+
+void vlib_worker_thread_barrier_sync_int (vlib_main_t * vm,
+                                         const char *func_name);
 void vlib_worker_thread_barrier_release (vlib_main_t * vm);
+void vlib_worker_thread_initial_barrier_sync_and_release (vlib_main_t * vm);
+void vlib_worker_thread_node_refork (void);
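
vlib_worker_thread_barrier_sync() becomes a macro so the caller's __FUNCTION__ is recorded automatically, which feeds the barrier event log and timeout diagnostics. Call sites are unchanged; a sketch of the usual pattern:

/* Sketch: main-thread critical section; the macro expands to
 * vlib_worker_thread_barrier_sync_int (vm, __FUNCTION__). */
static void
update_shared_tables (vlib_main_t *vm)
{
  vlib_worker_thread_barrier_sync (vm);
  /* ... mutate state that workers read without locks ... */
  vlib_worker_thread_barrier_release (vm);
}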
 
 static_always_inline uword
 vlib_get_thread_index (void)
@@ -246,8 +274,8 @@ typedef enum
 typedef struct
 {
   clib_error_t *(*vlib_launch_thread_cb) (void *fp, vlib_worker_thread_t * w,
-                                         unsigned lcore_id);
-  clib_error_t *(*vlib_thread_set_lcore_cb) (u32 thread, u16 lcore);
+                                         unsigned cpu_id);
+  clib_error_t *(*vlib_thread_set_lcore_cb) (u32 thread, u16 cpu);
 } vlib_thread_callbacks_t;
 
 typedef struct
@@ -287,7 +315,7 @@ typedef struct
   u8 *thread_prefix;
 
   /* main thread lcore */
-  u8 main_lcore;
+  u32 main_lcore;
 
   /* Bitmap of available CPU cores */
   uword *cpu_core_bitmap;
@@ -310,6 +338,10 @@ typedef struct
   /* callbacks */
   vlib_thread_callbacks_t cb;
   int extern_thread_mgmt;
+
+  /* NUMA-bound heap size */
+  uword numa_heap_size;
+
 } vlib_thread_main_t;
 
 extern vlib_thread_main_t vlib_thread_main;
@@ -326,6 +358,13 @@ static void __vlib_add_thread_registration_##x (void)   \
   x.next = tm->next;                                    \
   tm->next = &x;                                        \
 }                                                       \
+static void __vlib_rm_thread_registration_##x (void)    \
+  __attribute__((__destructor__)) ;                     \
+static void __vlib_rm_thread_registration_##x (void)    \
+{                                                       \
+  vlib_thread_main_t * tm = &vlib_thread_main;          \
+  VLIB_REMOVE_FROM_LINKED_LIST (tm->next, &x, next);    \
+}                                                       \
 __VA_ARGS__ vlib_thread_registration_t x
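
The registration macro body (VLIB_REGISTER_THREAD) now also emits a destructor that unlinks the entry from tm->next, so a registration compiled into a plugin that is later dlclose()d no longer leaves a dangling pointer on the list. Usage itself does not change; a hedged sketch with placeholder names and illustrative field values:

/* Sketch, hypothetical thread type: the constructor links it onto
 * vlib_thread_main.next, the new destructor removes it again on unload. */
static void
my_thread_fn (void *arg)
{
  vlib_worker_thread_t *w = (vlib_worker_thread_t *) arg;
  /* w->cpu_id, w->core_id and w->numa_id describe where this thread landed */
  (void) w;
  while (1)
    ;
}

VLIB_REGISTER_THREAD (my_thread_reg, static) = {
  .name = "my-threads",
  .short_name = "mt",
  .function = my_thread_fn,
  .count = 1,
};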
 
 always_inline u32
@@ -357,18 +396,108 @@ vlib_worker_thread_barrier_check (void)
 {
   if (PREDICT_FALSE (*vlib_worker_threads->wait_at_barrier))
     {
-      vlib_main_t *vm;
-      clib_smp_atomic_add (vlib_worker_threads->workers_at_barrier, 1);
+      vlib_main_t *vm = vlib_get_main ();
+      u32 thread_index = vm->thread_index;
+      f64 t = vlib_time_now (vm);
+
+      if (PREDICT_FALSE (vlib_worker_threads->barrier_elog_enabled))
+       {
+         vlib_worker_thread_t *w = vlib_worker_threads + thread_index;
+         /* *INDENT-OFF* */
+         ELOG_TYPE_DECLARE (e) = {
+           .format = "barrier-wait-thread-%d",
+           .format_args = "i4",
+         };
+         /* *INDENT-ON* */
+
+         struct
+         {
+           u32 thread_index;
+         } __clib_packed *ed;
+
+         ed = ELOG_TRACK_DATA (&vlib_global_main.elog_main, e,
+                               w->elog_track);
+         ed->thread_index = thread_index;
+       }
+
       if (CLIB_DEBUG > 0)
        {
          vm = vlib_get_main ();
          vm->parked_at_barrier = 1;
        }
+      clib_atomic_fetch_add (vlib_worker_threads->workers_at_barrier, 1);
       while (*vlib_worker_threads->wait_at_barrier)
        ;
+
+      /*
+       * Recompute the offset from thread-0 time.
+       * Note that vlib_time_now adds vm->time_offset, so
+       * clear it first. Save the resulting idea of "now", to
+       * see how well we're doing. See show_clock_command_fn(...)
+       */
+      {
+       f64 now;
+       vm->time_offset = 0.0;
+       now = vlib_time_now (vm);
+       vm->time_offset = vlib_global_main.time_last_barrier_release - now;
+       vm->time_last_barrier_release = vlib_time_now (vm);
+      }
+
       if (CLIB_DEBUG > 0)
        vm->parked_at_barrier = 0;
-      clib_smp_atomic_add (vlib_worker_threads->workers_at_barrier, -1);
+      clib_atomic_fetch_add (vlib_worker_threads->workers_at_barrier, -1);
+
+      if (PREDICT_FALSE (*vlib_worker_threads->node_reforks_required))
+       {
+         if (PREDICT_FALSE (vlib_worker_threads->barrier_elog_enabled))
+           {
+             t = vlib_time_now (vm) - t;
+             vlib_worker_thread_t *w = vlib_worker_threads + thread_index;
+              /* *INDENT-OFF* */
+              ELOG_TYPE_DECLARE (e) = {
+                .format = "barrier-refork-thread-%d",
+                .format_args = "i4",
+              };
+              /* *INDENT-ON* */
+
+             struct
+             {
+               u32 thread_index;
+             } __clib_packed *ed;
+
+             ed = ELOG_TRACK_DATA (&vlib_global_main.elog_main, e,
+                                   w->elog_track);
+             ed->thread_index = thread_index;
+           }
+
+         vlib_worker_thread_node_refork ();
+         clib_atomic_fetch_add (vlib_worker_threads->node_reforks_required,
+                                -1);
+         while (*vlib_worker_threads->node_reforks_required)
+           ;
+       }
+      if (PREDICT_FALSE (vlib_worker_threads->barrier_elog_enabled))
+       {
+         t = vlib_time_now (vm) - t;
+         vlib_worker_thread_t *w = vlib_worker_threads + thread_index;
+         /* *INDENT-OFF* */
+         ELOG_TYPE_DECLARE (e) = {
+           .format = "barrier-released-thread-%d: %dus",
+           .format_args = "i4i4",
+         };
+         /* *INDENT-ON* */
+
+         struct
+         {
+           u32 thread_index;
+           u32 duration;
+         } __clib_packed *ed;
+
+         ed = ELOG_TRACK_DATA (&vlib_global_main.elog_main, e,
+                               w->elog_track);
+         ed->thread_index = thread_index;
+         ed->duration = (int) (1000000.0 * t);
+       }
     }
 }
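
vlib_worker_thread_barrier_check() now does considerably more than spin: it optionally emits event-log records, re-derives vm->time_offset from thread 0's time_last_barrier_release so per-thread clocks stay aligned, and performs a node refork when the main thread changed the graph under the barrier. Its intended call site is unchanged; a sketch of a worker dispatch loop that polls it (loop body abbreviated):

/* Sketch: one barrier poll per loop iteration; the slow path above is only
 * taken when wait_at_barrier is set by the main thread. */
static void
worker_loop (vlib_main_t *vm)
{
  while (1)
    {
      vlib_worker_thread_barrier_check ();
      /* ... dispatch input nodes, drain handoff frame queues ... */
    }
}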
 
@@ -383,6 +512,14 @@ vlib_get_worker_vlib_main (u32 worker_index)
   return vm;
 }
 
+static inline u8
+vlib_thread_is_main_w_barrier (void)
+{
+  return (!vlib_num_workers ()
+         || ((vlib_get_thread_index () == 0
+              && vlib_worker_threads->wait_at_barrier[0])));
+}
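
vlib_thread_is_main_w_barrier() answers "is it safe to touch shared state right now?": true when there are no workers at all, or when the caller is thread 0 and the barrier is held. A typical (illustrative) use is an assertion guarding a main-thread-only mutation:

/* Sketch: guard a function that must only run single-threaded. */
static void
rebuild_lookup_table (vlib_main_t *vm)
{
  ASSERT (vlib_thread_is_main_w_barrier ());
  /* ... rebuild structures that workers read without locks ... */
}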
+
 static inline void
 vlib_put_frame_queue_elt (vlib_frame_queue_elt_t * hf)
 {
@@ -403,7 +540,7 @@ vlib_get_frame_queue_elt (u32 frame_queue_index, u32 index)
   fq = fqm->vlib_frame_queues[index];
   ASSERT (fq);
 
-  new_tail = __sync_add_and_fetch (&fq->tail, 1);
+  new_tail = clib_atomic_add_fetch (&fq->tail, 1);
 
   /* Wait until a ring slot is available */
   while (new_tail >= fq->head_hint + fq->nelts)
@@ -472,9 +609,16 @@ vlib_get_worker_handoff_queue_elt (u32 frame_queue_index,
 }
 
 u8 *vlib_thread_stack_init (uword thread_index);
-
 int vlib_thread_cb_register (struct vlib_main_t *vm,
                             vlib_thread_callbacks_t * cb);
+extern void *rpc_call_main_thread_cb_fn;
+
+void
+vlib_process_signal_event_mt_helper (vlib_process_signal_event_mt_args_t *
+                                    args);
+void vlib_rpc_call_main_thread (void *function, u8 * args, u32 size);
+void vlib_get_thread_core_numa (vlib_worker_thread_t * w, unsigned cpu_id);
+
 
 #endif /* included_vlib_threads_h */
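
The external thread-management callbacks now take a cpu_id rather than a DPDK lcore id, and vlib_get_thread_core_numa() resolves that cpu into the worker's core_id and numa_id. A hedged sketch of an external thread manager wiring itself in through vlib_thread_cb_register(); the callback names and bodies are placeholders:

/* Sketch, hypothetical callbacks: launch/pin threads externally, then let
 * vlib record each worker's core and NUMA placement. */
static clib_error_t *
my_launch_thread_cb (void *fp, vlib_worker_thread_t *w, unsigned cpu_id)
{
  /* create and pin the thread on cpu_id here (not shown), then: */
  vlib_get_thread_core_numa (w, cpu_id);
  return 0;
}

static clib_error_t *
my_set_cpu_cb (u32 thread, u16 cpu)
{
  return 0; /* affinity handled by the launch callback in this sketch */
}

static clib_error_t *
my_thread_mgmt_init (vlib_main_t *vm)
{
  vlib_thread_callbacks_t cb = {
    .vlib_launch_thread_cb = my_launch_thread_cb,
    .vlib_thread_set_lcore_cb = my_set_cpu_cb,
  };
  if (vlib_thread_cb_register (vm, &cb))
    return clib_error_return (0, "thread callbacks already registered");
  return 0;
}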