session: improve close procedure
diff --git a/src/vlib/threads.h b/src/vlib/threads.h
index 101d3d4..1794453 100644
--- a/src/vlib/threads.h
+++ b/src/vlib/threads.h
 #include <vlib/main.h>
 #include <linux/sched.h>
 
+/*
+ * To enable detailed tracing of barrier usage, including call stacks and
+ * timings, define BARRIER_TRACING here or in the relevant build TAGS.
+ * If also used with CLIB_DEBUG, timing will _not_ be representative of
+ * normal code execution.
+ */
+
+// #define BARRIER_TRACING 1
+
+/*
+ * Two options for barrier tracing output: syslog and elog.  Define
+ * BARRIER_TRACING_ELOG to use the event logger; otherwise output goes
+ * to syslog.
+ */
+
+// #define BARRIER_TRACING_ELOG 1
+
 extern vlib_main_t **vlib_mains;
 
 void vlib_set_thread_name (char *name);
@@ -62,7 +78,7 @@ typedef struct vlib_thread_registration_
 #define VLIB_CPU_MASK (VLIB_MAX_CPUS - 1)      /* 0x3f, max */
 #define VLIB_OFFSET_MASK (~VLIB_CPU_MASK)
 
-#define VLIB_LOG2_THREAD_STACK_SIZE (20)
+#define VLIB_LOG2_THREAD_STACK_SIZE (21)
 #define VLIB_THREAD_STACK_SIZE (1<<VLIB_LOG2_THREAD_STACK_SIZE)
 
 typedef enum
@@ -102,6 +118,11 @@ typedef struct
   vlib_thread_registration_t *registration;
   u8 *name;
   u64 barrier_sync_count;
+#ifdef BARRIER_TRACING
+  const char *barrier_caller;
+  const char *barrier_context;
+#endif
+  volatile u32 *node_reforks_required;
 
   long lwp;
   int lcore_id;
@@ -140,16 +161,33 @@ typedef struct
 }
 vlib_frame_queue_t;
 
+typedef struct
+{
+  vlib_frame_queue_elt_t **handoff_queue_elt_by_thread_index;
+  vlib_frame_queue_t **congested_handoff_queue_by_thread_index;
+} vlib_frame_queue_per_thread_data_t;
+
 typedef struct
 {
   u32 node_index;
+  u32 frame_queue_nelts;
+  u32 queue_hi_thresh;
+
   vlib_frame_queue_t **vlib_frame_queues;
+  vlib_frame_queue_per_thread_data_t *per_thread_data;
 
   /* for frame queue tracing */
   frame_queue_trace_t *frame_queue_traces;
   frame_queue_nelt_counter_t *frame_queue_histogram;
 } vlib_frame_queue_main_t;
 
+typedef struct
+{
+  uword node_index;
+  uword type_opaque;
+  uword data;
+} vlib_process_signal_event_mt_args_t;
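
This argument block lets a worker marshal a process-event signal for execution on the main thread (see vlib_process_signal_event_mt_helper, declared further down). A minimal sketch of filling it in; my_process_node_index and MY_EVENT_WAKEUP are assumptions, not part of this header:

    /* Hypothetical sketch: signal a process node from a worker thread. */
    vlib_process_signal_event_mt_args_t args = {
      .node_index = my_process_node_index,
      .type_opaque = MY_EVENT_WAKEUP,
      .data = 0,
    };
    vlib_process_signal_event_mt_helper (&args);
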
+
 /* Called early, in thread 0's context */
 clib_error_t *vlib_thread_init (vlib_main_t * vm);
 
@@ -178,14 +216,21 @@ u32 vlib_frame_queue_main_init (u32 node_index, u32 frame_queue_nelts);
 #define BARRIER_SYNC_TIMEOUT (1.0)
 #endif
 
-void vlib_worker_thread_barrier_sync (vlib_main_t * vm);
+#ifdef BARRIER_TRACING
+#define vlib_worker_thread_barrier_sync(X)                          \
+  do                                                                \
+    {                                                               \
+      vlib_worker_threads[0].barrier_caller = __FUNCTION__;         \
+      vlib_worker_thread_barrier_sync_int (X);                      \
+    }                                                               \
+  while (0)
+#else
+#define vlib_worker_thread_barrier_sync(X) vlib_worker_thread_barrier_sync_int(X)
+#endif
+
+void vlib_worker_thread_barrier_sync_int (vlib_main_t * vm);
 void vlib_worker_thread_barrier_release (vlib_main_t * vm);
+void vlib_worker_thread_node_refork (void);
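
With BARRIER_TRACING enabled, the sync macro records the calling function's name in barrier_caller before parking the workers, so traces can attribute barrier holds to their call sites. A minimal usage sketch (my_feature_enable is hypothetical): bracket any update the data plane must not observe mid-flight:

    /* Hypothetical sketch: serialize a control-plane update against
       the worker threads. */
    static void
    my_feature_enable (vlib_main_t * vm)
    {
      vlib_worker_thread_barrier_sync (vm); /* workers park at the barrier */
      /* ... safely modify state shared with the data plane ... */
      vlib_worker_thread_barrier_release (vm);
    }
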
 
-extern __thread uword vlib_thread_index;
 static_always_inline uword
 vlib_get_thread_index (void)
 {
-  return vlib_thread_index;
+  return __os_thread_index;
 }
 
 always_inline void
@@ -288,7 +333,7 @@ typedef struct
   u8 *thread_prefix;
 
   /* main thread lcore */
-  u8 main_lcore;
+  u32 main_lcore;
 
   /* Bitmap of available CPU cores */
   uword *cpu_core_bitmap;
@@ -327,6 +372,13 @@ static void __vlib_add_thread_registration_##x (void)   \
   x.next = tm->next;                                    \
   tm->next = &x;                                        \
 }                                                       \
+static void __vlib_rm_thread_registration_##x (void)    \
+  __attribute__((__destructor__)) ;                     \
+static void __vlib_rm_thread_registration_##x (void)    \
+{                                                       \
+  vlib_thread_main_t * tm = &vlib_thread_main;          \
+  VLIB_REMOVE_FROM_LINKED_LIST (tm->next, &x, next);    \
+}                                                       \
 __VA_ARGS__ vlib_thread_registration_t x
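
The new destructor mirrors the constructor, unlinking the registration from tm->next when its object is unloaded, e.g. when a plugin containing the registration is dlclose()d. For reference, a registration created with the enclosing macro (VLIB_REGISTER_THREAD in this header) looks roughly like this; the field values are illustrative:

    /* Illustrative sketch: register a pool of worker threads.
       worker_thread_fn is a hypothetical vlib_thread_function_t. */
    VLIB_REGISTER_THREAD (worker_thread_reg, static) = {
      .name = "workers",
      .short_name = "wk",
      .function = worker_thread_fn,
    };
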
 
 always_inline u32
@@ -370,6 +422,15 @@ vlib_worker_thread_barrier_check (void)
       if (CLIB_DEBUG > 0)
        vm->parked_at_barrier = 0;
       clib_smp_atomic_add (vlib_worker_threads->workers_at_barrier, -1);
+
+      if (PREDICT_FALSE (*vlib_worker_threads->node_reforks_required))
+       {
+         vlib_worker_thread_node_refork ();
+         clib_smp_atomic_add (vlib_worker_threads->node_reforks_required,
+                              -1);
+         while (*vlib_worker_threads->node_reforks_required)
+           ;
+       }
     }
 }
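
The refork branch assumes a producer on the main thread: before releasing the barrier, it publishes the number of workers that still need to refork their per-node state. Each worker then reforks, decrements the counter, and spins until every worker has finished, so none resumes with a stale node graph. A sketch of the assumed main-thread side:

    /* Sketch (assumption): main-thread side, before the release.
       refork_needed is a hypothetical flag set when the node graph
       changed while the workers were held at the barrier. */
    if (refork_needed)
      *vlib_worker_threads->node_reforks_required = vlib_num_workers ();
    vlib_worker_thread_barrier_release (vm);
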
 
@@ -384,6 +445,14 @@ vlib_get_worker_vlib_main (u32 worker_index)
   return vm;
 }
 
+static inline u8
+vlib_thread_is_main_w_barrier (void)
+{
+  return (!vlib_num_workers ()
+         || ((vlib_get_thread_index () == 0
+              && vlib_worker_threads->wait_at_barrier[0])));
+}
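
vlib_thread_is_main_w_barrier() answers "is it safe to touch shared state from here?": true when there are no workers at all, or when we are thread 0 and the workers are being held at the barrier. A hypothetical guard using it:

    /* Hypothetical sketch: assert main-thread-with-barrier context. */
    static void
    my_safe_update (void)
    {
      ASSERT (vlib_thread_is_main_w_barrier ());
      /* ... mutate structures the workers also read ... */
    }
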
+
 static inline void
 vlib_put_frame_queue_elt (vlib_frame_queue_elt_t * hf)
 {
@@ -473,9 +542,14 @@ vlib_get_worker_handoff_queue_elt (u32 frame_queue_index,
 }
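
The per-thread handoff state added above (vlib_frame_queue_per_thread_data_t) caches one queue element per destination thread. The usual enqueue pattern, sketched under the assumption of a caller-maintained element cache and a buffer index bi:

    /* Hypothetical sketch: hand a buffer off to another worker. */
    vlib_frame_queue_elt_t *hf;
    hf = vlib_get_worker_handoff_queue_elt (fq_index, next_worker_index,
                                            handoff_queue_elt_by_worker_index);
    hf->buffer_index[hf->n_vectors++] = bi;
    if (hf->n_vectors == VLIB_FRAME_SIZE)
      vlib_put_frame_queue_elt (hf);
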
 
 u8 *vlib_thread_stack_init (uword thread_index);
-
 int vlib_thread_cb_register (struct vlib_main_t *vm,
                             vlib_thread_callbacks_t * cb);
+extern void *rpc_call_main_thread_cb_fn;
+
+void
+vlib_process_signal_event_mt_helper (vlib_process_signal_event_mt_args_t *
+                                    args);
+void vlib_rpc_call_main_thread (void *function, u8 * args, u32 size);
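
vlib_rpc_call_main_thread() copies size bytes of args and arranges for function to run in the main thread's context (rpc_call_main_thread_cb_fn is the pluggable transport for that call). A hedged sketch; the handler signature taking the copied-args pointer is an assumption:

    /* Hypothetical RPC handler, executed on the main thread. */
    static void
    my_cleanup_rpc (void *arg)
    {
      u32 session_index = *(u32 *) arg;
      /* ... main-thread-only cleanup for session_index ... */
    }

    /* From a worker thread: */
    u32 si = 42;
    vlib_rpc_call_main_thread (my_cleanup_rpc, (u8 *) &si, sizeof (si));
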
 
 #endif /* included_vlib_threads_h */