X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=src%2Fvlib%2Fthreads.h;h=17944536b6c68a5029e42e0968c2df0d78fb1eb8;hb=568ebc76b594f8f29bb131b252031d72c055e0b0;hp=eca4fc268d54782b3d3a2c3921a84ff7439d541a;hpb=ce359db3b68528ce576862129b2a7709681ad2c6;p=vpp.git

diff --git a/src/vlib/threads.h b/src/vlib/threads.h
index eca4fc268d5..17944536b6c 100644
--- a/src/vlib/threads.h
+++ b/src/vlib/threads.h
@@ -18,6 +18,22 @@
 #include 
 #include 
 
+/*
+ * To enable detailed tracing of barrier usage, including call stacks and
+ * timings, define BARRIER_TRACING here or in relevant TAGS. If also used
+ * with CLIB_DEBUG, timing will _not_ be representative of normal code
+ * execution.
+ *
+ */
+
+// #define BARRIER_TRACING 1
+
+/*
+ * Two options for barrier tracing output: syslog & elog.
+ */
+
+// #define BARRIER_TRACING_ELOG 1
+
 extern vlib_main_t **vlib_mains;
 
 void vlib_set_thread_name (char *name);
@@ -62,7 +78,7 @@ typedef struct vlib_thread_registration_
 #define VLIB_CPU_MASK (VLIB_MAX_CPUS - 1)	/* 0x3f, max */
 #define VLIB_OFFSET_MASK (~VLIB_CPU_MASK)
 
-#define VLIB_LOG2_THREAD_STACK_SIZE (20)
+#define VLIB_LOG2_THREAD_STACK_SIZE (21)
 #define VLIB_THREAD_STACK_SIZE (1<<VLIB_LOG2_THREAD_STACK_SIZE)

[...]

   if (CLIB_DEBUG > 0)
     {
-      if (os_get_cpu_number ())
+      if (vlib_get_thread_index ())
 	fformat (stderr, "%s: SMP unsafe warning...\n", __FUNCTION__);
     }
 }
@@ -283,7 +333,7 @@ typedef struct
   u8 *thread_prefix;
 
   /* main thread lcore */
-  u8 main_lcore;
+  u32 main_lcore;
 
   /* Bitmap of available CPU cores */
   uword *cpu_core_bitmap;
@@ -322,6 +372,13 @@ static void __vlib_add_thread_registration_##x (void) \
   x.next = tm->next;                                    \
   tm->next = &x;                                        \
 }                                                       \
+static void __vlib_rm_thread_registration_##x (void)    \
+  __attribute__((__destructor__)) ;                     \
+static void __vlib_rm_thread_registration_##x (void)    \
+{                                                       \
+  vlib_thread_main_t * tm = &vlib_thread_main;          \
+  VLIB_REMOVE_FROM_LINKED_LIST (tm->next, &x, next);    \
+}                                                       \
 __VA_ARGS__ vlib_thread_registration_t x
 
 always_inline u32
@@ -331,21 +388,21 @@ vlib_num_workers ()
 }
 
 always_inline u32
-vlib_get_worker_cpu_index (u32 worker_index)
+vlib_get_worker_thread_index (u32 worker_index)
 {
   return worker_index + 1;
 }
 
 always_inline u32
-vlib_get_worker_index (u32 cpu_index)
+vlib_get_worker_index (u32 thread_index)
 {
-  return cpu_index - 1;
+  return thread_index - 1;
 }
 
 always_inline u32
 vlib_get_current_worker_index ()
 {
-  return os_get_cpu_number () - 1;
+  return vlib_get_thread_index () - 1;
 }
 
 static inline void
@@ -365,6 +422,15 @@ vlib_worker_thread_barrier_check (void)
       if (CLIB_DEBUG > 0)
 	vm->parked_at_barrier = 0;
       clib_smp_atomic_add (vlib_worker_threads->workers_at_barrier, -1);
+
+      if (PREDICT_FALSE (*vlib_worker_threads->node_reforks_required))
+	{
+	  vlib_worker_thread_node_refork ();
+	  clib_smp_atomic_add (vlib_worker_threads->node_reforks_required,
+			       -1);
+	  while (*vlib_worker_threads->node_reforks_required)
+	    ;
+	}
     }
 }
 
@@ -379,6 +445,14 @@ vlib_get_worker_vlib_main (u32 worker_index)
   return vm;
 }
 
+static inline u8
+vlib_thread_is_main_w_barrier (void)
+{
+  return (!vlib_num_workers ()
+	  || ((vlib_get_thread_index () == 0
+	       && vlib_worker_threads->wait_at_barrier[0])));
+}
+
 static inline void
 vlib_put_frame_queue_elt (vlib_frame_queue_elt_t * hf)
 {
@@ -467,8 +541,15 @@ vlib_get_worker_handoff_queue_elt (u32 frame_queue_index,
   return elt;
 }
 
+u8 *vlib_thread_stack_init (uword thread_index);
 int vlib_thread_cb_register (struct vlib_main_t *vm,
 			     vlib_thread_callbacks_t * cb);
+extern void *rpc_call_main_thread_cb_fn;
+
+void
+vlib_process_signal_event_mt_helper (vlib_process_signal_event_mt_args_t *
+				     args);
+void vlib_rpc_call_main_thread (void *function, u8 * args, u32 size);
 
 #endif /* included_vlib_threads_h */
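
The renamed index helpers in this diff (vlib_get_worker_thread_index and vlib_get_worker_index, formerly keyed on cpu_index) encode a simple off-by-one mapping: thread 0 is the main thread and worker i runs as thread i + 1. The sketch below is illustrative only and not part of the patch; show_worker_mapping() is a made-up name, while vlib_num_workers(), vlib_get_worker_thread_index(), vlib_get_worker_index(), vlib_get_worker_vlib_main(), ASSERT and clib_warning are existing VPP facilities.

/* Illustrative sketch, not part of the patch: walks the worker<->thread
 * index mapping encoded by the helpers above (thread 0 is the main thread,
 * worker i runs as thread i + 1).  show_worker_mapping() is a made-up name. */

#include <vlib/vlib.h>
#include <vlib/threads.h>

static void
show_worker_mapping (void)
{
  u32 worker_index;

  for (worker_index = 0; worker_index < vlib_num_workers (); worker_index++)
    {
      u32 thread_index = vlib_get_worker_thread_index (worker_index);
      vlib_main_t *worker_vm = vlib_get_worker_vlib_main (worker_index);

      ASSERT (worker_vm != 0);
      /* The two index helpers are inverses of each other. */
      ASSERT (vlib_get_worker_index (thread_index) == worker_index);

      clib_warning ("worker %u runs as thread %u", worker_index, thread_index);
    }
}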
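
The declarations added at the end of the header (rpc_call_main_thread_cb_fn, vlib_process_signal_event_mt_helper and vlib_rpc_call_main_thread) give worker threads a way to hand work to the main thread. The sketch below is illustrative and not part of the patch: it assumes vlib_rpc_call_main_thread() invokes `function' on the main thread with a copy of the size-byte args buffer (the diff only declares the prototype), and my_rpc_args_t, my_main_thread_handler, my_worker_side_work and my_per_thread_counters are hypothetical names.

/* Illustrative sketch, not part of the patch.  Shows per-thread data
 * indexed by the new vlib_get_thread_index() (formerly os_get_cpu_number())
 * and a worker deferring a call to the main thread.  Assumes
 * vlib_rpc_call_main_thread() runs `function' on the main thread with a
 * copy of `args'; all my_* names are hypothetical. */

#include <vlib/vlib.h>
#include <vlib/threads.h>

typedef struct
{
  u32 sw_if_index;		/* hypothetical payload */
  u32 new_state;
} my_rpc_args_t;

/* One slot per thread; each thread touches only its own slot, so no lock. */
static u64 my_per_thread_counters[VLIB_MAX_CPUS];

/* Runs on the main thread with the copy made by the RPC machinery. */
static void
my_main_thread_handler (void *arg)
{
  my_rpc_args_t *a = arg;
  clib_warning ("main thread: interface %u -> state %u",
		a->sw_if_index, a->new_state);
}

static void
my_worker_side_work (u32 sw_if_index, u32 new_state)
{
  u32 thread_index = vlib_get_thread_index ();
  my_rpc_args_t a = {.sw_if_index = sw_if_index,.new_state = new_state };

  my_per_thread_counters[thread_index] += 1;

  if (thread_index == 0)
    /* Already on the main thread: call directly. */
    my_main_thread_handler (&a);
  else
    /* On a worker: queue the call (args assumed copied by the RPC layer). */
    vlib_rpc_call_main_thread ((void *) my_main_thread_handler,
			       (u8 *) &a, sizeof (a));
}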