X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=src%2Fvlib%2Fthreads.h;h=97df3d253a0792af68229888e84d7596d13d49c0;hb=4141ded3ec876313a5c7f74a93dec3e18940180a;hp=17d35a24b3412aa9e3ec5d8817f0a539b61e40c4;hpb=f55f9b851f59264d737d92c6277a87588c565d24;p=vpp.git

diff --git a/src/vlib/threads.h b/src/vlib/threads.h
index 17d35a24b34..97df3d253a0 100644
--- a/src/vlib/threads.h
+++ b/src/vlib/threads.h
@@ -16,10 +16,9 @@
 #define included_vlib_threads_h
 
 #include <vlib/main.h>
+#include <vppinfra/callback.h>
 #include <linux/sched.h>
 
-extern vlib_main_t **vlib_mains;
-
 void vlib_set_thread_name (char *name);
 
 /* arg is actually a vlib__thread_t * */
@@ -62,24 +61,21 @@ typedef struct vlib_thread_registration_
 #define VLIB_CPU_MASK (VLIB_MAX_CPUS - 1)	/* 0x3f, max */
 #define VLIB_OFFSET_MASK (~VLIB_CPU_MASK)
 
-#define VLIB_LOG2_THREAD_STACK_SIZE (20)
+#define VLIB_LOG2_THREAD_STACK_SIZE (21)
 #define VLIB_THREAD_STACK_SIZE (1<<VLIB_LOG2_THREAD_STACK_SIZE)
[...]
-#define foreach_vlib_main(body)                         \
-do {                                                    \
-  vlib_main_t ** __vlib_mains = 0, *this_vlib_main;     \
-  int ii;                                               \
-                                                        \
-  for (ii = 0; ii < vec_len (vlib_mains); ii++)         \
-    {                                                   \
-      this_vlib_main = vlib_mains[ii];                  \
-      ASSERT (ii == 0 ||                                \
-              this_vlib_main->parked_at_barrier == 1);  \
-      if (this_vlib_main)                               \
-        vec_add1 (__vlib_mains, this_vlib_main);        \
-    }                                                   \
-                                                        \
-  for (ii = 0; ii < vec_len (__vlib_mains); ii++)       \
-    {                                                   \
-      this_vlib_main = __vlib_mains[ii];                \
-      /* body uses this_vlib_main... */                 \
-      (body);                                           \
-    }                                                   \
-  vec_free (__vlib_mains);                              \
-} while (0);
+static inline int
+__foreach_vlib_main_helper (vlib_main_t * ii, vlib_main_t ** p)
+{
+  vlib_main_t *vm;
+  u32 index = ii - (vlib_main_t *) 0;
+
+  if (index >= vec_len (vlib_global_main.vlib_mains))
+    return 0;
+
+  *p = vm = vlib_global_main.vlib_mains[index];
+  ASSERT (index == 0 || vm->parked_at_barrier == 1);
+  return 1;
+}
+
+#define foreach_vlib_main()                                          \
+  for (vlib_main_t *ii = 0, *this_vlib_main;                         \
+       __foreach_vlib_main_helper (ii, &this_vlib_main); ii++)       \
+    if (this_vlib_main)
 
 #define foreach_sched_policy \
   _(SCHED_OTHER, OTHER, "other") \
@@ -243,13 +241,6 @@ typedef enum
   SCHED_POLICY_N,
 } sched_policy_t;
 
-typedef struct
-{
-  clib_error_t *(*vlib_launch_thread_cb) (void *fp, vlib_worker_thread_t * w,
-                                          unsigned lcore_id);
-  clib_error_t *(*vlib_thread_set_lcore_cb) (u32 thread, u16 lcore);
-} vlib_thread_callbacks_t;
-
 typedef struct
 {
   /* Link list of registrations, built by constructors */
@@ -262,10 +253,6 @@ typedef struct
 
   vlib_worker_thread_t *worker_threads;
 
-  /*
-   * Launch all threads as pthreads,
-   * not eal_rte_launch (strict affinity) threads
-   */
   int use_pthreads;
 
   /* Number of vlib_main / vnet_main clones */
@@ -287,7 +274,7 @@ typedef struct
   u8 *thread_prefix;
 
   /* main thread lcore */
-  u8 main_lcore;
+  u32 main_lcore;
 
   /* Bitmap of available CPU cores */
   uword *cpu_core_bitmap;
@@ -307,9 +294,9 @@ typedef struct
   /* scheduling policy priority */
   u32 sched_priority;
 
-  /* callbacks */
-  vlib_thread_callbacks_t cb;
-  int extern_thread_mgmt;
+  /* NUMA-bound heap size */
+  uword numa_heap_size;
+
 } vlib_thread_main_t;
 
 extern vlib_thread_main_t vlib_thread_main;
@@ -326,6 +313,13 @@ static void __vlib_add_thread_registration_##x (void) \
   x.next = tm->next;                                    \
   tm->next = &x;                                        \
 }                                                       \
+static void __vlib_rm_thread_registration_##x (void)    \
+  __attribute__((__destructor__)) ;                     \
+static void __vlib_rm_thread_registration_##x (void)    \
+{                                                       \
+  vlib_thread_main_t * tm = &vlib_thread_main;          \
+  VLIB_REMOVE_FROM_LINKED_LIST (tm->next, &x, next);    \
+}                                                       \
 __VA_ARGS__ vlib_thread_registration_t x
 
 always_inline u32
@@ -357,18 +351,115 @@ vlib_worker_thread_barrier_check (void)
 {
   if (PREDICT_FALSE (*vlib_worker_threads->wait_at_barrier))
     {
-      vlib_main_t *vm;
-      clib_smp_atomic_add (vlib_worker_threads->workers_at_barrier, 1);
+      vlib_global_main_t *vgm = vlib_get_global_main ();
+      vlib_main_t *vm = vlib_get_main ();
+      u32 thread_index = vm->thread_index;
+      f64 t = vlib_time_now (vm);
+
+      if (PREDICT_FALSE (vec_len (vm->barrier_perf_callbacks) != 0))
+        clib_call_callbacks (vm->barrier_perf_callbacks, vm,
+                             vm->clib_time.last_cpu_time, 0 /* enter */ );
+
+      if (PREDICT_FALSE (vlib_worker_threads->barrier_elog_enabled))
+        {
+          vlib_worker_thread_t *w = vlib_worker_threads + thread_index;
+          /* *INDENT-OFF* */
+          ELOG_TYPE_DECLARE (e) = {
+            .format = "barrier-wait-thread-%d",
+            .format_args = "i4",
+          };
+          /* *INDENT-ON* */
+
+          struct
+          {
+            u32 thread_index;
+          } __clib_packed *ed;
+
+          ed = ELOG_TRACK_DATA (&vlib_global_main.elog_main, e, w->elog_track);
+          ed->thread_index = thread_index;
+        }
+
       if (CLIB_DEBUG > 0)
         {
           vm = vlib_get_main ();
           vm->parked_at_barrier = 1;
         }
+      clib_atomic_fetch_add (vlib_worker_threads->workers_at_barrier, 1);
       while (*vlib_worker_threads->wait_at_barrier)
         ;
+
+      /*
+       * Recompute the offset from thread-0 time.
+       * Note that vlib_time_now adds vm->time_offset, so
+       * clear it first. Save the resulting idea of "now", to
+       * see how well we're doing. See show_clock_command_fn(...)
+       */
+      {
+        f64 now;
+        vm->time_offset = 0.0;
+        now = vlib_time_now (vm);
+        vm->time_offset = vgm->vlib_mains[0]->time_last_barrier_release - now;
+        vm->time_last_barrier_release = vlib_time_now (vm);
+      }
+
       if (CLIB_DEBUG > 0)
         vm->parked_at_barrier = 0;
-      clib_smp_atomic_add (vlib_worker_threads->workers_at_barrier, -1);
+      clib_atomic_fetch_add (vlib_worker_threads->workers_at_barrier, -1);
+
+      if (PREDICT_FALSE (*vlib_worker_threads->node_reforks_required))
+        {
+          if (PREDICT_FALSE (vlib_worker_threads->barrier_elog_enabled))
+            {
+              t = vlib_time_now (vm) - t;
+              vlib_worker_thread_t *w = vlib_worker_threads + thread_index;
+              /* *INDENT-OFF* */
+              ELOG_TYPE_DECLARE (e) = {
+                .format = "barrier-refork-thread-%d",
+                .format_args = "i4",
+              };
+              /* *INDENT-ON* */
+
+              struct
+              {
+                u32 thread_index;
+              } __clib_packed *ed;
+
+              ed = ELOG_TRACK_DATA (&vlib_global_main.elog_main, e,
+                                    w->elog_track);
+              ed->thread_index = thread_index;
+            }
+
+          vlib_worker_thread_node_refork ();
+          clib_atomic_fetch_add (vlib_worker_threads->node_reforks_required,
+                                 -1);
+          while (*vlib_worker_threads->node_reforks_required)
+            ;
+        }
+      if (PREDICT_FALSE (vlib_worker_threads->barrier_elog_enabled))
+        {
+          t = vlib_time_now (vm) - t;
+          vlib_worker_thread_t *w = vlib_worker_threads + thread_index;
+          /* *INDENT-OFF* */
+          ELOG_TYPE_DECLARE (e) = {
+            .format = "barrier-released-thread-%d: %dus",
+            .format_args = "i4i4",
+          };
+          /* *INDENT-ON* */
+
+          struct
+          {
+            u32 thread_index;
+            u32 duration;
+          } __clib_packed *ed;
+
+          ed = ELOG_TRACK_DATA (&vlib_global_main.elog_main, e, w->elog_track);
+          ed->thread_index = thread_index;
+          ed->duration = (int) (1000000.0 * t);
+        }
+
+      if (PREDICT_FALSE (vec_len (vm->barrier_perf_callbacks) != 0))
+        clib_call_callbacks (vm->barrier_perf_callbacks, vm,
+                             vm->clib_time.last_cpu_time, 1 /* leave */ );
     }
 }
@@ -378,103 +469,39 @@ vlib_get_worker_vlib_main (u32 worker_index)
   vlib_main_t *vm;
   vlib_thread_main_t *tm = &vlib_thread_main;
   ASSERT (worker_index < tm->n_vlib_mains - 1);
-  vm = vlib_mains[worker_index + 1];
+  vm = vlib_get_main_by_index (worker_index + 1);
   ASSERT (vm);
   return vm;
 }
 
-static inline void
-vlib_put_frame_queue_elt (vlib_frame_queue_elt_t * hf)
-{
-  CLIB_MEMORY_BARRIER ();
-  hf->valid = 1;
-}
-
-static inline vlib_frame_queue_elt_t *
-vlib_get_frame_queue_elt (u32 frame_queue_index, u32 index)
+static inline u8
+vlib_thread_is_main_w_barrier (void)
 {
-  vlib_frame_queue_t *fq;
-  vlib_frame_queue_elt_t *elt;
-  vlib_thread_main_t *tm = &vlib_thread_main;
-  vlib_frame_queue_main_t *fqm =
-    vec_elt_at_index (tm->frame_queue_mains, frame_queue_index);
-  u64 new_tail;
-
-  fq = fqm->vlib_frame_queues[index];
-  ASSERT (fq);
-
-  new_tail = __sync_add_and_fetch (&fq->tail, 1);
-
-  /* Wait until a ring slot is available */
-  while (new_tail >= fq->head_hint + fq->nelts)
-    vlib_worker_thread_barrier_check ();
-
-  elt = fq->elts + (new_tail & (fq->nelts - 1));
-
-  /* this would be very bad... */
-  while (elt->valid)
-    ;
-
-  elt->msg_type = VLIB_FRAME_QUEUE_ELT_DISPATCH_FRAME;
-  elt->last_n_vectors = elt->n_vectors = 0;
-
-  return elt;
-}
-
-static inline vlib_frame_queue_t *
-is_vlib_frame_queue_congested (u32 frame_queue_index,
-                               u32 index,
-                               u32 queue_hi_thresh,
-                               vlib_frame_queue_t **
-                               handoff_queue_by_worker_index)
-{
-  vlib_frame_queue_t *fq;
-  vlib_thread_main_t *tm = &vlib_thread_main;
-  vlib_frame_queue_main_t *fqm =
-    vec_elt_at_index (tm->frame_queue_mains, frame_queue_index);
-
-  fq = handoff_queue_by_worker_index[index];
-  if (fq != (vlib_frame_queue_t *) (~0))
-    return fq;
-
-  fq = fqm->vlib_frame_queues[index];
-  ASSERT (fq);
-
-  if (PREDICT_FALSE (fq->tail >= (fq->head_hint + queue_hi_thresh)))
-    {
-      /* a valid entry in the array will indicate the queue has reached
-       * the specified threshold and is congested
-       */
-      handoff_queue_by_worker_index[index] = fq;
-      fq->enqueue_full_events++;
-      return fq;
-    }
-
-  return NULL;
-}
-
-static inline vlib_frame_queue_elt_t *
-vlib_get_worker_handoff_queue_elt (u32 frame_queue_index,
-                                   u32 vlib_worker_index,
-                                   vlib_frame_queue_elt_t **
-                                   handoff_queue_elt_by_worker_index)
-{
-  vlib_frame_queue_elt_t *elt;
-
-  if (handoff_queue_elt_by_worker_index[vlib_worker_index])
-    return handoff_queue_elt_by_worker_index[vlib_worker_index];
-
-  elt = vlib_get_frame_queue_elt (frame_queue_index, vlib_worker_index);
-
-  handoff_queue_elt_by_worker_index[vlib_worker_index] = elt;
-
-  return elt;
+  return (!vlib_num_workers ()
+          || ((vlib_get_thread_index () == 0
+               && vlib_worker_threads->wait_at_barrier[0])));
 }
 
 u8 *vlib_thread_stack_init (uword thread_index);
+extern void *rpc_call_main_thread_cb_fn;
+
+void
+vlib_process_signal_event_mt_helper (vlib_process_signal_event_mt_args_t *
+                                     args);
+void vlib_rpc_call_main_thread (void *function, u8 * args, u32 size);
+void vlib_get_thread_core_numa (vlib_worker_thread_t * w, unsigned cpu_id);
+vlib_thread_main_t *vlib_get_thread_main_not_inline (void);
 
-int vlib_thread_cb_register (struct vlib_main_t *vm,
-                             vlib_thread_callbacks_t * cb);
+/**
+ * Force workers sync from within worker
+ *
+ * Must be paired with @ref vlib_workers_continue
+ */
+void vlib_workers_sync (void);
+/**
+ * Release barrier after workers sync
+ */
+void vlib_workers_continue (void);
 
 #endif /* included_vlib_threads_h */
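---

Note on the foreach_vlib_main change above: the macro no longer takes the loop body as an argument; it now expands to a for-loop header driven by __foreach_vlib_main_helper, which walks vlib_global_main.vlib_mains and binds this_vlib_main on each iteration. Every caller therefore has to be converted. A minimal before/after sketch of caller code (illustrative only, not part of this patch; the body just clears vm->time_offset, a vlib_main_t field that this diff itself touches):

    /* Before this patch: loop body passed as a statement-expression
     * argument to the macro. */
    foreach_vlib_main (({
      this_vlib_main->time_offset = 0.0;
    }));

    /* After this patch: the macro opens an ordinary for loop, so the
     * body is a plain block and break/continue behave as expected. */
    foreach_vlib_main ()
      {
        this_vlib_main->time_offset = 0.0;
      }

The new vlib_workers_sync () / vlib_workers_continue () declarations at the end of the header are a matched pair, per their doc comments: a worker calls the former to pull the remaining workers to the barrier, and the latter to release them afterwards.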