X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=src%2Fvlib%2Fthreads.c;h=a7d9155cb58b0a0fc738c291249746227bc8743f;hb=18a4a371646bccfd299e6a509e801a524aeb4c92;hp=4e4d60a5e6235cfcdff84c027425352bf15d3e01;hpb=c602b384ac022f70690a3a7c711149f7cb63ad12;p=vpp.git diff --git a/src/vlib/threads.c b/src/vlib/threads.c index 4e4d60a5e62..a7d9155cb58 100644 --- a/src/vlib/threads.c +++ b/src/vlib/threads.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include @@ -27,7 +28,6 @@ DECLARE_CJ_GLOBAL_LOG; -#define FRAME_QUEUE_NELTS 64 u32 vl (void *p) @@ -45,28 +45,6 @@ vlib_thread_main_t vlib_thread_main; * imapacts observed timings. */ -u32 -elog_global_id_for_msg_name (const char *msg_name) -{ - uword *p, r; - static uword *h; - u8 *name_copy; - - if (!h) - h = hash_create_string (0, sizeof (uword)); - - p = hash_get_mem (h, msg_name); - if (p) - return p[0]; - r = elog_string (&vlib_global_main.elog_main, "%s", msg_name); - - name_copy = format (0, "%s%c", msg_name, 0); - - hash_set_mem (h, name_copy, r); - - return r; -} - static inline void barrier_trace_sync (f64 t_entry, f64 t_open, f64 t_closed) { @@ -87,8 +65,8 @@ barrier_trace_sync (f64 t_entry, f64 t_open, f64 t_closed) ed = ELOG_DATA (&vlib_global_main.elog_main, e); ed->count = (int) vlib_worker_threads[0].barrier_sync_count; - ed->caller = elog_global_id_for_msg_name - (vlib_worker_threads[0].barrier_caller); + ed->caller = elog_string (&vlib_global_main.elog_main, + (char *) vlib_worker_threads[0].barrier_caller); ed->t_entry = (int) (1000000.0 * t_entry); ed->t_open = (int) (1000000.0 * t_open); ed->t_closed = (int) (1000000.0 * t_closed); @@ -114,8 +92,8 @@ barrier_trace_sync_rec (f64 t_entry) ed = ELOG_DATA (&vlib_global_main.elog_main, e); ed->depth = (int) vlib_worker_threads[0].recursion_level - 1; - ed->caller = elog_global_id_for_msg_name - (vlib_worker_threads[0].barrier_caller); + ed->caller = elog_string (&vlib_global_main.elog_main, + (char *) vlib_worker_threads[0].barrier_caller); } static inline void @@ -278,6 +256,21 @@ vlib_thread_init (vlib_main_t * vm) } avail_cpu = clib_bitmap_set (avail_cpu, tm->main_lcore, 0); + /* + * Determine if the number of workers is greater than 0. + * If so, mark CPU 0 unavailable so workers will be numbered after main. 
+ */ + u32 n_workers = 0; + uword *p = hash_get_mem (tm->thread_registrations_by_name, "workers"); + if (p != 0) + { + vlib_thread_registration_t *tr = (vlib_thread_registration_t *) p[0]; + int worker_thread_count = tr->count; + n_workers = worker_thread_count; + } + if (tm->skip_cores == 0 && n_workers) + avail_cpu = clib_bitmap_set (avail_cpu, 0, 0); + /* assume that there is socket 0 only if there is no data from sysfs */ if (!tm->cpu_socket_bitmap) tm->cpu_socket_bitmap = clib_bitmap_set (0, 0, 1); @@ -306,6 +299,8 @@ vlib_thread_init (vlib_main_t * vm) w->thread_id = pthread_self (); tm->n_vlib_mains = 1; + vlib_get_thread_core_numa (w, w->cpu_id); + if (tm->sched_policy != ~0) { struct sched_param sched_param; @@ -600,24 +595,38 @@ vlib_worker_thread_bootstrap_fn (void *arg) return rv; } -static void -vlib_get_thread_core_socket (vlib_worker_thread_t * w, unsigned cpu_id) +void +vlib_get_thread_core_numa (vlib_worker_thread_t * w, unsigned cpu_id) { const char *sys_cpu_path = "/sys/devices/system/cpu/cpu"; + const char *sys_node_path = "/sys/devices/system/node/node"; + clib_bitmap_t *nbmp = 0, *cbmp = 0; + u32 node; u8 *p = 0; - int core_id = -1, socket_id = -1; + int core_id = -1, numa_id = -1; p = format (p, "%s%u/topology/core_id%c", sys_cpu_path, cpu_id, 0); clib_sysfs_read ((char *) p, "%d", &core_id); vec_reset_length (p); - p = - format (p, "%s%u/topology/physical_package_id%c", sys_cpu_path, cpu_id, - 0); - clib_sysfs_read ((char *) p, "%d", &socket_id); + + /* *INDENT-OFF* */ + clib_sysfs_read ("/sys/devices/system/node/online", "%U", + unformat_bitmap_list, &nbmp); + clib_bitmap_foreach (node, nbmp, ({ + p = format (p, "%s%u/cpulist%c", sys_node_path, node, 0); + clib_sysfs_read ((char *) p, "%U", unformat_bitmap_list, &cbmp); + if (clib_bitmap_get (cbmp, cpu_id)) + numa_id = node; + vec_reset_length (cbmp); + vec_reset_length (p); + })); + /* *INDENT-ON* */ + vec_free (nbmp); + vec_free (cbmp); vec_free (p); w->core_id = core_id; - w->socket_id = socket_id; + w->numa_id = numa_id; } static clib_error_t * @@ -625,9 +634,28 @@ vlib_launch_thread_int (void *fp, vlib_worker_thread_t * w, unsigned cpu_id) { vlib_thread_main_t *tm = &vlib_thread_main; void *(*fp_arg) (void *) = fp; + void *numa_heap; w->cpu_id = cpu_id; - vlib_get_thread_core_socket (w, cpu_id); + vlib_get_thread_core_numa (w, cpu_id); + + /* Set up NUMA-bound heap if indicated */ + if (clib_per_numa_mheaps[w->numa_id] == 0) + { + /* If the user requested a NUMA heap, create it... 
*/ + if (tm->numa_heap_size) + { + numa_heap = clib_mem_init_thread_safe_numa + (0 /* DIY */ , tm->numa_heap_size, w->numa_id); + clib_per_numa_mheaps[w->numa_id] = numa_heap; + } + else + { + /* Or, use the main heap */ + clib_per_numa_mheaps[w->numa_id] = w->thread_mheap; + } + } + if (tm->cb.vlib_launch_thread_cb && !w->registration->use_pthreads) return tm->cb.vlib_launch_thread_cb (fp, (void *) w, cpu_id); else @@ -730,15 +758,8 @@ start_workers (vlib_main_t * vm) vec_add2 (vlib_worker_threads, w, 1); /* Currently unused, may not really work */ if (tr->mheap_size) - { -#if USE_DLMALLOC == 0 - w->thread_mheap = - mheap_alloc (0 /* use VM */ , tr->mheap_size); -#else - w->thread_mheap = create_mspace (tr->mheap_size, - 0 /* unlocked */ ); -#endif - } + w->thread_mheap = create_mspace (tr->mheap_size, + 0 /* unlocked */ ); else w->thread_mheap = main_heap; @@ -868,6 +889,7 @@ start_workers (vlib_main_t * vm) #ifdef VLIB_SUPPORTS_ARBITRARY_SCALAR_SIZES nm_clone->frame_size_hash = hash_create (0, sizeof (uword)); #endif + nm_clone->node_by_error = nm->node_by_error; /* Packet trace buffers are guaranteed to be empty, nothing to do here */ @@ -901,13 +923,8 @@ start_workers (vlib_main_t * vm) vec_add2 (vlib_worker_threads, w, 1); if (tr->mheap_size) { -#if USE_DLMALLOC == 0 - w->thread_mheap = - mheap_alloc (0 /* use VM */ , tr->mheap_size); -#else w->thread_mheap = create_mspace (tr->mheap_size, 0 /* locked */ ); -#endif } else w->thread_mheap = main_heap; @@ -1082,11 +1099,7 @@ vlib_worker_thread_node_refork (void) clib_mem_alloc_no_fail (vec_len (nm->nodes) * sizeof (*new_n_clone)); for (j = 0; j < vec_len (nm->nodes); j++) { - vlib_node_t *old_n_clone; - vlib_node_t *new_n; - - new_n = nm->nodes[j]; - old_n_clone = old_nodes_clone[j]; + vlib_node_t *new_n = nm->nodes[j]; clib_memcpy_fast (new_n_clone, new_n, sizeof (*new_n)); /* none of the copied nodes have enqueue rights given out */ @@ -1102,6 +1115,7 @@ vlib_worker_thread_node_refork (void) } else { + vlib_node_t *old_n_clone = old_nodes_clone[j]; /* Copy stats if the old data is valid */ clib_memcpy_fast (&new_n_clone->stats_total, &old_n_clone->stats_total, @@ -1205,6 +1219,7 @@ vlib_worker_thread_node_refork (void) nm_clone->processes = vec_dup_aligned (nm->processes, CLIB_CACHE_LINE_BYTES); + nm_clone->node_by_error = nm->node_by_error; } void @@ -1266,6 +1281,9 @@ cpu_config (vlib_main_t * vm, unformat_input_t * input) ; else if (unformat (input, "skip-cores %u", &tm->skip_cores)) ; + else if (unformat (input, "numa-heap-size %U", + unformat_memory_size, &tm->numa_heap_size)) + ; else if (unformat (input, "coremask-%s %U", &name, unformat_bitmap_mask, &bitmap) || unformat (input, "corelist-%s %U", &name, @@ -1281,6 +1299,10 @@ cpu_config (vlib_main_t * vm, unformat_input_t * input) return clib_error_return (0, "corelist cannot be set for '%s' threads", name); + if (tr->count) + return clib_error_return + (0, "core placement of '%s' threads is already configured", + name); tr->coremask = bitmap; tr->count = clib_bitmap_count_set_bits (tr->coremask); @@ -1299,9 +1321,14 @@ cpu_config (vlib_main_t * vm, unformat_input_t * input) return clib_error_return (0, "no such thread type 3 '%s'", name); tr = (vlib_thread_registration_t *) p[0]; + if (tr->fixed_count) return clib_error_return - (0, "number of %s threads not configurable", tr->name); + (0, "number of '%s' threads not configurable", name); + if (tr->count) + return clib_error_return + (0, "number of '%s' threads is already configured", name); + tr->count = count; } else @@ 
-1453,7 +1480,7 @@ vlib_worker_thread_barrier_sync_int (vlib_main_t * vm, const char *func_name) for (i = 1; i < vec_len (vlib_mains); i++) max_vector_rate = clib_max (max_vector_rate, - vlib_last_vectors_per_main_loop_as_f64 (vlib_mains[i])); + (f64) vlib_last_vectors_per_main_loop (vlib_mains[i])); vlib_worker_threads[0].barrier_sync_count++; @@ -1674,6 +1701,7 @@ vlib_frame_queue_dequeue (vlib_main_t * vm, vlib_frame_queue_main_t * fqm) while (1) { + vlib_buffer_t *b; if (fq->head == fq->tail) { fq->head_hint = fq->head; @@ -1696,6 +1724,11 @@ vlib_frame_queue_dequeue (vlib_main_t * vm, vlib_frame_queue_main_t * fqm) f = vlib_get_frame_to_node (vm, fqm->node_index); + /* If the first vector is traced, set the frame trace flag */ + b = vlib_get_buffer (vm, from[0]); + if (b->flags & VLIB_BUFFER_IS_TRACED) + f->frame_flags |= VLIB_NODE_FLAG_TRACE; + to = vlib_frame_vector_args (f); n_left_to_node = elt->n_vectors; @@ -1786,7 +1819,7 @@ vlib_frame_queue_main_init (u32 node_index, u32 frame_queue_nelts) int i; if (frame_queue_nelts == 0) - frame_queue_nelts = FRAME_QUEUE_NELTS; + frame_queue_nelts = FRAME_QUEUE_MAX_NELTS; ASSERT (frame_queue_nelts >= 8); @@ -1866,11 +1899,17 @@ show_clock_command_fn (vlib_main_t * vm, unformat_input_t * input, vlib_cli_command_t * cmd) { int i; - f64 now; + int verbose = 0; + clib_timebase_t _tb, *tb = &_tb; - now = vlib_time_now (vm); + (void) unformat (input, "verbose %=", &verbose, 1); + + clib_timebase_init (tb, 0 /* GMT */ , CLIB_TIMEBASE_DAYLIGHT_NONE, + &vm->clib_time); - vlib_cli_output (vm, "Time now %.9f", now); + vlib_cli_output (vm, "%U, %U GMT", format_clib_time, &vm->clib_time, + verbose, format_clib_timebase_time, + clib_timebase_now (tb)); if (vec_len (vlib_mains) == 1) return 0; @@ -1882,6 +1921,10 @@ show_clock_command_fn (vlib_main_t * vm, { if (vlib_mains[i] == 0) continue; + + vlib_cli_output (vm, "%d: %U", i, format_clib_time, + &vlib_mains[i]->clib_time, verbose); + vlib_cli_output (vm, "Thread %d offset %.9f error %.9f", i, vlib_mains[i]->time_offset, vm->time_last_barrier_release -
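
The vlib_get_thread_core_numa() hunk above replaces the old topology/physical_package_id lookup with a NUMA-node lookup driven by /sys/devices/system/node/*/cpulist: read the list of online nodes, then attribute the cpu_id to whichever node lists it. Below is a rough standalone illustration of that sysfs walk in plain C with libc only; the helper names, the simple range-list parser, and the 256-node upper bound are assumptions made for this sketch and are not part of the patch or of clib.

/*
 * Standalone sketch, not VPP code: resolve a cpu_id to its core_id and
 * NUMA node the same way the vlib_get_thread_core_numa() hunk above does,
 * but with plain libc instead of clib_sysfs_read() / unformat_bitmap_list().
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Read the first line of a sysfs file into buf, stripping the newline. */
static int
sysfs_read_line (const char *path, char *buf, size_t len)
{
  FILE *f = fopen (path, "r");
  if (!f)
    return -1;
  if (!fgets (buf, (int) len, f))
    {
      fclose (f);
      return -1;
    }
  fclose (f);
  buf[strcspn (buf, "\n")] = 0;
  return 0;
}

/* Return 1 if cpu appears in a sysfs range list such as "0-3,8,10-11". */
static int
cpulist_contains (const char *list, unsigned cpu)
{
  const char *p = list;
  while (*p)
    {
      unsigned lo, hi;
      int n = 0;
      if (sscanf (p, "%u-%u%n", &lo, &hi, &n) == 2)
        ;                       /* got a "lo-hi" range */
      else if (sscanf (p, "%u%n", &lo, &n) == 1)
        hi = lo;                /* got a single cpu number */
      else
        return 0;               /* malformed tail; give up */
      if (cpu >= lo && cpu <= hi)
        return 1;
      p += n;
      if (*p == ',')
        p++;
    }
  return 0;
}

int
main (int argc, char **argv)
{
  unsigned cpu_id = argc > 1 ? (unsigned) atoi (argv[1]) : 0;
  char path[128], buf[256], online[128];
  int core_id = -1, numa_id = -1;

  /* core_id comes straight from the per-cpu topology directory */
  snprintf (path, sizeof (path),
            "/sys/devices/system/cpu/cpu%u/topology/core_id", cpu_id);
  if (sysfs_read_line (path, buf, sizeof (buf)) == 0)
    core_id = atoi (buf);

  /* walk the online NUMA nodes; the node whose cpulist contains cpu_id wins */
  if (sysfs_read_line ("/sys/devices/system/node/online", online,
                       sizeof (online)) == 0)
    {
      unsigned node;
      for (node = 0; node < 256; node++)        /* arbitrary upper bound */
        {
          if (!cpulist_contains (online, node))
            continue;
          snprintf (path, sizeof (path),
                    "/sys/devices/system/node/node%u/cpulist", node);
          if (sysfs_read_line (path, buf, sizeof (buf)) == 0
              && cpulist_contains (buf, cpu_id))
            numa_id = (int) node;
        }
    }

  printf ("cpu %u: core_id %d numa_id %d\n", cpu_id, core_id, numa_id);
  return 0;
}

Compiled with any C compiler and run with a CPU number as its argument, the sketch should report the same core_id/numa_id pair that the worker thread records into w->core_id and w->numa_id; on a single-socket machine every online CPU should map to numa_id 0, which is why the patch can safely fall back to the main heap when no per-NUMA heap size is configured.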