vlib_worker_thread_t *vlib_worker_threads;
vlib_thread_main_t vlib_thread_main;
+/*
+ * Barrier tracing can be enabled on a normal build to collect information
+ * on barrier use, including timings and call stacks. Deliberately not
+ * keyed off CLIB_DEBUG, because that can add significant overhead which
+ * impacts observed timings.
+ */
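+
+/*
+ * To enable, define BARRIER_TRACING at compile time (and optionally
+ * BARRIER_TRACING_ELOG to send output to elog instead of syslog),
+ * e.g. by adding -DBARRIER_TRACING to the build's CFLAGS.
+ */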
+
+#ifdef BARRIER_TRACING
+ /*
+  * Barrier trace output goes to either syslog or elog, selected at
+  * compile time via BARRIER_TRACING_ELOG
+  */
+#ifdef BARRIER_TRACING_ELOG
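+/*
+ * Intern a caller name in the elog string table: each distinct name is
+ * registered via elog_string() once, and its id is cached in a local
+ * hash keyed by the name.
+ */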
+static u32
+elog_id_for_msg_name (const char *msg_name)
+{
+ uword *p, r;
+ static uword *h;
+ u8 *name_copy;
+
+ if (!h)
+ h = hash_create_string (0, sizeof (uword));
+
+ p = hash_get_mem (h, msg_name);
+ if (p)
+ return p[0];
+ r = elog_string (&vlib_global_main.elog_main, "%s", msg_name);
+
+ name_copy = format (0, "%s%c", msg_name, 0);
+
+ hash_set_mem (h, name_copy, r);
+
+ return r;
+}
+
+ /*
+  * elog variants of the barrier trace functions, used when both
+  * BARRIER_TRACING and BARRIER_TRACING_ELOG are defined
+  */
+
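+/*
+ * Trace a barrier closure. All times are reported in microseconds:
+ * t_entry is the time from barrier open to this sync request, t_open is
+ * the total time the barrier stayed open, and t_closed is the time taken
+ * to bring all workers to the barrier.
+ */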
+static inline void
+barrier_trace_sync (f64 t_entry, f64 t_open, f64 t_closed)
+{
+ /* *INDENT-OFF* */
+ ELOG_TYPE_DECLARE (e) =
+ {
+ .format = "barrier <%d#%s(O:%dus:%dus)(%dus)",
+ .format_args = "i4T4i4i4i4",
+ };
+ /* *INDENT-ON* */
+ struct
+ {
+ u32 count, caller, t_entry, t_open, t_closed;
+ } *ed = 0;
+
+ ed = ELOG_DATA (&vlib_global_main.elog_main, e);
+ ed->count = (int) vlib_worker_threads[0].barrier_sync_count;
+ ed->caller = elog_id_for_msg_name (vlib_worker_threads[0].barrier_caller);
+ ed->t_entry = (int) (1000000.0 * t_entry);
+ ed->t_open = (int) (1000000.0 * t_open);
+ ed->t_closed = (int) (1000000.0 * t_closed);
+}
+
+static inline void
+barrier_trace_sync_rec (f64 t_entry)
+{
+ /* *INDENT-OFF* */
+ ELOG_TYPE_DECLARE (e) =
+ {
+ .format = "barrier <%d(%dus)%s",
+ .format_args = "i4i4T4",
+ };
+ /* *INDENT-ON* */
+ struct
+ {
+ u32 depth, t_entry, caller;
+ } *ed = 0;
+
+ ed = ELOG_DATA (&vlib_global_main.elog_main, e);
+ ed->depth = (int) vlib_worker_threads[0].recursion_level - 1;
+ ed->t_entry = (int) (1000000.0 * t_entry);
+ ed->caller = elog_id_for_msg_name (vlib_worker_threads[0].barrier_caller);
+}
+
+static inline void
+barrier_trace_release_rec (f64 t_entry)
+{
+ /* *INDENT-OFF* */
+ ELOG_TYPE_DECLARE (e) =
+ {
+ .format = "barrier (%dus)%d>",
+ .format_args = "i4i4",
+ };
+ /* *INDENT-ON* */
+ struct
+ {
+ u32 t_entry, depth;
+ } *ed = 0;
+
+ ed = ELOG_DATA (&vlib_global_main.elog_main, e);
+ ed->t_entry = (int) (1000000.0 * t_entry);
+ ed->depth = (int) vlib_worker_threads[0].recursion_level;
+}
+
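+/*
+ * Trace a barrier release: t_entry is the time from closure to this
+ * release request, t_update_main measures from closure to the completion
+ * of any main-thread node runtime update (zero if none was needed), and
+ * t_closed_total is the total time the barrier was closed, all in
+ * microseconds.
+ */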
+static inline void
+barrier_trace_release (f64 t_entry, f64 t_closed_total, f64 t_update_main)
+{
+ /* *INDENT-OFF* */
+ ELOG_TYPE_DECLARE (e) =
+ {
+ .format = "barrier (%dus){%d}(C:%dus)#%d>",
+ .format_args = "i4i4i4i4",
+ };
+ /* *INDENT-ON* */
+ struct
+ {
+ u32 t_entry, t_update_main, t_closed_total, count;
+ } *ed = 0;
+
+ ed = ELOG_DATA (&vlib_global_main.elog_main, e);
+ ed->t_entry = (int) (1000000.0 * t_entry);
+ ed->t_update_main = (int) (1000000.0 * t_update_main);
+ ed->t_closed_total = (int) (1000000.0 * t_closed_total);
+ ed->count = (int) vlib_worker_threads[0].barrier_sync_count;
+
+ /* Reset context for next trace */
+ vlib_worker_threads[0].barrier_context = NULL;
+}
+#else
+char barrier_trace[65536];
+char *btp = barrier_trace;
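+
+/*
+ * Trace text accumulates in barrier_trace across nested sync/release
+ * calls; the outermost barrier_trace_release() dumps the buffer to
+ * stderr and resets btp. There is no bounds checking on the 64KB buffer,
+ * so traces are assumed to stay short.
+ */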
+
+ /*
+  * syslog variants of the barrier trace functions, used when
+  * BARRIER_TRACING is defined but BARRIER_TRACING_ELOG is not
+  */
+
+static inline void
+barrier_trace_sync (f64 t_entry, f64 t_open, f64 t_closed)
+{
+ btp += sprintf (btp, "<%u#%s",
+ (unsigned int) vlib_worker_threads[0].barrier_sync_count,
+ vlib_worker_threads[0].barrier_caller);
+
+  if (vlib_worker_threads[0].barrier_context)
+    {
+      btp += sprintf (btp, "[%s]", vlib_worker_threads[0].barrier_context);
+    }
+
+ btp += sprintf (btp, "(O:%dus:%dus)(%dus):",
+ (int) (1000000.0 * t_entry),
+ (int) (1000000.0 * t_open), (int) (1000000.0 * t_closed));
+
+}
+
+static inline void
+barrier_trace_sync_rec (f64 t_entry)
+{
+ btp += sprintf (btp, "<%u(%dus)%s:",
+		  (unsigned int) (vlib_worker_threads[0].recursion_level - 1),
+ (int) (1000000.0 * t_entry),
+ vlib_worker_threads[0].barrier_caller);
+}
+
+static inline void
+barrier_trace_release_rec (f64 t_entry)
+{
+ btp += sprintf (btp, ":(%dus)%u>", (int) (1000000.0 * t_entry),
+ (int) vlib_worker_threads[0].recursion_level);
+}
+
+static inline void
+barrier_trace_release (f64 t_entry, f64 t_closed_total, f64 t_update_main)
+{
+  btp += sprintf (btp, ":(%dus)", (int) (1000000.0 * t_entry));
+ if (t_update_main > 0)
+ {
+ btp += sprintf (btp, "{%dus}", (int) (1000000.0 * t_update_main));
+ }
+
+ btp += sprintf (btp, "(C:%dus)#%u>",
+ (int) (1000000.0 * t_closed_total),
+ (int) vlib_worker_threads[0].barrier_sync_count);
+
+ /* Dump buffer to syslog, and reset for next trace */
+ fformat (stderr, "BTRC %s\n", barrier_trace);
+ btp = barrier_trace;
+ vlib_worker_threads[0].barrier_context = NULL;
+}
+#endif
+#else
+
+ /* Null functions for default case where barrier tracing isn't used */
+static inline void
+barrier_trace_sync (f64 t_entry, f64 t_open, f64 t_closed)
+{
+}
+
+static inline void
+barrier_trace_sync_rec (f64 t_entry)
+{
+}
+
+static inline void
+barrier_trace_release_rec (f64 t_entry)
+{
+}
+
+static inline void
+barrier_trace_release (f64 t_entry, f64 t_closed_total, f64 t_update_main)
+{
+}
+#endif
+
uword
os_get_nthreads (void)
{
}
static uword *
-vlib_sysfs_list_to_bitmap (char *filename)
+clib_sysfs_list_to_bitmap (char *filename)
{
FILE *fp;
uword *r = 0;
/* get bitmaps of active cpu cores and sockets */
tm->cpu_core_bitmap =
- vlib_sysfs_list_to_bitmap ("/sys/devices/system/cpu/online");
+ clib_sysfs_list_to_bitmap ("/sys/devices/system/cpu/online");
tm->cpu_socket_bitmap =
- vlib_sysfs_list_to_bitmap ("/sys/devices/system/node/online");
+ clib_sysfs_list_to_bitmap ("/sys/devices/system/node/online");
avail_cpu = clib_bitmap_dup (tm->cpu_core_bitmap);
}
/* grab cpu for main thread */
- if (!tm->main_lcore)
+ if (tm->main_lcore == ~0)
{
- tm->main_lcore = clib_bitmap_first_set (avail_cpu);
+ /* if main-lcore is not set, we try to use lcore 1 */
+ if (clib_bitmap_get (avail_cpu, 1))
+ tm->main_lcore = 1;
+ else
+ tm->main_lcore = clib_bitmap_first_set (avail_cpu);
if (tm->main_lcore == (u8) ~ 0)
return clib_error_return (0, "no available cpus to be used for the"
" main thread");
u32 n_vlib_mains = tm->n_vlib_mains;
u32 worker_thread_index;
u8 *main_heap = clib_mem_get_per_cpu_heap ();
- mheap_t *main_heap_header = mheap_header (main_heap);
vec_reset_length (vlib_worker_threads);
vlib_set_thread_name ((char *) w->name);
}
- /*
- * Truth of the matter: we always use at least two
- * threads. So, make the main heap thread-safe
- * and make the event log thread-safe.
- */
- main_heap_header->flags |= MHEAP_FLAG_THREAD_SAFE;
vm->elog_main.lock =
clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES, CLIB_CACHE_LINE_BYTES);
vm->elog_main.lock[0] = 0;
*vlib_worker_threads->node_reforks_required = 0;
vm->need_vlib_worker_thread_node_runtime_update = 0;
+ /* init timing */
+ vm->barrier_epoch = 0;
+ vm->barrier_no_close_before = 0;
+
worker_thread_index = 1;
for (i = 0; i < vec_len (tm->registrations); i++)
{
vlib_node_main_t *nm, *nm_clone;
- vlib_buffer_main_t *bm_clone;
vlib_buffer_free_list_t *fl_clone, *fl_orig;
vlib_buffer_free_list_t *orig_freelist_pool;
int k;
vlib_node_t *n;
vec_add2 (vlib_worker_threads, w, 1);
+ /* Currently unused, may not really work */
if (tr->mheap_size)
- w->thread_mheap =
- mheap_alloc (0 /* use VM */ , tr->mheap_size);
+ {
+#if USE_DLMALLOC == 0
+ w->thread_mheap =
+ mheap_alloc (0 /* use VM */ , tr->mheap_size);
+#else
+ w->thread_mheap = create_mspace (tr->mheap_size,
+ 0 /* unlocked */ );
+#endif
+ }
else
w->thread_mheap = main_heap;
/* Fork vlib_global_main et al. Look for bugs here */
oldheap = clib_mem_set_heap (w->thread_mheap);
- vm_clone = clib_mem_alloc (sizeof (*vm_clone));
+ vm_clone = clib_mem_alloc_aligned (sizeof (*vm_clone),
+ CLIB_CACHE_LINE_BYTES);
clib_memcpy (vm_clone, vlib_mains[0], sizeof (*vm_clone));
vm_clone->thread_index = worker_thread_index;
vm_clone->heap_base = w->thread_mheap;
- vm_clone->mbuf_alloc_list = 0;
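+	  /* Round the heap base down to a VLIB_FRAME_ALIGN boundary */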
+ vm_clone->heap_aligned_base = (void *)
+ (((uword) w->thread_mheap) & ~(VLIB_FRAME_ALIGN - 1));
vm_clone->init_functions_called =
hash_create (0, /* value bytes */ 0);
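+	  /* Pre-allocate the pending RPC vector, then reset its length to zero */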
+ vm_clone->pending_rpc_requests = 0;
+ vec_validate (vm_clone->pending_rpc_requests, 0);
+ _vec_len (vm_clone->pending_rpc_requests) = 0;
memset (&vm_clone->random_buffer, 0,
sizeof (vm_clone->random_buffer));
nm = &vlib_mains[0]->node_main;
nm_clone = &vm_clone->node_main;
/* fork next frames array, preserving node runtime indices */
- nm_clone->next_frames = vec_dup (nm->next_frames);
+ nm_clone->next_frames = vec_dup_aligned (nm->next_frames,
+ CLIB_CACHE_LINE_BYTES);
for (j = 0; j < vec_len (nm_clone->next_frames); j++)
{
vlib_next_frame_t *nf = &nm_clone->next_frames[j];
n++;
}
nm_clone->nodes_by_type[VLIB_NODE_TYPE_INTERNAL] =
- vec_dup (nm->nodes_by_type[VLIB_NODE_TYPE_INTERNAL]);
+ vec_dup_aligned (nm->nodes_by_type[VLIB_NODE_TYPE_INTERNAL],
+ CLIB_CACHE_LINE_BYTES);
vec_foreach (rt,
nm_clone->nodes_by_type[VLIB_NODE_TYPE_INTERNAL])
{
}
nm_clone->nodes_by_type[VLIB_NODE_TYPE_INPUT] =
- vec_dup (nm->nodes_by_type[VLIB_NODE_TYPE_INPUT]);
+ vec_dup_aligned (nm->nodes_by_type[VLIB_NODE_TYPE_INPUT],
+ CLIB_CACHE_LINE_BYTES);
vec_foreach (rt, nm_clone->nodes_by_type[VLIB_NODE_TYPE_INPUT])
{
vlib_node_t *n = vlib_get_node (vm, rt->node_index);
n->runtime_data_bytes));
}
- nm_clone->processes = vec_dup (nm->processes);
+ nm_clone->processes = vec_dup_aligned (nm->processes,
+ CLIB_CACHE_LINE_BYTES);
/* zap the (per worker) frame freelists, etc */
nm_clone->frame_sizes = 0;
clib_mem_set_heap (oldheap);
vec_add1_aligned (vlib_mains, vm_clone, CLIB_CACHE_LINE_BYTES);
- vm_clone->error_main.counters =
- vec_dup (vlib_mains[0]->error_main.counters);
- vm_clone->error_main.counters_last_clear =
- vec_dup (vlib_mains[0]->error_main.counters_last_clear);
+ vm_clone->error_main.counters = vec_dup_aligned
+ (vlib_mains[0]->error_main.counters, CLIB_CACHE_LINE_BYTES);
+ vm_clone->error_main.counters_last_clear = vec_dup_aligned
+ (vlib_mains[0]->error_main.counters_last_clear,
+ CLIB_CACHE_LINE_BYTES);
/* Fork the vlib_buffer_main_t free lists, etc. */
- bm_clone = vec_dup (vm_clone->buffer_main);
- vm_clone->buffer_main = bm_clone;
-
- orig_freelist_pool = bm_clone->buffer_free_list_pool;
- bm_clone->buffer_free_list_pool = 0;
+ orig_freelist_pool = vm_clone->buffer_free_list_pool;
+ vm_clone->buffer_free_list_pool = 0;
/* *INDENT-OFF* */
pool_foreach (fl_orig, orig_freelist_pool,
({
- pool_get_aligned (bm_clone->buffer_free_list_pool,
+ pool_get_aligned (vm_clone->buffer_free_list_pool,
fl_clone, CLIB_CACHE_LINE_BYTES);
ASSERT (fl_orig - orig_freelist_pool
- == fl_clone - bm_clone->buffer_free_list_pool);
+ == fl_clone - vm_clone->buffer_free_list_pool);
fl_clone[0] = fl_orig[0];
fl_clone->buffers = 0;
{
vec_add2 (vlib_worker_threads, w, 1);
if (tr->mheap_size)
- w->thread_mheap =
- mheap_alloc (0 /* use VM */ , tr->mheap_size);
+ {
+#if USE_DLMALLOC == 0
+ w->thread_mheap =
+ mheap_alloc (0 /* use VM */ , tr->mheap_size);
+#else
+ w->thread_mheap =
+	      create_mspace (tr->mheap_size, 0 /* unlocked */ );
+#endif
+ }
else
w->thread_mheap = main_heap;
w->thread_stack =
VLIB_MAIN_LOOP_ENTER_FUNCTION (start_workers);
+
static inline void
worker_thread_node_runtime_update_internal (void)
{
nm_clone = &vm_clone->node_main;
vec_free (nm_clone->next_frames);
- nm_clone->next_frames = vec_dup (nm->next_frames);
+ nm_clone->next_frames = vec_dup_aligned (nm->next_frames,
+ CLIB_CACHE_LINE_BYTES);
for (j = 0; j < vec_len (nm_clone->next_frames); j++)
{
/* re-clone internal nodes */
old_rt = nm_clone->nodes_by_type[VLIB_NODE_TYPE_INTERNAL];
nm_clone->nodes_by_type[VLIB_NODE_TYPE_INTERNAL] =
- vec_dup (nm->nodes_by_type[VLIB_NODE_TYPE_INTERNAL]);
+ vec_dup_aligned (nm->nodes_by_type[VLIB_NODE_TYPE_INTERNAL],
+ CLIB_CACHE_LINE_BYTES);
vec_foreach (rt, nm_clone->nodes_by_type[VLIB_NODE_TYPE_INTERNAL])
{
/* re-clone input nodes */
old_rt = nm_clone->nodes_by_type[VLIB_NODE_TYPE_INPUT];
nm_clone->nodes_by_type[VLIB_NODE_TYPE_INPUT] =
- vec_dup (nm->nodes_by_type[VLIB_NODE_TYPE_INPUT]);
+ vec_dup_aligned (nm->nodes_by_type[VLIB_NODE_TYPE_INPUT],
+ CLIB_CACHE_LINE_BYTES);
vec_foreach (rt, nm_clone->nodes_by_type[VLIB_NODE_TYPE_INPUT])
{
vec_free (old_rt);
- nm_clone->processes = vec_dup (nm->processes);
+ nm_clone->processes = vec_dup_aligned (nm->processes,
+ CLIB_CACHE_LINE_BYTES);
}
-
void
vlib_worker_thread_node_runtime_update (void)
{
tm->n_thread_stacks = 1; /* account for main thread */
tm->sched_policy = ~0;
tm->sched_priority = ~0;
+ tm->main_lcore = ~0;
tr = tm->next;
vlib_worker_thread_barrier_release (vm);
}
+ /*
+  * Enforce a minimum barrier open time to minimize packet loss due to Rx
+  * overflow, based on an empirically derived heuristic: the barrier should
+  * stay open for at least three times as long as it was closed, with an
+  * upper bound of 1ms, because beyond that point it is probably too late
+  * to make a difference
+  */
+
+#ifndef BARRIER_MINIMUM_OPEN_LIMIT
+#define BARRIER_MINIMUM_OPEN_LIMIT 0.001
+#endif
+
+#ifndef BARRIER_MINIMUM_OPEN_FACTOR
+#define BARRIER_MINIMUM_OPEN_FACTOR 3
+#endif
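+
+/*
+ * Example: a barrier closed for 100us must stay open for at least 300us
+ * afterwards; one closed for 500us would call for 1500us, but is capped
+ * at the 1ms limit.
+ */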
+
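+/*
+ * Close the barrier. Note the _int suffix: callers are expected to go
+ * through a vlib_worker_thread_barrier_sync() wrapper that records the
+ * caller name in barrier_caller for the traces above.
+ */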
void
-vlib_worker_thread_barrier_sync (vlib_main_t * vm)
+vlib_worker_thread_barrier_sync_int (vlib_main_t * vm)
{
f64 deadline;
+ f64 now;
+ f64 t_entry;
+ f64 t_open;
+ f64 t_closed;
u32 count;
if (vec_len (vlib_mains) < 2)
count = vec_len (vlib_mains) - 1;
+ /* Record entry relative to last close */
+ now = vlib_time_now (vm);
+ t_entry = now - vm->barrier_epoch;
+
/* Tolerate recursive calls */
if (++vlib_worker_threads[0].recursion_level > 1)
- return;
+ {
+ barrier_trace_sync_rec (t_entry);
+ return;
+ }
vlib_worker_threads[0].barrier_sync_count++;
- deadline = vlib_time_now (vm) + BARRIER_SYNC_TIMEOUT;
+ /* Enforce minimum barrier open time to minimize packet loss */
+ ASSERT (vm->barrier_no_close_before <= (now + BARRIER_MINIMUM_OPEN_LIMIT));
+ while ((now = vlib_time_now (vm)) < vm->barrier_no_close_before)
+ ;
+
+ /* Record time of closure */
+ t_open = now - vm->barrier_epoch;
+ vm->barrier_epoch = now;
+
+ deadline = now + BARRIER_SYNC_TIMEOUT;
*vlib_worker_threads->wait_at_barrier = 1;
while (*vlib_worker_threads->workers_at_barrier != count)
{
- if (vlib_time_now (vm) > deadline)
+ if ((now = vlib_time_now (vm)) > deadline)
{
fformat (stderr, "%s: worker thread deadlock\n", __FUNCTION__);
os_panic ();
}
}
+
+ t_closed = now - vm->barrier_epoch;
+
+  barrier_trace_sync (t_entry, t_open, t_closed);
+}
+
+void vlib_stat_segment_lock (void) __attribute__ ((weak));
+void
+vlib_stat_segment_lock (void)
+{
+}
+
+void vlib_stat_segment_unlock (void) __attribute__ ((weak));
+void
+vlib_stat_segment_unlock (void)
+{
}
void
vlib_worker_thread_barrier_release (vlib_main_t * vm)
{
f64 deadline;
+ f64 now;
+ f64 minimum_open;
+ f64 t_entry;
+ f64 t_closed_total;
+ f64 t_update_main = 0.0;
int refork_needed = 0;
if (vec_len (vlib_mains) < 2)
ASSERT (vlib_get_thread_index () == 0);
+
+ now = vlib_time_now (vm);
+ t_entry = now - vm->barrier_epoch;
+
if (--vlib_worker_threads[0].recursion_level > 0)
- return;
+ {
+ barrier_trace_release_rec (t_entry);
+ return;
+ }
/* Update (all) node runtimes before releasing the barrier, if needed */
if (vm->need_vlib_worker_thread_node_runtime_update)
{
+      /*
+       * Lock the stat segment, so we're safe when rebuilding the
+       * stat segment node clones from the stat thread...
+       */
+ vlib_stat_segment_lock ();
+
/* Do stats elements on main thread */
worker_thread_node_runtime_update_internal ();
vm->need_vlib_worker_thread_node_runtime_update = 0;
refork_needed = 1;
clib_smp_atomic_add (vlib_worker_threads->node_reforks_required,
(vec_len (vlib_mains) - 1));
+ now = vlib_time_now (vm);
+ t_update_main = now - vm->barrier_epoch;
}
- deadline = vlib_time_now (vm) + BARRIER_SYNC_TIMEOUT;
+ deadline = now + BARRIER_SYNC_TIMEOUT;
*vlib_worker_threads->wait_at_barrier = 0;
while (*vlib_worker_threads->workers_at_barrier > 0)
{
- if (vlib_time_now (vm) > deadline)
+ if ((now = vlib_time_now (vm)) > deadline)
{
fformat (stderr, "%s: worker thread deadlock\n", __FUNCTION__);
os_panic ();
/* Wait for reforks before continuing */
if (refork_needed)
{
- deadline = vlib_time_now (vm) + BARRIER_SYNC_TIMEOUT;
+ now = vlib_time_now (vm);
+
+ deadline = now + BARRIER_SYNC_TIMEOUT;
while (*vlib_worker_threads->node_reforks_required > 0)
{
- if (vlib_time_now (vm) > deadline)
+ if ((now = vlib_time_now (vm)) > deadline)
{
fformat (stderr, "%s: worker thread refork deadlock\n",
__FUNCTION__);
os_panic ();
}
}
+ vlib_stat_segment_unlock ();
}
+
+ t_closed_total = now - vm->barrier_epoch;
+
+ minimum_open = t_closed_total * BARRIER_MINIMUM_OPEN_FACTOR;
+
+ if (minimum_open > BARRIER_MINIMUM_OPEN_LIMIT)
+ {
+ minimum_open = BARRIER_MINIMUM_OPEN_LIMIT;
+ }
+
+ vm->barrier_no_close_before = now + minimum_open;
+
+ /* Record barrier epoch (used to enforce minimum open time) */
+ vm->barrier_epoch = now;
+
+  barrier_trace_release (t_entry, t_closed_total, t_update_main);
}
/*
return (fqm - tm->frame_queue_mains);
}
-
int
vlib_thread_cb_register (struct vlib_main_t *vm, vlib_thread_callbacks_t * cb)
{
return 0;
}
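+
+/*
+ * Runs on the main thread (enforced by the ASSERT) to signal a process
+ * event on behalf of a worker, typically invoked via the RPC mechanism
+ * below.
+ */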
+void
+vlib_process_signal_event_mt_helper (vlib_process_signal_event_mt_args_t *
+ args)
+{
+ ASSERT (vlib_get_thread_index () == 0);
+ vlib_process_signal_event (vlib_get_main (), args->node_index,
+ args->type_opaque, args->data);
+}
+
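+/*
+ * The RPC transport itself lives in a higher layer, which registers its
+ * handler in rpc_call_main_thread_cb_fn; vlib simply calls through the
+ * pointer, warning if nothing has been registered.
+ */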
+void *rpc_call_main_thread_cb_fn;
+
+void
+vlib_rpc_call_main_thread (void *callback, u8 * args, u32 arg_size)
+{
+ if (rpc_call_main_thread_cb_fn)
+ {
+ void (*fp) (void *, u8 *, u32) = rpc_call_main_thread_cb_fn;
+ (*fp) (callback, args, arg_size);
+ }
+ else
+ clib_warning ("BUG: rpc_call_main_thread_cb_fn NULL!");
+}
+
clib_error_t *
threads_init (vlib_main_t * vm)
{