}
/* grab cpu for main thread */
- if (!tm->main_lcore)
+ if (tm->main_lcore == ~0)
{
- tm->main_lcore = clib_bitmap_first_set (avail_cpu);
+ /* if main-lcore is not set, we try to use lcore 1 */
+ if (clib_bitmap_get (avail_cpu, 1))
+ tm->main_lcore = 1;
+ else
+ tm->main_lcore = clib_bitmap_first_set (avail_cpu);
if (tm->main_lcore == (u8) ~ 0)
return clib_error_return (0, "no available cpus to be used for the"
" main thread");
u32 n_vlib_mains = tm->n_vlib_mains;
u32 worker_thread_index;
u8 *main_heap = clib_mem_get_per_cpu_heap ();
- mheap_t *main_heap_header = mheap_header (main_heap);
vec_reset_length (vlib_worker_threads);
vlib_set_thread_name ((char *) w->name);
}
- /*
- * Truth of the matter: we always use at least two
- * threads. So, make the main heap thread-safe
- * and make the event log thread-safe.
- */
- main_heap_header->flags |= MHEAP_FLAG_THREAD_SAFE;
vm->elog_main.lock =
clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES, CLIB_CACHE_LINE_BYTES);
vm->elog_main.lock[0] = 0;
vlib_node_t *n;
vec_add2 (vlib_worker_threads, w, 1);
+ /* Currently unused, may not really work */
if (tr->mheap_size)
- w->thread_mheap =
- mheap_alloc (0 /* use VM */ , tr->mheap_size);
+ {
+#if USE_DLMALLOC == 0
+ w->thread_mheap =
+ mheap_alloc (0 /* use VM */ , tr->mheap_size);
+#else
+ w->thread_mheap = create_mspace (tr->mheap_size,
+ 0 /* unlocked */ );
+#endif
+ }
else
w->thread_mheap = main_heap;
vm_clone->thread_index = worker_thread_index;
vm_clone->heap_base = w->thread_mheap;
- vm_clone->mbuf_alloc_list = 0;
+ vm_clone->heap_aligned_base = (void *)
+ (((uword) w->thread_mheap) & ~(VLIB_FRAME_ALIGN - 1));
vm_clone->init_functions_called =
hash_create (0, /* value bytes */ 0);
vm_clone->pending_rpc_requests = 0;
{
vec_add2 (vlib_worker_threads, w, 1);
if (tr->mheap_size)
- w->thread_mheap =
- mheap_alloc (0 /* use VM */ , tr->mheap_size);
+ {
+#if USE_DLMALLOC == 0
+ w->thread_mheap =
+ mheap_alloc (0 /* use VM */ , tr->mheap_size);
+#else
+ w->thread_mheap =
+ create_mspace (tr->mheap_size, 0 /* locked */ );
+#endif
+ }
else
w->thread_mheap = main_heap;
w->thread_stack =
tm->n_thread_stacks = 1; /* account for main thread */
tm->sched_policy = ~0;
tm->sched_priority = ~0;
+ tm->main_lcore = ~0;
tr = tm->next;
}
+/* Weak no-op stubs for the stat segment lock/unlock hooks.  When the
+ * stats segment code is linked into the image it supplies strong
+ * definitions that take the real lock; images built without it fall
+ * back to these empty bodies, so callers need no #ifdef guards. */
+void vlib_stat_segment_lock (void) __attribute__ ((weak));
+void
+vlib_stat_segment_lock (void)
+{
+}
+
+void vlib_stat_segment_unlock (void) __attribute__ ((weak));
+void
+vlib_stat_segment_unlock (void)
+{
+}
+
void
vlib_worker_thread_barrier_release (vlib_main_t * vm)
{
/* Update (all) node runtimes before releasing the barrier, if needed */
if (vm->need_vlib_worker_thread_node_runtime_update)
{
+      /*
+       * Lock the stat segment here, so we're safe when
+       * rebuilding the stat segment node clones from the
+       * stat thread...
+       */
+ vlib_stat_segment_lock ();
+
/* Do stats elements on main thread */
worker_thread_node_runtime_update_internal ();
vm->need_vlib_worker_thread_node_runtime_update = 0;
os_panic ();
}
}
+ vlib_stat_segment_unlock ();
}
t_closed_total = now - vm->barrier_epoch;