#include <vlib/vlib.h>
#include <vlib/threads.h>
-#include <vlib/unix/cj.h>
#include <vlib/stat_weak_inlines.h>
-DECLARE_CJ_GLOBAL_LOG;
-
-
u32
vl (void *p)
{
  /* return the vector length of p */
  return vec_len (p);
}
avail_cpu = clib_bitmap_set (avail_cpu, tm->main_lcore, 0);
- /*
- * Determine if the number of workers is greater than 0.
- * If so, mark CPU 0 unavailable so workers will be numbered after main.
- */
- u32 n_workers = 0;
- uword *p = hash_get_mem (tm->thread_registrations_by_name, "workers");
- if (p != 0)
- {
- vlib_thread_registration_t *tr = (vlib_thread_registration_t *) p[0];
- int worker_thread_count = tr->count;
- n_workers = worker_thread_count;
- }
- if (tm->skip_cores == 0 && n_workers)
- avail_cpu = clib_bitmap_set (avail_cpu, 0, 0);
-
/* assume that there is socket 0 only if there is no data from sysfs */
if (!tm->cpu_socket_bitmap)
tm->cpu_socket_bitmap = clib_bitmap_set (0, 0, 1);
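The cpu-placement hunks above and below lean on the clib_bitmap API from vppinfra. For orientation, a minimal standalone sketch of the three calls involved (the function name is illustrative; semantics per vppinfra/bitmap.h):

#include <vppinfra/bitmap.h>

static void
bitmap_usage_sketch (void)
{
  uword *b = 0;                         /* an empty bitmap is a NULL vector */
  b = clib_bitmap_set (b, 3, 1);        /* mark CPU 3 available; may realloc */
  ASSERT (clib_bitmap_get (b, 3) == 1);
  ASSERT (clib_bitmap_first_set (b) == 3);
  b = clib_bitmap_set (b, 3, 0);        /* claim CPU 3 */
  ASSERT (clib_bitmap_first_set (b) == ~0);     /* ~0 means no bit set */
  clib_bitmap_free (b);
}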
{
for (j = 0; j < tr->count; j++)
{
+ /* Do not use CPU 0 by default - leave it to the host and IRQs */
+ uword avail_c0 = clib_bitmap_get (avail_cpu, 0);
+ avail_cpu = clib_bitmap_set (avail_cpu, 0, 0);
+
uword c = clib_bitmap_first_set (avail_cpu);
+ /* Use CPU 0 as a last resort */
+ if (c == ~0 && avail_c0)
+ {
+ c = 0;
+ avail_c0 = 0;
+ }
+
if (c == ~0)
return clib_error_return (0,
"no available cpus to be used for"
" the '%s' thread", tr->name);
+ avail_cpu = clib_bitmap_set (avail_cpu, 0, avail_c0);
avail_cpu = clib_bitmap_set (avail_cpu, c, 0);
tr->coremask = clib_bitmap_set (tr->coremask, c, 1);
}
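The save/mask/restore sequence above is easiest to follow with concrete bitmaps; a worked trace of the three possible outcomes:

  avail_cpu = {0, 5}: bit 0 is masked, clib_bitmap_first_set returns 5, the
      thread is placed on CPU 5, and bit 0 is restored for later registrations.
  avail_cpu = {0}:    first_set returns ~0, the last-resort branch takes CPU 0,
      and avail_c0 is cleared so the restore does not re-add it.
  avail_cpu = {}:     c stays ~0 and the "no available cpus" error is returned.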
__os_thread_index = w - vlib_worker_threads;
+ vlib_process_start_switch_stack (vlib_mains[__os_thread_index], 0);
rv = (void *) clib_calljmp
((uword (*)(uword)) w->thread_function,
(uword) arg, w->thread_stack + VLIB_THREAD_STACK_SIZE);
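clib_calljmp (vppinfra/longjmp.h) invokes a function on a caller-supplied stack, which is how the worker's thread_function is entered at the top of its private thread_stack. A minimal sketch under that reading; the names and toy stack size are illustrative:

#include <vppinfra/longjmp.h>

static uword
runs_on_private_stack (uword arg)
{
  return arg + 1;               /* executes with sp inside the supplied stack */
}

static void
calljmp_sketch (void)
{
  /* stacks grow down, so pass the high end, mirroring
     w->thread_stack + VLIB_THREAD_STACK_SIZE above */
  static u8 stack[4096] __attribute__ ((aligned (16)));
  uword rv = clib_calljmp (runs_on_private_stack, 41, stack + sizeof (stack));
  ASSERT (rv == 42);
}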
clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES, CLIB_CACHE_LINE_BYTES);
vm->elog_main.lock[0] = 0;
+ clib_callback_data_init (&vm->vlib_node_runtime_perf_callbacks,
+ &vm->worker_thread_main_loop_callback_lock);
+
if (n_vlib_mains > 1)
{
/* Replace hand-crafted length-1 vector with a real vector */
vm->barrier_no_close_before = 0;
worker_thread_index = 1;
+ clib_spinlock_init (&vm->worker_thread_main_loop_callback_lock);
for (i = 0; i < vec_len (tm->registrations); i++)
{
_vec_len (vm_clone->pending_rpc_requests) = 0;
clib_memset (&vm_clone->random_buffer, 0,
sizeof (vm_clone->random_buffer));
+ clib_spinlock_init
+ (&vm_clone->worker_thread_main_loop_callback_lock);
+ clib_callback_data_init
+ (&vm_clone->vlib_node_runtime_perf_callbacks,
+ &vm_clone->worker_thread_main_loop_callback_lock);
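Each clone now carries its own worker_thread_main_loop_callback_lock, handed to clib_callback_data_init so callback updates are serialized per thread. A minimal sketch of the underlying clib_spinlock pattern (vppinfra/lock.h); the counter is a stand-in for the protected callback state:

#include <vppinfra/lock.h>

static clib_spinlock_t sketch_lock;
static u32 sketch_counter;

static void
sketch_init (void)
{
  clib_spinlock_init (&sketch_lock);
}

static void
sketch_bump (void)
{
  clib_spinlock_lock (&sketch_lock);    /* busy-waits until acquired */
  sketch_counter++;
  clib_spinlock_unlock (&sketch_lock);
}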
nm = &vlib_mains[0]->node_main;
nm_clone = &vm_clone->node_main;
return clib_error_return (0,
"corelist cannot be set for '%s' threads",
name);
+ if (tr->count)
+ return clib_error_return
+ (0, "core placement of '%s' threads is already configured",
+ name);
tr->coremask = bitmap;
tr->count = clib_bitmap_count_set_bits (tr->coremask);
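These unformat branches consume the startup.conf cpu section; with the new tr->count checks, conflicting placement directives now fail loudly instead of the last one silently winning. A typical configuration the corelist path accepts:

  cpu {
    main-core 1
    corelist-workers 2-3,18-19
  }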
return clib_error_return (0, "no such thread type 3 '%s'", name);
tr = (vlib_thread_registration_t *) p[0];
+
if (tr->fixed_count)
return clib_error_return
- (0, "number of %s threads not configurable", tr->name);
+ (0, "number of '%s' threads not configurable", name);
+ if (tr->count)
+ return clib_error_return
+ (0, "number of '%s' threads is already configured", name);
+
tr->count = count;
}
else
*vlib_worker_threads->wait_at_barrier = 0;
}
+/**
+ * Return true if the worker thread barrier is held
+ */
+u8
+vlib_worker_thread_barrier_held (void)
+{
+ if (vec_len (vlib_mains) < 2)
+ return (1);
+
+ return (*vlib_worker_threads->wait_at_barrier == 1);
+}
+
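A typical use of the new predicate is asserting a safety precondition before touching state that workers read without locks; a hypothetical caller:

static void
rebuild_shared_table (void)
{
  /* only safe with workers parked at the barrier (or single-threaded) */
  ASSERT (vlib_worker_thread_barrier_held ());
  /* ... rebuild structures that workers read without locks ... */
}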
void
vlib_worker_thread_barrier_sync_int (vlib_main_t * vm, const char *func_name)
{
return;
}
+ if (PREDICT_FALSE (vec_len (vm->barrier_perf_callbacks) != 0))
+ clib_call_callbacks (vm->barrier_perf_callbacks, vm,
+ vm->clib_time.last_cpu_time, 0 /* enter */ );
+
  /*
   * Need data to decide if we're working hard enough to honor
   * the barrier hold-down timer.
   */
barrier_trace_release (t_entry, t_closed_total, t_update_main);
+ if (PREDICT_FALSE (vec_len (vm->barrier_perf_callbacks) != 0))
+ clib_call_callbacks (vm->barrier_perf_callbacks, vm,
+ vm->clib_time.last_cpu_time, 1 /* leave */ );
+}
+
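From the clib_call_callbacks sites above, each barrier perf callback is invoked as (vm, cpu_time, leave-flag). A sketch of a callback matching that shape; how it gets registered on vm->barrier_perf_callbacks (for example via VPP's usual clib_callback_enable_disable pattern) is an assumption and not shown:

static u64 barrier_release_count;

static void
my_barrier_perf_cb (vlib_main_t * vm, u64 cpu_time, int leave)
{
  /* called with leave == 0 on barrier enter, leave == 1 on release */
  if (leave)
    barrier_release_count++;
}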
+/**
+ * Wait until each of the workers has been once around the track
+ */
+void
+vlib_worker_wait_one_loop (void)
+{
+ ASSERT (vlib_get_thread_index () == 0);
+
+ if (vec_len (vlib_mains) < 2)
+ return;
+
+ if (vlib_worker_thread_barrier_held ())
+ return;
+
+ u32 *counts = 0;
+ u32 ii;
+
+ vec_validate (counts, vec_len (vlib_mains) - 1);
+
+ /* record the current loop counts */
+ vec_foreach_index (ii, vlib_mains)
+ counts[ii] = vlib_mains[ii]->main_loop_count;
+
+  /* spin until each worker's loop count changes, skipping the main
+   * thread (index 0), which is the one doing the waiting here */
+ for (ii = 1; ii < vec_len (counts); ii++)
+ {
+ while (counts[ii] == vlib_mains[ii]->main_loop_count)
+ CLIB_PAUSE ();
+ }
+
+ vec_free (counts);
+ return;
}
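A typical (hypothetical) caller uses this as a lightweight quiescence point: publish new state atomically, wait one loop, then reclaim the old state, since no worker can still be using it:

typedef struct my_state my_state_t;     /* hypothetical application state */
void my_state_free (my_state_t * ms);   /* hypothetical destructor */

static void
swap_and_retire (my_state_t ** global_slot, my_state_t * new_state)
{
  my_state_t *old = *global_slot;

  /* publish; workers observe this at a main-loop boundary */
  __atomic_store_n (global_slot, new_state, __ATOMIC_RELEASE);

  vlib_worker_wait_one_loop ();
  /* every worker has since crossed a loop boundary, so no in-flight
     use of the old state remains */
  my_state_free (old);
}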
vlib_main_t *vm = vlib_get_main ();
clib_error_t *e;
+ vlib_process_finish_switch_stack (vm);
+
ASSERT (vm->thread_index == vlib_get_thread_index ());
vlib_worker_thread_init (w);
vlib_frame_queue_main_t *fqm;
vlib_frame_queue_t *fq;
int i;
+ u32 num_threads;
if (frame_queue_nelts == 0)
frame_queue_nelts = FRAME_QUEUE_MAX_NELTS;
- ASSERT (frame_queue_nelts >= 8);
+ num_threads = 1 /* main thread */ + tm->n_threads;
+ ASSERT (frame_queue_nelts >= 8 + num_threads);
vec_add2 (tm->frame_queue_mains, fqm, 1);
fqm->node_index = node_index;
fqm->frame_queue_nelts = frame_queue_nelts;
- fqm->queue_hi_thresh = frame_queue_nelts - 2;
+ fqm->queue_hi_thresh = frame_queue_nelts - num_threads;
vec_validate (fqm->vlib_frame_queues, tm->n_vlib_mains - 1);
vec_validate (fqm->per_thread_data, tm->n_vlib_mains - 1);
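The "- num_threads" headroom deserves a worked example. The congestion check against queue_hi_thresh is taken independently by each enqueuing thread, so in the worst case all of them pass it at the same depth and then each add one frame. With frame_queue_nelts = 64 and 1 main + 4 workers:

  queue_hi_thresh = frame_queue_nelts - num_threads
                  = 64 - (1 + 4)
                  = 59

At most five frames can land past the threshold, keeping the depth within the 64-slot queue, whereas the old fixed reserve of 2 could be overrun once more than two threads enqueue concurrently.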