X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=src%2Fvlib%2Fthreads.c;h=6cd325b3e2ce6079db771a36270c8cd163107a8f;hb=8a19f12a0cfe6d611f6e266931af691fb69a74ad;hp=e3ea3c9cb478c69b9669ba7db3ee9f6988850d1e;hpb=bd69a5f24c6e83e9101f203dd124864fb2877a17;p=vpp.git

diff --git a/src/vlib/threads.c b/src/vlib/threads.c
index e3ea3c9cb47..6cd325b3e2c 100644
--- a/src/vlib/threads.c
+++ b/src/vlib/threads.c
@@ -36,30 +36,7 @@ vlib_worker_thread_t *vlib_worker_threads;
 vlib_thread_main_t vlib_thread_main;
 
 uword
-os_get_cpu_number (void)
-{
-  void *sp;
-  uword n;
-  u32 len;
-
-  len = vec_len (vlib_thread_stacks);
-  if (len == 0)
-    return 0;
-
-  /* Get any old stack address. */
-  sp = &sp;
-
-  n = ((uword) sp - (uword) vlib_thread_stacks[0])
-    >> VLIB_LOG2_THREAD_STACK_SIZE;
-
-  /* "processes" have their own stacks, and they always run in thread 0 */
-  n = n >= len ? 0 : n;
-
-  return n;
-}
-
-uword
-os_get_ncpus (void)
+os_get_nthreads (void)
 {
   u32 len;
 
@@ -275,21 +252,6 @@ vlib_thread_init (vlib_main_t * vm)
   return 0;
 }
 
-vlib_worker_thread_t *
-vlib_alloc_thread (vlib_main_t * vm)
-{
-  vlib_worker_thread_t *w;
-
-  if (vec_len (vlib_worker_threads) >= vec_len (vlib_thread_stacks))
-    {
-      clib_warning ("out of worker threads... Quitting...");
-      exit (1);
-    }
-  vec_add2 (vlib_worker_threads, w, 1);
-  w->thread_stack = vlib_thread_stacks[w - vlib_worker_threads];
-  return w;
-}
-
 vlib_frame_queue_t *
 vlib_frame_queue_alloc (int nelts)
 {
@@ -427,7 +389,7 @@ vlib_frame_queue_enqueue (vlib_main_t * vm, u32 node_runtime_index,
           f64 b4 = vlib_time_now_ticks (vm, before);
           vlib_worker_thread_barrier_check (vm, b4);
           /* Bad idea. Dequeue -> enqueue -> dequeue -> trouble */
-          // vlib_frame_queue_dequeue (vm->cpu_index, vm, nm);
+          // vlib_frame_queue_dequeue (vm->thread_index, vm, nm);
         }
 
       elt = fq->elts + (new_tail & (fq->nelts - 1));
@@ -497,6 +459,8 @@ vlib_worker_thread_bootstrap_fn (void *arg)
   w->lwp = syscall (SYS_gettid);
   w->thread_id = pthread_self ();
 
+  __os_thread_index = w - vlib_worker_threads;
+
   rv = (void *) clib_calljmp
     ((uword (*)(uword)) w->thread_function,
      (uword) arg, w->thread_stack + VLIB_THREAD_STACK_SIZE);
@@ -570,19 +534,30 @@ start_workers (vlib_main_t * vm)
 
   if (n_vlib_mains > 1)
     {
-      vec_validate (vlib_mains, tm->n_vlib_mains - 1);
+      /* Replace hand-crafted length-1 vector with a real vector */
+      vlib_mains = 0;
+
+      vec_validate_aligned (vlib_mains, tm->n_vlib_mains - 1,
+                            CLIB_CACHE_LINE_BYTES);
       _vec_len (vlib_mains) = 0;
-      vec_add1 (vlib_mains, vm);
+      vec_add1_aligned (vlib_mains, vm, CLIB_CACHE_LINE_BYTES);
 
       vlib_worker_threads->wait_at_barrier =
         clib_mem_alloc_aligned (sizeof (u32), CLIB_CACHE_LINE_BYTES);
       vlib_worker_threads->workers_at_barrier =
         clib_mem_alloc_aligned (sizeof (u32), CLIB_CACHE_LINE_BYTES);
 
+      vlib_worker_threads->node_reforks_required =
+        clib_mem_alloc_aligned (sizeof (u32), CLIB_CACHE_LINE_BYTES);
+
       /* Ask for an initial barrier sync */
       *vlib_worker_threads->workers_at_barrier = 0;
       *vlib_worker_threads->wait_at_barrier = 1;
 
+      /* Without update or refork */
+      *vlib_worker_threads->node_reforks_required = 0;
+      vm->need_vlib_worker_thread_node_runtime_update = 0;
+
       worker_thread_index = 1;
 
       for (i = 0; i < vec_len (tm->registrations); i++)
@@ -600,13 +575,17 @@ start_workers (vlib_main_t * vm)
 
           for (k = 0; k < tr->count; k++)
             {
+              vlib_node_t *n;
+
               vec_add2 (vlib_worker_threads, w, 1);
               if (tr->mheap_size)
                 w->thread_mheap =
                   mheap_alloc (0 /* use VM */ , tr->mheap_size);
               else
                 w->thread_mheap = main_heap;
-              w->thread_stack = vlib_thread_stacks[w - vlib_worker_threads];
+
+              w->thread_stack =
+                vlib_thread_stack_init (w - vlib_worker_threads);
               w->thread_function = tr->function;
               w->thread_function_arg = w;
               w->instance_id = k;
@@ -626,9 +605,11 @@ start_workers (vlib_main_t * vm)
               vm_clone = clib_mem_alloc (sizeof (*vm_clone));
               clib_memcpy (vm_clone, vlib_mains[0], sizeof (*vm_clone));
 
-              vm_clone->cpu_index = worker_thread_index;
+              vm_clone->thread_index = worker_thread_index;
               vm_clone->heap_base = w->thread_mheap;
               vm_clone->mbuf_alloc_list = 0;
+              vm_clone->init_functions_called =
+                hash_create (0, /* value bytes */ 0);
               memset (&vm_clone->random_buffer, 0,
                       sizeof (vm_clone->random_buffer));
 
@@ -656,10 +637,12 @@ start_workers (vlib_main_t * vm)
 
               /* fork nodes */
               nm_clone->nodes = 0;
+
+              /* Allocate all nodes in single block for speed */
+              n = clib_mem_alloc_no_fail (vec_len (nm->nodes) * sizeof (*n));
+
               for (j = 0; j < vec_len (nm->nodes); j++)
                 {
-                  vlib_node_t *n;
-                  n = clib_mem_alloc_no_fail (sizeof (*n));
                   clib_memcpy (n, nm->nodes[j], sizeof (*n));
                   /* none of the copied nodes have enqueue rights given out */
                   n->owner_node_index = VLIB_INVALID_NODE_INDEX;
@@ -667,25 +650,45 @@ start_workers (vlib_main_t * vm)
                   memset (&n->stats_last_clear, 0,
                           sizeof (n->stats_last_clear));
                   vec_add1 (nm_clone->nodes, n);
+                  n++;
                 }
               nm_clone->nodes_by_type[VLIB_NODE_TYPE_INTERNAL] =
                 vec_dup (nm->nodes_by_type[VLIB_NODE_TYPE_INTERNAL]);
+              vec_foreach (rt,
                            nm_clone->nodes_by_type[VLIB_NODE_TYPE_INTERNAL])
+              {
+                vlib_node_t *n = vlib_get_node (vm, rt->node_index);
+                rt->thread_index = vm_clone->thread_index;
+                /* copy initial runtime_data from node */
+                if (n->runtime_data && n->runtime_data_bytes > 0)
+                  clib_memcpy (rt->runtime_data, n->runtime_data,
+                               clib_min (VLIB_NODE_RUNTIME_DATA_SIZE,
+                                         n->runtime_data_bytes));
+              }
 
               nm_clone->nodes_by_type[VLIB_NODE_TYPE_INPUT] =
                 vec_dup (nm->nodes_by_type[VLIB_NODE_TYPE_INPUT]);
               vec_foreach (rt, nm_clone->nodes_by_type[VLIB_NODE_TYPE_INPUT])
-                rt->cpu_index = vm_clone->cpu_index;
+              {
+                vlib_node_t *n = vlib_get_node (vm, rt->node_index);
+                rt->thread_index = vm_clone->thread_index;
+                /* copy initial runtime_data from node */
+                if (n->runtime_data && n->runtime_data_bytes > 0)
+                  clib_memcpy (rt->runtime_data, n->runtime_data,
+                               clib_min (VLIB_NODE_RUNTIME_DATA_SIZE,
+                                         n->runtime_data_bytes));
+              }
 
               nm_clone->processes = vec_dup (nm->processes);
 
               /* zap the (per worker) frame freelists, etc */
               nm_clone->frame_sizes = 0;
-              nm_clone->frame_size_hash = 0;
+              nm_clone->frame_size_hash = hash_create (0, sizeof (uword));
 
               /* Packet trace buffers are guaranteed to be empty, nothing to do here */
 
               clib_mem_set_heap (oldheap);
-              vec_add1 (vlib_mains, vm_clone);
+              vec_add1_aligned (vlib_mains, vm_clone, CLIB_CACHE_LINE_BYTES);
 
               vm_clone->error_main.counters =
                 vec_dup (vlib_mains[0]->error_main.counters);
@@ -732,7 +735,8 @@ start_workers (vlib_main_t * vm)
                   mheap_alloc (0 /* use VM */ , tr->mheap_size);
               else
                 w->thread_mheap = main_heap;
-              w->thread_stack = vlib_thread_stacks[w - vlib_worker_threads];
+              w->thread_stack =
+                vlib_thread_stack_init (w - vlib_worker_threads);
               w->thread_function = tr->function;
               w->thread_function_arg = w;
               w->instance_id = j;
@@ -786,32 +790,25 @@ start_workers (vlib_main_t * vm)
 
 VLIB_MAIN_LOOP_ENTER_FUNCTION (start_workers);
 
-void
-vlib_worker_thread_node_runtime_update (void)
+static inline void
+worker_thread_node_runtime_update_internal (void)
 {
   int i, j;
-  vlib_worker_thread_t *w;
   vlib_main_t *vm;
   vlib_node_main_t *nm, *nm_clone;
-  vlib_node_t **old_nodes_clone;
   vlib_main_t *vm_clone;
-  vlib_node_runtime_t *rt, *old_rt;
-  void *oldheap;
+  vlib_node_runtime_t *rt;
   never_inline void
     vlib_node_runtime_sync_stats (vlib_main_t * vm,
                                   vlib_node_runtime_t * r,
                                   uword n_calls,
                                   uword n_vectors, uword n_clocks);
 
-  ASSERT (os_get_cpu_number () == 0);
-
-  if (vec_len (vlib_mains) == 0)
-    return;
+  ASSERT (vlib_get_thread_index () == 0);
 
   vm = vlib_mains[0];
   nm = &vm->node_main;
 
-  ASSERT (os_get_cpu_number () == 0);
   ASSERT (*vlib_worker_threads->wait_at_barrier == 1);
 
   /*
@@ -841,117 +838,170 @@ vlib_worker_thread_node_runtime_update (void)
         }
     }
 
-  for (i = 1; i < vec_len (vlib_mains); i++)
-    {
-      vlib_node_runtime_t *rt;
-      w = vlib_worker_threads + i;
-      oldheap = clib_mem_set_heap (w->thread_mheap);
+  /* Per-worker clone rebuilds are now done on each thread */
+}
 
-      vm_clone = vlib_mains[i];
-
-      /* Re-clone error heap */
-      u64 *old_counters = vm_clone->error_main.counters;
-      u64 *old_counters_all_clear = vm_clone->error_main.counters_last_clear;
-      clib_memcpy (&vm_clone->error_main, &vm->error_main,
-                   sizeof (vm->error_main));
-      j = vec_len (vm->error_main.counters) - 1;
-      vec_validate_aligned (old_counters, j, CLIB_CACHE_LINE_BYTES);
-      vec_validate_aligned (old_counters_all_clear, j, CLIB_CACHE_LINE_BYTES);
-      vm_clone->error_main.counters = old_counters;
-      vm_clone->error_main.counters_last_clear = old_counters_all_clear;
+void
+vlib_worker_thread_node_refork (void)
+{
+  vlib_main_t *vm, *vm_clone;
+  vlib_node_main_t *nm, *nm_clone;
+  vlib_node_t **old_nodes_clone;
+  vlib_node_runtime_t *rt, *old_rt;
 
-      nm_clone = &vm_clone->node_main;
-      vec_free (nm_clone->next_frames);
-      nm_clone->next_frames = vec_dup (nm->next_frames);
+  vlib_node_t *new_n_clone;
 
-      for (j = 0; j < vec_len (nm_clone->next_frames); j++)
-        {
-          vlib_next_frame_t *nf = &nm_clone->next_frames[j];
-          u32 save_node_runtime_index;
-          u32 save_flags;
-
-          save_node_runtime_index = nf->node_runtime_index;
-          save_flags = nf->flags & VLIB_FRAME_NO_FREE_AFTER_DISPATCH;
-          vlib_next_frame_init (nf);
-          nf->node_runtime_index = save_node_runtime_index;
-          nf->flags = save_flags;
-        }
+  int j;
 
-      old_nodes_clone = nm_clone->nodes;
-      nm_clone->nodes = 0;
+  vm = vlib_mains[0];
+  nm = &vm->node_main;
+  vm_clone = vlib_get_main ();
+  nm_clone = &vm_clone->node_main;
+
+  /* Re-clone error heap */
+  u64 *old_counters = vm_clone->error_main.counters;
+  u64 *old_counters_all_clear = vm_clone->error_main.counters_last_clear;
+
+  clib_memcpy (&vm_clone->error_main, &vm->error_main,
+               sizeof (vm->error_main));
+  j = vec_len (vm->error_main.counters) - 1;
+  vec_validate_aligned (old_counters, j, CLIB_CACHE_LINE_BYTES);
+  vec_validate_aligned (old_counters_all_clear, j, CLIB_CACHE_LINE_BYTES);
+  vm_clone->error_main.counters = old_counters;
+  vm_clone->error_main.counters_last_clear = old_counters_all_clear;
+
+  nm_clone = &vm_clone->node_main;
+  vec_free (nm_clone->next_frames);
+  nm_clone->next_frames = vec_dup (nm->next_frames);
+
+  for (j = 0; j < vec_len (nm_clone->next_frames); j++)
+    {
+      vlib_next_frame_t *nf = &nm_clone->next_frames[j];
+      u32 save_node_runtime_index;
+      u32 save_flags;
+
+      save_node_runtime_index = nf->node_runtime_index;
+      save_flags = nf->flags & VLIB_FRAME_NO_FREE_AFTER_DISPATCH;
+      vlib_next_frame_init (nf);
+      nf->node_runtime_index = save_node_runtime_index;
+      nf->flags = save_flags;
+    }
 
-      /* re-fork nodes */
-      for (j = 0; j < vec_len (nm->nodes); j++)
-        {
-          vlib_node_t *old_n_clone;
-          vlib_node_t *new_n, *new_n_clone;
+  old_nodes_clone = nm_clone->nodes;
+  nm_clone->nodes = 0;
+
+  /* re-fork nodes */
+
+  /* Allocate all nodes in single block for speed */
+  new_n_clone =
+    clib_mem_alloc_no_fail (vec_len (nm->nodes) * sizeof (*new_n_clone));
+  for (j = 0; j < vec_len (nm->nodes); j++)
+    {
+      vlib_node_t *old_n_clone;
+      vlib_node_t *new_n;
 
-          new_n = nm->nodes[j];
-          old_n_clone = old_nodes_clone[j];
+      new_n = nm->nodes[j];
+      old_n_clone = old_nodes_clone[j];
 
-          new_n_clone = clib_mem_alloc_no_fail (sizeof (*new_n_clone));
-          clib_memcpy (new_n_clone, new_n, sizeof (*new_n));
-          /* none of the copied nodes have enqueue rights given out */
-          new_n_clone->owner_node_index = VLIB_INVALID_NODE_INDEX;
+      clib_memcpy (new_n_clone, new_n, sizeof (*new_n));
+      /* none of the copied nodes have enqueue rights given out */
+      new_n_clone->owner_node_index = VLIB_INVALID_NODE_INDEX;
 
-          if (j >= vec_len (old_nodes_clone))
-            {
-              /* new node, set to zero */
-              memset (&new_n_clone->stats_total, 0,
-                      sizeof (new_n_clone->stats_total));
-              memset (&new_n_clone->stats_last_clear, 0,
-                      sizeof (new_n_clone->stats_last_clear));
-            }
-          else
-            {
-              /* Copy stats if the old data is valid */
-              clib_memcpy (&new_n_clone->stats_total,
-                           &old_n_clone->stats_total,
-                           sizeof (new_n_clone->stats_total));
-              clib_memcpy (&new_n_clone->stats_last_clear,
-                           &old_n_clone->stats_last_clear,
-                           sizeof (new_n_clone->stats_last_clear));
-
-              /* keep previous node state */
-              new_n_clone->state = old_n_clone->state;
-            }
-          vec_add1 (nm_clone->nodes, new_n_clone);
+      if (j >= vec_len (old_nodes_clone))
+        {
+          /* new node, set to zero */
+          memset (&new_n_clone->stats_total, 0,
+                  sizeof (new_n_clone->stats_total));
+          memset (&new_n_clone->stats_last_clear, 0,
+                  sizeof (new_n_clone->stats_last_clear));
+        }
+      else
+        {
+          /* Copy stats if the old data is valid */
+          clib_memcpy (&new_n_clone->stats_total,
+                       &old_n_clone->stats_total,
+                       sizeof (new_n_clone->stats_total));
+          clib_memcpy (&new_n_clone->stats_last_clear,
+                       &old_n_clone->stats_last_clear,
+                       sizeof (new_n_clone->stats_last_clear));
+
+          /* keep previous node state */
+          new_n_clone->state = old_n_clone->state;
         }
-      /* Free the old node clone */
-      for (j = 0; j < vec_len (old_nodes_clone); j++)
-        clib_mem_free (old_nodes_clone[j]);
-      vec_free (old_nodes_clone);
+      vec_add1 (nm_clone->nodes, new_n_clone);
+      new_n_clone++;
+    }
+  /* Free the old node clones */
+  clib_mem_free (old_nodes_clone[0]);
 
-      vec_free (nm_clone->nodes_by_type[VLIB_NODE_TYPE_INTERNAL]);
+  vec_free (old_nodes_clone);
 
-      nm_clone->nodes_by_type[VLIB_NODE_TYPE_INTERNAL] =
-        vec_dup (nm->nodes_by_type[VLIB_NODE_TYPE_INTERNAL]);
-      /* clone input node runtime */
-      old_rt = nm_clone->nodes_by_type[VLIB_NODE_TYPE_INPUT];
+  /* re-clone internal nodes */
+  old_rt = nm_clone->nodes_by_type[VLIB_NODE_TYPE_INTERNAL];
+  nm_clone->nodes_by_type[VLIB_NODE_TYPE_INTERNAL] =
+    vec_dup (nm->nodes_by_type[VLIB_NODE_TYPE_INTERNAL]);
 
-      nm_clone->nodes_by_type[VLIB_NODE_TYPE_INPUT] =
-        vec_dup (nm->nodes_by_type[VLIB_NODE_TYPE_INPUT]);
+  vec_foreach (rt, nm_clone->nodes_by_type[VLIB_NODE_TYPE_INTERNAL])
+  {
+    vlib_node_t *n = vlib_get_node (vm, rt->node_index);
+    rt->thread_index = vm_clone->thread_index;
+    /* copy runtime_data, will be overwritten later for existing rt */
+    if (n->runtime_data && n->runtime_data_bytes > 0)
+      clib_memcpy (rt->runtime_data, n->runtime_data,
+                   clib_min (VLIB_NODE_RUNTIME_DATA_SIZE,
+                             n->runtime_data_bytes));
+  }
 
-      vec_foreach (rt, nm_clone->nodes_by_type[VLIB_NODE_TYPE_INPUT])
-      {
-        rt->cpu_index = vm_clone->cpu_index;
-      }
+  for (j = 0; j < vec_len (old_rt); j++)
+    {
+      rt = vlib_node_get_runtime (vm_clone, old_rt[j].node_index);
+      rt->state = old_rt[j].state;
+      clib_memcpy (rt->runtime_data, old_rt[j].runtime_data,
+                   VLIB_NODE_RUNTIME_DATA_SIZE);
+    }
 
-      for (j = 0; j < vec_len (old_rt); j++)
-        {
-          rt = vlib_node_get_runtime (vm_clone, old_rt[j].node_index);
-          rt->state = old_rt[j].state;
-        }
+  vec_free (old_rt);
+
+  /* re-clone input nodes */
+  old_rt = nm_clone->nodes_by_type[VLIB_NODE_TYPE_INPUT];
+  nm_clone->nodes_by_type[VLIB_NODE_TYPE_INPUT] =
+    vec_dup (nm->nodes_by_type[VLIB_NODE_TYPE_INPUT]);
+
+  vec_foreach (rt, nm_clone->nodes_by_type[VLIB_NODE_TYPE_INPUT])
+  {
+    vlib_node_t *n = vlib_get_node (vm, rt->node_index);
+    rt->thread_index = vm_clone->thread_index;
+    /* copy runtime_data, will be overwritten later for existing rt */
+    if (n->runtime_data && n->runtime_data_bytes > 0)
+      clib_memcpy (rt->runtime_data, n->runtime_data,
+                   clib_min (VLIB_NODE_RUNTIME_DATA_SIZE,
+                             n->runtime_data_bytes));
+  }
+
+  for (j = 0; j < vec_len (old_rt); j++)
+    {
+      rt = vlib_node_get_runtime (vm_clone, old_rt[j].node_index);
+      rt->state = old_rt[j].state;
+      clib_memcpy (rt->runtime_data, old_rt[j].runtime_data,
+                   VLIB_NODE_RUNTIME_DATA_SIZE);
+    }
 
-      vec_free (old_rt);
+  vec_free (old_rt);
 
-      nm_clone->processes = vec_dup (nm->processes);
+  nm_clone->processes = vec_dup (nm->processes);
+}
 
-      clib_mem_set_heap (oldheap);
-      // vnet_main_fork_fixup (i);
-    }
+void
+vlib_worker_thread_node_runtime_update (void)
+{
+  /*
+   * Make a note that we need to do a node runtime update
+   * prior to releasing the barrier.
+   */
+  vlib_global_main.need_vlib_worker_thread_node_runtime_update = 1;
 }
 
 u32
@@ -1097,7 +1147,7 @@ cpu_config (vlib_main_t * vm, unformat_input_t * input)
 
 VLIB_EARLY_CONFIG_FUNCTION (cpu_config, "cpu");
 
-#if !defined (__x86_64__) && !defined (__aarch64__) && !defined (__powerpc64__) && !defined(__arm__)
+#if !defined (__x86_64__) && !defined (__i386__) && !defined (__aarch64__) && !defined (__powerpc64__) && !defined(__arm__)
 void
 __sync_fetch_and_add_8 (void)
 {
@@ -1127,7 +1177,7 @@ vlib_worker_thread_fork_fixup (vlib_fork_fixup_t which)
   if (vlib_mains == 0)
     return;
 
-  ASSERT (os_get_cpu_number () == 0);
+  ASSERT (vlib_get_thread_index () == 0);
   vlib_worker_thread_barrier_sync (vm);
 
   switch (which)
@@ -1148,9 +1198,11 @@ vlib_worker_thread_barrier_sync (vlib_main_t * vm)
   f64 deadline;
   u32 count;
 
-  if (!vlib_mains)
+  if (vec_len (vlib_mains) < 2)
     return;
 
+  ASSERT (vlib_get_thread_index () == 0);
+
   count = vec_len (vlib_mains) - 1;
 
   /* Tolerate recursive calls */
@@ -1159,8 +1211,6 @@ vlib_worker_thread_barrier_sync (vlib_main_t * vm)
 
   vlib_worker_threads[0].barrier_sync_count++;
 
-  ASSERT (os_get_cpu_number () == 0);
-
   deadline = vlib_time_now (vm) + BARRIER_SYNC_TIMEOUT;
 
   *vlib_worker_threads->wait_at_barrier = 1;
@@ -1178,13 +1228,29 @@ void
 vlib_worker_thread_barrier_release (vlib_main_t * vm)
 {
   f64 deadline;
+  int refork_needed = 0;
 
-  if (!vlib_mains)
+  if (vec_len (vlib_mains) < 2)
     return;
 
+  ASSERT (vlib_get_thread_index () == 0);
+
   if (--vlib_worker_threads[0].recursion_level > 0)
     return;
 
+  /* Update (all) node runtimes before releasing the barrier, if needed */
+  if (vm->need_vlib_worker_thread_node_runtime_update)
+    {
+      /* Do stats elements on main thread */
+      worker_thread_node_runtime_update_internal ();
+      vm->need_vlib_worker_thread_node_runtime_update = 0;
+
+      /* Do per thread rebuilds in parallel */
+      refork_needed = 1;
+      clib_smp_atomic_add (vlib_worker_threads->node_reforks_required,
+                           (vec_len (vlib_mains) - 1));
+    }
+
   deadline = vlib_time_now (vm) + BARRIER_SYNC_TIMEOUT;
 
   *vlib_worker_threads->wait_at_barrier = 0;
@@ -1197,6 +1263,22 @@ vlib_worker_thread_barrier_release (vlib_main_t * vm)
           os_panic ();
         }
     }
+
+  /* Wait for reforks before continuing */
+  if (refork_needed)
+    {
+      deadline = vlib_time_now (vm) + BARRIER_SYNC_TIMEOUT;
+
+      while (*vlib_worker_threads->node_reforks_required > 0)
+        {
+          if (vlib_time_now (vm) > deadline)
+            {
+              fformat (stderr, "%s: worker thread refork deadlock\n",
+                       __FUNCTION__);
+              os_panic ();
+            }
+        }
+    }
 }
 
 /*
@@ -1204,11 +1286,10 @@ vlib_worker_thread_barrier_release (vlib_main_t * vm)
  * If so, pull the packets off the frames and put them to
  * the handoff node.
  */
-static inline int
-vlib_frame_queue_dequeue_internal (vlib_main_t * vm,
-                                   vlib_frame_queue_main_t * fqm)
+int
+vlib_frame_queue_dequeue (vlib_main_t * vm, vlib_frame_queue_main_t * fqm)
 {
-  u32 thread_id = vm->cpu_index;
+  u32 thread_id = vm->thread_index;
   vlib_frame_queue_t *fq = fqm->vlib_frame_queues[thread_id];
   vlib_frame_queue_elt_t *elt;
   u32 *from, *to;
@@ -1333,83 +1414,15 @@ vlib_frame_queue_dequeue_internal (vlib_main_t * vm,
   return processed;
 }
 
-static_always_inline void
-vlib_worker_thread_internal (vlib_main_t * vm)
-{
-  vlib_node_main_t *nm = &vm->node_main;
-  vlib_thread_main_t *tm = vlib_get_thread_main ();
-  u64 cpu_time_now = clib_cpu_time_now ();
-  vlib_frame_queue_main_t *fqm;
-
-  vec_alloc (nm->pending_interrupt_node_runtime_indices, 32);
-
-  while (1)
-    {
-      vlib_worker_thread_barrier_check ();
-
-      vec_foreach (fqm, tm->frame_queue_mains)
-        vlib_frame_queue_dequeue_internal (vm, fqm);
-
-      vlib_node_runtime_t *n;
-      vec_foreach (n, nm->nodes_by_type[VLIB_NODE_TYPE_INPUT])
-      {
-        cpu_time_now = dispatch_node (vm, n, VLIB_NODE_TYPE_INPUT,
-                                      VLIB_NODE_STATE_POLLING, /* frame */ 0,
-                                      cpu_time_now);
-      }
-
-      /* Next handle interrupts. */
-      {
-        uword l = _vec_len (nm->pending_interrupt_node_runtime_indices);
-        uword i;
-        if (l > 0)
-          {
-            _vec_len (nm->pending_interrupt_node_runtime_indices) = 0;
-            for (i = 0; i < l; i++)
-              {
-                n = vec_elt_at_index (nm->nodes_by_type[VLIB_NODE_TYPE_INPUT],
-                                      nm->
-                                      pending_interrupt_node_runtime_indices
-                                      [i]);
-                cpu_time_now =
-                  dispatch_node (vm, n, VLIB_NODE_TYPE_INPUT,
                                 VLIB_NODE_STATE_INTERRUPT,
                                 /* frame */ 0,
                                 cpu_time_now);
-              }
-          }
-      }
-
-      if (_vec_len (nm->pending_frames))
-        {
-          int i;
-          cpu_time_now = clib_cpu_time_now ();
-          for (i = 0; i < _vec_len (nm->pending_frames); i++)
-            {
-              vlib_pending_frame_t *p;
-
-              p = nm->pending_frames + i;
-
-              cpu_time_now = dispatch_pending_node (vm, p, cpu_time_now);
-            }
-          _vec_len (nm->pending_frames) = 0;
-        }
-      vlib_increment_main_loop_counter (vm);
-
-      /* Record time stamp in case there are no enabled nodes and above
-         calls do not update time stamp. */
-      cpu_time_now = clib_cpu_time_now ();
-    }
-}
-
 void
 vlib_worker_thread_fn (void *arg)
 {
   vlib_worker_thread_t *w = (vlib_worker_thread_t *) arg;
   vlib_thread_main_t *tm = vlib_get_thread_main ();
   vlib_main_t *vm = vlib_get_main ();
+  clib_error_t *e;
 
-  ASSERT (vm->cpu_index == os_get_cpu_number ());
+  ASSERT (vm->thread_index == vlib_get_thread_index ());
 
   vlib_worker_thread_init (w);
   clib_time_init (&vm->clib_time);
@@ -1419,7 +1432,12 @@ vlib_worker_thread_fn (void *arg)
   while (tm->extern_thread_mgmt && tm->worker_thread_release == 0)
     vlib_worker_thread_barrier_check ();
 
-  vlib_worker_thread_internal (vm);
+  e = vlib_call_init_exit_functions
+    (vm, vm->worker_init_function_registrations, 1 /* call_once */ );
+  if (e)
+    clib_error_report (e);
+
+  vlib_worker_loop (vm);
 }
 
 /* *INDENT-OFF* */