 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *     http://www.apache.org/licenses/LICENSE-2.0
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
#include <vppinfra/format.h>
#include <vppinfra/time_range.h>
#include <vppinfra/interrupt.h>
#include <vppinfra/bitmap.h>
#include <vppinfra/unix.h>
#include <vlib/vlib.h>
#include <vlib/threads.h>
#include <vlib/stats/stats.h>
vlib_worker_thread_t *vlib_worker_threads;
vlib_thread_main_t vlib_thread_main;
 * Barrier tracing can be enabled on a normal build to collect information
 * on barrier use, including timings and call stacks. Deliberately not
 * keyed off CLIB_DEBUG, because that can add significant overhead which
 * impacts observed timings.
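/* Tracing is gated on vlib_worker_threads->barrier_elog_enabled below; in
 * stock VPP this flag is typically toggled at run time (for example via the
 * "elog trace" debug CLI where available), so no rebuild is needed to start
 * capturing barrier events. */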
barrier_trace_sync (f64 t_entry, f64 t_open, f64 t_closed)
  if (!vlib_worker_threads->barrier_elog_enabled)

  ELOG_TYPE_DECLARE (e) = {
    .format = "bar-trace-%s-#%d",
    .format_args = "T4i4",

    u32 caller, count, t_entry, t_open, t_closed;

  ed = ELOG_DATA (&vlib_global_main.elog_main, e);
  ed->count = (int) vlib_worker_threads[0].barrier_sync_count;
  ed->caller = elog_string (&vlib_global_main.elog_main,
			    (char *) vlib_worker_threads[0].barrier_caller);
  ed->t_entry = (int) (1000000.0 * t_entry);
  ed->t_open = (int) (1000000.0 * t_open);
  ed->t_closed = (int) (1000000.0 * t_closed);
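  /* The elog records store times as integer microseconds: each f64 interval
   * in seconds is scaled by 1e6 and truncated before being written. */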
barrier_trace_sync_rec (f64 t_entry)
  if (!vlib_worker_threads->barrier_elog_enabled)

  ELOG_TYPE_DECLARE (e) = {
    .format = "bar-syncrec-%s-#%d",
    .format_args = "T4i4",

  ed = ELOG_DATA (&vlib_global_main.elog_main, e);
  ed->depth = (int) vlib_worker_threads[0].recursion_level - 1;
  ed->caller = elog_string (&vlib_global_main.elog_main,
			    (char *) vlib_worker_threads[0].barrier_caller);

barrier_trace_release_rec (f64 t_entry)
  if (!vlib_worker_threads->barrier_elog_enabled)

  ELOG_TYPE_DECLARE (e) = {
    .format = "bar-relrrec-#%d",

  ed = ELOG_DATA (&vlib_global_main.elog_main, e);
  ed->depth = (int) vlib_worker_threads[0].recursion_level;

barrier_trace_release (f64 t_entry, f64 t_closed_total, f64 t_update_main)
  if (!vlib_worker_threads->barrier_elog_enabled)

  ELOG_TYPE_DECLARE (e) = {
    .format = "bar-rel-#%d-e%d-u%d-t%d",
    .format_args = "i4i4i4i4",

    u32 count, t_entry, t_update_main, t_closed_total;

  ed = ELOG_DATA (&vlib_global_main.elog_main, e);
  ed->t_entry = (int) (1000000.0 * t_entry);
  ed->t_update_main = (int) (1000000.0 * t_update_main);
  ed->t_closed_total = (int) (1000000.0 * t_closed_total);
  ed->count = (int) vlib_worker_threads[0].barrier_sync_count;

  /* Reset context for next trace */
  vlib_worker_threads[0].barrier_context = NULL;
os_get_nthreads (void)
  return vec_len (vlib_thread_stacks);

vlib_set_thread_name (char *name)
  int pthread_setname_np (pthread_t __target_thread, const char *__name);
  pthread_t thread = pthread_self ();

  rv = pthread_setname_np (thread, name);
    clib_warning ("pthread_setname_np returned %d", rv);

sort_registrations_by_no_clone (void *a0, void *a1)
  vlib_thread_registration_t **tr0 = a0;
  vlib_thread_registration_t **tr1 = a1;

  return ((i32) ((*tr0)->no_data_structure_clone)
	  - ((i32) ((*tr1)->no_data_structure_clone)));
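/* Sorting ascending on no_data_structure_clone places registrations whose
 * threads receive full data-structure clones (no_data_structure_clone == 0,
 * i.e. ordinary workers) first, so they are assigned the lowest first_index
 * values in vlib_thread_init () below. */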
/* Called early in the init sequence */

vlib_thread_init (vlib_main_t * vm)
  vlib_thread_main_t *tm = &vlib_thread_main;
  vlib_worker_thread_t *w;
  vlib_thread_registration_t *tr;
  u32 n_vlib_mains = 1;
  uword *avail_cpu, *affinity_cpu;
  u32 stats_num_worker_threads_dir_index;

  stats_num_worker_threads_dir_index =
    vlib_stats_add_gauge ("/sys/num_worker_threads");
  ASSERT (stats_num_worker_threads_dir_index != ~0);

  /* get bitmaps of active cpu cores and sockets */
  tm->cpu_core_bitmap = os_get_online_cpu_core_bitmap ();
  tm->cpu_socket_bitmap = os_get_online_cpu_node_bitmap ();

  /* get bitmap of active cpu cores vpp has affinity to */
  tm->cpu_affinity_bitmap = os_get_cpu_affinity_bitmap (pid);

  /* if fetching affinity fails, return online cpu core bmp */
  if (tm->cpu_affinity_bitmap == 0)
    tm->cpu_affinity_bitmap = os_get_online_cpu_core_bitmap ();

  avail_cpu = clib_bitmap_dup (tm->cpu_core_bitmap);
  affinity_cpu = clib_bitmap_dup (tm->cpu_affinity_bitmap);

  n_cpus = clib_bitmap_count_set_bits (avail_cpu);
  if (tm->skip_cores >= n_cpus)
    return clib_error_return (0, "skip-core greater than available cpus");
  n_cpus = clib_bitmap_count_set_bits (affinity_cpu);
  if (tm->skip_cores >= n_cpus)
    return clib_error_return (0, "skip-core greater than affinity cpus");

  for (i = 0; i < tm->skip_cores; i++)
      c = clib_bitmap_first_set (avail_cpu);
	return clib_error_return (0, "no available cpus to skip");
      avail_cpu = clib_bitmap_set (avail_cpu, c, 0);

      c = clib_bitmap_first_set (affinity_cpu);
	return clib_error_return (0, "no available env cpus to skip");
      affinity_cpu = clib_bitmap_set (affinity_cpu, c, 0);
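  /* skip-cores removes the N lowest-numbered cores from both the available
   * and the affinity sets before any main/worker thread placement is done. */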
  /* grab cpu for main thread */
  if (tm->main_lcore != ~0)
      if (clib_bitmap_get (avail_cpu, tm->main_lcore) == 0)
	return clib_error_return (0, "cpu %u is not available to be used"
				  " for the main thread", tm->main_lcore);
      avail_cpu = clib_bitmap_set (avail_cpu, tm->main_lcore, 0);
      affinity_cpu = clib_bitmap_set (affinity_cpu, tm->main_lcore, 0);
  /* if auto enabled, grab first cpu vpp has affinity to for main thread */
  else if (tm->use_main_core_auto)
      uword c = clib_bitmap_first_set (affinity_cpu);
      avail_cpu = clib_bitmap_set (avail_cpu, tm->main_lcore, 0);
      affinity_cpu = clib_bitmap_set (affinity_cpu, tm->main_lcore, 0);

  /* assume that there is socket 0 only if there is no data from sysfs */
  if (!tm->cpu_socket_bitmap)
    tm->cpu_socket_bitmap = clib_bitmap_set (0, 0, 1);

  /* pin main thread to main_lcore */
  if (tm->main_lcore != ~0)
      CPU_SET (tm->main_lcore, &cpuset);
      if (pthread_setaffinity_np (pthread_self (), sizeof (cpu_set_t),
	return clib_error_return (0, "could not pin main thread to cpu %u",

  /* Set up thread 0 */
  vec_validate_aligned (vlib_worker_threads, 0, CLIB_CACHE_LINE_BYTES);
  vec_set_len (vlib_worker_threads, 1);
  w = vlib_worker_threads;
  w->thread_mheap = clib_mem_get_heap ();
  w->thread_stack = vlib_thread_stacks[0];
  w->cpu_id = tm->main_lcore;
  w->lwp = syscall (SYS_gettid);
  w->thread_id = pthread_self ();
  tm->n_vlib_mains = 1;

  vlib_get_thread_core_numa (w, w->cpu_id);

  if (tm->sched_policy != ~0)
      struct sched_param sched_param;
      if (!sched_getparam (w->lwp, &sched_param))
	  if (tm->sched_priority != ~0)
	    sched_param.sched_priority = tm->sched_priority;
	  sched_setscheduler (w->lwp, tm->sched_policy, &sched_param);
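  /* sched_policy / sched_priority come from the "scheduler-policy" and
   * "scheduler-priority" settings parsed in cpu_config () below; both default
   * to ~0, i.e. leave the inherited scheduler settings alone. */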
  /* assign threads to cores and set n_vlib_mains */
      vec_add1 (tm->registrations, tr);

  vec_sort_with_function (tm->registrations, sort_registrations_by_no_clone);

  for (i = 0; i < vec_len (tm->registrations); i++)
      tr = tm->registrations[i];
      tr->first_index = first_index;
      first_index += tr->count;
      n_vlib_mains += (tr->no_data_structure_clone == 0) ? tr->count : 0;

      /* construct coremask */
      if (tr->use_pthreads || !tr->count)
	  clib_bitmap_foreach (c, tr->coremask) {
	    if (clib_bitmap_get (avail_cpu, c) == 0)
	      return clib_error_return (0, "cpu %u is not available to be used"
					" for the '%s' thread", c, tr->name);
	    avail_cpu = clib_bitmap_set (avail_cpu, c, 0);

	  /* for automatic pinning, use cpu affinity list */
	  n_env_cpu = clib_bitmap_count_set_bits (affinity_cpu);

	  if (n_env_cpu < tr->count)
	    return clib_error_return (0,
				      "no available cpus to be used for"
				      " the '%s' thread #%u",
				      tr->name, n_env_cpu);

	  for (j = 0; j < tr->count; j++)
	      /* Do not use CPU 0 by default - leave it to the host and IRQs */
	      uword avail_c0 = clib_bitmap_get (affinity_cpu, 0);
	      affinity_cpu = clib_bitmap_set (affinity_cpu, 0, 0);

	      uword c = clib_bitmap_first_set (affinity_cpu);
	      /* Use CPU 0 as a last resort */
	      if (c == ~0 && avail_c0)
		return clib_error_return (0,
					  "no available cpus to be used for"
					  " the '%s' thread #%u",
					  tr->name, tr->count);

	      affinity_cpu = clib_bitmap_set (affinity_cpu, 0, avail_c0);
	      affinity_cpu = clib_bitmap_set (affinity_cpu, c, 0);
	      tr->coremask = clib_bitmap_set (tr->coremask, c, 1);

  clib_bitmap_free (avail_cpu);
  clib_bitmap_free (affinity_cpu);

  tm->n_vlib_mains = n_vlib_mains;
  vlib_stats_set_gauge (stats_num_worker_threads_dir_index, n_vlib_mains - 1);

   * Allocate the remaining worker threads, and thread stack vector slots
   * from now on, calls to os_get_nthreads() will return the correct
  vec_validate_aligned (vlib_worker_threads, first_index - 1,
			CLIB_CACHE_LINE_BYTES);
  vec_validate (vlib_thread_stacks, vec_len (vlib_worker_threads) - 1);
vlib_frame_queue_alloc (int nelts)
  vlib_frame_queue_t *fq;

  fq = clib_mem_alloc_aligned (sizeof (*fq), CLIB_CACHE_LINE_BYTES);
  clib_memset (fq, 0, sizeof (*fq));
  fq->vector_threshold = 2 * VLIB_FRAME_SIZE;
  vec_validate_aligned (fq->elts, nelts - 1, CLIB_CACHE_LINE_BYTES);

  if (nelts & (nelts - 1))
      fformat (stderr, "FATAL: nelts MUST be a power of 2\n");

void vl_msg_api_handler_no_free (void *) __attribute__ ((weak));
vl_msg_api_handler_no_free (void *v)
/* To be called by vlib worker threads upon startup */
vlib_worker_thread_init (vlib_worker_thread_t * w)
  vlib_thread_main_t *tm = vlib_get_thread_main ();

   * Note: disabling signals in worker threads as follows
   * prevents the api post-mortem dump scheme from working
   *    pthread_sigmask (SIG_SETMASK, &s, 0);

  clib_mem_set_heap (w->thread_mheap);

  if (vec_len (tm->thread_prefix) && w->registration->short_name)
      w->name = format (0, "%v_%s_%d%c", tm->thread_prefix,
			w->registration->short_name, w->instance_id, '\0');
      vlib_set_thread_name ((char *) w->name);

  if (!w->registration->use_pthreads)

  /* Initial barrier sync, for both worker and i/o threads */
  clib_atomic_fetch_add (vlib_worker_threads->workers_at_barrier, 1);
  while (*vlib_worker_threads->wait_at_barrier)
  clib_atomic_fetch_add (vlib_worker_threads->workers_at_barrier, -1);
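  /* Initial-barrier handshake: each worker bumps workers_at_barrier, spins
   * while wait_at_barrier is set, then decrements the count again. The main
   * thread waits for all workers and clears wait_at_barrier in
   * vlib_worker_thread_initial_barrier_sync_and_release () below. */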
vlib_worker_thread_bootstrap_fn (void *arg)
  vlib_worker_thread_t *w = arg;

  w->lwp = syscall (SYS_gettid);
  w->thread_id = pthread_self ();

  __os_thread_index = w - vlib_worker_threads;

  void *frame_addr = __builtin_frame_address (0);
  if (frame_addr < (void *) w->thread_stack ||
      frame_addr > (void *) w->thread_stack + VLIB_THREAD_STACK_SIZE)
      /* heap is not set yet */
      fprintf (stderr, "thread stack is not set properly\n");

  w->thread_function (arg);
vlib_get_thread_core_numa (vlib_worker_thread_t * w, unsigned cpu_id)
  clib_bitmap_t *nbmp = 0, *cbmp = 0;
  int node, core_id = -1, numa_id = -1;

  core_id = os_get_cpu_phys_core_id (cpu_id);
  nbmp = os_get_online_cpu_node_bitmap ();
  clib_bitmap_foreach (node, nbmp) {
    cbmp = os_get_cpu_on_node_bitmap (node);
    if (clib_bitmap_get (cbmp, cpu_id))
    vec_reset_length (cbmp);

  w->core_id = core_id;
  w->numa_id = numa_id;
static clib_error_t *
vlib_launch_thread_int (void *fp, vlib_worker_thread_t * w, unsigned cpu_id)
  clib_mem_main_t *mm = &clib_mem_main;
  vlib_thread_main_t *tm = &vlib_thread_main;
  void *(*fp_arg) (void *) = fp;

  vlib_get_thread_core_numa (w, cpu_id);

  /* Set up NUMA-bound heap if indicated */
  if (mm->per_numa_mheaps[w->numa_id] == 0)
      /* If the user requested a NUMA heap, create it... */
      if (tm->numa_heap_size)
	  clib_mem_set_numa_affinity (w->numa_id, 1 /* force */ );
	  numa_heap = clib_mem_create_heap (0 /* DIY */ , tm->numa_heap_size,
					    "numa %u heap", w->numa_id);
	  clib_mem_set_default_numa_affinity ();
	  mm->per_numa_mheaps[w->numa_id] = numa_heap;
      /* Or, use the main heap */
      mm->per_numa_mheaps[w->numa_id] = w->thread_mheap;

  CPU_SET (cpu_id, &cpuset);

  if (pthread_attr_init (&attr))
    return clib_error_return_unix (0, "pthread_attr_init");

  if (pthread_attr_setstack (&attr, w->thread_stack,
			     VLIB_THREAD_STACK_SIZE))
    return clib_error_return_unix (0, "pthread_attr_setstack");

  if (pthread_create (&worker, &attr, fp_arg, (void *) w))
    return clib_error_return_unix (0, "pthread_create");

  if (pthread_setaffinity_np (worker, sizeof (cpu_set_t), &cpuset))
    return clib_error_return_unix (0, "pthread_setaffinity_np");

  if (pthread_attr_destroy (&attr))
    return clib_error_return_unix (0, "pthread_attr_destroy");
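  /* Launch sequence: init the pthread attributes, point the new thread at
   * the preallocated vlib thread stack, create it, pin it to cpu_id, then
   * drop the attribute object; any failure is reported via
   * clib_error_return_unix (). */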
static clib_error_t *
start_workers (vlib_main_t * vm)
  vlib_global_main_t *vgm = vlib_get_global_main ();
  vlib_main_t *fvm = vlib_get_first_main ();
  vlib_worker_thread_t *w;
  vlib_main_t *vm_clone;
  vlib_thread_main_t *tm = &vlib_thread_main;
  vlib_thread_registration_t *tr;
  vlib_node_runtime_t *rt;
  u32 n_vlib_mains = tm->n_vlib_mains;
  u32 worker_thread_index;
  u32 stats_err_entry_index = fvm->error_main.stats_err_entry_index;
  clib_mem_heap_t *main_heap = clib_mem_get_per_cpu_heap ();

  vlib_stats_register_mem_heap (main_heap);

  vec_reset_length (vlib_worker_threads);

  /* Set up the main thread */
  vec_add2_aligned (vlib_worker_threads, w, 1, CLIB_CACHE_LINE_BYTES);
  w->elog_track.name = "main thread";
  elog_track_register (vlib_get_elog_main (), &w->elog_track);

  if (vec_len (tm->thread_prefix))
      w->name = format (0, "%v_main%c", tm->thread_prefix, '\0');
      vlib_set_thread_name ((char *) w->name);

  vgm->elog_main.lock =
    clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES, CLIB_CACHE_LINE_BYTES);
  vgm->elog_main.lock[0] = 0;

  clib_callback_data_init (&vm->vlib_node_runtime_perf_callbacks,
			   &vm->worker_thread_main_loop_callback_lock);

  vec_validate_aligned (vgm->vlib_mains, n_vlib_mains - 1,
			CLIB_CACHE_LINE_BYTES);
  vec_set_len (vgm->vlib_mains, 0);
  vec_add1_aligned (vgm->vlib_mains, vm, CLIB_CACHE_LINE_BYTES);

  if (n_vlib_mains > 1)
      vlib_worker_threads->wait_at_barrier =
	clib_mem_alloc_aligned (sizeof (u32), CLIB_CACHE_LINE_BYTES);
      vlib_worker_threads->workers_at_barrier =
	clib_mem_alloc_aligned (sizeof (u32), CLIB_CACHE_LINE_BYTES);
      vlib_worker_threads->node_reforks_required =
	clib_mem_alloc_aligned (sizeof (u32), CLIB_CACHE_LINE_BYTES);

      /* We'll need the rpc vector lock... */
      clib_spinlock_init (&vm->pending_rpc_lock);

      /* Ask for an initial barrier sync */
      *vlib_worker_threads->workers_at_barrier = 0;
      *vlib_worker_threads->wait_at_barrier = 1;
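      /* Workers created below park in vlib_worker_thread_init () until this
       * initial barrier is released, which happens in
       * vlib_worker_thread_initial_barrier_sync_and_release (). */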
      /* Without update or refork */
      *vlib_worker_threads->node_reforks_required = 0;
      vgm->need_vlib_worker_thread_node_runtime_update = 0;

      vm->barrier_epoch = 0;
      vm->barrier_no_close_before = 0;

      worker_thread_index = 1;
      clib_spinlock_init (&vm->worker_thread_main_loop_callback_lock);

      for (i = 0; i < vec_len (tm->registrations); i++)
	  vlib_node_main_t *nm, *nm_clone;
	  tr = tm->registrations[i];

	  for (k = 0; k < tr->count; k++)
	      vec_add2 (vlib_worker_threads, w, 1);
	      /* Currently unused, may not really work */
		w->thread_mheap = clib_mem_create_heap (0, tr->mheap_size,
		w->thread_mheap = main_heap;

	      vlib_thread_stack_init (w - vlib_worker_threads);
	      w->thread_function = tr->function;
	      w->thread_function_arg = w;
	      w->registration = tr;
		(char *) format (0, "%s %d", tr->name, k + 1);
	      vec_add1 (w->elog_track.name, 0);
	      elog_track_register (vlib_get_elog_main (), &w->elog_track);

	      if (tr->no_data_structure_clone)

	      /* Fork vlib_global_main et al. Look for bugs here */
	      oldheap = clib_mem_set_heap (w->thread_mheap);

	      vm_clone = clib_mem_alloc_aligned (sizeof (*vm_clone),
						 CLIB_CACHE_LINE_BYTES);
	      clib_memcpy (vm_clone, vlib_get_first_main (),

	      vm_clone->thread_index = worker_thread_index;
	      vm_clone->pending_rpc_requests = 0;
	      vec_validate (vm_clone->pending_rpc_requests, 0);
	      vec_set_len (vm_clone->pending_rpc_requests, 0);
	      clib_memset (&vm_clone->random_buffer, 0,
			   sizeof (vm_clone->random_buffer));
		(&vm_clone->worker_thread_main_loop_callback_lock);
	      clib_callback_data_init
		(&vm_clone->vlib_node_runtime_perf_callbacks,
		 &vm_clone->worker_thread_main_loop_callback_lock);

	      nm = &vlib_get_first_main ()->node_main;
	      nm_clone = &vm_clone->node_main;
	      /* fork next frames array, preserving node runtime indices */
	      nm_clone->next_frames = vec_dup_aligned (nm->next_frames,
						       CLIB_CACHE_LINE_BYTES);
	      for (j = 0; j < vec_len (nm_clone->next_frames); j++)
		  vlib_next_frame_t *nf = &nm_clone->next_frames[j];
		  u32 save_node_runtime_index;

		  save_node_runtime_index = nf->node_runtime_index;
		  save_flags = nf->flags & VLIB_FRAME_NO_FREE_AFTER_DISPATCH;
		  vlib_next_frame_init (nf);
		  nf->node_runtime_index = save_node_runtime_index;
		  nf->flags = save_flags;
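		  /* vlib_next_frame_init () wipes the entry, so the node
		   * runtime index and the NO_FREE_AFTER_DISPATCH flag are
		   * saved above and restored here to keep the clone's graph
		   * wiring identical to the main thread's. */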
	      /* fork the frame dispatch queue */
	      nm_clone->pending_frames = 0;
	      vec_validate (nm_clone->pending_frames, 10);
	      vec_set_len (nm_clone->pending_frames, 0);

	      /* Allocate all nodes in single block for speed */
	      n = clib_mem_alloc_no_fail (vec_len (nm->nodes) * sizeof (*n));
	      for (j = 0; j < vec_len (nm->nodes); j++)
		  clib_memcpy (n, nm->nodes[j], sizeof (*n));
		  /* none of the copied nodes have enqueue rights given out */
		  n->owner_node_index = VLIB_INVALID_NODE_INDEX;
		  clib_memset (&n->stats_total, 0, sizeof (n->stats_total));
		  clib_memset (&n->stats_last_clear, 0,
			       sizeof (n->stats_last_clear));
		  vec_add1 (nm_clone->nodes, n);

	      nm_clone->nodes_by_type[VLIB_NODE_TYPE_INTERNAL] =
		vec_dup_aligned (nm->nodes_by_type[VLIB_NODE_TYPE_INTERNAL],
				 CLIB_CACHE_LINE_BYTES);
		nm_clone->nodes_by_type[VLIB_NODE_TYPE_INTERNAL])
		  vlib_node_t *n = vlib_get_node (vm, rt->node_index);
		  /* copy initial runtime_data from node */
		  if (n->runtime_data && n->runtime_data_bytes > 0)
		    clib_memcpy (rt->runtime_data, n->runtime_data,
				 clib_min (VLIB_NODE_RUNTIME_DATA_SIZE,
					   n->runtime_data_bytes));

	      nm_clone->nodes_by_type[VLIB_NODE_TYPE_INPUT] =
		vec_dup_aligned (nm->nodes_by_type[VLIB_NODE_TYPE_INPUT],
				 CLIB_CACHE_LINE_BYTES);
	      clib_interrupt_init (
		&nm_clone->input_node_interrupts,
		vec_len (nm_clone->nodes_by_type[VLIB_NODE_TYPE_INPUT]));
	      clib_interrupt_init (
		&nm_clone->pre_input_node_interrupts,
		vec_len (nm_clone->nodes_by_type[VLIB_NODE_TYPE_PRE_INPUT]));
	      vec_foreach (rt, nm_clone->nodes_by_type[VLIB_NODE_TYPE_INPUT])
		  vlib_node_t *n = vlib_get_node (vm, rt->node_index);
		  /* copy initial runtime_data from node */
		  if (n->runtime_data && n->runtime_data_bytes > 0)
		    clib_memcpy (rt->runtime_data, n->runtime_data,
				 clib_min (VLIB_NODE_RUNTIME_DATA_SIZE,
					   n->runtime_data_bytes));

	      nm_clone->nodes_by_type[VLIB_NODE_TYPE_PRE_INPUT] =
		vec_dup_aligned (nm->nodes_by_type[VLIB_NODE_TYPE_PRE_INPUT],
				 CLIB_CACHE_LINE_BYTES);
		nm_clone->nodes_by_type[VLIB_NODE_TYPE_PRE_INPUT])
		  vlib_node_t *n = vlib_get_node (vm, rt->node_index);
		  /* copy initial runtime_data from node */
		  if (n->runtime_data && n->runtime_data_bytes > 0)
		    clib_memcpy (rt->runtime_data, n->runtime_data,
				 clib_min (VLIB_NODE_RUNTIME_DATA_SIZE,
					   n->runtime_data_bytes));

	      nm_clone->processes = vec_dup_aligned (nm->processes,
						     CLIB_CACHE_LINE_BYTES);

	      /* Create per-thread frame freelist */
	      nm_clone->frame_sizes = 0;
	      nm_clone->node_by_error = nm->node_by_error;

	      /* Packet trace buffers are guaranteed to be empty, nothing to do here */

	      clib_mem_set_heap (oldheap);
	      vec_add1_aligned (vgm->vlib_mains, vm_clone,
				CLIB_CACHE_LINE_BYTES);

	      /* Switch to the stats segment ... */
	      vlib_stats_validate (stats_err_entry_index, worker_thread_index,
				   vec_len (fvm->error_main.counters) - 1);
	      c = vlib_stats_get_entry_data_pointer (stats_err_entry_index);
	      vm_clone->error_main.counters = c[worker_thread_index];
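	      /* Per-thread error counters live in the shared stats segment:
	       * the entry at stats_err_entry_index is sized to hold one row
	       * per thread, and each clone points error_main.counters at its
	       * own row. */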
	      vm_clone->error_main.counters_last_clear = vec_dup_aligned (
		vlib_get_first_main ()->error_main.counters_last_clear,
		CLIB_CACHE_LINE_BYTES);

	      worker_thread_index++;

  /* only have non-data-structure copy threads to create... */
  for (i = 0; i < vec_len (tm->registrations); i++)
      tr = tm->registrations[i];
      for (j = 0; j < tr->count; j++)
	  vec_add2 (vlib_worker_threads, w, 1);
	    w->thread_mheap = clib_mem_create_heap (0, tr->mheap_size,
	    w->thread_mheap = main_heap;
	  vlib_thread_stack_init (w - vlib_worker_threads);
	  w->thread_function = tr->function;
	  w->thread_function_arg = w;
	    (char *) format (0, "%s %d", tr->name, j + 1);
	  w->registration = tr;
	  vec_add1 (w->elog_track.name, 0);
	  elog_track_register (vlib_get_elog_main (), &w->elog_track);

  worker_thread_index = 1;

  for (i = 0; i < vec_len (tm->registrations); i++)
      tr = tm->registrations[i];
      if (tr->use_pthreads || tm->use_pthreads)
	  for (j = 0; j < tr->count; j++)
	      w = vlib_worker_threads + worker_thread_index++;
	      err = vlib_launch_thread_int (vlib_worker_thread_bootstrap_fn,
		clib_unix_error ("%U, thread %s init on cpu %d failed",
				 format_clib_error, err, tr->name, 0);
	  clib_bitmap_foreach (c, tr->coremask) {
	    w = vlib_worker_threads + worker_thread_index++;
	    err = vlib_launch_thread_int (vlib_worker_thread_bootstrap_fn,
	      clib_unix_error ("%U, thread %s init on cpu %d failed",
			       format_clib_error, err, tr->name, c);

  vlib_worker_thread_barrier_sync (vm);
  err = vlib_call_init_exit_functions (
    vm, &vgm->num_workers_change_function_registrations, 1 /* call_once */,
    clib_error_report (err);
  vlib_worker_thread_barrier_release (vm);

VLIB_MAIN_LOOP_ENTER_FUNCTION (start_workers);
worker_thread_node_runtime_update_internal (void)
  vlib_node_main_t *nm, *nm_clone;
  vlib_main_t *vm_clone;
  vlib_node_runtime_t *rt;

  ASSERT (vlib_get_thread_index () == 0);

  vm = vlib_get_first_main ();

  ASSERT (*vlib_worker_threads->wait_at_barrier == 1);

   * Scrape all runtime stats, so we don't lose node runtime(s) with
   * pending counts, or throw away worker / io thread counts.
  for (j = 0; j < vec_len (nm->nodes); j++)
      vlib_node_sync_stats (vm, n);

  for (i = 1; i < vlib_get_n_threads (); i++)
      vm_clone = vlib_get_main_by_index (i);
      nm_clone = &vm_clone->node_main;

      for (j = 0; j < vec_len (nm_clone->nodes); j++)
	  n = nm_clone->nodes[j];
	  rt = vlib_node_get_runtime (vm_clone, n->index);
	  vlib_node_runtime_sync_stats (vm_clone, rt, 0, 0, 0);

  /* Per-worker clone rebuilds are now done on each thread */
vlib_worker_thread_node_refork (void)
  vlib_main_t *vm, *vm_clone;
  vlib_node_main_t *nm, *nm_clone;
  vlib_node_t **old_nodes_clone;
  vlib_node_runtime_t *rt, *old_rt;
  vlib_node_t *new_n_clone;

  vm = vlib_get_first_main ();
  vm_clone = vlib_get_main ();
  nm_clone = &vm_clone->node_main;

  /* Re-clone error heap */
  u64 *old_counters_all_clear = vm_clone->error_main.counters_last_clear;

  clib_memcpy_fast (&vm_clone->error_main, &vm->error_main,
		    sizeof (vm->error_main));
  j = vec_len (vm->error_main.counters) - 1;

  c = vlib_stats_get_entry_data_pointer (vm->error_main.stats_err_entry_index);
  vm_clone->error_main.counters = c[vm_clone->thread_index];

  vec_validate_aligned (old_counters_all_clear, j, CLIB_CACHE_LINE_BYTES);
  vm_clone->error_main.counters_last_clear = old_counters_all_clear;

  for (j = 0; j < vec_len (nm_clone->next_frames); j++)
      vlib_next_frame_t *nf = &nm_clone->next_frames[j];
      if ((nf->flags & VLIB_FRAME_IS_ALLOCATED) && nf->frame != NULL)
	  vlib_frame_t *f = nf->frame;
	  vlib_frame_free (vm_clone, f);

  vec_free (nm_clone->next_frames);
  nm_clone->next_frames = vec_dup_aligned (nm->next_frames,
					   CLIB_CACHE_LINE_BYTES);

  for (j = 0; j < vec_len (nm_clone->next_frames); j++)
      vlib_next_frame_t *nf = &nm_clone->next_frames[j];
      u32 save_node_runtime_index;

      save_node_runtime_index = nf->node_runtime_index;
      save_flags = nf->flags & VLIB_FRAME_NO_FREE_AFTER_DISPATCH;
      vlib_next_frame_init (nf);
      nf->node_runtime_index = save_node_runtime_index;
      nf->flags = save_flags;

  old_nodes_clone = nm_clone->nodes;

  /* Allocate all nodes in single block for speed */
    clib_mem_alloc_no_fail (vec_len (nm->nodes) * sizeof (*new_n_clone));
  for (j = 0; j < vec_len (nm->nodes); j++)
      vlib_node_t *new_n = nm->nodes[j];

      clib_memcpy_fast (new_n_clone, new_n, sizeof (*new_n));
      /* none of the copied nodes have enqueue rights given out */
      new_n_clone->owner_node_index = VLIB_INVALID_NODE_INDEX;

      if (j >= vec_len (old_nodes_clone))
	  /* new node, set to zero */
	  clib_memset (&new_n_clone->stats_total, 0,
		       sizeof (new_n_clone->stats_total));
	  clib_memset (&new_n_clone->stats_last_clear, 0,
		       sizeof (new_n_clone->stats_last_clear));

	  vlib_node_t *old_n_clone = old_nodes_clone[j];
	  /* Copy stats if the old data is valid */
	  clib_memcpy_fast (&new_n_clone->stats_total,
			    &old_n_clone->stats_total,
			    sizeof (new_n_clone->stats_total));
	  clib_memcpy_fast (&new_n_clone->stats_last_clear,
			    &old_n_clone->stats_last_clear,
			    sizeof (new_n_clone->stats_last_clear));

	  /* keep previous node state */
	  new_n_clone->state = old_n_clone->state;
	  new_n_clone->flags = old_n_clone->flags;

      vec_add1 (nm_clone->nodes, new_n_clone);

  /* Free the old node clones */
  clib_mem_free (old_nodes_clone[0]);

  vec_free (old_nodes_clone);
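  /* The old node clones were carved out of one contiguous allocation (see
   * "Allocate all nodes in single block" above), so freeing element 0
   * releases the whole block; only the pointer vector itself remains. */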
  /* re-clone internal nodes */
  old_rt = nm_clone->nodes_by_type[VLIB_NODE_TYPE_INTERNAL];
  nm_clone->nodes_by_type[VLIB_NODE_TYPE_INTERNAL] =
    vec_dup_aligned (nm->nodes_by_type[VLIB_NODE_TYPE_INTERNAL],
		     CLIB_CACHE_LINE_BYTES);

  vec_foreach (rt, nm_clone->nodes_by_type[VLIB_NODE_TYPE_INTERNAL])
      vlib_node_t *n = vlib_get_node (vm, rt->node_index);
      /* copy runtime_data, will be overwritten later for existing rt */
      if (n->runtime_data && n->runtime_data_bytes > 0)
	clib_memcpy_fast (rt->runtime_data, n->runtime_data,
			  clib_min (VLIB_NODE_RUNTIME_DATA_SIZE,
				    n->runtime_data_bytes));

  for (j = 0; j < vec_len (old_rt); j++)
      rt = vlib_node_get_runtime (vm_clone, old_rt[j].node_index);
      rt->state = old_rt[j].state;
      rt->flags = old_rt[j].flags;
      clib_memcpy_fast (rt->runtime_data, old_rt[j].runtime_data,
			VLIB_NODE_RUNTIME_DATA_SIZE);

  /* re-clone input nodes */
  old_rt = nm_clone->nodes_by_type[VLIB_NODE_TYPE_INPUT];
  nm_clone->nodes_by_type[VLIB_NODE_TYPE_INPUT] =
    vec_dup_aligned (nm->nodes_by_type[VLIB_NODE_TYPE_INPUT],
		     CLIB_CACHE_LINE_BYTES);
  clib_interrupt_resize (
    &nm_clone->input_node_interrupts,
    vec_len (nm_clone->nodes_by_type[VLIB_NODE_TYPE_INPUT]));
  clib_interrupt_resize (
    &nm_clone->pre_input_node_interrupts,
    vec_len (nm_clone->nodes_by_type[VLIB_NODE_TYPE_PRE_INPUT]));

  vec_foreach (rt, nm_clone->nodes_by_type[VLIB_NODE_TYPE_INPUT])
      vlib_node_t *n = vlib_get_node (vm, rt->node_index);
      /* copy runtime_data, will be overwritten later for existing rt */
      if (n->runtime_data && n->runtime_data_bytes > 0)
	clib_memcpy_fast (rt->runtime_data, n->runtime_data,
			  clib_min (VLIB_NODE_RUNTIME_DATA_SIZE,
				    n->runtime_data_bytes));

  for (j = 0; j < vec_len (old_rt); j++)
      rt = vlib_node_get_runtime (vm_clone, old_rt[j].node_index);
      rt->state = old_rt[j].state;
      rt->flags = old_rt[j].flags;
      clib_memcpy_fast (rt->runtime_data, old_rt[j].runtime_data,
			VLIB_NODE_RUNTIME_DATA_SIZE);

  /* re-clone pre-input nodes */
  old_rt = nm_clone->nodes_by_type[VLIB_NODE_TYPE_PRE_INPUT];
  nm_clone->nodes_by_type[VLIB_NODE_TYPE_PRE_INPUT] =
    vec_dup_aligned (nm->nodes_by_type[VLIB_NODE_TYPE_PRE_INPUT],
		     CLIB_CACHE_LINE_BYTES);

  vec_foreach (rt, nm_clone->nodes_by_type[VLIB_NODE_TYPE_PRE_INPUT])
      vlib_node_t *n = vlib_get_node (vm, rt->node_index);
      /* copy runtime_data, will be overwritten later for existing rt */
      if (n->runtime_data && n->runtime_data_bytes > 0)
	clib_memcpy_fast (rt->runtime_data, n->runtime_data,
			  clib_min (VLIB_NODE_RUNTIME_DATA_SIZE,
				    n->runtime_data_bytes));

  for (j = 0; j < vec_len (old_rt); j++)
      rt = vlib_node_get_runtime (vm_clone, old_rt[j].node_index);
      rt->state = old_rt[j].state;
      rt->flags = old_rt[j].flags;
      clib_memcpy_fast (rt->runtime_data, old_rt[j].runtime_data,
			VLIB_NODE_RUNTIME_DATA_SIZE);

  vec_free (nm_clone->processes);
  nm_clone->processes = vec_dup_aligned (nm->processes,
					 CLIB_CACHE_LINE_BYTES);
  nm_clone->node_by_error = nm->node_by_error;
vlib_worker_thread_node_runtime_update (void)
   * Make a note that we need to do a node runtime update
   * prior to releasing the barrier.
  vlib_global_main.need_vlib_worker_thread_node_runtime_update = 1;
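  /* The actual work is deferred: vlib_worker_thread_barrier_release () checks
   * this flag, runs worker_thread_node_runtime_update_internal () on the main
   * thread, and then has each worker refork its own clones via
   * vlib_worker_thread_node_refork (). */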
unformat_sched_policy (unformat_input_t * input, va_list * args)
  u32 *r = va_arg (*args, u32 *);
#define _(v,f,s) else if (unformat (input, s)) *r = SCHED_POLICY_##f;
  foreach_sched_policy
static clib_error_t *
cpu_config (vlib_main_t * vm, unformat_input_t * input)
  vlib_thread_registration_t *tr;
  vlib_thread_main_t *tm = &vlib_thread_main;

  tm->thread_registrations_by_name = hash_create_string (0, sizeof (uword));
  tm->n_thread_stacks = 1;	/* account for main thread */
  tm->sched_policy = ~0;
  tm->sched_priority = ~0;
  tm->main_lcore = ~0;
  tm->use_main_core_auto = 0;

      hash_set_mem (tm->thread_registrations_by_name, tr->name, (uword) tr);

  while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
      if (unformat (input, "use-pthreads"))
	tm->use_pthreads = 1;
      else if (unformat (input, "thread-prefix %v", &tm->thread_prefix))
      else if (unformat (input, "main-core auto"))
	tm->use_main_core_auto = 1;
      else if (unformat (input, "main-core %u", &tm->main_lcore))
      else if (unformat (input, "skip-cores %u", &tm->skip_cores))
      else if (unformat (input, "numa-heap-size %U",
			 unformat_memory_size, &tm->numa_heap_size))
      else if (unformat (input, "coremask-%s %U", &name,
			 unformat_bitmap_mask, &bitmap) ||
	       unformat (input, "corelist-%s %U", &name,
			 unformat_bitmap_list, &bitmap))
	  p = hash_get_mem (tm->thread_registrations_by_name, name);
	    return clib_error_return (0, "no such thread type '%s'", name);

	  tr = (vlib_thread_registration_t *) p[0];

	  if (tr->use_pthreads)
	    return clib_error_return (0,
				      "corelist cannot be set for '%s' threads",
	    return clib_error_return
	      (0, "core placement of '%s' threads is already configured",

	  tr->coremask = bitmap;
	  tr->count = clib_bitmap_count_set_bits (tr->coremask);
	(input, "scheduler-policy %U", unformat_sched_policy,
      else if (unformat (input, "scheduler-priority %u", &tm->sched_priority))
      else if (unformat (input, "%s %u", &name, &count))
	  p = hash_get_mem (tm->thread_registrations_by_name, name);
	    return clib_error_return (0, "no such thread type '%s'", name);
	  tr = (vlib_thread_registration_t *) p[0];

	  if (tr->fixed_count)
	    return clib_error_return
	      (0, "number of '%s' threads not configurable", name);
	    return clib_error_return
	      (0, "number of '%s' threads is already configured", name);

  if (tm->main_lcore != ~0 && tm->use_main_core_auto)
      return clib_error_return (
	0, "cannot set both 'main-core %u' and 'main-core auto'",

  if (tm->sched_priority != ~0)
      if (tm->sched_policy == SCHED_FIFO || tm->sched_policy == SCHED_RR)
	  u32 prio_max = sched_get_priority_max (tm->sched_policy);
	  u32 prio_min = sched_get_priority_min (tm->sched_policy);
	  if (tm->sched_priority > prio_max)
	    tm->sched_priority = prio_max;
	  if (tm->sched_priority < prio_min)
	    tm->sched_priority = prio_min;
	return clib_error_return
	  "scheduling priority (%d) is not allowed for `normal` scheduling policy",
	  tm->sched_priority);

  if (!tm->thread_prefix)
    tm->thread_prefix = format (0, "vpp");

  tm->n_thread_stacks += tr->count;
  tm->n_pthreads += tr->count * tr->use_pthreads;
  tm->n_threads += tr->count * (tr->use_pthreads == 0);

VLIB_EARLY_CONFIG_FUNCTION (cpu_config, "cpu");
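/* For reference, a typical startup.conf "cpu" stanza exercising the options
 * parsed above might look like the sketch below. Core numbers are
 * illustrative only and must match the host topology; "workers" is the
 * registered worker thread type name used with corelist-/coremask-:
 *
 *   cpu {
 *     main-core 1
 *     corelist-workers 2-3,18-19
 *     scheduler-policy fifo
 *     scheduler-priority 50
 *   }
 */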
 * Enforce minimum open time to minimize packet loss due to Rx overflow,
 * based on a test-based heuristic that the barrier should be open for at
 * least 3 times as long as it is closed (with an upper bound of 1ms because
 * by that point it is probably too late to make a difference)

#ifndef BARRIER_MINIMUM_OPEN_LIMIT
#define BARRIER_MINIMUM_OPEN_LIMIT 0.001

#ifndef BARRIER_MINIMUM_OPEN_FACTOR
#define BARRIER_MINIMUM_OPEN_FACTOR 3
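/* Worked example: if a barrier sync kept the barrier closed for 200 us, the
 * next close is not allowed for 3 * 200 us = 600 us; a 500 us closure would
 * ask for 1.5 ms but is capped at the 1 ms BARRIER_MINIMUM_OPEN_LIMIT. */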
vlib_worker_thread_initial_barrier_sync_and_release (vlib_main_t * vm)
  f64 now = vlib_time_now (vm);
  u32 count = vlib_get_n_threads () - 1;

  /* No worker threads? */

  deadline = now + BARRIER_SYNC_TIMEOUT;
  *vlib_worker_threads->wait_at_barrier = 1;
  while (*vlib_worker_threads->workers_at_barrier != count)
      if ((now = vlib_time_now (vm)) > deadline)
	  fformat (stderr, "%s: worker thread deadlock\n", __FUNCTION__);

  *vlib_worker_threads->wait_at_barrier = 0;
 * Return true if the worker thread barrier is held
vlib_worker_thread_barrier_held (void)
  if (vlib_get_n_threads () < 2)

  return (*vlib_worker_threads->wait_at_barrier == 1);
vlib_worker_thread_barrier_sync_int (vlib_main_t * vm, const char *func_name)
  f64 max_vector_rate;

  if (vlib_get_n_threads () < 2)

  ASSERT (vlib_get_thread_index () == 0);

  vlib_worker_threads[0].barrier_caller = func_name;
  count = vlib_get_n_threads () - 1;

  /* Record entry relative to last close */
  now = vlib_time_now (vm);
  t_entry = now - vm->barrier_epoch;

  /* Tolerate recursive calls */
  if (++vlib_worker_threads[0].recursion_level > 1)
      barrier_trace_sync_rec (t_entry);

  if (PREDICT_FALSE (vec_len (vm->barrier_perf_callbacks) != 0))
    clib_call_callbacks (vm->barrier_perf_callbacks, vm,
			 vm->clib_time.last_cpu_time, 0 /* enter */ );

   * Need data to decide if we're working hard enough to honor
   * the barrier hold-down timer.
  max_vector_rate = 0.0;
  for (i = 1; i < vlib_get_n_threads (); i++)
      vlib_main_t *ovm = vlib_get_main_by_index (i);
      max_vector_rate = clib_max (max_vector_rate,
				  (f64) vlib_last_vectors_per_main_loop (ovm));

  vlib_worker_threads[0].barrier_sync_count++;

  /* Enforce minimum barrier open time to minimize packet loss */
  ASSERT (vm->barrier_no_close_before <= (now + BARRIER_MINIMUM_OPEN_LIMIT));

   * If any worker thread seems busy, which we define
   * as a vector rate above 10, we enforce the barrier hold-down timer
  if (max_vector_rate > 10.0)
      now = vlib_time_now (vm);
      /* Barrier hold-down timer expired? */
      if (now >= vm->barrier_no_close_before)
	  if ((vm->barrier_no_close_before - now)
	      > (2.0 * BARRIER_MINIMUM_OPEN_LIMIT))
	      ("clock change: would have waited for %.4f seconds",
	       (vm->barrier_no_close_before - now));

  /* Record time of closure */
  t_open = now - vm->barrier_epoch;
  vm->barrier_epoch = now;

  deadline = now + BARRIER_SYNC_TIMEOUT;

  *vlib_worker_threads->wait_at_barrier = 1;
  while (*vlib_worker_threads->workers_at_barrier != count)
      if ((now = vlib_time_now (vm)) > deadline)
	  fformat (stderr, "%s: worker thread deadlock\n", __FUNCTION__);

  t_closed = now - vm->barrier_epoch;

  barrier_trace_sync (t_entry, t_open, t_closed);
vlib_worker_thread_barrier_release (vlib_main_t * vm)
  vlib_global_main_t *vgm = vlib_get_global_main ();
  f64 t_update_main = 0.0;
  int refork_needed = 0;

  if (vlib_get_n_threads () < 2)

  ASSERT (vlib_get_thread_index () == 0);

  now = vlib_time_now (vm);
  t_entry = now - vm->barrier_epoch;

  if (--vlib_worker_threads[0].recursion_level > 0)
      barrier_trace_release_rec (t_entry);

  /* Update (all) node runtimes before releasing the barrier, if needed */
  if (vgm->need_vlib_worker_thread_node_runtime_update)
       * Lock stat segment here, so we're safe when
       * rebuilding the stat segment node clones from the
      vlib_stats_segment_lock ();

      /* Do stats elements on main thread */
      worker_thread_node_runtime_update_internal ();
      vgm->need_vlib_worker_thread_node_runtime_update = 0;
      /* Do per thread rebuilds in parallel */
      clib_atomic_fetch_add (vlib_worker_threads->node_reforks_required,
			     (vlib_get_n_threads () - 1));
      now = vlib_time_now (vm);
      t_update_main = now - vm->barrier_epoch;

  deadline = now + BARRIER_SYNC_TIMEOUT;

   * Note when we let go of the barrier.
   * Workers can use this to derive a reasonably accurate
   * time offset. See vlib_time_now(...)
  vm->time_last_barrier_release = vlib_time_now (vm);
  CLIB_MEMORY_STORE_BARRIER ();

  *vlib_worker_threads->wait_at_barrier = 0;

  while (*vlib_worker_threads->workers_at_barrier > 0)
      if ((now = vlib_time_now (vm)) > deadline)
	  fformat (stderr, "%s: worker thread deadlock\n", __FUNCTION__);

  /* Wait for reforks before continuing */
      now = vlib_time_now (vm);
      deadline = now + BARRIER_SYNC_TIMEOUT;

      while (*vlib_worker_threads->node_reforks_required > 0)
	  if ((now = vlib_time_now (vm)) > deadline)
	      fformat (stderr, "%s: worker thread refork deadlock\n",

      vlib_stats_segment_unlock ();

  t_closed_total = now - vm->barrier_epoch;

  minimum_open = t_closed_total * BARRIER_MINIMUM_OPEN_FACTOR;

  if (minimum_open > BARRIER_MINIMUM_OPEN_LIMIT)
      minimum_open = BARRIER_MINIMUM_OPEN_LIMIT;

  vm->barrier_no_close_before = now + minimum_open;

  /* Record barrier epoch (used to enforce minimum open time) */
  vm->barrier_epoch = now;

  barrier_trace_release (t_entry, t_closed_total, t_update_main);

  if (PREDICT_FALSE (vec_len (vm->barrier_perf_callbacks) != 0))
    clib_call_callbacks (vm->barrier_perf_callbacks, vm,
			 vm->clib_time.last_cpu_time, 1 /* leave */ );
vlib_worker_sync_rpc (void *args)
  ASSERT (vlib_thread_is_main_w_barrier ());
  vlib_worker_threads->wait_before_barrier = 0;

vlib_workers_sync (void)
  if (PREDICT_FALSE (!vlib_num_workers ()))

  if (!(*vlib_worker_threads->wait_at_barrier) &&
      !clib_atomic_swap_rel_n (&vlib_worker_threads->wait_before_barrier, 1))
      u32 thread_index = vlib_get_thread_index ();
      vlib_rpc_call_main_thread (vlib_worker_sync_rpc, (u8 *) &thread_index,
				 sizeof (thread_index));
      vlib_worker_flush_pending_rpc_requests (vlib_get_main ());

  /* Wait until main thread asks for barrier */
  while (!(*vlib_worker_threads->wait_at_barrier))

  /* Stop before barrier and make sure all threads are either
   * at worker barrier or the barrier before it */
  clib_atomic_fetch_add (&vlib_worker_threads->workers_before_barrier, 1);
  while (vlib_num_workers () > (*vlib_worker_threads->workers_at_barrier +
				vlib_worker_threads->workers_before_barrier))
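  /* vlib_workers_sync () runs on a worker thread and must be paired with
   * vlib_workers_continue () below once the caller has finished the work it
   * needed the other workers parked for. */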
vlib_workers_continue (void)
  if (PREDICT_FALSE (!vlib_num_workers ()))

  clib_atomic_fetch_add (&vlib_worker_threads->done_work_before_barrier, 1);

  /* Wait until all workers are done with work before barrier */
  while (vlib_worker_threads->done_work_before_barrier <
	 vlib_worker_threads->workers_before_barrier)

  clib_atomic_fetch_add (&vlib_worker_threads->done_work_before_barrier, -1);
  clib_atomic_fetch_add (&vlib_worker_threads->workers_before_barrier, -1);

 * Wait until each of the workers has been once around the track
vlib_worker_wait_one_loop (void)
  vlib_global_main_t *vgm = vlib_get_global_main ();
  ASSERT (vlib_get_thread_index () == 0);

  if (vlib_get_n_threads () < 2)

  if (vlib_worker_thread_barrier_held ())

  vec_validate (counts, vlib_get_n_threads () - 1);

  /* record the current loop counts */
  vec_foreach_index (ii, vgm->vlib_mains)
    counts[ii] = vgm->vlib_mains[ii]->main_loop_count;

  /* spin until each changes, apart from the main thread, or we'd be
  for (ii = 1; ii < vec_len (counts); ii++)
      while (counts[ii] == vgm->vlib_mains[ii]->main_loop_count)
vlib_worker_flush_pending_rpc_requests (vlib_main_t *vm)
  vlib_main_t *vm_global = vlib_get_first_main ();

  ASSERT (vm != vm_global);

  clib_spinlock_lock_if_init (&vm_global->pending_rpc_lock);
  vec_append (vm_global->pending_rpc_requests, vm->pending_rpc_requests);
  vec_reset_length (vm->pending_rpc_requests);
  clib_spinlock_unlock_if_init (&vm_global->pending_rpc_lock);
vlib_worker_thread_fn (void *arg)
  vlib_global_main_t *vgm = vlib_get_global_main ();
  vlib_worker_thread_t *w = (vlib_worker_thread_t *) arg;
  vlib_main_t *vm = vlib_get_main ();

  ASSERT (vm->thread_index == vlib_get_thread_index ());

  vlib_worker_thread_init (w);
  clib_time_init (&vm->clib_time);
  clib_mem_set_heap (w->thread_mheap);

  vm->worker_init_functions_called = hash_create (0, 0);

  e = vlib_call_init_exit_functions_no_sort (
    vm, &vgm->worker_init_function_registrations, 1 /* call_once */,
    clib_error_report (e);

  vlib_worker_loop (vm);

VLIB_REGISTER_THREAD (worker_thread_reg, static) = {
  .function = vlib_worker_thread_fn,

extern clib_march_fn_registration
  *vlib_frame_queue_dequeue_with_aux_fn_march_fn_registrations;
extern clib_march_fn_registration
  *vlib_frame_queue_dequeue_fn_march_fn_registrations;
vlib_frame_queue_main_init (u32 node_index, u32 frame_queue_nelts)
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  vlib_main_t *vm = vlib_get_main ();
  vlib_frame_queue_main_t *fqm;
  vlib_frame_queue_t *fq;

  if (frame_queue_nelts == 0)
    frame_queue_nelts = FRAME_QUEUE_MAX_NELTS;

  num_threads = 1 /* main thread */ + tm->n_threads;
  ASSERT (frame_queue_nelts >= 8 + num_threads);

  vec_add2 (tm->frame_queue_mains, fqm, 1);

  node = vlib_get_node (vm, fqm->node_index);

  if (node->aux_offset)
      fqm->frame_queue_dequeue_fn =
	CLIB_MARCH_FN_VOID_POINTER (vlib_frame_queue_dequeue_with_aux_fn);
      fqm->frame_queue_dequeue_fn =
	CLIB_MARCH_FN_VOID_POINTER (vlib_frame_queue_dequeue_fn);

  fqm->node_index = node_index;
  fqm->frame_queue_nelts = frame_queue_nelts;

  vec_validate (fqm->vlib_frame_queues, tm->n_vlib_mains - 1);
  vec_set_len (fqm->vlib_frame_queues, 0);
  for (i = 0; i < tm->n_vlib_mains; i++)
      fq = vlib_frame_queue_alloc (frame_queue_nelts);
      vec_add1 (fqm->vlib_frame_queues, fq);

  return (fqm - tm->frame_queue_mains);
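/* The return value is the index of this entry in tm->frame_queue_mains;
 * callers (typically packet handoff code) cache it and use it when enqueuing
 * frames to another thread's per-thread queue. */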
vlib_process_signal_event_mt_helper (vlib_process_signal_event_mt_args_t *
  ASSERT (vlib_get_thread_index () == 0);
  vlib_process_signal_event (vlib_get_main (), args->node_index,
			     args->type_opaque, args->data);

void *rpc_call_main_thread_cb_fn;

vlib_rpc_call_main_thread (void *callback, u8 * args, u32 arg_size)
  if (rpc_call_main_thread_cb_fn)
      void (*fp) (void *, u8 *, u32) = rpc_call_main_thread_cb_fn;
      (*fp) (callback, args, arg_size);
    clib_warning ("BUG: rpc_call_main_thread_cb_fn NULL!");
threads_init (vlib_main_t * vm)
  const vlib_thread_main_t *tm = vlib_get_thread_main ();

  if (tm->main_lcore == ~0 && tm->n_vlib_mains > 1)
    return clib_error_return (0, "Configuration error, a main core must "
			      "be specified when using worker threads");

VLIB_INIT_FUNCTION (threads_init);
static clib_error_t *
show_clock_command_fn (vlib_main_t * vm,
		       unformat_input_t * input, vlib_cli_command_t * cmd)
  clib_timebase_t _tb, *tb = &_tb;

  (void) unformat (input, "verbose %=", &verbose, 1);

  clib_timebase_init (tb, 0 /* GMT */ , CLIB_TIMEBASE_DAYLIGHT_NONE,

  vlib_cli_output (vm, "%U, %U GMT", format_clib_time, &vm->clib_time,
		   verbose, format_clib_timebase_time,
		   clib_timebase_now (tb));

  vlib_cli_output (vm, "Time last barrier release %.9f",
		   vm->time_last_barrier_release);

  foreach_vlib_main ()
      vlib_cli_output (vm, "%d: %U", this_vlib_main->thread_index,
		       format_clib_time, &this_vlib_main->clib_time, verbose);

      vlib_cli_output (vm, "Thread %d offset %.9f error %.9f",
		       this_vlib_main->thread_index,
		       this_vlib_main->time_offset,
		       vm->time_last_barrier_release -
		       this_vlib_main->time_last_barrier_release);
VLIB_CLI_COMMAND (f_command, static) =
  .path = "show clock",
  .short_help = "show clock",
  .function = show_clock_command_fn,

vlib_thread_main_t *
vlib_get_thread_main_not_inline (void)
  return vlib_get_thread_main ();
 * fd.io coding-style-patch-verification: ON
 * eval: (c-set-style "gnu")