/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vppinfra/format.h>
#include <vppinfra/time_range.h>
#include <vppinfra/interrupt.h>
#include <vppinfra/linux/sysfs.h>
#include <vlib/vlib.h>

#include <vlib/threads.h>

#include <vlib/stats/stats.h>
vlib_worker_thread_t *vlib_worker_threads;
vlib_thread_main_t vlib_thread_main;
/*
 * Barrier tracing can be enabled on a normal build to collect information
 * on barrier use, including timings and call stacks. Deliberately not
 * keyed off CLIB_DEBUG, because that can add significant overhead which
 * impacts observed timings.
 */
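
/*
 * Illustrative note (an assumption, not part of the original source): the
 * traces below land in the global event log only while
 * vlib_worker_threads->barrier_elog_enabled is set; in stock VPP that flag
 * is believed to be toggled from the debug CLI, with the resulting records
 * inspected via "show event-logger".
 */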
static inline void
barrier_trace_sync (f64 t_entry, f64 t_open, f64 t_closed)
{
  if (!vlib_worker_threads->barrier_elog_enabled)
    return;

  ELOG_TYPE_DECLARE (e) = {
    .format = "bar-trace-%s-#%d",
    .format_args = "T4i4",
  };

  struct { u32 caller, count, t_entry, t_open, t_closed; } *ed = 0;

  ed = ELOG_DATA (&vlib_global_main.elog_main, e);
  ed->count = (int) vlib_worker_threads[0].barrier_sync_count;
  ed->caller = elog_string (&vlib_global_main.elog_main,
                            (char *) vlib_worker_threads[0].barrier_caller);
  ed->t_entry = (int) (1000000.0 * t_entry);
  ed->t_open = (int) (1000000.0 * t_open);
  ed->t_closed = (int) (1000000.0 * t_closed);
}
static inline void
barrier_trace_sync_rec (f64 t_entry)
{
  if (!vlib_worker_threads->barrier_elog_enabled)
    return;

  ELOG_TYPE_DECLARE (e) = {
    .format = "bar-syncrec-%s-#%d",
    .format_args = "T4i4",
  };

  struct { u32 caller, depth; } *ed = 0;

  ed = ELOG_DATA (&vlib_global_main.elog_main, e);
  ed->depth = (int) vlib_worker_threads[0].recursion_level - 1;
  ed->caller = elog_string (&vlib_global_main.elog_main,
                            (char *) vlib_worker_threads[0].barrier_caller);
}
static inline void
barrier_trace_release_rec (f64 t_entry)
{
  if (!vlib_worker_threads->barrier_elog_enabled)
    return;

  ELOG_TYPE_DECLARE (e) = {
    .format = "bar-relrrec-#%d",
  };

  struct { u32 depth; } *ed = 0;

  ed = ELOG_DATA (&vlib_global_main.elog_main, e);
  ed->depth = (int) vlib_worker_threads[0].recursion_level;
}
static inline void
barrier_trace_release (f64 t_entry, f64 t_closed_total, f64 t_update_main)
{
  if (!vlib_worker_threads->barrier_elog_enabled)
    return;

  ELOG_TYPE_DECLARE (e) = {
    .format = "bar-rel-#%d-e%d-u%d-t%d",
    .format_args = "i4i4i4i4",
  };

  struct { u32 count, t_entry, t_update_main, t_closed_total; } *ed = 0;

  ed = ELOG_DATA (&vlib_global_main.elog_main, e);
  ed->t_entry = (int) (1000000.0 * t_entry);
  ed->t_update_main = (int) (1000000.0 * t_update_main);
  ed->t_closed_total = (int) (1000000.0 * t_closed_total);
  ed->count = (int) vlib_worker_threads[0].barrier_sync_count;

  /* Reset context for next trace */
  vlib_worker_threads[0].barrier_context = NULL;
}
u32
os_get_nthreads (void)
{
  return vec_len (vlib_thread_stacks);
}
void
vlib_set_thread_name (char *name)
{
  int pthread_setname_np (pthread_t __target_thread, const char *__name);
  int rv;
  pthread_t thread = pthread_self ();

  rv = pthread_setname_np (thread, name);
  if (rv)
    clib_warning ("pthread_setname_np returned %d", rv);
}
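
/*
 * Illustrative note (an assumption about glibc behavior, not from this
 * file): on Linux, pthread_setname_np() limits names to 15 characters plus
 * a terminating NUL and returns ERANGE for anything longer, which the
 * clib_warning above would report. Worker names such as "vpp_wk_0" built
 * in vlib_worker_thread_init() fit only while the thread-prefix is short.
 */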
static int
sort_registrations_by_no_clone (void *a0, void *a1)
{
  vlib_thread_registration_t **tr0 = a0;
  vlib_thread_registration_t **tr1 = a1;

  return ((i32) ((*tr0)->no_data_structure_clone)
          - ((i32) ((*tr1)->no_data_structure_clone)));
}
/* Called early in the init sequence */

clib_error_t *
vlib_thread_init (vlib_main_t * vm)
{
  vlib_thread_main_t *tm = &vlib_thread_main;
  vlib_worker_thread_t *w;
  vlib_thread_registration_t *tr;
  u32 n_vlib_mains = 1;
  u32 first_index = 1;
  u32 i, j;
  uword *avail_cpu;
  u32 stats_num_worker_threads_dir_index;

  stats_num_worker_threads_dir_index =
    vlib_stats_add_gauge ("/sys/num_worker_threads");
  ASSERT (stats_num_worker_threads_dir_index != ~0);

  /* get bitmaps of active cpu cores and sockets */
  tm->cpu_core_bitmap =
    clib_sysfs_list_to_bitmap ("/sys/devices/system/cpu/online");
  tm->cpu_socket_bitmap =
    clib_sysfs_list_to_bitmap ("/sys/devices/system/node/online");

  avail_cpu = clib_bitmap_dup (tm->cpu_core_bitmap);

  /* skip cores */
  for (i = 0; i < tm->skip_cores; i++)
    {
      uword c = clib_bitmap_first_set (avail_cpu);
      if (c == ~0)
        return clib_error_return (0, "no available cpus to skip");

      avail_cpu = clib_bitmap_set (avail_cpu, c, 0);
    }
  /* grab cpu for main thread */
  if (tm->main_lcore == ~0)
    {
      /* if main-lcore is not set, we try to use lcore 1 */
      if (clib_bitmap_get (avail_cpu, 1))
        tm->main_lcore = 1;
      else
        tm->main_lcore = clib_bitmap_first_set (avail_cpu);
      if (tm->main_lcore == (u8) ~0)
        return clib_error_return (0, "no available cpus to be used for the"
                                  " main thread");
    }
  else
    {
      if (clib_bitmap_get (avail_cpu, tm->main_lcore) == 0)
        return clib_error_return (0, "cpu %u is not available to be used"
                                  " for the main thread", tm->main_lcore);
    }
  avail_cpu = clib_bitmap_set (avail_cpu, tm->main_lcore, 0);

  /* assume that there is socket 0 only if there is no data from sysfs */
  if (!tm->cpu_socket_bitmap)
    tm->cpu_socket_bitmap = clib_bitmap_set (0, 0, 1);

  /* pin main thread to main_lcore */
  cpu_set_t cpuset;
  CPU_ZERO (&cpuset);
  CPU_SET (tm->main_lcore, &cpuset);
  pthread_setaffinity_np (pthread_self (), sizeof (cpu_set_t), &cpuset);

  /* Set up thread 0 */
  vec_validate_aligned (vlib_worker_threads, 0, CLIB_CACHE_LINE_BYTES);
  _vec_len (vlib_worker_threads) = 1;
  w = vlib_worker_threads;
  w->thread_mheap = clib_mem_get_heap ();
  w->thread_stack = vlib_thread_stacks[0];
  w->cpu_id = tm->main_lcore;
  w->lwp = syscall (SYS_gettid);
  w->thread_id = pthread_self ();
  tm->n_vlib_mains = 1;

  vlib_get_thread_core_numa (w, w->cpu_id);

  if (tm->sched_policy != ~0)
    {
      struct sched_param sched_param;
      if (!sched_getparam (w->lwp, &sched_param))
        {
          if (tm->sched_priority != ~0)
            sched_param.sched_priority = tm->sched_priority;
          sched_setscheduler (w->lwp, tm->sched_policy, &sched_param);
        }
    }
  /* assign threads to cores and set n_vlib_mains */
  tr = tm->next;

  while (tr)
    {
      vec_add1 (tm->registrations, tr);
      tr = tr->next;
    }

  vec_sort_with_function (tm->registrations, sort_registrations_by_no_clone);

  for (i = 0; i < vec_len (tm->registrations); i++)
    {
      tr = tm->registrations[i];
      tr->first_index = first_index;
      first_index += tr->count;
      n_vlib_mains += (tr->no_data_structure_clone == 0) ? tr->count : 0;

      /* construct coremask */
      if (tr->use_pthreads || !tr->count)
        continue;

      if (tr->coremask)
        {
          uword c;
          clib_bitmap_foreach (c, tr->coremask)
            {
              if (clib_bitmap_get (avail_cpu, c) == 0)
                return clib_error_return (0,
                                          "cpu %u is not available to be used"
                                          " for the '%s' thread", c, tr->name);

              avail_cpu = clib_bitmap_set (avail_cpu, c, 0);
            }
        }
      else
        {
          for (j = 0; j < tr->count; j++)
            {
              /* Do not use CPU 0 by default - leave it to the host and IRQs */
              uword avail_c0 = clib_bitmap_get (avail_cpu, 0);
              avail_cpu = clib_bitmap_set (avail_cpu, 0, 0);

              uword c = clib_bitmap_first_set (avail_cpu);
              /* Use CPU 0 as a last resort */
              if (c == ~0 && avail_c0)
                c = 0;

              if (c == ~0)
                return clib_error_return (0,
                                          "no available cpus to be used for"
                                          " the '%s' thread", tr->name);

              avail_cpu = clib_bitmap_set (avail_cpu, 0, avail_c0);
              avail_cpu = clib_bitmap_set (avail_cpu, c, 0);
              tr->coremask = clib_bitmap_set (tr->coremask, c, 1);
            }
        }
    }

  clib_bitmap_free (avail_cpu);

  tm->n_vlib_mains = n_vlib_mains;
  vlib_stats_set_gauge (stats_num_worker_threads_dir_index, n_vlib_mains - 1);

  /*
   * Allocate the remaining worker threads, and thread stack vector slots.
   * From now on, calls to os_get_nthreads() will return the correct answer.
   */
  vec_validate_aligned (vlib_worker_threads, first_index - 1,
                        CLIB_CACHE_LINE_BYTES);
  vec_validate (vlib_thread_stacks, vec_len (vlib_worker_threads) - 1);

  return 0;
}
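
/*
 * Illustrative configuration sketch (an assumption, not from this file):
 * the placement knobs consumed above and parsed by cpu_config() below
 * normally arrive via the "cpu" stanza of startup.conf, e.g.:
 *
 *   cpu {
 *     main-core 1
 *     corelist-workers 2-3,18-19
 *     skip-cores 4
 *   }
 */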
vlib_frame_queue_t *
vlib_frame_queue_alloc (int nelts)
{
  vlib_frame_queue_t *fq;

  fq = clib_mem_alloc_aligned (sizeof (*fq), CLIB_CACHE_LINE_BYTES);
  clib_memset (fq, 0, sizeof (*fq));
  fq->vector_threshold = 2 * VLIB_FRAME_SIZE;
  vec_validate_aligned (fq->elts, nelts - 1, CLIB_CACHE_LINE_BYTES);

  if (nelts & (nelts - 1))
    {
      fformat (stderr, "FATAL: nelts MUST be a power of 2\n");
      abort ();
    }

  return fq;
}
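
/*
 * Illustrative note (an assumption, not from this file): requiring a
 * power-of-2 element count lets producer/consumer positions wrap with a
 * cheap mask instead of a modulo, e.g. elt = fq->elts + (tail & (nelts - 1)).
 */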
void vl_msg_api_handler_no_free (void *) __attribute__ ((weak));
void
vl_msg_api_handler_no_free (void *v)
{
}
/* To be called by vlib worker threads upon startup */
void
vlib_worker_thread_init (vlib_worker_thread_t * w)
{
  vlib_thread_main_t *tm = vlib_get_thread_main ();

  /*
   * Note: disabling signals in worker threads as follows
   * prevents the api post-mortem dump scheme from working, e.g.:
   *   pthread_sigmask (SIG_SETMASK, &s, 0);
   */

  clib_mem_set_heap (w->thread_mheap);

  if (vec_len (tm->thread_prefix) && w->registration->short_name)
    {
      w->name = format (0, "%v_%s_%d%c", tm->thread_prefix,
                        w->registration->short_name, w->instance_id, '\0');
      vlib_set_thread_name ((char *) w->name);
    }

  if (!w->registration->use_pthreads)
    {
      /* Initial barrier sync, for both worker and i/o threads */
      clib_atomic_fetch_add (vlib_worker_threads->workers_at_barrier, 1);

      while (*vlib_worker_threads->wait_at_barrier)
        ;

      clib_atomic_fetch_add (vlib_worker_threads->workers_at_barrier, -1);
    }
}
void *
vlib_worker_thread_bootstrap_fn (void *arg)
{
  vlib_worker_thread_t *w = arg;

  w->lwp = syscall (SYS_gettid);
  w->thread_id = pthread_self ();

  __os_thread_index = w - vlib_worker_threads;

  if (CLIB_DEBUG > 0)
    {
      void *frame_addr = __builtin_frame_address (0);
      if (frame_addr < (void *) w->thread_stack ||
          frame_addr > (void *) w->thread_stack + VLIB_THREAD_STACK_SIZE)
        {
          /* heap is not set yet */
          fprintf (stderr, "thread stack is not set properly\n");
          exit (1);
        }
    }

  w->thread_function (arg);

  return 0;
}
void
vlib_get_thread_core_numa (vlib_worker_thread_t * w, unsigned cpu_id)
{
  const char *sys_cpu_path = "/sys/devices/system/cpu/cpu";
  const char *sys_node_path = "/sys/devices/system/node/node";
  clib_bitmap_t *nbmp = 0, *cbmp = 0;
  u32 node;
  u8 *p = 0;
  int core_id = -1, numa_id = -1;

  p = format (p, "%s%u/topology/core_id%c", sys_cpu_path, cpu_id, 0);
  clib_sysfs_read ((char *) p, "%d", &core_id);
  vec_reset_length (p);

  /* determine the NUMA node by scanning each node's cpulist */
  clib_sysfs_read ("/sys/devices/system/node/online", "%U",
                   unformat_bitmap_list, &nbmp);
  clib_bitmap_foreach (node, nbmp)
    {
      p = format (p, "%s%u/cpulist%c", sys_node_path, node, 0);
      clib_sysfs_read ((char *) p, "%U", unformat_bitmap_list, &cbmp);
      if (clib_bitmap_get (cbmp, cpu_id))
        numa_id = node;
      vec_reset_length (cbmp);
      vec_reset_length (p);
    }

  vec_free (nbmp);
  vec_free (cbmp);
  vec_free (p);

  w->core_id = core_id;
  w->numa_id = numa_id;
}
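
/*
 * Illustrative example (an assumption about a typical host, not from this
 * file): for cpu_id 4 on a two-socket machine the reads above consult
 *   /sys/devices/system/cpu/cpu4/topology/core_id
 *   /sys/devices/system/node/node0/cpulist
 * and numa_id ends up as the first node whose cpulist contains cpu 4.
 */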
static clib_error_t *
vlib_launch_thread_int (void *fp, vlib_worker_thread_t * w, unsigned cpu_id)
{
  clib_mem_main_t *mm = &clib_mem_main;
  vlib_thread_main_t *tm = &vlib_thread_main;
  pthread_t worker;
  pthread_attr_t attr;
  cpu_set_t cpuset;
  void *(*fp_arg) (void *) = fp;
  void *numa_heap;

  w->cpu_id = cpu_id;
  vlib_get_thread_core_numa (w, cpu_id);

  /* Set up NUMA-bound heap if indicated */
  if (mm->per_numa_mheaps[w->numa_id] == 0)
    {
      /* If the user requested a NUMA heap, create it... */
      if (tm->numa_heap_size)
        {
          clib_mem_set_numa_affinity (w->numa_id, 1 /* force */ );
          numa_heap = clib_mem_create_heap (0 /* DIY */ , tm->numa_heap_size,
                                            1 /* is_locked */ ,
                                            "numa %u heap", w->numa_id);
          clib_mem_set_default_numa_affinity ();
          mm->per_numa_mheaps[w->numa_id] = numa_heap;
        }
      else
        {
          /* Or, use the main heap */
          mm->per_numa_mheaps[w->numa_id] = w->thread_mheap;
        }
    }

  CPU_ZERO (&cpuset);
  CPU_SET (cpu_id, &cpuset);

  if (pthread_attr_init (&attr))
    return clib_error_return_unix (0, "pthread_attr_init");

  if (pthread_attr_setstack (&attr, w->thread_stack,
                             VLIB_THREAD_STACK_SIZE))
    return clib_error_return_unix (0, "pthread_attr_setstack");

  if (pthread_create (&worker, &attr, fp_arg, (void *) w))
    return clib_error_return_unix (0, "pthread_create");

  if (pthread_setaffinity_np (worker, sizeof (cpu_set_t), &cpuset))
    return clib_error_return_unix (0, "pthread_setaffinity_np");

  if (pthread_attr_destroy (&attr))
    return clib_error_return_unix (0, "pthread_attr_destroy");

  return 0;
}
static clib_error_t *
start_workers (vlib_main_t * vm)
{
  vlib_global_main_t *vgm = vlib_get_global_main ();
  int i, j;
  vlib_worker_thread_t *w;
  vlib_main_t *vm_clone;
  void *oldheap;
  vlib_thread_main_t *tm = &vlib_thread_main;
  vlib_thread_registration_t *tr;
  vlib_node_runtime_t *rt;
  u32 n_vlib_mains = tm->n_vlib_mains;
  u32 worker_thread_index;
  clib_mem_heap_t *main_heap = clib_mem_get_per_cpu_heap ();

  vlib_stats_register_mem_heap (main_heap);

  vec_reset_length (vlib_worker_threads);

  /* Set up the main thread */
  vec_add2_aligned (vlib_worker_threads, w, 1, CLIB_CACHE_LINE_BYTES);
  w->elog_track.name = "main thread";
  elog_track_register (vlib_get_elog_main (), &w->elog_track);

  if (vec_len (tm->thread_prefix))
    {
      w->name = format (0, "%v_main%c", tm->thread_prefix, '\0');
      vlib_set_thread_name ((char *) w->name);
    }

  vgm->elog_main.lock =
    clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES, CLIB_CACHE_LINE_BYTES);
  vgm->elog_main.lock[0] = 0;

  clib_callback_data_init (&vm->vlib_node_runtime_perf_callbacks,
                           &vm->worker_thread_main_loop_callback_lock);

  vec_validate_aligned (vgm->vlib_mains, n_vlib_mains - 1,
                        CLIB_CACHE_LINE_BYTES);
  _vec_len (vgm->vlib_mains) = 0;
  vec_add1_aligned (vgm->vlib_mains, vm, CLIB_CACHE_LINE_BYTES);
  if (n_vlib_mains > 1)
    {
      vlib_worker_threads->wait_at_barrier =
        clib_mem_alloc_aligned (sizeof (u32), CLIB_CACHE_LINE_BYTES);
      vlib_worker_threads->workers_at_barrier =
        clib_mem_alloc_aligned (sizeof (u32), CLIB_CACHE_LINE_BYTES);

      vlib_worker_threads->node_reforks_required =
        clib_mem_alloc_aligned (sizeof (u32), CLIB_CACHE_LINE_BYTES);

      /* We'll need the rpc vector lock... */
      clib_spinlock_init (&vm->pending_rpc_lock);

      /* Ask for an initial barrier sync */
      *vlib_worker_threads->workers_at_barrier = 0;
      *vlib_worker_threads->wait_at_barrier = 1;

      /* Without update or refork */
      *vlib_worker_threads->node_reforks_required = 0;
      vgm->need_vlib_worker_thread_node_runtime_update = 0;

      /* init timing */
      vm->barrier_epoch = 0;
      vm->barrier_no_close_before = 0;

      worker_thread_index = 1;
      clib_spinlock_init (&vm->worker_thread_main_loop_callback_lock);
      for (i = 0; i < vec_len (tm->registrations); i++)
        {
          vlib_node_main_t *nm, *nm_clone;
          int k;

          tr = tm->registrations[i];

          if (tr->count == 0)
            continue;

          for (k = 0; k < tr->count; k++)
            {
              vlib_node_t *n;

              vec_add2 (vlib_worker_threads, w, 1);
              /* Currently unused, may not really work */
              if (tr->mheap_size)
                w->thread_mheap = clib_mem_create_heap (0, tr->mheap_size,
                                                        /* locked */ 0,
                                                        "%s%d heap",
                                                        tr->name, k);
              else
                w->thread_mheap = main_heap;

              w->thread_stack =
                vlib_thread_stack_init (w - vlib_worker_threads);
              w->thread_function = tr->function;
              w->thread_function_arg = w;
              w->instance_id = k;
              w->registration = tr;

              w->elog_track.name =
                (char *) format (0, "%s %d", tr->name, k + 1);
              vec_add1 (w->elog_track.name, 0);
              elog_track_register (vlib_get_elog_main (), &w->elog_track);

              if (tr->no_data_structure_clone)
                continue;

              /* Fork vlib_global_main et al. Look for bugs here */
              oldheap = clib_mem_set_heap (w->thread_mheap);

              vm_clone = clib_mem_alloc_aligned (sizeof (*vm_clone),
                                                 CLIB_CACHE_LINE_BYTES);
              clib_memcpy (vm_clone, vlib_get_first_main (),
                           sizeof (*vm_clone));

              vm_clone->thread_index = worker_thread_index;
              vm_clone->heap_base = w->thread_mheap;
              vm_clone->heap_aligned_base =
                (void *) (((uword) w->thread_mheap) &
                          ~(CLIB_CACHE_LINE_BYTES - 1));
              vm_clone->pending_rpc_requests = 0;
              vec_validate (vm_clone->pending_rpc_requests, 0);
              _vec_len (vm_clone->pending_rpc_requests) = 0;
              clib_memset (&vm_clone->random_buffer, 0,
                           sizeof (vm_clone->random_buffer));
              clib_spinlock_init
                (&vm_clone->worker_thread_main_loop_callback_lock);
              clib_callback_data_init
                (&vm_clone->vlib_node_runtime_perf_callbacks,
                 &vm_clone->worker_thread_main_loop_callback_lock);
              nm = &vlib_get_first_main ()->node_main;
              nm_clone = &vm_clone->node_main;

              /* fork next frames array, preserving node runtime indices */
              nm_clone->next_frames = vec_dup_aligned (nm->next_frames,
                                                       CLIB_CACHE_LINE_BYTES);
              for (j = 0; j < vec_len (nm_clone->next_frames); j++)
                {
                  vlib_next_frame_t *nf = &nm_clone->next_frames[j];
                  u32 save_node_runtime_index;
                  u32 save_flags;

                  save_node_runtime_index = nf->node_runtime_index;
                  save_flags = nf->flags & VLIB_FRAME_NO_FREE_AFTER_DISPATCH;
                  vlib_next_frame_init (nf);
                  nf->node_runtime_index = save_node_runtime_index;
                  nf->flags = save_flags;
                }

              /* fork the frame dispatch queue */
              nm_clone->pending_frames = 0;
              vec_validate (nm_clone->pending_frames, 10);
              _vec_len (nm_clone->pending_frames) = 0;
              /* fork nodes */
              nm_clone->nodes = 0;

              /* Allocate all nodes in single block for speed */
              n = clib_mem_alloc_no_fail (vec_len (nm->nodes) * sizeof (*n));

              for (j = 0; j < vec_len (nm->nodes); j++)
                {
                  clib_memcpy (n, nm->nodes[j], sizeof (*n));
                  /* none of the copied nodes have enqueue rights given out */
                  n->owner_node_index = VLIB_INVALID_NODE_INDEX;
                  clib_memset (&n->stats_total, 0, sizeof (n->stats_total));
                  clib_memset (&n->stats_last_clear, 0,
                               sizeof (n->stats_last_clear));
                  vec_add1 (nm_clone->nodes, n);
                  n++;
                }

              nm_clone->nodes_by_type[VLIB_NODE_TYPE_INTERNAL] =
                vec_dup_aligned (nm->nodes_by_type[VLIB_NODE_TYPE_INTERNAL],
                                 CLIB_CACHE_LINE_BYTES);
              vec_foreach (rt,
                           nm_clone->nodes_by_type[VLIB_NODE_TYPE_INTERNAL])
                {
                  vlib_node_t *n = vlib_get_node (vm, rt->node_index);
                  /* copy initial runtime_data from node */
                  if (n->runtime_data && n->runtime_data_bytes > 0)
                    clib_memcpy (rt->runtime_data, n->runtime_data,
                                 clib_min (VLIB_NODE_RUNTIME_DATA_SIZE,
                                           n->runtime_data_bytes));
                }

              nm_clone->nodes_by_type[VLIB_NODE_TYPE_INPUT] =
                vec_dup_aligned (nm->nodes_by_type[VLIB_NODE_TYPE_INPUT],
                                 CLIB_CACHE_LINE_BYTES);
              clib_interrupt_init (
                &nm_clone->interrupts,
                vec_len (nm_clone->nodes_by_type[VLIB_NODE_TYPE_INPUT]));
              vec_foreach (rt, nm_clone->nodes_by_type[VLIB_NODE_TYPE_INPUT])
                {
                  vlib_node_t *n = vlib_get_node (vm, rt->node_index);
                  /* copy initial runtime_data from node */
                  if (n->runtime_data && n->runtime_data_bytes > 0)
                    clib_memcpy (rt->runtime_data, n->runtime_data,
                                 clib_min (VLIB_NODE_RUNTIME_DATA_SIZE,
                                           n->runtime_data_bytes));
                }

              nm_clone->nodes_by_type[VLIB_NODE_TYPE_PRE_INPUT] =
                vec_dup_aligned (nm->nodes_by_type[VLIB_NODE_TYPE_PRE_INPUT],
                                 CLIB_CACHE_LINE_BYTES);
              vec_foreach (rt,
                           nm_clone->nodes_by_type[VLIB_NODE_TYPE_PRE_INPUT])
                {
                  vlib_node_t *n = vlib_get_node (vm, rt->node_index);
                  /* copy initial runtime_data from node */
                  if (n->runtime_data && n->runtime_data_bytes > 0)
                    clib_memcpy (rt->runtime_data, n->runtime_data,
                                 clib_min (VLIB_NODE_RUNTIME_DATA_SIZE,
                                           n->runtime_data_bytes));
                }

              nm_clone->processes = vec_dup_aligned (nm->processes,
                                                     CLIB_CACHE_LINE_BYTES);

              /* Create per-thread frame freelist */
              nm_clone->frame_sizes = 0;
              nm_clone->node_by_error = nm->node_by_error;

              /* Packet trace buffers are guaranteed to be empty, nothing to do here */

              clib_mem_set_heap (oldheap);
              vec_add1_aligned (vgm->vlib_mains, vm_clone,
                                CLIB_CACHE_LINE_BYTES);

              /* Switch to the stats segment ... */
              void *oldheap = vlib_stats_set_heap ();
              vm_clone->error_main.counters =
                vec_dup_aligned (vlib_get_first_main ()->error_main.counters,
                                 CLIB_CACHE_LINE_BYTES);
              clib_mem_set_heap (oldheap);
              vlib_stats_update_error_vector (vm_clone->error_main.counters,
                                              worker_thread_index, 1);

              vm_clone->error_main.counters_last_clear = vec_dup_aligned (
                vlib_get_first_main ()->error_main.counters_last_clear,
                CLIB_CACHE_LINE_BYTES);

              worker_thread_index++;
            }
        }
    }
  else
    {
      /* only have non-data-structure copy threads to create... */
      for (i = 0; i < vec_len (tm->registrations); i++)
        {
          tr = tm->registrations[i];

          for (j = 0; j < tr->count; j++)
            {
              vec_add2 (vlib_worker_threads, w, 1);
              if (tr->mheap_size)
                w->thread_mheap = clib_mem_create_heap (0, tr->mheap_size,
                                                        /* locked */ 0,
                                                        "%s%d heap",
                                                        tr->name, j);
              else
                w->thread_mheap = main_heap;
              w->thread_stack =
                vlib_thread_stack_init (w - vlib_worker_threads);
              w->thread_function = tr->function;
              w->thread_function_arg = w;
              w->instance_id = j;
              w->elog_track.name =
                (char *) format (0, "%s %d", tr->name, j + 1);
              w->registration = tr;
              vec_add1 (w->elog_track.name, 0);
              elog_track_register (vlib_get_elog_main (), &w->elog_track);
            }
        }
    }
  worker_thread_index = 1;

  for (i = 0; i < vec_len (tm->registrations); i++)
    {
      clib_error_t *err;

      tr = tm->registrations[i];

      if (tr->use_pthreads || tm->use_pthreads)
        {
          for (j = 0; j < tr->count; j++)
            {
              w = vlib_worker_threads + worker_thread_index++;
              err = vlib_launch_thread_int (vlib_worker_thread_bootstrap_fn,
                                            w, 0);
              if (err)
                clib_error_report (err);
            }
        }
      else
        {
          uword c;
          clib_bitmap_foreach (c, tr->coremask)
            {
              w = vlib_worker_threads + worker_thread_index++;
              err = vlib_launch_thread_int (vlib_worker_thread_bootstrap_fn,
                                            w, c);
              if (err)
                clib_error_report (err);
            }
        }
    }

  vlib_worker_thread_barrier_sync (vm);
  vlib_worker_thread_barrier_release (vm);

  return 0;
}

VLIB_MAIN_LOOP_ENTER_FUNCTION (start_workers);
static void
worker_thread_node_runtime_update_internal (void)
{
  int i, j;
  vlib_main_t *vm;
  vlib_node_main_t *nm, *nm_clone;
  vlib_main_t *vm_clone;
  vlib_node_runtime_t *rt;

  ASSERT (vlib_get_thread_index () == 0);

  vm = vlib_get_first_main ();
  nm = &vm->node_main;

  ASSERT (*vlib_worker_threads->wait_at_barrier == 1);

  /*
   * Scrape all runtime stats, so we don't lose node runtime(s) with
   * pending counts, or throw away worker / io thread counts.
   */
  for (j = 0; j < vec_len (nm->nodes); j++)
    {
      vlib_node_t *n;
      n = nm->nodes[j];
      vlib_node_sync_stats (vm, n);
    }

  for (i = 1; i < vlib_get_n_threads (); i++)
    {
      vlib_node_t *n;

      vm_clone = vlib_get_main_by_index (i);
      nm_clone = &vm_clone->node_main;

      for (j = 0; j < vec_len (nm_clone->nodes); j++)
        {
          n = nm_clone->nodes[j];

          rt = vlib_node_get_runtime (vm_clone, n->index);
          vlib_node_runtime_sync_stats (vm_clone, rt, 0, 0, 0);
        }
    }

  /* Per-worker clone rebuilds are now done on each thread */
}
void
vlib_worker_thread_node_refork (void)
{
  vlib_main_t *vm, *vm_clone;
  vlib_node_main_t *nm, *nm_clone;
  vlib_node_t **old_nodes_clone;
  vlib_node_runtime_t *rt, *old_rt;
  vlib_node_t *new_n_clone;
  int j;

  vm = vlib_get_first_main ();
  nm = &vm->node_main;
  vm_clone = vlib_get_main ();
  nm_clone = &vm_clone->node_main;

  /* Re-clone error heap */
  u64 *old_counters = vm_clone->error_main.counters;
  u64 *old_counters_all_clear = vm_clone->error_main.counters_last_clear;

  clib_memcpy_fast (&vm_clone->error_main, &vm->error_main,
                    sizeof (vm->error_main));
  j = vec_len (vm->error_main.counters) - 1;

  /* Switch to the stats segment ... */
  void *oldheap = vlib_stats_set_heap ();
  vec_validate_aligned (old_counters, j, CLIB_CACHE_LINE_BYTES);
  clib_mem_set_heap (oldheap);
  vm_clone->error_main.counters = old_counters;
  vlib_stats_update_error_vector (vm_clone->error_main.counters,
                                  vm_clone->thread_index, 0);

  vec_validate_aligned (old_counters_all_clear, j, CLIB_CACHE_LINE_BYTES);
  vm_clone->error_main.counters_last_clear = old_counters_all_clear;

  nm_clone = &vm_clone->node_main;
  vec_free (nm_clone->next_frames);
  nm_clone->next_frames = vec_dup_aligned (nm->next_frames,
                                           CLIB_CACHE_LINE_BYTES);

  for (j = 0; j < vec_len (nm_clone->next_frames); j++)
    {
      vlib_next_frame_t *nf = &nm_clone->next_frames[j];
      u32 save_node_runtime_index;
      u32 save_flags;

      save_node_runtime_index = nf->node_runtime_index;
      save_flags = nf->flags & VLIB_FRAME_NO_FREE_AFTER_DISPATCH;
      vlib_next_frame_init (nf);
      nf->node_runtime_index = save_node_runtime_index;
      nf->flags = save_flags;
    }

  old_nodes_clone = nm_clone->nodes;
  nm_clone->nodes = 0;

  /* re-fork nodes */

  /* Allocate all nodes in single block for speed */
  new_n_clone =
    clib_mem_alloc_no_fail (vec_len (nm->nodes) * sizeof (*new_n_clone));
  for (j = 0; j < vec_len (nm->nodes); j++)
    {
      vlib_node_t *new_n = nm->nodes[j];

      clib_memcpy_fast (new_n_clone, new_n, sizeof (*new_n));
      /* none of the copied nodes have enqueue rights given out */
      new_n_clone->owner_node_index = VLIB_INVALID_NODE_INDEX;

      if (j >= vec_len (old_nodes_clone))
        {
          /* new node, set to zero */
          clib_memset (&new_n_clone->stats_total, 0,
                       sizeof (new_n_clone->stats_total));
          clib_memset (&new_n_clone->stats_last_clear, 0,
                       sizeof (new_n_clone->stats_last_clear));
        }
      else
        {
          vlib_node_t *old_n_clone = old_nodes_clone[j];
          /* Copy stats if the old data is valid */
          clib_memcpy_fast (&new_n_clone->stats_total,
                            &old_n_clone->stats_total,
                            sizeof (new_n_clone->stats_total));
          clib_memcpy_fast (&new_n_clone->stats_last_clear,
                            &old_n_clone->stats_last_clear,
                            sizeof (new_n_clone->stats_last_clear));

          /* keep previous node state */
          new_n_clone->state = old_n_clone->state;
          new_n_clone->flags = old_n_clone->flags;
        }

      vec_add1 (nm_clone->nodes, new_n_clone);
      new_n_clone++;
    }
  /* Free the old node clones */
  clib_mem_free (old_nodes_clone[0]);

  vec_free (old_nodes_clone);
  /* re-clone internal nodes */
  old_rt = nm_clone->nodes_by_type[VLIB_NODE_TYPE_INTERNAL];
  nm_clone->nodes_by_type[VLIB_NODE_TYPE_INTERNAL] =
    vec_dup_aligned (nm->nodes_by_type[VLIB_NODE_TYPE_INTERNAL],
                     CLIB_CACHE_LINE_BYTES);

  vec_foreach (rt, nm_clone->nodes_by_type[VLIB_NODE_TYPE_INTERNAL])
    {
      vlib_node_t *n = vlib_get_node (vm, rt->node_index);
      /* copy runtime_data, will be overwritten later for existing rt */
      if (n->runtime_data && n->runtime_data_bytes > 0)
        clib_memcpy_fast (rt->runtime_data, n->runtime_data,
                          clib_min (VLIB_NODE_RUNTIME_DATA_SIZE,
                                    n->runtime_data_bytes));
    }

  for (j = 0; j < vec_len (old_rt); j++)
    {
      rt = vlib_node_get_runtime (vm_clone, old_rt[j].node_index);
      rt->state = old_rt[j].state;
      rt->flags = old_rt[j].flags;
      clib_memcpy_fast (rt->runtime_data, old_rt[j].runtime_data,
                        VLIB_NODE_RUNTIME_DATA_SIZE);
    }

  vec_free (old_rt);
  /* re-clone input nodes */
  old_rt = nm_clone->nodes_by_type[VLIB_NODE_TYPE_INPUT];
  nm_clone->nodes_by_type[VLIB_NODE_TYPE_INPUT] =
    vec_dup_aligned (nm->nodes_by_type[VLIB_NODE_TYPE_INPUT],
                     CLIB_CACHE_LINE_BYTES);
  clib_interrupt_resize (
    &nm_clone->interrupts,
    vec_len (nm_clone->nodes_by_type[VLIB_NODE_TYPE_INPUT]));

  vec_foreach (rt, nm_clone->nodes_by_type[VLIB_NODE_TYPE_INPUT])
    {
      vlib_node_t *n = vlib_get_node (vm, rt->node_index);
      /* copy runtime_data, will be overwritten later for existing rt */
      if (n->runtime_data && n->runtime_data_bytes > 0)
        clib_memcpy_fast (rt->runtime_data, n->runtime_data,
                          clib_min (VLIB_NODE_RUNTIME_DATA_SIZE,
                                    n->runtime_data_bytes));
    }

  for (j = 0; j < vec_len (old_rt); j++)
    {
      rt = vlib_node_get_runtime (vm_clone, old_rt[j].node_index);
      rt->state = old_rt[j].state;
      rt->flags = old_rt[j].flags;
      clib_memcpy_fast (rt->runtime_data, old_rt[j].runtime_data,
                        VLIB_NODE_RUNTIME_DATA_SIZE);
    }

  vec_free (old_rt);
  /* re-clone pre-input nodes */
  old_rt = nm_clone->nodes_by_type[VLIB_NODE_TYPE_PRE_INPUT];
  nm_clone->nodes_by_type[VLIB_NODE_TYPE_PRE_INPUT] =
    vec_dup_aligned (nm->nodes_by_type[VLIB_NODE_TYPE_PRE_INPUT],
                     CLIB_CACHE_LINE_BYTES);

  vec_foreach (rt, nm_clone->nodes_by_type[VLIB_NODE_TYPE_PRE_INPUT])
    {
      vlib_node_t *n = vlib_get_node (vm, rt->node_index);
      /* copy runtime_data, will be overwritten later for existing rt */
      if (n->runtime_data && n->runtime_data_bytes > 0)
        clib_memcpy_fast (rt->runtime_data, n->runtime_data,
                          clib_min (VLIB_NODE_RUNTIME_DATA_SIZE,
                                    n->runtime_data_bytes));
    }

  for (j = 0; j < vec_len (old_rt); j++)
    {
      rt = vlib_node_get_runtime (vm_clone, old_rt[j].node_index);
      rt->state = old_rt[j].state;
      rt->flags = old_rt[j].flags;
      clib_memcpy_fast (rt->runtime_data, old_rt[j].runtime_data,
                        VLIB_NODE_RUNTIME_DATA_SIZE);
    }

  vec_free (old_rt);

  vec_free (nm_clone->processes);
  nm_clone->processes = vec_dup_aligned (nm->processes,
                                         CLIB_CACHE_LINE_BYTES);
  nm_clone->node_by_error = nm->node_by_error;
}
void
vlib_worker_thread_node_runtime_update (void)
{
  /*
   * Make a note that we need to do a node runtime update
   * prior to releasing the barrier.
   */
  vlib_global_main.need_vlib_worker_thread_node_runtime_update = 1;
}
static uword
unformat_sched_policy (unformat_input_t * input, va_list * args)
{
  u32 *r = va_arg (*args, u32 *);

  if (0)
    ;
#define _(v,f,s) else if (unformat (input, s)) *r = SCHED_POLICY_##f;
  foreach_sched_policy
#undef _
  else
    return 0;
  return 1;
}
static clib_error_t *
cpu_config (vlib_main_t * vm, unformat_input_t * input)
{
  vlib_thread_registration_t *tr;
  uword *p;
  vlib_thread_main_t *tm = &vlib_thread_main;
  u8 *name;
  uword *bitmap;
  u32 count;

  tm->thread_registrations_by_name = hash_create_string (0, sizeof (uword));

  tm->n_thread_stacks = 1;      /* account for main thread */
  tm->sched_policy = ~0;
  tm->sched_priority = ~0;
  tm->main_lcore = ~0;

  tr = tm->next;

  while (tr)
    {
      hash_set_mem (tm->thread_registrations_by_name, tr->name, (uword) tr);
      tr = tr->next;
    }

  while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
    {
      if (unformat (input, "use-pthreads"))
        tm->use_pthreads = 1;
      else if (unformat (input, "thread-prefix %v", &tm->thread_prefix))
        ;
      else if (unformat (input, "main-core %u", &tm->main_lcore))
        ;
      else if (unformat (input, "skip-cores %u", &tm->skip_cores))
        ;
      else if (unformat (input, "numa-heap-size %U",
                         unformat_memory_size, &tm->numa_heap_size))
        ;
      else if (unformat (input, "coremask-%s %U", &name,
                         unformat_bitmap_mask, &bitmap) ||
               unformat (input, "corelist-%s %U", &name,
                         unformat_bitmap_list, &bitmap))
        {
          p = hash_get_mem (tm->thread_registrations_by_name, name);
          if (p == 0)
            return clib_error_return (0, "no such thread type '%s'", name);

          tr = (vlib_thread_registration_t *) p[0];

          if (tr->use_pthreads)
            return clib_error_return (0,
                                      "corelist cannot be set for '%s' threads",
                                      name);
          if (tr->count)
            return clib_error_return
              (0, "core placement of '%s' threads is already configured",
               name);

          tr->coremask = bitmap;
          tr->count = clib_bitmap_count_set_bits (tr->coremask);
        }
      else if (unformat
               (input, "scheduler-policy %U", unformat_sched_policy,
                &tm->sched_policy))
        ;
      else if (unformat (input, "scheduler-priority %u", &tm->sched_priority))
        ;
      else if (unformat (input, "%s %u", &name, &count))
        {
          p = hash_get_mem (tm->thread_registrations_by_name, name);
          if (p == 0)
            return clib_error_return (0, "no such thread type '%s'", name);

          tr = (vlib_thread_registration_t *) p[0];

          if (tr->fixed_count)
            return clib_error_return
              (0, "number of '%s' threads not configurable", name);
          if (tr->count)
            return clib_error_return
              (0, "number of '%s' threads is already configured", name);

          tr->count = count;
        }
      else
        break;
    }

  if (tm->sched_priority != ~0)
    {
      if (tm->sched_policy == SCHED_FIFO || tm->sched_policy == SCHED_RR)
        {
          u32 prio_max = sched_get_priority_max (tm->sched_policy);
          u32 prio_min = sched_get_priority_min (tm->sched_policy);

          if (tm->sched_priority > prio_max)
            tm->sched_priority = prio_max;
          if (tm->sched_priority < prio_min)
            tm->sched_priority = prio_min;
        }
      else
        return clib_error_return
          (0,
           "scheduling priority (%d) is not allowed for `normal` scheduling policy",
           tm->sched_priority);
    }

  tr = tm->next;

  if (!tm->thread_prefix)
    tm->thread_prefix = format (0, "vpp");

  while (tr)
    {
      tm->n_thread_stacks += tr->count;
      tm->n_pthreads += tr->count * tr->use_pthreads;
      tm->n_threads += tr->count * (tr->use_pthreads == 0);
      tr = tr->next;
    }

  return 0;
}

VLIB_EARLY_CONFIG_FUNCTION (cpu_config, "cpu");
/*
 * Enforce minimum open time to minimize packet loss due to Rx overflow,
 * based on a test-based heuristic that the barrier should be open for at
 * least 3 times as long as it is closed (with an upper bound of 1 ms,
 * because past that point it is probably too late to make a difference).
 */

#ifndef BARRIER_MINIMUM_OPEN_LIMIT
#define BARRIER_MINIMUM_OPEN_LIMIT 0.001
#endif

#ifndef BARRIER_MINIMUM_OPEN_FACTOR
#define BARRIER_MINIMUM_OPEN_FACTOR 3
#endif
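
/*
 * Worked example (illustration only, not from the original source): if a
 * barrier closure kept workers stopped for 200 us, the next closure is
 * deferred until at least 3 * 200 us = 600 us of open time have elapsed;
 * had the closure lasted 500 us, the 3x product (1.5 ms) would be capped
 * at BARRIER_MINIMUM_OPEN_LIMIT, i.e. 1 ms.
 */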
void
vlib_worker_thread_initial_barrier_sync_and_release (vlib_main_t * vm)
{
  f64 deadline;
  f64 now = vlib_time_now (vm);
  u32 count = vlib_get_n_threads () - 1;

  /* No worker threads? */
  if (count == 0)
    return;

  deadline = now + BARRIER_SYNC_TIMEOUT;
  *vlib_worker_threads->wait_at_barrier = 1;
  while (*vlib_worker_threads->workers_at_barrier != count)
    {
      if ((now = vlib_time_now (vm)) > deadline)
        {
          fformat (stderr, "%s: worker thread deadlock\n", __FUNCTION__);
          os_panic ();
        }
    }

  *vlib_worker_threads->wait_at_barrier = 0;
}
/*
 * Return true if the worker thread barrier is held
 */
u8
vlib_worker_thread_barrier_held (void)
{
  if (vlib_get_n_threads () < 2)
    return (1);

  return (*vlib_worker_threads->wait_at_barrier == 1);
}
void
vlib_worker_thread_barrier_sync_int (vlib_main_t * vm, const char *func_name)
{
  f64 deadline, now, t_entry, t_open, t_closed;
  f64 max_vector_rate;
  u32 count;
  int i;

  if (vlib_get_n_threads () < 2)
    return;

  ASSERT (vlib_get_thread_index () == 0);

  vlib_worker_threads[0].barrier_caller = func_name;
  count = vlib_get_n_threads () - 1;

  /* Record entry relative to last close */
  now = vlib_time_now (vm);
  t_entry = now - vm->barrier_epoch;

  /* Tolerate recursive calls */
  if (++vlib_worker_threads[0].recursion_level > 1)
    {
      barrier_trace_sync_rec (t_entry);
      return;
    }

  if (PREDICT_FALSE (vec_len (vm->barrier_perf_callbacks) != 0))
    clib_call_callbacks (vm->barrier_perf_callbacks, vm,
                         vm->clib_time.last_cpu_time, 0 /* enter */ );

  /*
   * Need data to decide if we're working hard enough to honor
   * the barrier hold-down timer.
   */
  max_vector_rate = 0.0;
  for (i = 1; i < vlib_get_n_threads (); i++)
    {
      vlib_main_t *ovm = vlib_get_main_by_index (i);
      max_vector_rate = clib_max (max_vector_rate,
                                  (f64) vlib_last_vectors_per_main_loop (ovm));
    }

  vlib_worker_threads[0].barrier_sync_count++;

  /* Enforce minimum barrier open time to minimize packet loss */
  ASSERT (vm->barrier_no_close_before <= (now + BARRIER_MINIMUM_OPEN_LIMIT));

  /*
   * If any worker thread seems busy, which we define
   * as a vector rate above 10, we enforce the barrier hold-down timer
   */
  if (max_vector_rate > 10.0)
    {
      while (1)
        {
          now = vlib_time_now (vm);
          /* Barrier hold-down timer expired? */
          if (now >= vm->barrier_no_close_before)
            break;
          if ((vm->barrier_no_close_before - now)
              > (2.0 * BARRIER_MINIMUM_OPEN_LIMIT))
            {
              clib_warning
                ("clock change: would have waited for %.4f seconds",
                 (vm->barrier_no_close_before - now));
              break;
            }
        }
    }

  /* Record time of closure */
  t_open = now - vm->barrier_epoch;
  vm->barrier_epoch = now;

  deadline = now + BARRIER_SYNC_TIMEOUT;

  *vlib_worker_threads->wait_at_barrier = 1;
  while (*vlib_worker_threads->workers_at_barrier != count)
    {
      if ((now = vlib_time_now (vm)) > deadline)
        {
          fformat (stderr, "%s: worker thread deadlock\n", __FUNCTION__);
          os_panic ();
        }
    }

  t_closed = now - vm->barrier_epoch;

  barrier_trace_sync (t_entry, t_open, t_closed);
}
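
/*
 * Usage sketch (an assumption about the surrounding API, not from this
 * file): callers normally reach this through the
 * vlib_worker_thread_barrier_sync() wrapper, which is believed to supply
 * __FUNCTION__ as func_name for the traces above, roughly:
 *
 *   vlib_worker_thread_barrier_sync (vm);
 *   ... mutate data structures shared with workers ...
 *   vlib_worker_thread_barrier_release (vm);
 */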
void
vlib_worker_thread_barrier_release (vlib_main_t * vm)
{
  vlib_global_main_t *vgm = vlib_get_global_main ();
  f64 deadline, now, minimum_open, t_entry, t_closed_total;
  f64 t_update_main = 0.0;
  int refork_needed = 0;

  if (vlib_get_n_threads () < 2)
    return;

  ASSERT (vlib_get_thread_index () == 0);

  now = vlib_time_now (vm);
  t_entry = now - vm->barrier_epoch;

  if (--vlib_worker_threads[0].recursion_level > 0)
    {
      barrier_trace_release_rec (t_entry);
      return;
    }

  /* Update (all) node runtimes before releasing the barrier, if needed */
  if (vgm->need_vlib_worker_thread_node_runtime_update)
    {
      /*
       * Lock stat segment here, so we're safe when
       * rebuilding the stat segment node clones from the
       * stats thread...
       */
      vlib_stats_segment_lock ();

      /* Do stats elements on main thread */
      worker_thread_node_runtime_update_internal ();
      vgm->need_vlib_worker_thread_node_runtime_update = 0;

      /* Do per thread rebuilds in parallel */
      refork_needed = 1;
      clib_atomic_fetch_add (vlib_worker_threads->node_reforks_required,
                             (vlib_get_n_threads () - 1));
      now = vlib_time_now (vm);
      t_update_main = now - vm->barrier_epoch;
    }

  deadline = now + BARRIER_SYNC_TIMEOUT;

  /*
   * Note when we let go of the barrier.
   * Workers can use this to derive a reasonably accurate
   * time offset. See vlib_time_now(...)
   */
  vm->time_last_barrier_release = vlib_time_now (vm);
  CLIB_MEMORY_STORE_BARRIER ();

  *vlib_worker_threads->wait_at_barrier = 0;

  while (*vlib_worker_threads->workers_at_barrier > 0)
    {
      if ((now = vlib_time_now (vm)) > deadline)
        {
          fformat (stderr, "%s: worker thread deadlock\n", __FUNCTION__);
          os_panic ();
        }
    }

  /* Wait for reforks before continuing */
  if (refork_needed)
    {
      now = vlib_time_now (vm);
      deadline = now + BARRIER_SYNC_TIMEOUT;

      while (*vlib_worker_threads->node_reforks_required > 0)
        {
          if ((now = vlib_time_now (vm)) > deadline)
            {
              fformat (stderr, "%s: worker thread refork deadlock\n",
                       __FUNCTION__);
              os_panic ();
            }
        }
      vlib_stats_segment_unlock ();
    }

  t_closed_total = now - vm->barrier_epoch;

  minimum_open = t_closed_total * BARRIER_MINIMUM_OPEN_FACTOR;

  if (minimum_open > BARRIER_MINIMUM_OPEN_LIMIT)
    minimum_open = BARRIER_MINIMUM_OPEN_LIMIT;

  vm->barrier_no_close_before = now + minimum_open;

  /* Record barrier epoch (used to enforce minimum open time) */
  vm->barrier_epoch = now;

  barrier_trace_release (t_entry, t_closed_total, t_update_main);

  if (PREDICT_FALSE (vec_len (vm->barrier_perf_callbacks) != 0))
    clib_call_callbacks (vm->barrier_perf_callbacks, vm,
                         vm->clib_time.last_cpu_time, 1 /* leave */ );
}
/*
 * Wait until each of the workers has been once around the track
 */
void
vlib_worker_wait_one_loop (void)
{
  vlib_global_main_t *vgm = vlib_get_global_main ();
  u32 *counts = 0;
  u32 ii;

  ASSERT (vlib_get_thread_index () == 0);

  if (vlib_get_n_threads () < 2)
    return;

  if (vlib_worker_thread_barrier_held ())
    return;

  vec_validate (counts, vlib_get_n_threads () - 1);

  /* record the current loop counts */
  vec_foreach_index (ii, vgm->vlib_mains)
    counts[ii] = vgm->vlib_mains[ii]->main_loop_count;

  /* spin until each changes, apart from the main thread, or we'd be
   * here a while */
  for (ii = 1; ii < vec_len (counts); ii++)
    {
      while (counts[ii] == vgm->vlib_mains[ii]->main_loop_count)
        ;
    }

  vec_free (counts);
}
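
/*
 * Usage sketch (illustration only, not from this file): the main thread
 * can use this instead of a full barrier when it only needs every worker
 * to have observed an already-published change, e.g.:
 *
 *   some_shared_flag = 1;          // hypothetical flag visible to workers
 *   vlib_worker_wait_one_loop ();  // each worker has since run one loop
 */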
static void
vlib_worker_thread_fn (void *arg)
{
  vlib_global_main_t *vgm = vlib_get_global_main ();
  vlib_worker_thread_t *w = (vlib_worker_thread_t *) arg;
  vlib_main_t *vm = vlib_get_main ();
  clib_error_t *e;

  ASSERT (vm->thread_index == vlib_get_thread_index ());

  vlib_worker_thread_init (w);
  clib_time_init (&vm->clib_time);
  clib_mem_set_heap (w->thread_mheap);

  vm->worker_init_functions_called = hash_create (0, 0);

  e = vlib_call_init_exit_functions_no_sort (
    vm, &vgm->worker_init_function_registrations, 1 /* call_once */,
    0 /* is_global */);
  if (e)
    clib_error_report (e);

  vlib_worker_loop (vm);
}
VLIB_REGISTER_THREAD (worker_thread_reg, static) = {
  .name = "workers",
  .short_name = "wk",
  .function = vlib_worker_thread_fn,
};
u32
vlib_frame_queue_main_init (u32 node_index, u32 frame_queue_nelts)
{
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  vlib_frame_queue_main_t *fqm;
  vlib_frame_queue_t *fq;
  int i;
  u32 num_threads;

  if (frame_queue_nelts == 0)
    frame_queue_nelts = FRAME_QUEUE_MAX_NELTS;

  num_threads = 1 /* main thread */ + tm->n_threads;
  ASSERT (frame_queue_nelts >= 8 + num_threads);

  vec_add2 (tm->frame_queue_mains, fqm, 1);

  fqm->node_index = node_index;
  fqm->frame_queue_nelts = frame_queue_nelts;

  vec_validate (fqm->vlib_frame_queues, tm->n_vlib_mains - 1);
  _vec_len (fqm->vlib_frame_queues) = 0;
  for (i = 0; i < tm->n_vlib_mains; i++)
    {
      fq = vlib_frame_queue_alloc (frame_queue_nelts);
      vec_add1 (fqm->vlib_frame_queues, fq);
    }

  return (fqm - tm->frame_queue_mains);
}
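
/*
 * Usage sketch (an assumption about the wider vlib API, not from this
 * file): a feature typically calls this once at init, passing 0 to accept
 * the default depth, and keeps the returned index for handoff, roughly:
 *
 *   fq_index = vlib_frame_queue_main_init (handoff_node.index, 0);
 *
 * Buffers are then believed to be handed to other threads through helpers
 * such as vlib_buffer_enqueue_to_thread(), keyed by fq_index.
 */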
void
vlib_process_signal_event_mt_helper (vlib_process_signal_event_mt_args_t *
                                     args)
{
  ASSERT (vlib_get_thread_index () == 0);
  vlib_process_signal_event (vlib_get_main (), args->node_index,
                             args->type_opaque, args->data);
}
void *rpc_call_main_thread_cb_fn;

void
vlib_rpc_call_main_thread (void *callback, u8 * args, u32 arg_size)
{
  if (rpc_call_main_thread_cb_fn)
    {
      void (*fp) (void *, u8 *, u32) = rpc_call_main_thread_cb_fn;
      (*fp) (callback, args, arg_size);
    }
  else
    clib_warning ("BUG: rpc_call_main_thread_cb_fn NULL!");
}
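
/*
 * Illustrative note (assumption): rpc_call_main_thread_cb_fn is expected
 * to be installed elsewhere (the binary-API layer in stock VPP), so a
 * worker can request main-thread execution roughly as:
 *
 *   vlib_rpc_call_main_thread (my_handler, (u8 *) &my_args,
 *                              sizeof (my_args));
 *
 * where my_handler and my_args are hypothetical caller-supplied items.
 */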
clib_error_t *
threads_init (vlib_main_t * vm)
{
  return 0;
}

VLIB_INIT_FUNCTION (threads_init);
static clib_error_t *
show_clock_command_fn (vlib_main_t * vm,
                       unformat_input_t * input, vlib_cli_command_t * cmd)
{
  int verbose = 0;
  clib_timebase_t _tb, *tb = &_tb;

  (void) unformat (input, "verbose %=", &verbose, 1);

  clib_timebase_init (tb, 0 /* GMT */ , CLIB_TIMEBASE_DAYLIGHT_NONE,
                      &vm->clib_time);

  vlib_cli_output (vm, "%U, %U GMT", format_clib_time, &vm->clib_time,
                   verbose, format_clib_timebase_time,
                   clib_timebase_now (tb));

  vlib_cli_output (vm, "Time last barrier release %.9f",
                   vm->time_last_barrier_release);

  foreach_vlib_main ()
    {
      vlib_cli_output (vm, "%d: %U", this_vlib_main->thread_index,
                       format_clib_time, &this_vlib_main->clib_time, verbose);

      vlib_cli_output (vm, "Thread %d offset %.9f error %.9f",
                       this_vlib_main->thread_index,
                       this_vlib_main->time_offset,
                       vm->time_last_barrier_release -
                       this_vlib_main->time_last_barrier_release);
    }
  return 0;
}

VLIB_CLI_COMMAND (f_command, static) =
{
  .path = "show clock",
  .short_help = "show clock",
  .function = show_clock_command_fn,
};
vlib_thread_main_t *
vlib_get_thread_main_not_inline (void)
{
  return vlib_get_thread_main ();
}
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */