2 * Copyright (c) 2015 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
19 #include <vppinfra/format.h>
20 #include <vppinfra/time_range.h>
21 #include <vppinfra/interrupt.h>
22 #include <vppinfra/linux/sysfs.h>
23 #include <vlib/vlib.h>
25 #include <vlib/threads.h>
27 #include <vlib/stat_weak_inlines.h>
35 vlib_worker_thread_t *vlib_worker_threads;
36 vlib_thread_main_t vlib_thread_main;
39 * Barrier tracing can be enabled on a normal build to collect information
40 * on barrier use, including timings and call stacks. Deliberately not
41 * keyed off CLIB_DEBUG, because that can add significant overhead which
42 * impacts observed timings.
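 *
 * Tracing is gated at run time on vlib_worker_threads->barrier_elog_enabled;
 * the t_entry / t_open / t_closed arguments are elapsed times in seconds and
 * are logged in microseconds (hence the 1000000.0 scaling below).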
46 barrier_trace_sync (f64 t_entry, f64 t_open, f64 t_closed)
48 if (!vlib_worker_threads->barrier_elog_enabled)
51 ELOG_TYPE_DECLARE (e) = {
52 .format = "bar-trace-%s-#%d",
53 .format_args = "T4i4",
58 u32 caller, count, t_entry, t_open, t_closed;
61 ed = ELOG_DATA (&vlib_global_main.elog_main, e);
62 ed->count = (int) vlib_worker_threads[0].barrier_sync_count;
63 ed->caller = elog_string (&vlib_global_main.elog_main,
64 (char *) vlib_worker_threads[0].barrier_caller);
65 ed->t_entry = (int) (1000000.0 * t_entry);
66 ed->t_open = (int) (1000000.0 * t_open);
67 ed->t_closed = (int) (1000000.0 * t_closed);
71 barrier_trace_sync_rec (f64 t_entry)
73 if (!vlib_worker_threads->barrier_elog_enabled)
76 ELOG_TYPE_DECLARE (e) = {
77 .format = "bar-syncrec-%s-#%d",
78 .format_args = "T4i4",
86 ed = ELOG_DATA (&vlib_global_main.elog_main, e);
87 ed->depth = (int) vlib_worker_threads[0].recursion_level - 1;
88 ed->caller = elog_string (&vlib_global_main.elog_main,
89 (char *) vlib_worker_threads[0].barrier_caller);
93 barrier_trace_release_rec (f64 t_entry)
95 if (!vlib_worker_threads->barrier_elog_enabled)
98 ELOG_TYPE_DECLARE (e) = {
99 .format = "bar-relrrec-#%d",
108 ed = ELOG_DATA (&vlib_global_main.elog_main, e);
109 ed->depth = (int) vlib_worker_threads[0].recursion_level;
113 barrier_trace_release (f64 t_entry, f64 t_closed_total, f64 t_update_main)
115 if (!vlib_worker_threads->barrier_elog_enabled)
118 ELOG_TYPE_DECLARE (e) = {
119 .format = "bar-rel-#%d-e%d-u%d-t%d",
120 .format_args = "i4i4i4i4",
125 u32 count, t_entry, t_update_main, t_closed_total;
128 ed = ELOG_DATA (&vlib_global_main.elog_main, e);
129 ed->t_entry = (int) (1000000.0 * t_entry);
130 ed->t_update_main = (int) (1000000.0 * t_update_main);
131 ed->t_closed_total = (int) (1000000.0 * t_closed_total);
132 ed->count = (int) vlib_worker_threads[0].barrier_sync_count;
134 /* Reset context for next trace */
135 vlib_worker_threads[0].barrier_context = NULL;
139 os_get_nthreads (void)
141 return vec_len (vlib_thread_stacks);
145 vlib_set_thread_name (char *name)
147 int pthread_setname_np (pthread_t __target_thread, const char *__name);
149 pthread_t thread = pthread_self ();
153 rv = pthread_setname_np (thread, name);
155 clib_warning ("pthread_setname_np returned %d", rv);
160 sort_registrations_by_no_clone (void *a0, void *a1)
162 vlib_thread_registration_t **tr0 = a0;
163 vlib_thread_registration_t **tr1 = a1;
165 return ((i32) ((*tr0)->no_data_structure_clone)
166 - ((i32) ((*tr1)->no_data_structure_clone)));
170 /* Called early in the init sequence */
173 vlib_thread_init (vlib_main_t * vm)
175 vlib_thread_main_t *tm = &vlib_thread_main;
176 vlib_worker_thread_t *w;
177 vlib_thread_registration_t *tr;
178 u32 n_vlib_mains = 1;
183 /* get bitmaps of active cpu cores and sockets */
184 tm->cpu_core_bitmap =
185 clib_sysfs_list_to_bitmap ("/sys/devices/system/cpu/online");
186 tm->cpu_socket_bitmap =
187 clib_sysfs_list_to_bitmap ("/sys/devices/system/node/online");
189 avail_cpu = clib_bitmap_dup (tm->cpu_core_bitmap);
192 for (i = 0; i < tm->skip_cores; i++)
194 uword c = clib_bitmap_first_set (avail_cpu);
196 return clib_error_return (0, "no available cpus to skip");
198 avail_cpu = clib_bitmap_set (avail_cpu, c, 0);
201 /* grab cpu for main thread */
202 if (tm->main_lcore == ~0)
204 /* if main-lcore is not set, we try to use lcore 1 */
205 if (clib_bitmap_get (avail_cpu, 1))
208 tm->main_lcore = clib_bitmap_first_set (avail_cpu);
209 if (tm->main_lcore == (u8) ~ 0)
210 return clib_error_return (0, "no available cpus to be used for the"
215 if (clib_bitmap_get (avail_cpu, tm->main_lcore) == 0)
216 return clib_error_return (0, "cpu %u is not available to be used"
217 " for the main thread", tm->main_lcore);
219 avail_cpu = clib_bitmap_set (avail_cpu, tm->main_lcore, 0);
221 /* assume that there is socket 0 only if there is no data from sysfs */
222 if (!tm->cpu_socket_bitmap)
223 tm->cpu_socket_bitmap = clib_bitmap_set (0, 0, 1);
225 /* pin main thread to main_lcore */
226 if (tm->cb.vlib_thread_set_lcore_cb)
228 tm->cb.vlib_thread_set_lcore_cb (0, tm->main_lcore);
234 CPU_SET (tm->main_lcore, &cpuset);
235 pthread_setaffinity_np (pthread_self (), sizeof (cpu_set_t), &cpuset);
238 /* Set up thread 0 */
239 vec_validate_aligned (vlib_worker_threads, 0, CLIB_CACHE_LINE_BYTES);
240 _vec_len (vlib_worker_threads) = 1;
241 w = vlib_worker_threads;
242 w->thread_mheap = clib_mem_get_heap ();
243 w->thread_stack = vlib_thread_stacks[0];
244 w->cpu_id = tm->main_lcore;
245 w->lwp = syscall (SYS_gettid);
246 w->thread_id = pthread_self ();
247 tm->n_vlib_mains = 1;
249 vlib_get_thread_core_numa (w, w->cpu_id);
251 if (tm->sched_policy != ~0)
253 struct sched_param sched_param;
254 if (!sched_getparam (w->lwp, &sched_param))
256 if (tm->sched_priority != ~0)
257 sched_param.sched_priority = tm->sched_priority;
258 sched_setscheduler (w->lwp, tm->sched_policy, &sched_param);
262 /* assign threads to cores and set n_vlib_mains */
267 vec_add1 (tm->registrations, tr);
271 vec_sort_with_function (tm->registrations, sort_registrations_by_no_clone);
273 for (i = 0; i < vec_len (tm->registrations); i++)
276 tr = tm->registrations[i];
277 tr->first_index = first_index;
278 first_index += tr->count;
279 n_vlib_mains += (tr->no_data_structure_clone == 0) ? tr->count : 0;
281 /* construct coremask */
282 if (tr->use_pthreads || !tr->count)
289 clib_bitmap_foreach (c, tr->coremask) {
290 if (clib_bitmap_get(avail_cpu, c) == 0)
291 return clib_error_return (0, "cpu %u is not available to be used"
292 " for the '%s' thread",c, tr->name);
294 avail_cpu = clib_bitmap_set(avail_cpu, c, 0);
300 for (j = 0; j < tr->count; j++)
302 /* Do not use CPU 0 by default - leave it to the host and IRQs */
303 uword avail_c0 = clib_bitmap_get (avail_cpu, 0);
304 avail_cpu = clib_bitmap_set (avail_cpu, 0, 0);
306 uword c = clib_bitmap_first_set (avail_cpu);
307 /* Use CPU 0 as a last resort */
308 if (c == ~0 && avail_c0)
315 return clib_error_return (0,
316 "no available cpus to be used for"
317 " the '%s' thread", tr->name);
319 avail_cpu = clib_bitmap_set (avail_cpu, 0, avail_c0);
320 avail_cpu = clib_bitmap_set (avail_cpu, c, 0);
321 tr->coremask = clib_bitmap_set (tr->coremask, c, 1);
326 clib_bitmap_free (avail_cpu);
328 tm->n_vlib_mains = n_vlib_mains;
331 * Allocate the remaining worker threads, and thread stack vector slots
332 * from now on, calls to os_get_nthreads() will return the correct answer
335 vec_validate_aligned (vlib_worker_threads, first_index - 1,
336 CLIB_CACHE_LINE_BYTES);
337 vec_validate (vlib_thread_stacks, vec_len (vlib_worker_threads) - 1);
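/*
 * Allocate and zero a frame queue of nelts elements (nelts must be a power
 * of 2); warns when the shared head/tail indices or the element array are
 * not cache-line aligned.
 */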
342 vlib_frame_queue_alloc (int nelts)
344 vlib_frame_queue_t *fq;
346 fq = clib_mem_alloc_aligned (sizeof (*fq), CLIB_CACHE_LINE_BYTES);
347 clib_memset (fq, 0, sizeof (*fq));
349 fq->vector_threshold = 128; // packets
350 vec_validate_aligned (fq->elts, nelts - 1, CLIB_CACHE_LINE_BYTES);
354 if (((uword) & fq->tail) & (CLIB_CACHE_LINE_BYTES - 1))
355 fformat (stderr, "WARNING: fq->tail unaligned\n");
356 if (((uword) & fq->head) & (CLIB_CACHE_LINE_BYTES - 1))
357 fformat (stderr, "WARNING: fq->head unaligned\n");
358 if (((uword) fq->elts) & (CLIB_CACHE_LINE_BYTES - 1))
359 fformat (stderr, "WARNING: fq->elts unaligned\n");
361 if (sizeof (fq->elts[0]) % CLIB_CACHE_LINE_BYTES)
362 fformat (stderr, "WARNING: fq->elts[0] size %d\n",
363 sizeof (fq->elts[0]));
364 if (nelts & (nelts - 1))
366 fformat (stderr, "FATAL: nelts MUST be a power of 2\n");
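/*
 * Weak placeholder: presumably overridden by the binary API layer when it is
 * linked in, so that messages dequeued via VLIB_FRAME_QUEUE_ELT_API_MSG get
 * dispatched rather than dropped.
 */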
374 void vl_msg_api_handler_no_free (void *) __attribute__ ((weak));
376 vl_msg_api_handler_no_free (void *v)
380 /* Turned off, save as reference material... */
383 vlib_frame_queue_dequeue_internal (int thread_id,
384 vlib_main_t * vm, vlib_node_main_t * nm)
386 vlib_frame_queue_t *fq = vlib_frame_queues[thread_id];
387 vlib_frame_queue_elt_t *elt;
389 vlib_pending_frame_t *p;
390 vlib_node_runtime_t *r;
391 u32 node_runtime_index;
396 ASSERT (vm == vlib_mains[thread_id]);
400 if (fq->head == fq->tail)
403 elt = fq->elts + ((fq->head + 1) & (fq->nelts - 1));
408 before = clib_cpu_time_now ();
411 node_runtime_index = elt->node_runtime_index;
412 msg_type = elt->msg_type;
416 case VLIB_FRAME_QUEUE_ELT_FREE_BUFFERS:
417 vlib_buffer_free (vm, vlib_frame_vector_args (f), f->n_vectors);
418 /* note fallthrough... */
419 case VLIB_FRAME_QUEUE_ELT_FREE_FRAME:
420 r = vec_elt_at_index (nm->nodes_by_type[VLIB_NODE_TYPE_INTERNAL],
422 vlib_frame_free (vm, r, f);
424 case VLIB_FRAME_QUEUE_ELT_DISPATCH_FRAME:
425 vec_add2 (vm->node_main.pending_frames, p, 1);
426 f->flags |= (VLIB_FRAME_PENDING | VLIB_FRAME_FREE_AFTER_DISPATCH);
427 p->node_runtime_index = elt->node_runtime_index;
428 p->frame_index = vlib_frame_index (vm, f);
429 p->next_frame_index = VLIB_PENDING_FRAME_NO_NEXT_FRAME;
430 fq->dequeue_vectors += (u64) f->n_vectors;
432 case VLIB_FRAME_QUEUE_ELT_API_MSG:
433 vl_msg_api_handler_no_free (f);
436 clib_warning ("bogus frame queue message, type %d", msg_type);
441 fq->dequeue_ticks += clib_cpu_time_now () - before;
442 CLIB_MEMORY_BARRIER ();
451 vlib_frame_queue_dequeue (int thread_id,
452 vlib_main_t * vm, vlib_node_main_t * nm)
454 return vlib_frame_queue_dequeue_internal (thread_id, vm, nm);
458 vlib_frame_queue_enqueue (vlib_main_t * vm, u32 node_runtime_index,
459 u32 frame_queue_index, vlib_frame_t * frame,
460 vlib_frame_queue_msg_type_t type)
462 vlib_frame_queue_t *fq = vlib_frame_queues[frame_queue_index];
463 vlib_frame_queue_elt_t *elt;
466 u64 before = clib_cpu_time_now ();
470 new_tail = clib_atomic_add_fetch (&fq->tail, 1);
472 /* Wait until a ring slot is available */
473 while (new_tail >= fq->head + fq->nelts)
475 f64 b4 = vlib_time_now_ticks (vm, before);
476 vlib_worker_thread_barrier_check (vm, b4);
477 /* Bad idea. Dequeue -> enqueue -> dequeue -> trouble */
478 // vlib_frame_queue_dequeue (vm->thread_index, vm, nm);
481 elt = fq->elts + (new_tail & (fq->nelts - 1));
483 /* this would be very bad... */
488 /* Once we enqueue the frame, frame->n_vectors is owned elsewhere... */
489 save_count = frame->n_vectors;
492 elt->node_runtime_index = node_runtime_index;
493 elt->msg_type = type;
494 CLIB_MEMORY_BARRIER ();
501 /* To be called by vlib worker threads upon startup */
503 vlib_worker_thread_init (vlib_worker_thread_t * w)
505 vlib_thread_main_t *tm = vlib_get_thread_main ();
508 * Note: disabling signals in worker threads as follows
509 * prevents the api post-mortem dump scheme from working
513 * pthread_sigmask (SIG_SETMASK, &s, 0);
517 clib_mem_set_heap (w->thread_mheap);
519 if (vec_len (tm->thread_prefix) && w->registration->short_name)
521 w->name = format (0, "%v_%s_%d%c", tm->thread_prefix,
522 w->registration->short_name, w->instance_id, '\0');
523 vlib_set_thread_name ((char *) w->name);
526 if (!w->registration->use_pthreads)
529 /* Initial barrier sync, for both worker and i/o threads */
530 clib_atomic_fetch_add (vlib_worker_threads->workers_at_barrier, 1);
532 while (*vlib_worker_threads->wait_at_barrier)
535 clib_atomic_fetch_add (vlib_worker_threads->workers_at_barrier, -1);
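/*
 * Entry point for freshly launched worker threads (via pthread_create or the
 * external launch callback): record the kernel thread id, publish
 * __os_thread_index, then switch onto the worker's own stack and call the
 * registered thread function.
 */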
540 vlib_worker_thread_bootstrap_fn (void *arg)
543 vlib_worker_thread_t *w = arg;
545 w->lwp = syscall (SYS_gettid);
546 w->thread_id = pthread_self ();
548 __os_thread_index = w - vlib_worker_threads;
550 vlib_process_start_switch_stack (vlib_get_main_by_index (__os_thread_index),
552 rv = (void *) clib_calljmp
553 ((uword (*)(uword)) w->thread_function,
554 (uword) arg, w->thread_stack + VLIB_THREAD_STACK_SIZE);
555 /* NOTREACHED, we hope */
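/*
 * Discover the core id and NUMA node of cpu_id from sysfs topology files;
 * the results are stored in w->core_id and w->numa_id.
 */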
560 vlib_get_thread_core_numa (vlib_worker_thread_t * w, unsigned cpu_id)
562 const char *sys_cpu_path = "/sys/devices/system/cpu/cpu";
563 const char *sys_node_path = "/sys/devices/system/node/node";
564 clib_bitmap_t *nbmp = 0, *cbmp = 0;
567 int core_id = -1, numa_id = -1;
569 p = format (p, "%s%u/topology/core_id%c", sys_cpu_path, cpu_id, 0);
570 clib_sysfs_read ((char *) p, "%d", &core_id);
571 vec_reset_length (p);
574 clib_sysfs_read ("/sys/devices/system/node/online", "%U",
575 unformat_bitmap_list, &nbmp);
576 clib_bitmap_foreach (node, nbmp) {
577 p = format (p, "%s%u/cpulist%c", sys_node_path, node, 0);
578 clib_sysfs_read ((char *) p, "%U", unformat_bitmap_list, &cbmp);
579 if (clib_bitmap_get (cbmp, cpu_id))
581 vec_reset_length (cbmp);
582 vec_reset_length (p);
589 w->core_id = core_id;
590 w->numa_id = numa_id;
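/*
 * Launch one worker: resolve its core / NUMA placement, create or reuse a
 * per-NUMA heap as configured, then either hand off to the registered
 * external launch callback or create a pthread pinned to cpu_id.
 */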
593 static clib_error_t *
594 vlib_launch_thread_int (void *fp, vlib_worker_thread_t * w, unsigned cpu_id)
596 clib_mem_main_t *mm = &clib_mem_main;
597 vlib_thread_main_t *tm = &vlib_thread_main;
598 void *(*fp_arg) (void *) = fp;
602 vlib_get_thread_core_numa (w, cpu_id);
604 /* Set up NUMA-bound heap if indicated */
605 if (mm->per_numa_mheaps[w->numa_id] == 0)
607 /* If the user requested a NUMA heap, create it... */
608 if (tm->numa_heap_size)
610 clib_mem_set_numa_affinity (w->numa_id, 1 /* force */ );
611 numa_heap = clib_mem_create_heap (0 /* DIY */ , tm->numa_heap_size,
613 "numa %u heap", w->numa_id);
614 clib_mem_set_default_numa_affinity ();
615 mm->per_numa_mheaps[w->numa_id] = numa_heap;
619 /* Or, use the main heap */
620 mm->per_numa_mheaps[w->numa_id] = w->thread_mheap;
624 if (tm->cb.vlib_launch_thread_cb && !w->registration->use_pthreads)
625 return tm->cb.vlib_launch_thread_cb (fp, (void *) w, cpu_id);
631 CPU_SET (cpu_id, &cpuset);
633 if (pthread_create (&worker, NULL /* attr */ , fp_arg, (void *) w))
634 return clib_error_return_unix (0, "pthread_create");
636 if (pthread_setaffinity_np (worker, sizeof (cpu_set_t), &cpuset))
637 return clib_error_return_unix (0, "pthread_setaffinity_np");
643 static clib_error_t *
644 start_workers (vlib_main_t * vm)
646 vlib_global_main_t *vgm = vlib_get_global_main ();
648 vlib_worker_thread_t *w;
649 vlib_main_t *vm_clone;
651 vlib_thread_main_t *tm = &vlib_thread_main;
652 vlib_thread_registration_t *tr;
653 vlib_node_runtime_t *rt;
654 u32 n_vlib_mains = tm->n_vlib_mains;
655 u32 worker_thread_index;
656 clib_mem_heap_t *main_heap = clib_mem_get_per_cpu_heap ();
658 vec_reset_length (vlib_worker_threads);
660 /* Set up the main thread */
661 vec_add2_aligned (vlib_worker_threads, w, 1, CLIB_CACHE_LINE_BYTES);
662 w->elog_track.name = "main thread";
663 elog_track_register (vlib_get_elog_main (), &w->elog_track);
665 if (vec_len (tm->thread_prefix))
667 w->name = format (0, "%v_main%c", tm->thread_prefix, '\0');
668 vlib_set_thread_name ((char *) w->name);
671 vgm->elog_main.lock =
672 clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES, CLIB_CACHE_LINE_BYTES);
673 vgm->elog_main.lock[0] = 0;
675 clib_callback_data_init (&vm->vlib_node_runtime_perf_callbacks,
676 &vm->worker_thread_main_loop_callback_lock);
678 vec_validate_aligned (vgm->vlib_mains, n_vlib_mains - 1,
679 CLIB_CACHE_LINE_BYTES);
680 _vec_len (vgm->vlib_mains) = 0;
681 vec_add1_aligned (vgm->vlib_mains, vm, CLIB_CACHE_LINE_BYTES);
683 if (n_vlib_mains > 1)
685 vlib_worker_threads->wait_at_barrier =
686 clib_mem_alloc_aligned (sizeof (u32), CLIB_CACHE_LINE_BYTES);
687 vlib_worker_threads->workers_at_barrier =
688 clib_mem_alloc_aligned (sizeof (u32), CLIB_CACHE_LINE_BYTES);
690 vlib_worker_threads->node_reforks_required =
691 clib_mem_alloc_aligned (sizeof (u32), CLIB_CACHE_LINE_BYTES);
693 /* We'll need the rpc vector lock... */
694 clib_spinlock_init (&vm->pending_rpc_lock);
696 /* Ask for an initial barrier sync */
697 *vlib_worker_threads->workers_at_barrier = 0;
698 *vlib_worker_threads->wait_at_barrier = 1;
700 /* Without update or refork */
701 *vlib_worker_threads->node_reforks_required = 0;
702 vgm->need_vlib_worker_thread_node_runtime_update = 0;
705 vm->barrier_epoch = 0;
706 vm->barrier_no_close_before = 0;
708 worker_thread_index = 1;
709 clib_spinlock_init (&vm->worker_thread_main_loop_callback_lock);
711 for (i = 0; i < vec_len (tm->registrations); i++)
713 vlib_node_main_t *nm, *nm_clone;
716 tr = tm->registrations[i];
721 for (k = 0; k < tr->count; k++)
725 vec_add2 (vlib_worker_threads, w, 1);
726 /* Currently unused, may not really work */
728 w->thread_mheap = clib_mem_create_heap (0, tr->mheap_size,
733 w->thread_mheap = main_heap;
736 vlib_thread_stack_init (w - vlib_worker_threads);
737 w->thread_function = tr->function;
738 w->thread_function_arg = w;
740 w->registration = tr;
743 (char *) format (0, "%s %d", tr->name, k + 1);
744 vec_add1 (w->elog_track.name, 0);
745 elog_track_register (vlib_get_elog_main (), &w->elog_track);
747 if (tr->no_data_structure_clone)
750 /* Fork vlib_global_main et al. Look for bugs here */
751 oldheap = clib_mem_set_heap (w->thread_mheap);
753 vm_clone = clib_mem_alloc_aligned (sizeof (*vm_clone),
754 CLIB_CACHE_LINE_BYTES);
755 clib_memcpy (vm_clone, vlib_get_first_main (),
758 vm_clone->thread_index = worker_thread_index;
759 vm_clone->heap_base = w->thread_mheap;
760 vm_clone->heap_aligned_base = (void *)
761 (((uword) w->thread_mheap) & ~(VLIB_FRAME_ALIGN - 1));
762 vm_clone->pending_rpc_requests = 0;
763 vec_validate (vm_clone->pending_rpc_requests, 0);
764 _vec_len (vm_clone->pending_rpc_requests) = 0;
765 clib_memset (&vm_clone->random_buffer, 0,
766 sizeof (vm_clone->random_buffer));
768 (&vm_clone->worker_thread_main_loop_callback_lock);
769 clib_callback_data_init
770 (&vm_clone->vlib_node_runtime_perf_callbacks,
771 &vm_clone->worker_thread_main_loop_callback_lock);
773 nm = &vlib_get_first_main ()->node_main;
774 nm_clone = &vm_clone->node_main;
775 /* fork next frames array, preserving node runtime indices */
776 nm_clone->next_frames = vec_dup_aligned (nm->next_frames,
777 CLIB_CACHE_LINE_BYTES);
778 for (j = 0; j < vec_len (nm_clone->next_frames); j++)
780 vlib_next_frame_t *nf = &nm_clone->next_frames[j];
781 u32 save_node_runtime_index;
784 save_node_runtime_index = nf->node_runtime_index;
785 save_flags = nf->flags & VLIB_FRAME_NO_FREE_AFTER_DISPATCH;
786 vlib_next_frame_init (nf);
787 nf->node_runtime_index = save_node_runtime_index;
788 nf->flags = save_flags;
791 /* fork the frame dispatch queue */
792 nm_clone->pending_frames = 0;
793 vec_validate (nm_clone->pending_frames, 10);
794 _vec_len (nm_clone->pending_frames) = 0;
799 /* Allocate all nodes in a single block for speed */
800 n = clib_mem_alloc_no_fail (vec_len (nm->nodes) * sizeof (*n));
802 for (j = 0; j < vec_len (nm->nodes); j++)
804 clib_memcpy (n, nm->nodes[j], sizeof (*n));
805 /* none of the copied nodes have enqueue rights given out */
806 n->owner_node_index = VLIB_INVALID_NODE_INDEX;
807 clib_memset (&n->stats_total, 0, sizeof (n->stats_total));
808 clib_memset (&n->stats_last_clear, 0,
809 sizeof (n->stats_last_clear));
810 vec_add1 (nm_clone->nodes, n);
813 nm_clone->nodes_by_type[VLIB_NODE_TYPE_INTERNAL] =
814 vec_dup_aligned (nm->nodes_by_type[VLIB_NODE_TYPE_INTERNAL],
815 CLIB_CACHE_LINE_BYTES);
817 nm_clone->nodes_by_type[VLIB_NODE_TYPE_INTERNAL])
819 vlib_node_t *n = vlib_get_node (vm, rt->node_index);
820 rt->thread_index = vm_clone->thread_index;
821 /* copy initial runtime_data from node */
822 if (n->runtime_data && n->runtime_data_bytes > 0)
823 clib_memcpy (rt->runtime_data, n->runtime_data,
824 clib_min (VLIB_NODE_RUNTIME_DATA_SIZE,
825 n->runtime_data_bytes));
828 nm_clone->nodes_by_type[VLIB_NODE_TYPE_INPUT] =
829 vec_dup_aligned (nm->nodes_by_type[VLIB_NODE_TYPE_INPUT],
830 CLIB_CACHE_LINE_BYTES);
831 clib_interrupt_init (
832 &nm_clone->interrupts,
833 vec_len (nm_clone->nodes_by_type[VLIB_NODE_TYPE_INPUT]));
834 vec_foreach (rt, nm_clone->nodes_by_type[VLIB_NODE_TYPE_INPUT])
836 vlib_node_t *n = vlib_get_node (vm, rt->node_index);
837 rt->thread_index = vm_clone->thread_index;
838 /* copy initial runtime_data from node */
839 if (n->runtime_data && n->runtime_data_bytes > 0)
840 clib_memcpy (rt->runtime_data, n->runtime_data,
841 clib_min (VLIB_NODE_RUNTIME_DATA_SIZE,
842 n->runtime_data_bytes));
845 nm_clone->nodes_by_type[VLIB_NODE_TYPE_PRE_INPUT] =
846 vec_dup_aligned (nm->nodes_by_type[VLIB_NODE_TYPE_PRE_INPUT],
847 CLIB_CACHE_LINE_BYTES);
849 nm_clone->nodes_by_type[VLIB_NODE_TYPE_PRE_INPUT])
851 vlib_node_t *n = vlib_get_node (vm, rt->node_index);
852 rt->thread_index = vm_clone->thread_index;
853 /* copy initial runtime_data from node */
854 if (n->runtime_data && n->runtime_data_bytes > 0)
855 clib_memcpy (rt->runtime_data, n->runtime_data,
856 clib_min (VLIB_NODE_RUNTIME_DATA_SIZE,
857 n->runtime_data_bytes));
860 nm_clone->processes = vec_dup_aligned (nm->processes,
861 CLIB_CACHE_LINE_BYTES);
863 /* Create per-thread frame freelist */
864 nm_clone->frame_sizes = vec_new (vlib_frame_size_t, 1);
865 #ifdef VLIB_SUPPORTS_ARBITRARY_SCALAR_SIZES
866 nm_clone->frame_size_hash = hash_create (0, sizeof (uword));
868 nm_clone->node_by_error = nm->node_by_error;
870 /* Packet trace buffers are guaranteed to be empty, nothing to do here */
872 clib_mem_set_heap (oldheap);
873 vec_add1_aligned (vgm->vlib_mains, vm_clone,
874 CLIB_CACHE_LINE_BYTES);
876 /* Switch to the stats segment ... */
877 void *oldheap = vlib_stats_push_heap (0);
878 vm_clone->error_main.counters =
879 vec_dup_aligned (vlib_get_first_main ()->error_main.counters,
880 CLIB_CACHE_LINE_BYTES);
881 vlib_stats_pop_heap2 (vm_clone->error_main.counters,
882 worker_thread_index, oldheap, 1);
884 vm_clone->error_main.counters_last_clear = vec_dup_aligned (
885 vlib_get_first_main ()->error_main.counters_last_clear,
886 CLIB_CACHE_LINE_BYTES);
888 worker_thread_index++;
894 /* Only non-data-structure-copy threads are left to create... */
895 for (i = 0; i < vec_len (tm->registrations); i++)
897 tr = tm->registrations[i];
899 for (j = 0; j < tr->count; j++)
901 vec_add2 (vlib_worker_threads, w, 1);
904 w->thread_mheap = clib_mem_create_heap (0, tr->mheap_size,
910 w->thread_mheap = main_heap;
912 vlib_thread_stack_init (w - vlib_worker_threads);
913 w->thread_function = tr->function;
914 w->thread_function_arg = w;
917 (char *) format (0, "%s %d", tr->name, j + 1);
918 w->registration = tr;
919 vec_add1 (w->elog_track.name, 0);
920 elog_track_register (vlib_get_elog_main (), &w->elog_track);
925 worker_thread_index = 1;
927 for (i = 0; i < vec_len (tm->registrations); i++)
932 tr = tm->registrations[i];
934 if (tr->use_pthreads || tm->use_pthreads)
936 for (j = 0; j < tr->count; j++)
938 w = vlib_worker_threads + worker_thread_index++;
939 err = vlib_launch_thread_int (vlib_worker_thread_bootstrap_fn,
942 clib_error_report (err);
949 clib_bitmap_foreach (c, tr->coremask) {
950 w = vlib_worker_threads + worker_thread_index++;
951 err = vlib_launch_thread_int (vlib_worker_thread_bootstrap_fn,
954 clib_error_report (err);
959 vlib_worker_thread_barrier_sync (vm);
960 vlib_worker_thread_barrier_release (vm);
964 VLIB_MAIN_LOOP_ENTER_FUNCTION (start_workers);
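/*
 * Runs on the main thread with the barrier held: pull per-node statistics
 * from every thread into the main copies so no pending counts are lost
 * before the per-worker node clone rebuilds take place.
 */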
968 worker_thread_node_runtime_update_internal (void)
972 vlib_node_main_t *nm, *nm_clone;
973 vlib_main_t *vm_clone;
974 vlib_node_runtime_t *rt;
976 ASSERT (vlib_get_thread_index () == 0);
978 vm = vlib_get_first_main ();
981 ASSERT (*vlib_worker_threads->wait_at_barrier == 1);
984 * Scrape all runtime stats, so we don't lose node runtime(s) with
985 * pending counts, or throw away worker / io thread counts.
987 for (j = 0; j < vec_len (nm->nodes); j++)
991 vlib_node_sync_stats (vm, n);
994 for (i = 1; i < vlib_get_n_threads (); i++)
998 vm_clone = vlib_get_main_by_index (i);
999 nm_clone = &vm_clone->node_main;
1001 for (j = 0; j < vec_len (nm_clone->nodes); j++)
1003 n = nm_clone->nodes[j];
1005 rt = vlib_node_get_runtime (vm_clone, n->index);
1006 vlib_node_runtime_sync_stats (vm_clone, rt, 0, 0, 0);
1010 /* Per-worker clone rebuilds are now done on each thread */
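/*
 * Called by each worker (still behind the barrier) to rebuild its private
 * clone of the node graph: error counters, next-frame vector, nodes and
 * per-type runtimes are re-cloned from the main thread, preserving each
 * node's state and accumulated statistics.
 */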
1015 vlib_worker_thread_node_refork (void)
1017 vlib_main_t *vm, *vm_clone;
1018 vlib_node_main_t *nm, *nm_clone;
1019 vlib_node_t **old_nodes_clone;
1020 vlib_node_runtime_t *rt, *old_rt;
1022 vlib_node_t *new_n_clone;
1026 vm = vlib_get_first_main ();
1027 nm = &vm->node_main;
1028 vm_clone = vlib_get_main ();
1029 nm_clone = &vm_clone->node_main;
1031 /* Re-clone error heap */
1032 u64 *old_counters = vm_clone->error_main.counters;
1033 u64 *old_counters_all_clear = vm_clone->error_main.counters_last_clear;
1035 clib_memcpy_fast (&vm_clone->error_main, &vm->error_main,
1036 sizeof (vm->error_main));
1037 j = vec_len (vm->error_main.counters) - 1;
1039 /* Switch to the stats segment ... */
1040 void *oldheap = vlib_stats_push_heap (0);
1041 vec_validate_aligned (old_counters, j, CLIB_CACHE_LINE_BYTES);
1042 vm_clone->error_main.counters = old_counters;
1043 vlib_stats_pop_heap2 (vm_clone->error_main.counters, vm_clone->thread_index,
1046 vec_validate_aligned (old_counters_all_clear, j, CLIB_CACHE_LINE_BYTES);
1047 vm_clone->error_main.counters_last_clear = old_counters_all_clear;
1049 nm_clone = &vm_clone->node_main;
1050 vec_free (nm_clone->next_frames);
1051 nm_clone->next_frames = vec_dup_aligned (nm->next_frames,
1052 CLIB_CACHE_LINE_BYTES);
1054 for (j = 0; j < vec_len (nm_clone->next_frames); j++)
1056 vlib_next_frame_t *nf = &nm_clone->next_frames[j];
1057 u32 save_node_runtime_index;
1060 save_node_runtime_index = nf->node_runtime_index;
1061 save_flags = nf->flags & VLIB_FRAME_NO_FREE_AFTER_DISPATCH;
1062 vlib_next_frame_init (nf);
1063 nf->node_runtime_index = save_node_runtime_index;
1064 nf->flags = save_flags;
1067 old_nodes_clone = nm_clone->nodes;
1068 nm_clone->nodes = 0;
1072 /* Allocate all nodes in a single block for speed */
1074 clib_mem_alloc_no_fail (vec_len (nm->nodes) * sizeof (*new_n_clone));
1075 for (j = 0; j < vec_len (nm->nodes); j++)
1077 vlib_node_t *new_n = nm->nodes[j];
1079 clib_memcpy_fast (new_n_clone, new_n, sizeof (*new_n));
1080 /* none of the copied nodes have enqueue rights given out */
1081 new_n_clone->owner_node_index = VLIB_INVALID_NODE_INDEX;
1083 if (j >= vec_len (old_nodes_clone))
1085 /* new node, set to zero */
1086 clib_memset (&new_n_clone->stats_total, 0,
1087 sizeof (new_n_clone->stats_total));
1088 clib_memset (&new_n_clone->stats_last_clear, 0,
1089 sizeof (new_n_clone->stats_last_clear));
1093 vlib_node_t *old_n_clone = old_nodes_clone[j];
1094 /* Copy stats if the old data is valid */
1095 clib_memcpy_fast (&new_n_clone->stats_total,
1096 &old_n_clone->stats_total,
1097 sizeof (new_n_clone->stats_total));
1098 clib_memcpy_fast (&new_n_clone->stats_last_clear,
1099 &old_n_clone->stats_last_clear,
1100 sizeof (new_n_clone->stats_last_clear));
1102 /* keep previous node state */
1103 new_n_clone->state = old_n_clone->state;
1105 vec_add1 (nm_clone->nodes, new_n_clone);
1108 /* Free the old node clones */
1109 clib_mem_free (old_nodes_clone[0]);
1111 vec_free (old_nodes_clone);
1114 /* re-clone internal nodes */
1115 old_rt = nm_clone->nodes_by_type[VLIB_NODE_TYPE_INTERNAL];
1116 nm_clone->nodes_by_type[VLIB_NODE_TYPE_INTERNAL] =
1117 vec_dup_aligned (nm->nodes_by_type[VLIB_NODE_TYPE_INTERNAL],
1118 CLIB_CACHE_LINE_BYTES);
1120 vec_foreach (rt, nm_clone->nodes_by_type[VLIB_NODE_TYPE_INTERNAL])
1122 vlib_node_t *n = vlib_get_node (vm, rt->node_index);
1123 rt->thread_index = vm_clone->thread_index;
1124 /* copy runtime_data, will be overwritten later for existing rt */
1125 if (n->runtime_data && n->runtime_data_bytes > 0)
1126 clib_memcpy_fast (rt->runtime_data, n->runtime_data,
1127 clib_min (VLIB_NODE_RUNTIME_DATA_SIZE,
1128 n->runtime_data_bytes));
1131 for (j = 0; j < vec_len (old_rt); j++)
1133 rt = vlib_node_get_runtime (vm_clone, old_rt[j].node_index);
1134 rt->state = old_rt[j].state;
1135 clib_memcpy_fast (rt->runtime_data, old_rt[j].runtime_data,
1136 VLIB_NODE_RUNTIME_DATA_SIZE);
1141 /* re-clone input nodes */
1142 old_rt = nm_clone->nodes_by_type[VLIB_NODE_TYPE_INPUT];
1143 nm_clone->nodes_by_type[VLIB_NODE_TYPE_INPUT] =
1144 vec_dup_aligned (nm->nodes_by_type[VLIB_NODE_TYPE_INPUT],
1145 CLIB_CACHE_LINE_BYTES);
1146 clib_interrupt_resize (
1147 &nm_clone->interrupts,
1148 vec_len (nm_clone->nodes_by_type[VLIB_NODE_TYPE_INPUT]));
1150 vec_foreach (rt, nm_clone->nodes_by_type[VLIB_NODE_TYPE_INPUT])
1152 vlib_node_t *n = vlib_get_node (vm, rt->node_index);
1153 rt->thread_index = vm_clone->thread_index;
1154 /* copy runtime_data, will be overwritten later for existing rt */
1155 if (n->runtime_data && n->runtime_data_bytes > 0)
1156 clib_memcpy_fast (rt->runtime_data, n->runtime_data,
1157 clib_min (VLIB_NODE_RUNTIME_DATA_SIZE,
1158 n->runtime_data_bytes));
1161 for (j = 0; j < vec_len (old_rt); j++)
1163 rt = vlib_node_get_runtime (vm_clone, old_rt[j].node_index);
1164 rt->state = old_rt[j].state;
1165 clib_memcpy_fast (rt->runtime_data, old_rt[j].runtime_data,
1166 VLIB_NODE_RUNTIME_DATA_SIZE);
1171 /* re-clone pre-input nodes */
1172 old_rt = nm_clone->nodes_by_type[VLIB_NODE_TYPE_PRE_INPUT];
1173 nm_clone->nodes_by_type[VLIB_NODE_TYPE_PRE_INPUT] =
1174 vec_dup_aligned (nm->nodes_by_type[VLIB_NODE_TYPE_PRE_INPUT],
1175 CLIB_CACHE_LINE_BYTES);
1177 vec_foreach (rt, nm_clone->nodes_by_type[VLIB_NODE_TYPE_PRE_INPUT])
1179 vlib_node_t *n = vlib_get_node (vm, rt->node_index);
1180 rt->thread_index = vm_clone->thread_index;
1181 /* copy runtime_data, will be overwritten later for existing rt */
1182 if (n->runtime_data && n->runtime_data_bytes > 0)
1183 clib_memcpy_fast (rt->runtime_data, n->runtime_data,
1184 clib_min (VLIB_NODE_RUNTIME_DATA_SIZE,
1185 n->runtime_data_bytes));
1188 for (j = 0; j < vec_len (old_rt); j++)
1190 rt = vlib_node_get_runtime (vm_clone, old_rt[j].node_index);
1191 rt->state = old_rt[j].state;
1192 clib_memcpy_fast (rt->runtime_data, old_rt[j].runtime_data,
1193 VLIB_NODE_RUNTIME_DATA_SIZE);
1198 nm_clone->processes = vec_dup_aligned (nm->processes,
1199 CLIB_CACHE_LINE_BYTES);
1200 nm_clone->node_by_error = nm->node_by_error;
1204 vlib_worker_thread_node_runtime_update (void)
1207 * Make a note that we need to do a node runtime update
1208 * prior to releasing the barrier.
1210 vlib_global_main.need_vlib_worker_thread_node_runtime_update = 1;
1214 unformat_sched_policy (unformat_input_t * input, va_list * args)
1216 u32 *r = va_arg (*args, u32 *);
1219 #define _(v,f,s) else if (unformat (input, s)) *r = SCHED_POLICY_##f;
1220 foreach_sched_policy
1227 static clib_error_t *
1228 cpu_config (vlib_main_t * vm, unformat_input_t * input)
1230 vlib_thread_registration_t *tr;
1232 vlib_thread_main_t *tm = &vlib_thread_main;
1237 tm->thread_registrations_by_name = hash_create_string (0, sizeof (uword));
1239 tm->n_thread_stacks = 1; /* account for main thread */
1240 tm->sched_policy = ~0;
1241 tm->sched_priority = ~0;
1242 tm->main_lcore = ~0;
1248 hash_set_mem (tm->thread_registrations_by_name, tr->name, (uword) tr);
1252 while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
1254 if (unformat (input, "use-pthreads"))
1255 tm->use_pthreads = 1;
1256 else if (unformat (input, "thread-prefix %v", &tm->thread_prefix))
1258 else if (unformat (input, "main-core %u", &tm->main_lcore))
1260 else if (unformat (input, "skip-cores %u", &tm->skip_cores))
1262 else if (unformat (input, "numa-heap-size %U",
1263 unformat_memory_size, &tm->numa_heap_size))
1265 else if (unformat (input, "coremask-%s %U", &name,
1266 unformat_bitmap_mask, &bitmap) ||
1267 unformat (input, "corelist-%s %U", &name,
1268 unformat_bitmap_list, &bitmap))
1270 p = hash_get_mem (tm->thread_registrations_by_name, name);
1272 return clib_error_return (0, "no such thread type '%s'", name);
1274 tr = (vlib_thread_registration_t *) p[0];
1276 if (tr->use_pthreads)
1277 return clib_error_return (0,
1278 "corelist cannot be set for '%s' threads",
1281 return clib_error_return
1282 (0, "core placement of '%s' threads is already configured",
1285 tr->coremask = bitmap;
1286 tr->count = clib_bitmap_count_set_bits (tr->coremask);
1290 (input, "scheduler-policy %U", unformat_sched_policy,
1293 else if (unformat (input, "scheduler-priority %u", &tm->sched_priority))
1295 else if (unformat (input, "%s %u", &name, &count))
1297 p = hash_get_mem (tm->thread_registrations_by_name, name);
1299 return clib_error_return (0, "no such thread type 3 '%s'", name);
1301 tr = (vlib_thread_registration_t *) p[0];
1303 if (tr->fixed_count)
1304 return clib_error_return
1305 (0, "number of '%s' threads not configurable", name);
1307 return clib_error_return
1308 (0, "number of '%s' threads is already configured", name);
1316 if (tm->sched_priority != ~0)
1318 if (tm->sched_policy == SCHED_FIFO || tm->sched_policy == SCHED_RR)
1320 u32 prio_max = sched_get_priority_max (tm->sched_policy);
1321 u32 prio_min = sched_get_priority_min (tm->sched_policy);
1322 if (tm->sched_priority > prio_max)
1323 tm->sched_priority = prio_max;
1324 if (tm->sched_priority < prio_min)
1325 tm->sched_priority = prio_min;
1329 return clib_error_return
1331 "scheduling priority (%d) is not allowed for `normal` scheduling policy",
1332 tm->sched_priority);
1337 if (!tm->thread_prefix)
1338 tm->thread_prefix = format (0, "vpp");
1342 tm->n_thread_stacks += tr->count;
1343 tm->n_pthreads += tr->count * tr->use_pthreads;
1344 tm->n_threads += tr->count * (tr->use_pthreads == 0);
1351 VLIB_EARLY_CONFIG_FUNCTION (cpu_config, "cpu");
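/*
 * Illustration only (not part of this file's logic): a startup.conf "cpu"
 * stanza accepted by cpu_config() above might look like
 *
 *   cpu {
 *     main-core 1
 *     corelist-workers 2-5
 *     scheduler-policy fifo
 *     scheduler-priority 50
 *   }
 *
 * where "workers" must match a registered thread type name and the policy
 * names come from foreach_sched_policy.
 */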
1354 * Enforce minimum open time to minimize packet loss due to Rx overflow,
1355 * based on a test-based heuristic that the barrier should be open for at least
1356 * 3 times as long as it is closed (with an upper bound of 1ms because by that
1357 * point it is probably too late to make a difference)
1360 #ifndef BARRIER_MINIMUM_OPEN_LIMIT
1361 #define BARRIER_MINIMUM_OPEN_LIMIT 0.001
1364 #ifndef BARRIER_MINIMUM_OPEN_FACTOR
1365 #define BARRIER_MINIMUM_OPEN_FACTOR 3
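/*
 * Worked example: a barrier closure of 200us keeps the barrier open for at
 * least 3 * 200us = 600us afterwards; a 1ms closure would compute 3ms but is
 * clamped to the 1ms BARRIER_MINIMUM_OPEN_LIMIT.
 */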
1369 vlib_worker_thread_initial_barrier_sync_and_release (vlib_main_t * vm)
1372 f64 now = vlib_time_now (vm);
1373 u32 count = vlib_get_n_threads () - 1;
1375 /* No worker threads? */
1379 deadline = now + BARRIER_SYNC_TIMEOUT;
1380 *vlib_worker_threads->wait_at_barrier = 1;
1381 while (*vlib_worker_threads->workers_at_barrier != count)
1383 if ((now = vlib_time_now (vm)) > deadline)
1385 fformat (stderr, "%s: worker thread deadlock\n", __FUNCTION__);
1390 *vlib_worker_threads->wait_at_barrier = 0;
1394 * Return true if the worker thread barrier is held
1397 vlib_worker_thread_barrier_held (void)
1399 if (vlib_get_n_threads () < 2)
1402 return (*vlib_worker_threads->wait_at_barrier == 1);
1406 vlib_worker_thread_barrier_sync_int (vlib_main_t * vm, const char *func_name)
1413 f64 max_vector_rate;
1417 if (vlib_get_n_threads () < 2)
1420 ASSERT (vlib_get_thread_index () == 0);
1422 vlib_worker_threads[0].barrier_caller = func_name;
1423 count = vlib_get_n_threads () - 1;
1425 /* Record entry relative to last close */
1426 now = vlib_time_now (vm);
1427 t_entry = now - vm->barrier_epoch;
1429 /* Tolerate recursive calls */
1430 if (++vlib_worker_threads[0].recursion_level > 1)
1432 barrier_trace_sync_rec (t_entry);
1436 if (PREDICT_FALSE (vec_len (vm->barrier_perf_callbacks) != 0))
1437 clib_call_callbacks (vm->barrier_perf_callbacks, vm,
1438 vm->clib_time.last_cpu_time, 0 /* enter */ );
1441 * Need data to decide if we're working hard enough to honor
1442 * the barrier hold-down timer.
1444 max_vector_rate = 0.0;
1445 for (i = 1; i < vlib_get_n_threads (); i++)
1447 vlib_main_t *ovm = vlib_get_main_by_index (i);
1448 max_vector_rate = clib_max (max_vector_rate,
1449 (f64) vlib_last_vectors_per_main_loop (ovm));
1452 vlib_worker_threads[0].barrier_sync_count++;
1454 /* Enforce minimum barrier open time to minimize packet loss */
1455 ASSERT (vm->barrier_no_close_before <= (now + BARRIER_MINIMUM_OPEN_LIMIT));
1458 * If any worker thread seems busy, which we define
1459 * as a vector rate above 10, we enforce the barrier hold-down timer
1461 if (max_vector_rate > 10.0)
1465 now = vlib_time_now (vm);
1466 /* Barrier hold-down timer expired? */
1467 if (now >= vm->barrier_no_close_before)
1469 if ((vm->barrier_no_close_before - now)
1470 > (2.0 * BARRIER_MINIMUM_OPEN_LIMIT))
1473 ("clock change: would have waited for %.4f seconds",
1474 (vm->barrier_no_close_before - now));
1479 /* Record time of closure */
1480 t_open = now - vm->barrier_epoch;
1481 vm->barrier_epoch = now;
1483 deadline = now + BARRIER_SYNC_TIMEOUT;
1485 *vlib_worker_threads->wait_at_barrier = 1;
1486 while (*vlib_worker_threads->workers_at_barrier != count)
1488 if ((now = vlib_time_now (vm)) > deadline)
1490 fformat (stderr, "%s: worker thread deadlock\n", __FUNCTION__);
1495 t_closed = now - vm->barrier_epoch;
1497 barrier_trace_sync (t_entry, t_open, t_closed);
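/*
 * Typical usage sketch: the main thread brackets updates of state shared
 * with the workers between a sync / release pair, e.g.
 *
 *   vlib_worker_thread_barrier_sync (vm);
 *   ... modify data structures the workers read ...
 *   vlib_worker_thread_barrier_release (vm);
 */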
1502 vlib_worker_thread_barrier_release (vlib_main_t * vm)
1504 vlib_global_main_t *vgm = vlib_get_global_main ();
1510 f64 t_update_main = 0.0;
1511 int refork_needed = 0;
1513 if (vlib_get_n_threads () < 2)
1516 ASSERT (vlib_get_thread_index () == 0);
1519 now = vlib_time_now (vm);
1520 t_entry = now - vm->barrier_epoch;
1522 if (--vlib_worker_threads[0].recursion_level > 0)
1524 barrier_trace_release_rec (t_entry);
1528 /* Update (all) node runtimes before releasing the barrier, if needed */
1529 if (vgm->need_vlib_worker_thread_node_runtime_update)
1532 * Lock stat segment here, so we're safe when
1533 * rebuilding the stat segment node clones from the
1536 vlib_stat_segment_lock ();
1538 /* Do stats elements on main thread */
1539 worker_thread_node_runtime_update_internal ();
1540 vgm->need_vlib_worker_thread_node_runtime_update = 0;
1542 /* Do per thread rebuilds in parallel */
1544 clib_atomic_fetch_add (vlib_worker_threads->node_reforks_required,
1545 (vlib_get_n_threads () - 1));
1546 now = vlib_time_now (vm);
1547 t_update_main = now - vm->barrier_epoch;
1550 deadline = now + BARRIER_SYNC_TIMEOUT;
1553 * Note when we let go of the barrier.
1554 * Workers can use this to derive a reasonably accurate
1555 * time offset. See vlib_time_now(...)
1557 vm->time_last_barrier_release = vlib_time_now (vm);
1558 CLIB_MEMORY_STORE_BARRIER ();
1560 *vlib_worker_threads->wait_at_barrier = 0;
1562 while (*vlib_worker_threads->workers_at_barrier > 0)
1564 if ((now = vlib_time_now (vm)) > deadline)
1566 fformat (stderr, "%s: worker thread deadlock\n", __FUNCTION__);
1571 /* Wait for reforks before continuing */
1574 now = vlib_time_now (vm);
1576 deadline = now + BARRIER_SYNC_TIMEOUT;
1578 while (*vlib_worker_threads->node_reforks_required > 0)
1580 if ((now = vlib_time_now (vm)) > deadline)
1582 fformat (stderr, "%s: worker thread refork deadlock\n",
1587 vlib_stat_segment_unlock ();
1590 t_closed_total = now - vm->barrier_epoch;
1592 minimum_open = t_closed_total * BARRIER_MINIMUM_OPEN_FACTOR;
1594 if (minimum_open > BARRIER_MINIMUM_OPEN_LIMIT)
1596 minimum_open = BARRIER_MINIMUM_OPEN_LIMIT;
1599 vm->barrier_no_close_before = now + minimum_open;
1601 /* Record barrier epoch (used to enforce minimum open time) */
1602 vm->barrier_epoch = now;
1604 barrier_trace_release (t_entry, t_closed_total, t_update_main);
1606 if (PREDICT_FALSE (vec_len (vm->barrier_perf_callbacks) != 0))
1607 clib_call_callbacks (vm->barrier_perf_callbacks, vm,
1608 vm->clib_time.last_cpu_time, 1 /* leave */ );
1612 * Wait until each of the workers has been once around the track
1615 vlib_worker_wait_one_loop (void)
1617 vlib_global_main_t *vgm = vlib_get_global_main ();
1618 ASSERT (vlib_get_thread_index () == 0);
1620 if (vlib_get_n_threads () < 2)
1623 if (vlib_worker_thread_barrier_held ())
1629 vec_validate (counts, vlib_get_n_threads () - 1);
1631 /* record the current loop counts */
1632 vec_foreach_index (ii, vgm->vlib_mains)
1633 counts[ii] = vgm->vlib_mains[ii]->main_loop_count;
1635 /* spin until each changes, apart from the main thread, or we'd be waiting on ourselves */
1637 for (ii = 1; ii < vec_len (counts); ii++)
1639 while (counts[ii] == vgm->vlib_mains[ii]->main_loop_count)
1648 * Check the frame queue to see if any frames are available.
1649 * If so, pull the packets off the frames and put them to the node associated with this frame queue.
1653 vlib_frame_queue_dequeue (vlib_main_t * vm, vlib_frame_queue_main_t * fqm)
1655 u32 thread_id = vm->thread_index;
1656 vlib_frame_queue_t *fq = fqm->vlib_frame_queues[thread_id];
1657 vlib_frame_queue_elt_t *elt;
1666 ASSERT (vm == vlib_global_main.vlib_mains[thread_id]);
1668 if (PREDICT_FALSE (fqm->node_index == ~0))
1671 * Gather trace data for frame queues
1673 if (PREDICT_FALSE (fq->trace))
1675 frame_queue_trace_t *fqt;
1676 frame_queue_nelt_counter_t *fqh;
1679 fqt = &fqm->frame_queue_traces[thread_id];
1681 fqt->nelts = fq->nelts;
1682 fqt->head = fq->head;
1683 fqt->head_hint = fq->head_hint;
1684 fqt->tail = fq->tail;
1685 fqt->threshold = fq->vector_threshold;
1686 fqt->n_in_use = fqt->tail - fqt->head;
1687 if (fqt->n_in_use >= fqt->nelts)
1689 // if beyond max then use max
1690 fqt->n_in_use = fqt->nelts - 1;
1693 /* Record the number of elements in use in the histogram */
1694 fqh = &fqm->frame_queue_histogram[thread_id];
1695 fqh->count[fqt->n_in_use]++;
1697 /* Record a snapshot of the elements in use */
1698 for (elix = 0; elix < fqt->nelts; elix++)
1700 elt = fq->elts + ((fq->head + 1 + elix) & (fq->nelts - 1));
1701 if (1 || elt->valid)
1703 fqt->n_vectors[elix] = elt->n_vectors;
1712 if (fq->head == fq->tail)
1714 fq->head_hint = fq->head;
1718 elt = fq->elts + ((fq->head + 1) & (fq->nelts - 1));
1722 fq->head_hint = fq->head;
1726 from = elt->buffer_index;
1727 msg_type = elt->msg_type;
1729 ASSERT (msg_type == VLIB_FRAME_QUEUE_ELT_DISPATCH_FRAME);
1730 ASSERT (elt->n_vectors <= VLIB_FRAME_SIZE);
1732 f = vlib_get_frame_to_node (vm, fqm->node_index);
1734 /* If the first vector is traced, set the frame trace flag */
1735 b = vlib_get_buffer (vm, from[0]);
1736 if (b->flags & VLIB_BUFFER_IS_TRACED)
1737 f->frame_flags |= VLIB_NODE_FLAG_TRACE;
1739 to = vlib_frame_vector_args (f);
1741 n_left_to_node = elt->n_vectors;
1743 while (n_left_to_node >= 4)
1751 n_left_to_node -= 4;
1754 while (n_left_to_node > 0)
1762 vectors += elt->n_vectors;
1763 f->n_vectors = elt->n_vectors;
1764 vlib_put_frame_to_node (vm, fqm->node_index, f);
1768 elt->msg_type = 0xfefefefe;
1769 CLIB_MEMORY_BARRIER ();
1774 * Limit the number of packets pushed into the graph
1776 if (vectors >= fq->vector_threshold)
1778 fq->head_hint = fq->head;
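/*
 * Worker thread main-loop entry: finish the stack switch, run the per-worker
 * init functions, wait (when an external thread manager such as DPDK is in
 * charge) until the workers are released, then enter vlib_worker_loop().
 */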
1787 vlib_worker_thread_fn (void *arg)
1789 vlib_global_main_t *vgm = vlib_get_global_main ();
1790 vlib_worker_thread_t *w = (vlib_worker_thread_t *) arg;
1791 vlib_thread_main_t *tm = vlib_get_thread_main ();
1792 vlib_main_t *vm = vlib_get_main ();
1795 vlib_process_finish_switch_stack (vm);
1797 ASSERT (vm->thread_index == vlib_get_thread_index ());
1799 vlib_worker_thread_init (w);
1800 clib_time_init (&vm->clib_time);
1801 clib_mem_set_heap (w->thread_mheap);
1803 vm->worker_init_functions_called = hash_create (0, 0);
1805 e = vlib_call_init_exit_functions_no_sort (
1806 vm, &vgm->worker_init_function_registrations, 1 /* call_once */,
1809 clib_error_report (e);
1811 /* Wait until the dpdk init sequence is complete */
1812 while (tm->extern_thread_mgmt && tm->worker_thread_release == 0)
1813 vlib_worker_thread_barrier_check ();
1815 vlib_worker_loop (vm);
1819 VLIB_REGISTER_THREAD (worker_thread_reg, static) = {
1822 .function = vlib_worker_thread_fn,
1827 vlib_frame_queue_main_init (u32 node_index, u32 frame_queue_nelts)
1829 vlib_thread_main_t *tm = vlib_get_thread_main ();
1830 vlib_frame_queue_main_t *fqm;
1831 vlib_frame_queue_t *fq;
1835 if (frame_queue_nelts == 0)
1836 frame_queue_nelts = FRAME_QUEUE_MAX_NELTS;
1838 num_threads = 1 /* main thread */ + tm->n_threads;
1839 ASSERT (frame_queue_nelts >= 8 + num_threads);
1841 vec_add2 (tm->frame_queue_mains, fqm, 1);
1843 fqm->node_index = node_index;
1844 fqm->frame_queue_nelts = frame_queue_nelts;
1845 fqm->queue_hi_thresh = frame_queue_nelts - num_threads;
1847 vec_validate (fqm->vlib_frame_queues, tm->n_vlib_mains - 1);
1848 vec_validate (fqm->per_thread_data, tm->n_vlib_mains - 1);
1849 _vec_len (fqm->vlib_frame_queues) = 0;
1850 for (i = 0; i < tm->n_vlib_mains; i++)
1852 vlib_frame_queue_per_thread_data_t *ptd;
1853 fq = vlib_frame_queue_alloc (frame_queue_nelts);
1854 vec_add1 (fqm->vlib_frame_queues, fq);
1856 ptd = vec_elt_at_index (fqm->per_thread_data, i);
1857 vec_validate (ptd->handoff_queue_elt_by_thread_index,
1858 tm->n_vlib_mains - 1);
1859 vec_validate_init_empty (ptd->congested_handoff_queue_by_thread_index,
1860 tm->n_vlib_mains - 1,
1861 (vlib_frame_queue_t *) (~0));
1864 return (fqm - tm->frame_queue_mains);
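/*
 * Illustrative use (hypothetical node): a handoff feature typically calls
 * this once at init and stores the returned index for later enqueues, e.g.
 *
 *   u32 fq_index = vlib_frame_queue_main_init (my_handoff_node.index, 0);
 *
 * where passing 0 for frame_queue_nelts selects FRAME_QUEUE_MAX_NELTS.
 */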
1868 vlib_thread_cb_register (struct vlib_main_t *vm, vlib_thread_callbacks_t * cb)
1870 vlib_thread_main_t *tm = vlib_get_thread_main ();
1872 if (tm->extern_thread_mgmt)
1875 tm->cb.vlib_launch_thread_cb = cb->vlib_launch_thread_cb;
1876 tm->extern_thread_mgmt = 1;
1881 vlib_process_signal_event_mt_helper (vlib_process_signal_event_mt_args_t *
1884 ASSERT (vlib_get_thread_index () == 0);
1885 vlib_process_signal_event (vlib_get_main (), args->node_index,
1886 args->type_opaque, args->data);
1889 void *rpc_call_main_thread_cb_fn;
1892 vlib_rpc_call_main_thread (void *callback, u8 * args, u32 arg_size)
1894 if (rpc_call_main_thread_cb_fn)
1896 void (*fp) (void *, u8 *, u32) = rpc_call_main_thread_cb_fn;
1897 (*fp) (callback, args, arg_size);
1900 clib_warning ("BUG: rpc_call_main_thread_cb_fn NULL!");
1904 threads_init (vlib_main_t * vm)
1909 VLIB_INIT_FUNCTION (threads_init);
1912 static clib_error_t *
1913 show_clock_command_fn (vlib_main_t * vm,
1914 unformat_input_t * input, vlib_cli_command_t * cmd)
1918 clib_timebase_t _tb, *tb = &_tb;
1920 (void) unformat (input, "verbose %=", &verbose, 1);
1922 clib_timebase_init (tb, 0 /* GMT */ , CLIB_TIMEBASE_DAYLIGHT_NONE,
1925 vlib_cli_output (vm, "%U, %U GMT", format_clib_time, &vm->clib_time,
1926 verbose, format_clib_timebase_time,
1927 clib_timebase_now (tb));
1929 if (vlib_get_n_threads () == 1)
1932 vlib_cli_output (vm, "Time last barrier release %.9f",
1933 vm->time_last_barrier_release);
1935 for (i = 1; i < vlib_get_n_threads (); i++)
1937 vlib_main_t *ovm = vlib_get_main_by_index (i);
1941 vlib_cli_output (vm, "%d: %U", i, format_clib_time, &ovm->clib_time,
1945 vm, "Thread %d offset %.9f error %.9f", i, ovm->time_offset,
1946 vm->time_last_barrier_release - ovm->time_last_barrier_release);
1952 VLIB_CLI_COMMAND (f_command, static) =
1954 .path = "show clock",
1955 .short_help = "show clock",
1956 .function = show_clock_command_fn,
1960 vlib_thread_main_t *
1961 vlib_get_thread_main_not_inline (void)
1963 return vlib_get_thread_main ();
1967 * fd.io coding-style-patch-verification: ON
1970 * eval: (c-set-style "gnu")