2 * Copyright (c) 2015 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
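/*
 * VLIB thread support: worker thread creation and core pinning, per-worker
 * clones of vlib_main_t and node runtime state, the worker-thread barrier
 * used by the main thread to quiesce workers, and inter-thread frame-queue
 * handoff.
 */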
19 #include <vppinfra/format.h>
20 #include <vppinfra/linux/sysfs.h>
21 #include <vlib/vlib.h>
23 #include <vlib/threads.h>
24 #include <vlib/unix/cj.h>
26 #include <vlib/stat_weak_inlines.h>
28 DECLARE_CJ_GLOBAL_LOG;
37 vlib_worker_thread_t *vlib_worker_threads;
38 vlib_thread_main_t vlib_thread_main;
41 * Barrier tracing can be enabled on a normal build to collect information
42 * on barrier use, including timings and call stacks. Deliberately not
43 * keyed off CLIB_DEBUG, because that can add significant overhead which
44 * impacts observed timings.
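/*
 * Emit an elog record for a (non-recursive) barrier sync: the running count
 * of syncs, the caller name, and the entry / open / closed times converted
 * to integer microseconds.
 */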
48 barrier_trace_sync (f64 t_entry, f64 t_open, f64 t_closed)
50 if (!vlib_worker_threads->barrier_elog_enabled)
54 ELOG_TYPE_DECLARE (e) =
56 .format = "bar-trace-%s-#%d",
57 .format_args = "T4i4",
62 u32 caller, count, t_entry, t_open, t_closed;
65 ed = ELOG_DATA (&vlib_global_main.elog_main, e);
66 ed->count = (int) vlib_worker_threads[0].barrier_sync_count;
67 ed->caller = elog_string (&vlib_global_main.elog_main,
68 (char *) vlib_worker_threads[0].barrier_caller);
69 ed->t_entry = (int) (1000000.0 * t_entry);
70 ed->t_open = (int) (1000000.0 * t_open);
71 ed->t_closed = (int) (1000000.0 * t_closed);
75 barrier_trace_sync_rec (f64 t_entry)
77 if (!vlib_worker_threads->barrier_elog_enabled)
81 ELOG_TYPE_DECLARE (e) =
83 .format = "bar-syncrec-%s-#%d",
84 .format_args = "T4i4",
92 ed = ELOG_DATA (&vlib_global_main.elog_main, e);
93 ed->depth = (int) vlib_worker_threads[0].recursion_level - 1;
94 ed->caller = elog_string (&vlib_global_main.elog_main,
95 (char *) vlib_worker_threads[0].barrier_caller);
99 barrier_trace_release_rec (f64 t_entry)
101 if (!vlib_worker_threads->barrier_elog_enabled)
105 ELOG_TYPE_DECLARE (e) =
107 .format = "bar-relrrec-#%d",
116 ed = ELOG_DATA (&vlib_global_main.elog_main, e);
117 ed->depth = (int) vlib_worker_threads[0].recursion_level;
121 barrier_trace_release (f64 t_entry, f64 t_closed_total, f64 t_update_main)
123 if (!vlib_worker_threads->barrier_elog_enabled)
127 ELOG_TYPE_DECLARE (e) =
129 .format = "bar-rel-#%d-e%d-u%d-t%d",
130 .format_args = "i4i4i4i4",
135 u32 count, t_entry, t_update_main, t_closed_total;
138 ed = ELOG_DATA (&vlib_global_main.elog_main, e);
139 ed->t_entry = (int) (1000000.0 * t_entry);
140 ed->t_update_main = (int) (1000000.0 * t_update_main);
141 ed->t_closed_total = (int) (1000000.0 * t_closed_total);
142 ed->count = (int) vlib_worker_threads[0].barrier_sync_count;
144 /* Reset context for next trace */
145 vlib_worker_threads[0].barrier_context = NULL;
149 os_get_nthreads (void)
151 return vec_len (vlib_thread_stacks);
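/*
 * Set the current thread's name (visible in ps/top/gdb) via
 * pthread_setname_np; warn if the call fails.
 */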
155 vlib_set_thread_name (char *name)
157 int pthread_setname_np (pthread_t __target_thread, const char *__name);
159 pthread_t thread = pthread_self ();
163 rv = pthread_setname_np (thread, name);
165 clib_warning ("pthread_setname_np returned %d", rv);
170 sort_registrations_by_no_clone (void *a0, void *a1)
172 vlib_thread_registration_t **tr0 = a0;
173 vlib_thread_registration_t **tr1 = a1;
175 return ((i32) ((*tr0)->no_data_structure_clone)
176 - ((i32) ((*tr1)->no_data_structure_clone)));
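/*
 * Read a sysfs cpu-list file (e.g. "0-3,8-11") and convert it to a clib
 * bitmap via unformat_bitmap_list.
 */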
180 clib_sysfs_list_to_bitmap (char *filename)
185 fp = fopen (filename, "r");
190 vec_validate (buffer, 256 - 1);
191 if (fgets ((char *) buffer, 256, fp))
194 unformat_init_string (&in, (char *) buffer,
195 strlen ((char *) buffer));
196 if (unformat (&in, "%U", unformat_bitmap_list, &r) != 1)
197 clib_warning ("unformat_bitmap_list failed");
207 /* Called early in the init sequence */
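/*
 * Reads the online cpu / node bitmaps from sysfs, reserves a core for the
 * main thread (lcore 1 by default, if available), pins the main thread,
 * assigns cores to each registered thread type from the remaining set, and
 * computes tm->n_vlib_mains.
 */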
210 vlib_thread_init (vlib_main_t * vm)
212 vlib_thread_main_t *tm = &vlib_thread_main;
213 vlib_worker_thread_t *w;
214 vlib_thread_registration_t *tr;
215 u32 n_vlib_mains = 1;
220 /* get bitmaps of active cpu cores and sockets */
221 tm->cpu_core_bitmap =
222 clib_sysfs_list_to_bitmap ("/sys/devices/system/cpu/online");
223 tm->cpu_socket_bitmap =
224 clib_sysfs_list_to_bitmap ("/sys/devices/system/node/online");
226 avail_cpu = clib_bitmap_dup (tm->cpu_core_bitmap);
229 for (i = 0; i < tm->skip_cores; i++)
231 uword c = clib_bitmap_first_set (avail_cpu);
233 return clib_error_return (0, "no available cpus to skip");
235 avail_cpu = clib_bitmap_set (avail_cpu, c, 0);
238 /* grab cpu for main thread */
239 if (tm->main_lcore == ~0)
241 /* if main-lcore is not set, we try to use lcore 1 */
242 if (clib_bitmap_get (avail_cpu, 1))
245 tm->main_lcore = clib_bitmap_first_set (avail_cpu);
246 if (tm->main_lcore == (u8) ~ 0)
247 return clib_error_return (0, "no available cpus to be used for the"
252 if (clib_bitmap_get (avail_cpu, tm->main_lcore) == 0)
253 return clib_error_return (0, "cpu %u is not available to be used"
254 " for the main thread", tm->main_lcore);
256 avail_cpu = clib_bitmap_set (avail_cpu, tm->main_lcore, 0);
259 * Determine if the number of workers is greater than 0.
260 * If so, mark CPU 0 unavailable so workers will be numbered after main.
263 uword *p = hash_get_mem (tm->thread_registrations_by_name, "workers");
266 vlib_thread_registration_t *tr = (vlib_thread_registration_t *) p[0];
267 int worker_thread_count = tr->count;
268 n_workers = worker_thread_count;
270 if (tm->skip_cores == 0 && n_workers)
271 avail_cpu = clib_bitmap_set (avail_cpu, 0, 0);
273 /* if sysfs provides no data, assume that only socket 0 exists */
274 if (!tm->cpu_socket_bitmap)
275 tm->cpu_socket_bitmap = clib_bitmap_set (0, 0, 1);
277 /* pin main thread to main_lcore */
278 if (tm->cb.vlib_thread_set_lcore_cb)
280 tm->cb.vlib_thread_set_lcore_cb (0, tm->main_lcore);
286 CPU_SET (tm->main_lcore, &cpuset);
287 pthread_setaffinity_np (pthread_self (), sizeof (cpu_set_t), &cpuset);
290 /* Set up thread 0 */
291 vec_validate_aligned (vlib_worker_threads, 0, CLIB_CACHE_LINE_BYTES);
292 _vec_len (vlib_worker_threads) = 1;
293 w = vlib_worker_threads;
294 w->thread_mheap = clib_mem_get_heap ();
295 w->thread_stack = vlib_thread_stacks[0];
296 w->cpu_id = tm->main_lcore;
297 w->lwp = syscall (SYS_gettid);
298 w->thread_id = pthread_self ();
299 tm->n_vlib_mains = 1;
301 vlib_get_thread_core_numa (w, w->cpu_id);
303 if (tm->sched_policy != ~0)
305 struct sched_param sched_param;
306 if (!sched_getparam (w->lwp, &sched_param))
308 if (tm->sched_priority != ~0)
309 sched_param.sched_priority = tm->sched_priority;
310 sched_setscheduler (w->lwp, tm->sched_policy, &sched_param);
314 /* assign threads to cores and set n_vlib_mains */
319 vec_add1 (tm->registrations, tr);
323 vec_sort_with_function (tm->registrations, sort_registrations_by_no_clone);
325 for (i = 0; i < vec_len (tm->registrations); i++)
328 tr = tm->registrations[i];
329 tr->first_index = first_index;
330 first_index += tr->count;
331 n_vlib_mains += (tr->no_data_structure_clone == 0) ? tr->count : 0;
333 /* construct coremask */
334 if (tr->use_pthreads || !tr->count)
341 clib_bitmap_foreach (c, tr->coremask, ({
342 if (clib_bitmap_get(avail_cpu, c) == 0)
343 return clib_error_return (0, "cpu %u is not available to be used"
344 " for the '%s' thread",c, tr->name);
346 avail_cpu = clib_bitmap_set(avail_cpu, c, 0);
352 for (j = 0; j < tr->count; j++)
354 uword c = clib_bitmap_first_set (avail_cpu);
356 return clib_error_return (0,
357 "no available cpus to be used for"
358 " the '%s' thread", tr->name);
360 avail_cpu = clib_bitmap_set (avail_cpu, c, 0);
361 tr->coremask = clib_bitmap_set (tr->coremask, c, 1);
366 clib_bitmap_free (avail_cpu);
368 tm->n_vlib_mains = n_vlib_mains;
371 * Allocate the remaining worker threads and thread stack vector slots;
372 * from now on, calls to os_get_nthreads() will return the correct answer.
375 vec_validate_aligned (vlib_worker_threads, first_index - 1,
376 CLIB_CACHE_LINE_BYTES);
377 vec_validate (vlib_thread_stacks, vec_len (vlib_worker_threads) - 1);
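/*
 * Allocate a cache-line-aligned frame queue with nelts elements (nelts must
 * be a power of two). vector_threshold (128 packets) is used on the dequeue
 * side to limit how much work is pulled into the graph per call.
 */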
382 vlib_frame_queue_alloc (int nelts)
384 vlib_frame_queue_t *fq;
386 fq = clib_mem_alloc_aligned (sizeof (*fq), CLIB_CACHE_LINE_BYTES);
387 clib_memset (fq, 0, sizeof (*fq));
389 fq->vector_threshold = 128; /* packets */
390 vec_validate_aligned (fq->elts, nelts - 1, CLIB_CACHE_LINE_BYTES);
394 if (((uword) & fq->tail) & (CLIB_CACHE_LINE_BYTES - 1))
395 fformat (stderr, "WARNING: fq->tail unaligned\n");
396 if (((uword) & fq->head) & (CLIB_CACHE_LINE_BYTES - 1))
397 fformat (stderr, "WARNING: fq->head unaligned\n");
398 if (((uword) fq->elts) & (CLIB_CACHE_LINE_BYTES - 1))
399 fformat (stderr, "WARNING: fq->elts unaligned\n");
401 if (sizeof (fq->elts[0]) % CLIB_CACHE_LINE_BYTES)
402 fformat (stderr, "WARNING: fq->elts[0] size %d\n",
403 sizeof (fq->elts[0]));
404 if (nelts & (nelts - 1))
406 fformat (stderr, "FATAL: nelts MUST be a power of 2\n");
414 void vl_msg_api_handler_no_free (void *) __attribute__ ((weak));
416 vl_msg_api_handler_no_free (void *v)
420 /* Turned off, save as reference material... */
423 vlib_frame_queue_dequeue_internal (int thread_id,
424 vlib_main_t * vm, vlib_node_main_t * nm)
426 vlib_frame_queue_t *fq = vlib_frame_queues[thread_id];
427 vlib_frame_queue_elt_t *elt;
429 vlib_pending_frame_t *p;
430 vlib_node_runtime_t *r;
431 u32 node_runtime_index;
436 ASSERT (vm == vlib_mains[thread_id]);
440 if (fq->head == fq->tail)
443 elt = fq->elts + ((fq->head + 1) & (fq->nelts - 1));
448 before = clib_cpu_time_now ();
451 node_runtime_index = elt->node_runtime_index;
452 msg_type = elt->msg_type;
456 case VLIB_FRAME_QUEUE_ELT_FREE_BUFFERS:
457 vlib_buffer_free (vm, vlib_frame_vector_args (f), f->n_vectors);
458 /* note fallthrough... */
459 case VLIB_FRAME_QUEUE_ELT_FREE_FRAME:
460 r = vec_elt_at_index (nm->nodes_by_type[VLIB_NODE_TYPE_INTERNAL],
462 vlib_frame_free (vm, r, f);
464 case VLIB_FRAME_QUEUE_ELT_DISPATCH_FRAME:
465 vec_add2 (vm->node_main.pending_frames, p, 1);
466 f->flags |= (VLIB_FRAME_PENDING | VLIB_FRAME_FREE_AFTER_DISPATCH);
467 p->node_runtime_index = elt->node_runtime_index;
468 p->frame_index = vlib_frame_index (vm, f);
469 p->next_frame_index = VLIB_PENDING_FRAME_NO_NEXT_FRAME;
470 fq->dequeue_vectors += (u64) f->n_vectors;
472 case VLIB_FRAME_QUEUE_ELT_API_MSG:
473 vl_msg_api_handler_no_free (f);
476 clib_warning ("bogus frame queue message, type %d", msg_type);
481 fq->dequeue_ticks += clib_cpu_time_now () - before;
482 CLIB_MEMORY_BARRIER ();
491 vlib_frame_queue_dequeue (int thread_id,
492 vlib_main_t * vm, vlib_node_main_t * nm)
494 return vlib_frame_queue_dequeue_internal (thread_id, vm, nm);
498 vlib_frame_queue_enqueue (vlib_main_t * vm, u32 node_runtime_index,
499 u32 frame_queue_index, vlib_frame_t * frame,
500 vlib_frame_queue_msg_type_t type)
502 vlib_frame_queue_t *fq = vlib_frame_queues[frame_queue_index];
503 vlib_frame_queue_elt_t *elt;
506 u64 before = clib_cpu_time_now ();
510 new_tail = clib_atomic_add_fetch (&fq->tail, 1);
512 /* Wait until a ring slot is available */
513 while (new_tail >= fq->head + fq->nelts)
515 f64 b4 = vlib_time_now_ticks (vm, before);
516 vlib_worker_thread_barrier_check (vm, b4);
517 /* Bad idea. Dequeue -> enqueue -> dequeue -> trouble */
518 // vlib_frame_queue_dequeue (vm->thread_index, vm, nm);
521 elt = fq->elts + (new_tail & (fq->nelts - 1));
523 /* this would be very bad... */
528 /* Once we enqueue the frame, frame->n_vectors is owned elsewhere... */
529 save_count = frame->n_vectors;
532 elt->node_runtime_index = node_runtime_index;
533 elt->msg_type = type;
534 CLIB_MEMORY_BARRIER ();
541 /* To be called by vlib worker threads upon startup */
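/*
 * Switches to the worker's heap, sets the thread name from the registration
 * short name, and performs the initial barrier handshake: bump
 * workers_at_barrier, spin until the main thread drops wait_at_barrier,
 * then decrement.
 */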
543 vlib_worker_thread_init (vlib_worker_thread_t * w)
545 vlib_thread_main_t *tm = vlib_get_thread_main ();
548 * Note: disabling signals in worker threads as follows
549 * prevents the api post-mortem dump scheme from working
553 * pthread_sigmask (SIG_SETMASK, &s, 0);
557 clib_mem_set_heap (w->thread_mheap);
559 if (vec_len (tm->thread_prefix) && w->registration->short_name)
561 w->name = format (0, "%v_%s_%d%c", tm->thread_prefix,
562 w->registration->short_name, w->instance_id, '\0');
563 vlib_set_thread_name ((char *) w->name);
566 if (!w->registration->use_pthreads)
569 /* Initial barrier sync, for both worker and i/o threads */
570 clib_atomic_fetch_add (vlib_worker_threads->workers_at_barrier, 1);
572 while (*vlib_worker_threads->wait_at_barrier)
575 clib_atomic_fetch_add (vlib_worker_threads->workers_at_barrier, -1);
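/*
 * Thread entry point (used both for pthread_create and for externally
 * launched threads): record the LWP and pthread ids, set __os_thread_index,
 * then run the registered thread function on the worker's own stack via
 * clib_calljmp.
 */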
580 vlib_worker_thread_bootstrap_fn (void *arg)
583 vlib_worker_thread_t *w = arg;
585 w->lwp = syscall (SYS_gettid);
586 w->thread_id = pthread_self ();
588 __os_thread_index = w - vlib_worker_threads;
590 rv = (void *) clib_calljmp
591 ((uword (*)(uword)) w->thread_function,
592 (uword) arg, w->thread_stack + VLIB_THREAD_STACK_SIZE);
593 /* NOTREACHED, we hope */
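/*
 * Look up the core id and package/numa id for cpu_id from
 * /sys/devices/system/cpu/cpuN/topology and store them in the worker.
 */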
598 vlib_get_thread_core_numa (vlib_worker_thread_t * w, unsigned cpu_id)
600 const char *sys_cpu_path = "/sys/devices/system/cpu/cpu";
602 int core_id = -1, numa_id = -1;
604 p = format (p, "%s%u/topology/core_id%c", sys_cpu_path, cpu_id, 0);
605 clib_sysfs_read ((char *) p, "%d", &core_id);
606 vec_reset_length (p);
607 p = format (p, "%s%u/topology/physical_package_id%c", sys_cpu_path,
609 clib_sysfs_read ((char *) p, "%d", &numa_id);
612 w->core_id = core_id;
613 w->numa_id = numa_id;
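/*
 * Launch one worker: resolve its core/numa placement, set up a per-numa
 * heap if "numa-heap-size" was configured (otherwise reuse the main heap),
 * then either hand the thread to the external launch callback (non-pthread
 * workers) or pthread_create it and pin it to cpu_id.
 */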
616 static clib_error_t *
617 vlib_launch_thread_int (void *fp, vlib_worker_thread_t * w, unsigned cpu_id)
619 vlib_thread_main_t *tm = &vlib_thread_main;
620 void *(*fp_arg) (void *) = fp;
624 vlib_get_thread_core_numa (w, cpu_id);
626 /* Set up NUMA-bound heap if indicated */
627 if (clib_per_numa_mheaps[w->numa_id] == 0)
629 /* If the user requested a NUMA heap, create it... */
630 if (tm->numa_heap_size)
632 numa_heap = clib_mem_init_thread_safe_numa
633 (0 /* DIY */ , tm->numa_heap_size, w->numa_id);
634 clib_per_numa_mheaps[w->numa_id] = numa_heap;
638 /* Or, use the main heap */
639 clib_per_numa_mheaps[w->numa_id] = w->thread_mheap;
643 if (tm->cb.vlib_launch_thread_cb && !w->registration->use_pthreads)
644 return tm->cb.vlib_launch_thread_cb (fp, (void *) w, cpu_id);
650 CPU_SET (cpu_id, &cpuset);
652 if (pthread_create (&worker, NULL /* attr */ , fp_arg, (void *) w))
653 return clib_error_return_unix (0, "pthread_create");
655 if (pthread_setaffinity_np (worker, sizeof (cpu_set_t), &cpuset))
656 return clib_error_return_unix (0, "pthread_setaffinity_np");
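/*
 * Fork per-worker copies of the main thread's state: a vlib_main_t clone,
 * node and node-runtime clones, per-thread error counters in the stats
 * segment, and pending-frame / next-frame vectors. Then launch every
 * registered worker thread and run one barrier sync/release cycle.
 */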
662 static clib_error_t *
663 start_workers (vlib_main_t * vm)
666 vlib_worker_thread_t *w;
667 vlib_main_t *vm_clone;
669 vlib_thread_main_t *tm = &vlib_thread_main;
670 vlib_thread_registration_t *tr;
671 vlib_node_runtime_t *rt;
672 u32 n_vlib_mains = tm->n_vlib_mains;
673 u32 worker_thread_index;
674 u8 *main_heap = clib_mem_get_per_cpu_heap ();
676 vec_reset_length (vlib_worker_threads);
678 /* Set up the main thread */
679 vec_add2_aligned (vlib_worker_threads, w, 1, CLIB_CACHE_LINE_BYTES);
680 w->elog_track.name = "main thread";
681 elog_track_register (&vm->elog_main, &w->elog_track);
683 if (vec_len (tm->thread_prefix))
685 w->name = format (0, "%v_main%c", tm->thread_prefix, '\0');
686 vlib_set_thread_name ((char *) w->name);
690 clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES, CLIB_CACHE_LINE_BYTES);
691 vm->elog_main.lock[0] = 0;
693 if (n_vlib_mains > 1)
695 /* Replace hand-crafted length-1 vector with a real vector */
698 vec_validate_aligned (vlib_mains, tm->n_vlib_mains - 1,
699 CLIB_CACHE_LINE_BYTES);
700 _vec_len (vlib_mains) = 0;
701 vec_add1_aligned (vlib_mains, vm, CLIB_CACHE_LINE_BYTES);
703 vlib_worker_threads->wait_at_barrier =
704 clib_mem_alloc_aligned (sizeof (u32), CLIB_CACHE_LINE_BYTES);
705 vlib_worker_threads->workers_at_barrier =
706 clib_mem_alloc_aligned (sizeof (u32), CLIB_CACHE_LINE_BYTES);
708 vlib_worker_threads->node_reforks_required =
709 clib_mem_alloc_aligned (sizeof (u32), CLIB_CACHE_LINE_BYTES);
711 /* We'll need the rpc vector lock... */
712 clib_spinlock_init (&vm->pending_rpc_lock);
714 /* Ask for an initial barrier sync */
715 *vlib_worker_threads->workers_at_barrier = 0;
716 *vlib_worker_threads->wait_at_barrier = 1;
718 /* Without update or refork */
719 *vlib_worker_threads->node_reforks_required = 0;
720 vm->need_vlib_worker_thread_node_runtime_update = 0;
723 vm->barrier_epoch = 0;
724 vm->barrier_no_close_before = 0;
726 worker_thread_index = 1;
728 for (i = 0; i < vec_len (tm->registrations); i++)
730 vlib_node_main_t *nm, *nm_clone;
733 tr = tm->registrations[i];
738 for (k = 0; k < tr->count; k++)
742 vec_add2 (vlib_worker_threads, w, 1);
743 /* Currently unused, may not really work */
746 #if USE_DLMALLOC == 0
748 mheap_alloc (0 /* use VM */ , tr->mheap_size);
750 w->thread_mheap = create_mspace (tr->mheap_size,
755 w->thread_mheap = main_heap;
758 vlib_thread_stack_init (w - vlib_worker_threads);
759 w->thread_function = tr->function;
760 w->thread_function_arg = w;
762 w->registration = tr;
765 (char *) format (0, "%s %d", tr->name, k + 1);
766 vec_add1 (w->elog_track.name, 0);
767 elog_track_register (&vm->elog_main, &w->elog_track);
769 if (tr->no_data_structure_clone)
772 /* Fork vlib_global_main et al. Look for bugs here */
773 oldheap = clib_mem_set_heap (w->thread_mheap);
775 vm_clone = clib_mem_alloc_aligned (sizeof (*vm_clone),
776 CLIB_CACHE_LINE_BYTES);
777 clib_memcpy (vm_clone, vlib_mains[0], sizeof (*vm_clone));
779 vm_clone->thread_index = worker_thread_index;
780 vm_clone->heap_base = w->thread_mheap;
781 vm_clone->heap_aligned_base = (void *)
782 (((uword) w->thread_mheap) & ~(VLIB_FRAME_ALIGN - 1));
783 vm_clone->init_functions_called =
784 hash_create (0, /* value bytes */ 0);
785 vm_clone->pending_rpc_requests = 0;
786 vec_validate (vm_clone->pending_rpc_requests, 0);
787 _vec_len (vm_clone->pending_rpc_requests) = 0;
788 clib_memset (&vm_clone->random_buffer, 0,
789 sizeof (vm_clone->random_buffer));
791 nm = &vlib_mains[0]->node_main;
792 nm_clone = &vm_clone->node_main;
793 /* fork next frames array, preserving node runtime indices */
794 nm_clone->next_frames = vec_dup_aligned (nm->next_frames,
795 CLIB_CACHE_LINE_BYTES);
796 for (j = 0; j < vec_len (nm_clone->next_frames); j++)
798 vlib_next_frame_t *nf = &nm_clone->next_frames[j];
799 u32 save_node_runtime_index;
802 save_node_runtime_index = nf->node_runtime_index;
803 save_flags = nf->flags & VLIB_FRAME_NO_FREE_AFTER_DISPATCH;
804 vlib_next_frame_init (nf);
805 nf->node_runtime_index = save_node_runtime_index;
806 nf->flags = save_flags;
809 /* fork the frame dispatch queue */
810 nm_clone->pending_frames = 0;
811 vec_validate (nm_clone->pending_frames, 10);
812 _vec_len (nm_clone->pending_frames) = 0;
817 /* Allocate all nodes in single block for speed */
818 n = clib_mem_alloc_no_fail (vec_len (nm->nodes) * sizeof (*n));
820 for (j = 0; j < vec_len (nm->nodes); j++)
822 clib_memcpy (n, nm->nodes[j], sizeof (*n));
823 /* none of the copied nodes have enqueue rights given out */
824 n->owner_node_index = VLIB_INVALID_NODE_INDEX;
825 clib_memset (&n->stats_total, 0, sizeof (n->stats_total));
826 clib_memset (&n->stats_last_clear, 0,
827 sizeof (n->stats_last_clear));
828 vec_add1 (nm_clone->nodes, n);
831 nm_clone->nodes_by_type[VLIB_NODE_TYPE_INTERNAL] =
832 vec_dup_aligned (nm->nodes_by_type[VLIB_NODE_TYPE_INTERNAL],
833 CLIB_CACHE_LINE_BYTES);
835 nm_clone->nodes_by_type[VLIB_NODE_TYPE_INTERNAL])
837 vlib_node_t *n = vlib_get_node (vm, rt->node_index);
838 rt->thread_index = vm_clone->thread_index;
839 /* copy initial runtime_data from node */
840 if (n->runtime_data && n->runtime_data_bytes > 0)
841 clib_memcpy (rt->runtime_data, n->runtime_data,
842 clib_min (VLIB_NODE_RUNTIME_DATA_SIZE,
843 n->runtime_data_bytes));
846 nm_clone->nodes_by_type[VLIB_NODE_TYPE_INPUT] =
847 vec_dup_aligned (nm->nodes_by_type[VLIB_NODE_TYPE_INPUT],
848 CLIB_CACHE_LINE_BYTES);
849 vec_foreach (rt, nm_clone->nodes_by_type[VLIB_NODE_TYPE_INPUT])
851 vlib_node_t *n = vlib_get_node (vm, rt->node_index);
852 rt->thread_index = vm_clone->thread_index;
853 /* copy initial runtime_data from node */
854 if (n->runtime_data && n->runtime_data_bytes > 0)
855 clib_memcpy (rt->runtime_data, n->runtime_data,
856 clib_min (VLIB_NODE_RUNTIME_DATA_SIZE,
857 n->runtime_data_bytes));
860 nm_clone->nodes_by_type[VLIB_NODE_TYPE_PRE_INPUT] =
861 vec_dup_aligned (nm->nodes_by_type[VLIB_NODE_TYPE_PRE_INPUT],
862 CLIB_CACHE_LINE_BYTES);
864 nm_clone->nodes_by_type[VLIB_NODE_TYPE_PRE_INPUT])
866 vlib_node_t *n = vlib_get_node (vm, rt->node_index);
867 rt->thread_index = vm_clone->thread_index;
868 /* copy initial runtime_data from node */
869 if (n->runtime_data && n->runtime_data_bytes > 0)
870 clib_memcpy (rt->runtime_data, n->runtime_data,
871 clib_min (VLIB_NODE_RUNTIME_DATA_SIZE,
872 n->runtime_data_bytes));
875 nm_clone->processes = vec_dup_aligned (nm->processes,
876 CLIB_CACHE_LINE_BYTES);
878 /* Create per-thread frame freelist */
879 nm_clone->frame_sizes = vec_new (vlib_frame_size_t, 1);
880 #ifdef VLIB_SUPPORTS_ARBITRARY_SCALAR_SIZES
881 nm_clone->frame_size_hash = hash_create (0, sizeof (uword));
883 nm_clone->node_by_error = nm->node_by_error;
885 /* Packet trace buffers are guaranteed to be empty, nothing to do here */
887 clib_mem_set_heap (oldheap);
888 vec_add1_aligned (vlib_mains, vm_clone, CLIB_CACHE_LINE_BYTES);
890 /* Switch to the stats segment ... */
891 void *oldheap = vlib_stats_push_heap (0);
892 vm_clone->error_main.counters = vec_dup_aligned
893 (vlib_mains[0]->error_main.counters, CLIB_CACHE_LINE_BYTES);
894 vlib_stats_pop_heap2 (vm_clone->error_main.counters,
895 worker_thread_index, oldheap, 1);
897 vm_clone->error_main.counters_last_clear = vec_dup_aligned
898 (vlib_mains[0]->error_main.counters_last_clear,
899 CLIB_CACHE_LINE_BYTES);
901 worker_thread_index++;
907 /* only have non-data-structure copy threads to create... */
908 for (i = 0; i < vec_len (tm->registrations); i++)
910 tr = tm->registrations[i];
912 for (j = 0; j < tr->count; j++)
914 vec_add2 (vlib_worker_threads, w, 1);
917 #if USE_DLMALLOC == 0
919 mheap_alloc (0 /* use VM */ , tr->mheap_size);
922 create_mspace (tr->mheap_size, 0 /* locked */ );
926 w->thread_mheap = main_heap;
928 vlib_thread_stack_init (w - vlib_worker_threads);
929 w->thread_function = tr->function;
930 w->thread_function_arg = w;
933 (char *) format (0, "%s %d", tr->name, j + 1);
934 w->registration = tr;
935 vec_add1 (w->elog_track.name, 0);
936 elog_track_register (&vm->elog_main, &w->elog_track);
941 worker_thread_index = 1;
943 for (i = 0; i < vec_len (tm->registrations); i++)
948 tr = tm->registrations[i];
950 if (tr->use_pthreads || tm->use_pthreads)
952 for (j = 0; j < tr->count; j++)
954 w = vlib_worker_threads + worker_thread_index++;
955 err = vlib_launch_thread_int (vlib_worker_thread_bootstrap_fn,
958 clib_error_report (err);
965 clib_bitmap_foreach (c, tr->coremask, ({
966 w = vlib_worker_threads + worker_thread_index++;
967 err = vlib_launch_thread_int (vlib_worker_thread_bootstrap_fn,
970 clib_error_report (err);
975 vlib_worker_thread_barrier_sync (vm);
976 vlib_worker_thread_barrier_release (vm);
980 VLIB_MAIN_LOOP_ENTER_FUNCTION (start_workers);
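/*
 * Called on the main thread, with the barrier held, when the node graph has
 * changed: scrape runtime stats from every thread so nothing is lost before
 * the per-worker clones are rebuilt (the rebuilds themselves happen on each
 * worker in vlib_worker_thread_node_refork()).
 */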
984 worker_thread_node_runtime_update_internal (void)
988 vlib_node_main_t *nm, *nm_clone;
989 vlib_main_t *vm_clone;
990 vlib_node_runtime_t *rt;
992 vlib_node_runtime_sync_stats (vlib_main_t * vm,
993 vlib_node_runtime_t * r,
995 uword n_vectors, uword n_clocks);
997 ASSERT (vlib_get_thread_index () == 0);
1000 nm = &vm->node_main;
1002 ASSERT (*vlib_worker_threads->wait_at_barrier == 1);
1005 * Scrape all runtime stats, so we don't lose node runtime(s) with
1006 * pending counts, or throw away worker / io thread counts.
1008 for (j = 0; j < vec_len (nm->nodes); j++)
1012 vlib_node_sync_stats (vm, n);
1015 for (i = 1; i < vec_len (vlib_mains); i++)
1019 vm_clone = vlib_mains[i];
1020 nm_clone = &vm_clone->node_main;
1022 for (j = 0; j < vec_len (nm_clone->nodes); j++)
1024 n = nm_clone->nodes[j];
1026 rt = vlib_node_get_runtime (vm_clone, n->index);
1027 vlib_node_runtime_sync_stats (vm_clone, rt, 0, 0, 0);
1031 /* Per-worker clone rebuilds are now done on each thread */
1036 vlib_worker_thread_node_refork (void)
1038 vlib_main_t *vm, *vm_clone;
1039 vlib_node_main_t *nm, *nm_clone;
1040 vlib_node_t **old_nodes_clone;
1041 vlib_node_runtime_t *rt, *old_rt;
1043 vlib_node_t *new_n_clone;
1048 nm = &vm->node_main;
1049 vm_clone = vlib_get_main ();
1050 nm_clone = &vm_clone->node_main;
1052 /* Re-clone error heap */
1053 u64 *old_counters = vm_clone->error_main.counters;
1054 u64 *old_counters_all_clear = vm_clone->error_main.counters_last_clear;
1056 clib_memcpy_fast (&vm_clone->error_main, &vm->error_main,
1057 sizeof (vm->error_main));
1058 j = vec_len (vm->error_main.counters) - 1;
1060 /* Switch to the stats segment ... */
1061 void *oldheap = vlib_stats_push_heap (0);
1062 vec_validate_aligned (old_counters, j, CLIB_CACHE_LINE_BYTES);
1063 vm_clone->error_main.counters = old_counters;
1064 vlib_stats_pop_heap2 (vm_clone->error_main.counters, vm_clone->thread_index,
1067 vec_validate_aligned (old_counters_all_clear, j, CLIB_CACHE_LINE_BYTES);
1068 vm_clone->error_main.counters_last_clear = old_counters_all_clear;
1070 nm_clone = &vm_clone->node_main;
1071 vec_free (nm_clone->next_frames);
1072 nm_clone->next_frames = vec_dup_aligned (nm->next_frames,
1073 CLIB_CACHE_LINE_BYTES);
1075 for (j = 0; j < vec_len (nm_clone->next_frames); j++)
1077 vlib_next_frame_t *nf = &nm_clone->next_frames[j];
1078 u32 save_node_runtime_index;
1081 save_node_runtime_index = nf->node_runtime_index;
1082 save_flags = nf->flags & VLIB_FRAME_NO_FREE_AFTER_DISPATCH;
1083 vlib_next_frame_init (nf);
1084 nf->node_runtime_index = save_node_runtime_index;
1085 nf->flags = save_flags;
1088 old_nodes_clone = nm_clone->nodes;
1089 nm_clone->nodes = 0;
1093 /* Allocate all nodes in single block for speed */
1095 clib_mem_alloc_no_fail (vec_len (nm->nodes) * sizeof (*new_n_clone));
1096 for (j = 0; j < vec_len (nm->nodes); j++)
1098 vlib_node_t *new_n = nm->nodes[j];
1100 clib_memcpy_fast (new_n_clone, new_n, sizeof (*new_n));
1101 /* none of the copied nodes have enqueue rights given out */
1102 new_n_clone->owner_node_index = VLIB_INVALID_NODE_INDEX;
1104 if (j >= vec_len (old_nodes_clone))
1106 /* new node, set to zero */
1107 clib_memset (&new_n_clone->stats_total, 0,
1108 sizeof (new_n_clone->stats_total));
1109 clib_memset (&new_n_clone->stats_last_clear, 0,
1110 sizeof (new_n_clone->stats_last_clear));
1114 vlib_node_t *old_n_clone = old_nodes_clone[j];
1115 /* Copy stats if the old data is valid */
1116 clib_memcpy_fast (&new_n_clone->stats_total,
1117 &old_n_clone->stats_total,
1118 sizeof (new_n_clone->stats_total));
1119 clib_memcpy_fast (&new_n_clone->stats_last_clear,
1120 &old_n_clone->stats_last_clear,
1121 sizeof (new_n_clone->stats_last_clear));
1123 /* keep previous node state */
1124 new_n_clone->state = old_n_clone->state;
1126 vec_add1 (nm_clone->nodes, new_n_clone);
1129 /* Free the old node clones */
1130 clib_mem_free (old_nodes_clone[0]);
1132 vec_free (old_nodes_clone);
1135 /* re-clone internal nodes */
1136 old_rt = nm_clone->nodes_by_type[VLIB_NODE_TYPE_INTERNAL];
1137 nm_clone->nodes_by_type[VLIB_NODE_TYPE_INTERNAL] =
1138 vec_dup_aligned (nm->nodes_by_type[VLIB_NODE_TYPE_INTERNAL],
1139 CLIB_CACHE_LINE_BYTES);
1141 vec_foreach (rt, nm_clone->nodes_by_type[VLIB_NODE_TYPE_INTERNAL])
1143 vlib_node_t *n = vlib_get_node (vm, rt->node_index);
1144 rt->thread_index = vm_clone->thread_index;
1145 /* copy runtime_data, will be overwritten later for existing rt */
1146 if (n->runtime_data && n->runtime_data_bytes > 0)
1147 clib_memcpy_fast (rt->runtime_data, n->runtime_data,
1148 clib_min (VLIB_NODE_RUNTIME_DATA_SIZE,
1149 n->runtime_data_bytes));
1152 for (j = 0; j < vec_len (old_rt); j++)
1154 rt = vlib_node_get_runtime (vm_clone, old_rt[j].node_index);
1155 rt->state = old_rt[j].state;
1156 clib_memcpy_fast (rt->runtime_data, old_rt[j].runtime_data,
1157 VLIB_NODE_RUNTIME_DATA_SIZE);
1162 /* re-clone input nodes */
1163 old_rt = nm_clone->nodes_by_type[VLIB_NODE_TYPE_INPUT];
1164 nm_clone->nodes_by_type[VLIB_NODE_TYPE_INPUT] =
1165 vec_dup_aligned (nm->nodes_by_type[VLIB_NODE_TYPE_INPUT],
1166 CLIB_CACHE_LINE_BYTES);
1168 vec_foreach (rt, nm_clone->nodes_by_type[VLIB_NODE_TYPE_INPUT])
1170 vlib_node_t *n = vlib_get_node (vm, rt->node_index);
1171 rt->thread_index = vm_clone->thread_index;
1172 /* copy runtime_data, will be overwritten later for existing rt */
1173 if (n->runtime_data && n->runtime_data_bytes > 0)
1174 clib_memcpy_fast (rt->runtime_data, n->runtime_data,
1175 clib_min (VLIB_NODE_RUNTIME_DATA_SIZE,
1176 n->runtime_data_bytes));
1179 for (j = 0; j < vec_len (old_rt); j++)
1181 rt = vlib_node_get_runtime (vm_clone, old_rt[j].node_index);
1182 rt->state = old_rt[j].state;
1183 clib_memcpy_fast (rt->runtime_data, old_rt[j].runtime_data,
1184 VLIB_NODE_RUNTIME_DATA_SIZE);
1189 /* re-clone pre-input nodes */
1190 old_rt = nm_clone->nodes_by_type[VLIB_NODE_TYPE_PRE_INPUT];
1191 nm_clone->nodes_by_type[VLIB_NODE_TYPE_PRE_INPUT] =
1192 vec_dup_aligned (nm->nodes_by_type[VLIB_NODE_TYPE_PRE_INPUT],
1193 CLIB_CACHE_LINE_BYTES);
1195 vec_foreach (rt, nm_clone->nodes_by_type[VLIB_NODE_TYPE_PRE_INPUT])
1197 vlib_node_t *n = vlib_get_node (vm, rt->node_index);
1198 rt->thread_index = vm_clone->thread_index;
1199 /* copy runtime_data, will be overwritten later for existing rt */
1200 if (n->runtime_data && n->runtime_data_bytes > 0)
1201 clib_memcpy_fast (rt->runtime_data, n->runtime_data,
1202 clib_min (VLIB_NODE_RUNTIME_DATA_SIZE,
1203 n->runtime_data_bytes));
1206 for (j = 0; j < vec_len (old_rt); j++)
1208 rt = vlib_node_get_runtime (vm_clone, old_rt[j].node_index);
1209 rt->state = old_rt[j].state;
1210 clib_memcpy_fast (rt->runtime_data, old_rt[j].runtime_data,
1211 VLIB_NODE_RUNTIME_DATA_SIZE);
1216 nm_clone->processes = vec_dup_aligned (nm->processes,
1217 CLIB_CACHE_LINE_BYTES);
1218 nm_clone->node_by_error = nm->node_by_error;
1222 vlib_worker_thread_node_runtime_update (void)
1225 * Make a note that we need to do a node runtime update
1226 * prior to releasing the barrier.
1228 vlib_global_main.need_vlib_worker_thread_node_runtime_update = 1;
1232 unformat_sched_policy (unformat_input_t * input, va_list * args)
1234 u32 *r = va_arg (*args, u32 *);
1237 #define _(v,f,s) else if (unformat (input, s)) *r = SCHED_POLICY_##f;
1238 foreach_sched_policy
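/*
 * Parse the "cpu" startup configuration section. A representative stanza
 * (values and core assignments are illustrative, not defaults):
 *
 *   cpu {
 *     main-core 1
 *     corelist-workers 2-5
 *     scheduler-policy fifo
 *     scheduler-priority 50
 *   }
 *
 * Unknown "<name> <count>" pairs are matched against the registered thread
 * types; priorities are clamped to the policy's min/max for SCHED_FIFO and
 * SCHED_RR and rejected for other policies.
 */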
1245 static clib_error_t *
1246 cpu_config (vlib_main_t * vm, unformat_input_t * input)
1248 vlib_thread_registration_t *tr;
1250 vlib_thread_main_t *tm = &vlib_thread_main;
1255 tm->thread_registrations_by_name = hash_create_string (0, sizeof (uword));
1257 tm->n_thread_stacks = 1; /* account for main thread */
1258 tm->sched_policy = ~0;
1259 tm->sched_priority = ~0;
1260 tm->main_lcore = ~0;
1266 hash_set_mem (tm->thread_registrations_by_name, tr->name, (uword) tr);
1270 while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
1272 if (unformat (input, "use-pthreads"))
1273 tm->use_pthreads = 1;
1274 else if (unformat (input, "thread-prefix %v", &tm->thread_prefix))
1276 else if (unformat (input, "main-core %u", &tm->main_lcore))
1278 else if (unformat (input, "skip-cores %u", &tm->skip_cores))
1280 else if (unformat (input, "numa-heap-size %U",
1281 unformat_memory_size, &tm->numa_heap_size))
1283 else if (unformat (input, "coremask-%s %U", &name,
1284 unformat_bitmap_mask, &bitmap) ||
1285 unformat (input, "corelist-%s %U", &name,
1286 unformat_bitmap_list, &bitmap))
1288 p = hash_get_mem (tm->thread_registrations_by_name, name);
1290 return clib_error_return (0, "no such thread type '%s'", name);
1292 tr = (vlib_thread_registration_t *) p[0];
1294 if (tr->use_pthreads)
1295 return clib_error_return (0,
1296 "corelist cannot be set for '%s' threads",
1299 tr->coremask = bitmap;
1300 tr->count = clib_bitmap_count_set_bits (tr->coremask);
1304 (input, "scheduler-policy %U", unformat_sched_policy,
1307 else if (unformat (input, "scheduler-priority %u", &tm->sched_priority))
1309 else if (unformat (input, "%s %u", &name, &count))
1311 p = hash_get_mem (tm->thread_registrations_by_name, name);
1313 return clib_error_return (0, "no such thread type '%s'", name);
1315 tr = (vlib_thread_registration_t *) p[0];
1316 if (tr->fixed_count)
1317 return clib_error_return
1318 (0, "number of %s threads not configurable", tr->name);
1325 if (tm->sched_priority != ~0)
1327 if (tm->sched_policy == SCHED_FIFO || tm->sched_policy == SCHED_RR)
1329 u32 prio_max = sched_get_priority_max (tm->sched_policy);
1330 u32 prio_min = sched_get_priority_min (tm->sched_policy);
1331 if (tm->sched_priority > prio_max)
1332 tm->sched_priority = prio_max;
1333 if (tm->sched_priority < prio_min)
1334 tm->sched_priority = prio_min;
1338 return clib_error_return
1340 "scheduling priority (%d) is not allowed for `normal` scheduling policy",
1341 tm->sched_priority);
1346 if (!tm->thread_prefix)
1347 tm->thread_prefix = format (0, "vpp");
1351 tm->n_thread_stacks += tr->count;
1352 tm->n_pthreads += tr->count * tr->use_pthreads;
1353 tm->n_threads += tr->count * (tr->use_pthreads == 0);
1360 VLIB_EARLY_CONFIG_FUNCTION (cpu_config, "cpu");
1362 void vnet_main_fixup (vlib_fork_fixup_t which) __attribute__ ((weak));
1364 vnet_main_fixup (vlib_fork_fixup_t which)
1369 vlib_worker_thread_fork_fixup (vlib_fork_fixup_t which)
1371 vlib_main_t *vm = vlib_get_main ();
1373 if (vlib_mains == 0)
1376 ASSERT (vlib_get_thread_index () == 0);
1377 vlib_worker_thread_barrier_sync (vm);
1381 case VLIB_WORKER_THREAD_FORK_FIXUP_NEW_SW_IF_INDEX:
1382 vnet_main_fixup (VLIB_WORKER_THREAD_FORK_FIXUP_NEW_SW_IF_INDEX);
1388 vlib_worker_thread_barrier_release (vm);
1392 * Enforce minimum open time to minimize packet loss due to Rx overflow,
1393 * based on a test-based heuristic that the barrier should be open for at least
1394 * 3 times as long as it is closed (with an upper bound of 1ms, because by that
1395 * point it is probably too late to make a difference).
1398 #ifndef BARRIER_MINIMUM_OPEN_LIMIT
1399 #define BARRIER_MINIMUM_OPEN_LIMIT 0.001
1402 #ifndef BARRIER_MINIMUM_OPEN_FACTOR
1403 #define BARRIER_MINIMUM_OPEN_FACTOR 3
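/*
 * Example: if the barrier was closed for 200us, it is held open for at
 * least 3 * 200us = 600us before it may close again; if it was closed for
 * 2ms, the hold-down is capped at BARRIER_MINIMUM_OPEN_LIMIT (1ms).
 */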
1407 vlib_worker_thread_initial_barrier_sync_and_release (vlib_main_t * vm)
1410 f64 now = vlib_time_now (vm);
1411 u32 count = vec_len (vlib_mains) - 1;
1413 /* No worker threads? */
1417 deadline = now + BARRIER_SYNC_TIMEOUT;
1418 *vlib_worker_threads->wait_at_barrier = 1;
1419 while (*vlib_worker_threads->workers_at_barrier != count)
1421 if ((now = vlib_time_now (vm)) > deadline)
1423 fformat (stderr, "%s: worker thread deadlock\n", __FUNCTION__);
1428 *vlib_worker_threads->wait_at_barrier = 0;
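/*
 * Close the barrier: record the caller, tolerate recursion, honor the
 * hold-down timer when any worker's vector rate exceeds 10, then set
 * wait_at_barrier and spin until all workers check in (or the
 * BARRIER_SYNC_TIMEOUT deadlock warning fires).
 */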
1432 vlib_worker_thread_barrier_sync_int (vlib_main_t * vm, const char *func_name)
1439 f64 max_vector_rate;
1443 if (vec_len (vlib_mains) < 2)
1446 ASSERT (vlib_get_thread_index () == 0);
1448 vlib_worker_threads[0].barrier_caller = func_name;
1449 count = vec_len (vlib_mains) - 1;
1451 /* Record entry relative to last close */
1452 now = vlib_time_now (vm);
1453 t_entry = now - vm->barrier_epoch;
1455 /* Tolerate recursive calls */
1456 if (++vlib_worker_threads[0].recursion_level > 1)
1458 barrier_trace_sync_rec (t_entry);
1463 * Need data to decide if we're working hard enough to honor
1464 * the barrier hold-down timer.
1466 max_vector_rate = 0.0;
1467 for (i = 1; i < vec_len (vlib_mains); i++)
1469 clib_max (max_vector_rate,
1470 (f64) vlib_last_vectors_per_main_loop (vlib_mains[i]));
1472 vlib_worker_threads[0].barrier_sync_count++;
1474 /* Enforce minimum barrier open time to minimize packet loss */
1475 ASSERT (vm->barrier_no_close_before <= (now + BARRIER_MINIMUM_OPEN_LIMIT));
1478 * If any worker thread seems busy, which we define
1479 * as a vector rate above 10, we enforce the barrier hold-down timer
1481 if (max_vector_rate > 10.0)
1485 now = vlib_time_now (vm);
1486 /* Barrier hold-down timer expired? */
1487 if (now >= vm->barrier_no_close_before)
1489 if ((vm->barrier_no_close_before - now)
1490 > (2.0 * BARRIER_MINIMUM_OPEN_LIMIT))
1493 ("clock change: would have waited for %.4f seconds",
1494 (vm->barrier_no_close_before - now));
1499 /* Record time of closure */
1500 t_open = now - vm->barrier_epoch;
1501 vm->barrier_epoch = now;
1503 deadline = now + BARRIER_SYNC_TIMEOUT;
1505 *vlib_worker_threads->wait_at_barrier = 1;
1506 while (*vlib_worker_threads->workers_at_barrier != count)
1508 if ((now = vlib_time_now (vm)) > deadline)
1510 fformat (stderr, "%s: worker thread deadlock\n", __FUNCTION__);
1515 t_closed = now - vm->barrier_epoch;
1517 barrier_trace_sync (t_entry, t_open, t_closed);
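/*
 * Open the barrier: if a node runtime update is pending, scrape stats on
 * the main thread and ask each worker to refork its clones; release the
 * workers, wait for the reforks to finish, and compute the next
 * barrier_no_close_before time from how long the barrier was closed.
 */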
1522 vlib_worker_thread_barrier_release (vlib_main_t * vm)
1529 f64 t_update_main = 0.0;
1530 int refork_needed = 0;
1532 if (vec_len (vlib_mains) < 2)
1535 ASSERT (vlib_get_thread_index () == 0);
1538 now = vlib_time_now (vm);
1539 t_entry = now - vm->barrier_epoch;
1541 if (--vlib_worker_threads[0].recursion_level > 0)
1543 barrier_trace_release_rec (t_entry);
1547 /* Update (all) node runtimes before releasing the barrier, if needed */
1548 if (vm->need_vlib_worker_thread_node_runtime_update)
1551 * Lock the stat segment here, so we're safe when
1552 * rebuilding the stat segment node clones below.
1555 vlib_stat_segment_lock ();
1557 /* Do stats elements on main thread */
1558 worker_thread_node_runtime_update_internal ();
1559 vm->need_vlib_worker_thread_node_runtime_update = 0;
1561 /* Do per thread rebuilds in parallel */
1563 clib_atomic_fetch_add (vlib_worker_threads->node_reforks_required,
1564 (vec_len (vlib_mains) - 1));
1565 now = vlib_time_now (vm);
1566 t_update_main = now - vm->barrier_epoch;
1569 deadline = now + BARRIER_SYNC_TIMEOUT;
1572 * Note when we let go of the barrier.
1573 * Workers can use this to derive a reasonably accurate
1574 * time offset. See vlib_time_now(...)
1576 vm->time_last_barrier_release = vlib_time_now (vm);
1577 CLIB_MEMORY_STORE_BARRIER ();
1579 *vlib_worker_threads->wait_at_barrier = 0;
1581 while (*vlib_worker_threads->workers_at_barrier > 0)
1583 if ((now = vlib_time_now (vm)) > deadline)
1585 fformat (stderr, "%s: worker thread deadlock\n", __FUNCTION__);
1590 /* Wait for reforks before continuing */
1593 now = vlib_time_now (vm);
1595 deadline = now + BARRIER_SYNC_TIMEOUT;
1597 while (*vlib_worker_threads->node_reforks_required > 0)
1599 if ((now = vlib_time_now (vm)) > deadline)
1601 fformat (stderr, "%s: worker thread refork deadlock\n",
1606 vlib_stat_segment_unlock ();
1609 t_closed_total = now - vm->barrier_epoch;
1611 minimum_open = t_closed_total * BARRIER_MINIMUM_OPEN_FACTOR;
1613 if (minimum_open > BARRIER_MINIMUM_OPEN_LIMIT)
1615 minimum_open = BARRIER_MINIMUM_OPEN_LIMIT;
1618 vm->barrier_no_close_before = now + minimum_open;
1620 /* Record barrier epoch (used to enforce minimum open time) */
1621 vm->barrier_epoch = now;
1623 barrier_trace_release (t_entry, t_closed_total, t_update_main);
1628 * Check the frame queue to see if any frames are available.
1629 * If so, pull the packets off the frames and hand them to the frame queue's node.
1633 vlib_frame_queue_dequeue (vlib_main_t * vm, vlib_frame_queue_main_t * fqm)
1635 u32 thread_id = vm->thread_index;
1636 vlib_frame_queue_t *fq = fqm->vlib_frame_queues[thread_id];
1637 vlib_frame_queue_elt_t *elt;
1646 ASSERT (vm == vlib_mains[thread_id]);
1648 if (PREDICT_FALSE (fqm->node_index == ~0))
1651 * Gather trace data for frame queues
1653 if (PREDICT_FALSE (fq->trace))
1655 frame_queue_trace_t *fqt;
1656 frame_queue_nelt_counter_t *fqh;
1659 fqt = &fqm->frame_queue_traces[thread_id];
1661 fqt->nelts = fq->nelts;
1662 fqt->head = fq->head;
1663 fqt->head_hint = fq->head_hint;
1664 fqt->tail = fq->tail;
1665 fqt->threshold = fq->vector_threshold;
1666 fqt->n_in_use = fqt->tail - fqt->head;
1667 if (fqt->n_in_use >= fqt->nelts)
1669 /* if beyond max then use max */
1670 fqt->n_in_use = fqt->nelts - 1;
1673 /* Record the number of elements in use in the histogram */
1674 fqh = &fqm->frame_queue_histogram[thread_id];
1675 fqh->count[fqt->n_in_use]++;
1677 /* Record a snapshot of the elements in use */
1678 for (elix = 0; elix < fqt->nelts; elix++)
1680 elt = fq->elts + ((fq->head + 1 + elix) & (fq->nelts - 1));
1681 if (1 || elt->valid)
1683 fqt->n_vectors[elix] = elt->n_vectors;
1692 if (fq->head == fq->tail)
1694 fq->head_hint = fq->head;
1698 elt = fq->elts + ((fq->head + 1) & (fq->nelts - 1));
1702 fq->head_hint = fq->head;
1706 from = elt->buffer_index;
1707 msg_type = elt->msg_type;
1709 ASSERT (msg_type == VLIB_FRAME_QUEUE_ELT_DISPATCH_FRAME);
1710 ASSERT (elt->n_vectors <= VLIB_FRAME_SIZE);
1712 f = vlib_get_frame_to_node (vm, fqm->node_index);
1714 /* If the first vector is traced, set the frame trace flag */
1715 b = vlib_get_buffer (vm, from[0]);
1716 if (b->flags & VLIB_BUFFER_IS_TRACED)
1717 f->frame_flags |= VLIB_NODE_FLAG_TRACE;
1719 to = vlib_frame_vector_args (f);
1721 n_left_to_node = elt->n_vectors;
1723 while (n_left_to_node >= 4)
1731 n_left_to_node -= 4;
1734 while (n_left_to_node > 0)
1742 vectors += elt->n_vectors;
1743 f->n_vectors = elt->n_vectors;
1744 vlib_put_frame_to_node (vm, fqm->node_index, f);
1748 elt->msg_type = 0xfefefefe;
1749 CLIB_MEMORY_BARRIER ();
1754 * Limit the number of packets pushed into the graph
1756 if (vectors >= fq->vector_threshold)
1758 fq->head_hint = fq->head;
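/*
 * Worker thread main function: per-thread init, per-worker init/exit
 * functions, wait for an external thread manager (if any) to release the
 * workers, then enter vlib_worker_loop().
 */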
1767 vlib_worker_thread_fn (void *arg)
1769 vlib_worker_thread_t *w = (vlib_worker_thread_t *) arg;
1770 vlib_thread_main_t *tm = vlib_get_thread_main ();
1771 vlib_main_t *vm = vlib_get_main ();
1774 ASSERT (vm->thread_index == vlib_get_thread_index ());
1776 vlib_worker_thread_init (w);
1777 clib_time_init (&vm->clib_time);
1778 clib_mem_set_heap (w->thread_mheap);
1780 e = vlib_call_init_exit_functions_no_sort
1781 (vm, &vm->worker_init_function_registrations, 1 /* call_once */ );
1783 clib_error_report (e);
1785 /* Wait until the dpdk init sequence is complete */
1786 while (tm->extern_thread_mgmt && tm->worker_thread_release == 0)
1787 vlib_worker_thread_barrier_check ();
1789 vlib_worker_loop (vm);
1793 VLIB_REGISTER_THREAD (worker_thread_reg, static) = {
1796 .function = vlib_worker_thread_fn,
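/*
 * Create a frame queue main for the given handoff node: one frame queue per
 * vlib_main (nelts defaults to FRAME_QUEUE_MAX_NELTS, minimum 8), plus
 * per-thread handoff bookkeeping. Returns the fqm index.
 */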
1801 vlib_frame_queue_main_init (u32 node_index, u32 frame_queue_nelts)
1803 vlib_thread_main_t *tm = vlib_get_thread_main ();
1804 vlib_frame_queue_main_t *fqm;
1805 vlib_frame_queue_t *fq;
1808 if (frame_queue_nelts == 0)
1809 frame_queue_nelts = FRAME_QUEUE_MAX_NELTS;
1811 ASSERT (frame_queue_nelts >= 8);
1813 vec_add2 (tm->frame_queue_mains, fqm, 1);
1815 fqm->node_index = node_index;
1816 fqm->frame_queue_nelts = frame_queue_nelts;
1817 fqm->queue_hi_thresh = frame_queue_nelts - 2;
1819 vec_validate (fqm->vlib_frame_queues, tm->n_vlib_mains - 1);
1820 vec_validate (fqm->per_thread_data, tm->n_vlib_mains - 1);
1821 _vec_len (fqm->vlib_frame_queues) = 0;
1822 for (i = 0; i < tm->n_vlib_mains; i++)
1824 vlib_frame_queue_per_thread_data_t *ptd;
1825 fq = vlib_frame_queue_alloc (frame_queue_nelts);
1826 vec_add1 (fqm->vlib_frame_queues, fq);
1828 ptd = vec_elt_at_index (fqm->per_thread_data, i);
1829 vec_validate (ptd->handoff_queue_elt_by_thread_index,
1830 tm->n_vlib_mains - 1);
1831 vec_validate_init_empty (ptd->congested_handoff_queue_by_thread_index,
1832 tm->n_vlib_mains - 1,
1833 (vlib_frame_queue_t *) (~0));
1836 return (fqm - tm->frame_queue_mains);
1840 vlib_thread_cb_register (struct vlib_main_t *vm, vlib_thread_callbacks_t * cb)
1842 vlib_thread_main_t *tm = vlib_get_thread_main ();
1844 if (tm->extern_thread_mgmt)
1847 tm->cb.vlib_launch_thread_cb = cb->vlib_launch_thread_cb;
1848 tm->extern_thread_mgmt = 1;
1853 vlib_process_signal_event_mt_helper (vlib_process_signal_event_mt_args_t *
1856 ASSERT (vlib_get_thread_index () == 0);
1857 vlib_process_signal_event (vlib_get_main (), args->node_index,
1858 args->type_opaque, args->data);
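/*
 * vlib_rpc_call_main_thread() funnels work to the main thread through the
 * rpc_call_main_thread_cb_fn pointer, which is installed elsewhere (outside
 * this file); if no callback has been registered, a bug warning is logged.
 */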
1861 void *rpc_call_main_thread_cb_fn;
1864 vlib_rpc_call_main_thread (void *callback, u8 * args, u32 arg_size)
1866 if (rpc_call_main_thread_cb_fn)
1868 void (*fp) (void *, u8 *, u32) = rpc_call_main_thread_cb_fn;
1869 (*fp) (callback, args, arg_size);
1872 clib_warning ("BUG: rpc_call_main_thread_cb_fn NULL!");
1876 threads_init (vlib_main_t * vm)
1881 VLIB_INIT_FUNCTION (threads_init);
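/*
 * "show clock" CLI: print the main thread's clib_time state, the time of
 * the last barrier release, and each worker's clock plus its offset
 * relative to that release.
 */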
1884 static clib_error_t *
1885 show_clock_command_fn (vlib_main_t * vm,
1886 unformat_input_t * input, vlib_cli_command_t * cmd)
1891 (void) unformat (input, "verbose %=", &verbose, 1);
1893 vlib_cli_output (vm, "%U", format_clib_time, &vm->clib_time, verbose);
1895 if (vec_len (vlib_mains) == 1)
1898 vlib_cli_output (vm, "Time last barrier release %.9f",
1899 vm->time_last_barrier_release);
1901 for (i = 1; i < vec_len (vlib_mains); i++)
1903 if (vlib_mains[i] == 0)
1906 vlib_cli_output (vm, "%d: %U", i, format_clib_time,
1907 &vlib_mains[i]->clib_time, verbose);
1909 vlib_cli_output (vm, "Thread %d offset %.9f error %.9f", i,
1910 vlib_mains[i]->time_offset,
1911 vm->time_last_barrier_release -
1912 vlib_mains[i]->time_last_barrier_release);
1918 VLIB_CLI_COMMAND (f_command, static) =
1920 .path = "show clock",
1921 .short_help = "show clock",
1922 .function = show_clock_command_fn,
1927 * fd.io coding-style-patch-verification: ON
1930 * eval: (c-set-style "gnu")