2 * Copyright (c) 2015 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
19 #include <vppinfra/format.h>
20 #include <vppinfra/linux/sysfs.h>
21 #include <vlib/vlib.h>
23 #include <vlib/threads.h>
24 #include <vlib/unix/cj.h>
26 DECLARE_CJ_GLOBAL_LOG;
28 #define FRAME_QUEUE_NELTS 64
36 vlib_worker_thread_t *vlib_worker_threads;
37 vlib_thread_main_t vlib_thread_main;
40 * Barrier tracing can be enabled on a normal build to collect information
41 * on barrier use, including timings and call stacks. Deliberately not
42 * keyed off CLIB_DEBUG, because that can add significant overhead which
43 * impacts observed timings.
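/*
 * elog_id_for_msg_name () caches the elog string id for each barrier
 * caller name in a local string hash, so repeated barrier traces from the
 * same caller do not re-register the name with the event logger.
 */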
47 elog_id_for_msg_name (const char *msg_name)
54 h = hash_create_string (0, sizeof (uword));
56 p = hash_get_mem (h, msg_name);
59 r = elog_string (&vlib_global_main.elog_main, "%s", msg_name);
61 name_copy = format (0, "%s%c", msg_name, 0);
63 hash_set_mem (h, name_copy, r);
69 barrier_trace_sync (f64 t_entry, f64 t_open, f64 t_closed)
71 if (!vlib_worker_threads->barrier_elog_enabled)
75 ELOG_TYPE_DECLARE (e) =
77 .format = "bar-trace-%s-#%d",
78 .format_args = "T4i4",
83 u32 caller, count, t_entry, t_open, t_closed;
86 ed = ELOG_DATA (&vlib_global_main.elog_main, e);
87 ed->count = (int) vlib_worker_threads[0].barrier_sync_count;
88 ed->caller = elog_id_for_msg_name (vlib_worker_threads[0].barrier_caller);
89 ed->t_entry = (int) (1000000.0 * t_entry);
90 ed->t_open = (int) (1000000.0 * t_open);
91 ed->t_closed = (int) (1000000.0 * t_closed);
95 barrier_trace_sync_rec (f64 t_entry)
97 if (!vlib_worker_threads->barrier_elog_enabled)
101 ELOG_TYPE_DECLARE (e) =
103 .format = "bar-syncrec-%s-#%d",
104 .format_args = "T4i4",
112 ed = ELOG_DATA (&vlib_global_main.elog_main, e);
113 ed->depth = (int) vlib_worker_threads[0].recursion_level - 1;
114 ed->caller = elog_id_for_msg_name (vlib_worker_threads[0].barrier_caller);
118 barrier_trace_release_rec (f64 t_entry)
120 if (!vlib_worker_threads->barrier_elog_enabled)
124 ELOG_TYPE_DECLARE (e) =
126 .format = "bar-relrrec-#%d",
135 ed = ELOG_DATA (&vlib_global_main.elog_main, e);
136 ed->depth = (int) vlib_worker_threads[0].recursion_level;
140 barrier_trace_release (f64 t_entry, f64 t_closed_total, f64 t_update_main)
142 if (!vlib_worker_threads->barrier_elog_enabled)
146 ELOG_TYPE_DECLARE (e) =
148 .format = "bar-rel-#%d-e%d-u%d-t%d",
149 .format_args = "i4i4i4i4",
154 u32 count, t_entry, t_update_main, t_closed_total;
157 ed = ELOG_DATA (&vlib_global_main.elog_main, e);
158 ed->t_entry = (int) (1000000.0 * t_entry);
159 ed->t_update_main = (int) (1000000.0 * t_update_main);
160 ed->t_closed_total = (int) (1000000.0 * t_closed_total);
161 ed->count = (int) vlib_worker_threads[0].barrier_sync_count;
163 /* Reset context for next trace */
164 vlib_worker_threads[0].barrier_context = NULL;
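/*
 * Example (illustrative): with the "bar-rel-#%d-e%d-u%d-t%d" format above,
 * a record such as "bar-rel-#42-e12-u0-t150" would mean barrier sync
 * number 42, 12 usec from entry to release, 0 usec spent in the main
 * thread node runtime update, and 150 usec total closed time; the trace
 * helpers scale the f64 times by 1e6 before logging.
 */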
168 os_get_nthreads (void)
172 len = vec_len (vlib_thread_stacks);
180 vlib_set_thread_name (char *name)
182 int pthread_setname_np (pthread_t __target_thread, const char *__name);
184 pthread_t thread = pthread_self ();
188 rv = pthread_setname_np (thread, name);
190 clib_warning ("pthread_setname_np returned %d", rv);
195 sort_registrations_by_no_clone (void *a0, void *a1)
197 vlib_thread_registration_t **tr0 = a0;
198 vlib_thread_registration_t **tr1 = a1;
200 return ((i32) ((*tr0)->no_data_structure_clone)
201 - ((i32) ((*tr1)->no_data_structure_clone)));
205 clib_sysfs_list_to_bitmap (char *filename)
210 fp = fopen (filename, "r");
215 vec_validate (buffer, 256 - 1);
216 if (fgets ((char *) buffer, 256, fp))
219 unformat_init_string (&in, (char *) buffer,
220 strlen ((char *) buffer));
221 if (unformat (&in, "%U", unformat_bitmap_list, &r) != 1)
222 clib_warning ("unformat_bitmap_list failed");
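/*
 * For reference, the sysfs "list" format parsed here is a comma-separated
 * set of ranges such as "0-3,8-11"; unformat_bitmap_list turns that into a
 * bitmap with bits 0..3 and 8..11 set.
 */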
232 /* Called early in the init sequence */
235 vlib_thread_init (vlib_main_t * vm)
237 vlib_thread_main_t *tm = &vlib_thread_main;
238 vlib_worker_thread_t *w;
239 vlib_thread_registration_t *tr;
240 u32 n_vlib_mains = 1;
245 /* get bitmaps of active cpu cores and sockets */
246 tm->cpu_core_bitmap =
247 clib_sysfs_list_to_bitmap ("/sys/devices/system/cpu/online");
248 tm->cpu_socket_bitmap =
249 clib_sysfs_list_to_bitmap ("/sys/devices/system/node/online");
251 avail_cpu = clib_bitmap_dup (tm->cpu_core_bitmap);
254 for (i = 0; i < tm->skip_cores; i++)
256 uword c = clib_bitmap_first_set (avail_cpu);
258 return clib_error_return (0, "no available cpus to skip");
260 avail_cpu = clib_bitmap_set (avail_cpu, c, 0);
263 /* grab cpu for main thread */
264 if (tm->main_lcore == ~0)
266 /* if main-lcore is not set, we try to use lcore 1 */
267 if (clib_bitmap_get (avail_cpu, 1))
270 tm->main_lcore = clib_bitmap_first_set (avail_cpu);
271 if (tm->main_lcore == (u8) ~ 0)
272 return clib_error_return (0, "no available cpus to be used for the"
277 if (clib_bitmap_get (avail_cpu, tm->main_lcore) == 0)
278 return clib_error_return (0, "cpu %u is not available to be used"
279 " for the main thread", tm->main_lcore);
281 avail_cpu = clib_bitmap_set (avail_cpu, tm->main_lcore, 0);
283 /* if sysfs provides no data, assume that only socket 0 exists */
284 if (!tm->cpu_socket_bitmap)
285 tm->cpu_socket_bitmap = clib_bitmap_set (0, 0, 1);
287 /* pin main thread to main_lcore */
288 if (tm->cb.vlib_thread_set_lcore_cb)
290 tm->cb.vlib_thread_set_lcore_cb (0, tm->main_lcore);
296 CPU_SET (tm->main_lcore, &cpuset);
297 pthread_setaffinity_np (pthread_self (), sizeof (cpu_set_t), &cpuset);
300 /* as many threads as stacks... */
301 vec_validate_aligned (vlib_worker_threads, vec_len (vlib_thread_stacks) - 1,
302 CLIB_CACHE_LINE_BYTES);
304 /* Preallocate thread 0 */
305 _vec_len (vlib_worker_threads) = 1;
306 w = vlib_worker_threads;
307 w->thread_mheap = clib_mem_get_heap ();
308 w->thread_stack = vlib_thread_stacks[0];
309 w->cpu_id = tm->main_lcore;
310 w->lwp = syscall (SYS_gettid);
311 w->thread_id = pthread_self ();
312 tm->n_vlib_mains = 1;
314 if (tm->sched_policy != ~0)
316 struct sched_param sched_param;
317 if (!sched_getparam (w->lwp, &sched_param))
319 if (tm->sched_priority != ~0)
320 sched_param.sched_priority = tm->sched_priority;
321 sched_setscheduler (w->lwp, tm->sched_policy, &sched_param);
325 /* assign threads to cores and set n_vlib_mains */
330 vec_add1 (tm->registrations, tr);
334 vec_sort_with_function (tm->registrations, sort_registrations_by_no_clone);
336 for (i = 0; i < vec_len (tm->registrations); i++)
339 tr = tm->registrations[i];
340 tr->first_index = first_index;
341 first_index += tr->count;
342 n_vlib_mains += (tr->no_data_structure_clone == 0) ? tr->count : 0;
344 /* construct coremask */
345 if (tr->use_pthreads || !tr->count)
352 clib_bitmap_foreach (c, tr->coremask, ({
353 if (clib_bitmap_get(avail_cpu, c) == 0)
354 return clib_error_return (0, "cpu %u is not available to be used"
355 " for the '%s' thread",c, tr->name);
357 avail_cpu = clib_bitmap_set(avail_cpu, c, 0);
364 for (j = 0; j < tr->count; j++)
366 uword c = clib_bitmap_first_set (avail_cpu);
368 return clib_error_return (0,
369 "no available cpus to be used for"
370 " the '%s' thread", tr->name);
372 avail_cpu = clib_bitmap_set (avail_cpu, c, 0);
373 tr->coremask = clib_bitmap_set (tr->coremask, c, 1);
378 clib_bitmap_free (avail_cpu);
380 tm->n_vlib_mains = n_vlib_mains;
382 vec_validate_aligned (vlib_worker_threads, first_index - 1,
383 CLIB_CACHE_LINE_BYTES);
389 vlib_frame_queue_alloc (int nelts)
391 vlib_frame_queue_t *fq;
393 fq = clib_mem_alloc_aligned (sizeof (*fq), CLIB_CACHE_LINE_BYTES);
394 memset (fq, 0, sizeof (*fq));
396 fq->vector_threshold = 128; // packets
397 vec_validate_aligned (fq->elts, nelts - 1, CLIB_CACHE_LINE_BYTES);
401 if (((uword) & fq->tail) & (CLIB_CACHE_LINE_BYTES - 1))
402 fformat (stderr, "WARNING: fq->tail unaligned\n");
403 if (((uword) & fq->head) & (CLIB_CACHE_LINE_BYTES - 1))
404 fformat (stderr, "WARNING: fq->head unaligned\n");
405 if (((uword) fq->elts) & (CLIB_CACHE_LINE_BYTES - 1))
406 fformat (stderr, "WARNING: fq->elts unaligned\n");
408 if (sizeof (fq->elts[0]) % CLIB_CACHE_LINE_BYTES)
409 fformat (stderr, "WARNING: fq->elts[0] size %d\n",
410 sizeof (fq->elts[0]));
411 if (nelts & (nelts - 1))
413 fformat (stderr, "FATAL: nelts MUST be a power of 2\n");
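/*
 * The power-of-2 requirement lets the ring wrap with a simple mask,
 * e.g. elt = fq->elts + (index & (fq->nelts - 1)), which is exactly how
 * the enqueue and dequeue paths below index the ring.
 */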
421 void vl_msg_api_handler_no_free (void *) __attribute__ ((weak));
423 vl_msg_api_handler_no_free (void *v)
427 /* Turned off, save as reference material... */
430 vlib_frame_queue_dequeue_internal (int thread_id,
431 vlib_main_t * vm, vlib_node_main_t * nm)
433 vlib_frame_queue_t *fq = vlib_frame_queues[thread_id];
434 vlib_frame_queue_elt_t *elt;
436 vlib_pending_frame_t *p;
437 vlib_node_runtime_t *r;
438 u32 node_runtime_index;
443 ASSERT (vm == vlib_mains[thread_id]);
447 if (fq->head == fq->tail)
450 elt = fq->elts + ((fq->head + 1) & (fq->nelts - 1));
455 before = clib_cpu_time_now ();
458 node_runtime_index = elt->node_runtime_index;
459 msg_type = elt->msg_type;
463 case VLIB_FRAME_QUEUE_ELT_FREE_BUFFERS:
464 vlib_buffer_free (vm, vlib_frame_vector_args (f), f->n_vectors);
465 /* note fallthrough... */
466 case VLIB_FRAME_QUEUE_ELT_FREE_FRAME:
467 r = vec_elt_at_index (nm->nodes_by_type[VLIB_NODE_TYPE_INTERNAL],
469 vlib_frame_free (vm, r, f);
471 case VLIB_FRAME_QUEUE_ELT_DISPATCH_FRAME:
472 vec_add2 (vm->node_main.pending_frames, p, 1);
473 f->flags |= (VLIB_FRAME_PENDING | VLIB_FRAME_FREE_AFTER_DISPATCH);
474 p->node_runtime_index = elt->node_runtime_index;
475 p->frame_index = vlib_frame_index (vm, f);
476 p->next_frame_index = VLIB_PENDING_FRAME_NO_NEXT_FRAME;
477 fq->dequeue_vectors += (u64) f->n_vectors;
479 case VLIB_FRAME_QUEUE_ELT_API_MSG:
480 vl_msg_api_handler_no_free (f);
483 clib_warning ("bogus frame queue message, type %d", msg_type);
488 fq->dequeue_ticks += clib_cpu_time_now () - before;
489 CLIB_MEMORY_BARRIER ();
498 vlib_frame_queue_dequeue (int thread_id,
499 vlib_main_t * vm, vlib_node_main_t * nm)
501 return vlib_frame_queue_dequeue_internal (thread_id, vm, nm);
505 vlib_frame_queue_enqueue (vlib_main_t * vm, u32 node_runtime_index,
506 u32 frame_queue_index, vlib_frame_t * frame,
507 vlib_frame_queue_msg_type_t type)
509 vlib_frame_queue_t *fq = vlib_frame_queues[frame_queue_index];
510 vlib_frame_queue_elt_t *elt;
513 u64 before = clib_cpu_time_now ();
517 new_tail = __sync_add_and_fetch (&fq->tail, 1);
519 /* Wait until a ring slot is available */
520 while (new_tail >= fq->head + fq->nelts)
522 f64 b4 = vlib_time_now_ticks (vm, before);
523 vlib_worker_thread_barrier_check (vm, b4);
524 /* Bad idea. Dequeue -> enqueue -> dequeue -> trouble */
525 // vlib_frame_queue_dequeue (vm->thread_index, vm, nm);
528 elt = fq->elts + (new_tail & (fq->nelts - 1));
530 /* this would be very bad... */
535 /* Once we enqueue the frame, frame->n_vectors is owned elsewhere... */
536 save_count = frame->n_vectors;
539 elt->node_runtime_index = node_runtime_index;
540 elt->msg_type = type;
541 CLIB_MEMORY_BARRIER ();
548 /* To be called by vlib worker threads upon startup */
550 vlib_worker_thread_init (vlib_worker_thread_t * w)
552 vlib_thread_main_t *tm = vlib_get_thread_main ();
555 * Note: disabling signals in worker threads as follows
556 * prevents the api post-mortem dump scheme from working
560 * pthread_sigmask (SIG_SETMASK, &s, 0);
564 clib_mem_set_heap (w->thread_mheap);
566 if (vec_len (tm->thread_prefix) && w->registration->short_name)
568 w->name = format (0, "%v_%s_%d%c", tm->thread_prefix,
569 w->registration->short_name, w->instance_id, '\0');
570 vlib_set_thread_name ((char *) w->name);
573 if (!w->registration->use_pthreads)
576 /* Initial barrier sync, for both worker and i/o threads */
577 clib_smp_atomic_add (vlib_worker_threads->workers_at_barrier, 1);
579 while (*vlib_worker_threads->wait_at_barrier)
582 clib_smp_atomic_add (vlib_worker_threads->workers_at_barrier, -1);
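/*
 * Startup handshake: each worker bumps workers_at_barrier, spins until the
 * main thread clears wait_at_barrier (pre-set to 1 in start_workers ()),
 * then decrements the count again before entering its main loop.
 */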
587 vlib_worker_thread_bootstrap_fn (void *arg)
590 vlib_worker_thread_t *w = arg;
592 w->lwp = syscall (SYS_gettid);
593 w->thread_id = pthread_self ();
595 __os_thread_index = w - vlib_worker_threads;
597 rv = (void *) clib_calljmp
598 ((uword (*)(uword)) w->thread_function,
599 (uword) arg, w->thread_stack + VLIB_THREAD_STACK_SIZE);
600 /* NOTREACHED, we hope */
605 vlib_get_thread_core_socket (vlib_worker_thread_t * w, unsigned cpu_id)
607 const char *sys_cpu_path = "/sys/devices/system/cpu/cpu";
609 int core_id = -1, socket_id = -1;
611 p = format (p, "%s%u/topology/core_id%c", sys_cpu_path, cpu_id, 0);
612 clib_sysfs_read ((char *) p, "%d", &core_id);
613 vec_reset_length (p);
615 format (p, "%s%u/topology/physical_package_id%c", sys_cpu_path, cpu_id,
617 clib_sysfs_read ((char *) p, "%d", &socket_id);
620 w->core_id = core_id;
621 w->socket_id = socket_id;
624 static clib_error_t *
625 vlib_launch_thread_int (void *fp, vlib_worker_thread_t * w, unsigned cpu_id)
627 vlib_thread_main_t *tm = &vlib_thread_main;
628 void *(*fp_arg) (void *) = fp;
631 vlib_get_thread_core_socket (w, cpu_id);
632 if (tm->cb.vlib_launch_thread_cb && !w->registration->use_pthreads)
633 return tm->cb.vlib_launch_thread_cb (fp, (void *) w, cpu_id);
639 CPU_SET (cpu_id, &cpuset);
641 if (pthread_create (&worker, NULL /* attr */ , fp_arg, (void *) w))
642 return clib_error_return_unix (0, "pthread_create");
644 if (pthread_setaffinity_np (worker, sizeof (cpu_set_t), &cpuset))
645 return clib_error_return_unix (0, "pthread_setaffinity_np");
651 static clib_error_t *
652 start_workers (vlib_main_t * vm)
655 vlib_worker_thread_t *w;
656 vlib_main_t *vm_clone;
658 vlib_thread_main_t *tm = &vlib_thread_main;
659 vlib_thread_registration_t *tr;
660 vlib_node_runtime_t *rt;
661 u32 n_vlib_mains = tm->n_vlib_mains;
662 u32 worker_thread_index;
663 u8 *main_heap = clib_mem_get_per_cpu_heap ();
665 vec_reset_length (vlib_worker_threads);
667 /* Set up the main thread */
668 vec_add2_aligned (vlib_worker_threads, w, 1, CLIB_CACHE_LINE_BYTES);
669 w->elog_track.name = "main thread";
670 elog_track_register (&vm->elog_main, &w->elog_track);
672 if (vec_len (tm->thread_prefix))
674 w->name = format (0, "%v_main%c", tm->thread_prefix, '\0');
675 vlib_set_thread_name ((char *) w->name);
679 clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES, CLIB_CACHE_LINE_BYTES);
680 vm->elog_main.lock[0] = 0;
682 if (n_vlib_mains > 1)
684 /* Replace hand-crafted length-1 vector with a real vector */
687 vec_validate_aligned (vlib_mains, tm->n_vlib_mains - 1,
688 CLIB_CACHE_LINE_BYTES);
689 _vec_len (vlib_mains) = 0;
690 vec_add1_aligned (vlib_mains, vm, CLIB_CACHE_LINE_BYTES);
692 vlib_worker_threads->wait_at_barrier =
693 clib_mem_alloc_aligned (sizeof (u32), CLIB_CACHE_LINE_BYTES);
694 vlib_worker_threads->workers_at_barrier =
695 clib_mem_alloc_aligned (sizeof (u32), CLIB_CACHE_LINE_BYTES);
697 vlib_worker_threads->node_reforks_required =
698 clib_mem_alloc_aligned (sizeof (u32), CLIB_CACHE_LINE_BYTES);
700 /* Ask for an initial barrier sync */
701 *vlib_worker_threads->workers_at_barrier = 0;
702 *vlib_worker_threads->wait_at_barrier = 1;
704 /* Without update or refork */
705 *vlib_worker_threads->node_reforks_required = 0;
706 vm->need_vlib_worker_thread_node_runtime_update = 0;
709 vm->barrier_epoch = 0;
710 vm->barrier_no_close_before = 0;
712 worker_thread_index = 1;
714 for (i = 0; i < vec_len (tm->registrations); i++)
716 vlib_node_main_t *nm, *nm_clone;
717 vlib_buffer_free_list_t *fl_clone, *fl_orig;
718 vlib_buffer_free_list_t *orig_freelist_pool;
721 tr = tm->registrations[i];
726 for (k = 0; k < tr->count; k++)
730 vec_add2 (vlib_worker_threads, w, 1);
731 /* Currently unused, may not really work */
734 #if USE_DLMALLOC == 0
736 mheap_alloc (0 /* use VM */ , tr->mheap_size);
738 w->thread_mheap = create_mspace (tr->mheap_size,
743 w->thread_mheap = main_heap;
746 vlib_thread_stack_init (w - vlib_worker_threads);
747 w->thread_function = tr->function;
748 w->thread_function_arg = w;
750 w->registration = tr;
753 (char *) format (0, "%s %d", tr->name, k + 1);
754 vec_add1 (w->elog_track.name, 0);
755 elog_track_register (&vm->elog_main, &w->elog_track);
757 if (tr->no_data_structure_clone)
760 /* Fork vlib_global_main et al. Look for bugs here */
761 oldheap = clib_mem_set_heap (w->thread_mheap);
763 vm_clone = clib_mem_alloc_aligned (sizeof (*vm_clone),
764 CLIB_CACHE_LINE_BYTES);
765 clib_memcpy (vm_clone, vlib_mains[0], sizeof (*vm_clone));
767 vm_clone->thread_index = worker_thread_index;
768 vm_clone->heap_base = w->thread_mheap;
769 vm_clone->heap_aligned_base = (void *)
770 (((uword) w->thread_mheap) & ~(VLIB_FRAME_ALIGN - 1));
771 vm_clone->init_functions_called =
772 hash_create (0, /* value bytes */ 0);
773 vm_clone->pending_rpc_requests = 0;
774 vec_validate (vm_clone->pending_rpc_requests, 0);
775 _vec_len (vm_clone->pending_rpc_requests) = 0;
776 memset (&vm_clone->random_buffer, 0,
777 sizeof (vm_clone->random_buffer));
779 nm = &vlib_mains[0]->node_main;
780 nm_clone = &vm_clone->node_main;
781 /* fork next frames array, preserving node runtime indices */
782 nm_clone->next_frames = vec_dup_aligned (nm->next_frames,
783 CLIB_CACHE_LINE_BYTES);
784 for (j = 0; j < vec_len (nm_clone->next_frames); j++)
786 vlib_next_frame_t *nf = &nm_clone->next_frames[j];
787 u32 save_node_runtime_index;
790 save_node_runtime_index = nf->node_runtime_index;
791 save_flags = nf->flags & VLIB_FRAME_NO_FREE_AFTER_DISPATCH;
792 vlib_next_frame_init (nf);
793 nf->node_runtime_index = save_node_runtime_index;
794 nf->flags = save_flags;
797 /* fork the frame dispatch queue */
798 nm_clone->pending_frames = 0;
799 vec_validate (nm_clone->pending_frames, 10); /* $$$$$?????? */
800 _vec_len (nm_clone->pending_frames) = 0;
805 /* Allocate all nodes in single block for speed */
806 n = clib_mem_alloc_no_fail (vec_len (nm->nodes) * sizeof (*n));
808 for (j = 0; j < vec_len (nm->nodes); j++)
810 clib_memcpy (n, nm->nodes[j], sizeof (*n));
811 /* none of the copied nodes have enqueue rights given out */
812 n->owner_node_index = VLIB_INVALID_NODE_INDEX;
813 memset (&n->stats_total, 0, sizeof (n->stats_total));
814 memset (&n->stats_last_clear, 0,
815 sizeof (n->stats_last_clear));
816 vec_add1 (nm_clone->nodes, n);
819 nm_clone->nodes_by_type[VLIB_NODE_TYPE_INTERNAL] =
820 vec_dup_aligned (nm->nodes_by_type[VLIB_NODE_TYPE_INTERNAL],
821 CLIB_CACHE_LINE_BYTES);
823 nm_clone->nodes_by_type[VLIB_NODE_TYPE_INTERNAL])
825 vlib_node_t *n = vlib_get_node (vm, rt->node_index);
826 rt->thread_index = vm_clone->thread_index;
827 /* copy initial runtime_data from node */
828 if (n->runtime_data && n->runtime_data_bytes > 0)
829 clib_memcpy (rt->runtime_data, n->runtime_data,
830 clib_min (VLIB_NODE_RUNTIME_DATA_SIZE,
831 n->runtime_data_bytes));
834 nm_clone->nodes_by_type[VLIB_NODE_TYPE_INPUT] =
835 vec_dup_aligned (nm->nodes_by_type[VLIB_NODE_TYPE_INPUT],
836 CLIB_CACHE_LINE_BYTES);
837 vec_foreach (rt, nm_clone->nodes_by_type[VLIB_NODE_TYPE_INPUT])
839 vlib_node_t *n = vlib_get_node (vm, rt->node_index);
840 rt->thread_index = vm_clone->thread_index;
841 /* copy initial runtime_data from node */
842 if (n->runtime_data && n->runtime_data_bytes > 0)
843 clib_memcpy (rt->runtime_data, n->runtime_data,
844 clib_min (VLIB_NODE_RUNTIME_DATA_SIZE,
845 n->runtime_data_bytes));
848 nm_clone->processes = vec_dup_aligned (nm->processes,
849 CLIB_CACHE_LINE_BYTES);
851 /* zap the (per worker) frame freelists, etc */
852 nm_clone->frame_sizes = 0;
853 nm_clone->frame_size_hash = hash_create (0, sizeof (uword));
855 /* Packet trace buffers are guaranteed to be empty, nothing to do here */
857 clib_mem_set_heap (oldheap);
858 vec_add1_aligned (vlib_mains, vm_clone, CLIB_CACHE_LINE_BYTES);
860 vm_clone->error_main.counters = vec_dup_aligned
861 (vlib_mains[0]->error_main.counters, CLIB_CACHE_LINE_BYTES);
862 vm_clone->error_main.counters_last_clear = vec_dup_aligned
863 (vlib_mains[0]->error_main.counters_last_clear,
864 CLIB_CACHE_LINE_BYTES);
866 /* Fork the vlib_buffer_main_t free lists, etc. */
867 orig_freelist_pool = vm_clone->buffer_free_list_pool;
868 vm_clone->buffer_free_list_pool = 0;
871 pool_foreach (fl_orig, orig_freelist_pool,
873 pool_get_aligned (vm_clone->buffer_free_list_pool,
874 fl_clone, CLIB_CACHE_LINE_BYTES);
875 ASSERT (fl_orig - orig_freelist_pool
876 == fl_clone - vm_clone->buffer_free_list_pool);
878 fl_clone[0] = fl_orig[0];
879 fl_clone->buffers = 0;
880 fl_clone->n_alloc = 0;
884 worker_thread_index++;
890 /* only have non-data-structure copy threads to create... */
891 for (i = 0; i < vec_len (tm->registrations); i++)
893 tr = tm->registrations[i];
895 for (j = 0; j < tr->count; j++)
897 vec_add2 (vlib_worker_threads, w, 1);
900 #if USE_DLMALLOC == 0
902 mheap_alloc (0 /* use VM */ , tr->mheap_size);
905 create_mspace (tr->mheap_size, 0 /* locked */ );
909 w->thread_mheap = main_heap;
911 vlib_thread_stack_init (w - vlib_worker_threads);
912 w->thread_function = tr->function;
913 w->thread_function_arg = w;
916 (char *) format (0, "%s %d", tr->name, j + 1);
917 w->registration = tr;
918 vec_add1 (w->elog_track.name, 0);
919 elog_track_register (&vm->elog_main, &w->elog_track);
924 worker_thread_index = 1;
926 for (i = 0; i < vec_len (tm->registrations); i++)
931 tr = tm->registrations[i];
933 if (tr->use_pthreads || tm->use_pthreads)
935 for (j = 0; j < tr->count; j++)
937 w = vlib_worker_threads + worker_thread_index++;
938 err = vlib_launch_thread_int (vlib_worker_thread_bootstrap_fn,
941 clib_error_report (err);
948 clib_bitmap_foreach (c, tr->coremask, ({
949 w = vlib_worker_threads + worker_thread_index++;
950 err = vlib_launch_thread_int (vlib_worker_thread_bootstrap_fn,
953 clib_error_report (err);
958 vlib_worker_thread_barrier_sync (vm);
959 vlib_worker_thread_barrier_release (vm);
963 VLIB_MAIN_LOOP_ENTER_FUNCTION (start_workers);
967 worker_thread_node_runtime_update_internal (void)
971 vlib_node_main_t *nm, *nm_clone;
972 vlib_main_t *vm_clone;
973 vlib_node_runtime_t *rt;
975 vlib_node_runtime_sync_stats (vlib_main_t * vm,
976 vlib_node_runtime_t * r,
978 uword n_vectors, uword n_clocks);
980 ASSERT (vlib_get_thread_index () == 0);
985 ASSERT (*vlib_worker_threads->wait_at_barrier == 1);
988 * Scrape all runtime stats, so we don't lose node runtime(s) with
989 * pending counts, or throw away worker / io thread counts.
991 for (j = 0; j < vec_len (nm->nodes); j++)
995 vlib_node_sync_stats (vm, n);
998 for (i = 1; i < vec_len (vlib_mains); i++)
1002 vm_clone = vlib_mains[i];
1003 nm_clone = &vm_clone->node_main;
1005 for (j = 0; j < vec_len (nm_clone->nodes); j++)
1007 n = nm_clone->nodes[j];
1009 rt = vlib_node_get_runtime (vm_clone, n->index);
1010 vlib_node_runtime_sync_stats (vm_clone, rt, 0, 0, 0);
1014 /* Per-worker clone rebuilds are now done on each thread */
1019 vlib_worker_thread_node_refork (void)
1021 vlib_main_t *vm, *vm_clone;
1022 vlib_node_main_t *nm, *nm_clone;
1023 vlib_node_t **old_nodes_clone;
1024 vlib_node_runtime_t *rt, *old_rt;
1026 vlib_node_t *new_n_clone;
1031 nm = &vm->node_main;
1032 vm_clone = vlib_get_main ();
1033 nm_clone = &vm_clone->node_main;
1035 /* Re-clone error heap */
1036 u64 *old_counters = vm_clone->error_main.counters;
1037 u64 *old_counters_all_clear = vm_clone->error_main.counters_last_clear;
1039 clib_memcpy (&vm_clone->error_main, &vm->error_main,
1040 sizeof (vm->error_main));
1041 j = vec_len (vm->error_main.counters) - 1;
1042 vec_validate_aligned (old_counters, j, CLIB_CACHE_LINE_BYTES);
1043 vec_validate_aligned (old_counters_all_clear, j, CLIB_CACHE_LINE_BYTES);
1044 vm_clone->error_main.counters = old_counters;
1045 vm_clone->error_main.counters_last_clear = old_counters_all_clear;
1047 nm_clone = &vm_clone->node_main;
1048 vec_free (nm_clone->next_frames);
1049 nm_clone->next_frames = vec_dup_aligned (nm->next_frames,
1050 CLIB_CACHE_LINE_BYTES);
1052 for (j = 0; j < vec_len (nm_clone->next_frames); j++)
1054 vlib_next_frame_t *nf = &nm_clone->next_frames[j];
1055 u32 save_node_runtime_index;
1058 save_node_runtime_index = nf->node_runtime_index;
1059 save_flags = nf->flags & VLIB_FRAME_NO_FREE_AFTER_DISPATCH;
1060 vlib_next_frame_init (nf);
1061 nf->node_runtime_index = save_node_runtime_index;
1062 nf->flags = save_flags;
1065 old_nodes_clone = nm_clone->nodes;
1066 nm_clone->nodes = 0;
1070 /* Allocate all nodes in single block for speed */
1072 clib_mem_alloc_no_fail (vec_len (nm->nodes) * sizeof (*new_n_clone));
1073 for (j = 0; j < vec_len (nm->nodes); j++)
1075 vlib_node_t *old_n_clone;
1078 new_n = nm->nodes[j];
1079 old_n_clone = old_nodes_clone[j];
1081 clib_memcpy (new_n_clone, new_n, sizeof (*new_n));
1082 /* none of the copied nodes have enqueue rights given out */
1083 new_n_clone->owner_node_index = VLIB_INVALID_NODE_INDEX;
1085 if (j >= vec_len (old_nodes_clone))
1087 /* new node, set to zero */
1088 memset (&new_n_clone->stats_total, 0,
1089 sizeof (new_n_clone->stats_total));
1090 memset (&new_n_clone->stats_last_clear, 0,
1091 sizeof (new_n_clone->stats_last_clear));
1095 /* Copy stats if the old data is valid */
1096 clib_memcpy (&new_n_clone->stats_total,
1097 &old_n_clone->stats_total,
1098 sizeof (new_n_clone->stats_total));
1099 clib_memcpy (&new_n_clone->stats_last_clear,
1100 &old_n_clone->stats_last_clear,
1101 sizeof (new_n_clone->stats_last_clear));
1103 /* keep previous node state */
1104 new_n_clone->state = old_n_clone->state;
1106 vec_add1 (nm_clone->nodes, new_n_clone);
1109 /* Free the old node clones */
1110 clib_mem_free (old_nodes_clone[0]);
1112 vec_free (old_nodes_clone);
1115 /* re-clone internal nodes */
1116 old_rt = nm_clone->nodes_by_type[VLIB_NODE_TYPE_INTERNAL];
1117 nm_clone->nodes_by_type[VLIB_NODE_TYPE_INTERNAL] =
1118 vec_dup_aligned (nm->nodes_by_type[VLIB_NODE_TYPE_INTERNAL],
1119 CLIB_CACHE_LINE_BYTES);
1121 vec_foreach (rt, nm_clone->nodes_by_type[VLIB_NODE_TYPE_INTERNAL])
1123 vlib_node_t *n = vlib_get_node (vm, rt->node_index);
1124 rt->thread_index = vm_clone->thread_index;
1125 /* copy runtime_data, will be overwritten later for existing rt */
1126 if (n->runtime_data && n->runtime_data_bytes > 0)
1127 clib_memcpy (rt->runtime_data, n->runtime_data,
1128 clib_min (VLIB_NODE_RUNTIME_DATA_SIZE,
1129 n->runtime_data_bytes));
1132 for (j = 0; j < vec_len (old_rt); j++)
1134 rt = vlib_node_get_runtime (vm_clone, old_rt[j].node_index);
1135 rt->state = old_rt[j].state;
1136 clib_memcpy (rt->runtime_data, old_rt[j].runtime_data,
1137 VLIB_NODE_RUNTIME_DATA_SIZE);
1142 /* re-clone input nodes */
1143 old_rt = nm_clone->nodes_by_type[VLIB_NODE_TYPE_INPUT];
1144 nm_clone->nodes_by_type[VLIB_NODE_TYPE_INPUT] =
1145 vec_dup_aligned (nm->nodes_by_type[VLIB_NODE_TYPE_INPUT],
1146 CLIB_CACHE_LINE_BYTES);
1148 vec_foreach (rt, nm_clone->nodes_by_type[VLIB_NODE_TYPE_INPUT])
1150 vlib_node_t *n = vlib_get_node (vm, rt->node_index);
1151 rt->thread_index = vm_clone->thread_index;
1152 /* copy runtime_data, will be overwritten later for existing rt */
1153 if (n->runtime_data && n->runtime_data_bytes > 0)
1154 clib_memcpy (rt->runtime_data, n->runtime_data,
1155 clib_min (VLIB_NODE_RUNTIME_DATA_SIZE,
1156 n->runtime_data_bytes));
1159 for (j = 0; j < vec_len (old_rt); j++)
1161 rt = vlib_node_get_runtime (vm_clone, old_rt[j].node_index);
1162 rt->state = old_rt[j].state;
1163 clib_memcpy (rt->runtime_data, old_rt[j].runtime_data,
1164 VLIB_NODE_RUNTIME_DATA_SIZE);
1169 nm_clone->processes = vec_dup_aligned (nm->processes,
1170 CLIB_CACHE_LINE_BYTES);
1174 vlib_worker_thread_node_runtime_update (void)
1177 * Make a note that we need to do a node runtime update
1178 * prior to releasing the barrier.
1180 vlib_global_main.need_vlib_worker_thread_node_runtime_update = 1;
1184 unformat_sched_policy (unformat_input_t * input, va_list * args)
1186 u32 *r = va_arg (*args, u32 *);
1189 #define _(v,f,s) else if (unformat (input, s)) *r = SCHED_POLICY_##f;
1190 foreach_sched_policy
1197 static clib_error_t *
1198 cpu_config (vlib_main_t * vm, unformat_input_t * input)
1200 vlib_thread_registration_t *tr;
1202 vlib_thread_main_t *tm = &vlib_thread_main;
1207 tm->thread_registrations_by_name = hash_create_string (0, sizeof (uword));
1209 tm->n_thread_stacks = 1; /* account for main thread */
1210 tm->sched_policy = ~0;
1211 tm->sched_priority = ~0;
1212 tm->main_lcore = ~0;
1218 hash_set_mem (tm->thread_registrations_by_name, tr->name, (uword) tr);
1222 while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
1224 if (unformat (input, "use-pthreads"))
1225 tm->use_pthreads = 1;
1226 else if (unformat (input, "thread-prefix %v", &tm->thread_prefix))
1228 else if (unformat (input, "main-core %u", &tm->main_lcore))
1230 else if (unformat (input, "skip-cores %u", &tm->skip_cores))
1232 else if (unformat (input, "coremask-%s %U", &name,
1233 unformat_bitmap_mask, &bitmap) ||
1234 unformat (input, "corelist-%s %U", &name,
1235 unformat_bitmap_list, &bitmap))
1237 p = hash_get_mem (tm->thread_registrations_by_name, name);
1239 return clib_error_return (0, "no such thread type '%s'", name);
1241 tr = (vlib_thread_registration_t *) p[0];
1243 if (tr->use_pthreads)
1244 return clib_error_return (0,
1245 "corelist cannot be set for '%s' threads",
1248 tr->coremask = bitmap;
1249 tr->count = clib_bitmap_count_set_bits (tr->coremask);
1253 (input, "scheduler-policy %U", unformat_sched_policy,
1256 else if (unformat (input, "scheduler-priority %u", &tm->sched_priority))
1258 else if (unformat (input, "%s %u", &name, &count))
1260 p = hash_get_mem (tm->thread_registrations_by_name, name);
1262 return clib_error_return (0, "no such thread type 3 '%s'", name);
1264 tr = (vlib_thread_registration_t *) p[0];
1265 if (tr->fixed_count)
1266 return clib_error_return
1267 (0, "number of %s threads not configurable", tr->name);
1274 if (tm->sched_priority != ~0)
1276 if (tm->sched_policy == SCHED_FIFO || tm->sched_policy == SCHED_RR)
1278 u32 prio_max = sched_get_priority_max (tm->sched_policy);
1279 u32 prio_min = sched_get_priority_min (tm->sched_policy);
1280 if (tm->sched_priority > prio_max)
1281 tm->sched_priority = prio_max;
1282 if (tm->sched_priority < prio_min)
1283 tm->sched_priority = prio_min;
1287 return clib_error_return
1289 "scheduling priority (%d) is not allowed for `normal` scheduling policy",
1290 tm->sched_priority);
1295 if (!tm->thread_prefix)
1296 tm->thread_prefix = format (0, "vpp");
1300 tm->n_thread_stacks += tr->count;
1301 tm->n_pthreads += tr->count * tr->use_pthreads;
1302 tm->n_threads += tr->count * (tr->use_pthreads == 0);
1309 VLIB_EARLY_CONFIG_FUNCTION (cpu_config, "cpu");
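/*
 * Example (illustrative) startup.conf stanza handled by cpu_config above:
 *
 *   cpu {
 *     main-core 1
 *     corelist-workers 2-5
 *     skip-cores 0
 *   }
 *
 * "corelist-<name>" and "coremask-<name>" look up a registered thread type
 * by name, so the "workers" suffix assumes the standard worker thread
 * registration.
 */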
1311 #if !defined (__x86_64__) && !defined (__i386__) && !defined (__aarch64__) && !defined (__powerpc64__) && !defined(__arm__)
1313 __sync_fetch_and_add_8 (void)
1315 fformat (stderr, "%s called\n", __FUNCTION__);
1320 __sync_add_and_fetch_8 (void)
1322 fformat (stderr, "%s called\n", __FUNCTION__);
1327 void vnet_main_fixup (vlib_fork_fixup_t which) __attribute__ ((weak));
1329 vnet_main_fixup (vlib_fork_fixup_t which)
1334 vlib_worker_thread_fork_fixup (vlib_fork_fixup_t which)
1336 vlib_main_t *vm = vlib_get_main ();
1338 if (vlib_mains == 0)
1341 ASSERT (vlib_get_thread_index () == 0);
1342 vlib_worker_thread_barrier_sync (vm);
1346 case VLIB_WORKER_THREAD_FORK_FIXUP_NEW_SW_IF_INDEX:
1347 vnet_main_fixup (VLIB_WORKER_THREAD_FORK_FIXUP_NEW_SW_IF_INDEX);
1353 vlib_worker_thread_barrier_release (vm);
1357 * Enforce minimum open time to minimize packet loss due to Rx overflow,
1358 * based on a test-based heuristic that the barrier should be open for at
1359 * least 3 times as long as it is closed (with an upper bound of 1 ms,
1360 * because by that point it is probably too late to make a difference).
1363 #ifndef BARRIER_MINIMUM_OPEN_LIMIT
1364 #define BARRIER_MINIMUM_OPEN_LIMIT 0.001
1367 #ifndef BARRIER_MINIMUM_OPEN_FACTOR
1368 #define BARRIER_MINIMUM_OPEN_FACTOR 3
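/*
 * Worked example: if the barrier was closed for 200 usec, it must stay
 * open for at least 3 * 200 = 600 usec before it may close again; once the
 * closed time exceeds roughly 333 usec, the requirement is capped at the
 * 1 ms BARRIER_MINIMUM_OPEN_LIMIT.
 */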
1372 vlib_worker_thread_barrier_sync_int (vlib_main_t * vm)
1381 if (vec_len (vlib_mains) < 2)
1384 ASSERT (vlib_get_thread_index () == 0);
1386 count = vec_len (vlib_mains) - 1;
1388 /* Record entry relative to last close */
1389 now = vlib_time_now (vm);
1390 t_entry = now - vm->barrier_epoch;
1392 /* Tolerate recursive calls */
1393 if (++vlib_worker_threads[0].recursion_level > 1)
1395 barrier_trace_sync_rec (t_entry);
1399 vlib_worker_threads[0].barrier_sync_count++;
1401 /* Enforce minimum barrier open time to minimize packet loss */
1402 ASSERT (vm->barrier_no_close_before <= (now + BARRIER_MINIMUM_OPEN_LIMIT));
1406 now = vlib_time_now (vm);
1407 /* Barrier hold-down timer expired? */
1408 if (now >= vm->barrier_no_close_before)
1410 if ((vm->barrier_no_close_before - now)
1411 > (2.0 * BARRIER_MINIMUM_OPEN_LIMIT))
1413 clib_warning ("clock change: would have waited for %.4f seconds",
1414 (vm->barrier_no_close_before - now));
1418 /* Record time of closure */
1419 t_open = now - vm->barrier_epoch;
1420 vm->barrier_epoch = now;
1422 deadline = now + BARRIER_SYNC_TIMEOUT;
1424 *vlib_worker_threads->wait_at_barrier = 1;
1425 while (*vlib_worker_threads->workers_at_barrier != count)
1427 if ((now = vlib_time_now (vm)) > deadline)
1429 fformat (stderr, "%s: worker thread deadlock\n", __FUNCTION__);
1434 t_closed = now - vm->barrier_epoch;
1436 barrier_trace_sync (t_entry, t_open, t_closed);
1440 void vlib_stat_segment_lock (void) __attribute__ ((weak));
1442 vlib_stat_segment_lock (void)
1446 void vlib_stat_segment_unlock (void) __attribute__ ((weak));
1448 vlib_stat_segment_unlock (void)
1453 vlib_worker_thread_barrier_release (vlib_main_t * vm)
1460 f64 t_update_main = 0.0;
1461 int refork_needed = 0;
1463 if (vec_len (vlib_mains) < 2)
1466 ASSERT (vlib_get_thread_index () == 0);
1469 now = vlib_time_now (vm);
1470 t_entry = now - vm->barrier_epoch;
1472 if (--vlib_worker_threads[0].recursion_level > 0)
1474 barrier_trace_release_rec (t_entry);
1478 /* Update (all) node runtimes before releasing the barrier, if needed */
1479 if (vm->need_vlib_worker_thread_node_runtime_update)
1482 * Lock stat segment here, so we're safe when
1483 * rebuilding the stat segment node clones from the
1486 vlib_stat_segment_lock ();
1488 /* Do stats elements on main thread */
1489 worker_thread_node_runtime_update_internal ();
1490 vm->need_vlib_worker_thread_node_runtime_update = 0;
1492 /* Do per thread rebuilds in parallel */
1494 clib_smp_atomic_add (vlib_worker_threads->node_reforks_required,
1495 (vec_len (vlib_mains) - 1));
1496 now = vlib_time_now (vm);
1497 t_update_main = now - vm->barrier_epoch;
1500 deadline = now + BARRIER_SYNC_TIMEOUT;
1502 *vlib_worker_threads->wait_at_barrier = 0;
1504 while (*vlib_worker_threads->workers_at_barrier > 0)
1506 if ((now = vlib_time_now (vm)) > deadline)
1508 fformat (stderr, "%s: worker thread deadlock\n", __FUNCTION__);
1513 /* Wait for reforks before continuing */
1516 now = vlib_time_now (vm);
1518 deadline = now + BARRIER_SYNC_TIMEOUT;
1520 while (*vlib_worker_threads->node_reforks_required > 0)
1522 if ((now = vlib_time_now (vm)) > deadline)
1524 fformat (stderr, "%s: worker thread refork deadlock\n",
1529 vlib_stat_segment_unlock ();
1532 t_closed_total = now - vm->barrier_epoch;
1534 minimum_open = t_closed_total * BARRIER_MINIMUM_OPEN_FACTOR;
1536 if (minimum_open > BARRIER_MINIMUM_OPEN_LIMIT)
1538 minimum_open = BARRIER_MINIMUM_OPEN_LIMIT;
1541 vm->barrier_no_close_before = now + minimum_open;
1543 /* Record barrier epoch (used to enforce minimum open time) */
1544 vm->barrier_epoch = now;
1546 barrier_trace_release (t_entry, t_closed_total, t_update_main);
1551 * Check the frame queue to see if any frames are available.
1552 * If so, pull the packets off the frames and put them to
1556 vlib_frame_queue_dequeue (vlib_main_t * vm, vlib_frame_queue_main_t * fqm)
1558 u32 thread_id = vm->thread_index;
1559 vlib_frame_queue_t *fq = fqm->vlib_frame_queues[thread_id];
1560 vlib_frame_queue_elt_t *elt;
1569 ASSERT (vm == vlib_mains[thread_id]);
1571 if (PREDICT_FALSE (fqm->node_index == ~0))
1574 * Gather trace data for frame queues
1576 if (PREDICT_FALSE (fq->trace))
1578 frame_queue_trace_t *fqt;
1579 frame_queue_nelt_counter_t *fqh;
1582 fqt = &fqm->frame_queue_traces[thread_id];
1584 fqt->nelts = fq->nelts;
1585 fqt->head = fq->head;
1586 fqt->head_hint = fq->head_hint;
1587 fqt->tail = fq->tail;
1588 fqt->threshold = fq->vector_threshold;
1589 fqt->n_in_use = fqt->tail - fqt->head;
1590 if (fqt->n_in_use >= fqt->nelts)
1592 // if beyond max then use max
1593 fqt->n_in_use = fqt->nelts - 1;
1596 /* Record the number of elements in use in the histogram */
1597 fqh = &fqm->frame_queue_histogram[thread_id];
1598 fqh->count[fqt->n_in_use]++;
1600 /* Record a snapshot of the elements in use */
1601 for (elix = 0; elix < fqt->nelts; elix++)
1603 elt = fq->elts + ((fq->head + 1 + elix) & (fq->nelts - 1));
1604 if (1 || elt->valid)
1606 fqt->n_vectors[elix] = elt->n_vectors;
1614 if (fq->head == fq->tail)
1616 fq->head_hint = fq->head;
1620 elt = fq->elts + ((fq->head + 1) & (fq->nelts - 1));
1624 fq->head_hint = fq->head;
1628 from = elt->buffer_index;
1629 msg_type = elt->msg_type;
1631 ASSERT (msg_type == VLIB_FRAME_QUEUE_ELT_DISPATCH_FRAME);
1632 ASSERT (elt->n_vectors <= VLIB_FRAME_SIZE);
1634 f = vlib_get_frame_to_node (vm, fqm->node_index);
1636 to = vlib_frame_vector_args (f);
1638 n_left_to_node = elt->n_vectors;
1640 while (n_left_to_node >= 4)
1648 n_left_to_node -= 4;
1651 while (n_left_to_node > 0)
1659 vectors += elt->n_vectors;
1660 f->n_vectors = elt->n_vectors;
1661 vlib_put_frame_to_node (vm, fqm->node_index, f);
1665 elt->msg_type = 0xfefefefe;
1666 CLIB_MEMORY_BARRIER ();
1671 * Limit the number of packets pushed into the graph
1673 if (vectors >= fq->vector_threshold)
1675 fq->head_hint = fq->head;
1684 vlib_worker_thread_fn (void *arg)
1686 vlib_worker_thread_t *w = (vlib_worker_thread_t *) arg;
1687 vlib_thread_main_t *tm = vlib_get_thread_main ();
1688 vlib_main_t *vm = vlib_get_main ();
1691 ASSERT (vm->thread_index == vlib_get_thread_index ());
1693 vlib_worker_thread_init (w);
1694 clib_time_init (&vm->clib_time);
1695 clib_mem_set_heap (w->thread_mheap);
1697 /* Wait until the dpdk init sequence is complete */
1698 while (tm->extern_thread_mgmt && tm->worker_thread_release == 0)
1699 vlib_worker_thread_barrier_check ();
1701 e = vlib_call_init_exit_functions
1702 (vm, vm->worker_init_function_registrations, 1 /* call_once */ );
1704 clib_error_report (e);
1706 vlib_worker_loop (vm);
1710 VLIB_REGISTER_THREAD (worker_thread_reg, static) = {
1713 .function = vlib_worker_thread_fn,
1718 vlib_frame_queue_main_init (u32 node_index, u32 frame_queue_nelts)
1720 vlib_thread_main_t *tm = vlib_get_thread_main ();
1721 vlib_frame_queue_main_t *fqm;
1722 vlib_frame_queue_t *fq;
1725 if (frame_queue_nelts == 0)
1726 frame_queue_nelts = FRAME_QUEUE_NELTS;
1728 ASSERT (frame_queue_nelts >= 8);
1730 vec_add2 (tm->frame_queue_mains, fqm, 1);
1732 fqm->node_index = node_index;
1733 fqm->frame_queue_nelts = frame_queue_nelts;
1734 fqm->queue_hi_thresh = frame_queue_nelts - 2;
1736 vec_validate (fqm->vlib_frame_queues, tm->n_vlib_mains - 1);
1737 vec_validate (fqm->per_thread_data, tm->n_vlib_mains - 1);
1738 _vec_len (fqm->vlib_frame_queues) = 0;
1739 for (i = 0; i < tm->n_vlib_mains; i++)
1741 vlib_frame_queue_per_thread_data_t *ptd;
1742 fq = vlib_frame_queue_alloc (frame_queue_nelts);
1743 vec_add1 (fqm->vlib_frame_queues, fq);
1745 ptd = vec_elt_at_index (fqm->per_thread_data, i);
1746 vec_validate (ptd->handoff_queue_elt_by_thread_index,
1747 tm->n_vlib_mains - 1);
1748 vec_validate_init_empty (ptd->congested_handoff_queue_by_thread_index,
1749 tm->n_vlib_mains - 1,
1750 (vlib_frame_queue_t *) (~0));
1753 return (fqm - tm->frame_queue_mains);
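/*
 * Typical usage (illustrative): a handoff feature calls
 *   fq_index = vlib_frame_queue_main_init (handoff_node.index, 0);
 * once at init time; passing 0 selects the FRAME_QUEUE_NELTS default, and
 * the returned index identifies this fqm in tm->frame_queue_mains.
 * ("handoff_node" is a hypothetical node registration.)
 */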
1757 vlib_thread_cb_register (struct vlib_main_t *vm, vlib_thread_callbacks_t * cb)
1759 vlib_thread_main_t *tm = vlib_get_thread_main ();
1761 if (tm->extern_thread_mgmt)
1764 tm->cb.vlib_launch_thread_cb = cb->vlib_launch_thread_cb;
1765 tm->extern_thread_mgmt = 1;
1770 vlib_process_signal_event_mt_helper (vlib_process_signal_event_mt_args_t *
1773 ASSERT (vlib_get_thread_index () == 0);
1774 vlib_process_signal_event (vlib_get_main (), args->node_index,
1775 args->type_opaque, args->data);
1778 void *rpc_call_main_thread_cb_fn;
1781 vlib_rpc_call_main_thread (void *callback, u8 * args, u32 arg_size)
1783 if (rpc_call_main_thread_cb_fn)
1785 void (*fp) (void *, u8 *, u32) = rpc_call_main_thread_cb_fn;
1786 (*fp) (callback, args, arg_size);
1789 clib_warning ("BUG: rpc_call_main_thread_cb_fn NULL!");
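/*
 * Usage sketch (illustrative): a worker thread can defer work to the main
 * thread with
 *   vlib_rpc_call_main_thread (my_handler, (u8 *) &my_args, sizeof (my_args));
 * where my_handler and my_args are hypothetical; the registered
 * rpc_call_main_thread_cb_fn is responsible for actually delivering the
 * call to the main thread.
 */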
1793 threads_init (vlib_main_t * vm)
1798 VLIB_INIT_FUNCTION (threads_init);
1801 * fd.io coding-style-patch-verification: ON
1804 * eval: (c-set-style "gnu")