/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#define _GNU_SOURCE

#include <signal.h>
#include <math.h>
#include <vppinfra/format.h>
#include <vppinfra/linux/sysfs.h>
#include <vlib/vlib.h>

#include <vlib/threads.h>
#include <vlib/unix/cj.h>

DECLARE_CJ_GLOBAL_LOG;

#define FRAME_QUEUE_NELTS 64
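
/*
 * Default handoff queue depth. vlib_frame_queue_alloc () below insists on
 * a power of 2, so that head/tail indices can be mapped to ring slots
 * with a simple mask instead of a modulo.
 */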
vlib_worker_thread_t *vlib_worker_threads;
vlib_thread_main_t vlib_thread_main;
/*
 * Barrier tracing can be enabled on a normal build to collect information
 * on barrier use, including timings and call stacks. Deliberately not
 * keyed off CLIB_DEBUG, because that can add significant overhead which
 * impacts observed timings.
 */
static u32
elog_global_id_for_msg_name (const char *msg_name)
{
  uword *p, r;
  static uword *h;
  u8 *name_copy;

  if (!h)
    h = hash_create_string (0, sizeof (uword));

  p = hash_get_mem (h, msg_name);
  if (p)
    return p[0];
  r = elog_string (&vlib_global_main.elog_main, "%s", msg_name);

  name_copy = format (0, "%s%c", msg_name, 0);

  hash_set_mem (h, name_copy, r);

  return r;
}
static void
barrier_trace_sync (f64 t_entry, f64 t_open, f64 t_closed)
{
  if (!vlib_worker_threads->barrier_elog_enabled)
    return;

  ELOG_TYPE_DECLARE (e) =
    {
      .format = "bar-trace-%s-#%d",
      .format_args = "T4i4",
    };

  struct
  {
    u32 caller, count, t_entry, t_open, t_closed;
  } *ed = 0;

  ed = ELOG_DATA (&vlib_global_main.elog_main, e);
  ed->count = (int) vlib_worker_threads[0].barrier_sync_count;
  ed->caller = elog_global_id_for_msg_name
    (vlib_worker_threads[0].barrier_caller);
  ed->t_entry = (int) (1000000.0 * t_entry);
  ed->t_open = (int) (1000000.0 * t_open);
  ed->t_closed = (int) (1000000.0 * t_closed);
}
static void
barrier_trace_sync_rec (f64 t_entry)
{
  if (!vlib_worker_threads->barrier_elog_enabled)
    return;

  ELOG_TYPE_DECLARE (e) =
    {
      .format = "bar-syncrec-%s-#%d",
      .format_args = "T4i4",
    };

  struct
  {
    u32 caller, depth;
  } *ed = 0;

  ed = ELOG_DATA (&vlib_global_main.elog_main, e);
  ed->depth = (int) vlib_worker_threads[0].recursion_level - 1;
  ed->caller = elog_global_id_for_msg_name
    (vlib_worker_threads[0].barrier_caller);
}
static void
barrier_trace_release_rec (f64 t_entry)
{
  if (!vlib_worker_threads->barrier_elog_enabled)
    return;

  ELOG_TYPE_DECLARE (e) =
    {
      .format = "bar-relrrec-#%d",
      .format_args = "i4",
    };

  struct
  {
    u32 depth;
  } *ed = 0;

  ed = ELOG_DATA (&vlib_global_main.elog_main, e);
  ed->depth = (int) vlib_worker_threads[0].recursion_level;
}
static void
barrier_trace_release (f64 t_entry, f64 t_closed_total, f64 t_update_main)
{
  if (!vlib_worker_threads->barrier_elog_enabled)
    return;

  ELOG_TYPE_DECLARE (e) =
    {
      .format = "bar-rel-#%d-e%d-u%d-t%d",
      .format_args = "i4i4i4i4",
    };

  struct
  {
    u32 count, t_entry, t_update_main, t_closed_total;
  } *ed = 0;

  ed = ELOG_DATA (&vlib_global_main.elog_main, e);
  ed->t_entry = (int) (1000000.0 * t_entry);
  ed->t_update_main = (int) (1000000.0 * t_update_main);
  ed->t_closed_total = (int) (1000000.0 * t_closed_total);
  ed->count = (int) vlib_worker_threads[0].barrier_sync_count;

  /* Reset context for next trace */
  vlib_worker_threads[0].barrier_context = NULL;
}
u32
os_get_nthreads (void)
{
  u32 len;

  len = vec_len (vlib_thread_stacks);
  if (len == 0)
    return 1;
  else
    return len;
}
void
vlib_set_thread_name (char *name)
{
  int pthread_setname_np (pthread_t __target_thread, const char *__name);
  int rv;
  pthread_t thread = pthread_self ();

  if (thread)
    {
      rv = pthread_setname_np (thread, name);
      if (rv)
	clib_warning ("pthread_setname_np returned %d", rv);
    }
}
static int
sort_registrations_by_no_clone (void *a0, void *a1)
{
  vlib_thread_registration_t **tr0 = a0;
  vlib_thread_registration_t **tr1 = a1;

  return ((i32) ((*tr0)->no_data_structure_clone)
	  - ((i32) ((*tr1)->no_data_structure_clone)));
}
static uword *
clib_sysfs_list_to_bitmap (char *filename)
{
  FILE *fp;
  uword *r = 0;

  fp = fopen (filename, "r");

  if (fp != NULL)
    {
      u8 *buffer = 0;
      vec_validate (buffer, 256 - 1);
      if (fgets ((char *) buffer, 256, fp))
	{
	  unformat_input_t in;
	  unformat_init_string (&in, (char *) buffer,
				strlen ((char *) buffer));
	  if (unformat (&in, "%U", unformat_bitmap_list, &r) != 1)
	    clib_warning ("unformat_bitmap_list failed");
	  unformat_free (&in);
	}
      vec_free (buffer);
      fclose (fp);
    }
  return r;
}
/* Called early in the init sequence */

clib_error_t *
vlib_thread_init (vlib_main_t * vm)
{
  vlib_thread_main_t *tm = &vlib_thread_main;
  vlib_worker_thread_t *w;
  vlib_thread_registration_t *tr;
  u32 n_vlib_mains = 1;
  u32 first_index = 1;
  u32 i;
  uword *avail_cpu;

  /* get bitmaps of active cpu cores and sockets */
  tm->cpu_core_bitmap =
    clib_sysfs_list_to_bitmap ("/sys/devices/system/cpu/online");
  tm->cpu_socket_bitmap =
    clib_sysfs_list_to_bitmap ("/sys/devices/system/node/online");

  avail_cpu = clib_bitmap_dup (tm->cpu_core_bitmap);

  /* skip cores */
  for (i = 0; i < tm->skip_cores; i++)
    {
      uword c = clib_bitmap_first_set (avail_cpu);
      if (c == ~0)
	return clib_error_return (0, "no available cpus to skip");

      avail_cpu = clib_bitmap_set (avail_cpu, c, 0);
    }

  /* grab cpu for main thread */
  if (tm->main_lcore == ~0)
    {
      /* if main-lcore is not set, we try to use lcore 1 */
      if (clib_bitmap_get (avail_cpu, 1))
	tm->main_lcore = 1;
      else
	tm->main_lcore = clib_bitmap_first_set (avail_cpu);
      if (tm->main_lcore == (u8) ~ 0)
	return clib_error_return (0, "no available cpus to be used for the"
				  " main thread");
    }
  else
    {
      if (clib_bitmap_get (avail_cpu, tm->main_lcore) == 0)
	return clib_error_return (0, "cpu %u is not available to be used"
				  " for the main thread", tm->main_lcore);
    }
  avail_cpu = clib_bitmap_set (avail_cpu, tm->main_lcore, 0);

  /* assume that there is socket 0 only if there is no data from sysfs */
  if (!tm->cpu_socket_bitmap)
    tm->cpu_socket_bitmap = clib_bitmap_set (0, 0, 1);

  /* pin main thread to main_lcore  */
  if (tm->cb.vlib_thread_set_lcore_cb)
    {
      tm->cb.vlib_thread_set_lcore_cb (0, tm->main_lcore);
    }
  else
    {
      cpu_set_t cpuset;
      CPU_ZERO (&cpuset);
      CPU_SET (tm->main_lcore, &cpuset);
      pthread_setaffinity_np (pthread_self (), sizeof (cpu_set_t), &cpuset);
    }
  /* as many threads as stacks... */
  vec_validate_aligned (vlib_worker_threads, vec_len (vlib_thread_stacks) - 1,
			CLIB_CACHE_LINE_BYTES);

  /* Preallocate thread 0 */
  _vec_len (vlib_worker_threads) = 1;
  w = vlib_worker_threads;
  w->thread_mheap = clib_mem_get_heap ();
  w->thread_stack = vlib_thread_stacks[0];
  w->cpu_id = tm->main_lcore;
  w->lwp = syscall (SYS_gettid);
  w->thread_id = pthread_self ();
  tm->n_vlib_mains = 1;

  if (tm->sched_policy != ~0)
    {
      struct sched_param sched_param;
      if (!sched_getparam (w->lwp, &sched_param))
	{
	  if (tm->sched_priority != ~0)
	    sched_param.sched_priority = tm->sched_priority;
	  sched_setscheduler (w->lwp, tm->sched_policy, &sched_param);
	}
    }

  /* assign threads to cores and set n_vlib_mains */
  tr = tm->next;

  while (tr)
    {
      vec_add1 (tm->registrations, tr);
      tr = tr->next;
    }

  vec_sort_with_function (tm->registrations, sort_registrations_by_no_clone);

  for (i = 0; i < vec_len (tm->registrations); i++)
    {
      int j;
      tr = tm->registrations[i];
      tr->first_index = first_index;
      first_index += tr->count;
      n_vlib_mains += (tr->no_data_structure_clone == 0) ? tr->count : 0;

      /* construct coremask */
      if (tr->use_pthreads || !tr->count)
	continue;

      if (tr->coremask)
	{
	  uword c;
	  /* *INDENT-OFF* */
	  clib_bitmap_foreach (c, tr->coremask, ({
	    if (clib_bitmap_get(avail_cpu, c) == 0)
	      return clib_error_return (0, "cpu %u is not available to be used"
					" for the '%s' thread", c, tr->name);

	    avail_cpu = clib_bitmap_set(avail_cpu, c, 0);
	  }));
	  /* *INDENT-ON* */
	}
      else
	{
	  for (j = 0; j < tr->count; j++)
	    {
	      uword c = clib_bitmap_first_set (avail_cpu);
	      if (c == ~0)
		return clib_error_return (0,
					  "no available cpus to be used for"
					  " the '%s' thread", tr->name);

	      avail_cpu = clib_bitmap_set (avail_cpu, c, 0);
	      tr->coremask = clib_bitmap_set (tr->coremask, c, 1);
	    }
	}
    }

  clib_bitmap_free (avail_cpu);

  tm->n_vlib_mains = n_vlib_mains;

  vec_validate_aligned (vlib_worker_threads, first_index - 1,
			CLIB_CACHE_LINE_BYTES);

  return 0;
}
vlib_frame_queue_t *
vlib_frame_queue_alloc (int nelts)
{
  vlib_frame_queue_t *fq;

  fq = clib_mem_alloc_aligned (sizeof (*fq), CLIB_CACHE_LINE_BYTES);
  memset (fq, 0, sizeof (*fq));
  fq->nelts = nelts;
  fq->vector_threshold = 128;	// packets
  vec_validate_aligned (fq->elts, nelts - 1, CLIB_CACHE_LINE_BYTES);

  if (1)
    {
      if (((uword) & fq->tail) & (CLIB_CACHE_LINE_BYTES - 1))
	fformat (stderr, "WARNING: fq->tail unaligned\n");
      if (((uword) & fq->head) & (CLIB_CACHE_LINE_BYTES - 1))
	fformat (stderr, "WARNING: fq->head unaligned\n");
      if (((uword) fq->elts) & (CLIB_CACHE_LINE_BYTES - 1))
	fformat (stderr, "WARNING: fq->elts unaligned\n");

      if (sizeof (fq->elts[0]) % CLIB_CACHE_LINE_BYTES)
	fformat (stderr, "WARNING: fq->elts[0] size %d\n",
		 sizeof (fq->elts[0]));
      if (nelts & (nelts - 1))
	{
	  fformat (stderr, "FATAL: nelts MUST be a power of 2\n");
	  abort ();
	}
    }

  return (fq);
}
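
/*
 * Worked example of the power-of-2 requirement: with nelts = 64, the
 * free-running head/tail counters map onto ring slots as (index & 63),
 * and "ring full" reduces to tail - head >= 64. Neither test works if
 * nelts does not divide the counter wrap-around interval evenly.
 */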
void vl_msg_api_handler_no_free (void *) __attribute__ ((weak));
void
vl_msg_api_handler_no_free (void *v)
{
}

/* Turned off, save as reference material... */
#if 0
static inline int
vlib_frame_queue_dequeue_internal (int thread_id,
				   vlib_main_t * vm, vlib_node_main_t * nm)
{
  vlib_frame_queue_t *fq = vlib_frame_queues[thread_id];
  vlib_frame_queue_elt_t *elt;
  vlib_frame_t *f;
  vlib_pending_frame_t *p;
  vlib_node_runtime_t *r;
  u32 node_runtime_index;
  int msg_type;
  u64 before;
  int processed = 0;

  ASSERT (vm == vlib_mains[thread_id]);

  while (1)
    {
      if (fq->head == fq->tail)
	return processed;

      elt = fq->elts + ((fq->head + 1) & (fq->nelts - 1));

      if (!elt->valid)
	return processed;

      before = clib_cpu_time_now ();

      f = elt->frame;
      node_runtime_index = elt->node_runtime_index;
      msg_type = elt->msg_type;

      switch (msg_type)
	{
	case VLIB_FRAME_QUEUE_ELT_FREE_BUFFERS:
	  vlib_buffer_free (vm, vlib_frame_vector_args (f), f->n_vectors);
	  /* note fallthrough... */
	case VLIB_FRAME_QUEUE_ELT_FREE_FRAME:
	  r = vec_elt_at_index (nm->nodes_by_type[VLIB_NODE_TYPE_INTERNAL],
				node_runtime_index);
	  vlib_frame_free (vm, r, f);
	  break;
	case VLIB_FRAME_QUEUE_ELT_DISPATCH_FRAME:
	  vec_add2 (vm->node_main.pending_frames, p, 1);
	  f->flags |= (VLIB_FRAME_PENDING | VLIB_FRAME_FREE_AFTER_DISPATCH);
	  p->node_runtime_index = elt->node_runtime_index;
	  p->frame_index = vlib_frame_index (vm, f);
	  p->next_frame_index = VLIB_PENDING_FRAME_NO_NEXT_FRAME;
	  fq->dequeue_vectors += (u64) f->n_vectors;
	  break;
	case VLIB_FRAME_QUEUE_ELT_API_MSG:
	  vl_msg_api_handler_no_free (f);
	  break;
	default:
	  clib_warning ("bogus frame queue message, type %d", msg_type);
	  break;
	}
      elt->valid = 0;
      fq->dequeues++;
      fq->dequeue_ticks += clib_cpu_time_now () - before;
      CLIB_MEMORY_BARRIER ();
      fq->head++;
      processed++;
    }
  ASSERT (0);
  return processed;
}

int
vlib_frame_queue_dequeue (int thread_id,
			  vlib_main_t * vm, vlib_node_main_t * nm)
{
  return vlib_frame_queue_dequeue_internal (thread_id, vm, nm);
}
int
vlib_frame_queue_enqueue (vlib_main_t * vm, u32 node_runtime_index,
			  u32 frame_queue_index, vlib_frame_t * frame,
			  vlib_frame_queue_msg_type_t type)
{
  vlib_frame_queue_t *fq = vlib_frame_queues[frame_queue_index];
  vlib_frame_queue_elt_t *elt;
  u32 save_count;
  u64 new_tail;
  u64 before = clib_cpu_time_now ();

  ASSERT (fq);

  new_tail = clib_atomic_add_fetch (&fq->tail, 1);

  /* Wait until a ring slot is available */
  while (new_tail >= fq->head + fq->nelts)
    {
      f64 b4 = vlib_time_now_ticks (vm, before);
      vlib_worker_thread_barrier_check (vm, b4);
      /* Bad idea. Dequeue -> enqueue -> dequeue -> trouble */
      // vlib_frame_queue_dequeue (vm->thread_index, vm, nm);
    }

  elt = fq->elts + (new_tail & (fq->nelts - 1));

  /* this would be very bad... */
  while (elt->valid)
    ;

  /* Once we enqueue the frame, frame->n_vectors is owned elsewhere... */
  save_count = frame->n_vectors;

  elt->frame = frame;
  elt->node_runtime_index = node_runtime_index;
  elt->msg_type = type;
  CLIB_MEMORY_BARRIER ();
  elt->valid = 1;

  return save_count;
}
#endif
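
/*
 * In the (retired) scheme above, elt->valid is the producer/consumer
 * handshake: the enqueue side fills the element, issues a memory barrier,
 * then sets valid = 1; the dequeue side consumes elements in order,
 * clears valid, and only then advances fq->head. The live implementation
 * of vlib_frame_queue_dequeue () near the bottom of this file keeps the
 * same protocol.
 */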
/* To be called by vlib worker threads upon startup */
void
vlib_worker_thread_init (vlib_worker_thread_t * w)
{
  vlib_thread_main_t *tm = vlib_get_thread_main ();

  /*
   * Note: disabling signals in worker threads as follows
   * prevents the api post-mortem dump scheme from working
   * {
   *    sigset_t s;
   *    sigfillset (&s);
   *    pthread_sigmask (SIG_SETMASK, &s, 0);
   * }
   */

  clib_mem_set_heap (w->thread_mheap);

  if (vec_len (tm->thread_prefix) && w->registration->short_name)
    {
      w->name = format (0, "%v_%s_%d%c", tm->thread_prefix,
			w->registration->short_name, w->instance_id, '\0');
      vlib_set_thread_name ((char *) w->name);
    }

  if (!w->registration->use_pthreads)
    {
      /* Initial barrier sync, for both worker and i/o threads */
      clib_atomic_fetch_add (vlib_worker_threads->workers_at_barrier, 1);

      while (*vlib_worker_threads->wait_at_barrier)
	;

      clib_atomic_fetch_add (vlib_worker_threads->workers_at_barrier, -1);
    }
}
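
/*
 * The spin above is the worker half of the barrier protocol: the main
 * thread (vlib_worker_thread_barrier_sync_int) sets *wait_at_barrier and
 * waits until *workers_at_barrier reaches the worker count; each worker
 * increments the count, spins until *wait_at_barrier clears, then
 * decrements it on the way out.
 */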
void *
vlib_worker_thread_bootstrap_fn (void *arg)
{
  void *rv;
  vlib_worker_thread_t *w = arg;

  w->lwp = syscall (SYS_gettid);
  w->thread_id = pthread_self ();

  __os_thread_index = w - vlib_worker_threads;

  rv = (void *) clib_calljmp
    ((uword (*)(uword)) w->thread_function,
     (uword) arg, w->thread_stack + VLIB_THREAD_STACK_SIZE);
  /* NOTREACHED, we hope */
  return rv;
}
static void
vlib_get_thread_core_socket (vlib_worker_thread_t * w, unsigned cpu_id)
{
  const char *sys_cpu_path = "/sys/devices/system/cpu/cpu";
  u8 *p = 0;
  int core_id = -1, socket_id = -1;

  p = format (p, "%s%u/topology/core_id%c", sys_cpu_path, cpu_id, 0);
  clib_sysfs_read ((char *) p, "%d", &core_id);
  vec_reset_length (p);
  p =
    format (p, "%s%u/topology/physical_package_id%c", sys_cpu_path, cpu_id,
	    0);
  clib_sysfs_read ((char *) p, "%d", &socket_id);
  vec_free (p);

  w->core_id = core_id;
  w->socket_id = socket_id;
}
static clib_error_t *
vlib_launch_thread_int (void *fp, vlib_worker_thread_t * w, unsigned cpu_id)
{
  vlib_thread_main_t *tm = &vlib_thread_main;
  void *(*fp_arg) (void *) = fp;

  w->cpu_id = cpu_id;
  vlib_get_thread_core_socket (w, cpu_id);
  if (tm->cb.vlib_launch_thread_cb && !w->registration->use_pthreads)
    return tm->cb.vlib_launch_thread_cb (fp, (void *) w, cpu_id);
  else
    {
      pthread_t worker;
      cpu_set_t cpuset;
      CPU_ZERO (&cpuset);
      CPU_SET (cpu_id, &cpuset);

      if (pthread_create (&worker, NULL /* attr */ , fp_arg, (void *) w))
	return clib_error_return_unix (0, "pthread_create");

      if (pthread_setaffinity_np (worker, sizeof (cpu_set_t), &cpuset))
	return clib_error_return_unix (0, "pthread_setaffinity_np");

      return 0;
    }
}
static clib_error_t *
start_workers (vlib_main_t * vm)
{
  int i, j;
  vlib_worker_thread_t *w;
  vlib_main_t *vm_clone;
  void *oldheap;
  vlib_thread_main_t *tm = &vlib_thread_main;
  vlib_thread_registration_t *tr;
  vlib_node_runtime_t *rt;
  u32 n_vlib_mains = tm->n_vlib_mains;
  u32 worker_thread_index;
  u8 *main_heap = clib_mem_get_per_cpu_heap ();

  vec_reset_length (vlib_worker_threads);

  /* Set up the main thread */
  vec_add2_aligned (vlib_worker_threads, w, 1, CLIB_CACHE_LINE_BYTES);
  w->elog_track.name = "main thread";
  elog_track_register (&vm->elog_main, &w->elog_track);

  if (vec_len (tm->thread_prefix))
    {
      w->name = format (0, "%v_main%c", tm->thread_prefix, '\0');
      vlib_set_thread_name ((char *) w->name);
    }

  vm->elog_main.lock =
    clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES, CLIB_CACHE_LINE_BYTES);
  vm->elog_main.lock[0] = 0;

  if (n_vlib_mains > 1)
    {
      /* Replace hand-crafted length-1 vector with a real vector */
      vlib_mains = 0;

      vec_validate_aligned (vlib_mains, tm->n_vlib_mains - 1,
			    CLIB_CACHE_LINE_BYTES);
      _vec_len (vlib_mains) = 0;
      vec_add1_aligned (vlib_mains, vm, CLIB_CACHE_LINE_BYTES);

      vlib_worker_threads->wait_at_barrier =
	clib_mem_alloc_aligned (sizeof (u32), CLIB_CACHE_LINE_BYTES);
      vlib_worker_threads->workers_at_barrier =
	clib_mem_alloc_aligned (sizeof (u32), CLIB_CACHE_LINE_BYTES);

      vlib_worker_threads->node_reforks_required =
	clib_mem_alloc_aligned (sizeof (u32), CLIB_CACHE_LINE_BYTES);

      /* Ask for an initial barrier sync */
      *vlib_worker_threads->workers_at_barrier = 0;
      *vlib_worker_threads->wait_at_barrier = 1;

      /* Without update or refork */
      *vlib_worker_threads->node_reforks_required = 0;
      vm->need_vlib_worker_thread_node_runtime_update = 0;

      /* init timing */
      vm->barrier_epoch = 0;
      vm->barrier_no_close_before = 0;

      worker_thread_index = 1;
      for (i = 0; i < vec_len (tm->registrations); i++)
	{
	  vlib_node_main_t *nm, *nm_clone;
	  vlib_buffer_free_list_t *fl_clone, *fl_orig;
	  vlib_buffer_free_list_t *orig_freelist_pool;
	  int k;

	  tr = tm->registrations[i];

	  if (tr->count == 0)
	    continue;

	  for (k = 0; k < tr->count; k++)
	    {
	      vlib_node_t *n;

	      vec_add2 (vlib_worker_threads, w, 1);
	      /* Currently unused, may not really work */
	      if (tr->mheap_size)
		{
#if USE_DLMALLOC == 0
		  w->thread_mheap =
		    mheap_alloc (0 /* use VM */ , tr->mheap_size);
#else
		  w->thread_mheap = create_mspace (tr->mheap_size,
						   0 /* unlocked */ );
#endif
		}
	      else
		w->thread_mheap = main_heap;

	      w->thread_stack =
		vlib_thread_stack_init (w - vlib_worker_threads);
	      w->thread_function = tr->function;
	      w->thread_function_arg = w;
	      w->instance_id = k;
	      w->registration = tr;

	      w->elog_track.name =
		(char *) format (0, "%s %d", tr->name, k + 1);
	      vec_add1 (w->elog_track.name, 0);
	      elog_track_register (&vm->elog_main, &w->elog_track);

	      if (tr->no_data_structure_clone)
		continue;

	      /* Fork vlib_global_main et al. Look for bugs here */
	      oldheap = clib_mem_set_heap (w->thread_mheap);

	      vm_clone = clib_mem_alloc_aligned (sizeof (*vm_clone),
						 CLIB_CACHE_LINE_BYTES);
	      clib_memcpy (vm_clone, vlib_mains[0], sizeof (*vm_clone));

	      vm_clone->thread_index = worker_thread_index;
	      vm_clone->heap_base = w->thread_mheap;
	      vm_clone->heap_aligned_base = (void *)
		(((uword) w->thread_mheap) & ~(VLIB_FRAME_ALIGN - 1));
	      vm_clone->init_functions_called =
		hash_create (0, /* value bytes */ 0);
	      vm_clone->pending_rpc_requests = 0;
	      vec_validate (vm_clone->pending_rpc_requests, 0);
	      _vec_len (vm_clone->pending_rpc_requests) = 0;
	      memset (&vm_clone->random_buffer, 0,
		      sizeof (vm_clone->random_buffer));

	      nm = &vlib_mains[0]->node_main;
	      nm_clone = &vm_clone->node_main;
	      /* fork next frames array, preserving node runtime indices */
	      nm_clone->next_frames = vec_dup_aligned (nm->next_frames,
						       CLIB_CACHE_LINE_BYTES);
	      for (j = 0; j < vec_len (nm_clone->next_frames); j++)
		{
		  vlib_next_frame_t *nf = &nm_clone->next_frames[j];
		  u32 save_node_runtime_index;
		  u32 save_flags;

		  save_node_runtime_index = nf->node_runtime_index;
		  save_flags = nf->flags & VLIB_FRAME_NO_FREE_AFTER_DISPATCH;
		  vlib_next_frame_init (nf);
		  nf->node_runtime_index = save_node_runtime_index;
		  nf->flags = save_flags;
		}

	      /* fork the frame dispatch queue */
	      nm_clone->pending_frames = 0;
	      vec_validate (nm_clone->pending_frames, 10);	/* $$$$$?????? */
	      _vec_len (nm_clone->pending_frames) = 0;

	      /* fork nodes */
	      nm_clone->nodes = 0;

	      /* Allocate all nodes in single block for speed */
	      n = clib_mem_alloc_no_fail (vec_len (nm->nodes) * sizeof (*n));

	      for (j = 0; j < vec_len (nm->nodes); j++)
		{
		  clib_memcpy (n, nm->nodes[j], sizeof (*n));
		  /* none of the copied nodes have enqueue rights given out */
		  n->owner_node_index = VLIB_INVALID_NODE_INDEX;
		  memset (&n->stats_total, 0, sizeof (n->stats_total));
		  memset (&n->stats_last_clear, 0,
			  sizeof (n->stats_last_clear));
		  vec_add1 (nm_clone->nodes, n);
		  n++;
		}

	      nm_clone->nodes_by_type[VLIB_NODE_TYPE_INTERNAL] =
		vec_dup_aligned (nm->nodes_by_type[VLIB_NODE_TYPE_INTERNAL],
				 CLIB_CACHE_LINE_BYTES);
	      vec_foreach (rt,
			   nm_clone->nodes_by_type[VLIB_NODE_TYPE_INTERNAL])
	      {
		vlib_node_t *n = vlib_get_node (vm, rt->node_index);
		rt->thread_index = vm_clone->thread_index;
		/* copy initial runtime_data from node */
		if (n->runtime_data && n->runtime_data_bytes > 0)
		  clib_memcpy (rt->runtime_data, n->runtime_data,
			       clib_min (VLIB_NODE_RUNTIME_DATA_SIZE,
					 n->runtime_data_bytes));
	      }

	      nm_clone->nodes_by_type[VLIB_NODE_TYPE_INPUT] =
		vec_dup_aligned (nm->nodes_by_type[VLIB_NODE_TYPE_INPUT],
				 CLIB_CACHE_LINE_BYTES);
	      vec_foreach (rt, nm_clone->nodes_by_type[VLIB_NODE_TYPE_INPUT])
	      {
		vlib_node_t *n = vlib_get_node (vm, rt->node_index);
		rt->thread_index = vm_clone->thread_index;
		/* copy initial runtime_data from node */
		if (n->runtime_data && n->runtime_data_bytes > 0)
		  clib_memcpy (rt->runtime_data, n->runtime_data,
			       clib_min (VLIB_NODE_RUNTIME_DATA_SIZE,
					 n->runtime_data_bytes));
	      }

	      nm_clone->processes = vec_dup_aligned (nm->processes,
						     CLIB_CACHE_LINE_BYTES);

	      /* zap the (per worker) frame freelists, etc */
	      nm_clone->frame_sizes = 0;
	      nm_clone->frame_size_hash = hash_create (0, sizeof (uword));

	      /* Packet trace buffers are guaranteed to be empty, nothing to do here */

	      clib_mem_set_heap (oldheap);
	      vec_add1_aligned (vlib_mains, vm_clone, CLIB_CACHE_LINE_BYTES);

	      vm_clone->error_main.counters = vec_dup_aligned
		(vlib_mains[0]->error_main.counters, CLIB_CACHE_LINE_BYTES);
	      vm_clone->error_main.counters_last_clear = vec_dup_aligned
		(vlib_mains[0]->error_main.counters_last_clear,
		 CLIB_CACHE_LINE_BYTES);

	      /* Fork the vlib_buffer_main_t free lists, etc. */
	      orig_freelist_pool = vm_clone->buffer_free_list_pool;
	      vm_clone->buffer_free_list_pool = 0;

	      /* *INDENT-OFF* */
	      pool_foreach (fl_orig, orig_freelist_pool,
			    ({
			      pool_get_aligned (vm_clone->buffer_free_list_pool,
						fl_clone, CLIB_CACHE_LINE_BYTES);
			      ASSERT (fl_orig - orig_freelist_pool
				      == fl_clone - vm_clone->buffer_free_list_pool);

			      fl_clone[0] = fl_orig[0];
			      fl_clone->buffers = 0;
			      fl_clone->n_alloc = 0;
			    }));
	      /* *INDENT-ON* */

	      worker_thread_index++;
	    }
	}
    }
  else
    {
      /* only have non-data-structure copy threads to create... */
      for (i = 0; i < vec_len (tm->registrations); i++)
	{
	  tr = tm->registrations[i];

	  for (j = 0; j < tr->count; j++)
	    {
	      vec_add2 (vlib_worker_threads, w, 1);
	      if (tr->mheap_size)
		{
#if USE_DLMALLOC == 0
		  w->thread_mheap =
		    mheap_alloc (0 /* use VM */ , tr->mheap_size);
#else
		  w->thread_mheap =
		    create_mspace (tr->mheap_size, 0 /* locked */ );
#endif
		}
	      else
		w->thread_mheap = main_heap;

	      w->thread_stack =
		vlib_thread_stack_init (w - vlib_worker_threads);
	      w->thread_function = tr->function;
	      w->thread_function_arg = w;
	      w->instance_id = j;
	      w->elog_track.name =
		(char *) format (0, "%s %d", tr->name, j + 1);
	      w->registration = tr;
	      vec_add1 (w->elog_track.name, 0);
	      elog_track_register (&vm->elog_main, &w->elog_track);
	    }
	}
    }
  worker_thread_index = 1;

  for (i = 0; i < vec_len (tm->registrations); i++)
    {
      clib_error_t *err;
      int j;

      tr = tm->registrations[i];

      if (tr->use_pthreads || tm->use_pthreads)
	{
	  for (j = 0; j < tr->count; j++)
	    {
	      w = vlib_worker_threads + worker_thread_index++;
	      err = vlib_launch_thread_int (vlib_worker_thread_bootstrap_fn,
					    w, 0);
	      if (err)
		clib_error_report (err);
	    }
	}
      else
	{
	  uword c;
	  /* *INDENT-OFF* */
	  clib_bitmap_foreach (c, tr->coremask, ({
	    w = vlib_worker_threads + worker_thread_index++;
	    err = vlib_launch_thread_int (vlib_worker_thread_bootstrap_fn,
					  w, c);
	    if (err)
	      clib_error_report (err);
	  }));
	  /* *INDENT-ON* */
	}
    }
  vlib_worker_thread_barrier_sync (vm);
  vlib_worker_thread_barrier_release (vm);
  return 0;
}

VLIB_MAIN_LOOP_ENTER_FUNCTION (start_workers);
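
/*
 * start_workers runs as a main-loop-enter function, i.e. on the main
 * thread after the init functions complete and before the first
 * main-loop dispatch. Workers created here park in the initial barrier
 * requested above (*wait_at_barrier = 1), so the final sync/release
 * pair in effect waits for every worker to check in before the graph
 * starts running.
 */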
static inline void
worker_thread_node_runtime_update_internal (void)
{
  int i, j;
  vlib_main_t *vm;
  vlib_node_main_t *nm, *nm_clone;
  vlib_main_t *vm_clone;
  vlib_node_runtime_t *rt;
  never_inline void
    vlib_node_runtime_sync_stats (vlib_main_t * vm,
				  vlib_node_runtime_t * r,
				  uword n_calls,
				  uword n_vectors, uword n_clocks);

  ASSERT (vlib_get_thread_index () == 0);

  vm = vlib_mains[0];
  nm = &vm->node_main;

  ASSERT (*vlib_worker_threads->wait_at_barrier == 1);

  /*
   * Scrape all runtime stats, so we don't lose node runtime(s) with
   * pending counts, or throw away worker / io thread counts.
   */
  for (j = 0; j < vec_len (nm->nodes); j++)
    {
      vlib_node_t *n;
      n = nm->nodes[j];
      vlib_node_sync_stats (vm, n);
    }

  for (i = 1; i < vec_len (vlib_mains); i++)
    {
      vlib_node_t *n;

      vm_clone = vlib_mains[i];
      nm_clone = &vm_clone->node_main;

      for (j = 0; j < vec_len (nm_clone->nodes); j++)
	{
	  n = nm_clone->nodes[j];

	  rt = vlib_node_get_runtime (vm_clone, n->index);
	  vlib_node_runtime_sync_stats (vm_clone, rt, 0, 0, 0);
	}
    }

  /* Per-worker clone rebuilds are now done on each thread */
}
void
vlib_worker_thread_node_refork (void)
{
  vlib_main_t *vm, *vm_clone;
  vlib_node_main_t *nm, *nm_clone;
  vlib_node_t **old_nodes_clone;
  vlib_node_runtime_t *rt, *old_rt;

  vlib_node_t *new_n_clone;

  int j;

  vm = vlib_mains[0];
  nm = &vm->node_main;
  vm_clone = vlib_get_main ();
  nm_clone = &vm_clone->node_main;

  /* Re-clone error heap */
  u64 *old_counters = vm_clone->error_main.counters;
  u64 *old_counters_all_clear = vm_clone->error_main.counters_last_clear;

  clib_memcpy (&vm_clone->error_main, &vm->error_main,
	       sizeof (vm->error_main));
  j = vec_len (vm->error_main.counters) - 1;
  vec_validate_aligned (old_counters, j, CLIB_CACHE_LINE_BYTES);
  vec_validate_aligned (old_counters_all_clear, j, CLIB_CACHE_LINE_BYTES);
  vm_clone->error_main.counters = old_counters;
  vm_clone->error_main.counters_last_clear = old_counters_all_clear;

  nm_clone = &vm_clone->node_main;
  vec_free (nm_clone->next_frames);
  nm_clone->next_frames = vec_dup_aligned (nm->next_frames,
					   CLIB_CACHE_LINE_BYTES);

  for (j = 0; j < vec_len (nm_clone->next_frames); j++)
    {
      vlib_next_frame_t *nf = &nm_clone->next_frames[j];
      u32 save_node_runtime_index;
      u32 save_flags;

      save_node_runtime_index = nf->node_runtime_index;
      save_flags = nf->flags & VLIB_FRAME_NO_FREE_AFTER_DISPATCH;
      vlib_next_frame_init (nf);
      nf->node_runtime_index = save_node_runtime_index;
      nf->flags = save_flags;
    }

  old_nodes_clone = nm_clone->nodes;
  nm_clone->nodes = 0;

  /* re-fork nodes */

  /* Allocate all nodes in single block for speed */
  new_n_clone =
    clib_mem_alloc_no_fail (vec_len (nm->nodes) * sizeof (*new_n_clone));
  for (j = 0; j < vec_len (nm->nodes); j++)
    {
      vlib_node_t *new_n;
      vlib_node_t *old_n_clone;

      new_n = nm->nodes[j];
      old_n_clone = old_nodes_clone[j];

      clib_memcpy (new_n_clone, new_n, sizeof (*new_n));
      /* none of the copied nodes have enqueue rights given out */
      new_n_clone->owner_node_index = VLIB_INVALID_NODE_INDEX;

      if (j >= vec_len (old_nodes_clone))
	{
	  /* new node, set to zero */
	  memset (&new_n_clone->stats_total, 0,
		  sizeof (new_n_clone->stats_total));
	  memset (&new_n_clone->stats_last_clear, 0,
		  sizeof (new_n_clone->stats_last_clear));
	}
      else
	{
	  /* Copy stats if the old data is valid */
	  clib_memcpy (&new_n_clone->stats_total,
		       &old_n_clone->stats_total,
		       sizeof (new_n_clone->stats_total));
	  clib_memcpy (&new_n_clone->stats_last_clear,
		       &old_n_clone->stats_last_clear,
		       sizeof (new_n_clone->stats_last_clear));

	  /* keep previous node state */
	  new_n_clone->state = old_n_clone->state;
	}
      vec_add1 (nm_clone->nodes, new_n_clone);
      new_n_clone++;
    }
  /* Free the old node clones */
  clib_mem_free (old_nodes_clone[0]);

  vec_free (old_nodes_clone);

  /* re-clone internal nodes */
  old_rt = nm_clone->nodes_by_type[VLIB_NODE_TYPE_INTERNAL];
  nm_clone->nodes_by_type[VLIB_NODE_TYPE_INTERNAL] =
    vec_dup_aligned (nm->nodes_by_type[VLIB_NODE_TYPE_INTERNAL],
		     CLIB_CACHE_LINE_BYTES);

  vec_foreach (rt, nm_clone->nodes_by_type[VLIB_NODE_TYPE_INTERNAL])
  {
    vlib_node_t *n = vlib_get_node (vm, rt->node_index);
    rt->thread_index = vm_clone->thread_index;
    /* copy runtime_data, will be overwritten later for existing rt */
    if (n->runtime_data && n->runtime_data_bytes > 0)
      clib_memcpy (rt->runtime_data, n->runtime_data,
		   clib_min (VLIB_NODE_RUNTIME_DATA_SIZE,
			     n->runtime_data_bytes));
  }

  for (j = 0; j < vec_len (old_rt); j++)
    {
      rt = vlib_node_get_runtime (vm_clone, old_rt[j].node_index);
      rt->state = old_rt[j].state;
      clib_memcpy (rt->runtime_data, old_rt[j].runtime_data,
		   VLIB_NODE_RUNTIME_DATA_SIZE);
    }

  vec_free (old_rt);

  /* re-clone input nodes */
  old_rt = nm_clone->nodes_by_type[VLIB_NODE_TYPE_INPUT];
  nm_clone->nodes_by_type[VLIB_NODE_TYPE_INPUT] =
    vec_dup_aligned (nm->nodes_by_type[VLIB_NODE_TYPE_INPUT],
		     CLIB_CACHE_LINE_BYTES);

  vec_foreach (rt, nm_clone->nodes_by_type[VLIB_NODE_TYPE_INPUT])
  {
    vlib_node_t *n = vlib_get_node (vm, rt->node_index);
    rt->thread_index = vm_clone->thread_index;
    /* copy runtime_data, will be overwritten later for existing rt */
    if (n->runtime_data && n->runtime_data_bytes > 0)
      clib_memcpy (rt->runtime_data, n->runtime_data,
		   clib_min (VLIB_NODE_RUNTIME_DATA_SIZE,
			     n->runtime_data_bytes));
  }

  for (j = 0; j < vec_len (old_rt); j++)
    {
      rt = vlib_node_get_runtime (vm_clone, old_rt[j].node_index);
      rt->state = old_rt[j].state;
      clib_memcpy (rt->runtime_data, old_rt[j].runtime_data,
		   VLIB_NODE_RUNTIME_DATA_SIZE);
    }

  vec_free (old_rt);

  nm_clone->processes = vec_dup_aligned (nm->processes,
					 CLIB_CACHE_LINE_BYTES);
}
void
vlib_worker_thread_node_runtime_update (void)
{
  /*
   * Make a note that we need to do a node runtime update
   * prior to releasing the barrier.
   */
  vlib_global_main.need_vlib_worker_thread_node_runtime_update = 1;
}
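
/*
 * Callers typically invoke this with the barrier held, right after adding
 * or deleting graph nodes. The stats scrape then happens in
 * worker_thread_node_runtime_update_internal () and the per-worker clone
 * rebuild in vlib_worker_thread_node_refork (), both driven from
 * vlib_worker_thread_barrier_release () below.
 */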
static uword
unformat_sched_policy (unformat_input_t * input, va_list * args)
{
  u32 *r = va_arg (*args, u32 *);

  if (0);
#define _(v,f,s) else if (unformat (input, s)) *r = SCHED_POLICY_##f;
  foreach_sched_policy
#undef _
    else
    return 0;
  return 1;
}
static clib_error_t *
cpu_config (vlib_main_t * vm, unformat_input_t * input)
{
  vlib_thread_registration_t *tr;
  uword *p;
  vlib_thread_main_t *tm = &vlib_thread_main;
  u8 *name;
  uword *bitmap;
  u32 count;

  tm->thread_registrations_by_name = hash_create_string (0, sizeof (uword));

  tm->n_thread_stacks = 1;	/* account for main thread */
  tm->sched_policy = ~0;
  tm->sched_priority = ~0;
  tm->main_lcore = ~0;

  tr = tm->next;

  while (tr)
    {
      hash_set_mem (tm->thread_registrations_by_name, tr->name, (uword) tr);
      tr = tr->next;
    }

  while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
    {
      if (unformat (input, "use-pthreads"))
	tm->use_pthreads = 1;
      else if (unformat (input, "thread-prefix %v", &tm->thread_prefix))
	;
      else if (unformat (input, "main-core %u", &tm->main_lcore))
	;
      else if (unformat (input, "skip-cores %u", &tm->skip_cores))
	;
      else if (unformat (input, "coremask-%s %U", &name,
			 unformat_bitmap_mask, &bitmap) ||
	       unformat (input, "corelist-%s %U", &name,
			 unformat_bitmap_list, &bitmap))
	{
	  p = hash_get_mem (tm->thread_registrations_by_name, name);
	  if (p == 0)
	    return clib_error_return (0, "no such thread type '%s'", name);

	  tr = (vlib_thread_registration_t *) p[0];

	  if (tr->use_pthreads)
	    return clib_error_return (0,
				      "corelist cannot be set for '%s' threads",
				      name);

	  tr->coremask = bitmap;
	  tr->count = clib_bitmap_count_set_bits (tr->coremask);
	}
      else
	if (unformat
	    (input, "scheduler-policy %U", unformat_sched_policy,
	     &tm->sched_policy))
	;
      else if (unformat (input, "scheduler-priority %u", &tm->sched_priority))
	;
      else if (unformat (input, "%s %u", &name, &count))
	{
	  p = hash_get_mem (tm->thread_registrations_by_name, name);
	  if (p == 0)
	    return clib_error_return (0, "no such thread type '%s'", name);

	  tr = (vlib_thread_registration_t *) p[0];
	  if (tr->fixed_count)
	    return clib_error_return
	      (0, "number of %s threads not configurable", tr->name);
	  tr->count = count;
	}
      else
	break;
    }

  if (tm->sched_priority != ~0)
    {
      if (tm->sched_policy == SCHED_FIFO || tm->sched_policy == SCHED_RR)
	{
	  u32 prio_max = sched_get_priority_max (tm->sched_policy);
	  u32 prio_min = sched_get_priority_min (tm->sched_policy);
	  if (tm->sched_priority > prio_max)
	    tm->sched_priority = prio_max;
	  if (tm->sched_priority < prio_min)
	    tm->sched_priority = prio_min;
	}
      else
	{
	  return clib_error_return
	    (0,
	     "scheduling priority (%d) is not allowed for `normal` scheduling policy",
	     tm->sched_priority);
	}
    }
  tr = tm->next;

  if (!tm->thread_prefix)
    tm->thread_prefix = format (0, "vpp");

  while (tr)
    {
      tm->n_thread_stacks += tr->count;
      tm->n_pthreads += tr->count * tr->use_pthreads;
      tm->n_threads += tr->count * (tr->use_pthreads == 0);
      tr = tr->next;
    }

  return 0;
}

VLIB_EARLY_CONFIG_FUNCTION (cpu_config, "cpu");
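
/*
 * Example "cpu" stanza as parsed above (illustrative values; thread-type
 * names such as "workers" come from VLIB_REGISTER_THREAD registrations):
 *
 *   cpu {
 *     main-core 1
 *     corelist-workers 2-3,18-19
 *     scheduler-policy fifo
 *     scheduler-priority 50
 *   }
 */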
void vnet_main_fixup (vlib_fork_fixup_t which) __attribute__ ((weak));
void
vnet_main_fixup (vlib_fork_fixup_t which)
{
}

void
vlib_worker_thread_fork_fixup (vlib_fork_fixup_t which)
{
  vlib_main_t *vm = vlib_get_main ();

  if (vlib_mains == 0)
    return;

  ASSERT (vlib_get_thread_index () == 0);
  vlib_worker_thread_barrier_sync (vm);

  switch (which)
    {
    case VLIB_WORKER_THREAD_FORK_FIXUP_NEW_SW_IF_INDEX:
      vnet_main_fixup (VLIB_WORKER_THREAD_FORK_FIXUP_NEW_SW_IF_INDEX);
      break;

    default:
      ASSERT (0);
    }
  vlib_worker_thread_barrier_release (vm);
}
/*
 * Enforce minimum open time to minimize packet loss due to Rx overflow,
 * based on a test-based heuristic that the barrier should be open for at
 * least 3 times as long as it is closed (with an upper bound of 1ms,
 * because by that point it is probably too late to make a difference)
 */

#ifndef BARRIER_MINIMUM_OPEN_LIMIT
#define BARRIER_MINIMUM_OPEN_LIMIT 0.001
#endif

#ifndef BARRIER_MINIMUM_OPEN_FACTOR
#define BARRIER_MINIMUM_OPEN_FACTOR 3
#endif
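
/*
 * Worked example: after a 200us closure, the next close is deferred by
 * 3 * 200us = 600us; after a 500us closure the deferral would be 1.5ms,
 * so it is clamped to the 1ms BARRIER_MINIMUM_OPEN_LIMIT.
 */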
void
vlib_worker_thread_barrier_sync_int (vlib_main_t * vm)
{
  f64 deadline;
  f64 now;
  f64 t_entry;
  f64 t_open;
  f64 t_closed;
  u32 count;

  if (vec_len (vlib_mains) < 2)
    return;

  ASSERT (vlib_get_thread_index () == 0);

  count = vec_len (vlib_mains) - 1;

  /* Record entry relative to last close */
  now = vlib_time_now (vm);
  t_entry = now - vm->barrier_epoch;

  /* Tolerate recursive calls */
  if (++vlib_worker_threads[0].recursion_level > 1)
    {
      barrier_trace_sync_rec (t_entry);
      return;
    }

  vlib_worker_threads[0].barrier_sync_count++;

  /* Enforce minimum barrier open time to minimize packet loss */
  ASSERT (vm->barrier_no_close_before <= (now + BARRIER_MINIMUM_OPEN_LIMIT));

  while (1)
    {
      now = vlib_time_now (vm);
      /* Barrier hold-down timer expired? */
      if (now >= vm->barrier_no_close_before)
	break;
      if ((vm->barrier_no_close_before - now)
	  > (2.0 * BARRIER_MINIMUM_OPEN_LIMIT))
	{
	  clib_warning ("clock change: would have waited for %.4f seconds",
			(vm->barrier_no_close_before - now));
	  break;
	}
    }
  /* Record time of closure */
  t_open = now - vm->barrier_epoch;
  vm->barrier_epoch = now;

  deadline = now + BARRIER_SYNC_TIMEOUT;

  *vlib_worker_threads->wait_at_barrier = 1;
  while (*vlib_worker_threads->workers_at_barrier != count)
    {
      if ((now = vlib_time_now (vm)) > deadline)
	{
	  fformat (stderr, "%s: worker thread deadlock\n", __FUNCTION__);
	  os_panic ();
	}
    }

  t_closed = now - vm->barrier_epoch;

  barrier_trace_sync (t_entry, t_open, t_closed);
}
void vlib_stat_segment_lock (void) __attribute__ ((weak));
void
vlib_stat_segment_lock (void)
{
}

void vlib_stat_segment_unlock (void) __attribute__ ((weak));
void
vlib_stat_segment_unlock (void)
{
}
void
vlib_worker_thread_barrier_release (vlib_main_t * vm)
{
  f64 deadline;
  f64 now;
  f64 minimum_open;
  f64 t_entry;
  f64 t_closed_total;
  f64 t_update_main = 0.0;
  int refork_needed = 0;

  if (vec_len (vlib_mains) < 2)
    return;

  ASSERT (vlib_get_thread_index () == 0);

  now = vlib_time_now (vm);
  t_entry = now - vm->barrier_epoch;

  if (--vlib_worker_threads[0].recursion_level > 0)
    {
      barrier_trace_release_rec (t_entry);
      return;
    }

  /* Update (all) node runtimes before releasing the barrier, if needed */
  if (vm->need_vlib_worker_thread_node_runtime_update)
    {
      /*
       * Lock stat segment here, so we're safe when
       * rebuilding the stat segment node clones from the
       * stat thread...
       */
      vlib_stat_segment_lock ();

      /* Do stats elements on main thread */
      worker_thread_node_runtime_update_internal ();
      vm->need_vlib_worker_thread_node_runtime_update = 0;

      /* Do per thread rebuilds in parallel */
      refork_needed = 1;
      clib_atomic_fetch_add (vlib_worker_threads->node_reforks_required,
			     (vec_len (vlib_mains) - 1));
      now = vlib_time_now (vm);
      t_update_main = now - vm->barrier_epoch;
    }

  deadline = now + BARRIER_SYNC_TIMEOUT;

  *vlib_worker_threads->wait_at_barrier = 0;

  while (*vlib_worker_threads->workers_at_barrier > 0)
    {
      if ((now = vlib_time_now (vm)) > deadline)
	{
	  fformat (stderr, "%s: worker thread deadlock\n", __FUNCTION__);
	  os_panic ();
	}
    }

  /* Wait for reforks before continuing */
  if (refork_needed)
    {
      now = vlib_time_now (vm);

      deadline = now + BARRIER_SYNC_TIMEOUT;

      while (*vlib_worker_threads->node_reforks_required > 0)
	{
	  if ((now = vlib_time_now (vm)) > deadline)
	    {
	      fformat (stderr, "%s: worker thread refork deadlock\n",
		       __FUNCTION__);
	      os_panic ();
	    }
	}
      vlib_stat_segment_unlock ();
    }

  t_closed_total = now - vm->barrier_epoch;

  minimum_open = t_closed_total * BARRIER_MINIMUM_OPEN_FACTOR;

  if (minimum_open > BARRIER_MINIMUM_OPEN_LIMIT)
    {
      minimum_open = BARRIER_MINIMUM_OPEN_LIMIT;
    }

  vm->barrier_no_close_before = now + minimum_open;

  /* Record barrier epoch (used to enforce minimum open time) */
  vm->barrier_epoch = now;

  barrier_trace_release (t_entry, t_closed_total, t_update_main);
}
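
/*
 * Typical main-thread usage pattern (sketch; any update to state that
 * workers read concurrently qualifies):
 *
 *   vlib_worker_thread_barrier_sync (vm);
 *   ... modify shared data structures ...
 *   vlib_worker_thread_barrier_release (vm);
 */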
/*
 * Check the frame queue to see if any frames are available.
 * If so, pull the packets off the frames and put them to
 * the handoff node.
 */
int
vlib_frame_queue_dequeue (vlib_main_t * vm, vlib_frame_queue_main_t * fqm)
{
  u32 thread_id = vm->thread_index;
  vlib_frame_queue_t *fq = fqm->vlib_frame_queues[thread_id];
  vlib_frame_queue_elt_t *elt;
  u32 *from, *to;
  vlib_frame_t *f;
  int msg_type;
  int processed = 0;
  u32 n_left_to_node;
  u32 vectors = 0;

  ASSERT (fq);
  ASSERT (vm == vlib_mains[thread_id]);

  if (PREDICT_FALSE (fqm->node_index == ~0))
    return 0;
  /*
   * Gather trace data for frame queues
   */
  if (PREDICT_FALSE (fq->trace))
    {
      frame_queue_trace_t *fqt;
      frame_queue_nelt_counter_t *fqh;
      u32 elix;

      fqt = &fqm->frame_queue_traces[thread_id];

      fqt->nelts = fq->nelts;
      fqt->head = fq->head;
      fqt->head_hint = fq->head_hint;
      fqt->tail = fq->tail;
      fqt->threshold = fq->vector_threshold;
      fqt->n_in_use = fqt->tail - fqt->head;
      if (fqt->n_in_use >= fqt->nelts)
	{
	  // if beyond max then use max
	  fqt->n_in_use = fqt->nelts - 1;
	}

      /* Record the number of elements in use in the histogram */
      fqh = &fqm->frame_queue_histogram[thread_id];
      fqh->count[fqt->n_in_use]++;

      /* Record a snapshot of the elements in use */
      for (elix = 0; elix < fqt->nelts; elix++)
	{
	  elt = fq->elts + ((fq->head + 1 + elix) & (fq->nelts - 1));
	  if (1 || elt->valid)
	    {
	      fqt->n_vectors[elix] = elt->n_vectors;
	    }
	}
      fqt->written = 1;
    }

  while (1)
    {
      if (fq->head == fq->tail)
	{
	  fq->head_hint = fq->head;
	  return processed;
	}

      elt = fq->elts + ((fq->head + 1) & (fq->nelts - 1));

      if (!elt->valid)
	{
	  fq->head_hint = fq->head;
	  return processed;
	}

      from = elt->buffer_index;
      msg_type = elt->msg_type;

      ASSERT (msg_type == VLIB_FRAME_QUEUE_ELT_DISPATCH_FRAME);
      ASSERT (elt->n_vectors <= VLIB_FRAME_SIZE);

      f = vlib_get_frame_to_node (vm, fqm->node_index);

      to = vlib_frame_vector_args (f);

      n_left_to_node = elt->n_vectors;

      while (n_left_to_node >= 4)
	{
	  to[0] = from[0];
	  to[1] = from[1];
	  to[2] = from[2];
	  to[3] = from[3];
	  to += 4;
	  from += 4;
	  n_left_to_node -= 4;
	}

      while (n_left_to_node > 0)
	{
	  to[0] = from[0];
	  to++;
	  from++;
	  n_left_to_node--;
	}

      vectors += elt->n_vectors;
      f->n_vectors = elt->n_vectors;
      vlib_put_frame_to_node (vm, fqm->node_index, f);

      elt->valid = 0;
      elt->n_vectors = 0;
      elt->msg_type = 0xfefefefe;
      CLIB_MEMORY_BARRIER ();
      fq->head++;
      processed++;

      /*
       * Limit the number of packets pushed into the graph
       */
      if (vectors >= fq->vector_threshold)
	{
	  fq->head_hint = fq->head;
	  return processed;
	}
    }
  ASSERT (0);
  return processed;
}
static void
vlib_worker_thread_fn (void *arg)
{
  vlib_worker_thread_t *w = (vlib_worker_thread_t *) arg;
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  vlib_main_t *vm = vlib_get_main ();
  clib_error_t *e;

  ASSERT (vm->thread_index == vlib_get_thread_index ());

  vlib_worker_thread_init (w);
  clib_time_init (&vm->clib_time);
  clib_mem_set_heap (w->thread_mheap);

  /* Wait until the dpdk init sequence is complete */
  while (tm->extern_thread_mgmt && tm->worker_thread_release == 0)
    vlib_worker_thread_barrier_check ();

  e = vlib_call_init_exit_functions
    (vm, vm->worker_init_function_registrations, 1 /* call_once */ );
  if (e)
    clib_error_report (e);

  vlib_worker_loop (vm);
}

/* *INDENT-OFF* */
VLIB_REGISTER_THREAD (worker_thread_reg, static) = {
  .name = "workers",
  .short_name = "wk",
  .function = vlib_worker_thread_fn,
};
/* *INDENT-ON* */
u32
vlib_frame_queue_main_init (u32 node_index, u32 frame_queue_nelts)
{
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  vlib_frame_queue_main_t *fqm;
  vlib_frame_queue_t *fq;
  int i;

  if (frame_queue_nelts == 0)
    frame_queue_nelts = FRAME_QUEUE_NELTS;

  ASSERT (frame_queue_nelts >= 8);

  vec_add2 (tm->frame_queue_mains, fqm, 1);

  fqm->node_index = node_index;
  fqm->frame_queue_nelts = frame_queue_nelts;
  fqm->queue_hi_thresh = frame_queue_nelts - 2;

  vec_validate (fqm->vlib_frame_queues, tm->n_vlib_mains - 1);
  vec_validate (fqm->per_thread_data, tm->n_vlib_mains - 1);
  _vec_len (fqm->vlib_frame_queues) = 0;
  for (i = 0; i < tm->n_vlib_mains; i++)
    {
      vlib_frame_queue_per_thread_data_t *ptd;
      fq = vlib_frame_queue_alloc (frame_queue_nelts);
      vec_add1 (fqm->vlib_frame_queues, fq);

      ptd = vec_elt_at_index (fqm->per_thread_data, i);
      vec_validate (ptd->handoff_queue_elt_by_thread_index,
		    tm->n_vlib_mains - 1);
      vec_validate_init_empty (ptd->congested_handoff_queue_by_thread_index,
			       tm->n_vlib_mains - 1,
			       (vlib_frame_queue_t *) (~0));
    }

  return (fqm - tm->frame_queue_mains);
}
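
/*
 * Usage sketch (a hypothetical handoff feature; the per-element enqueue
 * helpers live in vlib/threads.h). The returned index identifies this
 * frame queue main in tm->frame_queue_mains:
 *
 *   my_fq_index = vlib_frame_queue_main_init (my_handoff_node.index,
 *                                             0 means: use FRAME_QUEUE_NELTS);
 *
 * and the dequeue side drains it via vlib_frame_queue_dequeue (vm, fqm).
 */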
int
vlib_thread_cb_register (struct vlib_main_t *vm, vlib_thread_callbacks_t * cb)
{
  vlib_thread_main_t *tm = vlib_get_thread_main ();

  if (tm->extern_thread_mgmt)
    return -1;

  tm->cb.vlib_launch_thread_cb = cb->vlib_launch_thread_cb;
  tm->extern_thread_mgmt = 1;
  return 0;
}
void
vlib_process_signal_event_mt_helper (vlib_process_signal_event_mt_args_t *
				     args)
{
  ASSERT (vlib_get_thread_index () == 0);
  vlib_process_signal_event (vlib_get_main (), args->node_index,
			     args->type_opaque, args->data);
}

void *rpc_call_main_thread_cb_fn;

void
vlib_rpc_call_main_thread (void *callback, u8 * args, u32 arg_size)
{
  if (rpc_call_main_thread_cb_fn)
    {
      void (*fp) (void *, u8 *, u32) = rpc_call_main_thread_cb_fn;
      (*fp) (callback, args, arg_size);
    }
  else
    clib_warning ("BUG: rpc_call_main_thread_cb_fn NULL!");
}
clib_error_t *
threads_init (vlib_main_t * vm)
{
  return 0;
}

VLIB_INIT_FUNCTION (threads_init);

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */