 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *     http://www.apache.org/licenses/LICENSE-2.0
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
#include <vppinfra/format.h>
#include <vlib/vlib.h>
#include <vlib/threads.h>
#include <vlib/unix/cj.h>
DECLARE_CJ_GLOBAL_LOG;
#define FRAME_QUEUE_NELTS 32
vlib_worker_thread_t *vlib_worker_threads;
vlib_thread_main_t vlib_thread_main;
 * Barrier tracing can be enabled on a normal build to collect information
 * on barrier use, including timings and call stacks. Deliberately not
 * keyed off CLIB_DEBUG, because that can add significant overhead which
 * impacts observed timings.
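 *
 * Editorial sketch (not from the original file): judging by the #ifdef
 * structure below, the trace flavor is selected at build time, roughly:
 *
 *   CFLAGS += -DBARRIER_TRACING                         (syslog output)
 *   CFLAGS += -DBARRIER_TRACING -DBARRIER_TRACING_ELOG  (elog output)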
#ifdef BARRIER_TRACING
 * Output of barrier tracing can be to syslog or elog as suits
#ifdef BARRIER_TRACING_ELOG
elog_id_for_msg_name (const char *msg_name)
h = hash_create_string (0, sizeof (uword));
p = hash_get_mem (h, msg_name);
r = elog_string (&vlib_global_main.elog_main, "%s", msg_name);
name_copy = format (0, "%s%c", msg_name, 0);
hash_set_mem (h, name_copy, r);
 * elog Barrier trace functions, which are nulled out if BARRIER_TRACING isn't
 * defined.
barrier_trace_sync (f64 t_entry, f64 t_open, f64 t_closed)
ELOG_TYPE_DECLARE (e) =
.format = "barrier <%d#%s(O:%dus:%dus)(%dus)",
.format_args = "i4T4i4i4i4",
u32 count, caller, t_entry, t_open, t_closed;
ed = ELOG_DATA (&vlib_global_main.elog_main, e);
ed->count = (int) vlib_worker_threads[0].barrier_sync_count;
ed->caller = elog_id_for_msg_name (vlib_worker_threads[0].barrier_caller);
ed->t_entry = (int) (1000000.0 * t_entry);
ed->t_open = (int) (1000000.0 * t_open);
ed->t_closed = (int) (1000000.0 * t_closed);
barrier_trace_sync_rec (f64 t_entry)
ELOG_TYPE_DECLARE (e) =
.format = "barrier <%d(%dus)%s",
.format_args = "i4i4T4",
u32 depth, t_entry, caller;
ed = ELOG_DATA (&vlib_global_main.elog_main, e);
ed->depth = (int) vlib_worker_threads[0].recursion_level - 1;
ed->t_entry = (int) (1000000.0 * t_entry);
ed->caller = elog_id_for_msg_name (vlib_worker_threads[0].barrier_caller);
barrier_trace_release_rec (f64 t_entry)
ELOG_TYPE_DECLARE (e) =
.format = "barrier (%dus)%d>",
.format_args = "i4i4",
ed = ELOG_DATA (&vlib_global_main.elog_main, e);
ed->t_entry = (int) (1000000.0 * t_entry);
ed->depth = (int) vlib_worker_threads[0].recursion_level;
barrier_trace_release (f64 t_entry, f64 t_closed_total, f64 t_update_main)
ELOG_TYPE_DECLARE (e) =
.format = "barrier (%dus){%d}(C:%dus)#%d>",
.format_args = "i4i4i4i4",
u32 t_entry, t_update_main, t_closed_total, count;
ed = ELOG_DATA (&vlib_global_main.elog_main, e);
ed->t_entry = (int) (1000000.0 * t_entry);
ed->t_update_main = (int) (1000000.0 * t_update_main);
ed->t_closed_total = (int) (1000000.0 * t_closed_total);
ed->count = (int) vlib_worker_threads[0].barrier_sync_count;
/* Reset context for next trace */
vlib_worker_threads[0].barrier_context = NULL;
char barrier_trace[65536];
char *btp = barrier_trace;
 * syslog Barrier trace functions, which are nulled out if BARRIER_TRACING
 * isn't defined.
barrier_trace_sync (f64 t_entry, f64 t_open, f64 t_closed)
btp += sprintf (btp, "<%u#%s",
(unsigned int) vlib_worker_threads[0].barrier_sync_count,
vlib_worker_threads[0].barrier_caller);
if (vlib_worker_threads[0].barrier_context)
btp += sprintf (btp, "[%s]", vlib_worker_threads[0].barrier_context);
btp += sprintf (btp, "(O:%dus:%dus)(%dus):",
(int) (1000000.0 * t_entry),
(int) (1000000.0 * t_open), (int) (1000000.0 * t_closed));
barrier_trace_sync_rec (f64 t_entry)
btp += sprintf (btp, "<%u(%dus)%s:",
(int) vlib_worker_threads[0].recursion_level - 1,
(int) (1000000.0 * t_entry),
vlib_worker_threads[0].barrier_caller);
barrier_trace_release_rec (f64 t_entry)
btp += sprintf (btp, ":(%dus)%u>", (int) (1000000.0 * t_entry),
(int) vlib_worker_threads[0].recursion_level);
barrier_trace_release (f64 t_entry, f64 t_closed_total, f64 t_update_main)
btp += sprintf (btp, ":(%dus)", (int) (1000000.0 * t_entry));
if (t_update_main > 0)
btp += sprintf (btp, "{%dus}", (int) (1000000.0 * t_update_main));
btp += sprintf (btp, "(C:%dus)#%u>",
(int) (1000000.0 * t_closed_total),
(int) vlib_worker_threads[0].barrier_sync_count);
/* Dump buffer to syslog, and reset for next trace */
fformat (stderr, "BTRC %s\n", barrier_trace);
vlib_worker_threads[0].barrier_context = NULL;
/* Null functions for default case where barrier tracing isn't used */
barrier_trace_sync (f64 t_entry, f64 t_open, f64 t_closed)
barrier_trace_sync_rec (f64 t_entry)
barrier_trace_release_rec (f64 t_entry)
barrier_trace_release (f64 t_entry, f64 t_closed_total, f64 t_update_main)
os_get_nthreads (void)
len = vec_len (vlib_thread_stacks);
vlib_set_thread_name (char *name)
int pthread_setname_np (pthread_t __target_thread, const char *__name);
pthread_t thread = pthread_self ();
rv = pthread_setname_np (thread, name);
clib_warning ("pthread_setname_np returned %d", rv);
sort_registrations_by_no_clone (void *a0, void *a1)
vlib_thread_registration_t **tr0 = a0;
vlib_thread_registration_t **tr1 = a1;
return ((i32) ((*tr0)->no_data_structure_clone)
- ((i32) ((*tr1)->no_data_structure_clone)));
clib_sysfs_list_to_bitmap (char *filename)
fp = fopen (filename, "r");
vec_validate (buffer, 256 - 1);
if (fgets ((char *) buffer, 256, fp))
unformat_init_string (&in, (char *) buffer,
strlen ((char *) buffer));
if (unformat (&in, "%U", unformat_bitmap_list, &r) != 1)
clib_warning ("unformat_bitmap_list failed");
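/*
 * Worked example (editorial addition): a sysfs list file such as
 * /sys/devices/system/cpu/online typically contains "0-3" or "0-3,8";
 * unformat_bitmap_list turns "0-3,8" into a bitmap with bits 0..3 and 8
 * set, so clib_bitmap_get (r, 2) == 1 and clib_bitmap_get (r, 4) == 0.
 */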
/* Called early in the init sequence */
vlib_thread_init (vlib_main_t * vm)
vlib_thread_main_t *tm = &vlib_thread_main;
vlib_worker_thread_t *w;
vlib_thread_registration_t *tr;
u32 n_vlib_mains = 1;
/* get bitmaps of active cpu cores and sockets */
tm->cpu_core_bitmap =
clib_sysfs_list_to_bitmap ("/sys/devices/system/cpu/online");
tm->cpu_socket_bitmap =
clib_sysfs_list_to_bitmap ("/sys/devices/system/node/online");
avail_cpu = clib_bitmap_dup (tm->cpu_core_bitmap);
for (i = 0; i < tm->skip_cores; i++)
uword c = clib_bitmap_first_set (avail_cpu);
return clib_error_return (0, "no available cpus to skip");
avail_cpu = clib_bitmap_set (avail_cpu, c, 0);
/* grab cpu for main thread */
tm->main_lcore = clib_bitmap_first_set (avail_cpu);
if (tm->main_lcore == (u8) ~ 0)
return clib_error_return (0, "no available cpus to be used for the"
if (clib_bitmap_get (avail_cpu, tm->main_lcore) == 0)
return clib_error_return (0, "cpu %u is not available to be used"
" for the main thread", tm->main_lcore);
avail_cpu = clib_bitmap_set (avail_cpu, tm->main_lcore, 0);
/* assume that there is socket 0 only if there is no data from sysfs */
if (!tm->cpu_socket_bitmap)
tm->cpu_socket_bitmap = clib_bitmap_set (0, 0, 1);
/* pin main thread to main_lcore */
if (tm->cb.vlib_thread_set_lcore_cb)
tm->cb.vlib_thread_set_lcore_cb (0, tm->main_lcore);
CPU_SET (tm->main_lcore, &cpuset);
pthread_setaffinity_np (pthread_self (), sizeof (cpu_set_t), &cpuset);
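/*
 * Editorial note: pinning takes one of two paths. When an external
 * thread manager (e.g. DPDK) has installed tm->cb.vlib_thread_set_lcore_cb,
 * pinning is delegated to that callback; otherwise the plain
 * pthread_setaffinity_np() call above pins the main thread to
 * tm->main_lcore directly.
 */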
/* as many threads as stacks... */
vec_validate_aligned (vlib_worker_threads, vec_len (vlib_thread_stacks) - 1,
CLIB_CACHE_LINE_BYTES);
/* Preallocate thread 0 */
_vec_len (vlib_worker_threads) = 1;
w = vlib_worker_threads;
w->thread_mheap = clib_mem_get_heap ();
w->thread_stack = vlib_thread_stacks[0];
w->lcore_id = tm->main_lcore;
w->lwp = syscall (SYS_gettid);
w->thread_id = pthread_self ();
tm->n_vlib_mains = 1;
if (tm->sched_policy != ~0)
struct sched_param sched_param;
if (!sched_getparam (w->lwp, &sched_param))
if (tm->sched_priority != ~0)
sched_param.sched_priority = tm->sched_priority;
sched_setscheduler (w->lwp, tm->sched_policy, &sched_param);
/* assign threads to cores and set n_vlib_mains */
vec_add1 (tm->registrations, tr);
vec_sort_with_function (tm->registrations, sort_registrations_by_no_clone);
for (i = 0; i < vec_len (tm->registrations); i++)
tr = tm->registrations[i];
tr->first_index = first_index;
first_index += tr->count;
n_vlib_mains += (tr->no_data_structure_clone == 0) ? tr->count : 0;
/* construct coremask */
if (tr->use_pthreads || !tr->count)
clib_bitmap_foreach (c, tr->coremask, ({
if (clib_bitmap_get (avail_cpu, c) == 0)
return clib_error_return (0, "cpu %u is not available to be used"
" for the '%s' thread", c, tr->name);
avail_cpu = clib_bitmap_set (avail_cpu, c, 0);
for (j = 0; j < tr->count; j++)
uword c = clib_bitmap_first_set (avail_cpu);
return clib_error_return (0,
"no available cpus to be used for"
" the '%s' thread", tr->name);
avail_cpu = clib_bitmap_set (avail_cpu, c, 0);
tr->coremask = clib_bitmap_set (tr->coremask, c, 1);
clib_bitmap_free (avail_cpu);
tm->n_vlib_mains = n_vlib_mains;
vec_validate_aligned (vlib_worker_threads, first_index - 1,
CLIB_CACHE_LINE_BYTES);
vlib_frame_queue_alloc (int nelts)
vlib_frame_queue_t *fq;
fq = clib_mem_alloc_aligned (sizeof (*fq), CLIB_CACHE_LINE_BYTES);
memset (fq, 0, sizeof (*fq));
fq->vector_threshold = 128; // packets
vec_validate_aligned (fq->elts, nelts - 1, CLIB_CACHE_LINE_BYTES);
if (((uword) & fq->tail) & (CLIB_CACHE_LINE_BYTES - 1))
fformat (stderr, "WARNING: fq->tail unaligned\n");
if (((uword) & fq->head) & (CLIB_CACHE_LINE_BYTES - 1))
fformat (stderr, "WARNING: fq->head unaligned\n");
if (((uword) fq->elts) & (CLIB_CACHE_LINE_BYTES - 1))
fformat (stderr, "WARNING: fq->elts unaligned\n");
if (sizeof (fq->elts[0]) % CLIB_CACHE_LINE_BYTES)
fformat (stderr, "WARNING: fq->elts[0] size %d\n",
sizeof (fq->elts[0]));
if (nelts & (nelts - 1))
fformat (stderr, "FATAL: nelts MUST be a power of 2\n");
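/*
 * Worked example (editorial addition): ring slots are addressed by
 * masking rather than division, as in the dequeue path below:
 *
 *   elt = fq->elts + ((fq->head + 1) & (fq->nelts - 1));
 *
 * With nelts = 32 the mask is 0x1f; the mask only selects a valid slot
 * when nelts - 1 is all ones, hence the power-of-2 check above.
 */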
void vl_msg_api_handler_no_free (void *) __attribute__ ((weak));
vl_msg_api_handler_no_free (void *v)
/* Turned off, save as reference material... */
vlib_frame_queue_dequeue_internal (int thread_id,
vlib_main_t * vm, vlib_node_main_t * nm)
vlib_frame_queue_t *fq = vlib_frame_queues[thread_id];
vlib_frame_queue_elt_t *elt;
vlib_pending_frame_t *p;
vlib_node_runtime_t *r;
u32 node_runtime_index;
ASSERT (vm == vlib_mains[thread_id]);
if (fq->head == fq->tail)
elt = fq->elts + ((fq->head + 1) & (fq->nelts - 1));
before = clib_cpu_time_now ();
node_runtime_index = elt->node_runtime_index;
msg_type = elt->msg_type;
case VLIB_FRAME_QUEUE_ELT_FREE_BUFFERS:
vlib_buffer_free (vm, vlib_frame_vector_args (f), f->n_vectors);
/* note fallthrough... */
case VLIB_FRAME_QUEUE_ELT_FREE_FRAME:
r = vec_elt_at_index (nm->nodes_by_type[VLIB_NODE_TYPE_INTERNAL],
vlib_frame_free (vm, r, f);
case VLIB_FRAME_QUEUE_ELT_DISPATCH_FRAME:
vec_add2 (vm->node_main.pending_frames, p, 1);
f->flags |= (VLIB_FRAME_PENDING | VLIB_FRAME_FREE_AFTER_DISPATCH);
p->node_runtime_index = elt->node_runtime_index;
p->frame_index = vlib_frame_index (vm, f);
p->next_frame_index = VLIB_PENDING_FRAME_NO_NEXT_FRAME;
fq->dequeue_vectors += (u64) f->n_vectors;
case VLIB_FRAME_QUEUE_ELT_API_MSG:
vl_msg_api_handler_no_free (f);
clib_warning ("bogus frame queue message, type %d", msg_type);
fq->dequeue_ticks += clib_cpu_time_now () - before;
CLIB_MEMORY_BARRIER ();
vlib_frame_queue_dequeue (int thread_id,
vlib_main_t * vm, vlib_node_main_t * nm)
return vlib_frame_queue_dequeue_internal (thread_id, vm, nm);
vlib_frame_queue_enqueue (vlib_main_t * vm, u32 node_runtime_index,
u32 frame_queue_index, vlib_frame_t * frame,
vlib_frame_queue_msg_type_t type)
vlib_frame_queue_t *fq = vlib_frame_queues[frame_queue_index];
vlib_frame_queue_elt_t *elt;
u64 before = clib_cpu_time_now ();
new_tail = __sync_add_and_fetch (&fq->tail, 1);
/* Wait until a ring slot is available */
while (new_tail >= fq->head + fq->nelts)
f64 b4 = vlib_time_now_ticks (vm, before);
vlib_worker_thread_barrier_check (vm, b4);
/* Bad idea. Dequeue -> enqueue -> dequeue -> trouble */
// vlib_frame_queue_dequeue (vm->thread_index, vm, nm);
elt = fq->elts + (new_tail & (fq->nelts - 1));
/* this would be very bad... */
/* Once we enqueue the frame, frame->n_vectors is owned elsewhere... */
save_count = frame->n_vectors;
elt->node_runtime_index = node_runtime_index;
elt->msg_type = type;
CLIB_MEMORY_BARRIER ();
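/*
 * Editorial note: the barrier above orders the element-field stores
 * against whatever store publishes the slot to the consumer (elided
 * here), so a dequeuing thread can never observe a published slot with
 * stale node_runtime_index or msg_type values.
 */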
/* To be called by vlib worker threads upon startup */
vlib_worker_thread_init (vlib_worker_thread_t * w)
vlib_thread_main_t *tm = vlib_get_thread_main ();
 * Note: disabling signals in worker threads as follows
 * prevents the api post-mortem dump scheme from working
 * pthread_sigmask (SIG_SETMASK, &s, 0);
clib_mem_set_heap (w->thread_mheap);
if (vec_len (tm->thread_prefix) && w->registration->short_name)
w->name = format (0, "%v_%s_%d%c", tm->thread_prefix,
w->registration->short_name, w->instance_id, '\0');
vlib_set_thread_name ((char *) w->name);
if (!w->registration->use_pthreads)
/* Initial barrier sync, for both worker and i/o threads */
clib_smp_atomic_add (vlib_worker_threads->workers_at_barrier, 1);
while (*vlib_worker_threads->wait_at_barrier)
clib_smp_atomic_add (vlib_worker_threads->workers_at_barrier, -1);
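/*
 * Editorial summary of the handshake sketched above and in the barrier
 * sync/release code later in this file:
 *
 *   main:   *wait_at_barrier = 1; spin until workers_at_barrier == N
 *   worker: atomic ++workers_at_barrier;
 *           spin while *wait_at_barrier;
 *           atomic --workers_at_barrier;
 *   main:   *wait_at_barrier = 0; spin until workers_at_barrier == 0
 */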
vlib_worker_thread_bootstrap_fn (void *arg)
vlib_worker_thread_t *w = arg;
w->lwp = syscall (SYS_gettid);
w->thread_id = pthread_self ();
__os_thread_index = w - vlib_worker_threads;
rv = (void *) clib_calljmp
((uword (*)(uword)) w->thread_function,
(uword) arg, w->thread_stack + VLIB_THREAD_STACK_SIZE);
/* NOTREACHED, we hope */
static clib_error_t *
vlib_launch_thread_int (void *fp, vlib_worker_thread_t * w, unsigned lcore_id)
vlib_thread_main_t *tm = &vlib_thread_main;
void *(*fp_arg) (void *) = fp;
w->lcore_id = lcore_id;
if (tm->cb.vlib_launch_thread_cb && !w->registration->use_pthreads)
return tm->cb.vlib_launch_thread_cb (fp, (void *) w, lcore_id);
CPU_SET (lcore_id, &cpuset);
if (pthread_create (&worker, NULL /* attr */ , fp_arg, (void *) w))
return clib_error_return_unix (0, "pthread_create");
if (pthread_setaffinity_np (worker, sizeof (cpu_set_t), &cpuset))
return clib_error_return_unix (0, "pthread_setaffinity_np");
static clib_error_t *
start_workers (vlib_main_t * vm)
vlib_worker_thread_t *w;
vlib_main_t *vm_clone;
vlib_thread_main_t *tm = &vlib_thread_main;
vlib_thread_registration_t *tr;
vlib_node_runtime_t *rt;
u32 n_vlib_mains = tm->n_vlib_mains;
u32 worker_thread_index;
u8 *main_heap = clib_mem_get_per_cpu_heap ();
mheap_t *main_heap_header = mheap_header (main_heap);
vec_reset_length (vlib_worker_threads);
/* Set up the main thread */
vec_add2_aligned (vlib_worker_threads, w, 1, CLIB_CACHE_LINE_BYTES);
w->elog_track.name = "main thread";
elog_track_register (&vm->elog_main, &w->elog_track);
if (vec_len (tm->thread_prefix))
w->name = format (0, "%v_main%c", tm->thread_prefix, '\0');
vlib_set_thread_name ((char *) w->name);
 * Truth of the matter: we always use at least two
 * threads. So, make the main heap thread-safe
 * and make the event log thread-safe.
main_heap_header->flags |= MHEAP_FLAG_THREAD_SAFE;
clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES, CLIB_CACHE_LINE_BYTES);
vm->elog_main.lock[0] = 0;
if (n_vlib_mains > 1)
/* Replace hand-crafted length-1 vector with a real vector */
vec_validate_aligned (vlib_mains, tm->n_vlib_mains - 1,
CLIB_CACHE_LINE_BYTES);
_vec_len (vlib_mains) = 0;
vec_add1_aligned (vlib_mains, vm, CLIB_CACHE_LINE_BYTES);
vlib_worker_threads->wait_at_barrier =
clib_mem_alloc_aligned (sizeof (u32), CLIB_CACHE_LINE_BYTES);
vlib_worker_threads->workers_at_barrier =
clib_mem_alloc_aligned (sizeof (u32), CLIB_CACHE_LINE_BYTES);
vlib_worker_threads->node_reforks_required =
clib_mem_alloc_aligned (sizeof (u32), CLIB_CACHE_LINE_BYTES);
/* Ask for an initial barrier sync */
*vlib_worker_threads->workers_at_barrier = 0;
*vlib_worker_threads->wait_at_barrier = 1;
/* Without update or refork */
*vlib_worker_threads->node_reforks_required = 0;
vm->need_vlib_worker_thread_node_runtime_update = 0;
vm->barrier_epoch = 0;
vm->barrier_no_close_before = 0;
worker_thread_index = 1;
for (i = 0; i < vec_len (tm->registrations); i++)
vlib_node_main_t *nm, *nm_clone;
vlib_buffer_free_list_t *fl_clone, *fl_orig;
vlib_buffer_free_list_t *orig_freelist_pool;
tr = tm->registrations[i];
for (k = 0; k < tr->count; k++)
vec_add2 (vlib_worker_threads, w, 1);
mheap_alloc (0 /* use VM */ , tr->mheap_size);
w->thread_mheap = main_heap;
vlib_thread_stack_init (w - vlib_worker_threads);
w->thread_function = tr->function;
w->thread_function_arg = w;
w->registration = tr;
(char *) format (0, "%s %d", tr->name, k + 1);
vec_add1 (w->elog_track.name, 0);
elog_track_register (&vm->elog_main, &w->elog_track);
if (tr->no_data_structure_clone)
/* Fork vlib_global_main et al. Look for bugs here */
oldheap = clib_mem_set_heap (w->thread_mheap);
vm_clone = clib_mem_alloc_aligned (sizeof (*vm_clone),
CLIB_CACHE_LINE_BYTES);
clib_memcpy (vm_clone, vlib_mains[0], sizeof (*vm_clone));
vm_clone->thread_index = worker_thread_index;
vm_clone->heap_base = w->thread_mheap;
vm_clone->init_functions_called =
hash_create (0, /* value bytes */ 0);
vm_clone->pending_rpc_requests = 0;
vec_validate (vm_clone->pending_rpc_requests, 0);
_vec_len (vm_clone->pending_rpc_requests) = 0;
memset (&vm_clone->random_buffer, 0,
sizeof (vm_clone->random_buffer));
nm = &vlib_mains[0]->node_main;
nm_clone = &vm_clone->node_main;
/* fork next frames array, preserving node runtime indices */
nm_clone->next_frames = vec_dup_aligned (nm->next_frames,
CLIB_CACHE_LINE_BYTES);
for (j = 0; j < vec_len (nm_clone->next_frames); j++)
vlib_next_frame_t *nf = &nm_clone->next_frames[j];
u32 save_node_runtime_index;
save_node_runtime_index = nf->node_runtime_index;
save_flags = nf->flags & VLIB_FRAME_NO_FREE_AFTER_DISPATCH;
vlib_next_frame_init (nf);
nf->node_runtime_index = save_node_runtime_index;
nf->flags = save_flags;
/* fork the frame dispatch queue */
nm_clone->pending_frames = 0;
vec_validate (nm_clone->pending_frames, 10); /* $$$$$?????? */
_vec_len (nm_clone->pending_frames) = 0;
/* Allocate all nodes in single block for speed */
n = clib_mem_alloc_no_fail (vec_len (nm->nodes) * sizeof (*n));
for (j = 0; j < vec_len (nm->nodes); j++)
clib_memcpy (n, nm->nodes[j], sizeof (*n));
/* none of the copied nodes have enqueue rights given out */
n->owner_node_index = VLIB_INVALID_NODE_INDEX;
memset (&n->stats_total, 0, sizeof (n->stats_total));
memset (&n->stats_last_clear, 0,
sizeof (n->stats_last_clear));
vec_add1 (nm_clone->nodes, n);
nm_clone->nodes_by_type[VLIB_NODE_TYPE_INTERNAL] =
vec_dup_aligned (nm->nodes_by_type[VLIB_NODE_TYPE_INTERNAL],
CLIB_CACHE_LINE_BYTES);
nm_clone->nodes_by_type[VLIB_NODE_TYPE_INTERNAL])
vlib_node_t *n = vlib_get_node (vm, rt->node_index);
rt->thread_index = vm_clone->thread_index;
/* copy initial runtime_data from node */
if (n->runtime_data && n->runtime_data_bytes > 0)
clib_memcpy (rt->runtime_data, n->runtime_data,
clib_min (VLIB_NODE_RUNTIME_DATA_SIZE,
n->runtime_data_bytes));
nm_clone->nodes_by_type[VLIB_NODE_TYPE_INPUT] =
vec_dup_aligned (nm->nodes_by_type[VLIB_NODE_TYPE_INPUT],
CLIB_CACHE_LINE_BYTES);
vec_foreach (rt, nm_clone->nodes_by_type[VLIB_NODE_TYPE_INPUT])
vlib_node_t *n = vlib_get_node (vm, rt->node_index);
rt->thread_index = vm_clone->thread_index;
/* copy initial runtime_data from node */
if (n->runtime_data && n->runtime_data_bytes > 0)
clib_memcpy (rt->runtime_data, n->runtime_data,
clib_min (VLIB_NODE_RUNTIME_DATA_SIZE,
n->runtime_data_bytes));
nm_clone->processes = vec_dup_aligned (nm->processes,
CLIB_CACHE_LINE_BYTES);
/* zap the (per worker) frame freelists, etc */
nm_clone->frame_sizes = 0;
nm_clone->frame_size_hash = hash_create (0, sizeof (uword));
/* Packet trace buffers are guaranteed to be empty, nothing to do here */
clib_mem_set_heap (oldheap);
vec_add1_aligned (vlib_mains, vm_clone, CLIB_CACHE_LINE_BYTES);
vm_clone->error_main.counters = vec_dup_aligned
(vlib_mains[0]->error_main.counters, CLIB_CACHE_LINE_BYTES);
vm_clone->error_main.counters_last_clear = vec_dup_aligned
(vlib_mains[0]->error_main.counters_last_clear,
CLIB_CACHE_LINE_BYTES);
/* Fork the vlib_buffer_main_t free lists, etc. */
orig_freelist_pool = vm_clone->buffer_free_list_pool;
vm_clone->buffer_free_list_pool = 0;
pool_foreach (fl_orig, orig_freelist_pool,
pool_get_aligned (vm_clone->buffer_free_list_pool,
fl_clone, CLIB_CACHE_LINE_BYTES);
ASSERT (fl_orig - orig_freelist_pool
== fl_clone - vm_clone->buffer_free_list_pool);
fl_clone[0] = fl_orig[0];
fl_clone->buffers = 0;
fl_clone->n_alloc = 0;
worker_thread_index++;
/* only have non-data-structure copy threads to create... */
for (i = 0; i < vec_len (tm->registrations); i++)
tr = tm->registrations[i];
for (j = 0; j < tr->count; j++)
vec_add2 (vlib_worker_threads, w, 1);
mheap_alloc (0 /* use VM */ , tr->mheap_size);
w->thread_mheap = main_heap;
vlib_thread_stack_init (w - vlib_worker_threads);
w->thread_function = tr->function;
w->thread_function_arg = w;
(char *) format (0, "%s %d", tr->name, j + 1);
w->registration = tr;
vec_add1 (w->elog_track.name, 0);
elog_track_register (&vm->elog_main, &w->elog_track);
worker_thread_index = 1;
for (i = 0; i < vec_len (tm->registrations); i++)
tr = tm->registrations[i];
if (tr->use_pthreads || tm->use_pthreads)
for (j = 0; j < tr->count; j++)
w = vlib_worker_threads + worker_thread_index++;
err = vlib_launch_thread_int (vlib_worker_thread_bootstrap_fn,
clib_error_report (err);
clib_bitmap_foreach (c, tr->coremask, ({
w = vlib_worker_threads + worker_thread_index++;
err = vlib_launch_thread_int (vlib_worker_thread_bootstrap_fn,
clib_error_report (err);
vlib_worker_thread_barrier_sync (vm);
vlib_worker_thread_barrier_release (vm);
VLIB_MAIN_LOOP_ENTER_FUNCTION (start_workers);
worker_thread_node_runtime_update_internal (void)
vlib_node_main_t *nm, *nm_clone;
vlib_main_t *vm_clone;
vlib_node_runtime_t *rt;
vlib_node_runtime_sync_stats (vlib_main_t * vm,
vlib_node_runtime_t * r,
uword n_vectors, uword n_clocks);
ASSERT (vlib_get_thread_index () == 0);
nm = &vm->node_main;
ASSERT (*vlib_worker_threads->wait_at_barrier == 1);
 * Scrape all runtime stats, so we don't lose node runtime(s) with
 * pending counts, or throw away worker / io thread counts.
for (j = 0; j < vec_len (nm->nodes); j++)
vlib_node_sync_stats (vm, n);
for (i = 1; i < vec_len (vlib_mains); i++)
vm_clone = vlib_mains[i];
nm_clone = &vm_clone->node_main;
for (j = 0; j < vec_len (nm_clone->nodes); j++)
n = nm_clone->nodes[j];
rt = vlib_node_get_runtime (vm_clone, n->index);
vlib_node_runtime_sync_stats (vm_clone, rt, 0, 0, 0);
/* Per-worker clone rebuilds are now done on each thread */
vlib_worker_thread_node_refork (void)
vlib_main_t *vm, *vm_clone;
vlib_node_main_t *nm, *nm_clone;
vlib_node_t **old_nodes_clone;
vlib_node_runtime_t *rt, *old_rt;
vlib_node_t *new_n_clone;
nm = &vm->node_main;
vm_clone = vlib_get_main ();
nm_clone = &vm_clone->node_main;
/* Re-clone error heap */
u64 *old_counters = vm_clone->error_main.counters;
u64 *old_counters_all_clear = vm_clone->error_main.counters_last_clear;
clib_memcpy (&vm_clone->error_main, &vm->error_main,
sizeof (vm->error_main));
j = vec_len (vm->error_main.counters) - 1;
vec_validate_aligned (old_counters, j, CLIB_CACHE_LINE_BYTES);
vec_validate_aligned (old_counters_all_clear, j, CLIB_CACHE_LINE_BYTES);
vm_clone->error_main.counters = old_counters;
vm_clone->error_main.counters_last_clear = old_counters_all_clear;
nm_clone = &vm_clone->node_main;
vec_free (nm_clone->next_frames);
nm_clone->next_frames = vec_dup_aligned (nm->next_frames,
CLIB_CACHE_LINE_BYTES);
for (j = 0; j < vec_len (nm_clone->next_frames); j++)
vlib_next_frame_t *nf = &nm_clone->next_frames[j];
u32 save_node_runtime_index;
save_node_runtime_index = nf->node_runtime_index;
save_flags = nf->flags & VLIB_FRAME_NO_FREE_AFTER_DISPATCH;
vlib_next_frame_init (nf);
nf->node_runtime_index = save_node_runtime_index;
nf->flags = save_flags;
old_nodes_clone = nm_clone->nodes;
nm_clone->nodes = 0;
/* Allocate all nodes in single block for speed */
clib_mem_alloc_no_fail (vec_len (nm->nodes) * sizeof (*new_n_clone));
for (j = 0; j < vec_len (nm->nodes); j++)
vlib_node_t *old_n_clone;
new_n = nm->nodes[j];
old_n_clone = old_nodes_clone[j];
clib_memcpy (new_n_clone, new_n, sizeof (*new_n));
/* none of the copied nodes have enqueue rights given out */
new_n_clone->owner_node_index = VLIB_INVALID_NODE_INDEX;
if (j >= vec_len (old_nodes_clone))
/* new node, set to zero */
memset (&new_n_clone->stats_total, 0,
sizeof (new_n_clone->stats_total));
memset (&new_n_clone->stats_last_clear, 0,
sizeof (new_n_clone->stats_last_clear));
/* Copy stats if the old data is valid */
clib_memcpy (&new_n_clone->stats_total,
&old_n_clone->stats_total,
sizeof (new_n_clone->stats_total));
clib_memcpy (&new_n_clone->stats_last_clear,
&old_n_clone->stats_last_clear,
sizeof (new_n_clone->stats_last_clear));
/* keep previous node state */
new_n_clone->state = old_n_clone->state;
vec_add1 (nm_clone->nodes, new_n_clone);
/* Free the old node clones */
clib_mem_free (old_nodes_clone[0]);
vec_free (old_nodes_clone);
/* re-clone internal nodes */
old_rt = nm_clone->nodes_by_type[VLIB_NODE_TYPE_INTERNAL];
nm_clone->nodes_by_type[VLIB_NODE_TYPE_INTERNAL] =
vec_dup_aligned (nm->nodes_by_type[VLIB_NODE_TYPE_INTERNAL],
CLIB_CACHE_LINE_BYTES);
vec_foreach (rt, nm_clone->nodes_by_type[VLIB_NODE_TYPE_INTERNAL])
vlib_node_t *n = vlib_get_node (vm, rt->node_index);
rt->thread_index = vm_clone->thread_index;
/* copy runtime_data, will be overwritten later for existing rt */
if (n->runtime_data && n->runtime_data_bytes > 0)
clib_memcpy (rt->runtime_data, n->runtime_data,
clib_min (VLIB_NODE_RUNTIME_DATA_SIZE,
n->runtime_data_bytes));
for (j = 0; j < vec_len (old_rt); j++)
rt = vlib_node_get_runtime (vm_clone, old_rt[j].node_index);
rt->state = old_rt[j].state;
clib_memcpy (rt->runtime_data, old_rt[j].runtime_data,
VLIB_NODE_RUNTIME_DATA_SIZE);
/* re-clone input nodes */
old_rt = nm_clone->nodes_by_type[VLIB_NODE_TYPE_INPUT];
nm_clone->nodes_by_type[VLIB_NODE_TYPE_INPUT] =
vec_dup_aligned (nm->nodes_by_type[VLIB_NODE_TYPE_INPUT],
CLIB_CACHE_LINE_BYTES);
vec_foreach (rt, nm_clone->nodes_by_type[VLIB_NODE_TYPE_INPUT])
vlib_node_t *n = vlib_get_node (vm, rt->node_index);
rt->thread_index = vm_clone->thread_index;
/* copy runtime_data, will be overwritten later for existing rt */
if (n->runtime_data && n->runtime_data_bytes > 0)
clib_memcpy (rt->runtime_data, n->runtime_data,
clib_min (VLIB_NODE_RUNTIME_DATA_SIZE,
n->runtime_data_bytes));
for (j = 0; j < vec_len (old_rt); j++)
rt = vlib_node_get_runtime (vm_clone, old_rt[j].node_index);
rt->state = old_rt[j].state;
clib_memcpy (rt->runtime_data, old_rt[j].runtime_data,
VLIB_NODE_RUNTIME_DATA_SIZE);
nm_clone->processes = vec_dup_aligned (nm->processes,
CLIB_CACHE_LINE_BYTES);
vlib_worker_thread_node_runtime_update (void)
 * Make a note that we need to do a node runtime update
 * prior to releasing the barrier.
vlib_global_main.need_vlib_worker_thread_node_runtime_update = 1;
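/*
 * Editorial note: this only records a request. The expensive work
 * happens in vlib_worker_thread_barrier_release() below: the main
 * thread scrapes stats via worker_thread_node_runtime_update_internal(),
 * then each worker runs vlib_worker_thread_node_refork() on its own
 * clone in parallel before the release completes.
 */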
unformat_sched_policy (unformat_input_t * input, va_list * args)
u32 *r = va_arg (*args, u32 *);
#define _(v,f,s) else if (unformat (input, s)) *r = SCHED_POLICY_##f;
foreach_sched_policy
static clib_error_t *
cpu_config (vlib_main_t * vm, unformat_input_t * input)
vlib_thread_registration_t *tr;
vlib_thread_main_t *tm = &vlib_thread_main;
tm->thread_registrations_by_name = hash_create_string (0, sizeof (uword));
tm->n_thread_stacks = 1; /* account for main thread */
tm->sched_policy = ~0;
tm->sched_priority = ~0;
hash_set_mem (tm->thread_registrations_by_name, tr->name, (uword) tr);
while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
if (unformat (input, "use-pthreads"))
tm->use_pthreads = 1;
else if (unformat (input, "thread-prefix %v", &tm->thread_prefix))
else if (unformat (input, "main-core %u", &tm->main_lcore))
else if (unformat (input, "skip-cores %u", &tm->skip_cores))
else if (unformat (input, "coremask-%s %llx", &name, &coremask))
p = hash_get_mem (tm->thread_registrations_by_name, name);
return clib_error_return (0, "no such thread type '%s'", name);
tr = (vlib_thread_registration_t *) p[0];
if (tr->use_pthreads)
return clib_error_return (0,
"coremask cannot be set for '%s' threads",
tr->coremask = clib_bitmap_set_multiple
(tr->coremask, 0, coremask, BITS (coremask));
tr->count = clib_bitmap_count_set_bits (tr->coremask);
else if (unformat (input, "corelist-%s %U", &name, unformat_bitmap_list,
p = hash_get_mem (tm->thread_registrations_by_name, name);
return clib_error_return (0, "no such thread type '%s'", name);
tr = (vlib_thread_registration_t *) p[0];
if (tr->use_pthreads)
return clib_error_return (0,
"corelist cannot be set for '%s' threads",
tr->coremask = bitmap;
tr->count = clib_bitmap_count_set_bits (tr->coremask);
(input, "scheduler-policy %U", unformat_sched_policy,
else if (unformat (input, "scheduler-priority %u", &tm->sched_priority))
else if (unformat (input, "%s %u", &name, &count))
p = hash_get_mem (tm->thread_registrations_by_name, name);
return clib_error_return (0, "no such thread type '%s'", name);
tr = (vlib_thread_registration_t *) p[0];
if (tr->fixed_count)
return clib_error_return
(0, "number of %s threads not configurable", tr->name);
if (tm->sched_priority != ~0)
if (tm->sched_policy == SCHED_FIFO || tm->sched_policy == SCHED_RR)
u32 prio_max = sched_get_priority_max (tm->sched_policy);
u32 prio_min = sched_get_priority_min (tm->sched_policy);
if (tm->sched_priority > prio_max)
tm->sched_priority = prio_max;
if (tm->sched_priority < prio_min)
tm->sched_priority = prio_min;
return clib_error_return
"scheduling priority (%d) is not allowed for `normal` scheduling policy",
tm->sched_priority);
if (!tm->thread_prefix)
tm->thread_prefix = format (0, "vpp");
tm->n_thread_stacks += tr->count;
tm->n_pthreads += tr->count * tr->use_pthreads;
tm->n_threads += tr->count * (tr->use_pthreads == 0);
VLIB_EARLY_CONFIG_FUNCTION (cpu_config, "cpu");
#if !defined (__x86_64__) && !defined (__i386__) && !defined (__aarch64__) && !defined (__powerpc64__) && !defined (__arm__)
__sync_fetch_and_add_8 (void)
fformat (stderr, "%s called\n", __FUNCTION__);
__sync_add_and_fetch_8 (void)
fformat (stderr, "%s called\n", __FUNCTION__);
void vnet_main_fixup (vlib_fork_fixup_t which) __attribute__ ((weak));
vnet_main_fixup (vlib_fork_fixup_t which)
vlib_worker_thread_fork_fixup (vlib_fork_fixup_t which)
vlib_main_t *vm = vlib_get_main ();
if (vlib_mains == 0)
ASSERT (vlib_get_thread_index () == 0);
vlib_worker_thread_barrier_sync (vm);
case VLIB_WORKER_THREAD_FORK_FIXUP_NEW_SW_IF_INDEX:
vnet_main_fixup (VLIB_WORKER_THREAD_FORK_FIXUP_NEW_SW_IF_INDEX);
vlib_worker_thread_barrier_release (vm);
 * Enforce minimum open time to minimize packet loss due to Rx overflow,
 * based on a test-based heuristic that the barrier should be open for at
 * least 3 times as long as it is closed (with an upper bound of 1ms,
 * because by that point it is probably too late to make a difference)
#ifndef BARRIER_MINIMUM_OPEN_LIMIT
#define BARRIER_MINIMUM_OPEN_LIMIT 0.001
#ifndef BARRIER_MINIMUM_OPEN_FACTOR
#define BARRIER_MINIMUM_OPEN_FACTOR 3
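/*
 * Worked example (editorial addition): if the barrier was closed for
 * 200us, the next close is deferred until at least 3 * 200us = 600us
 * after release. A 500us closure would ask for 1.5ms of open time,
 * which the 1ms BARRIER_MINIMUM_OPEN_LIMIT caps; see the computation of
 * barrier_no_close_before in vlib_worker_thread_barrier_release().
 */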
vlib_worker_thread_barrier_sync_int (vlib_main_t * vm)
if (vec_len (vlib_mains) < 2)
ASSERT (vlib_get_thread_index () == 0);
count = vec_len (vlib_mains) - 1;
/* Record entry relative to last close */
now = vlib_time_now (vm);
t_entry = now - vm->barrier_epoch;
/* Tolerate recursive calls */
if (++vlib_worker_threads[0].recursion_level > 1)
barrier_trace_sync_rec (t_entry);
vlib_worker_threads[0].barrier_sync_count++;
/* Enforce minimum barrier open time to minimize packet loss */
ASSERT (vm->barrier_no_close_before <= (now + BARRIER_MINIMUM_OPEN_LIMIT));
while ((now = vlib_time_now (vm)) < vm->barrier_no_close_before)
/* Record time of closure */
t_open = now - vm->barrier_epoch;
vm->barrier_epoch = now;
deadline = now + BARRIER_SYNC_TIMEOUT;
*vlib_worker_threads->wait_at_barrier = 1;
while (*vlib_worker_threads->workers_at_barrier != count)
if ((now = vlib_time_now (vm)) > deadline)
fformat (stderr, "%s: worker thread deadlock\n", __FUNCTION__);
t_closed = now - vm->barrier_epoch;
barrier_trace_sync (t_entry, t_open, t_closed);
void vlib_stat_segment_lock (void) __attribute__ ((weak));
vlib_stat_segment_lock (void)
void vlib_stat_segment_unlock (void) __attribute__ ((weak));
vlib_stat_segment_unlock (void)
vlib_worker_thread_barrier_release (vlib_main_t * vm)
f64 t_update_main = 0.0;
int refork_needed = 0;
if (vec_len (vlib_mains) < 2)
ASSERT (vlib_get_thread_index () == 0);
now = vlib_time_now (vm);
t_entry = now - vm->barrier_epoch;
if (--vlib_worker_threads[0].recursion_level > 0)
barrier_trace_release_rec (t_entry);
/* Update (all) node runtimes before releasing the barrier, if needed */
if (vm->need_vlib_worker_thread_node_runtime_update)
 * Lock stat segment here, so we're safe when
 * rebuilding the stat segment node clones from the
vlib_stat_segment_lock ();
/* Do stats elements on main thread */
worker_thread_node_runtime_update_internal ();
vm->need_vlib_worker_thread_node_runtime_update = 0;
/* Do per thread rebuilds in parallel */
clib_smp_atomic_add (vlib_worker_threads->node_reforks_required,
(vec_len (vlib_mains) - 1));
now = vlib_time_now (vm);
t_update_main = now - vm->barrier_epoch;
deadline = now + BARRIER_SYNC_TIMEOUT;
*vlib_worker_threads->wait_at_barrier = 0;
while (*vlib_worker_threads->workers_at_barrier > 0)
if ((now = vlib_time_now (vm)) > deadline)
fformat (stderr, "%s: worker thread deadlock\n", __FUNCTION__);
/* Wait for reforks before continuing */
now = vlib_time_now (vm);
deadline = now + BARRIER_SYNC_TIMEOUT;
while (*vlib_worker_threads->node_reforks_required > 0)
if ((now = vlib_time_now (vm)) > deadline)
fformat (stderr, "%s: worker thread refork deadlock\n",
vlib_stat_segment_unlock ();
t_closed_total = now - vm->barrier_epoch;
minimum_open = t_closed_total * BARRIER_MINIMUM_OPEN_FACTOR;
if (minimum_open > BARRIER_MINIMUM_OPEN_LIMIT)
minimum_open = BARRIER_MINIMUM_OPEN_LIMIT;
vm->barrier_no_close_before = now + minimum_open;
/* Record barrier epoch (used to enforce minimum open time) */
vm->barrier_epoch = now;
barrier_trace_release (t_entry, t_closed_total, t_update_main);
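/*
 * Minimal usage sketch (editorial, not from the original file): main
 * thread code that mutates state shared with workers brackets the
 * critical section as
 *
 *   vlib_worker_thread_barrier_sync (vm);
 *   ... modify shared data structures ...
 *   vlib_worker_thread_barrier_release (vm);
 *
 * Nested sync/release pairs are tolerated via recursion_level, and only
 * the outermost release actually opens the barrier.
 */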
 * Check the frame queue to see if any frames are available.
 * If so, pull the packets off the frames and put them to
vlib_frame_queue_dequeue (vlib_main_t * vm, vlib_frame_queue_main_t * fqm)
u32 thread_id = vm->thread_index;
vlib_frame_queue_t *fq = fqm->vlib_frame_queues[thread_id];
vlib_frame_queue_elt_t *elt;
ASSERT (vm == vlib_mains[thread_id]);
if (PREDICT_FALSE (fqm->node_index == ~0))
 * Gather trace data for frame queues
if (PREDICT_FALSE (fq->trace))
frame_queue_trace_t *fqt;
frame_queue_nelt_counter_t *fqh;
fqt = &fqm->frame_queue_traces[thread_id];
fqt->nelts = fq->nelts;
fqt->head = fq->head;
fqt->head_hint = fq->head_hint;
fqt->tail = fq->tail;
fqt->threshold = fq->vector_threshold;
fqt->n_in_use = fqt->tail - fqt->head;
if (fqt->n_in_use >= fqt->nelts)
// if beyond max then use max
fqt->n_in_use = fqt->nelts - 1;
/* Record the number of elements in use in the histogram */
fqh = &fqm->frame_queue_histogram[thread_id];
fqh->count[fqt->n_in_use]++;
/* Record a snapshot of the elements in use */
for (elix = 0; elix < fqt->nelts; elix++)
elt = fq->elts + ((fq->head + 1 + elix) & (fq->nelts - 1));
if (1 || elt->valid)
fqt->n_vectors[elix] = elt->n_vectors;
if (fq->head == fq->tail)
fq->head_hint = fq->head;
elt = fq->elts + ((fq->head + 1) & (fq->nelts - 1));
fq->head_hint = fq->head;
from = elt->buffer_index;
msg_type = elt->msg_type;
ASSERT (msg_type == VLIB_FRAME_QUEUE_ELT_DISPATCH_FRAME);
ASSERT (elt->n_vectors <= VLIB_FRAME_SIZE);
f = vlib_get_frame_to_node (vm, fqm->node_index);
to = vlib_frame_vector_args (f);
n_left_to_node = elt->n_vectors;
while (n_left_to_node >= 4)
n_left_to_node -= 4;
while (n_left_to_node > 0)
vectors += elt->n_vectors;
f->n_vectors = elt->n_vectors;
vlib_put_frame_to_node (vm, fqm->node_index, f);
elt->msg_type = 0xfefefefe;
CLIB_MEMORY_BARRIER ();
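/*
 * Editorial note: 0xfefefefe appears to be a poison value; clobbering
 * msg_type before the barrier makes any use of a stale element obvious,
 * and the barrier publishes the slot release before the head index
 * advances past it.
 */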
 * Limit the number of packets pushed into the graph
if (vectors >= fq->vector_threshold)
fq->head_hint = fq->head;
vlib_worker_thread_fn (void *arg)
vlib_worker_thread_t *w = (vlib_worker_thread_t *) arg;
vlib_thread_main_t *tm = vlib_get_thread_main ();
vlib_main_t *vm = vlib_get_main ();
ASSERT (vm->thread_index == vlib_get_thread_index ());
vlib_worker_thread_init (w);
clib_time_init (&vm->clib_time);
clib_mem_set_heap (w->thread_mheap);
/* Wait until the dpdk init sequence is complete */
while (tm->extern_thread_mgmt && tm->worker_thread_release == 0)
vlib_worker_thread_barrier_check ();
e = vlib_call_init_exit_functions
(vm, vm->worker_init_function_registrations, 1 /* call_once */ );
clib_error_report (e);
vlib_worker_loop (vm);
VLIB_REGISTER_THREAD (worker_thread_reg, static) = {
.function = vlib_worker_thread_fn,
vlib_frame_queue_main_init (u32 node_index, u32 frame_queue_nelts)
vlib_thread_main_t *tm = vlib_get_thread_main ();
vlib_frame_queue_main_t *fqm;
vlib_frame_queue_t *fq;
if (frame_queue_nelts == 0)
frame_queue_nelts = FRAME_QUEUE_NELTS;
vec_add2 (tm->frame_queue_mains, fqm, 1);
fqm->node_index = node_index;
vec_validate (fqm->vlib_frame_queues, tm->n_vlib_mains - 1);
_vec_len (fqm->vlib_frame_queues) = 0;
for (i = 0; i < tm->n_vlib_mains; i++)
fq = vlib_frame_queue_alloc (frame_queue_nelts);
vec_add1 (fqm->vlib_frame_queues, fq);
return (fqm - tm->frame_queue_mains);
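/*
 * Usage sketch (editorial; handoff_node_index is a hypothetical node
 * index owned by the caller):
 *
 *   u32 fq_index = vlib_frame_queue_main_init (handoff_node_index, 0);
 *
 * Passing 0 for frame_queue_nelts falls back to the FRAME_QUEUE_NELTS
 * default of 32, and one frame queue is allocated per vlib_main.
 */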
vlib_thread_cb_register (struct vlib_main_t *vm, vlib_thread_callbacks_t * cb)
vlib_thread_main_t *tm = vlib_get_thread_main ();
if (tm->extern_thread_mgmt)
tm->cb.vlib_launch_thread_cb = cb->vlib_launch_thread_cb;
tm->extern_thread_mgmt = 1;
vlib_process_signal_event_mt_helper (vlib_process_signal_event_mt_args_t *
ASSERT (vlib_get_thread_index () == 0);
vlib_process_signal_event (vlib_get_main (), args->node_index,
args->type_opaque, args->data);
void *rpc_call_main_thread_cb_fn;
vlib_rpc_call_main_thread (void *callback, u8 * args, u32 arg_size)
if (rpc_call_main_thread_cb_fn)
void (*fp) (void *, u8 *, u32) = rpc_call_main_thread_cb_fn;
(*fp) (callback, args, arg_size);
clib_warning ("BUG: rpc_call_main_thread_cb_fn NULL!");
threads_init (vlib_main_t * vm)
VLIB_INIT_FUNCTION (threads_init);
 * fd.io coding-style-patch-verification: ON
 * eval: (c-set-style "gnu")