/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef included_vlib_threads_h
#define included_vlib_threads_h

#include <vlib/main.h>
#include <linux/sched.h>
/*
 * To enable detailed tracing of barrier usage, including call stacks and
 * timings, define BARRIER_TRACING here or in relevant TAGS. If also used
 * with CLIB_DEBUG, timing will _not_ be representative of the normal code
 * path.
 */
// #define BARRIER_TRACING 1

/*
 * Two options for barrier tracing output: syslog & elog.
 */
// #define BARRIER_TRACING_ELOG 1
extern vlib_main_t **vlib_mains;

void vlib_set_thread_name (char *name);

/* arg is actually a vlib_worker_thread_t * */
typedef void (vlib_thread_function_t) (void *arg);
typedef struct vlib_thread_registration_
{
  /* constructor generated list of thread registrations */
  struct vlib_thread_registration_ *next;

  /* config parameters */
  char *name;
  char *short_name;
  vlib_thread_function_t *function;
  int fixed_count;
  u32 count;
  int no_data_structure_clone;
  u32 frame_queue_nelts;

  /* All threads of this type run on pthreads */
  int use_pthreads;
  u32 first_index;
  uword *coremask;
} vlib_thread_registration_t;
/*
 * Frames have their cpu / vlib_main_t index in the low-order N bits
 * Make VLIB_MAX_CPUS a power-of-two, please...
 */

#ifndef VLIB_MAX_CPUS
#define VLIB_MAX_CPUS 256
#endif

#if VLIB_MAX_CPUS > CLIB_MAX_MHEAPS
#error Please increase number of per-cpu mheaps
#endif

#define VLIB_CPU_MASK (VLIB_MAX_CPUS - 1)	/* 0xff, max */
#define VLIB_OFFSET_MASK (~VLIB_CPU_MASK)

#define VLIB_LOG2_THREAD_STACK_SIZE (21)
#define VLIB_THREAD_STACK_SIZE (1<<VLIB_LOG2_THREAD_STACK_SIZE)
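
/*
 * Illustrative sketch (editorial, not part of the API): a value that
 * carries a cpu / vlib_main_t index in its low-order bits splits under
 * the masks above as follows. The variable names are hypothetical.
 *
 *   u32 cpu_index = value & VLIB_CPU_MASK;     // owning thread index
 *   uword offset  = value & VLIB_OFFSET_MASK;  // remaining high-order bits
 */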
typedef enum
{
  VLIB_FRAME_QUEUE_ELT_DISPATCH_FRAME,
} vlib_frame_queue_msg_type_t;
typedef struct
{
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
  volatile u32 valid;
  u32 msg_type;
  u32 n_vectors;
  u32 last_n_vectors;

  /* 256 * 4 = 1024 bytes, even mult of cache line size */
  u32 buffer_index[VLIB_FRAME_SIZE];
}
vlib_frame_queue_elt_t;
typedef struct
{
  /* First cache line */
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
  volatile u32 *wait_at_barrier;
  volatile u32 *workers_at_barrier;

  /* Second cache line */
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline1);
  void (*thread_function) (void *);
  void *thread_function_arg;
  elog_track_t elog_track;
  vlib_thread_registration_t *registration;
  u64 barrier_sync_count;
#ifdef BARRIER_TRACING
  const char *barrier_caller;
  const char *barrier_context;
#endif
  volatile u32 *node_reforks_required;
} vlib_worker_thread_t;

extern vlib_worker_thread_t *vlib_worker_threads;
typedef struct
{
  /* enqueue side */
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
  volatile u64 tail;
  u32 enqueue_full_events;

  /* dequeue side */
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline1);
  volatile u64 head;
  u64 vector_threshold;

  /* dequeue hint to enqueue side */
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline2);
  volatile u64 head_hint;

  /* read-only, constant, shared */
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline3);
  vlib_frame_queue_elt_t *elts;
  u32 nelts;
}
vlib_frame_queue_t;

typedef struct
{
  u32 node_index;
  vlib_frame_queue_t **vlib_frame_queues;

  /* for frame queue tracing */
  frame_queue_trace_t *frame_queue_traces;
  frame_queue_nelt_counter_t *frame_queue_histogram;
} vlib_frame_queue_main_t;
typedef struct
{
  uword node_index;
  uword type_opaque;
  uword data;
} vlib_process_signal_event_mt_args_t;
/* Called early, in thread 0's context */
clib_error_t *vlib_thread_init (vlib_main_t * vm);

int vlib_frame_queue_enqueue (vlib_main_t * vm, u32 node_runtime_index,
                              u32 frame_queue_index, vlib_frame_t * frame,
                              vlib_frame_queue_msg_type_t type);

int
vlib_frame_queue_dequeue (vlib_main_t * vm, vlib_frame_queue_main_t * fqm);

void vlib_worker_thread_node_runtime_update (void);

void vlib_create_worker_threads (vlib_main_t * vm, int n,
                                 void (*thread_function) (void *));

void vlib_worker_thread_init (vlib_worker_thread_t * w);
u32 vlib_frame_queue_main_init (u32 node_index, u32 frame_queue_nelts);
/* Check for a barrier sync request every 30ms */
#define BARRIER_SYNC_DELAY (0.030000)

#if CLIB_DEBUG > 0
/* long barrier timeout, for gdb... */
#define BARRIER_SYNC_TIMEOUT (600.1)
#else
#define BARRIER_SYNC_TIMEOUT (1.0)
#endif

#ifdef BARRIER_TRACING
#define vlib_worker_thread_barrier_sync(X) {vlib_worker_threads[0].barrier_caller=__FUNCTION__;vlib_worker_thread_barrier_sync_int(X);}
#else
#define vlib_worker_thread_barrier_sync(X) vlib_worker_thread_barrier_sync_int(X)
#endif
void vlib_worker_thread_barrier_sync_int (vlib_main_t * vm);
void vlib_worker_thread_barrier_release (vlib_main_t * vm);
void vlib_worker_thread_node_refork (void);
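
/*
 * Illustrative sketch (editorial): main-thread code typically brackets
 * updates to data structures the workers read with a barrier sync and
 * release, e.g.
 *
 *   vlib_worker_thread_barrier_sync (vm);
 *   ... modify forwarding tables, node graph, etc. ...
 *   vlib_worker_thread_barrier_release (vm);
 */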
static_always_inline uword
vlib_get_thread_index (void)
{
  return __os_thread_index;
}

always_inline void
vlib_smp_unsafe_warning (void)
{
  if (CLIB_DEBUG > 0)
    {
      if (vlib_get_thread_index ())
        fformat (stderr, "%s: SMP unsafe warning...\n", __FUNCTION__);
    }
}
typedef enum
{
  VLIB_WORKER_THREAD_FORK_FIXUP_ILLEGAL = 0,
  VLIB_WORKER_THREAD_FORK_FIXUP_NEW_SW_IF_INDEX,
} vlib_fork_fixup_t;

void vlib_worker_thread_fork_fixup (vlib_fork_fixup_t which);
#define foreach_vlib_main(body)                                 \
do {                                                            \
  vlib_main_t ** __vlib_mains = 0, *this_vlib_main;             \
  int ii;                                                       \
                                                                \
  for (ii = 0; ii < vec_len (vlib_mains); ii++)                 \
    {                                                           \
      this_vlib_main = vlib_mains[ii];                          \
      ASSERT (ii == 0 ||                                        \
              this_vlib_main->parked_at_barrier == 1);          \
      if (this_vlib_main)                                       \
        vec_add1 (__vlib_mains, this_vlib_main);                \
    }                                                           \
                                                                \
  for (ii = 0; ii < vec_len (__vlib_mains); ii++)               \
    {                                                           \
      this_vlib_main = __vlib_mains[ii];                        \
      /* body uses this_vlib_main... */                         \
      (body);                                                   \
    }                                                           \
  vec_free (__vlib_mains);                                      \
} while (0);
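
/*
 * Illustrative sketch (editorial): foreach_vlib_main takes a
 * parenthesized statement block that references this_vlib_main; the
 * flag name below is hypothetical, for illustration only:
 *
 *   foreach_vlib_main (({
 *     this_vlib_main->some_per_thread_flag = 1;
 *   }));
 */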
#define foreach_sched_policy \
  _(SCHED_OTHER, OTHER, "other") \
  _(SCHED_BATCH, BATCH, "batch") \
  _(SCHED_IDLE, IDLE, "idle") \
  _(SCHED_FIFO, FIFO, "fifo") \
  _(SCHED_RR, RR, "rr")

typedef enum
{
#define _(v,f,s) SCHED_POLICY_##f = v,
  foreach_sched_policy
#undef _
    SCHED_POLICY_N,
} sched_policy_t;

typedef struct
{
  clib_error_t *(*vlib_launch_thread_cb) (void *fp, vlib_worker_thread_t * w,
                                          unsigned lcore_id);
  clib_error_t *(*vlib_thread_set_lcore_cb) (u32 thread, u16 lcore);
} vlib_thread_callbacks_t;
typedef struct
{
  /* Link list of registrations, built by constructors */
  vlib_thread_registration_t *next;

  /* Vector of registrations, w/ non-data-structure clones at the top */
  vlib_thread_registration_t **registrations;

  uword *thread_registrations_by_name;

  vlib_worker_thread_t *worker_threads;

  /*
   * Launch all threads as pthreads,
   * not eal_rte_launch (strict affinity) threads
   */
  int use_pthreads;

  /* Number of vlib_main / vnet_main clones */
  u32 n_vlib_mains;

  /* Number of thread stacks to create */
  u32 n_thread_stacks;

  /* Number of pthreads */
  u32 n_pthreads;

  /* Number of threads */
  u32 n_threads;

  /* Number of cores to skip, must match the core mask */
  u32 skip_cores;

  /* Thread prefix name */
  u8 *thread_prefix;

  /* main thread lcore */
  u8 main_lcore;

  /* Bitmap of available CPU cores */
  uword *cpu_core_bitmap;

  /* Bitmap of available CPU sockets (NUMA nodes) */
  uword *cpu_socket_bitmap;

  /* Worker handoff queues */
  vlib_frame_queue_main_t *frame_queue_mains;

  /* worker thread initialization barrier */
  volatile u32 worker_thread_release;

  /* scheduling policy */
  u32 sched_policy;

  /* scheduling policy priority */
  u32 sched_priority;

  /* callbacks */
  vlib_thread_callbacks_t cb;
  int extern_thread_mgmt;
} vlib_thread_main_t;
extern vlib_thread_main_t vlib_thread_main;

#include <vlib/global_funcs.h>
#define VLIB_REGISTER_THREAD(x,...)                     \
  __VA_ARGS__ vlib_thread_registration_t x;             \
static void __vlib_add_thread_registration_##x (void)  \
  __attribute__((__constructor__)) ;                    \
static void __vlib_add_thread_registration_##x (void)  \
{                                                       \
  vlib_thread_main_t * tm = &vlib_thread_main;          \
  x.next = tm->next;                                    \
  tm->next = &x;                                        \
}                                                       \
static void __vlib_rm_thread_registration_##x (void)   \
  __attribute__((__destructor__)) ;                     \
static void __vlib_rm_thread_registration_##x (void)   \
{                                                       \
  vlib_thread_main_t * tm = &vlib_thread_main;          \
  VLIB_REMOVE_FROM_LINKED_LIST (tm->next, &x, next);    \
}                                                       \
__VA_ARGS__ vlib_thread_registration_t x
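
/*
 * Illustrative sketch (editorial; the registration name, field values,
 * and thread function are hypothetical): a thread type is registered at
 * load time via the constructor macro above, e.g.
 *
 *   VLIB_REGISTER_THREAD (example_thread_reg, static) = {
 *     .name = "example",
 *     .short_name = "ex",
 *     .function = example_thread_fn,
 *   };
 */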
always_inline u32
vlib_num_workers ()
{
  return vlib_thread_main.n_vlib_mains - 1;
}

always_inline u32
vlib_get_worker_thread_index (u32 worker_index)
{
  return worker_index + 1;
}

always_inline u32
vlib_get_worker_index (u32 thread_index)
{
  return thread_index - 1;
}

always_inline u32
vlib_get_current_worker_index ()
{
  return vlib_get_thread_index () - 1;
}
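
/*
 * Note (editorial): thread index 0 is the main thread and workers occupy
 * thread indices 1..n, hence the +1 / -1 translations above; e.g. worker
 * 0 runs as thread index 1.
 */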
static inline void
vlib_worker_thread_barrier_check (void)
{
  if (PREDICT_FALSE (*vlib_worker_threads->wait_at_barrier))
    {
      vlib_main_t *vm;
      clib_smp_atomic_add (vlib_worker_threads->workers_at_barrier, 1);
      if (CLIB_DEBUG > 0)
        {
          vm = vlib_get_main ();
          vm->parked_at_barrier = 1;
        }
      while (*vlib_worker_threads->wait_at_barrier)
        ;
      if (CLIB_DEBUG > 0)
        vm->parked_at_barrier = 0;
      clib_smp_atomic_add (vlib_worker_threads->workers_at_barrier, -1);

      if (PREDICT_FALSE (*vlib_worker_threads->node_reforks_required))
        {
          vlib_worker_thread_node_refork ();
          clib_smp_atomic_add (vlib_worker_threads->node_reforks_required,
                               -1);
          while (*vlib_worker_threads->node_reforks_required)
            ;
        }
    }
}
always_inline vlib_main_t *
vlib_get_worker_vlib_main (u32 worker_index)
{
  vlib_main_t *vm;
  vlib_thread_main_t *tm = &vlib_thread_main;
  ASSERT (worker_index < tm->n_vlib_mains - 1);
  vm = vlib_mains[worker_index + 1];
  ASSERT (vm);
  return vm;
}
static inline void
vlib_put_frame_queue_elt (vlib_frame_queue_elt_t * hf)
{
  CLIB_MEMORY_BARRIER ();
  hf->valid = 1;
}

static inline vlib_frame_queue_elt_t *
vlib_get_frame_queue_elt (u32 frame_queue_index, u32 index)
{
  vlib_frame_queue_t *fq;
  vlib_frame_queue_elt_t *elt;
  vlib_thread_main_t *tm = &vlib_thread_main;
  vlib_frame_queue_main_t *fqm =
    vec_elt_at_index (tm->frame_queue_mains, frame_queue_index);
  u64 new_tail;

  fq = fqm->vlib_frame_queues[index];
  ASSERT (fq);

  new_tail = __sync_add_and_fetch (&fq->tail, 1);

  /* Wait until a ring slot is available */
  while (new_tail >= fq->head_hint + fq->nelts)
    vlib_worker_thread_barrier_check ();

  elt = fq->elts + (new_tail & (fq->nelts - 1));

  /* this would be very bad... */
  while (elt->valid)
    ;

  elt->msg_type = VLIB_FRAME_QUEUE_ELT_DISPATCH_FRAME;
  elt->last_n_vectors = elt->n_vectors = 0;

  return elt;
}
static inline vlib_frame_queue_t *
is_vlib_frame_queue_congested (u32 frame_queue_index,
                               u32 index,
                               u32 queue_hi_thresh,
                               vlib_frame_queue_t **
                               handoff_queue_by_worker_index)
{
  vlib_frame_queue_t *fq;
  vlib_thread_main_t *tm = &vlib_thread_main;
  vlib_frame_queue_main_t *fqm =
    vec_elt_at_index (tm->frame_queue_mains, frame_queue_index);

  fq = handoff_queue_by_worker_index[index];
  if (fq != (vlib_frame_queue_t *) (~0))
    return fq;

  fq = fqm->vlib_frame_queues[index];
  ASSERT (fq);

  if (PREDICT_FALSE (fq->tail >= (fq->head_hint + queue_hi_thresh)))
    {
      /* a valid entry in the array will indicate the queue has reached
       * the specified threshold and is congested
       */
      handoff_queue_by_worker_index[index] = fq;
      fq->enqueue_full_events++;
      return fq;
    }

  return NULL;
}
static inline vlib_frame_queue_elt_t *
vlib_get_worker_handoff_queue_elt (u32 frame_queue_index,
                                   u32 vlib_worker_index,
                                   vlib_frame_queue_elt_t **
                                   handoff_queue_elt_by_worker_index)
{
  vlib_frame_queue_elt_t *elt;

  if (handoff_queue_elt_by_worker_index[vlib_worker_index])
    return handoff_queue_elt_by_worker_index[vlib_worker_index];

  elt = vlib_get_frame_queue_elt (frame_queue_index, vlib_worker_index);

  handoff_queue_elt_by_worker_index[vlib_worker_index] = elt;

  return elt;
}
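
/*
 * Illustrative sketch (editorial; fq_index, worker, bi, and the
 * elts_by_worker cache are hypothetical locals): a typical handoff
 * enqueue path caches one element per destination worker, appends
 * buffer indices, then publishes the element once it fills:
 *
 *   elt = vlib_get_worker_handoff_queue_elt (fq_index, worker,
 *                                            elts_by_worker);
 *   elt->buffer_index[elt->n_vectors++] = bi;
 *   if (elt->n_vectors == VLIB_FRAME_SIZE)
 *     {
 *       vlib_put_frame_queue_elt (elt);
 *       elts_by_worker[worker] = 0;
 *     }
 */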
u8 *vlib_thread_stack_init (uword thread_index);
int vlib_thread_cb_register (struct vlib_main_t *vm,
                             vlib_thread_callbacks_t * cb);
extern void *rpc_call_main_thread_cb_fn;

void
vlib_process_signal_event_mt_helper (vlib_process_signal_event_mt_args_t *
                                     args);
void vlib_rpc_call_main_thread (void *function, u8 * args, u32 size);
#endif /* included_vlib_threads_h */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */