}
vlib_frame_queue_t;
+/* Per-thread handoff-queue bookkeeping.  Member names indicate both
+   vectors are indexed by the destination thread index -- TODO confirm
+   against the handoff code that populates them. */
+typedef struct
+{
+ vlib_frame_queue_elt_t **handoff_queue_elt_by_thread_index;
+ vlib_frame_queue_t **congested_handoff_queue_by_thread_index;
+} vlib_frame_queue_per_thread_data_t;
+
typedef struct
{
u32 node_index;
+ /* Number of elements in each frame queue (name suggests queue depth;
+    verify where queues are allocated). */
+ u32 frame_queue_nelts;
+ /* High-water congestion threshold -- presumably compared against queue
+    occupancy to mark a queue congested; confirm at the point of use. */
+ u32 queue_hi_thresh;
+
vlib_frame_queue_t **vlib_frame_queues;
+ /* Per-thread handoff state, one entry per thread. */
+ vlib_frame_queue_per_thread_data_t *per_thread_data;
/* for frame queue tracing */
frame_queue_trace_t *frame_queue_traces;
frame_queue_nelt_counter_t *frame_queue_histogram;
} vlib_frame_queue_main_t;
+/* Argument bundle consumed by vlib_process_signal_event_mt_helper()
+   (declared later in this header): identifies the target process node,
+   an opaque event type, and one word of event data. */
+typedef struct
+{
+ uword node_index;
+ uword type_opaque;
+ uword data;
+} vlib_process_signal_event_mt_args_t;
+
/* Called early, in thread 0's context */
clib_error_t *vlib_thread_init (vlib_main_t * vm);
u8 *thread_prefix;
/* main thread lcore */
- u8 main_lcore;
+ u32 main_lcore;
/* Bitmap of available CPU cores */
uword *cpu_core_bitmap;
x.next = tm->next; \
tm->next = &x; \
} \
+static void __vlib_rm_thread_registration_##x (void) \
+ __attribute__((__destructor__)) ; \
+static void __vlib_rm_thread_registration_##x (void) \
+{ \
+ vlib_thread_main_t * tm = &vlib_thread_main; \
+ VLIB_REMOVE_FROM_LINKED_LIST (tm->next, &x, next); \
+} \
__VA_ARGS__ vlib_thread_registration_t x
always_inline u32
return vm;
}
+/* Return non-zero when the caller may safely do main-thread work:
+   either no worker threads exist, or the caller is thread 0 and the
+   workers are currently held (wait_at_barrier[0] is set). */
+static inline u8
+vlib_thread_is_main_w_barrier (void)
+{
+ return (!vlib_num_workers ()
+ || ((vlib_get_thread_index () == 0
+ && vlib_worker_threads->wait_at_barrier[0])));
+}
+
static inline void
vlib_put_frame_queue_elt (vlib_frame_queue_elt_t * hf)
{
}
u8 *vlib_thread_stack_init (uword thread_index);
-
int vlib_thread_cb_register (struct vlib_main_t *vm,
vlib_thread_callbacks_t * cb);
+extern void *rpc_call_main_thread_cb_fn;
+
+void
+vlib_process_signal_event_mt_helper (vlib_process_signal_event_mt_args_t *
+ args);
+void vlib_rpc_call_main_thread (void *function, u8 * args, u32 size);
#endif /* included_vlib_threads_h */