#include <vlib/main.h>
#include <linux/sched.h>
+/*
+ * To enable detailed tracing of barrier usage, including call stacks
+ * and timings, define BARRIER_TRACING here or in the relevant build
+ * TAGS. If CLIB_DEBUG is also defined, timings will _not_ be
+ * representative of normal code execution.
+ */
+
+// #define BARRIER_TRACING 1
+
+/*
+ * Two options for barrier tracing output: syslog (the default) and
+ * elog. Define BARRIER_TRACING_ELOG to use the event logger.
+ */
+
+// #define BARRIER_TRACING_ELOG 1
+
extern vlib_main_t **vlib_mains;
void vlib_set_thread_name (char *name);
#define VLIB_CPU_MASK (VLIB_MAX_CPUS - 1) /* 0x3f, max */
#define VLIB_OFFSET_MASK (~VLIB_CPU_MASK)
-#define VLIB_LOG2_THREAD_STACK_SIZE (20)
+#define VLIB_LOG2_THREAD_STACK_SIZE (21)
#define VLIB_THREAD_STACK_SIZE (1<<VLIB_LOG2_THREAD_STACK_SIZE)
typedef enum
vlib_thread_registration_t *registration;
u8 *name;
u64 barrier_sync_count;
+#ifdef BARRIER_TRACING
+ const char *barrier_caller;
+ const char *barrier_context;
+#endif
+ volatile u32 *node_reforks_required;
long lwp;
int lcore_id;
frame_queue_nelt_counter_t *frame_queue_histogram;
} vlib_frame_queue_main_t;
+typedef struct
+{
+ uword node_index;
+ uword type_opaque;
+ uword data;
+} vlib_process_signal_event_mt_args_t;
+
/* Called early, in thread 0's context */
clib_error_t *vlib_thread_init (vlib_main_t * vm);
-vlib_worker_thread_t *vlib_alloc_thread (vlib_main_t * vm);
-
int vlib_frame_queue_enqueue (vlib_main_t * vm, u32 node_runtime_index,
u32 frame_queue_index, vlib_frame_t * frame,
vlib_frame_queue_msg_type_t type);
-int vlib_frame_queue_dequeue (int thread_id,
- vlib_main_t * vm, vlib_node_main_t * nm);
-
-u64 dispatch_node (vlib_main_t * vm,
- vlib_node_runtime_t * node,
- vlib_node_type_t type,
- vlib_node_state_t dispatch_state,
- vlib_frame_t * frame, u64 last_time_stamp);
-
-u64 dispatch_pending_node (vlib_main_t * vm,
- vlib_pending_frame_t * p, u64 last_time_stamp);
+int
+vlib_frame_queue_dequeue (vlib_main_t * vm, vlib_frame_queue_main_t * fqm);
void vlib_worker_thread_node_runtime_update (void);
#define BARRIER_SYNC_TIMEOUT (1.0)
#endif
-void vlib_worker_thread_barrier_sync (vlib_main_t * vm);
+#ifdef BARRIER_TRACING
+#define vlib_worker_thread_barrier_sync(X)                             \
+  {                                                                    \
+    /* Record the caller so the barrier trace can report it. */        \
+    vlib_worker_threads[0].barrier_caller = __FUNCTION__;              \
+    vlib_worker_thread_barrier_sync_int (X);                           \
+  }
+#else
+#define vlib_worker_thread_barrier_sync(X) vlib_worker_thread_barrier_sync_int(X)
+#endif
+
+void vlib_worker_thread_barrier_sync_int (vlib_main_t * vm);
void vlib_worker_thread_barrier_release (vlib_main_t * vm);
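+
+/*
+ * Usage sketch (hypothetical caller): the sync/release pair brackets
+ * updates to state shared with the worker threads, e.g.
+ *
+ *   vlib_worker_thread_barrier_sync (vm);
+ *   // ... modify data structures that workers read ...
+ *   vlib_worker_thread_barrier_release (vm);
+ */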
+void vlib_worker_thread_node_refork (void);
+
+static_always_inline uword
+vlib_get_thread_index (void)
+{
+ return __os_thread_index;
+}
always_inline void
vlib_smp_unsafe_warning (void)
{
if (CLIB_DEBUG > 0)
{
- if (os_get_cpu_number ())
+ if (vlib_get_thread_index ())
fformat (stderr, "%s: SMP unsafe warning...\n", __FUNCTION__);
}
}
void vlib_worker_thread_fork_fixup (vlib_fork_fixup_t which);
-static inline void
-vlib_worker_thread_barrier_check (void)
-{
- if (PREDICT_FALSE (*vlib_worker_threads->wait_at_barrier))
- {
- clib_smp_atomic_add (vlib_worker_threads->workers_at_barrier, 1);
- while (*vlib_worker_threads->wait_at_barrier)
- ;
- clib_smp_atomic_add (vlib_worker_threads->workers_at_barrier, -1);
- }
-}
-
#define foreach_vlib_main(body) \
do { \
vlib_main_t ** __vlib_mains = 0, *this_vlib_main; \
for (ii = 0; ii < vec_len (vlib_mains); ii++) \
{ \
this_vlib_main = vlib_mains[ii]; \
+      /* Only thread 0 may run this while workers are unparked. */ \
+      ASSERT (ii == 0 ||                                        \
+	      this_vlib_main->parked_at_barrier == 1);           \
if (this_vlib_main) \
vec_add1 (__vlib_mains, this_vlib_main); \
} \
extern vlib_thread_main_t vlib_thread_main;
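+
+/*
+ * Usage sketch for foreach_vlib_main () above: the body is a statement
+ * block with `this_vlib_main` in scope, e.g.
+ *
+ *   foreach_vlib_main (({
+ *     this_vlib_main->some_counter = 0;   // hypothetical field
+ *   }));
+ */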
+#include <vlib/global_funcs.h>
+
#define VLIB_REGISTER_THREAD(x,...) \
__VA_ARGS__ vlib_thread_registration_t x; \
static void __vlib_add_thread_registration_##x (void) \
x.next = tm->next; \
tm->next = &x; \
} \
+static void __vlib_rm_thread_registration_##x (void) \
+ __attribute__((__destructor__)) ; \
+static void __vlib_rm_thread_registration_##x (void) \
+{ \
+ vlib_thread_main_t * tm = &vlib_thread_main; \
+ VLIB_REMOVE_FROM_LINKED_LIST (tm->next, &x, next); \
+} \
__VA_ARGS__ vlib_thread_registration_t x
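+
+/*
+ * Usage sketch (hypothetical registration; fields follow
+ * vlib_thread_registration_t):
+ *
+ *   VLIB_REGISTER_THREAD (my_thread_reg, static) = {
+ *     .name = "my-thread",
+ *     .short_name = "mt",
+ *     .function = my_thread_fn,
+ *   };
+ */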
always_inline u32
}
always_inline u32
-vlib_get_worker_cpu_index (u32 worker_index)
+vlib_get_worker_thread_index (u32 worker_index)
{
return worker_index + 1;
}
always_inline u32
-vlib_get_worker_index (u32 cpu_index)
+vlib_get_worker_index (u32 thread_index)
{
- return cpu_index - 1;
+ return thread_index - 1;
}
always_inline u32
vlib_get_current_worker_index ()
{
- return os_get_cpu_number () - 1;
+ return vlib_get_thread_index () - 1;
+}
+
+static inline void
+vlib_worker_thread_barrier_check (void)
+{
+  if (PREDICT_FALSE (*vlib_worker_threads->wait_at_barrier))
+    {
+      vlib_main_t *vm;
+      clib_smp_atomic_add (vlib_worker_threads->workers_at_barrier, 1);
+      if (CLIB_DEBUG > 0)
+        {
+          vm = vlib_get_main ();
+          vm->parked_at_barrier = 1;
+        }
+      /* Spin until the main thread drops the barrier. */
+      while (*vlib_worker_threads->wait_at_barrier)
+        ;
+      if (CLIB_DEBUG > 0)
+        vm->parked_at_barrier = 0;
+      clib_smp_atomic_add (vlib_worker_threads->workers_at_barrier, -1);
+
+      /*
+       * If the main thread changed the node graph while we were
+       * parked, re-fork this worker's node runtime state, then wait
+       * for the remaining workers to do the same.
+       */
+      if (PREDICT_FALSE (*vlib_worker_threads->node_reforks_required))
+        {
+          vlib_worker_thread_node_refork ();
+          clib_smp_atomic_add (vlib_worker_threads->node_reforks_required,
+                               -1);
+          while (*vlib_worker_threads->node_reforks_required)
+            ;
+        }
+    }
}
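+
+/*
+ * Workers poll the barrier from their dispatch loop; a minimal sketch
+ * (the real loop lives in the worker thread function):
+ *
+ *   while (1)
+ *     {
+ *       vlib_worker_thread_barrier_check ();
+ *       // ... dispatch frames ...
+ *     }
+ */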
always_inline vlib_main_t *
return elt;
}
+u8 *vlib_thread_stack_init (uword thread_index);
int vlib_thread_cb_register (struct vlib_main_t *vm,
vlib_thread_callbacks_t * cb);
+extern void *rpc_call_main_thread_cb_fn;
+
+void
+vlib_process_signal_event_mt_helper (vlib_process_signal_event_mt_args_t *
+ args);
+void vlib_rpc_call_main_thread (void *function, u8 * args, u32 size);
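+
+/*
+ * Usage sketch (hypothetical caller): signal a process node event on
+ * the main thread from a worker via the RPC path, e.g.
+ *
+ *   vlib_process_signal_event_mt_args_t args = {
+ *     .node_index = my_node_index,     // hypothetical
+ *     .type_opaque = MY_EVENT,         // hypothetical
+ *     .data = 0,
+ *   };
+ *   vlib_rpc_call_main_thread (vlib_process_signal_event_mt_helper,
+ *                              (u8 *) &args, sizeof (args));
+ */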
#endif /* included_vlib_threads_h */