#include <vppinfra/format.h>
#include <vlib/vlib.h>
#include <vlib/threads.h>
+#include <vppinfra/tw_timer_1t_3w_1024sl_ov.h>
+#include <vlib/unix/unix.h>
#include <vlib/unix/cj.h>
CJ_GLOBAL_LOG_PROTOTYPE;
else
{
f = clib_mem_alloc_aligned_no_fail (n, VLIB_FRAME_ALIGN);
- f->cpu_index = vm->cpu_index;
fi = vlib_frame_index_no_check (vm, f);
}
/* Poison frame when debugging. */
if (CLIB_DEBUG > 0)
- {
- u32 save_cpu_index = f->cpu_index;
-
- memset (f, 0xfe, n);
-
- f->cpu_index = save_cpu_index;
- }
+ memset (f, 0xfe, n);
/* Insert magic number. */
{
vlib_frame_t *f;
u32 n_vectors_in_frame;
- if (DPDK == 0 && CLIB_DEBUG > 0)
+ if (vm->buffer_main->callbacks_registered == 0 && CLIB_DEBUG > 0)
vlib_put_next_frame_validate (vm, r, next_index, n_vectors_left);
nf = vlib_node_runtime_get_next_frame (vm, r, next_index);
* a dangling frame reference. Each thread has its own copy of
* the next_frames vector.
*/
- if (0 && r->cpu_index != next_runtime->cpu_index)
+ if (0 && r->thread_index != next_runtime->thread_index)
{
nf->frame_index = ~0;
nf->flags &= ~(VLIB_FRAME_PENDING | VLIB_FRAME_IS_ALLOCATED);
elog_buffer_capacity (em), chroot_file);
vlib_worker_thread_barrier_sync (vm);
- error = elog_write_file (em, chroot_file);
+ error = elog_write_file (em, chroot_file, 1 /* flush ring */ );
vlib_worker_thread_barrier_release (vm);
vec_free (chroot_file);
return error;
}
+void
+elog_post_mortem_dump (void)
+{
+ vlib_main_t *vm = &vlib_global_main;
+ elog_main_t *em = &vm->elog_main;
+ u8 *filename;
+ clib_error_t *error;
+
+ if (!vm->elog_post_mortem_dump)
+ return;
+
+ filename = format (0, "/tmp/elog_post_mortem.%d%c", getpid (), 0);
+ error = elog_write_file (em, (char *) filename, 1 /* flush ring */ );
+ if (error)
+ clib_error_report (error);
+ vec_free (filename);
+}
+
/* *INDENT-OFF* */
VLIB_CLI_COMMAND (elog_save_cli, static) = {
.path = "event-logger save",
: evm->node_call_elog_event_types,
node_index),
/* track */
- (vm->cpu_index ? &vlib_worker_threads[vm->cpu_index].
+ (vm->thread_index ? &vlib_worker_threads[vm->thread_index].
elog_track : &em->default_track),
/* data to log */ n_vectors);
}
+#if VLIB_BUFFER_TRACE_TRAJECTORY > 0
+void (*vlib_buffer_trace_trajectory_cb) (vlib_buffer_t * b, u32 node_index);
+void (*vlib_buffer_trace_trajectory_init_cb) (vlib_buffer_t * b);
+
void
-vlib_dump_context_trace (vlib_main_t * vm, u32 bi)
+vlib_buffer_trace_trajectory_init (vlib_buffer_t * b)
{
- vlib_node_main_t *vnm = &vm->node_main;
- vlib_buffer_t *b;
- u8 i, n;
-
- if (VLIB_BUFFER_TRACE_TRAJECTORY)
+ if (PREDICT_TRUE (vlib_buffer_trace_trajectory_init_cb != 0))
{
- b = vlib_get_buffer (vm, bi);
- n = b->pre_data[0];
-
- fformat (stderr, "Context trace for bi %d b 0x%llx, visited %d\n",
- bi, b, n);
-
- if (n == 0 || n > 20)
- {
- fformat (stderr, "n is unreasonable\n");
- return;
- }
-
-
- for (i = 0; i < n; i++)
- {
- u32 node_index;
+ (*vlib_buffer_trace_trajectory_init_cb) (b);
+ }
+}
- node_index = b->pre_data[i + 1];
+#endif
- if (node_index > vec_len (vnm->nodes))
- {
- fformat (stderr, "Skip bogus node index %d\n", node_index);
- continue;
- }
-
- fformat (stderr, "%v (%d)\n", vnm->nodes[node_index]->name,
- node_index);
- }
- }
- else
+static inline void
+add_trajectory_trace (vlib_buffer_t * b, u32 node_index)
+{
+#if VLIB_BUFFER_TRACE_TRAJECTORY > 0
+ if (PREDICT_TRUE (vlib_buffer_trace_trajectory_cb != 0))
{
- fformat (stderr,
- "in vlib/buffers.h, #define VLIB_BUFFER_TRACE_TRAJECTORY 1\n");
+ (*vlib_buffer_trace_trajectory_cb) (b, node_index);
}
+#endif
}
-
-/* static_always_inline */ u64
+static_always_inline u64
dispatch_node (vlib_main_t * vm,
vlib_node_runtime_t * node,
vlib_node_type_t type,
vm->cpu_time_last_node_dispatch = last_time_stamp;
- if (1 /* || vm->cpu_index == node->cpu_index */ )
+ if (1 /* || vm->thread_index == node->thread_index */ )
{
vlib_main_t *stat_vm;
if (VLIB_BUFFER_TRACE_TRAJECTORY && frame)
{
int i;
- int log_index;
u32 *from;
from = vlib_frame_vector_args (frame);
for (i = 0; i < frame->n_vectors; i++)
{
vlib_buffer_t *b = vlib_get_buffer (vm, from[i]);
- ASSERT (b->pre_data[0] < 32);
- log_index = b->pre_data[0]++ + 1;
- b->pre_data[log_index] = node->node_index;
+ add_trajectory_trace (b, node->node_index);
}
n = node->function (vm, node, frame);
}
/* When in interrupt mode and vector rate crosses threshold switch to
polling mode. */
- if ((DPDK == 0 && dispatch_state == VLIB_NODE_STATE_INTERRUPT)
- || (DPDK == 0 && dispatch_state == VLIB_NODE_STATE_POLLING
+ if ((dispatch_state == VLIB_NODE_STATE_INTERRUPT)
+ || (dispatch_state == VLIB_NODE_STATE_POLLING
&& (node->flags
& VLIB_NODE_FLAG_SWITCH_FROM_INTERRUPT_TO_POLLING_MODE)))
{
+#ifdef DISPATCH_NODE_ELOG_REQUIRED
ELOG_TYPE_DECLARE (e) =
{
.function = (char *) __FUNCTION__,.format =
{
u32 node_name, vector_length, is_polling;
} *ed;
+ vlib_worker_thread_t *w = vlib_worker_threads + vm->thread_index;
+#endif
- if (dispatch_state == VLIB_NODE_STATE_INTERRUPT
- && v >= nm->polling_threshold_vector_length)
+ if ((dispatch_state == VLIB_NODE_STATE_INTERRUPT
+ && v >= nm->polling_threshold_vector_length) &&
+ !(node->flags &
+ VLIB_NODE_FLAG_SWITCH_FROM_INTERRUPT_TO_POLLING_MODE))
{
vlib_node_t *n = vlib_get_node (vm, node->node_index);
n->state = VLIB_NODE_STATE_POLLING;
node->state = VLIB_NODE_STATE_POLLING;
- ASSERT (!
- (node->flags &
- VLIB_NODE_FLAG_SWITCH_FROM_INTERRUPT_TO_POLLING_MODE));
node->flags &=
~VLIB_NODE_FLAG_SWITCH_FROM_POLLING_TO_INTERRUPT_MODE;
node->flags |=
nm->input_node_counts_by_state[VLIB_NODE_STATE_INTERRUPT] -= 1;
nm->input_node_counts_by_state[VLIB_NODE_STATE_POLLING] += 1;
- ed = ELOG_DATA (&vm->elog_main, e);
+#ifdef DISPATCH_NODE_ELOG_REQUIRED
+ ed = ELOG_TRACK_DATA (&vlib_global_main.elog_main, e,
+ w->elog_track);
ed->node_name = n->name_elog_string;
ed->vector_length = v;
ed->is_polling = 1;
+#endif
}
else if (dispatch_state == VLIB_NODE_STATE_POLLING
&& v <= nm->interrupt_threshold_vector_length)
{
node->flags |=
VLIB_NODE_FLAG_SWITCH_FROM_POLLING_TO_INTERRUPT_MODE;
- ed = ELOG_DATA (&vm->elog_main, e);
+#ifdef DISPATCH_NODE_ELOG_REQUIRED
+ ed = ELOG_TRACK_DATA (&vlib_global_main.elog_main, e,
+ w->elog_track);
ed->node_name = n->name_elog_string;
ed->vector_length = v;
ed->is_polling = 0;
+#endif
}
}
}
return t;
}
-/* static */ u64
-dispatch_pending_node (vlib_main_t * vm,
- vlib_pending_frame_t * p, u64 last_time_stamp)
+static u64
+dispatch_pending_node (vlib_main_t * vm, uword pending_frame_index,
+ u64 last_time_stamp)
{
vlib_node_main_t *nm = &vm->node_main;
vlib_frame_t *f;
vlib_next_frame_t *nf, nf_dummy;
vlib_node_runtime_t *n;
u32 restore_frame_index;
+ vlib_pending_frame_t *p;
+
+ /* See comment below about dangling references to nm->pending_frames */
+ p = nm->pending_frames + pending_frame_index;
n = vec_elt_at_index (nm->nodes_by_type[VLIB_NODE_TYPE_INTERNAL],
p->node_runtime_index);
/* Frame is ready to be used again, so restore it. */
if (restore_frame_index != ~0)
{
- /* we musn't restore a frame that is flagged to be freed. This shouldn't
- happen since frames to be freed post dispatch are those used
- when the to-node frame becomes full i.e. they form a sort of queue of
- frames to a single node. If we get here then the to-node frame and the
- pending frame *were* the same, and so we removed the to-node frame.
- Therefore this frame is no longer part of the queue for that node
- and hence it cannot be it's overspill.
+ /*
+ * We mustn't restore a frame that is flagged to be freed. This
+ * shouldn't happen since frames to be freed post dispatch are
+ * those used when the to-node frame becomes full i.e. they form a
+ * sort of queue of frames to a single node. If we get here then
+ * the to-node frame and the pending frame *were* the same, and so
+ * we removed the to-node frame. Therefore this frame is no
+ * longer part of the queue for that node and hence it cannot be
+ * its overspill.
*/
ASSERT (!(f->flags & VLIB_FRAME_FREE_AFTER_DISPATCH));
- /* p->next_frame_index can change during node dispatch if node
- function decides to change graph hook up. */
+ /*
+ * NB: dispatching node n can result in the creation and scheduling
+ * of new frames, and hence in the reallocation of nm->pending_frames.
+ * Recompute p, or no supper. This was broken for more than 10 years.
+ */
+ p = nm->pending_frames + pending_frame_index;
+
+ /*
+ * p->next_frame_index can change during node dispatch if node
+ * function decides to change graph hook up.
+ */
nf = vec_elt_at_index (nm->next_frames, p->next_frame_index);
nf->flags |= VLIB_FRAME_IS_ALLOCATED;
p->suspended_process_frame_index = pf - nm->suspended_process_frames;
if (p->flags & VLIB_PROCESS_IS_SUSPENDED_WAITING_FOR_CLOCK)
- timing_wheel_insert (&nm->timing_wheel, p->resume_cpu_time,
- vlib_timing_wheel_data_set_suspended_process
- (node->runtime_index));
+ {
+ TWT (tw_timer_wheel) * tw =
+ (TWT (tw_timer_wheel) *) nm->timing_wheel;
+ p->stop_timer_handle =
+ TW (tw_timer_start) (tw,
+ vlib_timing_wheel_data_set_suspended_process
+ (node->runtime_index) /* [sic] pool idex */ ,
+ 0 /* timer_id */ ,
+ p->resume_clock_interval);
+ }
}
else
p->flags &= ~VLIB_PROCESS_IS_RUNNING;
n_vectors = 0;
p->n_suspends += 1;
if (p->flags & VLIB_PROCESS_IS_SUSPENDED_WAITING_FOR_CLOCK)
- timing_wheel_insert (&nm->timing_wheel, p->resume_cpu_time,
- vlib_timing_wheel_data_set_suspended_process
- (node->runtime_index));
+ {
+ p->stop_timer_handle =
+ TW (tw_timer_start) ((TWT (tw_timer_wheel) *) nm->timing_wheel,
+ vlib_timing_wheel_data_set_suspended_process
+ (node->runtime_index) /* [sic] pool idex */ ,
+ 0 /* timer_id */ ,
+ p->resume_clock_interval);
+ }
}
else
{
return t;
}
-static void
-vlib_main_loop (vlib_main_t * vm)
+void vl_api_send_pending_rpc_requests (vlib_main_t *) __attribute__ ((weak));
+void
+vl_api_send_pending_rpc_requests (vlib_main_t * vm)
+{
+}
+
+
+static_always_inline void
+vlib_main_or_worker_loop (vlib_main_t * vm, int is_main)
{
vlib_node_main_t *nm = &vm->node_main;
+ vlib_thread_main_t *tm = vlib_get_thread_main ();
uword i;
u64 cpu_time_now;
+ vlib_frame_queue_main_t *fqm;
+ u32 *last_node_runtime_indices = 0;
/* Initialize pending node vector. */
- vec_resize (nm->pending_frames, 32);
- _vec_len (nm->pending_frames) = 0;
+ if (is_main)
+ {
+ vec_resize (nm->pending_frames, 32);
+ _vec_len (nm->pending_frames) = 0;
+ }
/* Mark time of main loop start. */
- cpu_time_now = vm->clib_time.last_cpu_time;
- vm->cpu_time_main_loop_start = cpu_time_now;
-
- /* Arrange for first level of timing wheel to cover times we care
- most about. */
- nm->timing_wheel.min_sched_time = 10e-6;
- nm->timing_wheel.max_sched_time = 10e-3;
- timing_wheel_init (&nm->timing_wheel,
- cpu_time_now, vm->clib_time.clocks_per_second);
+ if (is_main)
+ {
+ cpu_time_now = vm->clib_time.last_cpu_time;
+ vm->cpu_time_main_loop_start = cpu_time_now;
+ }
+ else
+ cpu_time_now = clib_cpu_time_now ();
- /* Pre-allocate expired nodes. */
- vec_alloc (nm->data_from_advancing_timing_wheel, 32);
+ /* Pre-allocate interrupt runtime indices and lock. */
vec_alloc (nm->pending_interrupt_node_runtime_indices, 32);
+ vec_alloc (last_node_runtime_indices, 32);
+ if (!is_main)
+ clib_spinlock_init (&nm->pending_interrupt_lock);
+ /* Pre-allocate expired nodes. */
if (!nm->polling_threshold_vector_length)
nm->polling_threshold_vector_length = 10;
if (!nm->interrupt_threshold_vector_length)
nm->interrupt_threshold_vector_length = 5;
- nm->current_process_index = ~0;
-
/* Start all processes. */
- {
- uword i;
- for (i = 0; i < vec_len (nm->processes); i++)
- cpu_time_now =
- dispatch_process (vm, nm->processes[i], /* frame */ 0, cpu_time_now);
- }
+ if (is_main)
+ {
+ uword i;
+ nm->current_process_index = ~0;
+ for (i = 0; i < vec_len (nm->processes); i++)
+ cpu_time_now = dispatch_process (vm, nm->processes[i], /* frame */ 0,
+ cpu_time_now);
+ }
while (1)
{
vlib_node_runtime_t *n;
+ if (PREDICT_FALSE (_vec_len (vm->pending_rpc_requests) > 0))
+ vl_api_send_pending_rpc_requests (vm);
+
+ if (!is_main)
+ {
+ vlib_worker_thread_barrier_check ();
+ vec_foreach (fqm, tm->frame_queue_mains)
+ vlib_frame_queue_dequeue (vm, fqm);
+ }
+
/* Process pre-input nodes. */
- vec_foreach (n, nm->nodes_by_type[VLIB_NODE_TYPE_PRE_INPUT])
- cpu_time_now = dispatch_node (vm, n,
- VLIB_NODE_TYPE_PRE_INPUT,
- VLIB_NODE_STATE_POLLING,
- /* frame */ 0,
- cpu_time_now);
+ if (is_main)
+ vec_foreach (n, nm->nodes_by_type[VLIB_NODE_TYPE_PRE_INPUT])
+ cpu_time_now = dispatch_node (vm, n,
+ VLIB_NODE_TYPE_PRE_INPUT,
+ VLIB_NODE_STATE_POLLING,
+ /* frame */ 0,
+ cpu_time_now);
/* Next process input nodes. */
vec_foreach (n, nm->nodes_by_type[VLIB_NODE_TYPE_INPUT])
/* frame */ 0,
cpu_time_now);
- if (PREDICT_TRUE (vm->queue_signal_pending == 0))
+ if (PREDICT_TRUE (is_main && vm->queue_signal_pending == 0))
vm->queue_signal_callback (vm);
/* Next handle interrupts. */
{
+ /* unlocked read, for performance */
uword l = _vec_len (nm->pending_interrupt_node_runtime_indices);
uword i;
- if (l > 0)
+ if (PREDICT_FALSE (l > 0))
{
- _vec_len (nm->pending_interrupt_node_runtime_indices) = 0;
+ u32 *tmp;
+ if (!is_main)
+ {
+ clib_spinlock_lock (&nm->pending_interrupt_lock);
+ /* Re-read w/ lock held, in case another thread added an item */
+ l = _vec_len (nm->pending_interrupt_node_runtime_indices);
+ }
+
+ tmp = nm->pending_interrupt_node_runtime_indices;
+ nm->pending_interrupt_node_runtime_indices =
+ last_node_runtime_indices;
+ last_node_runtime_indices = tmp;
+ _vec_len (last_node_runtime_indices) = 0;
+ if (!is_main)
+ clib_spinlock_unlock (&nm->pending_interrupt_lock);
for (i = 0; i < l; i++)
{
n = vec_elt_at_index (nm->nodes_by_type[VLIB_NODE_TYPE_INPUT],
- nm->
- pending_interrupt_node_runtime_indices
- [i]);
+ last_node_runtime_indices[i]);
cpu_time_now =
dispatch_node (vm, n, VLIB_NODE_TYPE_INPUT,
VLIB_NODE_STATE_INTERRUPT,
}
}
- /* Check if process nodes have expired from timing wheel. */
- nm->data_from_advancing_timing_wheel
- = timing_wheel_advance (&nm->timing_wheel, cpu_time_now,
- nm->data_from_advancing_timing_wheel,
- &nm->cpu_time_next_process_ready);
-
- ASSERT (nm->data_from_advancing_timing_wheel != 0);
- if (PREDICT_FALSE (_vec_len (nm->data_from_advancing_timing_wheel) > 0))
+ if (is_main)
{
- uword i;
+ /* Check if process nodes have expired from timing wheel. */
+ ASSERT (nm->data_from_advancing_timing_wheel != 0);
+
+ nm->data_from_advancing_timing_wheel =
+ TW (tw_timer_expire_timers_vec)
+ ((TWT (tw_timer_wheel) *) nm->timing_wheel, vlib_time_now (vm),
+ nm->data_from_advancing_timing_wheel);
- processes_timing_wheel_data:
- for (i = 0; i < _vec_len (nm->data_from_advancing_timing_wheel);
- i++)
+ ASSERT (nm->data_from_advancing_timing_wheel != 0);
+
+ if (PREDICT_FALSE
+ (_vec_len (nm->data_from_advancing_timing_wheel) > 0))
{
- u32 d = nm->data_from_advancing_timing_wheel[i];
- u32 di = vlib_timing_wheel_data_get_index (d);
+ uword i;
- if (vlib_timing_wheel_data_is_timed_event (d))
+ processes_timing_wheel_data:
+ for (i = 0; i < _vec_len (nm->data_from_advancing_timing_wheel);
+ i++)
{
- vlib_signal_timed_event_data_t *te =
- pool_elt_at_index (nm->signal_timed_event_data_pool, di);
- vlib_node_t *n = vlib_get_node (vm, te->process_node_index);
- vlib_process_t *p =
- vec_elt (nm->processes, n->runtime_index);
- void *data;
- data =
- vlib_process_signal_event_helper (nm, n, p,
- te->event_type_index,
- te->n_data_elts,
- te->n_data_elt_bytes);
- if (te->n_data_bytes < sizeof (te->inline_event_data))
- clib_memcpy (data, te->inline_event_data,
- te->n_data_bytes);
+ u32 d = nm->data_from_advancing_timing_wheel[i];
+ u32 di = vlib_timing_wheel_data_get_index (d);
+
+ if (vlib_timing_wheel_data_is_timed_event (d))
+ {
+ vlib_signal_timed_event_data_t *te =
+ pool_elt_at_index (nm->signal_timed_event_data_pool,
+ di);
+ vlib_node_t *n =
+ vlib_get_node (vm, te->process_node_index);
+ vlib_process_t *p =
+ vec_elt (nm->processes, n->runtime_index);
+ void *data;
+ data =
+ vlib_process_signal_event_helper (nm, n, p,
+ te->event_type_index,
+ te->n_data_elts,
+ te->n_data_elt_bytes);
+ if (te->n_data_bytes < sizeof (te->inline_event_data))
+ clib_memcpy (data, te->inline_event_data,
+ te->n_data_bytes);
+ else
+ {
+ clib_memcpy (data, te->event_data_as_vector,
+ te->n_data_bytes);
+ vec_free (te->event_data_as_vector);
+ }
+ pool_put (nm->signal_timed_event_data_pool, te);
+ }
else
{
- clib_memcpy (data, te->event_data_as_vector,
- te->n_data_bytes);
- vec_free (te->event_data_as_vector);
+ cpu_time_now = clib_cpu_time_now ();
+ cpu_time_now =
+ dispatch_suspended_process (vm, di, cpu_time_now);
}
- pool_put (nm->signal_timed_event_data_pool, te);
- }
- else
- {
- cpu_time_now = clib_cpu_time_now ();
- cpu_time_now =
- dispatch_suspended_process (vm, di, cpu_time_now);
}
+ _vec_len (nm->data_from_advancing_timing_wheel) = 0;
}
-
- /* Reset vector. */
- _vec_len (nm->data_from_advancing_timing_wheel) = 0;
}
/* Input nodes may have added work to the pending vector.
Process pending vector until there is nothing left.
All pending vectors will be processed from input -> output. */
for (i = 0; i < _vec_len (nm->pending_frames); i++)
- cpu_time_now = dispatch_pending_node (vm, nm->pending_frames + i,
- cpu_time_now);
+ cpu_time_now = dispatch_pending_node (vm, i, cpu_time_now);
/* Reset pending vector for next iteration. */
_vec_len (nm->pending_frames) = 0;
/* Pending internal nodes may resume processes. */
- if (_vec_len (nm->data_from_advancing_timing_wheel) > 0)
+ if (is_main && _vec_len (nm->data_from_advancing_timing_wheel) > 0)
goto processes_timing_wheel_data;
vlib_increment_main_loop_counter (vm);
}
}
+static void
+vlib_main_loop (vlib_main_t * vm)
+{
+ vlib_main_or_worker_loop (vm, /* is_main */ 1);
+}
+
+void
+vlib_worker_loop (vlib_main_t * vm)
+{
+ vlib_main_or_worker_loop (vm, /* is_main */ 0);
+}
+
vlib_main_t vlib_global_main;
static clib_error_t *
else if (unformat (input, "elog-events %d",
&vm->elog_main.event_ring_size))
;
+ else if (unformat (input, "elog-post-mortem-dump"))
+ vm->elog_post_mortem_dump = 1;
else
return unformat_parse_error (input);
}
vlib_main (vlib_main_t * volatile vm, unformat_input_t * input)
{
clib_error_t *volatile error;
+ vlib_node_main_t *nm = &vm->node_main;
vm->queue_signal_callback = dummy_queue_signal_callback;
if (!vm->name)
vm->name = "VLIB";
- vec_validate (vm->buffer_main, 0);
+ if ((error = unix_physmem_init (vm)))
+ {
+ clib_error_report (error);
+ goto done;
+ }
+
+ if ((error = vlib_buffer_main_init (vm)))
+ {
+ clib_error_report (error);
+ goto done;
+ }
if ((error = vlib_thread_init (vm)))
{
VLIB_BUFFER_DEFAULT_FREE_LIST_BYTES,
"default");
+ nm->timing_wheel = clib_mem_alloc_aligned (sizeof (TWT (tw_timer_wheel)),
+ CLIB_CACHE_LINE_BYTES);
+
+ vec_validate (nm->data_from_advancing_timing_wheel, 10);
+ _vec_len (nm->data_from_advancing_timing_wheel) = 0;
+
+ /* Create the process timing wheel */
+ TW (tw_timer_wheel_init) ((TWT (tw_timer_wheel) *) nm->timing_wheel,
+ 0 /* no callback */ ,
+ 10e-6 /* timer period 10us */ ,
+ ~0 /* max expirations per call */ );
+
+ vec_validate (vm->pending_rpc_requests, 0);
+ _vec_len (vm->pending_rpc_requests) = 0;
+
switch (clib_setjmp (&vm->main_loop_exit, VLIB_MAIN_LOOP_EXIT_NONE))
{
case VLIB_MAIN_LOOP_EXIT_NONE: