#include <vppinfra/tw_timer_1t_3w_1024sl_ov.h>
#include <vlib/unix/unix.h>
-#include <vlib/unix/cj.h>
-
-CJ_GLOBAL_LOG_PROTOTYPE;
/* Actually allocate a few extra slots of vector data to support
speculative vector enqueues which overflow vector data in next frame. */
return vlib_get_frame (vm, f);
}
+/* Debug-only sanity check on a frame's buffer indices.
+ *
+ * Scans the frame's vector of u32 buffer indices and asserts if it finds
+ * an obviously-bogus value: index 0 (never a valid buffer index here) or
+ * the 0xfefefefe poison pattern.  Compiles to nothing unless CLIB_DEBUG
+ * is enabled, so it is free in production images. */
+static inline void
+vlib_validate_frame_indices (vlib_frame_t * f)
+{
+ if (CLIB_DEBUG > 0)
+ {
+ int i;
+ u32 *from = vlib_frame_vector_args (f);
+
+ /* Check for bad buffer index values */
+ for (i = 0; i < f->n_vectors; i++)
+ {
+ if (from[i] == 0)
+ {
+ clib_warning ("BUG: buffer index 0 at index %d", i);
+ ASSERT (0);
+ }
+ else if (from[i] == 0xfefefefe)
+ {
+ clib_warning ("BUG: frame poison pattern at index %d", i);
+ ASSERT (0);
+ }
+ }
+ }
+}
+
void
vlib_put_frame_to_node (vlib_main_t * vm, u32 to_node_index, vlib_frame_t * f)
{
if (f->n_vectors == 0)
return;
+ vlib_validate_frame_indices (f);
+
to_node = vlib_get_node (vm, to_node_index);
vec_add2 (vm->node_main.pending_frames, p, 1);
f = vlib_get_frame (vm, nf->frame);
ASSERT (n_vectors_left <= VLIB_FRAME_SIZE);
+
+ vlib_validate_frame_indices (f);
+
n_after = VLIB_FRAME_SIZE - n_vectors_left;
n_before = f->n_vectors;
+/* Fold a node runtime's interval counters (calls / vectors / clocks
+ * accumulated since the last overflow) into the owning vlib_node_t's
+ * stats_total, then zero the interval counters.  This patch removes the
+ * inline perf-counter (n_ticks0/n_ticks1) accounting from the stats path. */
never_inline void
vlib_node_runtime_sync_stats (vlib_main_t * vm,
 vlib_node_runtime_t * r,
- uword n_calls, uword n_vectors, uword n_clocks,
- uword n_ticks0, uword n_ticks1)
+ uword n_calls, uword n_vectors, uword n_clocks)
{
 vlib_node_t *n = vlib_get_node (vm, r->node_index);
 n->stats_total.calls += n_calls + r->calls_since_last_overflow;
 n->stats_total.vectors += n_vectors + r->vectors_since_last_overflow;
 n->stats_total.clocks += n_clocks + r->clocks_since_last_overflow;
- n->stats_total.perf_counter0_ticks += n_ticks0 +
- r->perf_counter0_ticks_since_last_overflow;
- n->stats_total.perf_counter1_ticks += n_ticks1 +
- r->perf_counter1_ticks_since_last_overflow;
- n->stats_total.perf_counter_vectors += n_vectors +
- r->perf_counter_vectors_since_last_overflow;
+ /* max_clock is copied, not accumulated: it is a high-water mark. */
 n->stats_total.max_clock = r->max_clock;
 n->stats_total.max_clock_n = r->max_clock_n;
 r->calls_since_last_overflow = 0;
 r->vectors_since_last_overflow = 0;
 r->clocks_since_last_overflow = 0;
- r->perf_counter0_ticks_since_last_overflow = 0ULL;
- r->perf_counter1_ticks_since_last_overflow = 0ULL;
- r->perf_counter_vectors_since_last_overflow = 0ULL;
}
+/* Process-node variant of the stats sync: delegate the common counters to
+ * vlib_node_runtime_sync_stats (), then additionally fold the process's
+ * suspend count into stats_total and reset it. */
always_inline void __attribute__ ((unused))
vlib_process_sync_stats (vlib_main_t * vm,
 vlib_process_t * p,
- uword n_calls, uword n_vectors, uword n_clocks,
- uword n_ticks0, uword n_ticks1)
+ uword n_calls, uword n_vectors, uword n_clocks)
{
 vlib_node_runtime_t *rt = &p->node_runtime;
 vlib_node_t *n = vlib_get_node (vm, rt->node_index);
- vlib_node_runtime_sync_stats (vm, rt, n_calls, n_vectors, n_clocks,
- n_ticks0, n_ticks1);
+ vlib_node_runtime_sync_stats (vm, rt, n_calls, n_vectors, n_clocks);
 n->stats_total.suspends += p->n_suspends;
 p->n_suspends = 0;
}
vec_elt_at_index (vm->node_main.nodes_by_type[n->type],
n->runtime_index);
- vlib_node_runtime_sync_stats (vm, rt, 0, 0, 0, 0, 0);
+ vlib_node_runtime_sync_stats (vm, rt, 0, 0, 0);
/* Sync up runtime next frame vector counters with main node structure. */
{
vlib_node_runtime_update_stats (vlib_main_t * vm,
vlib_node_runtime_t * node,
uword n_calls,
- uword n_vectors, uword n_clocks,
- uword n_ticks0, uword n_ticks1)
+ uword n_vectors, uword n_clocks)
{
u32 ca0, ca1, v0, v1, cl0, cl1, r;
- u32 ptick00, ptick01, ptick10, ptick11, pvec0, pvec1;
cl0 = cl1 = node->clocks_since_last_overflow;
ca0 = ca1 = node->calls_since_last_overflow;
v0 = v1 = node->vectors_since_last_overflow;
- ptick00 = ptick01 = node->perf_counter0_ticks_since_last_overflow;
- ptick10 = ptick11 = node->perf_counter1_ticks_since_last_overflow;
- pvec0 = pvec1 = node->perf_counter_vectors_since_last_overflow;
ca1 = ca0 + n_calls;
v1 = v0 + n_vectors;
cl1 = cl0 + n_clocks;
- ptick01 = ptick00 + n_ticks0;
- ptick11 = ptick10 + n_ticks1;
- pvec1 = pvec0 + n_vectors;
node->calls_since_last_overflow = ca1;
node->clocks_since_last_overflow = cl1;
node->vectors_since_last_overflow = v1;
- node->perf_counter0_ticks_since_last_overflow = ptick01;
- node->perf_counter1_ticks_since_last_overflow = ptick11;
- node->perf_counter_vectors_since_last_overflow = pvec1;
node->max_clock_n = node->max_clock > n_clocks ?
node->max_clock_n : n_vectors;
r = vlib_node_runtime_update_main_loop_vector_stats (vm, node, n_vectors);
- if (PREDICT_FALSE (ca1 < ca0 || v1 < v0 || cl1 < cl0) || (ptick01 < ptick00)
- || (ptick11 < ptick10) || (pvec1 < pvec0))
+ if (PREDICT_FALSE (ca1 < ca0 || v1 < v0 || cl1 < cl0))
{
node->calls_since_last_overflow = ca0;
node->clocks_since_last_overflow = cl0;
node->vectors_since_last_overflow = v0;
- node->perf_counter0_ticks_since_last_overflow = ptick00;
- node->perf_counter1_ticks_since_last_overflow = ptick10;
- node->perf_counter_vectors_since_last_overflow = pvec0;
- vlib_node_runtime_sync_stats (vm, node, n_calls, n_vectors, n_clocks,
- n_ticks0, n_ticks1);
+ vlib_node_runtime_sync_stats (vm, node, n_calls, n_vectors, n_clocks);
}
return r;
}
-always_inline void
-vlib_node_runtime_perf_counter (vlib_main_t * vm, u64 * pmc0, u64 * pmc1,
- vlib_node_runtime_t * node,
- vlib_frame_t * frame, int before_or_after)
-{
- *pmc0 = 0;
- *pmc1 = 0;
- if (PREDICT_FALSE (vec_len (vm->vlib_node_runtime_perf_counter_cbs) != 0))
- clib_call_callbacks (vm->vlib_node_runtime_perf_counter_cbs, vm, pmc0,
- pmc1, node, frame, before_or_after);
-}
-
+/* Thin wrapper: update a process node's runtime stats.  The trailing
+ * 0ULL, 0ULL perf-counter arguments disappear along with the perf-counter
+ * parameters removed from vlib_node_runtime_update_stats (). */
always_inline void
vlib_process_update_stats (vlib_main_t * vm,
 vlib_process_t * p,
 uword n_calls, uword n_vectors, uword n_clocks)
{
 vlib_node_runtime_update_stats (vm, &p->node_runtime,
- n_calls, n_vectors, n_clocks, 0ULL, 0ULL);
+ n_calls, n_vectors, n_clocks);
}
static clib_error_t *
}
+/* Crash-time dump, renamed from elog_post_mortem_dump () since it now
+ * covers two artifacts: the dispatch pcap post-mortem capture
+ * (/tmp/dispatch_post_mortem.<pid>) and the event log
+ * (/tmp/elog_post_mortem.<pid>), each gated by its own vm flag.
+ * Runs while the process is crashing, so filename vectors are
+ * deliberately never freed. */
void
-elog_post_mortem_dump (void)
+vlib_post_mortem_dump (void)
{
 vlib_main_t *vm = &vlib_global_main;
 elog_main_t *em = &vm->elog_main;
+
 u8 *filename;
 clib_error_t *error;
- if (!vm->elog_post_mortem_dump)
+ if ((vm->elog_post_mortem_dump + vm->dispatch_pcap_postmortem) == 0)
 return;
- filename = format (0, "/tmp/elog_post_mortem.%d%c", getpid (), 0);
- error = elog_write_file (em, (char *) filename, 1 /* flush ring */ );
- if (error)
- clib_error_report (error);
- vec_free (filename);
+ if (vm->dispatch_pcap_postmortem)
+ {
+ /* NOTE(review): this inner 'error' shadows the outer declaration
+ * above — harmless here, but worth confirming it is intentional. */
+ clib_error_t *error;
+ pcap_main_t *pm = &vm->dispatch_pcap_main;
+
+ /* Stop capturing exactly where we are, then flush to disk. */
+ pm->n_packets_to_capture = pm->n_packets_captured;
+ pm->file_name = (char *) format (0, "/tmp/dispatch_post_mortem.%d%c",
+ getpid (), 0);
+ error = pcap_write (pm);
+ pcap_close (pm);
+ if (error)
+ clib_error_report (error);
+ /*
+ * We're in the middle of crashing. Don't try to free the filename.
+ */
+ }
+
+ if (vm->elog_post_mortem_dump)
+ {
+ filename = format (0, "/tmp/elog_post_mortem.%d%c", getpid (), 0);
+ error = elog_write_file (em, (char *) filename, 1 /* flush ring */ );
+ if (error)
+ clib_error_report (error);
+ /*
+ * We're in the middle of crashing. Don't try to free the filename.
+ */
+ }
}
/* *INDENT-OFF* */
/* *INDENT-ON* */
static clib_error_t *
-elog_resize (vlib_main_t * vm,
- unformat_input_t * input, vlib_cli_command_t * cmd)
+elog_resize_command_fn (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
{
elog_main_t *em = &vm->elog_main;
u32 tmp;
VLIB_CLI_COMMAND (elog_resize_cli, static) = {
.path = "event-logger resize",
.short_help = "event-logger resize <nnn>",
- .function = elog_resize,
+ .function = elog_resize_command_fn,
};
/* *INDENT-ON* */
u64 t;
vlib_node_main_t *nm = &vm->node_main;
vlib_next_frame_t *nf;
- u64 pmc_before[2], pmc_after[2], pmc_delta[2];
if (CLIB_DEBUG > 0)
{
last_time_stamp, frame ? frame->n_vectors : 0,
/* is_after */ 0);
- vlib_node_runtime_perf_counter (vm, &pmc_before[0], &pmc_before[1],
- node, frame, 0 /* before */ );
+ vlib_node_runtime_perf_counter (vm, node, frame, 0, last_time_stamp,
+ VLIB_NODE_RUNTIME_PERF_BEFORE);
/*
* Turn this on if you run into
}
if (PREDICT_FALSE (vm->dispatch_pcap_enable))
dispatch_pcap_trace (vm, node, frame);
- n = node->function (vm, node, frame);
+
+ if (PREDICT_TRUE (vm->dispatch_wrapper_fn == 0))
+ n = node->function (vm, node, frame);
+ else
+ n = vm->dispatch_wrapper_fn (vm, node, frame);
}
else
{
if (PREDICT_FALSE (vm->dispatch_pcap_enable))
dispatch_pcap_trace (vm, node, frame);
- n = node->function (vm, node, frame);
+
+ if (PREDICT_TRUE (vm->dispatch_wrapper_fn == 0))
+ n = node->function (vm, node, frame);
+ else
+ n = vm->dispatch_wrapper_fn (vm, node, frame);
}
t = clib_cpu_time_now ();
- /*
- * To validate accounting: pmc_delta = t - pmc_before;
- * perf ticks should equal clocks/pkt...
- */
- vlib_node_runtime_perf_counter (vm, &pmc_after[0], &pmc_after[1], node,
- frame, 1 /* after */ );
-
- pmc_delta[0] = pmc_after[0] - pmc_before[0];
- pmc_delta[1] = pmc_after[1] - pmc_before[1];
+ vlib_node_runtime_perf_counter (vm, node, frame, n, t,
+ VLIB_NODE_RUNTIME_PERF_AFTER);
vlib_elog_main_loop_event (vm, node->node_index, t, n, 1 /* is_after */ );
v = vlib_node_runtime_update_stats (vm, node,
/* n_calls */ 1,
/* n_vectors */ n,
- /* n_clocks */ t - last_time_stamp,
- pmc_delta[0] /* PMC0 */ ,
- pmc_delta[1] /* PMC1 */ );
+ /* n_clocks */ t - last_time_stamp);
/* When in interrupt mode and vector rate crosses threshold switch to
polling mode. */
{
vlib_node_main_t *nm = &vm->node_main;
vlib_frame_t *f;
- vlib_next_frame_t *nf, nf_dummy;
+ vlib_next_frame_t *nf, nf_placeholder;
vlib_node_runtime_t *n;
vlib_frame_t *restore_frame;
vlib_pending_frame_t *p;
f = vlib_get_frame (vm, p->frame);
if (p->next_frame_index == VLIB_PENDING_FRAME_NO_NEXT_FRAME)
{
- /* No next frame: so use dummy on stack. */
- nf = &nf_dummy;
+ /* No next frame: so use placeholder on stack. */
+ nf = &nf_placeholder;
nf->flags = f->frame_flags & VLIB_NODE_FLAG_TRACE;
nf->frame = NULL;
}
vm = a->vm;
p = a->process;
+ vlib_process_finish_switch_stack (vm);
+
f = a->frame;
node = &p->node_runtime;
ASSERT (vlib_process_stack_is_valid (p));
+ vlib_process_start_switch_stack (vm, 0);
clib_longjmp (&p->return_longjmp, n);
return n;
r = clib_setjmp (&p->return_longjmp, VLIB_PROCESS_RETURN_LONGJMP_RETURN);
if (r == VLIB_PROCESS_RETURN_LONGJMP_RETURN)
- r = clib_calljmp (vlib_process_bootstrap, pointer_to_uword (&a),
- (void *) p->stack + (1 << p->log2_n_stack_bytes));
+ {
+ vlib_process_start_switch_stack (vm, p);
+ r = clib_calljmp (vlib_process_bootstrap, pointer_to_uword (&a),
+ (void *) p->stack + (1 << p->log2_n_stack_bytes));
+ }
+ else
+ vlib_process_finish_switch_stack (vm);
return r;
}
+/* Resume a suspended process node via setjmp/longjmp.  Now takes vm so it
+ * can bracket the jump with the stack-switch bookkeeping: start the switch
+ * before longjmp-ing onto the process stack, and finish it on the return
+ * path (when the process suspends or exits back through return_longjmp). */
static_always_inline uword
-vlib_process_resume (vlib_process_t * p)
+vlib_process_resume (vlib_main_t * vm, vlib_process_t * p)
{
 uword r;
 p->flags &= ~(VLIB_PROCESS_IS_SUSPENDED_WAITING_FOR_CLOCK
 | VLIB_PROCESS_RESUME_PENDING);
 r = clib_setjmp (&p->return_longjmp, VLIB_PROCESS_RETURN_LONGJMP_RETURN);
 if (r == VLIB_PROCESS_RETURN_LONGJMP_RETURN)
- clib_longjmp (&p->resume_longjmp, VLIB_PROCESS_RESUME_LONGJMP_RESUME);
+ {
+ vlib_process_start_switch_stack (vm, p);
+ clib_longjmp (&p->resume_longjmp, VLIB_PROCESS_RESUME_LONGJMP_RESUME);
+ }
+ else
+ vlib_process_finish_switch_stack (vm);
 return r;
}
old_process_index = nm->current_process_index;
nm->current_process_index = node->runtime_index;
+ vlib_node_runtime_perf_counter (vm, node_runtime, f, 0, last_time_stamp,
+ VLIB_NODE_RUNTIME_PERF_BEFORE);
+
n_vectors = vlib_process_startup (vm, p, f);
nm->current_process_index = old_process_index;
vlib_elog_main_loop_event (vm, node_runtime->node_index, t, is_suspend,
/* is_after */ 1);
+ vlib_node_runtime_perf_counter (vm, node_runtime, f, n_vectors, t,
+ VLIB_NODE_RUNTIME_PERF_AFTER);
+
vlib_process_update_stats (vm, p,
/* n_calls */ !is_suspend,
/* n_vectors */ n_vectors,
/* Save away current process for suspend. */
nm->current_process_index = node->runtime_index;
- n_vectors = vlib_process_resume (p);
+ vlib_node_runtime_perf_counter (vm, node_runtime, f, 0, last_time_stamp,
+ VLIB_NODE_RUNTIME_PERF_BEFORE);
+
+ n_vectors = vlib_process_resume (vm, p);
t = clib_cpu_time_now ();
nm->current_process_index = ~0;
vlib_elog_main_loop_event (vm, node_runtime->node_index, t, !is_suspend,
/* is_after */ 1);
+ vlib_node_runtime_perf_counter (vm, node_runtime, f, n_vectors, t,
+ VLIB_NODE_RUNTIME_PERF_AFTER);
+
vlib_process_update_stats (vm, p,
/* n_calls */ !is_suspend,
/* n_vectors */ n_vectors,
{
}
+/* Dispatch every input node named in 'interrupts' in interrupt state.
+ *
+ * Factored out so the main loop can run the same code over the local
+ * (lock-free) and remote (spinlock-swapped) pending-interrupt vectors.
+ * Returns the updated cpu_time_now from the last dispatch_node () call. */
+static_always_inline u64
+dispatch_pending_interrupts (vlib_main_t * vm, vlib_node_main_t * nm,
+ u64 cpu_time_now,
+ vlib_node_interrupt_t * interrupts)
+{
+ vlib_node_runtime_t *n;
+
+ for (int i = 0; i < _vec_len (interrupts); i++)
+ {
+ vlib_node_interrupt_t *in;
+ in = vec_elt_at_index (interrupts, i);
+ n = vec_elt_at_index (nm->nodes_by_type[VLIB_NODE_TYPE_INPUT],
+ in->node_runtime_index);
+ /* Hand the interrupt's payload to the node before dispatching it. */
+ n->interrupt_data = in->data;
+ cpu_time_now = dispatch_node (vm, n, VLIB_NODE_TYPE_INPUT,
+ VLIB_NODE_STATE_INTERRUPT, /* frame */ 0,
+ cpu_time_now);
+ }
+ return cpu_time_now;
+}
+
+/* Restart the post-mortem dispatch pcap capture: empty the shared trace
+ * buffer and zero the captured-packet count.  Taken under the pcap_main
+ * spinlock because all threads append to the same buffer. */
+static inline void
+pcap_postmortem_reset (vlib_main_t * vm)
+{
+ pcap_main_t *pm = &vm->dispatch_pcap_main;
+
+ /* Reset the trace buffer and capture count */
+ clib_spinlock_lock_if_init (&pm->lock);
+ vec_reset_length (pm->pcap_data);
+ pm->n_packets_captured = 0;
+ clib_spinlock_unlock_if_init (&pm->lock);
+}
+
static_always_inline void
vlib_main_or_worker_loop (vlib_main_t * vm, int is_main)
vlib_thread_main_t *tm = vlib_get_thread_main ();
uword i;
u64 cpu_time_now;
+ f64 now;
vlib_frame_queue_main_t *fqm;
- u32 *last_node_runtime_indices = 0;
u32 frame_queue_check_counter = 0;
+ vlib_node_interrupt_t *empty_int_list = 0;
/* Initialize pending node vector. */
if (is_main)
cpu_time_now = clib_cpu_time_now ();
/* Pre-allocate interupt runtime indices and lock. */
- vec_alloc (nm->pending_interrupt_node_runtime_indices, 32);
- vec_alloc (last_node_runtime_indices, 32);
- if (!is_main)
- clib_spinlock_init (&nm->pending_interrupt_lock);
+ vec_alloc (nm->pending_local_interrupts, 32);
+ vec_alloc (nm->pending_remote_interrupts, 32);
+ vec_alloc (empty_int_list, 32);
+ vec_alloc_aligned (nm->pending_remote_interrupts_notify, 1,
+ CLIB_CACHE_LINE_BYTES);
+ clib_spinlock_init (&nm->pending_interrupt_lock);
/* Pre-allocate expired nodes. */
if (!nm->polling_threshold_vector_length)
vm->cpu_id = clib_get_current_cpu_id ();
vm->numa_node = clib_get_current_numa_node ();
+ os_set_numa_index (vm->numa_node);
/* Start all processes. */
if (is_main)
}
if (!is_main)
+ vlib_worker_thread_barrier_check ();
+
+ if (PREDICT_FALSE (vm->check_frame_queues + frame_queue_check_counter))
{
- vlib_worker_thread_barrier_check ();
- if (PREDICT_FALSE (vm->check_frame_queues +
- frame_queue_check_counter))
- {
- u32 processed = 0;
+ u32 processed = 0;
- if (vm->check_frame_queues)
- {
- frame_queue_check_counter = 100;
- vm->check_frame_queues = 0;
- }
+ if (vm->check_frame_queues)
+ {
+ frame_queue_check_counter = 100;
+ vm->check_frame_queues = 0;
+ }
- vec_foreach (fqm, tm->frame_queue_mains)
- processed += vlib_frame_queue_dequeue (vm, fqm);
+ vec_foreach (fqm, tm->frame_queue_mains)
+ processed += vlib_frame_queue_dequeue (vm, fqm);
- /* No handoff queue work found? */
- if (processed)
- frame_queue_check_counter = 100;
- else
- frame_queue_check_counter--;
- }
- if (PREDICT_FALSE (vec_len (vm->worker_thread_main_loop_callbacks)))
- clib_call_callbacks (vm->worker_thread_main_loop_callbacks, vm);
+ /* No handoff queue work found? */
+ if (processed)
+ frame_queue_check_counter = 100;
+ else
+ frame_queue_check_counter--;
}
+ if (PREDICT_FALSE (vec_len (vm->worker_thread_main_loop_callbacks)))
+ clib_call_callbacks (vm->worker_thread_main_loop_callbacks, vm,
+ cpu_time_now);
+
+ /*
+ * When trying to understand aggravating, hard-to-reproduce
+ * bugs: reset / restart the pcap dispatch trace once per
+ * main thread dispatch cycle. All threads share the same
+ * (spinlock-protected) dispatch trace buffer. It might take
+ * a few tries to capture a good post-mortem trace of
+ * a multi-thread issue. Best we can do without a big refactor job.
+ */
+ if (is_main && PREDICT_FALSE (vm->dispatch_pcap_postmortem != 0))
+ pcap_postmortem_reset (vm);
+
/* Process pre-input nodes. */
+ cpu_time_now = clib_cpu_time_now ();
vec_foreach (n, nm->nodes_by_type[VLIB_NODE_TYPE_PRE_INPUT])
cpu_time_now = dispatch_node (vm, n,
VLIB_NODE_TYPE_PRE_INPUT,
if (PREDICT_TRUE (is_main && vm->queue_signal_pending == 0))
vm->queue_signal_callback (vm);
- /* Next handle interrupts. */
- {
- /* unlocked read, for performance */
- uword l = _vec_len (nm->pending_interrupt_node_runtime_indices);
- uword i;
- if (PREDICT_FALSE (l > 0))
- {
- u32 *tmp;
- if (!is_main)
- {
- clib_spinlock_lock (&nm->pending_interrupt_lock);
- /* Re-read w/ lock held, in case another thread added an item */
- l = _vec_len (nm->pending_interrupt_node_runtime_indices);
- }
+ /* handle local interrupts */
+ if (_vec_len (nm->pending_local_interrupts))
+ {
+ vlib_node_interrupt_t *interrupts = nm->pending_local_interrupts;
+ nm->pending_local_interrupts = empty_int_list;
+ cpu_time_now = dispatch_pending_interrupts (vm, nm, cpu_time_now,
+ interrupts);
+ empty_int_list = interrupts;
+ vec_reset_length (empty_int_list);
+ }
+
+ /* handle remote interrupts */
+ if (PREDICT_FALSE (_vec_len (nm->pending_remote_interrupts)))
+ {
+ vlib_node_interrupt_t *interrupts;
+
+ /* at this point it is known that
+ * vec_len (nm->pending_local_interrupts) is zero so we quickly swap
+ * local and remote vector under the spinlock */
+ clib_spinlock_lock (&nm->pending_interrupt_lock);
+ interrupts = nm->pending_remote_interrupts;
+ nm->pending_remote_interrupts = empty_int_list;
+ *nm->pending_remote_interrupts_notify = 0;
+ clib_spinlock_unlock (&nm->pending_interrupt_lock);
+
+ cpu_time_now = dispatch_pending_interrupts (vm, nm, cpu_time_now,
+ interrupts);
+ empty_int_list = interrupts;
+ vec_reset_length (empty_int_list);
+ }
- tmp = nm->pending_interrupt_node_runtime_indices;
- nm->pending_interrupt_node_runtime_indices =
- last_node_runtime_indices;
- last_node_runtime_indices = tmp;
- _vec_len (last_node_runtime_indices) = 0;
- if (!is_main)
- clib_spinlock_unlock (&nm->pending_interrupt_lock);
- for (i = 0; i < l; i++)
- {
- n = vec_elt_at_index (nm->nodes_by_type[VLIB_NODE_TYPE_INPUT],
- last_node_runtime_indices[i]);
- cpu_time_now =
- dispatch_node (vm, n, VLIB_NODE_TYPE_INPUT,
- VLIB_NODE_STATE_INTERRUPT,
- /* frame */ 0,
- cpu_time_now);
- }
- }
- }
/* Input nodes may have added work to the pending vector.
Process pending vector until there is nothing left.
All pending vectors will be processed from input -> output. */
/* Record time stamp in case there are no enabled nodes and above
calls do not update time stamp. */
cpu_time_now = clib_cpu_time_now ();
+ vm->loops_this_reporting_interval++;
+ now = clib_time_now_internal (&vm->clib_time, cpu_time_now);
+ /* Time to update loops_per_second? */
+ if (PREDICT_FALSE (now >= vm->loop_interval_end))
+ {
+ /* Next sample ends in 20ms */
+ if (vm->loop_interval_start)
+ {
+ f64 this_loops_per_second;
+
+ this_loops_per_second =
+ ((f64) vm->loops_this_reporting_interval) / (now -
+ vm->loop_interval_start);
+
+ vm->loops_per_second =
+ vm->loops_per_second * vm->damping_constant +
+ (1.0 - vm->damping_constant) * this_loops_per_second;
+ if (vm->loops_per_second != 0.0)
+ vm->seconds_per_loop = 1.0 / vm->loops_per_second;
+ else
+ vm->seconds_per_loop = 0.0;
+ }
+ /* New interval starts now, and ends in 20ms */
+ vm->loop_interval_start = now;
+ vm->loop_interval_end = now + 2e-4;
+ vm->loops_this_reporting_interval = 0;
+ }
}
}
turn_on_mem_trace = 1;
else if (unformat (input, "elog-events %d",
- &vm->elog_main.event_ring_size))
- ;
+ &vm->configured_elog_ring_size))
+ vm->configured_elog_ring_size =
+ 1 << max_log2 (vm->configured_elog_ring_size);
else if (unformat (input, "elog-post-mortem-dump"))
vm->elog_post_mortem_dump = 1;
+ else if (unformat (input, "buffer-alloc-success-rate %f",
+ &vm->buffer_alloc_success_rate))
+ {
+ if (VLIB_BUFFER_ALLOC_FAULT_INJECTOR == 0)
+ return clib_error_return
+ (0, "Buffer fault injection not configured");
+ }
+ else if (unformat (input, "buffer-alloc-success-seed %u",
+ &vm->buffer_alloc_success_seed))
+ {
+ if (VLIB_BUFFER_ALLOC_FAULT_INJECTOR == 0)
+ return clib_error_return
+ (0, "Buffer fault injection not configured");
+ }
else
return unformat_parse_error (input);
}
VLIB_EARLY_CONFIG_FUNCTION (vlib_main_configure, "vlib");
+/* No-op default for vm->queue_signal_callback, installed until a real
+ * callback is registered.  Renamed from "dummy" as part of the project's
+ * inclusive-naming cleanup. */
static void
-dummy_queue_signal_callback (vlib_main_t * vm)
+placeholder_queue_signal_callback (vlib_main_t * vm)
{
}
clib_error_t *volatile error;
vlib_node_main_t *nm = &vm->node_main;
- vm->queue_signal_callback = dummy_queue_signal_callback;
+ vm->queue_signal_callback = placeholder_queue_signal_callback;
- clib_time_init (&vm->clib_time);
-
- /* Turn on event log. */
- if (!vm->elog_main.event_ring_size)
- vm->elog_main.event_ring_size = 128 << 10;
- elog_init (&vm->elog_main, vm->elog_main.event_ring_size);
- elog_enable_disable (&vm->elog_main, 1);
+ /* Reconfigure event log which is enabled very early */
+ if (vm->configured_elog_ring_size &&
+ vm->configured_elog_ring_size != vm->elog_main.event_ring_size)
+ elog_resize (&vm->elog_main, vm->configured_elog_ring_size);
vl_api_set_elog_main (&vm->elog_main);
(void) vl_api_set_elog_trace_api_messages (1);
vec_validate (vm->processing_rpc_requests, 0);
_vec_len (vm->processing_rpc_requests) = 0;
+ /* Default params for the buffer allocator fault injector, if configured */
+ if (VLIB_BUFFER_ALLOC_FAULT_INJECTOR > 0)
+ {
+ vm->buffer_alloc_success_seed = 0xdeaddabe;
+ vm->buffer_alloc_success_rate = 0.80;
+ }
+
if ((error = vlib_call_all_config_functions (vm, input, 0 /* is_early */ )))
goto done;
+ /*
+ * Use exponential smoothing, with a half-life of 1 second
+ * reported_rate(t) = reported_rate(t-1) * K + rate(t)*(1-K)
+ *
+ * Sample every 20ms, aka 50 samples per second
+ * K = exp (-1.0/20.0);
+ * K = 0.95
+ */
+ vm->damping_constant = exp (-1.0 / 20.0);
+
/* Sort per-thread init functions before we start threads */
vlib_sort_init_exit_functions (&vm->worker_init_function_registrations);
vec_validate (tm->nodes, a->buffer_trace_node_index);
tn = tm->nodes + a->buffer_trace_node_index;
tn->limit += a->buffer_traces_to_capture;
+ if (a->post_mortem)
+ {
+ tm->filter_flag = FILTER_FLAG_POST_MORTEM;
+ tm->filter_count = ~0;
+ }
tm->trace_enable = 1;
}));
/* *INDENT-ON* */
vec_free (pm->pcap_data);
memset (pm, 0, sizeof (*pm));
- vec_validate_aligned (vnet_trace_dummy, 2048, CLIB_CACHE_LINE_BYTES);
+ vec_validate_aligned (vnet_trace_placeholder, 2048,
+ CLIB_CACHE_LINE_BYTES);
if (pm->lock == 0)
clib_spinlock_init (&(pm->lock));
pm->n_packets_captured = 0;
pm->packet_type = PCAP_PACKET_TYPE_vpp;
pm->n_packets_to_capture = a->packets_to_capture;
+ vm->dispatch_pcap_postmortem = a->post_mortem;
/* *INDENT-OFF* */
- foreach_vlib_main (({this_vlib_main->dispatch_pcap_enable = 1;}));
+ foreach_vlib_main (({ this_vlib_main->dispatch_pcap_enable = 1;}));
/* *INDENT-ON* */
}
else
{
/* *INDENT-OFF* */
- foreach_vlib_main (({this_vlib_main->dispatch_pcap_enable = 0;}));
+ foreach_vlib_main ((
+ {
+ this_vlib_main->dispatch_pcap_enable = 0;
+ this_vlib_main->dispatch_pcap_postmortem = 0;
+ tm = &this_vlib_main->trace_main;
+ tm->filter_flag = 0;
+ tm->filter_count = 0;
+ tm->trace_enable = 0;
+ }));
/* *INDENT-ON* */
vec_reset_length (vm->dispatch_buffer_trace_nodes);
if (pm->n_packets_captured)
int rv;
int enable = 0;
int status = 0;
+ int post_mortem = 0;
u32 node_index = ~0, buffer_traces_to_capture = 100;
/* Get a line of input. */
unformat_vlib_node, vm, &node_index,
&buffer_traces_to_capture))
;
+ else if (unformat (line_input, "post-mortem %=", &post_mortem, 1))
+ ;
else
{
return clib_error_return (0, "unknown input `%U'",
a->packets_to_capture = max;
a->buffer_trace_node_index = node_index;
a->buffer_traces_to_capture = buffer_traces_to_capture;
+ a->post_mortem = post_mortem;
rv = vlib_pcap_dispatch_trace_configure (a);
* @cliexstart{vppctl pcap dispatch trace off}
* captured 21 pkts...
* saved to /tmp/dispatchTrace.pcap...
+ * Example of how to start a post-mortem dispatch trace:
+ * pcap dispatch trace on max 20000 buffer-trace
+ * dpdk-input 3000000000 post-mortem
* @cliexend
?*/
/* *INDENT-OFF* */
.path = "pcap dispatch trace",
.short_help =
"pcap dispatch trace [on|off] [max <nn>] [file <name>] [status]\n"
- " [buffer-trace <input-node-name> <nn>]",
+ " [buffer-trace <input-node-name> <nn>][post-mortem]",
.function = dispatch_trace_command_fn,
};
/* *INDENT-ON* */
+/* Out-of-line wrapper around the always_inline vlib_get_main (), for
+ * callers (e.g. other shared objects) that need a real symbol to link
+ * against rather than an inlined body. */
+vlib_main_t *
+vlib_get_main_not_inline (void)
+{
+ return vlib_get_main ();
+}
+
+/* Out-of-line accessor for the global event-log main, for callers that
+ * need a linkable symbol.  Declared (void) rather than () so this is a
+ * proper prototype, matching vlib_get_main_not_inline () above: in C,
+ * an empty parameter list leaves the arguments unchecked. */
+elog_main_t *
+vlib_get_elog_main_not_inline (void)
+{
+ return &vlib_global_main.elog_main;
+}
+
/*
* fd.io coding-style-patch-verification: ON
*