else
{
f = clib_mem_alloc_aligned_no_fail (n, VLIB_FRAME_ALIGN);
- f->cpu_index = vm->cpu_index;
+ f->thread_index = vm->thread_index;
fi = vlib_frame_index_no_check (vm, f);
}
/* Poison frame when debugging. */
if (CLIB_DEBUG > 0)
{
- u32 save_cpu_index = f->cpu_index;
+ u32 save_thread_index = f->thread_index;
memset (f, 0xfe, n);
- f->cpu_index = save_cpu_index;
+ f->thread_index = save_thread_index;
}
/* Insert magic number. */
* a dangling frame reference. Each thread has its own copy of
* the next_frames vector.
*/
- if (0 && r->cpu_index != next_runtime->cpu_index)
+ if (0 && r->thread_index != next_runtime->thread_index)
{
nf->frame_index = ~0;
nf->flags &= ~(VLIB_FRAME_PENDING | VLIB_FRAME_IS_ALLOCATED);
return error;
}
+void
+elog_post_mortem_dump (void)
+{
+ vlib_main_t *vm = &vlib_global_main;
+ elog_main_t *em = &vm->elog_main;
+ u8 *filename;
+ clib_error_t *error;
+
+ if (!vm->elog_post_mortem_dump)
+ return;
+
+ filename = format (0, "/tmp/elog_post_mortem.%d%c", getpid (), 0);
+ error = elog_write_file (em, (char *) filename, 1 /* flush ring */ );
+ if (error)
+ clib_error_report (error);
+ vec_free (filename);
+}
+
/* *INDENT-OFF* */
VLIB_CLI_COMMAND (elog_save_cli, static) = {
.path = "event-logger save",
: evm->node_call_elog_event_types,
node_index),
/* track */
- (vm->cpu_index ? &vlib_worker_threads[vm->cpu_index].
+ (vm->thread_index ? &vlib_worker_threads[vm->thread_index].
elog_track : &em->default_track),
/* data to log */ n_vectors);
}
vm->cpu_time_last_node_dispatch = last_time_stamp;
- if (1 /* || vm->cpu_index == node->cpu_index */ )
+ if (1 /* || vm->thread_index == node->thread_index */ )
{
vlib_main_t *stat_vm;
{
u32 node_name, vector_length, is_polling;
} *ed;
- vlib_worker_thread_t *w = vlib_worker_threads + vm->cpu_index;
+ vlib_worker_thread_t *w = vlib_worker_threads + vm->thread_index;
#endif
if ((dispatch_state == VLIB_NODE_STATE_INTERRUPT
}
static u64
-dispatch_pending_node (vlib_main_t * vm,
- vlib_pending_frame_t * p, u64 last_time_stamp)
+dispatch_pending_node (vlib_main_t * vm, uword pending_frame_index,
+ u64 last_time_stamp)
{
vlib_node_main_t *nm = &vm->node_main;
vlib_frame_t *f;
vlib_next_frame_t *nf, nf_dummy;
vlib_node_runtime_t *n;
u32 restore_frame_index;
+ vlib_pending_frame_t *p;
+
+ /* See comment below about dangling references to nm->pending_frames */
+ p = nm->pending_frames + pending_frame_index;
n = vec_elt_at_index (nm->nodes_by_type[VLIB_NODE_TYPE_INTERNAL],
p->node_runtime_index);
/* Frame is ready to be used again, so restore it. */
if (restore_frame_index != ~0)
{
- /* we musn't restore a frame that is flagged to be freed. This shouldn't
- happen since frames to be freed post dispatch are those used
- when the to-node frame becomes full i.e. they form a sort of queue of
- frames to a single node. If we get here then the to-node frame and the
- pending frame *were* the same, and so we removed the to-node frame.
- Therefore this frame is no longer part of the queue for that node
- and hence it cannot be it's overspill.
+ /*
+ * We mustn't restore a frame that is flagged to be freed. This
+ * shouldn't happen since frames to be freed post dispatch are
+ * those used when the to-node frame becomes full i.e. they form a
+ * sort of queue of frames to a single node. If we get here then
+ * the to-node frame and the pending frame *were* the same, and so
+ * we removed the to-node frame. Therefore this frame is no
+ * longer part of the queue for that node and hence it cannot be
+ * its overspill.
*/
ASSERT (!(f->flags & VLIB_FRAME_FREE_AFTER_DISPATCH));
- /* p->next_frame_index can change during node dispatch if node
- function decides to change graph hook up. */
+ /*
+ * NB: dispatching node n can result in the creation and scheduling
+ * of new frames, and hence in the reallocation of nm->pending_frames.
+ * Recompute p, or no supper. This was broken for more than 10 years.
+ */
+ p = nm->pending_frames + pending_frame_index;
+
+ /*
+ * p->next_frame_index can change during node dispatch if node
+ * function decides to change graph hook up.
+ */
nf = vec_elt_at_index (nm->next_frames, p->next_frame_index);
nf->flags |= VLIB_FRAME_IS_ALLOCATED;
if (!nm->interrupt_threshold_vector_length)
nm->interrupt_threshold_vector_length = 5;
- if (is_main)
- {
- if (!nm->polling_threshold_vector_length)
- nm->polling_threshold_vector_length = 10;
- if (!nm->interrupt_threshold_vector_length)
- nm->interrupt_threshold_vector_length = 5;
-
- nm->current_process_index = ~0;
- }
-
/* Start all processes. */
if (is_main)
{
uword i;
+ nm->current_process_index = ~0;
for (i = 0; i < vec_len (nm->processes); i++)
cpu_time_now = dispatch_process (vm, nm->processes[i], /* frame */ 0,
cpu_time_now);
Process pending vector until there is nothing left.
All pending vectors will be processed from input -> output. */
for (i = 0; i < _vec_len (nm->pending_frames); i++)
- cpu_time_now = dispatch_pending_node (vm, nm->pending_frames + i,
- cpu_time_now);
+ cpu_time_now = dispatch_pending_node (vm, i, cpu_time_now);
/* Reset pending vector for next iteration. */
_vec_len (nm->pending_frames) = 0;
else if (unformat (input, "elog-events %d",
&vm->elog_main.event_ring_size))
;
+ else if (unformat (input, "elog-post-mortem-dump"))
+ vm->elog_post_mortem_dump = 1;
else
return unformat_parse_error (input);
}