vlib_node_main_t *nm = &vm->node_main;
uword *p;
u8 *key = name;
- if (!clib_mem_is_heap_object (key))
- key = format (0, "%s", key);
+ key = format (0, "%s", key);
p = hash_get (nm->node_by_name, key);
if (key != name)
vec_free (key);
hash_set (nm->node_by_name, n->name, n->index);
node_set_elog_name (vm, node_index);
+
+ /* Propagate the change to all worker threads */
+ vlib_worker_thread_node_runtime_update ();
}
static void
vlib_pending_frame_t *pf;
i32 i, j, n_insert;
- ASSERT (vlib_get_thread_index () == 0);
-
- vlib_worker_thread_barrier_sync (vm);
-
node = vec_elt (nm->nodes, node_index);
r = vlib_node_get_runtime (vm, node_index);
nf->node_runtime_index = next_node->runtime_index;
vlib_worker_thread_node_runtime_update ();
+}
- vlib_worker_thread_barrier_release (vm);
+uword
+vlib_node_get_next (vlib_main_t * vm, uword node_index, uword next_node_index)
+{
+ vlib_node_main_t *nm = &vm->node_main;
+ vlib_node_t *node;
+ uword *p;
+
+ node = vec_elt (nm->nodes, node_index);
+
+ /* Runtime has to be initialized. */
+ ASSERT (nm->flags & VLIB_NODE_MAIN_RUNTIME_STARTED);
+
+ if ((p = hash_get (node->next_slot_by_node, next_node_index)))
+ {
+ return p[0];
+ }
+
+ return (~0);
}
/* Add next node to given node in given slot. */
uword next_node_index, uword slot)
{
vlib_node_main_t *nm = &vm->node_main;
- vlib_node_t *node, *next;
+ vlib_node_t *node, *next, *old_next;
+ u32 old_next_index;
uword *p;
+ ASSERT (vlib_get_thread_index () == 0);
+
node = vec_elt (nm->nodes, node_index);
next = vec_elt (nm->nodes, next_node_index);
return p[0];
}
+ vlib_worker_thread_barrier_sync (vm);
+
if (slot == ~0)
slot = vec_len (node->next_nodes);
vec_validate_init_empty (node->next_nodes, slot, ~0);
vec_validate (node->n_vectors_by_next_node, slot);
+ if ((old_next_index = node->next_nodes[slot]) != ~0u)
+ {
+ hash_unset (node->next_slot_by_node, old_next_index);
+ old_next = vlib_get_node (vm, old_next_index);
+ old_next->prev_node_bitmap =
+ clib_bitmap_andnoti (old_next->prev_node_bitmap, node_index);
+ }
+
node->next_nodes[slot] = next_node_index;
hash_set (node->next_slot_by_node, next_node_index, slot);
/* *INDENT-ON* */
}
+ vlib_worker_thread_barrier_release (vm);
return slot;
}
{
elog_event_type_t t;
- memset (&t, 0, sizeof (t));
+ clib_memset (&t, 0, sizeof (t));
/* 2 event types for this node: one when node function is called.
One when it returns. */
ASSERT (VLIB_NODE_TYPE_INTERNAL == zero.type);
}
+ if (r->node_fn_registrations)
+ {
+ vlib_node_fn_registration_t *fnr = r->node_fn_registrations;
+ int priority = -1;
+
+      /* to avoid confusion, please remove ".function " statement from
+         CLIB_NODE_REGISTRATION() if using node function candidates */
+ ASSERT (r->function == 0);
+
+ while (fnr)
+ {
+ if (fnr->priority > priority)
+ {
+ priority = fnr->priority;
+ r->function = fnr->function;
+ }
+ fnr = fnr->next_registration;
+ }
+ }
+
ASSERT (r->function != 0);
n = clib_mem_alloc_no_fail (sizeof (n[0]));
- memset (n, 0, sizeof (n[0]));
+ clib_memset (n, 0, sizeof (n[0]));
n->index = vec_len (nm->nodes);
+ n->node_fn_registrations = r->node_fn_registrations;
+ n->protocol_hint = r->protocol_hint;
vec_add1 (nm->nodes, n);
if (n->type == VLIB_NODE_TYPE_PROCESS)
{
vlib_process_t *p;
- uword log2_n_stack_bytes;
+ void *map;
+ uword log2_n_stack_bytes, stack_bytes;
+ int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS;
- log2_n_stack_bytes = clib_max (r->process_log2_n_stack_bytes, 15);
+ log2_n_stack_bytes = clib_max (r->process_log2_n_stack_bytes,
+ VLIB_PROCESS_LOG2_STACK_SIZE);
+ log2_n_stack_bytes = clib_max (log2_n_stack_bytes,
+ min_log2 (page_size));
-#ifdef CLIB_UNIX
- /*
- * Bump the stack size if running over a kernel with a large page size,
- * and the stack isn't any too big to begin with. Otherwise, we'll
- * trip over the stack guard page for sure.
- */
- if ((page_size > (4 << 10)) && log2_n_stack_bytes < 19)
- {
- if ((1 << log2_n_stack_bytes) <= page_size)
- log2_n_stack_bytes = min_log2 (page_size) + 1;
- else
- log2_n_stack_bytes++;
- }
-#endif
+ p = clib_mem_alloc_aligned (sizeof (p[0]), CLIB_CACHE_LINE_BYTES);
+ clib_memset (p, 0, sizeof (p[0]));
+ p->log2_n_stack_bytes = log2_n_stack_bytes;
- p = clib_mem_alloc_aligned_at_offset
- (sizeof (p[0]) + (1 << log2_n_stack_bytes),
- STACK_ALIGN, STRUCT_OFFSET_OF (vlib_process_t, stack),
- 0 /* no, don't call os_out_of_memory */ );
- if (p == 0)
+ stack_bytes = 1ULL << log2_n_stack_bytes;
+ /* map stack size + 2 extra guard pages */
+ map = mmap (0, stack_bytes + page_size, PROT_READ | PROT_WRITE,
+ mmap_flags, -1, 0);
+
+ if (map == MAP_FAILED)
clib_panic ("failed to allocate process stack (%d bytes)",
- 1 << log2_n_stack_bytes);
+ stack_bytes);
- memset (p, 0, sizeof (p[0]));
- p->log2_n_stack_bytes = log2_n_stack_bytes;
+ /* skip the guard page */
+ p->stack = map + page_size;
+
+ mmap_flags |= MAP_FIXED;
+ map = mmap (map, page_size, PROT_NONE, mmap_flags, -1, 0);
+
+ if (map == MAP_FAILED)
+ clib_unix_warning ("failed to create stack guard page");
/* Process node's runtime index is really index into process
pointer vector. */
/* Node runtime is stored inside of process. */
rt = &p->node_runtime;
-
-#ifdef CLIB_UNIX
- /*
- * Disallow writes to the bottom page of the stack, to
- * catch stack overflows.
- */
- if (mprotect (p->stack, page_size, PROT_READ) < 0)
- clib_unix_warning ("process stack");
-#endif
-
}
else
{
vec_resize (rt->errors, r->n_errors);
for (i = 0; i < vec_len (rt->errors); i++)
- rt->errors[i] = vlib_error_set (n->index, i);
+ rt->errors[i] = n->error_heap_index + i;
STATIC_ASSERT_SIZEOF (vlib_node_runtime_t, 128);
ASSERT (vec_len (n->runtime_data) <= VLIB_NODE_RUNTIME_DATA_SIZE);
u16 n_vectors = frame->n_vectors;
vlib_node_increment_counter (vm, node->node_index, 0, n_vectors);
- vlib_buffer_free (vm, vlib_frame_args (frame), n_vectors);
+ vlib_buffer_free (vm, vlib_frame_vector_args (frame), n_vectors);
vlib_frame_free (vm, node, frame);
return n_vectors;
}
}
+void
+vlib_node_get_nodes (vlib_main_t * vm, u32 max_threads, int include_stats,
+ int barrier_sync, vlib_node_t **** node_dupsp,
+ vlib_main_t *** stat_vmsp)
+{
+ vlib_node_main_t *nm = &vm->node_main;
+ vlib_node_t *n;
+ vlib_node_t ***node_dups = *node_dupsp;
+ vlib_node_t **nodes;
+ vlib_main_t **stat_vms = *stat_vmsp;
+ vlib_main_t *stat_vm;
+ uword i, j;
+ u32 threads_to_serialize;
+
+ if (vec_len (stat_vms) == 0)
+ {
+ for (i = 0; i < vec_len (vlib_mains); i++)
+ {
+ stat_vm = vlib_mains[i];
+ if (stat_vm)
+ vec_add1 (stat_vms, stat_vm);
+ }
+ }
+
+ threads_to_serialize = clib_min (max_threads, vec_len (stat_vms));
+
+ vec_validate (node_dups, threads_to_serialize - 1);
+
+ /*
+ * Barrier sync across stats scraping.
+ * Otherwise, the counts will be grossly inaccurate.
+ */
+ if (barrier_sync)
+ vlib_worker_thread_barrier_sync (vm);
+
+ for (j = 0; j < threads_to_serialize; j++)
+ {
+ stat_vm = stat_vms[j];
+ nm = &stat_vm->node_main;
+
+ if (include_stats)
+ {
+ for (i = 0; i < vec_len (nm->nodes); i++)
+ {
+ n = nm->nodes[i];
+ vlib_node_sync_stats (stat_vm, n);
+ }
+ }
+
+ nodes = node_dups[j];
+ vec_validate (nodes, vec_len (nm->nodes) - 1);
+ clib_memcpy (nodes, nm->nodes, vec_len (nm->nodes) * sizeof (nodes[0]));
+ node_dups[j] = nodes;
+ }
+
+ if (barrier_sync)
+ vlib_worker_thread_barrier_release (vm);
+
+ *node_dupsp = node_dups;
+ *stat_vmsp = stat_vms;
+}
+
clib_error_t *
vlib_node_main_init (vlib_main_t * vm)
{
vlib_node_t *n;
uword ni;
+ nm->frame_sizes = vec_new (vlib_frame_size_t, 1);
+#ifdef VLIB_SUPPORTS_ARBITRARY_SCALAR_SIZES
nm->frame_size_hash = hash_create (0, sizeof (uword));
+#endif
nm->flags |= VLIB_NODE_MAIN_RUNTIME_STARTED;
/* Generate sibling relationships */
return error;
}
+u32
+vlib_process_create (vlib_main_t * vm, char *name,
+ vlib_node_function_t * f, u32 log2_n_stack_bytes)
+{
+ vlib_node_registration_t r;
+ vlib_node_t *n;
+
+ memset (&r, 0, sizeof (r));
+
+ r.name = (char *) format (0, "%s", name, 0);
+ r.function = f;
+ r.process_log2_n_stack_bytes = log2_n_stack_bytes;
+ r.type = VLIB_NODE_TYPE_PROCESS;
+
+ vlib_worker_thread_barrier_sync (vm);
+
+ vlib_register_node (vm, &r);
+ vec_free (r.name);
+
+ vlib_worker_thread_node_runtime_update ();
+ vlib_worker_thread_barrier_release (vm);
+
+ n = vlib_get_node (vm, r.index);
+ vlib_start_process (vm, n->runtime_index);
+
+ return (r.index);
+}
+
/*
* fd.io coding-style-patch-verification: ON
*