vlib_node_main_t *nm = &vm->node_main;
uword *p;
u8 *key = name;
- if (!clib_mem_is_heap_object (key))
- key = format (0, "%s", key);
+ key = format (0, "%s", key);
p = hash_get (nm->node_by_name, key);
if (key != name)
vec_free (key);
vec_free (t->format);
t->format = (char *) format (0, "%v-return: %%d%c", n->name, 0);
- n->name_elog_string = elog_string (&vm->elog_main, "%v%c", n->name, 0);
-}
-
-static void
-vlib_worker_thread_node_rename (u32 node_index)
-{
- int i;
- vlib_main_t *vm;
- vlib_node_t *n;
-
- if (vec_len (vlib_mains) == 1)
- return;
-
- vm = vlib_mains[0];
- n = vlib_get_node (vm, node_index);
-
- ASSERT (vlib_get_thread_index () == 0);
- ASSERT (*vlib_worker_threads->wait_at_barrier == 1);
-
- for (i = 1; i < vec_len (vlib_mains); i++)
- {
- vlib_main_t *vm_worker = vlib_mains[i];
- vlib_node_t *n_worker = vlib_get_node (vm_worker, node_index);
-
- n_worker->name = n->name;
- n_worker->name_elog_string = n->name_elog_string;
- }
+ n->name_elog_string =
+ elog_string (&vlib_global_main.elog_main, "%v%c", n->name, 0);
}
void
node_set_elog_name (vm, node_index);
/* Propagate the change to all worker threads */
- vlib_worker_thread_node_rename (node_index);
+ vlib_worker_thread_node_runtime_update ();
}
static void
vlib_pending_frame_t *pf;
i32 i, j, n_insert;
- ASSERT (vlib_get_thread_index () == 0);
-
- vlib_worker_thread_barrier_sync (vm);
-
node = vec_elt (nm->nodes, node_index);
r = vlib_node_get_runtime (vm, node_index);
pf->next_frame_index += n_insert;
}
/* *INDENT-OFF* */
- pool_foreach (pf, nm->suspended_process_frames, ({
+ pool_foreach (pf, nm->suspended_process_frames) {
if (pf->next_frame_index != ~0 && pf->next_frame_index >= i)
pf->next_frame_index += n_insert;
- }));
+ }
/* *INDENT-ON* */
r->n_next_nodes = vec_len (node->next_nodes);
nf->node_runtime_index = next_node->runtime_index;
vlib_worker_thread_node_runtime_update ();
-
- vlib_worker_thread_barrier_release (vm);
}
uword
uword next_node_index, uword slot)
{
vlib_node_main_t *nm = &vm->node_main;
- vlib_node_t *node, *next;
+ vlib_node_t *node, *next, *old_next;
+ u32 old_next_index;
uword *p;
+ ASSERT (vlib_get_thread_index () == 0);
+
node = vec_elt (nm->nodes, node_index);
next = vec_elt (nm->nodes, next_node_index);
return p[0];
}
+ vlib_worker_thread_barrier_sync (vm);
+
if (slot == ~0)
slot = vec_len (node->next_nodes);
vec_validate_init_empty (node->next_nodes, slot, ~0);
vec_validate (node->n_vectors_by_next_node, slot);
+ if ((old_next_index = node->next_nodes[slot]) != ~0u)
+ {
+ hash_unset (node->next_slot_by_node, old_next_index);
+ old_next = vlib_get_node (vm, old_next_index);
+ old_next->prev_node_bitmap =
+ clib_bitmap_andnoti (old_next->prev_node_bitmap, node_index);
+ }
+
node->next_nodes[slot] = next_node_index;
hash_set (node->next_slot_by_node, next_node_index, slot);
uword sib_node_index, sib_slot;
vlib_node_t *sib_node;
/* *INDENT-OFF* */
- clib_bitmap_foreach (sib_node_index, node->sibling_bitmap, ({
+ clib_bitmap_foreach (sib_node_index, node->sibling_bitmap) {
sib_node = vec_elt (nm->nodes, sib_node_index);
if (sib_node != node)
{
sib_slot = vlib_node_add_next_with_slot (vm, sib_node_index, next_node_index, slot);
ASSERT (sib_slot == slot);
}
- }));
+ }
/* *INDENT-ON* */
}
+ vlib_worker_thread_barrier_release (vm);
return slot;
}
{
elog_event_type_t t;
- memset (&t, 0, sizeof (t));
+ clib_memset (&t, 0, sizeof (t));
/* 2 event types for this node: one when node function is called.
One when it returns. */
#define STACK_ALIGN CLIB_CACHE_LINE_BYTES
#endif
+/**
+ * Pick the preferred function variant for a node from its registration chain.
+ *
+ * Selection order:
+ *  1. If a default march variant was configured
+ *     (nm->node_fn_default_march_variant != ~0) and a registration for that
+ *     exact variant exists, return its function immediately.
+ *  2. Otherwise scan all registrations and keep the one whose variant has
+ *     the highest runtime priority (as recorded in nm->variants).
+ *
+ * @param vm   vlib main structure
+ * @param regs head of the node's singly-linked function-registration list
+ * @return the selected node function; asserts that at least one exists
+ */
+vlib_node_function_t *
+vlib_node_get_preferred_node_fn_variant (vlib_main_t *vm,
+					 vlib_node_fn_registration_t *regs)
+{
+  vlib_node_main_t *nm = &vm->node_main;
+  vlib_node_fn_registration_t *r;
+  vlib_node_fn_variant_t *v;
+  vlib_node_function_t *fn = 0;
+  int priority = -1;
+
+  /* Honor an operator-configured default variant, if one is registered. */
+  if (nm->node_fn_default_march_variant != ~0)
+    {
+      r = regs;
+      while (r)
+	{
+	  if (r->march_variant == nm->node_fn_default_march_variant)
+	    return r->function;
+	  r = r->next_registration;
+	}
+    }
+
+  /* Fall back to the highest-priority variant among the registrations. */
+  r = regs;
+  while (r)
+    {
+      v = vec_elt_at_index (nm->variants, r->march_variant);
+      if (v->priority > priority)
+	{
+	  priority = v->priority;
+	  fn = r->function;
+	}
+      r = r->next_registration;
+    }
+
+  /* Every node with fn registrations must yield at least one candidate. */
+  ASSERT (fn);
+  return fn;
+}
+
+
static void
register_node (vlib_main_t * vm, vlib_node_registration_t * r)
{
vlib_node_main_t *nm = &vm->node_main;
vlib_node_t *n;
- u32 page_size = clib_mem_get_page_size ();
int i;
if (CLIB_DEBUG > 0)
if (r->node_fn_registrations)
{
- vlib_node_fn_registration_t *fnr = r->node_fn_registrations;
- int priority = -1;
-
 /* to avoid confusion, please remove the ".function" statement from
 CLIB_NODE_REGISTRATION() if using function candidates */
ASSERT (r->function == 0);
- while (fnr)
- {
- if (fnr->priority > priority)
- {
- priority = fnr->priority;
- r->function = fnr->function;
- }
- fnr = fnr->next_registration;
- }
+ r->function =
+ vlib_node_get_preferred_node_fn_variant (vm, r->node_fn_registrations);
}
ASSERT (r->function != 0);
n = clib_mem_alloc_no_fail (sizeof (n[0]));
- memset (n, 0, sizeof (n[0]));
+ clib_memset (n, 0, sizeof (n[0]));
n->index = vec_len (nm->nodes);
+ n->node_fn_registrations = r->node_fn_registrations;
+ n->protocol_hint = r->protocol_hint;
vec_add1 (nm->nodes, n);
/* Node names must be unique. */
{
- vlib_node_t *o = vlib_get_node_by_name (vm, n->name);
+ /* vlib_get_node_by_name() expects NULL-terminated strings */
+ u8 *name = format (0, "%v%c", n->name, 0);
+ vlib_node_t *o = vlib_get_node_by_name (vm, name);
+ vec_free (name);
if (o)
clib_error ("more than one node named `%v'", n->name);
}
_(validate_frame);
/* Register error counters. */
- vlib_register_errors (vm, n->index, r->n_errors, r->error_strings);
+ vlib_register_errors (vm, n->index, r->n_errors, r->error_strings,
+ r->error_counters);
node_elog_init (vm, n->index);
_(runtime_data_bytes);
vlib_process_t *p;
uword log2_n_stack_bytes;
- log2_n_stack_bytes = clib_max (r->process_log2_n_stack_bytes, 15);
+ log2_n_stack_bytes = clib_max (r->process_log2_n_stack_bytes,
+ VLIB_PROCESS_LOG2_STACK_SIZE);
+ log2_n_stack_bytes = clib_max (log2_n_stack_bytes,
+ clib_mem_get_log2_page_size ());
-#ifdef CLIB_UNIX
- /*
- * Bump the stack size if running over a kernel with a large page size,
- * and the stack isn't any too big to begin with. Otherwise, we'll
- * trip over the stack guard page for sure.
- */
- if ((page_size > (4 << 10)) && log2_n_stack_bytes < 19)
- {
- if ((1 << log2_n_stack_bytes) <= page_size)
- log2_n_stack_bytes = min_log2 (page_size) + 1;
- else
- log2_n_stack_bytes++;
- }
-#endif
+ p = clib_mem_alloc_aligned (sizeof (p[0]), CLIB_CACHE_LINE_BYTES);
+ clib_memset (p, 0, sizeof (p[0]));
+ p->log2_n_stack_bytes = log2_n_stack_bytes;
- p = clib_mem_alloc_aligned_at_offset
- (sizeof (p[0]) + (1 << log2_n_stack_bytes),
- STACK_ALIGN, STRUCT_OFFSET_OF (vlib_process_t, stack),
- 0 /* no, don't call os_out_of_memory */ );
- if (p == 0)
- clib_panic ("failed to allocate process stack (%d bytes)",
- 1 << log2_n_stack_bytes);
+ p->stack = clib_mem_vm_map_stack (1ULL << log2_n_stack_bytes,
+ CLIB_MEM_PAGE_SZ_DEFAULT,
+ "process stack: %U",
+ format_vlib_node_name, vm,
+ n->index);
- memset (p, 0, sizeof (p[0]));
- p->log2_n_stack_bytes = log2_n_stack_bytes;
+ if (p->stack == CLIB_MEM_VM_MAP_FAILED)
+ clib_panic ("failed to allocate process stack (%d bytes)",
+ 1ULL << log2_n_stack_bytes);
/* Process node's runtime index is really index into process
pointer vector. */
/* Node runtime is stored inside of process. */
rt = &p->node_runtime;
-
-#ifdef CLIB_UNIX
- /*
- * Disallow writes to the bottom page of the stack, to
- * catch stack overflows.
- */
- if (mprotect (p->stack, page_size, PROT_READ) < 0)
- clib_unix_warning ("process stack");
-#endif
-
}
else
{
vec_add2_aligned (nm->nodes_by_type[n->type], rt, 1,
/* align */ CLIB_CACHE_LINE_BYTES);
+ if (n->type == VLIB_NODE_TYPE_INPUT)
+ clib_interrupt_resize (&nm->interrupts,
+ vec_len (nm->nodes_by_type[n->type]));
n->runtime_index = rt - nm->nodes_by_type[n->type];
}
vec_resize (rt->errors, r->n_errors);
for (i = 0; i < vec_len (rt->errors); i++)
- rt->errors[i] = vlib_error_set (n->index, i);
+ rt->errors[i] = n->error_heap_index + i;
STATIC_ASSERT_SIZEOF (vlib_node_runtime_t, 128);
ASSERT (vec_len (n->runtime_data) <= VLIB_NODE_RUNTIME_DATA_SIZE);
if (vec_len (n->runtime_data) > 0)
clib_memcpy (rt->runtime_data, n->runtime_data,
vec_len (n->runtime_data));
+ else
+ clib_memset (rt->runtime_data, 0, VLIB_NODE_RUNTIME_DATA_SIZE);
vec_free (n->runtime_data);
}
+#undef _
}
/* Register new packet processing node. */
u16 n_vectors = frame->n_vectors;
vlib_node_increment_counter (vm, node->node_index, 0, n_vectors);
- vlib_buffer_free (vm, vlib_frame_args (frame), n_vectors);
+ vlib_buffer_free (vm, vlib_frame_vector_args (frame), n_vectors);
vlib_frame_free (vm, node, frame);
return n_vectors;
}
+/**
+ * Build the table of known march (CPU architecture) function variants.
+ *
+ * Populates nm->variants with a "default" entry followed by one entry per
+ * variant from foreach_march_variant, recording each variant's suffix,
+ * description, index and runtime-probed priority.  Also builds the
+ * suffix -> variant-index lookup hash.  Must run before nodes are
+ * registered (asserts nm->variants is empty).
+ *
+ * @param vm vlib main structure
+ */
+void
+vlib_register_all_node_march_variants (vlib_main_t *vm)
+{
+  vlib_node_main_t *nm = &vm->node_main;
+  vlib_node_fn_variant_t *v;
+  int prio = -1;
+
+  /* ~0 means "no operator-configured default"; see
+     vlib_node_get_preferred_node_fn_variant(). */
+  nm->node_fn_default_march_variant = ~0;
+  ASSERT (nm->variants == 0);
+  vec_add2 (nm->variants, v, 1);
+  v->desc = v->suffix = "default";
+  v->index = CLIB_MARCH_VARIANT_TYPE;
+
+#define _(s, n) \
+  vec_add2 (nm->variants, v, 1); \
+  v->suffix = #s; \
+  v->index = CLIB_MARCH_VARIANT_TYPE_##s; \
+  v->priority = clib_cpu_march_priority_##s (); \
+  v->desc = n;
+
+  foreach_march_variant;
+#undef _
+
+  nm->node_fn_march_variant_by_suffix = hash_create_string (0, sizeof (u32));
+
+  /* Index the variants by suffix and track the best priority seen. */
+  vec_foreach (v, nm->variants)
+    {
+      /* Variant index must match its position in the vector. */
+      ASSERT (v->index == v - nm->variants);
+      hash_set (nm->node_fn_march_variant_by_suffix, v->suffix, v->index);
+      if (v->priority > prio)
+	prio = v->priority;
+    }
+}
+
+
void
vlib_register_all_static_nodes (vlib_main_t * vm)
{
if (vec_len (stat_vms) == 0)
{
- for (i = 0; i < vec_len (vlib_mains); i++)
+ for (i = 0; i < vlib_get_n_threads (); i++)
{
- stat_vm = vlib_mains[i];
+ stat_vm = vlib_get_main_by_index (i);
if (stat_vm)
vec_add1 (stat_vms, stat_vm);
}
vlib_node_t *n;
uword ni;
+ nm->frame_sizes = vec_new (vlib_frame_size_t, 1);
+#ifdef VLIB_SUPPORTS_ARBITRARY_SCALAR_SIZES
nm->frame_size_hash = hash_create (0, sizeof (uword));
+#endif
nm->flags |= VLIB_NODE_MAIN_RUNTIME_STARTED;
/* Generate sibling relationships */
}
/* *INDENT-OFF* */
- clib_bitmap_foreach (si, sib->sibling_bitmap, ({
+ clib_bitmap_foreach (si, sib->sibling_bitmap) {
vlib_node_t * m = vec_elt (nm->nodes, si);
/* Connect all of sibling's siblings to us. */
/* Connect us to all of sibling's siblings. */
n->sibling_bitmap = clib_bitmap_ori (n->sibling_bitmap, si);
- }));
+ }
/* *INDENT-ON* */
/* Connect sibling to us. */
return error;
}
+/**
+ * Create and start a process node at runtime.
+ *
+ * Builds a transient node registration, registers it under the worker
+ * barrier (workers must be held while the node graph changes), propagates
+ * the runtime update to worker threads, then starts the process.
+ *
+ * @param vm                      vlib main structure
+ * @param name                    NUL-terminated process node name (copied)
+ * @param f                       process node function
+ * @param log2_n_stack_bytes      log2 of the process stack size
+ * @return index of the newly created node
+ */
+u32
+vlib_process_create (vlib_main_t * vm, char *name,
+		     vlib_node_function_t * f, u32 log2_n_stack_bytes)
+{
+  vlib_node_registration_t r;
+  vlib_node_t *n;
+
+  /* clib_memset for consistency with the rest of the file (the raw
+     memset calls here were converted to clib_memset). */
+  clib_memset (&r, 0, sizeof (r));
+
+  r.name = (char *) format (0, "%s", name, 0);
+  r.function = f;
+  r.process_log2_n_stack_bytes = log2_n_stack_bytes;
+  r.type = VLIB_NODE_TYPE_PROCESS;
+
+  /* Node graph changes require all workers parked at the barrier. */
+  vlib_worker_thread_barrier_sync (vm);
+
+  vlib_register_node (vm, &r);
+  vec_free (r.name);
+
+  vlib_worker_thread_node_runtime_update ();
+  vlib_worker_thread_barrier_release (vm);
+
+  n = vlib_get_node (vm, r.index);
+  vlib_start_process (vm, n->runtime_index);
+
+  return (r.index);
+}
+
+
+/**
+ * Force a node to use a specific march (CPU architecture) function variant.
+ *
+ * Looks up the requested variant in the node's function-registration list
+ * and, if found, installs that function in the node and in the node runtime
+ * of every thread.
+ *
+ * @param vm            vlib main structure
+ * @param node_index    index of the node to modify
+ * @param march_variant variant to select (index into vm->node_main.variants)
+ * @return 0 on success; -1 if the node has no function registrations or no
+ *         registration matches the requested variant
+ */
+int
+vlib_node_set_march_variant (vlib_main_t *vm, u32 node_index,
+			     clib_march_variant_type_t march_variant)
+{
+  vlib_node_fn_registration_t *fnr;
+  vlib_node_fn_variant_t *v;
+  vlib_node_t *n = vlib_get_node (vm, node_index);
+
+  /* Nodes registered with a single .function have no variant list. */
+  if (n->node_fn_registrations == 0)
+    return -1;
+
+  fnr = n->node_fn_registrations;
+  v = vec_elt_at_index (vm->node_main.variants, march_variant);
+
+  while (fnr)
+    {
+      if (fnr->march_variant == v->index)
+	{
+	  n->function = fnr->function;
+
+	  /* Update the per-thread runtime copies as well. */
+	  for (int i = 0; i < vlib_get_n_threads (); i++)
+	    {
+	      vlib_node_runtime_t *nrt;
+	      nrt =
+		vlib_node_get_runtime (vlib_get_main_by_index (i), n->index);
+	      nrt->function = fnr->function;
+	    }
+	  return 0;
+	}
+      fnr = fnr->next_registration;
+    }
+  /* Requested variant is not registered for this node. */
+  return -1;
+}
/*
* fd.io coding-style-patch-verification: ON
*