X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=src%2Fvlib%2Fnode.c;h=618baecfde0fa516d08c9dd972b09785b279aa6c;hb=a31698bb7401f6e2389c0e805bf280ae52278524;hp=2cda0f064759a8b9d1f8f08aff9f4e0e3015d3d5;hpb=bb620d74b247f419eb485886c55148099b0213bb;p=vpp.git

diff --git a/src/vlib/node.c b/src/vlib/node.c
index 2cda0f06475..618baecfde0 100644
--- a/src/vlib/node.c
+++ b/src/vlib/node.c
@@ -47,8 +47,7 @@ vlib_get_node_by_name (vlib_main_t * vm, u8 * name)
   vlib_node_main_t *nm = &vm->node_main;
   uword *p;
   u8 *key = name;
-  if (!clib_mem_is_heap_object (key))
-    key = format (0, "%s", key);
+  key = format (0, "%s", key);
   p = hash_get (nm->node_by_name, key);
   if (key != name)
     vec_free (key);
@@ -87,6 +86,9 @@ vlib_node_rename (vlib_main_t * vm, u32 node_index, char *fmt, ...)
   hash_set (nm->node_by_name, n->name, n->index);

   node_set_elog_name (vm, node_index);
+
+  /* Propagate the change to all worker threads */
+  vlib_worker_thread_node_runtime_update ();
 }

 static void
@@ -99,10 +101,6 @@ vlib_node_runtime_update (vlib_main_t * vm, u32 node_index, u32 next_index)
   vlib_pending_frame_t *pf;
   i32 i, j, n_insert;

-  ASSERT (vlib_get_thread_index () == 0);
-
-  vlib_worker_thread_barrier_sync (vm);
-
   node = vec_elt (nm->nodes, node_index);
   r = vlib_node_get_runtime (vm, node_index);

@@ -132,10 +130,10 @@ vlib_node_runtime_update (vlib_main_t * vm, u32 node_index, u32 next_index)
             pf->next_frame_index += n_insert;
         }
       /* *INDENT-OFF* */
-      pool_foreach (pf, nm->suspended_process_frames, ({
+      pool_foreach (pf, nm->suspended_process_frames) {
         if (pf->next_frame_index != ~0 && pf->next_frame_index >= i)
           pf->next_frame_index += n_insert;
-      }));
+      }
       /* *INDENT-ON* */

       r->n_next_nodes = vec_len (node->next_nodes);
@@ -147,8 +145,6 @@ vlib_node_runtime_update (vlib_main_t * vm, u32 node_index, u32 next_index)
   nf->node_runtime_index = next_node->runtime_index;

   vlib_worker_thread_node_runtime_update ();
-
-  vlib_worker_thread_barrier_release (vm);
 }

 uword
@@ -178,9 +174,12 @@ vlib_node_add_next_with_slot (vlib_main_t * vm,
                               uword next_node_index, uword slot)
 {
   vlib_node_main_t *nm = &vm->node_main;
-  vlib_node_t *node, *next;
+  vlib_node_t *node, *next, *old_next;
+  u32 old_next_index;
   uword *p;

+  ASSERT (vlib_get_thread_index () == 0);
+
   node = vec_elt (nm->nodes, node_index);
   next = vec_elt (nm->nodes, next_node_index);

@@ -195,12 +194,22 @@ vlib_node_add_next_with_slot (vlib_main_t * vm,
       return p[0];
     }

+  vlib_worker_thread_barrier_sync (vm);
+
   if (slot == ~0)
     slot = vec_len (node->next_nodes);

   vec_validate_init_empty (node->next_nodes, slot, ~0);
   vec_validate (node->n_vectors_by_next_node, slot);

+  if ((old_next_index = node->next_nodes[slot]) != ~0u)
+    {
+      hash_unset (node->next_slot_by_node, old_next_index);
+      old_next = vlib_get_node (vm, old_next_index);
+      old_next->prev_node_bitmap =
+        clib_bitmap_andnoti (old_next->prev_node_bitmap, node_index);
+    }
+
   node->next_nodes[slot] = next_node_index;
   hash_set (node->next_slot_by_node, next_node_index, slot);

@@ -214,17 +223,18 @@ vlib_node_add_next_with_slot (vlib_main_t * vm,
     uword sib_node_index, sib_slot;
     vlib_node_t *sib_node;
     /* *INDENT-OFF* */
-    clib_bitmap_foreach (sib_node_index, node->sibling_bitmap, ({
+    clib_bitmap_foreach (sib_node_index, node->sibling_bitmap) {
       sib_node = vec_elt (nm->nodes, sib_node_index);
       if (sib_node != node)
         {
           sib_slot = vlib_node_add_next_with_slot (vm, sib_node_index,
                                                    next_node_index, slot);
           ASSERT (sib_slot == slot);
         }
-    }));
+    }
     /* *INDENT-ON* */
   }
+  vlib_worker_thread_barrier_release (vm);

   return slot;
 }
@@ -261,7 +271,7 @@ node_elog_init (vlib_main_t * vm, uword ni)
 {
   elog_event_type_t t;

-  memset (&t, 0, sizeof (t));
+  clib_memset (&t, 0, sizeof (t));

   /* 2 event types for this node: one when node function is called.
      One when it returns. */
@@ -280,12 +290,48 @@ node_elog_init (vlib_main_t * vm, uword ni)
 #define STACK_ALIGN CLIB_CACHE_LINE_BYTES
 #endif

+vlib_node_function_t *
+vlib_node_get_preferred_node_fn_variant (vlib_main_t *vm,
+                                         vlib_node_fn_registration_t *regs)
+{
+  vlib_node_main_t *nm = &vm->node_main;
+  vlib_node_fn_registration_t *r;
+  vlib_node_fn_variant_t *v;
+  vlib_node_function_t *fn = 0;
+  int priority = -1;
+
+  if (nm->node_fn_default_march_variant != ~0)
+    {
+      r = regs;
+      while (r)
+        {
+          if (r->march_variant == nm->node_fn_default_march_variant)
+            return r->function;
+          r = r->next_registration;
+        }
+    }
+
+  r = regs;
+  while (r)
+    {
+      v = vec_elt_at_index (nm->variants, r->march_variant);
+      if (v->priority > priority)
+        {
+          priority = v->priority;
+          fn = r->function;
+        }
+      r = r->next_registration;
+    }
+
+  ASSERT (fn);
+  return fn;
+}
+
 static void
 register_node (vlib_main_t * vm, vlib_node_registration_t * r)
 {
   vlib_node_main_t *nm = &vm->node_main;
   vlib_node_t *n;
-  u32 page_size = clib_mem_get_page_size ();
   int i;

   if (CLIB_DEBUG > 0)
@@ -295,11 +341,23 @@ register_node (vlib_main_t * vm, vlib_node_registration_t * r)
       ASSERT (VLIB_NODE_TYPE_INTERNAL == zero.type);
     }

+  if (r->node_fn_registrations)
+    {
+      /* to avoid confusion, please remove the ".function" statement from
+         CLIB_NODE_REGISTRATION() if using function candidates */
+      ASSERT (r->function == 0);
+
+      r->function =
+        vlib_node_get_preferred_node_fn_variant (vm, r->node_fn_registrations);
+    }
+
   ASSERT (r->function != 0);

   n = clib_mem_alloc_no_fail (sizeof (n[0]));
-  memset (n, 0, sizeof (n[0]));
+  clib_memset (n, 0, sizeof (n[0]));
   n->index = vec_len (nm->nodes);
+  n->node_fn_registrations = r->node_fn_registrations;
+  n->protocol_hint = r->protocol_hint;

   vec_add1 (nm->nodes, n);

@@ -315,7 +373,10 @@ register_node (vlib_main_t * vm, vlib_node_registration_t * r)

   /* Node names must be unique. */
   {
-    vlib_node_t *o = vlib_get_node_by_name (vm, n->name);
+    /* vlib_get_node_by_name() expects NULL-terminated strings */
+    u8 *name = format (0, "%v%c", n->name, 0);
+    vlib_node_t *o = vlib_get_node_by_name (vm, name);
+    vec_free (name);
     if (o)
       clib_error ("more than one node named `%v'", n->name);
   }
@@ -346,7 +407,8 @@ register_node (vlib_main_t * vm, vlib_node_registration_t * r)
   _(validate_frame);

   /* Register error counters. */
-  vlib_register_errors (vm, n->index, r->n_errors, r->error_strings);
+  vlib_register_errors (vm, n->index, r->n_errors, r->error_strings,
+                        r->error_counters);
   node_elog_init (vm, n->index);

   _(runtime_data_bytes);
@@ -376,33 +438,24 @@ register_node (vlib_main_t * vm, vlib_node_registration_t * r)
       vlib_process_t *p;
       uword log2_n_stack_bytes;

-      log2_n_stack_bytes = clib_max (r->process_log2_n_stack_bytes, 15);
+      log2_n_stack_bytes = clib_max (r->process_log2_n_stack_bytes,
+                                     VLIB_PROCESS_LOG2_STACK_SIZE);
+      log2_n_stack_bytes = clib_max (log2_n_stack_bytes,
+                                     clib_mem_get_log2_page_size ());

-#ifdef CLIB_UNIX
-      /*
-       * Bump the stack size if running over a kernel with a large page size,
-       * and the stack isn't any too big to begin with. Otherwise, we'll
-       * trip over the stack guard page for sure.
-       */
-      if ((page_size > (4 << 10)) && log2_n_stack_bytes < 19)
-        {
-          if ((1 << log2_n_stack_bytes) <= page_size)
-            log2_n_stack_bytes = min_log2 (page_size) + 1;
-          else
-            log2_n_stack_bytes++;
-        }
-#endif
+      p = clib_mem_alloc_aligned (sizeof (p[0]), CLIB_CACHE_LINE_BYTES);
+      clib_memset (p, 0, sizeof (p[0]));
+      p->log2_n_stack_bytes = log2_n_stack_bytes;

-      p = clib_mem_alloc_aligned_at_offset
-        (sizeof (p[0]) + (1 << log2_n_stack_bytes),
-         STACK_ALIGN, STRUCT_OFFSET_OF (vlib_process_t, stack),
-         0 /* no, don't call os_out_of_memory */ );
-      if (p == 0)
-        clib_panic ("failed to allocate process stack (%d bytes)",
-                    1 << log2_n_stack_bytes);
+      p->stack = clib_mem_vm_map_stack (1ULL << log2_n_stack_bytes,
+                                        CLIB_MEM_PAGE_SZ_DEFAULT,
+                                        "process stack: %U",
+                                        format_vlib_node_name, vm,
+                                        n->index);

-      memset (p, 0, sizeof (p[0]));
-      p->log2_n_stack_bytes = log2_n_stack_bytes;
+      if (p->stack == CLIB_MEM_VM_MAP_FAILED)
+        clib_panic ("failed to allocate process stack (%d bytes)",
+                    1ULL << log2_n_stack_bytes);

       /* Process node's runtime index is really index into process
          pointer vector. */
@@ -416,21 +469,14 @@ register_node (vlib_main_t * vm, vlib_node_registration_t * r)

       /* Node runtime is stored inside of process. */
       rt = &p->node_runtime;
-
-#ifdef CLIB_UNIX
-      /*
-       * Disallow writes to the bottom page of the stack, to
-       * catch stack overflows.
-       */
-      if (mprotect (p->stack, page_size, PROT_READ) < 0)
-        clib_unix_warning ("process stack");
-#endif
-
     }
   else
     {
       vec_add2_aligned (nm->nodes_by_type[n->type], rt, 1,
                         /* align */ CLIB_CACHE_LINE_BYTES);
+      if (n->type == VLIB_NODE_TYPE_INPUT)
+        clib_interrupt_resize (&nm->interrupts,
+                               vec_len (nm->nodes_by_type[n->type]));
       n->runtime_index = rt - nm->nodes_by_type[n->type];
     }

@@ -451,7 +497,7 @@ register_node (vlib_main_t * vm, vlib_node_registration_t * r)

   vec_resize (rt->errors, r->n_errors);
   for (i = 0; i < vec_len (rt->errors); i++)
-    rt->errors[i] = vlib_error_set (n->index, i);
+    rt->errors[i] = n->error_heap_index + i;

   STATIC_ASSERT_SIZEOF (vlib_node_runtime_t, 128);
   ASSERT (vec_len (n->runtime_data) <= VLIB_NODE_RUNTIME_DATA_SIZE);
@@ -459,9 +505,12 @@ register_node (vlib_main_t * vm, vlib_node_registration_t * r)
   if (vec_len (n->runtime_data) > 0)
     clib_memcpy (rt->runtime_data, n->runtime_data,
                  vec_len (n->runtime_data));
+  else
+    clib_memset (rt->runtime_data, 0, VLIB_NODE_RUNTIME_DATA_SIZE);

   vec_free (n->runtime_data);
   }
+#undef _
 }

 /* Register new packet processing node. */
@@ -479,12 +528,46 @@ null_node_fn (vlib_main_t * vm,
   u16 n_vectors = frame->n_vectors;

   vlib_node_increment_counter (vm, node->node_index, 0, n_vectors);
-  vlib_buffer_free (vm, vlib_frame_args (frame), n_vectors);
+  vlib_buffer_free (vm, vlib_frame_vector_args (frame), n_vectors);
   vlib_frame_free (vm, node, frame);

   return n_vectors;
 }

+void
+vlib_register_all_node_march_variants (vlib_main_t *vm)
+{
+  vlib_node_main_t *nm = &vm->node_main;
+  vlib_node_fn_variant_t *v;
+  int prio = -1;
+
+  nm->node_fn_default_march_variant = ~0;
+  ASSERT (nm->variants == 0);
+  vec_add2 (nm->variants, v, 1);
+  v->desc = v->suffix = "default";
+  v->index = CLIB_MARCH_VARIANT_TYPE;
+
+#define _(s, n)                                 \
+  vec_add2 (nm->variants, v, 1);                \
+  v->suffix = #s;                               \
+  v->index = CLIB_MARCH_VARIANT_TYPE_##s;       \
+  v->priority = clib_cpu_march_priority_##s (); \
+  v->desc = n;
+
+  foreach_march_variant;
+#undef _
+
+  nm->node_fn_march_variant_by_suffix = hash_create_string (0, sizeof (u32));
+
+  vec_foreach (v, nm->variants)
+    {
+      ASSERT (v->index == v - nm->variants);
+      hash_set (nm->node_fn_march_variant_by_suffix, v->suffix, v->index);
+      if (v->priority > prio)
+        prio = v->priority;
+    }
+}
+
 void
 vlib_register_all_static_nodes (vlib_main_t * vm)
 {
@@ -514,6 +597,68 @@ vlib_register_all_static_nodes (vlib_main_t * vm)
     }
 }

+void
+vlib_node_get_nodes (vlib_main_t * vm, u32 max_threads, int include_stats,
+                     int barrier_sync, vlib_node_t **** node_dupsp,
+                     vlib_main_t *** stat_vmsp)
+{
+  vlib_node_main_t *nm = &vm->node_main;
+  vlib_node_t *n;
+  vlib_node_t ***node_dups = *node_dupsp;
+  vlib_node_t **nodes;
+  vlib_main_t **stat_vms = *stat_vmsp;
+  vlib_main_t *stat_vm;
+  uword i, j;
+  u32 threads_to_serialize;
+
+  if (vec_len (stat_vms) == 0)
+    {
+      for (i = 0; i < vec_len (vlib_mains); i++)
+        {
+          stat_vm = vlib_mains[i];
+          if (stat_vm)
+            vec_add1 (stat_vms, stat_vm);
+        }
+    }
+
+  threads_to_serialize = clib_min (max_threads, vec_len (stat_vms));
+
+  vec_validate (node_dups, threads_to_serialize - 1);
+
+  /*
+   * Barrier sync across stats scraping.
+   * Otherwise, the counts will be grossly inaccurate.
+   */
+  if (barrier_sync)
+    vlib_worker_thread_barrier_sync (vm);
+
+  for (j = 0; j < threads_to_serialize; j++)
+    {
+      stat_vm = stat_vms[j];
+      nm = &stat_vm->node_main;
+
+      if (include_stats)
+        {
+          for (i = 0; i < vec_len (nm->nodes); i++)
+            {
+              n = nm->nodes[i];
+              vlib_node_sync_stats (stat_vm, n);
+            }
+        }
+
+      nodes = node_dups[j];
+      vec_validate (nodes, vec_len (nm->nodes) - 1);
+      clib_memcpy (nodes, nm->nodes, vec_len (nm->nodes) * sizeof (nodes[0]));
+      node_dups[j] = nodes;
+    }
+
+  if (barrier_sync)
+    vlib_worker_thread_barrier_release (vm);
+
+  *node_dupsp = node_dups;
+  *stat_vmsp = stat_vms;
+}
+
 clib_error_t *
 vlib_node_main_init (vlib_main_t * vm)
 {
@@ -522,7 +667,10 @@ vlib_node_main_init (vlib_main_t * vm)
   vlib_node_t *n;
   uword ni;

+  nm->frame_sizes = vec_new (vlib_frame_size_t, 1);
+#ifdef VLIB_SUPPORTS_ARBITRARY_SCALAR_SIZES
   nm->frame_size_hash = hash_create (0, sizeof (uword));
+#endif
   nm->flags |= VLIB_NODE_MAIN_RUNTIME_STARTED;

   /* Generate sibling relationships */
@@ -546,7 +694,7 @@ vlib_node_main_init (vlib_main_t * vm)
          }

        /* *INDENT-OFF* */
-       clib_bitmap_foreach (si, sib->sibling_bitmap, ({
+       clib_bitmap_foreach (si, sib->sibling_bitmap) {
              vlib_node_t * m = vec_elt (nm->nodes, si);

              /* Connect all of sibling's siblings to us. */
              m->sibling_bitmap = clib_bitmap_ori (m->sibling_bitmap, n->index);

              /* Connect us to all of sibling's siblings. */
              n->sibling_bitmap = clib_bitmap_ori (n->sibling_bitmap, si);
-           }));
+           }
        /* *INDENT-ON* */

        /* Connect sibling to us. */
@@ -641,6 +789,66 @@ done:
   return error;
 }
+
+u32
+vlib_process_create (vlib_main_t * vm, char *name,
+                     vlib_node_function_t * f, u32 log2_n_stack_bytes)
+{
+  vlib_node_registration_t r;
+  vlib_node_t *n;
+
+  memset (&r, 0, sizeof (r));
+
+  r.name = (char *) format (0, "%s", name, 0);
+  r.function = f;
+  r.process_log2_n_stack_bytes = log2_n_stack_bytes;
+  r.type = VLIB_NODE_TYPE_PROCESS;
+
+  vlib_worker_thread_barrier_sync (vm);
+
+  vlib_register_node (vm, &r);
+  vec_free (r.name);
+
+  vlib_worker_thread_node_runtime_update ();
+  vlib_worker_thread_barrier_release (vm);
+
+  n = vlib_get_node (vm, r.index);
+  vlib_start_process (vm, n->runtime_index);
+
+  return (r.index);
+}
+
+int
+vlib_node_set_march_variant (vlib_main_t *vm, u32 node_index,
+                             clib_march_variant_type_t march_variant)
+{
+  vlib_node_fn_registration_t *fnr;
+  vlib_node_fn_variant_t *v;
+  vlib_node_t *n = vlib_get_node (vm, node_index);
+
+  if (n->node_fn_registrations == 0)
+    return -1;
+
+  fnr = n->node_fn_registrations;
+  v = vec_elt_at_index (vm->node_main.variants, march_variant);
+
+  while (fnr)
+    {
+      if (fnr->march_variant == v->index)
+        {
+          n->function = fnr->function;
+
+          for (int i = 0; i < vec_len (vlib_mains); i++)
+            {
+              vlib_node_runtime_t *nrt;
+              nrt = vlib_node_get_runtime (vlib_mains[i], n->index);
+              nrt->function = fnr->function;
+            }
+          return 0;
+        }
+      fnr = fnr->next_registration;
+    }
+  return -1;
+}

 /*
  * fd.io coding-style-patch-verification: ON
  *