}
#define VLIB_INIT_FUNCTION(x) VLIB_DECLARE_INIT_FUNCTION(x,init)
+#define VLIB_WORKER_INIT_FUNCTION(x) VLIB_DECLARE_INIT_FUNCTION(x,worker_init)
#define VLIB_MAIN_LOOP_ENTER_FUNCTION(x) \
VLIB_DECLARE_INIT_FUNCTION(x,main_loop_enter)
/* List of init functions to call, setup by constructors */
_vlib_init_function_list_elt_t *init_function_registrations;
+ _vlib_init_function_list_elt_t *worker_init_function_registrations;
_vlib_init_function_list_elt_t *main_loop_enter_function_registrations;
_vlib_init_function_list_elt_t *main_loop_exit_function_registrations;
_vlib_init_function_list_elt_t *api_init_function_registrations;
rt->errors[i] = vlib_error_set (n->index, i);
STATIC_ASSERT_SIZEOF (vlib_node_runtime_t, 128);
- ASSERT (vec_len (n->runtime_data) <=
- sizeof (vlib_node_runtime_t) -
- STRUCT_OFFSET_OF (vlib_node_runtime_t, runtime_data));
+ ASSERT (vec_len (n->runtime_data) <= VLIB_NODE_RUNTIME_DATA_SIZE);
if (vec_len (n->runtime_data) > 0)
clib_memcpy (rt->runtime_data, n->runtime_data,
typedef struct vlib_node_runtime_t
{
- CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
- /* Node function to call. */
- vlib_node_function_t *function;
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0); /**< cacheline mark */
- /* Vector of errors for this node. */
- vlib_error_t *errors;
+ vlib_node_function_t *function; /**< Node function to call. */
- /* Number of clock cycles. */
- u32 clocks_since_last_overflow;
+ vlib_error_t *errors; /**< Vector of errors for this node. */
- /* Maximum clock cycle for an invocation. */
- u32 max_clock;
+ u32 clocks_since_last_overflow; /**< Number of clock cycles. */
- /* Number of vectors in the recorded max_clock. */
- u32 max_clock_n;
+ u32 max_clock; /**< Maximum clock cycle for an
+ invocation. */
- /* Number of calls. */
- u32 calls_since_last_overflow;
+ u32 max_clock_n; /**< Number of vectors in the recorded
+ max_clock. */
- /* Number of vector elements processed by this node. */
- u32 vectors_since_last_overflow;
+ u32 calls_since_last_overflow; /**< Number of calls. */
- /* Start of next frames for this node. */
- u32 next_frame_index;
+ u32 vectors_since_last_overflow; /**< Number of vector elements
+ processed by this node. */
- /* Node index. */
- u32 node_index;
+ u32 next_frame_index; /**< Start of next frames for this
+ node. */
- /* For input nodes: decremented on each main loop interation until it reaches zero
- and function is called. Allows some input nodes to be called
- more than others. */
- u32 input_main_loops_per_call;
+ u32 node_index; /**< Node index. */
- /* Saved main loop counter of last dispatch of this node. */
- u32 main_loop_count_last_dispatch;
+ u32 input_main_loops_per_call; /**< For input nodes: decremented
+                                      on each main loop iteration until
+ it reaches zero and function is
+ called. Allows some input nodes to
+ be called more than others. */
+
+ u32 main_loop_count_last_dispatch; /**< Saved main loop counter of last
+ dispatch of this node. */
u32 main_loop_vector_stats[2];
- /* Copy of main node flags. */
- u16 flags;
+ u16 flags; /**< Copy of main node flags. */
- /* Input node state. */
- u16 state;
+ u16 state; /**< Input node state. */
u16 n_next_nodes;
- /* Next frame index that vector arguments were last enqueued to
- last time this node ran. Set to zero before first run
- of this node. */
- u16 cached_next_index;
-
- /* CPU this node runs on */
- u16 cpu_index;
-
- /* Function dependent node-runtime. */
- u8 runtime_data[0];
+ u16 cached_next_index; /**< Next frame index that vector
+ arguments were last enqueued to
+ last time this node ran. Set to
+ zero before first run of this
+ node. */
+
+ u16 cpu_index; /**< CPU this node runs on */
+
+ u8 runtime_data[0]; /**< Function dependent
+ node-runtime data. This data is
+ thread local, and it is not
+ cloned from main thread. It needs
+ to be initialized for each thread
+ before it is used unless
+ runtime_data template exists in
+ vlib_node_t. */
}
vlib_node_runtime_t;
+#define VLIB_NODE_RUNTIME_DATA_SIZE (sizeof (vlib_node_runtime_t) - STRUCT_OFFSET_OF (vlib_node_runtime_t, runtime_data))
+
typedef struct
{
/* Number of allocated frames for this scalar/vector size. */
vm_clone->cpu_index = worker_thread_index;
vm_clone->heap_base = w->thread_mheap;
vm_clone->mbuf_alloc_list = 0;
+ vm_clone->init_functions_called =
+ hash_create (0, /* value bytes */ 0);
memset (&vm_clone->random_buffer, 0,
sizeof (vm_clone->random_buffer));
}
nm_clone->nodes_by_type[VLIB_NODE_TYPE_INTERNAL] =
vec_dup (nm->nodes_by_type[VLIB_NODE_TYPE_INTERNAL]);
+  /* Initialize per-thread runtime data for the INTERNAL node runtimes
+     that were just vec_dup'ed above.  Iterating the INPUT vector here
+     would touch the main thread's input runtimes, which are not
+     cloned until the vec_dup that follows this loop. */
+  vec_foreach (rt, nm_clone->nodes_by_type[VLIB_NODE_TYPE_INTERNAL])
+  {
+    vlib_node_t *n = vlib_get_node (vm, rt->node_index);
+    rt->cpu_index = vm_clone->cpu_index;
+    /* copy initial runtime_data from node.
+       NOTE(review): copies VLIB_NODE_RUNTIME_DATA_SIZE bytes even if
+       vec_len (n->runtime_data) is smaller -- matches the sibling
+       loops, but clib_min () with n->runtime_data_bytes would avoid
+       over-reading the source vector; confirm. */
+    if (n->runtime_data_bytes > 0)
+      clib_memcpy (rt->runtime_data, n->runtime_data,
+                   VLIB_NODE_RUNTIME_DATA_SIZE);
+    else if (CLIB_DEBUG > 0)
+      memset (rt->runtime_data, 0xfe,
+              VLIB_NODE_RUNTIME_DATA_SIZE);
+  }
nm_clone->nodes_by_type[VLIB_NODE_TYPE_INPUT] =
vec_dup (nm->nodes_by_type[VLIB_NODE_TYPE_INPUT]);
vec_foreach (rt, nm_clone->nodes_by_type[VLIB_NODE_TYPE_INPUT])
+ {
+ vlib_node_t *n = vlib_get_node (vm, rt->node_index);
rt->cpu_index = vm_clone->cpu_index;
+ /* copy initial runtime_data from node */
+ if (n->runtime_data_bytes > 0)
+ clib_memcpy (rt->runtime_data, n->runtime_data,
+ VLIB_NODE_RUNTIME_DATA_SIZE);
+ else if (CLIB_DEBUG > 0)
+ memset (rt->runtime_data, 0xfe,
+ VLIB_NODE_RUNTIME_DATA_SIZE);
+ }
nm_clone->processes = vec_dup (nm->processes);
clib_mem_free (old_nodes_clone[j]);
vec_free (old_nodes_clone);
- vec_free (nm_clone->nodes_by_type[VLIB_NODE_TYPE_INTERNAL]);
+ /* re-clone internal nodes */
+ old_rt = nm_clone->nodes_by_type[VLIB_NODE_TYPE_INTERNAL];
nm_clone->nodes_by_type[VLIB_NODE_TYPE_INTERNAL] =
vec_dup (nm->nodes_by_type[VLIB_NODE_TYPE_INTERNAL]);
- /* clone input node runtime */
- old_rt = nm_clone->nodes_by_type[VLIB_NODE_TYPE_INPUT];
+ vec_foreach (rt, nm_clone->nodes_by_type[VLIB_NODE_TYPE_INTERNAL])
+ {
+ vlib_node_t *n = vlib_get_node (vm, rt->node_index);
+ rt->cpu_index = vm_clone->cpu_index;
+ /* copy runtime_data, will be overwritten later for existing rt */
+ clib_memcpy (rt->runtime_data, n->runtime_data,
+ VLIB_NODE_RUNTIME_DATA_SIZE);
+ }
+
+ for (j = 0; j < vec_len (old_rt); j++)
+ {
+ rt = vlib_node_get_runtime (vm_clone, old_rt[j].node_index);
+ rt->state = old_rt[j].state;
+ clib_memcpy (rt->runtime_data, old_rt[j].runtime_data,
+ VLIB_NODE_RUNTIME_DATA_SIZE);
+ }
+ vec_free (old_rt);
+
+ /* re-clone input nodes */
+ old_rt = nm_clone->nodes_by_type[VLIB_NODE_TYPE_INPUT];
nm_clone->nodes_by_type[VLIB_NODE_TYPE_INPUT] =
vec_dup (nm->nodes_by_type[VLIB_NODE_TYPE_INPUT]);
vec_foreach (rt, nm_clone->nodes_by_type[VLIB_NODE_TYPE_INPUT])
{
+ vlib_node_t *n = vlib_get_node (vm, rt->node_index);
rt->cpu_index = vm_clone->cpu_index;
+ /* copy runtime_data, will be overwritten later for existing rt */
+ clib_memcpy (rt->runtime_data, n->runtime_data,
+ VLIB_NODE_RUNTIME_DATA_SIZE);
}
for (j = 0; j < vec_len (old_rt); j++)
{
rt = vlib_node_get_runtime (vm_clone, old_rt[j].node_index);
rt->state = old_rt[j].state;
+ clib_memcpy (rt->runtime_data, old_rt[j].runtime_data,
+ VLIB_NODE_RUNTIME_DATA_SIZE);
}
vec_free (old_rt);
vlib_worker_thread_t *w = (vlib_worker_thread_t *) arg;
vlib_thread_main_t *tm = vlib_get_thread_main ();
vlib_main_t *vm = vlib_get_main ();
+ clib_error_t *e;
ASSERT (vm->cpu_index == os_get_cpu_number ());
clib_time_init (&vm->clib_time);
clib_mem_set_heap (w->thread_mheap);
+ e = vlib_call_init_exit_functions
+ (vm, vm->worker_init_function_registrations, 1 /* call_once */ );
+ if (e)
+ clib_error_report (e);
+
/* Wait until the dpdk init sequence is complete */
while (tm->extern_thread_mgmt && tm->worker_thread_release == 0)
vlib_worker_thread_barrier_check ();
{
gre_main_t * em = &gre_main;
gre_protocol_info_t * pi;
- gre_input_runtime_t * rt;
u16 * n;
{
node_index);
/* Setup gre protocol -> next index sparse vector mapping. */
- rt = vlib_node_get_runtime_data (vm, gre_input_node.index);
- n = sparse_vec_validate (rt->next_by_protocol,
- clib_host_to_net_u16 (protocol));
- n[0] = pi->next_index;
+ foreach_vlib_main ({
+ gre_input_runtime_t * rt;
+ rt = vlib_node_get_runtime_data (this_vlib_main, gre_input_node.index);
+ n = sparse_vec_validate (rt->next_by_protocol,
+ clib_host_to_net_u16 (protocol));
+ n[0] = pi->next_index;
+ });
}
static void
}
VLIB_INIT_FUNCTION (gre_input_init);
+
+/* Per-worker init for gre-input: allocate this thread's
+   protocol -> next-index sparse vector.  Node runtime_data is
+   thread-local and is not cloned from the main thread, so every
+   worker must build its own copy. */
+static clib_error_t * gre_input_worker_init (vlib_main_t * vm)
+{
+  gre_input_runtime_t * rt;
+
+  rt = vlib_node_get_runtime_data (vm, gre_input_node.index);
+
+  rt->next_by_protocol = sparse_vec_new
+    (/* elt bytes */ sizeof (rt->next_by_protocol[0]),
+     /* bits in index */ BITS (((gre_header_t *) 0)->protocol));
+  return 0;
+}
+
+VLIB_WORKER_INIT_FUNCTION (gre_input_worker_init);
.unformat_buffer = unformat_hdlc_header,
};
-static clib_error_t * hdlc_input_init (vlib_main_t * vm)
+static clib_error_t * hdlc_input_runtime_init (vlib_main_t * vm)
{
hdlc_input_runtime_t * rt;
-
- {
- clib_error_t * error = vlib_call_init_function (vm, hdlc_init);
- if (error)
- clib_error_report (error);
- }
-
- hdlc_setup_node (vm, hdlc_input_node.index);
-
rt = vlib_node_get_runtime_data (vm, hdlc_input_node.index);
rt->next_by_protocol = sparse_vec_new
return 0;
}
+/* Main-thread init for hdlc-input: ensure hdlc_init has run, set up
+   the node, then initialize this thread's runtime data.  Workers run
+   hdlc_input_runtime_init on their own (registered below as a
+   VLIB_WORKER_INIT_FUNCTION) because runtime_data is thread-local. */
+static clib_error_t * hdlc_input_init (vlib_main_t * vm)
+{
+
+  {
+    clib_error_t * error = vlib_call_init_function (vm, hdlc_init);
+    if (error)
+      clib_error_report (error);
+  }
+
+  hdlc_setup_node (vm, hdlc_input_node.index);
+  hdlc_input_runtime_init (vm);
+
+  return 0;
+}
+
VLIB_INIT_FUNCTION (hdlc_input_init);
+VLIB_WORKER_INIT_FUNCTION (hdlc_input_runtime_init);
void
hdlc_register_input_protocol (vlib_main_t * vm,
VLIB_INIT_FUNCTION (l2_input_classify_init);
+/* Per-worker init for l2-input-classify: point this thread's node
+   runtime at the shared classify mains (runtime_data is thread-local
+   and not cloned from the main thread). */
+clib_error_t *
+l2_input_classify_worker_init (vlib_main_t * vm)
+{
+  l2_input_classify_main_t *cm = &l2_input_classify_main;
+  l2_input_classify_runtime_t *rt;
+
+  rt = vlib_node_get_runtime_data (vm, l2_input_classify_node.index);
+
+  rt->l2cm = cm;
+  rt->vcm = cm->vnet_classify_main;
+
+  return 0;
+}
+
+VLIB_WORKER_INIT_FUNCTION (l2_input_classify_worker_init);
/** Enable/disable l2 input classification on a specific interface. */
void
VLIB_INIT_FUNCTION (l2_output_classify_init);
+/* Per-worker init for l2-output-classify: point this thread's node
+   runtime at the shared classify mains (runtime_data is thread-local
+   and not cloned from the main thread). */
+clib_error_t *
+l2_output_classify_worker_init (vlib_main_t * vm)
+{
+  l2_output_classify_main_t *cm = &l2_output_classify_main;
+  l2_output_classify_runtime_t *rt;
+
+  rt = vlib_node_get_runtime_data (vm, l2_output_classify_node.index);
+
+  rt->l2cm = cm;
+  rt->vcm = cm->vnet_classify_main;
+
+  return 0;
+}
+
+VLIB_WORKER_INIT_FUNCTION (l2_output_classify_worker_init);
+
/** Enable/disable l2 input classification on a specific interface. */
void
vnet_l2_output_classify_enable_disable (u32 sw_if_index, int enable_disable)
VLIB_INIT_FUNCTION (l2tp_init);
+/* Per-worker init for l2tp: re-run the encap runtime initialization
+   on this thread, since node runtime_data is thread-local. */
+clib_error_t *
+l2tp_worker_init (vlib_main_t * vm)
+{
+  l2tp_encap_init (vm);
+
+  return 0;
+}
+
+VLIB_WORKER_INIT_FUNCTION (l2tp_worker_init);
+
/*
* fd.io coding-style-patch-verification: ON
*
}
VLIB_INIT_FUNCTION (mpls_input_init);
+
+/* Per-worker init for mpls-input: reset this thread's lookup cache
+   (last_label sentinel ~0, fib indices) and point the runtime at the
+   shared mpls_main.  runtime_data is thread-local and not cloned. */
+static clib_error_t * mpls_input_worker_init (vlib_main_t * vm)
+{
+  mpls_input_runtime_t * rt;
+  rt = vlib_node_get_runtime_data (vm, mpls_input_node.index);
+  rt->last_label = (u32) ~0;
+  rt->last_inner_fib_index = 0;
+  rt->last_outer_fib_index = 0;
+  rt->mpls_main = &mpls_main;
+  return 0;
+}
+
+VLIB_WORKER_INIT_FUNCTION (mpls_input_worker_init);
/* *INDENT-ON* */
static clib_error_t *
-ppp_input_init (vlib_main_t * vm)
+ppp_input_runtime_init (vlib_main_t * vm)
{
ppp_input_runtime_t *rt;
- {
- clib_error_t *error = vlib_call_init_function (vm, ppp_init);
- if (error)
- clib_error_report (error);
- }
-
- ppp_setup_node (vm, ppp_input_node.index);
-
rt = vlib_node_get_runtime_data (vm, ppp_input_node.index);
rt->next_by_protocol = sparse_vec_new
return 0;
}
+/* Main-thread init for ppp-input: ensure ppp_init has run, set up the
+   node, then initialize this thread's runtime data.  Workers run
+   ppp_input_runtime_init on their own (registered below as a
+   VLIB_WORKER_INIT_FUNCTION) because runtime_data is thread-local. */
+static clib_error_t *
+ppp_input_init (vlib_main_t * vm)
+{
+
+  {
+    clib_error_t *error = vlib_call_init_function (vm, ppp_init);
+    if (error)
+      clib_error_report (error);
+  }
+
+  ppp_setup_node (vm, ppp_input_node.index);
+  ppp_input_runtime_init (vm);
+
+  return 0;
+}
+
VLIB_INIT_FUNCTION (ppp_input_init);
+VLIB_WORKER_INIT_FUNCTION (ppp_input_runtime_init);
void
ppp_register_input_protocol (vlib_main_t * vm,
if (enable_disable)
{
- vlib_main_t *vm = vlib_get_main ();
syn_filter4_runtime_t *rt;
- rt = vlib_node_get_runtime_data (vm, syn_filter4_node.index);
- vec_validate (rt->syn_counts, 1023);
- /*
- * Given perfect disperson / optimal hashing results:
- * Allow 128k (successful) syns/sec. 1024, buckets each of which
- * absorb 128 syns before filtering. Reset table once a second.
- * Reality bites, lets try resetting once every 100ms.
- */
- rt->reset_interval = 0.1; /* reset interval in seconds */
+ /* *INDENT-OFF* */
+ foreach_vlib_main ({
+ rt = vlib_node_get_runtime_data (this_vlib_main, syn_filter4_node.index);
+ vec_validate (rt->syn_counts, 1023);
+ /*
+	 * Given perfect dispersion / optimal hashing results:
+	 * Allow 128k (successful) syns/sec. 1024 buckets each of which
+	 * absorb 128 syns before filtering. Reset table once a second.
+	 * Reality bites, let's try resetting once every 100ms.
+ */
+ rt->reset_interval = 0.1; /* reset interval in seconds */
+ });
+ /* *INDENT-ON* */
}
rv = vnet_feature_enable_disable ("ip4-local", "syn-filter-4",
: udp6_input_node.index, node_index);
/* Setup udp protocol -> next index sparse vector mapping. */
- rt = vlib_node_get_runtime_data
- (vm, is_ip4 ? udp4_input_node.index : udp6_input_node.index);
- n = sparse_vec_validate (rt->next_by_dst_port,
- clib_host_to_net_u16 (dst_port));
- n[0] = pi->next_index;
+ /* *INDENT-OFF* */
+ foreach_vlib_main({
+ rt = vlib_node_get_runtime_data
+ (this_vlib_main, is_ip4 ? udp4_input_node.index : udp6_input_node.index);
+ n = sparse_vec_validate (rt->next_by_dst_port,
+ clib_host_to_net_u16 (dst_port));
+ n[0] = pi->next_index;
+ });
+ /* *INDENT-ON* */
}
void
return;
/* Kill the mapping. Don't bother killing the pi, it may be back. */
- rt = vlib_node_get_runtime_data
- (vm, is_ip4 ? udp4_input_node.index : udp6_input_node.index);
- n = sparse_vec_validate (rt->next_by_dst_port,
- clib_host_to_net_u16 (dst_port));
- n[0] = SPARSE_VEC_INVALID_INDEX;
+ /* *INDENT-OFF* */
+ foreach_vlib_main({
+ rt = vlib_node_get_runtime_data
+ (this_vlib_main, is_ip4 ? udp4_input_node.index : udp6_input_node.index);
+ n = sparse_vec_validate (rt->next_by_dst_port,
+ clib_host_to_net_u16 (dst_port));
+ n[0] = SPARSE_VEC_INVALID_INDEX;
+ });
+ /* *INDENT-ON* */
}
void
pn->unformat_edit = unformat_pg_udp_header;
}
+/* Initialize the udp4/udp6 input node runtimes for the calling
+   thread: allocate empty dst-port -> next-index sparse vectors and
+   clear the punt-unknown flag.  Shared by main-thread init
+   (udp_local_init) and worker init (udp_local_worker_init), since
+   runtime_data is thread-local and not cloned. */
+static void
+udp_local_node_runtime_init (vlib_main_t * vm)
+{
+  udp_input_runtime_t *rt;
+
+  rt = vlib_node_get_runtime_data (vm, udp4_input_node.index);
+  rt->next_by_dst_port = sparse_vec_new
+    ( /* elt bytes */ sizeof (rt->next_by_dst_port[0]),
+     /* bits in index */ BITS (((udp_header_t *) 0)->dst_port));
+  rt->punt_unknown = 0;
+
+  rt = vlib_node_get_runtime_data (vm, udp6_input_node.index);
+  rt->next_by_dst_port = sparse_vec_new
+    ( /* elt bytes */ sizeof (rt->next_by_dst_port[0]),
+     /* bits in index */ BITS (((udp_header_t *) 0)->dst_port));
+  rt->punt_unknown = 0;
+}
+
clib_error_t *
udp_local_init (vlib_main_t * vm)
{
- udp_input_runtime_t *rt;
udp_main_t *um = &udp_main;
int i;
udp_setup_node (vm, udp4_input_node.index);
udp_setup_node (vm, udp6_input_node.index);
- rt = vlib_node_get_runtime_data (vm, udp4_input_node.index);
-
- rt->next_by_dst_port = sparse_vec_new
- ( /* elt bytes */ sizeof (rt->next_by_dst_port[0]),
- /* bits in index */ BITS (((udp_header_t *) 0)->dst_port));
-
- rt->punt_unknown = 0;
+ udp_local_node_runtime_init (vm);
#define _(n,s) add_dst_port (um, UDP_DST_PORT_##s, #s, 1 /* is_ip4 */);
foreach_udp4_dst_port
#undef _
- rt = vlib_node_get_runtime_data (vm, udp6_input_node.index);
-
- rt->next_by_dst_port = sparse_vec_new
- ( /* elt bytes */ sizeof (rt->next_by_dst_port[0]),
- /* bits in index */ BITS (((udp_header_t *) 0)->dst_port));
-
- rt->punt_unknown = 0;
-
#define _(n,s) add_dst_port (um, UDP_DST_PORT_##s, #s, 0 /* is_ip4 */);
- foreach_udp6_dst_port
+ foreach_udp6_dst_port
#undef _
ip4_register_protocol (IP_PROTOCOL_UDP, udp4_input_node.index);
/* Note: ip6 differs from ip4, UDP is hotwired to ip6-udp-lookup */
VLIB_INIT_FUNCTION (udp_local_init);
+/* Per-worker init: thin wrapper so each worker thread re-runs the
+   per-thread udp node runtime initialization. */
+clib_error_t *
+udp_local_worker_init (vlib_main_t * vm)
+{
+  udp_local_node_runtime_init (vm);
+  return 0;
+}
+
+VLIB_WORKER_INIT_FUNCTION (udp_local_worker_init);
+
/*
* fd.io coding-style-patch-verification: ON
*