/* One node-function registration per compiled march (CPU) variant.
   Instances are created by the VLIB_NODE_FN () macro and linked onto the
   owning node's node_fn_registrations list by a constructor. */
typedef struct _vlib_node_fn_registration
{
/* Dispatch function for this march variant. */
vlib_node_function_t *function;
- int priority;
+ clib_march_variant_type_t march_variant;
/* Next entry on the owning node's singly-linked registration list. */
struct _vlib_node_fn_registration *next_registration;
- char *name;
} vlib_node_fn_registration_t;
typedef struct _vlib_node_registration
/* Error strings indexed by error code for this node. */
char **error_strings;
+ vl_counter_t *error_counters;
/* Buffer format/unformat for this node. */
format_function_t *format_buffer;
__VA_ARGS__ vlib_node_registration_t x
#else
#define VLIB_REGISTER_NODE(x,...) \
+STATIC_ASSERT (sizeof(# __VA_ARGS__) != 7,"node " #x " must not be declared as static"); \
static __clib_unused vlib_node_registration_t __clib_unused_##x
#endif
#define CLIB_MARCH_VARIANT_STR _CLIB_MARCH_VARIANT_STR(CLIB_MARCH_VARIANT)
#endif
-#define VLIB_NODE_FN(node) \
-uword CLIB_MARCH_SFX (node##_fn)(); \
-static vlib_node_fn_registration_t \
- CLIB_MARCH_SFX(node##_fn_registration) = \
- { .function = &CLIB_MARCH_SFX (node##_fn), }; \
- \
-static void __clib_constructor \
-CLIB_MARCH_SFX (node##_multiarch_register) (void) \
-{ \
- extern vlib_node_registration_t node; \
- vlib_node_fn_registration_t *r; \
- r = & CLIB_MARCH_SFX (node##_fn_registration); \
- r->priority = CLIB_MARCH_FN_PRIORITY(); \
- r->name = CLIB_MARCH_VARIANT_STR; \
- r->next_registration = node.node_fn_registrations; \
- node.node_fn_registrations = r; \
-} \
-uword CLIB_CPU_OPTIMIZED CLIB_MARCH_SFX (node##_fn)
-
-always_inline vlib_node_registration_t *
-vlib_node_next_registered (vlib_node_registration_t * c)
-{
- c =
- clib_elf_section_data_next (c,
- c->n_next_nodes * sizeof (c->next_nodes[0]));
- return c;
-}
+/* Define the march-variant dispatch function for 'node': declares the
+   variant function, creates its vlib_node_fn_registration_t, and installs
+   a constructor that tags the registration with this build's march
+   variant type and links it onto node.node_fn_registrations.  The macro
+   expansion ends with the function header, so the user supplies the
+   function body immediately after the macro invocation. */
+#define VLIB_NODE_FN(node) \
+ uword CLIB_MARCH_SFX (node##_fn) (); \
+ static vlib_node_fn_registration_t CLIB_MARCH_SFX ( \
+ node##_fn_registration) = { \
+ .function = &CLIB_MARCH_SFX (node##_fn), \
+ }; \
+ \
+ static void __clib_constructor CLIB_MARCH_SFX (node##_multiarch_register) ( \
+ void) \
+ { \
+ extern vlib_node_registration_t node; \
+ vlib_node_fn_registration_t *r; \
+ r = &CLIB_MARCH_SFX (node##_fn_registration); \
+ r->march_variant = CLIB_MARCH_SFX (CLIB_MARCH_VARIANT_TYPE); \
+ r->next_registration = node.node_fn_registrations; \
+ node.node_fn_registrations = r; \
+ } \
+ uword CLIB_CPU_OPTIMIZED CLIB_MARCH_SFX (node##_fn)
+
+unformat_function_t unformat_vlib_node_variant;
/* Per-node dispatch statistics; 64-bit running totals. */
typedef struct
{
u64 calls, vectors, clocks, suspends;
/* NOTE(review): names suggest the worst-case clocks seen in a single
   dispatch and the vector count of that dispatch -- confirm against the
   stats-collection code. */
u64 max_clock;
u64 max_clock_n;
- u64 perf_counter0_ticks;
- u64 perf_counter1_ticks;
- u64 perf_counter_vectors;
} vlib_node_stats_t;
#define foreach_vlib_node_state \
#define VLIB_NODE_FLAG_SWITCH_FROM_INTERRUPT_TO_POLLING_MODE (1 << 6)
#define VLIB_NODE_FLAG_SWITCH_FROM_POLLING_TO_INTERRUPT_MODE (1 << 7)
+#define VLIB_NODE_FLAG_TRACE_SUPPORTED (1 << 8)
/* State for input nodes. */
u8 state;
u32 error_heap_handle;
u32 error_heap_index;
- /* Error strings indexed by error code for this node. */
- char **error_strings;
+ /* Counter structures indexed by counter code for this node. */
+ vl_counter_t *error_counters;
/* Vector of next node names.
Only used before next_nodes array is initialized. */
vlib_error_t *errors; /**< Vector of errors for this node. */
-#if __SIZEOF_POINTER__ == 4
- u8 pad[8];
-#endif
-
u32 clocks_since_last_overflow; /**< Number of clock cycles. */
u32 max_clock; /**< Maximum clock cycle for an
u32 vectors_since_last_overflow; /**< Number of vector elements
processed by this node. */
- u32 perf_counter0_ticks_since_last_overflow; /**< Perf counter 0 ticks */
- u32 perf_counter1_ticks_since_last_overflow; /**< Perf counter 1 ticks */
- u32 perf_counter_vectors_since_last_overflow; /**< Perf counter vectors */
-
u32 next_frame_index; /**< Start of next frames for this
node. */
/* Cooperative process ("vlib process") context.
   NOTE(review): elided diff view -- the full struct has additional
   members that are not shown here. */
typedef struct
{
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
/* Node runtime for this process. */
vlib_node_runtime_t node_runtime;
/* Optional output redirection function + argument for this process. */
vlib_cli_output_function_t *output_function;
uword output_function_arg;
-#ifdef CLIB_UNIX
- /* Pad to a multiple of the page size so we can mprotect process stacks */
-#define PAGE_SIZE_MULTIPLE 0x1000
-#define ALIGN_ON_MULTIPLE_PAGE_BOUNDARY_FOR_MPROTECT __attribute__ ((aligned (PAGE_SIZE_MULTIPLE)))
-#else
-#define ALIGN_ON_MULTIPLE_PAGE_BOUNDARY_FOR_MPROTECT
-#endif
-
- /* Process stack. Starts here and extends 2^log2_n_stack_bytes
- bytes. */
-
+ /* Process stack */
#define VLIB_PROCESS_STACK_MAGIC (0xdead7ead)
- u32 stack[0] ALIGN_ON_MULTIPLE_PAGE_BOUNDARY_FOR_MPROTECT;
-} vlib_process_t __attribute__ ((aligned (CLIB_CACHE_LINE_BYTES)));
-
-#ifdef CLIB_UNIX
- /* Ensure that the stack is aligned on the multiple of the page size */
-typedef char
- assert_process_stack_must_be_aligned_exactly_to_page_size_multiple[(sizeof
- (vlib_process_t)
- -
- PAGE_SIZE_MULTIPLE)
- ==
- 0 ? 0 :
- -1];
-#endif
+ /* Stack is now a separate allocation (previously an in-struct
+    flexible array, page-aligned for mprotect). */
+ u32 *stack;
+} vlib_process_t;
typedef struct
{
return d / 2;
}
+/* Describes one node-function march variant: its variant index,
+   selection priority, name suffix (used as the key in
+   node_fn_march_variant_by_suffix) and human-readable description. */
+typedef struct
+{
+  clib_march_variant_type_t index;
+  int priority;
+  char *suffix;
+  char *desc;
+} vlib_node_fn_variant_t;
+
/* Node main: the node database and dispatch state.
   NOTE(review): elided diff view -- members between visible hunks are
   not shown here. */
typedef struct
{
/* Public nodes. */
vlib_node_runtime_t *nodes_by_type[VLIB_N_NODE_TYPE];
/* Node runtime indices for input nodes with pending interrupts. */
- u32 *pending_interrupt_node_runtime_indices;
- clib_spinlock_t pending_interrupt_lock;
+ /* NOTE(review): replaces the vector+spinlock scheme; the container
+    behind 'interrupts' is opaque (void *) here -- confirm its type. */
+ void *interrupts;
+ volatile u32 *pending_interrupts;
/* Input nodes are switched from/to interrupt to/from polling mode
when average vector length goes above/below polling/interrupt
/* Node registrations added by constructors */
vlib_node_registration_t *node_registrations;
+
+ /* Node index from error code */
+ u32 *node_by_error;
+
+ /* Node Function Variants */
+ vlib_node_fn_variant_t *variants;
+
+ /* Node Function Default Variant Index */
+ u32 node_fn_default_march_variant;
+
+ /* Node Function march Variant by Suffix Hash */
+ uword *node_fn_march_variant_by_suffix;
} vlib_node_main_t;
+typedef u16 vlib_error_t;
+
+/* Return the index of the node owning global error index 'e', via the
+   node_by_error lookup table. */
+always_inline u32
+vlib_error_get_node (vlib_node_main_t * nm, vlib_error_t e)
+{
+ return nm->node_by_error[e];
+}
+
+/* Convert global error index 'e' to the owning node's local error code:
+   the offset of 'e' within that node's block of the error heap
+   (e - n->error_heap_index). */
+always_inline u32
+vlib_error_get_code (vlib_node_main_t * nm, vlib_error_t e)
+{
+ u32 node_index = nm->node_by_error[e];
+ vlib_node_t *n = nm->nodes[node_index];
+ u32 error_code = e - n->error_heap_index;
+ return error_code;
+}
#define FRAME_QUEUE_MAX_NELTS 64
typedef struct