#ifndef included_vlib_node_funcs_h
#define included_vlib_node_funcs_h
+#include <vppinfra/clib.h>
#include <vppinfra/fifo.h>
#include <vppinfra/tw_timer_1t_3w_1024sl_ov.h>
#include <vppinfra/interrupt.h>
{
+/* ASan fiber bookkeeping: announce the upcoming switch to the process (or
+ * thread) stack so AddressSanitizer can track it.  No-op without
+ * CLIB_SANITIZE_ADDR. */
#ifdef CLIB_SANITIZE_ADDR
void *stack = p ? (void *) p->stack : vlib_thread_stacks[vm->thread_index];
- u32 stack_bytes = p ? p->log2_n_stack_bytes : VLIB_THREAD_STACK_SIZE;
+ /* log2_n_stack_bytes is a log2 — convert to a byte count with a left
+  * shift; `1ULL <` (comparison) was a typo yielding 0 or 1 bytes. */
+ u32 stack_bytes =
+ p ? (1ULL << p->log2_n_stack_bytes) : VLIB_THREAD_STACK_SIZE;
__sanitizer_start_switch_fiber (&vm->asan_stack_save, stack, stack_bytes);
#endif
}
{
vlib_node_main_t *nm = &vm->node_main;
vlib_node_t *n = vec_elt (nm->nodes, node_index);
+ void *interrupts;
- ASSERT (n->type == VLIB_NODE_TYPE_INPUT);
+ /* Interrupts are now kept per node type: pick the vector matching this
+  * node (PRE_INPUT support is new; previously only INPUT was allowed). */
+ if (n->type == VLIB_NODE_TYPE_INPUT)
+ interrupts = nm->input_node_interrupts;
+ else if (n->type == VLIB_NODE_TYPE_PRE_INPUT)
+ interrupts = nm->pre_input_node_interrupts;
+ else
+ /* NOTE(review): ASSERT compiles out in release builds, leaving
+  * `interrupts` uninitialized on this path before it is dereferenced
+  * below — confirm callers can never pass another node type. */
+ ASSERT (0);
+ /* Cross-thread signalling needs the atomic variant. */
if (vm != vlib_get_main ())
- clib_interrupt_set_atomic (nm->interrupts, n->runtime_index);
+ clib_interrupt_set_atomic (interrupts, n->runtime_index);
else
- clib_interrupt_set (nm->interrupts, n->runtime_index);
+ clib_interrupt_set (interrupts, n->runtime_index);
- __atomic_store_n (nm->pending_interrupts, 1, __ATOMIC_RELEASE);
+ /* Release store of the flag (old code stored through the value rather
+  * than its address) so the bit set above is visible to the consumer
+  * before pending_interrupts reads non-zero. */
+ __atomic_store_n (&nm->pending_interrupts, 1, __ATOMIC_RELEASE);
}
always_inline vlib_process_t *
u32 next_index,
u32 alloc_new_frame);
-#define vlib_get_next_frame_macro(vm,node,next_index,vectors,n_vectors_left,alloc_new_frame) \
-do { \
- vlib_frame_t * _f \
- = vlib_get_next_frame_internal ((vm), (node), (next_index), \
- (alloc_new_frame)); \
- u32 _n = _f->n_vectors; \
- (vectors) = vlib_frame_vector_args (_f) + _n * sizeof ((vectors)[0]); \
- (n_vectors_left) = VLIB_FRAME_SIZE - _n; \
-} while (0)
-
+#define vlib_get_next_frame_macro(vm, node, next_index, vectors, \
+ n_vectors_left, alloc_new_frame) \
+ do \
+ { \
+ vlib_frame_t *_f = vlib_get_next_frame_internal ( \
+ (vm), (node), (next_index), (alloc_new_frame)); \
+ u32 _n = _f->n_vectors; \
+ (vectors) = vlib_frame_vector_args (_f) + _n * sizeof ((vectors)[0]); \
+ (n_vectors_left) = VLIB_FRAME_SIZE - _n; \
+ } \
+ while (0)
+
+/* Like vlib_get_next_frame_macro, but additionally returns a cursor into
+ * the frame's auxiliary data area.  With maybe_no_aux set, a frame that
+ * carries no aux data (aux_offset == 0) yields aux_data == NULL instead of
+ * an invalid pointer. */
+#define vlib_get_next_frame_macro_with_aux(vm, node, next_index, vectors,    \
+					   n_vectors_left, alloc_new_frame,   \
+					   aux_data, maybe_no_aux)            \
+  do                                                                         \
+    {                                                                        \
+      vlib_frame_t *_f = vlib_get_next_frame_internal (                      \
+	(vm), (node), (next_index), (alloc_new_frame));                       \
+      u32 _n = _f->n_vectors;                                                \
+      (vectors) = vlib_frame_vector_args (_f) + _n * sizeof ((vectors)[0]);  \
+      if ((maybe_no_aux) && (_f)->aux_offset == 0)                           \
+	(aux_data) = NULL;                                                    \
+      else                                                                   \
+	(aux_data) = vlib_frame_aux_args (_f) + _n * sizeof ((aux_data)[0]); \
+      (n_vectors_left) = VLIB_FRAME_SIZE - _n;                               \
+    }                                                                        \
+  while (0)
/** \brief Get pointer to next frame vector data by
(@c vlib_node_runtime_t, @c next_index).
@return @c vectors -- pointer to next available vector slot
@return @c n_vectors_left -- number of vector slots available
*/
-#define vlib_get_next_frame(vm,node,next_index,vectors,n_vectors_left) \
- vlib_get_next_frame_macro (vm, node, next_index, \
- vectors, n_vectors_left, \
+#define vlib_get_next_frame(vm, node, next_index, vectors, n_vectors_left) \
+ vlib_get_next_frame_macro (vm, node, next_index, vectors, n_vectors_left, \
/* alloc new frame */ 0)
-#define vlib_get_new_next_frame(vm,node,next_index,vectors,n_vectors_left) \
- vlib_get_next_frame_macro (vm, node, next_index, \
- vectors, n_vectors_left, \
+#define vlib_get_new_next_frame(vm, node, next_index, vectors, \
+ n_vectors_left) \
+ vlib_get_next_frame_macro (vm, node, next_index, vectors, n_vectors_left, \
/* alloc new frame */ 1)
+/** \brief Get pointer to next frame vector data and next frame aux data by
+ (@c vlib_node_runtime_t, @c next_index).
+ Standard single/dual loop boilerplate element.
+ @attention This is a MACRO, with SIDE EFFECTS.
+ @attention This MACRO is unsafe in case the next node does not support
+ aux_data
+
+ @param vm vlib_main_t pointer, varies by thread
+ @param node current node vlib_node_runtime_t pointer
+ @param next_index requested graph arc index
+
+ @return @c vectors -- pointer to next available vector slot
+ @return @c aux_data -- pointer to next available aux data slot
+ @return @c n_vectors_left -- number of vector slots available
+*/
+#define vlib_get_next_frame_with_aux(vm, node, next_index, vectors, aux_data, \
+ n_vectors_left) \
+ vlib_get_next_frame_macro_with_aux ( \
+ vm, node, next_index, vectors, n_vectors_left, /* alloc new frame */ 0, \
+ aux_data, /* maybe_no_aux */ 0)
+
+/* As vlib_get_next_frame_with_aux, but always allocates a fresh frame. */
+#define vlib_get_new_next_frame_with_aux(vm, node, next_index, vectors,      \
+					 aux_data, n_vectors_left)            \
+  vlib_get_next_frame_macro_with_aux (                                       \
+    vm, node, next_index, vectors, n_vectors_left, /* alloc new frame */ 1,  \
+    aux_data, /* maybe_no_aux */ 0)
+
+/** \brief Get pointer to next frame vector data and next frame aux data by
+ (@c vlib_node_runtime_t, @c next_index).
+ Standard single/dual loop boilerplate element.
+ @attention This is a MACRO, with SIDE EFFECTS.
+ @attention This MACRO is safe in case the next node does not support aux_data.
+ In that case aux_data is set to NULL.
+
+ @param vm vlib_main_t pointer, varies by thread
+ @param node current node vlib_node_runtime_t pointer
+ @param next_index requested graph arc index
+
+ @return @c vectors -- pointer to next available vector slot
+ @return @c aux_data -- pointer to next available aux data slot
+ @return @c n_vectors_left -- number of vector slots available
+*/
+#define vlib_get_next_frame_with_aux_safe(vm, node, next_index, vectors, \
+ aux_data, n_vectors_left) \
+ vlib_get_next_frame_macro_with_aux ( \
+ vm, node, next_index, vectors, n_vectors_left, /* alloc new frame */ 0, \
+ aux_data, /* maybe_no_aux */ 1)
+
+#define vlib_get_new_next_frame_with_aux_safe(vm, node, next_index, vectors, \
+ aux_data, n_vectors_left) \
+ vlib_get_next_frame_macro_with_aux ( \
+ vm, node, next_index, vectors, n_vectors_left, /* alloc new frame */ 1, \
+ aux_data, /* maybe_no_aux */ 1)
+
/** \brief Release pointer to next frame vector data.
Standard single/dual loop boilerplate element.
@param vm vlib_main_t pointer, varies by thread
(v); \
})
+/* Claim exactly one vector slot on arc `next_index` and commit it
+ * immediately; evaluates to the slot pointer `v`.  `aux` receives the
+ * matching aux slot, or NULL when the next node's frames carry no aux
+ * data (the "safe" behavior). */
+#define vlib_set_next_frame_with_aux_safe(vm, node, next_index, v, aux)      \
+  ({                                                                         \
+    uword _n_left;                                                           \
+    vlib_get_next_frame_with_aux_safe ((vm), (node), (next_index), (v),      \
+				       (aux), _n_left);                       \
+    ASSERT (_n_left > 0);                                                    \
+    vlib_put_next_frame ((vm), (node), (next_index), _n_left - 1);           \
+    (v);                                                                     \
+  })
+
always_inline void
vlib_set_next_frame_buffer (vlib_main_t * vm,
vlib_node_runtime_t * node,
p[0] = buffer_index;
}
+/* Enqueue a single buffer index (plus one aux word) on arc `next_index`.
+ * When the next node's frames have no aux area, the aux pointer comes
+ * back NULL and `aux` is silently dropped. */
+always_inline void
+vlib_set_next_frame_buffer_with_aux_safe (vlib_main_t *vm,
+					  vlib_node_runtime_t *node,
+					  u32 next_index, u32 buffer_index,
+					  u32 aux)
+{
+  u32 *p;
+  u32 *a;
+  p = vlib_set_next_frame_with_aux_safe (vm, node, next_index, p, a);
+  p[0] = buffer_index;
+  /* a == NULL means the next frame carries no aux data (maybe_no_aux). */
+  if (a)
+    a[0] = aux;
+}
+
vlib_frame_t *vlib_get_frame_to_node (vlib_main_t * vm, u32 to_node_index);
void vlib_put_frame_to_node (vlib_main_t * vm, u32 to_node_index,
vlib_frame_t * f);
l = _vec_len (p->pending_event_data_by_type_index[t]);
if (data_vector)
vec_add (*data_vector, p->pending_event_data_by_type_index[t], l);
- _vec_len (p->pending_event_data_by_type_index[t]) = 0;
+ vec_set_len (p->pending_event_data_by_type_index[t], 0);
et = pool_elt_at_index (p->event_type_pool, t);
l = _vec_len (p->pending_event_data_by_type_index[t]);
if (data_vector)
vec_add (*data_vector, p->pending_event_data_by_type_index[t], l);
- _vec_len (p->pending_event_data_by_type_index[t]) = 0;
+ vec_set_len (p->pending_event_data_by_type_index[t], 0);
vlib_process_maybe_free_event_type (p, t);
uword n_data_elts, uword n_data_elt_bytes)
{
uword p_flags, add_to_pending, delete_from_wheel;
- void *data_to_be_written_by_caller;
+ u8 *data_to_be_written_by_caller;
+ vec_attr_t va = { .elt_sz = n_data_elt_bytes };
ASSERT (n->type == VLIB_NODE_TYPE_PROCESS);
/* Resize data vector and return caller's data to be written. */
{
- void *data_vec = p->pending_event_data_by_type_index[t];
+ u8 *data_vec = p->pending_event_data_by_type_index[t];
uword l;
if (!data_vec && vec_len (nm->recycled_event_data_vectors))
l = vec_len (data_vec);
- data_vec = _vec_resize (data_vec,
- /* length_increment */ n_data_elts,
- /* total size after increment */
- (l + n_data_elts) * n_data_elt_bytes,
- /* header_bytes */ 0, /* data_align */ 0);
+ data_vec = _vec_realloc_internal (data_vec, l + n_data_elts, &va);
p->pending_event_data_by_type_index[t] = data_vec;
data_to_be_written_by_caller = data_vec + l * n_data_elt_bytes;
p->flags = p_flags | VLIB_PROCESS_RESUME_PENDING;
vec_add1 (nm->data_from_advancing_timing_wheel, x);
if (delete_from_wheel)
- TW (tw_timer_stop) ((TWT (tw_timer_wheel) *) nm->timing_wheel,
- p->stop_timer_handle);
+ {
+ TW (tw_timer_stop)
+ ((TWT (tw_timer_wheel) *) nm->timing_wheel, p->stop_timer_handle);
+ p->stop_timer_handle = ~0;
+ }
}
return data_to_be_written_by_caller;
return v >> VLIB_LOG2_MAIN_LOOPS_PER_STATS_UPDATE;
}
-void
-vlib_frame_free (vlib_main_t * vm, vlib_node_runtime_t * r, vlib_frame_t * f);
+void vlib_frame_free (vlib_main_t *vm, vlib_frame_t *f);
/* Return the edge index if present, ~0 otherwise */
uword vlib_node_get_next (vlib_main_t * vm, uword node, uword next_node);
/* Register new packet processing node. Nodes can be registered
dynamically via this call or statically via the VLIB_REGISTER_NODE
macro. */
-u32 vlib_register_node (vlib_main_t * vm, vlib_node_registration_t * r);
+u32 vlib_register_node (vlib_main_t *vm, vlib_node_registration_t *r,
+ char *fmt, ...);
/* Register all node function variants */
void vlib_register_all_node_march_variants (vlib_main_t *vm);
bmp++[0] = 0;
}
+/* Set a single bit in a frame bitmap. */
+static_always_inline void
+vlib_frame_bitmap_set_bit_at_index (uword *bmp, uword bit_index)
+{
+  uword_bitmap_set_bits_at_index (bmp, bit_index, 1);
+}
+
+/* Clear a single bit.  NOTE(review): the leading underscore is
+ * inconsistent with the sibling setter above — presumably reserved for a
+ * public wrapper elsewhere; confirm intended naming. */
+static_always_inline void
+_vlib_frame_bitmap_clear_bit_at_index (uword *bmp, uword bit_index)
+{
+  uword_bitmap_clear_bits_at_index (bmp, bit_index, 1);
+}
+
+/* Set n_bits consecutive bits starting at bit_index. */
+static_always_inline void
+vlib_frame_bitmap_set_bits_at_index (uword *bmp, uword bit_index, uword n_bits)
+{
+  uword_bitmap_set_bits_at_index (bmp, bit_index, n_bits);
+}
+
+/* Clear n_bits consecutive bits starting at bit_index. */
+static_always_inline void
+vlib_frame_bitmap_clear_bits_at_index (uword *bmp, uword bit_index,
+				       uword n_bits)
+{
+  uword_bitmap_clear_bits_at_index (bmp, bit_index, n_bits);
+}
+
static_always_inline void
vlib_frame_bitmap_clear (uword *bmp)
{
bmp++[0] &= bmp2++[0];
}
-static_always_inline u32
+static_always_inline uword
vlib_frame_bitmap_count_set_bits (uword *bmp)
{
- u32 n_left = VLIB_FRAME_BITMAP_N_UWORDS;
- u32 count = 0;
- while (n_left--)
- count += count_set_bits (bmp++[0]);
- return count;
+ return uword_bitmap_count_set_bits (bmp, VLIB_FRAME_BITMAP_N_UWORDS);
}
-static_always_inline int
-vlib_frame_bitmap_find_first_set (uword *bmp)
+static_always_inline uword
+vlib_frame_bitmap_is_bit_set (uword *bmp, uword bit_index)
{
- uword *b = bmp;
- while (b[0] == 0)
- {
- ASSERT (b - bmp < VLIB_FRAME_BITMAP_N_UWORDS);
- b++;
- }
+ return uword_bitmap_is_bit_set (bmp, bit_index);
+}
- return (b - bmp) * uword_bits + get_lowest_set_bit_index (b[0]);
+/* Return the index of the first set bit.  The ASSERT (debug builds only)
+ * catches the all-zero-bitmap case, where the helper's return value would
+ * be out of range. */
+static_always_inline uword
+vlib_frame_bitmap_find_first_set (uword *bmp)
+{
+  uword rv = uword_bitmap_find_first_set (bmp);
+  ASSERT (rv < VLIB_FRAME_BITMAP_N_UWORDS * uword_bits);
+  return rv;
}
#define foreach_vlib_frame_bitmap_set_bit_index(i, v) \