/*
 * NOTE(review): this macro is TRUNCATED and contains diff '+' markers —
 * the body below cuts off inside the PREDICT_FALSE branch and will not
 * preprocess/compile as-is. It appears to be patch residue; restore the
 * full macro from the upstream header before building.
 *
 * Intended contract (as far as visible): after a speculative dual enqueue
 * of bi0/bi1 to next_index, assert both buffer indices are non-zero and
 * compute a 2-bit code telling which of next0/next1 disagree with
 * next_index, so the slow path runs only on mismatch.
 */
#define vlib_validate_buffer_enqueue_x2(vm,node,next_index,to_next,n_left_to_next,bi0,bi1,next0,next1) \
do { \
+ ASSERT (bi0 != 0); \
+ ASSERT (bi1 != 0); \
int enqueue_code = (next0 != next_index) + 2*(next1 != next_index); \
\
if (PREDICT_FALSE (enqueue_code != 0)) \
/*
 * NOTE(review): this macro is TRUNCATED and contains diff '+' markers —
 * it ends at a stray comment terminator and will not preprocess/compile
 * as-is. Patch residue; restore the full macro from the upstream header.
 *
 * Intended contract (as far as visible): after a speculative quad enqueue
 * of bi0..bi3 to next_index, assert all four buffer indices are non-zero
 * and OR together the XORs of each nextN against next_index so a single
 * branch (fix_speculation != 0) detects any mismatching next.
 */
#define vlib_validate_buffer_enqueue_x4(vm,node,next_index,to_next,n_left_to_next,bi0,bi1,bi2,bi3,next0,next1,next2,next3) \
do { \
+ ASSERT (bi0 != 0); \
+ ASSERT (bi1 != 0); \
+ ASSERT (bi2 != 0); \
+ ASSERT (bi3 != 0); \
/* After the fact: check the [speculative] enqueue to "next" */ \
u32 fix_speculation = (next_index ^ next0) | (next_index ^ next1) \
| (next_index ^ next2) | (next_index ^ next3); \
*/
/*
 * NOTE(review): this macro is TRUNCATED and garbled — it contains a
 * diff '+' marker, is missing its backslash continuations on the last
 * lines, and the `return frame->n_vectors;` line appears spliced in
 * from an unrelated function body (a macro cannot `return` from its
 * caller's frame like this). Patch residue; restore the full macro
 * from the upstream header before building.
 *
 * Intended contract (as far as visible): after a speculative single
 * enqueue of bi0 to next_index, assert bi0 is non-zero; on mismatch
 * (next0 != next_index), un-put the speculative slot by returning the
 * frame with n_left_to_next + 1 and re-enqueue bi0 to next0.
 */
#define vlib_validate_buffer_enqueue_x1(vm,node,next_index,to_next,n_left_to_next,bi0,next0) \
do { \
+ ASSERT (bi0 != 0); \
if (PREDICT_FALSE (next0 != next_index)) \
{ \
vlib_put_next_frame (vm, node, next_index, n_left_to_next + 1); \
return frame->n_vectors;
}
+static_always_inline void
+vlib_buffer_enqueue_to_next (vlib_main_t * vm, vlib_node_runtime_t * node,
+ u32 * buffers, u16 * nexts, uword count)
+{
+ vlib_buffer_enqueue_to_next_fn_t *fn;
+ fn = vlib_buffer_func_main.buffer_enqueue_to_next_fn;
+ (fn) (vm, node, buffers, nexts, count);
+}
+
+static_always_inline void
+vlib_buffer_enqueue_to_single_next (vlib_main_t * vm,
+ vlib_node_runtime_t * node, u32 * buffers,
+ u16 next_index, u32 count)
+{
+ vlib_buffer_enqueue_to_single_next_fn_t *fn;
+ fn = vlib_buffer_func_main.buffer_enqueue_to_single_next_fn;
+ (fn) (vm, node, buffers, next_index, count);
+}
+
+static_always_inline u32
+vlib_buffer_enqueue_to_thread (vlib_main_t *vm, vlib_node_runtime_t *node,
+ u32 frame_queue_index, u32 *buffer_indices,
+ u16 *thread_indices, u32 n_packets,
+ int drop_on_congestion)
+{
+ vlib_buffer_enqueue_to_thread_fn_t *fn;
+ fn = vlib_buffer_func_main.buffer_enqueue_to_thread_fn;
+ return (fn) (vm, node, frame_queue_index, buffer_indices, thread_indices,
+ n_packets, drop_on_congestion);
+}
+
#endif /* included_vlib_buffer_node_h */
/*