X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=src%2Fvlib%2Fbuffer_node.h;h=c0268b215627800133dfbe84cd3536dc1d511a6c;hb=8ae63db02066a2b5ac18a89fd63dc0dd2a811ab3;hp=10ebd253c1b6b080e23db8c4b24c07f8969d4738;hpb=10bb21fb13aa74dcb0c2c0841d41a698bb274fbe;p=vpp.git

diff --git a/src/vlib/buffer_node.h b/src/vlib/buffer_node.h
index 10ebd253c1b..c0268b21562 100644
--- a/src/vlib/buffer_node.h
+++ b/src/vlib/buffer_node.h
@@ -236,6 +236,53 @@ do { \
     }                                                                   \
 } while (0)
 
+/** \brief Finish enqueueing one buffer forward in the graph, along with
+    its aux_data if possible. Standard single-loop boilerplate element.
+    This is a MACRO, with MULTIPLE SIDE EFFECTS. In the ideal case,
+    next_index == next0, which means that the speculative enqueue at the
+    top of the single loop has correctly dealt with the packet in hand;
+    in that case, the macro does nothing at all. This macro MAY set
+    to_next_aux = NULL if next_index does not support aux data.
+
+    @param vm vlib_main_t pointer, varies by thread
+    @param node current node vlib_node_runtime_t pointer
+    @param next_index speculated next index used for the packet
+    @param to_next speculated vector pointer used for the packet
+    @param to_next_aux speculated aux_data pointer used for the packet
+    @param n_left_to_next number of slots left in speculated vector
+    @param bi0 buffer index
+    @param aux0 aux_data for the buffer
+    @param next0 actual next index to be used for the packet
+
+    @return @c next_index -- speculative next index to be used for future packets
+    @return @c to_next -- speculative frame to be used for future packets
+    @return @c n_left_to_next -- number of slots left in speculative frame
+*/
+#define vlib_validate_buffer_enqueue_with_aux_x1(                            \
+  vm, node, next_index, to_next, to_next_aux, n_left_to_next, bi0, aux0,     \
+  next0)                                                                     \
+  do                                                                         \
+    {                                                                        \
+      ASSERT (bi0 != 0);                                                     \
+      if (PREDICT_FALSE (next0 != next_index))                               \
+        {                                                                    \
+          vlib_put_next_frame (vm, node, next_index, n_left_to_next + 1);    \
+          next_index = next0;                                                \
+          vlib_get_next_frame_with_aux_safe (vm, node, next_index, to_next,  \
+                                             to_next_aux, n_left_to_next);   \
+                                                                             \
+          to_next[0] = bi0;                                                  \
+          to_next += 1;                                                      \
+          if (to_next_aux)                                                   \
+            {                                                                \
+              to_next_aux[0] = aux0;                                         \
+              to_next_aux += 1;                                              \
+            }                                                                \
+          n_left_to_next -= 1;                                               \
+        }                                                                    \
+    }                                                                        \
+  while (0)
+
 always_inline uword
 generic_buffer_node_inline (vlib_main_t * vm,
                             vlib_node_runtime_t * node,
@@ -355,6 +402,16 @@ vlib_buffer_enqueue_to_next (vlib_main_t * vm, vlib_node_runtime_t * node,
   (fn) (vm, node, buffers, nexts, count);
 }
 
+static_always_inline void
+vlib_buffer_enqueue_to_next_with_aux (vlib_main_t *vm,
+                                      vlib_node_runtime_t *node, u32 *buffers,
+                                      u32 *aux_data, u16 *nexts, uword count)
+{
+  vlib_buffer_enqueue_to_next_with_aux_fn_t *fn;
+  fn = vlib_buffer_func_main.buffer_enqueue_to_next_with_aux_fn;
+  (fn) (vm, node, buffers, aux_data, nexts, count);
+}
+
 static_always_inline void
 vlib_buffer_enqueue_to_next_vec (vlib_main_t *vm, vlib_node_runtime_t *node,
                                  u32 **buffers, u16 **nexts, uword count)
@@ -379,6 +436,17 @@ vlib_buffer_enqueue_to_single_next (vlib_main_t * vm,
   (fn) (vm, node, buffers, next_index, count);
 }
 
+static_always_inline void
+vlib_buffer_enqueue_to_single_next_with_aux (vlib_main_t *vm,
+                                             vlib_node_runtime_t *node,
+                                             u32 *buffers, u32 *aux_data,
+                                             u16 next_index, u32 count)
+{
+  vlib_buffer_enqueue_to_single_next_with_aux_fn_t *fn;
+  fn = vlib_buffer_func_main.buffer_enqueue_to_single_next_with_aux_fn;
+  (fn) (vm, node, buffers, aux_data, next_index, count);
+}
+
 static_always_inline u32
 vlib_buffer_enqueue_to_thread (vlib_main_t *vm, vlib_node_runtime_t *node,
                                u32 frame_queue_index, u32 *buffer_indices,
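For readers new to this pattern, the sketch below shows how the new
vlib_validate_buffer_enqueue_with_aux_x1 macro is intended to sit in a
node's single-loop dispatch function: enqueue speculatively to the cached
next index, then let the macro repair the enqueue when the speculation
fails. The node body, the MY_NODE_NEXT_DROP next index, and the constant
aux value are illustrative assumptions, not part of this patch.

/* Hypothetical single-loop node body (not part of this patch) showing
 * where the new macro slots in; MY_NODE_NEXT_DROP and the aux value are
 * illustrative assumptions. */
static uword
my_node_fn (vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
{
  u32 *from = vlib_frame_vector_args (frame);
  u32 n_left_from = frame->n_vectors;
  u32 next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      u32 *to_next, *to_next_aux, n_left_to_next;

      /* may set to_next_aux = NULL if next_index has no aux support */
      vlib_get_next_frame_with_aux_safe (vm, node, next_index, to_next,
                                         to_next_aux, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0 = from[0];
          u32 aux0 = 0;                 /* e.g. a per-packet flow hash */
          u16 next0 = MY_NODE_NEXT_DROP;

          /* speculative enqueue: assume next0 will equal next_index */
          to_next[0] = bi0;
          to_next += 1;
          if (to_next_aux)
            {
              to_next_aux[0] = aux0;
              to_next_aux += 1;
            }
          from += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          /* ... classify the packet, possibly changing next0 ... */

          /* repair the enqueue if the speculation was wrong */
          vlib_validate_buffer_enqueue_with_aux_x1 (
            vm, node, next_index, to_next, to_next_aux, n_left_to_next, bi0,
            aux0, next0);
        }
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  return frame->n_vectors;
}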
@@ -391,6 +459,20 @@ vlib_buffer_enqueue_to_thread (vlib_main_t *vm, vlib_node_runtime_t *node,
                          n_packets, drop_on_congestion);
 }
 
+static_always_inline u32
+vlib_buffer_enqueue_to_thread_with_aux (vlib_main_t *vm,
+                                        vlib_node_runtime_t *node,
+                                        u32 frame_queue_index,
+                                        u32 *buffer_indices, u32 *aux,
+                                        u16 *thread_indices, u32 n_packets,
+                                        int drop_on_congestion)
+{
+  vlib_buffer_enqueue_to_thread_with_aux_fn_t *fn;
+  fn = vlib_buffer_func_main.buffer_enqueue_to_thread_with_aux_fn;
+  return (fn) (vm, node, frame_queue_index, buffer_indices, aux,
+               thread_indices, n_packets, drop_on_congestion);
+}
+
 #endif /* included_vlib_buffer_node_h */
 
 /*
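Like the other wrappers in this change, vlib_buffer_enqueue_to_thread_with_aux
fetches the architecture-specific function pointer from vlib_buffer_func_main
and tail-calls it. A minimal caller sketch, assuming a hypothetical dispatch
helper and using the per-buffer aux word for an illustrative flow id:

/* Hypothetical caller (not part of this patch): scatter a vector of
 * buffers to worker threads, carrying one u32 of aux data -- here an
 * illustrative flow id -- alongside each buffer index. */
static void
my_dispatch (vlib_main_t *vm, vlib_node_runtime_t *node, u32 fq_index,
             u32 *buffer_indices, u32 *flow_ids, u16 *thread_indices,
             u32 n_packets)
{
  /* drop_on_congestion = 1: drop rather than block when a worker's
     frame queue is full; the return value is the number enqueued */
  u32 n_enq = vlib_buffer_enqueue_to_thread_with_aux (
    vm, node, fq_index, buffer_indices, flow_ids, thread_indices, n_packets,
    1 /* drop_on_congestion */);

  if (n_enq < n_packets)
    vlib_node_increment_counter (vm, node->node_index,
                                 0 /* assumed drop counter index */,
                                 n_packets - n_enq);
}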