/* SPDX-License-Identifier: Apache-2.0
 * Copyright(c) 2021 Cisco Systems, Inc.
 */

#include <vlib/vlib.h>

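/*
 * Buffer enqueue helpers: copy vectors of buffer indices into next-node
 * frames (single or mixed next indices) or hand them off to other worker
 * threads through frame queues.  Compiled once per CPU architecture
 * variant and resolved via CLIB_MARCH_FN_POINTER at init time.
 */
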
void __clib_section (".vlib_buffer_enqueue_to_next_fn") CLIB_MULTIARCH_FN (
  vlib_buffer_enqueue_to_next_fn) (vlib_main_t *vm, vlib_node_runtime_t *node,
                                   u32 *buffers, u16 *nexts, uword count)
{
  u32 *to_next, n_left_to_next, max;
  u16 next_index;

  next_index = nexts[0];
  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
  max = clib_min (n_left_to_next, count);
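
  /* Walk 'nexts', batching runs of buffers that share the same next node
   * so their indices can be copied into the frame in bulk. */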
  while (count)
    {
      u32 n_enqueued;

      if ((nexts[0] != next_index) || n_left_to_next == 0)
        {
          /* next node changed or frame is full: ship the current frame
           * and start a fresh one for the new next index */
          vlib_put_next_frame (vm, node, next_index, n_left_to_next);
          next_index = nexts[0];
          vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
          max = clib_min (n_left_to_next, count);
        }
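
      /* Determine how many consecutive entries of 'nexts' equal nexts[0]:
       * compare a vector of nexts against a splat of nexts[0], convert the
       * result to a bitmask and count the trailing one bits. */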
#if defined(CLIB_HAVE_VEC512)
      u16x32 next32 = CLIB_MEM_OVERFLOW_LOAD (u16x32_load_unaligned, nexts);
      next32 = (next32 == u16x32_splat (next32[0]));
      u64 bitmap = u16x32_msb_mask (next32);
      n_enqueued = count_trailing_zeros (~bitmap);
#elif defined(CLIB_HAVE_VEC256)
      u16x16 next16 = CLIB_MEM_OVERFLOW_LOAD (u16x16_load_unaligned, nexts);
      next16 = (next16 == u16x16_splat (next16[0]));
      u64 bitmap = u8x32_msb_mask ((u8x32) next16);
      n_enqueued = count_trailing_zeros (~bitmap) / 2;
#elif defined(CLIB_HAVE_VEC128) && defined(CLIB_HAVE_VEC128_MSB_MASK)
      u16x8 next8 = CLIB_MEM_OVERFLOW_LOAD (u16x8_load_unaligned, nexts);
      next8 = (next8 == u16x8_splat (next8[0]));
      u64 bitmap = u8x16_msb_mask ((u8x16) next8);
      n_enqueued = count_trailing_zeros (~bitmap) / 2;
#else
      /* scalar fallback: check whether the next 3 entries match nexts[0] */
      u16 x = 0;
      if (count > 3)
        {
          x |= next_index ^ nexts[1];
          x |= next_index ^ nexts[2];
          x |= next_index ^ nexts[3];
          n_enqueued = (x == 0) ? 4 : 1;
        }
      else
        n_enqueued = 1;
#endif

      if (PREDICT_FALSE (n_enqueued > max))
        n_enqueued = max;
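
      /* Copy the run into the frame using the widest step available,
       * then advance all cursors and continue with the remainder. */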
#ifdef CLIB_HAVE_VEC512
      if (n_enqueued >= 32)
        {
          vlib_buffer_copy_indices (to_next, buffers, 32);
          nexts += 32, buffers += 32, to_next += 32;
          count -= 32, n_left_to_next -= 32, max -= 32;
          continue;
        }
#endif

#ifdef CLIB_HAVE_VEC256
      if (n_enqueued >= 16)
        {
          vlib_buffer_copy_indices (to_next, buffers, 16);
          nexts += 16, buffers += 16, to_next += 16;
          count -= 16, n_left_to_next -= 16, max -= 16;
          continue;
        }
#endif

#ifdef CLIB_HAVE_VEC128
      if (n_enqueued >= 8)
        {
          vlib_buffer_copy_indices (to_next, buffers, 8);
          nexts += 8, buffers += 8, to_next += 8;
          count -= 8, n_left_to_next -= 8, max -= 8;
          continue;
        }
#endif

      if (n_enqueued >= 4)
        {
          vlib_buffer_copy_indices (to_next, buffers, 4);
          nexts += 4, buffers += 4, to_next += 4;
          count -= 4, n_left_to_next -= 4, max -= 4;
          continue;
        }

      /* single buffer */
      to_next[0] = buffers[0];
      nexts += 1, buffers += 1, to_next += 1;
      count -= 1, n_left_to_next -= 1, max -= 1;
    }

  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
}
CLIB_MARCH_FN_REGISTRATION (vlib_buffer_enqueue_to_next_fn);
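
/*
 * Nodes do not call the multiarch variants directly; a minimal usage sketch
 * (assuming the usual inline wrapper that dispatches through
 * vlib_buffer_func_main) looks like:
 *
 *   u32 buffers[VLIB_FRAME_SIZE];
 *   u16 nexts[VLIB_FRAME_SIZE];
 *   ...
 *   vlib_buffer_enqueue_to_next (vm, node, buffers, nexts, n_pkts);
 */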

/* Enqueue 'count' buffer indices to a single, known next node. */
void __clib_section (".vlib_buffer_enqueue_to_single_next_fn")
CLIB_MULTIARCH_FN (vlib_buffer_enqueue_to_single_next_fn) (
  vlib_main_t *vm, vlib_node_runtime_t *node, u32 *buffers, u16 next_index,
  u32 count)
{
  u32 *to_next, n_left_to_next, n_enq;

  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

  if (PREDICT_TRUE (n_left_to_next >= count))
    {
      /* fast path: everything fits into the current frame */
      vlib_buffer_copy_indices (to_next, buffers, count);
      n_left_to_next -= count;
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
      return;
    }
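
  /* Slow path: fill the current frame, ship it and keep requesting new
   * frames until all buffer indices have been copied. */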
  n_enq = n_left_to_next;
next:
  vlib_buffer_copy_indices (to_next, buffers, n_enq);
  n_left_to_next -= n_enq;

  if (PREDICT_FALSE (count > n_enq))
    {
      count -= n_enq;
      buffers += n_enq;

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
      n_enq = clib_min (n_left_to_next, count);
      goto next;
    }
  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
}
CLIB_MARCH_FN_REGISTRATION (vlib_buffer_enqueue_to_single_next_fn);

/* Hand off buffers to other threads through per-thread frame queues.
 * Returns the number of buffers actually enqueued; with drop_on_congestion
 * set, buffers destined to congested threads are queued for freeing. */
u32 __clib_section (".vlib_buffer_enqueue_to_thread_fn")
CLIB_MULTIARCH_FN (vlib_buffer_enqueue_to_thread_fn) (
  vlib_main_t *vm, u32 frame_queue_index, u32 *buffer_indices,
  u16 *thread_indices, u32 n_packets, int drop_on_congestion)
{
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  vlib_frame_queue_main_t *fqm;
  vlib_frame_queue_per_thread_data_t *ptd;
  u32 n_left = n_packets;
  u32 drop_list[VLIB_FRAME_SIZE], *dbi = drop_list, n_drop = 0;
  vlib_frame_queue_elt_t *hf = 0;
  u32 n_left_to_next_thread = 0, *to_next_thread = 0;
  u32 next_thread_index, current_thread_index = ~0;
  u32 i;

  fqm = vec_elt_at_index (tm->frame_queue_mains, frame_queue_index);
  ptd = vec_elt_at_index (fqm->per_thread_data, vm->thread_index);
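
  /* For each buffer, look up its destination thread; whenever the
   * destination changes, switch to (or allocate) that thread's handoff
   * queue element and append buffer indices to it. */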

  while (n_left)
    {
      next_thread_index = thread_indices[0];

      if (next_thread_index != current_thread_index)
        {
          if (drop_on_congestion &&
              is_vlib_frame_queue_congested (
                frame_queue_index, next_thread_index, fqm->queue_hi_thresh,
                ptd->congested_handoff_queue_by_thread_index))
            {
              /* destination thread is congested: drop instead of handoff */
              dbi[0] = buffer_indices[0];
              dbi++;
              n_drop++;
              goto next;
            }

          if (hf)
            hf->n_vectors = VLIB_FRAME_SIZE - n_left_to_next_thread;

          hf = vlib_get_worker_handoff_queue_elt (
            frame_queue_index, next_thread_index,
            ptd->handoff_queue_elt_by_thread_index);

          n_left_to_next_thread = VLIB_FRAME_SIZE - hf->n_vectors;
          to_next_thread = &hf->buffer_index[hf->n_vectors];
          current_thread_index = next_thread_index;
        }

      to_next_thread[0] = buffer_indices[0];
      to_next_thread++;
      n_left_to_next_thread--;

      if (n_left_to_next_thread == 0)
        {
          /* handoff frame is full: ship it to the destination thread */
          hf->n_vectors = VLIB_FRAME_SIZE;
          vlib_put_frame_queue_elt (hf);
          vlib_get_main_by_index (current_thread_index)->check_frame_queues =
            1;
          current_thread_index = ~0;
          ptd->handoff_queue_elt_by_thread_index[next_thread_index] = 0;
          hf = 0;
        }

    next:
      thread_indices += 1;
      buffer_indices += 1;
      n_left -= 1;
    }

  if (hf)
    hf->n_vectors = VLIB_FRAME_SIZE - n_left_to_next_thread;

  /* Ship frames to the thread nodes */
  for (i = 0; i < vec_len (ptd->handoff_queue_elt_by_thread_index); i++)
    {
      if (ptd->handoff_queue_elt_by_thread_index[i])
        {
          hf = ptd->handoff_queue_elt_by_thread_index[i];
          /*
           * It works better to let the handoff node
           * rate-adapt, always ship the handoff queue element.
           */
          if (1 || hf->n_vectors == hf->last_n_vectors)
            {
              vlib_put_frame_queue_elt (hf);
              vlib_get_main_by_index (i)->check_frame_queues = 1;
              ptd->handoff_queue_elt_by_thread_index[i] = 0;
            }
          else
            hf->last_n_vectors = hf->n_vectors;
        }
      /* clear the cached congestion marker for the next call */
      ptd->congested_handoff_queue_by_thread_index[i] =
        (vlib_frame_queue_t *) (~0);
    }

  if (drop_on_congestion && n_drop)
    vlib_buffer_free (vm, drop_list, n_drop);

  return n_packets - n_drop;
}
CLIB_MARCH_FN_REGISTRATION (vlib_buffer_enqueue_to_thread_fn);
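
/* Resolve the best multi-arch variant of each enqueue function once at
 * init time and cache the pointers in vlib_buffer_func_main, so callers
 * reach them through a single indirect call. */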
#ifndef CLIB_MARCH_VARIANT
vlib_buffer_func_main_t vlib_buffer_func_main;

static clib_error_t *
vlib_buffer_funcs_init (vlib_main_t *vm)
{
  vlib_buffer_func_main_t *bfm = &vlib_buffer_func_main;
  bfm->buffer_enqueue_to_next_fn =
    CLIB_MARCH_FN_POINTER (vlib_buffer_enqueue_to_next_fn);
  bfm->buffer_enqueue_to_single_next_fn =
    CLIB_MARCH_FN_POINTER (vlib_buffer_enqueue_to_single_next_fn);
  bfm->buffer_enqueue_to_thread_fn =
    CLIB_MARCH_FN_POINTER (vlib_buffer_enqueue_to_thread_fn);
  return 0;
}

VLIB_INIT_FUNCTION (vlib_buffer_funcs_init);
#endif