/* SPDX-License-Identifier: Apache-2.0
 * Copyright(c) 2021 Cisco Systems, Inc.
 */

#include <vppinfra/clib.h>
#include <vlib/vlib.h>
#include <vppinfra/vector/mask_compare.h>
#include <vppinfra/vector/compress.h>
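
/* enqueue_one:
 * Move all buffers in the current batch whose next-node index equals
 * 'next_index' into the next frame for that node.  The per-call match
 * bitmap is OR-ed into 'used_elt_bmp' so the caller can locate the next
 * still-unprocessed element.  Returns the number of buffers in the batch
 * that remain to be enqueued. */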
static_always_inline u32
enqueue_one (vlib_main_t *vm, vlib_node_runtime_t *node, u64 *used_elt_bmp,
	     u16 next_index, u32 *buffers, u16 *nexts, u32 n_buffers,
	     u32 n_left, u32 *tmp)
{
  u64 match_bmp[VLIB_FRAME_SIZE / 64];
  vlib_frame_t *f;
  u32 n_extracted, n_free;
  u32 *to;

  f = vlib_get_next_frame_internal (vm, node, next_index, 0);
  n_free = VLIB_FRAME_SIZE - f->n_vectors;

  /* if frame contains enough space for worst case scenario, we can avoid
   * use of tmp */
  if (n_free >= n_left)
    to = (u32 *) vlib_frame_vector_args (f) + f->n_vectors;
  else
    to = tmp;
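
  /* bitmap of batch elements whose next index equals 'next_index';
   * compress the matching buffer indices into 'to' */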
  clib_mask_compare_u16 (next_index, nexts, match_bmp, n_buffers);
  n_extracted = clib_compress_u32 (to, buffers, match_bmp, n_buffers);

  for (int i = 0; i < ARRAY_LEN (match_bmp); i++)
    used_elt_bmp[i] |= match_bmp[i];

  if (to != tmp)
    {
      /* indices already written to frame, just close it */
      vlib_put_next_frame (vm, node, next_index, n_free - n_extracted);
    }
  else if (n_free >= n_extracted)
    {
      /* enough space in the existing frame */
      to = (u32 *) vlib_frame_vector_args (f) + f->n_vectors;
      vlib_buffer_copy_indices (to, tmp, n_extracted);
      vlib_put_next_frame (vm, node, next_index, n_free - n_extracted);
    }
  else
    {
      /* fill and close the existing frame ... */
      to = (u32 *) vlib_frame_vector_args (f) + f->n_vectors;
      vlib_buffer_copy_indices (to, tmp, n_free);
      vlib_put_next_frame (vm, node, next_index, 0);

      /* ... and spill the remainder into a second frame */
      u32 n_2nd_frame = n_extracted - n_free;
      f = vlib_get_next_frame_internal (vm, node, next_index, 1);
      to = vlib_frame_vector_args (f);
      vlib_buffer_copy_indices (to, tmp + n_free, n_2nd_frame);
      vlib_put_next_frame (vm, node, next_index,
			   VLIB_FRAME_SIZE - n_2nd_frame);
    }

  return n_left - n_extracted;
}
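
/* vlib_buffer_enqueue_to_next_fn:
 * Enqueue 'count' buffers to the per-buffer next nodes given in 'nexts'.
 * Work is done in batches of up to VLIB_FRAME_SIZE; within a batch,
 * enqueue_one() is called once per distinct next index, each call peeling
 * off every buffer that targets that node.
 *
 * Node code normally reaches this through the vlib_buffer_enqueue_to_next()
 * wrapper, which dispatches via the vlib_buffer_func_main pointers set up
 * in vlib_buffer_funcs_init() below, roughly:
 *
 *   vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);
 */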
void __clib_section (".vlib_buffer_enqueue_to_next_fn")
CLIB_MULTIARCH_FN (vlib_buffer_enqueue_to_next_fn)
(vlib_main_t *vm, vlib_node_runtime_t *node, u32 *buffers, u16 *nexts,
 uword count)
{
  u32 tmp[VLIB_FRAME_SIZE];
  u32 n_left;
  u16 next_index;

  while (count >= VLIB_FRAME_SIZE)
    {
      u64 used_elt_bmp[VLIB_FRAME_SIZE / 64] = {};
      u32 off = 0;
      n_left = VLIB_FRAME_SIZE;

      next_index = nexts[0];
      n_left = enqueue_one (vm, node, used_elt_bmp, next_index, buffers, nexts,
			    VLIB_FRAME_SIZE, n_left, tmp);

      while (n_left)
	{
	  /* skip fully used bitmap words, then pick the next unused element */
	  while (PREDICT_FALSE (used_elt_bmp[off] == ~0))
	    {
	      off++;
	      ASSERT (off < ARRAY_LEN (used_elt_bmp));
	    }
	  next_index =
	    nexts[off * 64 + count_trailing_zeros (~used_elt_bmp[off])];
	  n_left = enqueue_one (vm, node, used_elt_bmp, next_index, buffers,
				nexts, VLIB_FRAME_SIZE, n_left, tmp);
	}

      buffers += VLIB_FRAME_SIZE;
      nexts += VLIB_FRAME_SIZE;
      count -= VLIB_FRAME_SIZE;
    }
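
  /* same processing for the tail batch that is shorter than a full frame */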
  if (count)
    {
      u64 used_elt_bmp[VLIB_FRAME_SIZE / 64] = {};
      u32 off = 0;
      next_index = nexts[0];
      n_left = count;

      n_left = enqueue_one (vm, node, used_elt_bmp, next_index, buffers, nexts,
			    count, n_left, tmp);

      while (n_left)
	{
	  while (PREDICT_FALSE (used_elt_bmp[off] == ~0))
	    {
	      off++;
	      ASSERT (off < ARRAY_LEN (used_elt_bmp));
	    }
	  next_index =
	    nexts[off * 64 + count_trailing_zeros (~used_elt_bmp[off])];
	  n_left = enqueue_one (vm, node, used_elt_bmp, next_index, buffers,
				nexts, count, n_left, tmp);
	}
    }
}

CLIB_MARCH_FN_REGISTRATION (vlib_buffer_enqueue_to_next_fn);
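
/* vlib_buffer_enqueue_to_single_next_fn:
 * Enqueue 'count' buffers to a single next node.  In the common case all
 * buffers fit in the current frame and a single copy suffices; otherwise
 * frames are filled and put repeatedly until all buffers are enqueued. */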
void __clib_section (".vlib_buffer_enqueue_to_single_next_fn")
CLIB_MULTIARCH_FN (vlib_buffer_enqueue_to_single_next_fn)
(vlib_main_t *vm, vlib_node_runtime_t *node, u32 *buffers, u16 next_index,
 u32 count)
{
  u32 *to_next, n_left_to_next, n_enq;

  vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

  if (PREDICT_TRUE (n_left_to_next >= count))
    {
      vlib_buffer_copy_indices (to_next, buffers, count);
      n_left_to_next -= count;
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
      return;
    }

  n_enq = n_left_to_next;
next:
  vlib_buffer_copy_indices (to_next, buffers, n_enq);
  n_left_to_next -= n_enq;

  if (PREDICT_FALSE (count > n_enq))
    {
      count -= n_enq;
      buffers += n_enq;

      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
      n_enq = clib_min (n_left_to_next, count);
      goto next;
    }
  vlib_put_next_frame (vm, node, next_index, n_left_to_next);
}

CLIB_MARCH_FN_REGISTRATION (vlib_buffer_enqueue_to_single_next_fn);
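
/* vlib_get_frame_queue_elt:
 * Claim the next free slot on the destination thread's frame queue by
 * atomically bumping the queue tail.  If the queue is full, either return 0
 * so the caller can drop (dont_wait set) or spin until a slot frees up. */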
static inline vlib_frame_queue_elt_t *
vlib_get_frame_queue_elt (vlib_frame_queue_main_t *fqm, u32 index,
			  int dont_wait)
{
  vlib_frame_queue_t *fq;
  u64 nelts, tail, new_tail;

  fq = fqm->vlib_frame_queues[index];
  ASSERT (fq);
  nelts = fq->nelts;

retry:
  tail = __atomic_load_n (&fq->tail, __ATOMIC_ACQUIRE);
  new_tail = tail + 1;

  if (new_tail >= fq->head + nelts)
    {
      if (dont_wait)
	return 0;

      /* Wait until a ring slot is available */
      while (new_tail >= fq->head + nelts)
	vlib_worker_thread_barrier_check ();
    }

  if (!__atomic_compare_exchange_n (&fq->tail, &tail, new_tail, 0 /* weak */,
				    __ATOMIC_RELAXED, __ATOMIC_RELAXED))
    goto retry;

  return fq->elts + (new_tail & (nelts - 1));
}
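
/* vlib_buffer_enqueue_to_thread_inline:
 * Hand a batch of buffers off to other threads' frame queues.  For each
 * distinct thread index a frame queue element is claimed and the matching
 * buffer indices are compressed into it; if the destination queue is full
 * and drop_on_congestion is set, the buffers are collected and freed
 * instead.  Returns the number of buffers actually enqueued. */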
static_always_inline u32
vlib_buffer_enqueue_to_thread_inline (vlib_main_t *vm,
				      vlib_node_runtime_t *node,
				      vlib_frame_queue_main_t *fqm,
				      u32 *buffer_indices, u16 *thread_indices,
				      u32 n_packets, int drop_on_congestion)
{
  u32 drop_list[VLIB_FRAME_SIZE], n_drop = 0;
  u64 used_elts[VLIB_FRAME_SIZE / 64] = {};
  u64 mask[VLIB_FRAME_SIZE / 64];
  vlib_frame_queue_elt_t *hf = 0;
  u16 thread_index;
  u32 n_comp, off = 0, n_left = n_packets;

  thread_index = thread_indices[0];

more:
  clib_mask_compare_u16 (thread_index, thread_indices, mask, n_packets);
  hf = vlib_get_frame_queue_elt (fqm, thread_index, drop_on_congestion);

  n_comp = clib_compress_u32 (hf ? hf->buffer_index : drop_list + n_drop,
			      buffer_indices, mask, n_packets);

  if (hf)
    {
      if (node->flags & VLIB_NODE_FLAG_TRACE)
	hf->maybe_trace = 1;
      hf->n_vectors = n_comp;
      __atomic_store_n (&hf->valid, 1, __ATOMIC_RELEASE);
      vlib_get_main_by_index (thread_index)->check_frame_queues = 1;
    }
  else /* queue full, drop_on_congestion set */
    n_drop += n_comp;

  n_left -= n_comp;

  if (n_left)
    {
      for (int i = 0; i < ARRAY_LEN (used_elts); i++)
	used_elts[i] |= mask[i];

      while (PREDICT_FALSE (used_elts[off] == ~0))
	{
	  off++;
	  ASSERT (off < ARRAY_LEN (used_elts));
	}
      thread_index =
	thread_indices[off * 64 + count_trailing_zeros (~used_elts[off])];
      goto more;
    }

  if (drop_on_congestion && n_drop)
    vlib_buffer_free (vm, drop_list, n_drop);

  return n_packets - n_drop;
}
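
/* vlib_buffer_enqueue_to_thread_fn:
 * Public entry point; splits the work into VLIB_FRAME_SIZE batches and
 * calls the inline worker above for each batch and for the tail. */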
u32 __clib_section (".vlib_buffer_enqueue_to_thread_fn")
CLIB_MULTIARCH_FN (vlib_buffer_enqueue_to_thread_fn)
(vlib_main_t *vm, vlib_node_runtime_t *node, u32 frame_queue_index,
 u32 *buffer_indices, u16 *thread_indices, u32 n_packets,
 int drop_on_congestion)
{
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  vlib_frame_queue_main_t *fqm;
  u32 n_enq = 0;

  fqm = vec_elt_at_index (tm->frame_queue_mains, frame_queue_index);

  while (n_packets >= VLIB_FRAME_SIZE)
    {
      n_enq += vlib_buffer_enqueue_to_thread_inline (
	vm, node, fqm, buffer_indices, thread_indices, VLIB_FRAME_SIZE,
	drop_on_congestion);
      buffer_indices += VLIB_FRAME_SIZE;
      thread_indices += VLIB_FRAME_SIZE;
      n_packets -= VLIB_FRAME_SIZE;
    }

  if (n_packets == 0)
    return n_enq;

  n_enq += vlib_buffer_enqueue_to_thread_inline (vm, node, fqm, buffer_indices,
						 thread_indices, n_packets,
						 drop_on_congestion);

  return n_enq;
}

CLIB_MARCH_FN_REGISTRATION (vlib_buffer_enqueue_to_thread_fn);
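
/* vlib_frame_queue_dequeue_fn:
 * Runs on the consumer thread; drains valid elements from this thread's
 * frame queue into frames for fqm->node_index until the ring is empty, an
 * element is not yet valid, or the vector threshold is reached.  Returns
 * the number of ring elements fully processed. */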
u32 __clib_section (".vlib_frame_queue_dequeue_fn")
CLIB_MULTIARCH_FN (vlib_frame_queue_dequeue_fn)
(vlib_main_t *vm, vlib_frame_queue_main_t *fqm)
{
  u32 thread_id = vm->thread_index;
  vlib_frame_queue_t *fq = fqm->vlib_frame_queues[thread_id];
  u32 mask = fq->nelts - 1;
  vlib_frame_queue_elt_t *elt;
  u32 n_free, n_copy, *from, *to = 0, processed = 0, vectors = 0;
  vlib_frame_t *f = 0;

  ASSERT (fq);
  ASSERT (vm == vlib_global_main.vlib_mains[thread_id]);

  if (PREDICT_FALSE (fqm->node_index == ~0))
    return 0;

  /*
   * Gather trace data for frame queues
   */
  if (PREDICT_FALSE (fq->trace))
    {
      frame_queue_trace_t *fqt;
      frame_queue_nelt_counter_t *fqh;
      u32 elix;

      fqt = &fqm->frame_queue_traces[thread_id];

      fqt->nelts = fq->nelts;
      fqt->head = fq->head;
      fqt->tail = fq->tail;
      fqt->threshold = fq->vector_threshold;
      fqt->n_in_use = fqt->tail - fqt->head;
      if (fqt->n_in_use >= fqt->nelts)
	{
	  /* if beyond max then use max */
	  fqt->n_in_use = fqt->nelts - 1;
	}

      /* Record the number of elements in use in the histogram */
      fqh = &fqm->frame_queue_histogram[thread_id];
      fqh->count[fqt->n_in_use]++;

      /* Record a snapshot of the elements in use */
      for (elix = 0; elix < fqt->nelts; elix++)
	{
	  elt = fq->elts + ((fq->head + 1 + elix) & (mask));
	  fqt->n_vectors[elix] = elt->n_vectors;
	}
      fqt->written = 1;
    }
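
  /* Dequeue as many elements as allowed, copying their buffer indices into
   * frames for the handoff node. */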
  while (1)
    {
      if (fq->head == fq->tail)
	break;

      elt = fq->elts + ((fq->head + 1) & mask);

      if (!__atomic_load_n (&elt->valid, __ATOMIC_ACQUIRE))
	break;

      from = elt->buffer_index + elt->offset;
      ASSERT (elt->offset + elt->n_vectors <= VLIB_FRAME_SIZE);

      if (f == 0)
	{
	  f = vlib_get_frame_to_node (vm, fqm->node_index);
	  to = vlib_frame_vector_args (f);
	  n_free = VLIB_FRAME_SIZE;
	}

      if (elt->maybe_trace)
	f->frame_flags |= VLIB_NODE_FLAG_TRACE;

      n_copy = clib_min (n_free, elt->n_vectors);
      vlib_buffer_copy_indices (to, from, n_copy);
      to += n_copy;
      n_free -= n_copy;
      vectors += n_copy;

      if (n_free == 0)
	{
	  f->n_vectors = VLIB_FRAME_SIZE;
	  vlib_put_frame_to_node (vm, fqm->node_index, f);
	  f = 0;
	}

      if (n_copy < elt->n_vectors)
	{
	  /* not empty - leave it on the ring */
	  elt->n_vectors -= n_copy;
	  elt->offset += n_copy;
	}
      else
	{
	  /* empty - reset and bump head */
	  u32 sz = STRUCT_OFFSET_OF (vlib_frame_queue_elt_t, end_of_reset);
	  clib_memset (elt, 0, sz);
	  __atomic_store_n (&fq->head, fq->head + 1, __ATOMIC_RELEASE);
	  processed++;
	}

      /* Limit the number of packets pushed into the graph */
      if (vectors >= fq->vector_threshold)
	break;
    }

  if (f)
    {
      f->n_vectors = VLIB_FRAME_SIZE - n_free;
      vlib_put_frame_to_node (vm, fqm->node_index, f);
    }

  return processed;
}

CLIB_MARCH_FN_REGISTRATION (vlib_frame_queue_dequeue_fn);

#ifndef CLIB_MARCH_VARIANT
vlib_buffer_func_main_t vlib_buffer_func_main;
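
/* Install the per-architecture variants selected at runtime into the
 * function-pointer table used by the inline wrappers in vlib. */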
static clib_error_t *
vlib_buffer_funcs_init (vlib_main_t *vm)
{
  vlib_buffer_func_main_t *bfm = &vlib_buffer_func_main;
  bfm->buffer_enqueue_to_next_fn =
    CLIB_MARCH_FN_POINTER (vlib_buffer_enqueue_to_next_fn);
  bfm->buffer_enqueue_to_single_next_fn =
    CLIB_MARCH_FN_POINTER (vlib_buffer_enqueue_to_single_next_fn);
  bfm->buffer_enqueue_to_thread_fn =
    CLIB_MARCH_FN_POINTER (vlib_buffer_enqueue_to_thread_fn);
  bfm->frame_queue_dequeue_fn =
    CLIB_MARCH_FN_POINTER (vlib_frame_queue_dequeue_fn);
  return 0;
}

VLIB_INIT_FUNCTION (vlib_buffer_funcs_init);
#endif