/*
2 * Copyright (c) 2016 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
*/
16 #include <svm/svm_fifo.h>
17 #include <vppinfra/cpu.h>
19 #if __x86_64__ && CLIB_DEBUG == 0
20 #define foreach_march_variant_runtime(macro, _args...) \
21 macro(avx512, avx512f, _args) \
22 macro(avx2, avx2, _args)
24 #define foreach_march_variant_runtime(macro, _args...)
27 #define CLIB_MARCH_ARCH_CHECK(arch, archname, fn) \
28 if (clib_cpu_supports_ ## archname ()) \
29 return & fn ## _ ##arch;
31 #define CLIB_MARCH_SELECT_FN(fn,...) \
32 __VA_ARGS__ void * fn ## _multiarch_select(void) \
34 foreach_march_variant_runtime(CLIB_MARCH_ARCH_CHECK, fn) \
38 #define DEFINE_WEAK_FN(_arch, _fn, _args...) \
39 int __attribute__((weak)) _fn##_ma_##_arch(_args);
40 #define DEFINE_FN_HELPER(arch, archname, macro, _args...) \
43 #ifndef CLIB_MARCH_VARIANT
44 #define MARCH_FN(fn, _args...) \
45 static void * (*fn ## _selected) (void); \
46 foreach_march_variant_runtime (DEFINE_FN_HELPER, DEFINE_WEAK_FN, fn, \
48 static inline int CLIB_CPU_OPTIMIZED CLIB_MARCH_SFX (fn ## _ma)(_args)
50 #define MARCH_FN(fn, _args...) \
51 int CLIB_CPU_OPTIMIZED CLIB_MARCH_SFX (fn ## _ma)(_args)
55 position_lt (svm_fifo_t * f, u32 a, u32 b)
57 return (ooo_segment_distance_from_tail (f, a)
58 < ooo_segment_distance_from_tail (f, b));
62 position_leq (svm_fifo_t * f, u32 a, u32 b)
64 return (ooo_segment_distance_from_tail (f, a)
65 <= ooo_segment_distance_from_tail (f, b));
69 position_gt (svm_fifo_t * f, u32 a, u32 b)
71 return (ooo_segment_distance_from_tail (f, a)
72 > ooo_segment_distance_from_tail (f, b));
76 position_diff (svm_fifo_t * f, u32 posa, u32 posb)
78 return ooo_segment_distance_from_tail (f, posa)
79 - ooo_segment_distance_from_tail (f, posb);
83 ooo_segment_end_pos (svm_fifo_t * f, ooo_segment_t * s)
85 return (s->start + s->length) % f->nitems;
88 #ifndef CLIB_MARCH_VARIANT
91 format_ooo_segment (u8 * s, va_list * args)
93 svm_fifo_t *f = va_arg (*args, svm_fifo_t *);
94 ooo_segment_t *seg = va_arg (*args, ooo_segment_t *);
95 u32 normalized_start = (seg->start + f->nitems - f->tail) % f->nitems;
96 s = format (s, "[%u, %u], len %u, next %d, prev %d", normalized_start,
97 (normalized_start + seg->length) % f->nitems, seg->length,
98 seg->next, seg->prev);
103 svm_fifo_dump_trace (u8 * s, svm_fifo_t * f)
106 svm_fifo_trace_elem_t *seg = 0;
111 vec_foreach (seg, f->trace)
113 s = format (s, "{%u, %u, %u}, ", seg->offset, seg->len, seg->action);
116 s = format (s, "\n");
118 s = format (s, "\n");
127 svm_fifo_replay (u8 * s, svm_fifo_t * f, u8 no_read, u8 verbose)
131 svm_fifo_trace_elem_t *trace;
133 svm_fifo_t *dummy_fifo;
140 trace_len = vec_len (trace);
146 dummy_fifo = svm_fifo_create (f->nitems);
147 memset (f->data, 0xFF, f->nitems);
149 vec_validate (data, f->nitems);
150 for (i = 0; i < vec_len (data); i++)
153 for (i = 0; i < trace_len; i++)
155 offset = trace[i].offset;
156 if (trace[i].action == 1)
159 s = format (s, "adding [%u, %u]:", trace[i].offset,
161 trace[i].len) % dummy_fifo->nitems);
162 svm_fifo_enqueue_with_offset (dummy_fifo, trace[i].offset,
163 trace[i].len, &data[offset]);
165 else if (trace[i].action == 2)
168 s = format (s, "adding [%u, %u]:", 0, trace[i].len);
169 svm_fifo_enqueue_nowait (dummy_fifo, trace[i].len, &data[offset]);
174 s = format (s, "read: %u", trace[i].len);
175 svm_fifo_dequeue_drop (dummy_fifo, trace[i].len);
178 s = format (s, "%U", format_svm_fifo, dummy_fifo, 1);
181 s = format (s, "result: %U", format_svm_fifo, dummy_fifo, 1);
187 format_ooo_list (u8 * s, va_list * args)
189 svm_fifo_t *f = va_arg (*args, svm_fifo_t *);
190 u32 ooo_segment_index = f->ooos_list_head;
193 while (ooo_segment_index != OOO_SEGMENT_INVALID_INDEX)
195 seg = pool_elt_at_index (f->ooo_segments, ooo_segment_index);
196 s = format (s, " %U\n", format_ooo_segment, f, seg);
197 ooo_segment_index = seg->next;
204 format_svm_fifo (u8 * s, va_list * args)
206 svm_fifo_t *f = va_arg (*args, svm_fifo_t *);
207 int verbose = va_arg (*args, int);
212 s = format (s, "cursize %u nitems %u has_event %d\n",
213 f->cursize, f->nitems, f->has_event);
214 s = format (s, " head %d tail %d segment manager %u\n", f->head, f->tail,
219 (s, " vpp session %d thread %d app session %d thread %d\n",
220 f->master_session_index, f->master_thread_index,
221 f->client_session_index, f->client_thread_index);
225 s = format (s, " ooo pool %d active elts newest %u\n",
226 pool_elts (f->ooo_segments), f->ooos_newest);
227 if (svm_fifo_has_ooo_data (f))
228 s = format (s, " %U", format_ooo_list, f, verbose);
233 /** create an svm fifo, in the current heap. Fails vs blow up the process */
235 svm_fifo_create (u32 data_size_in_bytes)
238 u32 rounded_data_size;
240 /* always round fifo data size to the next highest power-of-two */
241 rounded_data_size = (1 << (max_log2 (data_size_in_bytes)));
242 f = clib_mem_alloc_aligned_or_null (sizeof (*f) + rounded_data_size,
243 CLIB_CACHE_LINE_BYTES);
247 memset (f, 0, sizeof (*f));
248 f->nitems = data_size_in_bytes;
249 f->ooos_list_head = OOO_SEGMENT_INVALID_INDEX;
255 svm_fifo_free (svm_fifo_t * f)
257 ASSERT (f->refcnt > 0);
259 if (--f->refcnt == 0)
261 pool_free (f->ooo_segments);
267 always_inline ooo_segment_t *
268 ooo_segment_new (svm_fifo_t * f, u32 start, u32 length)
272 pool_get (f->ooo_segments, s);
277 s->prev = s->next = OOO_SEGMENT_INVALID_INDEX;
283 ooo_segment_del (svm_fifo_t * f, u32 index)
285 ooo_segment_t *cur, *prev = 0, *next = 0;
286 cur = pool_elt_at_index (f->ooo_segments, index);
288 if (cur->next != OOO_SEGMENT_INVALID_INDEX)
290 next = pool_elt_at_index (f->ooo_segments, cur->next);
291 next->prev = cur->prev;
294 if (cur->prev != OOO_SEGMENT_INVALID_INDEX)
296 prev = pool_elt_at_index (f->ooo_segments, cur->prev);
297 prev->next = cur->next;
301 f->ooos_list_head = cur->next;
304 pool_put (f->ooo_segments, cur);
308 * Add segment to fifo's out-of-order segment list. Takes care of merging
309 * adjacent segments and removing overlapping ones.
312 ooo_segment_add (svm_fifo_t * f, u32 offset, u32 length)
314 ooo_segment_t *s, *new_s, *prev, *next, *it;
315 u32 new_index, s_end_pos, s_index;
316 u32 normalized_position, normalized_end_position;
318 ASSERT (offset + length <= ooo_segment_distance_from_tail (f, f->head));
319 normalized_position = (f->tail + offset) % f->nitems;
320 normalized_end_position = (f->tail + offset + length) % f->nitems;
322 f->ooos_newest = OOO_SEGMENT_INVALID_INDEX;
324 if (f->ooos_list_head == OOO_SEGMENT_INVALID_INDEX)
326 s = ooo_segment_new (f, normalized_position, length);
327 f->ooos_list_head = s - f->ooo_segments;
328 f->ooos_newest = f->ooos_list_head;
332 /* Find first segment that starts after new segment */
333 s = pool_elt_at_index (f->ooo_segments, f->ooos_list_head);
334 while (s->next != OOO_SEGMENT_INVALID_INDEX
335 && position_lt (f, s->start, normalized_position))
336 s = pool_elt_at_index (f->ooo_segments, s->next);
338 /* If we have a previous and we overlap it, use it as starting point */
339 prev = ooo_segment_get_prev (f, s);
341 && position_leq (f, normalized_position, ooo_segment_end_pos (f, prev)))
344 s_end_pos = ooo_segment_end_pos (f, s);
346 /* Since we have previous, normalized start position cannot be smaller
347 * than prev->start. Check tail */
348 ASSERT (position_lt (f, s->start, normalized_position));
352 s_index = s - f->ooo_segments;
353 s_end_pos = ooo_segment_end_pos (f, s);
355 /* No overlap, add before current segment */
356 if (position_lt (f, normalized_end_position, s->start))
358 new_s = ooo_segment_new (f, normalized_position, length);
359 new_index = new_s - f->ooo_segments;
361 /* Pool might've moved, get segment again */
362 s = pool_elt_at_index (f->ooo_segments, s_index);
363 if (s->prev != OOO_SEGMENT_INVALID_INDEX)
365 new_s->prev = s->prev;
366 prev = pool_elt_at_index (f->ooo_segments, new_s->prev);
367 prev->next = new_index;
372 f->ooos_list_head = new_index;
375 new_s->next = s_index;
377 f->ooos_newest = new_index;
380 /* No overlap, add after current segment */
381 else if (position_gt (f, normalized_position, s_end_pos))
383 new_s = ooo_segment_new (f, normalized_position, length);
384 new_index = new_s - f->ooo_segments;
386 /* Pool might've moved, get segment again */
387 s = pool_elt_at_index (f->ooo_segments, s_index);
389 /* Needs to be last */
390 ASSERT (s->next == OOO_SEGMENT_INVALID_INDEX);
392 new_s->prev = s_index;
394 f->ooos_newest = new_index;
404 if (position_lt (f, normalized_position, s->start))
406 s->start = normalized_position;
407 s->length = position_diff (f, s_end_pos, s->start);
408 f->ooos_newest = s - f->ooo_segments;
413 /* Overlapping tail */
414 if (position_gt (f, normalized_end_position, s_end_pos))
416 s->length = position_diff (f, normalized_end_position, s->start);
418 /* Remove the completely overlapped segments in the tail */
419 it = ooo_segment_next (f, s);
420 while (it && position_leq (f, ooo_segment_end_pos (f, it),
421 normalized_end_position))
423 next = ooo_segment_next (f, it);
424 ooo_segment_del (f, it - f->ooo_segments);
428 /* If partial overlap with last, merge */
429 if (it && position_leq (f, it->start, normalized_end_position))
431 s->length = position_diff (f, ooo_segment_end_pos (f, it),
433 ooo_segment_del (f, it - f->ooo_segments);
435 f->ooos_newest = s - f->ooo_segments;
440 * Removes segments that can now be enqueued because the fifo's tail has
441 * advanced. Returns the number of bytes added to tail.
444 ooo_segment_try_collect (svm_fifo_t * f, u32 n_bytes_enqueued)
447 u32 index, bytes = 0;
450 s = pool_elt_at_index (f->ooo_segments, f->ooos_list_head);
451 diff = ooo_segment_distance_to_tail (f, s->start);
453 ASSERT (diff != n_bytes_enqueued);
455 if (diff > n_bytes_enqueued)
458 /* If last tail update overlaps one/multiple ooo segments, remove them */
459 while (0 <= diff && diff < n_bytes_enqueued)
461 index = s - f->ooo_segments;
463 /* Segment end is beyond the tail. Advance tail and remove segment */
464 if (s->length > diff)
466 bytes = s->length - diff;
468 f->tail %= f->nitems;
469 ooo_segment_del (f, index);
473 /* If we have next go on */
474 if (s->next != OOO_SEGMENT_INVALID_INDEX)
476 s = pool_elt_at_index (f->ooo_segments, s->next);
477 diff = ooo_segment_distance_to_tail (f, s->start);
478 ooo_segment_del (f, index);
483 ooo_segment_del (f, index);
488 ASSERT (bytes <= f->nitems);
492 MARCH_FN (svm_fifo_enqueue_nowait, svm_fifo_t * f, u32 max_bytes,
493 const u8 * copy_from_here)
495 u32 total_copy_bytes, first_copy_bytes, second_copy_bytes;
498 /* read cursize, which can only increase while we're working */
499 cursize = svm_fifo_max_dequeue (f);
500 f->ooos_newest = OOO_SEGMENT_INVALID_INDEX;
502 if (PREDICT_FALSE (cursize == f->nitems))
503 return SVM_FIFO_FULL;
507 /* Number of bytes we're going to copy */
508 total_copy_bytes = (nitems - cursize) < max_bytes ?
509 (nitems - cursize) : max_bytes;
511 if (PREDICT_TRUE (copy_from_here != 0))
513 /* Number of bytes in first copy segment */
514 first_copy_bytes = ((nitems - f->tail) < total_copy_bytes)
515 ? (nitems - f->tail) : total_copy_bytes;
517 clib_memcpy (&f->data[f->tail], copy_from_here, first_copy_bytes);
518 f->tail += first_copy_bytes;
519 f->tail = (f->tail == nitems) ? 0 : f->tail;
521 /* Number of bytes in second copy segment, if any */
522 second_copy_bytes = total_copy_bytes - first_copy_bytes;
523 if (second_copy_bytes)
525 clib_memcpy (&f->data[f->tail], copy_from_here + first_copy_bytes,
527 f->tail += second_copy_bytes;
528 f->tail = (f->tail == nitems) ? 0 : f->tail;
535 /* Account for a zero-copy enqueue done elsewhere */
536 ASSERT (max_bytes <= (nitems - cursize));
537 f->tail += max_bytes;
538 f->tail = f->tail % nitems;
539 total_copy_bytes = max_bytes;
542 svm_fifo_trace_add (f, f->head, total_copy_bytes, 2);
544 /* Any out-of-order segments to collect? */
545 if (PREDICT_FALSE (f->ooos_list_head != OOO_SEGMENT_INVALID_INDEX))
546 total_copy_bytes += ooo_segment_try_collect (f, total_copy_bytes);
548 /* Atomically increase the queue length */
549 ASSERT (cursize + total_copy_bytes <= nitems);
550 __sync_fetch_and_add (&f->cursize, total_copy_bytes);
552 return (total_copy_bytes);
555 #ifndef CLIB_MARCH_VARIANT
557 svm_fifo_enqueue_nowait (svm_fifo_t * f, u32 max_bytes,
558 const u8 * copy_from_here)
560 return ((int (*)(svm_fifo_t * f, u32, const u8 *))
561 (*svm_fifo_enqueue_nowait_selected)) (f, max_bytes, copy_from_here);
566 * Enqueue a future segment.
568 * Two choices: either copies the entire segment, or copies nothing
569 * Returns 0 of the entire segment was copied
570 * Returns -1 if none of the segment was copied due to lack of space
572 MARCH_FN (svm_fifo_enqueue_with_offset, svm_fifo_t * f,
573 u32 offset, u32 required_bytes, u8 * copy_from_here)
575 u32 total_copy_bytes, first_copy_bytes, second_copy_bytes;
576 u32 cursize, nitems, normalized_offset;
578 f->ooos_newest = OOO_SEGMENT_INVALID_INDEX;
580 /* read cursize, which can only increase while we're working */
581 cursize = svm_fifo_max_dequeue (f);
584 ASSERT (required_bytes < nitems);
586 normalized_offset = (f->tail + offset) % nitems;
588 /* Will this request fit? */
589 if ((required_bytes + offset) > (nitems - cursize))
592 svm_fifo_trace_add (f, offset, required_bytes, 1);
594 ooo_segment_add (f, offset, required_bytes);
596 /* Number of bytes we're going to copy */
597 total_copy_bytes = required_bytes;
599 /* Number of bytes in first copy segment */
600 first_copy_bytes = ((nitems - normalized_offset) < total_copy_bytes)
601 ? (nitems - normalized_offset) : total_copy_bytes;
603 clib_memcpy (&f->data[normalized_offset], copy_from_here, first_copy_bytes);
605 /* Number of bytes in second copy segment, if any */
606 second_copy_bytes = total_copy_bytes - first_copy_bytes;
607 if (second_copy_bytes)
609 normalized_offset += first_copy_bytes;
610 normalized_offset %= nitems;
612 ASSERT (normalized_offset == 0);
614 clib_memcpy (&f->data[normalized_offset],
615 copy_from_here + first_copy_bytes, second_copy_bytes);
621 #ifndef CLIB_MARCH_VARIANT
624 svm_fifo_enqueue_with_offset (svm_fifo_t * f, u32 offset, u32 required_bytes,
627 return ((int (*)(svm_fifo_t * f, u32, u32, u8 *))
628 (*svm_fifo_enqueue_with_offset_selected)) (f, offset,
634 svm_fifo_overwrite_head (svm_fifo_t * f, u8 * data, u32 len)
637 first_chunk = f->nitems - f->head;
638 ASSERT (len <= f->nitems);
639 if (len <= first_chunk)
640 clib_memcpy (&f->data[f->head], data, len);
643 clib_memcpy (&f->data[f->head], data, first_chunk);
644 clib_memcpy (&f->data[0], data + first_chunk, len - first_chunk);
649 MARCH_FN (svm_fifo_dequeue_nowait, svm_fifo_t * f, u32 max_bytes,
652 u32 total_copy_bytes, first_copy_bytes, second_copy_bytes;
655 /* read cursize, which can only increase while we're working */
656 cursize = svm_fifo_max_dequeue (f);
657 if (PREDICT_FALSE (cursize == 0))
658 return -2; /* nothing in the fifo */
662 /* Number of bytes we're going to copy */
663 total_copy_bytes = (cursize < max_bytes) ? cursize : max_bytes;
665 if (PREDICT_TRUE (copy_here != 0))
667 /* Number of bytes in first copy segment */
668 first_copy_bytes = ((nitems - f->head) < total_copy_bytes)
669 ? (nitems - f->head) : total_copy_bytes;
670 clib_memcpy (copy_here, &f->data[f->head], first_copy_bytes);
671 f->head += first_copy_bytes;
672 f->head = (f->head == nitems) ? 0 : f->head;
674 /* Number of bytes in second copy segment, if any */
675 second_copy_bytes = total_copy_bytes - first_copy_bytes;
676 if (second_copy_bytes)
678 clib_memcpy (copy_here + first_copy_bytes,
679 &f->data[f->head], second_copy_bytes);
680 f->head += second_copy_bytes;
681 f->head = (f->head == nitems) ? 0 : f->head;
687 /* Account for a zero-copy dequeue done elsewhere */
688 ASSERT (max_bytes <= cursize);
689 f->head += max_bytes;
690 f->head = f->head % nitems;
691 cursize -= max_bytes;
692 total_copy_bytes = max_bytes;
695 ASSERT (f->head <= nitems);
696 ASSERT (cursize >= total_copy_bytes);
697 __sync_fetch_and_sub (&f->cursize, total_copy_bytes);
699 return (total_copy_bytes);
702 #ifndef CLIB_MARCH_VARIANT
705 svm_fifo_dequeue_nowait (svm_fifo_t * f, u32 max_bytes, u8 * copy_here)
707 return ((int (*)(svm_fifo_t * f, u32, u8 *))
708 (*svm_fifo_dequeue_nowait_selected)) (f, max_bytes, copy_here);
712 MARCH_FN (svm_fifo_peek, svm_fifo_t * f, u32 relative_offset, u32 max_bytes,
715 u32 total_copy_bytes, first_copy_bytes, second_copy_bytes;
716 u32 cursize, nitems, real_head;
718 /* read cursize, which can only increase while we're working */
719 cursize = svm_fifo_max_dequeue (f);
720 if (PREDICT_FALSE (cursize < relative_offset))
721 return -2; /* nothing in the fifo */
724 real_head = f->head + relative_offset;
725 real_head = real_head >= nitems ? real_head - nitems : real_head;
727 /* Number of bytes we're going to copy */
728 total_copy_bytes = (cursize - relative_offset < max_bytes) ?
729 cursize - relative_offset : max_bytes;
731 if (PREDICT_TRUE (copy_here != 0))
733 /* Number of bytes in first copy segment */
735 ((nitems - real_head) < total_copy_bytes) ?
736 (nitems - real_head) : total_copy_bytes;
737 clib_memcpy (copy_here, &f->data[real_head], first_copy_bytes);
739 /* Number of bytes in second copy segment, if any */
740 second_copy_bytes = total_copy_bytes - first_copy_bytes;
741 if (second_copy_bytes)
743 clib_memcpy (copy_here + first_copy_bytes, &f->data[0],
747 return total_copy_bytes;
750 #ifndef CLIB_MARCH_VARIANT
753 svm_fifo_peek (svm_fifo_t * f, u32 relative_offset, u32 max_bytes,
756 return ((int (*)(svm_fifo_t * f, u32, u32, u8 *))
757 (*svm_fifo_peek_selected)) (f, relative_offset, max_bytes,
762 svm_fifo_dequeue_drop (svm_fifo_t * f, u32 max_bytes)
764 u32 total_drop_bytes, first_drop_bytes, second_drop_bytes;
767 /* read cursize, which can only increase while we're working */
768 cursize = svm_fifo_max_dequeue (f);
769 if (PREDICT_FALSE (cursize == 0))
770 return -2; /* nothing in the fifo */
774 /* Number of bytes we're going to drop */
775 total_drop_bytes = (cursize < max_bytes) ? cursize : max_bytes;
777 svm_fifo_trace_add (f, f->tail, total_drop_bytes, 3);
779 /* Number of bytes in first copy segment */
781 ((nitems - f->head) < total_drop_bytes) ?
782 (nitems - f->head) : total_drop_bytes;
783 f->head += first_drop_bytes;
784 f->head = (f->head == nitems) ? 0 : f->head;
786 /* Number of bytes in second drop segment, if any */
787 second_drop_bytes = total_drop_bytes - first_drop_bytes;
788 if (second_drop_bytes)
790 f->head += second_drop_bytes;
791 f->head = (f->head == nitems) ? 0 : f->head;
794 ASSERT (f->head <= nitems);
795 ASSERT (cursize >= total_drop_bytes);
796 __sync_fetch_and_sub (&f->cursize, total_drop_bytes);
798 return total_drop_bytes;
802 svm_fifo_dequeue_drop_all (svm_fifo_t * f)
805 __sync_fetch_and_sub (&f->cursize, f->cursize);
809 svm_fifo_segments (svm_fifo_t * f, svm_fifo_segment_t * fs)
813 /* read cursize, which can only increase while we're working */
814 cursize = svm_fifo_max_dequeue (f);
815 if (PREDICT_FALSE (cursize == 0))
820 fs[0].len = ((nitems - f->head) < cursize) ? (nitems - f->head) : cursize;
821 fs[0].data = f->data + f->head;
823 if (fs[0].len < cursize)
825 fs[1].len = cursize - fs[0].len;
826 fs[1].data = f->data;
837 svm_fifo_segments_free (svm_fifo_t * f, svm_fifo_segment_t * fs)
839 u32 total_drop_bytes;
841 ASSERT (fs[0].data == f->data + f->head);
845 total_drop_bytes = fs[0].len + fs[1].len;
849 f->head = (f->head + fs[0].len) % f->nitems;
850 total_drop_bytes = fs[0].len;
852 __sync_fetch_and_sub (&f->cursize, total_drop_bytes);
856 svm_fifo_number_ooo_segments (svm_fifo_t * f)
858 return pool_elts (f->ooo_segments);
862 svm_fifo_first_ooo_segment (svm_fifo_t * f)
864 return pool_elt_at_index (f->ooo_segments, f->ooos_list_head);
868 * Set fifo pointers to requested offset
871 svm_fifo_init_pointers (svm_fifo_t * f, u32 pointer)
873 f->head = f->tail = pointer % f->nitems;
876 #define foreach_svm_fifo_march_fn \
877 _(svm_fifo_enqueue_nowait) \
878 _(svm_fifo_enqueue_with_offset) \
879 _(svm_fifo_dequeue_nowait) \
882 #define _(_fn, _args...) CLIB_MARCH_SELECT_FN(_fn ## _ma);
883 foreach_svm_fifo_march_fn
885 void __clib_constructor
886 svm_fifo_select_march_fns (void)
888 #define _(_fn, _args...) _fn ## _selected = _fn ## _ma_multiarch_select ();
889 foreach_svm_fifo_march_fn
/*
895 * fd.io coding-style-patch-verification: ON
898 * eval: (c-set-style "gnu")
*/