#include <svm/svm_fifo.h>
#include <vppinfra/cpu.h>
+/*
+ * Runtime CPU-dispatch scaffolding for march (multi-architecture) function
+ * variants.
+ *
+ * foreach_march_variant_runtime(macro, args...): expands 'macro' once per
+ * runtime-selectable variant, highest priority first (avx512 before avx2).
+ * Only enabled for x86_64 release builds; on debug or non-x86 builds it
+ * expands to nothing, so selection falls through to the generic function.
+ */
+#if __x86_64__ && CLIB_DEBUG == 0
+#define foreach_march_variant_runtime(macro, _args...) \
+ macro(avx512, avx512f, _args) \
+ macro(avx2, avx2, _args)
+#else
+#define foreach_march_variant_runtime(macro, _args...)
+#endif
+
+/* If the running CPU supports 'archname', return the 'fn_<arch>' clone. */
+#define CLIB_MARCH_ARCH_CHECK(arch, archname, fn) \
+ if (clib_cpu_supports_ ## archname ()) \
+ return & fn ## _ ##arch;
+
+/* Emit 'fn_multiarch_select()': probe CPU features in priority order and
+ * fall back to the generic 'fn' when no variant matches (or when
+ * foreach_march_variant_runtime expands empty). */
+#define CLIB_MARCH_SELECT_FN(fn,...) \
+ __VA_ARGS__ void * fn ## _multiarch_select(void) \
+{ \
+ foreach_march_variant_runtime(CLIB_MARCH_ARCH_CHECK, fn) \
+ return & fn; \
+}
+
+/* Weak prototypes for the per-arch clones, so a variant translation unit
+ * that is not linked in resolves to 0 instead of a link error.
+ * NOTE(review): declared with an 'int' return type regardless of the real
+ * signature; callers cast before invoking -- confirm this matches every
+ * MARCH_FN user in this file. */
+#define DEFINE_WEAK_FN(_arch, _fn, _args...) \
+ int __attribute__((weak)) _fn##_ma_##_arch(_args);
+#define DEFINE_FN_HELPER(arch, archname, macro, _args...) \
+ macro(arch, _args)
+
+/*
+ * MARCH_FN(fn, args...): in the default translation unit, declares the
+ * 'fn_selected' dispatch pointer (filled in by the load-time constructor
+ * below) plus weak per-arch prototypes, then opens the generic
+ * implementation.  In a CLIB_MARCH_VARIANT unit it only opens the
+ * variant implementation, name-mangled via CLIB_MARCH_SFX.
+ */
+#ifndef CLIB_MARCH_VARIANT
+#define MARCH_FN(fn, _args...) \
+ static void * (*fn ## _selected) (void); \
+ foreach_march_variant_runtime (DEFINE_FN_HELPER, DEFINE_WEAK_FN, fn, \
+ _args); \
+ static inline int CLIB_CPU_OPTIMIZED CLIB_MARCH_SFX (fn ## _ma)(_args)
+#else
+#define MARCH_FN(fn, _args...) \
+ int CLIB_CPU_OPTIMIZED CLIB_MARCH_SFX (fn ## _ma)(_args)
+#endif
+
+/*
+ * position_lt -- intended to return 1 when normalized fifo position 'a'
+ * precedes position 'b'.
+ * NOTE(review): the body below references 's', which is not in scope, and
+ * never uses 'a' or 'b'; it looks like the body of a different helper
+ * (an ooo-segment end-position computation) was spliced in here.  This
+ * cannot compile as written -- restore the real position comparison.
+ * TODO confirm against the upstream svm_fifo.c.
+ */
static inline u8
position_lt (svm_fifo_t * f, u32 a, u32 b)
{
return (s->start + s->length) % f->nitems;
}
+#ifndef CLIB_MARCH_VARIANT
+
+/*
+ * format-style printer (vppinfra format() callback).
+ * NOTE(review): named format_ooo_segment, but every field printed
+ * (cursize/nitems/head/tail/session indices) belongs to svm_fifo_t --
+ * this looks like the body of format_svm_fifo merged in by extraction.
+ * TODO confirm against upstream.
+ */
u8 *
format_ooo_segment (u8 * s, va_list * args)
{
svm_fifo_t *f = va_arg (*args, svm_fifo_t *);
int verbose = va_arg (*args, int);
+ if (!s)
+ return s;
+
s = format (s, "cursize %u nitems %u has_event %d\n",
f->cursize, f->nitems, f->has_event);
- s = format (s, " head %d tail %d\n", f->head, f->tail);
+ s = format (s, " head %d tail %d segment manager %u\n", f->head, f->tail,
+ f->segment_manager);
if (verbose > 1)
s = format
- (s, " server session %d thread %d client session %d thread %d\n",
+ (s, " vpp session %d thread %d app session %d thread %d\n",
f->master_session_index, f->master_thread_index,
f->client_session_index, f->client_thread_index);
+/* NOTE(review): freeing the argument inside a formatter, the doubled
+ * closing brace, and the missing 'return s' below all indicate chunk
+ * corruption -- a formatter must return the vector and must not free
+ * its argument.  Fix before merging. */
clib_mem_free (f);
}
}
+#endif
always_inline ooo_segment_t *
ooo_segment_new (svm_fifo_t * f, u32 start, u32 length)
return bytes;
}
+/*
+ * Enqueue up to max_bytes at the fifo tail (march-variant implementation).
+ * Converted from the old svm_fifo_enqueue_internal to the MARCH_FN
+ * machinery; the raw -2 return is replaced by the named SVM_FIFO_FULL.
+ * NOTE(review): the middle of the body (the lines that load 'cursize'
+ * and do the actual copies) appears elided in this excerpt.
+ */
-static int
-svm_fifo_enqueue_internal (svm_fifo_t * f, u32 max_bytes, u8 * copy_from_here)
+MARCH_FN (svm_fifo_enqueue_nowait, svm_fifo_t * f, u32 max_bytes,
+ const u8 * copy_from_here)
{
u32 total_copy_bytes, first_copy_bytes, second_copy_bytes;
u32 cursize, nitems;
f->ooos_newest = OOO_SEGMENT_INVALID_INDEX;
if (PREDICT_FALSE (cursize == f->nitems))
- return -2; /* fifo stuffed */
+ return SVM_FIFO_FULL;
nitems = f->nitems;
return (total_copy_bytes);
}
+/* The hand-rolled per-arch clone template and its runtime select hook are
+ * removed below; MARCH_FN/CLIB_MARCH_SELECT_FN now generate them. */
-#define SVM_ENQUEUE_CLONE_TEMPLATE(arch, fn, tgt) \
- uword \
- __attribute__ ((flatten)) \
- __attribute__ ((target (tgt))) \
- CLIB_CPU_OPTIMIZED \
- fn ## _ ## arch ( svm_fifo_t * f, u32 max_bytes, u8 * copy_from_here) \
- { return fn (f, max_bytes, copy_from_here);}
-
-static int
-svm_fifo_enqueue_nowait_ma (svm_fifo_t * f, u32 max_bytes,
- u8 * copy_from_here)
-{
- return svm_fifo_enqueue_internal (f, max_bytes, copy_from_here);
-}
-
-foreach_march_variant (SVM_ENQUEUE_CLONE_TEMPLATE,
- svm_fifo_enqueue_nowait_ma);
-CLIB_MULTIARCH_SELECT_FN (svm_fifo_enqueue_nowait_ma);
-
+#ifndef CLIB_MARCH_VARIANT
+/*
+ * Public enqueue entry point (default translation unit only): dispatch to
+ * the march variant chosen once at load time by svm_fifo_select_march_fns().
+ * The '_selected' pointer is stored with a generic void *(*)(void) type,
+ * so it is cast back to the real signature before the call.
+ */
int
-svm_fifo_enqueue_nowait (svm_fifo_t * f, u32 max_bytes, u8 * copy_from_here)
+svm_fifo_enqueue_nowait (svm_fifo_t * f, u32 max_bytes,
+ const u8 * copy_from_here)
{
-#if CLIB_DEBUG > 0
- return svm_fifo_enqueue_nowait_ma (f, max_bytes, copy_from_here);
-#else
- static int (*fp) (svm_fifo_t *, u32, u8 *);
-
- if (PREDICT_FALSE (fp == 0))
- fp = (void *) svm_fifo_enqueue_nowait_ma_multiarch_select ();
-
- return (*fp) (f, max_bytes, copy_from_here);
-#endif
+ return ((int (*)(svm_fifo_t * f, u32, const u8 *))
+ (*svm_fifo_enqueue_nowait_selected)) (f, max_bytes, copy_from_here);
}
+#endif
/**
* Enqueue a future segment.
* Returns 0 of the entire segment was copied
* Returns -1 if none of the segment was copied due to lack of space
*/
+/*
+ * Enqueue a future (out-of-order) segment at 'offset' bytes past the tail
+ * (march-variant implementation); 0 on success, -1 when there is no room.
+ * NOTE(review): most of the body is elided in this excerpt.
+ */
-static int
-svm_fifo_enqueue_with_offset_internal (svm_fifo_t * f,
- u32 offset,
- u32 required_bytes,
- u8 * copy_from_here)
+MARCH_FN (svm_fifo_enqueue_with_offset, svm_fifo_t * f,
+ u32 offset, u32 required_bytes, u8 * copy_from_here)
{
u32 total_copy_bytes, first_copy_bytes, second_copy_bytes;
u32 cursize, nitems, normalized_offset;
return (0);
}
+#ifndef CLIB_MARCH_VARIANT
+/*
+ * Public offset-enqueue entry point: forward to the load-time selected
+ * march variant through the generic '_selected' pointer (cast back to the
+ * real signature).
+ */
int
-svm_fifo_enqueue_with_offset (svm_fifo_t * f,
- u32 offset,
- u32 required_bytes, u8 * copy_from_here)
+svm_fifo_enqueue_with_offset (svm_fifo_t * f, u32 offset, u32 required_bytes,
+ u8 * copy_from_here)
{
- return svm_fifo_enqueue_with_offset_internal (f, offset, required_bytes,
- copy_from_here);
+ return ((int (*)(svm_fifo_t * f, u32, u32, u8 *))
+ (*svm_fifo_enqueue_with_offset_selected)) (f, offset,
+ required_bytes,
+ copy_from_here);
}
+/*
+ * Overwrite 'len' bytes of fifo contents in place starting at the current
+ * head, without moving head or changing cursize.  Handles wrap-around by
+ * splitting the copy at the end of the backing array.
+ * NOTE(review): only asserts len <= nitems; presumably the caller also
+ * guarantees len <= bytes currently enqueued -- confirm at call sites.
+ */
+void
+svm_fifo_overwrite_head (svm_fifo_t * f, u8 * data, u32 len)
+{
+ u32 first_chunk;
+ first_chunk = f->nitems - f->head;
+ ASSERT (len <= f->nitems);
+ if (len <= first_chunk)
+ clib_memcpy (&f->data[f->head], data, len);
+ else
+ {
+ clib_memcpy (&f->data[f->head], data, first_chunk);
+ clib_memcpy (&f->data[0], data + first_chunk, len - first_chunk);
+ }
+}
+#endif
+/*
+ * Dequeue up to max_bytes from the fifo head (march-variant
+ * implementation), converted to the MARCH_FN machinery.
+ * NOTE(review): the body between the declarations and the return appears
+ * elided in this excerpt.
+ */
-static int
-svm_fifo_dequeue_internal (svm_fifo_t * f, u32 max_bytes, u8 * copy_here)
+MARCH_FN (svm_fifo_dequeue_nowait, svm_fifo_t * f, u32 max_bytes,
+ u8 * copy_here)
{
u32 total_copy_bytes, first_copy_bytes, second_copy_bytes;
u32 cursize, nitems;
return (total_copy_bytes);
}
+/* Old hand-rolled clone template and select hook, superseded by
+ * MARCH_FN/CLIB_MARCH_SELECT_FN. */
-static int
-svm_fifo_dequeue_nowait_ma (svm_fifo_t * f, u32 max_bytes, u8 * copy_here)
-{
- return svm_fifo_dequeue_internal (f, max_bytes, copy_here);
-}
-
-#define SVM_FIFO_DEQUEUE_CLONE_TEMPLATE(arch, fn, tgt) \
- uword \
- __attribute__ ((flatten)) \
- __attribute__ ((target (tgt))) \
- CLIB_CPU_OPTIMIZED \
- fn ## _ ## arch ( svm_fifo_t * f, u32 max_bytes, \
- u8 * copy_here) \
- { return fn (f, max_bytes, copy_here);}
-
-foreach_march_variant (SVM_FIFO_DEQUEUE_CLONE_TEMPLATE,
- svm_fifo_dequeue_nowait_ma);
-CLIB_MULTIARCH_SELECT_FN (svm_fifo_dequeue_nowait_ma);
+#ifndef CLIB_MARCH_VARIANT
+/*
+ * Public dequeue entry point: forward to the load-time selected march
+ * variant via the generic '_selected' pointer, cast to the real signature.
+ * Replaces the old per-call lazy CLIB_DEBUG / multiarch_select logic.
+ */
int
svm_fifo_dequeue_nowait (svm_fifo_t * f, u32 max_bytes, u8 * copy_here)
{
-#if CLIB_DEBUG > 0
- return svm_fifo_dequeue_nowait_ma (f, max_bytes, copy_here);
-#else
- static int (*fp) (svm_fifo_t *, u32, u8 *);
-
- if (PREDICT_FALSE (fp == 0))
- fp = (void *) svm_fifo_dequeue_nowait_ma_multiarch_select ();
-
- return (*fp) (f, max_bytes, copy_here);
-#endif
+ return ((int (*)(svm_fifo_t * f, u32, u8 *))
+ (*svm_fifo_dequeue_nowait_selected)) (f, max_bytes, copy_here);
}
+#endif
+/*
+ * Copy up to max_bytes starting 'relative_offset' bytes past the head
+ * without consuming them (march-variant implementation).
+ * NOTE(review): the body between the declarations and the return appears
+ * elided in this excerpt.
+ */
-static int
-svm_fifo_peek_ma (svm_fifo_t * f, u32 relative_offset, u32 max_bytes,
- u8 * copy_here)
+MARCH_FN (svm_fifo_peek, svm_fifo_t * f, u32 relative_offset, u32 max_bytes,
+ u8 * copy_here)
{
u32 total_copy_bytes, first_copy_bytes, second_copy_bytes;
u32 cursize, nitems, real_head;
return total_copy_bytes;
}
+/* Old hand-rolled clone template and select hook, superseded by
+ * MARCH_FN/CLIB_MARCH_SELECT_FN. */
-#define SVM_FIFO_PEEK_CLONE_TEMPLATE(arch, fn, tgt) \
- uword \
- __attribute__ ((flatten)) \
- __attribute__ ((target (tgt))) \
- CLIB_CPU_OPTIMIZED \
- fn ## _ ## arch ( svm_fifo_t * f, u32 relative_offset, u32 max_bytes, \
- u8 * copy_here) \
- { return fn (f, relative_offset, max_bytes, copy_here);}
-
-foreach_march_variant (SVM_FIFO_PEEK_CLONE_TEMPLATE, svm_fifo_peek_ma);
-CLIB_MULTIARCH_SELECT_FN (svm_fifo_peek_ma);
+#ifndef CLIB_MARCH_VARIANT
+/*
+ * Public peek entry point: forward to the load-time selected march
+ * variant via the generic '_selected' pointer, cast to the real signature.
+ */
int
svm_fifo_peek (svm_fifo_t * f, u32 relative_offset, u32 max_bytes,
u8 * copy_here)
{
-#if CLIB_DEBUG > 0
- return svm_fifo_peek_ma (f, relative_offset, max_bytes, copy_here);
-#else
- static int (*fp) (svm_fifo_t *, u32, u32, u8 *);
-
- if (PREDICT_FALSE (fp == 0))
- fp = (void *) svm_fifo_peek_ma_multiarch_select ();
-
- return (*fp) (f, relative_offset, max_bytes, copy_here);
-#endif
+ return ((int (*)(svm_fifo_t * f, u32, u32, u8 *))
+ (*svm_fifo_peek_selected)) (f, relative_offset, max_bytes,
+ copy_here);
}
int
return total_drop_bytes;
}
+/*
+ * Drop everything currently in the fifo (consumer side).
+ * NOTE(review): f->cursize is read non-atomically as the decrement
+ * amount; if a producer enqueues between 'f->head = f->tail' and the
+ * atomic subtraction, head/cursize can end up inconsistent.  Presumably
+ * relies on the fifo's single-producer/single-consumer contract --
+ * confirm callers hold that guarantee.
+ */
+void
+svm_fifo_dequeue_drop_all (svm_fifo_t * f)
+{
+ f->head = f->tail;
+ __sync_fetch_and_sub (&f->cursize, f->cursize);
+}
+
+/*
+ * Expose the readable fifo contents as up to two zero-copy segments:
+ * fs[0] runs from head to the end of the backing array (or to cursize,
+ * whichever is smaller), and fs[1] covers the wrap-around starting at
+ * f->data[0].  Returns the number of readable bytes, or -2 when the fifo
+ * is empty.  The bytes are consumed later via svm_fifo_segments_free().
+ */
+int
+svm_fifo_segments (svm_fifo_t * f, svm_fifo_segment_t * fs)
+{
+ u32 cursize, nitems;
+
+ /* read cursize, which can only increase while we're working */
+ cursize = svm_fifo_max_dequeue (f);
+ if (PREDICT_FALSE (cursize == 0))
+ return -2;
+
+ nitems = f->nitems;
+
+ fs[0].len = ((nitems - f->head) < cursize) ? (nitems - f->head) : cursize;
+ fs[0].data = f->data + f->head;
+
+ if (fs[0].len < cursize)
+ {
+ fs[1].len = cursize - fs[0].len;
+ fs[1].data = f->data;
+ }
+ else
+ {
+ fs[1].len = 0;
+ fs[1].data = 0;
+ }
+ return cursize;
+}
+
+/*
+ * Consume the bytes previously exposed by svm_fifo_segments(): advance
+ * head past both segments and atomically decrement cursize.
+ * When fs[1] is non-empty, head is simply set to fs[1].len because
+ * fs[1].data always starts at f->data[0] (see svm_fifo_segments).
+ * NOTE(review): assumes the caller consumed both segments in full --
+ * partial consumption is not supported by this interface.
+ */
+void
+svm_fifo_segments_free (svm_fifo_t * f, svm_fifo_segment_t * fs)
+{
+ u32 total_drop_bytes;
+
+ ASSERT (fs[0].data == f->data + f->head);
+ if (fs[1].len)
+ {
+ f->head = fs[1].len;
+ total_drop_bytes = fs[0].len + fs[1].len;
+ }
+ else
+ {
+ f->head = (f->head + fs[0].len) % f->nitems;
+ total_drop_bytes = fs[0].len;
+ }
+ __sync_fetch_and_sub (&f->cursize, total_drop_bytes);
+}
+
u32
svm_fifo_number_ooo_segments (svm_fifo_t * f)
{
f->head = f->tail = pointer % f->nitems;
}
+/*
+ * Table of every MARCH_FN in this file.  NOTE(review): the trailing '\'
+ * after the last entry continues the macro onto the following blank
+ * line -- it works, but is fragile if code is ever added right below.
+ */
+#define foreach_svm_fifo_march_fn \
+ _(svm_fifo_enqueue_nowait) \
+ _(svm_fifo_enqueue_with_offset) \
+ _(svm_fifo_dequeue_nowait) \
+ _(svm_fifo_peek) \
+
+/* Emit one '<fn>_ma_multiarch_select()' per entry. */
+#define _(_fn, _args...) CLIB_MARCH_SELECT_FN(_fn ## _ma);
+foreach_svm_fifo_march_fn
+#undef _
+/* Load-time constructor: resolve every '<fn>_selected' dispatch pointer
+ * exactly once, before any caller can reach the public entry points. */
+void __clib_constructor
+svm_fifo_select_march_fns (void)
+{
+#define _(_fn, _args...) _fn ## _selected = _fn ## _ma_multiarch_select ();
+ foreach_svm_fifo_march_fn
+#undef _
+}
+
+#endif
/*
* fd.io coding-style-patch-verification: ON
*