Code Review
/
vpp.git
/ blobdiff
commit
grep
author
committer
pickaxe
?
search:
re
summary
|
shortlog
|
log
|
commit
|
commitdiff
|
review
|
tree
raw
|
inline
| side by side
ip: fix ip zero checksum verification
[vpp.git]
/
src
/
vppinfra
/
vector_sse42.h
diff --git
a/src/vppinfra/vector_sse42.h
b/src/vppinfra/vector_sse42.h
index
a2d737a
..
effab3f
100644
(file)
--- a/
src/vppinfra/vector_sse42.h
+++ b/
src/vppinfra/vector_sse42.h
@@
-617,6
+617,15
@@
u8x16_msb_mask (u8x16 v)
#undef _signed_binop
#undef _signed_binop
+static_always_inline u32x4
+u32x4_byte_swap (u32x4 v)
+{
+ u8x16 swap = {
+ 3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12
+ };
+ return (u32x4) _mm_shuffle_epi8 ((__m128i) v, (__m128i) swap);
+}
+
static_always_inline u16x8
u16x8_byte_swap (u16x8 v)
{
static_always_inline u16x8
u16x8_byte_swap (u16x8 v)
{
@@
-626,12
+635,29
@@
u16x8_byte_swap (u16x8 v)
return (u16x8) _mm_shuffle_epi8 ((__m128i) v, (__m128i) swap);
}
return (u16x8) _mm_shuffle_epi8 ((__m128i) v, (__m128i) swap);
}
+static_always_inline u8x16
+u8x16_reflect (u8x16 v)
+{
+ u8x16 mask = {
+ 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
+ };
+ return (u8x16) _mm_shuffle_epi8 ((__m128i) v, (__m128i) mask);
+}
+
/* Horizontal pairwise add of adjacent 32-bit lanes from v1 and v2
   (SSSE3 PHADDD): result = { v1[0]+v1[1], v1[2]+v1[3],
			       v2[0]+v2[1], v2[2]+v2[3] }. */
static_always_inline u32x4
u32x4_hadd (u32x4 v1, u32x4 v2)
{
  __m128i sums = _mm_hadd_epi32 ((__m128i) v1, (__m128i) v2);
  return (u32x4) sums;
}
static_always_inline u32x4
u32x4_hadd (u32x4 v1, u32x4 v2)
{
return (u32x4) _mm_hadd_epi32 ((__m128i) v1, (__m128i) v2);
}
+static_always_inline u32 __clib_unused
+u32x4_sum_elts (u32x4 sum4)
+{
+ sum4 += (u32x4) u8x16_align_right (sum4, sum4, 8);
+ sum4 += (u32x4) u8x16_align_right (sum4, sum4, 4);
+ return sum4[0];
+}
+
static_always_inline u8x16
u8x16_shuffle (u8x16 v, u8x16 m)
{
static_always_inline u8x16
u8x16_shuffle (u8x16 v, u8x16 m)
{
@@
-650,11
+676,11
@@
u32x4_shuffle (u32x4 v, const int a, const int b, const int c, const int d)
#endif
}
#endif
}
-/* _
extend_to
_ */
+/* _
from
_ */
/* *INDENT-OFF* */
#define _(f,t,i) \
static_always_inline t \
/* *INDENT-OFF* */
#define _(f,t,i) \
static_always_inline t \
-
f##_extend_to_##t (f x)
\
+
t##_from_##f (f x)
\
{ return (t) _mm_cvt##i ((__m128i) x); }
_(u8x16, u16x8, epu8_epi16)
{ return (t) _mm_cvt##i ((__m128i) x); }
_(u8x16, u16x8, epu8_epi16)
@@
-681,7
+707,7
@@
u64x2_gather (void *p0, void *p1)
}
static_always_inline u32x4
}
static_always_inline u32x4
-u32x4_gather (void *p0, void *p1, void *p2, void *p3
, void *p4
)
+u32x4_gather (void *p0, void *p1, void *p2, void *p3)
{
u32x4 r = { *(u32 *) p0, *(u32 *) p1, *(u32 *) p2, *(u32 *) p3 };
return r;
{
u32x4 r = { *(u32 *) p0, *(u32 *) p1, *(u32 *) p2, *(u32 *) p3 };
return r;
@@
-728,6
+754,23
@@
u8x16_blend (u8x16 v1, u8x16 v2, u8x16 mask)
return (u8x16) _mm_blendv_epi8 ((__m128i) v1, (__m128i) v2, (__m128i) mask);
}
return (u8x16) _mm_blendv_epi8 ((__m128i) v1, (__m128i) v2, (__m128i) mask);
}
+static_always_inline u8x16
+u8x16_xor3 (u8x16 a, u8x16 b, u8x16 c)
+{
+#if __AVX512F__
+ return (u8x16) _mm_ternarylogic_epi32 ((__m128i) a, (__m128i) b,
+ (__m128i) c, 0x96);
+#endif
+ return a ^ b ^ c;
+}
+
#ifdef __AVX512F__
/* Masked unaligned byte load: lanes whose mask bit is set are loaded
   from p, the remaining lanes keep their value from a.
   NOTE(review): _mm_mask_loadu_epi8 needs AVX512BW+VL in addition to
   the AVX512F guard here — presumably guaranteed by the build flags;
   verify against the project's march settings. */
static_always_inline u8x16
u8x16_mask_load (u8x16 a, void *p, u16 mask)
{
  __m128i loaded = _mm_mask_loadu_epi8 ((__m128i) a, mask, p);
  return (u8x16) loaded;
}
#endif
#endif /* included_vector_sse2_h */
#endif /* included_vector_sse2_h */