From: Damjan Marion
Date: Thu, 13 Feb 2020 17:14:06 +0000 (+0100)
Subject: vppinfra: add 128-bit and 512-bit a ^ b ^ c shortcut
X-Git-Tag: v20.09-rc0~595
X-Git-Url: https://gerrit.fd.io/r/gitweb?p=vpp.git;a=commitdiff_plain;h=f75defa7676759fa81ae75e7edd492572c6b8fd6

vppinfra: add 128-bit and 512-bit a ^ b ^ c shortcut

This allows us to combine two XOR operations into a single instruction,
which makes a difference in crypto ops:

- on x86, by using the ternary logic instruction
- on ARM, by using the EOR3 instruction (available with the sha3 feature)

Type: refactor

Change-Id: Ibdf9001840399d2f838d491ca81b57cbd8430433
Signed-off-by: Damjan Marion
---
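Note on the 0x96 immediate used below: for _mm_ternarylogic_epi32 and
_mm512_ternarylogic_epi32, the 8-bit immediate is the truth table of the
desired three-input boolean function; bit (a << 2 | b << 1 | c) of the
immediate is the output for inputs a, b, c. For a ^ b ^ c the outputs for
inputs 000..111 are 0,1,1,0,1,0,0,1, i.e. 0b10010110 = 0x96. A minimal
standalone C sketch (illustration only, not part of this change) that
checks the encoding:

  #include <assert.h>

  int
  main (void)
  {
    const unsigned imm = 0x96; /* truth table of 3-input XOR */
    for (unsigned a = 0; a < 2; a++)
      for (unsigned b = 0; b < 2; b++)
        for (unsigned c = 0; c < 2; c++)
          /* bit (a:b:c) of the immediate is the function output */
          assert (((imm >> (a << 2 | b << 1 | c)) & 1) == (a ^ b ^ c));
    return 0;
  }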
"=w" (r): "0" (a), "w" (b), "w" (c):); + return r; +#endif + return a ^ b ^ c; +} + #define CLIB_HAVE_VEC128_MSB_MASK #define CLIB_HAVE_VEC128_UNALIGNED_LOAD_STORE diff --git a/src/vppinfra/vector_sse42.h b/src/vppinfra/vector_sse42.h index c22e86e7437..e75580e6026 100644 --- a/src/vppinfra/vector_sse42.h +++ b/src/vppinfra/vector_sse42.h @@ -746,6 +746,15 @@ u8x16_blend (u8x16 v1, u8x16 v2, u8x16 mask) return (u8x16) _mm_blendv_epi8 ((__m128i) v1, (__m128i) v2, (__m128i) mask); } +static_always_inline u8x16 +u8x16_xor3 (u8x16 a, u8x16 b, u8x16 c) +{ +#if __AVX512F__ + return (u8x16) _mm_ternarylogic_epi32 ((__m128i) a, (__m128i) b, + (__m128i) c, 0x96); +#endif + return a ^ b ^ c; +} #endif /* included_vector_sse2_h */