#ifndef __ghash_h__
#define __ghash_h__
-/* on AVX-512 systems we can save a clock cycle by using ternary logic
- instruction to calculate a XOR b XOR c */
-static_always_inline u8x16
-ghash_xor3 (u8x16 a, u8x16 b, u8x16 c)
-{
-#if defined (__AVX512F__)
- return (u8x16) _mm_ternarylogic_epi32 ((__m128i) a, (__m128i) b,
- (__m128i) c, 0x96);
-#endif
- return a ^ b ^ c;
-}
-
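+/* Carry-less 64x64-bit multiply helpers. Each variant multiplies one
+   64-bit half of a with one 64-bit half of b ("lo" = bits 0..63, "hi" =
+   bits 64..127). On x86, bit 0 of the PCLMULQDQ immediate selects a's
+   half and bit 4 selects b's half; on AArch64 the same selection is done
+   with vget_low_p64/vget_high_p64 and vmull_p64/vmull_high_p64. */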
static_always_inline u8x16
gmul_lo_lo (u8x16 a, u8x16 b)
{
+#if defined (__PCLMUL__)
return (u8x16) _mm_clmulepi64_si128 ((__m128i) a, (__m128i) b, 0x00);
+#elif defined (__ARM_FEATURE_CRYPTO)
+ return (u8x16) vmull_p64 ((poly64_t) vget_low_p64 ((poly64x2_t) a),
+ (poly64_t) vget_low_p64 ((poly64x2_t) b));
+#endif
}
static_always_inline u8x16
gmul_hi_lo (u8x16 a, u8x16 b)
{
+#if defined (__PCLMUL__)
return (u8x16) _mm_clmulepi64_si128 ((__m128i) a, (__m128i) b, 0x01);
+#elif defined (__ARM_FEATURE_CRYPTO)
+ return (u8x16) vmull_p64 ((poly64_t) vget_high_p64 ((poly64x2_t) a),
+ (poly64_t) vget_low_p64 ((poly64x2_t) b));
+#endif
}
static_always_inline u8x16
gmul_lo_hi (u8x16 a, u8x16 b)
{
+#if defined (__PCLMUL__)
return (u8x16) _mm_clmulepi64_si128 ((__m128i) a, (__m128i) b, 0x10);
+#elif defined (__ARM_FEATURE_CRYPTO)
+ return (u8x16) vmull_p64 ((poly64_t) vget_low_p64 ((poly64x2_t) a),
+ (poly64_t) vget_high_p64 ((poly64x2_t) b));
+#endif
}
static_always_inline u8x16
gmul_hi_hi (u8x16 a, u8x16 b)
{
+#if defined (__PCLMUL__)
return (u8x16) _mm_clmulepi64_si128 ((__m128i) a, (__m128i) b, 0x11);
+#elif defined (__ARM_FEATURE_CRYPTO)
+ return (u8x16) vmull_high_p64 ((poly64x2_t) a, (poly64x2_t) b);
+#endif
}
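+/* A full 128x128-bit carry-less multiply is built schoolbook-style from
+   the four partial products above:
+
+     (a0 + a1*x^64) * (b0 + b1*x^64) =
+       a0*b0 + (a0*b1 + a1*b0)*x^64 + a1*b1*x^128
+
+   lo accumulates a0*b0, hi accumulates a1*b1 and mid accumulates the two
+   cross products, which the reduce step folds into lo and hi later. */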
typedef struct
if (gd->pending)
{
/* there is pending data from the previous invocation so we can XOR */
- gd->hi = ghash_xor3 (gd->hi, gd->tmp_hi, hi);
- gd->lo = ghash_xor3 (gd->lo, gd->tmp_lo, lo);
+ gd->hi = u8x16_xor3 (gd->hi, gd->tmp_hi, hi);
+ gd->lo = u8x16_xor3 (gd->lo, gd->tmp_lo, lo);
gd->pending = 0;
}
else
}
/* gd->mid ^= a0 * b1 ^ a1 * b0 */
- gd->mid = ghash_xor3 (gd->mid, gmul_hi_lo (a, b), gmul_lo_hi (a, b));
+ gd->mid = u8x16_xor3 (gd->mid, gmul_hi_lo (a, b), gmul_lo_hi (a, b));
}
static_always_inline void
if (gd->pending)
{
- gd->lo = ghash_xor3 (gd->lo, gd->tmp_lo, midl);
- gd->hi = ghash_xor3 (gd->hi, gd->tmp_hi, midr);
+ gd->lo = u8x16_xor3 (gd->lo, gd->tmp_lo, midl);
+ gd->hi = u8x16_xor3 (gd->hi, gd->tmp_hi, midr);
}
else
{
gd->lo ^= midl;
gd->hi ^= midr;
}
-
r = gmul_hi_lo (ghash_poly2, gd->lo);
gd->lo ^= u8x16_word_shift_left (r, 8);
}
static_always_inline u8x16
ghash_final (ghash_data_t * gd)
{
- return ghash_xor3 (gd->hi, u8x16_word_shift_right (gd->tmp_lo, 4),
+ return u8x16_xor3 (gd->hi, u8x16_word_shift_right (gd->tmp_lo, 4),
u8x16_word_shift_left (gd->tmp_hi, 4));
}
return ghash_final (gd);
}
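+
+/* Illustrative single-wide usage (a sketch, not part of this header;
+   data[], Hi[] and the running tag T are hypothetical):
+
+     ghash_data_t _gd, *gd = &_gd;
+     ghash_mul_first (gd, data[0] ^ T, Hi[0]);
+     for (int i = 1; i < n; i++)
+       ghash_mul_next (gd, data[i], Hi[i]);
+     ghash_reduce (gd);
+     ghash_reduce2 (gd);
+     T = ghash_final (gd);
+*/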
+#ifdef __VPCLMULQDQ__
+
+static const u8x64 ghash4_poly2 = {
+ 0x00, 0x00, 0x00, 0xc2, 0x01, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc2,
+ 0x00, 0x00, 0x00, 0xc2, 0x01, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc2,
+ 0x00, 0x00, 0x00, 0xc2, 0x01, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc2,
+ 0x00, 0x00, 0x00, 0xc2, 0x01, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc2,
+};
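+
+/* ghash4_poly2 is the 128-bit ghash_poly2 constant replicated into all
+   four 128-bit lanes of a 512-bit vector, so the reduction code below
+   works lane-wise. */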
+
+typedef struct
+{
+ u8x64 hi, lo, mid, tmp_lo, tmp_hi;
+ int pending;
+} ghash4_data_t;
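+
+/* 4-wide analogue of ghash_data_t: each member carries four independent
+   128-bit GHASH lanes. */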
+
+static_always_inline u8x64
+gmul4_lo_lo (u8x64 a, u8x64 b)
+{
+ return (u8x64) _mm512_clmulepi64_epi128 ((__m512i) a, (__m512i) b, 0x00);
+}
+
+static_always_inline u8x64
+gmul4_hi_lo (u8x64 a, u8x64 b)
+{
+ return (u8x64) _mm512_clmulepi64_epi128 ((__m512i) a, (__m512i) b, 0x01);
+}
+
+static_always_inline u8x64
+gmul4_lo_hi (u8x64 a, u8x64 b)
+{
+ return (u8x64) _mm512_clmulepi64_epi128 ((__m512i) a, (__m512i) b, 0x10);
+}
+
+static_always_inline u8x64
+gmul4_hi_hi (u8x64 a, u8x64 b)
+{
+ return (u8x64) _mm512_clmulepi64_epi128 ((__m512i) a, (__m512i) b, 0x11);
+}
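+
+/* 512-bit counterparts of the gmul_* helpers above: VPCLMULQDQ applies
+   the same immediate half-selection independently in each of the four
+   128-bit lanes, producing four carry-less 64x64 products per
+   instruction. */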
+
+static_always_inline void
+ghash4_mul_first (ghash4_data_t * gd, u8x64 a, u8x64 b)
+{
+ gd->hi = gmul4_hi_hi (a, b);
+ gd->lo = gmul4_lo_lo (a, b);
+ gd->mid = (gmul4_hi_lo (a, b) ^ gmul4_lo_hi (a, b));
+ gd->pending = 0;
+}
+
+static_always_inline void
+ghash4_mul_next (ghash4_data_t * gd, u8x64 a, u8x64 b)
+{
+ u8x64 hi = gmul4_hi_hi (a, b);
+ u8x64 lo = gmul4_lo_lo (a, b);
+
+ if (gd->pending)
+ {
+      /* there is pending data from the previous invocation so we can XOR */
+ gd->hi = u8x64_xor3 (gd->hi, gd->tmp_hi, hi);
+ gd->lo = u8x64_xor3 (gd->lo, gd->tmp_lo, lo);
+ gd->pending = 0;
+ }
+ else
+ {
+      /* there is no pending data from the previous invocation so we postpone the XOR */
+ gd->tmp_hi = hi;
+ gd->tmp_lo = lo;
+ gd->pending = 1;
+ }
+ gd->mid = u8x64_xor3 (gd->mid, gmul4_hi_lo (a, b), gmul4_lo_hi (a, b));
+}
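+
+/* The pending flag batches partial products from two consecutive calls so
+   they can be folded with a single 3-input XOR (u8x64_xor3, a ternary
+   logic instruction on AVX-512) instead of two dependent 2-input XORs. */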
+
+static_always_inline void
+ghash4_reduce (ghash4_data_t * gd)
+{
+ u8x64 r;
+
+ /* Final combination:
+ gd->lo ^= gd->mid << 64
+ gd->hi ^= gd->mid >> 64 */
+
+ u8x64 midl = u8x64_word_shift_left (gd->mid, 8);
+ u8x64 midr = u8x64_word_shift_right (gd->mid, 8);
+
+ if (gd->pending)
+ {
+ gd->lo = u8x64_xor3 (gd->lo, gd->tmp_lo, midl);
+ gd->hi = u8x64_xor3 (gd->hi, gd->tmp_hi, midr);
+ }
+ else
+ {
+ gd->lo ^= midl;
+ gd->hi ^= midr;
+ }
+
+ r = gmul4_hi_lo (ghash4_poly2, gd->lo);
+ gd->lo ^= u8x64_word_shift_left (r, 8);
+}
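+
+/* As in the 128-bit ghash_reduce/ghash_reduce2/ghash_final sequence, the
+   256-bit product in each lane (hi:lo) is folded back to 128 bits modulo
+   the GHASH polynomial, using carry-less multiplies by the pre-shifted
+   ghash4_poly2 constant instead of explicit shift-and-XOR steps. */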
+
+static_always_inline void
+ghash4_reduce2 (ghash4_data_t * gd)
+{
+ gd->tmp_lo = gmul4_lo_lo (ghash4_poly2, gd->lo);
+ gd->tmp_hi = gmul4_lo_hi (ghash4_poly2, gd->lo);
+}
+
+static_always_inline u8x16
+ghash4_final (ghash4_data_t * gd)
+{
+ u8x64 r;
+ u8x32 t;
+
+ r = u8x64_xor3 (gd->hi, u8x64_word_shift_right (gd->tmp_lo, 4),
+ u8x64_word_shift_left (gd->tmp_hi, 4));
+
+ /* horizontal XOR of 4 128-bit lanes */
+ t = u8x64_extract_lo (r) ^ u8x64_extract_hi (r);
+ return u8x32_extract_hi (t) ^ u8x32_extract_lo (t);
+}
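+
+/* Illustrative 4-wide usage (a sketch, not part of this header; data4[],
+   Hi4[] and T are hypothetical):
+
+     ghash4_data_t _gd, *gd = &_gd;
+     ghash4_mul_first (gd, data4[0], Hi4[0]);
+     for (int i = 1; i < n; i++)
+       ghash4_mul_next (gd, data4[i], Hi4[i]);
+     ghash4_reduce (gd);
+     ghash4_reduce2 (gd);
+     T = ghash4_final (gd);
+
+   Each u8x64 carries four consecutive 128-bit blocks and ghash4_final
+   XORs the four lane results down to a single u8x16. */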
+#endif
+
static_always_inline void
-ghash_precompute (u8x16 H, u8x16 * Hi, int count)
+ghash_precompute (u8x16 H, u8x16 * Hi, int n)
{
u8x16 r8;
u32x4 r32;
/* calculate H<<1 mod poly: r8 holds the carry shifted out of each u64 */
r8 = (u8x16) ((u64x2) H >> 63);
H = (u8x16) ((u64x2) H << 1);
H |= u8x16_word_shift_left (r8, 8);
r32 = (u32x4) u8x16_word_shift_right (r8, 8);
+#ifdef __SSE2__
r32 = u32x4_shuffle (r32, 0, 1, 2, 0);
+#else
+ r32[3] = r32[0];
+#endif
/* *INDENT-OFF* */
r32 = r32 == (u32x4) {1, 0, 0, 1};
/* *INDENT-ON* */
- Hi[0] = H ^ ((u8x16) r32 & ghash_poly);
+ Hi[n - 1] = H = H ^ ((u8x16) r32 & ghash_poly);
/* calculate H^(i + 1) */
- for (int i = 1; i < count; i++)
- Hi[i] = ghash_mul (Hi[0], Hi[i - 1]);
+ for (int i = n - 2; i >= 0; i--)
+ Hi[i] = ghash_mul (H, Hi[i + 1]);
}
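+/* After the loop above, Hi[i] holds H^(n - i): Hi[n - 1] is H^1 and
+   Hi[0] is H^n. For example, with n = 4:
+
+     Hi[0] = H^4, Hi[1] = H^3, Hi[2] = H^2, Hi[3] = H^1
+
+   so a multi-block loop can walk Hi[] in the same order as its data. */
+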
#endif /* __ghash_h__ */