/*
 * Copyright (c) 2019 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16 #ifndef included_sha2_h
17 #define included_sha2_h
19 #include <vppinfra/clib.h>
#define SHA224_DIGEST_SIZE 28
#define SHA224_BLOCK_SIZE  64

#define SHA256_DIGEST_SIZE 32
#define SHA256_BLOCK_SIZE  64

/* 32-bit rotate right (FIPS 180-4 ROTR^y); y must be in 1..31 */
#define SHA256_ROTR(x, y) (((x) >> (y)) | ((x) << (32 - (y))))
/* Ch: each bit of 'a' selects between the bits of 'b' (1) and 'c' (0) */
#define SHA256_CH(a, b, c) (((a) & (b)) ^ (~(a) & (c)))
/* Maj: bitwise majority of the three inputs */
#define SHA256_MAJ(a, b, c) (((a) & (b)) ^ ((a) & (c)) ^ ((b) & (c)))
/* big sigmas used by the compression function (FIPS 180-4 sec 4.1.2).
 * Note: stray trailing ';' removed from CSIGMA0/CSIGMA1 so they expand
 * cleanly in expression context, consistent with the SHA-512 variants. */
#define SHA256_CSIGMA0(x)                                                     \
  (SHA256_ROTR (x, 2) ^ SHA256_ROTR (x, 13) ^ SHA256_ROTR (x, 22))
#define SHA256_CSIGMA1(x)                                                     \
  (SHA256_ROTR (x, 6) ^ SHA256_ROTR (x, 11) ^ SHA256_ROTR (x, 25))
/* small sigmas used by the message schedule */
#define SHA256_SSIGMA0(x)                                                     \
  (SHA256_ROTR (x, 7) ^ SHA256_ROTR (x, 18) ^ ((x) >> 3))
#define SHA256_SSIGMA1(x)                                                     \
  (SHA256_ROTR (x, 17) ^ SHA256_ROTR (x, 19) ^ ((x) >> 10))

/* message-schedule expansion: compute w[j] for 16 <= j < 64 */
#define SHA256_MSG_SCHED(w, j)                                                \
  do                                                                          \
    {                                                                         \
      w[j] = w[j - 7] + w[j - 16];                                            \
      w[j] += SHA256_SSIGMA0 (w[j - 15]);                                     \
      w[j] += SHA256_SSIGMA1 (w[j - 2]);                                      \
    }                                                                         \
  while (0)
/* One SHA-256 round. s[0..7] is the working state a..h, w the message
 * schedule, i the round index, k the round constant. The do/while wrapper
 * and the a..h state-rotation tail were missing in this copy; restored per
 * FIPS 180-4 sec 6.2.2. */
#define SHA256_TRANSFORM(s, w, i, k)                                          \
  do                                                                          \
    {                                                                         \
      __typeof__ (s[0]) t1, t2;                                               \
      t1 = k + w[i] + s[7];                                                   \
      t1 += SHA256_CSIGMA1 (s[4]);                                            \
      t1 += SHA256_CH (s[4], s[5], s[6]);                                     \
      t2 = SHA256_CSIGMA0 (s[0]);                                             \
      t2 += SHA256_MAJ (s[0], s[1], s[2]);                                    \
      s[7] = s[6];                                                            \
      s[6] = s[5];                                                            \
      s[5] = s[4];                                                            \
      s[4] = s[3] + t1;                                                       \
      s[3] = s[2];                                                            \
      s[2] = s[1];                                                            \
      s[1] = s[0];                                                            \
      s[0] = t1 + t2;                                                         \
    }                                                                         \
  while (0)
#define SHA512_224_DIGEST_SIZE 28
#define SHA512_224_BLOCK_SIZE  128

#define SHA512_256_DIGEST_SIZE 32
#define SHA512_256_BLOCK_SIZE  128

#define SHA384_DIGEST_SIZE 48
#define SHA384_BLOCK_SIZE  128

#define SHA512_DIGEST_SIZE 64
#define SHA512_BLOCK_SIZE  128

/* 64-bit rotate right (FIPS 180-4 ROTR^y); y must be in 1..63 */
#define SHA512_ROTR(x, y) (((x) >> (y)) | ((x) << (64 - (y))))
/* Ch: each bit of 'a' selects between the bits of 'b' (1) and 'c' (0) */
#define SHA512_CH(a, b, c) (((a) & (b)) ^ (~(a) & (c)))
/* Maj: bitwise majority of the three inputs */
#define SHA512_MAJ(a, b, c) (((a) & (b)) ^ ((a) & (c)) ^ ((b) & (c)))
/* big sigmas used by the compression function (FIPS 180-4 sec 4.1.3) */
#define SHA512_CSIGMA0(x)                                                     \
  (SHA512_ROTR (x, 28) ^ SHA512_ROTR (x, 34) ^ SHA512_ROTR (x, 39))
#define SHA512_CSIGMA1(x)                                                     \
  (SHA512_ROTR (x, 14) ^ SHA512_ROTR (x, 18) ^ SHA512_ROTR (x, 41))
/* small sigmas used by the message schedule */
#define SHA512_SSIGMA0(x)                                                     \
  (SHA512_ROTR (x, 1) ^ SHA512_ROTR (x, 8) ^ ((x) >> 7))
#define SHA512_SSIGMA1(x)                                                     \
  (SHA512_ROTR (x, 19) ^ SHA512_ROTR (x, 61) ^ ((x) >> 6))

/* message-schedule expansion: compute w[j] for 16 <= j < 80 */
#define SHA512_MSG_SCHED(w, j)                                                \
  do                                                                          \
    {                                                                         \
      w[j] = w[j - 7] + w[j - 16];                                            \
      w[j] += SHA512_SSIGMA0 (w[j - 15]);                                     \
      w[j] += SHA512_SSIGMA1 (w[j - 2]);                                      \
    }                                                                         \
  while (0)
/* One SHA-512 round. s[0..7] is the working state a..h, w the message
 * schedule, i the round index, k the round constant. The do/while wrapper
 * and the a..h state-rotation tail were missing in this copy; restored per
 * FIPS 180-4 sec 6.4.2. */
#define SHA512_TRANSFORM(s, w, i, k)                                          \
  do                                                                          \
    {                                                                         \
      __typeof__ (s[0]) t1, t2;                                               \
      t1 = k + w[i] + s[7];                                                   \
      t1 += SHA512_CSIGMA1 (s[4]);                                            \
      t1 += SHA512_CH (s[4], s[5], s[6]);                                     \
      t2 = SHA512_CSIGMA0 (s[0]);                                             \
      t2 += SHA512_MAJ (s[0], s[1], s[2]);                                    \
      s[7] = s[6];                                                            \
      s[6] = s[5];                                                            \
      s[5] = s[4];                                                            \
      s[4] = s[3] + t1;                                                       \
      s[3] = s[2];                                                            \
      s[2] = s[1];                                                            \
      s[1] = s[0];                                                            \
      s[0] = t1 + t2;                                                         \
    }                                                                         \
  while (0)
109 static const u32 sha224_h[8] = { 0xc1059ed8, 0x367cd507, 0x3070dd17,
110 0xf70e5939, 0xffc00b31, 0x68581511,
111 0x64f98fa7, 0xbefa4fa4 };
113 static const u32 sha256_h[8] = { 0x6a09e667, 0xbb67ae85, 0x3c6ef372,
114 0xa54ff53a, 0x510e527f, 0x9b05688c,
115 0x1f83d9ab, 0x5be0cd19 };
117 static const u32 sha256_k[64] = {
118 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b, 0x59f111f1,
119 0x923f82a4, 0xab1c5ed5, 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3,
120 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174, 0xe49b69c1, 0xefbe4786,
121 0x0fc19dc6, 0x240ca1cc, 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
122 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, 0xc6e00bf3, 0xd5a79147,
123 0x06ca6351, 0x14292967, 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13,
124 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85, 0xa2bfe8a1, 0xa81a664b,
125 0xc24b8b70, 0xc76c51a3, 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
126 0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, 0x391c0cb3, 0x4ed8aa4a,
127 0x5b9cca4f, 0x682e6ff3, 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208,
128 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
131 static const u64 sha384_h[8] = { 0xcbbb9d5dc1059ed8, 0x629a292a367cd507,
132 0x9159015a3070dd17, 0x152fecd8f70e5939,
133 0x67332667ffc00b31, 0x8eb44a8768581511,
134 0xdb0c2e0d64f98fa7, 0x47b5481dbefa4fa4 };
136 static const u64 sha512_h[8] = { 0x6a09e667f3bcc908, 0xbb67ae8584caa73b,
137 0x3c6ef372fe94f82b, 0xa54ff53a5f1d36f1,
138 0x510e527fade682d1, 0x9b05688c2b3e6c1f,
139 0x1f83d9abfb41bd6b, 0x5be0cd19137e2179 };
141 static const u64 sha512_224_h[8] = { 0x8c3d37c819544da2, 0x73e1996689dcd4d6,
142 0x1dfab7ae32ff9c82, 0x679dd514582f9fcf,
143 0x0f6d2b697bd44da8, 0x77e36f7304c48942,
144 0x3f9d85a86a1d36c8, 0x1112e6ad91d692a1 };
146 static const u64 sha512_256_h[8] = { 0x22312194fc2bf72c, 0x9f555fa3c84c64c2,
147 0x2393b86b6f53b151, 0x963877195940eabd,
148 0x96283ee2a88effe3, 0xbe5e1e2553863992,
149 0x2b0199fc2c85b8aa, 0x0eb72ddc81c52ca2 };
151 static const u64 sha512_k[80] = {
152 0x428a2f98d728ae22, 0x7137449123ef65cd, 0xb5c0fbcfec4d3b2f,
153 0xe9b5dba58189dbbc, 0x3956c25bf348b538, 0x59f111f1b605d019,
154 0x923f82a4af194f9b, 0xab1c5ed5da6d8118, 0xd807aa98a3030242,
155 0x12835b0145706fbe, 0x243185be4ee4b28c, 0x550c7dc3d5ffb4e2,
156 0x72be5d74f27b896f, 0x80deb1fe3b1696b1, 0x9bdc06a725c71235,
157 0xc19bf174cf692694, 0xe49b69c19ef14ad2, 0xefbe4786384f25e3,
158 0x0fc19dc68b8cd5b5, 0x240ca1cc77ac9c65, 0x2de92c6f592b0275,
159 0x4a7484aa6ea6e483, 0x5cb0a9dcbd41fbd4, 0x76f988da831153b5,
160 0x983e5152ee66dfab, 0xa831c66d2db43210, 0xb00327c898fb213f,
161 0xbf597fc7beef0ee4, 0xc6e00bf33da88fc2, 0xd5a79147930aa725,
162 0x06ca6351e003826f, 0x142929670a0e6e70, 0x27b70a8546d22ffc,
163 0x2e1b21385c26c926, 0x4d2c6dfc5ac42aed, 0x53380d139d95b3df,
164 0x650a73548baf63de, 0x766a0abb3c77b2a8, 0x81c2c92e47edaee6,
165 0x92722c851482353b, 0xa2bfe8a14cf10364, 0xa81a664bbc423001,
166 0xc24b8b70d0f89791, 0xc76c51a30654be30, 0xd192e819d6ef5218,
167 0xd69906245565a910, 0xf40e35855771202a, 0x106aa07032bbd1b8,
168 0x19a4c116b8d2d0c8, 0x1e376c085141ab53, 0x2748774cdf8eeb99,
169 0x34b0bcb5e19b48a8, 0x391c0cb3c5c95a63, 0x4ed8aa4ae3418acb,
170 0x5b9cca4f7763e373, 0x682e6ff3d6b2b8a3, 0x748f82ee5defb2fc,
171 0x78a5636f43172f60, 0x84c87814a1f0ab72, 0x8cc702081a6439ec,
172 0x90befffa23631e28, 0xa4506cebde82bde9, 0xbef9a3f7b2c67915,
173 0xc67178f2e372532b, 0xca273eceea26619c, 0xd186b8c721c0c207,
174 0xeada7dd6cde0eb1e, 0xf57d4f7fee6ed178, 0x06f067aa72176fba,
175 0x0a637dc5a2c898a6, 0x113f9804bef90dae, 0x1b710b35131c471b,
176 0x28db77f523047d84, 0x32caab7b40c72493, 0x3c9ebe0a15c9bebc,
177 0x431d67c49c100d4c, 0x4cc5d4becb3e42b6, 0x597f299cfc657e2a,
178 0x5fcb6fab3ad6faec, 0x6c44198c4a475817
191 #define SHA2_MAX_BLOCK_SIZE SHA512_BLOCK_SIZE
192 #define SHA2_MAX_DIGEST_SIZE SHA512_DIGEST_SIZE
204 #if defined(__SHA__) && defined(__x86_64__)
210 u8 as_u8[SHA2_MAX_BLOCK_SIZE];
211 u64 as_u64[SHA2_MAX_BLOCK_SIZE / sizeof (u64)];
212 uword as_uword[SHA2_MAX_BLOCK_SIZE / sizeof (uword)];
216 static_always_inline void
217 clib_sha2_init (clib_sha2_ctx_t *ctx, clib_sha2_type_t type)
222 ctx->total_bytes = 0;
229 ctx->block_size = SHA224_BLOCK_SIZE;
230 ctx->digest_size = SHA224_DIGEST_SIZE;
234 ctx->block_size = SHA256_BLOCK_SIZE;
235 ctx->digest_size = SHA256_DIGEST_SIZE;
239 ctx->block_size = SHA384_BLOCK_SIZE;
240 ctx->digest_size = SHA384_DIGEST_SIZE;
244 ctx->block_size = SHA512_BLOCK_SIZE;
245 ctx->digest_size = SHA512_DIGEST_SIZE;
247 case CLIB_SHA2_512_224:
249 ctx->block_size = SHA512_224_BLOCK_SIZE;
250 ctx->digest_size = SHA512_224_DIGEST_SIZE;
252 case CLIB_SHA2_512_256:
254 ctx->block_size = SHA512_256_BLOCK_SIZE;
255 ctx->digest_size = SHA512_256_DIGEST_SIZE;
259 for (int i = 0; i < 8; i++)
260 ctx->h32[i] = h32[i];
263 for (int i = 0; i < 8; i++)
264 ctx->h64[i] = h64[i];
#if defined(__SHA__) && defined(__x86_64__)
/* Advance four message-schedule words with the SHA-NI msg1/msg2 helpers;
 * a..d index the four 4-word vectors in circular order. Declarators and
 * braces were missing in this copy; reconstructed. */
static_always_inline void
shani_sha256_cycle_w (u32x4 cw[], u8 a, u8 b, u8 c, u8 d)
{
  cw[a] = (u32x4) _mm_sha256msg1_epu32 ((__m128i) cw[a], (__m128i) cw[b]);
  cw[a] += (u32x4) _mm_alignr_epi8 ((__m128i) cw[d], (__m128i) cw[c], 4);
  cw[a] = (u32x4) _mm_sha256msg2_epu32 ((__m128i) cw[a], (__m128i) cw[d]);
}

/* Perform rounds 4n..4n+3 of SHA-256 on the state pair s[0]/s[1] using
 * the sha256rnds2 instruction (two rounds per invocation). */
static_always_inline void
shani_sha256_4_rounds (u32x4 cw, u8 n, u32x4 s[])
{
  u32x4 r = *(u32x4 *) (sha256_k + 4 * n) + cw;
  s[0] = (u32x4) _mm_sha256rnds2_epu32 ((__m128i) s[0], (__m128i) s[1],
					(__m128i) r);
  r = (u32x4) u64x2_interleave_hi ((u64x2) r, (u64x2) r);
  s[1] = (u32x4) _mm_sha256rnds2_epu32 ((__m128i) s[1], (__m128i) s[0],
					(__m128i) r);
}

/* Convert between the linear h[0..7] layout and the ABEF/CDGH word order
 * expected by sha256rnds2. */
static_always_inline void
shani_sha256_shuffle (u32x4 d[2], u32x4 s[2])
{
  /* {0, 1, 2, 3}, {4, 5, 6, 7} -> {7, 6, 3, 2}, {5, 4, 1, 0} */
  d[0] = (u32x4) _mm_shuffle_ps ((__m128) s[1], (__m128) s[0], 0xbb);
  d[1] = (u32x4) _mm_shuffle_ps ((__m128) s[1], (__m128) s[0], 0x11);
}
#endif
297 clib_sha256_block (clib_sha2_ctx_t *ctx, const u8 *msg, uword n_blocks)
299 #if defined(__SHA__) && defined(__x86_64__)
300 u32x4 h[2], s[2], w[4];
302 shani_sha256_shuffle (h, ctx->h32x4);
306 w[0] = u32x4_byte_swap (u32x4_load_unaligned ((u8 *) msg + 0));
307 w[1] = u32x4_byte_swap (u32x4_load_unaligned ((u8 *) msg + 16));
308 w[2] = u32x4_byte_swap (u32x4_load_unaligned ((u8 *) msg + 32));
309 w[3] = u32x4_byte_swap (u32x4_load_unaligned ((u8 *) msg + 48));
314 shani_sha256_4_rounds (w[0], 0, s);
315 shani_sha256_4_rounds (w[1], 1, s);
316 shani_sha256_4_rounds (w[2], 2, s);
317 shani_sha256_4_rounds (w[3], 3, s);
319 shani_sha256_cycle_w (w, 0, 1, 2, 3);
320 shani_sha256_4_rounds (w[0], 4, s);
321 shani_sha256_cycle_w (w, 1, 2, 3, 0);
322 shani_sha256_4_rounds (w[1], 5, s);
323 shani_sha256_cycle_w (w, 2, 3, 0, 1);
324 shani_sha256_4_rounds (w[2], 6, s);
325 shani_sha256_cycle_w (w, 3, 0, 1, 2);
326 shani_sha256_4_rounds (w[3], 7, s);
328 shani_sha256_cycle_w (w, 0, 1, 2, 3);
329 shani_sha256_4_rounds (w[0], 8, s);
330 shani_sha256_cycle_w (w, 1, 2, 3, 0);
331 shani_sha256_4_rounds (w[1], 9, s);
332 shani_sha256_cycle_w (w, 2, 3, 0, 1);
333 shani_sha256_4_rounds (w[2], 10, s);
334 shani_sha256_cycle_w (w, 3, 0, 1, 2);
335 shani_sha256_4_rounds (w[3], 11, s);
337 shani_sha256_cycle_w (w, 0, 1, 2, 3);
338 shani_sha256_4_rounds (w[0], 12, s);
339 shani_sha256_cycle_w (w, 1, 2, 3, 0);
340 shani_sha256_4_rounds (w[1], 13, s);
341 shani_sha256_cycle_w (w, 2, 3, 0, 1);
342 shani_sha256_4_rounds (w[2], 14, s);
343 shani_sha256_cycle_w (w, 3, 0, 1, 2);
344 shani_sha256_4_rounds (w[3], 15, s);
350 msg += SHA256_BLOCK_SIZE;
354 shani_sha256_shuffle (ctx->h32x4, h);
360 for (i = 0; i < 8; i++)
363 for (i = 0; i < 16; i++)
365 w[i] = clib_net_to_host_u32 (*((u32 *) msg + i));
366 SHA256_TRANSFORM (s, w, i, sha256_k[i]);
369 for (i = 16; i < 64; i++)
371 SHA256_MSG_SCHED (w, i);
372 SHA256_TRANSFORM (s, w, i, sha256_k[i]);
375 for (i = 0; i < 8; i++)
379 msg += SHA256_BLOCK_SIZE;
385 static_always_inline void
386 clib_sha512_block (clib_sha2_ctx_t *ctx, const u8 *msg, uword n_blocks)
392 for (i = 0; i < 8; i++)
395 for (i = 0; i < 16; i++)
397 w[i] = clib_net_to_host_u64 (*((u64 *) msg + i));
398 SHA512_TRANSFORM (s, w, i, sha512_k[i]);
401 for (i = 16; i < 80; i++)
403 SHA512_MSG_SCHED (w, i);
404 SHA512_TRANSFORM (s, w, i, sha512_k[i]);
407 for (i = 0; i < 8; i++)
411 msg += SHA512_BLOCK_SIZE;
416 static_always_inline void
417 clib_sha2_update (clib_sha2_ctx_t *ctx, const u8 *msg, uword n_bytes)
422 uword n_left = ctx->block_size - ctx->n_pending;
423 if (n_bytes < n_left)
425 clib_memcpy_fast (ctx->pending.as_u8 + ctx->n_pending, msg, n_bytes);
426 ctx->n_pending += n_bytes;
431 clib_memcpy_fast (ctx->pending.as_u8 + ctx->n_pending, msg, n_left);
432 if (ctx->block_size == SHA512_BLOCK_SIZE)
433 clib_sha512_block (ctx, ctx->pending.as_u8, 1);
435 clib_sha256_block (ctx, ctx->pending.as_u8, 1);
437 ctx->total_bytes += ctx->block_size;
443 if ((n_blocks = n_bytes / ctx->block_size))
445 if (ctx->block_size == SHA512_BLOCK_SIZE)
446 clib_sha512_block (ctx, msg, n_blocks);
448 clib_sha256_block (ctx, msg, n_blocks);
449 n_bytes -= n_blocks * ctx->block_size;
450 msg += n_blocks * ctx->block_size;
451 ctx->total_bytes += n_blocks * ctx->block_size;
456 clib_memset_u8 (ctx->pending.as_u8, 0, ctx->block_size);
457 clib_memcpy_fast (ctx->pending.as_u8, msg, n_bytes);
458 ctx->n_pending = n_bytes;
464 static_always_inline void
465 clib_sha2_final (clib_sha2_ctx_t *ctx, u8 *digest)
469 ctx->total_bytes += ctx->n_pending;
470 if (ctx->n_pending == 0)
472 clib_memset (ctx->pending.as_u8, 0, ctx->block_size);
473 ctx->pending.as_u8[0] = 0x80;
475 else if (ctx->n_pending + sizeof (u64) + sizeof (u8) > ctx->block_size)
477 ctx->pending.as_u8[ctx->n_pending] = 0x80;
478 if (ctx->block_size == SHA512_BLOCK_SIZE)
479 clib_sha512_block (ctx, ctx->pending.as_u8, 1);
481 clib_sha256_block (ctx, ctx->pending.as_u8, 1);
482 clib_memset (ctx->pending.as_u8, 0, ctx->block_size);
485 ctx->pending.as_u8[ctx->n_pending] = 0x80;
487 ctx->pending.as_u64[ctx->block_size / 8 - 1] =
488 clib_net_to_host_u64 (ctx->total_bytes * 8);
489 if (ctx->block_size == SHA512_BLOCK_SIZE)
490 clib_sha512_block (ctx, ctx->pending.as_u8, 1);
492 clib_sha256_block (ctx, ctx->pending.as_u8, 1);
494 if (ctx->block_size == SHA512_BLOCK_SIZE)
496 for (i = 0; i < ctx->digest_size / sizeof (u64); i++)
497 *((u64 *) digest + i) = clib_net_to_host_u64 (ctx->h64[i]);
499 /* sha512-224 case - write half of u64 */
500 if (i * sizeof (u64) < ctx->digest_size)
501 *((u32 *) digest + 2 * i) = clib_net_to_host_u32 (ctx->h64[i] >> 32);
504 for (i = 0; i < ctx->digest_size / sizeof (u32); i++)
505 *((u32 *) digest + i) = clib_net_to_host_u32 (ctx->h32[i]);
508 static_always_inline void
509 clib_sha2 (clib_sha2_type_t type, const u8 *msg, uword len, u8 *digest)
512 clib_sha2_init (&ctx, type);
513 clib_sha2_update (&ctx, msg, len);
514 clib_sha2_final (&ctx, digest);
/* one-shot helpers: clib_shaXXX (msg, len, digest) */
#define clib_sha224(...)     clib_sha2 (CLIB_SHA2_224, __VA_ARGS__)
#define clib_sha256(...)     clib_sha2 (CLIB_SHA2_256, __VA_ARGS__)
#define clib_sha384(...)     clib_sha2 (CLIB_SHA2_384, __VA_ARGS__)
#define clib_sha512(...)     clib_sha2 (CLIB_SHA2_512, __VA_ARGS__)
#define clib_sha512_224(...) clib_sha2 (CLIB_SHA2_512_224, __VA_ARGS__)
#define clib_sha512_256(...) clib_sha2 (CLIB_SHA2_512_256, __VA_ARGS__)
524 static_always_inline void
525 clib_hmac_sha2 (clib_sha2_type_t type, const u8 *key, uword key_len,
526 const u8 *msg, uword len, u8 *digest)
528 clib_sha2_ctx_t _ctx, *ctx = &_ctx;
529 uword key_data[SHA2_MAX_BLOCK_SIZE / sizeof (uword)];
530 u8 i_digest[SHA2_MAX_DIGEST_SIZE];
533 clib_sha2_init (ctx, type);
534 n_words = ctx->block_size / sizeof (uword);
537 if (key_len > ctx->block_size)
539 /* key is longer than block, calculate hash of key */
540 clib_sha2_update (ctx, key, key_len);
541 for (i = (ctx->digest_size / sizeof (uword)) / 2; i < n_words; i++)
543 clib_sha2_final (ctx, (u8 *) key_data);
544 clib_sha2_init (ctx, type);
548 for (i = 0; i < n_words; i++)
550 clib_memcpy_fast (key_data, key, key_len);
554 for (i = 0; i < n_words; i++)
555 ctx->pending.as_uword[i] = key_data[i] ^ (uword) 0x3636363636363636;
556 if (ctx->block_size == SHA512_BLOCK_SIZE)
557 clib_sha512_block (ctx, ctx->pending.as_u8, 1);
559 clib_sha256_block (ctx, ctx->pending.as_u8, 1);
560 ctx->total_bytes += ctx->block_size;
563 clib_sha2_update (ctx, msg, len);
564 clib_sha2_final (ctx, i_digest);
567 clib_sha2_init (ctx, type);
568 for (i = 0; i < n_words; i++)
569 ctx->pending.as_uword[i] = key_data[i] ^ (uword) 0x5c5c5c5c5c5c5c5c;
570 if (ctx->block_size == SHA512_BLOCK_SIZE)
571 clib_sha512_block (ctx, ctx->pending.as_u8, 1);
573 clib_sha256_block (ctx, ctx->pending.as_u8, 1);
574 ctx->total_bytes += ctx->block_size;
577 clib_sha2_update (ctx, i_digest, ctx->digest_size);
578 clib_sha2_final (ctx, digest);
/* HMAC helpers: clib_hmac_shaXXX (key, key_len, msg, len, digest) */
#define clib_hmac_sha224(...) clib_hmac_sha2 (CLIB_SHA2_224, __VA_ARGS__)
#define clib_hmac_sha256(...) clib_hmac_sha2 (CLIB_SHA2_256, __VA_ARGS__)
#define clib_hmac_sha384(...) clib_hmac_sha2 (CLIB_SHA2_384, __VA_ARGS__)
#define clib_hmac_sha512(...) clib_hmac_sha2 (CLIB_SHA2_512, __VA_ARGS__)
#define clib_hmac_sha512_224(...)                                             \
  clib_hmac_sha2 (CLIB_SHA2_512_224, __VA_ARGS__)
#define clib_hmac_sha512_256(...)                                             \
  clib_hmac_sha2 (CLIB_SHA2_512_256, __VA_ARGS__)
590 #endif /* included_sha2_h */