2 * Copyright (c) 2019 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
16 #ifndef included_sha2_h
17 #define included_sha2_h
19 #include <vppinfra/clib.h>
20 #include <vppinfra/vector.h>
/* SHA-224 / SHA-256 parameters (FIPS 180-4). SHA-224 is SHA-256 with a
   different initial state and a 28-byte (truncated) digest. */
#define SHA224_DIGEST_SIZE 28
#define SHA224_BLOCK_SIZE 64

#define SHA256_DIGEST_SIZE 32
#define SHA256_BLOCK_SIZE 64

/* 32-bit rotate right; valid only for 0 < y < 32 (a shift by the full word
   width is undefined behavior). Arguments fully parenthesized so compound
   expressions may be passed. */
#define SHA256_ROTR(x, y) (((x) >> (y)) | ((x) << (32 - (y))))

/* FIPS 180-4 section 4.1.2 logical functions Ch and Maj. */
#define SHA256_CH(a, b, c)  (((a) & (b)) ^ (~(a) & (c)))
#define SHA256_MAJ(a, b, c) (((a) & (b)) ^ ((a) & (c)) ^ ((b) & (c)))

/* Big sigma functions (used in the compression rounds). The stray trailing
   semicolons inside the original definitions are removed so these expand to
   plain expressions and can be used anywhere an expression is allowed. */
#define SHA256_CSIGMA0(x)                                                     \
  (SHA256_ROTR (x, 2) ^ SHA256_ROTR (x, 13) ^ SHA256_ROTR (x, 22))
#define SHA256_CSIGMA1(x)                                                     \
  (SHA256_ROTR (x, 6) ^ SHA256_ROTR (x, 11) ^ SHA256_ROTR (x, 25))

/* Small sigma functions (used in the message schedule). */
#define SHA256_SSIGMA0(x)                                                     \
  (SHA256_ROTR (x, 7) ^ SHA256_ROTR (x, 18) ^ ((x) >> 3))
#define SHA256_SSIGMA1(x)                                                     \
  (SHA256_ROTR (x, 17) ^ SHA256_ROTR (x, 19) ^ ((x) >> 10))

/* Message-schedule extension: computes w[j] (16 <= j < 64) from w[j-2],
   w[j-7], w[j-15] and w[j-16]. Wrapped in do/while(0) so the multi-statement
   macro behaves as a single statement. */
#define SHA256_MSG_SCHED(w, j)                                                \
  do                                                                          \
    {                                                                         \
      w[j] = w[j - 7] + w[j - 16];                                            \
      w[j] += SHA256_SSIGMA0 (w[j - 15]);                                     \
      w[j] += SHA256_SSIGMA1 (w[j - 2]);                                      \
    }                                                                         \
  while (0)

/* One SHA-256 compression round (FIPS 180-4 section 6.2.2 step 3):
   s[0..7] holds the working variables a..h, w[i] is the schedule word and
   k the round constant. */
#define SHA256_TRANSFORM(s, w, i, k)                                          \
  do                                                                          \
    {                                                                         \
      __typeof__ (s[0]) t1, t2;                                               \
      t1 = k + w[i] + s[7];                                                   \
      t1 += SHA256_CSIGMA1 (s[4]);                                            \
      t1 += SHA256_CH (s[4], s[5], s[6]);                                     \
      t2 = SHA256_CSIGMA0 (s[0]);                                             \
      t2 += SHA256_MAJ (s[0], s[1], s[2]);                                    \
      s[7] = s[6];                                                            \
      s[6] = s[5];                                                            \
      s[5] = s[4];                                                            \
      s[4] = s[3] + t1;                                                       \
      s[3] = s[2];                                                            \
      s[2] = s[1];                                                            \
      s[1] = s[0];                                                            \
      s[0] = t1 + t2;                                                         \
    }                                                                         \
  while (0)
/* SHA-512 family parameters (FIPS 180-4). All variants share the 128-byte
   block; SHA-384, SHA-512/224 and SHA-512/256 differ only in initial state
   and digest truncation. */
#define SHA512_224_DIGEST_SIZE 28
#define SHA512_224_BLOCK_SIZE 128

#define SHA512_256_DIGEST_SIZE 32
#define SHA512_256_BLOCK_SIZE 128

#define SHA384_DIGEST_SIZE 48
#define SHA384_BLOCK_SIZE 128

#define SHA512_DIGEST_SIZE 64
#define SHA512_BLOCK_SIZE 128

/* 64-bit rotate right; valid only for 0 < y < 64. Arguments fully
   parenthesized so compound expressions may be passed. */
#define SHA512_ROTR(x, y) (((x) >> (y)) | ((x) << (64 - (y))))

/* FIPS 180-4 section 4.1.3 logical functions Ch and Maj. */
#define SHA512_CH(a, b, c)  (((a) & (b)) ^ (~(a) & (c)))
#define SHA512_MAJ(a, b, c) (((a) & (b)) ^ ((a) & (c)) ^ ((b) & (c)))

/* Big sigma functions (compression rounds). */
#define SHA512_CSIGMA0(x)                                                     \
  (SHA512_ROTR (x, 28) ^ SHA512_ROTR (x, 34) ^ SHA512_ROTR (x, 39))
#define SHA512_CSIGMA1(x)                                                     \
  (SHA512_ROTR (x, 14) ^ SHA512_ROTR (x, 18) ^ SHA512_ROTR (x, 41))

/* Small sigma functions (message schedule). */
#define SHA512_SSIGMA0(x)                                                     \
  (SHA512_ROTR (x, 1) ^ SHA512_ROTR (x, 8) ^ ((x) >> 7))
#define SHA512_SSIGMA1(x)                                                     \
  (SHA512_ROTR (x, 19) ^ SHA512_ROTR (x, 61) ^ ((x) >> 6))

/* Message-schedule extension: computes w[j] (16 <= j < 80). Wrapped in
   do/while(0) so the multi-statement macro behaves as a single statement. */
#define SHA512_MSG_SCHED(w, j)                                                \
  do                                                                          \
    {                                                                         \
      w[j] = w[j - 7] + w[j - 16];                                            \
      w[j] += SHA512_SSIGMA0 (w[j - 15]);                                     \
      w[j] += SHA512_SSIGMA1 (w[j - 2]);                                      \
    }                                                                         \
  while (0)

/* One SHA-512 compression round (FIPS 180-4 section 6.4.2 step 3):
   s[0..7] holds the working variables a..h, w[i] is the schedule word and
   k the round constant. */
#define SHA512_TRANSFORM(s, w, i, k)                                          \
  do                                                                          \
    {                                                                         \
      __typeof__ (s[0]) t1, t2;                                               \
      t1 = k + w[i] + s[7];                                                   \
      t1 += SHA512_CSIGMA1 (s[4]);                                            \
      t1 += SHA512_CH (s[4], s[5], s[6]);                                     \
      t2 = SHA512_CSIGMA0 (s[0]);                                             \
      t2 += SHA512_MAJ (s[0], s[1], s[2]);                                    \
      s[7] = s[6];                                                            \
      s[6] = s[5];                                                            \
      s[5] = s[4];                                                            \
      s[4] = s[3] + t1;                                                       \
      s[3] = s[2];                                                            \
      s[2] = s[1];                                                            \
      s[1] = s[0];                                                            \
      s[0] = t1 + t2;                                                         \
    }                                                                         \
  while (0)
/* Hardware SHA-256 detection: Intel SHA extensions on x86_64, or the ARMv8
   SHA2 crypto extension. CLIB_SHA256_ISA is defined when either is present.
   Fix: the visible text lacked the matching #endif for each conditional. */
#if defined(__SHA__) && defined(__x86_64__)
#define CLIB_SHA256_ISA_INTEL
#define CLIB_SHA256_ISA
#endif

#ifdef __ARM_FEATURE_SHA2
#define CLIB_SHA256_ISA_ARM
#define CLIB_SHA256_ISA
#endif
/* Initial hash values H(0) for SHA-224 and SHA-256 (FIPS 180-4 sect. 5.3). */
static const u32 sha224_h[8] = { 0xc1059ed8, 0x367cd507, 0x3070dd17,
				 0xf70e5939, 0xffc00b31, 0x68581511,
				 0x64f98fa7, 0xbefa4fa4 };

static const u32 sha256_h[8] = { 0x6a09e667, 0xbb67ae85, 0x3c6ef372,
				 0xa54ff53a, 0x510e527f, 0x9b05688c,
				 0x1f83d9ab, 0x5be0cd19 };
128 static const u32 sha256_k[64] = {
129 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b, 0x59f111f1,
130 0x923f82a4, 0xab1c5ed5, 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3,
131 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174, 0xe49b69c1, 0xefbe4786,
132 0x0fc19dc6, 0x240ca1cc, 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
133 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, 0xc6e00bf3, 0xd5a79147,
134 0x06ca6351, 0x14292967, 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13,
135 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85, 0xa2bfe8a1, 0xa81a664b,
136 0xc24b8b70, 0xc76c51a3, 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
137 0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, 0x391c0cb3, 0x4ed8aa4a,
138 0x5b9cca4f, 0x682e6ff3, 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208,
139 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
/* Initial hash values H(0) for SHA-384, SHA-512, SHA-512/224 and
   SHA-512/256 (FIPS 180-4 sections 5.3.4 - 5.3.6). */
static const u64 sha384_h[8] = { 0xcbbb9d5dc1059ed8, 0x629a292a367cd507,
				 0x9159015a3070dd17, 0x152fecd8f70e5939,
				 0x67332667ffc00b31, 0x8eb44a8768581511,
				 0xdb0c2e0d64f98fa7, 0x47b5481dbefa4fa4 };

static const u64 sha512_h[8] = { 0x6a09e667f3bcc908, 0xbb67ae8584caa73b,
				 0x3c6ef372fe94f82b, 0xa54ff53a5f1d36f1,
				 0x510e527fade682d1, 0x9b05688c2b3e6c1f,
				 0x1f83d9abfb41bd6b, 0x5be0cd19137e2179 };

static const u64 sha512_224_h[8] = { 0x8c3d37c819544da2, 0x73e1996689dcd4d6,
				     0x1dfab7ae32ff9c82, 0x679dd514582f9fcf,
				     0x0f6d2b697bd44da8, 0x77e36f7304c48942,
				     0x3f9d85a86a1d36c8, 0x1112e6ad91d692a1 };

static const u64 sha512_256_h[8] = { 0x22312194fc2bf72c, 0x9f555fa3c84c64c2,
				     0x2393b86b6f53b151, 0x963877195940eabd,
				     0x96283ee2a88effe3, 0xbe5e1e2553863992,
				     0x2b0199fc2c85b8aa, 0x0eb72ddc81c52ca2 };
162 static const u64 sha512_k[80] = {
163 0x428a2f98d728ae22, 0x7137449123ef65cd, 0xb5c0fbcfec4d3b2f,
164 0xe9b5dba58189dbbc, 0x3956c25bf348b538, 0x59f111f1b605d019,
165 0x923f82a4af194f9b, 0xab1c5ed5da6d8118, 0xd807aa98a3030242,
166 0x12835b0145706fbe, 0x243185be4ee4b28c, 0x550c7dc3d5ffb4e2,
167 0x72be5d74f27b896f, 0x80deb1fe3b1696b1, 0x9bdc06a725c71235,
168 0xc19bf174cf692694, 0xe49b69c19ef14ad2, 0xefbe4786384f25e3,
169 0x0fc19dc68b8cd5b5, 0x240ca1cc77ac9c65, 0x2de92c6f592b0275,
170 0x4a7484aa6ea6e483, 0x5cb0a9dcbd41fbd4, 0x76f988da831153b5,
171 0x983e5152ee66dfab, 0xa831c66d2db43210, 0xb00327c898fb213f,
172 0xbf597fc7beef0ee4, 0xc6e00bf33da88fc2, 0xd5a79147930aa725,
173 0x06ca6351e003826f, 0x142929670a0e6e70, 0x27b70a8546d22ffc,
174 0x2e1b21385c26c926, 0x4d2c6dfc5ac42aed, 0x53380d139d95b3df,
175 0x650a73548baf63de, 0x766a0abb3c77b2a8, 0x81c2c92e47edaee6,
176 0x92722c851482353b, 0xa2bfe8a14cf10364, 0xa81a664bbc423001,
177 0xc24b8b70d0f89791, 0xc76c51a30654be30, 0xd192e819d6ef5218,
178 0xd69906245565a910, 0xf40e35855771202a, 0x106aa07032bbd1b8,
179 0x19a4c116b8d2d0c8, 0x1e376c085141ab53, 0x2748774cdf8eeb99,
180 0x34b0bcb5e19b48a8, 0x391c0cb3c5c95a63, 0x4ed8aa4ae3418acb,
181 0x5b9cca4f7763e373, 0x682e6ff3d6b2b8a3, 0x748f82ee5defb2fc,
182 0x78a5636f43172f60, 0x84c87814a1f0ab72, 0x8cc702081a6439ec,
183 0x90befffa23631e28, 0xa4506cebde82bde9, 0xbef9a3f7b2c67915,
184 0xc67178f2e372532b, 0xca273eceea26619c, 0xd186b8c721c0c207,
185 0xeada7dd6cde0eb1e, 0xf57d4f7fee6ed178, 0x06f067aa72176fba,
186 0x0a637dc5a2c898a6, 0x113f9804bef90dae, 0x1b710b35131c471b,
187 0x28db77f523047d84, 0x32caab7b40c72493, 0x3c9ebe0a15c9bebc,
188 0x431d67c49c100d4c, 0x4cc5d4becb3e42b6, 0x597f299cfc657e2a,
189 0x5fcb6fab3ad6faec, 0x6c44198c4a475817
/* Largest block/digest sizes across all supported variants (the SHA-512
   family); used to size context buffers that must fit any variant. */
#define SHA2_MAX_BLOCK_SIZE SHA512_BLOCK_SIZE
#define SHA2_MAX_DIGEST_SIZE SHA512_DIGEST_SIZE
/* NOTE(review): context-structure fragment. The enclosing struct/union
   declaration is not visible in this chunk; the members below appear to be
   overlapping views of the pending-block buffer, sized for the largest
   (SHA-512) block — confirm against the upstream declaration. */
#ifdef CLIB_SHA256_ISA
    u8 as_u8[SHA2_MAX_BLOCK_SIZE];
    u64 as_u64[SHA2_MAX_BLOCK_SIZE / sizeof (u64)];
    uword as_uword[SHA2_MAX_BLOCK_SIZE / sizeof (uword)];
/* Initialize a SHA-2 context for the requested variant: reset the byte
   counter, record the variant's block and digest sizes, and load the
   variant's initial hash state into the context.
   NOTE(review): several structural lines (opening brace, the switch
   statement and the case labels for SHA-224/256/384/512, local h32/h64
   pointers, break statements) are missing from this chunk — reconstruct
   from upstream before relying on this text. */
static_always_inline void
clib_sha2_init (clib_sha2_ctx_t *ctx, clib_sha2_type_t type)
  ctx->total_bytes = 0;
      /* per-variant geometry: SHA-224 */
      ctx->block_size = SHA224_BLOCK_SIZE;
      ctx->digest_size = SHA224_DIGEST_SIZE;
      /* SHA-256 */
      ctx->block_size = SHA256_BLOCK_SIZE;
      ctx->digest_size = SHA256_DIGEST_SIZE;
      /* SHA-384 */
      ctx->block_size = SHA384_BLOCK_SIZE;
      ctx->digest_size = SHA384_DIGEST_SIZE;
      /* SHA-512 */
      ctx->block_size = SHA512_BLOCK_SIZE;
      ctx->digest_size = SHA512_DIGEST_SIZE;
    case CLIB_SHA2_512_224:
      ctx->block_size = SHA512_224_BLOCK_SIZE;
      ctx->digest_size = SHA512_224_DIGEST_SIZE;
    case CLIB_SHA2_512_256:
      ctx->block_size = SHA512_256_BLOCK_SIZE;
      ctx->digest_size = SHA512_256_DIGEST_SIZE;
  /* copy the selected variant's H(0) into the context (32-bit variants
     use h32, 64-bit variants use h64) */
  for (int i = 0; i < 8; i++)
    ctx->h32[i] = h32[i];
  for (int i = 0; i < 8; i++)
    ctx->h64[i] = h64[i];
/* Advance one group of four message-schedule words w[i] using the hardware
   SHA message-schedule instructions (Intel SHA-NI or ARMv8 SHA2).
   NOTE(review): the function's return type/braces and the computation of
   the neighbor indices j, k, l are missing from this chunk. */
#ifdef CLIB_SHA256_ISA
clib_sha256_vec_cycle_w (u32x4 w[], u8 i)
#ifdef CLIB_SHA256_ISA_INTEL
  w[i] = (u32x4) _mm_sha256msg1_epu32 ((__m128i) w[i], (__m128i) w[j]);
  w[i] += (u32x4) _mm_alignr_epi8 ((__m128i) w[l], (__m128i) w[k], 4);
  w[i] = (u32x4) _mm_sha256msg2_epu32 ((__m128i) w[i], (__m128i) w[l]);
#elif defined(CLIB_SHA256_ISA_ARM)
  w[i] = vsha256su1q_u32 (vsha256su0q_u32 (w[i], w[j]), w[k], w[l]);
/* Perform four SHA-256 rounds on the vectorized state s[] using schedule
   words w and round-constant group n (sha256_k[4n .. 4n+3]).
   NOTE(review): braces, trailing intrinsic arguments, the ARM locals
   r0/r1 and the saved s0 copy, and the closing #endif are missing from
   this chunk. */
clib_sha256_vec_4_rounds (u32x4 w, u8 n, u32x4 s[])
#ifdef CLIB_SHA256_ISA_INTEL
  /* add round constants to schedule words, then two rounds per rnds2 */
  u32x4 r = *(u32x4 *) (sha256_k + 4 * n) + w;
  s[0] = (u32x4) _mm_sha256rnds2_epu32 ((__m128i) s[0], (__m128i) s[1],
  r = (u32x4) u64x2_interleave_hi ((u64x2) r, (u64x2) r);
  s[1] = (u32x4) _mm_sha256rnds2_epu32 ((__m128i) s[1], (__m128i) s[0],
#elif defined(CLIB_SHA256_ISA_ARM)
  const u32x4u *k = (u32x4u *) sha256_k;
  s[0] = vsha256hq_u32 (s[0], s[1], r0);
  s[1] = vsha256h2q_u32 (s[1], s0, r0);
/* Byte-swap 16 message bytes from big-endian (wire) order to the host
   order expected by the SHA round instructions.
   NOTE(review): the function's return-type line, braces and closing #endif
   are missing from this chunk. */
#if defined(CLIB_SHA256_ISA)
clib_sha256_vec_load (u32x4 r)
#if defined(CLIB_SHA256_ISA_INTEL)
  return u32x4_byte_swap (r);
#elif defined(CLIB_SHA256_ISA_ARM)
  return vreinterpretq_u32_u8 (vrev32q_u8 (vreinterpretq_u8_u32 (r)));
/* Reorder the two state vectors between linear h[0..7] order and the lane
   layout consumed by the Intel sha256rnds2 instruction (the mapping is
   given by the comment below); a no-op on ARM — presumably the ARM
   instructions use linear order, confirm upstream.
   NOTE(review): braces, the declaration of r, the final d[0] assignment
   and the closing #endif are missing from this chunk. */
clib_sha256_vec_shuffle (u32x4 d[2])
#if defined(CLIB_SHA256_ISA_INTEL)
  /* {0, 1, 2, 3}, {4, 5, 6, 7} -> {7, 6, 3, 2}, {5, 4, 1, 0} */
  r = (u32x4) _mm_shuffle_ps ((__m128) d[1], (__m128) d[0], 0xbb);
  d[1] = (u32x4) _mm_shuffle_ps ((__m128) d[1], (__m128) d[0], 0x11);
/* Compress n_blocks consecutive 64-byte blocks of msg into the context
   state: hardware path (Intel SHA-NI / ARMv8 SHA2) when CLIB_SHA256_ISA is
   set, scalar FIPS 180-4 rounds otherwise.
   NOTE(review): structural lines (function braces, declarations of h/w/s/i,
   the #else separating the two paths, loop braces, the state re-add and
   final #endif) are missing from this chunk — reconstruct from upstream. */
clib_sha256_block (clib_sha2_ctx_t *ctx, const u8 *msg, uword n_blocks)
#if defined(CLIB_SHA256_ISA)
  u32x4u *m = (u32x4u *) msg;
  /* load current state (two u32x4 halves) */
  h[0] = ctx->h32x4[0];
  h[1] = ctx->h32x4[1];
  /* convert into the lane order used by the SHA instructions */
  clib_sha256_vec_shuffle (h);
  for (; n_blocks; m += 4, n_blocks--)
      /* load 64 message bytes, big-endian words -> host order */
      w[0] = clib_sha256_vec_load (m[0]);
      w[1] = clib_sha256_vec_load (m[1]);
      w[2] = clib_sha256_vec_load (m[2]);
      w[3] = clib_sha256_vec_load (m[3]);
      /* rounds 0-15 straight from the message words */
      clib_sha256_vec_4_rounds (w[0], 0, s);
      clib_sha256_vec_4_rounds (w[1], 1, s);
      clib_sha256_vec_4_rounds (w[2], 2, s);
      clib_sha256_vec_4_rounds (w[3], 3, s);
      /* rounds 16-63: extend the schedule then run 4 rounds, 12 times */
      clib_sha256_vec_cycle_w (w, 0);
      clib_sha256_vec_4_rounds (w[0], 4, s);
      clib_sha256_vec_cycle_w (w, 1);
      clib_sha256_vec_4_rounds (w[1], 5, s);
      clib_sha256_vec_cycle_w (w, 2);
      clib_sha256_vec_4_rounds (w[2], 6, s);
      clib_sha256_vec_cycle_w (w, 3);
      clib_sha256_vec_4_rounds (w[3], 7, s);
      clib_sha256_vec_cycle_w (w, 0);
      clib_sha256_vec_4_rounds (w[0], 8, s);
      clib_sha256_vec_cycle_w (w, 1);
      clib_sha256_vec_4_rounds (w[1], 9, s);
      clib_sha256_vec_cycle_w (w, 2);
      clib_sha256_vec_4_rounds (w[2], 10, s);
      clib_sha256_vec_cycle_w (w, 3);
      clib_sha256_vec_4_rounds (w[3], 11, s);
      clib_sha256_vec_cycle_w (w, 0);
      clib_sha256_vec_4_rounds (w[0], 12, s);
      clib_sha256_vec_cycle_w (w, 1);
      clib_sha256_vec_4_rounds (w[1], 13, s);
      clib_sha256_vec_cycle_w (w, 2);
      clib_sha256_vec_4_rounds (w[2], 14, s);
      clib_sha256_vec_cycle_w (w, 3);
      clib_sha256_vec_4_rounds (w[3], 15, s);
  /* convert back to linear order and store the updated state */
  clib_sha256_vec_shuffle (h);
  ctx->h32x4[0] = h[0];
  ctx->h32x4[1] = h[1];
  /* scalar path */
  for (i = 0; i < 8; i++)   /* load working variables from current state */
  for (i = 0; i < 16; i++)
      /* rounds 0-15 consume the byte-swapped message words directly */
      w[i] = clib_net_to_host_u32 (*((u32 *) msg + i));
      SHA256_TRANSFORM (s, w, i, sha256_k[i]);
  for (i = 16; i < 64; i++)
      /* rounds 16-63 extend the schedule on the fly */
      SHA256_MSG_SCHED (w, i);
      SHA256_TRANSFORM (s, w, i, sha256_k[i]);
  for (i = 0; i < 8; i++)   /* add working variables back into the state */
  msg += SHA256_BLOCK_SIZE;
/* Compress n_blocks consecutive 128-byte blocks of msg into the context
   state with the scalar SHA-512 rounds (FIPS 180-4).
   NOTE(review): local declarations (the 80-entry schedule w, working
   variables s, index i), the outer per-block loop and all braces are
   missing from this chunk — reconstruct from upstream. */
static_always_inline void
clib_sha512_block (clib_sha2_ctx_t *ctx, const u8 *msg, uword n_blocks)
  for (i = 0; i < 8; i++)   /* load working variables from current state */
  for (i = 0; i < 16; i++)
      /* rounds 0-15 consume the byte-swapped message words directly */
      w[i] = clib_net_to_host_u64 (*((u64 *) msg + i));
      SHA512_TRANSFORM (s, w, i, sha512_k[i]);
  for (i = 16; i < 80; i++)
      /* rounds 16-79 extend the schedule on the fly */
      SHA512_MSG_SCHED (w, i);
      SHA512_TRANSFORM (s, w, i, sha512_k[i]);
  for (i = 0; i < 8; i++)   /* add working variables back into the state */
  msg += SHA512_BLOCK_SIZE;
/* Absorb n_bytes of msg: data is buffered in ctx->pending until a full
   block accumulates; full blocks are compressed directly from msg, and any
   tail is left in pending for the next update/final call.
   NOTE(review): braces, `else` lines pairing the sha512/sha256 calls, the
   n_blocks declaration and early returns are missing from this chunk. */
static_always_inline void
clib_sha2_update (clib_sha2_ctx_t *ctx, const u8 *msg, uword n_bytes)
  uword n_left = ctx->block_size - ctx->n_pending;
  /* partially filled pending buffer: append, and flush if it fills up */
  if (n_bytes < n_left)
      clib_memcpy_fast (ctx->pending.as_u8 + ctx->n_pending, msg, n_bytes);
      ctx->n_pending += n_bytes;
      clib_memcpy_fast (ctx->pending.as_u8 + ctx->n_pending, msg, n_left);
      if (ctx->block_size == SHA512_BLOCK_SIZE)
	clib_sha512_block (ctx, ctx->pending.as_u8, 1);
	clib_sha256_block (ctx, ctx->pending.as_u8, 1);
      ctx->total_bytes += ctx->block_size;
  /* compress whole blocks straight from the caller's buffer */
  if ((n_blocks = n_bytes / ctx->block_size))
      if (ctx->block_size == SHA512_BLOCK_SIZE)
	clib_sha512_block (ctx, msg, n_blocks);
	clib_sha256_block (ctx, msg, n_blocks);
      n_bytes -= n_blocks * ctx->block_size;
      msg += n_blocks * ctx->block_size;
      ctx->total_bytes += n_blocks * ctx->block_size;
  /* stash the remaining tail for the next call */
  clib_memset_u8 (ctx->pending.as_u8, 0, ctx->block_size);
  clib_memcpy_fast (ctx->pending.as_u8, msg, n_bytes);
  ctx->n_pending = n_bytes;
/* Finalize: apply FIPS 180-4 padding (a 0x80 byte, zeros, then the total
   message length in bits as a big-endian u64 in the last 8 bytes of the
   final block), compress the last block(s) and write the digest.
   NOTE(review): braces, `else` lines and the loop-variable declaration are
   missing from this chunk — reconstruct from upstream. */
static_always_inline void
clib_sha2_final (clib_sha2_ctx_t *ctx, u8 *digest)
  ctx->total_bytes += ctx->n_pending;
  if (ctx->n_pending == 0)
      /* empty pending buffer: padding starts a fresh block */
      clib_memset (ctx->pending.as_u8, 0, ctx->block_size);
      ctx->pending.as_u8[0] = 0x80;
  else if (ctx->n_pending + sizeof (u64) + sizeof (u8) > ctx->block_size)
      /* no room for pad byte + length: flush one block, pad in the next */
      ctx->pending.as_u8[ctx->n_pending] = 0x80;
      if (ctx->block_size == SHA512_BLOCK_SIZE)
	clib_sha512_block (ctx, ctx->pending.as_u8, 1);
	clib_sha256_block (ctx, ctx->pending.as_u8, 1);
      clib_memset (ctx->pending.as_u8, 0, ctx->block_size);
    ctx->pending.as_u8[ctx->n_pending] = 0x80;
  /* big-endian bit-length in the last 8 bytes of the final block */
  ctx->pending.as_u64[ctx->block_size / 8 - 1] =
    clib_net_to_host_u64 (ctx->total_bytes * 8);
  if (ctx->block_size == SHA512_BLOCK_SIZE)
    clib_sha512_block (ctx, ctx->pending.as_u8, 1);
    clib_sha256_block (ctx, ctx->pending.as_u8, 1);
  /* emit the digest, big-endian; 64-bit variants may truncate */
  if (ctx->block_size == SHA512_BLOCK_SIZE)
      for (i = 0; i < ctx->digest_size / sizeof (u64); i++)
	*((u64 *) digest + i) = clib_net_to_host_u64 (ctx->h64[i]);
      /* sha512-224 case - write half of u64 */
      if (i * sizeof (u64) < ctx->digest_size)
	*((u32 *) digest + 2 * i) = clib_net_to_host_u32 (ctx->h64[i] >> 32);
  for (i = 0; i < ctx->digest_size / sizeof (u32); i++)
    *((u32 *) digest + i) = clib_net_to_host_u32 (ctx->h32[i]);
557 static_always_inline void
558 clib_sha2 (clib_sha2_type_t type, const u8 *msg, uword len, u8 *digest)
561 clib_sha2_init (&ctx, type);
562 clib_sha2_update (&ctx, msg, len);
563 clib_sha2_final (&ctx, digest);
/* Convenience one-shot wrappers: clib_shaXXX (msg, len, digest). */
#define clib_sha224(...) clib_sha2 (CLIB_SHA2_224, __VA_ARGS__)
#define clib_sha256(...) clib_sha2 (CLIB_SHA2_256, __VA_ARGS__)
#define clib_sha384(...) clib_sha2 (CLIB_SHA2_384, __VA_ARGS__)
#define clib_sha512(...) clib_sha2 (CLIB_SHA2_512, __VA_ARGS__)
#define clib_sha512_224(...) clib_sha2 (CLIB_SHA2_512_224, __VA_ARGS__)
#define clib_sha512_256(...) clib_sha2 (CLIB_SHA2_512_256, __VA_ARGS__)
/* HMAC (RFC 2104) over any SHA-2 variant: keys longer than the block size
   are first hashed; the key is then XORed with the 0x36 (inner) and 0x5c
   (outer) pads, and digest = H(opad_key || H(ipad_key || msg)).
   NOTE(review): braces, `else` lines, the declarations of i/n_words and
   the zero-fill assignment inside the key-hashing loop are missing from
   this chunk — reconstruct from upstream. */
static_always_inline void
clib_hmac_sha2 (clib_sha2_type_t type, const u8 *key, uword key_len,
		const u8 *msg, uword len, u8 *digest)
  clib_sha2_ctx_t _ctx, *ctx = &_ctx;
  uword key_data[SHA2_MAX_BLOCK_SIZE / sizeof (uword)];
  u8 i_digest[SHA2_MAX_DIGEST_SIZE];
  clib_sha2_init (ctx, type);
  n_words = ctx->block_size / sizeof (uword);
  if (key_len > ctx->block_size)
      /* key is longer than block, calculate hash of key */
      clib_sha2_update (ctx, key, key_len);
      /* zero-pad the rest of key_data past the digest */
      for (i = (ctx->digest_size / sizeof (uword)) / 2; i < n_words; i++)
      clib_sha2_final (ctx, (u8 *) key_data);
      clib_sha2_init (ctx, type);
  /* short key: zero key_data then copy the key in */
  for (i = 0; i < n_words; i++)
  clib_memcpy_fast (key_data, key, key_len);
  /* inner hash: compress (key XOR ipad 0x36..) as the first block */
  for (i = 0; i < n_words; i++)
    ctx->pending.as_uword[i] = key_data[i] ^ (uword) 0x3636363636363636;
  if (ctx->block_size == SHA512_BLOCK_SIZE)
    clib_sha512_block (ctx, ctx->pending.as_u8, 1);
    clib_sha256_block (ctx, ctx->pending.as_u8, 1);
  ctx->total_bytes += ctx->block_size;
  clib_sha2_update (ctx, msg, len);
  clib_sha2_final (ctx, i_digest);
  /* outer hash: (key XOR opad 0x5c..) then the inner digest */
  clib_sha2_init (ctx, type);
  for (i = 0; i < n_words; i++)
    ctx->pending.as_uword[i] = key_data[i] ^ (uword) 0x5c5c5c5c5c5c5c5c;
  if (ctx->block_size == SHA512_BLOCK_SIZE)
    clib_sha512_block (ctx, ctx->pending.as_u8, 1);
    clib_sha256_block (ctx, ctx->pending.as_u8, 1);
  ctx->total_bytes += ctx->block_size;
  clib_sha2_update (ctx, i_digest, ctx->digest_size);
  clib_sha2_final (ctx, digest);
/* Convenience HMAC wrappers: clib_hmac_shaXXX (key, key_len, msg, len,
   digest). */
#define clib_hmac_sha224(...) clib_hmac_sha2 (CLIB_SHA2_224, __VA_ARGS__)
#define clib_hmac_sha256(...) clib_hmac_sha2 (CLIB_SHA2_256, __VA_ARGS__)
#define clib_hmac_sha384(...) clib_hmac_sha2 (CLIB_SHA2_384, __VA_ARGS__)
#define clib_hmac_sha512(...) clib_hmac_sha2 (CLIB_SHA2_512, __VA_ARGS__)
#define clib_hmac_sha512_224(...)                                             \
  clib_hmac_sha2 (CLIB_SHA2_512_224, __VA_ARGS__)
#define clib_hmac_sha512_256(...)                                             \
  clib_hmac_sha2 (CLIB_SHA2_512_256, __VA_ARGS__)
639 #endif /* included_sha2_h */