/* SPDX-License-Identifier: Apache-2.0
 * Copyright(c) 2024 Cisco Systems, Inc.
 */
5 #ifndef included_sha2_h
6 #define included_sha2_h
8 #include <vppinfra/clib.h>
9 #include <vppinfra/vector.h>
10 #include <vppinfra/string.h>
/* Scalar SHA-256 primitives (FIPS 180-4, section 4.1.2).  Macro arguments
 * are fully parenthesized so the macros stay safe with compound
 * expressions.  Note: no trailing semicolons - the CSIGMA macros are used
 * as sub-expressions (e.g. `t1 += SHA256_CSIGMA1 (s[4]);`), matching the
 * SHA512_* counterparts below. */
#define SHA256_ROTR(x, y) (((x) >> (y)) | ((x) << (32 - (y))))
#define SHA256_CH(a, b, c)  (((a) & (b)) ^ (~(a) & (c)))
#define SHA256_MAJ(a, b, c) (((a) & (b)) ^ ((a) & (c)) ^ ((b) & (c)))
#define SHA256_CSIGMA0(x)                                                     \
  (SHA256_ROTR (x, 2) ^ SHA256_ROTR (x, 13) ^ SHA256_ROTR (x, 22))
#define SHA256_CSIGMA1(x)                                                     \
  (SHA256_ROTR (x, 6) ^ SHA256_ROTR (x, 11) ^ SHA256_ROTR (x, 25))
#define SHA256_SSIGMA0(x)                                                     \
  (SHA256_ROTR (x, 7) ^ SHA256_ROTR (x, 18) ^ ((x) >> 3))
#define SHA256_SSIGMA1(x)                                                     \
  (SHA256_ROTR (x, 17) ^ SHA256_ROTR (x, 19) ^ ((x) >> 10))
/* SHA-256 message-schedule expansion for word j, 16 <= j < 64
 * (FIPS 180-4 eq. 6.5).  Braced so the macro expands to one statement;
 * truncated brace block restored. */
#define SHA256_MSG_SCHED(w, j)                                                \
  {                                                                           \
    w[j] = w[j - 7] + w[j - 16];                                              \
    w[j] += SHA256_SSIGMA0 (w[j - 15]);                                       \
    w[j] += SHA256_SSIGMA1 (w[j - 2]);                                        \
  }
/* One SHA-256 compression round: mixes message word w[i] and round constant
 * k into working state s[0..7], then rotates the state (FIPS 180-4,
 * section 6.2.2 step 3).  Truncated state-rotation tail and braces
 * restored. */
#define SHA256_TRANSFORM(s, w, i, k)                                          \
  {                                                                           \
    __typeof__ (s[0]) t1, t2;                                                 \
    t1 = k + w[i] + s[7];                                                     \
    t1 += SHA256_CSIGMA1 (s[4]);                                              \
    t1 += SHA256_CH (s[4], s[5], s[6]);                                       \
    t2 = SHA256_CSIGMA0 (s[0]);                                               \
    t2 += SHA256_MAJ (s[0], s[1], s[2]);                                      \
    s[7] = s[6];                                                              \
    s[6] = s[5];                                                              \
    s[5] = s[4];                                                              \
    s[4] = s[3] + t1;                                                         \
    s[3] = s[2];                                                              \
    s[2] = s[1];                                                              \
    s[1] = s[0];                                                              \
    s[0] = t1 + t2;                                                           \
  }
/* Scalar SHA-512 primitives (FIPS 180-4, section 4.1.3).  Macro arguments
 * are fully parenthesized for safety with compound expressions. */
#define SHA512_ROTR(x, y) (((x) >> (y)) | ((x) << (64 - (y))))
#define SHA512_CH(a, b, c)  (((a) & (b)) ^ (~(a) & (c)))
#define SHA512_MAJ(a, b, c) (((a) & (b)) ^ ((a) & (c)) ^ ((b) & (c)))
#define SHA512_CSIGMA0(x)                                                     \
  (SHA512_ROTR (x, 28) ^ SHA512_ROTR (x, 34) ^ SHA512_ROTR (x, 39))
#define SHA512_CSIGMA1(x)                                                     \
  (SHA512_ROTR (x, 14) ^ SHA512_ROTR (x, 18) ^ SHA512_ROTR (x, 41))
#define SHA512_SSIGMA0(x)                                                     \
  (SHA512_ROTR (x, 1) ^ SHA512_ROTR (x, 8) ^ ((x) >> 7))
#define SHA512_SSIGMA1(x)                                                     \
  (SHA512_ROTR (x, 19) ^ SHA512_ROTR (x, 61) ^ ((x) >> 6))
/* SHA-512 message-schedule expansion for word j, 16 <= j < 80
 * (FIPS 180-4 eq. 6.7).  Braced so the macro expands to one statement;
 * truncated brace block restored. */
#define SHA512_MSG_SCHED(w, j)                                                \
  {                                                                           \
    w[j] = w[j - 7] + w[j - 16];                                              \
    w[j] += SHA512_SSIGMA0 (w[j - 15]);                                       \
    w[j] += SHA512_SSIGMA1 (w[j - 2]);                                        \
  }
/* One SHA-512 compression round: mixes message word w[i] and round constant
 * k into working state s[0..7], then rotates the state (FIPS 180-4,
 * section 6.4.2 step 3).  Truncated state-rotation tail and braces
 * restored. */
#define SHA512_TRANSFORM(s, w, i, k)                                          \
  {                                                                           \
    __typeof__ (s[0]) t1, t2;                                                 \
    t1 = k + w[i] + s[7];                                                     \
    t1 += SHA512_CSIGMA1 (s[4]);                                              \
    t1 += SHA512_CH (s[4], s[5], s[6]);                                       \
    t2 = SHA512_CSIGMA0 (s[0]);                                               \
    t2 += SHA512_MAJ (s[0], s[1], s[2]);                                      \
    s[7] = s[6];                                                              \
    s[6] = s[5];                                                              \
    s[5] = s[4];                                                              \
    s[4] = s[3] + t1;                                                         \
    s[3] = s[2];                                                              \
    s[2] = s[1];                                                              \
    s[1] = s[0];                                                              \
    s[0] = t1 + t2;                                                           \
  }
/* Detect hardware SHA-256 support: Intel SHA-NI on x86-64 or the ARMv8
 * SHA2 crypto extension.  CLIB_SHA256_ISA is the ISA-agnostic "accelerated
 * path available" flag.  Missing #endif directives restored. */
#if defined(__SHA__) && defined(__x86_64__)
#define CLIB_SHA256_ISA_INTEL
#define CLIB_SHA256_ISA
#endif

#ifdef __ARM_FEATURE_SHA2
#define CLIB_SHA256_ISA_ARM
#define CLIB_SHA256_ISA
#endif
/* SHA-224 initial hash values H(0) (FIPS 180-4, section 5.3.2) */
static const u32 sha224_h[8] = { 0xc1059ed8, 0x367cd507, 0x3070dd17,
				 0xf70e5939, 0xffc00b31, 0x68581511,
				 0x64f98fa7, 0xbefa4fa4 };

/* SHA-256 initial hash values H(0) (FIPS 180-4, section 5.3.3) */
static const u32 sha256_h[8] = { 0x6a09e667, 0xbb67ae85, 0x3c6ef372,
				 0xa54ff53a, 0x510e527f, 0x9b05688c,
				 0x1f83d9ab, 0x5be0cd19 };
102 static const u32 clib_sha2_256_k[64] = {
103 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b, 0x59f111f1,
104 0x923f82a4, 0xab1c5ed5, 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3,
105 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174, 0xe49b69c1, 0xefbe4786,
106 0x0fc19dc6, 0x240ca1cc, 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da,
107 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, 0xc6e00bf3, 0xd5a79147,
108 0x06ca6351, 0x14292967, 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13,
109 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85, 0xa2bfe8a1, 0xa81a664b,
110 0xc24b8b70, 0xc76c51a3, 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070,
111 0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, 0x391c0cb3, 0x4ed8aa4a,
112 0x5b9cca4f, 0x682e6ff3, 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208,
113 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
/* SHA-384 initial hash values H(0) (FIPS 180-4, section 5.3.4) */
static const u64 sha384_h[8] = { 0xcbbb9d5dc1059ed8, 0x629a292a367cd507,
				 0x9159015a3070dd17, 0x152fecd8f70e5939,
				 0x67332667ffc00b31, 0x8eb44a8768581511,
				 0xdb0c2e0d64f98fa7, 0x47b5481dbefa4fa4 };

/* SHA-512 initial hash values H(0) (FIPS 180-4, section 5.3.5) */
static const u64 sha512_h[8] = { 0x6a09e667f3bcc908, 0xbb67ae8584caa73b,
				 0x3c6ef372fe94f82b, 0xa54ff53a5f1d36f1,
				 0x510e527fade682d1, 0x9b05688c2b3e6c1f,
				 0x1f83d9abfb41bd6b, 0x5be0cd19137e2179 };

/* SHA-512/224 initial hash values H(0) (FIPS 180-4, section 5.3.6.1) */
static const u64 sha512_224_h[8] = { 0x8c3d37c819544da2, 0x73e1996689dcd4d6,
				     0x1dfab7ae32ff9c82, 0x679dd514582f9fcf,
				     0x0f6d2b697bd44da8, 0x77e36f7304c48942,
				     0x3f9d85a86a1d36c8, 0x1112e6ad91d692a1 };

/* SHA-512/256 initial hash values H(0) (FIPS 180-4, section 5.3.6.2) */
static const u64 sha512_256_h[8] = { 0x22312194fc2bf72c, 0x9f555fa3c84c64c2,
				     0x2393b86b6f53b151, 0x963877195940eabd,
				     0x96283ee2a88effe3, 0xbe5e1e2553863992,
				     0x2b0199fc2c85b8aa, 0x0eb72ddc81c52ca2 };
136 static const u64 clib_sha2_512_k[80] = {
137 0x428a2f98d728ae22, 0x7137449123ef65cd, 0xb5c0fbcfec4d3b2f,
138 0xe9b5dba58189dbbc, 0x3956c25bf348b538, 0x59f111f1b605d019,
139 0x923f82a4af194f9b, 0xab1c5ed5da6d8118, 0xd807aa98a3030242,
140 0x12835b0145706fbe, 0x243185be4ee4b28c, 0x550c7dc3d5ffb4e2,
141 0x72be5d74f27b896f, 0x80deb1fe3b1696b1, 0x9bdc06a725c71235,
142 0xc19bf174cf692694, 0xe49b69c19ef14ad2, 0xefbe4786384f25e3,
143 0x0fc19dc68b8cd5b5, 0x240ca1cc77ac9c65, 0x2de92c6f592b0275,
144 0x4a7484aa6ea6e483, 0x5cb0a9dcbd41fbd4, 0x76f988da831153b5,
145 0x983e5152ee66dfab, 0xa831c66d2db43210, 0xb00327c898fb213f,
146 0xbf597fc7beef0ee4, 0xc6e00bf33da88fc2, 0xd5a79147930aa725,
147 0x06ca6351e003826f, 0x142929670a0e6e70, 0x27b70a8546d22ffc,
148 0x2e1b21385c26c926, 0x4d2c6dfc5ac42aed, 0x53380d139d95b3df,
149 0x650a73548baf63de, 0x766a0abb3c77b2a8, 0x81c2c92e47edaee6,
150 0x92722c851482353b, 0xa2bfe8a14cf10364, 0xa81a664bbc423001,
151 0xc24b8b70d0f89791, 0xc76c51a30654be30, 0xd192e819d6ef5218,
152 0xd69906245565a910, 0xf40e35855771202a, 0x106aa07032bbd1b8,
153 0x19a4c116b8d2d0c8, 0x1e376c085141ab53, 0x2748774cdf8eeb99,
154 0x34b0bcb5e19b48a8, 0x391c0cb3c5c95a63, 0x4ed8aa4ae3418acb,
155 0x5b9cca4f7763e373, 0x682e6ff3d6b2b8a3, 0x748f82ee5defb2fc,
156 0x78a5636f43172f60, 0x84c87814a1f0ab72, 0x8cc702081a6439ec,
157 0x90befffa23631e28, 0xa4506cebde82bde9, 0xbef9a3f7b2c67915,
158 0xc67178f2e372532b, 0xca273eceea26619c, 0xd186b8c721c0c207,
159 0xeada7dd6cde0eb1e, 0xf57d4f7fee6ed178, 0x06f067aa72176fba,
160 0x0a637dc5a2c898a6, 0x113f9804bef90dae, 0x1b710b35131c471b,
161 0x28db77f523047d84, 0x32caab7b40c72493, 0x3c9ebe0a15c9bebc,
162 0x431d67c49c100d4c, 0x4cc5d4becb3e42b6, 0x597f299cfc657e2a,
163 0x5fcb6fab3ad6faec, 0x6c44198c4a475817
/* Message block sizes in bytes: 64 for the SHA-224/256 family, 128 for the
 * SHA-384/512 family (FIPS 180-4, section 1).  MAX values size shared
 * buffers that must hold any variant. */
#define CLIB_SHA2_256_BLOCK_SIZE 64
#define CLIB_SHA2_512_BLOCK_SIZE 128
#define SHA2_MAX_BLOCK_SIZE CLIB_SHA2_512_BLOCK_SIZE
#define SHA2_MAX_DIGEST_SIZE 64
/* Per-variant parameter table indexed by clib_sha2_type_t (block size,
 * digest size and initial hash values).
 * NOTE(review): the struct declaration and several entry fields
 * (.digest_size, .h32/.h64 pointers, entry index designators and closing
 * braces) appear truncated in this view - confirm against upstream. */
} clib_sha2_variants[] = {
    .block_size = CLIB_SHA2_256_BLOCK_SIZE,
    .block_size = CLIB_SHA2_256_BLOCK_SIZE,
    .block_size = CLIB_SHA2_512_BLOCK_SIZE,
    .block_size = CLIB_SHA2_512_BLOCK_SIZE,
  [CLIB_SHA2_512_224] = {
    .block_size = CLIB_SHA2_512_BLOCK_SIZE,
  [CLIB_SHA2_512_256] = {
    .block_size = CLIB_SHA2_512_BLOCK_SIZE,
/* NOTE(review): the following lines look like fragments of the hash-state
 * union (clib_sha2_h_t), the pending-block union inside
 * clib_sha2_state_t, and the clib_sha2_ctx_t fields - the enclosing
 * typedef headers/footers appear truncated in this view; confirm against
 * upstream. */
#ifdef CLIB_SHA256_ISA
  /* pending partial block, viewable as bytes, u64s or machine words */
  u8 as_u8[SHA2_MAX_BLOCK_SIZE];
  u64 as_u64[SHA2_MAX_BLOCK_SIZE / sizeof (u64)];
  uword as_uword[SHA2_MAX_BLOCK_SIZE / sizeof (uword)];
  /* selected SHA-2 variant */
  clib_sha2_type_t type;
  /* running hash state */
  clib_sha2_state_t state;
250 static_always_inline void
251 clib_sha2_state_init (clib_sha2_state_t *state, clib_sha2_type_t type)
253 clib_sha2_state_t st = {};
255 if (clib_sha2_variants[type].block_size == CLIB_SHA2_256_BLOCK_SIZE)
256 for (int i = 0; i < 8; i++)
257 st.h.h32[i] = clib_sha2_variants[type].h32[i];
259 for (int i = 0; i < 8; i++)
260 st.h.h64[i] = clib_sha2_variants[type].h64[i];
265 static_always_inline void
266 clib_sha2_init (clib_sha2_ctx_t *ctx, clib_sha2_type_t type)
268 clib_sha2_state_init (&ctx->state, type);
269 ctx->block_size = clib_sha2_variants[type].block_size;
270 ctx->digest_size = clib_sha2_variants[type].digest_size;
#ifdef CLIB_SHA256_ISA
/* Advance four message-schedule words in place using the SHA ISA
 * extensions (Intel SHA-NI or ARMv8 SHA2).
 * NOTE(review): the return-type line, braces and the local lane indices
 * (presumably j/k/l derived from i) appear truncated in this view -
 * confirm against upstream. */
clib_sha256_vec_cycle_w (u32x4 w[], u8 i)
#ifdef CLIB_SHA256_ISA_INTEL
  /* sha256msg1 + alignr + sha256msg2 together implement the FIPS 180-4
   * schedule recurrence for four consecutive words */
  w[i] = (u32x4) _mm_sha256msg1_epu32 ((__m128i) w[i], (__m128i) w[j]);
  w[i] += (u32x4) _mm_alignr_epi8 ((__m128i) w[l], (__m128i) w[k], 4);
  w[i] = (u32x4) _mm_sha256msg2_epu32 ((__m128i) w[i], (__m128i) w[l]);
#elif defined(CLIB_SHA256_ISA_ARM)
  w[i] = vsha256su1q_u32 (vsha256su0q_u32 (w[i], w[j]), w[k], w[l]);
/* Perform four SHA-256 rounds (round group n) on the vectorized state s
 * using message words w and the round constants.
 * NOTE(review): the return-type line, braces and parts of both ISA
 * branches appear truncated in this view - confirm against upstream. */
clib_sha256_vec_4_rounds (u32x4 w, u8 n, u32x4 s[])
#ifdef CLIB_SHA256_ISA_INTEL
  /* add round constants for group n to the message words */
  u32x4 r = *(u32x4 *) (clib_sha2_256_k + 4 * n) + w;
  s[0] = (u32x4) _mm_sha256rnds2_epu32 ((__m128i) s[0], (__m128i) s[1],
  /* move the upper two constants into the low lanes for the next 2 rounds */
  r = (u32x4) u64x2_interleave_hi ((u64x2) r, (u64x2) r);
  s[1] = (u32x4) _mm_sha256rnds2_epu32 ((__m128i) s[1], (__m128i) s[0],
#elif defined(CLIB_SHA256_ISA_ARM)
  /* round-constant table viewed as unaligned vectors */
  const u32x4u *k = (u32x4u *) clib_sha2_256_k;
  s[0] = vsha256hq_u32 (s[0], s[1], r0);
  s[1] = vsha256h2q_u32 (s[1], s0, r0);
#if defined(CLIB_SHA256_ISA)
/* Load one 16-byte message chunk and convert from big-endian byte order
 * to host lane order for the SHA instructions.
 * NOTE(review): return-type lines, braces and #endif markers appear
 * truncated in this view - confirm against upstream. */
clib_sha256_vec_load (u32x4 r)
#if defined(CLIB_SHA256_ISA_INTEL)
  return u32x4_byte_swap (r);
#elif defined(CLIB_SHA256_ISA_ARM)
  return vreinterpretq_u32_u8 (vrev32q_u8 (vreinterpretq_u8_u32 (r)));
/* Reorder the eight 32-bit state words between the linear a..h layout and
 * the lane layout the Intel sha256rnds2 instruction expects (no-op on
 * ARM, judging by the Intel-only branch). */
clib_sha256_vec_shuffle (u32x4 d[2])
#if defined(CLIB_SHA256_ISA_INTEL)
  /* {0, 1, 2, 3}, {4, 5, 6, 7} -> {7, 6, 3, 2}, {5, 4, 1, 0} */
  r = (u32x4) _mm_shuffle_ps ((__m128) d[1], (__m128) d[0], 0xbb);
  d[1] = (u32x4) _mm_shuffle_ps ((__m128) d[1], (__m128) d[0], 0x11);
/* Compress n_blocks 64-byte message blocks into the SHA-256 state st.
 * Uses the hardware SHA ISA when available, otherwise a scalar FIPS 180-4
 * implementation.
 * NOTE(review): the return-type line, braces and several declarations
 * (h[], s[], w[] locals, the #else separating the two paths, the scalar
 * state load/store lines) appear truncated in this view - confirm against
 * upstream. */
clib_sha256_block (clib_sha2_state_t *st, const u8 *msg, uword n_blocks)
#if defined(CLIB_SHA256_ISA)
  /* hardware-accelerated path */
  u32x4u *m = (u32x4u *) msg;

  h[0] = st->h.h32x4[0];
  h[1] = st->h.h32x4[1];

  /* convert state to the lane order the SHA instructions expect */
  clib_sha256_vec_shuffle (h);

  /* each iteration consumes one 64-byte block (four u32x4 loads) */
  for (; n_blocks; m += 4, n_blocks--)
      /* load and byte-swap the 16 message words */
      w[0] = clib_sha256_vec_load (m[0]);
      w[1] = clib_sha256_vec_load (m[1]);
      w[2] = clib_sha256_vec_load (m[2]);
      w[3] = clib_sha256_vec_load (m[3]);

      /* rounds 0-15 use the message words directly */
      clib_sha256_vec_4_rounds (w[0], 0, s);
      clib_sha256_vec_4_rounds (w[1], 1, s);
      clib_sha256_vec_4_rounds (w[2], 2, s);
      clib_sha256_vec_4_rounds (w[3], 3, s);

      /* rounds 16-63: expand the schedule then run four rounds at a time */
      clib_sha256_vec_cycle_w (w, 0);
      clib_sha256_vec_4_rounds (w[0], 4, s);
      clib_sha256_vec_cycle_w (w, 1);
      clib_sha256_vec_4_rounds (w[1], 5, s);
      clib_sha256_vec_cycle_w (w, 2);
      clib_sha256_vec_4_rounds (w[2], 6, s);
      clib_sha256_vec_cycle_w (w, 3);
      clib_sha256_vec_4_rounds (w[3], 7, s);

      clib_sha256_vec_cycle_w (w, 0);
      clib_sha256_vec_4_rounds (w[0], 8, s);
      clib_sha256_vec_cycle_w (w, 1);
      clib_sha256_vec_4_rounds (w[1], 9, s);
      clib_sha256_vec_cycle_w (w, 2);
      clib_sha256_vec_4_rounds (w[2], 10, s);
      clib_sha256_vec_cycle_w (w, 3);
      clib_sha256_vec_4_rounds (w[3], 11, s);

      clib_sha256_vec_cycle_w (w, 0);
      clib_sha256_vec_4_rounds (w[0], 12, s);
      clib_sha256_vec_cycle_w (w, 1);
      clib_sha256_vec_4_rounds (w[1], 13, s);
      clib_sha256_vec_cycle_w (w, 2);
      clib_sha256_vec_4_rounds (w[2], 14, s);
      clib_sha256_vec_cycle_w (w, 3);
      clib_sha256_vec_4_rounds (w[3], 15, s);

  /* convert state back to linear order and store */
  clib_sha256_vec_shuffle (h);

  st->h.h32x4[0] = h[0];
  st->h.h32x4[1] = h[1];

  /* scalar fallback path */
  for (; n_blocks; msg += CLIB_SHA2_256_BLOCK_SIZE, n_blocks--)
      /* load working state from the running hash */
      for (i = 0; i < 8; i++)
      /* rounds 0-15: byte-swap message words while transforming */
      for (i = 0; i < 16; i++)
	  w[i] = clib_net_to_host_u32 ((((u32u *) msg)[i]));
	  SHA256_TRANSFORM (s, w, i, clib_sha2_256_k[i]);
      /* rounds 16-63: expand the message schedule on the fly */
      for (i = 16; i < 64; i++)
	  SHA256_MSG_SCHED (w, i);
	  SHA256_TRANSFORM (s, w, i, clib_sha2_256_k[i]);
      /* add working state back into the running hash */
      for (i = 0; i < 8; i++)
/* Compress n_blocks 128-byte message blocks into the SHA-512 state st
 * (scalar FIPS 180-4 implementation, 80 rounds over 64-bit words).
 * NOTE(review): braces, the w[80]/s[8]/i declarations and the state
 * load/store statements appear truncated in this view - confirm against
 * upstream. */
static_always_inline void
clib_sha512_block (clib_sha2_state_t *st, const u8 *msg, uword n_blocks)
  for (; n_blocks; msg += CLIB_SHA2_512_BLOCK_SIZE, n_blocks--)
      /* load working state from the running hash */
      for (i = 0; i < 8; i++)
      /* rounds 0-15: byte-swap message words while transforming */
      for (i = 0; i < 16; i++)
	  w[i] = clib_net_to_host_u64 ((((u64u *) msg)[i]));
	  SHA512_TRANSFORM (s, w, i, clib_sha2_512_k[i]);
      /* rounds 16-79: expand the message schedule on the fly */
      for (i = 16; i < 80; i++)
	  SHA512_MSG_SCHED (w, i);
	  SHA512_TRANSFORM (s, w, i, clib_sha2_512_k[i]);
      /* add working state back into the running hash */
      for (i = 0; i < 8; i++)
/* Streaming update: absorb n_bytes of msg into state st, buffering any
 * trailing partial block in st->pending.
 * NOTE(review): the n_bytes parameter, the n_blocks local, the
 * `if (st->n_pending)` wrapper around the buffered-bytes branch, early
 * returns and closing braces appear truncated in this view - confirm
 * against upstream. */
static_always_inline void
clib_sha2_update_internal (clib_sha2_state_t *st, u8 block_size, const u8 *msg,
      /* bytes still needed to complete the buffered partial block */
      uword n_left = block_size - st->n_pending;
      if (n_bytes < n_left)
	  /* not enough to fill a block - just append to the buffer */
	  clib_memcpy_fast (st->pending.as_u8 + st->n_pending, msg, n_bytes);
	  st->n_pending += n_bytes;
      /* complete the buffered block and compress it */
      clib_memcpy_fast (st->pending.as_u8 + st->n_pending, msg, n_left);
      if (block_size == CLIB_SHA2_512_BLOCK_SIZE)
	clib_sha512_block (st, st->pending.as_u8, 1);
	clib_sha256_block (st, st->pending.as_u8, 1);
      st->total_bytes += block_size;
  /* compress as many whole blocks as possible directly from msg */
  if ((n_blocks = n_bytes / block_size))
      if (block_size == CLIB_SHA2_512_BLOCK_SIZE)
	clib_sha512_block (st, msg, n_blocks);
	clib_sha256_block (st, msg, n_blocks);
      n_bytes -= n_blocks * block_size;
      msg += n_blocks * block_size;
      st->total_bytes += n_blocks * block_size;
  /* stash the remaining tail bytes for the next update/final call */
  clib_memset_u8 (st->pending.as_u8, 0, block_size);
  clib_memcpy_fast (st->pending.as_u8, msg, n_bytes);
  st->n_pending = n_bytes;
512 static_always_inline void
513 clib_sha2_update (clib_sha2_ctx_t *ctx, const u8 *msg, uword n_bytes)
515 clib_sha2_update_internal (&ctx->state, ctx->block_size, msg, n_bytes);
/* Finalize: apply FIPS 180-4 padding (0x80, zeros, big-endian bit length),
 * compress the last block(s) and write the big-endian digest.
 * NOTE(review): the digest parameter line, the loop-index declaration,
 * `else` keywords and several braces appear truncated in this view -
 * confirm against upstream. */
static_always_inline void
clib_sha2_final_internal (clib_sha2_state_t *st, u8 block_size, u8 digest_size,
  st->total_bytes += st->n_pending;
  if (st->n_pending == 0)
      /* empty pending buffer - padding starts a fresh block */
      clib_memset (st->pending.as_u8, 0, block_size);
      st->pending.as_u8[0] = 0x80;
  else if (st->n_pending + sizeof (u64) + sizeof (u8) > block_size)
      /* no room for the 0x80 marker plus the 64-bit length - pad into an
       * extra block */
      st->pending.as_u8[st->n_pending] = 0x80;
      if (block_size == CLIB_SHA2_512_BLOCK_SIZE)
	clib_sha512_block (st, st->pending.as_u8, 1);
	clib_sha256_block (st, st->pending.as_u8, 1);
      clib_memset (st->pending.as_u8, 0, block_size);
    st->pending.as_u8[st->n_pending] = 0x80;
  /* message length in bits, big-endian, in the last 8 bytes
   * (note: SHA-384/512 formally use a 128-bit length field; only the low
   * 64 bits are stored here, which limits input to 2^61 bytes) */
  st->pending.as_u64[block_size / 8 - 1] =
    clib_net_to_host_u64 (st->total_bytes * 8);

  if (block_size == CLIB_SHA2_512_BLOCK_SIZE)
      clib_sha512_block (st, st->pending.as_u8, 1);
      for (i = 0; i < digest_size / sizeof (u64); i++)
	((u64 *) digest)[i] = clib_net_to_host_u64 (st->h.h64[i]);
      /* sha512-224 case - write half of u64 */
      if (i * sizeof (u64) < digest_size)
	((u32 *) digest)[2 * i] = clib_net_to_host_u32 (st->h.h64[i] >> 32);
      clib_sha256_block (st, st->pending.as_u8, 1);
      for (i = 0; i < digest_size / sizeof (u32); i++)
	*((u32 *) digest + i) = clib_net_to_host_u32 (st->h.h32[i]);
563 static_always_inline void
564 clib_sha2_final (clib_sha2_ctx_t *ctx, u8 *digest)
566 clib_sha2_final_internal (&ctx->state, ctx->block_size, ctx->digest_size,
570 static_always_inline void
571 clib_sha2 (clib_sha2_type_t type, const u8 *msg, uword len, u8 *digest)
574 clib_sha2_init (&ctx, type);
575 clib_sha2_update (&ctx, msg, len);
576 clib_sha2_final (&ctx, digest);
/* One-shot convenience wrappers; arguments are (msg, len, digest) */
#define clib_sha224(...) clib_sha2 (CLIB_SHA2_224, __VA_ARGS__)
#define clib_sha256(...) clib_sha2 (CLIB_SHA2_256, __VA_ARGS__)
#define clib_sha384(...) clib_sha2 (CLIB_SHA2_384, __VA_ARGS__)
#define clib_sha512(...) clib_sha2 (CLIB_SHA2_512, __VA_ARGS__)
#define clib_sha512_224(...) clib_sha2 (CLIB_SHA2_512_224, __VA_ARGS__)
#define clib_sha512_256(...) clib_sha2 (CLIB_SHA2_512_256, __VA_ARGS__)
/* NOTE(review): the typedef headers for these two HMAC structures appear
 * truncated in this view - confirm against upstream. */
  /* hash states after absorbing the inner (ipad) and outer (opad) padded
   * keys; precomputed once per key (RFC 2104) */
  clib_sha2_h_t ipad_h;
  clib_sha2_h_t opad_h;
} clib_sha2_hmac_key_data_t;
  /* streaming HMAC context */
  clib_sha2_type_t type;
  clib_sha2_state_t ipad_state;
  clib_sha2_state_t opad_state;
} clib_sha2_hmac_ctx_t;
/* Precompute HMAC key data: derive the inner/outer padded keys
 * (key XOR 0x36.., key XOR 0x5c.., RFC 2104) and store the hash states
 * after compressing one block of each.
 * NOTE(review): braces, the local clib_sha2_ctx_t declaration and the
 * `else` of the key-length branch appear truncated in this view - confirm
 * against upstream. */
static_always_inline void
clib_sha2_hmac_key_data (clib_sha2_type_t type, const u8 *key, uword key_len,
			 clib_sha2_hmac_key_data_t *kd)
  u8 block_size = clib_sha2_variants[type].block_size;
  /* key material zero-padded to one block */
  u8 data[SHA2_MAX_BLOCK_SIZE] = {};
  u8 ikey[SHA2_MAX_BLOCK_SIZE];
  u8 okey[SHA2_MAX_BLOCK_SIZE];
  clib_sha2_state_t ipad_state;
  clib_sha2_state_t opad_state;

  if (key_len > block_size)
      /* key is longer than block, calculate hash of key */
      clib_sha2_init (&ctx, type);
      clib_sha2_update (&ctx, key, key_len);
      clib_sha2_final (&ctx, (u8 *) data);
    clib_memcpy_fast (data, key, key_len);

  /* XOR the padded key with the ipad/opad constants, a machine word at a
   * time */
  for (int i = 0, w = 0; w < block_size; w += sizeof (uword), i++)
      ((uwordu *) ikey)[i] = ((uwordu *) data)[i] ^ 0x3636363636363636UL;
      ((uwordu *) okey)[i] = ((uwordu *) data)[i] ^ 0x5c5c5c5c5c5c5c5cUL;

  clib_sha2_state_init (&ipad_state, type);
  clib_sha2_state_init (&opad_state, type);

  if (block_size == CLIB_SHA2_512_BLOCK_SIZE)
      clib_sha512_block (&ipad_state, ikey, 1);
      clib_sha512_block (&opad_state, okey, 1);
      clib_sha256_block (&ipad_state, ikey, 1);
      clib_sha256_block (&opad_state, okey, 1);

  /* only the compressed hash states need to be kept */
  kd->ipad_h = ipad_state.h;
  kd->opad_h = opad_state.h;
/* Start a streaming HMAC computation from precomputed key data: seed both
 * inner and outer states from kd, accounting for the one key block each
 * has already absorbed (total_bytes = block_size).
 * NOTE(review): braces and the designated-initializer structure around the
 * .total_bytes fields (presumably .type/.ipad_state/.opad_state entries
 * restoring kd->ipad_h / kd->opad_h) appear truncated in this view -
 * confirm against upstream. */
static_always_inline void
clib_sha2_hmac_init (clib_sha2_hmac_ctx_t *ctx, clib_sha2_type_t type,
		     clib_sha2_hmac_key_data_t *kd)
  u8 block_size = clib_sha2_variants[type].block_size;
  u8 digest_size = clib_sha2_variants[type].digest_size;

  *ctx = (clib_sha2_hmac_ctx_t) {
    .block_size = block_size,
    .digest_size = digest_size,
      .total_bytes = block_size,
      .total_bytes = block_size,
674 static_always_inline void
675 clib_sha2_hmac_update (clib_sha2_hmac_ctx_t *ctx, const u8 *msg, uword len)
677 clib_sha2_update_internal (&ctx->ipad_state, ctx->block_size, msg, len);
680 static_always_inline void
681 clib_sha2_hmac_final (clib_sha2_hmac_ctx_t *ctx, u8 *digest)
683 u8 i_digest[SHA2_MAX_DIGEST_SIZE];
685 clib_sha2_final_internal (&ctx->ipad_state, ctx->block_size,
686 ctx->digest_size, i_digest);
687 clib_sha2_update_internal (&ctx->opad_state, ctx->block_size, i_digest,
689 clib_sha2_final_internal (&ctx->opad_state, ctx->block_size,
690 ctx->digest_size, digest);
693 static_always_inline void
694 clib_sha2_hmac (clib_sha2_type_t type, const u8 *key, uword key_len,
695 const u8 *msg, uword len, u8 *digest)
697 clib_sha2_hmac_ctx_t _ctx, *ctx = &_ctx;
698 clib_sha2_hmac_key_data_t kd;
700 clib_sha2_hmac_key_data (type, key, key_len, &kd);
701 clib_sha2_hmac_init (ctx, type, &kd);
702 clib_sha2_hmac_update (ctx, msg, len);
703 clib_sha2_hmac_final (ctx, digest);
/* One-shot HMAC convenience wrappers; arguments are
 * (key, key_len, msg, len, digest) */
#define clib_hmac_sha224(...) clib_sha2_hmac (CLIB_SHA2_224, __VA_ARGS__)
#define clib_hmac_sha256(...) clib_sha2_hmac (CLIB_SHA2_256, __VA_ARGS__)
#define clib_hmac_sha384(...) clib_sha2_hmac (CLIB_SHA2_384, __VA_ARGS__)
#define clib_hmac_sha512(...) clib_sha2_hmac (CLIB_SHA2_512, __VA_ARGS__)
#define clib_hmac_sha512_224(...)                                             \
  clib_sha2_hmac (CLIB_SHA2_512_224, __VA_ARGS__)
#define clib_hmac_sha512_256(...)                                             \
  clib_sha2_hmac (CLIB_SHA2_512_256, __VA_ARGS__)
715 #endif /* included_sha2_h */