/*
 *------------------------------------------------------------------
 * Copyright (c) 2019 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#include <vlib/vlib.h>
#include <vnet/plugin/plugin.h>
#include <vnet/crypto/crypto.h>
#include <x86intrin.h>
#include <crypto_ia32/crypto_ia32.h>
#include <crypto_ia32/aesni.h>
#include <crypto_ia32/ghash.h>

#if __GNUC__ > 4 && !__clang__ && CLIB_DEBUG == 0
#pragma GCC optimize ("O3")
#endif

typedef struct
{
  /* pre-calculated hash key values */
  const __m128i Hi[8];
  /* extracted AES key */
  const __m128i Ke[15];
} aes_gcm_key_data_t;

static const __m128i last_byte_one = { 0, 1ULL << 56 };
static const __m128i zero = { 0, 0 };

static const u8x16 bswap_mask = {
  15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0
};

static const u8x16 byte_mask_scale = {
  0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15
};

static_always_inline __m128i
aesni_gcm_bswap (__m128i x)
{
  return _mm_shuffle_epi8 (x, (__m128i) bswap_mask);
}

static_always_inline __m128i
aesni_gcm_byte_mask (__m128i x, u8 n_bytes)
{
  u8x16 mask = u8x16_is_greater (u8x16_splat (n_bytes), byte_mask_scale);

  return _mm_blendv_epi8 (zero, x, (__m128i) mask);
}
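
/* A worked example of the mask construction above: for n_bytes = 5,
 * u8x16_splat (5) compared against byte_mask_scale {0, 1, 2, ..., 15}
 * yields 0xff in lanes 0-4 and 0x00 elsewhere, so the blend keeps the
 * first 5 bytes of x and zeroes the remaining 11. */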

static_always_inline __m128i
aesni_gcm_load_partial (__m128i * p, int n_bytes)
{
  ASSERT (n_bytes <= 16);
#ifdef __AVX512F__
  return _mm_mask_loadu_epi8 (zero, (1 << n_bytes) - 1, p);
#else
  return aesni_gcm_byte_mask (CLIB_MEM_OVERFLOW_LOAD (_mm_loadu_si128, p),
			      n_bytes);
#endif
}
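
/* On AVX512 targets _mm_mask_loadu_epi8 loads exactly n_bytes and zeroes
 * the rest, so it never touches bytes past the buffer.  The fallback does
 * a full 16-byte load (wrapped in CLIB_MEM_OVERFLOW_LOAD so memory
 * checkers tolerate the intentional overread) and masks the tail off. */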

static_always_inline void
aesni_gcm_store_partial (void *p, __m128i r, int n_bytes)
{
#ifdef __AVX512F__
  _mm_mask_storeu_epi8 (p, (1 << n_bytes) - 1, r);
#else
  u8x16 mask = u8x16_is_greater (u8x16_splat (n_bytes), byte_mask_scale);
  _mm_maskmoveu_si128 (r, (__m128i) mask, p);
#endif
}

static_always_inline void
aesni_gcm_load (__m128i * d, __m128i * inv, int n, int n_bytes)
{
  for (int i = 0; i < n - 1; i++)
    d[i] = _mm_loadu_si128 (inv + i);
  d[n - 1] = n_bytes ? aesni_gcm_load_partial (inv + n - 1, n_bytes) :
    _mm_loadu_si128 (inv + n - 1);
}

static_always_inline void
aesni_gcm_store (__m128i * d, __m128i * outv, int n, int n_bytes)
{
  for (int i = 0; i < n - 1; i++)
    _mm_storeu_si128 (outv + i, d[i]);
  if (n_bytes)
    aesni_gcm_store_partial (outv + n - 1, d[n - 1], n_bytes);
  else
    _mm_storeu_si128 (outv + n - 1, d[n - 1]);
}

static_always_inline void
aesni_gcm_enc_first_round (__m128i * r, __m128i * Y, u32 * ctr, __m128i k,
			   int n_blocks)
{
  u32 i;

  if (PREDICT_TRUE ((u8) ctr[0] < (256 - n_blocks)))
    {
      for (i = 0; i < n_blocks; i++)
	{
	  Y[0] = _mm_add_epi32 (Y[0], last_byte_one);
	  r[i] = k ^ Y[0];
	}
      ctr[0] += n_blocks;
    }
  else
    {
      for (i = 0; i < n_blocks; i++)
	{
	  Y[0] = _mm_insert_epi32 (Y[0], clib_host_to_net_u32 (++ctr[0]), 3);
	  r[i] = k ^ Y[0];
	}
    }
}
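
/* The GCM counter block increments only in its last 32 bits, stored
 * big-endian.  The fast path above relies on the final byte not wrapping
 * within this batch: adding last_byte_one bumps the last byte of the block
 * in place, with no byte swap.  Only when the 8-bit tail would overflow
 * does the slow path re-insert the full 32-bit counter in network byte
 * order. */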

static_always_inline void
aesni_gcm_enc_round (__m128i * r, __m128i k, int n_blocks)
{
  for (int i = 0; i < n_blocks; i++)
    r[i] = _mm_aesenc_si128 (r[i], k);
}

static_always_inline void
aesni_gcm_enc_last_round (__m128i * r, __m128i * d, const __m128i * k,
			  int rounds, int n_blocks)
{
  /* additional rounds for AES-192 and AES-256 */
  for (int i = 10; i < rounds; i++)
    aesni_gcm_enc_round (r, k[i], n_blocks);

  for (int i = 0; i < n_blocks; i++)
    d[i] ^= _mm_aesenclast_si128 (r[i], k[rounds]);
}

static_always_inline __m128i
aesni_gcm_ghash_blocks (__m128i T, aes_gcm_key_data_t * kd,
			const __m128i * in, int n_blocks)
{
  ghash_data_t _gd, *gd = &_gd;
  const __m128i *Hi = kd->Hi + n_blocks - 1;
  ghash_mul_first (gd, aesni_gcm_bswap (_mm_loadu_si128 (in)) ^ T, Hi[0]);
  for (int i = 1; i < n_blocks; i++)
    ghash_mul_next (gd, aesni_gcm_bswap (_mm_loadu_si128 (in + i)), Hi[-i]);
  ghash_reduce (gd);
  ghash_reduce2 (gd);
  return ghash_final (gd);
}
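
/* Multi-block GHASH folds n blocks with one deferred reduction: since
 * GHASH is T' = (((T ^ X1) H ^ X2) H ...) H, an n-block batch can be
 * rewritten as (T ^ X1) H^n ^ X2 H^(n-1) ^ ... ^ Xn H.  The first (oldest)
 * block is therefore multiplied by the highest precomputed power of H in
 * kd->Hi, and the carry-less products are accumulated before a single
 * reduction. */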

static_always_inline __m128i
aesni_gcm_ghash (__m128i T, aes_gcm_key_data_t * kd, const __m128i * in,
		 u32 n_left)
{

  while (n_left >= 128)
    {
      T = aesni_gcm_ghash_blocks (T, kd, in, 8);
      n_left -= 128;
      in += 8;
    }

  if (n_left >= 64)
    {
      T = aesni_gcm_ghash_blocks (T, kd, in, 4);
      n_left -= 64;
      in += 4;
    }

  if (n_left >= 32)
    {
      T = aesni_gcm_ghash_blocks (T, kd, in, 2);
      n_left -= 32;
      in += 2;
    }

  if (n_left >= 16)
    {
      T = aesni_gcm_ghash_blocks (T, kd, in, 1);
      n_left -= 16;
      in += 1;
    }

  if (n_left)
    {
      __m128i r = aesni_gcm_load_partial ((__m128i *) in, n_left);
      T = ghash_mul (aesni_gcm_bswap (r) ^ T, kd->Hi[0]);
    }
  return T;
}
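
/* A trailing partial block is zero-filled by aesni_gcm_load_partial before
 * the multiply, which matches the zero padding GCM prescribes for the last
 * AAD block, and is folded with H^1 (kd->Hi[0]). */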

static_always_inline __m128i
aesni_gcm_calc (__m128i T, aes_gcm_key_data_t * kd, __m128i * d,
		__m128i * Y, u32 * ctr, __m128i * inv, __m128i * outv,
		int rounds, int n, int last_block_bytes, int with_ghash,
		int is_encrypt)
{
  __m128i r[n];
  ghash_data_t _gd = { }, *gd = &_gd;
  const __m128i *k = kd->Ke;
  int hidx = is_encrypt ? 4 : n, didx = 0;

  _mm_prefetch (inv + 4, _MM_HINT_T0);

  /* AES rounds 0 and 1 */
  aesni_gcm_enc_first_round (r, Y, ctr, k[0], n);
  aesni_gcm_enc_round (r, k[1], n);

  /* load data - decrypt round */
  if (is_encrypt == 0)
    aesni_gcm_load (d, inv, n, last_block_bytes);

  /* GHASH multiply block 1 */
  if (with_ghash)
    ghash_mul_first (gd, aesni_gcm_bswap (d[didx++]) ^ T, kd->Hi[--hidx]);

  /* AES rounds 2 and 3 */
  aesni_gcm_enc_round (r, k[2], n);
  aesni_gcm_enc_round (r, k[3], n);

  /* GHASH multiply block 2 */
  if (with_ghash && hidx)
    ghash_mul_next (gd, aesni_gcm_bswap (d[didx++]), kd->Hi[--hidx]);

  /* AES rounds 4 and 5 */
  aesni_gcm_enc_round (r, k[4], n);
  aesni_gcm_enc_round (r, k[5], n);

  /* GHASH multiply block 3 */
  if (with_ghash && hidx)
    ghash_mul_next (gd, aesni_gcm_bswap (d[didx++]), kd->Hi[--hidx]);

  /* AES rounds 6 and 7 */
  aesni_gcm_enc_round (r, k[6], n);
  aesni_gcm_enc_round (r, k[7], n);

  /* GHASH multiply block 4 */
  if (with_ghash && hidx)
    ghash_mul_next (gd, aesni_gcm_bswap (d[didx++]), kd->Hi[--hidx]);

  /* AES rounds 8 and 9 */
  aesni_gcm_enc_round (r, k[8], n);
  aesni_gcm_enc_round (r, k[9], n);

  /* GHASH reduce 1st step */
  if (with_ghash)
    ghash_reduce (gd);

  /* load data - encrypt round */
  if (is_encrypt)
    aesni_gcm_load (d, inv, n, last_block_bytes);

  /* GHASH reduce 2nd step */
  if (with_ghash)
    ghash_reduce2 (gd);

  /* AES last round(s) */
  aesni_gcm_enc_last_round (r, d, k, rounds, n);

  /* store data */
  aesni_gcm_store (d, outv, n, last_block_bytes);

  /* GHASH final step */
  if (with_ghash)
    T = ghash_final (gd);

  return T;
}
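
/* The function above interleaves two independent dependency chains: the
 * AESENC rounds for up to four counter blocks and the PCLMULQDQ-based
 * GHASH of four blocks of ciphertext.  When encrypting, the ciphertext
 * being hashed is the output of the previous call, so hidx starts at 4;
 * when decrypting, the incoming ciphertext is hashed directly, so hidx
 * starts at n and the data is loaded before the hash multiplies. */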

static_always_inline __m128i
aesni_gcm_calc_double (__m128i T, aes_gcm_key_data_t * kd, __m128i * d,
		       __m128i * Y, u32 * ctr, __m128i * inv, __m128i * outv,
		       int rounds, int is_encrypt)
{
  __m128i r[4];
  ghash_data_t _gd, *gd = &_gd;
  const __m128i *k = kd->Ke;

  /* AES rounds 0 and 1 */
  aesni_gcm_enc_first_round (r, Y, ctr, k[0], 4);
  aesni_gcm_enc_round (r, k[1], 4);

  /* load 4 blocks of data - decrypt round */
  if (is_encrypt == 0)
    aesni_gcm_load (d, inv, 4, 0);

  /* GHASH multiply block 0 */
  ghash_mul_first (gd, aesni_gcm_bswap (d[0]) ^ T, kd->Hi[7]);

  /* AES rounds 2 and 3 */
  aesni_gcm_enc_round (r, k[2], 4);
  aesni_gcm_enc_round (r, k[3], 4);

  /* GHASH multiply block 1 */
  ghash_mul_next (gd, aesni_gcm_bswap (d[1]), kd->Hi[6]);

  /* AES rounds 4 and 5 */
  aesni_gcm_enc_round (r, k[4], 4);
  aesni_gcm_enc_round (r, k[5], 4);

  /* GHASH multiply block 2 */
  ghash_mul_next (gd, aesni_gcm_bswap (d[2]), kd->Hi[5]);

  /* AES rounds 6 and 7 */
  aesni_gcm_enc_round (r, k[6], 4);
  aesni_gcm_enc_round (r, k[7], 4);

  /* GHASH multiply block 3 */
  ghash_mul_next (gd, aesni_gcm_bswap (d[3]), kd->Hi[4]);

  /* AES rounds 8 and 9 */
  aesni_gcm_enc_round (r, k[8], 4);
  aesni_gcm_enc_round (r, k[9], 4);

  /* load 4 blocks of data - encrypt round */
  if (is_encrypt)
    aesni_gcm_load (d, inv, 4, 0);

  /* AES last round(s) */
  aesni_gcm_enc_last_round (r, d, k, rounds, 4);

  /* store 4 blocks of data */
  aesni_gcm_store (d, outv, 4, 0);

  /* load next 4 blocks of data - decrypt round */
  if (is_encrypt == 0)
    aesni_gcm_load (d, inv + 4, 4, 0);

  /* GHASH multiply block 4 */
  ghash_mul_next (gd, aesni_gcm_bswap (d[0]), kd->Hi[3]);

  /* AES rounds 0, 1 and 2 */
  aesni_gcm_enc_first_round (r, Y, ctr, k[0], 4);
  aesni_gcm_enc_round (r, k[1], 4);
  aesni_gcm_enc_round (r, k[2], 4);

  /* GHASH multiply block 5 */
  ghash_mul_next (gd, aesni_gcm_bswap (d[1]), kd->Hi[2]);

  /* AES rounds 3 and 4 */
  aesni_gcm_enc_round (r, k[3], 4);
  aesni_gcm_enc_round (r, k[4], 4);

  /* GHASH multiply block 6 */
  ghash_mul_next (gd, aesni_gcm_bswap (d[2]), kd->Hi[1]);

  /* AES rounds 5 and 6 */
  aesni_gcm_enc_round (r, k[5], 4);
  aesni_gcm_enc_round (r, k[6], 4);

  /* GHASH multiply block 7 */
  ghash_mul_next (gd, aesni_gcm_bswap (d[3]), kd->Hi[0]);

  /* AES rounds 7 and 8 */
  aesni_gcm_enc_round (r, k[7], 4);
  aesni_gcm_enc_round (r, k[8], 4);

  /* GHASH reduce 1st step */
  ghash_reduce (gd);

  /* AES round 9 */
  aesni_gcm_enc_round (r, k[9], 4);

  /* load data - encrypt round */
  if (is_encrypt)
    aesni_gcm_load (d, inv + 4, 4, 0);

  /* GHASH reduce 2nd step */
  ghash_reduce2 (gd);

  /* AES last round(s) */
  aesni_gcm_enc_last_round (r, d, k, rounds, 4);

  /* store data */
  aesni_gcm_store (d, outv + 4, 4, 0);

  /* GHASH final step */
  return ghash_final (gd);
}
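
/* The double variant processes 128 bytes per call: the GHASH of all eight
 * blocks (powers H^8 down to H^1) is spread across two back-to-back
 * 4-block AES-CTR passes, keeping the carry-less multiplier busy while the
 * second set of counter blocks moves through its AES rounds. */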

static_always_inline __m128i
aesni_gcm_ghash_last (__m128i T, aes_gcm_key_data_t * kd, __m128i * d,
		      int n_blocks, int n_bytes)
{
  ghash_data_t _gd, *gd = &_gd;

  if (n_bytes)
    d[n_blocks - 1] = aesni_gcm_byte_mask (d[n_blocks - 1], n_bytes);

  ghash_mul_first (gd, aesni_gcm_bswap (d[0]) ^ T, kd->Hi[n_blocks - 1]);
  if (n_blocks > 1)
    ghash_mul_next (gd, aesni_gcm_bswap (d[1]), kd->Hi[n_blocks - 2]);
  if (n_blocks > 2)
    ghash_mul_next (gd, aesni_gcm_bswap (d[2]), kd->Hi[n_blocks - 3]);
  if (n_blocks > 3)
    ghash_mul_next (gd, aesni_gcm_bswap (d[3]), kd->Hi[n_blocks - 4]);
  ghash_reduce (gd);
  ghash_reduce2 (gd);
  return ghash_final (gd);
}

static_always_inline __m128i
aesni_gcm_enc (__m128i T, aes_gcm_key_data_t * kd, __m128i Y, const u8 * in,
	       const u8 * out, u32 n_left, int rounds)
{
  __m128i *inv = (__m128i *) in, *outv = (__m128i *) out;
  __m128i d[4];
  u32 ctr = 1;

  if (n_left == 0)
    return T;

  if (n_left < 64)
    {
      if (n_left > 48)
	{
	  n_left -= 48;
	  aesni_gcm_calc (T, kd, d, &Y, &ctr, inv, outv, rounds, 4, n_left,
			  /* with_ghash */ 0, /* is_encrypt */ 1);
	  return aesni_gcm_ghash_last (T, kd, d, 4, n_left);
	}
      else if (n_left > 32)
	{
	  n_left -= 32;
	  aesni_gcm_calc (T, kd, d, &Y, &ctr, inv, outv, rounds, 3, n_left,
			  /* with_ghash */ 0, /* is_encrypt */ 1);
	  return aesni_gcm_ghash_last (T, kd, d, 3, n_left);
	}
      else if (n_left > 16)
	{
	  n_left -= 16;
	  aesni_gcm_calc (T, kd, d, &Y, &ctr, inv, outv, rounds, 2, n_left,
			  /* with_ghash */ 0, /* is_encrypt */ 1);
	  return aesni_gcm_ghash_last (T, kd, d, 2, n_left);
	}
      else
	{
	  aesni_gcm_calc (T, kd, d, &Y, &ctr, inv, outv, rounds, 1, n_left,
			  /* with_ghash */ 0, /* is_encrypt */ 1);
	  return aesni_gcm_ghash_last (T, kd, d, 1, n_left);
	}
    }

  aesni_gcm_calc (T, kd, d, &Y, &ctr, inv, outv, rounds, 4, 0,
		  /* with_ghash */ 0, /* is_encrypt */ 1);

  /* next */
  n_left -= 64;
  outv += 4;
  inv += 4;

  while (n_left >= 128)
    {
      T = aesni_gcm_calc_double (T, kd, d, &Y, &ctr, inv, outv, rounds,
				 /* is_encrypt */ 1);

      /* next */
      n_left -= 128;
      outv += 8;
      inv += 8;
    }

  if (n_left >= 64)
    {
      T = aesni_gcm_calc (T, kd, d, &Y, &ctr, inv, outv, rounds, 4, 0,
			  /* with_ghash */ 1, /* is_encrypt */ 1);

      /* next */
      n_left -= 64;
      outv += 4;
      inv += 4;
    }

  if (n_left == 0)
    return aesni_gcm_ghash_last (T, kd, d, 4, 0);

  if (n_left > 48)
    {
      n_left -= 48;
      T = aesni_gcm_calc (T, kd, d, &Y, &ctr, inv, outv, rounds, 4, n_left,
			  /* with_ghash */ 1, /* is_encrypt */ 1);
      return aesni_gcm_ghash_last (T, kd, d, 4, n_left);
    }

  if (n_left > 32)
    {
      n_left -= 32;
      T = aesni_gcm_calc (T, kd, d, &Y, &ctr, inv, outv, rounds, 3, n_left,
			  /* with_ghash */ 1, /* is_encrypt */ 1);
      return aesni_gcm_ghash_last (T, kd, d, 3, n_left);
    }

  if (n_left > 16)
    {
      n_left -= 16;
      T = aesni_gcm_calc (T, kd, d, &Y, &ctr, inv, outv, rounds, 2, n_left,
			  /* with_ghash */ 1, /* is_encrypt */ 1);
      return aesni_gcm_ghash_last (T, kd, d, 2, n_left);
    }

  T = aesni_gcm_calc (T, kd, d, &Y, &ctr, inv, outv, rounds, 1, n_left,
		      /* with_ghash */ 1, /* is_encrypt */ 1);
  return aesni_gcm_ghash_last (T, kd, d, 1, n_left);
}
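
/* Encryption runs one 4-block batch ahead of the GHASH: the first
 * aesni_gcm_calc call only produces ciphertext (with_ghash = 0), every
 * following call hashes the previous output while encrypting the next
 * batch, and aesni_gcm_ghash_last folds whatever ciphertext is still
 * pending in d[] once the input is exhausted. */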

static_always_inline __m128i
aesni_gcm_dec (__m128i T, aes_gcm_key_data_t * kd, __m128i Y, const u8 * in,
	       const u8 * out, u32 n_left, int rounds)
{
  __m128i *inv = (__m128i *) in, *outv = (__m128i *) out;
  __m128i d[4];
  u32 ctr = 1;

  while (n_left >= 128)
    {
      T = aesni_gcm_calc_double (T, kd, d, &Y, &ctr, inv, outv, rounds,
				 /* is_encrypt */ 0);

      /* next */
      n_left -= 128;
      outv += 8;
      inv += 8;
    }

  if (n_left >= 64)
    {
      T = aesni_gcm_calc (T, kd, d, &Y, &ctr, inv, outv, rounds, 4, 0, 1, 0);

      /* next */
      n_left -= 64;
      outv += 4;
      inv += 4;
    }

  if (n_left == 0)
    return T;

  if (n_left > 48)
    return aesni_gcm_calc (T, kd, d, &Y, &ctr, inv, outv, rounds, 4,
			   n_left - 48,
			   /* with_ghash */ 1, /* is_encrypt */ 0);

  if (n_left > 32)
    return aesni_gcm_calc (T, kd, d, &Y, &ctr, inv, outv, rounds, 3,
			   n_left - 32,
			   /* with_ghash */ 1, /* is_encrypt */ 0);

  if (n_left > 16)
    return aesni_gcm_calc (T, kd, d, &Y, &ctr, inv, outv, rounds, 2,
			   n_left - 16,
			   /* with_ghash */ 1, /* is_encrypt */ 0);

  return aesni_gcm_calc (T, kd, d, &Y, &ctr, inv, outv, rounds, 1, n_left,
			 /* with_ghash */ 1, /* is_encrypt */ 0);
}

static_always_inline int
aes_gcm (const u8 * in, u8 * out, const u8 * addt, const u8 * iv, u8 * tag,
	 u32 data_bytes, u32 aad_bytes, u8 tag_len, aes_gcm_key_data_t * kd,
	 int aes_rounds, int is_encrypt)
{
  int i;
  __m128i r, Y0, T = { };
  ghash_data_t _gd, *gd = &_gd;

  _mm_prefetch (iv, _MM_HINT_T0);
  _mm_prefetch (in, _MM_HINT_T0);
  _mm_prefetch (in + CLIB_CACHE_LINE_BYTES, _MM_HINT_T0);

  /* calculate ghash for AAD - optimized for ipsec common cases */
  if (aad_bytes == 8)
    T = aesni_gcm_ghash (T, kd, (__m128i *) addt, 8);
  else if (aad_bytes == 12)
    T = aesni_gcm_ghash (T, kd, (__m128i *) addt, 12);
  else
    T = aesni_gcm_ghash (T, kd, (__m128i *) addt, aad_bytes);

  /* initialize counter */
  Y0 = CLIB_MEM_OVERFLOW_LOAD (_mm_loadu_si128, (__m128i *) iv);
  Y0 = _mm_insert_epi32 (Y0, clib_host_to_net_u32 (1), 3);
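
  /* This assumes the 96-bit IV case (the one IPsec uses), where GCM
   * defines the pre-counter block J0 as IV || 0x00000001 in big-endian:
   * the overflow-tolerant 16-byte load pulls in the 12 IV bytes and the
   * insert overwrites the last 32 bits with the initial counter of 1. */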

  /* ghash and encrypt/decrypt */
  if (is_encrypt)
    T = aesni_gcm_enc (T, kd, Y0, in, out, data_bytes, aes_rounds);
  else
    T = aesni_gcm_dec (T, kd, Y0, in, out, data_bytes, aes_rounds);

  _mm_prefetch (tag, _MM_HINT_T0);

  /* finalize ghash - data bytes and aad bytes converted to bits */
  r = (__m128i) ((u64x2) { data_bytes, aad_bytes } << 3);

  /* interleaved computation of final ghash and E(Y0, k) */
  ghash_mul_first (gd, r ^ T, kd->Hi[0]);
  r = kd->Ke[0] ^ Y0;
  for (i = 1; i < 5; i += 1)
    r = _mm_aesenc_si128 (r, kd->Ke[i]);
  ghash_reduce (gd);
  ghash_reduce2 (gd);
  for (; i < 9; i += 1)
    r = _mm_aesenc_si128 (r, kd->Ke[i]);
  T = ghash_final (gd);
  for (; i < aes_rounds; i += 1)
    r = _mm_aesenc_si128 (r, kd->Ke[i]);
  r = _mm_aesenclast_si128 (r, kd->Ke[aes_rounds]);
  T = aesni_gcm_bswap (T) ^ r;
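
  /* At this point T is the complete GCM tag: the length block (bit counts
   * of AAD and data) folds in one more multiply by H, and the encrypted
   * initial counter block E(K, Y0) computed in r masks the hash. */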

  /* tag_len 16 -> 0 */
  tag_len &= 0xf;

  if (is_encrypt)
    {
      /* store tag */
      if (tag_len)
	aesni_gcm_store_partial ((__m128i *) tag, T, tag_len);
      else
	_mm_storeu_si128 ((__m128i *) tag, T);
    }
  else
    {
      /* check tag - bytewise compare, masked down to tag_len bytes */
      u16 tag_mask = tag_len ? (1 << tag_len) - 1 : 0xffff;
      r = _mm_loadu_si128 ((__m128i *) tag);
      if ((_mm_movemask_epi8 ((__m128i) ((u8x16) r == (u8x16) T)) & tag_mask)
	  != tag_mask)
	return 0;
    }
  return 1;
}

static_always_inline u32
aesni_ops_enc_aes_gcm (vlib_main_t * vm, vnet_crypto_op_t * ops[],
		       u32 n_ops, aesni_key_size_t ks)
{
  crypto_ia32_main_t *cm = &crypto_ia32_main;
  vnet_crypto_op_t *op = ops[0];
  aes_gcm_key_data_t *kd;
  u32 n_left = n_ops;

next:
  kd = (aes_gcm_key_data_t *) cm->key_data[op->key_index];
  aes_gcm (op->src, op->dst, op->aad, op->iv, op->tag, op->len, op->aad_len,
	   op->tag_len, kd, AESNI_KEY_ROUNDS (ks), /* is_encrypt */ 1);
  op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;

  if (--n_left)
    {
      op += 1;
      goto next;
    }

  return n_ops;
}

static_always_inline u32
aesni_ops_dec_aes_gcm (vlib_main_t * vm, vnet_crypto_op_t * ops[],
		       u32 n_ops, aesni_key_size_t ks)
{
  crypto_ia32_main_t *cm = &crypto_ia32_main;
  vnet_crypto_op_t *op = ops[0];
  aes_gcm_key_data_t *kd;
  u32 n_left = n_ops;
  int rv;

next:
  kd = (aes_gcm_key_data_t *) cm->key_data[op->key_index];
  rv = aes_gcm (op->src, op->dst, op->aad, op->iv, op->tag, op->len,
		op->aad_len, op->tag_len, kd, AESNI_KEY_ROUNDS (ks),
		/* is_encrypt */ 0);

  if (rv)
    {
      op->status = VNET_CRYPTO_OP_STATUS_COMPLETED;
    }
  else
    {
      op->status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
      n_ops--;
    }

  if (--n_left)
    {
      op += 1;
      goto next;
    }

  return n_ops;
}

static_always_inline void *
aesni_gcm_key_exp (vnet_crypto_key_t * key, aesni_key_size_t ks)
{
  aes_gcm_key_data_t *kd;
  __m128i H;
  int i;

  kd = clib_mem_alloc_aligned (sizeof (*kd), CLIB_CACHE_LINE_BYTES);

  /* expand AES key */
  aes_key_expand ((__m128i *) kd->Ke, key->data, ks);

  /* pre-calculate H */
  H = kd->Ke[0];
  for (i = 1; i < AESNI_KEY_ROUNDS (ks); i += 1)
    H = _mm_aesenc_si128 (H, kd->Ke[i]);
  H = _mm_aesenclast_si128 (H, kd->Ke[i]);
  H = aesni_gcm_bswap (H);
  ghash_precompute (H, (__m128i *) kd->Hi, 8);

  return kd;
}
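
/* H is the GCM hash key E(K, 0^128): encrypting the all-zero block reduces
 * to round key 0 followed by the AES rounds above.  ghash_precompute then
 * stores the first eight powers H^1..H^8 into kd->Hi, so up to eight GHASH
 * blocks can be folded per reduction. */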

#define foreach_aesni_gcm_handler_type _(128) _(192) _(256)

#define _(x) \
static u32 aesni_ops_dec_aes_gcm_##x \
(vlib_main_t * vm, vnet_crypto_op_t * ops[], u32 n_ops) \
{ return aesni_ops_dec_aes_gcm (vm, ops, n_ops, AESNI_KEY_##x); } \
static u32 aesni_ops_enc_aes_gcm_##x \
(vlib_main_t * vm, vnet_crypto_op_t * ops[], u32 n_ops) \
{ return aesni_ops_enc_aes_gcm (vm, ops, n_ops, AESNI_KEY_##x); } \
static void * aesni_gcm_key_exp_##x (vnet_crypto_key_t *key) \
{ return aesni_gcm_key_exp (key, AESNI_KEY_##x); }

foreach_aesni_gcm_handler_type;
#undef _
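
/* For each key size the macro stamps out thin wrappers; e.g. _(128)
 * expands (roughly) to:
 *
 *   static u32 aesni_ops_enc_aes_gcm_128 (vlib_main_t * vm,
 *                                         vnet_crypto_op_t * ops[],
 *                                         u32 n_ops)
 *   { return aesni_ops_enc_aes_gcm (vm, ops, n_ops, AESNI_KEY_128); }
 *
 * which lets the static_always_inline workers above be specialized per key
 * size while plain function pointers are registered below. */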

clib_error_t *
#ifdef __AVX512F__
crypto_ia32_aesni_gcm_init_avx512 (vlib_main_t * vm)
#elif __AVX2__
crypto_ia32_aesni_gcm_init_avx2 (vlib_main_t * vm)
#else
crypto_ia32_aesni_gcm_init_sse42 (vlib_main_t * vm)
#endif
{
  crypto_ia32_main_t *cm = &crypto_ia32_main;

#define _(x) \
  vnet_crypto_register_ops_handler (vm, cm->crypto_engine_index, \
				    VNET_CRYPTO_OP_AES_##x##_GCM_ENC, \
				    aesni_ops_enc_aes_gcm_##x); \
  vnet_crypto_register_ops_handler (vm, cm->crypto_engine_index, \
				    VNET_CRYPTO_OP_AES_##x##_GCM_DEC, \
				    aesni_ops_dec_aes_gcm_##x); \
  cm->key_fn[VNET_CRYPTO_ALG_AES_##x##_GCM] = aesni_gcm_key_exp_##x;
  foreach_aesni_gcm_handler_type;
#undef _
  return 0;
}

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */