diff --git a/vnet/vnet/ipsec/esp.h b/vnet/vnet/ipsec/esp.h
index 2334dc48bc5..505d34ad859 100644
--- a/vnet/vnet/ipsec/esp.h
+++ b/vnet/vnet/ipsec/esp.h
@@ -20,137 +20,153 @@
 #include <openssl/hmac.h>
 #include <openssl/evp.h>
 
-typedef struct {
+typedef struct
+{
   u32 spi;
   u32 seq;
   u8 data[0];
 } esp_header_t;
 
-typedef struct {
+typedef struct
+{
   u8 pad_length;
   u8 next_header;
 } esp_footer_t;
 
+/* *INDENT-OFF* */
 typedef CLIB_PACKED (struct {
   ip4_header_t ip4;
   esp_header_t esp;
 }) ip4_and_esp_header_t;
+/* *INDENT-ON* */
 
+/* *INDENT-OFF* */
 typedef CLIB_PACKED (struct {
   ip6_header_t ip6;
   esp_header_t esp;
 }) ip6_and_esp_header_t;
+/* *INDENT-ON* */
 
-typedef struct {
-  const EVP_CIPHER * type;
+typedef struct
+{
+  const EVP_CIPHER *type;
 } esp_crypto_alg_t;
 
-typedef struct {
-  const EVP_MD * md;
+typedef struct
+{
+  const EVP_MD *md;
   u8 trunc_size;
 } esp_integ_alg_t;
 
-typedef struct {
-  CLIB_CACHE_LINE_ALIGN_MARK(cacheline0);
+typedef struct
+{
+  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
   EVP_CIPHER_CTX encrypt_ctx;
-  CLIB_CACHE_LINE_ALIGN_MARK(cacheline1);
+  CLIB_CACHE_LINE_ALIGN_MARK (cacheline1);
   EVP_CIPHER_CTX decrypt_ctx;
-  CLIB_CACHE_LINE_ALIGN_MARK(cacheline2);
+  CLIB_CACHE_LINE_ALIGN_MARK (cacheline2);
   HMAC_CTX hmac_ctx;
   ipsec_crypto_alg_t last_encrypt_alg;
   ipsec_crypto_alg_t last_decrypt_alg;
   ipsec_integ_alg_t last_integ_alg;
 } esp_main_per_thread_data_t;
 
-typedef struct {
-  esp_crypto_alg_t * esp_crypto_algs;
-  esp_integ_alg_t * esp_integ_algs;
-  esp_main_per_thread_data_t * per_thread_data;
+typedef struct
+{
+  esp_crypto_alg_t *esp_crypto_algs;
+  esp_integ_alg_t *esp_integ_algs;
+  esp_main_per_thread_data_t *per_thread_data;
 } esp_main_t;
 
 esp_main_t esp_main;
 
 always_inline void
-esp_init()
+esp_init ()
 {
-  esp_main_t * em = &esp_main;
-  vlib_thread_main_t * tm = vlib_get_thread_main();
+  esp_main_t *em = &esp_main;
+  vlib_thread_main_t *tm = vlib_get_thread_main ();
 
   memset (em, 0, sizeof (em[0]));
 
-  vec_validate(em->esp_crypto_algs, IPSEC_CRYPTO_N_ALG - 1);
-  em->esp_crypto_algs[IPSEC_CRYPTO_ALG_AES_CBC_128].type = EVP_aes_128_cbc();
-  em->esp_crypto_algs[IPSEC_CRYPTO_ALG_AES_CBC_192].type = EVP_aes_192_cbc();
-  em->esp_crypto_algs[IPSEC_CRYPTO_ALG_AES_CBC_256].type = EVP_aes_256_cbc();
+  vec_validate (em->esp_crypto_algs, IPSEC_CRYPTO_N_ALG - 1);
+  em->esp_crypto_algs[IPSEC_CRYPTO_ALG_AES_CBC_128].type = EVP_aes_128_cbc ();
+  em->esp_crypto_algs[IPSEC_CRYPTO_ALG_AES_CBC_192].type = EVP_aes_192_cbc ();
+  em->esp_crypto_algs[IPSEC_CRYPTO_ALG_AES_CBC_256].type = EVP_aes_256_cbc ();
 
-  vec_validate(em->esp_integ_algs, IPSEC_INTEG_N_ALG - 1);
-  esp_integ_alg_t * i;
+  vec_validate (em->esp_integ_algs, IPSEC_INTEG_N_ALG - 1);
+  esp_integ_alg_t *i;
 
   i = &em->esp_integ_algs[IPSEC_INTEG_ALG_SHA1_96];
-  i->md = EVP_sha1();
+  i->md = EVP_sha1 ();
   i->trunc_size = 12;
 
   i = &em->esp_integ_algs[IPSEC_INTEG_ALG_SHA_256_96];
-  i->md = EVP_sha256();
+  i->md = EVP_sha256 ();
   i->trunc_size = 12;
 
   i = &em->esp_integ_algs[IPSEC_INTEG_ALG_SHA_256_128];
-  i->md = EVP_sha256();
+  i->md = EVP_sha256 ();
   i->trunc_size = 16;
 
   i = &em->esp_integ_algs[IPSEC_INTEG_ALG_SHA_384_192];
-  i->md = EVP_sha384();
+  i->md = EVP_sha384 ();
   i->trunc_size = 24;
 
   i = &em->esp_integ_algs[IPSEC_INTEG_ALG_SHA_512_256];
-  i->md = EVP_sha512();
+  i->md = EVP_sha512 ();
   i->trunc_size = 32;
 
-  vec_validate_aligned(em->per_thread_data, tm->n_vlib_mains-1, CLIB_CACHE_LINE_BYTES);
+  vec_validate_aligned (em->per_thread_data, tm->n_vlib_mains - 1,
+			CLIB_CACHE_LINE_BYTES);
 
   int thread_id;
 
   for (thread_id = 0; thread_id < tm->n_vlib_mains - 1; thread_id++)
     {
-      EVP_CIPHER_CTX_init(&(em->per_thread_data[thread_id].encrypt_ctx));
-      EVP_CIPHER_CTX_init(&(em->per_thread_data[thread_id].decrypt_ctx));
-      HMAC_CTX_init(&(em->per_thread_data[thread_id].hmac_ctx));
+      EVP_CIPHER_CTX_init (&(em->per_thread_data[thread_id].encrypt_ctx));
+      EVP_CIPHER_CTX_init (&(em->per_thread_data[thread_id].decrypt_ctx));
+      HMAC_CTX_init (&(em->per_thread_data[thread_id].hmac_ctx));
     }
 }
 
 always_inline unsigned int
-hmac_calc(ipsec_integ_alg_t alg,
-          u8 * key,
-          int key_len,
-          u8 * data,
-          int data_len,
-          u8 * signature,
-          u8 use_esn,
-          u32 seq_hi)
+hmac_calc (ipsec_integ_alg_t alg,
+	   u8 * key,
+	   int key_len,
+	   u8 * data, int data_len, u8 * signature, u8 use_esn, u32 seq_hi)
 {
-  esp_main_t * em = &esp_main;
-  u32 cpu_index = os_get_cpu_number();
-  HMAC_CTX * ctx = &(em->per_thread_data[cpu_index].hmac_ctx);
-  const EVP_MD * md = NULL;
+  esp_main_t *em = &esp_main;
+  u32 cpu_index = os_get_cpu_number ();
+  HMAC_CTX *ctx = &(em->per_thread_data[cpu_index].hmac_ctx);
+  const EVP_MD *md = NULL;
   unsigned int len;
 
-  ASSERT(alg < IPSEC_INTEG_N_ALG);
+  ASSERT (alg < IPSEC_INTEG_N_ALG);
 
-  if (PREDICT_FALSE(em->esp_integ_algs[alg].md == 0))
+  if (PREDICT_FALSE (em->esp_integ_algs[alg].md == 0))
     return 0;
 
-  if (PREDICT_FALSE(alg != em->per_thread_data[cpu_index].last_integ_alg)) {
-    md = em->esp_integ_algs[alg].md;
-    em->per_thread_data[cpu_index].last_integ_alg = alg;
-  }
+  if (PREDICT_FALSE (alg != em->per_thread_data[cpu_index].last_integ_alg))
+    {
+      md = em->esp_integ_algs[alg].md;
+      em->per_thread_data[cpu_index].last_integ_alg = alg;
+    }
 
-  HMAC_Init(ctx, key, key_len, md);
+  HMAC_Init (ctx, key, key_len, md);
 
-  HMAC_Update(ctx, data, data_len);
+  HMAC_Update (ctx, data, data_len);
 
-  if (PREDICT_TRUE(use_esn))
-    HMAC_Update(ctx, (u8 *) &seq_hi, sizeof(seq_hi));
-  HMAC_Final(ctx, signature, &len);
+  if (PREDICT_TRUE (use_esn))
+    HMAC_Update (ctx, (u8 *) & seq_hi, sizeof (seq_hi));
+  HMAC_Final (ctx, signature, &len);
 
   return em->esp_integ_algs[alg].trunc_size;
 }
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
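
The esp_header_t and esp_footer_t structures in the patch model the RFC 4303
wire format: the ESP header carries the SPI and the low 32 bits of the
sequence number, with the payload following via the zero-length data[] member,
and the footer (pad length plus next-header byte) sits at the end of the
plaintext, just before the ICV. With AES-CBC, the payload plus the two-byte
footer must fill whole cipher blocks. Below is a minimal standalone sketch of
that padding arithmetic, with the VPP type aliases inlined so it builds
outside the tree; esp_pad_length is an illustrative helper, not part of esp.h.

#include <stdio.h>

typedef unsigned char u8;
typedef unsigned int u32;

/* copies of the wire-format structs from esp.h above */
typedef struct
{
  u32 spi;
  u32 seq;
  u8 data[0];			/* payload follows the fixed header */
} esp_header_t;

typedef struct
{
  u8 pad_length;
  u8 next_header;
} esp_footer_t;

/* pad bytes needed so payload + footer fills whole cipher blocks */
static int
esp_pad_length (int payload_len, int block_size)
{
  return (block_size - (payload_len + sizeof (esp_footer_t)) % block_size)
    % block_size;
}

int
main (void)
{
  /* a 53-byte payload with AES-CBC (16-byte blocks) needs 9 pad bytes */
  int pad = esp_pad_length (53, 16);
  printf ("pad_length = %d, total = %d bytes\n", pad,
	  53 + pad + (int) sizeof (esp_footer_t));
  return 0;
}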
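
hmac_calc computes the ICV over the ESP header plus the encrypted payload.
When extended sequence numbers are enabled (use_esn), the high 32 bits of the
sequence number (seq_hi) are appended to the authenticated data but never
transmitted, per RFC 4303, and only trunc_size bytes of the digest go on the
wire (12 for SHA1-96). Below is a standalone sketch of the same flow against
the OpenSSL 1.0.x HMAC_CTX interface used above; the key and packet bytes are
made-up placeholders.

/* build: gcc hmac_esn.c -lcrypto  (assumes OpenSSL 1.0.x, as esp.h does) */
#include <stdio.h>
#include <openssl/hmac.h>

int
main (void)
{
  unsigned char key[20] = { 0 };	/* placeholder integrity key */
  unsigned char pkt[64] = { 0 };	/* placeholder ESP header + ciphertext */
  unsigned int seq_hi = 1;		/* high 32 bits of the ESN */
  unsigned char digest[EVP_MAX_MD_SIZE];
  unsigned int len;
  int i;
  HMAC_CTX ctx;

  HMAC_CTX_init (&ctx);
  HMAC_Init (&ctx, key, sizeof (key), EVP_sha1 ());
  HMAC_Update (&ctx, pkt, sizeof (pkt));
  /* ESN: authenticate the high sequence bits without sending them */
  HMAC_Update (&ctx, (unsigned char *) &seq_hi, sizeof (seq_hi));
  HMAC_Final (&ctx, digest, &len);
  HMAC_CTX_cleanup (&ctx);

  /* SHA1-96: only the first trunc_size = 12 bytes go on the wire */
  for (i = 0; i < 12; i++)
    printf ("%02x", digest[i]);
  printf ("\n");
  return 0;
}

The md == NULL path in hmac_calc is deliberate: OpenSSL's HMAC_Init reuses
the previously configured digest when md is NULL, so the last_integ_alg cache
lets consecutive packets on a thread skip digest setup while still re-keying
on every call.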
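
esp_init gives every VPP worker its own cipher and HMAC contexts, each
starting on a fresh cache line (the CLIB_CACHE_LINE_ALIGN_MARK members), so
threads never contend on shared crypto state. Note that the loop above runs
while thread_id < tm->n_vlib_mains - 1 although vec_validate_aligned sizes
the vector for n_vlib_mains slots, so the last slot relies on the zero-fill
that vec_validate provides rather than the explicit _init calls. Below is a
minimal sketch of the same pattern with a plain array, initializing every
slot explicitly; per_thread_ctx_t and init_per_thread are illustrative names,
not VPP APIs, and OpenSSL 1.0.x is assumed.

#include <openssl/evp.h>
#include <openssl/hmac.h>

#define N_THREADS 4		/* stands in for tm->n_vlib_mains */

/* one set of contexts per worker, mirroring esp_main_per_thread_data_t;
   the cache-line alignment markers are omitted here for brevity */
typedef struct
{
  EVP_CIPHER_CTX encrypt_ctx;
  EVP_CIPHER_CTX decrypt_ctx;
  HMAC_CTX hmac_ctx;
} per_thread_ctx_t;

static per_thread_ctx_t ptd[N_THREADS];

void
init_per_thread (void)
{
  int t;

  for (t = 0; t < N_THREADS; t++)	/* covers slots 0..N_THREADS-1 */
    {
      EVP_CIPHER_CTX_init (&ptd[t].encrypt_ctx);
      EVP_CIPHER_CTX_init (&ptd[t].decrypt_ctx);
      HMAC_CTX_init (&ptd[t].hmac_ctx);
    }
}

int
main (void)
{
  init_per_thread ();
  return 0;
}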