X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=src%2Fvnet%2Fipsec%2Fesp.h;h=d24b5ea41025002a6537b4aff47e4fa72598de7b;hb=4a58e49cf;hp=82e3c961e1a40d40639f0eee5a1f604d6b571718;hpb=430ac939d115b59e3f7f704645c6f88878223e1b;p=vpp.git

diff --git a/src/vnet/ipsec/esp.h b/src/vnet/ipsec/esp.h
index 82e3c961e1a..d24b5ea4102 100644
--- a/src/vnet/ipsec/esp.h
+++ b/src/vnet/ipsec/esp.h
@@ -16,15 +16,16 @@
 #define __ESP_H__
 
 #include <vnet/ip/ip.h>
+#include <vnet/crypto/crypto.h>
 #include <vnet/ipsec/ipsec.h>
-#include <openssl/hmac.h>
-#include <openssl/rand.h>
-#include <openssl/evp.h>
-
 
 typedef struct
 {
-  u32 spi;
+  union
+  {
+    u32 spi;
+    u8 spi_bytes[4];
+  };
   u32 seq;
   u8 data[0];
 } esp_header_t;
@@ -42,6 +43,14 @@ typedef CLIB_PACKED (struct {
 }) ip4_and_esp_header_t;
 /* *INDENT-ON* */
 
+/* *INDENT-OFF* */
+typedef CLIB_PACKED (struct {
+  ip4_header_t ip4;
+  udp_header_t udp;
+  esp_header_t esp;
+}) ip4_and_udp_and_esp_header_t;
+/* *INDENT-ON* */
+
 /* *INDENT-OFF* */
 typedef CLIB_PACKED (struct {
   ip6_header_t ip6;
@@ -49,294 +58,188 @@ typedef CLIB_PACKED (struct {
 }) ip6_and_esp_header_t;
 /* *INDENT-ON* */
 
-typedef struct
-{
-  const EVP_CIPHER *type;
-} ipsec_proto_main_crypto_alg_t;
-
-typedef struct
-{
-  const EVP_MD *md;
-  u8 trunc_size;
-} ipsec_proto_main_integ_alg_t;
-
-typedef struct
-{
-  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
-#if OPENSSL_VERSION_NUMBER >= 0x10100000L
-  EVP_CIPHER_CTX *encrypt_ctx;
-#else
-  EVP_CIPHER_CTX encrypt_ctx;
-#endif
-  CLIB_CACHE_LINE_ALIGN_MARK (cacheline1);
-#if OPENSSL_VERSION_NUMBER >= 0x10100000L
-  EVP_CIPHER_CTX *decrypt_ctx;
-#else
-  EVP_CIPHER_CTX decrypt_ctx;
-#endif
-  CLIB_CACHE_LINE_ALIGN_MARK (cacheline2);
-#if OPENSSL_VERSION_NUMBER >= 0x10100000L
-  HMAC_CTX *hmac_ctx;
-#else
-  HMAC_CTX hmac_ctx;
-#endif
-  ipsec_crypto_alg_t last_encrypt_alg;
-  ipsec_crypto_alg_t last_decrypt_alg;
-  ipsec_integ_alg_t last_integ_alg;
-} ipsec_proto_main_per_thread_data_t;
-
-typedef struct
+/**
+ * AES GCM Additional Authentication data
+ */
+typedef struct esp_aead_t_
 {
-  ipsec_proto_main_crypto_alg_t *ipsec_proto_main_crypto_algs;
-  ipsec_proto_main_integ_alg_t *ipsec_proto_main_integ_algs;
-  ipsec_proto_main_per_thread_data_t *per_thread_data;
-} ipsec_proto_main_t;
-
-extern ipsec_proto_main_t ipsec_proto_main;
-
-#define ESP_WINDOW_SIZE (64)
-#define ESP_SEQ_MAX (4294967295UL)
+  /**
+   * for GCM: when using ESN it's:
+   *   SPI, seq-hi, seq-low
+   * else
+   *   SPI, seq-low
+   */
+  u32 data[3];
+} __clib_packed esp_aead_t;
+
+#define ESP_SEQ_MAX (4294967295UL)
+#define ESP_MAX_BLOCK_SIZE (16)
+#define ESP_MAX_IV_SIZE (16)
+#define ESP_MAX_ICV_SIZE (32)
 
 u8 *format_esp_header (u8 * s, va_list * args);
 
+/* TODO seq increment should be atomic to be accessed by multiple workers */
 always_inline int
-esp_replay_check (ipsec_sa_t * sa, u32 seq)
-{
-  u32 diff;
-
-  if (PREDICT_TRUE (seq > sa->last_seq))
-    return 0;
-
-  diff = sa->last_seq - seq;
-
-  if (ESP_WINDOW_SIZE > diff)
-    return (sa->replay_window & (1ULL << diff)) ? 1 : 0;
-  else
-    return 1;
-
-  return 0;
-}
-
-always_inline int
-esp_replay_check_esn (ipsec_sa_t * sa, u32 seq)
+esp_seq_advance (ipsec_sa_t * sa)
 {
-  u32 tl = sa->last_seq;
-  u32 th = sa->last_seq_hi;
-  u32 diff = tl - seq;
-
-  if (PREDICT_TRUE (tl >= (ESP_WINDOW_SIZE - 1)))
+  if (PREDICT_TRUE (ipsec_sa_is_set_USE_ESN (sa)))
     {
-      if (seq >= (tl - ESP_WINDOW_SIZE + 1))
-        {
-          sa->seq_hi = th;
-          if (seq <= tl)
-            return (sa->replay_window & (1ULL << diff)) ? 1 : 0;
-          else
-            return 0;
-        }
-      else
+      if (PREDICT_FALSE (sa->seq == ESP_SEQ_MAX))
         {
-          sa->seq_hi = th + 1;
-          return 0;
+          if (PREDICT_FALSE (ipsec_sa_is_set_USE_ANTI_REPLAY (sa) &&
+                             sa->seq_hi == ESP_SEQ_MAX))
+            return 1;
+          sa->seq_hi++;
         }
+      sa->seq++;
     }
   else
     {
-      if (seq >= (tl - ESP_WINDOW_SIZE + 1))
-        {
-          sa->seq_hi = th - 1;
-          return (sa->replay_window & (1ULL << diff)) ? 1 : 0;
-        }
-      else
-        {
-          sa->seq_hi = th;
-          if (seq <= tl)
-            return (sa->replay_window & (1ULL << diff)) ? 1 : 0;
-          else
-            return 0;
-        }
+      if (PREDICT_FALSE (ipsec_sa_is_set_USE_ANTI_REPLAY (sa) &&
+                         sa->seq == ESP_SEQ_MAX))
+        return 1;
+      sa->seq++;
     }
 
   return 0;
 }
 
-/* TODO seq increment should be atomic to be accessed by multiple workers */
-always_inline void
-esp_replay_advance (ipsec_sa_t * sa, u32 seq)
+always_inline u16
+esp_aad_fill (u8 * data, const esp_header_t * esp, const ipsec_sa_t * sa)
 {
-  u32 pos;
+  esp_aead_t *aad;
+
+  aad = (esp_aead_t *) data;
+  aad->data[0] = esp->spi;
 
-  if (seq > sa->last_seq)
+  if (ipsec_sa_is_set_USE_ESN (sa))
     {
-      pos = seq - sa->last_seq;
-      if (pos < ESP_WINDOW_SIZE)
-        sa->replay_window = ((sa->replay_window) << pos) | 1;
-      else
-        sa->replay_window = 1;
-      sa->last_seq = seq;
+      /* SPI, seq-hi, seq-low */
+      aad->data[1] = (u32) clib_host_to_net_u32 (sa->seq_hi);
+      aad->data[2] = esp->seq;
+      return 12;
    }
   else
    {
-      pos = sa->last_seq - seq;
-      sa->replay_window |= (1ULL << pos);
+      /* SPI, seq-low */
+      aad->data[1] = esp->seq;
+      return 8;
    }
 }
 
+/* Special case to drop or hand off packets for sync/async modes.
+ *
+ * Unlike sync mode, async mode only enqueues drop or hand-off packets to
+ * the next nodes.
+ */
 always_inline void
-esp_replay_advance_esn (ipsec_sa_t * sa, u32 seq)
+esp_set_next_index (int is_async, u32 * from, u16 * nexts, u32 bi,
+                    u16 * drop_index, u16 drop_next, u16 * next)
 {
-  int wrap = sa->seq_hi - sa->last_seq_hi;
-  u32 pos;
-
-  if (wrap == 0 && seq > sa->last_seq)
-    {
-      pos = seq - sa->last_seq;
-      if (pos < ESP_WINDOW_SIZE)
-        sa->replay_window = ((sa->replay_window) << pos) | 1;
-      else
-        sa->replay_window = 1;
-      sa->last_seq = seq;
-    }
-  else if (wrap > 0)
-    {
-      pos = ~seq + sa->last_seq + 1;
-      if (pos < ESP_WINDOW_SIZE)
-        sa->replay_window = ((sa->replay_window) << pos) | 1;
-      else
-        sa->replay_window = 1;
-      sa->last_seq = seq;
-      sa->last_seq_hi = sa->seq_hi;
-    }
-  else if (wrap < 0)
+  if (is_async)
     {
-      pos = ~seq + sa->last_seq + 1;
-      sa->replay_window |= (1ULL << pos);
+      from[*drop_index] = bi;
+      nexts[*drop_index] = drop_next;
+      *drop_index += 1;
    }
   else
-    {
-      pos = sa->last_seq - seq;
-      sa->replay_window |= (1ULL << pos);
-    }
+    next[0] = drop_next;
 }
 
-always_inline int
-esp_seq_advance (ipsec_sa_t * sa)
+/* when submitting a frame fails, drop all buffers in the frame */
+always_inline void
+esp_async_recycle_failed_submit (vnet_crypto_async_frame_t * f,
+                                 vlib_buffer_t ** b, u32 * from, u16 * nexts,
+                                 u16 * n_dropped, u16 drop_next_index,
+                                 vlib_error_t err)
 {
-  if (PREDICT_TRUE (sa->use_esn))
+  u32 n_drop = f->n_elts;
+  u32 *bi = f->buffer_indices;
+  b -= n_drop;
+  while (n_drop--)
     {
-      if (PREDICT_FALSE (sa->seq == ESP_SEQ_MAX))
-        {
-          if (PREDICT_FALSE
-              (sa->use_anti_replay && sa->seq_hi == ESP_SEQ_MAX))
-            return 1;
-          sa->seq_hi++;
-        }
-      sa->seq++;
+      b[0]->error = err;
+      esp_set_next_index (1, from, nexts, bi[0], n_dropped, drop_next_index,
+                          NULL);
+      bi++;
+      b++;
    }
-  else
-    {
-      if (PREDICT_FALSE (sa->use_anti_replay && sa->seq == ESP_SEQ_MAX))
-        return 1;
-      sa->seq++;
-    }
-
-  return 0;
+  vnet_crypto_async_reset_frame (f);
 }
 
-always_inline void
-ipsec_proto_init ()
+/**
+ * The post data structure for esp_encrypt/decrypt_inline to write to the
+ * vlib_buffer_t opaque unused field, and for post nodes to pick up after
+ * dequeue.
+ **/
+typedef struct
 {
-  ipsec_proto_main_t *em = &ipsec_proto_main;
-  vlib_thread_main_t *tm = vlib_get_thread_main ();
-
-  memset (em, 0, sizeof (em[0]));
-
-  vec_validate (em->ipsec_proto_main_crypto_algs, IPSEC_CRYPTO_N_ALG - 1);
-  em->ipsec_proto_main_crypto_algs[IPSEC_CRYPTO_ALG_AES_CBC_128].type =
-    EVP_aes_128_cbc ();
-  em->ipsec_proto_main_crypto_algs[IPSEC_CRYPTO_ALG_AES_CBC_192].type =
-    EVP_aes_192_cbc ();
-  em->ipsec_proto_main_crypto_algs[IPSEC_CRYPTO_ALG_AES_CBC_256].type =
-    EVP_aes_256_cbc ();
-
-  vec_validate (em->ipsec_proto_main_integ_algs, IPSEC_INTEG_N_ALG - 1);
-  ipsec_proto_main_integ_alg_t *i;
-
-  i = &em->ipsec_proto_main_integ_algs[IPSEC_INTEG_ALG_SHA1_96];
-  i->md = EVP_sha1 ();
-  i->trunc_size = 12;
-
-  i = &em->ipsec_proto_main_integ_algs[IPSEC_INTEG_ALG_SHA_256_96];
-  i->md = EVP_sha256 ();
-  i->trunc_size = 12;
-
-  i = &em->ipsec_proto_main_integ_algs[IPSEC_INTEG_ALG_SHA_256_128];
-  i->md = EVP_sha256 ();
-  i->trunc_size = 16;
-
-  i = &em->ipsec_proto_main_integ_algs[IPSEC_INTEG_ALG_SHA_384_192];
-  i->md = EVP_sha384 ();
-  i->trunc_size = 24;
-
-  i = &em->ipsec_proto_main_integ_algs[IPSEC_INTEG_ALG_SHA_512_256];
-  i->md = EVP_sha512 ();
-  i->trunc_size = 32;
+  union
+  {
+    struct
+    {
+      u8 icv_sz;
+      u8 iv_sz;
+      ipsec_sa_flags_t flags;
+      u32 sa_index;
+    };
+    u64 sa_data;
+  };
 
-  vec_validate_aligned (em->per_thread_data, tm->n_vlib_mains - 1,
-                        CLIB_CACHE_LINE_BYTES);
-  int thread_id;
+  u32 seq;
+  i16 current_data;
+  i16 current_length;
+  u16 hdr_sz;
+  u16 is_chain;
+  u32 protect_index;
+} esp_decrypt_packet_data_t;
 
-  for (thread_id = 0; thread_id < tm->n_vlib_mains - 1; thread_id++)
-    {
-#if OPENSSL_VERSION_NUMBER >= 0x10100000L
-      em->per_thread_data[thread_id].encrypt_ctx = EVP_CIPHER_CTX_new ();
-      em->per_thread_data[thread_id].decrypt_ctx = EVP_CIPHER_CTX_new ();
-      em->per_thread_data[thread_id].hmac_ctx = HMAC_CTX_new ();
-#else
-      EVP_CIPHER_CTX_init (&(em->per_thread_data[thread_id].encrypt_ctx));
-      EVP_CIPHER_CTX_init (&(em->per_thread_data[thread_id].decrypt_ctx));
-      HMAC_CTX_init (&(em->per_thread_data[thread_id].hmac_ctx));
-#endif
-    }
-}
+STATIC_ASSERT_SIZEOF (esp_decrypt_packet_data_t, 3 * sizeof (u64));
 
-always_inline unsigned int
-hmac_calc (ipsec_integ_alg_t alg,
-           u8 * key,
-           int key_len,
-           u8 * data, int data_len, u8 * signature, u8 use_esn, u32 seq_hi)
+/* we are forced to store the decrypt post data into 2 separate places -
+   vlib_opaque and opaque2. */
+typedef struct
 {
-  ipsec_proto_main_t *em = &ipsec_proto_main;
-  u32 thread_index = vlib_get_thread_index ();
-#if OPENSSL_VERSION_NUMBER >= 0x10100000L
-  HMAC_CTX *ctx = em->per_thread_data[thread_index].hmac_ctx;
-#else
-  HMAC_CTX *ctx = &(em->per_thread_data[thread_index].hmac_ctx);
-#endif
-  const EVP_MD *md = NULL;
-  unsigned int len;
-
-  ASSERT (alg < IPSEC_INTEG_N_ALG);
+  vlib_buffer_t *lb;
+  u32 free_buffer_index;
+  u8 icv_removed;
+} esp_decrypt_packet_data2_t;
 
-  if (PREDICT_FALSE (em->ipsec_proto_main_integ_algs[alg].md == 0))
-    return 0;
+typedef union
+{
+  u16 next_index;
+  esp_decrypt_packet_data_t decrypt_data;
+} esp_post_data_t;
 
-  if (PREDICT_FALSE (alg != em->per_thread_data[thread_index].last_integ_alg))
-    {
-      md = em->ipsec_proto_main_integ_algs[alg].md;
-      em->per_thread_data[thread_index].last_integ_alg = alg;
-    }
+STATIC_ASSERT (sizeof (esp_post_data_t) <=
+               STRUCT_SIZE_OF (vnet_buffer_opaque_t, unused),
+               "Custom meta-data too large for vnet_buffer_opaque_t");
 
-  HMAC_Init_ex (ctx, key, key_len, md, NULL);
+#define esp_post_data(b) \
+  ((esp_post_data_t *)((u8 *)((b)->opaque) \
+      + STRUCT_OFFSET_OF (vnet_buffer_opaque_t, unused)))
 
-  HMAC_Update (ctx, data, data_len);
+STATIC_ASSERT (sizeof (esp_decrypt_packet_data2_t) <=
+               STRUCT_SIZE_OF (vnet_buffer_opaque2_t, unused),
+               "Custom meta-data too large for vnet_buffer_opaque2_t");
 
-  if (PREDICT_TRUE (use_esn))
-    HMAC_Update (ctx, (u8 *) & seq_hi, sizeof (seq_hi));
-  HMAC_Final (ctx, signature, &len);
+#define esp_post_data2(b) \
+  ((esp_decrypt_packet_data2_t *)((u8 *)((b)->opaque2) \
+      + STRUCT_OFFSET_OF (vnet_buffer_opaque2_t, unused)))
 
-  return em->ipsec_proto_main_integ_algs[alg].trunc_size;
-}
+typedef struct
+{
+  /* ESP post node indices for async crypto */
+  u32 esp4_post_next;
+  u32 esp6_post_next;
+  u32 esp4_tun_post_next;
+  u32 esp6_tun_post_next;
+  u32 esp_mpls_tun_post_next;
+} esp_async_post_next_t;
+
+extern esp_async_post_next_t esp_encrypt_async_next;
+extern esp_async_post_next_t esp_decrypt_async_next;
 
 #endif /* __ESP_H__ */
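
The new esp_seq_advance() keeps a 64-bit extended sequence number (ESN) as two 32-bit halves: seq is transmitted in the ESP header, seq_hi is only folded into the ICV, and the function refuses to wrap when anti-replay is enabled. The standalone sketch below mirrors that logic with plain C99 types; the names sa_seq and seq_advance are invented for illustration and are not VPP API.

/* Illustrative sketch (not VPP code): advance an ESN kept as two 32-bit
 * halves, the way esp_seq_advance() does.  Returns non-zero when the SA is
 * exhausted and must not wrap (anti-replay enabled). */
#include <stdint.h>
#include <stdio.h>

#define SEQ_MAX 0xffffffffUL

struct sa_seq
{
  uint32_t seq;      /* low half, transmitted in the ESP header */
  uint32_t seq_hi;   /* high half, only authenticated (ESN)     */
};

static int
seq_advance (struct sa_seq *sa, int use_esn, int use_anti_replay)
{
  if (use_esn)
    {
      if (sa->seq == SEQ_MAX)
        {
          if (use_anti_replay && sa->seq_hi == SEQ_MAX)
            return 1;          /* 64-bit sequence space exhausted */
          sa->seq_hi++;        /* carry into the high half */
        }
      sa->seq++;               /* wraps 0xffffffff -> 0 */
      return 0;
    }
  if (use_anti_replay && sa->seq == SEQ_MAX)
    return 1;                  /* 32-bit sequence space exhausted */
  sa->seq++;
  return 0;
}

int
main (void)
{
  struct sa_seq sa = { .seq = 0xffffffff, .seq_hi = 0 };
  seq_advance (&sa, 1 /* ESN */, 1 /* anti-replay */);
  printf ("seq_hi=%u seq=%u\n", sa.seq_hi, sa.seq);   /* prints 1 0 */
  return 0;
}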
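
esp_aad_fill() builds the additional authenticated data handed to AES-GCM: SPI, seq-hi, seq-low (12 bytes) when ESN is in use, otherwise SPI, seq-low (8 bytes), all in network byte order. A self-contained sketch of that byte layout follows; fill_esp_gcm_aad() and the main() harness are invented names for illustration, not VPP API.

/* Illustrative sketch (not VPP code): lay out the ESP AES-GCM AAD the same
 * way esp_aad_fill() does, with every word in network byte order. */
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Returns the AAD length: 12 bytes with ESN, 8 bytes without. */
static size_t
fill_esp_gcm_aad (uint8_t aad[12], uint32_t spi, uint64_t seq64, int use_esn)
{
  uint32_t words[3];
  size_t n = 0;

  words[n++] = htonl (spi);                          /* SPI     */
  if (use_esn)
    words[n++] = htonl ((uint32_t) (seq64 >> 32));   /* seq-hi  */
  words[n++] = htonl ((uint32_t) seq64);             /* seq-low */

  memcpy (aad, words, n * sizeof (uint32_t));
  return n * sizeof (uint32_t);
}

int
main (void)
{
  uint8_t aad[12];
  size_t len = fill_esp_gcm_aad (aad, 0x1000, 0x100000002ULL, 1 /* ESN */);
  size_t i;

  printf ("%zu byte AAD:", len);
  for (i = 0; i < len; i++)
    printf (" %02x", aad[i]);
  printf ("\n");
  return 0;
}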
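
esp_post_data() and esp_post_data2() overlay the decrypt post data on the unused tail of the buffer's opaque and opaque2 metadata, and the STATIC_ASSERTs above guarantee the structures fit. The sketch below shows the same offsetof-based overlay with invented types (fake_buffer_opaque_t, my_post_data_t); it illustrates the pattern only and does not reproduce VPP's real buffer layout.

/* Illustrative sketch (not VPP code): stash per-packet metadata in the
 * reserved "unused" region of a larger opaque area, as the esp_post_data()
 * macro does, with a compile-time check that it fits. */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef struct
{
  uint32_t owned_by_other_features[8];   /* metadata used elsewhere           */
  uint8_t unused[24];                    /* scratch space a feature may claim */
} fake_buffer_opaque_t;

typedef struct
{
  uint32_t sa_index;
  uint32_t seq;
  uint16_t next_index;
} my_post_data_t;

/* refuse to compile if the stashed struct outgrows the scratch space */
static_assert (sizeof (my_post_data_t) <=
               sizeof (((fake_buffer_opaque_t *) 0)->unused),
               "post data too large for the opaque scratch space");

#define my_post_data(o) \
  ((my_post_data_t *) ((uint8_t *) (o) + offsetof (fake_buffer_opaque_t, unused)))

int
main (void)
{
  fake_buffer_opaque_t opaque = { { 0 }, { 0 } };

  /* the enqueue path writes the per-packet state ... */
  my_post_data (&opaque)->sa_index = 7;
  my_post_data (&opaque)->next_index = 3;

  /* ... and the post-dequeue node picks it up later */
  printf ("sa %u next %u\n", my_post_data (&opaque)->sa_index,
          (unsigned) my_post_data (&opaque)->next_index);
  return 0;
}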