X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=src%2Fvnet%2Fipsec%2Fesp.h;h=2f734aa05ce65fbc0a71e06f8c90caab01f7edc2;hb=1e3aa5e213c23588981ee17d1413a0441a40527a;hp=98db70179bd2b9b858ee5951d0507747210bfcd7;hpb=db93cd971320301eb21403caabada7a3ec6a4cce;p=vpp.git

diff --git a/src/vnet/ipsec/esp.h b/src/vnet/ipsec/esp.h
index 98db70179bd..2f734aa05ce 100644
--- a/src/vnet/ipsec/esp.h
+++ b/src/vnet/ipsec/esp.h
@@ -16,12 +16,9 @@
 #define __ESP_H__
 
 #include <vnet/ip/ip.h>
+#include <vnet/crypto/crypto.h>
 #include <vnet/ipsec/ipsec.h>
 
-#include <openssl/hmac.h>
-#include <openssl/rand.h>
-#include <openssl/evp.h>
-
 typedef struct
 {
   u32 spi;
@@ -42,6 +39,14 @@ typedef CLIB_PACKED (struct {
 }) ip4_and_esp_header_t;
 /* *INDENT-ON* */
 
+/* *INDENT-OFF* */
+typedef CLIB_PACKED (struct {
+  ip4_header_t ip4;
+  udp_header_t udp;
+  esp_header_t esp;
+}) ip4_and_udp_and_esp_header_t;
+/* *INDENT-ON* */
+
 /* *INDENT-OFF* */
 typedef CLIB_PACKED (struct {
   ip6_header_t ip6;
@@ -49,173 +54,22 @@ typedef CLIB_PACKED (struct {
 }) ip6_and_esp_header_t;
 /* *INDENT-ON* */
 
-typedef struct
-{
-  const EVP_CIPHER *type;
-} esp_crypto_alg_t;
-
-typedef struct
-{
-  const EVP_MD *md;
-  u8 trunc_size;
-} esp_integ_alg_t;
-
-typedef struct
-{
-  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
-  EVP_CIPHER_CTX encrypt_ctx;
-  CLIB_CACHE_LINE_ALIGN_MARK (cacheline1);
-  EVP_CIPHER_CTX decrypt_ctx;
-  CLIB_CACHE_LINE_ALIGN_MARK (cacheline2);
-  HMAC_CTX hmac_ctx;
-  ipsec_crypto_alg_t last_encrypt_alg;
-  ipsec_crypto_alg_t last_decrypt_alg;
-  ipsec_integ_alg_t last_integ_alg;
-} esp_main_per_thread_data_t;
-
-typedef struct
-{
-  esp_crypto_alg_t *esp_crypto_algs;
-  esp_integ_alg_t *esp_integ_algs;
-  esp_main_per_thread_data_t *per_thread_data;
-} esp_main_t;
-
-esp_main_t esp_main;
-
-#define ESP_WINDOW_SIZE (64)
-#define ESP_SEQ_MAX (4294967295UL)
+#define ESP_SEQ_MAX (4294967295UL)
+#define ESP_MAX_BLOCK_SIZE (16)
+#define ESP_MAX_ICV_SIZE (16)
 
 u8 *format_esp_header (u8 * s, va_list * args);
 
-always_inline int
-esp_replay_check (ipsec_sa_t * sa, u32 seq)
-{
-  u32 diff;
-
-  if (PREDICT_TRUE (seq > sa->last_seq))
-    return 0;
-
-  diff = sa->last_seq - seq;
-
-  if (ESP_WINDOW_SIZE > diff)
-    return (sa->replay_window & (1ULL << diff)) ? 1 : 0;
-  else
-    return 1;
-
-  return 0;
-}
-
-always_inline int
-esp_replay_check_esn (ipsec_sa_t * sa, u32 seq)
-{
-  u32 tl = sa->last_seq;
-  u32 th = sa->last_seq_hi;
-  u32 diff = tl - seq;
-
-  if (PREDICT_TRUE (tl >= (ESP_WINDOW_SIZE - 1)))
-    {
-      if (seq >= (tl - ESP_WINDOW_SIZE + 1))
-        {
-          sa->seq_hi = th;
-          if (seq <= tl)
-            return (sa->replay_window & (1ULL << diff)) ? 1 : 0;
-          else
-            return 0;
-        }
-      else
-        {
-          sa->seq_hi = th + 1;
-          return 0;
-        }
-    }
-  else
-    {
-      if (seq >= (tl - ESP_WINDOW_SIZE + 1))
-        {
-          sa->seq_hi = th - 1;
-          return (sa->replay_window & (1ULL << diff)) ? 1 : 0;
-        }
-      else
-        {
-          sa->seq_hi = th;
-          if (seq <= tl)
-            return (sa->replay_window & (1ULL << diff)) ? 1 : 0;
-          else
-            return 0;
-        }
-    }
-
-  return 0;
-}
-
 /* TODO seq increment should be atomic to be accessed by multiple workers */
-always_inline void
-esp_replay_advance (ipsec_sa_t * sa, u32 seq)
-{
-  u32 pos;
-
-  if (seq > sa->last_seq)
-    {
-      pos = seq - sa->last_seq;
-      if (pos < ESP_WINDOW_SIZE)
-        sa->replay_window = ((sa->replay_window) << pos) | 1;
-      else
-        sa->replay_window = 1;
-      sa->last_seq = seq;
-    }
-  else
-    {
-      pos = sa->last_seq - seq;
-      sa->replay_window |= (1ULL << pos);
-    }
-}
-
-always_inline void
-esp_replay_advance_esn (ipsec_sa_t * sa, u32 seq)
-{
-  int wrap = sa->seq_hi - sa->last_seq_hi;
-  u32 pos;
-
-  if (wrap == 0 && seq > sa->last_seq)
-    {
-      pos = seq - sa->last_seq;
-      if (pos < ESP_WINDOW_SIZE)
-        sa->replay_window = ((sa->replay_window) << pos) | 1;
-      else
-        sa->replay_window = 1;
-      sa->last_seq = seq;
-    }
-  else if (wrap > 0)
-    {
-      pos = ~seq + sa->last_seq + 1;
-      if (pos < ESP_WINDOW_SIZE)
-        sa->replay_window = ((sa->replay_window) << pos) | 1;
-      else
-        sa->replay_window = 1;
-      sa->last_seq = seq;
-      sa->last_seq_hi = sa->seq_hi;
-    }
-  else if (wrap < 0)
-    {
-      pos = ~seq + sa->last_seq + 1;
-      sa->replay_window |= (1ULL << pos);
-    }
-  else
-    {
-      pos = sa->last_seq - seq;
-      sa->replay_window |= (1ULL << pos);
-    }
-}
-
 always_inline int
 esp_seq_advance (ipsec_sa_t * sa)
 {
-  if (PREDICT_TRUE (sa->use_esn))
+  if (PREDICT_TRUE (ipsec_sa_is_set_USE_ESN (sa)))
     {
       if (PREDICT_FALSE (sa->seq == ESP_SEQ_MAX))
         {
-          if (PREDICT_FALSE
-              (sa->use_anti_replay && sa->seq_hi == ESP_SEQ_MAX))
+          if (PREDICT_FALSE (ipsec_sa_is_set_USE_ANTI_REPLAY (sa) &&
+                             sa->seq_hi == ESP_SEQ_MAX))
            return 1;
          sa->seq_hi++;
        }
@@ -223,7 +77,8 @@ esp_seq_advance (ipsec_sa_t * sa)
     }
   else
     {
-      if (PREDICT_FALSE (sa->use_anti_replay && sa->seq == ESP_SEQ_MAX))
+      if (PREDICT_FALSE (ipsec_sa_is_set_USE_ANTI_REPLAY (sa) &&
+                         sa->seq == ESP_SEQ_MAX))
        return 1;
       sa->seq++;
     }
@@ -231,86 +86,34 @@ esp_seq_advance (ipsec_sa_t * sa)
   return 0;
 }
 
-always_inline void
-esp_init ()
-{
-  esp_main_t *em = &esp_main;
-  vlib_thread_main_t *tm = vlib_get_thread_main ();
-
-  memset (em, 0, sizeof (em[0]));
-
-  vec_validate (em->esp_crypto_algs, IPSEC_CRYPTO_N_ALG - 1);
-  em->esp_crypto_algs[IPSEC_CRYPTO_ALG_AES_CBC_128].type = EVP_aes_128_cbc ();
-  em->esp_crypto_algs[IPSEC_CRYPTO_ALG_AES_CBC_192].type = EVP_aes_192_cbc ();
-  em->esp_crypto_algs[IPSEC_CRYPTO_ALG_AES_CBC_256].type = EVP_aes_256_cbc ();
-
-  vec_validate (em->esp_integ_algs, IPSEC_INTEG_N_ALG - 1);
-  esp_integ_alg_t *i;
-
-  i = &em->esp_integ_algs[IPSEC_INTEG_ALG_SHA1_96];
-  i->md = EVP_sha1 ();
-  i->trunc_size = 12;
-
-  i = &em->esp_integ_algs[IPSEC_INTEG_ALG_SHA_256_96];
-  i->md = EVP_sha256 ();
-  i->trunc_size = 12;
-
-  i = &em->esp_integ_algs[IPSEC_INTEG_ALG_SHA_256_128];
-  i->md = EVP_sha256 ();
-  i->trunc_size = 16;
-
-  i = &em->esp_integ_algs[IPSEC_INTEG_ALG_SHA_384_192];
-  i->md = EVP_sha384 ();
-  i->trunc_size = 24;
-
-  i = &em->esp_integ_algs[IPSEC_INTEG_ALG_SHA_512_256];
-  i->md = EVP_sha512 ();
-  i->trunc_size = 32;
-
-  vec_validate_aligned (em->per_thread_data, tm->n_vlib_mains - 1,
-                        CLIB_CACHE_LINE_BYTES);
-  int thread_id;
-
-  for (thread_id = 0; thread_id < tm->n_vlib_mains - 1; thread_id++)
-    {
-      EVP_CIPHER_CTX_init (&(em->per_thread_data[thread_id].encrypt_ctx));
-      EVP_CIPHER_CTX_init (&(em->per_thread_data[thread_id].decrypt_ctx));
-      HMAC_CTX_init (&(em->per_thread_data[thread_id].hmac_ctx));
-    }
-}
 
 always_inline unsigned int
-hmac_calc (ipsec_integ_alg_t alg,
-           u8 * key,
-           int key_len,
-           u8 * data, int data_len, u8 * signature, u8 use_esn, u32 seq_hi)
+hmac_calc (vlib_main_t * vm, ipsec_sa_t * sa, u8 * data, int data_len,
+           u8 * signature)
 {
-  esp_main_t *em = &esp_main;
-  u32 thread_index = vlib_get_thread_index ();
-  HMAC_CTX *ctx = &(em->per_thread_data[thread_index].hmac_ctx);
-  const EVP_MD *md = NULL;
-  unsigned int len;
-
-  ASSERT (alg < IPSEC_INTEG_N_ALG);
+  vnet_crypto_op_t _op, *op = &_op;
 
-  if (PREDICT_FALSE (em->esp_integ_algs[alg].md == 0))
+  if (PREDICT_FALSE (sa->integ_op_type == 0))
     return 0;
 
-  if (PREDICT_FALSE (alg != em->per_thread_data[thread_index].last_integ_alg))
-    {
-      md = em->esp_integ_algs[alg].md;
-      em->per_thread_data[thread_index].last_integ_alg = alg;
-    }
-
-  HMAC_Init (ctx, key, key_len, md);
+  op->op = sa->integ_op_type;
+  op->key = sa->integ_key.data;
+  op->key_len = sa->integ_key.len;
+  op->src = data;
+  op->len = data_len;
+  op->dst = signature;
+  op->hmac_trunc_len = sa->integ_trunc_size;
 
-  HMAC_Update (ctx, data, data_len);
+  if (ipsec_sa_is_set_USE_ESN (sa))
+    {
+      u32 seq_hi = clib_host_to_net_u32 (sa->seq_hi);
 
-  if (PREDICT_TRUE (use_esn))
-    HMAC_Update (ctx, (u8 *) & seq_hi, sizeof (seq_hi));
-  HMAC_Final (ctx, signature, &len);
+      op->len += 4;
+      clib_memcpy (data + data_len, &seq_hi, 4);
    }
 
-  return em->esp_integ_algs[alg].trunc_size;
+  vnet_crypto_process_ops (vm, op, 1);
+  return sa->integ_trunc_size;
 }
 
 #endif /* __ESP_H__ */
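
For reference, below is a minimal, self-contained sketch of the 64-bit sliding-window anti-replay scheme that this change drops from esp.h. It is not part of the patch: replay_state_t, replay_check and replay_advance are local stand-in names rather than VPP symbols, but the window arithmetic mirrors the esp_replay_check/esp_replay_advance logic shown as deleted above (replay_window is a bitmap of the most recent ESP_WINDOW_SIZE sequence numbers relative to last_seq).

#include <stdio.h>
#include <stdint.h>

#define ESP_WINDOW_SIZE 64

typedef struct
{
  uint64_t replay_window;	/* bit n set => (last_seq - n) already seen */
  uint32_t last_seq;		/* highest sequence number accepted so far */
} replay_state_t;

/* Return 1 if seq is a replay or falls behind the window, 0 if acceptable. */
static int
replay_check (const replay_state_t * rs, uint32_t seq)
{
  uint32_t diff;

  if (seq > rs->last_seq)
    return 0;			/* ahead of the window: always fresh */

  diff = rs->last_seq - seq;
  if (diff >= ESP_WINDOW_SIZE)
    return 1;			/* too old: treat as replay */

  return (rs->replay_window & (1ULL << diff)) ? 1 : 0;
}

/* Slide the window forward for a newer seq, or mark an in-window seq as seen. */
static void
replay_advance (replay_state_t * rs, uint32_t seq)
{
  if (seq > rs->last_seq)
    {
      uint32_t pos = seq - rs->last_seq;
      if (pos < ESP_WINDOW_SIZE)
	rs->replay_window = (rs->replay_window << pos) | 1;
      else
	rs->replay_window = 1;
      rs->last_seq = seq;
    }
  else
    rs->replay_window |= 1ULL << (rs->last_seq - seq);
}

int
main (void)
{
  replay_state_t rs = { 0, 0 };
  uint32_t seqs[] = { 1, 3, 2, 3, 70, 5 };
  unsigned int i;

  for (i = 0; i < sizeof (seqs) / sizeof (seqs[0]); i++)
    {
      if (replay_check (&rs, seqs[i]))
	printf ("seq %u: drop (replay or stale)\n", (unsigned) seqs[i]);
      else
	{
	  replay_advance (&rs, seqs[i]);
	  printf ("seq %u: accept\n", (unsigned) seqs[i]);
	}
    }
  return 0;
}

Built with any C99 compiler, the sample accepts sequence numbers 1, 3, 2 and 70, drops the repeated 3 as a replay, and drops 5 because it has fallen behind the 64-entry window once 70 is accepted.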