#define __ESP_H__
#include <vnet/ip/ip.h>
+#include <vnet/crypto/crypto.h>
#include <vnet/ipsec/ipsec.h>
-#include <openssl/hmac.h>
-#include <openssl/rand.h>
-#include <openssl/evp.h>
-
typedef struct
{
  union
  {
    u32 spi;
    u8 spi_bytes[4];
  };
  u32 seq;
  u8 data[0];
} esp_header_t;

/* *INDENT-OFF* */
typedef CLIB_PACKED (struct {
  ip4_header_t ip4;
  esp_header_t esp;
}) ip4_and_esp_header_t;
/* *INDENT-ON* */
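+/* ESP encapsulated in UDP, as used for NAT traversal (RFC 3948) */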
+/* *INDENT-OFF* */
+typedef CLIB_PACKED (struct {
+ ip4_header_t ip4;
+ udp_header_t udp;
+ esp_header_t esp;
+}) ip4_and_udp_and_esp_header_t;
+/* *INDENT-ON* */
+
/* *INDENT-OFF* */
typedef CLIB_PACKED (struct {
ip6_header_t ip6;
esp_header_t esp;
}) ip6_and_esp_header_t;
/* *INDENT-ON* */
-typedef struct
-{
- const EVP_CIPHER *type;
-} ipsec_proto_main_crypto_alg_t;
-
-typedef struct
-{
- const EVP_MD *md;
- u8 trunc_size;
-} ipsec_proto_main_integ_alg_t;
-
-typedef struct
-{
- CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
-#if OPENSSL_VERSION_NUMBER >= 0x10100000L
- EVP_CIPHER_CTX *encrypt_ctx;
-#else
- EVP_CIPHER_CTX encrypt_ctx;
-#endif
- CLIB_CACHE_LINE_ALIGN_MARK (cacheline1);
-#if OPENSSL_VERSION_NUMBER >= 0x10100000L
- EVP_CIPHER_CTX *decrypt_ctx;
-#else
- EVP_CIPHER_CTX decrypt_ctx;
-#endif
- CLIB_CACHE_LINE_ALIGN_MARK (cacheline2);
-#if OPENSSL_VERSION_NUMBER >= 0x10100000L
- HMAC_CTX *hmac_ctx;
-#else
- HMAC_CTX hmac_ctx;
-#endif
- ipsec_crypto_alg_t last_encrypt_alg;
- ipsec_crypto_alg_t last_decrypt_alg;
- ipsec_integ_alg_t last_integ_alg;
-} ipsec_proto_main_per_thread_data_t;
-
-typedef struct
+/**
+ * AES GCM Additional Authenticated Data (AAD)
+ */
+typedef struct esp_aead_t_
{
- ipsec_proto_main_crypto_alg_t *ipsec_proto_main_crypto_algs;
- ipsec_proto_main_integ_alg_t *ipsec_proto_main_integ_algs;
- ipsec_proto_main_per_thread_data_t *per_thread_data;
-} ipsec_proto_main_t;
-
-extern ipsec_proto_main_t ipsec_proto_main;
-
-#define ESP_WINDOW_SIZE (64)
-#define ESP_SEQ_MAX (4294967295UL)
+ /**
+ * for GCM: when using ESN it's:
+ * SPI, seq-hi, seq-low
+ * else
+ * SPI, seq-low
+ */
+ u32 data[3];
+} __clib_packed esp_aead_t;
+
+#define ESP_SEQ_MAX (4294967295UL)
+#define ESP_MAX_BLOCK_SIZE (16)
+#define ESP_MAX_IV_SIZE (16)
+#define ESP_MAX_ICV_SIZE (32)
u8 *format_esp_header (u8 * s, va_list * args);
-always_inline int
-esp_replay_check (ipsec_sa_t * sa, u32 seq)
-{
- u32 diff;
-
- if (PREDICT_TRUE (seq > sa->last_seq))
- return 0;
-
- diff = sa->last_seq - seq;
-
- if (ESP_WINDOW_SIZE > diff)
- return (sa->replay_window & (1ULL << diff)) ? 1 : 0;
- else
- return 1;
-
- return 0;
-}
-
-always_inline int
-esp_replay_check_esn (ipsec_sa_t * sa, u32 seq)
-{
- u32 tl = sa->last_seq;
- u32 th = sa->last_seq_hi;
- u32 diff = tl - seq;
-
- if (PREDICT_TRUE (tl >= (ESP_WINDOW_SIZE - 1)))
- {
- if (seq >= (tl - ESP_WINDOW_SIZE + 1))
- {
- sa->seq_hi = th;
- if (seq <= tl)
- return (sa->replay_window & (1ULL << diff)) ? 1 : 0;
- else
- return 0;
- }
- else
- {
- sa->seq_hi = th + 1;
- return 0;
- }
- }
- else
- {
- if (seq >= (tl - ESP_WINDOW_SIZE + 1))
- {
- sa->seq_hi = th - 1;
- return (sa->replay_window & (1ULL << diff)) ? 1 : 0;
- }
- else
- {
- sa->seq_hi = th;
- if (seq <= tl)
- return (sa->replay_window & (1ULL << diff)) ? 1 : 0;
- else
- return 0;
- }
- }
-
- return 0;
-}
-
/* TODO: the seq increment should be atomic so it can safely be used by multiple workers */
-always_inline void
-esp_replay_advance (ipsec_sa_t * sa, u32 seq)
-{
- u32 pos;
-
- if (seq > sa->last_seq)
- {
- pos = seq - sa->last_seq;
- if (pos < ESP_WINDOW_SIZE)
- sa->replay_window = ((sa->replay_window) << pos) | 1;
- else
- sa->replay_window = 1;
- sa->last_seq = seq;
- }
- else
- {
- pos = sa->last_seq - seq;
- sa->replay_window |= (1ULL << pos);
- }
-}
-
-always_inline void
-esp_replay_advance_esn (ipsec_sa_t * sa, u32 seq)
-{
- int wrap = sa->seq_hi - sa->last_seq_hi;
- u32 pos;
-
- if (wrap == 0 && seq > sa->last_seq)
- {
- pos = seq - sa->last_seq;
- if (pos < ESP_WINDOW_SIZE)
- sa->replay_window = ((sa->replay_window) << pos) | 1;
- else
- sa->replay_window = 1;
- sa->last_seq = seq;
- }
- else if (wrap > 0)
- {
- pos = ~seq + sa->last_seq + 1;
- if (pos < ESP_WINDOW_SIZE)
- sa->replay_window = ((sa->replay_window) << pos) | 1;
- else
- sa->replay_window = 1;
- sa->last_seq = seq;
- sa->last_seq_hi = sa->seq_hi;
- }
- else if (wrap < 0)
- {
- pos = ~seq + sa->last_seq + 1;
- sa->replay_window |= (1ULL << pos);
- }
- else
- {
- pos = sa->last_seq - seq;
- sa->replay_window |= (1ULL << pos);
- }
-}
-
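+/* Advance the SA sequence number; returns 1 (without advancing) when the
+ * sequence number space is exhausted and anti-replay is enabled, else 0. */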
always_inline int
esp_seq_advance (ipsec_sa_t * sa)
{
- if (PREDICT_TRUE (sa->use_esn))
+ if (PREDICT_TRUE (ipsec_sa_is_set_USE_ESN (sa)))
{
if (PREDICT_FALSE (sa->seq == ESP_SEQ_MAX))
{
- if (PREDICT_FALSE
- (sa->use_anti_replay && sa->seq_hi == ESP_SEQ_MAX))
+ if (PREDICT_FALSE (ipsec_sa_is_set_USE_ANTI_REPLAY (sa) &&
+ sa->seq_hi == ESP_SEQ_MAX))
return 1;
sa->seq_hi++;
}
sa->seq++;
}
else
{
- if (PREDICT_FALSE (sa->use_anti_replay && sa->seq == ESP_SEQ_MAX))
+ if (PREDICT_FALSE (ipsec_sa_is_set_USE_ANTI_REPLAY (sa) &&
+ sa->seq == ESP_SEQ_MAX))
return 1;
sa->seq++;
}
return 0;
}
-always_inline void
-ipsec_proto_init ()
-{
- ipsec_proto_main_t *em = &ipsec_proto_main;
- vlib_thread_main_t *tm = vlib_get_thread_main ();
-
- memset (em, 0, sizeof (em[0]));
-
- vec_validate (em->ipsec_proto_main_crypto_algs, IPSEC_CRYPTO_N_ALG - 1);
- em->ipsec_proto_main_crypto_algs[IPSEC_CRYPTO_ALG_AES_CBC_128].type =
- EVP_aes_128_cbc ();
- em->ipsec_proto_main_crypto_algs[IPSEC_CRYPTO_ALG_AES_CBC_192].type =
- EVP_aes_192_cbc ();
- em->ipsec_proto_main_crypto_algs[IPSEC_CRYPTO_ALG_AES_CBC_256].type =
- EVP_aes_256_cbc ();
-
- vec_validate (em->ipsec_proto_main_integ_algs, IPSEC_INTEG_N_ALG - 1);
- ipsec_proto_main_integ_alg_t *i;
- i = &em->ipsec_proto_main_integ_algs[IPSEC_INTEG_ALG_SHA1_96];
- i->md = EVP_sha1 ();
- i->trunc_size = 12;
-
- i = &em->ipsec_proto_main_integ_algs[IPSEC_INTEG_ALG_SHA_256_96];
- i->md = EVP_sha256 ();
- i->trunc_size = 12;
-
- i = &em->ipsec_proto_main_integ_algs[IPSEC_INTEG_ALG_SHA_256_128];
- i->md = EVP_sha256 ();
- i->trunc_size = 16;
-
- i = &em->ipsec_proto_main_integ_algs[IPSEC_INTEG_ALG_SHA_384_192];
- i->md = EVP_sha384 ();
- i->trunc_size = 24;
+always_inline unsigned int
+hmac_calc (vlib_main_t * vm, ipsec_sa_t * sa, u8 * data, int data_len,
+ u8 * signature)
+{
+ vnet_crypto_op_t _op, *op = &_op;
- i = &em->ipsec_proto_main_integ_algs[IPSEC_INTEG_ALG_SHA_512_256];
- i->md = EVP_sha512 ();
- i->trunc_size = 32;
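+ /* no integrity algorithm configured on this SA */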
+ if (PREDICT_FALSE (sa->integ_op_id == 0))
+ return 0;
- vec_validate_aligned (em->per_thread_data, tm->n_vlib_mains - 1,
- CLIB_CACHE_LINE_BYTES);
- int thread_id;
+ vnet_crypto_op_init (op, sa->integ_op_id);
+ op->key_index = sa->integ_key_index;
+ op->src = data;
+ op->len = data_len;
+ op->digest = signature;
+ op->digest_len = sa->integ_icv_size;
- for (thread_id = 0; thread_id < tm->n_vlib_mains - 1; thread_id++)
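+ /* RFC 4303 ESN: the high 32 bits of the sequence number are included in
+ * the ICV computation but not sent on the wire; append them after the
+ * payload (the caller leaves 4 spare bytes past data_len) */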
+ if (ipsec_sa_is_set_USE_ESN (sa))
{
-#if OPENSSL_VERSION_NUMBER >= 0x10100000L
- em->per_thread_data[thread_id].encrypt_ctx = EVP_CIPHER_CTX_new ();
- em->per_thread_data[thread_id].decrypt_ctx = EVP_CIPHER_CTX_new ();
- em->per_thread_data[thread_id].hmac_ctx = HMAC_CTX_new ();
-#else
- EVP_CIPHER_CTX_init (&(em->per_thread_data[thread_id].encrypt_ctx));
- EVP_CIPHER_CTX_init (&(em->per_thread_data[thread_id].decrypt_ctx));
- HMAC_CTX_init (&(em->per_thread_data[thread_id].hmac_ctx));
-#endif
+ u32 seq_hi = clib_host_to_net_u32 (sa->seq_hi);
+
+ op->len += 4;
+ clib_memcpy (data + data_len, &seq_hi, 4);
}
+
+ vnet_crypto_process_ops (vm, op, 1);
+ return sa->integ_icv_size;
}
-always_inline unsigned int
-hmac_calc (ipsec_integ_alg_t alg,
- u8 * key,
- int key_len,
- u8 * data, int data_len, u8 * signature, u8 use_esn, u32 seq_hi)
+always_inline void
+esp_aad_fill (vnet_crypto_op_t * op,
+ const esp_header_t * esp, const ipsec_sa_t * sa)
{
- ipsec_proto_main_t *em = &ipsec_proto_main;
- u32 thread_index = vlib_get_thread_index ();
-#if OPENSSL_VERSION_NUMBER >= 0x10100000L
- HMAC_CTX *ctx = em->per_thread_data[thread_index].hmac_ctx;
-#else
- HMAC_CTX *ctx = &(em->per_thread_data[thread_index].hmac_ctx);
-#endif
- const EVP_MD *md = NULL;
- unsigned int len;
-
- ASSERT (alg < IPSEC_INTEG_N_ALG);
+ esp_aead_t *aad;
- if (PREDICT_FALSE (em->ipsec_proto_main_integ_algs[alg].md == 0))
- return 0;
+ aad = (esp_aead_t *) op->aad;
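+ /* the first 8 bytes of the ESP header are the SPI and the low 32 bits of
+ * the sequence number, already in network byte order */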
+ clib_memcpy_fast (aad, esp, 8);
- if (PREDICT_FALSE (alg != em->per_thread_data[thread_index].last_integ_alg))
+ if (ipsec_sa_is_set_USE_ESN (sa))
{
- md = em->ipsec_proto_main_integ_algs[alg].md;
- em->per_thread_data[thread_index].last_integ_alg = alg;
+ /* SPI, seq-hi, seq-low */
+ aad->data[2] = aad->data[1];
+ aad->data[1] = clib_host_to_net_u32 (sa->seq_hi);
+ op->aad_len = 12;
}
-
- HMAC_Init_ex (ctx, key, key_len, md, NULL);
-
- HMAC_Update (ctx, data, data_len);
-
- if (PREDICT_TRUE (use_esn))
- HMAC_Update (ctx, (u8 *) & seq_hi, sizeof (seq_hi));
- HMAC_Final (ctx, signature, &len);
-
- return em->ipsec_proto_main_integ_algs[alg].trunc_size;
+ else
+ /* SPI, seq-low */
+ op->aad_len = 8;
}
-
#endif /* __ESP_H__ */
/*