}
always_inline void
-esp_decrypt_cbc (ipsec_crypto_alg_t alg,
+esp_decrypt_cbc (vlib_main_t * vm, ipsec_sa_t * sa,
u8 * in, u8 * out, size_t in_len, u8 * key, u8 * iv)
{
- ipsec_proto_main_t *em = &ipsec_proto_main;
- u32 thread_index = vlib_get_thread_index ();
-#if OPENSSL_VERSION_NUMBER >= 0x10100000L
- EVP_CIPHER_CTX *ctx = em->per_thread_data[thread_index].decrypt_ctx;
-#else
- EVP_CIPHER_CTX *ctx = &(em->per_thread_data[thread_index].decrypt_ctx);
-#endif
- const EVP_CIPHER *cipher = NULL;
- int out_len;
-
- ASSERT (alg < IPSEC_CRYPTO_N_ALG);
-
- if (PREDICT_FALSE (em->ipsec_proto_main_crypto_algs[alg].type == 0))
- return;
+ vnet_crypto_op_t _op, *op = &_op;
- if (PREDICT_FALSE
- (alg != em->per_thread_data[thread_index].last_decrypt_alg))
- {
- cipher = em->ipsec_proto_main_crypto_algs[alg].type;
- em->per_thread_data[thread_index].last_decrypt_alg = alg;
- }
- EVP_DecryptInit_ex (ctx, cipher, NULL, key, iv);
+ if (PREDICT_FALSE (sa->crypto_dec_op_type == VNET_CRYPTO_OP_NONE))
+ return;
+
+ op->op = sa->crypto_dec_op_type;
+ op->iv = iv;
+ op->src = in;
+ op->dst = out;
+ op->len = in_len;
+ op->key = key;
- EVP_DecryptUpdate (ctx, out, &out_len, in, in_len);
- EVP_DecryptFinal_ex (ctx, out + out_len, &out_len);
+ vnet_crypto_process_ops (vm, op, 1);
}
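/*
 * Sketch (not part of the patch): the reworked esp_decrypt_cbc fills a
 * single stack-allocated vnet_crypto_op_t and hands it to
 * vnet_crypto_process_ops.  Since that call takes an array and a count,
 * several operations for the same SA could also be prepared and submitted
 * together.  The helper below and the buf0/iv0/len0 names are hypothetical
 * placeholders, not symbols from the patch; only the op fields already used
 * above (op, iv, src, dst, len, key) are assumed.
 */
always_inline void
example_decrypt_pair (vlib_main_t * vm, ipsec_sa_t * sa,
		      u8 * buf0, u32 len0, u8 * iv0,
		      u8 * buf1, u32 len1, u8 * iv1)
{
  vnet_crypto_op_t ops[2] = { };

  ops[0].op = sa->crypto_dec_op_type;
  ops[0].iv = iv0;
  ops[0].src = ops[0].dst = buf0;	/* decrypt in place */
  ops[0].len = len0;
  ops[0].key = sa->crypto_key.data;

  ops[1] = ops[0];			/* same SA and key, second buffer */
  ops[1].iv = iv1;
  ops[1].src = ops[1].dst = buf1;
  ops[1].len = len1;

  vnet_crypto_process_ops (vm, ops, 2);
}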
always_inline uword
int is_ip6)
{
ipsec_main_t *im = &ipsec_main;
- ipsec_proto_main_t *em = &ipsec_proto_main;
u32 *from = vlib_frame_vector_args (from_frame);
u32 n_left_from = from_frame->n_vectors;
u32 new_bufs[VLIB_FRAME_SIZE];
seq = clib_host_to_net_u32 (esp0->seq);
/* anti-replay check */
- if (sa0->use_anti_replay)
+ if (ipsec_sa_is_set_USE_ANTI_REPLAY (sa0))
{
int rv = 0;
- if (PREDICT_TRUE (sa0->use_esn))
+ if (PREDICT_TRUE (ipsec_sa_is_set_USE_EXTENDED_SEQ_NUM (sa0)))
rv = esp_replay_check_esn (sa0, seq);
else
rv = esp_replay_check (sa0, seq);
if (PREDICT_TRUE (sa0->integ_alg != IPSEC_INTEG_ALG_NONE))
{
u8 sig[64];
- int icv_size =
- em->ipsec_proto_main_integ_algs[sa0->integ_alg].trunc_size;
+ int icv_size = sa0->integ_trunc_size;
clib_memset (sig, 0, sizeof (sig));
- u8 *icv =
- vlib_buffer_get_current (ib[0]) + ib[0]->current_length -
+ u8 *icv = vlib_buffer_get_current (ib[0]) + ib[0]->current_length -
icv_size;
ib[0]->current_length -= icv_size;
- hmac_calc (sa0->integ_alg, sa0->integ_key.data,
- sa0->integ_key.len, (u8 *) esp0,
- ib[0]->current_length, sig, sa0->use_esn, sa0->seq_hi);
+ hmac_calc (vm, sa0, (u8 *) esp0, ib[0]->current_length, sig);
if (PREDICT_FALSE (memcmp (icv, sig, icv_size)))
{
}
}
- if (PREDICT_TRUE (sa0->use_anti_replay))
+ if (PREDICT_TRUE (ipsec_sa_is_set_USE_ANTI_REPLAY (sa0)))
{
- if (PREDICT_TRUE (sa0->use_esn))
+ if (PREDICT_TRUE (ipsec_sa_is_set_USE_EXTENDED_SEQ_NUM (sa0)))
esp_replay_advance_esn (sa0, seq);
else
esp_replay_advance (sa0, seq);
(sa0->crypto_alg >= IPSEC_CRYPTO_ALG_DES_CBC &&
sa0->crypto_alg <= IPSEC_CRYPTO_ALG_3DES_CBC))
{
- const int BLOCK_SIZE =
- em->ipsec_proto_main_crypto_algs[sa0->crypto_alg].block_size;;
- const int IV_SIZE =
- em->ipsec_proto_main_crypto_algs[sa0->crypto_alg].iv_size;
+ const int BLOCK_SIZE = sa0->crypto_block_size;
+ const int IV_SIZE = sa0->crypto_iv_size;
esp_footer_t *f0;
u8 ip_hdr_size = 0;
ob[0]->current_data = sizeof (ethernet_header_t);
/* transport mode */
- if (PREDICT_FALSE (!sa0->is_tunnel && !sa0->is_tunnel_ip6))
+ if (PREDICT_FALSE (!ipsec_sa_is_set_IS_TUNNEL (sa0) &&
+ !ipsec_sa_is_set_IS_TUNNEL_V6 (sa0)))
{
tunnel_mode = 0;
else
{
ip_hdr_size = sizeof (ip4_header_t);
- if (sa0->udp_encap)
+ if (ipsec_sa_is_set_UDP_ENCAP (sa0))
ih4 = (ip4_header_t *) ((u8 *) esp0 - ip_hdr_size -
sizeof (udp_header_t));
else
}
}
- esp_decrypt_cbc (sa0->crypto_alg,
- esp0->data + IV_SIZE,
+ esp_decrypt_cbc (vm, sa0, esp0->data + IV_SIZE,
(u8 *) vlib_buffer_get_current (ob[0]) +
ip_hdr_size, BLOCK_SIZE * blocks,
sa0->crypto_key.data, esp0->data);
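/*
 * For reference (illustrative only, not from the patch): in ESP the IV is
 * carried at the start of the payload, so esp0->data points at the IV and
 * esp0->data + IV_SIZE at the CBC ciphertext.  The block count passed to
 * esp_decrypt_cbc follows from the payload length roughly as below;
 * payload_len is a placeholder and the real computation in the elided code
 * may differ.
 */
always_inline u32
example_esp_cbc_blocks (u32 payload_len, u32 iv_size, u32 block_size)
{
  /* payload = IV + ciphertext (padded data, pad length, next header) */
  ASSERT (payload_len > iv_size);
  return (payload_len - iv_size) / block_size;
}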