}
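+/* Decrypt one ESP payload with the per-thread OpenSSL EVP context.
+ * Covers the CBC-mode transforms (AES-CBC and DES/3DES-CBC). */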
always_inline void
-esp_decrypt_aes_cbc (ipsec_crypto_alg_t alg,
- u8 * in, u8 * out, size_t in_len, u8 * key, u8 * iv)
+esp_decrypt_cbc (ipsec_crypto_alg_t alg,
+ u8 * in, u8 * out, size_t in_len, u8 * key, u8 * iv)
{
- esp_main_t *em = &esp_main;
+ ipsec_proto_main_t *em = &ipsec_proto_main;
u32 thread_index = vlib_get_thread_index ();
#if OPENSSL_VERSION_NUMBER >= 0x10100000L
EVP_CIPHER_CTX *ctx = em->per_thread_data[thread_index].decrypt_ctx;
ASSERT (alg < IPSEC_CRYPTO_N_ALG);
- if (PREDICT_FALSE (em->esp_crypto_algs[alg].type == 0))
+ if (PREDICT_FALSE (em->ipsec_proto_main_crypto_algs[alg].type == 0))
return;
if (PREDICT_FALSE
(alg != em->per_thread_data[thread_index].last_decrypt_alg))
{
- cipher = em->esp_crypto_algs[alg].type;
+ cipher = em->ipsec_proto_main_crypto_algs[alg].type;
em->per_thread_data[thread_index].last_decrypt_alg = alg;
}
EVP_DecryptFinal_ex (ctx, out + out_len, &out_len);
}
-static uword
-esp_decrypt_node_fn (vlib_main_t * vm,
- vlib_node_runtime_t * node, vlib_frame_t * from_frame)
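+/* Shared decrypt path for the esp4-decrypt and esp6-decrypt graph nodes;
+ * is_ip6 selects which node's error counters are incremented and how the
+ * transport-mode IP header is rebuilt. */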
+always_inline uword
+esp_decrypt_inline (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * from_frame,
+ int is_ip6)
{
u32 n_left_from, *from, next_index, *to_next;
ipsec_main_t *im = &ipsec_main;
- esp_main_t *em = &esp_main;
+ ipsec_proto_main_t *em = &ipsec_proto_main;
u32 *recycle = 0;
from = vlib_frame_vector_args (from_frame);
n_left_from = from_frame->n_vectors;
if (PREDICT_FALSE (vec_len (empty_buffers) < n_left_from))
{
- vlib_node_increment_counter (vm, esp_decrypt_node.index,
- ESP_DECRYPT_ERROR_NO_BUFFER, n_left_from);
+ if (is_ip6)
+ vlib_node_increment_counter (vm, esp6_decrypt_node.index,
+ ESP_DECRYPT_ERROR_NO_BUFFER,
+ n_left_from);
+ else
+ vlib_node_increment_counter (vm, esp4_decrypt_node.index,
+ ESP_DECRYPT_ERROR_NO_BUFFER,
+ n_left_from);
goto free_buffers_and_exit;
}
ip4_header_t *ih4 = 0, *oh4 = 0;
ip6_header_t *ih6 = 0, *oh6 = 0;
u8 tunnel_mode = 1;
- u8 transport_ip6 = 0;
-
i_bi0 = from[0];
from += 1;
if (PREDICT_FALSE (rv))
{
clib_warning ("anti-replay SPI %u seq %u", sa0->spi, seq);
- vlib_node_increment_counter (vm, esp_decrypt_node.index,
- ESP_DECRYPT_ERROR_REPLAY, 1);
+ if (is_ip6)
+ vlib_node_increment_counter (vm,
+ esp6_decrypt_node.index,
+ ESP_DECRYPT_ERROR_REPLAY, 1);
+ else
+ vlib_node_increment_counter (vm,
+ esp4_decrypt_node.index,
+ ESP_DECRYPT_ERROR_REPLAY, 1);
o_bi0 = i_bi0;
to_next[0] = o_bi0;
to_next += 1;
if (PREDICT_TRUE (sa0->integ_alg != IPSEC_INTEG_ALG_NONE))
{
u8 sig[64];
- int icv_size = em->esp_integ_algs[sa0->integ_alg].trunc_size;
- memset (sig, 0, sizeof (sig));
+ int icv_size =
+ em->ipsec_proto_main_integ_algs[sa0->integ_alg].trunc_size;
+ clib_memset (sig, 0, sizeof (sig));
u8 *icv =
vlib_buffer_get_current (i_b0) + i_b0->current_length -
icv_size;
if (PREDICT_FALSE (memcmp (icv, sig, icv_size)))
{
- vlib_node_increment_counter (vm, esp_decrypt_node.index,
- ESP_DECRYPT_ERROR_INTEG_ERROR,
- 1);
+ if (is_ip6)
+ vlib_node_increment_counter (vm,
+ esp6_decrypt_node.index,
+ ESP_DECRYPT_ERROR_INTEG_ERROR,
+ 1);
+ else
+ vlib_node_increment_counter (vm,
+ esp4_decrypt_node.index,
+ ESP_DECRYPT_ERROR_INTEG_ERROR,
+ 1);
o_bi0 = i_bi0;
to_next[0] = o_bi0;
to_next += 1;
/* add old buffer to the recycle list */
vec_add1 (recycle, i_bi0);
- if (sa0->crypto_alg >= IPSEC_CRYPTO_ALG_AES_CBC_128 &&
- sa0->crypto_alg <= IPSEC_CRYPTO_ALG_AES_CBC_256)
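+ /* All CBC-mode transforms (AES-CBC-128/192/256, DES-CBC, 3DES-CBC)
+  * take this path. */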
+ if ((sa0->crypto_alg >= IPSEC_CRYPTO_ALG_AES_CBC_128 &&
+ sa0->crypto_alg <= IPSEC_CRYPTO_ALG_AES_CBC_256) ||
+ (sa0->crypto_alg >= IPSEC_CRYPTO_ALG_DES_CBC &&
+ sa0->crypto_alg <= IPSEC_CRYPTO_ALG_3DES_CBC))
{
- const int BLOCK_SIZE = 16;
- const int IV_SIZE = 16;
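+ /* Block and IV sizes are per-algorithm properties of the transform. */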
+ const int BLOCK_SIZE =
+ em->ipsec_proto_main_crypto_algs[sa0->crypto_alg].block_size;
+ const int IV_SIZE =
+ em->ipsec_proto_main_crypto_algs[sa0->crypto_alg].iv_size;
esp_footer_t *f0;
u8 ip_hdr_size = 0;
if (PREDICT_FALSE (!sa0->is_tunnel && !sa0->is_tunnel_ip6))
{
tunnel_mode = 0;
- ih4 =
- (ip4_header_t *) (i_b0->data +
- sizeof (ethernet_header_t));
- if (PREDICT_TRUE
- ((ih4->ip_version_and_header_length & 0xF0) != 0x40))
+
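+ /* Transport mode: the original IP header immediately precedes the
+  * ESP header. */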
+ if (is_ip6)
{
- if (PREDICT_TRUE
- ((ih4->ip_version_and_header_length & 0xF0) ==
- 0x60))
- {
- transport_ip6 = 1;
- ip_hdr_size = sizeof (ip6_header_t);
- ih6 =
- (ip6_header_t *) (i_b0->data +
- sizeof (ethernet_header_t));
- oh6 = vlib_buffer_get_current (o_b0);
- }
- else
- {
- vlib_node_increment_counter (vm,
- esp_decrypt_node.index,
- ESP_DECRYPT_ERROR_NOT_IP,
- 1);
- o_b0 = 0;
- goto trace;
- }
+ ih6 =
+ (ip6_header_t *) ((u8 *) esp0 -
+ sizeof (ip6_header_t));
+ ip_hdr_size = sizeof (ip6_header_t);
+ oh6 = vlib_buffer_get_current (o_b0);
}
else
{
+ ih4 =
+ (ip4_header_t *) ((u8 *) esp0 -
+ sizeof (ip4_header_t));
oh4 = vlib_buffer_get_current (o_b0);
ip_hdr_size = sizeof (ip4_header_t);
}
}
- esp_decrypt_aes_cbc (sa0->crypto_alg,
- esp0->data + IV_SIZE,
- (u8 *) vlib_buffer_get_current (o_b0) +
- ip_hdr_size, BLOCK_SIZE * blocks,
- sa0->crypto_key, esp0->data);
+ esp_decrypt_cbc (sa0->crypto_alg,
+ esp0->data + IV_SIZE,
+ (u8 *) vlib_buffer_get_current (o_b0) +
+ ip_hdr_size, BLOCK_SIZE * blocks,
+ sa0->crypto_key, esp0->data);
- o_b0->current_length = (blocks * 16) - 2 + ip_hdr_size;
+ o_b0->current_length = (blocks * BLOCK_SIZE) - 2 + ip_hdr_size;
o_b0->flags = VLIB_BUFFER_TOTAL_LENGTH_VALID;
f0 =
(esp_footer_t *) ((u8 *) vlib_buffer_get_current (o_b0) +
else
{
clib_warning ("next header: 0x%x", f0->next_header);
- vlib_node_increment_counter (vm, esp_decrypt_node.index,
- ESP_DECRYPT_ERROR_DECRYPTION_FAILED,
- 1);
+ if (is_ip6)
+ vlib_node_increment_counter (vm,
+ esp6_decrypt_node.index,
+ ESP_DECRYPT_ERROR_DECRYPTION_FAILED,
+ 1);
+ else
+ vlib_node_increment_counter (vm,
+ esp4_decrypt_node.index,
+ ESP_DECRYPT_ERROR_DECRYPTION_FAILED,
+ 1);
o_b0 = 0;
goto trace;
}
/* transport mode */
else
{
- if (PREDICT_FALSE (transport_ip6))
+ if (is_ip6)
{
next0 = ESP_DECRYPT_NEXT_IP6_INPUT;
oh6->ip_version_traffic_class_and_flow_label =
}
vlib_put_next_frame (vm, node, next_index, n_left_to_next);
}
- vlib_node_increment_counter (vm, esp_decrypt_node.index,
- ESP_DECRYPT_ERROR_RX_PKTS,
- from_frame->n_vectors);
+ if (is_ip6)
+ vlib_node_increment_counter (vm, esp6_decrypt_node.index,
+ ESP_DECRYPT_ERROR_RX_PKTS,
+ from_frame->n_vectors);
+ else
+ vlib_node_increment_counter (vm, esp4_decrypt_node.index,
+ ESP_DECRYPT_ERROR_RX_PKTS,
+ from_frame->n_vectors);
+
free_buffers_and_exit:
if (recycle)
return from_frame->n_vectors;
}
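+/* Per-address-family entry points; the actual work happens in
+ * esp_decrypt_inline() above. */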
+VLIB_NODE_FN (esp4_decrypt_node) (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame)
+{
+ return esp_decrypt_inline (vm, node, from_frame, 0 /* is_ip6 */ );
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (esp4_decrypt_node) = {
+ .name = "esp4-decrypt",
+ .vector_size = sizeof (u32),
+ .format_trace = format_esp_decrypt_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = ARRAY_LEN(esp_decrypt_error_strings),
+ .error_strings = esp_decrypt_error_strings,
+
+ .n_next_nodes = ESP_DECRYPT_N_NEXT,
+ .next_nodes = {
+#define _(s,n) [ESP_DECRYPT_NEXT_##s] = n,
+ foreach_esp_decrypt_next
+#undef _
+ },
+};
+/* *INDENT-ON* */
+
+VLIB_NODE_FN (esp6_decrypt_node) (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame)
+{
+ return esp_decrypt_inline (vm, node, from_frame, 1 /* is_ip6 */ );
+}
/* *INDENT-OFF* */
-VLIB_REGISTER_NODE (esp_decrypt_node) = {
- .function = esp_decrypt_node_fn,
- .name = "esp-decrypt",
+VLIB_REGISTER_NODE (esp6_decrypt_node) = {
+ .name = "esp6-decrypt",
.vector_size = sizeof (u32),
.format_trace = format_esp_decrypt_trace,
.type = VLIB_NODE_TYPE_INTERNAL,
};
/* *INDENT-ON* */
-VLIB_NODE_FUNCTION_MULTIARCH (esp_decrypt_node, esp_decrypt_node_fn)
/*
* fd.io coding-style-patch-verification: ON
*