#include <vnet/ipsec/esp.h>
#include <vnet/ipsec/ipsec_io.h>
#include <vnet/ipsec/ipsec_tun.h>
-#include <vnet/ipsec/ipsec.api_enum.h>
#include <vnet/gre/packet.h>
err = e;
else
err = ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR;
- b[bi]->error = node->errors[err];
- nexts[bi] = ESP_DECRYPT_NEXT_DROP;
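+ /* Instead of writing b->error and the drop next index directly,
+  * route the failure through esp_decrypt_set_next_index, which takes
+  * the thread index and the SA index (sad_index) as extra arguments,
+  * presumably so the error is also counted against the owning SA. */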
+ esp_decrypt_set_next_index (b[bi], node, vm->thread_index, err, bi,
+ nexts, ESP_DECRYPT_NEXT_DROP,
+ vnet_buffer (b[bi])->ipsec.sad_index);
n_fail--;
}
op++;
err = e;
else
err = ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR;
- b[bi]->error = node->errors[err];
- nexts[bi] = ESP_DECRYPT_NEXT_DROP;
+ esp_decrypt_set_next_index (b[bi], node, vm->thread_index, err, bi,
+ nexts, ESP_DECRYPT_NEXT_DROP,
+ vnet_buffer (b[bi])->ipsec.sad_index);
n_fail--;
}
op++;
payload, pd->current_length,
&op->digest, &op->n_chunks, 0) < 0)
{
- b->error = node->errors[ESP_DECRYPT_ERROR_NO_BUFFERS];
- next[0] = ESP_DECRYPT_NEXT_DROP;
+ esp_decrypt_set_next_index (
+ b, node, vm->thread_index, ESP_DECRYPT_ERROR_NO_BUFFERS, 0,
+ next, ESP_DECRYPT_NEXT_DROP, pd->sa_index);
return;
}
}
op->aad_len = esp_aad_fill (op->aad, esp0, sa0, pd->seq_hi);
op->tag = payload + len;
op->tag_len = 16;
+ if (PREDICT_FALSE (ipsec_sa_is_set_IS_NULL_GMAC (sa0)))
+ {
+ /* RFC-4543 ENCR_NULL_AUTH_AES_GMAC: IV is part of AAD */
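+ /* Nothing is encrypted under NULL_GMAC; widening the span by iv_sz
+  * only extends what the GMAC ICV authenticates, since the IV must
+  * be covered as AAD. */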
+ payload -= iv_sz;
+ len += iv_sz;
+ }
}
else
{
esp_decrypt_packet_data_t *async_pd = &(esp_post_data (b))->decrypt_data;
esp_decrypt_packet_data2_t *async_pd2 = esp_post_data2 (b);
u8 *tag = payload + len, *iv = payload + esp_sz, *aad = 0;
- u32 key_index;
+ const u32 key_index = sa0->crypto_key_index;
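+ /* key_index no longer depends on whether the algs are linked: the
+  * SA's crypto_key_index is used in both branches below (the separate
+  * linked_key_index was presumably folded into it). */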
u32 crypto_len, integ_len = 0;
i16 crypto_start_offset, integ_start_offset = 0;
u8 flags = 0;
if (!ipsec_sa_is_set_IS_AEAD (sa0))
{
/* linked algs */
- key_index = sa0->linked_key_index;
integ_start_offset = payload - b->data;
integ_len = len;
if (PREDICT_TRUE (sa0->integ_op_id != VNET_CRYPTO_OP_NONE))
else
esp_insert_esn (vm, sa0, pd, pd2, &integ_len, &tag, &len, b, payload);
}
- else
- key_index = sa0->crypto_key_index;
out:
/* crypto */
aad = (u8 *) nonce - sizeof (esp_aead_t);
esp_aad_fill (aad, esp0, sa0, pd->seq_hi);
tag = payload + len;
+ if (PREDICT_FALSE (ipsec_sa_is_set_IS_NULL_GMAC (sa0)))
+ {
+ /* RFC-4543 ENCR_NULL_AUTH_AES_GMAC: IV is part of AAD */
+ payload -= iv_sz;
+ len += iv_sz;
+ }
}
else
{
}
static_always_inline void
-esp_decrypt_post_crypto (vlib_main_t *vm, const vlib_node_runtime_t *node,
+esp_decrypt_post_crypto (vlib_main_t *vm, vlib_node_runtime_t *node,
const u16 *next_by_next_header,
const esp_decrypt_packet_data_t *pd,
const esp_decrypt_packet_data2_t *pd2,
const u8 tun_flags = IPSEC_SA_FLAG_IS_TUNNEL | IPSEC_SA_FLAG_IS_TUNNEL_V6;
u8 pad_length = 0, next_header = 0;
u16 icv_sz;
+ u64 n_lost;
/*
 * redo the anti-replay check
* check above we did so against the state of the window (W),
* after packet s-1. So each of the packets in the sequence will be
* accepted.
- * This time s will be cheked against Ws-1, s+1 chceked against Ws
- * (i.e. the window state is updated/advnaced)
- * so this time the successive s+! packet will be dropped.
+ * This time s will be checked against Ws-1, s+1 checked against Ws
+ * (i.e. the window state is updated/advanced)
+ * so this time the successive s+1 packet will be dropped.
* This is a consequence of batching the decrypts. If the
- * check-dcrypt-advance process was done for each packet it would
+ * check-decrypt-advance process was done for each packet it would
* be fine. But we batch the decrypts because it's much more efficient
 * to do so in SW, and also when we offload to HW and the process is async.
*
* You're probably thinking, but this means an attacker can send the
- * above sequence and cause VPP to perform decrpyts that will fail,
+ * above sequence and cause VPP to perform decrypts that will fail,
* and that's true. But if the attacker can determine s (a valid
* sequence number in the window) which is non-trivial, it can generate
* a sequence s, s+1, s+2, s+3, ... s+n and nothing will prevent any
* implementation, sequential or batching, from decrypting these.
*/
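+ /* A hypothetical example: a frame carrying the same in-window
+  * sequence number s twice passes the batched pre-decrypt check in
+  * both slots, since both were tested against the same stale window;
+  * only this per-packet redo, which advances the window between
+  * packets, catches the duplicate. */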
- if (ipsec_sa_anti_replay_and_sn_advance (sa0, pd->seq, pd->seq_hi, true,
- NULL))
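+ /* The new trailing boolean selects the ANTI_REPLAY_HUGE (larger
+  * window) variant of the check and advance helpers; both calls in a
+  * branch must agree on which window layout is in use. */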
+ if (PREDICT_FALSE (ipsec_sa_is_set_ANTI_REPLAY_HUGE (sa0)))
{
- b->error = node->errors[ESP_DECRYPT_ERROR_REPLAY];
- next[0] = ESP_DECRYPT_NEXT_DROP;
- return;
+ if (ipsec_sa_anti_replay_and_sn_advance (sa0, pd->seq, pd->seq_hi, true,
+ NULL, true))
+ {
+ esp_decrypt_set_next_index (b, node, vm->thread_index,
+ ESP_DECRYPT_ERROR_REPLAY, 0, next,
+ ESP_DECRYPT_NEXT_DROP, pd->sa_index);
+ return;
+ }
+ n_lost = ipsec_sa_anti_replay_advance (sa0, vm->thread_index, pd->seq,
+ pd->seq_hi, true);
+ }
+ else
+ {
+ if (ipsec_sa_anti_replay_and_sn_advance (sa0, pd->seq, pd->seq_hi, true,
+ NULL, false))
+ {
+ esp_decrypt_set_next_index (b, node, vm->thread_index,
+ ESP_DECRYPT_ERROR_REPLAY, 0, next,
+ ESP_DECRYPT_NEXT_DROP, pd->sa_index);
+ return;
+ }
+ n_lost = ipsec_sa_anti_replay_advance (sa0, vm->thread_index, pd->seq,
+ pd->seq_hi, false);
}
- u64 n_lost =
- ipsec_sa_anti_replay_advance (sa0, vm->thread_index, pd->seq, pd->seq_hi);
-
- vlib_prefetch_simple_counter (&ipsec_sa_lost_counters, vm->thread_index,
- pd->sa_index);
+ vlib_prefetch_simple_counter (&ipsec_sa_err_counters[IPSEC_SA_ERROR_LOST],
+ vm->thread_index, pd->sa_index);
if (pd->is_chain)
{
u16 adv = pd->iv_sz + esp_sz;
u16 tail = sizeof (esp_footer_t) + pad_length + icv_sz;
u16 tail_orig = sizeof (esp_footer_t) + pad_length + pd->icv_sz;
- b->flags &= ~VLIB_BUFFER_TOTAL_LENGTH_VALID;
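+ /* The outer packet's L4 checksum state tells us nothing about the
+  * inner packet revealed by decryption, so clear the computed/correct
+  * checksum flags along with the total-length hint. */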
+ b->flags &=
+ ~(VLIB_BUFFER_TOTAL_LENGTH_VALID | VNET_BUFFER_F_L4_CHECKSUM_COMPUTED |
+ VNET_BUFFER_F_L4_CHECKSUM_CORRECT);
if ((pd->flags & tun_flags) == 0 && !is_tun) /* transport mode */
{
next[0] = ESP_DECRYPT_NEXT_IP6_INPUT;
break;
default:
- b->error = node->errors[ESP_DECRYPT_ERROR_UNSUP_PAYLOAD];
- next[0] = ESP_DECRYPT_NEXT_DROP;
+ esp_decrypt_set_next_index (
+ b, node, vm->thread_index, ESP_DECRYPT_ERROR_UNSUP_PAYLOAD, 0,
+ next, ESP_DECRYPT_NEXT_DROP, pd->sa_index);
break;
}
}
}
else
{
- next[0] = ESP_DECRYPT_NEXT_DROP;
- b->error = node->errors[ESP_DECRYPT_ERROR_UNSUP_PAYLOAD];
+ esp_decrypt_set_next_index (b, node, vm->thread_index,
+ ESP_DECRYPT_ERROR_UNSUP_PAYLOAD, 0, next,
+ ESP_DECRYPT_NEXT_DROP, pd->sa_index);
return;
}
!ip46_address_is_equal_v4 (&itp->itp_tun.dst,
&ip4->src_address))
{
- next[0] = ESP_DECRYPT_NEXT_DROP;
- b->error = node->errors[ESP_DECRYPT_ERROR_TUN_NO_PROTO];
+ esp_decrypt_set_next_index (
+ b, node, vm->thread_index,
+ ESP_DECRYPT_ERROR_TUN_NO_PROTO, 0, next,
+ ESP_DECRYPT_NEXT_DROP, pd->sa_index);
}
}
else if (next_header == IP_PROTOCOL_IPV6)
!ip46_address_is_equal_v6 (&itp->itp_tun.dst,
&ip6->src_address))
{
- next[0] = ESP_DECRYPT_NEXT_DROP;
- b->error = node->errors[ESP_DECRYPT_ERROR_TUN_NO_PROTO];
+ esp_decrypt_set_next_index (
+ b, node, vm->thread_index,
+ ESP_DECRYPT_ERROR_TUN_NO_PROTO, 0, next,
+ ESP_DECRYPT_NEXT_DROP, pd->sa_index);
}
}
}
}
if (PREDICT_FALSE (n_lost))
- vlib_increment_simple_counter (&ipsec_sa_lost_counters, vm->thread_index,
- pd->sa_index, n_lost);
+ vlib_increment_simple_counter (&ipsec_sa_err_counters[IPSEC_SA_ERROR_LOST],
+ vm->thread_index, pd->sa_index, n_lost);
}
always_inline uword
u32 current_sa_index = ~0, current_sa_bytes = 0, current_sa_pkts = 0;
const u8 esp_sz = sizeof (esp_header_t);
ipsec_sa_t *sa0 = 0;
+ bool anti_replay_result;
vnet_crypto_op_t _op, *op = &_op;
vnet_crypto_op_t **crypto_ops;
vnet_crypto_op_t **integ_ops;
if (n_bufs == 0)
{
err = ESP_DECRYPT_ERROR_NO_BUFFERS;
- esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
- ESP_DECRYPT_NEXT_DROP);
+ esp_decrypt_set_next_index (b[0], node, thread_index, err, n_noop,
+ noop_nexts, ESP_DECRYPT_NEXT_DROP,
+ vnet_buffer (b[0])->ipsec.sad_index);
goto next;
}
{
if (current_sa_pkts)
vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
- current_sa_index,
- current_sa_pkts,
+ current_sa_index, current_sa_pkts,
current_sa_bytes);
current_sa_bytes = current_sa_pkts = 0;
current_sa_index = vnet_buffer (b[0])->ipsec.sad_index;
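+ /* Prefetch this SA's combined counter so the increment on the next
+  * SA switch or flush does not stall on a cache miss. */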
+ vlib_prefetch_combined_counter (&ipsec_sa_counters, thread_index,
+ current_sa_index);
sa0 = ipsec_sa_get (current_sa_index);
/* fetch the second cacheline ASAP */
is_async = im->async_mode | ipsec_sa_is_set_IS_ASYNC (sa0);
}
- if (PREDICT_FALSE (~0 == sa0->thread_index))
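+ /* The SA owner thread_index is a 16-bit field, so the "unclaimed"
+  * sentinel must be compared as (u16) ~0 rather than ~0. */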
+ if (PREDICT_FALSE ((u16) ~0 == sa0->thread_index))
{
/* this is the first packet to use this SA, claim the SA
 * for this thread. This could happen simultaneously on
{
vnet_buffer (b[0])->ipsec.thread_index = sa0->thread_index;
err = ESP_DECRYPT_ERROR_HANDOFF;
- esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
- ESP_DECRYPT_NEXT_HANDOFF);
+ esp_decrypt_set_next_index (b[0], node, thread_index, err, n_noop,
+ noop_nexts, ESP_DECRYPT_NEXT_HANDOFF,
+ current_sa_index);
goto next;
}
pd->current_length = b[0]->current_length;
 /* anti-replay check */
- if (ipsec_sa_anti_replay_and_sn_advance (sa0, pd->seq, ~0, false,
- &pd->seq_hi))
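+ /* As in the post-decrypt redo: pick the huge or default
+  * replay-window variant based on the SA's ANTI_REPLAY_HUGE flag. */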
+ if (PREDICT_FALSE (ipsec_sa_is_set_ANTI_REPLAY_HUGE (sa0)))
+ {
+ anti_replay_result = ipsec_sa_anti_replay_and_sn_advance (
+ sa0, pd->seq, ~0, false, &pd->seq_hi, true);
+ }
+ else
+ {
+ anti_replay_result = ipsec_sa_anti_replay_and_sn_advance (
+ sa0, pd->seq, ~0, false, &pd->seq_hi, false);
+ }
+
+ if (anti_replay_result)
{
err = ESP_DECRYPT_ERROR_REPLAY;
- esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
- ESP_DECRYPT_NEXT_DROP);
+ esp_decrypt_set_next_index (b[0], node, thread_index, err, n_noop,
+ noop_nexts, ESP_DECRYPT_NEXT_DROP,
+ current_sa_index);
goto next;
}
if (pd->current_length < cpd.icv_sz + esp_sz + cpd.iv_sz)
{
err = ESP_DECRYPT_ERROR_RUNT;
- esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
- ESP_DECRYPT_NEXT_DROP);
+ esp_decrypt_set_next_index (b[0], node, thread_index, err, n_noop,
+ noop_nexts, ESP_DECRYPT_NEXT_DROP,
+ current_sa_index);
goto next;
}
{
async_frames[async_op] =
vnet_crypto_async_get_frame (vm, async_op);
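+ /* Frame allocation can fail under load; drop the packet rather
+  * than enqueueing onto a NULL frame. */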
+ if (PREDICT_FALSE (!async_frames[async_op]))
+ {
+ err = ESP_DECRYPT_ERROR_NO_AVAIL_FRAME;
+ esp_decrypt_set_next_index (
+ b[0], node, thread_index, err, n_noop, noop_nexts,
+ ESP_DECRYPT_NEXT_DROP, current_sa_index);
+ goto next;
+ }
+
/* Save the frame to the list we'll submit at the end */
vec_add1 (ptd->async_frames, async_frames[async_op]);
}
async_next_node);
if (ESP_DECRYPT_ERROR_RX_PKTS != err)
{
- esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
- ESP_DECRYPT_NEXT_DROP);
+ esp_decrypt_set_next_index (
+ b[0], node, thread_index, err, n_noop, noop_nexts,
+ ESP_DECRYPT_NEXT_DROP, current_sa_index);
}
}
else
{
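+ /* The recycle helper now also takes the per-SA error code
+  * (IPSEC_SA_ERROR_CRYPTO_ENGINE_ERROR) so the failure is counted
+  * against the SA; the trailing false presumably marks this as the
+  * decrypt (non-encrypt) caller. */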
n_noop += esp_async_recycle_failed_submit (
vm, *async_frame, node, ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR,
- n_noop, noop_bi, noop_nexts, ESP_DECRYPT_NEXT_DROP);
+ IPSEC_SA_ERROR_CRYPTO_ENGINE_ERROR, n_noop, noop_bi, noop_nexts,
+ ESP_DECRYPT_NEXT_DROP, false);
vnet_crypto_async_reset_frame (*async_frame);
vnet_crypto_async_free_frame (vm, *async_frame);
}