#include <vnet/ipsec/ipsec.h>
#include <vnet/ipsec/esp.h>
#include <vnet/ipsec/ipsec_io.h>
+#include <vnet/ipsec/ipsec_tun.h>
#define foreach_esp_decrypt_next \
_(DROP, "error-drop") \
_(IP4_INPUT, "ip4-input-no-checksum") \
-_(IP6_INPUT, "ip6-input") \
-_(IPSEC_GRE_INPUT, "ipsec-gre-input")
+_(IP6_INPUT, "ip6-input")
#define _(v, s) ESP_DECRYPT_NEXT_##v,
typedef enum
_(INTEG_ERROR, "Integrity check failed") \
_(CRYPTO_ENGINE_ERROR, "crypto engine error (packet dropped)") \
_(REPLAY, "SA replayed packet") \
+ _(RUNT, "undersized packet") \
_(CHAINED_BUFFER, "chained buffers (packet dropped)") \
_(OVERSIZED_HEADER, "buffer with oversized header (dropped)") \
_(NO_TAIL_SPACE, "not enough buffer tail space (dropped)")
{
u8 icv_sz;
u8 iv_sz;
- ipsec_sa_flags_t flags:8;
+ ipsec_sa_flags_t flags;
u32 sa_index;
};
u64 sa_data;
always_inline uword
esp_decrypt_inline (vlib_main_t * vm,
vlib_node_runtime_t * node, vlib_frame_t * from_frame,
- int is_ip6)
+ int is_ip6, int is_tun)
{
ipsec_main_t *im = &ipsec_main;
u32 thread_index = vm->thread_index;
if (vnet_buffer (b[0])->ipsec.sad_index != current_sa_index)
{
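+      /* flush the counters accumulated for the previous SA before
+       * switching, so they are charged to the correct SA index */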
+ if (current_sa_pkts)
+ vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
+ current_sa_index,
+ current_sa_pkts,
+ current_sa_bytes);
+ current_sa_bytes = current_sa_pkts = 0;
+
current_sa_index = vnet_buffer (b[0])->ipsec.sad_index;
sa0 = pool_elt_at_index (im->sad, current_sa_index);
cpd.icv_sz = sa0->integ_icv_size;
cpd.iv_sz = sa0->crypto_iv_size;
cpd.flags = sa0->flags;
cpd.sa_index = current_sa_index;
-
- vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
- current_sa_index, current_sa_pkts,
- current_sa_bytes);
-
- current_sa_bytes = current_sa_pkts = 0;
}
/* store packet data for next round for easier prefetch */
goto next;
}
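+      /* a valid ESP packet must be large enough to hold at least
+       * the ESP header, IV and ICV */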
+ if (pd->current_length < cpd.icv_sz + esp_sz + cpd.iv_sz)
+ {
+ b[0]->error = node->errors[ESP_DECRYPT_ERROR_RUNT];
+ next[0] = ESP_DECRYPT_NEXT_DROP;
+ goto next;
+ }
+
len = pd->current_length - cpd.icv_sz;
current_sa_pkts += 1;
current_sa_bytes += pd->current_length;
- if (PREDICT_TRUE (cpd.icv_sz > 0))
+ if (PREDICT_TRUE (sa0->integ_op_id != VNET_CRYPTO_OP_NONE))
{
vnet_crypto_op_t *op;
vec_add2_aligned (ptd->integ_ops, op, 1, CLIB_CACHE_LINE_BYTES);
vnet_crypto_op_init (op, sa0->integ_op_id);
- op->key = sa0->integ_key.data;
- op->key_len = sa0->integ_key.len;
+ op->key_index = sa0->integ_key_index;
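+	  /* keys are now referenced by index into the crypto key
+	   * table rather than carried by pointer in each op */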
op->src = payload;
op->flags = VNET_CRYPTO_OP_FLAG_HMAC_CHECK;
op->user_data = b - bufs;
vnet_crypto_op_t *op;
vec_add2_aligned (ptd->crypto_ops, op, 1, CLIB_CACHE_LINE_BYTES);
vnet_crypto_op_init (op, sa0->crypto_dec_op_id);
- op->key = sa0->crypto_key.data;
+ op->key_index = sa0->crypto_key_index;
op->iv = payload;
+
+ if (ipsec_sa_is_set_IS_AEAD (sa0))
+ {
+ esp_header_t *esp0;
+ esp_aead_t *aad;
+ u8 *scratch;
+
+	      /*
+	       * construct the AAD in a scratch space in front of the
+	       * IP header; the nonce (Salt || IV) is formed in place
+	       * over the tail of the ESP header below
+	       */
+ scratch = payload - esp_sz;
+ esp0 = (esp_header_t *) (scratch);
+
+ scratch -= (sizeof (*aad) + pd->hdr_sz);
+ op->aad = scratch;
+
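+	      /* the AAD for ESP AEAD is the ESP header itself: the
+	       * SPI and the (extended) sequence number */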
+ esp_aad_fill (op, esp0, sa0);
+
+	      /*
+	       * the ESP header is no longer needed, so overwrite its
+	       * tail with the salt and use the IV where it already
+	       * sits to form the nonce = (Salt || IV)
+	       */
+ op->iv -= sizeof (sa0->salt);
+ clib_memcpy_fast (op->iv, &sa0->salt, sizeof (sa0->salt));
+
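+	      /* the trailing ICV of an AEAD cipher is its tag */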
+ op->tag = payload + len;
+ op->tag_len = 16;
+ }
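+
+	  /* skip the IV; only the ciphertext that follows it is
+	   * decrypted */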
op->src = op->dst = payload += cpd.iv_sz;
- op->len = len;
+ op->len = len - cpd.iv_sz;
op->user_data = b - bufs;
}
sa0 = vec_elt_at_index (im->sad, pd->sa_index);
u8 *payload = b[0]->data + pd->current_data;
- ipsec_sa_anti_replay_advance (sa0, &((esp_header_t *) payload)->seq);
+ ipsec_sa_anti_replay_advance (sa0, ((esp_header_t *) payload)->seq);
esp_footer_t *f = (esp_footer_t *) (b[0]->data + pd->current_data +
pd->current_length - sizeof (*f) -
u16 adv = pd->iv_sz + esp_sz;
u16 tail = sizeof (esp_footer_t) + f->pad_length + pd->icv_sz;
- if ((pd->flags & tun_flags) == 0) /* transport mode */
+ if ((pd->flags & tun_flags) == 0 && !is_tun) /* transport mode */
{
u8 udp_sz = (is_ip6 == 0 && pd->flags & IPSEC_SA_FLAG_UDP_ENCAP) ?
sizeof (udp_header_t) : 0;
{
next[0] = ESP_DECRYPT_NEXT_IP4_INPUT;
b[0]->current_data = pd->current_data + adv;
- b[0]->current_length = pd->current_length + adv - tail;
+ b[0]->current_length = pd->current_length - adv - tail;
}
else if (f->next_header == IP_PROTOCOL_IPV6)
{
next[0] = ESP_DECRYPT_NEXT_IP6_INPUT;
b[0]->current_data = pd->current_data + adv;
- b[0]->current_length = pd->current_length + adv - tail;
+ b[0]->current_length = pd->current_length - adv - tail;
}
else
{
next[0] = ESP_DECRYPT_NEXT_DROP;
b[0]->error = node->errors[ESP_DECRYPT_ERROR_DECRYPTION_FAILED];
+ goto trace;
+ }
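+	  /* for tunnel protect, drop packets whose decrypted inner
+	   * header does not match the protected tunnel's endpoints;
+	   * otherwise traffic could be spoofed into the tunnel */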
+ if (is_tun)
+ {
+ if (ipsec_sa_is_set_IS_PROTECT (sa0))
+ {
+	      /*
+	       * Check that the revealed IP header matches that
+	       * of the tunnel we are protecting
+	       */
+ const ipsec_tun_protect_t *itp;
+
+	      itp = ipsec_tun_protect_get
+		(vnet_buffer (b[0])->ipsec.protect_index);
+ if (PREDICT_TRUE (f->next_header == IP_PROTOCOL_IP_IN_IP))
+ {
+ const ip4_header_t *ip4;
+
+ ip4 = vlib_buffer_get_current (b[0]);
+
+ if (!ip46_address_is_equal_v4 (&itp->itp_tun.src,
+ &ip4->dst_address) ||
+ !ip46_address_is_equal_v4 (&itp->itp_tun.dst,
+ &ip4->src_address))
+ next[0] = ESP_DECRYPT_NEXT_DROP;
+
+ }
+ else if (f->next_header == IP_PROTOCOL_IPV6)
+ {
+ const ip6_header_t *ip6;
+
+ ip6 = vlib_buffer_get_current (b[0]);
+
+ if (!ip46_address_is_equal_v6 (&itp->itp_tun.src,
+ &ip6->dst_address) ||
+ !ip46_address_is_equal_v6 (&itp->itp_tun.dst,
+ &ip6->src_address))
+ next[0] = ESP_DECRYPT_NEXT_DROP;
+ }
+ }
}
}
- if (PREDICT_FALSE (ipsec_sa_is_set_IS_GRE (sa0)))
- next[0] = ESP_DECRYPT_NEXT_IPSEC_GRE_INPUT;
-
trace:
if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
{
vlib_node_runtime_t * node,
vlib_frame_t * from_frame)
{
- return esp_decrypt_inline (vm, node, from_frame, 0 /* is_ip6 */ );
+  return esp_decrypt_inline (vm, node, from_frame, 0 /* is_ip6 */ ,
+			     0 /* is_tun */ );
+}
+
+VLIB_NODE_FN (esp4_decrypt_tun_node) (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame)
+{
+  return esp_decrypt_inline (vm, node, from_frame, 0 /* is_ip6 */ ,
+			     1 /* is_tun */ );
+}
+
+VLIB_NODE_FN (esp6_decrypt_node) (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame)
+{
+  return esp_decrypt_inline (vm, node, from_frame, 1 /* is_ip6 */ ,
+			     0 /* is_tun */ );
+}
+
+VLIB_NODE_FN (esp6_decrypt_tun_node) (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame)
+{
+  return esp_decrypt_inline (vm, node, from_frame, 1 /* is_ip6 */ ,
+			     1 /* is_tun */ );
}
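+
+/*
+ * is_ip6 and is_tun are compile-time constants in each wrapper above,
+ * so the always_inline esp_decrypt_inline expands into four
+ * specialized variants with the per-mode branches resolved
+ */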
/* *INDENT-OFF* */
#undef _
},
};
-/* *INDENT-ON* */
-VLIB_NODE_FN (esp6_decrypt_node) (vlib_main_t * vm,
- vlib_node_runtime_t * node,
- vlib_frame_t * from_frame)
-{
- return esp_decrypt_inline (vm, node, from_frame, 1 /* is_ip6 */ );
-}
-
-/* *INDENT-OFF* */
VLIB_REGISTER_NODE (esp6_decrypt_node) = {
.name = "esp6-decrypt",
.vector_size = sizeof (u32),
#undef _
},
};
+
+VLIB_REGISTER_NODE (esp4_decrypt_tun_node) = {
+ .name = "esp4-decrypt-tun",
+ .vector_size = sizeof (u32),
+ .format_trace = format_esp_decrypt_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = ARRAY_LEN(esp_decrypt_error_strings),
+ .error_strings = esp_decrypt_error_strings,
+
+ .n_next_nodes = ESP_DECRYPT_N_NEXT,
+ .next_nodes = {
+#define _(s,n) [ESP_DECRYPT_NEXT_##s] = n,
+ foreach_esp_decrypt_next
+#undef _
+ },
+};
+
+VLIB_REGISTER_NODE (esp6_decrypt_tun_node) = {
+ .name = "esp6-decrypt-tun",
+ .vector_size = sizeof (u32),
+ .format_trace = format_esp_decrypt_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = ARRAY_LEN(esp_decrypt_error_strings),
+ .error_strings = esp_decrypt_error_strings,
+
+ .n_next_nodes = ESP_DECRYPT_N_NEXT,
+ .next_nodes = {
+#define _(s,n) [ESP_DECRYPT_NEXT_##s] = n,
+ foreach_esp_decrypt_next
+#undef _
+ },
+};
/* *INDENT-ON* */
/*