#include <vnet/ipsec/ipsec.h>
#include <vnet/ipsec/ipsec_tun.h>
+#include <vnet/ipsec/ipsec.api_enum.h>
#include <vnet/ipsec/esp.h>
#include <vnet/tunnel/tunnel_dp.h>
ESP_ENCRYPT_N_NEXT,
} esp_encrypt_next_t;
-#define foreach_esp_encrypt_error \
- _ (RX_PKTS, "ESP pkts received") \
- _ (POST_RX_PKTS, "ESP-post pkts received") \
- _ (HANDOFF, "Hand-off") \
- _ (SEQ_CYCLED, "sequence number cycled (packet dropped)") \
- _ (CRYPTO_ENGINE_ERROR, "crypto engine error (packet dropped)") \
- _ (CRYPTO_QUEUE_FULL, "crypto queue full (packet dropped)") \
- _ (NO_BUFFERS, "no buffers (packet dropped)")
-
-typedef enum
-{
-#define _(sym,str) ESP_ENCRYPT_ERROR_##sym,
- foreach_esp_encrypt_error
-#undef _
- ESP_ENCRYPT_N_ERROR,
-} esp_encrypt_error_t;
-
-static char *esp_encrypt_error_strings[] = {
-#define _(sym,string) string,
- foreach_esp_encrypt_error
-#undef _
-};
-
typedef struct
{
u32 sa_index;
u32 next_index;
} esp_encrypt_post_trace_t;
+typedef vl_counter_esp_encrypt_enum_t esp_encrypt_error_t;
+
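/*
 * The open-coded error table above is replaced by the enum and counter
 * table that vppapigen generates from the counter definitions in
 * ipsec.api (pulled in through ipsec.api_enum.h).  A sketch of the
 * generated shape -- member names inferred from their use in this file,
 * not copied from the real header:
 *
 *   typedef enum
 *   {
 *     ESP_ENCRYPT_ERROR_RX_PKTS,
 *     ESP_ENCRYPT_ERROR_POST_RX_PKTS,
 *     ESP_ENCRYPT_ERROR_HANDOFF,
 *     ESP_ENCRYPT_ERROR_SEQ_CYCLED,
 *     ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR,
 *     ESP_ENCRYPT_ERROR_CRYPTO_QUEUE_FULL,
 *     ESP_ENCRYPT_ERROR_NO_BUFFERS,
 *     ESP_ENCRYPT_ERROR_NO_PROTECTION,
 *     ESP_ENCRYPT_ERROR_NO_ENCRYPTION,
 *     ESP_ENCRYPT_N_ERROR,
 *   } vl_counter_esp_encrypt_enum_t;
 *
 *   extern vl_counter_t esp_encrypt_error_counters[];
 */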
/* packet trace format function */
static u8 *
format_esp_encrypt_trace (u8 * s, va_list * args)
last[0]->current_length + pad_bytes);
u16 tail_sz = sizeof (esp_footer_t) + pad_bytes + icv_sz;
- if (last[0]->current_length + tail_sz > buffer_data_size)
+ if (last[0]->current_data + last[0]->current_length + tail_sz >
+ buffer_data_size)
{
u32 tmp_bi = 0;
if (vlib_buffer_alloc (vm, &tmp_bi, 1) != 1)
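/*
 * A sketch of why the corrected test needs current_data: the usable
 * data area spans [0, buffer_data_size) and the payload starts at
 * current_data, so the room left at the tail is
 *
 *   tail_room = buffer_data_size - (current_data + current_length);
 *
 * The old test compared only current_length + tail_sz and so could
 * overflow the tail of a buffer whose payload sits at a non-zero
 * offset.
 */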
#else
return ((nexthdr ^ IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS) |
(nexthdr ^ IP_PROTOCOL_IPV6_ROUTE) |
- (nexthdr ^ IP_PROTOCOL_IPV6_FRAGMENTATION) != 0);
+ ((nexthdr ^ IP_PROTOCOL_IPV6_FRAGMENTATION) != 0));
#endif
}
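/*
 * The added parentheses silence -Wparentheses ('!=' binds tighter than
 * '|').  For orientation, the predicate this helper answers -- matching
 * the SIMD branch elided above -- is a plain membership test:
 *
 *   nexthdr == IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS ||
 *   nexthdr == IP_PROTOCOL_IPV6_ROUTE ||
 *   nexthdr == IP_PROTOCOL_IPV6_FRAGMENTATION
 */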
return len;
}
- p = (void *) (ip6 + 1);
+ p = ip6_next_header (ip6);
len += ip6_ext_header_len (p);
-
while (ext_hdr_is_pre_esp (p->next_hdr))
{
len += ip6_ext_header_len (p);
always_inline void
esp_prepare_sync_op (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
vnet_crypto_op_t **crypto_ops,
- vnet_crypto_op_t **integ_ops, ipsec_sa_t *sa0,
+ vnet_crypto_op_t **integ_ops, ipsec_sa_t *sa0, u32 seq_hi,
u8 *payload, u16 payload_len, u8 iv_sz, u8 icv_sz, u32 bi,
vlib_buffer_t **b, vlib_buffer_t *lb, u32 hdr_len,
esp_header_t *esp)
{
/* construct aad in a scratch space in front of the nonce */
op->aad = (u8 *) nonce - sizeof (esp_aead_t);
- op->aad_len = esp_aad_fill (op->aad, esp, sa0);
+ op->aad_len = esp_aad_fill (op->aad, esp, sa0, seq_hi);
op->tag = payload + op->len;
op->tag_len = 16;
}
}
else if (ipsec_sa_is_set_USE_ESN (sa0))
{
- u32 seq_hi = clib_net_to_host_u32 (sa0->seq_hi);
- clib_memcpy_fast (op->digest, &seq_hi, sizeof (seq_hi));
+ u32 tmp = clib_net_to_host_u32 (seq_hi);
+ clib_memcpy_fast (op->digest, &tmp, sizeof (seq_hi));
op->len += sizeof (seq_hi);
}
}
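/*
 * On the seq_hi plumbing above: with ESN (RFC 4303), only the low 32
 * bits of the sequence number travel in the ESP header, but the high 32
 * bits still take part in integrity protection.  For AEAD ciphers they
 * go into the AAD (esp_aad_fill now takes seq_hi explicitly); for
 * classic integ algorithms they are appended, big-endian, right after
 * the payload so the MAC covers them:
 *
 *   | esp (spi, seq-lo) | payload ... | seq-hi |   <- op->len
 *                                     ^ op->digest; the computed ICV
 *                                       later overwrites this scratch
 *
 * Passing seq_hi in as a parameter, instead of re-reading sa0->seq_hi,
 * lets the caller supply the exact value used when the packet was
 * built.
 */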
{
/* construct aad in a scratch space in front of the nonce */
aad = (u8 *) nonce - sizeof (esp_aead_t);
- esp_aad_fill (aad, esp, sa);
+ esp_aad_fill (aad, esp, sa, sa->seq_hi);
key_index = sa->crypto_key_index;
}
else
ESP_ENCRYPT_NEXT_HANDOFF_MPLS));
vlib_buffer_t *sync_bufs[VLIB_FRAME_SIZE];
u16 sync_nexts[VLIB_FRAME_SIZE], *sync_next = sync_nexts, n_sync = 0;
- u16 async_nexts[VLIB_FRAME_SIZE], *async_next = async_nexts, n_async = 0;
- u16 noop_nexts[VLIB_FRAME_SIZE], *noop_next = noop_nexts, n_noop = 0;
+ u16 n_async = 0;
+ u16 noop_nexts[VLIB_FRAME_SIZE], n_noop = 0;
u32 sync_bi[VLIB_FRAME_SIZE];
u32 noop_bi[VLIB_FRAME_SIZE];
esp_encrypt_error_t err;
u8 *p;
vlib_prefetch_buffer_header (b[2], LOAD);
p = vlib_buffer_get_current (b[1]);
- CLIB_PREFETCH (p, CLIB_CACHE_LINE_BYTES, LOAD);
+ clib_prefetch_load (p);
p -= CLIB_CACHE_LINE_BYTES;
- CLIB_PREFETCH (p, CLIB_CACHE_LINE_BYTES, LOAD);
+ clib_prefetch_load (p);
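/* clib_prefetch_load is the current single-cacheline LOAD-prefetch
   helper; the second prefetch warms the line just ahead of the packet
   data, where the ESP/IP headers are about to be written. */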
/* speculate that the trailer goes in the first buffer */
CLIB_PREFETCH (vlib_buffer_get_tail (b[1]),
CLIB_CACHE_LINE_BYTES, LOAD);
vnet_buffer (b[0])->ipsec.sad_index =
sa_index0 = ipsec_tun_protect_get_sa_out
(vnet_buffer (b[0])->ip.adj_index[VLIB_TX]);
+
+ if (PREDICT_FALSE (INDEX_INVALID == sa_index0))
+ {
+ err = ESP_ENCRYPT_ERROR_NO_PROTECTION;
+ esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
+ drop_next);
+ goto trace;
+ }
}
else
sa_index0 = vnet_buffer (b[0])->ipsec.sad_index;
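/*
 * Dropping here with NO_PROTECTION replaces the dedicated
 * esp4/esp6-no-crypto nodes deleted at the end of this change: a tunnel
 * whose protection lacks an outbound SA now counts and drops in-line.
 * esp_set_next_index, as used here, records the error on the buffer and
 * steers it into the no-op vector -- roughly (a sketch of the helper,
 * not its verbatim body):
 *
 *   noop_nexts[n_noop] = drop_next;
 *   b[0]->error = node->errors[err];
 */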
sa0 = ipsec_sa_get (sa_index0);
+ if (PREDICT_FALSE ((sa0->crypto_alg == IPSEC_CRYPTO_ALG_NONE &&
+ sa0->integ_alg == IPSEC_INTEG_ALG_NONE) &&
+ !ipsec_sa_is_set_NO_ALGO_NO_DROP (sa0)))
+ {
+ err = ESP_ENCRYPT_ERROR_NO_ENCRYPTION;
+ esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
+ drop_next);
+ goto trace;
+ }
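/*
 * An SA with both crypto and integrity set to NONE would transmit
 * plaintext; such packets now drop with NO_ENCRYPTION unless the
 * operator explicitly opted in via the SA's NO_ALGO_NO_DROP flag.
 */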
/* fetch the second cacheline ASAP */
- CLIB_PREFETCH (sa0->cacheline1, CLIB_CACHE_LINE_BYTES, LOAD);
+ clib_prefetch_load (sa0->cacheline1);
current_sa_index = sa_index0;
spi = clib_net_to_host_u32 (sa0->spi);
}
else /* transport mode */
{
- u8 *l2_hdr, l2_len, *ip_hdr, ip_len;
+ u8 *l2_hdr, l2_len, *ip_hdr;
+ u16 ip_len;
ip6_ext_header_t *ext_hdr;
udp_header_t *udp = 0;
u16 udp_len = 0;
u8 *old_ip_hdr = vlib_buffer_get_current (b[0]);
+	      /*
+	       * Get the IP header length, including any extension header
+	       * chain. The chain can be longer than the buffer's pre_data
+	       * headroom; the check below guards against that.
+	       */
ip_len =
(VNET_LINK_IP6 == lt ?
esp_get_ip6_hdr_len ((ip6_header_t *) old_ip_hdr, &ext_hdr) :
ip4_header_bytes ((ip4_header_t *) old_ip_hdr));
+ if ((old_ip_hdr - ip_len) < &b[0]->pre_data[0])
+ {
+ err = ESP_ENCRYPT_ERROR_NO_BUFFERS;
+ esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
+ drop_next);
+ goto trace;
+ }
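/*
 * old_ip_hdr points at the buffer's current data; the rebuilt header
 * chain is copied ip_len bytes in front of it, so the test above asks
 * whether ip_len fits in the headroom below current_data.  Widening
 * ip_len to u16 matters here too: an IPv6 extension header chain longer
 * than 255 bytes would have wrapped the old u8.
 */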
vlib_buffer_advance (b[0], ip_len);
payload = vlib_buffer_get_current (b[0]);
else
l2_len = 0;
+ u16 len;
+ len = payload_len_total + hdr_len - l2_len;
+
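/* len is the total encapsulated length measured from the (new) IP
   header -- payload, padding, trailer and ICV included -- and is now
   shared by the ip6 payload_length, ip4 total-length and UDP length
   fix-ups below. */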
if (VNET_LINK_IP6 == lt)
{
ip6_header_t *ip6 = (ip6_header_t *) (old_ip_hdr);
if (PREDICT_TRUE (NULL == ext_hdr))
{
*next_hdr_ptr = ip6->protocol;
- ip6->protocol = IP_PROTOCOL_IPSEC_ESP;
+ ip6->protocol =
+ (udp) ? IP_PROTOCOL_UDP : IP_PROTOCOL_IPSEC_ESP;
}
else
{
*next_hdr_ptr = ext_hdr->next_hdr;
- ext_hdr->next_hdr = IP_PROTOCOL_IPSEC_ESP;
+ ext_hdr->next_hdr =
+ (udp) ? IP_PROTOCOL_UDP : IP_PROTOCOL_IPSEC_ESP;
}
ip6->payload_length =
- clib_host_to_net_u16 (payload_len_total + hdr_len - l2_len -
- sizeof (ip6_header_t));
+ clib_host_to_net_u16 (len - sizeof (ip6_header_t));
}
else if (VNET_LINK_IP4 == lt)
{
- u16 len;
ip4_header_t *ip4 = (ip4_header_t *) (old_ip_hdr);
*next_hdr_ptr = ip4->protocol;
- len = payload_len_total + hdr_len - l2_len;
- if (udp)
- {
- esp_update_ip4_hdr (ip4, len, /* is_transport */ 1, 1);
- udp_len = len - ip_len;
- }
- else
- esp_update_ip4_hdr (ip4, len, /* is_transport */ 1, 0);
+ esp_update_ip4_hdr (ip4, len, /* is_transport */ 1,
+ (udp != NULL));
}
clib_memcpy_le64 (ip_hdr, old_ip_hdr, ip_len);
if (udp)
{
+ udp_len = len - ip_len;
esp_fill_udp_hdr (sa0, udp, udp_len);
}
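/*
 * The (udp) selection above fixes UDP encapsulation (NAT-T, RFC 3948)
 * in ip6 transport mode: the last header in the chain must point at
 * UDP, not ESP, giving the layout
 *
 *   ip6 | [ext hdrs] | udp | esp | payload | pad/trailer | icv
 *
 * udp_len is then derived once from the shared len, after the IP header
 * fix-ups.
 */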
async_next_node, lb);
}
else
- esp_prepare_sync_op (vm, ptd, crypto_ops, integ_ops, sa0, payload,
- payload_len, iv_sz, icv_sz, n_sync, b, lb,
- hdr_len, esp);
+ esp_prepare_sync_op (vm, ptd, crypto_ops, integ_ops, sa0, sa0->seq_hi,
+ payload, payload_len, iv_sz, icv_sz, n_sync, b,
+ lb, hdr_len, esp);
vlib_buffer_advance (b[0], 0LL - hdr_len);
{
esp_encrypt_trace_t *tr = vlib_add_trace (vm, node, b[0],
sizeof (*tr));
- tr->sa_index = sa_index0;
- tr->spi = sa0->spi;
- tr->seq = sa0->seq;
- tr->sa_seq_hi = sa0->seq_hi;
- tr->udp_encap = ipsec_sa_is_set_UDP_ENCAP (sa0);
- tr->crypto_alg = sa0->crypto_alg;
- tr->integ_alg = sa0->integ_alg;
+ if (INDEX_INVALID == sa_index0)
+ clib_memset_u8 (tr, 0xff, sizeof (*tr));
+ else
+ {
+ tr->sa_index = sa_index0;
+ tr->spi = sa0->spi;
+ tr->seq = sa0->seq;
+ tr->sa_seq_hi = sa0->seq_hi;
+ tr->udp_encap = ipsec_sa_is_set_UDP_ENCAP (sa0);
+ tr->crypto_alg = sa0->crypto_alg;
+ tr->integ_alg = sa0->integ_alg;
+ }
}
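/* With an invalid SA index there is no sa0 to read, so the trace record
   is filled with 0xff -- an obviously bogus pattern -- rather than
   dereferencing a stale SA pointer. */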
/* next */
{
noop_bi[n_noop] = from[b - bufs];
n_noop++;
- noop_next++;
}
else if (!is_async)
{
else
{
n_async++;
- async_next++;
}
n_left -= 1;
b += 1;
}
- vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
- current_sa_index, current_sa_packets,
- current_sa_bytes);
+ if (INDEX_INVALID != current_sa_index)
+ vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
+ current_sa_index, current_sa_packets,
+ current_sa_bytes);
if (n_sync)
{
esp_process_ops (vm, node, ptd->crypto_ops, sync_bufs, sync_nexts,
{
n_noop += esp_async_recycle_failed_submit (
vm, *async_frame, node, ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR,
- n_sync, noop_bi, noop_nexts, drop_next);
+ n_noop, noop_bi, noop_nexts, drop_next);
vnet_crypto_async_reset_frame (*async_frame);
vnet_crypto_async_free_frame (vm, *async_frame);
}
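/* Failed async frames are recycled into the no-op vectors, so the
   running offset handed to esp_async_recycle_failed_submit must be
   n_noop; the previous n_sync offset could clobber no-op entries that
   were already queued. */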
.format_trace = format_esp_encrypt_trace,
.type = VLIB_NODE_TYPE_INTERNAL,
- .n_errors = ARRAY_LEN (esp_encrypt_error_strings),
- .error_strings = esp_encrypt_error_strings,
+ .n_errors = ESP_ENCRYPT_N_ERROR,
+ .error_counters = esp_encrypt_error_counters,
.n_next_nodes = ESP_ENCRYPT_N_NEXT,
.next_nodes = { [ESP_ENCRYPT_NEXT_DROP4] = "ip4-drop",
.type = VLIB_NODE_TYPE_INTERNAL,
.sibling_of = "esp4-encrypt",
- .n_errors = ARRAY_LEN(esp_encrypt_error_strings),
- .error_strings = esp_encrypt_error_strings,
+ .n_errors = ESP_ENCRYPT_N_ERROR,
+ .error_counters = esp_encrypt_error_counters,
};
/* *INDENT-ON* */
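/*
 * Throughout the node registrations below, .error_strings (bare char *)
 * gives way to .error_counters, the vl_counter_t table generated from
 * ipsec.api; each entry carries a name, description and severity, so
 * the errors surface in the stats segment with full metadata.  Abridged
 * shape, for orientation (from vlib's error infrastructure):
 *
 *   typedef struct
 *   {
 *     char *name;
 *     char *desc;
 *     vl_counter_severity_t severity;
 *   } vl_counter_t;
 */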
.type = VLIB_NODE_TYPE_INTERNAL,
.sibling_of = "esp4-encrypt",
- .n_errors = ARRAY_LEN(esp_encrypt_error_strings),
- .error_strings = esp_encrypt_error_strings,
+ .n_errors = ESP_ENCRYPT_N_ERROR,
+ .error_counters = esp_encrypt_error_counters,
};
/* *INDENT-ON* */
.type = VLIB_NODE_TYPE_INTERNAL,
.sibling_of = "esp4-encrypt",
- .n_errors = ARRAY_LEN(esp_encrypt_error_strings),
- .error_strings = esp_encrypt_error_strings,
+ .n_errors = ESP_ENCRYPT_N_ERROR,
+ .error_counters = esp_encrypt_error_counters,
};
/* *INDENT-ON* */
.format_trace = format_esp_encrypt_trace,
.type = VLIB_NODE_TYPE_INTERNAL,
- .n_errors = ARRAY_LEN(esp_encrypt_error_strings),
- .error_strings = esp_encrypt_error_strings,
+ .n_errors = ESP_ENCRYPT_N_ERROR,
+ .error_counters = esp_encrypt_error_counters,
.n_next_nodes = ESP_ENCRYPT_N_NEXT,
.next_nodes = {
.type = VLIB_NODE_TYPE_INTERNAL,
.sibling_of = "esp4-encrypt-tun",
- .n_errors = ARRAY_LEN(esp_encrypt_error_strings),
- .error_strings = esp_encrypt_error_strings,
+ .n_errors = ESP_ENCRYPT_N_ERROR,
+ .error_counters = esp_encrypt_error_counters,
};
/* *INDENT-ON* */
.format_trace = format_esp_encrypt_trace,
.type = VLIB_NODE_TYPE_INTERNAL,
- .n_errors = ARRAY_LEN(esp_encrypt_error_strings),
- .error_strings = esp_encrypt_error_strings,
+ .n_errors = ESP_ENCRYPT_N_ERROR,
+ .error_counters = esp_encrypt_error_counters,
.n_next_nodes = ESP_ENCRYPT_N_NEXT,
.next_nodes = {
.type = VLIB_NODE_TYPE_INTERNAL,
.sibling_of = "esp-mpls-encrypt-tun",
- .n_errors = ARRAY_LEN (esp_encrypt_error_strings),
- .error_strings = esp_encrypt_error_strings,
+ .n_errors = ESP_ENCRYPT_N_ERROR,
+ .error_counters = esp_encrypt_error_counters,
};
/* *INDENT-ON* */
.format_trace = format_esp_encrypt_trace,
.type = VLIB_NODE_TYPE_INTERNAL,
- .n_errors = ARRAY_LEN(esp_encrypt_error_strings),
- .error_strings = esp_encrypt_error_strings,
+ .n_errors = ESP_ENCRYPT_N_ERROR,
+ .error_counters = esp_encrypt_error_counters,
.n_next_nodes = ESP_ENCRYPT_N_NEXT,
.next_nodes = {
.type = VLIB_NODE_TYPE_INTERNAL,
.sibling_of = "esp-mpls-encrypt-tun",
- .n_errors = ARRAY_LEN (esp_encrypt_error_strings),
- .error_strings = esp_encrypt_error_strings,
-};
-
-typedef struct
-{
- u32 sa_index;
-} esp_no_crypto_trace_t;
-
-static u8 *
-format_esp_no_crypto_trace (u8 * s, va_list * args)
-{
- CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
- CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
- esp_no_crypto_trace_t *t = va_arg (*args, esp_no_crypto_trace_t *);
-
- s = format (s, "esp-no-crypto: sa-index %u", t->sa_index);
-
- return s;
-}
-
-enum
-{
- ESP_NO_CRYPTO_NEXT_DROP,
- ESP_NO_CRYPTO_N_NEXT,
-};
-
-enum
-{
- ESP_NO_CRYPTO_ERROR_RX_PKTS,
-};
-
-static char *esp_no_crypto_error_strings[] = {
- "Outbound ESP packets received",
-};
-
-always_inline uword
-esp_no_crypto_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
- vlib_frame_t * frame)
-{
- vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
- u32 *from = vlib_frame_vector_args (frame);
- u32 n_left = frame->n_vectors;
-
- vlib_get_buffers (vm, from, b, n_left);
-
- while (n_left > 0)
- {
- u32 sa_index0;
-
- /* packets are always going to be dropped, but get the sa_index */
- sa_index0 = ipsec_tun_protect_get_sa_out
- (vnet_buffer (b[0])->ip.adj_index[VLIB_TX]);
-
- if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
- {
- esp_no_crypto_trace_t *tr = vlib_add_trace (vm, node, b[0],
- sizeof (*tr));
- tr->sa_index = sa_index0;
- }
-
- n_left -= 1;
- b += 1;
- }
-
- vlib_node_increment_counter (vm, node->node_index,
- ESP_NO_CRYPTO_ERROR_RX_PKTS, frame->n_vectors);
-
- vlib_buffer_enqueue_to_single_next (vm, node, from,
- ESP_NO_CRYPTO_NEXT_DROP,
- frame->n_vectors);
-
- return frame->n_vectors;
-}
-
-VLIB_NODE_FN (esp4_no_crypto_tun_node) (vlib_main_t * vm,
- vlib_node_runtime_t * node,
- vlib_frame_t * from_frame)
-{
- return esp_no_crypto_inline (vm, node, from_frame);
-}
-
-/* *INDENT-OFF* */
-VLIB_REGISTER_NODE (esp4_no_crypto_tun_node) =
-{
- .name = "esp4-no-crypto",
- .vector_size = sizeof (u32),
- .format_trace = format_esp_no_crypto_trace,
- .n_errors = ARRAY_LEN(esp_no_crypto_error_strings),
- .error_strings = esp_no_crypto_error_strings,
- .n_next_nodes = ESP_NO_CRYPTO_N_NEXT,
- .next_nodes = {
- [ESP_NO_CRYPTO_NEXT_DROP] = "ip4-drop",
- },
-};
-
-VLIB_NODE_FN (esp6_no_crypto_tun_node) (vlib_main_t * vm,
- vlib_node_runtime_t * node,
- vlib_frame_t * from_frame)
-{
- return esp_no_crypto_inline (vm, node, from_frame);
-}
-
-/* *INDENT-OFF* */
-VLIB_REGISTER_NODE (esp6_no_crypto_tun_node) =
-{
- .name = "esp6-no-crypto",
- .vector_size = sizeof (u32),
- .format_trace = format_esp_no_crypto_trace,
- .n_errors = ARRAY_LEN(esp_no_crypto_error_strings),
- .error_strings = esp_no_crypto_error_strings,
- .n_next_nodes = ESP_NO_CRYPTO_N_NEXT,
- .next_nodes = {
- [ESP_NO_CRYPTO_NEXT_DROP] = "ip6-drop",
- },
+ .n_errors = ESP_ENCRYPT_N_ERROR,
+ .error_counters = esp_encrypt_error_counters,
};
-/* *INDENT-ON* */
#ifndef CLIB_MARCH_VARIANT