#include <vnet/vnet.h>
#include <vnet/api_errno.h>
#include <vnet/ip/ip.h>
-#include <vnet/udp/udp.h>
+
+#include <vnet/crypto/crypto.h>
#include <vnet/ipsec/ipsec.h>
+#include <vnet/ipsec/ipsec_tun.h>
+#include <vnet/ipsec/ipsec.api_enum.h>
#include <vnet/ipsec/esp.h>
+#include <vnet/tunnel/tunnel_dp.h>
-#ifndef CLIB_MARCH_VARIANT
-ipsec_proto_main_t ipsec_proto_main;
-#endif /* CLIB_MARCH_VARIANT */
-
-#define foreach_esp_encrypt_next \
-_(DROP, "error-drop") \
-_(IP4_LOOKUP, "ip4-lookup") \
-_(IP6_LOOKUP, "ip6-lookup") \
-_(INTERFACE_OUTPUT, "interface-output")
+#define foreach_esp_encrypt_next \
+ _ (DROP4, "ip4-drop") \
+ _ (DROP6, "ip6-drop") \
+ _ (DROP_MPLS, "mpls-drop") \
+ _ (HANDOFF4, "handoff4") \
+ _ (HANDOFF6, "handoff6") \
+ _ (HANDOFF_MPLS, "handoff-mpls") \
+ _ (INTERFACE_OUTPUT, "interface-output")
#define _(v, s) ESP_ENCRYPT_NEXT_##v,
typedef enum
{
  foreach_esp_encrypt_next
#undef _
ESP_ENCRYPT_N_NEXT,
} esp_encrypt_next_t;
-#define foreach_esp_encrypt_error \
- _(RX_PKTS, "ESP pkts received") \
- _(NO_BUFFER, "No buffer (packet dropped)") \
- _(DECRYPTION_FAILED, "ESP encryption failed") \
- _(SEQ_CYCLED, "sequence number cycled")
-
-
-typedef enum
-{
-#define _(sym,str) ESP_ENCRYPT_ERROR_##sym,
- foreach_esp_encrypt_error
-#undef _
- ESP_ENCRYPT_N_ERROR,
-} esp_encrypt_error_t;
-
-static char *esp_encrypt_error_strings[] = {
-#define _(sym,string) string,
- foreach_esp_encrypt_error
-#undef _
-};
-
typedef struct
{
u32 sa_index;
u32 spi;
u32 seq;
+ u32 sa_seq_hi;
u8 udp_encap;
ipsec_crypto_alg_t crypto_alg;
ipsec_integ_alg_t integ_alg;
} esp_encrypt_trace_t;
+typedef struct
+{
+ u32 next_index;
+} esp_encrypt_post_trace_t;
+
+typedef vl_counter_esp_encrypt_enum_t esp_encrypt_error_t;
+
/* packet trace format function */
static u8 *
format_esp_encrypt_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
esp_encrypt_trace_t *t = va_arg (*args, esp_encrypt_trace_t *);
- s = format (s, "esp: sa-index %d spi %u seq %u crypto %U integrity %U%s",
- t->sa_index, t->spi, t->seq,
- format_ipsec_crypto_alg, t->crypto_alg,
- format_ipsec_integ_alg, t->integ_alg,
- t->udp_encap ? " udp-encap-enabled" : "");
+ s =
+ format (s,
+ "esp: sa-index %d spi %u (0x%08x) seq %u sa-seq-hi %u crypto %U integrity %U%s",
+ t->sa_index, t->spi, t->spi, t->seq, t->sa_seq_hi,
+ format_ipsec_crypto_alg,
+ t->crypto_alg, format_ipsec_integ_alg, t->integ_alg,
+ t->udp_encap ? " udp-encap-enabled" : "");
return s;
}
-always_inline void
-esp_encrypt_cbc (vlib_main_t * vm, ipsec_crypto_alg_t alg,
- u8 * in, u8 * out, size_t in_len, u8 * key, u8 * iv)
+static u8 *
+format_esp_post_encrypt_trace (u8 * s, va_list * args)
{
- ipsec_proto_main_t *em = &ipsec_proto_main;
- u32 thread_index = vm->thread_index;
-#if OPENSSL_VERSION_NUMBER >= 0x10100000L
- EVP_CIPHER_CTX *ctx = em->per_thread_data[thread_index].encrypt_ctx;
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ esp_encrypt_post_trace_t *t = va_arg (*args, esp_encrypt_post_trace_t *);
+
+ s = format (s, "esp-post: next node index %u", t->next_index);
+ return s;
+}
+
+/* pad packet in input buffer */
+static_always_inline u8 *
+esp_add_footer_and_icv (vlib_main_t *vm, vlib_buffer_t **last, u8 esp_align,
+ u8 icv_sz, vlib_node_runtime_t *node,
+ u16 buffer_data_size, uword total_len)
+{
+ static const u8 pad_data[ESP_MAX_BLOCK_SIZE] = {
+ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
+ 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x00,
+ };
+
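+ /* RFC 4303 trailer: monotonic pad bytes, then pad_length and next_header,
+ then the ICV; payload plus footer is rounded up to the SA's block
+ alignment, the ICV itself is not aligned */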
+ u16 min_length = total_len + sizeof (esp_footer_t);
+ u16 new_length = round_pow2 (min_length, esp_align);
+ u8 pad_bytes = new_length - min_length;
+ esp_footer_t *f = (esp_footer_t *) (vlib_buffer_get_current (last[0]) +
+ last[0]->current_length + pad_bytes);
+ u16 tail_sz = sizeof (esp_footer_t) + pad_bytes + icv_sz;
+
+ if (last[0]->current_data + last[0]->current_length + tail_sz >
+ buffer_data_size)
+ {
+ u32 tmp_bi = 0;
+ if (vlib_buffer_alloc (vm, &tmp_bi, 1) != 1)
+ return 0;
+
+ vlib_buffer_t *tmp = vlib_get_buffer (vm, tmp_bi);
+ last[0]->next_buffer = tmp_bi;
+ last[0]->flags |= VLIB_BUFFER_NEXT_PRESENT;
+ f = (esp_footer_t *) (vlib_buffer_get_current (tmp) + pad_bytes);
+ tmp->current_length += tail_sz;
+ last[0] = tmp;
+ }
+ else
+ last[0]->current_length += tail_sz;
+
+ f->pad_length = pad_bytes;
+ if (pad_bytes)
+ {
+ ASSERT (pad_bytes <= ESP_MAX_BLOCK_SIZE);
+ pad_bytes = clib_min (ESP_MAX_BLOCK_SIZE, pad_bytes);
+ clib_memcpy_fast ((u8 *) f - pad_bytes, pad_data, pad_bytes);
+ }
+
+ return &f->next_header;
+}
+
+static_always_inline void
+esp_update_ip4_hdr (ip4_header_t * ip4, u16 len, int is_transport, int is_udp)
+{
+ ip_csum_t sum;
+ u16 old_len;
+
+ len = clib_net_to_host_u16 (len);
+ old_len = ip4->length;
+
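+ /* patch the IP checksum incrementally with ip_csum_update; no full
+ recompute is needed */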
+ if (is_transport)
+ {
+ u8 prot = is_udp ? IP_PROTOCOL_UDP : IP_PROTOCOL_IPSEC_ESP;
+
+ sum = ip_csum_update (ip4->checksum, ip4->protocol,
+ prot, ip4_header_t, protocol);
+ ip4->protocol = prot;
+
+ sum = ip_csum_update (sum, old_len, len, ip4_header_t, length);
+ }
+ else
+ sum = ip_csum_update (ip4->checksum, old_len, len, ip4_header_t, length);
+
+ ip4->length = len;
+ ip4->checksum = ip_csum_fold (sum);
+}
+
+static_always_inline void
+esp_fill_udp_hdr (ipsec_sa_t * sa, udp_header_t * udp, u16 len)
+{
+ clib_memcpy_fast (udp, &sa->udp_hdr, sizeof (udp_header_t));
+ udp->length = clib_net_to_host_u16 (len);
+}
+
+static_always_inline u8
+ext_hdr_is_pre_esp (u8 nexthdr)
+{
+#ifdef CLIB_HAVE_VEC128
+ static const u8x16 ext_hdr_types = {
+ IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS,
+ IP_PROTOCOL_IPV6_ROUTE,
+ IP_PROTOCOL_IPV6_FRAGMENTATION,
+ };
+
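+ /* one 16-lane vector compare tests nexthdr against all three pre-ESP
+ extension header types at once */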
+ return !u8x16_is_all_zero (ext_hdr_types == u8x16_splat (nexthdr));
#else
- EVP_CIPHER_CTX *ctx = &(em->per_thread_data[thread_index].encrypt_ctx);
+ return (!(nexthdr ^ IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS) |
+ !(nexthdr ^ IP_PROTOCOL_IPV6_ROUTE) |
+ !(nexthdr ^ IP_PROTOCOL_IPV6_FRAGMENTATION));
#endif
- const EVP_CIPHER *cipher = NULL;
- int out_len;
+}
+
+static_always_inline u8
+esp_get_ip6_hdr_len (ip6_header_t * ip6, ip6_ext_header_t ** ext_hdr)
+{
+ /* this code assumes that HbH, route and frag headers come before any
+ others; if that is not the case, they will end up encrypted */
+ u8 len = sizeof (ip6_header_t);
+ ip6_ext_header_t *p;
- ASSERT (alg < IPSEC_CRYPTO_N_ALG);
+ /* if the packet doesn't have an ext header */
+ if (ext_hdr_is_pre_esp (ip6->protocol) == 0)
+ {
+ *ext_hdr = NULL;
+ return len;
+ }
- if (PREDICT_FALSE
- (em->ipsec_proto_main_crypto_algs[alg].type == IPSEC_CRYPTO_ALG_NONE))
+ p = ip6_next_header (ip6);
+ len += ip6_ext_header_len (p);
+ while (ext_hdr_is_pre_esp (p->next_hdr))
+ {
+ p = ip6_ext_next_header (p);
+ len += ip6_ext_header_len (p);
+ }
+
+ *ext_hdr = p;
+ return len;
+}
+
+static_always_inline void
+esp_process_chained_ops (vlib_main_t * vm, vlib_node_runtime_t * node,
+ vnet_crypto_op_t * ops, vlib_buffer_t * b[],
+ u16 * nexts, vnet_crypto_op_chunk_t * chunks,
+ u16 drop_next)
+{
+ u32 n_fail, n_ops = vec_len (ops);
+ vnet_crypto_op_t *op = ops;
+
+ if (n_ops == 0)
return;
- if (PREDICT_FALSE
- (alg != em->per_thread_data[thread_index].last_encrypt_alg))
+ n_fail = n_ops - vnet_crypto_process_chained_ops (vm, op, chunks, n_ops);
+
+ while (n_fail)
{
- cipher = em->ipsec_proto_main_crypto_algs[alg].type;
- em->per_thread_data[thread_index].last_encrypt_alg = alg;
+ ASSERT (op - ops < n_ops);
+
+ if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
+ {
+ u32 bi = op->user_data;
+ b[bi]->error = node->errors[ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR];
+ nexts[bi] = drop_next;
+ n_fail--;
+ }
+ op++;
}
+}
+
+static_always_inline void
+esp_process_ops (vlib_main_t * vm, vlib_node_runtime_t * node,
+ vnet_crypto_op_t * ops, vlib_buffer_t * b[], u16 * nexts,
+ u16 drop_next)
+{
+ u32 n_fail, n_ops = vec_len (ops);
+ vnet_crypto_op_t *op = ops;
+
+ if (n_ops == 0)
+ return;
- EVP_EncryptInit_ex (ctx, cipher, NULL, key, iv);
+ n_fail = n_ops - vnet_crypto_process_ops (vm, op, n_ops);
- EVP_EncryptUpdate (ctx, out, &out_len, in, in_len);
- EVP_EncryptFinal_ex (ctx, out + out_len, &out_len);
+ while (n_fail)
+ {
+ ASSERT (op - ops < n_ops);
+
+ if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
+ {
+ u32 bi = op->user_data;
+ b[bi]->error = node->errors[ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR];
+ nexts[bi] = drop_next;
+ n_fail--;
+ }
+ op++;
+ }
}
-always_inline uword
-esp_encrypt_inline (vlib_main_t * vm,
- vlib_node_runtime_t * node, vlib_frame_t * from_frame,
- int is_ip6)
+static_always_inline u32
+esp_encrypt_chain_crypto (vlib_main_t * vm, ipsec_per_thread_data_t * ptd,
+ ipsec_sa_t * sa0, vlib_buffer_t * b,
+ vlib_buffer_t * lb, u8 icv_sz, u8 * start,
+ u32 start_len, u16 * n_ch)
{
- u32 n_left_from, *from, *to_next = 0, next_index;
- from = vlib_frame_vector_args (from_frame);
- n_left_from = from_frame->n_vectors;
- ipsec_main_t *im = &ipsec_main;
- ipsec_proto_main_t *em = &ipsec_proto_main;
- u32 *recycle = 0;
- u32 thread_index = vm->thread_index;
+ vnet_crypto_op_chunk_t *ch;
+ vlib_buffer_t *cb = b;
+ u32 n_chunks = 1;
+ u32 total_len;
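+ /* walk the buffer chain building one in-place (src == dst) chunk per
+ buffer; the last buffer's chunk excludes the trailing ICV */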
+ vec_add2 (ptd->chunks, ch, 1);
+ total_len = ch->len = start_len;
+ ch->src = ch->dst = start;
+ cb = vlib_get_buffer (vm, cb->next_buffer);
+
+ while (1)
+ {
+ vec_add2 (ptd->chunks, ch, 1);
+ n_chunks += 1;
+ if (lb == cb)
+ total_len += ch->len = cb->current_length - icv_sz;
+ else
+ total_len += ch->len = cb->current_length;
+ ch->src = ch->dst = vlib_buffer_get_current (cb);
+
+ if (!(cb->flags & VLIB_BUFFER_NEXT_PRESENT))
+ break;
+
+ cb = vlib_get_buffer (vm, cb->next_buffer);
+ }
+
+ if (n_ch)
+ *n_ch = n_chunks;
+
+ return total_len;
+}
+
+static_always_inline u32
+esp_encrypt_chain_integ (vlib_main_t * vm, ipsec_per_thread_data_t * ptd,
+ ipsec_sa_t * sa0, vlib_buffer_t * b,
+ vlib_buffer_t * lb, u8 icv_sz, u8 * start,
+ u32 start_len, u8 * digest, u16 * n_ch)
+{
+ vnet_crypto_op_chunk_t *ch;
+ vlib_buffer_t *cb = b;
+ u32 n_chunks = 1;
+ u32 total_len;
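+ /* like the crypto chain, but src-only; with ESN the last chunk grows
+ by 4 bytes to cover the sequence-number high bits placed after the ICV */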
+ vec_add2 (ptd->chunks, ch, 1);
+ total_len = ch->len = start_len;
+ ch->src = start;
+ cb = vlib_get_buffer (vm, cb->next_buffer);
+
+ while (1)
+ {
+ vec_add2 (ptd->chunks, ch, 1);
+ n_chunks += 1;
+ if (lb == cb)
+ {
+ total_len += ch->len = cb->current_length - icv_sz;
+ if (ipsec_sa_is_set_USE_ESN (sa0))
+ {
+ u32 seq_hi = clib_net_to_host_u32 (sa0->seq_hi);
+ clib_memcpy_fast (digest, &seq_hi, sizeof (seq_hi));
+ ch->len += sizeof (seq_hi);
+ total_len += sizeof (seq_hi);
+ }
+ }
+ else
+ total_len += ch->len = cb->current_length;
+ ch->src = vlib_buffer_get_current (cb);
+
+ if (!(cb->flags & VLIB_BUFFER_NEXT_PRESENT))
+ break;
+
+ cb = vlib_get_buffer (vm, cb->next_buffer);
+ }
+
+ if (n_ch)
+ *n_ch = n_chunks;
+
+ return total_len;
+}
+
+always_inline void
+esp_prepare_sync_op (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
+ vnet_crypto_op_t **crypto_ops,
+ vnet_crypto_op_t **integ_ops, ipsec_sa_t *sa0, u32 seq_hi,
+ u8 *payload, u16 payload_len, u8 iv_sz, u8 icv_sz, u32 bi,
+ vlib_buffer_t **b, vlib_buffer_t *lb, u32 hdr_len,
+ esp_header_t *esp)
+{
+ if (sa0->crypto_enc_op_id)
+ {
+ vnet_crypto_op_t *op;
+ vec_add2_aligned (crypto_ops[0], op, 1, CLIB_CACHE_LINE_BYTES);
+ vnet_crypto_op_init (op, sa0->crypto_enc_op_id);
+
+ op->src = op->dst = payload;
+ op->key_index = sa0->crypto_key_index;
+ op->len = payload_len - icv_sz;
+ op->user_data = bi;
- ipsec_alloc_empty_buffers (vm, im);
+ if (ipsec_sa_is_set_IS_CTR (sa0))
+ {
+ ASSERT (sizeof (u64) == iv_sz);
+ /* construct nonce in a scratch space in front of the IP header */
+ esp_ctr_nonce_t *nonce =
+ (esp_ctr_nonce_t *) (payload - sizeof (u64) - hdr_len -
+ sizeof (*nonce));
+ u64 *pkt_iv = (u64 *) (payload - sizeof (u64));
+
+ if (ipsec_sa_is_set_IS_AEAD (sa0))
+ {
+ /* construct aad in a scratch space in front of the nonce */
+ op->aad = (u8 *) nonce - sizeof (esp_aead_t);
+ op->aad_len = esp_aad_fill (op->aad, esp, sa0, seq_hi);
+ op->tag = payload + op->len;
+ op->tag_len = 16;
+ }
+ else
+ {
+ nonce->ctr = clib_host_to_net_u32 (1);
+ }
+
+ nonce->salt = sa0->salt;
+ nonce->iv = *pkt_iv = clib_host_to_net_u64 (sa0->ctr_iv_counter++);
+ op->iv = (u8 *) nonce;
+ }
+ else
+ {
+ op->iv = payload - iv_sz;
+ op->flags = VNET_CRYPTO_OP_FLAG_INIT_IV;
+ }
+
+ if (lb != b[0])
+ {
+ /* is chained */
+ op->flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
+ op->chunk_index = vec_len (ptd->chunks);
+ op->tag = vlib_buffer_get_tail (lb) - icv_sz;
+ esp_encrypt_chain_crypto (vm, ptd, sa0, b[0], lb, icv_sz, payload,
+ payload_len, &op->n_chunks);
+ }
+ }
+
+ if (sa0->integ_op_id)
+ {
+ vnet_crypto_op_t *op;
+ vec_add2_aligned (integ_ops[0], op, 1, CLIB_CACHE_LINE_BYTES);
+ vnet_crypto_op_init (op, sa0->integ_op_id);
+ op->src = payload - iv_sz - sizeof (esp_header_t);
+ op->digest = payload + payload_len - icv_sz;
+ op->key_index = sa0->integ_key_index;
+ op->digest_len = icv_sz;
+ op->len = payload_len - icv_sz + iv_sz + sizeof (esp_header_t);
+ op->user_data = bi;
+
+ if (lb != b[0])
+ {
+ /* is chained */
+ op->flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
+ op->chunk_index = vec_len (ptd->chunks);
+ op->digest = vlib_buffer_get_tail (lb) - icv_sz;
+
+ esp_encrypt_chain_integ (vm, ptd, sa0, b[0], lb, icv_sz,
+ payload - iv_sz - sizeof (esp_header_t),
+ payload_len + iv_sz +
+ sizeof (esp_header_t), op->digest,
+ &op->n_chunks);
+ }
+ else if (ipsec_sa_is_set_USE_ESN (sa0))
+ {
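+ /* the ESN high bits are hashed but never transmitted; stash them
+ where the digest will be written, the engine overwrites them */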
+ u32 tmp = clib_net_to_host_u32 (seq_hi);
+ clib_memcpy_fast (op->digest, &tmp, sizeof (seq_hi));
+ op->len += sizeof (seq_hi);
+ }
+ }
+}
+
+static_always_inline void
+esp_prepare_async_frame (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
+ vnet_crypto_async_frame_t *async_frame,
+ ipsec_sa_t *sa, vlib_buffer_t *b, esp_header_t *esp,
+ u8 *payload, u32 payload_len, u8 iv_sz, u8 icv_sz,
+ u32 bi, u16 next, u32 hdr_len, u16 async_next,
+ vlib_buffer_t *lb)
+{
+ esp_post_data_t *post = esp_post_data (b);
+ u8 *tag, *iv, *aad = 0;
+ u8 flag = 0;
+ u32 key_index;
+ i16 crypto_start_offset, integ_start_offset = 0;
+ u16 crypto_total_len, integ_total_len;
+
+ post->next_index = next;
- u32 *empty_buffers = im->empty_buffers[thread_index];
+ /* crypto */
+ crypto_start_offset = payload - b->data;
+ crypto_total_len = integ_total_len = payload_len - icv_sz;
+ tag = payload + crypto_total_len;
- if (PREDICT_FALSE (vec_len (empty_buffers) < n_left_from))
+ key_index = sa->linked_key_index;
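+ /* a linked key pairs the crypto and integ keys for async engines;
+ the AEAD case below overrides this with the plain crypto key */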
+
+ if (ipsec_sa_is_set_IS_CTR (sa))
{
- vlib_node_increment_counter (vm, node->node_index,
- ESP_ENCRYPT_ERROR_NO_BUFFER, n_left_from);
- clib_warning ("not enough empty buffers. discarding frame");
- goto free_buffers_and_exit;
+ ASSERT (sizeof (u64) == iv_sz);
+ /* construct nonce in a scratch space in front of the IP header */
+ esp_ctr_nonce_t *nonce = (esp_ctr_nonce_t *) (payload - sizeof (u64) -
+ hdr_len - sizeof (*nonce));
+ u64 *pkt_iv = (u64 *) (payload - sizeof (u64));
+
+ if (ipsec_sa_is_set_IS_AEAD (sa))
+ {
+ /* construct aad in a scratch space in front of the nonce */
+ aad = (u8 *) nonce - sizeof (esp_aead_t);
+ esp_aad_fill (aad, esp, sa, sa->seq_hi);
+ key_index = sa->crypto_key_index;
+ }
+ else
+ {
+ nonce->ctr = clib_host_to_net_u32 (1);
+ }
+
+ nonce->salt = sa->salt;
+ nonce->iv = *pkt_iv = clib_host_to_net_u64 (sa->ctr_iv_counter++);
+ iv = (u8 *) nonce;
+ }
+ else
+ {
+ iv = payload - iv_sz;
+ flag |= VNET_CRYPTO_OP_FLAG_INIT_IV;
+ }
+
+ if (lb != b)
+ {
+ /* chain */
+ flag |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
+ tag = vlib_buffer_get_tail (lb) - icv_sz;
+ crypto_total_len = esp_encrypt_chain_crypto (vm, ptd, sa, b, lb, icv_sz,
+ payload, payload_len, 0);
}
- next_index = node->cached_next_index;
+ if (sa->integ_op_id)
+ {
+ integ_start_offset = crypto_start_offset - iv_sz - sizeof (esp_header_t);
+ integ_total_len += iv_sz + sizeof (esp_header_t);
- while (n_left_from > 0)
+ if (b != lb)
+ {
+ integ_total_len = esp_encrypt_chain_integ (
+ vm, ptd, sa, b, lb, icv_sz,
+ payload - iv_sz - sizeof (esp_header_t),
+ payload_len + iv_sz + sizeof (esp_header_t), tag, 0);
+ }
+ else if (ipsec_sa_is_set_USE_ESN (sa))
+ {
+ u32 seq_hi = clib_net_to_host_u32 (sa->seq_hi);
+ clib_memcpy_fast (tag, &seq_hi, sizeof (seq_hi));
+ integ_total_len += sizeof (seq_hi);
+ }
+ }
+
+ /* this always succeeds because we know the frame is not full */
+ vnet_crypto_async_add_to_frame (vm, async_frame, key_index, crypto_total_len,
+ integ_total_len - crypto_total_len,
+ crypto_start_offset, integ_start_offset, bi,
+ async_next, iv, tag, aad, flag);
+}
+
+always_inline uword
+esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
+ vlib_frame_t *frame, vnet_link_t lt, int is_tun,
+ u16 async_next_node)
+{
+ ipsec_main_t *im = &ipsec_main;
+ ipsec_per_thread_data_t *ptd = vec_elt_at_index (im->ptd, vm->thread_index);
+ u32 *from = vlib_frame_vector_args (frame);
+ u32 n_left = frame->n_vectors;
+ vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
+ u32 thread_index = vm->thread_index;
+ u16 buffer_data_size = vlib_buffer_get_default_data_size (vm);
+ u32 current_sa_index = ~0, current_sa_packets = 0;
+ u32 current_sa_bytes = 0, spi = 0;
+ u8 esp_align = 4, iv_sz = 0, icv_sz = 0;
+ ipsec_sa_t *sa0 = 0;
+ vlib_buffer_t *lb;
+ vnet_crypto_op_t **crypto_ops = &ptd->crypto_ops;
+ vnet_crypto_op_t **integ_ops = &ptd->integ_ops;
+ vnet_crypto_async_frame_t *async_frames[VNET_CRYPTO_ASYNC_OP_N_IDS];
+ int is_async = im->async_mode;
+ vnet_crypto_async_op_id_t async_op = ~0;
+ u16 drop_next =
+ (lt == VNET_LINK_IP6 ? ESP_ENCRYPT_NEXT_DROP6 :
+ (lt == VNET_LINK_IP4 ? ESP_ENCRYPT_NEXT_DROP4 :
+ ESP_ENCRYPT_NEXT_DROP_MPLS));
+ u16 handoff_next = (lt == VNET_LINK_IP6 ?
+ ESP_ENCRYPT_NEXT_HANDOFF6 :
+ (lt == VNET_LINK_IP4 ? ESP_ENCRYPT_NEXT_HANDOFF4 :
+ ESP_ENCRYPT_NEXT_HANDOFF_MPLS));
+ vlib_buffer_t *sync_bufs[VLIB_FRAME_SIZE];
+ u16 sync_nexts[VLIB_FRAME_SIZE], *sync_next = sync_nexts, n_sync = 0;
+ u16 n_async = 0;
+ u16 noop_nexts[VLIB_FRAME_SIZE], n_noop = 0;
+ u32 sync_bi[VLIB_FRAME_SIZE];
+ u32 noop_bi[VLIB_FRAME_SIZE];
+ esp_encrypt_error_t err;
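+ /* each packet lands in exactly one of three sets: sync (inline crypto
+ ops), async (crypto frames) or noop (drop/handoff) */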
+
+ vlib_get_buffers (vm, from, b, n_left);
+
+ vec_reset_length (ptd->crypto_ops);
+ vec_reset_length (ptd->integ_ops);
+ vec_reset_length (ptd->chained_crypto_ops);
+ vec_reset_length (ptd->chained_integ_ops);
+ vec_reset_length (ptd->async_frames);
+ vec_reset_length (ptd->chunks);
+ clib_memset (async_frames, 0, sizeof (async_frames));
+
+ while (n_left > 0)
{
- u32 n_left_to_next;
-
- vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
-
- while (n_left_from > 0 && n_left_to_next > 0)
- {
- u32 i_bi0, o_bi0, next0;
- vlib_buffer_t *i_b0, *o_b0 = 0;
- u32 sa_index0;
- ipsec_sa_t *sa0;
- ip4_and_esp_header_t *oh0 = 0;
- ip6_and_esp_header_t *ih6_0, *oh6_0 = 0;
- ip4_and_udp_and_esp_header_t *iuh0, *ouh0 = 0;
- uword last_empty_buffer;
- esp_header_t *o_esp0;
- esp_footer_t *f0;
- u8 ip_udp_hdr_size;
- u8 next_hdr_type;
- u32 ip_proto = 0;
- u8 transport_mode = 0;
-
- i_bi0 = from[0];
- from += 1;
- n_left_from -= 1;
- n_left_to_next -= 1;
-
- next0 = ESP_ENCRYPT_NEXT_DROP;
-
- i_b0 = vlib_get_buffer (vm, i_bi0);
- sa_index0 = vnet_buffer (i_b0)->ipsec.sad_index;
- sa0 = pool_elt_at_index (im->sad, sa_index0);
-
- if (PREDICT_FALSE (esp_seq_advance (sa0)))
+ u32 sa_index0;
+ dpo_id_t *dpo;
+ esp_header_t *esp;
+ u8 *payload, *next_hdr_ptr;
+ u16 payload_len, payload_len_total, n_bufs;
+ u32 hdr_len;
+
+ err = ESP_ENCRYPT_ERROR_RX_PKTS;
+
+ if (n_left > 2)
+ {
+ u8 *p;
+ vlib_prefetch_buffer_header (b[2], LOAD);
+ p = vlib_buffer_get_current (b[1]);
+ clib_prefetch_load (p);
+ p -= CLIB_CACHE_LINE_BYTES;
+ clib_prefetch_load (p);
+ /* speculate that the trailer goes in the first buffer */
+ CLIB_PREFETCH (vlib_buffer_get_tail (b[1]),
+ CLIB_CACHE_LINE_BYTES, LOAD);
+ }
+
+ if (is_tun)
+ {
+ /* we are on an ipsec tunnel's feature arc */
+ vnet_buffer (b[0])->ipsec.sad_index =
+ sa_index0 = ipsec_tun_protect_get_sa_out
+ (vnet_buffer (b[0])->ip.adj_index[VLIB_TX]);
+
+ if (PREDICT_FALSE (INDEX_INVALID == sa_index0))
{
- clib_warning ("sequence number counter has cycled SPI %u",
- sa0->spi);
- vlib_node_increment_counter (vm, node->node_index,
- ESP_ENCRYPT_ERROR_SEQ_CYCLED, 1);
- //TODO: rekey SA
- o_bi0 = i_bi0;
- to_next[0] = o_bi0;
- to_next += 1;
+ err = ESP_ENCRYPT_ERROR_NO_PROTECTION;
+ esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
+ drop_next);
goto trace;
}
+ }
+ else
+ sa_index0 = vnet_buffer (b[0])->ipsec.sad_index;
- sa0->total_data_size += i_b0->current_length;
-
- /* grab free buffer */
- last_empty_buffer = vec_len (empty_buffers) - 1;
- o_bi0 = empty_buffers[last_empty_buffer];
- o_b0 = vlib_get_buffer (vm, o_bi0);
- o_b0->flags = VLIB_BUFFER_TOTAL_LENGTH_VALID;
- o_b0->current_data = sizeof (ethernet_header_t);
- iuh0 = vlib_buffer_get_current (i_b0);
- vlib_prefetch_buffer_with_index (vm,
- empty_buffers[last_empty_buffer -
- 1], STORE);
- _vec_len (empty_buffers) = last_empty_buffer;
- to_next[0] = o_bi0;
- to_next += 1;
-
- /* add old buffer to the recycle list */
- vec_add1 (recycle, i_bi0);
-
- if (is_ip6)
+ if (sa_index0 != current_sa_index)
+ {
+ if (current_sa_packets)
+ vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
+ current_sa_index,
+ current_sa_packets,
+ current_sa_bytes);
+ current_sa_packets = current_sa_bytes = 0;
+
+ sa0 = ipsec_sa_get (sa_index0);
+
+ if (PREDICT_FALSE ((sa0->crypto_alg == IPSEC_CRYPTO_ALG_NONE &&
+ sa0->integ_alg == IPSEC_INTEG_ALG_NONE) &&
+ !ipsec_sa_is_set_NO_ALGO_NO_DROP (sa0)))
{
- ih6_0 = vlib_buffer_get_current (i_b0);
- next_hdr_type = IP_PROTOCOL_IPV6;
- oh6_0 = vlib_buffer_get_current (o_b0);
-
- oh6_0->ip6.ip_version_traffic_class_and_flow_label =
- ih6_0->ip6.ip_version_traffic_class_and_flow_label;
- oh6_0->ip6.protocol = IP_PROTOCOL_IPSEC_ESP;
- ip_udp_hdr_size = sizeof (ip6_header_t);
- o_esp0 = vlib_buffer_get_current (o_b0) + ip_udp_hdr_size;
- oh6_0->ip6.hop_limit = 254;
- oh6_0->ip6.src_address.as_u64[0] =
- ih6_0->ip6.src_address.as_u64[0];
- oh6_0->ip6.src_address.as_u64[1] =
- ih6_0->ip6.src_address.as_u64[1];
- oh6_0->ip6.dst_address.as_u64[0] =
- ih6_0->ip6.dst_address.as_u64[0];
- oh6_0->ip6.dst_address.as_u64[1] =
- ih6_0->ip6.dst_address.as_u64[1];
- o_esp0->spi = clib_net_to_host_u32 (sa0->spi);
- o_esp0->seq = clib_net_to_host_u32 (sa0->seq);
- ip_proto = ih6_0->ip6.protocol;
-
- next0 = ESP_ENCRYPT_NEXT_IP6_LOOKUP;
+ err = ESP_ENCRYPT_ERROR_NO_ENCRYPTION;
+ esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
+ drop_next);
+ goto trace;
}
- else
+ /* fetch the second cacheline ASAP */
+ clib_prefetch_load (sa0->cacheline1);
+
+ current_sa_index = sa_index0;
+ spi = clib_net_to_host_u32 (sa0->spi);
+ esp_align = sa0->esp_block_align;
+ icv_sz = sa0->integ_icv_size;
+ iv_sz = sa0->crypto_iv_size;
+ is_async = im->async_mode | ipsec_sa_is_set_IS_ASYNC (sa0);
+ }
+
+ if (PREDICT_FALSE (~0 == sa0->thread_index))
+ {
+ /* this is the first packet to use this SA; claim the SA
+ * for this thread. another thread could be doing the same
+ * concurrently */
+ clib_atomic_cmp_and_swap (&sa0->thread_index, ~0,
+ ipsec_sa_assign_thread (thread_index));
+ }
+
+ if (PREDICT_FALSE (thread_index != sa0->thread_index))
+ {
+ vnet_buffer (b[0])->ipsec.thread_index = sa0->thread_index;
+ err = ESP_ENCRYPT_ERROR_HANDOFF;
+ esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
+ handoff_next);
+ goto trace;
+ }
+
+ lb = b[0];
+ n_bufs = vlib_buffer_chain_linearize (vm, b[0]);
+ if (n_bufs == 0)
+ {
+ err = ESP_ENCRYPT_ERROR_NO_BUFFERS;
+ esp_set_next_index (b[0], node, err, n_noop, noop_nexts, drop_next);
+ goto trace;
+ }
+
+ if (n_bufs > 1)
+ {
+ /* find last buffer in the chain */
+ while (lb->flags & VLIB_BUFFER_NEXT_PRESENT)
+ lb = vlib_get_buffer (vm, lb->next_buffer);
+ }
+
+ if (PREDICT_FALSE (esp_seq_advance (sa0)))
+ {
+ err = ESP_ENCRYPT_ERROR_SEQ_CYCLED;
+ esp_set_next_index (b[0], node, err, n_noop, noop_nexts, drop_next);
+ goto trace;
+ }
+
+ /* space for IV */
+ hdr_len = iv_sz;
+
+ if (ipsec_sa_is_set_IS_TUNNEL (sa0))
+ {
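+ /* tunnel mode: the entire original packet becomes the ESP payload */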
+ payload = vlib_buffer_get_current (b[0]);
+ next_hdr_ptr = esp_add_footer_and_icv (
+ vm, &lb, esp_align, icv_sz, node, buffer_data_size,
+ vlib_buffer_length_in_chain (vm, b[0]));
+ if (!next_hdr_ptr)
+ {
+ err = ESP_ENCRYPT_ERROR_NO_BUFFERS;
+ esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
+ drop_next);
+ goto trace;
+ }
+ b[0]->flags &= ~VLIB_BUFFER_TOTAL_LENGTH_VALID;
+ payload_len = b[0]->current_length;
+ payload_len_total = vlib_buffer_length_in_chain (vm, b[0]);
+
+ /* ESP header */
+ hdr_len += sizeof (*esp);
+ esp = (esp_header_t *) (payload - hdr_len);
+
+ /* optional UDP header */
+ if (ipsec_sa_is_set_UDP_ENCAP (sa0))
+ {
+ hdr_len += sizeof (udp_header_t);
+ esp_fill_udp_hdr (sa0, (udp_header_t *) (payload - hdr_len),
+ payload_len_total + hdr_len);
+ }
+
+ /* IP header */
+ if (ipsec_sa_is_set_IS_TUNNEL_V6 (sa0))
{
- next_hdr_type = IP_PROTOCOL_IP_IN_IP;
- oh0 = vlib_buffer_get_current (o_b0);
- ouh0 = vlib_buffer_get_current (o_b0);
-
- oh0->ip4.ip_version_and_header_length = 0x45;
- oh0->ip4.tos = iuh0->ip4.tos;
- oh0->ip4.fragment_id = 0;
- oh0->ip4.flags_and_fragment_offset = 0;
- oh0->ip4.ttl = 254;
- if (sa0->udp_encap)
+ ip6_header_t *ip6;
+ u16 len = sizeof (ip6_header_t);
+ hdr_len += len;
+ ip6 = (ip6_header_t *) (payload - hdr_len);
+ clib_memcpy_fast (ip6, &sa0->ip6_hdr, sizeof (ip6_header_t));
+
+ if (VNET_LINK_IP6 == lt)
+ {
+ *next_hdr_ptr = IP_PROTOCOL_IPV6;
+ tunnel_encap_fixup_6o6 (sa0->tunnel_flags,
+ (const ip6_header_t *) payload,
+ ip6);
+ }
+ else if (VNET_LINK_IP4 == lt)
{
- ouh0->udp.src_port =
- clib_host_to_net_u16 (UDP_DST_PORT_ipsec);
- ouh0->udp.dst_port =
- clib_host_to_net_u16 (UDP_DST_PORT_ipsec);
- ouh0->udp.checksum = 0;
- ouh0->ip4.protocol = IP_PROTOCOL_UDP;
- ip_udp_hdr_size =
- sizeof (udp_header_t) + sizeof (ip4_header_t);
+ *next_hdr_ptr = IP_PROTOCOL_IP_IN_IP;
+ tunnel_encap_fixup_4o6 (sa0->tunnel_flags, b[0],
+ (const ip4_header_t *) payload, ip6);
+ }
+ else if (VNET_LINK_MPLS == lt)
+ {
+ *next_hdr_ptr = IP_PROTOCOL_MPLS_IN_IP;
+ tunnel_encap_fixup_mplso6 (
+ sa0->tunnel_flags, b[0],
+ (const mpls_unicast_header_t *) payload, ip6);
}
else
+ ASSERT (0);
+
+ len = payload_len_total + hdr_len - len;
+ ip6->payload_length = clib_net_to_host_u16 (len);
+ b[0]->flags |= VNET_BUFFER_F_LOCALLY_ORIGINATED;
+ }
+ else
+ {
+ ip4_header_t *ip4;
+ u16 len = sizeof (ip4_header_t);
+ hdr_len += len;
+ ip4 = (ip4_header_t *) (payload - hdr_len);
+ clib_memcpy_fast (ip4, &sa0->ip4_hdr, sizeof (ip4_header_t));
+
+ if (VNET_LINK_IP6 == lt)
+ {
+ *next_hdr_ptr = IP_PROTOCOL_IPV6;
+ tunnel_encap_fixup_6o4_w_chksum (sa0->tunnel_flags,
+ (const ip6_header_t *)
+ payload, ip4);
+ }
+ else if (VNET_LINK_IP4 == lt)
{
- oh0->ip4.protocol = IP_PROTOCOL_IPSEC_ESP;
- ip_udp_hdr_size = sizeof (ip4_header_t);
+ *next_hdr_ptr = IP_PROTOCOL_IP_IN_IP;
+ tunnel_encap_fixup_4o4_w_chksum (sa0->tunnel_flags,
+ (const ip4_header_t *)
+ payload, ip4);
}
- o_esp0 = vlib_buffer_get_current (o_b0) + ip_udp_hdr_size;
- oh0->ip4.src_address.as_u32 = iuh0->ip4.src_address.as_u32;
- oh0->ip4.dst_address.as_u32 = iuh0->ip4.dst_address.as_u32;
- o_esp0->spi = clib_net_to_host_u32 (sa0->spi);
- o_esp0->seq = clib_net_to_host_u32 (sa0->seq);
- ip_proto = iuh0->ip4.protocol;
-
- next0 = ESP_ENCRYPT_NEXT_IP4_LOOKUP;
+ else if (VNET_LINK_MPLS == lt)
+ {
+ *next_hdr_ptr = IP_PROTOCOL_MPLS_IN_IP;
+ tunnel_encap_fixup_mplso4_w_chksum (
+ sa0->tunnel_flags, (const mpls_unicast_header_t *) payload,
+ ip4);
+ }
+ else
+ ASSERT (0);
+
+ len = payload_len_total + hdr_len;
+ esp_update_ip4_hdr (ip4, len, /* is_transport */ 0, 0);
}
- if (PREDICT_TRUE (!is_ip6 && sa0->is_tunnel && !sa0->is_tunnel_ip6))
+ dpo = &sa0->dpo;
+ if (!is_tun)
{
- oh0->ip4.src_address.as_u32 = sa0->tunnel_src_addr.ip4.as_u32;
- oh0->ip4.dst_address.as_u32 = sa0->tunnel_dst_addr.ip4.as_u32;
+ sync_next[0] = dpo->dpoi_next_node;
+ vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = dpo->dpoi_index;
+ }
+ else
+ sync_next[0] = ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT;
+ b[0]->flags |= VNET_BUFFER_F_LOCALLY_ORIGINATED;
+ }
+ else /* transport mode */
+ {
+ u8 *l2_hdr, l2_len, *ip_hdr;
+ u16 ip_len;
+ ip6_ext_header_t *ext_hdr;
+ udp_header_t *udp = 0;
+ u16 udp_len = 0;
+ u8 *old_ip_hdr = vlib_buffer_get_current (b[0]);
+
+ /*
+ * Get extension header chain length. It might be longer than the
+ * buffer's pre_data area.
+ */
+ ip_len =
+ (VNET_LINK_IP6 == lt ?
+ esp_get_ip6_hdr_len ((ip6_header_t *) old_ip_hdr, &ext_hdr) :
+ ip4_header_bytes ((ip4_header_t *) old_ip_hdr));
+ if ((old_ip_hdr - ip_len) < &b[0]->pre_data[0])
+ {
+ err = ESP_ENCRYPT_ERROR_NO_BUFFERS;
+ esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
+ drop_next);
+ goto trace;
+ }
- next0 = sa0->dpo[IPSEC_PROTOCOL_ESP].dpoi_next_node;
- vnet_buffer (o_b0)->ip.adj_index[VLIB_TX] =
- sa0->dpo[IPSEC_PROTOCOL_ESP].dpoi_index;
+ vlib_buffer_advance (b[0], ip_len);
+ payload = vlib_buffer_get_current (b[0]);
+ next_hdr_ptr = esp_add_footer_and_icv (
+ vm, &lb, esp_align, icv_sz, node, buffer_data_size,
+ vlib_buffer_length_in_chain (vm, b[0]));
+ if (!next_hdr_ptr)
+ {
+ err = ESP_ENCRYPT_ERROR_NO_BUFFERS;
+ esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
+ drop_next);
+ goto trace;
}
- else if (is_ip6 && sa0->is_tunnel && sa0->is_tunnel_ip6)
+
+ b[0]->flags &= ~VLIB_BUFFER_TOTAL_LENGTH_VALID;
+ payload_len = b[0]->current_length;
+ payload_len_total = vlib_buffer_length_in_chain (vm, b[0]);
+
+ /* ESP header */
+ hdr_len += sizeof (*esp);
+ esp = (esp_header_t *) (payload - hdr_len);
+
+ /* optional UDP header */
+ if (ipsec_sa_is_set_UDP_ENCAP (sa0))
{
- oh6_0->ip6.src_address.as_u64[0] =
- sa0->tunnel_src_addr.ip6.as_u64[0];
- oh6_0->ip6.src_address.as_u64[1] =
- sa0->tunnel_src_addr.ip6.as_u64[1];
- oh6_0->ip6.dst_address.as_u64[0] =
- sa0->tunnel_dst_addr.ip6.as_u64[0];
- oh6_0->ip6.dst_address.as_u64[1] =
- sa0->tunnel_dst_addr.ip6.as_u64[1];
-
- next0 = sa0->dpo[IPSEC_PROTOCOL_ESP].dpoi_next_node;
- vnet_buffer (o_b0)->ip.adj_index[VLIB_TX] =
- sa0->dpo[IPSEC_PROTOCOL_ESP].dpoi_index;
+ hdr_len += sizeof (udp_header_t);
+ udp = (udp_header_t *) (payload - hdr_len);
}
- else
+
+ /* IP header */
+ hdr_len += ip_len;
+ ip_hdr = payload - hdr_len;
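+ /* the original IP header is copied here later, in front of the ESP
+ (and optional UDP) header */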
+
+ /* L2 header */
+ if (!is_tun)
{
- next_hdr_type = ip_proto;
- if (vnet_buffer (i_b0)->sw_if_index[VLIB_TX] != ~0)
- {
- transport_mode = 1;
- ethernet_header_t *ieh0, *oeh0;
- ieh0 =
- (ethernet_header_t *) ((u8 *)
- vlib_buffer_get_current (i_b0) -
- sizeof (ethernet_header_t));
- oeh0 = (ethernet_header_t *) o_b0->data;
- clib_memcpy_fast (oeh0, ieh0, sizeof (ethernet_header_t));
- next0 = ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT;
- vnet_buffer (o_b0)->sw_if_index[VLIB_TX] =
- vnet_buffer (i_b0)->sw_if_index[VLIB_TX];
- }
+ l2_len = vnet_buffer (b[0])->ip.save_rewrite_length;
+ hdr_len += l2_len;
+ l2_hdr = payload - hdr_len;
+
+ /* copy l2 and ip header */
+ clib_memcpy_le32 (l2_hdr, old_ip_hdr - l2_len, l2_len);
+ }
+ else
+ l2_len = 0;
+
+ u16 len;
+ len = payload_len_total + hdr_len - l2_len;
- if (is_ip6)
+ if (VNET_LINK_IP6 == lt)
+ {
+ ip6_header_t *ip6 = (ip6_header_t *) (old_ip_hdr);
+ if (PREDICT_TRUE (NULL == ext_hdr))
{
- vlib_buffer_advance (i_b0, sizeof (ip6_header_t));
+ *next_hdr_ptr = ip6->protocol;
+ ip6->protocol =
+ (udp) ? IP_PROTOCOL_UDP : IP_PROTOCOL_IPSEC_ESP;
}
else
{
- vlib_buffer_advance (i_b0, sizeof (ip4_header_t));
+ *next_hdr_ptr = ext_hdr->next_hdr;
+ ext_hdr->next_hdr =
+ (udp) ? IP_PROTOCOL_UDP : IP_PROTOCOL_IPSEC_ESP;
}
+ ip6->payload_length =
+ clib_host_to_net_u16 (len - sizeof (ip6_header_t));
+ }
+ else if (VNET_LINK_IP4 == lt)
+ {
+ ip4_header_t *ip4 = (ip4_header_t *) (old_ip_hdr);
+ *next_hdr_ptr = ip4->protocol;
+ esp_update_ip4_hdr (ip4, len, /* is_transport */ 1,
+ (udp != NULL));
}
- ASSERT (sa0->crypto_alg < IPSEC_CRYPTO_N_ALG);
+ clib_memcpy_le64 (ip_hdr, old_ip_hdr, ip_len);
- if (PREDICT_TRUE (sa0->crypto_alg != IPSEC_CRYPTO_ALG_NONE))
+ if (udp)
{
-
- const int BLOCK_SIZE =
- em->ipsec_proto_main_crypto_algs[sa0->crypto_alg].block_size;
- const int IV_SIZE =
- em->ipsec_proto_main_crypto_algs[sa0->crypto_alg].iv_size;
- int blocks = 1 + (i_b0->current_length + 1) / BLOCK_SIZE;
-
- /* pad packet in input buffer */
- u8 pad_bytes = BLOCK_SIZE * blocks - 2 - i_b0->current_length;
- u8 i;
- u8 *padding =
- vlib_buffer_get_current (i_b0) + i_b0->current_length;
- i_b0->current_length = BLOCK_SIZE * blocks;
- for (i = 0; i < pad_bytes; ++i)
- {
- padding[i] = i + 1;
- }
- f0 = vlib_buffer_get_current (i_b0) + i_b0->current_length - 2;
- f0->pad_length = pad_bytes;
- f0->next_header = next_hdr_type;
-
- o_b0->current_length = ip_udp_hdr_size + sizeof (esp_header_t) +
- BLOCK_SIZE * blocks + IV_SIZE;
-
- vnet_buffer (o_b0)->sw_if_index[VLIB_RX] =
- vnet_buffer (i_b0)->sw_if_index[VLIB_RX];
-
- u8 iv[em->
- ipsec_proto_main_crypto_algs[sa0->crypto_alg].iv_size];
- RAND_bytes (iv, sizeof (iv));
-
- clib_memcpy_fast ((u8 *) vlib_buffer_get_current (o_b0) +
- ip_udp_hdr_size + sizeof (esp_header_t), iv,
- em->ipsec_proto_main_crypto_algs[sa0->
- crypto_alg].iv_size);
-
- esp_encrypt_cbc (vm, sa0->crypto_alg,
- (u8 *) vlib_buffer_get_current (i_b0),
- (u8 *) vlib_buffer_get_current (o_b0) +
- ip_udp_hdr_size + sizeof (esp_header_t) +
- IV_SIZE, BLOCK_SIZE * blocks,
- sa0->crypto_key.data, iv);
+ udp_len = len - ip_len;
+ esp_fill_udp_hdr (sa0, udp, udp_len);
}
- o_b0->current_length +=
- hmac_calc (sa0->integ_alg, sa0->integ_key.data,
- sa0->integ_key.len, (u8 *) o_esp0,
- o_b0->current_length - ip_udp_hdr_size,
- vlib_buffer_get_current (o_b0) + o_b0->current_length,
- sa0->use_esn, sa0->seq_hi);
+ sync_next[0] = ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT;
+ }
+ if (lb != b[0])
+ {
+ crypto_ops = &ptd->chained_crypto_ops;
+ integ_ops = &ptd->chained_integ_ops;
+ }
+ else
+ {
+ crypto_ops = &ptd->crypto_ops;
+ integ_ops = &ptd->integ_ops;
+ }
+
+ esp->spi = spi;
+ esp->seq = clib_net_to_host_u32 (sa0->seq);
- if (is_ip6)
+ if (is_async)
+ {
+ async_op = sa0->crypto_async_enc_op_id;
+
+ /* get a frame for this op if we don't yet have one or it's full
+ */
+ if (NULL == async_frames[async_op] ||
+ vnet_crypto_async_frame_is_full (async_frames[async_op]))
{
- oh6_0->ip6.payload_length =
- clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, o_b0) -
- sizeof (ip6_header_t));
+ async_frames[async_op] =
+ vnet_crypto_async_get_frame (vm, async_op);
+ /* Save the frame to the list we'll submit at the end */
+ vec_add1 (ptd->async_frames, async_frames[async_op]);
}
+
+ esp_prepare_async_frame (vm, ptd, async_frames[async_op], sa0, b[0],
+ esp, payload, payload_len, iv_sz, icv_sz,
+ from[b - bufs], sync_next[0], hdr_len,
+ async_next_node, lb);
+ }
+ else
+ esp_prepare_sync_op (vm, ptd, crypto_ops, integ_ops, sa0, sa0->seq_hi,
+ payload, payload_len, iv_sz, icv_sz, n_sync, b,
+ lb, hdr_len, esp);
+
+ vlib_buffer_advance (b[0], 0LL - hdr_len);
+
+ current_sa_packets += 1;
+ current_sa_bytes += payload_len_total;
+
+ trace:
+ if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ esp_encrypt_trace_t *tr = vlib_add_trace (vm, node, b[0],
+ sizeof (*tr));
+ if (INDEX_INVALID == sa_index0)
+ clib_memset_u8 (tr, 0xff, sizeof (*tr));
else
{
- oh0->ip4.length =
- clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, o_b0));
- oh0->ip4.checksum = ip4_header_checksum (&oh0->ip4);
- if (sa0->udp_encap)
- {
- ouh0->udp.length =
- clib_host_to_net_u16 (clib_net_to_host_u16
- (oh0->ip4.length) -
- ip4_header_bytes (&oh0->ip4));
- }
+ tr->sa_index = sa_index0;
+ tr->spi = sa0->spi;
+ tr->seq = sa0->seq;
+ tr->sa_seq_hi = sa0->seq_hi;
+ tr->udp_encap = ipsec_sa_is_set_UDP_ENCAP (sa0);
+ tr->crypto_alg = sa0->crypto_alg;
+ tr->integ_alg = sa0->integ_alg;
+ }
+ }
+
+ /* next */
+ if (ESP_ENCRYPT_ERROR_RX_PKTS != err)
+ {
+ noop_bi[n_noop] = from[b - bufs];
+ n_noop++;
+ }
+ else if (!is_async)
+ {
+ sync_bi[n_sync] = from[b - bufs];
+ sync_bufs[n_sync] = b[0];
+ n_sync++;
+ sync_next++;
+ }
+ else
+ {
+ n_async++;
+ }
+ n_left -= 1;
+ b += 1;
+ }
+
+ if (INDEX_INVALID != current_sa_index)
+ vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
+ current_sa_index, current_sa_packets,
+ current_sa_bytes);
+ if (n_sync)
+ {
+ esp_process_ops (vm, node, ptd->crypto_ops, sync_bufs, sync_nexts,
+ drop_next);
+ esp_process_chained_ops (vm, node, ptd->chained_crypto_ops, sync_bufs,
+ sync_nexts, ptd->chunks, drop_next);
+
+ esp_process_ops (vm, node, ptd->integ_ops, sync_bufs, sync_nexts,
+ drop_next);
+ esp_process_chained_ops (vm, node, ptd->chained_integ_ops, sync_bufs,
+ sync_nexts, ptd->chunks, drop_next);
+
+ vlib_buffer_enqueue_to_next (vm, node, sync_bi, sync_nexts, n_sync);
+ }
+ if (n_async)
+ {
+ /* submit all of the open frames */
+ vnet_crypto_async_frame_t **async_frame;
+
+ vec_foreach (async_frame, ptd->async_frames)
+ {
+ if (vnet_crypto_async_submit_open_frame (vm, *async_frame) < 0)
+ {
+ n_noop += esp_async_recycle_failed_submit (
+ vm, *async_frame, node, ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR,
+ n_noop, noop_bi, noop_nexts, drop_next);
+ vnet_crypto_async_reset_frame (*async_frame);
+ vnet_crypto_async_free_frame (vm, *async_frame);
}
+ }
+ }
+ if (n_noop)
+ vlib_buffer_enqueue_to_next (vm, node, noop_bi, noop_nexts, n_noop);
+
+ vlib_node_increment_counter (vm, node->node_index, ESP_ENCRYPT_ERROR_RX_PKTS,
+ frame->n_vectors);
+
+ return frame->n_vectors;
+}
+
+always_inline uword
+esp_encrypt_post_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
+ u16 nexts[VLIB_FRAME_SIZE], *next = nexts;
+ u32 *from = vlib_frame_vector_args (frame);
+ u32 n_left = frame->n_vectors;
+
+ vlib_get_buffers (vm, from, b, n_left);
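+ /* async crypto is complete; forward each buffer to the next index
+ stashed in its per-buffer post data by esp_prepare_async_frame */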
- if (transport_mode)
- vlib_buffer_reset (o_b0);
+ if (n_left >= 4)
+ {
+ vlib_prefetch_buffer_header (b[0], LOAD);
+ vlib_prefetch_buffer_header (b[1], LOAD);
+ vlib_prefetch_buffer_header (b[2], LOAD);
+ vlib_prefetch_buffer_header (b[3], LOAD);
+ }
+
+ while (n_left > 8)
+ {
+ vlib_prefetch_buffer_header (b[4], LOAD);
+ vlib_prefetch_buffer_header (b[5], LOAD);
+ vlib_prefetch_buffer_header (b[6], LOAD);
+ vlib_prefetch_buffer_header (b[7], LOAD);
- trace:
- if (PREDICT_FALSE (i_b0->flags & VLIB_BUFFER_IS_TRACED))
+ next[0] = (esp_post_data (b[0]))->next_index;
+ next[1] = (esp_post_data (b[1]))->next_index;
+ next[2] = (esp_post_data (b[2]))->next_index;
+ next[3] = (esp_post_data (b[3]))->next_index;
+
+ if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE))
+ {
+ if (b[0]->flags & VLIB_BUFFER_IS_TRACED)
{
- if (o_b0)
- {
- o_b0->flags |= VLIB_BUFFER_IS_TRACED;
- o_b0->trace_index = i_b0->trace_index;
- esp_encrypt_trace_t *tr =
- vlib_add_trace (vm, node, o_b0, sizeof (*tr));
- tr->sa_index = sa_index0;
- tr->spi = sa0->spi;
- tr->seq = sa0->seq - 1;
- tr->udp_encap = sa0->udp_encap;
- tr->crypto_alg = sa0->crypto_alg;
- tr->integ_alg = sa0->integ_alg;
- }
+ esp_encrypt_post_trace_t *tr = vlib_add_trace (vm, node, b[0],
+ sizeof (*tr));
+ tr->next_index = next[0];
+ }
+ if (b[1]->flags & VLIB_BUFFER_IS_TRACED)
+ {
+ esp_encrypt_post_trace_t *tr = vlib_add_trace (vm, node, b[1],
+ sizeof (*tr));
+ tr->next_index = next[1];
}
+ if (b[2]->flags & VLIB_BUFFER_IS_TRACED)
+ {
+ esp_encrypt_post_trace_t *tr = vlib_add_trace (vm, node, b[2],
+ sizeof (*tr));
+ tr->next_index = next[2];
+ }
+ if (b[3]->flags & VLIB_BUFFER_IS_TRACED)
+ {
+ esp_encrypt_post_trace_t *tr = vlib_add_trace (vm, node, b[3],
+ sizeof (*tr));
+ tr->next_index = next[3];
+ }
+ }
+
+ b += 4;
+ next += 4;
+ n_left -= 4;
+ }
- vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
- to_next, n_left_to_next, o_bi0,
- next0);
+ while (n_left > 0)
+ {
+ next[0] = (esp_post_data (b[0]))->next_index;
+ if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ esp_encrypt_post_trace_t *tr = vlib_add_trace (vm, node, b[0],
+ sizeof (*tr));
+ tr->next_index = next[0];
}
- vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+
+ b += 1;
+ next += 1;
+ n_left -= 1;
}
+
vlib_node_increment_counter (vm, node->node_index,
- ESP_ENCRYPT_ERROR_RX_PKTS,
- from_frame->n_vectors);
-
-free_buffers_and_exit:
- if (recycle)
- vlib_buffer_free (vm, recycle, vec_len (recycle));
- vec_free (recycle);
- return from_frame->n_vectors;
+ ESP_ENCRYPT_ERROR_POST_RX_PKTS,
+ frame->n_vectors);
+ vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);
+ return frame->n_vectors;
}
VLIB_NODE_FN (esp4_encrypt_node) (vlib_main_t * vm,
vlib_node_runtime_t * node,
vlib_frame_t * from_frame)
{
- return esp_encrypt_inline (vm, node, from_frame, 0 /* is_ip6 */ );
+ return esp_encrypt_inline (vm, node, from_frame, VNET_LINK_IP4, 0,
+ esp_encrypt_async_next.esp4_post_next);
}
/* *INDENT-OFF* */
VLIB_REGISTER_NODE (esp4_encrypt_node) = {
  .name = "esp4-encrypt",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_encrypt_trace,
.type = VLIB_NODE_TYPE_INTERNAL,
- .n_errors = ARRAY_LEN(esp_encrypt_error_strings),
- .error_strings = esp_encrypt_error_strings,
+ .n_errors = ESP_ENCRYPT_N_ERROR,
+ .error_counters = esp_encrypt_error_counters,
.n_next_nodes = ESP_ENCRYPT_N_NEXT,
- .next_nodes = {
-#define _(s,n) [ESP_ENCRYPT_NEXT_##s] = n,
- foreach_esp_encrypt_next
-#undef _
- },
+ .next_nodes = { [ESP_ENCRYPT_NEXT_DROP4] = "ip4-drop",
+ [ESP_ENCRYPT_NEXT_DROP6] = "ip6-drop",
+ [ESP_ENCRYPT_NEXT_DROP_MPLS] = "mpls-drop",
+ [ESP_ENCRYPT_NEXT_HANDOFF4] = "esp4-encrypt-handoff",
+ [ESP_ENCRYPT_NEXT_HANDOFF6] = "esp6-encrypt-handoff",
+ [ESP_ENCRYPT_NEXT_HANDOFF_MPLS] = "error-drop",
+ [ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT] = "interface-output" },
+};
+/* *INDENT-ON* */
+
+VLIB_NODE_FN (esp4_encrypt_post_node) (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame)
+{
+ return esp_encrypt_post_inline (vm, node, from_frame);
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (esp4_encrypt_post_node) = {
+ .name = "esp4-encrypt-post",
+ .vector_size = sizeof (u32),
+ .format_trace = format_esp_post_encrypt_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+ .sibling_of = "esp4-encrypt",
+
+ .n_errors = ESP_ENCRYPT_N_ERROR,
+ .error_counters = esp_encrypt_error_counters,
};
/* *INDENT-ON* */
VLIB_NODE_FN (esp6_encrypt_node) (vlib_main_t * vm,
				  vlib_node_runtime_t * node,
vlib_frame_t * from_frame)
{
- return esp_encrypt_inline (vm, node, from_frame, 1 /* is_ip6 */ );
+ return esp_encrypt_inline (vm, node, from_frame, VNET_LINK_IP6, 0,
+ esp_encrypt_async_next.esp6_post_next);
}
/* *INDENT-OFF* */
VLIB_REGISTER_NODE (esp6_encrypt_node) = {
  .name = "esp6-encrypt",
  .vector_size = sizeof (u32),
.format_trace = format_esp_encrypt_trace,
.type = VLIB_NODE_TYPE_INTERNAL,
+ .sibling_of = "esp4-encrypt",
+
+ .n_errors = ESP_ENCRYPT_N_ERROR,
+ .error_counters = esp_encrypt_error_counters,
+};
+/* *INDENT-ON* */
+
+VLIB_NODE_FN (esp6_encrypt_post_node) (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame)
+{
+ return esp_encrypt_post_inline (vm, node, from_frame);
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (esp6_encrypt_post_node) = {
+ .name = "esp6-encrypt-post",
+ .vector_size = sizeof (u32),
+ .format_trace = format_esp_post_encrypt_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+ .sibling_of = "esp4-encrypt",
+
+ .n_errors = ESP_ENCRYPT_N_ERROR,
+ .error_counters = esp_encrypt_error_counters,
+};
+/* *INDENT-ON* */
+
+VLIB_NODE_FN (esp4_encrypt_tun_node) (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame)
+{
+ return esp_encrypt_inline (vm, node, from_frame, VNET_LINK_IP4, 1,
+ esp_encrypt_async_next.esp4_tun_post_next);
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (esp4_encrypt_tun_node) = {
+ .name = "esp4-encrypt-tun",
+ .vector_size = sizeof (u32),
+ .format_trace = format_esp_encrypt_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
- .n_errors = ARRAY_LEN(esp_encrypt_error_strings),
- .error_strings = esp_encrypt_error_strings,
+ .n_errors = ESP_ENCRYPT_N_ERROR,
+ .error_counters = esp_encrypt_error_counters,
.n_next_nodes = ESP_ENCRYPT_N_NEXT,
.next_nodes = {
-#define _(s,n) [ESP_ENCRYPT_NEXT_##s] = n,
- foreach_esp_encrypt_next
-#undef _
+ [ESP_ENCRYPT_NEXT_DROP4] = "ip4-drop",
+ [ESP_ENCRYPT_NEXT_DROP6] = "ip6-drop",
+ [ESP_ENCRYPT_NEXT_DROP_MPLS] = "mpls-drop",
+ [ESP_ENCRYPT_NEXT_HANDOFF4] = "esp4-encrypt-tun-handoff",
+ [ESP_ENCRYPT_NEXT_HANDOFF6] = "esp6-encrypt-tun-handoff",
+ [ESP_ENCRYPT_NEXT_HANDOFF_MPLS] = "esp-mpls-encrypt-tun-handoff",
+ [ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT] = "adj-midchain-tx",
},
};
+
+VLIB_NODE_FN (esp4_encrypt_tun_post_node) (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame)
+{
+ return esp_encrypt_post_inline (vm, node, from_frame);
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (esp4_encrypt_tun_post_node) = {
+ .name = "esp4-encrypt-tun-post",
+ .vector_size = sizeof (u32),
+ .format_trace = format_esp_post_encrypt_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+ .sibling_of = "esp4-encrypt-tun",
+
+ .n_errors = ESP_ENCRYPT_N_ERROR,
+ .error_counters = esp_encrypt_error_counters,
+};
/* *INDENT-ON* */
+VLIB_NODE_FN (esp6_encrypt_tun_node) (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame)
+{
+ return esp_encrypt_inline (vm, node, from_frame, VNET_LINK_IP6, 1,
+ esp_encrypt_async_next.esp6_tun_post_next);
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (esp6_encrypt_tun_node) = {
+ .name = "esp6-encrypt-tun",
+ .vector_size = sizeof (u32),
+ .format_trace = format_esp_encrypt_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = ESP_ENCRYPT_N_ERROR,
+ .error_counters = esp_encrypt_error_counters,
+
+ .n_next_nodes = ESP_ENCRYPT_N_NEXT,
+ .next_nodes = {
+ [ESP_ENCRYPT_NEXT_DROP4] = "ip4-drop",
+ [ESP_ENCRYPT_NEXT_DROP6] = "ip6-drop",
+ [ESP_ENCRYPT_NEXT_DROP_MPLS] = "mpls-drop",
+ [ESP_ENCRYPT_NEXT_HANDOFF4] = "esp4-encrypt-tun-handoff",
+ [ESP_ENCRYPT_NEXT_HANDOFF6] = "esp6-encrypt-tun-handoff",
+ [ESP_ENCRYPT_NEXT_HANDOFF_MPLS] = "esp-mpls-encrypt-tun-handoff",
+ [ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT] = "adj-midchain-tx",
+ },
+};
+
+/* *INDENT-ON* */
+
+VLIB_NODE_FN (esp6_encrypt_tun_post_node) (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame)
+{
+ return esp_encrypt_post_inline (vm, node, from_frame);
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (esp6_encrypt_tun_post_node) = {
+ .name = "esp6-encrypt-tun-post",
+ .vector_size = sizeof (u32),
+ .format_trace = format_esp_post_encrypt_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+ .sibling_of = "esp-mpls-encrypt-tun",
+
+ .n_errors = ESP_ENCRYPT_N_ERROR,
+ .error_counters = esp_encrypt_error_counters,
+};
+/* *INDENT-ON* */
+
+VLIB_NODE_FN (esp_mpls_encrypt_tun_node)
+(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *from_frame)
+{
+ return esp_encrypt_inline (vm, node, from_frame, VNET_LINK_MPLS, 1,
+ esp_encrypt_async_next.esp_mpls_tun_post_next);
+}
+
+VLIB_REGISTER_NODE (esp_mpls_encrypt_tun_node) = {
+ .name = "esp-mpls-encrypt-tun",
+ .vector_size = sizeof (u32),
+ .format_trace = format_esp_encrypt_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = ESP_ENCRYPT_N_ERROR,
+ .error_counters = esp_encrypt_error_counters,
+
+ .n_next_nodes = ESP_ENCRYPT_N_NEXT,
+ .next_nodes = {
+ [ESP_ENCRYPT_NEXT_DROP4] = "ip4-drop",
+ [ESP_ENCRYPT_NEXT_DROP6] = "ip6-drop",
+ [ESP_ENCRYPT_NEXT_DROP_MPLS] = "mpls-drop",
+ [ESP_ENCRYPT_NEXT_HANDOFF4] = "esp4-encrypt-tun-handoff",
+ [ESP_ENCRYPT_NEXT_HANDOFF6] = "esp6-encrypt-tun-handoff",
+ [ESP_ENCRYPT_NEXT_HANDOFF_MPLS] = "esp-mpls-encrypt-tun-handoff",
+ [ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT] = "adj-midchain-tx",
+ },
+};
+
+VLIB_NODE_FN (esp_mpls_encrypt_tun_post_node)
+(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *from_frame)
+{
+ return esp_encrypt_post_inline (vm, node, from_frame);
+}
+
+VLIB_REGISTER_NODE (esp_mpls_encrypt_tun_post_node) = {
+ .name = "esp-mpls-encrypt-tun-post",
+ .vector_size = sizeof (u32),
+ .format_trace = format_esp_post_encrypt_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+ .sibling_of = "esp-mpls-encrypt-tun",
+
+ .n_errors = ESP_ENCRYPT_N_ERROR,
+ .error_counters = esp_encrypt_error_counters,
+};
+
+#ifndef CLIB_MARCH_VARIANT
+
+static clib_error_t *
+esp_encrypt_init (vlib_main_t *vm)
+{
+ ipsec_main_t *im = &ipsec_main;
+
+ im->esp4_enc_fq_index =
+ vlib_frame_queue_main_init (esp4_encrypt_node.index, 0);
+ im->esp6_enc_fq_index =
+ vlib_frame_queue_main_init (esp6_encrypt_node.index, 0);
+ im->esp4_enc_tun_fq_index =
+ vlib_frame_queue_main_init (esp4_encrypt_tun_node.index, 0);
+ im->esp6_enc_tun_fq_index =
+ vlib_frame_queue_main_init (esp6_encrypt_tun_node.index, 0);
+ im->esp_mpls_enc_tun_fq_index =
+ vlib_frame_queue_main_init (esp_mpls_encrypt_tun_node.index, 0);
+
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (esp_encrypt_init);
+
+#endif
+
/*
* fd.io coding-style-patch-verification: ON
*