#include <vnet/vnet.h>
#include <vnet/api_errno.h>
#include <vnet/ip/ip.h>
-#include <vnet/udp/udp.h>
#include <vnet/crypto/crypto.h>
#include <vnet/ipsec/ipsec.h>
#include <vnet/ipsec/ipsec_tun.h>
#include <vnet/ipsec/esp.h>
+#include <vnet/tunnel/tunnel_dp.h>
-#define foreach_esp_encrypt_next \
-_(DROP4, "ip4-drop") \
-_(DROP6, "ip6-drop") \
-_(PENDING, "pending") \
-_(HANDOFF4, "handoff4") \
-_(HANDOFF6, "handoff6") \
-_(INTERFACE_OUTPUT, "interface-output")
+#define foreach_esp_encrypt_next \
+ _ (DROP4, "ip4-drop") \
+ _ (DROP6, "ip6-drop") \
+ _ (DROP_MPLS, "mpls-drop") \
+ _ (HANDOFF4, "handoff4") \
+ _ (HANDOFF6, "handoff6") \
+ _ (HANDOFF_MPLS, "handoff-mpls") \
+ _ (INTERFACE_OUTPUT, "interface-output")
#define _(v, s) ESP_ENCRYPT_NEXT_##v,
typedef enum
ESP_ENCRYPT_N_NEXT,
} esp_encrypt_next_t;
-#define foreach_esp_encrypt_error \
- _(RX_PKTS, "ESP pkts received") \
- _(POST_RX_PKTS, "ESP-post pkts received") \
- _(SEQ_CYCLED, "sequence number cycled (packet dropped)") \
- _(CRYPTO_ENGINE_ERROR, "crypto engine error (packet dropped)") \
- _(CRYPTO_QUEUE_FULL, "crypto queue full (packet dropped)") \
- _(NO_BUFFERS, "no buffers (packet dropped)") \
+#define foreach_esp_encrypt_error \
+ _ (RX_PKTS, "ESP pkts received") \
+ _ (POST_RX_PKTS, "ESP-post pkts received") \
+ _ (HANDOFF, "Hand-off") \
+ _ (SEQ_CYCLED, "sequence number cycled (packet dropped)") \
+ _ (CRYPTO_ENGINE_ERROR, "crypto engine error (packet dropped)") \
+ _ (CRYPTO_QUEUE_FULL, "crypto queue full (packet dropped)") \
+ _ (NO_BUFFERS, "no buffers (packet dropped)")
typedef enum
{
/* pad packet in input buffer */
static_always_inline u8 *
-esp_add_footer_and_icv (vlib_main_t * vm, vlib_buffer_t ** last,
- u8 esp_align, u8 icv_sz,
- u16 * next, vlib_node_runtime_t * node,
+esp_add_footer_and_icv (vlib_main_t *vm, vlib_buffer_t **last, u8 esp_align,
+ u8 icv_sz, vlib_node_runtime_t *node,
u16 buffer_data_size, uword total_len)
{
static const u8 pad_data[ESP_MAX_BLOCK_SIZE] = {
last[0]->current_length + pad_bytes);
u16 tail_sz = sizeof (esp_footer_t) + pad_bytes + icv_sz;
- if (last[0]->current_length + tail_sz > buffer_data_size)
+ if (last[0]->current_data + last[0]->current_length + tail_sz >
+ buffer_data_size)
{
u32 tmp_bi = 0;
if (vlib_buffer_alloc (vm, &tmp_bi, 1) != 1)
}
}
-typedef struct
-{
- u32 salt;
- u64 iv;
-} __clib_packed esp_gcm_nonce_t;
-
-STATIC_ASSERT_SIZEOF (esp_gcm_nonce_t, 12);
-
static_always_inline u32
esp_encrypt_chain_crypto (vlib_main_t * vm, ipsec_per_thread_data_t * ptd,
ipsec_sa_t * sa0, vlib_buffer_t * b,
}
always_inline void
-esp_prepare_sync_op (vlib_main_t * vm, ipsec_per_thread_data_t * ptd,
- vnet_crypto_op_t ** crypto_ops,
- vnet_crypto_op_t ** integ_ops, ipsec_sa_t * sa0,
- u8 * payload, u16 payload_len, u8 iv_sz, u8 icv_sz,
- vlib_buffer_t ** bufs, vlib_buffer_t ** b,
- vlib_buffer_t * lb, u32 hdr_len, esp_header_t * esp,
- esp_gcm_nonce_t * nonce)
+esp_prepare_sync_op (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
+ vnet_crypto_op_t **crypto_ops,
+ vnet_crypto_op_t **integ_ops, ipsec_sa_t *sa0,
+ u8 *payload, u16 payload_len, u8 iv_sz, u8 icv_sz, u32 bi,
+ vlib_buffer_t **b, vlib_buffer_t *lb, u32 hdr_len,
+ esp_header_t *esp)
{
if (sa0->crypto_enc_op_id)
{
op->src = op->dst = payload;
op->key_index = sa0->crypto_key_index;
op->len = payload_len - icv_sz;
- op->user_data = b - bufs;
+ op->user_data = bi;
- if (ipsec_sa_is_set_IS_AEAD (sa0))
+ if (ipsec_sa_is_set_IS_CTR (sa0))
{
- /*
- * construct the AAD in a scratch space in front
- * of the IP header.
- */
- op->aad = payload - hdr_len - sizeof (esp_aead_t);
- op->aad_len = esp_aad_fill (op->aad, esp, sa0);
-
- op->tag = payload + op->len;
- op->tag_len = 16;
+ ASSERT (sizeof (u64) == iv_sz);
+ /* construct nonce in a scratch space in front of the IP header */
+ esp_ctr_nonce_t *nonce =
+ (esp_ctr_nonce_t *) (payload - sizeof (u64) - hdr_len -
+ sizeof (*nonce));
+ u64 *pkt_iv = (u64 *) (payload - sizeof (u64));
+
+ if (ipsec_sa_is_set_IS_AEAD (sa0))
+ {
+ /* construct aad in a scratch space in front of the nonce */
+ op->aad = (u8 *) nonce - sizeof (esp_aead_t);
+ op->aad_len = esp_aad_fill (op->aad, esp, sa0);
+ op->tag = payload + op->len;
+ op->tag_len = 16;
+ }
+ else
+ {
+ nonce->ctr = clib_host_to_net_u32 (1);
+ }
- u64 *iv = (u64 *) (payload - iv_sz);
nonce->salt = sa0->salt;
- nonce->iv = *iv = clib_host_to_net_u64 (sa0->gcm_iv_counter++);
+ nonce->iv = *pkt_iv = clib_host_to_net_u64 (sa0->ctr_iv_counter++);
op->iv = (u8 *) nonce;
}
else
op->key_index = sa0->integ_key_index;
op->digest_len = icv_sz;
op->len = payload_len - icv_sz + iv_sz + sizeof (esp_header_t);
- op->user_data = b - bufs;
+ op->user_data = bi;
if (lb != b[0])
{
}
}
-static_always_inline int
-esp_prepare_async_frame (vlib_main_t * vm, ipsec_per_thread_data_t * ptd,
- vnet_crypto_async_frame_t ** async_frame,
- ipsec_sa_t * sa, vlib_buffer_t * b,
- esp_header_t * esp, u8 * payload, u32 payload_len,
- u8 iv_sz, u8 icv_sz, u32 bi, u16 * next, u32 hdr_len,
- u16 async_next, vlib_buffer_t * lb)
+static_always_inline void
+esp_prepare_async_frame (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
+ vnet_crypto_async_frame_t *async_frame,
+ ipsec_sa_t *sa, vlib_buffer_t *b, esp_header_t *esp,
+ u8 *payload, u32 payload_len, u8 iv_sz, u8 icv_sz,
+ u32 bi, u16 next, u32 hdr_len, u16 async_next,
+ vlib_buffer_t *lb)
{
esp_post_data_t *post = esp_post_data (b);
u8 *tag, *iv, *aad = 0;
i16 crypto_start_offset, integ_start_offset = 0;
u16 crypto_total_len, integ_total_len;
- post->next_index = next[0];
- next[0] = ESP_ENCRYPT_NEXT_PENDING;
+ post->next_index = next;
/* crypto */
crypto_start_offset = payload - b->data;
crypto_total_len = integ_total_len = payload_len - icv_sz;
tag = payload + crypto_total_len;
- /* aead */
- if (ipsec_sa_is_set_IS_AEAD (sa))
- {
- esp_gcm_nonce_t *nonce;
- u64 *pkt_iv = (u64 *) (payload - iv_sz);
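+ /* the linked (crypto + integ) key is the default for async ops;
+ AEAD SAs override it with the crypto key below */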
+ key_index = sa->linked_key_index;
- aad = payload - hdr_len - sizeof (esp_aead_t);
- esp_aad_fill (aad, esp, sa);
- nonce = (esp_gcm_nonce_t *) (aad - sizeof (*nonce));
- nonce->salt = sa->salt;
- nonce->iv = *pkt_iv = clib_host_to_net_u64 (sa->gcm_iv_counter++);
- iv = (u8 *) nonce;
- key_index = sa->crypto_key_index;
+ if (ipsec_sa_is_set_IS_CTR (sa))
+ {
+ ASSERT (sizeof (u64) == iv_sz);
+ /* construct nonce in a scratch space in front of the IP header */
+ esp_ctr_nonce_t *nonce = (esp_ctr_nonce_t *) (payload - sizeof (u64) -
+ hdr_len - sizeof (*nonce));
+ u64 *pkt_iv = (u64 *) (payload - sizeof (u64));
- if (lb != b)
+ if (ipsec_sa_is_set_IS_AEAD (sa))
{
- /* chain */
- flag |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
- tag = vlib_buffer_get_tail (lb) - icv_sz;
- crypto_total_len = esp_encrypt_chain_crypto (vm, ptd, sa, b, lb,
- icv_sz, payload,
- payload_len, 0);
+ /* construct aad in a scratch space in front of the nonce */
+ aad = (u8 *) nonce - sizeof (esp_aead_t);
+ esp_aad_fill (aad, esp, sa);
+ key_index = sa->crypto_key_index;
+ }
+ else
+ {
+ nonce->ctr = clib_host_to_net_u32 (1);
}
- goto out;
- }
- /* cipher then hash */
- iv = payload - iv_sz;
- integ_start_offset = crypto_start_offset - iv_sz - sizeof (esp_header_t);
- integ_total_len += iv_sz + sizeof (esp_header_t);
- flag |= VNET_CRYPTO_OP_FLAG_INIT_IV;
- key_index = sa->linked_key_index;
+ nonce->salt = sa->salt;
+ nonce->iv = *pkt_iv = clib_host_to_net_u64 (sa->ctr_iv_counter++);
+ iv = (u8 *) nonce;
+ }
+ else
+ {
+ iv = payload - iv_sz;
+ flag |= VNET_CRYPTO_OP_FLAG_INIT_IV;
+ }
- if (b != lb)
+ if (lb != b)
{
+ /* chain */
flag |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
- crypto_total_len = esp_encrypt_chain_crypto (vm, ptd, sa, b, lb,
- icv_sz, payload,
- payload_len, 0);
tag = vlib_buffer_get_tail (lb) - icv_sz;
- integ_total_len = esp_encrypt_chain_integ (vm, ptd, sa, b, lb, icv_sz,
- payload - iv_sz -
- sizeof (esp_header_t),
- payload_len + iv_sz +
- sizeof (esp_header_t),
- tag, 0);
- }
- else if (ipsec_sa_is_set_USE_ESN (sa) && !ipsec_sa_is_set_IS_AEAD (sa))
- {
- u32 seq_hi = clib_net_to_host_u32 (sa->seq_hi);
- clib_memcpy_fast (tag, &seq_hi, sizeof (seq_hi));
- integ_total_len += sizeof (seq_hi);
+ crypto_total_len = esp_encrypt_chain_crypto (vm, ptd, sa, b, lb, icv_sz,
+ payload, payload_len, 0);
}
-out:
- return vnet_crypto_async_add_to_frame (vm, async_frame, key_index,
- crypto_total_len,
- integ_total_len - crypto_total_len,
- crypto_start_offset,
- integ_start_offset, bi, async_next,
- iv, tag, aad, flag);
-}
-
-/* when submitting a frame is failed, drop all buffers in the frame */
-static_always_inline void
-esp_async_recycle_failed_submit (vnet_crypto_async_frame_t * f,
- vlib_buffer_t ** b, u16 * next,
- u16 drop_next)
-{
- u32 n_drop = f->n_elts;
- while (--n_drop)
+ if (sa->integ_op_id)
{
- (b - n_drop)[0]->error = ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR;
- (next - n_drop)[0] = drop_next;
+ integ_start_offset = crypto_start_offset - iv_sz - sizeof (esp_header_t);
+ integ_total_len += iv_sz + sizeof (esp_header_t);
+
+ if (b != lb)
+ {
+ integ_total_len = esp_encrypt_chain_integ (
+ vm, ptd, sa, b, lb, icv_sz,
+ payload - iv_sz - sizeof (esp_header_t),
+ payload_len + iv_sz + sizeof (esp_header_t), tag, 0);
+ }
+ else if (ipsec_sa_is_set_USE_ESN (sa))
+ {
+ u32 seq_hi = clib_net_to_host_u32 (sa->seq_hi);
+ clib_memcpy_fast (tag, &seq_hi, sizeof (seq_hi));
+ integ_total_len += sizeof (seq_hi);
+ }
}
- vnet_crypto_async_reset_frame (f);
+
+ /* this always succeeds because we know the frame is not full */
+ vnet_crypto_async_add_to_frame (vm, async_frame, key_index, crypto_total_len,
+ integ_total_len - crypto_total_len,
+ crypto_start_offset, integ_start_offset, bi,
+ async_next, iv, tag, aad, flag);
}
always_inline uword
-esp_encrypt_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
- vlib_frame_t * frame, int is_ip6, int is_tun,
- u16 async_next)
+esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
+ vlib_frame_t *frame, vnet_link_t lt, int is_tun,
+ u16 async_next_node)
{
ipsec_main_t *im = &ipsec_main;
ipsec_per_thread_data_t *ptd = vec_elt_at_index (im->ptd, vm->thread_index);
u32 *from = vlib_frame_vector_args (frame);
u32 n_left = frame->n_vectors;
vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
- u16 nexts[VLIB_FRAME_SIZE], *next = nexts;
- esp_gcm_nonce_t nonces[VLIB_FRAME_SIZE], *nonce = nonces;
u32 thread_index = vm->thread_index;
u16 buffer_data_size = vlib_buffer_get_default_data_size (vm);
u32 current_sa_index = ~0, current_sa_packets = 0;
vlib_buffer_t *lb;
vnet_crypto_op_t **crypto_ops = &ptd->crypto_ops;
vnet_crypto_op_t **integ_ops = &ptd->integ_ops;
- vnet_crypto_async_frame_t *async_frame = 0;
+ vnet_crypto_async_frame_t *async_frames[VNET_CRYPTO_ASYNC_OP_N_IDS];
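+ /* the currently open async frame per crypto op id; every frame
+ opened here is also saved in ptd->async_frames and submitted
+ after the main loop */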
int is_async = im->async_mode;
- vnet_crypto_async_op_id_t last_async_op = ~0;
- u16 drop_next = (is_ip6 ? ESP_ENCRYPT_NEXT_DROP6 : ESP_ENCRYPT_NEXT_DROP4);
+ vnet_crypto_async_op_id_t async_op = ~0;
+ u16 drop_next =
+ (lt == VNET_LINK_IP6 ? ESP_ENCRYPT_NEXT_DROP6 :
+ (lt == VNET_LINK_IP4 ? ESP_ENCRYPT_NEXT_DROP4 :
+ ESP_ENCRYPT_NEXT_DROP_MPLS));
+ u16 handoff_next = (lt == VNET_LINK_IP6 ?
+ ESP_ENCRYPT_NEXT_HANDOFF6 :
+ (lt == VNET_LINK_IP4 ? ESP_ENCRYPT_NEXT_HANDOFF4 :
+ ESP_ENCRYPT_NEXT_HANDOFF_MPLS));
+ vlib_buffer_t *sync_bufs[VLIB_FRAME_SIZE];
+ u16 sync_nexts[VLIB_FRAME_SIZE], *sync_next = sync_nexts, n_sync = 0;
+ u16 async_nexts[VLIB_FRAME_SIZE], *async_next = async_nexts, n_async = 0;
+ u16 noop_nexts[VLIB_FRAME_SIZE], *noop_next = noop_nexts, n_noop = 0;
+ u32 sync_bi[VLIB_FRAME_SIZE];
+ u32 noop_bi[VLIB_FRAME_SIZE];
+ esp_encrypt_error_t err;
vlib_get_buffers (vm, from, b, n_left);
- if (!is_async)
- {
- vec_reset_length (ptd->crypto_ops);
- vec_reset_length (ptd->integ_ops);
- vec_reset_length (ptd->chained_crypto_ops);
- vec_reset_length (ptd->chained_integ_ops);
- }
+
+ vec_reset_length (ptd->crypto_ops);
+ vec_reset_length (ptd->integ_ops);
+ vec_reset_length (ptd->chained_crypto_ops);
+ vec_reset_length (ptd->chained_integ_ops);
+ vec_reset_length (ptd->async_frames);
vec_reset_length (ptd->chunks);
+ clib_memset (async_frames, 0, sizeof (async_frames));
while (n_left > 0)
{
u16 payload_len, payload_len_total, n_bufs;
u32 hdr_len;
+ err = ESP_ENCRYPT_ERROR_RX_PKTS;
+
if (n_left > 2)
{
u8 *p;
current_sa_bytes);
current_sa_packets = current_sa_bytes = 0;
- sa0 = pool_elt_at_index (im->sad, sa_index0);
+ sa0 = ipsec_sa_get (sa_index0);
/* fetch the second cacheline ASAP */
CLIB_PREFETCH (sa0->cacheline1, CLIB_CACHE_LINE_BYTES, LOAD);
esp_align = sa0->esp_block_align;
icv_sz = sa0->integ_icv_size;
iv_sz = sa0->crypto_iv_size;
-
- /* submit frame when op_id is different then the old one */
- if (is_async && sa0->crypto_async_enc_op_id != last_async_op)
- {
- if (async_frame && async_frame->n_elts)
- {
- if (vnet_crypto_async_submit_open_frame (vm, async_frame)
- < 0)
- esp_async_recycle_failed_submit (async_frame, b,
- next, drop_next);
- }
- async_frame =
- vnet_crypto_async_get_frame (vm, sa0->crypto_async_enc_op_id);
- last_async_op = sa0->crypto_async_enc_op_id;
- }
+ is_async = im->async_mode | ipsec_sa_is_set_IS_ASYNC (sa0);
}
- if (PREDICT_FALSE (~0 == sa0->encrypt_thread_index))
+ if (PREDICT_FALSE (~0 == sa0->thread_index))
{
/* this is the first packet to use this SA, claim the SA
* for this thread. this could happen simultaneously on
* another thread */
- clib_atomic_cmp_and_swap (&sa0->encrypt_thread_index, ~0,
+ clib_atomic_cmp_and_swap (&sa0->thread_index, ~0,
ipsec_sa_assign_thread (thread_index));
}
- if (PREDICT_TRUE (thread_index != sa0->encrypt_thread_index))
+ if (PREDICT_FALSE (thread_index != sa0->thread_index))
{
- next[0] = (is_ip6 ?
- ESP_ENCRYPT_NEXT_HANDOFF6 : ESP_ENCRYPT_NEXT_HANDOFF4);
+ vnet_buffer (b[0])->ipsec.thread_index = sa0->thread_index;
+ err = ESP_ENCRYPT_ERROR_HANDOFF;
+ esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
+ handoff_next);
goto trace;
}
n_bufs = vlib_buffer_chain_linearize (vm, b[0]);
if (n_bufs == 0)
{
- b[0]->error = node->errors[ESP_ENCRYPT_ERROR_NO_BUFFERS];
- next[0] = drop_next;
+ err = ESP_ENCRYPT_ERROR_NO_BUFFERS;
+ esp_set_next_index (b[0], node, err, n_noop, noop_nexts, drop_next);
goto trace;
}
if (PREDICT_FALSE (esp_seq_advance (sa0)))
{
- b[0]->error = node->errors[ESP_ENCRYPT_ERROR_SEQ_CYCLED];
- next[0] = drop_next;
+ err = ESP_ENCRYPT_ERROR_SEQ_CYCLED;
+ esp_set_next_index (b[0], node, err, n_noop, noop_nexts, drop_next);
goto trace;
}
if (ipsec_sa_is_set_IS_TUNNEL (sa0))
{
payload = vlib_buffer_get_current (b[0]);
- next_hdr_ptr = esp_add_footer_and_icv (vm, &lb, esp_align, icv_sz,
- next, node,
- buffer_data_size,
- vlib_buffer_length_in_chain
- (vm, b[0]));
+ next_hdr_ptr = esp_add_footer_and_icv (
+ vm, &lb, esp_align, icv_sz, node, buffer_data_size,
+ vlib_buffer_length_in_chain (vm, b[0]));
if (!next_hdr_ptr)
{
- b[0]->error = node->errors[ESP_ENCRYPT_ERROR_NO_BUFFERS];
- next[0] = drop_next;
+ err = ESP_ENCRYPT_ERROR_NO_BUFFERS;
+ esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
+ drop_next);
goto trace;
}
b[0]->flags &= ~VLIB_BUFFER_TOTAL_LENGTH_VALID;
u16 len = sizeof (ip6_header_t);
hdr_len += len;
ip6 = (ip6_header_t *) (payload - hdr_len);
- clib_memcpy_fast (ip6, &sa0->ip6_hdr, len);
- *next_hdr_ptr = (is_ip6 ?
- IP_PROTOCOL_IPV6 : IP_PROTOCOL_IP_IN_IP);
+ clib_memcpy_fast (ip6, &sa0->ip6_hdr, sizeof (ip6_header_t));
+
+ if (VNET_LINK_IP6 == lt)
+ {
+ *next_hdr_ptr = IP_PROTOCOL_IPV6;
+ tunnel_encap_fixup_6o6 (sa0->tunnel_flags,
+ (const ip6_header_t *) payload,
+ ip6);
+ }
+ else if (VNET_LINK_IP4 == lt)
+ {
+ *next_hdr_ptr = IP_PROTOCOL_IP_IN_IP;
+ tunnel_encap_fixup_4o6 (sa0->tunnel_flags, b[0],
+ (const ip4_header_t *) payload, ip6);
+ }
+ else if (VNET_LINK_MPLS == lt)
+ {
+ *next_hdr_ptr = IP_PROTOCOL_MPLS_IN_IP;
+ tunnel_encap_fixup_mplso6 (
+ sa0->tunnel_flags, b[0],
+ (const mpls_unicast_header_t *) payload, ip6);
+ }
+ else
+ ASSERT (0);
+
len = payload_len_total + hdr_len - len;
ip6->payload_length = clib_net_to_host_u16 (len);
+ b[0]->flags |= VNET_BUFFER_F_LOCALLY_ORIGINATED;
}
else
{
u16 len = sizeof (ip4_header_t);
hdr_len += len;
ip4 = (ip4_header_t *) (payload - hdr_len);
- clib_memcpy_fast (ip4, &sa0->ip4_hdr, len);
- *next_hdr_ptr = (is_ip6 ?
- IP_PROTOCOL_IPV6 : IP_PROTOCOL_IP_IN_IP);
+ clib_memcpy_fast (ip4, &sa0->ip4_hdr, sizeof (ip4_header_t));
+
+ if (VNET_LINK_IP6 == lt)
+ {
+ *next_hdr_ptr = IP_PROTOCOL_IPV6;
+ tunnel_encap_fixup_6o4_w_chksum (sa0->tunnel_flags,
+ (const ip6_header_t *)
+ payload, ip4);
+ }
+ else if (VNET_LINK_IP4 == lt)
+ {
+ *next_hdr_ptr = IP_PROTOCOL_IP_IN_IP;
+ tunnel_encap_fixup_4o4_w_chksum (sa0->tunnel_flags,
+ (const ip4_header_t *)
+ payload, ip4);
+ }
+ else if (VNET_LINK_MPLS == lt)
+ {
+ *next_hdr_ptr = IP_PROTOCOL_MPLS_IN_IP;
+ tunnel_encap_fixup_mplso4_w_chksum (
+ sa0->tunnel_flags, (const mpls_unicast_header_t *) payload,
+ ip4);
+ }
+ else
+ ASSERT (0);
+
len = payload_len_total + hdr_len;
esp_update_ip4_hdr (ip4, len, /* is_transport */ 0, 0);
}
dpo = &sa0->dpo;
if (!is_tun)
{
- next[0] = dpo->dpoi_next_node;
+ sync_next[0] = dpo->dpoi_next_node;
vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = dpo->dpoi_index;
}
else
- next[0] = ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT;
+ sync_next[0] = ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT;
+ b[0]->flags |= VNET_BUFFER_F_LOCALLY_ORIGINATED;
}
else /* transport mode */
{
u16 udp_len = 0;
u8 *old_ip_hdr = vlib_buffer_get_current (b[0]);
- ip_len = is_ip6 ?
- esp_get_ip6_hdr_len ((ip6_header_t *) old_ip_hdr, &ext_hdr) :
- ip4_header_bytes ((ip4_header_t *) old_ip_hdr);
+ ip_len =
+ (VNET_LINK_IP6 == lt ?
+ esp_get_ip6_hdr_len ((ip6_header_t *) old_ip_hdr, &ext_hdr) :
+ ip4_header_bytes ((ip4_header_t *) old_ip_hdr));
vlib_buffer_advance (b[0], ip_len);
payload = vlib_buffer_get_current (b[0]);
- next_hdr_ptr = esp_add_footer_and_icv (vm, &lb, esp_align, icv_sz,
- next, node,
- buffer_data_size,
- vlib_buffer_length_in_chain
- (vm, b[0]));
+ next_hdr_ptr = esp_add_footer_and_icv (
+ vm, &lb, esp_align, icv_sz, node, buffer_data_size,
+ vlib_buffer_length_in_chain (vm, b[0]));
if (!next_hdr_ptr)
- goto trace;
+ {
+ err = ESP_ENCRYPT_ERROR_NO_BUFFERS;
+ esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
+ drop_next);
+ goto trace;
+ }
b[0]->flags &= ~VLIB_BUFFER_TOTAL_LENGTH_VALID;
payload_len = b[0]->current_length;
else
l2_len = 0;
- if (is_ip6)
+ if (VNET_LINK_IP6 == lt)
{
ip6_header_t *ip6 = (ip6_header_t *) (old_ip_hdr);
if (PREDICT_TRUE (NULL == ext_hdr))
clib_host_to_net_u16 (payload_len_total + hdr_len - l2_len -
sizeof (ip6_header_t));
}
- else
+ else if (VNET_LINK_IP4 == lt)
{
u16 len;
ip4_header_t *ip4 = (ip4_header_t *) (old_ip_hdr);
esp_fill_udp_hdr (sa0, udp, udp_len);
}
- next[0] = ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT;
+ sync_next[0] = ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT;
}
if (lb != b[0])
if (is_async)
{
- if (PREDICT_FALSE (sa0->crypto_async_enc_op_id == 0))
- goto trace;
+ async_op = sa0->crypto_async_enc_op_id;
- if (esp_prepare_async_frame (vm, ptd, &async_frame, sa0, b[0], esp,
- payload, payload_len, iv_sz,
- icv_sz, from[b - bufs], next, hdr_len,
- async_next, lb))
+ /* get a frame for this op if we don't yet have one or it's full
+ */
+ if (NULL == async_frames[async_op] ||
+ vnet_crypto_async_frame_is_full (async_frames[async_op]))
{
- esp_async_recycle_failed_submit (async_frame, b, next,
- drop_next);
- goto trace;
+ async_frames[async_op] =
+ vnet_crypto_async_get_frame (vm, async_op);
+ /* Save the frame to the list we'll submit at the end */
+ vec_add1 (ptd->async_frames, async_frames[async_op]);
}
+
+ esp_prepare_async_frame (vm, ptd, async_frames[async_op], sa0, b[0],
+ esp, payload, payload_len, iv_sz, icv_sz,
+ from[b - bufs], sync_next[0], hdr_len,
+ async_next_node, lb);
}
else
- {
- esp_prepare_sync_op (vm, ptd, crypto_ops, integ_ops, sa0, payload,
- payload_len, iv_sz, icv_sz, bufs, b, lb,
- hdr_len, esp, nonce++);
- }
+ esp_prepare_sync_op (vm, ptd, crypto_ops, integ_ops, sa0, payload,
+ payload_len, iv_sz, icv_sz, n_sync, b, lb,
+ hdr_len, esp);
vlib_buffer_advance (b[0], 0LL - hdr_len);
tr->crypto_alg = sa0->crypto_alg;
tr->integ_alg = sa0->integ_alg;
}
+
/* next */
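+ /* errored packets take the no-op (drop/handoff) path; the rest are
+ either collected for the synchronous crypto processing below or have
+ already been added to an async crypto frame */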
+ if (ESP_ENCRYPT_ERROR_RX_PKTS != err)
+ {
+ noop_bi[n_noop] = from[b - bufs];
+ n_noop++;
+ noop_next++;
+ }
+ else if (!is_async)
+ {
+ sync_bi[n_sync] = from[b - bufs];
+ sync_bufs[n_sync] = b[0];
+ n_sync++;
+ sync_next++;
+ }
+ else
+ {
+ n_async++;
+ async_next++;
+ }
n_left -= 1;
- next += 1;
b += 1;
}
vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
current_sa_index, current_sa_packets,
current_sa_bytes);
- if (!is_async)
+ if (n_sync)
{
- esp_process_ops (vm, node, ptd->crypto_ops, bufs, nexts, drop_next);
- esp_process_chained_ops (vm, node, ptd->chained_crypto_ops, bufs, nexts,
- ptd->chunks, drop_next);
+ esp_process_ops (vm, node, ptd->crypto_ops, sync_bufs, sync_nexts,
+ drop_next);
+ esp_process_chained_ops (vm, node, ptd->chained_crypto_ops, sync_bufs,
+ sync_nexts, ptd->chunks, drop_next);
- esp_process_ops (vm, node, ptd->integ_ops, bufs, nexts, drop_next);
- esp_process_chained_ops (vm, node, ptd->chained_integ_ops, bufs, nexts,
- ptd->chunks, drop_next);
+ esp_process_ops (vm, node, ptd->integ_ops, sync_bufs, sync_nexts,
+ drop_next);
+ esp_process_chained_ops (vm, node, ptd->chained_integ_ops, sync_bufs,
+ sync_nexts, ptd->chunks, drop_next);
+
+ vlib_buffer_enqueue_to_next (vm, node, sync_bi, sync_nexts, n_sync);
}
- else if (async_frame && async_frame->n_elts)
+ if (n_async)
{
- if (vnet_crypto_async_submit_open_frame (vm, async_frame) < 0)
- esp_async_recycle_failed_submit (async_frame, b, next, drop_next);
+ /* submit all of the open frames */
+ vnet_crypto_async_frame_t **async_frame;
+
+ vec_foreach (async_frame, ptd->async_frames)
+ {
+ if (vnet_crypto_async_submit_open_frame (vm, *async_frame) < 0)
+ {
+ n_noop += esp_async_recycle_failed_submit (
+ vm, *async_frame, node, ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR,
+ n_sync, noop_bi, noop_nexts, drop_next);
+ vnet_crypto_async_reset_frame (*async_frame);
+ vnet_crypto_async_free_frame (vm, *async_frame);
+ }
+ }
}
+ if (n_noop)
+ vlib_buffer_enqueue_to_next (vm, node, noop_bi, noop_nexts, n_noop);
- vlib_node_increment_counter (vm, node->node_index,
- ESP_ENCRYPT_ERROR_RX_PKTS, frame->n_vectors);
+ vlib_node_increment_counter (vm, node->node_index, ESP_ENCRYPT_ERROR_RX_PKTS,
+ frame->n_vectors);
- vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);
return frame->n_vectors;
}
vlib_node_runtime_t * node,
vlib_frame_t * from_frame)
{
- return esp_encrypt_inline (vm, node, from_frame, 0 /* is_ip6 */ , 0,
+ return esp_encrypt_inline (vm, node, from_frame, VNET_LINK_IP4, 0,
esp_encrypt_async_next.esp4_post_next);
}
.format_trace = format_esp_encrypt_trace,
.type = VLIB_NODE_TYPE_INTERNAL,
- .n_errors = ARRAY_LEN(esp_encrypt_error_strings),
+ .n_errors = ARRAY_LEN (esp_encrypt_error_strings),
.error_strings = esp_encrypt_error_strings,
.n_next_nodes = ESP_ENCRYPT_N_NEXT,
- .next_nodes = {
- [ESP_ENCRYPT_NEXT_DROP4] = "ip4-drop",
- [ESP_ENCRYPT_NEXT_DROP6] = "ip6-drop",
- [ESP_ENCRYPT_NEXT_HANDOFF4] = "esp4-encrypt-handoff",
- [ESP_ENCRYPT_NEXT_HANDOFF6] = "esp6-encrypt-handoff",
- [ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT] = "interface-output",
- [ESP_ENCRYPT_NEXT_PENDING] = "esp-encrypt-pending",
- },
+ .next_nodes = { [ESP_ENCRYPT_NEXT_DROP4] = "ip4-drop",
+ [ESP_ENCRYPT_NEXT_DROP6] = "ip6-drop",
+ [ESP_ENCRYPT_NEXT_DROP_MPLS] = "mpls-drop",
+ [ESP_ENCRYPT_NEXT_HANDOFF4] = "esp4-encrypt-handoff",
+ [ESP_ENCRYPT_NEXT_HANDOFF6] = "esp6-encrypt-handoff",
+ [ESP_ENCRYPT_NEXT_HANDOFF_MPLS] = "error-drop",
+ [ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT] = "interface-output" },
};
/* *INDENT-ON* */
vlib_node_runtime_t * node,
vlib_frame_t * from_frame)
{
- return esp_encrypt_inline (vm, node, from_frame, 1 /* is_ip6 */ , 0,
+ return esp_encrypt_inline (vm, node, from_frame, VNET_LINK_IP6, 0,
esp_encrypt_async_next.esp6_post_next);
}
vlib_node_runtime_t * node,
vlib_frame_t * from_frame)
{
- return esp_encrypt_inline (vm, node, from_frame, 0 /* is_ip6 */ , 1,
+ return esp_encrypt_inline (vm, node, from_frame, VNET_LINK_IP4, 1,
esp_encrypt_async_next.esp4_tun_post_next);
}
.next_nodes = {
[ESP_ENCRYPT_NEXT_DROP4] = "ip4-drop",
[ESP_ENCRYPT_NEXT_DROP6] = "ip6-drop",
+ [ESP_ENCRYPT_NEXT_DROP_MPLS] = "mpls-drop",
[ESP_ENCRYPT_NEXT_HANDOFF4] = "esp4-encrypt-tun-handoff",
- [ESP_ENCRYPT_NEXT_HANDOFF6] = "error-drop",
+ [ESP_ENCRYPT_NEXT_HANDOFF6] = "esp6-encrypt-tun-handoff",
+ [ESP_ENCRYPT_NEXT_HANDOFF_MPLS] = "esp-mpls-encrypt-tun-handoff",
[ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT] = "adj-midchain-tx",
- [ESP_ENCRYPT_NEXT_PENDING] = "esp-encrypt-pending",
},
};
vlib_node_runtime_t * node,
vlib_frame_t * from_frame)
{
- return esp_encrypt_inline (vm, node, from_frame, 1 /* is_ip6 */ , 1,
+ return esp_encrypt_inline (vm, node, from_frame, VNET_LINK_IP6, 1,
esp_encrypt_async_next.esp6_tun_post_next);
}
.next_nodes = {
[ESP_ENCRYPT_NEXT_DROP4] = "ip4-drop",
[ESP_ENCRYPT_NEXT_DROP6] = "ip6-drop",
- [ESP_ENCRYPT_NEXT_HANDOFF4] = "error-drop",
+ [ESP_ENCRYPT_NEXT_DROP_MPLS] = "mpls-drop",
+ [ESP_ENCRYPT_NEXT_HANDOFF4] = "esp4-encrypt-tun-handoff",
[ESP_ENCRYPT_NEXT_HANDOFF6] = "esp6-encrypt-tun-handoff",
- [ESP_ENCRYPT_NEXT_PENDING] = "esp-encrypt-pending",
+ [ESP_ENCRYPT_NEXT_HANDOFF_MPLS] = "esp-mpls-encrypt-tun-handoff",
[ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT] = "adj-midchain-tx",
},
};
.vector_size = sizeof (u32),
.format_trace = format_esp_post_encrypt_trace,
.type = VLIB_NODE_TYPE_INTERNAL,
- .sibling_of = "esp6-encrypt-tun",
+ .sibling_of = "esp-mpls-encrypt-tun",
- .n_errors = ARRAY_LEN(esp_encrypt_error_strings),
+ .n_errors = ARRAY_LEN (esp_encrypt_error_strings),
.error_strings = esp_encrypt_error_strings,
};
/* *INDENT-ON* */
+VLIB_NODE_FN (esp_mpls_encrypt_tun_node)
+(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *from_frame)
+{
+ return esp_encrypt_inline (vm, node, from_frame, VNET_LINK_MPLS, 1,
+ esp_encrypt_async_next.esp_mpls_tun_post_next);
+}
+
+VLIB_REGISTER_NODE (esp_mpls_encrypt_tun_node) = {
+ .name = "esp-mpls-encrypt-tun",
+ .vector_size = sizeof (u32),
+ .format_trace = format_esp_encrypt_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = ARRAY_LEN(esp_encrypt_error_strings),
+ .error_strings = esp_encrypt_error_strings,
+
+ .n_next_nodes = ESP_ENCRYPT_N_NEXT,
+ .next_nodes = {
+ [ESP_ENCRYPT_NEXT_DROP4] = "ip4-drop",
+ [ESP_ENCRYPT_NEXT_DROP6] = "ip6-drop",
+ [ESP_ENCRYPT_NEXT_DROP_MPLS] = "mpls-drop",
+ [ESP_ENCRYPT_NEXT_HANDOFF4] = "esp4-encrypt-tun-handoff",
+ [ESP_ENCRYPT_NEXT_HANDOFF6] = "esp6-encrypt-tun-handoff",
+ [ESP_ENCRYPT_NEXT_HANDOFF_MPLS] = "esp-mpls-encrypt-tun-handoff",
+ [ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT] = "adj-midchain-tx",
+ },
+};
+
+VLIB_NODE_FN (esp_mpls_encrypt_tun_post_node)
+(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *from_frame)
+{
+ return esp_encrypt_post_inline (vm, node, from_frame);
+}
+
+VLIB_REGISTER_NODE (esp_mpls_encrypt_tun_post_node) = {
+ .name = "esp-mpls-encrypt-tun-post",
+ .vector_size = sizeof (u32),
+ .format_trace = format_esp_post_encrypt_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+ .sibling_of = "esp-mpls-encrypt-tun",
+
+ .n_errors = ARRAY_LEN (esp_encrypt_error_strings),
+ .error_strings = esp_encrypt_error_strings,
+};
+
typedef struct
{
u32 sa_index;
};
/* *INDENT-ON* */
-VLIB_NODE_FN (esp_encrypt_pending_node) (vlib_main_t * vm,
- vlib_node_runtime_t * node,
- vlib_frame_t * from_frame)
+#ifndef CLIB_MARCH_VARIANT
+
+static clib_error_t *
+esp_encrypt_init (vlib_main_t *vm)
{
- return from_frame->n_vectors;
+ ipsec_main_t *im = &ipsec_main;
+
+ im->esp4_enc_fq_index =
+ vlib_frame_queue_main_init (esp4_encrypt_node.index, 0);
+ im->esp6_enc_fq_index =
+ vlib_frame_queue_main_init (esp6_encrypt_node.index, 0);
+ im->esp4_enc_tun_fq_index =
+ vlib_frame_queue_main_init (esp4_encrypt_tun_node.index, 0);
+ im->esp6_enc_tun_fq_index =
+ vlib_frame_queue_main_init (esp6_encrypt_tun_node.index, 0);
+ im->esp_mpls_enc_tun_fq_index =
+ vlib_frame_queue_main_init (esp_mpls_encrypt_tun_node.index, 0);
+
+ return 0;
}
-/* *INDENT-OFF* */
-VLIB_REGISTER_NODE (esp_encrypt_pending_node) = {
- .name = "esp-encrypt-pending",
- .vector_size = sizeof (u32),
- .type = VLIB_NODE_TYPE_INTERNAL,
+VLIB_INIT_FUNCTION (esp_encrypt_init);
- .n_next_nodes = 0
-};
-/* *INDENT-ON* */
+#endif
/*
* fd.io coding-style-patch-verification: ON