+ nonce->ctr = clib_host_to_net_u32 (1);
+ }
+
+ nonce->salt = sa0->salt;
+ nonce->iv = *(u64 *) pkt_iv;
+ op->iv = (u8 *) nonce;
+ }
+ else
+ {
+ /* construct zero iv in front of the IP header */
+ op->iv = pkt_iv - hdr_len - iv_sz;
+ clib_memset_u8 (op->iv, 0, iv_sz);
+ /* include iv field in crypto */
+ crypto_start -= iv_sz;
+ crypto_len += iv_sz;
+ }
+
+ if (PREDICT_FALSE (lb != b[0]))
+ {
+ /* is chained */
+ op->flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
+ op->chunk_index = vec_len (ptd->chunks);
+ op->tag = vlib_buffer_get_tail (lb) - icv_sz;
+ esp_encrypt_chain_crypto (vm, ptd, sa0, b[0], lb, icv_sz,
+ crypto_start, crypto_len + icv_sz,
+ &op->n_chunks);
+ }
+ else
+ {
+ /* not chained */
+ op->src = op->dst = crypto_start;
+ op->len = crypto_len;
+ }
+ }
+
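+ /* non-AEAD SAs carry a separate integrity op; it covers the ESP
+ * header, the IV and the payload up to the ICV */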
+ if (sa0->integ_op_id)
+ {
+ vnet_crypto_op_t *op;
+ vec_add2_aligned (integ_ops[0], op, 1, CLIB_CACHE_LINE_BYTES);
+ vnet_crypto_op_init (op, sa0->integ_op_id);
+ op->src = payload - iv_sz - sizeof (esp_header_t);
+ op->digest = payload + payload_len - icv_sz;
+ op->key_index = sa0->integ_key_index;
+ op->digest_len = icv_sz;
+ op->len = payload_len - icv_sz + iv_sz + sizeof (esp_header_t);
+ op->user_data = bi;
+
+ if (lb != b[0])
+ {
+ /* is chained */
+ op->flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
+ op->chunk_index = vec_len (ptd->chunks);
+ op->digest = vlib_buffer_get_tail (lb) - icv_sz;
+
+ esp_encrypt_chain_integ (vm, ptd, sa0, b[0], lb, icv_sz,
+ payload - iv_sz - sizeof (esp_header_t),
+ payload_len + iv_sz +
+ sizeof (esp_header_t), op->digest,
+ &op->n_chunks);
+ }
+ else if (ipsec_sa_is_set_USE_ESN (sa0))
+ {
+ u32 tmp = clib_net_to_host_u32 (seq_hi);
+ clib_memcpy_fast (op->digest, &tmp, sizeof (seq_hi));
+ op->len += sizeof (seq_hi);
+ }
+ }
+}
+
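+ /* prepare one buffer for the async crypto engine: compute the crypto and
+ * integrity offsets/lengths, build the IV, nonce and AAD, then add the
+ * buffer to the per-op async frame */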
+static_always_inline void
+esp_prepare_async_frame (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
+ vnet_crypto_async_frame_t *async_frame,
+ ipsec_sa_t *sa, vlib_buffer_t *b, esp_header_t *esp,
+ u8 *payload, u32 payload_len, u8 iv_sz, u8 icv_sz,
+ u32 bi, u16 next, u32 hdr_len, u16 async_next,
+ vlib_buffer_t *lb)
+{
+ esp_post_data_t *post = esp_post_data (b);
+ u8 *tag, *iv, *aad = 0;
+ u8 flag = 0;
+ const u32 key_index = sa->crypto_key_index;
+ i16 crypto_start_offset, integ_start_offset;
+ u16 crypto_total_len, integ_total_len;
+
+ post->next_index = next;
+
+ /* crypto */
+ crypto_start_offset = integ_start_offset = payload - b->data;
+ crypto_total_len = integ_total_len = payload_len - icv_sz;
+ tag = payload + crypto_total_len;
+
+ /* generate the IV in front of the payload */
+ void *pkt_iv = esp_generate_iv (sa, payload, iv_sz);
+
+ if (ipsec_sa_is_set_IS_CTR (sa))
+ {
+ /* construct nonce in a scratch space in front of the IP header */
+ esp_ctr_nonce_t *nonce =
+ (esp_ctr_nonce_t *) (pkt_iv - hdr_len - sizeof (*nonce));
+ if (ipsec_sa_is_set_IS_AEAD (sa))
+ {
+ /* construct aad in a scratch space in front of the nonce */
+ aad = (u8 *) nonce - sizeof (esp_aead_t);
+ esp_aad_fill (aad, esp, sa, sa->seq_hi);
+ if (PREDICT_FALSE (ipsec_sa_is_set_IS_NULL_GMAC (sa)))
+ {
+ /* RFC-4543 ENCR_NULL_AUTH_AES_GMAC: IV is part of AAD */
+ crypto_start_offset -= iv_sz;
+ crypto_total_len += iv_sz;
+ }
+ }
+ else
+ {
+ nonce->ctr = clib_host_to_net_u32 (1);
+ }
+
+ nonce->salt = sa->salt;
+ nonce->iv = *(u64 *) pkt_iv;
+ iv = (u8 *) nonce;
+ }
+ else
+ {
+ /* construct zero iv in front of the IP header */
+ iv = pkt_iv - hdr_len - iv_sz;
+ clib_memset_u8 (iv, 0, iv_sz);
+ /* include iv field in crypto */
+ crypto_start_offset -= iv_sz;
+ crypto_total_len += iv_sz;
+ }
+
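+ /* for a chained buffer the engine walks a chunk list and the ICV is
+ * written at the tail of the last buffer */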
+ if (lb != b)
+ {
+ /* chain */
+ flag |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
+ tag = vlib_buffer_get_tail (lb) - icv_sz;
+ crypto_total_len = esp_encrypt_chain_crypto (
+ vm, ptd, sa, b, lb, icv_sz, b->data + crypto_start_offset,
+ crypto_total_len + icv_sz, 0);
+ }
+
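+ /* non-AEAD: integrity also covers the ESP header and the IV */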
+ if (sa->integ_op_id)
+ {
+ integ_start_offset -= iv_sz + sizeof (esp_header_t);
+ integ_total_len += iv_sz + sizeof (esp_header_t);
+
+ if (b != lb)
+ {
+ integ_total_len = esp_encrypt_chain_integ (
+ vm, ptd, sa, b, lb, icv_sz,
+ payload - iv_sz - sizeof (esp_header_t),
+ payload_len + iv_sz + sizeof (esp_header_t), tag, 0);
+ }
+ else if (ipsec_sa_is_set_USE_ESN (sa))
+ {
+ u32 seq_hi = clib_net_to_host_u32 (sa->seq_hi);
+ clib_memcpy_fast (tag, &seq_hi, sizeof (seq_hi));
+ integ_total_len += sizeof (seq_hi);
+ }
+ }
+
+ /* this always succeeds because we know the frame is not full */
+ vnet_crypto_async_add_to_frame (vm, async_frame, key_index, crypto_total_len,
+ integ_total_len - crypto_total_len,
+ crypto_start_offset, integ_start_offset, bi,
+ async_next, iv, tag, aad, flag);
+}
+
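+ /* main ESP encrypt loop, shared by the ip4/ip6/mpls and tunnel nodes;
+ * handles both the synchronous and the asynchronous crypto paths */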
+always_inline uword
+esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
+ vlib_frame_t *frame, vnet_link_t lt, int is_tun,
+ u16 async_next_node)
+{
+ ipsec_main_t *im = &ipsec_main;
+ ipsec_per_thread_data_t *ptd = vec_elt_at_index (im->ptd, vm->thread_index);
+ u32 *from = vlib_frame_vector_args (frame);
+ u32 n_left = frame->n_vectors;
+ vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
+ u32 thread_index = vm->thread_index;
+ u16 buffer_data_size = vlib_buffer_get_default_data_size (vm);
+ u32 current_sa_index = ~0, current_sa_packets = 0;
+ u32 current_sa_bytes = 0, spi = 0;
+ u8 esp_align = 4, iv_sz = 0, icv_sz = 0;
+ ipsec_sa_t *sa0 = 0;
+ u8 sa_drop_no_crypto = 0;
+ vlib_buffer_t *lb;
+ vnet_crypto_op_t **crypto_ops = &ptd->crypto_ops;
+ vnet_crypto_op_t **integ_ops = &ptd->integ_ops;
+ vnet_crypto_async_frame_t *async_frames[VNET_CRYPTO_ASYNC_OP_N_IDS];
+ int is_async = im->async_mode;
+ vnet_crypto_async_op_id_t async_op = ~0;
+ u16 drop_next =
+ (lt == VNET_LINK_IP6 ? ESP_ENCRYPT_NEXT_DROP6 :
+ (lt == VNET_LINK_IP4 ? ESP_ENCRYPT_NEXT_DROP4 :
+ ESP_ENCRYPT_NEXT_DROP_MPLS));
+ u16 handoff_next = (lt == VNET_LINK_IP6 ?
+ ESP_ENCRYPT_NEXT_HANDOFF6 :
+ (lt == VNET_LINK_IP4 ? ESP_ENCRYPT_NEXT_HANDOFF4 :
+ ESP_ENCRYPT_NEXT_HANDOFF_MPLS));
+ vlib_buffer_t *sync_bufs[VLIB_FRAME_SIZE];
+ u16 sync_nexts[VLIB_FRAME_SIZE], *sync_next = sync_nexts, n_sync = 0;
+ u16 n_async = 0;
+ u16 noop_nexts[VLIB_FRAME_SIZE], n_noop = 0;
+ u32 sync_bi[VLIB_FRAME_SIZE];
+ u32 noop_bi[VLIB_FRAME_SIZE];
+ esp_encrypt_error_t err;
+
+ vlib_get_buffers (vm, from, b, n_left);
+
+ vec_reset_length (ptd->crypto_ops);
+ vec_reset_length (ptd->integ_ops);
+ vec_reset_length (ptd->chained_crypto_ops);
+ vec_reset_length (ptd->chained_integ_ops);
+ vec_reset_length (ptd->async_frames);
+ vec_reset_length (ptd->chunks);
+ clib_memset (async_frames, 0, sizeof (async_frames));
+
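+ /* per-packet loop: failures become no-op (drop/handoff) entries,
+ * successes become sync crypto ops or async frame elements */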
+ while (n_left > 0)
+ {
+ u32 sa_index0;
+ dpo_id_t *dpo;
+ esp_header_t *esp;
+ u8 *payload, *next_hdr_ptr;
+ u16 payload_len, payload_len_total, n_bufs;
+ u32 hdr_len;
+
+ err = ESP_ENCRYPT_ERROR_RX_PKTS;
+
+ if (n_left > 2)
+ {
+ u8 *p;
+ vlib_prefetch_buffer_header (b[2], LOAD);
+ p = vlib_buffer_get_current (b[1]);
+ clib_prefetch_load (p);
+ p -= CLIB_CACHE_LINE_BYTES;
+ clib_prefetch_load (p);
+ /* speculate that the trailer goes in the first buffer */
+ CLIB_PREFETCH (vlib_buffer_get_tail (b[1]),
+ CLIB_CACHE_LINE_BYTES, LOAD);
+ }
+
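+ /* finish any pending checksum offloads before the payload is encrypted */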
+ vnet_calc_checksums_inline (vm, b[0], b[0]->flags & VNET_BUFFER_F_IS_IP4,
+ b[0]->flags & VNET_BUFFER_F_IS_IP6);
+ vnet_calc_outer_checksums_inline (vm, b[0]);
+
+ if (is_tun)
+ {
+ /* we are on an ipsec tunnel's feature arc */
+ vnet_buffer (b[0])->ipsec.sad_index =
+ sa_index0 = ipsec_tun_protect_get_sa_out
+ (vnet_buffer (b[0])->ip.adj_index[VLIB_TX]);
+
+ if (PREDICT_FALSE (INDEX_INVALID == sa_index0))
+ {
+ err = ESP_ENCRYPT_ERROR_NO_PROTECTION;
+ noop_nexts[n_noop] = drop_next;
+ b[0]->error = node->errors[err];
+ goto trace;
+ }
+ }
+ else
+ sa_index0 = vnet_buffer (b[0])->ipsec.sad_index;
+
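+ /* SA changed: flush the counters accumulated for the previous SA and
+ * re-cache the SA-derived constants used below */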
+ if (sa_index0 != current_sa_index)
+ {
+ if (current_sa_packets)
+ vlib_increment_combined_counter (
+ &ipsec_sa_counters, thread_index, current_sa_index,
+ current_sa_packets, current_sa_bytes);
+ current_sa_packets = current_sa_bytes = 0;
+
+ sa0 = ipsec_sa_get (sa_index0);
+ current_sa_index = sa_index0;
+
+ sa_drop_no_crypto = ((sa0->crypto_alg == IPSEC_CRYPTO_ALG_NONE &&
+ sa0->integ_alg == IPSEC_INTEG_ALG_NONE) &&
+ !ipsec_sa_is_set_NO_ALGO_NO_DROP (sa0));
+
+ vlib_prefetch_combined_counter (&ipsec_sa_counters, thread_index,
+ current_sa_index);
+
+ /* fetch the second cacheline ASAP */
+ clib_prefetch_load (sa0->cacheline1);
+
+ spi = clib_net_to_host_u32 (sa0->spi);
+ esp_align = sa0->esp_block_align;
+ icv_sz = sa0->integ_icv_size;
+ iv_sz = sa0->crypto_iv_size;
+ is_async = im->async_mode | ipsec_sa_is_set_IS_ASYNC (sa0);
+ }
+
+ if (PREDICT_FALSE (sa_drop_no_crypto != 0))
+ {
+ err = ESP_ENCRYPT_ERROR_NO_ENCRYPTION;
+ esp_encrypt_set_next_index (b[0], node, thread_index, err, n_noop,
+ noop_nexts, drop_next, sa_index0);
+ goto trace;
+ }
+
+ if (PREDICT_FALSE ((u16) ~0 == sa0->thread_index))
+ {
+ /* this is the first packet to use this SA, so claim the SA
+ * for this thread; another thread could be doing the same
+ * simultaneously */
+ clib_atomic_cmp_and_swap (&sa0->thread_index, ~0,
+ ipsec_sa_assign_thread (thread_index));
+ }
+
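+ /* each SA is owned by a single thread; hand off packets that arrived
+ * on the wrong one */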
+ if (PREDICT_FALSE (thread_index != sa0->thread_index))
+ {
+ vnet_buffer (b[0])->ipsec.thread_index = sa0->thread_index;
+ err = ESP_ENCRYPT_ERROR_HANDOFF;
+ esp_encrypt_set_next_index (b[0], node, thread_index, err, n_noop,
+ noop_nexts, handoff_next,
+ current_sa_index);
+ goto trace;
+ }
+
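+ /* linearize the chain as much as possible and find its last buffer,
+ * where the ESP trailer and ICV will be placed */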
+ lb = b[0];
+ n_bufs = vlib_buffer_chain_linearize (vm, b[0]);
+ if (n_bufs == 0)
+ {
+ err = ESP_ENCRYPT_ERROR_NO_BUFFERS;
+ esp_encrypt_set_next_index (b[0], node, thread_index, err, n_noop,
+ noop_nexts, drop_next, current_sa_index);
+ goto trace;
+ }
+
+ if (n_bufs > 1)
+ {
+ /* find last buffer in the chain */
+ while (lb->flags & VLIB_BUFFER_NEXT_PRESENT)
+ lb = vlib_get_buffer (vm, lb->next_buffer);
+ }
+
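+ /* advance the outbound sequence number; drop if it would cycle */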
+ if (PREDICT_FALSE (esp_seq_advance (sa0)))
+ {
+ err = ESP_ENCRYPT_ERROR_SEQ_CYCLED;
+ esp_encrypt_set_next_index (b[0], node, thread_index, err, n_noop,
+ noop_nexts, drop_next, current_sa_index);
+ goto trace;
+ }
+
+ /* space for IV */
+ hdr_len = iv_sz;
+
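+ /* tunnel mode: the whole inner packet is the payload; reserve space
+ * for the ESP footer and ICV at the tail */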
+ if (ipsec_sa_is_set_IS_TUNNEL (sa0))
+ {
+ payload = vlib_buffer_get_current (b[0]);
+ next_hdr_ptr = esp_add_footer_and_icv (
+ vm, &lb, esp_align, icv_sz, buffer_data_size,
+ vlib_buffer_length_in_chain (vm, b[0]));
+ if (!next_hdr_ptr)
+ {
+ err = ESP_ENCRYPT_ERROR_NO_BUFFERS;
+ esp_encrypt_set_next_index (b[0], node, thread_index, err,
+ n_noop, noop_nexts, drop_next,
+ current_sa_index);