+ nonce->salt = sa0->salt;
+ nonce->iv = *pkt_iv = clib_host_to_net_u64 (sa0->ctr_iv_counter++);
+ op->iv = (u8 *) nonce;
+ }
+ else
+ {
+ op->iv = payload - iv_sz;
+ op->flags = VNET_CRYPTO_OP_FLAG_INIT_IV;
+ }
+
+ if (lb != b[0])
+ {
+ /* is chained */
+ op->flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
+ op->chunk_index = vec_len (ptd->chunks);
+ op->tag = vlib_buffer_get_tail (lb) - icv_sz;
+ esp_encrypt_chain_crypto (vm, ptd, sa0, b[0], lb, icv_sz, payload,
+ payload_len, &op->n_chunks);
+ }
+ }
+
+ if (sa0->integ_op_id)
+ {
+ vnet_crypto_op_t *op;
+ vec_add2_aligned (integ_ops[0], op, 1, CLIB_CACHE_LINE_BYTES);
+ vnet_crypto_op_init (op, sa0->integ_op_id);
+ op->src = payload - iv_sz - sizeof (esp_header_t);
+ op->digest = payload + payload_len - icv_sz;
+ op->key_index = sa0->integ_key_index;
+ op->digest_len = icv_sz;
+ op->len = payload_len - icv_sz + iv_sz + sizeof (esp_header_t);
+ op->user_data = bi;
+
+ if (lb != b[0])
+ {
+ /* is chained */
+ op->flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
+ op->chunk_index = vec_len (ptd->chunks);
+ op->digest = vlib_buffer_get_tail (lb) - icv_sz;
+
+ esp_encrypt_chain_integ (vm, ptd, sa0, b[0], lb, icv_sz,
+ payload - iv_sz - sizeof (esp_header_t),
+ payload_len + iv_sz +
+ sizeof (esp_header_t), op->digest,
+ &op->n_chunks);
+ }
+ else if (ipsec_sa_is_set_USE_ESN (sa0))
+ {
+ u32 tmp = clib_net_to_host_u32 (seq_hi);
+ clib_memcpy_fast (op->digest, &tmp, sizeof (seq_hi));
+ op->len += sizeof (seq_hi);
+ }
+ }
+}
+
+/*
+ * Prepare one outbound ESP packet for the async crypto engine: compute the
+ * crypto/integrity start offsets and lengths, build the IV (and, for CTR
+ * modes, the nonce; for AEAD also the AAD) in scratch space in front of the
+ * packet headers, adjust for chained buffers and ESN, then add a descriptor
+ * to async_frame.
+ *
+ * @param vm           per-thread vlib main
+ * @param ptd          per-thread IPsec data (holds the chunk vector used by
+ *                     the chained-buffer helpers)
+ * @param async_frame  async frame the descriptor is appended to; the caller
+ *                     guarantees it is not full (see comment at the end)
+ * @param sa           outbound SA supplying keys, salt, counters and flags
+ * @param b            head buffer of the packet
+ * @param esp          ESP header (consumed by esp_aad_fill for AEAD AAD)
+ * @param payload      start of the data to be encrypted
+ * @param payload_len  length of payload in the head buffer, including the
+ *                     trailing ICV space
+ * @param iv_sz        cipher IV size in bytes
+ * @param icv_sz       integrity tag (ICV) size in bytes
+ * @param bi           buffer index, echoed back at async completion
+ * @param next         next-node index saved for the post-crypto path
+ * @param hdr_len      bytes of headers prepended in front of payload
+ * @param async_next   post-crypto node passed to the async engine
+ * @param lb           last buffer of the chain (equals b when unchained)
+ */
+static_always_inline void
+esp_prepare_async_frame (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
+ vnet_crypto_async_frame_t *async_frame,
+ ipsec_sa_t *sa, vlib_buffer_t *b, esp_header_t *esp,
+ u8 *payload, u32 payload_len, u8 iv_sz, u8 icv_sz,
+ u32 bi, u16 next, u32 hdr_len, u16 async_next,
+ vlib_buffer_t *lb)
+{
+ esp_post_data_t *post = esp_post_data (b);
+ u8 *tag, *iv, *aad = 0;
+ u8 flag = 0;
+ u32 key_index;
+ i16 crypto_start_offset, integ_start_offset = 0;
+ u16 crypto_total_len, integ_total_len;
+
+ /* remember where to send the packet once crypto completes */
+ post->next_index = next;
+
+ /* crypto */
+ crypto_start_offset = payload - b->data;
+ /* encrypt everything except the trailing ICV; the tag is written there */
+ crypto_total_len = integ_total_len = payload_len - icv_sz;
+ tag = payload + crypto_total_len;
+
+ /* default key: the SA's linked key (presumably the combined
+ * crypto+integ key used by async engines — confirm against
+ * ipsec_sa key setup); AEAD overrides this below */
+ key_index = sa->linked_key_index;
+
+ if (ipsec_sa_is_set_IS_CTR (sa))
+ {
+ /* CTR-family ciphers here use a 64-bit per-packet IV */
+ ASSERT (sizeof (u64) == iv_sz);
+ /* construct nonce in a scratch space in front of the IP header */
+ esp_ctr_nonce_t *nonce = (esp_ctr_nonce_t *) (payload - sizeof (u64) -
+ hdr_len - sizeof (*nonce));
+ /* the IV transmitted in the packet sits immediately before payload */
+ u64 *pkt_iv = (u64 *) (payload - sizeof (u64));
+
+ if (ipsec_sa_is_set_IS_AEAD (sa))
+ {
+ /* construct aad in a scratch space in front of the nonce */
+ aad = (u8 *) nonce - sizeof (esp_aead_t);
+ esp_aad_fill (aad, esp, sa, sa->seq_hi);
+ /* AEAD authenticates internally: use the crypto-only key */
+ key_index = sa->crypto_key_index;
+ }
+ else
+ {
+ /* plain CTR: initial block counter fixed at 1 */
+ nonce->ctr = clib_host_to_net_u32 (1);
+ }
+
+ /* salt + per-SA monotonically increasing counter; the counter value
+ * is written both into the nonce and into the packet's IV field */
+ nonce->salt = sa->salt;
+ nonce->iv = *pkt_iv = clib_host_to_net_u64 (sa->ctr_iv_counter++);
+ iv = (u8 *) nonce;
+ }
+ else
+ {
+ /* CBC-style: IV lives in the packet just before the payload and is
+ * initialized by the crypto engine (INIT_IV flag) */
+ iv = payload - iv_sz;
+ flag |= VNET_CRYPTO_OP_FLAG_INIT_IV;
+ }
+
+ if (lb != b)
+ {
+ /* chain */
+ flag |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
+ /* the ICV goes at the tail of the LAST buffer, not the head one */
+ tag = vlib_buffer_get_tail (lb) - icv_sz;
+ /* recompute total crypto length over the whole buffer chain */
+ crypto_total_len = esp_encrypt_chain_crypto (vm, ptd, sa, b, lb, icv_sz,
+ payload, payload_len, 0);
+ }
+
+ if (sa->integ_op_id)
+ {
+ /* integrity additionally covers the ESP header and IV that precede
+ * the encrypted payload */
+ integ_start_offset = crypto_start_offset - iv_sz - sizeof (esp_header_t);
+ integ_total_len += iv_sz + sizeof (esp_header_t);
+
+ if (b != lb)
+ {
+ /* chained: walk the chain to build integ chunks; helper returns
+ * the total integ length (including any ESN handling it does) */
+ integ_total_len = esp_encrypt_chain_integ (
+ vm, ptd, sa, b, lb, icv_sz,
+ payload - iv_sz - sizeof (esp_header_t),
+ payload_len + iv_sz + sizeof (esp_header_t), tag, 0);
+ }
+ else if (ipsec_sa_is_set_USE_ESN (sa))
+ {
+ /* ESN: append the high 32 bits of the sequence number after the
+ * ICV slot so they are folded into the integrity computation
+ * (they are not transmitted on the wire) */
+ u32 seq_hi = clib_net_to_host_u32 (sa->seq_hi);
+ clib_memcpy_fast (tag, &seq_hi, sizeof (seq_hi));
+ integ_total_len += sizeof (seq_hi);
+ }
+ }
+
+ /* this always succeeds because we know the frame is not full */
+ vnet_crypto_async_add_to_frame (vm, async_frame, key_index, crypto_total_len,
+ integ_total_len - crypto_total_len,
+ crypto_start_offset, integ_start_offset, bi,
+ async_next, iv, tag, aad, flag);
+}
+
+always_inline uword
+esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
+ vlib_frame_t *frame, vnet_link_t lt, int is_tun,
+ u16 async_next_node)
+{
+ ipsec_main_t *im = &ipsec_main;
+ ipsec_per_thread_data_t *ptd = vec_elt_at_index (im->ptd, vm->thread_index);
+ u32 *from = vlib_frame_vector_args (frame);
+ u32 n_left = frame->n_vectors;
+ vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
+ u32 thread_index = vm->thread_index;
+ u16 buffer_data_size = vlib_buffer_get_default_data_size (vm);
+ u32 current_sa_index = ~0, current_sa_packets = 0;
+ u32 current_sa_bytes = 0, spi = 0;
+ u8 esp_align = 4, iv_sz = 0, icv_sz = 0;
+ ipsec_sa_t *sa0 = 0;
+ vlib_buffer_t *lb;
+ vnet_crypto_op_t **crypto_ops = &ptd->crypto_ops;
+ vnet_crypto_op_t **integ_ops = &ptd->integ_ops;
+ vnet_crypto_async_frame_t *async_frames[VNET_CRYPTO_ASYNC_OP_N_IDS];
+ int is_async = im->async_mode;
+ vnet_crypto_async_op_id_t async_op = ~0;
+ u16 drop_next =
+ (lt == VNET_LINK_IP6 ? ESP_ENCRYPT_NEXT_DROP6 :
+ (lt == VNET_LINK_IP4 ? ESP_ENCRYPT_NEXT_DROP4 :
+ ESP_ENCRYPT_NEXT_DROP_MPLS));
+ u16 handoff_next = (lt == VNET_LINK_IP6 ?
+ ESP_ENCRYPT_NEXT_HANDOFF6 :
+ (lt == VNET_LINK_IP4 ? ESP_ENCRYPT_NEXT_HANDOFF4 :
+ ESP_ENCRYPT_NEXT_HANDOFF_MPLS));
+ vlib_buffer_t *sync_bufs[VLIB_FRAME_SIZE];
+ u16 sync_nexts[VLIB_FRAME_SIZE], *sync_next = sync_nexts, n_sync = 0;
+ u16 n_async = 0;
+ u16 noop_nexts[VLIB_FRAME_SIZE], n_noop = 0;
+ u32 sync_bi[VLIB_FRAME_SIZE];
+ u32 noop_bi[VLIB_FRAME_SIZE];
+ esp_encrypt_error_t err;
+
+ vlib_get_buffers (vm, from, b, n_left);
+
+ vec_reset_length (ptd->crypto_ops);
+ vec_reset_length (ptd->integ_ops);
+ vec_reset_length (ptd->chained_crypto_ops);
+ vec_reset_length (ptd->chained_integ_ops);
+ vec_reset_length (ptd->async_frames);
+ vec_reset_length (ptd->chunks);
+ clib_memset (async_frames, 0, sizeof (async_frames));
+
+ while (n_left > 0)
+ {
+ u32 sa_index0;
+ dpo_id_t *dpo;
+ esp_header_t *esp;
+ u8 *payload, *next_hdr_ptr;
+ u16 payload_len, payload_len_total, n_bufs;
+ u32 hdr_len;
+
+ err = ESP_ENCRYPT_ERROR_RX_PKTS;
+
+ if (n_left > 2)
+ {
+ u8 *p;
+ vlib_prefetch_buffer_header (b[2], LOAD);
+ p = vlib_buffer_get_current (b[1]);
+ clib_prefetch_load (p);
+ p -= CLIB_CACHE_LINE_BYTES;
+ clib_prefetch_load (p);
+ /* speculate that the trailer goes in the first buffer */
+ CLIB_PREFETCH (vlib_buffer_get_tail (b[1]),
+ CLIB_CACHE_LINE_BYTES, LOAD);
+ }
+
+ if (is_tun)
+ {
+ /* we are on a ipsec tunnel's feature arc */
+ vnet_buffer (b[0])->ipsec.sad_index =
+ sa_index0 = ipsec_tun_protect_get_sa_out
+ (vnet_buffer (b[0])->ip.adj_index[VLIB_TX]);
+
+ if (PREDICT_FALSE (INDEX_INVALID == sa_index0))