+ nonce->ctr = clib_host_to_net_u32 (1);
+ }
+
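+ /* the nonce is built from the SA salt followed by the per-packet IV */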
+ nonce->salt = sa->salt;
+ nonce->iv = *(u64 *) pkt_iv;
+ iv = (u8 *) nonce;
+ }
+ else
+ {
+ /* construct zero iv in front of the IP header */
+ iv = pkt_iv - hdr_len - iv_sz;
+ clib_memset_u8 (iv, 0, iv_sz);
+ /* include iv field in crypto */
+ crypto_start_offset -= iv_sz;
+ crypto_total_len += iv_sz;
+ }
+
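+ /* if the packet spans several buffers, the ICV sits at the tail of the
+  * last buffer and crypto must be computed over the buffer chain */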
+ if (lb != b)
+ {
+ /* chain */
+ flag |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
+ tag = vlib_buffer_get_tail (lb) - icv_sz;
+ crypto_total_len = esp_encrypt_chain_crypto (
+ vm, ptd, sa, b, lb, icv_sz, b->data + crypto_start_offset,
+ crypto_total_len + icv_sz, 0);
+ }
+
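+ /* with a separate integrity algorithm, the check must also cover the
+  * ESP header and IV that precede the payload */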
+ if (sa->integ_op_id)
+ {
+ integ_start_offset -= iv_sz + sizeof (esp_header_t);
+ integ_total_len += iv_sz + sizeof (esp_header_t);
+
+ if (b != lb)
+ {
+ integ_total_len = esp_encrypt_chain_integ (
+ vm, ptd, sa, b, lb, icv_sz,
+ payload - iv_sz - sizeof (esp_header_t),
+ payload_len + iv_sz + sizeof (esp_header_t), tag, 0);
+ }
+ else if (ipsec_sa_is_set_USE_ESN (sa))
+ {
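+ /* write the ESN high bits at the ICV location so they are included in
+  * the integrity check; the computed ICV later overwrites them */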
+ u32 seq_hi = clib_net_to_host_u32 (sa->seq_hi);
+ clib_memcpy_fast (tag, &seq_hi, sizeof (seq_hi));
+ integ_total_len += sizeof (seq_hi);
+ }
+ }
+
+ /* this always succeeds because we know the frame is not full */
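+ /* the integrity length is passed as a delta on top of the crypto length */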
+ vnet_crypto_async_add_to_frame (vm, async_frame, key_index, crypto_total_len,
+ integ_total_len - crypto_total_len,
+ crypto_start_offset, integ_start_offset, bi,
+ async_next, iv, tag, aad, flag);
+}
+
+always_inline uword
+esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
+ vlib_frame_t *frame, vnet_link_t lt, int is_tun,
+ u16 async_next_node)
+{
+ ipsec_main_t *im = &ipsec_main;
+ ipsec_per_thread_data_t *ptd = vec_elt_at_index (im->ptd, vm->thread_index);
+ u32 *from = vlib_frame_vector_args (frame);
+ u32 n_left = frame->n_vectors;
+ vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
+ u32 thread_index = vm->thread_index;
+ u16 buffer_data_size = vlib_buffer_get_default_data_size (vm);
+ u32 current_sa_index = ~0, current_sa_packets = 0;
+ u32 current_sa_bytes = 0, spi = 0;
+ u8 esp_align = 4, iv_sz = 0, icv_sz = 0;
+ ipsec_sa_t *sa0 = 0;
+ vlib_buffer_t *lb;
+ vnet_crypto_op_t **crypto_ops = &ptd->crypto_ops;
+ vnet_crypto_op_t **integ_ops = &ptd->integ_ops;
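+ /* one in-progress async frame per async crypto operation id */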
+ vnet_crypto_async_frame_t *async_frames[VNET_CRYPTO_ASYNC_OP_N_IDS];
+ int is_async = im->async_mode;
+ vnet_crypto_async_op_id_t async_op = ~0;
+ u16 drop_next =
+ (lt == VNET_LINK_IP6 ? ESP_ENCRYPT_NEXT_DROP6 :
+ (lt == VNET_LINK_IP4 ? ESP_ENCRYPT_NEXT_DROP4 :
+ ESP_ENCRYPT_NEXT_DROP_MPLS));
+ u16 handoff_next = (lt == VNET_LINK_IP6 ?
+ ESP_ENCRYPT_NEXT_HANDOFF6 :
+ (lt == VNET_LINK_IP4 ? ESP_ENCRYPT_NEXT_HANDOFF4 :
+ ESP_ENCRYPT_NEXT_HANDOFF_MPLS));
+ vlib_buffer_t *sync_bufs[VLIB_FRAME_SIZE];
+ u16 sync_nexts[VLIB_FRAME_SIZE], *sync_next = sync_nexts, n_sync = 0;
+ u16 n_async = 0;
+ u16 noop_nexts[VLIB_FRAME_SIZE], n_noop = 0;
+ u32 sync_bi[VLIB_FRAME_SIZE];
+ u32 noop_bi[VLIB_FRAME_SIZE];
+ esp_encrypt_error_t err;
+
+ vlib_get_buffers (vm, from, b, n_left);
+
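+ /* reset the per-thread scratch vectors reused across node invocations */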
+ vec_reset_length (ptd->crypto_ops);
+ vec_reset_length (ptd->integ_ops);
+ vec_reset_length (ptd->chained_crypto_ops);
+ vec_reset_length (ptd->chained_integ_ops);
+ vec_reset_length (ptd->async_frames);
+ vec_reset_length (ptd->chunks);
+ clib_memset (async_frames, 0, sizeof (async_frames));
+
+ while (n_left > 0)
+ {
+ u32 sa_index0;
+ dpo_id_t *dpo;
+ esp_header_t *esp;
+ u8 *payload, *next_hdr_ptr;
+ u16 payload_len, payload_len_total, n_bufs;
+ u32 hdr_len;
+
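+ /* start from the "packets received" counter; failure paths overwrite it */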
+ err = ESP_ENCRYPT_ERROR_RX_PKTS;
+
+ if (n_left > 2)
+ {
+ u8 *p;
+ vlib_prefetch_buffer_header (b[2], LOAD);
+ p = vlib_buffer_get_current (b[1]);
+ clib_prefetch_load (p);
+ p -= CLIB_CACHE_LINE_BYTES;
+ clib_prefetch_load (p);
+ /* speculate that the trailer goes in the first buffer */
+ CLIB_PREFETCH (vlib_buffer_get_tail (b[1]),
+ CLIB_CACHE_LINE_BYTES, LOAD);
+ }
+
+ if (is_tun)
+ {
+ /* we are on an ipsec tunnel's feature arc */
+ vnet_buffer (b[0])->ipsec.sad_index =
+ sa_index0 = ipsec_tun_protect_get_sa_out
+ (vnet_buffer (b[0])->ip.adj_index[VLIB_TX]);
+
+ if (PREDICT_FALSE (INDEX_INVALID == sa_index0))