+ }
+ }
+ }
+}
+
+always_inline uword
+esp_decrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
+ vlib_frame_t *from_frame, int is_ip6, int is_tun,
+ u16 async_next_node)
+{
+ ipsec_main_t *im = &ipsec_main;
+ u32 thread_index = vm->thread_index;
+ u16 len;
+ ipsec_per_thread_data_t *ptd = vec_elt_at_index (im->ptd, thread_index);
+ u32 *from = vlib_frame_vector_args (from_frame);
+ u32 n_left = from_frame->n_vectors;
+ vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
+ vlib_buffer_t *sync_bufs[VLIB_FRAME_SIZE];
+ u16 sync_nexts[VLIB_FRAME_SIZE], *sync_next = sync_nexts, n_sync = 0;
+ u16 async_nexts[VLIB_FRAME_SIZE], *async_next = async_nexts, n_async = 0;
+ u16 noop_nexts[VLIB_FRAME_SIZE], *noop_next = noop_nexts, n_noop = 0;
+ u32 sync_bi[VLIB_FRAME_SIZE];
+ u32 noop_bi[VLIB_FRAME_SIZE];
+ esp_decrypt_packet_data_t pkt_data[VLIB_FRAME_SIZE], *pd = pkt_data;
+ esp_decrypt_packet_data2_t pkt_data2[VLIB_FRAME_SIZE], *pd2 = pkt_data2;
+ esp_decrypt_packet_data_t cpd = { };
+ u32 current_sa_index = ~0, current_sa_bytes = 0, current_sa_pkts = 0;
+ const u8 esp_sz = sizeof (esp_header_t);
+ ipsec_sa_t *sa0 = 0;
+ vnet_crypto_op_t _op, *op = &_op;
+ vnet_crypto_op_t **crypto_ops = &ptd->crypto_ops;
+ vnet_crypto_op_t **integ_ops = &ptd->integ_ops;
+ int is_async = im->async_mode;
+ vnet_crypto_async_op_id_t async_op = ~0;
+ vnet_crypto_async_frame_t *async_frames[VNET_CRYPTO_ASYNC_OP_N_IDS];
+ esp_decrypt_error_t err;
+
+ vlib_get_buffers (vm, from, b, n_left);
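+ /* sync mode reuses the per-thread operation vectors across frames, so reset them before filling */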
+ if (!is_async)
+ {
+ vec_reset_length (ptd->crypto_ops);
+ vec_reset_length (ptd->integ_ops);
+ vec_reset_length (ptd->chained_crypto_ops);
+ vec_reset_length (ptd->chained_integ_ops);
+ }
+ vec_reset_length (ptd->async_frames);
+ vec_reset_length (ptd->chunks);
+ clib_memset (sync_nexts, -1, sizeof (sync_nexts));
+ clib_memset (async_frames, 0, sizeof (async_frames));
+
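+ /* walk the frame one packet at a time, validating each packet and caching the state needed to decrypt it */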
+ while (n_left > 0)
+ {
+ u8 *payload;
+
+ err = ESP_DECRYPT_ERROR_RX_PKTS;
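+ /* prefetch the buffer header two packets ahead and the payload of the next packet (plus the cacheline just before it) */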
+ if (n_left > 2)
+ {
+ u8 *p;
+ vlib_prefetch_buffer_header (b[2], LOAD);
+ p = vlib_buffer_get_current (b[1]);
+ CLIB_PREFETCH (p, CLIB_CACHE_LINE_BYTES, LOAD);
+ p -= CLIB_CACHE_LINE_BYTES;
+ CLIB_PREFETCH (p, CLIB_CACHE_LINE_BYTES, LOAD);
+ }
+
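+ /* linearize the buffer chain; drop the packet if no buffers are available */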
+ u32 n_bufs = vlib_buffer_chain_linearize (vm, b[0]);
+ if (n_bufs == 0)
+ {
+ err = ESP_DECRYPT_ERROR_NO_BUFFERS;
+ esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
+ ESP_DECRYPT_NEXT_DROP);
+ goto next;
+ }
+
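+ /* when the SA changes, flush the counters accumulated for the previous SA and cache the new SA's crypto parameters */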
+ if (vnet_buffer (b[0])->ipsec.sad_index != current_sa_index)
+ {
+ if (current_sa_pkts)
+ vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
+ current_sa_index,
+ current_sa_pkts,
+ current_sa_bytes);
+ current_sa_bytes = current_sa_pkts = 0;
+
+ current_sa_index = vnet_buffer (b[0])->ipsec.sad_index;
+ sa0 = ipsec_sa_get (current_sa_index);
+
+ /* fetch the second cacheline ASAP */
+ CLIB_PREFETCH (sa0->cacheline1, CLIB_CACHE_LINE_BYTES, LOAD);
+ cpd.icv_sz = sa0->integ_icv_size;
+ cpd.iv_sz = sa0->crypto_iv_size;
+ cpd.flags = sa0->flags;
+ cpd.sa_index = current_sa_index;
+ is_async = im->async_mode | ipsec_sa_is_set_IS_ASYNC (sa0);
+ }
+
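+ /* in async mode, decrypt operations are batched into crypto async frames and submitted once the whole frame has been processed */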
+ if (is_async)
+ {
+ async_op = sa0->crypto_async_dec_op_id;
+
+ /* get a frame for this op if we don't yet have one or it's full
+ */
+ if (NULL == async_frames[async_op] ||
+ vnet_crypto_async_frame_is_full (async_frames[async_op]))
+ {
+ async_frames[async_op] =
+ vnet_crypto_async_get_frame (vm, async_op);
+ /* Save the frame to the list we'll submit at the end */
+ vec_add1 (ptd->async_frames, async_frames[async_op]);
+ }
+ }
+
+ if (PREDICT_FALSE (~0 == sa0->thread_index))
+ {
+ /* this is the first packet to use this SA, claim the SA
+ * for this thread. this could happen simultaneously on
+ * another thread */
+ clib_atomic_cmp_and_swap (&sa0->thread_index, ~0,
+ ipsec_sa_assign_thread (thread_index));
+ }
+
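+ /* packets for an SA owned by another thread are handed off to that thread */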
+ if (PREDICT_FALSE (thread_index != sa0->thread_index))
+ {
+ vnet_buffer (b[0])->ipsec.thread_index = sa0->thread_index;
+ err = ESP_DECRYPT_ERROR_HANDOFF;
+ esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
+ ESP_DECRYPT_NEXT_HANDOFF);
+ goto next;
+ }
+
+ /* store packet data for next round for easier prefetch */
+ pd->sa_data = cpd.sa_data;
+ pd->current_data = b[0]->current_data;
+ pd->hdr_sz = pd->current_data - vnet_buffer (b[0])->l3_hdr_offset;
+ payload = b[0]->data + pd->current_data;
+ pd->seq = clib_host_to_net_u32 (((esp_header_t *) payload)->seq);
+ pd->is_chain = 0;
+ pd2->lb = b[0];
+ pd2->free_buffer_index = 0;
+ pd2->icv_removed = 0;
+
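+ /* multi-buffer packets take the chained crypto/integ operation path */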
+ if (n_bufs > 1)
+ {
+ pd->is_chain = 1;
+ /* find last buffer in the chain */
+ while (pd2->lb->flags & VLIB_BUFFER_NEXT_PRESENT)
+ pd2->lb = vlib_get_buffer (vm, pd2->lb->next_buffer);
+
+ crypto_ops = &ptd->chained_crypto_ops;
+ integ_ops = &ptd->chained_integ_ops;
+ }
+
+ pd->current_length = b[0]->current_length;
+
+ /* anti-replay check */
+ if (ipsec_sa_anti_replay_check (sa0, pd->seq))
+ {
+ err = ESP_DECRYPT_ERROR_REPLAY;
+ esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
+ ESP_DECRYPT_NEXT_DROP);
+ goto next;
+ }
+
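+ /* the packet must be large enough to carry the ESP header, IV and ICV */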
+ if (pd->current_length < cpd.icv_sz + esp_sz + cpd.iv_sz)
+ {
+ err = ESP_DECRYPT_ERROR_RUNT;
+ esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
+ ESP_DECRYPT_NEXT_DROP);
+ goto next;
+ }
+
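+ /* working length excludes the ICV; account the packet and its bytes to the current SA */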
+ len = pd->current_length - cpd.icv_sz;
+ current_sa_pkts += 1;
+ current_sa_bytes += vlib_buffer_length_in_chain (vm, b[0]);