+ }
+
+ if (PREDICT_FALSE (~0 == sa0->thread_index))
+ {
+ /* this is the first packet to use this SA, claim the SA
+ * for this thread. this could happen simultaneously on
+ * another thread */
+ clib_atomic_cmp_and_swap (&sa0->thread_index, ~0,
+ ipsec_sa_assign_thread (thread_index));
+ }
+
+ if (PREDICT_FALSE (thread_index != sa0->thread_index))
+ {
+ esp_set_next_index (is_async, from, nexts, from[b - bufs],
+ &n_async_drop, ESP_DECRYPT_NEXT_HANDOFF, next);
+ next[0] = ESP_DECRYPT_NEXT_HANDOFF;
+ goto next;
+ }
+
+ /* store packet data for next round for easier prefetch */
+ pd->sa_data = cpd.sa_data;
+ pd->current_data = b[0]->current_data;
+ pd->hdr_sz = pd->current_data - vnet_buffer (b[0])->l3_hdr_offset;
+ payload = b[0]->data + pd->current_data;
+ pd->seq = clib_host_to_net_u32 (((esp_header_t *) payload)->seq);
+ pd->is_chain = 0;
+ pd2->lb = b[0];
+ pd2->free_buffer_index = 0;
+ pd2->icv_removed = 0;
+
+ if (n_bufs > 1)
+ {
+ pd->is_chain = 1;
+ /* find last buffer in the chain */
+ while (pd2->lb->flags & VLIB_BUFFER_NEXT_PRESENT)
+ pd2->lb = vlib_get_buffer (vm, pd2->lb->next_buffer);
+
+ crypto_ops = &ptd->chained_crypto_ops;
+ integ_ops = &ptd->chained_integ_ops;
+ }
+
+ pd->current_length = b[0]->current_length;
+
+ /* anti-replay check */
+ if (ipsec_sa_anti_replay_check (sa0, pd->seq))
+ {
+ b[0]->error = node->errors[ESP_DECRYPT_ERROR_REPLAY];
+ esp_set_next_index (is_async, from, nexts, from[b - bufs],
+ &n_async_drop, ESP_DECRYPT_NEXT_DROP, next);
+ goto next;
+ }
+
+ if (pd->current_length < cpd.icv_sz + esp_sz + cpd.iv_sz)
+ {
+ b[0]->error = node->errors[ESP_DECRYPT_ERROR_RUNT];
+ esp_set_next_index (is_async, from, nexts, from[b - bufs],
+ &n_async_drop, ESP_DECRYPT_NEXT_DROP, next);
+ goto next;
+ }
+
+ len = pd->current_length - cpd.icv_sz;
+ current_sa_pkts += 1;
+ current_sa_bytes += vlib_buffer_length_in_chain (vm, b[0]);
+
+ if (is_async)
+ {
+ int ret = esp_decrypt_prepare_async_frame (vm, node, ptd,
+ &async_frame,
+ sa0, payload, len,
+ cpd.icv_sz,
+ cpd.iv_sz,
+ pd, pd2,
+ from[b - bufs],
+ b[0], next, async_next);
+ if (PREDICT_FALSE (ret < 0))
+ {
+ b[0]->error = ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR;
+ esp_set_next_index (1, from, nexts, from[b - bufs],
+ &n_async_drop, ESP_DECRYPT_NEXT_DROP, next);
+ /* when next[0] is ESP_DECRYPT_NEXT_DROP we only have to drop
+ * the current packet. Otherwise it is frame submission error
+ * thus we have to drop the whole frame.
+ */
+ if (next[0] != ESP_DECRYPT_NEXT_DROP && async_frame->n_elts)
+ esp_async_recycle_failed_submit (async_frame, b, from,
+ nexts, &n_async_drop,
+ ESP_DECRYPT_NEXT_DROP,
+ ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR);
+ goto next;
+ }
+ }
+ else
+ esp_decrypt_prepare_sync_op (vm, node, ptd, &crypto_ops, &integ_ops,
+ op, sa0, payload, len, cpd.icv_sz,
+ cpd.iv_sz, pd, pd2, b[0], next,
+ b - bufs);
+ /* next */
+ next:
+ n_left -= 1;
+ next += 1;
+ pd += 1;
+ pd2 += 1;
+ b += 1;
+ }
+
+ if (PREDICT_TRUE (~0 != current_sa_index))
+ vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
+ current_sa_index, current_sa_pkts,
+ current_sa_bytes);
+
+ if (is_async)
+ {
+ if (async_frame && async_frame->n_elts)
+ {
+ if (vnet_crypto_async_submit_open_frame (vm, async_frame) < 0)
+ esp_async_recycle_failed_submit (async_frame, b, from, nexts,
+ &n_async_drop,
+ ESP_DECRYPT_NEXT_DROP,
+ ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR);
+ }
+
+ /* no post process in async */
+ vlib_node_increment_counter (vm, node->node_index,
+ ESP_DECRYPT_ERROR_RX_PKTS, n_left);
+ if (n_async_drop)
+ vlib_buffer_enqueue_to_next (vm, node, from, nexts, n_async_drop);
+
+ return n_left;
+ }
+ else
+ {
+ esp_process_ops (vm, node, ptd->integ_ops, bufs, nexts,
+ ESP_DECRYPT_ERROR_INTEG_ERROR);
+ esp_process_chained_ops (vm, node, ptd->chained_integ_ops, bufs, nexts,
+ ptd->chunks, ESP_DECRYPT_ERROR_INTEG_ERROR);
+
+ esp_process_ops (vm, node, ptd->crypto_ops, bufs, nexts,
+ ESP_DECRYPT_ERROR_DECRYPTION_FAILED);
+ esp_process_chained_ops (vm, node, ptd->chained_crypto_ops, bufs, nexts,
+ ptd->chunks,
+ ESP_DECRYPT_ERROR_DECRYPTION_FAILED);
+ }
+
+ /* Post decryption round - adjust packet data start and length and next
+ node */
+
+ n_left = from_frame->n_vectors;
+ next = nexts;
+ pd = pkt_data;
+ pd2 = pkt_data2;
+ b = bufs;
+
+ while (n_left)
+ {
+ if (n_left >= 2)
+ {
+ void *data = b[1]->data + pd[1].current_data;
+
+ /* buffer metadata */
+ vlib_prefetch_buffer_header (b[1], LOAD);
+
+ /* esp_footer_t */
+ CLIB_PREFETCH (data + pd[1].current_length - pd[1].icv_sz - 2,
+ CLIB_CACHE_LINE_BYTES, LOAD);
+
+ /* packet headers */
+ CLIB_PREFETCH (data - CLIB_CACHE_LINE_BYTES,
+ CLIB_CACHE_LINE_BYTES * 2, LOAD);
+ }
+
+ /* save the sa_index as GRE_teb post_crypto changes L2 opaque */
+ if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
+ current_sa_index = vnet_buffer (b[0])->ipsec.sad_index;
+
+ if (next[0] >= ESP_DECRYPT_N_NEXT)
+ esp_decrypt_post_crypto (vm, node, pd, pd2, b[0], next, is_ip6,
+ is_tun, 0);
+
+ /* trace: */
+ if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ esp_decrypt_trace_t *tr;
+ tr = vlib_add_trace (vm, node, b[0], sizeof (*tr));
+ sa0 = pool_elt_at_index (im->sad, current_sa_index);
+ tr->crypto_alg = sa0->crypto_alg;
+ tr->integ_alg = sa0->integ_alg;
+ tr->seq = pd->seq;
+ tr->sa_seq = sa0->last_seq;
+ tr->sa_seq_hi = sa0->seq_hi;
+ }
+
+ /* next */
+ n_left -= 1;
+ next += 1;
+ pd += 1;
+ pd2 += 1;
+ b += 1;
+ }
+
+ n_left = from_frame->n_vectors;
+ vlib_node_increment_counter (vm, node->node_index,
+ ESP_DECRYPT_ERROR_RX_PKTS, n_left);
+
+ vlib_buffer_enqueue_to_next (vm, node, from, nexts, n_left);
+
+ return n_left;
+}
+
+always_inline uword
+esp_decrypt_post_inline (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame, int is_ip6, int is_tun)
+{
+ ipsec_main_t *im = &ipsec_main;
+ u32 *from = vlib_frame_vector_args (from_frame);
+ u32 n_left = from_frame->n_vectors;
+ vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
+ u16 nexts[VLIB_FRAME_SIZE], *next = nexts;
+ vlib_get_buffers (vm, from, b, n_left);
+
+ while (n_left > 0)
+ {
+ esp_decrypt_packet_data_t *pd = &(esp_post_data (b[0]))->decrypt_data;
+
+ if (n_left > 2)
+ {
+ vlib_prefetch_buffer_header (b[2], LOAD);
+ vlib_prefetch_buffer_header (b[1], LOAD);
+ }
+
+ if (!pd->is_chain)
+ esp_decrypt_post_crypto (vm, node, pd, 0, b[0], next, is_ip6, is_tun,
+ 1);
+ else
+ {
+ esp_decrypt_packet_data2_t *pd2 = esp_post_data2 (b[0]);
+ esp_decrypt_post_crypto (vm, node, pd, pd2, b[0], next, is_ip6,
+ is_tun, 1);
+ }
+
+ /*trace: */
+ if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ ipsec_sa_t *sa0 = pool_elt_at_index (im->sad, pd->sa_index);
+ esp_decrypt_trace_t *tr;
+ esp_decrypt_packet_data_t *async_pd =
+ &(esp_post_data (b[0]))->decrypt_data;
+ tr = vlib_add_trace (vm, node, b[0], sizeof (*tr));
+ sa0 = pool_elt_at_index (im->sad, async_pd->sa_index);