+ else
+ tag = vlib_buffer_get_tail (pd2->lb) - icv_sz;
+
+ flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
+ if (esp_decrypt_chain_integ (vm, ptd, pd, pd2, sa0, b, icv_sz,
+ payload, pd->current_length, &tag, 0,
+ &integ_len) < 0)
+ {
+ /* allocate buffer failed, will not add to frame and drop */
+ return (ESP_DECRYPT_ERROR_NO_BUFFERS);
+ }
+ }
+ else
+ esp_insert_esn (vm, sa0, pd, pd2, &integ_len, &tag, &len, b, payload);
+ }
+
+out:
+ /* crypto */
+ payload += esp_sz;
+ len -= esp_sz;
+ iv = payload;
+
+ if (ipsec_sa_is_set_IS_CTR (sa0))
+ {
+ /* construct nonce in a scratch space in front of the IP header */
+ esp_ctr_nonce_t *nonce =
+ (esp_ctr_nonce_t *) (payload - esp_sz - pd->hdr_sz - sizeof (*nonce));
+ if (ipsec_sa_is_set_IS_AEAD (sa0))
+ {
+	  /* construct aad in a scratch space in front of the nonce */
+ esp_header_t *esp0 = (esp_header_t *) (payload - esp_sz);
+ aad = (u8 *) nonce - sizeof (esp_aead_t);
+ esp_aad_fill (aad, esp0, sa0, pd->seq_hi);
+ tag = payload + len;
+ if (PREDICT_FALSE (ipsec_sa_is_set_IS_NULL_GMAC (sa0)))
+ {
+ /* RFC-4543 ENCR_NULL_AUTH_AES_GMAC: IV is part of AAD */
+ payload -= iv_sz;
+ len += iv_sz;
+ }
+ }
+ else
+ {
+ nonce->ctr = clib_host_to_net_u32 (1);
+ }
+ nonce->salt = sa0->salt;
+ ASSERT (sizeof (u64) == iv_sz);
+ nonce->iv = *(u64 *) iv;
+ iv = (u8 *) nonce;
+ }
+
+ crypto_start_offset = (payload += iv_sz) - b->data;
+ crypto_len = len - iv_sz;
+
+ if (pd->is_chain && (pd2->lb != b))
+ {
+ /* buffer is chained */
+ flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
+
+ crypto_len = esp_decrypt_chain_crypto (vm, ptd, pd, pd2, sa0, b, icv_sz,
+ payload,
+ len - pd->iv_sz + pd->icv_sz,
+ &tag, 0);
+ }
+
+ *async_pd = *pd;
+ *async_pd2 = *pd2;
+
+ /* for AEAD integ_len - crypto_len will be negative, it is ok since it
+ * is ignored by the engine. */
+ vnet_crypto_async_add_to_frame (
+ vm, f, key_index, crypto_len, integ_len - crypto_len, crypto_start_offset,
+ integ_start_offset, bi, async_next, iv, tag, aad, flags);
+
+ return (ESP_DECRYPT_ERROR_RX_PKTS);
+}
+
+static_always_inline void
+esp_decrypt_post_crypto (vlib_main_t *vm, vlib_node_runtime_t *node,
+ const u16 *next_by_next_header,
+ const esp_decrypt_packet_data_t *pd,
+ const esp_decrypt_packet_data2_t *pd2,
+ vlib_buffer_t *b, u16 *next, int is_ip6, int is_tun,
+ int is_async)
+{
+ ipsec_sa_t *sa0 = ipsec_sa_get (pd->sa_index);
+ vlib_buffer_t *lb = b;
+ const u8 esp_sz = sizeof (esp_header_t);
+ const u8 tun_flags = IPSEC_SA_FLAG_IS_TUNNEL | IPSEC_SA_FLAG_IS_TUNNEL_V6;
+ u8 pad_length = 0, next_header = 0;
+ u16 icv_sz;
+ u64 n_lost;
+
+ /*
+   * redo the anti-replay check
+ * in this frame say we have sequence numbers, s, s+1, s+1, s+1
+ * and s and s+1 are in the window. When we did the anti-replay
+ * check above we did so against the state of the window (W),
+ * after packet s-1. So each of the packets in the sequence will be
+ * accepted.
+   * This time s will be checked against Ws-1, s+1 checked against Ws
+ * (i.e. the window state is updated/advanced)
+ * so this time the successive s+1 packet will be dropped.
+ * This is a consequence of batching the decrypts. If the
+ * check-decrypt-advance process was done for each packet it would
+ * be fine. But we batch the decrypts because it's much more efficient
+ * to do so in SW and if we offload to HW and the process is async.
+ *
+ * You're probably thinking, but this means an attacker can send the
+ * above sequence and cause VPP to perform decrypts that will fail,
+ * and that's true. But if the attacker can determine s (a valid
+ * sequence number in the window) which is non-trivial, it can generate
+ * a sequence s, s+1, s+2, s+3, ... s+n and nothing will prevent any
+ * implementation, sequential or batching, from decrypting these.
+ */
+ if (PREDICT_FALSE (ipsec_sa_is_set_ANTI_REPLAY_HUGE (sa0)))
+ {
+ if (ipsec_sa_anti_replay_and_sn_advance (sa0, pd->seq, pd->seq_hi, true,
+ NULL, true))
+ {
+ esp_decrypt_set_next_index (b, node, vm->thread_index,
+ ESP_DECRYPT_ERROR_REPLAY, 0, next,
+ ESP_DECRYPT_NEXT_DROP, pd->sa_index);
+ return;
+ }
+ n_lost = ipsec_sa_anti_replay_advance (sa0, vm->thread_index, pd->seq,
+ pd->seq_hi, true);
+ }
+ else
+ {
+ if (ipsec_sa_anti_replay_and_sn_advance (sa0, pd->seq, pd->seq_hi, true,
+ NULL, false))
+ {
+ esp_decrypt_set_next_index (b, node, vm->thread_index,
+ ESP_DECRYPT_ERROR_REPLAY, 0, next,
+ ESP_DECRYPT_NEXT_DROP, pd->sa_index);
+ return;
+ }
+ n_lost = ipsec_sa_anti_replay_advance (sa0, vm->thread_index, pd->seq,
+ pd->seq_hi, false);
+ }
+
+ vlib_prefetch_simple_counter (&ipsec_sa_err_counters[IPSEC_SA_ERROR_LOST],
+ vm->thread_index, pd->sa_index);
+
+ if (pd->is_chain)
+ {
+ lb = pd2->lb;
+ icv_sz = pd2->icv_removed ? 0 : pd->icv_sz;
+ if (pd2->free_buffer_index)
+ {
+ vlib_buffer_free_one (vm, pd2->free_buffer_index);
+ lb->next_buffer = 0;
+ }
+ if (lb->current_length < sizeof (esp_footer_t) + icv_sz)
+ {
+	  /* esp footer is either split between two buffers or contained
+	   * entirely in the second-to-last buffer */
+
+ vlib_buffer_t *before_last = b, *bp = b;
+ while (bp->flags & VLIB_BUFFER_NEXT_PRESENT)
+ {
+ before_last = bp;
+ bp = vlib_get_buffer (vm, bp->next_buffer);
+ }
+ u8 *bt = vlib_buffer_get_tail (before_last);
+
+ if (lb->current_length == icv_sz)
+ {
+ esp_footer_t *f = (esp_footer_t *) (bt - sizeof (*f));
+ pad_length = f->pad_length;
+ next_header = f->next_header;
+ }