return !u8x16_is_all_zero (ext_hdr_types == u8x16_splat (nexthdr));
#else
- return ((nexthdr ^ IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS) |
- (nexthdr ^ IP_PROTOCOL_IPV6_ROUTE) |
- ((nexthdr ^ IP_PROTOCOL_IPV6_FRAGMENTATION) != 0));
+ return (!(nexthdr ^ IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS) ||
+ !(nexthdr ^ IP_PROTOCOL_IPV6_ROUTE) ||
+ !(nexthdr ^ IP_PROTOCOL_IPV6_FRAGMENTATION));
#endif
}
return len;
}
+/* IPsec IV generation: IV requirements differ depending on the
+ * encryption mode: IVs must be unpredictable for AES-CBC, whereas they may
+ * be predictable but must never be reused with the same key material
+ * for CTR and GCM.
+ * To avoid reusing the same IVs between multiple VPP instances and between
+ * restarts, we use a properly chosen PRNG to generate IVs. To ensure the IV
+ * is unpredictable for CBC, it is then encrypted using the same key as the
+ * message. See NIST SP 800-38A and NIST SP 800-38D for more
+ * details. */
+static_always_inline void *
+esp_generate_iv (ipsec_sa_t *sa, void *payload, int iv_sz)
+{
+ ASSERT (iv_sz >= sizeof (u64));
+ u64 *iv = (u64 *) (payload - iv_sz);
+ clib_memset_u8 (iv, 0, iv_sz);
+ *iv = clib_pcg64i_random_r (&sa->iv_prng);
+ return iv;
+}
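+/* A sketch of the expected seeding (the exact function name and arguments
+ * are assumptions, not part of this patch): the PRNG is seeded once per SA
+ * at creation time, e.g.
+ *
+ *   clib_pcg64i_srandom_r (&sa->iv_prng, seed_lo, seed_hi);
+ *
+ * so that two SAs, or two runs of VPP, never share an IV stream. */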
+
static_always_inline void
esp_process_chained_ops (vlib_main_t * vm, vlib_node_runtime_t * node,
vnet_crypto_op_t * ops, vlib_buffer_t * b[],
if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
{
u32 bi = op->user_data;
- b[bi]->error = node->errors[ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR];
- nexts[bi] = drop_next;
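+ /* the helper below also bumps the per-SA error counter selected
+ * by sad_index, in addition to setting the drop next index */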
+ esp_encrypt_set_next_index (b[bi], node, vm->thread_index,
+ ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR,
+ bi, nexts, drop_next,
+ vnet_buffer (b[bi])->ipsec.sad_index);
n_fail--;
}
op++;
if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
{
u32 bi = op->user_data;
- b[bi]->error = node->errors[ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR];
- nexts[bi] = drop_next;
+ esp_encrypt_set_next_index (b[bi], node, vm->thread_index,
+ ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR,
+ bi, nexts, drop_next,
+ vnet_buffer (b[bi])->ipsec.sad_index);
n_fail--;
}
op++;
vnet_crypto_op_t *op;
vec_add2_aligned (crypto_ops[0], op, 1, CLIB_CACHE_LINE_BYTES);
vnet_crypto_op_init (op, sa0->crypto_enc_op_id);
+ u8 *crypto_start = payload;
+ /* esp_add_footer_and_icv() in esp_encrypt_inline() ensures we always
+ * have enough space for the ESP header and the footer, which includes
+ * the ICV */
+ ASSERT (payload_len > icv_sz);
+ u16 crypto_len = payload_len - icv_sz;
+
+ /* generate the IV in front of the payload */
+ void *pkt_iv = esp_generate_iv (sa0, payload, iv_sz);
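+ /* for CTR/GCM modes the PRNG output is used as-is; for CBC it is
+ * encrypted in place below so the on-wire IV stays unpredictable */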
- op->src = op->dst = payload;
op->key_index = sa0->crypto_key_index;
- op->len = payload_len - icv_sz;
op->user_data = bi;
if (ipsec_sa_is_set_IS_CTR (sa0))
{
- ASSERT (sizeof (u64) == iv_sz);
/* construct nonce in a scratch space in front of the IP header */
esp_ctr_nonce_t *nonce =
- (esp_ctr_nonce_t *) (payload - sizeof (u64) - hdr_len -
- sizeof (*nonce));
- u64 *pkt_iv = (u64 *) (payload - sizeof (u64));
-
+ (esp_ctr_nonce_t *) (pkt_iv - hdr_len - sizeof (*nonce));
if (ipsec_sa_is_set_IS_AEAD (sa0))
{
/* construct aad in a scratch space in front of the nonce */
op->aad = (u8 *) nonce - sizeof (esp_aead_t);
op->aad_len = esp_aad_fill (op->aad, esp, sa0, seq_hi);
- op->tag = payload + op->len;
+ op->tag = payload + crypto_len;
op->tag_len = 16;
}
else
}
nonce->salt = sa0->salt;
- nonce->iv = *pkt_iv = clib_host_to_net_u64 (sa0->ctr_iv_counter++);
+ nonce->iv = *(u64 *) pkt_iv;
op->iv = (u8 *) nonce;
}
else
{
- op->iv = payload - iv_sz;
- op->flags = VNET_CRYPTO_OP_FLAG_INIT_IV;
+ /* construct zero iv in front of the IP header */
+ op->iv = pkt_iv - hdr_len - iv_sz;
+ clib_memset_u8 (op->iv, 0, iv_sz);
+ /* include iv field in crypto */
+ crypto_start -= iv_sz;
+ crypto_len += iv_sz;
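+ /* CBC with a zero IV makes the first output block E_k(random), so
+ * the transmitted IV field itself is unpredictable (SP 800-38A) */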
}
- if (lb != b[0])
+ if (PREDICT_FALSE (lb != b[0]))
{
/* is chained */
op->flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
op->chunk_index = vec_len (ptd->chunks);
op->tag = vlib_buffer_get_tail (lb) - icv_sz;
- esp_encrypt_chain_crypto (vm, ptd, sa0, b[0], lb, icv_sz, payload,
- payload_len, &op->n_chunks);
+ esp_encrypt_chain_crypto (vm, ptd, sa0, b[0], lb, icv_sz,
+ crypto_start, crypto_len + icv_sz,
+ &op->n_chunks);
+ }
+ else
+ {
+ /* not chained */
+ op->src = op->dst = crypto_start;
+ op->len = crypto_len;
}
}
esp_post_data_t *post = esp_post_data (b);
u8 *tag, *iv, *aad = 0;
u8 flag = 0;
- u32 key_index;
- i16 crypto_start_offset, integ_start_offset = 0;
+ const u32 key_index = sa->crypto_key_index;
+ i16 crypto_start_offset, integ_start_offset;
u16 crypto_total_len, integ_total_len;
post->next_index = next;
/* crypto */
- crypto_start_offset = payload - b->data;
+ crypto_start_offset = integ_start_offset = payload - b->data;
crypto_total_len = integ_total_len = payload_len - icv_sz;
tag = payload + crypto_total_len;
- key_index = sa->linked_key_index;
+ /* generate the IV in front of the payload */
+ void *pkt_iv = esp_generate_iv (sa, payload, iv_sz);
if (ipsec_sa_is_set_IS_CTR (sa))
{
- ASSERT (sizeof (u64) == iv_sz);
/* construct nonce in a scratch space in front of the IP header */
- esp_ctr_nonce_t *nonce = (esp_ctr_nonce_t *) (payload - sizeof (u64) -
- hdr_len - sizeof (*nonce));
- u64 *pkt_iv = (u64 *) (payload - sizeof (u64));
-
+ esp_ctr_nonce_t *nonce =
+ (esp_ctr_nonce_t *) (pkt_iv - hdr_len - sizeof (*nonce));
if (ipsec_sa_is_set_IS_AEAD (sa))
{
/* construct aad in a scratch space in front of the nonce */
aad = (u8 *) nonce - sizeof (esp_aead_t);
esp_aad_fill (aad, esp, sa, sa->seq_hi);
- key_index = sa->crypto_key_index;
}
else
{
}
nonce->salt = sa->salt;
- nonce->iv = *pkt_iv = clib_host_to_net_u64 (sa->ctr_iv_counter++);
+ nonce->iv = *(u64 *) pkt_iv;
iv = (u8 *) nonce;
}
else
{
- iv = payload - iv_sz;
- flag |= VNET_CRYPTO_OP_FLAG_INIT_IV;
+ /* construct zero iv in front of the IP header */
+ iv = pkt_iv - hdr_len - iv_sz;
+ clib_memset_u8 (iv, 0, iv_sz);
+ /* include iv field in crypto */
+ crypto_start_offset -= iv_sz;
+ crypto_total_len += iv_sz;
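+ /* same zero-IV construction as in the synchronous path above */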
}
if (lb != b)
/* chain */
flag |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
tag = vlib_buffer_get_tail (lb) - icv_sz;
- crypto_total_len = esp_encrypt_chain_crypto (vm, ptd, sa, b, lb, icv_sz,
- payload, payload_len, 0);
+ crypto_total_len = esp_encrypt_chain_crypto (
+ vm, ptd, sa, b, lb, icv_sz, b->data + crypto_start_offset,
+ crypto_total_len + icv_sz, 0);
}
if (sa->integ_op_id)
{
- integ_start_offset = crypto_start_offset - iv_sz - sizeof (esp_header_t);
+ integ_start_offset -= iv_sz + sizeof (esp_header_t);
integ_total_len += iv_sz + sizeof (esp_header_t);
if (b != lb)
if (PREDICT_FALSE (INDEX_INVALID == sa_index0))
{
err = ESP_ENCRYPT_ERROR_NO_PROTECTION;
- esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
- drop_next);
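+ /* no SA exists here, so there is no per-SA error counter to bump;
+ * set the error and next index directly */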
+ noop_nexts[n_noop] = drop_next;
+ b[0]->error = node->errors[err];
goto trace;
}
}
if (sa_index0 != current_sa_index)
{
if (current_sa_packets)
- vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
- current_sa_index,
- current_sa_packets,
- current_sa_bytes);
+ vlib_increment_combined_counter (
+ &ipsec_sa_counters, thread_index, current_sa_index,
+ current_sa_packets, current_sa_bytes);
current_sa_packets = current_sa_bytes = 0;
sa0 = ipsec_sa_get (sa_index0);
!ipsec_sa_is_set_NO_ALGO_NO_DROP (sa0)))
{
err = ESP_ENCRYPT_ERROR_NO_ENCRYPTION;
- esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
- drop_next);
+ esp_encrypt_set_next_index (b[0], node, thread_index, err,
+ n_noop, noop_nexts, drop_next,
+ sa_index0);
goto trace;
}
+ current_sa_index = sa_index0;
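+ /* prefetch the SA counters now to hide the cache miss taken when
+ * they are incremented for this SA's packets later on */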
+ vlib_prefetch_combined_counter (&ipsec_sa_counters, thread_index,
+ current_sa_index);
+
/* fetch the second cacheline ASAP */
clib_prefetch_load (sa0->cacheline1);
- current_sa_index = sa_index0;
spi = clib_net_to_host_u32 (sa0->spi);
esp_align = sa0->esp_block_align;
icv_sz = sa0->integ_icv_size;
is_async = im->async_mode | ipsec_sa_is_set_IS_ASYNC (sa0);
}
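+ /* sa0->thread_index is 16 bits wide, hence the (u16) cast below */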
- if (PREDICT_FALSE (~0 == sa0->thread_index))
+ if (PREDICT_FALSE ((u16) ~0 == sa0->thread_index))
{
/* this is the first packet to use this SA, claim the SA
* for this thread. this could happen simultaneously on
{
vnet_buffer (b[0])->ipsec.thread_index = sa0->thread_index;
err = ESP_ENCRYPT_ERROR_HANDOFF;
- esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
- handoff_next);
+ esp_encrypt_set_next_index (b[0], node, thread_index, err, n_noop,
+ noop_nexts, handoff_next,
+ current_sa_index);
goto trace;
}
if (n_bufs == 0)
{
err = ESP_ENCRYPT_ERROR_NO_BUFFERS;
- esp_set_next_index (b[0], node, err, n_noop, noop_nexts, drop_next);
+ esp_encrypt_set_next_index (b[0], node, thread_index, err, n_noop,
+ noop_nexts, drop_next, current_sa_index);
goto trace;
}
if (PREDICT_FALSE (esp_seq_advance (sa0)))
{
err = ESP_ENCRYPT_ERROR_SEQ_CYCLED;
- esp_set_next_index (b[0], node, err, n_noop, noop_nexts, drop_next);
+ esp_encrypt_set_next_index (b[0], node, thread_index, err, n_noop,
+ noop_nexts, drop_next, current_sa_index);
goto trace;
}
if (!next_hdr_ptr)
{
err = ESP_ENCRYPT_ERROR_NO_BUFFERS;
- esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
- drop_next);
+ esp_encrypt_set_next_index (b[0], node, thread_index, err,
+ n_noop, noop_nexts, drop_next,
+ current_sa_index);
goto trace;
}
b[0]->flags &= ~VLIB_BUFFER_TOTAL_LENGTH_VALID;
if ((old_ip_hdr - ip_len) < &b[0]->pre_data[0])
{
err = ESP_ENCRYPT_ERROR_NO_BUFFERS;
- esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
- drop_next);
+ esp_encrypt_set_next_index (b[0], node, thread_index, err,
+ n_noop, noop_nexts, drop_next,
+ current_sa_index);
goto trace;
}
if (!next_hdr_ptr)
{
err = ESP_ENCRYPT_ERROR_NO_BUFFERS;
- esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
- drop_next);
+ esp_encrypt_set_next_index (b[0], node, thread_index, err,
+ n_noop, noop_nexts, drop_next,
+ current_sa_index);
goto trace;
}
{
async_frames[async_op] =
vnet_crypto_async_get_frame (vm, async_op);
+
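+ /* getting a frame can fail when the async engine runs out of
+ * frames; drop the packet rather than enqueue to a NULL frame */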
+ if (PREDICT_FALSE (!async_frames[async_op]))
+ {
+ err = ESP_ENCRYPT_ERROR_NO_AVAIL_FRAME;
+ esp_encrypt_set_next_index (b[0], node, thread_index, err,
+ n_noop, noop_nexts, drop_next,
+ current_sa_index);
+ goto trace;
+ }
+
/* Save the frame to the list we'll submit at the end */
vec_add1 (ptd->async_frames, async_frames[async_op]);
}
{
n_noop += esp_async_recycle_failed_submit (
vm, *async_frame, node, ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR,
- n_noop, noop_bi, noop_nexts, drop_next);
+ IPSEC_SA_ERROR_CRYPTO_ENGINE_ERROR, n_noop, noop_bi,
+ noop_nexts, drop_next, true);
vnet_crypto_async_reset_frame (*async_frame);
vnet_crypto_async_free_frame (vm, *async_frame);
}