op->key_index = sa0->crypto_key_index;
op->iv = payload;
- if (ipsec_sa_is_set_IS_AEAD (sa0))
+ if (ipsec_sa_is_set_IS_CTR (sa0))
{
- esp_header_t *esp0;
- esp_aead_t *aad;
- u8 *scratch;
-
- /*
- * construct the AAD and the nonce (Salt || IV) in a scratch
- * space in front of the IP header.
- */
- scratch = payload - esp_sz;
- esp0 = (esp_header_t *) (scratch);
-
- scratch -= (sizeof (*aad) + pd->hdr_sz);
- op->aad = scratch;
-
- op->aad_len = esp_aad_fill (op->aad, esp0, sa0);
-
- /*
- * we don't need to refer to the ESP header anymore so we
- * can overwrite it with the salt and use the IV where it is
- * to form the nonce = (Salt + IV)
- */
- op->iv -= sizeof (sa0->salt);
- clib_memcpy_fast (op->iv, &sa0->salt, sizeof (sa0->salt));
-
- op->tag = payload + len;
- op->tag_len = 16;
+ /* construct nonce in a scratch space in front of the IP header */
+ esp_ctr_nonce_t *nonce =
+ (esp_ctr_nonce_t *) (payload - esp_sz - pd->hdr_sz -
+ sizeof (*nonce));
+ if (ipsec_sa_is_set_IS_AEAD (sa0))
+ {
+	  /* construct aad in a scratch space in front of the nonce */
+ esp_header_t *esp0 = (esp_header_t *) (payload - esp_sz);
+ op->aad = (u8 *) nonce - sizeof (esp_aead_t);
+ op->aad_len = esp_aad_fill (op->aad, esp0, sa0);
+ op->tag = payload + len;
+ op->tag_len = 16;
+ }
+ else
+ {
+ nonce->ctr = clib_host_to_net_u32 (1);
+ }
+ nonce->salt = sa0->salt;
+ ASSERT (sizeof (u64) == iv_sz);
+ nonce->iv = *(u64 *) op->iv;
+ op->iv = (u8 *) nonce;
}
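/* Editorial sketch: the pointer arithmetic above assumes a packed nonce
 * layout along these lines (the real definitions live in esp.h; treat the
 * exact shapes here as an assumption). For AEAD (GCM) only the leading
 * Salt || IV 12 bytes form the nonce, which is why that branch never
 * touches ctr:
 */
typedef struct
{
  u32 salt; /* SA salt, stored as received (network order) */
  u64 iv;	  /* per-packet IV, read in place from the ESP payload */
  u32 ctr; /* block counter, big-endian 1, used only for plain CTR */
} __clib_packed esp_ctr_nonce_t;

typedef struct
{
  /* esp_aad_fill writes SPI and sequence number here (plus the high
   * sequence bits when ESN is enabled) and returns the resulting length */
  u32 data[3];
} esp_aead_t;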
op->src = op->dst = payload += iv_sz;
op->len = len - iv_sz;
len -= esp_sz;
iv = payload;
- if (ipsec_sa_is_set_IS_AEAD (sa0))
+ if (ipsec_sa_is_set_IS_CTR (sa0))
{
- esp_header_t *esp0;
- u8 *scratch;
-
- /*
- * construct the AAD and the nonce (Salt || IV) in a scratch
- * space in front of the IP header.
- */
- scratch = payload - esp_sz;
- esp0 = (esp_header_t *) (scratch);
-
- scratch -= (sizeof (esp_aead_t) + pd->hdr_sz);
- aad = scratch;
-
- esp_aad_fill (aad, esp0, sa0);
-
- /*
- * we don't need to refer to the ESP header anymore so we
- * can overwrite it with the salt and use the IV where it is
- * to form the nonce = (Salt + IV)
- */
- iv -= sizeof (sa0->salt);
- clib_memcpy_fast (iv, &sa0->salt, sizeof (sa0->salt));
-
- tag = payload + len;
+ /* construct nonce in a scratch space in front of the IP header */
+ esp_ctr_nonce_t *nonce =
+ (esp_ctr_nonce_t *) (payload - esp_sz - pd->hdr_sz - sizeof (*nonce));
+ if (ipsec_sa_is_set_IS_AEAD (sa0))
+ {
+	  /* construct aad in a scratch space in front of the nonce */
+ esp_header_t *esp0 = (esp_header_t *) (payload - esp_sz);
+ aad = (u8 *) nonce - sizeof (esp_aead_t);
+ esp_aad_fill (aad, esp0, sa0);
+ tag = payload + len;
+ }
+ else
+ {
+ nonce->ctr = clib_host_to_net_u32 (1);
+ }
+ nonce->salt = sa0->salt;
+ ASSERT (sizeof (u64) == iv_sz);
+ nonce->iv = *(u64 *) iv;
+ iv = (u8 *) nonce;
}
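/* Editorial note: for non-AEAD CTR mode, the packed nonce built above is
 * byte-for-byte the initial counter block of RFC 3686, Salt(4) || IV(8) ||
 * Counter(4) = 1, so the cipher can consume it in place. A hedged size
 * check, using vppinfra's STATIC_ASSERT:
 */
STATIC_ASSERT (sizeof (esp_ctr_nonce_t) == 16,
	       "CTR initial counter block must be 16 bytes");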
crypto_start_offset = (payload += iv_sz) - b->data;
esp_decrypt_packet_data2_t * pd2, vlib_buffer_t * b,
u16 * next, int is_ip6, int is_tun, int is_async)
{
- ipsec_main_t *im = &ipsec_main;
- ipsec_sa_t *sa0 = vec_elt_at_index (im->sad, pd->sa_index);
+ ipsec_sa_t *sa0 = ipsec_sa_get (pd->sa_index);
vlib_buffer_t *lb = b;
const u8 esp_sz = sizeof (esp_header_t);
const u8 tun_flags = IPSEC_SA_FLAG_IS_TUNNEL | IPSEC_SA_FLAG_IS_TUNNEL_V6;
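/* Editorial sketch: ipsec_sa_get is assumed to be a thin inline over the
 * SA pool, replacing the open-coded vec_elt_at_index/pool_elt_at_index
 * lookups scattered through this file; roughly (treat as an assumption,
 * the real inline lives in the ipsec headers):
 */
always_inline ipsec_sa_t *
ipsec_sa_get (u32 sa_index)
{
  return pool_elt_at_index (ipsec_main.sad, sa_index);
}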
current_sa_bytes = current_sa_pkts = 0;
current_sa_index = vnet_buffer (b[0])->ipsec.sad_index;
- sa0 = pool_elt_at_index (im->sad, current_sa_index);
+ sa0 = ipsec_sa_get (current_sa_index);
/* fetch the second cacheline ASAP */
CLIB_PREFETCH (sa0->cacheline1, CLIB_CACHE_LINE_BYTES, LOAD);
if (PREDICT_FALSE (thread_index != sa0->thread_index))
{
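/* record the SA's owning thread in the buffer metadata so the handoff
   node can enqueue the packet to the right worker */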
+ vnet_buffer (b[0])->ipsec.thread_index = sa0->thread_index;
esp_set_next_index (is_async, from, nexts, from[b - bufs],
&n_async_drop, ESP_DECRYPT_NEXT_HANDOFF, next);
next[0] = ESP_DECRYPT_NEXT_HANDOFF;
/* no post process in async */
vlib_node_increment_counter (vm, node->node_index,
- ESP_DECRYPT_ERROR_RX_PKTS, n_left);
+ ESP_DECRYPT_ERROR_RX_PKTS,
+ from_frame->n_vectors);
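/* editorial note: n_left is consumed as a running count while the frame
   is processed, so from_frame->n_vectors is the stable per-frame total */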
if (n_async_drop)
vlib_buffer_enqueue_to_next (vm, node, from, nexts, n_async_drop);
{
esp_decrypt_trace_t *tr;
tr = vlib_add_trace (vm, node, b[0], sizeof (*tr));
- sa0 = pool_elt_at_index (im->sad, current_sa_index);
+ sa0 = ipsec_sa_get (current_sa_index);
tr->crypto_alg = sa0->crypto_alg;
tr->integ_alg = sa0->integ_alg;
tr->seq = pd->seq;
vlib_node_runtime_t * node,
vlib_frame_t * from_frame, int is_ip6, int is_tun)
{
- ipsec_main_t *im = &ipsec_main;
u32 *from = vlib_frame_vector_args (from_frame);
u32 n_left = from_frame->n_vectors;
vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
/*trace: */
if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
{
- ipsec_sa_t *sa0 = pool_elt_at_index (im->sad, pd->sa_index);
+ ipsec_sa_t *sa0 = ipsec_sa_get (pd->sa_index);
esp_decrypt_trace_t *tr;
esp_decrypt_packet_data_t *async_pd =
&(esp_post_data (b[0]))->decrypt_data;
tr = vlib_add_trace (vm, node, b[0], sizeof (*tr));
- sa0 = pool_elt_at_index (im->sad, async_pd->sa_index);
+ sa0 = ipsec_sa_get (async_pd->sa_index);
tr->crypto_alg = sa0->crypto_alg;
tr->integ_alg = sa0->integ_alg;
};
/* *INDENT-ON* */
+#ifndef CLIB_MARCH_VARIANT
+
+static clib_error_t *
+esp_decrypt_init (vlib_main_t *vm)
+{
+ ipsec_main_t *im = &ipsec_main;
+
+ im->esp4_dec_fq_index =
+ vlib_frame_queue_main_init (esp4_decrypt_node.index, 0);
+ im->esp6_dec_fq_index =
+ vlib_frame_queue_main_init (esp6_decrypt_node.index, 0);
+ im->esp4_dec_tun_fq_index =
+ vlib_frame_queue_main_init (esp4_decrypt_tun_node.index, 0);
+ im->esp6_dec_tun_fq_index =
+ vlib_frame_queue_main_init (esp6_decrypt_tun_node.index, 0);
+
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (esp_decrypt_init);
+
+#endif
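/* Editorial sketch: the frame-queue indices registered above are consumed
 * by the handoff path to move packets to the SA's owning thread. Roughly
 * as follows (the helper name is hypothetical, and the exact
 * vlib_buffer_enqueue_to_thread signature varies across VPP releases, so
 * treat this as an assumption):
 */
static_always_inline u32
esp_dec_handoff_sketch (vlib_main_t *vm, u32 *from, u16 *thread_indices,
			u32 n_packets)
{
  ipsec_main_t *im = &ipsec_main;

  /* enqueue each buffer to the thread recorded in its ipsec metadata,
     dropping on congestion rather than blocking this worker */
  return vlib_buffer_enqueue_to_thread (vm, im->esp4_dec_fq_index, from,
					thread_indices, n_packets, 1);
}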
+
/*
* fd.io coding-style-patch-verification: ON
*