ikev2_mngr_process_fn (vlib_main_t * vm, vlib_node_runtime_t * rt,
vlib_frame_t * f)
{
+ ipsec_main_t *im = &ipsec_main;
ikev2_main_t *km = &ikev2_main;
ikev2_profile_t *p;
ikev2_child_sa_t *c;
/* process ipsec sas */
ipsec_sa_t *sa;
- pool_foreach (sa, ipsec_sa_pool)
- {
- ikev2_mngr_process_ipsec_sa (sa);
- }
+ pool_foreach (sa, im->sa_pool)
+ {
+ ikev2_mngr_process_ipsec_sa (sa);
+ }
ikev2_process_pending_sa_init (vm, km);
}
if (~0 != sa_id)
{
ipsec_sa_t *sa;
+ ipsec_sa_inb_rt_t *irt;
+ ipsec_sa_outb_rt_t *ort;
u32 sa_index;
sa_index = ipsec_sa_find_and_lock (sa_id);
sa = ipsec_sa_get (sa_index);
+ irt = ipsec_sa_get_inb_rt (sa);
+ ort = ipsec_sa_get_outb_rt (sa);
- sa->seq = seq_num & 0xffffffff;
- sa->seq_hi = seq_num >> 32;
+ if (ort)
+ {
+ ort->seq = seq_num & 0xffffffff;
+ ort->seq_hi = seq_num >> 32;
+ }
- /* clear the window */
- if (ipsec_sa_is_set_ANTI_REPLAY_HUGE (sa))
- clib_bitmap_zero (sa->replay_window_huge);
- else
- sa->replay_window = 0;
+ if (irt)
+ {
+ irt->seq = seq_num & 0xffffffff;
+ irt->seq_hi = seq_num >> 32;
+
+ /* clear the window */
+ if (ipsec_sa_is_set_ANTI_REPLAY_HUGE (sa))
+ clib_bitmap_zero (irt->replay_window_huge);
+ else
+ irt->replay_window = 0;
+ }
ipsec_sa_unlock (sa_index);
}
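/* Editorial sketch, not part of the patch: the split above sends the low
 * 32 bits of the 64-bit seq_num to seq and the high 32 bits to seq_hi on
 * both runtimes, e.g. for a hypothetical value: */
u64 seq_num_example = 0x200000000aULL; /* hypothetical input */
u32 lo = seq_num_example & 0xffffffff; /* 0x0000000a -> irt->seq / ort->seq */
u32 hi = seq_num_example >> 32;        /* 0x2 -> irt->seq_hi / ort->seq_hi */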
ipsec_per_thread_data_t *ptd = vec_elt_at_index (im->ptd, thread_index);
from = vlib_frame_vector_args (from_frame);
n_left = from_frame->n_vectors;
- ipsec_sa_t *sa0 = 0;
+ ipsec_sa_inb_rt_t *irt = 0;
bool anti_replay_result;
u32 current_sa_index = ~0, current_sa_bytes = 0, current_sa_pkts = 0;
current_sa_index, current_sa_pkts,
current_sa_bytes);
current_sa_index = vnet_buffer (b[0])->ipsec.sad_index;
- sa0 = ipsec_sa_get (current_sa_index);
+ irt = ipsec_sa_get_inb_rt_by_index (current_sa_index);
current_sa_bytes = current_sa_pkts = 0;
vlib_prefetch_combined_counter (&ipsec_sa_counters,
thread_index, current_sa_index);
}
- if (PREDICT_FALSE ((u16) ~0 == sa0->thread_index))
+ if (PREDICT_FALSE ((u16) ~0 == irt->thread_index))
{
/* this is the first packet to use this SA, claim the SA
* for this thread. this could happen simultaneously on
* another thread */
- clib_atomic_cmp_and_swap (&sa0->thread_index, ~0,
+ clib_atomic_cmp_and_swap (&irt->thread_index, ~0,
ipsec_sa_assign_thread (thread_index));
}
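/* Editorial sketch, not part of the patch: assuming the usual
 * compare-and-swap semantics (the store happens only while the current
 * value still equals ~0 and the previous value is returned), exactly one
 * worker wins a simultaneous claim and the comparison below hands packets
 * off to that winner: */
u16 prev = clib_atomic_cmp_and_swap (&irt->thread_index, (u16) ~0,
                                     ipsec_sa_assign_thread (thread_index));
/* prev == (u16) ~0 means this worker claimed the SA; any other value is the
 * thread index already claimed by another worker */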
- if (PREDICT_TRUE (thread_index != sa0->thread_index))
+ if (PREDICT_TRUE (thread_index != irt->thread_index))
{
- vnet_buffer (b[0])->ipsec.thread_index = sa0->thread_index;
+ vnet_buffer (b[0])->ipsec.thread_index = irt->thread_index;
next[0] = AH_DECRYPT_NEXT_HANDOFF;
goto next;
}
pd->seq = clib_host_to_net_u32 (ah0->seq_no);
/* anti-replay check */
- if (PREDICT_FALSE (ipsec_sa_is_set_ANTI_REPLAY_HUGE (sa0)))
+ if (PREDICT_FALSE (irt->anti_reply_huge))
{
anti_replay_result = ipsec_sa_anti_replay_and_sn_advance (
- sa0, pd->seq, ~0, false, &pd->seq_hi, true);
+ irt, pd->seq, ~0, false, &pd->seq_hi, true);
}
else
{
anti_replay_result = ipsec_sa_anti_replay_and_sn_advance (
- sa0, pd->seq, ~0, false, &pd->seq_hi, false);
+ irt, pd->seq, ~0, false, &pd->seq_hi, false);
}
if (anti_replay_result)
{
current_sa_bytes += b[0]->current_length;
current_sa_pkts += 1;
- pd->icv_size = sa0->integ_icv_size;
+ pd->icv_size = irt->integ_icv_size;
pd->nexthdr_cached = ah0->nexthdr;
- if (PREDICT_TRUE (sa0->integ_alg != IPSEC_INTEG_ALG_NONE))
+ if (PREDICT_TRUE (irt->integ_icv_size))
{
- if (PREDICT_FALSE (ipsec_sa_is_set_USE_ESN (sa0) &&
- pd->current_data + b[0]->current_length
- + sizeof (u32) > buffer_data_size))
+ if (PREDICT_FALSE (irt->use_esn && pd->current_data +
+ b[0]->current_length +
+ sizeof (u32) >
+ buffer_data_size))
{
ah_decrypt_set_next_index (
b[0], node, vm->thread_index, AH_DECRYPT_ERROR_NO_TAIL_SPACE,
vnet_crypto_op_t *op;
vec_add2_aligned (ptd->integ_ops, op, 1, CLIB_CACHE_LINE_BYTES);
- vnet_crypto_op_init (op, sa0->integ_op_id);
+ vnet_crypto_op_init (op, irt->integ_op_id);
op->src = (u8 *) ih4;
op->len = b[0]->current_length;
op->digest = (u8 *) ih4 - pd->icv_size;
op->flags = VNET_CRYPTO_OP_FLAG_HMAC_CHECK;
op->digest_len = pd->icv_size;
- op->key_index = sa0->integ_key_index;
+ op->key_index = irt->integ_key_index;
op->user_data = b - bufs;
- if (ipsec_sa_is_set_USE_ESN (sa0))
+ if (irt->use_esn)
{
u32 seq_hi = clib_host_to_net_u32 (pd->seq_hi);
if (next[0] < AH_DECRYPT_N_NEXT)
goto trace;
- sa0 = ipsec_sa_get (pd->sa_index);
+ irt = ipsec_sa_get_inb_rt_by_index (pd->sa_index);
- if (PREDICT_TRUE (sa0->integ_alg != IPSEC_INTEG_ALG_NONE))
+ if (PREDICT_TRUE (irt->integ_icv_size))
{
/* redo the anti-replay check. see esp_decrypt for details */
- if (PREDICT_FALSE (ipsec_sa_is_set_ANTI_REPLAY_HUGE (sa0)))
+ if (PREDICT_FALSE (irt->anti_reply_huge))
{
if (ipsec_sa_anti_replay_and_sn_advance (
- sa0, pd->seq, pd->seq_hi, true, NULL, true))
+ irt, pd->seq, pd->seq_hi, true, NULL, true))
{
ah_decrypt_set_next_index (
b[0], node, vm->thread_index, AH_DECRYPT_ERROR_REPLAY, 0,
goto trace;
}
n_lost = ipsec_sa_anti_replay_advance (
- sa0, thread_index, pd->seq, pd->seq_hi, true);
+ irt, thread_index, pd->seq, pd->seq_hi, true);
}
else
{
if (ipsec_sa_anti_replay_and_sn_advance (
- sa0, pd->seq, pd->seq_hi, true, NULL, false))
+ irt, pd->seq, pd->seq_hi, true, NULL, false))
{
ah_decrypt_set_next_index (
b[0], node, vm->thread_index, AH_DECRYPT_ERROR_REPLAY, 0,
goto trace;
}
n_lost = ipsec_sa_anti_replay_advance (
- sa0, thread_index, pd->seq, pd->seq_hi, false);
+ irt, thread_index, pd->seq, pd->seq_hi, false);
}
vlib_prefetch_simple_counter (
&ipsec_sa_err_counters[IPSEC_SA_ERROR_LOST], thread_index,
b[0]->flags &= ~(VNET_BUFFER_F_L4_CHECKSUM_COMPUTED |
VNET_BUFFER_F_L4_CHECKSUM_CORRECT);
- if (PREDICT_TRUE (ipsec_sa_is_set_IS_TUNNEL (sa0)))
+ if (PREDICT_TRUE (irt->is_tunnel))
{ /* tunnel mode */
if (PREDICT_TRUE (pd->nexthdr_cached == IP_PROTOCOL_IP_IN_IP))
next[0] = AH_DECRYPT_NEXT_IP4_INPUT;
trace:
if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
{
- sa0 = ipsec_sa_get (vnet_buffer (b[0])->ipsec.sad_index);
+ ipsec_sa_t *sa = ipsec_sa_get (vnet_buffer (b[0])->ipsec.sad_index);
ah_decrypt_trace_t *tr =
vlib_add_trace (vm, node, b[0], sizeof (*tr));
- tr->integ_alg = sa0->integ_alg;
+ tr->integ_alg = sa->integ_alg;
tr->seq_num = pd->seq;
}
vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
u16 nexts[VLIB_FRAME_SIZE], *next = nexts;
ipsec_per_thread_data_t *ptd = vec_elt_at_index (im->ptd, thread_index);
- ipsec_sa_t *sa0 = 0;
+ ipsec_sa_outb_rt_t *ort = 0;
ip4_and_ah_header_t *ih0, *oh0 = 0;
ip6_and_ah_header_t *ih6_0, *oh6_0 = 0;
u32 current_sa_index = ~0, current_sa_bytes = 0, current_sa_pkts = 0;
current_sa_index, current_sa_pkts,
current_sa_bytes);
current_sa_index = vnet_buffer (b[0])->ipsec.sad_index;
- sa0 = ipsec_sa_get (current_sa_index);
+ ort = ipsec_sa_get_outb_rt_by_index (current_sa_index);
current_sa_bytes = current_sa_pkts = 0;
vlib_prefetch_combined_counter (&ipsec_sa_counters, thread_index,
pd->sa_index = current_sa_index;
next[0] = AH_ENCRYPT_NEXT_DROP;
- if (PREDICT_FALSE ((u16) ~0 == sa0->thread_index))
+ if (PREDICT_FALSE ((u16) ~0 == ort->thread_index))
{
/* this is the first packet to use this SA, claim the SA
* for this thread. this could happen simultaneously on
* another thread */
- clib_atomic_cmp_and_swap (&sa0->thread_index, ~0,
+ clib_atomic_cmp_and_swap (&ort->thread_index, ~0,
ipsec_sa_assign_thread (thread_index));
}
- if (PREDICT_TRUE (thread_index != sa0->thread_index))
+ if (PREDICT_TRUE (thread_index != ort->thread_index))
{
- vnet_buffer (b[0])->ipsec.thread_index = sa0->thread_index;
+ vnet_buffer (b[0])->ipsec.thread_index = ort->thread_index;
next[0] = AH_ENCRYPT_NEXT_HANDOFF;
goto next;
}
- if (PREDICT_FALSE (esp_seq_advance (sa0)))
+ if (PREDICT_FALSE (esp_seq_advance (ort)))
{
ah_encrypt_set_next_index (b[0], node, vm->thread_index,
AH_ENCRYPT_ERROR_SEQ_CYCLED, 0, next,
ssize_t adv;
ih0 = vlib_buffer_get_current (b[0]);
- if (PREDICT_TRUE (ipsec_sa_is_set_IS_TUNNEL (sa0)))
+ if (PREDICT_TRUE (ort->is_tunnel))
{
if (is_ip6)
adv = -sizeof (ip6_and_ah_header_t);
adv = -sizeof (ah_header_t);
}
- icv_size = sa0->integ_icv_size;
+ icv_size = ort->integ_icv_size;
const u8 padding_len = ah_calc_icv_padding_len (icv_size, is_ip6);
adv -= padding_len;
/* transport mode: save the eth header before it is overwritten */
- if (PREDICT_FALSE (!ipsec_sa_is_set_IS_TUNNEL (sa0)))
+ if (PREDICT_FALSE (!ort->is_tunnel))
{
const u32 l2_len = vnet_buffer (b[0])->ip.save_rewrite_length;
u8 *l2_hdr_in = (u8 *) vlib_buffer_get_current (b[0]) - l2_len;
oh6_0->ip6.ip_version_traffic_class_and_flow_label =
ih6_0->ip6.ip_version_traffic_class_and_flow_label;
- if (PREDICT_FALSE (ipsec_sa_is_set_IS_TUNNEL (sa0)))
+ if (PREDICT_FALSE (ort->is_tunnel))
{
- ip6_set_dscp_network_order (&oh6_0->ip6, sa0->tunnel.t_dscp);
- tunnel_encap_fixup_6o6 (sa0->tunnel_flags, &ih6_0->ip6,
+ ip6_set_dscp_network_order (&oh6_0->ip6, ort->t_dscp);
+ tunnel_encap_fixup_6o6 (ort->tunnel_flags, &ih6_0->ip6,
&oh6_0->ip6);
}
pd->ip_version_traffic_class_and_flow_label =
oh6_0->ip6.ip_version_traffic_class_and_flow_label;
- if (PREDICT_TRUE (ipsec_sa_is_set_IS_TUNNEL (sa0)))
+ if (PREDICT_TRUE (ort->is_tunnel))
{
next_hdr_type = IP_PROTOCOL_IPV6;
}
clib_memcpy_fast (&oh6_0->ip6, &ip6_hdr_template, 8);
oh6_0->ah.reserved = 0;
oh6_0->ah.nexthdr = next_hdr_type;
- oh6_0->ah.spi = clib_net_to_host_u32 (sa0->spi);
- oh6_0->ah.seq_no = clib_net_to_host_u32 (sa0->seq);
+ oh6_0->ah.spi = ort->spi_be;
+ oh6_0->ah.seq_no = clib_net_to_host_u32 (ort->seq);
oh6_0->ip6.payload_length =
clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b[0]) -
sizeof (ip6_header_t));
oh0 = vlib_buffer_get_current (b[0]);
pd->ttl = ih0->ip4.ttl;
- if (PREDICT_FALSE (ipsec_sa_is_set_IS_TUNNEL (sa0)))
+ if (PREDICT_FALSE (ort->is_tunnel))
{
- if (sa0->tunnel.t_dscp)
- pd->tos = sa0->tunnel.t_dscp << 2;
+ if (ort->t_dscp)
+ pd->tos = ort->t_dscp << 2;
else
{
pd->tos = ih0->ip4.tos;
- if (!(sa0->tunnel_flags &
+ if (!(ort->tunnel_flags &
TUNNEL_ENCAP_DECAP_FLAG_ENCAP_COPY_DSCP))
pd->tos &= 0x3;
- if (!(sa0->tunnel_flags &
+ if (!(ort->tunnel_flags &
TUNNEL_ENCAP_DECAP_FLAG_ENCAP_COPY_ECN))
pd->tos &= 0xfc;
}
pd->current_data = b[0]->current_data;
clib_memset (oh0, 0, sizeof (ip4_and_ah_header_t));
- if (PREDICT_TRUE (ipsec_sa_is_set_IS_TUNNEL (sa0)))
+ if (PREDICT_TRUE (ort->is_tunnel))
{
next_hdr_type = IP_PROTOCOL_IP_IN_IP;
}
oh0->ip4.length =
clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b[0]));
- oh0->ah.spi = clib_net_to_host_u32 (sa0->spi);
- oh0->ah.seq_no = clib_net_to_host_u32 (sa0->seq);
+ oh0->ah.spi = ort->spi_be;
+ oh0->ah.seq_no = clib_net_to_host_u32 (ort->seq);
oh0->ah.nexthdr = next_hdr_type;
oh0->ah.hdrlen =
(sizeof (ah_header_t) + icv_size + padding_len) / 4 - 2;
}
- if (PREDICT_TRUE (!is_ip6 && ipsec_sa_is_set_IS_TUNNEL (sa0) &&
- !ipsec_sa_is_set_IS_TUNNEL_V6 (sa0)))
+ if (PREDICT_TRUE (!is_ip6 && ort->is_tunnel && !ort->is_tunnel_v6))
{
- clib_memcpy_fast (&oh0->ip4.address_pair,
- &sa0->ip4_hdr.address_pair,
+ clib_memcpy_fast (&oh0->ip4.address_pair, &ort->ip4_hdr.address_pair,
sizeof (ip4_address_pair_t));
- next[0] = sa0->dpo.dpoi_next_node;
- vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = sa0->dpo.dpoi_index;
+ next[0] = ort->dpo.dpoi_next_node;
+ vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = ort->dpo.dpoi_index;
}
- else if (is_ip6 && ipsec_sa_is_set_IS_TUNNEL (sa0) &&
- ipsec_sa_is_set_IS_TUNNEL_V6 (sa0))
+ else if (is_ip6 && ort->is_tunnel && ort->is_tunnel_v6)
{
- clib_memcpy_fast (&oh6_0->ip6.src_address,
- &sa0->ip6_hdr.src_address,
+ clib_memcpy_fast (&oh6_0->ip6.src_address, &ort->ip6_hdr.src_address,
sizeof (ip6_address_t) * 2);
- next[0] = sa0->dpo.dpoi_next_node;
- vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = sa0->dpo.dpoi_index;
+ next[0] = ort->dpo.dpoi_next_node;
+ vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = ort->dpo.dpoi_index;
}
- if (PREDICT_TRUE (sa0->integ_op_id))
+ if (PREDICT_TRUE (ort->integ_op_id))
{
vnet_crypto_op_t *op;
vec_add2_aligned (ptd->integ_ops, op, 1, CLIB_CACHE_LINE_BYTES);
- vnet_crypto_op_init (op, sa0->integ_op_id);
+ vnet_crypto_op_init (op, ort->integ_op_id);
op->src = vlib_buffer_get_current (b[0]);
op->len = b[0]->current_length;
op->digest = vlib_buffer_get_current (b[0]) + ip_hdr_size +
sizeof (ah_header_t);
clib_memset (op->digest, 0, icv_size);
op->digest_len = icv_size;
- op->key_index = sa0->integ_key_index;
+ op->key_index = ort->integ_key_index;
op->user_data = b - bufs;
- if (ipsec_sa_is_set_USE_ESN (sa0))
+ if (ort->use_esn)
{
- u32 seq_hi = clib_host_to_net_u32 (sa0->seq_hi);
+ u32 seq_hi = clib_host_to_net_u32 (ort->seq_hi);
op->len += sizeof (seq_hi);
clib_memcpy (op->src + b[0]->current_length, &seq_hi,
}
}
- if (!ipsec_sa_is_set_IS_TUNNEL (sa0))
+ if (!ort->is_tunnel)
{
next[0] = AH_ENCRYPT_NEXT_INTERFACE_OUTPUT;
vlib_buffer_advance (b[0], -sizeof (ethernet_header_t));
next:
if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
{
- sa0 = ipsec_sa_get (pd->sa_index);
+ ipsec_sa_t *sa = ipsec_sa_get (pd->sa_index);
+ ipsec_sa_outb_rt_t *ort =
+ ipsec_sa_get_outb_rt_by_index (pd->sa_index);
ah_encrypt_trace_t *tr =
vlib_add_trace (vm, node, b[0], sizeof (*tr));
- tr->spi = sa0->spi;
- tr->seq_lo = sa0->seq;
- tr->seq_hi = sa0->seq_hi;
- tr->integ_alg = sa0->integ_alg;
+ tr->spi = sa->spi;
+ tr->seq_lo = ort->seq;
+ tr->seq_hi = ort->seq_hi;
+ tr->integ_alg = sa->integ_alg;
tr->sa_index = pd->sa_index;
}
/* TODO: the seq increment should be atomic so it can safely be accessed by multiple workers */
always_inline int
-esp_seq_advance (ipsec_sa_t * sa)
+esp_seq_advance (ipsec_sa_outb_rt_t *ort)
{
- if (PREDICT_TRUE (ipsec_sa_is_set_USE_ESN (sa)))
+ if (PREDICT_TRUE (ort->use_esn))
{
- if (PREDICT_FALSE (sa->seq == ESP_SEQ_MAX))
+ if (PREDICT_FALSE (ort->seq == ESP_SEQ_MAX))
{
- if (PREDICT_FALSE (ipsec_sa_is_set_USE_ANTI_REPLAY (sa) &&
- sa->seq_hi == ESP_SEQ_MAX))
+ if (PREDICT_FALSE (ort->use_anti_replay &&
+ ort->seq_hi == ESP_SEQ_MAX))
return 1;
- sa->seq_hi++;
+ ort->seq_hi++;
}
- sa->seq++;
+ ort->seq++;
}
else
{
- if (PREDICT_FALSE (ipsec_sa_is_set_USE_ANTI_REPLAY (sa) &&
- sa->seq == ESP_SEQ_MAX))
+ if (PREDICT_FALSE (ort->use_anti_replay && ort->seq == ESP_SEQ_MAX))
return 1;
- sa->seq++;
+ ort->seq++;
}
return 0;
}
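/* Editorial sketch, not part of the patch: with ESN the effective sequence
 * number is 64 bits wide, composed from the two fields advanced above. A
 * hypothetical helper that assembles it: */
static_always_inline u64
esp_outb_seq64_sketch (const ipsec_sa_outb_rt_t *ort)
{
  /* assumes the use_esn/seq/seq_hi runtime fields introduced by this patch */
  if (ort->use_esn)
    return ((u64) ort->seq_hi << 32) | ort->seq;
  return ort->seq;
}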
always_inline u16
-esp_aad_fill (u8 *data, const esp_header_t *esp, const ipsec_sa_t *sa,
- u32 seq_hi)
+esp_aad_fill (u8 *data, const esp_header_t *esp, int use_esn, u32 seq_hi)
{
esp_aead_t *aad;
aad = (esp_aead_t *) data;
aad->data[0] = esp->spi;
- if (ipsec_sa_is_set_USE_ESN (sa))
+ if (use_esn)
{
/* SPI, seq-hi, seq-low */
aad->data[1] = (u32) clib_host_to_net_u32 (seq_hi);
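/* Editorial sketch, not part of the patch: per the layout above the AAD is
 * { spi, seq-hi, seq-low } in network byte order with ESN and
 * { spi, seq-low } without it, so a hypothetical caller looks like: */
u8 aad[sizeof (esp_aead_t)];
esp_header_t esp_hdr = { .spi = clib_host_to_net_u32 (0x100),
                         .seq = clib_host_to_net_u32 (42) };
u16 aad_len = esp_aad_fill (aad, &esp_hdr, 1 /* use_esn */, 7 /* seq_hi */);
/* aad_len is expected to cover three u32 words with ESN, two without */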
{
u8 icv_sz;
u8 iv_sz;
- ipsec_sa_flags_t flags;
+ u8 udp_sz;
+ u8 is_transport;
u32 sa_index;
};
u64 sa_data;
}
static_always_inline u16
-esp_insert_esn (vlib_main_t *vm, ipsec_sa_t *sa, esp_decrypt_packet_data_t *pd,
- esp_decrypt_packet_data2_t *pd2, u32 *data_len, u8 **digest,
- u16 *len, vlib_buffer_t *b, u8 *payload)
+esp_insert_esn (vlib_main_t *vm, ipsec_sa_inb_rt_t *irt,
+ esp_decrypt_packet_data_t *pd, esp_decrypt_packet_data2_t *pd2,
+ u32 *data_len, u8 **digest, u16 *len, vlib_buffer_t *b,
+ u8 *payload)
{
- if (!ipsec_sa_is_set_USE_ESN (sa))
+ if (!irt->use_esn)
return 0;
/* shift ICV by 4 bytes to insert ESN */
u32 seq_hi = clib_host_to_net_u32 (pd->seq_hi);
}
static_always_inline u8 *
-esp_move_icv_esn (vlib_main_t * vm, vlib_buffer_t * first,
- esp_decrypt_packet_data_t * pd,
- esp_decrypt_packet_data2_t * pd2, u16 icv_sz,
- ipsec_sa_t * sa, u8 * extra_esn, u32 * len)
+esp_move_icv_esn (vlib_main_t *vm, vlib_buffer_t *first,
+ esp_decrypt_packet_data_t *pd,
+ esp_decrypt_packet_data2_t *pd2, u16 icv_sz,
+ ipsec_sa_inb_rt_t *irt, u8 *extra_esn, u32 *len)
{
u16 dif = 0;
u8 *digest = esp_move_icv (vm, first, pd, pd2, icv_sz, &dif);
if (dif)
*len -= dif;
- if (ipsec_sa_is_set_USE_ESN (sa))
+ if (irt->use_esn)
{
u32 seq_hi = clib_host_to_net_u32 (pd->seq_hi);
u16 space_left = vlib_buffer_space_left_at_end (vm, pd2->lb);
static_always_inline int
esp_decrypt_chain_integ (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
const esp_decrypt_packet_data_t *pd,
- esp_decrypt_packet_data2_t *pd2, ipsec_sa_t *sa0,
- vlib_buffer_t *b, u8 icv_sz, u8 *start_src,
- u32 start_len, u8 **digest, u16 *n_ch,
+ esp_decrypt_packet_data2_t *pd2,
+ ipsec_sa_inb_rt_t *irt, vlib_buffer_t *b, u8 icv_sz,
+ u8 *start_src, u32 start_len, u8 **digest, u16 *n_ch,
u32 *integ_total_len)
{
vnet_crypto_op_chunk_t *ch;
ch->len = cb->current_length;
else
ch->len = cb->current_length - icv_sz;
- if (ipsec_sa_is_set_USE_ESN (sa0))
+ if (irt->use_esn)
{
u32 seq_hi = clib_host_to_net_u32 (pd->seq_hi);
u8 tmp[ESP_MAX_ICV_SIZE];
}
static_always_inline u32
-esp_decrypt_chain_crypto (vlib_main_t * vm, ipsec_per_thread_data_t * ptd,
- esp_decrypt_packet_data_t * pd,
- esp_decrypt_packet_data2_t * pd2,
- ipsec_sa_t * sa0, vlib_buffer_t * b, u8 icv_sz,
- u8 * start, u32 start_len, u8 ** tag, u16 * n_ch)
+esp_decrypt_chain_crypto (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
+ esp_decrypt_packet_data_t *pd,
+ esp_decrypt_packet_data2_t *pd2,
+ ipsec_sa_inb_rt_t *irt, vlib_buffer_t *b, u8 icv_sz,
+ u8 *start, u32 start_len, u8 **tag, u16 *n_ch)
{
vnet_crypto_op_chunk_t *ch;
vlib_buffer_t *cb = b;
ch->src = ch->dst = vlib_buffer_get_current (cb);
if (pd2->lb == cb)
{
- if (ipsec_sa_is_set_IS_AEAD (sa0))
+ if (irt->is_aead)
{
if (pd2->lb->current_length < icv_sz)
{
static_always_inline esp_decrypt_error_t
esp_decrypt_prepare_sync_op (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
- ipsec_sa_t *sa0, u8 *payload, u16 len, u8 icv_sz,
- u8 iv_sz, esp_decrypt_packet_data_t *pd,
+ ipsec_sa_inb_rt_t *irt, u8 *payload, u16 len,
+ u8 icv_sz, u8 iv_sz,
+ esp_decrypt_packet_data_t *pd,
esp_decrypt_packet_data2_t *pd2, vlib_buffer_t *b,
u32 index)
{
vnet_crypto_op_t _op, *op = &_op;
const u8 esp_sz = sizeof (esp_header_t);
- if (PREDICT_TRUE (sa0->integ_op_id != VNET_CRYPTO_OP_NONE))
+ if (PREDICT_TRUE (irt->integ_op_id != VNET_CRYPTO_OP_NONE))
{
- vnet_crypto_op_init (op, sa0->integ_op_id);
- op->key_index = sa0->integ_key_index;
+ vnet_crypto_op_init (op, irt->integ_op_id);
+ op->key_index = irt->integ_key_index;
op->src = payload;
op->flags = VNET_CRYPTO_OP_FLAG_HMAC_CHECK;
op->user_data = index;
if (pd2->lb->current_length < icv_sz)
{
u8 extra_esn = 0;
- op->digest =
- esp_move_icv_esn (vm, b, pd, pd2, icv_sz, sa0,
- &extra_esn, &op->len);
+ op->digest = esp_move_icv_esn (vm, b, pd, pd2, icv_sz, irt,
+ &extra_esn, &op->len);
if (extra_esn)
{
op->flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
op->chunk_index = vec_len (ptd->chunks);
- if (esp_decrypt_chain_integ (vm, ptd, pd, pd2, sa0, b, icv_sz,
+ if (esp_decrypt_chain_integ (vm, ptd, pd, pd2, irt, b, icv_sz,
payload, pd->current_length,
&op->digest, &op->n_chunks, 0) < 0)
return ESP_DECRYPT_ERROR_NO_BUFFERS;
else
{
integ_ops = &ptd->integ_ops;
- esp_insert_esn (vm, sa0, pd, pd2, &op->len, &op->digest, &len, b,
+ esp_insert_esn (vm, irt, pd, pd2, &op->len, &op->digest, &len, b,
payload);
}
out:
payload += esp_sz;
len -= esp_sz;
- if (sa0->crypto_dec_op_id != VNET_CRYPTO_OP_NONE)
+ if (irt->cipher_op_id != VNET_CRYPTO_OP_NONE)
{
- vnet_crypto_op_init (op, sa0->crypto_dec_op_id);
- op->key_index = sa0->crypto_key_index;
+ vnet_crypto_op_init (op, irt->cipher_op_id);
+ op->key_index = irt->cipher_key_index;
op->iv = payload;
- if (ipsec_sa_is_set_IS_CTR (sa0))
+ if (irt->is_ctr)
{
/* construct nonce in a scratch space in front of the IP header */
esp_ctr_nonce_t *nonce =
(esp_ctr_nonce_t *) (payload - esp_sz - pd->hdr_sz -
sizeof (*nonce));
- if (ipsec_sa_is_set_IS_AEAD (sa0))
+ if (irt->is_aead)
{
/* construct aad in a scratch space in front of the nonce */
esp_header_t *esp0 = (esp_header_t *) (payload - esp_sz);
op->aad = (u8 *) nonce - sizeof (esp_aead_t);
- op->aad_len = esp_aad_fill (op->aad, esp0, sa0, pd->seq_hi);
+ op->aad_len =
+ esp_aad_fill (op->aad, esp0, irt->use_esn, pd->seq_hi);
op->tag = payload + len;
op->tag_len = 16;
- if (PREDICT_FALSE (ipsec_sa_is_set_IS_NULL_GMAC (sa0)))
+ if (PREDICT_FALSE (irt->is_null_gmac))
{
/* RFC-4543 ENCR_NULL_AUTH_AES_GMAC: IV is part of AAD */
payload -= iv_sz;
{
nonce->ctr = clib_host_to_net_u32 (1);
}
- nonce->salt = sa0->salt;
+ nonce->salt = irt->salt;
ASSERT (sizeof (u64) == iv_sz);
nonce->iv = *(u64 *) op->iv;
op->iv = (u8 *) nonce;
/* buffer is chained */
op->flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
op->chunk_index = vec_len (ptd->chunks);
- esp_decrypt_chain_crypto (vm, ptd, pd, pd2, sa0, b, icv_sz,
- payload, len - pd->iv_sz + pd->icv_sz,
- &op->tag, &op->n_chunks);
+ esp_decrypt_chain_crypto (vm, ptd, pd, pd2, irt, b, icv_sz, payload,
+ len - pd->iv_sz + pd->icv_sz, &op->tag,
+ &op->n_chunks);
crypto_ops = &ptd->chained_crypto_ops;
}
else
static_always_inline esp_decrypt_error_t
esp_decrypt_prepare_async_frame (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
- vnet_crypto_async_frame_t *f, ipsec_sa_t *sa0,
- u8 *payload, u16 len, u8 icv_sz, u8 iv_sz,
+ vnet_crypto_async_frame_t *f,
+ ipsec_sa_inb_rt_t *irt, u8 *payload, u16 len,
+ u8 icv_sz, u8 iv_sz,
esp_decrypt_packet_data_t *pd,
esp_decrypt_packet_data2_t *pd2, u32 bi,
vlib_buffer_t *b, u16 async_next)
esp_decrypt_packet_data_t *async_pd = &(esp_post_data (b))->decrypt_data;
esp_decrypt_packet_data2_t *async_pd2 = esp_post_data2 (b);
u8 *tag = payload + len, *iv = payload + esp_sz, *aad = 0;
- const u32 key_index = sa0->crypto_key_index;
+ const u32 key_index = irt->cipher_key_index;
u32 crypto_len, integ_len = 0;
i16 crypto_start_offset, integ_start_offset = 0;
u8 flags = 0;
- if (!ipsec_sa_is_set_IS_AEAD (sa0))
+ if (!irt->is_aead)
{
/* linked algs */
integ_start_offset = payload - b->data;
integ_len = len;
- if (PREDICT_TRUE (sa0->integ_op_id != VNET_CRYPTO_OP_NONE))
+ if (PREDICT_TRUE (irt->integ_op_id != VNET_CRYPTO_OP_NONE))
flags |= VNET_CRYPTO_OP_FLAG_HMAC_CHECK;
if (pd->is_chain)
if (pd2->lb->current_length < icv_sz)
{
u8 extra_esn = 0;
- tag = esp_move_icv_esn (vm, b, pd, pd2, icv_sz, sa0,
- &extra_esn, &integ_len);
+ tag = esp_move_icv_esn (vm, b, pd, pd2, icv_sz, irt, &extra_esn,
+ &integ_len);
if (extra_esn)
{
tag = vlib_buffer_get_tail (pd2->lb) - icv_sz;
flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
- if (esp_decrypt_chain_integ (vm, ptd, pd, pd2, sa0, b, icv_sz,
+ if (esp_decrypt_chain_integ (vm, ptd, pd, pd2, irt, b, icv_sz,
payload, pd->current_length, &tag, 0,
&integ_len) < 0)
{
}
}
else
- esp_insert_esn (vm, sa0, pd, pd2, &integ_len, &tag, &len, b, payload);
+ esp_insert_esn (vm, irt, pd, pd2, &integ_len, &tag, &len, b, payload);
}
out:
len -= esp_sz;
iv = payload;
- if (ipsec_sa_is_set_IS_CTR (sa0))
+ if (irt->is_ctr)
{
/* construct nonce in a scratch space in front of the IP header */
esp_ctr_nonce_t *nonce =
(esp_ctr_nonce_t *) (payload - esp_sz - pd->hdr_sz - sizeof (*nonce));
- if (ipsec_sa_is_set_IS_AEAD (sa0))
+ if (irt->is_aead)
{
/* construct aad in a scratch space in front of the nonce */
esp_header_t *esp0 = (esp_header_t *) (payload - esp_sz);
aad = (u8 *) nonce - sizeof (esp_aead_t);
- esp_aad_fill (aad, esp0, sa0, pd->seq_hi);
+ esp_aad_fill (aad, esp0, irt->use_esn, pd->seq_hi);
tag = payload + len;
- if (PREDICT_FALSE (ipsec_sa_is_set_IS_NULL_GMAC (sa0)))
+ if (PREDICT_FALSE (irt->is_null_gmac))
{
/* RFC-4543 ENCR_NULL_AUTH_AES_GMAC: IV is part of AAD */
payload -= iv_sz;
{
nonce->ctr = clib_host_to_net_u32 (1);
}
- nonce->salt = sa0->salt;
+ nonce->salt = irt->salt;
ASSERT (sizeof (u64) == iv_sz);
nonce->iv = *(u64 *) iv;
iv = (u8 *) nonce;
/* buffer is chained */
flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
- crypto_len = esp_decrypt_chain_crypto (vm, ptd, pd, pd2, sa0, b, icv_sz,
- payload,
- len - pd->iv_sz + pd->icv_sz,
- &tag, 0);
+ crypto_len =
+ esp_decrypt_chain_crypto (vm, ptd, pd, pd2, irt, b, icv_sz, payload,
+ len - pd->iv_sz + pd->icv_sz, &tag, 0);
}
*async_pd = *pd;
vlib_buffer_t *b, u16 *next, int is_ip6, int is_tun,
int is_async)
{
- ipsec_sa_t *sa0 = ipsec_sa_get (pd->sa_index);
+ ipsec_sa_inb_rt_t *irt = ipsec_sa_get_inb_rt_by_index (pd->sa_index);
vlib_buffer_t *lb = b;
const u8 esp_sz = sizeof (esp_header_t);
- const u8 tun_flags = IPSEC_SA_FLAG_IS_TUNNEL | IPSEC_SA_FLAG_IS_TUNNEL_V6;
u8 pad_length = 0, next_header = 0;
u16 icv_sz;
u64 n_lost;
* a sequence s, s+1, s+2, s+3, ... s+n and nothing will prevent any
* implementation, sequential or batching, from decrypting these.
*/
- if (PREDICT_FALSE (ipsec_sa_is_set_ANTI_REPLAY_HUGE (sa0)))
+ if (PREDICT_FALSE (irt->anti_reply_huge))
{
- if (ipsec_sa_anti_replay_and_sn_advance (sa0, pd->seq, pd->seq_hi, true,
+ if (ipsec_sa_anti_replay_and_sn_advance (irt, pd->seq, pd->seq_hi, true,
NULL, true))
{
esp_decrypt_set_next_index (b, node, vm->thread_index,
ESP_DECRYPT_NEXT_DROP, pd->sa_index);
return;
}
- n_lost = ipsec_sa_anti_replay_advance (sa0, vm->thread_index, pd->seq,
+ n_lost = ipsec_sa_anti_replay_advance (irt, vm->thread_index, pd->seq,
pd->seq_hi, true);
}
else
{
- if (ipsec_sa_anti_replay_and_sn_advance (sa0, pd->seq, pd->seq_hi, true,
+ if (ipsec_sa_anti_replay_and_sn_advance (irt, pd->seq, pd->seq_hi, true,
NULL, false))
{
esp_decrypt_set_next_index (b, node, vm->thread_index,
ESP_DECRYPT_NEXT_DROP, pd->sa_index);
return;
}
- n_lost = ipsec_sa_anti_replay_advance (sa0, vm->thread_index, pd->seq,
+ n_lost = ipsec_sa_anti_replay_advance (irt, vm->thread_index, pd->seq,
pd->seq_hi, false);
}
b->flags &=
~(VNET_BUFFER_F_L4_CHECKSUM_COMPUTED | VNET_BUFFER_F_L4_CHECKSUM_CORRECT);
- if ((pd->flags & tun_flags) == 0 && !is_tun) /* transport mode */
+ if (pd->is_transport && !is_tun) /* transport mode */
{
- u8 udp_sz = (is_ip6 == 0 && pd->flags & IPSEC_SA_FLAG_UDP_ENCAP) ?
- sizeof (udp_header_t) : 0;
+ u8 udp_sz = is_ip6 ? 0 : pd->udp_sz;
u16 ip_hdr_sz = pd->hdr_sz - udp_sz;
u8 *old_ip = b->data + pd->current_data - ip_hdr_sz - udp_sz;
u8 *ip = old_ip + adv + udp_sz;
if (is_tun)
{
- if (ipsec_sa_is_set_IS_PROTECT (sa0))
+ if (irt->is_protect)
{
/*
* There are two encap possibilities
esp_decrypt_packet_data_t cpd = { };
u32 current_sa_index = ~0, current_sa_bytes = 0, current_sa_pkts = 0;
const u8 esp_sz = sizeof (esp_header_t);
- ipsec_sa_t *sa0 = 0;
+ ipsec_sa_inb_rt_t *irt = 0;
bool anti_replay_result;
- int is_async = im->async_mode;
+ int is_async = 0;
vnet_crypto_op_id_t async_op = ~0;
vnet_crypto_async_frame_t *async_frames[VNET_CRYPTO_N_OP_IDS];
esp_decrypt_error_t err;
vlib_get_buffers (vm, from, b, n_left);
- if (!is_async)
- {
- vec_reset_length (ptd->crypto_ops);
- vec_reset_length (ptd->integ_ops);
- vec_reset_length (ptd->chained_crypto_ops);
- vec_reset_length (ptd->chained_integ_ops);
- }
+ vec_reset_length (ptd->crypto_ops);
+ vec_reset_length (ptd->integ_ops);
+ vec_reset_length (ptd->chained_crypto_ops);
+ vec_reset_length (ptd->chained_integ_ops);
vec_reset_length (ptd->async_frames);
vec_reset_length (ptd->chunks);
clib_memset (sync_nexts, -1, sizeof (sync_nexts));
current_sa_index = vnet_buffer (b[0])->ipsec.sad_index;
vlib_prefetch_combined_counter (&ipsec_sa_counters, thread_index,
current_sa_index);
- sa0 = ipsec_sa_get (current_sa_index);
+ irt = ipsec_sa_get_inb_rt_by_index (current_sa_index);
- /* fetch the second cacheline ASAP */
- clib_prefetch_load (sa0->cacheline1);
- cpd.icv_sz = sa0->integ_icv_size;
- cpd.iv_sz = sa0->crypto_iv_size;
- cpd.flags = sa0->flags;
+ cpd.icv_sz = irt->integ_icv_size;
+ cpd.iv_sz = irt->cipher_iv_size;
+ cpd.udp_sz = irt->udp_sz;
+ cpd.is_transport = irt->is_transport;
cpd.sa_index = current_sa_index;
- is_async = im->async_mode | ipsec_sa_is_set_IS_ASYNC (sa0);
+ is_async = irt->is_async;
}
- if (PREDICT_FALSE ((u16) ~0 == sa0->thread_index))
+ if (PREDICT_FALSE ((u16) ~0 == irt->thread_index))
{
/* this is the first packet to use this SA, claim the SA
* for this thread. this could happen simultaneously on
* another thread */
- clib_atomic_cmp_and_swap (&sa0->thread_index, ~0,
+ clib_atomic_cmp_and_swap (&irt->thread_index, ~0,
ipsec_sa_assign_thread (thread_index));
}
- if (PREDICT_FALSE (thread_index != sa0->thread_index))
+ if (PREDICT_FALSE (thread_index != irt->thread_index))
{
- vnet_buffer (b[0])->ipsec.thread_index = sa0->thread_index;
+ vnet_buffer (b[0])->ipsec.thread_index = irt->thread_index;
err = ESP_DECRYPT_ERROR_HANDOFF;
esp_decrypt_set_next_index (b[0], node, thread_index, err, n_noop,
noop_nexts, ESP_DECRYPT_NEXT_HANDOFF,
pd->current_length = b[0]->current_length;
/* anti-replay check */
- if (PREDICT_FALSE (ipsec_sa_is_set_ANTI_REPLAY_HUGE (sa0)))
+ if (PREDICT_FALSE (irt->anti_reply_huge))
{
anti_replay_result = ipsec_sa_anti_replay_and_sn_advance (
- sa0, pd->seq, ~0, false, &pd->seq_hi, true);
+ irt, pd->seq, ~0, false, &pd->seq_hi, true);
}
else
{
anti_replay_result = ipsec_sa_anti_replay_and_sn_advance (
- sa0, pd->seq, ~0, false, &pd->seq_hi, false);
+ irt, pd->seq, ~0, false, &pd->seq_hi, false);
}
if (anti_replay_result)
if (is_async)
{
- async_op = sa0->crypto_async_dec_op_id;
+ async_op = irt->async_op_id;
/* get a frame for this op if we don't yet have one or it's full
*/
}
err = esp_decrypt_prepare_async_frame (
- vm, ptd, async_frames[async_op], sa0, payload, len, cpd.icv_sz,
+ vm, ptd, async_frames[async_op], irt, payload, len, cpd.icv_sz,
cpd.iv_sz, pd, pd2, from[b - bufs], b[0], async_next_node);
if (ESP_DECRYPT_ERROR_RX_PKTS != err)
{
}
else
{
- err = esp_decrypt_prepare_sync_op (vm, ptd, sa0, payload, len,
+ err = esp_decrypt_prepare_sync_op (vm, ptd, irt, payload, len,
cpd.icv_sz, cpd.iv_sz, pd, pd2,
b[0], n_sync);
if (err != ESP_DECRYPT_ERROR_RX_PKTS)
{
esp_decrypt_trace_t *tr;
tr = vlib_add_trace (vm, node, b[0], sizeof (*tr));
- sa0 = ipsec_sa_get (current_sa_index);
- tr->crypto_alg = sa0->crypto_alg;
- tr->integ_alg = sa0->integ_alg;
+ ipsec_sa_t *sa = ipsec_sa_get (current_sa_index);
+ ipsec_sa_inb_rt_t *irt =
+ ipsec_sa_get_inb_rt_by_index (current_sa_index);
+ tr->crypto_alg = sa->crypto_alg;
+ tr->integ_alg = sa->integ_alg;
tr->seq = pd->seq;
- tr->sa_seq = sa0->seq;
- tr->sa_seq_hi = sa0->seq_hi;
+ tr->sa_seq = irt->seq;
+ tr->sa_seq_hi = irt->seq_hi;
tr->pkt_seq_hi = pd->seq_hi;
}
/*trace: */
if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
{
- ipsec_sa_t *sa0 = ipsec_sa_get (pd->sa_index);
+ ipsec_sa_t *sa;
+ ipsec_sa_inb_rt_t *irt;
esp_decrypt_trace_t *tr;
esp_decrypt_packet_data_t *async_pd =
&(esp_post_data (b[0]))->decrypt_data;
tr = vlib_add_trace (vm, node, b[0], sizeof (*tr));
- sa0 = ipsec_sa_get (async_pd->sa_index);
+ sa = ipsec_sa_get (async_pd->sa_index);
+ irt = ipsec_sa_get_inb_rt_by_index (async_pd->sa_index);
- tr->crypto_alg = sa0->crypto_alg;
- tr->integ_alg = sa0->integ_alg;
+ tr->crypto_alg = sa->crypto_alg;
+ tr->integ_alg = sa->integ_alg;
tr->seq = pd->seq;
- tr->sa_seq = sa0->seq;
- tr->sa_seq_hi = sa0->seq_hi;
+ tr->sa_seq = irt->seq;
+ tr->sa_seq_hi = irt->seq_hi;
}
n_left--;
}
static_always_inline void
-esp_fill_udp_hdr (ipsec_sa_t * sa, udp_header_t * udp, u16 len)
+esp_fill_udp_hdr (ipsec_sa_outb_rt_t *ort, udp_header_t *udp, u16 len)
{
- clib_memcpy_fast (udp, &sa->udp_hdr, sizeof (udp_header_t));
+ clib_memcpy_fast (udp, &ort->udp_hdr, sizeof (udp_header_t));
udp->length = clib_net_to_host_u16 (len);
}
* message. You can refer to NIST SP800-38a and NIST SP800-38d for more
* details. */
static_always_inline void *
-esp_generate_iv (ipsec_sa_t *sa, void *payload, int iv_sz)
+esp_generate_iv (ipsec_sa_outb_rt_t *ort, void *payload, int iv_sz)
{
ASSERT (iv_sz >= sizeof (u64));
u64 *iv = (u64 *) (payload - iv_sz);
clib_memset_u8 (iv, 0, iv_sz);
- *iv = clib_pcg64i_random_r (&sa->iv_prng);
+ *iv = clib_pcg64i_random_r (&ort->iv_prng);
return iv;
}
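/* Editorial sketch, not part of the patch: a caller (see
 * esp_prepare_sync_op () below) reserves iv_sz bytes immediately in front
 * of payload; the PRNG-generated IV is written there and the returned
 * pointer feeds the crypto op IV setup, directly or via the CTR/AEAD
 * nonce: */
void *pkt_iv = esp_generate_iv (ort, payload, iv_sz);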
}
static_always_inline u32
-esp_encrypt_chain_crypto (vlib_main_t * vm, ipsec_per_thread_data_t * ptd,
- ipsec_sa_t * sa0, vlib_buffer_t * b,
- vlib_buffer_t * lb, u8 icv_sz, u8 * start,
- u32 start_len, u16 * n_ch)
+esp_encrypt_chain_crypto (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
+ vlib_buffer_t *b, vlib_buffer_t *lb, u8 icv_sz,
+ u8 *start, u32 start_len, u16 *n_ch)
{
vnet_crypto_op_chunk_t *ch;
vlib_buffer_t *cb = b;
}
static_always_inline u32
-esp_encrypt_chain_integ (vlib_main_t * vm, ipsec_per_thread_data_t * ptd,
- ipsec_sa_t * sa0, vlib_buffer_t * b,
- vlib_buffer_t * lb, u8 icv_sz, u8 * start,
- u32 start_len, u8 * digest, u16 * n_ch)
+esp_encrypt_chain_integ (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
+ ipsec_sa_outb_rt_t *ort, vlib_buffer_t *b,
+ vlib_buffer_t *lb, u8 icv_sz, u8 *start,
+ u32 start_len, u8 *digest, u16 *n_ch)
{
vnet_crypto_op_chunk_t *ch;
vlib_buffer_t *cb = b;
if (lb == cb)
{
total_len += ch->len = cb->current_length - icv_sz;
- if (ipsec_sa_is_set_USE_ESN (sa0))
+ if (ort->use_esn)
{
- u32 seq_hi = clib_net_to_host_u32 (sa0->seq_hi);
+ u32 seq_hi = clib_net_to_host_u32 (ort->seq_hi);
clib_memcpy_fast (digest, &seq_hi, sizeof (seq_hi));
ch->len += sizeof (seq_hi);
total_len += sizeof (seq_hi);
always_inline void
esp_prepare_sync_op (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
vnet_crypto_op_t **crypto_ops,
- vnet_crypto_op_t **integ_ops, ipsec_sa_t *sa0, u32 seq_hi,
- u8 *payload, u16 payload_len, u8 iv_sz, u8 icv_sz, u32 bi,
- vlib_buffer_t **b, vlib_buffer_t *lb, u32 hdr_len,
- esp_header_t *esp)
+ vnet_crypto_op_t **integ_ops, ipsec_sa_outb_rt_t *ort,
+ u32 seq_hi, u8 *payload, u16 payload_len, u8 iv_sz,
+ u8 icv_sz, u32 bi, vlib_buffer_t **b, vlib_buffer_t *lb,
+ u32 hdr_len, esp_header_t *esp)
{
- if (sa0->crypto_enc_op_id)
+ if (ort->cipher_op_id)
{
vnet_crypto_op_t *op;
vec_add2_aligned (crypto_ops[0], op, 1, CLIB_CACHE_LINE_BYTES);
- vnet_crypto_op_init (op, sa0->crypto_enc_op_id);
+ vnet_crypto_op_init (op, ort->cipher_op_id);
u8 *crypto_start = payload;
/* esp_add_footer_and_icv() in esp_encrypt_inline() makes sure we always
* have enough space for ESP header and footer which includes ICV */
u16 crypto_len = payload_len - icv_sz;
/* generate the IV in front of the payload */
- void *pkt_iv = esp_generate_iv (sa0, payload, iv_sz);
+ void *pkt_iv = esp_generate_iv (ort, payload, iv_sz);
- op->key_index = sa0->crypto_key_index;
+ op->key_index = ort->cipher_key_index;
op->user_data = bi;
- if (ipsec_sa_is_set_IS_CTR (sa0))
+ if (ort->is_ctr)
{
/* construct nonce in a scratch space in front of the IP header */
esp_ctr_nonce_t *nonce =
(esp_ctr_nonce_t *) (pkt_iv - hdr_len - sizeof (*nonce));
- if (ipsec_sa_is_set_IS_AEAD (sa0))
+ if (ort->is_aead)
{
/* construct aad in a scratch space in front of the nonce */
op->aad = (u8 *) nonce - sizeof (esp_aead_t);
- op->aad_len = esp_aad_fill (op->aad, esp, sa0, seq_hi);
+ op->aad_len = esp_aad_fill (op->aad, esp, ort->use_esn, seq_hi);
op->tag = payload + crypto_len;
op->tag_len = 16;
- if (PREDICT_FALSE (ipsec_sa_is_set_IS_NULL_GMAC (sa0)))
+ if (PREDICT_FALSE (ort->is_null_gmac))
{
/* RFC-4543 ENCR_NULL_AUTH_AES_GMAC: IV is part of AAD */
crypto_start -= iv_sz;
nonce->ctr = clib_host_to_net_u32 (1);
}
- nonce->salt = sa0->salt;
+ nonce->salt = ort->salt;
nonce->iv = *(u64 *) pkt_iv;
op->iv = (u8 *) nonce;
}
op->flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
op->chunk_index = vec_len (ptd->chunks);
op->tag = vlib_buffer_get_tail (lb) - icv_sz;
- esp_encrypt_chain_crypto (vm, ptd, sa0, b[0], lb, icv_sz,
- crypto_start, crypto_len + icv_sz,
- &op->n_chunks);
+ esp_encrypt_chain_crypto (vm, ptd, b[0], lb, icv_sz, crypto_start,
+ crypto_len + icv_sz, &op->n_chunks);
}
else
{
}
}
- if (sa0->integ_op_id)
+ if (ort->integ_op_id)
{
vnet_crypto_op_t *op;
vec_add2_aligned (integ_ops[0], op, 1, CLIB_CACHE_LINE_BYTES);
- vnet_crypto_op_init (op, sa0->integ_op_id);
+ vnet_crypto_op_init (op, ort->integ_op_id);
op->src = payload - iv_sz - sizeof (esp_header_t);
op->digest = payload + payload_len - icv_sz;
- op->key_index = sa0->integ_key_index;
+ op->key_index = ort->integ_key_index;
op->digest_len = icv_sz;
op->len = payload_len - icv_sz + iv_sz + sizeof (esp_header_t);
op->user_data = bi;
op->chunk_index = vec_len (ptd->chunks);
op->digest = vlib_buffer_get_tail (lb) - icv_sz;
- esp_encrypt_chain_integ (vm, ptd, sa0, b[0], lb, icv_sz,
+ esp_encrypt_chain_integ (vm, ptd, ort, b[0], lb, icv_sz,
payload - iv_sz - sizeof (esp_header_t),
- payload_len + iv_sz +
- sizeof (esp_header_t), op->digest,
- &op->n_chunks);
+ payload_len + iv_sz + sizeof (esp_header_t),
+ op->digest, &op->n_chunks);
}
- else if (ipsec_sa_is_set_USE_ESN (sa0))
+ else if (ort->use_esn)
{
u32 tmp = clib_net_to_host_u32 (seq_hi);
clib_memcpy_fast (op->digest, &tmp, sizeof (seq_hi));
static_always_inline void
esp_prepare_async_frame (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
vnet_crypto_async_frame_t *async_frame,
- ipsec_sa_t *sa, vlib_buffer_t *b, esp_header_t *esp,
- u8 *payload, u32 payload_len, u8 iv_sz, u8 icv_sz,
- u32 bi, u16 next, u32 hdr_len, u16 async_next,
- vlib_buffer_t *lb)
+ ipsec_sa_outb_rt_t *ort, vlib_buffer_t *b,
+ esp_header_t *esp, u8 *payload, u32 payload_len,
+ u8 iv_sz, u8 icv_sz, u32 bi, u16 next, u32 hdr_len,
+ u16 async_next, vlib_buffer_t *lb)
{
esp_post_data_t *post = esp_post_data (b);
u8 *tag, *iv, *aad = 0;
u8 flag = 0;
- const u32 key_index = sa->crypto_key_index;
+ const u32 key_index = ort->cipher_key_index;
i16 crypto_start_offset, integ_start_offset;
u16 crypto_total_len, integ_total_len;
tag = payload + crypto_total_len;
/* generate the IV in front of the payload */
- void *pkt_iv = esp_generate_iv (sa, payload, iv_sz);
+ void *pkt_iv = esp_generate_iv (ort, payload, iv_sz);
- if (ipsec_sa_is_set_IS_CTR (sa))
+ if (ort->is_ctr)
{
/* construct nonce in a scratch space in front of the IP header */
esp_ctr_nonce_t *nonce =
(esp_ctr_nonce_t *) (pkt_iv - hdr_len - sizeof (*nonce));
- if (ipsec_sa_is_set_IS_AEAD (sa))
+ if (ort->is_aead)
{
/* construct aad in a scratch space in front of the nonce */
aad = (u8 *) nonce - sizeof (esp_aead_t);
- esp_aad_fill (aad, esp, sa, sa->seq_hi);
- if (PREDICT_FALSE (ipsec_sa_is_set_IS_NULL_GMAC (sa)))
+ esp_aad_fill (aad, esp, ort->use_esn, ort->seq_hi);
+ if (PREDICT_FALSE (ort->is_null_gmac))
{
/* RFC-4543 ENCR_NULL_AUTH_AES_GMAC: IV is part of AAD */
crypto_start_offset -= iv_sz;
nonce->ctr = clib_host_to_net_u32 (1);
}
- nonce->salt = sa->salt;
+ nonce->salt = ort->salt;
nonce->iv = *(u64 *) pkt_iv;
iv = (u8 *) nonce;
}
flag |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
tag = vlib_buffer_get_tail (lb) - icv_sz;
crypto_total_len = esp_encrypt_chain_crypto (
- vm, ptd, sa, b, lb, icv_sz, b->data + crypto_start_offset,
+ vm, ptd, b, lb, icv_sz, b->data + crypto_start_offset,
crypto_total_len + icv_sz, 0);
}
- if (sa->integ_op_id)
+ if (ort->integ_op_id)
{
integ_start_offset -= iv_sz + sizeof (esp_header_t);
integ_total_len += iv_sz + sizeof (esp_header_t);
if (b != lb)
{
integ_total_len = esp_encrypt_chain_integ (
- vm, ptd, sa, b, lb, icv_sz,
+ vm, ptd, ort, b, lb, icv_sz,
payload - iv_sz - sizeof (esp_header_t),
payload_len + iv_sz + sizeof (esp_header_t), tag, 0);
}
- else if (ipsec_sa_is_set_USE_ESN (sa))
+ else if (ort->use_esn)
{
- u32 seq_hi = clib_net_to_host_u32 (sa->seq_hi);
+ u32 seq_hi = clib_net_to_host_u32 (ort->seq_hi);
clib_memcpy_fast (tag, &seq_hi, sizeof (seq_hi));
integ_total_len += sizeof (seq_hi);
}
u32 current_sa_index = ~0, current_sa_packets = 0;
u32 current_sa_bytes = 0, spi = 0;
u8 esp_align = 4, iv_sz = 0, icv_sz = 0;
- ipsec_sa_t *sa0 = 0;
- u8 sa_drop_no_crypto = 0;
+ ipsec_sa_outb_rt_t *ort = 0;
vlib_buffer_t *lb;
vnet_crypto_op_t **crypto_ops = &ptd->crypto_ops;
vnet_crypto_op_t **integ_ops = &ptd->integ_ops;
vnet_crypto_async_frame_t *async_frames[VNET_CRYPTO_N_OP_IDS];
- int is_async = im->async_mode;
+ int is_async = 0;
vnet_crypto_op_id_t async_op = ~0;
u16 drop_next =
(lt == VNET_LINK_IP6 ? ESP_ENCRYPT_NEXT_DROP6 :
current_sa_packets, current_sa_bytes);
current_sa_packets = current_sa_bytes = 0;
- sa0 = ipsec_sa_get (sa_index0);
+ ort = ipsec_sa_get_outb_rt_by_index (sa_index0);
current_sa_index = sa_index0;
- sa_drop_no_crypto = ((sa0->crypto_alg == IPSEC_CRYPTO_ALG_NONE &&
- sa0->integ_alg == IPSEC_INTEG_ALG_NONE) &&
- !ipsec_sa_is_set_NO_ALGO_NO_DROP (sa0));
-
vlib_prefetch_combined_counter (&ipsec_sa_counters, thread_index,
current_sa_index);
- /* fetch the second cacheline ASAP */
- clib_prefetch_load (sa0->cacheline1);
-
- spi = clib_net_to_host_u32 (sa0->spi);
- esp_align = sa0->esp_block_align;
- icv_sz = sa0->integ_icv_size;
- iv_sz = sa0->crypto_iv_size;
- is_async = im->async_mode | ipsec_sa_is_set_IS_ASYNC (sa0);
+ spi = ort->spi_be;
+ icv_sz = ort->integ_icv_size;
+ esp_align = ort->esp_block_align;
+ iv_sz = ort->cipher_iv_size;
+ is_async = ort->is_async;
}
- if (PREDICT_FALSE (sa_drop_no_crypto != 0))
+ if (PREDICT_FALSE (ort->drop_no_crypto != 0))
{
err = ESP_ENCRYPT_ERROR_NO_ENCRYPTION;
esp_encrypt_set_next_index (b[0], node, thread_index, err, n_noop,
goto trace;
}
- if (PREDICT_FALSE ((u16) ~0 == sa0->thread_index))
+ if (PREDICT_FALSE ((u16) ~0 == ort->thread_index))
{
/* this is the first packet to use this SA, claim the SA
* for this thread. this could happen simultaneously on
* another thread */
- clib_atomic_cmp_and_swap (&sa0->thread_index, ~0,
+ clib_atomic_cmp_and_swap (&ort->thread_index, ~0,
ipsec_sa_assign_thread (thread_index));
}
- if (PREDICT_FALSE (thread_index != sa0->thread_index))
+ if (PREDICT_FALSE (thread_index != ort->thread_index))
{
- vnet_buffer (b[0])->ipsec.thread_index = sa0->thread_index;
+ vnet_buffer (b[0])->ipsec.thread_index = ort->thread_index;
err = ESP_ENCRYPT_ERROR_HANDOFF;
esp_encrypt_set_next_index (b[0], node, thread_index, err, n_noop,
noop_nexts, handoff_next,
lb = vlib_get_buffer (vm, lb->next_buffer);
}
- if (PREDICT_FALSE (esp_seq_advance (sa0)))
+ if (PREDICT_FALSE (esp_seq_advance (ort)))
{
err = ESP_ENCRYPT_ERROR_SEQ_CYCLED;
esp_encrypt_set_next_index (b[0], node, thread_index, err, n_noop,
/* space for IV */
hdr_len = iv_sz;
- if (ipsec_sa_is_set_IS_TUNNEL (sa0))
+ if (ort->is_tunnel)
{
payload = vlib_buffer_get_current (b[0]);
next_hdr_ptr = esp_add_footer_and_icv (
esp = (esp_header_t *) (payload - hdr_len);
/* optional UDP header */
- if (ipsec_sa_is_set_UDP_ENCAP (sa0))
+ if (ort->udp_encap)
{
hdr_len += sizeof (udp_header_t);
- esp_fill_udp_hdr (sa0, (udp_header_t *) (payload - hdr_len),
+ esp_fill_udp_hdr (ort, (udp_header_t *) (payload - hdr_len),
payload_len_total + hdr_len);
}
/* IP header */
- if (ipsec_sa_is_set_IS_TUNNEL_V6 (sa0))
+ if (ort->is_tunnel_v6)
{
ip6_header_t *ip6;
u16 len = sizeof (ip6_header_t);
hdr_len += len;
ip6 = (ip6_header_t *) (payload - hdr_len);
- clib_memcpy_fast (ip6, &sa0->ip6_hdr, sizeof (ip6_header_t));
+ clib_memcpy_fast (ip6, &ort->ip6_hdr, sizeof (ip6_header_t));
if (VNET_LINK_IP6 == lt)
{
*next_hdr_ptr = IP_PROTOCOL_IPV6;
- tunnel_encap_fixup_6o6 (sa0->tunnel_flags,
- (const ip6_header_t *) payload,
- ip6);
+ tunnel_encap_fixup_6o6 (ort->tunnel_flags,
+ (const ip6_header_t *) payload, ip6);
}
else if (VNET_LINK_IP4 == lt)
{
*next_hdr_ptr = IP_PROTOCOL_IP_IN_IP;
- tunnel_encap_fixup_4o6 (sa0->tunnel_flags, b[0],
+ tunnel_encap_fixup_4o6 (ort->tunnel_flags, b[0],
(const ip4_header_t *) payload, ip6);
}
else if (VNET_LINK_MPLS == lt)
{
*next_hdr_ptr = IP_PROTOCOL_MPLS_IN_IP;
tunnel_encap_fixup_mplso6 (
- sa0->tunnel_flags, b[0],
+ ort->tunnel_flags, b[0],
(const mpls_unicast_header_t *) payload, ip6);
}
else
u16 len = sizeof (ip4_header_t);
hdr_len += len;
ip4 = (ip4_header_t *) (payload - hdr_len);
- clib_memcpy_fast (ip4, &sa0->ip4_hdr, sizeof (ip4_header_t));
+ clib_memcpy_fast (ip4, &ort->ip4_hdr, sizeof (ip4_header_t));
if (VNET_LINK_IP6 == lt)
{
*next_hdr_ptr = IP_PROTOCOL_IPV6;
- tunnel_encap_fixup_6o4_w_chksum (sa0->tunnel_flags,
- (const ip6_header_t *)
- payload, ip4);
+ tunnel_encap_fixup_6o4_w_chksum (
+ ort->tunnel_flags, (const ip6_header_t *) payload, ip4);
}
else if (VNET_LINK_IP4 == lt)
{
*next_hdr_ptr = IP_PROTOCOL_IP_IN_IP;
- tunnel_encap_fixup_4o4_w_chksum (sa0->tunnel_flags,
- (const ip4_header_t *)
- payload, ip4);
+ tunnel_encap_fixup_4o4_w_chksum (
+ ort->tunnel_flags, (const ip4_header_t *) payload, ip4);
}
else if (VNET_LINK_MPLS == lt)
{
*next_hdr_ptr = IP_PROTOCOL_MPLS_IN_IP;
tunnel_encap_fixup_mplso4_w_chksum (
- sa0->tunnel_flags, (const mpls_unicast_header_t *) payload,
+ ort->tunnel_flags, (const mpls_unicast_header_t *) payload,
ip4);
}
else
esp_update_ip4_hdr (ip4, len, /* is_transport */ 0, 0);
}
- if (ipsec_sa_is_set_UDP_ENCAP (sa0) &&
- ipsec_sa_is_set_IS_TUNNEL_V6 (sa0))
+ if (ort->udp_encap && ort->is_tunnel_v6)
{
i16 l3_off = b[0]->current_data - hdr_len;
i16 l4_off = l3_off + sizeof (ip6_header_t);
set_ip6_udp_cksum_offload (b[0], l3_off, l4_off);
}
- dpo = &sa0->dpo;
+ dpo = &ort->dpo;
if (!is_tun)
{
sync_next[0] = dpo->dpoi_next_node;
esp = (esp_header_t *) (payload - hdr_len);
/* optional UDP header */
- if (ipsec_sa_is_set_UDP_ENCAP (sa0))
+ if (ort->udp_encap)
{
hdr_len += sizeof (udp_header_t);
udp = (udp_header_t *) (payload - hdr_len);
if (udp)
{
udp_len = len - ip_len;
- esp_fill_udp_hdr (sa0, udp, udp_len);
+ esp_fill_udp_hdr (ort, udp, udp_len);
}
if (udp && (VNET_LINK_IP6 == lt))
}
esp->spi = spi;
- esp->seq = clib_net_to_host_u32 (sa0->seq);
+ esp->seq = clib_net_to_host_u32 (ort->seq);
if (is_async)
{
- async_op = sa0->crypto_async_enc_op_id;
+ async_op = ort->async_op_id;
/* get a frame for this op if we don't yet have one or it's full
*/
vec_add1 (ptd->async_frames, async_frames[async_op]);
}
- esp_prepare_async_frame (vm, ptd, async_frames[async_op], sa0, b[0],
+ esp_prepare_async_frame (vm, ptd, async_frames[async_op], ort, b[0],
esp, payload, payload_len, iv_sz, icv_sz,
from[b - bufs], sync_next[0], hdr_len,
async_next_node, lb);
}
else
- esp_prepare_sync_op (vm, ptd, crypto_ops, integ_ops, sa0, sa0->seq_hi,
+ esp_prepare_sync_op (vm, ptd, crypto_ops, integ_ops, ort, ort->seq_hi,
payload, payload_len, iv_sz, icv_sz, n_sync, b,
lb, hdr_len, esp);
clib_memset_u8 (tr, 0xff, sizeof (*tr));
else
{
+ ipsec_sa_t *sa = ipsec_sa_get (sa_index0);
tr->sa_index = sa_index0;
- tr->spi = sa0->spi;
- tr->seq = sa0->seq;
- tr->sa_seq_hi = sa0->seq_hi;
- tr->udp_encap = ipsec_sa_is_set_UDP_ENCAP (sa0);
- tr->crypto_alg = sa0->crypto_alg;
- tr->integ_alg = sa0->integ_alg;
+ tr->spi = sa->spi;
+ tr->seq = ort->seq;
+ tr->sa_seq_hi = ort->seq_hi;
+ tr->udp_encap = ort->udp_encap;
+ tr->crypto_alg = sa->crypto_alg;
+ tr->integ_alg = sa->integ_alg;
}
}
ipsec_rsc_in_use (ipsec_main_t * im)
{
/* return an error if crypto resources are in use */
- if (pool_elts (ipsec_sa_pool) > 0)
+ if (pool_elts (im->sa_pool) > 0)
return clib_error_return (0, "%d SA entries configured",
- pool_elts (ipsec_sa_pool));
+ pool_elts (im->sa_pool));
if (ipsec_itf_count () > 0)
return clib_error_return (0, "%d IPSec interface configured",
ipsec_itf_count ());
im->async_mode = is_enabled;
/* change SA crypto op data */
- pool_foreach (sa, ipsec_sa_pool)
+ pool_foreach (sa, im->sa_pool)
ipsec_sa_set_async_mode (sa, is_enabled);
}
const u8 iv_size;
const u8 block_align;
const u8 icv_size;
+ const u8 is_aead : 1;
+ const u8 is_ctr : 1;
+ const u8 is_null_gmac : 1;
} ipsec_main_crypto_alg_t;
typedef struct
u8 async_mode;
u16 msg_id_base;
+
+ ipsec_sa_t *sa_pool;
+ ipsec_sa_inb_rt_t **inb_sa_runtimes;
+ ipsec_sa_outb_rt_t **outb_sa_runtimes;
} ipsec_main_t;
typedef enum ipsec_format_flags_t_
u8 next_header,
const char *next_node);
+#include <vnet/ipsec/ipsec_funcs.h>
+
#endif /* __IPSEC_H__ */
/*
#define REPLY_MSG_ID_BASE ipsec_main.msg_id_base
#include <vlibapi/api_helper_macros.h>
+static inline u64
+ipsec_sa_get_inb_seq (ipsec_sa_t *sa)
+{
+ ipsec_sa_inb_rt_t *irt = ipsec_sa_get_inb_rt (sa);
+ u64 seq;
+
+ seq = irt->seq;
+ if (ipsec_sa_is_set_USE_ESN (sa))
+ seq |= (u64) irt->seq_hi << 32;
+ return seq;
+}
+
+static inline u64
+ipsec_sa_get_outb_seq (ipsec_sa_t *sa)
+{
+ ipsec_sa_outb_rt_t *ort = ipsec_sa_get_outb_rt (sa);
+ u64 seq;
+
+ seq = ort->seq;
+ if (ipsec_sa_is_set_USE_ESN (sa))
+ seq |= (u64) ort->seq_hi << 32;
+ return seq;
+}
+
static void
vl_api_ipsec_spd_add_del_t_handler (vl_api_ipsec_spd_add_del_t * mp)
{
static walk_rc_t
send_ipsec_sa_details (ipsec_sa_t * sa, void *arg)
{
+ ipsec_main_t *im = &ipsec_main;
+ ipsec_sa_inb_rt_t *irt = ipsec_sa_get_inb_rt (sa);
ipsec_dump_walk_ctx_t *ctx = arg;
vl_api_ipsec_sa_details_t *mp;
if (ipsec_sa_is_set_IS_PROTECT (sa))
{
ipsec_sa_dump_match_ctx_t ctx = {
- .sai = sa - ipsec_sa_pool,
+ .sai = sa - im->sa_pool,
.sw_if_index = ~0,
};
ipsec_tun_protect_walk (ipsec_sa_dump_match_sa, &ctx);
}
if (ipsec_sa_is_set_UDP_ENCAP (sa))
{
- mp->entry.udp_src_port = sa->udp_hdr.src_port;
- mp->entry.udp_dst_port = sa->udp_hdr.dst_port;
+ mp->entry.udp_src_port = clib_host_to_net_u16 (sa->udp_src_port);
+ mp->entry.udp_dst_port = clib_host_to_net_u16 (sa->udp_dst_port);
}
- mp->seq_outbound = clib_host_to_net_u64 (((u64) sa->seq));
- mp->last_seq_inbound = clib_host_to_net_u64 (((u64) sa->seq));
- if (ipsec_sa_is_set_USE_ESN (sa))
- {
- mp->seq_outbound |= (u64) (clib_host_to_net_u32 (sa->seq_hi));
- mp->last_seq_inbound |= (u64) (clib_host_to_net_u32 (sa->seq_hi));
- }
- if (ipsec_sa_is_set_USE_ANTI_REPLAY (sa))
- {
- mp->replay_window =
- clib_host_to_net_u64 (ipsec_sa_anti_replay_get_64b_window (sa));
- }
+ mp->seq_outbound = clib_host_to_net_u64 (ipsec_sa_get_outb_seq (sa));
+ mp->last_seq_inbound = clib_host_to_net_u64 (ipsec_sa_get_inb_seq (sa));
+
+ if (ipsec_sa_is_set_USE_ANTI_REPLAY (sa) && irt)
+ mp->replay_window =
+ clib_host_to_net_u64 (ipsec_sa_anti_replay_get_64b_window (irt));
mp->stat_index = clib_host_to_net_u32 (sa->stat_index);
static walk_rc_t
send_ipsec_sa_v2_details (ipsec_sa_t * sa, void *arg)
{
+ ipsec_main_t *im = &ipsec_main;
+ ipsec_sa_inb_rt_t *irt = ipsec_sa_get_inb_rt (sa);
ipsec_dump_walk_ctx_t *ctx = arg;
vl_api_ipsec_sa_v2_details_t *mp;
if (ipsec_sa_is_set_IS_PROTECT (sa))
{
ipsec_sa_dump_match_ctx_t ctx = {
- .sai = sa - ipsec_sa_pool,
+ .sai = sa - im->sa_pool,
.sw_if_index = ~0,
};
ipsec_tun_protect_walk (ipsec_sa_dump_match_sa, &ctx);
}
if (ipsec_sa_is_set_UDP_ENCAP (sa))
{
- mp->entry.udp_src_port = sa->udp_hdr.src_port;
- mp->entry.udp_dst_port = sa->udp_hdr.dst_port;
+ mp->entry.udp_src_port = clib_host_to_net_u16 (sa->udp_src_port);
+ mp->entry.udp_dst_port = clib_host_to_net_u16 (sa->udp_dst_port);
}
mp->entry.tunnel_flags =
tunnel_encap_decap_flags_encode (sa->tunnel.t_encap_decap_flags);
mp->entry.dscp = ip_dscp_encode (sa->tunnel.t_dscp);
- mp->seq_outbound = clib_host_to_net_u64 (((u64) sa->seq));
- mp->last_seq_inbound = clib_host_to_net_u64 (((u64) sa->seq));
- if (ipsec_sa_is_set_USE_ESN (sa))
- {
- mp->seq_outbound |= (u64) (clib_host_to_net_u32 (sa->seq_hi));
- mp->last_seq_inbound |= (u64) (clib_host_to_net_u32 (sa->seq_hi));
- }
- if (ipsec_sa_is_set_USE_ANTI_REPLAY (sa))
- {
- mp->replay_window =
- clib_host_to_net_u64 (ipsec_sa_anti_replay_get_64b_window (sa));
- }
+ mp->seq_outbound = clib_host_to_net_u64 (ipsec_sa_get_outb_seq (sa));
+ mp->last_seq_inbound = clib_host_to_net_u64 (ipsec_sa_get_inb_seq (sa));
+
+ if (ipsec_sa_is_set_USE_ANTI_REPLAY (sa) && irt)
+ mp->replay_window =
+ clib_host_to_net_u64 (ipsec_sa_anti_replay_get_64b_window (irt));
mp->stat_index = clib_host_to_net_u32 (sa->stat_index);
static walk_rc_t
send_ipsec_sa_v3_details (ipsec_sa_t *sa, void *arg)
{
+ ipsec_main_t *im = &ipsec_main;
+ ipsec_sa_inb_rt_t *irt = ipsec_sa_get_inb_rt (sa);
ipsec_dump_walk_ctx_t *ctx = arg;
vl_api_ipsec_sa_v3_details_t *mp;
if (ipsec_sa_is_set_IS_PROTECT (sa))
{
ipsec_sa_dump_match_ctx_t ctx = {
- .sai = sa - ipsec_sa_pool,
+ .sai = sa - im->sa_pool,
.sw_if_index = ~0,
};
ipsec_tun_protect_walk (ipsec_sa_dump_match_sa, &ctx);
if (ipsec_sa_is_set_UDP_ENCAP (sa))
{
- mp->entry.udp_src_port = sa->udp_hdr.src_port;
- mp->entry.udp_dst_port = sa->udp_hdr.dst_port;
+ mp->entry.udp_src_port = clib_host_to_net_u16 (sa->udp_src_port);
+ mp->entry.udp_dst_port = clib_host_to_net_u16 (sa->udp_dst_port);
}
- mp->seq_outbound = clib_host_to_net_u64 (((u64) sa->seq));
- mp->last_seq_inbound = clib_host_to_net_u64 (((u64) sa->seq));
- if (ipsec_sa_is_set_USE_ESN (sa))
- {
- mp->seq_outbound |= (u64) (clib_host_to_net_u32 (sa->seq_hi));
- mp->last_seq_inbound |= (u64) (clib_host_to_net_u32 (sa->seq_hi));
- }
- if (ipsec_sa_is_set_USE_ANTI_REPLAY (sa))
- {
- mp->replay_window =
- clib_host_to_net_u64 (ipsec_sa_anti_replay_get_64b_window (sa));
- }
+ mp->seq_outbound = clib_host_to_net_u64 (ipsec_sa_get_outb_seq (sa));
+ mp->last_seq_inbound = clib_host_to_net_u64 (ipsec_sa_get_inb_seq (sa));
+
+ if (ipsec_sa_is_set_USE_ANTI_REPLAY (sa) && irt)
+ mp->replay_window =
+ clib_host_to_net_u64 (ipsec_sa_anti_replay_get_64b_window (irt));
mp->stat_index = clib_host_to_net_u32 (sa->stat_index);
static walk_rc_t
send_ipsec_sa_v4_details (ipsec_sa_t *sa, void *arg)
{
+ ipsec_main_t *im = &ipsec_main;
+ ipsec_sa_inb_rt_t *irt = ipsec_sa_get_inb_rt (sa);
+ ipsec_sa_outb_rt_t *ort = ipsec_sa_get_outb_rt (sa);
ipsec_dump_walk_ctx_t *ctx = arg;
vl_api_ipsec_sa_v4_details_t *mp;
+ u32 thread_index = 0;
mp = vl_msg_api_alloc (sizeof (*mp));
clib_memset (mp, 0, sizeof (*mp));
if (ipsec_sa_is_set_IS_PROTECT (sa))
{
ipsec_sa_dump_match_ctx_t ctx = {
- .sai = sa - ipsec_sa_pool,
+ .sai = sa - im->sa_pool,
.sw_if_index = ~0,
};
ipsec_tun_protect_walk (ipsec_sa_dump_match_sa, &ctx);
if (ipsec_sa_is_set_UDP_ENCAP (sa))
{
- mp->entry.udp_src_port = sa->udp_hdr.src_port;
- mp->entry.udp_dst_port = sa->udp_hdr.dst_port;
+ mp->entry.udp_src_port = clib_host_to_net_u16 (sa->udp_src_port);
+ mp->entry.udp_dst_port = clib_host_to_net_u16 (sa->udp_dst_port);
}
- mp->seq_outbound = clib_host_to_net_u64 (((u64) sa->seq));
- mp->last_seq_inbound = clib_host_to_net_u64 (((u64) sa->seq));
- if (ipsec_sa_is_set_USE_ESN (sa))
- {
- mp->seq_outbound |= (u64) (clib_host_to_net_u32 (sa->seq_hi));
- mp->last_seq_inbound |= (u64) (clib_host_to_net_u32 (sa->seq_hi));
- }
- if (ipsec_sa_is_set_USE_ANTI_REPLAY (sa))
- {
- mp->replay_window =
- clib_host_to_net_u64 (ipsec_sa_anti_replay_get_64b_window (sa));
- }
+ mp->seq_outbound = clib_host_to_net_u64 (ipsec_sa_get_outb_seq (sa));
+ mp->last_seq_inbound = clib_host_to_net_u64 (ipsec_sa_get_inb_seq (sa));
+
+ if (ipsec_sa_is_set_USE_ANTI_REPLAY (sa) && irt)
+ mp->replay_window =
+ clib_host_to_net_u64 (ipsec_sa_anti_replay_get_64b_window (irt));
+
+ if (ort)
+ thread_index = ort->thread_index;
+ else if (irt)
+ thread_index = irt->thread_index;
- mp->thread_index = clib_host_to_net_u32 (sa->thread_index);
+ mp->thread_index = clib_host_to_net_u32 (thread_index);
mp->stat_index = clib_host_to_net_u32 (sa->stat_index);
vl_api_send_msg (ctx->reg, (u8 *) mp);
static walk_rc_t
send_ipsec_sa_v5_details (ipsec_sa_t *sa, void *arg)
{
+ ipsec_main_t *im = &ipsec_main;
+ ipsec_sa_inb_rt_t *irt = ipsec_sa_get_inb_rt (sa);
+ ipsec_sa_outb_rt_t *ort = ipsec_sa_get_outb_rt (sa);
ipsec_dump_walk_ctx_t *ctx = arg;
vl_api_ipsec_sa_v5_details_t *mp;
+ u32 thread_index = 0;
mp = vl_msg_api_alloc (sizeof (*mp));
clib_memset (mp, 0, sizeof (*mp));
if (ipsec_sa_is_set_IS_PROTECT (sa))
{
ipsec_sa_dump_match_ctx_t ctx = {
- .sai = sa - ipsec_sa_pool,
+ .sai = sa - im->sa_pool,
.sw_if_index = ~0,
};
ipsec_tun_protect_walk (ipsec_sa_dump_match_sa, &ctx);
if (ipsec_sa_is_set_UDP_ENCAP (sa))
{
- mp->entry.udp_src_port = sa->udp_hdr.src_port;
- mp->entry.udp_dst_port = sa->udp_hdr.dst_port;
+ mp->entry.udp_src_port = clib_host_to_net_u16 (sa->udp_src_port);
+ mp->entry.udp_dst_port = clib_host_to_net_u16 (sa->udp_dst_port);
}
- mp->seq_outbound = clib_host_to_net_u64 (((u64) sa->seq));
- mp->last_seq_inbound = clib_host_to_net_u64 (((u64) sa->seq));
- if (ipsec_sa_is_set_USE_ESN (sa))
- {
- mp->seq_outbound |= (u64) (clib_host_to_net_u32 (sa->seq_hi));
- mp->last_seq_inbound |= (u64) (clib_host_to_net_u32 (sa->seq_hi));
- }
- if (ipsec_sa_is_set_USE_ANTI_REPLAY (sa))
- {
- mp->replay_window =
- clib_host_to_net_u64 (ipsec_sa_anti_replay_get_64b_window (sa));
+ mp->seq_outbound = clib_host_to_net_u64 (ipsec_sa_get_outb_seq (sa));
+ mp->last_seq_inbound = clib_host_to_net_u64 (ipsec_sa_get_inb_seq (sa));
- mp->entry.anti_replay_window_size =
- clib_host_to_net_u32 (IPSEC_SA_ANTI_REPLAY_WINDOW_SIZE (sa));
- }
+ if (ipsec_sa_is_set_USE_ANTI_REPLAY (sa) && irt)
+ mp->replay_window =
+ clib_host_to_net_u64 (ipsec_sa_anti_replay_get_64b_window (irt));
- mp->thread_index = clib_host_to_net_u32 (sa->thread_index);
+ if (ort)
+ thread_index = ort->thread_index;
+ else if (irt)
+ thread_index = irt->thread_index;
+
+ mp->thread_index = clib_host_to_net_u32 (thread_index);
mp->stat_index = clib_host_to_net_u32 (sa->stat_index);
vl_api_send_msg (ctx->reg, (u8 *) mp);
vl_api_ipsec_select_backend_reply_t *rmp;
ipsec_protocol_t protocol;
int rv = 0;
- if (pool_elts (ipsec_sa_pool) > 0)
- {
- rv = VNET_API_ERROR_INSTANCE_IN_USE;
- goto done;
- }
+ if (pool_elts (im->sa_pool) > 0)
+ {
+ rv = VNET_API_ERROR_INSTANCE_IN_USE;
+ goto done;
+ }
rv = ipsec_proto_decode (mp->protocol, &protocol);
{
u32 sai;
- pool_foreach_index (sai, ipsec_sa_pool)
+ pool_foreach_index (sai, im->sa_pool)
{
vlib_cli_output (vm, "%U", format_ipsec_sa, sai,
(detail ? IPSEC_FORMAT_DETAIL : IPSEC_FORMAT_BRIEF));
clear_ipsec_sa_command_fn (vlib_main_t * vm,
unformat_input_t * input, vlib_cli_command_t * cmd)
{
+ ipsec_main_t *im = &ipsec_main;
u32 sai = ~0;
while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
if (~0 == sai)
{
- pool_foreach_index (sai, ipsec_sa_pool)
+ pool_foreach_index (sai, im->sa_pool)
{
ipsec_sa_clear (sai);
}
}
else
{
- if (pool_is_free_index (ipsec_sa_pool, sai))
+ if (pool_is_free_index (im->sa_pool, sai))
return clib_error_return (0, "unknown SA index: %d", sai);
else
ipsec_sa_clear (sai);
u8 *
format_ipsec_sa (u8 * s, va_list * args)
{
+ ipsec_main_t *im = &ipsec_main;
u32 sai = va_arg (*args, u32);
ipsec_format_flags_t flags = va_arg (*args, ipsec_format_flags_t);
vlib_counter_t counts;
counter_t errors;
ipsec_sa_t *sa;
+ ipsec_sa_inb_rt_t *irt;
+ ipsec_sa_outb_rt_t *ort;
- if (pool_is_free_index (ipsec_sa_pool, sai))
+ if (pool_is_free_index (im->sa_pool, sai))
{
s = format (s, "No such SA index: %d", sai);
goto done;
}
sa = ipsec_sa_get (sai);
+ irt = ipsec_sa_get_inb_rt (sa);
+ ort = ipsec_sa_get_outb_rt (sa);
s = format (s, "[%d] sa %u (0x%x) spi %u (0x%08x) protocol:%s flags:[%U]",
sai, sa->id, sa->id, sa->spi, sa->spi,
s = format (s, "\n locks %d", sa->node.fn_locks);
s = format (s, "\n salt 0x%x", clib_net_to_host_u32 (sa->salt));
- s = format (s, "\n thread-index:%d", sa->thread_index);
- s = format (s, "\n seq %u seq-hi %u", sa->seq, sa->seq_hi);
- s = format (s, "\n window-size: %llu",
- IPSEC_SA_ANTI_REPLAY_WINDOW_SIZE (sa));
- s = format (s, "\n window: Bl <- %U Tl", format_ipsec_replay_window,
- ipsec_sa_anti_replay_get_64b_window (sa));
+ if (irt)
+ s = format (s, "\n inbound thread-index:%d", irt->thread_index);
+ if (ort)
+ s = format (s, "\n outbound thread-index:%d", ort->thread_index);
+ if (irt)
+ s = format (s, "\n inbound seq %u seq-hi %u", irt->seq, irt->seq_hi);
+ if (ort)
+ s = format (s, "\n outbound seq %u seq-hi %u", ort->seq, ort->seq_hi);
+ if (irt)
+ {
+ s = format (s, "\n window-size: %llu",
+ IPSEC_SA_ANTI_REPLAY_WINDOW_SIZE (irt));
+ s = format (s, "\n window: Bl <- %U Tl", format_ipsec_replay_window,
+ ipsec_sa_anti_replay_get_64b_window (irt));
+ }
s =
format (s, "\n crypto alg %U", format_ipsec_crypto_alg, sa->crypto_alg);
if (sa->crypto_alg && (flags & IPSEC_FORMAT_INSECURE))
s = format (s, " key %U", format_ipsec_key, &sa->integ_key);
else
s = format (s, " key [redacted]");
- s = format (s, "\n UDP:[src:%d dst:%d]",
- clib_host_to_net_u16 (sa->udp_hdr.src_port),
- clib_host_to_net_u16 (sa->udp_hdr.dst_port));
+ s =
+ format (s, "\n UDP:[src:%d dst:%d]", sa->udp_src_port, sa->udp_dst_port);
vlib_get_combined_counter (&ipsec_sa_counters, sai, &counts);
s = format (s, "\n tx/rx:[packets:%Ld bytes:%Ld]", counts.packets,
--- /dev/null
+/* SPDX-License-Identifier: Apache-2.0
+ * Copyright (c) 2025 Cisco Systems, Inc.
+ */
+
+#ifndef __IPSEC_FUNCS_H__
+#define __IPSEC_FUNCS_H__
+
+#include <vlib/vlib.h>
+#include <vnet/ipsec/ipsec.h>
+
+always_inline ipsec_sa_t *
+ipsec_sa_get (u32 sa_index)
+{
+ return (pool_elt_at_index (ipsec_main.sa_pool, sa_index));
+}
+
+static_always_inline ipsec_sa_outb_rt_t *
+ipsec_sa_get_outb_rt_by_index (u32 sa_index)
+{
+ return ipsec_main.outb_sa_runtimes[sa_index];
+}
+
+static_always_inline ipsec_sa_inb_rt_t *
+ipsec_sa_get_inb_rt_by_index (u32 sa_index)
+{
+ return ipsec_main.inb_sa_runtimes[sa_index];
+}
+
+static_always_inline ipsec_sa_outb_rt_t *
+ipsec_sa_get_outb_rt (ipsec_sa_t *sa)
+{
+ return ipsec_sa_get_outb_rt_by_index (sa - ipsec_main.sa_pool);
+}
+
+static_always_inline ipsec_sa_inb_rt_t *
+ipsec_sa_get_inb_rt (ipsec_sa_t *sa)
+{
+ return ipsec_sa_get_inb_rt_by_index (sa - ipsec_main.sa_pool);
+}
+
+#endif /* __IPSEC_FUNCS_H__ */
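
A minimal usage sketch (illustrative only, not part of the patch): with the accessors above, code that only holds an SA index can reach the per-direction runtime directly; the helper name below is hypothetical.

/* Hypothetical helper, for illustration only. */
static_always_inline u32
example_outb_spi_be (u32 sa_index)
{
  ipsec_sa_outb_rt_t *ort = ipsec_sa_get_outb_rt_by_index (sa_index);
  /* the runtime pointer may be NULL once the SA has been freed */
  return ort ? ort->spi_be : 0;
}
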
/* Per-SA error counters */
vlib_simple_counter_main_t ipsec_sa_err_counters[IPSEC_SA_N_ERRORS];
-ipsec_sa_t *ipsec_sa_pool;
-
static clib_error_t *
ipsec_call_add_del_callbacks (ipsec_main_t * im, ipsec_sa_t * sa,
u32 sa_index, int is_add)
ipsec_sa_stack (ipsec_sa_t * sa)
{
ipsec_main_t *im = &ipsec_main;
+ ipsec_sa_outb_rt_t *ort = ipsec_sa_get_outb_rt (sa);
dpo_id_t tmp = DPO_INVALID;
tunnel_contribute_forwarding (&sa->tunnel, &tmp);
if (IPSEC_PROTOCOL_AH == sa->protocol)
dpo_stack_from_node ((ipsec_sa_is_set_IS_TUNNEL_V6 (sa) ?
- im->ah6_encrypt_node_index :
- im->ah4_encrypt_node_index), &sa->dpo, &tmp);
+ im->ah6_encrypt_node_index :
+ im->ah4_encrypt_node_index),
+ &ort->dpo, &tmp);
else
dpo_stack_from_node ((ipsec_sa_is_set_IS_TUNNEL_V6 (sa) ?
- im->esp6_encrypt_node_index :
- im->esp4_encrypt_node_index), &sa->dpo, &tmp);
+ im->esp6_encrypt_node_index :
+ im->esp4_encrypt_node_index),
+ &ort->dpo, &tmp);
dpo_reset (&tmp);
}
void
ipsec_sa_set_async_mode (ipsec_sa_t *sa, int is_enabled)
{
+ u32 cipher_key_index, integ_key_index;
+ vnet_crypto_op_id_t inb_cipher_op_id, outb_cipher_op_id, integ_op_id;
+ u32 is_async;
if (is_enabled)
{
- sa->crypto_key_index = sa->crypto_async_key_index;
- sa->crypto_enc_op_id = sa->crypto_async_enc_op_id;
- sa->crypto_dec_op_id = sa->crypto_async_dec_op_id;
- sa->integ_key_index = ~0;
- sa->integ_op_id = ~0;
+ if (sa->linked_key_index != ~0)
+ cipher_key_index = sa->linked_key_index;
+ else
+ cipher_key_index = sa->crypto_sync_key_index;
+
+ outb_cipher_op_id = sa->crypto_async_enc_op_id;
+ inb_cipher_op_id = sa->crypto_async_dec_op_id;
+ integ_key_index = ~0;
+ integ_op_id = ~0;
+ is_async = 1;
}
else
{
- sa->crypto_key_index = sa->crypto_sync_key_index;
- sa->crypto_enc_op_id = sa->crypto_sync_enc_op_id;
- sa->crypto_dec_op_id = sa->crypto_sync_dec_op_id;
- sa->integ_key_index = sa->integ_sync_key_index;
- sa->integ_op_id = sa->integ_sync_op_id;
+ cipher_key_index = sa->crypto_sync_key_index;
+ outb_cipher_op_id = sa->crypto_sync_enc_op_id;
+ inb_cipher_op_id = sa->crypto_sync_dec_op_id;
+ integ_key_index = sa->integ_sync_key_index;
+ integ_op_id = sa->integ_sync_op_id;
+ is_async = 0;
+ }
+
+ if (ipsec_sa_get_inb_rt (sa))
+ {
+ ipsec_sa_inb_rt_t *irt = ipsec_sa_get_inb_rt (sa);
+ irt->cipher_key_index = cipher_key_index;
+ irt->integ_key_index = integ_key_index;
+ irt->cipher_op_id = inb_cipher_op_id;
+ irt->integ_op_id = integ_op_id;
+ irt->is_async = is_async;
+ }
+
+ if (ipsec_sa_get_outb_rt (sa))
+ {
+ ipsec_sa_outb_rt_t *ort = ipsec_sa_get_outb_rt (sa);
+ ort->cipher_key_index = cipher_key_index;
+ ort->integ_key_index = integ_key_index;
+ ort->cipher_op_id = outb_cipher_op_id;
+ ort->integ_op_id = integ_op_id;
+ ort->is_async = is_async;
}
}
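
Illustrative sketch (not part of this patch): since the sync/async switch now only rewrites the per-direction runtime mirrors, a bulk toggle can go through the existing ipsec_sa_walk() iterator; the callback and wrapper names below are hypothetical.

static walk_rc_t
example_set_async_cb (ipsec_sa_t *sa, void *arg)
{
  /* flips cipher/integ op ids and key indices in irt/ort only */
  ipsec_sa_set_async_mode (sa, *(int *) arg);
  return WALK_CONTINUE;
}

static void
example_set_async_all (int is_enabled)
{
  ipsec_sa_walk (example_set_async_cb, &is_enabled);
}
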
ipsec_sa_set_crypto_alg (ipsec_sa_t * sa, ipsec_crypto_alg_t crypto_alg)
{
ipsec_main_t *im = &ipsec_main;
+ ipsec_main_crypto_alg_t *alg = im->crypto_algs + crypto_alg;
sa->crypto_alg = crypto_alg;
- sa->crypto_iv_size = im->crypto_algs[crypto_alg].iv_size;
- sa->esp_block_align = clib_max (4, im->crypto_algs[crypto_alg].block_align);
- sa->crypto_sync_enc_op_id = im->crypto_algs[crypto_alg].enc_op_id;
- sa->crypto_sync_dec_op_id = im->crypto_algs[crypto_alg].dec_op_id;
- sa->crypto_calg = im->crypto_algs[crypto_alg].alg;
- ASSERT (sa->crypto_iv_size <= ESP_MAX_IV_SIZE);
- ASSERT (sa->esp_block_align <= ESP_MAX_BLOCK_SIZE);
- if (IPSEC_CRYPTO_ALG_IS_GCM (crypto_alg) ||
- IPSEC_CRYPTO_ALG_CTR_AEAD_OTHERS (crypto_alg))
- {
- sa->integ_icv_size = im->crypto_algs[crypto_alg].icv_size;
- ipsec_sa_set_IS_CTR (sa);
- ipsec_sa_set_IS_AEAD (sa);
- }
- else if (IPSEC_CRYPTO_ALG_IS_CTR (crypto_alg))
- {
- ipsec_sa_set_IS_CTR (sa);
- }
- else if (IPSEC_CRYPTO_ALG_IS_NULL_GMAC (crypto_alg))
- {
- sa->integ_icv_size = im->crypto_algs[crypto_alg].icv_size;
- ipsec_sa_set_IS_CTR (sa);
- ipsec_sa_set_IS_AEAD (sa);
- ipsec_sa_set_IS_NULL_GMAC (sa);
- }
+ sa->crypto_sync_enc_op_id = alg->enc_op_id;
+ sa->crypto_sync_dec_op_id = alg->dec_op_id;
+ sa->crypto_calg = alg->alg;
}
void
{
ipsec_main_t *im = &ipsec_main;
sa->integ_alg = integ_alg;
- sa->integ_icv_size = im->integ_algs[integ_alg].icv_size;
sa->integ_sync_op_id = im->integ_algs[integ_alg].op_id;
sa->integ_calg = im->integ_algs[integ_alg].alg;
- ASSERT (sa->integ_icv_size <= ESP_MAX_ICV_SIZE);
}
-void
-ipsec_sa_set_async_op_ids (ipsec_sa_t * sa)
+static void
+ipsec_sa_set_async_op_ids (ipsec_sa_t *sa)
{
if (ipsec_sa_is_set_USE_ESN (sa))
{
#undef _
}
+static void
+ipsec_sa_init_runtime (ipsec_sa_t *sa)
+{
+ ipsec_main_t *im = &ipsec_main;
+ ipsec_main_crypto_alg_t *alg = im->crypto_algs + sa->crypto_alg;
+ u8 integ_icv_size;
+
+ if (alg->is_aead)
+ integ_icv_size = im->crypto_algs[sa->crypto_alg].icv_size;
+ else
+ integ_icv_size = im->integ_algs[sa->integ_alg].icv_size;
+ ASSERT (integ_icv_size <= ESP_MAX_ICV_SIZE);
+
+ if (ipsec_sa_get_inb_rt (sa))
+ {
+ ipsec_sa_inb_rt_t *irt = ipsec_sa_get_inb_rt (sa);
+ irt->anti_reply_huge = ipsec_sa_is_set_ANTI_REPLAY_HUGE (sa);
+ irt->use_anti_replay = ipsec_sa_is_set_USE_ANTI_REPLAY (sa);
+ irt->use_esn = ipsec_sa_is_set_USE_ESN (sa);
+ irt->is_tunnel = ipsec_sa_is_set_IS_TUNNEL (sa);
+ irt->is_transport =
+ !(ipsec_sa_is_set_IS_TUNNEL (sa) || ipsec_sa_is_set_IS_TUNNEL_V6 (sa));
+ irt->udp_sz = ipsec_sa_is_set_UDP_ENCAP (sa) ? sizeof (udp_header_t) : 0;
+ irt->is_ctr = alg->is_ctr;
+ irt->is_aead = alg->is_aead;
+ irt->is_null_gmac = alg->is_null_gmac;
+ irt->cipher_iv_size = im->crypto_algs[sa->crypto_alg].iv_size;
+ irt->integ_icv_size = integ_icv_size;
+ irt->salt = sa->salt;
+ irt->async_op_id = sa->crypto_async_dec_op_id;
+ ASSERT (irt->cipher_iv_size <= ESP_MAX_IV_SIZE);
+ }
+
+ if (ipsec_sa_get_outb_rt (sa))
+ {
+ ipsec_sa_outb_rt_t *ort = ipsec_sa_get_outb_rt (sa);
+ ort->use_anti_replay = ipsec_sa_is_set_USE_ANTI_REPLAY (sa);
+ ort->use_esn = ipsec_sa_is_set_USE_ESN (sa);
+ ort->is_ctr = alg->is_ctr;
+ ort->is_aead = alg->is_aead;
+ ort->is_null_gmac = alg->is_null_gmac;
+ ort->is_tunnel = ipsec_sa_is_set_IS_TUNNEL (sa);
+ ort->is_tunnel_v6 = ipsec_sa_is_set_IS_TUNNEL_V6 (sa);
+ ort->udp_encap = ipsec_sa_is_set_UDP_ENCAP (sa);
+ ort->esp_block_align =
+ clib_max (4, im->crypto_algs[sa->crypto_alg].block_align);
+ ort->cipher_iv_size = im->crypto_algs[sa->crypto_alg].iv_size;
+ ort->integ_icv_size = integ_icv_size;
+ ort->salt = sa->salt;
+ ort->spi_be = clib_host_to_net_u32 (sa->spi);
+ ort->tunnel_flags = sa->tunnel.t_encap_decap_flags;
+ ort->async_op_id = sa->crypto_async_enc_op_id;
+ ort->t_dscp = sa->tunnel.t_dscp;
+
+ ASSERT (ort->cipher_iv_size <= ESP_MAX_IV_SIZE);
+ ASSERT (ort->esp_block_align <= ESP_MAX_BLOCK_SIZE);
+ }
+ ipsec_sa_update_runtime (sa);
+}
+
+void
+ipsec_sa_update_runtime (ipsec_sa_t *sa)
+{
+ if (ipsec_sa_get_inb_rt (sa))
+ {
+ ipsec_sa_inb_rt_t *irt = ipsec_sa_get_inb_rt (sa);
+ irt->is_protect = ipsec_sa_is_set_IS_PROTECT (sa);
+ }
+ if (ipsec_sa_get_outb_rt (sa))
+ {
+ ipsec_sa_outb_rt_t *ort = ipsec_sa_get_outb_rt (sa);
+ ort->drop_no_crypto = sa->crypto_alg == IPSEC_CRYPTO_ALG_NONE &&
+ sa->integ_alg == IPSEC_INTEG_ALG_NONE &&
+ !ipsec_sa_is_set_NO_ALGO_NO_DROP (sa);
+ }
+}
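
Sketch of the intended call pattern (an assumption drawn from the tunnel-protect hunks later in this patch): flag changes on the config-side ipsec_sa_t stay invisible to the forwarding path until the runtime mirrors are refreshed.

ipsec_sa_t *sa = ipsec_sa_get (sa_index); /* sa_index: any valid SA index */
ipsec_sa_set_IS_PROTECT (sa);             /* updates the config-side flag only */
ipsec_sa_update_runtime (sa);             /* copies it into irt->is_protect */
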
+
int
ipsec_sa_update (u32 id, u16 src_port, u16 dst_port, const tunnel_t *tun,
bool is_tun)
{
ipsec_main_t *im = &ipsec_main;
ipsec_sa_t *sa;
+ ipsec_sa_outb_rt_t *ort;
u32 sa_index;
uword *p;
int rv;
return VNET_API_ERROR_NO_SUCH_ENTRY;
sa = ipsec_sa_get (p[0]);
- sa_index = sa - ipsec_sa_pool;
+ ort = ipsec_sa_get_outb_rt (sa);
+ sa_index = sa - im->sa_pool;
if (is_tun && ipsec_sa_is_set_IS_TUNNEL (sa) &&
(ip_address_cmp (&tun->t_src, &sa->tunnel.t_src) != 0 ||
tunnel_copy (tun, &sa->tunnel);
if (!ipsec_sa_is_set_IS_INBOUND (sa))
{
- dpo_reset (&sa->dpo);
+ dpo_reset (&ort->dpo);
- sa->tunnel_flags = sa->tunnel.t_encap_decap_flags;
+ ort->tunnel_flags = sa->tunnel.t_encap_decap_flags;
rv = tunnel_resolve (&sa->tunnel, FIB_NODE_TYPE_IPSEC_SA, sa_index);
if (rv)
{
hash_unset (im->sa_index_by_sa_id, sa->id);
- pool_put (ipsec_sa_pool, sa);
+ pool_put (im->sa_pool, sa);
return rv;
}
ipsec_sa_stack (sa);
{
tunnel_build_v6_hdr (&sa->tunnel,
(ipsec_sa_is_set_UDP_ENCAP (sa) ?
- IP_PROTOCOL_UDP :
- IP_PROTOCOL_IPSEC_ESP),
- &sa->ip6_hdr);
+ IP_PROTOCOL_UDP :
+ IP_PROTOCOL_IPSEC_ESP),
+ &ort->ip6_hdr);
}
else
{
tunnel_build_v4_hdr (&sa->tunnel,
(ipsec_sa_is_set_UDP_ENCAP (sa) ?
- IP_PROTOCOL_UDP :
- IP_PROTOCOL_IPSEC_ESP),
- &sa->ip4_hdr);
+ IP_PROTOCOL_UDP :
+ IP_PROTOCOL_IPSEC_ESP),
+ &ort->ip4_hdr);
}
}
}
if (ipsec_sa_is_set_UDP_ENCAP (sa))
{
- if (dst_port != IPSEC_UDP_PORT_NONE &&
- dst_port != clib_net_to_host_u16 (sa->udp_hdr.dst_port))
+ if (dst_port != IPSEC_UDP_PORT_NONE && dst_port != sa->udp_dst_port)
{
if (ipsec_sa_is_set_IS_INBOUND (sa))
{
- ipsec_unregister_udp_port (
- clib_net_to_host_u16 (sa->udp_hdr.dst_port),
- !ipsec_sa_is_set_IS_TUNNEL_V6 (sa));
+ ipsec_unregister_udp_port (sa->udp_dst_port,
+ !ipsec_sa_is_set_IS_TUNNEL_V6 (sa));
ipsec_register_udp_port (dst_port,
!ipsec_sa_is_set_IS_TUNNEL_V6 (sa));
}
- sa->udp_hdr.dst_port = clib_host_to_net_u16 (dst_port);
+ sa->udp_dst_port = dst_port;
+ if (ort)
+ ort->udp_hdr.dst_port = clib_host_to_net_u16 (dst_port);
+ }
+  if (src_port != IPSEC_UDP_PORT_NONE && src_port != sa->udp_src_port)
+ {
+ sa->udp_src_port = src_port;
+ if (ort)
+ ort->udp_hdr.src_port = clib_host_to_net_u16 (src_port);
}
- if (src_port != IPSEC_UDP_PORT_NONE &&
- src_port != clib_net_to_host_u16 (sa->udp_hdr.src_port))
- sa->udp_hdr.src_port = clib_host_to_net_u16 (src_port);
}
return (0);
}
{
vlib_main_t *vm = vlib_get_main ();
ipsec_main_t *im = &ipsec_main;
+ ipsec_main_crypto_alg_t *alg = im->crypto_algs + crypto_alg;
+ ipsec_sa_inb_rt_t *irt;
+ ipsec_sa_outb_rt_t *ort;
clib_error_t *err;
ipsec_sa_t *sa;
u32 sa_index;
if (getrandom (rand, sizeof (rand), 0) != sizeof (rand))
return VNET_API_ERROR_INIT_FAILED;
- pool_get_aligned_zero (ipsec_sa_pool, sa, CLIB_CACHE_LINE_BYTES);
+ pool_get_aligned_zero (im->sa_pool, sa, CLIB_CACHE_LINE_BYTES);
+ sa_index = sa - im->sa_pool;
+ vec_validate (im->inb_sa_runtimes, sa_index);
+ vec_validate (im->outb_sa_runtimes, sa_index);
- clib_pcg64i_srandom_r (&sa->iv_prng, rand[0], rand[1]);
+ irt = clib_mem_alloc_aligned (sizeof (ipsec_sa_inb_rt_t),
+ _Alignof (ipsec_sa_inb_rt_t));
+ ort = clib_mem_alloc_aligned (sizeof (ipsec_sa_outb_rt_t),
+ _Alignof (ipsec_sa_outb_rt_t));
+ im->inb_sa_runtimes[sa_index] = irt;
+ im->outb_sa_runtimes[sa_index] = ort;
+ clib_memset (irt, 0, sizeof (ipsec_sa_inb_rt_t));
+ clib_memset (ort, 0, sizeof (ipsec_sa_outb_rt_t));
+
+ clib_pcg64i_srandom_r (&ort->iv_prng, rand[0], rand[1]);
fib_node_init (&sa->node, FIB_NODE_TYPE_IPSEC_SA);
fib_node_lock (&sa->node);
- sa_index = sa - ipsec_sa_pool;
vlib_validate_combined_counter (&ipsec_sa_counters, sa_index);
vlib_zero_combined_counter (&ipsec_sa_counters, sa_index);
sa->protocol = proto;
sa->flags = flags;
sa->salt = salt;
- sa->thread_index = (vlib_num_workers ()) ? ~0 : 0;
+ if (irt)
+ irt->thread_index = (vlib_num_workers ()) ? ~0 : 0;
+ if (ort)
+ ort->thread_index = (vlib_num_workers ()) ? ~0 : 0;
+
if (integ_alg != IPSEC_INTEG_ALG_NONE)
{
ipsec_sa_set_integ_alg (sa, integ_alg);
vm, im->crypto_algs[crypto_alg].alg, (u8 *) ck->data, ck->len);
if (~0 == sa->crypto_sync_key_index)
{
- pool_put (ipsec_sa_pool, sa);
+ pool_put (im->sa_pool, sa);
return VNET_API_ERROR_KEY_LENGTH;
}
}
vm, im->integ_algs[integ_alg].alg, (u8 *) ik->data, ik->len);
if (~0 == sa->integ_sync_key_index)
{
- pool_put (ipsec_sa_pool, sa);
+ pool_put (im->sa_pool, sa);
return VNET_API_ERROR_KEY_LENGTH;
}
}
- if (sa->crypto_async_enc_op_id && !ipsec_sa_is_set_IS_AEAD (sa))
- sa->crypto_async_key_index =
+ if (sa->crypto_async_enc_op_id && alg->is_aead == 0)
+ sa->linked_key_index =
vnet_crypto_key_add_linked (vm, sa->crypto_sync_key_index,
sa->integ_sync_key_index); // AES-CBC & HMAC
else
- sa->crypto_async_key_index = sa->crypto_sync_key_index;
+ sa->linked_key_index = ~0;
if (im->async_mode)
{
if (err)
{
clib_warning ("%v", err->what);
- pool_put (ipsec_sa_pool, sa);
+ pool_put (im->sa_pool, sa);
return VNET_API_ERROR_UNIMPLEMENTED;
}
err = ipsec_call_add_del_callbacks (im, sa, sa_index, 1);
if (err)
{
- pool_put (ipsec_sa_pool, sa);
+ pool_put (im->sa_pool, sa);
return VNET_API_ERROR_SYSCALL_ERROR_1;
}
if (ipsec_sa_is_set_IS_TUNNEL (sa) && !ipsec_sa_is_set_IS_INBOUND (sa))
{
- sa->tunnel_flags = sa->tunnel.t_encap_decap_flags;
rv = tunnel_resolve (&sa->tunnel, FIB_NODE_TYPE_IPSEC_SA, sa_index);
if (rv)
{
- pool_put (ipsec_sa_pool, sa);
+ pool_put (im->sa_pool, sa);
return rv;
}
ipsec_sa_stack (sa);
(ipsec_sa_is_set_UDP_ENCAP (sa) ?
IP_PROTOCOL_UDP :
IP_PROTOCOL_IPSEC_ESP),
- &sa->ip6_hdr);
+ &ort->ip6_hdr);
}
else
{
(ipsec_sa_is_set_UDP_ENCAP (sa) ?
IP_PROTOCOL_UDP :
IP_PROTOCOL_IPSEC_ESP),
- &sa->ip4_hdr);
+ &ort->ip4_hdr);
}
}
if (ipsec_sa_is_set_UDP_ENCAP (sa))
{
if (dst_port == IPSEC_UDP_PORT_NONE)
- sa->udp_hdr.dst_port = clib_host_to_net_u16 (UDP_DST_PORT_ipsec);
- else
- sa->udp_hdr.dst_port = clib_host_to_net_u16 (dst_port);
-
+ dst_port = UDP_DST_PORT_ipsec;
if (src_port == IPSEC_UDP_PORT_NONE)
- sa->udp_hdr.src_port = clib_host_to_net_u16 (UDP_DST_PORT_ipsec);
- else
- sa->udp_hdr.src_port = clib_host_to_net_u16 (src_port);
+ src_port = UDP_DST_PORT_ipsec;
+ sa->udp_dst_port = dst_port;
+ sa->udp_src_port = src_port;
+ if (ort)
+ {
+ ort->udp_hdr.src_port = clib_host_to_net_u16 (src_port);
+ ort->udp_hdr.dst_port = clib_host_to_net_u16 (dst_port);
+ }
if (ipsec_sa_is_set_IS_INBOUND (sa))
- ipsec_register_udp_port (clib_host_to_net_u16 (sa->udp_hdr.dst_port),
- !ipsec_sa_is_set_IS_TUNNEL_V6 (sa));
+ ipsec_register_udp_port (dst_port, !ipsec_sa_is_set_IS_TUNNEL_V6 (sa));
}
/* window size rounded up to next power of 2 */
if (ipsec_sa_is_set_ANTI_REPLAY_HUGE (sa))
{
anti_replay_window_size = 1 << max_log2 (anti_replay_window_size);
- sa->replay_window_huge =
+ irt->replay_window_huge =
clib_bitmap_set_region (0, 0, 1, anti_replay_window_size);
}
else
{
- sa->replay_window = ~0;
+ irt->replay_window = ~0;
}
hash_set (im->sa_index_by_sa_id, sa->id, sa_index);
if (sa_out_index)
*sa_out_index = sa_index;
+ ipsec_sa_init_runtime (sa);
+
return (0);
}
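
Illustrative invariant (not part of the patch): the allocation path above keeps the SA pool and the two runtime vectors indexed in lockstep, so for any live ipsec_sa_t *sa either handle can be recovered from the other.

ipsec_main_t *im = &ipsec_main;
u32 sa_index = sa - im->sa_pool;
ASSERT (im->inb_sa_runtimes[sa_index] == ipsec_sa_get_inb_rt (sa));
ASSERT (im->outb_sa_runtimes[sa_index] == ipsec_sa_get_outb_rt (sa));
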
vlib_main_t *vm = vlib_get_main ();
ipsec_main_t *im = &ipsec_main;
u32 sa_index;
+ ipsec_sa_inb_rt_t *irt = ipsec_sa_get_inb_rt (sa);
+ ipsec_sa_outb_rt_t *ort = ipsec_sa_get_outb_rt (sa);
- sa_index = sa - ipsec_sa_pool;
+ sa_index = sa - im->sa_pool;
hash_unset (im->sa_index_by_sa_id, sa->id);
tunnel_unresolve (&sa->tunnel);
/* no recovery possible when deleting an SA */
(void) ipsec_call_add_del_callbacks (im, sa, sa_index, 0);
- if (ipsec_sa_is_set_IS_ASYNC (sa))
- {
- if (!ipsec_sa_is_set_IS_AEAD (sa))
- vnet_crypto_key_del (vm, sa->crypto_async_key_index);
- }
+ if (sa->linked_key_index != ~0)
+ vnet_crypto_key_del (vm, sa->linked_key_index);
if (ipsec_sa_is_set_UDP_ENCAP (sa) && ipsec_sa_is_set_IS_INBOUND (sa))
- ipsec_unregister_udp_port (clib_net_to_host_u16 (sa->udp_hdr.dst_port),
+ ipsec_unregister_udp_port (sa->udp_dst_port,
!ipsec_sa_is_set_IS_TUNNEL_V6 (sa));
if (ipsec_sa_is_set_IS_TUNNEL (sa) && !ipsec_sa_is_set_IS_INBOUND (sa))
- dpo_reset (&sa->dpo);
+ dpo_reset (&ort->dpo);
if (sa->crypto_alg != IPSEC_CRYPTO_ALG_NONE)
vnet_crypto_key_del (vm, sa->crypto_sync_key_index);
if (sa->integ_alg != IPSEC_INTEG_ALG_NONE)
vnet_crypto_key_del (vm, sa->integ_sync_key_index);
if (ipsec_sa_is_set_ANTI_REPLAY_HUGE (sa))
- clib_bitmap_free (sa->replay_window_huge);
- pool_put (ipsec_sa_pool, sa);
+ if (irt && irt->replay_window_huge)
+ clib_bitmap_free (irt->replay_window_huge);
+ foreach_pointer (p, irt, ort)
+ if (p)
+ clib_mem_free (p);
+
+ im->inb_sa_runtimes[sa_index] = 0;
+ im->outb_sa_runtimes[sa_index] = 0;
+
+ pool_put (im->sa_pool, sa);
}
int
ipsec_main_t *im = &ipsec_main;
uword *p;
ipsec_sa_t *sa;
+ ipsec_sa_inb_rt_t *irt;
+ ipsec_sa_outb_rt_t *ort;
+ u16 thread_index;
p = hash_get (im->sa_index_by_sa_id, id);
if (!p)
return VNET_API_ERROR_INVALID_VALUE;
sa = ipsec_sa_get (p[0]);
+ irt = ipsec_sa_get_inb_rt (sa);
+ ort = ipsec_sa_get_outb_rt (sa);
if (!bind)
{
- sa->thread_index = ~0;
- return 0;
+ thread_index = ~0;
+ goto done;
}
if (worker >= vlib_num_workers ())
return VNET_API_ERROR_INVALID_WORKER;
- sa->thread_index = vlib_get_worker_thread_index (worker);
+ thread_index = vlib_get_worker_thread_index (worker);
+done:
+ if (irt)
+ irt->thread_index = thread_index;
+ if (ort)
+ ort->thread_index = thread_index;
return 0;
}
void
ipsec_sa_walk (ipsec_sa_walk_cb_t cb, void *ctx)
{
+ ipsec_main_t *im = &ipsec_main;
ipsec_sa_t *sa;
- pool_foreach (sa, ipsec_sa_pool)
+ pool_foreach (sa, im->sa_pool)
{
if (WALK_CONTINUE != cb (sa, ctx))
break;
IPSEC_CRYPTO_N_ALG,
} __clib_packed ipsec_crypto_alg_t;
-#define IPSEC_CRYPTO_ALG_IS_NULL_GMAC(_alg) \
- ((_alg == IPSEC_CRYPTO_ALG_AES_NULL_GMAC_128) || \
- (_alg == IPSEC_CRYPTO_ALG_AES_NULL_GMAC_192) || \
- (_alg == IPSEC_CRYPTO_ALG_AES_NULL_GMAC_256))
-
-#define IPSEC_CRYPTO_ALG_IS_GCM(_alg) \
- (((_alg == IPSEC_CRYPTO_ALG_AES_GCM_128) || \
- (_alg == IPSEC_CRYPTO_ALG_AES_GCM_192) || \
- (_alg == IPSEC_CRYPTO_ALG_AES_GCM_256)))
-
-#define IPSEC_CRYPTO_ALG_IS_CTR(_alg) \
- (((_alg == IPSEC_CRYPTO_ALG_AES_CTR_128) || \
- (_alg == IPSEC_CRYPTO_ALG_AES_CTR_192) || \
- (_alg == IPSEC_CRYPTO_ALG_AES_CTR_256)))
-
-#define IPSEC_CRYPTO_ALG_CTR_AEAD_OTHERS(_alg) \
- (_alg == IPSEC_CRYPTO_ALG_CHACHA20_POLY1305)
-
#define foreach_ipsec_integ_alg \
_ (0, NONE, "none") \
_ (1, MD5_96, "md5-96") /* RFC2403 */ \
_ (16, UDP_ENCAP, "udp-encap") \
_ (32, IS_PROTECT, "Protect") \
_ (64, IS_INBOUND, "inbound") \
- _ (128, IS_AEAD, "aead") \
- _ (256, IS_CTR, "ctr") \
_ (512, IS_ASYNC, "async") \
_ (1024, NO_ALGO_NO_DROP, "no-algo-no-drop") \
- _ (2048, IS_NULL_GMAC, "null-gmac") \
_ (4096, ANTI_REPLAY_HUGE, "anti-replay-huge")
typedef enum ipsec_sad_flags_t_
typedef struct
{
CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
-
- clib_pcg64i_random_t iv_prng;
-
+ u16 is_aead : 1;
+ u16 is_ctr : 1;
+ u16 is_null_gmac : 1;
+ u16 use_esn : 1;
+ u16 use_anti_replay : 1;
+ u16 anti_reply_huge : 1;
+ u16 is_protect : 1;
+ u16 is_tunnel : 1;
+ u16 is_transport : 1;
+ u16 is_async : 1;
+ u16 cipher_op_id;
+ u16 integ_op_id;
+ u8 cipher_iv_size;
+ u8 integ_icv_size;
+ u8 udp_sz;
+ u16 thread_index;
+ u32 salt;
+ u32 seq;
+ u32 seq_hi;
+ u16 async_op_id;
+ vnet_crypto_key_index_t cipher_key_index;
+ vnet_crypto_key_index_t integ_key_index;
union
{
u64 replay_window;
clib_bitmap_t *replay_window_huge;
};
- dpo_id_t dpo;
-
- vnet_crypto_key_index_t crypto_key_index;
- vnet_crypto_key_index_t integ_key_index;
-
- u32 spi;
- u32 seq;
- u32 seq_hi;
+} ipsec_sa_inb_rt_t;
- u16 crypto_enc_op_id;
- u16 crypto_dec_op_id;
+typedef struct
+{
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
+ u16 is_aead : 1;
+ u16 is_ctr : 1;
+ u16 is_null_gmac : 1;
+ u16 is_tunnel : 1;
+ u16 is_tunnel_v6 : 1;
+ u16 udp_encap : 1;
+ u16 use_esn : 1;
+ u16 use_anti_replay : 1;
+ u16 drop_no_crypto : 1;
+ u16 is_async : 1;
+ clib_pcg64i_random_t iv_prng;
+ u16 cipher_op_id;
u16 integ_op_id;
- ipsec_sa_flags_t flags;
+ u8 cipher_iv_size;
+ u8 esp_block_align;
+ u8 integ_icv_size;
u16 thread_index;
-
- u16 integ_icv_size : 6;
- u16 crypto_iv_size : 5;
- u16 esp_block_align : 5;
-
- CLIB_CACHE_LINE_ALIGN_MARK (cacheline1);
-
+ u32 salt;
+ u32 seq;
+ u32 seq_hi;
+ u32 spi_be;
+ ip_dscp_t t_dscp;
+ dpo_id_t dpo;
+ tunnel_encap_decap_flags_t tunnel_flags;
+ u16 async_op_id;
+ vnet_crypto_key_index_t cipher_key_index;
+ vnet_crypto_key_index_t integ_key_index;
union
{
ip4_header_t ip4_hdr;
ip6_header_t ip6_hdr;
};
udp_header_t udp_hdr;
+} ipsec_sa_outb_rt_t;
+
+typedef struct
+{
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
+
+ u32 spi;
+
+ ipsec_sa_flags_t flags;
+
+ u16 udp_src_port;
+ u16 udp_dst_port;
/* Salt used in CTR modes (incl. GCM) - stored in network byte order */
u32 salt;
ipsec_protocol_t protocol;
- tunnel_encap_decap_flags_t tunnel_flags;
- u8 __pad[2];
-
- /* data accessed by dataplane code should be above this comment */
- CLIB_CACHE_LINE_ALIGN_MARK (cacheline2);
/* Elements with u64 size multiples */
tunnel_t tunnel;
vnet_crypto_alg_t crypto_calg;
u32 crypto_sync_key_index;
u32 integ_sync_key_index;
- u32 crypto_async_key_index;
+ u32 linked_key_index;
/* elements with u16 size */
u16 crypto_sync_enc_op_id;
STATIC_ASSERT (ESP_MAX_ICV_SIZE < (1 << 6), "integer icv overflow");
STATIC_ASSERT (ESP_MAX_IV_SIZE < (1 << 5), "esp iv overflow");
STATIC_ASSERT (ESP_MAX_BLOCK_SIZE < (1 << 5), "esp alignment overflow");
-STATIC_ASSERT_OFFSET_OF (ipsec_sa_t, cacheline1, CLIB_CACHE_LINE_BYTES);
-STATIC_ASSERT_OFFSET_OF (ipsec_sa_t, cacheline2, 2 * CLIB_CACHE_LINE_BYTES);
-
-/**
- * Pool of IPSec SAs
- */
-extern ipsec_sa_t *ipsec_sa_pool;
/*
* Ensure that the IPsec data does not overlap with the IP data in
extern int ipsec_sa_update (u32 id, u16 src_port, u16 dst_port,
const tunnel_t *tun, bool is_tun);
+extern void ipsec_sa_update_runtime (ipsec_sa_t *sa);
extern int ipsec_sa_add_and_lock (
u32 id, u32 spi, ipsec_protocol_t proto, ipsec_crypto_alg_t crypto_alg,
const ipsec_key_t *ck, ipsec_integ_alg_t integ_alg, const ipsec_key_t *ik,
* Anti Replay definitions
*/
-#define IPSEC_SA_ANTI_REPLAY_WINDOW_SIZE(_sa) \
- (u32) (PREDICT_FALSE (ipsec_sa_is_set_ANTI_REPLAY_HUGE (_sa)) ? \
- clib_bitmap_bytes (_sa->replay_window_huge) * 8 : \
- BITS (_sa->replay_window))
+#define IPSEC_SA_ANTI_REPLAY_WINDOW_SIZE(_irt) \
+ (u32) (PREDICT_FALSE (_irt->anti_reply_huge) ? \
+ clib_bitmap_bytes (_irt->replay_window_huge) * 8 : \
+ BITS (_irt->replay_window))
-#define IPSEC_SA_ANTI_REPLAY_WINDOW_SIZE_KNOWN_WIN(_sa, _is_huge) \
- (u32) (_is_huge ? clib_bitmap_bytes (_sa->replay_window_huge) * 8 : \
- BITS (_sa->replay_window))
+#define IPSEC_SA_ANTI_REPLAY_WINDOW_SIZE_KNOWN_WIN(_irt, _is_huge) \
+ (u32) (_is_huge ? clib_bitmap_bytes (_irt->replay_window_huge) * 8 : \
+ BITS (_irt->replay_window))
-#define IPSEC_SA_ANTI_REPLAY_WINDOW_N_SEEN(_sa) \
- (u64) (PREDICT_FALSE (ipsec_sa_is_set_ANTI_REPLAY_HUGE (_sa)) ? \
- clib_bitmap_count_set_bits (_sa->replay_window_huge) : \
- count_set_bits (_sa->replay_window))
+#define IPSEC_SA_ANTI_REPLAY_WINDOW_N_SEEN(_irt) \
+ (u64) (PREDICT_FALSE (_irt->anti_reply_huge) ? \
+ clib_bitmap_count_set_bits (_irt->replay_window_huge) : \
+ count_set_bits (_irt->replay_window))
-#define IPSEC_SA_ANTI_REPLAY_WINDOW_N_SEEN_KNOWN_WIN(_sa, _is_huge) \
- (u64) (_is_huge ? clib_bitmap_count_set_bits (_sa->replay_window_huge) : \
- count_set_bits (_sa->replay_window))
+#define IPSEC_SA_ANTI_REPLAY_WINDOW_N_SEEN_KNOWN_WIN(_irt, _is_huge) \
+ (u64) (_is_huge ? clib_bitmap_count_set_bits (_irt->replay_window_huge) : \
+ count_set_bits (_irt->replay_window))
-#define IPSEC_SA_ANTI_REPLAY_WINDOW_MAX_INDEX(_sa) \
- (u32) (IPSEC_SA_ANTI_REPLAY_WINDOW_SIZE (_sa) - 1)
+#define IPSEC_SA_ANTI_REPLAY_WINDOW_MAX_INDEX(_irt) \
+ (u32) (IPSEC_SA_ANTI_REPLAY_WINDOW_SIZE (_irt) - 1)
-#define IPSEC_SA_ANTI_REPLAY_WINDOW_MAX_INDEX_KNOWN_WIN(_sa, _is_huge) \
- (u32) (IPSEC_SA_ANTI_REPLAY_WINDOW_SIZE (_sa, _is_huge) - 1)
+#define IPSEC_SA_ANTI_REPLAY_WINDOW_MAX_INDEX_KNOWN_WIN(_irt, _is_huge) \
+  (u32) (IPSEC_SA_ANTI_REPLAY_WINDOW_SIZE_KNOWN_WIN (_irt, _is_huge) - 1)
/*
* sequence number less than the lower bound are outside of the window
IPSEC_SA_ANTI_REPLAY_WINDOW_SIZE_KNOWN_WIN (_sa, _is_huge) + 1)
always_inline u64
-ipsec_sa_anti_replay_get_64b_window (const ipsec_sa_t *sa)
+ipsec_sa_anti_replay_get_64b_window (const ipsec_sa_inb_rt_t *irt)
{
- if (!ipsec_sa_is_set_ANTI_REPLAY_HUGE (sa))
- return sa->replay_window;
+ if (!irt->anti_reply_huge)
+ return irt->replay_window;
u64 w;
- u32 window_size = IPSEC_SA_ANTI_REPLAY_WINDOW_SIZE (sa);
- u32 tl_win_index = sa->seq & (window_size - 1);
+ u32 window_size = IPSEC_SA_ANTI_REPLAY_WINDOW_SIZE (irt);
+ u32 tl_win_index = irt->seq & (window_size - 1);
if (PREDICT_TRUE (tl_win_index >= 63))
- return clib_bitmap_get_multiple (sa->replay_window_huge, tl_win_index - 63,
- 64);
+ return clib_bitmap_get_multiple (irt->replay_window_huge,
+ tl_win_index - 63, 64);
- w = clib_bitmap_get_multiple_no_check (sa->replay_window_huge, 0,
+ w = clib_bitmap_get_multiple_no_check (irt->replay_window_huge, 0,
tl_win_index + 1)
<< (63 - tl_win_index);
- w |= clib_bitmap_get_multiple_no_check (sa->replay_window_huge,
+ w |= clib_bitmap_get_multiple_no_check (irt->replay_window_huge,
window_size - 63 + tl_win_index,
63 - tl_win_index);
}
always_inline int
-ipsec_sa_anti_replay_check (const ipsec_sa_t *sa, u32 seq, bool ar_huge)
+ipsec_sa_anti_replay_check (const ipsec_sa_inb_rt_t *irt, u32 seq,
+ bool ar_huge)
{
- u32 window_size = IPSEC_SA_ANTI_REPLAY_WINDOW_SIZE_KNOWN_WIN (sa, ar_huge);
+ u32 window_size = IPSEC_SA_ANTI_REPLAY_WINDOW_SIZE_KNOWN_WIN (irt, ar_huge);
/* we assume that the packet is in the window.
   * if the packet falls left (irt->seq - seq >= window size),
* the result is wrong */
if (ar_huge)
- return clib_bitmap_get (sa->replay_window_huge, seq & (window_size - 1));
+ return clib_bitmap_get (irt->replay_window_huge, seq & (window_size - 1));
else
- return (sa->replay_window >> (window_size + seq - sa->seq - 1)) & 1;
+ return (irt->replay_window >> (window_size + seq - irt->seq - 1)) & 1;
return 0;
}
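
Worked example for the fixed-size (non-huge) branch above, assuming a 64-bit window and irt->seq == 100:

/*   seq == 100 -> tests bit (64 + 100 - 100 - 1) = 63, the newest slot
 *   seq ==  90 -> tests bit (64 +  90 - 100 - 1) = 53
 *   seq ==  37 -> tests bit (64 +  37 - 100 - 1) =  0, the oldest slot
 *   still inside the window (100 - 63). */
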
* the high sequence number is set.
*/
always_inline int
-ipsec_sa_anti_replay_and_sn_advance (const ipsec_sa_t *sa, u32 seq,
+ipsec_sa_anti_replay_and_sn_advance (const ipsec_sa_inb_rt_t *irt, u32 seq,
u32 hi_seq_used, bool post_decrypt,
u32 *hi_seq_req, bool ar_huge)
{
ASSERT ((post_decrypt == false) == (hi_seq_req != 0));
- u32 window_size = IPSEC_SA_ANTI_REPLAY_WINDOW_SIZE_KNOWN_WIN (sa, ar_huge);
+ u32 window_size = IPSEC_SA_ANTI_REPLAY_WINDOW_SIZE_KNOWN_WIN (irt, ar_huge);
u32 window_lower_bound =
- IPSEC_SA_ANTI_REPLAY_WINDOW_LOWER_BOUND_KNOWN_WIN (sa, ar_huge);
+ IPSEC_SA_ANTI_REPLAY_WINDOW_LOWER_BOUND_KNOWN_WIN (irt, ar_huge);
- if (!ipsec_sa_is_set_USE_ESN (sa))
+ if (!irt->use_esn)
{
if (hi_seq_req)
/* no ESN, therefore the hi-seq is always 0 */
*hi_seq_req = 0;
- if (!ipsec_sa_is_set_USE_ANTI_REPLAY (sa))
+ if (!irt->use_anti_replay)
return 0;
- if (PREDICT_TRUE (seq > sa->seq))
+ if (PREDICT_TRUE (seq > irt->seq))
return 0;
/* does the packet fall out on the left of the window */
- if (sa->seq >= seq + window_size)
+ if (irt->seq >= seq + window_size)
return 1;
- return ipsec_sa_anti_replay_check (sa, seq, ar_huge);
+ return ipsec_sa_anti_replay_check (irt, seq, ar_huge);
}
- if (!ipsec_sa_is_set_USE_ANTI_REPLAY (sa))
+ if (!irt->use_anti_replay)
{
/* there's no AR configured for this SA, but in order
* to know whether a packet has wrapped the hi ESN we need
*/
if (hi_seq_req)
{
- if (seq >= sa->seq)
+ if (seq >= irt->seq)
            /* The packet's sequence number is larger than the SA's.
             * that can't be a wrap - unless we lost more than
* 2^32 packets ... how could we know? */
- *hi_seq_req = sa->seq_hi;
+ *hi_seq_req = irt->seq_hi;
else
{
/* The packet's SN is less than the SAs, so either the SN has
* wrapped or the SN is just old. */
- if (sa->seq - seq > (1 << 30))
+ if (irt->seq - seq > (1 << 30))
/* It's really really really old => it wrapped */
- *hi_seq_req = sa->seq_hi + 1;
+ *hi_seq_req = irt->seq_hi + 1;
else
- *hi_seq_req = sa->seq_hi;
+ *hi_seq_req = irt->seq_hi;
}
}
/*
return 0;
}
- if (PREDICT_TRUE (window_size > 0 && sa->seq >= window_size - 1))
+ if (PREDICT_TRUE (window_size > 0 && irt->seq >= window_size - 1))
{
/*
* the last sequence number VPP received is more than one
*/
if (post_decrypt)
{
- if (hi_seq_used == sa->seq_hi)
+ if (hi_seq_used == irt->seq_hi)
            /* the high sequence number used to successfully decrypt this
* packet is the same as the last-sequence number of the SA.
* that means this packet did not cause a wrap.
/* pre-decrypt it might be the packet that causes a wrap, we
* need to decrypt it to find out */
if (hi_seq_req)
- *hi_seq_req = sa->seq_hi + 1;
+ *hi_seq_req = irt->seq_hi + 1;
return 0;
}
}
* end of the window.
*/
if (hi_seq_req)
- *hi_seq_req = sa->seq_hi;
- if (seq <= sa->seq)
+ *hi_seq_req = irt->seq_hi;
+ if (seq <= irt->seq)
/*
* The received seq number is within bounds of the window
* check if it's a duplicate
*/
- return ipsec_sa_anti_replay_check (sa, seq, ar_huge);
+ return ipsec_sa_anti_replay_check (irt, seq, ar_huge);
else
/*
* The received sequence number is greater than the window
/*
* the sequence number is less than the lower bound.
*/
- if (seq <= sa->seq)
+ if (seq <= irt->seq)
{
/*
* the packet is within the window upper bound.
* check for duplicates.
*/
if (hi_seq_req)
- *hi_seq_req = sa->seq_hi;
- return ipsec_sa_anti_replay_check (sa, seq, ar_huge);
+ *hi_seq_req = irt->seq_hi;
+ return ipsec_sa_anti_replay_check (irt, seq, ar_huge);
}
else
{
* we've lost close to 2^32 packets.
*/
if (hi_seq_req)
- *hi_seq_req = sa->seq_hi;
+ *hi_seq_req = irt->seq_hi;
return 0;
}
}
* received packet, the SA has moved on to a higher sequence number.
*/
if (hi_seq_req)
- *hi_seq_req = sa->seq_hi - 1;
- return ipsec_sa_anti_replay_check (sa, seq, ar_huge);
+ *hi_seq_req = irt->seq_hi - 1;
+ return ipsec_sa_anti_replay_check (irt, seq, ar_huge);
}
}
}
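
Worked example (illustrative) for the ESN estimate in the no-anti-replay branch of the function above:

/* irt->seq = 0xfffffff0, received seq = 0x00000010:
 * seq < irt->seq and irt->seq - seq = 0xffffffe0 > (1 << 30), so the low
 * 32 bits are assumed to have wrapped and *hi_seq_req is set to
 * irt->seq_hi + 1. */
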
always_inline u32
-ipsec_sa_anti_replay_window_shift (ipsec_sa_t *sa, u32 inc, bool ar_huge)
+ipsec_sa_anti_replay_window_shift (ipsec_sa_inb_rt_t *irt, u32 inc,
+ bool ar_huge)
{
u32 n_lost = 0;
u32 seen = 0;
- u32 window_size = IPSEC_SA_ANTI_REPLAY_WINDOW_SIZE_KNOWN_WIN (sa, ar_huge);
+ u32 window_size = IPSEC_SA_ANTI_REPLAY_WINDOW_SIZE_KNOWN_WIN (irt, ar_huge);
if (inc < window_size)
{
if (ar_huge)
{
/* the number of packets we saw in this section of the window */
- clib_bitmap_t *window = sa->replay_window_huge;
- u32 window_lower_bound = (sa->seq + 1) & (window_size - 1);
+ clib_bitmap_t *window = irt->replay_window_huge;
+ u32 window_lower_bound = (irt->seq + 1) & (window_size - 1);
u32 window_next_lower_bound =
(window_lower_bound + inc) & (window_size - 1);
}
clib_bitmap_set_no_check (window,
- (sa->seq + inc) & (window_size - 1), 1);
+ (irt->seq + inc) & (window_size - 1), 1);
}
else
{
           * of the window that we will right shift off the end
           * as a result of this increment
*/
- u64 old = sa->replay_window & pow2_mask (inc);
+ u64 old = irt->replay_window & pow2_mask (inc);
/* the number of packets we saw in this section of the window */
seen = count_set_bits (old);
- sa->replay_window =
- ((sa->replay_window) >> inc) | (1ULL << (window_size - 1));
+ irt->replay_window =
+ ((irt->replay_window) >> inc) | (1ULL << (window_size - 1));
}
/*
{
/* holes in the replay window are lost packets */
n_lost = window_size -
- IPSEC_SA_ANTI_REPLAY_WINDOW_N_SEEN_KNOWN_WIN (sa, ar_huge);
+ IPSEC_SA_ANTI_REPLAY_WINDOW_N_SEEN_KNOWN_WIN (irt, ar_huge);
/* any sequence numbers that now fall outside the window
* are forever lost */
if (PREDICT_FALSE (ar_huge))
{
- clib_bitmap_zero (sa->replay_window_huge);
- clib_bitmap_set_no_check (sa->replay_window_huge,
- (sa->seq + inc) & (window_size - 1), 1);
+ clib_bitmap_zero (irt->replay_window_huge);
+ clib_bitmap_set_no_check (irt->replay_window_huge,
+ (irt->seq + inc) & (window_size - 1), 1);
}
else
{
- sa->replay_window = 1ULL << (window_size - 1);
+ irt->replay_window = 1ULL << (window_size - 1);
}
}
* the branch cost.
*/
always_inline u64
-ipsec_sa_anti_replay_advance (ipsec_sa_t *sa, u32 thread_index, u32 seq,
- u32 hi_seq, bool ar_huge)
+ipsec_sa_anti_replay_advance (ipsec_sa_inb_rt_t *irt, u32 thread_index,
+ u32 seq, u32 hi_seq, bool ar_huge)
{
u64 n_lost = 0;
- u32 window_size = IPSEC_SA_ANTI_REPLAY_WINDOW_SIZE_KNOWN_WIN (sa, ar_huge);
+ u32 window_size = IPSEC_SA_ANTI_REPLAY_WINDOW_SIZE_KNOWN_WIN (irt, ar_huge);
u32 pos;
- if (ipsec_sa_is_set_USE_ESN (sa))
+ if (irt->use_esn)
{
- int wrap = hi_seq - sa->seq_hi;
+ int wrap = hi_seq - irt->seq_hi;
- if (wrap == 0 && seq > sa->seq)
+ if (wrap == 0 && seq > irt->seq)
{
- pos = seq - sa->seq;
- n_lost = ipsec_sa_anti_replay_window_shift (sa, pos, ar_huge);
- sa->seq = seq;
+ pos = seq - irt->seq;
+ n_lost = ipsec_sa_anti_replay_window_shift (irt, pos, ar_huge);
+ irt->seq = seq;
}
else if (wrap > 0)
{
- pos = seq + ~sa->seq + 1;
- n_lost = ipsec_sa_anti_replay_window_shift (sa, pos, ar_huge);
- sa->seq = seq;
- sa->seq_hi = hi_seq;
+ pos = seq + ~irt->seq + 1;
+ n_lost = ipsec_sa_anti_replay_window_shift (irt, pos, ar_huge);
+ irt->seq = seq;
+ irt->seq_hi = hi_seq;
}
else if (wrap < 0)
{
- pos = ~seq + sa->seq + 1;
+ pos = ~seq + irt->seq + 1;
if (ar_huge)
- clib_bitmap_set_no_check (sa->replay_window_huge,
+ clib_bitmap_set_no_check (irt->replay_window_huge,
seq & (window_size - 1), 1);
else
- sa->replay_window |= (1ULL << (window_size - 1 - pos));
+ irt->replay_window |= (1ULL << (window_size - 1 - pos));
}
else
{
- pos = sa->seq - seq;
+ pos = irt->seq - seq;
if (ar_huge)
- clib_bitmap_set_no_check (sa->replay_window_huge,
+ clib_bitmap_set_no_check (irt->replay_window_huge,
seq & (window_size - 1), 1);
else
- sa->replay_window |= (1ULL << (window_size - 1 - pos));
+ irt->replay_window |= (1ULL << (window_size - 1 - pos));
}
}
else
{
- if (seq > sa->seq)
+ if (seq > irt->seq)
{
- pos = seq - sa->seq;
- n_lost = ipsec_sa_anti_replay_window_shift (sa, pos, ar_huge);
- sa->seq = seq;
+ pos = seq - irt->seq;
+ n_lost = ipsec_sa_anti_replay_window_shift (irt, pos, ar_huge);
+ irt->seq = seq;
}
else
{
- pos = sa->seq - seq;
+ pos = irt->seq - seq;
if (ar_huge)
- clib_bitmap_set_no_check (sa->replay_window_huge,
+ clib_bitmap_set_no_check (irt->replay_window_huge,
seq & (window_size - 1), 1);
else
- sa->replay_window |= (1ULL << (window_size - 1 - pos));
+ irt->replay_window |= (1ULL << (window_size - 1 - pos));
}
}
: (unix_time_now_nsec () % vlib_num_workers ()) + 1);
}
-always_inline ipsec_sa_t *
-ipsec_sa_get (u32 sa_index)
-{
- return (pool_elt_at_index (ipsec_sa_pool, sa_index));
-}
-
#endif /* __IPSEC_SPD_SA_H__ */
/*
if (!(itp->itp_flags & IPSEC_PROTECT_ITF))
{
ipsec_sa_set_IS_PROTECT (sa);
+ ipsec_sa_update_runtime (sa);
itp->itp_flags |= IPSEC_PROTECT_ENCAPED;
}
}
ipsec_sa_lock (itp->itp_out_sa);
if (itp->itp_flags & IPSEC_PROTECT_ITF)
- ipsec_sa_set_NO_ALGO_NO_DROP (ipsec_sa_get (itp->itp_out_sa));
+ {
+ ipsec_sa_t *sa = ipsec_sa_get (itp->itp_out_sa);
+ ipsec_sa_set_NO_ALGO_NO_DROP (sa);
+ ipsec_sa_update_runtime (sa);
+ }
FOR_EACH_IPSEC_PROTECT_INPUT_SAI(itp, sai,
({
FOR_EACH_IPSEC_PROTECT_INPUT_SA(itp, sa,
({
ipsec_sa_unset_IS_PROTECT (sa);
+ ipsec_sa_update_runtime (sa);
}));
ipsec_tun_protect_rx_db_remove (im, itp);
ipsec_tun_protect_tx_db_remove (itp);
- ipsec_sa_unset_NO_ALGO_NO_DROP (ipsec_sa_get (itp->itp_out_sa));
+ sa = ipsec_sa_get (itp->itp_out_sa);
+ ipsec_sa_unset_NO_ALGO_NO_DROP (sa);
+ ipsec_sa_update_runtime (sa);
+
ipsec_sa_unlock(itp->itp_out_sa);
FOR_EACH_IPSEC_PROTECT_INPUT_SAI(itp, sai,
.alg = VNET_CRYPTO_ALG_AES_128_CTR,
.iv_size = 8,
.block_align = 1,
+ .is_ctr = 1,
},
[IPSEC_CRYPTO_ALG_AES_CTR_192] = {
.alg = VNET_CRYPTO_ALG_AES_192_CTR,
.iv_size = 8,
.block_align = 1,
+ .is_ctr = 1,
},
[IPSEC_CRYPTO_ALG_AES_CTR_256] = {
.alg = VNET_CRYPTO_ALG_AES_256_CTR,
.iv_size = 8,
.block_align = 1,
+ .is_ctr = 1,
},
[IPSEC_CRYPTO_ALG_AES_GCM_128] = {
.iv_size = 8,
.block_align = 1,
.icv_size = 16,
+ .is_aead = 1,
+ .is_ctr = 1,
},
[IPSEC_CRYPTO_ALG_AES_GCM_192] = {
.iv_size = 8,
.block_align = 1,
.icv_size = 16,
+ .is_aead = 1,
+ .is_ctr = 1,
},
[IPSEC_CRYPTO_ALG_AES_GCM_256] = {
.iv_size = 8,
.block_align = 1,
.icv_size = 16,
+ .is_aead = 1,
+ .is_ctr = 1,
},
[IPSEC_CRYPTO_ALG_CHACHA20_POLY1305] = {
.alg = VNET_CRYPTO_ALG_CHACHA20_POLY1305,
.iv_size = 8,
.icv_size = 16,
+ .is_ctr = 1,
+ .is_aead = 1,
},
[IPSEC_CRYPTO_ALG_AES_NULL_GMAC_128] = {
.iv_size = 8,
.block_align = 1,
.icv_size = 16,
+ .is_ctr = 1,
+ .is_aead = 1,
+ .is_null_gmac = 1,
},
[IPSEC_CRYPTO_ALG_AES_NULL_GMAC_192] = {
.iv_size = 8,
.block_align = 1,
.icv_size = 16,
+ .is_ctr = 1,
+ .is_aead = 1,
+ .is_null_gmac = 1,
},
[IPSEC_CRYPTO_ALG_AES_NULL_GMAC_256] = {
.iv_size = 8,
.block_align = 1,
.icv_size = 16,
+ .is_ctr = 1,
+ .is_aead = 1,
+ .is_null_gmac = 1,
},
},
.integ_algs = {