/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vnet/ipsec/ipsec.h>
#include <vnet/ipsec/esp.h>
#include <vnet/udp/udp_local.h>
#include <vnet/fib/fib_table.h>
#include <vnet/fib/fib_entry_track.h>
#include <vnet/ipsec/ipsec_tun.h>

/**
 * @brief
 * SA packet & bytes counters
 */
vlib_combined_counter_main_t ipsec_sa_counters = {
  .name = "SA",
  .stat_segment_name = "/net/ipsec/sa",
};
vlib_simple_counter_main_t ipsec_sa_lost_counters = {
  .name = "SA-lost",
  .stat_segment_name = "/net/ipsec/sa/lost",
};

ipsec_sa_t *ipsec_sa_pool;
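
/**
 * Notify the active crypto backend for the SA's protocol (AH or ESP)
 * that a session is being added or deleted, if it registered a
 * callback for this.
 */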
static clib_error_t *
ipsec_call_add_del_callbacks (ipsec_main_t * im, ipsec_sa_t * sa,
                              u32 sa_index, int is_add)
{
  ipsec_ah_backend_t *ab;
  ipsec_esp_backend_t *eb;
  switch (sa->protocol)
    {
    case IPSEC_PROTOCOL_AH:
      ab = pool_elt_at_index (im->ah_backends, im->ah_current_backend);
      if (ab->add_del_sa_sess_cb)
        return ab->add_del_sa_sess_cb (sa_index, is_add);
      break;
    case IPSEC_PROTOCOL_ESP:
      eb = pool_elt_at_index (im->esp_backends, im->esp_current_backend);
      if (eb->add_del_sa_sess_cb)
        return eb->add_del_sa_sess_cb (sa_index, is_add);
      break;
    }
  return 0;
}
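
/**
 * Copy key material into @a key, clamping its length to the size of
 * the key data buffer.
 */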
void
ipsec_mk_key (ipsec_key_t * key, const u8 * data, u8 len)
{
  memset (key, 0, sizeof (*key));

  if (len > sizeof (key->data))
    key->len = sizeof (key->data);
  else
    key->len = len;

  memcpy (key->data, data, key->len);
}

/**
 * 'stack' (resolve the recursion for) the SA tunnel destination
 */
static void
ipsec_sa_stack (ipsec_sa_t * sa)
{
  ipsec_main_t *im = &ipsec_main;
  dpo_id_t tmp = DPO_INVALID;

  tunnel_contribute_forwarding (&sa->tunnel, &tmp);

  if (IPSEC_PROTOCOL_AH == sa->protocol)
    dpo_stack_from_node ((ipsec_sa_is_set_IS_TUNNEL_V6 (sa) ?
                          im->ah6_encrypt_node_index :
                          im->ah4_encrypt_node_index), &sa->dpo, &tmp);
  else
    dpo_stack_from_node ((ipsec_sa_is_set_IS_TUNNEL_V6 (sa) ?
                          im->esp6_encrypt_node_index :
                          im->esp4_encrypt_node_index), &sa->dpo, &tmp);

  dpo_reset (&tmp);
}
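
/**
 * Cache the chosen crypto algorithm's per-SA parameters: IV size, ESP
 * block alignment, sync op IDs, and the CTR/AEAD mode flags.
 */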
void
ipsec_sa_set_crypto_alg (ipsec_sa_t * sa, ipsec_crypto_alg_t crypto_alg)
{
  ipsec_main_t *im = &ipsec_main;
  sa->crypto_alg = crypto_alg;
  sa->crypto_iv_size = im->crypto_algs[crypto_alg].iv_size;
  sa->esp_block_align = clib_max (4, im->crypto_algs[crypto_alg].block_align);
  sa->sync_op_data.crypto_enc_op_id = im->crypto_algs[crypto_alg].enc_op_id;
  sa->sync_op_data.crypto_dec_op_id = im->crypto_algs[crypto_alg].dec_op_id;
  sa->crypto_calg = im->crypto_algs[crypto_alg].alg;
  ASSERT (sa->crypto_iv_size <= ESP_MAX_IV_SIZE);
  ASSERT (sa->esp_block_align <= ESP_MAX_BLOCK_SIZE);
  if (IPSEC_CRYPTO_ALG_IS_GCM (crypto_alg))
    {
      sa->integ_icv_size = im->crypto_algs[crypto_alg].icv_size;
      ipsec_sa_set_IS_CTR (sa);
      ipsec_sa_set_IS_AEAD (sa);
    }
  else if (IPSEC_CRYPTO_ALG_IS_CTR (crypto_alg))
    {
      ipsec_sa_set_IS_CTR (sa);
    }
}
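
/**
 * Cache the chosen integrity algorithm's per-SA parameters: ICV size
 * and sync op ID.
 */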
void
ipsec_sa_set_integ_alg (ipsec_sa_t * sa, ipsec_integ_alg_t integ_alg)
{
  ipsec_main_t *im = &ipsec_main;
  sa->integ_alg = integ_alg;
  sa->integ_icv_size = im->integ_algs[integ_alg].icv_size;
  sa->sync_op_data.integ_op_id = im->integ_algs[integ_alg].op_id;
  sa->integ_calg = im->integ_algs[integ_alg].alg;
  ASSERT (sa->integ_icv_size <= ESP_MAX_ICV_SIZE);
}
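
/**
 * Map the SA's synchronous crypto op IDs to their asynchronous
 * equivalents. AEAD ops use 12 bytes of AAD when ESN is enabled and 8
 * bytes otherwise; linked cipher+HMAC ops are matched on both the
 * crypto and the integrity algorithm.
 */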
void
ipsec_sa_set_async_op_ids (ipsec_sa_t * sa)
{
  /* *INDENT-OFF* */
  if (ipsec_sa_is_set_USE_ESN (sa))
    {
#define _(n, s, k) \
  if( sa->sync_op_data.crypto_enc_op_id == VNET_CRYPTO_OP_##n##_ENC ) \
    sa->async_op_data.crypto_async_enc_op_id = \
      VNET_CRYPTO_OP_##n##_TAG16_AAD12_ENC; \
  if( sa->sync_op_data.crypto_dec_op_id == VNET_CRYPTO_OP_##n##_DEC ) \
    sa->async_op_data.crypto_async_dec_op_id = \
      VNET_CRYPTO_OP_##n##_TAG16_AAD12_DEC;
      foreach_crypto_aead_alg
#undef _
    }
  else
    {
#define _(n, s, k) \
  if( sa->sync_op_data.crypto_enc_op_id == VNET_CRYPTO_OP_##n##_ENC ) \
    sa->async_op_data.crypto_async_enc_op_id = \
      VNET_CRYPTO_OP_##n##_TAG16_AAD8_ENC; \
  if( sa->sync_op_data.crypto_dec_op_id == VNET_CRYPTO_OP_##n##_DEC ) \
    sa->async_op_data.crypto_async_dec_op_id = \
      VNET_CRYPTO_OP_##n##_TAG16_AAD8_DEC;
      foreach_crypto_aead_alg
#undef _
    }

#define _(c, h, s, k, d) \
  if( sa->sync_op_data.crypto_enc_op_id == VNET_CRYPTO_OP_##c##_ENC && \
      sa->sync_op_data.integ_op_id == VNET_CRYPTO_OP_##h##_HMAC) \
    sa->async_op_data.crypto_async_enc_op_id = \
      VNET_CRYPTO_OP_##c##_##h##_TAG##d##_ENC; \
  if( sa->sync_op_data.crypto_dec_op_id == VNET_CRYPTO_OP_##c##_DEC && \
      sa->sync_op_data.integ_op_id == VNET_CRYPTO_OP_##h##_HMAC) \
    sa->async_op_data.crypto_async_dec_op_id = \
      VNET_CRYPTO_OP_##c##_##h##_TAG##d##_DEC;
  foreach_crypto_link_async_alg
#undef _
  /* *INDENT-ON* */
}
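
/**
 * Create an SA, install its crypto keys, resolve its tunnel destination
 * (if any) and add it to the SA pool holding one lock. On success,
 * writes the pool index to *sa_out_index and returns 0; otherwise
 * returns a VNET_API_ERROR_* code.
 */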
int
ipsec_sa_add_and_lock (u32 id, u32 spi, ipsec_protocol_t proto,
                       ipsec_crypto_alg_t crypto_alg, const ipsec_key_t *ck,
                       ipsec_integ_alg_t integ_alg, const ipsec_key_t *ik,
                       ipsec_sa_flags_t flags, u32 salt, u16 src_port,
                       u16 dst_port, const tunnel_t *tun, u32 *sa_out_index)
{
  vlib_main_t *vm = vlib_get_main ();
  ipsec_main_t *im = &ipsec_main;
  clib_error_t *err;
  ipsec_sa_t *sa;
  u32 sa_index;
  uword *p;
  int rv;

  p = hash_get (im->sa_index_by_sa_id, id);
  if (p)
    return VNET_API_ERROR_ENTRY_ALREADY_EXISTS;

  pool_get_aligned_zero (ipsec_sa_pool, sa, CLIB_CACHE_LINE_BYTES);

  fib_node_init (&sa->node, FIB_NODE_TYPE_IPSEC_SA);
  fib_node_lock (&sa->node);
  sa_index = sa - ipsec_sa_pool;

  vlib_validate_combined_counter (&ipsec_sa_counters, sa_index);
  vlib_zero_combined_counter (&ipsec_sa_counters, sa_index);
  vlib_validate_simple_counter (&ipsec_sa_lost_counters, sa_index);
  vlib_zero_simple_counter (&ipsec_sa_lost_counters, sa_index);

  tunnel_copy (tun, &sa->tunnel);
  sa->id = id;
  sa->spi = spi;
  sa->stat_index = sa_index;
  sa->protocol = proto;
  sa->flags = flags;
  sa->salt = salt;
  sa->thread_index = (vlib_num_workers ()) ? ~0 : 0;
  if (integ_alg != IPSEC_INTEG_ALG_NONE)
    {
      ipsec_sa_set_integ_alg (sa, integ_alg);
      clib_memcpy (&sa->integ_key, ik, sizeof (sa->integ_key));
    }
  ipsec_sa_set_crypto_alg (sa, crypto_alg);
  ipsec_sa_set_async_op_ids (sa);

  clib_memcpy (&sa->crypto_key, ck, sizeof (sa->crypto_key));

  sa->crypto_key_index = vnet_crypto_key_add (vm,
                                              im->crypto_algs[crypto_alg].alg,
                                              (u8 *) ck->data, ck->len);
  if (~0 == sa->crypto_key_index)
    {
      pool_put (ipsec_sa_pool, sa);
      return VNET_API_ERROR_KEY_LENGTH;
    }

  if (integ_alg != IPSEC_INTEG_ALG_NONE)
    {
      sa->integ_key_index = vnet_crypto_key_add (vm,
                                                 im->integ_algs[integ_alg].alg,
                                                 (u8 *) ik->data, ik->len);
      if (~0 == sa->integ_key_index)
        {
          pool_put (ipsec_sa_pool, sa);
          return VNET_API_ERROR_KEY_LENGTH;
        }
    }

  if (sa->async_op_data.crypto_async_enc_op_id &&
      !ipsec_sa_is_set_IS_AEAD (sa))
    { //AES-CBC & HMAC
      sa->async_op_data.linked_key_index =
        vnet_crypto_key_add_linked (vm, sa->crypto_key_index,
                                    sa->integ_key_index);
    }

  if (im->async_mode)
    sa->crypto_op_data = sa->async_op_data.data;
  else
    {
      if (ipsec_sa_is_set_IS_ASYNC (sa))
        {
          vnet_crypto_request_async_mode (1);
          sa->crypto_op_data = sa->async_op_data.data;
        }
      else
        sa->crypto_op_data = sa->sync_op_data.data;
    }

  err = ipsec_check_support_cb (im, sa);
  if (err)
    {
      clib_warning ("%s", err->what);
      pool_put (ipsec_sa_pool, sa);
      return VNET_API_ERROR_UNIMPLEMENTED;
    }

  err = ipsec_call_add_del_callbacks (im, sa, sa_index, 1);
  if (err)
    {
      pool_put (ipsec_sa_pool, sa);
      return VNET_API_ERROR_SYSCALL_ERROR_1;
    }

  if (ipsec_sa_is_set_IS_TUNNEL (sa) &&
      AF_IP6 == ip_addr_version (&tun->t_src))
    ipsec_sa_set_IS_TUNNEL_V6 (sa);

  if (ipsec_sa_is_set_IS_TUNNEL (sa) && !ipsec_sa_is_set_IS_INBOUND (sa))
    {
      sa->tunnel_flags = sa->tunnel.t_encap_decap_flags;

      rv = tunnel_resolve (&sa->tunnel, FIB_NODE_TYPE_IPSEC_SA, sa_index);

      if (rv)
        {
          pool_put (ipsec_sa_pool, sa);
          return rv;
        }
      ipsec_sa_stack (sa);

      /* generate header templates */
      if (ipsec_sa_is_set_IS_TUNNEL_V6 (sa))
        {
          tunnel_build_v6_hdr (&sa->tunnel,
                               (ipsec_sa_is_set_UDP_ENCAP (sa) ?
                                IP_PROTOCOL_UDP :
                                IP_PROTOCOL_IPSEC_ESP),
                               &sa->ip6_hdr);
        }
      else
        {
          tunnel_build_v4_hdr (&sa->tunnel,
                               (ipsec_sa_is_set_UDP_ENCAP (sa) ?
                                IP_PROTOCOL_UDP :
                                IP_PROTOCOL_IPSEC_ESP),
                               &sa->ip4_hdr);
        }
    }

  if (ipsec_sa_is_set_UDP_ENCAP (sa))
    {
      if (dst_port == IPSEC_UDP_PORT_NONE)
        sa->udp_hdr.dst_port = clib_host_to_net_u16 (UDP_DST_PORT_ipsec);
      else
        sa->udp_hdr.dst_port = clib_host_to_net_u16 (dst_port);

      if (src_port == IPSEC_UDP_PORT_NONE)
        sa->udp_hdr.src_port = clib_host_to_net_u16 (UDP_DST_PORT_ipsec);
      else
        sa->udp_hdr.src_port = clib_host_to_net_u16 (src_port);

      if (ipsec_sa_is_set_IS_INBOUND (sa))
        ipsec_register_udp_port (clib_host_to_net_u16 (sa->udp_hdr.dst_port));
    }

  hash_set (im->sa_index_by_sa_id, sa->id, sa_index);

  if (sa_out_index)
    *sa_out_index = sa_index;

  return (0);
}
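
/**
 * Tear an SA down: remove it from the DB, release its crypto keys, UDP
 * port registration and tunnel state, then return it to the pool.
 * Reached only via the FIB node's last-lock-gone callback.
 */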
static void
ipsec_sa_del (ipsec_sa_t * sa)
{
  vlib_main_t *vm = vlib_get_main ();
  ipsec_main_t *im = &ipsec_main;
  u32 sa_index;

  sa_index = sa - ipsec_sa_pool;
  hash_unset (im->sa_index_by_sa_id, sa->id);
  tunnel_unresolve (&sa->tunnel);

  /* no recovery possible when deleting an SA */
  (void) ipsec_call_add_del_callbacks (im, sa, sa_index, 0);

  if (ipsec_sa_is_set_IS_ASYNC (sa))
    vnet_crypto_request_async_mode (0);
  if (ipsec_sa_is_set_UDP_ENCAP (sa) && ipsec_sa_is_set_IS_INBOUND (sa))
    ipsec_unregister_udp_port (clib_net_to_host_u16 (sa->udp_hdr.dst_port));

  if (ipsec_sa_is_set_IS_TUNNEL (sa) && !ipsec_sa_is_set_IS_INBOUND (sa))
    dpo_reset (&sa->dpo);
  vnet_crypto_key_del (vm, sa->crypto_key_index);
  if (sa->integ_alg != IPSEC_INTEG_ALG_NONE)
    vnet_crypto_key_del (vm, sa->integ_key_index);
  pool_put (ipsec_sa_pool, sa);
}
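
/**
 * Release one reference on an SA; the SA is deleted when the last
 * lock is removed (see ipsec_sa_last_lock_gone below).
 */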
void
ipsec_sa_unlock (index_t sai)
{
  ipsec_sa_t *sa;

  if (INDEX_INVALID == sai)
    return;

  sa = ipsec_sa_get (sai);

  fib_node_unlock (&sa->node);
}
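
/**
 * Take a reference on the SA with the given pool index.
 */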
void
ipsec_sa_lock (index_t sai)
{
  ipsec_sa_t *sa;

  if (INDEX_INVALID == sai)
    return;

  sa = ipsec_sa_get (sai);

  fib_node_lock (&sa->node);
}
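
/**
 * Look up an SA by its user-assigned id and, if found, take a
 * reference on it. Returns its pool index, or INDEX_INVALID.
 */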
index_t
ipsec_sa_find_and_lock (u32 id)
{
  ipsec_main_t *im = &ipsec_main;
  ipsec_sa_t *sa;
  uword *p;

  p = hash_get (im->sa_index_by_sa_id, id);

  if (!p)
    return INDEX_INVALID;

  sa = ipsec_sa_get (p[0]);

  fib_node_lock (&sa->node);

  return (p[0]);
}
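
/**
 * Release the reference held on the SA with the given user-assigned id.
 */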
int
ipsec_sa_unlock_id (u32 id)
{
  ipsec_main_t *im = &ipsec_main;
  uword *p;

  p = hash_get (im->sa_index_by_sa_id, id);

  if (!p)
    return VNET_API_ERROR_NO_SUCH_ENTRY;

  ipsec_sa_unlock (p[0]);

  return (0);
}
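
/**
 * Zero the SA's packet, byte and lost-packet counters.
 */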
void
ipsec_sa_clear (index_t sai)
{
  vlib_zero_combined_counter (&ipsec_sa_counters, sai);
  vlib_zero_simple_counter (&ipsec_sa_lost_counters, sai);
}
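
/**
 * Walk all SAs in the pool, stopping early if the callback returns
 * anything other than WALK_CONTINUE.
 */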
void
ipsec_sa_walk (ipsec_sa_walk_cb_t cb, void *ctx)
{
  ipsec_sa_t *sa;

  /* *INDENT-OFF* */
  pool_foreach (sa, ipsec_sa_pool)
   {
    if (WALK_CONTINUE != cb (sa, ctx))
      break;
  }
  /* *INDENT-ON* */
}

/**
 * Function definition to get a FIB node from its index
 */
static fib_node_t *
ipsec_sa_fib_node_get (fib_node_index_t index)
{
  ipsec_sa_t *sa;

  sa = ipsec_sa_get (index);

  return (&sa->node);
}

static ipsec_sa_t *
ipsec_sa_from_fib_node (fib_node_t * node)
{
  ASSERT (FIB_NODE_TYPE_IPSEC_SA == node->fn_type);
  return ((ipsec_sa_t *) (((char *) node) -
                          STRUCT_OFFSET_OF (ipsec_sa_t, node)));
}

/**
 * Function definition to inform the FIB node that its last lock has gone.
 */
static void
ipsec_sa_last_lock_gone (fib_node_t * node)
{
  /*
   * The ipsec SA is a root of the graph. As such
   * it never has children and thus is never locked.
   */
  ipsec_sa_del (ipsec_sa_from_fib_node (node));
}

/**
 * Function definition to backwalk a FIB node
 */
static fib_node_back_walk_rc_t
ipsec_sa_back_walk (fib_node_t * node, fib_node_back_walk_ctx_t * ctx)
{
  ipsec_sa_stack (ipsec_sa_from_fib_node (node));

  return (FIB_NODE_BACK_WALK_CONTINUE);
}

/*
 * Virtual function table registered by SAs
 * for participation in the FIB object graph.
 */
const static fib_node_vft_t ipsec_sa_vft = {
  .fnv_get = ipsec_sa_fib_node_get,
  .fnv_last_lock = ipsec_sa_last_lock_gone,
  .fnv_back_walk = ipsec_sa_back_walk,
};

/* force inclusion from application's main.c */
static clib_error_t *
ipsec_sa_interface_init (vlib_main_t * vm)
{
  fib_node_register_type (FIB_NODE_TYPE_IPSEC_SA, &ipsec_sa_vft);

  return 0;
}

VLIB_INIT_FUNCTION (ipsec_sa_interface_init);

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */