/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vnet/ipsec/ipsec.h>
#include <vnet/ipsec/esp.h>
#include <vnet/udp/udp_local.h>
#include <vnet/fib/fib_table.h>
#include <vnet/fib/fib_entry_track.h>
#include <vnet/ipsec/ipsec_tun.h>

/**
 * @brief
 * SA packet & bytes counters
 */
vlib_combined_counter_main_t ipsec_sa_counters = {
  .name = "SA",
  .stat_segment_name = "/net/ipsec/sa",
};

ipsec_sa_t *ipsec_sa_pool;
static clib_error_t *
ipsec_call_add_del_callbacks (ipsec_main_t * im, ipsec_sa_t * sa,
                              u32 sa_index, int is_add)
{
  ipsec_ah_backend_t *ab;
  ipsec_esp_backend_t *eb;

  switch (sa->protocol)
    {
    case IPSEC_PROTOCOL_AH:
      ab = pool_elt_at_index (im->ah_backends, im->ah_current_backend);
      if (ab->add_del_sa_sess_cb)
        return ab->add_del_sa_sess_cb (sa_index, is_add);
      break;
    case IPSEC_PROTOCOL_ESP:
      eb = pool_elt_at_index (im->esp_backends, im->esp_current_backend);
      if (eb->add_del_sa_sess_cb)
        return eb->add_del_sa_sess_cb (sa_index, is_add);
      break;
    }
  return 0;
}

void
ipsec_mk_key (ipsec_key_t * key, const u8 * data, u8 len)
{
  memset (key, 0, sizeof (*key));

  if (len > sizeof (key->data))
    key->len = sizeof (key->data);
  else
    key->len = len;

  memcpy (key->data, data, key->len);
}
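
/*
 * Usage sketch (the buffer below is hypothetical): keys longer than
 * sizeof (key->data) are truncated to fit, shorter keys keep their length
 * and the remainder of the key storage stays zeroed by the memset above.
 *
 *   u8 raw[16] = { 0 };
 *   ipsec_key_t k;
 *   ipsec_mk_key (&k, raw, sizeof (raw));   // k.len == 16
 */
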
/**
 * 'stack' (resolve the recursion for) the SA tunnel destination
 */
static void
ipsec_sa_stack (ipsec_sa_t * sa)
{
  ipsec_main_t *im = &ipsec_main;
  dpo_id_t tmp = DPO_INVALID;

  tunnel_contribute_forwarding (&sa->tunnel, &tmp);

  if (IPSEC_PROTOCOL_AH == sa->protocol)
    dpo_stack_from_node ((ipsec_sa_is_set_IS_TUNNEL_V6 (sa) ?
                          im->ah6_encrypt_node_index :
                          im->ah4_encrypt_node_index), &sa->dpo, &tmp);
  else
    dpo_stack_from_node ((ipsec_sa_is_set_IS_TUNNEL_V6 (sa) ?
                          im->esp6_encrypt_node_index :
                          im->esp4_encrypt_node_index), &sa->dpo, &tmp);
  dpo_reset (&tmp);
}

void
ipsec_sa_set_crypto_alg (ipsec_sa_t * sa, ipsec_crypto_alg_t crypto_alg)
{
  ipsec_main_t *im = &ipsec_main;
  sa->crypto_alg = crypto_alg;
  sa->crypto_iv_size = im->crypto_algs[crypto_alg].iv_size;
  sa->esp_block_align = clib_max (4, im->crypto_algs[crypto_alg].block_align);
  sa->sync_op_data.crypto_enc_op_id = im->crypto_algs[crypto_alg].enc_op_id;
  sa->sync_op_data.crypto_dec_op_id = im->crypto_algs[crypto_alg].dec_op_id;
  sa->crypto_calg = im->crypto_algs[crypto_alg].alg;
  ASSERT (sa->crypto_iv_size <= ESP_MAX_IV_SIZE);
  ASSERT (sa->esp_block_align <= ESP_MAX_BLOCK_SIZE);
  if (IPSEC_CRYPTO_ALG_IS_GCM (crypto_alg))
    {
      sa->integ_icv_size = im->crypto_algs[crypto_alg].icv_size;
      ipsec_sa_set_IS_CTR (sa);
      ipsec_sa_set_IS_AEAD (sa);
    }
  else if (IPSEC_CRYPTO_ALG_IS_CTR (crypto_alg))
    ipsec_sa_set_IS_CTR (sa);
}

void
ipsec_sa_set_integ_alg (ipsec_sa_t * sa, ipsec_integ_alg_t integ_alg)
{
  ipsec_main_t *im = &ipsec_main;
  sa->integ_alg = integ_alg;
  sa->integ_icv_size = im->integ_algs[integ_alg].icv_size;
  sa->sync_op_data.integ_op_id = im->integ_algs[integ_alg].op_id;
  sa->integ_calg = im->integ_algs[integ_alg].alg;
  ASSERT (sa->integ_icv_size <= ESP_MAX_ICV_SIZE);
}

void
ipsec_sa_set_async_op_ids (ipsec_sa_t * sa)
{
  /* AEAD SAs: ESN uses a 12-byte AAD, non-ESN an 8-byte AAD */
  if (ipsec_sa_is_set_USE_ESN (sa))
    {
#define _(n, s, k) \
  if( sa->sync_op_data.crypto_enc_op_id == VNET_CRYPTO_OP_##n##_ENC ) \
    sa->async_op_data.crypto_async_enc_op_id = \
      VNET_CRYPTO_OP_##n##_TAG16_AAD12_ENC; \
  if( sa->sync_op_data.crypto_dec_op_id == VNET_CRYPTO_OP_##n##_DEC ) \
    sa->async_op_data.crypto_async_dec_op_id = \
      VNET_CRYPTO_OP_##n##_TAG16_AAD12_DEC;
      foreach_crypto_aead_alg
#undef _
    }
  else
    {
#define _(n, s, k) \
  if( sa->sync_op_data.crypto_enc_op_id == VNET_CRYPTO_OP_##n##_ENC ) \
    sa->async_op_data.crypto_async_enc_op_id = \
      VNET_CRYPTO_OP_##n##_TAG16_AAD8_ENC; \
  if( sa->sync_op_data.crypto_dec_op_id == VNET_CRYPTO_OP_##n##_DEC ) \
    sa->async_op_data.crypto_async_dec_op_id = \
      VNET_CRYPTO_OP_##n##_TAG16_AAD8_DEC;
      foreach_crypto_aead_alg
#undef _
    }

  /* linked (crypto + integ) SAs */
#define _(c, h, s, k, d) \
  if( sa->sync_op_data.crypto_enc_op_id == VNET_CRYPTO_OP_##c##_ENC && \
      sa->sync_op_data.integ_op_id == VNET_CRYPTO_OP_##h##_HMAC) \
    sa->async_op_data.crypto_async_enc_op_id = \
      VNET_CRYPTO_OP_##c##_##h##_TAG##d##_ENC; \
  if( sa->sync_op_data.crypto_dec_op_id == VNET_CRYPTO_OP_##c##_DEC && \
      sa->sync_op_data.integ_op_id == VNET_CRYPTO_OP_##h##_HMAC) \
    sa->async_op_data.crypto_async_dec_op_id = \
      VNET_CRYPTO_OP_##c##_##h##_TAG##d##_DEC;
  foreach_crypto_link_async_alg
#undef _
}
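
/*
 * Worked example of the mapping above (derived from the macros, for
 * illustration): for an AES-128-GCM SA, VNET_CRYPTO_OP_AES_128_GCM_ENC maps
 * to VNET_CRYPTO_OP_AES_128_GCM_TAG16_AAD12_ENC when USE_ESN is set (ESP AAD
 * of SPI plus 64-bit sequence number) and to
 * VNET_CRYPTO_OP_AES_128_GCM_TAG16_AAD8_ENC otherwise (SPI plus 32-bit
 * sequence number).
 */
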
int
ipsec_sa_add_and_lock (u32 id, u32 spi, ipsec_protocol_t proto,
                       ipsec_crypto_alg_t crypto_alg, const ipsec_key_t *ck,
                       ipsec_integ_alg_t integ_alg, const ipsec_key_t *ik,
                       ipsec_sa_flags_t flags, u32 salt, u16 src_port,
                       u16 dst_port, const tunnel_t *tun, u32 *sa_out_index)
{
  vlib_main_t *vm = vlib_get_main ();
  ipsec_main_t *im = &ipsec_main;
  clib_error_t *err;
  ipsec_sa_t *sa;
  u32 sa_index;
  uword *p;
  int rv;

  p = hash_get (im->sa_index_by_sa_id, id);
  if (p)
    return VNET_API_ERROR_ENTRY_ALREADY_EXISTS;

  pool_get_aligned_zero (ipsec_sa_pool, sa, CLIB_CACHE_LINE_BYTES);

  fib_node_init (&sa->node, FIB_NODE_TYPE_IPSEC_SA);
  fib_node_lock (&sa->node);
  sa_index = sa - ipsec_sa_pool;

  vlib_validate_combined_counter (&ipsec_sa_counters, sa_index);
  vlib_zero_combined_counter (&ipsec_sa_counters, sa_index);

  tunnel_copy (tun, &sa->tunnel);
  sa->id = id;
  sa->spi = spi;
  sa->stat_index = sa_index;
  sa->protocol = proto;
  sa->flags = flags;
  sa->salt = salt;
  sa->thread_index = (vlib_num_workers ()) ? ~0 : 0;

  if (integ_alg != IPSEC_INTEG_ALG_NONE)
    {
      ipsec_sa_set_integ_alg (sa, integ_alg);
      clib_memcpy (&sa->integ_key, ik, sizeof (sa->integ_key));
    }
  ipsec_sa_set_crypto_alg (sa, crypto_alg);
  ipsec_sa_set_async_op_ids (sa);

  clib_memcpy (&sa->crypto_key, ck, sizeof (sa->crypto_key));

  sa->crypto_key_index = vnet_crypto_key_add (vm,
                                              im->crypto_algs[crypto_alg].alg,
                                              (u8 *) ck->data, ck->len);
  if (~0 == sa->crypto_key_index)
    {
      pool_put (ipsec_sa_pool, sa);
      return VNET_API_ERROR_KEY_LENGTH;
    }

  if (integ_alg != IPSEC_INTEG_ALG_NONE)
    {
      sa->integ_key_index = vnet_crypto_key_add (vm,
                                                 im->integ_algs[integ_alg].alg,
                                                 (u8 *) ik->data, ik->len);
      if (~0 == sa->integ_key_index)
        {
          pool_put (ipsec_sa_pool, sa);
          return VNET_API_ERROR_KEY_LENGTH;
        }
    }

  if (sa->async_op_data.crypto_async_enc_op_id &&
      !ipsec_sa_is_set_IS_AEAD (sa))
    {
      /* linked key for AES-CBC + HMAC style SAs */
      sa->async_op_data.linked_key_index =
        vnet_crypto_key_add_linked (vm, sa->crypto_key_index,
                                    sa->integ_key_index);
    }

  /* select sync or async op data depending on the global mode and the SA */
  if (im->async_mode)
    sa->crypto_op_data = sa->async_op_data.data;
  else if (ipsec_sa_is_set_IS_ASYNC (sa))
    {
      vnet_crypto_request_async_mode (1);
      sa->crypto_op_data = sa->async_op_data.data;
    }
  else
    sa->crypto_op_data = sa->sync_op_data.data;

  err = ipsec_check_support_cb (im, sa);
  if (err)
    {
      clib_warning ("%s", err->what);
      pool_put (ipsec_sa_pool, sa);
      return VNET_API_ERROR_UNIMPLEMENTED;
    }
  err = ipsec_call_add_del_callbacks (im, sa, sa_index, 1);
  if (err)
    {
      pool_put (ipsec_sa_pool, sa);
      return VNET_API_ERROR_SYSCALL_ERROR_1;
    }

  if (ipsec_sa_is_set_IS_TUNNEL (sa) && !ipsec_sa_is_set_IS_INBOUND (sa))
    {
      sa->tunnel_flags = sa->tunnel.t_encap_decap_flags;
      rv = tunnel_resolve (&sa->tunnel, FIB_NODE_TYPE_IPSEC_SA, sa_index);
      if (rv)
        {
          pool_put (ipsec_sa_pool, sa);
          return rv;
        }
      ipsec_sa_stack (sa);
      /* generate header templates */
      if (ipsec_sa_is_set_IS_TUNNEL_V6 (sa))
        tunnel_build_v6_hdr (&sa->tunnel,
                             (ipsec_sa_is_set_UDP_ENCAP (sa) ?
                              IP_PROTOCOL_UDP : IP_PROTOCOL_IPSEC_ESP),
                             &sa->ip6_hdr);
      else
        tunnel_build_v4_hdr (&sa->tunnel,
                             (ipsec_sa_is_set_UDP_ENCAP (sa) ?
                              IP_PROTOCOL_UDP : IP_PROTOCOL_IPSEC_ESP),
                             &sa->ip4_hdr);
    }

  if (ipsec_sa_is_set_UDP_ENCAP (sa))
    {
      if (dst_port == IPSEC_UDP_PORT_NONE)
        sa->udp_hdr.dst_port = clib_host_to_net_u16 (UDP_DST_PORT_ipsec);
      else
        sa->udp_hdr.dst_port = clib_host_to_net_u16 (dst_port);
      if (src_port == IPSEC_UDP_PORT_NONE)
        sa->udp_hdr.src_port = clib_host_to_net_u16 (UDP_DST_PORT_ipsec);
      else
        sa->udp_hdr.src_port = clib_host_to_net_u16 (src_port);
      if (ipsec_sa_is_set_IS_INBOUND (sa))
        ipsec_register_udp_port (clib_host_to_net_u16 (sa->udp_hdr.dst_port));
    }

  hash_set (im->sa_index_by_sa_id, sa->id, sa_index);

  if (sa_out_index)
    *sa_out_index = sa_index;

  return (0);
}
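
/*
 * Caller sketch (hypothetical values; the key bytes and the zeroed tunnel
 * are placeholders, and error handling is reduced to the return code):
 *
 *   ipsec_key_t ck;
 *   tunnel_t tun = { 0 };
 *   u32 sa_index;
 *   u8 key_bytes[16] = { 0 };
 *
 *   ipsec_mk_key (&ck, key_bytes, sizeof (key_bytes));
 *   int rv = ipsec_sa_add_and_lock (10, 0x1000, IPSEC_PROTOCOL_ESP,
 *                                   IPSEC_CRYPTO_ALG_AES_GCM_128, &ck,
 *                                   IPSEC_INTEG_ALG_NONE, &ck,
 *                                   IPSEC_SA_FLAG_NONE, 0,
 *                                   IPSEC_UDP_PORT_NONE, IPSEC_UDP_PORT_NONE,
 *                                   &tun, &sa_index);
 *
 *   // when the SA is no longer needed, drop the reference taken above
 *   if (!rv)
 *     ipsec_sa_unlock_id (10);
 */
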
static void
ipsec_sa_del (ipsec_sa_t * sa)
{
  vlib_main_t *vm = vlib_get_main ();
  ipsec_main_t *im = &ipsec_main;
  u32 sa_index;

  sa_index = sa - ipsec_sa_pool;
  hash_unset (im->sa_index_by_sa_id, sa->id);
  tunnel_unresolve (&sa->tunnel);

  /* no recovery possible when deleting an SA */
  (void) ipsec_call_add_del_callbacks (im, sa, sa_index, 0);

  if (ipsec_sa_is_set_IS_ASYNC (sa))
    vnet_crypto_request_async_mode (0);
  if (ipsec_sa_is_set_UDP_ENCAP (sa) && ipsec_sa_is_set_IS_INBOUND (sa))
    ipsec_unregister_udp_port (clib_net_to_host_u16 (sa->udp_hdr.dst_port));

  if (ipsec_sa_is_set_IS_TUNNEL (sa) && !ipsec_sa_is_set_IS_INBOUND (sa))
    dpo_reset (&sa->dpo);
  vnet_crypto_key_del (vm, sa->crypto_key_index);
  if (sa->integ_alg != IPSEC_INTEG_ALG_NONE)
    vnet_crypto_key_del (vm, sa->integ_key_index);
  pool_put (ipsec_sa_pool, sa);
}

void
ipsec_sa_unlock (index_t sai)
{
  if (INDEX_INVALID == sai)
    return;
  fib_node_unlock (&ipsec_sa_get (sai)->node);
}

void
ipsec_sa_lock (index_t sai)
{
  if (INDEX_INVALID == sai)
    return;
  fib_node_lock (&ipsec_sa_get (sai)->node);
}

index_t
ipsec_sa_find_and_lock (u32 id)
{
  ipsec_main_t *im = &ipsec_main;
  ipsec_sa_t *sa;
  uword *p = hash_get (im->sa_index_by_sa_id, id);

  if (!p)
    return INDEX_INVALID;
  sa = ipsec_sa_get (p[0]);
  fib_node_lock (&sa->node);
  return (p[0]);
}

int
ipsec_sa_unlock_id (u32 id)
{
  ipsec_main_t *im = &ipsec_main;
  uword *p = hash_get (im->sa_index_by_sa_id, id);

  if (!p)
    return VNET_API_ERROR_NO_SUCH_ENTRY;
  ipsec_sa_unlock (p[0]);
  return (0);
}

void
ipsec_sa_clear (index_t sai)
{
  vlib_zero_combined_counter (&ipsec_sa_counters, sai);
}

void
ipsec_sa_walk (ipsec_sa_walk_cb_t cb, void *ctx)
{
  ipsec_sa_t *sa;

  pool_foreach (sa, ipsec_sa_pool)
    {
      if (WALK_CONTINUE != cb (sa, ctx))
        break;
    }
}
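
/*
 * Callback sketch (the callback and counter below are hypothetical): count
 * the SAs currently in the pool.
 *
 *   static walk_rc_t
 *   count_sa_cb (ipsec_sa_t * sa, void *ctx)
 *   {
 *     (*(u32 *) ctx) += 1;
 *     return WALK_CONTINUE;
 *   }
 *
 *   u32 n_sas = 0;
 *   ipsec_sa_walk (count_sa_cb, &n_sas);
 */
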
/**
 * Function definition to get a FIB node from its index
 */
static fib_node_t *
ipsec_sa_fib_node_get (fib_node_index_t index)
{
  ipsec_sa_t *sa = ipsec_sa_get (index);

  return (&sa->node);
}

static ipsec_sa_t *
ipsec_sa_from_fib_node (fib_node_t * node)
{
  ASSERT (FIB_NODE_TYPE_IPSEC_SA == node->fn_type);
  return ((ipsec_sa_t *) (((char *) node) -
                          STRUCT_OFFSET_OF (ipsec_sa_t, node)));
}

/**
 * Function definition to inform the FIB node that its last lock has gone.
 */
static void
ipsec_sa_last_lock_gone (fib_node_t * node)
{
  /* The ipsec SA is a root of the graph. As such
   * it never has children and thus is never locked. */
  ipsec_sa_del (ipsec_sa_from_fib_node (node));
}

/**
 * Function definition to backwalk a FIB node
 */
static fib_node_back_walk_rc_t
ipsec_sa_back_walk (fib_node_t * node, fib_node_back_walk_ctx_t * ctx)
{
  ipsec_sa_stack (ipsec_sa_from_fib_node (node));

  return (FIB_NODE_BACK_WALK_CONTINUE);
}

/*
 * Virtual function table registered by SAs
 * for participation in the FIB object graph.
 */
const static fib_node_vft_t ipsec_sa_vft = {
  .fnv_get = ipsec_sa_fib_node_get,
  .fnv_last_lock = ipsec_sa_last_lock_gone,
  .fnv_back_walk = ipsec_sa_back_walk,
};
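
/*
 * Once this table is registered (in ipsec_sa_interface_init below), FIB
 * back-walks, e.g. when the tunnel destination's forwarding changes, land in
 * ipsec_sa_back_walk above, which restacks the SA's DPO on the updated
 * forwarding via ipsec_sa_stack.
 */
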
/* force inclusion from application's main.c */
static clib_error_t *
ipsec_sa_interface_init (vlib_main_t * vm)
{
  fib_node_register_type (FIB_NODE_TYPE_IPSEC_SA, &ipsec_sa_vft);

  return 0;
}

VLIB_INIT_FUNCTION (ipsec_sa_interface_init);

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */