2 * Copyright (c) 2015 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
15 #ifndef __IPSEC_SPD_SA_H__
16 #define __IPSEC_SPD_SA_H__
18 #include <vlib/vlib.h>
19 #include <vnet/crypto/crypto.h>
20 #include <vnet/ip/ip.h>
21 #include <vnet/fib/fib_node.h>
22 #include <vnet/tunnel/tunnel.h>
24 #define foreach_ipsec_crypto_alg \
26 _ (1, AES_CBC_128, "aes-cbc-128") \
27 _ (2, AES_CBC_192, "aes-cbc-192") \
28 _ (3, AES_CBC_256, "aes-cbc-256") \
29 _ (4, AES_CTR_128, "aes-ctr-128") \
30 _ (5, AES_CTR_192, "aes-ctr-192") \
31 _ (6, AES_CTR_256, "aes-ctr-256") \
32 _ (7, AES_GCM_128, "aes-gcm-128") \
33 _ (8, AES_GCM_192, "aes-gcm-192") \
34 _ (9, AES_GCM_256, "aes-gcm-256") \
35 _ (10, DES_CBC, "des-cbc") \
36 _ (11, 3DES_CBC, "3des-cbc")
40 #define _(v, f, s) IPSEC_CRYPTO_ALG_##f = v,
41 foreach_ipsec_crypto_alg
44 } __clib_packed ipsec_crypto_alg_t;
/* True when _alg is one of the AES-GCM (AEAD) crypto algorithms.
 * _alg is parenthesized so callers may pass arbitrary expressions
 * without operator-precedence surprises. */
#define IPSEC_CRYPTO_ALG_IS_GCM(_alg)                     \
  ((((_alg) == IPSEC_CRYPTO_ALG_AES_GCM_128) ||           \
    ((_alg) == IPSEC_CRYPTO_ALG_AES_GCM_192) ||           \
    ((_alg) == IPSEC_CRYPTO_ALG_AES_GCM_256)))
51 #define foreach_ipsec_integ_alg \
53 _ (1, MD5_96, "md5-96") /* RFC2403 */ \
54 _ (2, SHA1_96, "sha1-96") /* RFC2404 */ \
55 _ (3, SHA_256_96, "sha-256-96") /* draft-ietf-ipsec-ciph-sha-256-00 */ \
56 _ (4, SHA_256_128, "sha-256-128") /* RFC4868 */ \
57 _ (5, SHA_384_192, "sha-384-192") /* RFC4868 */ \
58 _ (6, SHA_512_256, "sha-512-256") /* RFC4868 */
62 #define _(v, f, s) IPSEC_INTEG_ALG_##f = v,
63 foreach_ipsec_integ_alg
66 } __clib_packed ipsec_integ_alg_t;
70 IPSEC_PROTOCOL_AH = 0,
71 IPSEC_PROTOCOL_ESP = 1
72 } __clib_packed ipsec_protocol_t;
74 #define IPSEC_KEY_MAX_LEN 128
75 typedef struct ipsec_key_t_
78 u8 data[IPSEC_KEY_MAX_LEN];
82 * Enable extended sequence numbers
84 * IPsec tunnel mode if non-zero, else transport mode
* IPsec tunnel mode is IPv6 if non-zero, else IPv4 tunnel;
* only valid if is_tunnel is non-zero
87 * enable UDP encapsulation for NAT traversal
89 #define foreach_ipsec_sa_flags \
91 _ (1, USE_ESN, "esn") \
92 _ (2, USE_ANTI_REPLAY, "anti-replay") \
93 _ (4, IS_TUNNEL, "tunnel") \
94 _ (8, IS_TUNNEL_V6, "tunnel-v6") \
95 _ (16, UDP_ENCAP, "udp-encap") \
96 _ (32, IS_PROTECT, "Protect") \
97 _ (64, IS_INBOUND, "inbound") \
98 _ (128, IS_AEAD, "aead") \
100 typedef enum ipsec_sad_flags_t_
102 #define _(v, f, s) IPSEC_SA_FLAG_##f = v,
103 foreach_ipsec_sa_flags
105 } __clib_packed ipsec_sa_flags_t;
/* ipsec_sa_flags_t is declared __clib_packed above; verify at build time
 * that it really occupies a single byte. */
STATIC_ASSERT (sizeof (ipsec_sa_flags_t) == 1, "IPSEC SA flags > 1 byte");
111 CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
114 ipsec_sa_flags_t flags;
119 u32 encrypt_thread_index;
120 u32 decrypt_thread_index;
129 vnet_crypto_key_index_t crypto_key_index;
130 vnet_crypto_key_index_t integ_key_index;
132 /* Union data shared by sync and async ops, updated when mode is
138 vnet_crypto_op_id_t crypto_enc_op_id:16;
139 vnet_crypto_op_id_t crypto_dec_op_id:16;
140 vnet_crypto_op_id_t integ_op_id:16;
145 vnet_crypto_async_op_id_t crypto_async_enc_op_id:16;
146 vnet_crypto_async_op_id_t crypto_async_dec_op_id:16;
147 vnet_crypto_key_index_t linked_key_index;
153 CLIB_CACHE_LINE_ALIGN_MARK (cacheline1);
158 ip4_header_t ip4_hdr;
159 ip6_header_t ip6_hdr;
161 udp_header_t udp_hdr;
163 /* Salt used in GCM modes - stored in network byte order */
166 ipsec_protocol_t protocol;
167 tunnel_encap_decap_flags_t tunnel_flags;
171 /* data accessed by dataplane code should be above this comment */
172 CLIB_CACHE_LINE_ALIGN_MARK (cacheline2);
174 /* Elements with u64 size multiples */
179 vnet_crypto_op_id_t crypto_enc_op_id:16;
180 vnet_crypto_op_id_t crypto_dec_op_id:16;
181 vnet_crypto_op_id_t integ_op_id:16;
190 vnet_crypto_async_op_id_t crypto_async_enc_op_id:16;
191 vnet_crypto_async_op_id_t crypto_async_dec_op_id:16;
192 vnet_crypto_key_index_t linked_key_index;
197 ip46_address_t tunnel_src_addr;
198 ip46_address_t tunnel_dst_addr;
202 /* elements with u32 size */
205 vnet_crypto_alg_t integ_calg;
206 vnet_crypto_alg_t crypto_calg;
208 fib_node_index_t fib_entry_index;
213 ipsec_crypto_alg_t crypto_alg;
214 ipsec_integ_alg_t integ_alg;
216 ipsec_key_t integ_key;
217 ipsec_key_t crypto_key;
/* The dataplane fields of ipsec_sa_t are laid out against the cacheline0/1/2
 * markers above; fail the build if the struct layout drifts off those
 * cache-line boundaries. */
STATIC_ASSERT_OFFSET_OF (ipsec_sa_t, cacheline1, CLIB_CACHE_LINE_BYTES);
STATIC_ASSERT_OFFSET_OF (ipsec_sa_t, cacheline2, 2 * CLIB_CACHE_LINE_BYTES);
225 ipsec_sa_is_set_##v (const ipsec_sa_t *sa) { \
226 return (sa->flags & IPSEC_SA_FLAG_##v); \
228 foreach_ipsec_sa_flags
232 ipsec_sa_set_##v (ipsec_sa_t *sa) { \
233 return (sa->flags |= IPSEC_SA_FLAG_##v); \
235 foreach_ipsec_sa_flags
239 ipsec_sa_unset_##v (ipsec_sa_t *sa) { \
240 return (sa->flags &= ~IPSEC_SA_FLAG_##v); \
242 foreach_ipsec_sa_flags
246 * SA packet & bytes counters
248 extern vlib_combined_counter_main_t ipsec_sa_counters;
250 extern void ipsec_mk_key (ipsec_key_t * key, const u8 * data, u8 len);
252 extern int ipsec_sa_add_and_lock (u32 id,
254 ipsec_protocol_t proto,
255 ipsec_crypto_alg_t crypto_alg,
256 const ipsec_key_t * ck,
257 ipsec_integ_alg_t integ_alg,
258 const ipsec_key_t * ik,
259 ipsec_sa_flags_t flags,
262 const ip46_address_t * tunnel_src_addr,
263 const ip46_address_t * tunnel_dst_addr,
264 tunnel_encap_decap_flags_t tunnel_flags,
266 u32 * sa_index, u16 src_port, u16 dst_port);
267 extern index_t ipsec_sa_find_and_lock (u32 id);
268 extern int ipsec_sa_unlock_id (u32 id);
269 extern void ipsec_sa_unlock (index_t sai);
270 extern void ipsec_sa_lock (index_t sai);
271 extern void ipsec_sa_clear (index_t sai);
272 extern void ipsec_sa_set_crypto_alg (ipsec_sa_t * sa,
273 ipsec_crypto_alg_t crypto_alg);
274 extern void ipsec_sa_set_integ_alg (ipsec_sa_t * sa,
275 ipsec_integ_alg_t integ_alg);
277 typedef walk_rc_t (*ipsec_sa_walk_cb_t) (ipsec_sa_t * sa, void *ctx);
278 extern void ipsec_sa_walk (ipsec_sa_walk_cb_t cd, void *ctx);
280 extern u8 *format_ipsec_crypto_alg (u8 * s, va_list * args);
281 extern u8 *format_ipsec_integ_alg (u8 * s, va_list * args);
282 extern u8 *format_ipsec_sa (u8 * s, va_list * args);
283 extern u8 *format_ipsec_key (u8 * s, va_list * args);
284 extern uword unformat_ipsec_crypto_alg (unformat_input_t * input,
286 extern uword unformat_ipsec_integ_alg (unformat_input_t * input,
288 extern uword unformat_ipsec_key (unformat_input_t * input, va_list * args);
290 #define IPSEC_UDP_PORT_NONE ((u16)~0)
293 * Anti Replay definitions
296 #define IPSEC_SA_ANTI_REPLAY_WINDOW_SIZE (64)
297 #define IPSEC_SA_ANTI_REPLAY_WINDOW_MAX_INDEX (IPSEC_SA_ANTI_REPLAY_WINDOW_SIZE-1)
* sequence numbers less than the lower bound are outside of the window
301 * From RFC4303 Appendix A:
/* Lowest sequence number still inside the anti-replay window whose upper
 * edge is _tl (RFC4303 Appendix A).  _tl is parenthesized so any
 * expression may be passed safely. */
#define IPSEC_SA_ANTI_REPLAY_WINDOW_LOWER_BOUND(_tl) \
  ((_tl) - IPSEC_SA_ANTI_REPLAY_WINDOW_SIZE + 1)
308 * inputs need to be in host byte order.
311 ipsec_sa_anti_replay_check (ipsec_sa_t * sa, u32 seq)
315 if ((sa->flags & IPSEC_SA_FLAG_USE_ANTI_REPLAY) == 0)
318 if (!ipsec_sa_is_set_USE_ESN (sa))
320 if (PREDICT_TRUE (seq > sa->last_seq))
323 diff = sa->last_seq - seq;
325 if (IPSEC_SA_ANTI_REPLAY_WINDOW_SIZE > diff)
326 return (sa->replay_window & (1ULL << diff)) ? 1 : 0;
334 th = sa->last_seq_hi;
337 if (PREDICT_TRUE (tl >= (IPSEC_SA_ANTI_REPLAY_WINDOW_MAX_INDEX)))
* the last sequence number VPP received is more than one
* window size greater than zero.
342 * Case A from RFC4303 Appendix A.
344 if (seq < IPSEC_SA_ANTI_REPLAY_WINDOW_LOWER_BOUND (tl))
347 * the received sequence number is lower than the lower bound
348 * of the window, this could mean either a replay packet or that
* the high sequence number has wrapped. if it decrypts correctly
* then it's the latter.
* the received sequence number is greater than the low
* The received seq number is within bounds of the window
365 * check if it's a duplicate
367 return (sa->replay_window & (1ULL << diff)) ? 1 : 0;
370 * The received sequence number is greater than the window
371 * upper bound. this packet will move the window along, assuming
372 * it decrypts correctly.
* the last sequence number VPP received is within one window
* size of zero, i.e. 0 < TL < WINDOW_SIZE, the lower bound is thus a
* large sequence number.
* Note that the check below uses unsigned integer arithmetic, so the
* RHS will be a larger number.
385 * Case B from RFC4303 Appendix A.
387 if (seq < IPSEC_SA_ANTI_REPLAY_WINDOW_LOWER_BOUND (tl))
390 * the sequence number is less than the lower bound.
395 * the packet is within the window upper bound.
396 * check for duplicates.
399 return (sa->replay_window & (1ULL << diff)) ? 1 : 0;
404 * the packet is less the window lower bound or greater than
405 * the higher bound, depending on how you look at it...
406 * We're assuming, given that the last sequence number received,
* TL < WINDOW_SIZE, that a larger seq num is more likely to be
408 * a packet that moves the window forward, than a packet that has
409 * wrapped the high sequence again. If it were the latter then
410 * we've lost close to 2^32 packets.
* the packet seq number is between the lower bound (a large number)
420 * and MAX_SEQ_NUM. This is in the window since the window upper bound
422 * However, since TL is the other side of 0 to the received
423 * packet, the SA has moved on to a higher sequence number.
426 return (sa->replay_window & (1ULL << diff)) ? 1 : 0;
434 * Anti replay window advance
435 * inputs need to be in host byte order.
438 ipsec_sa_anti_replay_advance (ipsec_sa_t * sa, u32 seq)
441 if (PREDICT_TRUE (sa->flags & IPSEC_SA_FLAG_USE_ANTI_REPLAY) == 0)
444 if (PREDICT_TRUE (sa->flags & IPSEC_SA_FLAG_USE_ESN))
446 int wrap = sa->seq_hi - sa->last_seq_hi;
448 if (wrap == 0 && seq > sa->last_seq)
450 pos = seq - sa->last_seq;
451 if (pos < IPSEC_SA_ANTI_REPLAY_WINDOW_SIZE)
452 sa->replay_window = ((sa->replay_window) << pos) | 1;
454 sa->replay_window = 1;
459 pos = ~seq + sa->last_seq + 1;
460 if (pos < IPSEC_SA_ANTI_REPLAY_WINDOW_SIZE)
461 sa->replay_window = ((sa->replay_window) << pos) | 1;
463 sa->replay_window = 1;
465 sa->last_seq_hi = sa->seq_hi;
469 pos = ~seq + sa->last_seq + 1;
470 sa->replay_window |= (1ULL << pos);
474 pos = sa->last_seq - seq;
475 sa->replay_window |= (1ULL << pos);
480 if (seq > sa->last_seq)
482 pos = seq - sa->last_seq;
483 if (pos < IPSEC_SA_ANTI_REPLAY_WINDOW_SIZE)
484 sa->replay_window = ((sa->replay_window) << pos) | 1;
486 sa->replay_window = 1;
491 pos = sa->last_seq - seq;
492 sa->replay_window |= (1ULL << pos);
* Chooses which thread index a SA should be assigned to.
* If the input is 0, picks a pseudo-random worker based on unix_time_now_nsec
503 ipsec_sa_assign_thread (u32 thread_id)
505 return ((thread_id) ? thread_id
506 : (unix_time_now_nsec () % vlib_num_workers ()) + 1);
509 #endif /* __IPSEC_SPD_SA_H__ */
512 * fd.io coding-style-patch-verification: ON
515 * eval: (c-set-style "gnu")