X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=src%2Fvnet%2Fipsec%2Fipsec_sa.h;h=84abd6ef4b44c063772e4e6d40da6327cf32de46;hb=c7eaa711f;hp=43d699be928233efe6e34964e56c1d582c2c751a;hpb=999c8ee6d6f1c07ba7877fb3f9aa66a90774aacc;p=vpp.git

diff --git a/src/vnet/ipsec/ipsec_sa.h b/src/vnet/ipsec/ipsec_sa.h
index 43d699be928..84abd6ef4b4 100644
--- a/src/vnet/ipsec/ipsec_sa.h
+++ b/src/vnet/ipsec/ipsec_sa.h
@@ -16,7 +16,10 @@
 #define __IPSEC_SPD_SA_H__
 
 #include 
+#include 
 #include 
+#include 
+#include 
 
 #define foreach_ipsec_crypto_alg \
   _ (0, NONE, "none") \
@@ -38,7 +41,17 @@ typedef enum
   foreach_ipsec_crypto_alg
 #undef _
   IPSEC_CRYPTO_N_ALG,
-} ipsec_crypto_alg_t;
+} __clib_packed ipsec_crypto_alg_t;
+
+#define IPSEC_CRYPTO_ALG_IS_GCM(_alg) \
+  (((_alg == IPSEC_CRYPTO_ALG_AES_GCM_128) || \
+    (_alg == IPSEC_CRYPTO_ALG_AES_GCM_192) || \
+    (_alg == IPSEC_CRYPTO_ALG_AES_GCM_256)))
+
+#define IPSEC_CRYPTO_ALG_IS_CTR(_alg) \
+  (((_alg == IPSEC_CRYPTO_ALG_AES_CTR_128) || \
+    (_alg == IPSEC_CRYPTO_ALG_AES_CTR_192) || \
+    (_alg == IPSEC_CRYPTO_ALG_AES_CTR_256)))
 
 #define foreach_ipsec_integ_alg \
   _ (0, NONE, "none") \
@@ -55,63 +68,437 @@ typedef enum
   foreach_ipsec_integ_alg
 #undef _
   IPSEC_INTEG_N_ALG,
-} ipsec_integ_alg_t;
+} __clib_packed ipsec_integ_alg_t;
 
 typedef enum
 {
   IPSEC_PROTOCOL_AH = 0,
   IPSEC_PROTOCOL_ESP = 1
-} ipsec_protocol_t;
+} __clib_packed ipsec_protocol_t;
 
-typedef struct
+#define IPSEC_KEY_MAX_LEN 128
+typedef struct ipsec_key_t_
 {
-  u32 id;
-  u32 spi;
-  ipsec_protocol_t protocol;
+  u8 len;
+  u8 data[IPSEC_KEY_MAX_LEN];
+} ipsec_key_t;
 
-  ipsec_crypto_alg_t crypto_alg;
-  u8 crypto_key_len;
-  u8 crypto_key[128];
+/*
+ * USE_ESN: enable extended sequence numbers
+ * USE_ANTI_REPLAY: enable anti-replay protection
+ * IS_TUNNEL: IPsec tunnel mode if set, else transport mode
+ * IS_TUNNEL_V6: the tunnel is IPv6 if set, else IPv4;
+ *   only valid if IS_TUNNEL is also set
+ * UDP_ENCAP: enable UDP encapsulation for NAT traversal
+ */
+#define foreach_ipsec_sa_flags \
+  _ (0, NONE, "none") \
+  _ (1, USE_ESN, "esn") \
+  _ (2, USE_ANTI_REPLAY, "anti-replay") \
+  _ (4, IS_TUNNEL, "tunnel") \
+  _ (8, IS_TUNNEL_V6, "tunnel-v6") \
+  _ (16, UDP_ENCAP, "udp-encap") \
+  _ (32, IS_PROTECT, "Protect") \
+  _ (64, IS_INBOUND, "inbound") \
+  _ (128, IS_AEAD, "aead") \
+  _ (256, IS_CTR, "ctr")
 
-  ipsec_integ_alg_t integ_alg;
-  u8 integ_key_len;
-  u8 integ_key[128];
+typedef enum ipsec_sad_flags_t_
+{
+#define _(v, f, s) IPSEC_SA_FLAG_##f = v,
+  foreach_ipsec_sa_flags
+#undef _
+} __clib_packed ipsec_sa_flags_t;
 
-  u8 use_esn;
-  u8 use_anti_replay;
+STATIC_ASSERT (sizeof (ipsec_sa_flags_t) == 2, "IPSEC SA flags != 2 bytes");
 
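For readers unfamiliar with the X-macro idiom used above: instantiating the _() macro inside the enum body turns each foreach_ipsec_sa_flags entry into one enumerator whose value is a distinct bit, which is why the flags can be OR'd together into the packed 16-bit field. Hand-expanded (abridged), the enum the preprocessor generates is:

    /* Equivalent hand expansion of the flags enum (abridged): each flag
     * occupies its own bit within the 16-bit __clib_packed field. */
    typedef enum ipsec_sad_flags_t_
    {
      IPSEC_SA_FLAG_NONE = 0,
      IPSEC_SA_FLAG_USE_ESN = 1,
      IPSEC_SA_FLAG_USE_ANTI_REPLAY = 2,
      IPSEC_SA_FLAG_IS_TUNNEL = 4,
      /* ... through IPSEC_SA_FLAG_IS_CTR = 256 */
    } __clib_packed ipsec_sa_flags_t;
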
-  u8 is_tunnel;
-  u8 is_tunnel_ip6;
-  u8 udp_encap;
-  ip46_address_t tunnel_src_addr;
-  ip46_address_t tunnel_dst_addr;
+typedef struct
+{
+  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
 
-  u32 tx_fib_index;
-  u32 salt;
+  /* flags */
+  ipsec_sa_flags_t flags;
 
-  /* runtime */
+  u8 crypto_iv_size;
+  u8 esp_block_align;
+  u8 integ_icv_size;
+
+  u8 __pad1[3];
+
+  u32 thread_index;
+
+  u32 spi;
   u32 seq;
   u32 seq_hi;
   u32 last_seq;
   u32 last_seq_hi;
   u64 replay_window;
+  dpo_id_t dpo;
+
+  vnet_crypto_key_index_t crypto_key_index;
+  vnet_crypto_key_index_t integ_key_index;
+
+  /* Union data shared by sync and async ops, updated when mode is
+   * changed. */
+  union
+  {
+    struct
+    {
+      vnet_crypto_op_id_t crypto_enc_op_id:16;
+      vnet_crypto_op_id_t crypto_dec_op_id:16;
+      vnet_crypto_op_id_t integ_op_id:16;
+    };
+
+    struct
+    {
+      vnet_crypto_async_op_id_t crypto_async_enc_op_id:16;
+      vnet_crypto_async_op_id_t crypto_async_dec_op_id:16;
+      vnet_crypto_key_index_t linked_key_index;
+    };
+
+    u64 crypto_op_data;
+  };
+
+  CLIB_CACHE_LINE_ALIGN_MARK (cacheline1);
+
+  u64 ctr_iv_counter;
+  union
+  {
+    ip4_header_t ip4_hdr;
+    ip6_header_t ip6_hdr;
+  };
+  udp_header_t udp_hdr;
+
+  /* Salt used in CTR modes (incl. GCM) - stored in network byte order */
+  u32 salt;
+
+  ipsec_protocol_t protocol;
+  tunnel_encap_decap_flags_t tunnel_flags;
+  u8 __pad[2];
+
+  /* data accessed by dataplane code should be above this comment */
+  CLIB_CACHE_LINE_ALIGN_MARK (cacheline2);
+
+  /* Elements with u64 size multiples */
+  union
+  {
+    struct
+    {
+      vnet_crypto_op_id_t crypto_enc_op_id:16;
+      vnet_crypto_op_id_t crypto_dec_op_id:16;
+      vnet_crypto_op_id_t integ_op_id:16;
+    };
+    u64 data;
+  } sync_op_data;
+
+  union
+  {
+    struct
+    {
+      vnet_crypto_async_op_id_t crypto_async_enc_op_id:16;
+      vnet_crypto_async_op_id_t crypto_async_dec_op_id:16;
+      vnet_crypto_key_index_t linked_key_index;
+    };
+    u64 data;
+  } async_op_data;
+
+  tunnel_t tunnel;
+
+  fib_node_t node;
+
+  /* elements with u32 size */
+  u32 id;
+  u32 stat_index;
+  vnet_crypto_alg_t integ_calg;
+  vnet_crypto_alg_t crypto_calg;
+
+  /* else u8 packed */
+  ipsec_crypto_alg_t crypto_alg;
+  ipsec_integ_alg_t integ_alg;
 
-  /* lifetime data */
-  u64 total_data_size;
+  ipsec_key_t integ_key;
+  ipsec_key_t crypto_key;
 } ipsec_sa_t;
 
-extern int ipsec_add_del_sa (vlib_main_t * vm, ipsec_sa_t * new_sa,
-			     int is_add);
-extern u8 ipsec_is_sa_used (u32 sa_index);
-extern int ipsec_set_sa_key (vlib_main_t * vm, ipsec_sa_t * sa_update);
-extern u32 ipsec_get_sa_index_by_sa_id (u32 sa_id);
+STATIC_ASSERT_OFFSET_OF (ipsec_sa_t, cacheline1, CLIB_CACHE_LINE_BYTES);
+STATIC_ASSERT_OFFSET_OF (ipsec_sa_t, cacheline2, 2 * CLIB_CACHE_LINE_BYTES);
+
+#define _(a,v,s) \
+  always_inline int \
+  ipsec_sa_is_set_##v (const ipsec_sa_t *sa) { \
+    return (sa->flags & IPSEC_SA_FLAG_##v); \
+  }
+foreach_ipsec_sa_flags
+#undef _
+#define _(a,v,s) \
+  always_inline int \
+  ipsec_sa_set_##v (ipsec_sa_t *sa) { \
+    return (sa->flags |= IPSEC_SA_FLAG_##v); \
+  }
+  foreach_ipsec_sa_flags
+#undef _
+#define _(a,v,s) \
+  always_inline int \
+  ipsec_sa_unset_##v (ipsec_sa_t *sa) { \
+    return (sa->flags &= ~IPSEC_SA_FLAG_##v); \
+  }
+  foreach_ipsec_sa_flags
+#undef _
+/**
+ * @brief
+ * SA packet & bytes counters
+ */
+extern vlib_combined_counter_main_t ipsec_sa_counters;
+
+extern void ipsec_mk_key (ipsec_key_t * key, const u8 * data, u8 len);
+
+extern int
+ipsec_sa_add_and_lock (u32 id, u32 spi, ipsec_protocol_t proto,
+		       ipsec_crypto_alg_t crypto_alg, const ipsec_key_t *ck,
+		       ipsec_integ_alg_t integ_alg, const ipsec_key_t *ik,
+		       ipsec_sa_flags_t flags, u32 salt, u16 src_port,
+		       u16 dst_port, const tunnel_t *tun, u32 *sa_out_index);
+extern index_t ipsec_sa_find_and_lock (u32 id);
+extern int ipsec_sa_unlock_id (u32 id);
+extern void ipsec_sa_unlock (index_t sai);
+extern void ipsec_sa_lock (index_t sai);
+extern void ipsec_sa_clear (index_t sai);
+extern void ipsec_sa_set_crypto_alg (ipsec_sa_t * sa,
+				     ipsec_crypto_alg_t crypto_alg);
+extern void ipsec_sa_set_integ_alg (ipsec_sa_t * sa,
+				    ipsec_integ_alg_t integ_alg);
+
+typedef walk_rc_t (*ipsec_sa_walk_cb_t) (ipsec_sa_t * sa, void *ctx);
+extern void ipsec_sa_walk (ipsec_sa_walk_cb_t cb, void *ctx);
 
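To show how the declarations above fit together, here is a minimal, hypothetical sketch of creating and releasing a transport-mode ESP SA. The id, SPI, and key buffers are invented for illustration, the algorithm enumerators are assumed from the elided foreach lists, and error handling is mostly omitted:

    /* Hypothetical usage sketch - id, SPI and key material are made up. */
    static int
    example_create_sa (void)
    {
      u8 aes_key_bytes[16] = { 0 };   /* placeholder 128-bit AES key */
      u8 hmac_key_bytes[20] = { 0 };  /* placeholder 160-bit HMAC key */
      ipsec_key_t ck, ik;
      u32 sa_index;
      int rv;

      ipsec_mk_key (&ck, aes_key_bytes, sizeof (aes_key_bytes));
      ipsec_mk_key (&ik, hmac_key_bytes, sizeof (hmac_key_bytes));

      rv = ipsec_sa_add_and_lock (10 /* id */, 0x1000 /* spi */,
                                  IPSEC_PROTOCOL_ESP,
                                  IPSEC_CRYPTO_ALG_AES_CBC_128, &ck,
                                  IPSEC_INTEG_ALG_SHA1_96, &ik,
                                  IPSEC_SA_FLAG_USE_ANTI_REPLAY,
                                  0 /* salt: CTR/GCM modes only */,
                                  IPSEC_UDP_PORT_NONE, IPSEC_UDP_PORT_NONE,
                                  NULL /* no tunnel: transport mode */,
                                  &sa_index);
      if (rv)
        return rv;

      /* ... datapath uses the SA; drop our reference when done ... */
      return ipsec_sa_unlock_id (10);
    }

The add-and-lock/unlock pairing matters: the SA is reference counted, so anything that resolved it (here, by id) must release it with ipsec_sa_unlock_id() or ipsec_sa_unlock() when finished.
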
 extern u8 *format_ipsec_crypto_alg (u8 * s, va_list * args);
 extern u8 *format_ipsec_integ_alg (u8 * s, va_list * args);
+extern u8 *format_ipsec_sa (u8 * s, va_list * args);
+extern u8 *format_ipsec_key (u8 * s, va_list * args);
 extern uword unformat_ipsec_crypto_alg (unformat_input_t * input,
 					va_list * args);
 extern uword unformat_ipsec_integ_alg (unformat_input_t * input,
 				       va_list * args);
+extern uword unformat_ipsec_key (unformat_input_t * input, va_list * args);
+
+#define IPSEC_UDP_PORT_NONE ((u16)~0)
+
+/*
+ * Anti-replay definitions
+ */
+
+#define IPSEC_SA_ANTI_REPLAY_WINDOW_SIZE (64)
+#define IPSEC_SA_ANTI_REPLAY_WINDOW_MAX_INDEX (IPSEC_SA_ANTI_REPLAY_WINDOW_SIZE-1)
+
+/*
+ * Sequence numbers less than the lower bound are outside of the window.
+ * From RFC4303 Appendix A:
+ *   Bl = Tl - W + 1
+ */
+#define IPSEC_SA_ANTI_REPLAY_WINDOW_LOWER_BOUND(_tl) (_tl - IPSEC_SA_ANTI_REPLAY_WINDOW_SIZE + 1)
+
+/*
+ * Anti-replay check.
+ * Inputs need to be in host byte order.
+ */
+always_inline int
+ipsec_sa_anti_replay_check (ipsec_sa_t * sa, u32 seq)
+{
+  u32 diff, tl, th;
+
+  if ((sa->flags & IPSEC_SA_FLAG_USE_ANTI_REPLAY) == 0)
+    return 0;
+
+  if (!ipsec_sa_is_set_USE_ESN (sa))
+    {
+      if (PREDICT_TRUE (seq > sa->last_seq))
+	return 0;
+
+      diff = sa->last_seq - seq;
+
+      if (IPSEC_SA_ANTI_REPLAY_WINDOW_SIZE > diff)
+	return (sa->replay_window & (1ULL << diff)) ? 1 : 0;
+      else
+	return 1;
+
+      return 0;
+    }
+
+  tl = sa->last_seq;
+  th = sa->last_seq_hi;
+  diff = tl - seq;
+
+  if (PREDICT_TRUE (tl >= (IPSEC_SA_ANTI_REPLAY_WINDOW_MAX_INDEX)))
+    {
+      /*
+       * the last sequence number VPP received is more than one
+       * window size greater than zero.
+       * Case A from RFC4303 Appendix A.
+       */
+      if (seq < IPSEC_SA_ANTI_REPLAY_WINDOW_LOWER_BOUND (tl))
+	{
+	  /*
+	   * the received sequence number is lower than the lower bound
+	   * of the window; this could mean either a replayed packet or that
+	   * the high sequence number has wrapped. If it decrypts correctly
+	   * then it's the latter.
+	   */
+	  sa->seq_hi = th + 1;
+	  return 0;
+	}
+      else
+	{
+	  /*
+	   * the received sequence number is greater than the low
+	   * end of the window.
+	   */
+	  sa->seq_hi = th;
+	  if (seq <= tl)
+	    /*
+	     * The received seq number is within the bounds of the window;
+	     * check if it's a duplicate.
+	     */
+	    return (sa->replay_window & (1ULL << diff)) ? 1 : 0;
+	  else
+	    /*
+	     * The received sequence number is greater than the window
+	     * upper bound. This packet will move the window along, assuming
+	     * it decrypts correctly.
+	     */
+	    return 0;
+	}
+    }
+  else
+    {
+      /*
+       * the last sequence number VPP received is within one window
+       * size of zero, i.e. 0 < TL < WINDOW_SIZE, so the lower bound is a
+       * large sequence number.
+       * Note that the check below uses unsigned integer arithmetic, so the
+       * RHS will be a large number.
+       * Case B from RFC4303 Appendix A.
+       */
+      if (seq < IPSEC_SA_ANTI_REPLAY_WINDOW_LOWER_BOUND (tl))
+	{
+	  /*
+	   * the sequence number is less than the lower bound.
+	   */
+	  if (seq <= tl)
+	    {
+	      /*
+	       * the packet is within the window upper bound.
+	       * check for duplicates.
+	       */
+	      sa->seq_hi = th;
+	      return (sa->replay_window & (1ULL << diff)) ? 1 : 0;
+	    }
+	  else
+	    {
+	      /*
+	       * the packet is less than the window lower bound or greater
+	       * than the upper bound, depending on how you look at it...
+	       * We're assuming, given that the last sequence number received,
+	       * TL, is less than WINDOW_SIZE, that a larger seq num is more
+	       * likely to be a packet that moves the window forward than a
+	       * packet that has wrapped the high sequence again. If it were
+	       * the latter then we've lost close to 2^32 packets.
+	       */
+	      sa->seq_hi = th;
+	      return 0;
+	    }
+	}
+      else
+	{
+	  /*
+	   * the packet seq number is between the lower bound (a large number)
+	   * and MAX_SEQ_NUM. This is in the window, since the window upper
+	   * bound, tl, is > 0.
+	   * However, since TL is on the other side of 0 from the received
+	   * packet, the SA has moved on to a higher sequence number.
+	   */
+	  sa->seq_hi = th - 1;
+	  return (sa->replay_window & (1ULL << diff)) ? 1 : 0;
+	}
+    }
+
+  return 0;
+}
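A concrete trace helps check the window logic. The fragment below is illustrative only, and assumes an SA with USE_ANTI_REPLAY set and USE_ESN clear, so the non-ESN branch above is exercised (return value 1 means drop as a replay, 0 means accept):

    /* Non-ESN branch walk-through: 64-entry window, last_seq = 100, and
     * only bit 0 (seq 100 itself) marked as seen. */
    sa->last_seq = 100;
    sa->replay_window = 1;

    ipsec_sa_anti_replay_check (sa, 150); /* 0: ahead of window, accept  */
    ipsec_sa_anti_replay_check (sa, 100); /* 1: bit 0 set, duplicate     */
    ipsec_sa_anti_replay_check (sa, 90);  /* 0: in window, not yet seen  */
    ipsec_sa_anti_replay_check (sa, 20);  /* 1: older than the 64-entry window */
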
+/*
+ * Anti-replay window advance.
+ * Inputs need to be in host byte order.
+ */
+always_inline void
+ipsec_sa_anti_replay_advance (ipsec_sa_t * sa, u32 seq)
+{
+  u32 pos;
+  if (PREDICT_TRUE (sa->flags & IPSEC_SA_FLAG_USE_ANTI_REPLAY) == 0)
+    return;
+
+  if (PREDICT_TRUE (sa->flags & IPSEC_SA_FLAG_USE_ESN))
+    {
+      int wrap = sa->seq_hi - sa->last_seq_hi;
+
+      if (wrap == 0 && seq > sa->last_seq)
+	{
+	  pos = seq - sa->last_seq;
+	  if (pos < IPSEC_SA_ANTI_REPLAY_WINDOW_SIZE)
+	    sa->replay_window = ((sa->replay_window) << pos) | 1;
+	  else
+	    sa->replay_window = 1;
+	  sa->last_seq = seq;
+	}
+      else if (wrap > 0)
+	{
+	  pos = ~seq + sa->last_seq + 1;
+	  if (pos < IPSEC_SA_ANTI_REPLAY_WINDOW_SIZE)
+	    sa->replay_window = ((sa->replay_window) << pos) | 1;
+	  else
+	    sa->replay_window = 1;
+	  sa->last_seq = seq;
+	  sa->last_seq_hi = sa->seq_hi;
+	}
+      else if (wrap < 0)
+	{
+	  pos = ~seq + sa->last_seq + 1;
+	  sa->replay_window |= (1ULL << pos);
+	}
+      else
+	{
+	  pos = sa->last_seq - seq;
+	  sa->replay_window |= (1ULL << pos);
+	}
+    }
+  else
+    {
+      if (seq > sa->last_seq)
+	{
+	  pos = seq - sa->last_seq;
+	  if (pos < IPSEC_SA_ANTI_REPLAY_WINDOW_SIZE)
+	    sa->replay_window = ((sa->replay_window) << pos) | 1;
+	  else
+	    sa->replay_window = 1;
+	  sa->last_seq = seq;
+	}
+      else
+	{
+	  pos = sa->last_seq - seq;
+	  sa->replay_window |= (1ULL << pos);
+	}
+    }
+}
+
+
+/*
+ * Chooses the thread to which an SA is assigned: a non-zero thread_id
+ * is returned unchanged; otherwise a worker is picked pseudo-randomly
+ * using unix_time_now_nsec().
+ */
+always_inline u32
+ipsec_sa_assign_thread (u32 thread_id)
+{
+  return ((thread_id) ? thread_id
+	  : (unix_time_now_nsec () % vlib_num_workers ()) + 1);
+}
 
 #endif /* __IPSEC_SPD_SA_H__ */
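Taken together, the check/advance pair is meant to bracket decryption, so the window only moves for packets that authenticate. A simplified sketch of that calling pattern follows; decrypt_and_verify, esp_hdr, pkt, and the drop codes are stand-ins for illustration, not VPP symbols:

    /* Inbound ESP sketch: never advance the window until the packet has
     * passed integrity checks, otherwise forged sequence numbers could
     * slide the window forward past legitimate traffic. */
    u32 seq = clib_net_to_host_u32 (esp_hdr->seq);

    if (ipsec_sa_anti_replay_check (sa, seq))
      return DROP_REPLAY;          /* duplicate or outside the window */

    if (!decrypt_and_verify (sa, pkt))
      return DROP_INTEG_ERROR;     /* ICV mismatch - do not advance */

    ipsec_sa_anti_replay_advance (sa, seq);

Note that the check may speculatively set sa->seq_hi for ESN; that choice is only committed by ipsec_sa_anti_replay_advance() after successful verification.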