/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef __ESP_H__
#define __ESP_H__

#include <vnet/ip/ip.h>
#include <vnet/ipsec/ipsec.h>

#include <openssl/hmac.h>
#include <openssl/rand.h>
#include <openssl/evp.h>
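
/* ESP header as it appears on the wire (RFC 4303): SPI, 32-bit
   sequence number, then the payload. The packed IP+ESP structs below
   embed it, and format_esp_header () formats it. */
typedef struct
{
  u32 spi;
  u32 seq;
  u8 data[0];
} esp_header_t;

/* ESP trailer fields that precede the ICV. */
typedef struct
{
  u8 pad_length;
  u8 next_header;
} esp_footer_t;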

typedef CLIB_PACKED (struct {
  ip4_header_t ip4;
  esp_header_t esp;
}) ip4_and_esp_header_t;

typedef CLIB_PACKED (struct {
  ip6_header_t ip6;
  esp_header_t esp;
}) ip6_and_esp_header_t;

typedef struct
{
  const EVP_CIPHER *type;
} esp_crypto_alg_t;

typedef struct
{
  const EVP_MD *md;
  u8 trunc_size;
} esp_integ_alg_t;

typedef struct
{
  /* Per-thread OpenSSL contexts, each on its own cache line to avoid
     false sharing between workers. */
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
  EVP_CIPHER_CTX encrypt_ctx;
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline1);
  EVP_CIPHER_CTX decrypt_ctx;
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline2);
  HMAC_CTX hmac_ctx;
  ipsec_crypto_alg_t last_encrypt_alg;
  ipsec_crypto_alg_t last_decrypt_alg;
  ipsec_integ_alg_t last_integ_alg;
} esp_main_per_thread_data_t;

typedef struct
{
  esp_crypto_alg_t *esp_crypto_algs;
  esp_integ_alg_t *esp_integ_algs;
  esp_main_per_thread_data_t *per_thread_data;
} esp_main_t;

esp_main_t esp_main;

#define ESP_WINDOW_SIZE (64)
#define ESP_SEQ_MAX (4294967295UL)

u8 *format_esp_header (u8 * s, va_list * args);
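
/*
 * Anti-replay (RFC 4303): sa->replay_window is a 64-bit bitmap whose
 * bit 0 corresponds to sa->last_seq, the highest sequence number
 * accepted so far, and whose bit N corresponds to sa->last_seq - N.
 * The check functions return 1 for a replayed or too-old packet and
 * 0 for a packet that may be accepted.
 */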
always_inline int
esp_replay_check (ipsec_sa_t * sa, u32 seq)
{
  u32 diff;

  /* Newer than anything seen so far: not a replay. */
  if (PREDICT_TRUE (seq > sa->last_seq))
    return 0;

  diff = sa->last_seq - seq;

  /* Inside the window: a replay iff its bit is already set. */
  if (ESP_WINDOW_SIZE > diff)
    return (sa->replay_window & (1ULL << diff)) ? 1 : 0;

  /* Older than the window: reject as a replay. */
  return 1;
}
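
/*
 * Extended sequence number (ESN) variant: only the low 32 bits of the
 * 64-bit ESN travel in the packet, so besides checking the window this
 * also infers the high half, leaving the guess in sa->seq_hi for the
 * ICV computation.
 */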
always_inline int
esp_replay_check_esn (ipsec_sa_t * sa, u32 seq)
{
  u32 tl = sa->last_seq;
  u32 th = sa->last_seq_hi;
  u32 diff = tl - seq;

  if (PREDICT_TRUE (tl >= (ESP_WINDOW_SIZE - 1)))
    {
      /* The window does not straddle a 2^32 boundary. */
      if (seq >= (tl - ESP_WINDOW_SIZE + 1))
	{
	  sa->seq_hi = th;
	  if (seq <= tl)
	    return (sa->replay_window & (1ULL << diff)) ? 1 : 0;
	  else
	    return 0;
	}
      else
	{
	  /* Below the window: only plausible if the low half wrapped. */
	  sa->seq_hi = th + 1;
	  return 0;
	}
    }
  else
    {
      /* The window straddles a 2^32 boundary of the 64-bit ESN. */
      if (seq >= (tl - ESP_WINDOW_SIZE + 1))
	{
	  /* seq belongs to the tail of the previous 2^32 epoch. */
	  sa->seq_hi = th - 1;
	  return (sa->replay_window & (1ULL << diff)) ? 1 : 0;
	}
      else
	{
	  sa->seq_hi = th;
	  if (seq <= tl)
	    return (sa->replay_window & (1ULL << diff)) ? 1 : 0;
	  else
	    return 0;
	}
    }
}
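
/*
 * After a packet passes the replay check and its ICV verifies, record
 * its sequence number: slide the window forward for a new highest seq,
 * or set the packet's bit if it falls inside the window.
 */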
/* TODO: the seq increment should be atomic if the SA is accessed by
   multiple workers */
always_inline void
esp_replay_advance (ipsec_sa_t * sa, u32 seq)
{
  u32 pos;

  if (seq > sa->last_seq)
    {
      /* Slide the window right so its edge sits at the new seq. */
      pos = seq - sa->last_seq;
      if (pos < ESP_WINDOW_SIZE)
	sa->replay_window = ((sa->replay_window) << pos) | 1;
      else
	sa->replay_window = 1;
      sa->last_seq = seq;
    }
  else
    {
      /* seq falls inside the current window: just mark it seen. */
      pos = sa->last_seq - seq;
      sa->replay_window |= (1ULL << pos);
    }
}
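
/*
 * ESN variant of the advance: wrap is the change in the high half that
 * esp_replay_check_esn inferred for this packet, and it selects how
 * the 32-bit distance between seq and sa->last_seq is computed.
 */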
always_inline void
esp_replay_advance_esn (ipsec_sa_t * sa, u32 seq)
{
  int wrap = sa->seq_hi - sa->last_seq_hi;
  u32 pos;

  if (wrap == 0 && seq > sa->last_seq)
    {
      /* Same epoch, newer packet: slide the window forward. */
      pos = seq - sa->last_seq;
      if (pos < ESP_WINDOW_SIZE)
	sa->replay_window = ((sa->replay_window) << pos) | 1;
      else
	sa->replay_window = 1;
      sa->last_seq = seq;
    }
  else if (wrap > 0)
    {
      /* The low 32 bits wrapped into a new epoch. */
      pos = ~seq + sa->last_seq + 1;
      if (pos < ESP_WINDOW_SIZE)
	sa->replay_window = ((sa->replay_window) << pos) | 1;
      else
	sa->replay_window = 1;
      sa->last_seq = seq;
      sa->last_seq_hi = sa->seq_hi;
    }
  else if (wrap < 0)
    {
      /* Packet from the previous epoch landing inside the window. */
      pos = ~seq + sa->last_seq + 1;
      sa->replay_window |= (1ULL << pos);
    }
  else
    {
      /* Same epoch, older packet inside the window. */
      pos = sa->last_seq - seq;
      sa->replay_window |= (1ULL << pos);
    }
}
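
/*
 * Bump the outbound sequence number. Returns 1 if the sequence space
 * would overflow (treated as fatal only when anti-replay is enabled,
 * since a wrap would make the window ambiguous), 0 otherwise.
 */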
always_inline int
esp_seq_advance (ipsec_sa_t * sa)
{
  if (PREDICT_TRUE (sa->use_esn))
    {
      if (PREDICT_FALSE (sa->seq == ESP_SEQ_MAX))
	{
	  if (PREDICT_FALSE
	      (sa->use_anti_replay && sa->seq_hi == ESP_SEQ_MAX))
	    return 1;
	  sa->seq_hi++;
	}
      sa->seq++;
    }
  else
    {
      if (PREDICT_FALSE (sa->use_anti_replay && sa->seq == ESP_SEQ_MAX))
	return 1;
      sa->seq++;
    }

  return 0;
}
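
/*
 * One-time module init: build the OpenSSL cipher/digest lookup tables
 * and give every VPP worker thread its own EVP and HMAC contexts so
 * that no locking is needed on the data path.
 */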
always_inline void
esp_init ()
{
  esp_main_t *em = &esp_main;
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  esp_integ_alg_t *i;
  int thread_id;

  memset (em, 0, sizeof (em[0]));

  vec_validate (em->esp_crypto_algs, IPSEC_CRYPTO_N_ALG - 1);
  em->esp_crypto_algs[IPSEC_CRYPTO_ALG_AES_CBC_128].type = EVP_aes_128_cbc ();
  em->esp_crypto_algs[IPSEC_CRYPTO_ALG_AES_CBC_192].type = EVP_aes_192_cbc ();
  em->esp_crypto_algs[IPSEC_CRYPTO_ALG_AES_CBC_256].type = EVP_aes_256_cbc ();

  vec_validate (em->esp_integ_algs, IPSEC_INTEG_N_ALG - 1);

  /* trunc_size is the ICV length carried on the wire, encoded in each
     algorithm's name (e.g. SHA1-96 keeps 96 bits = 12 bytes). */
  i = &em->esp_integ_algs[IPSEC_INTEG_ALG_SHA1_96];
  i->md = EVP_sha1 ();
  i->trunc_size = 12;

  i = &em->esp_integ_algs[IPSEC_INTEG_ALG_SHA_256_96];
  i->md = EVP_sha256 ();
  i->trunc_size = 12;

  i = &em->esp_integ_algs[IPSEC_INTEG_ALG_SHA_256_128];
  i->md = EVP_sha256 ();
  i->trunc_size = 16;

  i = &em->esp_integ_algs[IPSEC_INTEG_ALG_SHA_384_192];
  i->md = EVP_sha384 ();
  i->trunc_size = 24;

  i = &em->esp_integ_algs[IPSEC_INTEG_ALG_SHA_512_256];
  i->md = EVP_sha512 ();
  i->trunc_size = 32;

  vec_validate_aligned (em->per_thread_data, tm->n_vlib_mains - 1,
			CLIB_CACHE_LINE_BYTES);

  /* Initialize the contexts for every thread, including the last one:
     vec_validate above sized the vector to n_vlib_mains entries. */
  for (thread_id = 0; thread_id < tm->n_vlib_mains; thread_id++)
    {
      EVP_CIPHER_CTX_init (&(em->per_thread_data[thread_id].encrypt_ctx));
      EVP_CIPHER_CTX_init (&(em->per_thread_data[thread_id].decrypt_ctx));
      HMAC_CTX_init (&(em->per_thread_data[thread_id].hmac_ctx));
    }
}
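
/*
 * Compute the HMAC-based ICV over data using this thread's reusable
 * context. Writes the full digest to signature and returns the
 * truncated length that goes on the wire (e.g. 12 bytes for SHA1-96),
 * or 0 if the algorithm has no digest configured.
 */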
always_inline unsigned int
hmac_calc (ipsec_integ_alg_t alg,
	   u8 * key, int key_len,
	   u8 * data, int data_len, u8 * signature, u8 use_esn, u32 seq_hi)
{
  esp_main_t *em = &esp_main;
  u32 thread_index = vlib_get_thread_index ();
  HMAC_CTX *ctx = &(em->per_thread_data[thread_index].hmac_ctx);
  const EVP_MD *md = NULL;
  unsigned int len;

  ASSERT (alg < IPSEC_INTEG_N_ALG);

  if (PREDICT_FALSE (em->esp_integ_algs[alg].md == 0))
    return 0;

  /* Pass the digest to HMAC_Init only when the algorithm changed; a
     NULL md makes OpenSSL reuse the context's previous digest, which
     is cheaper. */
  if (PREDICT_FALSE (alg != em->per_thread_data[thread_index].last_integ_alg))
    {
      md = em->esp_integ_algs[alg].md;
      em->per_thread_data[thread_index].last_integ_alg = alg;
    }

  HMAC_Init (ctx, key, key_len, md);

  HMAC_Update (ctx, data, data_len);

  /* With ESN, the high 32 bits of the sequence number are not carried
     in the packet but are still covered by the ICV. */
  if (PREDICT_TRUE (use_esn))
    HMAC_Update (ctx, (u8 *) & seq_hi, sizeof (seq_hi));
  HMAC_Final (ctx, signature, &len);

  return em->esp_integ_algs[alg].trunc_size;
}

#endif /* __ESP_H__ */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */