/*
 * esp_encrypt.c : IPSec ESP encrypt node
 *
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vnet/vnet.h>
#include <vnet/api_errno.h>
#include <vnet/ip/ip.h>

#include <vnet/ipsec/ipsec.h>
#include <vnet/ipsec/esp.h>

ipsec_proto_main_t ipsec_proto_main;

#define foreach_esp_encrypt_next                \
_(DROP, "error-drop")                           \
_(IP4_LOOKUP, "ip4-lookup")                     \
_(IP6_LOOKUP, "ip6-lookup")                     \
_(INTERFACE_OUTPUT, "interface-output")
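
/* each _(sym, node-name) entry above expands twice via the X-macro
   pattern: into the esp_encrypt_next_t enum below and into the
   .next_nodes table of the node registration at the bottom of the file */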

#define _(v, s) ESP_ENCRYPT_NEXT_##v,
typedef enum
{
  foreach_esp_encrypt_next
#undef _
    ESP_ENCRYPT_N_NEXT,
} esp_encrypt_next_t;

#define foreach_esp_encrypt_error                \
 _(RX_PKTS, "ESP pkts received")                 \
 _(NO_BUFFER, "No buffer (packet dropped)")      \
 _(ENCRYPTION_FAILED, "ESP encryption failed")   \
 _(SEQ_CYCLED, "sequence number cycled")
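
/* each _(sym, string) pair becomes an ESP_ENCRYPT_ERROR_* counter;
   the strings are what "show errors" prints for this node */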

typedef enum
{
#define _(sym,str) ESP_ENCRYPT_ERROR_##sym,
  foreach_esp_encrypt_error
#undef _
    ESP_ENCRYPT_N_ERROR,
} esp_encrypt_error_t;

static char *esp_encrypt_error_strings[] = {
#define _(sym,string) string,
  foreach_esp_encrypt_error
#undef _
};

vlib_node_registration_t esp_encrypt_node;

typedef struct
{
  u32 spi;
  u32 seq;
  ipsec_crypto_alg_t crypto_alg;
  ipsec_integ_alg_t integ_alg;
} esp_encrypt_trace_t;

/* packet trace format function */
static u8 *
format_esp_encrypt_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  esp_encrypt_trace_t *t = va_arg (*args, esp_encrypt_trace_t *);

  s = format (s, "esp: spi %u seq %u crypto %U integrity %U",
              t->spi, t->seq,
              format_ipsec_crypto_alg, t->crypto_alg,
              format_ipsec_integ_alg, t->integ_alg);
  return s;
}
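
/*
 * Encrypt in_len bytes from in to out with the per-thread OpenSSL EVP
 * context.  Passing a NULL cipher to EVP_EncryptInit_ex () keeps the
 * cipher already programmed into the context, so the cipher is only
 * looked up again when the algorithm changes between packets.
 */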
always_inline void
esp_encrypt_aes_cbc (ipsec_crypto_alg_t alg,
                     u8 * in, u8 * out, size_t in_len, u8 * key, u8 * iv)
{
  ipsec_proto_main_t *em = &ipsec_proto_main;
  u32 thread_index = vlib_get_thread_index ();
#if OPENSSL_VERSION_NUMBER >= 0x10100000L
  /* OpenSSL 1.1 made EVP_CIPHER_CTX opaque, so it is held by pointer */
  EVP_CIPHER_CTX *ctx = em->per_thread_data[thread_index].encrypt_ctx;
#else
  EVP_CIPHER_CTX *ctx = &(em->per_thread_data[thread_index].encrypt_ctx);
#endif
  const EVP_CIPHER *cipher = NULL;
  int out_len;

  ASSERT (alg < IPSEC_CRYPTO_N_ALG);

  if (PREDICT_FALSE
      (em->ipsec_proto_main_crypto_algs[alg].type == IPSEC_CRYPTO_ALG_NONE))
    return;

  if (PREDICT_FALSE
      (alg != em->per_thread_data[thread_index].last_encrypt_alg))
    {
      cipher = em->ipsec_proto_main_crypto_algs[alg].type;
      em->per_thread_data[thread_index].last_encrypt_alg = alg;
    }

  EVP_EncryptInit_ex (ctx, cipher, NULL, key, iv);

  EVP_EncryptUpdate (ctx, out, &out_len, in, in_len);
  EVP_EncryptFinal_ex (ctx, out + out_len, &out_len);
}

static uword
esp_encrypt_node_fn (vlib_main_t * vm,
                     vlib_node_runtime_t * node, vlib_frame_t * from_frame)
{
  u32 n_left_from, *from, *to_next = 0, next_index;
  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;
  ipsec_main_t *im = &ipsec_main;
  u32 *recycle = 0;
  u32 thread_index = vlib_get_thread_index ();
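
  /* encryption is done out of place: each packet is rewritten into a
     buffer taken from a per-thread pool of pre-allocated "empty"
     buffers, and the input buffer is recycled afterwards */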
  ipsec_alloc_empty_buffers (vm, im);

  u32 *empty_buffers = im->empty_buffers[thread_index];

  if (PREDICT_FALSE (vec_len (empty_buffers) < n_left_from))
    {
      vlib_node_increment_counter (vm, esp_encrypt_node.index,
                                   ESP_ENCRYPT_ERROR_NO_BUFFER, n_left_from);
      clib_warning ("not enough empty buffers, discarding frame");
      goto free_buffers_and_exit;
    }

  next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 i_bi0, o_bi0, next0;
          vlib_buffer_t *i_b0, *o_b0 = 0;
          u32 sa_index0;
          ipsec_sa_t *sa0;
          ip4_and_esp_header_t *ih0, *oh0 = 0;
          ip6_and_esp_header_t *ih6_0, *oh6_0 = 0;
          uword last_empty_buffer;
          esp_header_t *o_esp0;
          esp_footer_t *f0;
          u8 is_ipv6;
          u8 ip_hdr_size;
          u8 next_hdr_type;
          u32 ip_proto = 0;
          u8 transport_mode = 0;

          i_bi0 = from[0];
          from += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          next0 = ESP_ENCRYPT_NEXT_DROP;

          i_b0 = vlib_get_buffer (vm, i_bi0);
          /* the SA was selected upstream and stashed in buffer metadata */
          sa_index0 = vnet_buffer (i_b0)->ipsec.sad_index;
          sa0 = pool_elt_at_index (im->sad, sa_index0);
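
          /* bump the sequence number; esp_seq_advance () reports when the
             counter would wrap, which RFC 4303 forbids on a live SA */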
          if (PREDICT_FALSE (esp_seq_advance (sa0)))
            {
              clib_warning ("sequence number counter has cycled SPI %u",
                            sa0->spi);
              vlib_node_increment_counter (vm, esp_encrypt_node.index,
                                           ESP_ENCRYPT_ERROR_SEQ_CYCLED, 1);
              /* TODO: rekey SA; until then the packet is dropped */
              o_bi0 = i_bi0;
              to_next[0] = o_bi0;
              to_next += 1;
              goto trace;
            }

          sa0->total_data_size += i_b0->current_length;

          /* grab free buffer */
          last_empty_buffer = vec_len (empty_buffers) - 1;
          o_bi0 = empty_buffers[last_empty_buffer];
          o_b0 = vlib_get_buffer (vm, o_bi0);
          o_b0->flags = VLIB_BUFFER_TOTAL_LENGTH_VALID;
          /* leave headroom for an ethernet header (transport mode) */
          o_b0->current_data = sizeof (ethernet_header_t);
          ih0 = vlib_buffer_get_current (i_b0);
          vlib_prefetch_buffer_with_index (vm,
                                           empty_buffers[last_empty_buffer -
                                                         1], STORE);
          _vec_len (empty_buffers) = last_empty_buffer;
          to_next[0] = o_bi0;
          to_next += 1;

          /* add old buffer to the recycle list */
          vec_add1 (recycle, i_bi0);

          /* is ipv6 */
          if (PREDICT_FALSE
              ((ih0->ip4.ip_version_and_header_length & 0xF0) == 0x60))
            {
              is_ipv6 = 1;
              ih6_0 = vlib_buffer_get_current (i_b0);
              ip_hdr_size = sizeof (ip6_header_t);
              next_hdr_type = IP_PROTOCOL_IPV6;
              oh6_0 = vlib_buffer_get_current (o_b0);
              o_esp0 = vlib_buffer_get_current (o_b0) + sizeof (ip6_header_t);

              oh6_0->ip6.ip_version_traffic_class_and_flow_label =
                ih6_0->ip6.ip_version_traffic_class_and_flow_label;
              oh6_0->ip6.protocol = IP_PROTOCOL_IPSEC_ESP;
              oh6_0->ip6.hop_limit = 254;
              oh6_0->ip6.src_address.as_u64[0] =
                ih6_0->ip6.src_address.as_u64[0];
              oh6_0->ip6.src_address.as_u64[1] =
                ih6_0->ip6.src_address.as_u64[1];
              oh6_0->ip6.dst_address.as_u64[0] =
                ih6_0->ip6.dst_address.as_u64[0];
              oh6_0->ip6.dst_address.as_u64[1] =
                ih6_0->ip6.dst_address.as_u64[1];
              oh6_0->esp.spi = clib_host_to_net_u32 (sa0->spi);
              oh6_0->esp.seq = clib_host_to_net_u32 (sa0->seq);
              ip_proto = ih6_0->ip6.protocol;

              next0 = ESP_ENCRYPT_NEXT_IP6_LOOKUP;
            }
          else
            {
              is_ipv6 = 0;
              ip_hdr_size = sizeof (ip4_header_t);
              next_hdr_type = IP_PROTOCOL_IP_IN_IP;
              oh0 = vlib_buffer_get_current (o_b0);
              o_esp0 = vlib_buffer_get_current (o_b0) + sizeof (ip4_header_t);
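
              /* fresh outer IPv4 header: 0x45 = version 4 with a 5-word
                 (20-byte, option-less) header */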
              oh0->ip4.ip_version_and_header_length = 0x45;
              oh0->ip4.tos = ih0->ip4.tos;
              oh0->ip4.fragment_id = 0;
              oh0->ip4.flags_and_fragment_offset = 0;
              oh0->ip4.ttl = 254;
              oh0->ip4.protocol = IP_PROTOCOL_IPSEC_ESP;
              oh0->ip4.src_address.as_u32 = ih0->ip4.src_address.as_u32;
              oh0->ip4.dst_address.as_u32 = ih0->ip4.dst_address.as_u32;
              oh0->esp.spi = clib_host_to_net_u32 (sa0->spi);
              oh0->esp.seq = clib_host_to_net_u32 (sa0->seq);
              ip_proto = ih0->ip4.protocol;

              next0 = ESP_ENCRYPT_NEXT_IP4_LOOKUP;
            }

          if (PREDICT_TRUE
              (!is_ipv6 && sa0->is_tunnel && !sa0->is_tunnel_ip6))
            {
              /* ipv4 tunnel mode: outer addresses come from the SA */
              oh0->ip4.src_address.as_u32 = sa0->tunnel_src_addr.ip4.as_u32;
              oh0->ip4.dst_address.as_u32 = sa0->tunnel_dst_addr.ip4.as_u32;

              vnet_buffer (o_b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
            }
          else if (is_ipv6 && sa0->is_tunnel && sa0->is_tunnel_ip6)
            {
              oh6_0->ip6.src_address.as_u64[0] =
                sa0->tunnel_src_addr.ip6.as_u64[0];
              oh6_0->ip6.src_address.as_u64[1] =
                sa0->tunnel_src_addr.ip6.as_u64[1];
              oh6_0->ip6.dst_address.as_u64[0] =
                sa0->tunnel_dst_addr.ip6.as_u64[0];
              oh6_0->ip6.dst_address.as_u64[1] =
                sa0->tunnel_dst_addr.ip6.as_u64[1];

              vnet_buffer (o_b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
            }
          else
            {
              /* transport mode: the original IP addresses are kept */
              next_hdr_type = ip_proto;
              if (vnet_buffer (i_b0)->sw_if_index[VLIB_TX] != ~0)
                {
                  transport_mode = 1;
                  ethernet_header_t *ieh0, *oeh0;
                  ieh0 =
                    (ethernet_header_t *) ((u8 *)
                                           vlib_buffer_get_current (i_b0) -
                                           sizeof (ethernet_header_t));
                  oeh0 = (ethernet_header_t *) o_b0->data;
                  clib_memcpy (oeh0, ieh0, sizeof (ethernet_header_t));
                  next0 = ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT;
                  vnet_buffer (o_b0)->sw_if_index[VLIB_TX] =
                    vnet_buffer (i_b0)->sw_if_index[VLIB_TX];
                }
              vlib_buffer_advance (i_b0, ip_hdr_size);
            }

          ASSERT (sa0->crypto_alg < IPSEC_CRYPTO_N_ALG);
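
          /*
           * ESP trailer (RFC 4303): the payload is padded so that payload
           * plus the 2-byte footer (pad length, next header) fills whole
           * cipher blocks.  For example, a 40-byte payload with 16-byte
           * AES-CBC blocks gives blocks = 1 + 41/16 = 3, so pad_bytes =
           * 48 - 2 - 40 = 6, with pad values 1, 2, ..., 6 as the default
           * padding scheme requires.
           */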
          if (PREDICT_TRUE (sa0->crypto_alg != IPSEC_CRYPTO_ALG_NONE))
            {
              const int BLOCK_SIZE = 16;
              const int IV_SIZE = 16;
              int blocks = 1 + (i_b0->current_length + 1) / BLOCK_SIZE;

              /* pad packet in input buffer */
              u8 pad_bytes = BLOCK_SIZE * blocks - 2 - i_b0->current_length;
              u8 i;
              u8 *padding =
                vlib_buffer_get_current (i_b0) + i_b0->current_length;
              i_b0->current_length = BLOCK_SIZE * blocks;
              for (i = 0; i < pad_bytes; ++i)
                {
                  padding[i] = i + 1;
                }
              f0 = vlib_buffer_get_current (i_b0) + i_b0->current_length - 2;
              f0->pad_length = pad_bytes;
              f0->next_header = next_hdr_type;

              o_b0->current_length = ip_hdr_size + sizeof (esp_header_t) +
                BLOCK_SIZE * blocks + IV_SIZE;

              vnet_buffer (o_b0)->sw_if_index[VLIB_RX] =
                vnet_buffer (i_b0)->sw_if_index[VLIB_RX];

              u8 iv[16];
              RAND_bytes (iv, sizeof (iv));
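
              /* the IV is not secret; it travels in the clear right
                 after the ESP header */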
              clib_memcpy ((u8 *) vlib_buffer_get_current (o_b0) +
                           ip_hdr_size + sizeof (esp_header_t), iv, IV_SIZE);

              esp_encrypt_aes_cbc (sa0->crypto_alg,
                                   (u8 *) vlib_buffer_get_current (i_b0),
                                   (u8 *) vlib_buffer_get_current (o_b0) +
                                   ip_hdr_size + sizeof (esp_header_t) +
                                   IV_SIZE, BLOCK_SIZE * blocks,
                                   sa0->crypto_key, iv);
            }
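
          /* append the ICV: hmac_calc () authenticates the ESP header, IV
             and ciphertext (current_length - ip_hdr_size bytes starting at
             o_esp0) and writes the digest at the end of the packet; with
             ESN the high sequence bits (seq_hi) are folded into the MAC */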
          o_b0->current_length += hmac_calc (sa0->integ_alg, sa0->integ_key,
                                             sa0->integ_key_len,
                                             (u8 *) o_esp0,
                                             o_b0->current_length -
                                             ip_hdr_size,
                                             vlib_buffer_get_current (o_b0) +
                                             o_b0->current_length,
                                             sa0->use_esn, sa0->seq_hi);

          if (PREDICT_FALSE (is_ipv6))
            {
              oh6_0->ip6.payload_length =
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, o_b0)
                                      - sizeof (ip6_header_t));
            }
          else
            {
              oh0->ip4.length =
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, o_b0));
              oh0->ip4.checksum = ip4_header_checksum (&oh0->ip4);
            }
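
          /* transport mode: rewind current_data so the ethernet header
             copied into o_b0->data goes out with the packet */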
          if (transport_mode)
            vlib_buffer_reset (o_b0);

        trace:
          if (PREDICT_FALSE (i_b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              if (o_b0)
                {
                  o_b0->flags |= VLIB_BUFFER_IS_TRACED;
                  o_b0->trace_index = i_b0->trace_index;
                  esp_encrypt_trace_t *tr =
                    vlib_add_trace (vm, node, o_b0, sizeof (*tr));
                  tr->spi = sa0->spi;
                  tr->seq = sa0->seq - 1;
                  tr->crypto_alg = sa0->crypto_alg;
                  tr->integ_alg = sa0->integ_alg;
                }
            }

          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next, o_bi0,
                                           next0);
        }
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  vlib_node_increment_counter (vm, esp_encrypt_node.index,
                               ESP_ENCRYPT_ERROR_RX_PKTS,
                               from_frame->n_vectors);

free_buffers_and_exit:
  if (recycle)
    vlib_buffer_free (vm, recycle, vec_len (recycle));
  vec_free (recycle);
  return from_frame->n_vectors;
}

VLIB_REGISTER_NODE (esp_encrypt_node) = {
  .function = esp_encrypt_node_fn,
  .name = "esp-encrypt",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_encrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN (esp_encrypt_error_strings),
  .error_strings = esp_encrypt_error_strings,

  .n_next_nodes = ESP_ENCRYPT_N_NEXT,
  .next_nodes = {
#define _(s,n) [ESP_ENCRYPT_NEXT_##s] = n,
    foreach_esp_encrypt_next
#undef _
  },
};

VLIB_NODE_FUNCTION_MULTIARCH (esp_encrypt_node, esp_encrypt_node_fn)
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */