/*
 * esp_encrypt.c : IPSec ESP encrypt node
 *
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vnet/vnet.h>
#include <vnet/api_errno.h>
#include <vnet/ip/ip.h>

#include <vnet/ipsec/ipsec.h>
#include <vnet/ipsec/esp.h>
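
/* Largest 32-bit ESP sequence number; advancing past it means the
 * counter has cycled */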
#define ESP_SEQ_MAX (4294967295UL)
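
/* Dispositions for packets leaving this node: drop on error, hand the
 * result to ip4/ip6 input for routing (tunnel mode), or send it straight
 * to interface-output (transport mode) */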
#define foreach_esp_encrypt_next                \
 _(DROP, "error-drop")                          \
 _(IP4_INPUT, "ip4-input")                      \
 _(IP6_INPUT, "ip6-input")                      \
 _(INTERFACE_OUTPUT, "interface-output")

#define _(v, s) ESP_ENCRYPT_NEXT_##v,
typedef enum
{
  foreach_esp_encrypt_next
#undef _
    ESP_ENCRYPT_N_NEXT,
} esp_encrypt_next_t;

#define foreach_esp_encrypt_error               \
 _(RX_PKTS, "ESP pkts received")                \
 _(NO_BUFFER, "No buffer (packet dropped)")     \
 _(ENCRYPTION_FAILED, "ESP encryption failed")  \
 _(SEQ_CYCLED, "sequence number cycled")

typedef enum
{
#define _(sym,str) ESP_ENCRYPT_ERROR_##sym,
  foreach_esp_encrypt_error
#undef _
    ESP_ENCRYPT_N_ERROR,
} esp_encrypt_error_t;

static char *esp_encrypt_error_strings[] = {
#define _(sym,string) string,
  foreach_esp_encrypt_error
#undef _
};

vlib_node_registration_t esp_encrypt_node;
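
/* per-packet data captured for the esp-encrypt trace */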
typedef struct
{
  u32 spi;
  u32 seq;
  ipsec_crypto_alg_t crypto_alg;
  ipsec_integ_alg_t integ_alg;
} esp_encrypt_trace_t;

/* packet trace format function */
static u8 *
format_esp_encrypt_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  esp_encrypt_trace_t *t = va_arg (*args, esp_encrypt_trace_t *);

  s = format (s, "esp: spi %u seq %u crypto %U integrity %U",
              t->spi, t->seq,
              format_ipsec_crypto_alg, t->crypto_alg,
              format_ipsec_integ_alg, t->integ_alg);
  return s;
}
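
/* Encrypt in_len bytes from in to out with the SA's CBC cipher, using
 * this thread's OpenSSL EVP context.  The cipher object is passed to
 * EVP_EncryptInit_ex only when the algorithm differs from the last one
 * used on this thread; a NULL cipher tells OpenSSL to reuse the
 * previously configured one. */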
always_inline void
esp_encrypt_aes_cbc (ipsec_crypto_alg_t alg,
                     u8 * in, u8 * out, size_t in_len, u8 * key, u8 * iv)
{
  esp_main_t *em = &esp_main;
  u32 cpu_index = os_get_cpu_number ();
  EVP_CIPHER_CTX *ctx = &(em->per_thread_data[cpu_index].encrypt_ctx);
  const EVP_CIPHER *cipher = NULL;
  int out_len;

  ASSERT (alg < IPSEC_CRYPTO_N_ALG);

  if (PREDICT_FALSE (em->esp_crypto_algs[alg].type == IPSEC_CRYPTO_ALG_NONE))
    return;

  if (PREDICT_FALSE (alg != em->per_thread_data[cpu_index].last_encrypt_alg))
    {
      cipher = em->esp_crypto_algs[alg].type;
      em->per_thread_data[cpu_index].last_encrypt_alg = alg;
    }

  EVP_EncryptInit_ex (ctx, cipher, NULL, key, iv);

  EVP_EncryptUpdate (ctx, out, &out_len, in, in_len);
  EVP_EncryptFinal_ex (ctx, out + out_len, &out_len);
}
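
/* Advance the SA sequence number, maintaining the high-order word when
 * extended sequence numbers (ESN, RFC 4303) are enabled.  Returns 1 when
 * anti-replay is on and the counter would cycle, i.e. the SA must be
 * re-keyed before more packets can be sent. */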
always_inline int
esp_seq_advance (ipsec_sa_t * sa)
{
  if (PREDICT_TRUE (sa->use_esn))
    {
      if (PREDICT_FALSE (sa->seq == ESP_SEQ_MAX))
        {
          if (PREDICT_FALSE
              (sa->use_anti_replay && sa->seq_hi == ESP_SEQ_MAX))
            return 1;
          sa->seq_hi++;
        }
      sa->seq++;
    }
  else
    {
      if (PREDICT_FALSE (sa->use_anti_replay && sa->seq == ESP_SEQ_MAX))
        return 1;
      sa->seq++;
    }

  return 0;
}
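
/* Frame-processing function: for each input packet, build the encrypted
 * packet in a freshly grabbed output buffer and recycle the input buffer.
 * Padding is written into the input buffer, but the ciphertext always
 * lands in the separate output buffer. */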
static uword
esp_encrypt_node_fn (vlib_main_t * vm,
                     vlib_node_runtime_t * node, vlib_frame_t * from_frame)
{
  u32 n_left_from, *from, *to_next = 0, next_index;
  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;
  ipsec_main_t *im = &ipsec_main;
  u32 *recycle = 0;
  u32 cpu_index = os_get_cpu_number ();

  ipsec_alloc_empty_buffers (vm, im);

  u32 *empty_buffers = im->empty_buffers[cpu_index];

  if (PREDICT_FALSE (vec_len (empty_buffers) < n_left_from))
    {
      vlib_node_increment_counter (vm, esp_encrypt_node.index,
                                   ESP_ENCRYPT_ERROR_NO_BUFFER, n_left_from);
      clib_warning ("not enough empty buffers, discarding frame");
      goto free_buffers_and_exit;
    }

  next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 i_bi0, o_bi0, next0;
          vlib_buffer_t *i_b0, *o_b0 = 0;
          u32 sa_index0;
          ipsec_sa_t *sa0;
          ip4_and_esp_header_t *ih0, *oh0 = 0;
          ip6_and_esp_header_t *ih6_0, *oh6_0 = 0;
          uword last_empty_buffer;
          esp_header_t *o_esp0;
          esp_footer_t *f0;
          u8 is_ipv6;
          u8 ip_hdr_size;
          u8 next_hdr_type;
          u32 ip_proto = 0;
          u8 transport_mode = 0;

          i_bi0 = from[0];
          from += 1;
          n_left_from -= 1;
          n_left_to_next -= 1;

          next0 = ESP_ENCRYPT_NEXT_DROP;

          i_b0 = vlib_get_buffer (vm, i_bi0);
          sa_index0 = vnet_buffer (i_b0)->ipsec.sad_index;
          sa0 = pool_elt_at_index (im->sad, sa_index0);

          if (PREDICT_FALSE (esp_seq_advance (sa0)))
            {
              clib_warning ("sequence number counter has cycled SPI %u",
                            sa0->spi);
              vlib_node_increment_counter (vm, esp_encrypt_node.index,
                                           ESP_ENCRYPT_ERROR_SEQ_CYCLED, 1);
              /* TODO: rekey SA */
              o_bi0 = i_bi0;
              to_next[0] = o_bi0;
              to_next += 1;
              goto trace;
            }
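
          /* The encrypted packet is assembled in a pre-allocated buffer
           * taken from the tail of the per-thread empty-buffer vector;
           * current_data is offset by an ethernet header so transport
           * mode can prepend one at o_b0->data later. */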
          /* grab free buffer */
          last_empty_buffer = vec_len (empty_buffers) - 1;
          o_bi0 = empty_buffers[last_empty_buffer];
          o_b0 = vlib_get_buffer (vm, o_bi0);
          o_b0->flags = VLIB_BUFFER_TOTAL_LENGTH_VALID;
          o_b0->current_data = sizeof (ethernet_header_t);
          ih0 = vlib_buffer_get_current (i_b0);
          vlib_prefetch_buffer_with_index (vm,
                                           empty_buffers[last_empty_buffer -
                                                         1], STORE);
          _vec_len (empty_buffers) = last_empty_buffer;
          to_next[0] = o_bi0;
          to_next += 1;

          /* add old buffer to the recycle list */
          vec_add1 (recycle, i_bi0);
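
          /* Peek at the IP version nibble to pick the IPv4 or IPv6 path;
           * both headers start with the version field, so reading it
           * through the ip4 overlay is safe. */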
          if (PREDICT_FALSE
              ((ih0->ip4.ip_version_and_header_length & 0xF0) == 0x60))
            {
              is_ipv6 = 1;
              ih6_0 = vlib_buffer_get_current (i_b0);
              ip_hdr_size = sizeof (ip6_header_t);
              next_hdr_type = IP_PROTOCOL_IPV6;
              oh6_0 = vlib_buffer_get_current (o_b0);
              o_esp0 = vlib_buffer_get_current (o_b0) + sizeof (ip6_header_t);

              oh6_0->ip6.ip_version_traffic_class_and_flow_label =
                ih6_0->ip6.ip_version_traffic_class_and_flow_label;
              oh6_0->ip6.protocol = IP_PROTOCOL_IPSEC_ESP;
              oh6_0->ip6.hop_limit = 254;
              oh6_0->ip6.src_address.as_u64[0] =
                ih6_0->ip6.src_address.as_u64[0];
              oh6_0->ip6.src_address.as_u64[1] =
                ih6_0->ip6.src_address.as_u64[1];
              oh6_0->ip6.dst_address.as_u64[0] =
                ih6_0->ip6.dst_address.as_u64[0];
              oh6_0->ip6.dst_address.as_u64[1] =
                ih6_0->ip6.dst_address.as_u64[1];
              oh6_0->esp.spi = clib_net_to_host_u32 (sa0->spi);
              oh6_0->esp.seq = clib_net_to_host_u32 (sa0->seq);
              ip_proto = ih6_0->ip6.protocol;

              next0 = ESP_ENCRYPT_NEXT_IP6_INPUT;
            }
          else
            {
              is_ipv6 = 0;
              ip_hdr_size = sizeof (ip4_header_t);
              next_hdr_type = IP_PROTOCOL_IP_IN_IP;
              oh0 = vlib_buffer_get_current (o_b0);
              o_esp0 = vlib_buffer_get_current (o_b0) + sizeof (ip4_header_t);

              oh0->ip4.ip_version_and_header_length = 0x45;
              oh0->ip4.tos = ih0->ip4.tos;
              oh0->ip4.fragment_id = 0;
              oh0->ip4.flags_and_fragment_offset = 0;
              oh0->ip4.ttl = 254;
              oh0->ip4.protocol = IP_PROTOCOL_IPSEC_ESP;
              oh0->ip4.src_address.as_u32 = ih0->ip4.src_address.as_u32;
              oh0->ip4.dst_address.as_u32 = ih0->ip4.dst_address.as_u32;
              oh0->esp.spi = clib_net_to_host_u32 (sa0->spi);
              oh0->esp.seq = clib_net_to_host_u32 (sa0->seq);
              ip_proto = ih0->ip4.protocol;

              next0 = ESP_ENCRYPT_NEXT_IP4_INPUT;
            }
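
          /* Tunnel mode: rewrite the outer source/destination from the SA
           * and let ip4/ip6 lookup pick the output path.  Otherwise
           * transport mode: keep the original addresses, carry the inner
           * protocol in the ESP next-header field, and advance past the
           * original IP header so only the payload is encrypted. */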
          if (PREDICT_TRUE
              (!is_ipv6 && sa0->is_tunnel && !sa0->is_tunnel_ip6))
            {
              oh0->ip4.src_address.as_u32 = sa0->tunnel_src_addr.ip4.as_u32;
              oh0->ip4.dst_address.as_u32 = sa0->tunnel_dst_addr.ip4.as_u32;

              vnet_buffer (o_b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
            }
          else if (is_ipv6 && sa0->is_tunnel && sa0->is_tunnel_ip6)
            {
              oh6_0->ip6.src_address.as_u64[0] =
                sa0->tunnel_src_addr.ip6.as_u64[0];
              oh6_0->ip6.src_address.as_u64[1] =
                sa0->tunnel_src_addr.ip6.as_u64[1];
              oh6_0->ip6.dst_address.as_u64[0] =
                sa0->tunnel_dst_addr.ip6.as_u64[0];
              oh6_0->ip6.dst_address.as_u64[1] =
                sa0->tunnel_dst_addr.ip6.as_u64[1];

              vnet_buffer (o_b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
            }
          else
            {
              next_hdr_type = ip_proto;
              if (vnet_buffer (i_b0)->sw_if_index[VLIB_TX] != ~0)
                {
                  transport_mode = 1;
                  ethernet_header_t *ieh0, *oeh0;
                  ieh0 =
                    (ethernet_header_t *) ((u8 *)
                                           vlib_buffer_get_current (i_b0) -
                                           sizeof (ethernet_header_t));
                  oeh0 = (ethernet_header_t *) o_b0->data;
                  clib_memcpy (oeh0, ieh0, sizeof (ethernet_header_t));
                  next0 = ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT;
                  vnet_buffer (o_b0)->sw_if_index[VLIB_TX] =
                    vnet_buffer (i_b0)->sw_if_index[VLIB_TX];
                }
              vlib_buffer_advance (i_b0, ip_hdr_size);
            }
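
          /* ESP trailer layout per RFC 4303: payload, pad, 1-byte pad
           * length, 1-byte next header, rounded up to the 16-byte AES
           * block size.  E.g. a 40-byte payload gives
           * blocks = 1 + 41 / 16 = 3, pad_bytes = 48 - 2 - 40 = 6, and
           * 40 + 6 + 2 = 48 bytes of plaintext; pad bytes take the
           * values 1, 2, ..., pad_bytes. */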
          ASSERT (sa0->crypto_alg < IPSEC_CRYPTO_N_ALG);

          if (PREDICT_TRUE (sa0->crypto_alg != IPSEC_CRYPTO_ALG_NONE))
            {
              const int BLOCK_SIZE = 16;
              const int IV_SIZE = 16;
              int blocks = 1 + (i_b0->current_length + 1) / BLOCK_SIZE;

              /* pad packet in input buffer */
              u8 pad_bytes = BLOCK_SIZE * blocks - 2 - i_b0->current_length;
              u8 i;
              u8 *padding =
                vlib_buffer_get_current (i_b0) + i_b0->current_length;
              i_b0->current_length = BLOCK_SIZE * blocks;
              for (i = 0; i < pad_bytes; ++i)
                {
                  padding[i] = i + 1;
                }
              f0 = vlib_buffer_get_current (i_b0) + i_b0->current_length - 2;
              f0->pad_length = pad_bytes;
              f0->next_header = next_hdr_type;

              o_b0->current_length = ip_hdr_size + sizeof (esp_header_t) +
                BLOCK_SIZE * blocks + IV_SIZE;

              vnet_buffer (o_b0)->sw_if_index[VLIB_RX] =
                vnet_buffer (i_b0)->sw_if_index[VLIB_RX];

              u8 iv[16];
              RAND_bytes (iv, sizeof (iv));

              clib_memcpy ((u8 *) vlib_buffer_get_current (o_b0) +
                           ip_hdr_size + sizeof (esp_header_t), iv, IV_SIZE);

              esp_encrypt_aes_cbc (sa0->crypto_alg,
                                   (u8 *) vlib_buffer_get_current (i_b0),
                                   (u8 *) vlib_buffer_get_current (o_b0) +
                                   ip_hdr_size + sizeof (esp_header_t) +
                                   IV_SIZE, BLOCK_SIZE * blocks,
                                   sa0->crypto_key, iv);
            }
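
          /* hmac_calc writes the ICV at the current end of the output
           * buffer and returns its length (0 when no integrity algorithm
           * is configured), so the buffer grows only if an ICV was
           * actually produced. */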
          o_b0->current_length += hmac_calc (sa0->integ_alg, sa0->integ_key,
                                             sa0->integ_key_len,
                                             (u8 *) o_esp0,
                                             o_b0->current_length -
                                             ip_hdr_size,
                                             vlib_buffer_get_current (o_b0) +
                                             o_b0->current_length,
                                             sa0->use_esn, sa0->seq_hi);
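
          /* The final length is known only now, so fix up the outer
           * header: payload_length for IPv6, total length and checksum
           * for IPv4. */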
          if (PREDICT_FALSE (is_ipv6))
            {
              oh6_0->ip6.payload_length =
                clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, o_b0)
                                      - sizeof (ip6_header_t));
            }
          else
            {
              oh0->ip4.length =
                clib_host_to_net_u16 (vlib_buffer_length_in_chain
                                      (vm, o_b0));
              oh0->ip4.checksum = ip4_header_checksum (&oh0->ip4);
            }

          if (transport_mode)
            vlib_buffer_reset (o_b0);

        trace:
          if (PREDICT_FALSE (i_b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              if (o_b0)
                {
                  o_b0->flags |= VLIB_BUFFER_IS_TRACED;
                  o_b0->trace_index = i_b0->trace_index;
                  esp_encrypt_trace_t *tr =
                    vlib_add_trace (vm, node, o_b0, sizeof (*tr));
                  tr->spi = sa0->spi;
                  tr->seq = sa0->seq - 1;
                  tr->crypto_alg = sa0->crypto_alg;
                  tr->integ_alg = sa0->integ_alg;
                }
            }

          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next, o_bi0,
                                           next0);
        }
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }
  vlib_node_increment_counter (vm, esp_encrypt_node.index,
                               ESP_ENCRYPT_ERROR_RX_PKTS,
                               from_frame->n_vectors);

free_buffers_and_exit:
  if (recycle)
    vlib_buffer_free (vm, recycle, vec_len (recycle));
  vec_free (recycle);
  return from_frame->n_vectors;
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (esp_encrypt_node) = {
  .function = esp_encrypt_node_fn,
  .name = "esp-encrypt",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_encrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN (esp_encrypt_error_strings),
  .error_strings = esp_encrypt_error_strings,

  .n_next_nodes = ESP_ENCRYPT_N_NEXT,
  .next_nodes = {
#define _(s,n) [ESP_ENCRYPT_NEXT_##s] = n,
    foreach_esp_encrypt_next
#undef _
  },
};
/* *INDENT-ON* */

VLIB_NODE_FUNCTION_MULTIARCH (esp_encrypt_node, esp_encrypt_node_fn)
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */