/*
 * esp_decrypt.c : IPSec ESP decrypt node
 *
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vnet/vnet.h>
#include <vnet/api_errno.h>
#include <vnet/ip/ip.h>

#include <vnet/ipsec/ipsec.h>
#include <vnet/ipsec/esp.h>
#include <vnet/ipsec/ipsec_io.h>

#define foreach_esp_decrypt_next                \
_(DROP, "error-drop")                           \
_(IP4_INPUT, "ip4-input-no-checksum")           \
_(IP6_INPUT, "ip6-input")                       \
_(IPSEC_GRE_INPUT, "ipsec-gre-input")

#define _(v, s) ESP_DECRYPT_NEXT_##v,
typedef enum
{
  foreach_esp_decrypt_next
#undef _
    ESP_DECRYPT_N_NEXT,
} esp_decrypt_next_t;

#define foreach_esp_decrypt_error                               \
 _(RX_PKTS, "ESP pkts received")                                \
 _(DECRYPTION_FAILED, "ESP decryption failed")                  \
 _(INTEG_ERROR, "Integrity check failed")                       \
 _(CRYPTO_ENGINE_ERROR, "crypto engine error (packet dropped)") \
 _(REPLAY, "SA replayed packet")                                \
 _(RUNT, "undersized packet")                                   \
 _(CHAINED_BUFFER, "chained buffers (packet dropped)")          \
 _(OVERSIZED_HEADER, "buffer with oversized header (dropped)")  \
 _(NO_TAIL_SPACE, "not enough buffer tail space (dropped)")

typedef enum
{
#define _(sym,str) ESP_DECRYPT_ERROR_##sym,
  foreach_esp_decrypt_error
#undef _
    ESP_DECRYPT_N_ERROR,
} esp_decrypt_error_t;

static char *esp_decrypt_error_strings[] = {
#define _(sym,string) string,
  foreach_esp_decrypt_error
#undef _
};

typedef struct
{
  u32 seq;
  ipsec_crypto_alg_t crypto_alg;
  ipsec_integ_alg_t integ_alg;
} esp_decrypt_trace_t;

/* packet trace format function */
static u8 *
format_esp_decrypt_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  esp_decrypt_trace_t *t = va_arg (*args, esp_decrypt_trace_t *);

  s = format (s, "esp: crypto %U integrity %U seq %u",
              format_ipsec_crypto_alg, t->crypto_alg,
              format_ipsec_integ_alg, t->integ_alg, t->seq);
  return s;
}

typedef struct
{
  union
  {
    struct
    {
      u8 icv_sz;
      u8 iv_sz;
      ipsec_sa_flags_t flags:8;
      u32 sa_index;
    };
    u64 sa_data;
  };

  i16 current_data;
  i16 current_length;
  u16 hdr_sz;
} esp_decrypt_packet_data_t;

STATIC_ASSERT_SIZEOF (esp_decrypt_packet_data_t, 2 * sizeof (u64));
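
/*
 * note: the per-SA half of this state is overlaid on a single u64
 * (sa_data) so the main loop below can copy the cached SA values with
 * one assignment (pd->sa_data = cpd.sa_data).
 */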

#define ESP_ENCRYPT_PD_F_FD_TRANSPORT (1 << 2)

always_inline uword
esp_decrypt_inline (vlib_main_t * vm,
                    vlib_node_runtime_t * node, vlib_frame_t * from_frame,
                    int is_ip6)
{
  ipsec_main_t *im = &ipsec_main;
  u32 thread_index = vm->thread_index;
  u16 buffer_data_size = vlib_buffer_get_default_data_size (vm);
  u16 len;
  ipsec_per_thread_data_t *ptd = vec_elt_at_index (im->ptd, thread_index);
  u32 *from = vlib_frame_vector_args (from_frame);
  u32 n, n_left = from_frame->n_vectors;
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
  u16 nexts[VLIB_FRAME_SIZE], *next = nexts;
  esp_decrypt_packet_data_t pkt_data[VLIB_FRAME_SIZE], *pd = pkt_data;
  esp_decrypt_packet_data_t cpd = { };
  u32 current_sa_index = ~0, current_sa_bytes = 0, current_sa_pkts = 0;
  const u8 esp_sz = sizeof (esp_header_t);
  ipsec_sa_t *sa0 = 0;

  vlib_get_buffers (vm, from, b, n_left);
  vec_reset_length (ptd->crypto_ops);
  vec_reset_length (ptd->integ_ops);
  clib_memset_u16 (nexts, -1, n_left);
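
  /*
   * Pass 1: validate each packet, cache per-SA data and queue one
   * integrity op and one cipher op per packet; the queued ops are
   * executed in batches after this loop.
   */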
  while (n_left > 0)
    {
      u8 *payload;

      if (n_left > 2)
        {
          u8 *p;
          vlib_prefetch_buffer_header (b[2], LOAD);
          p = vlib_buffer_get_current (b[1]);
          CLIB_PREFETCH (p, CLIB_CACHE_LINE_BYTES, LOAD);
          p -= CLIB_CACHE_LINE_BYTES;
          CLIB_PREFETCH (p, CLIB_CACHE_LINE_BYTES, LOAD);
        }

      if (vlib_buffer_chain_linearize (vm, b[0]) != 1)
        {
          b[0]->error = node->errors[ESP_DECRYPT_ERROR_CHAINED_BUFFER];
          next[0] = ESP_DECRYPT_NEXT_DROP;
          goto next;
        }

      if (vnet_buffer (b[0])->ipsec.sad_index != current_sa_index)
        {
          current_sa_index = vnet_buffer (b[0])->ipsec.sad_index;
          sa0 = pool_elt_at_index (im->sad, current_sa_index);
          cpd.icv_sz = sa0->integ_icv_size;
          cpd.iv_sz = sa0->crypto_iv_size;
          cpd.flags = sa0->flags;
          cpd.sa_index = current_sa_index;

          vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
                                           current_sa_index, current_sa_pkts,
                                           current_sa_bytes);

          current_sa_bytes = current_sa_pkts = 0;
        }

      /* store packet data for next round for easier prefetch */
      pd->sa_data = cpd.sa_data;
      pd->current_data = b[0]->current_data;
      pd->current_length = b[0]->current_length;
      pd->hdr_sz = pd->current_data - vnet_buffer (b[0])->l3_hdr_offset;
      payload = b[0]->data + pd->current_data;

      /* we need 4 extra bytes for HMAC calculation when ESN are used */
      if (ipsec_sa_is_set_USE_ESN (sa0) && pd->icv_sz &&
          (pd->current_data + pd->current_length + 4 > buffer_data_size))
        {
          b[0]->error = node->errors[ESP_DECRYPT_ERROR_NO_TAIL_SPACE];
          next[0] = ESP_DECRYPT_NEXT_DROP;
          goto next;
        }

      /* anti-replay check */
      if (ipsec_sa_anti_replay_check (sa0, &((esp_header_t *) payload)->seq))
        {
          b[0]->error = node->errors[ESP_DECRYPT_ERROR_REPLAY];
          next[0] = ESP_DECRYPT_NEXT_DROP;
          goto next;
        }

      if (pd->current_length < cpd.icv_sz + esp_sz + cpd.iv_sz)
        {
          b[0]->error = node->errors[ESP_DECRYPT_ERROR_RUNT];
          next[0] = ESP_DECRYPT_NEXT_DROP;
          goto next;
        }

      len = pd->current_length - cpd.icv_sz;
      current_sa_pkts += 1;
      current_sa_bytes += pd->current_length;

      if (PREDICT_TRUE (sa0->integ_op_id != VNET_CRYPTO_OP_NONE))
        {
          vnet_crypto_op_t *op;
          vec_add2_aligned (ptd->integ_ops, op, 1, CLIB_CACHE_LINE_BYTES);

          vnet_crypto_op_init (op, sa0->integ_op_id);
          op->key_index = sa0->integ_key_index;
          op->src = payload;
          op->flags = VNET_CRYPTO_OP_FLAG_HMAC_CHECK;
          op->user_data = b - bufs;
          op->digest = payload + len;
          op->digest_len = cpd.icv_sz;
          op->len = len;

          if (ipsec_sa_is_set_USE_ESN (sa0))
            {
              /* shift ICV by 4 bytes to insert ESN */
              u8 tmp[ESP_MAX_ICV_SIZE], sz = sizeof (sa0->seq_hi);
              clib_memcpy_fast (tmp, payload + len, ESP_MAX_ICV_SIZE);
              clib_memcpy_fast (payload + len, &sa0->seq_hi, sz);
              clib_memcpy_fast (payload + len + sz, tmp, ESP_MAX_ICV_SIZE);
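
              /*
               * resulting layout is | payload | seq_hi | ICV |, so the
               * integrity check also covers the high-order ESN bits,
               * which are never sent on the wire (RFC 4303).
               */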
              op->len += sz;
              op->digest += sz;
            }
        }

      payload += esp_sz;
      len -= esp_sz;

      if (sa0->crypto_dec_op_id != VNET_CRYPTO_OP_NONE)
        {
          vnet_crypto_op_t *op;
          vec_add2_aligned (ptd->crypto_ops, op, 1, CLIB_CACHE_LINE_BYTES);
          vnet_crypto_op_init (op, sa0->crypto_dec_op_id);
          op->key_index = sa0->crypto_key_index;
          op->iv = payload;

          if (ipsec_sa_is_set_IS_AEAD (sa0))
            {
              esp_header_t *esp0;
              esp_aead_t *aad;
              u8 *scratch;

              /*
               * construct the AAD and the nonce (Salt || IV) in a scratch
               * space in front of the IP header.
               */
              scratch = payload - esp_sz;
              esp0 = (esp_header_t *) (scratch);

              scratch -= (sizeof (*aad) + pd->hdr_sz);
              op->aad = scratch;

              esp_aad_fill (op, esp0, sa0);

              /*
               * we don't need to refer to the ESP header anymore so we
               * can overwrite it with the salt and use the IV where it is
               * to form the nonce = (Salt || IV)
               */
              op->iv -= sizeof (sa0->salt);
              clib_memcpy_fast (op->iv, &sa0->salt, sizeof (sa0->salt));

              op->tag = payload + len;
              op->tag_len = 16;
            }
          op->src = op->dst = payload += cpd.iv_sz;
          op->len = len - cpd.iv_sz;
          op->user_data = b - bufs;
        }

      /* next */
    next:
      n_left -= 1;
      next += 1;
      pd += 1;
      b += 1;
    }
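
  /*
   * Pass 2: flush the counters for the last SA seen, then hand the queued
   * integrity and cipher operations to the crypto infra in one batch per
   * type; failed operations are mapped back to their buffers through
   * op->user_data (the buffer's index within this frame).
   */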
  vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
                                   current_sa_index, current_sa_pkts,
                                   current_sa_bytes);

  if ((n = vec_len (ptd->integ_ops)))
    {
      vnet_crypto_op_t *op = ptd->integ_ops;
      n -= vnet_crypto_process_ops (vm, op, n);
      /* n is now the number of failed operations left to locate */
      while (n)
        {
          ASSERT (op - ptd->integ_ops < vec_len (ptd->integ_ops));
          if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
            {
              u32 err, bi = op->user_data;
              if (op->status == VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC)
                err = ESP_DECRYPT_ERROR_INTEG_ERROR;
              else
                err = ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR;
              bufs[bi]->error = node->errors[err];
              nexts[bi] = ESP_DECRYPT_NEXT_DROP;
              n--;
            }
          op++;
        }
    }
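
  /*
   * for AEAD ciphers the tag check happens inside the cipher operation
   * itself, which is presumably why a bad-HMAC status below is reported
   * as a decryption failure rather than an integrity error.
   */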
  if ((n = vec_len (ptd->crypto_ops)))
    {
      vnet_crypto_op_t *op = ptd->crypto_ops;
      n -= vnet_crypto_process_ops (vm, op, n);
      while (n)
        {
          ASSERT (op - ptd->crypto_ops < vec_len (ptd->crypto_ops));
          if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
            {
              u32 err, bi;

              bi = op->user_data;

              if (op->status == VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC)
                err = ESP_DECRYPT_ERROR_DECRYPTION_FAILED;
              else
                err = ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR;

              bufs[bi]->error = node->errors[err];
              nexts[bi] = ESP_DECRYPT_NEXT_DROP;
              n--;
            }
          op++;
        }
    }

  /* Post-decryption round - adjust packet data start and length and next
     node */

  n_left = from_frame->n_vectors;
  next = nexts;
  pd = pkt_data;
  b = bufs;

  while (n_left)
    {
      const u8 tun_flags = IPSEC_SA_FLAG_IS_TUNNEL |
        IPSEC_SA_FLAG_IS_TUNNEL_V6;

      if (n_left >= 2)
        {
          void *data = b[1]->data + pd[1].current_data;

          /* buffer metadata */
          vlib_prefetch_buffer_header (b[1], LOAD);

          /* esp_footer_t */
          CLIB_PREFETCH (data + pd[1].current_length - pd[1].icv_sz - 2,
                         CLIB_CACHE_LINE_BYTES, LOAD);

          /* packet headers */
          CLIB_PREFETCH (data - CLIB_CACHE_LINE_BYTES,
                         CLIB_CACHE_LINE_BYTES * 2, LOAD);
        }

      if (next[0] < ESP_DECRYPT_N_NEXT)
        goto trace;

      sa0 = vec_elt_at_index (im->sad, pd->sa_index);
      u8 *payload = b[0]->data + pd->current_data;

      ipsec_sa_anti_replay_advance (sa0, ((esp_header_t *) payload)->seq);

      esp_footer_t *f = (esp_footer_t *) (b[0]->data + pd->current_data +
                                          pd->current_length - sizeof (*f) -
                                          pd->icv_sz);
      u16 adv = pd->iv_sz + esp_sz;
      u16 tail = sizeof (esp_footer_t) + f->pad_length + pd->icv_sz;
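
      /*
       * adv is what is consumed at the front of the decrypted payload
       * (ESP header plus IV); tail is what is stripped from the end
       * (footer, padding and ICV).
       */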
      if ((pd->flags & tun_flags) == 0)	/* transport mode */
        {
          u8 udp_sz = (is_ip6 == 0 && pd->flags & IPSEC_SA_FLAG_UDP_ENCAP) ?
            sizeof (udp_header_t) : 0;
          u16 ip_hdr_sz = pd->hdr_sz - udp_sz;
          u8 *old_ip = b[0]->data + pd->current_data - ip_hdr_sz - udp_sz;
          u8 *ip = old_ip + adv + udp_sz;

          if (is_ip6 && ip_hdr_sz > 64)
            memmove (ip, old_ip, ip_hdr_sz);
          else
            clib_memcpy_le64 (ip, old_ip, ip_hdr_sz);

          b[0]->current_data = pd->current_data + adv - ip_hdr_sz;
          b[0]->current_length = pd->current_length + ip_hdr_sz - tail - adv;
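
          /*
           * the copied header still carries the pre-decryption length and
           * protocol; the per-address-family blocks below rewrite them
           * (and, for ip4, incrementally fix the checksum).
           */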
          if (is_ip6)
            {
              ip6_header_t *ip6 = (ip6_header_t *) ip;
              u16 len = clib_net_to_host_u16 (ip6->payload_length);
              len -= adv + tail;
              ip6->payload_length = clib_host_to_net_u16 (len);
              ip6->protocol = f->next_header;
              next[0] = ESP_DECRYPT_NEXT_IP6_INPUT;
            }
          else
            {
              ip4_header_t *ip4 = (ip4_header_t *) ip;
              ip_csum_t sum = ip4->checksum;
              u16 len = clib_net_to_host_u16 (ip4->length);
              len = clib_host_to_net_u16 (len - adv - tail - udp_sz);
              sum = ip_csum_update (sum, ip4->protocol, f->next_header,
                                    ip4_header_t, protocol);
              sum = ip_csum_update (sum, ip4->length, len,
                                    ip4_header_t, length);
              ip4->checksum = ip_csum_fold (sum);
              ip4->protocol = f->next_header;
              ip4->length = len;
              next[0] = ESP_DECRYPT_NEXT_IP4_INPUT;
            }
        }
      else			/* tunnel mode */
        {
          if (PREDICT_TRUE (f->next_header == IP_PROTOCOL_IP_IN_IP))
            {
              next[0] = ESP_DECRYPT_NEXT_IP4_INPUT;
              b[0]->current_data = pd->current_data + adv;
              b[0]->current_length = pd->current_length + adv - tail;
            }
          else if (f->next_header == IP_PROTOCOL_IPV6)
            {
              next[0] = ESP_DECRYPT_NEXT_IP6_INPUT;
              b[0]->current_data = pd->current_data + adv;
              b[0]->current_length = pd->current_length + adv - tail;
            }
          else
            {
              next[0] = ESP_DECRYPT_NEXT_DROP;
              b[0]->error = node->errors[ESP_DECRYPT_ERROR_DECRYPTION_FAILED];
            }
        }
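
      /*
       * an SA bound to ipsec-gre steers decrypted packets to the GRE
       * input node regardless of the inner protocol chosen above.
       */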
      if (PREDICT_FALSE (ipsec_sa_is_set_IS_GRE (sa0)))
        next[0] = ESP_DECRYPT_NEXT_IPSEC_GRE_INPUT;

    trace:
      if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
        {
          esp_decrypt_trace_t *tr;
          u8 *payload = b[0]->data + pd->current_data;
          tr = vlib_add_trace (vm, node, b[0], sizeof (*tr));
          sa0 = pool_elt_at_index (im->sad,
                                   vnet_buffer (b[0])->ipsec.sad_index);
          tr->crypto_alg = sa0->crypto_alg;
          tr->integ_alg = sa0->integ_alg;
          tr->seq = clib_host_to_net_u32 (((esp_header_t *) payload)->seq);
        }

      /* next */
      n_left -= 1;
      next += 1;
      pd += 1;
      b += 1;
    }

  n_left = from_frame->n_vectors;
  vlib_node_increment_counter (vm, node->node_index,
                               ESP_DECRYPT_ERROR_RX_PKTS, n_left);

  vlib_buffer_enqueue_to_next (vm, node, from, nexts, n_left);

  return n_left;
}

VLIB_NODE_FN (esp4_decrypt_node) (vlib_main_t * vm,
                                  vlib_node_runtime_t * node,
                                  vlib_frame_t * from_frame)
{
  return esp_decrypt_inline (vm, node, from_frame, 0 /* is_ip6 */ );
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (esp4_decrypt_node) = {
  .name = "esp4-decrypt",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_decrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(esp_decrypt_error_strings),
  .error_strings = esp_decrypt_error_strings,

  .n_next_nodes = ESP_DECRYPT_N_NEXT,
  .next_nodes = {
#define _(s,n) [ESP_DECRYPT_NEXT_##s] = n,
    foreach_esp_decrypt_next
#undef _
  },
};
/* *INDENT-ON* */

VLIB_NODE_FN (esp6_decrypt_node) (vlib_main_t * vm,
                                  vlib_node_runtime_t * node,
                                  vlib_frame_t * from_frame)
{
  return esp_decrypt_inline (vm, node, from_frame, 1 /* is_ip6 */ );
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (esp6_decrypt_node) = {
  .name = "esp6-decrypt",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_decrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(esp_decrypt_error_strings),
  .error_strings = esp_decrypt_error_strings,

  .n_next_nodes = ESP_DECRYPT_N_NEXT,
  .next_nodes = {
#define _(s,n) [ESP_DECRYPT_NEXT_##s] = n,
    foreach_esp_decrypt_next
#undef _
  },
};
/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */