/*
 * esp_decrypt.c : IPSec ESP decrypt node
 *
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vnet/vnet.h>
#include <vnet/api_errno.h>
#include <vnet/ip/ip.h>

#include <vnet/ipsec/ipsec.h>
#include <vnet/ipsec/esp.h>
#include <vnet/ipsec/ipsec_io.h>
#include <vnet/ipsec/ipsec_tun.h>
#define foreach_esp_decrypt_next                \
_(DROP, "error-drop")                           \
_(IP4_INPUT, "ip4-input-no-checksum")           \
_(IP6_INPUT, "ip6-input")

#define _(v, s) ESP_DECRYPT_NEXT_##v,
typedef enum
{
  foreach_esp_decrypt_next
#undef _
    ESP_DECRYPT_N_NEXT,
} esp_decrypt_next_t;
#define foreach_esp_decrypt_error                               \
 _(RX_PKTS, "ESP pkts received")                                \
 _(DECRYPTION_FAILED, "ESP decryption failed")                  \
 _(INTEG_ERROR, "Integrity check failed")                       \
 _(CRYPTO_ENGINE_ERROR, "crypto engine error (packet dropped)") \
 _(REPLAY, "SA replayed packet")                                \
 _(RUNT, "undersized packet")                                   \
 _(CHAINED_BUFFER, "chained buffers (packet dropped)")          \
 _(OVERSIZED_HEADER, "buffer with oversized header (dropped)")  \
 _(NO_TAIL_SPACE, "not enough buffer tail space (dropped)")

typedef enum
{
#define _(sym,str) ESP_DECRYPT_ERROR_##sym,
  foreach_esp_decrypt_error
#undef _
    ESP_DECRYPT_N_ERROR,
} esp_decrypt_error_t;
static char *esp_decrypt_error_strings[] = {
#define _(sym,string) string,
  foreach_esp_decrypt_error
#undef _
};

typedef struct
{
  u32 seq;
  u32 sa_seq;
  u32 sa_seq_hi;
  ipsec_crypto_alg_t crypto_alg;
  ipsec_integ_alg_t integ_alg;
} esp_decrypt_trace_t;
/* packet trace format function */
static u8 *
format_esp_decrypt_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  esp_decrypt_trace_t *t = va_arg (*args, esp_decrypt_trace_t *);

  s = format (s,
              "esp: crypto %U integrity %U pkt-seq %d sa-seq %u sa-seq-hi %u",
              format_ipsec_crypto_alg, t->crypto_alg, format_ipsec_integ_alg,
              t->integ_alg, t->seq, t->sa_seq, t->sa_seq_hi);
  return s;
}
typedef struct
{
  union
  {
    struct
    {
      u8 icv_sz;
      u8 iv_sz;
      ipsec_sa_flags_t flags;
      u32 sa_index;
    };
    u64 sa_data;
  };

  u32 seq;
  i16 current_data;
  i16 current_length;
  u16 hdr_sz;
} esp_decrypt_packet_data_t;

STATIC_ASSERT_SIZEOF (esp_decrypt_packet_data_t, 3 * sizeof (u64));
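
/*
 * Why the size is pinned (annotation): one esp_decrypt_packet_data_t per
 * packet lives in a VLIB_FRAME_SIZE stack array that is walked again in the
 * post-decrypt pass, so keeping each entry at exactly three u64s keeps that
 * walk compact and prefetch-friendly; the union also lets the per-SA fields
 * be copied with a single 8-byte move of sa_data.
 */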
#define ESP_DECRYPT_PD_F_FD_TRANSPORT (1 << 2)
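
/*
 * esp_decrypt_inline below is specialized four ways: it is always_inline
 * and every VLIB_NODE_FN wrapper at the bottom of this file passes
 * compile-time constant is_ip6/is_tun arguments, so the per-packet branches
 * on address family and tunnel mode fold away in each node's instantiation.
 */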
always_inline uword
esp_decrypt_inline (vlib_main_t * vm,
                    vlib_node_runtime_t * node, vlib_frame_t * from_frame,
                    int is_ip6, int is_tun)
{
  ipsec_main_t *im = &ipsec_main;
  u32 thread_index = vm->thread_index;
  u16 buffer_data_size = vlib_buffer_get_default_data_size (vm);
  u16 len;
  ipsec_per_thread_data_t *ptd = vec_elt_at_index (im->ptd, thread_index);
  u32 *from = vlib_frame_vector_args (from_frame);
  u32 n, n_left = from_frame->n_vectors;
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
  u16 nexts[VLIB_FRAME_SIZE], *next = nexts;
  esp_decrypt_packet_data_t pkt_data[VLIB_FRAME_SIZE], *pd = pkt_data;
  esp_decrypt_packet_data_t cpd = { };
  u32 current_sa_index = ~0, current_sa_bytes = 0, current_sa_pkts = 0;
  const u8 esp_sz = sizeof (esp_header_t);
  ipsec_sa_t *sa0 = 0;

  vlib_get_buffers (vm, from, b, n_left);
  vec_reset_length (ptd->crypto_ops);
  vec_reset_length (ptd->integ_ops);
  clib_memset_u16 (nexts, -1, n_left);
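
  /*
   * Two-pass design (annotation): the loop below walks the frame once,
   * validating each packet and queueing its integrity and decrypt operations
   * into the per-thread op vectors; those ops are then processed in bulk,
   * and a second pass fixes up buffer geometry and picks each packet's
   * next node.
   */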
  while (n_left > 0)
    {
      u8 *payload;

      if (n_left > 2)
        {
          u8 *p;
          vlib_prefetch_buffer_header (b[2], LOAD);
          p = vlib_buffer_get_current (b[1]);
          CLIB_PREFETCH (p, CLIB_CACHE_LINE_BYTES, LOAD);
          p -= CLIB_CACHE_LINE_BYTES;
          CLIB_PREFETCH (p, CLIB_CACHE_LINE_BYTES, LOAD);
        }

      if (vlib_buffer_chain_linearize (vm, b[0]) != 1)
        {
          b[0]->error = node->errors[ESP_DECRYPT_ERROR_CHAINED_BUFFER];
          next[0] = ESP_DECRYPT_NEXT_DROP;
          goto next;
        }
      if (vnet_buffer (b[0])->ipsec.sad_index != current_sa_index)
        {
          if (current_sa_pkts)
            vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
                                             current_sa_index,
                                             current_sa_pkts,
                                             current_sa_bytes);
          current_sa_bytes = current_sa_pkts = 0;

          current_sa_index = vnet_buffer (b[0])->ipsec.sad_index;
          sa0 = pool_elt_at_index (im->sad, current_sa_index);
          cpd.icv_sz = sa0->integ_icv_size;
          cpd.iv_sz = sa0->crypto_iv_size;
          cpd.flags = sa0->flags;
          cpd.sa_index = current_sa_index;
        }
      /* store packet data for next round for easier prefetch */
      pd->sa_data = cpd.sa_data;
      pd->current_data = b[0]->current_data;
      pd->current_length = b[0]->current_length;
      pd->hdr_sz = pd->current_data - vnet_buffer (b[0])->l3_hdr_offset;
      payload = b[0]->data + pd->current_data;
      pd->seq = clib_host_to_net_u32 (((esp_header_t *) payload)->seq);
      /* we need 4 extra bytes for HMAC calculation when ESN are used */
      if (ipsec_sa_is_set_USE_ESN (sa0) && pd->icv_sz &&
          (pd->current_data + pd->current_length + 4 > buffer_data_size))
        {
          b[0]->error = node->errors[ESP_DECRYPT_ERROR_NO_TAIL_SPACE];
          next[0] = ESP_DECRYPT_NEXT_DROP;
          goto next;
        }
      /* anti-replay check */
      if (ipsec_sa_anti_replay_check (sa0, pd->seq))
        {
          b[0]->error = node->errors[ESP_DECRYPT_ERROR_REPLAY];
          next[0] = ESP_DECRYPT_NEXT_DROP;
          goto next;
        }
      if (pd->current_length < cpd.icv_sz + esp_sz + cpd.iv_sz)
        {
          b[0]->error = node->errors[ESP_DECRYPT_ERROR_RUNT];
          next[0] = ESP_DECRYPT_NEXT_DROP;
          goto next;
        }

      len = pd->current_length - cpd.icv_sz;
      current_sa_pkts += 1;
      current_sa_bytes += pd->current_length;
      if (PREDICT_TRUE (sa0->integ_op_id != VNET_CRYPTO_OP_NONE))
        {
          vnet_crypto_op_t *op;
          vec_add2_aligned (ptd->integ_ops, op, 1, CLIB_CACHE_LINE_BYTES);

          vnet_crypto_op_init (op, sa0->integ_op_id);
          op->key_index = sa0->integ_key_index;
          op->src = payload;
          op->flags = VNET_CRYPTO_OP_FLAG_HMAC_CHECK;
          op->user_data = b - bufs;
          op->digest = payload + len;
          op->digest_len = cpd.icv_sz;
          op->len = len;
          if (ipsec_sa_is_set_USE_ESN (sa0))
            {
              /* shift ICV by 4 bytes to insert ESN */
              u32 seq_hi = clib_host_to_net_u32 (sa0->seq_hi);
              u8 tmp[ESP_MAX_ICV_SIZE], sz = sizeof (sa0->seq_hi);
              clib_memcpy_fast (tmp, payload + len, ESP_MAX_ICV_SIZE);
              clib_memcpy_fast (payload + len, &seq_hi, sz);
              clib_memcpy_fast (payload + len + sz, tmp, ESP_MAX_ICV_SIZE);
              op->len += sz;
              op->digest += sz;
            }
        }
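
      /*
       * Worked example of the ESN splice above (illustration only, not from
       * the original source), assuming a 16-byte ICV and sz == 4:
       *
       *   before:  ... payload | ICV[0..15]
       *   after:   ... payload | seq_hi (BE) | ICV[0..15]
       *
       * RFC 4303 requires the high-order ESN bits to be covered by the
       * integrity check even though they are never transmitted, so the
       * received ICV is slid right by 4 bytes, seq_hi is spliced in, and
       * op->len/op->digest are bumped to match; the HMAC is then computed
       * over SPI..payload..seq_hi and compared against the original ICV.
       */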
      payload += esp_sz;
      len -= esp_sz;

      if (sa0->crypto_dec_op_id != VNET_CRYPTO_OP_NONE)
        {
          vnet_crypto_op_t *op;
          vec_add2_aligned (ptd->crypto_ops, op, 1, CLIB_CACHE_LINE_BYTES);
          vnet_crypto_op_init (op, sa0->crypto_dec_op_id);
          op->key_index = sa0->crypto_key_index;
          op->iv = payload;

          if (ipsec_sa_is_set_IS_AEAD (sa0))
            {
              esp_header_t *esp0;
              esp_aead_t *aad;
              u8 *scratch;
              /*
               * construct the AAD and the nonce (Salt || IV) in a scratch
               * space in front of the IP header.
               */
              scratch = payload - esp_sz;
              esp0 = (esp_header_t *) (scratch);

              scratch -= (sizeof (*aad) + pd->hdr_sz);
              op->aad = scratch;

              esp_aad_fill (op, esp0, sa0);

              /*
               * we don't need to refer to the ESP header anymore so we
               * can overwrite it with the salt and use the IV where it is
               * to form the nonce = (Salt || IV)
               */
              op->iv -= sizeof (sa0->salt);
              clib_memcpy_fast (op->iv, &sa0->salt, sizeof (sa0->salt));

              op->tag = payload + len;
              op->tag_len = 16;
            }
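
          /*
           * Resulting AEAD layout (sketch, not from the original source):
           * op->iv now points 4 bytes before the wire IV, yielding the
           * 12-byte AES-GCM nonce = salt (4B, from the SA) || IV (8B, from
           * the wire), and the AAD (SPI || seq, or SPI || seq_hi || seq_lo
           * with ESN) sits in the scratch area filled by esp_aad_fill().
           */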
          op->src = op->dst = payload += cpd.iv_sz;
          op->len = len - cpd.iv_sz;
          op->user_data = b - bufs;
        }

      /* next */
    next:
      n_left -= 1;
      next += 1;
      pd += 1;
      b += 1;
    }

  vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
                                   current_sa_index, current_sa_pkts,
                                   current_sa_bytes);
  if ((n = vec_len (ptd->integ_ops)))
    {
      vnet_crypto_op_t *op = ptd->integ_ops;
      n -= vnet_crypto_process_ops (vm, op, n);
      while (n)
        {
          ASSERT (op - ptd->integ_ops < vec_len (ptd->integ_ops));
          if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
            {
              u32 err, bi = op->user_data;
              if (op->status == VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC)
                err = ESP_DECRYPT_ERROR_INTEG_ERROR;
              else
                err = ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR;
              bufs[bi]->error = node->errors[err];
              nexts[bi] = ESP_DECRYPT_NEXT_DROP;
              n--;
            }
          op++;
        }
    }
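
  /*
   * Note (annotation): op->user_data was set to the packet's index within
   * this frame (b - bufs) when the op was queued, so a failed operation
   * maps straight back to its buffer here to set the error and the drop
   * disposition.
   */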
  if ((n = vec_len (ptd->crypto_ops)))
    {
      vnet_crypto_op_t *op = ptd->crypto_ops;
      n -= vnet_crypto_process_ops (vm, op, n);
      while (n)
        {
          ASSERT (op - ptd->crypto_ops < vec_len (ptd->crypto_ops));
          if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
            {
              u32 err, bi;

              bi = op->user_data;

              if (op->status == VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC)
                err = ESP_DECRYPT_ERROR_DECRYPTION_FAILED;
              else
                err = ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR;

              bufs[bi]->error = node->errors[err];
              nexts[bi] = ESP_DECRYPT_NEXT_DROP;
              n--;
            }
          op++;
        }
    }
  /* Post decryption round - adjust packet data start and length and next
     node */

  n_left = from_frame->n_vectors;
  next = nexts;
  pd = pkt_data;
  b = bufs;
  while (n_left)
    {
      const u8 tun_flags = IPSEC_SA_FLAG_IS_TUNNEL |
        IPSEC_SA_FLAG_IS_TUNNEL_V6;

      if (n_left >= 2)
        {
          void *data = b[1]->data + pd[1].current_data;

          /* buffer metadata */
          vlib_prefetch_buffer_header (b[1], LOAD);

          /* esp_footer_t */
          CLIB_PREFETCH (data + pd[1].current_length - pd[1].icv_sz - 2,
                         CLIB_CACHE_LINE_BYTES, LOAD);

          /* packet headers */
          CLIB_PREFETCH (data - CLIB_CACHE_LINE_BYTES,
                         CLIB_CACHE_LINE_BYTES * 2, LOAD);
        }
      if (next[0] < ESP_DECRYPT_N_NEXT)
        goto trace;

      sa0 = vec_elt_at_index (im->sad, pd->sa_index);
      /*
       * redo the anti-replay check
       * in this frame say we have sequence numbers, s, s+1, s+1, s+1
       * and s and s+1 are in the window. When we did the anti-replay
       * check above we did so against the state of the window (W),
       * after packet s-1. So each of the packets in the sequence will be
       * accepted.
       * This time s will be checked against Ws-1, s+1 checked against Ws
       * (i.e. the window state is updated/advanced)
       * so this time the successive s+1 packets will be dropped.
       * This is a consequence of batching the decrypts. If the
       * check-decrypt-advance process was done for each packet it would
       * be fine. But we batch the decrypts because it's much more efficient
       * to do so in SW and if we offload to HW and the process is async.
       *
       * You're probably thinking, but this means an attacker can send the
       * above sequence and cause VPP to perform decrypts that will fail,
       * and that's true. But if the attacker can determine s (a valid
       * sequence number in the window), which is non-trivial, it can
       * generate a sequence s, s+1, s+2, s+3, ... s+n and nothing will
       * prevent any implementation, sequential or batching, from
       * decrypting these.
       */
      if (ipsec_sa_anti_replay_check (sa0, pd->seq))
        {
          b[0]->error = node->errors[ESP_DECRYPT_ERROR_REPLAY];
          next[0] = ESP_DECRYPT_NEXT_DROP;
          goto trace;
        }

      ipsec_sa_anti_replay_advance (sa0, pd->seq);
      esp_footer_t *f = (esp_footer_t *) (b[0]->data + pd->current_data +
                                          pd->current_length - sizeof (*f) -
                                          pd->icv_sz);
      u16 adv = pd->iv_sz + esp_sz;
      u16 tail = sizeof (esp_footer_t) + f->pad_length + pd->icv_sz;
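
      /*
       * For reference (RFC 4303 trailer layout): the decrypted payload ends
       * with padding | pad_length (1B) | next_header (1B), followed by the
       * ICV. "adv" strips the ESP header + IV from the front of the packet,
       * "tail" strips padding + footer + ICV from the back.
       */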
      if ((pd->flags & tun_flags) == 0 && !is_tun)      /* transport mode */
        {
          u8 udp_sz = (is_ip6 == 0 && pd->flags & IPSEC_SA_FLAG_UDP_ENCAP) ?
            sizeof (udp_header_t) : 0;
          u16 ip_hdr_sz = pd->hdr_sz - udp_sz;
          u8 *old_ip = b[0]->data + pd->current_data - ip_hdr_sz - udp_sz;
          u8 *ip = old_ip + adv + udp_sz;

          if (is_ip6 && ip_hdr_sz > 64)
            memmove (ip, old_ip, ip_hdr_sz);
          else
            clib_memcpy_le64 (ip, old_ip, ip_hdr_sz);

          b[0]->current_data = pd->current_data + adv - ip_hdr_sz;
          b[0]->current_length = pd->current_length + ip_hdr_sz - tail - adv;
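
          /*
           * Note (annotation): the copy above slides the IP (and optional
           * UDP) header forward over the consumed ESP header + IV. The
           * regions may overlap, so the oversized-v6-header case uses
           * memmove; the common <= 64 byte header takes the cheaper
           * clib_memcpy_le64 path.
           */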
          if (is_ip6)
            {
              ip6_header_t *ip6 = (ip6_header_t *) ip;
              u16 len = clib_net_to_host_u16 (ip6->payload_length);
              len -= adv + tail;
              ip6->payload_length = clib_host_to_net_u16 (len);
              ip6->protocol = f->next_header;
              next[0] = ESP_DECRYPT_NEXT_IP6_INPUT;
            }
          else
            {
              ip4_header_t *ip4 = (ip4_header_t *) ip;
              ip_csum_t sum = ip4->checksum;
              u16 len = clib_net_to_host_u16 (ip4->length);
              len = clib_host_to_net_u16 (len - adv - tail - udp_sz);
              sum = ip_csum_update (sum, ip4->protocol, f->next_header,
                                    ip4_header_t, protocol);
              sum = ip_csum_update (sum, ip4->length, len,
                                    ip4_header_t, length);
              ip4->checksum = ip_csum_fold (sum);
              ip4->protocol = f->next_header;
              ip4->length = len;
              next[0] = ESP_DECRYPT_NEXT_IP4_INPUT;
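
              /*
               * Checksum note (annotation): rather than recomputing over
               * the whole header, the checksum is incrementally patched
               * (RFC 1624) for the two fields that changed, protocol and
               * length, then folded.
               */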
            }
        }
      else
        {
          if (PREDICT_TRUE (f->next_header == IP_PROTOCOL_IP_IN_IP))
            {
              next[0] = ESP_DECRYPT_NEXT_IP4_INPUT;
              b[0]->current_data = pd->current_data + adv;
              b[0]->current_length = pd->current_length - adv - tail;
            }
          else if (f->next_header == IP_PROTOCOL_IPV6)
            {
              next[0] = ESP_DECRYPT_NEXT_IP6_INPUT;
              b[0]->current_data = pd->current_data + adv;
              b[0]->current_length = pd->current_length - adv - tail;
            }
          else
            {
              next[0] = ESP_DECRYPT_NEXT_DROP;
              b[0]->error = node->errors[ESP_DECRYPT_ERROR_DECRYPTION_FAILED];
            }
          if (is_tun)
            {
              if (ipsec_sa_is_set_IS_PROTECT (sa0))
                {
                  /*
                   * Check that the revealed IP header matches that
                   * of the tunnel we are protecting
                   */
                  const ipsec_tun_protect_t *itp;

                  itp =
                    ipsec_tun_protect_get (vnet_buffer (b[0])->
                                           ipsec.protect_index);
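
                  /*
                   * Note the cross-compare below (annotation): traffic
                   * arriving over the protected tunnel must be addressed to
                   * the tunnel's local endpoint (itp_tun.src) and come from
                   * its remote endpoint (itp_tun.dst), so src is matched
                   * against dst_address and vice versa.
                   */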
                  if (PREDICT_TRUE (f->next_header == IP_PROTOCOL_IP_IN_IP))
                    {
                      const ip4_header_t *ip4;

                      ip4 = vlib_buffer_get_current (b[0]);

                      if (!ip46_address_is_equal_v4 (&itp->itp_tun.src,
                                                     &ip4->dst_address) ||
                          !ip46_address_is_equal_v4 (&itp->itp_tun.dst,
                                                     &ip4->src_address))
                        next[0] = ESP_DECRYPT_NEXT_DROP;
                    }
                  else if (f->next_header == IP_PROTOCOL_IPV6)
                    {
                      const ip6_header_t *ip6;

                      ip6 = vlib_buffer_get_current (b[0]);

                      if (!ip46_address_is_equal_v6 (&itp->itp_tun.src,
                                                     &ip6->dst_address) ||
                          !ip46_address_is_equal_v6 (&itp->itp_tun.dst,
                                                     &ip6->src_address))
                        next[0] = ESP_DECRYPT_NEXT_DROP;
                    }
                }
            }
        }
    trace:
      if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
        {
          esp_decrypt_trace_t *tr;
          tr = vlib_add_trace (vm, node, b[0], sizeof (*tr));
          sa0 = pool_elt_at_index (im->sad,
                                   vnet_buffer (b[0])->ipsec.sad_index);
          tr->crypto_alg = sa0->crypto_alg;
          tr->integ_alg = sa0->integ_alg;
          tr->seq = pd->seq;
          tr->sa_seq = sa0->last_seq;
          tr->sa_seq_hi = sa0->seq_hi;
        }

      /* next */
      n_left -= 1;
      next += 1;
      pd += 1;
      b += 1;
    }
  n_left = from_frame->n_vectors;
  vlib_node_increment_counter (vm, node->node_index,
                               ESP_DECRYPT_ERROR_RX_PKTS, n_left);

  vlib_buffer_enqueue_to_next (vm, node, from, nexts, n_left);

  return n_left;
}
VLIB_NODE_FN (esp4_decrypt_node) (vlib_main_t * vm,
                                  vlib_node_runtime_t * node,
                                  vlib_frame_t * from_frame)
{
  return esp_decrypt_inline (vm, node, from_frame, 0, 0);
}

VLIB_NODE_FN (esp4_decrypt_tun_node) (vlib_main_t * vm,
                                      vlib_node_runtime_t * node,
                                      vlib_frame_t * from_frame)
{
  return esp_decrypt_inline (vm, node, from_frame, 0, 1);
}

VLIB_NODE_FN (esp6_decrypt_node) (vlib_main_t * vm,
                                  vlib_node_runtime_t * node,
                                  vlib_frame_t * from_frame)
{
  return esp_decrypt_inline (vm, node, from_frame, 1, 0);
}

VLIB_NODE_FN (esp6_decrypt_tun_node) (vlib_main_t * vm,
                                      vlib_node_runtime_t * node,
                                      vlib_frame_t * from_frame)
{
  return esp_decrypt_inline (vm, node, from_frame, 1, 1);
}
VLIB_REGISTER_NODE (esp4_decrypt_node) = {
  .name = "esp4-decrypt",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_decrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(esp_decrypt_error_strings),
  .error_strings = esp_decrypt_error_strings,

  .n_next_nodes = ESP_DECRYPT_N_NEXT,
  .next_nodes = {
#define _(s,n) [ESP_DECRYPT_NEXT_##s] = n,
    foreach_esp_decrypt_next
#undef _
  },
};
VLIB_REGISTER_NODE (esp6_decrypt_node) = {
  .name = "esp6-decrypt",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_decrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(esp_decrypt_error_strings),
  .error_strings = esp_decrypt_error_strings,

  .n_next_nodes = ESP_DECRYPT_N_NEXT,
  .next_nodes = {
#define _(s,n) [ESP_DECRYPT_NEXT_##s] = n,
    foreach_esp_decrypt_next
#undef _
  },
};
VLIB_REGISTER_NODE (esp4_decrypt_tun_node) = {
  .name = "esp4-decrypt-tun",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_decrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(esp_decrypt_error_strings),
  .error_strings = esp_decrypt_error_strings,

  .n_next_nodes = ESP_DECRYPT_N_NEXT,
  .next_nodes = {
#define _(s,n) [ESP_DECRYPT_NEXT_##s] = n,
    foreach_esp_decrypt_next
#undef _
  },
};
VLIB_REGISTER_NODE (esp6_decrypt_tun_node) = {
  .name = "esp6-decrypt-tun",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_decrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(esp_decrypt_error_strings),
  .error_strings = esp_decrypt_error_strings,

  .n_next_nodes = ESP_DECRYPT_N_NEXT,
  .next_nodes = {
#define _(s,n) [ESP_DECRYPT_NEXT_##s] = n,
    foreach_esp_decrypt_next
#undef _
  },
};
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */