/*
 * esp_decrypt.c : IPSec ESP decrypt node
 *
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vnet/vnet.h>
#include <vnet/api_errno.h>
#include <vnet/ip/ip.h>
#include <vnet/l2/l2_input.h>

#include <vnet/ipsec/ipsec.h>
#include <vnet/ipsec/esp.h>
#include <vnet/ipsec/ipsec_io.h>
#include <vnet/ipsec/ipsec_tun.h>

#include <vnet/gre/gre.h>
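
/*
 * ESP decrypt node. Each frame is handled in two passes: the first pass
 * walks the buffers, builds integrity and decrypt ops (plain or chained
 * for multi-buffer packets) and batches them to the crypto engine; the
 * second pass trims the ESP encapsulation, re-checks anti-replay and
 * selects the next node from the decapsulated inner protocol.
 */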
#define foreach_esp_decrypt_next                \
_(DROP, "error-drop")                           \
_(IP4_INPUT, "ip4-input-no-checksum")           \
_(IP6_INPUT, "ip6-input")                       \
_(L2_INPUT, "l2-input")                         \
_(HANDOFF, "handoff")

#define _(v, s) ESP_DECRYPT_NEXT_##v,
typedef enum
{
  foreach_esp_decrypt_next
#undef _
    ESP_DECRYPT_N_NEXT,
} esp_decrypt_next_t;
#define foreach_esp_decrypt_error                               \
 _(RX_PKTS, "ESP pkts received")                                \
 _(DECRYPTION_FAILED, "ESP decryption failed")                  \
 _(INTEG_ERROR, "Integrity check failed")                       \
 _(CRYPTO_ENGINE_ERROR, "crypto engine error (packet dropped)") \
 _(REPLAY, "SA replayed packet")                                \
 _(RUNT, "undersized packet")                                   \
 _(NO_BUFFERS, "no buffers (packet dropped)")                   \
 _(OVERSIZED_HEADER, "buffer with oversized header (dropped)")  \
 _(NO_TAIL_SPACE, "not enough buffer tail space (dropped)")     \
 _(TUN_NO_PROTO, "no tunnel protocol")                          \
 _(UNSUP_PAYLOAD, "unsupported payload")

typedef enum
{
#define _(sym,str) ESP_DECRYPT_ERROR_##sym,
  foreach_esp_decrypt_error
#undef _
    ESP_DECRYPT_N_ERROR,
} esp_decrypt_error_t;

static char *esp_decrypt_error_strings[] = {
#define _(sym,string) string,
  foreach_esp_decrypt_error
#undef _
};
typedef struct
{
  u32 seq;
  u32 sa_seq;
  u32 sa_seq_hi;
  ipsec_crypto_alg_t crypto_alg;
  ipsec_integ_alg_t integ_alg;
} esp_decrypt_trace_t;

/* packet trace format function */
static u8 *
format_esp_decrypt_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  esp_decrypt_trace_t *t = va_arg (*args, esp_decrypt_trace_t *);

  s = format (s,
	      "esp: crypto %U integrity %U pkt-seq %d sa-seq %u sa-seq-hi %u",
	      format_ipsec_crypto_alg, t->crypto_alg, format_ipsec_integ_alg,
	      t->integ_alg, t->seq, t->sa_seq, t->sa_seq_hi);
  return s;
}
/* per-packet data filled in during the first pass and consumed by the
 * post-decryption pass; kept to four u64s (asserted below) so walking
 * the pkt_data array stays prefetch-friendly */
typedef struct
{
  union
  {
    struct
    {
      u8 icv_sz;
      u8 iv_sz;
      ipsec_sa_flags_t flags;
      u32 sa_index;
    };
    u64 sa_data;
  };

  u32 seq;
  i16 current_data;
  i16 current_length;
  u16 hdr_sz;
  u16 icv_removed;
  u32 free_buffer_index;
  vlib_buffer_t *lb;
} esp_decrypt_packet_data_t;

STATIC_ASSERT_SIZEOF (esp_decrypt_packet_data_t, 4 * sizeof (u64));
#define ESP_ENCRYPT_PD_F_FD_TRANSPORT (1 << 2)
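
/*
 * Walk the completed crypto ops; any op that did not complete maps back
 * to its buffer via op->user_data (the buffer's index within the frame).
 * A bad HMAC becomes the caller-supplied error 'e' (integrity or
 * decryption failure); anything else is charged to the crypto engine.
 */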
static_always_inline void
esp_process_ops (vlib_main_t * vm, vlib_node_runtime_t * node,
		 vnet_crypto_op_t * ops, vlib_buffer_t * b[], u16 * nexts,
		 int e)
{
  vnet_crypto_op_t *op = ops;
  u32 n_fail, n_ops = vec_len (ops);

  if (n_ops == 0)
    return;

  n_fail = n_ops - vnet_crypto_process_ops (vm, op, n_ops);

  while (n_fail)
    {
      ASSERT (op - ops < n_ops);
      if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
	{
	  u32 err, bi = op->user_data;
	  if (op->status == VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC)
	    err = e;
	  else
	    err = ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR;
	  b[bi]->error = node->errors[err];
	  nexts[bi] = ESP_DECRYPT_NEXT_DROP;
	  n_fail--;
	}
      op++;
    }
}
static_always_inline void
esp_process_chained_ops (vlib_main_t * vm, vlib_node_runtime_t * node,
			 vnet_crypto_op_t * ops, vlib_buffer_t * b[],
			 u16 * nexts, vnet_crypto_op_chunk_t * chunks, int e)
{
  vnet_crypto_op_t *op = ops;
  u32 n_fail, n_ops = vec_len (ops);

  if (n_ops == 0)
    return;

  n_fail = n_ops - vnet_crypto_process_chained_ops (vm, op, chunks, n_ops);

  while (n_fail)
    {
      ASSERT (op - ops < n_ops);
      if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
	{
	  u32 err, bi = op->user_data;
	  if (op->status == VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC)
	    err = e;
	  else
	    err = ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR;
	  b[bi]->error = node->errors[err];
	  nexts[bi] = ESP_DECRYPT_NEXT_DROP;
	  n_fail--;
	}
      op++;
    }
}
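
/*
 * Trim 'tail' bytes (padding + ESP footer + optional ICV) from the end
 * of a possibly chained buffer; a trailing buffer left holding only
 * tail bytes is unlinked and freed.
 */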
static_always_inline void
esp_remove_tail (vlib_main_t * vm, vlib_buffer_t * b, vlib_buffer_t * last,
		 u16 tail)
{
  vlib_buffer_t *before_last = b;

  if (last->current_length > tail)
    {
      last->current_length -= tail;
      return;
    }
  ASSERT (b->flags & VLIB_BUFFER_NEXT_PRESENT);

  while (b->flags & VLIB_BUFFER_NEXT_PRESENT)
    {
      before_last = b;
      b = vlib_get_buffer (vm, b->next_buffer);
    }
  before_last->current_length -= tail - last->current_length;
  vlib_buffer_free_one (vm, before_last->next_buffer);
  before_last->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
}
/* ICV is split across the last two buffers, so move it to the last buffer
   and return a pointer to it */
static_always_inline u8 *
esp_move_icv (vlib_main_t * vm, vlib_buffer_t * first,
	      esp_decrypt_packet_data_t * pd, u16 icv_sz, u16 * dif)
{
  vlib_buffer_t *before_last, *bp;
  u16 last_sz = pd->lb->current_length;
  u16 first_sz = icv_sz - last_sz;

  bp = before_last = first;
  while (bp->flags & VLIB_BUFFER_NEXT_PRESENT)
    {
      before_last = bp;
      bp = vlib_get_buffer (vm, bp->next_buffer);
    }

  u8 *lb_curr = vlib_buffer_get_current (pd->lb);
  memmove (lb_curr + first_sz, lb_curr, last_sz);
  clib_memcpy_fast (lb_curr, vlib_buffer_get_tail (before_last) - first_sz,
		    first_sz);
  before_last->current_length -= first_sz;
  if (dif)
    dif[0] = first_sz;
  pd->lb = before_last;
  pd->icv_removed = 1;
  pd->free_buffer_index = before_last->next_buffer;
  before_last->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
  return lb_curr;
}
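
/*
 * With extended sequence numbers (RFC 4303), the high 32 bits of the
 * sequence number are not carried in the packet but are included in the
 * ICV computation, so seq_hi must be appended after the ciphertext
 * (immediately before the ICV) before the integrity check can run.
 */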
static_always_inline int
esp_insert_esn (vlib_main_t * vm, ipsec_sa_t * sa,
		esp_decrypt_packet_data_t * pd, vnet_crypto_op_t * op,
		u16 * len, vlib_buffer_t * b, u8 * payload)
{
  if (!ipsec_sa_is_set_USE_ESN (sa))
    return 0;

  /* shift ICV by 4 bytes to insert ESN */
  u32 seq_hi = clib_host_to_net_u32 (sa->seq_hi);
  u8 tmp[ESP_MAX_ICV_SIZE], sz = sizeof (sa->seq_hi);

  if (pd->icv_removed)
    {
      u16 space_left = vlib_buffer_space_left_at_end (vm, pd->lb);
      if (space_left >= sz)
	{
	  clib_memcpy_fast (vlib_buffer_get_tail (pd->lb), &seq_hi, sz);
	  op->len += sz;
	}
      else
	return 1;

      len[0] = b->current_length;
    }
  else
    {
      clib_memcpy_fast (tmp, payload + len[0], ESP_MAX_ICV_SIZE);
      clib_memcpy_fast (payload + len[0], &seq_hi, sz);
      clib_memcpy_fast (payload + len[0] + sz, tmp, ESP_MAX_ICV_SIZE);
      op->len += sz;
      op->digest += sz;
    }
  return 0;
}
static_always_inline u8 *
esp_move_icv_esn (vlib_main_t * vm, vlib_buffer_t * first,
		  esp_decrypt_packet_data_t * pd, u16 icv_sz, ipsec_sa_t * sa,
		  u8 * extra_esn, vnet_crypto_op_t * op)
{
  u16 dif = 0;
  u8 *digest = esp_move_icv (vm, first, pd, icv_sz, &dif);
  if (dif)
    op->len += dif;

  if (ipsec_sa_is_set_USE_ESN (sa))
    {
      u8 sz = sizeof (sa->seq_hi);
      u32 seq_hi = clib_host_to_net_u32 (sa->seq_hi);
      u16 space_left = vlib_buffer_space_left_at_end (vm, pd->lb);

      if (space_left >= sz)
	{
	  clib_memcpy_fast (vlib_buffer_get_tail (pd->lb), &seq_hi, sz);
	  op->len += sz;
	}
      else
	{
	  /* no space for ESN at the tail, use the next buffer
	   * (which holds the moved ICV) */
	  ASSERT (pd->icv_removed);
	  vlib_buffer_t *tmp = vlib_get_buffer (vm, pd->free_buffer_index);
	  clib_memcpy_fast (vlib_buffer_get_current (tmp) - sz, &seq_hi, sz);
	  extra_esn[0] = 1;
	  op->len += sz;
	}
    }
  return digest;
}
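
/*
 * The first pass below accumulates integrity and decrypt ops in the
 * per-thread ptd vectors (the chained_* variants carry multi-buffer
 * packets) and only submits them to the crypto engine once the whole
 * frame has been walked.
 */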
always_inline uword
esp_decrypt_inline (vlib_main_t * vm,
		    vlib_node_runtime_t * node, vlib_frame_t * from_frame,
		    int is_ip6, int is_tun)
{
  ipsec_main_t *im = &ipsec_main;
  u32 thread_index = vm->thread_index;
  u16 len;
  ipsec_per_thread_data_t *ptd = vec_elt_at_index (im->ptd, thread_index);
  u32 *from = vlib_frame_vector_args (from_frame);
  u32 n_left = from_frame->n_vectors;
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
  u16 nexts[VLIB_FRAME_SIZE], *next = nexts;
  esp_decrypt_packet_data_t pkt_data[VLIB_FRAME_SIZE], *pd = pkt_data;
  esp_decrypt_packet_data_t cpd = { };
  u32 current_sa_index = ~0, current_sa_bytes = 0, current_sa_pkts = 0;
  const u8 esp_sz = sizeof (esp_header_t);
  ipsec_sa_t *sa0 = 0;
  vnet_crypto_op_t _op, *op = &_op;
  vnet_crypto_op_chunk_t *ch;
  vnet_crypto_op_t **crypto_ops = &ptd->crypto_ops;
  vnet_crypto_op_t **integ_ops = &ptd->integ_ops;

  vlib_get_buffers (vm, from, b, n_left);
  vec_reset_length (ptd->crypto_ops);
  vec_reset_length (ptd->integ_ops);
  vec_reset_length (ptd->chained_crypto_ops);
  vec_reset_length (ptd->chained_integ_ops);
  vec_reset_length (ptd->chunks);
  clib_memset_u16 (nexts, -1, n_left);

  while (n_left > 0)
    {
      u8 *payload;

      if (n_left > 2)
	{
	  u8 *p;
	  vlib_prefetch_buffer_header (b[2], LOAD);
	  p = vlib_buffer_get_current (b[1]);
	  CLIB_PREFETCH (p, CLIB_CACHE_LINE_BYTES, LOAD);
	  p -= CLIB_CACHE_LINE_BYTES;
	  CLIB_PREFETCH (p, CLIB_CACHE_LINE_BYTES, LOAD);
	}
      u32 n_bufs = vlib_buffer_chain_linearize (vm, b[0]);
      if (n_bufs == 0)
	{
	  b[0]->error = node->errors[ESP_DECRYPT_ERROR_NO_BUFFERS];
	  next[0] = ESP_DECRYPT_NEXT_DROP;
	  goto next;
	}

      if (vnet_buffer (b[0])->ipsec.sad_index != current_sa_index)
	{
	  if (current_sa_pkts)
	    vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
					     current_sa_index,
					     current_sa_pkts,
					     current_sa_bytes);
	  current_sa_bytes = current_sa_pkts = 0;

	  current_sa_index = vnet_buffer (b[0])->ipsec.sad_index;
	  sa0 = pool_elt_at_index (im->sad, current_sa_index);
	  cpd.icv_sz = sa0->integ_icv_size;
	  cpd.iv_sz = sa0->crypto_iv_size;
	  cpd.flags = sa0->flags;
	  cpd.sa_index = current_sa_index;
	}

      if (PREDICT_FALSE (~0 == sa0->decrypt_thread_index))
	{
	  /* this is the first packet to use this SA, claim the SA
	   * for this thread. this could happen simultaneously on
	   * another thread */
	  clib_atomic_cmp_and_swap (&sa0->decrypt_thread_index, ~0,
				    ipsec_sa_assign_thread (thread_index));
	}

      if (PREDICT_TRUE (thread_index != sa0->decrypt_thread_index))
	{
	  next[0] = ESP_DECRYPT_NEXT_HANDOFF;
	  goto next;
	}

      /* store packet data for next round for easier prefetch */
      pd->sa_data = cpd.sa_data;
      pd->current_data = b[0]->current_data;
      pd->hdr_sz = pd->current_data - vnet_buffer (b[0])->l3_hdr_offset;
      payload = b[0]->data + pd->current_data;
      pd->seq = clib_host_to_net_u32 (((esp_header_t *) payload)->seq);
      pd->free_buffer_index = 0;
      pd->icv_removed = 0;
      pd->lb = b[0];
      if (n_bufs > 1)
	{
	  /* find last buffer in the chain */
	  while (pd->lb->flags & VLIB_BUFFER_NEXT_PRESENT)
	    pd->lb = vlib_get_buffer (vm, pd->lb->next_buffer);

	  crypto_ops = &ptd->chained_crypto_ops;
	  integ_ops = &ptd->chained_integ_ops;
	}

      pd->current_length = b[0]->current_length;

      /* anti-replay check */
      if (ipsec_sa_anti_replay_check (sa0, pd->seq))
	{
	  b[0]->error = node->errors[ESP_DECRYPT_ERROR_REPLAY];
	  next[0] = ESP_DECRYPT_NEXT_DROP;
	  goto next;
	}

      if (pd->current_length < cpd.icv_sz + esp_sz + cpd.iv_sz)
	{
	  b[0]->error = node->errors[ESP_DECRYPT_ERROR_RUNT];
	  next[0] = ESP_DECRYPT_NEXT_DROP;
	  goto next;
	}

      len = pd->current_length - cpd.icv_sz;
      current_sa_pkts += 1;
      current_sa_bytes += vlib_buffer_length_in_chain (vm, b[0]);
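
      /*
       * Integrity op: per RFC 4303 the ICV is computed over the ESP
       * header, IV and ciphertext (plus seq_hi for ESN SAs), so the op
       * spans from the ESP header up to, but not including, the received
       * ICV, which becomes the digest to verify.
       */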
      if (PREDICT_TRUE (sa0->integ_op_id != VNET_CRYPTO_OP_NONE))
	{
	  vnet_crypto_op_init (op, sa0->integ_op_id);
	  op->key_index = sa0->integ_key_index;
	  op->src = payload;
	  op->flags = VNET_CRYPTO_OP_FLAG_HMAC_CHECK;
	  op->user_data = b - bufs;
	  op->digest = payload + len;
	  op->digest_len = cpd.icv_sz;
	  op->len = len;

	  if (pd->lb != b[0])
	    {
	      /* buffer is chained */
	      vlib_buffer_t *cb = b[0];
	      op->len = pd->current_length;

	      /* special case when ICV is split and needs to be reassembled
	       * first -> move it to the last buffer. Also take into account
	       * that ESN needs to be added after encrypted data and may or
	       * may not fit in the tail. */
	      if (pd->lb->current_length < cpd.icv_sz)
		{
		  u8 extra_esn = 0;
		  op->digest =
		    esp_move_icv_esn (vm, b[0], pd, cpd.icv_sz, sa0,
				      &extra_esn, op);
		  if (extra_esn)
		    {
		      /* esn is in the last buffer, that was unlinked from
		       * the chain */
		      op->len = b[0]->current_length;
		    }
		  if (pd->lb == b[0])
		    {
		      /* we now have a single buffer of crypto data, adjust
		       * the length (second buffer contains only ICV) */
		      integ_ops = &ptd->integ_ops;
		      crypto_ops = &ptd->crypto_ops;
		      len = b[0]->current_length;
		      goto out;
		    }
		}
	      else
		op->digest = vlib_buffer_get_tail (pd->lb) - cpd.icv_sz;

	      op->flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
	      op->chunk_index = vec_len (ptd->chunks);
	      vec_add2 (ptd->chunks, ch, 1);
	      ch->len = pd->current_length;
	      ch->src = payload;
	      cb = vlib_get_buffer (vm, cb->next_buffer);
	      op->n_chunks = 1;
	      while (1)
		{
		  vec_add2 (ptd->chunks, ch, 1);
		  op->n_chunks += 1;
		  ch->src = vlib_buffer_get_current (cb);
		  if (pd->lb == cb)
		    {
		      if (pd->icv_removed)
			ch->len = cb->current_length;
		      else
			ch->len = cb->current_length - cpd.icv_sz;
		      if (ipsec_sa_is_set_USE_ESN (sa0))
			{
			  u32 seq_hi = clib_host_to_net_u32 (sa0->seq_hi);
			  u8 tmp[ESP_MAX_ICV_SIZE], sz = sizeof (sa0->seq_hi);
			  u8 *esn;
			  vlib_buffer_t *tmp_b;
			  u16 space_left = vlib_buffer_space_left_at_end
			    (vm, pd->lb);
			  if (space_left < sz)
			    {
			      if (pd->icv_removed)
				{
				  /* use pre-data area from the last buffer
				     that was removed from the chain */
				  tmp_b = vlib_get_buffer (vm,
							   pd->free_buffer_index);
				  esn = tmp_b->data - sz;
				}
			      else
				{
				  /* no space, need to allocate new buffer */
				  u32 tmp_bi = 0;
				  vlib_buffer_alloc (vm, &tmp_bi, 1);
				  tmp_b = vlib_get_buffer (vm, tmp_bi);
				  esn = tmp_b->data;
				  pd->free_buffer_index = tmp_bi;
				}
			      clib_memcpy_fast (esn, &seq_hi, sz);
			      vec_add2 (ptd->chunks, ch, 1);
			      op->n_chunks += 1;
			      ch->src = esn;
			      ch->len = sz;
			    }
			  else
			    {
			      if (pd->icv_removed)
				{
				  clib_memcpy_fast (vlib_buffer_get_tail
						    (pd->lb), &seq_hi, sz);
				}
			      else
				{
				  clib_memcpy_fast (tmp, op->digest,
						    ESP_MAX_ICV_SIZE);
				  clib_memcpy_fast (op->digest, &seq_hi, sz);
				  clib_memcpy_fast (op->digest + sz, tmp,
						    ESP_MAX_ICV_SIZE);
				  op->digest += sz;
				}
			      ch->len += sz;
			    }
			}
		      break;
		    }
		  else
		    ch->len = cb->current_length;

		  if (!(cb->flags & VLIB_BUFFER_NEXT_PRESENT))
		    break;

		  cb = vlib_get_buffer (vm, cb->next_buffer);
		}
	    }
	  else
	    esp_insert_esn (vm, sa0, pd, op, &len, b[0], payload);

	out:
	  vec_add_aligned (integ_ops[0], op, 1, CLIB_CACHE_LINE_BYTES);
	}
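
      /*
       * Decrypt op: the IV is used in place from the packet; for AEAD
       * SAs (AES-GCM) the AAD and the (Salt || IV) nonce are built in
       * scratch space in front of the packet and the trailing ICV serves
       * as the authentication tag.
       */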
      if (sa0->crypto_dec_op_id != VNET_CRYPTO_OP_NONE)
	{
	  vnet_crypto_op_init (op, sa0->crypto_dec_op_id);
	  op->key_index = sa0->crypto_key_index;
	  op->iv = payload;

	  if (ipsec_sa_is_set_IS_AEAD (sa0))
	    {
	      esp_header_t *esp0;
	      esp_aead_t *aad;
	      u8 *scratch;

	      /*
	       * construct the AAD and the nonce (Salt || IV) in a scratch
	       * space in front of the IP header.
	       */
	      scratch = payload - esp_sz;
	      esp0 = (esp_header_t *) (scratch);

	      scratch -= (sizeof (*aad) + pd->hdr_sz);
	      op->aad = scratch;

	      esp_aad_fill (op, esp0, sa0);

	      /*
	       * we don't need to refer to the ESP header anymore so we
	       * can overwrite it with the salt and use the IV where it is
	       * to form the nonce = (Salt + IV); for AES-GCM that is the
	       * 4-byte salt followed by the 8-byte IV.
	       */
	      op->iv -= sizeof (sa0->salt);
	      clib_memcpy_fast (op->iv, &sa0->salt, sizeof (sa0->salt));

	      op->tag = payload + len;
	      op->tag_len = 16;
	    }
	  op->src = op->dst = payload += cpd.iv_sz;
	  op->len = len - cpd.iv_sz;
	  op->user_data = b - bufs;

	  if (pd->lb != b[0])
	    {
	      /* buffer is chained */
	      vlib_buffer_t *cb = b[0];
	      op->flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
	      op->chunk_index = vec_len (ptd->chunks);
	      vec_add2 (ptd->chunks, ch, 1);
	      ch->len = len - cpd.iv_sz + cpd.icv_sz;
	      ch->src = ch->dst = payload;
	      cb = vlib_get_buffer (vm, cb->next_buffer);
	      op->n_chunks = 1;

	      while (1)
		{
		  vec_add2 (ptd->chunks, ch, 1);
		  op->n_chunks += 1;
		  ch->src = ch->dst = vlib_buffer_get_current (cb);
		  if (pd->lb == cb)
		    {
		      if (ipsec_sa_is_set_IS_AEAD (sa0))
			{
			  if (pd->lb->current_length < cpd.icv_sz)
			    {
			      u16 dif = 0;
			      op->tag =
				esp_move_icv (vm, b[0], pd, cpd.icv_sz, &dif);

			      /* this chunk does not contain crypto data */
			      op->n_chunks -= 1;

			      /* and fix previous chunk's length as it might
			       * have been changed */
			      ASSERT (op->n_chunks > 0);
			      if (pd->lb == b[0])
				ch[-1].len -= dif;
			      else
				ch[-1].len = pd->lb->current_length;
			      break;
			    }
			  else
			    op->tag =
			      vlib_buffer_get_tail (pd->lb) - cpd.icv_sz;
			}

		      if (pd->icv_removed)
			ch->len = cb->current_length;
		      else
			ch->len = cb->current_length - cpd.icv_sz;
		    }
		  else
		    ch->len = cb->current_length;

		  if (!(cb->flags & VLIB_BUFFER_NEXT_PRESENT))
		    break;

		  cb = vlib_get_buffer (vm, cb->next_buffer);
		}
	    }

	  vec_add_aligned (crypto_ops[0], op, 1, CLIB_CACHE_LINE_BYTES);
	}
    next:
      n_left -= 1;
      next += 1;
      pd += 1;
      b += 1;
    }

  if (PREDICT_TRUE (~0 != current_sa_index))
    vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
				     current_sa_index, current_sa_pkts,
				     current_sa_bytes);

  esp_process_ops (vm, node, ptd->integ_ops, bufs, nexts,
		   ESP_DECRYPT_ERROR_INTEG_ERROR);
  esp_process_chained_ops (vm, node, ptd->chained_integ_ops, bufs, nexts,
			   ptd->chunks, ESP_DECRYPT_ERROR_INTEG_ERROR);

  esp_process_ops (vm, node, ptd->crypto_ops, bufs, nexts,
		   ESP_DECRYPT_ERROR_DECRYPTION_FAILED);
  esp_process_chained_ops (vm, node, ptd->chained_crypto_ops, bufs, nexts,
			   ptd->chunks, ESP_DECRYPT_ERROR_DECRYPTION_FAILED);
  /* Post decryption round - adjust packet data start and length and next
     node */

  n_left = from_frame->n_vectors;
  next = nexts;
  pd = pkt_data;
  b = bufs;

  while (n_left)
    {
      const u8 tun_flags = IPSEC_SA_FLAG_IS_TUNNEL |
	IPSEC_SA_FLAG_IS_TUNNEL_V6;
      if (n_left >= 2)
	{
	  void *data = b[1]->data + pd[1].current_data;

	  /* buffer metadata */
	  vlib_prefetch_buffer_header (b[1], LOAD);

	  /* esp_footer_t */
	  CLIB_PREFETCH (data + pd[1].current_length - pd[1].icv_sz - 2,
			 CLIB_CACHE_LINE_BYTES, LOAD);

	  /* packet headers */
	  CLIB_PREFETCH (data - CLIB_CACHE_LINE_BYTES,
			 CLIB_CACHE_LINE_BYTES * 2, LOAD);
	}
      if (next[0] < ESP_DECRYPT_N_NEXT)
	goto trace;

      sa0 = vec_elt_at_index (im->sad, pd->sa_index);
      /*
       * redo the anti-replay check
       * in this frame say we have sequence numbers, s, s+1, s+1, s+1
       * and s and s+1 are in the window. When we did the anti-replay
       * check above we did so against the state of the window (W),
       * after packet s-1. So each of the packets in the sequence will be
       * accepted.
       * This time s will be checked against Ws-1, s+1 checked against Ws
       * (i.e. the window state is updated/advanced)
       * so this time the successive s+1 packet will be dropped.
       * This is a consequence of batching the decrypts. If the
       * check-decrypt-advance process was done for each packet it would
       * be fine. But we batch the decrypts because it's much more
       * efficient to do so in SW, and necessary if we offload to HW,
       * where the process is async.
       *
       * You're probably thinking, but this means an attacker can send the
       * above sequence and cause VPP to perform decrypts that will fail,
       * and that's true. But if the attacker can determine s (a valid
       * sequence number in the window), which is non-trivial, it can
       * generate a sequence s, s+1, s+2, s+3, ... s+n and nothing will
       * prevent any implementation, sequential or batching, from
       * decrypting these.
       */
      if (ipsec_sa_anti_replay_check (sa0, pd->seq))
	{
	  b[0]->error = node->errors[ESP_DECRYPT_ERROR_REPLAY];
	  next[0] = ESP_DECRYPT_NEXT_DROP;
	  goto trace;
	}

      ipsec_sa_anti_replay_advance (sa0, pd->seq);
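
      /*
       * Locate the ESP footer (pad length + next header), which sits
       * just ahead of the ICV; in a buffer chain it may straddle the
       * last two buffers, hence the special cases below.
       */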
      u8 pad_length = 0, next_header = 0;
      u16 icv_sz = pd->icv_removed ? 0 : pd->icv_sz;

      if (pd->free_buffer_index)
	vlib_buffer_free_one (vm, pd->free_buffer_index);

      if (pd->lb->current_length < sizeof (esp_footer_t) + icv_sz)
	{
	  /* ESP footer is either split across two buffers or sits in the
	   * next-to-last buffer */

	  vlib_buffer_t *before_last = b[0], *bp = b[0];
	  while (bp->flags & VLIB_BUFFER_NEXT_PRESENT)
	    {
	      before_last = bp;
	      bp = vlib_get_buffer (vm, bp->next_buffer);
	    }
	  u8 *bt = vlib_buffer_get_tail (before_last);

	  if (pd->lb->current_length == icv_sz)
	    {
	      esp_footer_t *f = (esp_footer_t *) (bt - sizeof (*f));
	      pad_length = f->pad_length;
	      next_header = f->next_header;
	    }
	  else
	    {
	      pad_length = (bt - 1)[0];
	      next_header = ((u8 *) vlib_buffer_get_current (pd->lb))[0];
	    }
	}
      else
	{
	  esp_footer_t *f =
	    (esp_footer_t *) (pd->lb->data + pd->lb->current_data +
			      pd->lb->current_length - sizeof (esp_footer_t) -
			      icv_sz);
	  pad_length = f->pad_length;
	  next_header = f->next_header;
	}
      u16 adv = pd->iv_sz + esp_sz;
      u16 tail = sizeof (esp_footer_t) + pad_length + icv_sz;
      u16 tail_orig = sizeof (esp_footer_t) + pad_length + pd->icv_sz;
      b[0]->flags &= ~VLIB_BUFFER_TOTAL_LENGTH_VALID;
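
      /*
       * Transport mode: the original IP header (and optional UDP encap
       * header) sits immediately in front of the ESP header, so it is
       * moved forward by 'adv' (ESP header + IV) to butt up against the
       * decrypted payload; the UDP encap header, if present, is dropped
       * and the tail (pad + footer + ICV) is trimmed:
       *
       *   before: [IP][UDP][ESP][IV][payload][pad][footer][ICV]
       *   after :               [IP][payload]
       */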
      if ((pd->flags & tun_flags) == 0 && !is_tun)	/* transport mode */
	{
	  u8 udp_sz = (is_ip6 == 0 && pd->flags & IPSEC_SA_FLAG_UDP_ENCAP) ?
	    sizeof (udp_header_t) : 0;
	  u16 ip_hdr_sz = pd->hdr_sz - udp_sz;
	  u8 *old_ip = b[0]->data + pd->current_data - ip_hdr_sz - udp_sz;
	  u8 *ip = old_ip + adv + udp_sz;

	  if (is_ip6 && ip_hdr_sz > 64)
	    memmove (ip, old_ip, ip_hdr_sz);
	  else
	    clib_memcpy_le64 (ip, old_ip, ip_hdr_sz);

	  b[0]->current_data = pd->current_data + adv - ip_hdr_sz;
	  b[0]->current_length += ip_hdr_sz - adv;
	  esp_remove_tail (vm, b[0], pd->lb, tail);

	  if (is_ip6)
	    {
	      ip6_header_t *ip6 = (ip6_header_t *) ip;
	      u16 len = clib_net_to_host_u16 (ip6->payload_length);
	      len -= adv + tail_orig;
	      ip6->payload_length = clib_host_to_net_u16 (len);
	      ip6->protocol = next_header;
	      next[0] = ESP_DECRYPT_NEXT_IP6_INPUT;
	    }
	  else
	    {
	      ip4_header_t *ip4 = (ip4_header_t *) ip;
	      ip_csum_t sum = ip4->checksum;
	      u16 len = clib_net_to_host_u16 (ip4->length);
	      len = clib_host_to_net_u16 (len - adv - tail_orig - udp_sz);
	      sum = ip_csum_update (sum, ip4->protocol, next_header,
				    ip4_header_t, protocol);
	      sum = ip_csum_update (sum, ip4->length, len,
				    ip4_header_t, length);
	      ip4->checksum = ip_csum_fold (sum);
	      ip4->protocol = next_header;
	      ip4->length = len;
	      next[0] = ESP_DECRYPT_NEXT_IP4_INPUT;
	    }
	}
      else
	{
	  if (PREDICT_TRUE (next_header == IP_PROTOCOL_IP_IN_IP))
	    {
	      next[0] = ESP_DECRYPT_NEXT_IP4_INPUT;
	      b[0]->current_data = pd->current_data + adv;
	      b[0]->current_length = pd->current_length - adv;
	      esp_remove_tail (vm, b[0], pd->lb, tail);
	    }
	  else if (next_header == IP_PROTOCOL_IPV6)
	    {
	      next[0] = ESP_DECRYPT_NEXT_IP6_INPUT;
	      b[0]->current_data = pd->current_data + adv;
	      b[0]->current_length = pd->current_length - adv;
	      esp_remove_tail (vm, b[0], pd->lb, tail);
	    }
	  else
	    {
	      if (is_tun && next_header == IP_PROTOCOL_GRE)
		{
		  gre_header_t *gre;

		  b[0]->current_data = pd->current_data + adv;
		  b[0]->current_length = pd->current_length - adv - tail;

		  gre = vlib_buffer_get_current (b[0]);

		  vlib_buffer_advance (b[0], sizeof (*gre));

		  switch (clib_net_to_host_u16 (gre->protocol))
		    {
		    case GRE_PROTOCOL_teb:
		      vnet_update_l2_len (b[0]);
		      next[0] = ESP_DECRYPT_NEXT_L2_INPUT;
		      break;
		    case GRE_PROTOCOL_ip4:
		      next[0] = ESP_DECRYPT_NEXT_IP4_INPUT;
		      break;
		    case GRE_PROTOCOL_ip6:
		      next[0] = ESP_DECRYPT_NEXT_IP6_INPUT;
		      break;
		    default:
		      b[0]->error =
			node->errors[ESP_DECRYPT_ERROR_UNSUP_PAYLOAD];
		      next[0] = ESP_DECRYPT_NEXT_DROP;
		      break;
		    }
		}
	      else
		{
		  next[0] = ESP_DECRYPT_NEXT_DROP;
		  b[0]->error = node->errors[ESP_DECRYPT_ERROR_UNSUP_PAYLOAD];
		}
	    }
	}
      if (is_tun)
	{
	  if (ipsec_sa_is_set_IS_PROTECT (sa0))
	    {
	      /*
	       * There are two encap possibilities
	       * 1) the tunnel and the SA are providing encap, i.e. it's
	       *   MAC | SA-IP | TUN-IP | ESP | PAYLOAD
	       * implying the SA is in tunnel mode (on a tunnel interface)
	       * 2) only the tunnel provides encap
	       *   MAC | TUN-IP | ESP | PAYLOAD
	       * implying the SA is in transport mode.
	       *
	       * For 2) we need only strip the tunnel encap and we're good,
	       * since the tunnel and crypto encap (in the tun-protect
	       * object) are the same and we verified above that these
	       * match.
	       * For 1) we need to strip the SA-IP outer headers, to
	       * reveal the tunnel IP and then check that this matches
	       * the configured tunnel.
	       */
	      const ipsec_tun_protect_t *itp;

	      itp = ipsec_tun_protect_get
		(vnet_buffer (b[0])->ipsec.protect_index);

	      if (PREDICT_TRUE (next_header == IP_PROTOCOL_IP_IN_IP))
		{
		  const ip4_header_t *ip4;

		  ip4 = vlib_buffer_get_current (b[0]);

		  if (!ip46_address_is_equal_v4 (&itp->itp_tun.src,
						 &ip4->dst_address) ||
		      !ip46_address_is_equal_v4 (&itp->itp_tun.dst,
						 &ip4->src_address))
		    {
		      next[0] = ESP_DECRYPT_NEXT_DROP;
		      b[0]->error =
			node->errors[ESP_DECRYPT_ERROR_TUN_NO_PROTO];
		    }
		}
	      else if (next_header == IP_PROTOCOL_IPV6)
		{
		  const ip6_header_t *ip6;

		  ip6 = vlib_buffer_get_current (b[0]);

		  if (!ip46_address_is_equal_v6 (&itp->itp_tun.src,
						 &ip6->dst_address) ||
		      !ip46_address_is_equal_v6 (&itp->itp_tun.dst,
						 &ip6->src_address))
		    {
		      next[0] = ESP_DECRYPT_NEXT_DROP;
		      b[0]->error =
			node->errors[ESP_DECRYPT_ERROR_TUN_NO_PROTO];
		    }
		}
	    }
	}
    trace:
      if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
	{
	  esp_decrypt_trace_t *tr;
	  tr = vlib_add_trace (vm, node, b[0], sizeof (*tr));
	  sa0 = pool_elt_at_index (im->sad,
				   vnet_buffer (b[0])->ipsec.sad_index);
	  tr->crypto_alg = sa0->crypto_alg;
	  tr->integ_alg = sa0->integ_alg;
	  tr->seq = pd->seq;
	  tr->sa_seq = sa0->last_seq;
	  tr->sa_seq_hi = sa0->seq_hi;
	}

      /* next */
      n_left -= 1;
      next += 1;
      pd += 1;
      b += 1;
    }

  n_left = from_frame->n_vectors;
  vlib_node_increment_counter (vm, node->node_index,
			       ESP_DECRYPT_ERROR_RX_PKTS, n_left);

  vlib_buffer_enqueue_to_next (vm, node, from, nexts, n_left);

  return n_left;
}
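
/*
 * Thin wrappers: is_ip6/is_tun are compile-time constants here, so the
 * compiler emits a specialized copy of esp_decrypt_inline for each of
 * the four node flavours.
 */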
VLIB_NODE_FN (esp4_decrypt_node) (vlib_main_t * vm,
				  vlib_node_runtime_t * node,
				  vlib_frame_t * from_frame)
{
  return esp_decrypt_inline (vm, node, from_frame, 0 /* is_ip6 */ ,
			     0 /* is_tun */ );
}

VLIB_NODE_FN (esp4_decrypt_tun_node) (vlib_main_t * vm,
				      vlib_node_runtime_t * node,
				      vlib_frame_t * from_frame)
{
  return esp_decrypt_inline (vm, node, from_frame, 0 /* is_ip6 */ ,
			     1 /* is_tun */ );
}

VLIB_NODE_FN (esp6_decrypt_node) (vlib_main_t * vm,
				  vlib_node_runtime_t * node,
				  vlib_frame_t * from_frame)
{
  return esp_decrypt_inline (vm, node, from_frame, 1 /* is_ip6 */ ,
			     0 /* is_tun */ );
}

VLIB_NODE_FN (esp6_decrypt_tun_node) (vlib_main_t * vm,
				      vlib_node_runtime_t * node,
				      vlib_frame_t * from_frame)
{
  return esp_decrypt_inline (vm, node, from_frame, 1 /* is_ip6 */ ,
			     1 /* is_tun */ );
}
VLIB_REGISTER_NODE (esp4_decrypt_node) = {
  .name = "esp4-decrypt",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_decrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(esp_decrypt_error_strings),
  .error_strings = esp_decrypt_error_strings,

  .n_next_nodes = ESP_DECRYPT_N_NEXT,
  .next_nodes = {
    [ESP_DECRYPT_NEXT_DROP] = "ip4-drop",
    [ESP_DECRYPT_NEXT_IP4_INPUT] = "ip4-input-no-checksum",
    [ESP_DECRYPT_NEXT_IP6_INPUT] = "ip6-input",
    [ESP_DECRYPT_NEXT_L2_INPUT] = "l2-input",
    [ESP_DECRYPT_NEXT_HANDOFF] = "esp4-decrypt-handoff",
  },
};
VLIB_REGISTER_NODE (esp6_decrypt_node) = {
  .name = "esp6-decrypt",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_decrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(esp_decrypt_error_strings),
  .error_strings = esp_decrypt_error_strings,

  .n_next_nodes = ESP_DECRYPT_N_NEXT,
  .next_nodes = {
    [ESP_DECRYPT_NEXT_DROP] = "ip6-drop",
    [ESP_DECRYPT_NEXT_IP4_INPUT] = "ip4-input-no-checksum",
    [ESP_DECRYPT_NEXT_IP6_INPUT] = "ip6-input",
    [ESP_DECRYPT_NEXT_L2_INPUT] = "l2-input",
    [ESP_DECRYPT_NEXT_HANDOFF] = "esp6-decrypt-handoff",
  },
};
VLIB_REGISTER_NODE (esp4_decrypt_tun_node) = {
  .name = "esp4-decrypt-tun",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_decrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN(esp_decrypt_error_strings),
  .error_strings = esp_decrypt_error_strings,
  .n_next_nodes = ESP_DECRYPT_N_NEXT,
  .next_nodes = {
    [ESP_DECRYPT_NEXT_DROP] = "ip4-drop",
    [ESP_DECRYPT_NEXT_IP4_INPUT] = "ip4-input-no-checksum",
    [ESP_DECRYPT_NEXT_IP6_INPUT] = "ip6-input",
    [ESP_DECRYPT_NEXT_L2_INPUT] = "l2-input",
    [ESP_DECRYPT_NEXT_HANDOFF] = "esp4-decrypt-tun-handoff",
  },
};
VLIB_REGISTER_NODE (esp6_decrypt_tun_node) = {
  .name = "esp6-decrypt-tun",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_decrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN(esp_decrypt_error_strings),
  .error_strings = esp_decrypt_error_strings,
  .n_next_nodes = ESP_DECRYPT_N_NEXT,
  .next_nodes = {
    [ESP_DECRYPT_NEXT_DROP] = "ip6-drop",
    [ESP_DECRYPT_NEXT_IP4_INPUT] = "ip4-input-no-checksum",
    [ESP_DECRYPT_NEXT_IP6_INPUT] = "ip6-input",
    [ESP_DECRYPT_NEXT_L2_INPUT] = "l2-input",
    [ESP_DECRYPT_NEXT_HANDOFF] = "esp6-decrypt-tun-handoff",
  },
};
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */