/*
 * esp_decrypt.c : IPSec ESP decrypt node
 *
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vnet/vnet.h>
#include <vnet/api_errno.h>
#include <vnet/ip/ip.h>
#include <vnet/l2/l2_input.h>

#include <vnet/ipsec/ipsec.h>
#include <vnet/ipsec/esp.h>
#include <vnet/ipsec/ipsec_io.h>
#include <vnet/ipsec/ipsec_tun.h>

#include <vnet/gre/packet.h>
#define foreach_esp_decrypt_next                                              \
  _ (DROP, "error-drop")                                                      \
  _ (IP4_INPUT, "ip4-input-no-checksum")                                      \
  _ (IP6_INPUT, "ip6-input")                                                  \
  _ (L2_INPUT, "l2-input")                                                    \
  _ (MPLS_INPUT, "mpls-input")                                                \
  _ (HANDOFF, "handoff")

#define _(v, s) ESP_DECRYPT_NEXT_##v,
typedef enum
{
  foreach_esp_decrypt_next
#undef _
    ESP_DECRYPT_N_NEXT,
} esp_decrypt_next_t;
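
/* The element macro expands the list above into enum members
 * (ESP_DECRYPT_NEXT_DROP, ESP_DECRYPT_NEXT_IP4_INPUT, ...) terminated by
 * the ESP_DECRYPT_N_NEXT count; the node registrations at the bottom of
 * this file map each member to the named next node. */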

#define foreach_esp_decrypt_post_next                                         \
  _ (DROP, "error-drop")                                                      \
  _ (IP4_INPUT, "ip4-input-no-checksum")                                      \
  _ (IP6_INPUT, "ip6-input")                                                  \
  _ (MPLS_INPUT, "mpls-input")                                                \
  _ (L2_INPUT, "l2-input")

#define _(v, s) ESP_DECRYPT_POST_NEXT_##v,
typedef enum
{
  foreach_esp_decrypt_post_next
#undef _
    ESP_DECRYPT_POST_N_NEXT,
} esp_decrypt_post_next_t;
#define foreach_esp_decrypt_error                                             \
  _ (RX_PKTS, "ESP pkts received")                                            \
  _ (RX_POST_PKTS, "ESP-POST pkts received")                                  \
  _ (HANDOFF, "hand-off")                                                     \
  _ (DECRYPTION_FAILED, "ESP decryption failed")                              \
  _ (INTEG_ERROR, "Integrity check failed")                                   \
  _ (CRYPTO_ENGINE_ERROR, "crypto engine error (packet dropped)")             \
  _ (REPLAY, "SA replayed packet")                                            \
  _ (RUNT, "undersized packet")                                               \
  _ (NO_BUFFERS, "no buffers (packet dropped)")                               \
  _ (OVERSIZED_HEADER, "buffer with oversized header (dropped)")              \
  _ (NO_TAIL_SPACE, "not enough buffer tail space (dropped)")                 \
  _ (TUN_NO_PROTO, "no tunnel protocol")                                      \
  _ (UNSUP_PAYLOAD, "unsupported payload")

typedef enum
{
#define _(sym,str) ESP_DECRYPT_ERROR_##sym,
  foreach_esp_decrypt_error
#undef _
    ESP_DECRYPT_N_ERROR,
} esp_decrypt_error_t;
static char *esp_decrypt_error_strings[] = {
#define _(sym,string) string,
  foreach_esp_decrypt_error
#undef _
};
typedef struct
{
  u32 seq;
  u32 sa_seq;
  u32 sa_seq_hi;
  ipsec_crypto_alg_t crypto_alg;
  ipsec_integ_alg_t integ_alg;
} esp_decrypt_trace_t;
/* packet trace format function */
static u8 *
format_esp_decrypt_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  esp_decrypt_trace_t *t = va_arg (*args, esp_decrypt_trace_t *);

  s = format (s,
	      "esp: crypto %U integrity %U pkt-seq %d sa-seq %u sa-seq-hi %u",
	      format_ipsec_crypto_alg, t->crypto_alg, format_ipsec_integ_alg,
	      t->integ_alg, t->seq, t->sa_seq, t->sa_seq_hi);
  return s;
}
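
/* A resulting trace record renders like, e.g. (illustrative values):
 *   esp: crypto aes-gcm-128 integrity none pkt-seq 7 sa-seq 7 sa-seq-hi 0
 */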

#define ESP_ENCRYPT_PD_F_FD_TRANSPORT (1 << 2)

static_always_inline void
esp_process_ops (vlib_main_t * vm, vlib_node_runtime_t * node,
		 vnet_crypto_op_t * ops, vlib_buffer_t * b[], u16 * nexts,
		 int e)
{
  vnet_crypto_op_t *op = ops;
  u32 n_fail, n_ops = vec_len (ops);

  if (n_ops == 0)
    return;

  n_fail = n_ops - vnet_crypto_process_ops (vm, op, n_ops);

  while (n_fail)
    {
      ASSERT (op - ops < n_ops);
      if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
	{
	  u32 err, bi = op->user_data;
	  if (op->status == VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC)
	    err = e;
	  else
	    err = ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR;
	  b[bi]->error = node->errors[err];
	  nexts[bi] = ESP_DECRYPT_NEXT_DROP;
	  n_fail--;
	}
      op++;
    }
}

static_always_inline void
esp_process_chained_ops (vlib_main_t * vm, vlib_node_runtime_t * node,
			 vnet_crypto_op_t * ops, vlib_buffer_t * b[],
			 u16 * nexts, vnet_crypto_op_chunk_t * chunks, int e)
{
  vnet_crypto_op_t *op = ops;
  u32 n_fail, n_ops = vec_len (ops);

  if (PREDICT_TRUE (n_ops == 0))
    return;

  n_fail = n_ops - vnet_crypto_process_chained_ops (vm, op, chunks, n_ops);

  while (n_fail)
    {
      ASSERT (op - ops < n_ops);
      if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
	{
	  u32 err, bi = op->user_data;
	  if (op->status == VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC)
	    err = e;
	  else
	    err = ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR;
	  b[bi]->error = node->errors[err];
	  nexts[bi] = ESP_DECRYPT_NEXT_DROP;
	  n_fail--;
	}
      op++;
    }
}

static_always_inline u16
esp_remove_tail (vlib_main_t * vm, vlib_buffer_t * b, vlib_buffer_t * last,
		 u16 tail)
{
  vlib_buffer_t *before_last = b;

  if (last->current_length > tail)
    {
      last->current_length -= tail;
      return 0;
    }
  ASSERT (b->flags & VLIB_BUFFER_NEXT_PRESENT);

  while (b->flags & VLIB_BUFFER_NEXT_PRESENT)
    {
      before_last = b;
      b = vlib_get_buffer (vm, b->next_buffer);
    }
  before_last->current_length -= tail - last->current_length;
  vlib_buffer_free_one (vm, before_last->next_buffer);
  before_last->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
  return 0;
}
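
/* Worked example (illustrative sizes): with tail = 2 (footer) + 4 (pad) +
 * 16 (ICV) = 22 bytes and a last buffer holding only 10 of them, the last
 * buffer is freed and the remaining 12 bytes are trimmed from the
 * second-to-last buffer's current_length. */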

/* ICV is split across the last two buffers, so move it to the last buffer
   and return a pointer to it */
static_always_inline u8 *
esp_move_icv (vlib_main_t * vm, vlib_buffer_t * first,
	      esp_decrypt_packet_data_t * pd,
	      esp_decrypt_packet_data2_t * pd2, u16 icv_sz, u16 * dif)
{
  vlib_buffer_t *before_last, *bp;
  u16 last_sz = pd2->lb->current_length;
  u16 first_sz = icv_sz - last_sz;

  bp = before_last = first;
  while (bp->flags & VLIB_BUFFER_NEXT_PRESENT)
    {
      before_last = bp;
      bp = vlib_get_buffer (vm, bp->next_buffer);
    }

  u8 *lb_curr = vlib_buffer_get_current (pd2->lb);
  memmove (lb_curr + first_sz, lb_curr, last_sz);
  clib_memcpy_fast (lb_curr, vlib_buffer_get_tail (before_last) - first_sz,
		    first_sz);
  before_last->current_length -= first_sz;
  if (before_last == first)
    pd->current_length -= first_sz;
  clib_memset (vlib_buffer_get_tail (before_last), 0, first_sz);
  if (dif)
    dif[0] = first_sz;
  pd2->lb = before_last;
  pd2->icv_removed = 1;
  pd2->free_buffer_index = before_last->next_buffer;
  before_last->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
  return lb_curr;
}
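
/* Worked example (illustrative sizes): for a 16-byte ICV split 6/10
 * across the final two buffers, first_sz = 6; the 10 bytes already in the
 * last buffer are shifted right and the 6 bytes at the previous buffer's
 * tail are copied in front of them. The now-complete ICV lives in the
 * last buffer, which is unlinked from the chain (parked in
 * pd2->free_buffer_index) while pd2->lb moves to the previous buffer. */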

static_always_inline i16
esp_insert_esn (vlib_main_t * vm, ipsec_sa_t * sa,
		esp_decrypt_packet_data2_t * pd2, u32 * data_len,
		u8 ** digest, u16 * len, vlib_buffer_t * b, u8 * payload)
{
  if (!ipsec_sa_is_set_USE_ESN (sa))
    return 0;

  /* shift ICV by 4 bytes to insert ESN */
  u32 seq_hi = clib_host_to_net_u32 (sa->seq_hi);
  u8 tmp[ESP_MAX_ICV_SIZE], sz = sizeof (sa->seq_hi);

  if (pd2->icv_removed)
    {
      u16 space_left = vlib_buffer_space_left_at_end (vm, pd2->lb);
      if (space_left >= sz)
	{
	  clib_memcpy_fast (vlib_buffer_get_tail (pd2->lb), &seq_hi, sz);
	  *data_len += sz;
	}
      else
	return sz;

      len[0] = b->current_length;
    }
  else
    {
      clib_memcpy_fast (tmp, payload + len[0], ESP_MAX_ICV_SIZE);
      clib_memcpy_fast (payload + len[0], &seq_hi, sz);
      clib_memcpy_fast (payload + len[0] + sz, tmp, ESP_MAX_ICV_SIZE);
      *data_len += sz;
      *digest += sz;
    }

  return sz;
}
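
/* With extended sequence numbers (RFC 4303), the high 32 bits of the
 * sequence counter are never transmitted but are covered by the ICV, so
 * seq_hi is spliced in just behind the ciphertext before the integrity
 * check runs, e.g. (layout sketch):
 *
 *   | ESP | IV | ciphertext | seq_hi | ICV |
 *                            ^ 4 bytes inserted, ICV shifted right
 */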

static_always_inline u8 *
esp_move_icv_esn (vlib_main_t * vm, vlib_buffer_t * first,
		  esp_decrypt_packet_data_t * pd,
		  esp_decrypt_packet_data2_t * pd2, u16 icv_sz,
		  ipsec_sa_t * sa, u8 * extra_esn, u32 * len)
{
  u16 dif = 0;
  u8 *digest = esp_move_icv (vm, first, pd, pd2, icv_sz, &dif);
  if (dif)
    *len -= dif;

  if (ipsec_sa_is_set_USE_ESN (sa))
    {
      u8 sz = sizeof (sa->seq_hi);
      u32 seq_hi = clib_host_to_net_u32 (sa->seq_hi);
      u16 space_left = vlib_buffer_space_left_at_end (vm, pd2->lb);

      if (space_left >= sz)
	{
	  clib_memcpy_fast (vlib_buffer_get_tail (pd2->lb), &seq_hi, sz);
	  *len += sz;
	}
      else
	{
	  /* no space for ESN at the tail, use the next buffer
	   * (with ICV data) */
	  ASSERT (pd2->icv_removed);
	  vlib_buffer_t *tmp = vlib_get_buffer (vm, pd2->free_buffer_index);
	  clib_memcpy_fast (vlib_buffer_get_current (tmp) - sz, &seq_hi, sz);
	  extra_esn[0] = 1;
	}
    }
  return digest;
}

static_always_inline int
esp_decrypt_chain_integ (vlib_main_t * vm, ipsec_per_thread_data_t * ptd,
			 esp_decrypt_packet_data2_t * pd2,
			 ipsec_sa_t * sa0, vlib_buffer_t * b, u8 icv_sz,
			 u8 * start_src, u32 start_len,
			 u8 ** digest, u16 * n_ch, u32 * integ_total_len)
{
  vnet_crypto_op_chunk_t *ch;
  vlib_buffer_t *cb = vlib_get_buffer (vm, b->next_buffer);
  u16 n_chunks = 1;
  u32 total_len;
  vec_add2 (ptd->chunks, ch, 1);
  total_len = ch->len = start_len;
  ch->src = start_src;

  while (1)
    {
      vec_add2 (ptd->chunks, ch, 1);
      n_chunks += 1;
      ch->src = vlib_buffer_get_current (cb);
      if (pd2->lb == cb)
	{
	  if (pd2->icv_removed)
	    ch->len = cb->current_length;
	  else
	    ch->len = cb->current_length - icv_sz;
	  if (ipsec_sa_is_set_USE_ESN (sa0))
	    {
	      u32 seq_hi = clib_host_to_net_u32 (sa0->seq_hi);
	      u8 tmp[ESP_MAX_ICV_SIZE], sz = sizeof (sa0->seq_hi);
	      u8 *esn;
	      vlib_buffer_t *tmp_b;
	      u16 space_left = vlib_buffer_space_left_at_end (vm, pd2->lb);
	      if (space_left < sz)
		{
		  if (pd2->icv_removed)
		    {
		      /* use pre-data area from the last buffer
		         that was removed from the chain */
		      tmp_b = vlib_get_buffer (vm, pd2->free_buffer_index);
		      esn = tmp_b->data - sz;
		    }
		  else
		    {
		      /* no space, need to allocate new buffer */
		      u32 tmp_bi = 0;
		      if (vlib_buffer_alloc (vm, &tmp_bi, 1) != 1)
			return -1;
		      tmp_b = vlib_get_buffer (vm, tmp_bi);
		      esn = tmp_b->data;
		      pd2->free_buffer_index = tmp_bi;
		    }

		  clib_memcpy_fast (esn, &seq_hi, sz);

		  vec_add2 (ptd->chunks, ch, 1);
		  n_chunks += 1;
		  ch->src = esn;
		  ch->len = sz;
		}
	      else
		{
		  if (pd2->icv_removed)
		    {
		      clib_memcpy_fast (vlib_buffer_get_tail
					(pd2->lb), &seq_hi, sz);
		    }
		  else
		    {
		      clib_memcpy_fast (tmp, *digest, ESP_MAX_ICV_SIZE);
		      clib_memcpy_fast (*digest, &seq_hi, sz);
		      clib_memcpy_fast (*digest + sz, tmp, ESP_MAX_ICV_SIZE);
		      *digest += sz;
		    }
		  ch->len += sz;
		}
	    }
	  total_len += ch->len;
	  break;
	}
      else
	total_len += ch->len = cb->current_length;

      if (!(cb->flags & VLIB_BUFFER_NEXT_PRESENT))
	break;

      cb = vlib_get_buffer (vm, cb->next_buffer);
    }

  if (n_ch)
    *n_ch = n_chunks;
  if (integ_total_len)
    *integ_total_len = total_len;

  return 0;
}
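
/* Illustrative chunk list (sizes hypothetical): for a 3-buffer chain
 * carrying 100 + 200 + 60 bytes of integrity data, a 16-byte ICV at the
 * very end, ESN in use and tail space available for seq_hi, the loop
 * above builds
 *   chunk0: 100 bytes (first buffer, starting at the ESP header)
 *   chunk1: 200 bytes (middle buffer)
 *   chunk2:  48 bytes (last buffer minus the ICV, with the 4-byte seq_hi
 *            spliced in behind the ciphertext)
 * and *digest ends up addressing the 16 ICV bytes that follow. */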

static_always_inline u32
esp_decrypt_chain_crypto (vlib_main_t * vm, ipsec_per_thread_data_t * ptd,
			  esp_decrypt_packet_data_t * pd,
			  esp_decrypt_packet_data2_t * pd2,
			  ipsec_sa_t * sa0, vlib_buffer_t * b, u8 icv_sz,
			  u8 * start, u32 start_len, u8 ** tag, u16 * n_ch)
{
  vnet_crypto_op_chunk_t *ch;
  vlib_buffer_t *cb = b;
  u16 n_chunks = 1;
  u32 total_len;
  vec_add2 (ptd->chunks, ch, 1);
  total_len = ch->len = start_len;
  ch->src = ch->dst = start;
  cb = vlib_get_buffer (vm, cb->next_buffer);

  while (1)
    {
      vec_add2 (ptd->chunks, ch, 1);
      n_chunks += 1;
      ch->src = ch->dst = vlib_buffer_get_current (cb);
      if (pd2->lb == cb)
	{
	  if (ipsec_sa_is_set_IS_AEAD (sa0))
	    {
	      if (pd2->lb->current_length < icv_sz)
		{
		  u16 dif = 0;
		  *tag = esp_move_icv (vm, b, pd, pd2, icv_sz, &dif);

		  /* this chunk does not contain crypto data */
		  n_chunks -= 1;
		  /* and fix previous chunk's length as it might have
		     been changed */
		  ASSERT (n_chunks > 0);
		  if (pd2->lb == b)
		    {
		      total_len -= dif;
		      ch[-1].len -= dif;
		    }
		  else
		    {
		      total_len = total_len + pd2->lb->current_length -
			ch[-1].len;
		      ch[-1].len = pd2->lb->current_length;
		    }
		  break;
		}
	      else
		*tag = vlib_buffer_get_tail (pd2->lb) - icv_sz;
	    }

	  if (pd2->icv_removed)
	    total_len += ch->len = cb->current_length;
	  else
	    total_len += ch->len = cb->current_length - icv_sz;
	}
      else
	total_len += ch->len = cb->current_length;

      if (!(cb->flags & VLIB_BUFFER_NEXT_PRESENT))
	break;

      cb = vlib_get_buffer (vm, cb->next_buffer);
    }

  if (n_ch)
    *n_ch = n_chunks;

  return total_len;
}
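
/* Note on the AEAD path above: the tag must be contiguous, so when the
 * last buffer holds fewer than icv_sz bytes the tag is first reassembled
 * with esp_move_icv() and the now crypto-dataless trailing chunk is
 * dropped from the list, fixing up the previous chunk's length. The
 * returned total_len therefore covers ciphertext only, never the ICV. */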

static_always_inline void
esp_decrypt_prepare_sync_op (vlib_main_t * vm, vlib_node_runtime_t * node,
			     ipsec_per_thread_data_t * ptd,
			     vnet_crypto_op_t *** crypto_ops,
			     vnet_crypto_op_t *** integ_ops,
			     vnet_crypto_op_t * op,
			     ipsec_sa_t * sa0, u8 * payload,
			     u16 len, u8 icv_sz, u8 iv_sz,
			     esp_decrypt_packet_data_t * pd,
			     esp_decrypt_packet_data2_t * pd2,
			     vlib_buffer_t * b, u16 * next, u32 index)
{
  const u8 esp_sz = sizeof (esp_header_t);

  if (PREDICT_TRUE (sa0->integ_op_id != VNET_CRYPTO_OP_NONE))
    {
      vnet_crypto_op_init (op, sa0->integ_op_id);
      op->key_index = sa0->integ_key_index;
      op->src = payload;
      op->flags = VNET_CRYPTO_OP_FLAG_HMAC_CHECK;
      op->user_data = index;
      op->digest = payload + len;
      op->digest_len = icv_sz;
      op->len = len;

      if (pd->is_chain)
	{
	  /* buffer is chained */
	  op->len = pd->current_length;

	  /* special case when ICV is split and needs to be reassembled
	   * first -> move it to the last buffer. Also take into account
	   * that ESN needs to be added after encrypted data and may or
	   * may not fit in the tail. */
	  if (pd2->lb->current_length < icv_sz)
	    {
	      u8 extra_esn = 0;
	      op->digest =
		esp_move_icv_esn (vm, b, pd, pd2, icv_sz, sa0,
				  &extra_esn, &op->len);

	      if (extra_esn)
		{
		  /* esn is in the last buffer, that was unlinked from
		   * the chain */
		  op->len = b->current_length;
		}
	      else
		{
		  if (pd2->lb == b)
		    {
		      /* we now have a single buffer of crypto data, adjust
		       * the length (second buffer contains only ICV) */
		      *integ_ops = &ptd->integ_ops;
		      *crypto_ops = &ptd->crypto_ops;
		      len = b->current_length;
		      goto out;
		    }
		}
	    }
	  else
	    op->digest = vlib_buffer_get_tail (pd2->lb) - icv_sz;

	  op->flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
	  op->chunk_index = vec_len (ptd->chunks);
	  if (esp_decrypt_chain_integ (vm, ptd, pd2, sa0, b, icv_sz,
				       payload, pd->current_length,
				       &op->digest, &op->n_chunks, 0) < 0)
	    {
	      b->error = node->errors[ESP_DECRYPT_ERROR_NO_BUFFERS];
	      next[0] = ESP_DECRYPT_NEXT_DROP;
	      return;
	    }
	}
      else
	esp_insert_esn (vm, sa0, pd2, &op->len, &op->digest, &len, b,
			payload);
    out:
      vec_add_aligned (*(integ_ops[0]), op, 1, CLIB_CACHE_LINE_BYTES);
    }

  payload += esp_sz;
  len -= esp_sz;

  if (sa0->crypto_dec_op_id != VNET_CRYPTO_OP_NONE)
    {
      vnet_crypto_op_init (op, sa0->crypto_dec_op_id);
      op->key_index = sa0->crypto_key_index;
      op->iv = payload;

      if (ipsec_sa_is_set_IS_CTR (sa0))
	{
	  /* construct nonce in a scratch space in front of the IP header */
	  esp_ctr_nonce_t *nonce =
	    (esp_ctr_nonce_t *) (payload - esp_sz - pd->hdr_sz -
				 sizeof (*nonce));
	  if (ipsec_sa_is_set_IS_AEAD (sa0))
	    {
	      /* construct aad in a scratch space in front of the nonce */
	      esp_header_t *esp0 = (esp_header_t *) (payload - esp_sz);
	      op->aad = (u8 *) nonce - sizeof (esp_aead_t);
	      op->aad_len = esp_aad_fill (op->aad, esp0, sa0);
	      op->tag = payload + len;
	      op->tag_len = 16;
	    }
	  else
	    {
	      nonce->ctr = clib_host_to_net_u32 (1);
	    }
	  nonce->salt = sa0->salt;
	  ASSERT (sizeof (u64) == iv_sz);
	  nonce->iv = *(u64 *) op->iv;
	  op->iv = (u8 *) nonce;
	}
      op->src = op->dst = payload += iv_sz;
      op->len = len - iv_sz;
      op->user_data = index;

      if (pd->is_chain && (pd2->lb != b))
	{
	  /* buffer is chained */
	  op->flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
	  op->chunk_index = vec_len (ptd->chunks);
	  esp_decrypt_chain_crypto (vm, ptd, pd, pd2, sa0, b, icv_sz,
				    payload, len - pd->iv_sz + pd->icv_sz,
				    &op->tag, &op->n_chunks);
	}

      vec_add_aligned (*(crypto_ops[0]), op, 1, CLIB_CACHE_LINE_BYTES);
    }
}
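
/* Sketch of the scratch-space layout built above for CTR/AEAD modes
 * (relies on buffer headroom in front of the already-parsed IP header):
 *
 *   ... | aad (esp_aead_t) | nonce (esp_ctr_nonce_t) | IP/UDP | ESP | IV |
 *
 * The salt comes from the SA, the per-packet IV from the wire, and for
 * plain CTR the block counter is seeded with 1. */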

static_always_inline esp_decrypt_error_t
esp_decrypt_prepare_async_frame (vlib_main_t *vm, vlib_node_runtime_t *node,
				 ipsec_per_thread_data_t *ptd,
				 vnet_crypto_async_frame_t *f, ipsec_sa_t *sa0,
				 u8 *payload, u16 len, u8 icv_sz, u8 iv_sz,
				 esp_decrypt_packet_data_t *pd,
				 esp_decrypt_packet_data2_t *pd2, u32 bi,
				 vlib_buffer_t *b, u16 *next, u16 async_next)
{
  const u8 esp_sz = sizeof (esp_header_t);
  u32 current_protect_index = vnet_buffer (b)->ipsec.protect_index;
  esp_decrypt_packet_data_t *async_pd = &(esp_post_data (b))->decrypt_data;
  esp_decrypt_packet_data2_t *async_pd2 = esp_post_data2 (b);
  u8 *tag = payload + len, *iv = payload + esp_sz, *aad = 0;
  u32 key_index;
  u32 crypto_len, integ_len = 0;
  i16 crypto_start_offset, integ_start_offset = 0;
  u8 flags = 0;

  if (!ipsec_sa_is_set_IS_AEAD (sa0))
    {
      /* linked algs */
      key_index = sa0->linked_key_index;
      integ_start_offset = payload - b->data;
      integ_len = len;
      if (PREDICT_TRUE (sa0->integ_op_id != VNET_CRYPTO_OP_NONE))
	flags |= VNET_CRYPTO_OP_FLAG_HMAC_CHECK;

      if (pd->is_chain)
	{
	  /* buffer is chained */
	  integ_len = pd->current_length;

	  /* special case when ICV is split and needs to be reassembled
	   * first -> move it to the last buffer. Also take into account
	   * that ESN needs to be added after encrypted data and may or
	   * may not fit in the tail. */
	  if (pd2->lb->current_length < icv_sz)
	    {
	      u8 extra_esn = 0;
	      tag = esp_move_icv_esn (vm, b, pd, pd2, icv_sz, sa0,
				      &extra_esn, &integ_len);

	      if (extra_esn)
		{
		  /* esn is in the last buffer, that was unlinked from
		   * the chain */
		  integ_len = b->current_length;
		}
	      else
		{
		  if (pd2->lb == b)
		    {
		      /* we now have a single buffer of crypto data, adjust
		       * the length (second buffer contains only ICV) */
		      len = b->current_length;
		      goto out;
		    }
		}
	    }
	  else
	    tag = vlib_buffer_get_tail (pd2->lb) - icv_sz;

	  flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
	  if (esp_decrypt_chain_integ (vm, ptd, pd2, sa0, b, icv_sz, payload,
				       pd->current_length, &tag,
				       0, &integ_len) < 0)
	    {
	      /* allocate buffer failed, will not add to frame and drop */
	      return (ESP_DECRYPT_ERROR_NO_BUFFERS);
	    }
	}
      else
	esp_insert_esn (vm, sa0, pd2, &integ_len, &tag, &len, b, payload);
    }
  else
    key_index = sa0->crypto_key_index;

out:
  /* crypto */
  payload += esp_sz;
  len -= esp_sz;

  if (ipsec_sa_is_set_IS_CTR (sa0))
    {
      /* construct nonce in a scratch space in front of the IP header */
      esp_ctr_nonce_t *nonce =
	(esp_ctr_nonce_t *) (payload - esp_sz - pd->hdr_sz - sizeof (*nonce));
      if (ipsec_sa_is_set_IS_AEAD (sa0))
	{
	  /* construct aad in a scratch space in front of the nonce */
	  esp_header_t *esp0 = (esp_header_t *) (payload - esp_sz);
	  aad = (u8 *) nonce - sizeof (esp_aead_t);
	  esp_aad_fill (aad, esp0, sa0);
	}
      else
	{
	  nonce->ctr = clib_host_to_net_u32 (1);
	}
      nonce->salt = sa0->salt;
      ASSERT (sizeof (u64) == iv_sz);
      nonce->iv = *(u64 *) iv;
      iv = (u8 *) nonce;
    }

  crypto_start_offset = (payload += iv_sz) - b->data;
  crypto_len = len - iv_sz;

  if (pd->is_chain && (pd2->lb != b))
    {
      /* buffer is chained */
      flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;

      crypto_len = esp_decrypt_chain_crypto (vm, ptd, pd, pd2, sa0, b, icv_sz,
					     payload,
					     len - pd->iv_sz + pd->icv_sz,
					     &tag, 0);
    }

  *async_pd = *pd;
  *async_pd2 = *pd2;
  pd->protect_index = current_protect_index;

  /* for AEAD integ_len - crypto_len will be negative, it is ok since it
   * is ignored by the engine. */
  vnet_crypto_async_add_to_frame (
    vm, f, key_index, crypto_len, integ_len - crypto_len, crypto_start_offset,
    integ_start_offset, bi, async_next, iv, tag, aad, flags);

  return (ESP_DECRYPT_ERROR_RX_PKTS);
}

static_always_inline void
esp_decrypt_post_crypto (vlib_main_t * vm, vlib_node_runtime_t * node,
			 esp_decrypt_packet_data_t * pd,
			 esp_decrypt_packet_data2_t * pd2, vlib_buffer_t * b,
			 u16 * next, int is_ip6, int is_tun, int is_async)
{
  ipsec_sa_t *sa0 = ipsec_sa_get (pd->sa_index);
  vlib_buffer_t *lb = b;
  const u8 esp_sz = sizeof (esp_header_t);
  const u8 tun_flags = IPSEC_SA_FLAG_IS_TUNNEL | IPSEC_SA_FLAG_IS_TUNNEL_V6;
  u8 pad_length = 0, next_header = 0;
  u16 icv_sz;

  /*
   * redo the anti-replay check
   * in this frame say we have sequence numbers, s, s+1, s+1, s+1
   * and s and s+1 are in the window. When we did the anti-replay
   * check above we did so against the state of the window (W),
   * after packet s-1. So each of the packets in the sequence will be
   * accepted.
   * This time s will be checked against Ws-1, s+1 checked against Ws
   * (i.e. the window state is updated/advanced)
   * so this time the successive s+1 packet will be dropped.
   * This is a consequence of batching the decrypts. If the
   * check-decrypt-advance process was done for each packet it would
   * be fine. But we batch the decrypts because it's much more efficient
   * to do so in SW and if we offload to HW and the process is async.
   *
   * You're probably thinking, but this means an attacker can send the
   * above sequence and cause VPP to perform decrypts that will fail,
   * and that's true. But if the attacker can determine s (a valid
   * sequence number in the window) which is non-trivial, it can generate
   * a sequence s, s+1, s+2, s+3, ... s+n and nothing will prevent any
   * implementation, sequential or batching, from decrypting these.
   */
  if (ipsec_sa_anti_replay_check (sa0, pd->seq))
    {
      b->error = node->errors[ESP_DECRYPT_ERROR_REPLAY];
      next[0] = ESP_DECRYPT_NEXT_DROP;
      return;
    }

  ipsec_sa_anti_replay_advance (sa0, pd->seq);

  if (pd->is_chain)
    {
      lb = pd2->lb;
      icv_sz = pd2->icv_removed ? 0 : pd->icv_sz;
      if (pd2->free_buffer_index)
	{
	  vlib_buffer_free_one (vm, pd2->free_buffer_index);
	  pd2->lb->next_buffer = 0;
	}
      if (lb->current_length < sizeof (esp_footer_t) + icv_sz)
	{
	  /* esp footer is either split in two buffers or in the before
	   * last buffer */

	  vlib_buffer_t *before_last = b, *bp = b;
	  while (bp->flags & VLIB_BUFFER_NEXT_PRESENT)
	    {
	      before_last = bp;
	      bp = vlib_get_buffer (vm, bp->next_buffer);
	    }
	  u8 *bt = vlib_buffer_get_tail (before_last);

	  if (lb->current_length == icv_sz)
	    {
	      esp_footer_t *f = (esp_footer_t *) (bt - sizeof (*f));
	      pad_length = f->pad_length;
	      next_header = f->next_header;
	    }
	  else
	    {
	      pad_length = (bt - 1)[0];
	      next_header = ((u8 *) vlib_buffer_get_current (lb))[0];
	    }
	}
      else
	{
	  esp_footer_t *f =
	    (esp_footer_t *) (lb->data + lb->current_data +
			      lb->current_length - sizeof (esp_footer_t) -
			      icv_sz);
	  pad_length = f->pad_length;
	  next_header = f->next_header;
	}
    }
  else
    {
      icv_sz = pd->icv_sz;
      esp_footer_t *f =
	(esp_footer_t *) (lb->data + lb->current_data + lb->current_length -
			  sizeof (esp_footer_t) - icv_sz);
      pad_length = f->pad_length;
      next_header = f->next_header;
    }

  u16 adv = pd->iv_sz + esp_sz;
  u16 tail = sizeof (esp_footer_t) + pad_length + icv_sz;
  u16 tail_orig = sizeof (esp_footer_t) + pad_length + pd->icv_sz;
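  /* Illustrative arithmetic (hypothetical SA): for AES-CBC/SHA1-96
   * (iv_sz 16, icv_sz 12) and pad_length 2, adv = 8 (ESP) + 16 = 24 bytes
   * of header to strip, and tail = 2 (footer) + 2 + 12 = 16 bytes of
   * trailer; tail differs from tail_orig only when a chained ICV was
   * already removed (icv_sz 0) while the stored lengths still include it. */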
  b->flags &= ~VLIB_BUFFER_TOTAL_LENGTH_VALID;

  if ((pd->flags & tun_flags) == 0 && !is_tun)	/* transport mode */
    {
      u8 udp_sz = (is_ip6 == 0 && pd->flags & IPSEC_SA_FLAG_UDP_ENCAP) ?
	sizeof (udp_header_t) : 0;
      u16 ip_hdr_sz = pd->hdr_sz - udp_sz;
      u8 *old_ip = b->data + pd->current_data - ip_hdr_sz - udp_sz;
      u8 *ip = old_ip + adv + udp_sz;

      if (is_ip6 && ip_hdr_sz > 64)
	memmove (ip, old_ip, ip_hdr_sz);
      else
	clib_memcpy_le64 (ip, old_ip, ip_hdr_sz);

      b->current_data = pd->current_data + adv - ip_hdr_sz;
      b->current_length += ip_hdr_sz - adv;
      esp_remove_tail (vm, b, lb, tail);

      if (is_ip6)
	{
	  ip6_header_t *ip6 = (ip6_header_t *) ip;
	  u16 len = clib_net_to_host_u16 (ip6->payload_length);
	  len -= adv + tail_orig;
	  ip6->payload_length = clib_host_to_net_u16 (len);
	  ip6->protocol = next_header;
	  next[0] = ESP_DECRYPT_NEXT_IP6_INPUT;
	}
      else
	{
	  ip4_header_t *ip4 = (ip4_header_t *) ip;
	  ip_csum_t sum = ip4->checksum;
	  u16 len = clib_net_to_host_u16 (ip4->length);
	  len = clib_host_to_net_u16 (len - adv - tail_orig - udp_sz);
	  sum = ip_csum_update (sum, ip4->protocol, next_header,
				ip4_header_t, protocol);
	  sum = ip_csum_update (sum, ip4->length, len, ip4_header_t, length);
	  ip4->checksum = ip_csum_fold (sum);
	  ip4->protocol = next_header;
	  ip4->length = len;
	  next[0] = ESP_DECRYPT_NEXT_IP4_INPUT;
	}
    }
  else
    {
      if (PREDICT_TRUE (next_header == IP_PROTOCOL_IP_IN_IP))
	{
	  next[0] = ESP_DECRYPT_NEXT_IP4_INPUT;
	  b->current_data = pd->current_data + adv;
	  b->current_length = pd->current_length - adv;
	  esp_remove_tail (vm, b, lb, tail);
	}
      else if (next_header == IP_PROTOCOL_IPV6)
	{
	  next[0] = ESP_DECRYPT_NEXT_IP6_INPUT;
	  b->current_data = pd->current_data + adv;
	  b->current_length = pd->current_length - adv;
	  esp_remove_tail (vm, b, lb, tail);
	}
      else if (next_header == IP_PROTOCOL_MPLS_IN_IP)
	{
	  next[0] = ESP_DECRYPT_NEXT_MPLS_INPUT;
	  b->current_data = pd->current_data + adv;
	  b->current_length = pd->current_length - adv;
	  esp_remove_tail (vm, b, lb, tail);
	}
      else
	{
	  if (is_tun && next_header == IP_PROTOCOL_GRE)
	    {
	      gre_header_t *gre;

	      b->current_data = pd->current_data + adv;
	      b->current_length = pd->current_length - adv - tail;

	      gre = vlib_buffer_get_current (b);

	      vlib_buffer_advance (b, sizeof (*gre));

	      switch (clib_net_to_host_u16 (gre->protocol))
		{
		case GRE_PROTOCOL_teb:
		  vnet_update_l2_len (b);
		  next[0] = ESP_DECRYPT_NEXT_L2_INPUT;
		  break;
		case GRE_PROTOCOL_ip4:
		  next[0] = ESP_DECRYPT_NEXT_IP4_INPUT;
		  break;
		case GRE_PROTOCOL_ip6:
		  next[0] = ESP_DECRYPT_NEXT_IP6_INPUT;
		  break;
		default:
		  b->error = node->errors[ESP_DECRYPT_ERROR_UNSUP_PAYLOAD];
		  next[0] = ESP_DECRYPT_NEXT_DROP;
		  break;
		}
	    }
	  else
	    {
	      next[0] = ESP_DECRYPT_NEXT_DROP;
	      b->error = node->errors[ESP_DECRYPT_ERROR_UNSUP_PAYLOAD];
	      return;
	    }
	}
      if (is_tun)
	{
	  if (ipsec_sa_is_set_IS_PROTECT (sa0))
	    {
	      /*
	       * There are two encap possibilities
	       * 1) the tunnel and the SA are providing encap, i.e. it's
	       *   MAC | SA-IP | TUN-IP | ESP | PAYLOAD
	       * implying the SA is in tunnel mode (on a tunnel interface)
	       * 2) only the tunnel provides encap
	       *   MAC | TUN-IP | ESP | PAYLOAD
	       * implying the SA is in transport mode.
	       *
	       * For 2) we need only strip the tunnel encap and we're good.
	       *  since the tunnel and crypto encap (in the tun-protect
	       *  object) are the same and we verified above that these match
	       * for 1) we need to strip the SA-IP outer headers, to
	       *  reveal the tunnel IP and then check that this matches
	       *  the configured tunnel.
	       */
	      const ipsec_tun_protect_t *itp;

	      if (is_async)
		itp = ipsec_tun_protect_get (pd->protect_index);
	      else
		itp =
		  ipsec_tun_protect_get (vnet_buffer (b)->
					 ipsec.protect_index);

	      if (PREDICT_TRUE (next_header == IP_PROTOCOL_IP_IN_IP))
		{
		  const ip4_header_t *ip4;

		  ip4 = vlib_buffer_get_current (b);

		  if (!ip46_address_is_equal_v4 (&itp->itp_tun.src,
						 &ip4->dst_address) ||
		      !ip46_address_is_equal_v4 (&itp->itp_tun.dst,
						 &ip4->src_address))
		    {
		      next[0] = ESP_DECRYPT_NEXT_DROP;
		      b->error = node->errors[ESP_DECRYPT_ERROR_TUN_NO_PROTO];
		    }
		}
	      else if (next_header == IP_PROTOCOL_IPV6)
		{
		  const ip6_header_t *ip6;

		  ip6 = vlib_buffer_get_current (b);

		  if (!ip46_address_is_equal_v6 (&itp->itp_tun.src,
						 &ip6->dst_address) ||
		      !ip46_address_is_equal_v6 (&itp->itp_tun.dst,
						 &ip6->src_address))
		    {
		      next[0] = ESP_DECRYPT_NEXT_DROP;
		      b->error = node->errors[ESP_DECRYPT_ERROR_TUN_NO_PROTO];
		    }
		}
	    }
	}
    }
}

always_inline uword
esp_decrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
		    vlib_frame_t *from_frame, int is_ip6, int is_tun,
		    u16 async_next_node)
{
  ipsec_main_t *im = &ipsec_main;
  u32 thread_index = vm->thread_index;
  u16 len;
  ipsec_per_thread_data_t *ptd = vec_elt_at_index (im->ptd, thread_index);
  u32 *from = vlib_frame_vector_args (from_frame);
  u32 n_left = from_frame->n_vectors;
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
  vlib_buffer_t *sync_bufs[VLIB_FRAME_SIZE];
  u16 sync_nexts[VLIB_FRAME_SIZE], *sync_next = sync_nexts, n_sync = 0;
  u16 async_nexts[VLIB_FRAME_SIZE], *async_next = async_nexts, n_async = 0;
  u16 noop_nexts[VLIB_FRAME_SIZE], *noop_next = noop_nexts, n_noop = 0;
  u32 sync_bi[VLIB_FRAME_SIZE];
  u32 noop_bi[VLIB_FRAME_SIZE];
  esp_decrypt_packet_data_t pkt_data[VLIB_FRAME_SIZE], *pd = pkt_data;
  esp_decrypt_packet_data2_t pkt_data2[VLIB_FRAME_SIZE], *pd2 = pkt_data2;
  esp_decrypt_packet_data_t cpd = { };
  u32 current_sa_index = ~0, current_sa_bytes = 0, current_sa_pkts = 0;
  const u8 esp_sz = sizeof (esp_header_t);
  ipsec_sa_t *sa0 = 0;
  vnet_crypto_op_t _op, *op = &_op;
  vnet_crypto_op_t **crypto_ops;
  vnet_crypto_op_t **integ_ops;
  int is_async = im->async_mode;
  vnet_crypto_async_op_id_t async_op = ~0;
  vnet_crypto_async_frame_t *async_frames[VNET_CRYPTO_ASYNC_OP_N_IDS];
  esp_decrypt_error_t err;

  vlib_get_buffers (vm, from, b, n_left);
  if (!is_async)
    {
      vec_reset_length (ptd->crypto_ops);
      vec_reset_length (ptd->integ_ops);
      vec_reset_length (ptd->chained_crypto_ops);
      vec_reset_length (ptd->chained_integ_ops);
    }
  vec_reset_length (ptd->async_frames);
  vec_reset_length (ptd->chunks);
  clib_memset (sync_nexts, -1, sizeof (sync_nexts));
  clib_memset (async_frames, 0, sizeof (async_frames));
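
  /* sync_nexts is pre-filled with 0xffff: a packet whose next index is
   * never overwritten by a failed crypto/integ op still compares >=
   * ESP_DECRYPT_N_NEXT in the post-decryption round below, which is what
   * gates the call to esp_decrypt_post_crypto() for successful packets. */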

  while (n_left > 0)
    {
      u8 *payload;

      err = ESP_DECRYPT_ERROR_RX_PKTS;
      if (n_left > 2)
	{
	  u8 *p;
	  vlib_prefetch_buffer_header (b[2], LOAD);
	  p = vlib_buffer_get_current (b[1]);
	  CLIB_PREFETCH (p, CLIB_CACHE_LINE_BYTES, LOAD);
	  p -= CLIB_CACHE_LINE_BYTES;
	  CLIB_PREFETCH (p, CLIB_CACHE_LINE_BYTES, LOAD);
	}

      u32 n_bufs = vlib_buffer_chain_linearize (vm, b[0]);
      if (n_bufs == 0)
	{
	  err = ESP_DECRYPT_ERROR_NO_BUFFERS;
	  esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
			      ESP_DECRYPT_NEXT_DROP);
	  goto next;
	}

      if (vnet_buffer (b[0])->ipsec.sad_index != current_sa_index)
	{
	  if (current_sa_pkts)
	    vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
					     current_sa_index,
					     current_sa_pkts,
					     current_sa_bytes);
	  current_sa_bytes = current_sa_pkts = 0;

	  current_sa_index = vnet_buffer (b[0])->ipsec.sad_index;
	  sa0 = ipsec_sa_get (current_sa_index);

	  /* fetch the second cacheline ASAP */
	  CLIB_PREFETCH (sa0->cacheline1, CLIB_CACHE_LINE_BYTES, LOAD);
	  cpd.icv_sz = sa0->integ_icv_size;
	  cpd.iv_sz = sa0->crypto_iv_size;
	  cpd.flags = sa0->flags;
	  cpd.sa_index = current_sa_index;
	  is_async = im->async_mode | ipsec_sa_is_set_IS_ASYNC (sa0);
	}

      if (is_async)
	{
	  async_op = sa0->crypto_async_dec_op_id;

	  /* get a frame for this op if we don't yet have one or it's full
	   */
	  if (NULL == async_frames[async_op] ||
	      vnet_crypto_async_frame_is_full (async_frames[async_op]))
	    {
	      async_frames[async_op] =
		vnet_crypto_async_get_frame (vm, async_op);
	      /* Save the frame to the list we'll submit at the end */
	      vec_add1 (ptd->async_frames, async_frames[async_op]);
	    }
	}

      if (PREDICT_FALSE (~0 == sa0->thread_index))
	{
	  /* this is the first packet to use this SA, claim the SA
	   * for this thread. this could happen simultaneously on
	   * another thread */
	  clib_atomic_cmp_and_swap (&sa0->thread_index, ~0,
				    ipsec_sa_assign_thread (thread_index));
	}

      if (PREDICT_FALSE (thread_index != sa0->thread_index))
	{
	  vnet_buffer (b[0])->ipsec.thread_index = sa0->thread_index;
	  err = ESP_DECRYPT_ERROR_HANDOFF;
	  esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
			      ESP_DECRYPT_NEXT_HANDOFF);
	  goto next;
	}

      /* store packet data for next round for easier prefetch */
      pd->sa_data = cpd.sa_data;
      pd->current_data = b[0]->current_data;
      pd->hdr_sz = pd->current_data - vnet_buffer (b[0])->l3_hdr_offset;
      payload = b[0]->data + pd->current_data;
      pd->seq = clib_host_to_net_u32 (((esp_header_t *) payload)->seq);
      pd->is_chain = 0;
      pd2->lb = b[0];
      pd2->free_buffer_index = 0;
      pd2->icv_removed = 0;

      if (n_bufs > 1)
	{
	  pd->is_chain = 1;
	  /* find last buffer in the chain */
	  while (pd2->lb->flags & VLIB_BUFFER_NEXT_PRESENT)
	    pd2->lb = vlib_get_buffer (vm, pd2->lb->next_buffer);

	  crypto_ops = &ptd->chained_crypto_ops;
	  integ_ops = &ptd->chained_integ_ops;
	}
      else
	{
	  crypto_ops = &ptd->crypto_ops;
	  integ_ops = &ptd->integ_ops;
	}

      pd->current_length = b[0]->current_length;

      /* anti-replay check */
      if (ipsec_sa_anti_replay_check (sa0, pd->seq))
	{
	  err = ESP_DECRYPT_ERROR_REPLAY;
	  esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
			      ESP_DECRYPT_NEXT_DROP);
	  goto next;
	}

      if (pd->current_length < cpd.icv_sz + esp_sz + cpd.iv_sz)
	{
	  err = ESP_DECRYPT_ERROR_RUNT;
	  esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
			      ESP_DECRYPT_NEXT_DROP);
	  goto next;
	}

      len = pd->current_length - cpd.icv_sz;
      current_sa_pkts += 1;
      current_sa_bytes += vlib_buffer_length_in_chain (vm, b[0]);

      if (is_async)
	{
	  err = esp_decrypt_prepare_async_frame (
	    vm, node, ptd, async_frames[async_op], sa0, payload, len,
	    cpd.icv_sz, cpd.iv_sz, pd, pd2, from[b - bufs], b[0], async_next,
	    async_next_node);
	  if (ESP_DECRYPT_ERROR_RX_PKTS != err)
	    {
	      esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
				  ESP_DECRYPT_NEXT_DROP);
	    }
	}
      else
	esp_decrypt_prepare_sync_op (
	  vm, node, ptd, &crypto_ops, &integ_ops, op, sa0, payload, len,
	  cpd.icv_sz, cpd.iv_sz, pd, pd2, b[0], sync_next, b - bufs);
      /* next */
    next:
      if (ESP_DECRYPT_ERROR_RX_PKTS != err)
	{
	  noop_bi[n_noop] = from[b - bufs];
	  n_noop++;
	  noop_next++;
	}
      else if (!is_async)
	{
	  sync_bi[n_sync] = from[b - bufs];
	  sync_bufs[n_sync] = b[0];
	  n_sync++;
	  sync_next++;
	  pd += 1;
	  pd2 += 1;
	}
      else
	{
	  n_async++;
	  async_next++;
	}
      n_left -= 1;
      b += 1;
    }

  if (PREDICT_TRUE (~0 != current_sa_index))
    vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
				     current_sa_index, current_sa_pkts,
				     current_sa_bytes);

  if (n_async)
    {
      /* submit all of the open frames */
      vnet_crypto_async_frame_t **async_frame;

      vec_foreach (async_frame, ptd->async_frames)
	{
	  if (vnet_crypto_async_submit_open_frame (vm, *async_frame) < 0)
	    {
	      n_noop += esp_async_recycle_failed_submit (
		vm, *async_frame, node, ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR,
		n_sync, noop_bi, noop_nexts, ESP_DECRYPT_NEXT_DROP);
	      vnet_crypto_async_reset_frame (*async_frame);
	      vnet_crypto_async_free_frame (vm, *async_frame);
	    }
	}
    }

  if (n_sync)
    {
      esp_process_ops (vm, node, ptd->integ_ops, sync_bufs, sync_nexts,
		       ESP_DECRYPT_ERROR_INTEG_ERROR);
      esp_process_chained_ops (vm, node, ptd->chained_integ_ops, sync_bufs,
			       sync_nexts, ptd->chunks,
			       ESP_DECRYPT_ERROR_INTEG_ERROR);

      esp_process_ops (vm, node, ptd->crypto_ops, sync_bufs, sync_nexts,
		       ESP_DECRYPT_ERROR_DECRYPTION_FAILED);
      esp_process_chained_ops (vm, node, ptd->chained_crypto_ops, sync_bufs,
			       sync_nexts, ptd->chunks,
			       ESP_DECRYPT_ERROR_DECRYPTION_FAILED);
    }

  /* Post decryption round - adjust packet data start and length and next
     node */

  n_left = n_sync;
  sync_next = sync_nexts;
  pd = pkt_data;
  pd2 = pkt_data2;
  b = sync_bufs;

  while (n_left)
    {
      if (n_left >= 2)
	{
	  void *data = b[1]->data + pd[1].current_data;

	  /* buffer metadata */
	  vlib_prefetch_buffer_header (b[1], LOAD);

	  /* esp_footer_t */
	  CLIB_PREFETCH (data + pd[1].current_length - pd[1].icv_sz - 2,
			 CLIB_CACHE_LINE_BYTES, LOAD);

	  /* packet headers */
	  CLIB_PREFETCH (data - CLIB_CACHE_LINE_BYTES,
			 CLIB_CACHE_LINE_BYTES * 2, LOAD);
	}

      /* save the sa_index as GRE_teb post_crypto changes L2 opaque */
      if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
	current_sa_index = vnet_buffer (b[0])->ipsec.sad_index;

      if (sync_next[0] >= ESP_DECRYPT_N_NEXT)
	esp_decrypt_post_crypto (vm, node, pd, pd2, b[0], sync_next, is_ip6,
				 is_tun, 0);

      /* trace: */
      if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
	{
	  esp_decrypt_trace_t *tr;
	  tr = vlib_add_trace (vm, node, b[0], sizeof (*tr));
	  sa0 = ipsec_sa_get (current_sa_index);
	  tr->crypto_alg = sa0->crypto_alg;
	  tr->integ_alg = sa0->integ_alg;
	  tr->seq = pd->seq;
	  tr->sa_seq = sa0->last_seq;
	  tr->sa_seq_hi = sa0->seq_hi;
	}

      /* next */
      n_left -= 1;
      sync_next += 1;
      pd += 1;
      pd2 += 1;
      b += 1;
    }

  vlib_node_increment_counter (vm, node->node_index, ESP_DECRYPT_ERROR_RX_PKTS,
			       from_frame->n_vectors);

  if (n_sync)
    vlib_buffer_enqueue_to_next (vm, node, sync_bi, sync_nexts, n_sync);

  if (n_noop)
    vlib_buffer_enqueue_to_next (vm, node, noop_bi, noop_nexts, n_noop);

  return (from_frame->n_vectors);
}

always_inline uword
esp_decrypt_post_inline (vlib_main_t * vm,
			 vlib_node_runtime_t * node,
			 vlib_frame_t * from_frame, int is_ip6, int is_tun)
{
  u32 *from = vlib_frame_vector_args (from_frame);
  u32 n_left = from_frame->n_vectors;
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
  u16 nexts[VLIB_FRAME_SIZE], *next = nexts;
  vlib_get_buffers (vm, from, b, n_left);

  while (n_left > 0)
    {
      esp_decrypt_packet_data_t *pd = &(esp_post_data (b[0]))->decrypt_data;

      if (n_left > 2)
	{
	  vlib_prefetch_buffer_header (b[2], LOAD);
	  vlib_prefetch_buffer_header (b[1], LOAD);
	}

      if (!pd->is_chain)
	esp_decrypt_post_crypto (vm, node, pd, 0, b[0], next, is_ip6, is_tun,
				 1);
      else
	{
	  esp_decrypt_packet_data2_t *pd2 = esp_post_data2 (b[0]);
	  esp_decrypt_post_crypto (vm, node, pd, pd2, b[0], next, is_ip6,
				   is_tun, 1);
	}

      /* trace: */
      if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
	{
	  ipsec_sa_t *sa0 = ipsec_sa_get (pd->sa_index);
	  esp_decrypt_trace_t *tr;
	  esp_decrypt_packet_data_t *async_pd =
	    &(esp_post_data (b[0]))->decrypt_data;
	  tr = vlib_add_trace (vm, node, b[0], sizeof (*tr));
	  sa0 = ipsec_sa_get (async_pd->sa_index);

	  tr->crypto_alg = sa0->crypto_alg;
	  tr->integ_alg = sa0->integ_alg;
	  tr->seq = pd->seq;
	  tr->sa_seq = sa0->last_seq;
	  tr->sa_seq_hi = sa0->seq_hi;
	}

      n_left--;
      next++;
      b++;
    }

  n_left = from_frame->n_vectors;
  vlib_node_increment_counter (vm, node->node_index,
			       ESP_DECRYPT_ERROR_RX_POST_PKTS, n_left);

  vlib_buffer_enqueue_to_next (vm, node, from, nexts, n_left);

  return n_left;
}

VLIB_NODE_FN (esp4_decrypt_node) (vlib_main_t * vm,
				  vlib_node_runtime_t * node,
				  vlib_frame_t * from_frame)
{
  return esp_decrypt_inline (vm, node, from_frame, 0, 0,
			     esp_decrypt_async_next.esp4_post_next);
}

VLIB_NODE_FN (esp4_decrypt_post_node) (vlib_main_t * vm,
				       vlib_node_runtime_t * node,
				       vlib_frame_t * from_frame)
{
  return esp_decrypt_post_inline (vm, node, from_frame, 0, 0);
}

VLIB_NODE_FN (esp4_decrypt_tun_node) (vlib_main_t * vm,
				      vlib_node_runtime_t * node,
				      vlib_frame_t * from_frame)
{
  return esp_decrypt_inline (vm, node, from_frame, 0, 1,
			     esp_decrypt_async_next.esp4_tun_post_next);
}

VLIB_NODE_FN (esp4_decrypt_tun_post_node) (vlib_main_t * vm,
					   vlib_node_runtime_t * node,
					   vlib_frame_t * from_frame)
{
  return esp_decrypt_post_inline (vm, node, from_frame, 0, 1);
}

VLIB_NODE_FN (esp6_decrypt_node) (vlib_main_t * vm,
				  vlib_node_runtime_t * node,
				  vlib_frame_t * from_frame)
{
  return esp_decrypt_inline (vm, node, from_frame, 1, 0,
			     esp_decrypt_async_next.esp6_post_next);
}

VLIB_NODE_FN (esp6_decrypt_post_node) (vlib_main_t * vm,
				       vlib_node_runtime_t * node,
				       vlib_frame_t * from_frame)
{
  return esp_decrypt_post_inline (vm, node, from_frame, 1, 0);
}

VLIB_NODE_FN (esp6_decrypt_tun_node) (vlib_main_t * vm,
				      vlib_node_runtime_t * node,
				      vlib_frame_t * from_frame)
{
  return esp_decrypt_inline (vm, node, from_frame, 1, 1,
			     esp_decrypt_async_next.esp6_tun_post_next);
}

VLIB_NODE_FN (esp6_decrypt_tun_post_node) (vlib_main_t * vm,
					   vlib_node_runtime_t * node,
					   vlib_frame_t * from_frame)
{
  return esp_decrypt_post_inline (vm, node, from_frame, 1, 1);
}

VLIB_REGISTER_NODE (esp4_decrypt_node) = {
  .name = "esp4-decrypt",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_decrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(esp_decrypt_error_strings),
  .error_strings = esp_decrypt_error_strings,

  .n_next_nodes = ESP_DECRYPT_N_NEXT,
  .next_nodes = {
    [ESP_DECRYPT_NEXT_DROP] = "ip4-drop",
    [ESP_DECRYPT_NEXT_IP4_INPUT] = "ip4-input-no-checksum",
    [ESP_DECRYPT_NEXT_IP6_INPUT] = "ip6-input",
    [ESP_DECRYPT_NEXT_MPLS_INPUT] = "mpls-drop",
    [ESP_DECRYPT_NEXT_L2_INPUT] = "l2-input",
    [ESP_DECRYPT_NEXT_HANDOFF] = "esp4-decrypt-handoff",
  },
};

VLIB_REGISTER_NODE (esp4_decrypt_post_node) = {
  .name = "esp4-decrypt-post",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_decrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(esp_decrypt_error_strings),
  .error_strings = esp_decrypt_error_strings,

  .sibling_of = "esp4-decrypt",
};

VLIB_REGISTER_NODE (esp6_decrypt_node) = {
  .name = "esp6-decrypt",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_decrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(esp_decrypt_error_strings),
  .error_strings = esp_decrypt_error_strings,

  .n_next_nodes = ESP_DECRYPT_N_NEXT,
  .next_nodes = {
    [ESP_DECRYPT_NEXT_DROP] = "ip6-drop",
    [ESP_DECRYPT_NEXT_IP4_INPUT] = "ip4-input-no-checksum",
    [ESP_DECRYPT_NEXT_IP6_INPUT] = "ip6-input",
    [ESP_DECRYPT_NEXT_MPLS_INPUT] = "mpls-drop",
    [ESP_DECRYPT_NEXT_L2_INPUT] = "l2-input",
    [ESP_DECRYPT_NEXT_HANDOFF] = "esp6-decrypt-handoff",
  },
};

VLIB_REGISTER_NODE (esp6_decrypt_post_node) = {
  .name = "esp6-decrypt-post",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_decrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(esp_decrypt_error_strings),
  .error_strings = esp_decrypt_error_strings,

  .sibling_of = "esp6-decrypt",
};

VLIB_REGISTER_NODE (esp4_decrypt_tun_node) = {
  .name = "esp4-decrypt-tun",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_decrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN(esp_decrypt_error_strings),
  .error_strings = esp_decrypt_error_strings,
  .n_next_nodes = ESP_DECRYPT_N_NEXT,
  .next_nodes = {
    [ESP_DECRYPT_NEXT_DROP] = "ip4-drop",
    [ESP_DECRYPT_NEXT_IP4_INPUT] = "ip4-input-no-checksum",
    [ESP_DECRYPT_NEXT_IP6_INPUT] = "ip6-input",
    [ESP_DECRYPT_NEXT_MPLS_INPUT] = "mpls-input",
    [ESP_DECRYPT_NEXT_L2_INPUT] = "l2-input",
    [ESP_DECRYPT_NEXT_HANDOFF] = "esp4-decrypt-tun-handoff",
  },
};

VLIB_REGISTER_NODE (esp4_decrypt_tun_post_node) = {
  .name = "esp4-decrypt-tun-post",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_decrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(esp_decrypt_error_strings),
  .error_strings = esp_decrypt_error_strings,

  .sibling_of = "esp4-decrypt-tun",
};

VLIB_REGISTER_NODE (esp6_decrypt_tun_node) = {
  .name = "esp6-decrypt-tun",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_decrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN(esp_decrypt_error_strings),
  .error_strings = esp_decrypt_error_strings,
  .n_next_nodes = ESP_DECRYPT_N_NEXT,
  .next_nodes = {
    [ESP_DECRYPT_NEXT_DROP] = "ip6-drop",
    [ESP_DECRYPT_NEXT_IP4_INPUT] = "ip4-input-no-checksum",
    [ESP_DECRYPT_NEXT_IP6_INPUT] = "ip6-input",
    [ESP_DECRYPT_NEXT_MPLS_INPUT] = "mpls-input",
    [ESP_DECRYPT_NEXT_L2_INPUT] = "l2-input",
    [ESP_DECRYPT_NEXT_HANDOFF] = "esp6-decrypt-tun-handoff",
  },
};

VLIB_REGISTER_NODE (esp6_decrypt_tun_post_node) = {
  .name = "esp6-decrypt-tun-post",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_decrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(esp_decrypt_error_strings),
  .error_strings = esp_decrypt_error_strings,

  .sibling_of = "esp6-decrypt-tun",
};

#ifndef CLIB_MARCH_VARIANT

static clib_error_t *
esp_decrypt_init (vlib_main_t *vm)
{
  ipsec_main_t *im = &ipsec_main;

  im->esp4_dec_fq_index =
    vlib_frame_queue_main_init (esp4_decrypt_node.index, 0);
  im->esp6_dec_fq_index =
    vlib_frame_queue_main_init (esp6_decrypt_node.index, 0);
  im->esp4_dec_tun_fq_index =
    vlib_frame_queue_main_init (esp4_decrypt_tun_node.index, 0);
  im->esp6_dec_tun_fq_index =
    vlib_frame_queue_main_init (esp6_decrypt_tun_node.index, 0);

  return 0;
}

VLIB_INIT_FUNCTION (esp_decrypt_init);

#endif

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */