/*
 * esp_decrypt.c : IPSec ESP decrypt node
 *
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vnet/vnet.h>
#include <vnet/api_errno.h>
#include <vnet/ip/ip.h>
#include <vnet/l2/l2_input.h>

#include <vnet/ipsec/ipsec.h>
#include <vnet/ipsec/esp.h>
#include <vnet/ipsec/ipsec_io.h>
#include <vnet/ipsec/ipsec_tun.h>

#include <vnet/gre/gre.h>
#define foreach_esp_decrypt_next                \
_(DROP, "error-drop")                           \
_(IP4_INPUT, "ip4-input-no-checksum")           \
_(IP6_INPUT, "ip6-input")                       \
_(L2_INPUT, "l2-input")                         \
_(HANDOFF, "handoff")                           \
_(PENDING, "pending")

#define _(v, s) ESP_DECRYPT_NEXT_##v,
typedef enum
{
  foreach_esp_decrypt_next
#undef _
    ESP_DECRYPT_N_NEXT,
} esp_decrypt_next_t;
#define foreach_esp_decrypt_post_next           \
_(DROP, "error-drop")                           \
_(IP4_INPUT, "ip4-input-no-checksum")           \
_(IP6_INPUT, "ip6-input")                       \
_(L2_INPUT, "l2-input")

#define _(v, s) ESP_DECRYPT_POST_NEXT_##v,
typedef enum
{
  foreach_esp_decrypt_post_next
#undef _
    ESP_DECRYPT_POST_N_NEXT,
} esp_decrypt_post_next_t;
#define foreach_esp_decrypt_error                               \
_(RX_PKTS, "ESP pkts received")                                 \
_(RX_POST_PKTS, "ESP-POST pkts received")                       \
_(DECRYPTION_FAILED, "ESP decryption failed")                   \
_(INTEG_ERROR, "Integrity check failed")                        \
_(CRYPTO_ENGINE_ERROR, "crypto engine error (packet dropped)")  \
_(REPLAY, "SA replayed packet")                                 \
_(RUNT, "undersized packet")                                    \
_(NO_BUFFERS, "no buffers (packet dropped)")                    \
_(OVERSIZED_HEADER, "buffer with oversized header (dropped)")   \
_(NO_TAIL_SPACE, "not enough buffer tail space (dropped)")      \
_(TUN_NO_PROTO, "no tunnel protocol")                           \
_(UNSUP_PAYLOAD, "unsupported payload")

typedef enum
{
#define _(sym,str) ESP_DECRYPT_ERROR_##sym,
  foreach_esp_decrypt_error
#undef _
    ESP_DECRYPT_N_ERROR,
} esp_decrypt_error_t;
static char *esp_decrypt_error_strings[] = {
#define _(sym,string) string,
  foreach_esp_decrypt_error
#undef _
};
typedef struct
{
  u32 seq;
  u32 sa_seq;
  u32 sa_seq_hi;
  ipsec_crypto_alg_t crypto_alg;
  ipsec_integ_alg_t integ_alg;
} esp_decrypt_trace_t;
/* packet trace format function */
static u8 *
format_esp_decrypt_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  esp_decrypt_trace_t *t = va_arg (*args, esp_decrypt_trace_t *);

  s = format (s,
	      "esp: crypto %U integrity %U pkt-seq %d sa-seq %u sa-seq-hi %u",
	      format_ipsec_crypto_alg, t->crypto_alg, format_ipsec_integ_alg,
	      t->integ_alg, t->seq, t->sa_seq, t->sa_seq_hi);
  return s;
}
#define ESP_ENCRYPT_PD_F_FD_TRANSPORT (1 << 2)
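/* Check the status of each synchronous crypto op after processing; any op
 * that did not complete maps its buffer to a drop: a bad-HMAC status is
 * charged to the caller-supplied error code (e), anything else to
 * CRYPTO_ENGINE_ERROR. */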
static_always_inline void
esp_process_ops (vlib_main_t * vm, vlib_node_runtime_t * node,
		 vnet_crypto_op_t * ops, vlib_buffer_t * b[], u16 * nexts,
		 int e)
{
  vnet_crypto_op_t *op = ops;
  u32 n_fail, n_ops = vec_len (ops);

  if (n_ops == 0)
    return;

  n_fail = n_ops - vnet_crypto_process_ops (vm, op, n_ops);

  while (n_fail)
    {
      ASSERT (op - ops < n_ops);
      if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
	{
	  u32 err, bi = op->user_data;
	  if (op->status == VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC)
	    err = e;
	  else
	    err = ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR;
	  b[bi]->error = node->errors[err];
	  nexts[bi] = ESP_DECRYPT_NEXT_DROP;
	  n_fail--;
	}
      op++;
    }
}
static_always_inline void
esp_process_chained_ops (vlib_main_t * vm, vlib_node_runtime_t * node,
			 vnet_crypto_op_t * ops, vlib_buffer_t * b[],
			 u16 * nexts, vnet_crypto_op_chunk_t * chunks, int e)
{
  vnet_crypto_op_t *op = ops;
  u32 n_fail, n_ops = vec_len (ops);

  if (n_ops == 0)
    return;

  n_fail = n_ops - vnet_crypto_process_chained_ops (vm, op, chunks, n_ops);

  while (n_fail)
    {
      ASSERT (op - ops < n_ops);
      if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
	{
	  u32 err, bi = op->user_data;
	  if (op->status == VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC)
	    err = e;
	  else
	    err = ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR;
	  b[bi]->error = node->errors[err];
	  nexts[bi] = ESP_DECRYPT_NEXT_DROP;
	  n_fail--;
	}
      op++;
    }
}
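/* Trim 'tail' bytes (pad, ESP footer and ICV) off the end of a buffer
 * chain: either shorten the last buffer or, when the tail covers it
 * completely, free the last buffer and unlink it from the chain. */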
static_always_inline void
esp_remove_tail (vlib_main_t * vm, vlib_buffer_t * b, vlib_buffer_t * last,
		 u16 tail)
{
  vlib_buffer_t *before_last = b;

  if (last->current_length > tail)
    {
      last->current_length -= tail;
      return;
    }
  ASSERT (b->flags & VLIB_BUFFER_NEXT_PRESENT);

  while (b->flags & VLIB_BUFFER_NEXT_PRESENT)
    {
      before_last = b;
      b = vlib_get_buffer (vm, b->next_buffer);
    }
  before_last->current_length -= tail - last->current_length;
  vlib_buffer_free_one (vm, before_last->next_buffer);
  before_last->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
}
/* ICV is split across the last two buffers, so move it to the last buffer
   and return a pointer to it */
static_always_inline u8 *
esp_move_icv (vlib_main_t * vm, vlib_buffer_t * first,
	      esp_decrypt_packet_data2_t * pd2, u16 icv_sz, u16 * dif)
{
  vlib_buffer_t *before_last, *bp;
  u16 last_sz = pd2->lb->current_length;
  u16 first_sz = icv_sz - last_sz;

  bp = before_last = first;
  while (bp->flags & VLIB_BUFFER_NEXT_PRESENT)
    {
      before_last = bp;
      bp = vlib_get_buffer (vm, bp->next_buffer);
    }

  u8 *lb_curr = vlib_buffer_get_current (pd2->lb);
  memmove (lb_curr + first_sz, lb_curr, last_sz);
  clib_memcpy_fast (lb_curr, vlib_buffer_get_tail (before_last) - first_sz,
		    first_sz);
  before_last->current_length -= first_sz;
  clib_memset (vlib_buffer_get_tail (before_last), 0, first_sz);
  if (dif)
    dif[0] = first_sz;
  pd2->lb = before_last;
  pd2->icv_removed = 1;
  pd2->free_buffer_index = before_last->next_buffer;
  before_last->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
  return lb_curr;
}
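/* Append the high 32 bits of an extended sequence number behind the
 * payload so it is covered by the integrity check; returns 0 for non-ESN
 * SAs, otherwise sizeof (seq_hi). When the tail has no room the copy is
 * left to the caller. */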
static_always_inline i16
esp_insert_esn (vlib_main_t * vm, ipsec_sa_t * sa,
		esp_decrypt_packet_data2_t * pd2, u32 * data_len,
		u8 ** digest, u16 * len, vlib_buffer_t * b, u8 * payload)
{
  if (!ipsec_sa_is_set_USE_ESN (sa))
    return 0;

  /* shift ICV by 4 bytes to insert ESN */
  u32 seq_hi = clib_host_to_net_u32 (sa->seq_hi);
  u8 tmp[ESP_MAX_ICV_SIZE], sz = sizeof (sa->seq_hi);

  if (pd2->icv_removed)
    {
      u16 space_left = vlib_buffer_space_left_at_end (vm, pd2->lb);
      if (space_left >= sz)
	{
	  clib_memcpy_fast (vlib_buffer_get_tail (pd2->lb), &seq_hi, sz);
	  *data_len += sz;
	}
      else
	return sz;

      len[0] = b->current_length;
    }
  else
    {
      clib_memcpy_fast (tmp, payload + len[0], ESP_MAX_ICV_SIZE);
      clib_memcpy_fast (payload + len[0], &seq_hi, sz);
      clib_memcpy_fast (payload + len[0] + sz, tmp, ESP_MAX_ICV_SIZE);
      *data_len += sz;
      *digest += sz;
    }

  return sz;
}
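/* Chained-buffer helper: relocate a split ICV into the last buffer and,
 * for ESN SAs, place seq_hi behind the payload; sets extra_esn when the
 * ESN did not fit at the tail and lives in the unlinked ICV buffer. */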
static_always_inline u8 *
esp_move_icv_esn (vlib_main_t * vm, vlib_buffer_t * first,
		  esp_decrypt_packet_data2_t * pd2, u16 icv_sz,
		  ipsec_sa_t * sa, u8 * extra_esn, u32 * len)
{
  u16 dif = 0;
  u8 *digest = esp_move_icv (vm, first, pd2, icv_sz, &dif);
  if (dif)
    len[0] = len[0] - dif;

  if (ipsec_sa_is_set_USE_ESN (sa))
    {
      u8 sz = sizeof (sa->seq_hi);
      u32 seq_hi = clib_host_to_net_u32 (sa->seq_hi);
      u16 space_left = vlib_buffer_space_left_at_end (vm, pd2->lb);

      if (space_left >= sz)
	{
	  clib_memcpy_fast (vlib_buffer_get_tail (pd2->lb), &seq_hi, sz);
	  *len += sz;
	}
      else
	{
	  /* no space for ESN at the tail, use the next buffer
	   * (with the ICV data) */
	  ASSERT (pd2->icv_removed);
	  vlib_buffer_t *tmp = vlib_get_buffer (vm, pd2->free_buffer_index);
	  clib_memcpy_fast (vlib_buffer_get_current (tmp) - sz, &seq_hi, sz);
	  extra_esn[0] = 1;
	}
    }
  return digest;
}
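/* Build the chunk list describing the integrity-covered data of a chained
 * buffer (payload and optional ESN, excluding the ICV); returns -1 if a
 * buffer needed to hold the ESN cannot be allocated. */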
static_always_inline int
esp_decrypt_chain_integ (vlib_main_t * vm, ipsec_per_thread_data_t * ptd,
			 esp_decrypt_packet_data2_t * pd2,
			 ipsec_sa_t * sa0, vlib_buffer_t * b, u8 icv_sz,
			 u8 * start_src, u32 start_len,
			 u8 ** digest, u16 * n_ch, u32 * integ_total_len)
{
  vnet_crypto_op_chunk_t *ch;
  vlib_buffer_t *cb = vlib_get_buffer (vm, b->next_buffer);
  u16 n_chunks = 1;
  u32 total_len;
  vec_add2 (ptd->chunks, ch, 1);
  total_len = ch->len = start_len;
  ch->src = start_src;

  while (1)
    {
      vec_add2 (ptd->chunks, ch, 1);
      n_chunks += 1;
      ch->src = vlib_buffer_get_current (cb);
      if (pd2->lb == cb)
	{
	  if (pd2->icv_removed)
	    ch->len = cb->current_length;
	  else
	    ch->len = cb->current_length - icv_sz;
	  if (ipsec_sa_is_set_USE_ESN (sa0))
	    {
	      u32 seq_hi = clib_host_to_net_u32 (sa0->seq_hi);
	      u8 tmp[ESP_MAX_ICV_SIZE], sz = sizeof (sa0->seq_hi);
	      u8 *esn;
	      vlib_buffer_t *tmp_b;
	      u16 space_left = vlib_buffer_space_left_at_end (vm, pd2->lb);
	      if (space_left < sz)
		{
		  if (pd2->icv_removed)
		    {
		      /* use the pre-data area of the last buffer
		         that was removed from the chain */
		      tmp_b = vlib_get_buffer (vm, pd2->free_buffer_index);
		      esn = tmp_b->data - sz;
		    }
		  else
		    {
		      /* no space, need to allocate a new buffer */
		      u32 tmp_bi = 0;
		      if (vlib_buffer_alloc (vm, &tmp_bi, 1) != 1)
			return -1;
		      tmp_b = vlib_get_buffer (vm, tmp_bi);
		      esn = tmp_b->data;
		      pd2->free_buffer_index = tmp_bi;
		    }

		  clib_memcpy_fast (esn, &seq_hi, sz);

		  vec_add2 (ptd->chunks, ch, 1);
		  n_chunks += 1;
		  ch->src = esn;
		  ch->len = sz;
		}
	      else
		{
		  if (pd2->icv_removed)
		    {
		      clib_memcpy_fast (vlib_buffer_get_tail
					(pd2->lb), &seq_hi, sz);
		    }
		  else
		    {
		      clib_memcpy_fast (tmp, *digest, ESP_MAX_ICV_SIZE);
		      clib_memcpy_fast (*digest, &seq_hi, sz);
		      clib_memcpy_fast (*digest + sz, tmp, ESP_MAX_ICV_SIZE);
		      *digest += sz;
		    }
		  ch->len += sz;
		}
	    }
	  total_len += ch->len;
	  break;
	}
      else
	total_len += ch->len = cb->current_length;

      if (!(cb->flags & VLIB_BUFFER_NEXT_PRESENT))
	break;

      cb = vlib_get_buffer (vm, cb->next_buffer);
    }

  if (n_ch)
    *n_ch = n_chunks;
  if (integ_total_len)
    *integ_total_len = total_len;

  return 0;
}
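/* Build the in-place (src == dst) chunk list for decrypting a chained
 * buffer; for AEAD SAs the ICV is the tag and is excluded from the cipher
 * text, relocating it first when it straddles two buffers. Returns the
 * total crypto length. */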
static_always_inline u32
esp_decrypt_chain_crypto (vlib_main_t * vm, ipsec_per_thread_data_t * ptd,
			  esp_decrypt_packet_data2_t * pd2,
			  ipsec_sa_t * sa0, vlib_buffer_t * b, u8 icv_sz,
			  u8 * start, u32 start_len, u8 ** tag, u16 * n_ch)
{
  vnet_crypto_op_chunk_t *ch;
  vlib_buffer_t *cb = b;
  u16 n_chunks = 1;
  u32 total_len;
  vec_add2 (ptd->chunks, ch, 1);
  total_len = ch->len = start_len;
  ch->src = ch->dst = start;
  cb = vlib_get_buffer (vm, cb->next_buffer);

  while (1)
    {
      vec_add2 (ptd->chunks, ch, 1);
      n_chunks += 1;
      ch->src = ch->dst = vlib_buffer_get_current (cb);
      if (pd2->lb == cb)
	{
	  if (ipsec_sa_is_set_IS_AEAD (sa0))
	    {
	      if (pd2->lb->current_length < icv_sz)
		{
		  u16 dif = 0;
		  *tag = esp_move_icv (vm, b, pd2, icv_sz, &dif);

		  /* this chunk does not contain crypto data */
		  n_chunks -= 1;
		  /* and fix the previous chunk's length as it might have
		     been changed */
		  ASSERT (n_chunks > 0);
		  if (pd2->lb == b)
		    {
		      total_len -= dif;
		      ch[-1].len -= dif;
		    }
		  else
		    {
		      total_len = total_len + pd2->lb->current_length -
			ch[-1].len;
		      ch[-1].len = pd2->lb->current_length;
		    }
		  break;
		}
	      else
		*tag = vlib_buffer_get_tail (pd2->lb) - icv_sz;
	    }

	  if (pd2->icv_removed)
	    total_len += ch->len = cb->current_length;
	  else
	    total_len += ch->len = cb->current_length - icv_sz;
	}
      else
	total_len += ch->len = cb->current_length;

      if (!(cb->flags & VLIB_BUFFER_NEXT_PRESENT))
	break;

      cb = vlib_get_buffer (vm, cb->next_buffer);
    }

  if (n_ch)
    *n_ch = n_chunks;

  return total_len;
}
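/* Set up the synchronous integrity and decryption ops for one packet and
 * append them to the per-thread op vectors (the chained variants when the
 * buffer is chained). */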
static_always_inline void
esp_decrypt_prepare_sync_op (vlib_main_t * vm, vlib_node_runtime_t * node,
			     ipsec_per_thread_data_t * ptd,
			     vnet_crypto_op_t *** crypto_ops,
			     vnet_crypto_op_t *** integ_ops,
			     vnet_crypto_op_t * op,
			     ipsec_sa_t * sa0, u8 * payload,
			     u16 len, u8 icv_sz, u8 iv_sz,
			     esp_decrypt_packet_data_t * pd,
			     esp_decrypt_packet_data2_t * pd2,
			     vlib_buffer_t * b, u16 * next, u32 index)
{
  const u8 esp_sz = sizeof (esp_header_t);

  if (PREDICT_TRUE (sa0->integ_op_id != VNET_CRYPTO_OP_NONE))
    {
      vnet_crypto_op_init (op, sa0->integ_op_id);
      op->key_index = sa0->integ_key_index;
      op->src = payload;
      op->flags = VNET_CRYPTO_OP_FLAG_HMAC_CHECK;
      op->user_data = index;
      op->digest = payload + len;
      op->digest_len = icv_sz;
      op->len = len;

      if (pd->is_chain)
	{
	  /* buffer is chained */
	  op->len = pd->current_length;

	  /* special case when the ICV is split and needs to be reassembled
	   * first -> move it to the last buffer. Also take into account
	   * that the ESN needs to be added after the encrypted data and may
	   * or may not fit in the tail. */
	  if (pd2->lb->current_length < icv_sz)
	    {
	      u8 extra_esn = 0;
	      op->digest =
		esp_move_icv_esn (vm, b, pd2, icv_sz, sa0,
				  &extra_esn, &op->len);

	      if (extra_esn)
		{
		  /* esn is in the last buffer, that was unlinked from
		   * the chain */
		  op->len = b->current_length;
		}
	      else
		{
		  if (pd2->lb == b)
		    {
		      /* we now have a single buffer of crypto data, adjust
		       * the length (second buffer contains only ICV) */
		      *integ_ops = &ptd->integ_ops;
		      *crypto_ops = &ptd->crypto_ops;
		      len = b->current_length;
		      goto out;
		    }
		}
	    }
	  else
	    op->digest = vlib_buffer_get_tail (pd2->lb) - icv_sz;

	  op->flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
	  op->chunk_index = vec_len (ptd->chunks);
	  if (esp_decrypt_chain_integ (vm, ptd, pd2, sa0, b, icv_sz,
				       payload, pd->current_length,
				       &op->digest, &op->n_chunks, 0) < 0)
	    {
	      b->error = node->errors[ESP_DECRYPT_ERROR_NO_BUFFERS];
	      next[0] = ESP_DECRYPT_NEXT_DROP;
	      return;
	    }
	}
      else
	esp_insert_esn (vm, sa0, pd2, &op->len, &op->digest, &len, b,
			payload);
    out:
      vec_add_aligned (*(integ_ops[0]), op, 1, CLIB_CACHE_LINE_BYTES);
    }

  payload += esp_sz;
  len -= esp_sz;

  if (sa0->crypto_dec_op_id != VNET_CRYPTO_OP_NONE)
    {
      vnet_crypto_op_init (op, sa0->crypto_dec_op_id);
      op->key_index = sa0->crypto_key_index;
      op->iv = payload;

      if (ipsec_sa_is_set_IS_AEAD (sa0))
	{
	  esp_header_t *esp0;
	  esp_aead_t *aad;
	  u8 *scratch;

	  /*
	   * construct the AAD and the nonce (Salt || IV) in a scratch
	   * space in front of the IP header.
	   */
	  scratch = payload - esp_sz;
	  esp0 = (esp_header_t *) (scratch);

	  scratch -= (sizeof (*aad) + pd->hdr_sz);
	  op->aad = scratch;

	  op->aad_len = esp_aad_fill (op->aad, esp0, sa0);

	  /*
	   * we don't need to refer to the ESP header anymore so we
	   * can overwrite it with the salt and use the IV where it is
	   * to form the nonce = (Salt || IV)
	   */
	  op->iv -= sizeof (sa0->salt);
	  clib_memcpy_fast (op->iv, &sa0->salt, sizeof (sa0->salt));

	  op->tag = payload + len;
	  op->tag_len = 16;
	}
      op->src = op->dst = payload += iv_sz;
      op->len = len - iv_sz;
      op->user_data = index;

      if (pd->is_chain && (pd2->lb != b))
	{
	  /* buffer is chained */
	  op->flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
	  op->chunk_index = vec_len (ptd->chunks);
	  esp_decrypt_chain_crypto (vm, ptd, pd2, sa0, b, icv_sz,
				    payload, len - pd->iv_sz + pd->icv_sz,
				    &op->tag, &op->n_chunks);
	}

      vec_add_aligned (*(crypto_ops[0]), op, 1, CLIB_CACHE_LINE_BYTES);
    }
}
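/* Async path: compute the crypto/integrity offsets and lengths for one
 * packet and add it to the open async frame; returns < 0 when the packet
 * could not be added and must be dropped. */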
static_always_inline int
esp_decrypt_prepare_async_frame (vlib_main_t * vm,
				 vlib_node_runtime_t * node,
				 ipsec_per_thread_data_t * ptd,
				 vnet_crypto_async_frame_t ** f,
				 ipsec_sa_t * sa0, u8 * payload, u16 len,
				 u8 icv_sz, u8 iv_sz,
				 esp_decrypt_packet_data_t * pd,
				 esp_decrypt_packet_data2_t * pd2, u32 bi,
				 vlib_buffer_t * b, u16 * next,
				 u16 async_next)
{
  const u8 esp_sz = sizeof (esp_header_t);
  u32 current_protect_index = vnet_buffer (b)->ipsec.protect_index;
  esp_decrypt_packet_data_t *async_pd = &(esp_post_data (b))->decrypt_data;
  esp_decrypt_packet_data2_t *async_pd2 = esp_post_data2 (b);
  u8 *tag = payload + len, *iv = payload + esp_sz, *aad = 0;
  u32 key_index;
  u32 crypto_len, integ_len = 0;
  i16 crypto_start_offset, integ_start_offset = 0;
  u8 flags = 0;

  if (!ipsec_sa_is_set_IS_AEAD (sa0))
    {
      /* linked algs */
      key_index = sa0->linked_key_index;
      integ_start_offset = payload - b->data;
      integ_len = len;

      if (pd->is_chain)
	{
	  /* buffer is chained */
	  integ_len = pd->current_length;

	  /* special case when the ICV is split and needs to be reassembled
	   * first -> move it to the last buffer. Also take into account
	   * that the ESN needs to be added after the encrypted data and may
	   * or may not fit in the tail. */
	  if (pd2->lb->current_length < icv_sz)
	    {
	      u8 extra_esn = 0;
	      tag = esp_move_icv_esn (vm, b, pd2, icv_sz, sa0,
				      &extra_esn, &integ_len);

	      if (extra_esn)
		{
		  /* esn is in the last buffer, that was unlinked from
		   * the chain */
		  integ_len = b->current_length;
		}
	      else
		{
		  if (pd2->lb == b)
		    {
		      /* we now have a single buffer of crypto data, adjust
		       * the length (second buffer contains only ICV) */
		      len = b->current_length;
		      goto out;
		    }
		}
	    }
	  else
	    tag = vlib_buffer_get_tail (pd2->lb) - icv_sz;

	  flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
	  if (esp_decrypt_chain_integ (vm, ptd, pd2, sa0, b, icv_sz, payload,
				       pd->current_length, &tag,
				       0, &integ_len) < 0)
	    {
	      /* buffer allocation failed; do not add to the frame, drop */
	      b->error = node->errors[ESP_DECRYPT_ERROR_NO_BUFFERS];
	      next[0] = ESP_DECRYPT_NEXT_DROP;
	      return -1;
	    }
	}
      else
	esp_insert_esn (vm, sa0, pd2, &integ_len, &tag, &len, b, payload);
    }
  else
    key_index = sa0->crypto_key_index;

out:
  /* crypto */
  payload += esp_sz;
  len -= esp_sz;
  iv = payload;

  if (ipsec_sa_is_set_IS_AEAD (sa0))
    {
      esp_header_t *esp0;
      u8 *scratch;

      /*
       * construct the AAD and the nonce (Salt || IV) in a scratch
       * space in front of the IP header.
       */
      scratch = payload - esp_sz;
      esp0 = (esp_header_t *) (scratch);

      scratch -= (sizeof (esp_aead_t) + pd->hdr_sz);
      aad = scratch;

      esp_aad_fill (aad, esp0, sa0);

      /*
       * we don't need to refer to the ESP header anymore so we
       * can overwrite it with the salt and use the IV where it is
       * to form the nonce = (Salt || IV)
       */
      iv -= sizeof (sa0->salt);
      clib_memcpy_fast (iv, &sa0->salt, sizeof (sa0->salt));

      tag = payload + len;
    }

  crypto_start_offset = (payload += iv_sz) - b->data;
  crypto_len = len - iv_sz;

  if (pd->is_chain && (pd2->lb != b))
    {
      /* buffer is chained */
      flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;

      crypto_len = esp_decrypt_chain_crypto (vm, ptd, pd2, sa0, b, icv_sz,
					     payload,
					     len - pd->iv_sz + pd->icv_sz,
					     &tag, 0);
    }

  *async_pd = *pd;
  *async_pd2 = *pd2;
  pd->protect_index = current_protect_index;
  next[0] = ESP_DECRYPT_NEXT_PENDING;

  /* for AEAD, integ_len - crypto_len will be negative; that is OK since it
   * is ignored by the engine. */
  return vnet_crypto_async_add_to_frame (vm, f, key_index, crypto_len,
					 integ_len - crypto_len,
					 crypto_start_offset,
					 integ_start_offset,
					 bi, async_next, iv, tag, aad, flags);
}
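/* Runs after decryption has succeeded: redo and advance anti-replay,
 * locate the ESP footer, strip the ESP header, IV, padding and ICV, fix
 * up the inner/transport headers and pick the next node; for protected
 * tunnel interfaces also validate the inner addresses against the
 * configured tunnel. */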
static_always_inline void
esp_decrypt_post_crypto (vlib_main_t * vm, vlib_node_runtime_t * node,
			 esp_decrypt_packet_data_t * pd,
			 esp_decrypt_packet_data2_t * pd2, vlib_buffer_t * b,
			 u16 * next, int is_ip6, int is_tun, int is_async)
{
  ipsec_main_t *im = &ipsec_main;
  ipsec_sa_t *sa0 = vec_elt_at_index (im->sad, pd->sa_index);
  vlib_buffer_t *lb = b;
  const u8 esp_sz = sizeof (esp_header_t);
  const u8 tun_flags = IPSEC_SA_FLAG_IS_TUNNEL | IPSEC_SA_FLAG_IS_TUNNEL_V6;
  u8 pad_length = 0, next_header = 0;
  u16 icv_sz;
  /*
   * redo the anti-replay check.
   * in this frame say we have sequence numbers, s, s+1, s+1, s+1
   * and s and s+1 are in the window. When we did the anti-replay
   * check above we did so against the state of the window (W),
   * after packet s-1. So each of the packets in the sequence will be
   * accepted.
   * This time s will be checked against Ws-1, s+1 checked against Ws
   * (i.e. the window state is updated/advanced)
   * so this time the successive s+1 packet will be dropped.
   * This is a consequence of batching the decrypts. If the
   * check-decrypt-advance process was done for each packet it would
   * be fine. But we batch the decrypts because it's much more efficient
   * to do so in SW and if we offload to HW and the process is async.
   *
   * You're probably thinking, but this means an attacker can send the
   * above sequence and cause VPP to perform decrypts that will fail,
   * and that's true. But if the attacker can determine s (a valid
   * sequence number in the window) which is non-trivial, it can generate
   * a sequence s, s+1, s+2, s+3, ... s+n and nothing will prevent any
   * implementation, sequential or batching, from decrypting these.
   */
  if (ipsec_sa_anti_replay_check (sa0, pd->seq))
    {
      b->error = node->errors[ESP_DECRYPT_ERROR_REPLAY];
      next[0] = ESP_DECRYPT_NEXT_DROP;
      return;
    }

  ipsec_sa_anti_replay_advance (sa0, pd->seq);

  if (pd->is_chain)
    {
      lb = pd2->lb;
      icv_sz = pd2->icv_removed ? 0 : pd->icv_sz;
      if (pd2->free_buffer_index)
	{
	  vlib_buffer_free_one (vm, pd2->free_buffer_index);
	  pd2->free_buffer_index = 0;
	}
      if (lb->current_length < sizeof (esp_footer_t) + icv_sz)
	{
	  /* the ESP footer is either split across the last two buffers
	   * or sits entirely in the buffer before the last one */
	  vlib_buffer_t *before_last = b, *bp = b;
	  while (bp->flags & VLIB_BUFFER_NEXT_PRESENT)
	    {
	      before_last = bp;
	      bp = vlib_get_buffer (vm, bp->next_buffer);
	    }
	  u8 *bt = vlib_buffer_get_tail (before_last);

	  if (lb->current_length == icv_sz)
	    {
	      esp_footer_t *f = (esp_footer_t *) (bt - sizeof (*f));
	      pad_length = f->pad_length;
	      next_header = f->next_header;
	    }
	  else
	    {
	      pad_length = (bt - 1)[0];
	      next_header = ((u8 *) vlib_buffer_get_current (lb))[0];
	    }
	}
      else
	{
	  esp_footer_t *f =
	    (esp_footer_t *) (lb->data + lb->current_data +
			      lb->current_length - sizeof (esp_footer_t) -
			      icv_sz);
	  pad_length = f->pad_length;
	  next_header = f->next_header;
	}
    }
  else
    {
      icv_sz = pd->icv_sz;
      esp_footer_t *f =
	(esp_footer_t *) (lb->data + lb->current_data + lb->current_length -
			  sizeof (esp_footer_t) - icv_sz);
      pad_length = f->pad_length;
      next_header = f->next_header;
    }

  u16 adv = pd->iv_sz + esp_sz;
  u16 tail = sizeof (esp_footer_t) + pad_length + icv_sz;
  u16 tail_orig = sizeof (esp_footer_t) + pad_length + pd->icv_sz;
  b->flags &= ~VLIB_BUFFER_TOTAL_LENGTH_VALID;
  if ((pd->flags & tun_flags) == 0 && !is_tun)	/* transport mode */
    {
      u8 udp_sz = (is_ip6 == 0 && pd->flags & IPSEC_SA_FLAG_UDP_ENCAP) ?
	sizeof (udp_header_t) : 0;
      u16 ip_hdr_sz = pd->hdr_sz - udp_sz;
      u8 *old_ip = b->data + pd->current_data - ip_hdr_sz - udp_sz;
      u8 *ip = old_ip + adv + udp_sz;

      if (is_ip6 && ip_hdr_sz > 64)
	memmove (ip, old_ip, ip_hdr_sz);
      else
	clib_memcpy_le64 (ip, old_ip, ip_hdr_sz);

      b->current_data = pd->current_data + adv - ip_hdr_sz;
      b->current_length += ip_hdr_sz - adv;
      esp_remove_tail (vm, b, lb, tail);

      if (is_ip6)
	{
	  ip6_header_t *ip6 = (ip6_header_t *) ip;
	  u16 len = clib_net_to_host_u16 (ip6->payload_length);
	  len -= adv + tail_orig;
	  ip6->payload_length = clib_host_to_net_u16 (len);
	  ip6->protocol = next_header;
	  next[0] = ESP_DECRYPT_NEXT_IP6_INPUT;
	}
      else
	{
	  ip4_header_t *ip4 = (ip4_header_t *) ip;
	  ip_csum_t sum = ip4->checksum;
	  u16 len = clib_net_to_host_u16 (ip4->length);
	  len = clib_host_to_net_u16 (len - adv - tail_orig - udp_sz);
	  sum = ip_csum_update (sum, ip4->protocol, next_header,
				ip4_header_t, protocol);
	  sum = ip_csum_update (sum, ip4->length, len, ip4_header_t, length);
	  ip4->checksum = ip_csum_fold (sum);
	  ip4->protocol = next_header;
	  ip4->length = len;
	  next[0] = ESP_DECRYPT_NEXT_IP4_INPUT;
	}
    }
  else
    {
      if (PREDICT_TRUE (next_header == IP_PROTOCOL_IP_IN_IP))
	{
	  next[0] = ESP_DECRYPT_NEXT_IP4_INPUT;
	  b->current_data = pd->current_data + adv;
	  b->current_length = pd->current_length - adv;
	  esp_remove_tail (vm, b, lb, tail);
	}
      else if (next_header == IP_PROTOCOL_IPV6)
	{
	  next[0] = ESP_DECRYPT_NEXT_IP6_INPUT;
	  b->current_data = pd->current_data + adv;
	  b->current_length = pd->current_length - adv;
	  esp_remove_tail (vm, b, lb, tail);
	}
      else
	{
	  if (is_tun && next_header == IP_PROTOCOL_GRE)
	    {
	      gre_header_t *gre;

	      b->current_data = pd->current_data + adv;
	      b->current_length = pd->current_length - adv - tail;

	      gre = vlib_buffer_get_current (b);

	      vlib_buffer_advance (b, sizeof (*gre));

	      switch (clib_net_to_host_u16 (gre->protocol))
		{
		case GRE_PROTOCOL_teb:
		  vnet_update_l2_len (b);
		  next[0] = ESP_DECRYPT_NEXT_L2_INPUT;
		  break;
		case GRE_PROTOCOL_ip4:
		  next[0] = ESP_DECRYPT_NEXT_IP4_INPUT;
		  break;
		case GRE_PROTOCOL_ip6:
		  next[0] = ESP_DECRYPT_NEXT_IP6_INPUT;
		  break;
		default:
		  b->error = node->errors[ESP_DECRYPT_ERROR_UNSUP_PAYLOAD];
		  next[0] = ESP_DECRYPT_NEXT_DROP;
		  break;
		}
	    }
	  else
	    {
	      next[0] = ESP_DECRYPT_NEXT_DROP;
	      b->error = node->errors[ESP_DECRYPT_ERROR_UNSUP_PAYLOAD];
	      return;
	    }
	}

      if (is_tun)
	{
	  if (ipsec_sa_is_set_IS_PROTECT (sa0))
	    {
	      /*
	       * There are two encap possibilities
	       * 1) the tunnel and the SA are providing encap, i.e. it's
	       *   MAC | SA-IP | TUN-IP | ESP | PAYLOAD
	       * implying the SA is in tunnel mode (on a tunnel interface)
	       * 2) only the tunnel provides encap
	       *   MAC | TUN-IP | ESP | PAYLOAD
	       * implying the SA is in transport mode.
	       *
	       * For 2) we need only strip the tunnel encap and we're good,
	       * since the tunnel and crypto encap (in the tun-protect
	       * object) are the same and we verified above that these match.
	       * For 1) we need to strip the SA-IP outer headers to
	       * reveal the tunnel IP and then check that this matches
	       * the configured tunnel.
	       */
	      const ipsec_tun_protect_t *itp;

	      if (is_async)
		itp = ipsec_tun_protect_get (pd->protect_index);
	      else
		itp =
		  ipsec_tun_protect_get (vnet_buffer (b)->
					 ipsec.protect_index);

	      if (PREDICT_TRUE (next_header == IP_PROTOCOL_IP_IN_IP))
		{
		  const ip4_header_t *ip4;

		  ip4 = vlib_buffer_get_current (b);

		  if (!ip46_address_is_equal_v4 (&itp->itp_tun.src,
						 &ip4->dst_address) ||
		      !ip46_address_is_equal_v4 (&itp->itp_tun.dst,
						 &ip4->src_address))
		    {
		      next[0] = ESP_DECRYPT_NEXT_DROP;
		      b->error = node->errors[ESP_DECRYPT_ERROR_TUN_NO_PROTO];
		    }
		}
	      else if (next_header == IP_PROTOCOL_IPV6)
		{
		  const ip6_header_t *ip6;

		  ip6 = vlib_buffer_get_current (b);

		  if (!ip46_address_is_equal_v6 (&itp->itp_tun.src,
						 &ip6->dst_address) ||
		      !ip46_address_is_equal_v6 (&itp->itp_tun.dst,
						 &ip6->src_address))
		    {
		      next[0] = ESP_DECRYPT_NEXT_DROP;
		      b->error = node->errors[ESP_DECRYPT_ERROR_TUN_NO_PROTO];
		    }
		}
	    }
	}
    }
}
/* when submitting a frame fails, drop all buffers in the frame */
static_always_inline void
esp_async_recycle_failed_submit (vnet_crypto_async_frame_t * f,
				 vlib_buffer_t ** b, u16 * next)
{
  u32 n_drop = f->n_elts;
  while (--n_drop)
    {
      (b - n_drop)[0]->error = ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR;
      (next - n_drop)[0] = ESP_DECRYPT_NEXT_DROP;
    }
  vnet_crypto_async_reset_frame (f);
}
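/* Main decrypt loop: per packet, validate and prepare either synchronous
 * crypto ops or an async frame entry. In sync mode the ops are processed
 * in-line and a post-decryption pass finalizes each packet; in async mode
 * the post pass happens later in the corresponding post node. */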
always_inline uword
esp_decrypt_inline (vlib_main_t * vm,
		    vlib_node_runtime_t * node, vlib_frame_t * from_frame,
		    int is_ip6, int is_tun, u16 async_next)
{
  ipsec_main_t *im = &ipsec_main;
  u32 thread_index = vm->thread_index;
  u16 len;
  ipsec_per_thread_data_t *ptd = vec_elt_at_index (im->ptd, thread_index);
  u32 *from = vlib_frame_vector_args (from_frame);
  u32 n_left = from_frame->n_vectors;
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
  u16 nexts[VLIB_FRAME_SIZE], *next = nexts;
  esp_decrypt_packet_data_t pkt_data[VLIB_FRAME_SIZE], *pd = pkt_data;
  esp_decrypt_packet_data2_t pkt_data2[VLIB_FRAME_SIZE], *pd2 = pkt_data2;
  esp_decrypt_packet_data_t cpd = { };
  u32 current_sa_index = ~0, current_sa_bytes = 0, current_sa_pkts = 0;
  const u8 esp_sz = sizeof (esp_header_t);
  ipsec_sa_t *sa0 = 0;
  vnet_crypto_op_t _op, *op = &_op;
  vnet_crypto_op_t **crypto_ops = &ptd->crypto_ops;
  vnet_crypto_op_t **integ_ops = &ptd->integ_ops;
  vnet_crypto_async_frame_t *async_frame = 0;
  int is_async = im->async_mode;
  vnet_crypto_async_op_id_t last_async_op = ~0;

  vlib_get_buffers (vm, from, b, n_left);
  if (!is_async)
    {
      vec_reset_length (ptd->crypto_ops);
      vec_reset_length (ptd->integ_ops);
      vec_reset_length (ptd->chained_crypto_ops);
      vec_reset_length (ptd->chained_integ_ops);
    }
  vec_reset_length (ptd->chunks);
  clib_memset_u16 (nexts, -1, n_left);
  while (n_left > 0)
    {
      u8 *payload;

      if (n_left > 2)
	{
	  u8 *p;
	  vlib_prefetch_buffer_header (b[2], LOAD);
	  p = vlib_buffer_get_current (b[1]);
	  CLIB_PREFETCH (p, CLIB_CACHE_LINE_BYTES, LOAD);
	  p -= CLIB_CACHE_LINE_BYTES;
	  CLIB_PREFETCH (p, CLIB_CACHE_LINE_BYTES, LOAD);
	}

      u32 n_bufs = vlib_buffer_chain_linearize (vm, b[0]);
      if (n_bufs == 0)
	{
	  b[0]->error = node->errors[ESP_DECRYPT_ERROR_NO_BUFFERS];
	  next[0] = ESP_DECRYPT_NEXT_DROP;
	  goto next;
	}
      if (vnet_buffer (b[0])->ipsec.sad_index != current_sa_index)
	{
	  if (current_sa_pkts)
	    vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
					     current_sa_index,
					     current_sa_pkts,
					     current_sa_bytes);
	  current_sa_bytes = current_sa_pkts = 0;

	  current_sa_index = vnet_buffer (b[0])->ipsec.sad_index;
	  sa0 = pool_elt_at_index (im->sad, current_sa_index);
	  cpd.icv_sz = sa0->integ_icv_size;
	  cpd.iv_sz = sa0->crypto_iv_size;
	  cpd.flags = sa0->flags;
	  cpd.sa_index = current_sa_index;

	  /* submit the frame when the op id differs from the previous one */
	  if (is_async && last_async_op != sa0->crypto_async_dec_op_id)
	    {
	      if (async_frame && async_frame->n_elts)
		{
		  if (vnet_crypto_async_submit_open_frame (vm, async_frame))
		    esp_async_recycle_failed_submit (async_frame, b, next);
		}
	      async_frame =
		vnet_crypto_async_get_frame (vm, sa0->crypto_async_dec_op_id);
	      last_async_op = sa0->crypto_async_dec_op_id;
	    }
	}

      if (PREDICT_FALSE (~0 == sa0->decrypt_thread_index))
	{
	  /* this is the first packet to use this SA, claim the SA
	   * for this thread. this could happen simultaneously on
	   * another thread */
	  clib_atomic_cmp_and_swap (&sa0->decrypt_thread_index, ~0,
				    ipsec_sa_assign_thread (thread_index));
	}

      if (PREDICT_TRUE (thread_index != sa0->decrypt_thread_index))
	{
	  next[0] = ESP_DECRYPT_NEXT_HANDOFF;
	  goto next;
	}
      /* store packet data for next round for easier prefetch */
      pd->sa_data = cpd.sa_data;
      pd->current_data = b[0]->current_data;
      pd->hdr_sz = pd->current_data - vnet_buffer (b[0])->l3_hdr_offset;
      payload = b[0]->data + pd->current_data;
      pd->seq = clib_host_to_net_u32 (((esp_header_t *) payload)->seq);
      pd->is_chain = 0;

      pd2->lb = b[0];
      pd2->free_buffer_index = 0;
      pd2->icv_removed = 0;

      if (n_bufs > 1)
	{
	  pd->is_chain = 1;
	  /* find last buffer in the chain */
	  while (pd2->lb->flags & VLIB_BUFFER_NEXT_PRESENT)
	    pd2->lb = vlib_get_buffer (vm, pd2->lb->next_buffer);

	  crypto_ops = &ptd->chained_crypto_ops;
	  integ_ops = &ptd->chained_integ_ops;
	}
      pd->current_length = b[0]->current_length;

      /* anti-replay check */
      if (ipsec_sa_anti_replay_check (sa0, pd->seq))
	{
	  b[0]->error = node->errors[ESP_DECRYPT_ERROR_REPLAY];
	  next[0] = ESP_DECRYPT_NEXT_DROP;
	  goto next;
	}

      if (pd->current_length < cpd.icv_sz + esp_sz + cpd.iv_sz)
	{
	  b[0]->error = node->errors[ESP_DECRYPT_ERROR_RUNT];
	  next[0] = ESP_DECRYPT_NEXT_DROP;
	  goto next;
	}

      len = pd->current_length - cpd.icv_sz;
      current_sa_pkts += 1;
      current_sa_bytes += vlib_buffer_length_in_chain (vm, b[0]);

      if (is_async)
	{
	  int ret = esp_decrypt_prepare_async_frame (vm, node, ptd,
						     &async_frame, sa0,
						     payload, len, cpd.icv_sz,
						     cpd.iv_sz, pd, pd2,
						     from[b - bufs],
						     b[0], next, async_next);
	  if (PREDICT_FALSE (ret < 0))
	    {
	      esp_async_recycle_failed_submit (async_frame, b, next);
	      goto next;
	    }
	}
      else
	esp_decrypt_prepare_sync_op (vm, node, ptd, &crypto_ops, &integ_ops,
				     op, sa0, payload, len, cpd.icv_sz,
				     cpd.iv_sz, pd, pd2, b[0], next,
				     b - bufs);
      /* next */
    next:
      n_left -= 1;
      next += 1;
      pd += 1;
      pd2 += 1;
      b += 1;
    }
  if (PREDICT_TRUE (~0 != current_sa_index))
    vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
				     current_sa_index, current_sa_pkts,
				     current_sa_bytes);

  if (is_async)
    {
      if (async_frame && async_frame->n_elts)
	{
	  if (vnet_crypto_async_submit_open_frame (vm, async_frame) < 0)
	    esp_async_recycle_failed_submit (async_frame, b, next);
	}

      /* no post processing in async mode */
      n_left = from_frame->n_vectors;
      vlib_node_increment_counter (vm, node->node_index,
				   ESP_DECRYPT_ERROR_RX_PKTS, n_left);
      vlib_buffer_enqueue_to_next (vm, node, from, nexts, n_left);

      return n_left;
    }
  else
    {
      esp_process_ops (vm, node, ptd->integ_ops, bufs, nexts,
		       ESP_DECRYPT_ERROR_INTEG_ERROR);
      esp_process_chained_ops (vm, node, ptd->chained_integ_ops, bufs, nexts,
			       ptd->chunks, ESP_DECRYPT_ERROR_INTEG_ERROR);

      esp_process_ops (vm, node, ptd->crypto_ops, bufs, nexts,
		       ESP_DECRYPT_ERROR_DECRYPTION_FAILED);
      esp_process_chained_ops (vm, node, ptd->chained_crypto_ops, bufs,
			       nexts, ptd->chunks,
			       ESP_DECRYPT_ERROR_DECRYPTION_FAILED);
    }
  /* Post decryption round - adjust packet data start and length and next
     node */

  n_left = from_frame->n_vectors;
  next = nexts;
  pd = pkt_data;
  pd2 = pkt_data2;
  b = bufs;

  while (n_left)
    {
      if (n_left >= 2)
	{
	  void *data = b[1]->data + pd[1].current_data;

	  /* buffer metadata */
	  vlib_prefetch_buffer_header (b[1], LOAD);

	  /* esp_footer_t */
	  CLIB_PREFETCH (data + pd[1].current_length - pd[1].icv_sz - 2,
			 CLIB_CACHE_LINE_BYTES, LOAD);

	  /* packet headers */
	  CLIB_PREFETCH (data - CLIB_CACHE_LINE_BYTES,
			 CLIB_CACHE_LINE_BYTES * 2, LOAD);
	}

      /* save the sa_index as GRE_teb post_crypto changes the L2 opaque */
      if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
	current_sa_index = vnet_buffer (b[0])->ipsec.sad_index;

      if (next[0] >= ESP_DECRYPT_N_NEXT)
	esp_decrypt_post_crypto (vm, node, pd, pd2, b[0], next, is_ip6,
				 is_tun, 0);

      /* trace: */
      if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
	{
	  esp_decrypt_trace_t *tr;
	  tr = vlib_add_trace (vm, node, b[0], sizeof (*tr));
	  sa0 = pool_elt_at_index (im->sad, current_sa_index);
	  tr->crypto_alg = sa0->crypto_alg;
	  tr->integ_alg = sa0->integ_alg;
	  tr->seq = pd->seq;
	  tr->sa_seq = sa0->last_seq;
	  tr->sa_seq_hi = sa0->seq_hi;
	}

      /* next */
      n_left -= 1;
      next += 1;
      pd += 1;
      pd2 += 1;
      b += 1;
    }
  n_left = from_frame->n_vectors;
  vlib_node_increment_counter (vm, node->node_index,
			       ESP_DECRYPT_ERROR_RX_PKTS, n_left);

  vlib_buffer_enqueue_to_next (vm, node, from, nexts, n_left);

  return n_left;
}
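/* Post node input function: finalize packets whose decryption completed
 * asynchronously, using the packet data stashed in the buffer's post-data
 * area by esp_decrypt_prepare_async_frame(). */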
always_inline uword
esp_decrypt_post_inline (vlib_main_t * vm,
			 vlib_node_runtime_t * node,
			 vlib_frame_t * from_frame, int is_ip6, int is_tun)
{
  ipsec_main_t *im = &ipsec_main;
  u32 *from = vlib_frame_vector_args (from_frame);
  u32 n_left = from_frame->n_vectors;
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
  u16 nexts[VLIB_FRAME_SIZE], *next = nexts;
  vlib_get_buffers (vm, from, b, n_left);

  while (n_left > 0)
    {
      esp_decrypt_packet_data_t *pd = &(esp_post_data (b[0]))->decrypt_data;

      if (n_left > 2)
	{
	  vlib_prefetch_buffer_header (b[2], LOAD);
	  vlib_prefetch_buffer_header (b[1], LOAD);
	}

      if (!pd->is_chain)
	esp_decrypt_post_crypto (vm, node, pd, 0, b[0], next, is_ip6, is_tun,
				 1);
      else
	{
	  esp_decrypt_packet_data2_t *pd2 = esp_post_data2 (b[0]);
	  esp_decrypt_post_crypto (vm, node, pd, pd2, b[0], next, is_ip6,
				   is_tun, 1);
	}

      /* trace: */
      if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
	{
	  ipsec_sa_t *sa0 = pool_elt_at_index (im->sad, pd->sa_index);
	  esp_decrypt_trace_t *tr;
	  esp_decrypt_packet_data_t *async_pd =
	    &(esp_post_data (b[0]))->decrypt_data;
	  tr = vlib_add_trace (vm, node, b[0], sizeof (*tr));
	  sa0 = pool_elt_at_index (im->sad, async_pd->sa_index);

	  tr->crypto_alg = sa0->crypto_alg;
	  tr->integ_alg = sa0->integ_alg;
	  tr->seq = pd->seq;
	  tr->sa_seq = sa0->last_seq;
	  tr->sa_seq_hi = sa0->seq_hi;
	}

      n_left -= 1;
      next += 1;
      b += 1;
    }

  n_left = from_frame->n_vectors;
  vlib_node_increment_counter (vm, node->node_index,
			       ESP_DECRYPT_ERROR_RX_POST_PKTS, n_left);

  vlib_buffer_enqueue_to_next (vm, node, from, nexts, n_left);

  return n_left;
}
VLIB_NODE_FN (esp4_decrypt_node) (vlib_main_t * vm,
				  vlib_node_runtime_t * node,
				  vlib_frame_t * from_frame)
{
  return esp_decrypt_inline (vm, node, from_frame, 0, 0,
			     esp_decrypt_async_next.esp4_post_next);
}

VLIB_NODE_FN (esp4_decrypt_post_node) (vlib_main_t * vm,
				       vlib_node_runtime_t * node,
				       vlib_frame_t * from_frame)
{
  return esp_decrypt_post_inline (vm, node, from_frame, 0, 0);
}

VLIB_NODE_FN (esp4_decrypt_tun_node) (vlib_main_t * vm,
				      vlib_node_runtime_t * node,
				      vlib_frame_t * from_frame)
{
  return esp_decrypt_inline (vm, node, from_frame, 0, 1,
			     esp_decrypt_async_next.esp4_tun_post_next);
}

VLIB_NODE_FN (esp4_decrypt_tun_post_node) (vlib_main_t * vm,
					   vlib_node_runtime_t * node,
					   vlib_frame_t * from_frame)
{
  return esp_decrypt_post_inline (vm, node, from_frame, 0, 1);
}

VLIB_NODE_FN (esp6_decrypt_node) (vlib_main_t * vm,
				  vlib_node_runtime_t * node,
				  vlib_frame_t * from_frame)
{
  return esp_decrypt_inline (vm, node, from_frame, 1, 0,
			     esp_decrypt_async_next.esp6_post_next);
}

VLIB_NODE_FN (esp6_decrypt_post_node) (vlib_main_t * vm,
				       vlib_node_runtime_t * node,
				       vlib_frame_t * from_frame)
{
  return esp_decrypt_post_inline (vm, node, from_frame, 1, 0);
}

VLIB_NODE_FN (esp6_decrypt_tun_node) (vlib_main_t * vm,
				      vlib_node_runtime_t * node,
				      vlib_frame_t * from_frame)
{
  return esp_decrypt_inline (vm, node, from_frame, 1, 1,
			     esp_decrypt_async_next.esp6_tun_post_next);
}

VLIB_NODE_FN (esp6_decrypt_tun_post_node) (vlib_main_t * vm,
					   vlib_node_runtime_t * node,
					   vlib_frame_t * from_frame)
{
  return esp_decrypt_post_inline (vm, node, from_frame, 1, 1);
}
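/* The pending node parks packets that are in-flight in an async crypto
 * frame; it forwards nothing itself, as packets are re-enqueued towards
 * the post nodes when the async engine completes the frame. */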
VLIB_NODE_FN (esp_decrypt_pending_node) (vlib_main_t * vm,
					 vlib_node_runtime_t * node,
					 vlib_frame_t * from_frame)
{
  return from_frame->n_vectors;
}
VLIB_REGISTER_NODE (esp_decrypt_pending_node) = {
  .name = "esp-decrypt-pending",
  .vector_size = sizeof (u32),
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_next_nodes = 0
};

VLIB_REGISTER_NODE (esp4_decrypt_node) = {
  .name = "esp4-decrypt",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_decrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(esp_decrypt_error_strings),
  .error_strings = esp_decrypt_error_strings,

  .n_next_nodes = ESP_DECRYPT_N_NEXT,
  .next_nodes = {
    [ESP_DECRYPT_NEXT_DROP] = "ip4-drop",
    [ESP_DECRYPT_NEXT_IP4_INPUT] = "ip4-input-no-checksum",
    [ESP_DECRYPT_NEXT_IP6_INPUT] = "ip6-input",
    [ESP_DECRYPT_NEXT_L2_INPUT] = "l2-input",
    [ESP_DECRYPT_NEXT_HANDOFF] = "esp4-decrypt-handoff",
    [ESP_DECRYPT_NEXT_PENDING] = "esp-decrypt-pending"
  },
};

VLIB_REGISTER_NODE (esp4_decrypt_post_node) = {
  .name = "esp4-decrypt-post",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_decrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(esp_decrypt_error_strings),
  .error_strings = esp_decrypt_error_strings,

  .sibling_of = "esp4-decrypt",
};

VLIB_REGISTER_NODE (esp6_decrypt_node) = {
  .name = "esp6-decrypt",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_decrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(esp_decrypt_error_strings),
  .error_strings = esp_decrypt_error_strings,

  .n_next_nodes = ESP_DECRYPT_N_NEXT,
  .next_nodes = {
    [ESP_DECRYPT_NEXT_DROP] = "ip6-drop",
    [ESP_DECRYPT_NEXT_IP4_INPUT] = "ip4-input-no-checksum",
    [ESP_DECRYPT_NEXT_IP6_INPUT] = "ip6-input",
    [ESP_DECRYPT_NEXT_L2_INPUT] = "l2-input",
    [ESP_DECRYPT_NEXT_HANDOFF] = "esp6-decrypt-handoff",
    [ESP_DECRYPT_NEXT_PENDING] = "esp-decrypt-pending"
  },
};

VLIB_REGISTER_NODE (esp6_decrypt_post_node) = {
  .name = "esp6-decrypt-post",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_decrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(esp_decrypt_error_strings),
  .error_strings = esp_decrypt_error_strings,

  .sibling_of = "esp6-decrypt",
};

VLIB_REGISTER_NODE (esp4_decrypt_tun_node) = {
  .name = "esp4-decrypt-tun",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_decrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN(esp_decrypt_error_strings),
  .error_strings = esp_decrypt_error_strings,
  .n_next_nodes = ESP_DECRYPT_N_NEXT,
  .next_nodes = {
    [ESP_DECRYPT_NEXT_DROP] = "ip4-drop",
    [ESP_DECRYPT_NEXT_IP4_INPUT] = "ip4-input-no-checksum",
    [ESP_DECRYPT_NEXT_IP6_INPUT] = "ip6-input",
    [ESP_DECRYPT_NEXT_L2_INPUT] = "l2-input",
    [ESP_DECRYPT_NEXT_HANDOFF] = "esp4-decrypt-tun-handoff",
    [ESP_DECRYPT_NEXT_PENDING] = "esp-decrypt-pending"
  },
};

VLIB_REGISTER_NODE (esp4_decrypt_tun_post_node) = {
  .name = "esp4-decrypt-tun-post",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_decrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(esp_decrypt_error_strings),
  .error_strings = esp_decrypt_error_strings,

  .sibling_of = "esp4-decrypt-tun",
};

VLIB_REGISTER_NODE (esp6_decrypt_tun_node) = {
  .name = "esp6-decrypt-tun",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_decrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN(esp_decrypt_error_strings),
  .error_strings = esp_decrypt_error_strings,
  .n_next_nodes = ESP_DECRYPT_N_NEXT,
  .next_nodes = {
    [ESP_DECRYPT_NEXT_DROP] = "ip6-drop",
    [ESP_DECRYPT_NEXT_IP4_INPUT] = "ip4-input-no-checksum",
    [ESP_DECRYPT_NEXT_IP6_INPUT] = "ip6-input",
    [ESP_DECRYPT_NEXT_L2_INPUT] = "l2-input",
    [ESP_DECRYPT_NEXT_HANDOFF] = "esp6-decrypt-tun-handoff",
    [ESP_DECRYPT_NEXT_PENDING] = "esp-decrypt-pending"
  },
};

VLIB_REGISTER_NODE (esp6_decrypt_tun_post_node) = {
  .name = "esp6-decrypt-tun-post",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_decrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(esp_decrypt_error_strings),
  .error_strings = esp_decrypt_error_strings,

  .sibling_of = "esp6-decrypt-tun",
};
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */