/*
 * esp_decrypt.c : IPSec ESP decrypt node
 *
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vnet/vnet.h>
#include <vnet/api_errno.h>
#include <vnet/ip/ip.h>
#include <vnet/l2/l2_input.h>

#include <vnet/ipsec/ipsec.h>
#include <vnet/ipsec/esp.h>
#include <vnet/ipsec/ipsec_io.h>
#include <vnet/ipsec/ipsec_tun.h>
#include <vnet/ipsec/ipsec.api_enum.h>

#include <vnet/gre/packet.h>
#define foreach_esp_decrypt_next                                              \
  _ (DROP, "error-drop")                                                      \
  _ (IP4_INPUT, "ip4-input-no-checksum")                                      \
  _ (IP6_INPUT, "ip6-input")                                                  \
  _ (L2_INPUT, "l2-input")                                                    \
  _ (MPLS_INPUT, "mpls-input")                                                \
  _ (HANDOFF, "handoff")

#define _(v, s) ESP_DECRYPT_NEXT_##v,
typedef enum
{
  foreach_esp_decrypt_next
#undef _
    ESP_DECRYPT_N_NEXT,
} esp_decrypt_next_t;

#define foreach_esp_decrypt_post_next                                         \
  _ (DROP, "error-drop")                                                      \
  _ (IP4_INPUT, "ip4-input-no-checksum")                                      \
  _ (IP6_INPUT, "ip6-input")                                                  \
  _ (MPLS_INPUT, "mpls-input")                                                \
  _ (L2_INPUT, "l2-input")

#define _(v, s) ESP_DECRYPT_POST_NEXT_##v,
typedef enum
{
  foreach_esp_decrypt_post_next
#undef _
    ESP_DECRYPT_POST_N_NEXT,
} esp_decrypt_post_next_t;
typedef struct
{
  u32 seq;
  u32 sa_seq;
  u32 sa_seq_hi;
  u32 pkt_seq_hi;
  ipsec_crypto_alg_t crypto_alg;
  ipsec_integ_alg_t integ_alg;
} esp_decrypt_trace_t;

typedef vl_counter_esp_decrypt_enum_t esp_decrypt_error_t;
/* The number of bytes in the high half of the extended sequence number */
#define N_HI_ESN_BYTES 4
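
/*
 * With ESN (RFC 4303), only the low 32 bits of the 64-bit sequence number
 * travel in the ESP header; the high 32 bits (seq_hi) must nonetheless be
 * covered by the integrity check. The helpers below therefore splice the
 * N_HI_ESN_BYTES of seq_hi in after the ciphertext, in front of the ICV.
 */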
/* packet trace format function */
static u8 *
format_esp_decrypt_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  esp_decrypt_trace_t *t = va_arg (*args, esp_decrypt_trace_t *);

  s = format (s,
              "esp: crypto %U integrity %U pkt-seq %d sa-seq %u sa-seq-hi %u "
              "pkt-seq-hi %u",
              format_ipsec_crypto_alg, t->crypto_alg, format_ipsec_integ_alg,
              t->integ_alg, t->seq, t->sa_seq, t->sa_seq_hi, t->pkt_seq_hi);
  return s;
}

#define ESP_ENCRYPT_PD_F_FD_TRANSPORT (1 << 2)
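
/* Walk the completed synchronous crypto ops; any op that did not finish
 * with COMPLETED status marks its buffer for drop, distinguishing a bad
 * ICV (error code 'e' supplied by the caller) from a generic crypto
 * engine failure. */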
static_always_inline void
esp_process_ops (vlib_main_t * vm, vlib_node_runtime_t * node,
                 vnet_crypto_op_t * ops, vlib_buffer_t * b[], u16 * nexts,
                 int e)
{
  vnet_crypto_op_t *op = ops;
  u32 n_fail, n_ops = vec_len (ops);

  if (n_ops == 0)
    return;

  n_fail = n_ops - vnet_crypto_process_ops (vm, op, n_ops);

  while (n_fail)
    {
      ASSERT (op - ops < n_ops);
      if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
        {
          u32 err, bi = op->user_data;
          if (op->status == VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC)
            err = e;
          else
            err = ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR;
          b[bi]->error = node->errors[err];
          nexts[bi] = ESP_DECRYPT_NEXT_DROP;
          n_fail--;
        }
      op++;
    }
}
static_always_inline void
esp_process_chained_ops (vlib_main_t * vm, vlib_node_runtime_t * node,
                         vnet_crypto_op_t * ops, vlib_buffer_t * b[],
                         u16 * nexts, vnet_crypto_op_chunk_t * chunks, int e)
{
  vnet_crypto_op_t *op = ops;
  u32 n_fail, n_ops = vec_len (ops);

  if (PREDICT_TRUE (n_ops == 0))
    return;

  n_fail = n_ops - vnet_crypto_process_chained_ops (vm, op, chunks, n_ops);

  while (n_fail)
    {
      ASSERT (op - ops < n_ops);
      if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
        {
          u32 err, bi = op->user_data;
          if (op->status == VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC)
            err = e;
          else
            err = ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR;
          b[bi]->error = node->errors[err];
          nexts[bi] = ESP_DECRYPT_NEXT_DROP;
          n_fail--;
        }
      op++;
    }
}
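
/* Trim 'tail' bytes (pad, ESP footer and ICV) from the end of a possibly
 * chained buffer; if the tail spans the last buffer boundary, the final
 * buffer is freed and the chain shortened. */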
static_always_inline void
esp_remove_tail (vlib_main_t * vm, vlib_buffer_t * b, vlib_buffer_t * last,
                 u16 tail)
{
  vlib_buffer_t *before_last = b;

  if (last->current_length > tail)
    {
      last->current_length -= tail;
      return;
    }
  ASSERT (b->flags & VLIB_BUFFER_NEXT_PRESENT);

  while (b->flags & VLIB_BUFFER_NEXT_PRESENT)
    {
      before_last = b;
      b = vlib_get_buffer (vm, b->next_buffer);
    }
  before_last->current_length -= tail - last->current_length;
  vlib_buffer_free_one (vm, before_last->next_buffer);
  before_last->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
}
/* ICV is split across the last two buffers, so move it to the last buffer
   and return a pointer to it */
static_always_inline u8 *
esp_move_icv (vlib_main_t * vm, vlib_buffer_t * first,
              esp_decrypt_packet_data_t * pd,
              esp_decrypt_packet_data2_t * pd2, u16 icv_sz, u16 * dif)
{
  vlib_buffer_t *before_last, *bp;
  u16 last_sz = pd2->lb->current_length;
  u16 first_sz = icv_sz - last_sz;

  bp = before_last = first;
  while (bp->flags & VLIB_BUFFER_NEXT_PRESENT)
    {
      before_last = bp;
      bp = vlib_get_buffer (vm, bp->next_buffer);
    }

  u8 *lb_curr = vlib_buffer_get_current (pd2->lb);
  memmove (lb_curr + first_sz, lb_curr, last_sz);
  clib_memcpy_fast (lb_curr, vlib_buffer_get_tail (before_last) - first_sz,
                    first_sz);
  before_last->current_length -= first_sz;
  if (before_last == first)
    pd->current_length -= first_sz;
  clib_memset (vlib_buffer_get_tail (before_last), 0, first_sz);
  if (dif)
    dif[0] = first_sz;
  pd2->lb = before_last;
  pd2->icv_removed = 1;
  pd2->free_buffer_index = before_last->next_buffer;
  before_last->flags &= ~VLIB_BUFFER_NEXT_PRESENT;

  return lb_curr;
}
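
/* Append the high 32 bits of the ESN after the payload so they are
 * included in the integrity check; returns N_HI_ESN_BYTES when the SA
 * uses ESN, 0 otherwise. */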
static_always_inline u16
esp_insert_esn (vlib_main_t *vm, ipsec_sa_t *sa, esp_decrypt_packet_data_t *pd,
                esp_decrypt_packet_data2_t *pd2, u32 *data_len, u8 **digest,
                u16 *len, vlib_buffer_t *b, u8 *payload)
{
  if (!ipsec_sa_is_set_USE_ESN (sa))
    return 0;
  /* shift ICV by 4 bytes to insert ESN */
  u32 seq_hi = clib_host_to_net_u32 (pd->seq_hi);
  u8 tmp[ESP_MAX_ICV_SIZE];

  if (pd2->icv_removed)
    {
      u16 space_left = vlib_buffer_space_left_at_end (vm, pd2->lb);
      if (space_left >= N_HI_ESN_BYTES)
        {
          clib_memcpy_fast (vlib_buffer_get_tail (pd2->lb), &seq_hi,
                            N_HI_ESN_BYTES);
          *data_len += N_HI_ESN_BYTES;
        }
      else
        return N_HI_ESN_BYTES;

      len[0] = b->current_length;
    }
  else
    {
      clib_memcpy_fast (tmp, payload + len[0], ESP_MAX_ICV_SIZE);
      clib_memcpy_fast (payload + len[0], &seq_hi, N_HI_ESN_BYTES);
      clib_memcpy_fast (payload + len[0] + N_HI_ESN_BYTES, tmp,
                        ESP_MAX_ICV_SIZE);
      *data_len += N_HI_ESN_BYTES;
      *digest += N_HI_ESN_BYTES;
    }
  return N_HI_ESN_BYTES;
}
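
/* Combination helper: reassemble a split ICV into the last buffer and,
 * if the SA uses ESN, append seq_hi behind it; sets extra_esn when the
 * ESN had to be stashed in the unlinked buffer instead. */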
static_always_inline u8 *
esp_move_icv_esn (vlib_main_t * vm, vlib_buffer_t * first,
                  esp_decrypt_packet_data_t * pd,
                  esp_decrypt_packet_data2_t * pd2, u16 icv_sz,
                  ipsec_sa_t * sa, u8 * extra_esn, u32 * len)
{
  u16 dif = 0;

  u8 *digest = esp_move_icv (vm, first, pd, pd2, icv_sz, &dif);
  if (dif)
    *len -= dif;

  if (ipsec_sa_is_set_USE_ESN (sa))
    {
      u32 seq_hi = clib_host_to_net_u32 (pd->seq_hi);
      u16 space_left = vlib_buffer_space_left_at_end (vm, pd2->lb);

      if (space_left >= N_HI_ESN_BYTES)
        {
          clib_memcpy_fast (vlib_buffer_get_tail (pd2->lb), &seq_hi,
                            N_HI_ESN_BYTES);
          *len += N_HI_ESN_BYTES;
        }
      else
        {
          /* no space for ESN at the tail, use the next buffer
           * (with the ICV data) */
          ASSERT (pd2->icv_removed);
          vlib_buffer_t *tmp = vlib_get_buffer (vm, pd2->free_buffer_index);
          clib_memcpy_fast (vlib_buffer_get_current (tmp) - N_HI_ESN_BYTES,
                            &seq_hi, N_HI_ESN_BYTES);
          extra_esn[0] = 1;
        }
    }
  return digest;
}
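
/* Build the chunk vector describing the integrity-covered data of a
 * chained buffer, including the trailing seq_hi for ESN SAs; returns -1
 * if a scratch buffer for the ESN could not be allocated. */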
static_always_inline int
esp_decrypt_chain_integ (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
                         const esp_decrypt_packet_data_t *pd,
                         esp_decrypt_packet_data2_t *pd2, ipsec_sa_t *sa0,
                         vlib_buffer_t *b, u8 icv_sz, u8 *start_src,
                         u32 start_len, u8 **digest, u16 *n_ch,
                         u32 *integ_total_len)
{
  vnet_crypto_op_chunk_t *ch;
  vlib_buffer_t *cb = vlib_get_buffer (vm, b->next_buffer);
  u16 n_chunks = 1;
  u32 total_len;

  vec_add2 (ptd->chunks, ch, 1);
  total_len = ch->len = start_len;
  ch->src = start_src;

  while (1)
    {
      vec_add2 (ptd->chunks, ch, 1);
      n_chunks += 1;
      ch->src = vlib_buffer_get_current (cb);
      if (pd2->lb == cb)
        {
          if (pd2->icv_removed)
            ch->len = cb->current_length;
          else
            ch->len = cb->current_length - icv_sz;
          if (ipsec_sa_is_set_USE_ESN (sa0))
            {
              u32 seq_hi = clib_host_to_net_u32 (pd->seq_hi);
              u8 tmp[ESP_MAX_ICV_SIZE];
              u8 *esn;
              vlib_buffer_t *tmp_b;
              u16 space_left = vlib_buffer_space_left_at_end (vm, pd2->lb);
              if (space_left < N_HI_ESN_BYTES)
                {
                  if (pd2->icv_removed)
                    {
                      /* use pre-data area from the last buffer
                         that was removed from the chain */
                      tmp_b = vlib_get_buffer (vm, pd2->free_buffer_index);
                      esn = tmp_b->data - N_HI_ESN_BYTES;
                    }
                  else
                    {
                      /* no space, need to allocate new buffer */
                      u32 tmp_bi = 0;
                      if (vlib_buffer_alloc (vm, &tmp_bi, 1) != 1)
                        return -1;
                      tmp_b = vlib_get_buffer (vm, tmp_bi);
                      esn = tmp_b->data;
                      pd2->free_buffer_index = tmp_bi;
                    }
                  clib_memcpy_fast (esn, &seq_hi, N_HI_ESN_BYTES);

                  vec_add2 (ptd->chunks, ch, 1);
                  n_chunks += 1;
                  ch->src = esn;
                  ch->len = N_HI_ESN_BYTES;
                }
              else
                {
                  if (pd2->icv_removed)
                    {
                      clib_memcpy_fast (vlib_buffer_get_tail (pd2->lb),
                                        &seq_hi, N_HI_ESN_BYTES);
                    }
                  else
                    {
                      clib_memcpy_fast (tmp, *digest, ESP_MAX_ICV_SIZE);
                      clib_memcpy_fast (*digest, &seq_hi, N_HI_ESN_BYTES);
                      clib_memcpy_fast (*digest + N_HI_ESN_BYTES, tmp,
                                        ESP_MAX_ICV_SIZE);
                      *digest += N_HI_ESN_BYTES;
                    }
                  ch->len += N_HI_ESN_BYTES;
                }
            }
          total_len += ch->len;
          break;
        }
      else
        total_len += ch->len = cb->current_length;

      if (!(cb->flags & VLIB_BUFFER_NEXT_PRESENT))
        break;

      cb = vlib_get_buffer (vm, cb->next_buffer);
    }

  if (n_ch)
    *n_ch = n_chunks;
  if (integ_total_len)
    *integ_total_len = total_len;

  return 0;
}
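
/* Build the chunk vector for the cipher pass over a chained buffer and
 * locate the AEAD tag; returns the total number of bytes to decrypt. */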
static_always_inline u32
esp_decrypt_chain_crypto (vlib_main_t * vm, ipsec_per_thread_data_t * ptd,
                          esp_decrypt_packet_data_t * pd,
                          esp_decrypt_packet_data2_t * pd2,
                          ipsec_sa_t * sa0, vlib_buffer_t * b, u8 icv_sz,
                          u8 * start, u32 start_len, u8 ** tag, u16 * n_ch)
{
  vnet_crypto_op_chunk_t *ch;
  vlib_buffer_t *cb = b;
  u16 n_chunks = 1;
  u32 total_len;

  vec_add2 (ptd->chunks, ch, 1);
  total_len = ch->len = start_len;
  ch->src = ch->dst = start;
  cb = vlib_get_buffer (vm, cb->next_buffer);

  while (1)
    {
      vec_add2 (ptd->chunks, ch, 1);
      n_chunks += 1;
      ch->src = ch->dst = vlib_buffer_get_current (cb);
      if (pd2->lb == cb)
        {
          if (ipsec_sa_is_set_IS_AEAD (sa0))
            {
              if (pd2->lb->current_length < icv_sz)
                {
                  u16 dif = 0;
                  *tag = esp_move_icv (vm, b, pd, pd2, icv_sz, &dif);

                  /* this chunk does not contain crypto data */
                  n_chunks -= 1;
                  /* and fix previous chunk's length as it might have
                     contained the tag */
                  ASSERT (n_chunks > 0);
                  if (pd2->lb == b)
                    {
                      total_len -= dif;
                      ch[-1].len -= dif;
                    }
                  else
                    {
                      total_len = total_len + pd2->lb->current_length -
                        ch[-1].len;
                      ch[-1].len = pd2->lb->current_length;
                    }
                  break;
                }
              else
                *tag = vlib_buffer_get_tail (pd2->lb) - icv_sz;
            }

          if (pd2->icv_removed)
            total_len += ch->len = cb->current_length;
          else
            total_len += ch->len = cb->current_length - icv_sz;
        }
      else
        total_len += ch->len = cb->current_length;

      if (!(cb->flags & VLIB_BUFFER_NEXT_PRESENT))
        break;

      cb = vlib_get_buffer (vm, cb->next_buffer);
    }

  if (n_ch)
    *n_ch = n_chunks;

  return total_len;
}
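
/* Prepare the synchronous integrity and decrypt ops for one packet and
 * append them to the per-thread op vectors (chained variants when the
 * buffer is a chain). */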
static_always_inline void
esp_decrypt_prepare_sync_op (vlib_main_t * vm, vlib_node_runtime_t * node,
                             ipsec_per_thread_data_t * ptd,
                             vnet_crypto_op_t *** crypto_ops,
                             vnet_crypto_op_t *** integ_ops,
                             vnet_crypto_op_t * op,
                             ipsec_sa_t * sa0, u8 * payload,
                             u16 len, u8 icv_sz, u8 iv_sz,
                             esp_decrypt_packet_data_t * pd,
                             esp_decrypt_packet_data2_t * pd2,
                             vlib_buffer_t * b, u16 * next, u32 index)
{
  const u8 esp_sz = sizeof (esp_header_t);

  if (PREDICT_TRUE (sa0->integ_op_id != VNET_CRYPTO_OP_NONE))
    {
      vnet_crypto_op_init (op, sa0->integ_op_id);
      op->key_index = sa0->integ_key_index;
      op->src = payload;
      op->flags = VNET_CRYPTO_OP_FLAG_HMAC_CHECK;
      op->user_data = index;
      op->digest = payload + len;
      op->digest_len = icv_sz;
      op->len = len;

      if (pd->is_chain)
        {
          /* buffer is chained */
          op->len = pd->current_length;

          /* special case when ICV is split and needs to be reassembled
           * first -> move it to the last buffer. Also take into account
           * that ESN needs to be added after encrypted data and may or
           * may not fit in the tail.*/
          if (pd2->lb->current_length < icv_sz)
            {
              u8 extra_esn = 0;
              op->digest =
                esp_move_icv_esn (vm, b, pd, pd2, icv_sz, sa0,
                                  &extra_esn, &op->len);

              if (extra_esn)
                {
                  /* esn is in the last buffer, that was unlinked from
                   * the chain */
                  op->len = b->current_length;
                }
              else
                {
                  if (pd2->lb == b)
                    {
                      /* we now have a single buffer of crypto data, adjust
                       * the length (second buffer contains only ICV) */
                      *integ_ops = &ptd->integ_ops;
                      *crypto_ops = &ptd->crypto_ops;
                      len = b->current_length;
                      goto out;
                    }
                }
            }
          else
            op->digest = vlib_buffer_get_tail (pd2->lb) - icv_sz;

          op->flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
          op->chunk_index = vec_len (ptd->chunks);
          if (esp_decrypt_chain_integ (vm, ptd, pd, pd2, sa0, b, icv_sz,
                                       payload, pd->current_length,
                                       &op->digest, &op->n_chunks, 0) < 0)
            {
              b->error = node->errors[ESP_DECRYPT_ERROR_NO_BUFFERS];
              next[0] = ESP_DECRYPT_NEXT_DROP;
              return;
            }
        }
      else
        esp_insert_esn (vm, sa0, pd, pd2, &op->len, &op->digest, &len, b,
                        payload);
    out:
      vec_add_aligned (*(integ_ops[0]), op, 1, CLIB_CACHE_LINE_BYTES);
    }

  payload += esp_sz;
  len -= esp_sz;

  if (sa0->crypto_dec_op_id != VNET_CRYPTO_OP_NONE)
    {
      vnet_crypto_op_init (op, sa0->crypto_dec_op_id);
      op->key_index = sa0->crypto_key_index;
      op->iv = payload;

      if (ipsec_sa_is_set_IS_CTR (sa0))
        {
          /* construct nonce in a scratch space in front of the IP header */
          esp_ctr_nonce_t *nonce =
            (esp_ctr_nonce_t *) (payload - esp_sz - pd->hdr_sz -
                                 sizeof (*nonce));
          if (ipsec_sa_is_set_IS_AEAD (sa0))
            {
              /* construct aad in a scratch space in front of the nonce */
              esp_header_t *esp0 = (esp_header_t *) (payload - esp_sz);
              op->aad = (u8 *) nonce - sizeof (esp_aead_t);
              op->aad_len = esp_aad_fill (op->aad, esp0, sa0, pd->seq_hi);
              op->tag = payload + len;
              op->tag_len = 16;
            }
          else
            {
              nonce->ctr = clib_host_to_net_u32 (1);
            }
          nonce->salt = sa0->salt;
          ASSERT (sizeof (u64) == iv_sz);
          nonce->iv = *(u64 *) op->iv;
          op->iv = (u8 *) nonce;
        }
      op->src = op->dst = payload += iv_sz;
      op->len = len - iv_sz;
      op->user_data = index;

      if (pd->is_chain && (pd2->lb != b))
        {
          /* buffer is chained */
          op->flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
          op->chunk_index = vec_len (ptd->chunks);
          esp_decrypt_chain_crypto (vm, ptd, pd, pd2, sa0, b, icv_sz,
                                    payload, len - pd->iv_sz + pd->icv_sz,
                                    &op->tag, &op->n_chunks);
        }

      vec_add_aligned (*(crypto_ops[0]), op, 1, CLIB_CACHE_LINE_BYTES);
    }
}
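
/* Same preparation for the async path: compute offsets and lengths
 * relative to b->data, stash pd/pd2 in the buffer for the post node, and
 * enqueue the op into the open async frame. */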
static_always_inline esp_decrypt_error_t
esp_decrypt_prepare_async_frame (vlib_main_t *vm, vlib_node_runtime_t *node,
                                 ipsec_per_thread_data_t *ptd,
                                 vnet_crypto_async_frame_t *f, ipsec_sa_t *sa0,
                                 u8 *payload, u16 len, u8 icv_sz, u8 iv_sz,
                                 esp_decrypt_packet_data_t *pd,
                                 esp_decrypt_packet_data2_t *pd2, u32 bi,
                                 vlib_buffer_t *b, u16 *next, u16 async_next)
{
  const u8 esp_sz = sizeof (esp_header_t);
  esp_decrypt_packet_data_t *async_pd = &(esp_post_data (b))->decrypt_data;
  esp_decrypt_packet_data2_t *async_pd2 = esp_post_data2 (b);
  u8 *tag = payload + len, *iv = payload + esp_sz, *aad = 0;
  u32 key_index;
  u32 crypto_len, integ_len = 0;
  i16 crypto_start_offset, integ_start_offset = 0;
  u8 flags = 0;

  if (!ipsec_sa_is_set_IS_AEAD (sa0))
    {
      /* linked algs */
      key_index = sa0->linked_key_index;
      integ_start_offset = payload - b->data;
      integ_len = len;
      if (PREDICT_TRUE (sa0->integ_op_id != VNET_CRYPTO_OP_NONE))
        flags |= VNET_CRYPTO_OP_FLAG_HMAC_CHECK;

      if (pd->is_chain)
        {
          /* buffer is chained */
          integ_len = pd->current_length;

          /* special case when ICV is split and needs to be reassembled
           * first -> move it to the last buffer. Also take into account
           * that ESN needs to be added after encrypted data and may or
           * may not fit in the tail.*/
          if (pd2->lb->current_length < icv_sz)
            {
              u8 extra_esn = 0;
              tag = esp_move_icv_esn (vm, b, pd, pd2, icv_sz, sa0,
                                      &extra_esn, &integ_len);

              if (extra_esn)
                {
                  /* esn is in the last buffer, that was unlinked from
                   * the chain */
                  integ_len = b->current_length;
                }
              else
                {
                  if (pd2->lb == b)
                    {
                      /* we now have a single buffer of crypto data, adjust
                       * the length (second buffer contains only ICV) */
                      len = b->current_length;
                      goto out;
                    }
                }
            }
          else
            tag = vlib_buffer_get_tail (pd2->lb) - icv_sz;

          flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
          if (esp_decrypt_chain_integ (vm, ptd, pd, pd2, sa0, b, icv_sz,
                                       payload, pd->current_length, &tag, 0,
                                       &integ_len) < 0)
            {
              /* allocate buffer failed, will not add to frame and drop */
              return (ESP_DECRYPT_ERROR_NO_BUFFERS);
            }
        }
      else
        esp_insert_esn (vm, sa0, pd, pd2, &integ_len, &tag, &len, b, payload);
    }
  else
    key_index = sa0->crypto_key_index;

out:
  /* crypto */
  payload += esp_sz;
  len -= esp_sz;

  if (ipsec_sa_is_set_IS_CTR (sa0))
    {
      /* construct nonce in a scratch space in front of the IP header */
      esp_ctr_nonce_t *nonce =
        (esp_ctr_nonce_t *) (payload - esp_sz - pd->hdr_sz - sizeof (*nonce));
      if (ipsec_sa_is_set_IS_AEAD (sa0))
        {
          /* construct aad in a scratch space in front of the nonce */
          esp_header_t *esp0 = (esp_header_t *) (payload - esp_sz);
          aad = (u8 *) nonce - sizeof (esp_aead_t);
          esp_aad_fill (aad, esp0, sa0, pd->seq_hi);
        }
      else
        {
          nonce->ctr = clib_host_to_net_u32 (1);
        }
      nonce->salt = sa0->salt;
      ASSERT (sizeof (u64) == iv_sz);
      nonce->iv = *(u64 *) iv;
      iv = (u8 *) nonce;
    }

  crypto_start_offset = (payload += iv_sz) - b->data;
  crypto_len = len - iv_sz;

  if (pd->is_chain && (pd2->lb != b))
    {
      /* buffer is chained */
      flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;

      crypto_len = esp_decrypt_chain_crypto (vm, ptd, pd, pd2, sa0, b, icv_sz,
                                             payload,
                                             len - pd->iv_sz + pd->icv_sz,
                                             &tag, 0);
    }

  *async_pd = *pd;
  *async_pd2 = *pd2;

  /* for AEAD integ_len - crypto_len will be negative, it is ok since it
   * is ignored by the engine. */
  vnet_crypto_async_add_to_frame (
    vm, f, key_index, crypto_len, integ_len - crypto_len, crypto_start_offset,
    integ_start_offset, bi, async_next, iv, tag, aad, flags);

  return (ESP_DECRYPT_ERROR_RX_PKTS);
}
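
/* Runs after (sync or async) crypto has succeeded: re-checks and advances
 * the anti-replay window, strips the ESP encap, and selects the next node
 * from the inner next_header. */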
static_always_inline void
esp_decrypt_post_crypto (vlib_main_t *vm, const vlib_node_runtime_t *node,
                         const u16 *next_by_next_header,
                         const esp_decrypt_packet_data_t *pd,
                         const esp_decrypt_packet_data2_t *pd2,
                         vlib_buffer_t *b, u16 *next, int is_ip6, int is_tun,
                         int is_async)
{
  ipsec_sa_t *sa0 = ipsec_sa_get (pd->sa_index);
  vlib_buffer_t *lb = b;
  const u8 esp_sz = sizeof (esp_header_t);
  const u8 tun_flags = IPSEC_SA_FLAG_IS_TUNNEL | IPSEC_SA_FLAG_IS_TUNNEL_V6;
  u8 pad_length = 0, next_header = 0;
  u16 icv_sz;

  /*
   * redo the anti-replay check
   * in this frame say we have sequence numbers, s, s+1, s+1, s+1
   * and s and s+1 are in the window. When we did the anti-replay
   * check above we did so against the state of the window (W),
   * after packet s-1. So each of the packets in the sequence will be
   * accepted.
   * This time s will be checked against Ws-1, s+1 checked against Ws
   * (i.e. the window state is updated/advanced)
   * so this time the successive s+1 packet will be dropped.
   * This is a consequence of batching the decrypts. If the
   * check-decrypt-advance process was done for each packet it would
   * be fine. But we batch the decrypts because it's much more efficient
   * to do so in SW and if we offload to HW and the process is async.
   *
   * You're probably thinking, but this means an attacker can send the
   * above sequence and cause VPP to perform decrypts that will fail,
   * and that's true. But if the attacker can determine s (a valid
   * sequence number in the window) which is non-trivial, it can generate
   * a sequence s, s+1, s+2, s+3, ... s+n and nothing will prevent any
   * implementation, sequential or batching, from decrypting these.
   */
  if (ipsec_sa_anti_replay_and_sn_advance (sa0, pd->seq, pd->seq_hi, true,
                                           NULL))
    {
      b->error = node->errors[ESP_DECRYPT_ERROR_REPLAY];
      next[0] = ESP_DECRYPT_NEXT_DROP;
      return;
    }

  u64 n_lost =
    ipsec_sa_anti_replay_advance (sa0, vm->thread_index, pd->seq, pd->seq_hi);

  vlib_prefetch_simple_counter (&ipsec_sa_lost_counters, vm->thread_index,
                                pd->sa_index);

  if (pd->is_chain)
    {
      lb = pd2->lb;
      icv_sz = pd2->icv_removed ? 0 : pd->icv_sz;
      if (pd2->free_buffer_index)
        {
          vlib_buffer_free_one (vm, pd2->free_buffer_index);
        }

      if (lb->current_length < sizeof (esp_footer_t) + icv_sz)
        {
          /* esp footer is either split in two buffers or in the before
           * last buffer */

          vlib_buffer_t *before_last = b, *bp = b;
          while (bp->flags & VLIB_BUFFER_NEXT_PRESENT)
            {
              before_last = bp;
              bp = vlib_get_buffer (vm, bp->next_buffer);
            }
          u8 *bt = vlib_buffer_get_tail (before_last);

          if (lb->current_length == icv_sz)
            {
              esp_footer_t *f = (esp_footer_t *) (bt - sizeof (*f));
              pad_length = f->pad_length;
              next_header = f->next_header;
            }
          else
            {
              pad_length = (bt - 1)[0];
              next_header = ((u8 *) vlib_buffer_get_current (lb))[0];
            }
        }
      else
        {
          esp_footer_t *f =
            (esp_footer_t *) (lb->data + lb->current_data +
                              lb->current_length - sizeof (esp_footer_t) -
                              icv_sz);
          pad_length = f->pad_length;
          next_header = f->next_header;
        }
    }
  else
    {
      icv_sz = pd->icv_sz;
      esp_footer_t *f =
        (esp_footer_t *) (lb->data + lb->current_data + lb->current_length -
                          sizeof (esp_footer_t) - icv_sz);
      pad_length = f->pad_length;
      next_header = f->next_header;
    }

  u16 adv = pd->iv_sz + esp_sz;
  u16 tail = sizeof (esp_footer_t) + pad_length + icv_sz;
  u16 tail_orig = sizeof (esp_footer_t) + pad_length + pd->icv_sz;
  b->flags &= ~VLIB_BUFFER_TOTAL_LENGTH_VALID;

  if ((pd->flags & tun_flags) == 0 && !is_tun)	/* transport mode */
    {
      u8 udp_sz = (is_ip6 == 0 && pd->flags & IPSEC_SA_FLAG_UDP_ENCAP) ?
        sizeof (udp_header_t) : 0;
      u16 ip_hdr_sz = pd->hdr_sz - udp_sz;
      u8 *old_ip = b->data + pd->current_data - ip_hdr_sz - udp_sz;
      u8 *ip = old_ip + adv + udp_sz;

      if (is_ip6 && ip_hdr_sz > 64)
        memmove (ip, old_ip, ip_hdr_sz);
      else
        clib_memcpy_le64 (ip, old_ip, ip_hdr_sz);

      b->current_data = pd->current_data + adv - ip_hdr_sz;
      b->current_length += ip_hdr_sz - adv;
      esp_remove_tail (vm, b, lb, tail);

      if (is_ip6)
        {
          ip6_header_t *ip6 = (ip6_header_t *) ip;
          u16 len = clib_net_to_host_u16 (ip6->payload_length);
          len -= adv + tail_orig;
          ip6->payload_length = clib_host_to_net_u16 (len);
          ip6->protocol = next_header;
          next[0] = ESP_DECRYPT_NEXT_IP6_INPUT;
        }
      else
        {
          ip4_header_t *ip4 = (ip4_header_t *) ip;
          ip_csum_t sum = ip4->checksum;
          u16 len = clib_net_to_host_u16 (ip4->length);
          len = clib_host_to_net_u16 (len - adv - tail_orig - udp_sz);
          sum = ip_csum_update (sum, ip4->protocol, next_header,
                                ip4_header_t, protocol);
          sum = ip_csum_update (sum, ip4->length, len, ip4_header_t, length);
          ip4->checksum = ip_csum_fold (sum);
          ip4->protocol = next_header;
          ip4->length = len;
          next[0] = ESP_DECRYPT_NEXT_IP4_INPUT;
        }
    }
  else
    {
      if (PREDICT_TRUE (next_header == IP_PROTOCOL_IP_IN_IP))
        {
          next[0] = ESP_DECRYPT_NEXT_IP4_INPUT;
          b->current_data = pd->current_data + adv;
          b->current_length = pd->current_length - adv;
          esp_remove_tail (vm, b, lb, tail);
        }
      else if (next_header == IP_PROTOCOL_IPV6)
        {
          next[0] = ESP_DECRYPT_NEXT_IP6_INPUT;
          b->current_data = pd->current_data + adv;
          b->current_length = pd->current_length - adv;
          esp_remove_tail (vm, b, lb, tail);
        }
      else if (next_header == IP_PROTOCOL_MPLS_IN_IP)
        {
          next[0] = ESP_DECRYPT_NEXT_MPLS_INPUT;
          b->current_data = pd->current_data + adv;
          b->current_length = pd->current_length - adv;
          esp_remove_tail (vm, b, lb, tail);
        }
      else if (is_tun && next_header == IP_PROTOCOL_GRE)
        {
          gre_header_t *gre;

          b->current_data = pd->current_data + adv;
          b->current_length = pd->current_length - adv - tail;

          gre = vlib_buffer_get_current (b);

          vlib_buffer_advance (b, sizeof (*gre));

          switch (clib_net_to_host_u16 (gre->protocol))
            {
            case GRE_PROTOCOL_teb:
              vnet_update_l2_len (b);
              next[0] = ESP_DECRYPT_NEXT_L2_INPUT;
              break;
            case GRE_PROTOCOL_ip4:
              next[0] = ESP_DECRYPT_NEXT_IP4_INPUT;
              break;
            case GRE_PROTOCOL_ip6:
              next[0] = ESP_DECRYPT_NEXT_IP6_INPUT;
              break;
            default:
              b->error = node->errors[ESP_DECRYPT_ERROR_UNSUP_PAYLOAD];
              next[0] = ESP_DECRYPT_NEXT_DROP;
              break;
            }
        }
      else if ((next[0] = vec_elt (next_by_next_header, next_header)) !=
               (u16) ~0)
        {
          b->current_data = pd->current_data + adv;
          b->current_length = pd->current_length - adv;
          esp_remove_tail (vm, b, lb, tail);
        }
      else
        {
          next[0] = ESP_DECRYPT_NEXT_DROP;
          b->error = node->errors[ESP_DECRYPT_ERROR_UNSUP_PAYLOAD];
          return;
        }

      if (is_tun)
        {
          if (ipsec_sa_is_set_IS_PROTECT (sa0))
            {
              /*
               * There are two encap possibilities
               * 1) the tunnel and the SA are providing encap, i.e. it's
               *   MAC | SA-IP | TUN-IP | ESP | PAYLOAD
               * implying the SA is in tunnel mode (on a tunnel interface)
               * 2) only the tunnel provides encap
               *   MAC | TUN-IP | ESP | PAYLOAD
               * implying the SA is in transport mode.
               *
               * For 2) we need only strip the tunnel encap and we're good.
               *  since the tunnel and crypto encap (in the tun-protect
               * object) are the same and we verified above that these match
               * for 1) we need to strip the SA-IP outer headers, to
               * reveal the tunnel IP and then check that this matches
               * the configured tunnel.
               */
              const ipsec_tun_protect_t *itp;

              itp =
                ipsec_tun_protect_get (vnet_buffer (b)->ipsec.protect_index);

              if (PREDICT_TRUE (next_header == IP_PROTOCOL_IP_IN_IP))
                {
                  const ip4_header_t *ip4;

                  ip4 = vlib_buffer_get_current (b);

                  if (!ip46_address_is_equal_v4 (&itp->itp_tun.src,
                                                 &ip4->dst_address) ||
                      !ip46_address_is_equal_v4 (&itp->itp_tun.dst,
                                                 &ip4->src_address))
                    {
                      next[0] = ESP_DECRYPT_NEXT_DROP;
                      b->error = node->errors[ESP_DECRYPT_ERROR_TUN_NO_PROTO];
                    }
                }
              else if (next_header == IP_PROTOCOL_IPV6)
                {
                  const ip6_header_t *ip6;

                  ip6 = vlib_buffer_get_current (b);

                  if (!ip46_address_is_equal_v6 (&itp->itp_tun.src,
                                                 &ip6->dst_address) ||
                      !ip46_address_is_equal_v6 (&itp->itp_tun.dst,
                                                 &ip6->src_address))
                    {
                      next[0] = ESP_DECRYPT_NEXT_DROP;
                      b->error = node->errors[ESP_DECRYPT_ERROR_TUN_NO_PROTO];
                    }
                }
            }
        }
    }

  if (PREDICT_FALSE (n_lost))
    vlib_increment_simple_counter (&ipsec_sa_lost_counters, vm->thread_index,
                                   pd->sa_index, n_lost);
}
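
/* Main decrypt loop: per packet, locate the SA (handing off if it is
 * owned by another thread), run the anti-replay check, then prepare
 * either sync crypto ops or an async frame entry. */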
always_inline uword
esp_decrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
                    vlib_frame_t *from_frame, int is_ip6, int is_tun,
                    u16 async_next_node)
{
  ipsec_main_t *im = &ipsec_main;
  const u16 *next_by_next_header = im->next_header_registrations;
  u32 thread_index = vm->thread_index;
  u16 len;
  ipsec_per_thread_data_t *ptd = vec_elt_at_index (im->ptd, thread_index);
  u32 *from = vlib_frame_vector_args (from_frame);
  u32 n_left = from_frame->n_vectors;
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
  vlib_buffer_t *sync_bufs[VLIB_FRAME_SIZE];
  u16 sync_nexts[VLIB_FRAME_SIZE], *sync_next = sync_nexts, n_sync = 0;
  u16 async_nexts[VLIB_FRAME_SIZE], *async_next = async_nexts;
  u16 noop_nexts[VLIB_FRAME_SIZE], *noop_next = noop_nexts, n_noop = 0;
  u32 sync_bi[VLIB_FRAME_SIZE];
  u32 noop_bi[VLIB_FRAME_SIZE];
  esp_decrypt_packet_data_t pkt_data[VLIB_FRAME_SIZE], *pd = pkt_data;
  esp_decrypt_packet_data2_t pkt_data2[VLIB_FRAME_SIZE], *pd2 = pkt_data2;
  esp_decrypt_packet_data_t cpd = { };
  u32 current_sa_index = ~0, current_sa_bytes = 0, current_sa_pkts = 0;
  const u8 esp_sz = sizeof (esp_header_t);
  ipsec_sa_t *sa0 = 0;
  vnet_crypto_op_t _op, *op = &_op;
  vnet_crypto_op_t **crypto_ops;
  vnet_crypto_op_t **integ_ops;
  int is_async = im->async_mode;
  vnet_crypto_async_op_id_t async_op = ~0;
  vnet_crypto_async_frame_t *async_frames[VNET_CRYPTO_ASYNC_OP_N_IDS];
  esp_decrypt_error_t err;

  vlib_get_buffers (vm, from, b, n_left);
  if (!is_async)
    {
      vec_reset_length (ptd->crypto_ops);
      vec_reset_length (ptd->integ_ops);
      vec_reset_length (ptd->chained_crypto_ops);
      vec_reset_length (ptd->chained_integ_ops);
    }
  vec_reset_length (ptd->async_frames);
  vec_reset_length (ptd->chunks);
  clib_memset (sync_nexts, -1, sizeof (sync_nexts));
  clib_memset (async_frames, 0, sizeof (async_frames));

  while (n_left > 0)
    {
      u8 *payload;

      err = ESP_DECRYPT_ERROR_RX_PKTS;
      if (n_left > 2)
        {
          u8 *p;
          vlib_prefetch_buffer_header (b[2], LOAD);
          p = vlib_buffer_get_current (b[1]);
          clib_prefetch_load (p);
          p -= CLIB_CACHE_LINE_BYTES;
          clib_prefetch_load (p);
        }

      u32 n_bufs = vlib_buffer_chain_linearize (vm, b[0]);
      if (n_bufs == 0)
        {
          err = ESP_DECRYPT_ERROR_NO_BUFFERS;
          esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
                              ESP_DECRYPT_NEXT_DROP);
          goto next;
        }

      if (vnet_buffer (b[0])->ipsec.sad_index != current_sa_index)
        {
          if (current_sa_pkts)
            vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
                                             current_sa_index,
                                             current_sa_pkts,
                                             current_sa_bytes);
          current_sa_bytes = current_sa_pkts = 0;

          current_sa_index = vnet_buffer (b[0])->ipsec.sad_index;
          sa0 = ipsec_sa_get (current_sa_index);

          /* fetch the second cacheline ASAP */
          clib_prefetch_load (sa0->cacheline1);
          cpd.icv_sz = sa0->integ_icv_size;
          cpd.iv_sz = sa0->crypto_iv_size;
          cpd.flags = sa0->flags;
          cpd.sa_index = current_sa_index;
          is_async = im->async_mode | ipsec_sa_is_set_IS_ASYNC (sa0);
        }

      if (PREDICT_FALSE (~0 == sa0->thread_index))
        {
          /* this is the first packet to use this SA, claim the SA
           * for this thread. this could happen simultaneously on
           * another thread */
          clib_atomic_cmp_and_swap (&sa0->thread_index, ~0,
                                    ipsec_sa_assign_thread (thread_index));
        }

      if (PREDICT_FALSE (thread_index != sa0->thread_index))
        {
          vnet_buffer (b[0])->ipsec.thread_index = sa0->thread_index;
          err = ESP_DECRYPT_ERROR_HANDOFF;
          esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
                              ESP_DECRYPT_NEXT_HANDOFF);
          goto next;
        }

      /* store packet data for next round for easier prefetch */
      pd->sa_data = cpd.sa_data;
      pd->current_data = b[0]->current_data;
      pd->hdr_sz = pd->current_data - vnet_buffer (b[0])->l3_hdr_offset;
      payload = b[0]->data + pd->current_data;
      pd->seq = clib_host_to_net_u32 (((esp_header_t *) payload)->seq);
      pd->is_chain = 0;
      pd2->lb = b[0];
      pd2->free_buffer_index = 0;
      pd2->icv_removed = 0;

      if (n_bufs > 1)
        {
          pd->is_chain = 1;
          /* find last buffer in the chain */
          while (pd2->lb->flags & VLIB_BUFFER_NEXT_PRESENT)
            pd2->lb = vlib_get_buffer (vm, pd2->lb->next_buffer);

          crypto_ops = &ptd->chained_crypto_ops;
          integ_ops = &ptd->chained_integ_ops;
        }
      else
        {
          crypto_ops = &ptd->crypto_ops;
          integ_ops = &ptd->integ_ops;
        }

      pd->current_length = b[0]->current_length;

      /* anti-replay check */
      if (ipsec_sa_anti_replay_and_sn_advance (sa0, pd->seq, ~0, false,
                                               &pd->seq_hi))
        {
          err = ESP_DECRYPT_ERROR_REPLAY;
          esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
                              ESP_DECRYPT_NEXT_DROP);
          goto next;
        }

      if (pd->current_length < cpd.icv_sz + esp_sz + cpd.iv_sz)
        {
          err = ESP_DECRYPT_ERROR_RUNT;
          esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
                              ESP_DECRYPT_NEXT_DROP);
          goto next;
        }

      len = pd->current_length - cpd.icv_sz;
      current_sa_pkts += 1;
      current_sa_bytes += vlib_buffer_length_in_chain (vm, b[0]);

      if (is_async)
        {
          async_op = sa0->crypto_async_dec_op_id;

          /* get a frame for this op if we don't yet have one or it's full
           */
          if (NULL == async_frames[async_op] ||
              vnet_crypto_async_frame_is_full (async_frames[async_op]))
            {
              async_frames[async_op] =
                vnet_crypto_async_get_frame (vm, async_op);
              /* Save the frame to the list we'll submit at the end */
              vec_add1 (ptd->async_frames, async_frames[async_op]);
            }

          err = esp_decrypt_prepare_async_frame (
            vm, node, ptd, async_frames[async_op], sa0, payload, len,
            cpd.icv_sz, cpd.iv_sz, pd, pd2, from[b - bufs], b[0], async_next,
            async_next_node);
          if (ESP_DECRYPT_ERROR_RX_PKTS != err)
            {
              esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
                                  ESP_DECRYPT_NEXT_DROP);
            }
        }
      else
        esp_decrypt_prepare_sync_op (
          vm, node, ptd, &crypto_ops, &integ_ops, op, sa0, payload, len,
          cpd.icv_sz, cpd.iv_sz, pd, pd2, b[0], sync_next, b - bufs);
      /* next */
    next:
      if (ESP_DECRYPT_ERROR_RX_PKTS != err)
        {
          noop_bi[n_noop] = from[b - bufs];
          n_noop++;
          noop_next++;
        }
      else if (!is_async)
        {
          sync_bi[n_sync] = from[b - bufs];
          sync_bufs[n_sync] = b[0];
          n_sync++;
          sync_next++;
          pd += 1;
          pd2 += 1;
        }
      else
        async_next++;

      n_left -= 1;
      b += 1;
    }

  if (PREDICT_TRUE (~0 != current_sa_index))
    vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
                                     current_sa_index, current_sa_pkts,
                                     current_sa_bytes);

  /* submit or free all of the open frames */
  vnet_crypto_async_frame_t **async_frame;

  vec_foreach (async_frame, ptd->async_frames)
    {
      /* free frame and move on if no ops were successfully added */
      if (PREDICT_FALSE (!(*async_frame)->n_elts))
        {
          vnet_crypto_async_free_frame (vm, *async_frame);
          continue;
        }
      if (vnet_crypto_async_submit_open_frame (vm, *async_frame) < 0)
        {
          n_noop += esp_async_recycle_failed_submit (
            vm, *async_frame, node, ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR,
            n_noop, noop_bi, noop_nexts, ESP_DECRYPT_NEXT_DROP);
          vnet_crypto_async_reset_frame (*async_frame);
          vnet_crypto_async_free_frame (vm, *async_frame);
        }
    }

  if (n_sync)
    {
      esp_process_ops (vm, node, ptd->integ_ops, sync_bufs, sync_nexts,
                       ESP_DECRYPT_ERROR_INTEG_ERROR);
      esp_process_chained_ops (vm, node, ptd->chained_integ_ops, sync_bufs,
                               sync_nexts, ptd->chunks,
                               ESP_DECRYPT_ERROR_INTEG_ERROR);

      esp_process_ops (vm, node, ptd->crypto_ops, sync_bufs, sync_nexts,
                       ESP_DECRYPT_ERROR_DECRYPTION_FAILED);
      esp_process_chained_ops (vm, node, ptd->chained_crypto_ops, sync_bufs,
                               sync_nexts, ptd->chunks,
                               ESP_DECRYPT_ERROR_DECRYPTION_FAILED);

      /* Post decryption round - adjust packet data start and length and next
         node */
      n_left = n_sync;
      sync_next = sync_nexts;
      pd = pkt_data;
      pd2 = pkt_data2;
      b = sync_bufs;

      while (n_left)
        {
          if (n_left >= 2)
            {
              void *data = b[1]->data + pd[1].current_data;

              /* buffer metadata */
              vlib_prefetch_buffer_header (b[1], LOAD);

              /* esp_footer_t */
              CLIB_PREFETCH (data + pd[1].current_length - pd[1].icv_sz - 2,
                             CLIB_CACHE_LINE_BYTES, LOAD);

              /* packet headers */
              CLIB_PREFETCH (data - CLIB_CACHE_LINE_BYTES,
                             CLIB_CACHE_LINE_BYTES * 2, LOAD);
            }

          /* save the sa_index as GRE_teb post_crypto changes L2 opaque */
          if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
            current_sa_index = vnet_buffer (b[0])->ipsec.sad_index;

          if (sync_next[0] >= ESP_DECRYPT_N_NEXT)
            esp_decrypt_post_crypto (vm, node, next_by_next_header, pd, pd2,
                                     b[0], sync_next, is_ip6, is_tun, 0);

          /* trace: */
          if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
            {
              esp_decrypt_trace_t *tr;
              tr = vlib_add_trace (vm, node, b[0], sizeof (*tr));
              sa0 = ipsec_sa_get (current_sa_index);
              tr->crypto_alg = sa0->crypto_alg;
              tr->integ_alg = sa0->integ_alg;
              tr->seq = pd->seq;
              tr->sa_seq = sa0->seq;
              tr->sa_seq_hi = sa0->seq_hi;
              tr->pkt_seq_hi = pd->seq_hi;
            }

          /* next */
          n_left -= 1;
          sync_next += 1;
          pd += 1;
          pd2 += 1;
          b += 1;
        }
    }

  vlib_node_increment_counter (vm, node->node_index, ESP_DECRYPT_ERROR_RX_PKTS,
                               from_frame->n_vectors);

  if (n_sync)
    vlib_buffer_enqueue_to_next (vm, node, sync_bi, sync_nexts, n_sync);

  if (n_noop)
    vlib_buffer_enqueue_to_next (vm, node, noop_bi, noop_nexts, n_noop);

  return (from_frame->n_vectors);
}
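
/* Post node for the async path: crypto already completed, so only the
 * esp_decrypt_post_crypto () adjustments and tracing remain. */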
always_inline uword
esp_decrypt_post_inline (vlib_main_t * vm,
                         vlib_node_runtime_t * node,
                         vlib_frame_t * from_frame, int is_ip6, int is_tun)
{
  const ipsec_main_t *im = &ipsec_main;
  const u16 *next_by_next_header = im->next_header_registrations;
  u32 *from = vlib_frame_vector_args (from_frame);
  u32 n_left = from_frame->n_vectors;
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
  u16 nexts[VLIB_FRAME_SIZE], *next = nexts;
  vlib_get_buffers (vm, from, b, n_left);

  while (n_left > 0)
    {
      esp_decrypt_packet_data_t *pd = &(esp_post_data (b[0]))->decrypt_data;

      if (n_left > 2)
        {
          vlib_prefetch_buffer_header (b[2], LOAD);
          vlib_prefetch_buffer_header (b[1], LOAD);
        }

      if (!pd->is_chain)
        esp_decrypt_post_crypto (vm, node, next_by_next_header, pd, 0, b[0],
                                 next, is_ip6, is_tun, 1);
      else
        {
          esp_decrypt_packet_data2_t *pd2 = esp_post_data2 (b[0]);
          esp_decrypt_post_crypto (vm, node, next_by_next_header, pd, pd2,
                                   b[0], next, is_ip6, is_tun, 1);
        }

      /* trace: */
      if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
        {
          ipsec_sa_t *sa0 = ipsec_sa_get (pd->sa_index);
          esp_decrypt_trace_t *tr;
          esp_decrypt_packet_data_t *async_pd =
            &(esp_post_data (b[0]))->decrypt_data;
          tr = vlib_add_trace (vm, node, b[0], sizeof (*tr));
          sa0 = ipsec_sa_get (async_pd->sa_index);

          tr->crypto_alg = sa0->crypto_alg;
          tr->integ_alg = sa0->integ_alg;
          tr->seq = pd->seq;
          tr->sa_seq = sa0->seq;
          tr->sa_seq_hi = sa0->seq_hi;
        }

      n_left--;
      next++;
      b++;
    }

  n_left = from_frame->n_vectors;
  vlib_node_increment_counter (vm, node->node_index,
                               ESP_DECRYPT_ERROR_RX_POST_PKTS, n_left);

  vlib_buffer_enqueue_to_next (vm, node, from, nexts, n_left);

  return n_left;
}
VLIB_NODE_FN (esp4_decrypt_node) (vlib_main_t * vm,
                                  vlib_node_runtime_t * node,
                                  vlib_frame_t * from_frame)
{
  return esp_decrypt_inline (vm, node, from_frame, 0, 0,
                             esp_decrypt_async_next.esp4_post_next);
}

VLIB_NODE_FN (esp4_decrypt_post_node) (vlib_main_t * vm,
                                       vlib_node_runtime_t * node,
                                       vlib_frame_t * from_frame)
{
  return esp_decrypt_post_inline (vm, node, from_frame, 0, 0);
}

VLIB_NODE_FN (esp4_decrypt_tun_node) (vlib_main_t * vm,
                                      vlib_node_runtime_t * node,
                                      vlib_frame_t * from_frame)
{
  return esp_decrypt_inline (vm, node, from_frame, 0, 1,
                             esp_decrypt_async_next.esp4_tun_post_next);
}

VLIB_NODE_FN (esp4_decrypt_tun_post_node) (vlib_main_t * vm,
                                           vlib_node_runtime_t * node,
                                           vlib_frame_t * from_frame)
{
  return esp_decrypt_post_inline (vm, node, from_frame, 0, 1);
}

VLIB_NODE_FN (esp6_decrypt_node) (vlib_main_t * vm,
                                  vlib_node_runtime_t * node,
                                  vlib_frame_t * from_frame)
{
  return esp_decrypt_inline (vm, node, from_frame, 1, 0,
                             esp_decrypt_async_next.esp6_post_next);
}

VLIB_NODE_FN (esp6_decrypt_post_node) (vlib_main_t * vm,
                                       vlib_node_runtime_t * node,
                                       vlib_frame_t * from_frame)
{
  return esp_decrypt_post_inline (vm, node, from_frame, 1, 0);
}

VLIB_NODE_FN (esp6_decrypt_tun_node) (vlib_main_t * vm,
                                      vlib_node_runtime_t * node,
                                      vlib_frame_t * from_frame)
{
  return esp_decrypt_inline (vm, node, from_frame, 1, 1,
                             esp_decrypt_async_next.esp6_tun_post_next);
}

VLIB_NODE_FN (esp6_decrypt_tun_post_node) (vlib_main_t * vm,
                                           vlib_node_runtime_t * node,
                                           vlib_frame_t * from_frame)
{
  return esp_decrypt_post_inline (vm, node, from_frame, 1, 1);
}
VLIB_REGISTER_NODE (esp4_decrypt_node) = {
  .name = "esp4-decrypt",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_decrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ESP_DECRYPT_N_ERROR,
  .error_counters = esp_decrypt_error_counters,

  .n_next_nodes = ESP_DECRYPT_N_NEXT,
  .next_nodes = {
    [ESP_DECRYPT_NEXT_DROP] = "ip4-drop",
    [ESP_DECRYPT_NEXT_IP4_INPUT] = "ip4-input-no-checksum",
    [ESP_DECRYPT_NEXT_IP6_INPUT] = "ip6-input",
    [ESP_DECRYPT_NEXT_MPLS_INPUT] = "mpls-drop",
    [ESP_DECRYPT_NEXT_L2_INPUT] = "l2-input",
    [ESP_DECRYPT_NEXT_HANDOFF] = "esp4-decrypt-handoff",
  },
};

VLIB_REGISTER_NODE (esp4_decrypt_post_node) = {
  .name = "esp4-decrypt-post",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_decrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ESP_DECRYPT_N_ERROR,
  .error_counters = esp_decrypt_error_counters,

  .sibling_of = "esp4-decrypt",
};

VLIB_REGISTER_NODE (esp6_decrypt_node) = {
  .name = "esp6-decrypt",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_decrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ESP_DECRYPT_N_ERROR,
  .error_counters = esp_decrypt_error_counters,

  .n_next_nodes = ESP_DECRYPT_N_NEXT,
  .next_nodes = {
    [ESP_DECRYPT_NEXT_DROP] = "ip6-drop",
    [ESP_DECRYPT_NEXT_IP4_INPUT] = "ip4-input-no-checksum",
    [ESP_DECRYPT_NEXT_IP6_INPUT] = "ip6-input",
    [ESP_DECRYPT_NEXT_MPLS_INPUT] = "mpls-drop",
    [ESP_DECRYPT_NEXT_L2_INPUT] = "l2-input",
    [ESP_DECRYPT_NEXT_HANDOFF] = "esp6-decrypt-handoff",
  },
};

VLIB_REGISTER_NODE (esp6_decrypt_post_node) = {
  .name = "esp6-decrypt-post",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_decrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ESP_DECRYPT_N_ERROR,
  .error_counters = esp_decrypt_error_counters,

  .sibling_of = "esp6-decrypt",
};

VLIB_REGISTER_NODE (esp4_decrypt_tun_node) = {
  .name = "esp4-decrypt-tun",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_decrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ESP_DECRYPT_N_ERROR,
  .error_counters = esp_decrypt_error_counters,
  .n_next_nodes = ESP_DECRYPT_N_NEXT,
  .next_nodes = {
    [ESP_DECRYPT_NEXT_DROP] = "ip4-drop",
    [ESP_DECRYPT_NEXT_IP4_INPUT] = "ip4-input-no-checksum",
    [ESP_DECRYPT_NEXT_IP6_INPUT] = "ip6-input",
    [ESP_DECRYPT_NEXT_MPLS_INPUT] = "mpls-input",
    [ESP_DECRYPT_NEXT_L2_INPUT] = "l2-input",
    [ESP_DECRYPT_NEXT_HANDOFF] = "esp4-decrypt-tun-handoff",
  },
};

VLIB_REGISTER_NODE (esp4_decrypt_tun_post_node) = {
  .name = "esp4-decrypt-tun-post",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_decrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ESP_DECRYPT_N_ERROR,
  .error_counters = esp_decrypt_error_counters,

  .sibling_of = "esp4-decrypt-tun",
};

VLIB_REGISTER_NODE (esp6_decrypt_tun_node) = {
  .name = "esp6-decrypt-tun",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_decrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ESP_DECRYPT_N_ERROR,
  .error_counters = esp_decrypt_error_counters,
  .n_next_nodes = ESP_DECRYPT_N_NEXT,
  .next_nodes = {
    [ESP_DECRYPT_NEXT_DROP] = "ip6-drop",
    [ESP_DECRYPT_NEXT_IP4_INPUT] = "ip4-input-no-checksum",
    [ESP_DECRYPT_NEXT_IP6_INPUT] = "ip6-input",
    [ESP_DECRYPT_NEXT_MPLS_INPUT] = "mpls-input",
    [ESP_DECRYPT_NEXT_L2_INPUT] = "l2-input",
    [ESP_DECRYPT_NEXT_HANDOFF] = "esp6-decrypt-tun-handoff",
  },
};

VLIB_REGISTER_NODE (esp6_decrypt_tun_post_node) = {
  .name = "esp6-decrypt-tun-post",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_decrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ESP_DECRYPT_N_ERROR,
  .error_counters = esp_decrypt_error_counters,

  .sibling_of = "esp6-decrypt-tun",
};
#ifndef CLIB_MARCH_VARIANT

static clib_error_t *
esp_decrypt_init (vlib_main_t *vm)
{
  ipsec_main_t *im = &ipsec_main;

  im->esp4_dec_fq_index =
    vlib_frame_queue_main_init (esp4_decrypt_node.index, 0);
  im->esp6_dec_fq_index =
    vlib_frame_queue_main_init (esp6_decrypt_node.index, 0);
  im->esp4_dec_tun_fq_index =
    vlib_frame_queue_main_init (esp4_decrypt_tun_node.index, 0);
  im->esp6_dec_tun_fq_index =
    vlib_frame_queue_main_init (esp6_decrypt_tun_node.index, 0);

  return 0;
}

VLIB_INIT_FUNCTION (esp_decrypt_init);

#endif

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */