/*
 * esp_decrypt.c : IPSec ESP decrypt node
 *
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vnet/vnet.h>
#include <vnet/api_errno.h>
#include <vnet/ip/ip.h>
#include <vnet/l2/l2_input.h>

#include <vnet/ipsec/ipsec.h>
#include <vnet/ipsec/esp.h>
#include <vnet/ipsec/ipsec_io.h>
#include <vnet/ipsec/ipsec_tun.h>

#include <vnet/gre/packet.h>
#define foreach_esp_decrypt_next                                              \
  _ (DROP, "error-drop")                                                      \
  _ (IP4_INPUT, "ip4-input-no-checksum")                                      \
  _ (IP6_INPUT, "ip6-input")                                                  \
  _ (L2_INPUT, "l2-input")                                                    \
  _ (MPLS_INPUT, "mpls-input")                                                \
  _ (HANDOFF, "handoff")

#define _(v, s) ESP_DECRYPT_NEXT_##v,
typedef enum
{
  foreach_esp_decrypt_next
#undef _
    ESP_DECRYPT_N_NEXT,
} esp_decrypt_next_t;
#define foreach_esp_decrypt_post_next                                         \
  _ (DROP, "error-drop")                                                      \
  _ (IP4_INPUT, "ip4-input-no-checksum")                                      \
  _ (IP6_INPUT, "ip6-input")                                                  \
  _ (MPLS_INPUT, "mpls-input")                                                \
  _ (L2_INPUT, "l2-input")

#define _(v, s) ESP_DECRYPT_POST_NEXT_##v,
typedef enum
{
  foreach_esp_decrypt_post_next
#undef _
    ESP_DECRYPT_POST_N_NEXT,
} esp_decrypt_post_next_t;
typedef struct
{
  u32 seq;
  u32 sa_seq;
  u32 sa_seq_hi;
  u32 pkt_seq_hi;
  ipsec_crypto_alg_t crypto_alg;
  ipsec_integ_alg_t integ_alg;
} esp_decrypt_trace_t;

typedef vl_counter_esp_decrypt_enum_t esp_decrypt_error_t;
/* The number of bytes in the high half of the extended sequence number */
#define N_HI_ESN_BYTES 4
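
/* Note (added for clarity): with extended sequence numbers (RFC 4303),
 * only the low 32 bits of the 64-bit sequence number are carried in the
 * ESP header; the high 32 bits (seq_hi) are appended to the
 * integrity-checked data but never transmitted, which is why the ICV has
 * to be shifted by N_HI_ESN_BYTES in several places below. */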
/* packet trace format function */
static u8 *
format_esp_decrypt_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  esp_decrypt_trace_t *t = va_arg (*args, esp_decrypt_trace_t *);

  s = format (s,
              "esp: crypto %U integrity %U pkt-seq %d sa-seq %u sa-seq-hi %u "
              "pkt-seq-hi %u",
              format_ipsec_crypto_alg, t->crypto_alg, format_ipsec_integ_alg,
              t->integ_alg, t->seq, t->sa_seq, t->sa_seq_hi, t->pkt_seq_hi);
  return s;
}
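
/* Illustrative trace output (hypothetical values):
 *   esp: crypto aes-gcm-128 integrity none pkt-seq 7 sa-seq 7
 *   sa-seq-hi 0 pkt-seq-hi 0 */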
#define ESP_ENCRYPT_PD_F_FD_TRANSPORT (1 << 2)
static_always_inline void
esp_process_ops (vlib_main_t * vm, vlib_node_runtime_t * node,
                 vnet_crypto_op_t * ops, vlib_buffer_t * b[], u16 * nexts,
                 int e)
{
  vnet_crypto_op_t *op = ops;
  u32 n_fail, n_ops = vec_len (ops);

  if (n_ops == 0)
    return;

  n_fail = n_ops - vnet_crypto_process_ops (vm, op, n_ops);

  while (n_fail)
    {
      ASSERT (op - ops < n_ops);
      if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
        {
          u32 err, bi = op->user_data;
          if (op->status == VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC)
            err = e;
          else
            err = ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR;
          esp_decrypt_set_next_index (b[bi], node, vm->thread_index, err, bi,
                                      nexts, ESP_DECRYPT_NEXT_DROP,
                                      vnet_buffer (b[bi])->ipsec.sad_index);
          n_fail--;
        }
      op++;
    }
}
static_always_inline void
esp_process_chained_ops (vlib_main_t * vm, vlib_node_runtime_t * node,
                         vnet_crypto_op_t * ops, vlib_buffer_t * b[],
                         u16 * nexts, vnet_crypto_op_chunk_t * chunks, int e)
{
  vnet_crypto_op_t *op = ops;
  u32 n_fail, n_ops = vec_len (ops);

  if (PREDICT_TRUE (n_ops == 0))
    return;

  n_fail = n_ops - vnet_crypto_process_chained_ops (vm, op, chunks, n_ops);

  while (n_fail)
    {
      ASSERT (op - ops < n_ops);
      if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
        {
          u32 err, bi = op->user_data;
          if (op->status == VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC)
            err = e;
          else
            err = ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR;
          esp_decrypt_set_next_index (b[bi], node, vm->thread_index, err, bi,
                                      nexts, ESP_DECRYPT_NEXT_DROP,
                                      vnet_buffer (b[bi])->ipsec.sad_index);
          n_fail--;
        }
      op++;
    }
}
static_always_inline void
esp_remove_tail (vlib_main_t * vm, vlib_buffer_t * b, vlib_buffer_t * last,
                 u16 tail)
{
  vlib_buffer_t *before_last = b;

  if (last->current_length > tail)
    {
      last->current_length -= tail;
      return;
    }
  ASSERT (b->flags & VLIB_BUFFER_NEXT_PRESENT);

  while (b->flags & VLIB_BUFFER_NEXT_PRESENT)
    {
      before_last = b;
      b = vlib_get_buffer (vm, b->next_buffer);
    }
  before_last->current_length -= tail - last->current_length;
  vlib_buffer_free_one (vm, before_last->next_buffer);
  before_last->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
}
/* The ICV is split across the last two buffers, so move it to the last
   buffer and return a pointer to it */
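/* Sketch (added), assuming icv_sz = 16 with 6 ICV bytes in the last
 * buffer:
 *
 *   before: [ ... payload | ICV[0..9] ] -> [ ICV[10..15] ]
 *   after:  [ ... payload ]   (chain)      [ ICV[0..15] ]  (detached)
 *
 * The last buffer is unlinked from the chain (its index is parked in
 * pd2->free_buffer_index) and now holds the whole ICV contiguously. */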
static_always_inline u8 *
esp_move_icv (vlib_main_t * vm, vlib_buffer_t * first,
              esp_decrypt_packet_data_t * pd,
              esp_decrypt_packet_data2_t * pd2, u16 icv_sz, u16 * dif)
{
  vlib_buffer_t *before_last, *bp;
  u16 last_sz = pd2->lb->current_length;
  u16 first_sz = icv_sz - last_sz;

  bp = before_last = first;
  while (bp->flags & VLIB_BUFFER_NEXT_PRESENT)
    {
      before_last = bp;
      bp = vlib_get_buffer (vm, bp->next_buffer);
    }

  u8 *lb_curr = vlib_buffer_get_current (pd2->lb);
  memmove (lb_curr + first_sz, lb_curr, last_sz);
  clib_memcpy_fast (lb_curr, vlib_buffer_get_tail (before_last) - first_sz,
                    first_sz);
  before_last->current_length -= first_sz;
  if (before_last == first)
    pd->current_length -= first_sz;
  clib_memset (vlib_buffer_get_tail (before_last), 0, first_sz);
  if (dif)
    dif[0] = first_sz;
  pd2->lb = before_last;
  pd2->icv_removed = 1;
  pd2->free_buffer_index = before_last->next_buffer;
  before_last->flags &= ~VLIB_BUFFER_NEXT_PRESENT;

  return lb_curr;
}
static_always_inline u16
esp_insert_esn (vlib_main_t *vm, ipsec_sa_t *sa, esp_decrypt_packet_data_t *pd,
                esp_decrypt_packet_data2_t *pd2, u32 *data_len, u8 **digest,
                u16 *len, vlib_buffer_t *b, u8 *payload)
{
  if (!ipsec_sa_is_set_USE_ESN (sa))
    return 0;
  /* shift ICV by 4 bytes to insert ESN */
  u32 seq_hi = clib_host_to_net_u32 (pd->seq_hi);
  u8 tmp[ESP_MAX_ICV_SIZE];

  if (pd2->icv_removed)
    {
      u16 space_left = vlib_buffer_space_left_at_end (vm, pd2->lb);
      if (space_left >= N_HI_ESN_BYTES)
        {
          clib_memcpy_fast (vlib_buffer_get_tail (pd2->lb), &seq_hi,
                            N_HI_ESN_BYTES);
          *data_len += N_HI_ESN_BYTES;
        }
      else
        return N_HI_ESN_BYTES;

      len[0] = b->current_length;
    }
  else
    {
      clib_memcpy_fast (tmp, payload + len[0], ESP_MAX_ICV_SIZE);
      clib_memcpy_fast (payload + len[0], &seq_hi, N_HI_ESN_BYTES);
      clib_memcpy_fast (payload + len[0] + N_HI_ESN_BYTES, tmp,
                        ESP_MAX_ICV_SIZE);
      *data_len += N_HI_ESN_BYTES;
      *digest += N_HI_ESN_BYTES;
    }

  return N_HI_ESN_BYTES;
}
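
/* Layout sketch (added): in the contiguous case esp_insert_esn turns
 *   [ payload | ICV ]  into  [ payload | seq_hi (4B) | ICV ]
 * so the integrity computation covers the high sequence bytes; the ICV
 * is parked in tmp while seq_hi is spliced in. */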
static_always_inline u8 *
esp_move_icv_esn (vlib_main_t * vm, vlib_buffer_t * first,
                  esp_decrypt_packet_data_t * pd,
                  esp_decrypt_packet_data2_t * pd2, u16 icv_sz,
                  ipsec_sa_t * sa, u8 * extra_esn, u32 * len)
{
  u16 dif = 0;
  u8 *digest = esp_move_icv (vm, first, pd, pd2, icv_sz, &dif);
  if (dif)
    *len -= dif;

  if (ipsec_sa_is_set_USE_ESN (sa))
    {
      u32 seq_hi = clib_host_to_net_u32 (pd->seq_hi);
      u16 space_left = vlib_buffer_space_left_at_end (vm, pd2->lb);

      if (space_left >= N_HI_ESN_BYTES)
        {
          clib_memcpy_fast (vlib_buffer_get_tail (pd2->lb), &seq_hi,
                            N_HI_ESN_BYTES);
          *len += N_HI_ESN_BYTES;
        }
      else
        {
          /* no space for ESN at the tail, use the next buffer
           * (with the ICV data) */
          ASSERT (pd2->icv_removed);
          vlib_buffer_t *tmp = vlib_get_buffer (vm, pd2->free_buffer_index);
          clib_memcpy_fast (vlib_buffer_get_current (tmp) - N_HI_ESN_BYTES,
                            &seq_hi, N_HI_ESN_BYTES);
          extra_esn[0] = 1;
        }
    }
  return digest;
}
static_always_inline int
esp_decrypt_chain_integ (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
                         const esp_decrypt_packet_data_t *pd,
                         esp_decrypt_packet_data2_t *pd2, ipsec_sa_t *sa0,
                         vlib_buffer_t *b, u8 icv_sz, u8 *start_src,
                         u32 start_len, u8 **digest, u16 *n_ch,
                         u32 *integ_total_len)
{
  vnet_crypto_op_chunk_t *ch;
  vlib_buffer_t *cb = vlib_get_buffer (vm, b->next_buffer);
  u16 n_chunks = 1;
  u32 total_len;
  vec_add2 (ptd->chunks, ch, 1);
  total_len = ch->len = start_len;
  ch->src = start_src;

  while (1)
    {
      vec_add2 (ptd->chunks, ch, 1);
      n_chunks += 1;
      ch->src = vlib_buffer_get_current (cb);
      if (pd2->lb == cb)
        {
          if (pd2->icv_removed)
            ch->len = cb->current_length;
          else
            ch->len = cb->current_length - icv_sz;
          if (ipsec_sa_is_set_USE_ESN (sa0))
            {
              u32 seq_hi = clib_host_to_net_u32 (pd->seq_hi);
              u8 tmp[ESP_MAX_ICV_SIZE];
              u8 *esn;
              vlib_buffer_t *tmp_b;
              u16 space_left = vlib_buffer_space_left_at_end (vm, pd2->lb);
              if (space_left < N_HI_ESN_BYTES)
                {
                  if (pd2->icv_removed)
                    {
                      /* use the pre-data area from the last buffer
                         that was removed from the chain */
                      tmp_b = vlib_get_buffer (vm, pd2->free_buffer_index);
                      esn = tmp_b->data - N_HI_ESN_BYTES;
                    }
                  else
                    {
                      /* no space, need to allocate a new buffer */
                      u32 tmp_bi = 0;
                      if (vlib_buffer_alloc (vm, &tmp_bi, 1) != 1)
                        return -1;
                      tmp_b = vlib_get_buffer (vm, tmp_bi);
                      esn = tmp_b->data;
                      pd2->free_buffer_index = tmp_bi;
                    }
                  clib_memcpy_fast (esn, &seq_hi, N_HI_ESN_BYTES);

                  vec_add2 (ptd->chunks, ch, 1);
                  n_chunks += 1;
                  ch->src = esn;
                  ch->len = N_HI_ESN_BYTES;
                }
              else
                {
                  if (pd2->icv_removed)
                    {
                      clib_memcpy_fast (vlib_buffer_get_tail (pd2->lb),
                                        &seq_hi, N_HI_ESN_BYTES);
                    }
                  else
                    {
                      clib_memcpy_fast (tmp, *digest, ESP_MAX_ICV_SIZE);
                      clib_memcpy_fast (*digest, &seq_hi, N_HI_ESN_BYTES);
                      clib_memcpy_fast (*digest + N_HI_ESN_BYTES, tmp,
                                        ESP_MAX_ICV_SIZE);
                      *digest += N_HI_ESN_BYTES;
                    }
                  ch->len += N_HI_ESN_BYTES;
                }
            }
          total_len += ch->len;
          break;
        }
      else
        total_len += ch->len = cb->current_length;

      if (!(cb->flags & VLIB_BUFFER_NEXT_PRESENT))
        break;

      cb = vlib_get_buffer (vm, cb->next_buffer);
    }

  if (n_ch)
    *n_ch = n_chunks;
  if (integ_total_len)
    *integ_total_len = total_len;

  return 0;
}
static_always_inline u32
esp_decrypt_chain_crypto (vlib_main_t * vm, ipsec_per_thread_data_t * ptd,
                          esp_decrypt_packet_data_t * pd,
                          esp_decrypt_packet_data2_t * pd2,
                          ipsec_sa_t * sa0, vlib_buffer_t * b, u8 icv_sz,
                          u8 * start, u32 start_len, u8 ** tag, u16 * n_ch)
{
  vnet_crypto_op_chunk_t *ch;
  vlib_buffer_t *cb = b;
  u16 n_chunks = 1;
  u32 total_len;
  vec_add2 (ptd->chunks, ch, 1);
  total_len = ch->len = start_len;
  ch->src = ch->dst = start;
  cb = vlib_get_buffer (vm, cb->next_buffer);

  while (1)
    {
      vec_add2 (ptd->chunks, ch, 1);
      n_chunks += 1;
      ch->src = ch->dst = vlib_buffer_get_current (cb);
      if (pd2->lb == cb)
        {
          if (ipsec_sa_is_set_IS_AEAD (sa0))
            {
              if (pd2->lb->current_length < icv_sz)
                {
                  u16 dif = 0;
                  *tag = esp_move_icv (vm, b, pd, pd2, icv_sz, &dif);

                  /* this chunk does not contain crypto data */
                  n_chunks -= 1;
                  /* and fix previous chunk's length as it might have
                     been changed */
                  ASSERT (n_chunks > 0);
                  if (pd2->lb == b)
                    {
                      total_len -= dif;
                      ch[-1].len -= dif;
                    }
                  else
                    {
                      total_len = total_len + pd2->lb->current_length -
                        ch[-1].len;
                      ch[-1].len = pd2->lb->current_length;
                    }
                  break;
                }
              else
                *tag = vlib_buffer_get_tail (pd2->lb) - icv_sz;
            }

          if (pd2->icv_removed)
            total_len += ch->len = cb->current_length;
          else
            total_len += ch->len = cb->current_length - icv_sz;
        }
      else
        total_len += ch->len = cb->current_length;

      if (!(cb->flags & VLIB_BUFFER_NEXT_PRESENT))
        break;

      cb = vlib_get_buffer (vm, cb->next_buffer);
    }

  if (n_ch)
    *n_ch = n_chunks;

  return total_len;
}
static_always_inline void
esp_decrypt_prepare_sync_op (vlib_main_t * vm, vlib_node_runtime_t * node,
                             ipsec_per_thread_data_t * ptd,
                             vnet_crypto_op_t *** crypto_ops,
                             vnet_crypto_op_t *** integ_ops,
                             vnet_crypto_op_t * op,
                             ipsec_sa_t * sa0, u8 * payload,
                             u16 len, u8 icv_sz, u8 iv_sz,
                             esp_decrypt_packet_data_t * pd,
                             esp_decrypt_packet_data2_t * pd2,
                             vlib_buffer_t * b, u16 * next, u32 index)
{
  const u8 esp_sz = sizeof (esp_header_t);

  if (PREDICT_TRUE (sa0->integ_op_id != VNET_CRYPTO_OP_NONE))
    {
      vnet_crypto_op_init (op, sa0->integ_op_id);
      op->key_index = sa0->integ_key_index;
      op->src = payload;
      op->flags = VNET_CRYPTO_OP_FLAG_HMAC_CHECK;
      op->user_data = index;
      op->digest = payload + len;
      op->digest_len = icv_sz;
      op->len = len;

      if (pd->is_chain)
        {
          /* buffer is chained */
          op->len = pd->current_length;

          /* special case when the ICV is split and needs to be reassembled
           * first -> move it to the last buffer. Also take into account
           * that ESN needs to be added after encrypted data and may or
           * may not fit in the tail. */
          if (pd2->lb->current_length < icv_sz)
            {
              u8 extra_esn = 0;
              op->digest =
                esp_move_icv_esn (vm, b, pd, pd2, icv_sz, sa0,
                                  &extra_esn, &op->len);

              if (extra_esn)
                {
                  /* esn is in the last buffer, that was unlinked from
                   * the chain */
                  op->len = b->current_length;
                }
              else
                {
                  if (pd2->lb == b)
                    {
                      /* we now have a single buffer of crypto data, adjust
                       * the length (second buffer contains only ICV) */
                      *integ_ops = &ptd->integ_ops;
                      *crypto_ops = &ptd->crypto_ops;
                      len = b->current_length;
                      goto out;
                    }
                }
            }
          else
            op->digest = vlib_buffer_get_tail (pd2->lb) - icv_sz;

          op->flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
          op->chunk_index = vec_len (ptd->chunks);
          if (esp_decrypt_chain_integ (vm, ptd, pd, pd2, sa0, b, icv_sz,
                                       payload, pd->current_length,
                                       &op->digest, &op->n_chunks, 0) < 0)
            {
              esp_decrypt_set_next_index (
                b, node, vm->thread_index, ESP_DECRYPT_ERROR_NO_BUFFERS, 0,
                next, ESP_DECRYPT_NEXT_DROP, pd->sa_index);
              return;
            }
        }
      else
        esp_insert_esn (vm, sa0, pd, pd2, &op->len, &op->digest, &len, b,
                        payload);
    out:
      vec_add_aligned (*(integ_ops[0]), op, 1, CLIB_CACHE_LINE_BYTES);
    }

  payload += esp_sz;
  len -= esp_sz;

  if (sa0->crypto_dec_op_id != VNET_CRYPTO_OP_NONE)
    {
      vnet_crypto_op_init (op, sa0->crypto_dec_op_id);
      op->key_index = sa0->crypto_key_index;
      op->iv = payload;

      if (ipsec_sa_is_set_IS_CTR (sa0))
        {
          /* construct nonce in a scratch space in front of the IP header */
          esp_ctr_nonce_t *nonce =
            (esp_ctr_nonce_t *) (payload - esp_sz - pd->hdr_sz -
                                 sizeof (*nonce));
          if (ipsec_sa_is_set_IS_AEAD (sa0))
            {
              /* construct aad in a scratch space in front of the nonce */
              esp_header_t *esp0 = (esp_header_t *) (payload - esp_sz);
              op->aad = (u8 *) nonce - sizeof (esp_aead_t);
              op->aad_len = esp_aad_fill (op->aad, esp0, sa0, pd->seq_hi);
              op->tag = payload + len;

              if (PREDICT_FALSE (ipsec_sa_is_set_IS_NULL_GMAC (sa0)))
                {
                  /* RFC-4543 ENCR_NULL_AUTH_AES_GMAC: IV is part of AAD */
                  payload -= iv_sz;
                  len += iv_sz;
                }
            }
          else
            {
              nonce->ctr = clib_host_to_net_u32 (1);
            }
          nonce->salt = sa0->salt;
          ASSERT (sizeof (u64) == iv_sz);
          nonce->iv = *(u64 *) op->iv;
          op->iv = (u8 *) nonce;
        }
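
      /* Note (added): esp_ctr_nonce_t above is a small packed scratch
       * layout (salt | iv | ctr; see esp.h); building it in the space of
       * the already-parsed headers avoids any extra allocation. */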
      op->src = op->dst = payload += iv_sz;
      op->len = len - iv_sz;
      op->user_data = index;

      if (pd->is_chain && (pd2->lb != b))
        {
          /* buffer is chained */
          op->flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
          op->chunk_index = vec_len (ptd->chunks);
          esp_decrypt_chain_crypto (vm, ptd, pd, pd2, sa0, b, icv_sz,
                                    payload, len - pd->iv_sz + pd->icv_sz,
                                    &op->tag, &op->n_chunks);
        }

      vec_add_aligned (*(crypto_ops[0]), op, 1, CLIB_CACHE_LINE_BYTES);
    }
}
static_always_inline esp_decrypt_error_t
esp_decrypt_prepare_async_frame (vlib_main_t *vm, vlib_node_runtime_t *node,
                                 ipsec_per_thread_data_t *ptd,
                                 vnet_crypto_async_frame_t *f, ipsec_sa_t *sa0,
                                 u8 *payload, u16 len, u8 icv_sz, u8 iv_sz,
                                 esp_decrypt_packet_data_t *pd,
                                 esp_decrypt_packet_data2_t *pd2, u32 bi,
                                 vlib_buffer_t *b, u16 *next, u16 async_next)
{
  const u8 esp_sz = sizeof (esp_header_t);
  esp_decrypt_packet_data_t *async_pd = &(esp_post_data (b))->decrypt_data;
  esp_decrypt_packet_data2_t *async_pd2 = esp_post_data2 (b);
  u8 *tag = payload + len, *iv = payload + esp_sz, *aad = 0;
  const u32 key_index = sa0->crypto_key_index;
  u32 crypto_len, integ_len = 0;
  i16 crypto_start_offset, integ_start_offset = 0;
  u8 flags = 0;

  if (!ipsec_sa_is_set_IS_AEAD (sa0))
    {
      /* linked algs */
      integ_start_offset = payload - b->data;
      integ_len = len;
      if (PREDICT_TRUE (sa0->integ_op_id != VNET_CRYPTO_OP_NONE))
        flags |= VNET_CRYPTO_OP_FLAG_HMAC_CHECK;

      if (pd->is_chain)
        {
          /* buffer is chained */
          integ_len = pd->current_length;

          /* special case when the ICV is split and needs to be reassembled
           * first -> move it to the last buffer. Also take into account
           * that ESN needs to be added after encrypted data and may or
           * may not fit in the tail. */
          if (pd2->lb->current_length < icv_sz)
            {
              u8 extra_esn = 0;
              tag = esp_move_icv_esn (vm, b, pd, pd2, icv_sz, sa0,
                                      &extra_esn, &integ_len);

              if (extra_esn)
                {
                  /* esn is in the last buffer, that was unlinked from
                   * the chain */
                  integ_len = b->current_length;
                }
              else
                {
                  if (pd2->lb == b)
                    {
                      /* we now have a single buffer of crypto data, adjust
                       * the length (second buffer contains only ICV) */
                      len = b->current_length;
                      goto out;
                    }
                }
            }
          else
            tag = vlib_buffer_get_tail (pd2->lb) - icv_sz;

          flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
          if (esp_decrypt_chain_integ (vm, ptd, pd, pd2, sa0, b, icv_sz,
                                       payload, pd->current_length, &tag, 0,
                                       &integ_len) < 0)
            {
              /* allocate buffer failed, will not add to frame and drop */
              return (ESP_DECRYPT_ERROR_NO_BUFFERS);
            }
        }
      else
        esp_insert_esn (vm, sa0, pd, pd2, &integ_len, &tag, &len, b, payload);
    }

out:
  payload += esp_sz;
  len -= esp_sz;

  if (ipsec_sa_is_set_IS_CTR (sa0))
    {
      /* construct nonce in a scratch space in front of the IP header */
      esp_ctr_nonce_t *nonce =
        (esp_ctr_nonce_t *) (payload - esp_sz - pd->hdr_sz - sizeof (*nonce));
      if (ipsec_sa_is_set_IS_AEAD (sa0))
        {
          /* construct aad in a scratch space in front of the nonce */
          esp_header_t *esp0 = (esp_header_t *) (payload - esp_sz);
          aad = (u8 *) nonce - sizeof (esp_aead_t);
          esp_aad_fill (aad, esp0, sa0, pd->seq_hi);

          if (PREDICT_FALSE (ipsec_sa_is_set_IS_NULL_GMAC (sa0)))
            {
              /* RFC-4543 ENCR_NULL_AUTH_AES_GMAC: IV is part of AAD */
              payload -= iv_sz;
              len += iv_sz;
            }
        }
      else
        {
          nonce->ctr = clib_host_to_net_u32 (1);
        }
      nonce->salt = sa0->salt;
      ASSERT (sizeof (u64) == iv_sz);
      nonce->iv = *(u64 *) iv;
      iv = (u8 *) nonce;
    }

  crypto_start_offset = (payload += iv_sz) - b->data;
  crypto_len = len - iv_sz;

  if (pd->is_chain && (pd2->lb != b))
    {
      /* buffer is chained */
      flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;

      crypto_len = esp_decrypt_chain_crypto (vm, ptd, pd, pd2, sa0, b, icv_sz,
                                             payload,
                                             len - pd->iv_sz + pd->icv_sz,
                                             &tag, 0);
    }

  *async_pd = *pd;
  *async_pd2 = *pd2;

  /* for AEAD integ_len - crypto_len will be negative, it is ok since it
   * is ignored by the engine. */
  vnet_crypto_async_add_to_frame (
    vm, f, key_index, crypto_len, integ_len - crypto_len, crypto_start_offset,
    integ_start_offset, bi, async_next, iv, tag, aad, flags);

  return (ESP_DECRYPT_ERROR_RX_PKTS);
}
static_always_inline void
esp_decrypt_post_crypto (vlib_main_t *vm, vlib_node_runtime_t *node,
                         const u16 *next_by_next_header,
                         const esp_decrypt_packet_data_t *pd,
                         const esp_decrypt_packet_data2_t *pd2,
                         vlib_buffer_t *b, u16 *next, int is_ip6, int is_tun,
                         int is_async)
{
  ipsec_sa_t *sa0 = ipsec_sa_get (pd->sa_index);
  vlib_buffer_t *lb = b;
  const u8 esp_sz = sizeof (esp_header_t);
  const u8 tun_flags = IPSEC_SA_FLAG_IS_TUNNEL | IPSEC_SA_FLAG_IS_TUNNEL_V6;
  u8 pad_length = 0, next_header = 0;
  u16 icv_sz;
  u64 n_lost;

  /*
   * redo the anti-replay check.
   * in this frame say we have sequence numbers, s, s+1, s+1, s+1
   * and s and s+1 are in the window. When we did the anti-replay
   * check above we did so against the state of the window (W),
   * after packet s-1. So each of the packets in the sequence will be
   * accepted.
   * This time s will be checked against Ws-1, s+1 checked against Ws
   * (i.e. the window state is updated/advanced)
   * so this time the successive s+1 packet will be dropped.
   * This is a consequence of batching the decrypts. If the
   * check-decrypt-advance process was done for each packet it would
   * be fine. But we batch the decrypts because it's much more efficient
   * to do so in SW and if we offload to HW and the process is async.
   *
   * You're probably thinking, but this means an attacker can send the
   * above sequence and cause VPP to perform decrypts that will fail,
   * and that's true. But if the attacker can determine s (a valid
   * sequence number in the window) which is non-trivial, it can generate
   * a sequence s, s+1, s+2, s+3, ... s+n and nothing will prevent any
   * implementation, sequential or batching, from decrypting these.
   */
  if (ipsec_sa_anti_replay_and_sn_advance (sa0, pd->seq, pd->seq_hi, true,
                                           NULL))
    {
      esp_decrypt_set_next_index (b, node, vm->thread_index,
                                  ESP_DECRYPT_ERROR_REPLAY, 0, next,
                                  ESP_DECRYPT_NEXT_DROP, pd->sa_index);
      return;
    }

  n_lost =
    ipsec_sa_anti_replay_advance (sa0, vm->thread_index, pd->seq, pd->seq_hi);

  vlib_prefetch_simple_counter (&ipsec_sa_err_counters[IPSEC_SA_ERROR_LOST],
                                vm->thread_index, pd->sa_index);
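
  /* ESP trailer layout (RFC 4303), recovered below:
   *   [ padding | pad_length (1 byte) | next_header (1 byte) | ICV ]
   * esp_footer_t covers the pad_length/next_header pair. */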
  if (pd->is_chain)
    {
      lb = pd2->lb;
      icv_sz = pd2->icv_removed ? 0 : pd->icv_sz;
      if (pd2->free_buffer_index)
        {
          vlib_buffer_free_one (vm, pd2->free_buffer_index);
          b->total_length_not_including_first_buffer = 0;
        }
      if (lb->current_length < sizeof (esp_footer_t) + icv_sz)
        {
          /* the esp footer is either split across two buffers or in the
           * before-last buffer */

          vlib_buffer_t *before_last = b, *bp = b;
          while (bp->flags & VLIB_BUFFER_NEXT_PRESENT)
            {
              before_last = bp;
              bp = vlib_get_buffer (vm, bp->next_buffer);
            }
          u8 *bt = vlib_buffer_get_tail (before_last);

          if (lb->current_length == icv_sz)
            {
              esp_footer_t *f = (esp_footer_t *) (bt - sizeof (*f));
              pad_length = f->pad_length;
              next_header = f->next_header;
            }
          else
            {
              pad_length = (bt - 1)[0];
              next_header = ((u8 *) vlib_buffer_get_current (lb))[0];
            }
        }
      else
        {
          esp_footer_t *f =
            (esp_footer_t *) (lb->data + lb->current_data +
                              lb->current_length - sizeof (esp_footer_t) -
                              icv_sz);
          pad_length = f->pad_length;
          next_header = f->next_header;
        }
    }
  else
    {
      icv_sz = pd->icv_sz;
      esp_footer_t *f =
        (esp_footer_t *) (lb->data + lb->current_data + lb->current_length -
                          sizeof (esp_footer_t) - icv_sz);
      pad_length = f->pad_length;
      next_header = f->next_header;
    }
  u16 adv = pd->iv_sz + esp_sz;
  u16 tail = sizeof (esp_footer_t) + pad_length + icv_sz;
  u16 tail_orig = sizeof (esp_footer_t) + pad_length + pd->icv_sz;
  b->flags &= ~VLIB_BUFFER_TOTAL_LENGTH_VALID;
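
  /* Sketch (added): in transport mode the IP (and optional UDP) header is
   * copied forward over [ ESP | IV ] and the trailer is trimmed, e.g.
   *   [ IP | UDP? | ESP | IV | payload | pad | footer | ICV ]
   *     -> [ IP | UDP? | payload ] */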
  if ((pd->flags & tun_flags) == 0 && !is_tun)	/* transport mode */
    {
      u8 udp_sz = (is_ip6 == 0 && pd->flags & IPSEC_SA_FLAG_UDP_ENCAP) ?
        sizeof (udp_header_t) : 0;
      u16 ip_hdr_sz = pd->hdr_sz - udp_sz;
      u8 *old_ip = b->data + pd->current_data - ip_hdr_sz - udp_sz;
      u8 *ip = old_ip + adv + udp_sz;

      if (is_ip6 && ip_hdr_sz > 64)
        memmove (ip, old_ip, ip_hdr_sz);
      else
        clib_memcpy_le64 (ip, old_ip, ip_hdr_sz);

      b->current_data = pd->current_data + adv - ip_hdr_sz;
      b->current_length += ip_hdr_sz - adv;
      esp_remove_tail (vm, b, lb, tail);
      if (is_ip6)
        {
          ip6_header_t *ip6 = (ip6_header_t *) ip;
          u16 len = clib_net_to_host_u16 (ip6->payload_length);
          len -= adv + tail_orig;
          ip6->payload_length = clib_host_to_net_u16 (len);
          ip6->protocol = next_header;
          next[0] = ESP_DECRYPT_NEXT_IP6_INPUT;
        }
      else
        {
          ip4_header_t *ip4 = (ip4_header_t *) ip;
          ip_csum_t sum = ip4->checksum;
          u16 len = clib_net_to_host_u16 (ip4->length);
          len = clib_host_to_net_u16 (len - adv - tail_orig - udp_sz);
          sum = ip_csum_update (sum, ip4->protocol, next_header,
                                ip4_header_t, protocol);
          sum = ip_csum_update (sum, ip4->length, len, ip4_header_t, length);
          ip4->checksum = ip_csum_fold (sum);
          ip4->protocol = next_header;
          ip4->length = len;
          next[0] = ESP_DECRYPT_NEXT_IP4_INPUT;
        }
    }
  else
    {
      if (PREDICT_TRUE (next_header == IP_PROTOCOL_IP_IN_IP))
        {
          next[0] = ESP_DECRYPT_NEXT_IP4_INPUT;
          b->current_data = pd->current_data + adv;
          b->current_length = pd->current_length - adv;
          esp_remove_tail (vm, b, lb, tail);
        }
      else if (next_header == IP_PROTOCOL_IPV6)
        {
          next[0] = ESP_DECRYPT_NEXT_IP6_INPUT;
          b->current_data = pd->current_data + adv;
          b->current_length = pd->current_length - adv;
          esp_remove_tail (vm, b, lb, tail);
        }
      else if (next_header == IP_PROTOCOL_MPLS_IN_IP)
        {
          next[0] = ESP_DECRYPT_NEXT_MPLS_INPUT;
          b->current_data = pd->current_data + adv;
          b->current_length = pd->current_length - adv;
          esp_remove_tail (vm, b, lb, tail);
        }
      else if (is_tun && next_header == IP_PROTOCOL_GRE)
        {
          gre_header_t *gre;

          b->current_data = pd->current_data + adv;
          b->current_length = pd->current_length - adv - tail;

          gre = vlib_buffer_get_current (b);

          vlib_buffer_advance (b, sizeof (*gre));

          switch (clib_net_to_host_u16 (gre->protocol))
            {
            case GRE_PROTOCOL_teb:
              vnet_update_l2_len (b);
              next[0] = ESP_DECRYPT_NEXT_L2_INPUT;
              break;
            case GRE_PROTOCOL_ip4:
              next[0] = ESP_DECRYPT_NEXT_IP4_INPUT;
              break;
            case GRE_PROTOCOL_ip6:
              next[0] = ESP_DECRYPT_NEXT_IP6_INPUT;
              break;
            default:
              esp_decrypt_set_next_index (
                b, node, vm->thread_index, ESP_DECRYPT_ERROR_UNSUP_PAYLOAD, 0,
                next, ESP_DECRYPT_NEXT_DROP, pd->sa_index);
              break;
            }
        }
      else if ((next[0] = vec_elt (next_by_next_header, next_header)) !=
               (u16) ~0)
        {
          b->current_data = pd->current_data + adv;
          b->current_length = pd->current_length - adv;
          esp_remove_tail (vm, b, lb, tail);
        }
      else
        {
          esp_decrypt_set_next_index (b, node, vm->thread_index,
                                      ESP_DECRYPT_ERROR_UNSUP_PAYLOAD, 0, next,
                                      ESP_DECRYPT_NEXT_DROP, pd->sa_index);
          return;
        }
    }
  if (is_tun)
    {
      if (ipsec_sa_is_set_IS_PROTECT (sa0))
        {
          /*
           * There are two encap possibilities
           * 1) the tunnel and the SA are providing encap, i.e. it's
           *   MAC | SA-IP | TUN-IP | ESP | PAYLOAD
           * implying the SA is in tunnel mode (on a tunnel interface)
           * 2) only the tunnel provides encap
           *   MAC | TUN-IP | ESP | PAYLOAD
           * implying the SA is in transport mode.
           *
           * For 2) we need only strip the tunnel encap and we're good,
           *  since the tunnel and crypto encap (in the tun-protect
           *  object) are the same and we verified above that these match.
           * For 1) we need to strip the SA-IP outer headers, to
           *  reveal the tunnel IP and then check that this matches
           *  the configured tunnel.
           */
          const ipsec_tun_protect_t *itp;

          itp =
            ipsec_tun_protect_get (vnet_buffer (b)->ipsec.protect_index);

          if (PREDICT_TRUE (next_header == IP_PROTOCOL_IP_IN_IP))
            {
              const ip4_header_t *ip4;

              ip4 = vlib_buffer_get_current (b);

              if (!ip46_address_is_equal_v4 (&itp->itp_tun.src,
                                             &ip4->dst_address) ||
                  !ip46_address_is_equal_v4 (&itp->itp_tun.dst,
                                             &ip4->src_address))
                {
                  esp_decrypt_set_next_index (
                    b, node, vm->thread_index,
                    ESP_DECRYPT_ERROR_TUN_NO_PROTO, 0, next,
                    ESP_DECRYPT_NEXT_DROP, pd->sa_index);
                }
            }
          else if (next_header == IP_PROTOCOL_IPV6)
            {
              const ip6_header_t *ip6;

              ip6 = vlib_buffer_get_current (b);

              if (!ip46_address_is_equal_v6 (&itp->itp_tun.src,
                                             &ip6->dst_address) ||
                  !ip46_address_is_equal_v6 (&itp->itp_tun.dst,
                                             &ip6->src_address))
                {
                  esp_decrypt_set_next_index (
                    b, node, vm->thread_index,
                    ESP_DECRYPT_ERROR_TUN_NO_PROTO, 0, next,
                    ESP_DECRYPT_NEXT_DROP, pd->sa_index);
                }
            }
        }
    }

  if (PREDICT_FALSE (n_lost))
    vlib_increment_simple_counter (&ipsec_sa_err_counters[IPSEC_SA_ERROR_LOST],
                                   vm->thread_index, pd->sa_index, n_lost);
}
always_inline uword
esp_decrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
                    vlib_frame_t *from_frame, int is_ip6, int is_tun,
                    u16 async_next_node)
{
  ipsec_main_t *im = &ipsec_main;
  const u16 *next_by_next_header = im->next_header_registrations;
  u32 thread_index = vm->thread_index;
  u16 len;
  ipsec_per_thread_data_t *ptd = vec_elt_at_index (im->ptd, thread_index);
  u32 *from = vlib_frame_vector_args (from_frame);
  u32 n_left = from_frame->n_vectors;
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
  vlib_buffer_t *sync_bufs[VLIB_FRAME_SIZE];
  u16 sync_nexts[VLIB_FRAME_SIZE], *sync_next = sync_nexts, n_sync = 0;
  u16 async_nexts[VLIB_FRAME_SIZE], *async_next = async_nexts;
  u16 noop_nexts[VLIB_FRAME_SIZE], n_noop = 0;
  u32 sync_bi[VLIB_FRAME_SIZE];
  u32 noop_bi[VLIB_FRAME_SIZE];
  esp_decrypt_packet_data_t pkt_data[VLIB_FRAME_SIZE], *pd = pkt_data;
  esp_decrypt_packet_data2_t pkt_data2[VLIB_FRAME_SIZE], *pd2 = pkt_data2;
  esp_decrypt_packet_data_t cpd = { };
  u32 current_sa_index = ~0, current_sa_bytes = 0, current_sa_pkts = 0;
  const u8 esp_sz = sizeof (esp_header_t);
  ipsec_sa_t *sa0 = 0;
  vnet_crypto_op_t _op, *op = &_op;
  vnet_crypto_op_t **crypto_ops;
  vnet_crypto_op_t **integ_ops;
  int is_async = im->async_mode;
  vnet_crypto_async_op_id_t async_op = ~0;
  vnet_crypto_async_frame_t *async_frames[VNET_CRYPTO_ASYNC_OP_N_IDS];
  esp_decrypt_error_t err;

  vlib_get_buffers (vm, from, b, n_left);
  if (!is_async)
    {
      vec_reset_length (ptd->crypto_ops);
      vec_reset_length (ptd->integ_ops);
      vec_reset_length (ptd->chained_crypto_ops);
      vec_reset_length (ptd->chained_integ_ops);
    }
  vec_reset_length (ptd->async_frames);
  vec_reset_length (ptd->chunks);
  clib_memset (sync_nexts, -1, sizeof (sync_nexts));
  clib_memset (async_frames, 0, sizeof (async_frames));
  while (n_left > 0)
    {
      u8 *payload;

      err = ESP_DECRYPT_ERROR_RX_PKTS;
      if (n_left > 2)
        {
          u8 *p;
          vlib_prefetch_buffer_header (b[2], LOAD);
          p = vlib_buffer_get_current (b[1]);
          clib_prefetch_load (p);
          p -= CLIB_CACHE_LINE_BYTES;
          clib_prefetch_load (p);
        }

      u32 n_bufs = vlib_buffer_chain_linearize (vm, b[0]);
      if (n_bufs == 0)
        {
          err = ESP_DECRYPT_ERROR_NO_BUFFERS;
          esp_decrypt_set_next_index (b[0], node, thread_index, err, n_noop,
                                      noop_nexts, ESP_DECRYPT_NEXT_DROP,
                                      vnet_buffer (b[0])->ipsec.sad_index);
          goto next;
        }

      if (vnet_buffer (b[0])->ipsec.sad_index != current_sa_index)
        {
          if (current_sa_pkts)
            vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
                                             current_sa_index, current_sa_pkts,
                                             current_sa_bytes);
          current_sa_bytes = current_sa_pkts = 0;

          current_sa_index = vnet_buffer (b[0])->ipsec.sad_index;
          vlib_prefetch_combined_counter (&ipsec_sa_counters, thread_index,
                                          current_sa_index);
          sa0 = ipsec_sa_get (current_sa_index);

          /* fetch the second cacheline ASAP */
          clib_prefetch_load (sa0->cacheline1);
          cpd.icv_sz = sa0->integ_icv_size;
          cpd.iv_sz = sa0->crypto_iv_size;
          cpd.flags = sa0->flags;
          cpd.sa_index = current_sa_index;
          is_async = im->async_mode | ipsec_sa_is_set_IS_ASYNC (sa0);
        }

      if (PREDICT_FALSE ((u16) ~0 == sa0->thread_index))
        {
          /* this is the first packet to use this SA, claim the SA
           * for this thread. this could happen simultaneously on
           * another thread */
          clib_atomic_cmp_and_swap (&sa0->thread_index, ~0,
                                    ipsec_sa_assign_thread (thread_index));
        }

      if (PREDICT_FALSE (thread_index != sa0->thread_index))
        {
          vnet_buffer (b[0])->ipsec.thread_index = sa0->thread_index;
          err = ESP_DECRYPT_ERROR_HANDOFF;
          esp_decrypt_set_next_index (b[0], node, thread_index, err, n_noop,
                                      noop_nexts, ESP_DECRYPT_NEXT_HANDOFF,
                                      current_sa_index);
          goto next;
        }
      /* store packet data for next round for easier prefetch */
      pd->sa_data = cpd.sa_data;
      pd->current_data = b[0]->current_data;
      pd->hdr_sz = pd->current_data - vnet_buffer (b[0])->l3_hdr_offset;
      payload = b[0]->data + pd->current_data;
      pd->seq = clib_host_to_net_u32 (((esp_header_t *) payload)->seq);
      pd->is_chain = 0;

      pd2->lb = b[0];
      pd2->free_buffer_index = 0;
      pd2->icv_removed = 0;

      if (n_bufs > 1)
        {
          pd->is_chain = 1;
          /* find last buffer in the chain */
          while (pd2->lb->flags & VLIB_BUFFER_NEXT_PRESENT)
            pd2->lb = vlib_get_buffer (vm, pd2->lb->next_buffer);

          crypto_ops = &ptd->chained_crypto_ops;
          integ_ops = &ptd->chained_integ_ops;
        }
      else
        {
          crypto_ops = &ptd->crypto_ops;
          integ_ops = &ptd->integ_ops;
        }

      pd->current_length = b[0]->current_length;

      /* anti-replay check */
      if (ipsec_sa_anti_replay_and_sn_advance (sa0, pd->seq, ~0, false,
                                               &pd->seq_hi))
        {
          err = ESP_DECRYPT_ERROR_REPLAY;
          esp_decrypt_set_next_index (b[0], node, thread_index, err, n_noop,
                                      noop_nexts, ESP_DECRYPT_NEXT_DROP,
                                      current_sa_index);
          goto next;
        }

      if (pd->current_length < cpd.icv_sz + esp_sz + cpd.iv_sz)
        {
          err = ESP_DECRYPT_ERROR_RUNT;
          esp_decrypt_set_next_index (b[0], node, thread_index, err, n_noop,
                                      noop_nexts, ESP_DECRYPT_NEXT_DROP,
                                      current_sa_index);
          goto next;
        }

      len = pd->current_length - cpd.icv_sz;
      current_sa_pkts += 1;
      current_sa_bytes += vlib_buffer_length_in_chain (vm, b[0]);
      if (is_async)
        {
          async_op = sa0->crypto_async_dec_op_id;

          /* get a frame for this op if we don't yet have one or it's
           * full */
          if (NULL == async_frames[async_op] ||
              vnet_crypto_async_frame_is_full (async_frames[async_op]))
            {
              async_frames[async_op] =
                vnet_crypto_async_get_frame (vm, async_op);
              if (PREDICT_FALSE (!async_frames[async_op]))
                {
                  err = ESP_DECRYPT_ERROR_NO_AVAIL_FRAME;
                  esp_decrypt_set_next_index (
                    b[0], node, thread_index, err, n_noop, noop_nexts,
                    ESP_DECRYPT_NEXT_DROP, current_sa_index);
                  goto next;
                }

              /* Save the frame to the list we'll submit at the end */
              vec_add1 (ptd->async_frames, async_frames[async_op]);
            }

          err = esp_decrypt_prepare_async_frame (
            vm, node, ptd, async_frames[async_op], sa0, payload, len,
            cpd.icv_sz, cpd.iv_sz, pd, pd2, from[b - bufs], b[0], async_next,
            async_next_node);
          if (ESP_DECRYPT_ERROR_RX_PKTS != err)
            {
              esp_decrypt_set_next_index (
                b[0], node, thread_index, err, n_noop, noop_nexts,
                ESP_DECRYPT_NEXT_DROP, current_sa_index);
            }
        }
      else
        esp_decrypt_prepare_sync_op (
          vm, node, ptd, &crypto_ops, &integ_ops, op, sa0, payload, len,
          cpd.icv_sz, cpd.iv_sz, pd, pd2, b[0], sync_next, n_sync);
    next:
      if (ESP_DECRYPT_ERROR_RX_PKTS != err)
        {
          noop_bi[n_noop] = from[b - bufs];
          n_noop++;
        }
      else if (!is_async)
        {
          sync_bi[n_sync] = from[b - bufs];
          sync_bufs[n_sync] = b[0];
          n_sync++;
          sync_next++;
          pd += 1;
          pd2 += 1;
        }
      else
        async_next++;

      n_left -= 1;
      b += 1;
    }

  if (PREDICT_TRUE (~0 != current_sa_index))
    vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
                                     current_sa_index, current_sa_pkts,
                                     current_sa_bytes);
  /* submit or free all of the open frames */
  vnet_crypto_async_frame_t **async_frame;

  vec_foreach (async_frame, ptd->async_frames)
    {
      /* free frame and move on if no ops were successfully added */
      if (PREDICT_FALSE (!(*async_frame)->n_elts))
        {
          vnet_crypto_async_free_frame (vm, *async_frame);
          continue;
        }
      if (vnet_crypto_async_submit_open_frame (vm, *async_frame) < 0)
        {
          n_noop += esp_async_recycle_failed_submit (
            vm, *async_frame, node, ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR,
            IPSEC_SA_ERROR_CRYPTO_ENGINE_ERROR, n_noop, noop_bi, noop_nexts,
            ESP_DECRYPT_NEXT_DROP, false);
          vnet_crypto_async_reset_frame (*async_frame);
          vnet_crypto_async_free_frame (vm, *async_frame);
        }
    }
  if (n_sync)
    {
      esp_process_ops (vm, node, ptd->integ_ops, sync_bufs, sync_nexts,
                       ESP_DECRYPT_ERROR_INTEG_ERROR);
      esp_process_chained_ops (vm, node, ptd->chained_integ_ops, sync_bufs,
                               sync_nexts, ptd->chunks,
                               ESP_DECRYPT_ERROR_INTEG_ERROR);

      esp_process_ops (vm, node, ptd->crypto_ops, sync_bufs, sync_nexts,
                       ESP_DECRYPT_ERROR_DECRYPTION_FAILED);
      esp_process_chained_ops (vm, node, ptd->chained_crypto_ops, sync_bufs,
                               sync_nexts, ptd->chunks,
                               ESP_DECRYPT_ERROR_DECRYPTION_FAILED);
    }
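
  /* Note (added): integrity results are processed before decryption
   * results, so a buffer whose ICV check failed is already redirected to
   * the drop next-index by the time its decrypt op status is examined. */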
  /* Post decryption round - adjust packet data start and length and next
     node */
  n_left = n_sync;
  sync_next = sync_nexts;
  pd = pkt_data;
  pd2 = pkt_data2;
  b = sync_bufs;

  while (n_left)
    {
      if (n_left >= 2)
        {
          void *data = b[1]->data + pd[1].current_data;

          /* buffer metadata */
          vlib_prefetch_buffer_header (b[1], LOAD);

          /* esp_footer_t */
          CLIB_PREFETCH (data + pd[1].current_length - pd[1].icv_sz - 2,
                         CLIB_CACHE_LINE_BYTES, LOAD);

          /* packet headers */
          CLIB_PREFETCH (data - CLIB_CACHE_LINE_BYTES,
                         CLIB_CACHE_LINE_BYTES * 2, LOAD);
        }

      /* save the sa_index as GRE_teb post_crypto changes the L2 opaque */
      if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
        current_sa_index = vnet_buffer (b[0])->ipsec.sad_index;

      if (sync_next[0] >= ESP_DECRYPT_N_NEXT)
        esp_decrypt_post_crypto (vm, node, next_by_next_header, pd, pd2, b[0],
                                 sync_next, is_ip6, is_tun, 0);

      /* trace: */
      if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
        {
          esp_decrypt_trace_t *tr;
          tr = vlib_add_trace (vm, node, b[0], sizeof (*tr));
          sa0 = ipsec_sa_get (current_sa_index);
          tr->crypto_alg = sa0->crypto_alg;
          tr->integ_alg = sa0->integ_alg;
          tr->seq = pd->seq;
          tr->sa_seq = sa0->seq;
          tr->sa_seq_hi = sa0->seq_hi;
          tr->pkt_seq_hi = pd->seq_hi;
        }

      /* next */
      n_left -= 1;
      sync_next += 1;
      pd += 1;
      pd2 += 1;
      b += 1;
    }
  vlib_node_increment_counter (vm, node->node_index, ESP_DECRYPT_ERROR_RX_PKTS,
                               from_frame->n_vectors);

  if (n_sync)
    vlib_buffer_enqueue_to_next (vm, node, sync_bi, sync_nexts, n_sync);

  if (n_noop)
    vlib_buffer_enqueue_to_next (vm, node, noop_bi, noop_nexts, n_noop);

  return (from_frame->n_vectors);
}
always_inline uword
esp_decrypt_post_inline (vlib_main_t * vm,
                         vlib_node_runtime_t * node,
                         vlib_frame_t * from_frame, int is_ip6, int is_tun)
{
  const ipsec_main_t *im = &ipsec_main;
  const u16 *next_by_next_header = im->next_header_registrations;
  u32 *from = vlib_frame_vector_args (from_frame);
  u32 n_left = from_frame->n_vectors;
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
  u16 nexts[VLIB_FRAME_SIZE], *next = nexts;
  vlib_get_buffers (vm, from, b, n_left);

  while (n_left > 0)
    {
      esp_decrypt_packet_data_t *pd = &(esp_post_data (b[0]))->decrypt_data;

      if (n_left > 2)
        {
          vlib_prefetch_buffer_header (b[2], LOAD);
          vlib_prefetch_buffer_header (b[1], LOAD);
        }

      if (!pd->is_chain)
        esp_decrypt_post_crypto (vm, node, next_by_next_header, pd, 0, b[0],
                                 next, is_ip6, is_tun, 1);
      else
        {
          esp_decrypt_packet_data2_t *pd2 = esp_post_data2 (b[0]);
          esp_decrypt_post_crypto (vm, node, next_by_next_header, pd, pd2,
                                   b[0], next, is_ip6, is_tun, 1);
        }

      /* trace: */
      if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
        {
          ipsec_sa_t *sa0 = ipsec_sa_get (pd->sa_index);
          esp_decrypt_trace_t *tr;
          esp_decrypt_packet_data_t *async_pd =
            &(esp_post_data (b[0]))->decrypt_data;
          tr = vlib_add_trace (vm, node, b[0], sizeof (*tr));
          sa0 = ipsec_sa_get (async_pd->sa_index);

          tr->crypto_alg = sa0->crypto_alg;
          tr->integ_alg = sa0->integ_alg;
          tr->seq = pd->seq;
          tr->sa_seq = sa0->seq;
          tr->sa_seq_hi = sa0->seq_hi;
        }

      n_left--;
      next++;
      b++;
    }

  n_left = from_frame->n_vectors;
  vlib_node_increment_counter (vm, node->node_index,
                               ESP_DECRYPT_ERROR_RX_POST_PKTS, n_left);

  vlib_buffer_enqueue_to_next (vm, node, from, nexts, n_left);

  return n_left;
}
VLIB_NODE_FN (esp4_decrypt_node) (vlib_main_t * vm,
                                  vlib_node_runtime_t * node,
                                  vlib_frame_t * from_frame)
{
  return esp_decrypt_inline (vm, node, from_frame, 0, 0,
                             esp_decrypt_async_next.esp4_post_next);
}

VLIB_NODE_FN (esp4_decrypt_post_node) (vlib_main_t * vm,
                                       vlib_node_runtime_t * node,
                                       vlib_frame_t * from_frame)
{
  return esp_decrypt_post_inline (vm, node, from_frame, 0, 0);
}

VLIB_NODE_FN (esp4_decrypt_tun_node) (vlib_main_t * vm,
                                      vlib_node_runtime_t * node,
                                      vlib_frame_t * from_frame)
{
  return esp_decrypt_inline (vm, node, from_frame, 0, 1,
                             esp_decrypt_async_next.esp4_tun_post_next);
}

VLIB_NODE_FN (esp4_decrypt_tun_post_node) (vlib_main_t * vm,
                                           vlib_node_runtime_t * node,
                                           vlib_frame_t * from_frame)
{
  return esp_decrypt_post_inline (vm, node, from_frame, 0, 1);
}

VLIB_NODE_FN (esp6_decrypt_node) (vlib_main_t * vm,
                                  vlib_node_runtime_t * node,
                                  vlib_frame_t * from_frame)
{
  return esp_decrypt_inline (vm, node, from_frame, 1, 0,
                             esp_decrypt_async_next.esp6_post_next);
}

VLIB_NODE_FN (esp6_decrypt_post_node) (vlib_main_t * vm,
                                       vlib_node_runtime_t * node,
                                       vlib_frame_t * from_frame)
{
  return esp_decrypt_post_inline (vm, node, from_frame, 1, 0);
}

VLIB_NODE_FN (esp6_decrypt_tun_node) (vlib_main_t * vm,
                                      vlib_node_runtime_t * node,
                                      vlib_frame_t * from_frame)
{
  return esp_decrypt_inline (vm, node, from_frame, 1, 1,
                             esp_decrypt_async_next.esp6_tun_post_next);
}

VLIB_NODE_FN (esp6_decrypt_tun_post_node) (vlib_main_t * vm,
                                           vlib_node_runtime_t * node,
                                           vlib_frame_t * from_frame)
{
  return esp_decrypt_post_inline (vm, node, from_frame, 1, 1);
}
VLIB_REGISTER_NODE (esp4_decrypt_node) = {
  .name = "esp4-decrypt",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_decrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ESP_DECRYPT_N_ERROR,
  .error_counters = esp_decrypt_error_counters,

  .n_next_nodes = ESP_DECRYPT_N_NEXT,
  .next_nodes = {
    [ESP_DECRYPT_NEXT_DROP] = "ip4-drop",
    [ESP_DECRYPT_NEXT_IP4_INPUT] = "ip4-input-no-checksum",
    [ESP_DECRYPT_NEXT_IP6_INPUT] = "ip6-input",
    [ESP_DECRYPT_NEXT_MPLS_INPUT] = "mpls-drop",
    [ESP_DECRYPT_NEXT_L2_INPUT] = "l2-input",
    [ESP_DECRYPT_NEXT_HANDOFF] = "esp4-decrypt-handoff",
  },
};
VLIB_REGISTER_NODE (esp4_decrypt_post_node) = {
  .name = "esp4-decrypt-post",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_decrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ESP_DECRYPT_N_ERROR,
  .error_counters = esp_decrypt_error_counters,

  .sibling_of = "esp4-decrypt",
};
VLIB_REGISTER_NODE (esp6_decrypt_node) = {
  .name = "esp6-decrypt",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_decrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ESP_DECRYPT_N_ERROR,
  .error_counters = esp_decrypt_error_counters,

  .n_next_nodes = ESP_DECRYPT_N_NEXT,
  .next_nodes = {
    [ESP_DECRYPT_NEXT_DROP] = "ip6-drop",
    [ESP_DECRYPT_NEXT_IP4_INPUT] = "ip4-input-no-checksum",
    [ESP_DECRYPT_NEXT_IP6_INPUT] = "ip6-input",
    [ESP_DECRYPT_NEXT_MPLS_INPUT] = "mpls-drop",
    [ESP_DECRYPT_NEXT_L2_INPUT] = "l2-input",
    [ESP_DECRYPT_NEXT_HANDOFF] = "esp6-decrypt-handoff",
  },
};
VLIB_REGISTER_NODE (esp6_decrypt_post_node) = {
  .name = "esp6-decrypt-post",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_decrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ESP_DECRYPT_N_ERROR,
  .error_counters = esp_decrypt_error_counters,

  .sibling_of = "esp6-decrypt",
};
VLIB_REGISTER_NODE (esp4_decrypt_tun_node) = {
  .name = "esp4-decrypt-tun",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_decrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ESP_DECRYPT_N_ERROR,
  .error_counters = esp_decrypt_error_counters,
  .n_next_nodes = ESP_DECRYPT_N_NEXT,
  .next_nodes = {
    [ESP_DECRYPT_NEXT_DROP] = "ip4-drop",
    [ESP_DECRYPT_NEXT_IP4_INPUT] = "ip4-input-no-checksum",
    [ESP_DECRYPT_NEXT_IP6_INPUT] = "ip6-input",
    [ESP_DECRYPT_NEXT_MPLS_INPUT] = "mpls-input",
    [ESP_DECRYPT_NEXT_L2_INPUT] = "l2-input",
    [ESP_DECRYPT_NEXT_HANDOFF] = "esp4-decrypt-tun-handoff",
  },
};
VLIB_REGISTER_NODE (esp4_decrypt_tun_post_node) = {
  .name = "esp4-decrypt-tun-post",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_decrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ESP_DECRYPT_N_ERROR,
  .error_counters = esp_decrypt_error_counters,

  .sibling_of = "esp4-decrypt-tun",
};
VLIB_REGISTER_NODE (esp6_decrypt_tun_node) = {
  .name = "esp6-decrypt-tun",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_decrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ESP_DECRYPT_N_ERROR,
  .error_counters = esp_decrypt_error_counters,
  .n_next_nodes = ESP_DECRYPT_N_NEXT,
  .next_nodes = {
    [ESP_DECRYPT_NEXT_DROP] = "ip6-drop",
    [ESP_DECRYPT_NEXT_IP4_INPUT] = "ip4-input-no-checksum",
    [ESP_DECRYPT_NEXT_IP6_INPUT] = "ip6-input",
    [ESP_DECRYPT_NEXT_MPLS_INPUT] = "mpls-input",
    [ESP_DECRYPT_NEXT_L2_INPUT] = "l2-input",
    [ESP_DECRYPT_NEXT_HANDOFF] = "esp6-decrypt-tun-handoff",
  },
};
VLIB_REGISTER_NODE (esp6_decrypt_tun_post_node) = {
  .name = "esp6-decrypt-tun-post",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_decrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ESP_DECRYPT_N_ERROR,
  .error_counters = esp_decrypt_error_counters,

  .sibling_of = "esp6-decrypt-tun",
};
#ifndef CLIB_MARCH_VARIANT

static clib_error_t *
esp_decrypt_init (vlib_main_t *vm)
{
  ipsec_main_t *im = &ipsec_main;

  im->esp4_dec_fq_index =
    vlib_frame_queue_main_init (esp4_decrypt_node.index, 0);
  im->esp6_dec_fq_index =
    vlib_frame_queue_main_init (esp6_decrypt_node.index, 0);
  im->esp4_dec_tun_fq_index =
    vlib_frame_queue_main_init (esp4_decrypt_tun_node.index, 0);
  im->esp6_dec_tun_fq_index =
    vlib_frame_queue_main_init (esp6_decrypt_tun_node.index, 0);

  return 0;
}

VLIB_INIT_FUNCTION (esp_decrypt_init);

#endif
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */