/*
 * esp_decrypt.c : IPSec ESP decrypt node
 *
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vnet/vnet.h>
#include <vnet/api_errno.h>
#include <vnet/ip/ip.h>
#include <vnet/l2/l2_input.h>

#include <vnet/ipsec/ipsec.h>
#include <vnet/ipsec/esp.h>
#include <vnet/ipsec/ipsec_io.h>
#include <vnet/ipsec/ipsec_tun.h>

#include <vnet/gre/packet.h>
#define foreach_esp_decrypt_next                                              \
  _ (DROP, "error-drop")                                                      \
  _ (IP4_INPUT, "ip4-input-no-checksum")                                      \
  _ (IP6_INPUT, "ip6-input")                                                  \
  _ (L2_INPUT, "l2-input")                                                    \
  _ (MPLS_INPUT, "mpls-input")                                                \
  _ (HANDOFF, "handoff")

#define _(v, s) ESP_DECRYPT_NEXT_##v,
typedef enum
{
  foreach_esp_decrypt_next
#undef _
    ESP_DECRYPT_N_NEXT,
} esp_decrypt_next_t;
#define foreach_esp_decrypt_post_next                                         \
  _ (DROP, "error-drop")                                                      \
  _ (IP4_INPUT, "ip4-input-no-checksum")                                      \
  _ (IP6_INPUT, "ip6-input")                                                  \
  _ (MPLS_INPUT, "mpls-input")                                                \
  _ (L2_INPUT, "l2-input")

#define _(v, s) ESP_DECRYPT_POST_NEXT_##v,
typedef enum
{
  foreach_esp_decrypt_post_next
#undef _
    ESP_DECRYPT_POST_N_NEXT,
} esp_decrypt_post_next_t;
typedef struct
{
  u32 seq;
  u32 sa_seq;
  u32 sa_seq_hi;
  u32 pkt_seq_hi;
  ipsec_crypto_alg_t crypto_alg;
  ipsec_integ_alg_t integ_alg;
} esp_decrypt_trace_t;
typedef vl_counter_esp_decrypt_enum_t esp_decrypt_error_t;
/* The number of bytes in the high-order half of the extended sequence number */
#define N_HI_ESN_BYTES 4
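/*
 * With ESN (RFC 4303), the sequence number is 64 bits wide but only the
 * low 32 bits are carried in the ESP header. The high 32 bits must still
 * be covered by the ICV, so the helpers below splice these 4 bytes into
 * the integrity-checked region just before the ICV.
 */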
/* packet trace format function */
static u8 *
format_esp_decrypt_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  esp_decrypt_trace_t *t = va_arg (*args, esp_decrypt_trace_t *);

  s = format (s,
              "esp: crypto %U integrity %U pkt-seq %d sa-seq %u sa-seq-hi %u "
              "pkt-seq-hi %u",
              format_ipsec_crypto_alg, t->crypto_alg, format_ipsec_integ_alg,
              t->integ_alg, t->seq, t->sa_seq, t->sa_seq_hi, t->pkt_seq_hi);
  return s;
}

#define ESP_ENCRYPT_PD_F_FD_TRANSPORT (1 << 2)
static_always_inline void
esp_process_ops (vlib_main_t * vm, vlib_node_runtime_t * node,
                 vnet_crypto_op_t * ops, vlib_buffer_t * b[], u16 * nexts,
                 int e)
{
  vnet_crypto_op_t *op = ops;
  u32 n_fail, n_ops = vec_len (ops);

  if (n_ops == 0)
    return;

  n_fail = n_ops - vnet_crypto_process_ops (vm, op, n_ops);

  while (n_fail)
    {
      ASSERT (op - ops < n_ops);
      if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
        {
          u32 err, bi = op->user_data;
          if (op->status == VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC)
            err = e;
          else
            err = ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR;
          esp_decrypt_set_next_index (b[bi], node, vm->thread_index, err, bi,
                                      nexts, ESP_DECRYPT_NEXT_DROP,
                                      vnet_buffer (b[bi])->ipsec.sad_index);
          n_fail--;
        }
      op++;
    }
}
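/*
 * Same as esp_process_ops, but for operations whose data spans chained
 * buffers; each op carries a chunk list describing its scatter-gather
 * segments.
 */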
static_always_inline void
esp_process_chained_ops (vlib_main_t * vm, vlib_node_runtime_t * node,
                         vnet_crypto_op_t * ops, vlib_buffer_t * b[],
                         u16 * nexts, vnet_crypto_op_chunk_t * chunks, int e)
{
  vnet_crypto_op_t *op = ops;
  u32 n_fail, n_ops = vec_len (ops);

  if (PREDICT_TRUE (n_ops == 0))
    return;

  n_fail = n_ops - vnet_crypto_process_chained_ops (vm, op, chunks, n_ops);

  while (n_fail)
    {
      ASSERT (op - ops < n_ops);
      if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
        {
          u32 err, bi = op->user_data;
          if (op->status == VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC)
            err = e;
          else
            err = ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR;
          esp_decrypt_set_next_index (b[bi], node, vm->thread_index, err, bi,
                                      nexts, ESP_DECRYPT_NEXT_DROP,
                                      vnet_buffer (b[bi])->ipsec.sad_index);
          n_fail--;
        }
      op++;
    }
}
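/*
 * Trim 'tail' bytes (ESP footer, padding and ICV) from the end of a
 * possibly chained buffer, freeing the trailing buffer if it is
 * completely consumed.
 */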
static_always_inline void
esp_remove_tail (vlib_main_t * vm, vlib_buffer_t * b, vlib_buffer_t * last,
                 u16 tail)
{
  vlib_buffer_t *before_last = b;

  b->total_length_not_including_first_buffer -= tail;

  if (last->current_length > tail)
    {
      last->current_length -= tail;
      return;
    }
  ASSERT (b->flags & VLIB_BUFFER_NEXT_PRESENT);

  while (b->flags & VLIB_BUFFER_NEXT_PRESENT)
    {
      before_last = b;
      b = vlib_get_buffer (vm, b->next_buffer);
    }
  before_last->current_length -= tail - last->current_length;
  vlib_buffer_free_one (vm, before_last->next_buffer);
  before_last->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
}
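/*
 * Strip the ESP tail together with any TFC (Traffic Flow Confidentiality,
 * RFC 4303 section 2.7) padding. TFC padding sits after the inner IP
 * packet, so the true end of the packet is given by the inner header's
 * length field: e.g. a 1400-byte decrypted chain carrying a 1200-byte
 * inner IPv4 packet with a 20-byte ESP tail has 200 trailing bytes to
 * remove, 180 of them TFC padding.
 */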
static_always_inline void
esp_remove_tail_and_tfc_padding (vlib_main_t *vm, vlib_node_runtime_t *node,
                                 const esp_decrypt_packet_data_t *pd,
                                 vlib_buffer_t *b, vlib_buffer_t *last,
                                 u16 *next, u16 tail, int is_ip6)
{
  const u16 total_buffer_length = vlib_buffer_length_in_chain (vm, b);
  u16 ip_packet_length;
  if (is_ip6)
    {
      const ip6_header_t *ip6 = vlib_buffer_get_current (b);
      ip_packet_length =
        clib_net_to_host_u16 (ip6->payload_length) + sizeof (ip6_header_t);
    }
  else
    {
      const ip4_header_t *ip4 = vlib_buffer_get_current (b);
      ip_packet_length = clib_net_to_host_u16 (ip4->length);
    }
  /* In the case of TFC padding, the size of the buffer data needs to be
   * adjusted to the IP packet length */
  if (PREDICT_FALSE (total_buffer_length < ip_packet_length + tail))
    {
      esp_decrypt_set_next_index (b, node, vm->thread_index,
                                  ESP_DECRYPT_ERROR_NO_TAIL_SPACE, 0, next,
                                  ESP_DECRYPT_NEXT_DROP, pd->sa_index);
      return;
    }
  esp_remove_tail (vm, b, last, total_buffer_length - ip_packet_length);
}
/* ICV is split across the last two buffers, so move it to the last buffer
   and return a pointer to it */
static_always_inline u8 *
esp_move_icv (vlib_main_t * vm, vlib_buffer_t * first,
              esp_decrypt_packet_data_t * pd,
              esp_decrypt_packet_data2_t * pd2, u16 icv_sz, u16 * dif)
{
  vlib_buffer_t *before_last, *bp;
  u16 last_sz = pd2->lb->current_length;
  u16 first_sz = icv_sz - last_sz;

  bp = before_last = first;
  while (bp->flags & VLIB_BUFFER_NEXT_PRESENT)
    {
      before_last = bp;
      bp = vlib_get_buffer (vm, bp->next_buffer);
    }

  u8 *lb_curr = vlib_buffer_get_current (pd2->lb);
  memmove (lb_curr + first_sz, lb_curr, last_sz);
  clib_memcpy_fast (lb_curr, vlib_buffer_get_tail (before_last) - first_sz,
                    first_sz);
  before_last->current_length -= first_sz;
  if (before_last == first)
    pd->current_length -= first_sz;
  else
    first->total_length_not_including_first_buffer -= first_sz;
  clib_memset (vlib_buffer_get_tail (before_last), 0, first_sz);
  if (dif)
    dif[0] = first_sz;
  first->total_length_not_including_first_buffer -= last_sz;
  pd2->lb = before_last;
  pd2->icv_removed = 1;
  pd2->free_buffer_index = before_last->next_buffer;
  before_last->flags &= ~VLIB_BUFFER_NEXT_PRESENT;

  return lb_curr;
}
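/*
 * For ESN SAs the high 32 bits of the sequence number must sit between
 * the encrypted payload and the ICV while the ICV is verified:
 *
 *    [ ESP hdr | IV | payload ][ seq_hi (4B) ][ ICV ]
 *
 * esp_insert_esn makes room by shifting the ICV right by 4 bytes, or
 * simply appends seq_hi at the tail of the last buffer when the ICV was
 * already moved out of the way; it returns how many bytes it inserted.
 */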
static_always_inline u16
esp_insert_esn (vlib_main_t *vm, ipsec_sa_t *sa, esp_decrypt_packet_data_t *pd,
                esp_decrypt_packet_data2_t *pd2, u32 *data_len, u8 **digest,
                u16 *len, vlib_buffer_t *b, u8 *payload)
{
  if (!ipsec_sa_is_set_USE_ESN (sa))
    return 0;
  /* shift ICV by 4 bytes to insert ESN */
  u32 seq_hi = clib_host_to_net_u32 (pd->seq_hi);
  u8 tmp[ESP_MAX_ICV_SIZE];

  if (pd2->icv_removed)
    {
      u16 space_left = vlib_buffer_space_left_at_end (vm, pd2->lb);
      if (space_left >= N_HI_ESN_BYTES)
        {
          clib_memcpy_fast (vlib_buffer_get_tail (pd2->lb), &seq_hi,
                            N_HI_ESN_BYTES);
          *data_len += N_HI_ESN_BYTES;
        }
      else
        return N_HI_ESN_BYTES;

      len[0] = b->current_length;
    }
  else
    {
      clib_memcpy_fast (tmp, payload + len[0], ESP_MAX_ICV_SIZE);
      clib_memcpy_fast (payload + len[0], &seq_hi, N_HI_ESN_BYTES);
      clib_memcpy_fast (payload + len[0] + N_HI_ESN_BYTES, tmp,
                        ESP_MAX_ICV_SIZE);
      *data_len += N_HI_ESN_BYTES;
      *digest += N_HI_ESN_BYTES;
    }
  return N_HI_ESN_BYTES;
}
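/*
 * Combined helper for chained buffers: reassemble a split ICV into the
 * last buffer (esp_move_icv) and place seq_hi behind the data for ESN
 * SAs; *extra_esn is set when seq_hi could not be appended at the tail
 * and lives in the unlinked ICV buffer instead.
 */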
static_always_inline u8 *
esp_move_icv_esn (vlib_main_t * vm, vlib_buffer_t * first,
                  esp_decrypt_packet_data_t * pd,
                  esp_decrypt_packet_data2_t * pd2, u16 icv_sz,
                  ipsec_sa_t * sa, u8 * extra_esn, u32 * len)
{
  u16 dif = 0;
  u8 *digest = esp_move_icv (vm, first, pd, pd2, icv_sz, &dif);
  if (dif)
    *len -= dif;

  if (ipsec_sa_is_set_USE_ESN (sa))
    {
      u32 seq_hi = clib_host_to_net_u32 (pd->seq_hi);
      u16 space_left = vlib_buffer_space_left_at_end (vm, pd2->lb);

      if (space_left >= N_HI_ESN_BYTES)
        {
          clib_memcpy_fast (vlib_buffer_get_tail (pd2->lb), &seq_hi,
                            N_HI_ESN_BYTES);
          *len += N_HI_ESN_BYTES;
        }
      else
        {
          /* no space for ESN at the tail, use the next buffer
           * (with the ICV data) */
          ASSERT (pd2->icv_removed);
          vlib_buffer_t *tmp = vlib_get_buffer (vm, pd2->free_buffer_index);
          clib_memcpy_fast (vlib_buffer_get_current (tmp) - N_HI_ESN_BYTES,
                            &seq_hi, N_HI_ESN_BYTES);
          extra_esn[0] = 1;
        }
    }

  return digest;
}
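/*
 * Build the scatter-gather chunk list walked by the integrity op: the
 * first chunk covers the ESP header and payload in the head buffer, the
 * following chunks cover the rest of the chain minus the ICV, and for
 * ESN SAs a final 4-byte chunk carries seq_hi. Returns -1 if a buffer
 * needed to hold seq_hi cannot be allocated.
 */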
static_always_inline int
esp_decrypt_chain_integ (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
                         const esp_decrypt_packet_data_t *pd,
                         esp_decrypt_packet_data2_t *pd2, ipsec_sa_t *sa0,
                         vlib_buffer_t *b, u8 icv_sz, u8 *start_src,
                         u32 start_len, u8 **digest, u16 *n_ch,
                         u32 *integ_total_len)
{
  vnet_crypto_op_chunk_t *ch;
  vlib_buffer_t *cb = vlib_get_buffer (vm, b->next_buffer);
  u16 n_chunks = 1;
  u32 total_len;
  vec_add2 (ptd->chunks, ch, 1);
  total_len = ch->len = start_len;
  ch->src = start_src;

  while (1)
    {
      vec_add2 (ptd->chunks, ch, 1);
      n_chunks += 1;
      ch->src = vlib_buffer_get_current (cb);
      if (pd2->lb == cb)
        {
          if (pd2->icv_removed)
            ch->len = cb->current_length;
          else
            ch->len = cb->current_length - icv_sz;
          if (ipsec_sa_is_set_USE_ESN (sa0))
            {
              u32 seq_hi = clib_host_to_net_u32 (pd->seq_hi);
              u8 tmp[ESP_MAX_ICV_SIZE];
              u8 *esn;
              vlib_buffer_t *tmp_b;
              u16 space_left = vlib_buffer_space_left_at_end (vm, pd2->lb);
              if (space_left < N_HI_ESN_BYTES)
                {
                  if (pd2->icv_removed)
                    {
                      /* use pre-data area from the last buffer
                         that was removed from the chain */
                      tmp_b = vlib_get_buffer (vm, pd2->free_buffer_index);
                      esn = tmp_b->data - N_HI_ESN_BYTES;
                    }
                  else
                    {
                      /* no space, need to allocate new buffer */
                      u32 tmp_bi = 0;
                      if (vlib_buffer_alloc (vm, &tmp_bi, 1) != 1)
                        return -1;
                      tmp_b = vlib_get_buffer (vm, tmp_bi);
                      esn = tmp_b->data;
                      pd2->free_buffer_index = tmp_bi;
                    }
                  clib_memcpy_fast (esn, &seq_hi, N_HI_ESN_BYTES);

                  vec_add2 (ptd->chunks, ch, 1);
                  n_chunks += 1;
                  ch->src = esn;
                  ch->len = N_HI_ESN_BYTES;
                }
              else
                {
                  if (pd2->icv_removed)
                    {
                      clib_memcpy_fast (vlib_buffer_get_tail (pd2->lb),
                                        &seq_hi, N_HI_ESN_BYTES);
                    }
                  else
                    {
                      clib_memcpy_fast (tmp, *digest, ESP_MAX_ICV_SIZE);
                      clib_memcpy_fast (*digest, &seq_hi, N_HI_ESN_BYTES);
                      clib_memcpy_fast (*digest + N_HI_ESN_BYTES, tmp,
                                        ESP_MAX_ICV_SIZE);
                      *digest += N_HI_ESN_BYTES;
                    }
                  ch->len += N_HI_ESN_BYTES;
                }
            }
          total_len += ch->len;
          break;
        }
      else
        total_len += ch->len = cb->current_length;

      if (!(cb->flags & VLIB_BUFFER_NEXT_PRESENT))
        break;

      cb = vlib_get_buffer (vm, cb->next_buffer);
    }

  if (n_ch)
    *n_ch = n_chunks;
  if (integ_total_len)
    *integ_total_len = total_len;

  return 0;
}
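/*
 * Build the chunk list for the cipher; decryption happens in place
 * (ch->src == ch->dst). For AEAD SAs the ICV is the authentication tag
 * and is excluded from the cipher data; a tag split across buffers is
 * first reassembled into the last buffer. Returns the total number of
 * bytes to decrypt.
 */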
static_always_inline u32
esp_decrypt_chain_crypto (vlib_main_t * vm, ipsec_per_thread_data_t * ptd,
                          esp_decrypt_packet_data_t * pd,
                          esp_decrypt_packet_data2_t * pd2,
                          ipsec_sa_t * sa0, vlib_buffer_t * b, u8 icv_sz,
                          u8 * start, u32 start_len, u8 ** tag, u16 * n_ch)
{
  vnet_crypto_op_chunk_t *ch;
  vlib_buffer_t *cb = b;
  u16 n_chunks = 1;
  u32 total_len;
  vec_add2 (ptd->chunks, ch, 1);
  total_len = ch->len = start_len;
  ch->src = ch->dst = start;
  cb = vlib_get_buffer (vm, cb->next_buffer);
  n_chunks = 1;

  while (1)
    {
      vec_add2 (ptd->chunks, ch, 1);
      n_chunks += 1;
      ch->src = ch->dst = vlib_buffer_get_current (cb);
      if (pd2->lb == cb)
        {
          if (ipsec_sa_is_set_IS_AEAD (sa0))
            {
              if (pd2->lb->current_length < icv_sz)
                {
                  u16 dif = 0;
                  *tag = esp_move_icv (vm, b, pd, pd2, icv_sz, &dif);

                  /* this chunk does not contain crypto data */
                  n_chunks -= 1;
                  /* and fix previous chunk's length as it might have
                     been changed */
                  ASSERT (n_chunks > 0);
                  if (pd2->lb == b)
                    {
                      total_len -= dif;
                      ch[-1].len -= dif;
                    }
                  else
                    {
                      total_len = total_len + pd2->lb->current_length -
                        ch[-1].len;
                      ch[-1].len = pd2->lb->current_length;
                    }
                  break;
                }
              else
                *tag = vlib_buffer_get_tail (pd2->lb) - icv_sz;
            }

          if (pd2->icv_removed)
            total_len += ch->len = cb->current_length;
          else
            total_len += ch->len = cb->current_length - icv_sz;
        }
      else
        total_len += ch->len = cb->current_length;

      if (!(cb->flags & VLIB_BUFFER_NEXT_PRESENT))
        break;

      cb = vlib_get_buffer (vm, cb->next_buffer);
    }

  if (n_ch)
    *n_ch = n_chunks;

  return total_len;
}
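/*
 * Queue one packet's integrity and cipher operations for the synchronous
 * crypto path. Single-buffer packets land on the plain op vectors,
 * chained ones on the chained op vectors; a packet whose ESN buffer
 * cannot be allocated is rerouted to the drop next.
 */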
static_always_inline void
esp_decrypt_prepare_sync_op (vlib_main_t * vm, vlib_node_runtime_t * node,
                             ipsec_per_thread_data_t * ptd,
                             vnet_crypto_op_t *** crypto_ops,
                             vnet_crypto_op_t *** integ_ops,
                             vnet_crypto_op_t * op,
                             ipsec_sa_t * sa0, u8 * payload,
                             u16 len, u8 icv_sz, u8 iv_sz,
                             esp_decrypt_packet_data_t * pd,
                             esp_decrypt_packet_data2_t * pd2,
                             vlib_buffer_t * b, u16 * next, u32 index)
{
  const u8 esp_sz = sizeof (esp_header_t);

  if (PREDICT_TRUE (sa0->integ_op_id != VNET_CRYPTO_OP_NONE))
    {
      vnet_crypto_op_init (op, sa0->integ_op_id);
      op->key_index = sa0->integ_key_index;
      op->src = payload;
      op->flags = VNET_CRYPTO_OP_FLAG_HMAC_CHECK;
      op->user_data = index;
      op->digest = payload + len;
      op->digest_len = icv_sz;
      op->len = len;

      if (pd->is_chain)
        {
          /* buffer is chained */
          op->len = pd->current_length;

          /* special case when ICV is split and needs to be reassembled
           * first -> move it to the last buffer. Also take into account
           * that ESN needs to be added after encrypted data and may or
           * may not fit in the tail.*/
          if (pd2->lb->current_length < icv_sz)
            {
              u8 extra_esn = 0;
              op->digest =
                esp_move_icv_esn (vm, b, pd, pd2, icv_sz, sa0,
                                  &extra_esn, &op->len);

              if (extra_esn)
                {
                  /* esn is in the last buffer, that was unlinked from
                   * the chain */
                  op->len = b->current_length;
                }
              else
                {
                  if (pd2->lb == b)
                    {
                      /* we now have a single buffer of crypto data, adjust
                       * the length (second buffer contains only ICV) */
                      *integ_ops = &ptd->integ_ops;
                      *crypto_ops = &ptd->crypto_ops;
                      len = b->current_length;
                      goto out;
                    }
                }
            }
          else
            op->digest = vlib_buffer_get_tail (pd2->lb) - icv_sz;

          op->flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
          op->chunk_index = vec_len (ptd->chunks);
          if (esp_decrypt_chain_integ (vm, ptd, pd, pd2, sa0, b, icv_sz,
                                       payload, pd->current_length,
                                       &op->digest, &op->n_chunks, 0) < 0)
            {
              esp_decrypt_set_next_index (
                b, node, vm->thread_index, ESP_DECRYPT_ERROR_NO_BUFFERS, 0,
                next, ESP_DECRYPT_NEXT_DROP, pd->sa_index);
              return;
            }
        }
      else
        esp_insert_esn (vm, sa0, pd, pd2, &op->len, &op->digest, &len, b,
                        payload);
    out:
      vec_add_aligned (*(integ_ops[0]), op, 1, CLIB_CACHE_LINE_BYTES);
    }

  payload += esp_sz;
  len -= esp_sz;

  if (sa0->crypto_dec_op_id != VNET_CRYPTO_OP_NONE)
    {
      vnet_crypto_op_init (op, sa0->crypto_dec_op_id);
      op->key_index = sa0->crypto_key_index;
      op->iv = payload;

      if (ipsec_sa_is_set_IS_CTR (sa0))
        {
          /* construct nonce in a scratch space in front of the IP header */
          esp_ctr_nonce_t *nonce =
            (esp_ctr_nonce_t *) (payload - esp_sz - pd->hdr_sz -
                                 sizeof (*nonce));
          if (ipsec_sa_is_set_IS_AEAD (sa0))
            {
              /* construct aad in a scratch space in front of the nonce */
              esp_header_t *esp0 = (esp_header_t *) (payload - esp_sz);
              op->aad = (u8 *) nonce - sizeof (esp_aead_t);
              op->aad_len = esp_aad_fill (op->aad, esp0, sa0, pd->seq_hi);
              op->tag = payload + len;
              op->tag_len = 16;
              if (PREDICT_FALSE (ipsec_sa_is_set_IS_NULL_GMAC (sa0)))
                {
                  /* RFC-4543 ENCR_NULL_AUTH_AES_GMAC: IV is part of AAD */
                  payload -= iv_sz;
                  len += iv_sz;
                }
            }
          else
            {
              nonce->ctr = clib_host_to_net_u32 (1);
            }
          nonce->salt = sa0->salt;
          ASSERT (sizeof (u64) == iv_sz);
          nonce->iv = *(u64 *) op->iv;
          op->iv = (u8 *) nonce;
        }
      op->src = op->dst = payload += iv_sz;
      op->len = len - iv_sz;
      op->user_data = index;

      if (pd->is_chain && (pd2->lb != b))
        {
          /* buffer is chained */
          op->flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
          op->chunk_index = vec_len (ptd->chunks);
          esp_decrypt_chain_crypto (vm, ptd, pd, pd2, sa0, b, icv_sz,
                                    payload, len - pd->iv_sz + pd->icv_sz,
                                    &op->tag, &op->n_chunks);
        }

      vec_add_aligned (*(crypto_ops[0]), op, 1, CLIB_CACHE_LINE_BYTES);
    }
}
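/*
 * Asynchronous counterpart of the sync-op preparation: instead of
 * building vnet_crypto_op_t records it computes offsets and lengths
 * relative to b->data and adds a single element to the open async frame;
 * pd/pd2 are stashed in the buffer's opaque data so the post node can
 * finish the packet after the crypto engine completes.
 */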
static_always_inline esp_decrypt_error_t
esp_decrypt_prepare_async_frame (vlib_main_t *vm, vlib_node_runtime_t *node,
                                 ipsec_per_thread_data_t *ptd,
                                 vnet_crypto_async_frame_t *f, ipsec_sa_t *sa0,
                                 u8 *payload, u16 len, u8 icv_sz, u8 iv_sz,
                                 esp_decrypt_packet_data_t *pd,
                                 esp_decrypt_packet_data2_t *pd2, u32 bi,
                                 vlib_buffer_t *b, u16 *next, u16 async_next)
{
  const u8 esp_sz = sizeof (esp_header_t);
  esp_decrypt_packet_data_t *async_pd = &(esp_post_data (b))->decrypt_data;
  esp_decrypt_packet_data2_t *async_pd2 = esp_post_data2 (b);
  u8 *tag = payload + len, *iv = payload + esp_sz, *aad = 0;
  const u32 key_index = sa0->crypto_key_index;
  u32 crypto_len, integ_len = 0;
  i16 crypto_start_offset, integ_start_offset = 0;
  u8 flags = 0;

  if (!ipsec_sa_is_set_IS_AEAD (sa0))
    {
      /* linked algs */
      integ_start_offset = payload - b->data;
      integ_len = len;
      if (PREDICT_TRUE (sa0->integ_op_id != VNET_CRYPTO_OP_NONE))
        flags |= VNET_CRYPTO_OP_FLAG_HMAC_CHECK;

      if (pd->is_chain)
        {
          /* buffer is chained */
          integ_len = pd->current_length;

          /* special case when ICV is split and needs to be reassembled
           * first -> move it to the last buffer. Also take into account
           * that ESN needs to be added after encrypted data and may or
           * may not fit in the tail.*/
          if (pd2->lb->current_length < icv_sz)
            {
              u8 extra_esn = 0;
              tag = esp_move_icv_esn (vm, b, pd, pd2, icv_sz, sa0,
                                      &extra_esn, &integ_len);

              if (extra_esn)
                {
                  /* esn is in the last buffer, that was unlinked from
                   * the chain */
                  integ_len = b->current_length;
                }
              else
                {
                  if (pd2->lb == b)
                    {
                      /* we now have a single buffer of crypto data, adjust
                       * the length (second buffer contains only ICV) */
                      len = b->current_length;
                      goto out;
                    }
                }
            }
          else
            tag = vlib_buffer_get_tail (pd2->lb) - icv_sz;

          flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
          if (esp_decrypt_chain_integ (vm, ptd, pd, pd2, sa0, b, icv_sz,
                                       payload, pd->current_length, &tag, 0,
                                       &integ_len) < 0)
            {
              /* allocate buffer failed, will not add to frame and drop */
              return (ESP_DECRYPT_ERROR_NO_BUFFERS);
            }
        }
      else
        esp_insert_esn (vm, sa0, pd, pd2, &integ_len, &tag, &len, b, payload);
    }

out:
  /* crypto */
  payload += esp_sz;
  len -= esp_sz;
  iv = payload;

  if (ipsec_sa_is_set_IS_CTR (sa0))
    {
      /* construct nonce in a scratch space in front of the IP header */
      esp_ctr_nonce_t *nonce =
        (esp_ctr_nonce_t *) (payload - esp_sz - pd->hdr_sz - sizeof (*nonce));
      if (ipsec_sa_is_set_IS_AEAD (sa0))
        {
          /* construct aad in a scratch space in front of the nonce */
          esp_header_t *esp0 = (esp_header_t *) (payload - esp_sz);
          aad = (u8 *) nonce - sizeof (esp_aead_t);
          esp_aad_fill (aad, esp0, sa0, pd->seq_hi);
          if (PREDICT_FALSE (ipsec_sa_is_set_IS_NULL_GMAC (sa0)))
            {
              /* RFC-4543 ENCR_NULL_AUTH_AES_GMAC: IV is part of AAD */
              payload -= iv_sz;
              len += iv_sz;
            }
        }
      else
        {
          nonce->ctr = clib_host_to_net_u32 (1);
        }
      nonce->salt = sa0->salt;
      ASSERT (sizeof (u64) == iv_sz);
      nonce->iv = *(u64 *) iv;
      iv = (u8 *) nonce;
    }

  crypto_start_offset = (payload += iv_sz) - b->data;
  crypto_len = len - iv_sz;

  if (pd->is_chain && (pd2->lb != b))
    {
      /* buffer is chained */
      flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;

      crypto_len = esp_decrypt_chain_crypto (vm, ptd, pd, pd2, sa0, b, icv_sz,
                                             payload,
                                             len - pd->iv_sz + pd->icv_sz,
                                             &tag, 0);
    }

  *async_pd = *pd;
  *async_pd2 = *pd2;

  /* for AEAD integ_len - crypto_len will be negative, it is ok since it
   * is ignored by the engine. */
  vnet_crypto_async_add_to_frame (
    vm, f, key_index, crypto_len, integ_len - crypto_len, crypto_start_offset,
    integ_start_offset, bi, async_next, iv, tag, aad, flags);

  return (ESP_DECRYPT_ERROR_RX_PKTS);
}
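/*
 * Runs after a successful decrypt: redo the anti-replay check and
 * advance the window, locate the ESP footer, strip the ESP header, IV
 * and tail, and pick the next node from the inner next_header
 * (transport vs tunnel mode, IPv4/IPv6/MPLS/GRE/L2).
 */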
static_always_inline void
esp_decrypt_post_crypto (vlib_main_t *vm, vlib_node_runtime_t *node,
                         const u16 *next_by_next_header,
                         const esp_decrypt_packet_data_t *pd,
                         const esp_decrypt_packet_data2_t *pd2,
                         vlib_buffer_t *b, u16 *next, int is_ip6, int is_tun,
                         int is_async)
{
  ipsec_sa_t *sa0 = ipsec_sa_get (pd->sa_index);
  vlib_buffer_t *lb = b;
  const u8 esp_sz = sizeof (esp_header_t);
  const u8 tun_flags = IPSEC_SA_FLAG_IS_TUNNEL | IPSEC_SA_FLAG_IS_TUNNEL_V6;
  u8 pad_length = 0, next_header = 0;
  u16 icv_sz;
  u64 n_lost;

  /*
   * redo the anti-replay check.
   * in this frame say we have sequence numbers, s, s+1, s+1, s+1
   * and s and s+1 are in the window. When we did the anti-replay
   * check above we did so against the state of the window (W),
   * after packet s-1. So each of the packets in the sequence will be
   * accepted.
   * This time s will be checked against Ws-1, s+1 checked against Ws
   * (i.e. the window state is updated/advanced)
   * so this time the successive s+1 packet will be dropped.
   * This is a consequence of batching the decrypts. If the
   * check-decrypt-advance process was done for each packet it would
   * be fine. But we batch the decrypts because it's much more efficient
   * to do so in SW and if we offload to HW and the process is async.
   *
   * You're probably thinking, but this means an attacker can send the
   * above sequence and cause VPP to perform decrypts that will fail,
   * and that's true. But if the attacker can determine s (a valid
   * sequence number in the window) which is non-trivial, it can generate
   * a sequence s, s+1, s+2, s+3, ... s+n and nothing will prevent any
   * implementation, sequential or batching, from decrypting these.
   */
  if (PREDICT_FALSE (ipsec_sa_is_set_ANTI_REPLAY_HUGE (sa0)))
    {
      if (ipsec_sa_anti_replay_and_sn_advance (sa0, pd->seq, pd->seq_hi, true,
                                               NULL, true))
        {
          esp_decrypt_set_next_index (b, node, vm->thread_index,
                                      ESP_DECRYPT_ERROR_REPLAY, 0, next,
                                      ESP_DECRYPT_NEXT_DROP, pd->sa_index);
          return;
        }
      n_lost = ipsec_sa_anti_replay_advance (sa0, vm->thread_index, pd->seq,
                                             pd->seq_hi, true);
    }
  else
    {
      if (ipsec_sa_anti_replay_and_sn_advance (sa0, pd->seq, pd->seq_hi, true,
                                               NULL, false))
        {
          esp_decrypt_set_next_index (b, node, vm->thread_index,
                                      ESP_DECRYPT_ERROR_REPLAY, 0, next,
                                      ESP_DECRYPT_NEXT_DROP, pd->sa_index);
          return;
        }
      n_lost = ipsec_sa_anti_replay_advance (sa0, vm->thread_index, pd->seq,
                                             pd->seq_hi, false);
    }

  vlib_prefetch_simple_counter (&ipsec_sa_err_counters[IPSEC_SA_ERROR_LOST],
                                vm->thread_index, pd->sa_index);

  if (pd->is_chain)
    {
      lb = pd2->lb;
      icv_sz = pd2->icv_removed ? 0 : pd->icv_sz;
      if (pd2->free_buffer_index)
        {
          vlib_buffer_free_one (vm, pd2->free_buffer_index);
          b->flags &= ~VLIB_BUFFER_TOTAL_LENGTH_VALID;
        }
      if (lb->current_length < sizeof (esp_footer_t) + icv_sz)
        {
          /* esp footer is either split across the last two buffers or
           * is in the before-last buffer */

          vlib_buffer_t *before_last = b, *bp = b;
          while (bp->flags & VLIB_BUFFER_NEXT_PRESENT)
            {
              before_last = bp;
              bp = vlib_get_buffer (vm, bp->next_buffer);
            }
          u8 *bt = vlib_buffer_get_tail (before_last);

          if (lb->current_length == icv_sz)
            {
              esp_footer_t *f = (esp_footer_t *) (bt - sizeof (*f));
              pad_length = f->pad_length;
              next_header = f->next_header;
            }
          else
            {
              pad_length = (bt - 1)[0];
              next_header = ((u8 *) vlib_buffer_get_current (lb))[0];
            }
        }
      else
        {
          esp_footer_t *f =
            (esp_footer_t *) (lb->data + lb->current_data +
                              lb->current_length - sizeof (esp_footer_t) -
                              icv_sz);
          pad_length = f->pad_length;
          next_header = f->next_header;
        }
    }
  else
    {
      icv_sz = pd->icv_sz;
      esp_footer_t *f =
        (esp_footer_t *) (lb->data + lb->current_data + lb->current_length -
                          sizeof (esp_footer_t) - icv_sz);
      pad_length = f->pad_length;
      next_header = f->next_header;
    }

  u16 adv = pd->iv_sz + esp_sz;
  u16 tail = sizeof (esp_footer_t) + pad_length + icv_sz;
  u16 tail_orig = sizeof (esp_footer_t) + pad_length + pd->icv_sz;
  b->flags &=
    ~(VNET_BUFFER_F_L4_CHECKSUM_COMPUTED | VNET_BUFFER_F_L4_CHECKSUM_CORRECT);

  if ((pd->flags & tun_flags) == 0 && !is_tun)	/* transport mode */
    {
      u8 udp_sz = (is_ip6 == 0 && pd->flags & IPSEC_SA_FLAG_UDP_ENCAP) ?
        sizeof (udp_header_t) : 0;
      u16 ip_hdr_sz = pd->hdr_sz - udp_sz;
      u8 *old_ip = b->data + pd->current_data - ip_hdr_sz - udp_sz;
      u8 *ip = old_ip + adv + udp_sz;

      if (is_ip6 && ip_hdr_sz > 64)
        memmove (ip, old_ip, ip_hdr_sz);
      else
        clib_memcpy_le64 (ip, old_ip, ip_hdr_sz);

      b->current_data = pd->current_data + adv - ip_hdr_sz;
      b->current_length += ip_hdr_sz - adv;
      esp_remove_tail (vm, b, lb, tail);

      if (is_ip6)
        {
          ip6_header_t *ip6 = (ip6_header_t *) ip;
          u16 len = clib_net_to_host_u16 (ip6->payload_length);
          len -= adv + tail_orig;
          ip6->payload_length = clib_host_to_net_u16 (len);
          ip6->protocol = next_header;
          next[0] = ESP_DECRYPT_NEXT_IP6_INPUT;
        }
      else
        {
          ip4_header_t *ip4 = (ip4_header_t *) ip;
          ip_csum_t sum = ip4->checksum;
          u16 len = clib_net_to_host_u16 (ip4->length);
          len = clib_host_to_net_u16 (len - adv - tail_orig - udp_sz);
          sum = ip_csum_update (sum, ip4->protocol, next_header,
                                ip4_header_t, protocol);
          sum = ip_csum_update (sum, ip4->length, len, ip4_header_t, length);
          ip4->checksum = ip_csum_fold (sum);
          ip4->protocol = next_header;
          ip4->length = len;
          next[0] = ESP_DECRYPT_NEXT_IP4_INPUT;
        }
    }
  else
    {
      if (PREDICT_TRUE (next_header == IP_PROTOCOL_IP_IN_IP))
        {
          next[0] = ESP_DECRYPT_NEXT_IP4_INPUT;
          b->current_data = pd->current_data + adv;
          b->current_length = pd->current_length - adv;
          esp_remove_tail_and_tfc_padding (vm, node, pd, b, lb, next, tail,
                                           false);
        }
      else if (next_header == IP_PROTOCOL_IPV6)
        {
          next[0] = ESP_DECRYPT_NEXT_IP6_INPUT;
          b->current_data = pd->current_data + adv;
          b->current_length = pd->current_length - adv;
          esp_remove_tail_and_tfc_padding (vm, node, pd, b, lb, next, tail,
                                           true);
        }
      else if (next_header == IP_PROTOCOL_MPLS_IN_IP)
        {
          next[0] = ESP_DECRYPT_NEXT_MPLS_INPUT;
          b->current_data = pd->current_data + adv;
          b->current_length = pd->current_length - adv;
          esp_remove_tail (vm, b, lb, tail);
        }
      else if (is_tun && next_header == IP_PROTOCOL_GRE)
        {
          gre_header_t *gre;

          b->current_data = pd->current_data + adv;
          b->current_length = pd->current_length - adv - tail;

          gre = vlib_buffer_get_current (b);

          vlib_buffer_advance (b, sizeof (*gre));

          switch (clib_net_to_host_u16 (gre->protocol))
            {
            case GRE_PROTOCOL_teb:
              vnet_update_l2_len (b);
              next[0] = ESP_DECRYPT_NEXT_L2_INPUT;
              break;
            case GRE_PROTOCOL_ip4:
              next[0] = ESP_DECRYPT_NEXT_IP4_INPUT;
              break;
            case GRE_PROTOCOL_ip6:
              next[0] = ESP_DECRYPT_NEXT_IP6_INPUT;
              break;
            default:
              esp_decrypt_set_next_index (
                b, node, vm->thread_index, ESP_DECRYPT_ERROR_UNSUP_PAYLOAD, 0,
                next, ESP_DECRYPT_NEXT_DROP, pd->sa_index);
              break;
            }
        }
      else if ((next[0] = vec_elt (next_by_next_header, next_header)) !=
               (u16) ~0)
        {
          b->current_data = pd->current_data + adv;
          b->current_length = pd->current_length - adv;
          esp_remove_tail (vm, b, lb, tail);
        }
      else
        {
          esp_decrypt_set_next_index (b, node, vm->thread_index,
                                      ESP_DECRYPT_ERROR_UNSUP_PAYLOAD, 0, next,
                                      ESP_DECRYPT_NEXT_DROP, pd->sa_index);
          return;
        }

      if (is_tun)
        {
          if (ipsec_sa_is_set_IS_PROTECT (sa0))
            {
              /*
               * There are two encap possibilities
               * 1) the tunnel and the SA are providing encap, i.e. it's
               *   MAC | SA-IP | TUN-IP | ESP | PAYLOAD
               * implying the SA is in tunnel mode (on a tunnel interface)
               * 2) only the tunnel provides encap
               *   MAC | TUN-IP | ESP | PAYLOAD
               * implying the SA is in transport mode.
               *
               * For 2) we need only strip the tunnel encap and we're good.
               *  since the tunnel and crypto encap (in the tun-protect
               *  object) are the same and we verified above that these match
               * for 1) we need to strip the SA-IP outer headers, to
               *  reveal the tunnel IP and then check that this matches
               *  the configured tunnel.
               */
              const ipsec_tun_protect_t *itp;

              itp =
                ipsec_tun_protect_get (vnet_buffer (b)->ipsec.protect_index);

              if (PREDICT_TRUE (next_header == IP_PROTOCOL_IP_IN_IP))
                {
                  const ip4_header_t *ip4;

                  ip4 = vlib_buffer_get_current (b);

                  if (!ip46_address_is_equal_v4 (&itp->itp_tun.src,
                                                 &ip4->dst_address) ||
                      !ip46_address_is_equal_v4 (&itp->itp_tun.dst,
                                                 &ip4->src_address))
                    {
                      esp_decrypt_set_next_index (
                        b, node, vm->thread_index,
                        ESP_DECRYPT_ERROR_TUN_NO_PROTO, 0, next,
                        ESP_DECRYPT_NEXT_DROP, pd->sa_index);
                    }
                }
              else if (next_header == IP_PROTOCOL_IPV6)
                {
                  const ip6_header_t *ip6;

                  ip6 = vlib_buffer_get_current (b);

                  if (!ip46_address_is_equal_v6 (&itp->itp_tun.src,
                                                 &ip6->dst_address) ||
                      !ip46_address_is_equal_v6 (&itp->itp_tun.dst,
                                                 &ip6->src_address))
                    {
                      esp_decrypt_set_next_index (
                        b, node, vm->thread_index,
                        ESP_DECRYPT_ERROR_TUN_NO_PROTO, 0, next,
                        ESP_DECRYPT_NEXT_DROP, pd->sa_index);
                    }
                }
            }
        }
    }

  if (PREDICT_FALSE (n_lost))
    vlib_increment_simple_counter (&ipsec_sa_err_counters[IPSEC_SA_ERROR_LOST],
                                   vm->thread_index, pd->sa_index, n_lost);
}
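/*
 * Main decrypt loop. Per packet: validate the buffer chain, check SA
 * ownership (handing off to the SA's thread when needed), run the
 * preliminary anti-replay check, then either add the packet to an async
 * crypto frame or queue sync integ/crypto ops. Sync packets are
 * processed and finished below; async packets resume in the post node.
 */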
always_inline uword
esp_decrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
                    vlib_frame_t *from_frame, int is_ip6, int is_tun,
                    u16 async_next_node)
{
  ipsec_main_t *im = &ipsec_main;
  const u16 *next_by_next_header = im->next_header_registrations;
  u32 thread_index = vm->thread_index;
  u16 len;
  ipsec_per_thread_data_t *ptd = vec_elt_at_index (im->ptd, thread_index);
  u32 *from = vlib_frame_vector_args (from_frame);
  u32 n_left = from_frame->n_vectors;
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
  vlib_buffer_t *sync_bufs[VLIB_FRAME_SIZE];
  u16 sync_nexts[VLIB_FRAME_SIZE], *sync_next = sync_nexts, n_sync = 0;
  u16 async_nexts[VLIB_FRAME_SIZE], *async_next = async_nexts;
  u16 noop_nexts[VLIB_FRAME_SIZE], n_noop = 0;
  u32 sync_bi[VLIB_FRAME_SIZE];
  u32 noop_bi[VLIB_FRAME_SIZE];
  esp_decrypt_packet_data_t pkt_data[VLIB_FRAME_SIZE], *pd = pkt_data;
  esp_decrypt_packet_data2_t pkt_data2[VLIB_FRAME_SIZE], *pd2 = pkt_data2;
  esp_decrypt_packet_data_t cpd = { };
  u32 current_sa_index = ~0, current_sa_bytes = 0, current_sa_pkts = 0;
  const u8 esp_sz = sizeof (esp_header_t);
  ipsec_sa_t *sa0 = 0;
  bool anti_replay_result;
  vnet_crypto_op_t _op, *op = &_op;
  vnet_crypto_op_t **crypto_ops;
  vnet_crypto_op_t **integ_ops;
  int is_async = im->async_mode;
  vnet_crypto_async_op_id_t async_op = ~0;
  vnet_crypto_async_frame_t *async_frames[VNET_CRYPTO_ASYNC_OP_N_IDS];
  esp_decrypt_error_t err;

  vlib_get_buffers (vm, from, b, n_left);
  if (!is_async)
    {
      vec_reset_length (ptd->crypto_ops);
      vec_reset_length (ptd->integ_ops);
      vec_reset_length (ptd->chained_crypto_ops);
      vec_reset_length (ptd->chained_integ_ops);
    }
  vec_reset_length (ptd->async_frames);
  vec_reset_length (ptd->chunks);
  clib_memset (sync_nexts, -1, sizeof (sync_nexts));
  clib_memset (async_frames, 0, sizeof (async_frames));

  while (n_left > 0)
    {
      u8 *payload;

      err = ESP_DECRYPT_ERROR_RX_PKTS;
      if (n_left > 2)
        {
          u8 *p;
          vlib_prefetch_buffer_header (b[2], LOAD);
          p = vlib_buffer_get_current (b[1]);
          clib_prefetch_load (p);
          p -= CLIB_CACHE_LINE_BYTES;
          clib_prefetch_load (p);
        }

      u32 n_bufs = vlib_buffer_chain_linearize (vm, b[0]);
      if (n_bufs == 0)
        {
          err = ESP_DECRYPT_ERROR_NO_BUFFERS;
          esp_decrypt_set_next_index (b[0], node, thread_index, err, n_noop,
                                      noop_nexts, ESP_DECRYPT_NEXT_DROP,
                                      vnet_buffer (b[0])->ipsec.sad_index);
          goto next;
        }

      if (vnet_buffer (b[0])->ipsec.sad_index != current_sa_index)
        {
          if (current_sa_pkts)
            vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
                                             current_sa_index, current_sa_pkts,
                                             current_sa_bytes);
          current_sa_bytes = current_sa_pkts = 0;

          current_sa_index = vnet_buffer (b[0])->ipsec.sad_index;
          vlib_prefetch_combined_counter (&ipsec_sa_counters, thread_index,
                                          current_sa_index);
          sa0 = ipsec_sa_get (current_sa_index);

          /* fetch the second cacheline ASAP */
          clib_prefetch_load (sa0->cacheline1);
          cpd.icv_sz = sa0->integ_icv_size;
          cpd.iv_sz = sa0->crypto_iv_size;
          cpd.flags = sa0->flags;
          cpd.sa_index = current_sa_index;
          is_async = im->async_mode | ipsec_sa_is_set_IS_ASYNC (sa0);
        }

      if (PREDICT_FALSE ((u16) ~0 == sa0->thread_index))
        {
          /* this is the first packet to use this SA, claim the SA
           * for this thread. this could happen simultaneously on
           * another thread */
          clib_atomic_cmp_and_swap (&sa0->thread_index, ~0,
                                    ipsec_sa_assign_thread (thread_index));
        }

      if (PREDICT_FALSE (thread_index != sa0->thread_index))
        {
          vnet_buffer (b[0])->ipsec.thread_index = sa0->thread_index;
          err = ESP_DECRYPT_ERROR_HANDOFF;
          esp_decrypt_set_next_index (b[0], node, thread_index, err, n_noop,
                                      noop_nexts, ESP_DECRYPT_NEXT_HANDOFF,
                                      current_sa_index);
          goto next;
        }

      /* store packet data for next round for easier prefetch */
      pd->sa_data = cpd.sa_data;
      pd->current_data = b[0]->current_data;
      pd->hdr_sz = pd->current_data - vnet_buffer (b[0])->l3_hdr_offset;
      payload = b[0]->data + pd->current_data;
      pd->seq = clib_host_to_net_u32 (((esp_header_t *) payload)->seq);
      pd->is_chain = 0;
      pd2->lb = b[0];
      pd2->free_buffer_index = 0;
      pd2->icv_removed = 0;

      if (n_bufs > 1)
        {
          pd->is_chain = 1;
          /* find last buffer in the chain */
          while (pd2->lb->flags & VLIB_BUFFER_NEXT_PRESENT)
            pd2->lb = vlib_get_buffer (vm, pd2->lb->next_buffer);

          crypto_ops = &ptd->chained_crypto_ops;
          integ_ops = &ptd->chained_integ_ops;
        }
      else
        {
          crypto_ops = &ptd->crypto_ops;
          integ_ops = &ptd->integ_ops;
        }

      pd->current_length = b[0]->current_length;

      /* anti-replay check */
      if (PREDICT_FALSE (ipsec_sa_is_set_ANTI_REPLAY_HUGE (sa0)))
        {
          anti_replay_result = ipsec_sa_anti_replay_and_sn_advance (
            sa0, pd->seq, ~0, false, &pd->seq_hi, true);
        }
      else
        {
          anti_replay_result = ipsec_sa_anti_replay_and_sn_advance (
            sa0, pd->seq, ~0, false, &pd->seq_hi, false);
        }

      if (anti_replay_result)
        {
          err = ESP_DECRYPT_ERROR_REPLAY;
          esp_decrypt_set_next_index (b[0], node, thread_index, err, n_noop,
                                      noop_nexts, ESP_DECRYPT_NEXT_DROP,
                                      current_sa_index);
          goto next;
        }

      if (pd->current_length < cpd.icv_sz + esp_sz + cpd.iv_sz)
        {
          err = ESP_DECRYPT_ERROR_RUNT;
          esp_decrypt_set_next_index (b[0], node, thread_index, err, n_noop,
                                      noop_nexts, ESP_DECRYPT_NEXT_DROP,
                                      current_sa_index);
          goto next;
        }

      len = pd->current_length - cpd.icv_sz;
      current_sa_pkts += 1;
      current_sa_bytes += vlib_buffer_length_in_chain (vm, b[0]);

      if (is_async)
        {
          async_op = sa0->crypto_async_dec_op_id;

          /* get a frame for this op if we don't yet have one or it's full
           */
          if (NULL == async_frames[async_op] ||
              vnet_crypto_async_frame_is_full (async_frames[async_op]))
            {
              async_frames[async_op] =
                vnet_crypto_async_get_frame (vm, async_op);
              if (PREDICT_FALSE (!async_frames[async_op]))
                {
                  err = ESP_DECRYPT_ERROR_NO_AVAIL_FRAME;
                  esp_decrypt_set_next_index (
                    b[0], node, thread_index, err, n_noop, noop_nexts,
                    ESP_DECRYPT_NEXT_DROP, current_sa_index);
                  goto next;
                }

              /* Save the frame to the list we'll submit at the end */
              vec_add1 (ptd->async_frames, async_frames[async_op]);
            }

          err = esp_decrypt_prepare_async_frame (
            vm, node, ptd, async_frames[async_op], sa0, payload, len,
            cpd.icv_sz, cpd.iv_sz, pd, pd2, from[b - bufs], b[0], async_next,
            async_next_node);
          if (ESP_DECRYPT_ERROR_RX_PKTS != err)
            {
              esp_decrypt_set_next_index (
                b[0], node, thread_index, err, n_noop, noop_nexts,
                ESP_DECRYPT_NEXT_DROP, current_sa_index);
            }
        }
      else
        esp_decrypt_prepare_sync_op (
          vm, node, ptd, &crypto_ops, &integ_ops, op, sa0, payload, len,
          cpd.icv_sz, cpd.iv_sz, pd, pd2, b[0], sync_next, n_sync);
      /* next */
    next:
      if (ESP_DECRYPT_ERROR_RX_PKTS != err)
        {
          noop_bi[n_noop] = from[b - bufs];
          n_noop++;
        }
      else if (!is_async)
        {
          sync_bi[n_sync] = from[b - bufs];
          sync_bufs[n_sync] = b[0];
          n_sync += 1;
          sync_next += 1;
          pd += 1;
          pd2 += 1;
        }
      else
        async_next++;

      n_left -= 1;
      b += 1;
    }

  if (PREDICT_TRUE (~0 != current_sa_index))
    vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
                                     current_sa_index, current_sa_pkts,
                                     current_sa_bytes);

  /* submit or free all of the open frames */
  vnet_crypto_async_frame_t **async_frame;

  vec_foreach (async_frame, ptd->async_frames)
    {
      /* free frame and move on if no ops were successfully added */
      if (PREDICT_FALSE (!(*async_frame)->n_elts))
        {
          vnet_crypto_async_free_frame (vm, *async_frame);
          continue;
        }
      if (vnet_crypto_async_submit_open_frame (vm, *async_frame) < 0)
        {
          n_noop += esp_async_recycle_failed_submit (
            vm, *async_frame, node, ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR,
            IPSEC_SA_ERROR_CRYPTO_ENGINE_ERROR, n_noop, noop_bi, noop_nexts,
            ESP_DECRYPT_NEXT_DROP, false);
          vnet_crypto_async_reset_frame (*async_frame);
          vnet_crypto_async_free_frame (vm, *async_frame);
        }
    }

  if (n_sync)
    {
      esp_process_ops (vm, node, ptd->integ_ops, sync_bufs, sync_nexts,
                       ESP_DECRYPT_ERROR_INTEG_ERROR);
      esp_process_chained_ops (vm, node, ptd->chained_integ_ops, sync_bufs,
                               sync_nexts, ptd->chunks,
                               ESP_DECRYPT_ERROR_INTEG_ERROR);

      esp_process_ops (vm, node, ptd->crypto_ops, sync_bufs, sync_nexts,
                       ESP_DECRYPT_ERROR_DECRYPTION_FAILED);
      esp_process_chained_ops (vm, node, ptd->chained_crypto_ops, sync_bufs,
                               sync_nexts, ptd->chunks,
                               ESP_DECRYPT_ERROR_DECRYPTION_FAILED);
    }

  /* Post decryption round - adjust packet data start and length and next
     node */

  n_left = n_sync;
  sync_next = sync_nexts;
  pd = pkt_data;
  pd2 = pkt_data2;
  b = sync_bufs;

  while (n_left)
    {
      if (n_left >= 2)
        {
          void *data = b[1]->data + pd[1].current_data;

          /* buffer metadata */
          vlib_prefetch_buffer_header (b[1], LOAD);

          /* esp footer */
          CLIB_PREFETCH (data + pd[1].current_length - pd[1].icv_sz - 2,
                         CLIB_CACHE_LINE_BYTES, LOAD);

          /* packet headers */
          CLIB_PREFETCH (data - CLIB_CACHE_LINE_BYTES,
                         CLIB_CACHE_LINE_BYTES * 2, LOAD);
        }

      /* save the sa_index as GRE_teb post_crypto changes L2 opaque */
      if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
        current_sa_index = vnet_buffer (b[0])->ipsec.sad_index;

      if (sync_next[0] >= ESP_DECRYPT_N_NEXT)
        esp_decrypt_post_crypto (vm, node, next_by_next_header, pd, pd2, b[0],
                                 sync_next, is_ip6, is_tun, 0);

      /* trace: */
      if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
        {
          esp_decrypt_trace_t *tr;
          tr = vlib_add_trace (vm, node, b[0], sizeof (*tr));
          sa0 = ipsec_sa_get (current_sa_index);
          tr->crypto_alg = sa0->crypto_alg;
          tr->integ_alg = sa0->integ_alg;
          tr->seq = pd->seq;
          tr->sa_seq = sa0->seq;
          tr->sa_seq_hi = sa0->seq_hi;
          tr->pkt_seq_hi = pd->seq_hi;
        }

      /* next */
      n_left -= 1;
      sync_next += 1;
      pd += 1;
      pd2 += 1;
      b += 1;
    }

  vlib_node_increment_counter (vm, node->node_index, ESP_DECRYPT_ERROR_RX_PKTS,
                               from_frame->n_vectors);

  if (n_sync)
    vlib_buffer_enqueue_to_next (vm, node, sync_bi, sync_nexts, n_sync);

  if (n_noop)
    vlib_buffer_enqueue_to_next (vm, node, noop_bi, noop_nexts, n_noop);

  return (from_frame->n_vectors);
}
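/*
 * Post node input function: entered only by packets that took the
 * asynchronous crypto path; the prepare step saved pd/pd2 in the
 * buffer's opaque data, so all that remains is esp_decrypt_post_crypto
 * and tracing.
 */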
always_inline uword
esp_decrypt_post_inline (vlib_main_t * vm,
                         vlib_node_runtime_t * node,
                         vlib_frame_t * from_frame, int is_ip6, int is_tun)
{
  const ipsec_main_t *im = &ipsec_main;
  const u16 *next_by_next_header = im->next_header_registrations;
  u32 *from = vlib_frame_vector_args (from_frame);
  u32 n_left = from_frame->n_vectors;
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
  u16 nexts[VLIB_FRAME_SIZE], *next = nexts;
  vlib_get_buffers (vm, from, b, n_left);

  while (n_left > 0)
    {
      esp_decrypt_packet_data_t *pd = &(esp_post_data (b[0]))->decrypt_data;

      if (n_left > 2)
        {
          vlib_prefetch_buffer_header (b[2], LOAD);
          vlib_prefetch_buffer_header (b[1], LOAD);
        }

      if (!pd->is_chain)
        esp_decrypt_post_crypto (vm, node, next_by_next_header, pd, 0, b[0],
                                 next, is_ip6, is_tun, 1);
      else
        {
          esp_decrypt_packet_data2_t *pd2 = esp_post_data2 (b[0]);
          esp_decrypt_post_crypto (vm, node, next_by_next_header, pd, pd2,
                                   b[0], next, is_ip6, is_tun, 1);
        }

      /* trace: */
      if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
        {
          ipsec_sa_t *sa0 = ipsec_sa_get (pd->sa_index);
          esp_decrypt_trace_t *tr;
          esp_decrypt_packet_data_t *async_pd =
            &(esp_post_data (b[0]))->decrypt_data;
          tr = vlib_add_trace (vm, node, b[0], sizeof (*tr));
          sa0 = ipsec_sa_get (async_pd->sa_index);

          tr->crypto_alg = sa0->crypto_alg;
          tr->integ_alg = sa0->integ_alg;
          tr->seq = pd->seq;
          tr->sa_seq = sa0->seq;
          tr->sa_seq_hi = sa0->seq_hi;
        }

      n_left--;
      next++;
      b++;
    }

  n_left = from_frame->n_vectors;
  vlib_node_increment_counter (vm, node->node_index,
                               ESP_DECRYPT_ERROR_RX_POST_PKTS, n_left);

  vlib_buffer_enqueue_to_next (vm, node, from, nexts, n_left);

  return n_left;
}
VLIB_NODE_FN (esp4_decrypt_node) (vlib_main_t * vm,
                                  vlib_node_runtime_t * node,
                                  vlib_frame_t * from_frame)
{
  return esp_decrypt_inline (vm, node, from_frame, 0, 0,
                             esp_decrypt_async_next.esp4_post_next);
}

VLIB_NODE_FN (esp4_decrypt_post_node) (vlib_main_t * vm,
                                       vlib_node_runtime_t * node,
                                       vlib_frame_t * from_frame)
{
  return esp_decrypt_post_inline (vm, node, from_frame, 0, 0);
}

VLIB_NODE_FN (esp4_decrypt_tun_node) (vlib_main_t * vm,
                                      vlib_node_runtime_t * node,
                                      vlib_frame_t * from_frame)
{
  return esp_decrypt_inline (vm, node, from_frame, 0, 1,
                             esp_decrypt_async_next.esp4_tun_post_next);
}

VLIB_NODE_FN (esp4_decrypt_tun_post_node) (vlib_main_t * vm,
                                           vlib_node_runtime_t * node,
                                           vlib_frame_t * from_frame)
{
  return esp_decrypt_post_inline (vm, node, from_frame, 0, 1);
}

VLIB_NODE_FN (esp6_decrypt_node) (vlib_main_t * vm,
                                  vlib_node_runtime_t * node,
                                  vlib_frame_t * from_frame)
{
  return esp_decrypt_inline (vm, node, from_frame, 1, 0,
                             esp_decrypt_async_next.esp6_post_next);
}

VLIB_NODE_FN (esp6_decrypt_post_node) (vlib_main_t * vm,
                                       vlib_node_runtime_t * node,
                                       vlib_frame_t * from_frame)
{
  return esp_decrypt_post_inline (vm, node, from_frame, 1, 0);
}

VLIB_NODE_FN (esp6_decrypt_tun_node) (vlib_main_t * vm,
                                      vlib_node_runtime_t * node,
                                      vlib_frame_t * from_frame)
{
  return esp_decrypt_inline (vm, node, from_frame, 1, 1,
                             esp_decrypt_async_next.esp6_tun_post_next);
}

VLIB_NODE_FN (esp6_decrypt_tun_post_node) (vlib_main_t * vm,
                                           vlib_node_runtime_t * node,
                                           vlib_frame_t * from_frame)
{
  return esp_decrypt_post_inline (vm, node, from_frame, 1, 1);
}
VLIB_REGISTER_NODE (esp4_decrypt_node) = {
  .name = "esp4-decrypt",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_decrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ESP_DECRYPT_N_ERROR,
  .error_counters = esp_decrypt_error_counters,

  .n_next_nodes = ESP_DECRYPT_N_NEXT,
  .next_nodes = {
    [ESP_DECRYPT_NEXT_DROP] = "ip4-drop",
    [ESP_DECRYPT_NEXT_IP4_INPUT] = "ip4-input-no-checksum",
    [ESP_DECRYPT_NEXT_IP6_INPUT] = "ip6-input",
    [ESP_DECRYPT_NEXT_MPLS_INPUT] = "mpls-drop",
    [ESP_DECRYPT_NEXT_L2_INPUT] = "l2-input",
    [ESP_DECRYPT_NEXT_HANDOFF] = "esp4-decrypt-handoff",
  },
};
VLIB_REGISTER_NODE (esp4_decrypt_post_node) = {
  .name = "esp4-decrypt-post",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_decrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ESP_DECRYPT_N_ERROR,
  .error_counters = esp_decrypt_error_counters,

  .sibling_of = "esp4-decrypt",
};
VLIB_REGISTER_NODE (esp6_decrypt_node) = {
  .name = "esp6-decrypt",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_decrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ESP_DECRYPT_N_ERROR,
  .error_counters = esp_decrypt_error_counters,

  .n_next_nodes = ESP_DECRYPT_N_NEXT,
  .next_nodes = {
    [ESP_DECRYPT_NEXT_DROP] = "ip6-drop",
    [ESP_DECRYPT_NEXT_IP4_INPUT] = "ip4-input-no-checksum",
    [ESP_DECRYPT_NEXT_IP6_INPUT] = "ip6-input",
    [ESP_DECRYPT_NEXT_MPLS_INPUT] = "mpls-drop",
    [ESP_DECRYPT_NEXT_L2_INPUT] = "l2-input",
    [ESP_DECRYPT_NEXT_HANDOFF] = "esp6-decrypt-handoff",
  },
};
VLIB_REGISTER_NODE (esp6_decrypt_post_node) = {
  .name = "esp6-decrypt-post",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_decrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ESP_DECRYPT_N_ERROR,
  .error_counters = esp_decrypt_error_counters,

  .sibling_of = "esp6-decrypt",
};
VLIB_REGISTER_NODE (esp4_decrypt_tun_node) = {
  .name = "esp4-decrypt-tun",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_decrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ESP_DECRYPT_N_ERROR,
  .error_counters = esp_decrypt_error_counters,
  .n_next_nodes = ESP_DECRYPT_N_NEXT,
  .next_nodes = {
    [ESP_DECRYPT_NEXT_DROP] = "ip4-drop",
    [ESP_DECRYPT_NEXT_IP4_INPUT] = "ip4-input-no-checksum",
    [ESP_DECRYPT_NEXT_IP6_INPUT] = "ip6-input",
    [ESP_DECRYPT_NEXT_MPLS_INPUT] = "mpls-input",
    [ESP_DECRYPT_NEXT_L2_INPUT] = "l2-input",
    [ESP_DECRYPT_NEXT_HANDOFF] = "esp4-decrypt-tun-handoff",
  },
};
VLIB_REGISTER_NODE (esp4_decrypt_tun_post_node) = {
  .name = "esp4-decrypt-tun-post",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_decrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ESP_DECRYPT_N_ERROR,
  .error_counters = esp_decrypt_error_counters,

  .sibling_of = "esp4-decrypt-tun",
};
VLIB_REGISTER_NODE (esp6_decrypt_tun_node) = {
  .name = "esp6-decrypt-tun",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_decrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ESP_DECRYPT_N_ERROR,
  .error_counters = esp_decrypt_error_counters,
  .n_next_nodes = ESP_DECRYPT_N_NEXT,
  .next_nodes = {
    [ESP_DECRYPT_NEXT_DROP] = "ip6-drop",
    [ESP_DECRYPT_NEXT_IP4_INPUT] = "ip4-input-no-checksum",
    [ESP_DECRYPT_NEXT_IP6_INPUT] = "ip6-input",
    [ESP_DECRYPT_NEXT_MPLS_INPUT] = "mpls-input",
    [ESP_DECRYPT_NEXT_L2_INPUT] = "l2-input",
    [ESP_DECRYPT_NEXT_HANDOFF] = "esp6-decrypt-tun-handoff",
  },
};
VLIB_REGISTER_NODE (esp6_decrypt_tun_post_node) = {
  .name = "esp6-decrypt-tun-post",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_decrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ESP_DECRYPT_N_ERROR,
  .error_counters = esp_decrypt_error_counters,

  .sibling_of = "esp6-decrypt-tun",
};
#ifndef CLIB_MARCH_VARIANT

static clib_error_t *
esp_decrypt_init (vlib_main_t *vm)
{
  ipsec_main_t *im = &ipsec_main;

  im->esp4_dec_fq_index =
    vlib_frame_queue_main_init (esp4_decrypt_node.index, 0);
  im->esp6_dec_fq_index =
    vlib_frame_queue_main_init (esp6_decrypt_node.index, 0);
  im->esp4_dec_tun_fq_index =
    vlib_frame_queue_main_init (esp4_decrypt_tun_node.index, 0);
  im->esp6_dec_tun_fq_index =
    vlib_frame_queue_main_init (esp6_decrypt_tun_node.index, 0);

  return 0;
}

VLIB_INIT_FUNCTION (esp_decrypt_init);

#endif
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */