/*
 * esp_encrypt.c : IPSec ESP encrypt node
 *
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vnet/vnet.h>
#include <vnet/api_errno.h>
#include <vnet/ip/ip.h>
#include <vnet/interface_output.h>

#include <vnet/crypto/crypto.h>

#include <vnet/ipsec/ipsec.h>
#include <vnet/ipsec/ipsec_tun.h>
#include <vnet/ipsec/ipsec.api_enum.h>
#include <vnet/ipsec/esp.h>
#include <vnet/tunnel/tunnel_dp.h>
#define foreach_esp_encrypt_next                                              \
  _ (DROP4, "ip4-drop")                                                       \
  _ (DROP6, "ip6-drop")                                                       \
  _ (DROP_MPLS, "mpls-drop")                                                  \
  _ (HANDOFF4, "handoff4")                                                    \
  _ (HANDOFF6, "handoff6")                                                    \
  _ (HANDOFF_MPLS, "handoff-mpls")                                            \
  _ (INTERFACE_OUTPUT, "interface-output")

#define _(v, s) ESP_ENCRYPT_NEXT_##v,
typedef enum
{
  foreach_esp_encrypt_next
#undef _
    ESP_ENCRYPT_N_NEXT,
} esp_encrypt_next_t;
typedef struct
{
  u32 sa_index;
  u32 spi;
  u32 seq;
  u32 sa_seq_hi;
  u8 udp_encap;
  ipsec_crypto_alg_t crypto_alg;
  ipsec_integ_alg_t integ_alg;
} esp_encrypt_trace_t;

typedef struct
{
  u32 next_index;
} esp_encrypt_post_trace_t;

typedef vl_counter_esp_encrypt_enum_t esp_encrypt_error_t;
/* packet trace format function */
static u8 *
format_esp_encrypt_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  esp_encrypt_trace_t *t = va_arg (*args, esp_encrypt_trace_t *);

  s =
    format (s,
            "esp: sa-index %d spi %u (0x%08x) seq %u sa-seq-hi %u crypto %U integrity %U%s",
            t->sa_index, t->spi, t->spi, t->seq, t->sa_seq_hi,
            format_ipsec_crypto_alg,
            t->crypto_alg, format_ipsec_integ_alg, t->integ_alg,
            t->udp_encap ? " udp-encap-enabled" : "");
  return s;
}
static u8 *
format_esp_post_encrypt_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  esp_encrypt_post_trace_t *t = va_arg (*args, esp_encrypt_post_trace_t *);

  s = format (s, "esp-post: next node index %u", t->next_index);
  return s;
}
/* pad packet in input buffer */
static_always_inline u8 *
esp_add_footer_and_icv (vlib_main_t *vm, vlib_buffer_t **last, u8 esp_align,
                        u8 icv_sz, u16 buffer_data_size, uword total_len)
{
  static const u8 pad_data[ESP_MAX_BLOCK_SIZE] = {
    0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
    0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x00,
  };

  u16 min_length = total_len + sizeof (esp_footer_t);
  u16 new_length = round_pow2 (min_length, esp_align);
  u8 pad_bytes = new_length - min_length;
  esp_footer_t *f = (esp_footer_t *) (vlib_buffer_get_current (last[0]) +
                                      last[0]->current_length + pad_bytes);
  u16 tail_sz = sizeof (esp_footer_t) + pad_bytes + icv_sz;

  if (last[0]->current_data + last[0]->current_length + tail_sz >
      buffer_data_size)
    {
      u32 tmp_bi = 0;
      if (vlib_buffer_alloc (vm, &tmp_bi, 1) != 1)
        return 0;

      vlib_buffer_t *tmp = vlib_get_buffer (vm, tmp_bi);
      last[0]->next_buffer = tmp_bi;
      last[0]->flags |= VLIB_BUFFER_NEXT_PRESENT;
      f = (esp_footer_t *) (vlib_buffer_get_current (tmp) + pad_bytes);
      tmp->current_length += tail_sz;
      last[0] = tmp;
    }
  else
    last[0]->current_length += tail_sz;

  f->pad_length = pad_bytes;
  if (pad_bytes)
    {
      ASSERT (pad_bytes <= ESP_MAX_BLOCK_SIZE);
      pad_bytes = clib_min (ESP_MAX_BLOCK_SIZE, pad_bytes);
      clib_memcpy_fast ((u8 *) f - pad_bytes, pad_data, pad_bytes);
    }

  return &f->next_header;
}
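/* Worked example (editor's note, values assumed for illustration): for a
 * 37-byte payload with esp_align == 16 and a 16-byte ICV, min_length is
 * 37 + 2 (footer) = 39, new_length = round_pow2 (39, 16) = 48, so
 * pad_bytes = 9 and tail_sz = 2 + 9 + 16 = 27. The tail then reads
 * | 01 02 .. 09 | pad_length = 9 | next_header | ICV |, i.e. the monotonic
 * pad pattern required by RFC 4303 section 2.4. */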
static_always_inline void
esp_update_ip4_hdr (ip4_header_t * ip4, u16 len, int is_transport, int is_udp)
{
  ip_csum_t sum;
  u16 old_len;

  len = clib_net_to_host_u16 (len);
  old_len = ip4->length;

  if (is_transport)
    {
      u8 prot = is_udp ? IP_PROTOCOL_UDP : IP_PROTOCOL_IPSEC_ESP;
      sum = ip_csum_update (ip4->checksum, ip4->protocol, prot, ip4_header_t,
                            protocol);
      ip4->protocol = prot;
      sum = ip_csum_update (sum, old_len, len, ip4_header_t, length);
    }
  else
    sum = ip_csum_update (ip4->checksum, old_len, len, ip4_header_t, length);

  ip4->length = len;
  ip4->checksum = ip_csum_fold (sum);
}
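/* Editor's note: the update above is the incremental checksum technique of
 * RFC 1624; only the 16-bit fields that change (protocol, total length) are
 * folded into the existing checksum via HC' = ~(~HC + ~m + m'). A full
 * recompute would also be correct but touches the whole header; a sketch of
 * that alternative, using the stock VPP helper:
 *
 *   ip4->length = len;
 *   ip4->checksum = ip4_header_checksum (ip4);
 *
 * The incremental form is preferred on the data path. */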
static_always_inline void
esp_fill_udp_hdr (ipsec_sa_t * sa, udp_header_t * udp, u16 len)
{
  clib_memcpy_fast (udp, &sa->udp_hdr, sizeof (udp_header_t));
  udp->length = clib_net_to_host_u16 (len);
}
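/* NAT-T UDP encapsulation (RFC 3948): the SA holds a pre-built UDP header
 * template with the negotiated ports (typically 4500) already filled in,
 * so the per-packet work is reduced to the copy and length store above. */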
static_always_inline u8
ext_hdr_is_pre_esp (u8 nexthdr)
{
#ifdef CLIB_HAVE_VEC128
  static const u8x16 ext_hdr_types = {
    IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS,
    IP_PROTOCOL_IPV6_ROUTE,
    IP_PROTOCOL_IPV6_FRAGMENTATION,
  };

  return !u8x16_is_all_zero (ext_hdr_types == u8x16_splat (nexthdr));
#else
  return (!(nexthdr ^ IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS) ||
          !(nexthdr ^ IP_PROTOCOL_IPV6_ROUTE) ||
          !(nexthdr ^ IP_PROTOCOL_IPV6_FRAGMENTATION));
#endif
}
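/* Editor's note: the vector variant is a branch-free set-membership test:
 * splat nexthdr across all 16 lanes, compare against the constant list, and
 * report a hit if any lane is non-zero. E.g. nexthdr == IP_PROTOCOL_IPV6_ROUTE
 * matches lane 1, the compare yields 0xff there, u8x16_is_all_zero () returns
 * 0 and the function returns 1. The unused lanes hold 0 and can only match
 * nexthdr 0 (hop-by-hop), which is in the set anyway. */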
static_always_inline u8
esp_get_ip6_hdr_len (ip6_header_t * ip6, ip6_ext_header_t ** ext_hdr)
{
  /* this code assumes that HbH, route and frag headers will be before
     others; if that is not the case, they will end up encrypted */
  u8 len = sizeof (ip6_header_t);
  ip6_ext_header_t *p;

  /* if next packet doesn't have ext header */
  if (ext_hdr_is_pre_esp (ip6->protocol) == 0)
    {
      *ext_hdr = NULL;
      return len;
    }

  p = ip6_next_header (ip6);
  len += ip6_ext_header_len (p);
  while (ext_hdr_is_pre_esp (p->next_hdr))
    {
      len += ip6_ext_header_len (p);
      p = ip6_ext_next_header (p);
    }

  *ext_hdr = p;
  return len;
}
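/* Example walk (editor's note): for | ip6 | hop-by-hop | fragment | tcp |
 * the loop stops at the fragment header (TCP is not in the pre-ESP set),
 * *ext_hdr points at it so its next_hdr can later be rewritten to ESP, and
 * the returned len spans everything that must stay in front of the ESP
 * header. */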
/* IPsec IV generation: IV requirements differ depending on the
 * encryption mode: IVs must be unpredictable for AES-CBC, whereas for CTR
 * and GCM they can be predictable but must never be reused with the same
 * key material.
 * To avoid reusing the same IVs between multiple VPP instances and between
 * restarts, we use a properly chosen PRNG to generate IVs. To ensure the IV
 * is unpredictable for CBC, it is then encrypted using the same key as the
 * message. You can refer to NIST SP800-38A and NIST SP800-38D for more
 * details. */
static_always_inline void *
esp_generate_iv (ipsec_sa_t *sa, void *payload, int iv_sz)
{
  ASSERT (iv_sz >= sizeof (u64));
  u64 *iv = (u64 *) (payload - iv_sz);
  clib_memset_u8 (iv, 0, iv_sz);
  *iv = clib_pcg64i_random_r (&sa->iv_prng);
  return iv;
}
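/* Editor's sketch of how the 8 PRNG bytes are used for an AEAD cipher,
 * following the RFC 4106 salt||IV nonce construction (see esp_ctr_nonce_t):
 *
 *   | salt (4B, from SA, not transmitted) | iv (8B, sent in the packet) |
 *
 * plus a fixed counter block; the receiver rebuilds the same nonce from the
 * SA salt and the wire IV. For CBC the zeroed iv_sz bytes are instead
 * encrypted in place, which is what makes the transmitted IV unpredictable. */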
static_always_inline void
esp_process_chained_ops (vlib_main_t * vm, vlib_node_runtime_t * node,
                         vnet_crypto_op_t * ops, vlib_buffer_t * b[],
                         u16 * nexts, vnet_crypto_op_chunk_t * chunks,
                         u16 drop_next)
{
  u32 n_fail, n_ops = vec_len (ops);
  vnet_crypto_op_t *op = ops;

  if (n_ops == 0)
    return;

  n_fail = n_ops - vnet_crypto_process_chained_ops (vm, op, chunks, n_ops);

  while (n_fail)
    {
      ASSERT (op - ops < n_ops);

      if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
        {
          u32 bi = op->user_data;
          esp_encrypt_set_next_index (b[bi], node, vm->thread_index,
                                      ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR,
                                      bi, nexts, drop_next,
                                      vnet_buffer (b[bi])->ipsec.sad_index);
          n_fail--;
        }
      op++;
    }
}
static_always_inline void
esp_process_ops (vlib_main_t * vm, vlib_node_runtime_t * node,
                 vnet_crypto_op_t * ops, vlib_buffer_t * b[], u16 * nexts,
                 u16 drop_next)
{
  u32 n_fail, n_ops = vec_len (ops);
  vnet_crypto_op_t *op = ops;

  if (n_ops == 0)
    return;

  n_fail = n_ops - vnet_crypto_process_ops (vm, op, n_ops);

  while (n_fail)
    {
      ASSERT (op - ops < n_ops);

      if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
        {
          u32 bi = op->user_data;
          esp_encrypt_set_next_index (b[bi], node, vm->thread_index,
                                      ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR,
                                      bi, nexts, drop_next,
                                      vnet_buffer (b[bi])->ipsec.sad_index);
          n_fail--;
        }
      op++;
    }
}
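/* Editor's note: both walkers above exploit the engine's return value — it
 * reports how many ops completed, so the failure scan only runs when n_fail
 * is non-zero and the common path costs a single subtraction. */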
static_always_inline u32
esp_encrypt_chain_crypto (vlib_main_t * vm, ipsec_per_thread_data_t * ptd,
                          ipsec_sa_t * sa0, vlib_buffer_t * b,
                          vlib_buffer_t * lb, u8 icv_sz, u8 * start,
                          u32 start_len, u16 * n_ch)
{
  vnet_crypto_op_chunk_t *ch;
  vlib_buffer_t *cb = b;
  u16 n_chunks = 1;
  u32 total_len;
  vec_add2 (ptd->chunks, ch, 1);
  total_len = ch->len = start_len;
  ch->src = ch->dst = start;
  cb = vlib_get_buffer (vm, cb->next_buffer);

  while (1)
    {
      vec_add2 (ptd->chunks, ch, 1);
      n_chunks += 1;
      if (lb == cb)
        total_len += ch->len = cb->current_length - icv_sz;
      else
        total_len += ch->len = cb->current_length;
      ch->src = ch->dst = vlib_buffer_get_current (cb);

      if (!(cb->flags & VLIB_BUFFER_NEXT_PRESENT))
        break;

      cb = vlib_get_buffer (vm, cb->next_buffer);
    }

  if (n_ch)
    *n_ch = n_chunks;

  return total_len;
}
static_always_inline u32
esp_encrypt_chain_integ (vlib_main_t * vm, ipsec_per_thread_data_t * ptd,
                         ipsec_sa_t * sa0, vlib_buffer_t * b,
                         vlib_buffer_t * lb, u8 icv_sz, u8 * start,
                         u32 start_len, u8 * digest, u16 * n_ch)
{
  vnet_crypto_op_chunk_t *ch;
  vlib_buffer_t *cb = b;
  u16 n_chunks = 1;
  u32 total_len;
  vec_add2 (ptd->chunks, ch, 1);
  total_len = ch->len = start_len;
  ch->src = start;
  cb = vlib_get_buffer (vm, cb->next_buffer);

  while (1)
    {
      vec_add2 (ptd->chunks, ch, 1);
      n_chunks += 1;
      if (lb == cb)
        {
          total_len += ch->len = cb->current_length - icv_sz;
          if (ipsec_sa_is_set_USE_ESN (sa0))
            {
              u32 seq_hi = clib_net_to_host_u32 (sa0->seq_hi);
              clib_memcpy_fast (digest, &seq_hi, sizeof (seq_hi));
              ch->len += sizeof (seq_hi);
              total_len += sizeof (seq_hi);
            }
        }
      else
        total_len += ch->len = cb->current_length;
      ch->src = vlib_buffer_get_current (cb);

      if (!(cb->flags & VLIB_BUFFER_NEXT_PRESENT))
        break;

      cb = vlib_get_buffer (vm, cb->next_buffer);
    }

  if (n_ch)
    *n_ch = n_chunks;

  return total_len;
}
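/* Editor's illustration, assuming a two-buffer chain: the crypto walk emits
 * chunk 0 = ESP header + IV + first-buffer payload (start/start_len) and
 * chunk 1 = last-buffer payload minus the ICV. The integrity walk is the
 * same except that, with USE_ESN set, the final chunk is grown by 4 bytes
 * so the high sequence number is authenticated in place right before the
 * ICV without ever being transmitted. */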
static_always_inline void
esp_prepare_sync_op (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
                     vnet_crypto_op_t **crypto_ops,
                     vnet_crypto_op_t **integ_ops, ipsec_sa_t *sa0, u32 seq_hi,
                     u8 *payload, u16 payload_len, u8 iv_sz, u8 icv_sz, u32 bi,
                     vlib_buffer_t **b, vlib_buffer_t *lb, u32 hdr_len,
                     esp_header_t *esp)
{
  if (sa0->crypto_enc_op_id)
    {
      vnet_crypto_op_t *op;
      vec_add2_aligned (crypto_ops[0], op, 1, CLIB_CACHE_LINE_BYTES);
      vnet_crypto_op_init (op, sa0->crypto_enc_op_id);
      u8 *crypto_start = payload;
      /* esp_add_footer_and_icv() in esp_encrypt_inline() makes sure we always
       * have enough space for ESP header and footer which includes ICV */
      ASSERT (payload_len > icv_sz);
      u16 crypto_len = payload_len - icv_sz;

      /* generate the IV in front of the payload */
      void *pkt_iv = esp_generate_iv (sa0, payload, iv_sz);

      op->key_index = sa0->crypto_key_index;
      op->user_data = bi;

      if (ipsec_sa_is_set_IS_CTR (sa0))
        {
          /* construct nonce in a scratch space in front of the IP header */
          esp_ctr_nonce_t *nonce =
            (esp_ctr_nonce_t *) (pkt_iv - hdr_len - sizeof (*nonce));
          if (ipsec_sa_is_set_IS_AEAD (sa0))
            {
              /* construct aad in a scratch space in front of the nonce */
              op->aad = (u8 *) nonce - sizeof (esp_aead_t);
              op->aad_len = esp_aad_fill (op->aad, esp, sa0, seq_hi);
              op->tag = payload + crypto_len;
              op->tag_len = 16;
              if (PREDICT_FALSE (ipsec_sa_is_set_IS_NULL_GMAC (sa0)))
                {
                  /* RFC-4543 ENCR_NULL_AUTH_AES_GMAC: IV is part of AAD */
                  crypto_start -= iv_sz;
                  crypto_len += iv_sz;
                }
            }
          else
            {
              nonce->ctr = clib_host_to_net_u32 (1);
            }

          nonce->salt = sa0->salt;
          nonce->iv = *(u64 *) pkt_iv;
          op->iv = (u8 *) nonce;
        }
      else
        {
          /* construct zero iv in front of the IP header */
          op->iv = pkt_iv - hdr_len - iv_sz;
          clib_memset_u8 (op->iv, 0, iv_sz);
          /* include iv field in crypto */
          crypto_start -= iv_sz;
          crypto_len += iv_sz;
        }

      if (PREDICT_FALSE (lb != b[0]))
        {
          /* is chained */
          op->flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
          op->chunk_index = vec_len (ptd->chunks);
          op->tag = vlib_buffer_get_tail (lb) - icv_sz;
          esp_encrypt_chain_crypto (vm, ptd, sa0, b[0], lb, icv_sz,
                                    crypto_start, crypto_len + icv_sz,
                                    &op->n_chunks);
        }
      else
        {
          op->src = op->dst = crypto_start;
          op->len = crypto_len;
        }
    }

  if (sa0->integ_op_id)
    {
      vnet_crypto_op_t *op;
      vec_add2_aligned (integ_ops[0], op, 1, CLIB_CACHE_LINE_BYTES);
      vnet_crypto_op_init (op, sa0->integ_op_id);
      op->src = payload - iv_sz - sizeof (esp_header_t);
      op->digest = payload + payload_len - icv_sz;
      op->key_index = sa0->integ_key_index;
      op->digest_len = icv_sz;
      op->len = payload_len - icv_sz + iv_sz + sizeof (esp_header_t);
      op->user_data = bi;

      if (lb != b[0])
        {
          /* is chained */
          op->flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
          op->chunk_index = vec_len (ptd->chunks);
          op->digest = vlib_buffer_get_tail (lb) - icv_sz;

          esp_encrypt_chain_integ (vm, ptd, sa0, b[0], lb, icv_sz,
                                   payload - iv_sz - sizeof (esp_header_t),
                                   payload_len + iv_sz +
                                   sizeof (esp_header_t), op->digest,
                                   &op->n_chunks);
        }
      else if (ipsec_sa_is_set_USE_ESN (sa0))
        {
          u32 tmp = clib_net_to_host_u32 (seq_hi);
          clib_memcpy_fast (op->digest, &tmp, sizeof (seq_hi));
          op->len += sizeof (seq_hi);
        }
    }
}
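/* Editor's sketch of the buffer laid out by esp_encrypt_inline () at this
 * point (widths not to scale, UDP and outer IP optional):
 *
 *   | IP | [UDP] | ESP hdr | IV | payload ... | pad | pad len | NH | ICV |
 *
 * The crypto op covers [IV..NH] (the IV region only for non-AEAD ciphers),
 * the integrity op covers the ESP header through the byte before the ICV,
 * and the ICV/tag is written at the very end. */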
static_always_inline void
esp_prepare_async_frame (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
                         vnet_crypto_async_frame_t *async_frame,
                         ipsec_sa_t *sa, vlib_buffer_t *b, esp_header_t *esp,
                         u8 *payload, u32 payload_len, u8 iv_sz, u8 icv_sz,
                         u32 bi, u16 next, u32 hdr_len, u16 async_next,
                         vlib_buffer_t *lb)
{
  esp_post_data_t *post = esp_post_data (b);
  u8 *tag, *iv, *aad = 0;
  u8 flag = 0;
  const u32 key_index = sa->crypto_key_index;
  i16 crypto_start_offset, integ_start_offset;
  u16 crypto_total_len, integ_total_len;

  post->next_index = next;

  /* crypto */
  crypto_start_offset = integ_start_offset = payload - b->data;
  crypto_total_len = integ_total_len = payload_len - icv_sz;
  tag = payload + crypto_total_len;

  /* generate the IV in front of the payload */
  void *pkt_iv = esp_generate_iv (sa, payload, iv_sz);

  if (ipsec_sa_is_set_IS_CTR (sa))
    {
      /* construct nonce in a scratch space in front of the IP header */
      esp_ctr_nonce_t *nonce =
        (esp_ctr_nonce_t *) (pkt_iv - hdr_len - sizeof (*nonce));
      if (ipsec_sa_is_set_IS_AEAD (sa))
        {
          /* construct aad in a scratch space in front of the nonce */
          aad = (u8 *) nonce - sizeof (esp_aead_t);
          esp_aad_fill (aad, esp, sa, sa->seq_hi);
          if (PREDICT_FALSE (ipsec_sa_is_set_IS_NULL_GMAC (sa)))
            {
              /* RFC-4543 ENCR_NULL_AUTH_AES_GMAC: IV is part of AAD */
              crypto_start_offset -= iv_sz;
              crypto_total_len += iv_sz;
            }
        }
      else
        {
          nonce->ctr = clib_host_to_net_u32 (1);
        }

      nonce->salt = sa->salt;
      nonce->iv = *(u64 *) pkt_iv;
      iv = (u8 *) nonce;
    }
  else
    {
      /* construct zero iv in front of the IP header */
      iv = pkt_iv - hdr_len - iv_sz;
      clib_memset_u8 (iv, 0, iv_sz);
      /* include iv field in crypto */
      crypto_start_offset -= iv_sz;
      crypto_total_len += iv_sz;
    }

  if (lb != b)
    {
      /* is chained */
      flag |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
      tag = vlib_buffer_get_tail (lb) - icv_sz;
      crypto_total_len = esp_encrypt_chain_crypto (
        vm, ptd, sa, b, lb, icv_sz, b->data + crypto_start_offset,
        crypto_total_len + icv_sz, 0);
    }

  if (sa->integ_op_id)
    {
      integ_start_offset -= iv_sz + sizeof (esp_header_t);
      integ_total_len += iv_sz + sizeof (esp_header_t);

      if (b != lb)
        {
          integ_total_len = esp_encrypt_chain_integ (
            vm, ptd, sa, b, lb, icv_sz,
            payload - iv_sz - sizeof (esp_header_t),
            payload_len + iv_sz + sizeof (esp_header_t), tag, 0);
        }
      else if (ipsec_sa_is_set_USE_ESN (sa))
        {
          u32 seq_hi = clib_net_to_host_u32 (sa->seq_hi);
          clib_memcpy_fast (tag, &seq_hi, sizeof (seq_hi));
          integ_total_len += sizeof (seq_hi);
        }
    }

  /* this always succeeds because we know the frame is not full */
  vnet_crypto_async_add_to_frame (vm, async_frame, key_index, crypto_total_len,
                                  integ_total_len - crypto_total_len,
                                  crypto_start_offset, integ_start_offset, bi,
                                  async_next, iv, tag, aad, flag);
}
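/* Editor's note: unlike the sync path, nothing is processed here; the
 * offsets, lengths and iv/tag/aad pointers are only recorded in the frame
 * element. Completion is signalled through the *-post nodes, which read
 * back the next_index stashed in esp_post_data () above. */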
always_inline uword
esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
                    vlib_frame_t *frame, vnet_link_t lt, int is_tun,
                    u16 async_next_node)
{
  ipsec_main_t *im = &ipsec_main;
  ipsec_per_thread_data_t *ptd = vec_elt_at_index (im->ptd, vm->thread_index);
  u32 *from = vlib_frame_vector_args (frame);
  u32 n_left = frame->n_vectors;
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
  u32 thread_index = vm->thread_index;
  u16 buffer_data_size = vlib_buffer_get_default_data_size (vm);
  u32 current_sa_index = ~0, current_sa_packets = 0;
  u32 current_sa_bytes = 0, spi = 0;
  u8 esp_align = 4, iv_sz = 0, icv_sz = 0;
  ipsec_sa_t *sa0 = 0;
  u8 sa_drop_no_crypto = 0;
  vlib_buffer_t *lb;
  vnet_crypto_op_t **crypto_ops = &ptd->crypto_ops;
  vnet_crypto_op_t **integ_ops = &ptd->integ_ops;
  vnet_crypto_async_frame_t *async_frames[VNET_CRYPTO_ASYNC_OP_N_IDS];
  int is_async = im->async_mode;
  vnet_crypto_async_op_id_t async_op = ~0;
  u16 drop_next =
    (lt == VNET_LINK_IP6 ? ESP_ENCRYPT_NEXT_DROP6 :
     (lt == VNET_LINK_IP4 ? ESP_ENCRYPT_NEXT_DROP4 :
      ESP_ENCRYPT_NEXT_DROP_MPLS));
  u16 handoff_next = (lt == VNET_LINK_IP6 ?
                      ESP_ENCRYPT_NEXT_HANDOFF6 :
                      (lt == VNET_LINK_IP4 ? ESP_ENCRYPT_NEXT_HANDOFF4 :
                       ESP_ENCRYPT_NEXT_HANDOFF_MPLS));
  vlib_buffer_t *sync_bufs[VLIB_FRAME_SIZE];
  u16 sync_nexts[VLIB_FRAME_SIZE], *sync_next = sync_nexts, n_sync = 0;
  u16 n_async = 0;
  u16 noop_nexts[VLIB_FRAME_SIZE], n_noop = 0;
  u32 sync_bi[VLIB_FRAME_SIZE];
  u32 noop_bi[VLIB_FRAME_SIZE];
  esp_encrypt_error_t err;

  vlib_get_buffers (vm, from, b, n_left);

  vec_reset_length (ptd->crypto_ops);
  vec_reset_length (ptd->integ_ops);
  vec_reset_length (ptd->chained_crypto_ops);
  vec_reset_length (ptd->chained_integ_ops);
  vec_reset_length (ptd->async_frames);
  vec_reset_length (ptd->chunks);
  clib_memset (async_frames, 0, sizeof (async_frames));
  while (n_left > 0)
    {
      u32 sa_index0;
      dpo_id_t *dpo;
      esp_header_t *esp;
      u8 *payload, *next_hdr_ptr;
      u16 payload_len, payload_len_total, n_bufs;
      u32 hdr_len;

      err = ESP_ENCRYPT_ERROR_RX_PKTS;

      if (n_left > 2)
        {
          u8 *p;
          vlib_prefetch_buffer_header (b[2], LOAD);
          p = vlib_buffer_get_current (b[1]);
          clib_prefetch_load (p);
          p -= CLIB_CACHE_LINE_BYTES;
          clib_prefetch_load (p);
          /* speculate that the trailer goes in the first buffer */
          CLIB_PREFETCH (vlib_buffer_get_tail (b[1]),
                         CLIB_CACHE_LINE_BYTES, LOAD);
        }

      vnet_calc_checksums_inline (vm, b[0], b[0]->flags & VNET_BUFFER_F_IS_IP4,
                                  b[0]->flags & VNET_BUFFER_F_IS_IP6);
      vnet_calc_outer_checksums_inline (vm, b[0]);

      if (is_tun)
        {
          /* we are on an ipsec tunnel's feature arc */
          vnet_buffer (b[0])->ipsec.sad_index =
            sa_index0 = ipsec_tun_protect_get_sa_out
            (vnet_buffer (b[0])->ip.adj_index[VLIB_TX]);

          if (PREDICT_FALSE (INDEX_INVALID == sa_index0))
            {
              err = ESP_ENCRYPT_ERROR_NO_PROTECTION;
              noop_nexts[n_noop] = drop_next;
              b[0]->error = node->errors[err];
              goto trace;
            }
        }
      else
        sa_index0 = vnet_buffer (b[0])->ipsec.sad_index;
      if (sa_index0 != current_sa_index)
        {
          if (current_sa_packets)
            vlib_increment_combined_counter (
              &ipsec_sa_counters, thread_index, current_sa_index,
              current_sa_packets, current_sa_bytes);
          current_sa_packets = current_sa_bytes = 0;

          sa0 = ipsec_sa_get (sa_index0);
          current_sa_index = sa_index0;

          sa_drop_no_crypto = ((sa0->crypto_alg == IPSEC_CRYPTO_ALG_NONE &&
                                sa0->integ_alg == IPSEC_INTEG_ALG_NONE) &&
                               !ipsec_sa_is_set_NO_ALGO_NO_DROP (sa0));

          vlib_prefetch_combined_counter (&ipsec_sa_counters, thread_index,
                                          current_sa_index);

          /* fetch the second cacheline ASAP */
          clib_prefetch_load (sa0->cacheline1);

          spi = clib_net_to_host_u32 (sa0->spi);
          esp_align = sa0->esp_block_align;
          icv_sz = sa0->integ_icv_size;
          iv_sz = sa0->crypto_iv_size;
          is_async = im->async_mode | ipsec_sa_is_set_IS_ASYNC (sa0);
        }

      if (PREDICT_FALSE (sa_drop_no_crypto != 0))
        {
          err = ESP_ENCRYPT_ERROR_NO_ENCRYPTION;
          esp_encrypt_set_next_index (b[0], node, thread_index, err, n_noop,
                                      noop_nexts, drop_next, sa_index0);
          goto trace;
        }

      if (PREDICT_FALSE ((u16) ~0 == sa0->thread_index))
        {
          /* this is the first packet to use this SA, claim the SA
           * for this thread. this could happen simultaneously on
           * another thread */
          clib_atomic_cmp_and_swap (&sa0->thread_index, ~0,
                                    ipsec_sa_assign_thread (thread_index));
        }

      if (PREDICT_FALSE (thread_index != sa0->thread_index))
        {
          vnet_buffer (b[0])->ipsec.thread_index = sa0->thread_index;
          err = ESP_ENCRYPT_ERROR_HANDOFF;
          esp_encrypt_set_next_index (b[0], node, thread_index, err, n_noop,
                                      noop_nexts, handoff_next,
                                      current_sa_index);
          goto trace;
        }

      lb = b[0];
      n_bufs = vlib_buffer_chain_linearize (vm, b[0]);
      if (n_bufs == 0)
        {
          err = ESP_ENCRYPT_ERROR_NO_BUFFERS;
          esp_encrypt_set_next_index (b[0], node, thread_index, err, n_noop,
                                      noop_nexts, drop_next, current_sa_index);
          goto trace;
        }

      if (n_bufs > 1)
        {
          /* find last buffer in the chain */
          while (lb->flags & VLIB_BUFFER_NEXT_PRESENT)
            lb = vlib_get_buffer (vm, lb->next_buffer);
        }

      if (PREDICT_FALSE (esp_seq_advance (sa0)))
        {
          err = ESP_ENCRYPT_ERROR_SEQ_CYCLED;
          esp_encrypt_set_next_index (b[0], node, thread_index, err, n_noop,
                                      noop_nexts, drop_next, current_sa_index);
          goto trace;
        }

      /* space for IV */
      hdr_len = iv_sz;
      if (ipsec_sa_is_set_IS_TUNNEL (sa0))
        {
          payload = vlib_buffer_get_current (b[0]);
          next_hdr_ptr = esp_add_footer_and_icv (
            vm, &lb, esp_align, icv_sz, buffer_data_size,
            vlib_buffer_length_in_chain (vm, b[0]));
          if (!next_hdr_ptr)
            {
              err = ESP_ENCRYPT_ERROR_NO_BUFFERS;
              esp_encrypt_set_next_index (b[0], node, thread_index, err,
                                          n_noop, noop_nexts, drop_next,
                                          current_sa_index);
              goto trace;
            }
          b[0]->flags &= ~VLIB_BUFFER_TOTAL_LENGTH_VALID;
          payload_len = b[0]->current_length;
          payload_len_total = vlib_buffer_length_in_chain (vm, b[0]);

          /* ESP header */
          hdr_len += sizeof (*esp);
          esp = (esp_header_t *) (payload - hdr_len);

          /* optional UDP header */
          if (ipsec_sa_is_set_UDP_ENCAP (sa0))
            {
              hdr_len += sizeof (udp_header_t);
              esp_fill_udp_hdr (sa0, (udp_header_t *) (payload - hdr_len),
                                payload_len_total + hdr_len);
            }

          /* IP header */
          if (ipsec_sa_is_set_IS_TUNNEL_V6 (sa0))
            {
              ip6_header_t *ip6;
              u16 len = sizeof (ip6_header_t);
              hdr_len += len;
              ip6 = (ip6_header_t *) (payload - hdr_len);
              clib_memcpy_fast (ip6, &sa0->ip6_hdr, sizeof (ip6_header_t));

              if (VNET_LINK_IP6 == lt)
                {
                  *next_hdr_ptr = IP_PROTOCOL_IPV6;
                  tunnel_encap_fixup_6o6 (sa0->tunnel_flags,
                                          (const ip6_header_t *) payload,
                                          ip6);
                }
              else if (VNET_LINK_IP4 == lt)
                {
                  *next_hdr_ptr = IP_PROTOCOL_IP_IN_IP;
                  tunnel_encap_fixup_4o6 (sa0->tunnel_flags, b[0],
                                          (const ip4_header_t *) payload, ip6);
                }
              else if (VNET_LINK_MPLS == lt)
                {
                  *next_hdr_ptr = IP_PROTOCOL_MPLS_IN_IP;
                  tunnel_encap_fixup_mplso6 (
                    sa0->tunnel_flags, b[0],
                    (const mpls_unicast_header_t *) payload, ip6);
                }
              else
                ASSERT (0);

              len = payload_len_total + hdr_len - len;
              ip6->payload_length = clib_net_to_host_u16 (len);
              b[0]->flags |= VNET_BUFFER_F_LOCALLY_ORIGINATED;
            }
          else
            {
              ip4_header_t *ip4;
              u16 len = sizeof (ip4_header_t);
              hdr_len += len;
              ip4 = (ip4_header_t *) (payload - hdr_len);
              clib_memcpy_fast (ip4, &sa0->ip4_hdr, sizeof (ip4_header_t));

              if (VNET_LINK_IP6 == lt)
                {
                  *next_hdr_ptr = IP_PROTOCOL_IPV6;
                  tunnel_encap_fixup_6o4_w_chksum (sa0->tunnel_flags,
                                                   (const ip6_header_t *)
                                                   payload, ip4);
                }
              else if (VNET_LINK_IP4 == lt)
                {
                  *next_hdr_ptr = IP_PROTOCOL_IP_IN_IP;
                  tunnel_encap_fixup_4o4_w_chksum (sa0->tunnel_flags,
                                                   (const ip4_header_t *)
                                                   payload, ip4);
                }
              else if (VNET_LINK_MPLS == lt)
                {
                  *next_hdr_ptr = IP_PROTOCOL_MPLS_IN_IP;
                  tunnel_encap_fixup_mplso4_w_chksum (
                    sa0->tunnel_flags, (const mpls_unicast_header_t *) payload,
                    ip4);
                }
              else
                ASSERT (0);

              len = payload_len_total + hdr_len;
              esp_update_ip4_hdr (ip4, len, /* is_transport */ 0, 0);
            }

          dpo = &sa0->dpo;
          if (!is_tun)
            {
              sync_next[0] = dpo->dpoi_next_node;
              vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = dpo->dpoi_index;
            }
          else
            sync_next[0] = ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT;
          b[0]->flags |= VNET_BUFFER_F_LOCALLY_ORIGINATED;
        }
      else /* transport mode */
        {
          u8 *l2_hdr, l2_len, *ip_hdr;
          u16 ip_len;
          ip6_ext_header_t *ext_hdr;
          udp_header_t *udp = 0;
          u16 udp_len = 0;
          u8 *old_ip_hdr = vlib_buffer_get_current (b[0]);

          /*
           * Get extension header chain length. It might be longer than the
           * buffer's pre_data area.
           */
          ip_len =
            (VNET_LINK_IP6 == lt ?
             esp_get_ip6_hdr_len ((ip6_header_t *) old_ip_hdr, &ext_hdr) :
             ip4_header_bytes ((ip4_header_t *) old_ip_hdr));
          if ((old_ip_hdr - ip_len) < &b[0]->pre_data[0])
            {
              err = ESP_ENCRYPT_ERROR_NO_BUFFERS;
              esp_encrypt_set_next_index (b[0], node, thread_index, err,
                                          n_noop, noop_nexts, drop_next,
                                          current_sa_index);
              goto trace;
            }

          vlib_buffer_advance (b[0], ip_len);
          payload = vlib_buffer_get_current (b[0]);
          next_hdr_ptr = esp_add_footer_and_icv (
            vm, &lb, esp_align, icv_sz, buffer_data_size,
            vlib_buffer_length_in_chain (vm, b[0]));
          if (!next_hdr_ptr)
            {
              err = ESP_ENCRYPT_ERROR_NO_BUFFERS;
              esp_encrypt_set_next_index (b[0], node, thread_index, err,
                                          n_noop, noop_nexts, drop_next,
                                          current_sa_index);
              goto trace;
            }

          b[0]->flags &= ~VLIB_BUFFER_TOTAL_LENGTH_VALID;
          payload_len = b[0]->current_length;
          payload_len_total = vlib_buffer_length_in_chain (vm, b[0]);

          /* ESP header */
          hdr_len += sizeof (*esp);
          esp = (esp_header_t *) (payload - hdr_len);

          /* optional UDP header */
          if (ipsec_sa_is_set_UDP_ENCAP (sa0))
            {
              hdr_len += sizeof (udp_header_t);
              udp = (udp_header_t *) (payload - hdr_len);
            }

          /* IP header */
          hdr_len += ip_len;
          ip_hdr = payload - hdr_len;

          /* L2 header */
          if (!is_tun)
            {
              l2_len = vnet_buffer (b[0])->ip.save_rewrite_length;
              hdr_len += l2_len;
              l2_hdr = payload - hdr_len;

              /* copy l2 and ip header */
              clib_memcpy_le32 (l2_hdr, old_ip_hdr - l2_len, l2_len);
            }
          else
            l2_len = 0;

          u16 len;
          len = payload_len_total + hdr_len - l2_len;

          if (VNET_LINK_IP6 == lt)
            {
              ip6_header_t *ip6 = (ip6_header_t *) (old_ip_hdr);
              if (PREDICT_TRUE (NULL == ext_hdr))
                {
                  *next_hdr_ptr = ip6->protocol;
                  ip6->protocol =
                    (udp) ? IP_PROTOCOL_UDP : IP_PROTOCOL_IPSEC_ESP;
                }
              else
                {
                  *next_hdr_ptr = ext_hdr->next_hdr;
                  ext_hdr->next_hdr =
                    (udp) ? IP_PROTOCOL_UDP : IP_PROTOCOL_IPSEC_ESP;
                }
              ip6->payload_length =
                clib_host_to_net_u16 (len - sizeof (ip6_header_t));
            }
          else if (VNET_LINK_IP4 == lt)
            {
              ip4_header_t *ip4 = (ip4_header_t *) (old_ip_hdr);
              *next_hdr_ptr = ip4->protocol;
              esp_update_ip4_hdr (ip4, len, /* is_transport */ 1,
                                  (udp != NULL));
            }

          clib_memcpy_le64 (ip_hdr, old_ip_hdr, ip_len);

          if (udp)
            {
              udp_len = len - ip_len;
              esp_fill_udp_hdr (sa0, udp, udp_len);
            }

          sync_next[0] = ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT;
        }
      if (lb != b[0])
        {
          crypto_ops = &ptd->chained_crypto_ops;
          integ_ops = &ptd->chained_integ_ops;
        }
      else
        {
          crypto_ops = &ptd->crypto_ops;
          integ_ops = &ptd->integ_ops;
        }

      esp->spi = spi;
      esp->seq = clib_net_to_host_u32 (sa0->seq);

      if (is_async)
        {
          async_op = sa0->crypto_async_enc_op_id;

          /* get a frame for this op if we don't yet have one or it's full
           */
          if (NULL == async_frames[async_op] ||
              vnet_crypto_async_frame_is_full (async_frames[async_op]))
            {
              async_frames[async_op] =
                vnet_crypto_async_get_frame (vm, async_op);

              if (PREDICT_FALSE (!async_frames[async_op]))
                {
                  err = ESP_ENCRYPT_ERROR_NO_AVAIL_FRAME;
                  esp_encrypt_set_next_index (b[0], node, thread_index, err,
                                              n_noop, noop_nexts, drop_next,
                                              current_sa_index);
                  goto trace;
                }

              /* Save the frame to the list we'll submit at the end */
              vec_add1 (ptd->async_frames, async_frames[async_op]);
            }

          esp_prepare_async_frame (vm, ptd, async_frames[async_op], sa0, b[0],
                                   esp, payload, payload_len, iv_sz, icv_sz,
                                   from[b - bufs], sync_next[0], hdr_len,
                                   async_next_node, lb);
        }
      else
        esp_prepare_sync_op (vm, ptd, crypto_ops, integ_ops, sa0, sa0->seq_hi,
                             payload, payload_len, iv_sz, icv_sz, n_sync, b,
                             lb, hdr_len, esp);

      vlib_buffer_advance (b[0], 0LL - hdr_len);

      current_sa_packets += 1;
      current_sa_bytes += payload_len_total;
    trace:
      if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
        {
          esp_encrypt_trace_t *tr = vlib_add_trace (vm, node, b[0],
                                                    sizeof (*tr));
          if (INDEX_INVALID == sa_index0)
            clib_memset_u8 (tr, 0xff, sizeof (*tr));
          else
            {
              tr->sa_index = sa_index0;
              tr->spi = sa0->spi;
              tr->seq = sa0->seq;
              tr->sa_seq_hi = sa0->seq_hi;
              tr->udp_encap = ipsec_sa_is_set_UDP_ENCAP (sa0);
              tr->crypto_alg = sa0->crypto_alg;
              tr->integ_alg = sa0->integ_alg;
            }
        }

      /* next */
      if (ESP_ENCRYPT_ERROR_RX_PKTS != err)
        {
          noop_bi[n_noop] = from[b - bufs];
          n_noop++;
        }
      else if (!is_async)
        {
          sync_bi[n_sync] = from[b - bufs];
          sync_bufs[n_sync] = b[0];
          sync_next += 1;
          n_sync += 1;
        }
      else
        n_async++;

      n_left -= 1;
      b += 1;
    }
  if (INDEX_INVALID != current_sa_index)
    vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
                                     current_sa_index, current_sa_packets,
                                     current_sa_bytes);
  if (n_sync)
    {
      esp_process_ops (vm, node, ptd->crypto_ops, sync_bufs, sync_nexts,
                       drop_next);
      esp_process_chained_ops (vm, node, ptd->chained_crypto_ops, sync_bufs,
                               sync_nexts, ptd->chunks, drop_next);

      esp_process_ops (vm, node, ptd->integ_ops, sync_bufs, sync_nexts,
                       drop_next);
      esp_process_chained_ops (vm, node, ptd->chained_integ_ops, sync_bufs,
                               sync_nexts, ptd->chunks, drop_next);

      vlib_buffer_enqueue_to_next (vm, node, sync_bi, sync_nexts, n_sync);
    }

  if (n_async)
    {
      /* submit all of the open frames */
      vnet_crypto_async_frame_t **async_frame;

      vec_foreach (async_frame, ptd->async_frames)
        {
          if (vnet_crypto_async_submit_open_frame (vm, *async_frame) < 0)
            {
              n_noop += esp_async_recycle_failed_submit (
                vm, *async_frame, node, ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR,
                IPSEC_SA_ERROR_CRYPTO_ENGINE_ERROR, n_noop, noop_bi,
                noop_nexts, drop_next, true);
              vnet_crypto_async_reset_frame (*async_frame);
              vnet_crypto_async_free_frame (vm, *async_frame);
            }
        }
    }

  if (n_noop)
    vlib_buffer_enqueue_to_next (vm, node, noop_bi, noop_nexts, n_noop);

  vlib_node_increment_counter (vm, node->node_index, ESP_ENCRYPT_ERROR_RX_PKTS,
                               frame->n_vectors);

  return frame->n_vectors;
}
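/* Editor's summary: every packet of the frame ends up in exactly one of
 * three groups: noop (drops and handoffs), sync (encrypted inline above),
 * or async (owned by a crypto engine until its *-post node runs). The
 * RX_PKTS counter is incremented once per frame either way. */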
always_inline uword
esp_encrypt_post_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
                         vlib_frame_t * frame)
{
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
  u16 nexts[VLIB_FRAME_SIZE], *next = nexts;
  u32 *from = vlib_frame_vector_args (frame);
  u32 n_left = frame->n_vectors;

  vlib_get_buffers (vm, from, b, n_left);

  if (n_left >= 4)
    {
      vlib_prefetch_buffer_header (b[0], LOAD);
      vlib_prefetch_buffer_header (b[1], LOAD);
      vlib_prefetch_buffer_header (b[2], LOAD);
      vlib_prefetch_buffer_header (b[3], LOAD);
    }

  while (n_left > 8)
    {
      vlib_prefetch_buffer_header (b[4], LOAD);
      vlib_prefetch_buffer_header (b[5], LOAD);
      vlib_prefetch_buffer_header (b[6], LOAD);
      vlib_prefetch_buffer_header (b[7], LOAD);

      next[0] = (esp_post_data (b[0]))->next_index;
      next[1] = (esp_post_data (b[1]))->next_index;
      next[2] = (esp_post_data (b[2]))->next_index;
      next[3] = (esp_post_data (b[3]))->next_index;

      if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE))
        {
          if (b[0]->flags & VLIB_BUFFER_IS_TRACED)
            {
              esp_encrypt_post_trace_t *tr = vlib_add_trace (vm, node, b[0],
                                                             sizeof (*tr));
              tr->next_index = next[0];
            }
          if (b[1]->flags & VLIB_BUFFER_IS_TRACED)
            {
              esp_encrypt_post_trace_t *tr = vlib_add_trace (vm, node, b[1],
                                                             sizeof (*tr));
              tr->next_index = next[1];
            }
          if (b[2]->flags & VLIB_BUFFER_IS_TRACED)
            {
              esp_encrypt_post_trace_t *tr = vlib_add_trace (vm, node, b[2],
                                                             sizeof (*tr));
              tr->next_index = next[2];
            }
          if (b[3]->flags & VLIB_BUFFER_IS_TRACED)
            {
              esp_encrypt_post_trace_t *tr = vlib_add_trace (vm, node, b[3],
                                                             sizeof (*tr));
              tr->next_index = next[3];
            }
        }

      b += 4;
      next += 4;
      n_left -= 4;
    }

  while (n_left > 0)
    {
      next[0] = (esp_post_data (b[0]))->next_index;
      if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
        {
          esp_encrypt_post_trace_t *tr = vlib_add_trace (vm, node, b[0],
                                                         sizeof (*tr));
          tr->next_index = next[0];
        }

      b += 1;
      next += 1;
      n_left -= 1;
    }

  vlib_node_increment_counter (vm, node->node_index,
                               ESP_ENCRYPT_ERROR_POST_RX_PKTS,
                               frame->n_vectors);
  vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);
  return frame->n_vectors;
}
VLIB_NODE_FN (esp4_encrypt_node) (vlib_main_t * vm,
                                  vlib_node_runtime_t * node,
                                  vlib_frame_t * from_frame)
{
  return esp_encrypt_inline (vm, node, from_frame, VNET_LINK_IP4, 0,
                             esp_encrypt_async_next.esp4_post_next);
}

VLIB_REGISTER_NODE (esp4_encrypt_node) = {
  .name = "esp4-encrypt",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_encrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ESP_ENCRYPT_N_ERROR,
  .error_counters = esp_encrypt_error_counters,

  .n_next_nodes = ESP_ENCRYPT_N_NEXT,
  .next_nodes = { [ESP_ENCRYPT_NEXT_DROP4] = "ip4-drop",
                  [ESP_ENCRYPT_NEXT_DROP6] = "ip6-drop",
                  [ESP_ENCRYPT_NEXT_DROP_MPLS] = "mpls-drop",
                  [ESP_ENCRYPT_NEXT_HANDOFF4] = "esp4-encrypt-handoff",
                  [ESP_ENCRYPT_NEXT_HANDOFF6] = "esp6-encrypt-handoff",
                  [ESP_ENCRYPT_NEXT_HANDOFF_MPLS] = "error-drop",
                  [ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT] = "interface-output" },
};
VLIB_NODE_FN (esp4_encrypt_post_node) (vlib_main_t * vm,
                                       vlib_node_runtime_t * node,
                                       vlib_frame_t * from_frame)
{
  return esp_encrypt_post_inline (vm, node, from_frame);
}

VLIB_REGISTER_NODE (esp4_encrypt_post_node) = {
  .name = "esp4-encrypt-post",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_post_encrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .sibling_of = "esp4-encrypt",

  .n_errors = ESP_ENCRYPT_N_ERROR,
  .error_counters = esp_encrypt_error_counters,
};
VLIB_NODE_FN (esp6_encrypt_node) (vlib_main_t * vm,
                                  vlib_node_runtime_t * node,
                                  vlib_frame_t * from_frame)
{
  return esp_encrypt_inline (vm, node, from_frame, VNET_LINK_IP6, 0,
                             esp_encrypt_async_next.esp6_post_next);
}

VLIB_REGISTER_NODE (esp6_encrypt_node) = {
  .name = "esp6-encrypt",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_encrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .sibling_of = "esp4-encrypt",

  .n_errors = ESP_ENCRYPT_N_ERROR,
  .error_counters = esp_encrypt_error_counters,
};
VLIB_NODE_FN (esp6_encrypt_post_node) (vlib_main_t * vm,
                                       vlib_node_runtime_t * node,
                                       vlib_frame_t * from_frame)
{
  return esp_encrypt_post_inline (vm, node, from_frame);
}

VLIB_REGISTER_NODE (esp6_encrypt_post_node) = {
  .name = "esp6-encrypt-post",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_post_encrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .sibling_of = "esp4-encrypt",

  .n_errors = ESP_ENCRYPT_N_ERROR,
  .error_counters = esp_encrypt_error_counters,
};
VLIB_NODE_FN (esp4_encrypt_tun_node) (vlib_main_t * vm,
                                      vlib_node_runtime_t * node,
                                      vlib_frame_t * from_frame)
{
  return esp_encrypt_inline (vm, node, from_frame, VNET_LINK_IP4, 1,
                             esp_encrypt_async_next.esp4_tun_post_next);
}

VLIB_REGISTER_NODE (esp4_encrypt_tun_node) = {
  .name = "esp4-encrypt-tun",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_encrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ESP_ENCRYPT_N_ERROR,
  .error_counters = esp_encrypt_error_counters,

  .n_next_nodes = ESP_ENCRYPT_N_NEXT,
  .next_nodes = {
    [ESP_ENCRYPT_NEXT_DROP4] = "ip4-drop",
    [ESP_ENCRYPT_NEXT_DROP6] = "ip6-drop",
    [ESP_ENCRYPT_NEXT_DROP_MPLS] = "mpls-drop",
    [ESP_ENCRYPT_NEXT_HANDOFF4] = "esp4-encrypt-tun-handoff",
    [ESP_ENCRYPT_NEXT_HANDOFF6] = "esp6-encrypt-tun-handoff",
    [ESP_ENCRYPT_NEXT_HANDOFF_MPLS] = "esp-mpls-encrypt-tun-handoff",
    [ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT] = "adj-midchain-tx",
  },
};
VLIB_NODE_FN (esp4_encrypt_tun_post_node) (vlib_main_t * vm,
                                           vlib_node_runtime_t * node,
                                           vlib_frame_t * from_frame)
{
  return esp_encrypt_post_inline (vm, node, from_frame);
}

VLIB_REGISTER_NODE (esp4_encrypt_tun_post_node) = {
  .name = "esp4-encrypt-tun-post",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_post_encrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .sibling_of = "esp4-encrypt-tun",

  .n_errors = ESP_ENCRYPT_N_ERROR,
  .error_counters = esp_encrypt_error_counters,
};
VLIB_NODE_FN (esp6_encrypt_tun_node) (vlib_main_t * vm,
                                      vlib_node_runtime_t * node,
                                      vlib_frame_t * from_frame)
{
  return esp_encrypt_inline (vm, node, from_frame, VNET_LINK_IP6, 1,
                             esp_encrypt_async_next.esp6_tun_post_next);
}

VLIB_REGISTER_NODE (esp6_encrypt_tun_node) = {
  .name = "esp6-encrypt-tun",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_encrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ESP_ENCRYPT_N_ERROR,
  .error_counters = esp_encrypt_error_counters,

  .n_next_nodes = ESP_ENCRYPT_N_NEXT,
  .next_nodes = {
    [ESP_ENCRYPT_NEXT_DROP4] = "ip4-drop",
    [ESP_ENCRYPT_NEXT_DROP6] = "ip6-drop",
    [ESP_ENCRYPT_NEXT_DROP_MPLS] = "mpls-drop",
    [ESP_ENCRYPT_NEXT_HANDOFF4] = "esp4-encrypt-tun-handoff",
    [ESP_ENCRYPT_NEXT_HANDOFF6] = "esp6-encrypt-tun-handoff",
    [ESP_ENCRYPT_NEXT_HANDOFF_MPLS] = "esp-mpls-encrypt-tun-handoff",
    [ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT] = "adj-midchain-tx",
  },
};
VLIB_NODE_FN (esp6_encrypt_tun_post_node) (vlib_main_t * vm,
                                           vlib_node_runtime_t * node,
                                           vlib_frame_t * from_frame)
{
  return esp_encrypt_post_inline (vm, node, from_frame);
}

VLIB_REGISTER_NODE (esp6_encrypt_tun_post_node) = {
  .name = "esp6-encrypt-tun-post",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_post_encrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .sibling_of = "esp-mpls-encrypt-tun",

  .n_errors = ESP_ENCRYPT_N_ERROR,
  .error_counters = esp_encrypt_error_counters,
};
VLIB_NODE_FN (esp_mpls_encrypt_tun_node)
(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *from_frame)
{
  return esp_encrypt_inline (vm, node, from_frame, VNET_LINK_MPLS, 1,
                             esp_encrypt_async_next.esp_mpls_tun_post_next);
}

VLIB_REGISTER_NODE (esp_mpls_encrypt_tun_node) = {
  .name = "esp-mpls-encrypt-tun",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_encrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ESP_ENCRYPT_N_ERROR,
  .error_counters = esp_encrypt_error_counters,

  .n_next_nodes = ESP_ENCRYPT_N_NEXT,
  .next_nodes = {
    [ESP_ENCRYPT_NEXT_DROP4] = "ip4-drop",
    [ESP_ENCRYPT_NEXT_DROP6] = "ip6-drop",
    [ESP_ENCRYPT_NEXT_DROP_MPLS] = "mpls-drop",
    [ESP_ENCRYPT_NEXT_HANDOFF4] = "esp4-encrypt-tun-handoff",
    [ESP_ENCRYPT_NEXT_HANDOFF6] = "esp6-encrypt-tun-handoff",
    [ESP_ENCRYPT_NEXT_HANDOFF_MPLS] = "esp-mpls-encrypt-tun-handoff",
    [ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT] = "adj-midchain-tx",
  },
};
VLIB_NODE_FN (esp_mpls_encrypt_tun_post_node)
(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *from_frame)
{
  return esp_encrypt_post_inline (vm, node, from_frame);
}

VLIB_REGISTER_NODE (esp_mpls_encrypt_tun_post_node) = {
  .name = "esp-mpls-encrypt-tun-post",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_post_encrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .sibling_of = "esp-mpls-encrypt-tun",

  .n_errors = ESP_ENCRYPT_N_ERROR,
  .error_counters = esp_encrypt_error_counters,
};
#ifndef CLIB_MARCH_VARIANT

static clib_error_t *
esp_encrypt_init (vlib_main_t *vm)
{
  ipsec_main_t *im = &ipsec_main;

  im->esp4_enc_fq_index =
    vlib_frame_queue_main_init (esp4_encrypt_node.index, 0);
  im->esp6_enc_fq_index =
    vlib_frame_queue_main_init (esp6_encrypt_node.index, 0);
  im->esp4_enc_tun_fq_index =
    vlib_frame_queue_main_init (esp4_encrypt_tun_node.index, 0);
  im->esp6_enc_tun_fq_index =
    vlib_frame_queue_main_init (esp6_encrypt_tun_node.index, 0);
  im->esp_mpls_enc_tun_fq_index =
    vlib_frame_queue_main_init (esp_mpls_encrypt_tun_node.index, 0);

  return 0;
}

VLIB_INIT_FUNCTION (esp_encrypt_init);

#endif
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */