/*
 * esp_encrypt.c : IPSec ESP encrypt node
 *
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vnet/vnet.h>
#include <vnet/api_errno.h>
#include <vnet/ip/ip.h>

#include <vnet/crypto/crypto.h>

#include <vnet/ipsec/ipsec.h>
#include <vnet/ipsec/ipsec_tun.h>
#include <vnet/ipsec/esp.h>
#include <vnet/tunnel/tunnel_dp.h>
#define foreach_esp_encrypt_next                                              \
  _ (DROP4, "ip4-drop")                                                       \
  _ (DROP6, "ip6-drop")                                                       \
  _ (DROP_MPLS, "mpls-drop")                                                  \
  _ (HANDOFF4, "handoff4")                                                    \
  _ (HANDOFF6, "handoff6")                                                    \
  _ (HANDOFF_MPLS, "handoff-mpls")                                            \
  _ (INTERFACE_OUTPUT, "interface-output")

#define _(v, s) ESP_ENCRYPT_NEXT_##v,
typedef enum
{
  foreach_esp_encrypt_next
#undef _
    ESP_ENCRYPT_N_NEXT,
} esp_encrypt_next_t;
#define foreach_esp_encrypt_error                                             \
  _ (RX_PKTS, "ESP pkts received")                                            \
  _ (POST_RX_PKTS, "ESP-post pkts received")                                  \
  _ (HANDOFF, "Hand-off")                                                     \
  _ (SEQ_CYCLED, "sequence number cycled (packet dropped)")                   \
  _ (CRYPTO_ENGINE_ERROR, "crypto engine error (packet dropped)")             \
  _ (CRYPTO_QUEUE_FULL, "crypto queue full (packet dropped)")                 \
  _ (NO_BUFFERS, "no buffers (packet dropped)")                               \
  _ (NO_PROTECTION, "no protecting SA (packet dropped)")                      \
  _ (NO_ENCRYPTION, "no encrypting SA (packet dropped)")

typedef enum
{
#define _(sym,str) ESP_ENCRYPT_ERROR_##sym,
  foreach_esp_encrypt_error
#undef _
    ESP_ENCRYPT_N_ERROR,
} esp_encrypt_error_t;

static char *esp_encrypt_error_strings[] = {
#define _(sym,string) string,
  foreach_esp_encrypt_error
#undef _
};
typedef struct
{
  u32 sa_index, spi, seq, sa_seq_hi;
  u8 udp_encap;
  ipsec_crypto_alg_t crypto_alg;
  ipsec_integ_alg_t integ_alg;
} esp_encrypt_trace_t;

typedef struct
{
  u32 next_index;
} esp_encrypt_post_trace_t;
/* packet trace format function */
static u8 *
format_esp_encrypt_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  esp_encrypt_trace_t *t = va_arg (*args, esp_encrypt_trace_t *);

  s = format (s,
              "esp: sa-index %d spi %u (0x%08x) seq %u sa-seq-hi %u crypto %U integrity %U%s",
              t->sa_index, t->spi, t->spi, t->seq, t->sa_seq_hi,
              format_ipsec_crypto_alg,
              t->crypto_alg, format_ipsec_integ_alg, t->integ_alg,
              t->udp_encap ? " udp-encap-enabled" : "");
  return s;
}
static u8 *
format_esp_post_encrypt_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  esp_encrypt_post_trace_t *t = va_arg (*args, esp_encrypt_post_trace_t *);

  s = format (s, "esp-post: next node index %u", t->next_index);
  return s;
}
/* pad packet in input buffer */
static_always_inline u8 *
esp_add_footer_and_icv (vlib_main_t *vm, vlib_buffer_t **last, u8 esp_align,
                        u8 icv_sz, vlib_node_runtime_t *node,
                        u16 buffer_data_size, uword total_len)
{
  static const u8 pad_data[ESP_MAX_BLOCK_SIZE] = {
    0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
    0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x00,
  };

  u16 min_length = total_len + sizeof (esp_footer_t);
  u16 new_length = round_pow2 (min_length, esp_align);
  u8 pad_bytes = new_length - min_length;
  esp_footer_t *f = (esp_footer_t *) (vlib_buffer_get_current (last[0]) +
                                      last[0]->current_length + pad_bytes);
  u16 tail_sz = sizeof (esp_footer_t) + pad_bytes + icv_sz;

  if (last[0]->current_data + last[0]->current_length + tail_sz >
      buffer_data_size)
    {
      u32 tmp_bi = 0;
      if (vlib_buffer_alloc (vm, &tmp_bi, 1) != 1)
        return 0;

      vlib_buffer_t *tmp = vlib_get_buffer (vm, tmp_bi);
      last[0]->next_buffer = tmp_bi;
      last[0]->flags |= VLIB_BUFFER_NEXT_PRESENT;
      f = (esp_footer_t *) (vlib_buffer_get_current (tmp) + pad_bytes);
      tmp->current_length += tail_sz;
      last[0] = tmp;
    }
  else
    last[0]->current_length += tail_sz;

  f->pad_length = pad_bytes;
  if (pad_bytes)
    {
      ASSERT (pad_bytes <= ESP_MAX_BLOCK_SIZE);
      pad_bytes = clib_min (ESP_MAX_BLOCK_SIZE, pad_bytes);
      clib_memcpy_fast ((u8 *) f - pad_bytes, pad_data, pad_bytes);
    }

  return &f->next_header;
}
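/*
 * Illustrative sketch (not part of the original code) of what
 * esp_add_footer_and_icv () lays out behind the last payload byte:
 *
 *   ... payload | pad (pad_bytes of 01 02 03 ...) | pad_length | next_header | ICV
 *
 * pad_bytes is chosen so that payload + footer lands on an esp_align
 * boundary; the ICV bytes are only reserved here and are written later by
 * the integrity/AEAD operation. Worked example with made-up numbers,
 * assuming sizeof (esp_footer_t) == 2: total_len = 42, esp_align = 16
 * gives min_length = 44, new_length = 48, pad_bytes = 4 and
 * tail_sz = 4 + 2 + icv_sz.
 */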
static_always_inline void
esp_update_ip4_hdr (ip4_header_t * ip4, u16 len, int is_transport, int is_udp)
{
  ip_csum_t sum;
  u16 old_len;

  len = clib_net_to_host_u16 (len);
  old_len = ip4->length;
  ip4->length = len;

  if (is_transport)
    {
      u8 prot = is_udp ? IP_PROTOCOL_UDP : IP_PROTOCOL_IPSEC_ESP;

      sum = ip_csum_update (ip4->checksum, ip4->protocol,
                            prot, ip4_header_t, protocol);
      ip4->protocol = prot;
      sum = ip_csum_update (sum, old_len, len, ip4_header_t, length);
    }
  else
    sum = ip_csum_update (ip4->checksum, old_len, len, ip4_header_t, length);

  ip4->checksum = ip_csum_fold (sum);
}
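/*
 * Note: the IPv4 header is patched in place and its checksum is fixed up
 * incrementally (RFC 1624 style) via ip_csum_update () rather than being
 * recomputed over the whole header. In transport mode both the protocol
 * field (ESP, or UDP when UDP-encapsulated) and the total length enter the
 * running sum; in tunnel mode only the length changes.
 */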
static_always_inline void
esp_fill_udp_hdr (ipsec_sa_t * sa, udp_header_t * udp, u16 len)
{
  clib_memcpy_fast (udp, &sa->udp_hdr, sizeof (udp_header_t));
  udp->length = clib_net_to_host_u16 (len);
}
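/*
 * UDP encapsulation (NAT traversal, RFC 3948): the per-SA UDP header
 * template is copied verbatim and only the length is filled in per packet;
 * it covers the UDP header plus the entire ESP part of the datagram.
 */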
static_always_inline u8
ext_hdr_is_pre_esp (u8 nexthdr)
{
#ifdef CLIB_HAVE_VEC128
  static const u8x16 ext_hdr_types = {
    IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS,
    IP_PROTOCOL_IPV6_ROUTE,
    IP_PROTOCOL_IPV6_FRAGMENTATION,
  };

  return !u8x16_is_all_zero (ext_hdr_types == u8x16_splat (nexthdr));
#else
  return (!(nexthdr ^ IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS) |
          !(nexthdr ^ IP_PROTOCOL_IPV6_ROUTE) |
          !(nexthdr ^ IP_PROTOCOL_IPV6_FRAGMENTATION));
#endif
}
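/*
 * In the scalar fallback each XOR is zero exactly when nexthdr matches
 * that extension header type, so OR-ing the negated comparisons yields
 * non-zero iff nexthdr is hop-by-hop, routing or fragment - the same set
 * the vector variant tests with a single 16-byte compare against
 * ext_hdr_types.
 */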
static_always_inline u8
esp_get_ip6_hdr_len (ip6_header_t * ip6, ip6_ext_header_t ** ext_hdr)
{
  /* this code assumes that HbH, routing and fragment headers appear before
     any other extension header; if that is not the case, the remaining
     headers will end up encrypted */
  u8 len = sizeof (ip6_header_t);
  ip6_ext_header_t *p;

  /* the packet doesn't carry any extension header handled in the clear */
  if (ext_hdr_is_pre_esp (ip6->protocol) == 0)
    {
      *ext_hdr = NULL;
      return len;
    }

  p = (void *) (ip6 + 1);
  len += ip6_ext_header_len (p);

  while (ext_hdr_is_pre_esp (p->next_hdr))
    {
      len += ip6_ext_header_len (p);
      p = ip6_ext_next_header (p);
    }

  *ext_hdr = p;
  return len;
}
static_always_inline void
esp_process_chained_ops (vlib_main_t * vm, vlib_node_runtime_t * node,
                         vnet_crypto_op_t * ops, vlib_buffer_t * b[],
                         u16 * nexts, vnet_crypto_op_chunk_t * chunks,
                         u16 drop_next)
{
  u32 n_fail, n_ops = vec_len (ops);
  vnet_crypto_op_t *op = ops;
  if (n_ops == 0)
    return;
  n_fail = n_ops - vnet_crypto_process_chained_ops (vm, op, chunks, n_ops);
  while (n_fail)
    {
      ASSERT (op - ops < n_ops);
      if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
        {
          u32 bi = op->user_data;
          b[bi]->error = node->errors[ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR];
          nexts[bi] = drop_next;
          n_fail--;
        }
      op++;
    }
}
static_always_inline void
esp_process_ops (vlib_main_t * vm, vlib_node_runtime_t * node,
                 vnet_crypto_op_t * ops, vlib_buffer_t * b[], u16 * nexts,
                 u16 drop_next)
{
  u32 n_fail, n_ops = vec_len (ops);
  vnet_crypto_op_t *op = ops;
  if (n_ops == 0)
    return;
  n_fail = n_ops - vnet_crypto_process_ops (vm, op, n_ops);
  while (n_fail)
    {
      ASSERT (op - ops < n_ops);
      if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
        {
          u32 bi = op->user_data;
          b[bi]->error = node->errors[ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR];
          nexts[bi] = drop_next;
          n_fail--;
        }
      op++;
    }
}
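/*
 * esp_process_ops () and esp_process_chained_ops () are the synchronous
 * completion path: the engine reports how many ops succeeded, and every op
 * left in a non-COMPLETED state has its buffer (op->user_data indexes the
 * sync buffer array) redirected to the drop next with a crypto engine
 * error.
 */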
static_always_inline u32
esp_encrypt_chain_crypto (vlib_main_t * vm, ipsec_per_thread_data_t * ptd,
                          ipsec_sa_t * sa0, vlib_buffer_t * b,
                          vlib_buffer_t * lb, u8 icv_sz, u8 * start,
                          u32 start_len, u16 * n_ch)
{
  vnet_crypto_op_chunk_t *ch;
  vlib_buffer_t *cb = b;
  u32 n_chunks = 1;
  u32 total_len;
  vec_add2 (ptd->chunks, ch, 1);
  total_len = ch->len = start_len;
  ch->src = ch->dst = start;
  cb = vlib_get_buffer (vm, cb->next_buffer);

  while (1)
    {
      vec_add2 (ptd->chunks, ch, 1);
      n_chunks += 1;
      if (lb == cb)
        total_len += ch->len = cb->current_length - icv_sz;
      else
        total_len += ch->len = cb->current_length;
      ch->src = ch->dst = vlib_buffer_get_current (cb);
      if (!(cb->flags & VLIB_BUFFER_NEXT_PRESENT))
        break;
      cb = vlib_get_buffer (vm, cb->next_buffer);
    }

  if (n_ch)
    *n_ch = n_chunks;
  return total_len;
}
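/*
 * For chained buffers the in-place cipher cannot be described by a single
 * pointer/length pair, so the payload is flattened into a vector of
 * vnet_crypto_op_chunk_t entries (src == dst): one chunk starting at the
 * ESP payload in the first buffer, one per following buffer, and the chunk
 * that lands in the last buffer is shortened by icv_sz so the ICV slot
 * stays out of the ciphertext.
 */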
static_always_inline u32
esp_encrypt_chain_integ (vlib_main_t * vm, ipsec_per_thread_data_t * ptd,
                         ipsec_sa_t * sa0, vlib_buffer_t * b,
                         vlib_buffer_t * lb, u8 icv_sz, u8 * start,
                         u32 start_len, u8 * digest, u16 * n_ch)
{
  vnet_crypto_op_chunk_t *ch;
  vlib_buffer_t *cb = b;
  u32 n_chunks = 1;
  u32 total_len;
  vec_add2 (ptd->chunks, ch, 1);
  total_len = ch->len = start_len;
  ch->src = start;
  cb = vlib_get_buffer (vm, cb->next_buffer);

  while (1)
    {
      vec_add2 (ptd->chunks, ch, 1);
      n_chunks += 1;
      if (lb == cb)
        {
          total_len += ch->len = cb->current_length - icv_sz;
          if (ipsec_sa_is_set_USE_ESN (sa0))
            {
              u32 seq_hi = clib_net_to_host_u32 (sa0->seq_hi);
              clib_memcpy_fast (digest, &seq_hi, sizeof (seq_hi));
              ch->len += sizeof (seq_hi);
              total_len += sizeof (seq_hi);
            }
        }
      else
        total_len += ch->len = cb->current_length;
      ch->src = vlib_buffer_get_current (cb);
      if (!(cb->flags & VLIB_BUFFER_NEXT_PRESENT))
        break;
      cb = vlib_get_buffer (vm, cb->next_buffer);
    }

  if (n_ch)
    *n_ch = n_chunks;
  return total_len;
}
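/*
 * Difference from the crypto chain: chunks here are source-only (the
 * digest is written separately), and when the SA uses extended sequence
 * numbers the high 32 bits of the sequence are copied into the ICV slot
 * and the last chunk is stretched to cover them, per RFC 4303 ESN
 * authentication; the real ICV later overwrites that scratch copy.
 */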
static_always_inline void
esp_prepare_sync_op (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
                     vnet_crypto_op_t **crypto_ops,
                     vnet_crypto_op_t **integ_ops, ipsec_sa_t *sa0, u32 seq_hi,
                     u8 *payload, u16 payload_len, u8 iv_sz, u8 icv_sz, u32 bi,
                     vlib_buffer_t **b, vlib_buffer_t *lb, u32 hdr_len,
                     esp_header_t *esp)
{
  if (sa0->crypto_enc_op_id)
    {
      vnet_crypto_op_t *op;
      vec_add2_aligned (crypto_ops[0], op, 1, CLIB_CACHE_LINE_BYTES);
      vnet_crypto_op_init (op, sa0->crypto_enc_op_id);

      op->src = op->dst = payload;
      op->key_index = sa0->crypto_key_index;
      op->len = payload_len - icv_sz;
      op->user_data = bi;

      if (ipsec_sa_is_set_IS_CTR (sa0))
        {
          ASSERT (sizeof (u64) == iv_sz);
          /* construct nonce in a scratch space in front of the IP header */
          esp_ctr_nonce_t *nonce =
            (esp_ctr_nonce_t *) (payload - sizeof (u64) - hdr_len -
                                 sizeof (*nonce));
          u64 *pkt_iv = (u64 *) (payload - sizeof (u64));

          if (ipsec_sa_is_set_IS_AEAD (sa0))
            {
              /* construct aad in a scratch space in front of the nonce */
              op->aad = (u8 *) nonce - sizeof (esp_aead_t);
              op->aad_len = esp_aad_fill (op->aad, esp, sa0, seq_hi);
              op->tag = payload + op->len;
            }
          else
            nonce->ctr = clib_host_to_net_u32 (1);

          nonce->salt = sa0->salt;
          nonce->iv = *pkt_iv = clib_host_to_net_u64 (sa0->ctr_iv_counter++);
          op->iv = (u8 *) nonce;
        }
      else
        {
          op->iv = payload - iv_sz;
          op->flags = VNET_CRYPTO_OP_FLAG_INIT_IV;
        }

      if (lb != b[0])
        {
          /* is chained */
          op->flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
          op->chunk_index = vec_len (ptd->chunks);
          op->tag = vlib_buffer_get_tail (lb) - icv_sz;
          esp_encrypt_chain_crypto (vm, ptd, sa0, b[0], lb, icv_sz, payload,
                                    payload_len, &op->n_chunks);
        }
    }

  if (sa0->integ_op_id)
    {
      vnet_crypto_op_t *op;
      vec_add2_aligned (integ_ops[0], op, 1, CLIB_CACHE_LINE_BYTES);
      vnet_crypto_op_init (op, sa0->integ_op_id);
      op->src = payload - iv_sz - sizeof (esp_header_t);
      op->digest = payload + payload_len - icv_sz;
      op->key_index = sa0->integ_key_index;
      op->digest_len = icv_sz;
      op->len = payload_len - icv_sz + iv_sz + sizeof (esp_header_t);
      op->user_data = bi;

      if (lb != b[0])
        {
          /* is chained */
          op->flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
          op->chunk_index = vec_len (ptd->chunks);
          op->digest = vlib_buffer_get_tail (lb) - icv_sz;

          esp_encrypt_chain_integ (vm, ptd, sa0, b[0], lb, icv_sz,
                                   payload - iv_sz - sizeof (esp_header_t),
                                   payload_len + iv_sz +
                                   sizeof (esp_header_t), op->digest,
                                   &op->n_chunks);
        }
      else if (ipsec_sa_is_set_USE_ESN (sa0))
        {
          u32 tmp = clib_net_to_host_u32 (seq_hi);
          clib_memcpy_fast (op->digest, &tmp, sizeof (seq_hi));
          op->len += sizeof (seq_hi);
        }
    }
}
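/*
 * Coverage sketch for the synchronous ops prepared above (single-buffer
 * case, illustrative only):
 *
 *   ESP hdr | IV | payload ... | pad | footer | ICV
 *   |<----------- integ op ----------------->|
 *                |<------- crypto op ------->|
 *
 * Encryption starts at the payload and stops before the ICV; the integrity
 * op additionally covers the ESP header and IV and, with ESN, the high 32
 * sequence bits temporarily stored behind the data.
 */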
static_always_inline void
esp_prepare_async_frame (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
                         vnet_crypto_async_frame_t *async_frame,
                         ipsec_sa_t *sa, vlib_buffer_t *b, esp_header_t *esp,
                         u8 *payload, u32 payload_len, u8 iv_sz, u8 icv_sz,
                         u32 bi, u16 next, u32 hdr_len, u16 async_next,
                         vlib_buffer_t *lb)
{
  esp_post_data_t *post = esp_post_data (b);
  u8 *tag, *iv, *aad = 0;
  u8 flag = 0;
  u32 key_index;
  i16 crypto_start_offset, integ_start_offset = 0;
  u16 crypto_total_len, integ_total_len;

  post->next_index = next;

  crypto_start_offset = payload - b->data;
  crypto_total_len = integ_total_len = payload_len - icv_sz;
  tag = payload + crypto_total_len;

  key_index = sa->linked_key_index;

  if (ipsec_sa_is_set_IS_CTR (sa))
    {
      ASSERT (sizeof (u64) == iv_sz);
      /* construct nonce in a scratch space in front of the IP header */
      esp_ctr_nonce_t *nonce = (esp_ctr_nonce_t *) (payload - sizeof (u64) -
                                                    hdr_len - sizeof (*nonce));
      u64 *pkt_iv = (u64 *) (payload - sizeof (u64));

      if (ipsec_sa_is_set_IS_AEAD (sa))
        {
          /* construct aad in a scratch space in front of the nonce */
          aad = (u8 *) nonce - sizeof (esp_aead_t);
          esp_aad_fill (aad, esp, sa, sa->seq_hi);
          key_index = sa->crypto_key_index;
        }
      else
        nonce->ctr = clib_host_to_net_u32 (1);

      nonce->salt = sa->salt;
      nonce->iv = *pkt_iv = clib_host_to_net_u64 (sa->ctr_iv_counter++);
      iv = (u8 *) nonce;
    }
  else
    {
      iv = payload - iv_sz;
      flag |= VNET_CRYPTO_OP_FLAG_INIT_IV;
    }

  if (lb != b)
    {
      /* is chained */
      flag |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
      tag = vlib_buffer_get_tail (lb) - icv_sz;
      crypto_total_len = esp_encrypt_chain_crypto (vm, ptd, sa, b, lb, icv_sz,
                                                   payload, payload_len, 0);
    }

  if (sa->integ_op_id)
    {
      integ_start_offset = crypto_start_offset - iv_sz - sizeof (esp_header_t);
      integ_total_len += iv_sz + sizeof (esp_header_t);

      if (b != lb)
        {
          integ_total_len = esp_encrypt_chain_integ (
            vm, ptd, sa, b, lb, icv_sz,
            payload - iv_sz - sizeof (esp_header_t),
            payload_len + iv_sz + sizeof (esp_header_t), tag, 0);
        }
      else if (ipsec_sa_is_set_USE_ESN (sa))
        {
          u32 seq_hi = clib_net_to_host_u32 (sa->seq_hi);
          clib_memcpy_fast (tag, &seq_hi, sizeof (seq_hi));
          integ_total_len += sizeof (seq_hi);
        }
    }

  /* this always succeeds because we know the frame is not full */
  vnet_crypto_async_add_to_frame (vm, async_frame, key_index, crypto_total_len,
                                  integ_total_len - crypto_total_len,
                                  crypto_start_offset, integ_start_offset, bi,
                                  async_next, iv, tag, aad, flag);
}
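/*
 * Async path: rather than queueing vnet_crypto_op_t's, the packet is added
 * to a per-op-id vnet_crypto_async_frame_t using offsets relative to
 * b->data, and the post-crypto next index is stashed in the buffer's
 * esp_post_data so the matching *-encrypt-post node can pick it up once
 * the engine completes the frame.
 */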
always_inline uword
esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
                    vlib_frame_t *frame, vnet_link_t lt, int is_tun,
                    u16 async_next_node)
{
  ipsec_main_t *im = &ipsec_main;
  ipsec_per_thread_data_t *ptd = vec_elt_at_index (im->ptd, vm->thread_index);
  u32 *from = vlib_frame_vector_args (frame);
  u32 n_left = frame->n_vectors;
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
  u32 thread_index = vm->thread_index;
  u16 buffer_data_size = vlib_buffer_get_default_data_size (vm);
  u32 current_sa_index = ~0, current_sa_packets = 0;
  u32 current_sa_bytes = 0, spi = 0;
  u8 esp_align = 4, iv_sz = 0, icv_sz = 0;
  vnet_crypto_op_t **crypto_ops = &ptd->crypto_ops;
  vnet_crypto_op_t **integ_ops = &ptd->integ_ops;
  vnet_crypto_async_frame_t *async_frames[VNET_CRYPTO_ASYNC_OP_N_IDS];
  int is_async = im->async_mode;
  vnet_crypto_async_op_id_t async_op = ~0;
  u16 drop_next =
    (lt == VNET_LINK_IP6 ? ESP_ENCRYPT_NEXT_DROP6 :
     (lt == VNET_LINK_IP4 ? ESP_ENCRYPT_NEXT_DROP4 :
      ESP_ENCRYPT_NEXT_DROP_MPLS));
  u16 handoff_next = (lt == VNET_LINK_IP6 ?
                      ESP_ENCRYPT_NEXT_HANDOFF6 :
                      (lt == VNET_LINK_IP4 ? ESP_ENCRYPT_NEXT_HANDOFF4 :
                       ESP_ENCRYPT_NEXT_HANDOFF_MPLS));
  vlib_buffer_t *sync_bufs[VLIB_FRAME_SIZE];
  u16 sync_nexts[VLIB_FRAME_SIZE], *sync_next = sync_nexts, n_sync = 0;
  u16 async_nexts[VLIB_FRAME_SIZE], *async_next = async_nexts, n_async = 0;
  u16 noop_nexts[VLIB_FRAME_SIZE], *noop_next = noop_nexts, n_noop = 0;
  u32 sync_bi[VLIB_FRAME_SIZE];
  u32 noop_bi[VLIB_FRAME_SIZE];
  esp_encrypt_error_t err;

  vlib_get_buffers (vm, from, b, n_left);

  vec_reset_length (ptd->crypto_ops);
  vec_reset_length (ptd->integ_ops);
  vec_reset_length (ptd->chained_crypto_ops);
  vec_reset_length (ptd->chained_integ_ops);
  vec_reset_length (ptd->async_frames);
  vec_reset_length (ptd->chunks);
  clib_memset (async_frames, 0, sizeof (async_frames));
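  /*
   * In the per-packet loop below every buffer ends up in exactly one of
   * three dispositions:
   *  - sync:  encrypted in-line via the crypto/integ op vectors and
   *           enqueued with sync_nexts once the ops have been processed,
   *  - async: added to an open crypto frame and finished later by the
   *           *-encrypt-post node,
   *  - noop:  dropped or handed off immediately (wrong thread, no SA,
   *           sequence number wrap, out of buffers, ...).
   */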
      u8 *payload, *next_hdr_ptr;
      u16 payload_len, payload_len_total, n_bufs;

      err = ESP_ENCRYPT_ERROR_RX_PKTS;

      vlib_prefetch_buffer_header (b[2], LOAD);
      p = vlib_buffer_get_current (b[1]);
      clib_prefetch_load (p);
      p -= CLIB_CACHE_LINE_BYTES;
      clib_prefetch_load (p);
      /* speculate that the trailer goes in the first buffer */
      CLIB_PREFETCH (vlib_buffer_get_tail (b[1]),
                     CLIB_CACHE_LINE_BYTES, LOAD);

      /* we are on an ipsec tunnel's feature arc */
      vnet_buffer (b[0])->ipsec.sad_index =
        sa_index0 = ipsec_tun_protect_get_sa_out
        (vnet_buffer (b[0])->ip.adj_index[VLIB_TX]);

      if (PREDICT_FALSE (INDEX_INVALID == sa_index0))
          err = ESP_ENCRYPT_ERROR_NO_PROTECTION;
          esp_set_next_index (b[0], node, err, n_noop, noop_nexts,

      sa_index0 = vnet_buffer (b[0])->ipsec.sad_index;

      if (sa_index0 != current_sa_index)
          if (current_sa_packets)
            vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,

          current_sa_packets = current_sa_bytes = 0;

          sa0 = ipsec_sa_get (sa_index0);

          if (PREDICT_FALSE ((sa0->crypto_alg == IPSEC_CRYPTO_ALG_NONE &&
                              sa0->integ_alg == IPSEC_INTEG_ALG_NONE) &&
                             !ipsec_sa_is_set_NO_ALGO_NO_DROP (sa0)))
              err = ESP_ENCRYPT_ERROR_NO_ENCRYPTION;
              esp_set_next_index (b[0], node, err, n_noop, noop_nexts,

          /* fetch the second cacheline ASAP */
          clib_prefetch_load (sa0->cacheline1);

          current_sa_index = sa_index0;
          spi = clib_net_to_host_u32 (sa0->spi);
          esp_align = sa0->esp_block_align;
          icv_sz = sa0->integ_icv_size;
          iv_sz = sa0->crypto_iv_size;
          is_async = im->async_mode | ipsec_sa_is_set_IS_ASYNC (sa0);

      if (PREDICT_FALSE (~0 == sa0->thread_index))
          /* this is the first packet to use this SA, claim the SA
           * for this thread. this could happen simultaneously on
           * another thread */
          clib_atomic_cmp_and_swap (&sa0->thread_index, ~0,
                                    ipsec_sa_assign_thread (thread_index));

      if (PREDICT_FALSE (thread_index != sa0->thread_index))
          vnet_buffer (b[0])->ipsec.thread_index = sa0->thread_index;
          err = ESP_ENCRYPT_ERROR_HANDOFF;
          esp_set_next_index (b[0], node, err, n_noop, noop_nexts,

      n_bufs = vlib_buffer_chain_linearize (vm, b[0]);
          err = ESP_ENCRYPT_ERROR_NO_BUFFERS;
          esp_set_next_index (b[0], node, err, n_noop, noop_nexts, drop_next);

      /* find the last buffer in the chain */
      while (lb->flags & VLIB_BUFFER_NEXT_PRESENT)
        lb = vlib_get_buffer (vm, lb->next_buffer);

      if (PREDICT_FALSE (esp_seq_advance (sa0)))
          err = ESP_ENCRYPT_ERROR_SEQ_CYCLED;
          esp_set_next_index (b[0], node, err, n_noop, noop_nexts, drop_next);

      if (ipsec_sa_is_set_IS_TUNNEL (sa0))
          payload = vlib_buffer_get_current (b[0]);
          next_hdr_ptr = esp_add_footer_and_icv (
            vm, &lb, esp_align, icv_sz, node, buffer_data_size,
            vlib_buffer_length_in_chain (vm, b[0]));
              err = ESP_ENCRYPT_ERROR_NO_BUFFERS;
              esp_set_next_index (b[0], node, err, n_noop, noop_nexts,

          b[0]->flags &= ~VLIB_BUFFER_TOTAL_LENGTH_VALID;
          payload_len = b[0]->current_length;
          payload_len_total = vlib_buffer_length_in_chain (vm, b[0]);
751 esp = (esp_header_t *) (payload - hdr_len);
753 /* optional UDP header */
754 if (ipsec_sa_is_set_UDP_ENCAP (sa0))
756 hdr_len += sizeof (udp_header_t);
757 esp_fill_udp_hdr (sa0, (udp_header_t *) (payload - hdr_len),
758 payload_len_total + hdr_len);
762 if (ipsec_sa_is_set_IS_TUNNEL_V6 (sa0))
765 u16 len = sizeof (ip6_header_t);
767 ip6 = (ip6_header_t *) (payload - hdr_len);
768 clib_memcpy_fast (ip6, &sa0->ip6_hdr, sizeof (ip6_header_t));
770 if (VNET_LINK_IP6 == lt)
772 *next_hdr_ptr = IP_PROTOCOL_IPV6;
773 tunnel_encap_fixup_6o6 (sa0->tunnel_flags,
774 (const ip6_header_t *) payload,
777 else if (VNET_LINK_IP4 == lt)
779 *next_hdr_ptr = IP_PROTOCOL_IP_IN_IP;
780 tunnel_encap_fixup_4o6 (sa0->tunnel_flags, b[0],
781 (const ip4_header_t *) payload, ip6);
783 else if (VNET_LINK_MPLS == lt)
785 *next_hdr_ptr = IP_PROTOCOL_MPLS_IN_IP;
786 tunnel_encap_fixup_mplso6 (
787 sa0->tunnel_flags, b[0],
788 (const mpls_unicast_header_t *) payload, ip6);
793 len = payload_len_total + hdr_len - len;
794 ip6->payload_length = clib_net_to_host_u16 (len);
795 b[0]->flags |= VNET_BUFFER_F_LOCALLY_ORIGINATED;
800 u16 len = sizeof (ip4_header_t);
802 ip4 = (ip4_header_t *) (payload - hdr_len);
803 clib_memcpy_fast (ip4, &sa0->ip4_hdr, sizeof (ip4_header_t));
805 if (VNET_LINK_IP6 == lt)
807 *next_hdr_ptr = IP_PROTOCOL_IPV6;
808 tunnel_encap_fixup_6o4_w_chksum (sa0->tunnel_flags,
809 (const ip6_header_t *)
812 else if (VNET_LINK_IP4 == lt)
814 *next_hdr_ptr = IP_PROTOCOL_IP_IN_IP;
815 tunnel_encap_fixup_4o4_w_chksum (sa0->tunnel_flags,
816 (const ip4_header_t *)
819 else if (VNET_LINK_MPLS == lt)
821 *next_hdr_ptr = IP_PROTOCOL_MPLS_IN_IP;
822 tunnel_encap_fixup_mplso4_w_chksum (
823 sa0->tunnel_flags, (const mpls_unicast_header_t *) payload,
829 len = payload_len_total + hdr_len;
830 esp_update_ip4_hdr (ip4, len, /* is_transport */ 0, 0);
836 sync_next[0] = dpo->dpoi_next_node;
837 vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = dpo->dpoi_index;
840 sync_next[0] = ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT;
841 b[0]->flags |= VNET_BUFFER_F_LOCALLY_ORIGINATED;
      else /* transport mode */
          u8 *l2_hdr, l2_len, *ip_hdr, ip_len;
          ip6_ext_header_t *ext_hdr;
          udp_header_t *udp = 0;
          u8 *old_ip_hdr = vlib_buffer_get_current (b[0]);

            (VNET_LINK_IP6 == lt ?
             esp_get_ip6_hdr_len ((ip6_header_t *) old_ip_hdr, &ext_hdr) :
             ip4_header_bytes ((ip4_header_t *) old_ip_hdr));

          vlib_buffer_advance (b[0], ip_len);
          payload = vlib_buffer_get_current (b[0]);
          next_hdr_ptr = esp_add_footer_and_icv (
            vm, &lb, esp_align, icv_sz, node, buffer_data_size,
            vlib_buffer_length_in_chain (vm, b[0]));
              err = ESP_ENCRYPT_ERROR_NO_BUFFERS;
              esp_set_next_index (b[0], node, err, n_noop, noop_nexts,

          b[0]->flags &= ~VLIB_BUFFER_TOTAL_LENGTH_VALID;
          payload_len = b[0]->current_length;
          payload_len_total = vlib_buffer_length_in_chain (vm, b[0]);

          hdr_len += sizeof (*esp);
          esp = (esp_header_t *) (payload - hdr_len);

          /* optional UDP header */
          if (ipsec_sa_is_set_UDP_ENCAP (sa0))
              hdr_len += sizeof (udp_header_t);
              udp = (udp_header_t *) (payload - hdr_len);

          ip_hdr = payload - hdr_len;

          l2_len = vnet_buffer (b[0])->ip.save_rewrite_length;

          l2_hdr = payload - hdr_len;

          /* copy l2 and ip header */
          clib_memcpy_le32 (l2_hdr, old_ip_hdr - l2_len, l2_len);

          if (VNET_LINK_IP6 == lt)
              ip6_header_t *ip6 = (ip6_header_t *) (old_ip_hdr);
              if (PREDICT_TRUE (NULL == ext_hdr))
                  *next_hdr_ptr = ip6->protocol;
                  ip6->protocol = IP_PROTOCOL_IPSEC_ESP;

                  *next_hdr_ptr = ext_hdr->next_hdr;
                  ext_hdr->next_hdr = IP_PROTOCOL_IPSEC_ESP;

              ip6->payload_length =
                clib_host_to_net_u16 (payload_len_total + hdr_len - l2_len -
                                      sizeof (ip6_header_t));
          else if (VNET_LINK_IP4 == lt)
              ip4_header_t *ip4 = (ip4_header_t *) (old_ip_hdr);
              *next_hdr_ptr = ip4->protocol;
              len = payload_len_total + hdr_len - l2_len;

                  esp_update_ip4_hdr (ip4, len, /* is_transport */ 1, 1);
                  udp_len = len - ip_len;

                  esp_update_ip4_hdr (ip4, len, /* is_transport */ 1, 0);

              clib_memcpy_le64 (ip_hdr, old_ip_hdr, ip_len);

              esp_fill_udp_hdr (sa0, udp, udp_len);

          sync_next[0] = ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT;

          crypto_ops = &ptd->chained_crypto_ops;
          integ_ops = &ptd->chained_integ_ops;

          crypto_ops = &ptd->crypto_ops;
          integ_ops = &ptd->integ_ops;

      esp->seq = clib_net_to_host_u32 (sa0->seq);
          async_op = sa0->crypto_async_enc_op_id;

          /* get a frame for this op if we don't yet have one or it's full */
          if (NULL == async_frames[async_op] ||
              vnet_crypto_async_frame_is_full (async_frames[async_op]))
              async_frames[async_op] =
                vnet_crypto_async_get_frame (vm, async_op);
              /* Save the frame to the list we'll submit at the end */
              vec_add1 (ptd->async_frames, async_frames[async_op]);

          esp_prepare_async_frame (vm, ptd, async_frames[async_op], sa0, b[0],
                                   esp, payload, payload_len, iv_sz, icv_sz,
                                   from[b - bufs], sync_next[0], hdr_len,
                                   async_next_node, lb);

        esp_prepare_sync_op (vm, ptd, crypto_ops, integ_ops, sa0, sa0->seq_hi,
                             payload, payload_len, iv_sz, icv_sz, n_sync, b,

      vlib_buffer_advance (b[0], 0LL - hdr_len);

      current_sa_packets += 1;
      current_sa_bytes += payload_len_total;

      if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
          esp_encrypt_trace_t *tr = vlib_add_trace (vm, node, b[0],
          if (INDEX_INVALID == sa_index0)
            clib_memset_u8 (tr, 0xff, sizeof (*tr));

              tr->sa_index = sa_index0;
              tr->sa_seq_hi = sa0->seq_hi;
              tr->udp_encap = ipsec_sa_is_set_UDP_ENCAP (sa0);
              tr->crypto_alg = sa0->crypto_alg;
              tr->integ_alg = sa0->integ_alg;

      if (ESP_ENCRYPT_ERROR_RX_PKTS != err)
          noop_bi[n_noop] = from[b - bufs];

          sync_bi[n_sync] = from[b - bufs];
          sync_bufs[n_sync] = b[0];

  if (INDEX_INVALID != current_sa_index)
    vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
                                     current_sa_index, current_sa_packets,

  esp_process_ops (vm, node, ptd->crypto_ops, sync_bufs, sync_nexts,
  esp_process_chained_ops (vm, node, ptd->chained_crypto_ops, sync_bufs,
                           sync_nexts, ptd->chunks, drop_next);

  esp_process_ops (vm, node, ptd->integ_ops, sync_bufs, sync_nexts,
  esp_process_chained_ops (vm, node, ptd->chained_integ_ops, sync_bufs,
                           sync_nexts, ptd->chunks, drop_next);

  vlib_buffer_enqueue_to_next (vm, node, sync_bi, sync_nexts, n_sync);

      /* submit all of the open frames */
      vnet_crypto_async_frame_t **async_frame;

      vec_foreach (async_frame, ptd->async_frames)
          if (vnet_crypto_async_submit_open_frame (vm, *async_frame) < 0)
              n_noop += esp_async_recycle_failed_submit (
                vm, *async_frame, node, ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR,
                n_sync, noop_bi, noop_nexts, drop_next);
              vnet_crypto_async_reset_frame (*async_frame);
              vnet_crypto_async_free_frame (vm, *async_frame);

  vlib_buffer_enqueue_to_next (vm, node, noop_bi, noop_nexts, n_noop);

  vlib_node_increment_counter (vm, node->node_index, ESP_ENCRYPT_ERROR_RX_PKTS,

  return frame->n_vectors;
}
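/*
 * Note on the submit loop above: if handing an async frame to the engine
 * fails, every packet in that frame is recycled onto the noop path with a
 * crypto engine error before the frame is reset and freed, so failed
 * submissions are accounted for rather than silently leaked.
 */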
always_inline uword
esp_encrypt_post_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
                         vlib_frame_t * frame)
{
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
  u16 nexts[VLIB_FRAME_SIZE], *next = nexts;
  u32 *from = vlib_frame_vector_args (frame);
  u32 n_left = frame->n_vectors;

  vlib_get_buffers (vm, from, b, n_left);

      vlib_prefetch_buffer_header (b[0], LOAD);
      vlib_prefetch_buffer_header (b[1], LOAD);
      vlib_prefetch_buffer_header (b[2], LOAD);
      vlib_prefetch_buffer_header (b[3], LOAD);

      vlib_prefetch_buffer_header (b[4], LOAD);
      vlib_prefetch_buffer_header (b[5], LOAD);
      vlib_prefetch_buffer_header (b[6], LOAD);
      vlib_prefetch_buffer_header (b[7], LOAD);

      next[0] = (esp_post_data (b[0]))->next_index;
      next[1] = (esp_post_data (b[1]))->next_index;
      next[2] = (esp_post_data (b[2]))->next_index;
      next[3] = (esp_post_data (b[3]))->next_index;

      if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE))
          if (b[0]->flags & VLIB_BUFFER_IS_TRACED)
              esp_encrypt_post_trace_t *tr = vlib_add_trace (vm, node, b[0],
              tr->next_index = next[0];

          if (b[1]->flags & VLIB_BUFFER_IS_TRACED)
              esp_encrypt_post_trace_t *tr = vlib_add_trace (vm, node, b[1],
              tr->next_index = next[1];

          if (b[2]->flags & VLIB_BUFFER_IS_TRACED)
              esp_encrypt_post_trace_t *tr = vlib_add_trace (vm, node, b[2],
              tr->next_index = next[2];

          if (b[3]->flags & VLIB_BUFFER_IS_TRACED)
              esp_encrypt_post_trace_t *tr = vlib_add_trace (vm, node, b[3],
              tr->next_index = next[3];

      next[0] = (esp_post_data (b[0]))->next_index;
      if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
          esp_encrypt_post_trace_t *tr = vlib_add_trace (vm, node, b[0],
          tr->next_index = next[0];

  vlib_node_increment_counter (vm, node->node_index,
                               ESP_ENCRYPT_ERROR_POST_RX_PKTS,

  vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);
  return frame->n_vectors;
}
VLIB_NODE_FN (esp4_encrypt_node) (vlib_main_t * vm,
                                  vlib_node_runtime_t * node,
                                  vlib_frame_t * from_frame)
  return esp_encrypt_inline (vm, node, from_frame, VNET_LINK_IP4, 0,
                             esp_encrypt_async_next.esp4_post_next);

VLIB_REGISTER_NODE (esp4_encrypt_node) = {
  .name = "esp4-encrypt",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_encrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN (esp_encrypt_error_strings),
  .error_strings = esp_encrypt_error_strings,

  .n_next_nodes = ESP_ENCRYPT_N_NEXT,
  .next_nodes = { [ESP_ENCRYPT_NEXT_DROP4] = "ip4-drop",
                  [ESP_ENCRYPT_NEXT_DROP6] = "ip6-drop",
                  [ESP_ENCRYPT_NEXT_DROP_MPLS] = "mpls-drop",
                  [ESP_ENCRYPT_NEXT_HANDOFF4] = "esp4-encrypt-handoff",
                  [ESP_ENCRYPT_NEXT_HANDOFF6] = "esp6-encrypt-handoff",
                  [ESP_ENCRYPT_NEXT_HANDOFF_MPLS] = "error-drop",
                  [ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT] = "interface-output" },

VLIB_NODE_FN (esp4_encrypt_post_node) (vlib_main_t * vm,
                                       vlib_node_runtime_t * node,
                                       vlib_frame_t * from_frame)
  return esp_encrypt_post_inline (vm, node, from_frame);

VLIB_REGISTER_NODE (esp4_encrypt_post_node) = {
  .name = "esp4-encrypt-post",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_post_encrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .sibling_of = "esp4-encrypt",

  .n_errors = ARRAY_LEN(esp_encrypt_error_strings),
  .error_strings = esp_encrypt_error_strings,

VLIB_NODE_FN (esp6_encrypt_node) (vlib_main_t * vm,
                                  vlib_node_runtime_t * node,
                                  vlib_frame_t * from_frame)
  return esp_encrypt_inline (vm, node, from_frame, VNET_LINK_IP6, 0,
                             esp_encrypt_async_next.esp6_post_next);

VLIB_REGISTER_NODE (esp6_encrypt_node) = {
  .name = "esp6-encrypt",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_encrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .sibling_of = "esp4-encrypt",

  .n_errors = ARRAY_LEN(esp_encrypt_error_strings),
  .error_strings = esp_encrypt_error_strings,

VLIB_NODE_FN (esp6_encrypt_post_node) (vlib_main_t * vm,
                                       vlib_node_runtime_t * node,
                                       vlib_frame_t * from_frame)
  return esp_encrypt_post_inline (vm, node, from_frame);

VLIB_REGISTER_NODE (esp6_encrypt_post_node) = {
  .name = "esp6-encrypt-post",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_post_encrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .sibling_of = "esp4-encrypt",

  .n_errors = ARRAY_LEN(esp_encrypt_error_strings),
  .error_strings = esp_encrypt_error_strings,
VLIB_NODE_FN (esp4_encrypt_tun_node) (vlib_main_t * vm,
                                      vlib_node_runtime_t * node,
                                      vlib_frame_t * from_frame)
  return esp_encrypt_inline (vm, node, from_frame, VNET_LINK_IP4, 1,
                             esp_encrypt_async_next.esp4_tun_post_next);

VLIB_REGISTER_NODE (esp4_encrypt_tun_node) = {
  .name = "esp4-encrypt-tun",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_encrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(esp_encrypt_error_strings),
  .error_strings = esp_encrypt_error_strings,

  .n_next_nodes = ESP_ENCRYPT_N_NEXT,
    [ESP_ENCRYPT_NEXT_DROP4] = "ip4-drop",
    [ESP_ENCRYPT_NEXT_DROP6] = "ip6-drop",
    [ESP_ENCRYPT_NEXT_DROP_MPLS] = "mpls-drop",
    [ESP_ENCRYPT_NEXT_HANDOFF4] = "esp4-encrypt-tun-handoff",
    [ESP_ENCRYPT_NEXT_HANDOFF6] = "esp6-encrypt-tun-handoff",
    [ESP_ENCRYPT_NEXT_HANDOFF_MPLS] = "esp-mpls-encrypt-tun-handoff",
    [ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT] = "adj-midchain-tx",

VLIB_NODE_FN (esp4_encrypt_tun_post_node) (vlib_main_t * vm,
                                           vlib_node_runtime_t * node,
                                           vlib_frame_t * from_frame)
  return esp_encrypt_post_inline (vm, node, from_frame);

VLIB_REGISTER_NODE (esp4_encrypt_tun_post_node) = {
  .name = "esp4-encrypt-tun-post",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_post_encrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .sibling_of = "esp4-encrypt-tun",

  .n_errors = ARRAY_LEN(esp_encrypt_error_strings),
  .error_strings = esp_encrypt_error_strings,

VLIB_NODE_FN (esp6_encrypt_tun_node) (vlib_main_t * vm,
                                      vlib_node_runtime_t * node,
                                      vlib_frame_t * from_frame)
  return esp_encrypt_inline (vm, node, from_frame, VNET_LINK_IP6, 1,
                             esp_encrypt_async_next.esp6_tun_post_next);

VLIB_REGISTER_NODE (esp6_encrypt_tun_node) = {
  .name = "esp6-encrypt-tun",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_encrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(esp_encrypt_error_strings),
  .error_strings = esp_encrypt_error_strings,

  .n_next_nodes = ESP_ENCRYPT_N_NEXT,
    [ESP_ENCRYPT_NEXT_DROP4] = "ip4-drop",
    [ESP_ENCRYPT_NEXT_DROP6] = "ip6-drop",
    [ESP_ENCRYPT_NEXT_DROP_MPLS] = "mpls-drop",
    [ESP_ENCRYPT_NEXT_HANDOFF4] = "esp4-encrypt-tun-handoff",
    [ESP_ENCRYPT_NEXT_HANDOFF6] = "esp6-encrypt-tun-handoff",
    [ESP_ENCRYPT_NEXT_HANDOFF_MPLS] = "esp-mpls-encrypt-tun-handoff",
    [ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT] = "adj-midchain-tx",

VLIB_NODE_FN (esp6_encrypt_tun_post_node) (vlib_main_t * vm,
                                           vlib_node_runtime_t * node,
                                           vlib_frame_t * from_frame)
  return esp_encrypt_post_inline (vm, node, from_frame);

VLIB_REGISTER_NODE (esp6_encrypt_tun_post_node) = {
  .name = "esp6-encrypt-tun-post",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_post_encrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .sibling_of = "esp-mpls-encrypt-tun",

  .n_errors = ARRAY_LEN (esp_encrypt_error_strings),
  .error_strings = esp_encrypt_error_strings,
VLIB_NODE_FN (esp_mpls_encrypt_tun_node)
(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *from_frame)
  return esp_encrypt_inline (vm, node, from_frame, VNET_LINK_MPLS, 1,
                             esp_encrypt_async_next.esp_mpls_tun_post_next);

VLIB_REGISTER_NODE (esp_mpls_encrypt_tun_node) = {
  .name = "esp-mpls-encrypt-tun",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_encrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(esp_encrypt_error_strings),
  .error_strings = esp_encrypt_error_strings,

  .n_next_nodes = ESP_ENCRYPT_N_NEXT,
    [ESP_ENCRYPT_NEXT_DROP4] = "ip4-drop",
    [ESP_ENCRYPT_NEXT_DROP6] = "ip6-drop",
    [ESP_ENCRYPT_NEXT_DROP_MPLS] = "mpls-drop",
    [ESP_ENCRYPT_NEXT_HANDOFF4] = "esp4-encrypt-tun-handoff",
    [ESP_ENCRYPT_NEXT_HANDOFF6] = "esp6-encrypt-tun-handoff",
    [ESP_ENCRYPT_NEXT_HANDOFF_MPLS] = "esp-mpls-encrypt-tun-handoff",
    [ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT] = "adj-midchain-tx",

VLIB_NODE_FN (esp_mpls_encrypt_tun_post_node)
(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *from_frame)
  return esp_encrypt_post_inline (vm, node, from_frame);

VLIB_REGISTER_NODE (esp_mpls_encrypt_tun_post_node) = {
  .name = "esp-mpls-encrypt-tun-post",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_post_encrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .sibling_of = "esp-mpls-encrypt-tun",

  .n_errors = ARRAY_LEN (esp_encrypt_error_strings),
  .error_strings = esp_encrypt_error_strings,
#ifndef CLIB_MARCH_VARIANT

static clib_error_t *
esp_encrypt_init (vlib_main_t *vm)
{
  ipsec_main_t *im = &ipsec_main;

  im->esp4_enc_fq_index =
    vlib_frame_queue_main_init (esp4_encrypt_node.index, 0);
  im->esp6_enc_fq_index =
    vlib_frame_queue_main_init (esp6_encrypt_node.index, 0);
  im->esp4_enc_tun_fq_index =
    vlib_frame_queue_main_init (esp4_encrypt_tun_node.index, 0);
  im->esp6_enc_tun_fq_index =
    vlib_frame_queue_main_init (esp6_encrypt_tun_node.index, 0);
  im->esp_mpls_enc_tun_fq_index =
    vlib_frame_queue_main_init (esp_mpls_encrypt_tun_node.index, 0);

  return 0;
}
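/*
 * Assumption worth noting: these frame queue indices back the
 * esp*-encrypt-handoff next nodes, so a packet that reaches a worker which
 * does not own the SA (the thread_index check in esp_encrypt_inline) can
 * be shipped to the owning thread.
 */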
VLIB_INIT_FUNCTION (esp_encrypt_init);

#endif

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */