/*
 * esp_encrypt.c : IPSec ESP encrypt node
 *
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vnet/vnet.h>
#include <vnet/api_errno.h>
#include <vnet/ip/ip.h>

#include <vnet/crypto/crypto.h>

#include <vnet/ipsec/ipsec.h>
#include <vnet/ipsec/ipsec_tun.h>
#include <vnet/ipsec/esp.h>
#include <vnet/tunnel/tunnel_dp.h>
#define foreach_esp_encrypt_next                                              \
  _ (DROP4, "ip4-drop")                                                       \
  _ (DROP6, "ip6-drop")                                                       \
  _ (DROP_MPLS, "mpls-drop")                                                  \
  _ (HANDOFF4, "handoff4")                                                    \
  _ (HANDOFF6, "handoff6")                                                    \
  _ (HANDOFF_MPLS, "handoff-mpls")                                            \
  _ (INTERFACE_OUTPUT, "interface-output")

#define _(v, s) ESP_ENCRYPT_NEXT_##v,
typedef enum
{
  foreach_esp_encrypt_next
#undef _
    ESP_ENCRYPT_N_NEXT,
} esp_encrypt_next_t;
#define foreach_esp_encrypt_error                                             \
  _ (RX_PKTS, "ESP pkts received")                                            \
  _ (POST_RX_PKTS, "ESP-post pkts received")                                  \
  _ (HANDOFF, "Hand-off")                                                     \
  _ (SEQ_CYCLED, "sequence number cycled (packet dropped)")                   \
  _ (CRYPTO_ENGINE_ERROR, "crypto engine error (packet dropped)")             \
  _ (CRYPTO_QUEUE_FULL, "crypto queue full (packet dropped)")                 \
  _ (NO_BUFFERS, "no buffers (packet dropped)")

typedef enum
{
#define _(sym, str) ESP_ENCRYPT_ERROR_##sym,
  foreach_esp_encrypt_error
#undef _
    ESP_ENCRYPT_N_ERROR,
} esp_encrypt_error_t;
static char *esp_encrypt_error_strings[] = {
#define _(sym, string) string,
  foreach_esp_encrypt_error
#undef _
};
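/* Note: the next-node enum, the error enum and the error strings above are
 * all expanded from the same foreach_ lists via the temporary _() macro
 * (the classic X-macro pattern), so adding one entry to a list keeps the
 * enum value and its description in sync automatically. */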
typedef struct
{
  u32 sa_index;
  u32 spi;
  u32 seq;
  u32 sa_seq_hi;
  u8 udp_encap;
  ipsec_crypto_alg_t crypto_alg;
  ipsec_integ_alg_t integ_alg;
} esp_encrypt_trace_t;

typedef struct
{
  u32 next_index;
} esp_encrypt_post_trace_t;
/* packet trace format function */
static u8 *
format_esp_encrypt_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  esp_encrypt_trace_t *t = va_arg (*args, esp_encrypt_trace_t *);

  s =
    format (s,
	    "esp: sa-index %d spi %u (0x%08x) seq %u sa-seq-hi %u crypto %U integrity %U%s",
	    t->sa_index, t->spi, t->spi, t->seq, t->sa_seq_hi,
	    format_ipsec_crypto_alg,
	    t->crypto_alg, format_ipsec_integ_alg, t->integ_alg,
	    t->udp_encap ? " udp-encap-enabled" : "");
  return s;
}
static u8 *
format_esp_post_encrypt_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  esp_encrypt_post_trace_t *t = va_arg (*args, esp_encrypt_post_trace_t *);

  s = format (s, "esp-post: next node index %u", t->next_index);
  return s;
}
/* pad packet in input buffer */
static_always_inline u8 *
esp_add_footer_and_icv (vlib_main_t *vm, vlib_buffer_t **last, u8 esp_align,
			u8 icv_sz, vlib_node_runtime_t *node,
			u16 buffer_data_size, uword total_len)
{
  static const u8 pad_data[ESP_MAX_BLOCK_SIZE] = {
    0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
    0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x00,
  };

  u16 min_length = total_len + sizeof (esp_footer_t);
  u16 new_length = round_pow2 (min_length, esp_align);
  u8 pad_bytes = new_length - min_length;
  esp_footer_t *f = (esp_footer_t *) (vlib_buffer_get_current (last[0]) +
				      last[0]->current_length + pad_bytes);
  u16 tail_sz = sizeof (esp_footer_t) + pad_bytes + icv_sz;

  if (last[0]->current_data + last[0]->current_length + tail_sz >
      buffer_data_size)
    {
      u32 tmp_bi = 0;
      if (vlib_buffer_alloc (vm, &tmp_bi, 1) != 1)
	return 0;

      vlib_buffer_t *tmp = vlib_get_buffer (vm, tmp_bi);
      last[0]->next_buffer = tmp_bi;
      last[0]->flags |= VLIB_BUFFER_NEXT_PRESENT;
      f = (esp_footer_t *) (vlib_buffer_get_current (tmp) + pad_bytes);
      tmp->current_length += tail_sz;
      last[0] = tmp;
    }
  else
    last[0]->current_length += tail_sz;

  f->pad_length = pad_bytes;
  if (pad_bytes)
    {
      ASSERT (pad_bytes <= ESP_MAX_BLOCK_SIZE);
      pad_bytes = clib_min (ESP_MAX_BLOCK_SIZE, pad_bytes);
      clib_memcpy_fast ((u8 *) f - pad_bytes, pad_data, pad_bytes);
    }

  return &f->next_header;
}
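/* The trailer built above follows RFC 4303: monotonic pad bytes
 * (0x01, 0x02, ...) followed by pad_length, next_header and the ICV.
 * For example, with esp_align of 16 and a 52-byte payload, min_length is
 * 54, new_length rounds up to 64, so 10 pad bytes are written and the
 * footer's pad_length field is set to 10. */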
static_always_inline void
esp_update_ip4_hdr (ip4_header_t * ip4, u16 len, int is_transport, int is_udp)
{
  ip_csum_t sum;
  u16 old_len;

  len = clib_net_to_host_u16 (len);
  old_len = ip4->length;
  ip4->length = len;

  if (is_transport)
    {
      u8 prot = is_udp ? IP_PROTOCOL_UDP : IP_PROTOCOL_IPSEC_ESP;

      sum = ip_csum_update (ip4->checksum, ip4->protocol,
			    prot, ip4_header_t, protocol);
      ip4->protocol = prot;

      sum = ip_csum_update (sum, old_len, len, ip4_header_t, length);
    }
  else
    sum = ip_csum_update (ip4->checksum, old_len, len, ip4_header_t, length);

  ip4->checksum = ip_csum_fold (sum);
}
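/* esp_update_ip4_hdr patches the length (and, in transport mode, the
 * protocol) fields with an incremental checksum update in the style of
 * RFC 1624 rather than recomputing the header checksum from scratch:
 * each ip_csum_update () call folds an old/new 16-bit value pair into the
 * running sum, which is then folded back into the header. */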
static_always_inline void
esp_fill_udp_hdr (ipsec_sa_t * sa, udp_header_t * udp, u16 len)
{
  clib_memcpy_fast (udp, &sa->udp_hdr, sizeof (udp_header_t));
  udp->length = clib_net_to_host_u16 (len);
}
static_always_inline u8
ext_hdr_is_pre_esp (u8 nexthdr)
{
#ifdef CLIB_HAVE_VEC128
  static const u8x16 ext_hdr_types = {
    IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS,
    IP_PROTOCOL_IPV6_ROUTE,
    IP_PROTOCOL_IPV6_FRAGMENTATION,
  };

  return !u8x16_is_all_zero (ext_hdr_types == u8x16_splat (nexthdr));
#else
  return (!(nexthdr ^ IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS) ||
	  !(nexthdr ^ IP_PROTOCOL_IPV6_ROUTE) ||
	  !(nexthdr ^ IP_PROTOCOL_IPV6_FRAGMENTATION));
#endif
}
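/* With 128-bit vectors the membership test above is branchless: nexthdr is
 * splatted into all 16 lanes and compared against a constant vector of the
 * extension-header protocol numbers; any equal lane makes the comparison
 * result non-zero. The scalar fallback tests the same three values. */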
static_always_inline u8
esp_get_ip6_hdr_len (ip6_header_t * ip6, ip6_ext_header_t ** ext_hdr)
{
  /* this code assumes that HbH, route and frag headers will be before
     others, if that is not the case, they will end up encrypted */
  u8 len = sizeof (ip6_header_t);
  ip6_ext_header_t *p;

  /* if the next header is not an extension header */
  if (ext_hdr_is_pre_esp (ip6->protocol) == 0)
    {
      *ext_hdr = NULL;
      return len;
    }

  p = (void *) (ip6 + 1);
  len += ip6_ext_header_len (p);

  while (ext_hdr_is_pre_esp (p->next_hdr))
    {
      p = ip6_ext_next_header (p);
      len += ip6_ext_header_len (p);
    }

  *ext_hdr = p;
  return len;
}
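/* Example: for | ip6 | hop-by-hop | frag | tcp | the returned length covers
 * ip6 plus both extension headers, and *ext_hdr ends up pointing at the
 * fragment header, so ESP is inserted after it. Per the assumption noted
 * above, a destination-options header placed after the fragment header
 * would be treated as payload and encrypted. */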
/* IPsec IV generation: IV requirements differ depending on the
 * encryption mode: IVs must be unpredictable for AES-CBC whereas they can
 * be predictable but should never be reused with the same key material
 * for CTR and GCM.
 * We use a packet counter as the IV for CTR and GCM, and to ensure the
 * IV is unpredictable for CBC, it is then encrypted using the same key
 * as the message. You can refer to NIST SP800-38a and NIST SP800-38d
 * for more details. */
static_always_inline void *
esp_generate_iv (ipsec_sa_t *sa, void *payload, int iv_sz)
{
  ASSERT (iv_sz >= sizeof (u64));
  u64 *iv = (u64 *) (payload - iv_sz);
  clib_memset_u8 (iv, 0, iv_sz);
  *iv = sa->iv_counter++;
  return iv;
}
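/* For CTR/GCM the 64-bit counter written here becomes the per-packet IV
 * and is later combined with the SA's salt into the nonce (roughly
 * salt | iv | ctr, see esp_ctr_nonce_t), which satisfies the uniqueness
 * requirement. For CBC these bytes fall inside the encrypted region and
 * the cipher is run with a zero IV, so the IV that actually appears on
 * the wire is the counter after encryption, i.e. unpredictable. */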
static_always_inline void
esp_process_chained_ops (vlib_main_t * vm, vlib_node_runtime_t * node,
			 vnet_crypto_op_t * ops, vlib_buffer_t * b[],
			 u16 * nexts, vnet_crypto_op_chunk_t * chunks,
			 u16 drop_next)
{
  u32 n_fail, n_ops = vec_len (ops);
  vnet_crypto_op_t *op = ops;

  if (n_ops == 0)
    return;

  n_fail = n_ops - vnet_crypto_process_chained_ops (vm, op, chunks, n_ops);

  while (n_fail)
    {
      ASSERT (op - ops < n_ops);

      if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
	{
	  u32 bi = op->user_data;
	  b[bi]->error = node->errors[ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR];
	  nexts[bi] = drop_next;
	  n_fail--;
	}
      op++;
    }
}
static_always_inline void
esp_process_ops (vlib_main_t * vm, vlib_node_runtime_t * node,
		 vnet_crypto_op_t * ops, vlib_buffer_t * b[], u16 * nexts,
		 u16 drop_next)
{
  u32 n_fail, n_ops = vec_len (ops);
  vnet_crypto_op_t *op = ops;

  if (n_ops == 0)
    return;

  n_fail = n_ops - vnet_crypto_process_ops (vm, op, n_ops);

  while (n_fail)
    {
      ASSERT (op - ops < n_ops);

      if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
	{
	  u32 bi = op->user_data;
	  b[bi]->error = node->errors[ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR];
	  nexts[bi] = drop_next;
	  n_fail--;
	}
      op++;
    }
}
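/* op->user_data was set to the buffer's slot in the sync arrays when the
 * op was created, so a failed op can be mapped back to its buffer here and
 * rerouted to the drop next node without disturbing successful packets. */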
static_always_inline u32
esp_encrypt_chain_crypto (vlib_main_t * vm, ipsec_per_thread_data_t * ptd,
			  ipsec_sa_t * sa0, vlib_buffer_t * b,
			  vlib_buffer_t * lb, u8 icv_sz, u8 * start,
			  u32 start_len, u16 * n_ch)
{
  vnet_crypto_op_chunk_t *ch;
  vlib_buffer_t *cb = b;
  u32 n_chunks = 1;
  u32 total_len;

  vec_add2 (ptd->chunks, ch, 1);
  total_len = ch->len = start_len;
  ch->src = ch->dst = start;
  cb = vlib_get_buffer (vm, cb->next_buffer);

  while (1)
    {
      vec_add2 (ptd->chunks, ch, 1);
      n_chunks += 1;
      if (lb == cb)
	total_len += ch->len = cb->current_length - icv_sz;
      else
	total_len += ch->len = cb->current_length;
      ch->src = ch->dst = vlib_buffer_get_current (cb);

      if (!(cb->flags & VLIB_BUFFER_NEXT_PRESENT))
	break;

      cb = vlib_get_buffer (vm, cb->next_buffer);
    }

  if (n_ch)
    *n_ch = n_chunks;

  return total_len;
}
static_always_inline u32
esp_encrypt_chain_integ (vlib_main_t * vm, ipsec_per_thread_data_t * ptd,
			 ipsec_sa_t * sa0, vlib_buffer_t * b,
			 vlib_buffer_t * lb, u8 icv_sz, u8 * start,
			 u32 start_len, u8 * digest, u16 * n_ch)
{
  vnet_crypto_op_chunk_t *ch;
  vlib_buffer_t *cb = b;
  u32 n_chunks = 1;
  u32 total_len;

  vec_add2 (ptd->chunks, ch, 1);
  total_len = ch->len = start_len;
  ch->src = start;
  cb = vlib_get_buffer (vm, cb->next_buffer);

  while (1)
    {
      vec_add2 (ptd->chunks, ch, 1);
      n_chunks += 1;
      if (lb == cb)
	{
	  total_len += ch->len = cb->current_length - icv_sz;
	  if (ipsec_sa_is_set_USE_ESN (sa0))
	    {
	      u32 seq_hi = clib_net_to_host_u32 (sa0->seq_hi);
	      clib_memcpy_fast (digest, &seq_hi, sizeof (seq_hi));
	      ch->len += sizeof (seq_hi);
	      total_len += sizeof (seq_hi);
	    }
	}
      else
	total_len += ch->len = cb->current_length;
      ch->src = vlib_buffer_get_current (cb);

      if (!(cb->flags & VLIB_BUFFER_NEXT_PRESENT))
	break;

      cb = vlib_get_buffer (vm, cb->next_buffer);
    }

  if (n_ch)
    *n_ch = n_chunks;

  return total_len;
}
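/* With extended sequence numbers the high 32 bits of the sequence counter
 * are not transmitted, but RFC 4303 requires them to be covered by the
 * ICV. They are therefore appended after the ciphertext for the integrity
 * computation only: ch->len and total_len grow by sizeof (seq_hi) while
 * the packet itself does not. */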
static_always_inline void
esp_prepare_sync_op (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
		     vnet_crypto_op_t **crypto_ops,
		     vnet_crypto_op_t **integ_ops, ipsec_sa_t *sa0, u32 seq_hi,
		     u8 *payload, u16 payload_len, u8 iv_sz, u8 icv_sz, u32 bi,
		     vlib_buffer_t **b, vlib_buffer_t *lb, u32 hdr_len,
		     esp_header_t *esp)
{
  if (sa0->crypto_enc_op_id)
    {
      vnet_crypto_op_t *op;
      vec_add2_aligned (crypto_ops[0], op, 1, CLIB_CACHE_LINE_BYTES);
      vnet_crypto_op_init (op, sa0->crypto_enc_op_id);
      u8 *crypto_start = payload;
      /* esp_add_footer_and_icv() in esp_encrypt_inline() makes sure we always
       * have enough space for ESP header and footer which includes ICV */
      ASSERT (payload_len > icv_sz);
      u16 crypto_len = payload_len - icv_sz;

      /* generate the IV in front of the payload */
      void *pkt_iv = esp_generate_iv (sa0, payload, iv_sz);

      op->key_index = sa0->crypto_key_index;
      op->user_data = bi;

      if (ipsec_sa_is_set_IS_CTR (sa0))
	{
	  /* construct nonce in a scratch space in front of the IP header */
	  esp_ctr_nonce_t *nonce =
	    (esp_ctr_nonce_t *) (pkt_iv - hdr_len - sizeof (*nonce));
	  if (ipsec_sa_is_set_IS_AEAD (sa0))
	    {
	      /* construct aad in a scratch space in front of the nonce */
	      op->aad = (u8 *) nonce - sizeof (esp_aead_t);
	      op->aad_len = esp_aad_fill (op->aad, esp, sa0, seq_hi);
	      op->tag = payload + crypto_len;
	      op->tag_len = 16;
	    }
	  else
	    {
	      nonce->ctr = clib_host_to_net_u32 (1);
	    }

	  nonce->salt = sa0->salt;
	  nonce->iv = *(u64 *) pkt_iv;
	  op->iv = (u8 *) nonce;
	}
      else
	{
	  /* construct zero iv in front of the IP header */
	  op->iv = pkt_iv - hdr_len - iv_sz;
	  clib_memset_u8 (op->iv, 0, iv_sz);
	  /* include iv field in crypto */
	  crypto_start -= iv_sz;
	  crypto_len += iv_sz;
	}

      if (lb != b[0])
	{
	  /* is chained */
	  op->flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
	  op->chunk_index = vec_len (ptd->chunks);
	  op->tag = vlib_buffer_get_tail (lb) - icv_sz;
	  esp_encrypt_chain_crypto (vm, ptd, sa0, b[0], lb, icv_sz,
				    crypto_start, crypto_len + icv_sz,
				    &op->n_chunks);
	}
      else
	{
	  /* not chained */
	  op->src = op->dst = crypto_start;
	  op->len = crypto_len;
	}
    }

  if (sa0->integ_op_id)
    {
      vnet_crypto_op_t *op;
      vec_add2_aligned (integ_ops[0], op, 1, CLIB_CACHE_LINE_BYTES);
      vnet_crypto_op_init (op, sa0->integ_op_id);
      op->src = payload - iv_sz - sizeof (esp_header_t);
      op->digest = payload + payload_len - icv_sz;
      op->key_index = sa0->integ_key_index;
      op->digest_len = icv_sz;
      op->len = payload_len - icv_sz + iv_sz + sizeof (esp_header_t);
      op->user_data = bi;

      if (lb != b[0])
	{
	  /* is chained */
	  op->flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
	  op->chunk_index = vec_len (ptd->chunks);
	  op->digest = vlib_buffer_get_tail (lb) - icv_sz;

	  esp_encrypt_chain_integ (vm, ptd, sa0, b[0], lb, icv_sz,
				   payload - iv_sz - sizeof (esp_header_t),
				   payload_len + iv_sz +
				   sizeof (esp_header_t), op->digest,
				   &op->n_chunks);
	}
      else if (ipsec_sa_is_set_USE_ESN (sa0))
	{
	  u32 tmp = clib_net_to_host_u32 (seq_hi);
	  clib_memcpy_fast (op->digest, &tmp, sizeof (seq_hi));
	  op->len += sizeof (seq_hi);
	}
    }
}
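/* Roughly, the area in front of the payload is used as scratch space:
 *
 *   [aad][nonce][ headers to be written ... ESP hdr ][IV][payload ...]
 *
 * The aad and nonce sit below the region where the IP/UDP/ESP headers
 * will be rebuilt, are only read by the crypto engine and are never
 * transmitted; the IV field directly in front of the payload is. */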
static_always_inline void
esp_prepare_async_frame (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
			 vnet_crypto_async_frame_t *async_frame,
			 ipsec_sa_t *sa, vlib_buffer_t *b, esp_header_t *esp,
			 u8 *payload, u32 payload_len, u8 iv_sz, u8 icv_sz,
			 u32 bi, u16 next, u32 hdr_len, u16 async_next,
			 vlib_buffer_t *lb)
{
  esp_post_data_t *post = esp_post_data (b);
  u8 *tag, *iv, *aad = 0;
  u8 flag = 0;
  u32 key_index;
  i16 crypto_start_offset, integ_start_offset;
  u16 crypto_total_len, integ_total_len;

  post->next_index = next;

  /* crypto */
  crypto_start_offset = integ_start_offset = payload - b->data;
  crypto_total_len = integ_total_len = payload_len - icv_sz;
  tag = payload + crypto_total_len;

  key_index = sa->linked_key_index;

  /* generate the IV in front of the payload */
  void *pkt_iv = esp_generate_iv (sa, payload, iv_sz);

  if (ipsec_sa_is_set_IS_CTR (sa))
    {
      /* construct nonce in a scratch space in front of the IP header */
      esp_ctr_nonce_t *nonce =
	(esp_ctr_nonce_t *) (pkt_iv - hdr_len - sizeof (*nonce));
      if (ipsec_sa_is_set_IS_AEAD (sa))
	{
	  /* construct aad in a scratch space in front of the nonce */
	  aad = (u8 *) nonce - sizeof (esp_aead_t);
	  esp_aad_fill (aad, esp, sa, sa->seq_hi);
	  key_index = sa->crypto_key_index;
	}
      else
	{
	  nonce->ctr = clib_host_to_net_u32 (1);
	}

      nonce->salt = sa->salt;
      nonce->iv = *(u64 *) pkt_iv;
      iv = (u8 *) nonce;
    }
  else
    {
      /* construct zero iv in front of the IP header */
      iv = pkt_iv - hdr_len - iv_sz;
      clib_memset_u8 (iv, 0, iv_sz);
      /* include iv field in crypto */
      crypto_start_offset -= iv_sz;
      crypto_total_len += iv_sz;
    }

  if (lb != b)
    {
      /* chain */
      flag |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
      tag = vlib_buffer_get_tail (lb) - icv_sz;
      crypto_total_len = esp_encrypt_chain_crypto (
	vm, ptd, sa, b, lb, icv_sz, b->data + crypto_start_offset,
	crypto_total_len + icv_sz, 0);
    }

  if (sa->integ_op_id)
    {
      integ_start_offset -= iv_sz + sizeof (esp_header_t);
      integ_total_len += iv_sz + sizeof (esp_header_t);

      if (b != lb)
	{
	  integ_total_len = esp_encrypt_chain_integ (
	    vm, ptd, sa, b, lb, icv_sz,
	    payload - iv_sz - sizeof (esp_header_t),
	    payload_len + iv_sz + sizeof (esp_header_t), tag, 0);
	}
      else if (ipsec_sa_is_set_USE_ESN (sa))
	{
	  u32 seq_hi = clib_net_to_host_u32 (sa->seq_hi);
	  clib_memcpy_fast (tag, &seq_hi, sizeof (seq_hi));
	  integ_total_len += sizeof (seq_hi);
	}
    }

  /* this always succeeds because we know the frame is not full */
  vnet_crypto_async_add_to_frame (vm, async_frame, key_index, crypto_total_len,
				  integ_total_len - crypto_total_len,
				  crypto_start_offset, integ_start_offset, bi,
				  async_next, iv, tag, aad, flag);
}
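/* For the async path the chosen next node is stashed in the buffer's
 * esp_post_data (post->next_index above) so that, once the crypto engine
 * completes the frame, the esp-*-post node can read it back and enqueue
 * the packet exactly as if it had been processed synchronously. */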
always_inline uword
esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
		    vlib_frame_t *frame, vnet_link_t lt, int is_tun,
		    u16 async_next_node)
{
  ipsec_main_t *im = &ipsec_main;
  ipsec_per_thread_data_t *ptd = vec_elt_at_index (im->ptd, vm->thread_index);
  u32 *from = vlib_frame_vector_args (frame);
  u32 n_left = frame->n_vectors;
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
  u32 thread_index = vm->thread_index;
  u16 buffer_data_size = vlib_buffer_get_default_data_size (vm);
  u32 current_sa_index = ~0, current_sa_packets = 0;
  u32 current_sa_bytes = 0, spi = 0;
  u8 esp_align = 4, iv_sz = 0, icv_sz = 0;
  ipsec_sa_t *sa0 = 0;
  vlib_buffer_t *lb;
  vnet_crypto_op_t **crypto_ops = &ptd->crypto_ops;
  vnet_crypto_op_t **integ_ops = &ptd->integ_ops;
  vnet_crypto_async_frame_t *async_frames[VNET_CRYPTO_ASYNC_OP_N_IDS];
  int is_async = im->async_mode;
  vnet_crypto_async_op_id_t async_op = ~0;
  u16 drop_next =
    (lt == VNET_LINK_IP6 ? ESP_ENCRYPT_NEXT_DROP6 :
     (lt == VNET_LINK_IP4 ? ESP_ENCRYPT_NEXT_DROP4 :
      ESP_ENCRYPT_NEXT_DROP_MPLS));
  u16 handoff_next = (lt == VNET_LINK_IP6 ?
		      ESP_ENCRYPT_NEXT_HANDOFF6 :
		      (lt == VNET_LINK_IP4 ? ESP_ENCRYPT_NEXT_HANDOFF4 :
		       ESP_ENCRYPT_NEXT_HANDOFF_MPLS));
  vlib_buffer_t *sync_bufs[VLIB_FRAME_SIZE];
  u16 sync_nexts[VLIB_FRAME_SIZE], *sync_next = sync_nexts, n_sync = 0;
  u16 async_nexts[VLIB_FRAME_SIZE], *async_next = async_nexts, n_async = 0;
  u16 noop_nexts[VLIB_FRAME_SIZE], *noop_next = noop_nexts, n_noop = 0;
  u32 sync_bi[VLIB_FRAME_SIZE];
  u32 noop_bi[VLIB_FRAME_SIZE];
  esp_encrypt_error_t err;

  vlib_get_buffers (vm, from, b, n_left);

  vec_reset_length (ptd->crypto_ops);
  vec_reset_length (ptd->integ_ops);
  vec_reset_length (ptd->chained_crypto_ops);
  vec_reset_length (ptd->chained_integ_ops);
  vec_reset_length (ptd->async_frames);
  vec_reset_length (ptd->chunks);
  clib_memset (async_frames, 0, sizeof (async_frames));
  while (n_left > 0)
    {
      u32 sa_index0;
      dpo_id_t *dpo;
      esp_header_t *esp;
      u8 *payload, *next_hdr_ptr;
      u16 payload_len, payload_len_total, n_bufs;
      u32 hdr_len;

      err = ESP_ENCRYPT_ERROR_RX_PKTS;

      if (n_left > 2)
	{
	  u8 *p;
	  vlib_prefetch_buffer_header (b[2], LOAD);
	  p = vlib_buffer_get_current (b[1]);
	  clib_prefetch_load (p);
	  p -= CLIB_CACHE_LINE_BYTES;
	  clib_prefetch_load (p);
	  /* speculate that the trailer goes in the first buffer */
	  CLIB_PREFETCH (vlib_buffer_get_tail (b[1]),
			 CLIB_CACHE_LINE_BYTES, LOAD);
	}

      if (is_tun)
	{
	  /* we are on a ipsec tunnel's feature arc */
	  vnet_buffer (b[0])->ipsec.sad_index =
	    sa_index0 = ipsec_tun_protect_get_sa_out
	    (vnet_buffer (b[0])->ip.adj_index[VLIB_TX]);
	}
      else
	sa_index0 = vnet_buffer (b[0])->ipsec.sad_index;
      if (sa_index0 != current_sa_index)
	{
	  if (current_sa_packets)
	    vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
					     current_sa_index,
					     current_sa_packets,
					     current_sa_bytes);
	  current_sa_packets = current_sa_bytes = 0;

	  sa0 = ipsec_sa_get (sa_index0);

	  /* fetch the second cacheline ASAP */
	  clib_prefetch_load (sa0->cacheline1);

	  current_sa_index = sa_index0;
	  spi = clib_net_to_host_u32 (sa0->spi);
	  esp_align = sa0->esp_block_align;
	  icv_sz = sa0->integ_icv_size;
	  iv_sz = sa0->crypto_iv_size;
	  is_async = im->async_mode | ipsec_sa_is_set_IS_ASYNC (sa0);
	}

      if (PREDICT_FALSE (~0 == sa0->thread_index))
	{
	  /* this is the first packet to use this SA, claim the SA
	   * for this thread. this could happen simultaneously on
	   * another thread */
	  clib_atomic_cmp_and_swap (&sa0->thread_index, ~0,
				    ipsec_sa_assign_thread (thread_index));
	}

      if (PREDICT_FALSE (thread_index != sa0->thread_index))
	{
	  vnet_buffer (b[0])->ipsec.thread_index = sa0->thread_index;
	  err = ESP_ENCRYPT_ERROR_HANDOFF;
	  esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
			      handoff_next);
	  goto trace;
	}
      lb = b[0];
      n_bufs = vlib_buffer_chain_linearize (vm, b[0]);
      if (n_bufs == 0)
	{
	  err = ESP_ENCRYPT_ERROR_NO_BUFFERS;
	  esp_set_next_index (b[0], node, err, n_noop, noop_nexts, drop_next);
	  goto trace;
	}

      if (n_bufs > 1)
	{
	  /* find last buffer in the chain */
	  while (lb->flags & VLIB_BUFFER_NEXT_PRESENT)
	    lb = vlib_get_buffer (vm, lb->next_buffer);
	}

      if (PREDICT_FALSE (esp_seq_advance (sa0)))
	{
	  err = ESP_ENCRYPT_ERROR_SEQ_CYCLED;
	  esp_set_next_index (b[0], node, err, n_noop, noop_nexts, drop_next);
	  goto trace;
	}
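      /* A failed esp_seq_advance () means this SA's sequence number space
       * is exhausted; RFC 4303 forbids wrapping, so the packet is dropped
       * and the SA must be re-keyed before traffic can resume. */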
      /* space for IV */
      hdr_len = iv_sz;

      if (ipsec_sa_is_set_IS_TUNNEL (sa0))
	{
	  payload = vlib_buffer_get_current (b[0]);
	  next_hdr_ptr = esp_add_footer_and_icv (
	    vm, &lb, esp_align, icv_sz, node, buffer_data_size,
	    vlib_buffer_length_in_chain (vm, b[0]));
	  if (!next_hdr_ptr)
	    {
	      err = ESP_ENCRYPT_ERROR_NO_BUFFERS;
	      esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
				  drop_next);
	      goto trace;
	    }
	  b[0]->flags &= ~VLIB_BUFFER_TOTAL_LENGTH_VALID;
	  payload_len = b[0]->current_length;
	  payload_len_total = vlib_buffer_length_in_chain (vm, b[0]);

	  /* ESP header */
	  hdr_len += sizeof (*esp);
	  esp = (esp_header_t *) (payload - hdr_len);

	  /* optional UDP header */
	  if (ipsec_sa_is_set_UDP_ENCAP (sa0))
	    {
	      hdr_len += sizeof (udp_header_t);
	      esp_fill_udp_hdr (sa0, (udp_header_t *) (payload - hdr_len),
				payload_len_total + hdr_len);
	    }
	  /* IP header */
	  if (ipsec_sa_is_set_IS_TUNNEL_V6 (sa0))
	    {
	      ip6_header_t *ip6;
	      u16 len = sizeof (ip6_header_t);
	      hdr_len += len;
	      ip6 = (ip6_header_t *) (payload - hdr_len);
	      clib_memcpy_fast (ip6, &sa0->ip6_hdr, sizeof (ip6_header_t));

	      if (VNET_LINK_IP6 == lt)
		{
		  *next_hdr_ptr = IP_PROTOCOL_IPV6;
		  tunnel_encap_fixup_6o6 (sa0->tunnel_flags,
					  (const ip6_header_t *) payload,
					  ip6);
		}
	      else if (VNET_LINK_IP4 == lt)
		{
		  *next_hdr_ptr = IP_PROTOCOL_IP_IN_IP;
		  tunnel_encap_fixup_4o6 (sa0->tunnel_flags, b[0],
					  (const ip4_header_t *) payload,
					  ip6);
		}
	      else if (VNET_LINK_MPLS == lt)
		{
		  *next_hdr_ptr = IP_PROTOCOL_MPLS_IN_IP;
		  tunnel_encap_fixup_mplso6 (
		    sa0->tunnel_flags, b[0],
		    (const mpls_unicast_header_t *) payload, ip6);
		}
	      else
		ASSERT (0);

	      len = payload_len_total + hdr_len - len;
	      ip6->payload_length = clib_net_to_host_u16 (len);
	      b[0]->flags |= VNET_BUFFER_F_LOCALLY_ORIGINATED;
	    }
	  else
	    {
	      ip4_header_t *ip4;
	      u16 len = sizeof (ip4_header_t);
	      hdr_len += len;
	      ip4 = (ip4_header_t *) (payload - hdr_len);
	      clib_memcpy_fast (ip4, &sa0->ip4_hdr, sizeof (ip4_header_t));

	      if (VNET_LINK_IP6 == lt)
		{
		  *next_hdr_ptr = IP_PROTOCOL_IPV6;
		  tunnel_encap_fixup_6o4_w_chksum (sa0->tunnel_flags,
						   (const ip6_header_t *)
						   payload, ip4);
		}
	      else if (VNET_LINK_IP4 == lt)
		{
		  *next_hdr_ptr = IP_PROTOCOL_IP_IN_IP;
		  tunnel_encap_fixup_4o4_w_chksum (sa0->tunnel_flags,
						   (const ip4_header_t *)
						   payload, ip4);
		}
	      else if (VNET_LINK_MPLS == lt)
		{
		  *next_hdr_ptr = IP_PROTOCOL_MPLS_IN_IP;
		  tunnel_encap_fixup_mplso4_w_chksum (
		    sa0->tunnel_flags, (const mpls_unicast_header_t *) payload,
		    ip4);
		}
	      else
		ASSERT (0);

	      len = payload_len_total + hdr_len;
	      esp_update_ip4_hdr (ip4, len, /* is_transport */ 0, 0);
	    }
	  dpo = &sa0->dpo;
	  if (!is_tun)
	    {
	      sync_next[0] = dpo->dpoi_next_node;
	      vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = dpo->dpoi_index;
	    }
	  else
	    sync_next[0] = ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT;
	  b[0]->flags |= VNET_BUFFER_F_LOCALLY_ORIGINATED;
	}
860 else /* transport mode */
862 u8 *l2_hdr, l2_len, *ip_hdr, ip_len;
863 ip6_ext_header_t *ext_hdr;
864 udp_header_t *udp = 0;
866 u8 *old_ip_hdr = vlib_buffer_get_current (b[0]);
869 (VNET_LINK_IP6 == lt ?
870 esp_get_ip6_hdr_len ((ip6_header_t *) old_ip_hdr, &ext_hdr) :
871 ip4_header_bytes ((ip4_header_t *) old_ip_hdr));
873 vlib_buffer_advance (b[0], ip_len);
874 payload = vlib_buffer_get_current (b[0]);
875 next_hdr_ptr = esp_add_footer_and_icv (
876 vm, &lb, esp_align, icv_sz, node, buffer_data_size,
877 vlib_buffer_length_in_chain (vm, b[0]));
880 err = ESP_ENCRYPT_ERROR_NO_BUFFERS;
881 esp_set_next_index (b[0], node, err, n_noop, noop_nexts,
886 b[0]->flags &= ~VLIB_BUFFER_TOTAL_LENGTH_VALID;
887 payload_len = b[0]->current_length;
888 payload_len_total = vlib_buffer_length_in_chain (vm, b[0]);
891 hdr_len += sizeof (*esp);
892 esp = (esp_header_t *) (payload - hdr_len);
894 /* optional UDP header */
895 if (ipsec_sa_is_set_UDP_ENCAP (sa0))
897 hdr_len += sizeof (udp_header_t);
898 udp = (udp_header_t *) (payload - hdr_len);
903 ip_hdr = payload - hdr_len;
908 l2_len = vnet_buffer (b[0])->ip.save_rewrite_length;
910 l2_hdr = payload - hdr_len;
912 /* copy l2 and ip header */
913 clib_memcpy_le32 (l2_hdr, old_ip_hdr - l2_len, l2_len);
	  if (VNET_LINK_IP6 == lt)
	    {
	      ip6_header_t *ip6 = (ip6_header_t *) (old_ip_hdr);
	      if (PREDICT_TRUE (NULL == ext_hdr))
		{
		  *next_hdr_ptr = ip6->protocol;
		  ip6->protocol = IP_PROTOCOL_IPSEC_ESP;
		}
	      else
		{
		  *next_hdr_ptr = ext_hdr->next_hdr;
		  ext_hdr->next_hdr = IP_PROTOCOL_IPSEC_ESP;
		}
	      ip6->payload_length =
		clib_host_to_net_u16 (payload_len_total + hdr_len - l2_len -
				      sizeof (ip6_header_t));
	    }
	  else if (VNET_LINK_IP4 == lt)
	    {
	      u16 len;
	      ip4_header_t *ip4 = (ip4_header_t *) (old_ip_hdr);
	      *next_hdr_ptr = ip4->protocol;
	      len = payload_len_total + hdr_len - l2_len;
	      if (udp)
		{
		  esp_update_ip4_hdr (ip4, len, /* is_transport */ 1, 1);
		  udp_len = len - ip_len;
		}
	      else
		esp_update_ip4_hdr (ip4, len, /* is_transport */ 1, 0);
	    }

	  clib_memcpy_le64 (ip_hdr, old_ip_hdr, ip_len);

	  if (udp)
	    esp_fill_udp_hdr (sa0, udp, udp_len);

	  sync_next[0] = ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT;
	}
      if (lb != b[0])
	{
	  crypto_ops = &ptd->chained_crypto_ops;
	  integ_ops = &ptd->chained_integ_ops;
	}
      else
	{
	  crypto_ops = &ptd->crypto_ops;
	  integ_ops = &ptd->integ_ops;
	}

      esp->spi = spi;
      esp->seq = clib_net_to_host_u32 (sa0->seq);
      if (is_async)
	{
	  async_op = sa0->crypto_async_enc_op_id;

	  /* get a frame for this op if we don't yet have one or it's full */
	  if (NULL == async_frames[async_op] ||
	      vnet_crypto_async_frame_is_full (async_frames[async_op]))
	    {
	      async_frames[async_op] =
		vnet_crypto_async_get_frame (vm, async_op);
	      /* Save the frame to the list we'll submit at the end */
	      vec_add1 (ptd->async_frames, async_frames[async_op]);
	    }

	  esp_prepare_async_frame (vm, ptd, async_frames[async_op], sa0, b[0],
				   esp, payload, payload_len, iv_sz, icv_sz,
				   from[b - bufs], sync_next[0], hdr_len,
				   async_next_node, lb);
	}
      else
	esp_prepare_sync_op (vm, ptd, crypto_ops, integ_ops, sa0, sa0->seq_hi,
			     payload, payload_len, iv_sz, icv_sz, n_sync, b,
			     lb, hdr_len, esp);

      vlib_buffer_advance (b[0], 0LL - hdr_len);

      current_sa_packets += 1;
      current_sa_bytes += payload_len_total;
    trace:
      if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
	{
	  esp_encrypt_trace_t *tr = vlib_add_trace (vm, node, b[0],
						    sizeof (*tr));
	  tr->sa_index = sa_index0;
	  tr->spi = sa0->spi;
	  tr->seq = sa0->seq;
	  tr->sa_seq_hi = sa0->seq_hi;
	  tr->udp_encap = ipsec_sa_is_set_UDP_ENCAP (sa0);
	  tr->crypto_alg = sa0->crypto_alg;
	  tr->integ_alg = sa0->integ_alg;
	}

      /* next */
      if (ESP_ENCRYPT_ERROR_RX_PKTS != err)
	{
	  noop_bi[n_noop] = from[b - bufs];
	  n_noop++;
	  noop_next++;
	}
      else if (!is_async)
	{
	  sync_bi[n_sync] = from[b - bufs];
	  sync_bufs[n_sync] = b[0];
	  n_sync++;
	  sync_next++;
	}
      else
	{
	  n_async++;
	  async_next++;
	}
      n_left -= 1;
      b += 1;
    }
  vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
				   current_sa_index, current_sa_packets,
				   current_sa_bytes);
  if (n_sync)
    {
      esp_process_ops (vm, node, ptd->crypto_ops, sync_bufs, sync_nexts,
		       drop_next);
      esp_process_chained_ops (vm, node, ptd->chained_crypto_ops, sync_bufs,
			       sync_nexts, ptd->chunks, drop_next);

      esp_process_ops (vm, node, ptd->integ_ops, sync_bufs, sync_nexts,
		       drop_next);
      esp_process_chained_ops (vm, node, ptd->chained_integ_ops, sync_bufs,
			       sync_nexts, ptd->chunks, drop_next);
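      /* Ordering matters on the sync path: the encryption ops run first,
       * then the integrity ops, which were set up to cover the ESP header,
       * IV and the now-encrypted payload - i.e. encrypt-then-MAC, as ESP
       * requires. */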
      vlib_buffer_enqueue_to_next (vm, node, sync_bi, sync_nexts, n_sync);
    }
  if (n_async)
    {
      /* submit all of the open frames */
      vnet_crypto_async_frame_t **async_frame;

      vec_foreach (async_frame, ptd->async_frames)
	{
	  if (vnet_crypto_async_submit_open_frame (vm, *async_frame) < 0)
	    {
	      n_noop += esp_async_recycle_failed_submit (
		vm, *async_frame, node, ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR,
		n_sync, noop_bi, noop_nexts, drop_next);
	      vnet_crypto_async_reset_frame (*async_frame);
	      vnet_crypto_async_free_frame (vm, *async_frame);
	    }
	}
    }
  if (n_noop)
    vlib_buffer_enqueue_to_next (vm, node, noop_bi, noop_nexts, n_noop);

  vlib_node_increment_counter (vm, node->node_index, ESP_ENCRYPT_ERROR_RX_PKTS,
			       frame->n_vectors);

  return frame->n_vectors;
}
always_inline uword
esp_encrypt_post_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
			 vlib_frame_t * frame)
{
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
  u16 nexts[VLIB_FRAME_SIZE], *next = nexts;
  u32 *from = vlib_frame_vector_args (frame);
  u32 n_left = frame->n_vectors;

  vlib_get_buffers (vm, from, b, n_left);

  if (n_left >= 4)
    {
      vlib_prefetch_buffer_header (b[0], LOAD);
      vlib_prefetch_buffer_header (b[1], LOAD);
      vlib_prefetch_buffer_header (b[2], LOAD);
      vlib_prefetch_buffer_header (b[3], LOAD);
    }

  while (n_left > 8)
    {
      vlib_prefetch_buffer_header (b[4], LOAD);
      vlib_prefetch_buffer_header (b[5], LOAD);
      vlib_prefetch_buffer_header (b[6], LOAD);
      vlib_prefetch_buffer_header (b[7], LOAD);

      next[0] = (esp_post_data (b[0]))->next_index;
      next[1] = (esp_post_data (b[1]))->next_index;
      next[2] = (esp_post_data (b[2]))->next_index;
      next[3] = (esp_post_data (b[3]))->next_index;

      if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE))
	{
	  if (b[0]->flags & VLIB_BUFFER_IS_TRACED)
	    {
	      esp_encrypt_post_trace_t *tr = vlib_add_trace (vm, node, b[0],
							     sizeof (*tr));
	      tr->next_index = next[0];
	    }
	  if (b[1]->flags & VLIB_BUFFER_IS_TRACED)
	    {
	      esp_encrypt_post_trace_t *tr = vlib_add_trace (vm, node, b[1],
							     sizeof (*tr));
	      tr->next_index = next[1];
	    }
	  if (b[2]->flags & VLIB_BUFFER_IS_TRACED)
	    {
	      esp_encrypt_post_trace_t *tr = vlib_add_trace (vm, node, b[2],
							     sizeof (*tr));
	      tr->next_index = next[2];
	    }
	  if (b[3]->flags & VLIB_BUFFER_IS_TRACED)
	    {
	      esp_encrypt_post_trace_t *tr = vlib_add_trace (vm, node, b[3],
							     sizeof (*tr));
	      tr->next_index = next[3];
	    }
	}

      b += 4;
      next += 4;
      n_left -= 4;
    }

  while (n_left > 0)
    {
      next[0] = (esp_post_data (b[0]))->next_index;
      if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
	{
	  esp_encrypt_post_trace_t *tr = vlib_add_trace (vm, node, b[0],
							 sizeof (*tr));
	  tr->next_index = next[0];
	}

      b += 1;
      next += 1;
      n_left -= 1;
    }

  vlib_node_increment_counter (vm, node->node_index,
			       ESP_ENCRYPT_ERROR_POST_RX_PKTS,
			       frame->n_vectors);
  vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);
  return frame->n_vectors;
}
VLIB_NODE_FN (esp4_encrypt_node) (vlib_main_t * vm,
				  vlib_node_runtime_t * node,
				  vlib_frame_t * from_frame)
{
  return esp_encrypt_inline (vm, node, from_frame, VNET_LINK_IP4, 0,
			     esp_encrypt_async_next.esp4_post_next);
}

VLIB_REGISTER_NODE (esp4_encrypt_node) = {
  .name = "esp4-encrypt",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_encrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN (esp_encrypt_error_strings),
  .error_strings = esp_encrypt_error_strings,

  .n_next_nodes = ESP_ENCRYPT_N_NEXT,
  .next_nodes = { [ESP_ENCRYPT_NEXT_DROP4] = "ip4-drop",
		  [ESP_ENCRYPT_NEXT_DROP6] = "ip6-drop",
		  [ESP_ENCRYPT_NEXT_DROP_MPLS] = "mpls-drop",
		  [ESP_ENCRYPT_NEXT_HANDOFF4] = "esp4-encrypt-handoff",
		  [ESP_ENCRYPT_NEXT_HANDOFF6] = "esp6-encrypt-handoff",
		  [ESP_ENCRYPT_NEXT_HANDOFF_MPLS] = "error-drop",
		  [ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT] = "interface-output" },
};
VLIB_NODE_FN (esp4_encrypt_post_node) (vlib_main_t * vm,
				       vlib_node_runtime_t * node,
				       vlib_frame_t * from_frame)
{
  return esp_encrypt_post_inline (vm, node, from_frame);
}

VLIB_REGISTER_NODE (esp4_encrypt_post_node) = {
  .name = "esp4-encrypt-post",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_post_encrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .sibling_of = "esp4-encrypt",

  .n_errors = ARRAY_LEN (esp_encrypt_error_strings),
  .error_strings = esp_encrypt_error_strings,
};
VLIB_NODE_FN (esp6_encrypt_node) (vlib_main_t * vm,
				  vlib_node_runtime_t * node,
				  vlib_frame_t * from_frame)
{
  return esp_encrypt_inline (vm, node, from_frame, VNET_LINK_IP6, 0,
			     esp_encrypt_async_next.esp6_post_next);
}

VLIB_REGISTER_NODE (esp6_encrypt_node) = {
  .name = "esp6-encrypt",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_encrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .sibling_of = "esp4-encrypt",

  .n_errors = ARRAY_LEN (esp_encrypt_error_strings),
  .error_strings = esp_encrypt_error_strings,
};
VLIB_NODE_FN (esp6_encrypt_post_node) (vlib_main_t * vm,
				       vlib_node_runtime_t * node,
				       vlib_frame_t * from_frame)
{
  return esp_encrypt_post_inline (vm, node, from_frame);
}

VLIB_REGISTER_NODE (esp6_encrypt_post_node) = {
  .name = "esp6-encrypt-post",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_post_encrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .sibling_of = "esp4-encrypt",

  .n_errors = ARRAY_LEN (esp_encrypt_error_strings),
  .error_strings = esp_encrypt_error_strings,
};
VLIB_NODE_FN (esp4_encrypt_tun_node) (vlib_main_t * vm,
				      vlib_node_runtime_t * node,
				      vlib_frame_t * from_frame)
{
  return esp_encrypt_inline (vm, node, from_frame, VNET_LINK_IP4, 1,
			     esp_encrypt_async_next.esp4_tun_post_next);
}

VLIB_REGISTER_NODE (esp4_encrypt_tun_node) = {
  .name = "esp4-encrypt-tun",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_encrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN (esp_encrypt_error_strings),
  .error_strings = esp_encrypt_error_strings,

  .n_next_nodes = ESP_ENCRYPT_N_NEXT,
  .next_nodes = {
    [ESP_ENCRYPT_NEXT_DROP4] = "ip4-drop",
    [ESP_ENCRYPT_NEXT_DROP6] = "ip6-drop",
    [ESP_ENCRYPT_NEXT_DROP_MPLS] = "mpls-drop",
    [ESP_ENCRYPT_NEXT_HANDOFF4] = "esp4-encrypt-tun-handoff",
    [ESP_ENCRYPT_NEXT_HANDOFF6] = "esp6-encrypt-tun-handoff",
    [ESP_ENCRYPT_NEXT_HANDOFF_MPLS] = "esp-mpls-encrypt-tun-handoff",
    [ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT] = "adj-midchain-tx",
  },
};

VLIB_NODE_FN (esp4_encrypt_tun_post_node) (vlib_main_t * vm,
					   vlib_node_runtime_t * node,
					   vlib_frame_t * from_frame)
{
  return esp_encrypt_post_inline (vm, node, from_frame);
}

VLIB_REGISTER_NODE (esp4_encrypt_tun_post_node) = {
  .name = "esp4-encrypt-tun-post",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_post_encrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .sibling_of = "esp4-encrypt-tun",

  .n_errors = ARRAY_LEN (esp_encrypt_error_strings),
  .error_strings = esp_encrypt_error_strings,
};
VLIB_NODE_FN (esp6_encrypt_tun_node) (vlib_main_t * vm,
				      vlib_node_runtime_t * node,
				      vlib_frame_t * from_frame)
{
  return esp_encrypt_inline (vm, node, from_frame, VNET_LINK_IP6, 1,
			     esp_encrypt_async_next.esp6_tun_post_next);
}

VLIB_REGISTER_NODE (esp6_encrypt_tun_node) = {
  .name = "esp6-encrypt-tun",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_encrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN (esp_encrypt_error_strings),
  .error_strings = esp_encrypt_error_strings,

  .n_next_nodes = ESP_ENCRYPT_N_NEXT,
  .next_nodes = {
    [ESP_ENCRYPT_NEXT_DROP4] = "ip4-drop",
    [ESP_ENCRYPT_NEXT_DROP6] = "ip6-drop",
    [ESP_ENCRYPT_NEXT_DROP_MPLS] = "mpls-drop",
    [ESP_ENCRYPT_NEXT_HANDOFF4] = "esp4-encrypt-tun-handoff",
    [ESP_ENCRYPT_NEXT_HANDOFF6] = "esp6-encrypt-tun-handoff",
    [ESP_ENCRYPT_NEXT_HANDOFF_MPLS] = "esp-mpls-encrypt-tun-handoff",
    [ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT] = "adj-midchain-tx",
  },
};

VLIB_NODE_FN (esp6_encrypt_tun_post_node) (vlib_main_t * vm,
					   vlib_node_runtime_t * node,
					   vlib_frame_t * from_frame)
{
  return esp_encrypt_post_inline (vm, node, from_frame);
}

VLIB_REGISTER_NODE (esp6_encrypt_tun_post_node) = {
  .name = "esp6-encrypt-tun-post",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_post_encrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .sibling_of = "esp-mpls-encrypt-tun",

  .n_errors = ARRAY_LEN (esp_encrypt_error_strings),
  .error_strings = esp_encrypt_error_strings,
};
VLIB_NODE_FN (esp_mpls_encrypt_tun_node)
(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *from_frame)
{
  return esp_encrypt_inline (vm, node, from_frame, VNET_LINK_MPLS, 1,
			     esp_encrypt_async_next.esp_mpls_tun_post_next);
}

VLIB_REGISTER_NODE (esp_mpls_encrypt_tun_node) = {
  .name = "esp-mpls-encrypt-tun",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_encrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN (esp_encrypt_error_strings),
  .error_strings = esp_encrypt_error_strings,

  .n_next_nodes = ESP_ENCRYPT_N_NEXT,
  .next_nodes = {
    [ESP_ENCRYPT_NEXT_DROP4] = "ip4-drop",
    [ESP_ENCRYPT_NEXT_DROP6] = "ip6-drop",
    [ESP_ENCRYPT_NEXT_DROP_MPLS] = "mpls-drop",
    [ESP_ENCRYPT_NEXT_HANDOFF4] = "esp4-encrypt-tun-handoff",
    [ESP_ENCRYPT_NEXT_HANDOFF6] = "esp6-encrypt-tun-handoff",
    [ESP_ENCRYPT_NEXT_HANDOFF_MPLS] = "esp-mpls-encrypt-tun-handoff",
    [ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT] = "adj-midchain-tx",
  },
};

VLIB_NODE_FN (esp_mpls_encrypt_tun_post_node)
(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *from_frame)
{
  return esp_encrypt_post_inline (vm, node, from_frame);
}

VLIB_REGISTER_NODE (esp_mpls_encrypt_tun_post_node) = {
  .name = "esp-mpls-encrypt-tun-post",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_post_encrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .sibling_of = "esp-mpls-encrypt-tun",

  .n_errors = ARRAY_LEN (esp_encrypt_error_strings),
  .error_strings = esp_encrypt_error_strings,
};
typedef struct
{
  u32 sa_index;
} esp_no_crypto_trace_t;

static u8 *
format_esp_no_crypto_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  esp_no_crypto_trace_t *t = va_arg (*args, esp_no_crypto_trace_t *);

  s = format (s, "esp-no-crypto: sa-index %u", t->sa_index);

  return s;
}

typedef enum
{
  ESP_NO_CRYPTO_NEXT_DROP,
  ESP_NO_CRYPTO_N_NEXT,
} esp_no_crypto_next_t;

typedef enum
{
  ESP_NO_CRYPTO_ERROR_RX_PKTS,
} esp_no_crypto_error_t;

static char *esp_no_crypto_error_strings[] = {
  "Outbound ESP packets received",
};
always_inline uword
esp_no_crypto_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
		      vlib_frame_t * frame)
{
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
  u32 *from = vlib_frame_vector_args (frame);
  u32 n_left = frame->n_vectors;

  vlib_get_buffers (vm, from, b, n_left);

  while (n_left > 0)
    {
      u32 sa_index0;

      /* packets are always going to be dropped, but get the sa_index */
      sa_index0 = ipsec_tun_protect_get_sa_out
	(vnet_buffer (b[0])->ip.adj_index[VLIB_TX]);

      if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
	{
	  esp_no_crypto_trace_t *tr = vlib_add_trace (vm, node, b[0],
						      sizeof (*tr));
	  tr->sa_index = sa_index0;
	}

      n_left -= 1;
      b += 1;
    }

  vlib_node_increment_counter (vm, node->node_index,
			       ESP_NO_CRYPTO_ERROR_RX_PKTS, frame->n_vectors);

  vlib_buffer_enqueue_to_single_next (vm, node, from,
				      ESP_NO_CRYPTO_NEXT_DROP,
				      frame->n_vectors);

  return frame->n_vectors;
}
VLIB_NODE_FN (esp4_no_crypto_tun_node) (vlib_main_t * vm,
					vlib_node_runtime_t * node,
					vlib_frame_t * from_frame)
{
  return esp_no_crypto_inline (vm, node, from_frame);
}

VLIB_REGISTER_NODE (esp4_no_crypto_tun_node) =
{
  .name = "esp4-no-crypto",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_no_crypto_trace,
  .n_errors = ARRAY_LEN (esp_no_crypto_error_strings),
  .error_strings = esp_no_crypto_error_strings,
  .n_next_nodes = ESP_NO_CRYPTO_N_NEXT,
  .next_nodes = {
    [ESP_NO_CRYPTO_NEXT_DROP] = "ip4-drop",
  },
};

VLIB_NODE_FN (esp6_no_crypto_tun_node) (vlib_main_t * vm,
					vlib_node_runtime_t * node,
					vlib_frame_t * from_frame)
{
  return esp_no_crypto_inline (vm, node, from_frame);
}

VLIB_REGISTER_NODE (esp6_no_crypto_tun_node) =
{
  .name = "esp6-no-crypto",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_no_crypto_trace,
  .n_errors = ARRAY_LEN (esp_no_crypto_error_strings),
  .error_strings = esp_no_crypto_error_strings,
  .n_next_nodes = ESP_NO_CRYPTO_N_NEXT,
  .next_nodes = {
    [ESP_NO_CRYPTO_NEXT_DROP] = "ip6-drop",
  },
};
#ifndef CLIB_MARCH_VARIANT

static clib_error_t *
esp_encrypt_init (vlib_main_t *vm)
{
  ipsec_main_t *im = &ipsec_main;

  im->esp4_enc_fq_index =
    vlib_frame_queue_main_init (esp4_encrypt_node.index, 0);
  im->esp6_enc_fq_index =
    vlib_frame_queue_main_init (esp6_encrypt_node.index, 0);
  im->esp4_enc_tun_fq_index =
    vlib_frame_queue_main_init (esp4_encrypt_tun_node.index, 0);
  im->esp6_enc_tun_fq_index =
    vlib_frame_queue_main_init (esp6_encrypt_tun_node.index, 0);
  im->esp_mpls_enc_tun_fq_index =
    vlib_frame_queue_main_init (esp_mpls_encrypt_tun_node.index, 0);

  return 0;
}

VLIB_INIT_FUNCTION (esp_encrypt_init);

#endif

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */