/*
 * esp_encrypt.c : IPSec ESP encrypt node
 *
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vnet/vnet.h>
#include <vnet/api_errno.h>
#include <vnet/ip/ip.h>
#include <vnet/udp/udp.h>

#include <vnet/crypto/crypto.h>

#include <vnet/ipsec/ipsec.h>
#include <vnet/ipsec/ipsec_tun.h>
#include <vnet/ipsec/esp.h>
#define foreach_esp_encrypt_next                \
_(DROP, "error-drop")                           \
_(HANDOFF, "handoff")                           \
_(INTERFACE_OUTPUT, "interface-output")

#define _(v, s) ESP_ENCRYPT_NEXT_##v,
typedef enum
{
  foreach_esp_encrypt_next
#undef _
    ESP_ENCRYPT_N_NEXT,
} esp_encrypt_next_t;
#define foreach_esp_encrypt_error                               \
 _(RX_PKTS, "ESP pkts received")                                \
 _(SEQ_CYCLED, "sequence number cycled (packet dropped)")       \
 _(CRYPTO_ENGINE_ERROR, "crypto engine error (packet dropped)") \
 _(NO_BUFFERS, "no buffers (packet dropped)")                   \
 _(NO_TRAILER_SPACE, "no trailer space (packet dropped)")

typedef enum
{
#define _(sym,str) ESP_ENCRYPT_ERROR_##sym,
  foreach_esp_encrypt_error
#undef _
    ESP_ENCRYPT_N_ERROR,
} esp_encrypt_error_t;

static char *esp_encrypt_error_strings[] = {
#define _(sym,string) string,
  foreach_esp_encrypt_error
#undef _
};
typedef struct
{
  u32 sa_index;
  u32 spi;
  u32 seq;
  u32 sa_seq_hi;
  u8 udp_encap;
  ipsec_crypto_alg_t crypto_alg;
  ipsec_integ_alg_t integ_alg;
} esp_encrypt_trace_t;
/* packet trace format function */
static u8 *
format_esp_encrypt_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  esp_encrypt_trace_t *t = va_arg (*args, esp_encrypt_trace_t *);

  s =
    format (s,
	    "esp: sa-index %d spi %u (0x%08x) seq %u sa-seq-hi %u crypto %U integrity %U%s",
	    t->sa_index, t->spi, t->spi, t->seq, t->sa_seq_hi,
	    format_ipsec_crypto_alg,
	    t->crypto_alg, format_ipsec_integ_alg, t->integ_alg,
	    t->udp_encap ? " udp-encap-enabled" : "");
  return s;
}
/* pad packet in input buffer */
static_always_inline u8 *
esp_add_footer_and_icv (vlib_buffer_t * b, u8 block_size, u8 icv_sz,
			u16 * next, vlib_node_runtime_t * node,
			u16 buffer_data_size, uword total_len)
{
  static const u8 pad_data[ESP_MAX_BLOCK_SIZE] = {
    0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
    0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x00, 0x00,
  };

  u16 min_length = total_len + sizeof (esp_footer_t);
  u16 new_length = round_pow2 (min_length, block_size);
  u8 pad_bytes = new_length - min_length;
  esp_footer_t *f = (esp_footer_t *) (vlib_buffer_get_current (b) +
				      b->current_length + pad_bytes);
  u16 tail_sz = sizeof (esp_footer_t) + pad_bytes + icv_sz;

  /* the tail is written at current_data + current_length, so both must
   * be accounted for when checking that it fits in the buffer */
  if (b->current_data + b->current_length + tail_sz > buffer_data_size)
    {
      // TODO alloc new buffer
      b->error = node->errors[ESP_ENCRYPT_ERROR_NO_TRAILER_SPACE];
      next[0] = ESP_ENCRYPT_NEXT_DROP;
      return 0;
    }

  if (pad_bytes)
    {
      ASSERT (pad_bytes <= ESP_MAX_BLOCK_SIZE);
      pad_bytes = clib_min (ESP_MAX_BLOCK_SIZE, pad_bytes);
      clib_memcpy_fast ((u8 *) f - pad_bytes, pad_data, pad_bytes);
    }

  f->pad_length = pad_bytes;
  b->current_length += tail_sz;
  return &f->next_header;
}
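/*
 * Worked example of the padding arithmetic above (illustrative numbers,
 * not taken from the code): with block_size = 16 and total_len = 100,
 * min_length = 100 + 2 = 102, new_length rounds up to 112, so
 * pad_bytes = 10 and the monotonic pad bytes 0x01..0x0a are copied in
 * front of the footer, as RFC 4303 section 2.4 requires.
 */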
static_always_inline void
esp_update_ip4_hdr (ip4_header_t * ip4, u16 len, int is_transport, int is_udp)
{
  ip_csum_t sum;
  u16 old_len;

  len = clib_net_to_host_u16 (len);
  old_len = ip4->length;

  if (is_transport)
    {
      u8 prot = is_udp ? IP_PROTOCOL_UDP : IP_PROTOCOL_IPSEC_ESP;

      sum = ip_csum_update (ip4->checksum, ip4->protocol,
			    prot, ip4_header_t, protocol);
      ip4->protocol = prot;

      sum = ip_csum_update (sum, old_len, len, ip4_header_t, length);
    }
  else
    sum = ip_csum_update (ip4->checksum, old_len, len, ip4_header_t, length);

  ip4->length = len;
  ip4->checksum = ip_csum_fold (sum);
}
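/*
 * Note: the IPv4 checksum is adjusted incrementally (RFC 1624 style) via
 * ip_csum_update for the length and, in transport mode, the protocol
 * field, rather than being recomputed over the whole header.
 */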
static_always_inline void
esp_fill_udp_hdr (ipsec_sa_t * sa, udp_header_t * udp, u16 len)
{
  clib_memcpy_fast (udp, &sa->udp_hdr, sizeof (udp_header_t));
  udp->length = clib_net_to_host_u16 (len);
}
static_always_inline u8
ext_hdr_is_pre_esp (u8 nexthdr)
{
#ifdef CLIB_HAVE_VEC128
  static const u8x16 ext_hdr_types = {
    IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS,
    IP_PROTOCOL_IPV6_ROUTE,
    IP_PROTOCOL_IPV6_FRAGMENTATION,
  };

  return !u8x16_is_all_zero (ext_hdr_types == u8x16_splat (nexthdr));
#else
  return ((nexthdr == IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS) ||
	  (nexthdr == IP_PROTOCOL_IPV6_ROUTE) ||
	  (nexthdr == IP_PROTOCOL_IPV6_FRAGMENTATION));
#endif
}
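/*
 * The vector path above compares nexthdr against all three pre-ESP
 * extension header types with a single 16-lane compare. The unused lanes
 * of ext_hdr_types are zero, which is harmless: IP protocol 0 is
 * hop-by-hop options, for which a match is the correct answer anyway, so
 * no false positives are possible.
 */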
static_always_inline u8
esp_get_ip6_hdr_len (ip6_header_t * ip6, ip6_ext_header_t ** ext_hdr)
{
  /* this code assumes that HbH, route and frag headers will be before
     others, if that is not the case, they will end up encrypted */
  u8 len = sizeof (ip6_header_t);
  ip6_ext_header_t *p;

  /* if next packet doesn't have ext header */
  if (ext_hdr_is_pre_esp (ip6->protocol) == 0)
    {
      *ext_hdr = NULL;
      return len;
    }

  p = (void *) (ip6 + 1);
  len += ip6_ext_header_len (p);

  /* advance first, then count, so each header is counted exactly once */
  while (ext_hdr_is_pre_esp (p->next_hdr))
    {
      p = ip6_ext_next_header (p);
      len += ip6_ext_header_len (p);
    }

  *ext_hdr = p;
  return len;
}
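/*
 * esp_get_ip6_hdr_len returns the number of bytes that stay in the clear
 * (base header plus any leading HbH/routing/fragment headers) and points
 * *ext_hdr at the last such header so the caller can rewrite its
 * next_hdr field to ESP.
 */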
static_always_inline void
esp_process_chained_ops (vlib_main_t * vm, vlib_node_runtime_t * node,
			 vnet_crypto_op_t * ops, vlib_buffer_t * b[],
			 u16 * nexts, vnet_crypto_op_chunk_t * chunks)
{
  u32 n_fail, n_ops = vec_len (ops);
  vnet_crypto_op_t *op = ops;

  if (n_ops == 0)
    return;

  n_fail = n_ops - vnet_crypto_process_chained_ops (vm, op, chunks, n_ops);

  while (n_fail)
    {
      ASSERT (op - ops < n_ops);

      if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
	{
	  u32 bi = op->user_data;
	  b[bi]->error = node->errors[ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR];
	  nexts[bi] = ESP_ENCRYPT_NEXT_DROP;
	  n_fail--;
	}
      op++;
    }
}
static_always_inline void
esp_process_ops (vlib_main_t * vm, vlib_node_runtime_t * node,
		 vnet_crypto_op_t * ops, vlib_buffer_t * b[], u16 * nexts)
{
  u32 n_fail, n_ops = vec_len (ops);
  vnet_crypto_op_t *op = ops;

  if (n_ops == 0)
    return;

  n_fail = n_ops - vnet_crypto_process_ops (vm, op, n_ops);

  while (n_fail)
    {
      ASSERT (op - ops < n_ops);

      if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
	{
	  u32 bi = op->user_data;
	  b[bi]->error = node->errors[ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR];
	  nexts[bi] = ESP_ENCRYPT_NEXT_DROP;
	  n_fail--;
	}
      op++;
    }
}
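/*
 * vnet_crypto_process_ops returns the number of ops that completed
 * successfully, so in the common all-success case n_fail is zero and the
 * scan loop above is never entered; on failure the ops are walked only
 * until every failed one has been marked for drop.
 */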
typedef struct
{
  u32 salt;
  u64 iv;
} __clib_packed esp_gcm_nonce_t;

STATIC_ASSERT_SIZEOF (esp_gcm_nonce_t, 12);
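/*
 * Per RFC 4106 the GCM nonce is 12 bytes: a 4 byte salt taken from the
 * key material concatenated with the 8 byte per-packet IV that is also
 * carried in the ESP payload, hence the packed 12 byte layout asserted
 * above.
 */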
always_inline uword
esp_encrypt_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
		    vlib_frame_t * frame, int is_ip6, int is_tun)
{
  ipsec_main_t *im = &ipsec_main;
  ipsec_per_thread_data_t *ptd = vec_elt_at_index (im->ptd, vm->thread_index);
  u32 *from = vlib_frame_vector_args (frame);
  u32 n_left = frame->n_vectors;
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
  u16 nexts[VLIB_FRAME_SIZE], *next = nexts;
  esp_gcm_nonce_t nonces[VLIB_FRAME_SIZE], *nonce = nonces;
  u32 thread_index = vm->thread_index;
  u16 buffer_data_size = vlib_buffer_get_default_data_size (vm);
  u32 current_sa_index = ~0, current_sa_packets = 0;
  u32 current_sa_bytes = 0, spi = 0;
  u8 block_sz = 0, iv_sz = 0, icv_sz = 0;
  ipsec_sa_t *sa0 = 0;
  vnet_crypto_op_chunk_t *ch;
  vlib_buffer_t *lb;
  vnet_crypto_op_t **crypto_ops = &ptd->crypto_ops;
  vnet_crypto_op_t **integ_ops = &ptd->integ_ops;
  vlib_get_buffers (vm, from, b, n_left);
  vec_reset_length (ptd->crypto_ops);
  vec_reset_length (ptd->integ_ops);
  vec_reset_length (ptd->chained_crypto_ops);
  vec_reset_length (ptd->chained_integ_ops);
  vec_reset_length (ptd->chunks);

  while (n_left > 0)
    {
      u32 sa_index0;
      dpo_id_t *dpo;
      esp_header_t *esp;
      u8 *payload, *next_hdr_ptr;
      u16 payload_len, payload_len_total, n_bufs;
      u32 hdr_len, config_index;
      if (n_left > 2)
	{
	  u8 *p;
	  vlib_prefetch_buffer_header (b[2], LOAD);
	  p = vlib_buffer_get_current (b[1]);
	  CLIB_PREFETCH (p, CLIB_CACHE_LINE_BYTES, LOAD);
	  p -= CLIB_CACHE_LINE_BYTES;
	  CLIB_PREFETCH (p, CLIB_CACHE_LINE_BYTES, LOAD);
	}
      if (is_tun)
	{
	  /* we are on an ipsec tunnel's feature arc */
	  config_index = b[0]->current_config_index;
	  vnet_feature_next_u16 (&next[0], b[0]);
	  vnet_buffer (b[0])->ipsec.sad_index =
	    sa_index0 = ipsec_tun_protect_get_sa_out
	    (vnet_buffer (b[0])->ip.adj_index[VLIB_TX]);
	}
      else
	sa_index0 = vnet_buffer (b[0])->ipsec.sad_index;
      if (sa_index0 != current_sa_index)
	{
	  if (current_sa_packets)
	    vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
					     current_sa_index,
					     current_sa_packets,
					     current_sa_bytes);
	  current_sa_packets = current_sa_bytes = 0;

	  sa0 = pool_elt_at_index (im->sad, sa_index0);
	  current_sa_index = sa_index0;
	  spi = clib_net_to_host_u32 (sa0->spi);
	  block_sz = sa0->crypto_block_size;
	  icv_sz = sa0->integ_icv_size;
	  iv_sz = sa0->crypto_iv_size;
	}
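      /*
       * SA counters are batched: packets and bytes are accumulated
       * locally and flushed into ipsec_sa_counters only when the SA
       * changes (and once more after the loop), avoiding a counter
       * update per packet.
       */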
      if (PREDICT_FALSE (~0 == sa0->encrypt_thread_index))
	{
	  /* this is the first packet to use this SA, claim the SA
	   * for this thread. this could happen simultaneously on
	   * another thread */
	  clib_atomic_cmp_and_swap (&sa0->encrypt_thread_index, ~0,
				    ipsec_sa_assign_thread (thread_index));
	}

      if (PREDICT_FALSE (thread_index != sa0->encrypt_thread_index))
	{
	  next[0] = ESP_ENCRYPT_NEXT_HANDOFF;

	  if (is_tun)
	    {
	      b[0]->current_config_index = config_index;
	    }
	  goto trace;
	}
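      /*
       * An SA is owned by the first thread that touches it; packets that
       * arrive on another worker are handed off so the sequence number
       * and GCM IV counter are only ever advanced from a single thread.
       */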
      lb = b[0];
      n_bufs = vlib_buffer_chain_linearize (vm, b[0]);
      if (n_bufs == 0)
	{
	  b[0]->error = node->errors[ESP_ENCRYPT_ERROR_NO_BUFFERS];
	  next[0] = ESP_ENCRYPT_NEXT_DROP;
	  goto trace;
	}

      if (n_bufs > 1)
	{
	  crypto_ops = &ptd->chained_crypto_ops;
	  integ_ops = &ptd->chained_integ_ops;

	  /* find last buffer in the chain */
	  while (lb->flags & VLIB_BUFFER_NEXT_PRESENT)
	    lb = vlib_get_buffer (vm, lb->next_buffer);
	}
      else
	{
	  crypto_ops = &ptd->crypto_ops;
	  integ_ops = &ptd->integ_ops;
	}
      if (PREDICT_FALSE (esp_seq_advance (sa0)))
	{
	  b[0]->error = node->errors[ESP_ENCRYPT_ERROR_SEQ_CYCLED];
	  next[0] = ESP_ENCRYPT_NEXT_DROP;
	  goto trace;
	}

      /* space for IV */
      hdr_len = iv_sz;
      if (ipsec_sa_is_set_IS_TUNNEL (sa0))
	{
	  payload = vlib_buffer_get_current (b[0]);
	  next_hdr_ptr = esp_add_footer_and_icv (lb, block_sz, icv_sz,
						 next, node,
						 buffer_data_size,
						 vlib_buffer_length_in_chain
						 (vm, b[0]));
	  if (!next_hdr_ptr)
	    goto trace;

	  b[0]->flags &= ~VLIB_BUFFER_TOTAL_LENGTH_VALID;
	  payload_len = b[0]->current_length;
	  payload_len_total = vlib_buffer_length_in_chain (vm, b[0]);

	  /* ESP header */
	  hdr_len += sizeof (*esp);
	  esp = (esp_header_t *) (payload - hdr_len);

	  /* optional UDP header */
	  if (ipsec_sa_is_set_UDP_ENCAP (sa0))
	    {
	      hdr_len += sizeof (udp_header_t);
	      esp_fill_udp_hdr (sa0, (udp_header_t *) (payload - hdr_len),
				payload_len_total + hdr_len);
	    }

	  /* IP header */
	  if (ipsec_sa_is_set_IS_TUNNEL_V6 (sa0))
	    {
	      ip6_header_t *ip6;
	      u16 len = sizeof (ip6_header_t);
	      hdr_len += len;
	      ip6 = (ip6_header_t *) (payload - hdr_len);
	      clib_memcpy_fast (ip6, &sa0->ip6_hdr, len);
	      *next_hdr_ptr = (is_ip6 ?
			       IP_PROTOCOL_IPV6 : IP_PROTOCOL_IP_IN_IP);
	      len = payload_len_total + hdr_len - len;
	      ip6->payload_length = clib_net_to_host_u16 (len);
	    }
	  else
	    {
	      ip4_header_t *ip4;
	      u16 len = sizeof (ip4_header_t);
	      hdr_len += len;
	      ip4 = (ip4_header_t *) (payload - hdr_len);
	      clib_memcpy_fast (ip4, &sa0->ip4_hdr, len);
	      *next_hdr_ptr = (is_ip6 ?
			       IP_PROTOCOL_IPV6 : IP_PROTOCOL_IP_IN_IP);
	      len = payload_len_total + hdr_len;
	      esp_update_ip4_hdr (ip4, len, /* is_transport */ 0, 0);
	    }

	  dpo = &sa0->dpo;
	  if (!is_tun)
	    {
	      next[0] = dpo->dpoi_next_node;
	      vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = dpo->dpoi_index;
	    }
	}
      else			/* transport mode */
	{
	  u8 *l2_hdr, l2_len, *ip_hdr, ip_len;
	  ip6_ext_header_t *ext_hdr;
	  udp_header_t *udp = 0;
	  u8 *old_ip_hdr = vlib_buffer_get_current (b[0]);

	  ip_len = is_ip6 ?
	    esp_get_ip6_hdr_len ((ip6_header_t *) old_ip_hdr, &ext_hdr) :
	    ip4_header_bytes ((ip4_header_t *) old_ip_hdr);

	  vlib_buffer_advance (b[0], ip_len);
	  payload = vlib_buffer_get_current (b[0]);
	  next_hdr_ptr = esp_add_footer_and_icv (lb, block_sz, icv_sz,
						 next, node,
						 buffer_data_size,
						 vlib_buffer_length_in_chain
						 (vm, b[0]));
	  if (!next_hdr_ptr)
	    goto trace;

	  b[0]->flags &= ~VLIB_BUFFER_TOTAL_LENGTH_VALID;
	  payload_len = b[0]->current_length;
	  payload_len_total = vlib_buffer_length_in_chain (vm, b[0]);

	  /* ESP header */
	  hdr_len += sizeof (*esp);
	  esp = (esp_header_t *) (payload - hdr_len);

	  /* optional UDP header */
	  if (ipsec_sa_is_set_UDP_ENCAP (sa0))
	    {
	      hdr_len += sizeof (udp_header_t);
	      udp = (udp_header_t *) (payload - hdr_len);
	    }

	  /* IP header */
	  hdr_len += ip_len;
	  ip_hdr = payload - hdr_len;
	  /* L2 header */
	  if (!is_tun)
	    {
	      l2_len = vnet_buffer (b[0])->ip.save_rewrite_length;
	      hdr_len += l2_len;
	      l2_hdr = payload - hdr_len;

	      /* copy l2 and ip header */
	      clib_memcpy_le32 (l2_hdr, old_ip_hdr - l2_len, l2_len);
	    }
	  else
	    l2_len = 0;

	  if (is_ip6)
	    {
	      ip6_header_t *ip6 = (ip6_header_t *) (old_ip_hdr);
	      if (PREDICT_TRUE (NULL == ext_hdr))
		{
		  *next_hdr_ptr = ip6->protocol;
		  ip6->protocol = IP_PROTOCOL_IPSEC_ESP;
		}
	      else
		{
		  *next_hdr_ptr = ext_hdr->next_hdr;
		  ext_hdr->next_hdr = IP_PROTOCOL_IPSEC_ESP;
		}
	      ip6->payload_length =
		clib_host_to_net_u16 (payload_len_total + hdr_len - l2_len -
				      sizeof (ip6_header_t));
	    }
	  else
	    {
	      u16 len;
	      ip4_header_t *ip4 = (ip4_header_t *) (old_ip_hdr);
	      *next_hdr_ptr = ip4->protocol;
	      len = payload_len_total + hdr_len - l2_len;
	      if (udp)
		{
		  esp_update_ip4_hdr (ip4, len, /* is_transport */ 1, 1);
		  esp_fill_udp_hdr (sa0, udp, len - ip_len);
		}
	      else
		esp_update_ip4_hdr (ip4, len, /* is_transport */ 1, 0);
	    }

	  clib_memcpy_le64 (ip_hdr, old_ip_hdr, ip_len);

	  if (!is_tun)
	    next[0] = ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT;
	}
      esp->spi = spi;
      esp->seq = clib_net_to_host_u32 (sa0->seq);
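      /*
       * At most two crypto ops are queued per packet: a cipher op and,
       * for non-AEAD SAs, a separate integrity op. Ops are accumulated
       * for the whole frame and processed in batches after the loop.
       */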
      if (sa0->crypto_enc_op_id)
	{
	  vnet_crypto_op_t *op;
	  vec_add2_aligned (crypto_ops[0], op, 1, CLIB_CACHE_LINE_BYTES);
	  vnet_crypto_op_init (op, sa0->crypto_enc_op_id);

	  op->src = op->dst = payload;
	  op->key_index = sa0->crypto_key_index;
	  op->len = payload_len - icv_sz;
	  op->user_data = b - bufs;

	  if (ipsec_sa_is_set_IS_AEAD (sa0))
	    {
	      /*
	       * construct the AAD in a scratch space in front
	       * of the IP header.
	       */
	      op->aad = payload - hdr_len - sizeof (esp_aead_t);

	      esp_aad_fill (op, esp, sa0);

	      op->tag = payload + op->len;
	      op->tag_len = 16;

	      u64 *iv = (u64 *) (payload - iv_sz);
	      nonce->salt = sa0->salt;
	      nonce->iv = *iv = clib_host_to_net_u64 (sa0->gcm_iv_counter++);
	      op->iv = (u8 *) nonce;
	      nonce++;
	    }
	  else
	    {
	      op->iv = payload - iv_sz;
	      op->flags = VNET_CRYPTO_OP_FLAG_INIT_IV;
	    }
	  if (lb != b[0])
	    {
	      /* is chained */
	      vlib_buffer_t *cb = b[0];
	      op->flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
	      op->chunk_index = vec_len (ptd->chunks);
	      op->tag = vlib_buffer_get_tail (lb) - icv_sz;
	      vec_add2 (ptd->chunks, ch, 1);
	      ch->len = payload_len;
	      ch->src = ch->dst = payload;
	      cb = vlib_get_buffer (vm, cb->next_buffer);
	      op->n_chunks = 1;

	      while (1)
		{
		  vec_add2 (ptd->chunks, ch, 1);
		  op->n_chunks += 1;
		  if (lb == cb)
		    ch->len = cb->current_length - icv_sz;
		  else
		    ch->len = cb->current_length;
		  ch->src = ch->dst = vlib_buffer_get_current (cb);

		  if (!(cb->flags & VLIB_BUFFER_NEXT_PRESENT))
		    break;

		  cb = vlib_get_buffer (vm, cb->next_buffer);
		}
	    }
	}
      if (sa0->integ_op_id)
	{
	  vnet_crypto_op_t *op;
	  vec_add2_aligned (integ_ops[0], op, 1, CLIB_CACHE_LINE_BYTES);
	  vnet_crypto_op_init (op, sa0->integ_op_id);
	  op->src = payload - iv_sz - sizeof (esp_header_t);
	  op->digest = payload + payload_len - icv_sz;
	  op->key_index = sa0->integ_key_index;
	  op->digest_len = icv_sz;
	  op->len = payload_len - icv_sz + iv_sz + sizeof (esp_header_t);
	  op->user_data = b - bufs;

	  if (lb != b[0])
	    {
	      /* is chained */
	      op->flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
	      vlib_buffer_t *cb = b[0];
	      op->chunk_index = vec_len (ptd->chunks);
	      op->digest = vlib_buffer_get_tail (lb) - icv_sz;
	      vec_add2 (ptd->chunks, ch, 1);
	      ch->len = payload_len + iv_sz + sizeof (esp_header_t);
	      ch->src = payload - iv_sz - sizeof (esp_header_t);
	      cb = vlib_get_buffer (vm, cb->next_buffer);
	      op->n_chunks = 1;

	      while (1)
		{
		  vec_add2 (ptd->chunks, ch, 1);
		  op->n_chunks += 1;
		  if (lb == cb)
		    {
		      ch->len = cb->current_length - icv_sz;
		      if (ipsec_sa_is_set_USE_ESN (sa0))
			{
			  u32 seq_hi = clib_net_to_host_u32 (sa0->seq_hi);
			  clib_memcpy_fast (op->digest, &seq_hi,
					    sizeof (seq_hi));
			  ch->len += sizeof (seq_hi);
			}
		    }
		  else
		    ch->len = cb->current_length;
		  ch->src = vlib_buffer_get_current (cb);

		  if (!(cb->flags & VLIB_BUFFER_NEXT_PRESENT))
		    break;

		  cb = vlib_get_buffer (vm, cb->next_buffer);
		}
	    }
	  else if (ipsec_sa_is_set_USE_ESN (sa0))
	    {
	      u32 seq_hi = clib_net_to_host_u32 (sa0->seq_hi);
	      clib_memcpy_fast (op->digest, &seq_hi, sizeof (seq_hi));
	      op->len += sizeof (seq_hi);
	    }
	}
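      /*
       * For extended sequence numbers the high 32 bits are not sent on
       * the wire; they are staged in the bytes where the ICV will land so
       * the MAC covers them, then overwritten by the digest itself.
       */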
      vlib_buffer_advance (b[0], 0LL - hdr_len);

      current_sa_packets += 1;
      current_sa_bytes += payload_len_total;
    trace:
      if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
	{
	  esp_encrypt_trace_t *tr = vlib_add_trace (vm, node, b[0],
						    sizeof (*tr));
	  tr->sa_index = sa_index0;
	  tr->spi = sa0->spi;
	  tr->seq = sa0->seq;
	  tr->sa_seq_hi = sa0->seq_hi;
	  tr->udp_encap = ipsec_sa_is_set_UDP_ENCAP (sa0);
	  tr->crypto_alg = sa0->crypto_alg;
	  tr->integ_alg = sa0->integ_alg;
	}

      /* next */
      n_left -= 1;
      next += 1;
      b += 1;
    }
  vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
				   current_sa_index, current_sa_packets,
				   current_sa_bytes);

  esp_process_ops (vm, node, ptd->crypto_ops, bufs, nexts);
  esp_process_chained_ops (vm, node, ptd->chained_crypto_ops, bufs, nexts,
			   ptd->chunks);

  esp_process_ops (vm, node, ptd->integ_ops, bufs, nexts);
  esp_process_chained_ops (vm, node, ptd->chained_integ_ops, bufs, nexts,
			   ptd->chunks);

  vlib_node_increment_counter (vm, node->node_index,
			       ESP_ENCRYPT_ERROR_RX_PKTS, frame->n_vectors);

  vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);
  return frame->n_vectors;
}
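/*
 * The node functions below are thin wrappers that instantiate
 * esp_encrypt_inline with compile-time constant is_ip6/is_tun arguments,
 * so the per-mode branches above are resolved at compile time. Note the
 * processing order: cipher ops run before integrity ops, matching ESP's
 * encrypt-then-MAC construction (RFC 4303).
 */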
VLIB_NODE_FN (esp4_encrypt_node) (vlib_main_t * vm,
				  vlib_node_runtime_t * node,
				  vlib_frame_t * from_frame)
{
  return esp_encrypt_inline (vm, node, from_frame, 0 /* is_ip6 */ , 0);
}
VLIB_REGISTER_NODE (esp4_encrypt_node) = {
  .name = "esp4-encrypt",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_encrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(esp_encrypt_error_strings),
  .error_strings = esp_encrypt_error_strings,

  .n_next_nodes = ESP_ENCRYPT_N_NEXT,
  .next_nodes = {
    [ESP_ENCRYPT_NEXT_DROP] = "ip4-drop",
    [ESP_ENCRYPT_NEXT_HANDOFF] = "esp4-encrypt-handoff",
    [ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT] = "interface-output",
  },
};
VLIB_NODE_FN (esp6_encrypt_node) (vlib_main_t * vm,
				  vlib_node_runtime_t * node,
				  vlib_frame_t * from_frame)
{
  return esp_encrypt_inline (vm, node, from_frame, 1 /* is_ip6 */ , 0);
}
VLIB_REGISTER_NODE (esp6_encrypt_node) = {
  .name = "esp6-encrypt",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_encrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(esp_encrypt_error_strings),
  .error_strings = esp_encrypt_error_strings,

  .n_next_nodes = ESP_ENCRYPT_N_NEXT,
  .next_nodes = {
    [ESP_ENCRYPT_NEXT_DROP] = "ip6-drop",
    [ESP_ENCRYPT_NEXT_HANDOFF] = "esp6-encrypt-handoff",
    [ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT] = "interface-output",
  },
};
VLIB_NODE_FN (esp4_encrypt_tun_node) (vlib_main_t * vm,
				      vlib_node_runtime_t * node,
				      vlib_frame_t * from_frame)
{
  return esp_encrypt_inline (vm, node, from_frame, 0 /* is_ip6 */ , 1);
}
VLIB_REGISTER_NODE (esp4_encrypt_tun_node) = {
  .name = "esp4-encrypt-tun",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_encrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(esp_encrypt_error_strings),
  .error_strings = esp_encrypt_error_strings,

  .n_next_nodes = ESP_ENCRYPT_N_NEXT,
  .next_nodes = {
    [ESP_ENCRYPT_NEXT_DROP] = "ip4-drop",
    [ESP_ENCRYPT_NEXT_HANDOFF] = "esp4-encrypt-tun-handoff",
    [ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT] = "error-drop",
  },
};
VNET_FEATURE_INIT (esp4_encrypt_tun_feat_node, static) =
{
  .arc_name = "ip4-output",
  .node_name = "esp4-encrypt-tun",
  .runs_before = VNET_FEATURES ("adj-midchain-tx"),
};
VNET_FEATURE_INIT (esp6o4_encrypt_tun_feat_node, static) =
{
  .arc_name = "ip6-output",
  .node_name = "esp4-encrypt-tun",
  .runs_before = VNET_FEATURES ("adj-midchain-tx"),
};
VNET_FEATURE_INIT (esp4_ethernet_encrypt_tun_feat_node, static) =
{
  .arc_name = "ethernet-output",
  .node_name = "esp4-encrypt-tun",
  .runs_before = VNET_FEATURES ("adj-midchain-tx", "adj-midchain-tx-no-count"),
};
VLIB_NODE_FN (esp6_encrypt_tun_node) (vlib_main_t * vm,
				      vlib_node_runtime_t * node,
				      vlib_frame_t * from_frame)
{
  return esp_encrypt_inline (vm, node, from_frame, 1 /* is_ip6 */ , 1);
}
VLIB_REGISTER_NODE (esp6_encrypt_tun_node) = {
  .name = "esp6-encrypt-tun",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_encrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(esp_encrypt_error_strings),
  .error_strings = esp_encrypt_error_strings,

  .n_next_nodes = ESP_ENCRYPT_N_NEXT,
  .next_nodes = {
    [ESP_ENCRYPT_NEXT_DROP] = "ip6-drop",
    [ESP_ENCRYPT_NEXT_HANDOFF] = "esp6-encrypt-tun-handoff",
    [ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT] = "error-drop",
  },
};
VNET_FEATURE_INIT (esp6_encrypt_tun_feat_node, static) =
{
  .arc_name = "ip6-output",
  .node_name = "esp6-encrypt-tun",
  .runs_before = VNET_FEATURES ("adj-midchain-tx"),
};
VNET_FEATURE_INIT (esp4o6_encrypt_tun_feat_node, static) =
{
  .arc_name = "ip4-output",
  .node_name = "esp6-encrypt-tun",
  .runs_before = VNET_FEATURES ("adj-midchain-tx"),
};
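/*
 * Outbound packets that reach the esp*-no-crypto nodes below are always
 * dropped; the nodes only record the SA index in the packet trace and
 * bump a counter.
 */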
typedef struct
{
  u32 sa_index;
} esp_no_crypto_trace_t;
static u8 *
format_esp_no_crypto_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  esp_no_crypto_trace_t *t = va_arg (*args, esp_no_crypto_trace_t *);

  s = format (s, "esp-no-crypto: sa-index %u", t->sa_index);

  return s;
}
enum
{
  ESP_NO_CRYPTO_NEXT_DROP,
  ESP_NO_CRYPTO_N_NEXT,
};

enum
{
  ESP_NO_CRYPTO_ERROR_RX_PKTS,
};

static char *esp_no_crypto_error_strings[] = {
  "Outbound ESP packets received",
};
always_inline uword
esp_no_crypto_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
		      vlib_frame_t * frame)
{
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
  u16 nexts[VLIB_FRAME_SIZE], *next = nexts;
  u32 *from = vlib_frame_vector_args (frame);
  u32 n_left = frame->n_vectors;

  vlib_get_buffers (vm, from, b, n_left);
  while (n_left > 0)
    {
      u32 next0;
      u32 sa_index0;

      /* packets are always going to be dropped, but get the sa_index */
      sa_index0 = *(u32 *) vnet_feature_next_with_data (&next0, b[0],
							sizeof (sa_index0));

      next[0] = ESP_NO_CRYPTO_NEXT_DROP;

      if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
	{
	  esp_no_crypto_trace_t *tr = vlib_add_trace (vm, node, b[0],
						      sizeof (*tr));
	  tr->sa_index = sa_index0;
	}

      n_left -= 1;
      next += 1;
      b += 1;
    }
  vlib_node_increment_counter (vm, node->node_index,
			       ESP_NO_CRYPTO_ERROR_RX_PKTS, frame->n_vectors);

  vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);

  return frame->n_vectors;
}
VLIB_NODE_FN (esp4_no_crypto_tun_node) (vlib_main_t * vm,
					vlib_node_runtime_t * node,
					vlib_frame_t * from_frame)
{
  return esp_no_crypto_inline (vm, node, from_frame);
}
VLIB_REGISTER_NODE (esp4_no_crypto_tun_node) =
{
  .name = "esp4-no-crypto",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_no_crypto_trace,
  .n_errors = ARRAY_LEN(esp_no_crypto_error_strings),
  .error_strings = esp_no_crypto_error_strings,
  .n_next_nodes = ESP_NO_CRYPTO_N_NEXT,
  .next_nodes = {
    [ESP_NO_CRYPTO_NEXT_DROP] = "ip4-drop",
  },
};
VNET_FEATURE_INIT (esp4_no_crypto_tun_feat_node, static) =
{
  .arc_name = "ip4-output",
  .node_name = "esp4-no-crypto",
  .runs_before = VNET_FEATURES ("adj-midchain-tx"),
};
VLIB_NODE_FN (esp6_no_crypto_tun_node) (vlib_main_t * vm,
					vlib_node_runtime_t * node,
					vlib_frame_t * from_frame)
{
  return esp_no_crypto_inline (vm, node, from_frame);
}
VLIB_REGISTER_NODE (esp6_no_crypto_tun_node) =
{
  .name = "esp6-no-crypto",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_no_crypto_trace,
  .n_errors = ARRAY_LEN(esp_no_crypto_error_strings),
  .error_strings = esp_no_crypto_error_strings,
  .n_next_nodes = ESP_NO_CRYPTO_N_NEXT,
  .next_nodes = {
    [ESP_NO_CRYPTO_NEXT_DROP] = "ip6-drop",
  },
};
VNET_FEATURE_INIT (esp6_no_crypto_tun_feat_node, static) =
{
  .arc_name = "ip6-output",
  .node_name = "esp6-no-crypto",
  .runs_before = VNET_FEATURES ("adj-midchain-tx"),
};
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */