/*
 * esp_encrypt.c : IPSec ESP encrypt node
 *
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vnet/vnet.h>
#include <vnet/api_errno.h>
#include <vnet/ip/ip.h>
#include <vnet/udp/udp.h>

#include <vnet/crypto/crypto.h>

#include <vnet/ipsec/ipsec.h>
#include <vnet/ipsec/esp.h>
#define foreach_esp_encrypt_next                \
_(DROP, "error-drop")                           \
_(HANDOFF, "handoff")                           \
_(INTERFACE_OUTPUT, "interface-output")

#define _(v, s) ESP_ENCRYPT_NEXT_##v,
typedef enum
{
  foreach_esp_encrypt_next
#undef _
    ESP_ENCRYPT_N_NEXT,
} esp_encrypt_next_t;
#define foreach_esp_encrypt_error                               \
 _(RX_PKTS, "ESP pkts received")                                \
 _(SEQ_CYCLED, "sequence number cycled (packet dropped)")       \
 _(CRYPTO_ENGINE_ERROR, "crypto engine error (packet dropped)") \
 _(CHAINED_BUFFER, "chained buffers (packet dropped)")          \
 _(NO_TRAILER_SPACE, "no trailer space (packet dropped)")

typedef enum
{
#define _(sym,str) ESP_ENCRYPT_ERROR_##sym,
  foreach_esp_encrypt_error
#undef _
    ESP_ENCRYPT_N_ERROR,
} esp_encrypt_error_t;

static char *esp_encrypt_error_strings[] = {
#define _(sym,string) string,
  foreach_esp_encrypt_error
#undef _
};
typedef struct
{
  u32 sa_index;
  u32 spi;
  u32 seq;
  u32 sa_seq_hi;
  u8 udp_encap;
  ipsec_crypto_alg_t crypto_alg;
  ipsec_integ_alg_t integ_alg;
} esp_encrypt_trace_t;

/* packet trace format function */
static u8 *
format_esp_encrypt_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  esp_encrypt_trace_t *t = va_arg (*args, esp_encrypt_trace_t *);

  s =
    format (s,
	    "esp: sa-index %d spi %u (0x%08x) seq %u sa-seq-hi %u crypto %U integrity %U%s",
	    t->sa_index, t->spi, t->spi, t->seq, t->sa_seq_hi,
	    format_ipsec_crypto_alg,
	    t->crypto_alg, format_ipsec_integ_alg, t->integ_alg,
	    t->udp_encap ? " udp-encap-enabled" : "");
  return s;
}
/* pad packet in input buffer */
static_always_inline u8 *
esp_add_footer_and_icv (vlib_buffer_t * b, u8 block_size, u8 icv_sz,
			u16 * next, vlib_node_runtime_t * node,
			u16 buffer_data_size)
{
  static const u8 pad_data[ESP_MAX_BLOCK_SIZE] = {
    0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
    0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x00, 0x00,
  };

  u16 min_length = b->current_length + sizeof (esp_footer_t);
  u16 new_length = round_pow2 (min_length, block_size);
  u8 pad_bytes = new_length - min_length;
  esp_footer_t *f = (esp_footer_t *) (vlib_buffer_get_current (b) +
				      new_length - sizeof (esp_footer_t));

  if (b->current_data + new_length + icv_sz > buffer_data_size)
    {
      b->error = node->errors[ESP_ENCRYPT_ERROR_NO_TRAILER_SPACE];
      next[0] = ESP_ENCRYPT_NEXT_DROP;
      return 0;
    }

  if (pad_bytes)
    {
      ASSERT (pad_bytes <= ESP_MAX_BLOCK_SIZE);
      pad_bytes = clib_min (ESP_MAX_BLOCK_SIZE, pad_bytes);
      clib_memcpy_fast ((u8 *) f - pad_bytes, pad_data, pad_bytes);
    }

  f->pad_length = pad_bytes;
  b->current_length = new_length + icv_sz;
  return &f->next_header;
}
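/* Worked example (illustrative, not part of the original source): with
 * block_size = 16, a 14-byte payload gives min_length = 14 + 2 = 16, so
 * new_length = 16 and pad_bytes = 0; the two footer bytes fill out the
 * block.  A 15-byte payload gives min_length = 17, new_length = 32 and
 * pad_bytes = 15, filled from pad_data's monotonic 0x01, 0x02, ...
 * sequence, the self-describing padding RFC 4303 section 2.4 recommends. */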
static_always_inline void
esp_update_ip4_hdr (ip4_header_t * ip4, u16 len, int is_transport, int is_udp)
{
  ip_csum_t sum;
  u16 old_len;

  len = clib_net_to_host_u16 (len);
  old_len = ip4->length;

  if (is_transport)
    {
      u8 prot = is_udp ? IP_PROTOCOL_UDP : IP_PROTOCOL_IPSEC_ESP;

      sum = ip_csum_update (ip4->checksum, ip4->protocol,
			    prot, ip4_header_t, protocol);
      ip4->protocol = prot;

      sum = ip_csum_update (sum, old_len, len, ip4_header_t, length);
    }
  else
    sum = ip_csum_update (ip4->checksum, old_len, len, ip4_header_t, length);

  ip4->length = len;
  ip4->checksum = ip_csum_fold (sum);
}
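/* The header checksum is patched incrementally (RFC 1624): each
 * ip_csum_update () folds the difference between the old and new value of
 * one 16-bit field into the running sum, so rewriting protocol and length
 * costs a few adds instead of a full header recomputation. */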
static_always_inline void
esp_fill_udp_hdr (ipsec_sa_t * sa, udp_header_t * udp, u16 len)
{
  clib_memcpy_fast (udp, &sa->udp_hdr, sizeof (udp_header_t));
  udp->length = clib_net_to_host_u16 (len);
}
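/* NAT-T style UDP encapsulation (RFC 3948): the SA carries a pre-built UDP
 * header template, so per packet only the length field needs to be set.
 * RFC 3948 permits a zero UDP checksum for UDP-encapsulated ESP over IPv4,
 * which allows the template's checksum to be used as-is. */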
static_always_inline u8
ext_hdr_is_pre_esp (u8 nexthdr)
{
#ifdef CLIB_HAVE_VEC128
  static const u8x16 ext_hdr_types = {
    IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS,
    IP_PROTOCOL_IPV6_ROUTE,
    IP_PROTOCOL_IPV6_FRAGMENTATION,
  };

  return !u8x16_is_all_zero (ext_hdr_types == u8x16_splat (nexthdr));
#else
  return ((nexthdr == IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS) ||
	  (nexthdr == IP_PROTOCOL_IPV6_ROUTE) ||
	  (nexthdr == IP_PROTOCOL_IPV6_FRAGMENTATION));
#endif
}
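/* The vector variant splats nexthdr across a 16-byte register and compares
 * it against all three pre-ESP extension header types at once; any non-zero
 * lane is a match.  The unused lanes of ext_hdr_types are zero, so a
 * nexthdr of 0 (hop-by-hop) also matches those lanes, which is harmless
 * since the result is a match either way. */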
static_always_inline u8
esp_get_ip6_hdr_len (ip6_header_t * ip6)
{
  /* this code assumes that HbH, route and frag headers will be before
     others; if that is not the case, they will end up encrypted */
  ip6_ext_header_t *p;
  u8 len = sizeof (ip6_header_t);

  /* if the packet doesn't have an ext header */
  if (ext_hdr_is_pre_esp (ip6->protocol) == 0)
    return len;

  p = (void *) (ip6 + 1);
  len += ip6_ext_header_len (p);

  while (ext_hdr_is_pre_esp (p->next_hdr))
    {
      p = ip6_ext_next_header (p);
      len += ip6_ext_header_len (p);
    }

  return len;
}
static_always_inline void
esp_process_ops (vlib_main_t * vm, vlib_node_runtime_t * node,
		 vnet_crypto_op_t * ops, vlib_buffer_t * b[], u16 * nexts)
{
  u32 n_fail, n_ops = vec_len (ops);
  vnet_crypto_op_t *op = ops;

  if (n_ops == 0)
    return;

  n_fail = n_ops - vnet_crypto_process_ops (vm, op, n_ops);

  while (n_fail)
    {
      ASSERT (op - ops < n_ops);

      if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
	{
	  u32 bi = op->user_data;
	  b[bi]->error = node->errors[ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR];
	  nexts[bi] = ESP_ENCRYPT_NEXT_DROP;
	  n_fail--;
	}
      op++;
    }
}
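/* vnet_crypto_process_ops () returns the number of ops that completed, so
 * n_fail is the count still to be accounted for.  op->user_data was set to
 * the buffer's index within the frame when the op was queued, which lets a
 * failed op be mapped straight back to its buffer to flag the error and
 * reroute it to drop. */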
typedef struct
{
  u32 salt;
  u64 iv;
} __clib_packed esp_gcm_nonce_t;

STATIC_ASSERT_SIZEOF (esp_gcm_nonce_t, 12);
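/* Per RFC 4106 the 96-bit GCM nonce is the SA's 4-byte salt followed by an
 * 8-byte per-packet IV; only the IV travels in the packet, the salt stays
 * in the SA and is never sent on the wire. */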
always_inline uword
esp_encrypt_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
		    vlib_frame_t * frame, int is_ip6, int is_tun)
{
  ipsec_main_t *im = &ipsec_main;
  ipsec_per_thread_data_t *ptd = vec_elt_at_index (im->ptd, vm->thread_index);
  u32 *from = vlib_frame_vector_args (frame);
  u32 n_left = frame->n_vectors;
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
  u16 nexts[VLIB_FRAME_SIZE], *next = nexts;
  esp_gcm_nonce_t nonces[VLIB_FRAME_SIZE], *nonce = nonces;
  u32 thread_index = vm->thread_index;
  u16 buffer_data_size = vlib_buffer_get_default_data_size (vm);
  u32 current_sa_index = ~0, current_sa_packets = 0;
  u32 current_sa_bytes = 0, spi = 0;
  u8 block_sz = 0, iv_sz = 0, icv_sz = 0;
  ipsec_sa_t *sa0 = 0;

  vlib_get_buffers (vm, from, b, n_left);
  vec_reset_length (ptd->crypto_ops);
  vec_reset_length (ptd->integ_ops);
  while (n_left > 0)
    {
      u32 sa_index0;
      dpo_id_t *dpo;
      esp_header_t *esp;
      u8 *payload, *next_hdr_ptr;
      u16 payload_len;
      u32 hdr_len, config_index;

      if (n_left > 2)
	{
	  u8 *p;
	  vlib_prefetch_buffer_header (b[2], LOAD);
	  p = vlib_buffer_get_current (b[1]);
	  CLIB_PREFETCH (p, CLIB_CACHE_LINE_BYTES, LOAD);
	  p -= CLIB_CACHE_LINE_BYTES;
	  CLIB_PREFETCH (p, CLIB_CACHE_LINE_BYTES, LOAD);
	}
      if (is_tun)
	{
	  /* we are on an ipsec tunnel's feature arc */
	  u32 next0;
	  config_index = b[0]->current_config_index;
	  sa_index0 = *(u32 *) vnet_feature_next_with_data (&next0, b[0],
							    sizeof
							    (sa_index0));
	  vnet_buffer (b[0])->ipsec.sad_index = sa_index0;
	}
      else
	sa_index0 = vnet_buffer (b[0])->ipsec.sad_index;
      if (sa_index0 != current_sa_index)
	{
	  if (current_sa_packets)
	    vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
					     current_sa_index,
					     current_sa_packets,
					     current_sa_bytes);
	  current_sa_packets = current_sa_bytes = 0;

	  sa0 = pool_elt_at_index (im->sad, sa_index0);
	  current_sa_index = sa_index0;
	  spi = clib_net_to_host_u32 (sa0->spi);
	  block_sz = sa0->crypto_block_size;
	  icv_sz = sa0->integ_icv_size;
	  iv_sz = sa0->crypto_iv_size;
	}
      if (PREDICT_FALSE (~0 == sa0->encrypt_thread_index))
	{
	  /* this is the first packet to use this SA, claim the SA
	   * for this thread. this could happen simultaneously on
	   * another thread */
	  clib_atomic_cmp_and_swap (&sa0->encrypt_thread_index, ~0,
				    ipsec_sa_assign_thread (thread_index));
	}

      if (PREDICT_FALSE (thread_index != sa0->encrypt_thread_index))
	{
	  next[0] = ESP_ENCRYPT_NEXT_HANDOFF;
	  if (is_tun)
	    b[0]->current_config_index = config_index;
	  goto trace;
	}
      if (vlib_buffer_chain_linearize (vm, b[0]) != 1)
	{
	  b[0]->error = node->errors[ESP_ENCRYPT_ERROR_CHAINED_BUFFER];
	  next[0] = ESP_ENCRYPT_NEXT_DROP;
	  goto trace;
	}

      if (PREDICT_FALSE (esp_seq_advance (sa0)))
	{
	  b[0]->error = node->errors[ESP_ENCRYPT_ERROR_SEQ_CYCLED];
	  next[0] = ESP_ENCRYPT_NEXT_DROP;
	  goto trace;
	}
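      /* From here the encap headers are laid down back-to-front in the
       * buffer's pre-data area: hdr_len starts with the IV and grows as
       * the ESP, optional UDP, IP and (for transport mode) L2 headers are
       * each placed in front of the payload. */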
      /* space for IV */
      hdr_len = iv_sz;

      if (ipsec_sa_is_set_IS_TUNNEL (sa0))
	{
	  payload = vlib_buffer_get_current (b[0]);
	  next_hdr_ptr = esp_add_footer_and_icv (b[0], block_sz, icv_sz,
						 next, node,
						 buffer_data_size);
	  if (!next_hdr_ptr)
	    goto trace;
	  payload_len = b[0]->current_length;

	  /* ESP header */
	  hdr_len += sizeof (*esp);
	  esp = (esp_header_t *) (payload - hdr_len);

	  /* optional UDP header */
	  if (ipsec_sa_is_set_UDP_ENCAP (sa0))
	    {
	      hdr_len += sizeof (udp_header_t);
	      esp_fill_udp_hdr (sa0, (udp_header_t *) (payload - hdr_len),
				payload_len + hdr_len);
	    }
	  /* IP header */
	  if (ipsec_sa_is_set_IS_TUNNEL_V6 (sa0))
	    {
	      ip6_header_t *ip6;
	      u16 len = sizeof (ip6_header_t);
	      hdr_len += len;
	      ip6 = (ip6_header_t *) (payload - hdr_len);
	      clib_memcpy_fast (ip6, &sa0->ip6_hdr, len);
	      *next_hdr_ptr = (is_ip6 ?
			       IP_PROTOCOL_IPV6 : IP_PROTOCOL_IP_IN_IP);
	      len = payload_len + hdr_len - len;
	      ip6->payload_length = clib_host_to_net_u16 (len);
	    }
	  else
	    {
	      ip4_header_t *ip4;
	      u16 len = sizeof (ip4_header_t);
	      hdr_len += len;
	      ip4 = (ip4_header_t *) (payload - hdr_len);
	      clib_memcpy_fast (ip4, &sa0->ip4_hdr, len);
	      *next_hdr_ptr = (is_ip6 ?
			       IP_PROTOCOL_IPV6 : IP_PROTOCOL_IP_IN_IP);
	      len = payload_len + hdr_len;
	      esp_update_ip4_hdr (ip4, len, /* is_transport */ 0, 0);
	    }
	  dpo = &sa0->dpo;
	  if (!is_tun)
	    {
	      next[0] = dpo->dpoi_next_node;
	      vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = dpo->dpoi_index;
	    }
	}
      else			/* transport mode */
	{
	  u8 *l2_hdr, l2_len, *ip_hdr, ip_len;
	  udp_header_t *udp = 0;
	  u8 *old_ip_hdr = vlib_buffer_get_current (b[0]);

	  ip_len = is_ip6 ?
	    esp_get_ip6_hdr_len ((ip6_header_t *) old_ip_hdr) :
	    ip4_header_bytes ((ip4_header_t *) old_ip_hdr);

	  vlib_buffer_advance (b[0], ip_len);
	  payload = vlib_buffer_get_current (b[0]);
	  next_hdr_ptr = esp_add_footer_and_icv (b[0], block_sz, icv_sz,
						 next, node,
						 buffer_data_size);
	  if (!next_hdr_ptr)
	    goto trace;
	  payload_len = b[0]->current_length;

	  /* ESP header */
	  hdr_len += sizeof (*esp);
	  esp = (esp_header_t *) (payload - hdr_len);
	  /* optional UDP header */
	  if (ipsec_sa_is_set_UDP_ENCAP (sa0))
	    {
	      hdr_len += sizeof (udp_header_t);
	      udp = (udp_header_t *) (payload - hdr_len);
	    }

	  /* IP header */
	  hdr_len += ip_len;
	  ip_hdr = payload - hdr_len;

	  /* L2 header */
	  if (!is_tun)
	    {
	      l2_len = vnet_buffer (b[0])->ip.save_rewrite_length;
	      hdr_len += l2_len;
	      l2_hdr = payload - hdr_len;

	      /* copy l2 and ip header */
	      clib_memcpy_le32 (l2_hdr, old_ip_hdr - l2_len, l2_len);
	    }
	  else
	    l2_len = 0;

	  clib_memcpy_le64 (ip_hdr, old_ip_hdr, ip_len);
	  if (is_ip6)
	    {
	      ip6_header_t *ip6 = (ip6_header_t *) (ip_hdr);
	      *next_hdr_ptr = ip6->protocol;
	      ip6->protocol = IP_PROTOCOL_IPSEC_ESP;
	      ip6->payload_length =
		clib_host_to_net_u16 (payload_len + hdr_len - l2_len -
				      ip_len);
	    }
	  else
	    {
	      u16 len;
	      ip4_header_t *ip4 = (ip4_header_t *) (ip_hdr);
	      *next_hdr_ptr = ip4->protocol;
	      len = payload_len + hdr_len - l2_len;
	      if (udp)
		{
		  esp_update_ip4_hdr (ip4, len, /* is_transport */ 1, 1);
		  esp_fill_udp_hdr (sa0, udp, len - ip_len);
		}
	      else
		esp_update_ip4_hdr (ip4, len, /* is_transport */ 1, 0);
	    }

	  next[0] = ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT;
	}
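      /* Resulting transport-mode layout, built back-to-front ahead of the
       * payload pointer (tunnel mode is the same without the L2 header):
       *
       *   [L2][IP][UDP (optional)][ESP][IV][payload][pad][footer][ICV]
       *
       * payload points at the first byte to be encrypted and hdr_len is
       * the total number of bytes prepended in front of it. */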
      esp->spi = spi;
      esp->seq = clib_net_to_host_u32 (sa0->seq);
      if (sa0->crypto_enc_op_id)
	{
	  vnet_crypto_op_t *op;
	  vec_add2_aligned (ptd->crypto_ops, op, 1, CLIB_CACHE_LINE_BYTES);
	  vnet_crypto_op_init (op, sa0->crypto_enc_op_id);
	  op->src = op->dst = payload;
	  op->key_index = sa0->crypto_key_index;
	  op->len = payload_len - icv_sz;
	  op->user_data = b - bufs;

	  if (ipsec_sa_is_set_IS_AEAD (sa0))
	    {
	      /*
	       * construct the AAD in a scratch space in front
	       * of the IP header.
	       */
	      op->aad = payload - hdr_len - sizeof (esp_aead_t);

	      esp_aad_fill (op, esp, sa0);

	      op->tag = payload + op->len;
	      op->tag_len = 16;

	      u64 *iv = (u64 *) (payload - iv_sz);
	      nonce->salt = sa0->salt;
	      nonce->iv = *iv = clib_host_to_net_u64 (sa0->gcm_iv_counter++);
	      op->iv = (u8 *) nonce;
	      nonce++;
	    }
	  else
	    {
	      op->iv = payload - iv_sz;
	      op->flags = VNET_CRYPTO_OP_FLAG_INIT_IV;
	    }
	}
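      /* For AEAD the GCM tag doubles as the ICV, so no separate integrity
       * op is queued; esp_aad_fill () stages the SPI and sequence number
       * (plus the ESN high bits when enabled) as additional authenticated
       * data.  For non-AEAD ciphers the engine is asked to generate a
       * random IV in place via VNET_CRYPTO_OP_FLAG_INIT_IV. */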
      if (sa0->integ_op_id)
	{
	  vnet_crypto_op_t *op;
	  vec_add2_aligned (ptd->integ_ops, op, 1, CLIB_CACHE_LINE_BYTES);
	  vnet_crypto_op_init (op, sa0->integ_op_id);
	  op->src = payload - iv_sz - sizeof (esp_header_t);
	  op->digest = payload + payload_len - icv_sz;
	  op->key_index = sa0->integ_key_index;
	  op->digest_len = icv_sz;
	  op->len = payload_len - icv_sz + iv_sz + sizeof (esp_header_t);
	  op->user_data = b - bufs;
	  if (ipsec_sa_is_set_USE_ESN (sa0))
	    {
	      u32 seq_hi = clib_net_to_host_u32 (sa0->seq_hi);
	      clib_memcpy_fast (op->digest, &seq_hi, sizeof (seq_hi));
	      op->len += sizeof (seq_hi);
	    }
	}
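      /* With extended sequence numbers the high-order 32 bits are not sent
       * on the wire but must still be covered by the ICV (RFC 4303,
       * appendix A).  They are staged in the bytes the digest will later
       * overwrite, and op->len is extended so the hash runs over them. */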
      vlib_buffer_advance (b[0], 0LL - hdr_len);

      current_sa_packets += 1;
      current_sa_bytes += payload_len;

    trace:
      if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
	{
	  esp_encrypt_trace_t *tr = vlib_add_trace (vm, node, b[0],
						    sizeof (*tr));
	  tr->sa_index = sa_index0;
	  tr->spi = sa0->spi;
	  tr->seq = sa0->seq;
	  tr->sa_seq_hi = sa0->seq_hi;
	  tr->udp_encap = ipsec_sa_is_set_UDP_ENCAP (sa0);
	  tr->crypto_alg = sa0->crypto_alg;
	  tr->integ_alg = sa0->integ_alg;
	}

      /* next */
      n_left -= 1;
      next += 1;
      b += 1;
    }
  vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
				   current_sa_index, current_sa_packets,
				   current_sa_bytes);
  esp_process_ops (vm, node, ptd->crypto_ops, bufs, nexts);
  esp_process_ops (vm, node, ptd->integ_ops, bufs, nexts);

  vlib_node_increment_counter (vm, node->node_index,
			       ESP_ENCRYPT_ERROR_RX_PKTS, frame->n_vectors);

  vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);
  return frame->n_vectors;
}
VLIB_NODE_FN (esp4_encrypt_node) (vlib_main_t * vm,
				  vlib_node_runtime_t * node,
				  vlib_frame_t * from_frame)
{
  return esp_encrypt_inline (vm, node, from_frame, 0 /* is_ip6 */ , 0);
}

VLIB_REGISTER_NODE (esp4_encrypt_node) = {
  .name = "esp4-encrypt",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_encrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(esp_encrypt_error_strings),
  .error_strings = esp_encrypt_error_strings,

  .n_next_nodes = ESP_ENCRYPT_N_NEXT,
  .next_nodes = {
    [ESP_ENCRYPT_NEXT_DROP] = "ip4-drop",
    [ESP_ENCRYPT_NEXT_HANDOFF] = "esp4-encrypt-handoff",
    [ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT] = "interface-output",
  },
};
VLIB_NODE_FN (esp6_encrypt_node) (vlib_main_t * vm,
				  vlib_node_runtime_t * node,
				  vlib_frame_t * from_frame)
{
  return esp_encrypt_inline (vm, node, from_frame, 1 /* is_ip6 */ , 0);
}

VLIB_REGISTER_NODE (esp6_encrypt_node) = {
  .name = "esp6-encrypt",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_encrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(esp_encrypt_error_strings),
  .error_strings = esp_encrypt_error_strings,

  .n_next_nodes = ESP_ENCRYPT_N_NEXT,
  .next_nodes = {
    [ESP_ENCRYPT_NEXT_DROP] = "ip6-drop",
    [ESP_ENCRYPT_NEXT_HANDOFF] = "esp6-encrypt-handoff",
    [ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT] = "interface-output",
  },
};
VLIB_NODE_FN (esp4_encrypt_tun_node) (vlib_main_t * vm,
				      vlib_node_runtime_t * node,
				      vlib_frame_t * from_frame)
{
  return esp_encrypt_inline (vm, node, from_frame, 0 /* is_ip6 */ , 1);
}

VLIB_REGISTER_NODE (esp4_encrypt_tun_node) = {
  .name = "esp4-encrypt-tun",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_encrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(esp_encrypt_error_strings),
  .error_strings = esp_encrypt_error_strings,

  .n_next_nodes = ESP_ENCRYPT_N_NEXT,
  .next_nodes = {
    [ESP_ENCRYPT_NEXT_DROP] = "ip4-drop",
    [ESP_ENCRYPT_NEXT_HANDOFF] = "esp4-encrypt-handoff",
    [ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT] = "error-drop",
  },
};
VNET_FEATURE_INIT (esp4_encrypt_tun_feat_node, static) =
{
  .arc_name = "ip4-output",
  .node_name = "esp4-encrypt-tun",
  .runs_before = VNET_FEATURES ("adj-midchain-tx"),
};

VNET_FEATURE_INIT (esp6o4_encrypt_tun_feat_node, static) =
{
  .arc_name = "ip6-output",
  .node_name = "esp4-encrypt-tun",
  .runs_before = VNET_FEATURES ("adj-midchain-tx"),
};

VNET_FEATURE_INIT (esp4_ethernet_encrypt_tun_feat_node, static) =
{
  .arc_name = "ethernet-output",
  .node_name = "esp4-encrypt-tun",
  .runs_before = VNET_FEATURES ("adj-midchain-tx", "adj-midchain-tx-no-count"),
};
VLIB_NODE_FN (esp6_encrypt_tun_node) (vlib_main_t * vm,
				      vlib_node_runtime_t * node,
				      vlib_frame_t * from_frame)
{
  return esp_encrypt_inline (vm, node, from_frame, 1 /* is_ip6 */ , 1);
}

VLIB_REGISTER_NODE (esp6_encrypt_tun_node) = {
  .name = "esp6-encrypt-tun",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_encrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(esp_encrypt_error_strings),
  .error_strings = esp_encrypt_error_strings,

  .n_next_nodes = ESP_ENCRYPT_N_NEXT,
  .next_nodes = {
    [ESP_ENCRYPT_NEXT_DROP] = "ip6-drop",
    [ESP_ENCRYPT_NEXT_HANDOFF] = "esp6-encrypt-handoff",
    [ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT] = "error-drop",
  },
};
VNET_FEATURE_INIT (esp6_encrypt_tun_feat_node, static) =
{
  .arc_name = "ip6-output",
  .node_name = "esp6-encrypt-tun",
  .runs_before = VNET_FEATURES ("adj-midchain-tx"),
};

VNET_FEATURE_INIT (esp4o6_encrypt_tun_feat_node, static) =
{
  .arc_name = "ip4-output",
  .node_name = "esp6-encrypt-tun",
  .runs_before = VNET_FEATURES ("adj-midchain-tx"),
};
typedef struct
{
  u32 sa_index;
} esp_no_crypto_trace_t;

static u8 *
format_esp_no_crypto_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  esp_no_crypto_trace_t *t = va_arg (*args, esp_no_crypto_trace_t *);

  s = format (s, "esp-no-crypto: sa-index %u", t->sa_index);

  return s;
}
enum
{
  ESP_NO_CRYPTO_NEXT_DROP,
  ESP_NO_CRYPTO_N_NEXT,
};

enum
{
  ESP_NO_CRYPTO_ERROR_RX_PKTS,
};

static char *esp_no_crypto_error_strings[] = {
  "Outbound ESP packets received",
};
always_inline uword
esp_no_crypto_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
		      vlib_frame_t * frame)
{
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
  u16 nexts[VLIB_FRAME_SIZE], *next = nexts;
  u32 *from = vlib_frame_vector_args (frame);
  u32 n_left = frame->n_vectors;

  vlib_get_buffers (vm, from, b, n_left);

  while (n_left > 0)
    {
      u32 next0;
      u32 sa_index0;

      /* packets are always going to be dropped, but get the sa_index */
      sa_index0 = *(u32 *) vnet_feature_next_with_data (&next0, b[0],
							sizeof (sa_index0));

      next[0] = ESP_NO_CRYPTO_NEXT_DROP;

      if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
	{
	  esp_no_crypto_trace_t *tr = vlib_add_trace (vm, node, b[0],
						      sizeof (*tr));
	  tr->sa_index = sa_index0;
	}

      n_left -= 1;
      next += 1;
      b += 1;
    }

  vlib_node_increment_counter (vm, node->node_index,
			       ESP_NO_CRYPTO_ERROR_RX_PKTS, frame->n_vectors);

  vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);

  return frame->n_vectors;
}
VLIB_NODE_FN (esp4_no_crypto_tun_node) (vlib_main_t * vm,
					vlib_node_runtime_t * node,
					vlib_frame_t * from_frame)
{
  return esp_no_crypto_inline (vm, node, from_frame);
}

VLIB_REGISTER_NODE (esp4_no_crypto_tun_node) =
{
  .name = "esp4-no-crypto",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_no_crypto_trace,
  .n_errors = ARRAY_LEN(esp_no_crypto_error_strings),
  .error_strings = esp_no_crypto_error_strings,
  .n_next_nodes = ESP_NO_CRYPTO_N_NEXT,
  .next_nodes = {
    [ESP_NO_CRYPTO_NEXT_DROP] = "ip4-drop",
  },
};

VNET_FEATURE_INIT (esp4_no_crypto_tun_feat_node, static) =
{
  .arc_name = "ip4-output",
  .node_name = "esp4-no-crypto",
  .runs_before = VNET_FEATURES ("adj-midchain-tx"),
};
VLIB_NODE_FN (esp6_no_crypto_tun_node) (vlib_main_t * vm,
					vlib_node_runtime_t * node,
					vlib_frame_t * from_frame)
{
  return esp_no_crypto_inline (vm, node, from_frame);
}

VLIB_REGISTER_NODE (esp6_no_crypto_tun_node) =
{
  .name = "esp6-no-crypto",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_no_crypto_trace,
  .n_errors = ARRAY_LEN(esp_no_crypto_error_strings),
  .error_strings = esp_no_crypto_error_strings,
  .n_next_nodes = ESP_NO_CRYPTO_N_NEXT,
  .next_nodes = {
    [ESP_NO_CRYPTO_NEXT_DROP] = "ip6-drop",
  },
};

VNET_FEATURE_INIT (esp6_no_crypto_tun_feat_node, static) =
{
  .arc_name = "ip6-output",
  .node_name = "esp6-no-crypto",
  .runs_before = VNET_FEATURES ("adj-midchain-tx"),
};
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */