/*
 * esp_encrypt.c : IPSec ESP encrypt node
 *
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vnet/vnet.h>
#include <vnet/api_errno.h>
#include <vnet/ip/ip.h>

#include <vnet/crypto/crypto.h>

#include <vnet/ipsec/ipsec.h>
#include <vnet/ipsec/ipsec_tun.h>
#include <vnet/ipsec/esp.h>
#include <vnet/tunnel/tunnel_dp.h>
#define foreach_esp_encrypt_next                                              \
  _ (DROP4, "ip4-drop")                                                       \
  _ (DROP6, "ip6-drop")                                                       \
  _ (DROP_MPLS, "mpls-drop")                                                  \
  _ (HANDOFF4, "handoff4")                                                    \
  _ (HANDOFF6, "handoff6")                                                    \
  _ (HANDOFF_MPLS, "handoff-mpls")                                            \
  _ (INTERFACE_OUTPUT, "interface-output")
#define _(v, s) ESP_ENCRYPT_NEXT_##v,
typedef enum
{
  foreach_esp_encrypt_next
#undef _
    ESP_ENCRYPT_N_NEXT,
} esp_encrypt_next_t;
#define foreach_esp_encrypt_error                                             \
  _ (RX_PKTS, "ESP pkts received")                                            \
  _ (POST_RX_PKTS, "ESP-post pkts received")                                  \
  _ (SEQ_CYCLED, "sequence number cycled (packet dropped)")                   \
  _ (CRYPTO_ENGINE_ERROR, "crypto engine error (packet dropped)")             \
  _ (CRYPTO_QUEUE_FULL, "crypto queue full (packet dropped)")                 \
  _ (NO_BUFFERS, "no buffers (packet dropped)")
typedef enum
{
#define _(sym,str) ESP_ENCRYPT_ERROR_##sym,
  foreach_esp_encrypt_error
#undef _
    ESP_ENCRYPT_N_ERROR,
} esp_encrypt_error_t;
static char *esp_encrypt_error_strings[] = {
#define _(sym,string) string,
  foreach_esp_encrypt_error
#undef _
};
typedef struct
{
  u32 sa_index;
  u32 spi;
  u32 seq;
  u32 sa_seq_hi;
  u8 udp_encap;
  ipsec_crypto_alg_t crypto_alg;
  ipsec_integ_alg_t integ_alg;
} esp_encrypt_trace_t;

typedef struct
{
  u32 next_index;
} esp_encrypt_post_trace_t;
/* packet trace format function */
static u8 *
format_esp_encrypt_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  esp_encrypt_trace_t *t = va_arg (*args, esp_encrypt_trace_t *);

  s = format (s,
              "esp: sa-index %d spi %u (0x%08x) seq %u sa-seq-hi %u crypto %U integrity %U%s",
              t->sa_index, t->spi, t->spi, t->seq, t->sa_seq_hi,
              format_ipsec_crypto_alg, t->crypto_alg,
              format_ipsec_integ_alg, t->integ_alg,
              t->udp_encap ? " udp-encap-enabled" : "");
  return s;
}
static u8 *
format_esp_post_encrypt_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  esp_encrypt_post_trace_t *t = va_arg (*args, esp_encrypt_post_trace_t *);

  s = format (s, "esp-post: next node index %u", t->next_index);
  return s;
}
/* pad packet in input buffer */
static_always_inline u8 *
esp_add_footer_and_icv (vlib_main_t * vm, vlib_buffer_t ** last,
                        u8 esp_align, u8 icv_sz,
                        u16 * next, vlib_node_runtime_t * node,
                        u16 buffer_data_size, uword total_len)
{
  static const u8 pad_data[ESP_MAX_BLOCK_SIZE] = {
    0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
    0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x00,
  };

  u16 min_length = total_len + sizeof (esp_footer_t);
  u16 new_length = round_pow2 (min_length, esp_align);
  u8 pad_bytes = new_length - min_length;
  esp_footer_t *f = (esp_footer_t *) (vlib_buffer_get_current (last[0]) +
                                      last[0]->current_length + pad_bytes);
  u16 tail_sz = sizeof (esp_footer_t) + pad_bytes + icv_sz;

  if (last[0]->current_length + tail_sz > buffer_data_size)
    {
      u32 tmp_bi = 0;
      if (vlib_buffer_alloc (vm, &tmp_bi, 1) != 1)
        return 0;

      vlib_buffer_t *tmp = vlib_get_buffer (vm, tmp_bi);
      last[0]->next_buffer = tmp_bi;
      last[0]->flags |= VLIB_BUFFER_NEXT_PRESENT;
      f = (esp_footer_t *) (vlib_buffer_get_current (tmp) + pad_bytes);
      tmp->current_length += tail_sz;
    }
  else
    last[0]->current_length += tail_sz;

  f->pad_length = pad_bytes;
  if (pad_bytes)
    {
      ASSERT (pad_bytes <= ESP_MAX_BLOCK_SIZE);
      pad_bytes = clib_min (ESP_MAX_BLOCK_SIZE, pad_bytes);
      clib_memcpy_fast ((u8 *) f - pad_bytes, pad_data, pad_bytes);
    }

  return &f->next_header;
}
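
/*
 * Example (illustrative values): with esp_align = 16, icv_sz = 16 and
 * total_len = 100, min_length = 102, new_length = round_pow2 (102, 16) =
 * 112, so pad_bytes = 10 and tail_sz = 2 + 10 + 16 = 28.  The pad is the
 * monotonic 0x01, 0x02, ... sequence required by RFC 4303 section 2.4, and
 * the footer's pad_length byte tells the decryptor how much to strip.
 */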
static_always_inline void
esp_update_ip4_hdr (ip4_header_t * ip4, u16 len, int is_transport, int is_udp)
{
  ip_csum_t sum;
  u16 old_len;

  len = clib_net_to_host_u16 (len);
  old_len = ip4->length;
  ip4->length = len;

  if (is_transport)
    {
      u8 prot = is_udp ? IP_PROTOCOL_UDP : IP_PROTOCOL_IPSEC_ESP;

      sum = ip_csum_update (ip4->checksum, ip4->protocol,
                            prot, ip4_header_t, protocol);
      ip4->protocol = prot;

      sum = ip_csum_update (sum, old_len, len, ip4_header_t, length);
    }
  else
    sum = ip_csum_update (ip4->checksum, old_len, len, ip4_header_t, length);

  ip4->checksum = ip_csum_fold (sum);
}
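
/*
 * esp_update_ip4_hdr () patches the checksum incrementally (RFC 1624)
 * instead of recomputing it: each ip_csum_update () folds the difference
 * between the old and new value of a field (protocol and/or total length)
 * into the running sum, and ip_csum_fold () collapses the carries at the
 * end.  Only the changed fields are touched rather than re-summing all ten
 * 16-bit words of the header.
 */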
static_always_inline void
esp_fill_udp_hdr (ipsec_sa_t * sa, udp_header_t * udp, u16 len)
{
  clib_memcpy_fast (udp, &sa->udp_hdr, sizeof (udp_header_t));
  udp->length = clib_net_to_host_u16 (len);
}
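
/*
 * UDP encapsulation (RFC 3948) lets ESP traverse NAT devices.  The ports
 * come from the template UDP header pre-built in the SA, so per packet only
 * the length field needs to be written.
 */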
static_always_inline u8
ext_hdr_is_pre_esp (u8 nexthdr)
{
#ifdef CLIB_HAVE_VEC128
  static const u8x16 ext_hdr_types = {
    IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS,
    IP_PROTOCOL_IPV6_ROUTE,
    IP_PROTOCOL_IPV6_FRAGMENTATION,
  };

  /* compare nexthdr against all three types in one vector operation */
  return !u8x16_is_all_zero (ext_hdr_types == u8x16_splat (nexthdr));
#else
  /* scalar fallback */
  return (nexthdr == IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS) ||
         (nexthdr == IP_PROTOCOL_IPV6_ROUTE) ||
         (nexthdr == IP_PROTOCOL_IPV6_FRAGMENTATION);
#endif
}
static_always_inline u8
esp_get_ip6_hdr_len (ip6_header_t * ip6, ip6_ext_header_t ** ext_hdr)
{
  /* this code assumes that the HbH, routing and fragment headers appear
     before any others; if that is not the case, they will end up encrypted */
  u8 len = sizeof (ip6_header_t);
  ip6_ext_header_t *p;

  /* bail out early if the packet carries no extension headers */
  if (ext_hdr_is_pre_esp (ip6->protocol) == 0)
    {
      *ext_hdr = NULL;
      return len;
    }

  p = (void *) (ip6 + 1);
  len += ip6_ext_header_len (p);
  while (ext_hdr_is_pre_esp (p->next_hdr))
    {
      p = ip6_ext_next_header (p);
      len += ip6_ext_header_len (p);
    }

  *ext_hdr = p;
  return len;
}
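
/*
 * Per RFC 8200, hop-by-hop, routing and fragment headers are exactly the
 * ones intermediate nodes or reassembly may still need to see, so in
 * transport mode they are the only headers kept in front of ESP; anything
 * following them becomes part of the encrypted payload.
 */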
static_always_inline void
esp_process_chained_ops (vlib_main_t * vm, vlib_node_runtime_t * node,
                         vnet_crypto_op_t * ops, vlib_buffer_t * b[],
                         u16 * nexts, vnet_crypto_op_chunk_t * chunks,
                         u16 drop_next)
{
  u32 n_fail, n_ops = vec_len (ops);
  vnet_crypto_op_t *op = ops;

  if (n_ops == 0)
    return;

  n_fail = n_ops - vnet_crypto_process_chained_ops (vm, op, chunks, n_ops);

  while (n_fail)
    {
      ASSERT (op - ops < n_ops);
      if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
        {
          u32 bi = op->user_data;
          b[bi]->error = node->errors[ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR];
          nexts[bi] = drop_next;
          n_fail--;
        }
      op++;
    }
}
static_always_inline void
esp_process_ops (vlib_main_t * vm, vlib_node_runtime_t * node,
                 vnet_crypto_op_t * ops, vlib_buffer_t * b[], u16 * nexts,
                 u16 drop_next)
{
  u32 n_fail, n_ops = vec_len (ops);
  vnet_crypto_op_t *op = ops;

  if (n_ops == 0)
    return;

  n_fail = n_ops - vnet_crypto_process_ops (vm, op, n_ops);

  while (n_fail)
    {
      ASSERT (op - ops < n_ops);
      if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
        {
          u32 bi = op->user_data;
          b[bi]->error = node->errors[ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR];
          nexts[bi] = drop_next;
          n_fail--;
        }
      op++;
    }
}
static_always_inline u32
esp_encrypt_chain_crypto (vlib_main_t * vm, ipsec_per_thread_data_t * ptd,
                          ipsec_sa_t * sa0, vlib_buffer_t * b,
                          vlib_buffer_t * lb, u8 icv_sz, u8 * start,
                          u32 start_len, u16 * n_ch)
{
  vnet_crypto_op_chunk_t *ch;
  vlib_buffer_t *cb = b;
  u32 total_len;
  u16 n_chunks = 1;

  vec_add2 (ptd->chunks, ch, 1);
  total_len = ch->len = start_len;
  ch->src = ch->dst = start;
  cb = vlib_get_buffer (vm, cb->next_buffer);

  while (1)
    {
      vec_add2 (ptd->chunks, ch, 1);
      n_chunks += 1;
      if (lb == cb)
        total_len += ch->len = cb->current_length - icv_sz;
      else
        total_len += ch->len = cb->current_length;
      ch->src = ch->dst = vlib_buffer_get_current (cb);

      if (!(cb->flags & VLIB_BUFFER_NEXT_PRESENT))
        break;

      cb = vlib_get_buffer (vm, cb->next_buffer);
    }

  if (n_ch)
    *n_ch = n_chunks;

  return total_len;
}
static_always_inline u32
esp_encrypt_chain_integ (vlib_main_t * vm, ipsec_per_thread_data_t * ptd,
                         ipsec_sa_t * sa0, vlib_buffer_t * b,
                         vlib_buffer_t * lb, u8 icv_sz, u8 * start,
                         u32 start_len, u8 * digest, u16 * n_ch)
{
  vnet_crypto_op_chunk_t *ch;
  vlib_buffer_t *cb = b;
  u32 total_len;
  u16 n_chunks = 1;

  vec_add2 (ptd->chunks, ch, 1);
  total_len = ch->len = start_len;
  ch->src = start;
  cb = vlib_get_buffer (vm, cb->next_buffer);

  while (1)
    {
      vec_add2 (ptd->chunks, ch, 1);
      n_chunks += 1;
      if (lb == cb)
        {
          total_len += ch->len = cb->current_length - icv_sz;
          if (ipsec_sa_is_set_USE_ESN (sa0))
            {
              u32 seq_hi = clib_net_to_host_u32 (sa0->seq_hi);
              clib_memcpy_fast (digest, &seq_hi, sizeof (seq_hi));
              ch->len += sizeof (seq_hi);
              total_len += sizeof (seq_hi);
            }
        }
      else
        total_len += ch->len = cb->current_length;
      ch->src = vlib_buffer_get_current (cb);

      if (!(cb->flags & VLIB_BUFFER_NEXT_PRESENT))
        break;

      cb = vlib_get_buffer (vm, cb->next_buffer);
    }

  if (n_ch)
    *n_ch = n_chunks;

  return total_len;
}
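
/*
 * The two chain helpers above translate a vlib buffer chain into the
 * scatter-gather list the crypto engine consumes: one chunk per buffer,
 * with the ICV bytes excluded from the final chunk.  The integrity variant
 * additionally appends the high 32 bits of the extended sequence number
 * after the last chunk, since the ESN must be covered by the ICV even
 * though it is never transmitted (RFC 4303 appendix A).
 */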
static_always_inline void
esp_prepare_sync_op (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
                     vnet_crypto_op_t **crypto_ops,
                     vnet_crypto_op_t **integ_ops, ipsec_sa_t *sa0,
                     u8 *payload, u16 payload_len, u8 iv_sz, u8 icv_sz,
                     vlib_buffer_t **bufs, vlib_buffer_t **b,
                     vlib_buffer_t *lb, u32 hdr_len, esp_header_t *esp)
{
  if (sa0->crypto_enc_op_id)
    {
      vnet_crypto_op_t *op;
      vec_add2_aligned (crypto_ops[0], op, 1, CLIB_CACHE_LINE_BYTES);
      vnet_crypto_op_init (op, sa0->crypto_enc_op_id);

      op->src = op->dst = payload;
      op->key_index = sa0->crypto_key_index;
      op->len = payload_len - icv_sz;
      op->user_data = b - bufs;

      if (ipsec_sa_is_set_IS_CTR (sa0))
        {
          ASSERT (sizeof (u64) == iv_sz);
          /* construct nonce in a scratch space in front of the IP header */
          esp_ctr_nonce_t *nonce =
            (esp_ctr_nonce_t *) (payload - sizeof (u64) - hdr_len -
                                 sizeof (*nonce));
          u64 *pkt_iv = (u64 *) (payload - sizeof (u64));
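
          /*
           * Note the double assignment below: the 64-bit per-packet counter
           * is written both into the nonce block handed to the cipher (as
           * salt | iv | ctr) and into the on-wire IV slot just before the
           * payload, so no extra copy is needed at transmit time.
           */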
          if (ipsec_sa_is_set_IS_AEAD (sa0))
            {
              /* construct aad in a scratch space in front of the nonce */
              op->aad = (u8 *) nonce - sizeof (esp_aead_t);
              op->aad_len = esp_aad_fill (op->aad, esp, sa0);
              op->tag = payload + op->len;
            }
          else
            /* RFC 3686 AES-CTR: the block counter starts at one */
            nonce->ctr = clib_host_to_net_u32 (1);

          nonce->salt = sa0->salt;
          nonce->iv = *pkt_iv = clib_host_to_net_u64 (sa0->ctr_iv_counter++);
          op->iv = (u8 *) nonce;
        }
      else
        {
          op->iv = payload - iv_sz;
          op->flags = VNET_CRYPTO_OP_FLAG_INIT_IV;
        }

      if (lb != b[0])
        {
          /* is chained */
          op->flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
          op->chunk_index = vec_len (ptd->chunks);
          op->tag = vlib_buffer_get_tail (lb) - icv_sz;
          esp_encrypt_chain_crypto (vm, ptd, sa0, b[0], lb, icv_sz, payload,
                                    payload_len, &op->n_chunks);
        }
    }
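
  /*
   * For chained buffers the op above runs in scatter-gather mode:
   * chunk_index points into the per-thread chunk vector filled by
   * esp_encrypt_chain_crypto (), and the tag lands at the tail of the last
   * buffer, where esp_add_footer_and_icv () reserved icv_sz bytes.
   */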
  if (sa0->integ_op_id)
    {
      vnet_crypto_op_t *op;
      vec_add2_aligned (integ_ops[0], op, 1, CLIB_CACHE_LINE_BYTES);
      vnet_crypto_op_init (op, sa0->integ_op_id);
      op->src = payload - iv_sz - sizeof (esp_header_t);
      op->digest = payload + payload_len - icv_sz;
      op->key_index = sa0->integ_key_index;
      op->digest_len = icv_sz;
      op->len = payload_len - icv_sz + iv_sz + sizeof (esp_header_t);
      op->user_data = b - bufs;

      if (lb != b[0])
        {
          /* is chained */
          op->flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
          op->chunk_index = vec_len (ptd->chunks);
          op->digest = vlib_buffer_get_tail (lb) - icv_sz;

          esp_encrypt_chain_integ (vm, ptd, sa0, b[0], lb, icv_sz,
                                   payload - iv_sz - sizeof (esp_header_t),
                                   payload_len + iv_sz +
                                   sizeof (esp_header_t), op->digest,
                                   &op->n_chunks);
        }
      else if (ipsec_sa_is_set_USE_ESN (sa0))
        {
          u32 seq_hi = clib_net_to_host_u32 (sa0->seq_hi);
          clib_memcpy_fast (op->digest, &seq_hi, sizeof (seq_hi));
          op->len += sizeof (seq_hi);
        }
    }
}
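
/*
 * In both paths above the ESN high bits are handled the same way: only the
 * low 32 bits of the sequence number travel in the ESP header, while seq_hi
 * is mixed into the ICV computation by temporarily appending it to the
 * authenticated data.  The receiver recovers seq_hi from its replay window.
 */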
static_always_inline int
esp_prepare_async_frame (vlib_main_t * vm, ipsec_per_thread_data_t * ptd,
                         vnet_crypto_async_frame_t ** async_frame,
                         ipsec_sa_t * sa, vlib_buffer_t * b,
                         esp_header_t * esp, u8 * payload, u32 payload_len,
                         u8 iv_sz, u8 icv_sz, u32 bi, u16 next, u32 hdr_len,
                         u16 async_next, vlib_buffer_t * lb)
{
  esp_post_data_t *post = esp_post_data (b);
  u8 *tag, *iv, *aad = 0;
  u8 flag = 0;
  u32 key_index;
  i16 crypto_start_offset, integ_start_offset = 0;
  u16 crypto_total_len, integ_total_len;

  post->next_index = next;

  /* crypto */
  crypto_start_offset = payload - b->data;
  crypto_total_len = integ_total_len = payload_len - icv_sz;
  tag = payload + crypto_total_len;

  key_index = sa->linked_key_index;

  if (ipsec_sa_is_set_IS_CTR (sa))
    {
      ASSERT (sizeof (u64) == iv_sz);
      /* construct nonce in a scratch space in front of the IP header */
      esp_ctr_nonce_t *nonce = (esp_ctr_nonce_t *) (payload - sizeof (u64) -
                                                    hdr_len - sizeof (*nonce));
      u64 *pkt_iv = (u64 *) (payload - sizeof (u64));

      if (ipsec_sa_is_set_IS_AEAD (sa))
        {
          /* construct aad in a scratch space in front of the nonce */
          aad = (u8 *) nonce - sizeof (esp_aead_t);
          esp_aad_fill (aad, esp, sa);
          key_index = sa->crypto_key_index;
        }
      else
        /* RFC 3686 AES-CTR: the block counter starts at one */
        nonce->ctr = clib_host_to_net_u32 (1);

      nonce->salt = sa->salt;
      nonce->iv = *pkt_iv = clib_host_to_net_u64 (sa->ctr_iv_counter++);
      iv = (u8 *) nonce;
    }
  else
    {
      iv = payload - iv_sz;
      flag |= VNET_CRYPTO_OP_FLAG_INIT_IV;
    }

  if (lb != b)
    {
      /* chain */
      flag |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
      tag = vlib_buffer_get_tail (lb) - icv_sz;
      crypto_total_len = esp_encrypt_chain_crypto (vm, ptd, sa, b, lb, icv_sz,
                                                   payload, payload_len, 0);
    }

  if (sa->integ_op_id)
    {
      /* integ */
      integ_start_offset =
        crypto_start_offset - iv_sz - sizeof (esp_header_t);
      integ_total_len += iv_sz + sizeof (esp_header_t);

      if (lb != b)
        {
          integ_total_len = esp_encrypt_chain_integ (
            vm, ptd, sa, b, lb, icv_sz,
            payload - iv_sz - sizeof (esp_header_t),
            payload_len + iv_sz + sizeof (esp_header_t), tag, 0);
        }
      else if (ipsec_sa_is_set_USE_ESN (sa))
        {
          u32 seq_hi = clib_net_to_host_u32 (sa->seq_hi);
          clib_memcpy_fast (tag, &seq_hi, sizeof (seq_hi));
          integ_total_len += sizeof (seq_hi);
        }
    }

  return vnet_crypto_async_add_to_frame (vm, async_frame, key_index,
                                         crypto_total_len,
                                         integ_total_len - crypto_total_len,
                                         crypto_start_offset,
                                         integ_start_offset, bi, async_next,
                                         iv, tag, aad, flag);
}
always_inline uword
esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
                    vlib_frame_t *frame, vnet_link_t lt, int is_tun,
                    u16 async_next)
{
  ipsec_main_t *im = &ipsec_main;
  ipsec_per_thread_data_t *ptd = vec_elt_at_index (im->ptd, vm->thread_index);
  u32 *from = vlib_frame_vector_args (frame);
  u32 n_left = frame->n_vectors;
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
  u16 nexts[VLIB_FRAME_SIZE], *next = nexts;
  u32 thread_index = vm->thread_index;
  u16 buffer_data_size = vlib_buffer_get_default_data_size (vm);
  u32 current_sa_index = ~0, current_sa_packets = 0;
  u32 current_sa_bytes = 0, spi = 0;
  u8 esp_align = 4, iv_sz = 0, icv_sz = 0;
  ipsec_sa_t *sa0 = 0;
  vlib_buffer_t *lb;
  vnet_crypto_op_t **crypto_ops = &ptd->crypto_ops;
  vnet_crypto_op_t **integ_ops = &ptd->integ_ops;
  vnet_crypto_async_frame_t *async_frame = 0;
  int is_async = im->async_mode;
  vnet_crypto_async_op_id_t last_async_op = ~0;
  u16 drop_next =
    (lt == VNET_LINK_IP6 ? ESP_ENCRYPT_NEXT_DROP6 :
     (lt == VNET_LINK_IP4 ? ESP_ENCRYPT_NEXT_DROP4 :
      ESP_ENCRYPT_NEXT_DROP_MPLS));
  u16 handoff_next = (lt == VNET_LINK_IP6 ?
                      ESP_ENCRYPT_NEXT_HANDOFF6 :
                      (lt == VNET_LINK_IP4 ? ESP_ENCRYPT_NEXT_HANDOFF4 :
                       ESP_ENCRYPT_NEXT_HANDOFF_MPLS));
  u16 n_async_drop = 0;

  vlib_get_buffers (vm, from, b, n_left);

  vec_reset_length (ptd->crypto_ops);
  vec_reset_length (ptd->integ_ops);
  vec_reset_length (ptd->chained_crypto_ops);
  vec_reset_length (ptd->chained_integ_ops);
  vec_reset_length (ptd->chunks);
  while (n_left > 0)
    {
      u32 sa_index0;
      dpo_id_t *dpo;
      esp_header_t *esp;
      u8 *payload, *next_hdr_ptr;
      u16 payload_len, payload_len_total, n_bufs;
      u32 hdr_len;

      if (n_left > 2)
        {
          u8 *p;
          vlib_prefetch_buffer_header (b[2], LOAD);
          p = vlib_buffer_get_current (b[1]);
          CLIB_PREFETCH (p, CLIB_CACHE_LINE_BYTES, LOAD);
          p -= CLIB_CACHE_LINE_BYTES;
          CLIB_PREFETCH (p, CLIB_CACHE_LINE_BYTES, LOAD);
          /* speculate that the trailer goes in the first buffer */
          CLIB_PREFETCH (vlib_buffer_get_tail (b[1]),
                         CLIB_CACHE_LINE_BYTES, LOAD);
        }

      if (is_tun)
        {
          /* we are on an ipsec tunnel's feature arc */
          vnet_buffer (b[0])->ipsec.sad_index =
            sa_index0 = ipsec_tun_protect_get_sa_out
            (vnet_buffer (b[0])->ip.adj_index[VLIB_TX]);
        }
      else
        sa_index0 = vnet_buffer (b[0])->ipsec.sad_index;
      if (sa_index0 != current_sa_index)
        {
          if (current_sa_packets)
            vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
                                             current_sa_index,
                                             current_sa_packets,
                                             current_sa_bytes);
          current_sa_packets = current_sa_bytes = 0;

          sa0 = pool_elt_at_index (im->sad, sa_index0);

          /* fetch the second cacheline ASAP */
          CLIB_PREFETCH (sa0->cacheline1, CLIB_CACHE_LINE_BYTES, LOAD);

          current_sa_index = sa_index0;
          spi = clib_net_to_host_u32 (sa0->spi);
          esp_align = sa0->esp_block_align;
          icv_sz = sa0->integ_icv_size;
          iv_sz = sa0->crypto_iv_size;

          /* submit frame when op_id is different from the old one */
          if (is_async && sa0->crypto_async_enc_op_id != last_async_op)
            {
              if (async_frame && async_frame->n_elts)
                {
                  if (vnet_crypto_async_submit_open_frame (vm, async_frame))
                    esp_async_recycle_failed_submit (
                      async_frame, b, from, nexts, &n_async_drop, drop_next,
                      ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR);
                }
              async_frame =
                vnet_crypto_async_get_frame (vm, sa0->crypto_async_enc_op_id);
              last_async_op = sa0->crypto_async_enc_op_id;
            }
        }
      if (PREDICT_FALSE (~0 == sa0->thread_index))
        {
          /* this is the first packet to use this SA, claim the SA
           * for this thread. this could happen simultaneously on
           * another thread */
          clib_atomic_cmp_and_swap (&sa0->thread_index, ~0,
                                    ipsec_sa_assign_thread (thread_index));
        }
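
      /*
       * Ownership matters because the SA carries mutable per-packet state
       * (sequence number, CTR IV counter).  The compare-and-swap guarantees
       * a single winner when two threads race for an unclaimed SA; packets
       * arriving on any other thread are handed off to the owner below.
       */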
      if (PREDICT_FALSE (thread_index != sa0->thread_index))
        {
          esp_set_next_index (is_async, from, nexts, from[b - bufs],
                              &n_async_drop, handoff_next, next);
          goto trace;
        }
      n_bufs = vlib_buffer_chain_linearize (vm, b[0]);
      if (!n_bufs)
        {
          b[0]->error = node->errors[ESP_ENCRYPT_ERROR_NO_BUFFERS];
          esp_set_next_index (is_async, from, nexts, from[b - bufs],
                              &n_async_drop, drop_next, next);
          goto trace;
        }

      lb = b[0];
      /* find last buffer in the chain */
      while (lb->flags & VLIB_BUFFER_NEXT_PRESENT)
        lb = vlib_get_buffer (vm, lb->next_buffer);

      if (PREDICT_FALSE (esp_seq_advance (sa0)))
        {
          b[0]->error = node->errors[ESP_ENCRYPT_ERROR_SEQ_CYCLED];
          esp_set_next_index (is_async, from, nexts, from[b - bufs],
                              &n_async_drop, drop_next, next);
          goto trace;
        }
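
      /*
       * RFC 4303 forbids reuse of a sequence number on a given SA, so on
       * wrap-around the packet is dropped rather than sent with a stale
       * counter; the SA must be re-keyed before traffic can resume.
       */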
      /* space for IV */
      hdr_len = iv_sz;

      if (ipsec_sa_is_set_IS_TUNNEL (sa0))
        {
          payload = vlib_buffer_get_current (b[0]);
          next_hdr_ptr = esp_add_footer_and_icv (vm, &lb, esp_align, icv_sz,
                                                 next, node,
                                                 buffer_data_size,
                                                 vlib_buffer_length_in_chain
                                                 (vm, b[0]));
          if (!next_hdr_ptr)
            {
              b[0]->error = node->errors[ESP_ENCRYPT_ERROR_NO_BUFFERS];
              esp_set_next_index (is_async, from, nexts, from[b - bufs],
                                  &n_async_drop, drop_next, next);
              goto trace;
            }
          b[0]->flags &= ~VLIB_BUFFER_TOTAL_LENGTH_VALID;
          payload_len = b[0]->current_length;
          payload_len_total = vlib_buffer_length_in_chain (vm, b[0]);

          /* ESP header */
          hdr_len += sizeof (*esp);
          esp = (esp_header_t *) (payload - hdr_len);

          /* optional UDP header */
          if (ipsec_sa_is_set_UDP_ENCAP (sa0))
            {
              hdr_len += sizeof (udp_header_t);
              esp_fill_udp_hdr (sa0, (udp_header_t *) (payload - hdr_len),
                                payload_len_total + hdr_len);
            }

          /* IP header */
          if (ipsec_sa_is_set_IS_TUNNEL_V6 (sa0))
            {
              ip6_header_t *ip6;
              u16 len = sizeof (ip6_header_t);
              hdr_len += len;
              ip6 = (ip6_header_t *) (payload - hdr_len);
              clib_memcpy_fast (ip6, &sa0->ip6_hdr, sizeof (ip6_header_t));

              if (VNET_LINK_IP6 == lt)
                {
                  *next_hdr_ptr = IP_PROTOCOL_IPV6;
                  tunnel_encap_fixup_6o6 (sa0->tunnel_flags,
                                          (const ip6_header_t *) payload,
                                          ip6);
                }
              else if (VNET_LINK_IP4 == lt)
                {
                  *next_hdr_ptr = IP_PROTOCOL_IP_IN_IP;
                  tunnel_encap_fixup_4o6 (sa0->tunnel_flags, b[0],
                                          (const ip4_header_t *) payload,
                                          ip6);
                }
              else if (VNET_LINK_MPLS == lt)
                {
                  *next_hdr_ptr = IP_PROTOCOL_MPLS_IN_IP;
                  tunnel_encap_fixup_mplso6 (
                    sa0->tunnel_flags, b[0],
                    (const mpls_unicast_header_t *) payload, ip6);
                }

              len = payload_len_total + hdr_len - len;
              ip6->payload_length = clib_net_to_host_u16 (len);
              b[0]->flags |= VNET_BUFFER_F_LOCALLY_ORIGINATED;
            }
          else
            {
              ip4_header_t *ip4;
              u16 len = sizeof (ip4_header_t);
              hdr_len += len;
              ip4 = (ip4_header_t *) (payload - hdr_len);
              clib_memcpy_fast (ip4, &sa0->ip4_hdr, sizeof (ip4_header_t));

              if (VNET_LINK_IP6 == lt)
                {
                  *next_hdr_ptr = IP_PROTOCOL_IPV6;
                  tunnel_encap_fixup_6o4_w_chksum (sa0->tunnel_flags,
                                                   (const ip6_header_t *)
                                                   payload, ip4);
                }
              else if (VNET_LINK_IP4 == lt)
                {
                  *next_hdr_ptr = IP_PROTOCOL_IP_IN_IP;
                  tunnel_encap_fixup_4o4_w_chksum (sa0->tunnel_flags,
                                                   (const ip4_header_t *)
                                                   payload, ip4);
                }
              else if (VNET_LINK_MPLS == lt)
                {
                  *next_hdr_ptr = IP_PROTOCOL_MPLS_IN_IP;
                  tunnel_encap_fixup_mplso4_w_chksum (
                    sa0->tunnel_flags,
                    (const mpls_unicast_header_t *) payload, ip4);
                }

              len = payload_len_total + hdr_len;
              esp_update_ip4_hdr (ip4, len, /* is_transport */ 0, 0);
            }

          dpo = &sa0->dpo;
          if (!is_tun)
            {
              next[0] = dpo->dpoi_next_node;
              vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = dpo->dpoi_index;
            }
          else
            next[0] = ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT;
        }
      else                      /* transport mode */
        {
          u8 *l2_hdr, l2_len, *ip_hdr, ip_len;
          ip6_ext_header_t *ext_hdr;
          udp_header_t *udp = 0;
          u16 udp_len = 0;
          u8 *old_ip_hdr = vlib_buffer_get_current (b[0]);

          ip_len =
            (VNET_LINK_IP6 == lt ?
             esp_get_ip6_hdr_len ((ip6_header_t *) old_ip_hdr, &ext_hdr) :
             ip4_header_bytes ((ip4_header_t *) old_ip_hdr));

          vlib_buffer_advance (b[0], ip_len);
          payload = vlib_buffer_get_current (b[0]);
          next_hdr_ptr = esp_add_footer_and_icv (vm, &lb, esp_align, icv_sz,
                                                 next, node,
                                                 buffer_data_size,
                                                 vlib_buffer_length_in_chain
                                                 (vm, b[0]));
          if (!next_hdr_ptr)
            {
              esp_set_next_index (is_async, from, nexts, from[b - bufs],
                                  &n_async_drop, drop_next, next);
              goto trace;
            }

          b[0]->flags &= ~VLIB_BUFFER_TOTAL_LENGTH_VALID;
          payload_len = b[0]->current_length;
          payload_len_total = vlib_buffer_length_in_chain (vm, b[0]);

          /* ESP header */
          hdr_len += sizeof (*esp);
          esp = (esp_header_t *) (payload - hdr_len);

          /* optional UDP header */
          if (ipsec_sa_is_set_UDP_ENCAP (sa0))
            {
              hdr_len += sizeof (udp_header_t);
              udp = (udp_header_t *) (payload - hdr_len);
            }

          /* IP header */
          hdr_len += ip_len;
          ip_hdr = payload - hdr_len;

          /* L2 header */
          if (!is_tun)
            {
              l2_len = vnet_buffer (b[0])->ip.save_rewrite_length;
              hdr_len += l2_len;
              l2_hdr = payload - hdr_len;

              /* copy l2 and ip header */
              clib_memcpy_le32 (l2_hdr, old_ip_hdr - l2_len, l2_len);
            }
          else
            l2_len = 0;

          if (VNET_LINK_IP6 == lt)
            {
              ip6_header_t *ip6 = (ip6_header_t *) (old_ip_hdr);
              if (PREDICT_TRUE (NULL == ext_hdr))
                {
                  *next_hdr_ptr = ip6->protocol;
                  ip6->protocol = IP_PROTOCOL_IPSEC_ESP;
                }
              else
                {
                  *next_hdr_ptr = ext_hdr->next_hdr;
                  ext_hdr->next_hdr = IP_PROTOCOL_IPSEC_ESP;
                }
              ip6->payload_length =
                clib_host_to_net_u16 (payload_len_total + hdr_len - l2_len -
                                      sizeof (ip6_header_t));
            }
          else if (VNET_LINK_IP4 == lt)
            {
              u16 len;
              ip4_header_t *ip4 = (ip4_header_t *) (old_ip_hdr);
              *next_hdr_ptr = ip4->protocol;
              len = payload_len_total + hdr_len - l2_len;
              if (udp)
                {
                  esp_update_ip4_hdr (ip4, len, /* is_transport */ 1, 1);
                  udp_len = len - ip_len;
                }
              else
                esp_update_ip4_hdr (ip4, len, /* is_transport */ 1, 0);
            }

          clib_memcpy_le64 (ip_hdr, old_ip_hdr, ip_len);

          if (udp)
            esp_fill_udp_hdr (sa0, udp, udp_len);

          next[0] = ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT;
        }
      if (lb != b[0])
        {
          crypto_ops = &ptd->chained_crypto_ops;
          integ_ops = &ptd->chained_integ_ops;
        }
      else
        {
          crypto_ops = &ptd->crypto_ops;
          integ_ops = &ptd->integ_ops;
        }
      esp->spi = spi;
      esp->seq = clib_net_to_host_u32 (sa0->seq);

      if (is_async)
        {
          if (PREDICT_FALSE (sa0->crypto_async_enc_op_id == 0))
            {
              esp_set_next_index (is_async, from, nexts, from[b - bufs],
                                  &n_async_drop, drop_next, next);
              goto trace;
            }

          if (esp_prepare_async_frame (vm, ptd, &async_frame, sa0, b[0], esp,
                                       payload, payload_len, iv_sz,
                                       icv_sz, from[b - bufs], next[0],
                                       hdr_len, async_next, lb))
            {
              /* a failure here can only come from frame submission;
                 free the whole frame */
              if (async_frame->n_elts)
                esp_async_recycle_failed_submit (
                  async_frame, b, from, nexts, &n_async_drop, drop_next,
                  ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR);
              b[0]->error =
                node->errors[ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR];
              esp_set_next_index (1, from, nexts, from[b - bufs],
                                  &n_async_drop, drop_next, next);
              goto trace;
            }
        }
      else
        esp_prepare_sync_op (vm, ptd, crypto_ops, integ_ops, sa0, payload,
                             payload_len, iv_sz, icv_sz, bufs, b, lb,
                             hdr_len, esp);

      vlib_buffer_advance (b[0], 0LL - hdr_len);
      current_sa_packets += 1;
      current_sa_bytes += payload_len_total;

    trace:
      if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
        {
          esp_encrypt_trace_t *tr = vlib_add_trace (vm, node, b[0],
                                                    sizeof (*tr));
          tr->sa_index = sa_index0;
          tr->spi = sa0->spi;
          tr->seq = sa0->seq;
          tr->sa_seq_hi = sa0->seq_hi;
          tr->udp_encap = ipsec_sa_is_set_UDP_ENCAP (sa0);
          tr->crypto_alg = sa0->crypto_alg;
          tr->integ_alg = sa0->integ_alg;
        }

      /* next */
      n_left -= 1;
      next += 1;
      b += 1;
    }
  vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
                                   current_sa_index, current_sa_packets,
                                   current_sa_bytes);

  esp_process_ops (vm, node, ptd->crypto_ops, bufs, nexts, drop_next);
  esp_process_chained_ops (vm, node, ptd->chained_crypto_ops, bufs, nexts,
                           ptd->chunks, drop_next);

  esp_process_ops (vm, node, ptd->integ_ops, bufs, nexts, drop_next);
  esp_process_chained_ops (vm, node, ptd->chained_integ_ops, bufs, nexts,
                           ptd->chunks, drop_next);
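
  /*
   * Synchronous ops run encryption before integrity because ESP is
   * encrypt-then-MAC: the ICV is computed over the ESP header, IV and
   * ciphertext.  Any op the engine failed is steered to the drop next,
   * while the rest of the frame continues unaffected.
   */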
  if (is_async)
    {
      if (async_frame && async_frame->n_elts)
        {
          if (vnet_crypto_async_submit_open_frame (vm, async_frame) < 0)
            esp_async_recycle_failed_submit (async_frame, b, from, nexts,
                                             &n_async_drop, drop_next,
                                             ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR);
        }

      vlib_node_increment_counter (vm, node->node_index,
                                   ESP_ENCRYPT_ERROR_RX_PKTS,
                                   frame->n_vectors);
      if (n_async_drop)
        vlib_buffer_enqueue_to_next (vm, node, from, nexts, n_async_drop);

      return frame->n_vectors;
    }

  vlib_node_increment_counter (vm, node->node_index,
                               ESP_ENCRYPT_ERROR_RX_PKTS, frame->n_vectors);

  vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);
  return frame->n_vectors;
}
always_inline uword
esp_encrypt_post_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
                         vlib_frame_t * frame)
{
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
  u16 nexts[VLIB_FRAME_SIZE], *next = nexts;
  u32 *from = vlib_frame_vector_args (frame);
  u32 n_left = frame->n_vectors;

  vlib_get_buffers (vm, from, b, n_left);

  if (n_left >= 4)
    {
      vlib_prefetch_buffer_header (b[0], LOAD);
      vlib_prefetch_buffer_header (b[1], LOAD);
      vlib_prefetch_buffer_header (b[2], LOAD);
      vlib_prefetch_buffer_header (b[3], LOAD);
    }

  while (n_left > 8)
    {
      vlib_prefetch_buffer_header (b[4], LOAD);
      vlib_prefetch_buffer_header (b[5], LOAD);
      vlib_prefetch_buffer_header (b[6], LOAD);
      vlib_prefetch_buffer_header (b[7], LOAD);

      next[0] = (esp_post_data (b[0]))->next_index;
      next[1] = (esp_post_data (b[1]))->next_index;
      next[2] = (esp_post_data (b[2]))->next_index;
      next[3] = (esp_post_data (b[3]))->next_index;

      if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE))
        {
          if (b[0]->flags & VLIB_BUFFER_IS_TRACED)
            {
              esp_encrypt_post_trace_t *tr = vlib_add_trace (vm, node, b[0],
                                                             sizeof (*tr));
              tr->next_index = next[0];
            }
          if (b[1]->flags & VLIB_BUFFER_IS_TRACED)
            {
              esp_encrypt_post_trace_t *tr = vlib_add_trace (vm, node, b[1],
                                                             sizeof (*tr));
              tr->next_index = next[1];
            }
          if (b[2]->flags & VLIB_BUFFER_IS_TRACED)
            {
              esp_encrypt_post_trace_t *tr = vlib_add_trace (vm, node, b[2],
                                                             sizeof (*tr));
              tr->next_index = next[2];
            }
          if (b[3]->flags & VLIB_BUFFER_IS_TRACED)
            {
              esp_encrypt_post_trace_t *tr = vlib_add_trace (vm, node, b[3],
                                                             sizeof (*tr));
              tr->next_index = next[3];
            }
        }

      b += 4;
      next += 4;
      n_left -= 4;
    }

  while (n_left > 0)
    {
      next[0] = (esp_post_data (b[0]))->next_index;
      if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
        {
          esp_encrypt_post_trace_t *tr = vlib_add_trace (vm, node, b[0],
                                                         sizeof (*tr));
          tr->next_index = next[0];
        }

      b += 1;
      next += 1;
      n_left -= 1;
    }

  vlib_node_increment_counter (vm, node->node_index,
                               ESP_ENCRYPT_ERROR_POST_RX_PKTS,
                               frame->n_vectors);
  vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);
  return frame->n_vectors;
}
VLIB_NODE_FN (esp4_encrypt_node) (vlib_main_t * vm,
                                  vlib_node_runtime_t * node,
                                  vlib_frame_t * from_frame)
{
  return esp_encrypt_inline (vm, node, from_frame, VNET_LINK_IP4, 0,
                             esp_encrypt_async_next.esp4_post_next);
}

VLIB_REGISTER_NODE (esp4_encrypt_node) = {
  .name = "esp4-encrypt",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_encrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN (esp_encrypt_error_strings),
  .error_strings = esp_encrypt_error_strings,

  .n_next_nodes = ESP_ENCRYPT_N_NEXT,
  .next_nodes = { [ESP_ENCRYPT_NEXT_DROP4] = "ip4-drop",
                  [ESP_ENCRYPT_NEXT_DROP6] = "ip6-drop",
                  [ESP_ENCRYPT_NEXT_DROP_MPLS] = "mpls-drop",
                  [ESP_ENCRYPT_NEXT_HANDOFF4] = "esp4-encrypt-handoff",
                  [ESP_ENCRYPT_NEXT_HANDOFF6] = "esp6-encrypt-handoff",
                  [ESP_ENCRYPT_NEXT_HANDOFF_MPLS] = "error-drop",
                  [ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT] = "interface-output" },
};
VLIB_NODE_FN (esp4_encrypt_post_node) (vlib_main_t * vm,
                                       vlib_node_runtime_t * node,
                                       vlib_frame_t * from_frame)
{
  return esp_encrypt_post_inline (vm, node, from_frame);
}

VLIB_REGISTER_NODE (esp4_encrypt_post_node) = {
  .name = "esp4-encrypt-post",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_post_encrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .sibling_of = "esp4-encrypt",

  .n_errors = ARRAY_LEN (esp_encrypt_error_strings),
  .error_strings = esp_encrypt_error_strings,
};
VLIB_NODE_FN (esp6_encrypt_node) (vlib_main_t * vm,
                                  vlib_node_runtime_t * node,
                                  vlib_frame_t * from_frame)
{
  return esp_encrypt_inline (vm, node, from_frame, VNET_LINK_IP6, 0,
                             esp_encrypt_async_next.esp6_post_next);
}

VLIB_REGISTER_NODE (esp6_encrypt_node) = {
  .name = "esp6-encrypt",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_encrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .sibling_of = "esp4-encrypt",

  .n_errors = ARRAY_LEN (esp_encrypt_error_strings),
  .error_strings = esp_encrypt_error_strings,
};
VLIB_NODE_FN (esp6_encrypt_post_node) (vlib_main_t * vm,
                                       vlib_node_runtime_t * node,
                                       vlib_frame_t * from_frame)
{
  return esp_encrypt_post_inline (vm, node, from_frame);
}

VLIB_REGISTER_NODE (esp6_encrypt_post_node) = {
  .name = "esp6-encrypt-post",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_post_encrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .sibling_of = "esp4-encrypt",

  .n_errors = ARRAY_LEN (esp_encrypt_error_strings),
  .error_strings = esp_encrypt_error_strings,
};
VLIB_NODE_FN (esp4_encrypt_tun_node) (vlib_main_t * vm,
                                      vlib_node_runtime_t * node,
                                      vlib_frame_t * from_frame)
{
  return esp_encrypt_inline (vm, node, from_frame, VNET_LINK_IP4, 1,
                             esp_encrypt_async_next.esp4_tun_post_next);
}

VLIB_REGISTER_NODE (esp4_encrypt_tun_node) = {
  .name = "esp4-encrypt-tun",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_encrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN (esp_encrypt_error_strings),
  .error_strings = esp_encrypt_error_strings,

  .n_next_nodes = ESP_ENCRYPT_N_NEXT,
  .next_nodes = {
    [ESP_ENCRYPT_NEXT_DROP4] = "ip4-drop",
    [ESP_ENCRYPT_NEXT_DROP6] = "ip6-drop",
    [ESP_ENCRYPT_NEXT_DROP_MPLS] = "mpls-drop",
    [ESP_ENCRYPT_NEXT_HANDOFF4] = "esp4-encrypt-tun-handoff",
    [ESP_ENCRYPT_NEXT_HANDOFF6] = "esp6-encrypt-tun-handoff",
    [ESP_ENCRYPT_NEXT_HANDOFF_MPLS] = "esp-mpls-encrypt-tun-handoff",
    [ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT] = "adj-midchain-tx",
  },
};
VLIB_NODE_FN (esp4_encrypt_tun_post_node) (vlib_main_t * vm,
                                           vlib_node_runtime_t * node,
                                           vlib_frame_t * from_frame)
{
  return esp_encrypt_post_inline (vm, node, from_frame);
}

VLIB_REGISTER_NODE (esp4_encrypt_tun_post_node) = {
  .name = "esp4-encrypt-tun-post",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_post_encrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .sibling_of = "esp4-encrypt-tun",

  .n_errors = ARRAY_LEN (esp_encrypt_error_strings),
  .error_strings = esp_encrypt_error_strings,
};
VLIB_NODE_FN (esp6_encrypt_tun_node) (vlib_main_t * vm,
                                      vlib_node_runtime_t * node,
                                      vlib_frame_t * from_frame)
{
  return esp_encrypt_inline (vm, node, from_frame, VNET_LINK_IP6, 1,
                             esp_encrypt_async_next.esp6_tun_post_next);
}

VLIB_REGISTER_NODE (esp6_encrypt_tun_node) = {
  .name = "esp6-encrypt-tun",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_encrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN (esp_encrypt_error_strings),
  .error_strings = esp_encrypt_error_strings,

  .n_next_nodes = ESP_ENCRYPT_N_NEXT,
  .next_nodes = {
    [ESP_ENCRYPT_NEXT_DROP4] = "ip4-drop",
    [ESP_ENCRYPT_NEXT_DROP6] = "ip6-drop",
    [ESP_ENCRYPT_NEXT_DROP_MPLS] = "mpls-drop",
    [ESP_ENCRYPT_NEXT_HANDOFF4] = "esp4-encrypt-tun-handoff",
    [ESP_ENCRYPT_NEXT_HANDOFF6] = "esp6-encrypt-tun-handoff",
    [ESP_ENCRYPT_NEXT_HANDOFF_MPLS] = "esp-mpls-encrypt-tun-handoff",
    [ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT] = "adj-midchain-tx",
  },
};
VLIB_NODE_FN (esp6_encrypt_tun_post_node) (vlib_main_t * vm,
                                           vlib_node_runtime_t * node,
                                           vlib_frame_t * from_frame)
{
  return esp_encrypt_post_inline (vm, node, from_frame);
}

VLIB_REGISTER_NODE (esp6_encrypt_tun_post_node) = {
  .name = "esp6-encrypt-tun-post",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_post_encrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .sibling_of = "esp-mpls-encrypt-tun",

  .n_errors = ARRAY_LEN (esp_encrypt_error_strings),
  .error_strings = esp_encrypt_error_strings,
};
VLIB_NODE_FN (esp_mpls_encrypt_tun_node)
(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *from_frame)
{
  return esp_encrypt_inline (vm, node, from_frame, VNET_LINK_MPLS, 1,
                             esp_encrypt_async_next.esp_mpls_tun_post_next);
}

VLIB_REGISTER_NODE (esp_mpls_encrypt_tun_node) = {
  .name = "esp-mpls-encrypt-tun",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_encrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN (esp_encrypt_error_strings),
  .error_strings = esp_encrypt_error_strings,

  .n_next_nodes = ESP_ENCRYPT_N_NEXT,
  .next_nodes = {
    [ESP_ENCRYPT_NEXT_DROP4] = "ip4-drop",
    [ESP_ENCRYPT_NEXT_DROP6] = "ip6-drop",
    [ESP_ENCRYPT_NEXT_DROP_MPLS] = "mpls-drop",
    [ESP_ENCRYPT_NEXT_HANDOFF4] = "esp4-encrypt-tun-handoff",
    [ESP_ENCRYPT_NEXT_HANDOFF6] = "esp6-encrypt-tun-handoff",
    [ESP_ENCRYPT_NEXT_HANDOFF_MPLS] = "esp-mpls-encrypt-tun-handoff",
    [ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT] = "adj-midchain-tx",
  },
};
VLIB_NODE_FN (esp_mpls_encrypt_tun_post_node)
(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *from_frame)
{
  return esp_encrypt_post_inline (vm, node, from_frame);
}

VLIB_REGISTER_NODE (esp_mpls_encrypt_tun_post_node) = {
  .name = "esp-mpls-encrypt-tun-post",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_post_encrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .sibling_of = "esp-mpls-encrypt-tun",

  .n_errors = ARRAY_LEN (esp_encrypt_error_strings),
  .error_strings = esp_encrypt_error_strings,
};
typedef struct
{
  u32 sa_index;
} esp_no_crypto_trace_t;

static u8 *
format_esp_no_crypto_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  esp_no_crypto_trace_t *t = va_arg (*args, esp_no_crypto_trace_t *);

  s = format (s, "esp-no-crypto: sa-index %u", t->sa_index);
  return s;
}
typedef enum
{
  ESP_NO_CRYPTO_NEXT_DROP,
  ESP_NO_CRYPTO_N_NEXT,
} esp_no_crypto_next_t;

typedef enum
{
  ESP_NO_CRYPTO_ERROR_RX_PKTS,
} esp_no_crypto_error_t;

static char *esp_no_crypto_error_strings[] = {
  "Outbound ESP packets received",
};
always_inline uword
esp_no_crypto_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
                      vlib_frame_t * frame)
{
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
  u32 *from = vlib_frame_vector_args (frame);
  u32 n_left = frame->n_vectors;

  vlib_get_buffers (vm, from, b, n_left);

  while (n_left > 0)
    {
      u32 sa_index0;

      /* packets are always going to be dropped, but get the sa_index */
      sa_index0 = ipsec_tun_protect_get_sa_out
        (vnet_buffer (b[0])->ip.adj_index[VLIB_TX]);

      if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
        {
          esp_no_crypto_trace_t *tr = vlib_add_trace (vm, node, b[0],
                                                      sizeof (*tr));
          tr->sa_index = sa_index0;
        }

      n_left -= 1;
      b += 1;
    }

  vlib_node_increment_counter (vm, node->node_index,
                               ESP_NO_CRYPTO_ERROR_RX_PKTS, frame->n_vectors);

  vlib_buffer_enqueue_to_single_next (vm, node, from,
                                      ESP_NO_CRYPTO_NEXT_DROP,
                                      frame->n_vectors);
  return frame->n_vectors;
}
VLIB_NODE_FN (esp4_no_crypto_tun_node) (vlib_main_t * vm,
                                        vlib_node_runtime_t * node,
                                        vlib_frame_t * from_frame)
{
  return esp_no_crypto_inline (vm, node, from_frame);
}

VLIB_REGISTER_NODE (esp4_no_crypto_tun_node) =
{
  .name = "esp4-no-crypto",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_no_crypto_trace,
  .n_errors = ARRAY_LEN (esp_no_crypto_error_strings),
  .error_strings = esp_no_crypto_error_strings,
  .n_next_nodes = ESP_NO_CRYPTO_N_NEXT,
  .next_nodes = {
    [ESP_NO_CRYPTO_NEXT_DROP] = "ip4-drop",
  },
};

VLIB_NODE_FN (esp6_no_crypto_tun_node) (vlib_main_t * vm,
                                        vlib_node_runtime_t * node,
                                        vlib_frame_t * from_frame)
{
  return esp_no_crypto_inline (vm, node, from_frame);
}

VLIB_REGISTER_NODE (esp6_no_crypto_tun_node) =
{
  .name = "esp6-no-crypto",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_no_crypto_trace,
  .n_errors = ARRAY_LEN (esp_no_crypto_error_strings),
  .error_strings = esp_no_crypto_error_strings,
  .n_next_nodes = ESP_NO_CRYPTO_N_NEXT,
  .next_nodes = {
    [ESP_NO_CRYPTO_NEXT_DROP] = "ip6-drop",
  },
};
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */