/*
 * esp_encrypt.c : IPSec ESP encrypt node
 *
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vnet/vnet.h>
#include <vnet/api_errno.h>
#include <vnet/ip/ip.h>

#include <vnet/crypto/crypto.h>

#include <vnet/ipsec/ipsec.h>
#include <vnet/ipsec/ipsec_tun.h>
#include <vnet/ipsec/esp.h>
#include <vnet/tunnel/tunnel_dp.h>
#define foreach_esp_encrypt_next                 \
  _ (DROP4, "ip4-drop")                          \
  _ (DROP6, "ip6-drop")                          \
  _ (HANDOFF4, "handoff4")                       \
  _ (HANDOFF6, "handoff6")                       \
  _ (INTERFACE_OUTPUT, "interface-output")

#define _(v, s) ESP_ENCRYPT_NEXT_##v,
typedef enum
{
  foreach_esp_encrypt_next
#undef _
    ESP_ENCRYPT_N_NEXT,
} esp_encrypt_next_t;
#define foreach_esp_encrypt_error                                 \
  _ (RX_PKTS, "ESP pkts received")                                \
  _ (POST_RX_PKTS, "ESP-post pkts received")                      \
  _ (SEQ_CYCLED, "sequence number cycled (packet dropped)")       \
  _ (CRYPTO_ENGINE_ERROR, "crypto engine error (packet dropped)") \
  _ (CRYPTO_QUEUE_FULL, "crypto queue full (packet dropped)")     \
  _ (NO_BUFFERS, "no buffers (packet dropped)")

typedef enum
{
#define _(sym,str) ESP_ENCRYPT_ERROR_##sym,
  foreach_esp_encrypt_error
#undef _
    ESP_ENCRYPT_N_ERROR,
} esp_encrypt_error_t;

static char *esp_encrypt_error_strings[] = {
#define _(sym,string) string,
  foreach_esp_encrypt_error
#undef _
};
typedef struct
{
  u32 sa_index;
  u32 spi;
  u32 seq;
  u32 sa_seq_hi;
  u8 udp_encap;
  ipsec_crypto_alg_t crypto_alg;
  ipsec_integ_alg_t integ_alg;
} esp_encrypt_trace_t;

typedef struct
{
  u32 next_index;
} esp_encrypt_post_trace_t;
/* packet trace format function */
static u8 *
format_esp_encrypt_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  esp_encrypt_trace_t *t = va_arg (*args, esp_encrypt_trace_t *);

  s = format
    (s,
     "esp: sa-index %d spi %u (0x%08x) seq %u sa-seq-hi %u crypto %U integrity %U%s",
     t->sa_index, t->spi, t->spi, t->seq, t->sa_seq_hi,
     format_ipsec_crypto_alg,
     t->crypto_alg, format_ipsec_integ_alg, t->integ_alg,
     t->udp_encap ? " udp-encap-enabled" : "");
  return s;
}

static u8 *
format_esp_post_encrypt_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  esp_encrypt_post_trace_t *t = va_arg (*args, esp_encrypt_post_trace_t *);

  s = format (s, "esp-post: next node index %u", t->next_index);
  return s;
}
/* pad packet in input buffer */
static_always_inline u8 *
esp_add_footer_and_icv (vlib_main_t * vm, vlib_buffer_t ** last,
			u8 esp_align, u8 icv_sz,
			u16 * next, vlib_node_runtime_t * node,
			u16 buffer_data_size, uword total_len)
{
  static const u8 pad_data[ESP_MAX_BLOCK_SIZE] = {
    0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
    0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x00,
  };

  u16 min_length = total_len + sizeof (esp_footer_t);
  u16 new_length = round_pow2 (min_length, esp_align);
  u8 pad_bytes = new_length - min_length;
  esp_footer_t *f = (esp_footer_t *) (vlib_buffer_get_current (last[0]) +
				      last[0]->current_length + pad_bytes);
  u16 tail_sz = sizeof (esp_footer_t) + pad_bytes + icv_sz;

  if (last[0]->current_length + tail_sz > buffer_data_size)
    {
      u32 tmp_bi = 0;
      if (vlib_buffer_alloc (vm, &tmp_bi, 1) != 1)
	return 0;

      vlib_buffer_t *tmp = vlib_get_buffer (vm, tmp_bi);
      last[0]->next_buffer = tmp_bi;
      last[0]->flags |= VLIB_BUFFER_NEXT_PRESENT;
      f = (esp_footer_t *) (vlib_buffer_get_current (tmp) + pad_bytes);
      tmp->current_length += tail_sz;
      last[0] = tmp;
    }
  else
    last[0]->current_length += tail_sz;

  f->pad_length = pad_bytes;
  if (pad_bytes)
    {
      ASSERT (pad_bytes <= ESP_MAX_BLOCK_SIZE);
      pad_bytes = clib_min (ESP_MAX_BLOCK_SIZE, pad_bytes);
      clib_memcpy_fast ((u8 *) f - pad_bytes, pad_data, pad_bytes);
    }

  return &f->next_header;
}
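/* Worked example (values assumed purely for illustration): with
 * esp_align = 16 (e.g. a 16-byte block cipher) and icv_sz = 12, a 52-byte
 * payload gives min_length = 52 + 2 = 54 (including the 2-byte ESP
 * footer), new_length = round_pow2 (54, 16) = 64, so pad_bytes = 10 and
 * tail_sz = 2 + 10 + 12 = 24. The monotonic pad bytes 0x01..0x0a written
 * in front of the footer come from pad_data above, as RFC 4303 requires. */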
static_always_inline void
esp_update_ip4_hdr (ip4_header_t * ip4, u16 len, int is_transport, int is_udp)
{
  ip_csum_t sum;
  u16 old_len;

  len = clib_net_to_host_u16 (len);
  old_len = ip4->length;
  ip4->length = len;

  if (is_transport)
    {
      u8 prot = is_udp ? IP_PROTOCOL_UDP : IP_PROTOCOL_IPSEC_ESP;

      sum = ip_csum_update (ip4->checksum, ip4->protocol,
			    prot, ip4_header_t, protocol);
      ip4->protocol = prot;

      sum = ip_csum_update (sum, old_len, len, ip4_header_t, length);
    }
  else
    sum = ip_csum_update (ip4->checksum, old_len, len, ip4_header_t, length);

  ip4->checksum = ip_csum_fold (sum);
}
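/* Note that the header checksum is patched incrementally (RFC 1624 style)
 * via ip_csum_update () rather than recomputed over the whole header: only
 * the fields that actually changed (length, and protocol in transport
 * mode) contribute to the update. */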
static_always_inline void
esp_fill_udp_hdr (ipsec_sa_t * sa, udp_header_t * udp, u16 len)
{
  clib_memcpy_fast (udp, &sa->udp_hdr, sizeof (udp_header_t));
  udp->length = clib_net_to_host_u16 (len);
}
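/* For UDP encapsulation (NAT-T, RFC 3948) the UDP header is copied from a
 * template pre-built on the SA; only the per-packet length is patched
 * here. The checksum is left as the template sets it (a zero UDP checksum
 * is permitted for ESP-in-UDP over IPv4). */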
static_always_inline u8
ext_hdr_is_pre_esp (u8 nexthdr)
{
#ifdef CLIB_HAVE_VEC128
  static const u8x16 ext_hdr_types = {
    IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS,
    IP_PROTOCOL_IPV6_ROUTE,
    IP_PROTOCOL_IPV6_FRAGMENTATION,
  };

  return !u8x16_is_all_zero (ext_hdr_types == u8x16_splat (nexthdr));
#else
  /* scalar equivalent of the vector comparison above */
  return (nexthdr == IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS) ||
	 (nexthdr == IP_PROTOCOL_IPV6_ROUTE) ||
	 (nexthdr == IP_PROTOCOL_IPV6_FRAGMENTATION);
#endif
}
static_always_inline u8
esp_get_ip6_hdr_len (ip6_header_t * ip6, ip6_ext_header_t ** ext_hdr)
{
  /* this code assumes that HbH, route and frag headers will be before
     others, if that is not the case, they will end up encrypted */
  u8 len = sizeof (ip6_header_t);
  ip6_ext_header_t *p;

  /* if next packet doesn't have ext header */
  if (ext_hdr_is_pre_esp (ip6->protocol) == 0)
    {
      *ext_hdr = NULL;
      return len;
    }

  p = (void *) (ip6 + 1);
  len += ip6_ext_header_len (p);

  while (ext_hdr_is_pre_esp (p->next_hdr))
    {
      len += ip6_ext_header_len (p);
      p = ip6_ext_next_header (p);
    }

  *ext_hdr = p;
  return len;
}
static_always_inline void
esp_process_chained_ops (vlib_main_t * vm, vlib_node_runtime_t * node,
			 vnet_crypto_op_t * ops, vlib_buffer_t * b[],
			 u16 * nexts, vnet_crypto_op_chunk_t * chunks,
			 u16 drop_next)
{
  u32 n_fail, n_ops = vec_len (ops);
  vnet_crypto_op_t *op = ops;

  if (n_ops == 0)
    return;

  n_fail = n_ops - vnet_crypto_process_chained_ops (vm, op, chunks, n_ops);

  while (n_fail)
    {
      ASSERT (op - ops < n_ops);

      if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
	{
	  u32 bi = op->user_data;
	  b[bi]->error = node->errors[ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR];
	  nexts[bi] = drop_next;
	  n_fail--;
	}
      op++;
    }
}
static_always_inline void
esp_process_ops (vlib_main_t * vm, vlib_node_runtime_t * node,
		 vnet_crypto_op_t * ops, vlib_buffer_t * b[], u16 * nexts,
		 u16 drop_next)
{
  u32 n_fail, n_ops = vec_len (ops);
  vnet_crypto_op_t *op = ops;

  if (n_ops == 0)
    return;

  n_fail = n_ops - vnet_crypto_process_ops (vm, op, n_ops);

  while (n_fail)
    {
      ASSERT (op - ops < n_ops);

      if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
	{
	  u32 bi = op->user_data;
	  b[bi]->error = node->errors[ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR];
	  nexts[bi] = drop_next;
	  n_fail--;
	}
      op++;
    }
}
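/* Both process helpers share one failure contract: each op carries the
 * index of its buffer in op->user_data, so any op that does not complete
 * can be mapped back to its buffer, which is then marked with a
 * crypto-engine error and steered to the drop next. */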
typedef struct
{
  u32 salt;
  u64 iv;
} __clib_packed esp_gcm_nonce_t;

STATIC_ASSERT_SIZEOF (esp_gcm_nonce_t, 12);
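/* This matches the AES-GCM nonce construction of RFC 4106: a 4-byte salt
 * taken from the SA keying material followed by the 8-byte per-packet IV,
 * 12 bytes in total. Only the 8 IV bytes travel in the packet; the salt
 * stays implicit on both peers. */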
static_always_inline u32
esp_encrypt_chain_crypto (vlib_main_t * vm, ipsec_per_thread_data_t * ptd,
			  ipsec_sa_t * sa0, vlib_buffer_t * b,
			  vlib_buffer_t * lb, u8 icv_sz, u8 * start,
			  u32 start_len, u16 * n_ch)
{
  vnet_crypto_op_chunk_t *ch;
  vlib_buffer_t *cb = b;
  u32 total_len;
  u16 n_chunks = 1;

  vec_add2 (ptd->chunks, ch, 1);
  total_len = ch->len = start_len;
  ch->src = ch->dst = start;
  cb = vlib_get_buffer (vm, cb->next_buffer);

  while (1)
    {
      vec_add2 (ptd->chunks, ch, 1);
      n_chunks += 1;
      if (lb == cb)
	total_len += ch->len = cb->current_length - icv_sz;
      else
	total_len += ch->len = cb->current_length;
      ch->src = ch->dst = vlib_buffer_get_current (cb);

      if (!(cb->flags & VLIB_BUFFER_NEXT_PRESENT))
	break;

      cb = vlib_get_buffer (vm, cb->next_buffer);
    }

  if (n_ch)
    *n_ch = n_chunks;

  return total_len;
}
static_always_inline u32
esp_encrypt_chain_integ (vlib_main_t * vm, ipsec_per_thread_data_t * ptd,
			 ipsec_sa_t * sa0, vlib_buffer_t * b,
			 vlib_buffer_t * lb, u8 icv_sz, u8 * start,
			 u32 start_len, u8 * digest, u16 * n_ch)
{
  vnet_crypto_op_chunk_t *ch;
  vlib_buffer_t *cb = b;
  u32 total_len;
  u16 n_chunks = 1;

  vec_add2 (ptd->chunks, ch, 1);
  total_len = ch->len = start_len;
  ch->src = start;
  cb = vlib_get_buffer (vm, cb->next_buffer);

  while (1)
    {
      vec_add2 (ptd->chunks, ch, 1);
      n_chunks += 1;
      if (lb == cb)
	{
	  total_len += ch->len = cb->current_length - icv_sz;
	  if (ipsec_sa_is_set_USE_ESN (sa0))
	    {
	      u32 seq_hi = clib_net_to_host_u32 (sa0->seq_hi);
	      clib_memcpy_fast (digest, &seq_hi, sizeof (seq_hi));
	      ch->len += sizeof (seq_hi);
	      total_len += sizeof (seq_hi);
	    }
	}
      else
	total_len += ch->len = cb->current_length;
      ch->src = vlib_buffer_get_current (cb);

      if (!(cb->flags & VLIB_BUFFER_NEXT_PRESENT))
	break;

      cb = vlib_get_buffer (vm, cb->next_buffer);
    }

  if (n_ch)
    *n_ch = n_chunks;

  return total_len;
}
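/* With extended sequence numbers (RFC 4303, appendix A), the high 32 bits
 * of the sequence number are authenticated but never transmitted: they are
 * copied into the ICV slot at the end of the last buffer so the hash
 * covers them, and the engine then overwrites that slot with the computed
 * ICV. */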
static_always_inline void
esp_prepare_sync_op (vlib_main_t * vm, ipsec_per_thread_data_t * ptd,
		     vnet_crypto_op_t ** crypto_ops,
		     vnet_crypto_op_t ** integ_ops, ipsec_sa_t * sa0,
		     u8 * payload, u16 payload_len, u8 iv_sz, u8 icv_sz,
		     vlib_buffer_t ** bufs, vlib_buffer_t ** b,
		     vlib_buffer_t * lb, u32 hdr_len, esp_header_t * esp,
		     esp_gcm_nonce_t * nonce)
{
  if (sa0->crypto_enc_op_id)
    {
      vnet_crypto_op_t *op;
      vec_add2_aligned (crypto_ops[0], op, 1, CLIB_CACHE_LINE_BYTES);
      vnet_crypto_op_init (op, sa0->crypto_enc_op_id);

      op->src = op->dst = payload;
      op->key_index = sa0->crypto_key_index;
      op->len = payload_len - icv_sz;
      op->user_data = b - bufs;

      if (ipsec_sa_is_set_IS_AEAD (sa0))
	{
	  /*
	   * construct the AAD in a scratch space in front
	   * of the IP header.
	   */
	  op->aad = payload - hdr_len - sizeof (esp_aead_t);
	  op->aad_len = esp_aad_fill (op->aad, esp, sa0);

	  op->tag = payload + op->len;
	  op->tag_len = 16;

	  u64 *iv = (u64 *) (payload - iv_sz);
	  nonce->salt = sa0->salt;
	  nonce->iv = *iv = clib_host_to_net_u64 (sa0->gcm_iv_counter++);
	  op->iv = (u8 *) nonce;
	}
      else
	{
	  op->iv = payload - iv_sz;
	  op->flags = VNET_CRYPTO_OP_FLAG_INIT_IV;
	}

      if (lb != b[0])
	{
	  /* is chained */
	  op->flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
	  op->chunk_index = vec_len (ptd->chunks);
	  op->tag = vlib_buffer_get_tail (lb) - icv_sz;
	  esp_encrypt_chain_crypto (vm, ptd, sa0, b[0], lb, icv_sz, payload,
				    payload_len, &op->n_chunks);
	}
    }

  if (sa0->integ_op_id)
    {
      vnet_crypto_op_t *op;
      vec_add2_aligned (integ_ops[0], op, 1, CLIB_CACHE_LINE_BYTES);
      vnet_crypto_op_init (op, sa0->integ_op_id);
      op->src = payload - iv_sz - sizeof (esp_header_t);
      op->digest = payload + payload_len - icv_sz;
      op->key_index = sa0->integ_key_index;
      op->digest_len = icv_sz;
      op->len = payload_len - icv_sz + iv_sz + sizeof (esp_header_t);
      op->user_data = b - bufs;

      if (lb != b[0])
	{
	  /* is chained */
	  op->flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
	  op->chunk_index = vec_len (ptd->chunks);
	  op->digest = vlib_buffer_get_tail (lb) - icv_sz;

	  esp_encrypt_chain_integ (vm, ptd, sa0, b[0], lb, icv_sz,
				   payload - iv_sz - sizeof (esp_header_t),
				   payload_len + iv_sz +
				   sizeof (esp_header_t), op->digest,
				   &op->n_chunks);
	}
      else if (ipsec_sa_is_set_USE_ESN (sa0))
	{
	  u32 seq_hi = clib_net_to_host_u32 (sa0->seq_hi);
	  clib_memcpy_fast (op->digest, &seq_hi, sizeof (seq_hi));
	  op->len += sizeof (seq_hi);
	}
    }
}
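/* The two ops above give the engine overlapping views of one packet, laid
 * out per RFC 4303:
 *
 *   ESP header | IV | payload ... padding | pad length | next header | ICV
 *
 * the cipher op covers payload through next header; the integrity op
 * covers the ESP header, IV and ciphertext. For AEAD ciphers the ESP
 * header is authenticated as AAD instead, and the ICV is the AEAD tag. */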
static_always_inline int
esp_prepare_async_frame (vlib_main_t * vm, ipsec_per_thread_data_t * ptd,
			 vnet_crypto_async_frame_t ** async_frame,
			 ipsec_sa_t * sa, vlib_buffer_t * b,
			 esp_header_t * esp, u8 * payload, u32 payload_len,
			 u8 iv_sz, u8 icv_sz, u32 bi, u16 next, u32 hdr_len,
			 u16 async_next, vlib_buffer_t * lb)
{
  esp_post_data_t *post = esp_post_data (b);
  u8 *tag, *iv, *aad = 0;
  u8 flag = 0;
  u32 key_index;
  i16 crypto_start_offset, integ_start_offset = 0;
  u16 crypto_total_len, integ_total_len;

  post->next_index = next;

  /* crypto */
  crypto_start_offset = payload - b->data;
  crypto_total_len = integ_total_len = payload_len - icv_sz;
  tag = payload + crypto_total_len;

  /* aead */
  if (ipsec_sa_is_set_IS_AEAD (sa))
    {
      esp_gcm_nonce_t *nonce;
      u64 *pkt_iv = (u64 *) (payload - iv_sz);

      aad = payload - hdr_len - sizeof (esp_aead_t);
      esp_aad_fill (aad, esp, sa);
      nonce = (esp_gcm_nonce_t *) (aad - sizeof (*nonce));
      nonce->salt = sa->salt;
      nonce->iv = *pkt_iv = clib_host_to_net_u64 (sa->gcm_iv_counter++);
      iv = (u8 *) nonce;
      key_index = sa->crypto_key_index;

      if (lb != b)
	{
	  /* chain */
	  flag |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
	  tag = vlib_buffer_get_tail (lb) - icv_sz;
	  crypto_total_len = esp_encrypt_chain_crypto (vm, ptd, sa, b, lb,
						       icv_sz, payload,
						       payload_len, 0);
	}
      goto out;
    }

  /* cipher then hash */
  iv = payload - iv_sz;
  integ_start_offset = crypto_start_offset - iv_sz - sizeof (esp_header_t);
  integ_total_len += iv_sz + sizeof (esp_header_t);
  flag |= VNET_CRYPTO_OP_FLAG_INIT_IV;
  key_index = sa->linked_key_index;

  if (b != lb)
    {
      flag |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
      crypto_total_len = esp_encrypt_chain_crypto (vm, ptd, sa, b, lb,
						   icv_sz, payload,
						   payload_len, 0);
      tag = vlib_buffer_get_tail (lb) - icv_sz;
      integ_total_len = esp_encrypt_chain_integ (vm, ptd, sa, b, lb, icv_sz,
						 payload - iv_sz -
						 sizeof (esp_header_t),
						 payload_len + iv_sz +
						 sizeof (esp_header_t),
						 tag, 0);
    }
  else if (ipsec_sa_is_set_USE_ESN (sa) && !ipsec_sa_is_set_IS_AEAD (sa))
    {
      u32 seq_hi = clib_net_to_host_u32 (sa->seq_hi);
      clib_memcpy_fast (tag, &seq_hi, sizeof (seq_hi));
      integ_total_len += sizeof (seq_hi);
    }

out:
  return vnet_crypto_async_add_to_frame (vm, async_frame, key_index,
					 crypto_total_len,
					 integ_total_len - crypto_total_len,
					 crypto_start_offset,
					 integ_start_offset, bi, async_next,
					 iv, tag, aad, flag);
}
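/* In the non-AEAD async path, cipher and hash are submitted as a single
 * frame element keyed by sa->linked_key_index, i.e. the engine performs
 * encrypt-then-MAC itself; the integ_total_len - crypto_total_len passed
 * above is the extra header area (ESP header plus IV, plus the ESN high
 * bits when applicable) that is authenticated but not encrypted. */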
always_inline uword
esp_encrypt_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
		    vlib_frame_t * frame, int is_ip6, int is_tun,
		    u16 async_next)
{
  ipsec_main_t *im = &ipsec_main;
  ipsec_per_thread_data_t *ptd = vec_elt_at_index (im->ptd, vm->thread_index);
  u32 *from = vlib_frame_vector_args (frame);
  u32 n_left = frame->n_vectors;
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
  u16 nexts[VLIB_FRAME_SIZE], *next = nexts;
  esp_gcm_nonce_t nonces[VLIB_FRAME_SIZE], *nonce = nonces;
  u32 thread_index = vm->thread_index;
  u16 buffer_data_size = vlib_buffer_get_default_data_size (vm);
  u32 current_sa_index = ~0, current_sa_packets = 0;
  u32 current_sa_bytes = 0, spi = 0;
  u8 esp_align = 4, iv_sz = 0, icv_sz = 0;
  ipsec_sa_t *sa0 = 0;
  vlib_buffer_t *lb;
  vnet_crypto_op_t **crypto_ops = &ptd->crypto_ops;
  vnet_crypto_op_t **integ_ops = &ptd->integ_ops;
  vnet_crypto_async_frame_t *async_frame = 0;
  int is_async = im->async_mode;
  vnet_crypto_async_op_id_t last_async_op = ~0;
  u16 drop_next = (is_ip6 ? ESP_ENCRYPT_NEXT_DROP6 : ESP_ENCRYPT_NEXT_DROP4);
  u16 n_async_drop = 0;

  vlib_get_buffers (vm, from, b, n_left);
  if (!is_async)
    {
      vec_reset_length (ptd->crypto_ops);
      vec_reset_length (ptd->integ_ops);
      vec_reset_length (ptd->chained_crypto_ops);
      vec_reset_length (ptd->chained_integ_ops);
    }
  vec_reset_length (ptd->chunks);

  while (n_left > 0)
    {
      u32 sa_index0;
      dpo_id_t *dpo;
      esp_header_t *esp;
      u8 *payload, *next_hdr_ptr;
      u16 payload_len, payload_len_total, n_bufs;
      u32 hdr_len;

      if (n_left > 2)
	{
	  u8 *p;
	  vlib_prefetch_buffer_header (b[2], LOAD);
	  p = vlib_buffer_get_current (b[1]);
	  CLIB_PREFETCH (p, CLIB_CACHE_LINE_BYTES, LOAD);
	  p -= CLIB_CACHE_LINE_BYTES;
	  CLIB_PREFETCH (p, CLIB_CACHE_LINE_BYTES, LOAD);
	  /* speculate that the trailer goes in the first buffer */
	  CLIB_PREFETCH (vlib_buffer_get_tail (b[1]),
			 CLIB_CACHE_LINE_BYTES, LOAD);
	}

      if (is_tun)
	{
	  /* we are on a ipsec tunnel's feature arc */
	  vnet_buffer (b[0])->ipsec.sad_index =
	    sa_index0 = ipsec_tun_protect_get_sa_out
	    (vnet_buffer (b[0])->ip.adj_index[VLIB_TX]);
	}
      else
	sa_index0 = vnet_buffer (b[0])->ipsec.sad_index;

      if (sa_index0 != current_sa_index)
	{
	  if (current_sa_packets)
	    vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
					     current_sa_index,
					     current_sa_packets,
					     current_sa_bytes);
	  current_sa_packets = current_sa_bytes = 0;

	  sa0 = pool_elt_at_index (im->sad, sa_index0);

	  /* fetch the second cacheline ASAP */
	  CLIB_PREFETCH (sa0->cacheline1, CLIB_CACHE_LINE_BYTES, LOAD);

	  current_sa_index = sa_index0;
	  spi = clib_net_to_host_u32 (sa0->spi);
	  esp_align = sa0->esp_block_align;
	  icv_sz = sa0->integ_icv_size;
	  iv_sz = sa0->crypto_iv_size;

	  /* submit frame when op_id is different from the old one */
	  if (is_async && sa0->crypto_async_enc_op_id != last_async_op)
	    {
	      if (async_frame && async_frame->n_elts)
		{
		  if (vnet_crypto_async_submit_open_frame (vm, async_frame))
		    esp_async_recycle_failed_submit (async_frame, b, from,
						     nexts, &n_async_drop,
						     drop_next,
						     ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR);
		}
	      async_frame =
		vnet_crypto_async_get_frame (vm, sa0->crypto_async_enc_op_id);
	      last_async_op = sa0->crypto_async_enc_op_id;
	    }
	}

      if (PREDICT_FALSE (~0 == sa0->encrypt_thread_index))
	{
	  /* this is the first packet to use this SA, claim the SA
	   * for this thread. this could happen simultaneously on
	   * another thread */
	  clib_atomic_cmp_and_swap (&sa0->encrypt_thread_index, ~0,
				    ipsec_sa_assign_thread (thread_index));
	}

      if (PREDICT_FALSE (thread_index != sa0->encrypt_thread_index))
	{
	  esp_set_next_index (is_async, from, nexts, from[b - bufs],
			      &n_async_drop,
			      (is_ip6 ? ESP_ENCRYPT_NEXT_HANDOFF6 :
			       ESP_ENCRYPT_NEXT_HANDOFF4), next);
	  goto trace;
	}

      lb = b[0];
      n_bufs = vlib_buffer_chain_linearize (vm, b[0]);
      if (n_bufs == 0)
	{
	  b[0]->error = node->errors[ESP_ENCRYPT_ERROR_NO_BUFFERS];
	  esp_set_next_index (is_async, from, nexts, from[b - bufs],
			      &n_async_drop, drop_next, next);
	  goto trace;
	}

      if (n_bufs > 1)
	{
	  /* find last buffer in the chain */
	  while (lb->flags & VLIB_BUFFER_NEXT_PRESENT)
	    lb = vlib_get_buffer (vm, lb->next_buffer);
	}

      if (PREDICT_FALSE (esp_seq_advance (sa0)))
	{
	  b[0]->error = node->errors[ESP_ENCRYPT_ERROR_SEQ_CYCLED];
	  esp_set_next_index (is_async, from, nexts, from[b - bufs],
			      &n_async_drop, drop_next, next);
	  goto trace;
	}

      /* space for IV */
      hdr_len = iv_sz;

      if (ipsec_sa_is_set_IS_TUNNEL (sa0))
	{
	  payload = vlib_buffer_get_current (b[0]);
	  next_hdr_ptr = esp_add_footer_and_icv (vm, &lb, esp_align, icv_sz,
						 next, node,
						 buffer_data_size,
						 vlib_buffer_length_in_chain
						 (vm, b[0]));
	  if (!next_hdr_ptr)
	    {
	      b[0]->error = node->errors[ESP_ENCRYPT_ERROR_NO_BUFFERS];
	      esp_set_next_index (is_async, from, nexts, from[b - bufs],
				  &n_async_drop, drop_next, next);
	      goto trace;
	    }
	  b[0]->flags &= ~VLIB_BUFFER_TOTAL_LENGTH_VALID;
	  payload_len = b[0]->current_length;
	  payload_len_total = vlib_buffer_length_in_chain (vm, b[0]);

	  /* ESP header */
	  hdr_len += sizeof (*esp);
	  esp = (esp_header_t *) (payload - hdr_len);

	  /* optional UDP header */
	  if (ipsec_sa_is_set_UDP_ENCAP (sa0))
	    {
	      hdr_len += sizeof (udp_header_t);
	      esp_fill_udp_hdr (sa0, (udp_header_t *) (payload - hdr_len),
				payload_len_total + hdr_len);
	    }

	  /* IP header */
	  if (ipsec_sa_is_set_IS_TUNNEL_V6 (sa0))
	    {
	      ip6_header_t *ip6;
	      u16 len = sizeof (ip6_header_t);
	      hdr_len += len;
	      ip6 = (ip6_header_t *) (payload - hdr_len);
	      clib_memcpy_fast (ip6, &sa0->ip6_hdr, sizeof (ip6_header_t));

	      if (is_ip6)
		{
		  *next_hdr_ptr = IP_PROTOCOL_IPV6;
		  tunnel_encap_fixup_6o6 (sa0->tunnel_flags,
					  (const ip6_header_t *) payload,
					  ip6);
		}
	      else
		{
		  *next_hdr_ptr = IP_PROTOCOL_IP_IN_IP;
		  tunnel_encap_fixup_4o6 (sa0->tunnel_flags,
					  (const ip4_header_t *) payload,
					  ip6);
		}
	      len = payload_len_total + hdr_len - len;
	      ip6->payload_length = clib_net_to_host_u16 (len);
	    }
	  else
	    {
	      ip4_header_t *ip4;
	      u16 len = sizeof (ip4_header_t);
	      hdr_len += len;
	      ip4 = (ip4_header_t *) (payload - hdr_len);
	      clib_memcpy_fast (ip4, &sa0->ip4_hdr, sizeof (ip4_header_t));

	      if (is_ip6)
		{
		  *next_hdr_ptr = IP_PROTOCOL_IPV6;
		  tunnel_encap_fixup_6o4_w_chksum (sa0->tunnel_flags,
						   (const ip6_header_t *)
						   payload, ip4);
		}
	      else
		{
		  *next_hdr_ptr = IP_PROTOCOL_IP_IN_IP;
		  tunnel_encap_fixup_4o4_w_chksum (sa0->tunnel_flags,
						   (const ip4_header_t *)
						   payload, ip4);
		}
	      len = payload_len_total + hdr_len;
	      esp_update_ip4_hdr (ip4, len, /* is_transport */ 0, 0);
	    }

	  dpo = &sa0->dpo;
	  if (!is_tun)
	    {
	      next[0] = dpo->dpoi_next_node;
	      vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = dpo->dpoi_index;
	    }
	  else
	    next[0] = ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT;
	}
      else			/* transport mode */
	{
	  u8 *l2_hdr, l2_len, *ip_hdr, ip_len;
	  ip6_ext_header_t *ext_hdr;
	  udp_header_t *udp = 0;
	  u16 udp_len = 0;
	  u8 *old_ip_hdr = vlib_buffer_get_current (b[0]);

	  ip_len = is_ip6 ?
	    esp_get_ip6_hdr_len ((ip6_header_t *) old_ip_hdr, &ext_hdr) :
	    ip4_header_bytes ((ip4_header_t *) old_ip_hdr);

	  vlib_buffer_advance (b[0], ip_len);
	  payload = vlib_buffer_get_current (b[0]);
	  next_hdr_ptr = esp_add_footer_and_icv (vm, &lb, esp_align, icv_sz,
						 next, node,
						 buffer_data_size,
						 vlib_buffer_length_in_chain
						 (vm, b[0]));
	  if (!next_hdr_ptr)
	    {
	      esp_set_next_index (is_async, from, nexts, from[b - bufs],
				  &n_async_drop, drop_next, next);
	      goto trace;
	    }

	  b[0]->flags &= ~VLIB_BUFFER_TOTAL_LENGTH_VALID;
	  payload_len = b[0]->current_length;
	  payload_len_total = vlib_buffer_length_in_chain (vm, b[0]);

	  /* ESP header */
	  hdr_len += sizeof (*esp);
	  esp = (esp_header_t *) (payload - hdr_len);

	  /* optional UDP header */
	  if (ipsec_sa_is_set_UDP_ENCAP (sa0))
	    {
	      hdr_len += sizeof (udp_header_t);
	      udp = (udp_header_t *) (payload - hdr_len);
	    }

	  /* IP header */
	  hdr_len += ip_len;
	  ip_hdr = payload - hdr_len;

	  /* L2 header */
	  if (!is_tun)
	    {
	      l2_len = vnet_buffer (b[0])->ip.save_rewrite_length;
	      hdr_len += l2_len;
	      l2_hdr = payload - hdr_len;

	      /* copy l2 and ip header */
	      clib_memcpy_le32 (l2_hdr, old_ip_hdr - l2_len, l2_len);
	    }
	  else
	    l2_len = 0;

	  if (is_ip6)
	    {
	      ip6_header_t *ip6 = (ip6_header_t *) (old_ip_hdr);
	      if (PREDICT_TRUE (NULL == ext_hdr))
		{
		  *next_hdr_ptr = ip6->protocol;
		  ip6->protocol = IP_PROTOCOL_IPSEC_ESP;
		}
	      else
		{
		  *next_hdr_ptr = ext_hdr->next_hdr;
		  ext_hdr->next_hdr = IP_PROTOCOL_IPSEC_ESP;
		}
	      ip6->payload_length =
		clib_host_to_net_u16 (payload_len_total + hdr_len - l2_len -
				      sizeof (ip6_header_t));
	    }
	  else
	    {
	      u16 len;
	      ip4_header_t *ip4 = (ip4_header_t *) (old_ip_hdr);
	      *next_hdr_ptr = ip4->protocol;
	      len = payload_len_total + hdr_len - l2_len;
	      if (udp)
		{
		  esp_update_ip4_hdr (ip4, len, /* is_transport */ 1, 1);
		  udp_len = len - ip_len;
		}
	      else
		esp_update_ip4_hdr (ip4, len, /* is_transport */ 1, 0);
	    }

	  clib_memcpy_le64 (ip_hdr, old_ip_hdr, ip_len);

	  if (udp)
	    esp_fill_udp_hdr (sa0, udp, udp_len);

	  next[0] = ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT;
	}
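      /* All encap headers (L2/IP/UDP/ESP) have now been written in front of
       * the payload by growing hdr_len downwards from the current data
       * pointer; b[0] is rewound over them with the negative
       * vlib_buffer_advance () below, once the crypto op is prepared. */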
      if (lb != b[0])
	{
	  crypto_ops = &ptd->chained_crypto_ops;
	  integ_ops = &ptd->chained_integ_ops;
	}
      else
	{
	  crypto_ops = &ptd->crypto_ops;
	  integ_ops = &ptd->integ_ops;
	}

      esp->spi = spi;
      esp->seq = clib_net_to_host_u32 (sa0->seq);

      if (is_async)
	{
	  if (PREDICT_FALSE (sa0->crypto_async_enc_op_id == 0))
	    {
	      esp_set_next_index (is_async, from, nexts, from[b - bufs],
				  &n_async_drop, drop_next, next);
	      goto trace;
	    }

	  if (esp_prepare_async_frame (vm, ptd, &async_frame, sa0, b[0], esp,
				       payload, payload_len, iv_sz,
				       icv_sz, from[b - bufs], next[0],
				       hdr_len, async_next, lb))
	    {
	      /* the failure can only be caused by submission; free the
	       * whole frame */
	      if (async_frame->n_elts)
		esp_async_recycle_failed_submit (async_frame, b, from, nexts,
						 &n_async_drop, drop_next,
						 ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR);
	      b[0]->error = node->errors[ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR];
	      esp_set_next_index (1, from, nexts, from[b - bufs],
				  &n_async_drop, drop_next, next);
	      goto trace;
	    }
	}
      else
	{
	  esp_prepare_sync_op (vm, ptd, crypto_ops, integ_ops, sa0, payload,
			       payload_len, iv_sz, icv_sz, bufs, b, lb,
			       hdr_len, esp, nonce++);
	}

      vlib_buffer_advance (b[0], 0LL - hdr_len);

      current_sa_packets += 1;
      current_sa_bytes += payload_len_total;

    trace:
      if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
	{
	  esp_encrypt_trace_t *tr = vlib_add_trace (vm, node, b[0],
						    sizeof (*tr));
	  tr->sa_index = sa_index0;
	  tr->spi = sa0->spi;
	  tr->seq = sa0->seq;
	  tr->sa_seq_hi = sa0->seq_hi;
	  tr->udp_encap = ipsec_sa_is_set_UDP_ENCAP (sa0);
	  tr->crypto_alg = sa0->crypto_alg;
	  tr->integ_alg = sa0->integ_alg;
	}

      /* next */
      n_left -= 1;
      next += 1;
      b += 1;
    }

  vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
				   current_sa_index, current_sa_packets,
				   current_sa_bytes);
  if (!is_async)
    {
      esp_process_ops (vm, node, ptd->crypto_ops, bufs, nexts, drop_next);
      esp_process_chained_ops (vm, node, ptd->chained_crypto_ops, bufs, nexts,
			       ptd->chunks, drop_next);

      esp_process_ops (vm, node, ptd->integ_ops, bufs, nexts, drop_next);
      esp_process_chained_ops (vm, node, ptd->chained_integ_ops, bufs, nexts,
			       ptd->chunks, drop_next);
    }
  else
    {
      if (async_frame && async_frame->n_elts)
	{
	  if (vnet_crypto_async_submit_open_frame (vm, async_frame) < 0)
	    esp_async_recycle_failed_submit (async_frame, b, from, nexts,
					     &n_async_drop, drop_next,
					     ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR);
	}
      vlib_node_increment_counter (vm, node->node_index,
				   ESP_ENCRYPT_ERROR_RX_PKTS,
				   frame->n_vectors);
      if (n_async_drop)
	vlib_buffer_enqueue_to_next (vm, node, from, nexts, n_async_drop);

      return frame->n_vectors;
    }

  vlib_node_increment_counter (vm, node->node_index,
			       ESP_ENCRYPT_ERROR_RX_PKTS, frame->n_vectors);

  vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);
  return frame->n_vectors;
}
always_inline uword
esp_encrypt_post_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
			 vlib_frame_t * frame)
{
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
  u16 nexts[VLIB_FRAME_SIZE], *next = nexts;
  u32 *from = vlib_frame_vector_args (frame);
  u32 n_left = frame->n_vectors;

  vlib_get_buffers (vm, from, b, n_left);

  if (n_left >= 4)
    {
      vlib_prefetch_buffer_header (b[0], LOAD);
      vlib_prefetch_buffer_header (b[1], LOAD);
      vlib_prefetch_buffer_header (b[2], LOAD);
      vlib_prefetch_buffer_header (b[3], LOAD);
    }

  while (n_left > 8)
    {
      vlib_prefetch_buffer_header (b[4], LOAD);
      vlib_prefetch_buffer_header (b[5], LOAD);
      vlib_prefetch_buffer_header (b[6], LOAD);
      vlib_prefetch_buffer_header (b[7], LOAD);

      next[0] = (esp_post_data (b[0]))->next_index;
      next[1] = (esp_post_data (b[1]))->next_index;
      next[2] = (esp_post_data (b[2]))->next_index;
      next[3] = (esp_post_data (b[3]))->next_index;

      if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE))
	{
	  if (b[0]->flags & VLIB_BUFFER_IS_TRACED)
	    {
	      esp_encrypt_post_trace_t *tr = vlib_add_trace (vm, node, b[0],
							     sizeof (*tr));
	      tr->next_index = next[0];
	    }
	  if (b[1]->flags & VLIB_BUFFER_IS_TRACED)
	    {
	      esp_encrypt_post_trace_t *tr = vlib_add_trace (vm, node, b[1],
							     sizeof (*tr));
	      tr->next_index = next[1];
	    }
	  if (b[2]->flags & VLIB_BUFFER_IS_TRACED)
	    {
	      esp_encrypt_post_trace_t *tr = vlib_add_trace (vm, node, b[2],
							     sizeof (*tr));
	      tr->next_index = next[2];
	    }
	  if (b[3]->flags & VLIB_BUFFER_IS_TRACED)
	    {
	      esp_encrypt_post_trace_t *tr = vlib_add_trace (vm, node, b[3],
							     sizeof (*tr));
	      tr->next_index = next[3];
	    }
	}

      b += 4;
      next += 4;
      n_left -= 4;
    }

  while (n_left > 0)
    {
      next[0] = (esp_post_data (b[0]))->next_index;
      if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
	{
	  esp_encrypt_post_trace_t *tr = vlib_add_trace (vm, node, b[0],
							 sizeof (*tr));
	  tr->next_index = next[0];
	}

      b += 1;
      next += 1;
      n_left -= 1;
    }

  vlib_node_increment_counter (vm, node->node_index,
			       ESP_ENCRYPT_ERROR_POST_RX_PKTS,
			       frame->n_vectors);
  vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);
  return frame->n_vectors;
}
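/* The esp*-post nodes are the async completion path: once the crypto
 * engine finishes a frame, they simply forward each buffer to the next
 * index that esp_prepare_async_frame () stashed in the buffer's
 * esp_post_data at encrypt time. */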
VLIB_NODE_FN (esp4_encrypt_node) (vlib_main_t * vm,
				  vlib_node_runtime_t * node,
				  vlib_frame_t * from_frame)
{
  return esp_encrypt_inline (vm, node, from_frame, 0 /* is_ip6 */ , 0,
			     esp_encrypt_async_next.esp4_post_next);
}
VLIB_REGISTER_NODE (esp4_encrypt_node) = {
  .name = "esp4-encrypt",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_encrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(esp_encrypt_error_strings),
  .error_strings = esp_encrypt_error_strings,

  .n_next_nodes = ESP_ENCRYPT_N_NEXT,
  .next_nodes = {
    [ESP_ENCRYPT_NEXT_DROP4] = "ip4-drop",
    [ESP_ENCRYPT_NEXT_DROP6] = "ip6-drop",
    [ESP_ENCRYPT_NEXT_HANDOFF4] = "esp4-encrypt-handoff",
    [ESP_ENCRYPT_NEXT_HANDOFF6] = "esp6-encrypt-handoff",
    [ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT] = "interface-output"
  },
};
VLIB_NODE_FN (esp4_encrypt_post_node) (vlib_main_t * vm,
				       vlib_node_runtime_t * node,
				       vlib_frame_t * from_frame)
{
  return esp_encrypt_post_inline (vm, node, from_frame);
}

VLIB_REGISTER_NODE (esp4_encrypt_post_node) = {
  .name = "esp4-encrypt-post",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_post_encrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .sibling_of = "esp4-encrypt",

  .n_errors = ARRAY_LEN(esp_encrypt_error_strings),
  .error_strings = esp_encrypt_error_strings,
};
VLIB_NODE_FN (esp6_encrypt_node) (vlib_main_t * vm,
				  vlib_node_runtime_t * node,
				  vlib_frame_t * from_frame)
{
  return esp_encrypt_inline (vm, node, from_frame, 1 /* is_ip6 */ , 0,
			     esp_encrypt_async_next.esp6_post_next);
}

VLIB_REGISTER_NODE (esp6_encrypt_node) = {
  .name = "esp6-encrypt",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_encrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .sibling_of = "esp4-encrypt",

  .n_errors = ARRAY_LEN(esp_encrypt_error_strings),
  .error_strings = esp_encrypt_error_strings,
};
VLIB_NODE_FN (esp6_encrypt_post_node) (vlib_main_t * vm,
				       vlib_node_runtime_t * node,
				       vlib_frame_t * from_frame)
{
  return esp_encrypt_post_inline (vm, node, from_frame);
}

VLIB_REGISTER_NODE (esp6_encrypt_post_node) = {
  .name = "esp6-encrypt-post",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_post_encrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .sibling_of = "esp4-encrypt",

  .n_errors = ARRAY_LEN(esp_encrypt_error_strings),
  .error_strings = esp_encrypt_error_strings,
};
VLIB_NODE_FN (esp4_encrypt_tun_node) (vlib_main_t * vm,
				      vlib_node_runtime_t * node,
				      vlib_frame_t * from_frame)
{
  return esp_encrypt_inline (vm, node, from_frame, 0 /* is_ip6 */ , 1,
			     esp_encrypt_async_next.esp4_tun_post_next);
}

VLIB_REGISTER_NODE (esp4_encrypt_tun_node) = {
  .name = "esp4-encrypt-tun",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_encrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(esp_encrypt_error_strings),
  .error_strings = esp_encrypt_error_strings,

  .n_next_nodes = ESP_ENCRYPT_N_NEXT,
  .next_nodes = {
    [ESP_ENCRYPT_NEXT_DROP4] = "ip4-drop",
    [ESP_ENCRYPT_NEXT_DROP6] = "ip6-drop",
    [ESP_ENCRYPT_NEXT_HANDOFF4] = "esp4-encrypt-tun-handoff",
    [ESP_ENCRYPT_NEXT_HANDOFF6] = "error-drop",
    [ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT] = "adj-midchain-tx",
  },
};
VLIB_NODE_FN (esp4_encrypt_tun_post_node) (vlib_main_t * vm,
					   vlib_node_runtime_t * node,
					   vlib_frame_t * from_frame)
{
  return esp_encrypt_post_inline (vm, node, from_frame);
}

VLIB_REGISTER_NODE (esp4_encrypt_tun_post_node) = {
  .name = "esp4-encrypt-tun-post",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_post_encrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .sibling_of = "esp4-encrypt-tun",

  .n_errors = ARRAY_LEN(esp_encrypt_error_strings),
  .error_strings = esp_encrypt_error_strings,
};
VLIB_NODE_FN (esp6_encrypt_tun_node) (vlib_main_t * vm,
				      vlib_node_runtime_t * node,
				      vlib_frame_t * from_frame)
{
  return esp_encrypt_inline (vm, node, from_frame, 1 /* is_ip6 */ , 1,
			     esp_encrypt_async_next.esp6_tun_post_next);
}

VLIB_REGISTER_NODE (esp6_encrypt_tun_node) = {
  .name = "esp6-encrypt-tun",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_encrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(esp_encrypt_error_strings),
  .error_strings = esp_encrypt_error_strings,

  .n_next_nodes = ESP_ENCRYPT_N_NEXT,
  .next_nodes = {
    [ESP_ENCRYPT_NEXT_DROP4] = "ip4-drop",
    [ESP_ENCRYPT_NEXT_DROP6] = "ip6-drop",
    [ESP_ENCRYPT_NEXT_HANDOFF4] = "error-drop",
    [ESP_ENCRYPT_NEXT_HANDOFF6] = "esp6-encrypt-tun-handoff",
    [ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT] = "adj-midchain-tx",
  },
};
VLIB_NODE_FN (esp6_encrypt_tun_post_node) (vlib_main_t * vm,
					   vlib_node_runtime_t * node,
					   vlib_frame_t * from_frame)
{
  return esp_encrypt_post_inline (vm, node, from_frame);
}

VLIB_REGISTER_NODE (esp6_encrypt_tun_post_node) = {
  .name = "esp6-encrypt-tun-post",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_post_encrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .sibling_of = "esp6-encrypt-tun",

  .n_errors = ARRAY_LEN(esp_encrypt_error_strings),
  .error_strings = esp_encrypt_error_strings,
};
typedef struct
{
  u32 sa_index;
} esp_no_crypto_trace_t;

static u8 *
format_esp_no_crypto_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  esp_no_crypto_trace_t *t = va_arg (*args, esp_no_crypto_trace_t *);

  s = format (s, "esp-no-crypto: sa-index %u", t->sa_index);

  return s;
}
enum
{
  ESP_NO_CRYPTO_NEXT_DROP,
  ESP_NO_CRYPTO_N_NEXT,
};

enum
{
  ESP_NO_CRYPTO_ERROR_RX_PKTS,
};

static char *esp_no_crypto_error_strings[] = {
  "Outbound ESP packets received",
};
static uword
esp_no_crypto_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
		      vlib_frame_t * frame)
{
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
  u32 *from = vlib_frame_vector_args (frame);
  u32 n_left = frame->n_vectors;

  vlib_get_buffers (vm, from, b, n_left);

  while (n_left > 0)
    {
      u32 sa_index0;

      /* packets are always going to be dropped, but get the sa_index */
      sa_index0 = ipsec_tun_protect_get_sa_out
	(vnet_buffer (b[0])->ip.adj_index[VLIB_TX]);

      if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
	{
	  esp_no_crypto_trace_t *tr = vlib_add_trace (vm, node, b[0],
						      sizeof (*tr));
	  tr->sa_index = sa_index0;
	}

      n_left -= 1;
      b += 1;
    }

  vlib_node_increment_counter (vm, node->node_index,
			       ESP_NO_CRYPTO_ERROR_RX_PKTS, frame->n_vectors);

  vlib_buffer_enqueue_to_single_next (vm, node, from,
				      ESP_NO_CRYPTO_NEXT_DROP,
				      frame->n_vectors);

  return frame->n_vectors;
}
VLIB_NODE_FN (esp4_no_crypto_tun_node) (vlib_main_t * vm,
					vlib_node_runtime_t * node,
					vlib_frame_t * from_frame)
{
  return esp_no_crypto_inline (vm, node, from_frame);
}

VLIB_REGISTER_NODE (esp4_no_crypto_tun_node) =
{
  .name = "esp4-no-crypto",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_no_crypto_trace,
  .n_errors = ARRAY_LEN(esp_no_crypto_error_strings),
  .error_strings = esp_no_crypto_error_strings,
  .n_next_nodes = ESP_NO_CRYPTO_N_NEXT,
  .next_nodes = {
    [ESP_NO_CRYPTO_NEXT_DROP] = "ip4-drop",
  },
};
VLIB_NODE_FN (esp6_no_crypto_tun_node) (vlib_main_t * vm,
					vlib_node_runtime_t * node,
					vlib_frame_t * from_frame)
{
  return esp_no_crypto_inline (vm, node, from_frame);
}

VLIB_REGISTER_NODE (esp6_no_crypto_tun_node) =
{
  .name = "esp6-no-crypto",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_no_crypto_trace,
  .n_errors = ARRAY_LEN(esp_no_crypto_error_strings),
  .error_strings = esp_no_crypto_error_strings,
  .n_next_nodes = ESP_NO_CRYPTO_N_NEXT,
  .next_nodes = {
    [ESP_NO_CRYPTO_NEXT_DROP] = "ip6-drop",
  },
};
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */