/*
 * esp_encrypt.c : IPSec ESP encrypt node
 *
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vnet/vnet.h>
#include <vnet/api_errno.h>
#include <vnet/ip/ip.h>
#include <vnet/udp/udp.h>

#include <vnet/crypto/crypto.h>

#include <vnet/ipsec/ipsec.h>
#include <vnet/ipsec/ipsec_tun.h>
#include <vnet/ipsec/esp.h>
#define foreach_esp_encrypt_next                \
_(DROP4, "ip4-drop")                            \
_(DROP6, "ip6-drop")                            \
_(PENDING, "pending")                           \
_(HANDOFF4, "handoff4")                         \
_(HANDOFF6, "handoff6")                         \
_(INTERFACE_OUTPUT, "interface-output")

#define _(v, s) ESP_ENCRYPT_NEXT_##v,
typedef enum
{
  foreach_esp_encrypt_next
#undef _
    ESP_ENCRYPT_N_NEXT,
} esp_encrypt_next_t;
#define foreach_esp_encrypt_error                               \
 _(RX_PKTS, "ESP pkts received")                                \
 _(POST_RX_PKTS, "ESP-post pkts received")                      \
 _(SEQ_CYCLED, "sequence number cycled (packet dropped)")       \
 _(CRYPTO_ENGINE_ERROR, "crypto engine error (packet dropped)") \
 _(CRYPTO_QUEUE_FULL, "crypto queue full (packet dropped)")     \
 _(NO_BUFFERS, "no buffers (packet dropped)")

typedef enum
{
#define _(sym,str) ESP_ENCRYPT_ERROR_##sym,
  foreach_esp_encrypt_error
#undef _
    ESP_ENCRYPT_N_ERROR,
} esp_encrypt_error_t;
static char *esp_encrypt_error_strings[] = {
#define _(sym,string) string,
  foreach_esp_encrypt_error
#undef _
};
typedef struct
{
  u32 sa_index;
  u32 spi;
  u32 seq;
  u32 sa_seq_hi;
  u8 udp_encap;
  ipsec_crypto_alg_t crypto_alg;
  ipsec_integ_alg_t integ_alg;
} esp_encrypt_trace_t;

typedef struct
{
  u32 next_index;
} esp_encrypt_post_trace_t;
/* packet trace format function */
static u8 *
format_esp_encrypt_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  esp_encrypt_trace_t *t = va_arg (*args, esp_encrypt_trace_t *);

  s =
    format (s,
	    "esp: sa-index %d spi %u (0x%08x) seq %u sa-seq-hi %u crypto %U integrity %U%s",
	    t->sa_index, t->spi, t->spi, t->seq, t->sa_seq_hi,
	    format_ipsec_crypto_alg,
	    t->crypto_alg, format_ipsec_integ_alg, t->integ_alg,
	    t->udp_encap ? " udp-encap-enabled" : "");
  return s;
}
static u8 *
format_esp_post_encrypt_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  esp_encrypt_post_trace_t *t = va_arg (*args, esp_encrypt_post_trace_t *);

  s = format (s, "esp-post: next node index %u", t->next_index);
  return s;
}
/* pad packet in input buffer */
static_always_inline u8 *
esp_add_footer_and_icv (vlib_main_t * vm, vlib_buffer_t ** last,
			u8 block_size, u8 icv_sz,
			u16 * next, vlib_node_runtime_t * node,
			u16 buffer_data_size, uword total_len)
{
  static const u8 pad_data[ESP_MAX_BLOCK_SIZE] = {
    0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
    0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x00, 0x00,
  };

  u16 min_length = total_len + sizeof (esp_footer_t);
  u16 new_length = round_pow2 (min_length, block_size);
  u8 pad_bytes = new_length - min_length;
  esp_footer_t *f = (esp_footer_t *) (vlib_buffer_get_current (last[0]) +
				      last[0]->current_length + pad_bytes);
  u16 tail_sz = sizeof (esp_footer_t) + pad_bytes + icv_sz;

  if (last[0]->current_length + tail_sz > buffer_data_size)
    {
      u32 tmp_bi = 0;
      if (vlib_buffer_alloc (vm, &tmp_bi, 1) != 1)
	return 0;

      vlib_buffer_t *tmp = vlib_get_buffer (vm, tmp_bi);
      last[0]->next_buffer = tmp_bi;
      last[0]->flags |= VLIB_BUFFER_NEXT_PRESENT;
      f = (esp_footer_t *) (vlib_buffer_get_current (tmp) + pad_bytes);
      tmp->current_length += tail_sz;
      last[0] = tmp;
    }
  else
    last[0]->current_length += tail_sz;

  f->pad_length = pad_bytes;
  if (pad_bytes)
    {
      ASSERT (pad_bytes <= ESP_MAX_BLOCK_SIZE);
      pad_bytes = clib_min (ESP_MAX_BLOCK_SIZE, pad_bytes);
      clib_memcpy_fast ((u8 *) f - pad_bytes, pad_data, pad_bytes);
    }

  return &f->next_header;
}
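/* Worked example (illustrative): with a 16-byte cipher block and
 * total_len = 52, min_length = 52 + 2 = 54, new_length =
 * round_pow2 (54, 16) = 64, so pad_bytes = 10 and the tail adds
 * 2 + 10 + icv_sz bytes. The monotonic pad_data bytes 1, 2, 3, ... are
 * the self-describing default padding of RFC 4303. */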
static_always_inline void
esp_update_ip4_hdr (ip4_header_t * ip4, u16 len, int is_transport, int is_udp)
{
  ip_csum_t sum;
  u16 old_len;

  len = clib_net_to_host_u16 (len);
  old_len = ip4->length;

  if (is_transport)
    {
      u8 prot = is_udp ? IP_PROTOCOL_UDP : IP_PROTOCOL_IPSEC_ESP;

      sum = ip_csum_update (ip4->checksum, ip4->protocol,
			    prot, ip4_header_t, protocol);
      ip4->protocol = prot;

      sum = ip_csum_update (sum, old_len, len, ip4_header_t, length);
    }
  else
    sum = ip_csum_update (ip4->checksum, old_len, len, ip4_header_t, length);

  ip4->length = len;
  ip4->checksum = ip_csum_fold (sum);
}
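/* The IPv4 header checksum is patched incrementally (in the style of
 * RFC 1624): only the protocol and total-length fields change here, so
 * ip_csum_update () folds the old/new values into the existing checksum
 * rather than recomputing it over the whole header. */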
static_always_inline void
esp_fill_udp_hdr (ipsec_sa_t * sa, udp_header_t * udp, u16 len)
{
  clib_memcpy_fast (udp, &sa->udp_hdr, sizeof (udp_header_t));
  udp->length = clib_net_to_host_u16 (len);
}
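/* The UDP header template in sa->udp_hdr is prepared when the SA is
 * configured for NAT-T style UDP encapsulation; only the length field
 * varies per packet. A zero UDP checksum is permitted for
 * UDP-encapsulated ESP over IPv4 (RFC 3948). */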
static_always_inline u8
ext_hdr_is_pre_esp (u8 nexthdr)
{
#ifdef CLIB_HAVE_VEC128
  static const u8x16 ext_hdr_types = {
    IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS,
    IP_PROTOCOL_IPV6_ROUTE,
    IP_PROTOCOL_IPV6_FRAGMENTATION,
  };

  return !u8x16_is_all_zero (ext_hdr_types == u8x16_splat (nexthdr));
#else
  return ((nexthdr == IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS) ||
	  (nexthdr == IP_PROTOCOL_IPV6_ROUTE) ||
	  (nexthdr == IP_PROTOCOL_IPV6_FRAGMENTATION));
#endif
}
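/* Note on the vector variant above: the unspecified lanes of
 * ext_hdr_types are zero-initialized, and zero happens to be
 * IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS, which is already a member of the
 * set, so the 16-lane compare remains an exact membership test. */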
static_always_inline u8
esp_get_ip6_hdr_len (ip6_header_t * ip6, ip6_ext_header_t ** ext_hdr)
{
  /* this code assumes that HbH, route and frag headers will be before
     others, if that is not the case, they will end up encrypted */
  u8 len = sizeof (ip6_header_t);
  ip6_ext_header_t *p;

  /* if the next header is not an extension header */
  if (ext_hdr_is_pre_esp (ip6->protocol) == 0)
    {
      *ext_hdr = NULL;
      return len;
    }

  p = (void *) (ip6 + 1);
  len += ip6_ext_header_len (p);

  while (ext_hdr_is_pre_esp (p->next_hdr))
    {
      p = ip6_ext_next_header (p);
      len += ip6_ext_header_len (p);
    }

  *ext_hdr = p;
  return len;
}
static_always_inline void
esp_process_chained_ops (vlib_main_t * vm, vlib_node_runtime_t * node,
			 vnet_crypto_op_t * ops, vlib_buffer_t * b[],
			 u16 * nexts, vnet_crypto_op_chunk_t * chunks,
			 u16 drop_next)
{
  u32 n_fail, n_ops = vec_len (ops);
  vnet_crypto_op_t *op = ops;

  if (n_ops == 0)
    return;

  n_fail = n_ops - vnet_crypto_process_chained_ops (vm, op, chunks, n_ops);

  while (n_fail)
    {
      ASSERT (op - ops < n_ops);

      if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
	{
	  u32 bi = op->user_data;
	  b[bi]->error = node->errors[ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR];
	  nexts[bi] = drop_next;
	  n_fail--;
	}
      op++;
    }
}
static_always_inline void
esp_process_ops (vlib_main_t * vm, vlib_node_runtime_t * node,
		 vnet_crypto_op_t * ops, vlib_buffer_t * b[], u16 * nexts,
		 u16 drop_next)
{
  u32 n_fail, n_ops = vec_len (ops);
  vnet_crypto_op_t *op = ops;

  if (n_ops == 0)
    return;

  n_fail = n_ops - vnet_crypto_process_ops (vm, op, n_ops);

  while (n_fail)
    {
      ASSERT (op - ops < n_ops);

      if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
	{
	  u32 bi = op->user_data;
	  b[bi]->error = node->errors[ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR];
	  nexts[bi] = drop_next;
	  n_fail--;
	}
      op++;
    }
}
typedef struct
{
  u32 salt;
  u64 iv;
} __clib_packed esp_gcm_nonce_t;

STATIC_ASSERT_SIZEOF (esp_gcm_nonce_t, 12);
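/* Per RFC 4106, the 96-bit GCM nonce is the 4-byte salt from the key
 * material followed by the 8-byte per-packet IV; the IV travels in the
 * packet, the salt never does. */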
static_always_inline u32
esp_encrypt_chain_crypto (vlib_main_t * vm, ipsec_per_thread_data_t * ptd,
			  ipsec_sa_t * sa0, vlib_buffer_t * b,
			  vlib_buffer_t * lb, u8 icv_sz, u8 * start,
			  u32 start_len, u16 * n_ch)
{
  vnet_crypto_op_chunk_t *ch;
  vlib_buffer_t *cb = b;
  u32 total_len;
  u16 n_chunks = 1;

  vec_add2 (ptd->chunks, ch, 1);
  total_len = ch->len = start_len;
  ch->src = ch->dst = start;
  cb = vlib_get_buffer (vm, cb->next_buffer);

  while (1)
    {
      vec_add2 (ptd->chunks, ch, 1);
      n_chunks += 1;
      if (lb == cb)
	total_len += ch->len = cb->current_length - icv_sz;
      else
	total_len += ch->len = cb->current_length;
      ch->src = ch->dst = vlib_buffer_get_current (cb);

      if (!(cb->flags & VLIB_BUFFER_NEXT_PRESENT))
	break;

      cb = vlib_get_buffer (vm, cb->next_buffer);
    }

  if (n_ch)
    *n_ch = n_chunks;

  return total_len;
}
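/* The chunk vector built above holds one vnet_crypto_op_chunk_t per
 * buffer in the chain; the chunk covering the last buffer stops short
 * of the ICV, which the crypto engine writes separately through
 * op->tag or op->digest. */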
static_always_inline u32
esp_encrypt_chain_integ (vlib_main_t * vm, ipsec_per_thread_data_t * ptd,
			 ipsec_sa_t * sa0, vlib_buffer_t * b,
			 vlib_buffer_t * lb, u8 icv_sz, u8 * start,
			 u32 start_len, u8 * digest, u16 * n_ch)
{
  vnet_crypto_op_chunk_t *ch;
  vlib_buffer_t *cb = b;
  u32 total_len;
  u16 n_chunks = 1;

  vec_add2 (ptd->chunks, ch, 1);
  total_len = ch->len = start_len;
  ch->src = start;
  cb = vlib_get_buffer (vm, cb->next_buffer);

  while (1)
    {
      vec_add2 (ptd->chunks, ch, 1);
      n_chunks += 1;
      if (lb == cb)
	{
	  total_len += ch->len = cb->current_length - icv_sz;
	  if (ipsec_sa_is_set_USE_ESN (sa0))
	    {
	      u32 seq_hi = clib_net_to_host_u32 (sa0->seq_hi);
	      clib_memcpy_fast (digest, &seq_hi, sizeof (seq_hi));
	      ch->len += sizeof (seq_hi);
	      total_len += sizeof (seq_hi);
	    }
	}
      else
	total_len += ch->len = cb->current_length;
      ch->src = vlib_buffer_get_current (cb);

      if (!(cb->flags & VLIB_BUFFER_NEXT_PRESENT))
	break;

      cb = vlib_get_buffer (vm, cb->next_buffer);
    }

  if (n_ch)
    *n_ch = n_chunks;

  return total_len;
}
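/* With extended sequence numbers (RFC 4303, appendix A), the high 32
 * bits of the ESN are appended to the integrity-checked data in the
 * space that will later hold the ICV; they are authenticated but never
 * transmitted. */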
static_always_inline void
esp_prepare_sync_op (vlib_main_t * vm, ipsec_per_thread_data_t * ptd,
		     vnet_crypto_op_t ** crypto_ops,
		     vnet_crypto_op_t ** integ_ops, ipsec_sa_t * sa0,
		     u8 * payload, u16 payload_len, u8 iv_sz, u8 icv_sz,
		     vlib_buffer_t ** bufs, vlib_buffer_t ** b,
		     vlib_buffer_t * lb, u32 hdr_len, esp_header_t * esp,
		     esp_gcm_nonce_t * nonce)
{
  if (sa0->crypto_enc_op_id)
    {
      vnet_crypto_op_t *op;
      vec_add2_aligned (crypto_ops[0], op, 1, CLIB_CACHE_LINE_BYTES);
      vnet_crypto_op_init (op, sa0->crypto_enc_op_id);

      op->src = op->dst = payload;
      op->key_index = sa0->crypto_key_index;
      op->len = payload_len - icv_sz;
      op->user_data = b - bufs;

      if (ipsec_sa_is_set_IS_AEAD (sa0))
	{
	  /*
	   * construct the AAD in a scratch space in front
	   * of the IP header.
	   */
	  op->aad = payload - hdr_len - sizeof (esp_aead_t);
	  op->aad_len = esp_aad_fill (op->aad, esp, sa0);

	  op->tag = payload + op->len;
	  op->tag_len = 16;

	  u64 *iv = (u64 *) (payload - iv_sz);
	  nonce->salt = sa0->salt;
	  nonce->iv = *iv = clib_host_to_net_u64 (sa0->gcm_iv_counter++);
	  op->iv = (u8 *) nonce;
	}
      else
	{
	  op->iv = payload - iv_sz;
	  op->flags = VNET_CRYPTO_OP_FLAG_INIT_IV;
	}

      if (lb != b[0])
	{
	  /* is chained */
	  op->flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
	  op->chunk_index = vec_len (ptd->chunks);
	  op->tag = vlib_buffer_get_tail (lb) - icv_sz;
	  esp_encrypt_chain_crypto (vm, ptd, sa0, b[0], lb, icv_sz, payload,
				    payload_len, &op->n_chunks);
	}
    }

  if (sa0->integ_op_id)
    {
      vnet_crypto_op_t *op;
      vec_add2_aligned (integ_ops[0], op, 1, CLIB_CACHE_LINE_BYTES);
      vnet_crypto_op_init (op, sa0->integ_op_id);
      op->src = payload - iv_sz - sizeof (esp_header_t);
      op->digest = payload + payload_len - icv_sz;
      op->key_index = sa0->integ_key_index;
      op->digest_len = icv_sz;
      op->len = payload_len - icv_sz + iv_sz + sizeof (esp_header_t);
      op->user_data = b - bufs;

      if (lb != b[0])
	{
	  /* is chained */
	  op->flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
	  op->chunk_index = vec_len (ptd->chunks);
	  op->digest = vlib_buffer_get_tail (lb) - icv_sz;

	  esp_encrypt_chain_integ (vm, ptd, sa0, b[0], lb, icv_sz,
				   payload - iv_sz - sizeof (esp_header_t),
				   payload_len + iv_sz +
				   sizeof (esp_header_t), op->digest,
				   &op->n_chunks);
	}
      else if (ipsec_sa_is_set_USE_ESN (sa0))
	{
	  u32 seq_hi = clib_net_to_host_u32 (sa0->seq_hi);
	  clib_memcpy_fast (op->digest, &seq_hi, sizeof (seq_hi));
	  op->len += sizeof (seq_hi);
	}
    }
}
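/* A sketch of the scratch layout assumed by the AEAD branch above,
 * growing downward in front of the payload: the AAD (SPI and sequence
 * number, written by esp_aad_fill ()) sits sizeof (esp_aead_t) bytes
 * before the rebuilt headers, while the nonce lives out-of-band in the
 * caller's nonces[] array, so neither enlarges the packet. */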
static_always_inline int
esp_prepare_async_frame (vlib_main_t * vm, ipsec_per_thread_data_t * ptd,
			 vnet_crypto_async_frame_t ** async_frame,
			 ipsec_sa_t * sa, vlib_buffer_t * b,
			 esp_header_t * esp, u8 * payload, u32 payload_len,
			 u8 iv_sz, u8 icv_sz, u32 bi, u16 * next, u32 hdr_len,
			 u16 async_next, vlib_buffer_t * lb)
{
  esp_post_data_t *post = esp_post_data (b);
  u8 *tag, *iv, *aad = 0;
  u8 flag = 0;
  u32 key_index;
  i16 crypto_start_offset, integ_start_offset = 0;
  u16 crypto_total_len, integ_total_len;

  post->next_index = next[0];
  next[0] = ESP_ENCRYPT_NEXT_PENDING;

  /* crypto */
  crypto_start_offset = payload - b->data;
  crypto_total_len = integ_total_len = payload_len - icv_sz;
  tag = payload + crypto_total_len;

  /* aead */
  if (ipsec_sa_is_set_IS_AEAD (sa))
    {
      esp_gcm_nonce_t *nonce;
      u64 *pkt_iv = (u64 *) (payload - iv_sz);

      aad = payload - hdr_len - sizeof (esp_aead_t);
      esp_aad_fill (aad, esp, sa);
      nonce = (esp_gcm_nonce_t *) (aad - sizeof (*nonce));
      nonce->salt = sa->salt;
      nonce->iv = *pkt_iv = clib_host_to_net_u64 (sa->gcm_iv_counter++);
      iv = (u8 *) nonce;
      key_index = sa->crypto_key_index;

      if (lb != b)
	{
	  /* chain */
	  flag |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
	  tag = vlib_buffer_get_tail (lb) - icv_sz;
	  crypto_total_len = esp_encrypt_chain_crypto (vm, ptd, sa, b, lb,
						       icv_sz, payload,
						       payload_len, 0);
	}
      goto out;
    }

  /* cipher then hash */
  iv = payload - iv_sz;
  integ_start_offset = crypto_start_offset - iv_sz - sizeof (esp_header_t);
  integ_total_len += iv_sz + sizeof (esp_header_t);
  flag |= VNET_CRYPTO_OP_FLAG_INIT_IV;
  key_index = sa->linked_key_index;

  if (b != lb)
    {
      flag |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
      crypto_total_len = esp_encrypt_chain_crypto (vm, ptd, sa, b, lb,
						   icv_sz, payload,
						   payload_len, 0);
      tag = vlib_buffer_get_tail (lb) - icv_sz;
      integ_total_len = esp_encrypt_chain_integ (vm, ptd, sa, b, lb, icv_sz,
						 payload - iv_sz -
						 sizeof (esp_header_t),
						 payload_len + iv_sz +
						 sizeof (esp_header_t),
						 tag, 0);
    }
  else if (ipsec_sa_is_set_USE_ESN (sa) && !ipsec_sa_is_set_IS_AEAD (sa))
    {
      u32 seq_hi = clib_net_to_host_u32 (sa->seq_hi);
      clib_memcpy_fast (tag, &seq_hi, sizeof (seq_hi));
      integ_total_len += sizeof (seq_hi);
    }

out:
  return vnet_crypto_async_add_to_frame (vm, async_frame, key_index,
					 crypto_total_len,
					 integ_total_len - crypto_total_len,
					 crypto_start_offset,
					 integ_start_offset, bi, async_next,
					 iv, tag, aad, flag);
}
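/* For the cipher-then-hash path, the integrity region is handed to
 * vnet_crypto_async_add_to_frame () as a delta over the crypto region
 * (integ_total_len - crypto_total_len): the ESP header, the IV and any
 * ESN bytes are authenticated but not encrypted. */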
/* when submitting a frame fails, drop all buffers in the frame */
static_always_inline void
esp_async_recycle_failed_submit (vnet_crypto_async_frame_t * f,
				 vlib_buffer_t ** b, u16 * next,
				 u16 drop_next)
{
  u32 n_drop = f->n_elts;
  while (--n_drop)
    {
      (b - n_drop)[0]->error = ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR;
      (next - n_drop)[0] = drop_next;
    }
  vnet_crypto_async_reset_frame (f);
}
always_inline uword
esp_encrypt_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
		    vlib_frame_t * frame, int is_ip6, int is_tun,
		    u16 async_next)
{
  ipsec_main_t *im = &ipsec_main;
  ipsec_per_thread_data_t *ptd = vec_elt_at_index (im->ptd, vm->thread_index);
  u32 *from = vlib_frame_vector_args (frame);
  u32 n_left = frame->n_vectors;
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
  u16 nexts[VLIB_FRAME_SIZE], *next = nexts;
  esp_gcm_nonce_t nonces[VLIB_FRAME_SIZE], *nonce = nonces;
  u32 thread_index = vm->thread_index;
  u16 buffer_data_size = vlib_buffer_get_default_data_size (vm);
  u32 current_sa_index = ~0, current_sa_packets = 0;
  u32 current_sa_bytes = 0, spi = 0;
  u8 block_sz = 0, iv_sz = 0, icv_sz = 0;
  ipsec_sa_t *sa0 = 0;
  vlib_buffer_t *lb;
  vnet_crypto_op_t **crypto_ops = &ptd->crypto_ops;
  vnet_crypto_op_t **integ_ops = &ptd->integ_ops;
  vnet_crypto_async_frame_t *async_frame = 0;
  int is_async = im->async_mode;
  vnet_crypto_async_op_id_t last_async_op = ~0;
  u16 drop_next = (is_ip6 ? ESP_ENCRYPT_NEXT_DROP6 : ESP_ENCRYPT_NEXT_DROP4);

  vlib_get_buffers (vm, from, b, n_left);
  if (!is_async)
    {
      vec_reset_length (ptd->crypto_ops);
      vec_reset_length (ptd->integ_ops);
      vec_reset_length (ptd->chained_crypto_ops);
      vec_reset_length (ptd->chained_integ_ops);
    }
  vec_reset_length (ptd->chunks);
  while (n_left > 0)
    {
      u32 sa_index0;
      dpo_id_t *dpo;
      esp_header_t *esp;
      u8 *payload, *next_hdr_ptr;
      u16 payload_len, payload_len_total, n_bufs;
      u32 hdr_len;

      if (n_left > 2)
	{
	  u8 *p;
	  vlib_prefetch_buffer_header (b[2], LOAD);
	  p = vlib_buffer_get_current (b[1]);
	  CLIB_PREFETCH (p, CLIB_CACHE_LINE_BYTES, LOAD);
	  p -= CLIB_CACHE_LINE_BYTES;
	  CLIB_PREFETCH (p, CLIB_CACHE_LINE_BYTES, LOAD);
	}

      if (is_tun)
	{
	  /* we are on an ipsec tunnel's feature arc */
	  vnet_buffer (b[0])->ipsec.sad_index =
	    sa_index0 = ipsec_tun_protect_get_sa_out
	    (vnet_buffer (b[0])->ip.adj_index[VLIB_TX]);
	}
      else
	sa_index0 = vnet_buffer (b[0])->ipsec.sad_index;

      if (sa_index0 != current_sa_index)
	{
	  if (current_sa_packets)
	    vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
					     current_sa_index,
					     current_sa_packets,
					     current_sa_bytes);
	  current_sa_packets = current_sa_bytes = 0;

	  sa0 = pool_elt_at_index (im->sad, sa_index0);
	  current_sa_index = sa_index0;
	  spi = clib_net_to_host_u32 (sa0->spi);
	  block_sz = sa0->crypto_block_size;
	  icv_sz = sa0->integ_icv_size;
	  iv_sz = sa0->crypto_iv_size;

	  /* submit the open frame when the op_id differs from the previous
	   * one */
	  if (is_async && sa0->crypto_async_enc_op_id != last_async_op)
	    {
	      if (async_frame && async_frame->n_elts)
		{
		  if (vnet_crypto_async_submit_open_frame (vm, async_frame)
		      < 0)
		    esp_async_recycle_failed_submit (async_frame, b,
						     next, drop_next);
		}
	      async_frame =
		vnet_crypto_async_get_frame (vm, sa0->crypto_async_enc_op_id);
	      last_async_op = sa0->crypto_async_enc_op_id;
	    }
	}

      if (PREDICT_FALSE (~0 == sa0->encrypt_thread_index))
	{
	  /* this is the first packet to use this SA, claim the SA
	   * for this thread. this could happen simultaneously on
	   * another thread */
	  clib_atomic_cmp_and_swap (&sa0->encrypt_thread_index, ~0,
				    ipsec_sa_assign_thread (thread_index));
	}

      if (PREDICT_TRUE (thread_index != sa0->encrypt_thread_index))
	{
	  next[0] = (is_ip6 ?
		     ESP_ENCRYPT_NEXT_HANDOFF6 : ESP_ENCRYPT_NEXT_HANDOFF4);
	  goto trace;
	}

      lb = b[0];
      n_bufs = vlib_buffer_chain_linearize (vm, b[0]);
      if (n_bufs == 0)
	{
	  b[0]->error = node->errors[ESP_ENCRYPT_ERROR_NO_BUFFERS];
	  next[0] = drop_next;
	  goto trace;
	}

      if (n_bufs > 1)
	{
	  /* find last buffer in the chain */
	  while (lb->flags & VLIB_BUFFER_NEXT_PRESENT)
	    lb = vlib_get_buffer (vm, lb->next_buffer);
	}
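      /* vlib_buffer_chain_linearize () coalesces the chain when the packet
       * fits into a single buffer; when it does not, the chain survives and
       * lb tracks the last buffer so the footer, padding and ICV land at
       * the true end of the packet. */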
      if (PREDICT_FALSE (esp_seq_advance (sa0)))
	{
	  b[0]->error = node->errors[ESP_ENCRYPT_ERROR_SEQ_CYCLED];
	  next[0] = drop_next;
	  goto trace;
	}

      /* space for IV */
      hdr_len = iv_sz;

      if (ipsec_sa_is_set_IS_TUNNEL (sa0))
	{
	  payload = vlib_buffer_get_current (b[0]);
	  next_hdr_ptr = esp_add_footer_and_icv (vm, &lb, block_sz, icv_sz,
						 next, node,
						 buffer_data_size,
						 vlib_buffer_length_in_chain
						 (vm, b[0]));
	  if (!next_hdr_ptr)
	    {
	      b[0]->error = node->errors[ESP_ENCRYPT_ERROR_NO_BUFFERS];
	      next[0] = drop_next;
	      goto trace;
	    }
	  b[0]->flags &= ~VLIB_BUFFER_TOTAL_LENGTH_VALID;
	  payload_len = b[0]->current_length;
	  payload_len_total = vlib_buffer_length_in_chain (vm, b[0]);

	  /* ESP header */
	  hdr_len += sizeof (*esp);
	  esp = (esp_header_t *) (payload - hdr_len);

	  /* optional UDP header */
	  if (ipsec_sa_is_set_UDP_ENCAP (sa0))
	    {
	      hdr_len += sizeof (udp_header_t);
	      esp_fill_udp_hdr (sa0, (udp_header_t *) (payload - hdr_len),
				payload_len_total + hdr_len);
	    }

	  /* IP header */
	  if (ipsec_sa_is_set_IS_TUNNEL_V6 (sa0))
	    {
	      ip6_header_t *ip6;
	      u16 len = sizeof (ip6_header_t);
	      hdr_len += len;
	      ip6 = (ip6_header_t *) (payload - hdr_len);
	      clib_memcpy_fast (ip6, &sa0->ip6_hdr, len);
	      *next_hdr_ptr = (is_ip6 ?
			       IP_PROTOCOL_IPV6 : IP_PROTOCOL_IP_IN_IP);
	      len = payload_len_total + hdr_len - len;
	      ip6->payload_length = clib_net_to_host_u16 (len);
	    }
	  else
	    {
	      ip4_header_t *ip4;
	      u16 len = sizeof (ip4_header_t);
	      hdr_len += len;
	      ip4 = (ip4_header_t *) (payload - hdr_len);
	      clib_memcpy_fast (ip4, &sa0->ip4_hdr, len);
	      *next_hdr_ptr = (is_ip6 ?
			       IP_PROTOCOL_IPV6 : IP_PROTOCOL_IP_IN_IP);
	      len = payload_len_total + hdr_len;
	      esp_update_ip4_hdr (ip4, len, /* is_transport */ 0, 0);
	    }

	  dpo = &sa0->dpo;
	  if (!is_tun)
	    {
	      next[0] = dpo->dpoi_next_node;
	      vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = dpo->dpoi_index;
	    }
	  else
	    next[0] = ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT;
	}
      else			/* transport mode */
	{
	  u8 *l2_hdr, l2_len, *ip_hdr, ip_len;
	  ip6_ext_header_t *ext_hdr;
	  udp_header_t *udp = 0;
	  u16 udp_len = 0;
	  u8 *old_ip_hdr = vlib_buffer_get_current (b[0]);

	  ip_len = is_ip6 ?
	    esp_get_ip6_hdr_len ((ip6_header_t *) old_ip_hdr, &ext_hdr) :
	    ip4_header_bytes ((ip4_header_t *) old_ip_hdr);

	  vlib_buffer_advance (b[0], ip_len);
	  payload = vlib_buffer_get_current (b[0]);
	  next_hdr_ptr = esp_add_footer_and_icv (vm, &lb, block_sz, icv_sz,
						 next, node,
						 buffer_data_size,
						 vlib_buffer_length_in_chain
						 (vm, b[0]));
	  if (!next_hdr_ptr)
	    goto trace;

	  b[0]->flags &= ~VLIB_BUFFER_TOTAL_LENGTH_VALID;
	  payload_len = b[0]->current_length;
	  payload_len_total = vlib_buffer_length_in_chain (vm, b[0]);

	  /* ESP header */
	  hdr_len += sizeof (*esp);
	  esp = (esp_header_t *) (payload - hdr_len);

	  /* optional UDP header */
	  if (ipsec_sa_is_set_UDP_ENCAP (sa0))
	    {
	      hdr_len += sizeof (udp_header_t);
	      udp = (udp_header_t *) (payload - hdr_len);
	    }

	  /* IP header */
	  hdr_len += ip_len;
	  ip_hdr = payload - hdr_len;

	  /* L2 header */
	  if (!is_tun)
	    {
	      l2_len = vnet_buffer (b[0])->ip.save_rewrite_length;
	      hdr_len += l2_len;
	      l2_hdr = payload - hdr_len;

	      /* copy l2 and ip header */
	      clib_memcpy_le32 (l2_hdr, old_ip_hdr - l2_len, l2_len);
	    }
	  else
	    l2_len = 0;

	  if (is_ip6)
	    {
	      ip6_header_t *ip6 = (ip6_header_t *) (old_ip_hdr);
	      if (PREDICT_TRUE (NULL == ext_hdr))
		{
		  *next_hdr_ptr = ip6->protocol;
		  ip6->protocol = IP_PROTOCOL_IPSEC_ESP;
		}
	      else
		{
		  *next_hdr_ptr = ext_hdr->next_hdr;
		  ext_hdr->next_hdr = IP_PROTOCOL_IPSEC_ESP;
		}
	      ip6->payload_length =
		clib_host_to_net_u16 (payload_len_total + hdr_len - l2_len -
				      sizeof (ip6_header_t));
	    }
	  else
	    {
	      u16 len;
	      ip4_header_t *ip4 = (ip4_header_t *) (old_ip_hdr);
	      *next_hdr_ptr = ip4->protocol;
	      len = payload_len_total + hdr_len - l2_len;
	      if (udp)
		{
		  esp_update_ip4_hdr (ip4, len, /* is_transport */ 1, 1);
		  udp_len = len - ip_len;
		}
	      else
		esp_update_ip4_hdr (ip4, len, /* is_transport */ 1, 0);
	    }

	  clib_memcpy_le64 (ip_hdr, old_ip_hdr, ip_len);

	  if (udp)
	    esp_fill_udp_hdr (sa0, udp, udp_len);

	  next[0] = ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT;
	}

      if (lb != b[0])
	{
	  crypto_ops = &ptd->chained_crypto_ops;
	  integ_ops = &ptd->chained_integ_ops;
	}
      else
	{
	  crypto_ops = &ptd->crypto_ops;
	  integ_ops = &ptd->integ_ops;
	}

      esp->spi = spi;
      esp->seq = clib_net_to_host_u32 (sa0->seq);

      if (is_async)
	{
	  if (PREDICT_FALSE (sa0->crypto_async_enc_op_id == 0))
	    goto trace;

	  if (esp_prepare_async_frame (vm, ptd, &async_frame, sa0, b[0], esp,
				       payload, payload_len, iv_sz,
				       icv_sz, from[b - bufs], next, hdr_len,
				       async_next, lb))
	    {
	      esp_async_recycle_failed_submit (async_frame, b, next,
					       drop_next);
	      goto trace;
	    }
	}
      else
	esp_prepare_sync_op (vm, ptd, crypto_ops, integ_ops, sa0, payload,
			     payload_len, iv_sz, icv_sz, bufs, b, lb,
			     hdr_len, esp, nonce++);

      vlib_buffer_advance (b[0], 0LL - hdr_len);
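      /* the negative advance exposes the headers just written in front of
       * the payload (tunnel/transport IP, optional UDP, ESP, IV), so
       * current_data now points at the outermost header */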
      current_sa_packets += 1;
      current_sa_bytes += payload_len_total;

    trace:
      if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
	{
	  esp_encrypt_trace_t *tr = vlib_add_trace (vm, node, b[0],
						    sizeof (*tr));
	  tr->sa_index = sa_index0;
	  tr->spi = sa0->spi;
	  tr->seq = sa0->seq;
	  tr->sa_seq_hi = sa0->seq_hi;
	  tr->udp_encap = ipsec_sa_is_set_UDP_ENCAP (sa0);
	  tr->crypto_alg = sa0->crypto_alg;
	  tr->integ_alg = sa0->integ_alg;
	}

      /* next */
      n_left -= 1;
      next += 1;
      b += 1;
    }

  vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
				   current_sa_index, current_sa_packets,
				   current_sa_bytes);
  if (!is_async)
    {
      esp_process_ops (vm, node, ptd->crypto_ops, bufs, nexts, drop_next);
      esp_process_chained_ops (vm, node, ptd->chained_crypto_ops, bufs, nexts,
			       ptd->chunks, drop_next);

      esp_process_ops (vm, node, ptd->integ_ops, bufs, nexts, drop_next);
      esp_process_chained_ops (vm, node, ptd->chained_integ_ops, bufs, nexts,
			       ptd->chunks, drop_next);
    }
  else if (async_frame && async_frame->n_elts)
    {
      if (vnet_crypto_async_submit_open_frame (vm, async_frame) < 0)
	esp_async_recycle_failed_submit (async_frame, b, next, drop_next);
    }

  vlib_node_increment_counter (vm, node->node_index,
			       ESP_ENCRYPT_ERROR_RX_PKTS, frame->n_vectors);

  vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);
  return frame->n_vectors;
}
always_inline uword
esp_encrypt_post_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
			 vlib_frame_t * frame)
{
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
  u16 nexts[VLIB_FRAME_SIZE], *next = nexts;
  u32 *from = vlib_frame_vector_args (frame);
  u32 n_left = frame->n_vectors;

  vlib_get_buffers (vm, from, b, n_left);

  if (n_left >= 4)
    {
      vlib_prefetch_buffer_header (b[0], LOAD);
      vlib_prefetch_buffer_header (b[1], LOAD);
      vlib_prefetch_buffer_header (b[2], LOAD);
      vlib_prefetch_buffer_header (b[3], LOAD);
    }

  while (n_left > 8)
    {
      vlib_prefetch_buffer_header (b[4], LOAD);
      vlib_prefetch_buffer_header (b[5], LOAD);
      vlib_prefetch_buffer_header (b[6], LOAD);
      vlib_prefetch_buffer_header (b[7], LOAD);

      next[0] = (esp_post_data (b[0]))->next_index;
      next[1] = (esp_post_data (b[1]))->next_index;
      next[2] = (esp_post_data (b[2]))->next_index;
      next[3] = (esp_post_data (b[3]))->next_index;

      if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE))
	{
	  if (b[0]->flags & VLIB_BUFFER_IS_TRACED)
	    {
	      esp_encrypt_post_trace_t *tr = vlib_add_trace (vm, node, b[0],
							     sizeof (*tr));
	      tr->next_index = next[0];
	    }
	  if (b[1]->flags & VLIB_BUFFER_IS_TRACED)
	    {
	      esp_encrypt_post_trace_t *tr = vlib_add_trace (vm, node, b[1],
							     sizeof (*tr));
	      tr->next_index = next[1];
	    }
	  if (b[2]->flags & VLIB_BUFFER_IS_TRACED)
	    {
	      esp_encrypt_post_trace_t *tr = vlib_add_trace (vm, node, b[2],
							     sizeof (*tr));
	      tr->next_index = next[2];
	    }
	  if (b[3]->flags & VLIB_BUFFER_IS_TRACED)
	    {
	      esp_encrypt_post_trace_t *tr = vlib_add_trace (vm, node, b[3],
							     sizeof (*tr));
	      tr->next_index = next[3];
	    }
	}

      b += 4;
      next += 4;
      n_left -= 4;
    }

  while (n_left > 0)
    {
      next[0] = (esp_post_data (b[0]))->next_index;
      if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
	{
	  esp_encrypt_post_trace_t *tr = vlib_add_trace (vm, node, b[0],
							 sizeof (*tr));
	  tr->next_index = next[0];
	}

      b += 1;
      next += 1;
      n_left -= 1;
    }

  vlib_node_increment_counter (vm, node->node_index,
			       ESP_ENCRYPT_ERROR_POST_RX_PKTS,
			       frame->n_vectors);
  vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);
  return frame->n_vectors;
}
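/* The post nodes only dispatch: the async crypto engine completes into
 * them, and each buffer's final next hop was stashed in
 * esp_post_data ()->next_index by esp_prepare_async_frame (). */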
VLIB_NODE_FN (esp4_encrypt_node) (vlib_main_t * vm,
				  vlib_node_runtime_t * node,
				  vlib_frame_t * from_frame)
{
  return esp_encrypt_inline (vm, node, from_frame, 0 /* is_ip6 */ , 0,
			     esp_encrypt_async_next.esp4_post_next);
}

VLIB_REGISTER_NODE (esp4_encrypt_node) = {
  .name = "esp4-encrypt",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_encrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(esp_encrypt_error_strings),
  .error_strings = esp_encrypt_error_strings,

  .n_next_nodes = ESP_ENCRYPT_N_NEXT,
  .next_nodes = {
    [ESP_ENCRYPT_NEXT_DROP4] = "ip4-drop",
    [ESP_ENCRYPT_NEXT_DROP6] = "ip6-drop",
    [ESP_ENCRYPT_NEXT_HANDOFF4] = "esp4-encrypt-handoff",
    [ESP_ENCRYPT_NEXT_HANDOFF6] = "esp6-encrypt-handoff",
    [ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT] = "interface-output",
    [ESP_ENCRYPT_NEXT_PENDING] = "esp-encrypt-pending",
  },
};
VLIB_NODE_FN (esp4_encrypt_post_node) (vlib_main_t * vm,
				       vlib_node_runtime_t * node,
				       vlib_frame_t * from_frame)
{
  return esp_encrypt_post_inline (vm, node, from_frame);
}

VLIB_REGISTER_NODE (esp4_encrypt_post_node) = {
  .name = "esp4-encrypt-post",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_post_encrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .sibling_of = "esp4-encrypt",

  .n_errors = ARRAY_LEN(esp_encrypt_error_strings),
  .error_strings = esp_encrypt_error_strings,
};
VLIB_NODE_FN (esp6_encrypt_node) (vlib_main_t * vm,
				  vlib_node_runtime_t * node,
				  vlib_frame_t * from_frame)
{
  return esp_encrypt_inline (vm, node, from_frame, 1 /* is_ip6 */ , 0,
			     esp_encrypt_async_next.esp6_post_next);
}

VLIB_REGISTER_NODE (esp6_encrypt_node) = {
  .name = "esp6-encrypt",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_encrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .sibling_of = "esp4-encrypt",

  .n_errors = ARRAY_LEN(esp_encrypt_error_strings),
  .error_strings = esp_encrypt_error_strings,
};
VLIB_NODE_FN (esp6_encrypt_post_node) (vlib_main_t * vm,
				       vlib_node_runtime_t * node,
				       vlib_frame_t * from_frame)
{
  return esp_encrypt_post_inline (vm, node, from_frame);
}

VLIB_REGISTER_NODE (esp6_encrypt_post_node) = {
  .name = "esp6-encrypt-post",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_post_encrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .sibling_of = "esp4-encrypt",

  .n_errors = ARRAY_LEN(esp_encrypt_error_strings),
  .error_strings = esp_encrypt_error_strings,
};
VLIB_NODE_FN (esp4_encrypt_tun_node) (vlib_main_t * vm,
				      vlib_node_runtime_t * node,
				      vlib_frame_t * from_frame)
{
  return esp_encrypt_inline (vm, node, from_frame, 0 /* is_ip6 */ , 1,
			     esp_encrypt_async_next.esp4_tun_post_next);
}

VLIB_REGISTER_NODE (esp4_encrypt_tun_node) = {
  .name = "esp4-encrypt-tun",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_encrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(esp_encrypt_error_strings),
  .error_strings = esp_encrypt_error_strings,

  .n_next_nodes = ESP_ENCRYPT_N_NEXT,
  .next_nodes = {
    [ESP_ENCRYPT_NEXT_DROP4] = "ip4-drop",
    [ESP_ENCRYPT_NEXT_DROP6] = "ip6-drop",
    [ESP_ENCRYPT_NEXT_HANDOFF4] = "esp4-encrypt-tun-handoff",
    [ESP_ENCRYPT_NEXT_HANDOFF6] = "error-drop",
    [ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT] = "adj-midchain-tx",
    [ESP_ENCRYPT_NEXT_PENDING] = "esp-encrypt-pending",
  },
};
VLIB_NODE_FN (esp4_encrypt_tun_post_node) (vlib_main_t * vm,
					   vlib_node_runtime_t * node,
					   vlib_frame_t * from_frame)
{
  return esp_encrypt_post_inline (vm, node, from_frame);
}

VLIB_REGISTER_NODE (esp4_encrypt_tun_post_node) = {
  .name = "esp4-encrypt-tun-post",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_post_encrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .sibling_of = "esp4-encrypt-tun",

  .n_errors = ARRAY_LEN(esp_encrypt_error_strings),
  .error_strings = esp_encrypt_error_strings,
};
VLIB_NODE_FN (esp6_encrypt_tun_node) (vlib_main_t * vm,
				      vlib_node_runtime_t * node,
				      vlib_frame_t * from_frame)
{
  return esp_encrypt_inline (vm, node, from_frame, 1 /* is_ip6 */ , 1,
			     esp_encrypt_async_next.esp6_tun_post_next);
}

VLIB_REGISTER_NODE (esp6_encrypt_tun_node) = {
  .name = "esp6-encrypt-tun",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_encrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(esp_encrypt_error_strings),
  .error_strings = esp_encrypt_error_strings,

  .n_next_nodes = ESP_ENCRYPT_N_NEXT,
  .next_nodes = {
    [ESP_ENCRYPT_NEXT_DROP4] = "ip4-drop",
    [ESP_ENCRYPT_NEXT_DROP6] = "ip6-drop",
    [ESP_ENCRYPT_NEXT_HANDOFF4] = "error-drop",
    [ESP_ENCRYPT_NEXT_HANDOFF6] = "esp6-encrypt-tun-handoff",
    [ESP_ENCRYPT_NEXT_PENDING] = "esp-encrypt-pending",
    [ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT] = "adj-midchain-tx",
  },
};
VLIB_NODE_FN (esp6_encrypt_tun_post_node) (vlib_main_t * vm,
					   vlib_node_runtime_t * node,
					   vlib_frame_t * from_frame)
{
  return esp_encrypt_post_inline (vm, node, from_frame);
}

VLIB_REGISTER_NODE (esp6_encrypt_tun_post_node) = {
  .name = "esp6-encrypt-tun-post",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_post_encrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .sibling_of = "esp6-encrypt-tun",

  .n_errors = ARRAY_LEN(esp_encrypt_error_strings),
  .error_strings = esp_encrypt_error_strings,
};
typedef struct
{
  u32 sa_index;
} esp_no_crypto_trace_t;

static u8 *
format_esp_no_crypto_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  esp_no_crypto_trace_t *t = va_arg (*args, esp_no_crypto_trace_t *);

  s = format (s, "esp-no-crypto: sa-index %u", t->sa_index);

  return s;
}

typedef enum
{
  ESP_NO_CRYPTO_NEXT_DROP,
  ESP_NO_CRYPTO_N_NEXT,
} esp_no_crypto_next_t;

typedef enum
{
  ESP_NO_CRYPTO_ERROR_RX_PKTS,
} esp_no_crypto_error_t;

static char *esp_no_crypto_error_strings[] = {
  "Outbound ESP packets received",
};
always_inline uword
esp_no_crypto_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
		      vlib_frame_t * frame)
{
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
  u32 *from = vlib_frame_vector_args (frame);
  u32 n_left = frame->n_vectors;

  vlib_get_buffers (vm, from, b, n_left);

  while (n_left > 0)
    {
      u32 sa_index0;

      /* packets are always going to be dropped, but get the sa_index */
      sa_index0 = ipsec_tun_protect_get_sa_out
	(vnet_buffer (b[0])->ip.adj_index[VLIB_TX]);

      if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
	{
	  esp_no_crypto_trace_t *tr = vlib_add_trace (vm, node, b[0],
						      sizeof (*tr));
	  tr->sa_index = sa_index0;
	}

      n_left -= 1;
      b += 1;
    }

  vlib_node_increment_counter (vm, node->node_index,
			       ESP_NO_CRYPTO_ERROR_RX_PKTS, frame->n_vectors);

  vlib_buffer_enqueue_to_single_next (vm, node, from,
				      ESP_NO_CRYPTO_NEXT_DROP,
				      frame->n_vectors);

  return frame->n_vectors;
}
VLIB_NODE_FN (esp4_no_crypto_tun_node) (vlib_main_t * vm,
					vlib_node_runtime_t * node,
					vlib_frame_t * from_frame)
{
  return esp_no_crypto_inline (vm, node, from_frame);
}

VLIB_REGISTER_NODE (esp4_no_crypto_tun_node) =
{
  .name = "esp4-no-crypto",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_no_crypto_trace,
  .n_errors = ARRAY_LEN(esp_no_crypto_error_strings),
  .error_strings = esp_no_crypto_error_strings,
  .n_next_nodes = ESP_NO_CRYPTO_N_NEXT,
  .next_nodes = {
    [ESP_NO_CRYPTO_NEXT_DROP] = "ip4-drop",
  },
};
VLIB_NODE_FN (esp6_no_crypto_tun_node) (vlib_main_t * vm,
					vlib_node_runtime_t * node,
					vlib_frame_t * from_frame)
{
  return esp_no_crypto_inline (vm, node, from_frame);
}

VLIB_REGISTER_NODE (esp6_no_crypto_tun_node) =
{
  .name = "esp6-no-crypto",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_no_crypto_trace,
  .n_errors = ARRAY_LEN(esp_no_crypto_error_strings),
  .error_strings = esp_no_crypto_error_strings,
  .n_next_nodes = ESP_NO_CRYPTO_N_NEXT,
  .next_nodes = {
    [ESP_NO_CRYPTO_NEXT_DROP] = "ip6-drop",
  },
};
VLIB_NODE_FN (esp_encrypt_pending_node) (vlib_main_t * vm,
					 vlib_node_runtime_t * node,
					 vlib_frame_t * from_frame)
{
  return from_frame->n_vectors;
}

VLIB_REGISTER_NODE (esp_encrypt_pending_node) = {
  .name = "esp-encrypt-pending",
  .vector_size = sizeof (u32),
  .type = VLIB_NODE_TYPE_INTERNAL,
};
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */