/*
 * esp_decrypt.c : IPSec ESP decrypt node
 *
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vnet/vnet.h>
#include <vnet/api_errno.h>
#include <vnet/ip/ip.h>

#include <vnet/ipsec/ipsec.h>
#include <vnet/ipsec/esp.h>
#include <vnet/ipsec/ipsec_io.h>

#define foreach_esp_decrypt_next            \
_(DROP, "error-drop")                       \
_(IP4_INPUT, "ip4-input-no-checksum")       \
_(IP6_INPUT, "ip6-input")                   \
_(IPSEC_GRE_INPUT, "ipsec-gre-input")
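
/* The _() X-macro list above is expanded once here to build the next-node
 * enum (e.g. _(DROP, "error-drop") becomes ESP_DECRYPT_NEXT_DROP) and again
 * in the node registrations at the bottom of this file to build the
 * matching next-node name table, keeping the two in sync. */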

typedef enum
{
#define _(v, s) ESP_DECRYPT_NEXT_##v,
  foreach_esp_decrypt_next
#undef _
    ESP_DECRYPT_N_NEXT,
} esp_decrypt_next_t;

#define foreach_esp_decrypt_error                               \
 _(RX_PKTS, "ESP pkts received")                                \
 _(DECRYPTION_FAILED, "ESP decryption failed")                  \
 _(INTEG_ERROR, "Integrity check failed")                       \
 _(CRYPTO_ENGINE_ERROR, "crypto engine error (packet dropped)") \
 _(REPLAY, "SA replayed packet")                                \
 _(CHAINED_BUFFER, "chained buffers (packet dropped)")          \
 _(OVERSIZED_HEADER, "buffer with oversized header (dropped)")  \
 _(NO_TAIL_SPACE, "not enough buffer tail space (dropped)")
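
/* each entry expands to an ESP_DECRYPT_ERROR_* counter below and to the
 * human-readable string reported against this node for that counter */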

typedef enum
{
#define _(sym,str) ESP_DECRYPT_ERROR_##sym,
  foreach_esp_decrypt_error
#undef _
    ESP_DECRYPT_N_ERROR,
} esp_decrypt_error_t;

static char *esp_decrypt_error_strings[] = {
#define _(sym,string) string,
  foreach_esp_decrypt_error
#undef _
};

typedef struct
{
  u32 seq;
  ipsec_crypto_alg_t crypto_alg;
  ipsec_integ_alg_t integ_alg;
} esp_decrypt_trace_t;

/* packet trace format function */
static u8 *
format_esp_decrypt_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  esp_decrypt_trace_t *t = va_arg (*args, esp_decrypt_trace_t *);

  s = format (s, "esp: crypto %U integrity %U seq %u",
              format_ipsec_crypto_alg, t->crypto_alg,
              format_ipsec_integ_alg, t->integ_alg, t->seq);
  return s;
}

typedef struct
{
  union
  {
    struct
    {
      u8 icv_sz;
      u8 iv_sz;
      ipsec_sa_flags_t flags:8;
      u32 sa_index;
    };
    u64 sa_data;
  };

  i16 current_data;
  i16 current_length;
  u16 hdr_sz;
} esp_decrypt_packet_data_t;

STATIC_ASSERT_SIZEOF (esp_decrypt_packet_data_t, 2 * sizeof (u64));
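
/* the first pass writes one esp_decrypt_packet_data_t per packet and the
 * second pass walks that dense array instead of re-reading buffer metadata;
 * the assert above pins the element size so the array stays compact and
 * prefetch-friendly */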

#define ESP_ENCRYPT_PD_F_FD_TRANSPORT (1 << 2)

always_inline uword
esp_decrypt_inline (vlib_main_t * vm,
                    vlib_node_runtime_t * node, vlib_frame_t * from_frame,
                    int is_ip6)
{
  ipsec_main_t *im = &ipsec_main;
  u32 thread_index = vm->thread_index;
  u16 buffer_data_size = vlib_buffer_get_default_data_size (vm);
  u16 len;
  ipsec_per_thread_data_t *ptd = vec_elt_at_index (im->ptd, thread_index);
  u32 *from = vlib_frame_vector_args (from_frame);
  u32 n, n_left = from_frame->n_vectors;
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
  u16 nexts[VLIB_FRAME_SIZE], *next = nexts;
  esp_decrypt_packet_data_t pkt_data[VLIB_FRAME_SIZE], *pd = pkt_data;
  esp_decrypt_packet_data_t cpd = { };
  u32 current_sa_index = ~0, current_sa_bytes = 0, current_sa_pkts = 0;
  const u8 esp_sz = sizeof (esp_header_t);
  ipsec_sa_t *sa0 = 0;

  vlib_get_buffers (vm, from, b, n_left);
  vec_reset_length (ptd->crypto_ops);
  vec_reset_length (ptd->integ_ops);
  clib_memset_u16 (nexts, -1, n_left);
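
  /* nexts[] is primed with 0xffff, which is >= ESP_DECRYPT_N_NEXT; any
   * packet still carrying that value after the first pass hit no error
   * and gets its real next node assigned in the second pass */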

  while (n_left > 0)
    {
      u8 *payload;

      if (n_left > 2)
        {
          u8 *p;
          vlib_prefetch_buffer_header (b[2], LOAD);
          p = vlib_buffer_get_current (b[1]);
          CLIB_PREFETCH (p, CLIB_CACHE_LINE_BYTES, LOAD);
          p -= CLIB_CACHE_LINE_BYTES;
          CLIB_PREFETCH (p, CLIB_CACHE_LINE_BYTES, LOAD);
        }
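
      /* this node only operates on contiguous buffers; packets that cannot
       * be linearized into a single buffer are dropped */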
      if (vlib_buffer_chain_linearize (vm, b[0]) != 1)
        {
          b[0]->error = node->errors[ESP_DECRYPT_ERROR_CHAINED_BUFFER];
          next[0] = ESP_DECRYPT_NEXT_DROP;
          goto next;
        }

      if (vnet_buffer (b[0])->ipsec.sad_index != current_sa_index)
        {
          current_sa_index = vnet_buffer (b[0])->ipsec.sad_index;
          sa0 = pool_elt_at_index (im->sad, current_sa_index);
          cpd.icv_sz = sa0->integ_icv_size;
          cpd.iv_sz = sa0->crypto_iv_size;
          cpd.flags = sa0->flags;
          cpd.sa_index = current_sa_index;

          vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
                                           current_sa_index, current_sa_pkts,
                                           current_sa_bytes);
          current_sa_bytes = current_sa_pkts = 0;
        }

      /* store packet data for next round for easier prefetch */
      pd->sa_data = cpd.sa_data;
      pd->current_data = b[0]->current_data;
      pd->current_length = b[0]->current_length;
      pd->hdr_sz = pd->current_data - vnet_buffer (b[0])->l3_hdr_offset;
      payload = b[0]->data + pd->current_data;

      /* we need 4 extra bytes for HMAC calculation when ESN is used */
      if ((sa0->flags & IPSEC_SA_FLAG_USE_ESN) && pd->icv_sz &&
          (pd->current_data + pd->current_length + 4 > buffer_data_size))
        {
          b[0]->error = node->errors[ESP_DECRYPT_ERROR_NO_TAIL_SPACE];
          next[0] = ESP_DECRYPT_NEXT_DROP;
          goto next;
        }

      /* anti-replay check */
      if (ipsec_sa_anti_replay_check (sa0, &((esp_header_t *) payload)->seq))
        {
          b[0]->error = node->errors[ESP_DECRYPT_ERROR_REPLAY];
          next[0] = ESP_DECRYPT_NEXT_DROP;
          goto next;
        }
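
      /* the replay window is only checked here; it is advanced in the
       * second pass, once the integrity check has actually passed */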

      len = pd->current_length - cpd.icv_sz;
      current_sa_pkts += 1;
      current_sa_bytes += pd->current_length;
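
      /* queue a batched integrity check: it must cover the ESP header, IV
       * and ciphertext, with the received ICV at payload + len as the
       * expected digest */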
      if (PREDICT_TRUE (cpd.icv_sz > 0))
        {
          vnet_crypto_op_t *op;
          vec_add2_aligned (ptd->integ_ops, op, 1, CLIB_CACHE_LINE_BYTES);

          vnet_crypto_op_init (op, sa0->integ_op_id);
          op->key = sa0->integ_key.data;
          op->key_len = sa0->integ_key.len;
          op->src = payload;
          op->flags = VNET_CRYPTO_OP_FLAG_HMAC_CHECK;
          op->user_data = b - bufs;
          op->digest = payload + len;
          op->digest_len = cpd.icv_sz;
          op->len = len;
          if (PREDICT_TRUE (sa0->flags & IPSEC_SA_FLAG_USE_ESN))
            {
              /* shift ICV by 4 bytes to insert ESN */
              u8 tmp[ESP_MAX_ICV_SIZE], sz = sizeof (sa0->seq_hi);
              clib_memcpy_fast (tmp, payload + len, ESP_MAX_ICV_SIZE);
              clib_memcpy_fast (payload + len, &sa0->seq_hi, sz);
              clib_memcpy_fast (payload + len + sz, tmp, ESP_MAX_ICV_SIZE);
              /* the HMAC input now also covers seq_hi and the received
               * ICV moved up by sz bytes */
              op->len += sz;
              op->digest += sz;
            }
        }

      if (sa0->crypto_dec_op_id != VNET_CRYPTO_OP_NONE)
        {
          vnet_crypto_op_t *op;
          vec_add2_aligned (ptd->crypto_ops, op, 1, CLIB_CACHE_LINE_BYTES);
          vnet_crypto_op_init (op, sa0->crypto_dec_op_id);
          op->key = sa0->crypto_key.data;
          op->key_len = sa0->crypto_key.len;
          op->iv = payload += esp_sz;
          op->iv_len = cpd.iv_sz;
          /* decrypt in place; the ciphertext is what remains after the
           * ESP header, IV and ICV are accounted for */
          op->src = op->dst = payload += cpd.iv_sz;
          op->len = len - cpd.iv_sz - esp_sz;
          op->user_data = b - bufs;
        }

      /* next */
    next:
      n_left -= 1;
      next += 1;
      pd += 1;
      b += 1;
    }

  vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
                                   current_sa_index, current_sa_pkts,
                                   current_sa_bytes);
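
  /* the per-frame operation vectors built above are now handed to the
   * crypto engine in two batched calls, one per operation type, so the
   * engine sees the whole frame at once */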
  if ((n = vec_len (ptd->integ_ops)))
    {
      vnet_crypto_op_t *op = ptd->integ_ops;
      n -= vnet_crypto_process_ops (vm, op, n);
      /* n is now the number of ops that did not complete; find them by
       * scanning each op's status */
      while (n)
        {
          ASSERT (op - ptd->integ_ops < vec_len (ptd->integ_ops));
          if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
            {
              u32 err, bi = op->user_data;
              if (op->status == VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC)
                err = ESP_DECRYPT_ERROR_INTEG_ERROR;
              else
                err = ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR;
              bufs[bi]->error = node->errors[err];
              nexts[bi] = ESP_DECRYPT_NEXT_DROP;
              n--;
            }
          op++;
        }
    }
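
  /* the decrypt operations get the same status scan; op->user_data again
   * maps a failed operation back to its buffer index */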
  if ((n = vec_len (ptd->crypto_ops)))
    {
      vnet_crypto_op_t *op = ptd->crypto_ops;
      n -= vnet_crypto_process_ops (vm, op, n);
      while (n)
        {
          ASSERT (op - ptd->crypto_ops < vec_len (ptd->crypto_ops));
          if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
            {
              u32 err, bi = op->user_data;

              if (op->status == VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC)
                err = ESP_DECRYPT_ERROR_INTEG_ERROR;
              else
                err = ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR;

              bufs[bi]->error = node->errors[err];
              nexts[bi] = ESP_DECRYPT_NEXT_DROP;
              n--;
            }
          op++;
        }
    }

  /* Post decryption round - adjust packet data start and length and next
     node */

  n_left = from_frame->n_vectors;
  next = nexts;
  pd = pkt_data;
  b = bufs;

  while (n_left)
    {
      const u8 tun_flags = IPSEC_SA_FLAG_IS_TUNNEL |
        IPSEC_SA_FLAG_IS_TUNNEL_V6;

      if (n_left >= 2)
        {
          void *data = b[1]->data + pd[1].current_data;

          /* buffer metadata */
          vlib_prefetch_buffer_header (b[1], LOAD);

          /* esp_footer_t */
          CLIB_PREFETCH (data + pd[1].current_length - pd[1].icv_sz - 2,
                         CLIB_CACHE_LINE_BYTES, LOAD);

          /* packet headers */
          CLIB_PREFETCH (data - CLIB_CACHE_LINE_BYTES,
                         CLIB_CACHE_LINE_BYTES * 2, LOAD);
        }

      /* packets already marked for drop in the first pass keep their next
       * index and skip straight to tracing */
      if (next[0] < ESP_DECRYPT_N_NEXT)
        goto trace;

      sa0 = vec_elt_at_index (im->sad, pd->sa_index);
      u8 *payload = b[0]->data + pd->current_data;

      ipsec_sa_anti_replay_advance (sa0, &((esp_header_t *) payload)->seq);

      esp_footer_t *f = (esp_footer_t *) (b[0]->data + pd->current_data +
                                          pd->current_length - sizeof (*f) -
                                          pd->icv_sz);
      u16 adv = pd->iv_sz + esp_sz;
      u16 tail = sizeof (esp_footer_t) + f->pad_length + pd->icv_sz;
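
      /* the decrypted payload ends with the ESP trailer (padding,
       * pad_length, next_header) followed by the ICV; adv bytes are
       * stripped from the front of the packet, tail bytes from the back */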
      if ((pd->flags & tun_flags) == 0)	/* transport mode */
        {
          u8 udp_sz = (is_ip6 == 0 && pd->flags & IPSEC_SA_FLAG_UDP_ENCAP) ?
            sizeof (udp_header_t) : 0;
          u16 ip_hdr_sz = pd->hdr_sz - udp_sz;
          u8 *old_ip = b[0]->data + pd->current_data - ip_hdr_sz - udp_sz;
          u8 *ip = old_ip + adv + udp_sz;

          if (is_ip6 && ip_hdr_sz > 64)
            memmove (ip, old_ip, ip_hdr_sz);
          else
            clib_memcpy_le64 (ip, old_ip, ip_hdr_sz);

          b[0]->current_data = pd->current_data + adv - ip_hdr_sz;
          b[0]->current_length = pd->current_length + ip_hdr_sz - tail - adv;
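
          /* the IP header that was just moved forward still carries the
           * ESP protocol number and the old length; patch both for the
           * decapsulated packet */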
          if (is_ip6)
            {
              ip6_header_t *ip6 = (ip6_header_t *) ip;
              u16 len = clib_net_to_host_u16 (ip6->payload_length);
              len -= adv + tail;
              ip6->payload_length = clib_host_to_net_u16 (len);
              ip6->protocol = f->next_header;
              next[0] = ESP_DECRYPT_NEXT_IP6_INPUT;
            }
          else
            {
              ip4_header_t *ip4 = (ip4_header_t *) ip;
              ip_csum_t sum = ip4->checksum;
              u16 len = clib_net_to_host_u16 (ip4->length);
              len = clib_host_to_net_u16 (len - adv - tail - udp_sz);
              /* update the checksum incrementally for the two fields that
               * change instead of recomputing it */
              sum = ip_csum_update (sum, ip4->protocol, f->next_header,
                                    ip4_header_t, protocol);
              sum = ip_csum_update (sum, ip4->length, len,
                                    ip4_header_t, length);
              ip4->checksum = ip_csum_fold (sum);
              ip4->protocol = f->next_header;
              ip4->length = len;
              next[0] = ESP_DECRYPT_NEXT_IP4_INPUT;
            }
        }
      else			/* tunnel mode */
        {
          if (PREDICT_TRUE (f->next_header == IP_PROTOCOL_IP_IN_IP))
            {
              next[0] = ESP_DECRYPT_NEXT_IP4_INPUT;
              b[0]->current_data = pd->current_data + adv;
              b[0]->current_length = pd->current_length - adv - tail;
            }
          else if (f->next_header == IP_PROTOCOL_IPV6)
            {
              next[0] = ESP_DECRYPT_NEXT_IP6_INPUT;
              b[0]->current_data = pd->current_data + adv;
              b[0]->current_length = pd->current_length - adv - tail;
            }
          else
            {
              next[0] = ESP_DECRYPT_NEXT_DROP;
              b[0]->error = node->errors[ESP_DECRYPT_ERROR_DECRYPTION_FAILED];
            }
        }

      if (PREDICT_FALSE (ipsec_sa_is_set_IS_GRE (sa0)))
        next[0] = ESP_DECRYPT_NEXT_IPSEC_GRE_INPUT;
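
      /* packets dropped in the first pass re-join the flow here, so they
       * still show up in packet traces */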
    trace:
      if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
        {
          esp_decrypt_trace_t *tr;
          u8 *payload = b[0]->data + pd->current_data;
          tr = vlib_add_trace (vm, node, b[0], sizeof (*tr));
          sa0 = pool_elt_at_index (im->sad,
                                   vnet_buffer (b[0])->ipsec.sad_index);
          tr->crypto_alg = sa0->crypto_alg;
          tr->integ_alg = sa0->integ_alg;
          tr->seq = clib_host_to_net_u32 (((esp_header_t *) payload)->seq);
        }

      /* next */
      n_left -= 1;
      next += 1;
      pd += 1;
      b += 1;
    }

  n_left = from_frame->n_vectors;
  vlib_node_increment_counter (vm, node->node_index,
                               ESP_DECRYPT_ERROR_RX_PKTS, n_left);

  vlib_buffer_enqueue_to_next (vm, node, from, nexts, n_left);

  return n_left;
}
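
/* esp_decrypt_inline is inlined into the node functions below with a
 * constant is_ip6 argument, so each node gets a specialized copy with the
 * IPv4/IPv6 conditionals resolved at compile time */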

VLIB_NODE_FN (esp4_decrypt_node) (vlib_main_t * vm,
				  vlib_node_runtime_t * node,
				  vlib_frame_t * from_frame)
{
  return esp_decrypt_inline (vm, node, from_frame, 0 /* is_ip6 */ );
}

VLIB_REGISTER_NODE (esp4_decrypt_node) = {
  .name = "esp4-decrypt",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_decrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(esp_decrypt_error_strings),
  .error_strings = esp_decrypt_error_strings,

  .n_next_nodes = ESP_DECRYPT_N_NEXT,
  .next_nodes = {
#define _(s,n) [ESP_DECRYPT_NEXT_##s] = n,
    foreach_esp_decrypt_next
#undef _
  },
};

VLIB_NODE_FN (esp6_decrypt_node) (vlib_main_t * vm,
				  vlib_node_runtime_t * node,
				  vlib_frame_t * from_frame)
{
  return esp_decrypt_inline (vm, node, from_frame, 1 /* is_ip6 */ );
}

VLIB_REGISTER_NODE (esp6_decrypt_node) = {
  .name = "esp6-decrypt",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_decrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(esp_decrypt_error_strings),
  .error_strings = esp_decrypt_error_strings,

  .n_next_nodes = ESP_DECRYPT_N_NEXT,
  .next_nodes = {
#define _(s,n) [ESP_DECRYPT_NEXT_##s] = n,
    foreach_esp_decrypt_next
#undef _
  },
};

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */