/*
 * esp_decrypt.c : IPSec ESP decrypt node
 *
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vnet/vnet.h>
#include <vnet/api_errno.h>
#include <vnet/ip/ip.h>

#include <vnet/ipsec/ipsec.h>
#include <vnet/ipsec/esp.h>
#include <vnet/ipsec/ipsec_io.h>
#define foreach_esp_decrypt_next                \
_(DROP, "error-drop")                           \
_(IP4_INPUT, "ip4-input-no-checksum")           \
_(IP6_INPUT, "ip6-input")                       \
_(IPSEC_GRE_INPUT, "ipsec-gre-input")

#define _(v, s) ESP_DECRYPT_NEXT_##v,
typedef enum
{
  foreach_esp_decrypt_next
#undef _
    ESP_DECRYPT_N_NEXT,
} esp_decrypt_next_t;
#define foreach_esp_decrypt_error                   \
 _(RX_PKTS, "ESP pkts received")                    \
 _(NO_BUFFER, "No buffer (packet dropped)")         \
 _(DECRYPTION_FAILED, "ESP decryption failed")      \
 _(INTEG_ERROR, "Integrity check failed")           \
 _(REPLAY, "SA replayed packet")                    \
 _(NOT_IP, "Not IP packet (dropped)")

typedef enum
{
#define _(sym,str) ESP_DECRYPT_ERROR_##sym,
  foreach_esp_decrypt_error
#undef _
    ESP_DECRYPT_N_ERROR,
} esp_decrypt_error_t;

static char *esp_decrypt_error_strings[] = {
#define _(sym,string) string,
  foreach_esp_decrypt_error
#undef _
};
typedef struct
{
  ipsec_crypto_alg_t crypto_alg;
  ipsec_integ_alg_t integ_alg;
} esp_decrypt_trace_t;
/* packet trace format function */
static u8 *
format_esp_decrypt_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  esp_decrypt_trace_t *t = va_arg (*args, esp_decrypt_trace_t *);

  s = format (s, "esp: crypto %U integrity %U",
	      format_ipsec_crypto_alg, t->crypto_alg,
	      format_ipsec_integ_alg, t->integ_alg);
  return s;
}
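/*
 * Decrypt in_len bytes from 'in' into 'out' with the given key and IV using
 * a single synchronous operation on the vnet crypto layer. Algorithms with
 * no decrypt operation configured are silently skipped.
 */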
always_inline void
esp_decrypt_cbc (vlib_main_t * vm, ipsec_crypto_alg_t alg,
		 u8 * in, u8 * out, size_t in_len, u8 * key, u8 * iv)
{
  ipsec_main_t *im = &ipsec_main;
  ipsec_main_crypto_alg_t *a;
  vnet_crypto_op_t _op, *op = &_op;

  ASSERT (alg < IPSEC_CRYPTO_N_ALG);

  a = &im->crypto_algs[alg];

  if (PREDICT_FALSE (a->dec_op_type == VNET_CRYPTO_OP_NONE))
    return;

  /* describe the single decrypt operation: IV, source, destination,
     length and key */
  op->op = a->dec_op_type;
  op->iv = iv;
  op->src = in;
  op->dst = out;
  op->len = in_len;
  op->key = key;

  vnet_crypto_process_ops (vm, op, 1);
}
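/*
 * Per-frame worker shared by the esp4-decrypt and esp6-decrypt nodes.
 * A fresh output buffer is allocated for every input packet; the decrypted
 * payload is written into it and the original (encrypted) buffers are freed
 * once the whole frame has been processed.
 */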
always_inline uword
esp_decrypt_inline (vlib_main_t * vm,
		    vlib_node_runtime_t * node, vlib_frame_t * from_frame,
		    int is_ip6)
{
  ipsec_main_t *im = &ipsec_main;
  u32 *from = vlib_frame_vector_args (from_frame);
  u32 n_left_from = from_frame->n_vectors;
  u32 new_bufs[VLIB_FRAME_SIZE];
  vlib_buffer_t *i_bufs[VLIB_FRAME_SIZE], **ib = i_bufs;
  vlib_buffer_t *o_bufs[VLIB_FRAME_SIZE], **ob = o_bufs;
  u16 nexts[VLIB_FRAME_SIZE], *next = nexts;
  u32 n_alloc, thread_index = vm->thread_index;
  n_alloc = vlib_buffer_alloc (vm, new_bufs, n_left_from);
  if (n_alloc != n_left_from)
    {
      vlib_node_increment_counter (vm, node->node_index,
				   ESP_DECRYPT_ERROR_NO_BUFFER,
				   n_left_from - n_alloc);
      if (n_alloc == 0)
	goto done;
      n_left_from = n_alloc;
    }

  vlib_get_buffers (vm, from, ib, n_left_from);
  vlib_get_buffers (vm, new_bufs, ob, n_left_from);
  while (n_left_from > 0)
    {
      esp_header_t *esp0;
      ipsec_sa_t *sa0;
      u32 sa_index0 = ~0;
      u32 seq;
      ip4_header_t *ih4 = 0, *oh4 = 0;
      ip6_header_t *ih6 = 0, *oh6 = 0;
      u8 tunnel_mode = 1;

      next[0] = ESP_DECRYPT_NEXT_DROP;

      esp0 = vlib_buffer_get_current (ib[0]);
      sa_index0 = vnet_buffer (ib[0])->ipsec.sad_index;
      sa0 = pool_elt_at_index (im->sad, sa_index0);
      seq = clib_host_to_net_u32 (esp0->seq);
      /* anti-replay check */
      if (sa0->use_anti_replay)
	{
	  int rv = 0;

	  if (PREDICT_TRUE (sa0->use_esn))
	    rv = esp_replay_check_esn (sa0, seq);
	  else
	    rv = esp_replay_check (sa0, seq);

	  if (PREDICT_FALSE (rv))
	    {
	      u32 tmp, off = n_alloc - n_left_from;
	      /* send original packet to drop node */
	      tmp = from[off];
	      from[off] = new_bufs[off];
	      new_bufs[off] = tmp;
	      ib[0]->error = node->errors[ESP_DECRYPT_ERROR_REPLAY];
	      next[0] = ESP_DECRYPT_NEXT_DROP;
	      goto trace;
	    }
	}

      vlib_increment_combined_counter
	(&ipsec_sa_counters, thread_index, sa_index0,
	 1, ib[0]->current_length);
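      /*
       * ICV verification: trim the trailing ICV from the input buffer,
       * recompute the HMAC over the ESP header and payload (including the
       * high sequence number bits when ESN is in use) and compare.
       */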
      if (PREDICT_TRUE (sa0->integ_alg != IPSEC_INTEG_ALG_NONE))
	{
	  u8 sig[64];
	  int icv_size = im->integ_algs[sa0->integ_alg].trunc_size;
	  clib_memset (sig, 0, sizeof (sig));
	  u8 *icv = vlib_buffer_get_current (ib[0]) + ib[0]->current_length -
	    icv_size;
	  ib[0]->current_length -= icv_size;

	  hmac_calc (vm, sa0->integ_alg, sa0->integ_key.data,
		     sa0->integ_key.len, (u8 *) esp0,
		     ib[0]->current_length, sig, sa0->use_esn, sa0->seq_hi);

	  if (PREDICT_FALSE (memcmp (icv, sig, icv_size)))
	    {
	      u32 tmp, off = n_alloc - n_left_from;
	      /* send original packet to drop node */
	      tmp = from[off];
	      from[off] = new_bufs[off];
	      new_bufs[off] = tmp;
	      ib[0]->error = node->errors[ESP_DECRYPT_ERROR_INTEG_ERROR];
	      next[0] = ESP_DECRYPT_NEXT_DROP;
	      goto trace;
	    }
	}
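      /* advance the anti-replay window only after the ICV has been verified */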
      if (PREDICT_TRUE (sa0->use_anti_replay))
	{
	  if (PREDICT_TRUE (sa0->use_esn))
	    esp_replay_advance_esn (sa0, seq);
	  else
	    esp_replay_advance (sa0, seq);
	}
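      /* only CBC-mode ciphers (AES-CBC, DES-CBC, 3DES-CBC) are decrypted
         here; any other algorithm keeps the preset error-drop next node */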
      if ((sa0->crypto_alg >= IPSEC_CRYPTO_ALG_AES_CBC_128 &&
	   sa0->crypto_alg <= IPSEC_CRYPTO_ALG_AES_CBC_256) ||
	  (sa0->crypto_alg >= IPSEC_CRYPTO_ALG_DES_CBC &&
	   sa0->crypto_alg <= IPSEC_CRYPTO_ALG_3DES_CBC))
	{
	  const int BLOCK_SIZE = im->crypto_algs[sa0->crypto_alg].block_size;
	  const int IV_SIZE = im->crypto_algs[sa0->crypto_alg].iv_size;
	  esp_footer_t *f0;
	  u8 ip_hdr_size = 0;

	  int blocks =
	    (ib[0]->current_length - sizeof (esp_header_t) -
	     IV_SIZE) / BLOCK_SIZE;

	  ob[0]->current_data = sizeof (ethernet_header_t);

	  /* transport mode */
	  if (PREDICT_FALSE (!sa0->is_tunnel && !sa0->is_tunnel_ip6))
	    {
	      tunnel_mode = 0;

	      if (is_ip6)
		{
		  ip_hdr_size = sizeof (ip6_header_t);
		  ih6 = (ip6_header_t *) ((u8 *) esp0 - ip_hdr_size);
		  oh6 = vlib_buffer_get_current (ob[0]);
		}
	      else
		{
		  ip_hdr_size = sizeof (ip4_header_t);
		  if (sa0->udp_encap)
		    ih4 = (ip4_header_t *) ((u8 *) esp0 - ip_hdr_size -
					    sizeof (udp_header_t));
		  else
		    ih4 = (ip4_header_t *) ((u8 *) esp0 - ip_hdr_size);
		  oh4 = vlib_buffer_get_current (ob[0]);
		}
	    }
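	  /* decrypt the payload that follows the IV into the output buffer,
	     leaving room in front for the rebuilt IP header (transport mode
	     only; in tunnel mode ip_hdr_size is 0) */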
	  esp_decrypt_cbc (vm, sa0->crypto_alg,
			   esp0->data + IV_SIZE,
			   (u8 *) vlib_buffer_get_current (ob[0]) +
			   ip_hdr_size, BLOCK_SIZE * blocks,
			   sa0->crypto_key.data, esp0->data);

	  ob[0]->current_length = (blocks * BLOCK_SIZE) - 2 + ip_hdr_size;
	  ob[0]->flags = VLIB_BUFFER_TOTAL_LENGTH_VALID;
	  f0 = (esp_footer_t *) ((u8 *) vlib_buffer_get_current (ob[0]) +
				 ob[0]->current_length);
	  ob[0]->current_length -= f0->pad_length;
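	  /*
	   * Tunnel mode: the decrypted payload is a complete IPv4 or IPv6
	   * packet, so the footer's next_header field selects the input node.
	   */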
	  if (PREDICT_TRUE (tunnel_mode))
	    {
	      if (PREDICT_TRUE (f0->next_header == IP_PROTOCOL_IP_IN_IP))
		{
		  next[0] = ESP_DECRYPT_NEXT_IP4_INPUT;
		  oh4 = vlib_buffer_get_current (ob[0]);
		}
	      else if (f0->next_header == IP_PROTOCOL_IPV6)
		next[0] = ESP_DECRYPT_NEXT_IP6_INPUT;
	      else
		{
		  vlib_node_increment_counter (vm, node->node_index,
					       ESP_DECRYPT_ERROR_DECRYPTION_FAILED,
					       1);
		  ob[0] = 0;
		  goto trace;
		}
	    }
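	  /*
	   * Transport mode: rebuild an IP header in front of the decrypted
	   * payload, copying the immutable fields from the original outer
	   * header.
	   */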
	  else
	    {
	      u32 len = vlib_buffer_length_in_chain (vm, ob[0]);
	      if (is_ip6)
		{
		  next[0] = ESP_DECRYPT_NEXT_IP6_INPUT;
		  oh6->ip_version_traffic_class_and_flow_label =
		    ih6->ip_version_traffic_class_and_flow_label;
		  oh6->protocol = f0->next_header;
		  oh6->hop_limit = ih6->hop_limit;
		  oh6->src_address.as_u64[0] = ih6->src_address.as_u64[0];
		  oh6->src_address.as_u64[1] = ih6->src_address.as_u64[1];
		  oh6->dst_address.as_u64[0] = ih6->dst_address.as_u64[0];
		  oh6->dst_address.as_u64[1] = ih6->dst_address.as_u64[1];
		  len -= sizeof (ip6_header_t);
		  oh6->payload_length = clib_host_to_net_u16 (len);
		}
	      else
		{
		  next[0] = ESP_DECRYPT_NEXT_IP4_INPUT;
		  oh4->ip_version_and_header_length = 0x45;
		  oh4->tos = ih4->tos;
		  oh4->fragment_id = 0;
		  oh4->flags_and_fragment_offset = 0;
		  oh4->ttl = ih4->ttl;
		  oh4->protocol = f0->next_header;
		  oh4->src_address.as_u32 = ih4->src_address.as_u32;
		  oh4->dst_address.as_u32 = ih4->dst_address.as_u32;
		  oh4->length = clib_host_to_net_u16 (len);
		  oh4->checksum = ip4_header_checksum (oh4);
		}
	    }
	  /* for IPSec-GRE tunnel next node is ipsec-gre-input */
	  if (PREDICT_FALSE
	      ((vnet_buffer (ib[0])->ipsec.flags) &
	       IPSEC_FLAG_IPSEC_GRE_TUNNEL))
	    next[0] = ESP_DECRYPT_NEXT_IPSEC_GRE_INPUT;

	  vnet_buffer (ob[0])->sw_if_index[VLIB_TX] = (u32) ~ 0;
	  vnet_buffer (ob[0])->sw_if_index[VLIB_RX] =
	    vnet_buffer (ib[0])->sw_if_index[VLIB_RX];
	}
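      /* attach a trace record to the decrypted output buffer whenever the
         input buffer was being traced */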
    trace:
      if (PREDICT_FALSE (ib[0]->flags & VLIB_BUFFER_IS_TRACED))
	{
	  if (ob[0])
	    {
	      ob[0]->flags |= VLIB_BUFFER_IS_TRACED;
	      ob[0]->trace_index = ib[0]->trace_index;
	      esp_decrypt_trace_t *tr =
		vlib_add_trace (vm, node, ob[0], sizeof (*tr));
	      tr->crypto_alg = sa0->crypto_alg;
	      tr->integ_alg = sa0->integ_alg;
	    }
	}

      /* next */
      n_left_from -= 1;
      ib += 1;
      ob += 1;
      next += 1;
    }
  vlib_node_increment_counter (vm, node->node_index,
			       ESP_DECRYPT_ERROR_RX_PKTS, n_alloc);

  vlib_buffer_enqueue_to_next (vm, node, new_bufs, nexts, n_alloc);
done:
  vlib_buffer_free (vm, from, from_frame->n_vectors);

  return n_alloc;
}
VLIB_NODE_FN (esp4_decrypt_node) (vlib_main_t * vm,
				  vlib_node_runtime_t * node,
				  vlib_frame_t * from_frame)
{
  return esp_decrypt_inline (vm, node, from_frame, 0 /* is_ip6 */ );
}
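/* node registration: error strings and next-node mappings are generated
   from the foreach macros defined at the top of this file */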
VLIB_REGISTER_NODE (esp4_decrypt_node) = {
  .name = "esp4-decrypt",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_decrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(esp_decrypt_error_strings),
  .error_strings = esp_decrypt_error_strings,

  .n_next_nodes = ESP_DECRYPT_N_NEXT,
  .next_nodes = {
#define _(s,n) [ESP_DECRYPT_NEXT_##s] = n,
    foreach_esp_decrypt_next
#undef _
  },
};
VLIB_NODE_FN (esp6_decrypt_node) (vlib_main_t * vm,
				  vlib_node_runtime_t * node,
				  vlib_frame_t * from_frame)
{
  return esp_decrypt_inline (vm, node, from_frame, 1 /* is_ip6 */ );
}
VLIB_REGISTER_NODE (esp6_decrypt_node) = {
  .name = "esp6-decrypt",
  .vector_size = sizeof (u32),
  .format_trace = format_esp_decrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(esp_decrypt_error_strings),
  .error_strings = esp_decrypt_error_strings,

  .n_next_nodes = ESP_DECRYPT_N_NEXT,
  .next_nodes = {
#define _(s,n) [ESP_DECRYPT_NEXT_##s] = n,
    foreach_esp_decrypt_next
#undef _
  },
};
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */