2 * ah_decrypt.c : IPSec AH decrypt node
4 * Copyright (c) 2015 Cisco and/or its affiliates.
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at:
9 * http://www.apache.org/licenses/LICENSE-2.0
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
18 #include <vnet/vnet.h>
19 #include <vnet/api_errno.h>
20 #include <vnet/ip/ip.h>
22 #include <vnet/ipsec/ipsec.h>
23 #include <vnet/ipsec/esp.h>
24 #include <vnet/ipsec/ah.h>
25 #include <vnet/ipsec/ipsec_io.h>
/* Next nodes reachable from the AH decrypt nodes.  The _ macro is
 * expanded twice: here to generate the AH_DECRYPT_NEXT_* enumerators,
 * and again in the VLIB_REGISTER_NODE blocks below to name the graph
 * arcs.  DROP is used for every error disposition. */
#define foreach_ah_decrypt_next \
_ (DROP, "error-drop") \
_ (IP4_INPUT, "ip4-input") \
_ (IP6_INPUT, "ip6-input") \
_ (IPSEC_GRE_INPUT, "ipsec-gre-input")

/* Expansion producing AH_DECRYPT_NEXT_DROP, ..._IP4_INPUT, etc.
 * (the enum wrapper itself is outside this excerpt). */
#define _(v, s) AH_DECRYPT_NEXT_##v,
  foreach_ah_decrypt_next
/* Per-node error/drop counters.  Each entry expands into an
 * AH_DECRYPT_ERROR_* enumerator and a human-readable counter string
 * shown by "show errors". */
#define foreach_ah_decrypt_error \
 _ (RX_PKTS, "AH pkts received") \
 _ (DECRYPTION_FAILED, "AH decryption failed") \
 _ (INTEG_ERROR, "Integrity check failed") \
 _ (NO_TAIL_SPACE, "not enough buffer tail space (dropped)") \
 _ (DROP_FRAGMENTS, "IP fragments drop") \
 _ (REPLAY, "SA replayed packet")

/* Expansion producing the AH_DECRYPT_ERROR_* enumerators (the enum
 * wrapper is outside this excerpt). */
#define _(sym,str) AH_DECRYPT_ERROR_##sym,
  foreach_ah_decrypt_error

/* Counter strings, indexed by AH_DECRYPT_ERROR_*; order must track the
 * foreach list above. */
static char *ah_decrypt_error_strings[] = {
#define _(sym,string) string,
  foreach_ah_decrypt_error
  /* Integrity algorithm recorded for the packet trace (field of the
   * trace struct whose declaration is outside this excerpt). */
  ipsec_integ_alg_t integ_alg;

/* packet trace format function */
/* Renders one ah_decrypt_trace_t record for "trace" CLI output:
 * integrity algorithm name plus the AH sequence number. */
format_ah_decrypt_trace (u8 * s, va_list * args)
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  ah_decrypt_trace_t *t = va_arg (*args, ah_decrypt_trace_t *);

  s = format (s, "ah: integrity %U seq-num %d",
	      format_ipsec_integ_alg, t->integ_alg, t->seq_num);

  /* Saved IPv6 version/TC/flow-label word, restored after the HMAC is
   * computed over a zeroed mutable field (see ah_decrypt_inline). */
  u32 ip_version_traffic_class_and_flow_label;

/* Per-packet state carried from the first pass of ah_decrypt_inline to
 * the second (struct head is outside this excerpt). */
} ah_decrypt_packet_data_t;
/* Run the batched HMAC-verify crypto ops and fail the corresponding
 * buffers.  Each op's user_data is the index of its buffer in b[]; any
 * op that did not complete marks that buffer with INTEG_ERROR and
 * reroutes it to the drop next.  NOTE(review): the loop walking the
 * failed ops is partially elided in this excerpt. */
static_always_inline void
ah_process_ops (vlib_main_t * vm, vlib_node_runtime_t * node,
		vnet_crypto_op_t * ops, vlib_buffer_t * b[], u16 * nexts)
  u32 n_fail, n_ops = vec_len (ops);
  vnet_crypto_op_t *op = ops;

  /* Engine returns the number of successful ops; the rest failed. */
  n_fail = n_ops - vnet_crypto_process_ops (vm, op, n_ops);

      ASSERT (op - ops < n_ops);

      if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
	  /* user_data was set to the buffer's index within the frame */
	  u32 bi = op->user_data;
	  b[bi]->error = node->errors[AH_DECRYPT_ERROR_INTEG_ERROR];
	  nexts[bi] = AH_DECRYPT_NEXT_DROP;
/*
 * Shared worker for the ah4-decrypt / ah6-decrypt nodes.
 *
 * Pass 1 (per buffer): locate the AH header behind the IP header, run
 * the anti-replay check, queue one HMAC-verify op into the per-thread
 * integ_ops vector, and stash per-packet state in pkt_data[] for pass 2.
 * Then all queued ops are executed in one batch via ah_process_ops().
 * Pass 2 (per buffer): advance the replay window, strip IP+AH headers,
 * and pick the next node — tunnel mode dispatches on the AH next-header
 * field, transport mode rebuilds the outer IP header in place.
 *
 * NOTE(review): this listing is an excerpt — the return type, loop
 * headers, several declarations and closing braces are elided.
 */
ah_decrypt_inline (vlib_main_t * vm,
		   vlib_node_runtime_t * node, vlib_frame_t * from_frame,
  u32 thread_index = vm->thread_index;
  u16 buffer_data_size = vlib_buffer_get_default_data_size (vm);
  /* one cached-state slot per packet in the frame */
  ah_decrypt_packet_data_t pkt_data[VLIB_FRAME_SIZE], *pd = pkt_data;
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
  u16 nexts[VLIB_FRAME_SIZE], *next = nexts;
  ipsec_main_t *im = &ipsec_main;
  ipsec_per_thread_data_t *ptd = vec_elt_at_index (im->ptd, thread_index);
  from = vlib_frame_vector_args (from_frame);
  n_left = from_frame->n_vectors;

  /* SA state is cached across consecutive packets on the same SA so the
   * combined counter is only flushed on an SA change (and once at end). */
  u32 current_sa_index = ~0, current_sa_bytes = 0, current_sa_pkts = 0;

  clib_memset (pkt_data, 0, VLIB_FRAME_SIZE * sizeof (pkt_data[0]));
  vlib_get_buffers (vm, from, b, n_left);
  /* ~0 marks "next not yet decided"; compared against AH_DECRYPT_N_NEXT
   * in pass 2 to skip packets already routed to drop. */
  clib_memset_u16 (nexts, -1, n_left);
  /* reuse the per-thread crypto-op vector for this frame */
  vec_reset_length (ptd->integ_ops);

      /* ---- pass 1 ---- */
      if (vnet_buffer (b[0])->ipsec.sad_index != current_sa_index)
	  /* flush counters accumulated for the previous SA */
	  if (current_sa_index != ~0)
	    vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
	  current_sa_index = vnet_buffer (b[0])->ipsec.sad_index;
	  sa0 = pool_elt_at_index (im->sad, current_sa_index);

	  current_sa_bytes = current_sa_pkts = 0;
	  vlib_prefetch_combined_counter (&ipsec_sa_counters,
					  thread_index, current_sa_index);

      pd->sa_index = current_sa_index;

      /* both aliases point at the current IP header; is_ip6 decides
       * which one is meaningful */
      ih4 = vlib_buffer_get_current (b[0]);
      ih6 = vlib_buffer_get_current (b[0]);
      pd->current_data = b[0]->current_data;

	  /* IPv6: walk extension headers to find the AH header */
	  ip6_ext_header_t *prev = NULL;
	  ip6_ext_header_find_t (ih6, prev, ah0, IP_PROTOCOL_IPSEC_AH);
	  pd->ip_hdr_size = sizeof (ip6_header_t);
	  ASSERT ((u8 *) ah0 - (u8 *) ih6 == pd->ip_hdr_size);

	  /* IPv4 fragments cannot be authenticated per-fragment: drop */
	  if (ip4_is_fragment (ih4))
	      b[0]->error = node->errors[AH_DECRYPT_ERROR_DROP_FRAGMENTS];
	      next[0] = AH_DECRYPT_NEXT_DROP;

	  pd->ip_hdr_size = ip4_header_bytes (ih4);
	  ah0 = (ah_header_t *) ((u8 *) ih4 + pd->ip_hdr_size);

      pd->seq = clib_host_to_net_u32 (ah0->seq_no);

      /* anti-replay check */
      if (ipsec_sa_anti_replay_check (sa0, &ah0->seq_no))
	  b[0]->error = node->errors[AH_DECRYPT_ERROR_REPLAY];
	  next[0] = AH_DECRYPT_NEXT_DROP;

      current_sa_bytes += b[0]->current_length;
      current_sa_pkts += 1;

      pd->icv_size = sa0->integ_icv_size;
      pd->nexthdr_cached = ah0->nexthdr;
      if (PREDICT_TRUE (sa0->integ_alg != IPSEC_INTEG_ALG_NONE))
	  /* ESN appends seq_hi after the payload; make sure it fits in
	   * the buffer's tail room before writing it there */
	  if (PREDICT_FALSE (ipsec_sa_is_set_USE_ESN (sa0) &&
			     pd->current_data + b[0]->current_length
			     + sizeof (u32) > buffer_data_size))
	      b[0]->error = node->errors[AH_DECRYPT_ERROR_NO_TAIL_SPACE];
	      next[0] = AH_DECRYPT_NEXT_DROP;

	  /* queue one HMAC-verify op for this packet */
	  vnet_crypto_op_t *op;
	  vec_add2_aligned (ptd->integ_ops, op, 1, CLIB_CACHE_LINE_BYTES);
	  vnet_crypto_op_init (op, sa0->integ_op_id);

	  op->src = (u8 *) ih4;
	  op->len = b[0]->current_length;
	  /* expected ICV copied out below; digest points at scratch
	   * space just before the packet */
	  op->digest = (u8 *) ih4 - pd->icv_size;
	  op->flags = VNET_CRYPTO_OP_FLAG_HMAC_CHECK;
	  op->digest_len = pd->icv_size;
	  op->key_index = sa0->integ_key_index;
	  /* buffer index within this frame, used by ah_process_ops */
	  op->user_data = b - bufs;
	  if (ipsec_sa_is_set_USE_ESN (sa0))
	      /* RFC 4302: high 32 bits of the ESN are included in the
	       * ICV computation but not transmitted */
	      u32 seq_hi = clib_host_to_net_u32 (sa0->seq_hi);

	      op->len += sizeof (seq_hi);
	      clib_memcpy (op->src + b[0]->current_length, &seq_hi,

	  /* save received ICV, then zero mutable auth_data so the
	   * packet hashes the way the sender hashed it */
	  clib_memcpy (op->digest, ah0->auth_data, pd->icv_size);
	  clib_memset (ah0->auth_data, 0, pd->icv_size);

	  /* IPv6 mutable fields: save, then normalize for hashing;
	   * restored from pd in pass 2 (transport mode) */
	  pd->ip_version_traffic_class_and_flow_label =
	    ih6->ip_version_traffic_class_and_flow_label;
	  pd->hop_limit = ih6->hop_limit;
	  ih6->ip_version_traffic_class_and_flow_label = 0x60;

	  pd->nexthdr = ah0->nexthdr;
	  pd->icv_padding_len =
	    ah_calc_icv_padding_len (pd->icv_size, 1 /* is_ipv6 */ );

	  pd->icv_padding_len =
	    ah_calc_icv_padding_len (pd->icv_size, 0 /* is_ipv6 */ );

  n_left = from_frame->n_vectors;

  vlib_node_increment_counter (vm, node->node_index, AH_DECRYPT_ERROR_RX_PKTS,

  /* flush counters for the last SA touched in pass 1 */
  vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
				   current_sa_index, current_sa_pkts,

  /* run all queued HMAC checks in one batch; failures become drops */
  ah_process_ops (vm, node, ptd->integ_ops, bufs, nexts);

      /* ---- pass 2: skip packets already routed (drop) in pass 1 ---- */
      if (next[0] < AH_DECRYPT_N_NEXT)

      sa0 = vec_elt_at_index (im->sad, pd->sa_index);

      if (PREDICT_TRUE (sa0->integ_alg != IPSEC_INTEG_ALG_NONE))
	  /* integrity verified: slide the anti-replay window */
	  ipsec_sa_anti_replay_advance (sa0, clib_host_to_net_u32 (pd->seq));

      /* strip outer IP + AH (header, ICV and alignment padding) */
      u16 ah_hdr_len = sizeof (ah_header_t) + pd->icv_size
	+ pd->icv_padding_len;
      vlib_buffer_advance (b[0], pd->ip_hdr_size + ah_hdr_len);
      b[0]->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;

      if (PREDICT_TRUE (ipsec_sa_is_set_IS_TUNNEL (sa0)))
	  /* tunnel mode: inner packet starts here; dispatch on the AH
	   * next-header value */
	  if (PREDICT_TRUE (pd->nexthdr_cached == IP_PROTOCOL_IP_IN_IP))
	    next[0] = AH_DECRYPT_NEXT_IP4_INPUT;
	  else if (pd->nexthdr_cached == IP_PROTOCOL_IPV6)
	    next[0] = AH_DECRYPT_NEXT_IP6_INPUT;

	      b[0]->error = node->errors[AH_DECRYPT_ERROR_DECRYPTION_FAILED];
	      next[0] = AH_DECRYPT_NEXT_DROP;

	{			/* transport mode */
	      /* rebuild the IPv6 header directly in front of payload;
	       * memcpy when regions cannot overlap, memmove otherwise */
	      vlib_buffer_advance (b[0], -sizeof (ip6_header_t));
	      oh6 = vlib_buffer_get_current (b[0]);
	      if (ah_hdr_len >= sizeof (ip6_header_t))
		clib_memcpy (oh6, b[0]->data + pd->current_data,
			     sizeof (ip6_header_t));

		memmove (oh6, b[0]->data + pd->current_data,
			 sizeof (ip6_header_t));

	      next[0] = AH_DECRYPT_NEXT_IP6_INPUT;
	      oh6->protocol = pd->nexthdr;
	      /* restore mutable fields saved in pass 1 */
	      oh6->hop_limit = pd->hop_limit;
	      oh6->ip_version_traffic_class_and_flow_label =
		pd->ip_version_traffic_class_and_flow_label;
	      oh6->payload_length =
		clib_host_to_net_u16 (vlib_buffer_length_in_chain
				      (vm, b[0]) - sizeof (ip6_header_t));

	      /* same rebuild for IPv4 transport mode */
	      vlib_buffer_advance (b[0], -sizeof (ip4_header_t));
	      oh4 = vlib_buffer_get_current (b[0]);
	      if (ah_hdr_len >= sizeof (ip4_header_t))
		clib_memcpy (oh4, b[0]->data + pd->current_data,
			     sizeof (ip4_header_t));

		memmove (oh4, b[0]->data + pd->current_data,
			 sizeof (ip4_header_t));

	      next[0] = AH_DECRYPT_NEXT_IP4_INPUT;
	      oh4->ip_version_and_header_length = 0x45;
	      oh4->fragment_id = 0;
	      oh4->flags_and_fragment_offset = 0;
	      oh4->protocol = pd->nexthdr_cached;

		clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b[0]));

	      /* length/flags changed above: recompute the checksum */
	      oh4->checksum = ip4_header_checksum (oh4);

      /* for IPSec-GRE tunnel next node is ipsec-gre-input */
      if (PREDICT_FALSE (ipsec_sa_is_set_IS_GRE (sa0)))
	next[0] = AH_DECRYPT_NEXT_IPSEC_GRE_INPUT;

      /* clear any stale TX interface so lookup is done afresh */
      vnet_buffer (b[0])->sw_if_index[VLIB_TX] = (u32) ~ 0;

      if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
	  sa0 = pool_elt_at_index (im->sad,
				   vnet_buffer (b[0])->ipsec.sad_index);
	  ah_decrypt_trace_t *tr =
	    vlib_add_trace (vm, node, b[0], sizeof (*tr));
	  tr->integ_alg = sa0->integ_alg;
	  tr->seq_num = pd->seq;

  n_left = from_frame->n_vectors;
  vlib_buffer_enqueue_to_next (vm, node, from, nexts, n_left);
/* Graph-node entry point for IPv4 AH decrypt: delegates to the shared
 * inline with is_ip6 = 0. */
VLIB_NODE_FN (ah4_decrypt_node) (vlib_main_t * vm,
				 vlib_node_runtime_t * node,
				 vlib_frame_t * from_frame)
  return ah_decrypt_inline (vm, node, from_frame, 0 /* is_ip6 */ );
/* Registration of the "ah4-decrypt" node: wires in the trace formatter,
 * the error-string table, and the next-node arcs generated from
 * foreach_ah_decrypt_next. */
VLIB_REGISTER_NODE (ah4_decrypt_node) = {
  .name = "ah4-decrypt",
  .vector_size = sizeof (u32),
  .format_trace = format_ah_decrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(ah_decrypt_error_strings),
  .error_strings = ah_decrypt_error_strings,

  .n_next_nodes = AH_DECRYPT_N_NEXT,
#define _(s,n) [AH_DECRYPT_NEXT_##s] = n,
    foreach_ah_decrypt_next
/* Graph-node entry point for IPv6 AH decrypt: delegates to the shared
 * inline with is_ip6 = 1. */
VLIB_NODE_FN (ah6_decrypt_node) (vlib_main_t * vm,
				 vlib_node_runtime_t * node,
				 vlib_frame_t * from_frame)
  return ah_decrypt_inline (vm, node, from_frame, 1 /* is_ip6 */ );
/* Registration of the "ah6-decrypt" node: identical wiring to the v4
 * node apart from the name. */
VLIB_REGISTER_NODE (ah6_decrypt_node) = {
  .name = "ah6-decrypt",
  .vector_size = sizeof (u32),
  .format_trace = format_ah_decrypt_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(ah_decrypt_error_strings),
  .error_strings = ah_decrypt_error_strings,

  .n_next_nodes = AH_DECRYPT_N_NEXT,
#define _(s,n) [AH_DECRYPT_NEXT_##s] = n,
    foreach_ah_decrypt_next
455 * fd.io coding-style-patch-verification: ON
458 * eval: (c-set-style "gnu")