2 * ah_decrypt.c : IPSec AH decrypt node
4 * Copyright (c) 2015 Cisco and/or its affiliates.
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at:
9 * http://www.apache.org/licenses/LICENSE-2.0
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
18 #include <vnet/vnet.h>
19 #include <vnet/api_errno.h>
20 #include <vnet/ip/ip.h>
22 #include <vnet/ipsec/ipsec.h>
23 #include <vnet/ipsec/esp.h>
24 #include <vnet/ipsec/ah.h>
25 #include <vnet/ipsec/ipsec_io.h>
/* Next-node dispositions for the AH decrypt nodes; expanded below into
 * the AH_DECRYPT_NEXT_* enum.
 * NOTE(review): elided listing — the HANDOFF entry (AH_DECRYPT_NEXT_HANDOFF
 * is used later in this file), the enum wrapper ending in
 * AH_DECRYPT_N_NEXT, the #undef, and most of the ah_decrypt_trace_t
 * struct are not visible in this extract. */
27 #define foreach_ah_decrypt_next \
28 _(DROP, "error-drop") \
29 _(IP4_INPUT, "ip4-input") \
30 _(IP6_INPUT, "ip6-input") \
33 #define _(v, s) AH_DECRYPT_NEXT_##v,
36 foreach_ah_decrypt_next
43 ipsec_integ_alg_t integ_alg;
47 /* packet trace format function */
/* Formats an ah_decrypt_trace_t for "show trace": the SA's integrity
 * algorithm and the packet's AH sequence number.
 * NOTE(review): elided listing — the "static u8 *" signature line and
 * the trailing "return s;" are not visible in this extract. */
49 format_ah_decrypt_trace (u8 * s, va_list * args)
51 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
52 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
53 ah_decrypt_trace_t *t = va_arg (*args, ah_decrypt_trace_t *);
55 s = format (s, "ah: integrity %U seq-num %d",
56 format_ipsec_integ_alg, t->integ_alg, t->seq_num);
/* Per-packet state carried from pass 1 to pass 2 of ah_decrypt_inline.
 * NOTE(review): only a fragment of the struct is visible in this
 * elided extract; the fields accessed later through pd-> (seq, seq_hi,
 * sa_index, current_data, ip_hdr_size, icv_size, icv_padding_len,
 * nexthdr, nexthdr_cached, hop_limit) are declared in the elided
 * lines. */
68 u32 ip_version_traffic_class_and_flow_label;
85 } ah_decrypt_packet_data_t;
87 static_always_inline void
/* After running the queued integrity-check crypto ops, send every
 * buffer whose HMAC comparison failed to the drop next-index with
 * AH_DECRYPT_ERROR_INTEG_ERROR.  op->user_data holds the buffer's
 * index into b[] / nexts (set when the op was queued in
 * ah_decrypt_inline).
 * NOTE(review): elided listing — the loop that walks `op` across the
 * n_fail failed ops is not visible in this extract. */
88 ah_process_ops (vlib_main_t * vm, vlib_node_runtime_t * node,
89 vnet_crypto_op_t * ops, vlib_buffer_t * b[], u16 * nexts)
91 u32 n_fail, n_ops = vec_len (ops);
92 vnet_crypto_op_t *op = ops;
/* vnet_crypto_process_ops returns the count of ops that completed
 * successfully; the difference is how many failures to hunt for. */
97 n_fail = n_ops - vnet_crypto_process_ops (vm, op, n_ops);
101 ASSERT (op - ops < n_ops);
103 if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
105 u32 bi = op->user_data;
106 ah_decrypt_set_next_index (
107 b[bi], node, vm->thread_index, AH_DECRYPT_ERROR_INTEG_ERROR, bi,
108 nexts, AH_DECRYPT_NEXT_DROP, vnet_buffer (b[bi])->ipsec.sad_index);
/* AH decrypt worker shared by the ah4/ah6 nodes (an elided is_ip6
 * argument selects IPv4 vs IPv6 header handling — see the callers
 * below).  The frame is processed in two passes:
 *   pass 1 - per packet: SA lookup (with cross-thread handoff),
 *            anti-replay pre-check, and queue one HMAC-verify crypto
 *            op per packet into ptd->integ_ops;
 *   pass 2 - after ah_process_ops has executed the ops: redo the
 *            anti-replay check and advance the window, strip the AH
 *            header, restore (transport) or expose (tunnel) the inner
 *            IP header, then enqueue to the chosen next nodes.
 * NOTE(review): this listing is elided — loop headers, braces and many
 * statements are missing; the comments below annotate only the visible
 * lines. */
116 ah_decrypt_inline (vlib_main_t * vm,
117 vlib_node_runtime_t * node, vlib_frame_t * from_frame,
121 u32 thread_index = vm->thread_index;
122 u16 buffer_data_size = vlib_buffer_get_default_data_size (vm);
123 ah_decrypt_packet_data_t pkt_data[VLIB_FRAME_SIZE], *pd = pkt_data;
124 vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
125 u16 nexts[VLIB_FRAME_SIZE], *next = nexts;
126 ipsec_main_t *im = &ipsec_main;
127 ipsec_per_thread_data_t *ptd = vec_elt_at_index (im->ptd, thread_index);
128 from = vlib_frame_vector_args (from_frame);
129 n_left = from_frame->n_vectors;
131 bool anti_replay_result;
132 u32 current_sa_index = ~0, current_sa_bytes = 0, current_sa_pkts = 0;
/* nexts starts at 0xffff so pass 2 can distinguish "still undecided"
 * ((u16) ~0 >= AH_DECRYPT_N_NEXT) from packets pass 1 already sent to
 * drop or handoff. */
134 clib_memset (pkt_data, 0, VLIB_FRAME_SIZE * sizeof (pkt_data[0]));
135 vlib_get_buffers (vm, from, b, n_left);
136 clib_memset_u16 (nexts, -1, n_left);
137 vec_reset_length (ptd->integ_ops);
/* ---- pass 1 (per-packet loop; header elided) ---- */
/* SA changed for this packet: flush the previous SA's pkt/byte
 * counters before switching. */
145 if (vnet_buffer (b[0])->ipsec.sad_index != current_sa_index)
147 if (current_sa_index != ~0)
148 vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
149 current_sa_index, current_sa_pkts,
151 current_sa_index = vnet_buffer (b[0])->ipsec.sad_index;
152 sa0 = ipsec_sa_get (current_sa_index);
154 current_sa_bytes = current_sa_pkts = 0;
155 vlib_prefetch_combined_counter (&ipsec_sa_counters,
156 thread_index, current_sa_index);
/* First packet on this SA: atomically claim it for one worker thread;
 * packets arriving on any other thread are handed off below. */
159 if (PREDICT_FALSE ((u16) ~0 == sa0->thread_index))
161 /* this is the first packet to use this SA, claim the SA
162 * for this thread. this could happen simultaneously on
164 clib_atomic_cmp_and_swap (&sa0->thread_index, ~0,
165 ipsec_sa_assign_thread (thread_index));
168 if (PREDICT_TRUE (thread_index != sa0->thread_index))
170 vnet_buffer (b[0])->ipsec.thread_index = sa0->thread_index;
171 next[0] = AH_DECRYPT_NEXT_HANDOFF;
175 pd->sa_index = current_sa_index;
/* Same start-of-packet pointer viewed as both IPv4 and IPv6; only the
 * variant matching the elided is_ip6 branch is meaningful. */
177 ih4 = vlib_buffer_get_current (b[0]);
178 ih6 = vlib_buffer_get_current (b[0]);
179 pd->current_data = b[0]->current_data;
/* IPv6 branch (elided): locate the AH extension header. */
183 ip6_ext_header_t *prev = NULL;
185 ip6_ext_header_find (vm, b[0], ih6, IP_PROTOCOL_IPSEC_AH, &prev);
186 pd->ip_hdr_size = sizeof (ip6_header_t);
187 ASSERT ((u8 *) ah0 - (u8 *) ih6 == pd->ip_hdr_size);
/* IPv4 branch (elided): the ICV cannot be verified on fragments, so
 * they are dropped (reassembly must happen upstream). */
191 if (ip4_is_fragment (ih4))
193 ah_decrypt_set_next_index (
194 b[0], node, vm->thread_index, AH_DECRYPT_ERROR_DROP_FRAGMENTS,
195 0, next, AH_DECRYPT_NEXT_DROP, current_sa_index);
198 pd->ip_hdr_size = ip4_header_bytes (ih4);
199 ah0 = (ah_header_t *) ((u8 *) ih4 + pd->ip_hdr_size);
202 pd->seq = clib_host_to_net_u32 (ah0->seq_no);
204 /* anti-replay check */
/* Pre-check only (advance happens in pass 2); the last argument
 * selects the "huge" window implementation. */
205 if (PREDICT_FALSE (ipsec_sa_is_set_ANTI_REPLAY_HUGE (sa0)))
207 anti_replay_result = ipsec_sa_anti_replay_and_sn_advance (
208 sa0, pd->seq, ~0, false, &pd->seq_hi, true);
212 anti_replay_result = ipsec_sa_anti_replay_and_sn_advance (
213 sa0, pd->seq, ~0, false, &pd->seq_hi, false);
215 if (anti_replay_result)
217 ah_decrypt_set_next_index (b[0], node, vm->thread_index,
218 AH_DECRYPT_ERROR_REPLAY, 0, next,
219 AH_DECRYPT_NEXT_DROP, current_sa_index);
223 current_sa_bytes += b[0]->current_length;
224 current_sa_pkts += 1;
226 pd->icv_size = sa0->integ_icv_size;
227 pd->nexthdr_cached = ah0->nexthdr;
228 if (PREDICT_TRUE (sa0->integ_alg != IPSEC_INTEG_ALG_NONE))
/* ESN appends the high 32 sequence bits after the payload for the
 * ICV computation — make sure the buffer has tail room for them. */
230 if (PREDICT_FALSE (ipsec_sa_is_set_USE_ESN (sa0) &&
231 pd->current_data + b[0]->current_length
232 + sizeof (u32) > buffer_data_size))
234 ah_decrypt_set_next_index (
235 b[0], node, vm->thread_index, AH_DECRYPT_ERROR_NO_TAIL_SPACE,
236 0, next, AH_DECRYPT_NEXT_DROP, current_sa_index);
/* Queue an HMAC check over IP header + AH + payload.  The received
 * ICV is parked just before the packet (op->digest) for the
 * HMAC_CHECK comparison, and ah0->auth_data is zeroed because the
 * ICV is computed with that field zeroed (RFC 4302). */
240 vnet_crypto_op_t *op;
241 vec_add2_aligned (ptd->integ_ops, op, 1, CLIB_CACHE_LINE_BYTES);
242 vnet_crypto_op_init (op, sa0->integ_op_id);
244 op->src = (u8 *) ih4;
245 op->len = b[0]->current_length;
246 op->digest = (u8 *) ih4 - pd->icv_size;
247 op->flags = VNET_CRYPTO_OP_FLAG_HMAC_CHECK;
248 op->digest_len = pd->icv_size;
249 op->key_index = sa0->integ_key_index;
/* user_data = buffer index within this frame; ah_process_ops uses it
 * to find the buffer / next slot on failure. */
250 op->user_data = b - bufs;
251 if (ipsec_sa_is_set_USE_ESN (sa0))
253 u32 seq_hi = clib_host_to_net_u32 (pd->seq_hi);
255 op->len += sizeof (seq_hi);
256 clib_memcpy (op->src + b[0]->current_length, &seq_hi,
259 clib_memcpy (op->digest, ah0->auth_data, pd->icv_size);
260 clib_memset (ah0->auth_data, 0, pd->icv_size);
/* IPv6 (elided branch): save the mutable header fields, then set
 * them to fixed values for the ICV computation; pass 2 restores them
 * below.  NOTE(review): hop_limit is presumably also zeroed in an
 * elided line — confirm against the full source. */
264 pd->ip_version_traffic_class_and_flow_label =
265 ih6->ip_version_traffic_class_and_flow_label;
266 pd->hop_limit = ih6->hop_limit;
267 ih6->ip_version_traffic_class_and_flow_label = 0x60;
269 pd->nexthdr = ah0->nexthdr;
270 pd->icv_padding_len =
271 ah_calc_icv_padding_len (pd->icv_size, 1 /* is_ipv6 */ );
280 pd->icv_padding_len =
281 ah_calc_icv_padding_len (pd->icv_size, 0 /* is_ipv6 */ );
/* ---- run the crypto ops, then pass 2 (loop header elided) ---- */
292 n_left = from_frame->n_vectors;
297 vlib_node_increment_counter (vm, node->node_index, AH_DECRYPT_ERROR_RX_PKTS,
299 vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
300 current_sa_index, current_sa_pkts,
303 ah_process_ops (vm, node, ptd->integ_ops, bufs, nexts);
/* Only packets pass 1 left undecided ((u16)~0 from the memset) are
 * post-processed; dropped/handed-off ones keep their next index. */
311 if (next[0] < AH_DECRYPT_N_NEXT)
314 sa0 = ipsec_sa_get (pd->sa_index);
316 if (PREDICT_TRUE (sa0->integ_alg != IPSEC_INTEG_ALG_NONE))
318 /* redo the anti-reply check. see esp_decrypt for details */
319 if (PREDICT_FALSE (ipsec_sa_is_set_ANTI_REPLAY_HUGE (sa0)))
321 if (ipsec_sa_anti_replay_and_sn_advance (
322 sa0, pd->seq, pd->seq_hi, true, NULL, true))
324 ah_decrypt_set_next_index (
325 b[0], node, vm->thread_index, AH_DECRYPT_ERROR_REPLAY, 0,
326 next, AH_DECRYPT_NEXT_DROP, pd->sa_index);
329 n_lost = ipsec_sa_anti_replay_advance (
330 sa0, thread_index, pd->seq, pd->seq_hi, true);
334 if (ipsec_sa_anti_replay_and_sn_advance (
335 sa0, pd->seq, pd->seq_hi, true, NULL, false))
337 ah_decrypt_set_next_index (
338 b[0], node, vm->thread_index, AH_DECRYPT_ERROR_REPLAY, 0,
339 next, AH_DECRYPT_NEXT_DROP, pd->sa_index);
342 n_lost = ipsec_sa_anti_replay_advance (
343 sa0, thread_index, pd->seq, pd->seq_hi, false);
345 vlib_prefetch_simple_counter (
346 &ipsec_sa_err_counters[IPSEC_SA_ERROR_LOST], thread_index,
/* Strip outer IP + AH (including ICV and alignment padding). */
350 u16 ah_hdr_len = sizeof (ah_header_t) + pd->icv_size
351 + pd->icv_padding_len;
352 vlib_buffer_advance (b[0], pd->ip_hdr_size + ah_hdr_len);
353 b[0]->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
354 b[0]->flags &= ~(VNET_BUFFER_F_L4_CHECKSUM_COMPUTED |
355 VNET_BUFFER_F_L4_CHECKSUM_CORRECT);
/* Tunnel mode: the inner IP packet now starts at current_data;
 * dispatch on the AH next-header cached in pass 1.  Anything that is
 * not IP-in-IP or IPv6 is dropped. */
357 if (PREDICT_TRUE (ipsec_sa_is_set_IS_TUNNEL (sa0)))
359 if (PREDICT_TRUE (pd->nexthdr_cached == IP_PROTOCOL_IP_IN_IP))
360 next[0] = AH_DECRYPT_NEXT_IP4_INPUT;
361 else if (pd->nexthdr_cached == IP_PROTOCOL_IPV6)
362 next[0] = AH_DECRYPT_NEXT_IP6_INPUT;
365 ah_decrypt_set_next_index (b[0], node, vm->thread_index,
366 AH_DECRYPT_ERROR_DECRYPTION_FAILED, 0,
367 next, AH_DECRYPT_NEXT_DROP,
373 { /* transport mode */
/* IPv6 transport (elided branch): copy the original IP header (still
 * at pd->current_data) down so it abuts the payload, then patch it.
 * memmove when the removed AH header is shorter than the IP header,
 * because source and destination then overlap. */
376 vlib_buffer_advance (b[0], -sizeof (ip6_header_t));
377 oh6 = vlib_buffer_get_current (b[0]);
378 if (ah_hdr_len >= sizeof (ip6_header_t))
379 clib_memcpy (oh6, b[0]->data + pd->current_data,
380 sizeof (ip6_header_t));
382 memmove (oh6, b[0]->data + pd->current_data,
383 sizeof (ip6_header_t));
385 next[0] = AH_DECRYPT_NEXT_IP6_INPUT;
386 oh6->protocol = pd->nexthdr;
/* Restore the mutable fields saved in pass 1. */
387 oh6->hop_limit = pd->hop_limit;
388 oh6->ip_version_traffic_class_and_flow_label =
389 pd->ip_version_traffic_class_and_flow_label;
390 oh6->payload_length =
391 clib_host_to_net_u16 (vlib_buffer_length_in_chain
392 (vm, b[0]) - sizeof (ip6_header_t));
/* IPv4 transport (elided branch): same copy-down, then rebuild the
 * header and recompute its checksum. */
396 vlib_buffer_advance (b[0], -sizeof (ip4_header_t));
397 oh4 = vlib_buffer_get_current (b[0]);
398 if (ah_hdr_len >= sizeof (ip4_header_t))
399 clib_memcpy (oh4, b[0]->data + pd->current_data,
400 sizeof (ip4_header_t));
402 memmove (oh4, b[0]->data + pd->current_data,
403 sizeof (ip4_header_t));
405 next[0] = AH_DECRYPT_NEXT_IP4_INPUT;
406 oh4->ip_version_and_header_length = 0x45;
407 oh4->fragment_id = 0;
408 oh4->flags_and_fragment_offset = 0;
409 oh4->protocol = pd->nexthdr_cached;
411 clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b[0]));
414 oh4->checksum = ip4_header_checksum (oh4);
/* Account packets the replay-window advance reported as lost. */
418 if (PREDICT_FALSE (n_lost))
419 vlib_increment_simple_counter (
420 &ipsec_sa_err_counters[IPSEC_SA_ERROR_LOST], thread_index,
421 pd->sa_index, n_lost);
423 vnet_buffer (b[0])->sw_if_index[VLIB_TX] = (u32) ~ 0;
425 if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
427 sa0 = ipsec_sa_get (vnet_buffer (b[0])->ipsec.sad_index);
428 ah_decrypt_trace_t *tr =
429 vlib_add_trace (vm, node, b[0], sizeof (*tr));
430 tr->integ_alg = sa0->integ_alg;
431 tr->seq_num = pd->seq;
/* Ship the whole frame to the per-packet next indices chosen above. */
440 n_left = from_frame->n_vectors;
441 vlib_buffer_enqueue_to_next (vm, node, from, nexts, n_left);
/* Graph-node entry point for "ah4-decrypt": IPv4 variant of the shared
 * worker.  NOTE(review): braces elided in this extract. */
446 VLIB_NODE_FN (ah4_decrypt_node) (vlib_main_t * vm,
447 vlib_node_runtime_t * node,
448 vlib_frame_t * from_frame)
450 return ah_decrypt_inline (vm, node, from_frame, 0 /* is_ip6 */ );
/* Registration for the ah4-decrypt node: error counters from the
 * shared AH decrypt error table, drops to ip4-drop, handoff to the
 * IPv4 handoff node.
 * NOTE(review): elided listing — the .next_nodes wrapper lines and the
 * closing "};" are not visible in this extract. */
454 VLIB_REGISTER_NODE (ah4_decrypt_node) = {
455 .name = "ah4-decrypt",
456 .vector_size = sizeof (u32),
457 .format_trace = format_ah_decrypt_trace,
458 .type = VLIB_NODE_TYPE_INTERNAL,
460 .n_errors = AH_DECRYPT_N_ERROR,
461 .error_counters = ah_decrypt_error_counters,
463 .n_next_nodes = AH_DECRYPT_N_NEXT,
465 [AH_DECRYPT_NEXT_DROP] = "ip4-drop",
466 [AH_DECRYPT_NEXT_IP4_INPUT] = "ip4-input-no-checksum",
467 [AH_DECRYPT_NEXT_IP6_INPUT] = "ip6-input",
468 [AH_DECRYPT_NEXT_HANDOFF] = "ah4-decrypt-handoff",
/* Graph-node entry point for "ah6-decrypt": IPv6 variant of the shared
 * worker.  NOTE(review): braces elided in this extract. */
473 VLIB_NODE_FN (ah6_decrypt_node) (vlib_main_t * vm,
474 vlib_node_runtime_t * node,
475 vlib_frame_t * from_frame)
477 return ah_decrypt_inline (vm, node, from_frame, 1 /* is_ip6 */ );
/* Registration for the ah6-decrypt node — mirrors ah4-decrypt but
 * drops to ip6-drop and hands off to the IPv6 handoff node.
 * NOTE(review): elided listing — the .next_nodes wrapper lines and the
 * closing "};" are not visible in this extract. */
481 VLIB_REGISTER_NODE (ah6_decrypt_node) = {
482 .name = "ah6-decrypt",
483 .vector_size = sizeof (u32),
484 .format_trace = format_ah_decrypt_trace,
485 .type = VLIB_NODE_TYPE_INTERNAL,
487 .n_errors = AH_DECRYPT_N_ERROR,
488 .error_counters = ah_decrypt_error_counters,
490 .n_next_nodes = AH_DECRYPT_N_NEXT,
492 [AH_DECRYPT_NEXT_DROP] = "ip6-drop",
493 [AH_DECRYPT_NEXT_IP4_INPUT] = "ip4-input-no-checksum",
494 [AH_DECRYPT_NEXT_IP6_INPUT] = "ip6-input",
495 [AH_DECRYPT_NEXT_HANDOFF] = "ah6-decrypt-handoff",
500 #ifndef CLIB_MARCH_VARIANT
/* Init: create the frame queues used by the decrypt handoff path to
 * move packets to the thread that owns an SA (indices stored in
 * ipsec_main).
 * NOTE(review): elided listing — the function braces, "return 0;" and
 * the matching #endif are not visible in this extract. */
502 static clib_error_t *
503 ah_decrypt_init (vlib_main_t *vm)
505 ipsec_main_t *im = &ipsec_main;
507 im->ah4_dec_fq_index =
508 vlib_frame_queue_main_init (ah4_decrypt_node.index, 0);
509 im->ah6_dec_fq_index =
510 vlib_frame_queue_main_init (ah6_decrypt_node.index, 0);
515 VLIB_INIT_FUNCTION (ah_decrypt_init);
520 * fd.io coding-style-patch-verification: ON
523 * eval: (c-set-style "gnu")