2 * ah_encrypt.c : IPSec AH encrypt node
4 * Copyright (c) 2015 Cisco and/or its affiliates.
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at:
9 * http://www.apache.org/licenses/LICENSE-2.0
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
18 #include <vnet/vnet.h>
19 #include <vnet/api_errno.h>
20 #include <vnet/ip/ip.h>
22 #include <vnet/ipsec/ipsec.h>
23 #include <vnet/ipsec/esp.h>
24 #include <vnet/ipsec/ah.h>
25 #include <vnet/ipsec/ipsec.api_enum.h>
26 #include <vnet/tunnel/tunnel_dp.h>
/* Dispositions a packet can take after ah-encrypt: drop on error, handoff
 * to the thread that owns the SA, or continue to interface-output
 * (transport mode). */
28 #define foreach_ah_encrypt_next \
29 _ (DROP, "error-drop") \
30 _ (HANDOFF, "handoff") \
31 _ (INTERFACE_OUTPUT, "interface-output")
/* Expand the list above into AH_ENCRYPT_NEXT_* enum members.
 * NOTE(review): the enclosing 'typedef enum' open/close lines are missing
 * from this extraction (original numbering jumps 31 -> 34 -> 37). */
34 #define _(v, s) AH_ENCRYPT_NEXT_##v,
37 foreach_ah_encrypt_next
48 ipsec_integ_alg_t integ_alg;
51 /* packet trace format function */
/* Trace formatter for the ah-encrypt nodes: prints SA index, SPI (decimal
 * and hex), the 64-bit sequence number as hi:lo and the integrity
 * algorithm.  NOTE(review): this extraction is missing lines (the return
 * type line, braces and the 'return s;' are not visible here). */
53 format_ah_encrypt_trace (u8 * s, va_list * args)
/* first two va_args are the standard vlib trace-formatter arguments;
 * unused here */
55 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
56 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
57 ah_encrypt_trace_t *t = va_arg (*args, ah_encrypt_trace_t *);
59 s = format (s, "ah: sa-index %d spi %u (0x%08x) seq %u:%u integrity %U",
60 t->sa_index, t->spi, t->spi, t->seq_hi, t->seq_lo,
61 format_ipsec_integ_alg, t->integ_alg);
/* Run the queued integrity ops through the crypto engine, then walk the
 * results; any op that did not complete marks its buffer for drop with a
 * crypto-engine error.  op->user_data holds the buffer's index into b[].
 * NOTE(review): lines are missing from this extraction (the braces and
 * the loop that advances 'op' over the failed ops are not visible). */
65 static_always_inline void
66 ah_process_ops (vlib_main_t * vm, vlib_node_runtime_t * node,
67 vnet_crypto_op_t * ops, vlib_buffer_t * b[], u16 * nexts)
69 u32 n_fail, n_ops = vec_len (ops);
70 vnet_crypto_op_t *op = ops;
/* number of failed ops = submitted minus completed */
75 n_fail = n_ops - vnet_crypto_process_ops (vm, op, n_ops);
79 ASSERT (op - ops < n_ops);
81 if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
83 u32 bi = op->user_data;
84 ah_encrypt_set_next_index (b[bi], node, vm->thread_index,
85 AH_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR, bi,
86 nexts, AH_ENCRYPT_NEXT_DROP,
87 vnet_buffer (b[bi])->ipsec.sad_index);
98 /* Variable fields in the IP header not covered by the AH
102 u32 ip_version_traffic_class_and_flow_label;
114 } ah_encrypt_packet_data_t;
/* AH encrypt worker shared by the ip4/ip6 nodes (is_ip6 selects outer
 * header handling).  First pass, per packet: resolve the SA (with handoff
 * to the SA-owning thread if needed), advance the sequence number, build
 * the outer IP + AH header in front of the payload and queue an integrity
 * op.  Second pass: run the queued ops, then restore the mutable IP
 * fields (ttl/hop-limit, tos/flow-label) that were held out of the ICV
 * and recompute the ip4 checksum.
 * NOTE(review): this extraction is missing many lines (original numbering
 * is non-contiguous) -- loop headers, braces, 'else' lines and some
 * assignment lvalues are not visible; comments below hedge accordingly. */
117 ah_encrypt_inline (vlib_main_t * vm,
118 vlib_node_runtime_t * node, vlib_frame_t * frame,
121 u32 n_left, *from, thread_index;
123 from = vlib_frame_vector_args (frame);
124 n_left = frame->n_vectors;
125 ipsec_main_t *im = &ipsec_main;
/* per-packet scratch state consumed by the second (post-crypto) pass */
126 ah_encrypt_packet_data_t pkt_data[VLIB_FRAME_SIZE], *pd = pkt_data;
127 thread_index = vm->thread_index;
128 vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
129 u16 nexts[VLIB_FRAME_SIZE], *next = nexts;
130 ipsec_per_thread_data_t *ptd = vec_elt_at_index (im->ptd, thread_index);
132 ip4_and_ah_header_t *ih0, *oh0 = 0;
133 ip6_and_ah_header_t *ih6_0, *oh6_0 = 0;
134 u32 current_sa_index = ~0, current_sa_bytes = 0, current_sa_pkts = 0;
/* templates used to stamp the fixed outer-header fields in one copy */
135 const static ip4_header_t ip4_hdr_template = {
136 .ip_version_and_header_length = 0x45,
137 .protocol = IP_PROTOCOL_IPSEC_AH,
139 const static ip6_header_t ip6_hdr_template = {
140 .ip_version_traffic_class_and_flow_label = 0x60,
141 .protocol = IP_PROTOCOL_IPSEC_AH,
144 clib_memset (pkt_data, 0, VLIB_FRAME_SIZE * sizeof (pkt_data[0]));
145 vlib_get_buffers (vm, from, b, n_left);
146 vec_reset_length (ptd->crypto_ops);
147 vec_reset_length (ptd->integ_ops);
/* SA change detection: flush per-SA counters when this packet's SA
 * differs from the cached one (enclosing loop header not visible here) */
154 if (vnet_buffer (b[0])->ipsec.sad_index != current_sa_index)
156 if (current_sa_index != ~0)
157 vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
158 current_sa_index, current_sa_pkts,
160 current_sa_index = vnet_buffer (b[0])->ipsec.sad_index;
161 sa0 = ipsec_sa_get (current_sa_index);
163 current_sa_bytes = current_sa_pkts = 0;
164 vlib_prefetch_combined_counter (&ipsec_sa_counters, thread_index,
168 pd->sa_index = current_sa_index;
/* default disposition; overwritten on the success paths below */
169 next[0] = AH_ENCRYPT_NEXT_DROP;
171 if (PREDICT_FALSE ((u16) ~0 == sa0->thread_index))
173 /* this is the first packet to use this SA, claim the SA
174 * for this thread. this could happen simultaneously on
176 clib_atomic_cmp_and_swap (&sa0->thread_index, ~0,
177 ipsec_sa_assign_thread (thread_index));
/* SA owned by another thread: hand the packet off instead of encrypting */
180 if (PREDICT_TRUE (thread_index != sa0->thread_index))
182 vnet_buffer (b[0])->ipsec.thread_index = sa0->thread_index;
183 next[0] = AH_ENCRYPT_NEXT_HANDOFF;
/* advance the (possibly extended) sequence number; drop when it cycles */
187 if (PREDICT_FALSE (esp_seq_advance (sa0)))
189 ah_encrypt_set_next_index (b[0], node, vm->thread_index,
190 AH_ENCRYPT_ERROR_SEQ_CYCLED, 0, next,
191 AH_ENCRYPT_NEXT_DROP, current_sa_index);
196 current_sa_pkts += 1;
197 current_sa_bytes += b[0]->current_length;
200 ih0 = vlib_buffer_get_current (b[0]);
/* headroom to grow by: full outer IP+AH in tunnel mode, AH only in
 * transport mode (the branch structure is partially missing here) */
202 if (PREDICT_TRUE (ipsec_sa_is_set_IS_TUNNEL (sa0)))
205 adv = -sizeof (ip6_and_ah_header_t);
207 adv = -sizeof (ip4_and_ah_header_t);
211 adv = -sizeof (ah_header_t);
214 icv_size = sa0->integ_icv_size;
/* pad so the AH header incl. ICV lands on the required alignment --
 * presumably 4 octets for ip4 / 8 for ip6; see ah_calc_icv_padding_len */
215 const u8 padding_len = ah_calc_icv_padding_len (icv_size, is_ip6);
217 /* transport mode save the eth header before it is overwritten */
218 if (PREDICT_FALSE (!ipsec_sa_is_set_IS_TUNNEL (sa0)))
220 const u32 l2_len = vnet_buffer (b[0])->ip.save_rewrite_length;
221 u8 *l2_hdr_in = (u8 *) vlib_buffer_get_current (b[0]) - l2_len;
223 u8 *l2_hdr_out = l2_hdr_in + adv - icv_size;
225 clib_memcpy_le32 (l2_hdr_out, l2_hdr_in, l2_len);
/* open up room in front of the payload for AH (+ outer IP) + ICV */
228 vlib_buffer_advance (b[0], adv - icv_size);
/* --- IPv6 output header construction (the enclosing 'if (is_ip6)' line
 * is not visible in this extraction) --- */
232 ih6_0 = (ip6_and_ah_header_t *) ih0;
233 ip_hdr_size = sizeof (ip6_header_t);
234 oh6_0 = vlib_buffer_get_current (b[0]);
235 pd->current_data = b[0]->current_data;
/* stash the mutable hop limit; it is restored in the second pass */
236 pd->hop_limit = ih6_0->ip6.hop_limit;
238 oh6_0->ip6.ip_version_traffic_class_and_flow_label =
239 ih6_0->ip6.ip_version_traffic_class_and_flow_label;
240 ih6_0->ip6.ip_version_traffic_class_and_flow_label; */
241 if (PREDICT_FALSE (ipsec_sa_is_set_IS_TUNNEL (sa0)))
243 ip6_set_dscp_network_order (&oh6_0->ip6, sa0->tunnel.t_dscp);
244 tunnel_encap_fixup_6o6 (sa0->tunnel_flags, &ih6_0->ip6,
/* remember the (possibly fixed-up) version/class/flow-label word for the
 * post-crypto restore */
247 pd->ip_version_traffic_class_and_flow_label =
248 oh6_0->ip6.ip_version_traffic_class_and_flow_label;
250 if (PREDICT_TRUE (ipsec_sa_is_set_IS_TUNNEL (sa0)))
252 next_hdr_type = IP_PROTOCOL_IPV6;
256 next_hdr_type = ih6_0->ip6.protocol;
/* transport mode: slide the original ip6 header into the new position */
257 memmove (oh6_0, ih6_0, sizeof (ip6_header_t));
/* copy only the first 8 template bytes (version/class/flow-label etc.) */
260 clib_memcpy_fast (&oh6_0->ip6, &ip6_hdr_template, 8);
261 oh6_0->ah.reserved = 0;
262 oh6_0->ah.nexthdr = next_hdr_type;
263 oh6_0->ah.spi = clib_net_to_host_u32 (sa0->spi);
264 oh6_0->ah.seq_no = clib_net_to_host_u32 (sa0->seq);
265 oh6_0->ip6.payload_length =
266 clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b[0]) -
267 sizeof (ip6_header_t));
/* AH 'payload length' field: length in 32-bit words minus 2 (matches
 * RFC 4302); the assignment's lvalue line is missing from the extraction */
269 (sizeof (ah_header_t) + icv_size + padding_len) / 4 - 2;
/* --- IPv4 output header construction (the matching 'else' line is not
 * visible in this extraction) --- */
273 ip_hdr_size = sizeof (ip4_header_t);
274 oh0 = vlib_buffer_get_current (b[0]);
/* stash mutable ttl/tos; restored after the ICV is computed */
275 pd->ttl = ih0->ip4.ttl;
277 if (PREDICT_FALSE (ipsec_sa_is_set_IS_TUNNEL (sa0)))
279 if (sa0->tunnel.t_dscp)
280 pd->tos = sa0->tunnel.t_dscp << 2;
283 pd->tos = ih0->ip4.tos;
/* without COPY_DSCP / COPY_ECN, the corresponding inner bits are not
 * propagated (the masking statements are on lines missing here) */
285 if (!(sa0->tunnel_flags &
286 TUNNEL_ENCAP_DECAP_FLAG_ENCAP_COPY_DSCP))
288 if (!(sa0->tunnel_flags &
289 TUNNEL_ENCAP_DECAP_FLAG_ENCAP_COPY_ECN))
295 pd->tos = ih0->ip4.tos;
298 pd->current_data = b[0]->current_data;
/* zero the whole outer ip4+ah header so mutable fields are 0 in the ICV */
299 clib_memset (oh0, 0, sizeof (ip4_and_ah_header_t));
301 if (PREDICT_TRUE (ipsec_sa_is_set_IS_TUNNEL (sa0)))
303 next_hdr_type = IP_PROTOCOL_IP_IN_IP;
307 next_hdr_type = ih0->ip4.protocol;
308 memmove (oh0, ih0, sizeof (ip4_header_t));
/* copy the template up to, but not including, the address pair */
311 clib_memcpy_fast (&oh0->ip4, &ip4_hdr_template,
312 sizeof (ip4_header_t) -
313 sizeof (ip4_address_pair_t));
316 clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b[0]));
317 oh0->ah.spi = clib_net_to_host_u32 (sa0->spi);
318 oh0->ah.seq_no = clib_net_to_host_u32 (sa0->seq);
319 oh0->ah.nexthdr = next_hdr_type;
321 (sizeof (ah_header_t) + icv_size + padding_len) / 4 - 2;
/* tunnel mode: stamp the SA's pre-built outer addresses and forward via
 * the SA's DPO, bypassing a FIB lookup */
324 if (PREDICT_TRUE (!is_ip6 && ipsec_sa_is_set_IS_TUNNEL (sa0) &&
325 !ipsec_sa_is_set_IS_TUNNEL_V6 (sa0)))
327 clib_memcpy_fast (&oh0->ip4.address_pair,
328 &sa0->ip4_hdr.address_pair,
329 sizeof (ip4_address_pair_t));
331 next[0] = sa0->dpo.dpoi_next_node;
332 vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = sa0->dpo.dpoi_index;
334 else if (is_ip6 && ipsec_sa_is_set_IS_TUNNEL (sa0) &&
335 ipsec_sa_is_set_IS_TUNNEL_V6 (sa0))
/* src and dst copied together: 2 contiguous ip6_address_t */
337 clib_memcpy_fast (&oh6_0->ip6.src_address,
338 &sa0->ip6_hdr.src_address,
339 sizeof (ip6_address_t) * 2);
340 next[0] = sa0->dpo.dpoi_next_node;
341 vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = sa0->dpo.dpoi_index;
/* queue the integrity op; the ICV is computed over the packet with the
 * digest bytes zeroed in place */
344 if (PREDICT_TRUE (sa0->integ_op_id))
346 vnet_crypto_op_t *op;
347 vec_add2_aligned (ptd->integ_ops, op, 1, CLIB_CACHE_LINE_BYTES);
348 vnet_crypto_op_init (op, sa0->integ_op_id);
349 op->src = vlib_buffer_get_current (b[0]);
350 op->len = b[0]->current_length;
351 op->digest = vlib_buffer_get_current (b[0]) + ip_hdr_size +
352 sizeof (ah_header_t);
353 clib_memset (op->digest, 0, icv_size);
354 op->digest_len = icv_size;
355 op->key_index = sa0->integ_key_index;
/* user_data = this buffer's index into bufs[], read by ah_process_ops */
356 op->user_data = b - bufs;
/* ESN: append the high 32 sequence bits after the payload so the ICV
 * covers them (per RFC 4302's extended sequence number scheme) */
357 if (ipsec_sa_is_set_USE_ESN (sa0))
359 u32 seq_hi = clib_host_to_net_u32 (sa0->seq_hi);
361 op->len += sizeof (seq_hi);
362 clib_memcpy (op->src + b[0]->current_length, &seq_hi,
/* transport mode: re-expose the saved l2 header and send the packet
 * straight to interface-output */
367 if (!ipsec_sa_is_set_IS_TUNNEL (sa0))
369 next[0] = AH_ENCRYPT_NEXT_INTERFACE_OUTPUT;
370 vlib_buffer_advance (b[0], -sizeof (ethernet_header_t));
/* per-packet tracing, enabled only for traced buffers */
374 if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
376 sa0 = ipsec_sa_get (pd->sa_index);
377 ah_encrypt_trace_t *tr =
378 vlib_add_trace (vm, node, b[0], sizeof (*tr));
380 tr->seq_lo = sa0->seq;
381 tr->seq_hi = sa0->seq_hi;
382 tr->integ_alg = sa0->integ_alg;
383 tr->sa_index = pd->sa_index;
/* end of the first pass: account, then run the queued integrity ops */
392 n_left = frame->n_vectors;
397 vlib_node_increment_counter (vm, node->node_index,
398 AH_ENCRYPT_ERROR_RX_PKTS, n_left);
399 vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
400 current_sa_index, current_sa_pkts,
403 ah_process_ops (vm, node, ptd->integ_ops, bufs, nexts);
/* second pass: restore the mutable IP fields withheld from the ICV and
 * recompute the ip4 checksum (loop header not visible here) */
412 oh6_0 = (ip6_and_ah_header_t *) (b[0]->data + pd->current_data);
413 oh6_0->ip6.hop_limit = pd->hop_limit;
414 oh6_0->ip6.ip_version_traffic_class_and_flow_label =
415 pd->ip_version_traffic_class_and_flow_label;
419 oh0 = (ip4_and_ah_header_t *) (b[0]->data + pd->current_data);
420 oh0->ip4.ttl = pd->ttl;
421 oh0->ip4.tos = pd->tos;
422 oh0->ip4.checksum = ip4_header_checksum (&oh0->ip4);
/* hand the whole frame to the per-packet next nodes chosen above */
432 n_left = frame->n_vectors;
433 vlib_buffer_enqueue_to_next (vm, node, from, nexts, n_left);
/* IPv4 AH encrypt node entry point: thin wrapper dispatching to the
 * shared inline with is_ip6 = 0. */
438 VLIB_NODE_FN (ah4_encrypt_node) (vlib_main_t * vm,
439 vlib_node_runtime_t * node,
440 vlib_frame_t * from_frame)
442 return ah_encrypt_inline (vm, node, from_frame, 0 /* is_ip6 */ );
/* ah4-encrypt node registration: drops go to ip4-drop, cross-thread
 * packets to ah4-encrypt-handoff, transport-mode output to
 * interface-output.  NOTE(review): lines are missing from this extraction
 * (e.g. the .next_nodes initializer opener and the closing brace). */
446 VLIB_REGISTER_NODE (ah4_encrypt_node) = {
447 .name = "ah4-encrypt",
448 .vector_size = sizeof (u32),
449 .format_trace = format_ah_encrypt_trace,
450 .type = VLIB_NODE_TYPE_INTERNAL,
452 .n_errors = AH_ENCRYPT_N_ERROR,
453 .error_counters = ah_encrypt_error_counters,
455 .n_next_nodes = AH_ENCRYPT_N_NEXT,
457 [AH_ENCRYPT_NEXT_DROP] = "ip4-drop",
458 [AH_ENCRYPT_NEXT_HANDOFF] = "ah4-encrypt-handoff",
459 [AH_ENCRYPT_NEXT_INTERFACE_OUTPUT] = "interface-output",
/* IPv6 AH encrypt node entry point: thin wrapper dispatching to the
 * shared inline with is_ip6 = 1. */
464 VLIB_NODE_FN (ah6_encrypt_node) (vlib_main_t * vm,
465 vlib_node_runtime_t * node,
466 vlib_frame_t * from_frame)
468 return ah_encrypt_inline (vm, node, from_frame, 1 /* is_ip6 */ );
/* ah6-encrypt node registration: mirrors the ip4 variant but drops to
 * ip6-drop and hands off to ah6-encrypt-handoff.  NOTE(review): lines are
 * missing from this extraction (e.g. the .next_nodes initializer opener
 * and the closing brace). */
472 VLIB_REGISTER_NODE (ah6_encrypt_node) = {
473 .name = "ah6-encrypt",
474 .vector_size = sizeof (u32),
475 .format_trace = format_ah_encrypt_trace,
476 .type = VLIB_NODE_TYPE_INTERNAL,
478 .n_errors = AH_ENCRYPT_N_ERROR,
479 .error_counters = ah_encrypt_error_counters,
481 .n_next_nodes = AH_ENCRYPT_N_NEXT,
483 [AH_ENCRYPT_NEXT_DROP] = "ip6-drop",
484 [AH_ENCRYPT_NEXT_HANDOFF] = "ah6-encrypt-handoff",
485 [AH_ENCRYPT_NEXT_INTERFACE_OUTPUT] = "interface-output",
490 #ifndef CLIB_MARCH_VARIANT
/* One-time init: create the frame queues used to hand packets off to the
 * SA-owning thread for the ah4/ah6 encrypt nodes.  NOTE(review): the
 * function braces, 'return 0;' and the closing #endif are on lines
 * missing from this extraction. */
492 static clib_error_t *
493 ah_encrypt_init (vlib_main_t *vm)
495 ipsec_main_t *im = &ipsec_main;
497 im->ah4_enc_fq_index =
498 vlib_frame_queue_main_init (ah4_encrypt_node.index, 0);
499 im->ah6_enc_fq_index =
500 vlib_frame_queue_main_init (ah6_encrypt_node.index, 0);
505 VLIB_INIT_FUNCTION (ah_encrypt_init);
510 * fd.io coding-style-patch-verification: ON
513 * eval: (c-set-style "gnu")