/*
 * ipsec_output.c : IPSec output node
 *
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vnet/vnet.h>
#include <vnet/api_errno.h>
#include <vnet/ip/ip.h>

#include <vnet/ipsec/ipsec.h>
#include <vnet/ipsec/ipsec_io.h>

#define foreach_ipsec_output_error                    \
  _(RX_PKTS, "IPSec pkts received")                   \
  _(POLICY_DISCARD, "IPSec policy discard")           \
  _(POLICY_NO_MATCH, "IPSec policy (no match)")       \
  _(POLICY_PROTECT, "IPSec policy protect")           \
  _(POLICY_BYPASS, "IPSec policy bypass")             \
  _(ENCAPS_FAILED, "IPSec encapsulation failed")

typedef enum
{
#define _(sym,str) IPSEC_OUTPUT_ERROR_##sym,
  foreach_ipsec_output_error
#undef _
} ipsec_output_error_t;

static char *ipsec_output_error_strings[] = {
#define _(sym,string) string,
  foreach_ipsec_output_error
#undef _
};

typedef struct
{
  u32 spd_id;
  u32 policy_id;
} ipsec_output_trace_t;

/* packet trace format function */
static u8 *
format_ipsec_output_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  ipsec_output_trace_t *t = va_arg (*args, ipsec_output_trace_t *);

  s = format (s, "spd %u policy %d", t->spd_id, t->policy_id);

  return s;
}

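/*
 * Linear walk of the SPD's IPv4 outbound policies. The first policy whose
 * protocol, address ranges and (for TCP/UDP/SCTP) port ranges all cover the
 * packet is returned; all ranges are inclusive.
 */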
always_inline ipsec_policy_t *
ipsec_output_policy_match (ipsec_spd_t * spd, u8 pr, u32 la, u32 ra, u16 lp,
                           u16 rp)
{
  ipsec_main_t *im = &ipsec_main;
  ipsec_policy_t *p;
  u32 *i;

  if (!spd)
    return 0;

  vec_foreach (i, spd->policies[IPSEC_SPD_POLICY_IP4_OUTBOUND])
  {
    p = pool_elt_at_index (im->policies, *i);
    if (PREDICT_FALSE (p->protocol && (p->protocol != pr)))
      continue;

    if (ra < clib_net_to_host_u32 (p->raddr.start.ip4.as_u32))
      continue;
    if (ra > clib_net_to_host_u32 (p->raddr.stop.ip4.as_u32))
      continue;

    if (la < clib_net_to_host_u32 (p->laddr.start.ip4.as_u32))
      continue;
    if (la > clib_net_to_host_u32 (p->laddr.stop.ip4.as_u32))
      continue;

    if (PREDICT_FALSE ((pr != IP_PROTOCOL_TCP) && (pr != IP_PROTOCOL_UDP)
                       && (pr != IP_PROTOCOL_SCTP)))
      return p;

    if (lp < p->lport.start)
      continue;
    if (lp > p->lport.stop)
      continue;

    if (rp < p->rport.start)
      continue;
    if (rp > p->rport.stop)
      continue;

    return p;
  }
  return 0;
}

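/*
 * Inclusive range check for an IPv6 address. Because the addresses are kept
 * in network byte order, a bytewise memcmp over the 16 bytes orders them the
 * same way a numeric 128-bit comparison would.
 */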
always_inline int
ip6_addr_match_range (ip6_address_t * a, ip6_address_t * la,
                      ip6_address_t * ua)
{
  if ((memcmp (a->as_u64, la->as_u64, 2 * sizeof (u64)) >= 0) &&
      (memcmp (a->as_u64, ua->as_u64, 2 * sizeof (u64)) <= 0))
    return 1;

  return 0;
}

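/*
 * IPv6 counterpart of ipsec_output_policy_match: walk the SPD's IPv6
 * outbound policies and return the first one covering the flow.
 */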
always_inline ipsec_policy_t *
ipsec6_output_policy_match (ipsec_spd_t * spd,
                            ip6_address_t * la,
                            ip6_address_t * ra, u16 lp, u16 rp, u8 pr)
{
  ipsec_main_t *im = &ipsec_main;
  ipsec_policy_t *p;
  u32 *i;

  if (!spd)
    return 0;

  vec_foreach (i, spd->policies[IPSEC_SPD_POLICY_IP6_OUTBOUND])
  {
    p = pool_elt_at_index (im->policies, *i);
    if (PREDICT_FALSE (p->protocol && (p->protocol != pr)))
      continue;

    if (!ip6_addr_match_range (ra, &p->raddr.start.ip6, &p->raddr.stop.ip6))
      continue;

    if (!ip6_addr_match_range (la, &p->laddr.start.ip6, &p->laddr.stop.ip6))
      continue;

    if (PREDICT_FALSE ((pr != IP_PROTOCOL_TCP) && (pr != IP_PROTOCOL_UDP)
                       && (pr != IP_PROTOCOL_SCTP)))
      return p;

    if (lp < p->lport.start)
      continue;
    if (lp > p->lport.stop)
      continue;

    if (rp < p->rport.start)
      continue;
    if (rp > p->rport.stop)
      continue;

    return p;
  }
  return 0;
}

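/*
 * Shared worker for the ipsec4-output-feature and ipsec6-output-feature
 * nodes. Every packet on the interface output feature arc is classified
 * against the interface's SPD and then sent to the ESP/AH encrypt node
 * (protect), to the next output feature (bypass), or to error-drop
 * (discard, or no matching policy).
 */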
static inline uword
ipsec_output_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
                     vlib_frame_t * from_frame, int is_ipv6)
{
  ipsec_main_t *im = &ipsec_main;
  vlib_frame_t *f = 0;
  u32 *from, *to_next = 0, thread_index;
  u32 n_left_from, sw_if_index0, last_sw_if_index = (u32) ~ 0;
  u32 next_node_index = (u32) ~ 0, last_next_node_index = (u32) ~ 0;
  u32 spd_index0 = ~0;
  ipsec_spd_t *spd0 = 0;
  int bogus;
  u64 nc_protect = 0, nc_bypass = 0, nc_discard = 0, nc_nomatch = 0;

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;
  thread_index = vm->thread_index;

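  /*
   * Single-buffer loop: while b0 is handled, the next buffer is prefetched.
   * Outgoing buffers are batched into per-next-node frames further down.
   */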
  while (n_left_from > 0)
    {
      u32 bi0, pi0, bi1;
      vlib_buffer_t *b0, *b1;
      ipsec_policy_t *p0 = NULL;
      ip4_header_t *ip0;
      ip6_header_t *ip6_0 = 0;
      udp_header_t *udp0;
      tcp_header_t *tcp0;
      u32 iph_offset = 0;
      u64 bytes0;

      bi0 = from[0];
      b0 = vlib_get_buffer (vm, bi0);
      if (n_left_from > 1)
        {
          bi1 = from[1];
          b1 = vlib_get_buffer (vm, bi1);
          CLIB_PREFETCH (b1, CLIB_CACHE_LINE_BYTES * 2, STORE);
          vlib_prefetch_buffer_data (b1, LOAD);
        }

      sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_TX];
      iph_offset = vnet_buffer (b0)->ip.save_rewrite_length;
      ip0 = (ip4_header_t *) ((u8 *) vlib_buffer_get_current (b0)
                              + iph_offset);

      /* lookup the SPD only if sw_if_index has changed */
      if (PREDICT_FALSE (last_sw_if_index != sw_if_index0))
        {
          uword *p = hash_get (im->spd_index_by_sw_if_index, sw_if_index0);
          ASSERT (p);
          spd_index0 = p[0];
          spd0 = pool_elt_at_index (im->spds, spd_index0);
          last_sw_if_index = sw_if_index0;
        }

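      /*
       * The IP header sits iph_offset bytes (the saved L2 rewrite length)
       * past the current data pointer; parse it and look up the first
       * matching outbound policy for the flow.
       */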
      if (is_ipv6)
        {
          ip6_0 = (ip6_header_t *) ((u8 *) vlib_buffer_get_current (b0)
                                    + iph_offset);
          udp0 = ip6_next_header (ip6_0);

#if 0
          clib_warning
            ("packet received from %U port %u to %U port %u spd_id %u",
             format_ip6_address, &ip6_0->src_address,
             clib_net_to_host_u16 (udp0->src_port), format_ip6_address,
             &ip6_0->dst_address, clib_net_to_host_u16 (udp0->dst_port),
             spd0->id);
#endif

          p0 = ipsec6_output_policy_match (
            spd0, &ip6_0->src_address, &ip6_0->dst_address,
            clib_net_to_host_u16 (udp0->src_port),
            clib_net_to_host_u16 (udp0->dst_port), ip6_0->protocol);
        }
      else
        {
          udp0 = (udp_header_t *) ((u8 *) ip0 + ip4_header_bytes (ip0));

#if 0
          clib_warning ("packet received from %U to %U port %u",
                        format_ip4_address, ip0->src_address.as_u8,
                        format_ip4_address, ip0->dst_address.as_u8,
                        clib_net_to_host_u16 (udp0->dst_port));
          clib_warning ("sw_if_index0 %u spd_index0 %u spd_id %u",
                        sw_if_index0, spd_index0, spd0->id);
#endif

          p0 = ipsec_output_policy_match (
            spd0, ip0->protocol,
            clib_net_to_host_u32 (ip0->src_address.as_u32),
            clib_net_to_host_u32 (ip0->dst_address.as_u32),
            clib_net_to_host_u16 (udp0->src_port),
            clib_net_to_host_u16 (udp0->dst_port));
        }
      tcp0 = (void *) udp0;

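      /*
       * Dispatch on the matched policy action: protect, bypass or discard.
       * With no matching policy the packet is dropped and counted as
       * "no match".
       */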
      if (PREDICT_TRUE (p0 != NULL))
        {
          pi0 = p0 - im->policies;

          vlib_prefetch_combined_counter (&ipsec_spd_policy_counters,
                                          thread_index, pi0);

          if (is_ipv6)
            {
              bytes0 = clib_net_to_host_u16 (ip6_0->payload_length);
              bytes0 += sizeof (ip6_header_t);
            }
          else
            {
              bytes0 = clib_net_to_host_u16 (ip0->length);
            }

          if (p0->policy == IPSEC_POLICY_ACTION_PROTECT)
            {
              ipsec_sa_t *sa = 0;
              nc_protect++;
              sa = ipsec_sa_get (p0->sa_index);
              if (sa->protocol == IPSEC_PROTOCOL_ESP)
                {
                  if (is_ipv6)
                    next_node_index = im->esp6_encrypt_node_index;
                  else
                    next_node_index = im->esp4_encrypt_node_index;
                }
              else if (is_ipv6)
                next_node_index = im->ah6_encrypt_node_index;
              else
                next_node_index = im->ah4_encrypt_node_index;
              vnet_buffer (b0)->ipsec.sad_index = p0->sa_index;

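              /*
               * The packet is about to be handed to the encrypt node, so
               * any checksums still left to hardware offload have to be
               * completed in software first.
               */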
              if (PREDICT_FALSE (b0->flags & VNET_BUFFER_F_OFFLOAD))
                {
                  vnet_buffer_oflags_t oflags = vnet_buffer (b0)->oflags;

                  /*
                   * Clear the offload flags before the checksums are
                   * computed, while the buffer metadata is still in cache.
                   */
                  vnet_buffer_offload_flags_clear (b0, oflags);

                  if (is_ipv6)
                    {
                      if (PREDICT_FALSE (oflags &
                                         VNET_BUFFER_OFFLOAD_F_TCP_CKSUM))
                        tcp0->checksum = ip6_tcp_udp_icmp_compute_checksum (
                          vm, b0, ip6_0, &bogus);
                      if (PREDICT_FALSE (oflags &
                                         VNET_BUFFER_OFFLOAD_F_UDP_CKSUM))
                        udp0->checksum = ip6_tcp_udp_icmp_compute_checksum (
                          vm, b0, ip6_0, &bogus);
                    }
                  else
                    {
                      if (PREDICT_FALSE (oflags &
                                         VNET_BUFFER_OFFLOAD_F_IP_CKSUM))
                        ip0->checksum = ip4_header_checksum (ip0);
                      if (PREDICT_FALSE (oflags &
                                         VNET_BUFFER_OFFLOAD_F_TCP_CKSUM))
                        tcp0->checksum =
                          ip4_tcp_udp_compute_checksum (vm, b0, ip0);
                      if (PREDICT_FALSE (oflags &
                                         VNET_BUFFER_OFFLOAD_F_UDP_CKSUM))
                        udp0->checksum =
                          ip4_tcp_udp_compute_checksum (vm, b0, ip0);
                    }
                }
              vlib_buffer_advance (b0, iph_offset);
            }
          else if (p0->policy == IPSEC_POLICY_ACTION_BYPASS)
            {
              nc_bypass++;
              next_node_index = get_next_output_feature_node_index (b0, node);
            }
          else
            {
              nc_discard++;
              next_node_index = im->error_drop_node_index;
            }

          vlib_increment_combined_counter
            (&ipsec_spd_policy_counters, thread_index, pi0, 1, bytes0);
        }
      else
        {
          pi0 = ~0;
          nc_nomatch++;
          next_node_index = im->error_drop_node_index;
        }

      from += 1;
      n_left_from -= 1;

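      /*
       * Buffers are batched into frames per next node; allocate a fresh
       * frame whenever the next node changes (or on the first packet).
       */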
      if (PREDICT_FALSE ((last_next_node_index != next_node_index) || f == 0))
        {
          /* put the previous frame, unless this is the first one */
          if (f)
            vlib_put_frame_to_node (vm, last_next_node_index, f);

          last_next_node_index = next_node_index;

          f = vlib_get_frame_to_node (vm, next_node_index);

          /* copy the trace flag from the node runtime onto the new frame */
          f->frame_flags |= node->flags & VLIB_NODE_FLAG_TRACE;

          to_next = vlib_frame_vector_args (f);
        }

      to_next[0] = bi0;
      to_next += 1;
      f->n_vectors++;

      if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE) &&
          PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
        {
          ipsec_output_trace_t *tr =
            vlib_add_trace (vm, node, b0, sizeof (*tr));
          if (spd0)
            tr->spd_id = spd0->id;
          tr->policy_id = pi0;
        }
    }

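  /*
   * Flush the last frame and export the per-action counters gathered in
   * this run of the node.
   */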
  vlib_put_frame_to_node (vm, next_node_index, f);
  vlib_node_increment_counter (vm, node->node_index,
                               IPSEC_OUTPUT_ERROR_POLICY_PROTECT, nc_protect);
  vlib_node_increment_counter (vm, node->node_index,
                               IPSEC_OUTPUT_ERROR_POLICY_BYPASS, nc_bypass);
  vlib_node_increment_counter (vm, node->node_index,
                               IPSEC_OUTPUT_ERROR_POLICY_DISCARD, nc_discard);
  vlib_node_increment_counter (vm, node->node_index,
                               IPSEC_OUTPUT_ERROR_POLICY_NO_MATCH,
                               nc_nomatch);
  return from_frame->n_vectors;
}

VLIB_NODE_FN (ipsec4_output_node) (vlib_main_t * vm,
                                   vlib_node_runtime_t * node,
                                   vlib_frame_t * frame)
{
  return ipsec_output_inline (vm, node, frame, 0);
}

VLIB_REGISTER_NODE (ipsec4_output_node) = {
  .name = "ipsec4-output-feature",
  .vector_size = sizeof (u32),
  .format_trace = format_ipsec_output_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(ipsec_output_error_strings),
  .error_strings = ipsec_output_error_strings,

  .n_next_nodes = IPSEC_OUTPUT_N_NEXT,
  .next_nodes = {
#define _(s,n) [IPSEC_OUTPUT_NEXT_##s] = n,
    foreach_ipsec_output_next
#undef _
  },
};

VLIB_NODE_FN (ipsec6_output_node) (vlib_main_t * vm,
                                   vlib_node_runtime_t * node,
                                   vlib_frame_t * frame)
{
  return ipsec_output_inline (vm, node, frame, 1);
}

VLIB_REGISTER_NODE (ipsec6_output_node) = {
  .name = "ipsec6-output-feature",
  .vector_size = sizeof (u32),
  .format_trace = format_ipsec_output_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,

  .n_errors = ARRAY_LEN(ipsec_output_error_strings),
  .error_strings = ipsec_output_error_strings,

  .n_next_nodes = IPSEC_OUTPUT_N_NEXT,
  .next_nodes = {
#define _(s,n) [IPSEC_OUTPUT_NEXT_##s] = n,
    foreach_ipsec_output_next
#undef _
  },
};