2 * ipsec_output.c : IPSec output node
4 * Copyright (c) 2015 Cisco and/or its affiliates.
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at:
9 * http://www.apache.org/licenses/LICENSE-2.0
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
18 #include <vnet/vnet.h>
19 #include <vnet/api_errno.h>
20 #include <vnet/ip/ip.h>
22 #include <vnet/ipsec/ipsec.h>
/* Per-node error/counter definitions: each _(SYM, STR) pair expands to an
 * enum value IPSEC_OUTPUT_ERROR_<SYM> and a matching display string.
 * NOTE(review): the typedef/enum opening braces are outside this view —
 * elided lines assumed unchanged. */
26 #define foreach_ipsec_output_error \
27 _(RX_PKTS, "IPSec pkts received") \
28 _(POLICY_DISCARD, "IPSec policy discard") \
29 _(POLICY_NO_MATCH, "IPSec policy (no match)") \
30 _(POLICY_PROTECT, "IPSec policy protect") \
31 _(POLICY_BYPASS, "IPSec policy bypass") \
32 _(ENCAPS_FAILED, "IPSec encapsulation failed")
/* Expand the list into enum symbols used with vlib_node_increment_counter. */
36 #define _(sym,str) IPSEC_OUTPUT_ERROR_##sym,
37 foreach_ipsec_output_error
40 } ipsec_output_error_t;
/* Counter display strings, indexed by ipsec_output_error_t. */
42 static char *ipsec_output_error_strings[] = {
43 #define _(sym,string) string,
44 foreach_ipsec_output_error
/* Packet-trace record; member declarations (at least spd_id, per the
 * trace formatter below) are elided from this view. */
51 } ipsec_output_trace_t;
53 /* packet trace format function */
/* Standard vlib trace formatter: consumes (vm, node, trace-record) from
 * the va_list and appends a one-line description of the SPD lookup result.
 * vm/node are unused but must still be pulled off the va_list. */
55 format_ipsec_output_trace (u8 * s, va_list * args)
57 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
58 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
59 ipsec_output_trace_t *t = va_arg (*args, ipsec_output_trace_t *);
/* Print the matched SPD id; the branch condition selecting between the
 * two messages is elided from this view. */
63 s = format (s, "spd %u ", t->spd_id);
67 s = format (s, "no spd");
/* Linear-scan IPv4 outbound SPD lookup.
 * Walks spd->ipv4_outbound_policies in order and returns the first policy
 * whose protocol, remote/local address ranges, and (for TCP/UDP/SCTP)
 * local/remote port ranges all match the packet's 5-tuple.
 * la/ra are host byte order (policy addresses are converted with
 * clib_net_to_host_u32 before comparison); lp/rp port byte order is not
 * verifiable from this view — the comparisons against p->lport/p->rport
 * use the values as passed.
 * Returns the matching ipsec_policy_t *, or (per the callers' NULL check)
 * presumably NULL on no match — the return lines are elided here. */
72 always_inline ipsec_policy_t *
73 ipsec_output_policy_match (ipsec_spd_t * spd, u8 pr, u32 la, u32 ra, u16 lp,
82 vec_foreach (i, spd->ipv4_outbound_policies)
84 p = pool_elt_at_index (spd->policies, *i);
/* protocol 0 in a policy acts as a wildcard; a non-zero protocol must
 * match exactly. */
85 if (PREDICT_FALSE (p->protocol && (p->protocol != pr)))
/* Inclusive [start, stop] range checks on remote then local address. */
88 if (ra < clib_net_to_host_u32 (p->raddr.start.ip4.as_u32))
91 if (ra > clib_net_to_host_u32 (p->raddr.stop.ip4.as_u32))
94 if (la < clib_net_to_host_u32 (p->laddr.start.ip4.as_u32))
97 if (la > clib_net_to_host_u32 (p->laddr.stop.ip4.as_u32))
/* Port ranges only apply to protocols that carry ports; other
 * protocols skip the port checks (condition head elided). */
101 ((pr != IP_PROTOCOL_TCP) && (pr != IP_PROTOCOL_UDP)
102 && (pr != IP_PROTOCOL_SCTP)))
105 if (lp < p->lport.start)
108 if (lp > p->lport.stop)
111 if (rp < p->rport.start)
114 if (rp > p->rport.stop)
/* Inclusive range test for an IPv6 address: true iff la <= a <= ua.
 * memcmp on the 16 raw bytes gives a lexicographic byte comparison,
 * which equals numeric ordering for addresses stored in network byte
 * order — presumably the case here; confirm against callers. */
123 ip6_addr_match_range (ip6_address_t * a, ip6_address_t * la,
126 if ((memcmp (a->as_u64, la->as_u64, 2 * sizeof (u64)) >= 0) &&
127 (memcmp (a->as_u64, ua->as_u64, 2 * sizeof (u64)) <= 0))
/* Linear-scan IPv6 outbound SPD lookup; IPv6 counterpart of
 * ipsec_output_policy_match. Address ranges are tested with
 * ip6_addr_match_range; port-range checks apply only to TCP/UDP/SCTP.
 * Returns the first matching policy (return statements elided here;
 * callers NULL-check the result). */
132 always_inline ipsec_policy_t *
133 ipsec6_output_policy_match (ipsec_spd_t * spd,
135 ip6_address_t * ra, u16 lp, u16 rp, u8 pr)
143 vec_foreach (i, spd->ipv6_outbound_policies)
145 p = pool_elt_at_index (spd->policies, *i);
/* protocol 0 is a wildcard, as in the IPv4 matcher. */
146 if (PREDICT_FALSE (p->protocol && (p->protocol != pr)))
149 if (!ip6_addr_match_range (ra, &p->raddr.start.ip6, &p->raddr.stop.ip6))
152 if (!ip6_addr_match_range (la, &p->laddr.start.ip6, &p->laddr.stop.ip6))
/* Skip port checks for protocols without ports (condition head elided). */
156 ((pr != IP_PROTOCOL_TCP) && (pr != IP_PROTOCOL_UDP)
157 && (pr != IP_PROTOCOL_SCTP)))
160 if (lp < p->lport.start)
163 if (lp > p->lport.stop)
166 if (rp < p->rport.start)
169 if (rp > p->rport.stop)
/* Shared IPv4/IPv6 output-node worker (is_ipv6 selects the family).
 * For each buffer in the frame: find the SPD bound to the TX interface,
 * match the packet against the outbound policies, and dispatch the buffer
 * to the node selected by the policy action:
 *   PROTECT  -> ESP/AH encrypt node (family-specific), SA index stashed
 *               in the buffer's ipsec opaque;
 *   BYPASS   -> next output-feature node;
 *   DISCARD / no match / no SPD -> error-drop.
 * Buffers are batched into per-next-node frames that are handed off
 * whenever the chosen next node changes. Returns the number of packets
 * processed. Many lines (loop decrements, braces, else branches) are
 * elided from this view. */
179 ipsec_output_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
180 vlib_frame_t * from_frame, int is_ipv6)
182 ipsec_main_t *im = &ipsec_main;
184 u32 *from, *to_next = 0;
185 u32 n_left_from, sw_if_index0, last_sw_if_index = (u32) ~ 0;
186 u32 next_node_index = (u32) ~ 0, last_next_node_index = (u32) ~ 0;
189 ipsec_spd_t *spd0 = 0;
/* Per-action packet tallies, flushed to node counters at the end. */
191 u64 nc_protect = 0, nc_bypass = 0, nc_discard = 0, nc_nomatch = 0;
193 from = vlib_frame_vector_args (from_frame);
194 n_left_from = from_frame->n_vectors;
196 while (n_left_from > 0)
202 ip6_header_t *ip6_0 = 0;
208 b0 = vlib_get_buffer (vm, bi0);
209 sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_TX];
/* The L2 rewrite has already been prepended; save_rewrite_length lets
 * us locate the IP header past it. */
210 iph_offset = vnet_buffer (b0)->ip.save_rewrite_length;
211 ip0 = (ip4_header_t *) ((u8 *) vlib_buffer_get_current (b0)
214 /* lookup for SPD only if sw_if_index is changed */
215 if (PREDICT_FALSE (last_sw_if_index != sw_if_index0))
217 uword *p = hash_get (im->spd_index_by_sw_if_index, sw_if_index0);
220 spd0 = pool_elt_at_index (im->spds, spd_index0);
221 last_sw_if_index = sw_if_index0;
/* --- IPv6 path: parse header, (debug) log, match policy. --- */
226 ip6_0 = (ip6_header_t *) ((u8 *) vlib_buffer_get_current (b0)
/* NOTE(review): ip6_next_header assumes no extension headers between
 * the IPv6 header and the transport header — ports read below would be
 * garbage otherwise. */
229 udp0 = ip6_next_header (ip6_0);
232 ("packet received from %U port %u to %U port %u spd_id %u",
233 format_ip6_address, &ip6_0->src_address,
234 clib_net_to_host_u16 (udp0->src_port), format_ip6_address,
235 &ip6_0->dst_address, clib_net_to_host_u16 (udp0->dst_port),
239 p0 = ipsec6_output_policy_match (spd0,
245 (udp0->dst_port), ip6_0->protocol);
/* --- IPv4 path: transport header follows IHL-sized IP header. --- */
249 udp0 = (udp_header_t *) ((u8 *) ip0 + ip4_header_bytes (ip0));
252 clib_warning ("packet received from %U to %U port %u",
253 format_ip4_address, ip0->src_address.as_u8,
254 format_ip4_address, ip0->dst_address.as_u8,
255 clib_net_to_host_u16 (udp0->dst_port));
256 clib_warning ("sw_if_index0 %u spd_index0 %u spd_id %u",
257 sw_if_index0, spd_index0, spd0->id);
260 p0 = ipsec_output_policy_match (spd0, ip0->protocol,
262 (ip0->src_address.as_u32),
264 (ip0->dst_address.as_u32),
/* TCP and UDP headers share port layout at offset 0, so one pointer
 * serves both. */
270 tcp0 = (void *) udp0;
272 if (PREDICT_TRUE (p0 != NULL))
274 if (p0->policy == IPSEC_POLICY_ACTION_PROTECT)
/* PROTECT: pick encrypt node by SA protocol (ESP vs AH) and family;
 * the is_ipv6 branches around these assignments are elided. */
278 sa = pool_elt_at_index (im->sad, p0->sa_index);
279 if (sa->protocol == IPSEC_PROTOCOL_ESP)
281 next_node_index = im->esp6_encrypt_node_index;
283 next_node_index = im->esp4_encrypt_node_index;
285 next_node_index = im->ah6_encrypt_node_index;
287 next_node_index = im->ah4_encrypt_node_index;
/* Hand the SA to the encrypt node via buffer metadata. */
288 vnet_buffer (b0)->ipsec.sad_index = p0->sa_index;
289 p0->counter.packets++;
293 clib_net_to_host_u16 (ip6_0->payload_length);
294 p0->counter.bytes += sizeof (ip6_header_t);
/* Checksum offload must be resolved in software before encryption,
 * since hardware cannot checksum inside the encrypted payload. */
296 (b0->flags & VNET_BUFFER_F_OFFLOAD_TCP_CKSUM))
299 ip6_tcp_udp_icmp_compute_checksum (vm, b0, ip6_0,
301 b0->flags &= ~VNET_BUFFER_F_OFFLOAD_TCP_CKSUM;
304 (b0->flags & VNET_BUFFER_F_OFFLOAD_UDP_CKSUM))
307 ip6_tcp_udp_icmp_compute_checksum (vm, b0, ip6_0,
309 b0->flags &= ~VNET_BUFFER_F_OFFLOAD_UDP_CKSUM;
314 p0->counter.bytes += clib_net_to_host_u16 (ip0->length);
315 if (b0->flags & VNET_BUFFER_F_OFFLOAD_IP_CKSUM)
317 ip0->checksum = ip4_header_checksum (ip0);
318 b0->flags &= ~VNET_BUFFER_F_OFFLOAD_IP_CKSUM;
321 (b0->flags & VNET_BUFFER_F_OFFLOAD_TCP_CKSUM))
324 ip4_tcp_udp_compute_checksum (vm, b0, ip0);
325 b0->flags &= ~VNET_BUFFER_F_OFFLOAD_TCP_CKSUM;
328 (b0->flags & VNET_BUFFER_F_OFFLOAD_UDP_CKSUM))
331 ip4_tcp_udp_compute_checksum (vm, b0, ip0);
332 b0->flags &= ~VNET_BUFFER_F_OFFLOAD_UDP_CKSUM;
/* Strip the L2 rewrite so the encrypt node sees the IP header first. */
335 vlib_buffer_advance (b0, iph_offset);
337 else if (p0->policy == IPSEC_POLICY_ACTION_BYPASS)
/* BYPASS: continue down the interface output feature arc unchanged. */
340 next_node_index = get_next_output_feature_node_index (b0, node);
341 p0->counter.packets++;
345 clib_net_to_host_u16 (ip6_0->payload_length);
346 p0->counter.bytes += sizeof (ip6_header_t);
350 p0->counter.bytes += clib_net_to_host_u16 (ip0->length);
/* Remaining action (presumably DISCARD — the else head is elided):
 * count the packet and drop it. */
356 p0->counter.packets++;
360 clib_net_to_host_u16 (ip6_0->payload_length);
361 p0->counter.bytes += sizeof (ip6_header_t);
365 p0->counter.bytes += clib_net_to_host_u16 (ip0->length);
367 next_node_index = im->error_drop_node_index;
/* No policy matched: drop. */
373 next_node_index = im->error_drop_node_index;
/* Frame batching: when the target node changes (or on the first packet),
 * flush the current frame and open one for the new next node. */
379 if (PREDICT_FALSE ((last_next_node_index != next_node_index) || f == 0))
381 /* if this is not 1st frame */
383 vlib_put_frame_to_node (vm, last_next_node_index, f);
385 last_next_node_index = next_node_index;
387 f = vlib_get_frame_to_node (vm, next_node_index);
389 /* frame->frame_flags, copy it from node */
390 /* Copy trace flag from next_frame and from runtime. */
391 f->frame_flags |= node->flags & VLIB_NODE_FLAG_TRACE;
393 to_next = vlib_frame_vector_args (f);
400 if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
402 ipsec_output_trace_t *tr =
403 vlib_add_trace (vm, node, b0, sizeof (*tr));
/* NOTE(review): spd0 is only guarded by a branch elided from this
 * view; the dereference here relies on it being non-NULL. */
405 tr->spd_id = spd0->id;
/* Flush the final frame and publish per-action counters. */
409 vlib_put_frame_to_node (vm, next_node_index, f);
410 vlib_node_increment_counter (vm, node->node_index,
411 IPSEC_OUTPUT_ERROR_POLICY_PROTECT, nc_protect);
412 vlib_node_increment_counter (vm, node->node_index,
413 IPSEC_OUTPUT_ERROR_POLICY_BYPASS, nc_bypass);
414 vlib_node_increment_counter (vm, node->node_index,
415 IPSEC_OUTPUT_ERROR_POLICY_DISCARD, nc_discard);
416 vlib_node_increment_counter (vm, node->node_index,
417 IPSEC_OUTPUT_ERROR_POLICY_NO_MATCH,
419 return from_frame->n_vectors;
/* IPv4 entry point: thin wrapper dispatching to the shared worker with
 * is_ipv6 = 0. */
422 VLIB_NODE_FN (ipsec4_output_node) (vlib_main_t * vm,
423 vlib_node_runtime_t * node,
424 vlib_frame_t * frame)
426 return ipsec_output_inline (vm, node, frame, 0);
/* Graph-node registration for the IPv4 output node: wires in the trace
 * formatter, the error strings defined above, and the next-node table
 * expanded from foreach_ipsec_output_next (defined outside this view). */
430 VLIB_REGISTER_NODE (ipsec4_output_node) = {
431 .name = "ipsec4-output",
432 .vector_size = sizeof (u32),
433 .format_trace = format_ipsec_output_trace,
434 .type = VLIB_NODE_TYPE_INTERNAL,
436 .n_errors = ARRAY_LEN(ipsec_output_error_strings),
437 .error_strings = ipsec_output_error_strings,
439 .n_next_nodes = IPSEC_OUTPUT_N_NEXT,
441 #define _(s,n) [IPSEC_OUTPUT_NEXT_##s] = n,
442 foreach_ipsec_output_next
/* IPv6 entry point: thin wrapper dispatching to the shared worker with
 * is_ipv6 = 1. */
448 VLIB_NODE_FN (ipsec6_output_node) (vlib_main_t * vm,
449 vlib_node_runtime_t * node,
450 vlib_frame_t * frame)
452 return ipsec_output_inline (vm, node, frame, 1);
/* Graph-node registration for the IPv6 output node; mirrors the IPv4
 * registration above. */
456 VLIB_REGISTER_NODE (ipsec6_output_node) = {
457 .name = "ipsec6-output",
458 .vector_size = sizeof (u32),
459 .format_trace = format_ipsec_output_trace,
460 .type = VLIB_NODE_TYPE_INTERNAL,
462 .n_errors = ARRAY_LEN(ipsec_output_error_strings),
463 .error_strings = ipsec_output_error_strings,
465 .n_next_nodes = IPSEC_OUTPUT_N_NEXT,
467 #define _(s,n) [IPSEC_OUTPUT_NEXT_##s] = n,
468 foreach_ipsec_output_next
/* Build variant when IPSec support is compiled out (the matching #if is
 * outside this view): both output nodes are registered with a stub
 * function that only logs a warning. */
474 #else /* IPSEC > 1 */
476 /* Dummy ipsec output node, in case when IPSec is disabled */
479 ipsec_output_node_fn (vlib_main_t * vm,
480 vlib_node_runtime_t * node, vlib_frame_t * frame)
482 clib_warning ("IPSec disabled");
/* Same node names as the real implementation so the graph wiring is
 * unchanged whether or not IPSec is enabled. */
487 VLIB_REGISTER_NODE (ipsec4_output_node) = {
488 .vector_size = sizeof (u32),
489 .function = ipsec_output_node_fn,
490 .name = "ipsec4-output",
493 VLIB_REGISTER_NODE (ipsec6_output_node) = {
494 .vector_size = sizeof (u32),
495 .function = ipsec_output_node_fn,
496 .name = "ipsec6-output",
502 * fd.io coding-style-patch-verification: ON
505 * eval: (c-set-style "gnu")