2 * ipsec_output.c : IPSec output node
4 * Copyright (c) 2015 Cisco and/or its affiliates.
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at:
9 * http://www.apache.org/licenses/LICENSE-2.0
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
18 #include <vnet/vnet.h>
19 #include <vnet/api_errno.h>
20 #include <vnet/ip/ip.h>
22 #include <vnet/ipsec/ipsec.h>
23 #include <vnet/ipsec/ipsec_io.h>
24 #include <vnet/ipsec/ipsec_output.h>
/*
 * Error/counter catalogue for the IPsec output nodes: one (symbol, string)
 * pair per counter.  Expanded twice below — once to build the
 * ipsec_output_error_t enum and once to build the matching string table
 * referenced by the node registrations (.error_strings).
 */
26 #define foreach_ipsec_output_error \
27 _(RX_PKTS, "IPSec pkts received") \
28 _(POLICY_DISCARD, "IPSec policy discard") \
29 _(POLICY_NO_MATCH, "IPSec policy (no match)") \
30 _(POLICY_PROTECT, "IPSec policy protect") \
31 _(POLICY_BYPASS, "IPSec policy bypass") \
32 _(ENCAPS_FAILED, "IPSec encapsulation failed")
/* Expand the error list into IPSEC_OUTPUT_ERROR_* enum members.
 * NOTE(review): the opening "typedef enum {" line is elided in this view. */
36 #define _(sym,str) IPSEC_OUTPUT_ERROR_##sym,
37 foreach_ipsec_output_error
40 } ipsec_output_error_t;
/* Human-readable counter names, generated in the same order as the enum
 * above so the two stay index-aligned. */
42 static char *ipsec_output_error_strings[] = {
43 #define _(sym,string) string,
44 foreach_ipsec_output_error
/* NOTE(review): lines elided here — the brace below closes the packet-trace
 * record typedef (spd_id/policy_id fields not visible), not the string
 * table above. */
52 } ipsec_output_trace_t;
54 /* packet trace format function */
/*
 * Renders an ipsec_output_trace_t for "show trace": prints the SPD id and
 * the matched policy index.  The vm/node arguments are consumed from the
 * va_list per the vlib format-trace convention but are unused here.
 * NOTE(review): the "static u8 *" return line and braces are elided in this
 * view of the file.
 */
56 format_ipsec_output_trace (u8 * s, va_list * args)
58 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
59 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
60 ipsec_output_trace_t *t = va_arg (*args, ipsec_output_trace_t *);
62 s = format (s, "spd %u policy %d", t->spd_id, t->policy_id);
/*
 * ipsec_output_inline: shared worker for the ipsec4/ipsec6 output feature
 * nodes (is_ipv6 selects the header parse path).  For each buffer in the
 * frame it resolves the outbound SPD from the TX sw_if_index, matches the
 * packet against the SPD policies, and hands the buffer to the ESP/AH
 * encrypt node (PROTECT), the next output feature node (BYPASS) or
 * error-drop (DISCARD / no policy match), batching buffers into per-next-
 * node frames.  Returns the number of vectors processed.
 *
 * NOTE(review): this view of the file is missing interior lines (buffer
 * index loads, else branches, closing braces, counter increments); the
 * comments below describe only what is visible and should be re-verified
 * against the full source.
 */
68 ipsec_output_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
69 vlib_frame_t * from_frame, int is_ipv6)
71 ipsec_main_t *im = &ipsec_main;
73 u32 *from, *to_next = 0, thread_index;
/* last_sw_if_index / last_next_node_index start as ~0 sentinels so the
 * first packet always triggers an SPD lookup and a fresh output frame. */
74 u32 n_left_from, sw_if_index0, last_sw_if_index = (u32) ~ 0;
75 u32 next_node_index = (u32) ~ 0, last_next_node_index = (u32) ~ 0;
78 ipsec_spd_t *spd0 = 0;
/* Local tallies per policy outcome; flushed into node counters at exit. */
80 u64 nc_protect = 0, nc_bypass = 0, nc_discard = 0, nc_nomatch = 0;
81 u8 flow_cache_enabled = im->output_flow_cache_flag;
83 from = vlib_frame_vector_args (from_frame);
84 n_left_from = from_frame->n_vectors;
85 thread_index = vm->thread_index;
/* Per-packet loop (loop body braces elided in this view). */
87 while (n_left_from > 0)
90 vlib_buffer_t *b0, *b1;
91 ipsec_policy_t *p0 = NULL;
93 ip6_header_t *ip6_0 = 0;
100 b0 = vlib_get_buffer (vm, bi0);
/* Prefetch the next buffer's metadata and data to hide memory latency. */
104 b1 = vlib_get_buffer (vm, bi1);
105 CLIB_PREFETCH (b1, CLIB_CACHE_LINE_BYTES * 2, STORE);
106 vlib_prefetch_buffer_data (b1, LOAD);
108 sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_TX];
/* The L2 rewrite has already been prepended; iph_offset skips past it to
 * reach the IP header. */
109 iph_offset = vnet_buffer (b0)->ip.save_rewrite_length;
110 ip0 = (ip4_header_t *) ((u8 *) vlib_buffer_get_current (b0)
113 /* lookup for SPD only if sw_if_index is changed */
114 if (PREDICT_FALSE (last_sw_if_index != sw_if_index0))
116 uword *p = hash_get (im->spd_index_by_sw_if_index, sw_if_index0);
119 spd0 = pool_elt_at_index (im->spds, spd_index0);
120 last_sw_if_index = sw_if_index0;
/* IPv6 path: parse the v6 header and match against the v6 SPD.
 * NOTE(review): udp0 here points at whatever follows the v6 header —
 * only meaningful as UDP/TCP for those protocols; confirm upstream
 * guards in the elided lines. */
125 ip6_0 = (ip6_header_t *) ((u8 *) vlib_buffer_get_current (b0)
128 udp0 = ip6_next_header (ip6_0);
131 ("packet received from %U port %u to %U port %u spd_id %u",
132 format_ip6_address, &ip6_0->src_address,
133 clib_net_to_host_u16 (udp0->src_port), format_ip6_address,
134 &ip6_0->dst_address, clib_net_to_host_u16 (udp0->dst_port),
138 p0 = ipsec6_output_policy_match (spd0,
144 (udp0->dst_port), ip6_0->protocol);
/* IPv4 path: locate the L4 header past the (possibly optioned) v4 header. */
148 udp0 = (udp_header_t *) ((u8 *) ip0 + ip4_header_bytes (ip0));
151 clib_warning ("packet received from %U to %U port %u",
152 format_ip4_address, ip0->src_address.as_u8,
153 format_ip4_address, ip0->dst_address.as_u8,
154 clib_net_to_host_u16 (udp0->dst_port));
155 clib_warning ("sw_if_index0 %u spd_index0 %u spd_id %u",
156 sw_if_index0, spd_index0, spd0->id);
160 * Check whether flow cache is enabled.
/* Fast path: 5-tuple flow-cache lookup (network byte order args). */
162 if (flow_cache_enabled)
164 p0 = ipsec4_out_spd_find_flow_cache_entry (
165 im, ip0->protocol, ip0->src_address.as_u32,
166 ip0->dst_address.as_u32, udp0->src_port, udp0->dst_port);
169 /* Fall back to linear search if flow cache lookup fails */
/* Linear SPD walk takes host-byte-order addresses/ports, unlike the
 * flow-cache lookup above. */
172 p0 = ipsec_output_policy_match (
174 clib_net_to_host_u32 (ip0->src_address.as_u32),
175 clib_net_to_host_u32 (ip0->dst_address.as_u32),
176 clib_net_to_host_u16 (udp0->src_port),
177 clib_net_to_host_u16 (udp0->dst_port), flow_cache_enabled);
/* TCP shares the port layout with UDP, so the same pointer is reused. */
180 tcp0 = (void *) udp0;
182 if (PREDICT_TRUE (p0 != NULL))
/* pi0: pool index of the matched policy, used for per-policy counters. */
184 pi0 = p0 - im->policies;
186 vlib_prefetch_combined_counter (&ipsec_spd_policy_counters,
/* Byte count for the policy counter: v6 payload + fixed header, or the
 * v4 total-length field. */
191 bytes0 = clib_net_to_host_u16 (ip6_0->payload_length);
192 bytes0 += sizeof (ip6_header_t);
196 bytes0 = clib_net_to_host_u16 (ip0->length);
/* PROTECT: pick the ESP/AH encrypt node matching the SA's protocol and
 * address family, and stash the SA index for the encrypt node. */
199 if (p0->policy == IPSEC_POLICY_ACTION_PROTECT)
203 sa = ipsec_sa_get (p0->sa_index);
204 if (sa->protocol == IPSEC_PROTOCOL_ESP)
206 next_node_index = im->esp6_encrypt_node_index;
208 next_node_index = im->esp4_encrypt_node_index;
210 next_node_index = im->ah6_encrypt_node_index;
212 next_node_index = im->ah4_encrypt_node_index;
213 vnet_buffer (b0)->ipsec.sad_index = p0->sa_index;
/* Checksum offload cannot survive encryption: resolve any requested
 * IP/TCP/UDP checksum offloads in software before encapsulation. */
215 if (PREDICT_FALSE (b0->flags & VNET_BUFFER_F_OFFLOAD))
217 vnet_buffer_oflags_t oflags = vnet_buffer (b0)->oflags;
220 * Clearing offload flags before checksum is computed
221 * It guarantees the cache hit!
223 vnet_buffer_offload_flags_clear (b0, oflags);
227 if (PREDICT_FALSE (oflags &
228 VNET_BUFFER_OFFLOAD_F_TCP_CKSUM))
230 tcp0->checksum = ip6_tcp_udp_icmp_compute_checksum (
231 vm, b0, ip6_0, &bogus);
233 if (PREDICT_FALSE (oflags &
234 VNET_BUFFER_OFFLOAD_F_UDP_CKSUM))
236 udp0->checksum = ip6_tcp_udp_icmp_compute_checksum (
237 vm, b0, ip6_0, &bogus);
242 if (PREDICT_FALSE (oflags &
243 VNET_BUFFER_OFFLOAD_F_IP_CKSUM))
245 ip0->checksum = ip4_header_checksum (ip0);
247 if (PREDICT_FALSE (oflags &
248 VNET_BUFFER_OFFLOAD_F_TCP_CKSUM))
251 ip4_tcp_udp_compute_checksum (vm, b0, ip0);
253 if (PREDICT_FALSE (oflags &
254 VNET_BUFFER_OFFLOAD_F_UDP_CKSUM))
257 ip4_tcp_udp_compute_checksum (vm, b0, ip0);
/* Encrypt nodes expect the buffer to start at the IP header, so skip
 * the saved L2 rewrite. */
261 vlib_buffer_advance (b0, iph_offset);
/* BYPASS: continue down the interface output feature arc unencrypted. */
263 else if (p0->policy == IPSEC_POLICY_ACTION_BYPASS)
266 next_node_index = get_next_output_feature_node_index (b0, node);
/* DISCARD (and, below, no-match): send the buffer to error-drop. */
271 next_node_index = im->error_drop_node_index;
273 vlib_increment_combined_counter
274 (&ipsec_spd_policy_counters, thread_index, pi0, 1, bytes0);
280 next_node_index = im->error_drop_node_index;
/* Frame management: buffers are batched per next node; when the target
 * node changes (or no frame exists yet), flush the previous frame and
 * open a new one. */
286 if (PREDICT_FALSE ((last_next_node_index != next_node_index) || f == 0))
288 /* if this is not 1st frame */
290 vlib_put_frame_to_node (vm, last_next_node_index, f);
292 last_next_node_index = next_node_index;
294 f = vlib_get_frame_to_node (vm, next_node_index);
296 /* frame->frame_flags, copy it from node */
297 /* Copy trace flag from next_frame and from runtime. */
298 f->frame_flags |= node->flags & VLIB_NODE_FLAG_TRACE;
300 to_next = vlib_frame_vector_args (f);
/* Record a trace entry only when both the node and this buffer are
 * marked for tracing. */
307 if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE) &&
308 PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
310 ipsec_output_trace_t *tr =
311 vlib_add_trace (vm, node, b0, sizeof (*tr));
313 tr->spd_id = spd0->id;
/* Flush the final pending frame and publish the per-outcome tallies. */
318 vlib_put_frame_to_node (vm, next_node_index, f);
319 vlib_node_increment_counter (vm, node->node_index,
320 IPSEC_OUTPUT_ERROR_POLICY_PROTECT, nc_protect);
321 vlib_node_increment_counter (vm, node->node_index,
322 IPSEC_OUTPUT_ERROR_POLICY_BYPASS, nc_bypass);
323 vlib_node_increment_counter (vm, node->node_index,
324 IPSEC_OUTPUT_ERROR_POLICY_DISCARD, nc_discard);
325 vlib_node_increment_counter (vm, node->node_index,
326 IPSEC_OUTPUT_ERROR_POLICY_NO_MATCH,
328 return from_frame->n_vectors;
/* IPv4 entry point: thin wrapper dispatching to the shared inline worker
 * with is_ipv6 = 0 (braces elided in this view of the file). */
331 VLIB_NODE_FN (ipsec4_output_node) (vlib_main_t * vm,
332 vlib_node_runtime_t * node,
333 vlib_frame_t * frame)
335 return ipsec_output_inline (vm, node, frame, 0);
/* Graph-node registration for the IPv4 IPsec output feature.  Error strings
 * come from the foreach_ipsec_output_error catalogue; next nodes are
 * generated from foreach_ipsec_output_next (closing lines elided in this
 * view of the file). */
339 VLIB_REGISTER_NODE (ipsec4_output_node) = {
340 .name = "ipsec4-output-feature",
341 .vector_size = sizeof (u32),
342 .format_trace = format_ipsec_output_trace,
343 .type = VLIB_NODE_TYPE_INTERNAL,
345 .n_errors = ARRAY_LEN(ipsec_output_error_strings),
346 .error_strings = ipsec_output_error_strings,
348 .n_next_nodes = IPSEC_OUTPUT_N_NEXT,
350 #define _(s,n) [IPSEC_OUTPUT_NEXT_##s] = n,
351 foreach_ipsec_output_next
/* IPv6 entry point: thin wrapper dispatching to the shared inline worker
 * with is_ipv6 = 1 (braces elided in this view of the file). */
357 VLIB_NODE_FN (ipsec6_output_node) (vlib_main_t * vm,
358 vlib_node_runtime_t * node,
359 vlib_frame_t * frame)
361 return ipsec_output_inline (vm, node, frame, 1);
/* Graph-node registration for the IPv6 IPsec output feature — mirrors the
 * IPv4 registration above, sharing the same error strings and next-node
 * list (closing lines elided in this view of the file). */
364 VLIB_REGISTER_NODE (ipsec6_output_node) = {
365 .name = "ipsec6-output-feature",
366 .vector_size = sizeof (u32),
367 .format_trace = format_ipsec_output_trace,
368 .type = VLIB_NODE_TYPE_INTERNAL,
370 .n_errors = ARRAY_LEN(ipsec_output_error_strings),
371 .error_strings = ipsec_output_error_strings,
373 .n_next_nodes = IPSEC_OUTPUT_N_NEXT,
375 #define _(s,n) [IPSEC_OUTPUT_NEXT_##s] = n,
376 foreach_ipsec_output_next