/*
 * ipsec_output.c : IPSec output node
 *
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
18 #include <vnet/vnet.h>
19 #include <vnet/api_errno.h>
20 #include <vnet/ip/ip.h>
22 #include <vnet/ipsec/ipsec.h>
/* X-macro list of the next nodes ipsec-output can dispatch a packet to:
 * error-drop for DISCARD / no-match, esp-encrypt for PROTECT policies.
 * NOTE(review): the embedded original line numbers jump (28->30, 33->36),
 * so the "typedef enum {" opener, the "#undef _" and the
 * IPSEC_OUTPUT_N_NEXT terminator that normally complete this expansion
 * were lost in extraction (IPSEC_OUTPUT_N_NEXT is referenced by the node
 * registration below) — restore before compiling. */
26 #define foreach_ipsec_output_next \
27 _(DROP, "error-drop") \
28 _(ESP_ENCRYPT, "esp-encrypt")
/* Expand the interface output features first, then the list above, into
 * IPSEC_OUTPUT_NEXT_* enumerators so feature next-indices and IPSec
 * next-indices share one numbering space. */
30 #define _(v, s) IPSEC_OUTPUT_NEXT_##v,
32 foreach_intf_output_feat
33 foreach_ipsec_output_next
36 } ipsec_output_next_t;
/* X-macro list of this node's error/statistics counters; one counter per
 * policy outcome plus a generic rx and an encapsulation-failure counter.
 * NOTE(review): embedded numbers jump 45->49 and 50->53 — the
 * "typedef enum {" opener, "#undef _" and any N_ERROR terminator were
 * lost in extraction. */
39 #define foreach_ipsec_output_error \
40 _(RX_PKTS, "IPSec pkts received") \
41 _(POLICY_DISCARD, "IPSec policy discard") \
42 _(POLICY_NO_MATCH, "IPSec policy (no match)") \
43 _(POLICY_PROTECT, "IPSec policy protect") \
44 _(POLICY_BYPASS, "IPSec policy bypass") \
45 _(ENCAPS_FAILED, "IPSec encapsulation failed")
/* Expand into IPSEC_OUTPUT_ERROR_* enumerators used with
 * vlib_node_increment_counter at the bottom of the node function. */
49 #define _(sym,str) IPSEC_OUTPUT_ERROR_##sym,
50 foreach_ipsec_output_error
53 } ipsec_output_error_t;
/* Human-readable strings for the counters above, in enumerator order;
 * referenced by .error_strings in the node registration.
 * NOTE(review): embedded numbers jump 57->61 — the closing "};" (and the
 * customary "#undef _") were lost in extraction. */
55 static char * ipsec_output_error_strings[] = {
56 #define _(sym,string) string,
57 foreach_ipsec_output_error
/* Forward declaration so ipsec_output_node_fn can reference its own
 * registration (ipsec_output_node.index) before VLIB_REGISTER_NODE below. */
61 static vlib_node_registration_t ipsec_output_node;
/* Tail of the per-packet trace record.  NOTE(review): embedded numbers
 * jump 61->65 — the "typedef struct {" opener and the members (at least
 * a u32 spd_id, which the format function below reads) were lost in
 * extraction. */
65 } ipsec_output_trace_t;
67 /* packet trace format function */
/* Renders an ipsec_output_trace_t as either "spd <id>" or "no spd".
 * Standard vlib format-trace signature: (vlib_main_t *, vlib_node_t *,
 * trace record) pulled off the va_list in that order.
 * NOTE(review): embedded numbers jump 72->76 and 76->80 — the opening
 * brace, the condition selecting between the two format() calls
 * (presumably on t->spd_id — confirm), the "return s;" and the closing
 * brace were lost in extraction. */
68 static u8 * format_ipsec_output_trace (u8 * s, va_list * args)
70 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
71 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
72 ipsec_output_trace_t * t = va_arg (*args, ipsec_output_trace_t *);
76 s = format (s, "spd %u ", t->spd_id);
80 s = format (s, "no spd");
/* Pops the lowest-numbered pending interface output feature off the
 * buffer's feature bitmap: count_trailing_zeros finds the first set bit,
 * and that bit is cleared unless it is the INTF_OUTPUT_FEAT_DONE
 * sentinel.  Marked unused because not every build references it.
 * NOTE(review): embedded numbers jump 86->89 and stop at 91 — the
 * opening brace, the declaration of next_feature and the return
 * statement were lost in extraction.
 * NOTE(review): "1 << next_feature" is an int shift; if the bitmap is
 * wider than 32 bits this should be 1ULL << — confirm the bitmap type. */
85 always_inline intf_output_feat_t __attribute__((unused))
86 get_next_intf_output_feature_and_reset_bit(vlib_buffer_t *b)
89 count_trailing_zeros(next_feature, vnet_buffer(b)->output_features.bitmap);
90 if (next_feature != INTF_OUTPUT_FEAT_DONE)
91 vnet_buffer(b)->output_features.bitmap &= ~(1 << next_feature);
/* Linear scan of the SPD's IPv4 outbound policies; returns the first
 * policy whose selectors cover the packet, else (presumably) NULL.
 *   pr      - IP protocol number
 *   la / ra - local (source) / remote (dest) IPv4 address, host order
 *   lp / rp - local / remote port, host order
 * Policy address bounds are stored in network order and converted per
 * comparison here; port bounds are compared directly (presumably stored
 * in host order — confirm against the policy add/del path).
 * NOTE(review): embedded numbers skip between every check — the
 * "continue;" bodies, the local declarations (ipsec_policy_t *p, u32 *i),
 * the braces, the non-TCP/UDP early "return p;" and the final returns
 * were lost in extraction. */
95 always_inline ipsec_policy_t *
96 ipsec_output_policy_match(ipsec_spd_t * spd, u8 pr, u32 la, u32 ra, u16 lp, u16 rp)
101 vec_foreach(i, spd->ipv4_outbound_policies)
103 p = pool_elt_at_index(spd->policies, *i);
104 if (PREDICT_FALSE(p->protocol && (p->protocol != pr)))
107 if (la < clib_net_to_host_u32(p->laddr.start.ip4.as_u32))
110 if (la > clib_net_to_host_u32(p->laddr.stop.ip4.as_u32))
113 if (ra < clib_net_to_host_u32(p->raddr.start.ip4.as_u32))
116 if (ra > clib_net_to_host_u32(p->raddr.stop.ip4.as_u32))
/* For protocols without ports the port selectors cannot apply;
 * presumably the missing body returns the policy here — confirm. */
119 if (PREDICT_FALSE((pr != IP_PROTOCOL_TCP) && (pr != IP_PROTOCOL_UDP)))
122 if (lp < p->lport.start)
125 if (lp > p->lport.stop)
128 if (rp < p->rport.start)
131 if (rp > p->rport.stop)
/* Inclusive range test: nonzero iff IPv6 address a lies in [la, ua].
 * memcmp over the 16 address bytes compares them as unsigned chars, which
 * for network-byte-order IPv6 addresses gives the numeric ordering.
 * NOTE(review): embedded numbers jump 143->148 — the return-type line
 * (presumably "always_inline int"), the braces and the return
 * statements were lost in extraction. */
140 ip6_addr_match_range (ip6_address_t * a, ip6_address_t * la, ip6_address_t * ua)
142 if ((memcmp(a->as_u64, la->as_u64, 2 * sizeof(u64)) >= 0) &&
143 (memcmp(a->as_u64, ua->as_u64, 2 * sizeof(u64)) <= 0))
/* IPv6 counterpart of ipsec_output_policy_match: linear scan of the
 * SPD's IPv6 outbound policies.
 * NOTE(review): embedded numbers jump 149->159 — the remaining
 * parameters (apparently sa, da, lp, rp, pr from the uses below), the
 * local declarations, braces, "continue;"/return statements were lost
 * in extraction.
 * NOTE(review): the source address sa is matched against the policy's
 * *remote* range (raddr) and the destination da against the *local*
 * range (laddr) — the opposite pairing from the IPv4 matcher above
 * (src↔laddr, dst↔raddr).  Verify the argument order at the call site
 * or whether this comparison is inverted. */
148 always_inline ipsec_policy_t *
149 ipsec_output_ip6_policy_match (ipsec_spd_t * spd,
159 vec_foreach(i, spd->ipv6_outbound_policies)
161 p = pool_elt_at_index(spd->policies, *i);
162 if (PREDICT_FALSE(p->protocol && (p->protocol != pr)))
165 if (!ip6_addr_match_range(sa, &p->raddr.start.ip6, &p->raddr.stop.ip6))
168 if (!ip6_addr_match_range(da, &p->laddr.start.ip6, &p->laddr.stop.ip6))
/* Non-TCP/UDP: port selectors inapplicable; presumably the missing body
 * returns the policy here, as in the IPv4 matcher — confirm. */
171 if (PREDICT_FALSE((pr != IP_PROTOCOL_TCP) && (pr != IP_PROTOCOL_UDP)))
174 if (lp < p->lport.start)
177 if (lp > p->lport.stop)
180 if (rp < p->rport.start)
183 if (rp > p->rport.stop)
/*
 * ipsec-output node function: classifies each outbound packet against the
 * SPD bound to its TX sw_if_index and steers it to esp-encrypt (PROTECT),
 * the next interface output feature (BYPASS / no IPSec), or error-drop
 * (DISCARD / no matching policy), batching buffers into per-next-node
 * frames.  Returns the number of vectors processed.
 *
 * NOTE(review): the embedded original line numbers are non-contiguous
 * throughout (e.g. 210->216, 341->347, 373->377): the loop braces,
 * several declarations (bi0, b0, ip0, udp0, p0, spd_index0, f), the
 * from[]/n_left_from advance, and the enqueue of bi0 into the per-next
 * frame were all lost in extraction.  Do not compile as-is.
 */
192 ipsec_output_node_fn (vlib_main_t * vm,
193 vlib_node_runtime_t * node,
194 vlib_frame_t * from_frame)
196 ipsec_main_t *im = &ipsec_main;
197 vnet_main_t * vnm = im->vnet_main;
/* Frame/next-node bookkeeping: buffers going to the same next node are
 * accumulated into one frame; ~0 marks "no frame opened yet". */
199 u32 * from, * to_next = 0;
200 u32 n_left_from, sw_if_index0, last_sw_if_index = (u32) ~0;
201 u32 next_node_index = (u32)~0, last_next_node_index = (u32) ~0;
204 ipsec_spd_t * spd0 = 0;
/* Local tallies per policy outcome, flushed to node counters at exit. */
205 u64 nc_protect = 0, nc_bypass = 0, nc_discard = 0, nc_nomatch = 0;
207 from = vlib_frame_vector_args (from_frame);
208 n_left_from = from_frame->n_vectors;
/* Per-packet loop (single-buffer, no dual/quad unrolling visible). */
210 while (n_left_from > 0)
216 ip6_header_t * ip6_0 = 0;
221 b0 = vlib_get_buffer (vm, bi0);
/* Output path: classify on the TX interface the packet will leave on. */
222 sw_if_index0 = vnet_buffer(b0)->sw_if_index[VLIB_TX];
/* Buffer current data presumably points at the ethernet header here;
 * the IP header is assumed to follow it immediately — confirm. */
225 ip0 = (ip4_header_t *) ((u8 *) vlib_buffer_get_current (b0) +
226 sizeof(ethernet_header_t));
228 /* just forward non ipv4 packets */
229 if (PREDICT_FALSE((ip0->ip_version_and_header_length & 0xF0 ) != 0x40))
/* Inside the non-IPv4 branch, IPv6 (version nibble 6) is still
 * classified; anything else presumably bypasses IPSec entirely. */
232 if (PREDICT_TRUE((ip0->ip_version_and_header_length & 0xF0 ) == 0x60))
235 ip6_0 = (ip6_header_t *) ((u8 *) vlib_buffer_get_current (b0) +
236 sizeof(ethernet_header_t));
240 next_node_index = get_next_output_feature_node_index(vnm, b0);
245 /* lookup for SPD only if sw_if_index is changed */
246 if (PREDICT_FALSE(last_sw_if_index != sw_if_index0))
248 uword * p = hash_get (im->spd_index_by_sw_if_index, sw_if_index0);
/* NOTE(review): lines 249-250 missing — presumably p is checked/used to
 * obtain spd_index0 before this; confirm NULL handling when the
 * interface has no SPD attached. */
251 spd0 = pool_elt_at_index(im->spds, spd_index0);
252 last_sw_if_index = sw_if_index0;
/* IPv6 path.  NOTE(review): ip6_next_header treats the payload as
 * starting right after the fixed header — extension headers are not
 * handled; the L4 header is read as UDP/TCP unconditionally. */
257 udp0 = ip6_next_header(ip6_0);
/* NOTE(review): per-packet clib_warning debug dumps; presumably wrapped
 * in a debug #if in the missing lines — confirm, this is far too noisy
 * for production. */
259 clib_warning("packet received from %U port %u to %U port %u spd_id %u",
260 format_ip6_address, &ip6_0->src_address,
261 clib_net_to_host_u16(udp0->src_port),
262 format_ip6_address, &ip6_0->dst_address,
263 clib_net_to_host_u16(udp0->dst_port),
/* NOTE(review): lines 268-269 missing — presumably the src/dst address
 * arguments to the IPv6 matcher. */
267 p0 = ipsec_output_ip6_policy_match(spd0,
270 clib_net_to_host_u16(udp0->src_port),
271 clib_net_to_host_u16(udp0->dst_port),
/* IPv4 path: L4 header located via the IHL field (options respected). */
276 udp0 = (udp_header_t *) ((u8 *) ip0 + ip4_header_bytes (ip0));
279 clib_warning("packet received from %U to %U port %u",
280 format_ip4_address, ip0->src_address.as_u8,
281 format_ip4_address, ip0->dst_address.as_u8,
282 clib_net_to_host_u16(udp0->dst_port));
283 clib_warning("sw_if_index0 %u spd_index0 %u spd_id %u",
284 sw_if_index0, spd_index0, spd0->id);
287 p0 = ipsec_output_policy_match(spd0, ip0->protocol,
288 clib_net_to_host_u32(ip0->src_address.as_u32),
289 clib_net_to_host_u32(ip0->dst_address.as_u32),
290 clib_net_to_host_u16(udp0->src_port),
291 clib_net_to_host_u16(udp0->dst_port));
/* Dispatch on the matched policy's action. */
294 if (PREDICT_TRUE(p0 != NULL))
296 if (p0->policy == IPSEC_POLICY_ACTION_PROTECT)
/* PROTECT: hand the buffer to esp-encrypt; record which SA to use and
 * strip the ethernet header so encryption sees the IP packet. */
299 next_node_index = im->esp_encrypt_node_index;
300 vnet_buffer(b0)->output_features.ipsec_sad_index = p0->sa_index;
301 vlib_buffer_advance(b0, sizeof(ethernet_header_t));
302 p0->counter.packets++;
/* Byte counters: IPv6 counts payload + fixed header; IPv4 counts the
 * total-length field. */
305 p0->counter.bytes += clib_net_to_host_u16(ip6_0->payload_length);
306 p0->counter.bytes += sizeof(ip6_header_t);
310 p0->counter.bytes += clib_net_to_host_u16(ip0->length);
313 else if (p0->policy == IPSEC_POLICY_ACTION_BYPASS)
/* BYPASS: continue down the normal interface output feature chain. */
316 next_node_index = get_next_output_feature_node_index(vnm, b0);
317 p0->counter.packets++;
320 p0->counter.bytes += clib_net_to_host_u16(ip6_0->payload_length);
321 p0->counter.bytes += sizeof(ip6_header_t);
325 p0->counter.bytes += clib_net_to_host_u16(ip0->length);
/* Remaining action (presumably DISCARD): count it and drop. */
331 p0->counter.packets++;
334 p0->counter.bytes += clib_net_to_host_u16(ip6_0->payload_length);
335 p0->counter.bytes += sizeof(ip6_header_t);
339 p0->counter.bytes += clib_net_to_host_u16(ip0->length);
341 next_node_index = im->error_drop_node_index;
/* No policy matched: drop (line 347 sits in the p0 == NULL branch). */
347 next_node_index = im->error_drop_node_index;
/* Frame management: when the target next node changes, flush the frame
 * in progress and open a new one.  NOTE(review): the enqueue of bi0
 * into to_next[] and the frame n_vectors update are in missing lines. */
354 if (PREDICT_FALSE((last_next_node_index != next_node_index)))
356 /* if this is not 1st frame */
358 vlib_put_frame_to_node (vm, last_next_node_index, f);
360 last_next_node_index = next_node_index;
362 f = vlib_get_frame_to_node(vm, next_node_index);
363 to_next = vlib_frame_vector_args (f);
/* Packet tracing.  NOTE(review): spd0 is dereferenced here; presumably
 * a guard for the no-SPD case exists in the missing lines (the trace
 * formatter has a "no spd" branch) — confirm. */
370 if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED)) {
371 ipsec_output_trace_t *tr = vlib_add_trace (vm, node, b0, sizeof (*tr));
373 tr->spd_id = spd0->id;
/* Flush the final frame and publish the per-outcome tallies. */
377 vlib_put_frame_to_node (vm, next_node_index, f);
378 vlib_node_increment_counter (vm, ipsec_output_node.index,
379 IPSEC_OUTPUT_ERROR_POLICY_PROTECT, nc_protect);
380 vlib_node_increment_counter (vm, ipsec_output_node.index,
381 IPSEC_OUTPUT_ERROR_POLICY_BYPASS, nc_bypass);
382 vlib_node_increment_counter (vm, ipsec_output_node.index,
383 IPSEC_OUTPUT_ERROR_POLICY_DISCARD, nc_discard);
384 vlib_node_increment_counter (vm, ipsec_output_node.index,
385 IPSEC_OUTPUT_ERROR_POLICY_NO_MATCH, nc_nomatch);
386 return from_frame->n_vectors;
/* Graph node registration for ipsec-output: internal node, error strings
 * from the X-macro list above, next nodes expanded from both the
 * interface output features and foreach_ipsec_output_next.
 * NOTE(review): embedded numbers jump 399->401 and 403->408 — the
 * ".next_nodes = {" opener, "#undef _", the closing "}, };" and any
 * surrounding lines were lost in extraction.  The "#else" below implies
 * this whole section sits under a conditional (#if on an IPSEC define)
 * whose opening directive is not visible here. */
389 VLIB_REGISTER_NODE (ipsec_output_node,static) = {
390 .function = ipsec_output_node_fn,
391 .name = "ipsec-output",
392 .vector_size = sizeof (u32),
393 .format_trace = format_ipsec_output_trace,
394 .type = VLIB_NODE_TYPE_INTERNAL,
396 .n_errors = ARRAY_LEN(ipsec_output_error_strings),
397 .error_strings = ipsec_output_error_strings,
399 .n_next_nodes = IPSEC_OUTPUT_N_NEXT,
401 #define _(s,n) [IPSEC_OUTPUT_NEXT_##s] = n,
402 foreach_intf_output_feat
403 foreach_ipsec_output_next
/* Emit per-CPU-arch variants of the node function. */
408 VLIB_NODE_FUNCTION_MULTIARCH (ipsec_output_node, ipsec_output_node_fn)
410 #else /* IPSEC > 1 */
412 /* Dummy ipsec output node, in case when IPSec is disabled */
/* Stub replacement keeping the node graph wiring intact when the build
 * excludes IPSec: just logs a warning.
 * NOTE(review): embedded numbers skip 413-414 and 418 — the return type
 * line, braces and (presumably) "return frame->n_vectors;" were lost in
 * extraction; its registration below is also cut off at the end of this
 * chunk. */
415 ipsec_output_node_fn (vlib_main_t * vm,
416 vlib_node_runtime_t * node,
417 vlib_frame_t * frame)
419 clib_warning ("IPSec disabled");
423 VLIB_REGISTER_NODE (ipsec_output_node) = {
424 .vector_size = sizeof (u32),
425 .function = ipsec_output_node_fn,
426 .name = "ipsec-output",