2 * decap.c : IPSec tunnel decapsulation
4 * Copyright (c) 2015 Cisco and/or its affiliates.
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at:
9 * http://www.apache.org/licenses/LICENSE-2.0
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
18 #include <vnet/vnet.h>
19 #include <vnet/api_errno.h>
20 #include <vnet/ip/ip.h>
22 #include <vnet/ipsec/ipsec.h>
23 #include <vnet/ipsec/esp.h>
/* Next-node dispositions reachable from the ipsec-input nodes: drop the
 * packet, or hand it to the ESP decrypt node. */
25 #define foreach_ipsec_input_next \
26 _(DROP, "error-drop") \
27 _(ESP_DECRYPT, "esp-decrypt")
/* Expands into IPSEC_INPUT_NEXT_DROP / IPSEC_INPUT_NEXT_ESP_DECRYPT enum
 * members (the enclosing typedef enum lines are not visible in this view). */
29 #define _(v, s) IPSEC_INPUT_NEXT_##v,
31 foreach_ipsec_input_next
/* Error counters shared by the ip4 and ip6 input nodes; the string is the
 * counter description shown by "show errors". */
37 #define foreach_ipsec_input_error \
38 _(RX_PKTS, "IPSEC pkts received") \
39 _(DECRYPTION_FAILED, "IPSEC decryption failed")
/* Expand into IPSEC_INPUT_ERROR_* enum members. */
43 #define _(sym,str) IPSEC_INPUT_ERROR_##sym,
44 foreach_ipsec_input_error
47 } ipsec_input_error_t;
/* Human-readable strings for the counters above, indexed by
 * ipsec_input_error_t; wired into both node registrations below. */
49 static char * ipsec_input_error_strings[] = {
50 #define _(sym,string) string,
51 foreach_ipsec_input_error
/* Per-packet trace record (tunnel_index, spi, seq) filled in the dispatch
 * loops below; struct opening lines are not visible in this view. */
59 } ipsec_input_trace_t;
61 /* packet trace format function */
62 static u8 * format_ipsec_input_trace (u8 * s, va_list * args)
/* Standard vlib trace formatter: pops (vm, node, trace record) off the
 * va_list and renders the ipsec_input_trace_t captured at enqueue time. */
64 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
65 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
66 ipsec_input_trace_t * t = va_arg (*args, ipsec_input_trace_t *);
/* ~0 is the "no tunnel matched" sentinel assigned in the node loops. */
68 if (t->tunnel_index != ~0)
70 s = format (s, "esp: tunnel %u spi %u seq %u", t->tunnel_index, t->spi, t->seq);
74 s = format (s, "esp: no tunnel spi %u seq %u",t->spi, t->seq);
/* Match an inbound IPv4 ESP packet against the SPD's PROTECT policies.
 * sa/da/spi are in host byte order (callers convert with
 * clib_net_to_host_u32).  Returns the matching policy, or falls through
 * (return path not visible in this view) when nothing matches.
 * NOTE(review): the visible checks compare against the SA tunnel
 * endpoints and the policy laddr/raddr ranges; the `continue` bodies of
 * each `if` are not visible here — presumably each failed check skips to
 * the next policy.  TODO confirm against the full file. */
79 always_inline ipsec_policy_t *
80 ipsec_input_protect_policy_match(ipsec_spd_t * spd, u32 sa, u32 da, u32 spi)
82 ipsec_main_t *im = &ipsec_main;
/* Walk only the IPv4 inbound PROTECT policies pre-indexed on the SPD. */
87 vec_foreach(i, spd->ipv4_inbound_protect_policy_indices)
89 p = pool_elt_at_index(spd->policies, *i);
90 s = pool_elt_at_index(im->sad, p->sa_index);
/* Tunnel-mode SA: outer header endpoints must equal the SA's tunnel
 * endpoints exactly (addresses stored network order in the SA). */
97 if (da != clib_net_to_host_u32(s->tunnel_dst_addr.ip4.as_u32))
100 if (sa != clib_net_to_host_u32(s->tunnel_src_addr.ip4.as_u32))
/* Range match: local address range covers da, remote range covers sa. */
106 if (da < clib_net_to_host_u32(p->laddr.start.ip4.as_u32))
109 if (da > clib_net_to_host_u32(p->laddr.stop.ip4.as_u32))
112 if (sa < clib_net_to_host_u32(p->raddr.start.ip4.as_u32))
115 if (sa > clib_net_to_host_u32(p->raddr.stop.ip4.as_u32))
/* Inclusive range test: la <= *a <= ua.  memcmp over the 16 raw bytes
 * compares IPv6 addresses in network byte order, so lexicographic byte
 * order equals numeric address order.  (Signature/return lines of this
 * helper are not visible in this view.) */
124 ip6_addr_match_range (ip6_address_t * a, ip6_address_t * la, ip6_address_t * ua)
126 if ((memcmp(a->as_u64, la->as_u64, 2 * sizeof(u64)) >= 0) &&
127 (memcmp(a->as_u64, ua->as_u64, 2 * sizeof(u64)) <= 0))
/* IPv6 counterpart of ipsec_input_protect_policy_match: match an inbound
 * ESP packet against the SPD's IPv6 PROTECT policies.  Parameter list is
 * partially outside this view (presumably sa/da ip6_address_t pointers
 * plus the SPI — TODO confirm).  As with the ip4 variant, the per-check
 * `continue` bodies are not visible here. */
132 always_inline ipsec_policy_t *
133 ipsec_input_ip6_protect_policy_match (ipsec_spd_t * spd,
138 ipsec_main_t *im = &ipsec_main;
143 vec_foreach(i, spd->ipv6_inbound_protect_policy_indices)
145 p = pool_elt_at_index(spd->policies, *i);
146 s = pool_elt_at_index(im->sad, p->sa_index);
/* Tunnel-mode SA: outer addresses must equal the SA tunnel endpoints. */
153 if (!ip6_address_is_equal(sa, &s->tunnel_src_addr.ip6))
156 if (!ip6_address_is_equal(da, &s->tunnel_dst_addr.ip6))
/* Range match via byte-wise comparison (see ip6_addr_match_range). */
162 if (!ip6_addr_match_range(sa, &p->raddr.start.ip6, &p->raddr.stop.ip6))
165 if (!ip6_addr_match_range(da, &p->laddr.start.ip6, &p->laddr.stop.ip6))
173 static vlib_node_registration_t ipsec_input_ip4_node;
/* ipsec-input-ip4 node: classify inbound IPv4 ESP packets against the
 * interface's SPD.  On a PROTECT policy hit, record the SA index in the
 * buffer opaque, strip the outer IPv4 header and forward to esp-decrypt;
 * otherwise the packet keeps the next index from the config chain
 * (bypass/discard handling is an acknowledged FIXME below).  Single-packet
 * dispatch loop — no dual/quad unrolling. */
176 ipsec_input_ip4_node_fn (vlib_main_t * vm,
177 vlib_node_runtime_t * node,
178 vlib_frame_t * from_frame)
180 ip4_main_t * i4m = &ip4_main;
181 ip_lookup_main_t * lm = &i4m->lookup_main;
182 ip_config_main_t * cm = &lm->rx_config_mains[VNET_UNICAST];
183 u32 n_left_from, *from, next_index, *to_next;
184 ipsec_main_t *im = &ipsec_main;
186 from = vlib_frame_vector_args (from_frame);
187 n_left_from = from_frame->n_vectors;
189 next_index = node->cached_next_index;
191 while (n_left_from > 0)
195 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
197 while (n_left_from > 0 && n_left_to_next > 0)
203 ip4_ipsec_config_t * c0;
/* ~0 == no tunnel matched; reported as such in the trace. */
204 u32 tunnel_index0 = ~0;
207 bi0 = to_next[0] = from[0];
213 b0 = vlib_get_buffer (vm, bi0);
/* Pull this interface's ipsec config (SPD index) and the default
 * next node off the per-rx-interface config chain. */
214 c0 = vnet_get_config_data (&cm->config_main,
215 &b0->current_config_index,
216 &next0, sizeof (c0[0]));
218 spd0 = pool_elt_at_index(im->spds, c0->spd_index);
/* ESP header sits immediately after the (options-aware) IPv4 header. */
220 ip0 = vlib_buffer_get_current (b0);
221 esp0 = (esp_header_t *) ((u8 *) ip0 + ip4_header_bytes (ip0));
223 if (PREDICT_TRUE(ip0->protocol == IP_PROTOCOL_IPSEC_ESP))
/* Debug-level logging of every ESP packet — heavyweight; presumably
 * compiled in only for debugging (guard not visible in this view). */
226 clib_warning("packet received from %U to %U spi %u size %u spd_id %u",
227 format_ip4_address, ip0->src_address.as_u8,
228 format_ip4_address, ip0->dst_address.as_u8,
229 clib_net_to_host_u32(esp0->spi),
230 clib_net_to_host_u16(ip0->length),
/* Look up a PROTECT policy for (src, dst, spi), all host order. */
234 p0 = ipsec_input_protect_policy_match(spd0,
235 clib_net_to_host_u32(ip0->src_address.as_u32),
236 clib_net_to_host_u32(ip0->dst_address.as_u32),
237 clib_net_to_host_u32(esp0->spi));
239 if (PREDICT_TRUE(p0 != 0))
/* Policy hit: bump counters, stash the SA for esp-decrypt, strip
 * the outer IPv4 header and send to the decrypt node. */
241 p0->counter.packets++;
242 p0->counter.bytes += clib_net_to_host_u16(ip0->length);
243 vnet_buffer(b0)->output_features.ipsec_sad_index = p0->sa_index;
244 next0 = IPSEC_INPUT_NEXT_ESP_DECRYPT;
245 vlib_buffer_advance(b0, ip4_header_bytes (ip0));
250 /* FIXME bypass and discard */
253 if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED)) {
254 ipsec_input_trace_t *tr = vlib_add_trace (vm, node, b0, sizeof (*tr));
255 tr->tunnel_index = tunnel_index0;
/* NOTE(review): esp0->spi/seq are network order, so host_to_net here
 * performs the same byte swap as net_to_host — works, but the name is
 * misleading; consider net_to_host for clarity. */
256 tr->spi = clib_host_to_net_u32(esp0->spi);
257 tr->seq = clib_host_to_net_u32(esp0->seq);
260 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
261 to_next, n_left_to_next, bi0, next0);
263 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
265 vlib_node_increment_counter (vm, ipsec_input_ip4_node.index,
266 IPSEC_INPUT_ERROR_RX_PKTS,
267 from_frame->n_vectors);
269 return from_frame->n_vectors;
/* Node registration: internal node, error strings and next nodes wired
 * from the foreach_ macros above (closing brace of the initializer is
 * not visible in this view). */
273 VLIB_REGISTER_NODE (ipsec_input_ip4_node,static) = {
274 .function = ipsec_input_ip4_node_fn,
275 .name = "ipsec-input-ip4",
276 .vector_size = sizeof (u32),
277 .format_trace = format_ipsec_input_trace,
278 .type = VLIB_NODE_TYPE_INTERNAL,
280 .n_errors = ARRAY_LEN(ipsec_input_error_strings),
281 .error_strings = ipsec_input_error_strings,
283 .n_next_nodes = IPSEC_INPUT_N_NEXT,
285 #define _(s,n) [IPSEC_INPUT_NEXT_##s] = n,
286 foreach_ipsec_input_next
/* Generate per-CPU-architecture variants of the node function. */
291 VLIB_NODE_FUNCTION_MULTIARCH (ipsec_input_ip4_node,
292 ipsec_input_ip4_node_fn)
295 static vlib_node_registration_t ipsec_input_ip6_node;
/* ipsec-input-ip6 node: IPv6 counterpart of ipsec_input_ip4_node_fn.
 * Same structure; the outer header is assumed to be a fixed-size ip6
 * header with ESP directly following (no extension-header walk — ESP
 * after an extension chain would be misparsed here). */
298 ipsec_input_ip6_node_fn (vlib_main_t * vm,
299 vlib_node_runtime_t * node,
300 vlib_frame_t * from_frame)
302 ip6_main_t * i6m = &ip6_main;
303 ip_lookup_main_t * lm = &i6m->lookup_main;
304 ip_config_main_t * cm = &lm->rx_config_mains[VNET_UNICAST];
305 u32 n_left_from, *from, next_index, *to_next;
306 ipsec_main_t *im = &ipsec_main;
308 from = vlib_frame_vector_args (from_frame);
309 n_left_from = from_frame->n_vectors;
311 next_index = node->cached_next_index;
313 while (n_left_from > 0)
317 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
319 while (n_left_from > 0 && n_left_to_next > 0)
/* NOTE(review): reuses ip4_ipsec_config_t for the ip6 path — only the
 * spd_index field is read, so this presumably works; confirm the two
 * config structs share layout. */
325 ip4_ipsec_config_t * c0;
326 u32 tunnel_index0 = ~0;
/* Fixed ip6 header size; no extension headers accounted for. */
328 u32 header_size = sizeof(ip0[0]);
330 bi0 = to_next[0] = from[0];
336 b0 = vlib_get_buffer (vm, bi0);
337 c0 = vnet_get_config_data (&cm->config_main,
338 &b0->current_config_index,
339 &next0, sizeof (c0[0]));
341 spd0 = pool_elt_at_index(im->spds, c0->spd_index);
343 ip0 = vlib_buffer_get_current (b0);
344 esp0 = (esp_header_t *) ((u8 *) ip0 + header_size);
346 if (PREDICT_TRUE(ip0->protocol == IP_PROTOCOL_IPSEC_ESP))
/* Debug logging; total size = payload_length + fixed header. */
349 clib_warning("packet received from %U to %U spi %u size %u spd_id %u",
350 format_ip6_address, &ip0->src_address,
351 format_ip6_address, &ip0->dst_address,
352 clib_net_to_host_u32(esp0->spi),
353 clib_net_to_host_u16(ip0->payload_length) + header_size,
/* Address arguments on the intervening (not visible) lines. */
357 p0 = ipsec_input_ip6_protect_policy_match(spd0,
360 clib_net_to_host_u32(esp0->spi));
362 if (PREDICT_TRUE(p0 != 0))
/* Policy hit: count full packet bytes (payload + header), stash the
 * SA index, strip the outer ip6 header and go to esp-decrypt. */
364 p0->counter.packets++;
365 p0->counter.bytes += clib_net_to_host_u16(ip0->payload_length);
366 p0->counter.bytes += header_size;
367 vnet_buffer(b0)->output_features.ipsec_sad_index = p0->sa_index;
368 next0 = IPSEC_INPUT_NEXT_ESP_DECRYPT;
369 vlib_buffer_advance(b0, header_size);
375 if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED)) {
376 ipsec_input_trace_t *tr = vlib_add_trace (vm, node, b0, sizeof (*tr));
377 tr->tunnel_index = tunnel_index0;
/* Same host_to_net/net_to_host naming quirk as the ip4 node. */
378 tr->spi = clib_host_to_net_u32(esp0->spi);
379 tr->seq = clib_host_to_net_u32(esp0->seq);
382 vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
383 n_left_to_next, bi0, next0);
385 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
387 vlib_node_increment_counter (vm, ipsec_input_ip6_node.index,
388 IPSEC_INPUT_ERROR_RX_PKTS,
389 from_frame->n_vectors);
391 return from_frame->n_vectors;
/* Node registration for the ip6 path — mirrors the ip4 registration
 * above (closing brace of the initializer is not visible in this view). */
395 VLIB_REGISTER_NODE (ipsec_input_ip6_node,static) = {
396 .function = ipsec_input_ip6_node_fn,
397 .name = "ipsec-input-ip6",
398 .vector_size = sizeof (u32),
399 .format_trace = format_ipsec_input_trace,
400 .type = VLIB_NODE_TYPE_INTERNAL,
402 .n_errors = ARRAY_LEN(ipsec_input_error_strings),
403 .error_strings = ipsec_input_error_strings,
405 .n_next_nodes = IPSEC_INPUT_N_NEXT,
407 #define _(s,n) [IPSEC_INPUT_NEXT_##s] = n,
408 foreach_ipsec_input_next
/* Generate per-CPU-architecture variants of the node function. */
413 VLIB_NODE_FUNCTION_MULTIARCH (ipsec_input_ip6_node,
414 ipsec_input_ip6_node_fn)