2 * ipsec_tun_protect_in.c : IPSec interface input node
4 * Copyright (c) 2015 Cisco and/or its affiliates.
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at:
9 * http://www.apache.org/licenses/LICENSE-2.0
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
18 #include <vnet/vnet.h>
19 #include <vnet/api_errno.h>
20 #include <vnet/ip/ip.h>
22 #include <vnet/ipsec/ipsec.h>
23 #include <vnet/ipsec/esp.h>
24 #include <vnet/ipsec/ipsec_io.h>
25 #include <vnet/ipsec/ipsec_punt.h>
26 #include <vnet/ipsec/ipsec_tun.h>
27 #include <vnet/ipsec/ipsec.api_enum.h>
28 #include <vnet/ip/ip4_input.h>
/* Alias the API-generated ipsec-tun error enum as this node's error type;
 * indices must line up with the .error_counters table used at registration. */
30 typedef vl_counter_ipsec_tun_enum_t ipsec_tun_protect_input_error_t;
/* Next-node indices, expanded from the shared foreach_ipsec_input_next
 * list so they stay in sync with the ipsec-input node.
 * NOTE(review): the enum's opening brace, the matching #undef _, and the
 * closing "} ipsec_tun_next_t;" are on lines elided from this chunk. */
32 typedef enum ipsec_tun_next_t_
34 #define _(v, s) IPSEC_TUN_PROTECT_NEXT_##v,
35 foreach_ipsec_input_next
37 IPSEC_TUN_PROTECT_N_NEXT,
/* Per-packet trace record captured in the input loop below; kv4 is filled
 * by the v4 node, kv6 by the v6 node.
 * NOTE(review): the struct header and the 'seq' member read by the trace
 * formatter are on lines elided from this chunk. */
44 ipsec4_tunnel_kv_t kv4;
45 ipsec6_tunnel_kv_t kv6;
49 } ipsec_tun_protect_input_trace_t;
/* vlib trace formatter for both tunnel input nodes: prints the bihash
 * lookup key and the captured ESP sequence number.
 * NOTE(review): the conditional selecting the v6 vs v4 format() call and
 * the 'return s;' are on lines elided from this chunk. */
52 format_ipsec_tun_protect_input_trace (u8 * s, va_list * args)
54 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
55 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
56 ipsec_tun_protect_input_trace_t *t =
57 va_arg (*args, ipsec_tun_protect_input_trace_t *);
/* v6 variant: key + sequence number only */
60 s = format (s, "IPSec: %U seq %u",
61 format_ipsec6_tunnel_kv, &t->kv6, t->seq);
/* v4 variant.  NOTE(review): "sa %d" appears to have no matching
 * vararg on the visible lines — verify against the full source whether
 * an argument was elided or this is a real format/arg mismatch. */
63 s = format (s, "IPSec: %U seq %u sa %d",
64 format_ipsec4_tunnel_kv, &t->kv4, t->seq);
/* Classify an ip4 ESP packet that missed the tunnel lookup.  SPI 0 is
 * special-cased with its own error counter and a punt reason that
 * distinguishes UDP-encapsulated from raw ESP; any other miss is punted
 * as "no such tunnel".  Always returns the punt next index.
 * NOTE(review): the function's return-type line, braces and the else of
 * the SPI-0 conditional are on lines elided from this chunk. */
69 ipsec_ip4_if_no_tunnel (vlib_node_runtime_t * node,
71 const esp_header_t * esp, const ip4_header_t * ip4)
73 if (PREDICT_FALSE (0 == esp->spi))
75 b->error = node->errors[IPSEC_TUN_ERROR_SPI_0];
/* UDP protocol here means NAT-T (ESP-in-UDP) framing, so use the
 * UDP-specific SPI-0 punt reason */
76 b->punt_reason = ipsec_punt_reason[(ip4->protocol == IP_PROTOCOL_UDP ?
77 IPSEC_PUNT_IP4_SPI_UDP_0 :
78 IPSEC_PUNT_IP4_NO_SUCH_TUNNEL)];
82 b->error = node->errors[IPSEC_TUN_ERROR_NO_TUNNEL];
83 b->punt_reason = ipsec_punt_reason[IPSEC_PUNT_IP4_NO_SUCH_TUNNEL];
85 return VNET_DEVICE_INPUT_NEXT_PUNT;
/* ip6 counterpart of the lookup-miss handler above: no SPI-0 special
 * case, every miss is punted as "no such tunnel". */
89 ipsec_ip6_if_no_tunnel (vlib_node_runtime_t * node,
90 vlib_buffer_t * b, const esp_header_t * esp)
92 b->error = node->errors[IPSEC_TUN_ERROR_NO_TUNNEL];
93 b->punt_reason = ipsec_punt_reason[IPSEC_PUNT_IP6_NO_SUCH_TUNNEL];
95 return VNET_DEVICE_INPUT_NEXT_PUNT;
/* Shared worker for ipsec4-tun-input / ipsec6-tun-input.  For each buffer
 * in the frame: locate the ESP header (handling NAT-T UDP encapsulation
 * for v4), look up the protecting tunnel in a bihash keyed on
 * (remote address, SPI), stamp the buffer with the tunnel's SA index and
 * RX sw_if_index, maintain per-interface combined counters, and dispatch
 * to the ESP decrypt node (or punt/drop on miss).
 * NOTE(review): many lines of this function — braces, else branches,
 * prefetches and parts of multi-line statements — are elided from this
 * chunk; comments below only describe what the visible lines establish. */
99 ipsec_tun_protect_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
100 vlib_frame_t * from_frame, int is_ip6)
102 ipsec_main_t *im = &ipsec_main;
103 vnet_main_t *vnm = im->vnet_main;
104 vnet_interface_main_t *vim = &vnm->interface_main;
106 int is_trace = node->flags & VLIB_NODE_FLAG_TRACE;
107 u32 thread_index = vm->thread_index;
109 u32 n_left_from, *from;
110 u16 nexts[VLIB_FRAME_SIZE], *next;
111 vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b;
113 from = vlib_frame_vector_args (from_frame);
114 n_left_from = from_frame->n_vectors;
116 vlib_get_buffers (vm, from, bufs, n_left_from);
/* NOTE(review): fragment of a multi-line statement (first line elided);
 * appears to pre-fill nexts[] with the per-AF ESP decrypt next index —
 * confirm against the full source. */
121 nexts, is_ip6 ? im->esp6_decrypt_next_index : im->esp4_decrypt_next_index,
/* running totals for the current interface, flushed to the combined
 * RX counter whenever the interface changes and once at loop exit */
124 u64 n_bytes = 0, n_packets = 0;
125 u32 n_disabled = 0, n_no_tunnel = 0;
/* one-entry lookup cache: consecutive packets from the same peer/SPI
 * skip the bihash search */
127 u32 last_sw_if_index = ~0;
128 ipsec_tun_lkup_result_t last_result = {
131 ipsec4_tunnel_kv_t last_key4;
132 ipsec6_tunnel_kv_t last_key6;
133 ipsec_tun_lkup_result_t itr0;
135 vlib_combined_counter_main_t *rx_counter;
136 vlib_combined_counter_main_t *drop_counter;
/* all-ones pattern cannot match a real key, so the first packet always
 * takes the bihash path */
139 clib_memset (&last_key6, 0xff, sizeof (last_key6));
143 rx_counter = vim->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX;
144 drop_counter = vim->combined_sw_if_counters + VNET_INTERFACE_COUNTER_DROP;
146 while (n_left_from > 0)
148 u32 sw_if_index0, len0, hdr_sz0;
/* bihash kv storage; key40/key60 alias it so the typed key helpers and
 * the raw search can share the same bytes */
149 clib_bihash_kv_24_16_t bkey60;
150 clib_bihash_kv_8_16_t bkey40;
151 ipsec4_tunnel_kv_t *key40;
152 ipsec6_tunnel_kv_t *key60;
/* NOTE(review): the 'ip40 =' left-hand side of this assignment is on an
 * elided line; the L3 header is located via the recorded l3_hdr_offset */
159 (ip4_header_t *) (b[0]->data + vnet_buffer (b[0])->l3_hdr_offset);
161 key60 = (ipsec6_tunnel_kv_t *) & bkey60;
162 key40 = (ipsec4_tunnel_kv_t *) & bkey40;
/* ip6 path: ESP immediately follows the fixed 40-byte header */
166 ip60 = (ip6_header_t *) ip40;
167 esp0 = (esp_header_t *) (ip60 + 1);
168 buf_rewind0 = hdr_sz0 = sizeof (ip6_header_t);
/* ip4 path: detect NAT-T (ESP-in-UDP) framing */
172 if (ip40->protocol == IP_PROTOCOL_UDP)
174 /* NAT UDP port 4500 case, don't advance any more */
176 (esp_header_t *) ((u8 *) ip40 + ip4_header_bytes (ip40) +
177 sizeof (udp_header_t));
179 buf_rewind0 = ip4_header_bytes (ip40) + sizeof (udp_header_t);
181 const udp_header_t *udp0 =
182 (udp_header_t *) ((u8 *) ip40 + ip4_header_bytes (ip40));
184 /* length 9 = sizeof(udp_header) + 1 byte of special SPI */
185 if (clib_net_to_host_u16 (udp0->length) == 9 &&
186 esp0->spi_bytes[0] == 0xff)
/* NAT keepalive probe: count it, rewind to the IP header and drop */
188 b[0]->error = node->errors[IPSEC_TUN_ERROR_NAT_KEEPALIVE];
190 next[0] = VNET_DEVICE_INPUT_NEXT_IP4_DROP;
193 vlib_buffer_advance (b[0], -buf_rewind0);
/* plain ESP directly over ip4 */
199 esp0 = (esp_header_t *) ((u8 *) ip40 + ip4_header_bytes (ip40));
200 buf_rewind0 = hdr_sz0 = ip4_header_bytes (ip40);
204 /* stats for the tunnel include all the data after the IP header
205 just like a normal IP-IP tunnel */
206 vlib_buffer_advance (b[0], hdr_sz0);
207 len0 = vlib_buffer_length_in_chain (vm, b[0]);
/* runt: not even a full ESP header after the IP header */
209 if (len0 < sizeof (esp_header_t))
211 b[0]->error = node->errors[IPSEC_TUN_ERROR_TOO_SHORT];
213 next[0] = is_ip6 ? VNET_DEVICE_INPUT_NEXT_IP6_DROP :
214 VNET_DEVICE_INPUT_NEXT_IP4_DROP;
215 vlib_buffer_advance (b[0], -buf_rewind0);
/* v6 lookup: key is (remote address, SPI); __pad zeroed so memcmp
 * against the cached key compares only meaningful bytes */
221 key60->key.remote_ip = ip60->src_address;
222 key60->key.spi = esp0->spi;
223 key60->key.__pad = 0;
225 if (memcmp (key60, &last_key6, sizeof (last_key6)) == 0)
227 clib_memcpy_fast (&itr0, &last_result, sizeof (itr0));
/* NOTE(review): surrounding if/else of this search is elided */
232 clib_bihash_search_inline_24_16 (&im->tun6_protect_by_key,
/* hit: adopt the result and refresh the one-entry cache */
236 clib_memcpy_fast (&itr0, &bkey60.value, sizeof (itr0));
237 clib_memcpy_fast (&last_result, &bkey60.value,
238 sizeof (last_result));
239 clib_memcpy_fast (&last_key6, key60, sizeof (last_key6));
/* miss: punt */
243 next[0] = ipsec_ip6_if_no_tunnel (node, b[0], esp0);
/* v4 lookup: packed 64-bit key from (src address, SPI) */
251 ipsec4_tunnel_mk_key (key40, &ip40->src_address, esp0->spi);
253 if (key40->key == last_key4.key)
255 clib_memcpy_fast (&itr0, &last_result, sizeof (itr0));
260 clib_bihash_search_inline_8_16 (&im->tun4_protect_by_key,
264 clib_memcpy_fast (&itr0, &bkey40.value, sizeof (itr0));
265 clib_memcpy_fast (&last_result, &bkey40.value,
266 sizeof (last_result));
267 last_key4.key = key40->key;
/* miss: punt, after rewinding to the start of the IP header */
271 next[0] = ipsec_ip4_if_no_tunnel (node, b[0], esp0, ip40);
272 vlib_buffer_advance (b[0], -buf_rewind0);
/* hit: hand the SA and protect indices to the decrypt node and make
 * the packet appear received on the protected interface */
279 vnet_buffer (b[0])->ipsec.sad_index = itr0.sa_index;
280 vnet_buffer (b[0])->ipsec.protect_index = itr0.tun_index;
282 sw_if_index0 = itr0.sw_if_index;
283 vnet_buffer (b[0])->sw_if_index[VLIB_RX] = sw_if_index0;
/* admin-down interface: count the drop against it and discard */
285 if (PREDICT_FALSE (!vnet_sw_interface_is_admin_up (vnm, sw_if_index0)))
287 vlib_increment_combined_counter
288 (drop_counter, thread_index, sw_if_index0, 1, len0);
290 b[0]->error = node->errors[IPSEC_TUN_ERROR_DISABLED];
291 next[0] = is_ip6 ? VNET_DEVICE_INPUT_NEXT_IP6_DROP :
292 VNET_DEVICE_INPUT_NEXT_IP4_DROP;
/* interface unchanged: keep accumulating (accumulation lines elided);
 * otherwise flush the totals for the previous interface */
297 if (PREDICT_TRUE (sw_if_index0 == last_sw_if_index))
304 if (n_packets && !(itr0.flags & IPSEC_PROTECT_ENCAPED))
306 vlib_increment_combined_counter
307 (rx_counter, thread_index, last_sw_if_index,
311 last_sw_if_index = sw_if_index0;
316 //IPSEC_TUN_PROTECT_NEXT_DECRYPT;
317 next[0] = is_ip6 ? im->esp6_decrypt_tun_next_index :
318 im->esp4_decrypt_tun_next_index;
/* tunnel configured with feature redirect: restart the device-input
 * feature arc on the protected interface instead.
 * NOTE(review): declaration of next32 and the assignment back into
 * next[0] are on elided lines. */
320 if (itr0.flags & IPSEC_PROTECT_FEAT)
323 u8 arc = feature_main.device_input_feature_arc_index;
326 vnet_feature_arc_start (arc, sw_if_index0, &next32, b[0]);
331 if (PREDICT_FALSE (is_trace))
333 if (b[0]->flags & VLIB_BUFFER_IS_TRACED)
335 ipsec_tun_protect_input_trace_t *tr =
336 vlib_add_trace (vm, node, b[0], sizeof (*tr));
338 clib_memcpy (&tr->kv6, &bkey60, sizeof (tr->kv6));
340 clib_memcpy (&tr->kv4, &bkey40, sizeof (tr->kv4));
/* guard: esp0 may not be valid when the packet was too short */
342 tr->seq = (len0 >= sizeof (*esp0) ?
343 clib_host_to_net_u32 (esp0->seq) : ~0);
/* loop exit: flush the totals for the final interface */
353 if (n_packets && !(itr0.flags & IPSEC_PROTECT_ENCAPED))
354 vlib_increment_combined_counter (rx_counter,
356 last_sw_if_index, n_packets, n_bytes);
358 vlib_node_increment_counter (vm, node->node_index, IPSEC_TUN_ERROR_RX,
359 from_frame->n_vectors -
360 (n_disabled + n_no_tunnel));
361 vlib_node_increment_counter (vm, node->node_index, IPSEC_TUN_ERROR_NO_TUNNEL,
364 vlib_buffer_enqueue_to_next (vm, node, from, nexts, from_frame->n_vectors);
366 return from_frame->n_vectors;
/* ip4 node entry point: thin wrapper selecting the is_ip6 = 0 variant
 * of the shared inline above. */
369 VLIB_NODE_FN (ipsec4_tun_input_node) (vlib_main_t * vm,
370 vlib_node_runtime_t * node,
371 vlib_frame_t * from_frame)
373 return ipsec_tun_protect_input_inline (vm, node, from_frame, 0);
/* Node registration.  Declared a sibling of device-input so it shares
 * that node's next-node arcs (the VNET_DEVICE_INPUT_NEXT_* indices used
 * above).  NOTE(review): the initializer's closing "};" is on a line
 * elided from this chunk. */
377 VLIB_REGISTER_NODE (ipsec4_tun_input_node) = {
378 .name = "ipsec4-tun-input",
379 .vector_size = sizeof (u32),
380 .format_trace = format_ipsec_tun_protect_input_trace,
381 .type = VLIB_NODE_TYPE_INTERNAL,
382 .n_errors = IPSEC_TUN_N_ERROR,
383 .error_counters = ipsec_tun_error_counters,
384 .sibling_of = "device-input",
/* ip6 node entry point: is_ip6 = 1 variant of the shared inline above. */
388 VLIB_NODE_FN (ipsec6_tun_input_node) (vlib_main_t * vm,
389 vlib_node_runtime_t * node,
390 vlib_frame_t * from_frame)
392 return ipsec_tun_protect_input_inline (vm, node, from_frame, 1);
/* ip6 node registration; mirrors the ip4 registration above.
 * NOTE(review): the initializer's closing "};" is on a line elided from
 * this chunk. */
396 VLIB_REGISTER_NODE (ipsec6_tun_input_node) = {
397 .name = "ipsec6-tun-input",
398 .vector_size = sizeof (u32),
399 .format_trace = format_ipsec_tun_protect_input_trace,
400 .type = VLIB_NODE_TYPE_INTERNAL,
401 .n_errors = IPSEC_TUN_N_ERROR,
402 .error_counters = ipsec_tun_error_counters,
403 .sibling_of = "device-input",
408 * fd.io coding-style-patch-verification: ON
411 * eval: (c-set-style "gnu")