2 * decap.c : IPSec tunnel decapsulation
4 * Copyright (c) 2015 Cisco and/or its affiliates.
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at:
9 * http://www.apache.org/licenses/LICENSE-2.0
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
18 #include <vnet/vnet.h>
19 #include <vnet/api_errno.h>
20 #include <vnet/ip/ip.h>
21 #include <vnet/feature/feature.h>
22 #include <vnet/ipsec/ipsec_spd_fp_lookup.h>
24 #include <vnet/ipsec/ipsec.h>
25 #include <vnet/ipsec/esp.h>
26 #include <vnet/ipsec/ah.h>
27 #include <vnet/ipsec/ipsec_io.h>
/* X-macro list of per-node error counters: each _(SYM, string) pair expands
   to an IPSEC_INPUT_ERROR_##SYM enum value and its display string below. */
29 #define foreach_ipsec_input_error \
30 _(RX_PKTS, "IPSec pkts received") \
31 _(RX_POLICY_MATCH, "IPSec policy match") \
32 _(RX_POLICY_NO_MATCH, "IPSec policy not matched") \
33 _(RX_POLICY_BYPASS, "IPSec policy bypass") \
34 _(RX_POLICY_DISCARD, "IPSec policy discard")
/* Error-code enum generated from the list above.
   NOTE(review): the enum's opening "typedef enum {" line is not visible in
   this chunk — confirm against the full file. */
38 #define _(sym,str) IPSEC_INPUT_ERROR_##sym,
39 foreach_ipsec_input_error
42 } ipsec_input_error_t;
/* Human-readable strings, indexed by ipsec_input_error_t. */
44 static char *ipsec_input_error_strings[] = {
45 #define _(sym,string) string,
46 foreach_ipsec_input_error
/* Per-packet trace record; its field declarations are not visible in this
   chunk (closing line only). Fields referenced later: proto, sa_id, spd,
   policy_index, spi, seq. */
58 } ipsec_input_trace_t;
60 /* packet trace format function */
/* Renders an ipsec_input_trace_t for "show trace": IP protocol, SA id,
   SPD id, policy index, SPI (printed in both decimal and hex via the
   repeated t->spi argument) and ESP/AH sequence number.
   NOTE(review): the "static u8 *" return-type line is not visible here. */
62 format_ipsec_input_trace (u8 * s, va_list * args)
64 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
65 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
66 ipsec_input_trace_t *t = va_arg (*args, ipsec_input_trace_t *);
68 s = format (s, "%U: sa_id %u spd %u policy %d spi %u (0x%08x) seq %u",
69 format_ip_protocol, t->proto, t->sa_id,
70 t->spd, t->policy_index, t->spi, t->spi, t->seq);
/* Insert (or overwrite) an IPv4 inbound SPD flow-cache entry keyed by
   (src addr, dst addr, policy type). The hash-table value packs the policy
   pool index in the upper 32 bits and the current epoch count in the lower
   32 bits; a stale epoch invalidates the entry on lookup.
   sa/da are expected in host byte order here (converted to network order
   below so lookups can skip the conversion). */
76 ipsec4_input_spd_add_flow_cache_entry (ipsec_main_t *im, u32 sa, u32 da,
77 ipsec_spd_policy_type_t policy_type,
81 u8 is_overwrite = 0, is_stale_overwrite = 0;
82 /* Store in network byte order to avoid conversion on lookup */
83 ipsec4_inbound_spd_tuple_t ip4_tuple = {
84 .ip4_src_addr = (ip4_address_t) clib_host_to_net_u32 (sa),
85 .ip4_dest_addr = (ip4_address_t) clib_host_to_net_u32 (da),
86 .policy_type = policy_type
/* value = (policy index << 32) | current epoch, see layout note above. */
89 ip4_tuple.kv_16_8.value =
90 (((u64) pol_id) << 32) | ((u64) im->input_epoch_count);
/* Bucket count is a power of two, so masking is equivalent to modulo. */
92 hash = ipsec4_hash_16_8 (&ip4_tuple.kv_16_8);
93 hash &= (im->ipsec4_in_spd_hash_num_buckets - 1);
95 ipsec_spinlock_lock (&im->ipsec4_in_spd_hash_tbl[hash].bucket_lock);
96 /* Check if we are overwriting an existing entry so we know
97 whether to increment the flow cache counter. Since flow
98 cache counter is reset on any policy add/remove, but
99 hash table values are not, we need to check if the entry
100 we are overwriting is stale or not. If it's a stale entry
101 overwrite, we still want to increment flow cache counter */
102 is_overwrite = (im->ipsec4_in_spd_hash_tbl[hash].value != 0);
103 /* Check if we are overwriting a stale entry by comparing
104 with current epoch count */
105 if (PREDICT_FALSE (is_overwrite))
107 (im->input_epoch_count !=
108 ((u32) (im->ipsec4_in_spd_hash_tbl[hash].value & 0xFFFFFFFF)));
109 clib_memcpy_fast (&im->ipsec4_in_spd_hash_tbl[hash], &ip4_tuple.kv_16_8,
110 sizeof (ip4_tuple.kv_16_8));
111 ipsec_spinlock_unlock (&im->ipsec4_in_spd_hash_tbl[hash].bucket_lock);
113 /* Increment the counter to track active flow cache entries
114 when entering a fresh entry or overwriting a stale one */
115 if (!is_overwrite || is_stale_overwrite)
116 clib_atomic_fetch_add_relax (&im->ipsec4_in_spd_flow_cache_entries, 1);
/* Look up a policy in the IPv4 inbound SPD flow cache.
   sa/da are expected in network byte order (the tuple is stored in network
   order by the add path, so no conversion is done here). Returns the cached
   ipsec_policy_t * on a fresh-epoch key match, NULL otherwise. The bucket
   is copied out under its spinlock and compared afterwards. */
121 always_inline ipsec_policy_t *
122 ipsec4_input_spd_find_flow_cache_entry (ipsec_main_t *im, u32 sa, u32 da,
123 ipsec_spd_policy_type_t policy_type)
125 ipsec_policy_t *p = NULL;
126 ipsec4_hash_kv_16_8_t kv_result;
128 ipsec4_inbound_spd_tuple_t ip4_tuple = { .ip4_src_addr = (ip4_address_t) sa,
129 .ip4_dest_addr = (ip4_address_t) da,
130 .policy_type = policy_type };
/* Same power-of-two bucket masking as the add path. */
132 hash = ipsec4_hash_16_8 (&ip4_tuple.kv_16_8);
133 hash &= (im->ipsec4_in_spd_hash_num_buckets - 1);
135 ipsec_spinlock_lock (&im->ipsec4_in_spd_hash_tbl[hash].bucket_lock);
136 kv_result = im->ipsec4_in_spd_hash_tbl[hash];
137 ipsec_spinlock_unlock (&im->ipsec4_in_spd_hash_tbl[hash].bucket_lock);
/* Accept the entry only if the key matches AND its epoch (low 32 bits of
   the value) equals the current epoch — otherwise the entry is stale. */
139 if (ipsec4_hash_key_compare_16_8 ((u64 *) &ip4_tuple.kv_16_8,
142 if (im->input_epoch_count == ((u32) (kv_result.value & 0xFFFFFFFF)))
144 /* Get the policy based on the index */
146 pool_elt_at_index (im->policies, ((u32) (kv_result.value >> 32)));
/* Populate an ipsec_fp_5tuple_t for the fast-path inbound SPD lookup from
   an IPv4 local/remote address pair plus action (policy type). The l3 pad
   is zeroed so the IPv4 key hashes consistently inside the IPv6-sized key.
   NOTE(review): the SPI parameter/assignment lines are not visible in this
   chunk — callers pass the SPI as well; confirm against the full file. */
154 ipsec_fp_in_5tuple_from_ip4_range (ipsec_fp_5tuple_t *tuple, u32 la, u32 ra,
157 clib_memset (tuple->l3_zero_pad, 0, sizeof (tuple->l3_zero_pad));
158 tuple->laddr.as_u32 = la;
159 tuple->raddr.as_u32 = ra;
161 tuple->action = action;
/* Linear search of the SPD's policy vector of the given type (BYPASS or
   DISCARD on the inbound path) for the first policy whose local (da) and
   remote (sa) address ranges contain the packet. sa/da are in host byte
   order; policy range endpoints are stored in network order, hence the
   conversions. On a hit with the flow cache enabled, the result is cached. */
165 always_inline ipsec_policy_t *
166 ipsec_input_policy_match (ipsec_spd_t *spd, u32 sa, u32 da,
167 ipsec_spd_policy_type_t policy_type)
169 ipsec_main_t *im = &ipsec_main;
173 vec_foreach (i, spd->policies[policy_type])
175 p = pool_elt_at_index (im->policies, *i);
/* Range checks: destination against laddr range, source against raddr. */
177 if (da < clib_net_to_host_u32 (p->laddr.start.ip4.as_u32))
180 if (da > clib_net_to_host_u32 (p->laddr.stop.ip4.as_u32))
183 if (sa < clib_net_to_host_u32 (p->raddr.start.ip4.as_u32))
186 if (sa > clib_net_to_host_u32 (p->raddr.stop.ip4.as_u32))
189 if (im->input_flow_cache_flag)
191 /* Add an Entry in Flow cache */
192 ipsec4_input_spd_add_flow_cache_entry (im, sa, da, policy_type, *i);
/* Linear search of the SPD's INBOUND_PROTECT policies for one whose SA
   matches the packet. For tunnel-mode SAs the packet's outer sa/da must
   equal the SA's tunnel endpoints exactly; otherwise the policy's address
   ranges are checked like the bypass/discard path. sa/da/spi are in host
   byte order. A hit is inserted into the flow cache when enabled.
   NOTE(review): the SPI comparison line is not visible in this chunk. */
199 always_inline ipsec_policy_t *
200 ipsec_input_protect_policy_match (ipsec_spd_t *spd, u32 sa, u32 da, u32 spi)
202 ipsec_main_t *im = &ipsec_main;
207 vec_foreach (i, spd->policies[IPSEC_SPD_POLICY_IP4_INBOUND_PROTECT])
209 p = pool_elt_at_index (im->policies, *i);
210 s = ipsec_sa_get (p->sa_index);
/* Tunnel SA: require exact match on the outer tunnel endpoints. */
215 if (ipsec_sa_is_set_IS_TUNNEL (s))
217 if (da != clib_net_to_host_u32 (s->tunnel.t_dst.ip.ip4.as_u32))
220 if (sa != clib_net_to_host_u32 (s->tunnel.t_src.ip.ip4.as_u32))
/* Transport mode: fall back to the policy's address-range checks. */
226 if (da < clib_net_to_host_u32 (p->laddr.start.ip4.as_u32))
229 if (da > clib_net_to_host_u32 (p->laddr.stop.ip4.as_u32))
232 if (sa < clib_net_to_host_u32 (p->raddr.start.ip4.as_u32))
235 if (sa > clib_net_to_host_u32 (p->raddr.stop.ip4.as_u32))
239 if (im->input_flow_cache_flag)
241 /* Add an Entry in Flow cache */
242 ipsec4_input_spd_add_flow_cache_entry (
243 im, sa, da, IPSEC_SPD_POLICY_IP4_INBOUND_PROTECT, *i);
/* Inclusive range test for an IPv6 address: la <= a <= ua. memcmp over the
   16 address bytes gives numeric ordering because addresses are stored in
   network (big-endian) byte order. Returns non-zero on match.
   NOTE(review): return-type and return-statement lines are not visible in
   this chunk. */
252 ip6_addr_match_range (ip6_address_t * a, ip6_address_t * la,
255 if ((memcmp (a->as_u64, la->as_u64, 2 * sizeof (u64)) >= 0) &&
256 (memcmp (a->as_u64, ua->as_u64, 2 * sizeof (u64)) <= 0))
/* IPv6 counterpart of ipsec_input_protect_policy_match: scan the SPD's
   IP6 INBOUND_PROTECT policies; tunnel SAs need exact endpoint equality,
   transport mode uses the policy's inclusive address ranges.
   NOTE(review): the SPI comparison and return lines are not visible in
   this chunk. */
261 always_inline ipsec_policy_t *
262 ipsec6_input_protect_policy_match (ipsec_spd_t * spd,
264 ip6_address_t * da, u32 spi)
266 ipsec_main_t *im = &ipsec_main;
271 vec_foreach (i, spd->policies[IPSEC_SPD_POLICY_IP6_INBOUND_PROTECT])
273 p = pool_elt_at_index (im->policies, *i);
274 s = ipsec_sa_get (p->sa_index);
/* Tunnel SA: exact match against the SA's tunnel source/destination. */
279 if (ipsec_sa_is_set_IS_TUNNEL (s))
281 if (!ip6_address_is_equal (sa, &s->tunnel.t_src.ip.ip6))
284 if (!ip6_address_is_equal (da, &s->tunnel.t_dst.ip.ip6))
/* Transport mode: inclusive range checks via ip6_addr_match_range. */
290 if (!ip6_addr_match_range (sa, &p->raddr.start.ip6, &p->raddr.stop.ip6))
293 if (!ip6_addr_match_range (da, &p->laddr.start.ip6, &p->laddr.stop.ip6))
301 extern vlib_node_registration_t ipsec4_input_node;
/* IPv4 IPSec input feature node. For each buffer: classify by IP protocol
   (ESP / UDP-encapsulated ESP, or AH), then try to match an inbound SPD
   policy in priority order PROTECT -> BYPASS -> DISCARD using, in order of
   preference, the fast-path SPD, the flow cache, or a linear SPD walk.
   Matched PROTECT packets are handed to the esp4/ah4 decrypt nodes;
   unmatched packets are dropped. */
303 VLIB_NODE_FN (ipsec4_input_node) (vlib_main_t * vm,
304 vlib_node_runtime_t * node,
305 vlib_frame_t * frame)
307 u32 n_left_from, *from, thread_index;
308 ipsec_main_t *im = &ipsec_main;
309 u64 ipsec_unprocessed = 0, ipsec_matched = 0;
310 u64 ipsec_dropped = 0, ipsec_bypassed = 0;
311 vlib_buffer_t *bufs[VLIB_FRAME_SIZE];
312 vlib_buffer_t **b = bufs;
313 u16 nexts[VLIB_FRAME_SIZE], *next;
315 from = vlib_frame_vector_args (frame);
316 n_left_from = frame->n_vectors;
318 vlib_get_buffers (vm, from, bufs, n_left_from);
319 thread_index = vm->thread_index;
/* Main single-buffer loop with one-ahead data prefetch. */
322 while (n_left_from > 0)
326 esp_header_t *esp0 = NULL;
328 ip4_ipsec_config_t *c0;
330 ipsec_policy_t *p0 = NULL;
332 bool search_flow_cache = false;
333 ipsec_policy_t *policies[1];
334 ipsec_fp_5tuple_t tuples[1];
339 vlib_prefetch_buffer_data (b[1], LOAD);
/* Mark the buffer as IPv4 and fetch the per-interface SPD config. */
342 b[0]->flags |= VNET_BUFFER_F_IS_IP4;
343 b[0]->flags &= ~VNET_BUFFER_F_IS_IP6;
344 c0 = vnet_feature_next_with_data (&next32, b[0], sizeof (c0[0]));
345 next[0] = (u16) next32;
347 spd0 = pool_elt_at_index (im->spds, c0->spd_index);
349 ip0 = vlib_buffer_get_current (b[0]);
/* --- ESP (or UDP-encapsulated ESP, RFC 3948) branch --- */
352 (ip0->protocol == IP_PROTOCOL_IPSEC_ESP
353 || ip0->protocol == IP_PROTOCOL_UDP))
356 esp0 = (esp_header_t *) ((u8 *) ip0 + ip4_header_bytes (ip0));
357 if (PREDICT_FALSE (ip0->protocol == IP_PROTOCOL_UDP))
359 /* FIXME Skip, if not a UDP encapsulated packet */
360 esp0 = (esp_header_t *) ((u8 *) esp0 + sizeof (udp_header_t));
363 // if flow cache is enabled, first search through flow cache for a
364 // policy match for either protect, bypass or discard rules, in that
365 // order. if no match is found search_flow_cache is set to false
366 // and we revert back to linear search
367 search_flow_cache = im->input_flow_cache_flag;
/* PROTECT lookup: fast-path SPD first, then flow cache, then linear. */
370 if (im->fp_spd_ipv4_in_is_enabled &&
371 PREDICT_TRUE (INDEX_INVALID !=
372 spd0->fp_spd.ip4_in_lookup_hash_idx))
374 ipsec_fp_in_5tuple_from_ip4_range (
375 &tuples[0], ip0->src_address.as_u32, ip0->dst_address.as_u32,
376 clib_net_to_host_u32 (esp0->spi),
377 IPSEC_SPD_POLICY_IP4_INBOUND_PROTECT);
378 ipsec_fp_in_policy_match_n (&spd0->fp_spd, !ip_v6, tuples,
382 else if (search_flow_cache) // attempt to match policy in flow cache
384 p0 = ipsec4_input_spd_find_flow_cache_entry (
385 im, ip0->src_address.as_u32, ip0->dst_address.as_u32,
386 IPSEC_SPD_POLICY_IP4_INBOUND_PROTECT);
389 else // linear search if flow cache is not enabled,
390 // or flow cache search just failed
392 p0 = ipsec_input_protect_policy_match (
393 spd0, clib_net_to_host_u32 (ip0->src_address.as_u32),
394 clib_net_to_host_u32 (ip0->dst_address.as_u32),
395 clib_net_to_host_u32 (esp0->spi));
/* Ensure the buffer actually contains the full ESP header. */
399 vlib_buffer_has_space (b[0],
400 (clib_address_t) (esp0 + 1) -
401 (clib_address_t) ip0);
403 if (PREDICT_TRUE ((p0 != NULL) & (has_space0)))
/* PROTECT hit: count, record SA, hand off to esp4 decrypt. */
407 pi0 = p0 - im->policies;
408 vlib_increment_combined_counter
409 (&ipsec_spd_policy_counters,
410 thread_index, pi0, 1, clib_net_to_host_u16 (ip0->length));
412 vnet_buffer (b[0])->ipsec.sad_index = p0->sa_index;
413 next[0] = im->esp4_decrypt_next_index;
414 vlib_buffer_advance (b[0], ((u8 *) esp0 - (u8 *) ip0));
/* No PROTECT match: try BYPASS, same fast-path/cache/linear order. */
423 if (im->fp_spd_ipv4_in_is_enabled &&
424 PREDICT_TRUE (INDEX_INVALID !=
425 spd0->fp_spd.ip4_in_lookup_hash_idx))
427 tuples->action = IPSEC_SPD_POLICY_IP4_INBOUND_BYPASS;
428 ipsec_fp_in_policy_match_n (&spd0->fp_spd, !ip_v6, tuples,
432 else if (search_flow_cache)
434 p0 = ipsec4_input_spd_find_flow_cache_entry (
435 im, ip0->src_address.as_u32, ip0->dst_address.as_u32,
436 IPSEC_SPD_POLICY_IP4_INBOUND_BYPASS);
441 p0 = ipsec_input_policy_match (
442 spd0, clib_net_to_host_u32 (ip0->src_address.as_u32),
443 clib_net_to_host_u32 (ip0->dst_address.as_u32),
444 IPSEC_SPD_POLICY_IP4_INBOUND_BYPASS);
447 if (PREDICT_TRUE ((p0 != NULL)))
/* BYPASS hit: count and continue on the feature arc. */
451 pi0 = p0 - im->policies;
452 vlib_increment_combined_counter (
453 &ipsec_spd_policy_counters, thread_index, pi0, 1,
454 clib_net_to_host_u16 (ip0->length));
/* No BYPASS match: try DISCARD. */
464 if (im->fp_spd_ipv4_in_is_enabled &&
465 PREDICT_TRUE (INDEX_INVALID !=
466 spd0->fp_spd.ip4_in_lookup_hash_idx))
468 tuples->action = IPSEC_SPD_POLICY_IP4_INBOUND_DISCARD;
469 ipsec_fp_in_policy_match_n (&spd0->fp_spd, !ip_v6, tuples,
475 if (search_flow_cache)
477 p0 = ipsec4_input_spd_find_flow_cache_entry (
478 im, ip0->src_address.as_u32, ip0->dst_address.as_u32,
479 IPSEC_SPD_POLICY_IP4_INBOUND_DISCARD);
484 p0 = ipsec_input_policy_match (
485 spd0, clib_net_to_host_u32 (ip0->src_address.as_u32),
486 clib_net_to_host_u32 (ip0->dst_address.as_u32),
487 IPSEC_SPD_POLICY_IP4_INBOUND_DISCARD);
490 if (PREDICT_TRUE ((p0 != NULL)))
/* DISCARD hit: count and drop. */
494 pi0 = p0 - im->policies;
495 vlib_increment_combined_counter (
496 &ipsec_spd_policy_counters, thread_index, pi0, 1,
497 clib_net_to_host_u16 (ip0->length));
499 next[0] = IPSEC_INPUT_NEXT_DROP;
508 // flow cache search failed, try again with linear search
509 if (search_flow_cache && p0 == NULL)
511 search_flow_cache = false;
515 /* Drop by default if no match on PROTECT, BYPASS or DISCARD */
516 ipsec_unprocessed += 1;
517 next[0] = IPSEC_INPUT_NEXT_DROP;
/* Per-packet trace for the ESP branch (guards p0 and header space). */
520 if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE) &&
521 PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
523 ipsec_input_trace_t *tr =
524 vlib_add_trace (vm, node, b[0], sizeof (*tr));
526 tr->proto = ip0->protocol;
527 tr->sa_id = p0 ? p0->sa_id : ~0;
528 tr->spi = has_space0 ? clib_net_to_host_u32 (esp0->spi) : ~0;
529 tr->seq = has_space0 ? clib_net_to_host_u32 (esp0->seq) : ~0;
531 tr->policy_index = pi0;
/* --- AH branch: same PROTECT -> BYPASS -> DISCARD ladder, no
   fast-path SPD lookup here (flow cache / linear only). --- */
534 else if (ip0->protocol == IP_PROTOCOL_IPSEC_AH)
536 ah0 = (ah_header_t *) ((u8 *) ip0 + ip4_header_bytes (ip0));
538 // if flow cache is enabled, first search through flow cache for a
539 // policy match and revert back to linear search on failure
540 search_flow_cache = im->input_flow_cache_flag;
543 if (search_flow_cache)
545 p0 = ipsec4_input_spd_find_flow_cache_entry (
546 im, ip0->src_address.as_u32, ip0->dst_address.as_u32,
547 IPSEC_SPD_POLICY_IP4_INBOUND_PROTECT);
552 p0 = ipsec_input_protect_policy_match (
553 spd0, clib_net_to_host_u32 (ip0->src_address.as_u32),
554 clib_net_to_host_u32 (ip0->dst_address.as_u32),
555 clib_net_to_host_u32 (ah0->spi));
/* Ensure the buffer contains the full AH header. */
559 vlib_buffer_has_space (b[0],
560 (clib_address_t) (ah0 + 1) -
561 (clib_address_t) ip0);
563 if (PREDICT_TRUE ((p0 != NULL) & (has_space0)))
567 pi0 = p0 - im->policies;
568 vlib_increment_combined_counter
569 (&ipsec_spd_policy_counters,
570 thread_index, pi0, 1, clib_net_to_host_u16 (ip0->length));
572 vnet_buffer (b[0])->ipsec.sad_index = p0->sa_index;
573 next[0] = im->ah4_decrypt_next_index;
582 if (search_flow_cache)
584 p0 = ipsec4_input_spd_find_flow_cache_entry (
585 im, ip0->src_address.as_u32, ip0->dst_address.as_u32,
586 IPSEC_SPD_POLICY_IP4_INBOUND_BYPASS);
591 p0 = ipsec_input_policy_match (
592 spd0, clib_net_to_host_u32 (ip0->src_address.as_u32),
593 clib_net_to_host_u32 (ip0->dst_address.as_u32),
594 IPSEC_SPD_POLICY_IP4_INBOUND_BYPASS);
597 if (PREDICT_TRUE ((p0 != NULL)))
601 pi0 = p0 - im->policies;
602 vlib_increment_combined_counter (
603 &ipsec_spd_policy_counters, thread_index, pi0, 1,
604 clib_net_to_host_u16 (ip0->length));
614 if (search_flow_cache)
616 p0 = ipsec4_input_spd_find_flow_cache_entry (
617 im, ip0->src_address.as_u32, ip0->dst_address.as_u32,
618 IPSEC_SPD_POLICY_IP4_INBOUND_DISCARD);
623 p0 = ipsec_input_policy_match (
624 spd0, clib_net_to_host_u32 (ip0->src_address.as_u32),
625 clib_net_to_host_u32 (ip0->dst_address.as_u32),
626 IPSEC_SPD_POLICY_IP4_INBOUND_DISCARD);
629 if (PREDICT_TRUE ((p0 != NULL)))
633 pi0 = p0 - im->policies;
634 vlib_increment_combined_counter (
635 &ipsec_spd_policy_counters, thread_index, pi0, 1,
636 clib_net_to_host_u16 (ip0->length));
638 next[0] = IPSEC_INPUT_NEXT_DROP;
647 // flow cache search failed, retry with linear search
648 if (search_flow_cache && p0 == NULL)
650 search_flow_cache = false;
654 /* Drop by default if no match on PROTECT, BYPASS or DISCARD */
655 ipsec_unprocessed += 1;
656 next[0] = IPSEC_INPUT_NEXT_DROP;
/* Per-packet trace for the AH branch. */
659 if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE) &&
660 PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
662 ipsec_input_trace_t *tr =
663 vlib_add_trace (vm, node, b[0], sizeof (*tr));
665 tr->proto = ip0->protocol;
666 tr->sa_id = p0 ? p0->sa_id : ~0;
667 tr->spi = has_space0 ? clib_net_to_host_u32 (ah0->spi) : ~0;
668 tr->seq = has_space0 ? clib_net_to_host_u32 (ah0->seq_no) : ~0;
670 tr->policy_index = pi0;
/* Neither ESP nor AH: pass through unprocessed on the feature arc. */
675 ipsec_unprocessed += 1;
682 vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);
/* Flush the per-frame tallies into the node error counters. */
684 vlib_node_increment_counter (vm, ipsec4_input_node.index,
685 IPSEC_INPUT_ERROR_RX_PKTS, frame->n_vectors);
687 vlib_node_increment_counter (vm, ipsec4_input_node.index,
688 IPSEC_INPUT_ERROR_RX_POLICY_MATCH,
691 vlib_node_increment_counter (vm, ipsec4_input_node.index,
692 IPSEC_INPUT_ERROR_RX_POLICY_NO_MATCH,
695 vlib_node_increment_counter (vm, ipsec4_input_node.index,
696 IPSEC_INPUT_ERROR_RX_POLICY_DISCARD,
699 vlib_node_increment_counter (vm, ipsec4_input_node.index,
700 IPSEC_INPUT_ERROR_RX_POLICY_BYPASS,
703 return frame->n_vectors;
/* Graph-node registration for the IPv4 IPSec input feature node; wires up
   the trace formatter, the generated error strings, and the next-node map
   produced by foreach_ipsec_input_next. */
708 VLIB_REGISTER_NODE (ipsec4_input_node) = {
709 .name = "ipsec4-input-feature",
710 .vector_size = sizeof (u32),
711 .format_trace = format_ipsec_input_trace,
712 .type = VLIB_NODE_TYPE_INTERNAL,
713 .n_errors = ARRAY_LEN(ipsec_input_error_strings),
714 .error_strings = ipsec_input_error_strings,
715 .n_next_nodes = IPSEC_INPUT_N_NEXT,
717 #define _(s,n) [IPSEC_INPUT_NEXT_##s] = n,
718 foreach_ipsec_input_next
724 extern vlib_node_registration_t ipsec6_input_node;
/* IPv6 IPSec input feature node. Older single-buffer/get-next-frame style:
   only INBOUND_PROTECT policies are matched (no bypass/discard ladder, no
   flow cache, no fast-path SPD); matched packets go to esp6/ah6 decrypt,
   everything else is dropped or passed through unprocessed. */
727 VLIB_NODE_FN (ipsec6_input_node) (vlib_main_t * vm,
728 vlib_node_runtime_t * node,
729 vlib_frame_t * from_frame)
731 u32 n_left_from, *from, next_index, *to_next, thread_index;
732 ipsec_main_t *im = &ipsec_main;
733 u32 ipsec_unprocessed = 0;
734 u32 ipsec_matched = 0;
736 from = vlib_frame_vector_args (from_frame);
737 n_left_from = from_frame->n_vectors;
738 thread_index = vm->thread_index;
740 next_index = node->cached_next_index;
742 while (n_left_from > 0)
746 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
748 while (n_left_from > 0 && n_left_to_next > 0)
754 ip4_ipsec_config_t *c0;
756 ipsec_policy_t *p0 = 0;
/* Fixed IPv6 header only — extension headers are not walked here. */
758 u32 header_size = sizeof (ip0[0]);
760 bi0 = to_next[0] = from[0];
766 b0 = vlib_get_buffer (vm, bi0);
767 b0->flags |= VNET_BUFFER_F_IS_IP6;
768 b0->flags &= ~VNET_BUFFER_F_IS_IP4;
769 c0 = vnet_feature_next_with_data (&next0, b0, sizeof (c0[0]));
771 spd0 = pool_elt_at_index (im->spds, c0->spd_index);
773 ip0 = vlib_buffer_get_current (b0);
/* Both header views point just past the fixed IPv6 header; only the
   one matching ip0->protocol is meaningful. */
774 esp0 = (esp_header_t *) ((u8 *) ip0 + header_size);
775 ah0 = (ah_header_t *) ((u8 *) ip0 + header_size);
777 if (PREDICT_TRUE (ip0->protocol == IP_PROTOCOL_IPSEC_ESP))
781 ("packet received from %U to %U spi %u size %u spd_id %u",
782 format_ip6_address, &ip0->src_address, format_ip6_address,
783 &ip0->dst_address, clib_net_to_host_u32 (esp0->spi),
784 clib_net_to_host_u16 (ip0->payload_length) + header_size,
787 p0 = ipsec6_input_protect_policy_match (spd0,
793 if (PREDICT_TRUE (p0 != 0))
/* PROTECT hit: count, record SA, strip IP header, decrypt. */
797 pi0 = p0 - im->policies;
798 vlib_increment_combined_counter
799 (&ipsec_spd_policy_counters,
800 thread_index, pi0, 1,
801 clib_net_to_host_u16 (ip0->payload_length) +
804 vnet_buffer (b0)->ipsec.sad_index = p0->sa_index;
805 next0 = im->esp6_decrypt_next_index;
806 vlib_buffer_advance (b0, header_size);
812 ipsec_unprocessed += 1;
813 next0 = IPSEC_INPUT_NEXT_DROP;
816 else if (ip0->protocol == IP_PROTOCOL_IPSEC_AH)
818 p0 = ipsec6_input_protect_policy_match (spd0,
824 if (PREDICT_TRUE (p0 != 0))
827 pi0 = p0 - im->policies;
828 vlib_increment_combined_counter
829 (&ipsec_spd_policy_counters,
830 thread_index, pi0, 1,
831 clib_net_to_host_u16 (ip0->payload_length) +
834 vnet_buffer (b0)->ipsec.sad_index = p0->sa_index;
835 next0 = im->ah6_decrypt_next_index;
841 ipsec_unprocessed += 1;
842 next0 = IPSEC_INPUT_NEXT_DROP;
/* Neither ESP nor AH: pass through unprocessed. */
847 ipsec_unprocessed += 1;
/* NOTE(review): unlike the IPv4 node, this trace path dereferences
   p0 without a NULL guard (p0 can be 0 on the unprocessed path) and
   always reads esp0->spi/seq even for AH packets — confirm against
   the full file / upstream fixes. */
851 if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE) &&
852 PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
854 ipsec_input_trace_t *tr =
855 vlib_add_trace (vm, node, b0, sizeof (*tr));
858 tr->sa_id = p0->sa_id;
859 tr->proto = ip0->protocol;
860 tr->spi = clib_net_to_host_u32 (esp0->spi);
861 tr->seq = clib_net_to_host_u32 (esp0->seq);
865 vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
866 n_left_to_next, bi0, next0);
868 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
871 vlib_node_increment_counter (vm, ipsec6_input_node.index,
872 IPSEC_INPUT_ERROR_RX_PKTS,
873 from_frame->n_vectors - ipsec_unprocessed);
875 vlib_node_increment_counter (vm, ipsec6_input_node.index,
876 IPSEC_INPUT_ERROR_RX_POLICY_MATCH,
879 return from_frame->n_vectors;
/* Graph-node registration for the IPv6 IPSec input feature node; shares the
   trace formatter, error strings and next-node map with the IPv4 node. */
884 VLIB_REGISTER_NODE (ipsec6_input_node) = {
885 .name = "ipsec6-input-feature",
886 .vector_size = sizeof (u32),
887 .format_trace = format_ipsec_input_trace,
888 .type = VLIB_NODE_TYPE_INTERNAL,
889 .n_errors = ARRAY_LEN(ipsec_input_error_strings),
890 .error_strings = ipsec_input_error_strings,
891 .n_next_nodes = IPSEC_INPUT_N_NEXT,
893 #define _(s,n) [IPSEC_INPUT_NEXT_##s] = n,
894 foreach_ipsec_input_next
901 * fd.io coding-style-patch-verification: ON
904 * eval: (c-set-style "gnu")