0c572c83e968f0ef3c0d6c535f4b323431427684
[vpp.git] / src / vnet / ipsec / ipsec_input.c
1 /*
2  * decap.c : IPSec tunnel decapsulation
3  *
4  * Copyright (c) 2015 Cisco and/or its affiliates.
5  * Licensed under the Apache License, Version 2.0 (the "License");
6  * you may not use this file except in compliance with the License.
7  * You may obtain a copy of the License at:
8  *
9  *     http://www.apache.org/licenses/LICENSE-2.0
10  *
11  * Unless required by applicable law or agreed to in writing, software
12  * distributed under the License is distributed on an "AS IS" BASIS,
13  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14  * See the License for the specific language governing permissions and
15  * limitations under the License.
16  */
17
18 #include <vnet/vnet.h>
19 #include <vnet/api_errno.h>
20 #include <vnet/ip/ip.h>
21 #include <vnet/feature/feature.h>
22 #include <vnet/ipsec/ipsec_spd_fp_lookup.h>
23
24 #include <vnet/ipsec/ipsec.h>
25 #include <vnet/ipsec/esp.h>
26 #include <vnet/ipsec/ah.h>
27 #include <vnet/ipsec/ipsec_io.h>
28
/* Per-node error/counter table: each entry expands to an enum symbol
   (IPSEC_INPUT_ERROR_<sym>) and the string shown by the CLI counters. */
#define foreach_ipsec_input_error                       \
_(RX_PKTS, "IPSec pkts received")                       \
_(RX_POLICY_MATCH, "IPSec policy match")                \
_(RX_POLICY_NO_MATCH, "IPSec policy not matched")       \
_(RX_POLICY_BYPASS, "IPSec policy bypass")              \
_(RX_POLICY_DISCARD, "IPSec policy discard")

typedef enum
{
#define _(sym,str) IPSEC_INPUT_ERROR_##sym,
  foreach_ipsec_input_error
#undef _
    IPSEC_INPUT_N_ERROR,
} ipsec_input_error_t;

/* Error strings, in the same order as the enum above. */
static char *ipsec_input_error_strings[] = {
#define _(sym,string) string,
  foreach_ipsec_input_error
#undef _
};
49
/* Per-packet trace record captured when node tracing is enabled. */
typedef struct
{
  ip_protocol_t proto;	/* L4 protocol of the received packet */
  u32 spd;		/* id of the SPD the packet was matched against */
  u32 policy_index;	/* index of the matched policy, ~0 if none */
  u32 sa_id;		/* sa_id of the matched policy, ~0 if none */
  u32 spi;		/* SPI from the ESP/AH header (host order) */
  u32 seq;		/* sequence number from the ESP/AH header */
} ipsec_input_trace_t;
59
60 /* packet trace format function */
61 static u8 *
62 format_ipsec_input_trace (u8 * s, va_list * args)
63 {
64   CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
65   CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
66   ipsec_input_trace_t *t = va_arg (*args, ipsec_input_trace_t *);
67
68   s = format (s, "%U: sa_id %u spd %u policy %d spi %u (0x%08x) seq %u",
69               format_ip_protocol, t->proto, t->sa_id,
70               t->spd, t->policy_index, t->spi, t->spi, t->seq);
71
72   return s;
73 }
74
/*
 * Insert (or overwrite) an inbound IPv4 flow-cache entry mapping the
 * (src, dst, policy_type) tuple to a policy pool index.
 *
 * @param im          ipsec main
 * @param sa          source address, host byte order
 * @param da          destination address, host byte order
 * @param policy_type matched SPD policy type (protect/bypass/discard)
 * @param pol_id      index of the matched policy in im->policies
 */
always_inline void
ipsec4_input_spd_add_flow_cache_entry (ipsec_main_t *im, u32 sa, u32 da,
				       ipsec_spd_policy_type_t policy_type,
				       u32 pol_id)
{
  u64 hash;
  u8 is_overwrite = 0, is_stale_overwrite = 0;
  /* Store in network byte order to avoid conversion on lookup */
  ipsec4_inbound_spd_tuple_t ip4_tuple = {
    .ip4_src_addr = (ip4_address_t) clib_host_to_net_u32 (sa),
    .ip4_dest_addr = (ip4_address_t) clib_host_to_net_u32 (da),
    .policy_type = policy_type
  };

  /* Value packs the policy index (upper 32 bits) together with the
     current epoch (lower 32 bits) so stale entries can be detected. */
  ip4_tuple.kv_16_8.value =
    (((u64) pol_id) << 32) | ((u64) im->input_epoch_count);

  hash = ipsec4_hash_16_8 (&ip4_tuple.kv_16_8);
  hash &= (im->ipsec4_in_spd_hash_num_buckets - 1);

  ipsec_spinlock_lock (&im->ipsec4_in_spd_hash_tbl[hash].bucket_lock);
  /* Check if we are overwriting an existing entry so we know
    whether to increment the flow cache counter. Since flow
    cache counter is reset on any policy add/remove, but
    hash table values are not, we need to check if the entry
    we are overwriting is stale or not. If it's a stale entry
    overwrite, we still want to increment flow cache counter */
  is_overwrite = (im->ipsec4_in_spd_hash_tbl[hash].value != 0);
  /* Check if we are overwriting a stale entry by comparing
     with current epoch count */
  if (PREDICT_FALSE (is_overwrite))
    is_stale_overwrite =
      (im->input_epoch_count !=
       ((u32) (im->ipsec4_in_spd_hash_tbl[hash].value & 0xFFFFFFFF)));
  clib_memcpy_fast (&im->ipsec4_in_spd_hash_tbl[hash], &ip4_tuple.kv_16_8,
		    sizeof (ip4_tuple.kv_16_8));
  ipsec_spinlock_unlock (&im->ipsec4_in_spd_hash_tbl[hash].bucket_lock);

  /* Increment the counter to track active flow cache entries
    when entering a fresh entry or overwriting a stale one */
  if (!is_overwrite || is_stale_overwrite)
    clib_atomic_fetch_add_relax (&im->ipsec4_in_spd_flow_cache_entries, 1);

  return;
}
120
121 always_inline ipsec_policy_t *
122 ipsec4_input_spd_find_flow_cache_entry (ipsec_main_t *im, u32 sa, u32 da,
123                                         ipsec_spd_policy_type_t policy_type)
124 {
125   ipsec_policy_t *p = NULL;
126   ipsec4_hash_kv_16_8_t kv_result;
127   u64 hash;
128   ipsec4_inbound_spd_tuple_t ip4_tuple = { .ip4_src_addr = (ip4_address_t) sa,
129                                            .ip4_dest_addr = (ip4_address_t) da,
130                                            .policy_type = policy_type };
131
132   hash = ipsec4_hash_16_8 (&ip4_tuple.kv_16_8);
133   hash &= (im->ipsec4_in_spd_hash_num_buckets - 1);
134
135   ipsec_spinlock_lock (&im->ipsec4_in_spd_hash_tbl[hash].bucket_lock);
136   kv_result = im->ipsec4_in_spd_hash_tbl[hash];
137   ipsec_spinlock_unlock (&im->ipsec4_in_spd_hash_tbl[hash].bucket_lock);
138
139   if (ipsec4_hash_key_compare_16_8 ((u64 *) &ip4_tuple.kv_16_8,
140                                     (u64 *) &kv_result))
141     {
142       if (im->input_epoch_count == ((u32) (kv_result.value & 0xFFFFFFFF)))
143         {
144           /* Get the policy based on the index */
145           p =
146             pool_elt_at_index (im->policies, ((u32) (kv_result.value >> 32)));
147         }
148     }
149
150   return p;
151 }
152
153 always_inline void
154 ipsec_fp_in_5tuple_from_ip4_range (ipsec_fp_5tuple_t *tuple, u32 la, u32 ra,
155                                    u32 spi, u8 action)
156 {
157   clib_memset (tuple->l3_zero_pad, 0, sizeof (tuple->l3_zero_pad));
158   tuple->laddr.as_u32 = la;
159   tuple->raddr.as_u32 = ra;
160   tuple->spi = spi;
161   tuple->action = action;
162   tuple->is_ipv6 = 0;
163 }
164
165 always_inline ipsec_policy_t *
166 ipsec_input_policy_match (ipsec_spd_t *spd, u32 sa, u32 da,
167                           ipsec_spd_policy_type_t policy_type)
168 {
169   ipsec_main_t *im = &ipsec_main;
170   ipsec_policy_t *p;
171   u32 *i;
172
173   vec_foreach (i, spd->policies[policy_type])
174   {
175     p = pool_elt_at_index (im->policies, *i);
176
177     if (da < clib_net_to_host_u32 (p->laddr.start.ip4.as_u32))
178       continue;
179
180     if (da > clib_net_to_host_u32 (p->laddr.stop.ip4.as_u32))
181       continue;
182
183     if (sa < clib_net_to_host_u32 (p->raddr.start.ip4.as_u32))
184       continue;
185
186     if (sa > clib_net_to_host_u32 (p->raddr.stop.ip4.as_u32))
187       continue;
188
189     if (im->input_flow_cache_flag)
190       {
191         /* Add an Entry in Flow cache */
192         ipsec4_input_spd_add_flow_cache_entry (im, sa, da, policy_type, *i);
193       }
194     return p;
195   }
196   return 0;
197 }
198
199 always_inline ipsec_policy_t *
200 ipsec_input_protect_policy_match (ipsec_spd_t *spd, u32 sa, u32 da, u32 spi)
201 {
202   ipsec_main_t *im = &ipsec_main;
203   ipsec_policy_t *p;
204   ipsec_sa_t *s;
205   u32 *i;
206
207   vec_foreach (i, spd->policies[IPSEC_SPD_POLICY_IP4_INBOUND_PROTECT])
208   {
209     p = pool_elt_at_index (im->policies, *i);
210     s = ipsec_sa_get (p->sa_index);
211
212     if (spi != s->spi)
213       continue;
214
215     if (ipsec_sa_is_set_IS_TUNNEL (s))
216       {
217         if (da != clib_net_to_host_u32 (s->tunnel.t_dst.ip.ip4.as_u32))
218           continue;
219
220         if (sa != clib_net_to_host_u32 (s->tunnel.t_src.ip.ip4.as_u32))
221           continue;
222
223         goto return_policy;
224       }
225
226     if (da < clib_net_to_host_u32 (p->laddr.start.ip4.as_u32))
227       continue;
228
229     if (da > clib_net_to_host_u32 (p->laddr.stop.ip4.as_u32))
230       continue;
231
232     if (sa < clib_net_to_host_u32 (p->raddr.start.ip4.as_u32))
233       continue;
234
235     if (sa > clib_net_to_host_u32 (p->raddr.stop.ip4.as_u32))
236       continue;
237
238   return_policy:
239     if (im->input_flow_cache_flag)
240       {
241         /* Add an Entry in Flow cache */
242         ipsec4_input_spd_add_flow_cache_entry (
243           im, sa, da, IPSEC_SPD_POLICY_IP4_INBOUND_PROTECT, *i);
244       }
245
246     return p;
247   }
248   return 0;
249 }
250
251 always_inline uword
252 ip6_addr_match_range (ip6_address_t * a, ip6_address_t * la,
253                       ip6_address_t * ua)
254 {
255   if ((memcmp (a->as_u64, la->as_u64, 2 * sizeof (u64)) >= 0) &&
256       (memcmp (a->as_u64, ua->as_u64, 2 * sizeof (u64)) <= 0))
257     return 1;
258   return 0;
259 }
260
261 always_inline ipsec_policy_t *
262 ipsec6_input_protect_policy_match (ipsec_spd_t * spd,
263                                    ip6_address_t * sa,
264                                    ip6_address_t * da, u32 spi)
265 {
266   ipsec_main_t *im = &ipsec_main;
267   ipsec_policy_t *p;
268   ipsec_sa_t *s;
269   u32 *i;
270
271   vec_foreach (i, spd->policies[IPSEC_SPD_POLICY_IP6_INBOUND_PROTECT])
272   {
273     p = pool_elt_at_index (im->policies, *i);
274     s = ipsec_sa_get (p->sa_index);
275
276     if (spi != s->spi)
277       continue;
278
279     if (ipsec_sa_is_set_IS_TUNNEL (s))
280       {
281         if (!ip6_address_is_equal (sa, &s->tunnel.t_src.ip.ip6))
282           continue;
283
284         if (!ip6_address_is_equal (da, &s->tunnel.t_dst.ip.ip6))
285           continue;
286
287         return p;
288       }
289
290     if (!ip6_addr_match_range (sa, &p->raddr.start.ip6, &p->raddr.stop.ip6))
291       continue;
292
293     if (!ip6_addr_match_range (da, &p->laddr.start.ip6, &p->laddr.stop.ip6))
294       continue;
295
296     return p;
297   }
298   return 0;
299 }
300
301 extern vlib_node_registration_t ipsec4_input_node;
302
/*
 * ipsec4-input-feature node function.
 *
 * Classifies IPv4 ESP, UDP(-encapsulated ESP) and AH packets against
 * the inbound SPD selected by the feature config.  Policy types are
 * tried in order PROTECT, BYPASS, DISCARD; each lookup prefers the
 * fast-path tables (when enabled), then the flow cache (when enabled),
 * then a linear SPD scan (flow-cache misses retry via the labels
 * below with search_flow_cache cleared).  PROTECT hits go to the
 * ESP/AH decrypt nodes, BYPASS hits continue on the feature arc,
 * DISCARD hits and unmatched packets are dropped.
 */
VLIB_NODE_FN (ipsec4_input_node) (vlib_main_t * vm,
				  vlib_node_runtime_t * node,
				  vlib_frame_t * frame)
{
  u32 n_left_from, *from, thread_index;
  ipsec_main_t *im = &ipsec_main;
  u64 ipsec_unprocessed = 0, ipsec_matched = 0;
  u64 ipsec_dropped = 0, ipsec_bypassed = 0;
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE];
  vlib_buffer_t **b = bufs;
  u16 nexts[VLIB_FRAME_SIZE], *next;

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next = nexts;
  vlib_get_buffers (vm, from, bufs, n_left_from);
  thread_index = vm->thread_index;


  while (n_left_from > 0)
    {
      u32 next32, pi0;
      ip4_header_t *ip0;
      esp_header_t *esp0 = NULL;
      ah_header_t *ah0;
      ip4_ipsec_config_t *c0;
      ipsec_spd_t *spd0;
      ipsec_policy_t *p0 = NULL;
      u8 has_space0;
      bool search_flow_cache = false;
      ipsec_policy_t *policies[1];
      ipsec_fp_5tuple_t tuples[1];
      bool ip_v6 = true;

      /* Prefetch the next buffer's data while this one is processed. */
      if (n_left_from > 2)
	{
	  vlib_prefetch_buffer_data (b[1], LOAD);
	}

      b[0]->flags |= VNET_BUFFER_F_IS_IP4;
      b[0]->flags &= ~VNET_BUFFER_F_IS_IP6;
      c0 = vnet_feature_next_with_data (&next32, b[0], sizeof (c0[0]));
      next[0] = (u16) next32;

      spd0 = pool_elt_at_index (im->spds, c0->spd_index);

      ip0 = vlib_buffer_get_current (b[0]);

      if (PREDICT_TRUE
	  (ip0->protocol == IP_PROTOCOL_IPSEC_ESP
	   || ip0->protocol == IP_PROTOCOL_UDP))
	{

	  esp0 = (esp_header_t *) ((u8 *) ip0 + ip4_header_bytes (ip0));
	  if (PREDICT_FALSE (ip0->protocol == IP_PROTOCOL_UDP))
	    {
	      /* FIXME Skip, if not a UDP encapsulated packet */
	      esp0 = (esp_header_t *) ((u8 *) esp0 + sizeof (udp_header_t));
	    }

	  // if flow cache is enabled, first search through flow cache for a
	  // policy match for either protect, bypass or discard rules, in that
	  // order. if no match is found search_flow_cache is set to false (1)
	  // and we revert back to linear search
	  search_flow_cache = im->input_flow_cache_flag;

	esp_or_udp:
	  /* --- PROTECT lookup --- */
	  if (im->fp_spd_ipv4_in_is_enabled &&
	      PREDICT_TRUE (INDEX_INVALID !=
			    spd0->fp_spd.ip4_in_lookup_hash_idx))
	    {
	      ipsec_fp_in_5tuple_from_ip4_range (
		&tuples[0], ip0->src_address.as_u32, ip0->dst_address.as_u32,
		clib_net_to_host_u32 (esp0->spi),
		IPSEC_SPD_POLICY_IP4_INBOUND_PROTECT);
	      ipsec_fp_in_policy_match_n (&spd0->fp_spd, !ip_v6, tuples,
					  policies, 1);
	      p0 = policies[0];
	    }
	  else if (search_flow_cache) // attempt to match policy in flow cache
	    {
	      p0 = ipsec4_input_spd_find_flow_cache_entry (
		im, ip0->src_address.as_u32, ip0->dst_address.as_u32,
		IPSEC_SPD_POLICY_IP4_INBOUND_PROTECT);
	    }

	  else // linear search if flow cache is not enabled,
	       // or flow cache search just failed
	    {
	      p0 = ipsec_input_protect_policy_match (
		spd0, clib_net_to_host_u32 (ip0->src_address.as_u32),
		clib_net_to_host_u32 (ip0->dst_address.as_u32),
		clib_net_to_host_u32 (esp0->spi));
	    }

	  /* Make sure the full ESP header fits in the buffer before it
	     is trusted (also gates the trace SPI/seq reads below). */
	  has_space0 =
	    vlib_buffer_has_space (b[0],
				   (clib_address_t) (esp0 + 1) -
				   (clib_address_t) ip0);

	  if (PREDICT_TRUE ((p0 != NULL) & (has_space0)))
	    {
	      ipsec_matched += 1;

	      pi0 = p0 - im->policies;
	      vlib_increment_combined_counter
		(&ipsec_spd_policy_counters,
		 thread_index, pi0, 1, clib_net_to_host_u16 (ip0->length));

	      /* Hand the packet to the ESP decrypt node, positioned at
	         the ESP header. */
	      vnet_buffer (b[0])->ipsec.sad_index = p0->sa_index;
	      next[0] = im->esp4_decrypt_next_index;
	      vlib_buffer_advance (b[0], ((u8 *) esp0 - (u8 *) ip0));
	      goto trace0;
	    }
	  else
	    {
	      p0 = 0;
	      pi0 = ~0;
	    };

	  /* --- BYPASS lookup --- */
	  if (im->fp_spd_ipv4_in_is_enabled &&
	      PREDICT_TRUE (INDEX_INVALID !=
			    spd0->fp_spd.ip4_in_lookup_hash_idx))
	    {
	      tuples->action = IPSEC_SPD_POLICY_IP4_INBOUND_BYPASS;
	      ipsec_fp_in_policy_match_n (&spd0->fp_spd, !ip_v6, tuples,
					  policies, 1);
	      p0 = policies[0];
	    }
	  else if (search_flow_cache)
	    {
	      p0 = ipsec4_input_spd_find_flow_cache_entry (
		im, ip0->src_address.as_u32, ip0->dst_address.as_u32,
		IPSEC_SPD_POLICY_IP4_INBOUND_BYPASS);
	    }

	  else
	    {
	      p0 = ipsec_input_policy_match (
		spd0, clib_net_to_host_u32 (ip0->src_address.as_u32),
		clib_net_to_host_u32 (ip0->dst_address.as_u32),
		IPSEC_SPD_POLICY_IP4_INBOUND_BYPASS);
	    }

	  if (PREDICT_TRUE ((p0 != NULL)))
	    {
	      /* Bypassed: next[0] stays as the feature arc's next node. */
	      ipsec_bypassed += 1;

	      pi0 = p0 - im->policies;
	      vlib_increment_combined_counter (
		&ipsec_spd_policy_counters, thread_index, pi0, 1,
		clib_net_to_host_u16 (ip0->length));

	      goto trace0;
	    }
	  else
	    {
	      p0 = 0;
	      pi0 = ~0;
	    };

	  /* --- DISCARD lookup --- */
	  if (im->fp_spd_ipv4_in_is_enabled &&
	      PREDICT_TRUE (INDEX_INVALID !=
			    spd0->fp_spd.ip4_in_lookup_hash_idx))
	    {
	      tuples->action = IPSEC_SPD_POLICY_IP4_INBOUND_DISCARD;
	      ipsec_fp_in_policy_match_n (&spd0->fp_spd, !ip_v6, tuples,
					  policies, 1);
	      p0 = policies[0];
	    }
	  else

	    if (search_flow_cache)
	    {
	      p0 = ipsec4_input_spd_find_flow_cache_entry (
		im, ip0->src_address.as_u32, ip0->dst_address.as_u32,
		IPSEC_SPD_POLICY_IP4_INBOUND_DISCARD);
	    }

	  else
	    {
	      p0 = ipsec_input_policy_match (
		spd0, clib_net_to_host_u32 (ip0->src_address.as_u32),
		clib_net_to_host_u32 (ip0->dst_address.as_u32),
		IPSEC_SPD_POLICY_IP4_INBOUND_DISCARD);
	    }

	  if (PREDICT_TRUE ((p0 != NULL)))
	    {
	      ipsec_dropped += 1;

	      pi0 = p0 - im->policies;
	      vlib_increment_combined_counter (
		&ipsec_spd_policy_counters, thread_index, pi0, 1,
		clib_net_to_host_u16 (ip0->length));

	      next[0] = IPSEC_INPUT_NEXT_DROP;
	      goto trace0;
	    }
	  else
	    {
	      p0 = 0;
	      pi0 = ~0;
	    };

	  // flow cache search failed, try again with linear search
	  if (search_flow_cache && p0 == NULL)
	    {
	      search_flow_cache = false;
	      goto esp_or_udp;
	    }

	  /* Drop by default if no match on PROTECT, BYPASS or DISCARD */
	  ipsec_unprocessed += 1;
	  next[0] = IPSEC_INPUT_NEXT_DROP;

	trace0:
	  if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE) &&
	      PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
	    {
	      ipsec_input_trace_t *tr =
		vlib_add_trace (vm, node, b[0], sizeof (*tr));

	      tr->proto = ip0->protocol;
	      tr->sa_id = p0 ? p0->sa_id : ~0;
	      tr->spi = has_space0 ? clib_net_to_host_u32 (esp0->spi) : ~0;
	      tr->seq = has_space0 ? clib_net_to_host_u32 (esp0->seq) : ~0;
	      tr->spd = spd0->id;
	      tr->policy_index = pi0;
	    }
	}
      else if (ip0->protocol == IP_PROTOCOL_IPSEC_AH)
	{
	  ah0 = (ah_header_t *) ((u8 *) ip0 + ip4_header_bytes (ip0));

	  // if flow cache is enabled, first search through flow cache for a
	  // policy match and revert back to linear search on failure
	  search_flow_cache = im->input_flow_cache_flag;

	ah:
	  /* --- PROTECT lookup (AH path, no fast-path tables) --- */
	  if (search_flow_cache)
	    {
	      p0 = ipsec4_input_spd_find_flow_cache_entry (
		im, ip0->src_address.as_u32, ip0->dst_address.as_u32,
		IPSEC_SPD_POLICY_IP4_INBOUND_PROTECT);
	    }

	  else
	    {
	      p0 = ipsec_input_protect_policy_match (
		spd0, clib_net_to_host_u32 (ip0->src_address.as_u32),
		clib_net_to_host_u32 (ip0->dst_address.as_u32),
		clib_net_to_host_u32 (ah0->spi));
	    }

	  has_space0 =
	    vlib_buffer_has_space (b[0],
				   (clib_address_t) (ah0 + 1) -
				   (clib_address_t) ip0);

	  if (PREDICT_TRUE ((p0 != NULL) & (has_space0)))
	    {
	      ipsec_matched += 1;

	      pi0 = p0 - im->policies;
	      vlib_increment_combined_counter
		(&ipsec_spd_policy_counters,
		 thread_index, pi0, 1, clib_net_to_host_u16 (ip0->length));

	      /* AH decrypt node parses from the IP header: no advance. */
	      vnet_buffer (b[0])->ipsec.sad_index = p0->sa_index;
	      next[0] = im->ah4_decrypt_next_index;
	      goto trace1;
	    }
	  else
	    {
	      p0 = 0;
	      pi0 = ~0;
	    }

	  /* --- BYPASS lookup --- */
	  if (search_flow_cache)
	    {
	      p0 = ipsec4_input_spd_find_flow_cache_entry (
		im, ip0->src_address.as_u32, ip0->dst_address.as_u32,
		IPSEC_SPD_POLICY_IP4_INBOUND_BYPASS);
	    }

	  else
	    {
	      p0 = ipsec_input_policy_match (
		spd0, clib_net_to_host_u32 (ip0->src_address.as_u32),
		clib_net_to_host_u32 (ip0->dst_address.as_u32),
		IPSEC_SPD_POLICY_IP4_INBOUND_BYPASS);
	    }

	  if (PREDICT_TRUE ((p0 != NULL)))
	    {
	      ipsec_bypassed += 1;

	      pi0 = p0 - im->policies;
	      vlib_increment_combined_counter (
		&ipsec_spd_policy_counters, thread_index, pi0, 1,
		clib_net_to_host_u16 (ip0->length));

	      goto trace1;
	    }
	  else
	    {
	      p0 = 0;
	      pi0 = ~0;
	    };

	  /* --- DISCARD lookup --- */
	  if (search_flow_cache)
	    {
	      p0 = ipsec4_input_spd_find_flow_cache_entry (
		im, ip0->src_address.as_u32, ip0->dst_address.as_u32,
		IPSEC_SPD_POLICY_IP4_INBOUND_DISCARD);
	    }

	  else
	    {
	      p0 = ipsec_input_policy_match (
		spd0, clib_net_to_host_u32 (ip0->src_address.as_u32),
		clib_net_to_host_u32 (ip0->dst_address.as_u32),
		IPSEC_SPD_POLICY_IP4_INBOUND_DISCARD);
	    }

	  if (PREDICT_TRUE ((p0 != NULL)))
	    {
	      ipsec_dropped += 1;

	      pi0 = p0 - im->policies;
	      vlib_increment_combined_counter (
		&ipsec_spd_policy_counters, thread_index, pi0, 1,
		clib_net_to_host_u16 (ip0->length));

	      next[0] = IPSEC_INPUT_NEXT_DROP;
	      goto trace1;
	    }
	  else
	    {
	      p0 = 0;
	      pi0 = ~0;
	    };

	  // flow cache search failed, retry with linear search
	  if (search_flow_cache && p0 == NULL)
	    {
	      search_flow_cache = false;
	      goto ah;
	    }

	  /* Drop by default if no match on PROTECT, BYPASS or DISCARD */
	  ipsec_unprocessed += 1;
	  next[0] = IPSEC_INPUT_NEXT_DROP;

	trace1:
	  if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE) &&
	      PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
	    {
	      ipsec_input_trace_t *tr =
		vlib_add_trace (vm, node, b[0], sizeof (*tr));

	      tr->proto = ip0->protocol;
	      tr->sa_id = p0 ? p0->sa_id : ~0;
	      tr->spi = has_space0 ? clib_net_to_host_u32 (ah0->spi) : ~0;
	      tr->seq = has_space0 ? clib_net_to_host_u32 (ah0->seq_no) : ~0;
	      tr->spd = spd0->id;
	      tr->policy_index = pi0;
	    }
	}
      else
	{
	  /* Not ESP/UDP/AH: counted and passed along the feature arc. */
	  ipsec_unprocessed += 1;
	}
      n_left_from -= 1;
      b += 1;
      next += 1;
    }

  vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);

  vlib_node_increment_counter (vm, ipsec4_input_node.index,
			       IPSEC_INPUT_ERROR_RX_PKTS, frame->n_vectors);

  vlib_node_increment_counter (vm, ipsec4_input_node.index,
			       IPSEC_INPUT_ERROR_RX_POLICY_MATCH,
			       ipsec_matched);

  vlib_node_increment_counter (vm, ipsec4_input_node.index,
			       IPSEC_INPUT_ERROR_RX_POLICY_NO_MATCH,
			       ipsec_unprocessed);

  vlib_node_increment_counter (vm, ipsec4_input_node.index,
			       IPSEC_INPUT_ERROR_RX_POLICY_DISCARD,
			       ipsec_dropped);

  vlib_node_increment_counter (vm, ipsec4_input_node.index,
			       IPSEC_INPUT_ERROR_RX_POLICY_BYPASS,
			       ipsec_bypassed);

  return frame->n_vectors;
}
705
706
/* *INDENT-OFF* */
/* Node registration: errors and next nodes come from the foreach_*
   macros; the node runs as an internal feature node. */
VLIB_REGISTER_NODE (ipsec4_input_node) = {
  .name = "ipsec4-input-feature",
  .vector_size = sizeof (u32),
  .format_trace = format_ipsec_input_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN(ipsec_input_error_strings),
  .error_strings = ipsec_input_error_strings,
  .n_next_nodes = IPSEC_INPUT_N_NEXT,
  .next_nodes = {
#define _(s,n) [IPSEC_INPUT_NEXT_##s] = n,
    foreach_ipsec_input_next
#undef _
  },
};
/* *INDENT-ON* */
723
724 extern vlib_node_registration_t ipsec6_input_node;
725
726
727 VLIB_NODE_FN (ipsec6_input_node) (vlib_main_t * vm,
728                                   vlib_node_runtime_t * node,
729                                   vlib_frame_t * from_frame)
730 {
731   u32 n_left_from, *from, next_index, *to_next, thread_index;
732   ipsec_main_t *im = &ipsec_main;
733   u32 ipsec_unprocessed = 0;
734   u32 ipsec_matched = 0;
735
736   from = vlib_frame_vector_args (from_frame);
737   n_left_from = from_frame->n_vectors;
738   thread_index = vm->thread_index;
739
740   next_index = node->cached_next_index;
741
742   while (n_left_from > 0)
743     {
744       u32 n_left_to_next;
745
746       vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
747
748       while (n_left_from > 0 && n_left_to_next > 0)
749         {
750           u32 bi0, next0, pi0;
751           vlib_buffer_t *b0;
752           ip6_header_t *ip0;
753           esp_header_t *esp0;
754           ip4_ipsec_config_t *c0;
755           ipsec_spd_t *spd0;
756           ipsec_policy_t *p0 = 0;
757           ah_header_t *ah0;
758           u32 header_size = sizeof (ip0[0]);
759
760           bi0 = to_next[0] = from[0];
761           from += 1;
762           n_left_from -= 1;
763           to_next += 1;
764           n_left_to_next -= 1;
765
766           b0 = vlib_get_buffer (vm, bi0);
767           b0->flags |= VNET_BUFFER_F_IS_IP6;
768           b0->flags &= ~VNET_BUFFER_F_IS_IP4;
769           c0 = vnet_feature_next_with_data (&next0, b0, sizeof (c0[0]));
770
771           spd0 = pool_elt_at_index (im->spds, c0->spd_index);
772
773           ip0 = vlib_buffer_get_current (b0);
774           esp0 = (esp_header_t *) ((u8 *) ip0 + header_size);
775           ah0 = (ah_header_t *) ((u8 *) ip0 + header_size);
776
777           if (PREDICT_TRUE (ip0->protocol == IP_PROTOCOL_IPSEC_ESP))
778             {
779 #if 0
780               clib_warning
781                 ("packet received from %U to %U spi %u size %u spd_id %u",
782                  format_ip6_address, &ip0->src_address, format_ip6_address,
783                  &ip0->dst_address, clib_net_to_host_u32 (esp0->spi),
784                  clib_net_to_host_u16 (ip0->payload_length) + header_size,
785                  spd0->id);
786 #endif
787               p0 = ipsec6_input_protect_policy_match (spd0,
788                                                       &ip0->src_address,
789                                                       &ip0->dst_address,
790                                                       clib_net_to_host_u32
791                                                       (esp0->spi));
792
793               if (PREDICT_TRUE (p0 != 0))
794                 {
795                   ipsec_matched += 1;
796
797                   pi0 = p0 - im->policies;
798                   vlib_increment_combined_counter
799                     (&ipsec_spd_policy_counters,
800                      thread_index, pi0, 1,
801                      clib_net_to_host_u16 (ip0->payload_length) +
802                      header_size);
803
804                   vnet_buffer (b0)->ipsec.sad_index = p0->sa_index;
805                   next0 = im->esp6_decrypt_next_index;
806                   vlib_buffer_advance (b0, header_size);
807                   goto trace0;
808                 }
809               else
810                 {
811                   pi0 = ~0;
812                   ipsec_unprocessed += 1;
813                   next0 = IPSEC_INPUT_NEXT_DROP;
814                 }
815             }
816           else if (ip0->protocol == IP_PROTOCOL_IPSEC_AH)
817             {
818               p0 = ipsec6_input_protect_policy_match (spd0,
819                                                       &ip0->src_address,
820                                                       &ip0->dst_address,
821                                                       clib_net_to_host_u32
822                                                       (ah0->spi));
823
824               if (PREDICT_TRUE (p0 != 0))
825                 {
826                   ipsec_matched += 1;
827                   pi0 = p0 - im->policies;
828                   vlib_increment_combined_counter
829                     (&ipsec_spd_policy_counters,
830                      thread_index, pi0, 1,
831                      clib_net_to_host_u16 (ip0->payload_length) +
832                      header_size);
833
834                   vnet_buffer (b0)->ipsec.sad_index = p0->sa_index;
835                   next0 = im->ah6_decrypt_next_index;
836                   goto trace0;
837                 }
838               else
839                 {
840                   pi0 = ~0;
841                   ipsec_unprocessed += 1;
842                   next0 = IPSEC_INPUT_NEXT_DROP;
843                 }
844             }
845           else
846             {
847               ipsec_unprocessed += 1;
848             }
849
850         trace0:
851           if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE) &&
852               PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
853             {
854               ipsec_input_trace_t *tr =
855                 vlib_add_trace (vm, node, b0, sizeof (*tr));
856
857               if (p0)
858                 tr->sa_id = p0->sa_id;
859               tr->proto = ip0->protocol;
860               tr->spi = clib_net_to_host_u32 (esp0->spi);
861               tr->seq = clib_net_to_host_u32 (esp0->seq);
862               tr->spd = spd0->id;
863             }
864
865           vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
866                                            n_left_to_next, bi0, next0);
867         }
868       vlib_put_next_frame (vm, node, next_index, n_left_to_next);
869     }
870
871   vlib_node_increment_counter (vm, ipsec6_input_node.index,
872                                IPSEC_INPUT_ERROR_RX_PKTS,
873                                from_frame->n_vectors - ipsec_unprocessed);
874
875   vlib_node_increment_counter (vm, ipsec6_input_node.index,
876                                IPSEC_INPUT_ERROR_RX_POLICY_MATCH,
877                                ipsec_matched);
878
879   return from_frame->n_vectors;
880 }
881
882
/* *INDENT-OFF* */
/* Node registration: errors and next nodes come from the foreach_*
   macros; the node runs as an internal feature node. */
VLIB_REGISTER_NODE (ipsec6_input_node) = {
  .name = "ipsec6-input-feature",
  .vector_size = sizeof (u32),
  .format_trace = format_ipsec_input_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN(ipsec_input_error_strings),
  .error_strings = ipsec_input_error_strings,
  .n_next_nodes = IPSEC_INPUT_N_NEXT,
  .next_nodes = {
#define _(s,n) [IPSEC_INPUT_NEXT_##s] = n,
    foreach_ipsec_input_next
#undef _
  },
};
/* *INDENT-ON* */
899
900 /*
901  * fd.io coding-style-patch-verification: ON
902  *
903  * Local Variables:
904  * eval: (c-set-style "gnu")
905  * End:
906  */