/*
 * ipsec_input.c : IPsec inbound packet classification and policy matching
 *
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vnet/vnet.h>
#include <vnet/api_errno.h>
#include <vnet/ip/ip.h>
#include <vnet/feature/feature.h>
#include <vnet/ipsec/ipsec_spd_fp_lookup.h>

#include <vnet/ipsec/ipsec.h>
#include <vnet/ipsec/esp.h>
#include <vnet/ipsec/ah.h>
#include <vnet/ipsec/ipsec_io.h>

#define foreach_ipsec_input_error                       \
_(RX_PKTS, "IPSec pkts received")                       \
_(RX_POLICY_MATCH, "IPSec policy match")                \
_(RX_POLICY_NO_MATCH, "IPSec policy not matched")       \
_(RX_POLICY_BYPASS, "IPSec policy bypass")              \
_(RX_POLICY_DISCARD, "IPSec policy discard")

typedef enum
{
#define _(sym,str) IPSEC_INPUT_ERROR_##sym,
  foreach_ipsec_input_error
#undef _
    IPSEC_INPUT_N_ERROR,
} ipsec_input_error_t;

static char *ipsec_input_error_strings[] = {
#define _(sym,string) string,
  foreach_ipsec_input_error
#undef _
};


typedef struct
{
  ip_protocol_t proto;
  u32 spd;
  u32 policy_index;
  u32 policy_type;
  u32 sa_id;
  u32 spi;
  u32 seq;
} ipsec_input_trace_t;

/* packet trace format function */
static u8 *
format_ipsec_input_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  ipsec_input_trace_t *t = va_arg (*args, ipsec_input_trace_t *);

  s =
    format (s, "%U: sa_id %u type: %u spd %u policy %d spi %u (0x%08x) seq %u",
            format_ip_protocol, t->proto, t->sa_id, t->policy_type, t->spd,
            t->policy_index, t->spi, t->spi, t->seq);

  return s;
}

always_inline void
ipsec4_input_spd_add_flow_cache_entry (ipsec_main_t *im, u32 sa, u32 da,
                                       ipsec_spd_policy_type_t policy_type,
                                       u32 pol_id)
{
  u64 hash;
  u8 is_overwrite = 0, is_stale_overwrite = 0;
  /* Store in network byte order to avoid conversion on lookup */
  ipsec4_inbound_spd_tuple_t ip4_tuple = {
    .ip4_src_addr = (ip4_address_t) clib_host_to_net_u32 (sa),
    .ip4_dest_addr = (ip4_address_t) clib_host_to_net_u32 (da),
    .policy_type = policy_type
  };

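  /*
   * Pack the policy pool index and the current epoch into the cached
   * 64-bit value; an illustrative sketch of the layout used below:
   *   bits 63..32  policy pool index (pol_id)
   *   bits 31..0   im->input_epoch_count at insertion time
   * e.g. pol_id 7 cached at epoch 3 gives 0x0000000700000003, so a
   * lookup can reject entries from an older epoch with one compare.
   */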
  ip4_tuple.kv_16_8.value =
    (((u64) pol_id) << 32) | ((u64) im->input_epoch_count);

  hash = ipsec4_hash_16_8 (&ip4_tuple.kv_16_8);
  hash &= (im->ipsec4_in_spd_hash_num_buckets - 1);

  ipsec_spinlock_lock (&im->ipsec4_in_spd_hash_tbl[hash].bucket_lock);
  /* Check whether we are overwriting an existing entry so we know
     whether to increment the flow cache counter. Since the flow
     cache counter is reset on any policy add/remove while the hash
     table values are not, we must also check whether the entry being
     overwritten is stale: a stale-entry overwrite still adds an
     active entry, so the counter is incremented in that case too */
  is_overwrite = (im->ipsec4_in_spd_hash_tbl[hash].value != 0);
  /* Detect a stale entry by comparing its epoch with the current
     epoch count */
  if (PREDICT_FALSE (is_overwrite))
    is_stale_overwrite =
      (im->input_epoch_count !=
       ((u32) (im->ipsec4_in_spd_hash_tbl[hash].value & 0xFFFFFFFF)));
  clib_memcpy_fast (&im->ipsec4_in_spd_hash_tbl[hash], &ip4_tuple.kv_16_8,
                    sizeof (ip4_tuple.kv_16_8));
  ipsec_spinlock_unlock (&im->ipsec4_in_spd_hash_tbl[hash].bucket_lock);

  /* Track active flow cache entries: count a fresh entry or a
     stale-entry overwrite */
  if (!is_overwrite || is_stale_overwrite)
    clib_atomic_fetch_add_relax (&im->ipsec4_in_spd_flow_cache_entries, 1);
}

always_inline ipsec_policy_t *
ipsec4_input_spd_find_flow_cache_entry (ipsec_main_t *im, u32 sa, u32 da,
                                        ipsec_spd_policy_type_t policy_type)
{
  ipsec_policy_t *p = NULL;
  ipsec4_hash_kv_16_8_t kv_result;
  u64 hash;
  ipsec4_inbound_spd_tuple_t ip4_tuple = { .ip4_src_addr = (ip4_address_t) sa,
                                           .ip4_dest_addr = (ip4_address_t) da,
                                           .policy_type = policy_type };

  hash = ipsec4_hash_16_8 (&ip4_tuple.kv_16_8);
  hash &= (im->ipsec4_in_spd_hash_num_buckets - 1);

  ipsec_spinlock_lock (&im->ipsec4_in_spd_hash_tbl[hash].bucket_lock);
  kv_result = im->ipsec4_in_spd_hash_tbl[hash];
  ipsec_spinlock_unlock (&im->ipsec4_in_spd_hash_tbl[hash].bucket_lock);

  if (ipsec4_hash_key_compare_16_8 ((u64 *) &ip4_tuple.kv_16_8,
                                    (u64 *) &kv_result))
    {
      if (im->input_epoch_count == ((u32) (kv_result.value & 0xFFFFFFFF)))
        {
          /* Fetch the policy using the cached pool index */
          p =
            pool_elt_at_index (im->policies, ((u32) (kv_result.value >> 32)));
        }
    }

  return p;
}
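
/*
 * Cache lookup protocol (illustrative): a hit is only honoured when
 * both the 16-byte key matches and the entry's epoch equals
 * im->input_epoch_count; a miss or stale hit makes the caller fall
 * back to the linear SPD walk and re-populate the cache via
 * ipsec4_input_spd_add_flow_cache_entry().
 */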

always_inline void
ipsec_fp_in_5tuple_from_ip4_range (ipsec_fp_5tuple_t *tuple, u32 sa, u32 da,
                                   u32 spi, u8 action)
{
  clib_memset (tuple->l3_zero_pad, 0, sizeof (tuple->l3_zero_pad));
  tuple->laddr.as_u32 = da;
  tuple->raddr.as_u32 = sa;
  tuple->spi = spi;
  tuple->action = action;
  tuple->is_ipv6 = 0;
}

always_inline void
ipsec_fp_in_5tuple_from_ip6_range (ipsec_fp_5tuple_t *tuple, ip6_address_t *sa,
                                   ip6_address_t *da, u32 spi, u8 action)
{
  clib_memcpy (&tuple->ip6_laddr, da, sizeof (ip6_address_t));
  clib_memcpy (&tuple->ip6_raddr, sa, sizeof (ip6_address_t));

  tuple->spi = spi;
  tuple->action = action;
  tuple->is_ipv6 = 1;
}

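/*
 * Linear SPD walk for BYPASS/DISCARD lookups. Callers pass sa/da in
 * host byte order, while policy address ranges are stored in network
 * byte order, hence the conversions below.
 */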
always_inline ipsec_policy_t *
ipsec_input_policy_match (ipsec_spd_t *spd, u32 sa, u32 da,
                          ipsec_spd_policy_type_t policy_type)
{
  ipsec_main_t *im = &ipsec_main;
  ipsec_policy_t *p;
  u32 *i;

  vec_foreach (i, spd->policies[policy_type])
  {
    p = pool_elt_at_index (im->policies, *i);

    if (da < clib_net_to_host_u32 (p->laddr.start.ip4.as_u32))
      continue;

    if (da > clib_net_to_host_u32 (p->laddr.stop.ip4.as_u32))
      continue;

    if (sa < clib_net_to_host_u32 (p->raddr.start.ip4.as_u32))
      continue;

    if (sa > clib_net_to_host_u32 (p->raddr.stop.ip4.as_u32))
      continue;

    if (im->input_flow_cache_flag)
      {
        /* Add an entry to the flow cache */
        ipsec4_input_spd_add_flow_cache_entry (im, sa, da, policy_type, *i);
      }
    return p;
  }
  return 0;
}

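/*
 * Linear SPD walk for PROTECT policies: the SPI must match the
 * policy's SA; tunnel SAs additionally require an exact match on the
 * tunnel endpoints, otherwise the policy's address ranges apply.
 */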
always_inline ipsec_policy_t *
ipsec_input_protect_policy_match (ipsec_spd_t *spd, u32 sa, u32 da, u32 spi)
{
  ipsec_main_t *im = &ipsec_main;
  ipsec_policy_t *p;
  ipsec_sa_t *s;
  u32 *i;

  vec_foreach (i, spd->policies[IPSEC_SPD_POLICY_IP4_INBOUND_PROTECT])
  {
    p = pool_elt_at_index (im->policies, *i);
    s = ipsec_sa_get (p->sa_index);

    if (spi != s->spi)
      continue;

    if (ipsec_sa_is_set_IS_TUNNEL (s))
      {
        if (da != clib_net_to_host_u32 (s->tunnel.t_dst.ip.ip4.as_u32))
          continue;

        if (sa != clib_net_to_host_u32 (s->tunnel.t_src.ip.ip4.as_u32))
          continue;

        goto return_policy;
      }

    if (da < clib_net_to_host_u32 (p->laddr.start.ip4.as_u32))
      continue;

    if (da > clib_net_to_host_u32 (p->laddr.stop.ip4.as_u32))
      continue;

    if (sa < clib_net_to_host_u32 (p->raddr.start.ip4.as_u32))
      continue;

    if (sa > clib_net_to_host_u32 (p->raddr.stop.ip4.as_u32))
      continue;

  return_policy:
    if (im->input_flow_cache_flag)
      {
        /* Add an entry to the flow cache */
        ipsec4_input_spd_add_flow_cache_entry (
          im, sa, da, IPSEC_SPD_POLICY_IP4_INBOUND_PROTECT, *i);
      }

    return p;
  }
  return 0;
}

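/*
 * IPv6 addresses are stored in network (big-endian) byte order, so a
 * plain memcmp over the 16 bytes is equivalent to a numeric
 * comparison and suffices for range matching.
 */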
always_inline uword
ip6_addr_match_range (ip6_address_t * a, ip6_address_t * la,
                      ip6_address_t * ua)
{
  if ((memcmp (a->as_u64, la->as_u64, 2 * sizeof (u64)) >= 0) &&
      (memcmp (a->as_u64, ua->as_u64, 2 * sizeof (u64)) <= 0))
    return 1;
  return 0;
}

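/*
 * Inbound policy lookup for a single ESP or UDP-encapsulated ESP
 * packet. Policies are tried in the order PROTECT, BYPASS, DISCARD;
 * the first match wins. With the flow cache enabled, the cache is
 * consulted first and, on a complete miss, the lookup restarts once
 * as a linear SPD walk (via the udp_or_esp label). Packets matching
 * nothing are dropped.
 */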
always_inline void
ipsec_esp_packet_process (vlib_main_t *vm, ipsec_main_t *im, ip4_header_t *ip0,
                          esp_header_t *esp0, u32 thread_index,
                          ipsec_spd_t *spd0, vlib_buffer_t **b,
                          vlib_node_runtime_t *node, u64 *ipsec_bypassed,
                          u64 *ipsec_dropped, u64 *ipsec_matched,
                          u64 *ipsec_unprocessed, u16 *next)
{
  ipsec_policy_t *p0 = NULL;
  u32 pi0;
  u8 has_space0;
  bool search_flow_cache = false;
  ipsec_policy_t *policies[1];
  ipsec_fp_5tuple_t tuples[1];
  bool ip_v6 = true;

  /* If the flow cache is enabled, first search it for a policy match,
   * trying protect, bypass and discard rules in that order. If no
   * match is found, search_flow_cache is set to false and we fall
   * back to a linear SPD search.
   */

  search_flow_cache = im->input_flow_cache_flag;
udp_or_esp:

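  /*
   * A zero value in the SPI position never identifies an ESP SA; for
   * UDP-encapsulated traffic on port 4500 (RFC 3948), four zero bytes
   * here are the non-ESP marker carried by IKE packets.
   */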
  /* The SPI field in the ESP header MUST NOT be zero */
  if (esp0->spi == 0)
    {
      /* Drop the packet if the SPI is zero */
      *ipsec_unprocessed += 1;
      next[0] = IPSEC_INPUT_NEXT_DROP;
      return;
    }

  if (im->fp_spd_ipv4_in_is_enabled &&
      PREDICT_TRUE (INDEX_INVALID != spd0->fp_spd.ip4_in_lookup_hash_idx))
    {
      ipsec_fp_in_5tuple_from_ip4_range (&tuples[0], ip0->src_address.as_u32,
                                         ip0->dst_address.as_u32,
                                         clib_net_to_host_u32 (esp0->spi),
                                         IPSEC_SPD_POLICY_IP4_INBOUND_PROTECT);
      ipsec_fp_in_policy_match_n (&spd0->fp_spd, !ip_v6, tuples, policies, 1);
      p0 = policies[0];
    }
  else if (search_flow_cache) /* attempt to match policy in flow cache */
    {
      p0 = ipsec4_input_spd_find_flow_cache_entry (
        im, ip0->src_address.as_u32, ip0->dst_address.as_u32,
        IPSEC_SPD_POLICY_IP4_INBOUND_PROTECT);
    }
  else /* linear search if flow cache is not enabled,
          or flow cache search just failed */
    {
      p0 = ipsec_input_protect_policy_match (
        spd0, clib_net_to_host_u32 (ip0->src_address.as_u32),
        clib_net_to_host_u32 (ip0->dst_address.as_u32),
        clib_net_to_host_u32 (esp0->spi));
    }
  has_space0 = vlib_buffer_has_space (b[0], (clib_address_t) (esp0 + 1) -
                                              (clib_address_t) ip0);

  if (PREDICT_TRUE ((p0 != NULL) & (has_space0)))
    {
      *ipsec_matched += 1;

      pi0 = p0 - im->policies;
      vlib_increment_combined_counter (&ipsec_spd_policy_counters,
                                       thread_index, pi0, 1,
                                       clib_net_to_host_u16 (ip0->length));

      vnet_buffer (b[0])->ipsec.sad_index = p0->sa_index;
      next[0] = im->esp4_decrypt_next_index;
      vlib_buffer_advance (b[0], ((u8 *) esp0 - (u8 *) ip0));
      goto trace0;
    }
  else
    {
      p0 = 0;
      pi0 = ~0;
    }

  if (im->fp_spd_ipv4_in_is_enabled &&
      PREDICT_TRUE (INDEX_INVALID != spd0->fp_spd.ip4_in_lookup_hash_idx))
    {
      tuples->action = IPSEC_SPD_POLICY_IP4_INBOUND_BYPASS;
      ipsec_fp_in_policy_match_n (&spd0->fp_spd, !ip_v6, tuples, policies, 1);
      p0 = policies[0];
    }
  else if (search_flow_cache)
    {
      p0 = ipsec4_input_spd_find_flow_cache_entry (
        im, ip0->src_address.as_u32, ip0->dst_address.as_u32,
        IPSEC_SPD_POLICY_IP4_INBOUND_BYPASS);
    }
  else
    {
      p0 = ipsec_input_policy_match (
        spd0, clib_net_to_host_u32 (ip0->src_address.as_u32),
        clib_net_to_host_u32 (ip0->dst_address.as_u32),
        IPSEC_SPD_POLICY_IP4_INBOUND_BYPASS);
    }

  if (PREDICT_TRUE ((p0 != NULL)))
    {
      *ipsec_bypassed += 1;

      pi0 = p0 - im->policies;
      vlib_increment_combined_counter (&ipsec_spd_policy_counters,
                                       thread_index, pi0, 1,
                                       clib_net_to_host_u16 (ip0->length));

      goto trace0;
    }
  else
    {
      p0 = 0;
      pi0 = ~0;
    }

  if (im->fp_spd_ipv4_in_is_enabled &&
      PREDICT_TRUE (INDEX_INVALID != spd0->fp_spd.ip4_in_lookup_hash_idx))
    {
      tuples->action = IPSEC_SPD_POLICY_IP4_INBOUND_DISCARD;
      ipsec_fp_in_policy_match_n (&spd0->fp_spd, !ip_v6, tuples, policies, 1);
      p0 = policies[0];
    }
  else if (search_flow_cache)
    {
      p0 = ipsec4_input_spd_find_flow_cache_entry (
        im, ip0->src_address.as_u32, ip0->dst_address.as_u32,
        IPSEC_SPD_POLICY_IP4_INBOUND_DISCARD);
    }
  else
    {
      p0 = ipsec_input_policy_match (
        spd0, clib_net_to_host_u32 (ip0->src_address.as_u32),
        clib_net_to_host_u32 (ip0->dst_address.as_u32),
        IPSEC_SPD_POLICY_IP4_INBOUND_DISCARD);
    }

  if (PREDICT_TRUE ((p0 != NULL)))
    {
      *ipsec_dropped += 1;

      pi0 = p0 - im->policies;
      vlib_increment_combined_counter (&ipsec_spd_policy_counters,
                                       thread_index, pi0, 1,
                                       clib_net_to_host_u16 (ip0->length));

      next[0] = IPSEC_INPUT_NEXT_DROP;
      goto trace0;
    }
  else
    {
      p0 = 0;
      pi0 = ~0;
    }

  /* flow cache search failed, try again with a linear search */
  if (search_flow_cache && p0 == NULL)
    {
      search_flow_cache = false;
      goto udp_or_esp;
    }

  /* Drop by default if no match on PROTECT, BYPASS or DISCARD */
  *ipsec_unprocessed += 1;
  next[0] = IPSEC_INPUT_NEXT_DROP;

trace0:
  if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE) &&
      PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
    {
      ipsec_input_trace_t *tr = vlib_add_trace (vm, node, b[0], sizeof (*tr));

      tr->proto = ip0->protocol;
      tr->sa_id = p0 ? p0->sa_id : ~0;
      tr->spi = has_space0 ? clib_net_to_host_u32 (esp0->spi) : ~0;
      tr->seq = has_space0 ? clib_net_to_host_u32 (esp0->seq) : ~0;
      tr->spd = spd0->id;
      tr->policy_index = pi0;
    }
}

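/*
 * IPv6 counterpart of ipsec_input_protect_policy_match(): SPI first,
 * exact tunnel endpoints for tunnel SAs, address ranges otherwise.
 */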
always_inline ipsec_policy_t *
ipsec6_input_protect_policy_match (ipsec_spd_t * spd,
                                   ip6_address_t * sa,
                                   ip6_address_t * da, u32 spi)
{
  ipsec_main_t *im = &ipsec_main;
  ipsec_policy_t *p;
  ipsec_sa_t *s;
  u32 *i;

  vec_foreach (i, spd->policies[IPSEC_SPD_POLICY_IP6_INBOUND_PROTECT])
  {
    p = pool_elt_at_index (im->policies, *i);
    s = ipsec_sa_get (p->sa_index);

    if (spi != s->spi)
      continue;

    if (ipsec_sa_is_set_IS_TUNNEL (s))
      {
        if (!ip6_address_is_equal (sa, &s->tunnel.t_src.ip.ip6))
          continue;

        if (!ip6_address_is_equal (da, &s->tunnel.t_dst.ip.ip6))
          continue;

        return p;
      }

    if (!ip6_addr_match_range (sa, &p->raddr.start.ip6, &p->raddr.stop.ip6))
      continue;

    if (!ip6_addr_match_range (da, &p->laddr.start.ip6, &p->laddr.stop.ip6))
      continue;

    return p;
  }
  return 0;
}

extern vlib_node_registration_t ipsec4_input_node;

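/*
 * ipsec4-input-feature: per-packet dispatch. UDP datagrams with a
 * zero checksum are probed for UDP-encapsulated ESP, native ESP and
 * AH are matched against the SPD, and anything else is passed along
 * unprocessed.
 */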
VLIB_NODE_FN (ipsec4_input_node) (vlib_main_t * vm,
                                  vlib_node_runtime_t * node,
                                  vlib_frame_t * frame)
{
  u32 n_left_from, *from, thread_index;
  ipsec_main_t *im = &ipsec_main;
  u64 ipsec_unprocessed = 0, ipsec_matched = 0;
  u64 ipsec_dropped = 0, ipsec_bypassed = 0;
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE];
  vlib_buffer_t **b = bufs;
  u16 nexts[VLIB_FRAME_SIZE], *next;

  from = vlib_frame_vector_args (frame);
  n_left_from = frame->n_vectors;
  next = nexts;
  vlib_get_buffers (vm, from, bufs, n_left_from);
  thread_index = vm->thread_index;

  while (n_left_from > 0)
    {
      u32 next32, pi0;
      ip4_header_t *ip0;
      esp_header_t *esp0 = NULL;
      ah_header_t *ah0;
      ip4_ipsec_config_t *c0;
      ipsec_spd_t *spd0;
      ipsec_policy_t *p0 = NULL;
      u8 has_space0;
      bool search_flow_cache = false;

      if (n_left_from > 2)
        {
          vlib_prefetch_buffer_data (b[1], LOAD);
        }

      b[0]->flags |= VNET_BUFFER_F_IS_IP4;
      b[0]->flags &= ~VNET_BUFFER_F_IS_IP6;
      c0 = vnet_feature_next_with_data (&next32, b[0], sizeof (c0[0]));
      next[0] = (u16) next32;

      spd0 = pool_elt_at_index (im->spds, c0->spd_index);

      ip0 = vlib_buffer_get_current (b[0]);

      if (ip0->protocol == IP_PROTOCOL_UDP)
        {
          udp_header_t *udp0 = NULL;
          udp0 = (udp_header_t *) ((u8 *) ip0 + ip4_header_bytes (ip0));

          /* Per RFC 3948, the checksum of a UDP-encapsulated ESP
           * header MUST be transmitted as zero and receivers MUST NOT
           * depend upon it; the SPI inside the ESP header MUST NOT be
           * zero.
           */

          if (udp0->checksum == 0)
            {
              esp0 = (esp_header_t *) ((u8 *) udp0 + sizeof (udp_header_t));

              ipsec_esp_packet_process (vm, im, ip0, esp0, thread_index, spd0,
                                        b, node, &ipsec_bypassed,
                                        &ipsec_dropped, &ipsec_matched,
                                        &ipsec_unprocessed, next);
              if (ipsec_bypassed > 0)
                goto ipsec_bypassed;
            }
        }
      else if (PREDICT_TRUE (ip0->protocol == IP_PROTOCOL_IPSEC_ESP))
        {
          esp0 = (esp_header_t *) ((u8 *) ip0 + ip4_header_bytes (ip0));
          ipsec_esp_packet_process (vm, im, ip0, esp0, thread_index, spd0, b,
                                    node, &ipsec_bypassed, &ipsec_dropped,
                                    &ipsec_matched, &ipsec_unprocessed, next);
          if (ipsec_bypassed > 0)
            goto ipsec_bypassed;
        }
      else if (ip0->protocol == IP_PROTOCOL_IPSEC_AH)
        {
          ah0 = (ah_header_t *) ((u8 *) ip0 + ip4_header_bytes (ip0));

          // if flow cache is enabled, first search through flow cache for a
          // policy match and revert back to linear search on failure
          search_flow_cache = im->input_flow_cache_flag;

        ah:
          if (search_flow_cache)
            {
              p0 = ipsec4_input_spd_find_flow_cache_entry (
                im, ip0->src_address.as_u32, ip0->dst_address.as_u32,
                IPSEC_SPD_POLICY_IP4_INBOUND_PROTECT);
            }
          else
            {
              p0 = ipsec_input_protect_policy_match (
                spd0, clib_net_to_host_u32 (ip0->src_address.as_u32),
                clib_net_to_host_u32 (ip0->dst_address.as_u32),
                clib_net_to_host_u32 (ah0->spi));
            }

          has_space0 =
            vlib_buffer_has_space (b[0],
                                   (clib_address_t) (ah0 + 1) -
                                   (clib_address_t) ip0);

          if (PREDICT_TRUE ((p0 != NULL) & (has_space0)))
            {
              ipsec_matched += 1;

              pi0 = p0 - im->policies;
              vlib_increment_combined_counter
                (&ipsec_spd_policy_counters,
                 thread_index, pi0, 1, clib_net_to_host_u16 (ip0->length));

              vnet_buffer (b[0])->ipsec.sad_index = p0->sa_index;
              next[0] = im->ah4_decrypt_next_index;
              goto trace1;
            }
          else
            {
              p0 = 0;
              pi0 = ~0;
            }

          if (search_flow_cache)
            {
              p0 = ipsec4_input_spd_find_flow_cache_entry (
                im, ip0->src_address.as_u32, ip0->dst_address.as_u32,
                IPSEC_SPD_POLICY_IP4_INBOUND_BYPASS);
            }
          else
            {
              p0 = ipsec_input_policy_match (
                spd0, clib_net_to_host_u32 (ip0->src_address.as_u32),
                clib_net_to_host_u32 (ip0->dst_address.as_u32),
                IPSEC_SPD_POLICY_IP4_INBOUND_BYPASS);
            }

          if (PREDICT_TRUE ((p0 != NULL)))
            {
              ipsec_bypassed += 1;

              pi0 = p0 - im->policies;
              vlib_increment_combined_counter (
                &ipsec_spd_policy_counters, thread_index, pi0, 1,
                clib_net_to_host_u16 (ip0->length));

              goto trace1;
            }
          else
            {
              p0 = 0;
              pi0 = ~0;
            }

          if (search_flow_cache)
            {
              p0 = ipsec4_input_spd_find_flow_cache_entry (
                im, ip0->src_address.as_u32, ip0->dst_address.as_u32,
                IPSEC_SPD_POLICY_IP4_INBOUND_DISCARD);
            }
          else
            {
              p0 = ipsec_input_policy_match (
                spd0, clib_net_to_host_u32 (ip0->src_address.as_u32),
                clib_net_to_host_u32 (ip0->dst_address.as_u32),
                IPSEC_SPD_POLICY_IP4_INBOUND_DISCARD);
            }

          if (PREDICT_TRUE ((p0 != NULL)))
            {
              ipsec_dropped += 1;

              pi0 = p0 - im->policies;
              vlib_increment_combined_counter (
                &ipsec_spd_policy_counters, thread_index, pi0, 1,
                clib_net_to_host_u16 (ip0->length));

              next[0] = IPSEC_INPUT_NEXT_DROP;
              goto trace1;
            }
          else
            {
              p0 = 0;
              pi0 = ~0;
            }

          // flow cache search failed, retry with linear search
          if (search_flow_cache && p0 == NULL)
            {
              search_flow_cache = false;
              goto ah;
            }

          /* Drop by default if no match on PROTECT, BYPASS or DISCARD */
          ipsec_unprocessed += 1;
          next[0] = IPSEC_INPUT_NEXT_DROP;

        trace1:
          if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE) &&
              PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
            {
              ipsec_input_trace_t *tr =
                vlib_add_trace (vm, node, b[0], sizeof (*tr));

              tr->proto = ip0->protocol;
              tr->sa_id = p0 ? p0->sa_id : ~0;
              tr->spi = has_space0 ? clib_net_to_host_u32 (ah0->spi) : ~0;
              tr->seq = has_space0 ? clib_net_to_host_u32 (ah0->seq_no) : ~0;
              tr->spd = spd0->id;
              tr->policy_index = pi0;
            }
        }
      else
        {
        ipsec_bypassed:
          ipsec_unprocessed += 1;
        }
      n_left_from -= 1;
      b += 1;
      next += 1;
    }

  vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);

  vlib_node_increment_counter (vm, ipsec4_input_node.index,
                               IPSEC_INPUT_ERROR_RX_PKTS, frame->n_vectors);

  vlib_node_increment_counter (vm, ipsec4_input_node.index,
                               IPSEC_INPUT_ERROR_RX_POLICY_MATCH,
                               ipsec_matched);

  vlib_node_increment_counter (vm, ipsec4_input_node.index,
                               IPSEC_INPUT_ERROR_RX_POLICY_NO_MATCH,
                               ipsec_unprocessed);

  vlib_node_increment_counter (vm, ipsec4_input_node.index,
                               IPSEC_INPUT_ERROR_RX_POLICY_DISCARD,
                               ipsec_dropped);

  vlib_node_increment_counter (vm, ipsec4_input_node.index,
                               IPSEC_INPUT_ERROR_RX_POLICY_BYPASS,
                               ipsec_bypassed);

  return frame->n_vectors;
}

VLIB_REGISTER_NODE (ipsec4_input_node) = {
  .name = "ipsec4-input-feature",
  .vector_size = sizeof (u32),
  .format_trace = format_ipsec_input_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN(ipsec_input_error_strings),
  .error_strings = ipsec_input_error_strings,
  .n_next_nodes = IPSEC_INPUT_N_NEXT,
  .next_nodes = {
#define _(s,n) [IPSEC_INPUT_NEXT_##s] = n,
    foreach_ipsec_input_next
#undef _
  },
};

extern vlib_node_registration_t ipsec6_input_node;

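/*
 * ipsec6-input-feature: handles native ESP and AH only; unlike the
 * IPv4 node there is no flow cache and no UDP decapsulation, and only
 * PROTECT policies are matched (BYPASS/DISCARD matching is a noted
 * TODO below).
 */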
VLIB_NODE_FN (ipsec6_input_node) (vlib_main_t * vm,
                                  vlib_node_runtime_t * node,
                                  vlib_frame_t * from_frame)
{
  u32 n_left_from, *from, next_index, *to_next, thread_index;
  ipsec_main_t *im = &ipsec_main;
  u32 ipsec_unprocessed = 0;
  u32 ipsec_matched = 0;
  ipsec_policy_t *policies[1];
  ipsec_fp_5tuple_t tuples[1];
  bool ip_v6 = true;

  from = vlib_frame_vector_args (from_frame);
  n_left_from = from_frame->n_vectors;
  thread_index = vm->thread_index;

  next_index = node->cached_next_index;

  while (n_left_from > 0)
    {
      u32 n_left_to_next;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      while (n_left_from > 0 && n_left_to_next > 0)
        {
          u32 bi0, next0, pi0 = ~0;
          vlib_buffer_t *b0;
          ip6_header_t *ip0;
          esp_header_t *esp0;
          ip4_ipsec_config_t *c0;
          ipsec_spd_t *spd0;
          ipsec_policy_t *p0 = 0;
          ah_header_t *ah0;
          u32 header_size = sizeof (ip0[0]);

          bi0 = to_next[0] = from[0];
          from += 1;
          n_left_from -= 1;
          to_next += 1;
          n_left_to_next -= 1;

          b0 = vlib_get_buffer (vm, bi0);
          b0->flags |= VNET_BUFFER_F_IS_IP6;
          b0->flags &= ~VNET_BUFFER_F_IS_IP4;
          c0 = vnet_feature_next_with_data (&next0, b0, sizeof (c0[0]));

          spd0 = pool_elt_at_index (im->spds, c0->spd_index);

          ip0 = vlib_buffer_get_current (b0);
          esp0 = (esp_header_t *) ((u8 *) ip0 + header_size);
          ah0 = (ah_header_t *) ((u8 *) ip0 + header_size);

          if (PREDICT_TRUE (ip0->protocol == IP_PROTOCOL_IPSEC_ESP))
            {
#if 0
              clib_warning
                ("packet received from %U to %U spi %u size %u spd_id %u",
                 format_ip6_address, &ip0->src_address, format_ip6_address,
                 &ip0->dst_address, clib_net_to_host_u32 (esp0->spi),
                 clib_net_to_host_u16 (ip0->payload_length) + header_size,
                 spd0->id);
#endif
              if (im->fp_spd_ipv6_in_is_enabled &&
                  PREDICT_TRUE (INDEX_INVALID !=
                                spd0->fp_spd.ip6_in_lookup_hash_idx))
                {
                  ipsec_fp_in_5tuple_from_ip6_range (
                    &tuples[0], &ip0->src_address, &ip0->dst_address,
                    clib_net_to_host_u32 (esp0->spi),
                    IPSEC_SPD_POLICY_IP6_INBOUND_PROTECT);
                  ipsec_fp_in_policy_match_n (&spd0->fp_spd, ip_v6, tuples,
                                              policies, 1);
                  p0 = policies[0];
                }
              else
                p0 = ipsec6_input_protect_policy_match (
                  spd0, &ip0->src_address, &ip0->dst_address,
                  clib_net_to_host_u32 (esp0->spi));

              if (PREDICT_TRUE (p0 != 0))
                {
                  ipsec_matched += 1;

                  pi0 = p0 - im->policies;
                  vlib_increment_combined_counter
                    (&ipsec_spd_policy_counters,
                     thread_index, pi0, 1,
                     clib_net_to_host_u16 (ip0->payload_length) +
                     header_size);

                  vnet_buffer (b0)->ipsec.sad_index = p0->sa_index;
                  next0 = im->esp6_decrypt_next_index;
                  vlib_buffer_advance (b0, header_size);
                  /* TODO Add policy matching for bypass and discard policy
                   * type */
                  goto trace0;
                }
              else
                {
                  pi0 = ~0;
                  ipsec_unprocessed += 1;
                  next0 = IPSEC_INPUT_NEXT_DROP;
                }
            }
          else if (ip0->protocol == IP_PROTOCOL_IPSEC_AH)
            {
              p0 = ipsec6_input_protect_policy_match (spd0,
                                                      &ip0->src_address,
                                                      &ip0->dst_address,
                                                      clib_net_to_host_u32
                                                      (ah0->spi));

              if (PREDICT_TRUE (p0 != 0))
                {
                  ipsec_matched += 1;
                  pi0 = p0 - im->policies;
                  vlib_increment_combined_counter
                    (&ipsec_spd_policy_counters,
                     thread_index, pi0, 1,
                     clib_net_to_host_u16 (ip0->payload_length) +
                     header_size);

                  vnet_buffer (b0)->ipsec.sad_index = p0->sa_index;
                  next0 = im->ah6_decrypt_next_index;
                  goto trace0;
                }
              else
                {
                  pi0 = ~0;
                  ipsec_unprocessed += 1;
                  next0 = IPSEC_INPUT_NEXT_DROP;
                }
            }
          else
            {
              ipsec_unprocessed += 1;
            }

        trace0:
          if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE) &&
              PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
            {
              ipsec_input_trace_t *tr =
                vlib_add_trace (vm, node, b0, sizeof (*tr));

              if (p0)
                {
                  tr->sa_id = p0->sa_id;
                  tr->policy_type = p0->type;
                }

              tr->proto = ip0->protocol;
              tr->spi = clib_net_to_host_u32 (esp0->spi);
              tr->seq = clib_net_to_host_u32 (esp0->seq);
              tr->spd = spd0->id;
              tr->policy_index = pi0;
            }

          vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
                                           n_left_to_next, bi0, next0);
        }
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  vlib_node_increment_counter (vm, ipsec6_input_node.index,
                               IPSEC_INPUT_ERROR_RX_PKTS,
                               from_frame->n_vectors - ipsec_unprocessed);

  vlib_node_increment_counter (vm, ipsec6_input_node.index,
                               IPSEC_INPUT_ERROR_RX_POLICY_MATCH,
                               ipsec_matched);

  return from_frame->n_vectors;
}

VLIB_REGISTER_NODE (ipsec6_input_node) = {
  .name = "ipsec6-input-feature",
  .vector_size = sizeof (u32),
  .format_trace = format_ipsec_input_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_errors = ARRAY_LEN(ipsec_input_error_strings),
  .error_strings = ipsec_input_error_strings,
  .n_next_nodes = IPSEC_INPUT_N_NEXT,
  .next_nodes = {
#define _(s,n) [IPSEC_INPUT_NEXT_##s] = n,
    foreach_ipsec_input_next
#undef _
  },
};

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */