/*
 *------------------------------------------------------------------
 * Copyright (c) 2021 Intel and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#ifndef IPSEC_OUTPUT_H
#define IPSEC_OUTPUT_H

#include <vppinfra/types.h>
#include <vnet/ipsec/ipsec_spd.h>
#include <vnet/ipsec/ipsec_spd_fp_lookup.h>

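/* Insert one entry into the IPv4 outbound SPD flow cache. The bucket is
   selected by hashing the 5-tuple; the stored value carries the matched
   policy index in the upper 32 bits and the current epoch count in the
   lower 32 bits, so stale entries can be detected after policy changes. */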
always_inline void
ipsec4_out_spd_add_flow_cache_entry (ipsec_main_t *im, u8 pr, u32 la, u32 ra,
                                     u16 lp, u16 rp, u32 pol_id)
{
  u64 hash;
  u8 overwrite = 0, stale_overwrite = 0;
  ipsec4_spd_5tuple_t ip4_5tuple = { .ip4_addr = { (ip4_address_t) la,
                                                   (ip4_address_t) ra },
                                     .port = { lp, rp },
                                     .proto = pr };

  ip4_5tuple.kv_16_8.value = (((u64) pol_id) << 32) | ((u64) im->epoch_count);

  hash = ipsec4_hash_16_8 (&ip4_5tuple.kv_16_8);
  hash &= (im->ipsec4_out_spd_hash_num_buckets - 1);

  ipsec_spinlock_lock (&im->ipsec4_out_spd_hash_tbl[hash].bucket_lock);
  /* Check whether we are overwriting an existing entry so we know
     whether to increment the flow cache counter. Since the flow cache
     counter is reset on any policy add/remove but the hash table values
     are not, we also need to check whether the entry being overwritten
     is stale. When overwriting a stale entry, we still want to
     increment the flow cache counter. */
  overwrite = (im->ipsec4_out_spd_hash_tbl[hash].value != 0);
  /* Check for stale entry by comparing with current epoch count */
  if (PREDICT_FALSE (overwrite))
    stale_overwrite =
      (im->epoch_count !=
       ((u32) (im->ipsec4_out_spd_hash_tbl[hash].value & 0xFFFFFFFF)));
  clib_memcpy_fast (&im->ipsec4_out_spd_hash_tbl[hash], &ip4_5tuple.kv_16_8,
                    sizeof (ip4_5tuple.kv_16_8));
  ipsec_spinlock_unlock (&im->ipsec4_out_spd_hash_tbl[hash].bucket_lock);

  /* Increment the counter to track active flow cache entries
     when entering a fresh entry or overwriting a stale one */
  if (!overwrite || stale_overwrite)
    clib_atomic_fetch_add_relax (&im->ipsec4_out_spd_flow_cache_entries, 1);

  return;
}

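/* Same as ipsec4_out_spd_add_flow_cache_entry (), but takes an
   already-built ipsec4_spd_5tuple_t; used by the n-tuple match path. */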
always_inline void
ipsec4_out_spd_add_flow_cache_entry_n (ipsec_main_t *im,
                                       ipsec4_spd_5tuple_t *ip4_5tuple,
                                       u32 pol_id)
{
  u64 hash;
  u8 overwrite = 0, stale_overwrite = 0;

  ip4_5tuple->kv_16_8.value = (((u64) pol_id) << 32) | ((u64) im->epoch_count);

  hash = ipsec4_hash_16_8 (&ip4_5tuple->kv_16_8);
  hash &= (im->ipsec4_out_spd_hash_num_buckets - 1);

  ipsec_spinlock_lock (&im->ipsec4_out_spd_hash_tbl[hash].bucket_lock);
  /* Check whether we are overwriting an existing entry so we know
     whether to increment the flow cache counter. Since the flow cache
     counter is reset on any policy add/remove but the hash table values
     are not, we also need to check whether the entry being overwritten
     is stale. When overwriting a stale entry, we still want to
     increment the flow cache counter. */
  overwrite = (im->ipsec4_out_spd_hash_tbl[hash].value != 0);
  /* Check for stale entry by comparing with current epoch count */
  if (PREDICT_FALSE (overwrite))
    stale_overwrite =
      (im->epoch_count !=
       ((u32) (im->ipsec4_out_spd_hash_tbl[hash].value & 0xFFFFFFFF)));
  clib_memcpy_fast (&im->ipsec4_out_spd_hash_tbl[hash], &ip4_5tuple->kv_16_8,
                    sizeof (ip4_5tuple->kv_16_8));
  ipsec_spinlock_unlock (&im->ipsec4_out_spd_hash_tbl[hash].bucket_lock);

  /* Increment the counter to track active flow cache entries
     when entering a fresh entry or overwriting a stale one */
  if (!overwrite || stale_overwrite)
    clib_atomic_fetch_add_relax (&im->ipsec4_out_spd_flow_cache_entries, 1);

  return;
}

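/* Fill a fast-path 5-tuple from host-order IPv4 addresses, ports and
   protocol. Ports are zeroed for protocols that do not carry them. */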
always_inline void
ipsec_fp_5tuple_from_ip4_range (ipsec_fp_5tuple_t *tuple, u32 la, u32 ra,
                                u16 lp, u16 rp, u8 pr)
{
  clib_memset (tuple->l3_zero_pad, 0, sizeof (tuple->l3_zero_pad));
  tuple->laddr.as_u32 = clib_host_to_net_u32 (la);
  tuple->raddr.as_u32 = clib_host_to_net_u32 (ra);

  if (PREDICT_FALSE ((pr != IP_PROTOCOL_TCP) && (pr != IP_PROTOCOL_UDP) &&
                     (pr != IP_PROTOCOL_SCTP)))
    {
      tuple->lport = 0;
      tuple->rport = 0;
    }
  else
    {
      tuple->lport = lp;
      tuple->rport = rp;
    }

  tuple->protocol = pr;
  tuple->is_ipv6 = 0;
}

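/* Fill n fast-path 5-tuples from an array of n ipsec4_spd_5tuple_t,
   applying the same port handling as ipsec_fp_5tuple_from_ip4_range (). */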
always_inline void
ipsec_fp_5tuple_from_ip4_range_n (ipsec_fp_5tuple_t *tuples,
                                  ipsec4_spd_5tuple_t *ip4_5tuple, u32 n)
{
  u32 n_left = n;
  ipsec_fp_5tuple_t *tuple = tuples;

  while (n_left)
    {
      clib_memset (tuple->l3_zero_pad, 0, sizeof (tuple->l3_zero_pad));
      tuple->laddr.as_u32 =
        clib_host_to_net_u32 (ip4_5tuple->ip4_addr[0].as_u32);
      tuple->raddr.as_u32 =
        clib_host_to_net_u32 (ip4_5tuple->ip4_addr[1].as_u32);
      if (PREDICT_FALSE ((ip4_5tuple->proto != IP_PROTOCOL_TCP) &&
                         (ip4_5tuple->proto != IP_PROTOCOL_UDP) &&
                         (ip4_5tuple->proto != IP_PROTOCOL_SCTP)))
        {
          tuple->lport = 0;
          tuple->rport = 0;
        }
      else
        {
          tuple->lport = ip4_5tuple->port[0];
          tuple->rport = ip4_5tuple->port[1];
        }
      tuple->protocol = ip4_5tuple->proto;
      tuple->is_ipv6 = 0;
      n_left--;
      tuple++;
      /* advance the source 5-tuple as well, so each output tuple is
         built from its own input entry */
      ip4_5tuple++;
    }
}

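/* Match n 5-tuples against the outbound IPv4 policies of an SPD. When
   the fast path is enabled, its lookup runs first; tuples it leaves
   unmatched fall back to a linear scan of the inline policies. Fills
   policies[] (zeroing it first) and returns the number of tuples
   matched; matches are added to the flow cache when it is enabled. */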
always_inline int
ipsec_output_policy_match_n (ipsec_spd_t *spd,
                             ipsec4_spd_5tuple_t *ip4_5tuples,
                             ipsec_policy_t **policies, u32 n,
                             u8 flow_cache_enabled)
{
  ipsec_main_t *im = &ipsec_main;
  ipsec_policy_t *p;
  ipsec_policy_t **pp = policies;
  u32 n_left = n;
  ipsec4_spd_5tuple_t *ip4_5tuple = ip4_5tuples;
  u32 policy_ids[n], *policy_id = policy_ids;
  ipsec_fp_5tuple_t tuples[n];
  u32 *i;
  u32 counter = 0;

  if (!spd)
    return 0;

  clib_memset (policies, 0, n * sizeof (ipsec_policy_t *));

  if (im->fp_spd_ipv4_out_is_enabled &&
      PREDICT_TRUE (INDEX_INVALID != spd->fp_spd.ip4_out_lookup_hash_idx))
    {
      ipsec_fp_5tuple_from_ip4_range_n (tuples, ip4_5tuples, n);
      counter += ipsec_fp_out_policy_match_n (&spd->fp_spd, 0, tuples,
                                              policies, policy_ids, n);
    }

  while (n_left)
    {
      if (*pp != 0)
        goto next;

      vec_foreach (i, spd->policies[IPSEC_SPD_POLICY_IP4_OUTBOUND])
        {
          p = pool_elt_at_index (im->policies, *i);
          if (PREDICT_FALSE ((p->protocol != IPSEC_POLICY_PROTOCOL_ANY) &&
                             (p->protocol != ip4_5tuple->proto)))
            continue;

          /* ip4_addr[0] holds the local address and ip4_addr[1] the
             remote one; match each against its own policy range */
          if (ip4_5tuple->ip4_addr[1].as_u32 <
              clib_net_to_host_u32 (p->raddr.start.ip4.as_u32))
            continue;

          if (ip4_5tuple->ip4_addr[1].as_u32 >
              clib_net_to_host_u32 (p->raddr.stop.ip4.as_u32))
            continue;

          if (ip4_5tuple->ip4_addr[0].as_u32 <
              clib_net_to_host_u32 (p->laddr.start.ip4.as_u32))
            continue;

          if (ip4_5tuple->ip4_addr[0].as_u32 >
              clib_net_to_host_u32 (p->laddr.stop.ip4.as_u32))
            continue;

          if (PREDICT_FALSE ((ip4_5tuple->proto != IP_PROTOCOL_TCP) &&
                             (ip4_5tuple->proto != IP_PROTOCOL_UDP) &&
                             (ip4_5tuple->proto != IP_PROTOCOL_SCTP)))
            {
              ip4_5tuple->port[0] = 0;
              ip4_5tuple->port[1] = 0;
              goto add_policy;
            }

          if (ip4_5tuple->port[0] < p->lport.start)
            continue;

          if (ip4_5tuple->port[0] > p->lport.stop)
            continue;

          if (ip4_5tuple->port[1] < p->rport.start)
            continue;

          if (ip4_5tuple->port[1] > p->rport.stop)
            continue;

        add_policy:
          *pp = p;
          *policy_id = *i;
          counter++;
          break;
        }

    next:
      n_left--;
      pp++;
      ip4_5tuple++;
      policy_id++;
    }

  if (flow_cache_enabled)
    {
      n_left = n;
      policy_id = policy_ids;
      ip4_5tuple = ip4_5tuples;
      pp = policies;

      while (n_left)
        {
          if (*pp != NULL)
            {
              /* Add an entry to the flow cache */
              ipsec4_out_spd_add_flow_cache_entry_n (im, ip4_5tuple,
                                                     *policy_id);
            }

          n_left--;
          policy_id++;
          ip4_5tuple++;
          pp++;
        }
    }

  return counter;
}

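/* Look up a 5-tuple in the IPv4 outbound flow cache. Returns the cached
   policy if the key matches and the entry belongs to the current epoch,
   otherwise NULL. */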
always_inline ipsec_policy_t *
ipsec4_out_spd_find_flow_cache_entry (ipsec_main_t *im, u8 pr, u32 la, u32 ra,
                                      u16 lp, u16 rp)
{
  ipsec_policy_t *p = NULL;
  ipsec4_hash_kv_16_8_t kv_result;
  u64 hash;

  if (PREDICT_FALSE ((pr != IP_PROTOCOL_TCP) && (pr != IP_PROTOCOL_UDP) &&
                     (pr != IP_PROTOCOL_SCTP)))
    {
      lp = 0;
      rp = 0;
    }
  ipsec4_spd_5tuple_t ip4_5tuple = { .ip4_addr = { (ip4_address_t) la,
                                                   (ip4_address_t) ra },
                                     .port = { lp, rp },
                                     .proto = pr };

  hash = ipsec4_hash_16_8 (&ip4_5tuple.kv_16_8);
  hash &= (im->ipsec4_out_spd_hash_num_buckets - 1);

  ipsec_spinlock_lock (&im->ipsec4_out_spd_hash_tbl[hash].bucket_lock);
  kv_result = im->ipsec4_out_spd_hash_tbl[hash];
  ipsec_spinlock_unlock (&im->ipsec4_out_spd_hash_tbl[hash].bucket_lock);

  if (ipsec4_hash_key_compare_16_8 ((u64 *) &ip4_5tuple.kv_16_8,
                                    (u64 *) &kv_result))
    {
      if (im->epoch_count == ((u32) (kv_result.value & 0xFFFFFFFF)))
        {
          /* Get the policy based on the index */
          p =
            pool_elt_at_index (im->policies, ((u32) (kv_result.value >> 32)));
        }
    }

  return p;
}

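/* Match a single IPv4 5-tuple (host-order addresses and ports) against
   the outbound policies of an SPD. Uses the fast-path lookup when it is
   enabled, otherwise scans the inline policies in order; the first hit
   wins. Optionally records the result in the flow cache. */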
always_inline ipsec_policy_t *
ipsec_output_policy_match (ipsec_spd_t *spd, u8 pr, u32 la, u32 ra, u16 lp,
                           u16 rp, u8 flow_cache_enabled)
{
  ipsec_main_t *im = &ipsec_main;
  ipsec_policy_t *p;
  ipsec_policy_t *policies[1];
  ipsec_fp_5tuple_t tuples[1];
  u32 fp_policy_ids[1];

  u32 *i;

  if (!spd)
    return 0;

  if (im->fp_spd_ipv4_out_is_enabled &&
      PREDICT_TRUE (INDEX_INVALID != spd->fp_spd.ip4_out_lookup_hash_idx))
    {
      ipsec_fp_5tuple_from_ip4_range (&tuples[0], la, ra, lp, rp, pr);
      ipsec_fp_out_policy_match_n (&spd->fp_spd, 0, tuples, policies,
                                   fp_policy_ids, 1);
      p = policies[0];
      i = fp_policy_ids;
      if (PREDICT_FALSE ((pr != IP_PROTOCOL_TCP) && (pr != IP_PROTOCOL_UDP) &&
                         (pr != IP_PROTOCOL_SCTP)))
        {
          lp = 0;
          rp = 0;
        }
      goto add_flow_cache;
    }

  vec_foreach (i, spd->policies[IPSEC_SPD_POLICY_IP4_OUTBOUND])
    {
      p = pool_elt_at_index (im->policies, *i);
      if (PREDICT_FALSE ((p->protocol != IPSEC_POLICY_PROTOCOL_ANY) &&
                         (p->protocol != pr)))
        continue;

      if (ra < clib_net_to_host_u32 (p->raddr.start.ip4.as_u32))
        continue;

      if (ra > clib_net_to_host_u32 (p->raddr.stop.ip4.as_u32))
        continue;

      if (la < clib_net_to_host_u32 (p->laddr.start.ip4.as_u32))
        continue;

      if (la > clib_net_to_host_u32 (p->laddr.stop.ip4.as_u32))
        continue;

      if (PREDICT_FALSE ((pr != IP_PROTOCOL_TCP) && (pr != IP_PROTOCOL_UDP) &&
                         (pr != IP_PROTOCOL_SCTP)))
        {
          lp = 0;
          rp = 0;
          goto add_flow_cache;
        }

      if (lp < p->lport.start)
        continue;

      if (lp > p->lport.stop)
        continue;

      if (rp < p->rport.start)
        continue;

      if (rp > p->rport.stop)
        continue;

    add_flow_cache:
      if (flow_cache_enabled)
        {
          /* Add an entry to the flow cache */
          ipsec4_out_spd_add_flow_cache_entry (
            im, pr, clib_host_to_net_u32 (la), clib_host_to_net_u32 (ra),
            clib_host_to_net_u16 (lp), clib_host_to_net_u16 (rp), *i);
        }

      return p;
    }
  return 0;
}

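/* Return 1 if address a lies within the inclusive range [la, ua].
   memcmp () yields the correct ordering because IPv6 addresses are
   stored in network byte order. */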
always_inline uword
ip6_addr_match_range (ip6_address_t *a, ip6_address_t *la, ip6_address_t *ua)
{
  if ((memcmp (a->as_u64, la->as_u64, 2 * sizeof (u64)) >= 0) &&
      (memcmp (a->as_u64, ua->as_u64, 2 * sizeof (u64)) <= 0))
    return 1;
  return 0;
}

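/* Fill a fast-path 5-tuple from IPv6 addresses, ports and protocol. */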
always_inline void
ipsec_fp_5tuple_from_ip6_range (ipsec_fp_5tuple_t *tuple, ip6_address_t *la,
                                ip6_address_t *ra, u16 lp, u16 rp, u8 pr)
{
  clib_memcpy (&tuple->ip6_laddr, la, sizeof (ip6_address_t));
  clib_memcpy (&tuple->ip6_raddr, ra, sizeof (ip6_address_t));

  tuple->lport = lp;
  tuple->rport = rp;
  tuple->protocol = pr;
  tuple->is_ipv6 = 1;
}

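/* Match a single IPv6 5-tuple against the outbound policies of an SPD.
   Uses the fast-path lookup when it is enabled, otherwise scans the
   inline policies in order; the first hit wins. */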
always_inline ipsec_policy_t *
ipsec6_output_policy_match (ipsec_spd_t *spd, ip6_address_t *la,
                            ip6_address_t *ra, u16 lp, u16 rp, u8 pr)
{
  ipsec_main_t *im = &ipsec_main;
  ipsec_policy_t *p;
  ipsec_policy_t *policies[1];
  ipsec_fp_5tuple_t tuples[1];
  u32 fp_policy_ids[1];

  u32 *i;

  if (!spd)
    return 0;

  if (im->fp_spd_ipv6_out_is_enabled &&
      PREDICT_TRUE (INDEX_INVALID != spd->fp_spd.ip6_out_lookup_hash_idx))
    {
      ipsec_fp_5tuple_from_ip6_range (&tuples[0], la, ra, lp, rp, pr);
      ipsec_fp_out_policy_match_n (&spd->fp_spd, 1, tuples, policies,
                                   fp_policy_ids, 1);
      p = policies[0];
      i = fp_policy_ids;
      return p;
    }

  vec_foreach (i, spd->policies[IPSEC_SPD_POLICY_IP6_OUTBOUND])
    {
      p = pool_elt_at_index (im->policies, *i);
      if (PREDICT_FALSE ((p->protocol != IPSEC_POLICY_PROTOCOL_ANY) &&
                         (p->protocol != pr)))
        continue;

      if (!ip6_addr_match_range (ra, &p->raddr.start.ip6, &p->raddr.stop.ip6))
        continue;

      if (!ip6_addr_match_range (la, &p->laddr.start.ip6, &p->laddr.stop.ip6))
        continue;

      if (PREDICT_FALSE ((pr != IP_PROTOCOL_TCP) && (pr != IP_PROTOCOL_UDP) &&
                         (pr != IP_PROTOCOL_SCTP)))
        return p;

      if (lp < p->lport.start)
        continue;

      if (lp > p->lport.stop)
        continue;

      if (rp < p->rport.start)
        continue;

      if (rp > p->rport.stop)
        continue;

      return p;
    }

  return 0;
}

#endif /* !IPSEC_OUTPUT_H */