2 *------------------------------------------------------------------
3 * Copyright (c) 2021 Intel and/or its affiliates.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 *------------------------------------------------------------------
18 #ifndef IPSEC_OUTPUT_H
19 #define IPSEC_OUTPUT_H
21 #include <vppinfra/types.h>
22 #include <vnet/ipsec/ipsec_spd.h>
23 #include <vnet/ipsec/ipsec_spd_fp_lookup.h>
/*
 * ipsec4_out_spd_add_flow_cache_entry:
 * Cache the result of an IPv4 outbound SPD policy lookup so subsequent
 * packets of the same flow can skip the linear policy walk.
 *
 * @param im      ipsec main context (owns the hash table, epoch counter
 *                and flow-cache entry counter)
 * @param pr      IP protocol of the flow
 * @param la, ra  local/remote IPv4 addresses (stored as-is in the key)
 * @param lp, rp  local/remote ports (stored as-is in the key)
 * @param pol_id  policy pool index to cache for this 5-tuple
 *
 * NOTE(review): this view of the file is missing lines (return type,
 * braces, the rest of the 5-tuple initializer, and the body of the
 * stale-entry check) — confirm details against the full source.
 */
26 ipsec4_out_spd_add_flow_cache_entry (ipsec_main_t *im, u8 pr, u32 la, u32 ra,
27 u16 lp, u16 rp, u32 pol_id)
30 u8 overwrite = 0, stale_overwrite = 0;
31 ipsec4_spd_5tuple_t ip4_5tuple = { .ip4_addr = { (ip4_address_t) la,
/* Value layout: policy pool index in the upper 32 bits, current epoch
   count in the lower 32 bits (epoch lets readers detect stale entries). */
36 ip4_5tuple.kv_16_8.value = (((u64) pol_id) << 32) | ((u64) im->epoch_count);
38 hash = ipsec4_hash_16_8 (&ip4_5tuple.kv_16_8);
/* Masking assumes the bucket count is a power of two. */
39 hash &= (im->ipsec4_out_spd_hash_num_buckets - 1);
41 ipsec_spinlock_lock (&im->ipsec4_out_spd_hash_tbl[hash].bucket_lock);
42 /* Check if we are overwriting an existing entry so we know
43 whether to increment the flow cache counter. Since flow
44 cache counter is reset on any policy add/remove, but
45 hash table values are not, we also need to check if the entry
46 we are overwriting is stale or not. If it's a stale entry
47 overwrite, we still want to increment flow cache counter */
48 overwrite = (im->ipsec4_out_spd_hash_tbl[hash].value != 0);
49 /* Check for stale entry by comparing with current epoch count */
50 if (PREDICT_FALSE (overwrite))
/* NOTE(review): the body of this check is elided here; presumably it
   sets stale_overwrite when the stored epoch (low 32 bits of the
   value, compared below) differs from im->epoch_count — confirm. */
53 ((u32) (im->ipsec4_out_spd_hash_tbl[hash].value & 0xFFFFFFFF)));
/* Unconditionally (over)write the bucket with the new key/value. */
54 clib_memcpy_fast (&im->ipsec4_out_spd_hash_tbl[hash], &ip4_5tuple.kv_16_8,
55 sizeof (ip4_5tuple.kv_16_8));
56 ipsec_spinlock_unlock (&im->ipsec4_out_spd_hash_tbl[hash].bucket_lock);
58 /* Increment the counter to track active flow cache entries
59 when entering a fresh entry or overwriting a stale one */
60 if (!overwrite || stale_overwrite)
61 clib_atomic_fetch_add_relax (&im->ipsec4_out_spd_flow_cache_entries, 1);
/*
 * ipsec4_out_spd_add_flow_cache_entry_n:
 * Variant of ipsec4_out_spd_add_flow_cache_entry that takes an
 * already-built ipsec4_spd_5tuple_t by pointer (used by the batched
 * match path) instead of scalar 5-tuple fields. Same locking, epoch
 * and counter semantics as the scalar version.
 *
 * @param im          ipsec main context
 * @param ip4_5tuple  key to insert; its kv value field is overwritten
 *                    with (pol_id << 32 | epoch_count)
 *
 * NOTE(review): the pol_id parameter and parts of this body are elided
 * in this view — confirm against the full source.
 */
67 ipsec4_out_spd_add_flow_cache_entry_n (ipsec_main_t *im,
68 ipsec4_spd_5tuple_t *ip4_5tuple,
72 u8 overwrite = 0, stale_overwrite = 0;
/* Value layout: policy pool index (high 32) | epoch count (low 32). */
74 ip4_5tuple->kv_16_8.value = (((u64) pol_id) << 32) | ((u64) im->epoch_count);
76 hash = ipsec4_hash_16_8 (&ip4_5tuple->kv_16_8);
/* Masking assumes the bucket count is a power of two. */
77 hash &= (im->ipsec4_out_spd_hash_num_buckets - 1);
79 ipsec_spinlock_lock (&im->ipsec4_out_spd_hash_tbl[hash].bucket_lock);
80 /* Check if we are overwriting an existing entry so we know
81 whether to increment the flow cache counter. Since flow
82 cache counter is reset on any policy add/remove, but
83 hash table values are not, we also need to check if the entry
84 we are overwriting is stale or not. If it's a stale entry
85 overwrite, we still want to increment flow cache counter */
86 overwrite = (im->ipsec4_out_spd_hash_tbl[hash].value != 0);
87 /* Check for stale entry by comparing with current epoch count */
88 if (PREDICT_FALSE (overwrite))
/* NOTE(review): stale-check body elided; presumably sets
   stale_overwrite on epoch mismatch — confirm. */
91 ((u32) (im->ipsec4_out_spd_hash_tbl[hash].value & 0xFFFFFFFF)));
92 clib_memcpy_fast (&im->ipsec4_out_spd_hash_tbl[hash], &ip4_5tuple->kv_16_8,
93 sizeof (ip4_5tuple->kv_16_8));
94 ipsec_spinlock_unlock (&im->ipsec4_out_spd_hash_tbl[hash].bucket_lock);
96 /* Increment the counter to track active flow cache entries
97 when entering a fresh entry or overwriting a stale one */
98 if (!overwrite || stale_overwrite)
99 clib_atomic_fetch_add_relax (&im->ipsec4_out_spd_flow_cache_entries, 1);
/*
 * ipsec_fp_5tuple_from_ip4_range:
 * Build a fast-path (fp) 5-tuple from scalar IPv4 flow fields for the
 * fp SPD matcher.
 *
 * @param tuple  output tuple; l3 padding is zeroed first
 * @param la, ra local/remote addresses in host byte order (converted to
 *               network order here)
 * @param lp, rp local/remote ports
 * @param pr     IP protocol
 *
 * NOTE(review): the branch bodies for both the port-less-protocol case
 * and the TCP/UDP/SCTP case are elided in this view; presumably ports
 * are zeroed for non-port protocols and copied otherwise — confirm.
 */
105 ipsec_fp_5tuple_from_ip4_range (ipsec_fp_5tuple_t *tuple, u32 la, u32 ra,
106 u16 lp, u16 rp, u8 pr)
108 clib_memset (tuple->l3_zero_pad, 0, sizeof (tuple->l3_zero_pad));
109 tuple->laddr.as_u32 = clib_host_to_net_u32 (la);
110 tuple->raddr.as_u32 = clib_host_to_net_u32 (ra);
/* Only TCP/UDP/SCTP carry meaningful port fields. */
112 if (PREDICT_FALSE ((pr != IP_PROTOCOL_TCP) && (pr != IP_PROTOCOL_UDP) &&
113 (pr != IP_PROTOCOL_SCTP)))
124 tuple->protocol = pr;
/*
 * ipsec_fp_5tuple_from_ip4_range_n:
 * Batched variant of ipsec_fp_5tuple_from_ip4_range: converts n
 * ipsec4_spd_5tuple_t entries into fp 5-tuples.
 *
 * @param tuples      output array, at least n entries
 * @param ip4_5tuple  input array of flow-cache style 5-tuples; addresses
 *                    are converted host->net, ports copied as-is
 * @param n           number of entries
 *
 * NOTE(review): the loop header and branch bodies are elided in this
 * view — the visible statements are one iteration's work; confirm the
 * iteration/advance logic against the full source.
 */
129 ipsec_fp_5tuple_from_ip4_range_n (ipsec_fp_5tuple_t *tuples,
130 ipsec4_spd_5tuple_t *ip4_5tuple, u32 n)
133 ipsec_fp_5tuple_t *tuple = tuples;
137 clib_memset (tuple->l3_zero_pad, 0, sizeof (tuple->l3_zero_pad));
/* ip4_addr[0] = local, ip4_addr[1] = remote (see scalar variant). */
138 tuple->laddr.as_u32 =
139 clib_host_to_net_u32 (ip4_5tuple->ip4_addr[0].as_u32);
140 tuple->raddr.as_u32 =
141 clib_host_to_net_u32 (ip4_5tuple->ip4_addr[1].as_u32);
/* Only TCP/UDP/SCTP carry meaningful port fields. */
142 if (PREDICT_FALSE ((ip4_5tuple->proto != IP_PROTOCOL_TCP) &&
143 (ip4_5tuple->proto != IP_PROTOCOL_UDP) &&
144 (ip4_5tuple->proto != IP_PROTOCOL_SCTP)))
151 tuple->lport = ip4_5tuple->port[0];
152 tuple->rport = ip4_5tuple->port[1];
154 tuple->protocol = ip4_5tuple->proto;
/*
 * ipsec_output_policy_match_n:
 * Batched outbound SPD lookup: resolve a policy for each of n IPv4
 * 5-tuples. Tries the fast-path matcher first (when enabled), then
 * falls back to a linear walk over the IP4 outbound policy vector,
 * and finally populates the flow cache when it is enabled.
 *
 * @param spd                SPD to search
 * @param ip4_5tuples        n input 5-tuples (addresses in host order,
 *                           compared against host-order policy ranges)
 * @param policies           output array of n policy pointers, zeroed
 *                           on entry; NULL means no match
 * @param n                  batch size
 * @param flow_cache_enabled when set, matched flows are inserted into
 *                           the flow cache
 *
 * NOTE(review): many lines (loop headers, continues, braces, return)
 * are elided in this view — the visible statements show the per-policy
 * filter conditions, not the full control flow.
 */
162 ipsec_output_policy_match_n (ipsec_spd_t *spd,
163 ipsec4_spd_5tuple_t *ip4_5tuples,
164 ipsec_policy_t **policies, u32 n,
165 u8 flow_cache_enabled)
167 ipsec_main_t *im = &ipsec_main;
169 ipsec_policy_t **pp = policies;
171 ipsec4_spd_5tuple_t *ip4_5tuple = ip4_5tuples;
/* VLAs sized by batch size n — assumes n is small/bounded (vector
   frame sized); confirm callers bound n. */
172 u32 policy_ids[n], *policy_id = policy_ids;
173 ipsec_fp_5tuple_t tuples[n];
180 clib_memset (policies, 0, n * sizeof (ipsec_policy_t *));
182 if (im->fp_spd_is_enabled)
184 ipsec_fp_5tuple_from_ip4_range_n (tuples, ip4_5tuples, n);
185 counter += ipsec_fp_out_policy_match_n (&spd->fp_spd, 0, tuples,
186 policies, policy_ids, n);
/* Linear fallback: policies are stored in precedence order, so the
   first policy passing all filters below wins (elided break/assign). */
194 vec_foreach (i, spd->policies[IPSEC_SPD_POLICY_IP4_OUTBOUND])
196 p = pool_elt_at_index (im->policies, *i);
/* NOTE(review): protocol wildcard here is "0 == any"; the scalar
   matcher ipsec_output_policy_match compares against
   IPSEC_POLICY_PROTOCOL_ANY instead — confirm these agree. */
197 if (PREDICT_FALSE (p->protocol &&
198 (p->protocol != ip4_5tuple->proto)))
/* Address range filters: 5-tuple addresses are host order; policy
   range bounds are stored network order, hence the conversions. */
201 if (ip4_5tuple->ip4_addr[0].as_u32 <
202 clib_net_to_host_u32 (p->raddr.start.ip4.as_u32))
205 if (ip4_5tuple->ip4_addr[1].as_u32 >
206 clib_net_to_host_u32 (p->raddr.stop.ip4.as_u32))
209 if (ip4_5tuple->ip4_addr[0].as_u32 <
210 clib_net_to_host_u32 (p->laddr.start.ip4.as_u32))
213 if (ip4_5tuple->ip4_addr[1].as_u32 >
214 clib_net_to_host_u32 (p->laddr.stop.ip4.as_u32))
/* Protocols without ports: zero the tuple's ports (also normalizes
   the key later used for the flow cache insert). */
217 if (PREDICT_FALSE ((ip4_5tuple->proto != IP_PROTOCOL_TCP) &&
218 (ip4_5tuple->proto != IP_PROTOCOL_UDP) &&
219 (ip4_5tuple->proto != IP_PROTOCOL_SCTP)))
221 ip4_5tuple->port[0] = 0;
222 ip4_5tuple->port[1] = 0;
/* Port range filters (port[0] = local, port[1] = remote). */
226 if (ip4_5tuple->port[0] < p->lport.start)
229 if (ip4_5tuple->port[0] > p->lport.stop)
232 if (ip4_5tuple->port[1] < p->rport.start)
235 if (ip4_5tuple->port[1] > p->rport.stop)
252 if (flow_cache_enabled)
/* Second pass over the batch to insert matches into the flow cache. */
255 policy_id = policy_ids;
256 ip4_5tuple = ip4_5tuples;
263 /* Add an Entry in Flow cache */
264 ipsec4_out_spd_add_flow_cache_entry_n (im, ip4_5tuple,
/*
 * ipsec4_out_spd_find_flow_cache_entry:
 * Look up a previously cached outbound policy for an IPv4 flow.
 * Returns the cached policy, or NULL when there is no valid (key
 * match + current epoch) entry for the 5-tuple.
 *
 * @param im      ipsec main context
 * @param pr      IP protocol; non-TCP/UDP/SCTP flows take the elided
 *                early branch (presumably port normalization — confirm)
 * @param la, ra  local/remote IPv4 addresses, same byte order as used
 *                when the entry was inserted
 *
 * NOTE(review): ports parameters, braces and the return statement are
 * elided in this view.
 */
278 always_inline ipsec_policy_t *
279 ipsec4_out_spd_find_flow_cache_entry (ipsec_main_t *im, u8 pr, u32 la, u32 ra,
282 ipsec_policy_t *p = NULL;
283 ipsec4_hash_kv_16_8_t kv_result;
286 if (PREDICT_FALSE ((pr != IP_PROTOCOL_TCP) && (pr != IP_PROTOCOL_UDP) &&
287 (pr != IP_PROTOCOL_SCTP)))
292 ipsec4_spd_5tuple_t ip4_5tuple = { .ip4_addr = { (ip4_address_t) la,
293 (ip4_address_t) ra },
297 hash = ipsec4_hash_16_8 (&ip4_5tuple.kv_16_8);
/* Masking assumes the bucket count is a power of two. */
298 hash &= (im->ipsec4_out_spd_hash_num_buckets - 1);
/* Copy the bucket out under its spinlock so the key/value pair is
   read consistently, then compare outside the lock. */
300 ipsec_spinlock_lock (&im->ipsec4_out_spd_hash_tbl[hash].bucket_lock);
301 kv_result = im->ipsec4_out_spd_hash_tbl[hash];
302 ipsec_spinlock_unlock (&im->ipsec4_out_spd_hash_tbl[hash].bucket_lock);
304 if (ipsec4_hash_key_compare_16_8 ((u64 *) &ip4_5tuple.kv_16_8,
/* Entry is valid only if it was written in the current epoch
   (low 32 bits of the value hold the epoch at insert time). */
307 if (im->epoch_count == ((u32) (kv_result.value & 0xFFFFFFFF)))
309 /* Get the policy based on the index */
/* High 32 bits of the value hold the policy pool index. */
311 pool_elt_at_index (im->policies, ((u32) (kv_result.value >> 32)));
/*
 * ipsec_output_policy_match:
 * Scalar outbound SPD lookup for one IPv4 flow. Tries the fast-path
 * matcher first (when enabled), then walks the IP4 outbound policy
 * vector in precedence order; on a match with the flow cache enabled,
 * inserts the result into the flow cache.
 *
 * @param spd                SPD to search
 * @param pr                 IP protocol
 * @param la, ra             local/remote addresses in HOST byte order
 *                           (converted to network order only for the
 *                           flow-cache insert at the bottom)
 * @param lp, rp             local/remote ports in host byte order
 * @param flow_cache_enabled when set, the match is cached
 *
 * NOTE(review): braces, continues and return statements are elided in
 * this view — the visible lines show the filter conditions only.
 */
318 always_inline ipsec_policy_t *
319 ipsec_output_policy_match (ipsec_spd_t *spd, u8 pr, u32 la, u32 ra, u16 lp,
320 u16 rp, u8 flow_cache_enabled)
322 ipsec_main_t *im = &ipsec_main;
324 ipsec_policy_t *policies[1];
325 ipsec_fp_5tuple_t tuples[1];
326 u32 fp_policy_ids[1];
333 ipsec_fp_5tuple_from_ip4_range (&tuples[0], la, ra, lp, rp, pr);
/* Fast-path short circuit: a single-entry batch against the fp SPD. */
335 if (im->fp_spd_is_enabled &&
336 (1 == ipsec_fp_out_policy_match_n (&spd->fp_spd, 0, tuples, policies,
/* Protocols without ports take the elided branch (presumably zeroing
   lp/rp before the port-range filters below — confirm). */
341 if (PREDICT_FALSE ((pr != IP_PROTOCOL_TCP) && (pr != IP_PROTOCOL_UDP) &&
342 (pr != IP_PROTOCOL_SCTP)))
350 vec_foreach (i, spd->policies[IPSEC_SPD_POLICY_IP4_OUTBOUND])
352 p = pool_elt_at_index (im->policies, *i);
/* Explicit wildcard check — note the batched matcher uses truthiness
   of p->protocol instead; confirm the two agree. */
353 if (PREDICT_FALSE ((p->protocol != IPSEC_POLICY_PROTOCOL_ANY) &&
354 (p->protocol != pr)))
/* Address range filters: la/ra are host order; policy bounds are
   stored network order, hence the conversions. */
357 if (ra < clib_net_to_host_u32 (p->raddr.start.ip4.as_u32))
360 if (ra > clib_net_to_host_u32 (p->raddr.stop.ip4.as_u32))
363 if (la < clib_net_to_host_u32 (p->laddr.start.ip4.as_u32))
366 if (la > clib_net_to_host_u32 (p->laddr.stop.ip4.as_u32))
369 if (PREDICT_FALSE ((pr != IP_PROTOCOL_TCP) && (pr != IP_PROTOCOL_UDP) &&
370 (pr != IP_PROTOCOL_SCTP)))
/* Port range filters. */
377 if (lp < p->lport.start)
380 if (lp > p->lport.stop)
383 if (rp < p->rport.start)
386 if (rp > p->rport.stop)
390 if (flow_cache_enabled)
392 /* Add an Entry in Flow cache */
/* Cache key uses network byte order, matching the find path. */
393 ipsec4_out_spd_add_flow_cache_entry (
394 im, pr, clib_host_to_net_u32 (la), clib_host_to_net_u32 (ra),
395 clib_host_to_net_u16 (lp), clib_host_to_net_u16 (rp), *i);
403 #endif /* !IPSEC_OUTPUT_H */