/*
 *------------------------------------------------------------------
 * Copyright (c) 2021 Intel and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */
18 #ifndef IPSEC_OUTPUT_H
19 #define IPSEC_OUTPUT_H
21 #include <vppinfra/types.h>
22 #include <vnet/ipsec/ipsec_spd.h>
25 ipsec4_out_spd_add_flow_cache_entry (ipsec_main_t *im, u8 pr, u32 la, u32 ra,
26 u16 lp, u16 rp, u32 pol_id)
29 u8 overwrite = 0, stale_overwrite = 0;
30 ipsec4_spd_5tuple_t ip4_5tuple = { .ip4_addr = { (ip4_address_t) la,
35 ip4_5tuple.kv_16_8.value = (((u64) pol_id) << 32) | ((u64) im->epoch_count);
37 hash = ipsec4_hash_16_8 (&ip4_5tuple.kv_16_8);
38 hash &= (im->ipsec4_out_spd_hash_num_buckets - 1);
40 ipsec_spinlock_lock (&im->ipsec4_out_spd_hash_tbl[hash].bucket_lock);
41 /* Check if we are overwriting an existing entry so we know
42 whether to increment the flow cache counter. Since flow
43 cache counter is reset on any policy add/remove, but
44 hash table values are not, we also need to check if the entry
45 we are overwriting is stale or not. If it's a stale entry
46 overwrite, we still want to increment flow cache counter */
47 overwrite = (im->ipsec4_out_spd_hash_tbl[hash].value != 0);
48 /* Check for stale entry by comparing with current epoch count */
49 if (PREDICT_FALSE (overwrite))
52 ((u32) (im->ipsec4_out_spd_hash_tbl[hash].value & 0xFFFFFFFF)));
53 clib_memcpy_fast (&im->ipsec4_out_spd_hash_tbl[hash], &ip4_5tuple.kv_16_8,
54 sizeof (ip4_5tuple.kv_16_8));
55 ipsec_spinlock_unlock (&im->ipsec4_out_spd_hash_tbl[hash].bucket_lock);
57 /* Increment the counter to track active flow cache entries
58 when entering a fresh entry or overwriting a stale one */
59 if (!overwrite || stale_overwrite)
60 clib_atomic_fetch_add_relax (&im->ipsec4_out_spd_flow_cache_entries, 1);
65 always_inline ipsec_policy_t *
66 ipsec4_out_spd_find_flow_cache_entry (ipsec_main_t *im, u8 pr, u32 la, u32 ra,
69 ipsec_policy_t *p = NULL;
70 ipsec4_hash_kv_16_8_t kv_result;
73 if (PREDICT_FALSE ((pr != IP_PROTOCOL_TCP) && (pr != IP_PROTOCOL_UDP) &&
74 (pr != IP_PROTOCOL_SCTP)))
79 ipsec4_spd_5tuple_t ip4_5tuple = { .ip4_addr = { (ip4_address_t) la,
84 hash = ipsec4_hash_16_8 (&ip4_5tuple.kv_16_8);
85 hash &= (im->ipsec4_out_spd_hash_num_buckets - 1);
87 ipsec_spinlock_lock (&im->ipsec4_out_spd_hash_tbl[hash].bucket_lock);
88 kv_result = im->ipsec4_out_spd_hash_tbl[hash];
89 ipsec_spinlock_unlock (&im->ipsec4_out_spd_hash_tbl[hash].bucket_lock);
91 if (ipsec4_hash_key_compare_16_8 ((u64 *) &ip4_5tuple.kv_16_8,
94 if (im->epoch_count == ((u32) (kv_result.value & 0xFFFFFFFF)))
96 /* Get the policy based on the index */
98 pool_elt_at_index (im->policies, ((u32) (kv_result.value >> 32)));
105 always_inline ipsec_policy_t *
106 ipsec_output_policy_match (ipsec_spd_t *spd, u8 pr, u32 la, u32 ra, u16 lp,
107 u16 rp, u8 flow_cache_enabled)
109 ipsec_main_t *im = &ipsec_main;
116 vec_foreach (i, spd->policies[IPSEC_SPD_POLICY_IP4_OUTBOUND])
118 p = pool_elt_at_index (im->policies, *i);
119 if (PREDICT_FALSE ((p->protocol != IPSEC_POLICY_PROTOCOL_ANY) &&
120 (p->protocol != pr)))
123 if (ra < clib_net_to_host_u32 (p->raddr.start.ip4.as_u32))
126 if (ra > clib_net_to_host_u32 (p->raddr.stop.ip4.as_u32))
129 if (la < clib_net_to_host_u32 (p->laddr.start.ip4.as_u32))
132 if (la > clib_net_to_host_u32 (p->laddr.stop.ip4.as_u32))
135 if (PREDICT_FALSE ((pr != IP_PROTOCOL_TCP) && (pr != IP_PROTOCOL_UDP) &&
136 (pr != IP_PROTOCOL_SCTP)))
143 if (lp < p->lport.start)
146 if (lp > p->lport.stop)
149 if (rp < p->rport.start)
152 if (rp > p->rport.stop)
156 if (flow_cache_enabled)
158 /* Add an Entry in Flow cache */
159 ipsec4_out_spd_add_flow_cache_entry (
160 im, pr, clib_host_to_net_u32 (la), clib_host_to_net_u32 (ra),
161 clib_host_to_net_u16 (lp), clib_host_to_net_u16 (rp), *i);
169 #endif /* !IPSEC_OUTPUT_H */