/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vnet/ipsec/ipsec.h>
20 * Policy packet & bytes counters
22 vlib_combined_counter_main_t ipsec_spd_policy_counters = {
24 .stat_segment_name = "/net/ipsec/policy",
28 ipsec_policy_is_equal (ipsec_policy_t * p1, ipsec_policy_t * p2)
30 if (p1->priority != p2->priority)
32 if (p1->type != p2->type)
34 if (p1->policy != p2->policy)
36 if (p1->sa_id != p2->sa_id)
38 if (p1->protocol != p2->protocol)
40 if (p1->lport.start != p2->lport.start)
42 if (p1->lport.stop != p2->lport.stop)
44 if (p1->rport.start != p2->rport.start)
46 if (p1->rport.stop != p2->rport.stop)
48 if (p1->is_ipv6 != p2->is_ipv6)
52 if (p1->laddr.start.ip6.as_u64[0] != p2->laddr.start.ip6.as_u64[0])
54 if (p1->laddr.start.ip6.as_u64[1] != p2->laddr.start.ip6.as_u64[1])
56 if (p1->laddr.stop.ip6.as_u64[0] != p2->laddr.stop.ip6.as_u64[0])
58 if (p1->laddr.stop.ip6.as_u64[1] != p2->laddr.stop.ip6.as_u64[1])
60 if (p1->raddr.start.ip6.as_u64[0] != p2->raddr.start.ip6.as_u64[0])
62 if (p1->raddr.start.ip6.as_u64[1] != p2->raddr.start.ip6.as_u64[1])
64 if (p1->raddr.stop.ip6.as_u64[0] != p2->raddr.stop.ip6.as_u64[0])
66 if (p1->laddr.stop.ip6.as_u64[1] != p2->laddr.stop.ip6.as_u64[1])
71 if (p1->laddr.start.ip4.as_u32 != p2->laddr.start.ip4.as_u32)
73 if (p1->laddr.stop.ip4.as_u32 != p2->laddr.stop.ip4.as_u32)
75 if (p1->raddr.start.ip4.as_u32 != p2->raddr.start.ip4.as_u32)
77 if (p1->raddr.stop.ip4.as_u32 != p2->raddr.stop.ip4.as_u32)
84 ipsec_spd_entry_sort (void *a1, void *a2)
86 ipsec_main_t *im = &ipsec_main;
89 ipsec_policy_t *p1, *p2;
91 p1 = pool_elt_at_index (im->policies, *id1);
92 p2 = pool_elt_at_index (im->policies, *id2);
94 return p2->priority - p1->priority;
100 ipsec_policy_mk_type (bool is_outbound,
102 ipsec_policy_action_t action,
103 ipsec_spd_policy_type_t * type)
108 IPSEC_SPD_POLICY_IP6_OUTBOUND : IPSEC_SPD_POLICY_IP4_OUTBOUND);
115 case IPSEC_POLICY_ACTION_PROTECT:
117 IPSEC_SPD_POLICY_IP6_INBOUND_PROTECT :
118 IPSEC_SPD_POLICY_IP4_INBOUND_PROTECT);
120 case IPSEC_POLICY_ACTION_BYPASS:
122 IPSEC_SPD_POLICY_IP6_INBOUND_BYPASS :
123 IPSEC_SPD_POLICY_IP4_INBOUND_BYPASS);
125 case IPSEC_POLICY_ACTION_DISCARD:
127 IPSEC_SPD_POLICY_IP6_INBOUND_DISCARD :
128 IPSEC_SPD_POLICY_IP4_INBOUND_DISCARD);
130 case IPSEC_POLICY_ACTION_RESOLVE:
135 /* Unsupported type */
140 ipsec_add_del_policy (vlib_main_t * vm,
141 ipsec_policy_t * policy, int is_add, u32 * stat_index)
143 ipsec_main_t *im = &ipsec_main;
144 ipsec_spd_t *spd = 0;
149 p = hash_get (im->spd_index_by_spd_id, policy->id);
152 return VNET_API_ERROR_SYSCALL_ERROR_1;
155 spd = pool_elt_at_index (im->spds, spd_index);
157 return VNET_API_ERROR_SYSCALL_ERROR_1;
159 if (im->flow_cache_flag && !policy->is_ipv6 &&
160 policy->type == IPSEC_SPD_POLICY_IP4_OUTBOUND)
163 * Flow cache entry is valid only when epoch_count value in control
164 * plane and data plane match. Otherwise, flow cache entry is considered
165 * stale. To avoid the race condition of using old epoch_count value
166 * in data plane after the roll over of epoch_count in control plane,
167 * entire flow cache is reset.
169 if (im->epoch_count == 0xFFFFFFFF)
171 /* Reset all the entries in flow cache */
172 clib_memset_u8 (im->ipsec4_out_spd_hash_tbl, 0,
173 im->ipsec4_out_spd_hash_num_buckets *
174 (sizeof (*(im->ipsec4_out_spd_hash_tbl))));
176 /* Increment epoch counter by 1 */
177 clib_atomic_fetch_add_relax (&im->epoch_count, 1);
178 /* Reset spd flow cache counter since all old entries are stale */
179 clib_atomic_store_relax_n (&im->ipsec4_out_spd_flow_cache_entries, 0);
186 if (policy->policy == IPSEC_POLICY_ACTION_PROTECT)
188 index_t sa_index = ipsec_sa_find_and_lock (policy->sa_id);
190 if (INDEX_INVALID == sa_index)
191 return VNET_API_ERROR_SYSCALL_ERROR_1;
192 policy->sa_index = sa_index;
195 policy->sa_index = INDEX_INVALID;
197 pool_get (im->policies, vp);
198 clib_memcpy (vp, policy, sizeof (*vp));
199 policy_index = vp - im->policies;
201 vlib_validate_combined_counter (&ipsec_spd_policy_counters,
203 vlib_zero_combined_counter (&ipsec_spd_policy_counters, policy_index);
205 vec_add1 (spd->policies[policy->type], policy_index);
206 vec_sort_with_function (spd->policies[policy->type],
207 ipsec_spd_entry_sort);
208 *stat_index = policy_index;
214 vec_foreach_index (ii, (spd->policies[policy->type]))
216 vp = pool_elt_at_index (im->policies,
217 spd->policies[policy->type][ii]);
218 if (ipsec_policy_is_equal (vp, policy))
220 vec_delete (spd->policies[policy->type], 1, ii);
221 ipsec_sa_unlock (vp->sa_index);
222 pool_put (im->policies, vp);
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */