2 * Copyright (c) 2015 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
16 #include <vnet/ipsec/ipsec.h>
20 * Policy packet & bytes counters
22 vlib_combined_counter_main_t ipsec_spd_policy_counters = {
24 .stat_segment_name = "/net/ipsec/policy",
28 ipsec_spd_entry_sort (void *a1, void *a2)
30 ipsec_main_t *im = &ipsec_main;
33 ipsec_policy_t *p1, *p2;
35 p1 = pool_elt_at_index (im->policies, *id1);
36 p2 = pool_elt_at_index (im->policies, *id2);
38 return p2->priority - p1->priority;
44 ipsec_policy_mk_type (bool is_outbound,
46 ipsec_policy_action_t action,
47 ipsec_spd_policy_type_t * type)
52 IPSEC_SPD_POLICY_IP6_OUTBOUND : IPSEC_SPD_POLICY_IP4_OUTBOUND);
59 case IPSEC_POLICY_ACTION_PROTECT:
61 IPSEC_SPD_POLICY_IP6_INBOUND_PROTECT :
62 IPSEC_SPD_POLICY_IP4_INBOUND_PROTECT);
64 case IPSEC_POLICY_ACTION_BYPASS:
66 IPSEC_SPD_POLICY_IP6_INBOUND_BYPASS :
67 IPSEC_SPD_POLICY_IP4_INBOUND_BYPASS);
69 case IPSEC_POLICY_ACTION_DISCARD:
71 IPSEC_SPD_POLICY_IP6_INBOUND_DISCARD :
72 IPSEC_SPD_POLICY_IP4_INBOUND_DISCARD);
74 case IPSEC_POLICY_ACTION_RESOLVE:
79 /* Unsupported type */
83 static_always_inline int
84 ipsec_is_policy_inbound (ipsec_policy_t *policy)
86 if (policy->type == IPSEC_SPD_POLICY_IP4_INBOUND_PROTECT ||
87 policy->type == IPSEC_SPD_POLICY_IP4_INBOUND_BYPASS ||
88 policy->type == IPSEC_SPD_POLICY_IP4_INBOUND_DISCARD)
/**
 * @brief Add or delete an SPD policy (control-plane entry point).
 *
 * Resolves the SPD from the policy's SPD id, invalidates the IPv4
 * outbound/inbound flow caches when enabled, resolves and locks the SA
 * for PROTECT policies, then inserts into (or removes from) the fast
 * path SPD first, falling back to the traditional per-type policy
 * vector.
 *
 * @param vm          vlib main
 * @param policy      policy to add/delete; sa_index is resolved here
 * @param is_add      non-zero to add, zero to delete
 * @param stat_index  out: counter index for the policy
 * @return 0 on success, VNET_API_ERROR_SYSCALL_ERROR_1 on lookup failure
 *
 * NOTE(review): several lines of this function are not visible in this
 * chunk (declarations of p/spd/spd_index/vp/policy_index/ii, braces);
 * comments describe only the visible code.
 */
ipsec_add_del_policy (vlib_main_t * vm,
		      ipsec_policy_t * policy, int is_add, u32 * stat_index)
  ipsec_main_t *im = &ipsec_main;

  /* Map the user-visible SPD id to the SPD pool index; fail if unknown. */
  p = hash_get (im->spd_index_by_spd_id, policy->id);
    return VNET_API_ERROR_SYSCALL_ERROR_1;
  spd = pool_elt_at_index (im->spds, spd_index);
    return VNET_API_ERROR_SYSCALL_ERROR_1;

  /* Invalidate the IPv4 outbound flow cache by bumping its epoch. */
  if (im->output_flow_cache_flag && !policy->is_ipv6 &&
      policy->type == IPSEC_SPD_POLICY_IP4_OUTBOUND)
       * Flow cache entry is valid only when epoch_count value in control
       * plane and data plane match. Otherwise, flow cache entry is considered
       * stale. To avoid the race condition of using old epoch_count value
       * in data plane after the roll over of epoch_count in control plane,
       * entire flow cache is reset.
      if (im->epoch_count == 0xFFFFFFFF)
	  /* Reset all the entries in flow cache */
	  clib_memset_u8 (im->ipsec4_out_spd_hash_tbl, 0,
			  im->ipsec4_out_spd_hash_num_buckets *
			  (sizeof (*(im->ipsec4_out_spd_hash_tbl))));
      /* Increment epoch counter by 1 */
      clib_atomic_fetch_add_relax (&im->epoch_count, 1);
      /* Reset spd flow cache counter since all old entries are stale */
      clib_atomic_store_relax_n (&im->ipsec4_out_spd_flow_cache_entries, 0);

  /* Same invalidation scheme for the IPv4 inbound flow cache. */
  if ((policy->type == IPSEC_SPD_POLICY_IP4_INBOUND_PROTECT ||
       policy->type == IPSEC_SPD_POLICY_IP4_INBOUND_BYPASS ||
       policy->type == IPSEC_SPD_POLICY_IP4_INBOUND_DISCARD) &&
      im->input_flow_cache_flag && !policy->is_ipv6)
       * Flow cache entry is valid only when input_epoch_count value in control
       * plane and data plane match. Otherwise, flow cache entry is considered
       * stale. To avoid the race condition of using old input_epoch_count
       * value in data plane after the roll over of input_epoch_count in
       * control plane, entire flow cache is reset.
      if (im->input_epoch_count == 0xFFFFFFFF)
	  /* Reset all the entries in flow cache */
	  clib_memset_u8 (im->ipsec4_in_spd_hash_tbl, 0,
			  im->ipsec4_in_spd_hash_num_buckets *
			  (sizeof (*(im->ipsec4_in_spd_hash_tbl))));
      /* Increment epoch counter by 1 */
      clib_atomic_fetch_add_relax (&im->input_epoch_count, 1);
      /* Reset spd flow cache counter since all old entries are stale */
      im->ipsec4_in_spd_flow_cache_entries = 0;

  /* PROTECT policies must reference a valid SA; take a lock on it. */
  if (policy->policy == IPSEC_POLICY_ACTION_PROTECT)
      index_t sa_index = ipsec_sa_find_and_lock (policy->sa_id);

      if (INDEX_INVALID == sa_index)
	return VNET_API_ERROR_SYSCALL_ERROR_1;
      policy->sa_index = sa_index;
    policy->sa_index = INDEX_INVALID;

   * Try adding the policy into fast path SPD first. Only adding to
   * traditional SPD when failed.
  if ((im->fp_spd_ipv4_out_is_enabled &&
       PREDICT_TRUE (INDEX_INVALID !=
		     spd->fp_spd.ip4_out_lookup_hash_idx) &&
       policy->type == IPSEC_SPD_POLICY_IP4_OUTBOUND) ||
      (im->fp_spd_ipv4_in_is_enabled &&
       PREDICT_TRUE (INDEX_INVALID !=
		     spd->fp_spd.ip4_in_lookup_hash_idx) &&
       (policy->type == IPSEC_SPD_POLICY_IP4_INBOUND_PROTECT ||
	policy->type == IPSEC_SPD_POLICY_IP4_INBOUND_BYPASS ||
	policy->type == IPSEC_SPD_POLICY_IP4_INBOUND_DISCARD)) ||
      (im->fp_spd_ipv6_out_is_enabled &&
       PREDICT_TRUE (INDEX_INVALID !=
		     spd->fp_spd.ip6_out_lookup_hash_idx) &&
       policy->type == IPSEC_SPD_POLICY_IP6_OUTBOUND))
    return ipsec_fp_add_del_policy ((void *) &spd->fp_spd, policy, 1,

  /* Traditional SPD add: copy into the policy pool, init counters,
   * insert into the per-type vector and re-sort by priority. */
  pool_get (im->policies, vp);
  clib_memcpy (vp, policy, sizeof (*vp));
  policy_index = vp - im->policies;

  vlib_validate_combined_counter (&ipsec_spd_policy_counters,
  vlib_zero_combined_counter (&ipsec_spd_policy_counters, policy_index);
  vec_add1 (spd->policies[policy->type], policy_index);
  vec_sort_with_function (spd->policies[policy->type],
			  ipsec_spd_entry_sort);
  *stat_index = policy_index;

   * Try to delete the policy from the fast path SPD first. Delete from
   * traditional SPD when fp delete fails.
  if ((im->fp_spd_ipv4_out_is_enabled &&
       PREDICT_TRUE (INDEX_INVALID !=
		     spd->fp_spd.ip4_out_lookup_hash_idx) &&
       policy->type == IPSEC_SPD_POLICY_IP4_OUTBOUND) ||
      (im->fp_spd_ipv4_in_is_enabled &&
       PREDICT_TRUE (INDEX_INVALID !=
		     spd->fp_spd.ip4_in_lookup_hash_idx) &&
       (policy->type == IPSEC_SPD_POLICY_IP4_INBOUND_PROTECT ||
	policy->type == IPSEC_SPD_POLICY_IP4_INBOUND_BYPASS ||
	policy->type == IPSEC_SPD_POLICY_IP4_INBOUND_DISCARD)) ||
      (im->fp_spd_ipv6_out_is_enabled &&
       PREDICT_TRUE (INDEX_INVALID !=
		     spd->fp_spd.ip6_out_lookup_hash_idx) &&
       policy->type == IPSEC_SPD_POLICY_IP6_OUTBOUND))
      /* Resolve the SA only to fill sa_index for the fp lookup; the
       * temporary lock taken here is released immediately below. */
      if (policy->policy == IPSEC_POLICY_ACTION_PROTECT)
	  index_t sa_index = ipsec_sa_find_and_lock (policy->sa_id);

	  if (INDEX_INVALID == sa_index)
	    return VNET_API_ERROR_SYSCALL_ERROR_1;
	  policy->sa_index = sa_index;
	  ipsec_sa_unlock_id (policy->sa_id);
	policy->sa_index = INDEX_INVALID;

      return ipsec_fp_add_del_policy ((void *) &spd->fp_spd, policy, 0,

  /* Traditional SPD delete: find the matching entry, remove it from the
   * per-type vector, drop the SA lock and return it to the pool. */
  vec_foreach_index (ii, (spd->policies[policy->type]))
      vp = pool_elt_at_index (im->policies,
			      spd->policies[policy->type][ii]);
      if (ipsec_policy_is_equal (vp, policy))
	  vec_delete (spd->policies[policy->type], 1, ii);
	  ipsec_sa_unlock (vp->sa_index);
	  pool_put (im->policies, vp);
267 static_always_inline void
268 ipsec_fp_release_mask_type (ipsec_main_t *im, u32 mask_type_index)
270 ipsec_fp_mask_type_entry_t *mte =
271 pool_elt_at_index (im->fp_mask_types, mask_type_index);
273 if (mte->refcount == 0)
275 /* this entry is not in use anymore */
276 ASSERT (clib_memset (mte, 0xae, sizeof (*mte)) == EOK);
277 pool_put (im->fp_mask_types, mte);
281 static_always_inline u32
282 find_mask_type_index (ipsec_main_t *im, ipsec_fp_5tuple_t *mask)
284 ipsec_fp_mask_type_entry_t *mte;
286 pool_foreach (mte, im->fp_mask_types)
288 if (memcmp (&mte->mask, mask, sizeof (*mask)) == 0)
289 return (mte - im->fp_mask_types);
295 static_always_inline void
296 fill_ip6_hash_policy_kv (ipsec_fp_5tuple_t *match, ipsec_fp_5tuple_t *mask,
297 clib_bihash_kv_40_8_t *kv)
299 ipsec_fp_lookup_value_t *kv_val = (ipsec_fp_lookup_value_t *) &kv->value;
300 u64 *pmatch = (u64 *) match->kv_40_8.key;
301 u64 *pmask = (u64 *) mask->kv_40_8.key;
302 u64 *pkey = (u64 *) kv->key;
304 *pkey++ = *pmatch++ & *pmask++;
305 *pkey++ = *pmatch++ & *pmask++;
306 *pkey++ = *pmatch++ & *pmask++;
307 *pkey++ = *pmatch++ & *pmask++;
308 *pkey = *pmatch & *pmask;
313 static_always_inline void
314 fill_ip4_hash_policy_kv (ipsec_fp_5tuple_t *match, ipsec_fp_5tuple_t *mask,
315 clib_bihash_kv_16_8_t *kv)
317 ipsec_fp_lookup_value_t *kv_val = (ipsec_fp_lookup_value_t *) &kv->value;
318 u64 *pmatch = (u64 *) match->kv_16_8.key;
319 u64 *pmask = (u64 *) mask->kv_16_8.key;
320 u64 *pkey = (u64 *) kv->key;
322 *pkey++ = *pmatch++ & *pmask++;
323 *pkey = *pmatch & *pmask;
328 static_always_inline u16
329 mask_out_highest_set_bit_u16 (u16 x)
338 static_always_inline u32
339 mask_out_highest_set_bit_u32 (u32 x)
349 static_always_inline u64
350 mask_out_highest_set_bit_u64 (u64 x)
361 static_always_inline void
362 ipsec_fp_get_policy_ports_mask (ipsec_policy_t *policy,
363 ipsec_fp_5tuple_t *mask)
365 if (PREDICT_TRUE ((policy->protocol == IP_PROTOCOL_TCP) ||
366 (policy->protocol == IP_PROTOCOL_UDP) ||
367 (policy->protocol == IP_PROTOCOL_SCTP)))
369 mask->lport = policy->lport.start ^ policy->lport.stop;
370 mask->rport = policy->rport.start ^ policy->rport.stop;
372 mask->lport = mask_out_highest_set_bit_u16 (mask->lport);
374 mask->rport = mask_out_highest_set_bit_u16 (mask->rport);
382 mask->protocol = (policy->protocol == IPSEC_POLICY_PROTOCOL_ANY) ? 0 : ~0;
/**
 * @brief Compute the IPv4 fast-path lookup mask for a policy.
 *
 * Starts from an all-ones (exact match) mask, zeroes the l3 pad, then
 * wildcards the address bits where the policy's start/stop ranges
 * differ, keeping only the common prefix (see
 * mask_out_highest_set_bit_u32). Addresses are in network byte order,
 * hence the byte-order round trips.
 *
 * NOTE(review): the trailing parameter (presumably `bool inbound`) and
 * several branch lines are not visible in this chunk; comments describe
 * only the visible code.
 */
static_always_inline void
ipsec_fp_ip4_get_policy_mask (ipsec_policy_t *policy, ipsec_fp_5tuple_t *mask,
  u32 *pladdr_start = (u32 *) &policy->laddr.start.ip4;
  u32 *pladdr_stop = (u32 *) &policy->laddr.stop.ip4;
  u32 *plmask = (u32 *) &mask->laddr;
  u32 *praddr_start = (u32 *) &policy->raddr.start.ip4;
  u32 *praddr_stop = (u32 *) &policy->raddr.stop.ip4;
  u32 *prmask = (u32 *) &mask->raddr;

  /* default to exact match everywhere, never match on the l3 pad */
  clib_memset_u8 (mask, 0xff, sizeof (ipsec_fp_5tuple_t));
  clib_memset_u8 (&mask->l3_zero_pad, 0, sizeof (mask->l3_zero_pad));

  /* find bits where start != stop */
  *plmask = *pladdr_start ^ *pladdr_stop;
  *prmask = *praddr_start ^ *praddr_stop;
  /* Find most significant bit set (that is the first position
   * start differs from stop). Mask out everything after that bit and
   * the bit itself. Remember that policy stores start and stop in the net
   * order.
   */
  *plmask = clib_host_to_net_u32 (
    mask_out_highest_set_bit_u32 (clib_net_to_host_u32 (*plmask)));

  *prmask = clib_host_to_net_u32 (
    mask_out_highest_set_bit_u32 (clib_net_to_host_u32 (*prmask)));

  /* only inbound PROTECT policies match on SPI */
  if (policy->type != IPSEC_SPD_POLICY_IP4_INBOUND_PROTECT)

  ipsec_fp_get_policy_ports_mask (policy, mask);
428 static_always_inline void
429 ipsec_fp_ip6_get_policy_mask (ipsec_policy_t *policy, ipsec_fp_5tuple_t *mask)
431 u64 *pladdr_start = (u64 *) &policy->laddr.start;
432 u64 *pladdr_stop = (u64 *) &policy->laddr.stop;
433 u64 *plmask = (u64 *) &mask->ip6_laddr;
434 u64 *praddr_start = (u64 *) &policy->raddr.start;
435 u64 *praddr_stop = (u64 *) &policy->raddr.stop;
436 u64 *prmask = (u64 *) &mask->ip6_raddr;
438 clib_memset_u8 (mask, 0xff, sizeof (ipsec_fp_5tuple_t));
440 *plmask = (*pladdr_start++ ^ *pladdr_stop++);
442 *prmask = (*praddr_start++ ^ *praddr_stop++);
444 /* Find most significant bit set (that is the first position
445 * start differs from stop). Mask out everything after that bit and
446 * the bit itself. Remember that policy stores start and stop in the net
449 *plmask = clib_host_to_net_u64 (
450 mask_out_highest_set_bit_u64 (clib_net_to_host_u64 (*plmask)));
452 if (*plmask++ & clib_host_to_net_u64 (0x1))
454 *plmask = (*pladdr_start ^ *pladdr_stop);
455 *plmask = clib_host_to_net_u64 (
456 mask_out_highest_set_bit_u64 (clib_net_to_host_u64 (*plmask)));
461 *prmask = clib_host_to_net_u64 (
462 mask_out_highest_set_bit_u64 (clib_net_to_host_u64 (*prmask)));
464 if (*prmask++ & clib_host_to_net_u64 (0x1))
466 *prmask = (*pladdr_start ^ *pladdr_stop);
467 *prmask = clib_host_to_net_u64 (
468 mask_out_highest_set_bit_u64 (clib_net_to_host_u64 (*prmask)));
473 ipsec_fp_get_policy_ports_mask (policy, mask);
/**
 * @brief Fill a 5-tuple from a policy for fast-path hash insertion.
 *
 * Copies the range start addresses and ports, the protocol, and encodes
 * the policy type in the action field. For inbound PROTECT policies
 * with a valid SA the SPI is taken from the SA; otherwise the SPI is
 * set to INDEX_INVALID.
 *
 * NOTE(review): the trailing parameter (presumably `bool inbound`) and
 * several branch/brace lines are not visible in this chunk; comments
 * describe only the visible code.
 */
static_always_inline void
ipsec_fp_get_policy_5tuple (ipsec_policy_t *policy, ipsec_fp_5tuple_t *tuple,
  memset (tuple, 0, sizeof (*tuple));
  tuple->is_ipv6 = policy->is_ipv6;
      /* IPv6: copy 128-bit range start addresses */
      tuple->ip6_laddr = policy->laddr.start.ip6;
      tuple->ip6_raddr = policy->raddr.start.ip6;
      /* IPv4: copy 32-bit range start addresses */
      tuple->laddr = policy->laddr.start.ip4;
      tuple->raddr = policy->raddr.start.ip4;

  /* inbound PROTECT with a resolved SA matches on the SA's SPI */
  if ((policy->type == IPSEC_SPD_POLICY_IP4_INBOUND_PROTECT ||
       policy->type == IPSEC_SPD_POLICY_IP6_INBOUND_PROTECT) &&
      policy->sa_index != INDEX_INVALID)
      ipsec_sa_t *s = ipsec_sa_get (policy->sa_index);
    tuple->spi = INDEX_INVALID;
  tuple->action = policy->type;

  tuple->protocol = policy->protocol;
  tuple->lport = policy->lport.start;
  tuple->rport = policy->rport.start;
515 static_always_inline int
516 ipsec_fp_mask_type_idx_cmp (ipsec_fp_mask_id_t *mask_id, u32 *idx)
518 return mask_id->mask_type_idx == *idx;
/**
 * @brief Add an IPv4 policy to the fast path SPD.
 *
 * Allocates a policy pool entry and counters, finds (or creates) the
 * mask type entry for the policy's wildcard mask, builds the bihash
 * key, and appends the policy index to the key's policy-id vector
 * (creating the bihash entry when the key is new). Finally records the
 * mask id and policy index in the per-type fp vectors.
 *
 * @return 0 on success; on bihash failure the pool entry is released
 *         and the mask type reference is dropped.
 *
 * NOTE(review): several lines (declarations of vp/policy_index/res,
 * braces, returns, `searched_idx =` assignment) are not visible in this
 * chunk; comments describe only the visible code.
 */
ipsec_fp_ip4_add_policy (ipsec_main_t *im, ipsec_spd_fp_t *fp_spd,
			 ipsec_policy_t *policy, u32 *stat_index)
  u32 mask_index, searched_idx;
  ipsec_fp_mask_type_entry_t *mte;
  clib_bihash_kv_16_8_t kv;
  clib_bihash_kv_16_8_t result;
  ipsec_fp_lookup_value_t *result_val =
    (ipsec_fp_lookup_value_t *) &result.value;
  ipsec_fp_lookup_value_t *key_val = (ipsec_fp_lookup_value_t *) &kv.value;

  ipsec_fp_5tuple_t mask, policy_5tuple;

  bool inbound = ipsec_is_policy_inbound (policy);
  /* pick the in/out lookup table for this SPD */
  clib_bihash_16_8_t *bihash_table =
    inbound ? pool_elt_at_index (im->fp_ip4_lookup_hashes_pool,
				 fp_spd->ip4_in_lookup_hash_idx) :
	      pool_elt_at_index (im->fp_ip4_lookup_hashes_pool,
				 fp_spd->ip4_out_lookup_hash_idx);

  ipsec_fp_ip4_get_policy_mask (policy, &mask, inbound);
  pool_get (im->policies, vp);
  policy_index = vp - im->policies;
  vlib_validate_combined_counter (&ipsec_spd_policy_counters, policy_index);
  vlib_zero_combined_counter (&ipsec_spd_policy_counters, policy_index);
  *stat_index = policy_index;
  mask_index = find_mask_type_index (im, &mask);

  if (mask_index == ~0)
      /* mask type not found, we need to create a new entry */
      pool_get (im->fp_mask_types, mte);
      mask_index = mte - im->fp_mask_types;
    mte = im->fp_mask_types + mask_index;

  policy->fp_mask_type_id = mask_index;
  ipsec_fp_get_policy_5tuple (policy, &policy_5tuple, inbound);

  fill_ip4_hash_policy_kv (&policy_5tuple, &mask, &kv);

  res = clib_bihash_search_inline_2_16_8 (bihash_table, &kv, &result);
      /* key was not found, create a new entry */
      vec_add1 (key_val->fp_policies_ids, policy_index);
      res = clib_bihash_add_del_16_8 (bihash_table, &kv, 1);

      /* when the ids vector still has spare capacity the bihash value
       * (a pointer) does not move, so no re-insert is needed */
      if (vec_max_len (result_val->fp_policies_ids) !=
	  vec_len (result_val->fp_policies_ids))
	  /* no need to resize */
	  vec_add1 (result_val->fp_policies_ids, policy_index);
	  /* vector may reallocate: re-add the entry so the bihash
	   * value points at the (possibly moved) vector */
	  vec_add1 (result_val->fp_policies_ids, policy_index);

	  res = clib_bihash_add_del_16_8 (bihash_table, &result, 1);

  /* first user of a fresh mask type entry: record the mask */
  if (mte->refcount == 0)
      clib_memcpy (&mte->mask, &mask, sizeof (mask));

  /* track per-policy-type mask usage for this SPD */
  vec_search_with_function (fp_spd->fp_mask_ids[policy->type], &mask_index,
			    ipsec_fp_mask_type_idx_cmp);
  if (~0 == searched_idx)
      ipsec_fp_mask_id_t mask_id = { mask_index, 1 };
      vec_add1 (fp_spd->fp_mask_ids[policy->type], mask_id);
    (fp_spd->fp_mask_ids[policy->type] + searched_idx)->refcount++;

  vec_add1 (fp_spd->fp_policies[policy->type], policy_index);
  clib_memcpy (vp, policy, sizeof (*vp));

  /* error path: roll back the pool entry and the mask reference */
  pool_put (im->policies, vp);
  ipsec_fp_release_mask_type (im, mask_index);
/**
 * @brief Add an IPv6 policy to the fast path SPD.
 *
 * Mirror of ipsec_fp_ip4_add_policy using the 40_8 bihash: allocates a
 * policy pool entry and counters, finds (or creates) the mask type
 * entry, builds the bihash key and appends the policy index to the
 * key's policy-id vector, then records the mask id and policy index in
 * the per-type fp vectors.
 *
 * @return 0 on success; on bihash failure the pool entry is released
 *         and the mask type reference is dropped.
 *
 * NOTE(review): several lines (declarations of vp/policy_index/res,
 * braces, returns, `searched_idx =` assignment) are not visible in this
 * chunk; comments describe only the visible code.
 */
ipsec_fp_ip6_add_policy (ipsec_main_t *im, ipsec_spd_fp_t *fp_spd,
			 ipsec_policy_t *policy, u32 *stat_index)
  u32 mask_index, searched_idx;
  ipsec_fp_mask_type_entry_t *mte;
  clib_bihash_kv_40_8_t kv;
  clib_bihash_kv_40_8_t result;
  ipsec_fp_lookup_value_t *result_val =
    (ipsec_fp_lookup_value_t *) &result.value;
  ipsec_fp_lookup_value_t *key_val = (ipsec_fp_lookup_value_t *) &kv.value;

  ipsec_fp_5tuple_t mask, policy_5tuple;

  bool inbound = ipsec_is_policy_inbound (policy);

  ipsec_fp_ip6_get_policy_mask (policy, &mask);
  pool_get (im->policies, vp);
  policy_index = vp - im->policies;
  vlib_validate_combined_counter (&ipsec_spd_policy_counters, policy_index);
  vlib_zero_combined_counter (&ipsec_spd_policy_counters, policy_index);
  *stat_index = policy_index;
  mask_index = find_mask_type_index (im, &mask);
  /* pick the in/out lookup table for this SPD */
  clib_bihash_40_8_t *bihash_table =
    inbound ? pool_elt_at_index (im->fp_ip6_lookup_hashes_pool,
				 fp_spd->ip6_in_lookup_hash_idx) :
	      pool_elt_at_index (im->fp_ip6_lookup_hashes_pool,
				 fp_spd->ip6_out_lookup_hash_idx);

  if (mask_index == ~0)
      /* mask type not found, we need to create a new entry */
      pool_get (im->fp_mask_types, mte);
      mask_index = mte - im->fp_mask_types;
    mte = im->fp_mask_types + mask_index;

  policy->fp_mask_type_id = mask_index;
  ipsec_fp_get_policy_5tuple (policy, &policy_5tuple, inbound);

  fill_ip6_hash_policy_kv (&policy_5tuple, &mask, &kv);

  res = clib_bihash_search_inline_2_40_8 (bihash_table, &kv, &result);
      /* key was not found, create a new entry */
      vec_add1 (key_val->fp_policies_ids, policy_index);
      res = clib_bihash_add_del_40_8 (bihash_table, &kv, 1);

      /* when the ids vector still has spare capacity the bihash value
       * (a pointer) does not move, so no re-insert is needed */
      if (vec_max_len (result_val->fp_policies_ids) !=
	  vec_len (result_val->fp_policies_ids))
	  /* no need to resize */
	  vec_add1 (result_val->fp_policies_ids, policy_index);
	  /* vector may reallocate: re-add the entry so the bihash
	   * value points at the (possibly moved) vector */
	  vec_add1 (result_val->fp_policies_ids, policy_index);

	  res = clib_bihash_add_del_40_8 (bihash_table, &result, 1);

  /* first user of a fresh mask type entry: record the mask */
  if (mte->refcount == 0)
      clib_memcpy (&mte->mask, &mask, sizeof (mask));

  /* track per-policy-type mask usage for this SPD */
  vec_search_with_function (fp_spd->fp_mask_ids[policy->type], &mask_index,
			    ipsec_fp_mask_type_idx_cmp);
  if (~0 == searched_idx)
      ipsec_fp_mask_id_t mask_id = { mask_index, 1 };
      vec_add1 (fp_spd->fp_mask_ids[policy->type], mask_id);
    (fp_spd->fp_mask_ids[policy->type] + searched_idx)->refcount++;

  vec_add1 (fp_spd->fp_policies[policy->type], policy_index);
  clib_memcpy (vp, policy, sizeof (*vp));

  /* error path: roll back the pool entry and the mask reference */
  pool_put (im->policies, vp);
  ipsec_fp_release_mask_type (im, mask_index);
/**
 * @brief Delete an IPv6 policy from the fast path SPD.
 *
 * Rebuilds the bihash key from the policy, looks it up, and scans the
 * key's policy-id vector for an equal policy. On match: removes the id
 * from both the bihash value vector (freeing the bihash entry when it
 * was the last id) and the per-type fp vectors, drops the mask-id
 * refcount (deleting the mask-id entry at zero), releases the mask
 * type reference and the SA lock, and returns the policy to the pool.
 *
 * NOTE(review): declarations of res/ii/iii/imt/vp, braces and returns
 * are not visible in this chunk; comments describe only the visible
 * code.
 */
ipsec_fp_ip6_del_policy (ipsec_main_t *im, ipsec_spd_fp_t *fp_spd,
			 ipsec_policy_t *policy)
  ipsec_fp_5tuple_t mask = { 0 }, policy_5tuple;
  clib_bihash_kv_40_8_t kv;
  clib_bihash_kv_40_8_t result;
  ipsec_fp_lookup_value_t *result_val =
    (ipsec_fp_lookup_value_t *) &result.value;
  bool inbound = ipsec_is_policy_inbound (policy);
  /* pick the in/out lookup table for this SPD */
  clib_bihash_40_8_t *bihash_table =
    inbound ? pool_elt_at_index (im->fp_ip6_lookup_hashes_pool,
				 fp_spd->ip6_in_lookup_hash_idx) :
	      pool_elt_at_index (im->fp_ip6_lookup_hashes_pool,
				 fp_spd->ip6_out_lookup_hash_idx);

  /* rebuild the exact key this policy was inserted under */
  ipsec_fp_ip6_get_policy_mask (policy, &mask);
  ipsec_fp_get_policy_5tuple (policy, &policy_5tuple, inbound);
  fill_ip6_hash_policy_kv (&policy_5tuple, &mask, &kv);
  res = clib_bihash_search_inline_2_40_8 (bihash_table, &kv, &result);

  /* scan the ids stored under this key for an equal policy */
  vec_foreach_index (ii, result_val->fp_policies_ids)
	pool_elt_at_index (im->policies, *(result_val->fp_policies_ids + ii));
      if (ipsec_policy_is_equal (vp, policy))
	  /* find the same id in the per-type fp policy vector */
	  vec_foreach_index (iii, fp_spd->fp_policies[policy->type])
	      if (*(fp_spd->fp_policies[policy->type] + iii) ==
		  *(result_val->fp_policies_ids + ii))
		  /* last id under this key: drop the whole bihash entry */
		  if (vec_len (result_val->fp_policies_ids) == 1)
		      vec_free (result_val->fp_policies_ids);
		      clib_bihash_add_del_40_8 (bihash_table, &result, 0);
		    vec_del1 (result_val->fp_policies_ids, ii);

		  vec_del1 (fp_spd->fp_policies[policy->type], iii);

		  /* drop this policy type's reference on the mask id */
		  vec_foreach_index (imt, fp_spd->fp_mask_ids[policy->type])
		      if ((fp_spd->fp_mask_ids[policy->type] + imt)
			  ->mask_type_idx == vp->fp_mask_type_id)
			  if ((fp_spd->fp_mask_ids[policy->type] + imt)
			    vec_del1 (fp_spd->fp_mask_ids[policy->type], imt);

	  /* release the global mask type, the SA, and the pool entry */
	  ipsec_fp_release_mask_type (im, vp->fp_mask_type_id);
	  ipsec_sa_unlock (vp->sa_index);
	  pool_put (im->policies, vp);
/**
 * @brief Delete an IPv4 policy from the fast path SPD.
 *
 * Mirror of ipsec_fp_ip6_del_policy using the 16_8 bihash: rebuilds the
 * key, scans the key's policy-id vector for an equal policy, removes it
 * from the bihash value and the per-type fp vectors, drops the mask-id
 * refcount, releases the mask type reference and the SA lock, and
 * returns the policy to the pool.
 *
 * NOTE(review): declarations of res/ii/iii/imt/vp, braces and returns
 * are not visible in this chunk; comments describe only the visible
 * code.
 */
ipsec_fp_ip4_del_policy (ipsec_main_t *im, ipsec_spd_fp_t *fp_spd,
			 ipsec_policy_t *policy)
  ipsec_fp_5tuple_t mask = { 0 }, policy_5tuple;
  clib_bihash_kv_16_8_t kv;
  clib_bihash_kv_16_8_t result;
  ipsec_fp_lookup_value_t *result_val =
    (ipsec_fp_lookup_value_t *) &result.value;
  bool inbound = ipsec_is_policy_inbound (policy);

  /* pick the in/out lookup table for this SPD */
  clib_bihash_16_8_t *bihash_table =
    inbound ? pool_elt_at_index (im->fp_ip4_lookup_hashes_pool,
				 fp_spd->ip4_in_lookup_hash_idx) :
	      pool_elt_at_index (im->fp_ip4_lookup_hashes_pool,
				 fp_spd->ip4_out_lookup_hash_idx);

  /* rebuild the exact key this policy was inserted under */
  ipsec_fp_ip4_get_policy_mask (policy, &mask, inbound);
  ipsec_fp_get_policy_5tuple (policy, &policy_5tuple, inbound);
  fill_ip4_hash_policy_kv (&policy_5tuple, &mask, &kv);
  res = clib_bihash_search_inline_2_16_8 (bihash_table, &kv, &result);

  /* scan the ids stored under this key for an equal policy */
  vec_foreach_index (ii, result_val->fp_policies_ids)
	pool_elt_at_index (im->policies, *(result_val->fp_policies_ids + ii));
      if (ipsec_policy_is_equal (vp, policy))
	  /* find the same id in the per-type fp policy vector */
	  vec_foreach_index (iii, fp_spd->fp_policies[policy->type])
	      if (*(fp_spd->fp_policies[policy->type] + iii) ==
		  *(result_val->fp_policies_ids + ii))
		  /* last id under this key: drop the whole bihash entry */
		  if (vec_len (result_val->fp_policies_ids) == 1)
		      vec_free (result_val->fp_policies_ids);
		      clib_bihash_add_del_16_8 (bihash_table, &result, 0);
		    vec_del1 (result_val->fp_policies_ids, ii);

		  vec_del1 (fp_spd->fp_policies[policy->type], iii);

		  /* drop this policy type's reference on the mask id */
		  vec_foreach_index (imt, fp_spd->fp_mask_ids[policy->type])
		      if ((fp_spd->fp_mask_ids[policy->type] + imt)
			  ->mask_type_idx == vp->fp_mask_type_id)
			  if ((fp_spd->fp_mask_ids[policy->type] + imt)
			    vec_del1 (fp_spd->fp_mask_ids[policy->type], imt);

	  /* release the global mask type, the SA, and the pool entry */
	  ipsec_fp_release_mask_type (im, vp->fp_mask_type_id);
	  ipsec_sa_unlock (vp->sa_index);
	  pool_put (im->policies, vp);
898 ipsec_fp_add_del_policy (void *fp_spd, ipsec_policy_t *policy, int is_add,
901 ipsec_main_t *im = &ipsec_main;
905 return ipsec_fp_ip6_add_policy (im, (ipsec_spd_fp_t *) fp_spd, policy,
908 return ipsec_fp_ip4_add_policy (im, (ipsec_spd_fp_t *) fp_spd, policy,
911 else if (policy->is_ipv6)
913 return ipsec_fp_ip6_del_policy (im, (ipsec_spd_fp_t *) fp_spd, policy);
915 return ipsec_fp_ip4_del_policy (im, (ipsec_spd_fp_t *) fp_spd, policy);
919 * fd.io coding-style-patch-verification: ON
922 * eval: (c-set-style "gnu")