2 * Copyright (c) 2015 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
16 #include <vnet/ipsec/ipsec.h>
20 * Policy packet & bytes counters
/* Combined (packets + bytes) counters, one entry per policy pool index,
 * exported in the stats segment under "/net/ipsec/policy". */
22 vlib_combined_counter_main_t ipsec_spd_policy_counters = {
24 .stat_segment_name = "/net/ipsec/policy",
/* Comparator for vec_sort_with_function(): a1/a2 point at policy pool
 * indices (id1/id2 declarations are outside this excerpt).  Sorts an SPD
 * policy vector in descending priority order (highest priority first). */
28 ipsec_spd_entry_sort (void *a1, void *a2)
30 ipsec_main_t *im = &ipsec_main;
33 ipsec_policy_t *p1, *p2;
35 p1 = pool_elt_at_index (im->policies, *id1);
36 p2 = pool_elt_at_index (im->policies, *id2);
/* descending: positive when p2 outranks p1 */
38 return p2->priority - p1->priority;
/* Map (direction, address family, action) to an ipsec_spd_policy_type_t
 * written through *type.  Outbound policies collapse to one type per
 * address family regardless of action; inbound policies get a distinct
 * type per action (PROTECT/BYPASS/DISCARD).  RESOLVE — and anything
 * falling through the switch — is reported as unsupported. */
44 ipsec_policy_mk_type (bool is_outbound,
46 ipsec_policy_action_t action,
47 ipsec_spd_policy_type_t * type)
52 IPSEC_SPD_POLICY_IP6_OUTBOUND : IPSEC_SPD_POLICY_IP4_OUTBOUND);
59 case IPSEC_POLICY_ACTION_PROTECT:
61 IPSEC_SPD_POLICY_IP6_INBOUND_PROTECT :
62 IPSEC_SPD_POLICY_IP4_INBOUND_PROTECT);
64 case IPSEC_POLICY_ACTION_BYPASS:
66 IPSEC_SPD_POLICY_IP6_INBOUND_BYPASS :
67 IPSEC_SPD_POLICY_IP4_INBOUND_BYPASS);
69 case IPSEC_POLICY_ACTION_DISCARD:
71 IPSEC_SPD_POLICY_IP6_INBOUND_DISCARD :
72 IPSEC_SPD_POLICY_IP4_INBOUND_DISCARD);
74 case IPSEC_POLICY_ACTION_RESOLVE:
79 /* Unsupported type */
83 static_always_inline int
/* Return non-zero iff the policy's type is one of the six inbound SPD
 * types (ip4/ip6 x protect/bypass/discard); outbound types yield 0.
 * (The return statements fall outside this excerpt.) */
84 ipsec_is_policy_inbound (ipsec_policy_t *policy)
86 if (policy->type == IPSEC_SPD_POLICY_IP4_INBOUND_PROTECT ||
87 policy->type == IPSEC_SPD_POLICY_IP4_INBOUND_BYPASS ||
88 policy->type == IPSEC_SPD_POLICY_IP4_INBOUND_DISCARD ||
89 policy->type == IPSEC_SPD_POLICY_IP6_INBOUND_PROTECT ||
90 policy->type == IPSEC_SPD_POLICY_IP6_INBOUND_BYPASS ||
91 policy->type == IPSEC_SPD_POLICY_IP6_INBOUND_DISCARD)
/* True when the fast-path (bihash) SPD should own this policy: the
 * global per-AF/per-direction fast-path flag is enabled AND the SPD's
 * corresponding lookup hash has actually been allocated (index valid),
 * AND the policy's type belongs to that AF/direction. */
97 static_always_inline int
98 ipsec_is_fp_enabled (ipsec_main_t *im, ipsec_spd_t *spd,
99 ipsec_policy_t *policy)
101 if ((im->fp_spd_ipv4_out_is_enabled &&
102 PREDICT_TRUE (INDEX_INVALID != spd->fp_spd.ip4_out_lookup_hash_idx) &&
103 policy->type == IPSEC_SPD_POLICY_IP4_OUTBOUND) ||
104 (im->fp_spd_ipv4_in_is_enabled &&
105 PREDICT_TRUE (INDEX_INVALID != spd->fp_spd.ip4_in_lookup_hash_idx) &&
106 (policy->type == IPSEC_SPD_POLICY_IP4_INBOUND_PROTECT ||
107 policy->type == IPSEC_SPD_POLICY_IP4_INBOUND_BYPASS ||
108 policy->type == IPSEC_SPD_POLICY_IP4_INBOUND_DISCARD)) ||
109 (im->fp_spd_ipv6_in_is_enabled &&
110 PREDICT_TRUE (INDEX_INVALID != spd->fp_spd.ip6_in_lookup_hash_idx) &&
111 (policy->type == IPSEC_SPD_POLICY_IP6_INBOUND_PROTECT ||
112 policy->type == IPSEC_SPD_POLICY_IP6_INBOUND_BYPASS ||
113 policy->type == IPSEC_SPD_POLICY_IP6_INBOUND_DISCARD)) ||
114 (im->fp_spd_ipv6_out_is_enabled &&
115 PREDICT_TRUE (INDEX_INVALID != spd->fp_spd.ip6_out_lookup_hash_idx) &&
116 policy->type == IPSEC_SPD_POLICY_IP6_OUTBOUND))
/* Add (is_add != 0) or delete one policy in the SPD looked up via
 * policy->id.  On successful add, *stat_index receives the policy pool
 * index used for per-policy counters.  Returns 0 or a VNET_API_ERROR_*
 * code.  Policies eligible for the fast path are diverted wholesale to
 * ipsec_fp_add_del_policy(); otherwise the policy lives in the classic
 * per-type, priority-sorted vector spd->policies[type]. */
122 ipsec_add_del_policy (vlib_main_t * vm,
123 ipsec_policy_t * policy, int is_add, u32 * stat_index)
125 ipsec_main_t *im = &ipsec_main;
126 ipsec_spd_t *spd = 0;
/* resolve user-visible SPD id -> pool index; fail if unknown */
131 p = hash_get (im->spd_index_by_spd_id, policy->id);
134 return VNET_API_ERROR_SYSCALL_ERROR_1;
137 spd = pool_elt_at_index (im->spds, spd_index);
139 return VNET_API_ERROR_SYSCALL_ERROR_1;
/* --- ip4 outbound flow-cache invalidation on any policy change --- */
141 if (im->output_flow_cache_flag && !policy->is_ipv6 &&
142 policy->type == IPSEC_SPD_POLICY_IP4_OUTBOUND)
145 * Flow cache entry is valid only when epoch_count value in control
146 * plane and data plane match. Otherwise, flow cache entry is considered
147 * stale. To avoid the race condition of using old epoch_count value
148 * in data plane after the roll over of epoch_count in control plane,
149 * entire flow cache is reset.
151 if (im->epoch_count == 0xFFFFFFFF)
153 /* Reset all the entries in flow cache */
154 clib_memset_u8 (im->ipsec4_out_spd_hash_tbl, 0,
155 im->ipsec4_out_spd_hash_num_buckets *
156 (sizeof (*(im->ipsec4_out_spd_hash_tbl))));
158 /* Increment epoch counter by 1 */
159 clib_atomic_fetch_add_relax (&im->epoch_count, 1);
160 /* Reset spd flow cache counter since all old entries are stale */
161 clib_atomic_store_relax_n (&im->ipsec4_out_spd_flow_cache_entries, 0);
/* --- ip4 inbound flow-cache invalidation, mirror of the above --- */
164 if ((policy->type == IPSEC_SPD_POLICY_IP4_INBOUND_PROTECT ||
165 policy->type == IPSEC_SPD_POLICY_IP4_INBOUND_BYPASS ||
166 policy->type == IPSEC_SPD_POLICY_IP4_INBOUND_DISCARD) &&
167 im->input_flow_cache_flag && !policy->is_ipv6)
170 * Flow cache entry is valid only when input_epoch_count value in control
171 * plane and data plane match. Otherwise, flow cache entry is considered
172 * stale. To avoid the race condition of using old input_epoch_count
173 * value in data plane after the roll over of input_epoch_count in
174 * control plane, entire flow cache is reset.
176 if (im->input_epoch_count == 0xFFFFFFFF)
178 /* Reset all the entries in flow cache */
179 clib_memset_u8 (im->ipsec4_in_spd_hash_tbl, 0,
180 im->ipsec4_in_spd_hash_num_buckets *
181 (sizeof (*(im->ipsec4_in_spd_hash_tbl))));
183 /* Increment epoch counter by 1 */
184 clib_atomic_fetch_add_relax (&im->input_epoch_count, 1);
185 /* Reset spd flow cache counter since all old entries are stale */
/* NOTE(review): unlike the outbound path above this store is not done
 * with clib_atomic_store_relax_n — confirm whether that is intentional */
186 im->ipsec4_in_spd_flow_cache_entries = 0;
/* --- add path: PROTECT policies must resolve and lock their SA --- */
193 if (policy->policy == IPSEC_POLICY_ACTION_PROTECT)
195 index_t sa_index = ipsec_sa_find_and_lock (policy->sa_id);
197 if (INDEX_INVALID == sa_index)
198 return VNET_API_ERROR_SYSCALL_ERROR_1;
199 policy->sa_index = sa_index;
202 policy->sa_index = INDEX_INVALID;
205 * Try adding the policy into fast path SPD first. Only adding to
206 * traditional SPD when failed.
208 if (ipsec_is_fp_enabled (im, spd, policy))
209 return ipsec_fp_add_del_policy ((void *) &spd->fp_spd, policy, 1,
/* classic SPD add: copy into the pool, init counters, insert sorted */
212 pool_get (im->policies, vp);
213 clib_memcpy (vp, policy, sizeof (*vp));
214 policy_index = vp - im->policies;
216 vlib_validate_combined_counter (&ipsec_spd_policy_counters,
218 vlib_zero_combined_counter (&ipsec_spd_policy_counters, policy_index);
219 vec_add1 (spd->policies[policy->type], policy_index);
220 vec_sort_with_function (spd->policies[policy->type],
221 ipsec_spd_entry_sort);
222 *stat_index = policy_index;
/* --- delete path --- */
229 * Try to delete the policy from the fast path SPD first. Delete from
230 * traditional SPD when fp delete fails.
233 if (ipsec_is_fp_enabled (im, spd, policy))
236 if (policy->policy == IPSEC_POLICY_ACTION_PROTECT)
238 index_t sa_index = ipsec_sa_find_and_lock (policy->sa_id);
240 if (INDEX_INVALID == sa_index)
241 return VNET_API_ERROR_SYSCALL_ERROR_1;
242 policy->sa_index = sa_index;
/* drop the temporary lock taken just above for the equality lookup */
243 ipsec_sa_unlock_id (policy->sa_id);
246 policy->sa_index = INDEX_INVALID;
248 return ipsec_fp_add_del_policy ((void *) &spd->fp_spd, policy, 0,
/* classic SPD delete: find the matching entry, remove and unlock SA */
252 vec_foreach_index (ii, (spd->policies[policy->type]))
254 vp = pool_elt_at_index (im->policies,
255 spd->policies[policy->type][ii]);
256 if (ipsec_policy_is_equal (vp, policy))
258 vec_delete (spd->policies[policy->type], 1, ii);
259 ipsec_sa_unlock (vp->sa_index);
260 pool_put (im->policies, vp);
269 static_always_inline void
/* Drop one reference on a fast-path mask-type entry and recycle it when
 * unused.  NOTE(review): the refcount decrement itself is outside this
 * excerpt — confirm it precedes the zero test. */
270 ipsec_fp_release_mask_type (ipsec_main_t *im, u32 mask_type_index)
272 ipsec_fp_mask_type_entry_t *mte =
273 pool_elt_at_index (im->fp_mask_types, mask_type_index);
275 if (mte->refcount == 0)
277 /* this entry is not in use anymore */
/* NOTE(review): the 0xae poisoning lives inside ASSERT(), so it is
 * compiled out of release images — debug-only poisoning, presumably
 * intentional, but worth confirming */
278 ASSERT (clib_memset (mte, 0xae, sizeof (*mte)) == EOK);
279 pool_put (im->fp_mask_types, mte);
283 static_always_inline u32
/* Linear scan of the mask-type pool for an entry byte-equal to *mask;
 * returns its pool index.  The not-found return (presumably ~0, per the
 * callers' `mask_index == ~0` checks) is outside this excerpt. */
284 find_mask_type_index (ipsec_main_t *im, ipsec_fp_5tuple_t *mask)
286 ipsec_fp_mask_type_entry_t *mte;
288 pool_foreach (mte, im->fp_mask_types)
290 if (memcmp (&mte->mask, mask, sizeof (*mask)) == 0)
291 return (mte - im->fp_mask_types);
297 static_always_inline void
/* Build the 40-byte ip6 bihash key: five 64-bit words of (match & mask).
 * kv_val aliases kv->value; its initialization is outside this excerpt
 * — presumably zeroed before the key is inserted, confirm at callers. */
298 fill_ip6_hash_policy_kv (ipsec_fp_5tuple_t *match, ipsec_fp_5tuple_t *mask,
299 clib_bihash_kv_40_8_t *kv)
301 ipsec_fp_lookup_value_t *kv_val = (ipsec_fp_lookup_value_t *) &kv->value;
302 u64 *pmatch = (u64 *) match->kv_40_8.key;
303 u64 *pmask = (u64 *) mask->kv_40_8.key;
304 u64 *pkey = (u64 *) kv->key;
306 *pkey++ = *pmatch++ & *pmask++;
307 *pkey++ = *pmatch++ & *pmask++;
308 *pkey++ = *pmatch++ & *pmask++;
309 *pkey++ = *pmatch++ & *pmask++;
310 *pkey = *pmatch & *pmask;
315 static_always_inline void
/* Build the 16-byte ip4 bihash key: two 64-bit words of (match & mask).
 * kv_val aliases kv->value; its initialization is outside this excerpt. */
316 fill_ip4_hash_policy_kv (ipsec_fp_5tuple_t *match, ipsec_fp_5tuple_t *mask,
317 clib_bihash_kv_16_8_t *kv)
319 ipsec_fp_lookup_value_t *kv_val = (ipsec_fp_lookup_value_t *) &kv->value;
320 u64 *pmatch = (u64 *) match->kv_16_8.key;
321 u64 *pmask = (u64 *) mask->kv_16_8.key;
322 u64 *pkey = (u64 *) kv->key;
324 *pkey++ = *pmatch++ & *pmask++;
325 *pkey = *pmatch & *pmask;
330 static_always_inline u16
/* Body not visible in this excerpt.  Per the callers' comments: given
 * x = start ^ stop, returns a mask that clears the highest set bit of x
 * and everything below it (keeping only the leading bits where start
 * and stop agree); presumably all-ones for x == 0 — confirm. */
331 mask_out_highest_set_bit_u16 (u16 x)
340 static_always_inline u32
/* 32-bit variant; body not visible in this excerpt.  Same contract as
 * mask_out_highest_set_bit_u16 per the callers' comments. */
341 mask_out_highest_set_bit_u32 (u32 x)
351 static_always_inline u64
/* 64-bit variant; body not visible in this excerpt.  Same contract as
 * mask_out_highest_set_bit_u16 per the callers' comments. */
352 mask_out_highest_set_bit_u64 (u64 x)
363 static_always_inline void
/* Fill the port and protocol fields of the 5-tuple mask.  For port-ful
 * protocols (TCP/UDP/SCTP) each port mask keeps only the leading bits
 * shared by the policy's start..stop range (start^stop, then
 * mask_out_highest_set_bit).  The protocol byte is wildcarded only for
 * the "any protocol" policy. */
364 ipsec_fp_get_policy_ports_mask (ipsec_policy_t *policy,
365 ipsec_fp_5tuple_t *mask)
367 if (PREDICT_TRUE ((policy->protocol == IP_PROTOCOL_TCP) ||
368 (policy->protocol == IP_PROTOCOL_UDP) ||
369 (policy->protocol == IP_PROTOCOL_SCTP)))
371 mask->lport = policy->lport.start ^ policy->lport.stop;
372 mask->rport = policy->rport.start ^ policy->rport.stop;
374 mask->lport = mask_out_highest_set_bit_u16 (mask->lport);
376 mask->rport = mask_out_highest_set_bit_u16 (mask->rport);
384 mask->protocol = (policy->protocol == IPSEC_POLICY_PROTOCOL_ANY) ? 0 : ~0;
388 static_always_inline void
/* Derive the ip4 fast-path 5-tuple mask for a policy: start from an
 * all-ones mask, zero the l3 pad, then for each address keep only the
 * leading bits where the policy's start and stop addresses agree.
 * Inbound PROTECT policies keep the SPI in the key (spi wildcarding for
 * other inbound types happens in the not-visible branch after line 418);
 * outbound policies additionally mask ports/protocol. */
389 ipsec_fp_ip4_get_policy_mask (ipsec_policy_t *policy, ipsec_fp_5tuple_t *mask,
392 u32 *pladdr_start = (u32 *) &policy->laddr.start.ip4;
393 u32 *pladdr_stop = (u32 *) &policy->laddr.stop.ip4;
394 u32 *plmask = (u32 *) &mask->laddr;
395 u32 *praddr_start = (u32 *) &policy->raddr.start.ip4;
396 u32 *praddr_stop = (u32 *) &policy->raddr.stop.ip4;
397 u32 *prmask = (u32 *) &mask->raddr;
399 clib_memset_u8 (mask, 0xff, sizeof (ipsec_fp_5tuple_t));
400 clib_memset_u8 (&mask->l3_zero_pad, 0, sizeof (mask->l3_zero_pad));
402 /* find bits where start != stop */
403 *plmask = *pladdr_start ^ *pladdr_stop;
404 *prmask = *praddr_start ^ *praddr_stop;
405 /* Find most significant bit set (that is the first position
406 * start differs from stop). Mask out everything after that bit and
407 * the bit itself. Remember that policy stores start and stop in the net
410 *plmask = clib_host_to_net_u32 (
411 mask_out_highest_set_bit_u32 (clib_net_to_host_u32 (*plmask)));
413 *prmask = clib_host_to_net_u32 (
414 mask_out_highest_set_bit_u32 (clib_net_to_host_u32 (*prmask)));
418 if (policy->type != IPSEC_SPD_POLICY_IP4_INBOUND_PROTECT)
426 ipsec_fp_get_policy_ports_mask (policy, mask);
430 static_always_inline void
431 ipsec_fp_ip6_get_policy_mask (ipsec_policy_t *policy, ipsec_fp_5tuple_t *mask,
434 u64 *pladdr_start = (u64 *) &policy->laddr.start;
435 u64 *pladdr_stop = (u64 *) &policy->laddr.stop;
436 u64 *plmask = (u64 *) &mask->ip6_laddr;
437 u64 *praddr_start = (u64 *) &policy->raddr.start;
438 u64 *praddr_stop = (u64 *) &policy->raddr.stop;
439 u64 *prmask = (u64 *) &mask->ip6_raddr;
441 clib_memset_u8 (mask, 0xff, sizeof (ipsec_fp_5tuple_t));
443 *plmask = (*pladdr_start++ ^ *pladdr_stop++);
445 *prmask = (*praddr_start++ ^ *praddr_stop++);
447 /* Find most significant bit set (that is the first position
448 * start differs from stop). Mask out everything after that bit and
449 * the bit itself. Remember that policy stores start and stop in the net
452 *plmask = clib_host_to_net_u64 (
453 mask_out_highest_set_bit_u64 (clib_net_to_host_u64 (*plmask)));
455 if (*plmask++ & clib_host_to_net_u64 (0x1))
457 *plmask = (*pladdr_start ^ *pladdr_stop);
458 *plmask = clib_host_to_net_u64 (
459 mask_out_highest_set_bit_u64 (clib_net_to_host_u64 (*plmask)));
464 *prmask = clib_host_to_net_u64 (
465 mask_out_highest_set_bit_u64 (clib_net_to_host_u64 (*prmask)));
467 if (*prmask++ & clib_host_to_net_u64 (0x1))
469 *prmask = (*pladdr_start ^ *pladdr_stop);
470 *prmask = clib_host_to_net_u64 (
471 mask_out_highest_set_bit_u64 (clib_net_to_host_u64 (*prmask)));
478 if (policy->type != IPSEC_SPD_POLICY_IP4_INBOUND_PROTECT)
486 ipsec_fp_get_policy_ports_mask (policy, mask);
490 static_always_inline void
/* Build the concrete (unmasked) 5-tuple for a policy, used as the match
 * half of the bihash key: start addresses and start ports of the
 * policy's ranges.  For inbound PROTECT policies with a resolved SA the
 * tuple carries the SA's SPI (the assignment from `s` falls outside this
 * excerpt — presumably tuple->spi = s->spi, confirm); otherwise spi is
 * INDEX_INVALID and the policy type is recorded as the action. */
491 ipsec_fp_get_policy_5tuple (ipsec_policy_t *policy, ipsec_fp_5tuple_t *tuple,
494 memset (tuple, 0, sizeof (*tuple));
495 tuple->is_ipv6 = policy->is_ipv6;
498 tuple->ip6_laddr = policy->laddr.start.ip6;
499 tuple->ip6_raddr = policy->raddr.start.ip6;
503 tuple->laddr = policy->laddr.start.ip4;
504 tuple->raddr = policy->raddr.start.ip4;
510 if ((policy->type == IPSEC_SPD_POLICY_IP4_INBOUND_PROTECT ||
511 policy->type == IPSEC_SPD_POLICY_IP6_INBOUND_PROTECT) &&
512 policy->sa_index != INDEX_INVALID)
514 ipsec_sa_t *s = ipsec_sa_get (policy->sa_index);
518 tuple->spi = INDEX_INVALID;
519 tuple->action = policy->type;
523 tuple->protocol = policy->protocol;
525 tuple->lport = policy->lport.start;
526 tuple->rport = policy->rport.start;
529 static_always_inline int
530 ipsec_fp_mask_type_idx_cmp (ipsec_fp_mask_id_t *mask_id, u32 *idx)
532 return mask_id->mask_type_idx == *idx;
/* Insert a policy into the ip4 fast-path SPD: allocate a pool slot and
 * counters, find-or-create the mask-type entry for the policy's derived
 * mask, then add the masked 5-tuple key to the direction-appropriate
 * 16_8 bihash, appending this policy's index to the bucket's vector of
 * policy ids.  The trailing lines are the error-unwind path (release
 * pool slot and mask-type reference). */
536 ipsec_fp_ip4_add_policy (ipsec_main_t *im, ipsec_spd_fp_t *fp_spd,
537 ipsec_policy_t *policy, u32 *stat_index)
539 u32 mask_index, searched_idx;
541 ipsec_fp_mask_type_entry_t *mte;
543 clib_bihash_kv_16_8_t kv;
544 clib_bihash_kv_16_8_t result;
545 ipsec_fp_lookup_value_t *result_val =
546 (ipsec_fp_lookup_value_t *) &result.value;
547 ipsec_fp_lookup_value_t *key_val = (ipsec_fp_lookup_value_t *) &kv.value;
549 ipsec_fp_5tuple_t mask, policy_5tuple;
551 bool inbound = ipsec_is_policy_inbound (policy);
/* pick the in/out lookup hash for this SPD */
552 clib_bihash_16_8_t *bihash_table =
553 inbound ? pool_elt_at_index (im->fp_ip4_lookup_hashes_pool,
554 fp_spd->ip4_in_lookup_hash_idx) :
555 pool_elt_at_index (im->fp_ip4_lookup_hashes_pool,
556 fp_spd->ip4_out_lookup_hash_idx);
558 ipsec_fp_ip4_get_policy_mask (policy, &mask, inbound);
559 pool_get (im->policies, vp);
560 policy_index = vp - im->policies;
561 vlib_validate_combined_counter (&ipsec_spd_policy_counters, policy_index);
562 vlib_zero_combined_counter (&ipsec_spd_policy_counters, policy_index);
563 *stat_index = policy_index;
564 mask_index = find_mask_type_index (im, &mask);
566 if (mask_index == ~0)
568 /* mask type not found, we need to create a new entry */
569 pool_get (im->fp_mask_types, mte);
570 mask_index = mte - im->fp_mask_types;
574 mte = im->fp_mask_types + mask_index;
576 policy->fp_mask_type_id = mask_index;
577 ipsec_fp_get_policy_5tuple (policy, &policy_5tuple, inbound);
579 fill_ip4_hash_policy_kv (&policy_5tuple, &mask, &kv);
581 res = clib_bihash_search_inline_2_16_8 (bihash_table, &kv, &result);
584 /* key was not found, create a new entry */
585 vec_add1 (key_val->fp_policies_ids, policy_index);
586 res = clib_bihash_add_del_16_8 (bihash_table, &kv, 1);
/* existing bucket: re-add the kv only when vec_add1 reallocated the
 * policy-ids vector (its address is stored in the bihash value) */
594 if (vec_max_len (result_val->fp_policies_ids) !=
595 vec_len (result_val->fp_policies_ids))
597 /* no need to resize */
598 vec_add1 (result_val->fp_policies_ids, policy_index);
602 vec_add1 (result_val->fp_policies_ids, policy_index);
604 res = clib_bihash_add_del_16_8 (bihash_table, &result, 1);
/* freshly created mask-type entry: record the mask itself */
611 if (mte->refcount == 0)
613 clib_memcpy (&mte->mask, &mask, sizeof (mask));
/* track per-type mask-id refcounts on the SPD */
618 vec_search_with_function (fp_spd->fp_mask_ids[policy->type], &mask_index,
619 ipsec_fp_mask_type_idx_cmp);
620 if (~0 == searched_idx)
622 ipsec_fp_mask_id_t mask_id = { mask_index, 1 };
623 vec_add1 (fp_spd->fp_mask_ids[policy->type], mask_id);
626 (fp_spd->fp_mask_ids[policy->type] + searched_idx)->refcount++;
629 vec_add1 (fp_spd->fp_policies[policy->type], policy_index);
630 clib_memcpy (vp, policy, sizeof (*vp));
/* error-unwind path */
635 pool_put (im->policies, vp);
636 ipsec_fp_release_mask_type (im, mask_index);
/* ip6 twin of ipsec_fp_ip4_add_policy(): same flow, but with the ip6
 * mask derivation and the 40_8 bihash keyed on the full 5-tuple. */
641 ipsec_fp_ip6_add_policy (ipsec_main_t *im, ipsec_spd_fp_t *fp_spd,
642 ipsec_policy_t *policy, u32 *stat_index)
645 u32 mask_index, searched_idx;
647 ipsec_fp_mask_type_entry_t *mte;
649 clib_bihash_kv_40_8_t kv;
650 clib_bihash_kv_40_8_t result;
651 ipsec_fp_lookup_value_t *result_val =
652 (ipsec_fp_lookup_value_t *) &result.value;
653 ipsec_fp_lookup_value_t *key_val = (ipsec_fp_lookup_value_t *) &kv.value;
655 ipsec_fp_5tuple_t mask, policy_5tuple;
657 bool inbound = ipsec_is_policy_inbound (policy);
659 ipsec_fp_ip6_get_policy_mask (policy, &mask, inbound);
660 pool_get (im->policies, vp);
661 policy_index = vp - im->policies;
662 vlib_validate_combined_counter (&ipsec_spd_policy_counters, policy_index);
663 vlib_zero_combined_counter (&ipsec_spd_policy_counters, policy_index);
664 *stat_index = policy_index;
665 mask_index = find_mask_type_index (im, &mask);
/* pick the in/out lookup hash for this SPD */
666 clib_bihash_40_8_t *bihash_table =
667 inbound ? pool_elt_at_index (im->fp_ip6_lookup_hashes_pool,
668 fp_spd->ip6_in_lookup_hash_idx) :
669 pool_elt_at_index (im->fp_ip6_lookup_hashes_pool,
670 fp_spd->ip6_out_lookup_hash_idx);
672 if (mask_index == ~0)
674 /* mask type not found, we need to create a new entry */
675 pool_get (im->fp_mask_types, mte);
676 mask_index = mte - im->fp_mask_types;
680 mte = im->fp_mask_types + mask_index;
682 policy->fp_mask_type_id = mask_index;
683 ipsec_fp_get_policy_5tuple (policy, &policy_5tuple, inbound);
685 fill_ip6_hash_policy_kv (&policy_5tuple, &mask, &kv);
687 res = clib_bihash_search_inline_2_40_8 (bihash_table, &kv, &result);
690 /* key was not found, create a new entry */
691 vec_add1 (key_val->fp_policies_ids, policy_index);
692 res = clib_bihash_add_del_40_8 (bihash_table, &kv, 1);
/* existing bucket: re-add the kv only when vec_add1 reallocated the
 * policy-ids vector (its address is stored in the bihash value) */
699 if (vec_max_len (result_val->fp_policies_ids) !=
700 vec_len (result_val->fp_policies_ids))
702 /* no need to resize */
703 vec_add1 (result_val->fp_policies_ids, policy_index);
707 vec_add1 (result_val->fp_policies_ids, policy_index);
709 res = clib_bihash_add_del_40_8 (bihash_table, &result, 1);
/* freshly created mask-type entry: record the mask itself */
716 if (mte->refcount == 0)
718 clib_memcpy (&mte->mask, &mask, sizeof (mask));
/* track per-type mask-id refcounts on the SPD */
723 vec_search_with_function (fp_spd->fp_mask_ids[policy->type], &mask_index,
724 ipsec_fp_mask_type_idx_cmp);
725 if (~0 == searched_idx)
727 ipsec_fp_mask_id_t mask_id = { mask_index, 1 };
728 vec_add1 (fp_spd->fp_mask_ids[policy->type], mask_id);
731 (fp_spd->fp_mask_ids[policy->type] + searched_idx)->refcount++;
734 vec_add1 (fp_spd->fp_policies[policy->type], policy_index);
735 clib_memcpy (vp, policy, sizeof (*vp));
/* error-unwind path */
740 pool_put (im->policies, vp);
741 ipsec_fp_release_mask_type (im, mask_index);
/* Remove a policy from the ip6 fast-path SPD: rebuild its masked key,
 * look it up in the 40_8 bihash, find the matching policy in the
 * bucket's id vector, drop it (deleting the whole kv when it was the
 * bucket's last policy), release the SPD mask-id refcount and the
 * mask-type entry, unlock the SA and return the pool slot. */
746 ipsec_fp_ip6_del_policy (ipsec_main_t *im, ipsec_spd_fp_t *fp_spd,
747 ipsec_policy_t *policy)
750 ipsec_fp_5tuple_t mask = { 0 }, policy_5tuple;
751 clib_bihash_kv_40_8_t kv;
752 clib_bihash_kv_40_8_t result;
753 ipsec_fp_lookup_value_t *result_val =
754 (ipsec_fp_lookup_value_t *) &result.value;
755 bool inbound = ipsec_is_policy_inbound (policy);
756 clib_bihash_40_8_t *bihash_table =
757 inbound ? pool_elt_at_index (im->fp_ip6_lookup_hashes_pool,
758 fp_spd->ip6_in_lookup_hash_idx) :
759 pool_elt_at_index (im->fp_ip6_lookup_hashes_pool,
760 fp_spd->ip6_out_lookup_hash_idx);
765 ipsec_fp_ip6_get_policy_mask (policy, &mask, inbound);
766 ipsec_fp_get_policy_5tuple (policy, &policy_5tuple, inbound);
767 fill_ip6_hash_policy_kv (&policy_5tuple, &mask, &kv);
768 res = clib_bihash_search_inline_2_40_8 (bihash_table, &kv, &result);
/* scan the bucket for the exact matching policy */
773 vec_foreach_index (ii, result_val->fp_policies_ids)
776 pool_elt_at_index (im->policies, *(result_val->fp_policies_ids + ii));
777 if (ipsec_policy_is_equal (vp, policy))
779 vec_foreach_index (iii, fp_spd->fp_policies[policy->type])
781 if (*(fp_spd->fp_policies[policy->type] + iii) ==
782 *(result_val->fp_policies_ids + ii))
/* last policy in this bucket: delete the whole kv */
784 if (vec_len (result_val->fp_policies_ids) == 1)
786 vec_free (result_val->fp_policies_ids);
787 clib_bihash_add_del_40_8 (bihash_table, &result, 0);
791 vec_del1 (result_val->fp_policies_ids, ii);
793 vec_del1 (fp_spd->fp_policies[policy->type], iii);
/* drop this policy's reference on the SPD's mask-id entry */
795 vec_foreach_index (imt, fp_spd->fp_mask_ids[policy->type])
797 if ((fp_spd->fp_mask_ids[policy->type] + imt)
798 ->mask_type_idx == vp->fp_mask_type_id)
801 if ((fp_spd->fp_mask_ids[policy->type] + imt)
803 vec_del1 (fp_spd->fp_mask_ids[policy->type], imt);
818 ipsec_fp_release_mask_type (im, vp->fp_mask_type_id);
819 ipsec_sa_unlock (vp->sa_index);
820 pool_put (im->policies, vp);
/* ip4 twin of ipsec_fp_ip6_del_policy(): identical flow against the
 * 16_8 bihash keyed on the ip4 masked 5-tuple. */
829 ipsec_fp_ip4_del_policy (ipsec_main_t *im, ipsec_spd_fp_t *fp_spd,
830 ipsec_policy_t *policy)
833 ipsec_fp_5tuple_t mask = { 0 }, policy_5tuple;
834 clib_bihash_kv_16_8_t kv;
835 clib_bihash_kv_16_8_t result;
836 ipsec_fp_lookup_value_t *result_val =
837 (ipsec_fp_lookup_value_t *) &result.value;
838 bool inbound = ipsec_is_policy_inbound (policy);
841 clib_bihash_16_8_t *bihash_table =
842 inbound ? pool_elt_at_index (im->fp_ip4_lookup_hashes_pool,
843 fp_spd->ip4_in_lookup_hash_idx) :
844 pool_elt_at_index (im->fp_ip4_lookup_hashes_pool,
845 fp_spd->ip4_out_lookup_hash_idx);
847 ipsec_fp_ip4_get_policy_mask (policy, &mask, inbound);
848 ipsec_fp_get_policy_5tuple (policy, &policy_5tuple, inbound);
849 fill_ip4_hash_policy_kv (&policy_5tuple, &mask, &kv);
850 res = clib_bihash_search_inline_2_16_8 (bihash_table, &kv, &result);
/* scan the bucket for the exact matching policy */
856 vec_foreach_index (ii, result_val->fp_policies_ids)
859 pool_elt_at_index (im->policies, *(result_val->fp_policies_ids + ii));
860 if (ipsec_policy_is_equal (vp, policy))
862 vec_foreach_index (iii, fp_spd->fp_policies[policy->type])
864 if (*(fp_spd->fp_policies[policy->type] + iii) ==
865 *(result_val->fp_policies_ids + ii))
/* last policy in this bucket: delete the whole kv */
867 if (vec_len (result_val->fp_policies_ids) == 1)
869 vec_free (result_val->fp_policies_ids);
870 clib_bihash_add_del_16_8 (bihash_table, &result, 0);
874 vec_del1 (result_val->fp_policies_ids, ii);
876 vec_del1 (fp_spd->fp_policies[policy->type], iii);
/* drop this policy's reference on the SPD's mask-id entry */
878 vec_foreach_index (imt, fp_spd->fp_mask_ids[policy->type])
880 if ((fp_spd->fp_mask_ids[policy->type] + imt)
881 ->mask_type_idx == vp->fp_mask_type_id)
884 if ((fp_spd->fp_mask_ids[policy->type] + imt)
886 vec_del1 (fp_spd->fp_mask_ids[policy->type], imt);
901 ipsec_fp_release_mask_type (im, vp->fp_mask_type_id);
902 ipsec_sa_unlock (vp->sa_index);
903 pool_put (im->policies, vp);
/* Fast-path entry point called from ipsec_add_del_policy(): dispatch to
 * the ip4/ip6 add or delete helper based on is_add and
 * policy->is_ipv6.  fp_spd is an ipsec_spd_fp_t passed as void *. */
912 ipsec_fp_add_del_policy (void *fp_spd, ipsec_policy_t *policy, int is_add,
915 ipsec_main_t *im = &ipsec_main;
919 return ipsec_fp_ip6_add_policy (im, (ipsec_spd_fp_t *) fp_spd, policy,
922 return ipsec_fp_ip4_add_policy (im, (ipsec_spd_fp_t *) fp_spd, policy,
925 else if (policy->is_ipv6)
927 return ipsec_fp_ip6_del_policy (im, (ipsec_spd_fp_t *) fp_spd, policy);
929 return ipsec_fp_ip4_del_policy (im, (ipsec_spd_fp_t *) fp_spd, policy);
933 * fd.io coding-style-patch-verification: ON
936 * eval: (c-set-style "gnu")