/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16 #include <vnet/ipsec/ipsec.h>
20 * Policy packet & bytes counters
22 vlib_combined_counter_main_t ipsec_spd_policy_counters = {
24 .stat_segment_name = "/net/ipsec/policy",
28 ipsec_spd_entry_sort (void *a1, void *a2)
30 ipsec_main_t *im = &ipsec_main;
33 ipsec_policy_t *p1, *p2;
35 p1 = pool_elt_at_index (im->policies, *id1);
36 p2 = pool_elt_at_index (im->policies, *id2);
38 return p2->priority - p1->priority;
44 ipsec_policy_mk_type (bool is_outbound,
46 ipsec_policy_action_t action,
47 ipsec_spd_policy_type_t * type)
52 IPSEC_SPD_POLICY_IP6_OUTBOUND : IPSEC_SPD_POLICY_IP4_OUTBOUND);
59 case IPSEC_POLICY_ACTION_PROTECT:
61 IPSEC_SPD_POLICY_IP6_INBOUND_PROTECT :
62 IPSEC_SPD_POLICY_IP4_INBOUND_PROTECT);
64 case IPSEC_POLICY_ACTION_BYPASS:
66 IPSEC_SPD_POLICY_IP6_INBOUND_BYPASS :
67 IPSEC_SPD_POLICY_IP4_INBOUND_BYPASS);
69 case IPSEC_POLICY_ACTION_DISCARD:
71 IPSEC_SPD_POLICY_IP6_INBOUND_DISCARD :
72 IPSEC_SPD_POLICY_IP4_INBOUND_DISCARD);
74 case IPSEC_POLICY_ACTION_RESOLVE:
79 /* Unsupported type */
84 ipsec_add_del_policy (vlib_main_t * vm,
85 ipsec_policy_t * policy, int is_add, u32 * stat_index)
87 ipsec_main_t *im = &ipsec_main;
93 p = hash_get (im->spd_index_by_spd_id, policy->id);
96 return VNET_API_ERROR_SYSCALL_ERROR_1;
99 spd = pool_elt_at_index (im->spds, spd_index);
101 return VNET_API_ERROR_SYSCALL_ERROR_1;
103 if (im->output_flow_cache_flag && !policy->is_ipv6 &&
104 policy->type == IPSEC_SPD_POLICY_IP4_OUTBOUND)
107 * Flow cache entry is valid only when epoch_count value in control
108 * plane and data plane match. Otherwise, flow cache entry is considered
109 * stale. To avoid the race condition of using old epoch_count value
110 * in data plane after the roll over of epoch_count in control plane,
111 * entire flow cache is reset.
113 if (im->epoch_count == 0xFFFFFFFF)
115 /* Reset all the entries in flow cache */
116 clib_memset_u8 (im->ipsec4_out_spd_hash_tbl, 0,
117 im->ipsec4_out_spd_hash_num_buckets *
118 (sizeof (*(im->ipsec4_out_spd_hash_tbl))));
120 /* Increment epoch counter by 1 */
121 clib_atomic_fetch_add_relax (&im->epoch_count, 1);
122 /* Reset spd flow cache counter since all old entries are stale */
123 clib_atomic_store_relax_n (&im->ipsec4_out_spd_flow_cache_entries, 0);
126 if ((policy->type == IPSEC_SPD_POLICY_IP4_INBOUND_PROTECT ||
127 policy->type == IPSEC_SPD_POLICY_IP4_INBOUND_BYPASS ||
128 policy->type == IPSEC_SPD_POLICY_IP4_INBOUND_DISCARD) &&
129 im->input_flow_cache_flag && !policy->is_ipv6)
132 * Flow cache entry is valid only when input_epoch_count value in control
133 * plane and data plane match. Otherwise, flow cache entry is considered
134 * stale. To avoid the race condition of using old input_epoch_count
135 * value in data plane after the roll over of input_epoch_count in
136 * control plane, entire flow cache is reset.
138 if (im->input_epoch_count == 0xFFFFFFFF)
140 /* Reset all the entries in flow cache */
141 clib_memset_u8 (im->ipsec4_in_spd_hash_tbl, 0,
142 im->ipsec4_in_spd_hash_num_buckets *
143 (sizeof (*(im->ipsec4_in_spd_hash_tbl))));
145 /* Increment epoch counter by 1 */
146 clib_atomic_fetch_add_relax (&im->input_epoch_count, 1);
147 /* Reset spd flow cache counter since all old entries are stale */
148 im->ipsec4_in_spd_flow_cache_entries = 0;
155 if (policy->policy == IPSEC_POLICY_ACTION_PROTECT)
157 index_t sa_index = ipsec_sa_find_and_lock (policy->sa_id);
159 if (INDEX_INVALID == sa_index)
160 return VNET_API_ERROR_SYSCALL_ERROR_1;
161 policy->sa_index = sa_index;
164 policy->sa_index = INDEX_INVALID;
167 * Try adding the policy into fast path SPD first. Only adding to
168 * traditional SPD when failed.
170 if ((im->ipv4_fp_spd_is_enabled &&
171 policy->type == IPSEC_SPD_POLICY_IP4_OUTBOUND) ||
172 (im->ipv6_fp_spd_is_enabled &&
173 policy->type == IPSEC_SPD_POLICY_IP6_OUTBOUND))
174 return ipsec_fp_add_del_policy ((void *) &spd->fp_spd, policy, 1,
177 pool_get (im->policies, vp);
178 clib_memcpy (vp, policy, sizeof (*vp));
179 policy_index = vp - im->policies;
181 vlib_validate_combined_counter (&ipsec_spd_policy_counters,
183 vlib_zero_combined_counter (&ipsec_spd_policy_counters, policy_index);
184 vec_add1 (spd->policies[policy->type], policy_index);
185 vec_sort_with_function (spd->policies[policy->type],
186 ipsec_spd_entry_sort);
187 *stat_index = policy_index;
194 * Try to delete the policy from the fast path SPD first. Delete from
195 * traditional SPD when fp delete fails.
198 if ((im->ipv4_fp_spd_is_enabled &&
199 policy->type == IPSEC_SPD_POLICY_IP4_OUTBOUND) ||
200 (im->ipv6_fp_spd_is_enabled &&
201 policy->type == IPSEC_SPD_POLICY_IP6_OUTBOUND))
202 return ipsec_fp_add_del_policy ((void *) &spd->fp_spd, policy, 0,
205 vec_foreach_index (ii, (spd->policies[policy->type]))
207 vp = pool_elt_at_index (im->policies,
208 spd->policies[policy->type][ii]);
209 if (ipsec_policy_is_equal (vp, policy))
211 vec_delete (spd->policies[policy->type], 1, ii);
212 ipsec_sa_unlock (vp->sa_index);
213 pool_put (im->policies, vp);
222 static_always_inline void
223 release_mask_type_index (ipsec_main_t *im, u32 mask_type_index)
225 ipsec_fp_mask_type_entry_t *mte =
226 pool_elt_at_index (im->fp_mask_types, mask_type_index);
228 if (mte->refcount == 0)
230 /* this entry is not in use anymore */
231 ASSERT (clib_memset (mte, 0xae, sizeof (*mte)) == EOK);
232 pool_put (im->fp_mask_types, mte);
236 static_always_inline u32
237 find_mask_type_index (ipsec_main_t *im, ipsec_fp_5tuple_t *mask)
239 ipsec_fp_mask_type_entry_t *mte;
241 pool_foreach (mte, im->fp_mask_types)
243 if (memcmp (&mte->mask, mask, sizeof (*mask)) == 0)
244 return (mte - im->fp_mask_types);
250 static_always_inline void
251 fill_ip6_hash_policy_kv (ipsec_fp_5tuple_t *match, ipsec_fp_5tuple_t *mask,
252 clib_bihash_kv_40_8_t *kv)
254 ipsec_fp_lookup_value_t *kv_val = (ipsec_fp_lookup_value_t *) &kv->value;
255 u64 *pmatch = (u64 *) match->kv_40_8.key;
256 u64 *pmask = (u64 *) mask->kv_40_8.key;
257 u64 *pkey = (u64 *) kv->key;
259 *pkey++ = *pmatch++ & *pmask++;
260 *pkey++ = *pmatch++ & *pmask++;
261 *pkey++ = *pmatch++ & *pmask++;
262 *pkey++ = *pmatch++ & *pmask++;
263 *pkey = *pmatch & *pmask;
268 static_always_inline void
269 fill_ip4_hash_policy_kv (ipsec_fp_5tuple_t *match, ipsec_fp_5tuple_t *mask,
270 clib_bihash_kv_16_8_t *kv)
272 ipsec_fp_lookup_value_t *kv_val = (ipsec_fp_lookup_value_t *) &kv->value;
273 u64 *pmatch = (u64 *) match->kv_16_8.key;
274 u64 *pmask = (u64 *) mask->kv_16_8.key;
275 u64 *pkey = (u64 *) kv->key;
277 *pkey++ = *pmatch++ & *pmask++;
278 *pkey = *pmatch & *pmask;
283 static_always_inline u16
284 get_highest_set_bit_u16 (u16 x)
293 static_always_inline u32
294 get_highest_set_bit_u32 (u32 x)
304 static_always_inline u64
305 mask_out_highest_set_bit_u64 (u64 x)
316 static_always_inline void
317 ipsec_fp_get_policy_ports_mask (ipsec_policy_t *policy,
318 ipsec_fp_5tuple_t *mask)
320 if (PREDICT_TRUE ((policy->protocol == IP_PROTOCOL_TCP) ||
321 (policy->protocol == IP_PROTOCOL_UDP) ||
322 (policy->protocol == IP_PROTOCOL_SCTP)))
324 mask->lport = policy->lport.start ^ policy->lport.stop;
325 mask->rport = policy->rport.start ^ policy->rport.stop;
327 mask->lport = get_highest_set_bit_u16 (mask->lport);
328 mask->lport = ~(mask->lport - 1) & (~mask->lport);
330 mask->rport = get_highest_set_bit_u16 (mask->rport);
331 mask->rport = ~(mask->rport - 1) & (~mask->rport);
339 mask->protocol = (policy->protocol == IPSEC_POLICY_PROTOCOL_ANY) ? 0 : ~0;
342 static_always_inline void
343 ipsec_fp_ip4_get_policy_mask (ipsec_policy_t *policy, ipsec_fp_5tuple_t *mask)
345 u32 *pladdr_start = (u32 *) &policy->laddr.start.ip4;
346 u32 *pladdr_stop = (u32 *) &policy->laddr.stop.ip4;
347 u32 *plmask = (u32 *) &mask->laddr;
348 u32 *praddr_start = (u32 *) &policy->raddr.start.ip4;
349 u32 *praddr_stop = (u32 *) &policy->raddr.stop.ip4;
350 u32 *prmask = (u32 *) &mask->raddr;
352 clib_memset_u8 (mask, 0xff, sizeof (ipsec_fp_5tuple_t));
353 clib_memset_u8 (&mask->l3_zero_pad, 0, sizeof (mask->l3_zero_pad));
355 /* find bits where start != stop */
356 *plmask = *pladdr_start ^ *pladdr_stop;
357 *prmask = *praddr_start ^ *praddr_stop;
358 /* Find most significant bit set (that is the first position
359 * start differs from stop). Mask out everything after that bit and
360 * the bit itself. Remember that policy stores start and stop in the net
363 *plmask = get_highest_set_bit_u32 (clib_net_to_host_u32 (*plmask));
364 *plmask = clib_host_to_net_u32 (~(*plmask - 1) & (~*plmask));
366 *prmask = get_highest_set_bit_u32 (clib_net_to_host_u32 (*prmask));
367 *prmask = clib_host_to_net_u32 (~(*prmask - 1) & (~*prmask));
369 if (PREDICT_TRUE ((policy->protocol == IP_PROTOCOL_TCP) ||
370 (policy->protocol == IP_PROTOCOL_UDP) ||
371 (policy->protocol == IP_PROTOCOL_SCTP)))
373 mask->lport = policy->lport.start ^ policy->lport.stop;
374 mask->rport = policy->rport.start ^ policy->rport.stop;
376 mask->lport = get_highest_set_bit_u16 (mask->lport);
377 mask->lport = ~(mask->lport - 1) & (~mask->lport);
379 mask->rport = get_highest_set_bit_u16 (mask->rport);
380 mask->rport = ~(mask->rport - 1) & (~mask->rport);
388 mask->protocol = (policy->protocol == IPSEC_POLICY_PROTOCOL_ANY) ? 0 : ~0;
391 static_always_inline void
392 ipsec_fp_ip6_get_policy_mask (ipsec_policy_t *policy, ipsec_fp_5tuple_t *mask)
394 u64 *pladdr_start = (u64 *) &policy->laddr.start;
395 u64 *pladdr_stop = (u64 *) &policy->laddr.stop;
396 u64 *plmask = (u64 *) &mask->ip6_laddr;
397 u64 *praddr_start = (u64 *) &policy->raddr.start;
398 u64 *praddr_stop = (u64 *) &policy->raddr.stop;
399 u64 *prmask = (u64 *) &mask->ip6_raddr;
401 clib_memset_u8 (mask, 0xff, sizeof (ipsec_fp_5tuple_t));
403 *plmask = (*pladdr_start++ ^ *pladdr_stop++);
405 *prmask = (*praddr_start++ ^ *praddr_stop++);
407 /* Find most significant bit set (that is the first position
408 * start differs from stop). Mask out everything after that bit and
409 * the bit itself. Remember that policy stores start and stop in the net
412 *plmask = clib_host_to_net_u64 (
413 mask_out_highest_set_bit_u64 (clib_net_to_host_u64 (*plmask)));
415 if (*plmask++ & clib_host_to_net_u64 (0x1))
417 *plmask = (*pladdr_start ^ *pladdr_stop);
418 *plmask = clib_host_to_net_u64 (
419 mask_out_highest_set_bit_u64 (clib_net_to_host_u64 (*plmask)));
424 *prmask = clib_host_to_net_u64 (
425 mask_out_highest_set_bit_u64 (clib_net_to_host_u64 (*prmask)));
427 if (*prmask++ & clib_host_to_net_u64 (0x1))
429 *prmask = (*pladdr_start ^ *pladdr_stop);
430 *prmask = clib_host_to_net_u64 (
431 mask_out_highest_set_bit_u64 (clib_net_to_host_u64 (*prmask)));
436 ipsec_fp_get_policy_ports_mask (policy, mask);
439 static_always_inline void
440 ipsec_fp_get_policy_5tuple (ipsec_policy_t *policy, ipsec_fp_5tuple_t *tuple)
442 memset (tuple, 0, sizeof (*tuple));
443 tuple->is_ipv6 = policy->is_ipv6;
446 tuple->ip6_laddr = policy->laddr.start.ip6;
447 tuple->ip6_raddr = policy->raddr.start.ip6;
451 tuple->laddr = policy->laddr.start.ip4;
452 tuple->raddr = policy->raddr.start.ip4;
455 tuple->protocol = policy->protocol;
457 tuple->lport = policy->lport.start;
458 tuple->rport = policy->rport.start;
462 ipsec_fp_ip4_add_policy (ipsec_main_t *im, ipsec_spd_fp_t *fp_spd,
463 ipsec_policy_t *policy, u32 *stat_index)
467 ipsec_fp_mask_type_entry_t *mte;
469 clib_bihash_kv_16_8_t kv;
470 clib_bihash_kv_16_8_t result;
471 ipsec_fp_lookup_value_t *result_val =
472 (ipsec_fp_lookup_value_t *) &result.value;
473 ipsec_fp_lookup_value_t *key_val = (ipsec_fp_lookup_value_t *) &kv.value;
475 ipsec_fp_5tuple_t mask, policy_5tuple;
478 ipsec_fp_ip4_get_policy_mask (policy, &mask);
479 pool_get (im->policies, vp);
480 policy_index = vp - im->policies;
481 vlib_validate_combined_counter (&ipsec_spd_policy_counters, policy_index);
482 vlib_zero_combined_counter (&ipsec_spd_policy_counters, policy_index);
483 *stat_index = policy_index;
484 mask_index = find_mask_type_index (im, &mask);
486 if (mask_index == ~0)
488 /* mask type not found, we need to create a new entry */
489 pool_get (im->fp_mask_types, mte);
490 mask_index = mte - im->fp_mask_types;
494 mte = im->fp_mask_types + mask_index;
496 policy->fp_mask_type_id = mask_index;
497 ipsec_fp_get_policy_5tuple (policy, &policy_5tuple);
499 fill_ip4_hash_policy_kv (&policy_5tuple, &mask, &kv);
501 res = clib_bihash_search_inline_2_16_8 (&fp_spd->fp_ip4_lookup_hash, &kv,
505 /* key was not found crate a new entry */
506 vec_add1 (key_val->fp_policies_ids, policy_index);
507 res = clib_bihash_add_del_16_8 (&fp_spd->fp_ip4_lookup_hash, &kv, 1);
514 if (vec_max_len (result_val->fp_policies_ids) !=
515 vec_len (result_val->fp_policies_ids))
517 /* no need to resize */
518 vec_add1 (result_val->fp_policies_ids, policy_index);
522 vec_add1 (result_val->fp_policies_ids, policy_index);
525 clib_bihash_add_del_16_8 (&fp_spd->fp_ip4_lookup_hash, &result, 1);
532 if (mte->refcount == 0)
534 clib_memcpy (&mte->mask, &mask, sizeof (mask));
536 vec_add1 (fp_spd->fp_mask_types[policy->type], mask_index);
540 vec_add1 (fp_spd->fp_policies[policy->type], policy_index);
541 clib_memcpy (vp, policy, sizeof (*vp));
546 pool_put (im->policies, vp);
547 release_mask_type_index (im, mask_index);
552 ipsec_fp_ip6_add_policy (ipsec_main_t *im, ipsec_spd_fp_t *fp_spd,
553 ipsec_policy_t *policy, u32 *stat_index)
558 ipsec_fp_mask_type_entry_t *mte;
560 clib_bihash_kv_40_8_t kv;
561 clib_bihash_kv_40_8_t result;
562 ipsec_fp_lookup_value_t *result_val =
563 (ipsec_fp_lookup_value_t *) &result.value;
564 ipsec_fp_lookup_value_t *key_val = (ipsec_fp_lookup_value_t *) &kv.value;
566 ipsec_fp_5tuple_t mask, policy_5tuple;
568 ipsec_fp_ip6_get_policy_mask (policy, &mask);
570 pool_get (im->policies, vp);
571 policy_index = vp - im->policies;
572 vlib_validate_combined_counter (&ipsec_spd_policy_counters, policy_index);
573 vlib_zero_combined_counter (&ipsec_spd_policy_counters, policy_index);
574 *stat_index = policy_index;
575 mask_index = find_mask_type_index (im, &mask);
577 if (mask_index == ~0)
579 /* mask type not found, we need to create a new entry */
580 pool_get (im->fp_mask_types, mte);
581 mask_index = mte - im->fp_mask_types;
585 mte = im->fp_mask_types + mask_index;
587 policy->fp_mask_type_id = mask_index;
588 ipsec_fp_get_policy_5tuple (policy, &policy_5tuple);
590 fill_ip6_hash_policy_kv (&policy_5tuple, &mask, &kv);
592 res = clib_bihash_search_inline_2_40_8 (&fp_spd->fp_ip6_lookup_hash, &kv,
596 /* key was not found crate a new entry */
597 vec_add1 (key_val->fp_policies_ids, policy_index);
598 res = clib_bihash_add_del_40_8 (&fp_spd->fp_ip6_lookup_hash, &kv, 1);
605 if (vec_max_len (result_val->fp_policies_ids) !=
606 vec_len (result_val->fp_policies_ids))
608 /* no need to resize */
609 vec_add1 (result_val->fp_policies_ids, policy_index);
613 vec_add1 (result_val->fp_policies_ids, policy_index);
616 clib_bihash_add_del_40_8 (&fp_spd->fp_ip6_lookup_hash, &result, 1);
623 if (mte->refcount == 0)
625 clib_memcpy (&mte->mask, &mask, sizeof (mask));
627 vec_add1 (fp_spd->fp_mask_types[policy->type], mask_index);
631 vec_add1 (fp_spd->fp_policies[policy->type], policy_index);
632 clib_memcpy (vp, policy, sizeof (*vp));
637 pool_put (im->policies, vp);
638 release_mask_type_index (im, mask_index);
643 ipsec_fp_ip6_del_policy (ipsec_main_t *im, ipsec_spd_fp_t *fp_spd,
644 ipsec_policy_t *policy)
647 ipsec_fp_5tuple_t mask = { 0 }, policy_5tuple;
648 clib_bihash_kv_40_8_t kv;
649 clib_bihash_kv_40_8_t result;
650 ipsec_fp_lookup_value_t *result_val =
651 (ipsec_fp_lookup_value_t *) &result.value;
656 ipsec_fp_ip6_get_policy_mask (policy, &mask);
657 ipsec_fp_get_policy_5tuple (policy, &policy_5tuple);
658 fill_ip6_hash_policy_kv (&policy_5tuple, &mask, &kv);
659 res = clib_bihash_search_inline_2_40_8 (&fp_spd->fp_ip6_lookup_hash, &kv,
665 vec_foreach_index (ii, result_val->fp_policies_ids)
668 pool_elt_at_index (im->policies, *(result_val->fp_policies_ids + ii));
669 if (ipsec_policy_is_equal (vp, policy))
671 vec_foreach_index (iii, fp_spd->fp_policies[policy->type])
673 if (*(fp_spd->fp_policies[policy->type] + iii) ==
674 *(result_val->fp_policies_ids + ii))
676 if (vec_len (result_val->fp_policies_ids) == 1)
678 vec_free (result_val->fp_policies_ids);
679 clib_bihash_add_del_40_8 (&fp_spd->fp_ip6_lookup_hash,
684 vec_del1 (result_val->fp_policies_ids, ii);
686 vec_del1 (fp_spd->fp_policies[policy->type], iii);
688 vec_foreach_index (imt, fp_spd->fp_mask_types[policy->type])
690 if (*(fp_spd->fp_mask_types[policy->type] + imt) ==
693 ipsec_fp_mask_type_entry_t *mte = pool_elt_at_index (
694 im->fp_mask_types, vp->fp_mask_type_id);
696 if (mte->refcount == 1)
697 vec_del1 (fp_spd->fp_mask_types[policy->type],
712 release_mask_type_index (im, vp->fp_mask_type_id);
713 ipsec_sa_unlock (vp->sa_index);
714 pool_put (im->policies, vp);
723 ipsec_fp_ip4_del_policy (ipsec_main_t *im, ipsec_spd_fp_t *fp_spd,
724 ipsec_policy_t *policy)
727 ipsec_fp_5tuple_t mask = { 0 }, policy_5tuple;
728 clib_bihash_kv_16_8_t kv;
729 clib_bihash_kv_16_8_t result;
730 ipsec_fp_lookup_value_t *result_val =
731 (ipsec_fp_lookup_value_t *) &result.value;
736 ipsec_fp_ip4_get_policy_mask (policy, &mask);
737 ipsec_fp_get_policy_5tuple (policy, &policy_5tuple);
738 fill_ip4_hash_policy_kv (&policy_5tuple, &mask, &kv);
739 res = clib_bihash_search_inline_2_16_8 (&fp_spd->fp_ip4_lookup_hash, &kv,
745 vec_foreach_index (ii, result_val->fp_policies_ids)
748 pool_elt_at_index (im->policies, *(result_val->fp_policies_ids + ii));
749 if (ipsec_policy_is_equal (vp, policy))
751 vec_foreach_index (iii, fp_spd->fp_policies[policy->type])
753 if (*(fp_spd->fp_policies[policy->type] + iii) ==
754 *(result_val->fp_policies_ids + ii))
756 if (vec_len (result_val->fp_policies_ids) == 1)
758 vec_free (result_val->fp_policies_ids);
759 clib_bihash_add_del_16_8 (&fp_spd->fp_ip4_lookup_hash,
764 vec_del1 (result_val->fp_policies_ids, ii);
766 vec_del1 (fp_spd->fp_policies[policy->type], iii);
768 vec_foreach_index (imt, fp_spd->fp_mask_types[policy->type])
770 if (*(fp_spd->fp_mask_types[policy->type] + imt) ==
773 ipsec_fp_mask_type_entry_t *mte = pool_elt_at_index (
774 im->fp_mask_types, vp->fp_mask_type_id);
776 if (mte->refcount == 1)
777 vec_del1 (fp_spd->fp_mask_types[policy->type],
792 release_mask_type_index (im, vp->fp_mask_type_id);
793 ipsec_sa_unlock (vp->sa_index);
794 pool_put (im->policies, vp);
803 ipsec_fp_add_del_policy (void *fp_spd, ipsec_policy_t *policy, int is_add,
806 ipsec_main_t *im = &ipsec_main;
810 return ipsec_fp_ip6_add_policy (im, (ipsec_spd_fp_t *) fp_spd, policy,
813 return ipsec_fp_ip4_add_policy (im, (ipsec_spd_fp_t *) fp_spd, policy,
816 else if (policy->is_ipv6)
818 return ipsec_fp_ip6_del_policy (im, (ipsec_spd_fp_t *) fp_spd, policy);
820 return ipsec_fp_ip4_del_policy (im, (ipsec_spd_fp_t *) fp_spd, policy);
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */