2 * Copyright (c) 2015 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
16 #include <vnet/ipsec/ipsec.h>
/* Global combined (packets + bytes) counters, one entry per SPD policy
 * index; exported to the stat segment under /net/ipsec/policy. */
20 * Policy packet & bytes counters
22 vlib_combined_counter_main_t ipsec_spd_policy_counters = {
24 .stat_segment_name = "/net/ipsec/policy",
/**
 * Translate (direction, address family, action) into a concrete SPD
 * policy type written through @a type.
 *
 * Outbound policies collapse to one type per address family regardless of
 * action; inbound policies get a distinct type per action
 * (PROTECT/BYPASS/DISCARD).  IPSEC_POLICY_ACTION_RESOLVE is unsupported.
 * NOTE(review): the is_ipv6 selector expressions and the return
 * statements are elided in this view — confirm against the full source.
 */
28 ipsec_policy_mk_type (bool is_outbound,
30 ipsec_policy_action_t action,
31 ipsec_spd_policy_type_t * type)
/* Outbound: single per-AF type, action does not matter here. */
36 IPSEC_SPD_POLICY_IP6_OUTBOUND : IPSEC_SPD_POLICY_IP4_OUTBOUND);
43 case IPSEC_POLICY_ACTION_PROTECT:
45 IPSEC_SPD_POLICY_IP6_INBOUND_PROTECT :
46 IPSEC_SPD_POLICY_IP4_INBOUND_PROTECT);
48 case IPSEC_POLICY_ACTION_BYPASS:
50 IPSEC_SPD_POLICY_IP6_INBOUND_BYPASS :
51 IPSEC_SPD_POLICY_IP4_INBOUND_BYPASS);
53 case IPSEC_POLICY_ACTION_DISCARD:
55 IPSEC_SPD_POLICY_IP6_INBOUND_DISCARD :
56 IPSEC_SPD_POLICY_IP4_INBOUND_DISCARD);
58 case IPSEC_POLICY_ACTION_RESOLVE:
63 /* Unsupported type */
/**
 * Return nonzero iff @a policy is one of the six inbound SPD policy types
 * (ip4/ip6 x PROTECT/BYPASS/DISCARD).  Used to select the inbound vs
 * outbound fast-path lookup hash.
 */
67 static_always_inline int
68 ipsec_is_policy_inbound (ipsec_policy_t *policy)
70 if (policy->type == IPSEC_SPD_POLICY_IP4_INBOUND_PROTECT ||
71 policy->type == IPSEC_SPD_POLICY_IP4_INBOUND_BYPASS ||
72 policy->type == IPSEC_SPD_POLICY_IP4_INBOUND_DISCARD ||
73 policy->type == IPSEC_SPD_POLICY_IP6_INBOUND_PROTECT ||
74 policy->type == IPSEC_SPD_POLICY_IP6_INBOUND_BYPASS ||
75 policy->type == IPSEC_SPD_POLICY_IP6_INBOUND_DISCARD)
/**
 * Return nonzero iff the fast-path (bihash-based) SPD is usable for this
 * policy: the matching per-AF/per-direction fp flag is enabled in
 * ipsec_main AND the corresponding lookup hash has been allocated for
 * this SPD (index != INDEX_INVALID).
 */
81 static_always_inline int
82 ipsec_is_fp_enabled (ipsec_main_t *im, ipsec_spd_t *spd,
83 ipsec_policy_t *policy)
/* ip4 outbound */
85 if ((im->fp_spd_ipv4_out_is_enabled &&
86 PREDICT_TRUE (INDEX_INVALID != spd->fp_spd.ip4_out_lookup_hash_idx) &&
87 policy->type == IPSEC_SPD_POLICY_IP4_OUTBOUND) ||
/* ip4 inbound: any of the three inbound actions */
88 (im->fp_spd_ipv4_in_is_enabled &&
89 PREDICT_TRUE (INDEX_INVALID != spd->fp_spd.ip4_in_lookup_hash_idx) &&
90 (policy->type == IPSEC_SPD_POLICY_IP4_INBOUND_PROTECT ||
91 policy->type == IPSEC_SPD_POLICY_IP4_INBOUND_BYPASS ||
92 policy->type == IPSEC_SPD_POLICY_IP4_INBOUND_DISCARD)) ||
/* ip6 inbound */
93 (im->fp_spd_ipv6_in_is_enabled &&
94 PREDICT_TRUE (INDEX_INVALID != spd->fp_spd.ip6_in_lookup_hash_idx) &&
95 (policy->type == IPSEC_SPD_POLICY_IP6_INBOUND_PROTECT ||
96 policy->type == IPSEC_SPD_POLICY_IP6_INBOUND_BYPASS ||
97 policy->type == IPSEC_SPD_POLICY_IP6_INBOUND_DISCARD)) ||
/* ip6 outbound */
98 (im->fp_spd_ipv6_out_is_enabled &&
99 PREDICT_TRUE (INDEX_INVALID != spd->fp_spd.ip6_out_lookup_hash_idx) &&
100 policy->type == IPSEC_SPD_POLICY_IP6_OUTBOUND))
/**
 * Control-plane entry point: add or delete an SPD policy.
 *
 * Looks up the SPD by policy->id, maintains the ip4 in/out flow-cache
 * epoch counters, resolves/locks the SA for PROTECT policies, then routes
 * the operation to the fast-path SPD when enabled, otherwise to the
 * traditional priority-ordered policy vector.
 *
 * @return 0 on success, VNET_API_ERROR_SYSCALL_ERROR_1 on lookup failure.
 * NOTE(review): braces, several declarations (p, spd_index, vp, i, ii)
 * and some statements are elided in this view.
 */
106 ipsec_add_del_policy (vlib_main_t * vm,
107 ipsec_policy_t * policy, int is_add, u32 * stat_index)
109 ipsec_main_t *im = &ipsec_main;
110 ipsec_spd_t *spd = 0;
/* Resolve the SPD pool index from the user-visible SPD id. */
115 p = hash_get (im->spd_index_by_spd_id, policy->id);
118 return VNET_API_ERROR_SYSCALL_ERROR_1;
121 spd = pool_elt_at_index (im->spds, spd_index);
123 return VNET_API_ERROR_SYSCALL_ERROR_1;
/* Output flow cache invalidation: any ip4-outbound policy change makes
 * cached lookups stale, so bump the epoch (with full reset on wrap). */
125 if (im->output_flow_cache_flag && !policy->is_ipv6 &&
126 policy->type == IPSEC_SPD_POLICY_IP4_OUTBOUND)
129 * Flow cache entry is valid only when epoch_count value in control
130 * plane and data plane match. Otherwise, flow cache entry is considered
131 * stale. To avoid the race condition of using old epoch_count value
132 * in data plane after the roll over of epoch_count in control plane,
133 * entire flow cache is reset.
135 if (im->epoch_count == 0xFFFFFFFF)
137 /* Reset all the entries in flow cache */
138 clib_memset_u8 (im->ipsec4_out_spd_hash_tbl, 0,
139 im->ipsec4_out_spd_hash_num_buckets *
140 (sizeof (*(im->ipsec4_out_spd_hash_tbl))));
142 /* Increment epoch counter by 1 */
143 clib_atomic_fetch_add_relax (&im->epoch_count, 1);
144 /* Reset spd flow cache counter since all old entries are stale */
145 clib_atomic_store_relax_n (&im->ipsec4_out_spd_flow_cache_entries, 0);
/* Input flow cache invalidation: symmetric scheme for ip4-inbound. */
148 if ((policy->type == IPSEC_SPD_POLICY_IP4_INBOUND_PROTECT ||
149 policy->type == IPSEC_SPD_POLICY_IP4_INBOUND_BYPASS ||
150 policy->type == IPSEC_SPD_POLICY_IP4_INBOUND_DISCARD) &&
151 im->input_flow_cache_flag && !policy->is_ipv6)
154 * Flow cache entry is valid only when input_epoch_count value in control
155 * plane and data plane match. Otherwise, flow cache entry is considered
156 * stale. To avoid the race condition of using old input_epoch_count
157 * value in data plane after the roll over of input_epoch_count in
158 * control plane, entire flow cache is reset.
160 if (im->input_epoch_count == 0xFFFFFFFF)
162 /* Reset all the entries in flow cache */
163 clib_memset_u8 (im->ipsec4_in_spd_hash_tbl, 0,
164 im->ipsec4_in_spd_hash_num_buckets *
165 (sizeof (*(im->ipsec4_in_spd_hash_tbl))));
167 /* Increment epoch counter by 1 */
168 clib_atomic_fetch_add_relax (&im->input_epoch_count, 1);
169 /* Reset spd flow cache counter since all old entries are stale */
170 im->ipsec4_in_spd_flow_cache_entries = 0;
/* Add path: PROTECT policies take a reference on their SA. */
178 if (policy->policy == IPSEC_POLICY_ACTION_PROTECT)
180 index_t sa_index = ipsec_sa_find_and_lock (policy->sa_id);
182 if (INDEX_INVALID == sa_index)
183 return VNET_API_ERROR_SYSCALL_ERROR_1;
184 policy->sa_index = sa_index;
187 policy->sa_index = INDEX_INVALID;
190 * Try adding the policy into fast path SPD first. Only adding to
191 * traditional SPD when failed.
193 if (ipsec_is_fp_enabled (im, spd, policy))
194 return ipsec_fp_add_del_policy ((void *) &spd->fp_spd, policy, 1,
/* Traditional SPD: copy into the policy pool, init its counter pair. */
197 pool_get (im->policies, vp);
198 clib_memcpy (vp, policy, sizeof (*vp));
199 policy_index = vp - im->policies;
201 vlib_validate_combined_counter (&ipsec_spd_policy_counters,
203 vlib_zero_combined_counter (&ipsec_spd_policy_counters, policy_index);
/* Keep spd->policies[type] sorted by descending priority: insert before
 * the first existing policy whose priority is <= ours. */
205 vec_foreach_index (i, spd->policies[policy->type])
208 pool_elt_at_index (im->policies, spd->policies[policy->type][i]);
210 if (p->priority <= vp->priority)
216 vec_insert_elts (spd->policies[policy->type], &policy_index, 1, i);
218 *stat_index = policy_index;
/* Delete path. */
225 * Try to delete the policy from the fast path SPD first. Delete from
226 * traditional SPD when fp delete fails.
229 if (ipsec_is_fp_enabled (im, spd, policy))
/* Re-resolve sa_index so fp lookup can match; drop the extra lock taken
 * by ipsec_sa_find_and_lock immediately. */
232 if (policy->policy == IPSEC_POLICY_ACTION_PROTECT)
234 index_t sa_index = ipsec_sa_find_and_lock (policy->sa_id);
236 if (INDEX_INVALID == sa_index)
237 return VNET_API_ERROR_SYSCALL_ERROR_1;
238 policy->sa_index = sa_index;
239 ipsec_sa_unlock_id (policy->sa_id);
242 policy->sa_index = INDEX_INVALID;
244 return ipsec_fp_add_del_policy ((void *) &spd->fp_spd, policy, 0,
/* Traditional SPD delete: linear search for an equal policy, then
 * remove it, release the SA reference and free the pool slot. */
248 vec_foreach_index (ii, (spd->policies[policy->type]))
250 vp = pool_elt_at_index (im->policies,
251 spd->policies[policy->type][ii]);
252 if (ipsec_policy_is_equal (vp, policy))
254 vec_delete (spd->policies[policy->type], 1, ii);
255 ipsec_sa_unlock (vp->sa_index);
256 pool_put (im->policies, vp);
/**
 * Drop one reference on a fast-path mask-type entry; when the refcount
 * reaches zero, poison the entry (0xae fill, debug aid) and return it to
 * the pool.  NOTE(review): the refcount decrement itself is elided in
 * this view — confirm it precedes the zero check in the full source.
 */
265 static_always_inline void
266 ipsec_fp_release_mask_type (ipsec_main_t *im, u32 mask_type_index)
268 ipsec_fp_mask_type_entry_t *mte =
269 pool_elt_at_index (im->fp_mask_types, mask_type_index);
271 if (mte->refcount == 0)
273 /* this entry is not in use anymore */
274 ASSERT (clib_memset (mte, 0xae, sizeof (*mte)) == EOK);
275 pool_put (im->fp_mask_types, mte);
/**
 * Linear scan of the mask-type pool for an entry whose mask equals
 * @a mask byte-for-byte; returns its pool index.  The not-found return
 * (presumably ~0, given callers test `mask_index == ~0`) is elided in
 * this view.
 */
279 static_always_inline u32
280 find_mask_type_index (ipsec_main_t *im, ipsec_fp_5tuple_t *mask)
282 ipsec_fp_mask_type_entry_t *mte;
284 pool_foreach (mte, im->fp_mask_types)
286 if (memcmp (&mte->mask, mask, sizeof (*mask)) == 0)
287 return (mte - im->fp_mask_types);
/**
 * Build a 40_8 bihash key from an ip6 5-tuple: key = match & mask,
 * folded as five u64 words.  kv_val aliases kv->value; the lines that
 * initialize it are elided in this view.
 */
293 static_always_inline void
294 fill_ip6_hash_policy_kv (ipsec_fp_5tuple_t *match, ipsec_fp_5tuple_t *mask,
295 clib_bihash_kv_40_8_t *kv)
297 ipsec_fp_lookup_value_t *kv_val = (ipsec_fp_lookup_value_t *) &kv->value;
298 u64 *pmatch = (u64 *) match->kv_40_8.key;
299 u64 *pmask = (u64 *) mask->kv_40_8.key;
300 u64 *pkey = (u64 *) kv->key;
/* 5 x 8 bytes = the 40-byte key. */
302 *pkey++ = *pmatch++ & *pmask++;
303 *pkey++ = *pmatch++ & *pmask++;
304 *pkey++ = *pmatch++ & *pmask++;
305 *pkey++ = *pmatch++ & *pmask++;
306 *pkey = *pmatch & *pmask;
/**
 * Build a 16_8 bihash key from an ip4 5-tuple: key = match & mask,
 * folded as two u64 words.  kv_val aliases kv->value; the lines that
 * initialize it are elided in this view.
 */
311 static_always_inline void
312 fill_ip4_hash_policy_kv (ipsec_fp_5tuple_t *match, ipsec_fp_5tuple_t *mask,
313 clib_bihash_kv_16_8_t *kv)
315 ipsec_fp_lookup_value_t *kv_val = (ipsec_fp_lookup_value_t *) &kv->value;
316 u64 *pmatch = (u64 *) match->kv_16_8.key;
317 u64 *pmask = (u64 *) mask->kv_16_8.key;
318 u64 *pkey = (u64 *) kv->key;
/* 2 x 8 bytes = the 16-byte key. */
320 *pkey++ = *pmatch++ & *pmask++;
321 *pkey = *pmatch & *pmask;
/* Clear the most significant set bit of x and everything below it
 * (used to turn a start^stop XOR into a contiguous wildcard mask).
 * NOTE(review): body elided in this view. */
326 static_always_inline u16
327 mask_out_highest_set_bit_u16 (u16 x)
/* u32 variant of mask_out_highest_set_bit_u16; body elided in this view. */
336 static_always_inline u32
337 mask_out_highest_set_bit_u32 (u32 x)
/* u64 variant of mask_out_highest_set_bit_u16; body elided in this view. */
347 static_always_inline u64
348 mask_out_highest_set_bit_u64 (u64 x)
/**
 * Fill the port and protocol fields of the fast-path mask.  For TCP/UDP/
 * SCTP the port mask is derived from start^stop reduced to a contiguous
 * wildcard; for other protocols the port handling is elided in this view.
 * Protocol is fully wildcarded only for IPSEC_POLICY_PROTOCOL_ANY.
 */
359 static_always_inline void
360 ipsec_fp_get_policy_ports_mask (ipsec_policy_t *policy,
361 ipsec_fp_5tuple_t *mask)
363 if (PREDICT_TRUE ((policy->protocol == IP_PROTOCOL_TCP) ||
364 (policy->protocol == IP_PROTOCOL_UDP) ||
365 (policy->protocol == IP_PROTOCOL_SCTP)))
/* Bits where the range start and stop differ ... */
367 mask->lport = policy->lport.start ^ policy->lport.stop;
368 mask->rport = policy->rport.start ^ policy->rport.stop;
/* ... reduced to a contiguous don't-care suffix. */
370 mask->lport = mask_out_highest_set_bit_u16 (mask->lport);
372 mask->rport = mask_out_highest_set_bit_u16 (mask->rport);
380 mask->protocol = (policy->protocol == IPSEC_POLICY_PROTOCOL_ANY) ? 0 : ~0;
/**
 * Compute the ip4 fast-path lookup mask for @a policy.
 *
 * Starts from an all-ones mask, then relaxes the local/remote address
 * words to a contiguous prefix mask derived from the policy's
 * start..stop address ranges.  For inbound PROTECT policies whose SA is
 * a tunnel, address-range masking is skipped (control jumps past it —
 * target label elided in this view) because matching is done on
 * SPI/tunnel endpoints instead.
 */
383 static_always_inline void
384 ipsec_fp_ip4_get_policy_mask (ipsec_policy_t *policy, ipsec_fp_5tuple_t *mask,
387 u32 *pladdr_start = (u32 *) &policy->laddr.start.ip4;
388 u32 *pladdr_stop = (u32 *) &policy->laddr.stop.ip4;
389 u32 *plmask = (u32 *) &mask->laddr;
390 u32 *praddr_start = (u32 *) &policy->raddr.start.ip4;
391 u32 *praddr_stop = (u32 *) &policy->raddr.stop.ip4;
392 u32 *prmask = (u32 *) &mask->raddr;
/* Default: exact match on everything, then selectively wildcard. */
394 clib_memset_u8 (mask, 0xff, sizeof (ipsec_fp_5tuple_t));
395 clib_memset_u8 (&mask->l3_zero_pad, 0, sizeof (mask->l3_zero_pad));
397 if (inbound && (policy->type == IPSEC_SPD_POLICY_IP4_INBOUND_PROTECT &&
398 policy->sa_index != INDEX_INVALID))
400 ipsec_sa_t *s = ipsec_sa_get (policy->sa_index);
402 if (ipsec_sa_is_set_IS_TUNNEL (s))
406 /* find bits where start != stop */
407 *plmask = *pladdr_start ^ *pladdr_stop;
408 *prmask = *praddr_start ^ *praddr_stop;
409 /* Find most significant bit set (that is the first position
410 * start differs from stop). Mask out everything after that bit and
411 * the bit itself. Remember that policy stores start and stop in the net
* order, hence the byte-order round-trips below. */
414 *plmask = clib_host_to_net_u32 (
415 mask_out_highest_set_bit_u32 (clib_net_to_host_u32 (*plmask)));
417 *prmask = clib_host_to_net_u32 (
418 mask_out_highest_set_bit_u32 (clib_net_to_host_u32 (*prmask)));
/* Non-PROTECT inbound/outbound policies don't match on SPI. */
423 if (policy->type != IPSEC_SPD_POLICY_IP4_INBOUND_PROTECT)
431 ipsec_fp_get_policy_ports_mask (policy, mask);
/**
 * Compute the ip6 fast-path lookup mask for @a policy.
 *
 * Starts from an all-ones mask, then relaxes the local/remote 128-bit
 * addresses to contiguous prefix masks derived from the start..stop
 * ranges, one u64 word at a time: the high word is masked first and the
 * low word is only examined when the high word's mask reaches all the
 * way down to bit 0 (i.e. the range boundary crosses into the low word).
 * For inbound PROTECT policies whose SA is a tunnel, address masking is
 * skipped (jump target elided in this view) since matching uses
 * SPI/tunnel endpoints instead.
 *
 * BUG FIX: the low-word continuation for the REMOTE address previously
 * XORed the LOCAL address bounds (pladdr_start/pladdr_stop) — a
 * copy-paste from the plmask branch above — yielding a wrong raddr mask
 * whenever the high-word mask ended in bit 0.  It now uses
 * praddr_start/praddr_stop.
 */
435 static_always_inline void
436 ipsec_fp_ip6_get_policy_mask (ipsec_policy_t *policy, ipsec_fp_5tuple_t *mask,
439 u64 *pladdr_start = (u64 *) &policy->laddr.start;
440 u64 *pladdr_stop = (u64 *) &policy->laddr.stop;
441 u64 *plmask = (u64 *) &mask->ip6_laddr;
442 u64 *praddr_start = (u64 *) &policy->raddr.start;
443 u64 *praddr_stop = (u64 *) &policy->raddr.stop;
444 u64 *prmask = (u64 *) &mask->ip6_raddr;
/* Default: exact match on everything, then selectively wildcard. */
446 clib_memset_u8 (mask, 0xff, sizeof (ipsec_fp_5tuple_t));
448 if (inbound && (policy->type == IPSEC_SPD_POLICY_IP6_INBOUND_PROTECT &&
449 policy->sa_index != INDEX_INVALID))
451 ipsec_sa_t *s = ipsec_sa_get (policy->sa_index);
453 if (ipsec_sa_is_set_IS_TUNNEL (s))
/* High u64 word of each address: bits where start != stop. */
457 *plmask = (*pladdr_start++ ^ *pladdr_stop++);
459 *prmask = (*praddr_start++ ^ *praddr_stop++);
461 /* Find most significant bit set (that is the first position
462 * start differs from stop). Mask out everything after that bit and
463 * the bit itself. Remember that policy stores start and stop in the net
* order, hence the byte-order round-trips below. */
466 *plmask = clib_host_to_net_u64 (
467 mask_out_highest_set_bit_u64 (clib_net_to_host_u64 (*plmask)));
/* If the high-word mask still matches bit 0, the range boundary lies in
 * the low word: mask that word too. */
469 if (*plmask++ & clib_host_to_net_u64 (0x1))
471 *plmask = (*pladdr_start ^ *pladdr_stop);
472 *plmask = clib_host_to_net_u64 (
473 mask_out_highest_set_bit_u64 (clib_net_to_host_u64 (*plmask)));
478 *prmask = clib_host_to_net_u64 (
479 mask_out_highest_set_bit_u64 (clib_net_to_host_u64 (*prmask)));
481 if (*prmask++ & clib_host_to_net_u64 (0x1))
/* Fixed: use the remote (raddr) bounds here, not the local ones. */
483 *prmask = (*praddr_start ^ *praddr_stop);
484 *prmask = clib_host_to_net_u64 (
485 mask_out_highest_set_bit_u64 (clib_net_to_host_u64 (*prmask)));
/* Non-PROTECT policies don't match on SPI. */
492 if (policy->type != IPSEC_SPD_POLICY_IP6_INBOUND_PROTECT)
500 ipsec_fp_get_policy_ports_mask (policy, mask);
/**
 * Build the (unmasked) 5-tuple match value for @a policy, used together
 * with the policy mask to form the fast-path bihash key.
 *
 * Addresses come from the range starts; for inbound PROTECT policies
 * whose SA is a tunnel, the tuple instead matches on the tunnel endpoint
 * addresses (swapped: local = tunnel dst, remote = tunnel src) and —
 * per the visible else-path — the SPI.  NOTE(review): branch structure
 * (if/else bodies) is partially elided in this view.
 */
504 static_always_inline void
505 ipsec_fp_get_policy_5tuple (ipsec_policy_t *policy, ipsec_fp_5tuple_t *tuple,
508 memset (tuple, 0, sizeof (*tuple));
509 tuple->is_ipv6 = policy->is_ipv6;
512 tuple->ip6_laddr = policy->laddr.start.ip6;
513 tuple->ip6_raddr = policy->raddr.start.ip6;
517 tuple->laddr = policy->laddr.start.ip4;
518 tuple->raddr = policy->raddr.start.ip4;
524 if ((policy->type == IPSEC_SPD_POLICY_IP4_INBOUND_PROTECT ||
525 policy->type == IPSEC_SPD_POLICY_IP6_INBOUND_PROTECT) &&
526 policy->sa_index != INDEX_INVALID)
528 ipsec_sa_t *s = ipsec_sa_get (policy->sa_index);
531 if (ipsec_sa_is_set_IS_TUNNEL (s))
/* Tunnel mode: match on the tunnel endpoints (note the dst/src swap:
 * inbound local address is the tunnel destination). */
535 tuple->ip6_laddr = s->tunnel.t_dst.ip.ip6;
536 tuple->ip6_raddr = s->tunnel.t_src.ip.ip6;
540 tuple->laddr = s->tunnel.t_dst.ip.ip4;
541 tuple->raddr = s->tunnel.t_src.ip.ip4;
546 tuple->spi = INDEX_INVALID;
547 tuple->action = policy->type;
551 tuple->protocol = policy->protocol;
552 tuple->lport = policy->lport.start;
553 tuple->rport = policy->rport.start;
/* Comparator for vec_search_with_function: matches a per-SPD mask-id
 * record against a global mask-type pool index. */
556 static_always_inline int
557 ipsec_fp_mask_type_idx_cmp (ipsec_fp_mask_id_t *mask_id, u32 *idx)
559 return mask_id->mask_type_idx == *idx;
/**
 * Add an ip4 policy to the fast-path SPD.
 *
 * Flow: compute the mask, allocate a policy pool slot + counters,
 * find-or-create the global mask-type entry, build the bihash key and
 * either create a new bucket (fresh fp_policies_ids vec) or insert the
 * policy index priority-ordered into the existing bucket, then track the
 * mask id per (SPD, policy type) with a refcount.  An error path at the
 * end rolls back the pool slot and mask-type reference.
 * NOTE(review): braces, error checks on bihash ops and some declarations
 * (vp, policy_index, res, i) are elided in this view.
 */
563 ipsec_fp_ip4_add_policy (ipsec_main_t *im, ipsec_spd_fp_t *fp_spd,
564 ipsec_policy_t *policy, u32 *stat_index)
566 u32 mask_index, searched_idx;
568 ipsec_fp_mask_type_entry_t *mte;
570 clib_bihash_kv_16_8_t kv;
571 clib_bihash_kv_16_8_t result;
572 ipsec_fp_lookup_value_t *result_val =
573 (ipsec_fp_lookup_value_t *) &result.value;
574 ipsec_fp_lookup_value_t *key_val = (ipsec_fp_lookup_value_t *) &kv.value;
576 ipsec_fp_5tuple_t mask, policy_5tuple;
578 bool inbound = ipsec_is_policy_inbound (policy);
/* Select the in/out ip4 lookup table for this SPD. */
579 clib_bihash_16_8_t *bihash_table =
580 inbound ? pool_elt_at_index (im->fp_ip4_lookup_hashes_pool,
581 fp_spd->ip4_in_lookup_hash_idx) :
582 pool_elt_at_index (im->fp_ip4_lookup_hashes_pool,
583 fp_spd->ip4_out_lookup_hash_idx)
585 ipsec_fp_ip4_get_policy_mask (policy, &mask, inbound);
586 pool_get (im->policies, vp);
587 policy_index = vp - im->policies;
588 vlib_validate_combined_counter (&ipsec_spd_policy_counters, policy_index);
589 vlib_zero_combined_counter (&ipsec_spd_policy_counters, policy_index);
590 *stat_index = policy_index;
591 mask_index = find_mask_type_index (im, &mask);
593 if (mask_index == ~0)
595 /* mask type not found, we need to create a new entry */
596 pool_get (im->fp_mask_types, mte);
597 mask_index = mte - im->fp_mask_types;
601 mte = im->fp_mask_types + mask_index;
603 policy->fp_mask_type_id = mask_index;
604 ipsec_fp_get_policy_5tuple (policy, &policy_5tuple, inbound);
606 fill_ip4_hash_policy_kv (&policy_5tuple, &mask, &kv);
608 res = clib_bihash_search_inline_2_16_8 (bihash_table, &kv, &result);
611 /* key was not found, create a new entry */
612 vec_add1 (key_val->fp_policies_ids, policy_index);
613 res = clib_bihash_add_del_16_8 (bihash_table, &kv, 1);
/* Key exists: insert priority-ordered into the bucket's policy list. */
621 u32 *old_fp_policies_ids = result_val->fp_policies_ids;
623 vec_foreach_index (i, result_val->fp_policies_ids)
626 pool_elt_at_index (im->policies, result_val->fp_policies_ids[i]);
628 if (p->priority <= policy->priority)
634 vec_insert_elts (result_val->fp_policies_ids, &policy_index, 1, i);
/* vec_insert_elts may reallocate; re-store the bucket value if moved. */
636 if (result_val->fp_policies_ids != old_fp_policies_ids)
638 res = clib_bihash_add_del_16_8 (bihash_table, &result, 1);
/* First user of a freshly allocated mask type: record the mask. */
645 if (mte->refcount == 0)
647 clib_memcpy (&mte->mask, &mask, sizeof (mask));
/* Track this mask id per (SPD, policy type) with its own refcount. */
652 vec_search_with_function (fp_spd->fp_mask_ids[policy->type], &mask_index,
653 ipsec_fp_mask_type_idx_cmp);
654 if (~0 == searched_idx)
656 ipsec_fp_mask_id_t mask_id = { mask_index, 1 };
657 vec_add1 (fp_spd->fp_mask_ids[policy->type], mask_id);
660 (fp_spd->fp_mask_ids[policy->type] + searched_idx)->refcount++;
663 clib_memcpy (vp, policy, sizeof (*vp));
/* Error path: undo the pool allocation and mask-type reference. */
668 pool_put (im->policies, vp);
669 ipsec_fp_release_mask_type (im, mask_index);
/**
 * Add an ip6 policy to the fast-path SPD.  Mirrors
 * ipsec_fp_ip4_add_policy but with 40_8 bihash keys and the ip6 mask
 * derivation; see that function for the flow description.
 * NOTE(review): braces, error checks and some declarations are elided
 * in this view.
 */
674 ipsec_fp_ip6_add_policy (ipsec_main_t *im, ipsec_spd_fp_t *fp_spd,
675 ipsec_policy_t *policy, u32 *stat_index)
678 u32 mask_index, searched_idx;
680 ipsec_fp_mask_type_entry_t *mte;
682 clib_bihash_kv_40_8_t kv;
683 clib_bihash_kv_40_8_t result;
684 ipsec_fp_lookup_value_t *result_val =
685 (ipsec_fp_lookup_value_t *) &result.value;
686 ipsec_fp_lookup_value_t *key_val = (ipsec_fp_lookup_value_t *) &kv.value;
688 ipsec_fp_5tuple_t mask, policy_5tuple;
690 bool inbound = ipsec_is_policy_inbound (policy);
692 ipsec_fp_ip6_get_policy_mask (policy, &mask, inbound);
693 pool_get (im->policies, vp);
694 policy_index = vp - im->policies;
695 vlib_validate_combined_counter (&ipsec_spd_policy_counters, policy_index);
696 vlib_zero_combined_counter (&ipsec_spd_policy_counters, policy_index);
697 *stat_index = policy_index;
698 mask_index = find_mask_type_index (im, &mask);
/* Select the in/out ip6 lookup table for this SPD. */
699 clib_bihash_40_8_t *bihash_table =
700 inbound ? pool_elt_at_index (im->fp_ip6_lookup_hashes_pool,
701 fp_spd->ip6_in_lookup_hash_idx) :
702 pool_elt_at_index (im->fp_ip6_lookup_hashes_pool,
703 fp_spd->ip6_out_lookup_hash_idx)
705 if (mask_index == ~0)
707 /* mask type not found, we need to create a new entry */
708 pool_get (im->fp_mask_types, mte);
709 mask_index = mte - im->fp_mask_types;
713 mte = im->fp_mask_types + mask_index;
715 policy->fp_mask_type_id = mask_index;
716 ipsec_fp_get_policy_5tuple (policy, &policy_5tuple, inbound);
718 fill_ip6_hash_policy_kv (&policy_5tuple, &mask, &kv);
720 res = clib_bihash_search_inline_2_40_8 (bihash_table, &kv, &result);
723 /* key was not found, create a new entry */
724 vec_add1 (key_val->fp_policies_ids, policy_index);
725 res = clib_bihash_add_del_40_8 (bihash_table, &kv, 1);
/* Key exists: insert priority-ordered into the bucket's policy list. */
732 u32 *old_fp_policies_ids = result_val->fp_policies_ids;
734 vec_foreach_index (i, result_val->fp_policies_ids)
737 pool_elt_at_index (im->policies, result_val->fp_policies_ids[i]);
739 if (p->priority <= policy->priority)
745 vec_insert_elts (result_val->fp_policies_ids, &policy_index, 1, i);
/* vec_insert_elts may reallocate; re-store the bucket value if moved. */
747 if (result_val->fp_policies_ids != old_fp_policies_ids)
749 res = clib_bihash_add_del_40_8 (bihash_table, &result, 1);
/* First user of a freshly allocated mask type: record the mask. */
756 if (mte->refcount == 0)
758 clib_memcpy (&mte->mask, &mask, sizeof (mask));
/* Track this mask id per (SPD, policy type) with its own refcount. */
763 vec_search_with_function (fp_spd->fp_mask_ids[policy->type], &mask_index,
764 ipsec_fp_mask_type_idx_cmp);
765 if (~0 == searched_idx)
767 ipsec_fp_mask_id_t mask_id = { mask_index, 1 };
768 vec_add1 (fp_spd->fp_mask_ids[policy->type], mask_id);
771 (fp_spd->fp_mask_ids[policy->type] + searched_idx)->refcount++;
774 clib_memcpy (vp, policy, sizeof (*vp));
/* Error path: undo the pool allocation and mask-type reference. */
779 pool_put (im->policies, vp);
780 ipsec_fp_release_mask_type (im, mask_index);
/**
 * Delete an ip6 policy from the fast-path SPD.
 *
 * Rebuilds the policy's mask and 5-tuple to locate its bihash bucket,
 * scans the bucket for an equal policy, removes it (deleting the whole
 * bucket when it was the last entry), drops the per-SPD mask-id
 * refcount, then releases the global mask-type ref, the SA lock and the
 * policy pool slot.  NOTE(review): braces, declarations (res, ii, imt,
 * vp) and the search-miss return are elided in this view.
 */
785 ipsec_fp_ip6_del_policy (ipsec_main_t *im, ipsec_spd_fp_t *fp_spd,
786 ipsec_policy_t *policy)
789 ipsec_fp_5tuple_t mask = { 0 }, policy_5tuple;
790 clib_bihash_kv_40_8_t kv;
791 clib_bihash_kv_40_8_t result;
792 ipsec_fp_lookup_value_t *result_val =
793 (ipsec_fp_lookup_value_t *) &result.value;
794 bool inbound = ipsec_is_policy_inbound (policy);
795 clib_bihash_40_8_t *bihash_table =
796 inbound ? pool_elt_at_index (im->fp_ip6_lookup_hashes_pool,
797 fp_spd->ip6_in_lookup_hash_idx) :
798 pool_elt_at_index (im->fp_ip6_lookup_hashes_pool,
799 fp_spd->ip6_out_lookup_hash_idx)
804 ipsec_fp_ip6_get_policy_mask (policy, &mask, inbound);
805 ipsec_fp_get_policy_5tuple (policy, &policy_5tuple, inbound);
806 fill_ip6_hash_policy_kv (&policy_5tuple, &mask, &kv);
807 res = clib_bihash_search_inline_2_40_8 (bihash_table, &kv, &result);
/* Scan the bucket for the matching policy. */
811 vec_foreach_index (ii, result_val->fp_policies_ids)
814 pool_elt_at_index (im->policies, *(result_val->fp_policies_ids + ii));
815 if (ipsec_policy_is_equal (vp, policy))
/* Last entry in the bucket: free the vec and delete the key. */
817 if (vec_len (result_val->fp_policies_ids) == 1)
819 vec_free (result_val->fp_policies_ids);
820 clib_bihash_add_del_40_8 (bihash_table, &result, 0);
823 vec_delete (result_val->fp_policies_ids, 1, ii);
/* Drop the per-SPD mask-id refcount; remove the record at zero. */
825 vec_foreach_index (imt, fp_spd->fp_mask_ids[policy->type])
827 if ((fp_spd->fp_mask_ids[policy->type] + imt)->mask_type_idx ==
831 if ((fp_spd->fp_mask_ids[policy->type] + imt)->refcount-- ==
833 vec_del1 (fp_spd->fp_mask_ids[policy->type], imt);
/* Release global mask-type ref, SA lock and the policy pool slot. */
839 ipsec_fp_release_mask_type (im, vp->fp_mask_type_id);
840 ipsec_sa_unlock (vp->sa_index);
841 pool_put (im->policies, vp);
/**
 * Delete an ip4 policy from the fast-path SPD.  Mirrors
 * ipsec_fp_ip6_del_policy with 16_8 bihash keys; see that function for
 * the flow description.  NOTE(review): braces, declarations and the
 * search-miss return are elided in this view.
 */
849 ipsec_fp_ip4_del_policy (ipsec_main_t *im, ipsec_spd_fp_t *fp_spd,
850 ipsec_policy_t *policy)
853 ipsec_fp_5tuple_t mask = { 0 }, policy_5tuple;
854 clib_bihash_kv_16_8_t kv;
855 clib_bihash_kv_16_8_t result;
856 ipsec_fp_lookup_value_t *result_val =
857 (ipsec_fp_lookup_value_t *) &result.value;
858 bool inbound = ipsec_is_policy_inbound (policy);
861 clib_bihash_16_8_t *bihash_table =
862 inbound ? pool_elt_at_index (im->fp_ip4_lookup_hashes_pool,
863 fp_spd->ip4_in_lookup_hash_idx) :
864 pool_elt_at_index (im->fp_ip4_lookup_hashes_pool,
865 fp_spd->ip4_out_lookup_hash_idx)
867 ipsec_fp_ip4_get_policy_mask (policy, &mask, inbound);
868 ipsec_fp_get_policy_5tuple (policy, &policy_5tuple, inbound);
869 fill_ip4_hash_policy_kv (&policy_5tuple, &mask, &kv);
870 res = clib_bihash_search_inline_2_16_8 (bihash_table, &kv, &result);
/* Scan the bucket for the matching policy. */
875 vec_foreach_index (ii, result_val->fp_policies_ids)
878 pool_elt_at_index (im->policies, *(result_val->fp_policies_ids + ii));
879 if (ipsec_policy_is_equal (vp, policy))
/* Last entry in the bucket: free the vec and delete the key. */
881 if (vec_len (result_val->fp_policies_ids) == 1)
883 vec_free (result_val->fp_policies_ids);
884 clib_bihash_add_del_16_8 (bihash_table, &result, 0);
887 vec_delete (result_val->fp_policies_ids, 1, ii);
/* Drop the per-SPD mask-id refcount; remove the record at zero. */
889 vec_foreach_index (imt, fp_spd->fp_mask_ids[policy->type])
891 if ((fp_spd->fp_mask_ids[policy->type] + imt)->mask_type_idx ==
895 if ((fp_spd->fp_mask_ids[policy->type] + imt)->refcount-- ==
897 vec_del1 (fp_spd->fp_mask_ids[policy->type], imt);
/* Release global mask-type ref, SA lock and the policy pool slot. */
902 ipsec_fp_release_mask_type (im, vp->fp_mask_type_id);
903 ipsec_sa_unlock (vp->sa_index);
904 pool_put (im->policies, vp);
/**
 * Fast-path SPD dispatcher: route add/delete to the per-AF worker based
 * on is_add and policy->is_ipv6.  fp_spd is passed as void* and cast
 * back to ipsec_spd_fp_t* (presumably to avoid a header dependency —
 * confirm against the declaration site).
 */
912 ipsec_fp_add_del_policy (void *fp_spd, ipsec_policy_t *policy, int is_add,
915 ipsec_main_t *im = &ipsec_main;
919 return ipsec_fp_ip6_add_policy (im, (ipsec_spd_fp_t *) fp_spd, policy,
922 return ipsec_fp_ip4_add_policy (im, (ipsec_spd_fp_t *) fp_spd, policy,
925 else if (policy->is_ipv6)
927 return ipsec_fp_ip6_del_policy (im, (ipsec_spd_fp_t *) fp_spd, policy);
929 return ipsec_fp_ip4_del_policy (im, (ipsec_spd_fp_t *) fp_spd, policy);
933 * fd.io coding-style-patch-verification: ON
936 * eval: (c-set-style "gnu")