BV (clib_bihash_add_del) (&am->acl_lookup_hash, kv, is_add);
}
+/*
+ * TupleMerge
+ *
+ * Initial adaptation by Valerio Bruschi (valerio.bruschi@telecom-paristech.fr)
+ * based on the TupleMerge [1] simulator kindly made available
+ * by James Daly (dalyjamese@gmail.com) and Eric Torng (torng@cse.msu.edu)
+ * ( http://www.cse.msu.edu/~dalyjame/ or http://www.cse.msu.edu/~torng/ ),
+ * refactoring by Andrew Yourtchenko.
+ *
+ * [1] James Daly, Eric Torng "TupleMerge: Building Online Packet Classifiers
+ * by Omitting Bits", In Proc. IEEE ICCCN 2017, pp. 1-10
+ *
+ */
+
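+/* count the set bits in a 64-bit word (naive popcount) */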
+static int
+count_bits (u64 word)
+{
+ int counter = 0;
+ while (word)
+ {
+ counter += word & 1;
+ word >>= 1;
+ }
+ return counter;
+}
+
+/*
+ * Check if mask2 can be contained by mask1: return 1 iff every bit set
+ * in mask1 is also set in mask2, i.e. a hash table whose mask is mask1
+ * can hold a rule whose own mask is mask2 (the extra bits of mask2 are
+ * simply ignored). E.g. an IPv4 /24 table mask contains a /32 rule
+ * mask, since 0xffffff00 & 0xffffffff == 0xffffff00.
+ */
+static u8
+first_mask_contains_second_mask(int is_ip6, fa_5tuple_t * mask1, fa_5tuple_t * mask2)
+{
+ int i;
+ if (is_ip6)
+ {
+ for (i = 0; i < 2; i++)
+ {
+ if ((mask1->ip6_addr[0].as_u64[i] & mask2->ip6_addr[0].as_u64[i]) !=
+ mask1->ip6_addr[0].as_u64[i])
+ return 0;
+ if ((mask1->ip6_addr[1].as_u64[i] & mask2->ip6_addr[1].as_u64[i]) !=
+ mask1->ip6_addr[1].as_u64[i])
+ return 0;
+ }
+ }
+ else
+ {
+      /* check the l3 zero-padding: it must be all-zero in both masks */
+ u32 padcheck = 0;
+ int i;
+ for (i=0; i<6; i++) {
+ padcheck |= mask1->l3_zero_pad[i];
+ padcheck |= mask2->l3_zero_pad[i];
+ }
+ if (padcheck != 0)
+ return 0;
+ if ((mask1->ip4_addr[0].as_u32 & mask2->ip4_addr[0].as_u32) !=
+ mask1->ip4_addr[0].as_u32)
+ return 0;
+ if ((mask1->ip4_addr[1].as_u32 & mask2->ip4_addr[1].as_u32) !=
+ mask1->ip4_addr[1].as_u32)
+ return 0;
+ }
+
+  /* the L4 part (proto and ports) of mask1 must be contained as well; ports may be non-exact */
+ if ((mask1->l4.as_u64 & mask2->l4.as_u64) != mask1->l4.as_u64)
+ return 0;
+
+ if ((mask1->pkt.as_u64 & mask2->pkt.as_u64) != mask1->pkt.as_u64)
+ return 0;
+
+ return 1;
+}
+
+
+
+/*
+ * TupleMerge:
+ *
+ * Consider the situation when we have to create a new table
+ * T for a given rule R. This occurs for the first rule inserted and
+ * for later rules if it is incompatible with all existing tables.
+ * In this event, we need to determine mT for a new table.
+ * Setting mT = mR is not a good strategy; if another similar,
+ * but slightly less specific, rule appears we will be unable to
+ * add it to T and will thus have to create another new table. We
+ * thus consider two factors: is the rule more strongly aligned
+ * with source or destination addresses (usually the two most
+ * important fields) and how much slack needs to be given to
+ * allow for other rules. If the source and destination addresses
+ * are close together (within 4 bits for our experiments), we use
+ * both of them. Otherwise, we drop the smaller (less specific)
+ * address and its associated port field from consideration; R is
+ * predominantly aligned with one of the two fields and should
+ * be grouped with other similar rules. This is similar to TSS
+ * dropping port fields, but since it is based on observable rule
+ * characteristics it is more likely to keep important fields and
+ * discard less useful ones.
+ * We then look at the absolute lengths of the addresses. If
+ * the address is long, we are more likely to try to add shorter
+ * lengths and likewise the reverse. We thus remove a few bits
+ * from both address fields with more bits removed from longer
+ * addresses. For 32 bit addresses, we remove 4 bits, 3 for more
+ * than 24, 2 for more than 16, and so on (so 8 and fewer bits
+ * don’t have any removed). We only do this for prefix fields like
+ * addresses; both range fields (like ports) and exact match fields
+ * (like protocol) should remain as they are.
+ */
+
+
+static u32
+shift_ip4_if(u32 mask, u32 thresh, int numshifts, u32 else_val)
+{
+  if (clib_net_to_host_u32 (mask) > thresh)
+ return clib_host_to_net_u32((clib_net_to_host_u32(mask) << numshifts) & 0xFFFFFFFF);
+ else
+ return else_val;
+}
+
+static void
+relax_ip4_addr(ip4_address_t *ip4_mask, int relax2) {
+ int shifts_per_relax[2][4] = { { 6, 5, 4, 2 }, { 3, 2, 1, 1 } };
+
+ int *shifts = shifts_per_relax[relax2];
+ if(ip4_mask->as_u32 == 0xffffffff)
+ ip4_mask->as_u32 = clib_host_to_net_u32((clib_net_to_host_u32(ip4_mask->as_u32) << shifts[0])&0xFFFFFFFF);
+ else
+ ip4_mask->as_u32 = shift_ip4_if(ip4_mask->as_u32, 0xffffff00, shifts[1],
+ shift_ip4_if(ip4_mask->as_u32, 0xffff0000, shifts[2],
+ shift_ip4_if(ip4_mask->as_u32, 0xff000000, shifts[3], ip4_mask->as_u32)));
+}
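+
+/*
+ * A rough illustration of the first-pass relax above (relax2 == 0),
+ * with masks in host byte order: a /32 (0xffffffff) is shifted left by
+ * 6 bits and becomes a /26; /25../31 shift by 5; /17../24 by 4 (so a
+ * /24 becomes a /20); /9../16 by 2; /8 and shorter stay as they are.
+ */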
+
+static void
+relax_ip6_addr(ip6_address_t *ip6_mask, int relax2) {
+ /*
+ * This "better than nothing" relax logic is based on heuristics
+ * from IPv6 knowledge, and may not be optimal.
+ * Some further tuning may be needed in the future.
+ */
+ if (ip6_mask->as_u64[0] == 0xffffffffffffffffULL) {
+ if (ip6_mask->as_u64[1] == 0xffffffffffffffffULL) {
+ /* relax a /128 down to /64 - likely to have more hosts */
+ ip6_mask->as_u64[1] = 0;
+ } else if (ip6_mask->as_u64[1] == 0) {
+ /* relax a /64 down to /56 - likely to have more subnets */
+ ip6_mask->as_u64[0] = clib_host_to_net_u64(0xffffffffffffff00ULL);
+ }
+ }
+}
+
+static void
+relax_tuple(fa_5tuple_t *mask, int is_ip6, int relax2){
+ fa_5tuple_t save_mask = *mask;
+
+ int counter_s = 0, counter_d = 0;
+ if (is_ip6) {
+ int i;
+ for(i=0; i<2; i++){
+ counter_s += count_bits(mask->ip6_addr[0].as_u64[i]);
+ counter_d += count_bits(mask->ip6_addr[1].as_u64[i]);
+ }
+ } else {
+ counter_s += count_bits(mask->ip4_addr[0].as_u32);
+ counter_d += count_bits(mask->ip4_addr[1].as_u32);
+ }
+
+/*
+ * is the rule more strongly aligned with source or destination addresses
+ * (usually the two most important fields) and how much slack needs to be
+ * given to allow for other rules. If the source and destination addresses
+ * are close together (within 4 bits for our experiments), we use both of them.
+ * Otherwise, we drop the smaller (less specific) address and its associated
+ * port field from consideration
+ */
+ const int deltaThreshold = 4;
+  /* TODO: a larger deltaThreshold (e.g. 8) may be appropriate for IPv6 */
+ int delta = counter_s - counter_d;
+ if (-delta > deltaThreshold) {
+ if (is_ip6)
+ mask->ip6_addr[0].as_u64[1] = mask->ip6_addr[0].as_u64[0] = 0;
+ else
+ mask->ip4_addr[0].as_u32 = 0;
+ mask->l4.port[0] = 0;
+ } else if (delta > deltaThreshold) {
+ if (is_ip6)
+ mask->ip6_addr[1].as_u64[1] = mask->ip6_addr[1].as_u64[0] = 0;
+ else
+ mask->ip4_addr[1].as_u32 = 0;
+ mask->l4.port[1] = 0;
+ }
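+
+  /* For example, an IPv4 rule with a /32 source and a /8 destination has
+   * delta = 32 - 8 = 24 > 4: the less specific destination address and
+   * the destination port are dropped from the mask. */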
+
+ if (is_ip6) {
+ relax_ip6_addr(&mask->ip6_addr[0], relax2);
+ relax_ip6_addr(&mask->ip6_addr[1], relax2);
+ } else {
+ relax_ip4_addr(&mask->ip4_addr[0], relax2);
+ relax_ip4_addr(&mask->ip4_addr[1], relax2);
+ }
+ mask->pkt.is_nonfirst_fragment = 0;
+ mask->pkt.l4_valid = 0;
+ if(!first_mask_contains_second_mask(is_ip6, mask, &save_mask)){
+ DBG( "TM-relaxing-ERROR");
+ *mask = save_mask;
+ }
+ DBG( "TM-relaxing-end");
+}
+
+
+static u32
+tm_assign_mask_type_index(acl_main_t *am, fa_5tuple_t *mask, int is_ip6, u32 lc_index)
+{
+ u32 mask_type_index = ~0;
+ u32 for_mask_type_index = ~0;
+ ace_mask_type_entry_t *mte;
+ int order_index;
+  /* look for an existing applied mask that can contain the mask in input */
+
+ hash_applied_mask_info_t **hash_applied_mask_info_vec = vec_elt_at_index(am->hash_applied_mask_info_vec_by_lc_index, lc_index);
+ hash_applied_mask_info_t *minfo;
+
+ if (vec_len(*hash_applied_mask_info_vec) > 0) {
+ for(order_index = vec_len((*hash_applied_mask_info_vec)) -1; order_index >= 0; order_index--) {
+ minfo = vec_elt_at_index((*hash_applied_mask_info_vec), order_index);
+ for_mask_type_index = minfo->mask_type_index;
+ mte = vec_elt_at_index(am->ace_mask_type_pool, for_mask_type_index);
+ if(first_mask_contains_second_mask(is_ip6, &mte->mask, mask)){
+ mask_type_index = (mte - am->ace_mask_type_pool);
+ break;
+ }
+ }
+ }
+
+ if(~0 == mask_type_index) {
+    /* no suitable mask found: create a new one and relax it, so that subsequent similar ACEs can share this mask type entry */
+ DBG( "TM-assigning mask type index-new one");
+ pool_get_aligned (am->ace_mask_type_pool, mte, CLIB_CACHE_LINE_BYTES);
+ mask_type_index = mte - am->ace_mask_type_pool;
+
+ int spot = vec_len((*hash_applied_mask_info_vec));
+ vec_validate((*hash_applied_mask_info_vec), spot);
+ minfo = vec_elt_at_index((*hash_applied_mask_info_vec), spot);
+ minfo->mask_type_index = mask_type_index;
+ minfo->num_entries = 0;
+ minfo->max_collisions = 0;
+ minfo->first_rule_index = ~0;
+
+ clib_memcpy(&mte->mask, mask, sizeof(mte->mask));
+ relax_tuple(&mte->mask, is_ip6, 0);
+
+ mte->refcount = 0;
+ /*
+   * We can use only 16 bits, since the match structure has only a u16 field for it.
+ * Realistically, once you go to 64K of mask types, it is a huge
+ * problem anyway, so we might as well stop half way.
+ */
+ ASSERT(mask_type_index < 32768);
+ }
+ mte = am->ace_mask_type_pool + mask_type_index;
+ mte->refcount++;
+ return mask_type_index;
+}
+
+
static void
fill_applied_hash_ace_kv(acl_main_t *am,
applied_hash_ace_entry_t **applied_hash_aces,
applied_hash_ace_entry_t *pae = vec_elt_at_index((*applied_hash_aces), new_index);
hash_acl_info_t *ha = vec_elt_at_index(am->hash_acl_infos, pae->acl_index);
- memcpy(kv_key, &(vec_elt_at_index(ha->rules, pae->hash_ace_info_index)->match), sizeof(*kv_key));
- /* initialize the sw_if_index and direction */
+ /* apply the mask to ace key */
+ hash_ace_info_t *ace_info = vec_elt_at_index(ha->rules, pae->hash_ace_info_index);
+ ace_mask_type_entry_t *mte = vec_elt_at_index(am->ace_mask_type_pool, pae->mask_type_index);
+
+ u64 *pmatch = (u64 *) &ace_info->match;
+ u64 *pmask = (u64 *)&mte->mask;
+ u64 *pkey = (u64 *)kv->key;
+
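+  /* mask the ACE match value word by word; the 48-byte hash key is 6 x u64 */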
+ *pkey++ = *pmatch++ & *pmask++;
+ *pkey++ = *pmatch++ & *pmask++;
+ *pkey++ = *pmatch++ & *pmask++;
+ *pkey++ = *pmatch++ & *pmask++;
+ *pkey++ = *pmatch++ & *pmask++;
+ *pkey++ = *pmatch++ & *pmask++;
+
+ kv_key->pkt.mask_type_index_lsb = pae->mask_type_index;
kv_key->pkt.lc_index = lc_index;
kv_val->as_u64 = 0;
kv_val->applied_entry_index = new_index;
- kv_val->need_portrange_check = vec_elt_at_index(ha->rules, pae->hash_ace_info_index)->src_portrange_not_powerof2 ||
- vec_elt_at_index(ha->rules, pae->hash_ace_info_index)->dst_portrange_not_powerof2;
- /* by default assume all values are shadowed -> check all mask types */
- kv_val->shadowed = 1;
}
static void
}
+static u32
+find_mask_type_index(acl_main_t *am, fa_5tuple_t *mask)
+{
+ ace_mask_type_entry_t *mte;
+ /* *INDENT-OFF* */
+ pool_foreach(mte, am->ace_mask_type_pool,
+ ({
+ if(memcmp(&mte->mask, mask, sizeof(*mask)) == 0)
+ return (mte - am->ace_mask_type_pool);
+ }));
+ /* *INDENT-ON* */
+ return ~0;
+}
+
+static u32
+assign_mask_type_index(acl_main_t *am, fa_5tuple_t *mask)
+{
+ u32 mask_type_index = find_mask_type_index(am, mask);
+ ace_mask_type_entry_t *mte;
+ if(~0 == mask_type_index) {
+ pool_get_aligned (am->ace_mask_type_pool, mte, CLIB_CACHE_LINE_BYTES);
+ mask_type_index = mte - am->ace_mask_type_pool;
+ clib_memcpy(&mte->mask, mask, sizeof(mte->mask));
+ mte->refcount = 0;
+ /*
+     * We can use only 16 bits, since the match structure has only a u16 field for it.
+ * Realistically, once you go to 64K of mask types, it is a huge
+ * problem anyway, so we might as well stop half way.
+ */
+ ASSERT(mask_type_index < 32768);
+ }
+ mte = am->ace_mask_type_pool + mask_type_index;
+ mte->refcount++;
+ return mask_type_index;
+}
+
+static void
+release_mask_type_index(acl_main_t *am, u32 mask_type_index)
+{
+ ace_mask_type_entry_t *mte = pool_elt_at_index(am->ace_mask_type_pool, mask_type_index);
+ mte->refcount--;
+ if (mte->refcount == 0) {
+ /* we are not using this entry anymore */
+ pool_put(am->ace_mask_type_pool, mte);
+ }
+}
+
+static void
+remake_hash_applied_mask_info_vec (acl_main_t * am,
+ applied_hash_ace_entry_t **
+ applied_hash_aces, u32 lc_index)
+{
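+  /* rebuild the per-lc_index mask info vector from scratch: scan all applied entries and collect per-mask stats */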
+ hash_applied_mask_info_t *new_hash_applied_mask_info_vec =
+ vec_new (hash_applied_mask_info_t, 0);
+
+ hash_applied_mask_info_t *minfo;
+ int i;
+ for (i = 0; i < vec_len ((*applied_hash_aces)); i++)
+ {
+ applied_hash_ace_entry_t *pae =
+ vec_elt_at_index ((*applied_hash_aces), i);
+
+ /* check if mask_type_index is already there */
+ u32 new_pointer = vec_len (new_hash_applied_mask_info_vec);
+ int search;
+ for (search = 0; search < vec_len (new_hash_applied_mask_info_vec);
+ search++)
+ {
+ minfo = vec_elt_at_index (new_hash_applied_mask_info_vec, search);
+ if (minfo->mask_type_index == pae->mask_type_index)
+ break;
+ }
+
+ vec_validate ((new_hash_applied_mask_info_vec), search);
+ minfo = vec_elt_at_index ((new_hash_applied_mask_info_vec), search);
+ if (search == new_pointer)
+ {
+ minfo->mask_type_index = pae->mask_type_index;
+ minfo->num_entries = 0;
+ minfo->max_collisions = 0;
+ minfo->first_rule_index = ~0;
+ }
+
+ minfo->num_entries = minfo->num_entries + 1;
+
+ if (vec_len (pae->colliding_rules) > minfo->max_collisions)
+ minfo->max_collisions = vec_len (pae->colliding_rules);
+
+ if (minfo->first_rule_index > i)
+ minfo->first_rule_index = i;
+ }
+
+ hash_applied_mask_info_t **hash_applied_mask_info_vec =
+ vec_elt_at_index (am->hash_applied_mask_info_vec_by_lc_index, lc_index);
+
+ vec_free ((*hash_applied_mask_info_vec));
+ (*hash_applied_mask_info_vec) = new_hash_applied_mask_info_vec;
+}
+
+static void
+vec_del_collision_rule (collision_match_rule_t ** pvec,
+ u32 applied_entry_index)
+{
+ u32 i;
+ for (i = 0; i < vec_len ((*pvec)); i++)
+ {
+ collision_match_rule_t *cr = vec_elt_at_index ((*pvec), i);
+ if (cr->applied_entry_index == applied_entry_index)
+ {
+ vec_del1 ((*pvec), i);
+ }
+ }
+}
+
+static void
+del_colliding_rule (applied_hash_ace_entry_t ** applied_hash_aces,
+ u32 head_index, u32 applied_entry_index)
+{
+ applied_hash_ace_entry_t *head_pae =
+ vec_elt_at_index ((*applied_hash_aces), head_index);
+ vec_del_collision_rule (&head_pae->colliding_rules, applied_entry_index);
+}
static void
+add_colliding_rule (acl_main_t * am,
+ applied_hash_ace_entry_t ** applied_hash_aces,
+ u32 head_index, u32 applied_entry_index)
+{
+ applied_hash_ace_entry_t *head_pae =
+ vec_elt_at_index ((*applied_hash_aces), head_index);
+ applied_hash_ace_entry_t *pae =
+ vec_elt_at_index ((*applied_hash_aces), applied_entry_index);
+
+ collision_match_rule_t cr;
+
+ cr.acl_index = pae->acl_index;
+ cr.ace_index = pae->ace_index;
+ cr.acl_position = pae->acl_position;
+ cr.applied_entry_index = applied_entry_index;
+ cr.rule = am->acls[pae->acl_index].rules[pae->ace_index];
+ vec_add1 (head_pae->colliding_rules, cr);
+}
+
+static u32
activate_applied_ace_hash_entry(acl_main_t *am,
u32 lc_index,
applied_hash_ace_entry_t **applied_hash_aces,
pae->prev_applied_entry_index = last_index;
/* adjust the pointer to the new tail */
first_pae->tail_applied_entry_index = new_index;
+ add_colliding_rule(am, applied_hash_aces, first_index, new_index);
+ return first_index;
} else {
/* It's the very first entry */
hashtable_add_del(am, &kv, 1);
ASSERT(new_index != ~0);
pae->tail_applied_entry_index = new_index;
+ add_colliding_rule(am, applied_hash_aces, new_index, new_index);
+ return new_index;
}
}
-static void
-applied_hash_entries_analyze(acl_main_t *am, applied_hash_ace_entry_t **applied_hash_aces)
-{
- /*
- * Go over the rules and check which ones are shadowed and which aren't.
- * Naive approach: try to match the match value from every ACE as if it
- * was a live packet, and see if the resulting match happens earlier in the list.
- * if it does not match or it is later in the ACL - then the entry is not shadowed.
- *
- * This approach fails, an example:
- * deny tcp 2001:db8::/32 2001:db8::/32
- * permit ip 2001:db8::1/128 2001:db8::2/128
- */
-}
static void *
hash_acl_set_heap(acl_main_t *am)
}
}
+static void
+assign_mask_type_index_to_pae(acl_main_t *am, u32 lc_index, int is_ip6, applied_hash_ace_entry_t *pae)
+{
+ hash_acl_info_t *ha = vec_elt_at_index(am->hash_acl_infos, pae->acl_index);
+ hash_ace_info_t *ace_info = vec_elt_at_index(ha->rules, pae->hash_ace_info_index);
+
+ ace_mask_type_entry_t *mte;
+ fa_5tuple_t *mask;
+ /*
+ * Start taking base_mask associated to ace, and essentially copy it.
+ * With TupleMerge we will assign a relaxed mask here.
+ */
+ mte = vec_elt_at_index(am->ace_mask_type_pool, ace_info->base_mask_type_index);
+ mask = &mte->mask;
+ if (am->use_tuple_merge)
+ pae->mask_type_index = tm_assign_mask_type_index(am, mask, is_ip6, lc_index);
+ else
+ pae->mask_type_index = assign_mask_type_index(am, mask);
+}
+
+static void
+split_partition(acl_main_t *am, u32 first_index,
+ u32 lc_index, int is_ip6);
+
+
+static void
+check_collision_count_and_maybe_split(acl_main_t *am, u32 lc_index, int is_ip6, u32 first_index)
+{
+ applied_hash_ace_entry_t **applied_hash_aces = get_applied_hash_aces(am, lc_index);
+ applied_hash_ace_entry_t *first_pae = vec_elt_at_index((*applied_hash_aces), first_index);
+ if (vec_len(first_pae->colliding_rules) > am->tuple_merge_split_threshold) {
+ split_partition(am, first_index, lc_index, is_ip6);
+ }
+}
+
void
hash_acl_apply(acl_main_t *am, u32 lc_index, int acl_index, u32 acl_position)
{
}
vec_add1((*hash_acl_applied_lc_index), lc_index);
- pal->mask_type_index_bitmap = clib_bitmap_or(pal->mask_type_index_bitmap,
- ha->mask_type_index_bitmap);
/*
* if the applied ACL is empty, the current code will cause a
* different behavior compared to current linear search: an empty ACL will
/* expand the applied aces vector by the necessary amount */
vec_resize((*applied_hash_aces), vec_len(ha->rules));
+ vec_validate(am->hash_applied_mask_info_vec_by_lc_index, lc_index);
/* add the rules from the ACL to the hash table for lookup and append to the vector*/
for(i=0; i < vec_len(ha->rules); i++) {
+ int is_ip6 = ha->rules[i].match.pkt.is_ip6;
u32 new_index = base_offset + i;
applied_hash_ace_entry_t *pae = vec_elt_at_index((*applied_hash_aces), new_index);
pae->acl_index = acl_index;
pae->next_applied_entry_index = ~0;
pae->prev_applied_entry_index = ~0;
pae->tail_applied_entry_index = ~0;
- activate_applied_ace_hash_entry(am, lc_index, applied_hash_aces, new_index);
+ pae->colliding_rules = NULL;
+ pae->mask_type_index = ~0;
+ assign_mask_type_index_to_pae(am, lc_index, is_ip6, pae);
+ u32 first_index = activate_applied_ace_hash_entry(am, lc_index, applied_hash_aces, new_index);
+ if (am->use_tuple_merge)
+ check_collision_count_and_maybe_split(am, lc_index, is_ip6, first_index);
}
- applied_hash_entries_analyze(am, applied_hash_aces);
+ remake_hash_applied_mask_info_vec(am, applied_hash_aces, lc_index);
done:
clib_mem_set_heap (oldheap);
}
applied_hash_ace_entry_t *prev_pae = vec_elt_at_index((*applied_hash_aces), pae->prev_applied_entry_index);
ASSERT(prev_pae->next_applied_entry_index == old_index);
prev_pae->next_applied_entry_index = pae->next_applied_entry_index;
+
+ u32 head_index = find_head_applied_ace_index(applied_hash_aces, old_index);
+ ASSERT(head_index != ~0);
+ applied_hash_ace_entry_t *head_pae = vec_elt_at_index((*applied_hash_aces), head_index);
+ del_colliding_rule(applied_hash_aces, head_index, old_index);
+
if (pae->next_applied_entry_index == ~0) {
/* it was a last entry we removed, update the pointer on the first one */
- u32 head_index = find_head_applied_ace_index(applied_hash_aces, old_index);
- DBG("UNAPPLY = index %d head index to update %d", old_index, head_index);
- ASSERT(head_index != ~0);
- applied_hash_ace_entry_t *head_pae = vec_elt_at_index((*applied_hash_aces), head_index);
-
ASSERT(head_pae->tail_applied_entry_index == old_index);
head_pae->tail_applied_entry_index = pae->prev_applied_entry_index;
} else {
applied_hash_ace_entry_t *next_pae = vec_elt_at_index((*applied_hash_aces), pae->next_applied_entry_index);
ASSERT(pae->tail_applied_entry_index != ~0);
next_pae->tail_applied_entry_index = pae->tail_applied_entry_index;
- DBG("Resetting the hash table entry from %d to %d, setting tail index to %d", old_index, pae->next_applied_entry_index, pae->tail_applied_entry_index);
+ /* Remove ourselves and transfer the ownership of the colliding rules vector */
+ del_colliding_rule(applied_hash_aces, old_index, old_index);
+ next_pae->colliding_rules = pae->colliding_rules;
/* unlink from the next element */
next_pae->prev_applied_entry_index = ~0;
add_del_hashtable_entry(am, lc_index,
applied_hash_aces, old_index, 0);
}
}
+
+ release_mask_type_index(am, pae->mask_type_index);
/* invalidate the old entry */
+ pae->mask_type_index = ~0;
pae->prev_applied_entry_index = ~0;
pae->next_applied_entry_index = ~0;
pae->tail_applied_entry_index = ~0;
+  /* always has to be NULL */
+ pae->colliding_rules = NULL;
}
-static void
-hash_acl_build_applied_lookup_bitmap(acl_main_t *am, u32 lc_index)
-{
- int i;
- uword *new_lookup_bitmap = 0;
-
- applied_hash_acl_info_t **applied_hash_acls = &am->applied_hash_acl_info_by_lc_index;
- vec_validate((*applied_hash_acls), lc_index);
- applied_hash_acl_info_t *pal = vec_elt_at_index((*applied_hash_acls), lc_index);
-
- for(i=0; i < vec_len(pal->applied_acls); i++) {
- u32 a_acl_index = *vec_elt_at_index((pal->applied_acls), i);
- hash_acl_info_t *ha = vec_elt_at_index(am->hash_acl_infos, a_acl_index);
- DBG("Update bitmask = %U or %U (acl_index %d)\n", format_bitmap_hex, new_lookup_bitmap,
- format_bitmap_hex, ha->mask_type_index_bitmap, a_acl_index);
- new_lookup_bitmap = clib_bitmap_or(new_lookup_bitmap,
- ha->mask_type_index_bitmap);
- }
- uword *old_lookup_bitmap = pal->mask_type_index_bitmap;
- pal->mask_type_index_bitmap = new_lookup_bitmap;
- clib_bitmap_free(old_lookup_bitmap);
-}
-
void
hash_acl_unapply(acl_main_t *am, u32 lc_index, int acl_index)
{
/* trim the end of the vector */
_vec_len((*applied_hash_aces)) -= vec_len(ha->rules);
- applied_hash_entries_analyze(am, applied_hash_aces);
+ remake_hash_applied_mask_info_vec(am, applied_hash_aces, lc_index);
- /* After deletion we might not need some of the mask-types anymore... */
- hash_acl_build_applied_lookup_bitmap(am, lc_index);
clib_mem_set_heap (oldheap);
}
DBG0("Start index for acl %d in lc_index %d is %d", acl_index, lc_index, start_index);
/*
- * This function is called after we find out the sw_if_index where ACL is applied.
- * If the by-sw_if_index vector does not have the ACL#, then it's a bug.
+ * This function is called after we find out the lc_index where ACL is applied.
+ * If the by-lc_index vector does not have the ACL#, then it's a bug.
*/
ASSERT(start_index < vec_len(*applied_acls));
ip4_address_mask_from_width(addr, prefix_len);
}
-static u8
+static void
make_port_mask(u16 *portmask, u16 port_first, u16 port_last)
{
if (port_first == port_last) {
*portmask = 0xffff;
/* single port is representable by masked value */
- return 0;
- }
- if ((port_first == 0) && (port_last == 65535)) {
- *portmask = 0;
- /* wildcard port is representable by a masked value */
- return 0;
+ return;
}
- /*
- * For now match all the ports, later
- * here might be a better optimization which would
- * pick out bitmaskable portranges.
- *
- * However, adding a new mask type potentially
- * adds a per-packet extra lookup, so the benefit is not clear.
- */
*portmask = 0;
- /* This port range can't be represented via bitmask exactly. */
- return 1;
+ return;
}
static void
hi->match.l4.proto = r->proto;
/* Calculate the src/dst port masks and make the src/dst port matches accordingly */
- hi->src_portrange_not_powerof2 = make_port_mask(&mask->l4.port[0], r->src_port_or_type_first, r->src_port_or_type_last);
+ make_port_mask(&mask->l4.port[0], r->src_port_or_type_first, r->src_port_or_type_last);
hi->match.l4.port[0] = r->src_port_or_type_first & mask->l4.port[0];
- hi->dst_portrange_not_powerof2 = make_port_mask(&mask->l4.port[1], r->dst_port_or_code_first, r->dst_port_or_code_last);
+
+ make_port_mask(&mask->l4.port[1], r->dst_port_or_code_first, r->dst_port_or_code_last);
hi->match.l4.port[1] = r->dst_port_or_code_first & mask->l4.port[1];
/* L4 info must be valid in order to match */
mask->pkt.l4_valid = 1;
}
}
-static u32
-find_mask_type_index(acl_main_t *am, fa_5tuple_t *mask)
-{
- ace_mask_type_entry_t *mte;
- /* *INDENT-OFF* */
- pool_foreach(mte, am->ace_mask_type_pool,
- ({
- if(memcmp(&mte->mask, mask, sizeof(*mask)) == 0)
- return (mte - am->ace_mask_type_pool);
- }));
- /* *INDENT-ON* */
- return ~0;
-}
-
-static u32
-assign_mask_type_index(acl_main_t *am, fa_5tuple_t *mask)
-{
- u32 mask_type_index = find_mask_type_index(am, mask);
- ace_mask_type_entry_t *mte;
- if(~0 == mask_type_index) {
- pool_get_aligned (am->ace_mask_type_pool, mte, CLIB_CACHE_LINE_BYTES);
- mask_type_index = mte - am->ace_mask_type_pool;
- clib_memcpy(&mte->mask, mask, sizeof(mte->mask));
- mte->refcount = 0;
- /*
- * We can use only 16 bits, since in the match there is only u16 field.
- * Realistically, once you go to 64K of mask types, it is a huge
- * problem anyway, so we might as well stop half way.
- */
- ASSERT(mask_type_index < 32768);
- }
- mte = am->ace_mask_type_pool + mask_type_index;
- mte->refcount++;
- return mask_type_index;
-}
-
-static void
-release_mask_type_index(acl_main_t *am, u32 mask_type_index)
-{
- ace_mask_type_entry_t *mte = pool_elt_at_index(am->ace_mask_type_pool, mask_type_index);
- mte->refcount--;
- if (mte->refcount == 0) {
- /* we are not using this entry anymore */
- pool_put(am->ace_mask_type_pool, mte);
- }
-}
int hash_acl_exists(acl_main_t *am, int acl_index)
{
ace_info.ace_index = i;
make_mask_and_match_from_rule(&mask, &a->rules[i], &ace_info);
- ace_info.mask_type_index = assign_mask_type_index(am, &mask);
+      /* reserved flags are not part of the match, keep them clear in the mask */
+      mask.pkt.flags_reserved = 0b000;
+ ace_info.base_mask_type_index = assign_mask_type_index(am, &mask);
/* assign the mask type index for matching itself */
- ace_info.match.pkt.mask_type_index_lsb = ace_info.mask_type_index;
- DBG("ACE: %d mask_type_index: %d", i, ace_info.mask_type_index);
- /* Ensure a given index is set in the mask type index bitmap for this ACL */
- ha->mask_type_index_bitmap = clib_bitmap_set(ha->mask_type_index_bitmap, ace_info.mask_type_index, 1);
+ ace_info.match.pkt.mask_type_index_lsb = ace_info.base_mask_type_index;
+ DBG("ACE: %d mask_type_index: %d", i, ace_info.base_mask_type_index);
vec_add1(ha->rules, ace_info);
}
/*
* the reference count, possibly freeing up some of them */
int i;
for(i=0; i < vec_len(ha->rules); i++) {
- release_mask_type_index(am, ha->rules[i].mask_type_index);
+ release_mask_type_index(am, ha->rules[i].base_mask_type_index);
}
- clib_bitmap_free(ha->mask_type_index_bitmap);
ha->hash_acl_exists = 0;
vec_free(ha->rules);
clib_mem_set_heap (oldheap);
vlib_cli_output (vm, "acl-index %u bitmask-ready layout\n", i);
vlib_cli_output (vm, " applied lc_index list: %U\n",
format_vec32, ha->lc_index_list, "%d");
- vlib_cli_output (vm, " mask type index bitmap: %U\n",
- format_bitmap_hex, ha->mask_type_index_bitmap);
for (j = 0; j < vec_len (ha->rules); j++)
{
hash_ace_info_t *pa = &ha->rules[j];
m = (u64 *) & pa->match;
vlib_cli_output (vm,
- " %4d: %016llx %016llx %016llx %016llx %016llx %016llx mask index %d acl %d rule %d action %d src/dst portrange not ^2: %d,%d\n",
+ " %4d: %016llx %016llx %016llx %016llx %016llx %016llx base mask index %d acl %d rule %d action %d\n",
j, m[0], m[1], m[2], m[3], m[4], m[5],
- pa->mask_type_index, pa->acl_index, pa->ace_index,
- pa->action, pa->src_portrange_not_powerof2,
- pa->dst_portrange_not_powerof2);
+ pa->base_mask_type_index, pa->acl_index, pa->ace_index,
+ pa->action);
}
}
}
-void
+static void
+acl_plugin_print_colliding_rule (vlib_main_t * vm, int j, collision_match_rule_t *cr) {
+ vlib_cli_output(vm,
+ " %4d: acl %d ace %d acl pos %d pae index: %d",
+ j, cr->acl_index, cr->ace_index, cr->acl_position, cr->applied_entry_index);
+}
+
+static void
acl_plugin_print_pae (vlib_main_t * vm, int j, applied_hash_ace_entry_t * pae)
{
vlib_cli_output (vm,
- " %4d: acl %d rule %d action %d bitmask-ready rule %d next %d prev %d tail %d hitcount %lld",
+ " %4d: acl %d rule %d action %d bitmask-ready rule %d colliding_rules: %d next %d prev %d tail %d hitcount %lld acl_pos: %d",
j, pae->acl_index, pae->ace_index, pae->action,
- pae->hash_ace_info_index, pae->next_applied_entry_index,
+ pae->hash_ace_info_index, vec_len(pae->colliding_rules), pae->next_applied_entry_index,
pae->prev_applied_entry_index,
- pae->tail_applied_entry_index, pae->hitcount);
+ pae->tail_applied_entry_index, pae->hitcount, pae->acl_position);
+ int jj;
+ for(jj=0; jj<vec_len(pae->colliding_rules); jj++)
+ acl_plugin_print_colliding_rule(vm, jj, vec_elt_at_index(pae->colliding_rules, jj));
+}
+
+static void
+acl_plugin_print_applied_mask_info (vlib_main_t * vm, int j, hash_applied_mask_info_t *mi)
+{
+ vlib_cli_output (vm,
+ " %4d: mask type index %d first rule index %d num_entries %d max_collisions %d",
+ j, mi->mask_type_index, mi->first_rule_index, mi->num_entries, mi->max_collisions);
}
void
{
applied_hash_acl_info_t *pal =
&am->applied_hash_acl_info_by_lc_index[lci];
- vlib_cli_output (vm, " lookup mask_type_index_bitmap: %U",
- format_bitmap_hex, pal->mask_type_index_bitmap);
vlib_cli_output (vm, " applied acls: %U", format_vec32,
pal->applied_acls, "%d");
}
+ if (lci < vec_len (am->hash_applied_mask_info_vec_by_lc_index))
+ {
+ vlib_cli_output (vm, " applied mask info entries:");
+ for (j = 0;
+ j < vec_len (am->hash_applied_mask_info_vec_by_lc_index[lci]);
+ j++)
+ {
+ acl_plugin_print_applied_mask_info (vm, j,
+ &am->hash_applied_mask_info_vec_by_lc_index
+ [lci][j]);
+ }
+ }
if (lci < vec_len (am->hash_entry_vec_by_lc_index))
{
vlib_cli_output (vm, " lookup applied entries:");
show_hash_acl_hash (vm, am, show_bihash_verbose);
}
+/*
+ * Split of the partition needs to happen when the collision count
+ * goes over a specified threshold.
+ *
+ * This is a signal that we ignored too many bits in
+ * mT and we need to split the table into two tables. We select
+ * all of the colliding rules L and find their maximum common
+ * tuple mL. Normally mL is specific enough to hash L with few
+ * or no collisions. We then create a new table T2 with tuple mL
+ * and transfer all compatible rules from T to T2. If mL is not
+ * specific enough, we find the field with the biggest difference
+ * between the minimum and maximum tuple lengths for all of
+ * the rules in L and set that field to be the average of those two
+ * values. We then transfer all compatible rules as before. This
+ * guarantees that some rules from L will move and that T2 will
+ * have a smaller number of collisions than T did.
+ */
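+
+/*
+ * For instance, if the colliding rules carry source-address masks of
+ * /32, /28 and /24, min_tuple ends up with /24 and max_tuple with /32
+ * in that field; the new table mask is derived from min_tuple, the
+ * "maximum common tuple" mL of the paper.
+ */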
+
+
+static void
+ensure_ip6_min_addr (ip6_address_t * min_addr, ip6_address_t * mask_addr)
+{
+ int update =
+ (clib_net_to_host_u64 (mask_addr->as_u64[0]) <
+ clib_net_to_host_u64 (min_addr->as_u64[0]))
+ ||
+ ((clib_net_to_host_u64 (mask_addr->as_u64[0]) ==
+ clib_net_to_host_u64 (min_addr->as_u64[0]))
+ && (clib_net_to_host_u64 (mask_addr->as_u64[1]) <
+ clib_net_to_host_u64 (min_addr->as_u64[1])));
+ if (update)
+ {
+ min_addr->as_u64[0] = mask_addr->as_u64[0];
+ min_addr->as_u64[1] = mask_addr->as_u64[1];
+ }
+}
+
+static void
+ensure_ip6_max_addr (ip6_address_t * max_addr, ip6_address_t * mask_addr)
+{
+ int update =
+ (clib_net_to_host_u64 (mask_addr->as_u64[0]) >
+ clib_net_to_host_u64 (max_addr->as_u64[0]))
+ ||
+ ((clib_net_to_host_u64 (mask_addr->as_u64[0]) ==
+ clib_net_to_host_u64 (max_addr->as_u64[0]))
+ && (clib_net_to_host_u64 (mask_addr->as_u64[1]) >
+ clib_net_to_host_u64 (max_addr->as_u64[1])));
+ if (update)
+ {
+ max_addr->as_u64[0] = mask_addr->as_u64[0];
+ max_addr->as_u64[1] = mask_addr->as_u64[1];
+ }
+}
+
+static void
+ensure_ip4_min_addr (ip4_address_t * min_addr, ip4_address_t * mask_addr)
+{
+ int update =
+ (clib_net_to_host_u32 (mask_addr->as_u32) <
+ clib_net_to_host_u32 (min_addr->as_u32));
+ if (update)
+ min_addr->as_u32 = mask_addr->as_u32;
+}
+
+static void
+ensure_ip4_max_addr (ip4_address_t * max_addr, ip4_address_t * mask_addr)
+{
+ int update =
+ (clib_net_to_host_u32 (mask_addr->as_u32) >
+ clib_net_to_host_u32 (max_addr->as_u32));
+ if (update)
+ max_addr->as_u32 = mask_addr->as_u32;
+}
+
+enum {
+ DIM_SRC_ADDR = 0,
+ DIM_DST_ADDR,
+ DIM_SRC_PORT,
+ DIM_DST_PORT,
+ DIM_PROTO,
+};
+
+
+
+static void
+split_partition(acl_main_t *am, u32 first_index,
+ u32 lc_index, int is_ip6){
+ DBG( "TM-split_partition - first_entry:%d", first_index);
+ applied_hash_ace_entry_t **applied_hash_aces = get_applied_hash_aces(am, lc_index);
+ ace_mask_type_entry_t *mte;
+ fa_5tuple_t the_min_tuple, *min_tuple = &the_min_tuple;
+ fa_5tuple_t the_max_tuple, *max_tuple = &the_max_tuple;
+ applied_hash_ace_entry_t *pae = vec_elt_at_index((*applied_hash_aces), first_index);
+ hash_acl_info_t *ha = vec_elt_at_index(am->hash_acl_infos, pae->acl_index);
+ hash_ace_info_t *ace_info;
+ u32 coll_mask_type_index = pae->mask_type_index;
+ memset(&the_min_tuple, 0, sizeof(the_min_tuple));
+ memset(&the_max_tuple, 0, sizeof(the_max_tuple));
+
+ int i=0;
+ u64 collisions = vec_len(pae->colliding_rules);
+ for(i=0; i<collisions; i++){
+
+ DBG( "TM-collision: base_ace:%d (ace_mask:%d, first_collision_mask:%d)",
+ pae->ace_index, pae->mask_type_index, coll_mask_type_index);
+
+ ace_info = vec_elt_at_index(ha->rules, pae->hash_ace_info_index);
+ mte = vec_elt_at_index(am->ace_mask_type_pool, ace_info->base_mask_type_index);
+ fa_5tuple_t *mask = &mte->mask;
+
+      /* only the rules hashed with the colliding mask contribute to min/max */
+      if(pae->mask_type_index != coll_mask_type_index) {
+        if(pae->next_applied_entry_index == ~0)
+          break;
+        pae = vec_elt_at_index((*applied_hash_aces), pae->next_applied_entry_index);
+        continue;
+      }
+ /* Computing min_mask and max_mask for colliding rules */
+ if(i==0){
+ clib_memcpy(min_tuple, mask, sizeof(fa_5tuple_t));
+ clib_memcpy(max_tuple, mask, sizeof(fa_5tuple_t));
+ }else{
+ int j;
+ for(j=0; j<2; j++){
+ if (is_ip6)
+ ensure_ip6_min_addr(&min_tuple->ip6_addr[j], &mask->ip6_addr[j]);
+ else
+ ensure_ip4_min_addr(&min_tuple->ip4_addr[j], &mask->ip4_addr[j]);
+
+ if ((mask->l4.port[j] < min_tuple->l4.port[j]))
+ min_tuple->l4.port[j] = mask->l4.port[j];
+ }
+
+ if ((mask->l4.proto < min_tuple->l4.proto))
+ min_tuple->l4.proto = mask->l4.proto;
+
+ if(mask->pkt.as_u64 < min_tuple->pkt.as_u64)
+ min_tuple->pkt.as_u64 = mask->pkt.as_u64;
+
+
+ for(j=0; j<2; j++){
+ if (is_ip6)
+ ensure_ip6_max_addr(&max_tuple->ip6_addr[j], &mask->ip6_addr[j]);
+ else
+ ensure_ip4_max_addr(&max_tuple->ip4_addr[j], &mask->ip4_addr[j]);
+
+ if ((mask->l4.port[j] > max_tuple->l4.port[j]))
+ max_tuple->l4.port[j] = mask->l4.port[j];
+ }
+
+      if ((mask->l4.proto > max_tuple->l4.proto))
+ max_tuple->l4.proto = mask->l4.proto;
+
+ if(mask->pkt.as_u64 > max_tuple->pkt.as_u64)
+ max_tuple->pkt.as_u64 = mask->pkt.as_u64;
+ }
+
+      if(pae->next_applied_entry_index == ~0)
+        break;
+      pae = vec_elt_at_index((*applied_hash_aces), pae->next_applied_entry_index);
+ }
+
+ /* Computing field with max difference between (min/max)_mask */
+ int best_dim=-1, best_delta=0, delta=0;
+
+ /* SRC_addr dimension */
+ if (is_ip6) {
+ int i;
+ for(i=0; i<2; i++){
+ delta += count_bits(max_tuple->ip6_addr[0].as_u64[i]) - count_bits(min_tuple->ip6_addr[0].as_u64[i]);
+ }
+ } else {
+ delta += count_bits(max_tuple->ip4_addr[0].as_u32) - count_bits(min_tuple->ip4_addr[0].as_u32);
+ }
+ if(delta > best_delta){
+ best_delta = delta;
+ best_dim = DIM_SRC_ADDR;
+ }
+
+ /* DST_addr dimension */
+ delta = 0;
+ if (is_ip6) {
+ int i;
+ for(i=0; i<2; i++){
+ delta += count_bits(max_tuple->ip6_addr[1].as_u64[i]) - count_bits(min_tuple->ip6_addr[1].as_u64[i]);
+ }
+ } else {
+ delta += count_bits(max_tuple->ip4_addr[1].as_u32) - count_bits(min_tuple->ip4_addr[1].as_u32);
+ }
+ if(delta > best_delta){
+ best_delta = delta;
+ best_dim = DIM_DST_ADDR;
+ }
+
+ /* SRC_port dimension */
+ delta = count_bits(max_tuple->l4.port[0]) - count_bits(min_tuple->l4.port[0]);
+ if(delta > best_delta){
+ best_delta = delta;
+ best_dim = DIM_SRC_PORT;
+ }
+
+ /* DST_port dimension */
+ delta = count_bits(max_tuple->l4.port[1]) - count_bits(min_tuple->l4.port[1]);
+ if(delta > best_delta){
+ best_delta = delta;
+ best_dim = DIM_DST_PORT;
+ }
+
+ /* Proto dimension */
+ delta = count_bits(max_tuple->l4.proto) - count_bits(min_tuple->l4.proto);
+ if(delta > best_delta){
+ best_delta = delta;
+ best_dim = DIM_PROTO;
+ }
+
+  int shifting = 0;
+ switch(best_dim){
+ case DIM_SRC_ADDR:
+		shifting = (best_delta)/2; /* FIXME: IPv4-only; IPv6 addresses are not handled here */
+ min_tuple->ip4_addr[0].as_u32 =
+ clib_host_to_net_u32((clib_net_to_host_u32(max_tuple->ip4_addr[0].as_u32) << (shifting))&0xFFFFFFFF);
+
+ break;
+ case DIM_DST_ADDR:
+ shifting = (best_delta)/2;
+/*
+ * FIXME: as above, this is IPv4-only; the equivalent shift for
+ * IPv6 addresses remains to be implemented.
+ */
+ min_tuple->ip4_addr[1].as_u32 =
+ clib_host_to_net_u32((clib_net_to_host_u32(max_tuple->ip4_addr[1].as_u32) << (shifting))&0xFFFFFFFF);
+
+ break;
+ case DIM_SRC_PORT: min_tuple->l4.port[0] = max_tuple->l4.port[0] << (best_delta)/2;
+ break;
+ case DIM_DST_PORT: min_tuple->l4.port[1] = max_tuple->l4.port[1] << (best_delta)/2;
+ break;
+ case DIM_PROTO: min_tuple->l4.proto = max_tuple->l4.proto << (best_delta)/2;
+ break;
+ default: relax_tuple(min_tuple, is_ip6, 1);
+ break;
+ }
+
+ min_tuple->pkt.is_nonfirst_fragment = 0;
+ u32 new_mask_type_index = assign_mask_type_index(am, min_tuple);
+
+ hash_applied_mask_info_t **hash_applied_mask_info_vec = vec_elt_at_index(am->hash_applied_mask_info_vec_by_lc_index, lc_index);
+
+ hash_applied_mask_info_t *minfo;
+  /* search whether the new mask_type_index is already in the applied mask info vector */
+  int search;
+  int new_pointer = vec_len((*hash_applied_mask_info_vec));
+ for (search=0; search < vec_len((*hash_applied_mask_info_vec)); search++){
+ minfo = vec_elt_at_index((*hash_applied_mask_info_vec), search);
+ if(minfo->mask_type_index == new_mask_type_index)
+ break;
+ }
+
+ vec_validate((*hash_applied_mask_info_vec), search);
+ minfo = vec_elt_at_index((*hash_applied_mask_info_vec), search);
+  /* initialize only a freshly created entry, to avoid resetting the stats of an existing one */
+  if (search == new_pointer) {
+    minfo->mask_type_index = new_mask_type_index;
+    minfo->num_entries = 0;
+    minfo->max_collisions = 0;
+    minfo->first_rule_index = ~0;
+  }
+
+ DBG( "TM-split_partition - mask type index-assigned!! -> %d", new_mask_type_index);
+
+  if(coll_mask_type_index == new_mask_type_index){
+    /* the relaxed mask is identical to the one that overflowed: we cannot split */
+    return;
+  }
+
+
+ /* populate new partition */
+ DBG( "TM-Populate new partition");
+  u32 r_ace_index;
+
+  for(r_ace_index=0; r_ace_index < vec_len((*applied_hash_aces)); r_ace_index++) {
+
+ applied_hash_ace_entry_t *pop_pae = vec_elt_at_index((*applied_hash_aces), r_ace_index);
+ DBG( "TM-Population-collision: base_ace:%d (ace_mask:%d, first_collision_mask:%d)",
+ pop_pae->ace_index, pop_pae->mask_type_index, coll_mask_type_index);
+
+ if(pop_pae->mask_type_index != coll_mask_type_index) continue;
+
+ ace_info = vec_elt_at_index(ha->rules, pop_pae->hash_ace_info_index);
+ mte = vec_elt_at_index(am->ace_mask_type_pool, ace_info->base_mask_type_index);
+    /* can the new partition's mask contain this rule? */
+ fa_5tuple_t *pop_mask = &mte->mask;
+
+ if(!first_mask_contains_second_mask(is_ip6, min_tuple, pop_mask)) continue;
+ DBG( "TM-new partition can insert -> applied_ace:%d", r_ace_index);
+
+    /* remove the entry and re-insert it under the new, more specific mask */
+ deactivate_applied_ace_hash_entry(am, lc_index, applied_hash_aces, r_ace_index);
+
+ /* insert the new entry */
+ pop_pae->mask_type_index = new_mask_type_index;
+
+ activate_applied_ace_hash_entry(am, lc_index, applied_hash_aces, r_ace_index);
+
+ }
+
+ DBG( "TM-Populate new partition-END");
+ DBG( "TM-split_partition - END");
+
+}
+