/*
 *------------------------------------------------------------------
 * Copyright (c) 2017 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */
#include <netinet/in.h>

#include <vlibapi/api.h>
#include <vlibmemory/api.h>

#include <vlib/vlib.h>
#include <vnet/vnet.h>
#include <vnet/pg/pg.h>
#include <vppinfra/error.h>
#include <vnet/plugin/plugin.h>

#include <vppinfra/bihash_48_8.h>

#include "hash_lookup.h"
#include "hash_lookup_private.h"
always_inline applied_hash_ace_entry_t **
get_applied_hash_aces(acl_main_t *am, u32 lc_index)
{
  applied_hash_ace_entry_t **applied_hash_aces =
      vec_elt_at_index(am->hash_entry_vec_by_lc_index, lc_index);

  /* previously, per-interface:
     is_input ? vec_elt_at_index(am->input_hash_entry_vec_by_sw_if_index, sw_if_index)
              : vec_elt_at_index(am->output_hash_entry_vec_by_sw_if_index, sw_if_index);
   */

  return applied_hash_aces;
}
static void
hashtable_add_del(acl_main_t *am, clib_bihash_kv_48_8_t *kv, int is_add)
{
    DBG("HASH ADD/DEL: %016llx %016llx %016llx %016llx %016llx %016llx %016llx add %d",
        kv->key[0], kv->key[1], kv->key[2],
        kv->key[3], kv->key[4], kv->key[5], kv->value, is_add);
    BV (clib_bihash_add_del) (&am->acl_lookup_hash, kv, is_add);
}
/*
 * TupleMerge
 *
 * Initial adaptation by Valerio Bruschi (valerio.bruschi@telecom-paristech.fr)
 * based on the TupleMerge [1] simulator kindly made available
 * by James Daly (dalyjamese@gmail.com) and Eric Torng (torng@cse.msu.edu)
 * ( http://www.cse.msu.edu/~dalyjame/ or http://www.cse.msu.edu/~torng/ ),
 * refactoring by Andrew Yourtchenko.
 *
 * [1] James Daly, Eric Torng, "TupleMerge: Building Online Packet
 * Classifiers by Omitting Bits", in Proc. IEEE ICCCN 2017, pp. 1-10.
 */
/* check if mask1 contains mask2: every bit set in mask1 must also be
   set in mask2, i.e. mask2 is at least as specific as mask1 */
static int
first_mask_contains_second_mask(int is_ip6, fa_5tuple_t * mask1, fa_5tuple_t * mask2)
{
  int i;
  if (is_ip6)
    {
      for (i = 0; i < 2; i++)
        {
          if ((mask1->ip6_addr[0].as_u64[i] & mask2->ip6_addr[0].as_u64[i]) !=
              mask1->ip6_addr[0].as_u64[i])
            return 0;
          if ((mask1->ip6_addr[1].as_u64[i] & mask2->ip6_addr[1].as_u64[i]) !=
              mask1->ip6_addr[1].as_u64[i])
            return 0;
        }
    }
  else
    {
      /* check the pads, both masks must have them zero */
      u32 padcheck = 0;
      for (i=0; i<6; i++) {
        padcheck |= mask1->l3_zero_pad[i];
        padcheck |= mask2->l3_zero_pad[i];
      }
      if (padcheck != 0)
        return 0;
      if ((mask1->ip4_addr[0].as_u32 & mask2->ip4_addr[0].as_u32) !=
          mask1->ip4_addr[0].as_u32)
        return 0;
      if ((mask1->ip4_addr[1].as_u32 & mask2->ip4_addr[1].as_u32) !=
          mask1->ip4_addr[1].as_u32)
        return 0;
    }

  /* ports may be wildcarded rather than exact-match, so check the L4 mask too */
  if ((mask1->l4.as_u64 & mask2->l4.as_u64) != mask1->l4.as_u64)
    return 0;
  if ((mask1->pkt.as_u64 & mask2->pkt.as_u64) != mask1->pkt.as_u64)
    return 0;
  return 1;
}
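/*
 * Worked containment example (values are illustrative, not from the
 * original code): a table mask with src 0xffff0000 (/16) contains a
 * rule mask with src 0xffffff00 (/24), because
 * 0xffff0000 & 0xffffff00 == 0xffff0000; the more specific /24 rule
 * can therefore be stored in the /16 table (the remaining bits are
 * verified when the colliding rules are matched in full). The reverse
 * does not hold: a /16 rule cannot be stored in a /24 table.
 */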
/*
 * Consider the situation when we have to create a new table
 * T for a given rule R. This occurs for the first rule inserted and
 * for later rules if it is incompatible with all existing tables.
 * In this event, we need to determine mT for a new table.
 * Setting mT = mR is not a good strategy; if another similar,
 * but slightly less specific, rule appears we will be unable to
 * add it to T and will thus have to create another new table. We
 * thus consider two factors: is the rule more strongly aligned
 * with source or destination addresses (usually the two most
 * important fields) and how much slack needs to be given to
 * allow for other rules. If the source and destination addresses
 * are close together (within 4 bits for our experiments), we use
 * both of them. Otherwise, we drop the smaller (less specific)
 * address and its associated port field from consideration; R is
 * predominantly aligned with one of the two fields and should
 * be grouped with other similar rules. This is similar to TSS
 * dropping port fields, but since it is based on observable rule
 * characteristics it is more likely to keep important fields and
 * discard less useful ones.
 *
 * We then look at the absolute lengths of the addresses. If
 * the address is long, we are more likely to try to add shorter
 * lengths and likewise the reverse. We thus remove a few bits
 * from both address fields with more bits removed from longer
 * addresses. For 32 bit addresses, we remove 4 bits, 3 for more
 * than 24, 2 for more than 16, and so on (so 8 and fewer bits
 * don't have any removed). We only do this for prefix fields like
 * addresses; both range fields (like ports) and exact match fields
 * (like protocol) should remain as they are.
 */
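/*
 * Worked example of the bit-removal rule above (illustrative prefix
 * lengths, not from the original code): a /28 source mask is longer
 * than 24 bits, so 3 bits are removed, giving /25; a /20 mask loses
 * 2 bits, giving /18; an /8 or shorter mask keeps all its bits.
 * Ports and the protocol field are never widened this way.
 */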
static u32
shift_ip4_if(u32 mask, u32 thresh, int numshifts, u32 else_val)
{
  if (clib_net_to_host_u32(mask) > thresh)
    return clib_host_to_net_u32((clib_net_to_host_u32(mask) << numshifts) & 0xFFFFFFFF);
  else
    return else_val;
}
static void
relax_ip4_addr(ip4_address_t *ip4_mask, int relax2) {
  int shifts_per_relax[2][4] = { { 6, 5, 4, 2 }, { 3, 2, 1, 1 } };

  int *shifts = shifts_per_relax[relax2];
  if(ip4_mask->as_u32 == 0xffffffff)
    ip4_mask->as_u32 = clib_host_to_net_u32((clib_net_to_host_u32(ip4_mask->as_u32) << shifts[0])&0xFFFFFFFF);
  else
    ip4_mask->as_u32 = shift_ip4_if(ip4_mask->as_u32, 0xffffff00, shifts[1],
                         shift_ip4_if(ip4_mask->as_u32, 0xffff0000, shifts[2],
                           shift_ip4_if(ip4_mask->as_u32, 0xff000000, shifts[3], ip4_mask->as_u32)));
}
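/*
 * Effect of the shift tables above (worked example derived from the
 * constants, not taken from the original comments): with relax2 == 0,
 * an exact /32 mask is shifted left by 6 bits and becomes /26; a /24
 * mask (0xffffff00) is not strictly above the 0xffffff00 threshold,
 * falls through to the 0xffff0000 one, and is shifted by 4, becoming
 * /20. The gentler row { 3, 2, 1, 1 } applies when relax_tuple is
 * called with relax2 == 1, as in the partition-split fallback.
 */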
static void
relax_ip6_addr(ip6_address_t *ip6_mask, int relax2) {
  /*
   * This "better than nothing" relax logic is based on heuristics
   * from IPv6 knowledge, and may not be optimal.
   * Some further tuning may be needed in the future.
   */
  if (ip6_mask->as_u64[0] == 0xffffffffffffffffULL) {
    if (ip6_mask->as_u64[1] == 0xffffffffffffffffULL) {
      /* relax a /128 down to /64 - likely to have more hosts */
      ip6_mask->as_u64[1] = 0;
    } else if (ip6_mask->as_u64[1] == 0) {
      /* relax a /64 down to /56 - likely to have more subnets */
      ip6_mask->as_u64[0] = clib_host_to_net_u64(0xffffffffffffff00ULL);
    }
  }
}
static void
relax_tuple(fa_5tuple_t *mask, int is_ip6, int relax2){
  fa_5tuple_t save_mask = *mask;

  int counter_s = 0, counter_d = 0;
  if (is_ip6) {
    int i;
    for(i=0; i<2; i++){
      counter_s += count_bits(mask->ip6_addr[0].as_u64[i]);
      counter_d += count_bits(mask->ip6_addr[1].as_u64[i]);
    }
  } else {
    counter_s += count_bits(mask->ip4_addr[0].as_u32);
    counter_d += count_bits(mask->ip4_addr[1].as_u32);
  }

  /*
   * is the rule more strongly aligned with source or destination addresses
   * (usually the two most important fields) and how much slack needs to be
   * given to allow for other rules. If the source and destination addresses
   * are close together (within 4 bits for our experiments), we use both of them.
   * Otherwise, we drop the smaller (less specific) address and its associated
   * port field from consideration.
   */
  const int deltaThreshold = 4;
  /* const int deltaThreshold = 8; if IPV6? */
  int delta = counter_s - counter_d;
  if (-delta > deltaThreshold) {
    if (is_ip6)
      mask->ip6_addr[0].as_u64[1] = mask->ip6_addr[0].as_u64[0] = 0;
    else
      mask->ip4_addr[0].as_u32 = 0;
    mask->l4.port[0] = 0;
  } else if (delta > deltaThreshold) {
    if (is_ip6)
      mask->ip6_addr[1].as_u64[1] = mask->ip6_addr[1].as_u64[0] = 0;
    else
      mask->ip4_addr[1].as_u32 = 0;
    mask->l4.port[1] = 0;
  }

  if (is_ip6) {
    relax_ip6_addr(&mask->ip6_addr[0], relax2);
    relax_ip6_addr(&mask->ip6_addr[1], relax2);
  } else {
    relax_ip4_addr(&mask->ip4_addr[0], relax2);
    relax_ip4_addr(&mask->ip4_addr[1], relax2);
  }
  mask->pkt.is_nonfirst_fragment = 0;
  mask->pkt.l4_valid = 0;
  if(!first_mask_contains_second_mask(is_ip6, mask, &save_mask)){
    DBG( "TM-relaxing-ERROR");
    *mask = save_mask;
  }
  DBG( "TM-relaxing-end");
}
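/*
 * Example of the delta heuristic above (illustrative): an IPv4 rule
 * with a /32 source and an /8 destination gives counter_s = 32,
 * counter_d = 8, so delta = 24 > deltaThreshold; the destination
 * address and destination port are zeroed out of the mask and only
 * the source side shapes the new table's tuple.
 */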
static u32
tm_assign_mask_type_index(acl_main_t *am, fa_5tuple_t *mask, int is_ip6, u32 lc_index)
{
  u32 mask_type_index = ~0;
  u32 for_mask_type_index = ~0;
  ace_mask_type_entry_t *mte;
  int order_index;
  /* look for an existing mask that can accommodate the one in input */

  hash_applied_mask_info_t **hash_applied_mask_info_vec =
      vec_elt_at_index(am->hash_applied_mask_info_vec_by_lc_index, lc_index);
  hash_applied_mask_info_t *minfo;

  if (vec_len(*hash_applied_mask_info_vec) > 0) {
    for(order_index = vec_len((*hash_applied_mask_info_vec)) -1; order_index >= 0; order_index--) {
      minfo = vec_elt_at_index((*hash_applied_mask_info_vec), order_index);
      for_mask_type_index = minfo->mask_type_index;
      mte = vec_elt_at_index(am->ace_mask_type_pool, for_mask_type_index);
      if(first_mask_contains_second_mask(is_ip6, &mte->mask, mask)){
        mask_type_index = (mte - am->ace_mask_type_pool);
        break;
      }
    }
  }

  if(~0 == mask_type_index) {
    /* no suitable mask found; create a new one, relaxed so that
       future similar ace entries can share it */
    DBG( "TM-assigning mask type index-new one");
    pool_get_aligned (am->ace_mask_type_pool, mte, CLIB_CACHE_LINE_BYTES);
    mask_type_index = mte - am->ace_mask_type_pool;

    hash_applied_mask_info_t **hash_applied_mask_info_vec =
        vec_elt_at_index(am->hash_applied_mask_info_vec_by_lc_index, lc_index);

    int spot = vec_len((*hash_applied_mask_info_vec));
    vec_validate((*hash_applied_mask_info_vec), spot);
    minfo = vec_elt_at_index((*hash_applied_mask_info_vec), spot);
    minfo->mask_type_index = mask_type_index;
    minfo->num_entries = 0;
    minfo->max_collisions = 0;
    minfo->first_rule_index = ~0;

    clib_memcpy(&mte->mask, mask, sizeof(mte->mask));
    relax_tuple(&mte->mask, is_ip6, 0);

    mte->refcount = 0;
    /*
     * We can use only 16 bits, since in the match there is only a u16 field.
     * Realistically, once you get to 64K of mask types, it is a huge
     * problem anyway, so we might as well stop halfway.
     */
    ASSERT(mask_type_index < 32768);
  }
  mte = am->ace_mask_type_pool + mask_type_index;
  mte->refcount++;
  return mask_type_index;
}
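/*
 * Note on the search order above: candidate masks are scanned from the
 * most recently added entry backwards, so a new rule preferably lands
 * in the youngest compatible table rather than the oldest one.
 */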
static void
fill_applied_hash_ace_kv(acl_main_t *am,
                         applied_hash_ace_entry_t **applied_hash_aces,
                         u32 lc_index,
                         u32 new_index, clib_bihash_kv_48_8_t *kv)
{
  fa_5tuple_t *kv_key = (fa_5tuple_t *)kv->key;
  hash_acl_lookup_value_t *kv_val = (hash_acl_lookup_value_t *)&kv->value;
  applied_hash_ace_entry_t *pae = vec_elt_at_index((*applied_hash_aces), new_index);
  hash_acl_info_t *ha = vec_elt_at_index(am->hash_acl_infos, pae->acl_index);

  /* apply the mask to the ace key */
  hash_ace_info_t *ace_info = vec_elt_at_index(ha->rules, pae->hash_ace_info_index);
  ace_mask_type_entry_t *mte = vec_elt_at_index(am->ace_mask_type_pool, pae->mask_type_index);

  u64 *pmatch = (u64 *) &ace_info->match;
  u64 *pmask = (u64 *)&mte->mask;
  u64 *pkey = (u64 *)kv->key;

  *pkey++ = *pmatch++ & *pmask++;
  *pkey++ = *pmatch++ & *pmask++;
  *pkey++ = *pmatch++ & *pmask++;
  *pkey++ = *pmatch++ & *pmask++;
  *pkey++ = *pmatch++ & *pmask++;
  *pkey++ = *pmatch++ & *pmask++;

  kv_key->pkt.mask_type_index_lsb = pae->mask_type_index;
  kv_key->pkt.lc_index = lc_index;
  kv_val->applied_entry_index = new_index;
}
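/*
 * Key layout note (restating the code above): the 48-byte bihash key
 * is the rule match ANDed with its mask, with the lc_index and the
 * low bits of the mask type index folded into the pkt metadata part
 * of the key; the 8-byte value carries the index of the head entry
 * of the collision chain in the applied-entries vector.
 */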
static void
add_del_hashtable_entry(acl_main_t *am,
                        u32 lc_index,
                        applied_hash_ace_entry_t **applied_hash_aces,
                        u32 index, int is_add)
{
  clib_bihash_kv_48_8_t kv;

  fill_applied_hash_ace_kv(am, applied_hash_aces, lc_index, index, &kv);
  hashtable_add_del(am, &kv, is_add);
}
static u32
find_mask_type_index(acl_main_t *am, fa_5tuple_t *mask)
{
  ace_mask_type_entry_t *mte;
  /* *INDENT-OFF* */
  pool_foreach(mte, am->ace_mask_type_pool,
  ({
    if(memcmp(&mte->mask, mask, sizeof(*mask)) == 0)
      return (mte - am->ace_mask_type_pool);
  }));
  /* *INDENT-ON* */
  return ~0;
}
static u32
assign_mask_type_index(acl_main_t *am, fa_5tuple_t *mask)
{
  u32 mask_type_index = find_mask_type_index(am, mask);
  ace_mask_type_entry_t *mte;
  if(~0 == mask_type_index) {
    pool_get_aligned (am->ace_mask_type_pool, mte, CLIB_CACHE_LINE_BYTES);
    mask_type_index = mte - am->ace_mask_type_pool;
    clib_memcpy(&mte->mask, mask, sizeof(mte->mask));
    mte->refcount = 0;
    /*
     * We can use only 16 bits, since in the match there is only a u16 field.
     * Realistically, once you get to 64K of mask types, it is a huge
     * problem anyway, so we might as well stop halfway.
     */
    ASSERT(mask_type_index < 32768);
  }
  mte = am->ace_mask_type_pool + mask_type_index;
  mte->refcount++;
  return mask_type_index;
}
static void
release_mask_type_index(acl_main_t *am, u32 mask_type_index)
{
  ace_mask_type_entry_t *mte = pool_elt_at_index(am->ace_mask_type_pool, mask_type_index);
  mte->refcount--;
  if (mte->refcount == 0) {
    /* we are not using this entry anymore */
    pool_put(am->ace_mask_type_pool, mte);
  }
}
static void
remake_hash_applied_mask_info_vec (acl_main_t * am,
                                   applied_hash_ace_entry_t **
                                   applied_hash_aces, u32 lc_index)
{
  hash_applied_mask_info_t *new_hash_applied_mask_info_vec =
    vec_new (hash_applied_mask_info_t, 0);

  hash_applied_mask_info_t *minfo;
  int i;
  for (i = 0; i < vec_len ((*applied_hash_aces)); i++)
    {
      applied_hash_ace_entry_t *pae =
        vec_elt_at_index ((*applied_hash_aces), i);

      /* check if mask_type_index is already there */
      u32 new_pointer = vec_len (new_hash_applied_mask_info_vec);
      int search;
      for (search = 0; search < vec_len (new_hash_applied_mask_info_vec);
           search++)
        {
          minfo = vec_elt_at_index (new_hash_applied_mask_info_vec, search);
          if (minfo->mask_type_index == pae->mask_type_index)
            break;
        }

      vec_validate ((new_hash_applied_mask_info_vec), search);
      minfo = vec_elt_at_index ((new_hash_applied_mask_info_vec), search);
      if (search == new_pointer)
        {
          minfo->mask_type_index = pae->mask_type_index;
          minfo->num_entries = 0;
          minfo->max_collisions = 0;
          minfo->first_rule_index = ~0;
        }

      minfo->num_entries = minfo->num_entries + 1;

      if (vec_len (pae->colliding_rules) > minfo->max_collisions)
        minfo->max_collisions = vec_len (pae->colliding_rules);

      if (minfo->first_rule_index > i)
        minfo->first_rule_index = i;
    }

  hash_applied_mask_info_t **hash_applied_mask_info_vec =
    vec_elt_at_index (am->hash_applied_mask_info_vec_by_lc_index, lc_index);

  vec_free ((*hash_applied_mask_info_vec));
  (*hash_applied_mask_info_vec) = new_hash_applied_mask_info_vec;
}
static void
vec_del_collision_rule (collision_match_rule_t ** pvec,
                        u32 applied_entry_index)
{
  u32 i;
  for (i = 0; i < vec_len ((*pvec)); i++)
    {
      collision_match_rule_t *cr = vec_elt_at_index ((*pvec), i);
      if (cr->applied_entry_index == applied_entry_index)
        {
          vec_del1 ((*pvec), i);
        }
    }
}
static void
del_colliding_rule (applied_hash_ace_entry_t ** applied_hash_aces,
                    u32 head_index, u32 applied_entry_index)
{
  applied_hash_ace_entry_t *head_pae =
    vec_elt_at_index ((*applied_hash_aces), head_index);
  vec_del_collision_rule (&head_pae->colliding_rules, applied_entry_index);
}
static void
add_colliding_rule (acl_main_t * am,
                    applied_hash_ace_entry_t ** applied_hash_aces,
                    u32 head_index, u32 applied_entry_index)
{
  applied_hash_ace_entry_t *head_pae =
    vec_elt_at_index ((*applied_hash_aces), head_index);
  applied_hash_ace_entry_t *pae =
    vec_elt_at_index ((*applied_hash_aces), applied_entry_index);

  collision_match_rule_t cr;

  cr.acl_index = pae->acl_index;
  cr.ace_index = pae->ace_index;
  cr.acl_position = pae->acl_position;
  cr.applied_entry_index = applied_entry_index;
  cr.rule = am->acls[pae->acl_index].rules[pae->ace_index];
  vec_add1 (head_pae->colliding_rules, cr);
}
static u32
activate_applied_ace_hash_entry(acl_main_t *am,
                                u32 lc_index,
                                applied_hash_ace_entry_t **applied_hash_aces,
                                u32 new_index)
{
  clib_bihash_kv_48_8_t kv;
  ASSERT(new_index != ~0);
  applied_hash_ace_entry_t *pae = vec_elt_at_index((*applied_hash_aces), new_index);
  DBG("activate_applied_ace_hash_entry lc_index %d new_index %d", lc_index, new_index);

  fill_applied_hash_ace_kv(am, applied_hash_aces, lc_index, new_index, &kv);

  DBG("APPLY ADD KEY: %016llx %016llx %016llx %016llx %016llx %016llx",
      kv.key[0], kv.key[1], kv.key[2],
      kv.key[3], kv.key[4], kv.key[5]);

  clib_bihash_kv_48_8_t result;
  hash_acl_lookup_value_t *result_val = (hash_acl_lookup_value_t *)&result.value;
  int res = BV (clib_bihash_search) (&am->acl_lookup_hash, &kv, &result);
  ASSERT(new_index != ~0);
  ASSERT(new_index < vec_len((*applied_hash_aces)));
  if (res == 0) {
    /* There already exists an entry or more. Append at the end. */
    u32 first_index = result_val->applied_entry_index;
    ASSERT(first_index != ~0);
    DBG("A key already exists, with applied entry index: %d", first_index);
    applied_hash_ace_entry_t *first_pae = vec_elt_at_index((*applied_hash_aces), first_index);
    u32 last_index = first_pae->tail_applied_entry_index;
    ASSERT(last_index != ~0);
    applied_hash_ace_entry_t *last_pae = vec_elt_at_index((*applied_hash_aces), last_index);
    DBG("...advance to chained entry index: %d", last_index);
    /* link ourselves in */
    last_pae->next_applied_entry_index = new_index;
    pae->prev_applied_entry_index = last_index;
    /* adjust the pointer to the new tail */
    first_pae->tail_applied_entry_index = new_index;
    add_colliding_rule(am, applied_hash_aces, first_index, new_index);
    return first_index;
  } else {
    /* It's the very first entry */
    hashtable_add_del(am, &kv, 1);
    ASSERT(new_index != ~0);
    pae->tail_applied_entry_index = new_index;
    add_colliding_rule(am, applied_hash_aces, new_index, new_index);
    return new_index;
  }
}
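/*
 * Chain-building example (illustrative): the first ACE whose masked
 * key hashes to a fresh bucket creates the bihash entry and points
 * head and tail at itself. A second ACE with the same masked key
 * finds the head via the bihash search, is linked after the current
 * tail, and only the head's tail_applied_entry_index and
 * colliding_rules are updated; the bihash entry keeps pointing at
 * the head.
 */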
static void *
hash_acl_set_heap(acl_main_t *am)
{
  if (0 == am->hash_lookup_mheap) {
    am->hash_lookup_mheap = mheap_alloc_with_lock (0 /* use VM */ ,
                                                   am->hash_lookup_mheap_size,
                                                   1 /* locked */);
    if (0 == am->hash_lookup_mheap) {
      clib_error("ACL plugin failed to allocate lookup heap of %U bytes",
                 format_memory_size, am->hash_lookup_mheap_size);
    }
  }
  void *oldheap = clib_mem_set_heap(am->hash_lookup_mheap);
  return oldheap;
}
void
acl_plugin_hash_acl_set_validate_heap(int on)
{
  acl_main_t *am = &acl_main;
  clib_mem_set_heap(hash_acl_set_heap(am));
#if USE_DLMALLOC == 0
  mheap_t *h = mheap_header (am->hash_lookup_mheap);
  if (on) {
    h->flags |= MHEAP_FLAG_VALIDATE;
    h->flags &= ~MHEAP_FLAG_SMALL_OBJECT_CACHE;
    mheap_validate(h);
  } else {
    h->flags &= ~MHEAP_FLAG_VALIDATE;
    h->flags |= MHEAP_FLAG_SMALL_OBJECT_CACHE;
  }
#endif
}
void
acl_plugin_hash_acl_set_trace_heap(int on)
{
  acl_main_t *am = &acl_main;
  clib_mem_set_heap(hash_acl_set_heap(am));
#if USE_DLMALLOC == 0
  mheap_t *h = mheap_header (am->hash_lookup_mheap);
  if (on) {
    h->flags |= MHEAP_FLAG_TRACE;
  } else {
    h->flags &= ~MHEAP_FLAG_TRACE;
  }
#endif
}
static void
assign_mask_type_index_to_pae(acl_main_t *am, u32 lc_index, int is_ip6, applied_hash_ace_entry_t *pae)
{
  hash_acl_info_t *ha = vec_elt_at_index(am->hash_acl_infos, pae->acl_index);
  hash_ace_info_t *ace_info = vec_elt_at_index(ha->rules, pae->hash_ace_info_index);

  ace_mask_type_entry_t *mte;
  fa_5tuple_t *mask;
  /*
   * Start from the base_mask associated with the ace.
   * With TupleMerge enabled, a relaxed mask is assigned instead.
   */
  mte = vec_elt_at_index(am->ace_mask_type_pool, ace_info->base_mask_type_index);
  mask = &mte->mask;
  if (am->use_tuple_merge)
    pae->mask_type_index = tm_assign_mask_type_index(am, mask, is_ip6, lc_index);
  else
    pae->mask_type_index = assign_mask_type_index(am, mask);
}
static void
split_partition(acl_main_t *am, u32 first_index,
                u32 lc_index, int is_ip6);
static void
check_collision_count_and_maybe_split(acl_main_t *am, u32 lc_index, int is_ip6, u32 first_index)
{
  applied_hash_ace_entry_t **applied_hash_aces = get_applied_hash_aces(am, lc_index);
  applied_hash_ace_entry_t *first_pae = vec_elt_at_index((*applied_hash_aces), first_index);
  if (vec_len(first_pae->colliding_rules) > am->tuple_merge_split_threshold) {
    split_partition(am, first_index, lc_index, is_ip6);
  }
}
void
hash_acl_apply(acl_main_t *am, u32 lc_index, int acl_index, u32 acl_position)
{
  int i;

  DBG0("HASH ACL apply: lc_index %d acl %d", lc_index, acl_index);
  if (!am->acl_lookup_hash_initialized) {
    BV (clib_bihash_init) (&am->acl_lookup_hash, "ACL plugin rule lookup bihash",
                           am->hash_lookup_hash_buckets, am->hash_lookup_hash_memory);
    am->acl_lookup_hash_initialized = 1;
  }

  void *oldheap = hash_acl_set_heap(am);
  vec_validate(am->hash_entry_vec_by_lc_index, lc_index);
  vec_validate(am->hash_acl_infos, acl_index);
  applied_hash_ace_entry_t **applied_hash_aces = get_applied_hash_aces(am, lc_index);

  hash_acl_info_t *ha = vec_elt_at_index(am->hash_acl_infos, acl_index);
  u32 **hash_acl_applied_lc_index = &ha->lc_index_list;

  int base_offset = vec_len(*applied_hash_aces);

  /* Update the bitmap of the mask types with which the lookup
     needs to happen for the ACLs applied to this lc_index */
  applied_hash_acl_info_t **applied_hash_acls = &am->applied_hash_acl_info_by_lc_index;
  vec_validate((*applied_hash_acls), lc_index);
  applied_hash_acl_info_t *pal = vec_elt_at_index((*applied_hash_acls), lc_index);

  /* ensure the list of applied hash acls is initialized and add this acl# to it */
  u32 index = vec_search(pal->applied_acls, acl_index);
  if (index != ~0) {
    clib_warning("BUG: trying to apply twice acl_index %d on lc_index %d, according to lc",
                 acl_index, lc_index);
    goto done;
  }
  vec_add1(pal->applied_acls, acl_index);
  u32 index2 = vec_search((*hash_acl_applied_lc_index), lc_index);
  if (index2 != ~0) {
    clib_warning("BUG: trying to apply twice acl_index %d on lc_index %d, according to hash h-acl info",
                 acl_index, lc_index);
    goto done;
  }
  vec_add1((*hash_acl_applied_lc_index), lc_index);

  /*
   * if the applied ACL is empty, this code will behave differently from the
   * current linear search: an empty ACL will simply fall through to the
   * next ACL, or to the default deny at the end.
   *
   * This is not a problem, because after the vpp-dev discussion,
   * the consensus was that it should not be possible to apply a non-existent
   * ACL, so the change adding this code also takes care of that.
   */

  /* expand the applied aces vector by the necessary amount */
  vec_resize((*applied_hash_aces), vec_len(ha->rules));

  vec_validate(am->hash_applied_mask_info_vec_by_lc_index, lc_index);
  /* add the rules from the ACL to the hash table for lookup and append to the vector */
  for(i=0; i < vec_len(ha->rules); i++) {
    int is_ip6 = ha->rules[i].match.pkt.is_ip6;
    u32 new_index = base_offset + i;
    applied_hash_ace_entry_t *pae = vec_elt_at_index((*applied_hash_aces), new_index);
    pae->acl_index = acl_index;
    pae->ace_index = ha->rules[i].ace_index;
    pae->acl_position = acl_position;
    pae->action = ha->rules[i].action;
    pae->hitcount = 0;
    pae->hash_ace_info_index = i;
    /* we might link it in later */
    pae->next_applied_entry_index = ~0;
    pae->prev_applied_entry_index = ~0;
    pae->tail_applied_entry_index = ~0;
    pae->colliding_rules = NULL;
    pae->mask_type_index = ~0;
    assign_mask_type_index_to_pae(am, lc_index, is_ip6, pae);
    u32 first_index = activate_applied_ace_hash_entry(am, lc_index, applied_hash_aces, new_index);
    if (am->use_tuple_merge)
      check_collision_count_and_maybe_split(am, lc_index, is_ip6, first_index);
  }
  remake_hash_applied_mask_info_vec(am, applied_hash_aces, lc_index);
done:
  clib_mem_set_heap (oldheap);
}
static u32
find_head_applied_ace_index(applied_hash_ace_entry_t **applied_hash_aces, u32 curr_index)
{
  /*
   * find back the first entry. Inefficient, so might need to be a bit cleverer
   * if this proves to be a problem..
   */
  u32 an_index = curr_index;
  ASSERT(an_index != ~0);
  applied_hash_ace_entry_t *head_pae = vec_elt_at_index((*applied_hash_aces), an_index);
  while(head_pae->prev_applied_entry_index != ~0) {
    an_index = head_pae->prev_applied_entry_index;
    ASSERT(an_index != ~0);
    head_pae = vec_elt_at_index((*applied_hash_aces), an_index);
  }
  return an_index;
}
static void
move_applied_ace_hash_entry(acl_main_t *am,
                            u32 lc_index,
                            applied_hash_ace_entry_t **applied_hash_aces,
                            u32 old_index, u32 new_index)
{
  ASSERT(old_index != ~0);
  ASSERT(new_index != ~0);

  *vec_elt_at_index((*applied_hash_aces), new_index) = *vec_elt_at_index((*applied_hash_aces), old_index);

  /* update the linkage and hash table if necessary */
  applied_hash_ace_entry_t *pae = vec_elt_at_index((*applied_hash_aces), old_index);

  if (pae->prev_applied_entry_index != ~0) {
    applied_hash_ace_entry_t *prev_pae = vec_elt_at_index((*applied_hash_aces), pae->prev_applied_entry_index);
    ASSERT(prev_pae->next_applied_entry_index == old_index);
    prev_pae->next_applied_entry_index = new_index;
  } else {
    /* first entry - so the hash points to it, update */
    add_del_hashtable_entry(am, lc_index,
                            applied_hash_aces, new_index, 1);
    ASSERT(pae->tail_applied_entry_index != ~0);
  }
  if (pae->next_applied_entry_index != ~0) {
    applied_hash_ace_entry_t *next_pae = vec_elt_at_index((*applied_hash_aces), pae->next_applied_entry_index);
    ASSERT(next_pae->prev_applied_entry_index == old_index);
    next_pae->prev_applied_entry_index = new_index;
  } else {
    /*
     * Moving the very last entry, so we need to update the tail pointer in the first one.
     */
    u32 head_index = find_head_applied_ace_index(applied_hash_aces, old_index);
    ASSERT(head_index != ~0);
    applied_hash_ace_entry_t *head_pae = vec_elt_at_index((*applied_hash_aces), head_index);

    ASSERT(head_pae->tail_applied_entry_index == old_index);
    head_pae->tail_applied_entry_index = new_index;
  }
  /* invalidate the old entry */
  pae->prev_applied_entry_index = ~0;
  pae->next_applied_entry_index = ~0;
  pae->tail_applied_entry_index = ~0;
}
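/*
 * Note (restating the above): entries are moved only to compact the
 * applied-entries vector when an ACL is unapplied. The doubly-linked
 * collision chain, and for a head entry the bihash value, must be
 * re-pointed at the entry's new vector index before the vacated slot
 * is invalidated.
 */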
static void
deactivate_applied_ace_hash_entry(acl_main_t *am,
                                  u32 lc_index,
                                  applied_hash_ace_entry_t **applied_hash_aces,
                                  u32 old_index)
{
  applied_hash_ace_entry_t *pae = vec_elt_at_index((*applied_hash_aces), old_index);
  DBG("UNAPPLY DEACTIVATE: lc_index %d applied index %d", lc_index, old_index);

  if (pae->prev_applied_entry_index != ~0) {
    DBG("UNAPPLY = index %d has prev_applied_entry_index %d", old_index, pae->prev_applied_entry_index);
    applied_hash_ace_entry_t *prev_pae = vec_elt_at_index((*applied_hash_aces), pae->prev_applied_entry_index);
    ASSERT(prev_pae->next_applied_entry_index == old_index);
    prev_pae->next_applied_entry_index = pae->next_applied_entry_index;

    u32 head_index = find_head_applied_ace_index(applied_hash_aces, old_index);
    ASSERT(head_index != ~0);
    applied_hash_ace_entry_t *head_pae = vec_elt_at_index((*applied_hash_aces), head_index);
    del_colliding_rule(applied_hash_aces, head_index, old_index);

    if (pae->next_applied_entry_index == ~0) {
      /* it was the last entry we removed, update the pointer in the first one */
      ASSERT(head_pae->tail_applied_entry_index == old_index);
      head_pae->tail_applied_entry_index = pae->prev_applied_entry_index;
    } else {
      applied_hash_ace_entry_t *next_pae = vec_elt_at_index((*applied_hash_aces), pae->next_applied_entry_index);
      next_pae->prev_applied_entry_index = pae->prev_applied_entry_index;
    }
  } else {
    /* It was the first entry. We need either to reset the hash entry or delete it */
    if (pae->next_applied_entry_index != ~0) {
      /* the next element becomes the new first one, so needs the tail pointer to be set */
      applied_hash_ace_entry_t *next_pae = vec_elt_at_index((*applied_hash_aces), pae->next_applied_entry_index);
      ASSERT(pae->tail_applied_entry_index != ~0);
      next_pae->tail_applied_entry_index = pae->tail_applied_entry_index;
      /* Remove ourselves and transfer the ownership of the colliding rules vector */
      del_colliding_rule(applied_hash_aces, old_index, old_index);
      next_pae->colliding_rules = pae->colliding_rules;
      /* unlink from the next element */
      next_pae->prev_applied_entry_index = ~0;
      add_del_hashtable_entry(am, lc_index,
                              applied_hash_aces, pae->next_applied_entry_index, 1);
    } else {
      /* no next entry, so just delete the entry in the hash table */
      add_del_hashtable_entry(am, lc_index,
                              applied_hash_aces, old_index, 0);
    }
  }

  release_mask_type_index(am, pae->mask_type_index);
  /* invalidate the old entry */
  pae->mask_type_index = ~0;
  pae->prev_applied_entry_index = ~0;
  pae->next_applied_entry_index = ~0;
  pae->tail_applied_entry_index = ~0;
  /* always has to be 0 */
  pae->colliding_rules = NULL;
}
void
hash_acl_unapply(acl_main_t *am, u32 lc_index, int acl_index)
{
  int i;

  DBG0("HASH ACL unapply: lc_index %d acl %d", lc_index, acl_index);
  applied_hash_acl_info_t **applied_hash_acls = &am->applied_hash_acl_info_by_lc_index;
  applied_hash_acl_info_t *pal = vec_elt_at_index((*applied_hash_acls), lc_index);

  hash_acl_info_t *ha = vec_elt_at_index(am->hash_acl_infos, acl_index);
  u32 **hash_acl_applied_lc_index = &ha->lc_index_list;

  /* remove this acl# from the list of applied hash acls */
  u32 index = vec_search(pal->applied_acls, acl_index);
  if (index == ~0) {
    clib_warning("BUG: trying to unapply unapplied acl_index %d on lc_index %d, according to lc",
                 acl_index, lc_index);
    return;
  }
  vec_del1(pal->applied_acls, index);

  u32 index2 = vec_search((*hash_acl_applied_lc_index), lc_index);
  if (index2 == ~0) {
    clib_warning("BUG: trying to unapply twice acl_index %d on lc_index %d, according to h-acl info",
                 acl_index, lc_index);
    return;
  }
  vec_del1((*hash_acl_applied_lc_index), index2);

  applied_hash_ace_entry_t **applied_hash_aces = get_applied_hash_aces(am, lc_index);

  for(i=0; i < vec_len((*applied_hash_aces)); i++) {
    if (vec_elt_at_index(*applied_hash_aces,i)->acl_index == acl_index) {
      DBG("Found applied ACL#%d at applied index %d", acl_index, i);
      break;
    }
  }
  if (vec_len((*applied_hash_aces)) <= i) {
    DBG("Did not find applied ACL#%d at lc_index %d", acl_index, lc_index);
    /* we went all the way without finding any entries. Probably the list was empty. */
    return;
  }

  void *oldheap = hash_acl_set_heap(am);
  int base_offset = i;
  int tail_offset = base_offset + vec_len(ha->rules);
  int tail_len = vec_len((*applied_hash_aces)) - tail_offset;
  DBG("base_offset: %d, tail_offset: %d, tail_len: %d", base_offset, tail_offset, tail_len);

  for(i=0; i < vec_len(ha->rules); i ++) {
    deactivate_applied_ace_hash_entry(am, lc_index,
                                      applied_hash_aces, base_offset + i);
  }
  for(i=0; i < tail_len; i ++) {
    /* move the entry at tail offset to base offset */
    /* that is, from (tail_offset+i) -> (base_offset+i) */
    DBG("UNAPPLY MOVE: lc_index %d, applied index %d -> %d", lc_index, tail_offset+i, base_offset + i);
    move_applied_ace_hash_entry(am, lc_index, applied_hash_aces, tail_offset + i, base_offset + i);
  }
  /* trim the end of the vector */
  _vec_len((*applied_hash_aces)) -= vec_len(ha->rules);

  remake_hash_applied_mask_info_vec(am, applied_hash_aces, lc_index);

  clib_mem_set_heap (oldheap);
}
/*
 * Create the applied ACEs and update the hash table,
 * taking into account that the ACL may not be the last
 * in the vector of applied ACLs.
 *
 * For now, walk from the end of the vector and unapply the ACLs,
 * then apply the one in question and reapply the rest.
 */
void
hash_acl_reapply(acl_main_t *am, u32 lc_index, int acl_index)
{
  acl_lookup_context_t *acontext = pool_elt_at_index(am->acl_lookup_contexts, lc_index);
  u32 **applied_acls = &acontext->acl_indices;
  int i;
  int start_index = vec_search((*applied_acls), acl_index);

  DBG0("Start index for acl %d in lc_index %d is %d", acl_index, lc_index, start_index);
  /*
   * This function is called after we find out the lc_index where the ACL is applied.
   * If the by-lc_index vector does not have the ACL#, then it's a bug.
   */
  ASSERT(start_index < vec_len(*applied_acls));

  /* unapply all the ACLs at the tail side, up to the current one */
  for(i = vec_len(*applied_acls) - 1; i > start_index; i--) {
    hash_acl_unapply(am, lc_index, *vec_elt_at_index(*applied_acls, i));
  }
  for(i = start_index; i < vec_len(*applied_acls); i++) {
    hash_acl_apply(am, lc_index, *vec_elt_at_index(*applied_acls, i), i);
  }
}
static void
make_ip6_address_mask(ip6_address_t *addr, u8 prefix_len)
{
  ip6_address_mask_from_width(addr, prefix_len);
}
/* Maybe should be moved into the core somewhere */
always_inline void
ip4_address_mask_from_width (ip4_address_t * a, u32 width)
{
  int i, byte, bit, bitnum;
  ASSERT (width <= 32);
  memset (a, 0, sizeof (a[0]));
  for (i = 0; i < width; i++)
    {
      bitnum = (7 - (i & 7));
      byte = i / 8;
      bit = 1 << bitnum;
      a->as_u8[byte] |= bit;
    }
}
static void
make_ip4_address_mask(ip4_address_t *addr, u8 prefix_len)
{
  ip4_address_mask_from_width(addr, prefix_len);
}
static void
make_port_mask(u16 *portmask, u16 port_first, u16 port_last)
{
  if (port_first == port_last) {
    /* a single port is representable by a masked value */
    *portmask = 0xffff;
    return;
  }

  *portmask = 0;
  return;
}
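/*
 * Illustration of the port-mask logic above: a rule matching exactly
 * port 80 yields a 0xffff mask, so the port participates in the
 * hashed key; a range such as 1000..2000 is not representable by a
 * single mask, so the mask stays 0 and the port does not narrow the
 * hashed key at all.
 */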
static void
make_mask_and_match_from_rule(fa_5tuple_t *mask, acl_rule_t *r, hash_ace_info_t *hi)
{
  memset(mask, 0, sizeof(*mask));
  memset(&hi->match, 0, sizeof(hi->match));
  hi->action = r->is_permit;

  /* we will need to be matching based on lc_index and mask_type_index when applied */
  mask->pkt.lc_index = ~0;
  /* we will assign the match of mask_type_index later when we find it */
  mask->pkt.mask_type_index_lsb = ~0;

  mask->pkt.is_ip6 = 1;
  hi->match.pkt.is_ip6 = r->is_ipv6;
  if (r->is_ipv6) {
    make_ip6_address_mask(&mask->ip6_addr[0], r->src_prefixlen);
    hi->match.ip6_addr[0] = r->src.ip6;
    make_ip6_address_mask(&mask->ip6_addr[1], r->dst_prefixlen);
    hi->match.ip6_addr[1] = r->dst.ip6;
  } else {
    memset(hi->match.l3_zero_pad, 0, sizeof(hi->match.l3_zero_pad));
    make_ip4_address_mask(&mask->ip4_addr[0], r->src_prefixlen);
    hi->match.ip4_addr[0] = r->src.ip4;
    make_ip4_address_mask(&mask->ip4_addr[1], r->dst_prefixlen);
    hi->match.ip4_addr[1] = r->dst.ip4;
  }

  if (r->proto != 0) {
    mask->l4.proto = ~0; /* L4 proto needs to be matched */
    hi->match.l4.proto = r->proto;

    /* Calculate the src/dst port masks and make the src/dst port matches accordingly */
    make_port_mask(&mask->l4.port[0], r->src_port_or_type_first, r->src_port_or_type_last);
    hi->match.l4.port[0] = r->src_port_or_type_first & mask->l4.port[0];

    make_port_mask(&mask->l4.port[1], r->dst_port_or_code_first, r->dst_port_or_code_last);
    hi->match.l4.port[1] = r->dst_port_or_code_first & mask->l4.port[1];
    /* L4 info must be valid in order to match */
    mask->pkt.l4_valid = 1;
    hi->match.pkt.l4_valid = 1;
    /* And we must set the mask to check that it is an initial fragment */
    mask->pkt.is_nonfirst_fragment = 1;
    hi->match.pkt.is_nonfirst_fragment = 0;
    if ((r->proto == IPPROTO_TCP) && (r->tcp_flags_mask != 0)) {
      /* if we want to match on TCP flags, they must be masked off as well */
      mask->pkt.tcp_flags = r->tcp_flags_mask;
      hi->match.pkt.tcp_flags = r->tcp_flags_value;
      /* and the flags need to be present within the packet being matched */
      mask->pkt.tcp_flags_valid = 1;
      hi->match.pkt.tcp_flags_valid = 1;
    }
  }
  /* Sanitize the mask and the match */
  u64 *pmask = (u64 *)mask;
  u64 *pmatch = (u64 *)&hi->match;
  int j;
  for(j=0; j<6; j++) {
    pmatch[j] = pmatch[j] & pmask[j];
  }
}
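/*
 * The sanitizing loop above guarantees the stored match never has
 * bits set outside its mask, mirroring how fill_applied_hash_ace_kv
 * builds lookup keys (match & mask). For example (hypothetical
 * address, for illustration), a rule written as 192.0.2.77/24 is
 * normalized here to a match of 192.0.2.0.
 */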
int hash_acl_exists(acl_main_t *am, int acl_index)
{
  if (acl_index >= vec_len(am->hash_acl_infos))
    return 0;

  hash_acl_info_t *ha = vec_elt_at_index(am->hash_acl_infos, acl_index);
  return ha->hash_acl_exists;
}
void hash_acl_add(acl_main_t *am, int acl_index)
{
  void *oldheap = hash_acl_set_heap(am);
  DBG("HASH ACL add : %d", acl_index);
  int i;
  acl_list_t *a = &am->acls[acl_index];
  vec_validate(am->hash_acl_infos, acl_index);
  hash_acl_info_t *ha = vec_elt_at_index(am->hash_acl_infos, acl_index);
  memset(ha, 0, sizeof(*ha));
  ha->hash_acl_exists = 1;

  /* walk the newly added ACL entries and ensure that for each of them there
     is a mask type, incrementing the reference count for that mask type */
  for(i=0; i < a->count; i++) {
    hash_ace_info_t ace_info;
    fa_5tuple_t mask;
    memset(&ace_info, 0, sizeof(ace_info));
    ace_info.acl_index = acl_index;
    ace_info.ace_index = i;

    make_mask_and_match_from_rule(&mask, &a->rules[i], &ace_info);
    mask.pkt.flags_reserved = 0b000;
    ace_info.base_mask_type_index = assign_mask_type_index(am, &mask);
    /* assign the mask type index for matching itself */
    ace_info.match.pkt.mask_type_index_lsb = ace_info.base_mask_type_index;
    DBG("ACE: %d mask_type_index: %d", i, ace_info.base_mask_type_index);
    vec_add1(ha->rules, ace_info);
  }
  /*
   * if the ACL is applied somewhere, fill the corresponding lookup data structures.
   * We need to take care if the ACL is not the last one in the vector of ACLs applied to the interface.
   */
  if (acl_index < vec_len(am->lc_index_vec_by_acl)) {
    u32 *lc_index;
    vec_foreach(lc_index, am->lc_index_vec_by_acl[acl_index]) {
      hash_acl_reapply(am, *lc_index, acl_index);
    }
  }
  clib_mem_set_heap (oldheap);
}
void hash_acl_delete(acl_main_t *am, int acl_index)
{
  void *oldheap = hash_acl_set_heap(am);
  DBG0("HASH ACL delete : %d", acl_index);
  /*
   * If the ACL is applied somewhere, remove the references to it (call
   * hash_acl_unapply) - this is a different behavior from the linear lookup,
   * where an empty ACL is "deny all".
   *
   * However, following the vpp-dev discussion, an ACL that is referenced
   * elsewhere should not be possible to delete, and the change adding this
   * also adds the safeguards to that effect, so this is not a problem.
   *
   * The part to remember is that this routine is called in the process of
   * reapplication during the acl_add_replace() API call - the old ACL ruleset
   * is deleted, then the new one is added, without any change in the applied
   * ACLs - so this case has to be handled.
   */
  hash_acl_info_t *ha = vec_elt_at_index(am->hash_acl_infos, acl_index);
  u32 *lc_list_copy = 0;
  {
    u32 *lc_index;
    lc_list_copy = vec_dup(ha->lc_index_list);
    vec_foreach(lc_index, lc_list_copy) {
      hash_acl_unapply(am, *lc_index, acl_index);
    }
    vec_free(lc_list_copy);
  }

  /* walk the mask types for the ACL about-to-be-deleted, and decrease
   * the reference count, possibly freeing up some of them */
  int i;
  for(i=0; i < vec_len(ha->rules); i++) {
    release_mask_type_index(am, ha->rules[i].base_mask_type_index);
  }
  ha->hash_acl_exists = 0;
  vec_free(ha->rules);
  clib_mem_set_heap (oldheap);
}
void
show_hash_acl_hash (vlib_main_t * vm, acl_main_t *am, u32 verbose)
{
  vlib_cli_output(vm, "\nACL lookup hash table:\n%U\n",
                  BV (format_bihash), &am->acl_lookup_hash, verbose);
}
void
acl_plugin_show_tables_mask_type (void)
{
  acl_main_t *am = &acl_main;
  vlib_main_t *vm = am->vlib_main;
  ace_mask_type_entry_t *mte;

  vlib_cli_output (vm, "Mask-type entries:");
  /* *INDENT-OFF* */
  pool_foreach(mte, am->ace_mask_type_pool,
  ({
    vlib_cli_output(vm, "   %3d: %016llx %016llx %016llx %016llx %016llx %016llx refcount %d",
                    mte - am->ace_mask_type_pool,
                    mte->mask.kv_40_8.key[0], mte->mask.kv_40_8.key[1], mte->mask.kv_40_8.key[2],
                    mte->mask.kv_40_8.key[3], mte->mask.kv_40_8.key[4], mte->mask.kv_40_8.value, mte->refcount);
  }));
  /* *INDENT-ON* */
}
void
acl_plugin_show_tables_acl_hash_info (u32 acl_index)
{
  acl_main_t *am = &acl_main;
  vlib_main_t *vm = am->vlib_main;
  u32 i, j;
  u64 *m;
  vlib_cli_output (vm, "Mask-ready ACL representations\n");
  for (i = 0; i < vec_len (am->hash_acl_infos); i++)
    {
      if ((acl_index != ~0) && (acl_index != i))
        {
          continue;
        }
      hash_acl_info_t *ha = &am->hash_acl_infos[i];
      vlib_cli_output (vm, "acl-index %u bitmask-ready layout\n", i);
      vlib_cli_output (vm, "  applied lc_index list: %U\n",
                       format_vec32, ha->lc_index_list, "%d");
      for (j = 0; j < vec_len (ha->rules); j++)
        {
          hash_ace_info_t *pa = &ha->rules[j];
          m = (u64 *) & pa->match;
          vlib_cli_output (vm,
                           "    %4d: %016llx %016llx %016llx %016llx %016llx %016llx base mask index %d acl %d rule %d action %d\n",
                           j, m[0], m[1], m[2], m[3], m[4], m[5],
                           pa->base_mask_type_index, pa->acl_index, pa->ace_index,
                           pa->action);
        }
    }
}
static void
acl_plugin_print_colliding_rule (vlib_main_t * vm, int j, collision_match_rule_t *cr) {
  vlib_cli_output(vm,
                  "        %4d: acl %d ace %d acl pos %d pae index: %d",
                  j, cr->acl_index, cr->ace_index, cr->acl_position, cr->applied_entry_index);
}
static void
acl_plugin_print_pae (vlib_main_t * vm, int j, applied_hash_ace_entry_t * pae)
{
  vlib_cli_output (vm,
                   "    %4d: acl %d rule %d action %d bitmask-ready rule %d colliding_rules: %d next %d prev %d tail %d hitcount %lld acl_pos: %d",
                   j, pae->acl_index, pae->ace_index, pae->action,
                   pae->hash_ace_info_index, vec_len(pae->colliding_rules), pae->next_applied_entry_index,
                   pae->prev_applied_entry_index,
                   pae->tail_applied_entry_index, pae->hitcount, pae->acl_position);
  int jj;
  for(jj=0; jj<vec_len(pae->colliding_rules); jj++)
    acl_plugin_print_colliding_rule(vm, jj, vec_elt_at_index(pae->colliding_rules, jj));
}
static void
acl_plugin_print_applied_mask_info (vlib_main_t * vm, int j, hash_applied_mask_info_t *mi)
{
  vlib_cli_output (vm,
                   "    %4d: mask type index %d first rule index %d num_entries %d max_collisions %d",
                   j, mi->mask_type_index, mi->first_rule_index, mi->num_entries, mi->max_collisions);
}
void
acl_plugin_show_tables_applied_info (u32 lc_index)
{
  acl_main_t *am = &acl_main;
  vlib_main_t *vm = am->vlib_main;
  u32 lci, j;
  vlib_cli_output (vm, "Applied lookup entries for lookup contexts");

  for (lci = 0;
       (lci < vec_len(am->applied_hash_acl_info_by_lc_index)); lci++)
    {
      if ((lc_index != ~0) && (lc_index != lci))
        {
          continue;
        }
      vlib_cli_output (vm, "lc_index %d:", lci);
      if (lci < vec_len (am->applied_hash_acl_info_by_lc_index))
        {
          applied_hash_acl_info_t *pal =
            &am->applied_hash_acl_info_by_lc_index[lci];
          vlib_cli_output (vm, "  applied acls: %U", format_vec32,
                           pal->applied_acls, "%d");
        }
      if (lci < vec_len (am->hash_applied_mask_info_vec_by_lc_index))
        {
          vlib_cli_output (vm, "  applied mask info entries:");
          for (j = 0;
               j < vec_len (am->hash_applied_mask_info_vec_by_lc_index[lci]);
               j++)
            {
              acl_plugin_print_applied_mask_info (vm, j,
                                                  &am->hash_applied_mask_info_vec_by_lc_index
                                                  [lci][j]);
            }
        }
      if (lci < vec_len (am->hash_entry_vec_by_lc_index))
        {
          vlib_cli_output (vm, "  lookup applied entries:");
          for (j = 0;
               j < vec_len (am->hash_entry_vec_by_lc_index[lci]);
               j++)
            {
              acl_plugin_print_pae (vm, j,
                                    &am->hash_entry_vec_by_lc_index
                                    [lci][j]);
            }
        }
    }
}
void
acl_plugin_show_tables_bihash (u32 show_bihash_verbose)
{
  acl_main_t *am = &acl_main;
  vlib_main_t *vm = am->vlib_main;
  show_hash_acl_hash (vm, am, show_bihash_verbose);
}
/*
 * Split a partition when its collision count goes over the
 * configured threshold.
 *
 * This is a signal that we ignored too many bits in
 * mT and we need to split the table into two tables. We select
 * all of the colliding rules L and find their maximum common
 * tuple mL. Normally mL is specific enough to hash L with few
 * or no collisions. We then create a new table T2 with tuple mL
 * and transfer all compatible rules from T to T2. If mL is not
 * specific enough, we find the field with the biggest difference
 * between the minimum and maximum tuple lengths for all of
 * the rules in L and set that field to be the average of those two
 * values. We then transfer all compatible rules as before. This
 * guarantees that some rules from L will move and that T2 will
 * have a smaller number of collisions than T did.
 */
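/*
 * Worked example (illustrative prefix lengths, not from the original
 * code): if a /16 table accumulates colliding rules whose own masks
 * are /24 and /28, the min/max tuples come out as /24 and /28; the
 * source-address dimension has the biggest spread (4 bits), so the
 * new table's tuple is set between the two at /26 (the max mask
 * shifted by best_delta/2). The /28 rules, whose masks contain all
 * the /26 bits, migrate to the new table, while the /24 rules stay.
 */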
static void
ensure_ip6_min_addr (ip6_address_t * min_addr, ip6_address_t * mask_addr)
{
  int update =
    (clib_net_to_host_u64 (mask_addr->as_u64[0]) <
     clib_net_to_host_u64 (min_addr->as_u64[0]))
    ||
    ((clib_net_to_host_u64 (mask_addr->as_u64[0]) ==
      clib_net_to_host_u64 (min_addr->as_u64[0]))
     && (clib_net_to_host_u64 (mask_addr->as_u64[1]) <
         clib_net_to_host_u64 (min_addr->as_u64[1])));
  if (update)
    {
      min_addr->as_u64[0] = mask_addr->as_u64[0];
      min_addr->as_u64[1] = mask_addr->as_u64[1];
    }
}

static void
ensure_ip6_max_addr (ip6_address_t * max_addr, ip6_address_t * mask_addr)
{
  int update =
    (clib_net_to_host_u64 (mask_addr->as_u64[0]) >
     clib_net_to_host_u64 (max_addr->as_u64[0]))
    ||
    ((clib_net_to_host_u64 (mask_addr->as_u64[0]) ==
      clib_net_to_host_u64 (max_addr->as_u64[0]))
     && (clib_net_to_host_u64 (mask_addr->as_u64[1]) >
         clib_net_to_host_u64 (max_addr->as_u64[1])));
  if (update)
    {
      max_addr->as_u64[0] = mask_addr->as_u64[0];
      max_addr->as_u64[1] = mask_addr->as_u64[1];
    }
}

static void
ensure_ip4_min_addr (ip4_address_t * min_addr, ip4_address_t * mask_addr)
{
  int update =
    (clib_net_to_host_u32 (mask_addr->as_u32) <
     clib_net_to_host_u32 (min_addr->as_u32));
  if (update)
    min_addr->as_u32 = mask_addr->as_u32;
}

static void
ensure_ip4_max_addr (ip4_address_t * max_addr, ip4_address_t * mask_addr)
{
  int update =
    (clib_net_to_host_u32 (mask_addr->as_u32) >
     clib_net_to_host_u32 (max_addr->as_u32));
  if (update)
    max_addr->as_u32 = mask_addr->as_u32;
}
enum {
  DIM_SRC_ADDR = 0,
  DIM_DST_ADDR,
  DIM_SRC_PORT,
  DIM_DST_PORT,
  DIM_PROTO,
};

static void
split_partition(acl_main_t *am, u32 first_index,
                u32 lc_index, int is_ip6){
  DBG( "TM-split_partition - first_entry:%d", first_index);
  applied_hash_ace_entry_t **applied_hash_aces = get_applied_hash_aces(am, lc_index);
  ace_mask_type_entry_t *mte;
  fa_5tuple_t the_min_tuple, *min_tuple = &the_min_tuple;
  fa_5tuple_t the_max_tuple, *max_tuple = &the_max_tuple;
  applied_hash_ace_entry_t *pae = vec_elt_at_index((*applied_hash_aces), first_index);
  hash_acl_info_t *ha = vec_elt_at_index(am->hash_acl_infos, pae->acl_index);
  hash_ace_info_t *ace_info;
  u32 coll_mask_type_index = pae->mask_type_index;
  memset(&the_min_tuple, 0, sizeof(the_min_tuple));
  memset(&the_max_tuple, 0, sizeof(the_max_tuple));

  int i=0;
  u64 collisions = vec_len(pae->colliding_rules);
  for(i=0; i<collisions; i++){

    DBG( "TM-collision: base_ace:%d (ace_mask:%d, first_collision_mask:%d)",
         pae->ace_index, pae->mask_type_index, coll_mask_type_index);

    ace_info = vec_elt_at_index(ha->rules, pae->hash_ace_info_index);
    mte = vec_elt_at_index(am->ace_mask_type_pool, ace_info->base_mask_type_index);
    fa_5tuple_t *mask = &mte->mask;

    if(pae->mask_type_index != coll_mask_type_index) continue;
    /* Computing min_mask and max_mask for colliding rules */
    if(i==0){
      clib_memcpy(min_tuple, mask, sizeof(fa_5tuple_t));
      clib_memcpy(max_tuple, mask, sizeof(fa_5tuple_t));
    } else {
      int j;
      for(j=0; j<2; j++){
        if (is_ip6)
          ensure_ip6_min_addr(&min_tuple->ip6_addr[j], &mask->ip6_addr[j]);
        else
          ensure_ip4_min_addr(&min_tuple->ip4_addr[j], &mask->ip4_addr[j]);

        if ((mask->l4.port[j] < min_tuple->l4.port[j]))
          min_tuple->l4.port[j] = mask->l4.port[j];
      }

      if ((mask->l4.proto < min_tuple->l4.proto))
        min_tuple->l4.proto = mask->l4.proto;

      if(mask->pkt.as_u64 < min_tuple->pkt.as_u64)
        min_tuple->pkt.as_u64 = mask->pkt.as_u64;

      for(j=0; j<2; j++){
        if (is_ip6)
          ensure_ip6_max_addr(&max_tuple->ip6_addr[j], &mask->ip6_addr[j]);
        else
          ensure_ip4_max_addr(&max_tuple->ip4_addr[j], &mask->ip4_addr[j]);

        if ((mask->l4.port[j] > max_tuple->l4.port[j]))
          max_tuple->l4.port[j] = mask->l4.port[j];
      }

      if ((mask->l4.proto > max_tuple->l4.proto))
        max_tuple->l4.proto = mask->l4.proto;

      if(mask->pkt.as_u64 > max_tuple->pkt.as_u64)
        max_tuple->pkt.as_u64 = mask->pkt.as_u64;
    }

    if (pae->next_applied_entry_index == ~0)
      break;
    pae = vec_elt_at_index((*applied_hash_aces), pae->next_applied_entry_index);
  }

  /* Computing the field with the max difference between (min/max)_mask */
  int best_dim=-1, best_delta=0, delta=0;

  /* SRC_addr dimension */
  if (is_ip6) {
    int i;
    for(i=0; i<2; i++){
      delta += count_bits(max_tuple->ip6_addr[0].as_u64[i]) - count_bits(min_tuple->ip6_addr[0].as_u64[i]);
    }
  } else {
    delta += count_bits(max_tuple->ip4_addr[0].as_u32) - count_bits(min_tuple->ip4_addr[0].as_u32);
  }
  if(delta > best_delta){
    best_delta = delta;
    best_dim = DIM_SRC_ADDR;
  }

  /* DST_addr dimension */
  delta = 0;
  if (is_ip6) {
    int i;
    for(i=0; i<2; i++){
      delta += count_bits(max_tuple->ip6_addr[1].as_u64[i]) - count_bits(min_tuple->ip6_addr[1].as_u64[i]);
    }
  } else {
    delta += count_bits(max_tuple->ip4_addr[1].as_u32) - count_bits(min_tuple->ip4_addr[1].as_u32);
  }
  if(delta > best_delta){
    best_delta = delta;
    best_dim = DIM_DST_ADDR;
  }

  /* SRC_port dimension */
  delta = count_bits(max_tuple->l4.port[0]) - count_bits(min_tuple->l4.port[0]);
  if(delta > best_delta){
    best_delta = delta;
    best_dim = DIM_SRC_PORT;
  }

  /* DST_port dimension */
  delta = count_bits(max_tuple->l4.port[1]) - count_bits(min_tuple->l4.port[1]);
  if(delta > best_delta){
    best_delta = delta;
    best_dim = DIM_DST_PORT;
  }

  /* Proto dimension */
  delta = count_bits(max_tuple->l4.proto) - count_bits(min_tuple->l4.proto);
  if(delta > best_delta){
    best_delta = delta;
    best_dim = DIM_PROTO;
  }

  int shifting = 0; //, ipv4_block = 0;
  switch(best_dim){
    case DIM_SRC_ADDR:
      shifting = (best_delta)/2; // FIXME IPV4-only
      // ipv4_block = count_bits(max_tuple->ip4_addr[0].as_u32);
      min_tuple->ip4_addr[0].as_u32 =
          clib_host_to_net_u32((clib_net_to_host_u32(max_tuple->ip4_addr[0].as_u32) << (shifting))&0xFFFFFFFF);
      break;
    case DIM_DST_ADDR:
      shifting = (best_delta)/2;
/*
      ipv4_block = count_bits(max_tuple->addr[1].as_u64[1]);
      if(ipv4_block > shifting)
        min_tuple->addr[1].as_u64[1] =
            clib_host_to_net_u64((clib_net_to_host_u64(max_tuple->addr[1].as_u64[1]) << (shifting))&0xFFFFFFFF);
      else{
        shifting = shifting - ipv4_block;
        min_tuple->addr[1].as_u64[1] = 0;
        min_tuple->addr[1].as_u64[0] =
            clib_host_to_net_u64((clib_net_to_host_u64(max_tuple->addr[1].as_u64[0]) << (shifting))&0xFFFFFFFF);
      }
*/
      min_tuple->ip4_addr[1].as_u32 =
          clib_host_to_net_u32((clib_net_to_host_u32(max_tuple->ip4_addr[1].as_u32) << (shifting))&0xFFFFFFFF);
      break;
    case DIM_SRC_PORT:
      min_tuple->l4.port[0] = max_tuple->l4.port[0] << (best_delta)/2;
      break;
    case DIM_DST_PORT:
      min_tuple->l4.port[1] = max_tuple->l4.port[1] << (best_delta)/2;
      break;
    case DIM_PROTO:
      min_tuple->l4.proto = max_tuple->l4.proto << (best_delta)/2;
      break;
    default:
      relax_tuple(min_tuple, is_ip6, 1);
      break;
  }

  min_tuple->pkt.is_nonfirst_fragment = 0;
  u32 new_mask_type_index = assign_mask_type_index(am, min_tuple);

  hash_applied_mask_info_t **hash_applied_mask_info_vec =
      vec_elt_at_index(am->hash_applied_mask_info_vec_by_lc_index, lc_index);

  hash_applied_mask_info_t *minfo;
  /* search the ordered vector to see if the mask_type_index is already there */
  int search;
  for (search=0; search < vec_len((*hash_applied_mask_info_vec)); search++){
    minfo = vec_elt_at_index((*hash_applied_mask_info_vec), search);
    if(minfo->mask_type_index == new_mask_type_index)
      break;
  }

  vec_validate((*hash_applied_mask_info_vec), search);
  minfo = vec_elt_at_index((*hash_applied_mask_info_vec), search);
  minfo->mask_type_index = new_mask_type_index;
  minfo->num_entries = 0;
  minfo->max_collisions = 0;
  minfo->first_rule_index = ~0;

  DBG( "TM-split_partition - mask type index assigned -> %d", new_mask_type_index);

  if(coll_mask_type_index == new_mask_type_index){
    /* there are collisions over the threshold, but the split is not
       possible: the new mask is identical to the colliding one */
    return;
  }

  /* populate the new partition */
  DBG( "TM-Populate new partition");
  u32 r_ace_index = first_index;

  for(r_ace_index=0; r_ace_index < vec_len((*applied_hash_aces)); r_ace_index++) {

    applied_hash_ace_entry_t *pop_pae = vec_elt_at_index((*applied_hash_aces), r_ace_index);
    DBG( "TM-Population-collision: base_ace:%d (ace_mask:%d, first_collision_mask:%d)",
         pop_pae->ace_index, pop_pae->mask_type_index, coll_mask_type_index);

    if(pop_pae->mask_type_index != coll_mask_type_index) continue;
    u32 next_index = pop_pae->next_applied_entry_index;

    ace_info = vec_elt_at_index(ha->rules, pop_pae->hash_ace_info_index);
    mte = vec_elt_at_index(am->ace_mask_type_pool, ace_info->base_mask_type_index);
    /* an alternative would be to use the currently applied mask:
       mte = vec_elt_at_index(am->ace_mask_type_pool, pop_pae->mask_type_index); */
    fa_5tuple_t *pop_mask = &mte->mask;

    if(!first_mask_contains_second_mask(is_ip6, min_tuple, pop_mask)) continue;
    DBG( "TM-new partition can insert -> applied_ace:%d", r_ace_index);

    /* delete and re-insert with the new mask */
    deactivate_applied_ace_hash_entry(am, lc_index, applied_hash_aces, r_ace_index);

    /* insert the new entry */
    pop_pae->mask_type_index = new_mask_type_index;

    activate_applied_ace_hash_entry(am, lc_index, applied_hash_aces, r_ace_index);

    r_ace_index = next_index;
  }

  DBG( "TM-Populate new partition-END");
  DBG( "TM-split_partition - END");
}