/*
 *------------------------------------------------------------------
 * Copyright (c) 2017 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */
#include <netinet/in.h>

#include <vlibapi/api.h>
#include <vlibmemory/api.h>

#include <vlib/vlib.h>
#include <vnet/vnet.h>
#include <vnet/pg/pg.h>
#include <vppinfra/error.h>
#include <vnet/plugin/plugin.h>
#include <acl/acl.h>
#include <vppinfra/bihash_48_8.h>

#include "hash_lookup.h"
#include "hash_lookup_private.h"
always_inline applied_hash_ace_entry_t **
get_applied_hash_aces(acl_main_t *am, u32 lc_index)
{
  applied_hash_ace_entry_t **applied_hash_aces = vec_elt_at_index(am->hash_entry_vec_by_lc_index, lc_index);
  return applied_hash_aces;
}
static void
hashtable_add_del(acl_main_t *am, clib_bihash_kv_48_8_t *kv, int is_add)
{
    DBG("HASH ADD/DEL: %016llx %016llx %016llx %016llx %016llx %016llx %016llx add %d",
        kv->key[0], kv->key[1], kv->key[2],
        kv->key[3], kv->key[4], kv->key[5], kv->value, is_add);
    BV (clib_bihash_add_del) (&am->acl_lookup_hash, kv, is_add);
}
/*
 * Initial adaptation by Valerio Bruschi (valerio.bruschi@telecom-paristech.fr)
 * based on the TupleMerge [1] simulator kindly made available
 * by James Daly (dalyjamese@gmail.com) and Eric Torng (torng@cse.msu.edu)
 * ( http://www.cse.msu.edu/~dalyjame/ or http://www.cse.msu.edu/~torng/ ),
 * refactoring by Andrew Yourtchenko.
 *
 * [1] James Daly, Eric Torng "TupleMerge: Building Online Packet Classifiers
 * by Omitting Bits", In Proc. IEEE ICCCN 2017, pp. 1-10
 */
/* check if mask2 can be contained by mask1 */
static u8
first_mask_contains_second_mask(int is_ip6, fa_5tuple_t * mask1, fa_5tuple_t * mask2)
{
  int i;
  if (is_ip6) {
    for (i = 0; i < 2; i++) {
      if ((mask1->ip6_addr[0].as_u64[i] & mask2->ip6_addr[0].as_u64[i]) !=
          mask1->ip6_addr[0].as_u64[i])
        return 0;
      if ((mask1->ip6_addr[1].as_u64[i] & mask2->ip6_addr[1].as_u64[i]) !=
          mask1->ip6_addr[1].as_u64[i])
        return 0;
    }
  } else {
    /* check the pads, both masks must have them as 0 */
    u32 padcheck = 0;
    for (i = 0; i < 6; i++) {
      padcheck |= mask1->l3_zero_pad[i];
      padcheck |= mask2->l3_zero_pad[i];
    }
    if (padcheck != 0)
      return 0;
    if ((mask1->ip4_addr[0].as_u32 & mask2->ip4_addr[0].as_u32) !=
        mask1->ip4_addr[0].as_u32)
      return 0;
    if ((mask1->ip4_addr[1].as_u32 & mask2->ip4_addr[1].as_u32) !=
        mask1->ip4_addr[1].as_u32)
      return 0;
  }

  /* take care if the ports are not exact-match */
  if ((mask1->l4.as_u64 & mask2->l4.as_u64) != mask1->l4.as_u64)
    return 0;

  if ((mask1->pkt.as_u64 & mask2->pkt.as_u64) != mask1->pkt.as_u64)
    return 0;

  return 1;
}
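/*
 * Illustrative note (not part of the original source): with IPv4 masks,
 * mask1 = 0xffffff00 (/24) "contains" mask2 = 0xfffffff0 (/28), since
 * mask1 & mask2 == mask1: every bit that mask1 examines is also examined
 * by mask2, so a rule built with the more specific mask2 can be re-masked
 * with mask1 and stored in mask1's table. The reverse check fails: a /28
 * mask does not contain a /24 one.
 */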
/*
 * Consider the situation when we have to create a new table
 * T for a given rule R. This occurs for the first rule inserted and
 * for later rules if it is incompatible with all existing tables.
 * In this event, we need to determine mT for a new table.
 * Setting mT = mR is not a good strategy; if another similar,
 * but slightly less specific, rule appears we will be unable to
 * add it to T and will thus have to create another new table. We
 * thus consider two factors: is the rule more strongly aligned
 * with source or destination addresses (usually the two most
 * important fields) and how much slack needs to be given to
 * allow for other rules. If the source and destination addresses
 * are close together (within 4 bits for our experiments), we use
 * both of them. Otherwise, we drop the smaller (less specific)
 * address and its associated port field from consideration; R is
 * predominantly aligned with one of the two fields and should
 * be grouped with other similar rules. This is similar to TSS
 * dropping port fields, but since it is based on observable rule
 * characteristics it is more likely to keep important fields and
 * discard less useful ones.
 *
 * We then look at the absolute lengths of the addresses. If
 * the address is long, we are more likely to try to add shorter
 * lengths and likewise the reverse. We thus remove a few bits
 * from both address fields with more bits removed from longer
 * addresses. For 32 bit addresses, we remove 4 bits, 3 for more
 * than 24, 2 for more than 16, and so on (so 8 and fewer bits
 * don't have any removed). We only do this for prefix fields like
 * addresses; both range fields (like ports) and exact match fields
 * (like protocol) should remain as they are.
 */
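/*
 * Worked example (illustrative, not from the paper text above): a rule
 * with a /32 source and a /30 destination is within the 4-bit threshold,
 * so both addresses are kept. A rule with a /32 source and a /16
 * destination is not: the less specific /16 destination and its port
 * field are dropped from the table mask. The per-length trimming then
 * widens the kept /32 by 4 bits to a /28, would widen a /20 by 2 bits
 * to a /18, and leaves a /8 untouched.
 */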
static u32
shift_ip4_if(u32 mask, u32 thresh, int numshifts, u32 else_val)
{
  if (mask > thresh)
    return clib_host_to_net_u32((clib_net_to_host_u32(mask) << numshifts) & 0xFFFFFFFF);
  else
    return else_val;
}
static void
relax_ip4_addr(ip4_address_t *ip4_mask, int relax2) {
  int shifts_per_relax[2][4] = { { 6, 5, 4, 2 }, { 3, 2, 1, 1 } };

  int *shifts = shifts_per_relax[relax2];
  if(ip4_mask->as_u32 == 0xffffffff)
    ip4_mask->as_u32 = clib_host_to_net_u32((clib_net_to_host_u32(ip4_mask->as_u32) << shifts[0])&0xFFFFFFFF);
  else
    ip4_mask->as_u32 = shift_ip4_if(ip4_mask->as_u32, 0xffffff00, shifts[1],
                         shift_ip4_if(ip4_mask->as_u32, 0xffff0000, shifts[2],
                           shift_ip4_if(ip4_mask->as_u32, 0xff000000, shifts[3], ip4_mask->as_u32)));
}
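/*
 * Illustrative example (not in the original source): with relax2 == 0 an
 * exact-match /32 source mask (0xffffffff) takes the first branch and is
 * shifted left by shifts[0] == 6 bits, widening it to a /26. Masks longer
 * than /24, /16 or /8 fall through the nested shift_ip4_if() calls and
 * are widened by 5, 4 or 2 bits respectively; anything /8 or shorter is
 * left as-is. relax2 == 1 selects the gentler { 3, 2, 1, 1 } row.
 */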
static void
relax_ip6_addr(ip6_address_t *ip6_mask, int relax2) {
  /*
   * This "better than nothing" relax logic is based on heuristics
   * from IPv6 knowledge, and may not be optimal.
   * Some further tuning may be needed in the future.
   */
  if (ip6_mask->as_u64[0] == 0xffffffffffffffffULL) {
    if (ip6_mask->as_u64[1] == 0xffffffffffffffffULL) {
      /* relax a /128 down to a /64 - likely to have more hosts */
      ip6_mask->as_u64[1] = 0;
    } else if (ip6_mask->as_u64[1] == 0) {
      /* relax a /64 down to a /56 - likely to have more subnets */
      ip6_mask->as_u64[0] = clib_host_to_net_u64(0xffffffffffffff00ULL);
    }
  }
}
static void
relax_tuple(fa_5tuple_t *mask, int is_ip6, int relax2){
  fa_5tuple_t save_mask = *mask;

  int counter_s = 0, counter_d = 0;
  if (is_ip6) {
    int i;
    for(i=0; i<2; i++){
      counter_s += count_bits(mask->ip6_addr[0].as_u64[i]);
      counter_d += count_bits(mask->ip6_addr[1].as_u64[i]);
    }
  } else {
    counter_s += count_bits(mask->ip4_addr[0].as_u32);
    counter_d += count_bits(mask->ip4_addr[1].as_u32);
  }

  /*
   * is the rule more strongly aligned with source or destination addresses
   * (usually the two most important fields) and how much slack needs to be
   * given to allow for other rules. If the source and destination addresses
   * are close together (within 4 bits for our experiments), we use both of them.
   * Otherwise, we drop the smaller (less specific) address and its associated
   * port field from consideration.
   */
  const int deltaThreshold = 4;
  /* const int deltaThreshold = 8; if IPV6? */
  int delta = counter_s - counter_d;
  if (-delta > deltaThreshold) {
    if (is_ip6)
      mask->ip6_addr[0].as_u64[1] = mask->ip6_addr[0].as_u64[0] = 0;
    else
      mask->ip4_addr[0].as_u32 = 0;
    mask->l4.port[0] = 0;
  } else if (delta > deltaThreshold) {
    if (is_ip6)
      mask->ip6_addr[1].as_u64[1] = mask->ip6_addr[1].as_u64[0] = 0;
    else
      mask->ip4_addr[1].as_u32 = 0;
    mask->l4.port[1] = 0;
  }

  if (is_ip6) {
    relax_ip6_addr(&mask->ip6_addr[0], relax2);
    relax_ip6_addr(&mask->ip6_addr[1], relax2);
  } else {
    relax_ip4_addr(&mask->ip4_addr[0], relax2);
    relax_ip4_addr(&mask->ip4_addr[1], relax2);
  }
  mask->pkt.is_nonfirst_fragment = 0;
  mask->pkt.l4_valid = 0;
  if(!first_mask_contains_second_mask(is_ip6, mask, &save_mask)){
    DBG( "TM-relaxing-ERROR");
    *mask = save_mask;
  }
  DBG( "TM-relaxing-end");
}
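/*
 * Illustrative example (not in the original source): a rule matching
 * src 10.0.0.1/32 and dst 192.168.0.0/16 gives counter_s = 32 and
 * counter_d = 16, so delta = 16 > deltaThreshold. The destination
 * address and destination port are zeroed out of the mask, and the
 * surviving /32 source is then widened by relax_ip4_addr(), e.g. down
 * to a /26 when relax2 == 0.
 */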
static u32
tm_assign_mask_type_index(acl_main_t *am, fa_5tuple_t *mask, int is_ip6, u32 lc_index)
{
  u32 mask_type_index = ~0;
  u32 for_mask_type_index = ~0;
  ace_mask_type_entry_t *mte;
  /* look for an existing mask comparable with the one in input */

  hash_applied_mask_info_t **hash_applied_mask_info_vec = vec_elt_at_index(am->hash_applied_mask_info_vec_by_lc_index, lc_index);
  hash_applied_mask_info_t *minfo;

  if (vec_len(*hash_applied_mask_info_vec) > 0) {
    for(int order_index = vec_len((*hash_applied_mask_info_vec)) -1; order_index >= 0; order_index--) {
      minfo = vec_elt_at_index((*hash_applied_mask_info_vec), order_index);
      for_mask_type_index = minfo->mask_type_index;
      mte = vec_elt_at_index(am->ace_mask_type_pool, for_mask_type_index);
      if(first_mask_contains_second_mask(is_ip6, &mte->mask, mask)){
        mask_type_index = (mte - am->ace_mask_type_pool);
        break;
      }
    }
  }

  if(~0 == mask_type_index) {
    /* if no mask is found, then let's use a relaxed version of the original one, in order to be used by new ace_entries */
    DBG( "TM-assigning mask type index-new one");
    pool_get_aligned (am->ace_mask_type_pool, mte, CLIB_CACHE_LINE_BYTES);
    mask_type_index = mte - am->ace_mask_type_pool;

    hash_applied_mask_info_t **hash_applied_mask_info_vec = vec_elt_at_index(am->hash_applied_mask_info_vec_by_lc_index, lc_index);

    int spot = vec_len((*hash_applied_mask_info_vec));
    vec_validate((*hash_applied_mask_info_vec), spot);
    minfo = vec_elt_at_index((*hash_applied_mask_info_vec), spot);
    minfo->mask_type_index = mask_type_index;
    minfo->num_entries = 0;
    minfo->max_collisions = 0;
    minfo->first_rule_index = ~0;

    clib_memcpy(&mte->mask, mask, sizeof(mte->mask));
    relax_tuple(&mte->mask, is_ip6, 0);

    mte->refcount = 0;
    /*
     * We can use only 16 bits, since in the match there is only a u16 field.
     * Realistically, once you go to 64K of mask types, it is a huge
     * problem anyway, so we might as well stop half way.
     */
    ASSERT(mask_type_index < 32768);
  }
  mte = am->ace_mask_type_pool + mask_type_index;
  mte->refcount++;
  return mask_type_index;
}
static void
fill_applied_hash_ace_kv(acl_main_t *am,
                         applied_hash_ace_entry_t **applied_hash_aces,
                         u32 lc_index,
                         u32 new_index, clib_bihash_kv_48_8_t *kv)
{
  fa_5tuple_t *kv_key = (fa_5tuple_t *)kv->key;
  hash_acl_lookup_value_t *kv_val = (hash_acl_lookup_value_t *)&kv->value;
  applied_hash_ace_entry_t *pae = vec_elt_at_index((*applied_hash_aces), new_index);
  hash_acl_info_t *ha = vec_elt_at_index(am->hash_acl_infos, pae->acl_index);

  /* apply the mask to the ace key */
  hash_ace_info_t *ace_info = vec_elt_at_index(ha->rules, pae->hash_ace_info_index);
  ace_mask_type_entry_t *mte = vec_elt_at_index(am->ace_mask_type_pool, pae->mask_type_index);

  u64 *pmatch = (u64 *) &ace_info->match;
  u64 *pmask = (u64 *)&mte->mask;
  u64 *pkey = (u64 *)kv->key;

  *pkey++ = *pmatch++ & *pmask++;
  *pkey++ = *pmatch++ & *pmask++;
  *pkey++ = *pmatch++ & *pmask++;
  *pkey++ = *pmatch++ & *pmask++;
  *pkey++ = *pmatch++ & *pmask++;
  *pkey++ = *pmatch++ & *pmask++;

  kv_key->pkt.mask_type_index_lsb = pae->mask_type_index;
  kv_key->pkt.lc_index = lc_index;

  kv_val->applied_entry_index = new_index;
}
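/*
 * Illustrative note (not in the original source): since lc_index and
 * mask_type_index_lsb are folded into the key itself, the same masked
 * 5-tuple applied in two different lookup contexts, or under two
 * different masks, yields two distinct bihash keys and never collides
 * across contexts.
 */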
static void
add_del_hashtable_entry(acl_main_t *am,
                        u32 lc_index,
                        applied_hash_ace_entry_t **applied_hash_aces,
                        u32 index, int is_add)
{
  clib_bihash_kv_48_8_t kv;

  fill_applied_hash_ace_kv(am, applied_hash_aces, lc_index, index, &kv);
  hashtable_add_del(am, &kv, is_add);
}
static u32
find_mask_type_index(acl_main_t *am, fa_5tuple_t *mask)
{
  ace_mask_type_entry_t *mte;
  /* *INDENT-OFF* */
  pool_foreach(mte, am->ace_mask_type_pool,
  ({
    if(memcmp(&mte->mask, mask, sizeof(*mask)) == 0)
      return (mte - am->ace_mask_type_pool);
  }));
  /* *INDENT-ON* */
  return ~0;
}
static u32
assign_mask_type_index(acl_main_t *am, fa_5tuple_t *mask)
{
  u32 mask_type_index = find_mask_type_index(am, mask);
  ace_mask_type_entry_t *mte;
  if(~0 == mask_type_index) {
    pool_get_aligned (am->ace_mask_type_pool, mte, CLIB_CACHE_LINE_BYTES);
    mask_type_index = mte - am->ace_mask_type_pool;
    clib_memcpy(&mte->mask, mask, sizeof(mte->mask));
    mte->refcount = 0;
    /*
     * We can use only 16 bits, since in the match there is only a u16 field.
     * Realistically, once you go to 64K of mask types, it is a huge
     * problem anyway, so we might as well stop half way.
     */
    ASSERT(mask_type_index < 32768);
  }
  mte = am->ace_mask_type_pool + mask_type_index;
  mte->refcount++;
  return mask_type_index;
}
static void
release_mask_type_index(acl_main_t *am, u32 mask_type_index)
{
  ace_mask_type_entry_t *mte = pool_elt_at_index(am->ace_mask_type_pool, mask_type_index);
  mte->refcount--;
  if (mte->refcount == 0) {
    /* we are not using this entry anymore */
    pool_put(am->ace_mask_type_pool, mte);
  }
}
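/*
 * Illustrative usage sketch (not part of the original code), showing the
 * refcounted pairing of assign/release:
 *
 *   u32 mti = assign_mask_type_index(am, &mask);  // refcount 0 -> 1
 *   ...use the mask type index in applied entries...
 *   release_mask_type_index(am, mti);             // refcount 1 -> 0, pool_put
 *
 * Several ACEs sharing one mask share one pool entry; it is returned to
 * the pool only when the last user releases it.
 */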
static void
remake_hash_applied_mask_info_vec (acl_main_t * am,
				   applied_hash_ace_entry_t **
				   applied_hash_aces, u32 lc_index)
{
  hash_applied_mask_info_t *new_hash_applied_mask_info_vec =
    vec_new (hash_applied_mask_info_t, 0);

  hash_applied_mask_info_t *minfo;
  int i;
  for (i = 0; i < vec_len ((*applied_hash_aces)); i++)
    {
      applied_hash_ace_entry_t *pae =
	vec_elt_at_index ((*applied_hash_aces), i);

      /* check if mask_type_index is already there */
      u32 new_pointer = vec_len (new_hash_applied_mask_info_vec);
      int search;
      for (search = 0; search < vec_len (new_hash_applied_mask_info_vec);
	   search++)
	{
	  minfo = vec_elt_at_index (new_hash_applied_mask_info_vec, search);
	  if (minfo->mask_type_index == pae->mask_type_index)
	    break;
	}

      vec_validate ((new_hash_applied_mask_info_vec), search);
      minfo = vec_elt_at_index ((new_hash_applied_mask_info_vec), search);
      if (search == new_pointer)
	{
	  minfo->mask_type_index = pae->mask_type_index;
	  minfo->num_entries = 0;
	  minfo->max_collisions = 0;
	  minfo->first_rule_index = ~0;
	}

      minfo->num_entries = minfo->num_entries + 1;

      if (vec_len (pae->colliding_rules) > minfo->max_collisions)
	minfo->max_collisions = vec_len (pae->colliding_rules);

      if (minfo->first_rule_index > i)
	minfo->first_rule_index = i;
    }

  hash_applied_mask_info_t **hash_applied_mask_info_vec =
    vec_elt_at_index (am->hash_applied_mask_info_vec_by_lc_index, lc_index);

  vec_free ((*hash_applied_mask_info_vec));
  (*hash_applied_mask_info_vec) = new_hash_applied_mask_info_vec;
}
static void
vec_del_collision_rule (collision_match_rule_t ** pvec,
			u32 applied_entry_index)
{
  u32 i;
  for (i = 0; i < vec_len ((*pvec)); i++)
    {
      collision_match_rule_t *cr = vec_elt_at_index ((*pvec), i);
      if (cr->applied_entry_index == applied_entry_index)
	{
	  vec_del1 ((*pvec), i);
	}
    }
}
static void
del_colliding_rule (applied_hash_ace_entry_t ** applied_hash_aces,
		    u32 head_index, u32 applied_entry_index)
{
  applied_hash_ace_entry_t *head_pae =
    vec_elt_at_index ((*applied_hash_aces), head_index);
  vec_del_collision_rule (&head_pae->colliding_rules, applied_entry_index);
}
static void
add_colliding_rule (acl_main_t * am,
		    applied_hash_ace_entry_t ** applied_hash_aces,
		    u32 head_index, u32 applied_entry_index)
{
  applied_hash_ace_entry_t *head_pae =
    vec_elt_at_index ((*applied_hash_aces), head_index);
  applied_hash_ace_entry_t *pae =
    vec_elt_at_index ((*applied_hash_aces), applied_entry_index);

  collision_match_rule_t cr;

  cr.acl_index = pae->acl_index;
  cr.ace_index = pae->ace_index;
  cr.acl_position = pae->acl_position;
  cr.applied_entry_index = applied_entry_index;
  cr.rule = am->acls[pae->acl_index].rules[pae->ace_index];
  vec_add1 (head_pae->colliding_rules, cr);
}
static u32
activate_applied_ace_hash_entry(acl_main_t *am,
                                u32 lc_index,
                                applied_hash_ace_entry_t **applied_hash_aces,
                                u32 new_index)
{
  clib_bihash_kv_48_8_t kv;
  ASSERT(new_index != ~0);
  applied_hash_ace_entry_t *pae = vec_elt_at_index((*applied_hash_aces), new_index);
  DBG("activate_applied_ace_hash_entry lc_index %d new_index %d", lc_index, new_index);

  fill_applied_hash_ace_kv(am, applied_hash_aces, lc_index, new_index, &kv);

  DBG("APPLY ADD KEY: %016llx %016llx %016llx %016llx %016llx %016llx",
      kv.key[0], kv.key[1], kv.key[2],
      kv.key[3], kv.key[4], kv.key[5]);

  clib_bihash_kv_48_8_t result;
  hash_acl_lookup_value_t *result_val = (hash_acl_lookup_value_t *)&result.value;
  int res = BV (clib_bihash_search) (&am->acl_lookup_hash, &kv, &result);
  ASSERT(new_index != ~0);
  ASSERT(new_index < vec_len((*applied_hash_aces)));
  if (res == 0) {
    /* There already exists an entry or more. Append at the end. */
    u32 first_index = result_val->applied_entry_index;
    ASSERT(first_index != ~0);
    DBG("A key already exists, with applied entry index: %d", first_index);
    applied_hash_ace_entry_t *first_pae = vec_elt_at_index((*applied_hash_aces), first_index);
    u32 last_index = first_pae->tail_applied_entry_index;
    ASSERT(last_index != ~0);
    applied_hash_ace_entry_t *last_pae = vec_elt_at_index((*applied_hash_aces), last_index);
    DBG("...advance to chained entry index: %d", last_index);
    /* link ourselves in */
    last_pae->next_applied_entry_index = new_index;
    pae->prev_applied_entry_index = last_index;
    /* adjust the pointer to the new tail */
    first_pae->tail_applied_entry_index = new_index;
    add_colliding_rule(am, applied_hash_aces, first_index, new_index);
    return first_index;
  } else {
    /* It's the very first entry */
    hashtable_add_del(am, &kv, 1);
    ASSERT(new_index != ~0);
    pae->tail_applied_entry_index = new_index;
    add_colliding_rule(am, applied_hash_aces, new_index, new_index);
    return new_index;
  }
}
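/*
 * Illustrative note (not in the original source): entries hashing to the
 * same masked key form a doubly linked chain anchored at the first
 * applied entry:
 *
 *   bihash value -> head <-> pae <-> ... <-> tail
 *
 * The head also caches tail_applied_entry_index and owns the
 * colliding_rules vector, so appending one more collision is O(1) and
 * does not modify the bihash entry itself.
 */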
static void *
hash_acl_set_heap(acl_main_t *am)
{
  if (0 == am->hash_lookup_mheap) {
    am->hash_lookup_mheap = mheap_alloc (0 /* use VM */ , am->hash_lookup_mheap_size);
    if (0 == am->hash_lookup_mheap) {
      clib_error("ACL plugin failed to allocate hash lookup heap of %U bytes, abort", format_memory_size, am->hash_lookup_mheap_size);
    }
    mheap_t *h = mheap_header (am->hash_lookup_mheap);
    h->flags |= MHEAP_FLAG_THREAD_SAFE;
  }
  void *oldheap = clib_mem_set_heap(am->hash_lookup_mheap);
  return oldheap;
}
void
acl_plugin_hash_acl_set_validate_heap(int on)
{
  acl_main_t *am = &acl_main;
  clib_mem_set_heap(hash_acl_set_heap(am));
  mheap_t *h = mheap_header (am->hash_lookup_mheap);
  if (on) {
    h->flags |= MHEAP_FLAG_VALIDATE;
    h->flags &= ~MHEAP_FLAG_SMALL_OBJECT_CACHE;
    mheap_validate(h);
  } else {
    h->flags &= ~MHEAP_FLAG_VALIDATE;
    h->flags |= MHEAP_FLAG_SMALL_OBJECT_CACHE;
  }
}
void
acl_plugin_hash_acl_set_trace_heap(int on)
{
  acl_main_t *am = &acl_main;
  clib_mem_set_heap(hash_acl_set_heap(am));
  mheap_t *h = mheap_header (am->hash_lookup_mheap);
  if (on) {
    h->flags |= MHEAP_FLAG_TRACE;
  } else {
    h->flags &= ~MHEAP_FLAG_TRACE;
  }
}
static void
assign_mask_type_index_to_pae(acl_main_t *am, u32 lc_index, int is_ip6, applied_hash_ace_entry_t *pae)
{
  hash_acl_info_t *ha = vec_elt_at_index(am->hash_acl_infos, pae->acl_index);
  hash_ace_info_t *ace_info = vec_elt_at_index(ha->rules, pae->hash_ace_info_index);

  ace_mask_type_entry_t *mte;
  fa_5tuple_t *mask;
  /*
   * Start taking the base_mask associated to the ace, and essentially copy it.
   * With TupleMerge we will assign a relaxed mask here.
   */
  mte = vec_elt_at_index(am->ace_mask_type_pool, ace_info->base_mask_type_index);
  mask = &mte->mask;
  if (am->use_tuple_merge)
    pae->mask_type_index = tm_assign_mask_type_index(am, mask, is_ip6, lc_index);
  else
    pae->mask_type_index = assign_mask_type_index(am, mask);
}
static void
split_partition(acl_main_t *am, u32 first_index,
                u32 lc_index, int is_ip6);
static void
check_collision_count_and_maybe_split(acl_main_t *am, u32 lc_index, int is_ip6, u32 first_index)
{
  applied_hash_ace_entry_t **applied_hash_aces = get_applied_hash_aces(am, lc_index);
  applied_hash_ace_entry_t *first_pae = vec_elt_at_index((*applied_hash_aces), first_index);
  if (vec_len(first_pae->colliding_rules) > am->tuple_merge_split_threshold) {
    split_partition(am, first_index, lc_index, is_ip6);
  }
}
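/*
 * Illustrative note (not in the original source): the length of the head
 * entry's colliding_rules vector is compared against the configurable
 * am->tuple_merge_split_threshold; once a single masked key accumulates
 * more colliding rules than that, split_partition() below carves the
 * over-full partition in two, so per-lookup collision walks stay short.
 */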
void
hash_acl_apply(acl_main_t *am, u32 lc_index, int acl_index, u32 acl_position)
{
  int i;

  DBG0("HASH ACL apply: lc_index %d acl %d", lc_index, acl_index);
  if (!am->acl_lookup_hash_initialized) {
    BV (clib_bihash_init) (&am->acl_lookup_hash, "ACL plugin rule lookup bihash",
                           am->hash_lookup_hash_buckets, am->hash_lookup_hash_memory);
    am->acl_lookup_hash_initialized = 1;
  }

  void *oldheap = hash_acl_set_heap(am);
  vec_validate(am->hash_entry_vec_by_lc_index, lc_index);
  vec_validate(am->hash_acl_infos, acl_index);
  applied_hash_ace_entry_t **applied_hash_aces = get_applied_hash_aces(am, lc_index);

  hash_acl_info_t *ha = vec_elt_at_index(am->hash_acl_infos, acl_index);
  u32 **hash_acl_applied_lc_index = &ha->lc_index_list;

  int base_offset = vec_len(*applied_hash_aces);

  /* Update the bitmap of the mask types with which the lookup
     needs to happen for the ACLs applied to this lc_index */
  applied_hash_acl_info_t **applied_hash_acls = &am->applied_hash_acl_info_by_lc_index;
  vec_validate((*applied_hash_acls), lc_index);
  applied_hash_acl_info_t *pal = vec_elt_at_index((*applied_hash_acls), lc_index);

  /* ensure the list of applied hash acls is initialized and add this acl# to it */
  u32 index = vec_search(pal->applied_acls, acl_index);
  if (index != ~0) {
    clib_warning("BUG: trying to apply twice acl_index %d on lc_index %d, according to lc",
                 acl_index, lc_index);
    goto done;
  }
  vec_add1(pal->applied_acls, acl_index);
  u32 index2 = vec_search((*hash_acl_applied_lc_index), lc_index);
  if (index2 != ~0) {
    clib_warning("BUG: trying to apply twice acl_index %d on lc_index %d, according to hash h-acl info",
                 acl_index, lc_index);
    goto done;
  }
  vec_add1((*hash_acl_applied_lc_index), lc_index);

  /*
   * if the applied ACL is empty, the current code will cause a
   * different behavior compared to current linear search: an empty ACL will
   * simply fall through to the next ACL, or the default deny at the end.
   *
   * This is not a problem, because after the vpp-dev discussion,
   * the consensus was it should not be possible to apply the non-existent
   * ACL, so the change adding this code also takes care of that.
   */

  /* expand the applied aces vector by the necessary amount */
  vec_resize((*applied_hash_aces), vec_len(ha->rules));

  vec_validate(am->hash_applied_mask_info_vec_by_lc_index, lc_index);
  /* add the rules from the ACL to the hash table for lookup and append to the vector */
  for(i=0; i < vec_len(ha->rules); i++) {
    int is_ip6 = ha->rules[i].match.pkt.is_ip6;
    u32 new_index = base_offset + i;
    applied_hash_ace_entry_t *pae = vec_elt_at_index((*applied_hash_aces), new_index);
    pae->acl_index = acl_index;
    pae->ace_index = ha->rules[i].ace_index;
    pae->acl_position = acl_position;
    pae->action = ha->rules[i].action;
    pae->hitcount = 0;
    pae->hash_ace_info_index = i;
    /* we might link it in later */
    pae->next_applied_entry_index = ~0;
    pae->prev_applied_entry_index = ~0;
    pae->tail_applied_entry_index = ~0;
    pae->colliding_rules = NULL;
    pae->mask_type_index = ~0;
    assign_mask_type_index_to_pae(am, lc_index, is_ip6, pae);
    u32 first_index = activate_applied_ace_hash_entry(am, lc_index, applied_hash_aces, new_index);
    if (am->use_tuple_merge)
      check_collision_count_and_maybe_split(am, lc_index, is_ip6, first_index);
  }
  remake_hash_applied_mask_info_vec(am, applied_hash_aces, lc_index);
done:
  clib_mem_set_heap (oldheap);
}
static u32
find_head_applied_ace_index(applied_hash_ace_entry_t **applied_hash_aces, u32 curr_index)
{
  /*
   * find back the first entry. Inefficient, so might need to be a bit cleverer
   * if this proves to be a problem..
   */
  u32 an_index = curr_index;
  ASSERT(an_index != ~0);
  applied_hash_ace_entry_t *head_pae = vec_elt_at_index((*applied_hash_aces), an_index);
  while(head_pae->prev_applied_entry_index != ~0) {
    an_index = head_pae->prev_applied_entry_index;
    ASSERT(an_index != ~0);
    head_pae = vec_elt_at_index((*applied_hash_aces), an_index);
  }
  return an_index;
}
static void
move_applied_ace_hash_entry(acl_main_t *am,
                            u32 lc_index,
                            applied_hash_ace_entry_t **applied_hash_aces,
                            u32 old_index, u32 new_index)
{
  ASSERT(old_index != ~0);
  ASSERT(new_index != ~0);
  /* move the entry */
  *vec_elt_at_index((*applied_hash_aces), new_index) = *vec_elt_at_index((*applied_hash_aces), old_index);

  /* update the linkage and hash table if necessary */
  applied_hash_ace_entry_t *pae = vec_elt_at_index((*applied_hash_aces), old_index);

  if (pae->prev_applied_entry_index != ~0) {
    applied_hash_ace_entry_t *prev_pae = vec_elt_at_index((*applied_hash_aces), pae->prev_applied_entry_index);
    ASSERT(prev_pae->next_applied_entry_index == old_index);
    prev_pae->next_applied_entry_index = new_index;
  } else {
    /* first entry - so the hash points to it, update */
    add_del_hashtable_entry(am, lc_index,
                            applied_hash_aces, new_index, 1);
    ASSERT(pae->tail_applied_entry_index != ~0);
  }
  if (pae->next_applied_entry_index != ~0) {
    applied_hash_ace_entry_t *next_pae = vec_elt_at_index((*applied_hash_aces), pae->next_applied_entry_index);
    ASSERT(next_pae->prev_applied_entry_index == old_index);
    next_pae->prev_applied_entry_index = new_index;
  } else {
    /*
     * Moving the very last entry, so we need to update the tail pointer in the first one.
     */
    u32 head_index = find_head_applied_ace_index(applied_hash_aces, old_index);
    ASSERT(head_index != ~0);
    applied_hash_ace_entry_t *head_pae = vec_elt_at_index((*applied_hash_aces), head_index);

    ASSERT(head_pae->tail_applied_entry_index == old_index);
    head_pae->tail_applied_entry_index = new_index;
  }
  /* invalidate the old entry */
  pae->prev_applied_entry_index = ~0;
  pae->next_applied_entry_index = ~0;
  pae->tail_applied_entry_index = ~0;
}
static void
deactivate_applied_ace_hash_entry(acl_main_t *am,
                                  u32 lc_index,
                                  applied_hash_ace_entry_t **applied_hash_aces,
                                  u32 old_index)
{
  applied_hash_ace_entry_t *pae = vec_elt_at_index((*applied_hash_aces), old_index);
  DBG("UNAPPLY DEACTIVATE: lc_index %d applied index %d", lc_index, old_index);

  if (pae->prev_applied_entry_index != ~0) {
    DBG("UNAPPLY = index %d has prev_applied_entry_index %d", old_index, pae->prev_applied_entry_index);
    applied_hash_ace_entry_t *prev_pae = vec_elt_at_index((*applied_hash_aces), pae->prev_applied_entry_index);
    ASSERT(prev_pae->next_applied_entry_index == old_index);
    prev_pae->next_applied_entry_index = pae->next_applied_entry_index;

    u32 head_index = find_head_applied_ace_index(applied_hash_aces, old_index);
    ASSERT(head_index != ~0);
    applied_hash_ace_entry_t *head_pae = vec_elt_at_index((*applied_hash_aces), head_index);
    del_colliding_rule(applied_hash_aces, head_index, old_index);

    if (pae->next_applied_entry_index == ~0) {
      /* it was the last entry we removed, update the pointer on the first one */
      ASSERT(head_pae->tail_applied_entry_index == old_index);
      head_pae->tail_applied_entry_index = pae->prev_applied_entry_index;
    } else {
      applied_hash_ace_entry_t *next_pae = vec_elt_at_index((*applied_hash_aces), pae->next_applied_entry_index);
      next_pae->prev_applied_entry_index = pae->prev_applied_entry_index;
    }
  } else {
    /* It was the first entry. We need either to reset the hash entry or delete it */
    if (pae->next_applied_entry_index != ~0) {
      /* the next element becomes the new first one, so needs the tail pointer to be set */
      applied_hash_ace_entry_t *next_pae = vec_elt_at_index((*applied_hash_aces), pae->next_applied_entry_index);
      ASSERT(pae->tail_applied_entry_index != ~0);
      next_pae->tail_applied_entry_index = pae->tail_applied_entry_index;
      /* Remove ourselves and transfer the ownership of the colliding rules vector */
      del_colliding_rule(applied_hash_aces, old_index, old_index);
      next_pae->colliding_rules = pae->colliding_rules;
      /* unlink from the next element */
      next_pae->prev_applied_entry_index = ~0;
      add_del_hashtable_entry(am, lc_index,
                              applied_hash_aces, pae->next_applied_entry_index, 1);
    } else {
      /* no next entry, so just delete the entry in the hash table */
      add_del_hashtable_entry(am, lc_index,
                              applied_hash_aces, old_index, 0);
    }
  }

  release_mask_type_index(am, pae->mask_type_index);
  /* invalidate the old entry */
  pae->mask_type_index = ~0;
  pae->prev_applied_entry_index = ~0;
  pae->next_applied_entry_index = ~0;
  pae->tail_applied_entry_index = ~0;
  /* always has to be 0 */
  pae->colliding_rules = NULL;
}
void
hash_acl_unapply(acl_main_t *am, u32 lc_index, int acl_index)
{
  int i;

  DBG0("HASH ACL unapply: lc_index %d acl %d", lc_index, acl_index);
  applied_hash_acl_info_t **applied_hash_acls = &am->applied_hash_acl_info_by_lc_index;
  applied_hash_acl_info_t *pal = vec_elt_at_index((*applied_hash_acls), lc_index);

  hash_acl_info_t *ha = vec_elt_at_index(am->hash_acl_infos, acl_index);
  u32 **hash_acl_applied_lc_index = &ha->lc_index_list;

  /* remove this acl# from the list of applied hash acls */
  u32 index = vec_search(pal->applied_acls, acl_index);
  if (index == ~0) {
    clib_warning("BUG: trying to unapply unapplied acl_index %d on lc_index %d, according to lc",
                 acl_index, lc_index);
    return;
  }
  vec_del1(pal->applied_acls, index);

  u32 index2 = vec_search((*hash_acl_applied_lc_index), lc_index);
  if (index2 == ~0) {
    clib_warning("BUG: trying to unapply twice acl_index %d on lc_index %d, according to h-acl info",
                 acl_index, lc_index);
    return;
  }
  vec_del1((*hash_acl_applied_lc_index), index2);

  applied_hash_ace_entry_t **applied_hash_aces = get_applied_hash_aces(am, lc_index);

  for(i=0; i < vec_len((*applied_hash_aces)); i++) {
    if (vec_elt_at_index(*applied_hash_aces,i)->acl_index == acl_index) {
      DBG("Found applied ACL#%d at applied index %d", acl_index, i);
      break;
    }
  }
  if (vec_len((*applied_hash_aces)) <= i) {
    DBG("Did not find applied ACL#%d at lc_index %d", acl_index, lc_index);
    /* we went all the way without finding any entries. Probably a list was empty. */
    return;
  }

  void *oldheap = hash_acl_set_heap(am);
  int base_offset = i;
  int tail_offset = base_offset + vec_len(ha->rules);
  int tail_len = vec_len((*applied_hash_aces)) - tail_offset;
  DBG("base_offset: %d, tail_offset: %d, tail_len: %d", base_offset, tail_offset, tail_len);

  for(i=0; i < vec_len(ha->rules); i ++) {
    deactivate_applied_ace_hash_entry(am, lc_index,
                                      applied_hash_aces, base_offset + i);
  }
  for(i=0; i < tail_len; i ++) {
    /* move the entry at tail offset to base offset */
    /* that is, from (tail_offset+i) -> (base_offset+i) */
    DBG("UNAPPLY MOVE: lc_index %d, applied index %d -> %d", lc_index, tail_offset+i, base_offset + i);
    move_applied_ace_hash_entry(am, lc_index, applied_hash_aces, tail_offset + i, base_offset + i);
  }
  /* trim the end of the vector */
  _vec_len((*applied_hash_aces)) -= vec_len(ha->rules);

  remake_hash_applied_mask_info_vec(am, applied_hash_aces, lc_index);

  clib_mem_set_heap (oldheap);
}
/*
 * Create the applied ACEs and update the hash table,
 * taking into account that the ACL may not be the last
 * in the vector of applied ACLs.
 *
 * For now, walk from the end of the vector and unapply the ACLs,
 * then apply the one in question and reapply the rest.
 */
void
hash_acl_reapply(acl_main_t *am, u32 lc_index, int acl_index)
{
  acl_lookup_context_t *acontext = pool_elt_at_index(am->acl_lookup_contexts, lc_index);
  u32 **applied_acls = &acontext->acl_indices;
  int i;
  int start_index = vec_search((*applied_acls), acl_index);

  DBG0("Start index for acl %d in lc_index %d is %d", acl_index, lc_index, start_index);
  /*
   * This function is called after we find out the lc_index where the ACL is applied.
   * If the by-lc_index vector does not have the ACL#, then it's a bug.
   */
  ASSERT(start_index < vec_len(*applied_acls));

  /* unapply all the ACLs at the tail side, up to the current one */
  for(i = vec_len(*applied_acls) - 1; i > start_index; i--) {
    hash_acl_unapply(am, lc_index, *vec_elt_at_index(*applied_acls, i));
  }
  for(i = start_index; i < vec_len(*applied_acls); i++) {
    hash_acl_apply(am, lc_index, *vec_elt_at_index(*applied_acls, i), i);
  }
}
static void
make_ip6_address_mask(ip6_address_t *addr, u8 prefix_len)
{
  ip6_address_mask_from_width(addr, prefix_len);
}
/* Maybe should be moved into the core somewhere */
always_inline void
ip4_address_mask_from_width (ip4_address_t * a, u32 width)
{
  int i, byte, bit, bitnum;
  ASSERT (width <= 32);
  memset (a, 0, sizeof (a[0]));
  for (i = 0; i < width; i++)
    {
      bitnum = (7 - (i & 7));
      byte = i / 8;
      bit = 1 << bitnum;
      a->as_u8[byte] |= bit;
    }
}
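/*
 * Worked example (illustrative, not in the original source): width == 20
 * sets the top 20 bits, so as_u8[0] and as_u8[1] become 0xff and
 * as_u8[2] becomes 0xf0, i.e. the /20 mask 255.255.240.0.
 */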
static void
make_ip4_address_mask(ip4_address_t *addr, u8 prefix_len)
{
  ip4_address_mask_from_width(addr, prefix_len);
}
static void
make_port_mask(u16 *portmask, u16 port_first, u16 port_last)
{
  if (port_first == port_last) {
    *portmask = 0xffff;
    /* a single port is representable by a masked value */
    return;
  }

  *portmask = 0;
  return;
}
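/*
 * Illustrative note (not in the original source): only two cases are
 * representable as a single mask/value pair: an exact port such as
 * 80-80 (mask 0xffff) and "any port" (mask 0). A genuine sub-range such
 * as 1000-2000 therefore degrades to "any" in the hash mask; the exact
 * range is still enforced later, when the colliding rules stored with
 * the entry (which carry the full ACE) are checked.
 */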
static void
make_mask_and_match_from_rule(fa_5tuple_t *mask, acl_rule_t *r, hash_ace_info_t *hi)
{
  memset(mask, 0, sizeof(*mask));
  memset(&hi->match, 0, sizeof(hi->match));
  hi->action = r->is_permit;

  /* we will need to be matching based on lc_index and mask_type_index when applied */
  mask->pkt.lc_index = ~0;
  /* we will assign the match of mask_type_index later when we find it */
  mask->pkt.mask_type_index_lsb = ~0;

  mask->pkt.is_ip6 = 1;
  hi->match.pkt.is_ip6 = r->is_ipv6;
  if (r->is_ipv6) {
    make_ip6_address_mask(&mask->ip6_addr[0], r->src_prefixlen);
    hi->match.ip6_addr[0] = r->src.ip6;
    make_ip6_address_mask(&mask->ip6_addr[1], r->dst_prefixlen);
    hi->match.ip6_addr[1] = r->dst.ip6;
  } else {
    memset(hi->match.l3_zero_pad, 0, sizeof(hi->match.l3_zero_pad));
    make_ip4_address_mask(&mask->ip4_addr[0], r->src_prefixlen);
    hi->match.ip4_addr[0] = r->src.ip4;
    make_ip4_address_mask(&mask->ip4_addr[1], r->dst_prefixlen);
    hi->match.ip4_addr[1] = r->dst.ip4;
  }

  if (r->proto != 0) {
    mask->l4.proto = ~0; /* L4 proto needs to be matched */
    hi->match.l4.proto = r->proto;

    /* Calculate the src/dst port masks and make the src/dst port matches accordingly */
    make_port_mask(&mask->l4.port[0], r->src_port_or_type_first, r->src_port_or_type_last);
    hi->match.l4.port[0] = r->src_port_or_type_first & mask->l4.port[0];

    make_port_mask(&mask->l4.port[1], r->dst_port_or_code_first, r->dst_port_or_code_last);
    hi->match.l4.port[1] = r->dst_port_or_code_first & mask->l4.port[1];
    /* L4 info must be valid in order to match */
    mask->pkt.l4_valid = 1;
    hi->match.pkt.l4_valid = 1;
    /* And we must set the mask to check that it is an initial fragment */
    mask->pkt.is_nonfirst_fragment = 1;
    hi->match.pkt.is_nonfirst_fragment = 0;
    if ((r->proto == IPPROTO_TCP) && (r->tcp_flags_mask != 0)) {
      /* if we want to match on TCP flags, they must be masked off as well */
      mask->pkt.tcp_flags = r->tcp_flags_mask;
      hi->match.pkt.tcp_flags = r->tcp_flags_value;
      /* and the flags need to be present within the packet being matched */
      mask->pkt.tcp_flags_valid = 1;
      hi->match.pkt.tcp_flags_valid = 1;
    }
  }
  /* Sanitize the mask and the match */
  u64 *pmask = (u64 *)mask;
  u64 *pmatch = (u64 *)&hi->match;
  int j;
  for(j=0; j<6; j++) {
    pmatch[j] = pmatch[j] & pmask[j];
  }
}
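/*
 * Worked example (illustrative, not in the original source): for a rule
 * like "permit tcp 10.0.0.0/8 -> any, src port 80", the mask gets a /8
 * source address, an all-zero destination, proto 0xff and src port
 * 0xffff, while the match carries 10.0.0.0, proto 6 and port 80. Both
 * also get l4_valid set, and is_nonfirst_fragment goes into the mask so
 * that only initial fragments can match the L4 part.
 */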
int hash_acl_exists(acl_main_t *am, int acl_index)
{
  if (acl_index >= vec_len(am->hash_acl_infos))
    return 0;

  hash_acl_info_t *ha = vec_elt_at_index(am->hash_acl_infos, acl_index);
  return ha->hash_acl_exists;
}
void hash_acl_add(acl_main_t *am, int acl_index)
{
  void *oldheap = hash_acl_set_heap(am);
  DBG("HASH ACL add : %d", acl_index);
  int i;
  acl_list_t *a = &am->acls[acl_index];
  vec_validate(am->hash_acl_infos, acl_index);
  hash_acl_info_t *ha = vec_elt_at_index(am->hash_acl_infos, acl_index);
  memset(ha, 0, sizeof(*ha));
  ha->hash_acl_exists = 1;

  /* walk the newly added ACL entries and ensure that for each of them there
     is a mask type, increment a reference count for that mask type */
  for(i=0; i < a->count; i++) {
    hash_ace_info_t ace_info;
    fa_5tuple_t mask;
    memset(&ace_info, 0, sizeof(ace_info));
    ace_info.acl_index = acl_index;
    ace_info.ace_index = i;

    make_mask_and_match_from_rule(&mask, &a->rules[i], &ace_info);
    mask.pkt.flags_reserved = 0b000;
    ace_info.base_mask_type_index = assign_mask_type_index(am, &mask);
    /* assign the mask type index for matching itself */
    ace_info.match.pkt.mask_type_index_lsb = ace_info.base_mask_type_index;
    DBG("ACE: %d mask_type_index: %d", i, ace_info.base_mask_type_index);
    vec_add1(ha->rules, ace_info);
  }
  /*
   * if an ACL is applied somewhere, fill the corresponding lookup data structures.
   * We need to take care if the ACL is not the last one in the vector of ACLs applied to the interface.
   */
  if (acl_index < vec_len(am->lc_index_vec_by_acl)) {
    u32 *lc_index;
    vec_foreach(lc_index, am->lc_index_vec_by_acl[acl_index]) {
      hash_acl_reapply(am, *lc_index, acl_index);
    }
  }
  clib_mem_set_heap (oldheap);
}
void hash_acl_delete(acl_main_t *am, int acl_index)
{
  void *oldheap = hash_acl_set_heap(am);
  DBG0("HASH ACL delete : %d", acl_index);
  /*
   * If the ACL is applied somewhere, remove the references of it (call hash_acl_unapply) -
   * this is a different behavior from the linear lookup, where an empty ACL is "deny all".
   *
   * However, following the vpp-dev discussion, an ACL that is referenced elsewhere
   * should not be possible to delete, and the change adding this code also adds
   * the safeguards to that effect, so this is not a problem.
   *
   * The part to remember is that this routine is called in the process of reapplication
   * during the acl_add_replace() API call - the old acl ruleset is deleted, then
   * the new one is added, without the change in the applied ACLs - so this case
   * has to be handled.
   */
  hash_acl_info_t *ha = vec_elt_at_index(am->hash_acl_infos, acl_index);
  u32 *lc_list_copy = 0;
  {
    u32 *lc_index;
    lc_list_copy = vec_dup(ha->lc_index_list);
    vec_foreach(lc_index, lc_list_copy) {
      hash_acl_unapply(am, *lc_index, acl_index);
    }
    vec_free(lc_list_copy);
  }

  /* walk the mask types for the ACL about-to-be-deleted, and decrease
   * the reference count, possibly freeing up some of them */
  int i;
  for(i=0; i < vec_len(ha->rules); i++) {
    release_mask_type_index(am, ha->rules[i].base_mask_type_index);
  }
  ha->hash_acl_exists = 0;
  vec_free(ha->rules);
  clib_mem_set_heap (oldheap);
}
void
show_hash_acl_hash (vlib_main_t * vm, acl_main_t *am, u32 verbose)
{
  vlib_cli_output(vm, "\nACL lookup hash table:\n%U\n",
                  BV (format_bihash), &am->acl_lookup_hash, verbose);
}
void
acl_plugin_show_tables_mask_type (void)
{
  acl_main_t *am = &acl_main;
  vlib_main_t *vm = am->vlib_main;
  ace_mask_type_entry_t *mte;

  vlib_cli_output (vm, "Mask-type entries:");
  /* *INDENT-OFF* */
  pool_foreach(mte, am->ace_mask_type_pool,
  ({
    vlib_cli_output(vm, "     %3d: %016llx %016llx %016llx %016llx %016llx %016llx  refcount %d",
                    mte - am->ace_mask_type_pool,
                    mte->mask.kv_40_8.key[0], mte->mask.kv_40_8.key[1], mte->mask.kv_40_8.key[2],
                    mte->mask.kv_40_8.key[3], mte->mask.kv_40_8.key[4], mte->mask.kv_40_8.value, mte->refcount);
  }));
  /* *INDENT-ON* */
}
void
acl_plugin_show_tables_acl_hash_info (u32 acl_index)
{
  acl_main_t *am = &acl_main;
  vlib_main_t *vm = am->vlib_main;
  u32 i, j;
  u64 *m;
  vlib_cli_output (vm, "Mask-ready ACL representations\n");
  for (i = 0; i < vec_len (am->hash_acl_infos); i++)
    {
      if ((acl_index != ~0) && (acl_index != i))
	{
	  continue;
	}
      hash_acl_info_t *ha = &am->hash_acl_infos[i];
      vlib_cli_output (vm, "acl-index %u bitmask-ready layout\n", i);
      vlib_cli_output (vm, "  applied lc_index list: %U\n",
		       format_vec32, ha->lc_index_list, "%d");
      for (j = 0; j < vec_len (ha->rules); j++)
	{
	  hash_ace_info_t *pa = &ha->rules[j];
	  m = (u64 *) & pa->match;
	  vlib_cli_output (vm,
			   "    %4d: %016llx %016llx %016llx %016llx %016llx %016llx base mask index %d acl %d rule %d action %d\n",
			   j, m[0], m[1], m[2], m[3], m[4], m[5],
			   pa->base_mask_type_index, pa->acl_index, pa->ace_index,
			   pa->action);
	}
    }
}
static void
acl_plugin_print_colliding_rule (vlib_main_t * vm, int j, collision_match_rule_t *cr) {
  vlib_cli_output(vm,
                  "        %4d: acl %d ace %d acl pos %d pae index: %d",
                  j, cr->acl_index, cr->ace_index, cr->acl_position, cr->applied_entry_index);
}
static void
acl_plugin_print_pae (vlib_main_t * vm, int j, applied_hash_ace_entry_t * pae)
{
  vlib_cli_output (vm,
		   "    %4d: acl %d rule %d action %d bitmask-ready rule %d colliding_rules: %d next %d prev %d tail %d hitcount %lld acl_pos: %d",
		   j, pae->acl_index, pae->ace_index, pae->action,
		   pae->hash_ace_info_index, vec_len(pae->colliding_rules), pae->next_applied_entry_index,
		   pae->prev_applied_entry_index,
		   pae->tail_applied_entry_index, pae->hitcount, pae->acl_position);
  int jj;
  for(jj=0; jj<vec_len(pae->colliding_rules); jj++)
    acl_plugin_print_colliding_rule(vm, jj, vec_elt_at_index(pae->colliding_rules, jj));
}
static void
acl_plugin_print_applied_mask_info (vlib_main_t * vm, int j, hash_applied_mask_info_t *mi)
{
  vlib_cli_output (vm,
		   "    %4d: mask type index %d first rule index %d num_entries %d max_collisions %d",
		   j, mi->mask_type_index, mi->first_rule_index, mi->num_entries, mi->max_collisions);
}
void
acl_plugin_show_tables_applied_info (u32 lc_index)
{
  acl_main_t *am = &acl_main;
  vlib_main_t *vm = am->vlib_main;
  u32 lci, j;
  vlib_cli_output (vm, "Applied lookup entries for lookup contexts");

  for (lci = 0;
       (lci < vec_len(am->applied_hash_acl_info_by_lc_index)); lci++)
    {
      if ((lc_index != ~0) && (lc_index != lci))
	{
	  continue;
	}
      vlib_cli_output (vm, "lc_index %d:", lci);
      if (lci < vec_len (am->applied_hash_acl_info_by_lc_index))
	{
	  applied_hash_acl_info_t *pal =
	    &am->applied_hash_acl_info_by_lc_index[lci];
	  vlib_cli_output (vm, "  applied acls: %U", format_vec32,
			   pal->applied_acls, "%d");
	}
      if (lci < vec_len (am->hash_applied_mask_info_vec_by_lc_index))
	{
	  vlib_cli_output (vm, "  applied mask info entries:");
	  for (j = 0;
	       j < vec_len (am->hash_applied_mask_info_vec_by_lc_index[lci]);
	       j++)
	    {
	      acl_plugin_print_applied_mask_info (vm, j,
						  &am->hash_applied_mask_info_vec_by_lc_index
						  [lci][j]);
	    }
	}
      if (lci < vec_len (am->hash_entry_vec_by_lc_index))
	{
	  vlib_cli_output (vm, "  lookup applied entries:");
	  for (j = 0;
	       j < vec_len (am->hash_entry_vec_by_lc_index[lci]);
	       j++)
	    {
	      acl_plugin_print_pae (vm, j,
				    &am->hash_entry_vec_by_lc_index
				    [lci][j]);
	    }
	}
    }
}
void
acl_plugin_show_tables_bihash (u32 show_bihash_verbose)
{
  acl_main_t *am = &acl_main;
  vlib_main_t *vm = am->vlib_main;
  show_hash_acl_hash (vm, am, show_bihash_verbose);
}
/*
 * Split of the partition needs to happen when the collision count
 * goes over a specified threshold.
 *
 * This is a signal that we ignored too many bits in
 * mT and we need to split the table into two tables. We select
 * all of the colliding rules L and find their maximum common
 * tuple mL. Normally mL is specific enough to hash L with few
 * or no collisions. We then create a new table T2 with tuple mL
 * and transfer all compatible rules from T to T2. If mL is not
 * specific enough, we find the field with the biggest difference
 * between the minimum and maximum tuple lengths for all of
 * the rules in L and set that field to be the average of those two
 * values. We then transfer all compatible rules as before. This
 * guarantees that some rules from L will move and that T2 will
 * have a smaller number of collisions than T did.
 */
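/*
 * Worked example (illustrative, not in the original source): if the
 * colliding rules carry source masks /8, /12 and /16, the computed
 * min/max tuples differ by 8 bits in the source-address dimension. If
 * that is the largest spread, the new table mask is cut to the midpoint:
 * the /16 maximum is shifted left by best_delta/2 == 4 bits, giving a
 * /12, and every colliding rule whose own mask is at least as specific
 * as the /12 migrates to the new partition.
 */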
static void
ensure_ip6_min_addr (ip6_address_t * min_addr, ip6_address_t * mask_addr)
{
  int update =
    (clib_net_to_host_u64 (mask_addr->as_u64[0]) <
     clib_net_to_host_u64 (min_addr->as_u64[0]))
    ||
    ((clib_net_to_host_u64 (mask_addr->as_u64[0]) ==
      clib_net_to_host_u64 (min_addr->as_u64[0]))
     && (clib_net_to_host_u64 (mask_addr->as_u64[1]) <
	 clib_net_to_host_u64 (min_addr->as_u64[1])));
  if (update)
    {
      min_addr->as_u64[0] = mask_addr->as_u64[0];
      min_addr->as_u64[1] = mask_addr->as_u64[1];
    }
}
static void
ensure_ip6_max_addr (ip6_address_t * max_addr, ip6_address_t * mask_addr)
{
  int update =
    (clib_net_to_host_u64 (mask_addr->as_u64[0]) >
     clib_net_to_host_u64 (max_addr->as_u64[0]))
    ||
    ((clib_net_to_host_u64 (mask_addr->as_u64[0]) ==
      clib_net_to_host_u64 (max_addr->as_u64[0]))
     && (clib_net_to_host_u64 (mask_addr->as_u64[1]) >
	 clib_net_to_host_u64 (max_addr->as_u64[1])));
  if (update)
    {
      max_addr->as_u64[0] = mask_addr->as_u64[0];
      max_addr->as_u64[1] = mask_addr->as_u64[1];
    }
}
static void
ensure_ip4_min_addr (ip4_address_t * min_addr, ip4_address_t * mask_addr)
{
  int update =
    (clib_net_to_host_u32 (mask_addr->as_u32) <
     clib_net_to_host_u32 (min_addr->as_u32));
  if (update)
    min_addr->as_u32 = mask_addr->as_u32;
}
static void
ensure_ip4_max_addr (ip4_address_t * max_addr, ip4_address_t * mask_addr)
{
  int update =
    (clib_net_to_host_u32 (mask_addr->as_u32) >
     clib_net_to_host_u32 (max_addr->as_u32));
  if (update)
    max_addr->as_u32 = mask_addr->as_u32;
}
enum {
  DIM_SRC_ADDR = 0,
  DIM_DST_ADDR,
  DIM_SRC_PORT,
  DIM_DST_PORT,
  DIM_PROTO,
};

static void
split_partition(acl_main_t *am, u32 first_index,
                u32 lc_index, int is_ip6){
  DBG( "TM-split_partition - first_entry:%d", first_index);
  applied_hash_ace_entry_t **applied_hash_aces = get_applied_hash_aces(am, lc_index);
  ace_mask_type_entry_t *mte;
  fa_5tuple_t the_min_tuple, *min_tuple = &the_min_tuple;
  fa_5tuple_t the_max_tuple, *max_tuple = &the_max_tuple;
  applied_hash_ace_entry_t *pae = vec_elt_at_index((*applied_hash_aces), first_index);
  hash_acl_info_t *ha = vec_elt_at_index(am->hash_acl_infos, pae->acl_index);
  hash_ace_info_t *ace_info;
  u32 coll_mask_type_index = pae->mask_type_index;
  memset(&the_min_tuple, 0, sizeof(the_min_tuple));
  memset(&the_max_tuple, 0, sizeof(the_max_tuple));

  int i = 0;
  u64 collisions = vec_len(pae->colliding_rules);
  // while(pae->next_applied_entry_index == ~0){
  for(i=0; i<collisions; i++){
    DBG( "TM-collision: base_ace:%d (ace_mask:%d, first_collision_mask:%d)",
         pae->ace_index, pae->mask_type_index, coll_mask_type_index);

    ace_info = vec_elt_at_index(ha->rules, pae->hash_ace_info_index);
    mte = vec_elt_at_index(am->ace_mask_type_pool, ace_info->base_mask_type_index);
    fa_5tuple_t *mask = &mte->mask;

    if(pae->mask_type_index != coll_mask_type_index) continue;
    /* Computing min_mask and max_mask for colliding rules */
    if(i == 0){
      clib_memcpy(min_tuple, mask, sizeof(fa_5tuple_t));
      clib_memcpy(max_tuple, mask, sizeof(fa_5tuple_t));
    } else {
      int j;
      for(j=0; j<2; j++){
        if(is_ip6)
          ensure_ip6_min_addr(&min_tuple->ip6_addr[j], &mask->ip6_addr[j]);
        else
          ensure_ip4_min_addr(&min_tuple->ip4_addr[j], &mask->ip4_addr[j]);

        if ((mask->l4.port[j] < min_tuple->l4.port[j]))
          min_tuple->l4.port[j] = mask->l4.port[j];
      }

      if ((mask->l4.proto < min_tuple->l4.proto))
        min_tuple->l4.proto = mask->l4.proto;

      if(mask->pkt.as_u64 < min_tuple->pkt.as_u64)
        min_tuple->pkt.as_u64 = mask->pkt.as_u64;

      for(j=0; j<2; j++){
        if(is_ip6)
          ensure_ip6_max_addr(&max_tuple->ip6_addr[j], &mask->ip6_addr[j]);
        else
          ensure_ip4_max_addr(&max_tuple->ip4_addr[j], &mask->ip4_addr[j]);

        if ((mask->l4.port[j] > max_tuple->l4.port[j]))
          max_tuple->l4.port[j] = mask->l4.port[j];
      }

      if ((mask->l4.proto > max_tuple->l4.proto))
        max_tuple->l4.proto = mask->l4.proto;

      if(mask->pkt.as_u64 > max_tuple->pkt.as_u64)
        max_tuple->pkt.as_u64 = mask->pkt.as_u64;
    }

    pae = vec_elt_at_index((*applied_hash_aces), pae->next_applied_entry_index);
  }

  /* Computing the field with max difference between (min/max)_mask */
  int best_dim = -1, best_delta = 0, delta = 0;

  /* SRC_addr dimension */
  if (is_ip6) {
    int i;
    for(i=0; i<2; i++){
      delta += count_bits(max_tuple->ip6_addr[0].as_u64[i]) - count_bits(min_tuple->ip6_addr[0].as_u64[i]);
    }
  } else {
    delta += count_bits(max_tuple->ip4_addr[0].as_u32) - count_bits(min_tuple->ip4_addr[0].as_u32);
  }
  if(delta > best_delta){
    best_delta = delta;
    best_dim = DIM_SRC_ADDR;
  }

  /* DST_addr dimension */
  delta = 0;
  if (is_ip6) {
    int i;
    for(i=0; i<2; i++){
      delta += count_bits(max_tuple->ip6_addr[1].as_u64[i]) - count_bits(min_tuple->ip6_addr[1].as_u64[i]);
    }
  } else {
    delta += count_bits(max_tuple->ip4_addr[1].as_u32) - count_bits(min_tuple->ip4_addr[1].as_u32);
  }
  if(delta > best_delta){
    best_delta = delta;
    best_dim = DIM_DST_ADDR;
  }

  /* SRC_port dimension */
  delta = count_bits(max_tuple->l4.port[0]) - count_bits(min_tuple->l4.port[0]);
  if(delta > best_delta){
    best_delta = delta;
    best_dim = DIM_SRC_PORT;
  }

  /* DST_port dimension */
  delta = count_bits(max_tuple->l4.port[1]) - count_bits(min_tuple->l4.port[1]);
  if(delta > best_delta){
    best_delta = delta;
    best_dim = DIM_DST_PORT;
  }

  /* Proto dimension */
  delta = count_bits(max_tuple->l4.proto) - count_bits(min_tuple->l4.proto);
  if(delta > best_delta){
    best_delta = delta;
    best_dim = DIM_PROTO;
  }

  int shifting = 0; //, ipv4_block = 0;
  switch(best_dim){
    case DIM_SRC_ADDR:
      shifting = (best_delta)/2; // FIXME IPV4-only
      // ipv4_block = count_bits(max_tuple->ip4_addr[0].as_u32);
      min_tuple->ip4_addr[0].as_u32 =
        clib_host_to_net_u32((clib_net_to_host_u32(max_tuple->ip4_addr[0].as_u32) << (shifting))&0xFFFFFFFF);
      break;
    case DIM_DST_ADDR:
      shifting = (best_delta)/2;
/*
      ipv4_block = count_bits(max_tuple->addr[1].as_u64[1]);
      if(ipv4_block > shifting)
        min_tuple->addr[1].as_u64[1] =
          clib_host_to_net_u64((clib_net_to_host_u64(max_tuple->addr[1].as_u64[1]) << (shifting))&0xFFFFFFFF);
      else{
        shifting = shifting - ipv4_block;
        min_tuple->addr[1].as_u64[1] = 0;
        min_tuple->addr[1].as_u64[0] =
          clib_host_to_net_u64((clib_net_to_host_u64(max_tuple->addr[1].as_u64[0]) << (shifting))&0xFFFFFFFF);
      }
*/
      min_tuple->ip4_addr[1].as_u32 =
        clib_host_to_net_u32((clib_net_to_host_u32(max_tuple->ip4_addr[1].as_u32) << (shifting))&0xFFFFFFFF);
      break;
    case DIM_SRC_PORT:
      min_tuple->l4.port[0] = max_tuple->l4.port[0] << (best_delta)/2;
      break;
    case DIM_DST_PORT:
      min_tuple->l4.port[1] = max_tuple->l4.port[1] << (best_delta)/2;
      break;
    case DIM_PROTO:
      min_tuple->l4.proto = max_tuple->l4.proto << (best_delta)/2;
      break;
    default:
      relax_tuple(min_tuple, is_ip6, 1);
      break;
  }

  min_tuple->pkt.is_nonfirst_fragment = 0;
  u32 new_mask_type_index = assign_mask_type_index(am, min_tuple);

  hash_applied_mask_info_t **hash_applied_mask_info_vec = vec_elt_at_index(am->hash_applied_mask_info_vec_by_lc_index, lc_index);

  hash_applied_mask_info_t *minfo;
  /* search in the ordered vector if mask_type_index is already there */
  int search;
  for (search=0; search < vec_len((*hash_applied_mask_info_vec)); search++){
    minfo = vec_elt_at_index((*hash_applied_mask_info_vec), search);
    if(minfo->mask_type_index == new_mask_type_index)
      break;
  }

  vec_validate((*hash_applied_mask_info_vec), search);
  minfo = vec_elt_at_index((*hash_applied_mask_info_vec), search);
  minfo->mask_type_index = new_mask_type_index;
  minfo->num_entries = 0;
  minfo->max_collisions = 0;
  minfo->first_rule_index = ~0;

  DBG( "TM-split_partition - mask type index-assigned!! -> %d", new_mask_type_index);

  if(coll_mask_type_index == new_mask_type_index){
    //vlib_cli_output(vm, "TM-There are collisions over threshold, but i'm not able to split! %d %d", coll_mask_type_index, new_mask_type_index);
    return;
  }

  /* populate the new partition */
  DBG( "TM-Populate new partition");
  u32 r_ace_index = first_index;

  // for(i=0; i<collisions; i++){
  for(r_ace_index=0; r_ace_index < vec_len((*applied_hash_aces)); r_ace_index++) {

    applied_hash_ace_entry_t *pop_pae = vec_elt_at_index((*applied_hash_aces), r_ace_index);
    DBG( "TM-Population-collision: base_ace:%d (ace_mask:%d, first_collision_mask:%d)",
         pop_pae->ace_index, pop_pae->mask_type_index, coll_mask_type_index);

    if(pop_pae->mask_type_index != coll_mask_type_index) continue;
    u32 next_index = pop_pae->next_applied_entry_index;

    ace_info = vec_elt_at_index(ha->rules, pop_pae->hash_ace_info_index);
    mte = vec_elt_at_index(am->ace_mask_type_pool, ace_info->base_mask_type_index);
    /* can the rule be inserted under the new, more specific mask? */
    //mte = vec_elt_at_index(am->ace_mask_type_pool, pop_pae->mask_type_index);
    fa_5tuple_t *pop_mask = &mte->mask;

    if(!first_mask_contains_second_mask(is_ip6, min_tuple, pop_mask)) continue;
    DBG( "TM-new partition can insert -> applied_ace:%d", r_ace_index);

    /* delete and re-insert in the new format */
    deactivate_applied_ace_hash_entry(am, lc_index, applied_hash_aces, r_ace_index);

    /* insert the new entry */
    pop_pae->mask_type_index = new_mask_type_index;

    activate_applied_ace_hash_entry(am, lc_index, applied_hash_aces, r_ace_index);

    r_ace_index = next_index;
  }

  DBG( "TM-Populate new partition-END");
  DBG( "TM-split_partition - END");
}