/*
 *------------------------------------------------------------------
 * Copyright (c) 2017 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */
#include <netinet/in.h>

#include <vlibapi/api.h>
#include <vlibmemory/api.h>

#include <vlib/vlib.h>
#include <vnet/vnet.h>
#include <vppinfra/error.h>
#include <vnet/plugin/plugin.h>

#include <vppinfra/bihash_48_8.h>

#include "hash_lookup.h"
#include "hash_lookup_private.h"
always_inline applied_hash_ace_entry_t **
get_applied_hash_aces (acl_main_t * am, u32 lc_index)
{
  applied_hash_ace_entry_t **applied_hash_aces =
    vec_elt_at_index (am->hash_entry_vec_by_lc_index, lc_index);

  return applied_hash_aces;
}
static void
hashtable_add_del (acl_main_t * am, clib_bihash_kv_48_8_t * kv, int is_add)
{
  DBG ("HASH ADD/DEL: %016llx %016llx %016llx %016llx %016llx %016llx %016llx add %d",
       kv->key[0], kv->key[1], kv->key[2],
       kv->key[3], kv->key[4], kv->key[5], kv->value, is_add);
  BV (clib_bihash_add_del) (&am->acl_lookup_hash, kv, is_add);
}
/*
 * TupleMerge
 *
 * Initial adaptation by Valerio Bruschi (valerio.bruschi@telecom-paristech.fr)
 * based on the TupleMerge [1] simulator kindly made available
 * by James Daly (dalyjamese@gmail.com) and Eric Torng (torng@cse.msu.edu)
 * ( http://www.cse.msu.edu/~dalyjame/ or http://www.cse.msu.edu/~torng/ ),
 * refactoring by Andrew Yourtchenko.
 *
 * [1] James Daly, Eric Torng, "TupleMerge: Building Online Packet Classifiers
 * by Omitting Bits", in Proc. IEEE ICCCN 2017, pp. 1-10.
 */
/*
 * Check whether mask2 is contained by mask1: every bit set in the
 * (wider) mask1 must also be set in the (more specific) mask2.
 */
static int
first_mask_contains_second_mask (int is_ip6, fa_5tuple_t * mask1,
				 fa_5tuple_t * mask2)
{
  int i;
  if (is_ip6)
    {
      for (i = 0; i < 2; i++)
	{
	  if ((mask1->ip6_addr[0].as_u64[i] & mask2->ip6_addr[0].as_u64[i]) !=
	      mask1->ip6_addr[0].as_u64[i])
	    return 0;
	  if ((mask1->ip6_addr[1].as_u64[i] & mask2->ip6_addr[1].as_u64[i]) !=
	      mask1->ip6_addr[1].as_u64[i])
	    return 0;
	}
    }
  else
    {
      /* check the pads, both masks must have them zero */
      u32 padcheck = 0;
      for (i = 0; i < 6; i++)
	{
	  padcheck |= mask1->l3_zero_pad[i];
	  padcheck |= mask2->l3_zero_pad[i];
	}
      if (padcheck != 0)
	return 0;
      if ((mask1->ip4_addr[0].as_u32 & mask2->ip4_addr[0].as_u32) !=
	  mask1->ip4_addr[0].as_u32)
	return 0;
      if ((mask1->ip4_addr[1].as_u32 & mask2->ip4_addr[1].as_u32) !=
	  mask1->ip4_addr[1].as_u32)
	return 0;
    }

  /* be careful: the ports may not be exact-match */
  if ((mask1->l4.as_u64 & mask2->l4.as_u64) != mask1->l4.as_u64)
    return 0;
  if ((mask1->pkt.as_u64 & mask2->pkt.as_u64) != mask1->pkt.as_u64)
    return 0;
  return 1;
}
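/*
 * Illustrative example (hypothetical values, not part of the code):
 * an IPv4 table mask of /16 "contains" a rule mask of /24, because
 * every bit set in the wider mask is also set in the narrower one:
 *
 *   mask1->ip4_addr[0].as_u32 = clib_host_to_net_u32 (0xffff0000);  // /16
 *   mask2->ip4_addr[0].as_u32 = clib_host_to_net_u32 (0xffffff00);  // /24
 *   // (0xffff0000 & 0xffffff00) == 0xffff0000 -> containment holds
 *
 * With the operands swapped (/24 as mask1, /16 as mask2) the check
 * fails, since (0xffffff00 & 0xffff0000) != 0xffffff00.
 */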
/*
 * Consider the situation when we have to create a new table
 * T for a given rule R. This occurs for the first rule inserted and
 * for later rules if it is incompatible with all existing tables.
 * In this event, we need to determine mT for a new table.
 * Setting mT = mR is not a good strategy; if another similar,
 * but slightly less specific, rule appears we will be unable to
 * add it to T and will thus have to create another new table. We
 * thus consider two factors: is the rule more strongly aligned
 * with source or destination addresses (usually the two most
 * important fields) and how much slack needs to be given to
 * allow for other rules. If the source and destination addresses
 * are close together (within 4 bits for our experiments), we use
 * both of them. Otherwise, we drop the smaller (less specific)
 * address and its associated port field from consideration; R is
 * predominantly aligned with one of the two fields and should
 * be grouped with other similar rules. This is similar to TSS
 * dropping port fields, but since it is based on observable rule
 * characteristics it is more likely to keep important fields and
 * discard less useful ones.
 *
 * We then look at the absolute lengths of the addresses. If
 * the address is long, we are more likely to try to add shorter
 * lengths and likewise the reverse. We thus remove a few bits
 * from both address fields with more bits removed from longer
 * addresses. For 32 bit addresses, we remove 4 bits, 3 for more
 * than 24, 2 for more than 16, and so on (so 8 and fewer bits
 * don't have any removed). We only do this for prefix fields like
 * addresses; both range fields (like ports) and exact match fields
 * (like protocol) should remain as they are.
 */
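/*
 * Worked example of the scheme described above (hypothetical prefix
 * lengths): a /32 source mask would lose 4 bits and become /28, a /26
 * would lose 3 bits and become /23, and an /8 would stay /8. Note that
 * the implementation below uses its own shift table (6/5/4/2 bits per
 * relax level), so the actual relaxed prefixes differ from the paper's
 * example values.
 */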
static u32
shift_ip4_if (u32 mask, u32 thresh, int numshifts, u32 else_val)
{
  if (clib_net_to_host_u32 (mask) > thresh)
    return clib_host_to_net_u32 ((clib_net_to_host_u32 (mask) << numshifts) & 0xFFFFFFFF);
  else
    return else_val;
}
static void
relax_ip4_addr (ip4_address_t * ip4_mask, int relax2)
{
  int shifts_per_relax[2][4] = { {6, 5, 4, 2}, {3, 2, 1, 1} };

  int *shifts = shifts_per_relax[relax2];
  if (ip4_mask->as_u32 == 0xffffffff)
    ip4_mask->as_u32 =
      clib_host_to_net_u32 ((clib_net_to_host_u32 (ip4_mask->as_u32) << shifts[0]) & 0xFFFFFFFF);
  else
    ip4_mask->as_u32 =
      shift_ip4_if (ip4_mask->as_u32, 0xffffff00, shifts[1],
	shift_ip4_if (ip4_mask->as_u32, 0xffff0000, shifts[2],
	  shift_ip4_if (ip4_mask->as_u32, 0xff000000, shifts[3], ip4_mask->as_u32)));
}
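/*
 * Illustrative trace (hypothetical values, relax2 == 0):
 *
 *   ip4_address_t a = { .as_u32 = clib_host_to_net_u32 (0xffffff00) };  // /24
 *   relax_ip4_addr (&a, 0);
 *   // 0xffffff00 is not > 0xffffff00, but it is > 0xffff0000,
 *   // so the mask is shifted left by shifts[2] == 4 bits: /24 -> /20.
 *
 * A full host mask (/32) takes the first branch and is shifted by
 * shifts[0] == 6 bits, becoming a /26.
 */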
static void
relax_ip6_addr (ip6_address_t * ip6_mask, int relax2)
{
  /*
   * This "better than nothing" relax logic is based on heuristics
   * from IPv6 knowledge, and may not be optimal.
   * Some further tuning may be needed in the future.
   */
  if (ip6_mask->as_u64[0] == 0xffffffffffffffffULL)
    {
      if (ip6_mask->as_u64[1] == 0xffffffffffffffffULL)
	{
	  /* relax a /128 down to /64 - likely to have more hosts */
	  ip6_mask->as_u64[1] = 0;
	}
      else if (ip6_mask->as_u64[1] == 0)
	{
	  /* relax a /64 down to /56 - likely to have more subnets */
	  ip6_mask->as_u64[0] = clib_host_to_net_u64 (0xffffffffffffff00ULL);
	}
    }
}
static void
relax_tuple (fa_5tuple_t * mask, int is_ip6, int relax2)
{
  fa_5tuple_t save_mask = *mask;

  int counter_s = 0, counter_d = 0;
  if (is_ip6)
    {
      int i;
      for (i = 0; i < 2; i++)
	{
	  counter_s += count_bits (mask->ip6_addr[0].as_u64[i]);
	  counter_d += count_bits (mask->ip6_addr[1].as_u64[i]);
	}
    }
  else
    {
      counter_s += count_bits (mask->ip4_addr[0].as_u32);
      counter_d += count_bits (mask->ip4_addr[1].as_u32);
    }

  /*
   * Is the rule more strongly aligned with source or destination addresses
   * (usually the two most important fields), and how much slack needs to be
   * given to allow for other rules? If the source and destination addresses
   * are close together (within 4 bits for our experiments), we use both of them.
   * Otherwise, we drop the smaller (less specific) address and its associated
   * port field from consideration.
   */
  const int deltaThreshold = 4;
  /* const int deltaThreshold = 8; if IPV6? */
  int delta = counter_s - counter_d;
  if (-delta > deltaThreshold)
    {
      if (is_ip6)
	mask->ip6_addr[0].as_u64[1] = mask->ip6_addr[0].as_u64[0] = 0;
      else
	mask->ip4_addr[0].as_u32 = 0;
      mask->l4.port[0] = 0;
    }
  else if (delta > deltaThreshold)
    {
      if (is_ip6)
	mask->ip6_addr[1].as_u64[1] = mask->ip6_addr[1].as_u64[0] = 0;
      else
	mask->ip4_addr[1].as_u32 = 0;
      mask->l4.port[1] = 0;
    }

  if (is_ip6)
    {
      relax_ip6_addr (&mask->ip6_addr[0], relax2);
      relax_ip6_addr (&mask->ip6_addr[1], relax2);
    }
  else
    {
      relax_ip4_addr (&mask->ip4_addr[0], relax2);
      relax_ip4_addr (&mask->ip4_addr[1], relax2);
    }
  mask->pkt.is_nonfirst_fragment = 0;
  mask->pkt.l4_valid = 0;
  if (!first_mask_contains_second_mask (is_ip6, mask, &save_mask))
    {
      DBG ("TM-relaxing-ERROR");
      *mask = save_mask;
    }
  DBG ("TM-relaxing-end");
}
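/*
 * Illustrative example (hypothetical rule, not part of the code):
 * for an IPv4 rule with a /32 source and an /8 destination,
 * counter_s == 32 and counter_d == 8, so delta == 24 exceeds
 * deltaThreshold. The destination address and destination port are
 * dropped from the mask entirely, and the remaining /32 source is
 * then relaxed to /26 by relax_ip4_addr(), producing a table mask
 * that similar source-aligned rules can share.
 */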
static u32
find_mask_type_index (acl_main_t * am, fa_5tuple_t * mask)
{
  ace_mask_type_entry_t *mte;
  pool_foreach (mte, am->ace_mask_type_pool)
   {
    if (memcmp (&mte->mask, mask, sizeof (*mask)) == 0)
      return (mte - am->ace_mask_type_pool);
  }
  return ~0;
}
static u32
assign_mask_type_index (acl_main_t * am, fa_5tuple_t * mask)
{
  u32 mask_type_index = find_mask_type_index (am, mask);
  ace_mask_type_entry_t *mte;
  if (~0 == mask_type_index)
    {
      pool_get_aligned (am->ace_mask_type_pool, mte, CLIB_CACHE_LINE_BYTES);
      mask_type_index = mte - am->ace_mask_type_pool;
      clib_memcpy_fast (&mte->mask, mask, sizeof (mte->mask));
      mte->refcount = 0;

      /*
       * We can use only 16 bits, since the match carries only a u16 field.
       * Realistically, once you get to 64K of mask types, it is a huge
       * problem anyway, so we might as well stop half way.
       */
      ASSERT (mask_type_index < 32768);
    }
  mte = am->ace_mask_type_pool + mask_type_index;
  mte->refcount++;
  DBG0 ("ASSIGN MTE index %d new refcount %d", mask_type_index, mte->refcount);
  return mask_type_index;
}
static void
lock_mask_type_index (acl_main_t * am, u32 mask_type_index)
{
  DBG0 ("LOCK MTE index %d", mask_type_index);
  ace_mask_type_entry_t *mte =
    pool_elt_at_index (am->ace_mask_type_pool, mask_type_index);
  mte->refcount++;
  DBG0 ("LOCK MTE index %d new refcount %d", mask_type_index, mte->refcount);
}
static void
release_mask_type_index (acl_main_t * am, u32 mask_type_index)
{
  DBG0 ("RELEASE MTE index %d", mask_type_index);
  ace_mask_type_entry_t *mte =
    pool_elt_at_index (am->ace_mask_type_pool, mask_type_index);
  mte->refcount--;
  DBG0 ("RELEASE MTE index %d new refcount %d", mask_type_index, mte->refcount);
  if (mte->refcount == 0)
    {
      /* we are not using this entry anymore */
      clib_memset (mte, 0xae, sizeof (*mte));
      pool_put (am->ace_mask_type_pool, mte);
    }
}
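/*
 * Refcount lifecycle, illustrated (hypothetical sequence): the first
 * assign_mask_type_index() for a given mask allocates a pool entry
 * with refcount 1; every further assign or lock_mask_type_index()
 * increments it; each release_mask_type_index() decrements it, and
 * the entry is poisoned (0xae) and returned to the pool only when
 * the count reaches zero:
 *
 *   u32 mti = assign_mask_type_index (am, &mask);  // refcount == 1
 *   lock_mask_type_index (am, mti);                // refcount == 2
 *   release_mask_type_index (am, mti);             // refcount == 1
 *   release_mask_type_index (am, mti);             // entry freed
 */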
static u32
tm_assign_mask_type_index (acl_main_t * am, fa_5tuple_t * mask, int is_ip6,
			   u32 lc_index)
{
  u32 mask_type_index = ~0;
  u32 for_mask_type_index = ~0;
  ace_mask_type_entry_t *mte = 0;
  int order_index;
  /* look for an existing mask comparable with the one in input */

  hash_applied_mask_info_t **hash_applied_mask_info_vec =
    vec_elt_at_index (am->hash_applied_mask_info_vec_by_lc_index, lc_index);
  hash_applied_mask_info_t *minfo;

  if (vec_len (*hash_applied_mask_info_vec) > 0)
    {
      for (order_index = vec_len ((*hash_applied_mask_info_vec)) - 1;
	   order_index >= 0; order_index--)
	{
	  minfo = vec_elt_at_index ((*hash_applied_mask_info_vec), order_index);
	  for_mask_type_index = minfo->mask_type_index;
	  mte = vec_elt_at_index (am->ace_mask_type_pool, for_mask_type_index);
	  if (first_mask_contains_second_mask (is_ip6, &mte->mask, mask))
	    {
	      mask_type_index = (mte - am->ace_mask_type_pool);
	      lock_mask_type_index (am, mask_type_index);
	      break;
	    }
	}
    }

  if (~0 == mask_type_index)
    {
      /* if no mask is found, use a relaxed version of the original one,
         so that it can also be used by new ace entries */
      DBG ("TM-assigning mask type index-new one");
      fa_5tuple_t relaxed_mask = *mask;
      relax_tuple (&relaxed_mask, is_ip6, 0);
      mask_type_index = assign_mask_type_index (am, &relaxed_mask);

      hash_applied_mask_info_t **hash_applied_mask_info_vec =
	vec_elt_at_index (am->hash_applied_mask_info_vec_by_lc_index, lc_index);

      int spot = vec_len ((*hash_applied_mask_info_vec));
      vec_validate ((*hash_applied_mask_info_vec), spot);
      minfo = vec_elt_at_index ((*hash_applied_mask_info_vec), spot);
      minfo->mask_type_index = mask_type_index;
      minfo->num_entries = 0;
      minfo->max_collisions = 0;
      minfo->first_rule_index = ~0;
    }

  /* see the note in assign_mask_type_index(): the match carries only a u16 */
  ASSERT (mask_type_index < 32768);
  mte = am->ace_mask_type_pool + mask_type_index;
  DBG0 ("TM-ASSIGN MTE index %d new refcount %d", mask_type_index, mte->refcount);
  return mask_type_index;
}
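/*
 * The TupleMerge assignment in a nutshell (illustrative summary): walk
 * the per-lc_index mask list from the most recently added mask
 * backwards; the first applied mask that "contains" the rule's mask is
 * reused and its refcount bumped. Only if none fits is a new, relaxed
 * mask created, so a burst of similar rules typically ends up sharing
 * a single hash table instead of each getting its own.
 */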
static void
fill_applied_hash_ace_kv (acl_main_t * am,
			  applied_hash_ace_entry_t ** applied_hash_aces,
			  u32 lc_index,
			  u32 new_index, clib_bihash_kv_48_8_t * kv)
{
  fa_5tuple_t *kv_key = (fa_5tuple_t *) kv->key;
  hash_acl_lookup_value_t *kv_val = (hash_acl_lookup_value_t *) & kv->value;
  applied_hash_ace_entry_t *pae =
    vec_elt_at_index ((*applied_hash_aces), new_index);
  hash_acl_info_t *ha = vec_elt_at_index (am->hash_acl_infos, pae->acl_index);

  /* apply the mask to the ace key */
  hash_ace_info_t *ace_info = vec_elt_at_index (ha->rules, pae->hash_ace_info_index);
  ace_mask_type_entry_t *mte =
    vec_elt_at_index (am->ace_mask_type_pool, pae->mask_type_index);

  u64 *pmatch = (u64 *) & ace_info->match;
  u64 *pmask = (u64 *) & mte->mask;
  u64 *pkey = (u64 *) kv->key;

  *pkey++ = *pmatch++ & *pmask++;
  *pkey++ = *pmatch++ & *pmask++;
  *pkey++ = *pmatch++ & *pmask++;
  *pkey++ = *pmatch++ & *pmask++;
  *pkey++ = *pmatch++ & *pmask++;
  *pkey++ = *pmatch++ & *pmask++;

  kv_key->pkt.mask_type_index_lsb = pae->mask_type_index;
  kv_key->pkt.lc_index = lc_index;
  kv_val->as_u64 = 0;
  kv_val->applied_entry_index = new_index;
}
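/*
 * Key construction, illustrated (hypothetical rule): for an ACE
 * matching 192.168.1.0/24, the stored match already holds the full
 * address and the mask entry holds 0xffffff00, so the six u64 AND
 * operations above leave 192.168.1.0 in the key and zero out the host
 * bits. At lookup time the packet 5-tuple is ANDed with the same
 * mask, so both sides hash to identical 48-byte keys.
 */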
static void
add_del_hashtable_entry (acl_main_t * am,
			 u32 lc_index,
			 applied_hash_ace_entry_t ** applied_hash_aces,
			 u32 index, int is_add)
{
  clib_bihash_kv_48_8_t kv;

  fill_applied_hash_ace_kv (am, applied_hash_aces, lc_index, index, &kv);
  hashtable_add_del (am, &kv, is_add);
}
static void
remake_hash_applied_mask_info_vec (acl_main_t * am,
				   applied_hash_ace_entry_t **
				   applied_hash_aces, u32 lc_index)
{
  DBG0 ("remake applied hash mask info lc_index %d", lc_index);
  hash_applied_mask_info_t *new_hash_applied_mask_info_vec =
    vec_new (hash_applied_mask_info_t, 0);

  hash_applied_mask_info_t *minfo;
  int i;
  for (i = 0; i < vec_len ((*applied_hash_aces)); i++)
    {
      applied_hash_ace_entry_t *pae =
	vec_elt_at_index ((*applied_hash_aces), i);

      /* check if mask_type_index is already there */
      u32 new_pointer = vec_len (new_hash_applied_mask_info_vec);
      int search;
      for (search = 0; search < vec_len (new_hash_applied_mask_info_vec);
	   search++)
	{
	  minfo = vec_elt_at_index (new_hash_applied_mask_info_vec, search);
	  if (minfo->mask_type_index == pae->mask_type_index)
	    break;
	}

      vec_validate ((new_hash_applied_mask_info_vec), search);
      minfo = vec_elt_at_index ((new_hash_applied_mask_info_vec), search);
      if (search == new_pointer)
	{
	  DBG0 ("remaking index %d", search);
	  minfo->mask_type_index = pae->mask_type_index;
	  minfo->num_entries = 0;
	  minfo->max_collisions = 0;
	  minfo->first_rule_index = ~0;
	}

      minfo->num_entries = minfo->num_entries + 1;

      if (vec_len (pae->colliding_rules) > minfo->max_collisions)
	minfo->max_collisions = vec_len (pae->colliding_rules);

      if (minfo->first_rule_index > i)
	minfo->first_rule_index = i;
    }

  hash_applied_mask_info_t **hash_applied_mask_info_vec =
    vec_elt_at_index (am->hash_applied_mask_info_vec_by_lc_index, lc_index);

  vec_free ((*hash_applied_mask_info_vec));
  (*hash_applied_mask_info_vec) = new_hash_applied_mask_info_vec;
}
static void
vec_del_collision_rule (collision_match_rule_t ** pvec,
			u32 applied_entry_index)
{
  u32 i = 0;
  while (i < _vec_len ((*pvec)))
    {
      collision_match_rule_t *cr = vec_elt_at_index ((*pvec), i);
      if (cr->applied_entry_index == applied_entry_index)
	{
	  /* vec_del1 ((*pvec), i) would be more efficient but would reorder the elements. */
	  vec_delete ((*pvec), 1, i);
	  DBG0 ("vec_del_collision_rule deleting one at index %d", i);
	}
      else
	{
	  i++;
	}
    }
}
static void
acl_plugin_print_pae (vlib_main_t * vm, int j, applied_hash_ace_entry_t * pae);
static void
del_colliding_rule (applied_hash_ace_entry_t ** applied_hash_aces,
		    u32 head_index, u32 applied_entry_index)
{
  DBG0 ("DEL COLLIDING RULE: head_index %d applied index %d", head_index,
	applied_entry_index);

  applied_hash_ace_entry_t *head_pae =
    vec_elt_at_index ((*applied_hash_aces), head_index);
  if (ACL_HASH_LOOKUP_DEBUG > 0)
    acl_plugin_print_pae (acl_main.vlib_main, head_index, head_pae);
  vec_del_collision_rule (&head_pae->colliding_rules, applied_entry_index);
  if (vec_len (head_pae->colliding_rules) == 0)
    {
      vec_free (head_pae->colliding_rules);
    }
  if (ACL_HASH_LOOKUP_DEBUG > 0)
    acl_plugin_print_pae (acl_main.vlib_main, head_index, head_pae);
}
static void
add_colliding_rule (acl_main_t * am,
		    applied_hash_ace_entry_t ** applied_hash_aces,
		    u32 head_index, u32 applied_entry_index)
{
  applied_hash_ace_entry_t *head_pae =
    vec_elt_at_index ((*applied_hash_aces), head_index);
  applied_hash_ace_entry_t *pae =
    vec_elt_at_index ((*applied_hash_aces), applied_entry_index);
  DBG0 ("ADD COLLIDING RULE: head_index %d applied index %d", head_index,
	applied_entry_index);
  if (ACL_HASH_LOOKUP_DEBUG > 0)
    acl_plugin_print_pae (acl_main.vlib_main, head_index, head_pae);

  collision_match_rule_t cr;

  cr.acl_index = pae->acl_index;
  cr.ace_index = pae->ace_index;
  cr.acl_position = pae->acl_position;
  cr.applied_entry_index = applied_entry_index;
  cr.rule = am->acls[pae->acl_index].rules[pae->ace_index];
  pae->collision_head_ae_index = head_index;
  vec_add1 (head_pae->colliding_rules, cr);
  if (ACL_HASH_LOOKUP_DEBUG > 0)
    acl_plugin_print_pae (acl_main.vlib_main, head_index, head_pae);
}
static u32
activate_applied_ace_hash_entry (acl_main_t * am,
				 u32 lc_index,
				 applied_hash_ace_entry_t ** applied_hash_aces,
				 u32 new_index)
{
  clib_bihash_kv_48_8_t kv;
  ASSERT (new_index != ~0);
  DBG ("activate_applied_ace_hash_entry lc_index %d new_index %d", lc_index, new_index);

  fill_applied_hash_ace_kv (am, applied_hash_aces, lc_index, new_index, &kv);

  DBG ("APPLY ADD KV: %016llx %016llx %016llx %016llx %016llx %016llx",
       kv.key[0], kv.key[1], kv.key[2],
       kv.key[3], kv.key[4], kv.key[5]);

  clib_bihash_kv_48_8_t result;
  hash_acl_lookup_value_t *result_val = (hash_acl_lookup_value_t *) & result.value;
  int res = BV (clib_bihash_search) (&am->acl_lookup_hash, &kv, &result);
  ASSERT (new_index != ~0);
  ASSERT (new_index < vec_len ((*applied_hash_aces)));
  if (res == 0)
    {
      u32 first_index = result_val->applied_entry_index;
      ASSERT (first_index != ~0);
      ASSERT (first_index < vec_len ((*applied_hash_aces)));
      /* There already exists an entry or more. Append at the end. */
      DBG ("A key already exists, with applied entry index: %d", first_index);
      add_colliding_rule (am, applied_hash_aces, first_index, new_index);
      return first_index;
    }
  else
    {
      /* It's the very first entry */
      hashtable_add_del (am, &kv, 1);
      ASSERT (new_index != ~0);
      add_colliding_rule (am, applied_hash_aces, new_index, new_index);
      return new_index;
    }
}
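/*
 * Collision-chain behavior, illustrated (hypothetical indices): if
 * entries 5 and 9 mask to the same key, activating 5 first creates
 * the bihash entry pointing at 5 and makes 5 its own collision head;
 * activating 9 then finds the existing entry and merely appends a
 * collision_match_rule_t for 9 to entry 5's colliding_rules vector,
 * returning head index 5. The lookup path later evaluates the rules
 * on that vector to pick the winner among a colliding set.
 */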
static void
assign_mask_type_index_to_pae (acl_main_t * am, u32 lc_index, int is_ip6,
			       applied_hash_ace_entry_t * pae)
{
  hash_acl_info_t *ha = vec_elt_at_index (am->hash_acl_infos, pae->acl_index);
  hash_ace_info_t *ace_info = vec_elt_at_index (ha->rules, pae->hash_ace_info_index);

  ace_mask_type_entry_t *mte;
  fa_5tuple_t mask;
  /*
   * Start with the base_mask associated with the ace, and essentially copy it.
   * With TupleMerge, a relaxed mask is assigned here instead.
   */
  mte = vec_elt_at_index (am->ace_mask_type_pool, ace_info->base_mask_type_index);
  mask = mte->mask;
  if (am->use_tuple_merge)
    pae->mask_type_index = tm_assign_mask_type_index (am, &mask, is_ip6, lc_index);
  else
    pae->mask_type_index = assign_mask_type_index (am, &mask);
}
static void
split_partition (acl_main_t * am, u32 first_index,
		 u32 lc_index, int is_ip6);
static void
check_collision_count_and_maybe_split (acl_main_t * am, u32 lc_index,
				       int is_ip6, u32 first_index)
{
  applied_hash_ace_entry_t **applied_hash_aces = get_applied_hash_aces (am, lc_index);
  applied_hash_ace_entry_t *first_pae =
    vec_elt_at_index ((*applied_hash_aces), first_index);
  if (vec_len (first_pae->colliding_rules) > am->tuple_merge_split_threshold)
    {
      split_partition (am, first_index, lc_index, is_ip6);
    }
}
void
hash_acl_apply (acl_main_t * am, u32 lc_index, int acl_index, u32 acl_position)
{
  int i;

  DBG0 ("HASH ACL apply: lc_index %d acl %d", lc_index, acl_index);
  if (!am->acl_lookup_hash_initialized)
    {
      BV (clib_bihash_init) (&am->acl_lookup_hash, "ACL plugin rule lookup bihash",
			     am->hash_lookup_hash_buckets, am->hash_lookup_hash_memory);
      am->acl_lookup_hash_initialized = 1;
    }

  vec_validate (am->hash_entry_vec_by_lc_index, lc_index);
  vec_validate (am->hash_acl_infos, acl_index);
  applied_hash_ace_entry_t **applied_hash_aces = get_applied_hash_aces (am, lc_index);

  hash_acl_info_t *ha = vec_elt_at_index (am->hash_acl_infos, acl_index);
  u32 **hash_acl_applied_lc_index = &ha->lc_index_list;

  int base_offset = vec_len (*applied_hash_aces);

  /* Update the bitmap of the mask types with which the lookup
     needs to happen for the ACLs applied to this lc_index */
  applied_hash_acl_info_t **applied_hash_acls = &am->applied_hash_acl_info_by_lc_index;
  vec_validate ((*applied_hash_acls), lc_index);
  applied_hash_acl_info_t *pal = vec_elt_at_index ((*applied_hash_acls), lc_index);

  /* ensure the list of applied hash acls is initialized and add this acl# to it */
  u32 index = vec_search (pal->applied_acls, acl_index);
  if (index != ~0)
    {
      clib_warning ("BUG: trying to apply twice acl_index %d on lc_index %d, according to lc",
		    acl_index, lc_index);
      return;
    }
  vec_add1 (pal->applied_acls, acl_index);
  u32 index2 = vec_search ((*hash_acl_applied_lc_index), lc_index);
  if (index2 != ~0)
    {
      clib_warning ("BUG: trying to apply twice acl_index %d on lc_index %d, according to hash h-acl info",
		    acl_index, lc_index);
      return;
    }
  vec_add1 ((*hash_acl_applied_lc_index), lc_index);

  /*
   * If the applied ACL is empty, this code causes a different behavior
   * compared to the linear search: an empty ACL simply falls through to
   * the next ACL, or to the default deny at the end.
   *
   * This is not a problem, because after a vpp-dev discussion
   * the consensus was that it should not be possible to apply a
   * non-existent ACL, and the change adding this code also takes care
   * of that.
   */

  vec_validate (am->hash_applied_mask_info_vec_by_lc_index, lc_index);

  /* since we know (in case of no split) how much we expand, preallocate that space */
  if (vec_len (ha->rules) > 0)
    {
      int old_vec_len = vec_len (*applied_hash_aces);
      vec_validate ((*applied_hash_aces), old_vec_len + vec_len (ha->rules) - 1);
      vec_set_len ((*applied_hash_aces), old_vec_len);
    }

  /* add the rules from the ACL to the hash table for lookup and append to the vector */
  for (i = 0; i < vec_len (ha->rules); i++)
    {
      /*
       * Expand the applied aces vector to fit a new entry.
       * One by one, so as not to upset split_partition() if it is called.
       */
      vec_resize ((*applied_hash_aces), 1);

      int is_ip6 = ha->rules[i].match.pkt.is_ip6;
      u32 new_index = base_offset + i;
      applied_hash_ace_entry_t *pae = vec_elt_at_index ((*applied_hash_aces), new_index);
      pae->acl_index = acl_index;
      pae->ace_index = ha->rules[i].ace_index;
      pae->acl_position = acl_position;
      pae->action = ha->rules[i].action;
      pae->hitcount = 0;
      pae->hash_ace_info_index = i;
      /* we might link it in later */
      pae->collision_head_ae_index = ~0;
      pae->colliding_rules = NULL;
      pae->mask_type_index = ~0;
      assign_mask_type_index_to_pae (am, lc_index, is_ip6, pae);
      u32 first_index = activate_applied_ace_hash_entry (am, lc_index, applied_hash_aces, new_index);
      if (am->use_tuple_merge)
	check_collision_count_and_maybe_split (am, lc_index, is_ip6, first_index);
    }
  remake_hash_applied_mask_info_vec (am, applied_hash_aces, lc_index);
}
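/*
 * Apply flow at a glance (illustrative summary of the function above):
 * validate the per-lc_index and per-ACL vectors, guard against double
 * application, then for each rule allocate an applied entry, pick a
 * mask type (relaxed when TupleMerge is on), insert the masked key
 * into the bihash, and optionally split a partition whose collision
 * list has outgrown tuple_merge_split_threshold. The mask info vector
 * is rebuilt once at the end rather than per rule.
 */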
static u32
find_head_applied_ace_index (applied_hash_ace_entry_t ** applied_hash_aces,
			     u32 curr_index)
{
  ASSERT (curr_index != ~0);
  applied_hash_ace_entry_t *pae =
    vec_elt_at_index ((*applied_hash_aces), curr_index);

  ASSERT (pae->collision_head_ae_index != ~0);
  return pae->collision_head_ae_index;
}
static void
set_collision_head_ae_index (applied_hash_ace_entry_t ** applied_hash_aces,
			     collision_match_rule_t * colliding_rules,
			     u32 new_index)
{
  collision_match_rule_t *cr;
  vec_foreach (cr, colliding_rules)
  {
    applied_hash_ace_entry_t *pae =
      vec_elt_at_index ((*applied_hash_aces), cr->applied_entry_index);
    pae->collision_head_ae_index = new_index;
  }
}
static void
move_applied_ace_hash_entry (acl_main_t * am,
			     u32 lc_index,
			     applied_hash_ace_entry_t ** applied_hash_aces,
			     u32 old_index, u32 new_index)
{
  ASSERT (old_index != ~0);
  ASSERT (new_index != ~0);
  /* move the entry */
  *vec_elt_at_index ((*applied_hash_aces), new_index) =
    *vec_elt_at_index ((*applied_hash_aces), old_index);

  /* update the linkage and hash table if necessary */
  applied_hash_ace_entry_t *pae = vec_elt_at_index ((*applied_hash_aces), old_index);
  applied_hash_ace_entry_t *new_pae = vec_elt_at_index ((*applied_hash_aces), new_index);

  if (ACL_HASH_LOOKUP_DEBUG > 0)
    {
      clib_warning ("Moving pae from %d to %d", old_index, new_index);
      acl_plugin_print_pae (am->vlib_main, old_index, pae);
    }

  if (pae->collision_head_ae_index == old_index)
    {
      /* first entry - so the hash points to it, update */
      add_del_hashtable_entry (am, lc_index, applied_hash_aces, new_index, 1);
    }
  if (new_pae->colliding_rules)
    {
      /* update the information within the collision rule entry */
      ASSERT (vec_len (new_pae->colliding_rules) > 0);
      collision_match_rule_t *cr = vec_elt_at_index (new_pae->colliding_rules, 0);
      ASSERT (cr->applied_entry_index == old_index);
      cr->applied_entry_index = new_index;
      set_collision_head_ae_index (applied_hash_aces, new_pae->colliding_rules, new_index);
    }
  else
    {
      /* find the index in the collision rule entry on the head element */
      u32 head_index = find_head_applied_ace_index (applied_hash_aces, new_index);
      ASSERT (head_index != ~0);
      applied_hash_ace_entry_t *head_pae =
	vec_elt_at_index ((*applied_hash_aces), head_index);
      ASSERT (vec_len (head_pae->colliding_rules) > 0);
      u32 i;
      for (i = 0; i < vec_len (head_pae->colliding_rules); i++)
	{
	  collision_match_rule_t *cr = vec_elt_at_index (head_pae->colliding_rules, i);
	  if (cr->applied_entry_index == old_index)
	    {
	      cr->applied_entry_index = new_index;
	    }
	}
      if (ACL_HASH_LOOKUP_DEBUG > 0)
	{
	  clib_warning ("Head pae at index %d after adjustment", head_index);
	  acl_plugin_print_pae (am->vlib_main, head_index, head_pae);
	}
    }
  /* invalidate the old entry */
  pae->collision_head_ae_index = ~0;
  pae->colliding_rules = NULL;
}
static void
deactivate_applied_ace_hash_entry (acl_main_t * am,
				   u32 lc_index,
				   applied_hash_ace_entry_t ** applied_hash_aces,
				   u32 old_index)
{
  applied_hash_ace_entry_t *pae = vec_elt_at_index ((*applied_hash_aces), old_index);
  DBG ("UNAPPLY DEACTIVATE: lc_index %d applied index %d", lc_index, old_index);
  if (ACL_HASH_LOOKUP_DEBUG > 0)
    {
      clib_warning ("Deactivating pae at index %d", old_index);
      acl_plugin_print_pae (am->vlib_main, old_index, pae);
    }

  if (pae->collision_head_ae_index != old_index)
    {
      DBG ("UNAPPLY = index %d has collision head %d", old_index,
	   pae->collision_head_ae_index);

      u32 head_index = find_head_applied_ace_index (applied_hash_aces, old_index);
      ASSERT (head_index != ~0);
      del_colliding_rule (applied_hash_aces, head_index, old_index);
    }
  else
    {
      /* It was the first entry. We need either to reset the hash entry or delete it */
      /* delete our entry from the collision vector first */
      del_colliding_rule (applied_hash_aces, old_index, old_index);
      if (vec_len (pae->colliding_rules) > 0)
	{
	  u32 next_pae_index = pae->colliding_rules[0].applied_entry_index;
	  applied_hash_ace_entry_t *next_pae =
	    vec_elt_at_index ((*applied_hash_aces), next_pae_index);
	  /* Remove ourselves and transfer the ownership of the colliding rules vector */
	  next_pae->colliding_rules = pae->colliding_rules;
	  set_collision_head_ae_index (applied_hash_aces, next_pae->colliding_rules,
				       next_pae_index);
	  add_del_hashtable_entry (am, lc_index,
				   applied_hash_aces, next_pae_index, 1);
	}
      else
	{
	  /* no next entry, so just delete the entry in the hash table */
	  add_del_hashtable_entry (am, lc_index,
				   applied_hash_aces, old_index, 0);
	}
    }
  DBG0 ("Releasing mask type index %d for pae index %d on lc_index %d",
	pae->mask_type_index, old_index, lc_index);
  release_mask_type_index (am, pae->mask_type_index);
  /* invalidate the old entry */
  pae->mask_type_index = ~0;
  pae->collision_head_ae_index = ~0;
  /* always has to be 0 */
  pae->colliding_rules = NULL;
}
void
hash_acl_unapply (acl_main_t * am, u32 lc_index, int acl_index)
{
  int i;

  DBG0 ("HASH ACL unapply: lc_index %d acl %d", lc_index, acl_index);
  applied_hash_acl_info_t **applied_hash_acls = &am->applied_hash_acl_info_by_lc_index;
  applied_hash_acl_info_t *pal = vec_elt_at_index ((*applied_hash_acls), lc_index);

  hash_acl_info_t *ha = vec_elt_at_index (am->hash_acl_infos, acl_index);
  u32 **hash_acl_applied_lc_index = &ha->lc_index_list;

  if (ACL_HASH_LOOKUP_DEBUG > 0)
    {
      clib_warning ("unapplying acl %d", acl_index);
      acl_plugin_show_tables_mask_type ();
      acl_plugin_show_tables_acl_hash_info (acl_index);
      acl_plugin_show_tables_applied_info (lc_index);
    }

  /* remove this acl# from the list of applied hash acls */
  u32 index = vec_search (pal->applied_acls, acl_index);
  if (index == ~0)
    {
      clib_warning ("BUG: trying to unapply unapplied acl_index %d on lc_index %d, according to lc",
		    acl_index, lc_index);
      return;
    }
  vec_del1 (pal->applied_acls, index);

  u32 index2 = vec_search ((*hash_acl_applied_lc_index), lc_index);
  if (index2 == ~0)
    {
      clib_warning ("BUG: trying to unapply twice acl_index %d on lc_index %d, according to h-acl info",
		    acl_index, lc_index);
      return;
    }
  vec_del1 ((*hash_acl_applied_lc_index), index2);

  applied_hash_ace_entry_t **applied_hash_aces = get_applied_hash_aces (am, lc_index);

  /* find the first applied entry belonging to this ACL */
  for (i = 0; i < vec_len ((*applied_hash_aces)); i++)
    {
      if (vec_elt_at_index (*applied_hash_aces, i)->acl_index == acl_index)
	{
	  DBG ("Found applied ACL#%d at applied index %d", acl_index, i);
	  break;
	}
    }
  if (vec_len ((*applied_hash_aces)) <= i)
    {
      DBG ("Did not find applied ACL#%d at lc_index %d", acl_index, lc_index);
      /* we went all the way without finding any entries. Probably the list was empty. */
      return;
    }

  int base_offset = i;
  int tail_offset = base_offset + vec_len (ha->rules);
  int tail_len = vec_len ((*applied_hash_aces)) - tail_offset;
  DBG ("base_offset: %d, tail_offset: %d, tail_len: %d", base_offset, tail_offset, tail_len);

  for (i = 0; i < vec_len (ha->rules); i++)
    {
      deactivate_applied_ace_hash_entry (am, lc_index,
					 applied_hash_aces, base_offset + i);
    }
  for (i = 0; i < tail_len; i++)
    {
      /* move the entry at tail offset to base offset,
         that is, from (tail_offset+i) -> (base_offset+i) */
      DBG0 ("UNAPPLY MOVE: lc_index %d, applied index %d -> %d", lc_index,
	    tail_offset + i, base_offset + i);
      move_applied_ace_hash_entry (am, lc_index, applied_hash_aces,
				   tail_offset + i, base_offset + i);
    }
  /* trim the end of the vector */
  vec_dec_len ((*applied_hash_aces), vec_len (ha->rules));

  remake_hash_applied_mask_info_vec (am, applied_hash_aces, lc_index);

  if (vec_len ((*applied_hash_aces)) == 0)
    {
      vec_free ((*applied_hash_aces));
    }
}
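/*
 * Compaction, illustrated (hypothetical layout): with applied entries
 * [A0 A1 B0 B1 C0] and ACL B being unapplied, base_offset is 2 and
 * tail_len is 1. B0 and B1 are deactivated, then C0 is moved from
 * index 4 to index 2 (with its hash entry and collision links
 * re-pointed), and the vector is trimmed by two, leaving [A0 A1 C0].
 */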
/*
 * Create the applied ACEs and update the hash table,
 * taking into account that the ACL may not be the last
 * in the vector of applied ACLs.
 *
 * For now, walk from the end of the vector and unapply the ACLs,
 * then apply the one in question and reapply the rest.
 */
void
hash_acl_reapply (acl_main_t * am, u32 lc_index, int acl_index)
{
  acl_lookup_context_t *acontext = pool_elt_at_index (am->acl_lookup_contexts, lc_index);
  u32 **applied_acls = &acontext->acl_indices;
  int i;
  int start_index = vec_search ((*applied_acls), acl_index);

  DBG0 ("Start index for acl %d in lc_index %d is %d", acl_index, lc_index, start_index);
  /*
   * This function is called after we find out the lc_index where the ACL is applied.
   * If the by-lc_index vector does not have the ACL#, then it's a bug.
   */
  ASSERT (start_index < vec_len (*applied_acls));

  /* unapply all the ACLs at the tail side, up to the current one */
  for (i = vec_len (*applied_acls) - 1; i > start_index; i--)
    {
      hash_acl_unapply (am, lc_index, *vec_elt_at_index (*applied_acls, i));
    }
  for (i = start_index; i < vec_len (*applied_acls); i++)
    {
      hash_acl_apply (am, lc_index, *vec_elt_at_index (*applied_acls, i), i);
    }
}
static void
make_ip6_address_mask (ip6_address_t * addr, u8 prefix_len)
{
  ip6_address_mask_from_width (addr, prefix_len);
}
/* Maybe should be moved into the core somewhere */
always_inline void
ip4_address_mask_from_width (ip4_address_t * a, u32 width)
{
  int i, byte, bit, bitnum;
  ASSERT (width <= 32);
  clib_memset (a, 0, sizeof (a[0]));
  for (i = 0; i < width; i++)
    {
      bitnum = (7 - (i & 7));
      byte = i / 8;
      bit = 1 << bitnum;
      a->as_u8[byte] |= bit;
    }
}
static void
make_ip4_address_mask (ip4_address_t * addr, u8 prefix_len)
{
  ip4_address_mask_from_width (addr, prefix_len);
}
static void
make_port_mask (u16 * portmask, u16 port_first, u16 port_last)
{
  if (port_first == port_last)
    {
      /* a single port is representable by a masked value */
      *portmask = 0xffff;
      return;
    }

  *portmask = 0;
}
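/*
 * Illustrative example (hypothetical rule): a rule on exactly port 80
 * gets portmask 0xffff, so the port participates in the hash key; a
 * range like 1024..2047 gets portmask 0, i.e. the port is excluded
 * from the hashed 5-tuple and the range is verified by the full ACE
 * check on the colliding-rules list instead.
 */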
static void
make_mask_and_match_from_rule (fa_5tuple_t * mask, acl_rule_t * r, hash_ace_info_t * hi)
{
  clib_memset (mask, 0, sizeof (*mask));
  clib_memset (&hi->match, 0, sizeof (hi->match));
  hi->action = r->is_permit;

  /* we will need to be matching based on lc_index and mask_type_index when applied */
  mask->pkt.lc_index = ~0;
  /* we will assign the match of mask_type_index later, when we find it */
  mask->pkt.mask_type_index_lsb = ~0;

  mask->pkt.is_ip6 = 1;
  hi->match.pkt.is_ip6 = r->is_ipv6;
  if (r->is_ipv6)
    {
      make_ip6_address_mask (&mask->ip6_addr[0], r->src_prefixlen);
      hi->match.ip6_addr[0] = r->src.ip6;
      make_ip6_address_mask (&mask->ip6_addr[1], r->dst_prefixlen);
      hi->match.ip6_addr[1] = r->dst.ip6;
    }
  else
    {
      clib_memset (hi->match.l3_zero_pad, 0, sizeof (hi->match.l3_zero_pad));
      make_ip4_address_mask (&mask->ip4_addr[0], r->src_prefixlen);
      hi->match.ip4_addr[0] = r->src.ip4;
      make_ip4_address_mask (&mask->ip4_addr[1], r->dst_prefixlen);
      hi->match.ip4_addr[1] = r->dst.ip4;
    }

  if (r->proto != 0)
    {
      mask->l4.proto = ~0;	/* L4 proto needs to be matched */
      hi->match.l4.proto = r->proto;

      /* Calculate the src/dst port masks and make the src/dst port matches accordingly */
      make_port_mask (&mask->l4.port[0], r->src_port_or_type_first, r->src_port_or_type_last);
      hi->match.l4.port[0] = r->src_port_or_type_first & mask->l4.port[0];

      make_port_mask (&mask->l4.port[1], r->dst_port_or_code_first, r->dst_port_or_code_last);
      hi->match.l4.port[1] = r->dst_port_or_code_first & mask->l4.port[1];
      /* L4 info must be valid in order to match */
      mask->pkt.l4_valid = 1;
      hi->match.pkt.l4_valid = 1;
      /* And we must set the mask to check that it is an initial fragment */
      mask->pkt.is_nonfirst_fragment = 1;
      hi->match.pkt.is_nonfirst_fragment = 0;
      if ((r->proto == IPPROTO_TCP) && (r->tcp_flags_mask != 0))
	{
	  /* if we want to match on TCP flags, they must be masked off as well */
	  mask->pkt.tcp_flags = r->tcp_flags_mask;
	  hi->match.pkt.tcp_flags = r->tcp_flags_value;
	  /* and the flags need to be present within the packet being matched */
	  mask->pkt.tcp_flags_valid = 1;
	  hi->match.pkt.tcp_flags_valid = 1;
	}
    }
  /* Sanitize the mask and the match */
  u64 *pmask = (u64 *) mask;
  u64 *pmatch = (u64 *) & hi->match;
  int j;
  for (j = 0; j < 6; j++)
    {
      pmatch[j] = pmatch[j] & pmask[j];
    }
}
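/*
 * End-to-end example (hypothetical rule, for illustration only):
 * "permit tcp 10.0.0.0/8 -> 192.168.1.0/24 dport 443" yields an IPv4
 * mask with /8 src, /24 dst, proto 0xff, src portmask 0 (any port),
 * dst portmask 0xffff, and l4_valid plus is_nonfirst_fragment set in
 * the mask; the match carries 10.0.0.0, 192.168.1.0, proto 6 and dst
 * port 443, with is_nonfirst_fragment zero so only initial fragments
 * can match.
 */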
int
hash_acl_exists (acl_main_t * am, int acl_index)
{
  if (acl_index >= vec_len (am->hash_acl_infos))
    return 0;

  hash_acl_info_t *ha = vec_elt_at_index (am->hash_acl_infos, acl_index);
  return ha->hash_acl_exists;
}
void
hash_acl_add (acl_main_t * am, int acl_index)
{
  DBG ("HASH ACL add : %d", acl_index);
  int i;
  acl_rule_t *acl_rules = am->acls[acl_index].rules;
  vec_validate (am->hash_acl_infos, acl_index);
  hash_acl_info_t *ha = vec_elt_at_index (am->hash_acl_infos, acl_index);
  clib_memset (ha, 0, sizeof (*ha));
  ha->hash_acl_exists = 1;

  /* walk the newly added ACL entries and ensure that for each of them there
     is a mask type, incrementing the reference count for that mask type */

  /* avoid small requests by preallocating the entire vector before running the additions */
  if (vec_len (acl_rules) > 0)
    {
      vec_validate (ha->rules, vec_len (acl_rules) - 1);
      vec_reset_length (ha->rules);
    }
  for (i = 0; i < vec_len (acl_rules); i++)
    {
      hash_ace_info_t ace_info;
      fa_5tuple_t mask;
      clib_memset (&ace_info, 0, sizeof (ace_info));
      ace_info.acl_index = acl_index;
      ace_info.ace_index = i;

      make_mask_and_match_from_rule (&mask, &acl_rules[i], &ace_info);
      mask.pkt.flags_reserved = 0b000;
      ace_info.base_mask_type_index = assign_mask_type_index (am, &mask);
      /* assign the mask type index for matching itself */
      ace_info.match.pkt.mask_type_index_lsb = ace_info.base_mask_type_index;
      DBG ("ACE: %d mask_type_index: %d", i, ace_info.base_mask_type_index);
      vec_add1 (ha->rules, ace_info);
    }
  /*
   * If an ACL is applied somewhere, fill the corresponding lookup data structures.
   * We need to take care in case the ACL is not the last one in the vector of
   * ACLs applied to the interface.
   */
  if (acl_index < vec_len (am->lc_index_vec_by_acl))
    {
      u32 *lc_index;
      vec_foreach (lc_index, am->lc_index_vec_by_acl[acl_index])
      {
	hash_acl_reapply (am, *lc_index, acl_index);
      }
    }
}
void
hash_acl_delete (acl_main_t * am, int acl_index)
{
  DBG0 ("HASH ACL delete : %d", acl_index);
  /*
   * If the ACL is applied somewhere, remove the references to it (call
   * hash_acl_unapply). This is a different behavior from the linear lookup,
   * where an empty ACL is "deny all".
   *
   * However, following a vpp-dev discussion, an ACL that is referenced
   * elsewhere should not be possible to delete, and the change adding this
   * also adds the safeguards to that effect, so this is not a problem.
   *
   * The part to remember is that this routine is called in the process of
   * reapplication during the acl_add_replace() API call - the old acl ruleset
   * is deleted, then the new one is added, without any change in the applied
   * ACLs - so this case has to be handled.
   */
  hash_acl_info_t *ha = vec_elt_at_index (am->hash_acl_infos, acl_index);
  u32 *lc_list_copy = 0;
  {
    u32 *lc_index;
    lc_list_copy = vec_dup (ha->lc_index_list);
    vec_foreach (lc_index, lc_list_copy)
    {
      hash_acl_unapply (am, *lc_index, acl_index);
    }
    vec_free (lc_list_copy);
  }
  vec_free (ha->lc_index_list);

  /* walk the mask types for the ACL about-to-be-deleted, and decrease
   * the reference count, possibly freeing up some of them */
  int i;
  for (i = 0; i < vec_len (ha->rules); i++)
    {
      release_mask_type_index (am, ha->rules[i].base_mask_type_index);
    }
  ha->hash_acl_exists = 0;
  vec_free (ha->rules);
}
void
show_hash_acl_hash (vlib_main_t * vm, acl_main_t * am, u32 verbose)
{
  vlib_cli_output (vm, "\nACL lookup hash table:\n%U\n",
		   BV (format_bihash), &am->acl_lookup_hash, verbose);
}
void
acl_plugin_show_tables_mask_type (void)
{
  acl_main_t *am = &acl_main;
  vlib_main_t *vm = am->vlib_main;
  ace_mask_type_entry_t *mte;

  vlib_cli_output (vm, "Mask-type entries:");
  pool_foreach (mte, am->ace_mask_type_pool)
   {
    vlib_cli_output (vm, "    %3d: %016llx %016llx %016llx %016llx %016llx %016llx refcount %d",
		     mte - am->ace_mask_type_pool,
		     mte->mask.kv_40_8.key[0], mte->mask.kv_40_8.key[1], mte->mask.kv_40_8.key[2],
		     mte->mask.kv_40_8.key[3], mte->mask.kv_40_8.key[4], mte->mask.kv_40_8.value,
		     mte->refcount);
  }
}
void
acl_plugin_show_tables_acl_hash_info (u32 acl_index)
{
  acl_main_t *am = &acl_main;
  vlib_main_t *vm = am->vlib_main;
  u32 i, j;
  u64 *m;
  vlib_cli_output (vm, "Mask-ready ACL representations\n");
  for (i = 0; i < vec_len (am->hash_acl_infos); i++)
    {
      if ((acl_index != ~0) && (acl_index != i))
	{
	  continue;
	}
      hash_acl_info_t *ha = &am->hash_acl_infos[i];
      vlib_cli_output (vm, "acl-index %u bitmask-ready layout\n", i);
      vlib_cli_output (vm, "  applied lc_index list: %U\n",
		       format_vec32, ha->lc_index_list, "%d");
      for (j = 0; j < vec_len (ha->rules); j++)
	{
	  hash_ace_info_t *pa = &ha->rules[j];
	  m = (u64 *) & pa->match;
	  vlib_cli_output (vm,
			   "    %4d: %016llx %016llx %016llx %016llx %016llx %016llx base mask index %d acl %d rule %d action %d\n",
			   j, m[0], m[1], m[2], m[3], m[4], m[5],
			   pa->base_mask_type_index, pa->acl_index, pa->ace_index,
			   pa->action);
	}
    }
}
static void
acl_plugin_print_colliding_rule (vlib_main_t * vm, int j,
				 collision_match_rule_t * cr)
{
  vlib_cli_output (vm,
		   "        %4d: acl %d ace %d acl pos %d pae index: %d",
		   j, cr->acl_index, cr->ace_index, cr->acl_position,
		   cr->applied_entry_index);
}
static void
acl_plugin_print_pae (vlib_main_t * vm, int j, applied_hash_ace_entry_t * pae)
{
  vlib_cli_output (vm,
		   "    %4d: acl %d rule %d action %d bitmask-ready rule %d mask type index: %d colliding_rules: %d collision_head_ae_idx %d hitcount %lld acl_pos: %d",
		   j, pae->acl_index, pae->ace_index, pae->action,
		   pae->hash_ace_info_index, pae->mask_type_index,
		   vec_len (pae->colliding_rules), pae->collision_head_ae_index,
		   pae->hitcount, pae->acl_position);
  int jj;
  for (jj = 0; jj < vec_len (pae->colliding_rules); jj++)
    acl_plugin_print_colliding_rule (vm, jj,
				     vec_elt_at_index (pae->colliding_rules, jj));
}
static void
acl_plugin_print_applied_mask_info (vlib_main_t * vm, int j,
				    hash_applied_mask_info_t * mi)
{
  vlib_cli_output (vm,
		   "    %4d: mask type index %d first rule index %d num_entries %d max_collisions %d",
		   j, mi->mask_type_index, mi->first_rule_index,
		   mi->num_entries, mi->max_collisions);
}
void
acl_plugin_show_tables_applied_info (u32 lc_index)
{
  acl_main_t *am = &acl_main;
  vlib_main_t *vm = am->vlib_main;
  u32 lci, j;
  vlib_cli_output (vm, "Applied lookup entries for lookup contexts");

  for (lci = 0;
       (lci < vec_len (am->applied_hash_acl_info_by_lc_index)); lci++)
    {
      if ((lc_index != ~0) && (lc_index != lci))
	{
	  continue;
	}
      vlib_cli_output (vm, "lc_index %d:", lci);
      if (lci < vec_len (am->applied_hash_acl_info_by_lc_index))
	{
	  applied_hash_acl_info_t *pal =
	    &am->applied_hash_acl_info_by_lc_index[lci];
	  vlib_cli_output (vm, "  applied acls: %U", format_vec32,
			   pal->applied_acls, "%d");
	}
      if (lci < vec_len (am->hash_applied_mask_info_vec_by_lc_index))
	{
	  vlib_cli_output (vm, "  applied mask info entries:");
	  for (j = 0;
	       j < vec_len (am->hash_applied_mask_info_vec_by_lc_index[lci]);
	       j++)
	    {
	      acl_plugin_print_applied_mask_info (vm, j,
						  &am->hash_applied_mask_info_vec_by_lc_index
						  [lci][j]);
	    }
	}
      if (lci < vec_len (am->hash_entry_vec_by_lc_index))
	{
	  vlib_cli_output (vm, "  lookup applied entries:");
	  for (j = 0;
	       j < vec_len (am->hash_entry_vec_by_lc_index[lci]);
	       j++)
	    {
	      acl_plugin_print_pae (vm, j,
				    &am->hash_entry_vec_by_lc_index
				    [lci][j]);
	    }
	}
    }
}
void
acl_plugin_show_tables_bihash (u32 show_bihash_verbose)
{
  acl_main_t *am = &acl_main;
  vlib_main_t *vm = am->vlib_main;
  show_hash_acl_hash (vm, am, show_bihash_verbose);
}
/*
 * Split of the partition needs to happen when the collision count
 * goes over a specified threshold.
 *
 * This is a signal that we ignored too many bits in
 * mT and we need to split the table into two tables. We select
 * all of the colliding rules L and find their maximum common
 * tuple mL. Normally mL is specific enough to hash L with few
 * or no collisions. We then create a new table T2 with tuple mL
 * and transfer all compatible rules from T to T2. If mL is not
 * specific enough, we find the field with the biggest difference
 * between the minimum and maximum tuple lengths for all of
 * the rules in L and set that field to be the average of those two
 * values. We then transfer all compatible rules as before. This
 * guarantees that some rules from L will move and that T2 will
 * have a smaller number of collisions than T did.
 */
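/*
 * Worked example of the split heuristic (hypothetical masks): suppose
 * the colliding rules' source prefixes range from /8 to /24 while all
 * other fields agree. The source-address dimension then has the
 * largest min/max spread (best_delta == 16 bits), so it is chosen as
 * best_dim, and the new table mask is derived by shifting the most
 * specific mask left by best_delta/2 == 8 bits, landing at /16 -
 * midway between the extremes, so that some but not all of the
 * colliding rules migrate to the new partition.
 */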
static void
ensure_ip6_min_addr (ip6_address_t * min_addr, ip6_address_t * mask_addr)
{
  int update =
    (clib_net_to_host_u64 (mask_addr->as_u64[0]) <
     clib_net_to_host_u64 (min_addr->as_u64[0]))
    ||
    ((clib_net_to_host_u64 (mask_addr->as_u64[0]) ==
      clib_net_to_host_u64 (min_addr->as_u64[0]))
     && (clib_net_to_host_u64 (mask_addr->as_u64[1]) <
	 clib_net_to_host_u64 (min_addr->as_u64[1])));
  if (update)
    {
      min_addr->as_u64[0] = mask_addr->as_u64[0];
      min_addr->as_u64[1] = mask_addr->as_u64[1];
    }
}
static void
ensure_ip6_max_addr (ip6_address_t * max_addr, ip6_address_t * mask_addr)
{
  int update =
    (clib_net_to_host_u64 (mask_addr->as_u64[0]) >
     clib_net_to_host_u64 (max_addr->as_u64[0]))
    ||
    ((clib_net_to_host_u64 (mask_addr->as_u64[0]) ==
      clib_net_to_host_u64 (max_addr->as_u64[0]))
     && (clib_net_to_host_u64 (mask_addr->as_u64[1]) >
	 clib_net_to_host_u64 (max_addr->as_u64[1])));
  if (update)
    {
      max_addr->as_u64[0] = mask_addr->as_u64[0];
      max_addr->as_u64[1] = mask_addr->as_u64[1];
    }
}
static void
ensure_ip4_min_addr (ip4_address_t * min_addr, ip4_address_t * mask_addr)
{
  int update =
    (clib_net_to_host_u32 (mask_addr->as_u32) <
     clib_net_to_host_u32 (min_addr->as_u32));
  if (update)
    min_addr->as_u32 = mask_addr->as_u32;
}
static void
ensure_ip4_max_addr (ip4_address_t * max_addr, ip4_address_t * mask_addr)
{
  int update =
    (clib_net_to_host_u32 (mask_addr->as_u32) >
     clib_net_to_host_u32 (max_addr->as_u32));
  if (update)
    max_addr->as_u32 = mask_addr->as_u32;
}

enum
{
  DIM_SRC_ADDR = 0,
  DIM_DST_ADDR,
  DIM_SRC_PORT,
  DIM_DST_PORT,
  DIM_PROTO,
};
static void
split_partition (acl_main_t * am, u32 first_index,
		 u32 lc_index, int is_ip6)
{
  DBG ("TM-split_partition - first_entry:%d", first_index);
  applied_hash_ace_entry_t **applied_hash_aces = get_applied_hash_aces (am, lc_index);
  ace_mask_type_entry_t *mte;
  fa_5tuple_t the_min_tuple, *min_tuple = &the_min_tuple;
  fa_5tuple_t the_max_tuple, *max_tuple = &the_max_tuple;
  applied_hash_ace_entry_t *pae = vec_elt_at_index ((*applied_hash_aces), first_index);
  hash_acl_info_t *ha = vec_elt_at_index (am->hash_acl_infos, pae->acl_index);
  hash_ace_info_t *ace_info;
  u32 coll_mask_type_index = pae->mask_type_index;
  clib_memset (&the_min_tuple, 0, sizeof (the_min_tuple));
  clib_memset (&the_max_tuple, 0, sizeof (the_max_tuple));

  int i = 0;
  collision_match_rule_t *colliding_rules = pae->colliding_rules;
  u64 collisions = vec_len (pae->colliding_rules);
  for (i = 0; i < collisions; i++)
    {
      /* reload the hash acl info as it might be a different ACL# */
      pae = vec_elt_at_index ((*applied_hash_aces), colliding_rules[i].applied_entry_index);
      ha = vec_elt_at_index (am->hash_acl_infos, pae->acl_index);

      DBG ("TM-collision: base_ace:%d (ace_mask:%d, first_collision_mask:%d)",
	   pae->ace_index, pae->mask_type_index, coll_mask_type_index);

      ace_info = vec_elt_at_index (ha->rules, pae->hash_ace_info_index);
      mte = vec_elt_at_index (am->ace_mask_type_pool, ace_info->base_mask_type_index);
      fa_5tuple_t *mask = &mte->mask;

      if (pae->mask_type_index != coll_mask_type_index)
	continue;
      /* Computing min_mask and max_mask for colliding rules */
      if (i == 0)
	{
	  clib_memcpy_fast (min_tuple, mask, sizeof (fa_5tuple_t));
	  clib_memcpy_fast (max_tuple, mask, sizeof (fa_5tuple_t));
	}
      else
	{
	  int j;
	  for (j = 0; j < 2; j++)
	    {
	      if (is_ip6)
		ensure_ip6_min_addr (&min_tuple->ip6_addr[j], &mask->ip6_addr[j]);
	      else
		ensure_ip4_min_addr (&min_tuple->ip4_addr[j], &mask->ip4_addr[j]);

	      if ((mask->l4.port[j] < min_tuple->l4.port[j]))
		min_tuple->l4.port[j] = mask->l4.port[j];
	    }

	  if ((mask->l4.proto < min_tuple->l4.proto))
	    min_tuple->l4.proto = mask->l4.proto;

	  if (mask->pkt.as_u64 < min_tuple->pkt.as_u64)
	    min_tuple->pkt.as_u64 = mask->pkt.as_u64;

	  for (j = 0; j < 2; j++)
	    {
	      if (is_ip6)
		ensure_ip6_max_addr (&max_tuple->ip6_addr[j], &mask->ip6_addr[j]);
	      else
		ensure_ip4_max_addr (&max_tuple->ip4_addr[j], &mask->ip4_addr[j]);

	      if ((mask->l4.port[j] > max_tuple->l4.port[j]))
		max_tuple->l4.port[j] = mask->l4.port[j];
	    }

	  if ((mask->l4.proto > max_tuple->l4.proto))
	    max_tuple->l4.proto = mask->l4.proto;

	  if (mask->pkt.as_u64 > max_tuple->pkt.as_u64)
	    max_tuple->pkt.as_u64 = mask->pkt.as_u64;
	}
    }

  /* Computing the field with the max difference between (min/max)_mask */
  int best_dim = -1, best_delta = 0, delta = 0;

  /* SRC_addr dimension */
  if (is_ip6)
    {
      int i;
      for (i = 0; i < 2; i++)
	{
	  delta += count_bits (max_tuple->ip6_addr[0].as_u64[i]) -
	    count_bits (min_tuple->ip6_addr[0].as_u64[i]);
	}
    }
  else
    {
      delta += count_bits (max_tuple->ip4_addr[0].as_u32) -
	count_bits (min_tuple->ip4_addr[0].as_u32);
    }
  if (delta > best_delta)
    {
      best_delta = delta;
      best_dim = DIM_SRC_ADDR;
    }

  /* DST_addr dimension */
  delta = 0;
  if (is_ip6)
    {
      int i;
      for (i = 0; i < 2; i++)
	{
	  delta += count_bits (max_tuple->ip6_addr[1].as_u64[i]) -
	    count_bits (min_tuple->ip6_addr[1].as_u64[i]);
	}
    }
  else
    {
      delta += count_bits (max_tuple->ip4_addr[1].as_u32) -
	count_bits (min_tuple->ip4_addr[1].as_u32);
    }
  if (delta > best_delta)
    {
      best_delta = delta;
      best_dim = DIM_DST_ADDR;
    }

  /* SRC_port dimension */
  delta = count_bits (max_tuple->l4.port[0]) - count_bits (min_tuple->l4.port[0]);
  if (delta > best_delta)
    {
      best_delta = delta;
      best_dim = DIM_SRC_PORT;
    }

  /* DST_port dimension */
  delta = count_bits (max_tuple->l4.port[1]) - count_bits (min_tuple->l4.port[1]);
  if (delta > best_delta)
    {
      best_delta = delta;
      best_dim = DIM_DST_PORT;
    }

  /* Proto dimension */
  delta = count_bits (max_tuple->l4.proto) - count_bits (min_tuple->l4.proto);
  if (delta > best_delta)
    {
      best_delta = delta;
      best_dim = DIM_PROTO;
    }

  int shifting = 0;
  switch (best_dim)
    {
    case DIM_SRC_ADDR:
      shifting = (best_delta) / 2;	/* FIXME: IPv4-only */
      min_tuple->ip4_addr[0].as_u32 =
	clib_host_to_net_u32 ((clib_net_to_host_u32 (max_tuple->ip4_addr[0].as_u32) << (shifting)) & 0xFFFFFFFF);
      break;
    case DIM_DST_ADDR:
      shifting = (best_delta) / 2;
/*
      ipv4_block = count_bits(max_tuple->addr[1].as_u64[1]);
      if(ipv4_block > shifting)
	min_tuple->addr[1].as_u64[1] =
	  clib_host_to_net_u64((clib_net_to_host_u64(max_tuple->addr[1].as_u64[1]) << (shifting))&0xFFFFFFFF);
      else {
	shifting = shifting - ipv4_block;
	min_tuple->addr[1].as_u64[1] = 0;
	min_tuple->addr[1].as_u64[0] =
	  clib_host_to_net_u64((clib_net_to_host_u64(max_tuple->addr[1].as_u64[0]) << (shifting))&0xFFFFFFFF);
      }
*/
      min_tuple->ip4_addr[1].as_u32 =
	clib_host_to_net_u32 ((clib_net_to_host_u32 (max_tuple->ip4_addr[1].as_u32) << (shifting)) & 0xFFFFFFFF);
      break;
    case DIM_SRC_PORT:
      min_tuple->l4.port[0] = max_tuple->l4.port[0] << (best_delta) / 2;
      break;
    case DIM_DST_PORT:
      min_tuple->l4.port[1] = max_tuple->l4.port[1] << (best_delta) / 2;
      break;
    case DIM_PROTO:
      min_tuple->l4.proto = max_tuple->l4.proto << (best_delta) / 2;
      break;
    default:
      relax_tuple (min_tuple, is_ip6, 1);
      break;
    }

  min_tuple->pkt.is_nonfirst_fragment = 0;
  u32 new_mask_type_index = assign_mask_type_index (am, min_tuple);

  hash_applied_mask_info_t **hash_applied_mask_info_vec =
    vec_elt_at_index (am->hash_applied_mask_info_vec_by_lc_index, lc_index);

  hash_applied_mask_info_t *minfo;
  /* search the ordered vector to see if mask_type_index is already there */
  int search;
  for (search = 0; search < vec_len ((*hash_applied_mask_info_vec)); search++)
    {
      minfo = vec_elt_at_index ((*hash_applied_mask_info_vec), search);
      if (minfo->mask_type_index == new_mask_type_index)
	break;
    }

  vec_validate ((*hash_applied_mask_info_vec), search);
  minfo = vec_elt_at_index ((*hash_applied_mask_info_vec), search);
  minfo->mask_type_index = new_mask_type_index;
  minfo->num_entries = 0;
  minfo->max_collisions = 0;
  minfo->first_rule_index = ~0;

  DBG ("TM-split_partition - mask type index assigned -> %d", new_mask_type_index);

  if (coll_mask_type_index == new_mask_type_index)
    {
      /* the computed mask is no different: there are collisions over the
         threshold, but we are not able to split */
      return;
    }

  /* populate the new partition */
  DBG ("TM-Populate new partition");
  u32 r_ace_index = first_index;
  int repopulate_count = 0;

  collision_match_rule_t *temp_colliding_rules = vec_dup (colliding_rules);
  collisions = vec_len (temp_colliding_rules);

  for (i = 0; i < collisions; i++)
    {
      r_ace_index = temp_colliding_rules[i].applied_entry_index;

      applied_hash_ace_entry_t *pop_pae = vec_elt_at_index ((*applied_hash_aces), r_ace_index);
      ha = vec_elt_at_index (am->hash_acl_infos, pop_pae->acl_index);
      DBG ("TM-Population-collision: base_ace:%d (ace_mask:%d, first_collision_mask:%d)",
	   pop_pae->ace_index, pop_pae->mask_type_index, coll_mask_type_index);

      ASSERT (pop_pae->mask_type_index == coll_mask_type_index);

      ace_info = vec_elt_at_index (ha->rules, pop_pae->hash_ace_info_index);
      mte = vec_elt_at_index (am->ace_mask_type_pool, ace_info->base_mask_type_index);
      fa_5tuple_t *pop_mask = &mte->mask;

      /* can this rule be inserted into the new partition? */
      if (!first_mask_contains_second_mask (is_ip6, min_tuple, pop_mask))
	continue;
      DBG ("TM-new partition can insert -> applied_ace:%d", r_ace_index);

      /* delete and re-insert in the new format */
      deactivate_applied_ace_hash_entry (am, lc_index, applied_hash_aces, r_ace_index);

      /* insert the new entry */
      pop_pae->mask_type_index = new_mask_type_index;
      /* The very first repopulation gets the lock by virtue of a new mask being created above */
      if (++repopulate_count > 1)
	lock_mask_type_index (am, new_mask_type_index);

      activate_applied_ace_hash_entry (am, lc_index, applied_hash_aces, r_ace_index);
    }

  vec_free (temp_colliding_rules);

  DBG ("TM-Populate new partition-END");
  DBG ("TM-split_partition - END");
}