/*
 *------------------------------------------------------------------
 * Copyright (c) 2017 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#include <stddef.h>
#include <netinet/in.h>

#include <vlibapi/api.h>
#include <vlibmemory/api.h>

#include <vlib/vlib.h>
#include <vnet/vnet.h>
#include <vnet/pg/pg.h>
#include <vppinfra/error.h>
#include <vnet/plugin/plugin.h>
#include <acl/acl.h>
#include <vppinfra/bihash_48_8.h>

#include "hash_lookup.h"
#include "hash_lookup_private.h"

always_inline applied_hash_ace_entry_t **get_applied_hash_aces(acl_main_t *am, u32 lc_index)
{
  applied_hash_ace_entry_t **applied_hash_aces = vec_elt_at_index(am->hash_entry_vec_by_lc_index, lc_index);

  return applied_hash_aces;
}

static void
hashtable_add_del(acl_main_t *am, clib_bihash_kv_48_8_t *kv, int is_add)
{
  DBG("HASH ADD/DEL: %016llx %016llx %016llx %016llx %016llx %016llx %016llx add %d",
      kv->key[0], kv->key[1], kv->key[2],
      kv->key[3], kv->key[4], kv->key[5], kv->value, is_add);
  BV (clib_bihash_add_del) (&am->acl_lookup_hash, kv, is_add);
}

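/*
 * Layout note (illustrative sketch based on the structures used above):
 * the 48-byte bihash key aliases the masked fa_5tuple_t of a rule, and
 * the 8-byte value aliases a hash_acl_lookup_value_t:
 *
 *   clib_bihash_kv_48_8_t kv;
 *   fa_5tuple_t *key = (fa_5tuple_t *) kv.key;              // 6 x u64
 *   hash_acl_lookup_value_t *val =
 *     (hash_acl_lookup_value_t *) &kv.value;                // 1 x u64
 *
 * fill_applied_hash_ace_kv() below does exactly this aliasing when it
 * builds the key/value pair for an applied entry.
 */
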
static void
fill_applied_hash_ace_kv(acl_main_t *am,
                            applied_hash_ace_entry_t **applied_hash_aces,
                            u32 lc_index,
                            u32 new_index, clib_bihash_kv_48_8_t *kv)
{
  fa_5tuple_t *kv_key = (fa_5tuple_t *)kv->key;
  hash_acl_lookup_value_t *kv_val = (hash_acl_lookup_value_t *)&kv->value;
  applied_hash_ace_entry_t *pae = vec_elt_at_index((*applied_hash_aces), new_index);
  hash_acl_info_t *ha = vec_elt_at_index(am->hash_acl_infos, pae->acl_index);

  memcpy(kv_key, &(vec_elt_at_index(ha->rules, pae->hash_ace_info_index)->match), sizeof(*kv_key));
  /* initialize the lookup context index in the key */
  kv_key->pkt.lc_index = lc_index;
  kv_val->applied_entry_index = new_index;
  kv_val->need_portrange_check = vec_elt_at_index(ha->rules, pae->hash_ace_info_index)->src_portrange_not_powerof2 ||
                                 vec_elt_at_index(ha->rules, pae->hash_ace_info_index)->dst_portrange_not_powerof2;
  /* by default assume all values are shadowed -> check all mask types */
  kv_key->pkt.mask_type_index_lsb = ~0;
}

static void
add_del_hashtable_entry(acl_main_t *am,
                            u32 lc_index,
                            applied_hash_ace_entry_t **applied_hash_aces,
                            u32 index, int is_add)
{
  clib_bihash_kv_48_8_t kv;

  fill_applied_hash_ace_kv(am, applied_hash_aces, lc_index, index, &kv);
  hashtable_add_del(am, &kv, is_add);
}

static void
activate_applied_ace_hash_entry(acl_main_t *am,
                            u32 lc_index,
                            applied_hash_ace_entry_t **applied_hash_aces,
                            u32 new_index)
{
  clib_bihash_kv_48_8_t kv;
  ASSERT(new_index != ~0);
  ASSERT(new_index < vec_len((*applied_hash_aces)));
  applied_hash_ace_entry_t *pae = vec_elt_at_index((*applied_hash_aces), new_index);
  DBG("activate_applied_ace_hash_entry lc_index %d new_index %d", lc_index, new_index);

  fill_applied_hash_ace_kv(am, applied_hash_aces, lc_index, new_index, &kv);

  DBG("APPLY ADD KV: %016llx %016llx %016llx %016llx %016llx %016llx",
      kv.key[0], kv.key[1], kv.key[2],
      kv.key[3], kv.key[4], kv.key[5]);

  clib_bihash_kv_48_8_t result;
  hash_acl_lookup_value_t *result_val = (hash_acl_lookup_value_t *)&result.value;
  int res = BV (clib_bihash_search) (&am->acl_lookup_hash, &kv, &result);
  if (res == 0) {
    /* There already exists an entry or more. Append at the end. */
    u32 first_index = result_val->applied_entry_index;
    ASSERT(first_index != ~0);
    DBG("A key already exists, with applied entry index: %d", first_index);
    applied_hash_ace_entry_t *first_pae = vec_elt_at_index((*applied_hash_aces), first_index);
    u32 last_index = first_pae->tail_applied_entry_index;
    ASSERT(last_index != ~0);
    applied_hash_ace_entry_t *last_pae = vec_elt_at_index((*applied_hash_aces), last_index);
    DBG("...advance to chained entry index: %d", last_index);
    /* link ourselves in */
    last_pae->next_applied_entry_index = new_index;
    pae->prev_applied_entry_index = last_index;
    /* adjust the pointer to the new tail */
    first_pae->tail_applied_entry_index = new_index;
  } else {
    /* It's the very first entry */
    hashtable_add_del(am, &kv, 1);
    pae->tail_applied_entry_index = new_index;
  }
}

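/*
 * Chaining sketch: entries whose masked keys collide share a single bihash
 * key and are linked through the applied-entry vector. For example, after
 * activating three colliding entries at (illustrative) indices 3, 7, 9:
 *
 *   bihash[key].applied_entry_index = 3          (head)
 *   entry 3: prev ~0, next 7, tail 9             (tail kept on head only)
 *   entry 7: prev 3,  next 9, tail ~0
 *   entry 9: prev 7,  next ~0, tail ~0
 *
 * The head's tail_applied_entry_index makes the append above O(1).
 */
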
static void
applied_hash_entries_analyze(acl_main_t *am, applied_hash_ace_entry_t **applied_hash_aces)
{
  /*
   * Go over the rules and check which ones are shadowed and which aren't.
   * Naive approach: try to match the match value from every ACE as if it
   * was a live packet, and see if the resulting match happens earlier in the list.
   * If it does not match, or matches later in the ACL, then the entry is not shadowed.
   *
   * This approach fails. An example:
   *   deny tcp 2001:db8::/32 2001:db8::/32
   *   permit ip 2001:db8::1/128 2001:db8::2/128
   * The match value of the second (permit) entry carries no L4 information,
   * so it does not match the first (deny tcp) entry - yet every real TCP
   * packet from ::1 to ::2 hits the deny first. The permit entry is
   * partially shadowed, which the naive check cannot detect.
   */
}

static void *
hash_acl_set_heap(acl_main_t *am)
{
  if (0 == am->hash_lookup_mheap) {
    am->hash_lookup_mheap = mheap_alloc (0 /* use VM */ , am->hash_lookup_mheap_size);
    if (0 == am->hash_lookup_mheap) {
      clib_error("ACL plugin failed to allocate hash lookup heap of %U bytes, abort", format_memory_size, am->hash_lookup_mheap_size);
    }
    mheap_t *h = mheap_header (am->hash_lookup_mheap);
    h->flags |= MHEAP_FLAG_THREAD_SAFE;
  }
  void *oldheap = clib_mem_set_heap(am->hash_lookup_mheap);
  return oldheap;
}

void
acl_plugin_hash_acl_set_validate_heap(int on)
{
  acl_main_t *am = &acl_main;
  clib_mem_set_heap(hash_acl_set_heap(am));
  mheap_t *h = mheap_header (am->hash_lookup_mheap);
  if (on) {
    h->flags |= MHEAP_FLAG_VALIDATE;
    h->flags &= ~MHEAP_FLAG_SMALL_OBJECT_CACHE;
    mheap_validate(h);
  } else {
    h->flags &= ~MHEAP_FLAG_VALIDATE;
    h->flags |= MHEAP_FLAG_SMALL_OBJECT_CACHE;
  }
}

void
acl_plugin_hash_acl_set_trace_heap(int on)
{
  acl_main_t *am = &acl_main;
  clib_mem_set_heap(hash_acl_set_heap(am));
  mheap_t *h = mheap_header (am->hash_lookup_mheap);
  if (on) {
    h->flags |= MHEAP_FLAG_TRACE;
  } else {
    h->flags &= ~MHEAP_FLAG_TRACE;
  }
}

void
hash_acl_apply(acl_main_t *am, u32 lc_index, int acl_index, u32 acl_position)
{
  int i;

  DBG0("HASH ACL apply: lc_index %d acl %d", lc_index, acl_index);
  if (!am->acl_lookup_hash_initialized) {
    BV (clib_bihash_init) (&am->acl_lookup_hash, "ACL plugin rule lookup bihash",
                           am->hash_lookup_hash_buckets, am->hash_lookup_hash_memory);
    am->acl_lookup_hash_initialized = 1;
  }

  void *oldheap = hash_acl_set_heap(am);
  vec_validate(am->hash_entry_vec_by_lc_index, lc_index);
  vec_validate(am->hash_acl_infos, acl_index);
  applied_hash_ace_entry_t **applied_hash_aces = get_applied_hash_aces(am, lc_index);

  hash_acl_info_t *ha = vec_elt_at_index(am->hash_acl_infos, acl_index);
  u32 **hash_acl_applied_lc_index = &ha->lc_index_list;

  int base_offset = vec_len(*applied_hash_aces);

  /* Update the bitmap of the mask types with which the lookup
     needs to happen for the ACLs applied to this lc_index */
  applied_hash_acl_info_t **applied_hash_acls = &am->applied_hash_acl_info_by_lc_index;
  vec_validate((*applied_hash_acls), lc_index);
  applied_hash_acl_info_t *pal = vec_elt_at_index((*applied_hash_acls), lc_index);

  /* ensure the list of applied hash acls is initialized and add this acl# to it */
  u32 index = vec_search(pal->applied_acls, acl_index);
  if (index != ~0) {
    clib_warning("BUG: trying to apply twice acl_index %d on lc_index %d, according to lc",
                 acl_index, lc_index);
    goto done;
  }
  vec_add1(pal->applied_acls, acl_index);
  u32 index2 = vec_search((*hash_acl_applied_lc_index), lc_index);
  if (index2 != ~0) {
    clib_warning("BUG: trying to apply twice acl_index %d on lc_index %d, according to hash h-acl info",
                 acl_index, lc_index);
    goto done;
  }
  vec_add1((*hash_acl_applied_lc_index), lc_index);

  pal->mask_type_index_bitmap = clib_bitmap_or(pal->mask_type_index_bitmap,
                                               ha->mask_type_index_bitmap);
  /*
   * if the applied ACL is empty, the current code will cause a
   * different behavior compared to current linear search: an empty ACL will
   * simply fallthrough to the next ACL, or the default deny in the end.
   *
   * This is not a problem, because after vpp-dev discussion,
   * the consensus was it should not be possible to apply the non-existent
   * ACL, so the change adding this code also takes care of that.
   */

  /* expand the applied aces vector by the necessary amount */
  vec_resize((*applied_hash_aces), vec_len(ha->rules));

  /* add the rules from the ACL to the hash table for lookup and append to the vector */
  for(i=0; i < vec_len(ha->rules); i++) {
    u32 new_index = base_offset + i;
    applied_hash_ace_entry_t *pae = vec_elt_at_index((*applied_hash_aces), new_index);
    pae->acl_index = acl_index;
    pae->ace_index = ha->rules[i].ace_index;
    pae->acl_position = acl_position;
    pae->action = ha->rules[i].action;
    pae->hitcount = 0;
    pae->hash_ace_info_index = i;
    /* we might link it in later */
    pae->next_applied_entry_index = ~0;
    pae->prev_applied_entry_index = ~0;
    pae->tail_applied_entry_index = ~0;
    activate_applied_ace_hash_entry(am, lc_index, applied_hash_aces, new_index);
  }
  applied_hash_entries_analyze(am, applied_hash_aces);
done:
  clib_mem_set_heap (oldheap);
}

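/*
 * Example (illustrative): if a lookup context already has an ACL with 2
 * rules applied (entries [0..1]), applying another ACL with 3 rules
 * computes base_offset = 2, resizes the vector to 5 entries and activates
 * the new rules at applied indices 2..4, each stamped with acl_position so
 * matches colliding across ACLs can still be resolved in apply order.
 */
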
static u32
find_head_applied_ace_index(applied_hash_ace_entry_t **applied_hash_aces, u32 curr_index)
{
  /*
   * find back the first entry. Inefficient so might need to be a bit cleverer
   * if this proves to be a problem..
   */
  u32 an_index = curr_index;
  ASSERT(an_index != ~0);
  applied_hash_ace_entry_t *head_pae = vec_elt_at_index((*applied_hash_aces), an_index);
  while(head_pae->prev_applied_entry_index != ~0) {
    an_index = head_pae->prev_applied_entry_index;
    head_pae = vec_elt_at_index((*applied_hash_aces), an_index);
  }
  return an_index;
}

static void
move_applied_ace_hash_entry(acl_main_t *am,
                            u32 lc_index,
                            applied_hash_ace_entry_t **applied_hash_aces,
                            u32 old_index, u32 new_index)
{
  ASSERT(old_index != ~0);
  ASSERT(new_index != ~0);
  /* move the entry */
  *vec_elt_at_index((*applied_hash_aces), new_index) = *vec_elt_at_index((*applied_hash_aces), old_index);

  /* update the linkage and hash table if necessary */
  applied_hash_ace_entry_t *pae = vec_elt_at_index((*applied_hash_aces), old_index);

  if (pae->prev_applied_entry_index != ~0) {
    applied_hash_ace_entry_t *prev_pae = vec_elt_at_index((*applied_hash_aces), pae->prev_applied_entry_index);
    ASSERT(prev_pae->next_applied_entry_index == old_index);
    prev_pae->next_applied_entry_index = new_index;
  } else {
    /* first entry - so the hash points to it, update */
    add_del_hashtable_entry(am, lc_index,
                            applied_hash_aces, new_index, 1);
    ASSERT(pae->tail_applied_entry_index != ~0);
  }
  if (pae->next_applied_entry_index != ~0) {
    applied_hash_ace_entry_t *next_pae = vec_elt_at_index((*applied_hash_aces), pae->next_applied_entry_index);
    ASSERT(next_pae->prev_applied_entry_index == old_index);
    next_pae->prev_applied_entry_index = new_index;
  } else {
    /*
     * Moving the very last entry, so we need to update the tail pointer in the first one.
     */
    u32 head_index = find_head_applied_ace_index(applied_hash_aces, old_index);
    ASSERT(head_index != ~0);
    applied_hash_ace_entry_t *head_pae = vec_elt_at_index((*applied_hash_aces), head_index);

    ASSERT(head_pae->tail_applied_entry_index == old_index);
    head_pae->tail_applied_entry_index = new_index;
  }
  /* invalidate the old entry */
  pae->prev_applied_entry_index = ~0;
  pae->next_applied_entry_index = ~0;
  pae->tail_applied_entry_index = ~0;
}

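/*
 * Move sketch (illustrative indices): relocating entry 7 into slot 2 in a
 * chain 3 <-> 7 <-> 9 first copies the element (slot 2 inherits prev 3,
 * next 9), then repairs the neighbours:
 *
 *   before: 3.next = 7, 9.prev = 7
 *   after:  3.next = 2, 9.prev = 2
 *
 * Had 7 been the chain head, the bihash value would be re-added to point
 * at slot 2; had it been the tail, the head's tail index would become 2.
 */
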
static void
deactivate_applied_ace_hash_entry(acl_main_t *am,
                            u32 lc_index,
                            applied_hash_ace_entry_t **applied_hash_aces,
                            u32 old_index)
{
  applied_hash_ace_entry_t *pae = vec_elt_at_index((*applied_hash_aces), old_index);
  DBG("UNAPPLY DEACTIVATE: lc_index %d applied index %d", lc_index, old_index);

  if (pae->prev_applied_entry_index != ~0) {
    DBG("UNAPPLY = index %d has prev_applied_entry_index %d", old_index, pae->prev_applied_entry_index);
    applied_hash_ace_entry_t *prev_pae = vec_elt_at_index((*applied_hash_aces), pae->prev_applied_entry_index);
    ASSERT(prev_pae->next_applied_entry_index == old_index);
    prev_pae->next_applied_entry_index = pae->next_applied_entry_index;
    if (pae->next_applied_entry_index == ~0) {
      /* it was the last entry we removed, update the pointer on the first one */
      u32 head_index = find_head_applied_ace_index(applied_hash_aces, old_index);
      DBG("UNAPPLY = index %d head index to update %d", old_index, head_index);
      ASSERT(head_index != ~0);
      applied_hash_ace_entry_t *head_pae = vec_elt_at_index((*applied_hash_aces), head_index);

      ASSERT(head_pae->tail_applied_entry_index == old_index);
      head_pae->tail_applied_entry_index = pae->prev_applied_entry_index;
    } else {
      applied_hash_ace_entry_t *next_pae = vec_elt_at_index((*applied_hash_aces), pae->next_applied_entry_index);
      next_pae->prev_applied_entry_index = pae->prev_applied_entry_index;
    }
  } else {
    /* It was the first entry. We need either to reset the hash entry or delete it */
    if (pae->next_applied_entry_index != ~0) {
      /* the next element becomes the new first one, so needs the tail pointer to be set */
      applied_hash_ace_entry_t *next_pae = vec_elt_at_index((*applied_hash_aces), pae->next_applied_entry_index);
      ASSERT(pae->tail_applied_entry_index != ~0);
      next_pae->tail_applied_entry_index = pae->tail_applied_entry_index;
      DBG("Resetting the hash table entry from %d to %d, setting tail index to %d", old_index, pae->next_applied_entry_index, pae->tail_applied_entry_index);
      /* unlink from the next element */
      next_pae->prev_applied_entry_index = ~0;
      add_del_hashtable_entry(am, lc_index,
                              applied_hash_aces, pae->next_applied_entry_index, 1);
    } else {
      /* no next entry, so just delete the entry in the hash table */
      add_del_hashtable_entry(am, lc_index,
                              applied_hash_aces, old_index, 0);
    }
  }
  /* invalidate the old entry */
  pae->prev_applied_entry_index = ~0;
  pae->next_applied_entry_index = ~0;
  pae->tail_applied_entry_index = ~0;
}

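/*
 * Deactivation covers three cases: a middle/tail element is simply
 * unlinked; a head with successors re-adds the hash entry so it points at
 * the successor (which also inherits the tail index); a singleton deletes
 * the hash entry outright. E.g. removing head 3 from 3 <-> 7 <-> 9
 * re-adds the key with applied_entry_index = 7 and copies tail 9 onto
 * entry 7 (illustrative indices).
 */
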
static void
hash_acl_build_applied_lookup_bitmap(acl_main_t *am, u32 lc_index)
{
  int i;
  uword *new_lookup_bitmap = 0;

  applied_hash_acl_info_t **applied_hash_acls = &am->applied_hash_acl_info_by_lc_index;
  vec_validate((*applied_hash_acls), lc_index);
  applied_hash_acl_info_t *pal = vec_elt_at_index((*applied_hash_acls), lc_index);

  for(i=0; i < vec_len(pal->applied_acls); i++) {
    u32 a_acl_index = *vec_elt_at_index((pal->applied_acls), i);
    hash_acl_info_t *ha = vec_elt_at_index(am->hash_acl_infos, a_acl_index);
    DBG("Update bitmask = %U or %U (acl_index %d)\n", format_bitmap_hex, new_lookup_bitmap,
        format_bitmap_hex, ha->mask_type_index_bitmap, a_acl_index);
    new_lookup_bitmap = clib_bitmap_or(new_lookup_bitmap,
                                       ha->mask_type_index_bitmap);
  }
  uword *old_lookup_bitmap = pal->mask_type_index_bitmap;
  pal->mask_type_index_bitmap = new_lookup_bitmap;
  clib_bitmap_free(old_lookup_bitmap);
}

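/*
 * Example (illustrative): if the ACLs remaining on this context use mask
 * type sets {0} and {0,2}, the rebuilt bitmap is 101b; a mask type 1 that
 * was only referenced by a just-removed ACL drops out, saving one bihash
 * probe per packet on this lookup context.
 */
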
void
hash_acl_unapply(acl_main_t *am, u32 lc_index, int acl_index)
{
  int i;

  DBG0("HASH ACL unapply: lc_index %d acl %d", lc_index, acl_index);
  applied_hash_acl_info_t **applied_hash_acls = &am->applied_hash_acl_info_by_lc_index;
  applied_hash_acl_info_t *pal = vec_elt_at_index((*applied_hash_acls), lc_index);

  hash_acl_info_t *ha = vec_elt_at_index(am->hash_acl_infos, acl_index);
  u32 **hash_acl_applied_lc_index = &ha->lc_index_list;

  /* remove this acl# from the list of applied hash acls */
  u32 index = vec_search(pal->applied_acls, acl_index);
  if (index == ~0) {
    clib_warning("BUG: trying to unapply unapplied acl_index %d on lc_index %d, according to lc",
                 acl_index, lc_index);
    return;
  }
  vec_del1(pal->applied_acls, index);

  u32 index2 = vec_search((*hash_acl_applied_lc_index), lc_index);
  if (index2 == ~0) {
    clib_warning("BUG: trying to unapply twice acl_index %d on lc_index %d, according to h-acl info",
                 acl_index, lc_index);
    return;
  }
  vec_del1((*hash_acl_applied_lc_index), index2);

  applied_hash_ace_entry_t **applied_hash_aces = get_applied_hash_aces(am, lc_index);

  /* find the first applied entry that belongs to this ACL */
  for(i=0; i < vec_len((*applied_hash_aces)); i++) {
    if (vec_elt_at_index(*applied_hash_aces,i)->acl_index == acl_index) {
      DBG("Found applied ACL#%d at applied index %d", acl_index, i);
      break;
    }
  }
  if (vec_len((*applied_hash_aces)) <= i) {
    DBG("Did not find applied ACL#%d at lc_index %d", acl_index, lc_index);
    /* we went all the way without finding any entries. Probably a list was empty. */
    return;
  }

  void *oldheap = hash_acl_set_heap(am);
  int base_offset = i;
  int tail_offset = base_offset + vec_len(ha->rules);
  int tail_len = vec_len((*applied_hash_aces)) - tail_offset;
  DBG("base_offset: %d, tail_offset: %d, tail_len: %d", base_offset, tail_offset, tail_len);

  for(i=0; i < vec_len(ha->rules); i ++) {
    deactivate_applied_ace_hash_entry(am, lc_index,
                                      applied_hash_aces, base_offset + i);
  }
  for(i=0; i < tail_len; i ++) {
    /* move the entry at tail offset to base offset */
    /* that is, from (tail_offset+i) -> (base_offset+i) */
    DBG("UNAPPLY MOVE: lc_index %d, applied index %d -> %d", lc_index, tail_offset+i, base_offset + i);
    move_applied_ace_hash_entry(am, lc_index, applied_hash_aces, tail_offset + i, base_offset + i);
  }
  /* trim the end of the vector */
  _vec_len((*applied_hash_aces)) -= vec_len(ha->rules);

  applied_hash_entries_analyze(am, applied_hash_aces);

  /* After deletion we might not need some of the mask-types anymore... */
  hash_acl_build_applied_lookup_bitmap(am, lc_index);
  clib_mem_set_heap (oldheap);
}

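/*
 * Example (illustrative): with applied entries [A0 A1 B0 B1 B2 C0] and ACL
 * B being unapplied, base_offset = 2, tail_offset = 5, tail_len = 1:
 * B0..B2 are deactivated, C0 is moved from index 5 to index 2, and the
 * vector is trimmed down to [A0 A1 C0].
 */
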
/*
 * Create the applied ACEs and update the hash table,
 * taking into account that the ACL may not be the last
 * in the vector of applied ACLs.
 *
 * For now, walk from the end of the vector and unapply the ACLs,
 * then apply the one in question and reapply the rest.
 */

void
hash_acl_reapply(acl_main_t *am, u32 lc_index, int acl_index)
{
  acl_lookup_context_t *acontext = pool_elt_at_index(am->acl_lookup_contexts, lc_index);
  u32 **applied_acls = &acontext->acl_indices;
  int i;
  int start_index = vec_search((*applied_acls), acl_index);

  DBG0("Start index for acl %d in lc_index %d is %d", acl_index, lc_index, start_index);
  /*
   * This function is called after we find out the lc_index where the ACL is applied.
   * If the by-lc_index vector does not have the ACL#, then it's a bug.
   */
  ASSERT(start_index < vec_len(*applied_acls));

  /* unapply all the ACLs at the tail side, up to the current one */
  for(i = vec_len(*applied_acls) - 1; i > start_index; i--) {
    hash_acl_unapply(am, lc_index, *vec_elt_at_index(*applied_acls, i));
  }
  for(i = start_index; i < vec_len(*applied_acls); i++) {
    hash_acl_apply(am, lc_index, *vec_elt_at_index(*applied_acls, i), i);
  }
}

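/*
 * Example (illustrative): with applied ACLs [10, 20, 30] on a context and
 * ACL 20 being re-added, start_index = 1: ACL 30 is unapplied first, then
 * ACLs 20 and 30 are applied again at positions 1 and 2, preserving the
 * relative precedence encoded in acl_position.
 */
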
static void
make_address_mask(ip46_address_t *addr, u8 is_ipv6, u8 prefix_len)
{
  if (is_ipv6) {
    ip6_address_mask_from_width(&addr->ip6, prefix_len);
  } else {
    /* FIXME: this may not be the correct way; the IPv4 address sits in
       the low 32 bits of the 128-bit ip46 address, hence the 3*32 shift */
    ip6_address_mask_from_width(&addr->ip6, prefix_len + 3*32);
    ip46_address_mask_ip4(addr);
  }
}

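/*
 * Example (illustrative): for an IPv4 rule with prefix_len 24 the call is
 * ip6_address_mask_from_width(.., 24 + 96): the first 96 mask bits cover
 * the padding before the embedded IPv4 address and the next 24 cover the
 * IPv4 prefix itself; ip46_address_mask_ip4() then clears the pad bits.
 */
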
static u8
make_port_mask(u16 *portmask, u16 port_first, u16 port_last)
{
  if (port_first == port_last) {
    *portmask = 0xffff;
    /* single port is representable by masked value */
    return 0;
  }
  if ((port_first == 0) && (port_last == 65535)) {
    *portmask = 0;
    /* wildcard port is representable by a masked value */
    return 0;
  }

  /*
   * For now match all the ports, later
   * here might be a better optimization which would
   * pick out bitmaskable portranges.
   *
   * However, adding a new mask type potentially
   * adds a per-packet extra lookup, so the benefit is not clear.
   */
  *portmask = 0;
  /* This port range can't be represented via bitmask exactly. */
  return 1;
}

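/*
 * Examples (illustrative): 80..80 sets *portmask = 0xffff and returns 0;
 * 0..65535 sets *portmask = 0 and returns 0; 1000..2000 sets *portmask = 0
 * and returns 1, deferring to the need_portrange_check path at lookup
 * time. A range such as 1024..1031 would in principle be maskable as
 * 0xfff8, but per the comment above such ranges are deliberately not
 * special-cased here.
 */
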
static void
make_mask_and_match_from_rule(fa_5tuple_t *mask, acl_rule_t *r, hash_ace_info_t *hi, int match_nonfirst_fragment)
{
  memset(mask, 0, sizeof(*mask));
  memset(&hi->match, 0, sizeof(hi->match));
  hi->action = r->is_permit;

  /* we will need to be matching based on lc_index and mask_type_index when applied */
  mask->pkt.lc_index = ~0;
  /* we will assign the match of mask_type_index later when we find it */
  mask->pkt.mask_type_index_lsb = ~0;

  mask->pkt.is_ip6 = 1;
  hi->match.pkt.is_ip6 = r->is_ipv6;

  make_address_mask(&mask->addr[0], r->is_ipv6, r->src_prefixlen);
  hi->match.addr[0] = r->src;
  make_address_mask(&mask->addr[1], r->is_ipv6, r->dst_prefixlen);
  hi->match.addr[1] = r->dst;

  if (r->proto != 0) {
    mask->l4.proto = ~0; /* L4 proto needs to be matched */
    hi->match.l4.proto = r->proto;
    if (match_nonfirst_fragment) {
      /* match the non-first fragments only */
      mask->pkt.is_nonfirst_fragment = 1;
      hi->match.pkt.is_nonfirst_fragment = 1;
    } else {
      /* Calculate the src/dst port masks and make the src/dst port matches accordingly */
      hi->src_portrange_not_powerof2 = make_port_mask(&mask->l4.port[0], r->src_port_or_type_first, r->src_port_or_type_last);
      hi->match.l4.port[0] = r->src_port_or_type_first & mask->l4.port[0];
      hi->dst_portrange_not_powerof2 = make_port_mask(&mask->l4.port[1], r->dst_port_or_code_first, r->dst_port_or_code_last);
      hi->match.l4.port[1] = r->dst_port_or_code_first & mask->l4.port[1];
      /* L4 info must be valid in order to match */
      mask->pkt.l4_valid = 1;
      hi->match.pkt.l4_valid = 1;
      /* And we must set the mask to check that it is an initial fragment */
      mask->pkt.is_nonfirst_fragment = 1;
      hi->match.pkt.is_nonfirst_fragment = 0;
      if ((r->proto == IPPROTO_TCP) && (r->tcp_flags_mask != 0)) {
        /* if we want to match on TCP flags, they must be masked off as well */
        mask->pkt.tcp_flags = r->tcp_flags_mask;
        hi->match.pkt.tcp_flags = r->tcp_flags_value;
        /* and the flags need to be present within the packet being matched */
        mask->pkt.tcp_flags_valid = 1;
        hi->match.pkt.tcp_flags_valid = 1;
      }
    }
  }
  /* Sanitize the mask and the match */
  u64 *pmask = (u64 *)mask;
  u64 *pmatch = (u64 *)&hi->match;
  int j;
  for(j=0; j<6; j++) {
    pmatch[j] = pmatch[j] & pmask[j];
  }
}

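/*
 * Example (illustrative): a rule "permit tcp 192.0.2.0/24 -> any, src port
 * 80" yields a mask with /24 on addr[0], l4.proto ~0, src port mask 0xffff
 * (dst ports 0..65535 give mask 0), and the l4_valid/is_nonfirst_fragment
 * bits set; the match carries 192.0.2.0, proto 6 and src port 80, with
 * is_nonfirst_fragment = 0, so only initial fragments can match.
 */
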
static u32
find_mask_type_index(acl_main_t *am, fa_5tuple_t *mask)
{
  ace_mask_type_entry_t *mte;
  /* *INDENT-OFF* */
  pool_foreach(mte, am->ace_mask_type_pool,
  ({
    if(memcmp(&mte->mask, mask, sizeof(*mask)) == 0)
      return (mte - am->ace_mask_type_pool);
  }));
  /* *INDENT-ON* */
  return ~0;
}

static u32
assign_mask_type_index(acl_main_t *am, fa_5tuple_t *mask)
{
  u32 mask_type_index = find_mask_type_index(am, mask);
  ace_mask_type_entry_t *mte;
  if(~0 == mask_type_index) {
    pool_get_aligned (am->ace_mask_type_pool, mte, CLIB_CACHE_LINE_BYTES);
    mask_type_index = mte - am->ace_mask_type_pool;
    clib_memcpy(&mte->mask, mask, sizeof(mte->mask));
    mte->refcount = 0;
    /*
     * We can use only 16 bits, since in the match there is only u16 field.
     * Realistically, once you go to 64K of mask types, it is a huge
     * problem anyway, so we might as well stop half way.
     */
    ASSERT(mask_type_index < 32768);
  }
  mte = am->ace_mask_type_pool + mask_type_index;
  mte->refcount++;
  return mask_type_index;
}

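/*
 * Mask types are interned and reference-counted: ACEs with identical
 * tuple masks share one pool entry, so every assign must be paired with a
 * release (as hash_acl_add()/hash_acl_delete() below do):
 *
 *   u32 mti = assign_mask_type_index(am, &mask);   // refcount++
 *   ...
 *   release_mask_type_index(am, mti);              // refcount--, free at 0
 */
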
static void
release_mask_type_index(acl_main_t *am, u32 mask_type_index)
{
  ace_mask_type_entry_t *mte = pool_elt_at_index(am->ace_mask_type_pool, mask_type_index);
  mte->refcount--;
  if (mte->refcount == 0) {
    /* we are not using this entry anymore */
    pool_put(am->ace_mask_type_pool, mte);
  }
}

int hash_acl_exists(acl_main_t *am, int acl_index)
{
  if (acl_index >= vec_len(am->hash_acl_infos))
    return 0;

  hash_acl_info_t *ha = vec_elt_at_index(am->hash_acl_infos, acl_index);
  return ha->hash_acl_exists;
}

void hash_acl_add(acl_main_t *am, int acl_index)
{
  void *oldheap = hash_acl_set_heap(am);
  DBG("HASH ACL add : %d", acl_index);
  int i;
  acl_list_t *a = &am->acls[acl_index];
  vec_validate(am->hash_acl_infos, acl_index);
  hash_acl_info_t *ha = vec_elt_at_index(am->hash_acl_infos, acl_index);
  memset(ha, 0, sizeof(*ha));
  ha->hash_acl_exists = 1;

  /* walk the newly added ACL entries and ensure that for each of them there
     is a mask type, increment a reference count for that mask type */
  for(i=0; i < a->count; i++) {
    hash_ace_info_t ace_info;
    fa_5tuple_t mask;
    memset(&ace_info, 0, sizeof(ace_info));
    ace_info.acl_index = acl_index;
    ace_info.ace_index = i;

    make_mask_and_match_from_rule(&mask, &a->rules[i], &ace_info, 0);
    ace_info.mask_type_index = assign_mask_type_index(am, &mask);
    /* assign the mask type index for matching itself */
    ace_info.match.pkt.mask_type_index_lsb = ace_info.mask_type_index;
    DBG("ACE: %d mask_type_index: %d", i, ace_info.mask_type_index);
    /* Ensure a given index is set in the mask type index bitmap for this ACL */
    ha->mask_type_index_bitmap = clib_bitmap_set(ha->mask_type_index_bitmap, ace_info.mask_type_index, 1);
    vec_add1(ha->rules, ace_info);
    if (am->l4_match_nonfirst_fragment) {
      /* add the second rule which matches the noninitial fragments with the respective mask */
      make_mask_and_match_from_rule(&mask, &a->rules[i], &ace_info, 1);
      ace_info.mask_type_index = assign_mask_type_index(am, &mask);
      ace_info.match.pkt.mask_type_index_lsb = ace_info.mask_type_index;
      DBG("ACE: %d (non-initial frags) mask_type_index: %d", i, ace_info.mask_type_index);
      /* Ensure a given index is set in the mask type index bitmap for this ACL */
      ha->mask_type_index_bitmap = clib_bitmap_set(ha->mask_type_index_bitmap, ace_info.mask_type_index, 1);
      vec_add1(ha->rules, ace_info);
    }
  }
  /*
   * if an ACL is applied somewhere, fill the corresponding lookup data structures.
   * We need to take care if the ACL is not the last one in the vector of ACLs applied to the interface.
   */
  if (acl_index < vec_len(am->lc_index_vec_by_acl)) {
    u32 *lc_index;
    vec_foreach(lc_index, am->lc_index_vec_by_acl[acl_index]) {
      hash_acl_reapply(am, *lc_index, acl_index);
    }
  }
  clib_mem_set_heap (oldheap);
}

void hash_acl_delete(acl_main_t *am, int acl_index)
{
  void *oldheap = hash_acl_set_heap(am);
  DBG0("HASH ACL delete : %d", acl_index);
  /*
   * If the ACL is applied somewhere, remove the references of it (call hash_acl_unapply);
   * this is a different behavior from the linear lookup where an empty ACL is "deny all".
   *
   * However, following vpp-dev discussion the ACL that is referenced elsewhere
   * should not be possible to delete, and the change adding this also adds
   * the safeguards to that respect, so this is not a problem.
   *
   * The part to remember is that this routine is called in process of reapplication
   * during the acl_add_replace() API call - the old acl ruleset is deleted, then
   * the new one is added, without the change in the applied ACLs - so this case
   * has to be handled.
   */
  hash_acl_info_t *ha = vec_elt_at_index(am->hash_acl_infos, acl_index);
  u32 *lc_list_copy = 0;
  {
    u32 *lc_index;
    lc_list_copy = vec_dup(ha->lc_index_list);
    vec_foreach(lc_index, lc_list_copy) {
      hash_acl_unapply(am, *lc_index, acl_index);
    }
    vec_free(lc_list_copy);
  }

  /* walk the mask types for the ACL about-to-be-deleted, and decrease
   * the reference count, possibly freeing up some of them */
  int i;
  for(i=0; i < vec_len(ha->rules); i++) {
    release_mask_type_index(am, ha->rules[i].mask_type_index);
  }
  clib_bitmap_free(ha->mask_type_index_bitmap);
  ha->hash_acl_exists = 0;
  vec_free(ha->rules);
  clib_mem_set_heap (oldheap);
}

void
show_hash_acl_hash (vlib_main_t * vm, acl_main_t *am, u32 verbose)
{
  vlib_cli_output(vm, "\nACL lookup hash table:\n%U\n",
                  BV (format_bihash), &am->acl_lookup_hash, verbose);
}

void
acl_plugin_show_tables_mask_type (void)
{
  acl_main_t *am = &acl_main;
  vlib_main_t *vm = am->vlib_main;
  ace_mask_type_entry_t *mte;

  vlib_cli_output (vm, "Mask-type entries:");
  /* *INDENT-OFF* */
  pool_foreach(mte, am->ace_mask_type_pool,
  ({
    vlib_cli_output(vm, "     %3d: %016llx %016llx %016llx %016llx %016llx %016llx  refcount %d",
                    mte - am->ace_mask_type_pool,
                    mte->mask.kv.key[0], mte->mask.kv.key[1], mte->mask.kv.key[2],
                    mte->mask.kv.key[3], mte->mask.kv.key[4], mte->mask.kv.value, mte->refcount);
  }));
  /* *INDENT-ON* */
}

void
acl_plugin_show_tables_acl_hash_info (u32 acl_index)
{
  acl_main_t *am = &acl_main;
  vlib_main_t *vm = am->vlib_main;
  u32 i, j;
  u64 *m;
  vlib_cli_output (vm, "Mask-ready ACL representations\n");
  for (i = 0; i < vec_len (am->hash_acl_infos); i++)
    {
      if ((acl_index != ~0) && (acl_index != i))
        {
          continue;
        }
      hash_acl_info_t *ha = &am->hash_acl_infos[i];
      vlib_cli_output (vm, "acl-index %u bitmask-ready layout\n", i);
      vlib_cli_output (vm, "  applied lc_index list: %U\n",
                       format_vec32, ha->lc_index_list, "%d");
      vlib_cli_output (vm, "  mask type index bitmap: %U\n",
                       format_bitmap_hex, ha->mask_type_index_bitmap);
      for (j = 0; j < vec_len (ha->rules); j++)
        {
          hash_ace_info_t *pa = &ha->rules[j];
          m = (u64 *) & pa->match;
          vlib_cli_output (vm,
                           "    %4d: %016llx %016llx %016llx %016llx %016llx %016llx mask index %d acl %d rule %d action %d src/dst portrange not ^2: %d,%d\n",
                           j, m[0], m[1], m[2], m[3], m[4], m[5],
                           pa->mask_type_index, pa->acl_index, pa->ace_index,
                           pa->action, pa->src_portrange_not_powerof2,
                           pa->dst_portrange_not_powerof2);
        }
    }
}

void
acl_plugin_print_pae (vlib_main_t * vm, int j, applied_hash_ace_entry_t * pae)
{
  vlib_cli_output (vm,
                   "    %4d: acl %d rule %d action %d bitmask-ready rule %d next %d prev %d tail %d hitcount %lld",
                   j, pae->acl_index, pae->ace_index, pae->action,
                   pae->hash_ace_info_index, pae->next_applied_entry_index,
                   pae->prev_applied_entry_index,
                   pae->tail_applied_entry_index, pae->hitcount);
}

void
acl_plugin_show_tables_applied_info (u32 lc_index)
{
  acl_main_t *am = &acl_main;
  vlib_main_t *vm = am->vlib_main;
  u32 lci, j;
  vlib_cli_output (vm, "Applied lookup entries for lookup contexts");

  for (lci = 0;
       lci < vec_len (am->applied_hash_acl_info_by_lc_index); lci++)
    {
      if ((lc_index != ~0) && (lc_index != lci))
        {
          continue;
        }
      vlib_cli_output (vm, "lc_index %d:", lci);
      if (lci < vec_len (am->applied_hash_acl_info_by_lc_index))
        {
          applied_hash_acl_info_t *pal =
            &am->applied_hash_acl_info_by_lc_index[lci];
          vlib_cli_output (vm, "  lookup mask_type_index_bitmap: %U",
                           format_bitmap_hex, pal->mask_type_index_bitmap);
          vlib_cli_output (vm, "  applied acls: %U", format_vec32,
                           pal->applied_acls, "%d");
        }
      if (lci < vec_len (am->hash_entry_vec_by_lc_index))
        {
          vlib_cli_output (vm, "  lookup applied entries:");
          for (j = 0;
               j < vec_len (am->hash_entry_vec_by_lc_index[lci]);
               j++)
            {
              acl_plugin_print_pae (vm, j,
                                    &am->hash_entry_vec_by_lc_index
                                    [lci][j]);
            }
        }
    }
}

void
acl_plugin_show_tables_bihash (u32 show_bihash_verbose)
{
  acl_main_t *am = &acl_main;
  vlib_main_t *vm = am->vlib_main;
  show_hash_acl_hash (vm, am, show_bihash_verbose);
}