X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=src%2Fvppinfra%2Fbihash_template.c;h=47e9bd0113dc6433a42fdf8b992912bb3d180691;hb=a90ba644b2c5463b89a91d019b3c22780242acc3;hp=882f81cc14bf0c3b5843d45a8262a2e2521b050c;hpb=ffb14b9554afa1e58c3657e0c91dda3135008274;p=vpp.git

diff --git a/src/vppinfra/bihash_template.c b/src/vppinfra/bihash_template.c
index 882f81cc14b..47e9bd0113d 100644
--- a/src/vppinfra/bihash_template.c
+++ b/src/vppinfra/bihash_template.c
@@ -15,6 +15,10 @@
 
 /** @cond DOCUMENTATION_IS_IN_BIHASH_DOC_H */
 
+#ifndef MAP_HUGE_SHIFT
+#define MAP_HUGE_SHIFT 26
+#endif
+
 static inline void *BV (alloc_aligned) (BVT (clib_bihash) * h, uword nbytes)
 {
   uword rv;
@@ -26,23 +30,102 @@ static inline void *BV (alloc_aligned) (BVT (clib_bihash) * h, uword nbytes)
   rv = alloc_arena_next (h);
   alloc_arena_next (h) += nbytes;
 
-  if (rv >= (alloc_arena (h) + alloc_arena_size (h)))
+  if (alloc_arena_next (h) > alloc_arena_size (h))
     os_out_of_memory ();
 
+  if (alloc_arena_next (h) > alloc_arena_mapped (h))
+    {
+      void *base, *rv;
+      uword alloc = alloc_arena_next (h) - alloc_arena_mapped (h);
+      int mmap_flags = MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS;
+      int mmap_flags_huge = (mmap_flags | MAP_HUGETLB |
+                             BIHASH_LOG2_HUGEPAGE_SIZE << MAP_HUGE_SHIFT);
+
+      /* new allocation is 25% of existing one */
+      if (alloc_arena_mapped (h) >> 2 > alloc)
+        alloc = alloc_arena_mapped (h) >> 2;
+
+      /* round allocation to page size */
+      alloc = round_pow2 (alloc, 1 << BIHASH_LOG2_HUGEPAGE_SIZE);
+
+      base = (void *) (uword) (alloc_arena (h) + alloc_arena_mapped (h));
+
+      rv = mmap (base, alloc, PROT_READ | PROT_WRITE, mmap_flags_huge, -1, 0);
+
+      /* fallback - maybe we are still able to allocate normal pages */
+      if (rv == MAP_FAILED)
+        rv = mmap (base, alloc, PROT_READ | PROT_WRITE, mmap_flags, -1, 0);
+
+      if (rv == MAP_FAILED)
+        os_out_of_memory ();
+
+      alloc_arena_mapped (h) += alloc;
+    }
+
   return (void *) (uword) (rv + alloc_arena (h));
 }
 
-
-void BV (clib_bihash_init)
-  (BVT (clib_bihash) * h, char *name, u32 nbuckets, uword memory_size)
+static void BV (clib_bihash_instantiate) (BVT (clib_bihash) * h)
 {
   uword bucket_size;
 
-  nbuckets = 1 << (max_log2 (nbuckets));
+  alloc_arena (h) = clib_mem_vm_reserve (0, h->memory_size,
+                                         BIHASH_LOG2_HUGEPAGE_SIZE);
+  if (alloc_arena (h) == ~0)
+    os_out_of_memory ();
+  alloc_arena_next (h) = 0;
+  alloc_arena_size (h) = h->memory_size;
+  alloc_arena_mapped (h) = 0;
 
-  h->name = (u8 *) name;
-  h->nbuckets = nbuckets;
-  h->log2_nbuckets = max_log2 (nbuckets);
+  bucket_size = h->nbuckets * sizeof (h->buckets[0]);
+
+  if (BIHASH_KVP_AT_BUCKET_LEVEL)
+    bucket_size +=
+      h->nbuckets * BIHASH_KVP_PER_PAGE * sizeof (BVT (clib_bihash_kv));
+
+  h->buckets = BV (alloc_aligned) (h, bucket_size);
+
+  if (BIHASH_KVP_AT_BUCKET_LEVEL)
+    {
+      int i;
+      BVT (clib_bihash_bucket) * b;
+
+      b = h->buckets;
+
+      for (i = 0; i < h->nbuckets; i++)
+        {
+          b->offset = BV (clib_bihash_get_offset) (h, (void *) (b + 1));
+          b->refcnt = 1;
+          /* Mark all elements free */
+          clib_memset ((b + 1), 0xff,
+                       BIHASH_KVP_PER_PAGE * sizeof (BVT (clib_bihash_kv)));
+
+          /* Compute next bucket start address */
+          b = (void *) (((uword) b) + sizeof (*b) +
+                        (BIHASH_KVP_PER_PAGE *
+                         sizeof (BVT (clib_bihash_kv))));
+        }
+    }
+  CLIB_MEMORY_STORE_BARRIER ();
+  h->instantiated = 1;
+}
+
+void BV (clib_bihash_init2) (BVT (clib_bihash_init2_args) * a)
+{
+  int i;
+  void *oldheap;
+  BVT (clib_bihash) * h = a->h;
+
+  a->nbuckets = 1 << (max_log2 (a->nbuckets));
+
+  h->name = (u8 *) a->name;
+  h->nbuckets = a->nbuckets;
+  h->log2_nbuckets = max_log2 (a->nbuckets);
+  h->memory_size = a->memory_size;
+  h->instantiated = 0;
+  h->fmt_fn = a->fmt_fn;
+
+  alloc_arena (h) = 0;
 
   /*
    * Make sure the requested size is rational. The max table
@@ -50,19 +133,50 @@ void BV (clib_bihash_init)
    * If someone starts complaining that's not enough, we can shift
    * the offset by CLIB_LOG2_CACHE_LINE_BYTES...
    */
-  ASSERT (memory_size < (1ULL << BIHASH_BUCKET_OFFSET_BITS));
+  ASSERT (h->memory_size < (1ULL << BIHASH_BUCKET_OFFSET_BITS));
 
-  alloc_arena (h) = (uword) clib_mem_vm_alloc (memory_size);
-  alloc_arena_next (h) = 0;
-  alloc_arena_size (h) = memory_size;
+  /* Add this hash table to the list */
+  if (a->dont_add_to_all_bihash_list == 0)
+    {
+      for (i = 0; i < vec_len (clib_all_bihashes); i++)
+        if (clib_all_bihashes[i] == h)
+          goto do_lock;
+      oldheap = clib_all_bihash_set_heap ();
+      vec_add1 (clib_all_bihashes, (void *) h);
+      clib_mem_set_heap (oldheap);
+    }
 
-  bucket_size = nbuckets * sizeof (h->buckets[0]);
-  h->buckets = BV (alloc_aligned) (h, bucket_size);
+do_lock:
+  if (h->alloc_lock)
+    clib_mem_free ((void *) h->alloc_lock);
 
-  h->alloc_lock = BV (alloc_aligned) (h, CLIB_CACHE_LINE_BYTES);
+  /*
+   * Set up the lock now, so we can use it to make the first add
+   * thread-safe
+   */
+  h->alloc_lock = clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES,
+                                          CLIB_CACHE_LINE_BYTES);
   h->alloc_lock[0] = 0;
 
-  h->fmt_fn = NULL;
+#if BIHASH_LAZY_INSTANTIATE
+  if (a->instantiate_immediately)
+#endif
+    BV (clib_bihash_instantiate) (h);
+}
+
+void BV (clib_bihash_init)
+  (BVT (clib_bihash) * h, char *name, u32 nbuckets, uword memory_size)
+{
+  BVT (clib_bihash_init2_args) _a, *a = &_a;
+
+  memset (a, 0, sizeof (*a));
+
+  a->h = h;
+  a->name = name;
+  a->nbuckets = nbuckets;
+  a->memory_size = memory_size;
+
+  BV (clib_bihash_init2) (a);
 }
 
 #if BIHASH_32_64_SVM
@@ -131,12 +245,12 @@ void BV (clib_bihash_master_init_svm)
                     sizeof (vec_header_t) + BIHASH_FREELIST_LENGTH * sizeof (u64));
   freelist_vh->len = BIHASH_FREELIST_LENGTH;
-  freelist_vh->dlmalloc_header_offset = 0xDEADBEEF;
 
   h->sh->freelists_as_u64 =
     (u64) BV (clib_bihash_get_offset) (h, freelist_vh->vector_data);
   h->freelists = (void *) (freelist_vh->vector_data);
 
   h->fmt_fn = NULL;
+  h->instantiated = 1;
 }
 
 void BV (clib_bihash_slave_init_svm)
@@ -195,7 +309,14 @@ void BV (clib_bihash_set_kvp_format_fn) (BVT (clib_bihash) * h,
 
 void BV (clib_bihash_free) (BVT (clib_bihash) * h)
 {
+  int i;
+
+  if (PREDICT_FALSE (h->instantiated == 0))
+    goto never_initialized;
+
+  h->instantiated = 0;
   vec_free (h->working_copies);
+  vec_free (h->working_copy_lengths);
 #if BIHASH_32_64_SVM == 0
   vec_free (h->freelists);
 #else
@@ -203,7 +324,18 @@ void BV (clib_bihash_free) (BVT (clib_bihash) * h)
   (void) close (h->memfd);
 #endif
   clib_mem_vm_free ((void *) (uword) (alloc_arena (h)), alloc_arena_size (h));
-  memset (h, 0, sizeof (*h));
+never_initialized:
+  clib_memset (h, 0, sizeof (*h));
+  for (i = 0; i < vec_len (clib_all_bihashes); i++)
+    {
+      if ((void *) h == clib_all_bihashes[i])
+        {
+          vec_delete (clib_all_bihashes, 1, i);
+          return;
+        }
+    }
+  clib_warning ("Couldn't find hash table %llx on clib_all_bihashes...",
+                (u64) (uword) h);
 }
 
 static
@@ -234,7 +366,7 @@ initialize:
    * if we replace (1<freelists) > log2_pages);
 
   if (CLIB_DEBUG > 0)
-    memset (v, 0xFE, sizeof (*v) * (1 << log2_pages));
+    clib_memset (v, 0xFE, sizeof (*v) * (1 << log2_pages));
 
   v->next_free_as_u64 = (u64) h->freelists[log2_pages];
   h->freelists[log2_pages] = (u64) BV (clib_bihash_get_offset) (h, v);
@@ -291,14 +423,17 @@ BV (make_working_copy) (BVT (clib_bihash) * h, BVT (clib_bihash_bucket) * b)
         (h, sizeof (working_copy[0]) * (1 << b->log2_pages));
       h->working_copy_lengths[thread_index] = b->log2_pages;
       h->working_copies[thread_index] = working_copy;
+
+      BV (clib_bihash_increment_stat) (h, BIHASH_STAT_working_copy_lost,
+                                       1ULL << b->log2_pages);
     }
 
   v = BV (clib_bihash_get_value) (h, b->offset);
 
-  clib_memcpy (working_copy, v, sizeof (*v) * (1 << b->log2_pages));
+  clib_memcpy_fast (working_copy, v, sizeof (*v) * (1 << b->log2_pages));
   working_bucket.as_u64 = b->as_u64;
   working_bucket.offset = BV (clib_bihash_get_offset) (h, working_copy);
-  CLIB_MEMORY_BARRIER ();
+  CLIB_MEMORY_STORE_BARRIER ();
   b->as_u64 = working_bucket.as_u64;
   h->working_copies[thread_index] = working_copy;
 }
@@ -328,8 +463,7 @@ BV (split_and_rehash)
 
       /* rehash the item onto its new home-page */
       new_hash = BV (clib_bihash_hash) (&(old_values->kvp[i]));
-      new_hash >>= h->log2_nbuckets;
-      new_hash &= (1 << new_log2_pages) - 1;
+      new_hash = extract_bits (new_hash, h->log2_nbuckets, new_log2_pages);
       new_v = &new_values[new_hash];
 
       /* Across the new home-page */
@@ -338,8 +472,8 @@ BV (split_and_rehash)
           /* Empty slot */
           if (BV (clib_bihash_is_free) (&(new_v->kvp[j])))
             {
-              clib_memcpy (&(new_v->kvp[j]), &(old_values->kvp[i]),
-                           sizeof (new_v->kvp[j]));
+              clib_memcpy_fast (&(new_v->kvp[j]), &(old_values->kvp[i]),
+                                sizeof (new_v->kvp[j]));
               goto doublebreak;
             }
         }
@@ -383,8 +517,8 @@ BV (split_and_rehash_linear)
           if (BV (clib_bihash_is_free) (&(new_values->kvp[j])))
             {
               /* Copy the old value and move along */
-              clib_memcpy (&(new_values->kvp[j]), &(old_values->kvp[i]),
-                           sizeof (new_values->kvp[j]));
+              clib_memcpy_fast (&(new_values->kvp[j]), &(old_values->kvp[i]),
+                                sizeof (new_values->kvp[j]));
               j++;
               goto doublebreak;
             }
         }
@@ -399,31 +533,52 @@ BV (split_and_rehash_linear)
   return new_values;
 }
 
-static inline int BV (clib_bihash_add_del_inline)
-  (BVT (clib_bihash) * h, BVT (clib_bihash_kv) * add_v, int is_add,
+static_always_inline int BV (clib_bihash_add_del_inline_with_hash)
+  (BVT (clib_bihash) * h, BVT (clib_bihash_kv) * add_v, u64 hash, int is_add,
    int (*is_stale_cb) (BVT (clib_bihash_kv) *, void *), void *arg)
 {
-  u32 bucket_index;
   BVT (clib_bihash_bucket) * b, tmp_b;
   BVT (clib_bihash_value) * v, *new_v, *save_new_v, *working_copy;
   int i, limit;
-  u64 hash, new_hash;
+  u64 new_hash;
   u32 new_log2_pages, old_log2_pages;
   u32 thread_index = os_get_thread_index ();
   int mark_bucket_linear;
   int resplit_once;
 
-  hash = BV (clib_bihash_hash) (add_v);
+  /* *INDENT-OFF* */
+  static const BVT (clib_bihash_bucket) mask = {
+    .linear_search = 1,
+    .log2_pages = -1
+  };
+  /* *INDENT-ON* */
 
-  bucket_index = hash & (h->nbuckets - 1);
-  b = &h->buckets[bucket_index];
+#if BIHASH_LAZY_INSTANTIATE
+  /*
+   * Create the table (is_add=1,2), or flunk the request now (is_add=0)
+   * Use the alloc_lock to protect the instantiate operation.
+   */
+  if (PREDICT_FALSE (h->instantiated == 0))
+    {
+      if (is_add == 0)
+        return (-1);
+
+      BV (clib_bihash_alloc_lock) (h);
+      if (h->instantiated == 0)
+        BV (clib_bihash_instantiate) (h);
+      BV (clib_bihash_alloc_unlock) (h);
+    }
+#else
+  /* Debug image: make sure the table has been instantiated */
+  ASSERT (h->instantiated != 0);
+#endif
 
-  hash >>= h->log2_nbuckets;
+  b = BV (clib_bihash_get_bucket) (h, hash);
 
   BV (clib_bihash_lock_bucket) (b);
 
   /* First elt in the bucket? */
-  if (BV (clib_bihash_bucket_is_empty) (b))
+  if (BIHASH_KVP_AT_BUCKET_LEVEL == 0 && BV (clib_bihash_bucket_is_empty) (b))
     {
       if (is_add == 0)
         {
@@ -439,10 +594,11 @@ static inline int BV (clib_bihash_add_del_inline)
       tmp_b.as_u64 = 0;         /* clears bucket lock */
       tmp_b.offset = BV (clib_bihash_get_offset) (h, v);
       tmp_b.refcnt = 1;
-      CLIB_MEMORY_BARRIER ();
+      CLIB_MEMORY_STORE_BARRIER ();
+
+      b->as_u64 = tmp_b.as_u64; /* unlocks the bucket */
+      BV (clib_bihash_increment_stat) (h, BIHASH_STAT_alloc_add, 1);
 
-      b->as_u64 = tmp_b.as_u64;
-      BV (clib_bihash_unlock_bucket) (b);
       return (0);
     }
 
@@ -450,9 +606,13 @@ static inline int BV (clib_bihash_add_del_inline)
 
   limit = BIHASH_KVP_PER_PAGE;
   v = BV (clib_bihash_get_value) (h, b->offset);
-  v += (b->linear_search == 0) ? hash & ((1 << b->log2_pages) - 1) : 0;
-  if (b->linear_search)
-    limit <<= b->log2_pages;
+  if (PREDICT_FALSE (b->as_u64 & mask.as_u64))
+    {
+      if (PREDICT_FALSE (b->linear_search))
+        limit <<= b->log2_pages;
+      else
+        v += extract_bits (hash, h->log2_nbuckets, b->log2_pages);
+    }
 
   if (is_add)
     {
@@ -469,11 +629,19 @@ static inline int BV (clib_bihash_add_del_inline)
        */
       for (i = 0; i < limit; i++)
         {
-          if (!memcmp (&(v->kvp[i]), &add_v->key, sizeof (add_v->key)))
+          if (BV (clib_bihash_key_compare) (v->kvp[i].key, add_v->key))
             {
-              CLIB_MEMORY_BARRIER ();   /* Add a delay */
-              clib_memcpy (&(v->kvp[i]), add_v, sizeof (*add_v));
+              /* Add but do not overwrite? */
+              if (is_add == 2)
+                {
+                  BV (clib_bihash_unlock_bucket) (b);
+                  return (-2);
+                }
+
+              clib_memcpy_fast (&(v->kvp[i].value),
+                                &add_v->value, sizeof (add_v->value));
               BV (clib_bihash_unlock_bucket) (b);
+              BV (clib_bihash_increment_stat) (h, BIHASH_STAT_replace, 1);
               return (0);
             }
         }
@@ -488,13 +656,15 @@ static inline int BV (clib_bihash_add_del_inline)
                * Copy the value first, so that if a reader manages
                * to match the new key, the value will be right...
                */
-              clib_memcpy (&(v->kvp[i].value),
-                           &add_v->value, sizeof (add_v->value));
-              CLIB_MEMORY_BARRIER ();   /* Make sure the value has settled */
-              clib_memcpy (&(v->kvp[i]), &add_v->key, sizeof (add_v->key));
+              clib_memcpy_fast (&(v->kvp[i].value),
+                                &add_v->value, sizeof (add_v->value));
+              CLIB_MEMORY_STORE_BARRIER ();     /* Make sure the value has settled */
+              clib_memcpy_fast (&(v->kvp[i]), &add_v->key,
+                                sizeof (add_v->key));
               b->refcnt++;
               ASSERT (b->refcnt > 0);
               BV (clib_bihash_unlock_bucket) (b);
+              BV (clib_bihash_increment_stat) (h, BIHASH_STAT_add, 1);
               return (0);
             }
         }
@@ -505,9 +675,10 @@ static inline int BV (clib_bihash_add_del_inline)
             {
               if (is_stale_cb (&(v->kvp[i]), arg))
                 {
-                  CLIB_MEMORY_BARRIER ();
-                  clib_memcpy (&(v->kvp[i]), add_v, sizeof (*add_v));
+                  clib_memcpy_fast (&(v->kvp[i]), add_v, sizeof (*add_v));
+                  CLIB_MEMORY_STORE_BARRIER ();
                   BV (clib_bihash_unlock_bucket) (b);
+                  BV (clib_bihash_increment_stat) (h, BIHASH_STAT_replace, 1);
                   return (0);
                 }
             }
@@ -519,31 +690,53 @@ static inline int BV (clib_bihash_add_del_inline)
   for (i = 0; i < limit; i++)
     {
       /* Found the key? Kill it... */
-      if (!memcmp (&(v->kvp[i]), &add_v->key, sizeof (add_v->key)))
+      if (BV (clib_bihash_key_compare) (v->kvp[i].key, add_v->key))
         {
-          memset (&(v->kvp[i]), 0xff, sizeof (*(add_v)));
+          clib_memset_u8 (&(v->kvp[i]), 0xff, sizeof (*(add_v)));
 
           /* Is the bucket empty? */
           if (PREDICT_TRUE (b->refcnt > 1))
             {
              b->refcnt--;
+              /* Switch back to the bucket-level kvp array? */
+              if (BIHASH_KVP_AT_BUCKET_LEVEL && b->refcnt == 1
+                  && b->log2_pages > 0)
+                {
+                  tmp_b.as_u64 = b->as_u64;
+                  b->offset = BV (clib_bihash_get_offset)
+                    (h, (void *) (b + 1));
+                  b->linear_search = 0;
+                  b->log2_pages = 0;
+                  /* Clean up the bucket-level kvp array */
+                  clib_memset_u8 ((b + 1), 0xff, BIHASH_KVP_PER_PAGE *
+                                  sizeof (BVT (clib_bihash_kv)));
+                  CLIB_MEMORY_STORE_BARRIER ();
+                  BV (clib_bihash_unlock_bucket) (b);
+                  BV (clib_bihash_increment_stat) (h, BIHASH_STAT_del, 1);
+                  goto free_backing_store;
+                }
+
+              CLIB_MEMORY_STORE_BARRIER ();
               BV (clib_bihash_unlock_bucket) (b);
+              BV (clib_bihash_increment_stat) (h, BIHASH_STAT_del, 1);
               return (0);
             }
           else                  /* yes, free it */
             {
               /* Save old bucket value, need log2_pages to free it */
               tmp_b.as_u64 = b->as_u64;
-              CLIB_MEMORY_BARRIER ();
 
               /* Kill and unlock the bucket */
               b->as_u64 = 0;
 
+            free_backing_store:
               /* And free the backing storage */
               BV (clib_bihash_alloc_lock) (h);
               /* Note: v currently points into the middle of the bucket */
               v = BV (clib_bihash_get_value) (h, tmp_b.offset);
               BV (value_free) (h, v, tmp_b.log2_pages);
               BV (clib_bihash_alloc_unlock) (h);
+              BV (clib_bihash_increment_stat) (h, BIHASH_STAT_del_free,
+                                               1);
               return (0);
             }
         }
@@ -562,9 +755,12 @@ static inline int BV (clib_bihash_add_del_inline)
   old_log2_pages = h->saved_bucket.log2_pages;
   new_log2_pages = old_log2_pages + 1;
   mark_bucket_linear = 0;
+  BV (clib_bihash_increment_stat) (h, BIHASH_STAT_split_add, 1);
+  BV (clib_bihash_increment_stat) (h, BIHASH_STAT_splits, old_log2_pages);
 
   working_copy = h->working_copies[thread_index];
   resplit_once = 0;
+  BV (clib_bihash_increment_stat) (h, BIHASH_STAT_splits, 1);
 
   new_v = BV (split_and_rehash) (h, working_copy, old_log2_pages,
                                  new_log2_pages);
@@ -585,7 +781,11 @@ static inline int BV (clib_bihash_add_del_inline)
             BV (split_and_rehash_linear) (h, working_copy, old_log2_pages,
                                           new_log2_pages);
           mark_bucket_linear = 1;
+          BV (clib_bihash_increment_stat) (h, BIHASH_STAT_linear, 1);
         }
+      BV (clib_bihash_increment_stat) (h, BIHASH_STAT_resplit, 1);
+      BV (clib_bihash_increment_stat) (h, BIHASH_STAT_splits,
+                                       old_log2_pages + 1);
     }
 
   /* Try to add the new entry */
@@ -594,15 +794,14 @@ static inline int BV (clib_bihash_add_del_inline)
   limit = BIHASH_KVP_PER_PAGE;
   if (mark_bucket_linear)
     limit <<= new_log2_pages;
-  new_hash >>= h->log2_nbuckets;
-  new_hash &= (1 << new_log2_pages) - 1;
-  new_v += mark_bucket_linear ? 0 : new_hash;
+  else
+    new_v += extract_bits (new_hash, h->log2_nbuckets, new_log2_pages);
 
   for (i = 0; i < limit; i++)
     {
       if (BV (clib_bihash_is_free) (&(new_v->kvp[i])))
         {
-          clib_memcpy (&(new_v->kvp[i]), add_v, sizeof (*add_v));
+          clib_memcpy_fast (&(new_v->kvp[i]), add_v, sizeof (*add_v));
           goto expand_ok;
         }
     }
@@ -622,15 +821,43 @@ expand_ok:
   tmp_b.log2_pages = new_log2_pages;
   tmp_b.offset = BV (clib_bihash_get_offset) (h, save_new_v);
   tmp_b.linear_search = mark_bucket_linear;
-  tmp_b.refcnt = h->saved_bucket.refcnt + 1;
+#if BIHASH_KVP_AT_BUCKET_LEVEL
+  /* Compensate for permanent refcount bump at the bucket level */
+  if (new_log2_pages > 0)
+#endif
+    tmp_b.refcnt = h->saved_bucket.refcnt + 1;
   ASSERT (tmp_b.refcnt > 0);
   tmp_b.lock = 0;
-  CLIB_MEMORY_BARRIER ();
+  CLIB_MEMORY_STORE_BARRIER ();
   b->as_u64 = tmp_b.as_u64;
+
+#if BIHASH_KVP_AT_BUCKET_LEVEL
+  if (h->saved_bucket.log2_pages > 0)
+    {
+#endif
+
+      /* free the old bucket, except at the bucket level if so configured */
+      v = BV (clib_bihash_get_value) (h, h->saved_bucket.offset);
+      BV (value_free) (h, v, h->saved_bucket.log2_pages);
+
+#if BIHASH_KVP_AT_BUCKET_LEVEL
+    }
+#endif
+
+  BV (clib_bihash_alloc_unlock) (h);
   return (0);
 }
 
+static_always_inline int BV (clib_bihash_add_del_inline)
+  (BVT (clib_bihash) * h, BVT (clib_bihash_kv) * add_v, int is_add,
+   int (*is_stale_cb) (BVT (clib_bihash_kv) *, void *), void *arg)
+{
+  u64 hash = BV (clib_bihash_hash) (add_v);
+  return BV (clib_bihash_add_del_inline_with_hash) (h, add_v, hash, is_add,
+                                                    is_stale_cb, arg);
+}
+
 int BV (clib_bihash_add_del)
   (BVT (clib_bihash) * h, BVT (clib_bihash_kv) * add_v, int is_add)
 {
@@ -648,46 +875,7 @@ int BV (clib_bihash_search)
   (BVT (clib_bihash) * h,
    BVT (clib_bihash_kv) * search_key, BVT (clib_bihash_kv) * valuep)
 {
-  u64 hash;
-  u32 bucket_index;
-  BVT (clib_bihash_value) * v;
-  BVT (clib_bihash_bucket) * b;
-  int i, limit;
-
-  ASSERT (valuep);
-
-  hash = BV (clib_bihash_hash) (search_key);
-
-  bucket_index = hash & (h->nbuckets - 1);
-  b = &h->buckets[bucket_index];
-
-  if (BV (clib_bihash_bucket_is_empty) (b))
-    return -1;
-
-  if (PREDICT_FALSE (b->lock))
-    {
-      volatile BVT (clib_bihash_bucket) * bv = b;
-      while (bv->lock)
-        CLIB_PAUSE ();
-    }
-
-  hash >>= h->log2_nbuckets;
-
-  v = BV (clib_bihash_get_value) (h, b->offset);
-  limit = BIHASH_KVP_PER_PAGE;
-  v += (b->linear_search == 0) ? hash & ((1 << b->log2_pages) - 1) : 0;
-  if (PREDICT_FALSE (b->linear_search))
-    limit <<= b->log2_pages;
-
-  for (i = 0; i < limit; i++)
-    {
-      if (BV (clib_bihash_key_compare) (v->kvp[i].key, search_key->key))
-        {
-          *valuep = v->kvp[i];
-          return 0;
-        }
-    }
-  return -1;
+  return BV (clib_bihash_search_inline_2) (h, search_key, valuep);
 }
 
 u8 *BV (format_bihash) (u8 * s, va_list * args)
@@ -704,9 +892,14 @@ u8 *BV (format_bihash) (u8 * s, va_list * args)
   s = format (s, "Hash table %s\n",
               h->name ? h->name : (u8 *) "(unnamed)");
 
+#if BIHASH_LAZY_INSTANTIATE
+  if (PREDICT_FALSE (alloc_arena (h) == 0))
+    return format (s, "[empty, uninitialized]");
+#endif
+
   for (i = 0; i < h->nbuckets; i++)
     {
-      b = &h->buckets[i];
+      b = BV (clib_bihash_get_bucket) (h, i);
       if (BV (clib_bihash_bucket_is_empty) (b))
         {
           if (verbose > 1)
@@ -721,8 +914,9 @@ u8 *BV (format_bihash) (u8 * s, va_list * args)
 
       if (verbose)
         {
-          s = format (s, "[%d]: heap offset %lld, len %d, linear %d\n", i,
-                      b->offset, (1 << b->log2_pages), b->linear_search);
+          s = format
+            (s, "[%d]: heap offset %lld, len %d, refcnt %d, linear %d\n", i,
+             b->offset, (1 << b->log2_pages), b->refcnt, b->linear_search);
         }
 
       v = BV (clib_bihash_get_value) (h, b->offset);
@@ -743,7 +937,7 @@ u8 *BV (format_bihash) (u8 * s, va_list * args)
                 {
                   s = format (s, "    %d: %U\n",
                               j * BIHASH_KVP_PER_PAGE + k,
-                              h->fmt_fn, &(v->kvp[k]));
+                              h->fmt_fn, &(v->kvp[k]), verbose);
                 }
               else
                 {
@@ -791,16 +985,22 @@ u8 *BV (format_bihash) (u8 * s, va_list * args)
 }
 
 void BV (clib_bihash_foreach_key_value_pair)
-  (BVT (clib_bihash) * h, void *callback, void *arg)
+  (BVT (clib_bihash) * h,
+   BV (clib_bihash_foreach_key_value_pair_cb) cb, void *arg)
 {
   int i, j, k;
   BVT (clib_bihash_bucket) * b;
   BVT (clib_bihash_value) * v;
-  void (*fp) (BVT (clib_bihash_kv) *, void *) = callback;
+
+
+#if BIHASH_LAZY_INSTANTIATE
+  if (PREDICT_FALSE (alloc_arena (h) == 0))
+    return;
+#endif
 
   for (i = 0; i < h->nbuckets; i++)
     {
-      b = &h->buckets[i];
+      b = BV (clib_bihash_get_bucket) (h, i);
 
       if (BV (clib_bihash_bucket_is_empty) (b))
         continue;
@@ -812,7 +1012,8 @@ void BV (clib_bihash_foreach_key_value_pair)
           if (BV (clib_bihash_is_free) (&v->kvp[k]))
             continue;
 
-          (*fp) (&v->kvp[k], arg);
+          if (BIHASH_WALK_STOP == cb (&v->kvp[k], arg))
+            return;
           /*
            * In case the callback deletes the last entry in the bucket...
           */
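
For readers who want to see how the template touched by this diff is consumed, here is a minimal usage sketch. It is not part of the change above: it assumes the stock 8_8 instantiation (clib_bihash_8_8_t) that ships with vppinfra, a program linked against libvppinfra, and arbitrary table-size parameters chosen only for illustration. With the change above, a lazily-instantiated table does not reserve or map its arena until the first add (or until instantiate_immediately is set through clib_bihash_init2), and an is_add=0 call against a never-instantiated table returns -1 rather than touching memory.

/* Editor's sketch, not from the patch: exercise the 8_8 bihash variant. */
#include <vppinfra/mem.h>
#include <vppinfra/bihash_8_8.h>

int
main (int argc, char *argv[])
{
  clib_bihash_8_8_t t;
  clib_bihash_kv_8_8_t kv, result;

  /* vppinfra needs a clib heap before the table header and vectors are used */
  clib_mem_init (0, 64 << 20);

  /* nbuckets is rounded up to a power of two; memory_size bounds the arena */
  clib_bihash_init_8_8 (&t, "example", 1024 /* nbuckets */,
                        1 << 20 /* memory_size */);

  kv.key = 42;
  kv.value = 99;
  /* is_add = 1: with lazy instantiation, this first add maps the arena */
  clib_bihash_add_del_8_8 (&t, &kv, 1);

  kv.key = 42;
  if (clib_bihash_search_8_8 (&t, &kv, &result) == 0)
    {
      /* result.value is 99 here */
    }

  clib_bihash_add_del_8_8 (&t, &kv, 0);  /* is_add = 0: delete */
  clib_bihash_free_8_8 (&t);
  return 0;
}

Callers that need the new knobs introduced here (instantiate_immediately, dont_add_to_all_bihash_list, fmt_fn) can instead fill in a BVT (clib_bihash_init2_args) structure and call clib_bihash_init2_8_8 directly; as the clib_bihash_init hunk above shows, the positional init used in this sketch is now a thin wrapper around that path.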