oldheap = clib_mem_set_heap (h->mheap);
vec_validate (h->freelists, log2_pages);
- vec_validate_aligned (rv, (1 << log2_pages) - 1, CLIB_CACHE_LINE_BYTES);
+ rv = clib_mem_alloc_aligned ((sizeof (*rv) * (1 << log2_pages)),
+ CLIB_CACHE_LINE_BYTES);
clib_mem_set_heap (oldheap);
goto initialize;
}
initialize:
ASSERT (rv);
- ASSERT (vec_len (rv) == (1 << log2_pages));
/*
* Latest gcc complains that the length arg is zero
* if we replace (1<<log2_pages) with vec_len(rv).
}
static void
-BV (value_free) (BVT (clib_bihash) * h, BVT (clib_bihash_value) * v)
+BV (value_free) (BVT (clib_bihash) * h, BVT (clib_bihash_value) * v,
+ u32 log2_pages)
{
- u32 log2_pages;
-
ASSERT (h->writer_lock[0]);
- log2_pages = min_log2 (vec_len (v));
-
ASSERT (vec_len (h->freelists) > log2_pages);
v->next_free = h->freelists[log2_pages];
clib_bihash_bucket_t working_bucket __attribute__ ((aligned (8)));
void *oldheap;
BVT (clib_bihash_value) * working_copy;
- u32 cpu_number = os_get_cpu_number ();
+ u32 thread_index = os_get_thread_index ();
+ int log2_working_copy_length;
- if (cpu_number >= vec_len (h->working_copies))
+ if (thread_index >= vec_len (h->working_copies))
{
oldheap = clib_mem_set_heap (h->mheap);
- vec_validate (h->working_copies, cpu_number);
+ vec_validate (h->working_copies, thread_index);
+ vec_validate (h->working_copy_lengths, thread_index);
+ h->working_copy_lengths[thread_index] = -1;
clib_mem_set_heap (oldheap);
}
* updates from multiple threads will not result in sporadic, spurious
* lookup failures.
*/
- working_copy = h->working_copies[cpu_number];
+ working_copy = h->working_copies[thread_index];
+ log2_working_copy_length = h->working_copy_lengths[thread_index];
h->saved_bucket.as_u64 = b->as_u64;
oldheap = clib_mem_set_heap (h->mheap);
- if ((1 << b->log2_pages) > vec_len (working_copy))
+ if (b->log2_pages > log2_working_copy_length)
{
- vec_validate_aligned (working_copy, (1 << b->log2_pages) - 1,
- sizeof (u64));
- h->working_copies[cpu_number] = working_copy;
+ if (working_copy)
+ clib_mem_free (working_copy);
+
+ working_copy = clib_mem_alloc_aligned
+ (sizeof (working_copy[0]) * (1 << b->log2_pages),
+ CLIB_CACHE_LINE_BYTES);
+ h->working_copy_lengths[thread_index] = b->log2_pages;
+ h->working_copies[thread_index] = working_copy;
}
- _vec_len (working_copy) = 1 << b->log2_pages;
clib_mem_set_heap (oldheap);
v = BV (clib_bihash_get_value) (h, b->offset);
working_bucket.offset = BV (clib_bihash_get_offset) (h, working_copy);
CLIB_MEMORY_BARRIER ();
b->as_u64 = working_bucket.as_u64;
- h->working_copies[cpu_number] = working_copy;
+ h->working_copies[thread_index] = working_copy;
}
/*
 * Split a full bucket: allocate a value array twice as large (or larger)
 * and rehash every occupied KVP from the old pages into its new home page.
 *
 * h              - the bihash table (writer lock must be held by caller)
 * old_values     - base of the old value-page array
 * old_log2_pages - log2 of the old page count (source size)
 * new_log2_pages - log2 of the new page count (destination size)
 *
 * Returns the new value-page array, or 0 if a home page overflowed
 * (pinned collisions); in that case the new pages are freed and the
 * caller is expected to retry with a larger size or fall back to a
 * linear-search bucket.
 */
static
BVT (clib_bihash_value) *
BV (split_and_rehash)
  (BVT (clib_bihash) * h,
   BVT (clib_bihash_value) * old_values, u32 old_log2_pages,
   u32 new_log2_pages)
{
  BVT (clib_bihash_value) * new_values, *new_v;
  int i, j, length_in_kvs;

  new_values = BV (value_alloc) (h, new_log2_pages);
  /* KVPs are contiguous across the old pages; walk them as one array */
  length_in_kvs = (1 << old_log2_pages) * BIHASH_KVP_PER_PAGE;

  for (i = 0; i < length_in_kvs; i++)
    {
      u64 new_hash;

      /* Entry not in use? Forget it */
      if (BV (clib_bihash_is_free) (&(old_values->kvp[i])))
	continue;

      /* rehash the item onto its new home-page */
      new_hash = BV (clib_bihash_hash) (&(old_values->kvp[i]));
      new_hash >>= h->log2_nbuckets;
      new_hash &= (1 << new_log2_pages) - 1;
      new_v = &new_values[new_hash];

      /* Across the new home-page */
      for (j = 0; j < BIHASH_KVP_PER_PAGE; j++)
	{
	  /* Empty slot */
	  if (BV (clib_bihash_is_free) (&(new_v->kvp[j])))
	    {
	      clib_memcpy (&(new_v->kvp[j]), &(old_values->kvp[i]),
			   sizeof (new_v->kvp[j]));
	      goto doublebreak;
	    }
	}
      /* Crap. Tell caller to try again */
      BV (value_free) (h, new_values, new_log2_pages);
      return 0;
    doublebreak:;
    }
  return new_values;
}
- for (k = 0; k < BIHASH_KVP_PER_PAGE; k++)
- {
- if (BV (clib_bihash_is_free) (&(new_v->kvp[k])))
- {
- clib_memcpy (&(new_v->kvp[k]), &(v->kvp[j]),
- sizeof (new_v->kvp[k]));
- goto doublebreak;
- }
- }
- /* Crap. Tell caller to try again */
- BV (value_free) (h, new_values);
- return 0;
+static
+BVT (clib_bihash_value) *
+BV (split_and_rehash_linear)
+ (BVT (clib_bihash) * h,
+ BVT (clib_bihash_value) * old_values, u32 old_log2_pages,
+ u32 new_log2_pages)
+{
+ BVT (clib_bihash_value) * new_values;
+ int i, j, new_length, old_length;
+
+ new_values = BV (value_alloc) (h, new_log2_pages);
+ new_length = (1 << new_log2_pages) * BIHASH_KVP_PER_PAGE;
+ old_length = (1 << old_log2_pages) * BIHASH_KVP_PER_PAGE;
+
+ j = 0;
+ /* Across the old value array */
+ for (i = 0; i < old_length; i++)
+ {
+ /* Find a free slot in the new linear scan bucket */
+ for (; j < new_length; j++)
+ {
+ /* Old value not in use? Forget it. */
+ if (BV (clib_bihash_is_free) (&(old_values->kvp[i])))
+ goto doublebreak;
+
+ /* New value should never be in use */
+ if (BV (clib_bihash_is_free) (&(new_values->kvp[j])))
+ {
+ /* Copy the old value and move along */
+ clib_memcpy (&(new_values->kvp[j]), &(old_values->kvp[i]),
+ sizeof (new_values->kvp[j]));
+ j++;
+ goto doublebreak;
}
- doublebreak:
- ;
}
- v++;
+ /* This should never happen... */
+ clib_warning ("BUG: linear rehash failed!");
+ BV (value_free) (h, new_values, new_log2_pages);
+ return 0;
+
+ doublebreak:;
}
return new_values;
}
u32 bucket_index;
clib_bihash_bucket_t *b, tmp_b;
BVT (clib_bihash_value) * v, *new_v, *save_new_v, *working_copy;
- u32 value_index;
int rv = 0;
- int i;
+ int i, limit;
u64 hash, new_hash;
- u32 new_log2_pages;
- u32 cpu_number = os_get_cpu_number ();
+ u32 new_log2_pages, old_log2_pages;
+ u32 thread_index = os_get_thread_index ();
+ int mark_bucket_linear;
+ int resplit_once;
hash = BV (clib_bihash_hash) (add_v);
}
v = BV (value_alloc) (h, 0);
+
*v->kvp = *add_v;
tmp_b.as_u64 = 0;
tmp_b.offset = BV (clib_bihash_get_offset) (h, v);
BV (make_working_copy) (h, b);
v = BV (clib_bihash_get_value) (h, h->saved_bucket.offset);
- value_index = hash & ((1 << h->saved_bucket.log2_pages) - 1);
- v += value_index;
+
+ limit = BIHASH_KVP_PER_PAGE;
+ v += (b->linear_search == 0) ? hash & ((1 << b->log2_pages) - 1) : 0;
+ if (b->linear_search)
+ limit <<= b->log2_pages;
if (is_add)
{
* For obvious (in hindsight) reasons, see if we're supposed to
* replace an existing key, then look for an empty slot.
*/
- for (i = 0; i < BIHASH_KVP_PER_PAGE; i++)
+ for (i = 0; i < limit; i++)
{
if (!memcmp (&(v->kvp[i]), &add_v->key, sizeof (add_v->key)))
{
goto unlock;
}
}
- for (i = 0; i < BIHASH_KVP_PER_PAGE; i++)
+ for (i = 0; i < limit; i++)
{
if (BV (clib_bihash_is_free) (&(v->kvp[i])))
{
}
else
{
- for (i = 0; i < BIHASH_KVP_PER_PAGE; i++)
+ for (i = 0; i < limit; i++)
{
if (!memcmp (&(v->kvp[i]), &add_v->key, sizeof (add_v->key)))
{
goto unlock;
}
- new_log2_pages = h->saved_bucket.log2_pages + 1;
+ old_log2_pages = h->saved_bucket.log2_pages;
+ new_log2_pages = old_log2_pages + 1;
+ mark_bucket_linear = 0;
-expand_again:
- working_copy = h->working_copies[cpu_number];
- new_v = BV (split_and_rehash) (h, working_copy, new_log2_pages);
+ working_copy = h->working_copies[thread_index];
+ resplit_once = 0;
+
+ new_v = BV (split_and_rehash) (h, working_copy, old_log2_pages,
+ new_log2_pages);
if (new_v == 0)
{
+ try_resplit:
+ resplit_once = 1;
new_log2_pages++;
- goto expand_again;
+ /* Try re-splitting. If that fails, fall back to linear search */
+ new_v = BV (split_and_rehash) (h, working_copy, old_log2_pages,
+ new_log2_pages);
+ if (new_v == 0)
+ {
+ mark_linear:
+ new_log2_pages--;
+ /* pinned collisions, use linear search */
+ new_v =
+ BV (split_and_rehash_linear) (h, working_copy, old_log2_pages,
+ new_log2_pages);
+ mark_bucket_linear = 1;
+ }
}
/* Try to add the new entry */
save_new_v = new_v;
new_hash = BV (clib_bihash_hash) (add_v);
+ limit = BIHASH_KVP_PER_PAGE;
+ if (mark_bucket_linear)
+ limit <<= new_log2_pages;
new_hash >>= h->log2_nbuckets;
- new_hash &= (1 << min_log2 (vec_len (new_v))) - 1;
- new_v += new_hash;
+ new_hash &= (1 << new_log2_pages) - 1;
+ new_v += mark_bucket_linear ? 0 : new_hash;
- for (i = 0; i < BIHASH_KVP_PER_PAGE; i++)
+ for (i = 0; i < limit; i++)
{
if (BV (clib_bihash_is_free) (&(new_v->kvp[i])))
{
goto expand_ok;
}
}
+
/* Crap. Try again */
- new_log2_pages++;
- BV (value_free) (h, save_new_v);
- goto expand_again;
+ BV (value_free) (h, save_new_v, new_log2_pages);
+ /*
+ * If we've already doubled the size of the bucket once,
+ * fall back to linear search now.
+ */
+ if (resplit_once)
+ goto mark_linear;
+ else
+ goto try_resplit;
expand_ok:
- tmp_b.log2_pages = min_log2 (vec_len (save_new_v));
+ /* Keep track of the number of linear-scan buckets */
+ if (tmp_b.linear_search ^ mark_bucket_linear)
+ h->linear_buckets += (mark_bucket_linear == 1) ? 1 : -1;
+
+ tmp_b.log2_pages = new_log2_pages;
tmp_b.offset = BV (clib_bihash_get_offset) (h, save_new_v);
+ tmp_b.linear_search = mark_bucket_linear;
+
CLIB_MEMORY_BARRIER ();
b->as_u64 = tmp_b.as_u64;
v = BV (clib_bihash_get_value) (h, h->saved_bucket.offset);
- BV (value_free) (h, v);
+ BV (value_free) (h, v, old_log2_pages);
unlock:
CLIB_MEMORY_BARRIER ();
{
u64 hash;
u32 bucket_index;
- uword value_index;
BVT (clib_bihash_value) * v;
clib_bihash_bucket_t *b;
- int i;
+ int i, limit;
ASSERT (valuep);
hash >>= h->log2_nbuckets;
v = BV (clib_bihash_get_value) (h, b->offset);
- value_index = hash & ((1 << b->log2_pages) - 1);
- v += value_index;
+ limit = BIHASH_KVP_PER_PAGE;
+ v += (b->linear_search == 0) ? hash & ((1 << b->log2_pages) - 1) : 0;
+ if (PREDICT_FALSE (b->linear_search))
+ limit <<= b->log2_pages;
- for (i = 0; i < BIHASH_KVP_PER_PAGE; i++)
+ for (i = 0; i < limit; i++)
{
if (BV (clib_bihash_key_compare) (v->kvp[i].key, search_key->key))
{
if (verbose)
{
- s = format (s, "[%d]: heap offset %d, len %d\n", i,
- b->offset, (1 << b->log2_pages));
+ s = format (s, "[%d]: heap offset %d, len %d, linear %d\n", i,
+ b->offset, (1 << b->log2_pages), b->linear_search);
}
v = BV (clib_bihash_get_value) (h, b->offset);
s = format (s, " %lld active elements\n", active_elements);
s = format (s, " %d free lists\n", vec_len (h->freelists));
+ s = format (s, " %d linear search buckets\n", h->linear_buckets);
return s;
}