/* Diff hunk: tail of BV (clib_bihash_init) — the function-name line and some
   context are elided above.  '+' lines are additions introducing the per-bucket
   kvp cache and its hit/miss counters. */
(BVT (clib_bihash) * h, char *name, u32 nbuckets, uword memory_size)
{
void *oldheap;
+ int i;
/* Round the requested bucket count up to a power of two. */
nbuckets = 1 << (max_log2 (nbuckets));
h->name = (u8 *) name;
h->nbuckets = nbuckets;
h->log2_nbuckets = max_log2 (nbuckets);
+ h->cache_hits = 0;
+ h->cache_misses = 0;
h->mheap = mheap_alloc (0 /* use VM */ , memory_size);
/* NOTE(review): the assignment of oldheap (clib_mem_set_heap (h->mheap))
   appears to be in elided diff context — confirm against the full file. */
vec_validate_aligned (h->buckets, nbuckets - 1, CLIB_CACHE_LINE_BYTES);
h->writer_lock = clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES,
CLIB_CACHE_LINE_BYTES);
+ h->writer_lock[0] = 0;
+
/* Start every bucket with its kvp cache in the reset (empty) state. */
+ for (i = 0; i < nbuckets; i++)
+ BV (clib_bihash_reset_cache) (h->buckets + i);
clib_mem_set_heap (oldheap);
+
/* No user-supplied kvp formatter until set_kvp_format_fn is called. */
+ h->fmt_fn = NULL;
+}
+
+/* Install a user-supplied formatter for key/value pairs; when set, it is
+   preferred over the default BV (format_bihash_kvp) in format_bihash. */
+void BV (clib_bihash_set_kvp_format_fn) (BVT (clib_bihash) * h,
+ format_function_t * fmt_fn)
+{
+ h->fmt_fn = fmt_fn;
}
/* NOTE(review): the body of clib_bihash_free has been elided by the diff
   context; only the signature and closing brace are visible here. */
void BV (clib_bihash_free) (BVT (clib_bihash) * h)
}
/* Snapshot a bucket's value page(s) into this thread's private working copy
   so the writer can modify them off to the side.  This hunk switches the
   bucket type to the templated BVT (clib_bihash_bucket) and adds the
   bucket lock. */
static inline void
-BV (make_working_copy) (BVT (clib_bihash) * h, clib_bihash_bucket_t * b)
+BV (make_working_copy) (BVT (clib_bihash) * h, BVT (clib_bihash_bucket) * b)
{
BVT (clib_bihash_value) * v;
- clib_bihash_bucket_t working_bucket __attribute__ ((aligned (8)));
+ BVT (clib_bihash_bucket) working_bucket __attribute__ ((aligned (8)));
void *oldheap;
BVT (clib_bihash_value) * working_copy;
u32 thread_index = os_get_thread_index ();
{
oldheap = clib_mem_set_heap (h->mheap);
vec_validate (h->working_copies, thread_index);
/* ~0 marks "no working copy length yet"; one-step init replaces the old
   validate-then-assign pair (same ~0/-1 bit pattern). */
- vec_validate (h->working_copy_lengths, thread_index);
- h->working_copy_lengths[thread_index] = -1;
+ vec_validate_init_empty (h->working_copy_lengths, thread_index, ~0);
clib_mem_set_heap (oldheap);
}
clib_mem_set_heap (oldheap);
+ /* Lock the bucket... */
/* Spin until the bucket lock is acquired; per the note in add_del below,
   this also leaves the bucket's kvp cache disabled. */
+ while (BV (clib_bihash_lock_bucket) (b) == 0)
+ ;
+
v = BV (clib_bihash_get_value) (h, b->offset);
/* NOTE(review): working_copy's assignment is in elided diff context. */
clib_memcpy (working_copy, v, sizeof (*v) * (1 << b->log2_pages));
/* Diff hunks from BV (clib_bihash_add_del) — add or delete a key/value pair.
   The function-name line and large parts of the body are elided.  Additions
   introduce bucket reference counting (refcnt), a free_old_bucket path that
   releases a bucket's pages when its last entry is deleted, and cache
   reset/unlock on the way out. */
(BVT (clib_bihash) * h, BVT (clib_bihash_kv) * add_v, int is_add)
{
u32 bucket_index;
- clib_bihash_bucket_t *b, tmp_b;
+ BVT (clib_bihash_bucket) * b, tmp_b;
BVT (clib_bihash_value) * v, *new_v, *save_new_v, *working_copy;
int rv = 0;
int i, limit;
hash >>= h->log2_nbuckets;
+ tmp_b.linear_search = 0;
+
/* Single-writer model: spin on the global writer lock. */
while (__sync_lock_test_and_set (h->writer_lock, 1))
;
*v->kvp = *add_v;
tmp_b.as_u64 = 0;
tmp_b.offset = BV (clib_bihash_get_offset) (h, v);
/* First entry in a fresh bucket. */
+ tmp_b.refcnt = 1;
b->as_u64 = tmp_b.as_u64;
goto unlock;
}
+ /* Note: this leaves the cache disabled */
BV (make_working_copy) (h, b);
v = BV (clib_bihash_get_value) (h, h->saved_bucket.offset);
clib_memcpy (&(v->kvp[i]), add_v, sizeof (*add_v));
CLIB_MEMORY_BARRIER ();
b->as_u64 = h->saved_bucket.as_u64;
/* New entry added to an existing bucket. */
+ b->refcnt++;
goto unlock;
}
}
{
/* Delete: mark the slot empty (all-ones key). */
memset (&(v->kvp[i]), 0xff, sizeof (*(add_v)));
CLIB_MEMORY_BARRIER ();
- b->as_u64 = h->saved_bucket.as_u64;
- goto unlock;
/* If other entries remain, just drop the refcount; otherwise free the
   bucket's backing pages via free_old_bucket (tmp_b zeroed = empty). */
+ if (PREDICT_TRUE (h->saved_bucket.refcnt > 1))
+ {
+ h->saved_bucket.refcnt -= 1;
+ b->as_u64 = h->saved_bucket.as_u64;
+ goto unlock;
+ }
+ else
+ {
+ tmp_b.as_u64 = 0;
+ goto free_old_bucket;
+ }
}
}
rv = -3;
goto try_resplit;
expand_ok:
/* The running linear_buckets counter is removed; format_bihash now derives
   the count by scanning buckets. */
- /* Keep track of the number of linear-scan buckets */
- if (tmp_b.linear_search ^ mark_bucket_linear)
- h->linear_buckets += (mark_bucket_linear == 1) ? 1 : -1;
-
tmp_b.log2_pages = new_log2_pages;
tmp_b.offset = BV (clib_bihash_get_offset) (h, save_new_v);
tmp_b.linear_search = mark_bucket_linear;
/* Expanded bucket carries the old entries plus the one being added. */
+ tmp_b.refcnt = h->saved_bucket.refcnt + 1;
+
+free_old_bucket:
CLIB_MEMORY_BARRIER ();
b->as_u64 = tmp_b.as_u64;
v = BV (clib_bihash_get_value) (h, h->saved_bucket.offset);
+
/* Free using the saved bucket's own page count rather than a separately
   tracked old_log2_pages local. */
- BV (value_free) (h, v, old_log2_pages);
+ BV (value_free) (h, v, h->saved_bucket.log2_pages);
unlock:
/* Empty the (stale) kvp cache, then re-enable it by unlocking. */
+ BV (clib_bihash_reset_cache) (b);
+ BV (clib_bihash_unlock_bucket) (b);
CLIB_MEMORY_BARRIER ();
h->writer_lock[0] = 0;
return rv;
}
/* Diff hunks from BV (clib_bihash_search) — lookup with an optional
   per-bucket kvp cache consulted before the main table.  Parts of the body
   (hash computation, bucket selection, the kvp scan loop) are elided. */
int BV (clib_bihash_search)
/* h loses const: cache fills and the hit/miss counters mutate the table. */
- (const BVT (clib_bihash) * h,
+ (BVT (clib_bihash) * h,
BVT (clib_bihash_kv) * search_key, BVT (clib_bihash_kv) * valuep)
{
u64 hash;
u32 bucket_index;
BVT (clib_bihash_value) * v;
- clib_bihash_bucket_t *b;
+#if BIHASH_KVP_CACHE_SIZE > 0
+ BVT (clib_bihash_kv) * kvp;
+#endif
+ BVT (clib_bihash_bucket) * b;
int i, limit;
ASSERT (valuep);
if (b->offset == 0)
return -1;
+#if BIHASH_KVP_CACHE_SIZE > 0
+ /* Check the cache, if currently enabled */
/* Bit 15 of cache_lru set means a writer holds the bucket locked and the
   cache contents cannot be trusted. */
+ if (PREDICT_TRUE ((b->cache_lru & (1 << 15)) == 0))
+ {
+ limit = BIHASH_KVP_CACHE_SIZE;
+ kvp = b->cache;
+ for (i = 0; i < limit; i++)
+ {
+ if (BV (clib_bihash_key_compare) (kvp[i].key, search_key->key))
+ {
+ *valuep = kvp[i];
+ h->cache_hits++;
+ return 0;
+ }
+ }
+ }
+#endif
+
hash >>= h->log2_nbuckets;
v = BV (clib_bihash_get_value) (h, b->offset);
if (BV (clib_bihash_key_compare) (v->kvp[i].key, search_key->key))
{
*valuep = v->kvp[i];
+
+#if BIHASH_KVP_CACHE_SIZE > 0
+ u8 cache_slot;
+ /* Shut off the cache */
/* Best-effort cache fill: if the bucket lock is unavailable (a writer or
   another reader owns it), skip the fill rather than spin. */
+ if (BV (clib_bihash_lock_bucket) (b))
+ {
+ cache_slot = BV (clib_bihash_get_lru) (b);
+ b->cache[cache_slot] = v->kvp[i];
+ BV (clib_bihash_update_lru) (b, cache_slot);
+
+ /* Reenable the cache */
+ BV (clib_bihash_unlock_bucket) (b);
+ h->cache_misses++;
+ }
+#endif
return 0;
}
}
return -1;
}
+/* Format a bucket's kvp-cache state: enabled/disabled flag plus the 3-bit
+   LRU rank of each cache slot.  Usage: %U format_bihash_lru, bucket-ptr. */
+u8 *BV (format_bihash_lru) (u8 * s, va_list * args)
+{
+/* Guard on the cache size, not the kvp size: cache_lru and
+   BIHASH_KVP_CACHE_SIZE only exist when the cache is configured
+   (matches the BIHASH_KVP_CACHE_SIZE > 0 guards in clib_bihash_search). */
+#if BIHASH_KVP_CACHE_SIZE > 0
+ int i;
+ BVT (clib_bihash_bucket) * b = va_arg (*args, BVT (clib_bihash_bucket) *);
+ u16 cache_lru = b->cache_lru;
+
+ /* Bit 15 of cache_lru is the "cache disabled" (bucket locked) flag. */
+ s = format (s, "cache %s, order ", cache_lru & (1 << 15) ? "on" : "off");
+
+ /* Three bits of LRU rank per cache slot. */
+ for (i = 0; i < BIHASH_KVP_CACHE_SIZE; i++)
+ s = format (s, "[%d] ", ((cache_lru >> (3 * i)) & 7));
+
+ return (s);
+#else
+ return format (s, "cache not configured");
+#endif
+}
+
+/* Out-of-line wrapper around the inline LRU update; no-op when the kvp
+   cache is not configured. */
+void
+BV (clib_bihash_update_lru_not_inline) (BVT (clib_bihash_bucket) * b, u8 slot)
+{
+/* Guard on BIHASH_KVP_CACHE_SIZE (not BIHASH_KVP_SIZE): the LRU machinery
+   exists only when the per-bucket cache is configured, consistent with the
+   other cache guards in this file. */
+#if BIHASH_KVP_CACHE_SIZE > 0
+ BV (clib_bihash_update_lru) (b, slot);
+#endif
+}
+
/* Diff hunks from BV (format_bihash) — the %U formatter for a whole table.
   The bucket/page iteration loops are elided.  Additions: per-call counting
   of active and linear buckets, the user kvp formatter, free-list depth
   reporting, cache statistics, and mheap usage. */
u8 *BV (format_bihash) (u8 * s, va_list * args)
{
BVT (clib_bihash) * h = va_arg (*args, BVT (clib_bihash) *);
int verbose = va_arg (*args, int);
- clib_bihash_bucket_t *b;
+ BVT (clib_bihash_bucket) * b;
BVT (clib_bihash_value) * v;
int i, j, k;
u64 active_elements = 0;
+ u64 active_buckets = 0;
+ u64 linear_buckets = 0;
s = format (s, "Hash table %s\n", h->name ? h->name : (u8 *) "(unnamed)");
continue;
}
+ active_buckets++;
+
/* Linear-scan buckets are now counted here instead of via the removed
   h->linear_buckets running counter. */
+ if (b->linear_search)
+ linear_buckets++;
+
if (verbose)
{
s = format (s, "[%d]: heap offset %d, len %d, linear %d\n", i,
}
if (verbose)
{
/* Prefer the user-installed kvp formatter when one has been set. */
- s = format (s, " %d: %U\n",
- j * BIHASH_KVP_PER_PAGE + k,
- BV (format_bihash_kvp), &(v->kvp[k]));
+ if (h->fmt_fn)
+ {
+ s = format (s, " %d: %U\n",
+ j * BIHASH_KVP_PER_PAGE + k,
+ h->fmt_fn, &(v->kvp[k]));
+ }
+ else
+ {
+ s = format (s, " %d: %U\n",
+ j * BIHASH_KVP_PER_PAGE + k,
+ BV (format_bihash_kvp), &(v->kvp[k]));
+ }
}
active_elements++;
}
}
}
- s = format (s, " %lld active elements\n", active_elements);
+ s = format (s, " %lld active elements %lld active buckets\n",
+ active_elements, active_buckets);
s = format (s, " %d free lists\n", vec_len (h->freelists));
- s = format (s, " %d linear search buckets\n", h->linear_buckets);
/* Walk each size-class free list; list i holds pages of 2^i kvps. */
+ for (i = 0; i < vec_len (h->freelists); i++)
+ {
+ u32 nfree = 0;
+ BVT (clib_bihash_value) * free_elt;
+
+ free_elt = h->freelists[i];
+ while (free_elt)
+ {
+ nfree++;
+ free_elt = free_elt->next_free;
+ }
+
+ s = format (s, " [len %d] %u free elts\n", 1 << i, nfree);
+ }
+
+ s = format (s, " %lld linear search buckets\n", linear_buckets);
+ s = format (s, " %lld cache hits, %lld cache misses\n",
+ h->cache_hits, h->cache_misses);
+ if (h->mheap)
+ s = format (s, " mheap: %U", format_mheap, h->mheap, 0 /* verbose */ );
return s;
}
/* NOTE(review): head of the foreach-key-value-pair walker; the function-name
   line is above this view and the body continues past it.  Only the bucket
   type change is visible in this hunk. */
(BVT (clib_bihash) * h, void *callback, void *arg)
{
int i, j, k;
- clib_bihash_bucket_t *b;
+ BVT (clib_bihash_bucket) * b;
BVT (clib_bihash_value) * v;
void (*fp) (BVT (clib_bihash_kv) *, void *) = callback;