#endif
/* Shared-memory (SVM) variant: pull in memfd/fcntl plumbing */
#ifdef BIHASH_32_64_SVM
#include <vppinfra/linux/syscall.h>
#include <fcntl.h>
#define F_LINUX_SPECIFIC_BASE 1024
#define BIHASH_FREELIST_LENGTH 17
#endif
/* Log2 of the hugepage size used when mapping the arena.
   Default is 2MB (21); use 30 for 1GB pages. */
#ifndef BIHASH_LOG2_HUGEPAGE_SIZE
#define BIHASH_LOG2_HUGEPAGE_SIZE 21
#endif

/* Token-pasting helpers: two-level expansion so BIHASH_TYPE is
   macro-expanded before being pasted onto the identifier. */
#define _bv(a,b) a##b
#define __bv(a,b) _bv(a,b)
#define BV(a) __bv(a,BIHASH_TYPE)
*/
u64 alloc_arena_next; /* Next offset from alloc_arena to allocate, definitely NOT a constant */
u64 alloc_arena_size; /* Size of the arena */
+ u64 alloc_arena_mapped; /* Size of the mapped memory in the arena */
/* Two SVM pointers stored as 8-byte integers */
u64 alloc_lock_as_u64;
u64 buckets_as_u64;
u32 nbuckets; /* Number of buckets */
/* Set when header valid */
volatile u32 ready;
- u64 pad[2];
+ u64 pad[1];
}) BVT (clib_bihash_shared_header);
/* *INDENT-ON* */
STATIC_ASSERT_SIZEOF (BVT (clib_bihash_shared_header), 8 * sizeof (u64));
+typedef
+BVS (clib_bihash_alloc_chunk)
+{
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
+
+ /* chunk size */
+ uword size;
+
+ /* pointer to the next allocation */
+ u8 *next_alloc;
+
+ /* number of bytes left in this chunk */
+ uword bytes_left;
+
+ /* doubly linked list of heap allocated chunks */
+ BVS (clib_bihash_alloc_chunk) * prev, *next;
+
+} BVT (clib_bihash_alloc_chunk);
+
typedef
BVS (clib_bihash)
{
u32 nbuckets;
u32 log2_nbuckets;
+ u64 memory_size;
u8 *name;
+ format_function_t *fmt_fn;
+ void *heap;
+ BVT (clib_bihash_alloc_chunk) * chunks;
u64 *freelists;
#endif
u64 alloc_arena; /* Base of the allocation arena */
+ volatile u8 instantiated;
+ u8 dont_add_to_all_bihash_list;
/**
* A custom format function to print the Key and Value of bihash_key instead of default hexdump
*/
- format_function_t *fmt_fn;
+ format_function_t *kvp_fmt_fn;
/** Optional statistics-gathering callback */
#if BIHASH_ENABLE_STATS
} BVT (clib_bihash);
+typedef struct
+{
+ BVT (clib_bihash) * h;
+ char *name;
+ u32 nbuckets;
+ uword memory_size;
+ format_function_t *kvp_fmt_fn;
+ u8 instantiate_immediately;
+ u8 dont_add_to_all_bihash_list;
+} BVT (clib_bihash_init2_args);
+
+extern void **clib_all_bihashes;
+
/*
 * Arena-field accessors. In the SVM (shared-memory) case the shared
 * header is reached through a pointer ((h)->sh)->..., otherwise it is
 * embedded in the table ((h)->sh.  ...).
 */
#if BIHASH_32_64_SVM
#undef alloc_arena_next
#undef alloc_arena_size
#undef alloc_arena_mapped
#undef alloc_arena
#undef CLIB_BIHASH_READY_MAGIC
#define alloc_arena_next(h) (((h)->sh)->alloc_arena_next)
#define alloc_arena_size(h) (((h)->sh)->alloc_arena_size)
#define alloc_arena_mapped(h) (((h)->sh)->alloc_arena_mapped)
#define alloc_arena(h) ((h)->alloc_arena)
#define CLIB_BIHASH_READY_MAGIC 0xFEEDFACE
#else
#undef alloc_arena_next
#undef alloc_arena_size
#undef alloc_arena_mapped
#undef alloc_arena
#undef CLIB_BIHASH_READY_MAGIC
#define alloc_arena_next(h) ((h)->sh.alloc_arena_next)
#define alloc_arena_size(h) ((h)->sh.alloc_arena_size)
#define alloc_arena_mapped(h) ((h)->sh.alloc_arena_mapped)
#define alloc_arena(h) ((h)->alloc_arena)
#define CLIB_BIHASH_READY_MAGIC 0
#endif
/* Spin until the bucket lock bit is acquired. Uses a single atomic
   fetch-or instead of a compare-exchange loop: the or sets the lock bit
   unconditionally, and the returned old value tells us whether someone
   already held it. */
static inline void BV (clib_bihash_lock_bucket) (BVT (clib_bihash_bucket) * b)
{
  /* *INDENT-OFF* */
  BVT (clib_bihash_bucket) mask = { .lock = 1 };
  /* *INDENT-ON* */
  u64 old;

try_again:
  old = clib_atomic_fetch_or (&b->as_u64, mask.as_u64);

  if (PREDICT_FALSE (old & mask.as_u64))
    {
      /* somebody else flipped the bit, try again */
      CLIB_PAUSE ();
      goto try_again;
    }
}
/* Release the bucket lock.
   NOTE(review): plain store with no explicit release barrier — assumes
   the surrounding code / architecture ordering makes this safe; confirm
   against the matching lock implementation. */
static inline void BV (clib_bihash_unlock_bucket)
  (BVT (clib_bihash_bucket) * b)
{
  b->lock = 0;
}
(BVT (clib_bihash_bucket) * b)
{
/* Note: applied to locked buckets, test offset */
- return b->offset == 0;
+ if (BIHASH_KVP_AT_BUCKET_LEVEL == 0)
+ return b->offset == 0;
+ else
+ return (b->log2_pages == 0 && b->refcnt == 1);
}
static inline uword BV (clib_bihash_get_offset) (BVT (clib_bihash) * h,
return vp - hp;
}
+#define BIHASH_ADD 1
+#define BIHASH_DEL 0
+
void BV (clib_bihash_init)
(BVT (clib_bihash) * h, char *name, u32 nbuckets, uword memory_size);
+void BV (clib_bihash_init2) (BVT (clib_bihash_init2_args) * a);
+
#if BIHASH_32_64_SVM
-void BV (clib_bihash_master_init_svm)
+void BV (clib_bihash_initiator_init_svm)
(BVT (clib_bihash) * h, char *name, u32 nbuckets, u64 memory_size);
-void BV (clib_bihash_slave_init_svm)
+void BV (clib_bihash_responder_init_svm)
(BVT (clib_bihash) * h, char *name, int fd);
#endif
void BV (clib_bihash_set_kvp_format_fn) (BVT (clib_bihash) * h,
- format_function_t * fmt_fn);
+ format_function_t * kvp_fmt_fn);
void BV (clib_bihash_free) (BVT (clib_bihash) * h);
BVT (clib_bihash_kv) * search_v,
BVT (clib_bihash_kv) * return_v);
+int BV (clib_bihash_is_initialised) (const BVT (clib_bihash) * h);
+
+#define BIHASH_WALK_STOP 0
+#define BIHASH_WALK_CONTINUE 1
+
+typedef
+ int (*BV (clib_bihash_foreach_key_value_pair_cb)) (BVT (clib_bihash_kv) *,
+ void *);
void BV (clib_bihash_foreach_key_value_pair) (BVT (clib_bihash) * h,
- void *callback, void *arg);
+ BV
+ (clib_bihash_foreach_key_value_pair_cb)
+ cb, void *arg);
+void *clib_all_bihash_set_heap (void);
+void clib_bihash_copied (void *dst, void *src);
format_function_t BV (format_bihash);
format_function_t BV (format_bihash_kvp);
format_function_t BV (format_bihash_lru);
+static inline
+BVT (clib_bihash_bucket) *
+BV (clib_bihash_get_bucket) (BVT (clib_bihash) * h, u64 hash)
+{
+#if BIHASH_KVP_AT_BUCKET_LEVEL
+ uword offset;
+ offset = (hash & (h->nbuckets - 1));
+ offset = offset * (sizeof (BVT (clib_bihash_bucket))
+ + (BIHASH_KVP_PER_PAGE * sizeof (BVT (clib_bihash_kv))));
+ return ((BVT (clib_bihash_bucket) *) (((u8 *) h->buckets) + offset));
+#else
+ return h->buckets + (hash & (h->nbuckets - 1));
+#endif
+}
+
static inline int BV (clib_bihash_search_inline_with_hash)
(BVT (clib_bihash) * h, u64 hash, BVT (clib_bihash_kv) * key_result)
{
- u32 bucket_index;
BVT (clib_bihash_value) * v;
BVT (clib_bihash_bucket) * b;
int i, limit;
- bucket_index = hash & (h->nbuckets - 1);
- b = &h->buckets[bucket_index];
+ /* *INDENT-OFF* */
+ static const BVT (clib_bihash_bucket) mask = {
+ .linear_search = 1,
+ .log2_pages = -1
+ };
+ /* *INDENT-ON* */
+
+#if BIHASH_LAZY_INSTANTIATE
+ if (PREDICT_FALSE (h->instantiated == 0))
+ return -1;
+#endif
+
+ b = BV (clib_bihash_get_bucket) (h, hash);
if (PREDICT_FALSE (BV (clib_bihash_bucket_is_empty) (b)))
return -1;
CLIB_PAUSE ();
}
- hash >>= h->log2_nbuckets;
-
v = BV (clib_bihash_get_value) (h, b->offset);
/* If the bucket has unresolvable collisions, use linear search */
limit = BIHASH_KVP_PER_PAGE;
- v += (b->linear_search == 0) ? hash & ((1 << b->log2_pages) - 1) : 0;
- if (PREDICT_FALSE (b->linear_search))
- limit <<= b->log2_pages;
+
+ if (PREDICT_FALSE (b->as_u64 & mask.as_u64))
+ {
+ if (PREDICT_FALSE (b->linear_search))
+ limit <<= b->log2_pages;
+ else
+ v += extract_bits (hash, h->log2_nbuckets, b->log2_pages);
+ }
for (i = 0; i < limit; i++)
{
/* Prefetch the bucket (and, when KVPs are bucket-level, its inline
   page) ahead of a search. */
static inline void BV (clib_bihash_prefetch_bucket)
  (BVT (clib_bihash) * h, u64 hash)
{
  CLIB_PREFETCH (BV (clib_bihash_get_bucket) (h, hash),
		 BIHASH_BUCKET_PREFETCH_CACHE_LINES * CLIB_CACHE_LINE_BYTES,
		 LOAD);
}
/* Prefetch the KVP page a subsequent search for this hash will read.
   No-op if the table is not yet instantiated or the bucket is empty. */
static inline void BV (clib_bihash_prefetch_data)
  (BVT (clib_bihash) * h, u64 hash)
{
  BVT (clib_bihash_value) * v;
  BVT (clib_bihash_bucket) * b;

#if BIHASH_LAZY_INSTANTIATE
  if (PREDICT_FALSE (h->instantiated == 0))
    return;
#endif

  b = BV (clib_bihash_get_bucket) (h, hash);

  if (PREDICT_FALSE (BV (clib_bihash_bucket_is_empty) (b)))
    return;

  v = BV (clib_bihash_get_value) (h, b->offset);

  /* Multi-page bucket without linear search: pick the page selected by
     the hash bits above the bucket-index bits. */
  if (PREDICT_FALSE (b->log2_pages && b->linear_search == 0))
    v += extract_bits (hash, h->log2_nbuckets, b->log2_pages);

  CLIB_PREFETCH (v, BIHASH_KVP_PER_PAGE * sizeof (BVT (clib_bihash_kv)),
		 LOAD);
}
static inline int BV (clib_bihash_search_inline_2_with_hash)
(BVT (clib_bihash) * h,
u64 hash, BVT (clib_bihash_kv) * search_key, BVT (clib_bihash_kv) * valuep)
{
- u32 bucket_index;
BVT (clib_bihash_value) * v;
BVT (clib_bihash_bucket) * b;
int i, limit;
+/* *INDENT-OFF* */
+ static const BVT (clib_bihash_bucket) mask = {
+ .linear_search = 1,
+ .log2_pages = -1
+ };
+/* *INDENT-ON* */
+
ASSERT (valuep);
- bucket_index = hash & (h->nbuckets - 1);
- b = &h->buckets[bucket_index];
+#if BIHASH_LAZY_INSTANTIATE
+ if (PREDICT_FALSE (h->instantiated == 0))
+ return -1;
+#endif
+
+ b = BV (clib_bihash_get_bucket) (h, hash);
if (PREDICT_FALSE (BV (clib_bihash_bucket_is_empty) (b)))
return -1;
CLIB_PAUSE ();
}
- hash >>= h->log2_nbuckets;
v = BV (clib_bihash_get_value) (h, b->offset);
/* If the bucket has unresolvable collisions, use linear search */
limit = BIHASH_KVP_PER_PAGE;
- v += (b->linear_search == 0) ? hash & ((1 << b->log2_pages) - 1) : 0;
- if (PREDICT_FALSE (b->linear_search))
- limit <<= b->log2_pages;
+
+ if (PREDICT_FALSE (b->as_u64 & mask.as_u64))
+ {
+ if (PREDICT_FALSE (b->linear_search))
+ limit <<= b->log2_pages;
+ else
+ v += extract_bits (hash, h->log2_nbuckets, b->log2_pages);
+ }
for (i = 0; i < limit; i++)
{