+#ifndef BIHASH_STAT_IDS
+#define BIHASH_STAT_IDS 1
+
+/* X-macro list of bihash statistics counters.  Each use site defines
+ * _(a) to the expansion it wants, instantiates foreach_bihash_stat, and
+ * undefines _ again (see the enum just below).  Per the existing note,
+ * 'splits' must stay the last entry; add new counters before it. */
+#define foreach_bihash_stat \
+_(alloc_add) \
+_(add) \
+_(split_add) \
+_(replace) \
+_(update) \
+_(del) \
+_(del_free) \
+_(linear) \
+_(resplit) \
+_(working_copy_lost) \
+_(splits) /* must be last */
+
+/* Stat ids passed to the increment-stat callback as 'int stat_id'.
+ * BIHASH_STAT_N_STATS is the number of counters, suitable for sizing a
+ * per-table counter vector. */
+typedef enum
+{
+#define _(a) BIHASH_STAT_##a,
+ foreach_bihash_stat
+#undef _
+ BIHASH_STAT_N_STATS,
+} BVT (clib_bihash_stat_id);
+#endif /* BIHASH_STAT_IDS */
+
+/* Bump one statistics counter for this table.  Compiles to an empty
+ * function unless the template was built with BIHASH_ENABLE_STATS; even
+ * then it costs only a load and a predicted-untaken branch when no
+ * collector has registered a callback. */
+static inline void BV (clib_bihash_increment_stat) (BVT (clib_bihash) * h,
+ int stat_id, u64 count)
+{
+#if BIHASH_ENABLE_STATS
+  void (*cb) (BVT (clib_bihash) *, int, u64) = h->inc_stats_callback;
+
+  /* Stats collection is opt-in, so treat "callback present" as the
+     unlikely case. */
+  if (PREDICT_FALSE (cb != 0))
+    cb (h, stat_id, count);
+#endif
+}
+
+#if BIHASH_ENABLE_STATS
+/** Register a statistics collector on a bihash table.
+ *
+ *  @param h   the hash table
+ *  @param cb  invoked by clib_bihash_increment_stat as (table, stat id,
+ *             increment); ids are the BIHASH_STAT_* enum values
+ *  @param ctx opaque pointer stored on the table.  Note the callback is
+ *             NOT passed ctx directly — presumably it reads
+ *             h->inc_stats_context itself.  NOTE(review): confirm against
+ *             the callers of this API.
+ */
+static inline void BV (clib_bihash_set_stats_callback)
+ (BVT (clib_bihash) * h, void (*cb) (BVT (clib_bihash) *, int, u64),
+ void *ctx)
+{
+  h->inc_stats_callback = cb;
+  h->inc_stats_context = ctx;
+}
+#endif
+
+
+/* Take the per-table allocation lock: spin on an atomic test-and-set
+ * with acquire ordering, pausing between attempts to ease contention on
+ * the cache line while we wait. */
+static inline void BV (clib_bihash_alloc_lock) (BVT (clib_bihash) * h)
+{
+  while (__atomic_test_and_set (h->alloc_lock, __ATOMIC_ACQUIRE))
+    CLIB_PAUSE ();
+}
+
+/* Drop the allocation lock.  Release ordering makes every write done
+ * while holding the lock visible before the lock is observed clear. */
+static inline void BV (clib_bihash_alloc_unlock) (BVT (clib_bihash) * h)
+{
+  __atomic_clear (h->alloc_lock, __ATOMIC_RELEASE);
+}
+
+/* Lock one bucket by CAS-ing its lock bit from 0 to 1.
+ *
+ * The bucket header is a single u64, so the lock bit shares a word with
+ * the rest of the bucket metadata: snapshot the word, build the expected
+ * (lock = 0) and desired (lock = 1) images, and retry until the CAS
+ * lands.  A weak CAS is fine here — a spurious failure just re-reads and
+ * loops.  Acquire ordering is requested on both the success and failure
+ * paths of the exchange.
+ * NOTE(review): CLIB_PAUSE() runs before the first CAS attempt too,
+ * adding a small delay even on the uncontended path — presumably
+ * deliberate; confirm against the upstream template before changing. */
+static inline void BV (clib_bihash_lock_bucket) (BVT (clib_bihash_bucket) * b)
+{
+  BVT (clib_bihash_bucket) unlocked_bucket, locked_bucket;
+
+  do
+    {
+      locked_bucket.as_u64 = unlocked_bucket.as_u64 = b->as_u64;
+      unlocked_bucket.lock = 0;
+      locked_bucket.lock = 1;
+      CLIB_PAUSE ();
+    }
+  while (__atomic_compare_exchange_n (&b->as_u64, &unlocked_bucket.as_u64,
+ locked_bucket.as_u64, 1 /* weak */ ,
+ __ATOMIC_ACQUIRE,
+ __ATOMIC_ACQUIRE) == 0);
+}
+
+/* Unlock a bucket.  The memory barrier orders the bucket updates made
+ * under the lock ahead of the plain store that clears the lock bit,
+ * acting as the release half of clib_bihash_lock_bucket above. */
+static inline void BV (clib_bihash_unlock_bucket)
+ (BVT (clib_bihash_bucket) * b)
+{
+  CLIB_MEMORY_BARRIER ();
+  b->lock = 0;
+}
+
+static inline void *BV (clib_bihash_get_value) (BVT (clib_bihash) * h,