 * be 100% reliable (writes can fail). It also needs to recycle
* old entries in a lazy way.
*
- * This hash table is the most dummy hash table you can do.
+ * This hash table is the most trivial hash table you can write.
* Fixed total size, fixed bucket size.
 * The advantage is that it can be very efficient (maybe).
*
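For reference, the fields this diff touches (buckets_mask, timeout, and the
flexible buckets[] array sized by the offsetof-style expression further down)
imply roughly the following layout. This is a minimal sketch; the field order
and anything not visible in this diff are assumptions:

    typedef struct {
      u32 buckets_mask;           /* buckets - 1; bucket count is a power of two */
      u32 timeout;                /* staleness timeout used for lazy entry recycling */
      lb_hash_bucket_t buckets[]; /* flexible array of fixed-size buckets */
    } lb_hash_t;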
#define LB_PLUGIN_LB_LBHASH_H_
#include <vnet/vnet.h>
+#include <vppinfra/lb_hash_hash.h>
#if defined (__SSE4_2__)
#include <immintrin.h>
return NULL;
// Allocate 1 more bucket for prefetch
- u32 size = ((u64)&((lb_hash_t *)(0))->buckets[0]) +
+ u32 size = ((uword)&((lb_hash_t *)(0))->buckets[0]) +
sizeof(lb_hash_bucket_t) * (buckets + 1);
u8 *mem = 0;
lb_hash_t *h;
vec_alloc_aligned(mem, size, CLIB_CACHE_LINE_BYTES);
+ clib_memset(mem, 0, size);
h = (lb_hash_t *)mem;
h->buckets_mask = (buckets - 1);
h->timeout = timeout;
vec_free(mem);
}
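The null-pointer member-address expression above computes the byte offset of
buckets[] inside lb_hash_t; casting through uword (pointer sized) instead of
u64 keeps the pointer-to-integer conversion correct on 32-bit targets. An
equivalent form using the standard offsetof idiom, shown only as a sketch:

    #include <stddef.h> /* offsetof */

    /* Header bytes up to the flexible buckets[] array, plus one spare
     * bucket, presumably so prefetching around the last bucket stays in
     * bounds (per the "1 more bucket for prefetch" comment above). */
    u32 size = offsetof(lb_hash_t, buckets[0]) +
               sizeof(lb_hash_bucket_t) * (buckets + 1);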
-#if __SSE4_2__ && !defined (__i386__)
-static_always_inline
-u32 lb_hash_hash(u64 k0, u64 k1, u64 k2, u64 k3, u64 k4)
-{
- u64 val = 0;
- val = _mm_crc32_u64(val, k0);
- val = _mm_crc32_u64(val, k1);
- val = _mm_crc32_u64(val, k2);
- val = _mm_crc32_u64(val, k3);
- val = _mm_crc32_u64(val, k4);
- return (u32) val;
-}
-#else
-static_always_inline
-u32 lb_hash_hash(u64 k0, u64 k1, u64 k2, u64 k3, u64 k4)
-{
- u64 tmp = k0 ^ k1 ^ k2 ^ k3 ^ k4;
- return (u32)clib_xxhash (tmp);
-}
-#endif
-
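With the inline definitions removed, callers presumably keep the same
five-u64 signature through the vppinfra include added above. A usage sketch,
where k0..k4 and ht are hypothetical caller state:

    /* lb_hash_hash() now comes from <vppinfra/lb_hash_hash.h>. */
    u32 hash = lb_hash_hash(k0, k1, k2, k3, k4);
    lb_hash_prefetch_bucket(ht, hash); /* warm the bucket before lookup */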
static_always_inline
void lb_hash_prefetch_bucket(lb_hash_t *ht, u32 hash)
{
u32 *available_index, u32 *found_value)
{
lb_hash_bucket_t *bucket = &ht->buckets[hash & ht->buckets_mask];
- *found_value = ~0;
+ *found_value = 0;
*available_index = ~0;
#if __SSE4_2__ && LB_HASH_DO_NOT_USE_SSE_BUCKETS == 0
u32 bitmask, found_index;