always_inline void
zero_pair (hash_t * h, hash_pair_t * p)
{
- memset (p, 0, hash_pair_bytes (h));
+ clib_memset (p, 0, hash_pair_bytes (h));
}
always_inline void
init_pair (hash_t * h, hash_pair_t * p)
{
- memset (p->value, ~0, hash_value_bytes (h));
+ clib_memset (p->value, ~0, hash_value_bytes (h));
}
always_inline hash_pair_union_t *
static u8 *hash_format_pair_default (u8 * s, va_list * args);
-#if uword_bits == 64
-
-static inline u64
-zap64 (u64 x, word n)
-{
-#define _(n) (((u64) 1 << (u64) (8*(n))) - (u64) 1)
- static u64 masks_little_endian[] = {
- 0, _(1), _(2), _(3), _(4), _(5), _(6), _(7),
- };
- static u64 masks_big_endian[] = {
- 0, ~_(7), ~_(6), ~_(5), ~_(4), ~_(3), ~_(2), ~_(1),
- };
-#undef _
- if (clib_arch_is_big_endian)
- return x & masks_big_endian[n];
- else
- return x & masks_little_endian[n];
-}
-
-/**
- * make address-sanitizer skip this:
- * clib_mem_unaligned + zap64 casts its input as u64, computes a mask
- * according to the input length, and returns the casted maked value.
- * Therefore all the 8 Bytes of the u64 are systematically read, which
- * rightfully causes address-sanitizer to raise an error on smaller inputs.
- *
- * However the invalid Bytes are discarded within zap64(), whicj is why
- * this can be silenced safely.
- */
-static inline u64 __attribute__ ((no_sanitize_address))
-hash_memory64 (void *p, word n_bytes, u64 state)
+__clib_export uword
+hash_memory (void *p, word n_bytes, uword state)
{
- u64 *q = p;
+ uword last[3] = {};
+ uwordu *q = p;
u64 a, b, c, n;
- a = b = 0x9e3779b97f4a7c13LL;
+ a = b = (uword_bits == 64) ? 0x9e3779b97f4a7c13LL : 0x9e3779b9;
c = state;
n = n_bytes;
- while (n >= 3 * sizeof (u64))
+ while (n >= 3 * sizeof (uword))
{
- a += clib_mem_unaligned (q + 0, u64);
- b += clib_mem_unaligned (q + 1, u64);
- c += clib_mem_unaligned (q + 2, u64);
- hash_mix64 (a, b, c);
- n -= 3 * sizeof (u64);
+ a += q[0];
+ b += q[1];
+ c += q[2];
+ hash_mix (a, b, c);
+ n -= 3 * sizeof (uword);
q += 3;
}
c += n_bytes;
- switch (n / sizeof (u64))
- {
- case 2:
- a += clib_mem_unaligned (q + 0, u64);
- b += clib_mem_unaligned (q + 1, u64);
- if (n % sizeof (u64))
- c += zap64 (clib_mem_unaligned (q + 2, u64), n % sizeof (u64)) << 8;
- break;
-
- case 1:
- a += clib_mem_unaligned (q + 0, u64);
- if (n % sizeof (u64))
- b += zap64 (clib_mem_unaligned (q + 1, u64), n % sizeof (u64));
- break;
-
- case 0:
- if (n % sizeof (u64))
- a += zap64 (clib_mem_unaligned (q + 0, u64), n % sizeof (u64));
- break;
- }
-
- hash_mix64 (a, b, c);
- return c;
-}
-
-#else /* if uword_bits == 64 */
-
-static inline u32
-zap32 (u32 x, word n)
-{
-#define _(n) (((u32) 1 << (u32) (8*(n))) - (u32) 1)
- static u32 masks_little_endian[] = {
- 0, _(1), _(2), _(3),
- };
- static u32 masks_big_endian[] = {
- 0, ~_(3), ~_(2), ~_(1),
- };
-#undef _
- if (clib_arch_is_big_endian)
- return x & masks_big_endian[n];
- else
- return x & masks_little_endian[n];
-}
-
-static inline u32
-hash_memory32 (void *p, word n_bytes, u32 state)
-{
- u32 *q = p;
- u32 a, b, c, n;
-
- a = b = 0x9e3779b9;
- c = state;
- n = n_bytes;
-
- while (n >= 3 * sizeof (u32))
- {
- a += clib_mem_unaligned (q + 0, u32);
- b += clib_mem_unaligned (q + 1, u32);
- c += clib_mem_unaligned (q + 2, u32);
- hash_mix32 (a, b, c);
- n -= 3 * sizeof (u32);
- q += 3;
- }
-
- c += n_bytes;
- switch (n / sizeof (u32))
+ if (n > 0)
{
- case 2:
- a += clib_mem_unaligned (q + 0, u32);
- b += clib_mem_unaligned (q + 1, u32);
- if (n % sizeof (u32))
- c += zap32 (clib_mem_unaligned (q + 2, u32), n % sizeof (u32)) << 8;
- break;
-
- case 1:
- a += clib_mem_unaligned (q + 0, u32);
- if (n % sizeof (u32))
- b += zap32 (clib_mem_unaligned (q + 1, u32), n % sizeof (u32));
- break;
-
- case 0:
- if (n % sizeof (u32))
- a += zap32 (clib_mem_unaligned (q + 0, u32), n % sizeof (u32));
- break;
+ clib_memcpy_fast (&last, q, n);
+ a += last[0];
+ b += last[1];
+ c += last[2];
}
- hash_mix32 (a, b, c);
+ hash_mix (a, b, c);
return c;
}
-#endif
-
-uword
-hash_memory (void *p, word n_bytes, uword state)
-{
- uword *q = p;
-
-#if uword_bits == 64
- return hash_memory64 (q, n_bytes, state);
-#else
- return hash_memory32 (q, n_bytes, state);
-#endif
-}
-#if uword_bits == 64
-always_inline uword
-hash_uword (uword x)
-{
- u64 a, b, c;
-
- a = b = 0x9e3779b97f4a7c13LL;
- c = 0;
- a += x;
- hash_mix64 (a, b, c);
- return c;
-}
-#else
always_inline uword
hash_uword (uword x)
{
- u32 a, b, c;
+ uword a, b, c;
- a = b = 0x9e3779b9;
+ a = b = (uword_bits == 64) ? 0x9e3779b97f4a7c13LL : 0x9e3779b9;
c = 0;
a += x;
- hash_mix32 (a, b, c);
+ hash_mix (a, b, c);
return c;
}
-#endif
/* Call sum function. Hash code will be sum function value
modulo the prime length of the hash table. */
sum = string_key_sum (h, key);
break;
+ case KEY_FUNC_MEM:
+ sum = mem_key_sum (h, key);
+ break;
+
default:
sum = h->key_sum (h, key);
break;
e = string_key_equal (h, key1, key2);
break;
+ case KEY_FUNC_MEM:
+ e = mem_key_equal (h, key1, key2);
+ break;
+
default:
e = h->key_equal (h, key1, key2);
break;
log2_bytes = 1 + hash_pair_log2_bytes (h);
q = clib_mem_alloc (1ULL << log2_bytes);
}
- clib_memcpy (q, &p->direct, hash_pair_bytes (h));
+ clib_memcpy_fast (q, &p->direct, hash_pair_bytes (h));
pi->pairs = q;
if (h->log2_pair_size > 0)
new_len = len + 1;
if (new_len * hash_pair_bytes (h) > (1ULL << log2_bytes))
{
- pi->pairs = clib_mem_realloc (pi->pairs,
- 1ULL << (log2_bytes + 1),
- 1ULL << log2_bytes);
+ pi->pairs = clib_mem_realloc (pi->pairs, 1ULL << (log2_bytes + 1));
log2_bytes++;
}
if (len == 2)
{
- clib_memcpy (p, q == r ? hash_forward1 (h, r) : r,
- hash_pair_bytes (h));
+ clib_memcpy_fast (p, q == r ? hash_forward1 (h, r) : r,
+ hash_pair_bytes (h));
set_is_user (v, i, 1);
}
else
{
/* If deleting a pair we need to keep non-null pairs together. */
if (q < e)
- clib_memcpy (q, e, hash_pair_bytes (h));
+ clib_memcpy_fast (q, e, hash_pair_bytes (h));
else
zero_pair (h, q);
if (is_vec)
- _vec_len (pi->pairs) -= 1;
+ vec_dec_len (pi->pairs, 1);
else
indirect_pair_set (pi, indirect_pair_get_log2_bytes (pi), len - 1);
}
hash_t *h = hash_header (v);
hash_pair_union_t *p = 0;
uword found_key = 0;
+ uword value_bytes;
uword i;
if (!v)
i = key_sum (h, key) & (_vec_len (v) - 1);
p = get_pair (v, i);
+ value_bytes = hash_value_bytes (h);
if (hash_is_user (v, i))
{
if (op == UNSET)
{
set_is_user (v, i, 0);
- if (old_value)
- clib_memcpy (old_value, p->direct.value,
- hash_value_bytes (h));
+ if (old_value && value_bytes)
+ clib_memcpy_fast (old_value, p->direct.value, value_bytes);
zero_pair (h, &p->direct);
}
}
found_key = p != 0;
if (found_key && op == UNSET)
{
- if (old_value)
- clib_memcpy (old_value, &p->direct.value,
- hash_value_bytes (h));
+ if (old_value && value_bytes)
+ clib_memcpy_fast (old_value, &p->direct.value, value_bytes);
unset_indirect (v, i, &p->direct);
}
}
- if (op == SET && p != 0)
+ if (op == SET && p != 0 && value_bytes)
{
/* Save away old value for caller. */
if (old_value && found_key)
- clib_memcpy (old_value, &p->direct.value, hash_value_bytes (h));
- clib_memcpy (&p->direct.value, new_value, hash_value_bytes (h));
+ clib_memcpy_fast (old_value, &p->direct.value, value_bytes);
+ clib_memcpy_fast (&p->direct.value, new_value, value_bytes);
}
if (op == SET)
}
/* Fetch value of key. */
-uword *
+__clib_export uword *
_hash_get (void *v, uword key)
{
hash_t *h = hash_header (v);
return &p->value[0];
}
-hash_pair_t *
+__clib_export hash_pair_t *
_hash_get_pair (void *v, uword key)
{
return lookup (v, key, GET, 0, 0);
}
-hash_pair_t *
-hash_next (void *v, hash_next_t * hn)
+__clib_export hash_pair_t *
+hash_next (void *v, hash_next_t *hn)
{
hash_t *h = hash_header (v);
hash_pair_t *p;
{
/* Restore flags. */
h->flags = hn->f;
- memset (hn, 0, sizeof (hn[0]));
+ clib_memset (hn, 0, sizeof (hn[0]));
return 0;
}
}
/* Remove key from table. */
-void *
+__clib_export void *
_hash_unset (void *v, uword key, void *old_value)
{
hash_t *h;
return v;
}
-void *
+__clib_export void *
_hash_create (uword elts, hash_t * h_user)
{
hash_t *h;
uword log2_pair_size;
void *v;
+ vec_attr_t va = { .hdr_sz = sizeof (h[0]), .align = sizeof (hash_pair_t) };
/* Size of hash is power of 2 >= ELTS and larger than
number of bits in is_user bitmap elements. */
if (h_user)
log2_pair_size = h_user->log2_pair_size;
- v = _vec_resize ((void *) 0,
- /* vec len: */ elts,
- /* data bytes: */
- (elts << log2_pair_size) * sizeof (hash_pair_t),
- /* header bytes: */
- sizeof (h[0]) +
- (elts / BITS (h->is_user[0])) * sizeof (h->is_user[0]),
- /* alignment */ sizeof (hash_pair_t));
+  va.elt_sz = (1 << log2_pair_size) * sizeof (hash_pair_t);
+ v = _vec_alloc_internal (elts, &va);
h = hash_header (v);
if (h_user)
- h[0] = h_user[0];
+ {
+ h[0] = h_user[0];
+ h->is_user = 0;
+ }
+ vec_validate_aligned (
+ h->is_user, ((elts / BITS (h->is_user[0])) * sizeof (h->is_user[0])) - 1,
+ CLIB_CACHE_LINE_BYTES);
h->log2_pair_size = log2_pair_size;
h->elts = 0;
return v;
}
-void *
+__clib_export void *
_hash_free (void *v)
{
hash_t *h = hash_header (v);
clib_mem_free (p->indirect.pairs);
}
+ vec_free (h->is_user);
vec_free_header (h);
return 0;
return new;
}
-void *
+__clib_export void *
hash_resize (void *old, uword new_size)
{
return hash_resize_internal (old, new_size, 1);
}
-void *
+__clib_export void *
hash_dup (void *old)
{
return hash_resize_internal (old, vec_len (old), 0);
}
-void *
+__clib_export void *
_hash_set3 (void *v, uword key, void *value, void *old_value)
{
hash_t *h;
return v;
}
-uword
+__clib_export uword
vec_key_sum (hash_t * h, uword key)
{
void *v = uword_to_pointer (key, void *);
return hash_memory (v, vec_len (v) * h->user, 0);
}
-uword
+__clib_export uword
vec_key_equal (hash_t * h, uword key1, uword key2)
{
void *v1 = uword_to_pointer (key1, void *);
return l1 == l2 && 0 == memcmp (v1, v2, l1 * h->user);
}
-u8 *
+__clib_export u8 *
vec_key_format_pair (u8 * s, va_list * args)
{
void *CLIB_UNUSED (user_arg) = va_arg (*args, void *);
return s;
}
-uword
+__clib_export uword
mem_key_sum (hash_t * h, uword key)
{
uword *v = uword_to_pointer (key, void *);
return hash_memory (v, h->user, 0);
}
-uword
+__clib_export uword
mem_key_equal (hash_t * h, uword key1, uword key2)
{
void *v1 = uword_to_pointer (key1, void *);
return s;
}
-uword
+__clib_export uword
hash_bytes (void *v)
{
uword i, bytes;
if (!v)
return 0;
- bytes = vec_capacity (v, hash_header_bytes (v));
+ bytes = vec_mem_size (v);
for (i = 0; i < hash_capacity (v); i++)
{
if (h->log2_pair_size > 0)
bytes += 1 << indirect_pair_get_log2_bytes (&p->indirect);
else
- bytes += vec_capacity (p->indirect.pairs, 0);
+ bytes += vec_mem_size (p->indirect.pairs);
}
}
return bytes;
}
-u8 *
-format_hash (u8 * s, va_list * va)
+__clib_export u8 *
+format_hash (u8 *s, va_list *va)
{
void *v = va_arg (*va, void *);
int verbose = va_arg (*va, int);
return p ? 1 : 0;
}
-uword
+__clib_export uword
unformat_hash_vec_string (unformat_input_t * input, va_list * va)
{
return unformat_hash_string_internal (input, va, /* is_vec */ 1);
}
-uword
+__clib_export uword
unformat_hash_string (unformat_input_t * input, va_list * va)
{
return unformat_hash_string_internal (input, va, /* is_vec */ 0);
}
-clib_error_t *
+__clib_export clib_error_t *
hash_validate (void *v)
{
hash_t *h = hash_header (v);