X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=src%2Fvppinfra%2Fhash.c;h=eae79d485925ac362112a5138c0e308daf063d8a;hb=25ab6cfed6db18bdd8cdb57b7e751751aa64c576;hp=b3db9f82b9c83ab3306d690dea3505454827afad;hpb=53ae29e0608868be4f6a9cced21c39e72e294d0b;p=vpp.git
diff --git a/src/vppinfra/hash.c b/src/vppinfra/hash.c
index b3db9f82b9c..eae79d48592 100644
--- a/src/vppinfra/hash.c
+++ b/src/vppinfra/hash.c
@@ -43,13 +43,13 @@ always_inline void
 zero_pair (hash_t * h, hash_pair_t * p)
 {
-  memset (p, 0, hash_pair_bytes (h));
+  clib_memset (p, 0, hash_pair_bytes (h));
 }
 
 always_inline void
 init_pair (hash_t * h, hash_pair_t * p)
 {
-  memset (p->value, ~0, hash_value_bytes (h));
+  clib_memset (p->value, ~0, hash_value_bytes (h));
 }
 
 always_inline hash_pair_union_t *
@@ -80,27 +80,33 @@ static u8 *hash_format_pair_default (u8 * s, va_list * args);
 
 #if uword_bits == 64
 
 static inline u64
-get_unaligned_as_u64 (void const *data, int n)
+zap64 (u64 x, word n)
 {
-  int i;
-  u64 r = 0;
-  u8 const *p = (u8 const *) data;
-
+#define _(n) (((u64) 1 << (u64) (8*(n))) - (u64) 1)
+  static u64 masks_little_endian[] = {
+    0, _(1), _(2), _(3), _(4), _(5), _(6), _(7),
+  };
+  static u64 masks_big_endian[] = {
+    0, ~_(7), ~_(6), ~_(5), ~_(4), ~_(3), ~_(2), ~_(1),
+  };
+#undef _
   if (clib_arch_is_big_endian)
-    {
-      for (i = 0; i < n; i++)
-	r |= ((u64) ((*(p + i)) << (u8) (1 << (8 - i))));
-    }
+    return x & masks_big_endian[n];
   else
-    {
-      for (i = 0; i < n; i++)
-	r |= ((u64) ((*(p + i)) << (u8) (1 << i)));
-    }
-
-  return r;
+    return x & masks_little_endian[n];
 }
 
-static inline u64
+/**
+ * make address-sanitizer skip this:
+ * clib_mem_unaligned + zap64 casts its input as u64, computes a mask
+ * according to the input length, and returns the casted masked value.
+ * Therefore all the 8 Bytes of the u64 are systematically read, which
+ * rightfully causes address-sanitizer to raise an error on smaller inputs. 
+ *
+ * However the invalid Bytes are discarded within zap64(), which is why
+ * this can be silenced safely.
+ */
+static inline u64 __attribute__ ((no_sanitize_address))
 hash_memory64 (void *p, word n_bytes, u64 state)
 {
   u64 *q = p;
@@ -126,16 +132,19 @@ hash_memory64 (void *p, word n_bytes, u64 state)
     case 2:
       a += clib_mem_unaligned (q + 0, u64);
       b += clib_mem_unaligned (q + 1, u64);
-      c += get_unaligned_as_u64 (q + 2, n % sizeof (u64)) << 8;
+      if (n % sizeof (u64))
+	c += zap64 (clib_mem_unaligned (q + 2, u64), n % sizeof (u64)) << 8;
       break;
 
     case 1:
       a += clib_mem_unaligned (q + 0, u64);
-      b += get_unaligned_as_u64 (q + 1, n % sizeof (u64));
+      if (n % sizeof (u64))
+	b += zap64 (clib_mem_unaligned (q + 1, u64), n % sizeof (u64));
       break;
 
     case 0:
-      a += get_unaligned_as_u64 (q + 0, n % sizeof (u64));
+      if (n % sizeof (u64))
+	a += zap64 (clib_mem_unaligned (q + 0, u64), n % sizeof (u64));
       break;
     }
 
@@ -273,6 +282,10 @@ key_sum (hash_t * h, uword key)
       sum = string_key_sum (h, key);
       break;
 
+    case KEY_FUNC_MEM:
+      sum = mem_key_sum (h, key);
+      break;
+
     default:
      sum = h->key_sum (h, key);
       break;
@@ -303,6 +316,10 @@ key_equal1 (hash_t * h, uword key1, uword key2, uword e)
       e = string_key_equal (h, key1, key2);
       break;
 
+    case KEY_FUNC_MEM:
+      e = mem_key_equal (h, key1, key2);
+      break;
+
     default:
       e = h->key_equal (h, key1, key2);
       break;
@@ -359,7 +376,7 @@ set_indirect_is_user (void *v, uword i, hash_pair_union_t * p, uword key)
 	  log2_bytes = 1 + hash_pair_log2_bytes (h);
 	  q = clib_mem_alloc (1ULL << log2_bytes);
 	}
-      clib_memcpy (q, &p->direct, hash_pair_bytes (h));
+      clib_memcpy_fast (q, &p->direct, hash_pair_bytes (h));
       pi->pairs = q;
       if (h->log2_pair_size > 0)
@@ -440,8 +457,8 @@ unset_indirect (void *v, uword i, hash_pair_t * q)
 
       if (len == 2)
 	{
-	  clib_memcpy (p, q == r ? hash_forward1 (h, r) : r,
-		       hash_pair_bytes (h));
+	  clib_memcpy_fast (p, q == r ? 
hash_forward1 (h, r) : r, + hash_pair_bytes (h)); set_is_user (v, i, 1); } else @@ -456,7 +473,7 @@ unset_indirect (void *v, uword i, hash_pair_t * q) { /* If deleting a pair we need to keep non-null pairs together. */ if (q < e) - clib_memcpy (q, e, hash_pair_bytes (h)); + clib_memcpy_fast (q, e, hash_pair_bytes (h)); else zero_pair (h, q); if (is_vec) @@ -497,8 +514,8 @@ lookup (void *v, uword key, enum lookup_opcode op, { set_is_user (v, i, 0); if (old_value) - clib_memcpy (old_value, p->direct.value, - hash_value_bytes (h)); + clib_memcpy_fast (old_value, p->direct.value, + hash_value_bytes (h)); zero_pair (h, &p->direct); } } @@ -531,8 +548,8 @@ lookup (void *v, uword key, enum lookup_opcode op, if (found_key && op == UNSET) { if (old_value) - clib_memcpy (old_value, &p->direct.value, - hash_value_bytes (h)); + clib_memcpy_fast (old_value, &p->direct.value, + hash_value_bytes (h)); unset_indirect (v, i, &p->direct); @@ -547,8 +564,8 @@ lookup (void *v, uword key, enum lookup_opcode op, { /* Save away old value for caller. */ if (old_value && found_key) - clib_memcpy (old_value, &p->direct.value, hash_value_bytes (h)); - clib_memcpy (&p->direct.value, new_value, hash_value_bytes (h)); + clib_memcpy_fast (old_value, &p->direct.value, hash_value_bytes (h)); + clib_memcpy_fast (&p->direct.value, new_value, hash_value_bytes (h)); } if (op == SET) @@ -607,7 +624,7 @@ hash_next (void *v, hash_next_t * hn) { /* Restore flags. */ h->flags = hn->f; - memset (hn, 0, sizeof (hn[0])); + clib_memset (hn, 0, sizeof (hn[0])); return 0; } @@ -676,7 +693,7 @@ _hash_create (uword elts, hash_t * h_user) if (h_user) log2_pair_size = h_user->log2_pair_size; - v = _vec_resize (0, + v = _vec_resize ((void *) 0, /* vec len: */ elts, /* data bytes: */ (elts << log2_pair_size) * sizeof (hash_pair_t),