summary |
shortlog |
log |
commit | commitdiff |
review |
tree
raw |
patch |
inline | side by side (from parent 1:
a30d903)
clib_mem_unaligned + zap64 casts its input as u64, computes a mask
according to the input length, and returns the casted masked value.
Therefore all the 8 bytes of the u64 are systematically read, and
the invalid ones are discarded.
Since they are discarded correctly, this invalid read can safely be
ignored.
Revert "fix clib_mem_unaligned() invalid read"
This reverts commit
0ed3d81a5fa274283ae69b69a405c385189897d3.
Change-Id: I5cc33ad36063c414085636debe93707d9a75157a
Signed-off-by: Gabriel Ganne <gabriel.ganne@enea.com>
#if uword_bits == 64
static inline u64
#if uword_bits == 64
static inline u64
-get_unaligned_as_u64 (void const *data, int n)
- int i;
- u64 r = 0;
- u8 const *p = (u8 const *) data;
-
+#define _(n) (((u64) 1 << (u64) (8*(n))) - (u64) 1)
+ static u64 masks_little_endian[] = {
+ 0, _(1), _(2), _(3), _(4), _(5), _(6), _(7),
+ };
+ static u64 masks_big_endian[] = {
+ 0, ~_(7), ~_(6), ~_(5), ~_(4), ~_(3), ~_(2), ~_(1),
+ };
+#undef _
if (clib_arch_is_big_endian)
if (clib_arch_is_big_endian)
- {
- for (i = 0; i < n; i++)
- r |= ((u64) ((*(p + i)) << (u8) (1 << (8 - i))));
- }
+ return x & masks_big_endian[n];
- {
- for (i = 0; i < n; i++)
- r |= ((u64) ((*(p + i)) << (u8) (1 << i)));
- }
-
- return r;
+ return x & masks_little_endian[n];
+/**
+ * make address-sanitizer skip this:
+ * clib_mem_unaligned + zap64 casts its input as u64, computes a mask
+ * according to the input length, and returns the casted masked value.
+ * Therefore all the 8 bytes of the u64 are systematically read, which
+ * rightfully causes address-sanitizer to raise an error on smaller inputs.
+ *
+ * However the invalid bytes are discarded within zap64(), which is why
+ * this can be silenced safely.
+ */
+static inline u64 __attribute__ ((no_sanitize_address))
hash_memory64 (void *p, word n_bytes, u64 state)
{
u64 *q = p;
hash_memory64 (void *p, word n_bytes, u64 state)
{
u64 *q = p;
case 2:
a += clib_mem_unaligned (q + 0, u64);
b += clib_mem_unaligned (q + 1, u64);
case 2:
a += clib_mem_unaligned (q + 0, u64);
b += clib_mem_unaligned (q + 1, u64);
- c += get_unaligned_as_u64 (q + 2, n % sizeof (u64)) << 8;
+ if (n % sizeof (u64))
+ c += zap64 (clib_mem_unaligned (q + 2, u64), n % sizeof (u64)) << 8;
break;
case 1:
a += clib_mem_unaligned (q + 0, u64);
break;
case 1:
a += clib_mem_unaligned (q + 0, u64);
- b += get_unaligned_as_u64 (q + 1, n % sizeof (u64));
+ if (n % sizeof (u64))
+ b += zap64 (clib_mem_unaligned (q + 1, u64), n % sizeof (u64));
- a += get_unaligned_as_u64 (q + 0, n % sizeof (u64));
+ if (n % sizeof (u64))
+ a += zap64 (clib_mem_unaligned (q + 0, u64), n % sizeof (u64));