2 * Copyright (c) 2015 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
16 Copyright (c) 2001-2005 Eliot Dresselhaus
18 Permission is hereby granted, free of charge, to any person obtaining
19 a copy of this software and associated documentation files (the
20 "Software"), to deal in the Software without restriction, including
21 without limitation the rights to use, copy, modify, merge, publish,
22 distribute, sublicense, and/or sell copies of the Software, and to
23 permit persons to whom the Software is furnished to do so, subject to
24 the following conditions:
26 The above copyright notice and this permission notice shall be
27 included in all copies or substantial portions of the Software.
29 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
30 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
31 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
32 NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
33 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
34 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
35 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
38 #include <vppinfra/hash.h>
39 #include <vppinfra/error.h>
40 #include <vppinfra/mem.h>
41 #include <vppinfra/byte_order.h> /* for clib_arch_is_big_endian */
/* Zero-fill one (key,value) pair; pair size comes from the hash header
   (hash_pair_bytes). NOTE(review): return type and braces are not visible
   in this extract. */
44 zero_pair (hash_t * h, hash_pair_t * p)
46 clib_memset (p, 0, hash_pair_bytes (h));
/* Initialize a pair's value bytes to all-ones (~0), the "empty value"
   sentinel used by this table. The key field is left untouched. */
50 init_pair (hash_t * h, hash_pair_t * p)
52 clib_memset (p->value, ~0, hash_value_bytes (h));
/* Return a pointer to bucket i's pair union in hash vector v.
   Each bucket occupies (1 << h->log2_pair_size) hash_pair_t slots,
   hence the shift when advancing p.
   NOTE(review): the declaration/initialization of p is not visible
   in this extract. */
55 always_inline hash_pair_union_t *
56 get_pair (void *v, uword i)
58 hash_t *h = hash_header (v);
60 ASSERT (i < vec_len (v));
62 p += i << h->log2_pair_size;
63 return (hash_pair_union_t *) p;
/* Set or clear bucket i's bit in the is_user bitmap (bit set means the
   bucket holds a direct user pair; clear means it holds an indirect
   pair block). i0/i1 are the word index and bit mask within the bitmap.
   NOTE(review): only the clear branch (&= ~i1) is visible here; the
   set branch is missing from this extract. */
67 set_is_user (void *v, uword i, uword is_user)
69 hash_t *h = hash_header (v);
70 uword i0 = i / BITS (h->is_user[0]);
71 uword i1 = (uword) 1 << (i % BITS (h->is_user[0]));
75 h->is_user[i0] &= ~i1;
/* Forward declaration of the default pair formatter used by _hash_create. */
78 static u8 *hash_format_pair_default (u8 * s, va_list * args);

/* zap64: mask a u64 read so that only the first n valid bytes survive.
   The little-endian table keeps the LOW 8*n bits; the big-endian table
   keeps the HIGH bits, matching where "first n bytes" land in memory
   order on each architecture.
   NOTE(review): the zap64 function signature itself is not visible in
   this extract; only its mask tables and return paths are. */
85 #define _(n) (((u64) 1 << (u64) (8*(n))) - (u64) 1)
86 static u64 masks_little_endian[] = {
87 0, _(1), _(2), _(3), _(4), _(5), _(6), _(7),
89 static u64 masks_big_endian[] = {
90 0, ~_(7), ~_(6), ~_(5), ~_(4), ~_(3), ~_(2), ~_(1),
93 if (clib_arch_is_big_endian)
94 return x & masks_big_endian[n];
96 return x & masks_little_endian[n];
100 * make address-sanitizer skip this:
101 * clib_mem_unaligned + zap64 casts its input as u64, computes a mask
102 * according to the input length, and returns the casted maked value.
103 * Therefore all the 8 Bytes of the u64 are systematically read, which
104 * rightfully causes address-sanitizer to raise an error on smaller inputs.
106 * However the invalid Bytes are discarded within zap64(), which is why
107 * this can be silenced safely.
109 * The above is true *unless* the extra bytes cross a page boundary
110 * into unmapped or no-access space, hence the boundary crossing check.
/* 64-bit Jenkins-style hash over an arbitrary byte range.
   Consumes 24 bytes (3 u64s) per hash_mix64 round, then handles the
   0..23-byte tail via a switch on the remaining whole u64s.  Tail reads
   use zap64 to discard bytes past n_bytes; when the over-read would
   cross a 4 KiB page boundary (minimum known page size), the bytes are
   instead copied into a local tmp union so no out-of-bounds read occurs.
   CLIB_MEM_OVERFLOW_PUSH/POP silence AddressSanitizer on the deliberate
   in-page over-read (see the comment block above this function).
   NOTE(review): declarations of q/a/b/c/n/tmp, switch case labels, and
   closing braces are missing from this extract. */
113 hash_memory64 (void *p, word n_bytes, u64 state)
117 int page_boundary_crossing;
118 u64 start_addr, end_addr;
126 * If the request crosses a 4k boundary, it's not OK to assume
127 * that the zap64 game is safe. 4k is the minimum known page size.
129 start_addr = (u64) p;
130 end_addr = start_addr + n_bytes + 7;
131 page_boundary_crossing = (start_addr >> 12) != (end_addr >> 12);
133 a = b = 0x9e3779b97f4a7c13LL;
137 while (n >= 3 * sizeof (u64))
139 a += clib_mem_unaligned (q + 0, u64);
140 b += clib_mem_unaligned (q + 1, u64);
141 c += clib_mem_unaligned (q + 2, u64);
142 hash_mix64 (a, b, c);
143 n -= 3 * sizeof (u64);
148 switch (n / sizeof (u64))
151 a += clib_mem_unaligned (q + 0, u64);
152 b += clib_mem_unaligned (q + 1, u64);
153 if (n % sizeof (u64))
155 if (PREDICT_TRUE (page_boundary_crossing == 0))
157 CLIB_MEM_OVERFLOW_PUSH (q + 2, sizeof (u64));
158 c += zap64 (clib_mem_unaligned (q + 2, u64), n % sizeof (u64))
160 CLIB_MEM_OVERFLOW_POP ();
164 clib_memcpy_fast (tmp.as_u8, q + 2, n % sizeof (u64));
165 c += zap64 (tmp.as_u64, n % sizeof (u64)) << 8;
171 a += clib_mem_unaligned (q + 0, u64);
172 if (n % sizeof (u64))
174 if (PREDICT_TRUE (page_boundary_crossing == 0))
176 CLIB_MEM_OVERFLOW_PUSH (q + 1, sizeof (u64));
177 b += zap64 (clib_mem_unaligned (q + 1, u64), n % sizeof (u64));
178 CLIB_MEM_OVERFLOW_POP ();
182 clib_memcpy_fast (tmp.as_u8, q + 1, n % sizeof (u64));
183 b += zap64 (tmp.as_u64, n % sizeof (u64));
189 if (n % sizeof (u64))
191 if (PREDICT_TRUE (page_boundary_crossing == 0))
193 CLIB_MEM_OVERFLOW_PUSH (q + 0, sizeof (u64));
194 a += zap64 (clib_mem_unaligned (q + 0, u64), n % sizeof (u64));
195 CLIB_MEM_OVERFLOW_POP ();
199 clib_memcpy_fast (tmp.as_u8, q, n % sizeof (u64));
200 a += zap64 (tmp.as_u64, n % sizeof (u64));
206 hash_mix64 (a, b, c);
211 #else /* if uword_bits == 64 */

/* 32-bit analogue of zap64: keep only the first n valid bytes of a u32
   read, with endian-specific mask tables (low bits on little-endian,
   high bits on big-endian).
   NOTE(review): the little-endian table's initializer line is missing
   from this extract. */
214 zap32 (u32 x, word n)
216 #define _(n) (((u32) 1 << (u32) (8*(n))) - (u32) 1)
217 static u32 masks_little_endian[] = {
220 static u32 masks_big_endian[] = {
221 0, ~_(3), ~_(2), ~_(1),
224 if (clib_arch_is_big_endian)
225 return x & masks_big_endian[n];
227 return x & masks_little_endian[n];
/* 32-bit variant of hash_memory64: 12 bytes (3 u32s) per hash_mix32
   round, tail handled with zap32.  Unlike the 64-bit path there is no
   page-boundary special-casing visible here.
   NOTE(review): declarations of q/a/b/c/n, switch case labels, and
   closing braces are missing from this extract. */
231 hash_memory32 (void *p, word n_bytes, u32 state)
240 while (n >= 3 * sizeof (u32))
242 a += clib_mem_unaligned (q + 0, u32);
243 b += clib_mem_unaligned (q + 1, u32);
244 c += clib_mem_unaligned (q + 2, u32);
245 hash_mix32 (a, b, c);
246 n -= 3 * sizeof (u32);
251 switch (n / sizeof (u32))
254 a += clib_mem_unaligned (q + 0, u32);
255 b += clib_mem_unaligned (q + 1, u32);
256 if (n % sizeof (u32))
257 c += zap32 (clib_mem_unaligned (q + 2, u32), n % sizeof (u32)) << 8;
261 a += clib_mem_unaligned (q + 0, u32);
262 if (n % sizeof (u32))
263 b += zap32 (clib_mem_unaligned (q + 1, u32), n % sizeof (u32));
267 if (n % sizeof (u32))
268 a += zap32 (clib_mem_unaligned (q + 0, u32), n % sizeof (u32));
272 hash_mix32 (a, b, c);
/* Public entry point: hash n_bytes starting at p, seeded with state.
   Dispatches to the 64- or 32-bit implementation; the selecting
   condition (uword width) is not visible in this extract. */
279 hash_memory (void *p, word n_bytes, uword state)
284 return hash_memory64 (q, n_bytes, state);
286 return hash_memory32 (q, n_bytes, state);
/* Fragments of the single-word hash helper (presumably hash_uword):
   golden-ratio seeding followed by one mix round, with separate 64-bit
   and 32-bit mixing paths.  Surrounding signature, variable setup and
   preprocessor conditionals are missing from this extract. */
296 a = b = 0x9e3779b97f4a7c13LL;
299 hash_mix64 (a, b, c);
311 hash_mix32 (a, b, c);
/* Call the key-sum function for a key.  The bucket index is derived by
   masking the sum with the (power of two) table size — see lookup().
   h->key_sum doubles as an enum selector when it holds one of the small
   KEY_FUNC_* constants, otherwise it is a real function pointer.
   NOTE(review): case labels for the default/uword cases, break
   statements and the return are missing from this extract. */
319 key_sum (hash_t * h, uword key)
322 switch (pointer_to_uword ((void *) h->key_sum))
325 sum = hash_uword (key);
328 case KEY_FUNC_POINTER_UWORD:
/* Key is a pointer to a uword; hash the pointed-to value. */
329 sum = hash_uword (*uword_to_pointer (key, uword *));
332 case KEY_FUNC_POINTER_U32:
333 sum = hash_uword (*uword_to_pointer (key, u32 *));
336 case KEY_FUNC_STRING:
337 sum = string_key_sum (h, key);
341 sum = mem_key_sum (h, key);
/* User-supplied sum function. */
345 sum = h->key_sum (h, key);
/* Slow-path key comparison, mirroring key_sum's KEY_FUNC_* dispatch.
   e carries the caller's fast-path result (key1 == key2) into the
   default/user-function cases.
   NOTE(review): several case labels, break statements and the return
   are missing from this extract. */
353 key_equal1 (hash_t * h, uword key1, uword key2, uword e)
355 switch (pointer_to_uword ((void *) h->key_equal))
360 case KEY_FUNC_POINTER_UWORD:
362 *uword_to_pointer (key1, uword *) == *uword_to_pointer (key2,
366 case KEY_FUNC_POINTER_U32:
367 e = *uword_to_pointer (key1, u32 *) == *uword_to_pointer (key2, u32 *);
370 case KEY_FUNC_STRING:
371 e = string_key_equal (h, key1, key2);
375 e = mem_key_equal (h, key1, key2);
379 e = h->key_equal (h, key1, key2);
385 /* Compares two keys: returns 1 if equal, 0 if not. */
387 key_equal (hash_t * h, uword key1, uword key2)
/* Fast path: identical uwords are equal for every key function. */
389 uword e = key1 == key2;
/* In debug builds, verify the fast path agrees with the full compare. */
390 if (CLIB_DEBUG > 0 && key1 == key2)
391 ASSERT (key_equal1 (h, key1, key2, e));
393 e = key_equal1 (h, key1, key2, e);
/* Linear-search an indirect pair block for key; return the matching
   pair, or NULL if absent.  For log2_pair_size > 0 the block length is
   packed into the pointer (indirect_pair_get_len); otherwise pi->pairs
   is a plain vector.
   NOTE(review): the vector-length branch, loop header and p0
   initialization are missing from this extract. */
397 static hash_pair_union_t *
398 get_indirect (void *v, hash_pair_indirect_t * pi, uword key)
400 hash_t *h = hash_header (v);
401 hash_pair_t *p0, *p1;
404 if (h->log2_pair_size > 0)
405 p1 = hash_forward (h, p0, indirect_pair_get_len (pi));
411 if (key_equal (h, p0->key, key))
412 return (hash_pair_union_t *) p0;
413 p0 = hash_forward1 (h, p0);
416 return (hash_pair_union_t *) 0;
/* Convert bucket i from a direct (user) pair into an indirect block of
   two pairs: allocate the block (vector when log2_pair_size == 0,
   clib_mem otherwise), copy the existing pair into slot 0, clear the
   is_user bit, and return slot 1 for the caller's new pair.
   NOTE(review): the else keyword, pi->pairs assignment and init_pair
   call are missing from this extract. */
419 static hash_pair_union_t *
420 set_indirect_is_user (void *v, uword i, hash_pair_union_t * p, uword key)
422 hash_t *h = hash_header (v);
424 hash_pair_indirect_t *pi = &p->indirect;
425 uword log2_bytes = 0;
427 if (h->log2_pair_size == 0)
428 q = vec_new (hash_pair_t, 2);
431 log2_bytes = 1 + hash_pair_log2_bytes (h);
432 q = clib_mem_alloc (1ULL << log2_bytes);
434 clib_memcpy_fast (q, &p->direct, hash_pair_bytes (h));
437 if (h->log2_pair_size > 0)
/* Pack allocation size and length (2) into the indirect pointer. */
438 indirect_pair_set (pi, log2_bytes, 2);
440 set_is_user (v, i, 0);
442 /* First element is used by existing pair, second will be used by caller. */
443 q = hash_forward1 (h, q);
446 return (hash_pair_union_t *) q;
/* Find key in an existing indirect block, growing the block by one pair
   when absent.  Sets *found_key (via the q hit path, not visible here);
   grows by doubling the clib_mem allocation when the packed length
   would overflow it.  Returns the (possibly new) pair.
   NOTE(review): the found-key early return, new_len computation, the
   realloc's old-size argument and the key assignment are missing from
   this extract. */
449 static hash_pair_union_t *
450 set_indirect (void *v, hash_pair_indirect_t * pi, uword key,
453 hash_t *h = hash_header (v);
454 hash_pair_t *new_pair;
455 hash_pair_union_t *q;
457 q = get_indirect (v, pi, key);
464 if (h->log2_pair_size == 0)
465 vec_add2 (pi->pairs, new_pair, 1);
468 uword len, new_len, log2_bytes;
470 len = indirect_pair_get_len (pi);
471 log2_bytes = indirect_pair_get_log2_bytes (pi);
474 if (new_len * hash_pair_bytes (h) > (1ULL << log2_bytes))
476 pi->pairs = clib_mem_realloc (pi->pairs,
477 1ULL << (log2_bytes + 1),
482 indirect_pair_set (pi, log2_bytes, new_len);
483 new_pair = pi->pairs + (len << h->log2_pair_size);
/* New pair starts with the all-ones "empty value" sentinel. */
486 init_pair (h, new_pair);
488 return (hash_pair_union_t *) new_pair;
/* Remove pair q from bucket i's indirect block.  With <= 2 pairs left
   the block collapses back to a direct pair (is_user set again) and the
   indirect storage is freed; otherwise the last pair is copied over q
   to keep the block dense, and the stored length is decremented.
   NOTE(review): the <= 2 condition, free calls, and several braces are
   missing from this extract. */
492 unset_indirect (void *v, uword i, hash_pair_t * q)
494 hash_t *h = hash_header (v);
495 hash_pair_union_t *p = get_pair (v, i);
497 hash_pair_indirect_t *pi = &p->indirect;
500 is_vec = h->log2_pair_size == 0;
502 ASSERT (!hash_is_user (v, i));
503 len = is_vec ? vec_len (pi->pairs) : indirect_pair_get_len (pi);
/* e points at the last pair in the block. */
504 e = hash_forward (h, pi->pairs, len - 1);
505 ASSERT (q >= pi->pairs && q <= e);
507 /* We have two or fewer pairs and we are deleting one pair.
508 Make indirect pointer direct and free indirect memory. */
511 hash_pair_t *r = pi->pairs;
/* Keep the survivor: if q is the first pair, take the second. */
515 clib_memcpy_fast (p, q == r ? hash_forward1 (h, r) : r,
516 hash_pair_bytes (h));
517 set_is_user (v, i, 1);
/* Bucket is now empty: zero the direct pair. */
520 zero_pair (h, &p->direct);
529 /* If deleting a pair we need to keep non-null pairs together. */
531 clib_memcpy_fast (q, e, hash_pair_bytes (h));
535 _vec_len (pi->pairs) -= 1;
537 indirect_pair_set (pi, indirect_pair_get_log2_bytes (pi), len - 1);
/* Core GET/SET/UNSET engine.  Hashes key to bucket i (table size is a
   power of two, hence the & mask), then walks the direct pair or the
   indirect block.  old_value receives the previous value on SET/UNSET;
   new_value is copied in on SET.  h->elts is adjusted at the end for
   SET (+1 if key was new) and UNSET (-1 if key was found).
   NOTE(review): op comparisons guarding each branch, the found_key
   declarations, p = NULL after delete, and the return are missing from
   this extract. */
549 lookup (void *v, uword key, enum lookup_opcode op,
550 void *new_value, void *old_value)
552 hash_t *h = hash_header (v);
553 hash_pair_union_t *p = 0;
561 i = key_sum (h, key) & (_vec_len (v) - 1);
563 value_bytes = hash_value_bytes (h);
565 if (hash_is_user (v, i))
567 found_key = key_equal (h, p->direct.key, key);
/* UNSET of a direct pair: clear the bucket entirely. */
572 set_is_user (v, i, 0);
573 if (old_value && value_bytes)
574 clib_memcpy_fast (old_value, p->direct.value, value_bytes);
575 zero_pair (h, &p->direct);
/* SET with a colliding direct pair: spill to an indirect block. */
581 p = set_indirect_is_user (v, i, p, key);
588 hash_pair_indirect_t *pi = &p->indirect;
/* SET into an empty bucket: claim it as a direct pair. */
595 set_is_user (v, i, 1);
598 p = set_indirect (v, pi, key, &found_key);
602 p = get_indirect (v, pi, key);
604 if (found_key && op == UNSET)
606 if (old_value && value_bytes)
607 clib_memcpy_fast (old_value, &p->direct.value, value_bytes);
609 unset_indirect (v, i, &p->direct);
611 /* Nullify p (since it's just been deleted).
612 Otherwise we might be tempted to play with it. */
618 if (op == SET && p != 0 && value_bytes)
620 /* Save away old value for caller. */
621 if (old_value && found_key)
622 clib_memcpy_fast (old_value, &p->direct.value, value_bytes);
623 clib_memcpy_fast (&p->direct.value, new_value, value_bytes);
627 h->elts += !found_key;
629 h->elts -= found_key;
634 /* Fetch value of key.  Returns a pointer into the table (or NULL when
   absent); for zero-sized pairs the key itself serves as the result. */
635 __clib_export uword *
636 _hash_get (void *v, uword key)
638 hash_t *h = hash_header (v);
641 /* Don't even search table if its empty. */
642 if (!v || h->elts == 0)
645 p = lookup (v, key, GET, 0, 0);
648 if (h->log2_pair_size == 0)
/* Fetch the whole (key,value) pair for key, or NULL when absent. */
654 __clib_export hash_pair_t *
655 _hash_get_pair (void *v, uword key)
657 return lookup (v, key, GET, 0, 0);
/* Stateful iterator: hn->i walks buckets, hn->j walks pairs within an
   indirect block.  On first call it freezes the table size by setting
   no-grow/no-shrink flags; when i passes capacity the iterator state is
   reset (and presumably the flags restored — restore code not visible).
   Returns the next pair, or NULL when iteration completes.
   NOTE(review): the h->flags assignment target, the NULL returns and
   loop structure are missing from this extract. */
660 __clib_export hash_pair_t *
661 hash_next (void *v, hash_next_t *hn)
663 hash_t *h = hash_header (v);
668 if (hn->i == 0 && hn->j == 0)
673 /* Prevent others from re-sizing hash table. */
675 (HASH_FLAG_NO_AUTO_GROW
676 | HASH_FLAG_NO_AUTO_SHRINK | HASH_FLAG_HASH_NEXT_IN_PROGRESS);
678 else if (hn->i >= hash_capacity (v))
/* Done: reset iterator state for the next traversal. */
682 clib_memset (hn, 0, sizeof (hn[0]));
686 p = hash_forward (h, v, hn->i);
687 if (hash_is_user (v, hn->i))
694 hash_pair_indirect_t *pi = (void *) p;
697 if (h->log2_pair_size > 0)
698 n = indirect_pair_get_len (pi);
700 n = vec_len (pi->pairs);
708 return hash_forward (h, pi->pairs, hn->j++);
713 /* Remove key from table.  Copies the old value into old_value (when
   non-NULL), then optionally shrinks the table. */
715 _hash_unset (void *v, uword key, void *old_value)
722 (void) lookup (v, key, UNSET, 0, old_value);
725 if (!(h->flags & HASH_FLAG_NO_AUTO_SHRINK))
727 /* Resize when 1/4 full. */
728 if (h->elts > 32 && 4 * (h->elts + 1) < vec_len (v))
729 v = hash_resize (v, vec_len (v) / 2);
/* Allocate a new hash table for at least elts elements, copying
   configuration (pair size, key functions, formatter defaults) from
   h_user when provided.  The vector is sized elts pairs plus the
   is_user bitmap as header bytes.
   NOTE(review): _vec_resize's remaining arguments, the h assignment,
   and the user/key-function copies are missing from this extract. */
736 _hash_create (uword elts, hash_t * h_user)
739 uword log2_pair_size;
742 /* Size of hash is power of 2 >= ELTS and larger than
743 number of bits in is_user bitmap elements. */
744 elts = clib_max (elts, BITS (h->is_user[0]));
745 elts = 1ULL << max_log2 (elts);
749 log2_pair_size = h_user->log2_pair_size;
751 v = _vec_resize ((void *) 0,
754 (elts << log2_pair_size) * sizeof (hash_pair_t),
757 (elts / BITS (h->is_user[0])) * sizeof (h->is_user[0]),
758 /* alignment */ sizeof (hash_pair_t));
764 h->log2_pair_size = log2_pair_size;
767 /* Default flags to never shrinking hash tables.
768 Shrinking tables can cause "jackpot" cases. */
770 h->flags = HASH_FLAG_NO_AUTO_SHRINK;
774 h->format_pair = hash_format_pair_default;
775 h->format_pair_arg = 0;
/* Body fragment of the table-free routine (signature not visible in
   this extract): walk every bucket and release indirect pair storage —
   vec_free for vector-backed blocks, clib_mem_free otherwise — before
   the table vector itself is freed (free of v not visible). */
784 hash_t *h = hash_header (v);
785 hash_pair_union_t *p;
791 /* We zero all freed memory in case user would be tempted to use it. */
792 for (i = 0; i < hash_capacity (v); i++)
794 if (hash_is_user (v, i))
797 if (h->log2_pair_size == 0)
798 vec_free (p->indirect.pairs);
799 else if (p->indirect.pairs)
800 clib_mem_free (p->indirect.pairs);
/* Rebuild the table at new_size by creating a fresh table (inheriting
   old's configuration) and re-inserting every pair; free_old controls
   whether the old table is released (free path not visible). */
809 hash_resize_internal (void *old, uword new_size, uword free_old)
817 hash_t *h = old ? hash_header (old) : 0;
818 new = _hash_create (new_size, h);
820 hash_foreach_pair (p, old, {
821 new = _hash_set3 (new, p->key, &p->value[0], 0);
/* Resize and free the old table. */
832 hash_resize (void *old, uword new_size)
834 return hash_resize_internal (old, new_size, 1);
840 return hash_resize_internal (old, vec_len (old), 0);
/* Insert/overwrite key -> value; previous value copied to old_value
   when non-NULL.  Lazily creates a uword-valued table when v is NULL,
   and grows (doubling) at 3/4 occupancy unless auto-grow is disabled.
   Returns the (possibly reallocated) table vector. */
844 _hash_set3 (void *v, uword key, void *value, void *old_value)
849 v = hash_create (0, sizeof (uword));
852 (void) lookup (v, key, SET, value, old_value);
854 if (!(h->flags & HASH_FLAG_NO_AUTO_GROW))
856 /* Resize when 3/4 full. */
857 if (4 * (h->elts + 1) > 3 * vec_len (v))
858 v = hash_resize (v, 2 * vec_len (v));
/* Key-sum for vector keys: key is a pointer to a vec; hash its
   contents, h->user holding the per-element byte size. */
865 vec_key_sum (hash_t * h, uword key)
867 void *v = uword_to_pointer (key, void *);
868 return hash_memory (v, vec_len (v) * h->user, 0);
/* Key-equal for vector keys: equal iff same element count and
   byte-identical contents (h->user = element size in bytes). */
872 vec_key_equal (hash_t * h, uword key1, uword key2)
874 void *v1 = uword_to_pointer (key1, void *);
875 void *v2 = uword_to_pointer (key2, void *);
876 uword l1 = vec_len (v1);
877 uword l2 = vec_len (v2);
878 return l1 == l2 && 0 == memcmp (v1, v2, l1 * h->user);
/* format() callback for vector-keyed pairs: prints the key vector as a
   string (element size 1), as hex words (the visible loops presumably
   cover element sizes 2/4/8 — selecting switch not visible), or as raw
   hex bytes otherwise; then the value when the table stores one.
   NOTE(review): the w declarations, case labels and return are missing
   from this extract. */
882 vec_key_format_pair (u8 * s, va_list * args)
884 void *CLIB_UNUSED (user_arg) = va_arg (*args, void *);
885 void *v = va_arg (*args, void *);
886 hash_pair_t *p = va_arg (*args, hash_pair_t *);
887 hash_t *h = hash_header (v);
888 void *u = uword_to_pointer (p->key, void *);
894 s = format (s, "%v", u);
900 for (i = 0; i < vec_len (w); i++)
901 s = format (s, "0x%x, ", w[i]);
908 for (i = 0; i < vec_len (w); i++)
909 s = format (s, "0x%x, ", w[i]);
916 for (i = 0; i < vec_len (w); i++)
917 s = format (s, "0x%Lx, ", w[i]);
922 s = format (s, "0x%U", format_hex_bytes, u, vec_len (u) * h->user);
926 if (hash_value_bytes (h) > 0)
927 s = format (s, " -> 0x%wx", p->value[0]);
/* Key-sum for fixed-size memory keys: hash h->user bytes at the
   pointed-to address. */
933 mem_key_sum (hash_t * h, uword key)
935 uword *v = uword_to_pointer (key, void *);
936 return hash_memory (v, h->user, 0);
/* Key-equal for fixed-size memory keys: both pointers non-NULL and
   h->user bytes identical. */
940 mem_key_equal (hash_t * h, uword key1, uword key2)
942 void *v1 = uword_to_pointer (key1, void *);
943 void *v2 = uword_to_pointer (key2, void *);
944 return v1 && v2 && 0 == memcmp (v1, v2, h->user);
/* Key-sum for NUL-terminated C-string keys. */
948 string_key_sum (hash_t * h, uword key)
950 char *v = uword_to_pointer (key, char *);
951 return hash_memory (v, strlen (v), 0);
/* Key-equal for C-string keys: both non-NULL and strcmp-equal. */
955 string_key_equal (hash_t * h, uword key1, uword key2)
957 void *v1 = uword_to_pointer (key1, void *);
958 void *v2 = uword_to_pointer (key2, void *);
959 return v1 && v2 && 0 == strcmp (v1, v2);
/* format() callback for string-keyed pairs: "<key> -> 0x<value hex>".
   NOTE(review): the assignment target of the second format() call and
   the return are missing from this extract. */
963 string_key_format_pair (u8 * s, va_list * args)
965 void *CLIB_UNUSED (user_arg) = va_arg (*args, void *);
966 void *v = va_arg (*args, void *);
967 hash_pair_t *p = va_arg (*args, hash_pair_t *);
968 hash_t *h = hash_header (v);
969 void *u = uword_to_pointer (p->key, void *);
971 s = format (s, "%s", u);
973 if (hash_value_bytes (h) > 0)
975 format (s, " -> 0x%8U", format_hex_bytes, &p->value[0],
976 hash_value_bytes (h));
/* Default pair formatter: key as 0x%08x, value as hex bytes when the
   table stores values.
   NOTE(review): the assignment target of the second format() call and
   the return are missing from this extract. */
982 hash_format_pair_default (u8 * s, va_list * args)
984 void *CLIB_UNUSED (user_arg) = va_arg (*args, void *);
985 void *v = va_arg (*args, void *);
986 hash_pair_t *p = va_arg (*args, hash_pair_t *);
987 hash_t *h = hash_header (v);
989 s = format (s, "0x%08x", p->key);
990 if (hash_value_bytes (h) > 0)
992 format (s, " -> 0x%8U", format_hex_bytes, &p->value[0],
993 hash_value_bytes (h));
/* Body fragment of the memory-accounting routine (signature not
   visible): total the table vector's capacity plus every indirect
   block's allocation (packed log2 size, or vec capacity). */
1001 hash_t *h = hash_header (v);
1006 bytes = vec_capacity (v, hash_header_bytes (v));
1008 for (i = 0; i < hash_capacity (v); i++)
1010 if (!hash_is_user (v, i))
1012 hash_pair_union_t *p = get_pair (v, i);
1013 if (h->log2_pair_size > 0)
1014 bytes += 1 << indirect_pair_get_log2_bytes (&p->indirect);
1016 bytes += vec_capacity (p->indirect.pairs, 0);
/* format() function for a whole table: summary line, then (verbose)
   a bucket-occupancy histogram, estimated compares per lookup, and
   every pair via the table's format_pair callback.
   NOTE(review): the occupancy[j] increment, braces and return are
   missing from this extract. */
1023 format_hash (u8 *s, va_list *va)
1025 void *v = va_arg (*va, void *);
1026 int verbose = va_arg (*va, int);
1028 hash_t *h = hash_header (v);
1031 s = format (s, "hash %p, %wd elts, capacity %wd, %wd bytes used,\n",
1032 v, hash_elts (v), hash_capacity (v), hash_bytes (v));
1035 uword *occupancy = 0;
1037 /* Count number of buckets with each occupancy. */
1038 for (i = 0; i < hash_capacity (v); i++)
1042 if (hash_is_user (v, i))
1048 hash_pair_union_t *p = get_pair (v, i);
1049 if (h->log2_pair_size > 0)
1050 j = indirect_pair_get_len (&p->indirect);
1052 j = vec_len (p->indirect.pairs);
1055 vec_validate (occupancy, j);
1059 s = format (s, " profile ");
1060 for (i = 0; i < vec_len (occupancy); i++)
1061 s = format (s, "%wd%c", occupancy[i],
1062 i + 1 == vec_len (occupancy) ? '\n' : ' ');
1064 s = format (s, " lookup # of compares: ");
1065 for (i = 1; i < vec_len (occupancy); i++)
1066 s = format (s, "%wd: .%03d%c", i,
1067 (1000 * i * occupancy[i]) / hash_elts (v),
1068 i + 1 == vec_len (occupancy) ? '\n' : ' ');
1070 vec_free (occupancy);
1076 hash_foreach_pair (p, v, {
1077 s = format (s, " %U\n", h->format_pair, h->format_pair_arg, v, p);
/* unformat() helper shared by the two string-lookup unformatters:
   parse a string (vec or C-string form per is_vec), look it up in the
   hash, and store the found value in *result.
   NOTE(review): the p declaration, result assignment, string free and
   return are missing from this extract. */
1086 unformat_hash_string_internal (unformat_input_t * input,
1087 va_list * va, int is_vec)
1089 uword *hash = va_arg (*va, uword *);
1090 int *result = va_arg (*va, int *);
1094 if (!unformat (input, is_vec ? "%v%_" : "%s%_", &string))
1097 p = hash_get_mem (hash, string);
/* unformat() wrapper: key parsed as a vec string. */
1106 unformat_hash_vec_string (unformat_input_t * input, va_list * va)
1108 return unformat_hash_string_internal (input, va, /* is_vec */ 1);
/* unformat() wrapper: key parsed as a NUL-terminated C string. */
1112 unformat_hash_string (unformat_input_t * input, va_list * va)
1114 return unformat_hash_string_internal (input, va, /* is_vec */ 0);
/* Consistency check: every direct pair has a non-zero key, all keys
   (direct and indirect) are unique, and the collected key count equals
   h->elts.  Returns NULL on success, a clib_error_t on the first failed
   CHECK.
   NOTE(review): the keys/i/j declarations, the else introducing the
   indirect branch, CHECK macro #undef, keys vec_free and done label are
   missing from this extract. */
1117 __clib_export clib_error_t *
1118 hash_validate (void *v)
1120 hash_t *h = hash_header (v);
1123 clib_error_t *error = 0;
1125 #define CHECK(x) if ((error = ERROR_ASSERT (x))) goto done;
1127 for (i = 0; i < hash_capacity (v); i++)
1129 hash_pair_union_t *pu = get_pair (v, i);
1131 if (hash_is_user (v, i))
1133 CHECK (pu->direct.key != 0);
1134 vec_add1 (keys, pu->direct.key);
1139 hash_pair_indirect_t *pi = &pu->indirect;
1142 n = h->log2_pair_size > 0
1143 ? indirect_pair_get_len (pi) : vec_len (pi->pairs);
1145 for (p = pi->pairs; n-- > 0; p = hash_forward1 (h, p))
1147 /* Assert key uniqueness. */
1148 for (j = 0; j < vec_len (keys); j++)
1149 CHECK (keys[j] != p->key);
1150 vec_add1 (keys, p->key);
1155 CHECK (vec_len (keys) == h->elts);
1163 * fd.io coding-style-patch-verification: ON
1166 * eval: (c-set-style "gnu")