2 * Copyright (c) 2015 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
16 Copyright (c) 2001-2005 Eliot Dresselhaus
18 Permission is hereby granted, free of charge, to any person obtaining
19 a copy of this software and associated documentation files (the
20 "Software"), to deal in the Software without restriction, including
21 without limitation the rights to use, copy, modify, merge, publish,
22 distribute, sublicense, and/or sell copies of the Software, and to
23 permit persons to whom the Software is furnished to do so, subject to
24 the following conditions:
26 The above copyright notice and this permission notice shall be
27 included in all copies or substantial portions of the Software.
29 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
30 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
31 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
32 NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
33 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
34 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
35 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
38 #include <vppinfra/hash.h>
39 #include <vppinfra/error.h>
40 #include <vppinfra/mem.h>
41 #include <vppinfra/byte_order.h> /* for clib_arch_is_big_endian */
/* zero_pair: clear an entire (key, value) pair to zero bytes.
   NOTE(review): sampled dump — leading numbers are original file line numbers;
   the signature/braces are missing and must be restored from upstream
   vppinfra/hash.c before this compiles. */
44 zero_pair (hash_t * h, hash_pair_t * p)
46 clib_memset (p, 0, hash_pair_bytes (h));
/* init_pair: mark a pair's value as "empty" by filling it with all-ones
   (~0); the key is left untouched.
   NOTE(review): fragment — embedded numbers are original line numbers;
   restore gaps from upstream vppinfra/hash.c. */
50 init_pair (hash_t * h, hash_pair_t * p)
52 clib_memset (p->value, ~0, hash_value_bytes (h));
/* get_pair: return bucket I of hash vector V as a pair union.
   Pairs are stored inline in the vector; bucket stride is
   2^log2_pair_size hash_pair_t's.
   NOTE(review): fragment — embedded numbers are original line numbers;
   the declaration of the cursor pointer is among the missing lines. */
55 always_inline hash_pair_union_t *
56 get_pair (void *v, uword i)
58 hash_t *h = hash_header (v);
/* Bucket index must be inside the backing vector. */
60 ASSERT (i < vec_len (v));
/* Advance by i buckets; shift multiplies by the per-bucket pair count. */
62 p += i << h->log2_pair_size;
63 return (hash_pair_union_t *) p;
/* set_is_user: set/clear bucket I's bit in the header's is_user bitmap.
   A set bit means the bucket holds a single "direct" user pair; a clear
   bit means it holds an indirect collision chain.
   NOTE(review): fragment — the branch that sets the bit (is_user != 0)
   is among the missing lines; only the clear path is visible here. */
67 set_is_user (void *v, uword i, uword is_user)
69 hash_t *h = hash_header (v);
/* i0 = word index into the bitmap, i1 = single-bit mask within that word. */
70 uword i0 = i / BITS (h->is_user[0]);
71 uword i1 = (uword) 1 << (i % BITS (h->is_user[0]));
/* Clear path: mark bucket as indirect. */
75 h->is_user[i0] &= ~i1;
/* Forward declaration: default pair formatter used by _hash_create. */
78 static u8 *hash_format_pair_default (u8 * s, va_list * args);
/* zap64: keep only the low N valid bytes of a u64 read from memory,
   honoring host endianness (on big-endian the *high* bytes are the valid
   ones, hence the inverted masks). Used so the hash functions can read a
   full 8-byte word past the end of a short key and discard the garbage.
   NOTE(review): fragment — the function signature, array terminators and
   #undef are among the missing lines; see upstream vppinfra/hash.c. */
85 #define _(n) (((u64) 1 << (u64) (8*(n))) - (u64) 1)
86 static u64 masks_little_endian[] = {
87 0, _(1), _(2), _(3), _(4), _(5), _(6), _(7),
89 static u64 masks_big_endian[] = {
90 0, ~_(7), ~_(6), ~_(5), ~_(4), ~_(3), ~_(2), ~_(1),
93 if (clib_arch_is_big_endian)
94 return x & masks_big_endian[n];
96 return x & masks_little_endian[n];
100 * make address-sanitizer skip this:
101 * clib_mem_unaligned + zap64 casts its input as u64, computes a mask
102 * according to the input length, and returns the casted masked value.
103 * Therefore all the 8 Bytes of the u64 are systematically read, which
104 * rightfully causes address-sanitizer to raise an error on smaller inputs.
106 * However the invalid Bytes are discarded within zap64(), which is why
107 * this can be silenced safely.
109 * The above is true *unless* the extra bytes cross a page boundary
110 * into unmapped or no-access space, hence the boundary crossing check.
/* hash_memory64: mix N_BYTES of arbitrary memory at P into a 64-bit hash,
   seeded with STATE. Three 64-bit lanes (a, b, c) are accumulated and
   stirred with hash_mix64 every 24 bytes; the tail is read as whole u64
   words and trimmed with zap64 (see the no_sanitize_address comment above
   for why the over-read is deliberate and when it is unsafe).
   NOTE(review): sampled dump — embedded numbers are original line numbers;
   locals (q, a, b, c, n), the tmp union, case labels and braces are among
   the missing lines; restore from upstream vppinfra/hash.c. */
112 static inline u64 __attribute__ ((no_sanitize_address))
113 hash_memory64 (void *p, word n_bytes, u64 state)
117 int page_boundary_crossing;
118 u64 start_addr, end_addr;
126 * If the request crosses a 4k boundary, it's not OK to assume
127 * that the zap64 game is safe. 4k is the minimum known page size.
129 start_addr = (u64) p;
/* +7: the widest possible over-read is 7 bytes past the last valid byte. */
130 end_addr = start_addr + n_bytes + 7;
131 page_boundary_crossing = (start_addr >> 12) != (end_addr >> 12);
/* Golden-ratio constant, the classic Jenkins-style initializer. */
133 a = b = 0x9e3779b97f4a7c13LL;
/* Main loop: consume 24 bytes (3 u64 words) per mix round. */
137 while (n >= 3 * sizeof (u64))
139 a += clib_mem_unaligned (q + 0, u64);
140 b += clib_mem_unaligned (q + 1, u64);
141 c += clib_mem_unaligned (q + 2, u64);
142 hash_mix64 (a, b, c);
143 n -= 3 * sizeof (u64);
/* Tail: 0..23 bytes left; dispatch on the number of whole u64 words. */
148 switch (n / sizeof (u64))
151 a += clib_mem_unaligned (q + 0, u64);
152 b += clib_mem_unaligned (q + 1, u64);
153 if (n % sizeof (u64))
/* Fast path: safe to over-read a full word and mask with zap64. */
155 if (PREDICT_TRUE (page_boundary_crossing == 0))
157 zap64 (clib_mem_unaligned (q + 2, u64), n % sizeof (u64)) << 8;
/* Slow path: near a page boundary — copy only the valid bytes first. */
160 clib_memcpy_fast (tmp.as_u8, q + 2, n % sizeof (u64));
161 c += zap64 (tmp.as_u64, n % sizeof (u64)) << 8;
167 a += clib_mem_unaligned (q + 0, u64);
168 if (n % sizeof (u64))
170 if (PREDICT_TRUE (page_boundary_crossing == 0))
171 b += zap64 (clib_mem_unaligned (q + 1, u64), n % sizeof (u64));
174 clib_memcpy_fast (tmp.as_u8, q + 1, n % sizeof (u64));
175 b += zap64 (tmp.as_u64, n % sizeof (u64));
181 if (n % sizeof (u64))
183 if (PREDICT_TRUE (page_boundary_crossing == 0))
184 a += zap64 (clib_mem_unaligned (q + 0, u64), n % sizeof (u64));
187 clib_memcpy_fast (tmp.as_u8, q, n % sizeof (u64));
188 a += zap64 (tmp.as_u64, n % sizeof (u64));
/* Final stir before returning the c lane (return among missing lines). */
194 hash_mix64 (a, b, c);
/* 32-bit uword variant of the machinery above. */
199 #else /* if uword_bits == 64 */
/* zap32: 32-bit analogue of zap64 — keep the low N valid bytes of a u32,
   endianness-aware. NOTE(review): fragment — signature return type, array
   terminators and #undef are among the missing lines. */
202 zap32 (u32 x, word n)
204 #define _(n) (((u32) 1 << (u32) (8*(n))) - (u32) 1)
205 static u32 masks_little_endian[] = {
208 static u32 masks_big_endian[] = {
209 0, ~_(3), ~_(2), ~_(1),
212 if (clib_arch_is_big_endian)
213 return x & masks_big_endian[n];
215 return x & masks_little_endian[n];
/* hash_memory32: 32-bit analogue of hash_memory64 — mix N_BYTES at P into
   a 32-bit hash seeded with STATE, 12 bytes (3 u32 words) per round, tail
   trimmed with zap32. Note: no page-boundary guard here, unlike the 64-bit
   path — presumably an accepted over-read on 32-bit targets; TODO confirm.
   NOTE(review): fragment — locals, case labels and braces are among the
   missing lines; restore from upstream vppinfra/hash.c. */
219 hash_memory32 (void *p, word n_bytes, u32 state)
228 while (n >= 3 * sizeof (u32))
230 a += clib_mem_unaligned (q + 0, u32);
231 b += clib_mem_unaligned (q + 1, u32);
232 c += clib_mem_unaligned (q + 2, u32);
233 hash_mix32 (a, b, c);
234 n -= 3 * sizeof (u32);
/* Tail: dispatch on the number of whole u32 words remaining. */
239 switch (n / sizeof (u32))
242 a += clib_mem_unaligned (q + 0, u32);
243 b += clib_mem_unaligned (q + 1, u32);
244 if (n % sizeof (u32))
245 c += zap32 (clib_mem_unaligned (q + 2, u32), n % sizeof (u32)) << 8;
249 a += clib_mem_unaligned (q + 0, u32);
250 if (n % sizeof (u32))
251 b += zap32 (clib_mem_unaligned (q + 1, u32), n % sizeof (u32));
255 if (n % sizeof (u32))
256 a += zap32 (clib_mem_unaligned (q + 0, u32), n % sizeof (u32));
/* Final stir before returning the c lane. */
260 hash_mix32 (a, b, c);
/* hash_memory: public entry point — dispatch to the native-word-width
   memory hash. NOTE(review): fragment — the uword_bits preprocessor
   selection between the two returns is among the missing lines. */
267 hash_memory (void *p, word n_bytes, uword state)
272 return hash_memory64 (q, n_bytes, state);
274 return hash_memory32 (q, n_bytes, state);
/* NOTE(review): orphaned fragment — presumably the body of hash_uword
   (hashing a single uword with the same golden-ratio seed and mix
   primitives, in 64- and 32-bit variants); the signature and surrounding
   lines are missing — confirm against upstream vppinfra/hash.c. */
284 a = b = 0x9e3779b97f4a7c13LL;
287 hash_mix64 (a, b, c);
299 hash_mix32 (a, b, c);
304 /* Call sum function. Hash code will be sum function value
305 modulo the prime length of the hash table. */
/* key_sum: compute the hash of KEY. Small integer values of h->key_sum
   are opcodes (KEY_FUNC_*) selecting a builtin; any larger value is a
   real function pointer and is called. NOTE(review): fragment — the
   locals, remaining case labels, breaks and return are missing lines. */
307 key_sum (hash_t * h, uword key)
310 switch (pointer_to_uword ((void *) h->key_sum))
/* Default: key itself is the datum. */
313 sum = hash_uword (key);
/* Key is a pointer to a uword-sized datum. */
316 case KEY_FUNC_POINTER_UWORD:
317 sum = hash_uword (*uword_to_pointer (key, uword *));
/* Key is a pointer to a u32-sized datum. */
320 case KEY_FUNC_POINTER_U32:
321 sum = hash_uword (*uword_to_pointer (key, u32 *));
/* Key is a pointer to a NUL-terminated C string. */
324 case KEY_FUNC_STRING:
325 sum = string_key_sum (h, key);
/* Key is a pointer to h->user bytes of memory. */
329 sum = mem_key_sum (h, key);
/* Otherwise h->key_sum is a genuine user callback. */
333 sum = h->key_sum (h, key);
/* key_equal1: full key comparison, dispatched the same way as key_sum —
   small h->key_equal values are KEY_FUNC_* opcodes, larger values are a
   user callback. E carries the caller's cheap (key1 == key2) result.
   NOTE(review): fragment — case labels, breaks and return are missing. */
341 key_equal1 (hash_t * h, uword key1, uword key2, uword e)
343 switch (pointer_to_uword ((void *) h->key_equal))
348 case KEY_FUNC_POINTER_UWORD:
350 *uword_to_pointer (key1, uword *) == *uword_to_pointer (key2,
354 case KEY_FUNC_POINTER_U32:
355 e = *uword_to_pointer (key1, u32 *) == *uword_to_pointer (key2, u32 *);
358 case KEY_FUNC_STRING:
359 e = string_key_equal (h, key1, key2);
/* Keys point at h->user bytes of memory. */
363 e = mem_key_equal (h, key1, key2);
/* Genuine user callback. */
367 e = h->key_equal (h, key1, key2);
373 /* Compares two keys: returns 1 if equal, 0 if not. */
/* Fast path: identical uwords must compare equal; in debug builds this is
   cross-checked against the full comparator. Only unequal uwords pay for
   the key_equal1 dispatch. NOTE(review): fragment — braces/else/return
   are among the missing lines. */
375 key_equal (hash_t * h, uword key1, uword key2)
377 uword e = key1 == key2;
378 if (CLIB_DEBUG > 0 && key1 == key2)
379 ASSERT (key_equal1 (h, key1, key2, e));
381 e = key_equal1 (h, key1, key2, e);
/* get_indirect: linear-search an indirect (collision) bucket PI for KEY;
   return the matching pair or NULL. For log2_pair_size > 0 the chain
   length lives in the pointer's low bits (indirect_pair_get_len);
   otherwise the chain is a vec and its length bounds the scan.
   NOTE(review): fragment — the p0/p1 initialization, the vec-length
   branch and the while-loop header are among the missing lines. */
385 static hash_pair_union_t *
386 get_indirect (void *v, hash_pair_indirect_t * pi, uword key)
388 hash_t *h = hash_header (v);
389 hash_pair_t *p0, *p1;
392 if (h->log2_pair_size > 0)
393 p1 = hash_forward (h, p0, indirect_pair_get_len (pi));
399 if (key_equal (h, p0->key, key))
400 return (hash_pair_union_t *) p0;
/* Advance one pair along the chain. */
401 p0 = hash_forward1 (h, p0);
/* Not found. */
404 return (hash_pair_union_t *) 0;
/* set_indirect_is_user: convert a direct (user) bucket into an indirect
   2-slot chain because a second key collided into it. The existing pair
   is copied into slot 0 of a freshly allocated chain; a pointer to slot 1
   (for the caller's new pair) is returned.
   NOTE(review): fragment — declaration of q, else branch, the pi->pairs
   assignment and the new pair's key store are among the missing lines. */
407 static hash_pair_union_t *
408 set_indirect_is_user (void *v, uword i, hash_pair_union_t * p, uword key)
410 hash_t *h = hash_header (v);
412 hash_pair_indirect_t *pi = &p->indirect;
413 uword log2_bytes = 0;
/* Chain storage: a vec when pairs are single-word, raw clib_mem
   otherwise (length then tracked in the pointer's spare bits). */
415 if (h->log2_pair_size == 0)
416 q = vec_new (hash_pair_t, 2);
419 log2_bytes = 1 + hash_pair_log2_bytes (h);
420 q = clib_mem_alloc (1ULL << log2_bytes);
/* Move the displaced direct pair into the chain's first slot. */
422 clib_memcpy_fast (q, &p->direct, hash_pair_bytes (h));
425 if (h->log2_pair_size > 0)
426 indirect_pair_set (pi, log2_bytes, 2);
/* Bucket now holds an indirect chain, not a user pair. */
428 set_is_user (v, i, 0);
430 /* First element is used by existing pair, second will be used by caller. */
431 q = hash_forward1 (h, q);
434 return (hash_pair_union_t *) q;
/* set_indirect: find-or-insert KEY in an existing indirect bucket PI.
   If the key is already present the existing pair is returned; otherwise
   the chain is grown (vec_add2 for vec-backed chains, doubling
   clib_mem_realloc for raw chains) and a freshly init'd pair returned.
   NOTE(review): fragment — the found_key out-parameter in the signature,
   the early return on hit, new_len computation, key store and braces are
   among the missing lines. */
437 static hash_pair_union_t *
438 set_indirect (void *v, hash_pair_indirect_t * pi, uword key,
441 hash_t *h = hash_header (v);
442 hash_pair_t *new_pair;
443 hash_pair_union_t *q;
/* First probe for an existing entry. */
445 q = get_indirect (v, pi, key);
452 if (h->log2_pair_size == 0)
453 vec_add2 (pi->pairs, new_pair, 1);
456 uword len, new_len, log2_bytes;
458 len = indirect_pair_get_len (pi);
459 log2_bytes = indirect_pair_get_log2_bytes (pi);
/* Double the raw allocation when the chain no longer fits. */
462 if (new_len * hash_pair_bytes (h) > (1ULL << log2_bytes))
464 pi->pairs = clib_mem_realloc (pi->pairs,
465 1ULL << (log2_bytes + 1),
/* Record the new length (and size) back into the tagged pointer. */
470 indirect_pair_set (pi, log2_bytes, new_len);
471 new_pair = pi->pairs + (len << h->log2_pair_size);
/* Value starts as ~0 "empty"; caller fills it in. */
474 init_pair (h, new_pair);
476 return (hash_pair_union_t *) new_pair;
/* unset_indirect: delete pair Q from indirect bucket I. When only one
   pair would remain, the bucket collapses back to a direct user pair and
   the chain storage is freed; otherwise the last pair is copied over Q to
   keep the chain dense, and the length is decremented.
   NOTE(review): fragment — locals (is_vec, len, e), the len<=2 test,
   the free of r, braces/else paths are among the missing lines. */
480 unset_indirect (void *v, uword i, hash_pair_t * q)
482 hash_t *h = hash_header (v);
483 hash_pair_union_t *p = get_pair (v, i);
485 hash_pair_indirect_t *pi = &p->indirect;
/* Single-word pairs use a vec-backed chain; larger pairs a raw buffer. */
488 is_vec = h->log2_pair_size == 0;
490 ASSERT (!hash_is_user (v, i));
491 len = is_vec ? vec_len (pi->pairs) : indirect_pair_get_len (pi);
/* e = last pair in the chain. */
492 e = hash_forward (h, pi->pairs, len - 1);
493 ASSERT (q >= pi->pairs && q <= e);
495 /* We have two or fewer pairs and we are deleting one pair.
496 Make indirect pointer direct and free indirect memory. */
499 hash_pair_t *r = pi->pairs;
/* Keep the survivor: if Q is slot 0, the survivor is slot 1 and
   vice versa. */
503 clib_memcpy_fast (p, q == r ? hash_forward1 (h, r) : r,
504 hash_pair_bytes (h));
505 set_is_user (v, i, 1);
/* len == 1 case: bucket becomes completely empty. */
508 zero_pair (h, &p->direct);
517 /* If deleting a pair we need to keep non-null pairs together. */
519 clib_memcpy_fast (q, e, hash_pair_bytes (h));
523 _vec_len (pi->pairs) -= 1;
525 indirect_pair_set (pi, indirect_pair_get_log2_bytes (pi), len - 1);
/* lookup: the single engine behind get/set/unset. Hashes KEY to bucket i
   (capacity is a power of two, hence the mask), then handles the three
   bucket states: direct user pair, indirect chain, or empty. On SET the
   new value is copied in and h->elts incremented for a fresh key; on
   UNSET the old value is optionally copied out and h->elts decremented.
   NOTE(review): heavily sampled — opcode enum cases, braces, the empty-
   bucket SET path and the return are among the missing lines; restore
   from upstream vppinfra/hash.c. */
537 lookup (void *v, uword key, enum lookup_opcode op,
538 void *new_value, void *old_value)
540 hash_t *h = hash_header (v);
541 hash_pair_union_t *p = 0;
/* Power-of-two capacity: mask selects the bucket. */
548 i = key_sum (h, key) & (_vec_len (v) - 1);
551 if (hash_is_user (v, i))
553 found_key = key_equal (h, p->direct.key, key);
/* UNSET of a direct pair: bucket becomes empty. */
558 set_is_user (v, i, 0);
560 clib_memcpy_fast (old_value, p->direct.value,
561 hash_value_bytes (h));
562 zero_pair (h, &p->direct);
/* SET colliding with a different direct key: spill to a chain. */
568 p = set_indirect_is_user (v, i, p, key);
/* Bucket already holds an indirect chain. */
575 hash_pair_indirect_t *pi = &p->indirect;
/* Empty bucket SET path: claim it as a direct user pair. */
582 set_is_user (v, i, 1);
585 p = set_indirect (v, pi, key, &found_key);
589 p = get_indirect (v, pi, key);
591 if (found_key && op == UNSET)
594 clib_memcpy_fast (old_value, &p->direct.value,
595 hash_value_bytes (h));
597 unset_indirect (v, i, &p->direct);
599 /* Nullify p (since it's just been deleted).
600 Otherwise we might be tempted to play with it. */
606 if (op == SET && p != 0)
608 /* Save away old value for caller. */
609 if (old_value && found_key)
610 clib_memcpy_fast (old_value, &p->direct.value, hash_value_bytes (h));
/* Install the new value. */
611 clib_memcpy_fast (&p->direct.value, new_value, hash_value_bytes (h));
/* Element accounting: SET of a new key grows, UNSET of a found key
   shrinks. */
615 h->elts += !found_key;
617 h->elts -= found_key;
622 /* Fetch value of key. */
/* _hash_get: GET lookup returning a pointer to the stored value (NULL on
   miss). NOTE(review): fragment — the early return, the p local and the
   two return expressions (key-only vs. value pointer) are missing. */
624 _hash_get (void *v, uword key)
626 hash_t *h = hash_header (v);
629 /* Don't even search table if it's empty. */
630 if (!v || h->elts == 0)
633 p = lookup (v, key, GET, 0, 0);
/* Zero-sized values: tables used as sets store no value bytes. */
636 if (h->log2_pair_size == 0)
/* _hash_get_pair: like _hash_get but returns the whole (key,value) pair.
   NOTE(review): fragment — return type line is missing. */
643 _hash_get_pair (void *v, uword key)
645 return lookup (v, key, GET, 0, 0);
/* hash_next: resumable iterator over all pairs, state kept in HN (bucket
   index i, within-chain index j). First call latches flags that forbid
   resizing while iteration is in progress; walking past the end restores
   the flags and resets HN. Returns the next pair or NULL when done.
   NOTE(review): fragment — flag save/restore lines, the while loop,
   the direct-pair return and several braces are among the missing
   lines. */
649 hash_next (void *v, hash_next_t * hn)
651 hash_t *h = hash_header (v);
/* Fresh iteration: freeze the table's geometry. */
656 if (hn->i == 0 && hn->j == 0)
661 /* Prevent others from re-sizing hash table. */
663 (HASH_FLAG_NO_AUTO_GROW
664 | HASH_FLAG_NO_AUTO_SHRINK | HASH_FLAG_HASH_NEXT_IN_PROGRESS);
/* Walked off the end: reset iterator state for the next traversal. */
666 else if (hn->i >= hash_capacity (v))
670 clib_memset (hn, 0, sizeof (hn[0]));
674 p = hash_forward (h, v, hn->i);
675 if (hash_is_user (v, hn->i))
/* Indirect bucket: iterate its chain via hn->j. */
682 hash_pair_indirect_t *pi = (void *) p;
685 if (h->log2_pair_size > 0)
686 n = indirect_pair_get_len (pi);
688 n = vec_len (pi->pairs);
696 return hash_forward (h, pi->pairs, hn->j++);
701 /* Remove key from table. */
/* _hash_unset: UNSET lookup, optionally copying the removed value into
   OLD_VALUE, then auto-shrink the table when it drops below 1/4 full
   (unless the NO_AUTO_SHRINK flag is set — the default).
   NOTE(review): fragment — the early return for a NULL table, the h
   local and the final return of v are among the missing lines. */
703 _hash_unset (void *v, uword key, void *old_value)
710 (void) lookup (v, key, UNSET, 0, old_value);
713 if (!(h->flags & HASH_FLAG_NO_AUTO_SHRINK))
715 /* Resize when 1/4 full. */
716 if (h->elts > 32 && 4 * (h->elts + 1) < vec_len (v))
717 v = hash_resize (v, vec_len (v) / 2);
/* _hash_create: allocate a new table sized for ELTS elements, copying
   configuration (pair size, key_sum/key_equal, user data, formatters)
   from template header H_USER. Storage is one vector holding the
   header + is_user bitmap + bucket array, aligned to hash_pair_t.
   NOTE(review): fragment — the v/h locals, the header-copy from h_user,
   elts/flags initialization details and the return are missing lines. */
724 _hash_create (uword elts, hash_t * h_user)
727 uword log2_pair_size;
730 /* Size of hash is power of 2 >= ELTS and larger than
731 number of bits in is_user bitmap elements. */
732 elts = clib_max (elts, BITS (h->is_user[0]));
733 elts = 1ULL << max_log2 (elts);
737 log2_pair_size = h_user->log2_pair_size;
/* Single allocation: bucket array + header + is_user bitmap. */
739 v = _vec_resize ((void *) 0,
742 (elts << log2_pair_size) * sizeof (hash_pair_t),
745 (elts / BITS (h->is_user[0])) * sizeof (h->is_user[0]),
746 /* alignment */ sizeof (hash_pair_t));
752 h->log2_pair_size = log2_pair_size;
755 /* Default flags to never shrinking hash tables.
756 Shrinking tables can cause "jackpot" cases. */
758 h->flags = HASH_FLAG_NO_AUTO_SHRINK;
762 h->format_pair = hash_format_pair_default;
763 h->format_pair_arg = 0;
/* NOTE(review): presumably the body of hash_free — the signature line is
   among the missing lines; confirm against upstream vppinfra/hash.c.
   Walks every non-user bucket and releases its indirect chain (vec_free
   for single-word pairs, clib_mem_free for raw buffers), before the
   backing vector itself is freed (also among missing lines). */
772 hash_t *h = hash_header (v);
773 hash_pair_union_t *p;
779 /* We zero all freed memory in case user would be tempted to use it. */
780 for (i = 0; i < hash_capacity (v); i++)
/* Direct user pairs own no separate allocation — skip. */
782 if (hash_is_user (v, i))
785 if (h->log2_pair_size == 0)
786 vec_free (p->indirect.pairs);
787 else if (p->indirect.pairs)
788 clib_mem_free (p->indirect.pairs);
/* hash_resize_internal: build a NEW_SIZE table configured like OLD,
   re-insert every pair, and (per FREE_OLD) release the old table.
   NOTE(review): fragment — the new/p locals, the closing of the
   hash_foreach_pair macro body, the conditional free of old, and the
   return of new are among the missing lines. */
797 hash_resize_internal (void *old, uword new_size, uword free_old)
805 hash_t *h = old ? hash_header (old) : 0;
/* Old header doubles as the configuration template. */
806 new = _hash_create (new_size, h);
808 hash_foreach_pair (p, old, {
809 new = _hash_set3 (new, p->key, &p->value[0], 0);
/* hash_resize: resize OLD to NEW_SIZE, freeing the old storage.
   NOTE(review): the stray third line below (free_old == 0, same size)
   presumably belongs to hash_dup, whose signature is among the missing
   lines — confirm against upstream vppinfra/hash.c. */
820 hash_resize (void *old, uword new_size)
822 return hash_resize_internal (old, new_size, 1);
828 return hash_resize_internal (old, vec_len (old), 0);
/* _hash_set3: insert/overwrite KEY -> VALUE, creating a default table on
   first use; previous value (if any) is copied to OLD_VALUE. Auto-grows
   by doubling once more than 3/4 full (unless NO_AUTO_GROW).
   NOTE(review): fragment — the h local, the !v guard around the create,
   and the return of v are among the missing lines. */
832 _hash_set3 (void *v, uword key, void *value, void *old_value)
/* Lazy creation: uword-sized values by default. */
837 v = hash_create (0, sizeof (uword));
840 (void) lookup (v, key, SET, value, old_value);
842 if (!(h->flags & HASH_FLAG_NO_AUTO_GROW))
844 /* Resize when 3/4 full. */
845 if (4 * (h->elts + 1) > 3 * vec_len (v))
846 v = hash_resize (v, 2 * vec_len (v));
/* vec_key_sum: key is a clib vec pointer; hash its full payload
   (element count times h->user = element size in bytes).
   NOTE(review): fragment — return type line is missing. */
853 vec_key_sum (hash_t * h, uword key)
855 void *v = uword_to_pointer (key, void *);
856 return hash_memory (v, vec_len (v) * h->user, 0);
/* vec_key_equal: two vec keys are equal iff same element count and same
   bytes (h->user = element size). NOTE(review): fragment — return type
   line is missing. */
860 vec_key_equal (hash_t * h, uword key1, uword key2)
862 void *v1 = uword_to_pointer (key1, void *);
863 void *v2 = uword_to_pointer (key2, void *);
864 uword l1 = vec_len (v1);
865 uword l2 = vec_len (v2);
866 return l1 == l2 && 0 == memcmp (v1, v2, l1 * h->user);
/* vec_key_format_pair: format-function for vec-keyed tables. Renders the
   key per element size — %v for bytes, hex lists for u16/u32/u64
   elements, raw hex otherwise — then the value if the table stores one.
   NOTE(review): fragment — the switch on h->user, the w locals per case,
   braces and the final return are among the missing lines. */
870 vec_key_format_pair (u8 * s, va_list * args)
872 void *CLIB_UNUSED (user_arg) = va_arg (*args, void *);
873 void *v = va_arg (*args, void *);
874 hash_pair_t *p = va_arg (*args, hash_pair_t *);
875 hash_t *h = hash_header (v);
876 void *u = uword_to_pointer (p->key, void *);
/* 1-byte elements: print as a string-ish vector. */
882 s = format (s, "%v", u);
884 for (i = 0; i < vec_len (w); i++)
889 s = format (s, "0x%x, ", w[i])
896 for (i = 0; i < vec_len (w); i++)
897 s = format (s, "0x%x, ", w[i]);
/* 8-byte elements need the long-long format. */
904 for (i = 0; i < vec_len (w); i++)
905 s = format (s, "0x%Lx, ", w[i]);
/* Fallback: dump the raw bytes. */
910 s = format (s, "0x%U", format_hex_bytes, u, vec_len (u) * h->user);
914 if (hash_value_bytes (h) > 0)
915 s = format (s, " -> 0x%wx", p->value[0]);
/* mem_key_sum: key points at a fixed-size blob of h->user bytes.
   NOTE(review): fragment — return type line is missing. */
921 mem_key_sum (hash_t * h, uword key)
923 uword *v = uword_to_pointer (key, void *);
924 return hash_memory (v, h->user, 0);
/* mem_key_equal: byte-compare two fixed-size (h->user bytes) blob keys;
   NULL pointers never compare equal. NOTE(review): fragment — return
   type line is missing. */
928 mem_key_equal (hash_t * h, uword key1, uword key2)
930 void *v1 = uword_to_pointer (key1, void *);
931 void *v2 = uword_to_pointer (key2, void *);
932 return v1 && v2 && 0 == memcmp (v1, v2, h->user);
/* string_key_sum: key is a NUL-terminated C string; hash its bytes.
   NOTE(review): fragment — return type line is missing. */
936 string_key_sum (hash_t * h, uword key)
938 char *v = uword_to_pointer (key, char *);
939 return hash_memory (v, strlen (v), 0);
/* string_key_equal: strcmp-based equality for C-string keys; NULL
   pointers never compare equal. NOTE(review): fragment — return type
   line is missing. */
943 string_key_equal (hash_t * h, uword key1, uword key2)
945 void *v1 = uword_to_pointer (key1, void *);
946 void *v2 = uword_to_pointer (key2, void *);
947 return v1 && v2 && 0 == strcmp (v1, v2);
/* string_key_format_pair: format-function for string-keyed tables —
   prints the key as %s, then the value bytes in hex if the table stores
   values. NOTE(review): fragment — the "s =" assignment on the value
   format call and the return are among the missing lines. */
951 string_key_format_pair (u8 * s, va_list * args)
953 void *CLIB_UNUSED (user_arg) = va_arg (*args, void *);
954 void *v = va_arg (*args, void *);
955 hash_pair_t *p = va_arg (*args, hash_pair_t *);
956 hash_t *h = hash_header (v);
957 void *u = uword_to_pointer (p->key, void *);
959 s = format (s, "%s", u);
961 if (hash_value_bytes (h) > 0)
963 format (s, " -> 0x%8U", format_hex_bytes, &p->value[0],
964 hash_value_bytes (h));
/* hash_format_pair_default: fallback pair formatter installed by
   _hash_create — key as 8-digit hex, value bytes in hex when present.
   NOTE(review): fragment — static u8 * return-type line, the "s ="
   assignment on the value format call and the return are missing. */
970 hash_format_pair_default (u8 * s, va_list * args)
972 void *CLIB_UNUSED (user_arg) = va_arg (*args, void *);
973 void *v = va_arg (*args, void *);
974 hash_pair_t *p = va_arg (*args, hash_pair_t *);
975 hash_t *h = hash_header (v);
977 s = format (s, "0x%08x", p->key);
978 if (hash_value_bytes (h) > 0)
980 format (s, " -> 0x%8U", format_hex_bytes, &p->value[0],
981 hash_value_bytes (h));
/* NOTE(review): presumably the body of hash_bytes (total memory consumed
   by table V) — the signature line is among the missing lines; confirm
   against upstream vppinfra/hash.c. Sums the backing vector's capacity
   plus each indirect bucket's chain allocation. */
989 hash_t *h = hash_header (v);
/* Base cost: the vector holding header, bitmap and buckets. */
994 bytes = vec_capacity (v, hash_header_bytes (v));
996 for (i = 0; i < hash_capacity (v); i++)
/* Only indirect buckets own extra allocations. */
998 if (!hash_is_user (v, i))
1000 hash_pair_union_t *p = get_pair (v, i);
1001 if (h->log2_pair_size > 0)
1002 bytes += 1 << indirect_pair_get_log2_bytes (&p->indirect);
1004 bytes += vec_capacity (p->indirect.pairs, 0);
/* format_hash: printable summary of table V — element count, capacity,
   byte usage; with VERBOSE, a bucket-occupancy histogram, an estimate of
   average lookup compares, and (further down) every pair rendered via
   the table's format_pair callback.
   NOTE(review): fragment — several locals (i, j, p), histogram increment
   lines, braces and the return are among the missing lines. */
1011 format_hash (u8 * s, va_list * va)
1013 void *v = va_arg (*va, void *);
1014 int verbose = va_arg (*va, int);
1016 hash_t *h = hash_header (v);
1019 s = format (s, "hash %p, %wd elts, capacity %wd, %wd bytes used,\n",
1020 v, hash_elts (v), hash_capacity (v), hash_bytes (v));
/* occupancy[j] = number of buckets holding exactly j pairs (vec). */
1023 uword *occupancy = 0;
1025 /* Count number of buckets with each occupancy. */
1026 for (i = 0; i < hash_capacity (v); i++)
1030 if (hash_is_user (v, i))
1036 hash_pair_union_t *p = get_pair (v, i);
1037 if (h->log2_pair_size > 0)
1038 j = indirect_pair_get_len (&p->indirect);
1040 j = vec_len (p->indirect.pairs);
/* Grow the histogram vec so slot j exists. */
1043 vec_validate (occupancy, j);
1047 s = format (s, " profile ");
1048 for (i = 0; i < vec_len (occupancy); i++)
1049 s = format (s, "%wd%c", occupancy[i],
1050 i + 1 == vec_len (occupancy) ? '\n' : ' ');
1052 s = format (s, " lookup # of compares: ");
/* Buckets with i pairs cost up to i compares per lookup. */
1053 for (i = 1; i < vec_len (occupancy); i++)
1054 s = format (s, "%wd: .%03d%c", i,
1055 (1000 * i * occupancy[i]) / hash_elts (v),
1056 i + 1 == vec_len (occupancy) ? '\n' : ' ');
1058 vec_free (occupancy);
/* Verbose: dump every pair with the table's own formatter. */
1064 hash_foreach_pair (p, v, {
1065 s = format (s, " %U\n", h->format_pair, h->format_pair_arg, v, p);
/* unformat_hash_string_internal: parse a token from INPUT (%v vec-string
   or %s C-string, per IS_VEC), look it up in a string-keyed hash, and on
   hit store the mapped value in *RESULT.
   NOTE(review): fragment — the string/p locals, the return-0 miss path,
   the *result store, vec_free of the parsed string and the final return
   are among the missing lines. */
1074 unformat_hash_string_internal (unformat_input_t * input,
1075 va_list * va, int is_vec)
1077 uword *hash = va_arg (*va, uword *);
1078 int *result = va_arg (*va, int *);
/* %_ skips trailing whitespace after the token. */
1082 if (!unformat (input, is_vec ? "%v%_" : "%s%_", &string))
1085 p = hash_get_mem (hash, string);
/* Public unformat wrappers: vec-string and NUL-terminated-string
   flavors of the internal helper above.
   NOTE(review): fragment — the uword return-type lines are missing. */
1094 unformat_hash_vec_string (unformat_input_t * input, va_list * va)
1096 return unformat_hash_string_internal (input, va, /* is_vec */ 1);
1100 unformat_hash_string (unformat_input_t * input, va_list * va)
1102 return unformat_hash_string_internal (input, va, /* is_vec */ 0);
/* hash_validate: debug consistency check — walks every bucket, asserts
   direct pairs have non-zero keys, asserts all keys are unique, and that
   the total pair count matches h->elts. Returns a clib_error_t (0 on
   success). NOTE(review): fragment — the keys/i/j/n/p locals, the is_user
   bitmap consistency checks, #undef CHECK, the done: label, vec_free of
   keys and the return are among the missing lines. */
1106 hash_validate (void *v)
1108 hash_t *h = hash_header (v);
1111 clib_error_t *error = 0;
/* CHECK: like ASSERT but reports via clib_error_t and bails to done. */
1113 #define CHECK(x) if ((error = ERROR_ASSERT (x))) goto done;
1115 for (i = 0; i < hash_capacity (v); i++)
1117 hash_pair_union_t *pu = get_pair (v, i);
1119 if (hash_is_user (v, i))
/* Direct pair: key 0 would be indistinguishable from empty. */
1121 CHECK (pu->direct.key != 0);
1122 vec_add1 (keys, pu->direct.key);
/* Indirect bucket: walk its chain. */
1127 hash_pair_indirect_t *pi = &pu->indirect;
1130 n = h->log2_pair_size > 0
1131 ? indirect_pair_get_len (pi) : vec_len (pi->pairs);
1133 for (p = pi->pairs; n-- > 0; p = hash_forward1 (h, p))
1135 /* Assert key uniqueness. */
1136 for (j = 0; j < vec_len (keys); j++)
1137 CHECK (keys[j] != p->key);
1138 vec_add1 (keys, p->key);
/* Grand total must agree with the header's element counter. */
1143 CHECK (vec_len (keys) == h->elts);
1151 * fd.io coding-style-patch-verification: ON
1154 * eval: (c-set-style "gnu")