/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/** @cond DOCUMENTATION_IS_IN_BIHASH_DOC_H */
#ifndef MAP_HUGE_SHIFT
#define MAP_HUGE_SHIFT 26
#endif

#ifndef BIIHASH_MIN_ALLOC_LOG2_PAGES
#define BIIHASH_MIN_ALLOC_LOG2_PAGES 10
#endif

#ifndef BIHASH_USE_HEAP
#define BIHASH_USE_HEAP 1
#endif
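/*
 * Editor's note (sketch): all three knobs above may be overridden by the
 * file that instantiates the template, before the #include. Hypothetical
 * example:
 *
 *   #define BIIHASH_MIN_ALLOC_LOG2_PAGES 8   (smaller heap chunks)
 *   #define BIHASH_USE_HEAP 0                (use a private mmap arena)
 *   #include <vppinfra/bihash_template.c>
 */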
static inline void *BV (alloc_aligned) (BVT (clib_bihash) * h, uword nbytes)
{
  uword rv;

  /* Round to an even number of cache lines */
  nbytes = round_pow2 (nbytes, CLIB_CACHE_LINE_BYTES);

  if (BIHASH_USE_HEAP)
    {
      void *rv, *oldheap;
      uword page_sz = sizeof (BVT (clib_bihash_value));
      uword chunk_sz = round_pow2 (page_sz << BIIHASH_MIN_ALLOC_LOG2_PAGES,
                                   CLIB_CACHE_LINE_BYTES);

      BVT (clib_bihash_alloc_chunk) * chunk = h->chunks;

      /* if there is enough space in the current chunk */
      if (chunk && chunk->bytes_left >= nbytes)
        {
          rv = chunk->next_alloc;
          chunk->bytes_left -= nbytes;
          chunk->next_alloc += nbytes;
          return rv;
        }

      /* requested allocation is bigger than chunk size */
      if (nbytes >= chunk_sz)
        {
          oldheap = clib_mem_set_heap (h->heap);
          chunk = clib_mem_alloc_aligned (nbytes + sizeof (*chunk),
                                          CLIB_CACHE_LINE_BYTES);
          clib_mem_set_heap (oldheap);
          clib_memset_u8 (chunk, 0, sizeof (*chunk));
          chunk->size = nbytes;
          rv = (u8 *) (chunk + 1);
          if (h->chunks)
            {
              /* take 2nd place in the list */
              chunk->next = h->chunks->next;
              chunk->prev = h->chunks;
              h->chunks->next = chunk;
              if (chunk->next)
                chunk->next->prev = chunk;
            }
          else
            h->chunks = chunk;

          return rv;
        }

      oldheap = clib_mem_set_heap (h->heap);
      chunk = clib_mem_alloc_aligned (chunk_sz + sizeof (*chunk),
                                      CLIB_CACHE_LINE_BYTES);
      clib_mem_set_heap (oldheap);
      chunk->size = chunk_sz;
      chunk->bytes_left = chunk_sz;
      chunk->next_alloc = (u8 *) (chunk + 1);
      chunk->next = h->chunks;
      chunk->prev = 0;
      if (chunk->next)
        chunk->next->prev = chunk;
      h->chunks = chunk;
      rv = chunk->next_alloc;
      chunk->bytes_left -= nbytes;
      chunk->next_alloc += nbytes;

      return rv;
    }

  rv = alloc_arena_next (h);
  alloc_arena_next (h) += nbytes;

  if (alloc_arena_next (h) > alloc_arena_size (h))
    os_out_of_memory ();

  if (alloc_arena_next (h) > alloc_arena_mapped (h))
    {
      void *base, *rv;
      uword alloc = alloc_arena_next (h) - alloc_arena_mapped (h);
      int mmap_flags = MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS;
#if __linux__
      int mmap_flags_huge = (mmap_flags | MAP_HUGETLB | MAP_LOCKED |
                             BIHASH_LOG2_HUGEPAGE_SIZE << MAP_HUGE_SHIFT);
#endif /* __linux__ */

      /* new allocation is 25% of existing one */
      if (alloc_arena_mapped (h) >> 2 > alloc)
        alloc = alloc_arena_mapped (h) >> 2;

      /* round allocation to page size */
      alloc = round_pow2 (alloc, 1 << BIHASH_LOG2_HUGEPAGE_SIZE);

      base = (void *) (uword) (alloc_arena (h) + alloc_arena_mapped (h));

#if __linux__
      rv = mmap (base, alloc, PROT_READ | PROT_WRITE, mmap_flags_huge, -1, 0);
#endif /* __linux__ */

      /* fallback - maybe we are still able to allocate normal pages */
      if (rv == MAP_FAILED || mlock (base, alloc) != 0)
        rv = mmap (base, alloc, PROT_READ | PROT_WRITE, mmap_flags, -1, 0);

      if (rv == MAP_FAILED)
        os_out_of_memory ();

      alloc_arena_mapped (h) += alloc;
    }

  return (void *) (uword) (rv + alloc_arena (h));
}
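/*
 * Editor's sketch of the chunk arithmetic above, assuming the 8_8
 * variant, where a value page holds 4 16-byte kvps, so
 * sizeof (clib_bihash_value_8_8_t) == 64:
 *
 *   page_sz  = 64 bytes
 *   chunk_sz = round_pow2 (64 << 10, 64) = 64 Kbytes
 *
 * Requests below 64K are carved out of the current chunk; a request of
 * 64K or more gets its own dedicated chunk, linked second in the list
 * so the current, partially-filled chunk stays at the head.
 */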
static void BV (clib_bihash_instantiate) (BVT (clib_bihash) * h)
{
  uword bucket_size;

  if (BIHASH_USE_HEAP)
    {
      h->heap = clib_mem_get_heap ();
      h->chunks = 0;
      alloc_arena (h) = (uword) clib_mem_get_heap_base (h->heap);
    }
  else
    {
      alloc_arena (h) = clib_mem_vm_reserve (0, h->memory_size,
                                             BIHASH_LOG2_HUGEPAGE_SIZE);
      if (alloc_arena (h) == ~0)
        os_out_of_memory ();
      alloc_arena_next (h) = 0;
      alloc_arena_size (h) = h->memory_size;
      alloc_arena_mapped (h) = 0;
    }

  bucket_size = h->nbuckets * sizeof (h->buckets[0]);

  if (BIHASH_KVP_AT_BUCKET_LEVEL)
    bucket_size +=
      h->nbuckets * BIHASH_KVP_PER_PAGE * sizeof (BVT (clib_bihash_kv));

  h->buckets = BV (alloc_aligned) (h, bucket_size);
  clib_memset_u8 (h->buckets, 0, bucket_size);

  if (BIHASH_KVP_AT_BUCKET_LEVEL)
    {
      int i, j;
      BVT (clib_bihash_bucket) * b;

      b = h->buckets;

      for (i = 0; i < h->nbuckets; i++)
        {
          BVT (clib_bihash_kv) * v;
          b->offset = BV (clib_bihash_get_offset) (h, (void *) (b + 1));
          b->refcnt = 1;
          /* Mark all elements free */
          v = (void *) (b + 1);
          for (j = 0; j < BIHASH_KVP_PER_PAGE; j++)
            {
              BV (clib_bihash_mark_free) (v);
              v++;
            }
          /* Compute next bucket start address */
          b = (void *) (((uword) b) + sizeof (*b) +
                        (BIHASH_KVP_PER_PAGE *
                         sizeof (BVT (clib_bihash_kv))));
        }
    }
  CLIB_MEMORY_STORE_BARRIER ();
  h->instantiated = 1;
}
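/*
 * Editor's note: when BIHASH_KVP_AT_BUCKET_LEVEL is configured, the
 * loop above initializes an interleaved layout in which each bucket
 * header is immediately followed by its own kvp page:
 *
 *   | bucket 0 | kvp[0..P-1] | bucket 1 | kvp[0..P-1] | ...
 *
 * (P = BIHASH_KVP_PER_PAGE), hence the b->offset of (b + 1) and the
 * sizeof (*b) + P * sizeof (kv) stride.
 */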
void BV (clib_bihash_init2) (BVT (clib_bihash_init2_args) * a)
{
  int i;
  void *oldheap;
  BVT (clib_bihash) * h = a->h;

  a->nbuckets = 1 << (max_log2 (a->nbuckets));

  h->name = (u8 *) a->name;
  h->nbuckets = a->nbuckets;
  h->log2_nbuckets = max_log2 (a->nbuckets);
  h->memory_size = BIHASH_USE_HEAP ? 0 : a->memory_size;
  h->instantiated = 0;
  h->dont_add_to_all_bihash_list = a->dont_add_to_all_bihash_list;
  h->fmt_fn = BV (format_bihash);
  h->kvp_fmt_fn = a->kvp_fmt_fn;

  alloc_arena (h) = 0;

  /*
   * Make sure the requested size is rational. The max table
   * size without playing the alignment card is 64 Gbytes.
   * If someone starts complaining that's not enough, we can shift
   * the offset by CLIB_LOG2_CACHE_LINE_BYTES...
   */
  ASSERT (h->memory_size < (1ULL << BIHASH_BUCKET_OFFSET_BITS));

  /* Add this hash table to the list */
  if (a->dont_add_to_all_bihash_list == 0)
    {
      for (i = 0; i < vec_len (clib_all_bihashes); i++)
        if (clib_all_bihashes[i] == h)
          goto do_lock;
      oldheap = clib_all_bihash_set_heap ();
      vec_add1 (clib_all_bihashes, (void *) h);
      clib_mem_set_heap (oldheap);
    }

do_lock:
  if (h->alloc_lock)
    clib_mem_free ((void *) h->alloc_lock);

  /*
   * Set up the lock now, so we can use it to make the first add
   * thread-safe
   */
  h->alloc_lock = clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES,
                                          CLIB_CACHE_LINE_BYTES);
  h->alloc_lock[0] = 0;

#if BIHASH_LAZY_INSTANTIATE
  if (a->instantiate_immediately)
#endif
    BV (clib_bihash_instantiate) (h);
}
void BV (clib_bihash_init)
  (BVT (clib_bihash) * h, char *name, u32 nbuckets, uword memory_size)
{
  BVT (clib_bihash_init2_args) _a, *a = &_a;

  memset (a, 0, sizeof (*a));

  a->h = h;
  a->name = name;
  a->nbuckets = nbuckets;
  a->memory_size = memory_size;

  BV (clib_bihash_init2) (a);
}
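/*
 * Editor's usage sketch, assuming the 8_8 variant from bihash_8_8.h
 * (illustrative only):
 *
 *   clib_bihash_8_8_t t;
 *   clib_bihash_kv_8_8_t kv, result;
 *
 *   clib_bihash_init_8_8 (&t, "demo", 1024, 32ULL << 20);
 *   kv.key = 12;
 *   kv.value = 34;
 *   clib_bihash_add_del_8_8 (&t, &kv, 1);   (is_add = 1)
 *   if (clib_bihash_search_8_8 (&t, &kv, &result) == 0)
 *     ... result.value is now 34 ...
 *   clib_bihash_free_8_8 (&t);
 *
 * Note that memory_size is ignored when BIHASH_USE_HEAP is 1.
 */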
#if BIHASH_32_64_SVM
#if !defined (MFD_ALLOW_SEALING)
#define MFD_ALLOW_SEALING 0x0002U
#endif
void BV (clib_bihash_initiator_init_svm)
  (BVT (clib_bihash) * h, char *name, u32 nbuckets, u64 memory_size)
{
  uword bucket_size;
  u8 *mmap_addr;
  vec_header_t *freelist_vh;
  int fd;

  ASSERT (BIHASH_USE_HEAP == 0);

  ASSERT (memory_size < (1ULL << 32));
  /* Set up for memfd sharing */
  if ((fd = clib_mem_vm_create_fd (CLIB_MEM_PAGE_SZ_DEFAULT, name)) == -1)
    {
      clib_unix_warning ("memfd_create");
      return;
    }

  if (ftruncate (fd, memory_size) < 0)
    {
      clib_unix_warning ("ftruncate");
      return;
    }

  /* Not mission-critical, complain and continue */
  if ((fcntl (fd, F_ADD_SEALS, F_SEAL_SHRINK)) == -1)
    clib_unix_warning ("fcntl (F_ADD_SEALS)");

  mmap_addr = mmap (0, memory_size,
                    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0 /* offset */ );

  if (mmap_addr == MAP_FAILED)
    {
      clib_unix_warning ("mmap failed");
      return;
    }

  h->sh = (void *) mmap_addr;
  h->memfd = fd;
  nbuckets = 1 << (max_log2 (nbuckets));

  h->name = (u8 *) name;
  h->sh->nbuckets = h->nbuckets = nbuckets;
  h->log2_nbuckets = max_log2 (nbuckets);

  alloc_arena (h) = (u64) (uword) mmap_addr;
  alloc_arena_next (h) = CLIB_CACHE_LINE_BYTES;
  alloc_arena_size (h) = memory_size;

  bucket_size = nbuckets * sizeof (h->buckets[0]);
  h->buckets = BV (alloc_aligned) (h, bucket_size);
  clib_memset_u8 (h->buckets, 0, bucket_size);
  h->sh->buckets_as_u64 = (u64) BV (clib_bihash_get_offset) (h, h->buckets);

  h->alloc_lock = BV (alloc_aligned) (h, CLIB_CACHE_LINE_BYTES);
  h->alloc_lock[0] = 0;

  h->sh->alloc_lock_as_u64 =
    (u64) BV (clib_bihash_get_offset) (h, (void *) h->alloc_lock);
  freelist_vh =
    BV (alloc_aligned) (h,
                        sizeof (vec_header_t) +
                        BIHASH_FREELIST_LENGTH * sizeof (u64));
  freelist_vh->len = BIHASH_FREELIST_LENGTH;
  h->sh->freelists_as_u64 =
    (u64) BV (clib_bihash_get_offset) (h, freelist_vh->vector_data);
  h->freelists = (void *) (freelist_vh->vector_data);

  h->fmt_fn = BV (format_bihash);
  h->kvp_fmt_fn = NULL;
  h->instantiated = 1;
}
void BV (clib_bihash_responder_init_svm)
  (BVT (clib_bihash) * h, char *name, int fd)
{
  u8 *mmap_addr;
  u64 memory_size;
  BVT (clib_bihash_shared_header) * sh;

  ASSERT (BIHASH_USE_HEAP == 0);

  /* Trial mapping, to learn the segment size */
  mmap_addr = mmap (0, 4096, PROT_READ, MAP_SHARED, fd, 0 /* offset */ );
  if (mmap_addr == MAP_FAILED)
    {
      clib_unix_warning ("trial mmap failed");
      return;
    }

  sh = (BVT (clib_bihash_shared_header) *) mmap_addr;
  memory_size = sh->alloc_arena_size;
  munmap (mmap_addr, 4096);

  /* Actual mapping, at the required size */
  mmap_addr = mmap (0, memory_size,
                    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0 /* offset */ );

  if (mmap_addr == MAP_FAILED)
    {
      clib_unix_warning ("mmap failed");
      return;
    }

  h->sh = (void *) mmap_addr;
  alloc_arena (h) = (u64) (uword) mmap_addr;
  h->memfd = fd;

  h->name = (u8 *) name;
  h->buckets = BV (clib_bihash_get_value) (h, h->sh->buckets_as_u64);
  h->nbuckets = h->sh->nbuckets;
  h->log2_nbuckets = max_log2 (h->nbuckets);

  h->alloc_lock = BV (clib_bihash_get_value) (h, h->sh->alloc_lock_as_u64);
  h->freelists = BV (clib_bihash_get_value) (h, h->sh->freelists_as_u64);
  h->fmt_fn = BV (format_bihash);
  h->kvp_fmt_fn = NULL;
  h->instantiated = 1;
}
#endif /* BIHASH_32_64_SVM */
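/*
 * Editor's note on the pair of functions above (sketch, assuming a
 * variant built with BIHASH_32_64_SVM): the initiator creates and
 * truncates the memfd segment; the descriptor is then handed to the
 * peer process out-of-band (e.g. via an SCM_RIGHTS control message),
 * and the peer attaches with the responder call:
 *
 *   initiator:  clib_bihash_initiator_init_svm_8_8
 *                 (h, "shared", 1024, 1ULL << 30);
 *               ... send h->memfd to the peer ...
 *   responder:  clib_bihash_responder_init_svm_8_8
 *                 (h, "shared", received_fd);
 *
 * The shared header stores offsets rather than pointers, so the two
 * processes need not map the segment at the same address.
 */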
void BV (clib_bihash_set_kvp_format_fn) (BVT (clib_bihash) * h,
					 format_function_t * kvp_fmt_fn)
{
  h->kvp_fmt_fn = kvp_fmt_fn;
}
int BV (clib_bihash_is_initialised) (const BVT (clib_bihash) * h)
{
  return (h->instantiated != 0);
}
void BV (clib_bihash_free) (BVT (clib_bihash) * h)
{
  int i;

  if (PREDICT_FALSE (h->instantiated == 0))
    goto never_initialized;

  h->instantiated = 0;

  if (BIHASH_USE_HEAP)
    {
      BVT (clib_bihash_alloc_chunk) * next, *chunk;
      void *oldheap = clib_mem_set_heap (h->heap);

      chunk = h->chunks;
      while (chunk)
        {
          next = chunk->next;
          clib_mem_free (chunk);
          chunk = next;
        }
      clib_mem_set_heap (oldheap);
    }

  vec_free (h->working_copies);
  vec_free (h->working_copy_lengths);
  clib_mem_free ((void *) h->alloc_lock);
#if BIHASH_32_64_SVM == 0
  vec_free (h->freelists);
#else
  if (h->memfd > 0)
    (void) close (h->memfd);
#endif
  if (BIHASH_USE_HEAP == 0)
    clib_mem_vm_free ((void *) (uword) (alloc_arena (h)),
                      alloc_arena_size (h));
never_initialized:
  if (h->dont_add_to_all_bihash_list)
    {
      clib_memset_u8 (h, 0, sizeof (*h));
      return;
    }
  clib_memset_u8 (h, 0, sizeof (*h));
  for (i = 0; i < vec_len (clib_all_bihashes); i++)
    {
      if ((void *) h == clib_all_bihashes[i])
        {
          vec_delete (clib_all_bihashes, 1, i);
          return;
        }
    }
  clib_warning ("Couldn't find hash table %llx on clib_all_bihashes...",
                (u64) (uword) h);
}
static
BVT (clib_bihash_value) *
BV (value_alloc) (BVT (clib_bihash) * h, u32 log2_pages)
{
  int i;
  BVT (clib_bihash_value) * rv = 0;

  ASSERT (h->alloc_lock[0]);

#if BIHASH_32_64_SVM
  ASSERT (log2_pages < vec_len (h->freelists));
#endif

  if (log2_pages >= vec_len (h->freelists) || h->freelists[log2_pages] == 0)
    {
      vec_validate_init_empty (h->freelists, log2_pages, 0);
      rv = BV (alloc_aligned) (h, (sizeof (*rv) * (1 << log2_pages)));
    }
  else
    {
      rv = BV (clib_bihash_get_value) (h, (uword) h->freelists[log2_pages]);
      h->freelists[log2_pages] = rv->next_free_as_u64;
    }

  BVT (clib_bihash_kv) * v;
  v = (BVT (clib_bihash_kv) *) rv;

  for (i = 0; i < BIHASH_KVP_PER_PAGE * (1 << log2_pages); i++)
    {
      BV (clib_bihash_mark_free) (v);
      v++;
    }

  return rv;
}
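/*
 * Editor's note: freelists are indexed by log2_pages, i.e. one list per
 * power-of-two backing-store size. For example, a bucket that has been
 * doubled twice (log2_pages = 2) recycles through h->freelists[2], and
 * each element on that list spans (1 << 2) * BIHASH_KVP_PER_PAGE kvp
 * slots.
 */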
static void
BV (value_free) (BVT (clib_bihash) * h, BVT (clib_bihash_value) * v,
		 u32 log2_pages)
{
  ASSERT (h->alloc_lock[0]);

  ASSERT (vec_len (h->freelists) > log2_pages);

  if (BIHASH_USE_HEAP && log2_pages >= BIIHASH_MIN_ALLOC_LOG2_PAGES)
    {
      /* allocations bigger or equal to chunk size always contain single
       * alloc and they can be given back to heap */
      void *oldheap;
      BVT (clib_bihash_alloc_chunk) * c;
      c = (BVT (clib_bihash_alloc_chunk) *) v - 1;

      if (c->prev)
        c->prev->next = c->next;
      else
        h->chunks = c->next;

      if (c->next)
        c->next->prev = c->prev;

      oldheap = clib_mem_set_heap (h->heap);
      clib_mem_free (c);
      clib_mem_set_heap (oldheap);
      return;
    }

  if (CLIB_DEBUG > 0)
    clib_memset_u8 (v, 0xFE, sizeof (*v) * (1 << log2_pages));

  v->next_free_as_u64 = (u64) h->freelists[log2_pages];
  h->freelists[log2_pages] = (u64) BV (clib_bihash_get_offset) (h, v);
}
static inline void
BV (make_working_copy) (BVT (clib_bihash) * h, BVT (clib_bihash_bucket) * b)
{
  BVT (clib_bihash_value) * v;
  BVT (clib_bihash_bucket) working_bucket __attribute__ ((aligned (8)));
  BVT (clib_bihash_value) * working_copy;
  u32 thread_index = os_get_thread_index ();
  int log2_working_copy_length;

  ASSERT (h->alloc_lock[0]);

  if (thread_index >= vec_len (h->working_copies))
    {
      vec_validate (h->working_copies, thread_index);
      vec_validate_init_empty (h->working_copy_lengths, thread_index, ~0);
    }

  /*
   * working_copies are per-cpu so that near-simultaneous
   * updates from multiple threads will not result in sporadic, spurious
   * lookup failures.
   */
  working_copy = h->working_copies[thread_index];
  log2_working_copy_length = h->working_copy_lengths[thread_index];

  h->saved_bucket.as_u64 = b->as_u64;

  if (b->log2_pages > log2_working_copy_length)
    {
      /*
       * It's not worth the bookkeeping to free working copies
       *   if (working_copy)
       *     clib_mem_free (working_copy);
       */
      working_copy = BV (alloc_aligned)
        (h, sizeof (working_copy[0]) * (1 << b->log2_pages));
      h->working_copy_lengths[thread_index] = b->log2_pages;
      h->working_copies[thread_index] = working_copy;

      BV (clib_bihash_increment_stat) (h, BIHASH_STAT_working_copy_lost,
                                       1ULL << b->log2_pages);
    }

  v = BV (clib_bihash_get_value) (h, b->offset);

  clib_memcpy_fast (working_copy, v, sizeof (*v) * (1 << b->log2_pages));
  working_bucket.as_u64 = b->as_u64;
  working_bucket.offset = BV (clib_bihash_get_offset) (h, working_copy);
  CLIB_MEMORY_STORE_BARRIER ();
  b->as_u64 = working_bucket.as_u64;
  h->working_copies[thread_index] = working_copy;
}
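/*
 * Editor's note on the ordering above: the working copy is fully
 * populated before the single 64-bit store that re-points the bucket
 * at it, and the copied bucket word carries the lock bit. A reader
 * therefore sees either the old live pages or a locked bucket; it can
 * never observe a half-built copy.
 */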
static
BVT (clib_bihash_value) *
BV (split_and_rehash)
  (BVT (clib_bihash) * h,
   BVT (clib_bihash_value) * old_values, u32 old_log2_pages,
   u32 new_log2_pages)
{
  BVT (clib_bihash_value) * new_values, *new_v;
  int i, j, length_in_kvs;

  ASSERT (h->alloc_lock[0]);

  new_values = BV (value_alloc) (h, new_log2_pages);
  length_in_kvs = (1 << old_log2_pages) * BIHASH_KVP_PER_PAGE;

  for (i = 0; i < length_in_kvs; i++)
    {
      u64 new_hash;

      /* Entry not in use? Forget it */
      if (BV (clib_bihash_is_free) (&(old_values->kvp[i])))
        continue;

      /* rehash the item onto its new home-page */
      new_hash = BV (clib_bihash_hash) (&(old_values->kvp[i]));
      new_hash = extract_bits (new_hash, h->log2_nbuckets, new_log2_pages);
      new_v = &new_values[new_hash];

      /* Across the new home-page */
      for (j = 0; j < BIHASH_KVP_PER_PAGE; j++)
        {
          /* Empty slot */
          if (BV (clib_bihash_is_free) (&(new_v->kvp[j])))
            {
              clib_memcpy_fast (&(new_v->kvp[j]), &(old_values->kvp[i]),
                                sizeof (new_v->kvp[j]));
              goto doublebreak;
            }
        }
      /* Crap. Tell caller to try again */
      BV (value_free) (h, new_values, new_log2_pages);
      return 0;
    doublebreak:;
    }

  return new_values;
}
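/*
 * Editor's sketch of the home-page computation above: bucket selection
 * consumes the low log2_nbuckets bits of the hash, and extract_bits ()
 * returns the new_log2_pages bits immediately above them. With
 * log2_nbuckets = 10 and new_log2_pages = 2, for instance, an item's
 * page within the doubled bucket is chosen by hash bits [11:10].
 */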
static
BVT (clib_bihash_value) *
BV (split_and_rehash_linear)
  (BVT (clib_bihash) * h,
   BVT (clib_bihash_value) * old_values, u32 old_log2_pages,
   u32 new_log2_pages)
{
  BVT (clib_bihash_value) * new_values;
  int i, j, new_length, old_length;

  ASSERT (h->alloc_lock[0]);

  new_values = BV (value_alloc) (h, new_log2_pages);
  new_length = (1 << new_log2_pages) * BIHASH_KVP_PER_PAGE;
  old_length = (1 << old_log2_pages) * BIHASH_KVP_PER_PAGE;

  j = 0;
  /* Across the old value array */
  for (i = 0; i < old_length; i++)
    {
      /* Find a free slot in the new linear scan bucket */
      for (; j < new_length; j++)
        {
          /* Old value not in use? Forget it. */
          if (BV (clib_bihash_is_free) (&(old_values->kvp[i])))
            goto doublebreak;

          /* New value should never be in use */
          if (BV (clib_bihash_is_free) (&(new_values->kvp[j])))
            {
              /* Copy the old value and move along */
              clib_memcpy_fast (&(new_values->kvp[j]), &(old_values->kvp[i]),
                                sizeof (new_values->kvp[j]));
              j++;
              goto doublebreak;
            }
        }
      /* This should never happen... */
      clib_warning ("BUG: linear rehash failed!");
      BV (value_free) (h, new_values, new_log2_pages);
      return 0;

    doublebreak:;
    }
  return new_values;
}
static_always_inline int BV (clib_bihash_add_del_inline_with_hash) (
  BVT (clib_bihash) * h, BVT (clib_bihash_kv) * add_v, u64 hash, int is_add,
  int (*is_stale_cb) (BVT (clib_bihash_kv) *, void *), void *is_stale_arg,
  void (*overwrite_cb) (BVT (clib_bihash_kv) *, void *), void *overwrite_arg)
{
  BVT (clib_bihash_bucket) * b, tmp_b;
  BVT (clib_bihash_value) * v, *new_v, *save_new_v, *working_copy;
  int i, limit;
  u64 new_hash;
  u32 new_log2_pages, old_log2_pages;
  u32 thread_index = os_get_thread_index ();
  int mark_bucket_linear;
  int resplit_once;

  static const BVT (clib_bihash_bucket) mask = {
    .linear_search = 1,
    .log2_pages = -1
  };

#if BIHASH_LAZY_INSTANTIATE
  /*
   * Create the table (is_add=1,2), or flunk the request now (is_add=0)
   * Use the alloc_lock to protect the instantiate operation.
   */
  if (PREDICT_FALSE (h->instantiated == 0))
    {
      if (is_add == 0)
        return (-1);

      BV (clib_bihash_alloc_lock) (h);
      if (h->instantiated == 0)
        BV (clib_bihash_instantiate) (h);
      BV (clib_bihash_alloc_unlock) (h);
    }
#else
  /* Debug image: make sure the table has been instantiated */
  ASSERT (h->instantiated != 0);
#endif

  /*
   * Debug image: make sure that an item being added doesn't accidentally
   * look like a free item.
   */
  ASSERT ((is_add && BV (clib_bihash_is_free) (add_v)) == 0);

  b = BV (clib_bihash_get_bucket) (h, hash);

  BV (clib_bihash_lock_bucket) (b);

  /* First elt in the bucket? */
  if (BIHASH_KVP_AT_BUCKET_LEVEL == 0 && BV (clib_bihash_bucket_is_empty) (b))
    {
      if (is_add == 0)
        {
          BV (clib_bihash_unlock_bucket) (b);
          return (-1);
        }

      BV (clib_bihash_alloc_lock) (h);
      v = BV (value_alloc) (h, 0);
      BV (clib_bihash_alloc_unlock) (h);

      *v->kvp = *add_v;
      tmp_b.as_u64 = 0;         /* clears bucket lock */
      tmp_b.offset = BV (clib_bihash_get_offset) (h, v);
      tmp_b.refcnt = 1;
      CLIB_MEMORY_STORE_BARRIER ();

      b->as_u64 = tmp_b.as_u64; /* unlocks the bucket */
      BV (clib_bihash_increment_stat) (h, BIHASH_STAT_alloc_add, 1);

      return (0);
    }

  /* WARNING: we're still looking at the live copy... */
  limit = BIHASH_KVP_PER_PAGE;
  v = BV (clib_bihash_get_value) (h, b->offset);

  if (PREDICT_FALSE (b->as_u64 & mask.as_u64))
    {
      if (PREDICT_FALSE (b->linear_search))
        limit <<= b->log2_pages;
      else
        v += extract_bits (hash, h->log2_nbuckets, b->log2_pages);
    }

  if (is_add)
    {
      /*
       * Because reader threads are looking at live data,
       * we have to be extra careful. Readers do NOT hold the
       * bucket lock. We need to be SLOWER than a search, past the
       * point where readers CHECK the bucket lock.
       */

      /*
       * For obvious (in hindsight) reasons, see if we're supposed to
       * replace an existing key, then look for an empty slot.
       */
      for (i = 0; i < limit; i++)
        {
          if (BV (clib_bihash_is_free) (&(v->kvp[i])))
            continue;
          if (BV (clib_bihash_key_compare) (v->kvp[i].key, add_v->key))
            {
              /* Add but do not overwrite? */
              if (is_add == 2)
                {
                  BV (clib_bihash_unlock_bucket) (b);
                  return (-2);
                }
              if (overwrite_cb)
                overwrite_cb (&(v->kvp[i]), overwrite_arg);
              clib_memcpy_fast (&(v->kvp[i].value),
                                &add_v->value, sizeof (add_v->value));
              BV (clib_bihash_unlock_bucket) (b);
              BV (clib_bihash_increment_stat) (h, BIHASH_STAT_replace, 1);
              return (0);
            }
        }
      /*
       * Look for an empty slot. If found, use it
       */
      for (i = 0; i < limit; i++)
        {
          if (BV (clib_bihash_is_free) (&(v->kvp[i])))
            {
              /*
               * Copy the value first, so that if a reader manages
               * to match the new key, the value will be right...
               */
              clib_memcpy_fast (&(v->kvp[i].value),
                                &add_v->value, sizeof (add_v->value));
              CLIB_MEMORY_STORE_BARRIER ();     /* Make sure the value has settled */
              clib_memcpy_fast (&(v->kvp[i]), &add_v->key,
                                sizeof (add_v->key));
              b->refcnt++;
              ASSERT (b->refcnt > 0);
              BV (clib_bihash_unlock_bucket) (b);
              BV (clib_bihash_increment_stat) (h, BIHASH_STAT_add, 1);
              return (0);
            }
        }
      /* look for stale data to overwrite */
      if (is_stale_cb)
        {
          for (i = 0; i < limit; i++)
            {
              if (is_stale_cb (&(v->kvp[i]), is_stale_arg))
                {
                  clib_memcpy_fast (&(v->kvp[i]), add_v, sizeof (*add_v));
                  CLIB_MEMORY_STORE_BARRIER ();
                  BV (clib_bihash_unlock_bucket) (b);
                  BV (clib_bihash_increment_stat) (h, BIHASH_STAT_replace, 1);
                  return (0);
                }
            }
        }
      /* Out of space in this bucket, split the bucket... */
    }
  else                          /* delete case */
    {
      for (i = 0; i < limit; i++)
        {
          /* no sense even looking at this one */
          if (BV (clib_bihash_is_free) (&(v->kvp[i])))
            continue;
          /* Found the key? Kill it... */
          if (BV (clib_bihash_key_compare) (v->kvp[i].key, add_v->key))
            {
              BV (clib_bihash_mark_free) (&(v->kvp[i]));
              /* Is the bucket empty? */
              if (PREDICT_TRUE (b->refcnt > 1))
                {
                  b->refcnt--;
                  /* Switch back to the bucket-level kvp array? */
                  if (BIHASH_KVP_AT_BUCKET_LEVEL && b->refcnt == 1
                      && b->log2_pages > 0)
                    {
                      tmp_b.as_u64 = b->as_u64;
                      b->offset = BV (clib_bihash_get_offset)
                        (h, (void *) (b + 1));
                      b->linear_search = 0;
                      b->log2_pages = 0;
                      /* Clean up the bucket-level kvp array */
                      BVT (clib_bihash_kv) *v = (void *) (b + 1);
                      int j;
                      for (j = 0; j < BIHASH_KVP_PER_PAGE; j++)
                        {
                          BV (clib_bihash_mark_free) (v);
                          v++;
                        }
                      CLIB_MEMORY_STORE_BARRIER ();
                      BV (clib_bihash_unlock_bucket) (b);
                      BV (clib_bihash_increment_stat) (h, BIHASH_STAT_del, 1);
                      goto free_backing_store;
                    }

                  CLIB_MEMORY_STORE_BARRIER ();
                  BV (clib_bihash_unlock_bucket) (b);
                  BV (clib_bihash_increment_stat) (h, BIHASH_STAT_del, 1);
                  return (0);
                }
              else              /* yes, free it */
                {
                  /* Save old bucket value, need log2_pages to free it */
                  tmp_b.as_u64 = b->as_u64;

                  /* Kill and unlock the bucket */
                  b->as_u64 = 0;

                free_backing_store:
                  /* And free the backing storage */
                  BV (clib_bihash_alloc_lock) (h);
                  /* Note: v currently points into the middle of the bucket */
                  v = BV (clib_bihash_get_value) (h, tmp_b.offset);
                  BV (value_free) (h, v, tmp_b.log2_pages);
                  BV (clib_bihash_alloc_unlock) (h);
                  BV (clib_bihash_increment_stat) (h, BIHASH_STAT_del_free,
                                                   1);
                  return (0);
                }
            }
        }
      /* Not found... */
      BV (clib_bihash_unlock_bucket) (b);
      return (-3);
    }

  /* Move readers to a (locked) temp copy of the bucket */
  BV (clib_bihash_alloc_lock) (h);
  BV (make_working_copy) (h, b);

  v = BV (clib_bihash_get_value) (h, h->saved_bucket.offset);

  old_log2_pages = h->saved_bucket.log2_pages;
  new_log2_pages = old_log2_pages + 1;
  mark_bucket_linear = 0;
  BV (clib_bihash_increment_stat) (h, BIHASH_STAT_split_add, 1);
  BV (clib_bihash_increment_stat) (h, BIHASH_STAT_splits, old_log2_pages);

  working_copy = h->working_copies[thread_index];
  resplit_once = 0;
  BV (clib_bihash_increment_stat) (h, BIHASH_STAT_splits, 1);

  new_v = BV (split_and_rehash) (h, working_copy, old_log2_pages,
                                 new_log2_pages);
  if (new_v == 0)
    {
    try_resplit:
      resplit_once = 1;
      new_log2_pages++;
      /* Try re-splitting. If that fails, fall back to linear search */
      new_v = BV (split_and_rehash) (h, working_copy, old_log2_pages,
                                     new_log2_pages);
      if (new_v == 0)
        {
        mark_linear:
          new_log2_pages--;
          /* pinned collisions, use linear search */
          new_v =
            BV (split_and_rehash_linear) (h, working_copy, old_log2_pages,
                                          new_log2_pages);
          mark_bucket_linear = 1;
          BV (clib_bihash_increment_stat) (h, BIHASH_STAT_linear, 1);
        }
      BV (clib_bihash_increment_stat) (h, BIHASH_STAT_resplit, 1);
      BV (clib_bihash_increment_stat) (h, BIHASH_STAT_splits,
                                       old_log2_pages + 1);
    }

  /* Try to add the new entry */
  save_new_v = new_v;
  new_hash = BV (clib_bihash_hash) (add_v);
  limit = BIHASH_KVP_PER_PAGE;
  if (mark_bucket_linear)
    limit <<= new_log2_pages;
  else
    new_v += extract_bits (new_hash, h->log2_nbuckets, new_log2_pages);

  for (i = 0; i < limit; i++)
    {
      if (BV (clib_bihash_is_free) (&(new_v->kvp[i])))
        {
          clib_memcpy_fast (&(new_v->kvp[i]), add_v, sizeof (*add_v));
          goto expand_ok;
        }
    }

  /* Crap. Try again */
  BV (value_free) (h, save_new_v, new_log2_pages);
  /*
   * If we've already doubled the size of the bucket once,
   * fall back to linear search now.
   */
  if (resplit_once)
    goto mark_linear;
  else
    goto try_resplit;

expand_ok:
  tmp_b.log2_pages = new_log2_pages;
  tmp_b.offset = BV (clib_bihash_get_offset) (h, save_new_v);
  tmp_b.linear_search = mark_bucket_linear;
#if BIHASH_KVP_AT_BUCKET_LEVEL
  /* Compensate for permanent refcount bump at the bucket level */
  if (new_log2_pages > 0)
#endif
    tmp_b.refcnt = h->saved_bucket.refcnt + 1;
  ASSERT (tmp_b.refcnt > 0);
  tmp_b.lock = 0;
  CLIB_MEMORY_STORE_BARRIER ();
  b->as_u64 = tmp_b.as_u64;

#if BIHASH_KVP_AT_BUCKET_LEVEL
  if (h->saved_bucket.log2_pages > 0)
    {
#endif

      /* free the old bucket, except at the bucket level if so configured */
      v = BV (clib_bihash_get_value) (h, h->saved_bucket.offset);
      BV (value_free) (h, v, h->saved_bucket.log2_pages);

#if BIHASH_KVP_AT_BUCKET_LEVEL
    }
#endif

  BV (clib_bihash_alloc_unlock) (h);
  return (0);
}
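/*
 * Editor's summary of the is_add contract implemented above, as read
 * from the early-return paths:
 *
 *   is_add = 0: delete; returns -3 if the key is not found
 *   is_add = 1: add; an existing key has its value overwritten
 *   is_add = 2: add only if not present; returns -2 if the key exists
 *
 * 0 means success; -1 is returned for a delete against a never
 * instantiated table or an empty bucket.
 */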
static_always_inline int BV (clib_bihash_add_del_inline)
  (BVT (clib_bihash) * h, BVT (clib_bihash_kv) * add_v, int is_add,
   int (*is_stale_cb) (BVT (clib_bihash_kv) *, void *), void *arg)
{
  u64 hash = BV (clib_bihash_hash) (add_v);
  return BV (clib_bihash_add_del_inline_with_hash) (h, add_v, hash, is_add,
						    is_stale_cb, arg, 0, 0);
}
int BV (clib_bihash_add_del_with_hash) (BVT (clib_bihash) * h,
					BVT (clib_bihash_kv) * add_v, u64 hash,
					int is_add)
{
  return BV (clib_bihash_add_del_inline_with_hash) (h, add_v, hash, is_add, 0,
						    0, 0, 0);
}
int BV (clib_bihash_add_del)
  (BVT (clib_bihash) * h, BVT (clib_bihash_kv) * add_v, int is_add)
{
  return BV (clib_bihash_add_del_inline) (h, add_v, is_add, 0, 0);
}
int BV (clib_bihash_add_or_overwrite_stale)
  (BVT (clib_bihash) * h, BVT (clib_bihash_kv) * add_v,
   int (*stale_callback) (BVT (clib_bihash_kv) *, void *), void *arg)
{
  return BV (clib_bihash_add_del_inline) (h, add_v, 1, stale_callback, arg);
}
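/*
 * Editor's sketch of a stale callback (hypothetical helper, 8_8
 * variant): treat the value as a timestamp and let entries older than
 * a cutoff be overwritten in place instead of splitting the bucket.
 *
 *   static int
 *   kvp_is_stale (clib_bihash_kv_8_8_t * kv, void *arg)
 *   {
 *     u64 cutoff = *(u64 *) arg;
 *     return (kv->value < cutoff);
 *   }
 *
 *   clib_bihash_add_or_overwrite_stale_8_8 (&t, &kv, kvp_is_stale,
 *                                           &cutoff);
 */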
int BV (clib_bihash_add_with_overwrite_cb) (
  BVT (clib_bihash) * h, BVT (clib_bihash_kv) * add_v,
  void (overwrite_cb) (BVT (clib_bihash_kv) *, void *), void *arg)
{
  u64 hash = BV (clib_bihash_hash) (add_v);
  return BV (clib_bihash_add_del_inline_with_hash) (h, add_v, hash, 1, 0, 0,
						    overwrite_cb, arg);
}
int BV (clib_bihash_search)
  (BVT (clib_bihash) * h,
   BVT (clib_bihash_kv) * search_key, BVT (clib_bihash_kv) * valuep)
{
  return BV (clib_bihash_search_inline_2) (h, search_key, valuep);
}
u8 *BV (format_bihash) (u8 * s, va_list * args)
{
  BVT (clib_bihash) * h = va_arg (*args, BVT (clib_bihash) *);
  int verbose = va_arg (*args, int);
  BVT (clib_bihash_bucket) * b;
  BVT (clib_bihash_value) * v;
  int i, j, k;
  u64 active_elements = 0;
  u64 active_buckets = 0;
  u64 linear_buckets = 0;

  s = format (s, "Hash table '%s'\n", h->name ? h->name : (u8 *) "(unnamed)");

#if BIHASH_LAZY_INSTANTIATE
  if (PREDICT_FALSE (h->instantiated == 0))
    return format (s, "    empty, uninitialized");
#endif

  for (i = 0; i < h->nbuckets; i++)
    {
      b = BV (clib_bihash_get_bucket) (h, i);
      if (BV (clib_bihash_bucket_is_empty) (b))
        {
          if (verbose > 1)
            s = format (s, "[%d]: empty\n", i);
          continue;
        }

      active_buckets++;

      if (b->linear_search)
        linear_buckets++;

      if (verbose)
        {
          s = format
            (s, "[%d]: heap offset %lld, len %d, refcnt %d, linear %d\n", i,
             b->offset, (1 << b->log2_pages), b->refcnt, b->linear_search);
        }

      v = BV (clib_bihash_get_value) (h, b->offset);
      for (j = 0; j < (1 << b->log2_pages); j++)
        {
          for (k = 0; k < BIHASH_KVP_PER_PAGE; k++)
            {
              if (BV (clib_bihash_is_free) (&v->kvp[k]))
                {
                  if (verbose > 1)
                    s = format (s, "    %d: empty\n",
                                j * BIHASH_KVP_PER_PAGE + k);
                  continue;
                }
              if (verbose)
                {
                  if (h->kvp_fmt_fn)
                    {
                      s = format (s, "    %d: %U\n",
                                  j * BIHASH_KVP_PER_PAGE + k,
                                  h->kvp_fmt_fn, &(v->kvp[k]), verbose);
                    }
                  else
                    {
                      s = format (s, "    %d: %U\n",
                                  j * BIHASH_KVP_PER_PAGE + k,
                                  BV (format_bihash_kvp), &(v->kvp[k]));
                    }
                }
              active_elements++;
            }
          v++;
        }
    }

  s = format (s, "    %lld active elements %lld active buckets\n",
              active_elements, active_buckets);
  s = format (s, "    %d free lists\n", vec_len (h->freelists));

  for (i = 0; i < vec_len (h->freelists); i++)
    {
      u32 nfree = 0;
      BVT (clib_bihash_value) * free_elt;
      u64 free_elt_as_u64 = h->freelists[i];

      while (free_elt_as_u64)
        {
          free_elt = BV (clib_bihash_get_value) (h, free_elt_as_u64);
          nfree++;
          free_elt_as_u64 = free_elt->next_free_as_u64;
        }

      if (nfree || verbose)
        s = format (s, "       [len %d] %u free elts\n", 1 << i, nfree);
    }

  s = format (s, "    %lld linear search buckets\n", linear_buckets);
  if (BIHASH_USE_HEAP)
    {
      BVT (clib_bihash_alloc_chunk) * c = h->chunks;
      uword bytes_left = 0, total_size = 0, n_chunks = 0;

      while (c)
        {
          bytes_left += c->bytes_left;
          total_size += c->size;
          n_chunks++;
          c = c->next;
        }
      s = format (s,
                  "    heap: %u chunk(s) allocated\n"
                  "          bytes: used %U, scrap %U\n", n_chunks,
                  format_memory_size, total_size,
                  format_memory_size, bytes_left);
    }
  else
    {
      u64 used_bytes = alloc_arena_next (h);
      s = format (s,
                  "    arena: base %llx, next %llx\n"
                  "           used %lld b (%lld Mbytes) of %lld b (%lld Mbytes)\n",
                  alloc_arena (h), alloc_arena_next (h),
                  used_bytes, used_bytes >> 20,
                  alloc_arena_size (h), alloc_arena_size (h) >> 20);
    }

  return s;
}
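/*
 * Editor's usage note: this is an ordinary format_function_t, so a
 * table can be dumped with, e.g. (sketch):
 *
 *   fformat (stdout, "%U", format_bihash_8_8, &t, 1);
 *
 * where the trailing argument is the verbose level: 0 prints the
 * summary counters only, and larger values add per-bucket and
 * per-slot detail.
 */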
void BV (clib_bihash_foreach_key_value_pair)
  (BVT (clib_bihash) * h,
   BV (clib_bihash_foreach_key_value_pair_cb) cb, void *arg)
{
  int i, j, k;
  BVT (clib_bihash_bucket) * b;
  BVT (clib_bihash_value) * v;

#if BIHASH_LAZY_INSTANTIATE
  if (PREDICT_FALSE (h->instantiated == 0))
    return;
#endif

  for (i = 0; i < h->nbuckets; i++)
    {
      b = BV (clib_bihash_get_bucket) (h, i);
      if (BV (clib_bihash_bucket_is_empty) (b))
        continue;

      v = BV (clib_bihash_get_value) (h, b->offset);
      for (j = 0; j < (1 << b->log2_pages); j++)
        {
          for (k = 0; k < BIHASH_KVP_PER_PAGE; k++)
            {
              if (BV (clib_bihash_is_free) (&v->kvp[k]))
                continue;

              if (BIHASH_WALK_STOP == cb (&v->kvp[k], arg))
                return;
              /*
               * In case the callback deletes the last entry in the bucket...
               */
              if (BV (clib_bihash_bucket_is_empty) (b))
                goto doublebreak;
            }
          v++;
        }
    doublebreak:
      ;
    }
}
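/*
 * Editor's sketch of a walk callback (hypothetical helper, 8_8
 * variant); return BIHASH_WALK_STOP to abort the walk early:
 *
 *   static int
 *   count_kvp_cb (clib_bihash_kv_8_8_t * kv, void *arg)
 *   {
 *     u64 *counter = arg;
 *     counter[0]++;
 *     return BIHASH_WALK_CONTINUE;
 *   }
 *
 *   u64 n = 0;
 *   clib_bihash_foreach_key_value_pair_8_8 (&t, count_kvp_cb, &n);
 */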
/** @endcond */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */