2 * Copyright (c) 2015 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
16 /** @cond DOCUMENTATION_IS_IN_BIHASH_DOC_H */
/** Initialize a bounded-index extensible hash table.
 *
 * @param h            table header to initialize
 * @param name         table name (pointer stored, string not copied)
 * @param nbuckets     requested bucket count; rounded up to a power of two
 * @param memory_size  size of the table's private mheap, in bytes
 *
 * Every table data structure (buckets, value pages, freelists) is carved
 * from the table's private mheap, so clib_bihash_free can release the
 * whole table in one operation.
 *
 * NOTE(review): this excerpt elides several original lines (opening
 * brace, declarations of i and oldheap, closing brace); comments
 * annotate the visible statements only.
 */
void BV (clib_bihash_init)
  (BVT (clib_bihash) * h, char *name, u32 nbuckets, uword memory_size)
  /* Round the bucket count up to a power of two */
  nbuckets = 1 << (max_log2 (nbuckets));

  h->name = (u8 *) name;
  h->nbuckets = nbuckets;
  h->log2_nbuckets = max_log2 (nbuckets);

  /* Private heap: all allocations below come from it */
  h->mheap = mheap_alloc (0 /* use VM */ , memory_size);

  oldheap = clib_mem_set_heap (h->mheap);
  vec_validate_aligned (h->buckets, nbuckets - 1, CLIB_CACHE_LINE_BYTES);
  /* Writer lock on its own cache line to avoid false sharing */
  h->writer_lock = clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES,
                                           CLIB_CACHE_LINE_BYTES);

  /* Start with every bucket's KVP cache in the reset state */
  for (i = 0; i < nbuckets; i++)
    BV (clib_bihash_reset_cache) (h->buckets + i);

  /* Restore the caller's heap */
  clib_mem_set_heap (oldheap);
/** Free a bihash table.
 * All buckets, value pages and freelists live in the table's private
 * mheap, so freeing the mheap releases everything at once; the header
 * is then zeroed so stale pointers cannot be reused.
 * NOTE(review): surrounding braces are elided in this excerpt. */
void BV (clib_bihash_free) (BVT (clib_bihash) * h)
  mheap_free (h->mheap);
  memset (h, 0, sizeof (*h));
/** Allocate a value array of (1 << log2_pages) pages.
 *
 * Must be called with the writer lock held (asserted below).  Pops from
 * the per-size freelist when one is available; otherwise allocates a
 * fresh cache-line-aligned array from the table's private mheap.  The
 * returned array is filled with 0xff, the "free entry" bit pattern.
 *
 * NOTE(review): the if/else structure around the freelist pop is
 * partially elided in this excerpt (braces, else, return rv).
 */
BVT (clib_bihash_value) *
BV (value_alloc) (BVT (clib_bihash) * h, u32 log2_pages)
  BVT (clib_bihash_value) * rv = 0;

  ASSERT (h->writer_lock[0]);
  /* Freelist for this size empty or not yet created? Allocate fresh. */
  if (log2_pages >= vec_len (h->freelists) || h->freelists[log2_pages] == 0)
      oldheap = clib_mem_set_heap (h->mheap);

      vec_validate (h->freelists, log2_pages);
      rv = clib_mem_alloc_aligned ((sizeof (*rv) * (1 << log2_pages)),
                                   CLIB_CACHE_LINE_BYTES);
      clib_mem_set_heap (oldheap);
  /* Else-branch (elided braces): pop the head of the freelist */
      rv = h->freelists[log2_pages];
      h->freelists[log2_pages] = rv->next_free;

  /*
   * Latest gcc complains that the length arg is zero
   * if we replace (1<<log2_pages) with vec_len(rv).
   */
  /* 0xff == free-entry marker for every kvp in the new pages */
  memset (rv, 0xff, sizeof (*rv) * (1 << log2_pages));
/** Return a value array to the per-size freelist.
 * Writer lock must be held; the freelist vector must already cover
 * log2_pages (guaranteed by the prior value_alloc of this size).
 * NOTE(review): the trailing parameter line (u32 log2_pages) and
 * braces are elided in this excerpt. */
BV (value_free) (BVT (clib_bihash) * h, BVT (clib_bihash_value) * v,
  ASSERT (h->writer_lock[0]);

  ASSERT (vec_len (h->freelists) > log2_pages);

  /* Push onto the head of the freelist for this page count */
  v->next_free = h->freelists[log2_pages];
  h->freelists[log2_pages] = v;
/** Stage bucket b's value pages into a per-thread working copy.
 *
 * Readers traverse buckets without locks, so updates are prepared on a
 * private copy: the bucket header is snapshotted into h->saved_bucket,
 * the bucket is locked (which also disables its KVP cache), and the
 * bucket is repointed at the copy while the update proceeds.
 *
 * NOTE(review): several structural lines (braces, the while-loop body
 * ';') are elided in this excerpt; comments annotate visible
 * statements only.
 */
BV (make_working_copy) (BVT (clib_bihash) * h, BVT (clib_bihash_bucket) * b)
  BVT (clib_bihash_value) * v;
  BVT (clib_bihash_bucket) working_bucket __attribute__ ((aligned (8)));
  BVT (clib_bihash_value) * working_copy;
  u32 thread_index = os_get_thread_index ();
  int log2_working_copy_length;

  /* First update from this thread? Create its working-copy slots. */
  if (thread_index >= vec_len (h->working_copies))
      oldheap = clib_mem_set_heap (h->mheap);
      vec_validate (h->working_copies, thread_index);
      /* ~0 length sentinel forces the (re)allocation below */
      vec_validate_init_empty (h->working_copy_lengths, thread_index, ~0);
      clib_mem_set_heap (oldheap);

  /*
   * working_copies are per-cpu so that near-simultaneous
   * updates from multiple threads will not result in sporadic, spurious
   */
  working_copy = h->working_copies[thread_index];
  log2_working_copy_length = h->working_copy_lengths[thread_index];

  /* Snapshot the bucket so add_del can restore / republish it later */
  h->saved_bucket.as_u64 = b->as_u64;
  oldheap = clib_mem_set_heap (h->mheap);

  /* Grow the working copy if this bucket has more pages than last time */
  if (b->log2_pages > log2_working_copy_length)
      clib_mem_free (working_copy);

      working_copy = clib_mem_alloc_aligned
        (sizeof (working_copy[0]) * (1 << b->log2_pages),
         CLIB_CACHE_LINE_BYTES);
      h->working_copy_lengths[thread_index] = b->log2_pages;
      h->working_copies[thread_index] = working_copy;

  clib_mem_set_heap (oldheap);

  /* Lock the bucket... */
  while (BV (clib_bihash_lock_bucket) (b) == 0)

  v = BV (clib_bihash_get_value) (h, b->offset);

  clib_memcpy (working_copy, v, sizeof (*v) * (1 << b->log2_pages));
  working_bucket.as_u64 = b->as_u64;
  working_bucket.offset = BV (clib_bihash_get_offset) (h, working_copy);
  /* Make the copy fully visible before atomically repointing the bucket */
  CLIB_MEMORY_BARRIER ();
  b->as_u64 = working_bucket.as_u64;
  h->working_copies[thread_index] = working_copy;
/** Rehash a bucket's entries into a larger value array.
 *
 * Each live key is rehashed to its home page in the new array and
 * placed in the first free slot of that page.  If any page overflows
 * ("pinned" collisions), the new array is freed and the caller retries
 * with more pages or falls back to linear mode.
 *
 * NOTE(review): the trailing parameter (new_log2_pages), the new_hash
 * declaration, braces, continue and return statements are elided in
 * this excerpt.
 */
BVT (clib_bihash_value) *
BV (split_and_rehash)
  (BVT (clib_bihash) * h,
   BVT (clib_bihash_value) * old_values, u32 old_log2_pages,
  BVT (clib_bihash_value) * new_values, *new_v;
  int i, j, length_in_kvs;

  new_values = BV (value_alloc) (h, new_log2_pages);
  length_in_kvs = (1 << old_log2_pages) * BIHASH_KVP_PER_PAGE;

  /* Walk every kvp slot of the old array */
  for (i = 0; i < length_in_kvs; i++)
      /* Entry not in use? Forget it */
      if (BV (clib_bihash_is_free) (&(old_values->kvp[i])))

      /* rehash the item onto its new home-page */
      new_hash = BV (clib_bihash_hash) (&(old_values->kvp[i]));
      new_hash >>= h->log2_nbuckets;
      new_hash &= (1 << new_log2_pages) - 1;
      new_v = &new_values[new_hash];

      /* Across the new home-page */
      for (j = 0; j < BIHASH_KVP_PER_PAGE; j++)
          if (BV (clib_bihash_is_free) (&(new_v->kvp[j])))
              clib_memcpy (&(new_v->kvp[j]), &(old_values->kvp[i]),
                           sizeof (new_v->kvp[j]));

      /* Crap. Tell caller to try again */
      BV (value_free) (h, new_values, new_log2_pages);
/** Rehash into a larger array using linear (in-order) placement.
 *
 * Copies live entries, in order, into consecutive free slots of the
 * new array; used when normal rehashing keeps colliding.  The caller
 * marks the bucket linear_search afterwards, so lookups scan the whole
 * bucket instead of one hashed page.
 *
 * NOTE(review): j's initialization to 0, the trailing parameter
 * (new_log2_pages), braces, continue/return statements are elided in
 * this excerpt; the warning path below fires only if the new array is
 * too small, which "should never happen".
 */
BVT (clib_bihash_value) *
BV (split_and_rehash_linear)
  (BVT (clib_bihash) * h,
   BVT (clib_bihash_value) * old_values, u32 old_log2_pages,
  BVT (clib_bihash_value) * new_values;
  int i, j, new_length, old_length;

  new_values = BV (value_alloc) (h, new_log2_pages);
  new_length = (1 << new_log2_pages) * BIHASH_KVP_PER_PAGE;
  old_length = (1 << old_log2_pages) * BIHASH_KVP_PER_PAGE;

  /* Across the old value array */
  for (i = 0; i < old_length; i++)
      /* Find a free slot in the new linear scan bucket */
      for (; j < new_length; j++)
          /* Old value not in use? Forget it. */
          if (BV (clib_bihash_is_free) (&(old_values->kvp[i])))

          /* New value should never be in use */
          if (BV (clib_bihash_is_free) (&(new_values->kvp[j])))
              /* Copy the old value and move along */
              clib_memcpy (&(new_values->kvp[j]), &(old_values->kvp[i]),
                           sizeof (new_values->kvp[j]));

  /* This should never happen... */
  clib_warning ("BUG: linear rehash failed!");
  BV (value_free) (h, new_values, new_log2_pages);
/** Add, replace or delete a key/value pair.
 *
 * @param h      the table
 * @param add_v  key (+ value, when adding)
 * @param is_add 1 to add/replace, 0 to delete
 *
 * Writers are serialized by the table-wide writer lock; readers are
 * never blocked.  Updates happen either in place on a working copy of
 * the bucket, or — when the bucket must grow — by building new value
 * pages and atomically publishing the new bucket header (b->as_u64)
 * behind a memory barrier.
 *
 * NOTE(review): many structural lines are elided in this excerpt
 * (declarations of hash/new_hash/bucket_index/limit/i, braces, goto /
 * return / unlock paths, the is_add tests selecting add vs delete,
 * and the save_new_v = new_v assignment); comments annotate visible
 * statements only.
 */
int BV (clib_bihash_add_del)
  (BVT (clib_bihash) * h, BVT (clib_bihash_kv) * add_v, int is_add)
  BVT (clib_bihash_bucket) * b, tmp_b;
  BVT (clib_bihash_value) * v, *new_v, *save_new_v, *working_copy;
  u32 new_log2_pages, old_log2_pages;
  u32 thread_index = os_get_thread_index ();
  int mark_bucket_linear;

  hash = BV (clib_bihash_hash) (add_v);

  /* Low hash bits select the bucket... */
  bucket_index = hash & (h->nbuckets - 1);
  b = &h->buckets[bucket_index];

  /* ...remaining bits select the page within the bucket */
  hash >>= h->log2_nbuckets;

  tmp_b.linear_search = 0;

  /* Spin until we own the table-wide writer lock */
  while (__sync_lock_test_and_set (h->writer_lock, 1))

  /* First elt in the bucket? */
      v = BV (value_alloc) (h, 0);

      tmp_b.offset = BV (clib_bihash_get_offset) (h, v);

      b->as_u64 = tmp_b.as_u64;

  /* Note: this leaves the cache disabled */
  BV (make_working_copy) (h, b);

  v = BV (clib_bihash_get_value) (h, h->saved_bucket.offset);

  /* Hashed page for normal buckets; scan the whole bucket if linear */
  limit = BIHASH_KVP_PER_PAGE;
  v += (b->linear_search == 0) ? hash & ((1 << b->log2_pages) - 1) : 0;
  if (b->linear_search)
    limit <<= b->log2_pages;

  /*
   * For obvious (in hindsight) reasons, see if we're supposed to
   * replace an existing key, then look for an empty slot.
   */
  for (i = 0; i < limit; i++)
      if (!memcmp (&(v->kvp[i]), &add_v->key, sizeof (add_v->key)))
          /* In-place replace: copy, barrier, republish the old bucket */
          clib_memcpy (&(v->kvp[i]), add_v, sizeof (*add_v));
          CLIB_MEMORY_BARRIER ();
          /* Restore the previous (k,v) pairs */
          b->as_u64 = h->saved_bucket.as_u64;

  /* Second pass: take the first free slot */
  for (i = 0; i < limit; i++)
      if (BV (clib_bihash_is_free) (&(v->kvp[i])))
          clib_memcpy (&(v->kvp[i]), add_v, sizeof (*add_v));
          CLIB_MEMORY_BARRIER ();
          b->as_u64 = h->saved_bucket.as_u64;

  /* no room at the inn... split case... */

  /* Delete path (elided is_add test): mark the slot free with 0xff */
  for (i = 0; i < limit; i++)
      if (!memcmp (&(v->kvp[i]), &add_v->key, sizeof (add_v->key)))
          memset (&(v->kvp[i]), 0xff, sizeof (*(add_v)));
          CLIB_MEMORY_BARRIER ();
          b->as_u64 = h->saved_bucket.as_u64;

  b->as_u64 = h->saved_bucket.as_u64;

  /* Split: first try doubling the number of pages */
  old_log2_pages = h->saved_bucket.log2_pages;
  new_log2_pages = old_log2_pages + 1;
  mark_bucket_linear = 0;

  working_copy = h->working_copies[thread_index];

  new_v = BV (split_and_rehash) (h, working_copy, old_log2_pages,

      /* Try re-splitting. If that fails, fall back to linear search */
      new_v = BV (split_and_rehash) (h, working_copy, old_log2_pages,

          /* pinned collisions, use linear search */
            BV (split_and_rehash_linear) (h, working_copy, old_log2_pages,
          mark_bucket_linear = 1;

  /* Try to add the new entry */
  /* (save_new_v = new_v is elided here; it records the array base) */
  new_hash = BV (clib_bihash_hash) (add_v);
  limit = BIHASH_KVP_PER_PAGE;
  if (mark_bucket_linear)
    limit <<= new_log2_pages;
  new_hash >>= h->log2_nbuckets;
  new_hash &= (1 << new_log2_pages) - 1;
  new_v += mark_bucket_linear ? 0 : new_hash;

  for (i = 0; i < limit; i++)
      if (BV (clib_bihash_is_free) (&(new_v->kvp[i])))
          clib_memcpy (&(new_v->kvp[i]), add_v, sizeof (*add_v));

  /* Crap. Try again */
  BV (value_free) (h, save_new_v, new_log2_pages);
  /*
   * If we've already doubled the size of the bucket once,
   * fall back to linear search now.
   */

  /* Keep track of the number of linear-scan buckets */
  if (tmp_b.linear_search ^ mark_bucket_linear)
    h->linear_buckets += (mark_bucket_linear == 1) ? 1 : -1;

  /* Publish the new bucket atomically, then free the old pages */
  tmp_b.log2_pages = new_log2_pages;
  tmp_b.offset = BV (clib_bihash_get_offset) (h, save_new_v);
  tmp_b.linear_search = mark_bucket_linear;

  CLIB_MEMORY_BARRIER ();
  b->as_u64 = tmp_b.as_u64;
  v = BV (clib_bihash_get_value) (h, h->saved_bucket.offset);
  BV (value_free) (h, v, old_log2_pages);

  /* Re-enable the bucket's KVP cache and release the writer lock */
  BV (clib_bihash_reset_cache) (b);
  BV (clib_bihash_unlock_bucket) (b);
  CLIB_MEMORY_BARRIER ();
  h->writer_lock[0] = 0;
/** Lock-free lookup.
 *
 * @param h           the table
 * @param search_key  key to look up
 * @param valuep      matching (k,v) copied here on hit (copy elided)
 * @return 0 on hit, nonzero on miss (return statements elided)
 *
 * Checks the bucket's KVP cache first (when compiled in and not
 * disabled by a writer), then scans the hashed page — or the whole
 * bucket when it is in linear-search mode.  On a hit, the entry is
 * promoted into the cache, which requires briefly taking the bucket
 * lock (disabling the cache for concurrent readers).
 *
 * NOTE(review): declarations (hash, bucket_index, i, limit,
 * cache_slot), kvp initialization, braces, #endif lines and returns
 * are elided in this excerpt.
 */
int BV (clib_bihash_search)
  (BVT (clib_bihash) * h,
   BVT (clib_bihash_kv) * search_key, BVT (clib_bihash_kv) * valuep)
  BVT (clib_bihash_value) * v;
#if BIHASH_KVP_CACHE_SIZE > 0
  BVT (clib_bihash_kv) * kvp;
  BVT (clib_bihash_bucket) * b;

  hash = BV (clib_bihash_hash) (search_key);

  bucket_index = hash & (h->nbuckets - 1);
  b = &h->buckets[bucket_index];

#if BIHASH_KVP_CACHE_SIZE > 0
  /* Check the cache, if currently enabled */
  /* bit 15 of cache_lru set == cache disabled by a writer */
  if (PREDICT_TRUE ((b->cache_lru & (1 << 15)) == 0))
      limit = BIHASH_KVP_CACHE_SIZE;

      for (i = 0; i < limit; i++)
          if (BV (clib_bihash_key_compare) (kvp[i].key, search_key->key))

  /* Cache miss: remaining hash bits select the page in the bucket */
  hash >>= h->log2_nbuckets;

  v = BV (clib_bihash_get_value) (h, b->offset);
  limit = BIHASH_KVP_PER_PAGE;
  v += (b->linear_search == 0) ? hash & ((1 << b->log2_pages) - 1) : 0;
  if (PREDICT_FALSE (b->linear_search))
    limit <<= b->log2_pages;

  for (i = 0; i < limit; i++)
      if (BV (clib_bihash_key_compare) (v->kvp[i].key, search_key->key))

#if BIHASH_KVP_CACHE_SIZE > 0
          /* Shut off the cache */
          if (BV (clib_bihash_lock_bucket) (b))
              cache_slot = BV (clib_bihash_get_lru) (b);
              b->cache[cache_slot] = v->kvp[i];
              BV (clib_bihash_update_lru) (b, cache_slot);

              /* Reenable the cache */
              BV (clib_bihash_unlock_bucket) (b);
501 u8 *BV (format_bihash_lru) (u8 * s, va_list * args)
503 #if BIHASH_KVP_SIZE > 0
505 BVT (clib_bihash_bucket) * b = va_arg (*args, BVT (clib_bihash_bucket) *);
506 u16 cache_lru = b->cache_lru;
508 s = format (s, "cache %s, order ", cache_lru & (1 << 15) ? "on" : "off");
510 for (i = 0; i < BIHASH_KVP_CACHE_SIZE; i++)
511 s = format (s, "[%d] ", ((cache_lru >> (3 * i)) & 7));
515 return format (s, "cache not configured");
520 BV (clib_bihash_update_lru_not_inline) (BVT (clib_bihash_bucket) * b, u8 slot)
522 #if BIHASH_KVP_SIZE > 0
523 BV (clib_bihash_update_lru) (b, slot);
/** Format a table summary, with a per-entry dump when verbose.
 *
 * va_args: the table pointer, then an int verbose flag.  Prints each
 * bucket's occupancy (verbose), every live kvp (verbose), then
 * active-element, freelist, linear-bucket and cache hit/miss counts.
 *
 * NOTE(review): declarations (i, j, k), the empty-bucket test, the
 * verbose tests, loop braces and the active_elements increment are
 * elided in this excerpt.
 */
u8 *BV (format_bihash) (u8 * s, va_list * args)
  BVT (clib_bihash) * h = va_arg (*args, BVT (clib_bihash) *);
  int verbose = va_arg (*args, int);
  BVT (clib_bihash_bucket) * b;
  BVT (clib_bihash_value) * v;
  u64 active_elements = 0;

  s = format (s, "Hash table %s\n", h->name ? h->name : (u8 *) "(unnamed)");

  for (i = 0; i < h->nbuckets; i++)
          s = format (s, "[%d]: empty\n", i);

          s = format (s, "[%d]: heap offset %d, len %d, linear %d\n", i,
                      b->offset, (1 << b->log2_pages), b->linear_search);

      /* Walk every page and kvp slot of this bucket */
      v = BV (clib_bihash_get_value) (h, b->offset);
      for (j = 0; j < (1 << b->log2_pages); j++)
          for (k = 0; k < BIHASH_KVP_PER_PAGE; k++)
              if (BV (clib_bihash_is_free) (&v->kvp[k]))
                  s = format (s, " %d: empty\n",
                              j * BIHASH_KVP_PER_PAGE + k);

                  s = format (s, " %d: %U\n",
                              j * BIHASH_KVP_PER_PAGE + k,
                              BV (format_bihash_kvp), &(v->kvp[k]));

  s = format (s, " %lld active elements\n", active_elements);
  s = format (s, " %d free lists\n", vec_len (h->freelists));
  s = format (s, " %d linear search buckets\n", h->linear_buckets);
  s = format (s, " %lld cache hits, %lld cache misses\n",
              h->cache_hits, h->cache_misses);
/** Invoke callback (kvp, arg) on every live key/value pair.
 *
 * @param h         the table
 * @param callback  void (*)(BVT (clib_bihash_kv) *, void *), cast below
 * @param arg       opaque argument forwarded to the callback
 *
 * Walks all buckets and pages; free slots are skipped.  No locking is
 * visible here, so the callback presumably must not add or delete
 * entries in this table — NOTE(review): confirm against the header's
 * documented contract.
 *
 * NOTE(review): declarations (i, j, k), the empty-bucket continue and
 * loop braces are elided in this excerpt.
 */
void BV (clib_bihash_foreach_key_value_pair)
  (BVT (clib_bihash) * h, void *callback, void *arg)
  BVT (clib_bihash_bucket) * b;
  BVT (clib_bihash_value) * v;
  void (*fp) (BVT (clib_bihash_kv) *, void *) = callback;

  for (i = 0; i < h->nbuckets; i++)
      v = BV (clib_bihash_get_value) (h, b->offset);
      for (j = 0; j < (1 << b->log2_pages); j++)
          for (k = 0; k < BIHASH_KVP_PER_PAGE; k++)
              if (BV (clib_bihash_is_free) (&v->kvp[k]))

              (*fp) (&v->kvp[k], arg);
618 * fd.io coding-style-patch-verification: ON
621 * eval: (c-set-style "gnu")