lib/librte_hash/rte_cuckoo_hash.c (DPDK 18.11-rc3)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2016 Intel Corporation
3  * Copyright(c) 2018 Arm Limited
4  */
5
6 #include <string.h>
7 #include <stdint.h>
8 #include <errno.h>
9 #include <stdio.h>
10 #include <stdarg.h>
11 #include <sys/queue.h>
12
13 #include <rte_common.h>
14 #include <rte_memory.h>         /* for definition of RTE_CACHE_LINE_SIZE */
15 #include <rte_log.h>
16 #include <rte_prefetch.h>
17 #include <rte_branch_prediction.h>
18 #include <rte_malloc.h>
19 #include <rte_eal.h>
20 #include <rte_eal_memconfig.h>
21 #include <rte_per_lcore.h>
22 #include <rte_errno.h>
23 #include <rte_string_fns.h>
24 #include <rte_cpuflags.h>
25 #include <rte_rwlock.h>
26 #include <rte_spinlock.h>
27 #include <rte_ring.h>
28 #include <rte_compat.h>
29
30 #include "rte_hash.h"
31 #include "rte_cuckoo_hash.h"
32
33 #define FOR_EACH_BUCKET(CURRENT_BKT, START_BUCKET)                            \
34         for (CURRENT_BKT = START_BUCKET;                                      \
35                 CURRENT_BKT != NULL;                                          \
36                 CURRENT_BKT = CURRENT_BKT->next)
37
38 TAILQ_HEAD(rte_hash_list, rte_tailq_entry);
39
40 static struct rte_tailq_elem rte_hash_tailq = {
41         .name = "RTE_HASH",
42 };
43 EAL_REGISTER_TAILQ(rte_hash_tailq)
44
45 struct rte_hash *
46 rte_hash_find_existing(const char *name)
47 {
48         struct rte_hash *h = NULL;
49         struct rte_tailq_entry *te;
50         struct rte_hash_list *hash_list;
51
52         hash_list = RTE_TAILQ_CAST(rte_hash_tailq.head, rte_hash_list);
53
54         rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
55         TAILQ_FOREACH(te, hash_list, next) {
56                 h = (struct rte_hash *) te->data;
57                 if (strncmp(name, h->name, RTE_HASH_NAMESIZE) == 0)
58                         break;
59         }
60         rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);
61
62         if (te == NULL) {
63                 rte_errno = ENOENT;
64                 return NULL;
65         }
66         return h;
67 }
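/*
 * Illustrative lookup-by-name (not part of the library), e.g. from a
 * secondary process attaching to a table created elsewhere. The table
 * name "flow_table" is a hypothetical example:
 *
 *   struct rte_hash *handle = rte_hash_find_existing("flow_table");
 *   if (handle == NULL && rte_errno == ENOENT)
 *           printf("no hash table named flow_table exists\n");
 */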
68
69 static inline struct rte_hash_bucket *
70 rte_hash_get_last_bkt(struct rte_hash_bucket *lst_bkt)
71 {
72         while (lst_bkt->next != NULL)
73                 lst_bkt = lst_bkt->next;
74         return lst_bkt;
75 }
76
77 void rte_hash_set_cmp_func(struct rte_hash *h, rte_hash_cmp_eq_t func)
78 {
79         h->cmp_jump_table_idx = KEY_CUSTOM;
80         h->rte_hash_custom_cmp_eq = func;
81 }
82
83 static inline int
84 rte_hash_cmp_eq(const void *key1, const void *key2, const struct rte_hash *h)
85 {
86         if (h->cmp_jump_table_idx == KEY_CUSTOM)
87                 return h->rte_hash_custom_cmp_eq(key1, key2, h->key_len);
88         else
89                 return cmp_jump_table[h->cmp_jump_table_idx](key1, key2, h->key_len);
90 }
91
92 /*
93  * We use the higher 16 bits of the hash as the signature value stored
94  * in the table, and the lower bits for the primary bucket location.
95  * The primary bucket location is then XORed with the signature to get
96  * the secondary bucket location. This is the same scheme as proposed
97  * in Bin Fan, et al's paper
98  * "MemC3: Compact and Concurrent MemCache with Dumber Caching and
99  * Smarter Hashing". The benefit of using
100  * XOR is that the alternative bucket location can be derived from
101  * just the current bucket location and the signature.
102  */
103 static inline uint16_t
104 get_short_sig(const hash_sig_t hash)
105 {
106         return hash >> 16;
107 }
108
109 static inline uint32_t
110 get_prim_bucket_index(const struct rte_hash *h, const hash_sig_t hash)
111 {
112         return hash & h->bucket_bitmask;
113 }
114
115 static inline uint32_t
116 get_alt_bucket_index(const struct rte_hash *h,
117                         uint32_t cur_bkt_idx, uint16_t sig)
118 {
119         return (cur_bkt_idx ^ sig) & h->bucket_bitmask;
120 }
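/*
 * Worked example (illustrative only, not part of the library): with 256
 * buckets (bucket_bitmask = 0xff) and hash = 0x12345678, the helpers
 * above give
 *
 *   sig  = get_short_sig(0x12345678)              = 0x1234
 *   prim = get_prim_bucket_index(h, 0x12345678)   = 0x12345678 & 0xff = 0x78
 *   alt  = get_alt_bucket_index(h, 0x78, 0x1234)  = (0x78 ^ 0x1234) & 0xff = 0x4c
 *
 * Applying the XOR again recovers the primary bucket from the alternative
 * one: (0x4c ^ 0x1234) & 0xff = 0x78.
 */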
121
122 struct rte_hash *
123 rte_hash_create(const struct rte_hash_parameters *params)
124 {
125         struct rte_hash *h = NULL;
126         struct rte_tailq_entry *te = NULL;
127         struct rte_hash_list *hash_list;
128         struct rte_ring *r = NULL;
129         struct rte_ring *r_ext = NULL;
130         char hash_name[RTE_HASH_NAMESIZE];
131         void *k = NULL;
132         void *buckets = NULL;
133         void *buckets_ext = NULL;
134         char ring_name[RTE_RING_NAMESIZE];
135         char ext_ring_name[RTE_RING_NAMESIZE];
136         unsigned num_key_slots;
137         unsigned i;
138         unsigned int hw_trans_mem_support = 0, use_local_cache = 0;
139         unsigned int ext_table_support = 0;
140         unsigned int readwrite_concur_support = 0;
141         unsigned int writer_takes_lock = 0;
142         unsigned int no_free_on_del = 0;
143         uint32_t *tbl_chng_cnt = NULL;
144         unsigned int readwrite_concur_lf_support = 0;
145
146         rte_hash_function default_hash_func = (rte_hash_function)rte_jhash;
147
148         hash_list = RTE_TAILQ_CAST(rte_hash_tailq.head, rte_hash_list);
149
150         if (params == NULL) {
151                 RTE_LOG(ERR, HASH, "rte_hash_create has no parameters\n");
152                 return NULL;
153         }
154
155         /* Check for valid parameters */
156         if ((params->entries > RTE_HASH_ENTRIES_MAX) ||
157                         (params->entries < RTE_HASH_BUCKET_ENTRIES) ||
158                         (params->key_len == 0)) {
159                 rte_errno = EINVAL;
160                 RTE_LOG(ERR, HASH, "rte_hash_create has invalid parameters\n");
161                 return NULL;
162         }
163
164         /* Validate correct usage of extra options */
165         if ((params->extra_flag & RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY) &&
166             (params->extra_flag & RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY_LF)) {
167                 rte_errno = EINVAL;
168                 RTE_LOG(ERR, HASH, "rte_hash_create: choose rw concurrency or "
169                         "rw concurrency lock free\n");
170                 return NULL;
171         }
172
173         if ((params->extra_flag & RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY_LF) &&
174             (params->extra_flag & RTE_HASH_EXTRA_FLAGS_EXT_TABLE)) {
175                 rte_errno = EINVAL;
176                 RTE_LOG(ERR, HASH, "rte_hash_create: extendable bucket "
177                         "feature not supported with rw concurrency "
178                         "lock free\n");
179                 return NULL;
180         }
181
182         /* Parse the extra flags field for optional features. */
183         if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_TRANS_MEM_SUPPORT)
184                 hw_trans_mem_support = 1;
185
186         if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_MULTI_WRITER_ADD) {
187                 use_local_cache = 1;
188                 writer_takes_lock = 1;
189         }
190
191         if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY) {
192                 readwrite_concur_support = 1;
193                 writer_takes_lock = 1;
194         }
195
196         if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_EXT_TABLE)
197                 ext_table_support = 1;
198
199         if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_NO_FREE_ON_DEL)
200                 no_free_on_del = 1;
201
202         if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY_LF) {
203                 readwrite_concur_lf_support = 1;
204                 /* Enable not freeing internal memory/index on delete */
205                 no_free_on_del = 1;
206         }
207
208         /* Store all keys and leave the first entry as a dummy entry for lookup_bulk */
209         if (use_local_cache)
210                 /*
211                  * Increase number of slots by total number of indices
212                  * that can be stored in the lcore caches
213                  * except for the first cache
214                  */
215                 num_key_slots = params->entries + (RTE_MAX_LCORE - 1) *
216                                         (LCORE_CACHE_SIZE - 1) + 1;
217         else
218                 num_key_slots = params->entries + 1;
219
220         snprintf(ring_name, sizeof(ring_name), "HT_%s", params->name);
221         /* Create ring (Dummy slot index is not enqueued) */
222         r = rte_ring_create(ring_name, rte_align32pow2(num_key_slots),
223                         params->socket_id, 0);
224         if (r == NULL) {
225                 RTE_LOG(ERR, HASH, "memory allocation failed\n");
226                 goto err;
227         }
228
229         const uint32_t num_buckets = rte_align32pow2(params->entries) /
230                                                 RTE_HASH_BUCKET_ENTRIES;
231
232         /* Create ring for extendable buckets. */
233         if (ext_table_support) {
234                 snprintf(ext_ring_name, sizeof(ext_ring_name), "HT_EXT_%s",
235                                                                 params->name);
236                 r_ext = rte_ring_create(ext_ring_name,
237                                 rte_align32pow2(num_buckets + 1),
238                                 params->socket_id, 0);
239
240                 if (r_ext == NULL) {
241                         RTE_LOG(ERR, HASH, "ext buckets memory allocation "
242                                                                 "failed\n");
243                         goto err;
244                 }
245         }
246
247         snprintf(hash_name, sizeof(hash_name), "HT_%s", params->name);
248
249         rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
250
251         /* guarantee there's no existing entry with the same name; this is
252          * normally already checked by the ring creation above */
253         TAILQ_FOREACH(te, hash_list, next) {
254                 h = (struct rte_hash *) te->data;
255                 if (strncmp(params->name, h->name, RTE_HASH_NAMESIZE) == 0)
256                         break;
257         }
258         h = NULL;
259         if (te != NULL) {
260                 rte_errno = EEXIST;
261                 te = NULL;
262                 goto err_unlock;
263         }
264
265         te = rte_zmalloc("HASH_TAILQ_ENTRY", sizeof(*te), 0);
266         if (te == NULL) {
267                 RTE_LOG(ERR, HASH, "tailq entry allocation failed\n");
268                 goto err_unlock;
269         }
270
271         h = (struct rte_hash *)rte_zmalloc_socket(hash_name, sizeof(struct rte_hash),
272                                         RTE_CACHE_LINE_SIZE, params->socket_id);
273
274         if (h == NULL) {
275                 RTE_LOG(ERR, HASH, "memory allocation failed\n");
276                 goto err_unlock;
277         }
278
279         buckets = rte_zmalloc_socket(NULL,
280                                 num_buckets * sizeof(struct rte_hash_bucket),
281                                 RTE_CACHE_LINE_SIZE, params->socket_id);
282
283         if (buckets == NULL) {
284                 RTE_LOG(ERR, HASH, "buckets memory allocation failed\n");
285                 goto err_unlock;
286         }
287
288         /* Allocate same number of extendable buckets */
289         if (ext_table_support) {
290                 buckets_ext = rte_zmalloc_socket(NULL,
291                                 num_buckets * sizeof(struct rte_hash_bucket),
292                                 RTE_CACHE_LINE_SIZE, params->socket_id);
293                 if (buckets_ext == NULL) {
294                         RTE_LOG(ERR, HASH, "ext buckets memory allocation "
295                                                         "failed\n");
296                         goto err_unlock;
297                 }
298                 /* Populate the ext bkt ring. Index 0 is reserved, as for the
299                  * key-data slots, in case we later want to use the bucket
300                  * index for the linked list, with 0 meaning NULL for the
301                  * next bucket.
302                  */
303                 for (i = 1; i <= num_buckets; i++)
304                         rte_ring_sp_enqueue(r_ext, (void *)((uintptr_t) i));
305         }
306
307         const uint32_t key_entry_size =
308                 RTE_ALIGN(sizeof(struct rte_hash_key) + params->key_len,
309                           KEY_ALIGNMENT);
310         const uint64_t key_tbl_size = (uint64_t) key_entry_size * num_key_slots;
311
312         k = rte_zmalloc_socket(NULL, key_tbl_size,
313                         RTE_CACHE_LINE_SIZE, params->socket_id);
314
315         if (k == NULL) {
316                 RTE_LOG(ERR, HASH, "memory allocation failed\n");
317                 goto err_unlock;
318         }
319
320         tbl_chng_cnt = rte_zmalloc_socket(NULL, sizeof(uint32_t),
321                         RTE_CACHE_LINE_SIZE, params->socket_id);
322
323         if (tbl_chng_cnt == NULL) {
324                 RTE_LOG(ERR, HASH, "memory allocation failed\n");
325                 goto err_unlock;
326         }
327
328 /*
329  * On x86 or ARM64, select an appropriate compare function, which may
330  * use architecture-specific intrinsics; otherwise fall back to memcmp.
331  */
332 #if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
333         /* Select function to compare keys */
334         switch (params->key_len) {
335         case 16:
336                 h->cmp_jump_table_idx = KEY_16_BYTES;
337                 break;
338         case 32:
339                 h->cmp_jump_table_idx = KEY_32_BYTES;
340                 break;
341         case 48:
342                 h->cmp_jump_table_idx = KEY_48_BYTES;
343                 break;
344         case 64:
345                 h->cmp_jump_table_idx = KEY_64_BYTES;
346                 break;
347         case 80:
348                 h->cmp_jump_table_idx = KEY_80_BYTES;
349                 break;
350         case 96:
351                 h->cmp_jump_table_idx = KEY_96_BYTES;
352                 break;
353         case 112:
354                 h->cmp_jump_table_idx = KEY_112_BYTES;
355                 break;
356         case 128:
357                 h->cmp_jump_table_idx = KEY_128_BYTES;
358                 break;
359         default:
360                 /* If key is not multiple of 16, use generic memcmp */
361                 h->cmp_jump_table_idx = KEY_OTHER_BYTES;
362         }
363 #else
364         h->cmp_jump_table_idx = KEY_OTHER_BYTES;
365 #endif
366
367         if (use_local_cache) {
368                 h->local_free_slots = rte_zmalloc_socket(NULL,
369                                 sizeof(struct lcore_cache) * RTE_MAX_LCORE,
370                                 RTE_CACHE_LINE_SIZE, params->socket_id);
                /* Bail out if the local cache allocation failed; the error
                 * path below releases everything allocated so far.
                 */
                if (h->local_free_slots == NULL)
                        goto err_unlock;
371         }
372
373         /* Default hash function */
374 #if defined(RTE_ARCH_X86)
375         default_hash_func = (rte_hash_function)rte_hash_crc;
376 #elif defined(RTE_ARCH_ARM64)
377         if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_CRC32))
378                 default_hash_func = (rte_hash_function)rte_hash_crc;
379 #endif
380         /* Setup hash context */
381         snprintf(h->name, sizeof(h->name), "%s", params->name);
382         h->entries = params->entries;
383         h->key_len = params->key_len;
384         h->key_entry_size = key_entry_size;
385         h->hash_func_init_val = params->hash_func_init_val;
386
387         h->num_buckets = num_buckets;
388         h->bucket_bitmask = h->num_buckets - 1;
389         h->buckets = buckets;
390         h->buckets_ext = buckets_ext;
391         h->free_ext_bkts = r_ext;
392         h->hash_func = (params->hash_func == NULL) ?
393                 default_hash_func : params->hash_func;
394         h->key_store = k;
395         h->free_slots = r;
396         h->tbl_chng_cnt = tbl_chng_cnt;
397         *h->tbl_chng_cnt = 0;
398         h->hw_trans_mem_support = hw_trans_mem_support;
399         h->use_local_cache = use_local_cache;
400         h->readwrite_concur_support = readwrite_concur_support;
401         h->ext_table_support = ext_table_support;
402         h->writer_takes_lock = writer_takes_lock;
403         h->no_free_on_del = no_free_on_del;
404         h->readwrite_concur_lf_support = readwrite_concur_lf_support;
405
406 #if defined(RTE_ARCH_X86)
407         if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE2))
408                 h->sig_cmp_fn = RTE_HASH_COMPARE_SSE;
409         else
410 #endif
411                 h->sig_cmp_fn = RTE_HASH_COMPARE_SCALAR;
412
413         /* Writer threads need to take the lock when:
414          * 1) RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY is enabled OR
415          * 2) RTE_HASH_EXTRA_FLAGS_MULTI_WRITER_ADD is enabled
416          */
417         if (h->writer_takes_lock) {
418                 h->readwrite_lock = rte_malloc(NULL, sizeof(rte_rwlock_t),
419                                                 RTE_CACHE_LINE_SIZE);
420                 if (h->readwrite_lock == NULL)
421                         goto err_unlock;
422
423                 rte_rwlock_init(h->readwrite_lock);
424         }
425
426         /* Populate free slots ring. Entry zero is reserved for key misses. */
427         for (i = 1; i < num_key_slots; i++)
428                 rte_ring_sp_enqueue(r, (void *)((uintptr_t) i));
429
430         te->data = (void *) h;
431         TAILQ_INSERT_TAIL(hash_list, te, next);
432         rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
433
434         return h;
435 err_unlock:
436         rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
437 err:
438         rte_ring_free(r);
439         rte_ring_free(r_ext);
440         rte_free(te);
441         rte_free(h);
442         rte_free(buckets);
443         rte_free(buckets_ext);
444         rte_free(k);
445         rte_free(tbl_chng_cnt);
446         return NULL;
447 }
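/*
 * Minimal usage sketch (illustrative only, assuming the EAL is already
 * initialized). The field names match struct rte_hash_parameters as used
 * above; error handling is reduced to a NULL check:
 *
 *   struct rte_hash_parameters params = {
 *           .name = "flow_table",
 *           .entries = 1024,
 *           .key_len = sizeof(uint32_t),
 *           .hash_func = rte_hash_crc,
 *           .hash_func_init_val = 0,
 *           .socket_id = rte_socket_id(),
 *           .extra_flag = RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY,
 *   };
 *   struct rte_hash *handle = rte_hash_create(&params);
 *   if (handle == NULL)
 *           rte_exit(EXIT_FAILURE, "hash creation failed: %d\n", rte_errno);
 */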
448
449 void
450 rte_hash_free(struct rte_hash *h)
451 {
452         struct rte_tailq_entry *te;
453         struct rte_hash_list *hash_list;
454
455         if (h == NULL)
456                 return;
457
458         hash_list = RTE_TAILQ_CAST(rte_hash_tailq.head, rte_hash_list);
459
460         rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
461
462         /* find out tailq entry */
463         TAILQ_FOREACH(te, hash_list, next) {
464                 if (te->data == (void *) h)
465                         break;
466         }
467
468         if (te == NULL) {
469                 rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
470                 return;
471         }
472
473         TAILQ_REMOVE(hash_list, te, next);
474
475         rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
476
477         if (h->use_local_cache)
478                 rte_free(h->local_free_slots);
479         if (h->writer_takes_lock)
480                 rte_free(h->readwrite_lock);
481         rte_ring_free(h->free_slots);
482         rte_ring_free(h->free_ext_bkts);
483         rte_free(h->key_store);
484         rte_free(h->buckets);
485         rte_free(h->buckets_ext);
486         rte_free(h->tbl_chng_cnt);
487         rte_free(h);
488         rte_free(te);
489 }
490
491 hash_sig_t
492 rte_hash_hash(const struct rte_hash *h, const void *key)
493 {
494         /* calc hash result by key */
495         return h->hash_func(key, h->key_len, h->hash_func_init_val);
496 }
497
498 int32_t
499 rte_hash_count(const struct rte_hash *h)
500 {
501         uint32_t tot_ring_cnt, cached_cnt = 0;
502         uint32_t i, ret;
503
504         if (h == NULL)
505                 return -EINVAL;
506
507         if (h->use_local_cache) {
508                 tot_ring_cnt = h->entries + (RTE_MAX_LCORE - 1) *
509                                         (LCORE_CACHE_SIZE - 1);
510                 for (i = 0; i < RTE_MAX_LCORE; i++)
511                         cached_cnt += h->local_free_slots[i].len;
512
513                 ret = tot_ring_cnt - rte_ring_count(h->free_slots) -
514                                                                 cached_cnt;
515         } else {
516                 tot_ring_cnt = h->entries;
517                 ret = tot_ring_cnt - rte_ring_count(h->free_slots);
518         }
519         return ret;
520 }
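/*
 * Capacity accounting example (illustrative, using hypothetical build-time
 * values RTE_MAX_LCORE = 128 and LCORE_CACHE_SIZE = 64): a table created
 * with entries = 1024 and local caches enabled has
 *
 *   tot_ring_cnt = 1024 + (128 - 1) * (64 - 1) = 9025
 *
 * usable key slots, and rte_hash_count() reports that total minus the
 * slots still sitting in the global free ring and in the per-lcore caches.
 */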
521
522 /* Read write locks implemented using rte_rwlock */
523 static inline void
524 __hash_rw_writer_lock(const struct rte_hash *h)
525 {
526         if (h->writer_takes_lock && h->hw_trans_mem_support)
527                 rte_rwlock_write_lock_tm(h->readwrite_lock);
528         else if (h->writer_takes_lock)
529                 rte_rwlock_write_lock(h->readwrite_lock);
530 }
531
532 static inline void
533 __hash_rw_reader_lock(const struct rte_hash *h)
534 {
535         if (h->readwrite_concur_support && h->hw_trans_mem_support)
536                 rte_rwlock_read_lock_tm(h->readwrite_lock);
537         else if (h->readwrite_concur_support)
538                 rte_rwlock_read_lock(h->readwrite_lock);
539 }
540
541 static inline void
542 __hash_rw_writer_unlock(const struct rte_hash *h)
543 {
544         if (h->writer_takes_lock && h->hw_trans_mem_support)
545                 rte_rwlock_write_unlock_tm(h->readwrite_lock);
546         else if (h->writer_takes_lock)
547                 rte_rwlock_write_unlock(h->readwrite_lock);
548 }
549
550 static inline void
551 __hash_rw_reader_unlock(const struct rte_hash *h)
552 {
553         if (h->readwrite_concur_support && h->hw_trans_mem_support)
554                 rte_rwlock_read_unlock_tm(h->readwrite_lock);
555         else if (h->readwrite_concur_support)
556                 rte_rwlock_read_unlock(h->readwrite_lock);
557 }
558
559 void
560 rte_hash_reset(struct rte_hash *h)
561 {
562         void *ptr;
563         uint32_t tot_ring_cnt, i;
564
565         if (h == NULL)
566                 return;
567
568         __hash_rw_writer_lock(h);
569         memset(h->buckets, 0, h->num_buckets * sizeof(struct rte_hash_bucket));
570         memset(h->key_store, 0, h->key_entry_size * (h->entries + 1));
571         *h->tbl_chng_cnt = 0;
572
573         /* clear the free ring */
574         while (rte_ring_dequeue(h->free_slots, &ptr) == 0)
575                 continue;
576
577         /* clear free extendable bucket ring and memory */
578         if (h->ext_table_support) {
579                 memset(h->buckets_ext, 0, h->num_buckets *
580                                                 sizeof(struct rte_hash_bucket));
581                 while (rte_ring_dequeue(h->free_ext_bkts, &ptr) == 0)
582                         continue;
583         }
584
585         /* Repopulate the free slots ring. Entry zero is reserved for key misses */
586         if (h->use_local_cache)
587                 tot_ring_cnt = h->entries + (RTE_MAX_LCORE - 1) *
588                                         (LCORE_CACHE_SIZE - 1);
589         else
590                 tot_ring_cnt = h->entries;
591
592         for (i = 1; i < tot_ring_cnt + 1; i++)
593                 rte_ring_sp_enqueue(h->free_slots, (void *)((uintptr_t) i));
594
595         /* Repopulate the free ext bkt ring. */
596         if (h->ext_table_support) {
597                 for (i = 1; i <= h->num_buckets; i++)
598                         rte_ring_sp_enqueue(h->free_ext_bkts,
599                                                 (void *)((uintptr_t) i));
600         }
601
602         if (h->use_local_cache) {
603                 /* Reset local caches per lcore */
604                 for (i = 0; i < RTE_MAX_LCORE; i++)
605                         h->local_free_slots[i].len = 0;
606         }
607         __hash_rw_writer_unlock(h);
608 }
609
610 /*
611  * Function called to enqueue an index back into the cache/ring,
612  * as the slot has not been used and can be reused in the
613  * next addition attempt.
614  */
615 static inline void
616 enqueue_slot_back(const struct rte_hash *h,
617                 struct lcore_cache *cached_free_slots,
618                 void *slot_id)
619 {
620         if (h->use_local_cache) {
621                 cached_free_slots->objs[cached_free_slots->len] = slot_id;
622                 cached_free_slots->len++;
623         } else
624                 rte_ring_sp_enqueue(h->free_slots, slot_id);
625 }
626
627 /* Search a bucket for the key and update its data.
628  * The writer holds the lock before calling this.
629  */
630 static inline int32_t
631 search_and_update(const struct rte_hash *h, void *data, const void *key,
632         struct rte_hash_bucket *bkt, uint16_t sig)
633 {
634         int i;
635         struct rte_hash_key *k, *keys = h->key_store;
636
637         for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
638                 if (bkt->sig_current[i] == sig) {
639                         k = (struct rte_hash_key *) ((char *)keys +
640                                         bkt->key_idx[i] * h->key_entry_size);
641                         if (rte_hash_cmp_eq(key, k->key, h) == 0) {
642                                 /* 'pdata' acts as the synchronization point
643                                  * when an existing hash entry is updated.
644                                  * Key is not updated in this case.
645                                  */
646                                 __atomic_store_n(&k->pdata,
647                                         data,
648                                         __ATOMIC_RELEASE);
649                                 /*
650                                  * Return index where key is stored,
651                                  * subtracting the first dummy index
652                                  */
653                                 return bkt->key_idx[i] - 1;
654                         }
655                 }
656         }
657         return -1;
658 }
659
660 /* Only tries to insert at one bucket (@prim_bkt) without trying to push
661  * buckets around.
662  * Return 1 if a matching key already exists, 0 on success, and -1 if no
663  * empty entry is available.
664  */
665 static inline int32_t
666 rte_hash_cuckoo_insert_mw(const struct rte_hash *h,
667                 struct rte_hash_bucket *prim_bkt,
668                 struct rte_hash_bucket *sec_bkt,
669                 const struct rte_hash_key *key, void *data,
670                 uint16_t sig, uint32_t new_idx,
671                 int32_t *ret_val)
672 {
673         unsigned int i;
674         struct rte_hash_bucket *cur_bkt;
675         int32_t ret;
676
677         __hash_rw_writer_lock(h);
678         /* Check if the key was inserted after the last check but before
679          * this protected region, to avoid inserting duplicate keys.
680          */
681         ret = search_and_update(h, data, key, prim_bkt, sig);
682         if (ret != -1) {
683                 __hash_rw_writer_unlock(h);
684                 *ret_val = ret;
685                 return 1;
686         }
687
688         FOR_EACH_BUCKET(cur_bkt, sec_bkt) {
689                 ret = search_and_update(h, data, key, cur_bkt, sig);
690                 if (ret != -1) {
691                         __hash_rw_writer_unlock(h);
692                         *ret_val = ret;
693                         return 1;
694                 }
695         }
696
697         /* Insert new entry if there is room in the primary
698          * bucket.
699          */
700         for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
701                 /* Check if slot is available */
702                 if (likely(prim_bkt->key_idx[i] == EMPTY_SLOT)) {
703                         prim_bkt->sig_current[i] = sig;
704                         /* Key can be of arbitrary length, so it is
705                          * not possible to store it atomically.
706                          * Hence the new key element's memory stores
707                          * (key as well as data) should be complete
708                          * before it is referenced.
709                          */
710                         __atomic_store_n(&prim_bkt->key_idx[i],
711                                          new_idx,
712                                          __ATOMIC_RELEASE);
713                         break;
714                 }
715         }
716         __hash_rw_writer_unlock(h);
717
718         if (i != RTE_HASH_BUCKET_ENTRIES)
719                 return 0;
720
721         /* no empty entry */
722         return -1;
723 }
724
725 /* Shift buckets along the provided cuckoo_path (@leaf and @leaf_slot) and
726  * fill the path head with the new entry (sig, alt_hash, new_idx).
727  * Return 1 if a matching key is found, -1 if the cuckoo path was
728  * invalidated and the move failed, and 0 on success.
729  */
730 static inline int
731 rte_hash_cuckoo_move_insert_mw(const struct rte_hash *h,
732                         struct rte_hash_bucket *bkt,
733                         struct rte_hash_bucket *alt_bkt,
734                         const struct rte_hash_key *key, void *data,
735                         struct queue_node *leaf, uint32_t leaf_slot,
736                         uint16_t sig, uint32_t new_idx,
737                         int32_t *ret_val)
738 {
739         uint32_t prev_alt_bkt_idx;
740         struct rte_hash_bucket *cur_bkt;
741         struct queue_node *prev_node, *curr_node = leaf;
742         struct rte_hash_bucket *prev_bkt, *curr_bkt = leaf->bkt;
743         uint32_t prev_slot, curr_slot = leaf_slot;
744         int32_t ret;
745
746         __hash_rw_writer_lock(h);
747
748         /* In case the empty slot was taken before entering the protected region */
749         if (curr_bkt->key_idx[curr_slot] != EMPTY_SLOT) {
750                 __hash_rw_writer_unlock(h);
751                 return -1;
752         }
753
754         /* Check if key was inserted after last check but before this
755          * protected region.
756          */
757         ret = search_and_update(h, data, key, bkt, sig);
758         if (ret != -1) {
759                 __hash_rw_writer_unlock(h);
760                 *ret_val = ret;
761                 return 1;
762         }
763
764         FOR_EACH_BUCKET(cur_bkt, alt_bkt) {
765                 ret = search_and_update(h, data, key, cur_bkt, sig);
766                 if (ret != -1) {
767                         __hash_rw_writer_unlock(h);
768                         *ret_val = ret;
769                         return 1;
770                 }
771         }
772
773         while (likely(curr_node->prev != NULL)) {
774                 prev_node = curr_node->prev;
775                 prev_bkt = prev_node->bkt;
776                 prev_slot = curr_node->prev_slot;
777
778                 prev_alt_bkt_idx = get_alt_bucket_index(h,
779                                         prev_node->cur_bkt_idx,
780                                         prev_bkt->sig_current[prev_slot]);
781
782                 if (unlikely(&h->buckets[prev_alt_bkt_idx]
783                                 != curr_bkt)) {
784                         /* revert it to empty, otherwise duplicated keys */
785                         __atomic_store_n(&curr_bkt->key_idx[curr_slot],
786                                 EMPTY_SLOT,
787                                 __ATOMIC_RELEASE);
788                         __hash_rw_writer_unlock(h);
789                         return -1;
790                 }
791
792                 if (h->readwrite_concur_lf_support) {
793                         /* Inform the previous move. The current move need
794                          * not be informed now as the current bucket entry
795                          * is present in both primary and secondary.
796                          * Since there is one writer, load acquires on
797                          * tbl_chng_cnt are not required.
798                          */
799                         __atomic_store_n(h->tbl_chng_cnt,
800                                          *h->tbl_chng_cnt + 1,
801                                          __ATOMIC_RELEASE);
802                         /* The stores to sig_alt and sig_current should not
803                          * move above the store to tbl_chng_cnt.
804                          */
805                         __atomic_thread_fence(__ATOMIC_RELEASE);
806                 }
807
808                 /* Need to swap current/alt sig to allow later
809                  * Cuckoo insert to move elements back to its
810                  * primary bucket if available
811                  */
812                 curr_bkt->sig_current[curr_slot] =
813                         prev_bkt->sig_current[prev_slot];
814                 /* Release the updated bucket entry */
815                 __atomic_store_n(&curr_bkt->key_idx[curr_slot],
816                         prev_bkt->key_idx[prev_slot],
817                         __ATOMIC_RELEASE);
818
819                 curr_slot = prev_slot;
820                 curr_node = prev_node;
821                 curr_bkt = curr_node->bkt;
822         }
823
824         if (h->readwrite_concur_lf_support) {
825                 /* Inform the previous move. The current move need
826                  * not be informed now as the current bucket entry
827                  * is present in both primary and secondary.
828                  * Since there is one writer, load acquires on
829                  * tbl_chng_cnt are not required.
830                  */
831                 __atomic_store_n(h->tbl_chng_cnt,
832                                  *h->tbl_chng_cnt + 1,
833                                  __ATOMIC_RELEASE);
834                 /* The stores to sig_alt and sig_current should not
835                  * move above the store to tbl_chng_cnt.
836                  */
837                 __atomic_thread_fence(__ATOMIC_RELEASE);
838         }
839
840         curr_bkt->sig_current[curr_slot] = sig;
841         /* Release the new bucket entry */
842         __atomic_store_n(&curr_bkt->key_idx[curr_slot],
843                          new_idx,
844                          __ATOMIC_RELEASE);
845
846         __hash_rw_writer_unlock(h);
847
848         return 0;
849
850 }
851
852 /*
853  * Make space for a new key, using a BFS cuckoo search and a
854  * multi-writer-safe cuckoo move.
855  */
856 static inline int
857 rte_hash_cuckoo_make_space_mw(const struct rte_hash *h,
858                         struct rte_hash_bucket *bkt,
859                         struct rte_hash_bucket *sec_bkt,
860                         const struct rte_hash_key *key, void *data,
861                         uint16_t sig, uint32_t bucket_idx,
862                         uint32_t new_idx, int32_t *ret_val)
863 {
864         unsigned int i;
865         struct queue_node queue[RTE_HASH_BFS_QUEUE_MAX_LEN];
866         struct queue_node *tail, *head;
867         struct rte_hash_bucket *curr_bkt, *alt_bkt;
868         uint32_t cur_idx, alt_idx;
869
870         tail = queue;
871         head = queue + 1;
872         tail->bkt = bkt;
873         tail->prev = NULL;
874         tail->prev_slot = -1;
875         tail->cur_bkt_idx = bucket_idx;
876
877         /* Cuckoo bfs Search */
878         while (likely(tail != head && head <
879                                         queue + RTE_HASH_BFS_QUEUE_MAX_LEN -
880                                         RTE_HASH_BUCKET_ENTRIES)) {
881                 curr_bkt = tail->bkt;
882                 cur_idx = tail->cur_bkt_idx;
883                 for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
884                         if (curr_bkt->key_idx[i] == EMPTY_SLOT) {
885                                 int32_t ret = rte_hash_cuckoo_move_insert_mw(h,
886                                                 bkt, sec_bkt, key, data,
887                                                 tail, i, sig,
888                                                 new_idx, ret_val);
889                                 if (likely(ret != -1))
890                                         return ret;
891                         }
892
893                         /* Enqueue new node and keep prev node info */
894                         alt_idx = get_alt_bucket_index(h, cur_idx,
895                                                 curr_bkt->sig_current[i]);
896                         alt_bkt = &(h->buckets[alt_idx]);
897                         head->bkt = alt_bkt;
898                         head->cur_bkt_idx = alt_idx;
899                         head->prev = tail;
900                         head->prev_slot = i;
901                         head++;
902                 }
903                 tail++;
904         }
905
906         return -ENOSPC;
907 }
908
909 static inline int32_t
910 __rte_hash_add_key_with_hash(const struct rte_hash *h, const void *key,
911                                                 hash_sig_t sig, void *data)
912 {
913         uint16_t short_sig;
914         uint32_t prim_bucket_idx, sec_bucket_idx;
915         struct rte_hash_bucket *prim_bkt, *sec_bkt, *cur_bkt;
916         struct rte_hash_key *new_k, *keys = h->key_store;
917         void *slot_id = NULL;
918         void *ext_bkt_id = NULL;
919         uint32_t new_idx, bkt_id;
920         int ret;
921         unsigned n_slots;
922         unsigned lcore_id;
923         unsigned int i;
924         struct lcore_cache *cached_free_slots = NULL;
925         int32_t ret_val;
926         struct rte_hash_bucket *last;
927
928         short_sig = get_short_sig(sig);
929         prim_bucket_idx = get_prim_bucket_index(h, sig);
930         sec_bucket_idx = get_alt_bucket_index(h, prim_bucket_idx, short_sig);
931         prim_bkt = &h->buckets[prim_bucket_idx];
932         sec_bkt = &h->buckets[sec_bucket_idx];
933         rte_prefetch0(prim_bkt);
934         rte_prefetch0(sec_bkt);
935
936         /* Check if key is already inserted in primary location */
937         __hash_rw_writer_lock(h);
938         ret = search_and_update(h, data, key, prim_bkt, short_sig);
939         if (ret != -1) {
940                 __hash_rw_writer_unlock(h);
941                 return ret;
942         }
943
944         /* Check if key is already inserted in secondary location */
945         FOR_EACH_BUCKET(cur_bkt, sec_bkt) {
946                 ret = search_and_update(h, data, key, cur_bkt, short_sig);
947                 if (ret != -1) {
948                         __hash_rw_writer_unlock(h);
949                         return ret;
950                 }
951         }
952
953         __hash_rw_writer_unlock(h);
954
955         /* Did not find a match, so get a new slot for storing the new key */
956         if (h->use_local_cache) {
957                 lcore_id = rte_lcore_id();
958                 cached_free_slots = &h->local_free_slots[lcore_id];
959                 /* Try to get a free slot from the local cache */
960                 if (cached_free_slots->len == 0) {
961                         /* Need to get another burst of free slots from global ring */
962                         n_slots = rte_ring_mc_dequeue_burst(h->free_slots,
963                                         cached_free_slots->objs,
964                                         LCORE_CACHE_SIZE, NULL);
965                         if (n_slots == 0) {
966                                 return -ENOSPC;
967                         }
968
969                         cached_free_slots->len += n_slots;
970                 }
971
972                 /* Get a free slot from the local cache */
973                 cached_free_slots->len--;
974                 slot_id = cached_free_slots->objs[cached_free_slots->len];
975         } else {
976                 if (rte_ring_sc_dequeue(h->free_slots, &slot_id) != 0) {
977                         return -ENOSPC;
978                 }
979         }
980
981         new_k = RTE_PTR_ADD(keys, (uintptr_t)slot_id * h->key_entry_size);
982         new_idx = (uint32_t)((uintptr_t) slot_id);
983         /* Copy key */
984         memcpy(new_k->key, key, h->key_len);
985         /* Key can be of arbitrary length, so it is not possible to store
986          * it atomically. Hence the new key element's memory stores
987          * (key as well as data) should be complete before it is referenced.
988          * 'pdata' acts as the synchronization point when an existing hash
989          * entry is updated.
990          */
991         __atomic_store_n(&new_k->pdata,
992                 data,
993                 __ATOMIC_RELEASE);
994
995         /* Find an empty slot and insert */
996         ret = rte_hash_cuckoo_insert_mw(h, prim_bkt, sec_bkt, key, data,
997                                         short_sig, new_idx, &ret_val);
998         if (ret == 0)
999                 return new_idx - 1;
1000         else if (ret == 1) {
1001                 enqueue_slot_back(h, cached_free_slots, slot_id);
1002                 return ret_val;
1003         }
1004
1005         /* Primary bucket full, need to make space for new entry */
1006         ret = rte_hash_cuckoo_make_space_mw(h, prim_bkt, sec_bkt, key, data,
1007                                 short_sig, prim_bucket_idx, new_idx, &ret_val);
1008         if (ret == 0)
1009                 return new_idx - 1;
1010         else if (ret == 1) {
1011                 enqueue_slot_back(h, cached_free_slots, slot_id);
1012                 return ret_val;
1013         }
1014
1015         /* Also search secondary bucket to get better occupancy */
1016         ret = rte_hash_cuckoo_make_space_mw(h, sec_bkt, prim_bkt, key, data,
1017                                 short_sig, sec_bucket_idx, new_idx, &ret_val);
1018
1019         if (ret == 0)
1020                 return new_idx - 1;
1021         else if (ret == 1) {
1022                 enqueue_slot_back(h, cached_free_slots, slot_id);
1023                 return ret_val;
1024         }
1025
1026         /* if ext table not enabled, we failed the insertion */
1027         if (!h->ext_table_support) {
1028                 enqueue_slot_back(h, cached_free_slots, slot_id);
1029                 return ret;
1030         }
1031
1032         /* Now we need to go through the extendable buckets. The lock is
1033          * needed to protect all extendable bucket operations.
1034          */
1035         __hash_rw_writer_lock(h);
1036         /* Check for duplicates again; the key could have been inserted before the lock */
1037         ret = search_and_update(h, data, key, prim_bkt, short_sig);
1038         if (ret != -1) {
1039                 enqueue_slot_back(h, cached_free_slots, slot_id);
1040                 goto failure;
1041         }
1042
1043         FOR_EACH_BUCKET(cur_bkt, sec_bkt) {
1044                 ret = search_and_update(h, data, key, cur_bkt, short_sig);
1045                 if (ret != -1) {
1046                         enqueue_slot_back(h, cached_free_slots, slot_id);
1047                         goto failure;
1048                 }
1049         }
1050
1051         /* Search sec and ext buckets to find an empty entry to insert. */
1052         FOR_EACH_BUCKET(cur_bkt, sec_bkt) {
1053                 for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
1054                         /* Check if slot is available */
1055                         if (likely(cur_bkt->key_idx[i] == EMPTY_SLOT)) {
1056                                 cur_bkt->sig_current[i] = short_sig;
1057                                 cur_bkt->key_idx[i] = new_idx;
1058                                 __hash_rw_writer_unlock(h);
1059                                 return new_idx - 1;
1060                         }
1061                 }
1062         }
1063
1064         /* Failed to get an empty entry from extendable buckets. Link a new
1065          * extendable bucket. We first get a free bucket from ring.
1066          */
1067         if (rte_ring_sc_dequeue(h->free_ext_bkts, &ext_bkt_id) != 0) {
1068                 ret = -ENOSPC;
1069                 goto failure;
1070         }
1071
1072         bkt_id = (uint32_t)((uintptr_t)ext_bkt_id) - 1;
1073         /* Use the first location of the new bucket */
1074         (h->buckets_ext[bkt_id]).sig_current[0] = short_sig;
1075         (h->buckets_ext[bkt_id]).key_idx[0] = new_idx;
1076         /* Link the new bucket to sec bucket linked list */
1077         last = rte_hash_get_last_bkt(sec_bkt);
1078         last->next = &h->buckets_ext[bkt_id];
1079         __hash_rw_writer_unlock(h);
1080         return new_idx - 1;
1081
1082 failure:
1083         __hash_rw_writer_unlock(h);
1084         return ret;
1085
1086 }
1087
1088 int32_t
1089 rte_hash_add_key_with_hash(const struct rte_hash *h,
1090                         const void *key, hash_sig_t sig)
1091 {
1092         RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1093         return __rte_hash_add_key_with_hash(h, key, sig, 0);
1094 }
1095
1096 int32_t
1097 rte_hash_add_key(const struct rte_hash *h, const void *key)
1098 {
1099         RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1100         return __rte_hash_add_key_with_hash(h, key, rte_hash_hash(h, key), 0);
1101 }
1102
1103 int
1104 rte_hash_add_key_with_hash_data(const struct rte_hash *h,
1105                         const void *key, hash_sig_t sig, void *data)
1106 {
1107         int ret;
1108
1109         RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1110         ret = __rte_hash_add_key_with_hash(h, key, sig, data);
1111         if (ret >= 0)
1112                 return 0;
1113         else
1114                 return ret;
1115 }
1116
1117 int
1118 rte_hash_add_key_data(const struct rte_hash *h, const void *key, void *data)
1119 {
1120         int ret;
1121
1122         RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1123
1124         ret = __rte_hash_add_key_with_hash(h, key, rte_hash_hash(h, key), data);
1125         if (ret >= 0)
1126                 return 0;
1127         else
1128                 return ret;
1129 }
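/*
 * Typical add/lookup/delete flow (illustrative only; 'handle' is assumed to
 * have been created with key_len = sizeof(uint32_t), and handle_full_table()
 * and use() are hypothetical application helpers):
 *
 *   uint32_t flow = 7;
 *   void *value = (void *)(uintptr_t)42;
 *   void *found;
 *
 *   if (rte_hash_add_key_data(handle, &flow, value) < 0)
 *           handle_full_table();
 *   if (rte_hash_lookup_data(handle, &flow, &found) >= 0)
 *           use(found);          (found == value here)
 *   rte_hash_del_key(handle, &flow);
 */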
1130
1131 /* Search one bucket to find the matching key - uses rw lock */
1132 static inline int32_t
1133 search_one_bucket_l(const struct rte_hash *h, const void *key,
1134                 uint16_t sig, void **data,
1135                 const struct rte_hash_bucket *bkt)
1136 {
1137         int i;
1138         struct rte_hash_key *k, *keys = h->key_store;
1139
1140         for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
1141                 if (bkt->sig_current[i] == sig &&
1142                                 bkt->key_idx[i] != EMPTY_SLOT) {
1143                         k = (struct rte_hash_key *) ((char *)keys +
1144                                         bkt->key_idx[i] * h->key_entry_size);
1145
1146                         if (rte_hash_cmp_eq(key, k->key, h) == 0) {
1147                                 if (data != NULL)
1148                                         *data = k->pdata;
1149                                 /*
1150                                  * Return index where key is stored,
1151                                  * subtracting the first dummy index
1152                                  */
1153                                 return bkt->key_idx[i] - 1;
1154                         }
1155                 }
1156         }
1157         return -1;
1158 }
1159
1160 /* Search one bucket to find the matching key */
1161 static inline int32_t
1162 search_one_bucket_lf(const struct rte_hash *h, const void *key, uint16_t sig,
1163                         void **data, const struct rte_hash_bucket *bkt)
1164 {
1165         int i;
1166         uint32_t key_idx;
1167         void *pdata;
1168         struct rte_hash_key *k, *keys = h->key_store;
1169
1170         for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
1171                 key_idx = __atomic_load_n(&bkt->key_idx[i],
1172                                           __ATOMIC_ACQUIRE);
1173                 if (bkt->sig_current[i] == sig && key_idx != EMPTY_SLOT) {
1174                         k = (struct rte_hash_key *) ((char *)keys +
1175                                         key_idx * h->key_entry_size);
1176                         pdata = __atomic_load_n(&k->pdata,
1177                                         __ATOMIC_ACQUIRE);
1178
1179                         if (rte_hash_cmp_eq(key, k->key, h) == 0) {
1180                                 if (data != NULL)
1181                                         *data = pdata;
1182                                 /*
1183                                  * Return index where key is stored,
1184                                  * subtracting the first dummy index
1185                                  */
1186                                 return key_idx - 1;
1187                         }
1188                 }
1189         }
1190         return -1;
1191 }
1192
1193 static inline int32_t
1194 __rte_hash_lookup_with_hash_l(const struct rte_hash *h, const void *key,
1195                                 hash_sig_t sig, void **data)
1196 {
1197         uint32_t prim_bucket_idx, sec_bucket_idx;
1198         struct rte_hash_bucket *bkt, *cur_bkt;
1199         int ret;
1200         uint16_t short_sig;
1201
1202         short_sig = get_short_sig(sig);
1203         prim_bucket_idx = get_prim_bucket_index(h, sig);
1204         sec_bucket_idx = get_alt_bucket_index(h, prim_bucket_idx, short_sig);
1205
1206         bkt = &h->buckets[prim_bucket_idx];
1207
1208         __hash_rw_reader_lock(h);
1209
1210         /* Check if key is in primary location */
1211         ret = search_one_bucket_l(h, key, short_sig, data, bkt);
1212         if (ret != -1) {
1213                 __hash_rw_reader_unlock(h);
1214                 return ret;
1215         }
1216         /* Calculate secondary hash */
1217         bkt = &h->buckets[sec_bucket_idx];
1218
1219         /* Check if key is in secondary location */
1220         FOR_EACH_BUCKET(cur_bkt, bkt) {
1221                 ret = search_one_bucket_l(h, key, short_sig,
1222                                         data, cur_bkt);
1223                 if (ret != -1) {
1224                         __hash_rw_reader_unlock(h);
1225                         return ret;
1226                 }
1227         }
1228
1229         __hash_rw_reader_unlock(h);
1230
1231         return -ENOENT;
1232 }
1233
1234 static inline int32_t
1235 __rte_hash_lookup_with_hash_lf(const struct rte_hash *h, const void *key,
1236                                         hash_sig_t sig, void **data)
1237 {
1238         uint32_t prim_bucket_idx, sec_bucket_idx;
1239         struct rte_hash_bucket *bkt, *cur_bkt;
1240         uint32_t cnt_b, cnt_a;
1241         int ret;
1242         uint16_t short_sig;
1243
1244         short_sig = get_short_sig(sig);
1245         prim_bucket_idx = get_prim_bucket_index(h, sig);
1246         sec_bucket_idx = get_alt_bucket_index(h, prim_bucket_idx, short_sig);
1247
1248         do {
1249                 /* Load the table change counter before the lookup
1250                  * starts. Acquire semantics will make sure that
1251                  * loads in search_one_bucket are not hoisted.
1252                  */
1253                 cnt_b = __atomic_load_n(h->tbl_chng_cnt,
1254                                 __ATOMIC_ACQUIRE);
1255
1256                 /* Check if key is in primary location */
1257                 bkt = &h->buckets[prim_bucket_idx];
1258                 ret = search_one_bucket_lf(h, key, short_sig, data, bkt);
1259                 if (ret != -1) {
1260                         __hash_rw_reader_unlock(h);
1261                         return ret;
1262                 }
1263                 /* Calculate secondary hash */
1264                 bkt = &h->buckets[sec_bucket_idx];
1265
1266                 /* Check if key is in secondary location */
1267                 FOR_EACH_BUCKET(cur_bkt, bkt) {
1268                         ret = search_one_bucket_lf(h, key, short_sig,
1269                                                 data, cur_bkt);
1270                         if (ret != -1) {
1271                                 __hash_rw_reader_unlock(h);
1272                                 return ret;
1273                         }
1274                 }
1275
1276                 /* The loads of sig_current in search_one_bucket
1277                  * should not move below the load from tbl_chng_cnt.
1278                  */
1279                 __atomic_thread_fence(__ATOMIC_ACQUIRE);
1280                 /* Re-read the table change counter to check if the
1281                  * table has changed during search. If yes, re-do
1282                  * the search.
1283                  * This load should not get hoisted. The load
1284                  * acquires on cnt_b, key index in primary bucket
1285                  * and key index in secondary bucket will make sure
1286                  * that it does not get hoisted.
1287                  */
1288                 cnt_a = __atomic_load_n(h->tbl_chng_cnt,
1289                                         __ATOMIC_ACQUIRE);
1290         } while (cnt_b != cnt_a);
1291
1292         return -ENOENT;
1293 }
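/*
 * Note (illustrative summary, not library code): the lock-free lookup above
 * pairs with the writer in rte_hash_cuckoo_move_insert_mw() in a
 * sequence-counter-like protocol:
 *
 *   writer (per displaced entry)               reader
 *   ----------------------------               ------
 *   store tbl_chng_cnt + 1 (release)           cnt_b = load tbl_chng_cnt (acquire)
 *   move sig/key_idx to the new slot (release) search primary + secondary buckets
 *                                              cnt_a = load tbl_chng_cnt (acquire)
 *                                              retry the lookup if cnt_a != cnt_b
 */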
1294
1295 static inline int32_t
1296 __rte_hash_lookup_with_hash(const struct rte_hash *h, const void *key,
1297                                         hash_sig_t sig, void **data)
1298 {
1299         if (h->readwrite_concur_lf_support)
1300                 return __rte_hash_lookup_with_hash_lf(h, key, sig, data);
1301         else
1302                 return __rte_hash_lookup_with_hash_l(h, key, sig, data);
1303 }
1304
1305 int32_t
1306 rte_hash_lookup_with_hash(const struct rte_hash *h,
1307                         const void *key, hash_sig_t sig)
1308 {
1309         RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1310         return __rte_hash_lookup_with_hash(h, key, sig, NULL);
1311 }
1312
1313 int32_t
1314 rte_hash_lookup(const struct rte_hash *h, const void *key)
1315 {
1316         RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1317         return __rte_hash_lookup_with_hash(h, key, rte_hash_hash(h, key), NULL);
1318 }
1319
1320 int
1321 rte_hash_lookup_with_hash_data(const struct rte_hash *h,
1322                         const void *key, hash_sig_t sig, void **data)
1323 {
1324         RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1325         return __rte_hash_lookup_with_hash(h, key, sig, data);
1326 }
1327
1328 int
1329 rte_hash_lookup_data(const struct rte_hash *h, const void *key, void **data)
1330 {
1331         RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1332         return __rte_hash_lookup_with_hash(h, key, rte_hash_hash(h, key), data);
1333 }
1334
1335 static inline void
1336 remove_entry(const struct rte_hash *h, struct rte_hash_bucket *bkt, unsigned i)
1337 {
1338         unsigned lcore_id, n_slots;
1339         struct lcore_cache *cached_free_slots;
1340
1341         if (h->use_local_cache) {
1342                 lcore_id = rte_lcore_id();
1343                 cached_free_slots = &h->local_free_slots[lcore_id];
1344                 /* Cache full: push the cached free slots back to the global ring. */
1345                 if (cached_free_slots->len == LCORE_CACHE_SIZE) {
1346                         /* Need to enqueue the free slots in global ring. */
1347                         n_slots = rte_ring_mp_enqueue_burst(h->free_slots,
1348                                                 cached_free_slots->objs,
1349                                                 LCORE_CACHE_SIZE, NULL);
1350                         cached_free_slots->len -= n_slots;
1351                 }
1352                 /* Put index of new free slot in cache. */
1353                 cached_free_slots->objs[cached_free_slots->len] =
1354                                 (void *)((uintptr_t)bkt->key_idx[i]);
1355                 cached_free_slots->len++;
1356         } else {
1357                 rte_ring_sp_enqueue(h->free_slots,
1358                                 (void *)((uintptr_t)bkt->key_idx[i]));
1359         }
1360 }
1361
1362 /* Compact the linked list by moving key from last entry in linked list to the
1363  * empty slot.
1364  */
1365 static inline void
1366 __rte_hash_compact_ll(struct rte_hash_bucket *cur_bkt, int pos) {
1367         int i;
1368         struct rte_hash_bucket *last_bkt;
1369
1370         if (!cur_bkt->next)
1371                 return;
1372
1373         last_bkt = rte_hash_get_last_bkt(cur_bkt);
1374
1375         for (i = RTE_HASH_BUCKET_ENTRIES - 1; i >= 0; i--) {
1376                 if (last_bkt->key_idx[i] != EMPTY_SLOT) {
1377                         cur_bkt->key_idx[pos] = last_bkt->key_idx[i];
1378                         cur_bkt->sig_current[pos] = last_bkt->sig_current[i];
1379                         last_bkt->sig_current[i] = NULL_SIGNATURE;
1380                         last_bkt->key_idx[i] = EMPTY_SLOT;
1381                         return;
1382                 }
1383         }
1384 }
1385
1386 /* Search one bucket and remove the matched key.
1387  * Writer is expected to hold the lock while calling this
1388  * function.
1389  */
1390 static inline int32_t
1391 search_and_remove(const struct rte_hash *h, const void *key,
1392                         struct rte_hash_bucket *bkt, uint16_t sig, int *pos)
1393 {
1394         struct rte_hash_key *k, *keys = h->key_store;
1395         unsigned int i;
1396         uint32_t key_idx;
1397
1398         /* Check if key is in bucket */
1399         for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
1400                 key_idx = __atomic_load_n(&bkt->key_idx[i],
1401                                           __ATOMIC_ACQUIRE);
1402                 if (bkt->sig_current[i] == sig && key_idx != EMPTY_SLOT) {
1403                         k = (struct rte_hash_key *) ((char *)keys +
1404                                         key_idx * h->key_entry_size);
1405                         if (rte_hash_cmp_eq(key, k->key, h) == 0) {
1406                                 bkt->sig_current[i] = NULL_SIGNATURE;
1407                                 /* Free the key store index if
1408                                  * no_free_on_del is disabled.
1409                                  */
1410                                 if (!h->no_free_on_del)
1411                                         remove_entry(h, bkt, i);
1412
1413                                 __atomic_store_n(&bkt->key_idx[i],
1414                                                  EMPTY_SLOT,
1415                                                  __ATOMIC_RELEASE);
1416
1417                                 *pos = i;
1418                                 /*
1419                                  * Return index where key is stored,
1420                                  * subtracting the first dummy index
1421                                  */
1422                                 return key_idx - 1;
1423                         }
1424                 }
1425         }
1426         return -1;
1427 }
1428
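/* Delete a key: search the primary bucket first, then the secondary bucket and
 * its chain of extendable buckets. On a successful removal the bucket chain is
 * compacted, and if the last extendable bucket in the chain becomes empty it is
 * unlinked and returned to the free ring so it can be reused.
 */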
1429 static inline int32_t
1430 __rte_hash_del_key_with_hash(const struct rte_hash *h, const void *key,
1431                                                 hash_sig_t sig)
1432 {
1433         uint32_t prim_bucket_idx, sec_bucket_idx;
1434         struct rte_hash_bucket *prim_bkt, *sec_bkt, *prev_bkt, *last_bkt;
1435         struct rte_hash_bucket *cur_bkt;
1436         int pos;
1437         int32_t ret, i;
1438         uint16_t short_sig;
1439
1440         short_sig = get_short_sig(sig);
1441         prim_bucket_idx = get_prim_bucket_index(h, sig);
1442         sec_bucket_idx = get_alt_bucket_index(h, prim_bucket_idx, short_sig);
1443         prim_bkt = &h->buckets[prim_bucket_idx];
1444
1445         __hash_rw_writer_lock(h);
1446         /* look for key in primary bucket */
1447         ret = search_and_remove(h, key, prim_bkt, short_sig, &pos);
1448         if (ret != -1) {
1449                 __rte_hash_compact_ll(prim_bkt, pos);
1450                 last_bkt = prim_bkt->next;
1451                 prev_bkt = prim_bkt;
1452                 goto return_bkt;
1453         }
1454
1455         /* Search the secondary bucket and its extendable chain */
1456         sec_bkt = &h->buckets[sec_bucket_idx];
1457
1458         FOR_EACH_BUCKET(cur_bkt, sec_bkt) {
1459                 ret = search_and_remove(h, key, cur_bkt, short_sig, &pos);
1460                 if (ret != -1) {
1461                         __rte_hash_compact_ll(cur_bkt, pos);
1462                         last_bkt = sec_bkt->next;
1463                         prev_bkt = sec_bkt;
1464                         goto return_bkt;
1465                 }
1466         }
1467
1468         __hash_rw_writer_unlock(h);
1469         return -ENOENT;
1470
1471 /* Walk to the last bucket in the chain to check whether it is empty and can be recycled */
1472 return_bkt:
1473         if (!last_bkt) {
1474                 __hash_rw_writer_unlock(h);
1475                 return ret;
1476         }
1477         while (last_bkt->next) {
1478                 prev_bkt = last_bkt;
1479                 last_bkt = last_bkt->next;
1480         }
1481
1482         for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
1483                 if (last_bkt->key_idx[i] != EMPTY_SLOT)
1484                         break;
1485         }
1486         /* The last bucket is empty: unlink it and return it to the free ring */
1487         if (i == RTE_HASH_BUCKET_ENTRIES) {
1488                 prev_bkt->next = last_bkt->next = NULL;
1489                 uint32_t index = last_bkt - h->buckets_ext + 1;
1490                 rte_ring_sp_enqueue(h->free_ext_bkts, (void *)(uintptr_t)index);
1491         }
1492
1493         __hash_rw_writer_unlock(h);
1494         return ret;
1495 }
1496
1497 int32_t
1498 rte_hash_del_key_with_hash(const struct rte_hash *h,
1499                         const void *key, hash_sig_t sig)
1500 {
1501         RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1502         return __rte_hash_del_key_with_hash(h, key, sig);
1503 }
1504
1505 int32_t
1506 rte_hash_del_key(const struct rte_hash *h, const void *key)
1507 {
1508         RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1509         return __rte_hash_del_key_with_hash(h, key, rte_hash_hash(h, key));
1510 }
1511
1512 int
1513 rte_hash_get_key_with_position(const struct rte_hash *h, const int32_t position,
1514                                void **key)
1515 {
1516         RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1517
1518         struct rte_hash_key *k, *keys = h->key_store;
1519         k = (struct rte_hash_key *) ((char *) keys + (position + 1) *
1520                                      h->key_entry_size);
1521         *key = k->key;
1522
1523         if (position !=
1524             __rte_hash_lookup_with_hash(h, *key, rte_hash_hash(h, *key),
1525                                         NULL)) {
1526                 return -ENOENT;
1527         }
1528
1529         return 0;
1530 }
1531
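/* When the table is created with the "no free on delete" behaviour
 * (RTE_HASH_EXTRA_FLAGS_NO_FREE_ON_DEL), rte_hash_del_key() only clears the
 * bucket entry; the key-store slot must later be released explicitly once no
 * reader can still be referencing it. Note that the free-slots ring stores
 * key_idx values, which are the application-visible positions plus one
 * (index 0 is a dummy slot).
 *
 * Illustrative sequence (editor's sketch, not part of the library;
 * wait_for_readers() stands for whatever quiescence mechanism the
 * application uses):
 *
 *     int32_t pos = rte_hash_del_key(h, &key);
 *     if (pos >= 0) {
 *             wait_for_readers();                    // hypothetical
 *             rte_hash_free_key_with_position(h, pos);
 *     }
 */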
1532 int __rte_experimental
1533 rte_hash_free_key_with_position(const struct rte_hash *h,
1534                                 const int32_t position)
1535 {
1536         RETURN_IF_TRUE(((h == NULL) || (position == EMPTY_SLOT)), -EINVAL);
1537
1538         unsigned int lcore_id, n_slots;
1539         struct lcore_cache *cached_free_slots;
1540         const int32_t total_entries = h->num_buckets * RTE_HASH_BUCKET_ENTRIES;
1541
1542         /* Out of bounds */
1543         if (position >= total_entries)
1544                 return -EINVAL;
1545
1546         if (h->use_local_cache) {
1547                 lcore_id = rte_lcore_id();
1548                 cached_free_slots = &h->local_free_slots[lcore_id];
1549                 /* Local cache is full: return a burst of slots to the global ring. */
1550                 if (cached_free_slots->len == LCORE_CACHE_SIZE) {
1551                         /* Need to enqueue the free slots in global ring. */
1552                         n_slots = rte_ring_mp_enqueue_burst(h->free_slots,
1553                                                 cached_free_slots->objs,
1554                                                 LCORE_CACHE_SIZE, NULL);
1555                         cached_free_slots->len -= n_slots;
1556                 }
1557                 /* Put index of new free slot in cache. */
1558                 cached_free_slots->objs[cached_free_slots->len] =
1559                                         (void *)((uintptr_t)(position + 1));
1560                 cached_free_slots->len++;
1561         } else {
1562                 rte_ring_sp_enqueue(h->free_slots,
1563                                 (void *)((uintptr_t)(position + 1)));
1564         }
1565
1566         return 0;
1567 }
1568
1569 static inline void
1570 compare_signatures(uint32_t *prim_hash_matches, uint32_t *sec_hash_matches,
1571                         const struct rte_hash_bucket *prim_bkt,
1572                         const struct rte_hash_bucket *sec_bkt,
1573                         uint16_t sig,
1574                         enum rte_hash_sig_compare_function sig_cmp_fn)
1575 {
1576         unsigned int i;
1577
1578         /* In the match mask, the first bit of every two-bit pair indicates a match */
1579         switch (sig_cmp_fn) {
1580 #ifdef RTE_MACHINE_CPUFLAG_SSE2
1581         case RTE_HASH_COMPARE_SSE:
1582                 /* Compare all signatures in the bucket */
1583                 *prim_hash_matches = _mm_movemask_epi8(_mm_cmpeq_epi16(
1584                                 _mm_load_si128(
1585                                         (__m128i const *)prim_bkt->sig_current),
1586                                 _mm_set1_epi16(sig)));
1587                 /* Compare all signatures in the bucket */
1588                 *sec_hash_matches = _mm_movemask_epi8(_mm_cmpeq_epi16(
1589                                 _mm_load_si128(
1590                                         (__m128i const *)sec_bkt->sig_current),
1591                                 _mm_set1_epi16(sig)));
1592                 break;
1593 #endif
1594         default:
1595                 for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
1596                         *prim_hash_matches |=
1597                                 ((sig == prim_bkt->sig_current[i]) << (i << 1));
1598                         *sec_hash_matches |=
1599                                 ((sig == sec_bkt->sig_current[i]) << (i << 1));
1600                 }
1601         }
1602 }
1603
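/* Both compare_signatures() paths produce a hit mask with two bits per bucket
 * entry: entry i is reported in bits (2*i) and (2*i + 1) (the scalar path only
 * sets the even bit). The bulk lookup loops below therefore recover the entry
 * index with __builtin_ctzl(mask) >> 1 and clear a processed hit with
 * mask &= ~(3ULL << (index << 1)).
 */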
1604 #define PREFETCH_OFFSET 4
1605 static inline void
1606 __rte_hash_lookup_bulk_l(const struct rte_hash *h, const void **keys,
1607                         int32_t num_keys, int32_t *positions,
1608                         uint64_t *hit_mask, void *data[])
1609 {
1610         uint64_t hits = 0;
1611         int32_t i;
1612         int32_t ret;
1613         uint32_t prim_hash[RTE_HASH_LOOKUP_BULK_MAX];
1614         uint32_t prim_index[RTE_HASH_LOOKUP_BULK_MAX];
1615         uint32_t sec_index[RTE_HASH_LOOKUP_BULK_MAX];
1616         uint16_t sig[RTE_HASH_LOOKUP_BULK_MAX];
1617         const struct rte_hash_bucket *primary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
1618         const struct rte_hash_bucket *secondary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
1619         uint32_t prim_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
1620         uint32_t sec_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
1621         struct rte_hash_bucket *cur_bkt, *next_bkt;
1622
1623         /* Prefetch first keys */
1624         for (i = 0; i < PREFETCH_OFFSET && i < num_keys; i++)
1625                 rte_prefetch0(keys[i]);
1626
1627         /*
1628          * Prefetch the rest of the keys, compute each key's primary and
1629          * secondary bucket index and prefetch those buckets
1630          */
1631         for (i = 0; i < (num_keys - PREFETCH_OFFSET); i++) {
1632                 rte_prefetch0(keys[i + PREFETCH_OFFSET]);
1633
1634                 prim_hash[i] = rte_hash_hash(h, keys[i]);
1635
1636                 sig[i] = get_short_sig(prim_hash[i]);
1637                 prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
1638                 sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);
1639
1640                 primary_bkt[i] = &h->buckets[prim_index[i]];
1641                 secondary_bkt[i] = &h->buckets[sec_index[i]];
1642
1643                 rte_prefetch0(primary_bkt[i]);
1644                 rte_prefetch0(secondary_bkt[i]);
1645         }
1646
1647         /* Calculate and prefetch rest of the buckets */
1648         for (; i < num_keys; i++) {
1649                 prim_hash[i] = rte_hash_hash(h, keys[i]);
1650
1651                 sig[i] = get_short_sig(prim_hash[i]);
1652                 prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
1653                 sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);
1654
1655                 primary_bkt[i] = &h->buckets[prim_index[i]];
1656                 secondary_bkt[i] = &h->buckets[sec_index[i]];
1657
1658                 rte_prefetch0(primary_bkt[i]);
1659                 rte_prefetch0(secondary_bkt[i]);
1660         }
1661
1662         __hash_rw_reader_lock(h);
1663
1664         /* Compare signatures and prefetch key slot of first hit */
1665         for (i = 0; i < num_keys; i++) {
1666                 compare_signatures(&prim_hitmask[i], &sec_hitmask[i],
1667                         primary_bkt[i], secondary_bkt[i],
1668                         sig[i], h->sig_cmp_fn);
1669
1670                 if (prim_hitmask[i]) {
1671                         uint32_t first_hit =
1672                                         __builtin_ctzl(prim_hitmask[i])
1673                                         >> 1;
1674                         uint32_t key_idx =
1675                                 primary_bkt[i]->key_idx[first_hit];
1676                         const struct rte_hash_key *key_slot =
1677                                 (const struct rte_hash_key *)(
1678                                 (const char *)h->key_store +
1679                                 key_idx * h->key_entry_size);
1680                         rte_prefetch0(key_slot);
1681                         continue;
1682                 }
1683
1684                 if (sec_hitmask[i]) {
1685                         uint32_t first_hit =
1686                                         __builtin_ctzl(sec_hitmask[i])
1687                                         >> 1;
1688                         uint32_t key_idx =
1689                                 secondary_bkt[i]->key_idx[first_hit];
1690                         const struct rte_hash_key *key_slot =
1691                                 (const struct rte_hash_key *)(
1692                                 (const char *)h->key_store +
1693                                 key_idx * h->key_entry_size);
1694                         rte_prefetch0(key_slot);
1695                 }
1696         }
1697
1698         /* Compare keys; check hits in the primary bucket first */
1699         for (i = 0; i < num_keys; i++) {
1700                 positions[i] = -ENOENT;
1701                 while (prim_hitmask[i]) {
1702                         uint32_t hit_index =
1703                                         __builtin_ctzl(prim_hitmask[i])
1704                                         >> 1;
1705                         uint32_t key_idx =
1706                                 primary_bkt[i]->key_idx[hit_index];
1707                         const struct rte_hash_key *key_slot =
1708                                 (const struct rte_hash_key *)(
1709                                 (const char *)h->key_store +
1710                                 key_idx * h->key_entry_size);
1711
1712                         /*
1713                          * If key index is 0, do not compare key,
1714                          * as it is checking the dummy slot
1715                          */
1716                         if (!!key_idx &
1717                                 !rte_hash_cmp_eq(
1718                                         key_slot->key, keys[i], h)) {
1719                                 if (data != NULL)
1720                                         data[i] = key_slot->pdata;
1721
1722                                 hits |= 1ULL << i;
1723                                 positions[i] = key_idx - 1;
1724                                 goto next_key;
1725                         }
1726                         prim_hitmask[i] &= ~(3ULL << (hit_index << 1));
1727                 }
1728
1729                 while (sec_hitmask[i]) {
1730                         uint32_t hit_index =
1731                                         __builtin_ctzl(sec_hitmask[i])
1732                                         >> 1;
1733                         uint32_t key_idx =
1734                                 secondary_bkt[i]->key_idx[hit_index];
1735                         const struct rte_hash_key *key_slot =
1736                                 (const struct rte_hash_key *)(
1737                                 (const char *)h->key_store +
1738                                 key_idx * h->key_entry_size);
1739
1740                         /*
1741                          * If key index is 0, do not compare key,
1742                          * as it is checking the dummy slot
1743                          */
1744
1745                         if (!!key_idx &
1746                                 !rte_hash_cmp_eq(
1747                                         key_slot->key, keys[i], h)) {
1748                                 if (data != NULL)
1749                                         data[i] = key_slot->pdata;
1750
1751                                 hits |= 1ULL << i;
1752                                 positions[i] = key_idx - 1;
1753                                 goto next_key;
1754                         }
1755                         sec_hitmask[i] &= ~(3ULL << (hit_index << 1));
1756                 }
1757 next_key:
1758                 continue;
1759         }
1760
1761         /* All keys found, or no ext table support: no need to search ext buckets */
1762         if ((hits == ((1ULL << num_keys) - 1)) || !h->ext_table_support) {
1763                 if (hit_mask != NULL)
1764                         *hit_mask = hits;
1765                 __hash_rw_reader_unlock(h);
1766                 return;
1767         }
1768
1769         /* Check the extendable buckets for the remaining keys */
1770         for (i = 0; i < num_keys; i++) {
1771                 if ((hits & (1ULL << i)) != 0)
1772                         continue;
1773                 next_bkt = secondary_bkt[i]->next;
1774                 FOR_EACH_BUCKET(cur_bkt, next_bkt) {
1775                         if (data != NULL)
1776                                 ret = search_one_bucket_l(h, keys[i],
1777                                                 sig[i], &data[i], cur_bkt);
1778                         else
1779                                 ret = search_one_bucket_l(h, keys[i],
1780                                                 sig[i], NULL, cur_bkt);
1781                         if (ret != -1) {
1782                                 positions[i] = ret;
1783                                 hits |= 1ULL << i;
1784                                 break;
1785                         }
1786                 }
1787         }
1788
1789         __hash_rw_reader_unlock(h);
1790
1791         if (hit_mask != NULL)
1792                 *hit_mask = hits;
1793 }
1794
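/* Lock-free bulk lookup. Instead of taking the reader lock, the signature and
 * key comparisons are wrapped in a retry loop keyed on the table change
 * counter (tbl_chng_cnt): if a writer moved keys while the lookup was in
 * flight, the counter differs and the whole pass is redone. Data pointers are
 * loaded with acquire semantics before the keys are compared.
 */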
1795 static inline void
1796 __rte_hash_lookup_bulk_lf(const struct rte_hash *h, const void **keys,
1797                         int32_t num_keys, int32_t *positions,
1798                         uint64_t *hit_mask, void *data[])
1799 {
1800         uint64_t hits = 0;
1801         int32_t i;
1802         int32_t ret;
1803         uint32_t prim_hash[RTE_HASH_LOOKUP_BULK_MAX];
1804         uint32_t prim_index[RTE_HASH_LOOKUP_BULK_MAX];
1805         uint32_t sec_index[RTE_HASH_LOOKUP_BULK_MAX];
1806         uint16_t sig[RTE_HASH_LOOKUP_BULK_MAX];
1807         const struct rte_hash_bucket *primary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
1808         const struct rte_hash_bucket *secondary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
1809         uint32_t prim_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
1810         uint32_t sec_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
1811         struct rte_hash_bucket *cur_bkt, *next_bkt;
1812         void *pdata[RTE_HASH_LOOKUP_BULK_MAX];
1813         uint32_t cnt_b, cnt_a;
1814
1815         /* Prefetch first keys */
1816         for (i = 0; i < PREFETCH_OFFSET && i < num_keys; i++)
1817                 rte_prefetch0(keys[i]);
1818
1819         /*
1820          * Prefetch the rest of the keys, compute each key's primary and
1821          * secondary bucket index and prefetch those buckets
1822          */
1823         for (i = 0; i < (num_keys - PREFETCH_OFFSET); i++) {
1824                 rte_prefetch0(keys[i + PREFETCH_OFFSET]);
1825
1826                 prim_hash[i] = rte_hash_hash(h, keys[i]);
1827
1828                 sig[i] = get_short_sig(prim_hash[i]);
1829                 prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
1830                 sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);
1831
1832                 primary_bkt[i] = &h->buckets[prim_index[i]];
1833                 secondary_bkt[i] = &h->buckets[sec_index[i]];
1834
1835                 rte_prefetch0(primary_bkt[i]);
1836                 rte_prefetch0(secondary_bkt[i]);
1837         }
1838
1839         /* Calculate and prefetch rest of the buckets */
1840         for (; i < num_keys; i++) {
1841                 prim_hash[i] = rte_hash_hash(h, keys[i]);
1842
1843                 sig[i] = get_short_sig(prim_hash[i]);
1844                 prim_index[i] = get_prim_bucket_index(h, prim_hash[i]);
1845                 sec_index[i] = get_alt_bucket_index(h, prim_index[i], sig[i]);
1846
1847                 primary_bkt[i] = &h->buckets[prim_index[i]];
1848                 secondary_bkt[i] = &h->buckets[sec_index[i]];
1849
1850                 rte_prefetch0(primary_bkt[i]);
1851                 rte_prefetch0(secondary_bkt[i]);
1852         }
1853
1854         do {
1855                 /* Load the table change counter before the lookup
1856                  * starts. Acquire semantics will make sure that
1857                  * loads in compare_signatures are not hoisted.
1858                  */
1859                 cnt_b = __atomic_load_n(h->tbl_chng_cnt,
1860                                         __ATOMIC_ACQUIRE);
1861
1862                 /* Compare signatures and prefetch key slot of first hit */
1863                 for (i = 0; i < num_keys; i++) {
1864                         compare_signatures(&prim_hitmask[i], &sec_hitmask[i],
1865                                 primary_bkt[i], secondary_bkt[i],
1866                                 sig[i], h->sig_cmp_fn);
1867
1868                         if (prim_hitmask[i]) {
1869                                 uint32_t first_hit =
1870                                                 __builtin_ctzl(prim_hitmask[i])
1871                                                 >> 1;
1872                                 uint32_t key_idx =
1873                                         primary_bkt[i]->key_idx[first_hit];
1874                                 const struct rte_hash_key *key_slot =
1875                                         (const struct rte_hash_key *)(
1876                                         (const char *)h->key_store +
1877                                         key_idx * h->key_entry_size);
1878                                 rte_prefetch0(key_slot);
1879                                 continue;
1880                         }
1881
1882                         if (sec_hitmask[i]) {
1883                                 uint32_t first_hit =
1884                                                 __builtin_ctzl(sec_hitmask[i])
1885                                                 >> 1;
1886                                 uint32_t key_idx =
1887                                         secondary_bkt[i]->key_idx[first_hit];
1888                                 const struct rte_hash_key *key_slot =
1889                                         (const struct rte_hash_key *)(
1890                                         (const char *)h->key_store +
1891                                         key_idx * h->key_entry_size);
1892                                 rte_prefetch0(key_slot);
1893                         }
1894                 }
1895
1896                 /* Compare keys; check hits in the primary bucket first */
1897                 for (i = 0; i < num_keys; i++) {
1898                         positions[i] = -ENOENT;
1899                         while (prim_hitmask[i]) {
1900                                 uint32_t hit_index =
1901                                                 __builtin_ctzl(prim_hitmask[i])
1902                                                 >> 1;
1903                                 uint32_t key_idx =
1904                                 __atomic_load_n(
1905                                         &primary_bkt[i]->key_idx[hit_index],
1906                                         __ATOMIC_ACQUIRE);
1907                                 const struct rte_hash_key *key_slot =
1908                                         (const struct rte_hash_key *)(
1909                                         (const char *)h->key_store +
1910                                         key_idx * h->key_entry_size);
1911
1912                                 if (key_idx != EMPTY_SLOT)
1913                                         pdata[i] = __atomic_load_n(
1914                                                         &key_slot->pdata,
1915                                                         __ATOMIC_ACQUIRE);
1916                                 /*
1917                                  * If key index is 0, do not compare key,
1918                                  * as it is checking the dummy slot
1919                                  */
1920                                 if (!!key_idx &
1921                                         !rte_hash_cmp_eq(
1922                                                 key_slot->key, keys[i], h)) {
1923                                         if (data != NULL)
1924                                                 data[i] = pdata[i];
1925
1926                                         hits |= 1ULL << i;
1927                                         positions[i] = key_idx - 1;
1928                                         goto next_key;
1929                                 }
1930                                 prim_hitmask[i] &= ~(3ULL << (hit_index << 1));
1931                         }
1932
1933                         while (sec_hitmask[i]) {
1934                                 uint32_t hit_index =
1935                                                 __builtin_ctzl(sec_hitmask[i])
1936                                                 >> 1;
1937                                 uint32_t key_idx =
1938                                 __atomic_load_n(
1939                                         &secondary_bkt[i]->key_idx[hit_index],
1940                                         __ATOMIC_ACQUIRE);
1941                                 const struct rte_hash_key *key_slot =
1942                                         (const struct rte_hash_key *)(
1943                                         (const char *)h->key_store +
1944                                         key_idx * h->key_entry_size);
1945
1946                                 if (key_idx != EMPTY_SLOT)
1947                                         pdata[i] = __atomic_load_n(
1948                                                         &key_slot->pdata,
1949                                                         __ATOMIC_ACQUIRE);
1950                                 /*
1951                                  * If key index is 0, do not compare key,
1952                                  * as it is checking the dummy slot
1953                                  */
1954
1955                                 if (!!key_idx &
1956                                         !rte_hash_cmp_eq(
1957                                                 key_slot->key, keys[i], h)) {
1958                                         if (data != NULL)
1959                                                 data[i] = pdata[i];
1960
1961                                         hits |= 1ULL << i;
1962                                         positions[i] = key_idx - 1;
1963                                         goto next_key;
1964                                 }
1965                                 sec_hitmask[i] &= ~(3ULL << (hit_index << 1));
1966                         }
1967 next_key:
1968                         continue;
1969                 }
1970
1971                 /* The loads of sig_current in compare_signatures
1972                  * should not move below the load from tbl_chng_cnt.
1973                  */
1974                 __atomic_thread_fence(__ATOMIC_ACQUIRE);
1975                 /* Re-read the table change counter to check if the
1976                  * table has changed during search. If yes, re-do
1977                  * the search.
1978                  * This load should not get hoisted. The acquire
1979                  * loads of cnt_b and of the primary and secondary
1980                  * key indexes make sure that it does not get
1981                  * hoisted.
1982                  */
1983                 cnt_a = __atomic_load_n(h->tbl_chng_cnt,
1984                                         __ATOMIC_ACQUIRE);
1985         } while (cnt_b != cnt_a);
1986
1987         /* All keys found, or no ext table support: no need to search ext buckets */
1988         if ((hits == ((1ULL << num_keys) - 1)) || !h->ext_table_support) {
1989                 if (hit_mask != NULL)
1990                         *hit_mask = hits;
1992                 return;
1993         }
1994
1995         /* Check the extendable buckets for the remaining keys */
1996         for (i = 0; i < num_keys; i++) {
1997                 if ((hits & (1ULL << i)) != 0)
1998                         continue;
1999                 next_bkt = secondary_bkt[i]->next;
2000                 FOR_EACH_BUCKET(cur_bkt, next_bkt) {
2001                         if (data != NULL)
2002                                 ret = search_one_bucket_lf(h, keys[i],
2003                                                 sig[i], &data[i], cur_bkt);
2004                         else
2005                                 ret = search_one_bucket_lf(h, keys[i],
2006                                                 sig[i], NULL, cur_bkt);
2007                         if (ret != -1) {
2008                                 positions[i] = ret;
2009                                 hits |= 1ULL << i;
2010                                 break;
2011                         }
2012                 }
2013         }
2014
2015         if (hit_mask != NULL)
2016                 *hit_mask = hits;
2017 }
2018
2019 static inline void
2020 __rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys,
2021                         int32_t num_keys, int32_t *positions,
2022                         uint64_t *hit_mask, void *data[])
2023 {
2024         if (h->readwrite_concur_lf_support)
2025                 return __rte_hash_lookup_bulk_lf(h, keys, num_keys,
2026                                                 positions, hit_mask, data);
2027         else
2028                 return __rte_hash_lookup_bulk_l(h, keys, num_keys,
2029                                                 positions, hit_mask, data);
2030 }
2031
2032 int
2033 rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys,
2034                       uint32_t num_keys, int32_t *positions)
2035 {
2036         RETURN_IF_TRUE(((h == NULL) || (keys == NULL) || (num_keys == 0) ||
2037                         (num_keys > RTE_HASH_LOOKUP_BULK_MAX) ||
2038                         (positions == NULL)), -EINVAL);
2039
2040         __rte_hash_lookup_bulk(h, keys, num_keys, positions, NULL, NULL);
2041         return 0;
2042 }
2043
2044 int
2045 rte_hash_lookup_bulk_data(const struct rte_hash *h, const void **keys,
2046                       uint32_t num_keys, uint64_t *hit_mask, void *data[])
2047 {
2048         RETURN_IF_TRUE(((h == NULL) || (keys == NULL) || (num_keys == 0) ||
2049                         (num_keys > RTE_HASH_LOOKUP_BULK_MAX) ||
2050                         (hit_mask == NULL)), -EINVAL);
2051
2052         int32_t positions[num_keys];
2053
2054         __rte_hash_lookup_bulk(h, keys, num_keys, positions, hit_mask, data);
2055
2056         /* Return number of hits */
2057         return __builtin_popcountl(*hit_mask);
2058 }
2059
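/* Illustrative use of the bulk data lookup (editor's sketch, not part of the
 * library): up to RTE_HASH_LOOKUP_BULK_MAX keys are resolved in one call and
 * the hit_mask tells which of them were found.
 *
 *     const void *keys[BURST];            // BURST <= RTE_HASH_LOOKUP_BULK_MAX, hypothetical
 *     void *data[BURST];
 *     uint64_t hit_mask;
 *     int nb_hits = rte_hash_lookup_bulk_data(h, keys, BURST, &hit_mask, data);
 *     for (unsigned int j = 0; j < BURST; j++)
 *             if (hit_mask & (1ULL << j))
 *                     process(data[j]);   // hypothetical application callback
 */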
2060 int32_t
2061 rte_hash_iterate(const struct rte_hash *h, const void **key, void **data, uint32_t *next)
2062 {
2063         uint32_t bucket_idx, idx, position;
2064         struct rte_hash_key *next_key;
2065
2066         RETURN_IF_TRUE(((h == NULL) || (next == NULL)), -EINVAL);
2067
2068         const uint32_t total_entries_main = h->num_buckets *
2069                                                         RTE_HASH_BUCKET_ENTRIES;
2070         const uint32_t total_entries = total_entries_main << 1;
2071
2072         /* Past the main table buckets: continue in the extendable buckets */
2073         if (*next >= total_entries_main)
2074                 goto extend_table;
2075
2076         /* Calculate bucket and index of current iterator */
2077         bucket_idx = *next / RTE_HASH_BUCKET_ENTRIES;
2078         idx = *next % RTE_HASH_BUCKET_ENTRIES;
2079
2080         /* If current position is empty, go to the next one */
2081         while ((position = __atomic_load_n(&h->buckets[bucket_idx].key_idx[idx],
2082                                         __ATOMIC_ACQUIRE)) == EMPTY_SLOT) {
2083                 (*next)++;
2084                 /* End of table */
2085                 if (*next == total_entries_main)
2086                         goto extend_table;
2087                 bucket_idx = *next / RTE_HASH_BUCKET_ENTRIES;
2088                 idx = *next % RTE_HASH_BUCKET_ENTRIES;
2089         }
2090
2091         __hash_rw_reader_lock(h);
2092         next_key = (struct rte_hash_key *) ((char *)h->key_store +
2093                                 position * h->key_entry_size);
2094         /* Return key and data */
2095         *key = next_key->key;
2096         *data = next_key->pdata;
2097
2098         __hash_rw_reader_unlock(h);
2099
2100         /* Increment iterator */
2101         (*next)++;
2102
2103         return position - 1;
2104
2105 /* Begin to iterate extendable buckets */
2106 extend_table:
2107         /* Past the total bounds, or the ext bucket feature is not enabled */
2108         if (*next >= total_entries || !h->ext_table_support)
2109                 return -ENOENT;
2110
2111         bucket_idx = (*next - total_entries_main) / RTE_HASH_BUCKET_ENTRIES;
2112         idx = (*next - total_entries_main) % RTE_HASH_BUCKET_ENTRIES;
2113
2114         while ((position = h->buckets_ext[bucket_idx].key_idx[idx]) == EMPTY_SLOT) {
2115                 (*next)++;
2116                 if (*next == total_entries)
2117                         return -ENOENT;
2118                 bucket_idx = (*next - total_entries_main) /
2119                                                 RTE_HASH_BUCKET_ENTRIES;
2120                 idx = (*next - total_entries_main) % RTE_HASH_BUCKET_ENTRIES;
2121         }
2122         __hash_rw_reader_lock(h);
2123         next_key = (struct rte_hash_key *) ((char *)h->key_store +
2124                                 position * h->key_entry_size);
2125         /* Return key and data */
2126         *key = next_key->key;
2127         *data = next_key->pdata;
2128
2129         __hash_rw_reader_unlock(h);
2130
2131         /* Increment iterator */
2132         (*next)++;
2133         return position - 1;
2134 }
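/* Illustrative iteration over all entries (editor's sketch, not part of the
 * library): the iterator state lives entirely in the caller's "next" counter,
 * and -ENOENT marks the end of both the main and the extendable buckets.
 *
 *     uint32_t next = 0;
 *     const void *key;
 *     void *data;
 *     while (rte_hash_iterate(h, &key, &data, &next) >= 0)
 *             process(key, data);         // hypothetical application callback
 */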