New upstream version 18.08
[deb_dpdk.git] / lib / librte_hash / rte_cuckoo_hash.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2016 Intel Corporation
3  */
4
5 #include <string.h>
6 #include <stdint.h>
7 #include <errno.h>
8 #include <stdio.h>
9 #include <stdarg.h>
10 #include <sys/queue.h>
11
12 #include <rte_common.h>
13 #include <rte_memory.h>         /* for definition of RTE_CACHE_LINE_SIZE */
14 #include <rte_log.h>
15 #include <rte_memcpy.h>
16 #include <rte_prefetch.h>
17 #include <rte_branch_prediction.h>
18 #include <rte_malloc.h>
19 #include <rte_eal.h>
20 #include <rte_eal_memconfig.h>
21 #include <rte_per_lcore.h>
22 #include <rte_errno.h>
23 #include <rte_string_fns.h>
24 #include <rte_cpuflags.h>
25 #include <rte_rwlock.h>
26 #include <rte_spinlock.h>
27 #include <rte_ring.h>
28 #include <rte_compat.h>
29 #include <rte_pause.h>
30
31 #include "rte_hash.h"
32 #include "rte_cuckoo_hash.h"
33
34
35 TAILQ_HEAD(rte_hash_list, rte_tailq_entry);
36
37 static struct rte_tailq_elem rte_hash_tailq = {
38         .name = "RTE_HASH",
39 };
40 EAL_REGISTER_TAILQ(rte_hash_tailq)
41
42 struct rte_hash *
43 rte_hash_find_existing(const char *name)
44 {
45         struct rte_hash *h = NULL;
46         struct rte_tailq_entry *te;
47         struct rte_hash_list *hash_list;
48
49         hash_list = RTE_TAILQ_CAST(rte_hash_tailq.head, rte_hash_list);
50
51         rte_rwlock_read_lock(RTE_EAL_TAILQ_RWLOCK);
52         TAILQ_FOREACH(te, hash_list, next) {
53                 h = (struct rte_hash *) te->data;
54                 if (strncmp(name, h->name, RTE_HASH_NAMESIZE) == 0)
55                         break;
56         }
57         rte_rwlock_read_unlock(RTE_EAL_TAILQ_RWLOCK);
58
59         if (te == NULL) {
60                 rte_errno = ENOENT;
61                 return NULL;
62         }
63         return h;
64 }
65
66 void rte_hash_set_cmp_func(struct rte_hash *h, rte_hash_cmp_eq_t func)
67 {
68         h->cmp_jump_table_idx = KEY_CUSTOM;
69         h->rte_hash_custom_cmp_eq = func;
70 }
71
72 static inline int
73 rte_hash_cmp_eq(const void *key1, const void *key2, const struct rte_hash *h)
74 {
75         if (h->cmp_jump_table_idx == KEY_CUSTOM)
76                 return h->rte_hash_custom_cmp_eq(key1, key2, h->key_len);
77         else
78                 return cmp_jump_table[h->cmp_jump_table_idx](key1, key2, h->key_len);
79 }
80
81 struct rte_hash *
82 rte_hash_create(const struct rte_hash_parameters *params)
83 {
84         struct rte_hash *h = NULL;
85         struct rte_tailq_entry *te = NULL;
86         struct rte_hash_list *hash_list;
87         struct rte_ring *r = NULL;
88         char hash_name[RTE_HASH_NAMESIZE];
89         void *k = NULL;
90         void *buckets = NULL;
91         char ring_name[RTE_RING_NAMESIZE];
92         unsigned num_key_slots;
93         unsigned i;
94         unsigned int hw_trans_mem_support = 0, multi_writer_support = 0;
95         unsigned int readwrite_concur_support = 0;
96
97         rte_hash_function default_hash_func = (rte_hash_function)rte_jhash;
98
99         hash_list = RTE_TAILQ_CAST(rte_hash_tailq.head, rte_hash_list);
100
101         if (params == NULL) {
102                 RTE_LOG(ERR, HASH, "rte_hash_create has no parameters\n");
103                 return NULL;
104         }
105
106         /* Check for valid parameters */
107         if ((params->entries > RTE_HASH_ENTRIES_MAX) ||
108                         (params->entries < RTE_HASH_BUCKET_ENTRIES) ||
109                         (params->key_len == 0)) {
110                 rte_errno = EINVAL;
111                 RTE_LOG(ERR, HASH, "rte_hash_create has invalid parameters\n");
112                 return NULL;
113         }
114
115         /* Check extra flags field to check extra options. */
116         if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_TRANS_MEM_SUPPORT)
117                 hw_trans_mem_support = 1;
118
119         if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_MULTI_WRITER_ADD)
120                 multi_writer_support = 1;
121
122         if (params->extra_flag & RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY) {
123                 readwrite_concur_support = 1;
124                 multi_writer_support = 1;
125         }
126
127         /* Store all keys and leave the first entry as a dummy entry for lookup_bulk */
128         if (multi_writer_support)
129                 /*
130                  * Increase number of slots by total number of indices
131                  * that can be stored in the lcore caches
132                  * except for the first cache
133                  */
134                 num_key_slots = params->entries + (RTE_MAX_LCORE - 1) *
135                                         (LCORE_CACHE_SIZE - 1) + 1;
136         else
137                 num_key_slots = params->entries + 1;
138
139         snprintf(ring_name, sizeof(ring_name), "HT_%s", params->name);
140         /* Create ring (Dummy slot index is not enqueued) */
141         r = rte_ring_create(ring_name, rte_align32pow2(num_key_slots),
142                         params->socket_id, 0);
143         if (r == NULL) {
144                 RTE_LOG(ERR, HASH, "memory allocation failed\n");
145                 goto err;
146         }
147
148         snprintf(hash_name, sizeof(hash_name), "HT_%s", params->name);
149
150         rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
151
152         /* guarantee there's no existing: this is normally already checked
153          * by ring creation above */
154         TAILQ_FOREACH(te, hash_list, next) {
155                 h = (struct rte_hash *) te->data;
156                 if (strncmp(params->name, h->name, RTE_HASH_NAMESIZE) == 0)
157                         break;
158         }
159         h = NULL;
160         if (te != NULL) {
161                 rte_errno = EEXIST;
162                 te = NULL;
163                 goto err_unlock;
164         }
165
166         te = rte_zmalloc("HASH_TAILQ_ENTRY", sizeof(*te), 0);
167         if (te == NULL) {
168                 RTE_LOG(ERR, HASH, "tailq entry allocation failed\n");
169                 goto err_unlock;
170         }
171
172         h = (struct rte_hash *)rte_zmalloc_socket(hash_name, sizeof(struct rte_hash),
173                                         RTE_CACHE_LINE_SIZE, params->socket_id);
174
175         if (h == NULL) {
176                 RTE_LOG(ERR, HASH, "memory allocation failed\n");
177                 goto err_unlock;
178         }
179
180         const uint32_t num_buckets = rte_align32pow2(params->entries)
181                                         / RTE_HASH_BUCKET_ENTRIES;
182
183         buckets = rte_zmalloc_socket(NULL,
184                                 num_buckets * sizeof(struct rte_hash_bucket),
185                                 RTE_CACHE_LINE_SIZE, params->socket_id);
186
187         if (buckets == NULL) {
188                 RTE_LOG(ERR, HASH, "memory allocation failed\n");
189                 goto err_unlock;
190         }
191
192         const uint32_t key_entry_size = sizeof(struct rte_hash_key) + params->key_len;
193         const uint64_t key_tbl_size = (uint64_t) key_entry_size * num_key_slots;
194
195         k = rte_zmalloc_socket(NULL, key_tbl_size,
196                         RTE_CACHE_LINE_SIZE, params->socket_id);
197
198         if (k == NULL) {
199                 RTE_LOG(ERR, HASH, "memory allocation failed\n");
200                 goto err_unlock;
201         }
202
203 /*
204  * If x86 architecture is used, select appropriate compare function,
205  * which may use x86 intrinsics, otherwise use memcmp
206  */
207 #if defined(RTE_ARCH_X86) || defined(RTE_ARCH_ARM64)
208         /* Select function to compare keys */
209         switch (params->key_len) {
210         case 16:
211                 h->cmp_jump_table_idx = KEY_16_BYTES;
212                 break;
213         case 32:
214                 h->cmp_jump_table_idx = KEY_32_BYTES;
215                 break;
216         case 48:
217                 h->cmp_jump_table_idx = KEY_48_BYTES;
218                 break;
219         case 64:
220                 h->cmp_jump_table_idx = KEY_64_BYTES;
221                 break;
222         case 80:
223                 h->cmp_jump_table_idx = KEY_80_BYTES;
224                 break;
225         case 96:
226                 h->cmp_jump_table_idx = KEY_96_BYTES;
227                 break;
228         case 112:
229                 h->cmp_jump_table_idx = KEY_112_BYTES;
230                 break;
231         case 128:
232                 h->cmp_jump_table_idx = KEY_128_BYTES;
233                 break;
234         default:
235                 /* If key is not multiple of 16, use generic memcmp */
236                 h->cmp_jump_table_idx = KEY_OTHER_BYTES;
237         }
238 #else
239         h->cmp_jump_table_idx = KEY_OTHER_BYTES;
240 #endif
241
242         if (multi_writer_support) {
243                 h->local_free_slots = rte_zmalloc_socket(NULL,
244                                 sizeof(struct lcore_cache) * RTE_MAX_LCORE,
245                                 RTE_CACHE_LINE_SIZE, params->socket_id);
246         }
247
248         /* Default hash function */
249 #if defined(RTE_ARCH_X86)
250         default_hash_func = (rte_hash_function)rte_hash_crc;
251 #elif defined(RTE_ARCH_ARM64)
252         if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_CRC32))
253                 default_hash_func = (rte_hash_function)rte_hash_crc;
254 #endif
255         /* Setup hash context */
256         snprintf(h->name, sizeof(h->name), "%s", params->name);
257         h->entries = params->entries;
258         h->key_len = params->key_len;
259         h->key_entry_size = key_entry_size;
260         h->hash_func_init_val = params->hash_func_init_val;
261
262         h->num_buckets = num_buckets;
263         h->bucket_bitmask = h->num_buckets - 1;
264         h->buckets = buckets;
265         h->hash_func = (params->hash_func == NULL) ?
266                 default_hash_func : params->hash_func;
267         h->key_store = k;
268         h->free_slots = r;
269         h->hw_trans_mem_support = hw_trans_mem_support;
270         h->multi_writer_support = multi_writer_support;
271         h->readwrite_concur_support = readwrite_concur_support;
272
273 #if defined(RTE_ARCH_X86)
274         if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
275                 h->sig_cmp_fn = RTE_HASH_COMPARE_AVX2;
276         else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE2))
277                 h->sig_cmp_fn = RTE_HASH_COMPARE_SSE;
278         else
279 #endif
280                 h->sig_cmp_fn = RTE_HASH_COMPARE_SCALAR;
281
282         /* Turn on multi-writer only with explicit flag from user and TM
283          * support.
284          */
285         if (h->multi_writer_support) {
286                 h->readwrite_lock = rte_malloc(NULL, sizeof(rte_rwlock_t),
287                                                 RTE_CACHE_LINE_SIZE);
288                 if (h->readwrite_lock == NULL)
289                         goto err_unlock;
290
291                 rte_rwlock_init(h->readwrite_lock);
292         }
293
294         /* Populate free slots ring. Entry zero is reserved for key misses. */
295         for (i = 1; i < num_key_slots; i++)
296                 rte_ring_sp_enqueue(r, (void *)((uintptr_t) i));
297
298         te->data = (void *) h;
299         TAILQ_INSERT_TAIL(hash_list, te, next);
300         rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
301
302         return h;
303 err_unlock:
304         rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
305 err:
306         rte_ring_free(r);
307         rte_free(te);
308         rte_free(h);
309         rte_free(buckets);
310         rte_free(k);
311         return NULL;
312 }
313
314 void
315 rte_hash_free(struct rte_hash *h)
316 {
317         struct rte_tailq_entry *te;
318         struct rte_hash_list *hash_list;
319
320         if (h == NULL)
321                 return;
322
323         hash_list = RTE_TAILQ_CAST(rte_hash_tailq.head, rte_hash_list);
324
325         rte_rwlock_write_lock(RTE_EAL_TAILQ_RWLOCK);
326
327         /* find out tailq entry */
328         TAILQ_FOREACH(te, hash_list, next) {
329                 if (te->data == (void *) h)
330                         break;
331         }
332
333         if (te == NULL) {
334                 rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
335                 return;
336         }
337
338         TAILQ_REMOVE(hash_list, te, next);
339
340         rte_rwlock_write_unlock(RTE_EAL_TAILQ_RWLOCK);
341
342         if (h->multi_writer_support) {
343                 rte_free(h->local_free_slots);
344                 rte_free(h->readwrite_lock);
345         }
346         rte_ring_free(h->free_slots);
347         rte_free(h->key_store);
348         rte_free(h->buckets);
349         rte_free(h);
350         rte_free(te);
351 }
352
353 hash_sig_t
354 rte_hash_hash(const struct rte_hash *h, const void *key)
355 {
356         /* calc hash result by key */
357         return h->hash_func(key, h->key_len, h->hash_func_init_val);
358 }
359
360 /* Calc the secondary hash value from the primary hash value of a given key */
361 static inline hash_sig_t
362 rte_hash_secondary_hash(const hash_sig_t primary_hash)
363 {
364         static const unsigned all_bits_shift = 12;
365         static const unsigned alt_bits_xor = 0x5bd1e995;
366
367         uint32_t tag = primary_hash >> all_bits_shift;
368
369         return primary_hash ^ ((tag + 1) * alt_bits_xor);
370 }
371
372 int32_t
373 rte_hash_count(const struct rte_hash *h)
374 {
375         uint32_t tot_ring_cnt, cached_cnt = 0;
376         uint32_t i, ret;
377
378         if (h == NULL)
379                 return -EINVAL;
380
381         if (h->multi_writer_support) {
382                 tot_ring_cnt = h->entries + (RTE_MAX_LCORE - 1) *
383                                         (LCORE_CACHE_SIZE - 1);
384                 for (i = 0; i < RTE_MAX_LCORE; i++)
385                         cached_cnt += h->local_free_slots[i].len;
386
387                 ret = tot_ring_cnt - rte_ring_count(h->free_slots) -
388                                                                 cached_cnt;
389         } else {
390                 tot_ring_cnt = h->entries;
391                 ret = tot_ring_cnt - rte_ring_count(h->free_slots);
392         }
393         return ret;
394 }
395
396 /* Read write locks implemented using rte_rwlock */
397 static inline void
398 __hash_rw_writer_lock(const struct rte_hash *h)
399 {
400         if (h->multi_writer_support && h->hw_trans_mem_support)
401                 rte_rwlock_write_lock_tm(h->readwrite_lock);
402         else if (h->multi_writer_support)
403                 rte_rwlock_write_lock(h->readwrite_lock);
404 }
405
406
407 static inline void
408 __hash_rw_reader_lock(const struct rte_hash *h)
409 {
410         if (h->readwrite_concur_support && h->hw_trans_mem_support)
411                 rte_rwlock_read_lock_tm(h->readwrite_lock);
412         else if (h->readwrite_concur_support)
413                 rte_rwlock_read_lock(h->readwrite_lock);
414 }
415
416 static inline void
417 __hash_rw_writer_unlock(const struct rte_hash *h)
418 {
419         if (h->multi_writer_support && h->hw_trans_mem_support)
420                 rte_rwlock_write_unlock_tm(h->readwrite_lock);
421         else if (h->multi_writer_support)
422                 rte_rwlock_write_unlock(h->readwrite_lock);
423 }
424
425 static inline void
426 __hash_rw_reader_unlock(const struct rte_hash *h)
427 {
428         if (h->readwrite_concur_support && h->hw_trans_mem_support)
429                 rte_rwlock_read_unlock_tm(h->readwrite_lock);
430         else if (h->readwrite_concur_support)
431                 rte_rwlock_read_unlock(h->readwrite_lock);
432 }
433
434 void
435 rte_hash_reset(struct rte_hash *h)
436 {
437         void *ptr;
438         uint32_t tot_ring_cnt, i;
439
440         if (h == NULL)
441                 return;
442
443         __hash_rw_writer_lock(h);
444         memset(h->buckets, 0, h->num_buckets * sizeof(struct rte_hash_bucket));
445         memset(h->key_store, 0, h->key_entry_size * (h->entries + 1));
446
447         /* clear the free ring */
448         while (rte_ring_dequeue(h->free_slots, &ptr) == 0)
449                 rte_pause();
450
451         /* Repopulate the free slots ring. Entry zero is reserved for key misses */
452         if (h->multi_writer_support)
453                 tot_ring_cnt = h->entries + (RTE_MAX_LCORE - 1) *
454                                         (LCORE_CACHE_SIZE - 1);
455         else
456                 tot_ring_cnt = h->entries;
457
458         for (i = 1; i < tot_ring_cnt + 1; i++)
459                 rte_ring_sp_enqueue(h->free_slots, (void *)((uintptr_t) i));
460
461         if (h->multi_writer_support) {
462                 /* Reset local caches per lcore */
463                 for (i = 0; i < RTE_MAX_LCORE; i++)
464                         h->local_free_slots[i].len = 0;
465         }
466         __hash_rw_writer_unlock(h);
467 }
468
469 /*
470  * Function called to enqueue back an index in the cache/ring,
471  * as slot has not being used and it can be used in the
472  * next addition attempt.
473  */
474 static inline void
475 enqueue_slot_back(const struct rte_hash *h,
476                 struct lcore_cache *cached_free_slots,
477                 void *slot_id)
478 {
479         if (h->multi_writer_support) {
480                 cached_free_slots->objs[cached_free_slots->len] = slot_id;
481                 cached_free_slots->len++;
482         } else
483                 rte_ring_sp_enqueue(h->free_slots, slot_id);
484 }
485
486 /* Search a key from bucket and update its data */
487 static inline int32_t
488 search_and_update(const struct rte_hash *h, void *data, const void *key,
489         struct rte_hash_bucket *bkt, hash_sig_t sig, hash_sig_t alt_hash)
490 {
491         int i;
492         struct rte_hash_key *k, *keys = h->key_store;
493
494         for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
495                 if (bkt->sig_current[i] == sig &&
496                                 bkt->sig_alt[i] == alt_hash) {
497                         k = (struct rte_hash_key *) ((char *)keys +
498                                         bkt->key_idx[i] * h->key_entry_size);
499                         if (rte_hash_cmp_eq(key, k->key, h) == 0) {
500                                 /* Update data */
501                                 k->pdata = data;
502                                 /*
503                                  * Return index where key is stored,
504                                  * subtracting the first dummy index
505                                  */
506                                 return bkt->key_idx[i] - 1;
507                         }
508                 }
509         }
510         return -1;
511 }
512
/* Only tries to insert at one bucket (@prim_bkt) without trying to push
 * buckets around.
 * return 1 if matching existing key, return 0 if succeeds, return -1 for no
 * empty entry.
 *
 * On return 1, *ret_val holds the existing key's public index.
 * The whole operation runs inside the writer lock so that the duplicate
 * re-check and the slot write are atomic with respect to other writers.
 */
static inline int32_t
rte_hash_cuckoo_insert_mw(const struct rte_hash *h,
		struct rte_hash_bucket *prim_bkt,
		struct rte_hash_bucket *sec_bkt,
		const struct rte_hash_key *key, void *data,
		hash_sig_t sig, hash_sig_t alt_hash, uint32_t new_idx,
		int32_t *ret_val)
{
	unsigned int i;
	struct rte_hash_bucket *cur_bkt = prim_bkt;
	int32_t ret;

	__hash_rw_writer_lock(h);
	/* Check if key was inserted after last check but before this
	 * protected region in case of inserting duplicated keys.
	 */
	ret = search_and_update(h, data, key, cur_bkt, sig, alt_hash);
	if (ret != -1) {
		__hash_rw_writer_unlock(h);
		*ret_val = ret;
		return 1;
	}
	/* Same duplicate check on the secondary bucket; sig/alt_hash swap
	 * roles there, hence the reversed argument order.
	 */
	ret = search_and_update(h, data, key, sec_bkt, alt_hash, sig);
	if (ret != -1) {
		__hash_rw_writer_unlock(h);
		*ret_val = ret;
		return 1;
	}

	/* Insert new entry if there is room in the primary
	 * bucket.
	 */
	for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
		/* Check if slot is available */
		if (likely(prim_bkt->key_idx[i] == EMPTY_SLOT)) {
			prim_bkt->sig_current[i] = sig;
			prim_bkt->sig_alt[i] = alt_hash;
			prim_bkt->key_idx[i] = new_idx;
			break;
		}
	}
	__hash_rw_writer_unlock(h);

	/* i stopped short of the end exactly when a free slot was used. */
	if (i != RTE_HASH_BUCKET_ENTRIES)
		return 0;

	/* no empty entry */
	return -1;
}
567
/* Shift buckets along provided cuckoo_path (@leaf and @leaf_slot) and fill
 * the path head with new entry (sig, alt_hash, new_idx)
 * return 1 if matched key found, return -1 if cuckoo path invalided and fail,
 * return 0 if succeeds.
 *
 * The BFS path was computed WITHOUT the writer lock held, so everything
 * is revalidated here under the lock: the leaf slot must still be empty,
 * the key must still be absent, and every edge of the path must still
 * point where it did when the path was built.
 */
static inline int
rte_hash_cuckoo_move_insert_mw(const struct rte_hash *h,
			struct rte_hash_bucket *bkt,
			struct rte_hash_bucket *alt_bkt,
			const struct rte_hash_key *key, void *data,
			struct queue_node *leaf, uint32_t leaf_slot,
			hash_sig_t sig, hash_sig_t alt_hash, uint32_t new_idx,
			int32_t *ret_val)
{
	uint32_t prev_alt_bkt_idx;
	struct rte_hash_bucket *cur_bkt = bkt;
	struct queue_node *prev_node, *curr_node = leaf;
	struct rte_hash_bucket *prev_bkt, *curr_bkt = leaf->bkt;
	uint32_t prev_slot, curr_slot = leaf_slot;
	int32_t ret;

	__hash_rw_writer_lock(h);

	/* In case empty slot was gone before entering protected region */
	if (curr_bkt->key_idx[curr_slot] != EMPTY_SLOT) {
		__hash_rw_writer_unlock(h);
		return -1;
	}

	/* Check if key was inserted after last check but before this
	 * protected region.
	 */
	ret = search_and_update(h, data, key, cur_bkt, sig, alt_hash);
	if (ret != -1) {
		__hash_rw_writer_unlock(h);
		*ret_val = ret;
		return 1;
	}

	ret = search_and_update(h, data, key, alt_bkt, alt_hash, sig);
	if (ret != -1) {
		__hash_rw_writer_unlock(h);
		*ret_val = ret;
		return 1;
	}

	/* Walk from the empty leaf back toward the path root, moving each
	 * predecessor entry one step forward to open a slot at the root.
	 */
	while (likely(curr_node->prev != NULL)) {
		prev_node = curr_node->prev;
		prev_bkt = prev_node->bkt;
		prev_slot = curr_node->prev_slot;

		prev_alt_bkt_idx =
			prev_bkt->sig_alt[prev_slot] & h->bucket_bitmask;

		/* If the entry to move no longer maps to curr_bkt, the path
		 * went stale since the unlocked BFS computed it.
		 */
		if (unlikely(&h->buckets[prev_alt_bkt_idx]
				!= curr_bkt)) {
			/* revert it to empty, otherwise duplicated keys */
			curr_bkt->key_idx[curr_slot] = EMPTY_SLOT;
			__hash_rw_writer_unlock(h);
			return -1;
		}

		/* Need to swap current/alt sig to allow later
		 * Cuckoo insert to move elements back to its
		 * primary bucket if available
		 */
		curr_bkt->sig_alt[curr_slot] =
			 prev_bkt->sig_current[prev_slot];
		curr_bkt->sig_current[curr_slot] =
			prev_bkt->sig_alt[prev_slot];
		curr_bkt->key_idx[curr_slot] =
			prev_bkt->key_idx[prev_slot];

		curr_slot = prev_slot;
		curr_node = prev_node;
		curr_bkt = curr_node->bkt;
	}

	/* The path root's slot is now free: store the new entry there. */
	curr_bkt->sig_current[curr_slot] = sig;
	curr_bkt->sig_alt[curr_slot] = alt_hash;
	curr_bkt->key_idx[curr_slot] = new_idx;

	__hash_rw_writer_unlock(h);

	return 0;

}
655
/*
 * Make space for new key, using bfs Cuckoo Search and Multi-Writer safe
 * Cuckoo
 *
 * Breadth-first search over buckets reachable via the entries'
 * alternative locations. The search itself runs without the writer
 * lock; rte_hash_cuckoo_move_insert_mw revalidates and performs the
 * moves under the lock. Returns 0 on success, 1 if the key turned out
 * to already exist (index in *ret_val), -ENOSPC if no path was found.
 */
static inline int
rte_hash_cuckoo_make_space_mw(const struct rte_hash *h,
			struct rte_hash_bucket *bkt,
			struct rte_hash_bucket *sec_bkt,
			const struct rte_hash_key *key, void *data,
			hash_sig_t sig, hash_sig_t alt_hash,
			uint32_t new_idx, int32_t *ret_val)
{
	unsigned int i;
	struct queue_node queue[RTE_HASH_BFS_QUEUE_MAX_LEN];
	struct queue_node *tail, *head;
	struct rte_hash_bucket *curr_bkt, *alt_bkt;

	/* Seed the queue with the primary bucket as the path root. */
	tail = queue;
	head = queue + 1;
	tail->bkt = bkt;
	tail->prev = NULL;
	tail->prev_slot = -1;

	/* Cuckoo bfs Search.
	 * Stop when the queue is exhausted, or when there is no longer
	 * room to enqueue a full bucket's worth of children (hence the
	 * RTE_HASH_BUCKET_ENTRIES headroom in the bound).
	 */
	while (likely(tail != head && head <
					queue + RTE_HASH_BFS_QUEUE_MAX_LEN -
					RTE_HASH_BUCKET_ENTRIES)) {
		curr_bkt = tail->bkt;
		for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
			if (curr_bkt->key_idx[i] == EMPTY_SLOT) {
				/* Free leaf found: try to shift entries along
				 * the path. -1 means the path went stale under
				 * concurrency, so keep searching.
				 */
				int32_t ret = rte_hash_cuckoo_move_insert_mw(h,
						bkt, sec_bkt, key, data,
						tail, i, sig, alt_hash,
						new_idx, ret_val);
				if (likely(ret != -1))
					return ret;
			}

			/* Enqueue new node and keep prev node info */
			alt_bkt = &(h->buckets[curr_bkt->sig_alt[i]
						    & h->bucket_bitmask]);
			head->bkt = alt_bkt;
			head->prev = tail;
			head->prev_slot = i;
			head++;
		}
		tail++;
	}

	return -ENOSPC;
}
707
/* Core add routine: insert @key (with optional @data) given its
 * precomputed primary signature @sig.
 * Returns the key's public index (>= 0) on success or when the key
 * already existed (its data is then updated in place), or -ENOSPC when
 * no key slot / bucket space could be found.
 */
static inline int32_t
__rte_hash_add_key_with_hash(const struct rte_hash *h, const void *key,
						hash_sig_t sig, void *data)
{
	hash_sig_t alt_hash;
	uint32_t prim_bucket_idx, sec_bucket_idx;
	struct rte_hash_bucket *prim_bkt, *sec_bkt;
	struct rte_hash_key *new_k, *keys = h->key_store;
	void *slot_id = NULL;
	uint32_t new_idx;
	int ret;
	unsigned n_slots;
	unsigned lcore_id;
	struct lcore_cache *cached_free_slots = NULL;
	int32_t ret_val;

	/* Locate and prefetch the two candidate buckets for this key. */
	prim_bucket_idx = sig & h->bucket_bitmask;
	prim_bkt = &h->buckets[prim_bucket_idx];
	rte_prefetch0(prim_bkt);

	alt_hash = rte_hash_secondary_hash(sig);
	sec_bucket_idx = alt_hash & h->bucket_bitmask;
	sec_bkt = &h->buckets[sec_bucket_idx];
	rte_prefetch0(sec_bkt);

	/* Check if key is already inserted in primary location */
	__hash_rw_writer_lock(h);
	ret = search_and_update(h, data, key, prim_bkt, sig, alt_hash);
	if (ret != -1) {
		__hash_rw_writer_unlock(h);
		return ret;
	}

	/* Check if key is already inserted in secondary location */
	ret = search_and_update(h, data, key, sec_bkt, alt_hash, sig);
	if (ret != -1) {
		__hash_rw_writer_unlock(h);
		return ret;
	}
	__hash_rw_writer_unlock(h);

	/* Did not find a match, so get a new slot for storing the new key */
	if (h->multi_writer_support) {
		lcore_id = rte_lcore_id();
		cached_free_slots = &h->local_free_slots[lcore_id];
		/* Try to get a free slot from the local cache */
		if (cached_free_slots->len == 0) {
			/* Need to get another burst of free slots from global ring */
			n_slots = rte_ring_mc_dequeue_burst(h->free_slots,
					cached_free_slots->objs,
					LCORE_CACHE_SIZE, NULL);
			if (n_slots == 0) {
				/* No free key slots anywhere: table full. */
				return -ENOSPC;
			}

			cached_free_slots->len += n_slots;
		}

		/* Get a free slot from the local cache */
		cached_free_slots->len--;
		slot_id = cached_free_slots->objs[cached_free_slots->len];
	} else {
		if (rte_ring_sc_dequeue(h->free_slots, &slot_id) != 0) {
			return -ENOSPC;
		}
	}

	/* The slot index is carried in the pointer value itself; write the
	 * key and user data into the reserved key-store entry before
	 * publishing its index in a bucket.
	 */
	new_k = RTE_PTR_ADD(keys, (uintptr_t)slot_id * h->key_entry_size);
	new_idx = (uint32_t)((uintptr_t) slot_id);
	/* Copy key */
	rte_memcpy(new_k->key, key, h->key_len);
	new_k->pdata = data;


	/* Find an empty slot and insert */
	ret = rte_hash_cuckoo_insert_mw(h, prim_bkt, sec_bkt, key, data,
					sig, alt_hash, new_idx, &ret_val);
	if (ret == 0)
		/* Public index excludes the reserved dummy slot 0. */
		return new_idx - 1;
	else if (ret == 1) {
		/* Another writer inserted the key meanwhile: give back the
		 * slot we reserved and report the existing entry's index.
		 */
		enqueue_slot_back(h, cached_free_slots, slot_id);
		return ret_val;
	}

	/* Primary bucket full, need to make space for new entry */
	ret = rte_hash_cuckoo_make_space_mw(h, prim_bkt, sec_bkt, key, data,
					sig, alt_hash, new_idx, &ret_val);
	if (ret == 0)
		return new_idx - 1;
	else if (ret == 1) {
		enqueue_slot_back(h, cached_free_slots, slot_id);
		return ret_val;
	}

	/* Also search secondary bucket to get better occupancy */
	ret = rte_hash_cuckoo_make_space_mw(h, sec_bkt, prim_bkt, key, data,
					alt_hash, sig, new_idx, &ret_val);

	if (ret == 0)
		return new_idx - 1;
	else if (ret == 1) {
		enqueue_slot_back(h, cached_free_slots, slot_id);
		return ret_val;
	} else {
		/* Both cuckoo searches failed (-ENOSPC): release the slot. */
		enqueue_slot_back(h, cached_free_slots, slot_id);
		return ret;
	}
}
816
817 int32_t
818 rte_hash_add_key_with_hash(const struct rte_hash *h,
819                         const void *key, hash_sig_t sig)
820 {
821         RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
822         return __rte_hash_add_key_with_hash(h, key, sig, 0);
823 }
824
825 int32_t
826 rte_hash_add_key(const struct rte_hash *h, const void *key)
827 {
828         RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
829         return __rte_hash_add_key_with_hash(h, key, rte_hash_hash(h, key), 0);
830 }
831
832 int
833 rte_hash_add_key_with_hash_data(const struct rte_hash *h,
834                         const void *key, hash_sig_t sig, void *data)
835 {
836         int ret;
837
838         RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
839         ret = __rte_hash_add_key_with_hash(h, key, sig, data);
840         if (ret >= 0)
841                 return 0;
842         else
843                 return ret;
844 }
845
846 int
847 rte_hash_add_key_data(const struct rte_hash *h, const void *key, void *data)
848 {
849         int ret;
850
851         RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
852
853         ret = __rte_hash_add_key_with_hash(h, key, rte_hash_hash(h, key), data);
854         if (ret >= 0)
855                 return 0;
856         else
857                 return ret;
858 }
859
860 /* Search one bucket to find the match key */
861 static inline int32_t
862 search_one_bucket(const struct rte_hash *h, const void *key, hash_sig_t sig,
863                         void **data, const struct rte_hash_bucket *bkt)
864 {
865         int i;
866         struct rte_hash_key *k, *keys = h->key_store;
867
868         for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
869                 if (bkt->sig_current[i] == sig &&
870                                 bkt->key_idx[i] != EMPTY_SLOT) {
871                         k = (struct rte_hash_key *) ((char *)keys +
872                                         bkt->key_idx[i] * h->key_entry_size);
873                         if (rte_hash_cmp_eq(key, k->key, h) == 0) {
874                                 if (data != NULL)
875                                         *data = k->pdata;
876                                 /*
877                                  * Return index where key is stored,
878                                  * subtracting the first dummy index
879                                  */
880                                 return bkt->key_idx[i] - 1;
881                         }
882                 }
883         }
884         return -1;
885 }
886
887 static inline int32_t
888 __rte_hash_lookup_with_hash(const struct rte_hash *h, const void *key,
889                                         hash_sig_t sig, void **data)
890 {
891         uint32_t bucket_idx;
892         hash_sig_t alt_hash;
893         struct rte_hash_bucket *bkt;
894         int ret;
895
896         bucket_idx = sig & h->bucket_bitmask;
897         bkt = &h->buckets[bucket_idx];
898
899         __hash_rw_reader_lock(h);
900
901         /* Check if key is in primary location */
902         ret = search_one_bucket(h, key, sig, data, bkt);
903         if (ret != -1) {
904                 __hash_rw_reader_unlock(h);
905                 return ret;
906         }
907         /* Calculate secondary hash */
908         alt_hash = rte_hash_secondary_hash(sig);
909         bucket_idx = alt_hash & h->bucket_bitmask;
910         bkt = &h->buckets[bucket_idx];
911
912         /* Check if key is in secondary location */
913         ret = search_one_bucket(h, key, alt_hash, data, bkt);
914         if (ret != -1) {
915                 __hash_rw_reader_unlock(h);
916                 return ret;
917         }
918         __hash_rw_reader_unlock(h);
919         return -ENOENT;
920 }
921
/*
 * Look up @key using a precomputed signature @sig.
 * Returns the key position (>= 0) or -ENOENT; -EINVAL on NULL args
 * (only when the debug guard below is compiled in).
 */
int32_t
rte_hash_lookup_with_hash(const struct rte_hash *h,
			const void *key, hash_sig_t sig)
{
	RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
	/* NULL data pointer: caller does not want the stored value */
	return __rte_hash_lookup_with_hash(h, key, sig, NULL);
}
929
930 int32_t
931 rte_hash_lookup(const struct rte_hash *h, const void *key)
932 {
933         RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
934         return __rte_hash_lookup_with_hash(h, key, rte_hash_hash(h, key), NULL);
935 }
936
/*
 * Look up @key using a precomputed signature @sig and retrieve the data
 * pointer stored with it into *@data.
 * Returns the key position (>= 0) or -ENOENT.
 */
int
rte_hash_lookup_with_hash_data(const struct rte_hash *h,
			const void *key, hash_sig_t sig, void **data)
{
	RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
	return __rte_hash_lookup_with_hash(h, key, sig, data);
}
944
945 int
946 rte_hash_lookup_data(const struct rte_hash *h, const void *key, void **data)
947 {
948         RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
949         return __rte_hash_lookup_with_hash(h, key, rte_hash_hash(h, key), data);
950 }
951
/*
 * Release the key-store slot referenced by bucket entry @i: clear both
 * stored signatures and return the slot index to the free-slot pool.
 * With multi-writer support the index goes into this lcore's local
 * cache (spilling a full cache back to the global MP ring first);
 * otherwise it is enqueued directly on the single-producer ring.
 *
 * Note: bkt->key_idx[i] itself is left untouched here — the caller
 * (search_and_remove) resets it to EMPTY_SLOT.
 */
static inline void
remove_entry(const struct rte_hash *h, struct rte_hash_bucket *bkt, unsigned i)
{
	unsigned lcore_id, n_slots;
	struct lcore_cache *cached_free_slots;

	bkt->sig_current[i] = NULL_SIGNATURE;
	bkt->sig_alt[i] = NULL_SIGNATURE;
	if (h->multi_writer_support) {
		lcore_id = rte_lcore_id();
		cached_free_slots = &h->local_free_slots[lcore_id];
		/* Cache full, need to free it. */
		if (cached_free_slots->len == LCORE_CACHE_SIZE) {
			/* Need to enqueue the free slots in global ring. */
			n_slots = rte_ring_mp_enqueue_burst(h->free_slots,
						cached_free_slots->objs,
						LCORE_CACHE_SIZE, NULL);
			/* Only the slots actually enqueued leave the cache */
			cached_free_slots->len -= n_slots;
		}
		/* Put index of new free slot in cache. */
		cached_free_slots->objs[cached_free_slots->len] =
				(void *)((uintptr_t)bkt->key_idx[i]);
		cached_free_slots->len++;
	} else {
		rte_ring_sp_enqueue(h->free_slots,
				(void *)((uintptr_t)bkt->key_idx[i]));
	}
}
980
981 /* Search one bucket and remove the matched key */
982 static inline int32_t
983 search_and_remove(const struct rte_hash *h, const void *key,
984                         struct rte_hash_bucket *bkt, hash_sig_t sig)
985 {
986         struct rte_hash_key *k, *keys = h->key_store;
987         unsigned int i;
988         int32_t ret;
989
990         /* Check if key is in primary location */
991         for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
992                 if (bkt->sig_current[i] == sig &&
993                                 bkt->key_idx[i] != EMPTY_SLOT) {
994                         k = (struct rte_hash_key *) ((char *)keys +
995                                         bkt->key_idx[i] * h->key_entry_size);
996                         if (rte_hash_cmp_eq(key, k->key, h) == 0) {
997                                 remove_entry(h, bkt, i);
998
999                                 /*
1000                                  * Return index where key is stored,
1001                                  * subtracting the first dummy index
1002                                  */
1003                                 ret = bkt->key_idx[i] - 1;
1004                                 bkt->key_idx[i] = EMPTY_SLOT;
1005                                 return ret;
1006                         }
1007                 }
1008         }
1009         return -1;
1010 }
1011
1012 static inline int32_t
1013 __rte_hash_del_key_with_hash(const struct rte_hash *h, const void *key,
1014                                                 hash_sig_t sig)
1015 {
1016         uint32_t bucket_idx;
1017         hash_sig_t alt_hash;
1018         struct rte_hash_bucket *bkt;
1019         int32_t ret;
1020
1021         bucket_idx = sig & h->bucket_bitmask;
1022         bkt = &h->buckets[bucket_idx];
1023
1024         __hash_rw_writer_lock(h);
1025         /* look for key in primary bucket */
1026         ret = search_and_remove(h, key, bkt, sig);
1027         if (ret != -1) {
1028                 __hash_rw_writer_unlock(h);
1029                 return ret;
1030         }
1031
1032         /* Calculate secondary hash */
1033         alt_hash = rte_hash_secondary_hash(sig);
1034         bucket_idx = alt_hash & h->bucket_bitmask;
1035         bkt = &h->buckets[bucket_idx];
1036
1037         /* look for key in secondary bucket */
1038         ret = search_and_remove(h, key, bkt, alt_hash);
1039         if (ret != -1) {
1040                 __hash_rw_writer_unlock(h);
1041                 return ret;
1042         }
1043
1044         __hash_rw_writer_unlock(h);
1045         return -ENOENT;
1046 }
1047
/*
 * Delete @key using a precomputed signature @sig.
 * Returns the removed key's position (>= 0) or -ENOENT.
 */
int32_t
rte_hash_del_key_with_hash(const struct rte_hash *h,
			const void *key, hash_sig_t sig)
{
	RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
	return __rte_hash_del_key_with_hash(h, key, sig);
}
1055
1056 int32_t
1057 rte_hash_del_key(const struct rte_hash *h, const void *key)
1058 {
1059         RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);
1060         return __rte_hash_del_key_with_hash(h, key, rte_hash_hash(h, key));
1061 }
1062
/*
 * Retrieve a pointer to the key stored at @position (a value previously
 * returned by an add/lookup call) and verify via a fresh lookup that
 * the key still resolves to that position.
 *
 * Returns 0 on success, -ENOENT if the slot's contents no longer look
 * up to @position (e.g. the key was deleted).
 *
 * NOTE(review): @position is not bounds-checked against the key store
 * before the slot is dereferenced, and the slot is read without taking
 * the reader lock — confirm callers only pass positions obtained from
 * this table and serialize against deletions.
 */
int
rte_hash_get_key_with_position(const struct rte_hash *h, const int32_t position,
			       void **key)
{
	RETURN_IF_TRUE(((h == NULL) || (key == NULL)), -EINVAL);

	struct rte_hash_key *k, *keys = h->key_store;
	/* Slot 0 of the key store is a dummy entry, hence position + 1 */
	k = (struct rte_hash_key *) ((char *) keys + (position + 1) *
				     h->key_entry_size);
	*key = k->key;

	if (position !=
	    __rte_hash_lookup_with_hash(h, *key, rte_hash_hash(h, *key),
					NULL)) {
		return -ENOENT;
	}

	return 0;
}
1082
1083 static inline void
1084 compare_signatures(uint32_t *prim_hash_matches, uint32_t *sec_hash_matches,
1085                         const struct rte_hash_bucket *prim_bkt,
1086                         const struct rte_hash_bucket *sec_bkt,
1087                         hash_sig_t prim_hash, hash_sig_t sec_hash,
1088                         enum rte_hash_sig_compare_function sig_cmp_fn)
1089 {
1090         unsigned int i;
1091
1092         switch (sig_cmp_fn) {
1093 #ifdef RTE_MACHINE_CPUFLAG_AVX2
1094         case RTE_HASH_COMPARE_AVX2:
1095                 *prim_hash_matches = _mm256_movemask_ps((__m256)_mm256_cmpeq_epi32(
1096                                 _mm256_load_si256(
1097                                         (__m256i const *)prim_bkt->sig_current),
1098                                 _mm256_set1_epi32(prim_hash)));
1099                 *sec_hash_matches = _mm256_movemask_ps((__m256)_mm256_cmpeq_epi32(
1100                                 _mm256_load_si256(
1101                                         (__m256i const *)sec_bkt->sig_current),
1102                                 _mm256_set1_epi32(sec_hash)));
1103                 break;
1104 #endif
1105 #ifdef RTE_MACHINE_CPUFLAG_SSE2
1106         case RTE_HASH_COMPARE_SSE:
1107                 /* Compare the first 4 signatures in the bucket */
1108                 *prim_hash_matches = _mm_movemask_ps((__m128)_mm_cmpeq_epi16(
1109                                 _mm_load_si128(
1110                                         (__m128i const *)prim_bkt->sig_current),
1111                                 _mm_set1_epi32(prim_hash)));
1112                 *prim_hash_matches |= (_mm_movemask_ps((__m128)_mm_cmpeq_epi16(
1113                                 _mm_load_si128(
1114                                         (__m128i const *)&prim_bkt->sig_current[4]),
1115                                 _mm_set1_epi32(prim_hash)))) << 4;
1116                 /* Compare the first 4 signatures in the bucket */
1117                 *sec_hash_matches = _mm_movemask_ps((__m128)_mm_cmpeq_epi16(
1118                                 _mm_load_si128(
1119                                         (__m128i const *)sec_bkt->sig_current),
1120                                 _mm_set1_epi32(sec_hash)));
1121                 *sec_hash_matches |= (_mm_movemask_ps((__m128)_mm_cmpeq_epi16(
1122                                 _mm_load_si128(
1123                                         (__m128i const *)&sec_bkt->sig_current[4]),
1124                                 _mm_set1_epi32(sec_hash)))) << 4;
1125                 break;
1126 #endif
1127         default:
1128                 for (i = 0; i < RTE_HASH_BUCKET_ENTRIES; i++) {
1129                         *prim_hash_matches |=
1130                                 ((prim_hash == prim_bkt->sig_current[i]) << i);
1131                         *sec_hash_matches |=
1132                                 ((sec_hash == sec_bkt->sig_current[i]) << i);
1133                 }
1134         }
1135
1136 }
1137
#define PREFETCH_OFFSET 4
/*
 * Bulk-lookup engine shared by rte_hash_lookup_bulk() and
 * rte_hash_lookup_bulk_data().
 *
 * Software pipeline:
 *   1. prefetch the key data itself;
 *   2. compute primary/secondary hashes and prefetch both candidate
 *      buckets for every key;
 *   3. under the reader lock, compare signatures to build per-key hit
 *      masks and prefetch the first candidate key slot;
 *   4. walk the hit masks (primary bucket first) doing full key
 *      compares, filling positions[] and the hit bitmap.
 *
 * positions[i] receives the key position or -ENOENT; if hit_mask is
 * non-NULL, bit i of *hit_mask is set for every hit; if data is
 * non-NULL, data[i] receives the stored pdata for hits.
 */
static inline void
__rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys,
			int32_t num_keys, int32_t *positions,
			uint64_t *hit_mask, void *data[])
{
	uint64_t hits = 0;
	int32_t i;
	uint32_t prim_hash[RTE_HASH_LOOKUP_BULK_MAX];
	uint32_t sec_hash[RTE_HASH_LOOKUP_BULK_MAX];
	const struct rte_hash_bucket *primary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
	const struct rte_hash_bucket *secondary_bkt[RTE_HASH_LOOKUP_BULK_MAX];
	/* hit masks must start zeroed: compare_signatures() ORs bits in */
	uint32_t prim_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};
	uint32_t sec_hitmask[RTE_HASH_LOOKUP_BULK_MAX] = {0};

	/* Prefetch first keys */
	for (i = 0; i < PREFETCH_OFFSET && i < num_keys; i++)
		rte_prefetch0(keys[i]);

	/*
	 * Prefetch rest of the keys, calculate primary and
	 * secondary bucket and prefetch them
	 */
	for (i = 0; i < (num_keys - PREFETCH_OFFSET); i++) {
		rte_prefetch0(keys[i + PREFETCH_OFFSET]);

		prim_hash[i] = rte_hash_hash(h, keys[i]);
		sec_hash[i] = rte_hash_secondary_hash(prim_hash[i]);

		primary_bkt[i] = &h->buckets[prim_hash[i] & h->bucket_bitmask];
		secondary_bkt[i] = &h->buckets[sec_hash[i] & h->bucket_bitmask];

		rte_prefetch0(primary_bkt[i]);
		rte_prefetch0(secondary_bkt[i]);
	}

	/* Calculate and prefetch rest of the buckets */
	for (; i < num_keys; i++) {
		prim_hash[i] = rte_hash_hash(h, keys[i]);
		sec_hash[i] = rte_hash_secondary_hash(prim_hash[i]);

		primary_bkt[i] = &h->buckets[prim_hash[i] & h->bucket_bitmask];
		secondary_bkt[i] = &h->buckets[sec_hash[i] & h->bucket_bitmask];

		rte_prefetch0(primary_bkt[i]);
		rte_prefetch0(secondary_bkt[i]);
	}

	__hash_rw_reader_lock(h);
	/* Compare signatures and prefetch key slot of first hit */
	for (i = 0; i < num_keys; i++) {
		compare_signatures(&prim_hitmask[i], &sec_hitmask[i],
				primary_bkt[i], secondary_bkt[i],
				prim_hash[i], sec_hash[i], h->sig_cmp_fn);

		if (prim_hitmask[i]) {
			uint32_t first_hit = __builtin_ctzl(prim_hitmask[i]);
			uint32_t key_idx = primary_bkt[i]->key_idx[first_hit];
			const struct rte_hash_key *key_slot =
				(const struct rte_hash_key *)(
				(const char *)h->key_store +
				key_idx * h->key_entry_size);
			rte_prefetch0(key_slot);
			continue;
		}

		if (sec_hitmask[i]) {
			uint32_t first_hit = __builtin_ctzl(sec_hitmask[i]);
			uint32_t key_idx = secondary_bkt[i]->key_idx[first_hit];
			const struct rte_hash_key *key_slot =
				(const struct rte_hash_key *)(
				(const char *)h->key_store +
				key_idx * h->key_entry_size);
			rte_prefetch0(key_slot);
		}
	}

	/* Compare keys, first hits in primary first */
	for (i = 0; i < num_keys; i++) {
		positions[i] = -ENOENT;
		while (prim_hitmask[i]) {
			uint32_t hit_index = __builtin_ctzl(prim_hitmask[i]);

			uint32_t key_idx = primary_bkt[i]->key_idx[hit_index];
			const struct rte_hash_key *key_slot =
				(const struct rte_hash_key *)(
				(const char *)h->key_store +
				key_idx * h->key_entry_size);
			/*
			 * If key index is 0, do not compare key,
			 * as it is checking the dummy slot
			 */
			/*
			 * NOTE(review): bitwise '&' rather than '&&' —
			 * both operands are !!-normalized to 0/1 so the
			 * expression is well-defined; presumably chosen
			 * to avoid a branch. Confirm before changing.
			 */
			if (!!key_idx & !rte_hash_cmp_eq(key_slot->key, keys[i], h)) {
				if (data != NULL)
					data[i] = key_slot->pdata;

				hits |= 1ULL << i;
				positions[i] = key_idx - 1;
				goto next_key;
			}
			/* Clear this candidate bit and try the next hit */
			prim_hitmask[i] &= ~(1 << (hit_index));
		}

		while (sec_hitmask[i]) {
			uint32_t hit_index = __builtin_ctzl(sec_hitmask[i]);

			uint32_t key_idx = secondary_bkt[i]->key_idx[hit_index];
			const struct rte_hash_key *key_slot =
				(const struct rte_hash_key *)(
				(const char *)h->key_store +
				key_idx * h->key_entry_size);
			/*
			 * If key index is 0, do not compare key,
			 * as it is checking the dummy slot
			 */

			if (!!key_idx & !rte_hash_cmp_eq(key_slot->key, keys[i], h)) {
				if (data != NULL)
					data[i] = key_slot->pdata;

				hits |= 1ULL << i;
				positions[i] = key_idx - 1;
				goto next_key;
			}
			sec_hitmask[i] &= ~(1 << (hit_index));
		}

next_key:
		continue;
	}

	__hash_rw_reader_unlock(h);

	if (hit_mask != NULL)
		*hit_mask = hits;
}
1274
/*
 * Look up @num_keys keys at once; positions[i] receives each key's
 * position or -ENOENT. Returns 0 on success.
 */
int
rte_hash_lookup_bulk(const struct rte_hash *h, const void **keys,
		      uint32_t num_keys, int32_t *positions)
{
	/* Reject NULL args and batch sizes outside (0, BULK_MAX] */
	RETURN_IF_TRUE(((h == NULL) || (keys == NULL) || (num_keys == 0) ||
			(num_keys > RTE_HASH_LOOKUP_BULK_MAX) ||
			(positions == NULL)), -EINVAL);

	__rte_hash_lookup_bulk(h, keys, num_keys, positions, NULL, NULL);
	return 0;
}
1286
/*
 * Look up @num_keys keys at once, retrieving stored data pointers.
 * Bit i of *hit_mask is set for each found key and data[i] receives its
 * pdata. Returns the number of hits.
 */
int
rte_hash_lookup_bulk_data(const struct rte_hash *h, const void **keys,
		      uint32_t num_keys, uint64_t *hit_mask, void *data[])
{
	RETURN_IF_TRUE(((h == NULL) || (keys == NULL) || (num_keys == 0) ||
			(num_keys > RTE_HASH_LOOKUP_BULK_MAX) ||
			(hit_mask == NULL)), -EINVAL);

	/* VLA is bounded: num_keys <= RTE_HASH_LOOKUP_BULK_MAX (checked above) */
	int32_t positions[num_keys];

	__rte_hash_lookup_bulk(h, keys, num_keys, positions, hit_mask, data);

	/* Return number of hits */
	return __builtin_popcountl(*hit_mask);
}
1302
1303 int32_t
1304 rte_hash_iterate(const struct rte_hash *h, const void **key, void **data, uint32_t *next)
1305 {
1306         uint32_t bucket_idx, idx, position;
1307         struct rte_hash_key *next_key;
1308
1309         RETURN_IF_TRUE(((h == NULL) || (next == NULL)), -EINVAL);
1310
1311         const uint32_t total_entries = h->num_buckets * RTE_HASH_BUCKET_ENTRIES;
1312         /* Out of bounds */
1313         if (*next >= total_entries)
1314                 return -ENOENT;
1315
1316         /* Calculate bucket and index of current iterator */
1317         bucket_idx = *next / RTE_HASH_BUCKET_ENTRIES;
1318         idx = *next % RTE_HASH_BUCKET_ENTRIES;
1319
1320         /* If current position is empty, go to the next one */
1321         while (h->buckets[bucket_idx].key_idx[idx] == EMPTY_SLOT) {
1322                 (*next)++;
1323                 /* End of table */
1324                 if (*next == total_entries)
1325                         return -ENOENT;
1326                 bucket_idx = *next / RTE_HASH_BUCKET_ENTRIES;
1327                 idx = *next % RTE_HASH_BUCKET_ENTRIES;
1328         }
1329         __hash_rw_reader_lock(h);
1330         /* Get position of entry in key table */
1331         position = h->buckets[bucket_idx].key_idx[idx];
1332         next_key = (struct rte_hash_key *) ((char *)h->key_store +
1333                                 position * h->key_entry_size);
1334         /* Return key and data */
1335         *key = next_key->key;
1336         *data = next_key->pdata;
1337
1338         __hash_rw_reader_unlock(h);
1339
1340         /* Increment iterator */
1341         (*next)++;
1342
1343         return position - 1;
1344 }