1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2017 Intel Corporation
3  */
4
5 #include <string.h>
6 #include <stdio.h>
7
8 #include <rte_common.h>
9 #include <rte_mbuf.h>
10 #include <rte_memory.h>
11 #include <rte_malloc.h>
12 #include <rte_log.h>
13
14 #include "rte_table_hash.h"
15
16 #define KEYS_PER_BUCKET 4
17
18 struct bucket {
19         union {
20                 uintptr_t next;
21                 uint64_t lru_list;
22         };
23         uint16_t sig[KEYS_PER_BUCKET];
24         uint32_t key_pos[KEYS_PER_BUCKET];
25 };
26
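/*
 * Bit 0 of the bucket "next" field doubles as a "next pointer valid" flag:
 * BUCKET_NEXT_SET() sets it when chaining an extension bucket, BUCKET_NEXT()
 * masks it off to recover the pointer and BUCKET_NEXT_VALID() tests it.
 */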
27 #define BUCKET_NEXT(bucket)                                             \
28         ((void *) ((bucket)->next & (~1LU)))
29
30 #define BUCKET_NEXT_VALID(bucket)                                       \
31         ((bucket)->next & 1LU)
32
33 #define BUCKET_NEXT_SET(bucket, bucket_next)                            \
34 do                                                                      \
35         (bucket)->next = (((uintptr_t) ((void *) (bucket_next))) | 1LU);\
36 while (0)
37
38 #define BUCKET_NEXT_SET_NULL(bucket)                                    \
39 do                                                                      \
40         (bucket)->next = 0;                                             \
41 while (0)
42
43 #define BUCKET_NEXT_COPY(bucket, bucket2)                               \
44 do                                                                      \
45         (bucket)->next = (bucket2)->next;                               \
46 while (0)
47
48 #ifdef RTE_TABLE_STATS_COLLECT
49
50 #define RTE_TABLE_HASH_EXT_STATS_PKTS_IN_ADD(table, val) \
51         table->stats.n_pkts_in += val
52 #define RTE_TABLE_HASH_EXT_STATS_PKTS_LOOKUP_MISS(table, val) \
53         table->stats.n_pkts_lookup_miss += val
54
55 #else
56
57 #define RTE_TABLE_HASH_EXT_STATS_PKTS_IN_ADD(table, val)
58 #define RTE_TABLE_HASH_EXT_STATS_PKTS_LOOKUP_MISS(table, val)
59
60 #endif
61
62 struct grinder {
63         struct bucket *bkt;
64         uint64_t sig;
65         uint64_t match;
66         uint32_t key_index;
67 };
68
69 struct rte_table_hash {
70         struct rte_table_stats stats;
71
72         /* Input parameters */
73         uint32_t key_size;
74         uint32_t entry_size;
75         uint32_t n_keys;
76         uint32_t n_buckets;
77         uint32_t n_buckets_ext;
78         rte_table_hash_op_hash f_hash;
79         uint64_t seed;
80         uint32_t key_offset;
81
82         /* Internal */
83         uint64_t bucket_mask;
84         uint32_t key_size_shl;
85         uint32_t data_size_shl;
86         uint32_t key_stack_tos;
87         uint32_t bkt_ext_stack_tos;
88
89         /* Grinder */
90         struct grinder grinders[RTE_PORT_IN_BURST_SIZE_MAX];
91
92         /* Tables */
93         uint64_t *key_mask;
94         struct bucket *buckets;
95         struct bucket *buckets_ext;
96         uint8_t *key_mem;
97         uint8_t *data_mem;
98         uint32_t *key_stack;
99         uint32_t *bkt_ext_stack;
100
101         /* Table memory */
102         uint8_t memory[0] __rte_cache_aligned;
103 };
104
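/*
 * Keys stored in the table are written pre-masked by keycpy(), so keycmp()
 * applies the mask only to the candidate key passed as argument "b".
 */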
105 static int
106 keycmp(void *a, void *b, void *b_mask, uint32_t n_bytes)
107 {
108         uint64_t *a64 = a, *b64 = b, *b_mask64 = b_mask;
109         uint32_t i;
110
111         for (i = 0; i < n_bytes / sizeof(uint64_t); i++)
112                 if (a64[i] != (b64[i] & b_mask64[i]))
113                         return 1;
114
115         return 0;
116 }
117
118 static void
119 keycpy(void *dst, void *src, void *src_mask, uint32_t n_bytes)
120 {
121         uint64_t *dst64 = dst, *src64 = src, *src_mask64 = src_mask;
122         uint32_t i;
123
124         for (i = 0; i < n_bytes / sizeof(uint64_t); i++)
125                 dst64[i] = src64[i] & src_mask64[i];
126 }
127
128 static int
129 check_params_create(struct rte_table_hash_params *params)
130 {
131         /* name */
132         if (params->name == NULL) {
133                 RTE_LOG(ERR, TABLE, "%s: name invalid value\n", __func__);
134                 return -EINVAL;
135         }
136
137         /* key_size */
138         if ((params->key_size < sizeof(uint64_t)) ||
139                 (!rte_is_power_of_2(params->key_size))) {
140                 RTE_LOG(ERR, TABLE, "%s: key_size invalid value\n", __func__);
141                 return -EINVAL;
142         }
143
144         /* n_keys */
145         if (params->n_keys == 0) {
146                 RTE_LOG(ERR, TABLE, "%s: n_keys invalid value\n", __func__);
147                 return -EINVAL;
148         }
149
150         /* n_buckets */
151         if ((params->n_buckets == 0) ||
152                 (!rte_is_power_of_2(params->n_buckets))) {
153                 RTE_LOG(ERR, TABLE, "%s: n_buckets invalid value\n", __func__);
154                 return -EINVAL;
155         }
156
157         /* f_hash */
158         if (params->f_hash == NULL) {
159                 RTE_LOG(ERR, TABLE, "%s: f_hash invalid value\n", __func__);
160                 return -EINVAL;
161         }
162
163         return 0;
164 }
165
166 static void *
167 rte_table_hash_ext_create(void *params, int socket_id, uint32_t entry_size)
168 {
169         struct rte_table_hash_params *p = params;
170         struct rte_table_hash *t;
171         uint64_t table_meta_sz, key_mask_sz, bucket_sz, bucket_ext_sz, key_sz;
172         uint64_t key_stack_sz, bkt_ext_stack_sz, data_sz, total_size;
173         uint64_t key_mask_offset, bucket_offset, bucket_ext_offset, key_offset;
174         uint64_t key_stack_offset, bkt_ext_stack_offset, data_offset;
175         uint32_t n_buckets_ext, i;
176
177         /* Check input parameters */
178         if ((check_params_create(p) != 0) ||
179                 (!rte_is_power_of_2(entry_size)) ||
180                 ((sizeof(struct rte_table_hash) % RTE_CACHE_LINE_SIZE) != 0) ||
181                 (sizeof(struct bucket) != (RTE_CACHE_LINE_SIZE / 2)))
182                 return NULL;
183
184         /*
185          * Table dimensioning
186          *
187          * Objective: Pick the number of bucket extensions (n_buckets_ext) so that
188          * it is guaranteed that n_keys keys can be stored in the table at any time.
189          *
190          * The worst case scenario takes place when all the n_keys keys fall into
191          * the same bucket. Actually, due to the KEYS_PER_BUCKET scheme, the worst
192          * case takes place when (n_keys - KEYS_PER_BUCKET + 1) keys fall into the
193          * same bucket, while the remaining (KEYS_PER_BUCKET - 1) keys each fall
194          * into a different bucket. This case defeats the purpose of the hash table.
195          * It indicates unsuitable f_hash or n_keys to n_buckets ratio.
196          *
197          * n_buckets_ext = n_keys / KEYS_PER_BUCKET + KEYS_PER_BUCKET - 1
198          */
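        /*
         * Example: for n_keys = 1024 and KEYS_PER_BUCKET = 4, the formula
         * below gives n_buckets_ext = 1024 / 4 + 4 - 1 = 259 extension
         * buckets.
         */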
199         n_buckets_ext = p->n_keys / KEYS_PER_BUCKET + KEYS_PER_BUCKET - 1;
200
201         /* Memory allocation */
202         table_meta_sz = RTE_CACHE_LINE_ROUNDUP(sizeof(struct rte_table_hash));
203         key_mask_sz = RTE_CACHE_LINE_ROUNDUP(p->key_size);
204         bucket_sz = RTE_CACHE_LINE_ROUNDUP(p->n_buckets * sizeof(struct bucket));
205         bucket_ext_sz =
206                 RTE_CACHE_LINE_ROUNDUP(n_buckets_ext * sizeof(struct bucket));
207         key_sz = RTE_CACHE_LINE_ROUNDUP(p->n_keys * p->key_size);
208         key_stack_sz = RTE_CACHE_LINE_ROUNDUP(p->n_keys * sizeof(uint32_t));
209         bkt_ext_stack_sz =
210                 RTE_CACHE_LINE_ROUNDUP(n_buckets_ext * sizeof(uint32_t));
211         data_sz = RTE_CACHE_LINE_ROUNDUP(p->n_keys * entry_size);
212         total_size = table_meta_sz + key_mask_sz + bucket_sz + bucket_ext_sz +
213                 key_sz + key_stack_sz + bkt_ext_stack_sz + data_sz;
214
215         if (total_size > SIZE_MAX) {
216                 RTE_LOG(ERR, TABLE, "%s: Cannot allocate %" PRIu64 " bytes"
217                         " for hash table %s\n",
218                         __func__, total_size, p->name);
219                 return NULL;
220         }
221
222         t = rte_zmalloc_socket(p->name,
223                 (size_t)total_size,
224                 RTE_CACHE_LINE_SIZE,
225                 socket_id);
226         if (t == NULL) {
227                 RTE_LOG(ERR, TABLE, "%s: Cannot allocate %" PRIu64 " bytes"
228                         " for hash table %s\n",
229                         __func__, total_size, p->name);
230                 return NULL;
231         }
232         RTE_LOG(INFO, TABLE, "%s (%u-byte key): Hash table %s memory "
233                 "footprint is %" PRIu64 " bytes\n",
234                 __func__, p->key_size, p->name, total_size);
235
236         /* Memory initialization */
237         t->key_size = p->key_size;
238         t->entry_size = entry_size;
239         t->n_keys = p->n_keys;
240         t->n_buckets = p->n_buckets;
241         t->n_buckets_ext = n_buckets_ext;
242         t->f_hash = p->f_hash;
243         t->seed = p->seed;
244         t->key_offset = p->key_offset;
245
246         /* Internal */
247         t->bucket_mask = t->n_buckets - 1;
248         t->key_size_shl = __builtin_ctzl(p->key_size);
249         t->data_size_shl = __builtin_ctzl(entry_size);
250
251         /* Tables */
252         key_mask_offset = 0;
253         bucket_offset = key_mask_offset + key_mask_sz;
254         bucket_ext_offset = bucket_offset + bucket_sz;
255         key_offset = bucket_ext_offset + bucket_ext_sz;
256         key_stack_offset = key_offset + key_sz;
257         bkt_ext_stack_offset = key_stack_offset + key_stack_sz;
258         data_offset = bkt_ext_stack_offset + bkt_ext_stack_sz;
259
260         t->key_mask = (uint64_t *) &t->memory[key_mask_offset];
261         t->buckets = (struct bucket *) &t->memory[bucket_offset];
262         t->buckets_ext = (struct bucket *) &t->memory[bucket_ext_offset];
263         t->key_mem = &t->memory[key_offset];
264         t->key_stack = (uint32_t *) &t->memory[key_stack_offset];
265         t->bkt_ext_stack = (uint32_t *) &t->memory[bkt_ext_stack_offset];
266         t->data_mem = &t->memory[data_offset];
267
268         /* Key mask */
269         if (p->key_mask == NULL)
270                 memset(t->key_mask, 0xFF, p->key_size);
271         else
272                 memcpy(t->key_mask, p->key_mask, p->key_size);
273
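        /*
         * The key stack and the bucket-extension stack are LIFO free lists of
         * slot indices; key_stack_tos and bkt_ext_stack_tos count the free
         * entries currently on each stack.
         */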
274         /* Key stack */
275         for (i = 0; i < t->n_keys; i++)
276                 t->key_stack[i] = t->n_keys - 1 - i;
277         t->key_stack_tos = t->n_keys;
278
279         /* Bucket ext stack */
280         for (i = 0; i < t->n_buckets_ext; i++)
281                 t->bkt_ext_stack[i] = t->n_buckets_ext - 1 - i;
282         t->bkt_ext_stack_tos = t->n_buckets_ext;
283
284         return t;
285 }
286
287 static int
288 rte_table_hash_ext_free(void *table)
289 {
290         struct rte_table_hash *t = table;
291
292         /* Check input parameters */
293         if (t == NULL)
294                 return -EINVAL;
295
296         rte_free(t);
297         return 0;
298 }
299
300 static int
301 rte_table_hash_ext_entry_add(void *table, void *key, void *entry,
302         int *key_found, void **entry_ptr)
303 {
304         struct rte_table_hash *t = table;
305         struct bucket *bkt0, *bkt, *bkt_prev;
306         uint64_t sig;
307         uint32_t bkt_index, i;
308
309         sig = t->f_hash(key, t->key_mask, t->key_size, t->seed);
310         bkt_index = sig & t->bucket_mask;
311         bkt0 = &t->buckets[bkt_index];
312         sig = (sig >> 16) | 1LLU;
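        /*
         * The low bits of the hash select the bucket; the 16-bit signature
         * stored per key is taken from the upper hash bits and ORed with 1 so
         * that a valid signature is never 0 (0 marks a free bucket slot).
         */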
313
314         /* Key is present in the bucket */
315         for (bkt = bkt0; bkt != NULL; bkt = BUCKET_NEXT(bkt))
316                 for (i = 0; i < KEYS_PER_BUCKET; i++) {
317                         uint64_t bkt_sig = (uint64_t) bkt->sig[i];
318                         uint32_t bkt_key_index = bkt->key_pos[i];
319                         uint8_t *bkt_key =
320                                 &t->key_mem[bkt_key_index << t->key_size_shl];
321
322                         if ((sig == bkt_sig) && (keycmp(bkt_key, key, t->key_mask,
323                                 t->key_size) == 0)) {
324                                 uint8_t *data = &t->data_mem[bkt_key_index <<
325                                         t->data_size_shl];
326
327                                 memcpy(data, entry, t->entry_size);
328                                 *key_found = 1;
329                                 *entry_ptr = (void *) data;
330                                 return 0;
331                         }
332                 }
333
334         /* Key is not present in the bucket */
335         for (bkt_prev = NULL, bkt = bkt0; bkt != NULL; bkt_prev = bkt,
336                 bkt = BUCKET_NEXT(bkt))
337                 for (i = 0; i < KEYS_PER_BUCKET; i++) {
338                         uint64_t bkt_sig = (uint64_t) bkt->sig[i];
339
340                         if (bkt_sig == 0) {
341                                 uint32_t bkt_key_index;
342                                 uint8_t *bkt_key, *data;
343
344                                 /* Allocate new key */
345                                 if (t->key_stack_tos == 0) /* No free keys */
346                                         return -ENOSPC;
347
348                                 bkt_key_index = t->key_stack[
349                                         --t->key_stack_tos];
350
351                                 /* Install new key */
352                                 bkt_key = &t->key_mem[bkt_key_index <<
353                                         t->key_size_shl];
354                                 data = &t->data_mem[bkt_key_index <<
355                                         t->data_size_shl];
356
357                                 bkt->sig[i] = (uint16_t) sig;
358                                 bkt->key_pos[i] = bkt_key_index;
359                                 keycpy(bkt_key, key, t->key_mask, t->key_size);
360                                 memcpy(data, entry, t->entry_size);
361
362                                 *key_found = 0;
363                                 *entry_ptr = (void *) data;
364                                 return 0;
365                         }
366                 }
367
368         /* Bucket full: extend bucket */
369         if ((t->bkt_ext_stack_tos > 0) && (t->key_stack_tos > 0)) {
370                 uint32_t bkt_key_index;
371                 uint8_t *bkt_key, *data;
372
373                 /* Allocate new bucket ext */
374                 bkt_index = t->bkt_ext_stack[--t->bkt_ext_stack_tos];
375                 bkt = &t->buckets_ext[bkt_index];
376
377                 /* Chain the new bucket ext */
378                 BUCKET_NEXT_SET(bkt_prev, bkt);
379                 BUCKET_NEXT_SET_NULL(bkt);
380
381                 /* Allocate new key */
382                 bkt_key_index = t->key_stack[--t->key_stack_tos];
383                 bkt_key = &t->key_mem[bkt_key_index << t->key_size_shl];
384
385                 data = &t->data_mem[bkt_key_index << t->data_size_shl];
386
387                 /* Install new key into bucket */
388                 bkt->sig[0] = (uint16_t) sig;
389                 bkt->key_pos[0] = bkt_key_index;
390                 keycpy(bkt_key, key, t->key_mask, t->key_size);
391                 memcpy(data, entry, t->entry_size);
392
393                 *key_found = 0;
394                 *entry_ptr = (void *) data;
395                 return 0;
396         }
397
398         return -ENOSPC;
399 }
400
401 static int
402 rte_table_hash_ext_entry_delete(void *table, void *key, int *key_found,
403 void *entry)
404 {
405         struct rte_table_hash *t = table;
406         struct bucket *bkt0, *bkt, *bkt_prev;
407         uint64_t sig;
408         uint32_t bkt_index, i;
409
410         sig = t->f_hash(key, t->key_mask, t->key_size, t->seed);
411         bkt_index = sig & t->bucket_mask;
412         bkt0 = &t->buckets[bkt_index];
413         sig = (sig >> 16) | 1LLU;
414
415         /* Key is present in the bucket */
416         for (bkt_prev = NULL, bkt = bkt0; bkt != NULL; bkt_prev = bkt,
417                 bkt = BUCKET_NEXT(bkt))
418                 for (i = 0; i < KEYS_PER_BUCKET; i++) {
419                         uint64_t bkt_sig = (uint64_t) bkt->sig[i];
420                         uint32_t bkt_key_index = bkt->key_pos[i];
421                         uint8_t *bkt_key = &t->key_mem[bkt_key_index <<
422                                 t->key_size_shl];
423
424                         if ((sig == bkt_sig) && (keycmp(bkt_key, key, t->key_mask,
425                                 t->key_size) == 0)) {
426                                 uint8_t *data = &t->data_mem[bkt_key_index <<
427                                         t->data_size_shl];
428
429                                 /* Uninstall key from bucket */
430                                 bkt->sig[i] = 0;
431                                 *key_found = 1;
432                                 if (entry)
433                                         memcpy(entry, data, t->entry_size);
434
435                                 /* Free key */
436                                 t->key_stack[t->key_stack_tos++] =
437                                         bkt_key_index;
438
439                                 /* Check if bucket is unused */
440                                 if ((bkt_prev != NULL) &&
441                                     (bkt->sig[0] == 0) && (bkt->sig[1] == 0) &&
442                                     (bkt->sig[2] == 0) && (bkt->sig[3] == 0)) {
443                                         /* Unchain bucket */
444                                         BUCKET_NEXT_COPY(bkt_prev, bkt);
445
446                                         /* Clear bucket */
447                                         memset(bkt, 0, sizeof(struct bucket));
448
449                                         /* Free bucket back to buckets ext */
450                                         bkt_index = bkt - t->buckets_ext;
451                                         t->bkt_ext_stack[t->bkt_ext_stack_tos++]
452                                                 = bkt_index;
453                                 }
454
455                                 return 0;
456                         }
457                 }
458
459         /* Key is not present in the bucket */
460         *key_found = 0;
461         return 0;
462 }
463
464 static int rte_table_hash_ext_lookup_unoptimized(
465         void *table,
466         struct rte_mbuf **pkts,
467         uint64_t pkts_mask,
468         uint64_t *lookup_hit_mask,
469         void **entries)
470 {
471         struct rte_table_hash *t = (struct rte_table_hash *) table;
472         uint64_t pkts_mask_out = 0;
473
474         __rte_unused uint32_t n_pkts_in = __builtin_popcountll(pkts_mask);
475
476         for ( ; pkts_mask; ) {
477                 struct bucket *bkt0, *bkt;
478                 struct rte_mbuf *pkt;
479                 uint8_t *key;
480                 uint64_t pkt_mask, sig;
481                 uint32_t pkt_index, bkt_index, i;
482
483                 pkt_index = __builtin_ctzll(pkts_mask);
484                 pkt_mask = 1LLU << pkt_index;
485                 pkts_mask &= ~pkt_mask;
486
487                 pkt = pkts[pkt_index];
488                 key = RTE_MBUF_METADATA_UINT8_PTR(pkt, t->key_offset);
489                 sig = (uint64_t) t->f_hash(key, t->key_mask, t->key_size, t->seed);
490
491                 bkt_index = sig & t->bucket_mask;
492                 bkt0 = &t->buckets[bkt_index];
493                 sig = (sig >> 16) | 1LLU;
494
495                 /* Key is present in the bucket */
496                 for (bkt = bkt0; bkt != NULL; bkt = BUCKET_NEXT(bkt))
497                         for (i = 0; i < KEYS_PER_BUCKET; i++) {
498                                 uint64_t bkt_sig = (uint64_t) bkt->sig[i];
499                                 uint32_t bkt_key_index = bkt->key_pos[i];
500                                 uint8_t *bkt_key = &t->key_mem[bkt_key_index <<
501                                         t->key_size_shl];
502
503                                 if ((sig == bkt_sig) && (keycmp(bkt_key, key,
504                                         t->key_mask, t->key_size) == 0)) {
505                                         uint8_t *data = &t->data_mem[
506                                         bkt_key_index << t->data_size_shl];
507
508                                         pkts_mask_out |= pkt_mask;
509                                         entries[pkt_index] = (void *) data;
510                                         break;
511                                 }
512                         }
513         }
514
515         *lookup_hit_mask = pkts_mask_out;
516         return 0;
517 }
518
519 /***
520  *
521  * mask = match bitmask
522  * match = at least one match
523  * match_many = more than one match
524  * match_pos = position of first match
525  *
526  *----------------------------------------
527  * mask          match   match_many      match_pos
528  *----------------------------------------
529  * 0000          0       0               00
530  * 0001          1       0               00
531  * 0010          1       0               01
532  * 0011          1       1               00
533  *----------------------------------------
534  * 0100          1       0               10
535  * 0101          1       1               00
536  * 0110          1       1               01
537  * 0111          1       1               00
538  *----------------------------------------
539  * 1000          1       0               11
540  * 1001          1       1               00
541  * 1010          1       1               01
542  * 1011          1       1               00
543  *----------------------------------------
544  * 1100          1       1               10
545  * 1101          1       1               00
546  * 1110          1       1               01
547  * 1111          1       1               00
548  *----------------------------------------
549  *
550  * match = 1111_1111_1111_1110
551  * match_many = 1111_1110_1110_1000
552  * match_pos = 0001_0010_0001_0011__0001_0010_0001_0000
553  *
554  * match = 0xFFFELLU
555  * match_many = 0xFEE8LLU
556  * match_pos = 0x12131210LLU
557  *
558  ***/
559
560 #define LUT_MATCH                                               0xFFFELLU
561 #define LUT_MATCH_MANY                                          0xFEE8LLU
562 #define LUT_MATCH_POS                                           0x12131210LLU
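/*
 * Example: if only bucket_sig[1] and bucket_sig[2] match the packet signature
 * (mask = 0110), then mask_all = 6, match = (0xFFFE >> 6) & 1 = 1,
 * match_many = (0xFEE8 >> 6) & 1 = 1 and match_pos = (0x12131210 >> 12) & 3 = 1.
 */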
563
564 #define lookup_cmp_sig(mbuf_sig, bucket, match, match_many, match_pos)  \
565 {                                                                       \
566         uint64_t bucket_sig[4], mask[4], mask_all;                      \
567                                                                         \
568         bucket_sig[0] = bucket->sig[0];                                 \
569         bucket_sig[1] = bucket->sig[1];                                 \
570         bucket_sig[2] = bucket->sig[2];                                 \
571         bucket_sig[3] = bucket->sig[3];                                 \
572                                                                         \
573         bucket_sig[0] ^= mbuf_sig;                                      \
574         bucket_sig[1] ^= mbuf_sig;                                      \
575         bucket_sig[2] ^= mbuf_sig;                                      \
576         bucket_sig[3] ^= mbuf_sig;                                      \
577                                                                         \
578         mask[0] = 0;                                                    \
579         mask[1] = 0;                                                    \
580         mask[2] = 0;                                                    \
581         mask[3] = 0;                                                    \
582                                                                         \
583         if (bucket_sig[0] == 0)                                         \
584                 mask[0] = 1;                                            \
585         if (bucket_sig[1] == 0)                                         \
586                 mask[1] = 2;                                            \
587         if (bucket_sig[2] == 0)                                         \
588                 mask[2] = 4;                                            \
589         if (bucket_sig[3] == 0)                                         \
590                 mask[3] = 8;                                            \
591                                                                         \
592         mask_all = (mask[0] | mask[1]) | (mask[2] | mask[3]);           \
593                                                                         \
594         match = (LUT_MATCH >> mask_all) & 1;                            \
595         match_many = (LUT_MATCH_MANY >> mask_all) & 1;                  \
596         match_pos = (LUT_MATCH_POS >> (mask_all << 1)) & 3;             \
597 }
598
599 #define lookup_cmp_key(mbuf, key, match_key, f)                         \
600 {                                                                       \
601         uint64_t *pkt_key = RTE_MBUF_METADATA_UINT64_PTR(mbuf, f->key_offset);\
602         uint64_t *bkt_key = (uint64_t *) key;                           \
603         uint64_t *key_mask = f->key_mask;                                       \
604                                                                         \
605         switch (f->key_size) {                                          \
606         case 8:                                                         \
607         {                                                               \
608                 uint64_t xor = (pkt_key[0] & key_mask[0]) ^ bkt_key[0]; \
609                 match_key = 0;                                          \
610                 if (xor == 0)                                           \
611                         match_key = 1;                                  \
612         }                                                               \
613         break;                                                          \
614                                                                         \
615         case 16:                                                        \
616         {                                                               \
617                 uint64_t xor[2], or;                                    \
618                                                                         \
619                 xor[0] = (pkt_key[0] & key_mask[0]) ^ bkt_key[0];               \
620                 xor[1] = (pkt_key[1] & key_mask[1]) ^ bkt_key[1];               \
621                 or = xor[0] | xor[1];                                   \
622                 match_key = 0;                                          \
623                 if (or == 0)                                            \
624                         match_key = 1;                                  \
625         }                                                               \
626         break;                                                          \
627                                                                         \
628         case 32:                                                        \
629         {                                                               \
630                 uint64_t xor[4], or;                                    \
631                                                                         \
632                 xor[0] = (pkt_key[0] & key_mask[0]) ^ bkt_key[0];               \
633                 xor[1] = (pkt_key[1] & key_mask[1]) ^ bkt_key[1];               \
634                 xor[2] = (pkt_key[2] & key_mask[2]) ^ bkt_key[2];               \
635                 xor[3] = (pkt_key[3] & key_mask[3]) ^ bkt_key[3];               \
636                 or = xor[0] | xor[1] | xor[2] | xor[3];                 \
637                 match_key = 0;                                          \
638                 if (or == 0)                                            \
639                         match_key = 1;                                  \
640         }                                                               \
641         break;                                                          \
642                                                                         \
643         case 64:                                                        \
644         {                                                               \
645                 uint64_t xor[8], or;                                    \
646                                                                         \
647                 xor[0] = (pkt_key[0] & key_mask[0]) ^ bkt_key[0];               \
648                 xor[1] = (pkt_key[1] & key_mask[1]) ^ bkt_key[1];               \
649                 xor[2] = (pkt_key[2] & key_mask[2]) ^ bkt_key[2];               \
650                 xor[3] = (pkt_key[3] & key_mask[3]) ^ bkt_key[3];               \
651                 xor[4] = (pkt_key[4] & key_mask[4]) ^ bkt_key[4];               \
652                 xor[5] = (pkt_key[5] & key_mask[5]) ^ bkt_key[5];               \
653                 xor[6] = (pkt_key[6] & key_mask[6]) ^ bkt_key[6];               \
654                 xor[7] = (pkt_key[7] & key_mask[7]) ^ bkt_key[7];               \
655                 or = xor[0] | xor[1] | xor[2] | xor[3] |                \
656                         xor[4] | xor[5] | xor[6] | xor[7];              \
657                 match_key = 0;                                          \
658                 if (or == 0)                                            \
659                         match_key = 1;                                  \
660         }                                                               \
661         break;                                                          \
662                                                                         \
663         default:                                                        \
664                 match_key = 0;                                          \
665                 if (keycmp(bkt_key, pkt_key, key_mask, f->key_size) == 0)       \
666                         match_key = 1;                                  \
667         }                                                               \
668 }
669
670 #define lookup2_stage0(t, g, pkts, pkts_mask, pkt00_index, pkt01_index) \
671 {                                                                       \
672         uint64_t pkt00_mask, pkt01_mask;                                \
673         struct rte_mbuf *mbuf00, *mbuf01;                               \
674         uint32_t key_offset = t->key_offset;                    \
675                                                                         \
676         pkt00_index = __builtin_ctzll(pkts_mask);                       \
677         pkt00_mask = 1LLU << pkt00_index;                               \
678         pkts_mask &= ~pkt00_mask;                                       \
679         mbuf00 = pkts[pkt00_index];                                     \
680                                                                         \
681         pkt01_index = __builtin_ctzll(pkts_mask);                       \
682         pkt01_mask = 1LLU << pkt01_index;                               \
683         pkts_mask &= ~pkt01_mask;                                       \
684         mbuf01 = pkts[pkt01_index];                                     \
685                                                                         \
686         rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf00, key_offset));\
687         rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf01, key_offset));\
688 }
689
690 #define lookup2_stage0_with_odd_support(t, g, pkts, pkts_mask, pkt00_index, \
691         pkt01_index)                                                    \
692 {                                                                       \
693         uint64_t pkt00_mask, pkt01_mask;                                \
694         struct rte_mbuf *mbuf00, *mbuf01;                               \
695         uint32_t key_offset = t->key_offset;                    \
696                                                                         \
697         pkt00_index = __builtin_ctzll(pkts_mask);                       \
698         pkt00_mask = 1LLU << pkt00_index;                               \
699         pkts_mask &= ~pkt00_mask;                                       \
700         mbuf00 = pkts[pkt00_index];                                     \
701                                                                         \
702         pkt01_index = __builtin_ctzll(pkts_mask);                       \
703         if (pkts_mask == 0)                                             \
704                 pkt01_index = pkt00_index;                              \
705         pkt01_mask = 1LLU << pkt01_index;                               \
706         pkts_mask &= ~pkt01_mask;                                       \
707         mbuf01 = pkts[pkt01_index];                                     \
708                                                                         \
709         rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf00, key_offset));\
710         rte_prefetch0(RTE_MBUF_METADATA_UINT8_PTR(mbuf01, key_offset));\
711 }
712
713 #define lookup2_stage1(t, g, pkts, pkt10_index, pkt11_index)    \
714 {                                                                       \
715         struct grinder *g10, *g11;                                      \
716         uint64_t sig10, sig11, bkt10_index, bkt11_index;                \
717         struct rte_mbuf *mbuf10, *mbuf11;                               \
718         struct bucket *bkt10, *bkt11, *buckets = t->buckets;            \
719         uint8_t *key10, *key11;                                         \
720         uint64_t bucket_mask = t->bucket_mask;                          \
721         rte_table_hash_op_hash f_hash = t->f_hash;                      \
722         uint64_t seed = t->seed;                                        \
723         uint32_t key_size = t->key_size;                                \
724         uint32_t key_offset = t->key_offset;                            \
725                                                                         \
726         mbuf10 = pkts[pkt10_index];                                     \
727         key10 = RTE_MBUF_METADATA_UINT8_PTR(mbuf10, key_offset);        \
728         sig10 = (uint64_t) f_hash(key10, t->key_mask, key_size, seed);  \
729         bkt10_index = sig10 & bucket_mask;                              \
730         bkt10 = &buckets[bkt10_index];                                  \
731                                                                         \
732         mbuf11 = pkts[pkt11_index];                                     \
733         key11 = RTE_MBUF_METADATA_UINT8_PTR(mbuf11, key_offset);        \
734         sig11 = (uint64_t) f_hash(key11, t->key_mask, key_size, seed);  \
735         bkt11_index = sig11 & bucket_mask;                              \
736         bkt11 = &buckets[bkt11_index];                                  \
737                                                                         \
738         rte_prefetch0(bkt10);                                           \
739         rte_prefetch0(bkt11);                                           \
740                                                                         \
741         g10 = &g[pkt10_index];                                          \
742         g10->sig = sig10;                                               \
743         g10->bkt = bkt10;                                               \
744                                                                         \
745         g11 = &g[pkt11_index];                                          \
746         g11->sig = sig11;                                               \
747         g11->bkt = bkt11;                                               \
748 }
749
750 #define lookup2_stage2(t, g, pkt20_index, pkt21_index, pkts_mask_match_many)\
751 {                                                                       \
752         struct grinder *g20, *g21;                                      \
753         uint64_t sig20, sig21;                                          \
754         struct bucket *bkt20, *bkt21;                                   \
755         uint8_t *key20, *key21, *key_mem = t->key_mem;                  \
756         uint64_t match20, match21, match_many20, match_many21;          \
757         uint64_t match_pos20, match_pos21;                              \
758         uint32_t key20_index, key21_index, key_size_shl = t->key_size_shl;\
759                                                                         \
760         g20 = &g[pkt20_index];                                          \
761         sig20 = g20->sig;                                               \
762         bkt20 = g20->bkt;                                               \
763         sig20 = (sig20 >> 16) | 1LLU;                                   \
764         lookup_cmp_sig(sig20, bkt20, match20, match_many20, match_pos20);\
765         match20 <<= pkt20_index;                                        \
766         match_many20 |= BUCKET_NEXT_VALID(bkt20);                       \
767         match_many20 <<= pkt20_index;                                   \
768         key20_index = bkt20->key_pos[match_pos20];                      \
769         key20 = &key_mem[key20_index << key_size_shl];                  \
770                                                                         \
771         g21 = &g[pkt21_index];                                          \
772         sig21 = g21->sig;                                               \
773         bkt21 = g21->bkt;                                               \
774         sig21 = (sig21 >> 16) | 1LLU;                                   \
775         lookup_cmp_sig(sig21, bkt21, match21, match_many21, match_pos21);\
776         match21 <<= pkt21_index;                                        \
777         match_many21 |= BUCKET_NEXT_VALID(bkt21);                       \
778         match_many21 <<= pkt21_index;                                   \
779         key21_index = bkt21->key_pos[match_pos21];                      \
780         key21 = &key_mem[key21_index << key_size_shl];                  \
781                                                                         \
782         rte_prefetch0(key20);                                           \
783         rte_prefetch0(key21);                                           \
784                                                                         \
785         pkts_mask_match_many |= match_many20 | match_many21;            \
786                                                                         \
787         g20->match = match20;                                           \
788         g20->key_index = key20_index;                                   \
789                                                                         \
790         g21->match = match21;                                           \
791         g21->key_index = key21_index;                                   \
792 }
793
794 #define lookup2_stage3(t, g, pkts, pkt30_index, pkt31_index, pkts_mask_out, \
795         entries)                                                        \
796 {                                                                       \
797         struct grinder *g30, *g31;                                      \
798         struct rte_mbuf *mbuf30, *mbuf31;                               \
799         uint8_t *key30, *key31, *key_mem = t->key_mem;                  \
800         uint8_t *data30, *data31, *data_mem = t->data_mem;              \
801         uint64_t match30, match31, match_key30, match_key31, match_keys;\
802         uint32_t key30_index, key31_index;                              \
803         uint32_t key_size_shl = t->key_size_shl;                        \
804         uint32_t data_size_shl = t->data_size_shl;                      \
805                                                                         \
806         mbuf30 = pkts[pkt30_index];                                     \
807         g30 = &g[pkt30_index];                                          \
808         match30 = g30->match;                                           \
809         key30_index = g30->key_index;                                   \
810         key30 = &key_mem[key30_index << key_size_shl];                  \
811         lookup_cmp_key(mbuf30, key30, match_key30, t);                  \
812         match_key30 <<= pkt30_index;                                    \
813         match_key30 &= match30;                                         \
814         data30 = &data_mem[key30_index << data_size_shl];               \
815         entries[pkt30_index] = data30;                                  \
816                                                                         \
817         mbuf31 = pkts[pkt31_index];                                     \
818         g31 = &g[pkt31_index];                                          \
819         match31 = g31->match;                                           \
820         key31_index = g31->key_index;                                   \
821         key31 = &key_mem[key31_index << key_size_shl];                  \
822         lookup_cmp_key(mbuf31, key31, match_key31, t);                  \
823         match_key31 <<= pkt31_index;                                    \
824         match_key31 &= match31;                                         \
825         data31 = &data_mem[key31_index << data_size_shl];               \
826         entries[pkt31_index] = data31;                                  \
827                                                                         \
828         rte_prefetch0(data30);                                          \
829         rte_prefetch0(data31);                                          \
830                                                                         \
831         match_keys = match_key30 | match_key31;                         \
832         pkts_mask_out |= match_keys;                                    \
833 }
834
835 /***
836 * The lookup function implements a 4-stage pipeline, with each stage processing
837 * two different packets. The purpose of pipelined implementation is to hide the
838 * latency of prefetching the data structures and loosen the data dependency
839 * between instructions.
840 *
841 *  p00  _______   p10  _______   p20  _______   p30  _______
842 *----->|       |----->|       |----->|       |----->|       |----->
843 *      |   0   |      |   1   |      |   2   |      |   3   |
844 *----->|_______|----->|_______|----->|_______|----->|_______|----->
845 *  p01            p11            p21            p31
846 *
847 * The naming convention is:
848 *    pXY = packet Y of stage X, X = 0 .. 3, Y = 0 .. 1
849 *
850 ***/
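/*
 * Per-packet state (hash signature, bucket pointer, match bit and key index)
 * is carried between pipeline stages in the per-burst grinder array
 * (t->grinders), indexed by the packet's position in the burst.
 */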
851 static int rte_table_hash_ext_lookup(
852         void *table,
853         struct rte_mbuf **pkts,
854         uint64_t pkts_mask,
855         uint64_t *lookup_hit_mask,
856         void **entries)
857 {
858         struct rte_table_hash *t = (struct rte_table_hash *) table;
859         struct grinder *g = t->grinders;
860         uint64_t pkt00_index, pkt01_index, pkt10_index, pkt11_index;
861         uint64_t pkt20_index, pkt21_index, pkt30_index, pkt31_index;
862         uint64_t pkts_mask_out = 0, pkts_mask_match_many = 0;
863         int status = 0;
864
865         __rte_unused uint32_t n_pkts_in = __builtin_popcountll(pkts_mask);
866         RTE_TABLE_HASH_EXT_STATS_PKTS_IN_ADD(t, n_pkts_in);
867
868         /* Cannot run the pipeline with fewer than 7 packets */
869         if (__builtin_popcountll(pkts_mask) < 7) {
870                 status = rte_table_hash_ext_lookup_unoptimized(table, pkts,
871                         pkts_mask, lookup_hit_mask, entries);
872                 RTE_TABLE_HASH_EXT_STATS_PKTS_LOOKUP_MISS(t, n_pkts_in -
873                                 __builtin_popcountll(*lookup_hit_mask));
874                 return status;
875         }
876
877         /* Pipeline stage 0 */
878         lookup2_stage0(t, g, pkts, pkts_mask, pkt00_index, pkt01_index);
879
880         /* Pipeline feed */
881         pkt10_index = pkt00_index;
882         pkt11_index = pkt01_index;
883
884         /* Pipeline stage 0 */
885         lookup2_stage0(t, g, pkts, pkts_mask, pkt00_index, pkt01_index);
886
887         /* Pipeline stage 1 */
888         lookup2_stage1(t, g, pkts, pkt10_index, pkt11_index);
889
890         /* Pipeline feed */
891         pkt20_index = pkt10_index;
892         pkt21_index = pkt11_index;
893         pkt10_index = pkt00_index;
894         pkt11_index = pkt01_index;
895
896         /* Pipeline stage 0 */
897         lookup2_stage0(t, g, pkts, pkts_mask, pkt00_index, pkt01_index);
898
899         /* Pipeline stage 1 */
900         lookup2_stage1(t, g, pkts, pkt10_index, pkt11_index);
901
902         /* Pipeline stage 2 */
903         lookup2_stage2(t, g, pkt20_index, pkt21_index, pkts_mask_match_many);
904
905         /*
906          * Pipeline run
907          *
908          */
909         for ( ; pkts_mask; ) {
910                 /* Pipeline feed */
911                 pkt30_index = pkt20_index;
912                 pkt31_index = pkt21_index;
913                 pkt20_index = pkt10_index;
914                 pkt21_index = pkt11_index;
915                 pkt10_index = pkt00_index;
916                 pkt11_index = pkt01_index;
917
918                 /* Pipeline stage 0 */
919                 lookup2_stage0_with_odd_support(t, g, pkts, pkts_mask,
920                         pkt00_index, pkt01_index);
921
922                 /* Pipeline stage 1 */
923                 lookup2_stage1(t, g, pkts, pkt10_index, pkt11_index);
924
925                 /* Pipeline stage 2 */
926                 lookup2_stage2(t, g, pkt20_index, pkt21_index,
927                         pkts_mask_match_many);
928
929                 /* Pipeline stage 3 */
930                 lookup2_stage3(t, g, pkts, pkt30_index, pkt31_index,
931                         pkts_mask_out, entries);
932         }
933
934         /* Pipeline feed */
935         pkt30_index = pkt20_index;
936         pkt31_index = pkt21_index;
937         pkt20_index = pkt10_index;
938         pkt21_index = pkt11_index;
939         pkt10_index = pkt00_index;
940         pkt11_index = pkt01_index;
941
942         /* Pipeline stage 1 */
943         lookup2_stage1(t, g, pkts, pkt10_index, pkt11_index);
944
945         /* Pipeline stage 2 */
946         lookup2_stage2(t, g, pkt20_index, pkt21_index, pkts_mask_match_many);
947
948         /* Pipeline stage 3 */
949         lookup2_stage3(t, g, pkts, pkt30_index, pkt31_index, pkts_mask_out,
950                 entries);
951
952         /* Pipeline feed */
953         pkt30_index = pkt20_index;
954         pkt31_index = pkt21_index;
955         pkt20_index = pkt10_index;
956         pkt21_index = pkt11_index;
957
958         /* Pipeline stage 2 */
959         lookup2_stage2(t, g, pkt20_index, pkt21_index, pkts_mask_match_many);
960
961         /* Pipeline stage 3 */
962         lookup2_stage3(t, g, pkts, pkt30_index, pkt31_index, pkts_mask_out,
963                 entries);
964
965         /* Pipeline feed */
966         pkt30_index = pkt20_index;
967         pkt31_index = pkt21_index;
968
969         /* Pipeline stage 3 */
970         lookup2_stage3(t, g, pkts, pkt30_index, pkt31_index, pkts_mask_out,
971                 entries);
972
973         /* Slow path */
974         pkts_mask_match_many &= ~pkts_mask_out;
975         if (pkts_mask_match_many) {
976                 uint64_t pkts_mask_out_slow = 0;
977
978                 status = rte_table_hash_ext_lookup_unoptimized(table, pkts,
979                         pkts_mask_match_many, &pkts_mask_out_slow, entries);
980                 pkts_mask_out |= pkts_mask_out_slow;
981         }
982
983         *lookup_hit_mask = pkts_mask_out;
984         RTE_TABLE_HASH_EXT_STATS_PKTS_LOOKUP_MISS(t, n_pkts_in - __builtin_popcountll(pkts_mask_out));
985         return status;
986 }
987
988 static int
989 rte_table_hash_ext_stats_read(void *table, struct rte_table_stats *stats, int clear)
990 {
991         struct rte_table_hash *t = table;
992
993         if (stats != NULL)
994                 memcpy(stats, &t->stats, sizeof(t->stats));
995
996         if (clear)
997                 memset(&t->stats, 0, sizeof(t->stats));
998
999         return 0;
1000 }
1001
1002 struct rte_table_ops rte_table_hash_ext_ops = {
1003         .f_create = rte_table_hash_ext_create,
1004         .f_free = rte_table_hash_ext_free,
1005         .f_add = rte_table_hash_ext_entry_add,
1006         .f_delete = rte_table_hash_ext_entry_delete,
1007         .f_add_bulk = NULL,
1008         .f_delete_bulk = NULL,
1009         .f_lookup = rte_table_hash_ext_lookup,
1010         .f_stats = rte_table_hash_ext_stats_read,
1011 };
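
/*
 * Minimal usage sketch (illustrative only; my_hash_func, my_entry and
 * APP_META_OFFSET are application-defined placeholders, not part of this
 * file). The table is created through the ops structure above and then
 * driven via f_add/f_delete/f_lookup. Note that both key_size and the
 * entry_size passed to f_create must be powers of two.
 *
 *	struct rte_table_hash_params params = {
 *		.name = "ext_hash",
 *		.key_size = 16,
 *		.key_offset = APP_META_OFFSET, // key location in mbuf metadata
 *		.key_mask = NULL,              // NULL means match all key bytes
 *		.n_keys = 1 << 16,
 *		.n_buckets = 1 << 14,
 *		.f_hash = my_hash_func,        // rte_table_hash_op_hash callback
 *		.seed = 0,
 *	};
 *	void *tbl = rte_table_hash_ext_ops.f_create(&params, rte_socket_id(),
 *		sizeof(struct my_entry));
 */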