/*
 * Copyright (c) 2017 Intel and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * vppinfra already includes tons of different hash tables.
 * The MagLev flow table is a bit different. It has to be very efficient
 * for both writing and reading operations, but it does not need to
 * be 100% reliable (a write can fail). It also needs to recycle
 * old entries in a lazy way.
 *
 * This hash table is therefore as simple as a hash table can be:
 * fixed total size, fixed bucket size.
 * The advantage is that it can be very efficient (maybe).
 */

#ifndef KP_PLUGIN_KP_KPHASH_H_
#define KP_PLUGIN_KP_KPHASH_H_

#include <vnet/vnet.h>
#include <vppinfra/xxhash.h>
#include <vppinfra/crc32.h>

/**
 * @brief Number of entries per bucket.
 */
#define KPHASH_ENTRY_PER_BUCKET 4

#define KP_HASH_DO_NOT_USE_SSE_BUCKETS 0

/*
 * 32-bit integer comparison for running (wrapping) values.
 * 1 > 0 is true, but 1 > 0xffffffff is also true,
 * since the counter is assumed to have wrapped around.
 */
#define clib_u32_loop_gt(a, b) (((u32)(a)) - ((u32)(b)) < 0x7fffffff)
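
/*
 * Worked example (added for clarity): the subtraction is done modulo
 * 2^32, so the comparison stays correct across counter wrap-around.
 *
 *   clib_u32_loop_gt(1, 0)           // true:  1 - 0          == 1 < 0x7fffffff
 *   clib_u32_loop_gt(1, 0xffffffff)  // true:  1 - 0xffffffff == 2 < 0x7fffffff
 *   clib_u32_loop_gt(0, 1)           // false: 0 - 1          == 0xffffffff
 */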

/**
 * @brief One bucket contains 4 entries.
 * Each bucket takes one 64B cache line in memory.
 */
typedef struct {
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
  u32 hash[KPHASH_ENTRY_PER_BUCKET];
  u32 timeout[KPHASH_ENTRY_PER_BUCKET];
  u32 vip[KPHASH_ENTRY_PER_BUCKET];
  u32 value[KPHASH_ENTRY_PER_BUCKET];
} kp_hash_bucket_t;
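
/*
 * Size check (added for clarity): 4 u32 fields x KPHASH_ENTRY_PER_BUCKET (4)
 * entries x 4 bytes = 64 bytes, so one bucket fills exactly one cache line
 * on the usual 64B-cache-line targets.
 */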

typedef struct {
  u32 buckets_mask;
  u32 timeout;
  kp_hash_bucket_t buckets[];
} kp_hash_t;

#define kp_hash_nbuckets(h) (((h)->buckets_mask) + 1)
#define kp_hash_size(h) (kp_hash_nbuckets(h) * KPHASH_ENTRY_PER_BUCKET)

#define kp_hash_foreach_bucket(h, bucket) \
  for (bucket = (h)->buckets; \
       bucket < (h)->buckets + kp_hash_nbuckets(h); \
       bucket++)

#define kp_hash_foreach_entry(h, bucket, i) \
  kp_hash_foreach_bucket(h, bucket) \
    for (i = 0; i < KPHASH_ENTRY_PER_BUCKET; i++)

#define kp_hash_foreach_valid_entry(h, bucket, i, now) \
  kp_hash_foreach_entry(h, bucket, i) \
    if (!clib_u32_loop_gt((now), bucket->timeout[i]))
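
/*
 * A minimal iteration sketch (illustrative only; 'h' and 'now' are assumed
 * to be provided by the caller, and use_entry() is a hypothetical helper):
 *
 *   u32 i;
 *   kp_hash_bucket_t *bucket;
 *   kp_hash_foreach_valid_entry(h, bucket, i, now) {
 *     use_entry(bucket->vip[i], bucket->value[i]);
 *   }
 */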

static_always_inline
kp_hash_t *kp_hash_alloc(u32 buckets, u32 timeout)
{
  if (!is_pow2(buckets))
    return NULL;
  // Allocate 1 more bucket for prefetch
  u32 size = ((u64)&((kp_hash_t *)(0))->buckets[0]) +
      sizeof(kp_hash_bucket_t) * (buckets + 1);
  u8 *mem = 0;
  vec_alloc_aligned(mem, size, CLIB_CACHE_LINE_BYTES);
  kp_hash_t *h = (kp_hash_t *)mem;
  h->buckets_mask = (buckets - 1);
  h->timeout = timeout;
  return h;
}

static_always_inline
void kp_hash_free(kp_hash_t *h)
{
  u8 *mem = (u8 *)h;
  vec_free(mem);
}
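
/*
 * Illustrative allocation sketch (not from the original header): buckets
 * must be a power of two, and the timeout is expressed in the same unit
 * the caller later passes as time_now.
 *
 *   kp_hash_t *h = kp_hash_alloc(1024, 40);
 *   if (h) {
 *     ...
 *     kp_hash_free(h);
 *   }
 */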

static_always_inline
u32 kp_hash_hash(u64 k0, u64 k1, u64 k2, u64 k3, u64 k4)
{
#ifdef clib_crc32c_uses_intrinsics
  u64 key[5] = {k0, k1, k2, k3, k4};
  return clib_crc32c ((u8 *) key, 40);
#else
  u64 tmp = k0 ^ k1 ^ k2 ^ k3 ^ k4;
  return (u32)clib_xxhash (tmp);
#endif
}
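
/*
 * Illustrative use (added; the field names are hypothetical): a 5-tuple
 * can be folded into five 64-bit words and hashed in one call, e.g.:
 *
 *   u32 h32 = kp_hash_hash(ip6->src_address.as_u64[0],
 *                          ip6->src_address.as_u64[1],
 *                          ip6->dst_address.as_u64[0],
 *                          ip6->dst_address.as_u64[1],
 *                          ((u64)tcp->src_port << 16) | tcp->dst_port);
 */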

static_always_inline
void kp_hash_prefetch_bucket(kp_hash_t *ht, u32 hash)
{
  kp_hash_bucket_t *bucket = &ht->buckets[hash & ht->buckets_mask];
  CLIB_PREFETCH(bucket, sizeof(*bucket), READ);
}

static_always_inline
void kp_hash_get(kp_hash_t *ht, u32 hash, u32 vip, u32 time_now,
                 u32 *available_index, u32 *found_value)
{
  kp_hash_bucket_t *bucket = &ht->buckets[hash & ht->buckets_mask];
  *found_value = ~0;
  *available_index = ~0;
#if __SSE4_2__ && KP_HASH_DO_NOT_USE_SSE_BUCKETS == 0
  u32 bitmask, found_index;
  __m128i mask;

  // mask[*] = timeout[*] > now
  mask = _mm_cmpgt_epi32(_mm_loadu_si128 ((__m128i *) bucket->timeout),
                         _mm_set1_epi32 (time_now));
  // bitmask has 4 bits set per expired entry (timeout[*/4] <= now)
  bitmask = (~_mm_movemask_epi8(mask)) & 0xffff;
  // Get the first expired index, if any.
  *available_index = (bitmask)?__builtin_ctz(bitmask)/4:*available_index;

  // mask[*] = (timeout[*] > now) && (hash[*] == hash)
  mask = _mm_and_si128(mask,
                       _mm_cmpeq_epi32(
                           _mm_loadu_si128 ((__m128i *) bucket->hash),
                           _mm_set1_epi32 (hash)));

  // mask[*] = (timeout[*] > now) && (hash[*] == hash) && (vip[*] == vip)
  mask = _mm_and_si128(mask,
                       _mm_cmpeq_epi32(
                           _mm_loadu_si128 ((__m128i *) bucket->vip),
                           _mm_set1_epi32 (vip)));

  // bitmask has 4 bits set per entry with
  // (timeout[*/4] > now) && (hash[*/4] == hash) && (vip[*/4] == vip)
  bitmask = _mm_movemask_epi8(mask);
  // Get the first matching index, if any
  found_index = (bitmask)?__builtin_ctz(bitmask)/4:0;
  ASSERT(found_index < 4);
  *found_value = (bitmask)?bucket->value[found_index]:*found_value;
  bucket->timeout[found_index] =
      (bitmask)?time_now + ht->timeout:bucket->timeout[found_index];
#else
  u32 i;
  for (i = 0; i < KPHASH_ENTRY_PER_BUCKET; i++) {
      u8 cmp = (bucket->hash[i] == hash && bucket->vip[i] == vip);
      u8 timeouted = clib_u32_loop_gt(time_now, bucket->timeout[i]);
      // An entry is a hit only when the key matches and it has not expired;
      // only then is the value returned and the timeout refreshed.
      *found_value = (cmp && !timeouted)?bucket->value[i]:*found_value;
      bucket->timeout[i] = (cmp && !timeouted)?time_now + ht->timeout:bucket->timeout[i];
      *available_index = (timeouted && (*available_index == ~0))?i:*available_index;
  }
#endif
}
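
/*
 * Typical lookup-or-learn flow (sketch added for illustration; flow_hash,
 * vip_index, new_value and time_now are assumed to come from the caller):
 *
 *   u32 available_index, found_value;
 *   kp_hash_get(h, flow_hash, vip_index, time_now,
 *               &available_index, &found_value);
 *   if (found_value == ~0 && available_index != ~0) {
 *     // Miss, but a free (or expired) slot exists. When the bucket is
 *     // full, available_index stays ~0 and the write simply fails,
 *     // which this design explicitly tolerates.
 *     kp_hash_put(h, flow_hash, new_value, vip_index,
 *                 available_index, time_now);
 *   }
 */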

static_always_inline
u32 kp_hash_available_value(kp_hash_t *h, u32 hash, u32 available_index)
{
  return h->buckets[hash & h->buckets_mask].value[available_index];
}

static_always_inline
void kp_hash_put(kp_hash_t *h, u32 hash, u32 value, u32 vip,
                 u32 available_index, u32 time_now)
{
  kp_hash_bucket_t *bucket = &h->buckets[hash & h->buckets_mask];
  bucket->hash[available_index] = hash;
  bucket->value[available_index] = value;
  bucket->timeout[available_index] = time_now + h->timeout;
  bucket->vip[available_index] = vip;
}

static_always_inline
u32 kp_hash_elts(kp_hash_t *h, u32 time_now)
{
  u32 tot = 0;
  u32 i;
  kp_hash_bucket_t *bucket;
  kp_hash_foreach_valid_entry(h, bucket, i, time_now) {
      tot++;
  }
  return tot;
}

#endif /* KP_PLUGIN_KP_KPHASH_H_ */