/*
 * Copyright (c) 2012 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
/**
 * vppinfra already includes tons of different hash tables.
 * The MagLev flow table is a bit different. It has to be very efficient
 * for both writing and reading operations, but it does not need to
 * be 100% reliable (a write can fail). It also needs to recycle
 * old entries in a lazy way.
 *
 * This hash table is deliberately the simplest design possible:
 * fixed total size, fixed bucket size.
 * The advantage is that it can be very efficient (maybe).
 */
29 #ifndef LB_PLUGIN_LB_LBHASH_H_
30 #define LB_PLUGIN_LB_LBHASH_H_
32 #include <vnet/vnet.h>
33 #include <vppinfra/lb_hash_hash.h>
35 #if defined (__SSE4_2__)
36 #include <immintrin.h>
40 * @brief Number of entries per bucket.
42 #define LBHASH_ENTRY_PER_BUCKET 4
44 #define LB_HASH_DO_NOT_USE_SSE_BUCKETS 0
47 * @brief One bucket contains 4 entries.
48 * Each bucket takes one 64B cache line in memory.
51 CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
52 u32 hash[LBHASH_ENTRY_PER_BUCKET];
53 u32 timeout[LBHASH_ENTRY_PER_BUCKET];
54 u32 vip[LBHASH_ENTRY_PER_BUCKET];
55 u32 value[LBHASH_ENTRY_PER_BUCKET];
61 lb_hash_bucket_t buckets[];
#define lb_hash_nbuckets(h) (((h)->buckets_mask) + 1)
/* Total entry capacity: nbuckets * entries-per-bucket.
 * BUGFIX: was (buckets_mask + LBHASH_ENTRY_PER_BUCKET), which under-counts
 * for every table with more than one bucket. */
#define lb_hash_size(h) (lb_hash_nbuckets(h) * LBHASH_ENTRY_PER_BUCKET)

#define lb_hash_foreach_bucket(h, bucket) \
  for (bucket = (h)->buckets; \
       bucket < (h)->buckets + lb_hash_nbuckets(h); \
       bucket++)

#define lb_hash_foreach_entry(h, bucket, i) \
    lb_hash_foreach_bucket(h, bucket) \
      for (i = 0; i < LBHASH_ENTRY_PER_BUCKET; i++)

/* Iterate only over entries whose timeout has not yet been reached
 * ("valid" == not expired at 'now', using serial-number comparison). */
#define lb_hash_foreach_valid_entry(h, bucket, i, now) \
    lb_hash_foreach_entry(h, bucket, i) \
       if (!clib_u32_loop_gt((now), bucket->timeout[i]))
81 lb_hash_t *lb_hash_alloc(u32 buckets, u32 timeout)
83 if (!is_pow2(buckets))
86 // Allocate 1 more bucket for prefetch
87 u32 size = ((u64)&((lb_hash_t *)(0))->buckets[0]) +
88 sizeof(lb_hash_bucket_t) * (buckets + 1);
91 vec_alloc_aligned(mem, size, CLIB_CACHE_LINE_BYTES);
93 h->buckets_mask = (buckets - 1);
99 void lb_hash_free(lb_hash_t *h)
106 void lb_hash_prefetch_bucket(lb_hash_t *ht, u32 hash)
108 lb_hash_bucket_t *bucket = &ht->buckets[hash & ht->buckets_mask];
109 CLIB_PREFETCH(bucket, sizeof(*bucket), READ);
113 void lb_hash_get(lb_hash_t *ht, u32 hash, u32 vip, u32 time_now,
114 u32 *available_index, u32 *found_value)
116 lb_hash_bucket_t *bucket = &ht->buckets[hash & ht->buckets_mask];
118 *available_index = ~0;
119 #if __SSE4_2__ && LB_HASH_DO_NOT_USE_SSE_BUCKETS == 0
120 u32 bitmask, found_index;
123 // mask[*] = timeout[*] > now
124 mask = _mm_cmpgt_epi32(_mm_loadu_si128 ((__m128i *) bucket->timeout),
125 _mm_set1_epi32 (time_now));
126 // bitmask[*] = now <= timeout[*/4]
127 bitmask = (~_mm_movemask_epi8(mask)) & 0xffff;
128 // Get first index with now <= timeout[*], if any.
129 *available_index = (bitmask)?__builtin_ctz(bitmask)/4:*available_index;
131 // mask[*] = (timeout[*] > now) && (hash[*] == hash)
132 mask = _mm_and_si128(mask,
134 _mm_loadu_si128 ((__m128i *) bucket->hash),
135 _mm_set1_epi32 (hash)));
137 // Load the array of vip values
138 // mask[*] = (timeout[*] > now) && (hash[*] == hash) && (vip[*] == vip)
139 mask = _mm_and_si128(mask,
141 _mm_loadu_si128 ((__m128i *) bucket->vip),
142 _mm_set1_epi32 (vip)));
144 // mask[*] = (timeout[*x4] > now) && (hash[*x4] == hash) && (vip[*x4] == vip)
145 bitmask = _mm_movemask_epi8(mask);
146 // Get first index, if any
147 found_index = (bitmask)?__builtin_ctzll(bitmask)/4:0;
148 ASSERT(found_index < 4);
149 *found_value = (bitmask)?bucket->value[found_index]:*found_value;
150 bucket->timeout[found_index] =
151 (bitmask)?time_now + ht->timeout:bucket->timeout[found_index];
154 for (i = 0; i < LBHASH_ENTRY_PER_BUCKET; i++) {
155 u8 cmp = (bucket->hash[i] == hash && bucket->vip[i] == vip);
156 u8 timeouted = clib_u32_loop_gt(time_now, bucket->timeout[i]);
157 *found_value = (cmp || timeouted)?*found_value:bucket->value[i];
158 bucket->timeout[i] = (cmp || timeouted)?time_now + ht->timeout:bucket->timeout[i];
159 *available_index = (timeouted && (*available_index == ~0))?i:*available_index;
168 u32 lb_hash_available_value(lb_hash_t *h, u32 hash, u32 available_index)
170 return h->buckets[hash & h->buckets_mask].value[available_index];
174 void lb_hash_put(lb_hash_t *h, u32 hash, u32 value, u32 vip,
175 u32 available_index, u32 time_now)
177 lb_hash_bucket_t *bucket = &h->buckets[hash & h->buckets_mask];
178 bucket->hash[available_index] = hash;
179 bucket->value[available_index] = value;
180 bucket->timeout[available_index] = time_now + h->timeout;
181 bucket->vip[available_index] = vip;
185 u32 lb_hash_elts(lb_hash_t *h, u32 time_now)
188 lb_hash_bucket_t *bucket;
190 lb_hash_foreach_valid_entry(h, bucket, i, time_now) {
196 #endif /* LB_PLUGIN_LB_LBHASH_H_ */