/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef __included_vnet_classify_h__
#define __included_vnet_classify_h__

#include <stdarg.h>

#include <vlib/vlib.h>
#include <vnet/vnet.h>
#include <vnet/pg/pg.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/ethernet/packet.h>
#include <vnet/ip/ip_packet.h>
#include <vnet/ip/ip4_packet.h>
#include <vnet/ip/ip6_packet.h>

#include <vnet/l2/l2_input.h>
#include <vnet/l2/feat_bitmap.h>
#include <vnet/api_errno.h>     /* for API error numbers */

#include <vppinfra/error.h>
#include <vppinfra/hash.h>
#include <vppinfra/cache.h>
#include <vppinfra/xxhash.h>

extern vlib_node_registration_t ip4_classify_node;
extern vlib_node_registration_t ip6_classify_node;

#define CLASSIFY_TRACE 0

#if !defined(__aarch64__) && !defined(__arm__)
#define CLASSIFY_USE_SSE //Allow usage of SSE operations
#endif

#define U32X4_ALIGNED(p) PREDICT_TRUE((((u64)p) & 0xf) == 0)

struct _vnet_classify_main;
typedef struct _vnet_classify_main vnet_classify_main_t;

#define foreach_size_in_u32x4 \
_(1)                          \
_(2)                          \
_(3)                          \
_(4)                          \
_(5)

typedef CLIB_PACKED(struct _vnet_classify_entry {
  /* Graph node next index */
  u32 next_index;

  /* put into vnet_buffer(b)->l2_classify.opaque_index */
  union {
    struct {
      u32 opaque_index;
      /* advance on hit, note it's a signed quantity... */
      i32 advance;
    };
    u64 opaque_count;
  };

  /* Really only need 1 bit */
  u32 flags;
#define VNET_CLASSIFY_ENTRY_FREE (1<<0)

  /* Hit counter, last heard time */
  union {
    u64 hits;
    struct _vnet_classify_entry * next_free;
  };

  f64 last_heard;

  /* Must be aligned to a 16-octet boundary */
  u32x4 key[0];
}) vnet_classify_entry_t;

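/*
 * The variable-length match key is stored inline, immediately after the
 * fixed-size header above, so the stride between entries is
 * sizeof (vnet_classify_entry_t) + match_n_vectors * sizeof (u32x4);
 * see vnet_classify_entry_at_index() below.
 */
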
static inline int vnet_classify_entry_is_free (vnet_classify_entry_t * e)
{
  return e->flags & VNET_CLASSIFY_ENTRY_FREE;
}

static inline int vnet_classify_entry_is_busy (vnet_classify_entry_t * e)
{
  return ((e->flags & VNET_CLASSIFY_ENTRY_FREE) == 0);
}

/* Need these to con the vector allocator */
#define _(size)                       \
typedef CLIB_PACKED(struct {          \
  u32 pad0[4];                        \
  u64 pad1[2];                        \
  u32x4 key[size];                    \
}) vnet_classify_entry_##size##_t;
foreach_size_in_u32x4;
#undef _

typedef struct {
  union {
    struct {
      u32 offset;
      u8 pad[3];
      u8 log2_pages;
    };
    u64 as_u64;
  };
} vnet_classify_bucket_t;

typedef struct {
  /* Mask to apply after skipping N vectors */
  u32x4 *mask;
  /* Buckets and entries */
  vnet_classify_bucket_t * buckets;
  vnet_classify_entry_t * entries;

  /* Config parameters */
  u32 match_n_vectors;
  u32 skip_n_vectors;
  u32 nbuckets;
  u32 log2_nbuckets;
  int entries_per_page;

  /* Index of next table to try */
  u32 next_table_index;

  /* Miss next index, return if next_table_index = 0 */
  u32 miss_next_index;

  /* Per-bucket working copies, one per thread */
  vnet_classify_entry_t ** working_copies;
  vnet_classify_bucket_t saved_bucket;

  /* Free entry freelists */
  vnet_classify_entry_t **freelists;

  /* Private allocation arena, protected by the writer lock */
  void * mheap;

  /* Writer (only) lock for this table */
  volatile u32 * writer_lock;

} vnet_classify_table_t;

struct _vnet_classify_main {
  /* Table pool */
  vnet_classify_table_t * tables;

  /* convenience variables */
  vlib_main_t * vlib_main;
  vnet_main_t * vnet_main;
};

vnet_classify_main_t vnet_classify_main;

u8 * format_classify_table (u8 * s, va_list * args);

u64 vnet_classify_hash_packet (vnet_classify_table_t * t, u8 * h);

static inline u64
vnet_classify_hash_packet_inline (vnet_classify_table_t * t,
                                  u8 * h)
{
  u32x4 *mask;

  union {
    u32x4 as_u32x4;
    u64 as_u64[2];
  } xor_sum __attribute__((aligned(sizeof(u32x4))));

  ASSERT(t);
  mask = t->mask;

#ifdef CLASSIFY_USE_SSE
  if (U32X4_ALIGNED(h)) {  //SSE can't handle unaligned data
    u32x4 *data = (u32x4 *)h;
    xor_sum.as_u32x4 = data[0 + t->skip_n_vectors] & mask[0];
    switch (t->match_n_vectors)
      {
      case 5:
        xor_sum.as_u32x4 ^= data[4 + t->skip_n_vectors] & mask[4];
        /* FALLTHROUGH */
      case 4:
        xor_sum.as_u32x4 ^= data[3 + t->skip_n_vectors] & mask[3];
        /* FALLTHROUGH */
      case 3:
        xor_sum.as_u32x4 ^= data[2 + t->skip_n_vectors] & mask[2];
        /* FALLTHROUGH */
      case 2:
        xor_sum.as_u32x4 ^= data[1 + t->skip_n_vectors] & mask[1];
        /* FALLTHROUGH */
      case 1:
        break;
      default:
        abort();
      }
  } else
#endif /* CLASSIFY_USE_SSE */
  {
    u32 skip_u64 = t->skip_n_vectors * 2;
    u64 *data64 = (u64 *)h;
    xor_sum.as_u64[0] = data64[0 + skip_u64] & ((u64 *)mask)[0];
    xor_sum.as_u64[1] = data64[1 + skip_u64] & ((u64 *)mask)[1];
    switch (t->match_n_vectors)
      {
      case 5:
        xor_sum.as_u64[0] ^= data64[8 + skip_u64] & ((u64 *)mask)[8];
        xor_sum.as_u64[1] ^= data64[9 + skip_u64] & ((u64 *)mask)[9];
        /* FALLTHROUGH */
      case 4:
        xor_sum.as_u64[0] ^= data64[6 + skip_u64] & ((u64 *)mask)[6];
        xor_sum.as_u64[1] ^= data64[7 + skip_u64] & ((u64 *)mask)[7];
        /* FALLTHROUGH */
      case 3:
        xor_sum.as_u64[0] ^= data64[4 + skip_u64] & ((u64 *)mask)[4];
        xor_sum.as_u64[1] ^= data64[5 + skip_u64] & ((u64 *)mask)[5];
        /* FALLTHROUGH */
      case 2:
        xor_sum.as_u64[0] ^= data64[2 + skip_u64] & ((u64 *)mask)[2];
        xor_sum.as_u64[1] ^= data64[3 + skip_u64] & ((u64 *)mask)[3];
        /* FALLTHROUGH */
      case 1:
        break;

      default:
        abort();
      }
  }

  return clib_xxhash (xor_sum.as_u64[0] ^ xor_sum.as_u64[1]);
}

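/*
 * Usage sketch (h0, b0, vm, next0 are illustrative names, not part of
 * this API): hash the current packet header against table t, then look
 * up the matching session, updating its hit counter and last-heard time:
 *
 *   u8 * h0 = vlib_buffer_get_current (b0);
 *   u64 hash0 = vnet_classify_hash_packet_inline (t, h0);
 *   vnet_classify_entry_t * e0 =
 *     vnet_classify_find_entry_inline (t, h0, hash0, vlib_time_now (vm));
 *   if (e0)
 *     next0 = e0->next_index;
 */
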
static inline void
vnet_classify_prefetch_bucket (vnet_classify_table_t * t, u64 hash)
{
  u32 bucket_index;

  ASSERT (is_pow2(t->nbuckets));

  bucket_index = hash & (t->nbuckets - 1);

  CLIB_PREFETCH(&t->buckets[bucket_index], CLIB_CACHE_LINE_BYTES, LOAD);
}

static inline vnet_classify_entry_t *
vnet_classify_get_entry (vnet_classify_table_t * t, uword offset)
{
  u8 * hp = t->mheap;
  u8 * vp = hp + offset;

  return (void *) vp;
}

static inline uword vnet_classify_get_offset (vnet_classify_table_t * t,
                                              vnet_classify_entry_t * v)
{
  u8 * hp, * vp;

  hp = (u8 *) t->mheap;
  vp = (u8 *) v;

  ASSERT((vp - hp) < 0x100000000ULL);
  return vp - hp;
}

static inline vnet_classify_entry_t *
vnet_classify_entry_at_index (vnet_classify_table_t * t,
                              vnet_classify_entry_t * e,
                              u32 index)
{
  u8 * eu8;

  eu8 = (u8 *)e;

  eu8 += index * (sizeof (vnet_classify_entry_t) +
                  (t->match_n_vectors * sizeof (u32x4)));

  return (vnet_classify_entry_t *) eu8;
}

static inline void
vnet_classify_prefetch_entry (vnet_classify_table_t * t,
                              u64 hash)
{
  u32 bucket_index;
  u32 value_index;
  vnet_classify_bucket_t * b;
  vnet_classify_entry_t * e;

  bucket_index = hash & (t->nbuckets - 1);

  b = &t->buckets[bucket_index];

  if (b->offset == 0)
    return;

  hash >>= t->log2_nbuckets;

  e = vnet_classify_get_entry (t, b->offset);
  value_index = hash & ((1<<b->log2_pages)-1);

  e = vnet_classify_entry_at_index (t, e, value_index);

  CLIB_PREFETCH(e, CLIB_CACHE_LINE_BYTES, LOAD);
}

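/*
 * A sketch of the intended prefetch pattern (illustrative only): in a
 * pipelined graph node, hash and prefetch the bucket for a packet a few
 * iterations ahead, then prefetch its candidate entry, so the match loop
 * in vnet_classify_find_entry_inline() runs against warm cache lines:
 *
 *   hash2 = vnet_classify_hash_packet_inline (t, h2);
 *   vnet_classify_prefetch_bucket (t, hash2);
 *   ...
 *   vnet_classify_prefetch_entry (t, hash1);
 *   ...
 *   e0 = vnet_classify_find_entry_inline (t, h0, hash0, now);
 */
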
vnet_classify_entry_t *
vnet_classify_find_entry (vnet_classify_table_t * t,
                          u8 * h, u64 hash, f64 now);

static inline vnet_classify_entry_t *
vnet_classify_find_entry_inline (vnet_classify_table_t * t,
                                 u8 * h, u64 hash, f64 now)
{
  vnet_classify_entry_t * v;
  u32x4 *mask, *key;
  union {
    u32x4 as_u32x4;
    u64 as_u64[2];
  } result __attribute__((aligned(sizeof(u32x4))));
  vnet_classify_bucket_t * b;
  u32 value_index;
  u32 bucket_index;
  int i;

  bucket_index = hash & (t->nbuckets-1);
  b = &t->buckets[bucket_index];
  mask = t->mask;

  if (b->offset == 0)
    return 0;

  hash >>= t->log2_nbuckets;

  v = vnet_classify_get_entry (t, b->offset);
  value_index = hash & ((1<<b->log2_pages)-1);
  v = vnet_classify_entry_at_index (t, v, value_index);

#ifdef CLASSIFY_USE_SSE
  if (U32X4_ALIGNED(h)) {
    u32x4 *data = (u32x4 *) h;
    for (i = 0; i < t->entries_per_page; i++) {
      key = v->key;
      result.as_u32x4 = (data[0 + t->skip_n_vectors] & mask[0]) ^ key[0];
      switch (t->match_n_vectors)
        {
        case 5:
          result.as_u32x4 |= (data[4 + t->skip_n_vectors] & mask[4]) ^ key[4];
          /* FALLTHROUGH */
        case 4:
          result.as_u32x4 |= (data[3 + t->skip_n_vectors] & mask[3]) ^ key[3];
          /* FALLTHROUGH */
        case 3:
          result.as_u32x4 |= (data[2 + t->skip_n_vectors] & mask[2]) ^ key[2];
          /* FALLTHROUGH */
        case 2:
          result.as_u32x4 |= (data[1 + t->skip_n_vectors] & mask[1]) ^ key[1];
          /* FALLTHROUGH */
        case 1:
          break;
        default:
          abort();
        }

      if (u32x4_zero_byte_mask (result.as_u32x4) == 0xffff) {
        if (PREDICT_TRUE(now)) {
          v->hits++;
          v->last_heard = now;
        }
        return (v);
      }
      v = vnet_classify_entry_at_index (t, v, 1);
    }
  } else
#endif /* CLASSIFY_USE_SSE */
  {
    u32 skip_u64 = t->skip_n_vectors * 2;
    u64 *data64 = (u64 *)h;
    for (i = 0; i < t->entries_per_page; i++) {
      key = v->key;

      result.as_u64[0] = (data64[0 + skip_u64] & ((u64 *)mask)[0]) ^ ((u64 *)key)[0];
      result.as_u64[1] = (data64[1 + skip_u64] & ((u64 *)mask)[1]) ^ ((u64 *)key)[1];
      switch (t->match_n_vectors)
        {
        case 5:
          result.as_u64[0] |= (data64[8 + skip_u64] & ((u64 *)mask)[8]) ^ ((u64 *)key)[8];
          result.as_u64[1] |= (data64[9 + skip_u64] & ((u64 *)mask)[9]) ^ ((u64 *)key)[9];
          /* FALLTHROUGH */
        case 4:
          result.as_u64[0] |= (data64[6 + skip_u64] & ((u64 *)mask)[6]) ^ ((u64 *)key)[6];
          result.as_u64[1] |= (data64[7 + skip_u64] & ((u64 *)mask)[7]) ^ ((u64 *)key)[7];
          /* FALLTHROUGH */
        case 3:
          result.as_u64[0] |= (data64[4 + skip_u64] & ((u64 *)mask)[4]) ^ ((u64 *)key)[4];
          result.as_u64[1] |= (data64[5 + skip_u64] & ((u64 *)mask)[5]) ^ ((u64 *)key)[5];
          /* FALLTHROUGH */
        case 2:
          result.as_u64[0] |= (data64[2 + skip_u64] & ((u64 *)mask)[2]) ^ ((u64 *)key)[2];
          result.as_u64[1] |= (data64[3 + skip_u64] & ((u64 *)mask)[3]) ^ ((u64 *)key)[3];
          /* FALLTHROUGH */
        case 1:
          break;

        default:
          abort();
        }

      if (result.as_u64[0] == 0 && result.as_u64[1] == 0) {
        if (PREDICT_TRUE(now)) {
          v->hits++;
          v->last_heard = now;
        }
        return (v);
      }

      v = vnet_classify_entry_at_index (t, v, 1);
    }
  }
  return 0;
}

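/*
 * Note: vnet_classify_find_entry_inline skips hit accounting when
 * now == 0, so a control-plane caller can probe the table without
 * updating the per-session hits counter or last_heard timestamp.
 */
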
vnet_classify_table_t *
vnet_classify_new_table (vnet_classify_main_t *cm,
                         u8 * mask, u32 nbuckets, u32 memory_size,
                         u32 skip_n_vectors,
                         u32 match_n_vectors);

int vnet_classify_add_del_session (vnet_classify_main_t * cm,
                                   u32 table_index,
                                   u8 * match,
                                   u32 hit_next_index,
                                   u32 opaque_index,
                                   i32 advance,
                                   int is_add);

int vnet_classify_add_del_table (vnet_classify_main_t * cm,
                                 u8 * mask,
                                 u32 nbuckets,
                                 u32 memory_size,
                                 u32 skip,
                                 u32 match,
                                 u32 next_table_index,
                                 u32 miss_next_index,
                                 u32 * table_index,
                                 int is_add);

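/*
 * Usage sketch (argument values are illustrative, not prescriptive):
 * create a table whose key is one u32x4 vector taken at the start of
 * the classified header (skip = 0, match = 1), chained to no further
 * table (next_table_index = ~0), then add one session to it. On
 * success, table_index is written back:
 *
 *   u32 table_index = ~0;
 *   int rv;
 *
 *   rv = vnet_classify_add_del_table (cm, mask, 32, 2<<20, 0, 1,
 *                                     ~0, miss_next_index,
 *                                     &table_index, 1);
 *   if (rv == 0)
 *     rv = vnet_classify_add_del_session (cm, table_index, match,
 *                                         hit_next_index, opaque_index,
 *                                         0, 1);
 */
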
unformat_function_t unformat_ip4_mask;
unformat_function_t unformat_ip6_mask;
unformat_function_t unformat_l3_mask;
unformat_function_t unformat_l2_mask;
unformat_function_t unformat_classify_mask;
unformat_function_t unformat_l2_next_index;
unformat_function_t unformat_ip_next_index;
unformat_function_t unformat_ip4_match;
unformat_function_t unformat_ip6_match;
unformat_function_t unformat_l3_match;
unformat_function_t unformat_vlan_tag;
unformat_function_t unformat_l2_match;
unformat_function_t unformat_classify_match;

#endif /* __included_vnet_classify_h__ */