/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef __included_vnet_classify_h__
#define __included_vnet_classify_h__

#include <vlib/vlib.h>
#include <vnet/vnet.h>
#include <vnet/pg/pg.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/ethernet/packet.h>
#include <vnet/ip/ip_packet.h>
#include <vnet/ip/ip4_packet.h>
#include <vnet/ip/ip6_packet.h>

#include <vnet/l2/l2_input.h>
#include <vnet/l2/feat_bitmap.h>
#include <vnet/api_errno.h>	/* for API error numbers */

#include <vppinfra/error.h>
#include <vppinfra/hash.h>
#include <vppinfra/cache.h>
#include <vppinfra/xxhash.h>

extern vlib_node_registration_t ip4_classify_node;
extern vlib_node_registration_t ip6_classify_node;

#define CLASSIFY_TRACE 0

#if !defined( __aarch64__) && !defined(__arm__)
#define CLASSIFY_USE_SSE	//Allow usage of SSE operations
#endif

#define U32X4_ALIGNED(p) PREDICT_TRUE((((intptr_t)p) & 0xf) == 0)

/*
 * Classify table option to process packets
 *  CLASSIFY_FLAG_USE_CURR_DATA:
 *   - classify packets starting from VPP node's current data pointer
 */
#define CLASSIFY_FLAG_USE_CURR_DATA 1

/*
 * Classify session action
 *  CLASSIFY_ACTION_SET_IP4_FIB_INDEX:
 *   - Classified IP packets will be looked up
 *     from the specified ipv4 fib table
 *  CLASSIFY_ACTION_SET_IP6_FIB_INDEX:
 *   - Classified IP packets will be looked up
 *     from the specified ipv6 fib table
 */
typedef enum vnet_classify_action_t_
{
  CLASSIFY_ACTION_SET_IP4_FIB_INDEX = 1,
  CLASSIFY_ACTION_SET_IP6_FIB_INDEX = 2,
  CLASSIFY_ACTION_SET_METADATA = 3,
} __attribute__ ((packed)) vnet_classify_action_t;

struct _vnet_classify_main;
typedef struct _vnet_classify_main vnet_classify_main_t;

#define foreach_size_in_u32x4                   \
_(1)                                            \
_(2)                                            \
_(3)                                            \
_(4)                                            \
_(5)

typedef CLIB_PACKED(struct _vnet_classify_entry {
  /* Graph node next index */
  u32 next_index;

  /* put into vnet_buffer(b)->l2_classify.opaque_index */
  union {
    struct {
      u32 opaque_index;
      /* advance on hit, note it's a signed quantity... */
      i32 advance;
    };
    u64 opaque_count;
  };

  /* Really only need 1 bit */
  u8 flags;
#define VNET_CLASSIFY_ENTRY_FREE (1<<0)

  vnet_classify_action_t action;
  u16 metadata;

  /* Hit counter, last heard time */
  union {
    u64 hits;
    struct _vnet_classify_entry * next_free;
  };

  f64 last_heard;

  /* Must be aligned to a 16-octet boundary */
  u32x4 key[0];
}) vnet_classify_entry_t;

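/*
 * Worked example (illustrative, not from the original source): entries
 * are variable-size because of the u32x4 key[0] flexible array member.
 * For a table with match_n_vectors = 2, each entry occupies
 * sizeof (vnet_classify_entry_t) + 2 * sizeof (u32x4) octets, which is
 * exactly the stride used by vnet_classify_entry_at_index() below.
 */
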
static inline int
vnet_classify_entry_is_free (vnet_classify_entry_t * e)
{
  return e->flags & VNET_CLASSIFY_ENTRY_FREE;
}

static inline int
vnet_classify_entry_is_busy (vnet_classify_entry_t * e)
{
  return ((e->flags & VNET_CLASSIFY_ENTRY_FREE) == 0);
}

/* Need these to con the vector allocator */
#define _(size)                                 \
typedef CLIB_PACKED(struct {                    \
  u32 pad0[4];                                  \
  u64 pad1[2];                                  \
  u32x4 key[size];                              \
}) vnet_classify_entry_##size##_t;
foreach_size_in_u32x4;
#undef _

typedef struct
{
  union
  {
    struct
    {
      u32 offset;
      u8 linear_search;
      u8 pad[2];
      u8 log2_pages;
    };
    u64 as_u64;
  };
} vnet_classify_bucket_t;

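/*
 * Illustrative note (not from the original source): a bucket with
 * offset == 0 is empty; otherwise offset locates an array of
 * 2^log2_pages pages of entries inside the table's private arena.
 * With log2_pages = 2 and entries_per_page = 8 (example values), a
 * bucket addresses 4 * 8 = 32 candidate entries; linear_search flags
 * buckets that must be scanned sequentially (see
 * vnet_classify_find_entry_inline below).
 */
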
typedef struct
{
  /* Mask to apply after skipping N vectors */
  u32x4 *mask;
  /* Buckets and entries */
  vnet_classify_bucket_t *buckets;
  vnet_classify_entry_t *entries;

  /* Config parameters */
  u32 match_n_vectors;
  u32 skip_n_vectors;
  u32 nbuckets;
  u32 log2_nbuckets;
  u32 linear_buckets;
  int entries_per_page;
  u32 active_elements;
  u32 current_data_flag;
  int current_data_offset;

  /* Index of next table to try */
  u32 next_table_index;

  /* Miss next index, return if next_table_index = 0 */
  u32 miss_next_index;

  /* Per-bucket working copies, one per thread */
  vnet_classify_entry_t **working_copies;
  int *working_copy_lengths;
  vnet_classify_bucket_t saved_bucket;

  /* Free entry freelists */
  vnet_classify_entry_t **freelists;

  /* Private allocation arena, protected by the writer lock */
  void *mheap;

  /* Writer (only) lock for this table */
  volatile u32 *writer_lock;

} vnet_classify_table_t;

struct _vnet_classify_main
{
  /* Table pool */
  vnet_classify_table_t *tables;

  /* Registered next-index, opaque unformat fcns */
  unformat_function_t **unformat_l2_next_index_fns;
  unformat_function_t **unformat_ip_next_index_fns;
  unformat_function_t **unformat_acl_next_index_fns;
  unformat_function_t **unformat_policer_next_index_fns;
  unformat_function_t **unformat_opaque_index_fns;

  /* convenience variables */
  vlib_main_t *vlib_main;
  vnet_main_t *vnet_main;
};

extern vnet_classify_main_t vnet_classify_main;

u8 *format_classify_table (u8 * s, va_list * args);

u64 vnet_classify_hash_packet (vnet_classify_table_t * t, u8 * h);

static inline u64
vnet_classify_hash_packet_inline (vnet_classify_table_t * t, u8 * h)
{
  u32x4 *mask;

  union
  {
    u32x4 as_u32x4;
    u64 as_u64[2];
  } xor_sum __attribute__ ((aligned (sizeof (u32x4))));

  ASSERT (t);
  mask = t->mask;
#ifdef CLASSIFY_USE_SSE
  if (U32X4_ALIGNED (h))
    {				//SSE can't handle unaligned data
      u32x4 *data = (u32x4 *) h;
      xor_sum.as_u32x4 = data[0 + t->skip_n_vectors] & mask[0];
      switch (t->match_n_vectors)
	{
	case 5:
	  xor_sum.as_u32x4 ^= data[4 + t->skip_n_vectors] & mask[4];
	  /* FALLTHROUGH */
	case 4:
	  xor_sum.as_u32x4 ^= data[3 + t->skip_n_vectors] & mask[3];
	  /* FALLTHROUGH */
	case 3:
	  xor_sum.as_u32x4 ^= data[2 + t->skip_n_vectors] & mask[2];
	  /* FALLTHROUGH */
	case 2:
	  xor_sum.as_u32x4 ^= data[1 + t->skip_n_vectors] & mask[1];
	  /* FALLTHROUGH */
	case 1:
	  break;
	default:
	  abort ();
	}
    }
  else
#endif /* CLASSIFY_USE_SSE */
    {
      u32 skip_u64 = t->skip_n_vectors * 2;
      u64 *data64 = (u64 *) h;
      xor_sum.as_u64[0] = data64[0 + skip_u64] & ((u64 *) mask)[0];
      xor_sum.as_u64[1] = data64[1 + skip_u64] & ((u64 *) mask)[1];
      switch (t->match_n_vectors)
	{
	case 5:
	  xor_sum.as_u64[0] ^= data64[8 + skip_u64] & ((u64 *) mask)[8];
	  xor_sum.as_u64[1] ^= data64[9 + skip_u64] & ((u64 *) mask)[9];
	  /* FALLTHROUGH */
	case 4:
	  xor_sum.as_u64[0] ^= data64[6 + skip_u64] & ((u64 *) mask)[6];
	  xor_sum.as_u64[1] ^= data64[7 + skip_u64] & ((u64 *) mask)[7];
	  /* FALLTHROUGH */
	case 3:
	  xor_sum.as_u64[0] ^= data64[4 + skip_u64] & ((u64 *) mask)[4];
	  xor_sum.as_u64[1] ^= data64[5 + skip_u64] & ((u64 *) mask)[5];
	  /* FALLTHROUGH */
	case 2:
	  xor_sum.as_u64[0] ^= data64[2 + skip_u64] & ((u64 *) mask)[2];
	  xor_sum.as_u64[1] ^= data64[3 + skip_u64] & ((u64 *) mask)[3];
	  /* FALLTHROUGH */
	case 1:
	  break;
	default:
	  abort ();
	}
    }

  return clib_xxhash (xor_sum.as_u64[0] ^ xor_sum.as_u64[1]);
}

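/*
 * Usage sketch (hedged; not part of the exported API contract): the
 * hash is typically computed a few packets ahead of the lookup so the
 * bucket prefetch below has time to land:
 *
 *   u64 hash = vnet_classify_hash_packet_inline (t, h);
 *   vnet_classify_prefetch_bucket (t, hash);
 *   ... work on other packets ...
 *   e = vnet_classify_find_entry_inline (t, h, hash, vlib_time_now (vm));
 */
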
static inline void
vnet_classify_prefetch_bucket (vnet_classify_table_t * t, u64 hash)
{
  u32 bucket_index;

  ASSERT (is_pow2 (t->nbuckets));

  bucket_index = hash & (t->nbuckets - 1);

  CLIB_PREFETCH (&t->buckets[bucket_index], CLIB_CACHE_LINE_BYTES, LOAD);
}

static inline vnet_classify_entry_t *
vnet_classify_get_entry (vnet_classify_table_t * t, uword offset)
{
  u8 *hp = t->mheap;
  u8 *vp = hp + offset;

  return (void *) vp;
}

static inline uword
vnet_classify_get_offset (vnet_classify_table_t * t,
			  vnet_classify_entry_t * v)
{
  u8 *hp, *vp;

  hp = (u8 *) t->mheap;
  vp = (u8 *) v;

  ASSERT ((vp - hp) < 0x100000000ULL);
  return vp - hp;
}

static inline vnet_classify_entry_t *
vnet_classify_entry_at_index (vnet_classify_table_t * t,
			      vnet_classify_entry_t * e, u32 index)
{
  u8 *eu8;

  eu8 = (u8 *) e;

  eu8 += index * (sizeof (vnet_classify_entry_t) +
		  (t->match_n_vectors * sizeof (u32x4)));

  return (vnet_classify_entry_t *) eu8;
}

static inline void
vnet_classify_prefetch_entry (vnet_classify_table_t * t, u64 hash)
{
  u32 bucket_index;
  u32 value_index;
  vnet_classify_bucket_t *b;
  vnet_classify_entry_t *e;

  bucket_index = hash & (t->nbuckets - 1);

  b = &t->buckets[bucket_index];

  if (b->offset == 0)
    return;

  hash >>= t->log2_nbuckets;

  e = vnet_classify_get_entry (t, b->offset);
  value_index = hash & ((1 << b->log2_pages) - 1);

  e = vnet_classify_entry_at_index (t, e, value_index);

  CLIB_PREFETCH (e, CLIB_CACHE_LINE_BYTES, LOAD);
}

vnet_classify_entry_t *vnet_classify_find_entry (vnet_classify_table_t * t,
						 u8 * h, u64 hash, f64 now);

static inline vnet_classify_entry_t *
vnet_classify_find_entry_inline (vnet_classify_table_t * t,
				 u8 * h, u64 hash, f64 now)
{
  vnet_classify_entry_t *v;
  u32x4 *mask, *key;
  union
  {
    u32x4 as_u32x4;
    u64 as_u64[2];
  } result __attribute__ ((aligned (sizeof (u32x4))));
  vnet_classify_bucket_t *b;
  u32 value_index, bucket_index, limit;
  int i;

  bucket_index = hash & (t->nbuckets - 1);
  b = &t->buckets[bucket_index];
  mask = t->mask;

  if (b->offset == 0)
    return 0;

  hash >>= t->log2_nbuckets;

  v = vnet_classify_get_entry (t, b->offset);
  value_index = hash & ((1 << b->log2_pages) - 1);
  limit = t->entries_per_page;
  if (PREDICT_FALSE (b->linear_search))
    {
      value_index = 0;
      limit *= (1 << b->log2_pages);
    }

  v = vnet_classify_entry_at_index (t, v, value_index);

#ifdef CLASSIFY_USE_SSE
  if (U32X4_ALIGNED (h))
    {
      u32x4 *data = (u32x4 *) h;
      for (i = 0; i < limit; i++)
	{
	  key = v->key;
	  result.as_u32x4 = (data[0 + t->skip_n_vectors] & mask[0]) ^ key[0];
	  switch (t->match_n_vectors)
	    {
	    case 5:
	      result.as_u32x4 |= (data[4 + t->skip_n_vectors] & mask[4]) ^ key[4];
	      /* FALLTHROUGH */
	    case 4:
	      result.as_u32x4 |= (data[3 + t->skip_n_vectors] & mask[3]) ^ key[3];
	      /* FALLTHROUGH */
	    case 3:
	      result.as_u32x4 |= (data[2 + t->skip_n_vectors] & mask[2]) ^ key[2];
	      /* FALLTHROUGH */
	    case 2:
	      result.as_u32x4 |= (data[1 + t->skip_n_vectors] & mask[1]) ^ key[1];
	      /* FALLTHROUGH */
	    case 1:
	      break;
	    default:
	      abort ();
	    }

	  if (u32x4_zero_byte_mask (result.as_u32x4) == 0xffff)
	    {
	      if (PREDICT_TRUE (now))
		{
		  v->hits++;
		  v->last_heard = now;
		}
	      return (v);
	    }
	  v = vnet_classify_entry_at_index (t, v, 1);
	}
    }
  else
#endif /* CLASSIFY_USE_SSE */
    {
      u32 skip_u64 = t->skip_n_vectors * 2;
      u64 *data64 = (u64 *) h;
      for (i = 0; i < limit; i++)
	{
	  key = v->key;

	  result.as_u64[0] =
	    (data64[0 + skip_u64] & ((u64 *) mask)[0]) ^ ((u64 *) key)[0];
	  result.as_u64[1] =
	    (data64[1 + skip_u64] & ((u64 *) mask)[1]) ^ ((u64 *) key)[1];
	  switch (t->match_n_vectors)
	    {
	    case 5:
	      result.as_u64[0] |=
		(data64[8 + skip_u64] & ((u64 *) mask)[8]) ^ ((u64 *) key)[8];
	      result.as_u64[1] |=
		(data64[9 + skip_u64] & ((u64 *) mask)[9]) ^ ((u64 *) key)[9];
	      /* FALLTHROUGH */
	    case 4:
	      result.as_u64[0] |=
		(data64[6 + skip_u64] & ((u64 *) mask)[6]) ^ ((u64 *) key)[6];
	      result.as_u64[1] |=
		(data64[7 + skip_u64] & ((u64 *) mask)[7]) ^ ((u64 *) key)[7];
	      /* FALLTHROUGH */
	    case 3:
	      result.as_u64[0] |=
		(data64[4 + skip_u64] & ((u64 *) mask)[4]) ^ ((u64 *) key)[4];
	      result.as_u64[1] |=
		(data64[5 + skip_u64] & ((u64 *) mask)[5]) ^ ((u64 *) key)[5];
	      /* FALLTHROUGH */
	    case 2:
	      result.as_u64[0] |=
		(data64[2 + skip_u64] & ((u64 *) mask)[2]) ^ ((u64 *) key)[2];
	      result.as_u64[1] |=
		(data64[3 + skip_u64] & ((u64 *) mask)[3]) ^ ((u64 *) key)[3];
	      /* FALLTHROUGH */
	    case 1:
	      break;
	    default:
	      abort ();
	    }

	  if (result.as_u64[0] == 0 && result.as_u64[1] == 0)
	    {
	      if (PREDICT_TRUE (now))
		{
		  v->hits++;
		  v->last_heard = now;
		}
	      return (v);
	    }

	  v = vnet_classify_entry_at_index (t, v, 1);
	}
    }
  return 0;
}

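/*
 * Hedged usage note: a null return is a miss; per the table fields
 * above, callers typically fall back to next_table_index or dispatch
 * the packet to miss_next_index.  Passing now == 0 skips the
 * hits/last_heard update, so control-plane lookups can avoid
 * perturbing session statistics.
 */
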
vnet_classify_table_t *vnet_classify_new_table (vnet_classify_main_t * cm,
						u8 * mask, u32 nbuckets,
						u32 memory_size,
						u32 skip_n_vectors,
						u32 match_n_vectors);

int vnet_classify_add_del_session (vnet_classify_main_t * cm,
				   u32 table_index,
				   u8 * match,
				   u32 hit_next_index,
				   u32 opaque_index,
				   i32 advance,
				   u8 action, u32 metadata, int is_add);

int vnet_classify_add_del_table (vnet_classify_main_t * cm,
				 u8 * mask,
				 u32 nbuckets,
				 u32 memory_size,
				 u32 skip,
				 u32 match,
				 u32 next_table_index,
				 u32 miss_next_index,
				 u32 * table_index,
				 u8 current_data_flag,
				 i16 current_data_offset,
				 int is_add, int del_chain);

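/*
 * Usage sketch (hedged; the argument values below are illustrative,
 * not library defaults).  Creating a table with 1024 buckets and a
 * 1 MB arena that skips 1 vector and matches 1 vector, with no
 * chained table, might look like:
 *
 *   u32 table_index = ~0;
 *   int rv = vnet_classify_add_del_table
 *     (cm, mask, 1024, 1 << 20, 1, 1, ~0, ~0, &table_index,
 *      0, 0, 1, 0);
 */
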
unformat_function_t unformat_ip4_mask;
unformat_function_t unformat_ip6_mask;
unformat_function_t unformat_l3_mask;
unformat_function_t unformat_l2_mask;
unformat_function_t unformat_classify_mask;
unformat_function_t unformat_l2_next_index;
unformat_function_t unformat_ip_next_index;
unformat_function_t unformat_ip4_match;
unformat_function_t unformat_ip6_match;
unformat_function_t unformat_l3_match;
unformat_function_t unformat_l4_match;
unformat_function_t unformat_vlan_tag;
unformat_function_t unformat_l2_match;
unformat_function_t unformat_classify_match;

void vnet_classify_register_unformat_ip_next_index_fn
  (unformat_function_t * fn);

void vnet_classify_register_unformat_l2_next_index_fn
  (unformat_function_t * fn);

void vnet_classify_register_unformat_acl_next_index_fn
  (unformat_function_t * fn);

void vnet_classify_register_unformat_policer_next_index_fn
  (unformat_function_t * fn);

void vnet_classify_register_unformat_opaque_index_fn (unformat_function_t *
						      fn);

#endif /* __included_vnet_classify_h__ */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */