Code Review
/
vpp.git
/ blobdiff
commit
grep
author
committer
pickaxe
?
search:
re
summary
|
shortlog
|
log
|
commit
|
commitdiff
|
review
|
tree
raw
|
inline
| side by side
Use thread local storage for thread index
[vpp.git]
/
src
/
vnet
/
classify
/
vnet_classify.c
diff --git
a/src/vnet/classify/vnet_classify.c
b/src/vnet/classify/vnet_classify.c
index
ce38f9f
..
70a189b
100644
(file)
--- a/src/vnet/classify/vnet_classify.c
+++ b/src/vnet/classify/vnet_classify.c
@@
-251,12
+251,12
@@
static inline void make_working_copy
vnet_classify_entry_##size##_t * working_copy##size = 0;
foreach_size_in_u32x4;
#undef _
vnet_classify_entry_##size##_t * working_copy##size = 0;
foreach_size_in_u32x4;
#undef _
-  u32 cpu_number = os_get_cpu_number ();
+  u32 thread_index = vlib_get_thread_index ();
-  if (cpu_number >= vec_len (t->working_copies))
+  if (thread_index >= vec_len (t->working_copies))
{
oldheap = clib_mem_set_heap (t->mheap);
{
oldheap = clib_mem_set_heap (t->mheap);
-      vec_validate (t->working_copies, cpu_number);
+      vec_validate (t->working_copies, thread_index);
clib_mem_set_heap (oldheap);
}
clib_mem_set_heap (oldheap);
}
@@
-265,7
+265,7
@@
static inline void make_working_copy
* updates from multiple threads will not result in sporadic, spurious
* lookup failures.
*/
* updates from multiple threads will not result in sporadic, spurious
* lookup failures.
*/
-  working_copy = t->working_copies[cpu_number];
+  working_copy = t->working_copies[thread_index];
t->saved_bucket.as_u64 = b->as_u64;
oldheap = clib_mem_set_heap (t->mheap);
t->saved_bucket.as_u64 = b->as_u64;
oldheap = clib_mem_set_heap (t->mheap);
@@
-290,7
+290,7
@@
static inline void make_working_copy
default:
abort();
}
default:
abort();
}
-      t->working_copies[cpu_number] = working_copy;
+      t->working_copies[thread_index] = working_copy;
}
_vec_len(working_copy) = (1<<b->log2_pages)*t->entries_per_page;
}
_vec_len(working_copy) = (1<<b->log2_pages)*t->entries_per_page;
@@
-318,7
+318,7
@@
static inline void make_working_copy
working_bucket.offset = vnet_classify_get_offset (t, working_copy);
CLIB_MEMORY_BARRIER();
b->as_u64 = working_bucket.as_u64;
working_bucket.offset = vnet_classify_get_offset (t, working_copy);
CLIB_MEMORY_BARRIER();
b->as_u64 = working_bucket.as_u64;
-  t->working_copies[cpu_number] = working_copy;
+  t->working_copies[thread_index] = working_copy;
}
static vnet_classify_entry_t *
}
static vnet_classify_entry_t *
@@
-387,7
+387,7
@@
int vnet_classify_add_del (vnet_classify_table_t * t,
int i;
u64 hash, new_hash;
u32 new_log2_pages;
int i;
u64 hash, new_hash;
u32 new_log2_pages;
-  u32 cpu_number = os_get_cpu_number ();
+  u32 thread_index = vlib_get_thread_index ();
u8 * key_minus_skip;
ASSERT ((add_v->flags & VNET_CLASSIFY_ENTRY_FREE) == 0);
u8 * key_minus_skip;
ASSERT ((add_v->flags & VNET_CLASSIFY_ENTRY_FREE) == 0);
@@
-498,7
+498,7
@@
int vnet_classify_add_del (vnet_classify_table_t * t,
new_log2_pages = t->saved_bucket.log2_pages + 1;
expand_again:
new_log2_pages = t->saved_bucket.log2_pages + 1;
expand_again:
-  working_copy = t->working_copies[thread_index];
+  working_copy = t->working_copies[thread_index];
new_v = split_and_rehash (t, working_copy, new_log2_pages);
if (new_v == 0)
new_v = split_and_rehash (t, working_copy, new_log2_pages);
if (new_v == 0)
@@
-695,8
+695,8
@@
int vnet_classify_add_del_table (vnet_classify_main_t * cm,
}
#define foreach_tcp_proto_field \
}
#define foreach_tcp_proto_field \
-_(src_port) \
-_(dst_port)
+_(src) \
+_(dst)
#define foreach_udp_proto_field \
_(src_port) \
#define foreach_udp_proto_field \
_(src_port) \
@@
-1093,8
+1093,6
@@
uword unformat_l2_mask (unformat_input_t * input, va_list * args)
uword unformat_classify_mask (unformat_input_t * input, va_list * args)
{
uword unformat_classify_mask (unformat_input_t * input, va_list * args)
{
- vnet_classify_main_t * CLIB_UNUSED(cm)
- = va_arg (*args, vnet_classify_main_t *);
u8 ** maskp = va_arg (*args, u8 **);
u32 * skipp = va_arg (*args, u32 *);
u32 * matchp = va_arg (*args, u32 *);
u8 ** maskp = va_arg (*args, u8 **);
u32 * skipp = va_arg (*args, u32 *);
u32 * matchp = va_arg (*args, u32 *);
@@
-1417,7
+1415,7
@@
classify_table_command_fn (vlib_main_t * vm,
else if (unformat (input, "table %d", &table_index))
;
else if (unformat (input, "mask %U", unformat_classify_mask,
else if (unformat (input, "table %d", &table_index))
;
else if (unformat (input, "mask %U", unformat_classify_mask,
-                    cm, &mask, &skip, &match))
+                    &mask, &skip, &match))
;
else if (unformat (input, "memory-size %uM", &tmp))
memory_size = tmp<<20;
;
else if (unformat (input, "memory-size %uM", &tmp))
memory_size = tmp<<20;
@@
-1501,7
+1499,7
@@
static u8 * format_vnet_classify_table (u8 * s, va_list * args)
s = format (s, "%10u%10d%10d%10d", index, t->active_elements,
t->next_table_index, t->miss_next_index);
s = format (s, "%10u%10d%10d%10d", index, t->active_elements,
t->next_table_index, t->miss_next_index);
- s = format (s, "\n Heap: %U", format_mheap, t->mheap, 0 /*verbose*/);
+ s = format (s, "\n Heap: %U", format_mheap, t->mheap, 0 /*verbose*/);
s = format (s, "\n nbuckets %d, skip %d match %d flag %d offset %d",
t->nbuckets, t->skip_n_vectors, t->match_n_vectors,
s = format (s, "\n nbuckets %d, skip %d match %d flag %d offset %d",
t->nbuckets, t->skip_n_vectors, t->match_n_vectors,