Code Review
/
vpp.git
/ blobdiff
commit
grep
author
committer
pickaxe
?
search:
re
summary
|
shortlog
|
log
|
commit
|
commitdiff
|
review
|
tree
raw
|
inline
| side by side
flow-hash: Add symmetric flag for flow hashing
[vpp.git]
/
src
/
vnet
/
classify
/
vnet_classify.h
diff --git
a/src/vnet/classify/vnet_classify.h
b/src/vnet/classify/vnet_classify.h
index
6cbbf10
..
4fea95d
100644
(file)
--- a/
src/vnet/classify/vnet_classify.h
+++ b/
src/vnet/classify/vnet_classify.h
@@ -26,9 +26,6 @@
#include <vnet/ip/ip4_packet.h>
#include <vnet/ip/ip6_packet.h>
#include <vlib/cli.h>
#include <vnet/ip/ip4_packet.h>
#include <vnet/ip/ip6_packet.h>
#include <vlib/cli.h>
-#include <vnet/l2/l2_input.h>
-#include <vnet/l2/l2_output.h>
-#include <vnet/l2/feat_bitmap.h>
#include <vnet/api_errno.h> /* for API error numbers */
#include <vppinfra/error.h>
#include <vnet/api_errno.h> /* for API error numbers */
#include <vppinfra/error.h>
@@ -41,10 +38,6 @@
extern vlib_node_registration_t ip6_classify_node;
#define CLASSIFY_TRACE 0
#define CLASSIFY_TRACE 0
-#ifdef CLIB_HAVE_VEC128
-#define CLASSIFY_USE_SSE //Allow usage of SSE operations
-#endif
-
#define U32X4_ALIGNED(p) PREDICT_TRUE((((intptr_t)p) & 0xf) == 0)
/*
#define U32X4_ALIGNED(p) PREDICT_TRUE((((intptr_t)p) & 0xf) == 0)
/*
@@ -233,7 +226,7 @@ vnet_classify_hash_packet_inline (vnet_classify_table_t * t, u8 * h)
ASSERT (t);
mask = t->mask;
ASSERT (t);
mask = t->mask;
-#ifdef CLASSIFY_USE_SSE
+#ifdef CLIB_HAVE_VEC128
if (U32X4_ALIGNED (h))
{ //SSE can't handle unaligned data
u32x4 *data = (u32x4 *) h;
if (U32X4_ALIGNED (h))
{ //SSE can't handle unaligned data
u32x4 *data = (u32x4 *) h;
@@ -259,7 +252,7 @@ vnet_classify_hash_packet_inline (vnet_classify_table_t * t, u8 * h)
}
}
else
}
}
else
-#endif /* CLASSIFY_USE_SSE */
+#endif /* CLIB_HAVE_VEC128 */
{
u32 skip_u64 = t->skip_n_vectors * 2;
u64 *data64 = (u64 *) h;
{
u32 skip_u64 = t->skip_n_vectors * 2;
u64 *data64 = (u64 *) h;
@@ -407,7 +400,7 @@ vnet_classify_find_entry_inline (vnet_classify_table_t * t,
v = vnet_classify_entry_at_index (t, v, value_index);
v = vnet_classify_entry_at_index (t, v, value_index);
-#ifdef CLASSIFY_USE_SSE
+#ifdef CLIB_HAVE_VEC128
if (U32X4_ALIGNED (h))
{
u32x4 *data = (u32x4 *) h;
if (U32X4_ALIGNED (h))
{
u32x4 *data = (u32x4 *) h;
@@ -452,7 +445,7 @@ vnet_classify_find_entry_inline (vnet_classify_table_t * t,
}
}
else
}
}
else
-#endif /* CLASSIFY_USE_SSE */
+#endif /* CLIB_HAVE_VEC128 */
{
u32 skip_u64 = t->skip_n_vectors * 2;
u64 *data64 = (u64 *) h;
{
u32 skip_u64 = t->skip_n_vectors * 2;
u64 *data64 = (u64 *) h;