#include <vlib/vlib.h>
#include <vnet/l2/feat_bitmap.h>
#include <vnet/l2/l2_rw.h>
+#include <vnet/classify/vnet_classify.h>
/**
* @file
* @brief Layer 2 Rewrite.
*
- * Layer 2-Rewrite node uses classify tables to match packets. Then, using
- * the provisioned mask and value, modfies the packet header.
+ * The Layer 2 Rewrite node uses classify tables to match packets, then
+ * modifies the packet header using the provisioned mask and value.
*/
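+/*
+ * Illustrative summary (not part of the API): for each matched packet
+ * the rewrite applies, per provisioned 16-byte vector i,
+ *
+ *   dst[i] = (dst[i] & ~mask[i]) | value[i];
+ *
+ * so bytes with a zero mask are preserved and bytes with a set mask
+ * take the provisioned value. See l2_rw_rewrite() below.
+ */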
+#ifndef CLIB_MARCH_VARIANT
l2_rw_main_t l2_rw_main;
-
-vlib_node_registration_t l2_rw_node;
+#endif /* CLIB_MARCH_VARIANT */
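+/*
+ * VLIB_NODE_FN compiles the node function once per CPU march variant;
+ * file-scope data such as l2_rw_main must be emitted exactly once,
+ * hence the CLIB_MARCH_VARIANT guards around non-node code.
+ */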
typedef struct
{
static_always_inline void
l2_rw_rewrite (l2_rw_entry_t * rwe, u8 * h)
{
- if (U32X4_ALIGNED (h))
- {
- u32x4 *d = ((u32x4 *) h) + rwe->skip_n_vectors;
- switch (rwe->rewrite_n_vectors)
- {
- case 5:
- d[4] = (d[4] & ~rwe->mask[4]) | rwe->value[4];
- /* FALLTHROUGH */
- case 4:
- d[3] = (d[3] & ~rwe->mask[3]) | rwe->value[3];
- /* FALLTHROUGH */
- case 3:
- d[2] = (d[2] & ~rwe->mask[2]) | rwe->value[2];
- /* FALLTHROUGH */
- case 2:
- d[1] = (d[1] & ~rwe->mask[1]) | rwe->value[1];
- /* FALLTHROUGH */
- case 1:
- d[0] = (d[0] & ~rwe->mask[0]) | rwe->value[0];
- break;
- default:
- abort ();
- }
- }
- else
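+  /* u32x4u is an unaligned vector type, so a single code path now
+   * replaces the separate aligned and unaligned paths removed above. */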
+ u32x4u *d = ((u32x4u *) h) + rwe->skip_n_vectors;
+ switch (rwe->rewrite_n_vectors)
{
- u64 *d = ((u64 *) h) + rwe->skip_n_vectors * 2;
- switch (rwe->rewrite_n_vectors)
- {
- case 5:
- d[8] =
- (d[8] & ~(((u64 *) rwe->mask)[8])) | (((u64 *) rwe->value)[8]);
- d[9] =
- (d[9] & ~(((u64 *) rwe->mask)[9])) | (((u64 *) rwe->value)[9]);
- /* FALLTHROUGH */
- case 4:
- d[6] =
- (d[6] & ~(((u64 *) rwe->mask)[6])) | (((u64 *) rwe->value)[6]);
- d[7] =
- (d[7] & ~(((u64 *) rwe->mask)[7])) | (((u64 *) rwe->value)[7]);
- /* FALLTHROUGH */
- case 3:
- d[4] =
- (d[4] & ~(((u64 *) rwe->mask)[4])) | (((u64 *) rwe->value)[4]);
- d[5] =
- (d[5] & ~(((u64 *) rwe->mask)[5])) | (((u64 *) rwe->value)[5]);
- /* FALLTHROUGH */
- case 2:
- d[2] =
- (d[2] & ~(((u64 *) rwe->mask)[2])) | (((u64 *) rwe->value)[2]);
- d[3] =
- (d[3] & ~(((u64 *) rwe->mask)[3])) | (((u64 *) rwe->value)[3]);
- /* FALLTHROUGH */
- case 1:
- d[0] =
- (d[0] & ~(((u64 *) rwe->mask)[0])) | (((u64 *) rwe->value)[0]);
- d[1] =
- (d[1] & ~(((u64 *) rwe->mask)[1])) | (((u64 *) rwe->value)[1]);
- break;
- default:
- abort ();
- }
+ case 5:
+ d[4] = (d[4] & ~rwe->mask[4]) | rwe->value[4];
+ /* FALLTHROUGH */
+ case 4:
+ d[3] = (d[3] & ~rwe->mask[3]) | rwe->value[3];
+ /* FALLTHROUGH */
+ case 3:
+ d[2] = (d[2] & ~rwe->mask[2]) | rwe->value[2];
+ /* FALLTHROUGH */
+ case 2:
+ d[1] = (d[1] & ~rwe->mask[1]) | rwe->value[1];
+ /* FALLTHROUGH */
+ case 1:
+ d[0] = (d[0] & ~rwe->mask[0]) | rwe->value[0];
+ break;
+ default:
+ abort ();
}
}
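+/*
+ * Example with illustrative values: skip_n_vectors = 0 and
+ * rewrite_n_vectors = 1, with mask[0] all-zero except 0xff in its last
+ * two bytes, rewrites only those two bytes of the first 16-byte vector
+ * of the packet; every other byte keeps its original contents.
+ */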
-static uword
-l2_rw_node_fn (vlib_main_t * vm,
- vlib_node_runtime_t * node, vlib_frame_t * frame)
+VLIB_NODE_FN (l2_rw_node) (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
{
l2_rw_main_t *rw = &l2_rw_main;
u32 n_left_from, *from, *to_next, next_index;
vnet_classify_main_t *vcm = &vnet_classify_main;
-  f64 now = vlib_time_now (vlib_get_main ());
+  f64 now = vlib_time_now (vm);
- u32 prefetch_size = 0;
from = vlib_frame_vector_args (frame);
n_left_from = frame->n_vectors; /* number of packets to process */
/* get space to enqueue frame to graph node "next_index" */
vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
- while (n_left_from >= 4 && n_left_to_next >= 2)
+ while (n_left_from >= 6 && n_left_to_next >= 2)
{
u32 bi0, next0, sw_if_index0, rwe_index0;
u32 bi1, next1, sw_if_index1, rwe_index1;
l2_rw_entry_t *rwe0, *rwe1;
{
- vlib_buffer_t *p2, *p3;
+ vlib_buffer_t *p2, *p3, *p4, *p5;
p2 = vlib_get_buffer (vm, from[2]);
p3 = vlib_get_buffer (vm, from[3]);
+ p4 = vlib_get_buffer (vm, from[4]);
+ p5 = vlib_get_buffer (vm, from[5]);
- vlib_prefetch_buffer_header (p2, LOAD);
- vlib_prefetch_buffer_header (p3, LOAD);
- CLIB_PREFETCH (vlib_buffer_get_current (p2), prefetch_size, LOAD);
- CLIB_PREFETCH (vlib_buffer_get_current (p3), prefetch_size, LOAD);
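+ /* Two-stage prefetch pipeline: fetch the headers of the buffers two
+  * slots ahead, and the packet data of the buffers processed next,
+  * whose headers were prefetched on the previous iteration. */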
+ vlib_prefetch_buffer_header (p4, LOAD);
+ vlib_prefetch_buffer_header (p5, LOAD);
+ vlib_prefetch_buffer_data (p2, LOAD);
+ vlib_prefetch_buffer_data (p3, LOAD);
}
bi0 = from[0];
-  config1 = l2_rw_get_config (sw_if_index1); /*TODO: check sw_if_index0 value */
+  config1 = l2_rw_get_config (sw_if_index1); /*TODO: check sw_if_index1 value */
t0 = pool_elt_at_index (vcm->tables, config0->table_index);
t1 = pool_elt_at_index (vcm->tables, config1->table_index);
- prefetch_size =
- (t1->skip_n_vectors + t1->match_n_vectors) * sizeof (u32x4);
hash0 = vnet_classify_hash_packet (t0, (u8 *) h0);
hash1 = vnet_classify_hash_packet (t1, (u8 *) h1);
return frame->n_vectors;
}
+#ifndef CLIB_MARCH_VARIANT
int
l2_rw_mod_entry (u32 * index,
u8 * mask, u8 * value, u32 len, u32 skip, u8 is_del)
skip -= e->skip_n_vectors * sizeof (u32x4);
e->rewrite_n_vectors = (skip + len - 1) / sizeof (u32x4) + 1;
vec_alloc_aligned (e->mask, e->rewrite_n_vectors, sizeof (u32x4));
- memset (e->mask, 0, e->rewrite_n_vectors * sizeof (u32x4));
+ clib_memset (e->mask, 0, e->rewrite_n_vectors * sizeof (u32x4));
vec_alloc_aligned (e->value, e->rewrite_n_vectors, sizeof (u32x4));
- memset (e->value, 0, e->rewrite_n_vectors * sizeof (u32x4));
+ clib_memset (e->value, 0, e->rewrite_n_vectors * sizeof (u32x4));
clib_memcpy (((u8 *) e->value) + skip, value, len);
clib_memcpy (((u8 *) e->mask) + skip, mask, len);
return 0;
}
+#endif /* CLIB_MARCH_VARIANT */
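+/*
+ * Worked example (illustrative numbers): skip = 18 bytes, len = 4
+ * bytes gives skip_n_vectors = 18 / sizeof (u32x4) = 1, an in-vector
+ * remainder of 18 - 16 = 2, and rewrite_n_vectors =
+ * (2 + 4 - 1) / 16 + 1 = 1: one 16-byte mask/value vector in which
+ * only bytes 2..5 are set.
+ */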
static clib_error_t *
l2_rw_entry_cli_fn (vlib_main_t * vm,
/*?
- * Layer 2-Rewrite node uses classify tables to match packets. Then, using
- * the provisioned mask and value, modfies the packet header.
+ * The Layer 2 Rewrite node uses classify tables to match packets, then
+ * modifies the packet header using the provisioned mask and value.
*
* @cliexpar
* @todo This is incomplete. This needs a detailed description and a
};
/* *INDENT-ON* */
+#ifndef CLIB_MARCH_VARIANT
int
l2_rw_interface_set_table (u32 sw_if_index, u32 table_index, u32 miss_index)
{
return 0;
}
+#endif /* CLIB_MARCH_VARIANT */
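+/*
+ * Typical provisioning flow (illustrative; see the CLI help strings
+ * for exact syntax): create a classify table, add a rewrite entry via
+ * "l2 rewrite entry", then bind table and miss entry to an interface
+ * with "set interface l2 rewrite".
+ */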
static clib_error_t *
l2_rw_interface_cli_fn (vlib_main_t * vm,
/*?
- * Layer 2-Rewrite node uses classify tables to match packets. Then, using
- * the provisioned mask and value, modfies the packet header.
+ * The Layer 2 Rewrite node uses classify tables to match packets, then
+ * modifies the packet header using the provisioned mask and value.
*
* @cliexpar
* @todo This is incomplete. This needs a detailed description and a
/*?
- * Layer 2-Rewrite node uses classify tables to match packets. Then, using
- * the provisioned mask and value, modfies the packet header.
+ * The Layer 2 Rewrite node uses classify tables to match packets, then
+ * modifies the packet header using the provisioned mask and value.
*
* @cliexpar
* @todo This is incomplete. This needs a detailed description and a
/*?
- * Layer 2-Rewrite node uses classify tables to match packets. Then, using
- * the provisioned mask and value, modfies the packet header.
+ * The Layer 2 Rewrite node uses classify tables to match packets, then
+ * modifies the packet header using the provisioned mask and value.
*
* @cliexpar
* @todo This is incomplete. This needs a detailed description and a
};
/* *INDENT-ON* */
-int
+static int
l2_rw_enable_disable (u32 bridge_domain, u8 disable)
{
u32 mask = L2INPUT_FEAT_RW;
/* *INDENT-OFF* */
VLIB_REGISTER_NODE (l2_rw_node) = {
- .function = l2_rw_node_fn,
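+  /* .function is filled in by the VLIB_NODE_FN machinery, which
+   * selects the best march variant at load time. */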
.name = "l2-rw",
.vector_size = sizeof (u32),
.format_trace = format_l2_rw_trace,
};
/* *INDENT-ON* */
-VLIB_NODE_FUNCTION_MULTIARCH (l2_rw_node, l2_rw_node_fn)
/*
* fd.io coding-style-patch-verification: ON
*