* For interfaces in Layer 3 mode, the packets will be routed.
*/
+#ifndef CLIB_MARCH_VARIANT
+
/* Feature graph node names */
static char *l2input_feat_names[] = {
#define _(sym,name) name,
  foreach_l2input_feat
#undef _
};
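/* The table above is built with VPP's X-macro idiom: foreach_l2input_feat
 * expands _(sym,name) once per feature, so a single list drives both the
 * feature enum and this name table.  A minimal self-contained sketch of
 * the idiom, using made-up feature names rather than the real
 * foreach_l2input_feat list:
 */
#define foreach_demo_feat _(DROP, "feat-drop") _(LEARN, "feat-learn")

typedef enum
{
#define _(sym,name) DEMO_FEAT_##sym,
  foreach_demo_feat
#undef _
  DEMO_N_FEAT,
} demo_feat_t;

static char *demo_feat_names[] = {
#define _(sym,name) name,
  foreach_demo_feat
#undef _
};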
u32 feature_bitmap = va_arg (*args, u32);
+ u32 verbose = va_arg (*args, u32);
  if (feature_bitmap == 0)
    {
      s = format (s, "  none configured");
      return s;
    }

  feature_bitmap &= ~L2INPUT_FEAT_DROP;	/* Not a feature */
int i;
- for (i = L2INPUT_N_FEAT; i >= 0; i--)
- if (feature_bitmap & (1 << i))
- s = format (s, "%17s (%s)\n", display_names[i], l2input_feat_names[i]);
+ for (i = L2INPUT_N_FEAT - 1; i >= 0; i--)
+ {
+ if (feature_bitmap & (1 << i))
+ {
+ if (verbose)
+ s = format (s, "%17s (%s)\n",
+ display_names[i], l2input_feat_names[i]);
+ else
+ s = format (s, "%s ", l2input_feat_names[i]);
+ }
+ }
return s;
}
+#endif /* CLIB_MARCH_VARIANT */
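/* Features print from the highest bit down, matching the order in which
 * the corresponding feature nodes run (dispatch takes the most-significant
 * set bit first).  A hypothetical call site, supplying the two va_args
 * consumed above (the formatter's name is assumed here):
 *
 *   s = format (s, "%U", format_l2_input_features,
 *               config->feature_bitmap, 1);   // second arg: verbose
 */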
typedef struct
{
/* per-pkt trace data */
-  u8 src[6];
-  u8 dst[6];
+  /* dst and src addresses, filled with a single 12-byte copy */
+  u8 dst_and_src[12];
u32 next_index;
u32 sw_if_index;
} l2input_trace_t;
s = format (s, "l2-input: sw_if_index %d dst %U src %U",
t->sw_if_index,
- format_ethernet_address, t->dst,
- format_ethernet_address, t->src);
+ format_ethernet_address, t->dst_and_src,
+ format_ethernet_address, t->dst_and_src + 6);
return s;
}
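/* Merging dst and src into dst_and_src lets the trace capture do a single
 * 12-byte clib_memcpy_fast() instead of two 6-byte copies.  That is only
 * safe because dst_address immediately precedes src_address in
 * ethernet_header_t; a compile-time check in that spirit (a sketch using
 * C11 _Static_assert and offsetof from <stddef.h>):
 */
_Static_assert (offsetof (ethernet_header_t, src_address) ==
		offsetof (ethernet_header_t, dst_address) +
		sizeof (((ethernet_header_t *) 0)->dst_address),
		"combined trace copy needs dst/src to be adjacent");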
+extern l2input_main_t l2input_main;
+
+#ifndef CLIB_MARCH_VARIANT
l2input_main_t l2input_main;
+#endif /* CLIB_MARCH_VARIANT */
#define foreach_l2input_error \
_(L2INPUT, "L2 input packets") \
L2INPUT_FEAT_UU_FLOOD |
L2INPUT_FEAT_UU_FWD | L2INPUT_FEAT_GBP_FWD);
+  /* Disable ARP unicast forwarding for non-ARP packets */
+  if (ethertype != ETHERNET_TYPE_ARP)
+    feat_mask &= ~(L2INPUT_FEAT_ARP_UFWD);
+
/* Disable ARP-term for non-ARP and non-ICMP6 packet */
if (ethertype != ETHERNET_TYPE_ARP &&
(ethertype != ETHERNET_TYPE_IP6 || protocol != IP_PROTOCOL_ICMP6))
feat_mask &= ~(L2INPUT_FEAT_ARP_TERM);
-
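  /* feat_mask is ANDed with the interface's configured feature bitmap, so
   * clearing a bit here keeps that feature node out of this packet's path.
   * Dispatch then services the most-significant set bit first; roughly
   * (sketch, not the real feat_bitmap helper):
   *
   *   next_feat = 31 - __builtin_clz (feature_bitmap);
   */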
/*
* For packet from BVI - set SHG of ARP request or ICMPv6 neighbor
* solicitation packet from BVI to 0 so it can also flood to VXLAN
u32 n_left_from, *from, *to_next;
l2input_next_t next_index;
l2input_main_t *msm = &l2input_main;
+ vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
from = vlib_frame_vector_args (frame);
n_left_from = frame->n_vectors; /* number of packets to process */
next_index = node->cached_next_index;
+ vlib_get_buffers (vm, from, bufs, n_left_from);
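  /* vlib_get_buffers() translates the whole vector of buffer indices into
   * buffer pointers up front, which is what lets the per-packet
   * vlib_get_buffer() calls be dropped below.  Scalar equivalent (sketch;
   * the real helper in vlib is vectorized):
   *
   *   for (u32 i = 0; i < n_left_from; i++)
   *     bufs[i] = vlib_get_buffer (vm, from[i]);
   */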
while (n_left_from > 0)
{
while (n_left_from >= 8 && n_left_to_next >= 4)
{
- u32 bi0, bi1, bi2, bi3;
- vlib_buffer_t *b0, *b1, *b2, *b3;
u32 next0, next1, next2, next3;
u32 sw_if_index0, sw_if_index1, sw_if_index2, sw_if_index3;
/* Prefetch next iteration. */
{
- vlib_buffer_t *p4, *p5, *p6, *p7;
-
- p4 = vlib_get_buffer (vm, from[4]);
- p5 = vlib_get_buffer (vm, from[5]);
- p6 = vlib_get_buffer (vm, from[6]);
- p7 = vlib_get_buffer (vm, from[7]);
/* Prefetch the buffer header and packet for the N+2 loop iteration */
- vlib_prefetch_buffer_header (p4, LOAD);
- vlib_prefetch_buffer_header (p5, LOAD);
- vlib_prefetch_buffer_header (p6, LOAD);
- vlib_prefetch_buffer_header (p7, LOAD);
+ vlib_prefetch_buffer_header (b[4], LOAD);
+ vlib_prefetch_buffer_header (b[5], LOAD);
+ vlib_prefetch_buffer_header (b[6], LOAD);
+ vlib_prefetch_buffer_header (b[7], LOAD);
- CLIB_PREFETCH (p4->data, CLIB_CACHE_LINE_BYTES, STORE);
- CLIB_PREFETCH (p5->data, CLIB_CACHE_LINE_BYTES, STORE);
- CLIB_PREFETCH (p6->data, CLIB_CACHE_LINE_BYTES, STORE);
- CLIB_PREFETCH (p7->data, CLIB_CACHE_LINE_BYTES, STORE);
+ CLIB_PREFETCH (b[4]->data, CLIB_CACHE_LINE_BYTES, STORE);
+ CLIB_PREFETCH (b[5]->data, CLIB_CACHE_LINE_BYTES, STORE);
+ CLIB_PREFETCH (b[6]->data, CLIB_CACHE_LINE_BYTES, STORE);
+ CLIB_PREFETCH (b[7]->data, CLIB_CACHE_LINE_BYTES, STORE);
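	  /* Prefetching one quad ahead (b[4]..b[7] while b[0]..b[3] are
	   * being processed) is classic software pipelining: headers and
	   * the first cache line of packet data are requested well before
	   * they are touched, hiding memory latency.  Shape of the pattern
	   * (sketch):
	   *
	   *   while (n >= 8)
	   *     {
	   *       prefetch (b[4]..b[7]);
	   *       process (b[0]..b[3]);
	   *       b += 4; n -= 4;
	   *     }
	   */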
/*
* Don't bother prefetching the bridge-domain config (which
/* speculatively enqueue b0 and b1 to the current next frame */
/* bi is "buffer index", b is pointer to the buffer */
- to_next[0] = bi0 = from[0];
- to_next[1] = bi1 = from[1];
- to_next[2] = bi2 = from[2];
- to_next[3] = bi3 = from[3];
- from += 4;
- to_next += 4;
- n_left_from -= 4;
- n_left_to_next -= 4;
-
- b0 = vlib_get_buffer (vm, bi0);
- b1 = vlib_get_buffer (vm, bi1);
- b2 = vlib_get_buffer (vm, bi2);
- b3 = vlib_get_buffer (vm, bi3);
if (do_trace)
{
/* RX interface handles */
- sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
- sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_RX];
- sw_if_index2 = vnet_buffer (b2)->sw_if_index[VLIB_RX];
- sw_if_index3 = vnet_buffer (b3)->sw_if_index[VLIB_RX];
+ sw_if_index0 = vnet_buffer (b[0])->sw_if_index[VLIB_RX];
+ sw_if_index1 = vnet_buffer (b[1])->sw_if_index[VLIB_RX];
+ sw_if_index2 = vnet_buffer (b[2])->sw_if_index[VLIB_RX];
+ sw_if_index3 = vnet_buffer (b[3])->sw_if_index[VLIB_RX];
- if (b0->flags & VLIB_BUFFER_IS_TRACED)
+ if (b[0]->flags & VLIB_BUFFER_IS_TRACED)
{
- ethernet_header_t *h0 = vlib_buffer_get_current (b0);
+ ethernet_header_t *h0 = vlib_buffer_get_current (b[0]);
l2input_trace_t *t =
- vlib_add_trace (vm, node, b0, sizeof (*t));
+ vlib_add_trace (vm, node, b[0], sizeof (*t));
t->sw_if_index = sw_if_index0;
- clib_memcpy (t->src, h0->src_address, 6);
- clib_memcpy (t->dst, h0->dst_address, 6);
+ clib_memcpy_fast (t->dst_and_src, h0->dst_address,
+ sizeof (h0->dst_address) +
+ sizeof (h0->src_address));
}
- if (b1->flags & VLIB_BUFFER_IS_TRACED)
+ if (b[1]->flags & VLIB_BUFFER_IS_TRACED)
{
- ethernet_header_t *h1 = vlib_buffer_get_current (b1);
+ ethernet_header_t *h1 = vlib_buffer_get_current (b[1]);
l2input_trace_t *t =
- vlib_add_trace (vm, node, b1, sizeof (*t));
+ vlib_add_trace (vm, node, b[1], sizeof (*t));
t->sw_if_index = sw_if_index1;
- clib_memcpy (t->src, h1->src_address, 6);
- clib_memcpy (t->dst, h1->dst_address, 6);
+ clib_memcpy_fast (t->dst_and_src, h1->dst_address,
+ sizeof (h1->dst_address) +
+ sizeof (h1->src_address));
}
- if (b2->flags & VLIB_BUFFER_IS_TRACED)
+ if (b[2]->flags & VLIB_BUFFER_IS_TRACED)
{
- ethernet_header_t *h2 = vlib_buffer_get_current (b2);
+ ethernet_header_t *h2 = vlib_buffer_get_current (b[2]);
l2input_trace_t *t =
- vlib_add_trace (vm, node, b2, sizeof (*t));
+ vlib_add_trace (vm, node, b[2], sizeof (*t));
t->sw_if_index = sw_if_index2;
- clib_memcpy (t->src, h2->src_address, 6);
- clib_memcpy (t->dst, h2->dst_address, 6);
+ clib_memcpy_fast (t->dst_and_src, h2->dst_address,
+ sizeof (h2->dst_address) +
+ sizeof (h2->src_address));
}
- if (b3->flags & VLIB_BUFFER_IS_TRACED)
+ if (b[3]->flags & VLIB_BUFFER_IS_TRACED)
{
- ethernet_header_t *h3 = vlib_buffer_get_current (b3);
+ ethernet_header_t *h3 = vlib_buffer_get_current (b[3]);
l2input_trace_t *t =
- vlib_add_trace (vm, node, b3, sizeof (*t));
+ vlib_add_trace (vm, node, b[3], sizeof (*t));
t->sw_if_index = sw_if_index3;
- clib_memcpy (t->src, h3->src_address, 6);
- clib_memcpy (t->dst, h3->dst_address, 6);
+ clib_memcpy_fast (t->dst_and_src, h3->dst_address,
+ sizeof (h3->dst_address) +
+ sizeof (h3->src_address));
}
}
- classify_and_dispatch (msm, b0, &next0);
- classify_and_dispatch (msm, b1, &next1);
- classify_and_dispatch (msm, b2, &next2);
- classify_and_dispatch (msm, b3, &next3);
+ classify_and_dispatch (msm, b[0], &next0);
+ classify_and_dispatch (msm, b[1], &next1);
+	  /* copying the buffer indices here, between the classify calls,
+	   * measured better performance than copying them up front */
+ clib_memcpy_fast (to_next, from, sizeof (from[0]) * 4);
+ to_next += 4;
+ classify_and_dispatch (msm, b[2], &next2);
+ classify_and_dispatch (msm, b[3], &next3);
+ b += 4;
+ n_left_from -= 4;
+ n_left_to_next -= 4;
/* verify speculative enqueues, maybe switch current next frame */
/* if next0==next1==next_index then nothing special needs to be done */
vlib_validate_buffer_enqueue_x4 (vm, node, next_index,
to_next, n_left_to_next,
- bi0, bi1, bi2, bi3,
+ from[0], from[1], from[2], from[3],
next0, next1, next2, next3);
+ from += 4;
}
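	  /* The copy into to_next above is speculative: all four packets
	   * are assumed to follow next_index.  vlib_validate_buffer_enqueue_x4()
	   * then repairs any packet whose computed nextN disagrees, so the
	   * common case (all four matching) costs only the copy.
	   * Conceptually, for each packet N:
	   *
	   *   if (nextN != next_index)
	   *     move packet N to the frame for nextN and fix up to_next;
	   */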
while (n_left_from > 0 && n_left_to_next > 0)
{
- u32 bi0;
- vlib_buffer_t *b0;
u32 next0;
u32 sw_if_index0;
/* speculatively enqueue b0 to the current next frame */
- bi0 = from[0];
- to_next[0] = bi0;
- from += 1;
- to_next += 1;
- n_left_from -= 1;
- n_left_to_next -= 1;
- b0 = vlib_get_buffer (vm, bi0);
-
- if (do_trace && PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
+ if (do_trace && PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
{
- ethernet_header_t *h0 = vlib_buffer_get_current (b0);
- l2input_trace_t *t = vlib_add_trace (vm, node, b0, sizeof (*t));
- sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+ ethernet_header_t *h0 = vlib_buffer_get_current (b[0]);
+ l2input_trace_t *t =
+ vlib_add_trace (vm, node, b[0], sizeof (*t));
+ sw_if_index0 = vnet_buffer (b[0])->sw_if_index[VLIB_RX];
t->sw_if_index = sw_if_index0;
- clib_memcpy (t->src, h0->src_address, 6);
- clib_memcpy (t->dst, h0->dst_address, 6);
+ clib_memcpy_fast (t->dst_and_src, h0->dst_address,
+ sizeof (h0->dst_address) +
+ sizeof (h0->src_address));
}
- classify_and_dispatch (msm, b0, &next0);
+ classify_and_dispatch (msm, b[0], &next0);
+ b += 1;
+ to_next[0] = from[0];
+ to_next += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
/* verify speculative enqueue, maybe switch current next frame */
vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
to_next, n_left_to_next,
- bi0, next0);
+ from[0], next0);
+ from += 1;
}
vlib_put_next_frame (vm, node, next_index, n_left_to_next);
return frame->n_vectors;
}
-static uword
-l2input_node_fn (vlib_main_t * vm,
- vlib_node_runtime_t * node, vlib_frame_t * frame)
+VLIB_NODE_FN (l2input_node) (vlib_main_t * vm,
+ vlib_node_runtime_t * node, vlib_frame_t * frame)
{
if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
return l2input_node_inline (vm, node, frame, 1 /* do_trace */ );
/* *INDENT-OFF* */
VLIB_REGISTER_NODE (l2input_node) = {
- .function = l2input_node_fn,
.name = "l2-input",
.vector_size = sizeof (u32),
.format_trace = format_l2input_trace,
};
/* *INDENT-ON* */
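/* The registration above no longer sets .function: VLIB_NODE_FN wires the
 * node function up itself.  In VPP's multiarch scheme the file is compiled
 * once per CPU variant with CLIB_MARCH_VARIANT defined, VLIB_NODE_FN emits
 * an arch-suffixed function on each pass, and the best variant is selected
 * at runtime, replacing the VLIB_NODE_FUNCTION_MULTIARCH macro removed
 * below.  This is also why everything that must exist exactly once
 * (globals, init functions) is wrapped in #ifndef CLIB_MARCH_VARIANT:
 *
 *   #ifndef CLIB_MARCH_VARIANT
 *   l2input_main_t l2input_main;      (single definition)
 *   #endif
 */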
-VLIB_NODE_FUNCTION_MULTIARCH (l2input_node, l2input_node_fn)
- clib_error_t *l2input_init (vlib_main_t * vm)
+#ifndef CLIB_MARCH_VARIANT
+clib_error_t *
+l2input_init (vlib_main_t * vm)
{
l2input_main_t *mp = &l2input_main;
L2INPUT_FEAT_UU_FWD |
L2INPUT_FEAT_FLOOD |
L2INPUT_FEAT_LEARN |
+ L2INPUT_FEAT_ARP_UFWD |
L2INPUT_FEAT_ARP_TERM);
/* Make sure last-chance drop is configured */
config->feature_bitmap &=
~(L2INPUT_FEAT_LEARN | L2INPUT_FEAT_FWD | L2INPUT_FEAT_FLOOD);
shg = 0; /* not used in xconnect */
-
- /* Insure all packets go to ethernet-input */
- ethernet_set_rx_redirect (vnet_main, hi, 1);
}
/* set up split-horizon group and set output feature bit */
{
if ((hi->l2_if_count == 1) && (l2_if_adjust == 1))
{
- /* Just added first L2 interface on this port */
-
- /* Set promiscuous mode on the l2 interface */
+	  /* Just added the first L2 interface on this port:
+	   * set promiscuous mode on the L2 interface */
ethernet_set_flags (vnet_main, hi->hw_if_index,
ETHERNET_INTERFACE_FLAG_ACCEPT_ALL);
-
- /* ensure all packets go to ethernet-input */
- ethernet_set_rx_redirect (vnet_main, hi, 1);
-
}
else if ((hi->l2_if_count == 0) && (l2_if_adjust == -1))
{
- /* Just removed only L2 subinterface on this port */
-
- /* Disable promiscuous mode on the l2 interface */
- ethernet_set_flags (vnet_main, hi->hw_if_index, 0);
+	  /* Just removed the only L2 subinterface on this port:
+	   * disable promiscuous mode on the L2 interface */
+	  ethernet_set_flags (vnet_main, hi->hw_if_index,
+			      /* ETHERNET_INTERFACE_FLAG_DEFAULT_L3 */ 0);
- /* Allow ip packets to go directly to ip4-input etc */
- ethernet_set_rx_redirect (vnet_main, hi, 0);
}
}
return 0;
}
+#endif /* CLIB_MARCH_VARIANT */
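/* Promiscuous mode simply follows the per-port count of L2 interfaces:
 * adding the first one switches the port to accept-all, removing the last
 * one clears it.  A self-contained sketch of that first-add/last-remove
 * toggle (names here are illustrative, not VPP API):
 */
static void
l2_count_toggle_sketch (int *l2_if_count, int adjust,
			void (*set_promisc) (int on))
{
  *l2_if_count += adjust;
  if (*l2_if_count == 1 && adjust == 1)
    set_promisc (1);		/* first L2 interface on this port */
  else if (*l2_if_count == 0 && adjust == -1)
    set_promisc (0);		/* last L2 interface removed */
}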
/**
* Set subinterface in bridging mode with a bridge-domain ID.
_(l2_patch_init) \
_(l2_xcrw_init)
+#ifndef CLIB_MARCH_VARIANT
clib_error_t *
l2_init (vlib_main_t * vm)
{
}
VLIB_INIT_FUNCTION (l2_init);
+#endif /* CLIB_MARCH_VARIANT */
/*
* fd.io coding-style-patch-verification: ON