vlan_count = 0;
// check for vlan encaps
- if ((*type == ETHERNET_TYPE_VLAN) ||
- (*type == ETHERNET_TYPE_DOT1AD) ||
- (*type == ETHERNET_TYPE_VLAN_9100) ||
- (*type == ETHERNET_TYPE_VLAN_9200))
+ if (ethernet_frame_is_tagged (*type))
{
ethernet_vlan_header_t *h0;
u16 tag;
{
*next0 = em->l2_next;
// record the L2 len and reset the buffer so the L2 header is preserved
- vnet_buffer (b0)->l2.l2_len = b0->current_data;
+ u32 eth_start = vnet_buffer (b0)->ethernet.start_of_ethernet_header;
+ vnet_buffer (b0)->l2.l2_len = b0->current_data - eth_start;
vlib_buffer_advance (b0, -ethernet_buffer_header_size (b0));
// check for common IP/MPLS ethertypes
SPARSE_VEC_INVALID_INDEX ? ETHERNET_ERROR_UNKNOWN_TYPE : *error0;
// The table is not populated with LLC values, so check that now.
- // If variant is variant_ethernet then we came from LLC processing. Don't
+ // If variant is variant_ethernet then we came from LLC processing. Don't
// go back there; drop instead by keeping the drop/bad table result.
if ((type0 < 0x600) && (variant == ETHERNET_INPUT_VARIANT_ETHERNET))
{
u32 n_left_from, next_index, *from, *to_next;
u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;
u32 cpu_index = os_get_cpu_number ();
+ u32 cached_sw_if_index = ~0;
+ u32 cached_is_l2 = 0; /* shut up gcc */
if (variant != ETHERNET_INPUT_VARIANT_ETHERNET)
error_node = vlib_node_get_runtime (vm, ethernet_input_node.index);
vlan_intf_t *vlan_intf0, *vlan_intf1;
qinq_intf_t *qinq_intf0, *qinq_intf1;
u32 is_l20, is_l21;
+ ethernet_header_t *e0, *e1;
/* Prefetch next iteration. */
{
b1 = vlib_get_buffer (vm, bi1);
error0 = error1 = ETHERNET_ERROR_NONE;
+ e0 = vlib_buffer_get_current (b0);
+ type0 = clib_net_to_host_u16 (e0->type);
+ e1 = vlib_buffer_get_current (b1);
+ type1 = clib_net_to_host_u16 (e1->type);
+
+ /* Speed-path for the untagged L2 case */
+ if (PREDICT_TRUE (variant == ETHERNET_INPUT_VARIANT_ETHERNET
+ && !ethernet_frame_is_tagged (type0)
+ && !ethernet_frame_is_tagged (type1)))
+ {
+ main_intf_t *intf0;
+ subint_config_t *subint0;
+ u32 sw_if_index0, sw_if_index1;
+
+ sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+ sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_RX];
+ is_l20 = cached_is_l2;
+
+ /* This is probably wholly unnecessary */
+ if (PREDICT_FALSE (sw_if_index0 != sw_if_index1))
+ goto slowpath;
+
+ if (PREDICT_FALSE (cached_sw_if_index != sw_if_index0))
+ {
+ cached_sw_if_index = sw_if_index0;
+ hi0 = vnet_get_sup_hw_interface (vnm, sw_if_index0);
+ intf0 = vec_elt_at_index (em->main_intfs, hi0->hw_if_index);
+ subint0 = &intf0->untagged_subint;
+ cached_is_l2 = is_l20 = subint0->flags & SUBINT_CONFIG_L2;
+ }
+ if (PREDICT_TRUE (is_l20 != 0))
+ {
+ next0 = em->l2_next;
+ vnet_buffer (b0)->l2.l2_len = sizeof (ethernet_header_t);
+ vnet_buffer (b0)->ethernet.start_of_ethernet_header =
+ b0->current_data;
+ next1 = em->l2_next;
+ vnet_buffer (b1)->l2.l2_len = sizeof (ethernet_header_t);
+ vnet_buffer (b1)->ethernet.start_of_ethernet_header =
+ b1->current_data;
+ goto ship_it01;
+ }
+ /* FALLTHROUGH into the general case */
+ }
+ slowpath:
parse_header (variant,
b0,
b0->error = error_node->errors[error0];
b1->error = error_node->errors[error1];
+ ship_it01:
// verify speculative enqueue
vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
n_left_to_next, bi0, bi1, next0,
main_intf_t *main_intf0;
vlan_intf_t *vlan_intf0;
qinq_intf_t *qinq_intf0;
+ ethernet_header_t *e0;
u32 is_l20;
// Prefetch next iteration
b0 = vlib_get_buffer (vm, bi0);
error0 = ETHERNET_ERROR_NONE;
+ e0 = vlib_buffer_get_current (b0);
+ type0 = clib_net_to_host_u16 (e0->type);
+
+ /* Speed-path for the untagged L2 case */
+ if (PREDICT_TRUE (variant == ETHERNET_INPUT_VARIANT_ETHERNET
+ && !ethernet_frame_is_tagged (type0)))
+ {
+ main_intf_t *intf0;
+ subint_config_t *subint0;
+ u32 sw_if_index0;
+
+ sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+ is_l20 = cached_is_l2;
+
+ if (PREDICT_FALSE (cached_sw_if_index != sw_if_index0))
+ {
+ cached_sw_if_index = sw_if_index0;
+ hi0 = vnet_get_sup_hw_interface (vnm, sw_if_index0);
+ intf0 = vec_elt_at_index (em->main_intfs, hi0->hw_if_index);
+ subint0 = &intf0->untagged_subint;
+ cached_is_l2 = is_l20 = subint0->flags & SUBINT_CONFIG_L2;
+ }
+ if (PREDICT_TRUE (is_l20 != 0))
+ {
+ next0 = em->l2_next;
+ vnet_buffer (b0)->l2.l2_len = sizeof (ethernet_header_t);
+ vnet_buffer (b0)->ethernet.start_of_ethernet_header =
+ b0->current_data;
+ goto ship_it0;
+ }
+ /* FALLTHROUGH into the general case */
+ }
parse_header (variant,
b0,
// prior to calling this function. Thus only subinterface counters
// are incremented here.
//
- // Interface level counters include packets received on the main
+ // Interface level counters include packets received on the main
// interface and all subinterfaces. Subinterface level counters
// include only those packets received on that subinterface
// Increment stats if the subint is valid and it is not the main intf
stats_n_bytes += len0;
// Batch stat increments from the same subinterface so counters
- // don't need to be incremented for every packet.
+ // don't need to be incremented for every packet.
if (PREDICT_FALSE (new_sw_if_index0 != stats_sw_if_index))
{
stats_n_packets -= 1;
b0->error = error_node->errors[error0];
// verify speculative enqueue
+ ship_it0:
vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
to_next, n_left_to_next,
bi0, next0);
// not implemented yet or not ethernet
if (unsupported)
{
- // this is the NYI case
+ // this is the NYI case
error = clib_error_return (0, "not implemented yet");
}
goto done;
l3_next->sparse_index_by_input_next_index[ETHERNET_INPUT_NEXT_PUNT] =
SPARSE_VEC_INVALID_INDEX;
- /*
- * Make sure we don't wipe out an ethernet registration by mistake
+ /*
+ * Make sure we don't wipe out an ethernet registration by mistake
* Can happen if init function ordering constraints are missing.
*/
if (CLIB_DEBUG > 0)
em->l2_next =
vlib_node_add_next (vm, ethernet_input_node.index, node_index);
- /*
+ /*
* Even if we never use these arcs, we have to align the next indices...
*/
i = vlib_node_add_next (vm, ethernet_input_type_node.index, node_index);
i = vlib_node_add_next (vm, ethernet_input_type_node.index, node_index);
ASSERT (i == em->redirect_l3_next);
+
+ i = vlib_node_add_next (vm, ethernet_input_not_l2_node.index, node_index);
+
+ ASSERT (i == em->redirect_l3_next);
}
/*