#include <vnet/devices/pipe/pipe.h>
#include <vppinfra/sparse_vec.h>
#include <vnet/l2/l2_bvi.h>
-
+#include <vnet/classify/pcap_classify.h>
#define foreach_ethernet_input_next \
_ (PUNT, "error-punt") \
{
ethernet_header_t *e0;
- e0 = (void *) (b0->data + b0->current_data);
+ e0 = vlib_buffer_get_current (b0);
vnet_buffer (b0)->l2_hdr_offset = b0->current_data;
b0->flags |= VNET_BUFFER_F_L2_HDR_OFFSET_VALID;
// here when prior node was LLC/SNAP processing
u16 *e0;
- e0 = (void *) (b0->data + b0->current_data);
+ e0 = vlib_buffer_get_current (b0);
vlib_buffer_advance (b0, sizeof (e0[0]));
*match_flags = SUBINT_CONFIG_VALID | SUBINT_CONFIG_MATCH_1_TAG;
- h0 = (void *) (b0->data + b0->current_data);
+ h0 = vlib_buffer_get_current (b0);
tag = clib_net_to_host_u16 (h0->priority_cfi_and_id);
// Double tagged packet
*match_flags = SUBINT_CONFIG_VALID | SUBINT_CONFIG_MATCH_2_TAG;
- h0 = (void *) (b0->data + b0->current_data);
+ h0 = vlib_buffer_get_current (b0);
tag = clib_net_to_host_u16 (h0->priority_cfi_and_id);
ethernet_buffer_set_vlan_count (b0, vlan_count);
}
+static_always_inline void
+ethernet_input_inline_dmac_check (vnet_hw_interface_t * hi,
+ u64 * dmacs, u8 * dmacs_bad,
+ u32 n_packets, ethernet_interface_t * ei,
+ u8 have_sec_dmac);
+
// Determine the subinterface for this packet, given the result of the
// vlan table lookups and vlan header parsing. Check the most specific
// matches first.
static_always_inline void
-identify_subint (vnet_hw_interface_t * hi,
+identify_subint (ethernet_main_t * em,
+ vnet_hw_interface_t * hi,
vlib_buffer_t * b0,
u32 match_flags,
main_intf_t * main_intf,
u32 * new_sw_if_index, u8 * error0, u32 * is_l2)
{
u32 matched;
+ ethernet_interface_t *ei = ethernet_get_interface (em, hi->hw_if_index);
- matched = eth_identify_subint (hi, b0, match_flags,
- main_intf, vlan_intf, qinq_intf,
- new_sw_if_index, error0, is_l2);
+ matched = eth_identify_subint (hi, match_flags, main_intf, vlan_intf,
+ qinq_intf, new_sw_if_index, error0, is_l2);
if (matched)
{
-
// Perform L3 my-mac filter
- // A unicast packet arriving on an L3 interface must have a dmac matching the interface mac.
- // This is required for promiscuous mode, else we will forward packets we aren't supposed to.
- if (!(*is_l2))
+ // A unicast packet arriving on an L3 interface must have a dmac
+ // matching the interface mac. If interface has STATUS_L3 bit set
+ // mac filter is already done.
+ if ((!*is_l2) && ei &&
+ (!(ei->flags & ETHERNET_INTERFACE_FLAG_STATUS_L3)))
{
+ u64 dmacs[2];
+ u8 dmacs_bad[2];
ethernet_header_t *e0;
+
e0 = (void *) (b0->data + vnet_buffer (b0)->l2_hdr_offset);
+ dmacs[0] = *(u64 *) e0;
- if (!(ethernet_address_cast (e0->dst_address)))
- {
- if (!eth_mac_equal ((u8 *) e0, hi->hw_address))
- {
- *error0 = ETHERNET_ERROR_L3_MAC_MISMATCH;
- }
- }
+ if (vec_len (ei->secondary_addrs))
+ ethernet_input_inline_dmac_check (hi, dmacs, dmacs_bad,
+ 1 /* n_packets */, ei,
+ 1 /* have_sec_dmac */);
+ else
+ ethernet_input_inline_dmac_check (hi, dmacs, dmacs_bad,
+ 1 /* n_packets */, ei,
+ 0 /* have_sec_dmac */);
+ if (dmacs_bad[0])
+ *error0 = ETHERNET_ERROR_L3_MAC_MISMATCH;
}
// Check for down subinterface
}
}
-typedef enum
-{
- ETYPE_ID_UNKNOWN = 0,
- ETYPE_ID_IP4,
- ETYPE_ID_IP6,
- ETYPE_ID_MPLS,
- ETYPE_N_IDS,
-} etype_id_t;
-
-static_always_inline void
-eth_input_advance_and_flags (vlib_main_t * vm, u32 * from, u32 n_left,
- i16 advance, u32 and_flags, u32 or_flags)
-{
- vlib_buffer_t *b[8];
- while (n_left >= 8)
- {
- vlib_get_buffers (vm, from, b, 8);
- vlib_buffer_advance (b[0], advance);
- vlib_buffer_advance (b[1], advance);
- vlib_buffer_advance (b[2], advance);
- vlib_buffer_advance (b[3], advance);
- vlib_buffer_advance (b[4], advance);
- vlib_buffer_advance (b[5], advance);
- vlib_buffer_advance (b[6], advance);
- vlib_buffer_advance (b[7], advance);
- b[0]->flags = (b[0]->flags & and_flags) | or_flags;
- b[1]->flags = (b[1]->flags & and_flags) | or_flags;
- b[2]->flags = (b[2]->flags & and_flags) | or_flags;
- b[3]->flags = (b[3]->flags & and_flags) | or_flags;
- b[4]->flags = (b[4]->flags & and_flags) | or_flags;
- b[5]->flags = (b[5]->flags & and_flags) | or_flags;
- b[6]->flags = (b[6]->flags & and_flags) | or_flags;
- b[7]->flags = (b[7]->flags & and_flags) | or_flags;
-
- n_left -= 8;
- from += 8;
- }
- while (n_left)
- {
- vlib_get_buffers (vm, from, b, 1);
- vlib_buffer_advance (b[0], advance);
- b[0]->flags = (b[0]->flags & and_flags) | or_flags;
-
- n_left -= 1;
- from += 1;
- }
-}
-
-typedef struct
-{
- u16 etypes[VLIB_FRAME_SIZE];
- u32 bufs_by_etype[ETYPE_N_IDS][VLIB_FRAME_SIZE];
- u16 n_bufs_by_etype[ETYPE_N_IDS];
-} eth_input_data_t;
/* following vector code relies on following assumptions */
STATIC_ASSERT_OFFSET_OF (vlib_buffer_t, current_data, 0);
"l3_hdr_offset must follow l2_hdr_offset");
static_always_inline void
-eth_input_adv_and_flags_x4 (vlib_buffer_t ** b, i16 adv, u32 flags, int is_l3)
+eth_input_adv_and_flags_x4 (vlib_buffer_t ** b, int is_l3)
{
+ i16 adv = sizeof (ethernet_header_t);
+ u32 flags = VNET_BUFFER_F_L2_HDR_OFFSET_VALID |
+ VNET_BUFFER_F_L3_HDR_OFFSET_VALID;
+
#ifdef CLIB_HAVE_VEC256
/* to reduce number of small loads/stores we are loading first 64 bits
of each buffer metadata into 256-bit register so we can advance
}
static_always_inline void
-eth_input_adv_and_flags_x1 (vlib_buffer_t ** b, i16 adv, u32 flags, int is_l3)
+eth_input_adv_and_flags_x1 (vlib_buffer_t ** b, int is_l3)
{
+ i16 adv = sizeof (ethernet_header_t);
+ u32 flags = VNET_BUFFER_F_L2_HDR_OFFSET_VALID |
+ VNET_BUFFER_F_L3_HDR_OFFSET_VALID;
+
vnet_buffer (b[0])->l2_hdr_offset = b[0]->current_data;
vnet_buffer (b[0])->l3_hdr_offset = b[0]->current_data + adv;
vnet_buffer (b[0])->l2.l2_len = adv;
}
+
/* Extract per-packet parse data for the buffer at b[offset]: the raw
   (network byte order) ethertype into etype[], the 8 bytes immediately
   following the ethernet header (potential VLAN tag data) into tags[],
   and optionally the destination MAC into dmacs[]. */
static_always_inline void
eth_input_get_etype_and_tags (vlib_buffer_t ** b, u16 * etype, u64 * tags,
			      u64 * dmacs, int offset, int dmac_check)
{
  ethernet_header_t *e;
  e = vlib_buffer_get_current (b[offset]);
#ifdef CLIB_HAVE_VEC128
  /* one 16-byte load starting 6 bytes before the type field covers both
     the type (u16 lane 3) and the 8 bytes of tag data (u64 lane 1) */
  u64x2 r = u64x2_load_unaligned (((u8 *) & e->type) - 6);
  etype[offset] = ((u16x8) r)[3];
  tags[offset] = r[1];
#else
  etype[offset] = e->type;
  tags[offset] = *(u64 *) (e + 1);
#endif

  if (dmac_check)
    /* 8-byte load: 6 bytes of dst MAC plus 2 bytes of src MAC; the
       extra bytes are masked off later via DMAC_MASK */
    dmacs[offset] = *(u64 *) e;
}
- while (n_left >= 16)
- {
- vlib_buffer_t **ph = b + 12, **pd = b + 8;
- vlib_get_buffers (vm, from, b, 4);
- vlib_get_buffers (vm, from + 8, b + 8, 8);
/* Map an ethertype (host byte order) to the registered input next-node
   index.  Values below 0x600 are 802.3 length fields rather than
   ethertypes, so such frames are sent to the LLC node. */
static_always_inline u16
eth_input_next_by_type (u16 etype)
{
  ethernet_main_t *em = &ethernet_main;

  return (etype < 0x600) ? ETHERNET_INPUT_NEXT_LLC :
    vec_elt (em->l3_next.input_next_by_type,
	     sparse_vec_index (em->l3_next.input_next_by_type, etype));
}
- vlib_prefetch_buffer_header (ph[1], LOAD);
- vlib_prefetch_buffer_data (pd[1], LOAD);
- e = vlib_buffer_get_current (b[1]);
- etype[1] = e->type;
/* Cache of the last tagged-packet subinterface lookup, reused while
   consecutive slowpath packets carry identical tag data. */
typedef struct
{
  u64 tag, mask;		/* cached raw tag data; mask selects the
				   significant bytes (1 vs 2 tags) */
  u32 sw_if_index;		/* matched subinterface (~0 = none) */
  u16 type, len, next;		/* inner ethertype (host order), l2 header
				   length, next-node index */
  i16 adv;			/* bytes to advance past the l2 header
				   (negative rewinds, for l2 subints) */
  u8 err, n_tags;		/* error code and number of vlan tags */
  u64 n_packets, n_bytes;	/* rx counters pending flush */
} eth_input_tag_lookup_t;
- vlib_prefetch_buffer_header (ph[2], LOAD);
- vlib_prefetch_buffer_data (pd[2], LOAD);
- e = vlib_buffer_get_current (b[2]);
- etype[2] = e->type;
/* Flush the rx packet/byte counters accumulated in a tag lookup cache
   entry into the per-sw-interface combined counters. */
static_always_inline void
eth_input_update_if_counters (vlib_main_t * vm, vnet_main_t * vnm,
			      eth_input_tag_lookup_t * l)
{
  if (l->n_packets == 0 || l->sw_if_index == ~0)
    return;

  /* when the buffer was advanced past the l2 header (adv > 0) the byte
     counts were taken after the advance, so add the header length back */
  if (l->adv > 0)
    l->n_bytes += l->n_packets * l->len;

  vlib_increment_combined_counter
    (vnm->interface_main.combined_sw_if_counters +
     VNET_INTERFACE_COUNTER_RX, vm->thread_index, l->sw_if_index,
     l->n_packets, l->n_bytes);
}
- /* next */
- n_left -= 4;
- etype += 4;
- from += 4;
/* Slowpath handling for one tagged (dot1q or dot1ad) packet: identify
   the subinterface from the vlan tag data, then fix up the buffer
   metadata, next node and counters.  The result of the subinterface
   lookup is cached in *l and reused as long as subsequent packets carry
   tag data identical under l->mask. */
static_always_inline void
eth_input_tag_lookup (vlib_main_t * vm, vnet_main_t * vnm,
		      vlib_node_runtime_t * node, vnet_hw_interface_t * hi,
		      u64 tag, u16 * next, vlib_buffer_t * b,
		      eth_input_tag_lookup_t * l, u8 dmac_bad, int is_dot1ad,
		      int main_is_l3, int check_dmac)
{
  ethernet_main_t *em = &ethernet_main;

  /* cache miss - redo the vlan table lookup */
  if ((tag ^ l->tag) & l->mask)
    {
      main_intf_t *mif = vec_elt_at_index (em->main_intfs, hi->hw_if_index);
      vlan_intf_t *vif;
      qinq_intf_t *qif;
      vlan_table_t *vlan_table;
      qinq_table_t *qinq_table;
      u16 *t = (u16 *) & tag;
      u16 vlan1 = clib_net_to_host_u16 (t[0]) & 0xFFF;
      u16 vlan2 = clib_net_to_host_u16 (t[2]) & 0xFFF;
      u32 matched, is_l2, new_sw_if_index;

      vlan_table = vec_elt_at_index (em->vlan_pool, is_dot1ad ?
				     mif->dot1ad_vlans : mif->dot1q_vlans);
      vif = &vlan_table->vlans[vlan1];
      qinq_table = vec_elt_at_index (em->qinq_pool, vif->qinqs);
      qif = &qinq_table->vlans[vlan2];
      l->err = ETHERNET_ERROR_NONE;
      l->type = clib_net_to_host_u16 (t[1]);

      if (l->type == ETHERNET_TYPE_VLAN)
	{
	  /* inner tag present - double tagged packet */
	  l->type = clib_net_to_host_u16 (t[3]);
	  l->n_tags = 2;
	  matched = eth_identify_subint (hi, SUBINT_CONFIG_VALID |
					 SUBINT_CONFIG_MATCH_2_TAG, mif, vif,
					 qif, &new_sw_if_index, &l->err,
					 &is_l2);
	}
      else
	{
	  l->n_tags = 1;
	  if (vlan1 == 0)
	    {
	      /* priority-tagged (vlan 0) goes to the main interface */
	      new_sw_if_index = hi->sw_if_index;
	      l->err = ETHERNET_ERROR_NONE;
	      matched = 1;
	      is_l2 = main_is_l3 == 0;
	    }
	  else
	    matched = eth_identify_subint (hi, SUBINT_CONFIG_VALID |
					   SUBINT_CONFIG_MATCH_1_TAG, mif,
					   vif, qif, &new_sw_if_index,
					   &l->err, &is_l2);
	}

      if (l->sw_if_index != new_sw_if_index)
	{
	  /* interface changed - flush counters for the previous one */
	  eth_input_update_if_counters (vm, vnm, l);
	  l->n_packets = 0;
	  l->n_bytes = 0;
	  l->sw_if_index = new_sw_if_index;
	}
      l->tag = tag;
      /* single-tag matches only compare the first 4 bytes of tag data */
      l->mask = (l->n_tags == 2) ?
	clib_net_to_host_u64 (0xffffffffffffffff) :
	clib_net_to_host_u64 (0xffffffff00000000);

      if (matched && l->sw_if_index == ~0)
	l->err = ETHERNET_ERROR_DOWN;

      l->len = sizeof (ethernet_header_t) +
	l->n_tags * sizeof (ethernet_vlan_header_t);
      if (main_is_l3)
	/* buffer currently points past the ethernet header; rewind for
	   l2 subints, or advance past the vlan tags for l3 */
	l->adv = is_l2 ? -(int) sizeof (ethernet_header_t) :
	  l->n_tags * sizeof (ethernet_vlan_header_t);
      else
	/* buffer currently points at the ethernet header */
	l->adv = is_l2 ? 0 : l->len;

      if (PREDICT_FALSE (l->err != ETHERNET_ERROR_NONE))
	l->next = ETHERNET_INPUT_NEXT_DROP;
      else if (is_l2)
	l->next = em->l2_next;
      else if (l->type == ETHERNET_TYPE_IP4)
	l->next = em->l3_next.input_next_ip4;
      else if (l->type == ETHERNET_TYPE_IP6)
	l->next = em->l3_next.input_next_ip6;
      else if (l->type == ETHERNET_TYPE_MPLS)
	l->next = em->l3_next.input_next_mpls;
      else if (em->redirect_l3)
	l->next = em->redirect_l3_next;
      else
	{
	  l->next = eth_input_next_by_type (l->type);
	  if (l->next == ETHERNET_INPUT_NEXT_PUNT)
	    l->err = ETHERNET_ERROR_UNKNOWN_TYPE;
	}
    }

  /* l3 packet whose dmac did not match any interface address */
  if (check_dmac && l->adv > 0 && dmac_bad)
    {
      l->err = ETHERNET_ERROR_L3_MAC_MISMATCH;
      next[0] = ETHERNET_INPUT_NEXT_PUNT;
    }
  else
    next[0] = l->next;

  vlib_buffer_advance (b, l->adv);
  vnet_buffer (b)->l2.l2_len = l->len;
  vnet_buffer (b)->l3_hdr_offset = vnet_buffer (b)->l2_hdr_offset + l->len;

  if (l->err == ETHERNET_ERROR_NONE)
    {
      vnet_buffer (b)->sw_if_index[VLIB_RX] = l->sw_if_index;
      ethernet_buffer_set_vlan_count (b, l->n_tags);
    }
  else
    b->error = node->errors[l->err];

  /* update counters */
  l->n_packets += 1;
  l->n_bytes += vlib_buffer_length_in_chain (vm, b);
}
+
+#define DMAC_MASK clib_net_to_host_u64 (0xFFFFFFFFFFFF0000)
+#define DMAC_IGBIT clib_net_to_host_u64 (0x0100000000000000)
+
#ifdef CLIB_HAVE_VEC256
/* AVX2 variant of is_dmac_bad: check 4 destination MACs at once.
   Returns a 32-bit mask with a group of 8 bits set for each entry that
   is unicast (I/G bit clear) and does not match hwaddr. */
static_always_inline u32
is_dmac_bad_x4 (u64 * dmacs, u64 hwaddr)
{
  u64x4 r0 = u64x4_load_unaligned (dmacs) & u64x4_splat (DMAC_MASK);
  r0 = (r0 != u64x4_splat (hwaddr)) & ((r0 & u64x4_splat (DMAC_IGBIT)) == 0);
  return u8x32_msb_mask ((u8x32) (r0));
}
#endif
+
+static_always_inline u8
+is_dmac_bad (u64 dmac, u64 hwaddr)
+{
+ u64 r0 = dmac & DMAC_MASK;
+ return (r0 != hwaddr) && ((r0 & DMAC_IGBIT) == 0);
+}
+
+static_always_inline u8
+is_sec_dmac_bad (u64 dmac, u64 hwaddr)
+{
+ return ((dmac & DMAC_MASK) != hwaddr);
+}
+
#ifdef CLIB_HAVE_VEC256
/* AVX2 variant of is_sec_dmac_bad: exact masked compare of 4 DMACs
   against a secondary address; 8 mask bits set per mismatch. */
static_always_inline u32
is_sec_dmac_bad_x4 (u64 * dmacs, u64 hwaddr)
{
  u64x4 r0 = u64x4_load_unaligned (dmacs) & u64x4_splat (DMAC_MASK);
  r0 = (r0 != u64x4_splat (hwaddr));
  return u8x32_msb_mask ((u8x32) (r0));
}
#endif
+
+static_always_inline u8
+eth_input_sec_dmac_check_x1 (u64 hwaddr, u64 * dmac, u8 * dmac_bad)
+{
+ dmac_bad[0] &= is_sec_dmac_bad (dmac[0], hwaddr);
+ return dmac_bad[0];
+}
+
/* Re-check 4 consecutive DMACs against a secondary interface address,
   clearing bad flags in place.  Returns non-zero if any of the 4 flags
   is still set (reads the 4 u8 flags as one u32). */
static_always_inline u32
eth_input_sec_dmac_check_x4 (u64 hwaddr, u64 * dmac, u8 * dmac_bad)
{
#ifdef CLIB_HAVE_VEC256
  *(u32 *) (dmac_bad + 0) &= is_sec_dmac_bad_x4 (dmac + 0, hwaddr);
#else
  dmac_bad[0] &= is_sec_dmac_bad (dmac[0], hwaddr);
  dmac_bad[1] &= is_sec_dmac_bad (dmac[1], hwaddr);
  dmac_bad[2] &= is_sec_dmac_bad (dmac[2], hwaddr);
  dmac_bad[3] &= is_sec_dmac_bad (dmac[3], hwaddr);
#endif
  return *(u32 *) dmac_bad;
}
+
/*
 * DMAC check for ethernet_input_inline()
 *
 * Flags each packet whose destination MAC matches neither the primary
 * nor (optionally) any secondary interface address.
 *
 * dmacs and dmacs_bad are arrays that are 2 elements long
 * n_packets should be 1 or 2 for ethernet_input_inline()
 */
static_always_inline void
ethernet_input_inline_dmac_check (vnet_hw_interface_t * hi,
				  u64 * dmacs, u8 * dmacs_bad,
				  u32 n_packets, ethernet_interface_t * ei,
				  u8 have_sec_dmac)
{
  u64 hwaddr = ei->address.as_u64;
  u8 bad = 0;

  ASSERT (0 == ei->address.zero);

  /* primary address check; slot 1 only participates when n_packets > 1 */
  dmacs_bad[0] = is_dmac_bad (dmacs[0], hwaddr);
  dmacs_bad[1] = ((n_packets > 1) & is_dmac_bad (dmacs[1], hwaddr));

  bad = dmacs_bad[0] | dmacs_bad[1];

  if (PREDICT_FALSE (bad && have_sec_dmac))
    {
      ethernet_interface_address_t *sec_addr;

      /* try each secondary address until every packet has matched */
      vec_foreach (sec_addr, ei->secondary_addrs)
	{
	  ASSERT (0 == sec_addr->zero);
	  hwaddr = sec_addr->as_u64;

	  bad = (eth_input_sec_dmac_check_x1 (hwaddr, dmacs, dmacs_bad) |
		 eth_input_sec_dmac_check_x1 (hwaddr, dmacs + 1,
					      dmacs_bad + 1));

	  if (!bad)
	    return;
	}
    }
}
+
/* Frame-wide DMAC check for eth_input_process_frame(): fill dmacs_bad[]
   with a non-zero flag for every packet whose destination MAC matches
   neither the primary nor (optionally) any secondary interface address.
   The arrays are VLIB_FRAME_SIZE long, so the 4/8-wide loops may safely
   run past n_packets. */
static_always_inline void
eth_input_process_frame_dmac_check (vnet_hw_interface_t * hi,
				    u64 * dmacs, u8 * dmacs_bad,
				    u32 n_packets, ethernet_interface_t * ei,
				    u8 have_sec_dmac)
{
  u64 hwaddr = ei->address.as_u64;
  u64 *dmac = dmacs;
  u8 *dmac_bad = dmacs_bad;
  u32 bad = 0;
  i32 n_left = n_packets;

  ASSERT (0 == ei->address.zero);

  /* pass 1: compare every packet against the primary address */
#ifdef CLIB_HAVE_VEC256
  while (n_left > 0)
    {
      bad |= *(u32 *) (dmac_bad + 0) = is_dmac_bad_x4 (dmac + 0, hwaddr);
      bad |= *(u32 *) (dmac_bad + 4) = is_dmac_bad_x4 (dmac + 4, hwaddr);

      /* next */
      dmac += 8;
      dmac_bad += 8;
      n_left -= 8;
    }
#else
  while (n_left > 0)
    {
      bad |= dmac_bad[0] = is_dmac_bad (dmac[0], hwaddr);
      bad |= dmac_bad[1] = is_dmac_bad (dmac[1], hwaddr);
      bad |= dmac_bad[2] = is_dmac_bad (dmac[2], hwaddr);
      bad |= dmac_bad[3] = is_dmac_bad (dmac[3], hwaddr);

      /* next */
      dmac += 4;
      dmac_bad += 4;
      n_left -= 4;
    }
#endif

  /* pass 2: re-check still-bad packets against each secondary address */
  if (have_sec_dmac && bad)
    {
      ethernet_interface_address_t *addr;

      vec_foreach (addr, ei->secondary_addrs)
	{
	  u64 hwaddr = addr->as_u64;
	  i32 n_left = n_packets;
	  u64 *dmac = dmacs;
	  u8 *dmac_bad = dmacs_bad;

	  ASSERT (0 == addr->zero);

	  bad = 0;

	  while (n_left > 0)
	    {
	      int adv = 0;
	      int n_bad;

	      /* skip any that have already matched */
	      if (!dmac_bad[0])
		{
		  dmac += 1;
		  dmac_bad += 1;
		  n_left -= 1;
		  continue;
		}

	      n_bad = clib_min (4, n_left);

	      /* If >= 4 left, compare 4 together */
	      if (n_bad == 4)
		{
		  bad |= eth_input_sec_dmac_check_x4 (hwaddr, dmac, dmac_bad);
		  adv = 4;
		  n_bad = 0;
		}

	      /* handle individually */
	      while (n_bad > 0)
		{
		  bad |= eth_input_sec_dmac_check_x1 (hwaddr, dmac + adv,
						      dmac_bad + adv);
		  adv += 1;
		  n_bad -= 1;
		}

	      dmac += adv;
	      dmac_bad += adv;
	      n_left -= adv;
	    }

	  if (!bad)		/* can stop looping if everything matched */
	    break;
	}
    }
}
/* process frame of buffers, store ethertype into array and update
   buffer metadata fields depending on interface being l2 or l3 assuming that
   packets are untagged. For tagged packets those fields are updated later.
   Optionally store Destination MAC address and tag data into arrays
   for further processing */

STATIC_ASSERT (VLIB_FRAME_SIZE % 8 == 0,
	       "VLIB_FRAME_SIZE must be power of 8");
static_always_inline void
eth_input_process_frame (vlib_main_t * vm, vlib_node_runtime_t * node,
			 vnet_hw_interface_t * hi,
			 u32 * buffer_indices, u32 n_packets, int main_is_l3,
			 int ip4_cksum_ok, int dmac_check)
{
  ethernet_main_t *em = &ethernet_main;
  u16 nexts[VLIB_FRAME_SIZE], *next;
  u16 etypes[VLIB_FRAME_SIZE], *etype = etypes;
  u64 dmacs[VLIB_FRAME_SIZE], *dmac = dmacs;
  u8 dmacs_bad[VLIB_FRAME_SIZE];
  u64 tags[VLIB_FRAME_SIZE], *tag = tags;
  u16 slowpath_indices[VLIB_FRAME_SIZE];
  u16 n_slowpath, i;
  u16 next_ip4, next_ip6, next_mpls, next_l2;
  u16 et_ip4 = clib_host_to_net_u16 (ETHERNET_TYPE_IP4);
  u16 et_ip6 = clib_host_to_net_u16 (ETHERNET_TYPE_IP6);
  u16 et_mpls = clib_host_to_net_u16 (ETHERNET_TYPE_MPLS);
  u16 et_vlan = clib_host_to_net_u16 (ETHERNET_TYPE_VLAN);
  u16 et_dot1ad = clib_host_to_net_u16 (ETHERNET_TYPE_DOT1AD);
  i32 n_left = n_packets;
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE];
  vlib_buffer_t **b = bufs;
  ethernet_interface_t *ei = ethernet_get_interface (em, hi->hw_if_index);

  vlib_get_buffers (vm, buffer_indices, b, n_left);

  /* stage 1: extract ethertype/tags/dmac and set untagged metadata,
     4 packets per iteration with header+data prefetch 16/8 ahead */
  while (n_left >= 20)
    {
      vlib_buffer_t **ph = b + 16, **pd = b + 8;

      vlib_prefetch_buffer_header (ph[0], LOAD);
      vlib_prefetch_buffer_data (pd[0], LOAD);
      eth_input_get_etype_and_tags (b, etype, tag, dmac, 0, dmac_check);

      vlib_prefetch_buffer_header (ph[1], LOAD);
      vlib_prefetch_buffer_data (pd[1], LOAD);
      eth_input_get_etype_and_tags (b, etype, tag, dmac, 1, dmac_check);

      vlib_prefetch_buffer_header (ph[2], LOAD);
      vlib_prefetch_buffer_data (pd[2], LOAD);
      eth_input_get_etype_and_tags (b, etype, tag, dmac, 2, dmac_check);

      vlib_prefetch_buffer_header (ph[3], LOAD);
      vlib_prefetch_buffer_data (pd[3], LOAD);
      eth_input_get_etype_and_tags (b, etype, tag, dmac, 3, dmac_check);

      eth_input_adv_and_flags_x4 (b, main_is_l3);

      /* next */
      b += 4;
      n_left -= 4;
      etype += 4;
      tag += 4;
      dmac += 4;
    }
  while (n_left >= 4)
    {
      eth_input_get_etype_and_tags (b, etype, tag, dmac, 0, dmac_check);
      eth_input_get_etype_and_tags (b, etype, tag, dmac, 1, dmac_check);
      eth_input_get_etype_and_tags (b, etype, tag, dmac, 2, dmac_check);
      eth_input_get_etype_and_tags (b, etype, tag, dmac, 3, dmac_check);
      eth_input_adv_and_flags_x4 (b, main_is_l3);

      /* next */
      b += 4;
      n_left -= 4;
      etype += 4;
      tag += 4;
      dmac += 4;
    }
  while (n_left)
    {
      eth_input_get_etype_and_tags (b, etype, tag, dmac, 0, dmac_check);
      eth_input_adv_and_flags_x1 (b, main_is_l3);

      /* next */
      b += 1;
      n_left -= 1;
      etype += 1;
      tag += 1;
      dmac += 1;
    }

  /* stage 2: flag packets whose dmac matches no interface address */
  if (dmac_check)
    {
      if (ei && vec_len (ei->secondary_addrs))
	eth_input_process_frame_dmac_check (hi, dmacs, dmacs_bad, n_packets,
					    ei, 1 /* have_sec_dmac */ );
      else
	eth_input_process_frame_dmac_check (hi, dmacs, dmacs_bad, n_packets,
					    ei, 0 /* have_sec_dmac */ );
    }

  next_ip4 = em->l3_next.input_next_ip4;
  next_ip6 = em->l3_next.input_next_ip6;
  next_mpls = em->l3_next.input_next_mpls;
  next_l2 = em->l2_next;

  /* checksum already verified by hw - skip the sw checksum node */
  if (next_ip4 == ETHERNET_INPUT_NEXT_IP4_INPUT && ip4_cksum_ok)
    next_ip4 = ETHERNET_INPUT_NEXT_IP4_INPUT_NCS;

#ifdef CLIB_HAVE_VEC256
  u16x16 et16_ip4 = u16x16_splat (et_ip4);
  u16x16 et16_ip6 = u16x16_splat (et_ip6);
  u16x16 et16_mpls = u16x16_splat (et_mpls);
  u16x16 et16_vlan = u16x16_splat (et_vlan);
  u16x16 et16_dot1ad = u16x16_splat (et_dot1ad);
  u16x16 next16_ip4 = u16x16_splat (next_ip4);
  u16x16 next16_ip6 = u16x16_splat (next_ip6);
  u16x16 next16_mpls = u16x16_splat (next_mpls);
  u16x16 next16_l2 = u16x16_splat (next_l2);
  u16x16 zero = { 0 };
  u16x16 stairs = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 };
#endif

  etype = etypes;
  n_left = n_packets;
  next = nexts;
  n_slowpath = 0;
  i = 0;

  /* fastpath - in l3 mode handles ip4, ip6 and mpls packets, other packets
     are considered as slowpath, in l2 mode all untagged packets are
     considered as fastpath */
  while (n_left > 0)
    {
#ifdef CLIB_HAVE_VEC256
      if (n_left >= 16)
	{
	  /* compute 16 next indices at once; a zero next means the
	     packet needs slowpath treatment */
	  u16x16 r = zero;
	  u16x16 e16 = u16x16_load_unaligned (etype);
	  if (main_is_l3)
	    {
	      r += (e16 == et16_ip4) & next16_ip4;
	      r += (e16 == et16_ip6) & next16_ip6;
	      r += (e16 == et16_mpls) & next16_mpls;
	    }
	  else
	    r = ((e16 != et16_vlan) & (e16 != et16_dot1ad)) & next16_l2;
	  u16x16_store_unaligned (r, next);

	  if (!u16x16_is_all_zero (r == zero))
	    {
	      /* at least one slowpath packet in this group of 16 */
	      if (u16x16_is_all_zero (r))
		{
		  /* all 16 are slowpath - store indices i..i+15 */
		  u16x16_store_unaligned (u16x16_splat (i) + stairs,
					  slowpath_indices + n_slowpath);
		  n_slowpath += 16;
		}
	      else
		{
		  for (int j = 0; j < 16; j++)
		    {
		      if (next[j] == 0)
			slowpath_indices[n_slowpath++] = i + j;
		      else if (dmac_check && main_is_l3 && dmacs_bad[i + j])
			{
			  next[j] = 0;
			  slowpath_indices[n_slowpath++] = i + j;
			}
		    }
		}
	    }
	  else
	    {
	      /* all 16 fastpath - still need to divert dmac mismatches */
	      if (dmac_check && main_is_l3)
		{
		  u8x16 dmac_bad = u8x16_load_unaligned (&dmacs_bad[i]);
		  if (!u8x16_is_all_zero (dmac_bad))
		    {
		      for (int j = 0; j < 16; j++)
			if (dmacs_bad[i + j])
			  {
			    next[j] = 0;
			    slowpath_indices[n_slowpath++] = i + j;
			  }
		    }
		}
	    }

	  etype += 16;
	  next += 16;
	  n_left -= 16;
	  i += 16;
	  continue;
	}
#endif
      if (dmac_check && main_is_l3 && dmacs_bad[i])
	{
	  next[0] = 0;
	  slowpath_indices[n_slowpath++] = i;
	}
      else if (main_is_l3 && etype[0] == et_ip4)
	next[0] = next_ip4;
      else if (main_is_l3 && etype[0] == et_ip6)
	next[0] = next_ip6;
      else if (main_is_l3 && etype[0] == et_mpls)
	next[0] = next_mpls;
      else if (main_is_l3 == 0 &&
	       etype[0] != et_vlan && etype[0] != et_dot1ad)
	next[0] = next_l2;
      else
	{
	  next[0] = 0;
	  slowpath_indices[n_slowpath++] = i;
	}

      etype += 1;
      next += 1;
      n_left -= 1;
      i += 1;
    }

  if (n_slowpath)
    {
      vnet_main_t *vnm = vnet_get_main ();
      n_left = n_slowpath;
      u16 *si = slowpath_indices;
      u32 last_unknown_etype = ~0;
      u32 last_unknown_next = ~0;
      /* seed the caches with a tag guaranteed not to match packet 0 */
      eth_input_tag_lookup_t dot1ad_lookup, dot1q_lookup = {
	.mask = -1LL,
	.tag = tags[si[0]] ^ -1LL,
	.sw_if_index = ~0
      };

      clib_memcpy_fast (&dot1ad_lookup, &dot1q_lookup, sizeof (dot1q_lookup));

      while (n_left)
	{
	  i = si[0];
	  u16 etype = etypes[i];

	  if (etype == et_vlan)
	    {
	      vlib_buffer_t *b = vlib_get_buffer (vm, buffer_indices[i]);
	      eth_input_tag_lookup (vm, vnm, node, hi, tags[i], nexts + i, b,
				    &dot1q_lookup, dmacs_bad[i], 0,
				    main_is_l3, dmac_check);

	    }
	  else if (etype == et_dot1ad)
	    {
	      vlib_buffer_t *b = vlib_get_buffer (vm, buffer_indices[i]);
	      eth_input_tag_lookup (vm, vnm, node, hi, tags[i], nexts + i, b,
				    &dot1ad_lookup, dmacs_bad[i], 1,
				    main_is_l3, dmac_check);
	    }
	  else
	    {
	      /* untagged packet with not well known ethertype */
	      if (last_unknown_etype != etype)
		{
		  last_unknown_etype = etype;
		  etype = clib_host_to_net_u16 (etype);
		  last_unknown_next = eth_input_next_by_type (etype);
		}
	      if (dmac_check && main_is_l3 && dmacs_bad[i])
		{
		  vlib_buffer_t *b = vlib_get_buffer (vm, buffer_indices[i]);
		  b->error = node->errors[ETHERNET_ERROR_L3_MAC_MISMATCH];
		  nexts[i] = ETHERNET_INPUT_NEXT_PUNT;
		}
	      else
		nexts[i] = last_unknown_next;
	    }

	  /* next */
	  n_left--;
	  si++;
	}

      eth_input_update_if_counters (vm, vnm, &dot1q_lookup);
      eth_input_update_if_counters (vm, vnm, &dot1ad_lookup);
    }

  vlib_buffer_enqueue_to_next (vm, node, buffer_indices, nexts, n_packets);
}
+
/* Entry point for a frame arriving on a single interface: decide the
   l2/l3 mode of the untagged (main) subinterface and whether a DMAC
   filter pass is needed, then hand the frame to
   eth_input_process_frame() with compile-time-specialized flags. */
static_always_inline void
eth_input_single_int (vlib_main_t * vm, vlib_node_runtime_t * node,
		      vnet_hw_interface_t * hi, u32 * from, u32 n_pkts,
		      int ip4_cksum_ok)
{
  ethernet_main_t *em = &ethernet_main;
  ethernet_interface_t *ei;
  ei = pool_elt_at_index (em->interfaces, hi->hw_instance);
  main_intf_t *intf0 = vec_elt_at_index (em->main_intfs, hi->hw_if_index);
  subint_config_t *subint0 = &intf0->untagged_subint;

  int main_is_l3 = (subint0->flags & SUBINT_CONFIG_L2) == 0;
  int int_is_l3 = ei->flags & ETHERNET_INTERFACE_FLAG_STATUS_L3;

  if (main_is_l3)
    {
      if (int_is_l3 ||		/* DMAC filter already done by NIC */
	  ((hi->l2_if_count != 0) && (hi->l3_if_count == 0)))
	{			/* All L2 usage - DMAC check not needed */
	  eth_input_process_frame (vm, node, hi, from, n_pkts,
				   /*is_l3 */ 1, ip4_cksum_ok, 0);
	}
      else
	{			/* DMAC check needed for L3 */
	  eth_input_process_frame (vm, node, hi, from, n_pkts,
				   /*is_l3 */ 1, ip4_cksum_ok, 1);
	}
      return;
    }
  else
    {
      if (hi->l3_if_count == 0)
	{			/* All L2 usage - DMAC check not needed */
	  eth_input_process_frame (vm, node, hi, from, n_pkts,
				   /*is_l3 */ 0, ip4_cksum_ok, 0);
	}
      else
	{			/* DMAC check needed for L3 */
	  eth_input_process_frame (vm, node, hi, from, n_pkts,
				   /*is_l3 */ 0, ip4_cksum_ok, 1);
	}
      return;
    }
}
ethernet_input_trace (vlib_main_t * vm, vlib_node_runtime_t * node,
vlib_frame_t * from_frame)
{
+ vnet_main_t *vnm = vnet_get_main ();
u32 *from, n_left;
- if ((node->flags & VLIB_NODE_FLAG_TRACE) == 0)
- return;
+ if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
+ {
+ from = vlib_frame_vector_args (from_frame);
+ n_left = from_frame->n_vectors;
- from = vlib_frame_vector_args (from_frame);
- n_left = from_frame->n_vectors;
+ while (n_left)
+ {
+ ethernet_input_trace_t *t0;
+ vlib_buffer_t *b0 = vlib_get_buffer (vm, from[0]);
- while (n_left)
+ if (b0->flags & VLIB_BUFFER_IS_TRACED)
+ {
+ t0 = vlib_add_trace (vm, node, b0,
+ sizeof (ethernet_input_trace_t));
+ clib_memcpy_fast (t0->packet_data, b0->data + b0->current_data,
+ sizeof (t0->packet_data));
+ t0->frame_flags = from_frame->flags;
+ clib_memcpy_fast (&t0->frame_data,
+ vlib_frame_scalar_args (from_frame),
+ sizeof (ethernet_input_frame_t));
+ }
+ from += 1;
+ n_left -= 1;
+ }
+ }
+
+ /* rx pcap capture if enabled */
+ if (PREDICT_FALSE (vnm->pcap.pcap_rx_enable))
{
- ethernet_input_trace_t *t0;
- vlib_buffer_t *b0 = vlib_get_buffer (vm, from[0]);
+ u32 bi0;
+ vnet_pcap_t *pp = &vnm->pcap;
- if (b0->flags & VLIB_BUFFER_IS_TRACED)
+ from = vlib_frame_vector_args (from_frame);
+ n_left = from_frame->n_vectors;
+ while (n_left > 0)
{
- t0 = vlib_add_trace (vm, node, b0, sizeof (ethernet_input_trace_t));
- clib_memcpy_fast (t0->packet_data, b0->data + b0->current_data,
- sizeof (t0->packet_data));
- t0->frame_flags = from_frame->flags;
- clib_memcpy_fast (&t0->frame_data,
- vlib_frame_scalar_args (from_frame),
- sizeof (ethernet_input_frame_t));
+ vlib_buffer_t *b0;
+ bi0 = from[0];
+ from++;
+ n_left--;
+ b0 = vlib_get_buffer (vm, bi0);
+ if (vnet_is_packet_pcaped (pp, b0, ~0))
+ pcap_add_buffer (&pp->pcap_main, vm, bi0, pp->max_bytes_per_pkt);
}
- from += 1;
- n_left -= 1;
}
}
u32 cached_sw_if_index = ~0;
u32 cached_is_l2 = 0; /* shut up gcc */
vnet_hw_interface_t *hi = NULL; /* used for main interface only */
+ ethernet_interface_t *ei = NULL;
+ vlib_buffer_t *bufs[VLIB_FRAME_SIZE];
+ vlib_buffer_t **b = bufs;
if (variant != ETHERNET_INPUT_VARIANT_ETHERNET)
error_node = vlib_node_get_runtime (vm, ethernet_input_node.index);
next_index = node->cached_next_index;
stats_sw_if_index = node->runtime_data[0];
stats_n_packets = stats_n_bytes = 0;
+ vlib_get_buffers (vm, from, bufs, n_left_from);
while (n_left_from > 0)
{
qinq_intf_t *qinq_intf0, *qinq_intf1;
u32 is_l20, is_l21;
ethernet_header_t *e0, *e1;
+ u64 dmacs[2];
+ u8 dmacs_bad[2];
/* Prefetch next iteration. */
{
- vlib_buffer_t *b2, *b3;
+ vlib_prefetch_buffer_header (b[2], STORE);
+ vlib_prefetch_buffer_header (b[3], STORE);
- b2 = vlib_get_buffer (vm, from[2]);
- b3 = vlib_get_buffer (vm, from[3]);
-
- vlib_prefetch_buffer_header (b2, STORE);
- vlib_prefetch_buffer_header (b3, STORE);
-
- CLIB_PREFETCH (b2->data, sizeof (ethernet_header_t), LOAD);
- CLIB_PREFETCH (b3->data, sizeof (ethernet_header_t), LOAD);
+ CLIB_PREFETCH (b[2]->data, sizeof (ethernet_header_t), LOAD);
+ CLIB_PREFETCH (b[3]->data, sizeof (ethernet_header_t), LOAD);
}
bi0 = from[0];
n_left_to_next -= 2;
n_left_from -= 2;
- b0 = vlib_get_buffer (vm, bi0);
- b1 = vlib_get_buffer (vm, bi1);
+ b0 = b[0];
+ b1 = b[1];
+ b += 2;
error0 = error1 = ETHERNET_ERROR_NONE;
e0 = vlib_buffer_get_current (b0);
{
cached_sw_if_index = sw_if_index0;
hi = vnet_get_sup_hw_interface (vnm, sw_if_index0);
+ ei = ethernet_get_interface (em, hi->hw_if_index);
intf0 = vec_elt_at_index (em->main_intfs, hi->hw_if_index);
subint0 = &intf0->untagged_subint;
cached_is_l2 = is_l20 = subint0->flags & SUBINT_CONFIG_L2;
}
else
{
- if (!ethernet_address_cast (e0->dst_address) &&
- (hi->hw_address != 0) &&
- !eth_mac_equal ((u8 *) e0, hi->hw_address))
+ if (ei && (ei->flags & ETHERNET_INTERFACE_FLAG_STATUS_L3))
+ goto skip_dmac_check01;
+
+ dmacs[0] = *(u64 *) e0;
+ dmacs[1] = *(u64 *) e1;
+
+ if (ei && vec_len (ei->secondary_addrs))
+ ethernet_input_inline_dmac_check (hi, dmacs,
+ dmacs_bad,
+ 2 /* n_packets */ ,
+ ei,
+ 1 /* have_sec_dmac */ );
+ else
+ ethernet_input_inline_dmac_check (hi, dmacs,
+ dmacs_bad,
+ 2 /* n_packets */ ,
+ ei,
+ 0 /* have_sec_dmac */ );
+
+ if (dmacs_bad[0])
error0 = ETHERNET_ERROR_L3_MAC_MISMATCH;
- if (!ethernet_address_cast (e1->dst_address) &&
- (hi->hw_address != 0) &&
- !eth_mac_equal ((u8 *) e1, hi->hw_address))
+ if (dmacs_bad[1])
error1 = ETHERNET_ERROR_L3_MAC_MISMATCH;
+
+ skip_dmac_check01:
vlib_buffer_advance (b0, sizeof (ethernet_header_t));
determine_next_node (em, variant, 0, type0, b0,
&error0, &next0);
&hi1,
&main_intf1, &vlan_intf1, &qinq_intf1);
- identify_subint (hi0,
+ identify_subint (em,
+ hi0,
b0,
match_flags0,
main_intf0,
vlan_intf0,
qinq_intf0, &new_sw_if_index0, &error0, &is_l20);
- identify_subint (hi1,
+ identify_subint (em,
+ hi1,
b1,
match_flags1,
main_intf1,
qinq_intf_t *qinq_intf0;
ethernet_header_t *e0;
u32 is_l20;
+ u64 dmacs[2];
+ u8 dmacs_bad[2];
// Prefetch next iteration
if (n_left_from > 1)
{
- vlib_buffer_t *p2;
-
- p2 = vlib_get_buffer (vm, from[1]);
- vlib_prefetch_buffer_header (p2, STORE);
- CLIB_PREFETCH (p2->data, CLIB_CACHE_LINE_BYTES, LOAD);
+ vlib_prefetch_buffer_header (b[1], STORE);
+ clib_prefetch_load (b[1]->data);
}
bi0 = from[0];
n_left_from -= 1;
n_left_to_next -= 1;
- b0 = vlib_get_buffer (vm, bi0);
+ b0 = b[0];
+ b += 1;
error0 = ETHERNET_ERROR_NONE;
e0 = vlib_buffer_get_current (b0);
{
cached_sw_if_index = sw_if_index0;
hi = vnet_get_sup_hw_interface (vnm, sw_if_index0);
+ ei = ethernet_get_interface (em, hi->hw_if_index);
intf0 = vec_elt_at_index (em->main_intfs, hi->hw_if_index);
subint0 = &intf0->untagged_subint;
cached_is_l2 = is_l20 = subint0->flags & SUBINT_CONFIG_L2;
}
else
{
- if (!ethernet_address_cast (e0->dst_address) &&
- (hi->hw_address != 0) &&
- !eth_mac_equal ((u8 *) e0, hi->hw_address))
- error0 = ETHERNET_ERROR_L3_MAC_MISMATCH;
+ if (ei && ei->flags & ETHERNET_INTERFACE_FLAG_STATUS_L3)
+ goto skip_dmac_check0;
+
+ dmacs[0] = *(u64 *) e0;
+
+ if (ei)
+ {
+ if (vec_len (ei->secondary_addrs))
+ ethernet_input_inline_dmac_check (
+ hi, dmacs, dmacs_bad, 1 /* n_packets */, ei,
+ 1 /* have_sec_dmac */);
+ else
+ ethernet_input_inline_dmac_check (
+ hi, dmacs, dmacs_bad, 1 /* n_packets */, ei,
+ 0 /* have_sec_dmac */);
+
+ if (dmacs_bad[0])
+ error0 = ETHERNET_ERROR_L3_MAC_MISMATCH;
+ }
+
+ skip_dmac_check0:
vlib_buffer_advance (b0, sizeof (ethernet_header_t));
determine_next_node (em, variant, 0, type0, b0,
&error0, &next0);
&hi0,
&main_intf0, &vlan_intf0, &qinq_intf0);
- identify_subint (hi0,
+ identify_subint (em,
+ hi0,
b0,
match_flags0,
main_intf0,
}
}
-static_always_inline void
-eth_input_enqueue_untagged (vlib_main_t * vm, vlib_node_runtime_t * node,
- eth_input_data_t * d, int ip4_cksum_ok, int is_l3)
-{
- ethernet_main_t *em = ðernet_main;
- etype_id_t id;
- u32 next_index;
-
- id = ETYPE_ID_IP4;
- if (d->n_bufs_by_etype[id])
- {
- if (is_l3)
- {
- next_index = em->l3_next.input_next_ip4;
- if (next_index == ETHERNET_INPUT_NEXT_IP4_INPUT && ip4_cksum_ok)
- next_index = ETHERNET_INPUT_NEXT_IP4_INPUT_NCS;
- }
- else
- next_index = em->l2_next;
-
- vlib_buffer_enqueue_to_single_next (vm, node, d->bufs_by_etype[id],
- next_index, d->n_bufs_by_etype[id]);
- }
-
- id = ETYPE_ID_IP6;
- if (d->n_bufs_by_etype[id])
- {
- next_index = is_l3 ? em->l3_next.input_next_ip6 : em->l2_next;
- vlib_buffer_enqueue_to_single_next (vm, node, d->bufs_by_etype[id],
- next_index, d->n_bufs_by_etype[id]);
- }
-
- id = ETYPE_ID_MPLS;
- if (d->n_bufs_by_etype[id])
- {
- next_index = is_l3 ? em->l3_next.input_next_mpls : em->l2_next;
- vlib_buffer_enqueue_to_single_next (vm, node, d->bufs_by_etype[id],
- next_index, d->n_bufs_by_etype[id]);
- }
-
- id = ETYPE_ID_UNKNOWN;
- if (d->n_bufs_by_etype[id])
- {
- /* in case of l3 interfaces, we already advanced buffer so we need to
- roll back */
- if (is_l3)
- eth_input_advance_and_flags (vm, d->bufs_by_etype[id],
- d->n_bufs_by_etype[id],
- -(i16) sizeof (ethernet_header_t),
- ~VNET_BUFFER_F_L3_HDR_OFFSET_VALID, 0);
- ethernet_input_inline (vm, node, d->bufs_by_etype[id],
- d->n_bufs_by_etype[id],
- ETHERNET_INPUT_VARIANT_ETHERNET);
- }
-}
-
VLIB_NODE_FN (ethernet_input_node) (vlib_main_t * vm,
vlib_node_runtime_t * node,
vlib_frame_t * frame)
{
vnet_main_t *vnm = vnet_get_main ();
- ethernet_main_t *em = ðernet_main;
u32 *from = vlib_frame_vector_args (frame);
u32 n_packets = frame->n_vectors;
if (frame->flags & ETH_INPUT_FRAME_F_SINGLE_SW_IF_IDX)
{
- eth_input_data_t data, *d = &data;
ethernet_input_frame_t *ef = vlib_frame_scalar_args (frame);
- vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, ef->hw_if_index);
- main_intf_t *intf0 = vec_elt_at_index (em->main_intfs, hi->hw_if_index);
- subint_config_t *subint0 = &intf0->untagged_subint;
int ip4_cksum_ok = (frame->flags & ETH_INPUT_FRAME_F_IP4_CKSUM_OK) != 0;
-
- if (subint0->flags & SUBINT_CONFIG_L2)
- {
- /* untagged packets are treated as L2 */
- eth_input_process_frame (vm, from, d->etypes, n_packets, 0);
- eth_input_sort (vm, from, n_packets, d);
- eth_input_enqueue_untagged (vm, node, d, ip4_cksum_ok, 0);
- }
- else
- {
- ethernet_interface_t *ei;
- ei = pool_elt_at_index (em->interfaces, hi->hw_instance);
-
- /* currently only slowpath deals with dmac check */
- if (ei->flags & ETHERNET_INTERFACE_FLAG_ACCEPT_ALL)
- goto slowpath;
-
- /* untagged packets are treated as L3 */
- eth_input_process_frame (vm, from, d->etypes, n_packets, 1);
- eth_input_sort (vm, from, n_packets, d);
- eth_input_enqueue_untagged (vm, node, d, ip4_cksum_ok, 1);
- }
- return n_packets;
+ vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, ef->hw_if_index);
+ eth_input_single_int (vm, node, hi, from, n_packets, ip4_cksum_ok);
}
-
-slowpath:
- ethernet_input_inline (vm, node, from, n_packets,
- ETHERNET_INPUT_VARIANT_ETHERNET);
+ else
+ ethernet_input_inline (vm, node, from, n_packets,
+ ETHERNET_INPUT_VARIANT_ETHERNET);
return n_packets;
}
}
else
{
- // a specific outer + specifc innner vlan id, a common case
+ // a specific outer + specific inner vlan id, a common case
// get the qinq table
if (vlan_table->vlans[si->sub.eth.outer_vlan_id].qinqs == 0)
ethernet_sw_interface_up_down (vnet_main_t * vnm, u32 sw_if_index, u32 flags)
{
subint_config_t *subint;
- u32 dummy_flags;
- u32 dummy_unsup;
+ u32 placeholder_flags;
+ u32 placeholder_unsup;
clib_error_t *error = 0;
// Find the config for this subinterface
subint =
- ethernet_sw_interface_get_config (vnm, sw_if_index, &dummy_flags,
- &dummy_unsup);
+ ethernet_sw_interface_get_config (vnm, sw_if_index, &placeholder_flags,
+ &placeholder_unsup);
if (subint == 0)
{
ethernet_sw_interface_set_l2_mode (vnet_main_t * vnm, u32 sw_if_index, u32 l2)
{
subint_config_t *subint;
- u32 dummy_flags;
- u32 dummy_unsup;
+ u32 placeholder_flags;
+ u32 placeholder_unsup;
int is_port;
vnet_sw_interface_t *sw = vnet_get_sw_interface (vnm, sw_if_index);
// Find the config for this subinterface
subint =
- ethernet_sw_interface_get_config (vnm, sw_if_index, &dummy_flags,
- &dummy_unsup);
+ ethernet_sw_interface_get_config (vnm, sw_if_index, &placeholder_flags,
+ &placeholder_unsup);
if (subint == 0)
{
u32 sw_if_index, u32 l2)
{
subint_config_t *subint;
- u32 dummy_flags;
- u32 dummy_unsup;
+ u32 placeholder_flags;
+ u32 placeholder_unsup;
/* Find the config for this subinterface */
subint =
- ethernet_sw_interface_get_config (vnm, sw_if_index, &dummy_flags,
- &dummy_unsup);
+ ethernet_sw_interface_get_config (vnm, sw_if_index, &placeholder_flags,
+ &placeholder_unsup);
if (subint == 0)
{
#undef ethernet_error
};
-/* *INDENT-OFF* */
VLIB_REGISTER_NODE (ethernet_input_node) = {
.name = "ethernet-input",
/* Takes a vector of packets. */
#undef _
},
};
-/* *INDENT-ON* */
#ifndef CLIB_MARCH_VARIANT
void
return 0;
}
+void
+ethernet_setup_node (vlib_main_t *vm, u32 node_index)
+{
+ vlib_node_t *n = vlib_get_node (vm, node_index);
+ pg_node_t *pn = pg_get_node (node_index);
-static clib_error_t *
-ethernet_input_init (vlib_main_t * vm)
+ n->format_buffer = format_ethernet_header_with_length;
+ n->unformat_buffer = unformat_ethernet_header;
+ pn->unformat_edit = unformat_pg_ethernet_header;
+}
+
+void
+ethernet_input_init (vlib_main_t * vm, ethernet_main_t * em)
{
- ethernet_main_t *em = ðernet_main;
__attribute__ ((unused)) vlan_table_t *invalid_vlan_table;
__attribute__ ((unused)) qinq_table_t *invalid_qinq_table;
pool_get (em->vlan_pool, invalid_vlan_table); // first id = 0
// The first qinq pool will always be reserved for an invalid table
pool_get (em->qinq_pool, invalid_qinq_table); // first id = 0
-
- return 0;
}
-VLIB_INIT_FUNCTION (ethernet_input_init);
-
void
ethernet_register_input_type (vlib_main_t * vm,
ethernet_type_t type, u32 node_index)
}
ti = ethernet_get_type_info (em, type);
+ if (ti == 0)
+ {
+ clib_warning ("type_info NULL for type %d", type);
+ return;
+ }
ti->node_index = node_index;
ti->next_index = vlib_node_add_next (vm,
ethernet_input_node.index, node_index);