+
+/* the following vector code relies on these assumptions */
+STATIC_ASSERT_OFFSET_OF (vlib_buffer_t, current_data, 0);
+STATIC_ASSERT_OFFSET_OF (vlib_buffer_t, current_length, 2);
+STATIC_ASSERT_OFFSET_OF (vlib_buffer_t, flags, 4);
+STATIC_ASSERT (STRUCT_OFFSET_OF (vnet_buffer_opaque_t, l2_hdr_offset) ==
+ STRUCT_OFFSET_OF (vnet_buffer_opaque_t, l3_hdr_offset) - 2,
+ "l3_hdr_offset must follow l2_hdr_offset");
+
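+/* set l2_hdr_offset / l3_hdr_offset and mark them valid on 4 buffers at
+   a time; on L3 interfaces current_data is also advanced past the
+   ethernet header, on L2 interfaces l2_len is recorded instead */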
+static_always_inline void
+eth_input_adv_and_flags_x4 (vlib_buffer_t ** b, int is_l3)
+{
+ i16 adv = sizeof (ethernet_header_t);
+ u32 flags = VNET_BUFFER_F_L2_HDR_OFFSET_VALID |
+ VNET_BUFFER_F_L3_HDR_OFFSET_VALID;
+
+#ifdef CLIB_HAVE_VEC256
+  /* to reduce the number of small loads/stores, the first 64 bits of
+     each buffer's metadata are loaded into a 256-bit register so
+     current_data and current_length can be advanced and flags updated
+     with vector operations.
+     Observed saving of this code is ~2 clocks per packet */
+ u64x4 r, radv;
+
+  /* vector of signed 16-bit integers used in a signed vector add
+     operation to advance current_data and current_length */
+ u32x8 flags4 = { 0, flags, 0, flags, 0, flags, 0, flags };
+ i16x16 adv4 = {
+ adv, -adv, 0, 0, adv, -adv, 0, 0,
+ adv, -adv, 0, 0, adv, -adv, 0, 0
+ };
+
+ /* load 4 x 64 bits */
+ r = u64x4_gather (b[0], b[1], b[2], b[3]);
+
+ /* set flags */
+ r |= (u64x4) flags4;
+
+ /* advance buffer */
+ radv = (u64x4) ((i16x16) r + adv4);
+
+ /* write 4 x 64 bits */
+ u64x4_scatter (is_l3 ? radv : r, b[0], b[1], b[2], b[3]);
+
+  /* use the old current_data as l2_hdr_offset and the new current_data
+     as l3_hdr_offset */
+ r = (u64x4) u16x16_blend (r, radv << 16, 0xaa);
+
+  /* store both l2_hdr_offset and l3_hdr_offset in a single store
+     operation */
+ u32x8_scatter_one ((u32x8) r, 0, &vnet_buffer (b[0])->l2_hdr_offset);
+ u32x8_scatter_one ((u32x8) r, 2, &vnet_buffer (b[1])->l2_hdr_offset);
+ u32x8_scatter_one ((u32x8) r, 4, &vnet_buffer (b[2])->l2_hdr_offset);
+ u32x8_scatter_one ((u32x8) r, 6, &vnet_buffer (b[3])->l2_hdr_offset);
+
+ if (is_l3)
+ {
+ ASSERT (b[0]->current_data == vnet_buffer (b[0])->l3_hdr_offset);
+ ASSERT (b[1]->current_data == vnet_buffer (b[1])->l3_hdr_offset);
+ ASSERT (b[2]->current_data == vnet_buffer (b[2])->l3_hdr_offset);
+ ASSERT (b[3]->current_data == vnet_buffer (b[3])->l3_hdr_offset);
+
+ ASSERT (b[0]->current_data - vnet_buffer (b[0])->l2_hdr_offset == adv);
+ ASSERT (b[1]->current_data - vnet_buffer (b[1])->l2_hdr_offset == adv);
+ ASSERT (b[2]->current_data - vnet_buffer (b[2])->l2_hdr_offset == adv);
+ ASSERT (b[3]->current_data - vnet_buffer (b[3])->l2_hdr_offset == adv);
+ }
+ else
+ {
+ ASSERT (b[0]->current_data == vnet_buffer (b[0])->l2_hdr_offset);
+ ASSERT (b[1]->current_data == vnet_buffer (b[1])->l2_hdr_offset);
+ ASSERT (b[2]->current_data == vnet_buffer (b[2])->l2_hdr_offset);
+ ASSERT (b[3]->current_data == vnet_buffer (b[3])->l2_hdr_offset);
+
+ ASSERT (b[0]->current_data - vnet_buffer (b[0])->l3_hdr_offset == -adv);
+ ASSERT (b[1]->current_data - vnet_buffer (b[1])->l3_hdr_offset == -adv);
+ ASSERT (b[2]->current_data - vnet_buffer (b[2])->l3_hdr_offset == -adv);
+ ASSERT (b[3]->current_data - vnet_buffer (b[3])->l3_hdr_offset == -adv);
+ }
+
+#else
+ vnet_buffer (b[0])->l2_hdr_offset = b[0]->current_data;
+ vnet_buffer (b[1])->l2_hdr_offset = b[1]->current_data;
+ vnet_buffer (b[2])->l2_hdr_offset = b[2]->current_data;
+ vnet_buffer (b[3])->l2_hdr_offset = b[3]->current_data;
+ vnet_buffer (b[0])->l3_hdr_offset = b[0]->current_data + adv;
+ vnet_buffer (b[1])->l3_hdr_offset = b[1]->current_data + adv;
+ vnet_buffer (b[2])->l3_hdr_offset = b[2]->current_data + adv;
+ vnet_buffer (b[3])->l3_hdr_offset = b[3]->current_data + adv;
+
+ if (is_l3)
+ {
+ vlib_buffer_advance (b[0], adv);
+ vlib_buffer_advance (b[1], adv);
+ vlib_buffer_advance (b[2], adv);
+ vlib_buffer_advance (b[3], adv);
+ }
+
+ b[0]->flags |= flags;
+ b[1]->flags |= flags;
+ b[2]->flags |= flags;
+ b[3]->flags |= flags;
+#endif
+
+ if (!is_l3)
+ {
+ vnet_buffer (b[0])->l2.l2_len = adv;
+ vnet_buffer (b[1])->l2.l2_len = adv;
+ vnet_buffer (b[2])->l2.l2_len = adv;
+ vnet_buffer (b[3])->l2.l2_len = adv;
+ }
+}
+
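+/* scalar single-buffer variant of eth_input_adv_and_flags_x4 */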
+static_always_inline void
+eth_input_adv_and_flags_x1 (vlib_buffer_t ** b, int is_l3)
+{
+ i16 adv = sizeof (ethernet_header_t);
+ u32 flags = VNET_BUFFER_F_L2_HDR_OFFSET_VALID |
+ VNET_BUFFER_F_L3_HDR_OFFSET_VALID;
+
+ vnet_buffer (b[0])->l2_hdr_offset = b[0]->current_data;
+ vnet_buffer (b[0])->l3_hdr_offset = b[0]->current_data + adv;
+
+ if (is_l3)
+ vlib_buffer_advance (b[0], adv);
+ b[0]->flags |= flags;
+ if (!is_l3)
+ vnet_buffer (b[0])->l2.l2_len = adv;
+}
+
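+/* load the ethertype, the 8 bytes that follow it (candidate VLAN tags)
+   and, when requested, the first 8 header bytes for the dmac check */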
+static_always_inline void
+eth_input_get_etype_and_tags (vlib_buffer_t ** b, u16 * etype, u64 * tags,
+ u64 * dmacs, int offset, int dmac_check)
+{
+ ethernet_header_t *e;
+ e = vlib_buffer_get_current (b[offset]);
+#ifdef CLIB_HAVE_VEC128
+ u64x2 r = u64x2_load_unaligned (((u8 *) & e->type) - 6);
+ etype[offset] = ((u16x8) r)[3];
+ tags[offset] = r[1];
+#else
+ etype[offset] = e->type;
+ tags[offset] = *(u64 *) (e + 1);
+#endif
+
+ if (dmac_check)
+ dmacs[offset] = *(u64 *) e;
+}
+
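+/* map an ethertype to a next node index; values below 0x600 are 802.3
+   length fields and are sent to the LLC node */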
+static_always_inline u16
+eth_input_next_by_type (u16 etype)
+{
+  ethernet_main_t *em = &ethernet_main;
+
+ return (etype < 0x600) ? ETHERNET_INPUT_NEXT_LLC :
+ vec_elt (em->l3_next.input_next_by_type,
+ sparse_vec_index (em->l3_next.input_next_by_type, etype));
+}
+
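+/* single-entry cache of the last tag lookup result so that subsequent
+   packets carrying the same VLAN tags skip the subinterface lookup */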
+typedef struct
+{
+ u64 tag, mask;
+ u32 sw_if_index;
+ u16 type, len, next;
+ i16 adv;
+ u8 err, n_tags;
+ u64 n_packets, n_bytes;
+} eth_input_tag_lookup_t;
+
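+/* credit the packet and byte counts accumulated in the tag lookup cache
+   to the per-interface RX counters */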
+static_always_inline void
+eth_input_update_if_counters (vlib_main_t * vm, vnet_main_t * vnm,
+ eth_input_tag_lookup_t * l)
+{
+ if (l->n_packets == 0 || l->sw_if_index == ~0)
+ return;
+
+ if (l->adv > 0)
+ l->n_bytes += l->n_packets * l->len;
+
+ vlib_increment_combined_counter
+ (vnm->interface_main.combined_sw_if_counters +
+ VNET_INTERFACE_COUNTER_RX, vm->thread_index, l->sw_if_index,
+ l->n_packets, l->n_bytes);
+}
+
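+/* handle one packet on the tagged path: the subinterface lookup is
+   redone only when the tags do not match the cached entry */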
+static_always_inline void
+eth_input_tag_lookup (vlib_main_t * vm, vnet_main_t * vnm,
+ vlib_node_runtime_t * node, vnet_hw_interface_t * hi,
+ u64 tag, u16 * next, vlib_buffer_t * b,
+ eth_input_tag_lookup_t * l, u8 dmac_bad, int is_dot1ad,
+ int main_is_l3, int check_dmac)
+{
+  ethernet_main_t *em = &ethernet_main;
+
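+  /* redo the lookup only when tags differ from the cached entry */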
+ if ((tag ^ l->tag) & l->mask)
+ {
+ main_intf_t *mif = vec_elt_at_index (em->main_intfs, hi->hw_if_index);
+ vlan_intf_t *vif;
+ qinq_intf_t *qif;
+ vlan_table_t *vlan_table;
+ qinq_table_t *qinq_table;
+ u16 *t = (u16 *) & tag;
+ u16 vlan1 = clib_net_to_host_u16 (t[0]) & 0xFFF;
+ u16 vlan2 = clib_net_to_host_u16 (t[2]) & 0xFFF;
+ u32 matched, is_l2, new_sw_if_index;
+
+ vlan_table = vec_elt_at_index (em->vlan_pool, is_dot1ad ?
+ mif->dot1ad_vlans : mif->dot1q_vlans);
+ vif = &vlan_table->vlans[vlan1];
+ qinq_table = vec_elt_at_index (em->qinq_pool, vif->qinqs);
+ qif = &qinq_table->vlans[vlan2];
+ l->err = ETHERNET_ERROR_NONE;
+ l->type = clib_net_to_host_u16 (t[1]);
+
+ if (l->type == ETHERNET_TYPE_VLAN)
+ {
+ l->type = clib_net_to_host_u16 (t[3]);
+ l->n_tags = 2;
+ matched = eth_identify_subint (hi, SUBINT_CONFIG_VALID |
+ SUBINT_CONFIG_MATCH_2_TAG, mif, vif,
+ qif, &new_sw_if_index, &l->err,
+ &is_l2);
+ }
+ else
+ {
+ l->n_tags = 1;
+ if (vlan1 == 0)
+ {
+ new_sw_if_index = hi->sw_if_index;
+ l->err = ETHERNET_ERROR_NONE;
+ matched = 1;
+ is_l2 = main_is_l3 == 0;
+ }
+ else
+ matched = eth_identify_subint (hi, SUBINT_CONFIG_VALID |
+ SUBINT_CONFIG_MATCH_1_TAG, mif,
+ vif, qif, &new_sw_if_index,
+ &l->err, &is_l2);
+ }
+
+ if (l->sw_if_index != new_sw_if_index)
+ {
+ eth_input_update_if_counters (vm, vnm, l);
+ l->n_packets = 0;
+ l->n_bytes = 0;
+ l->sw_if_index = new_sw_if_index;
+ }
+ l->tag = tag;
+ l->mask = (l->n_tags == 2) ?
+ clib_net_to_host_u64 (0xffffffffffffffff) :
+ clib_net_to_host_u64 (0xffffffff00000000);
+
+ if (matched && l->sw_if_index == ~0)
+ l->err = ETHERNET_ERROR_DOWN;
+
+ l->len = sizeof (ethernet_header_t) +
+ l->n_tags * sizeof (ethernet_vlan_header_t);
+ if (main_is_l3)
+ l->adv = is_l2 ? -(int) sizeof (ethernet_header_t) :
+ l->n_tags * sizeof (ethernet_vlan_header_t);
+ else
+ l->adv = is_l2 ? 0 : l->len;
+
+ if (PREDICT_FALSE (l->err != ETHERNET_ERROR_NONE))
+ l->next = ETHERNET_INPUT_NEXT_DROP;
+ else if (is_l2)
+ l->next = em->l2_next;
+ else if (l->type == ETHERNET_TYPE_IP4)
+ l->next = em->l3_next.input_next_ip4;
+ else if (l->type == ETHERNET_TYPE_IP6)
+ l->next = em->l3_next.input_next_ip6;
+ else if (l->type == ETHERNET_TYPE_MPLS)
+ l->next = em->l3_next.input_next_mpls;
+ else if (em->redirect_l3)
+ l->next = em->redirect_l3_next;
+ else
+ {
+ l->next = eth_input_next_by_type (l->type);
+ if (l->next == ETHERNET_INPUT_NEXT_PUNT)
+ l->err = ETHERNET_ERROR_UNKNOWN_TYPE;
+ }
+ }
+
+ if (check_dmac && l->adv > 0 && dmac_bad)
+ {
+ l->err = ETHERNET_ERROR_L3_MAC_MISMATCH;
+ next[0] = ETHERNET_INPUT_NEXT_PUNT;
+ }
+ else
+ next[0] = l->next;
+
+ vlib_buffer_advance (b, l->adv);
+ vnet_buffer (b)->l2.l2_len = l->len;
+ vnet_buffer (b)->l3_hdr_offset = vnet_buffer (b)->l2_hdr_offset + l->len;
+
+ if (l->err == ETHERNET_ERROR_NONE)
+ {
+ vnet_buffer (b)->sw_if_index[VLIB_RX] = l->sw_if_index;
+ ethernet_buffer_set_vlan_count (b, l->n_tags);
+ }
+ else
+ b->error = node->errors[l->err];
+
+ /* update counters */
+ l->n_packets += 1;
+ l->n_bytes += vlib_buffer_length_in_chain (vm, b);
+}
+
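+/* dmacs are compared as u64 loads of the first 8 header bytes;
+   DMAC_MASK keeps the 6 destination address bytes and DMAC_IGBIT is
+   the group/multicast bit of that address */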
+#define DMAC_MASK clib_net_to_host_u64 (0xFFFFFFFFFFFF0000)
+#define DMAC_IGBIT clib_net_to_host_u64 (0x0100000000000000)
+
+#ifdef CLIB_HAVE_VEC256
+static_always_inline u32
+is_dmac_bad_x4 (u64 * dmacs, u64 hwaddr)
+{
+ u64x4 r0 = u64x4_load_unaligned (dmacs) & u64x4_splat (DMAC_MASK);
+ r0 = (r0 != u64x4_splat (hwaddr)) & ((r0 & u64x4_splat (DMAC_IGBIT)) == 0);
+ return u8x32_msb_mask ((u8x32) (r0));
+}
+#else
+static_always_inline u8
+is_dmac_bad (u64 dmac, u64 hwaddr)
+{
+ u64 r0 = dmac & DMAC_MASK;
+ return (r0 != hwaddr) && ((r0 & DMAC_IGBIT) == 0);
+}
+#endif
+
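+/* secondary address check has no multicast exemption; group addresses
+   were already accepted by the primary check */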
+static_always_inline u8
+is_sec_dmac_bad (u64 dmac, u64 hwaddr)
+{
+ return ((dmac & DMAC_MASK) != hwaddr);
+}
+
+#ifdef CLIB_HAVE_VEC256
+static_always_inline u32
+is_sec_dmac_bad_x4 (u64 * dmacs, u64 hwaddr)
+{
+ u64x4 r0 = u64x4_load_unaligned (dmacs) & u64x4_splat (DMAC_MASK);
+ r0 = (r0 != u64x4_splat (hwaddr));
+ return u8x32_msb_mask ((u8x32) (r0));
+}
+#endif
+
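+/* clear dmac_bad flags for packets matching a secondary MAC address and
+   return the remaining bad status */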
+static_always_inline u8
+eth_input_sec_dmac_check_x1 (u64 hwaddr, u64 * dmac, u8 * dmac_bad)
+{
+ dmac_bad[0] &= is_sec_dmac_bad (dmac[0], hwaddr);
+ return dmac_bad[0];
+}
+
+static_always_inline u32
+eth_input_sec_dmac_check_x4 (u64 hwaddr, u64 * dmac, u8 * dmac_bad)
+{
+#ifdef CLIB_HAVE_VEC256
+ *(u32 *) (dmac_bad + 0) &= is_sec_dmac_bad_x4 (dmac + 0, hwaddr);
+#else
+ dmac_bad[0] &= is_sec_dmac_bad (dmac[0], hwaddr);
+ dmac_bad[1] &= is_sec_dmac_bad (dmac[1], hwaddr);
+ dmac_bad[2] &= is_sec_dmac_bad (dmac[2], hwaddr);
+ dmac_bad[3] &= is_sec_dmac_bad (dmac[3], hwaddr);
+#endif
+ return *(u32 *) dmac_bad;
+}
+
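+/* check destination MACs of all packets against the interface primary
+   address and, when have_sec_dmac is set, its secondary addresses */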
+static_always_inline void
+eth_input_process_frame_dmac_check (vnet_hw_interface_t * hi,
+ u64 * dmacs, u8 * dmacs_bad,
+ u32 n_packets, ethernet_interface_t * ei,
+ u8 have_sec_dmac)