+/* The first u64 of an ethernet header holds the 6-byte destination MAC
+   followed by 2 bytes of the source MAC; DMAC_MASK isolates the DMAC and
+   DMAC_IGBIT selects the I/G (group) bit of its first octet. The literals
+   are written in network byte order and converted so they can be applied
+   directly to u64 loads of packet data. */
+#define DMAC_MASK clib_net_to_host_u64 (0xFFFFFFFFFFFF0000)
+#define DMAC_IGBIT clib_net_to_host_u64 (0x0100000000000000)
+
+#ifdef CLIB_HAVE_VEC256
+/* Check four DMACs at once; returns a 32-bit mask carrying 8 bits per
+   packet, set (0xff) when the DMAC is bad, i.e. an unmatched unicast
+   address. */
+static_always_inline u32
+is_dmac_bad_x4 (u64 * dmacs, u64 hwaddr)
+{
+  u64x4 r0 = u64x4_load_unaligned (dmacs) & u64x4_splat (DMAC_MASK);
+  /* vector compares yield all-ones/all-zeros lanes, so a bitwise AND
+     combines "does not match hwaddr" with "I/G bit clear" */
+  r0 = (r0 != u64x4_splat (hwaddr)) & ((r0 & u64x4_splat (DMAC_IGBIT)) == 0);
+  return u8x32_msb_mask ((u8x32) (r0));
+}
+#else
+/* Scalar variant: a DMAC is bad if it matches neither the interface
+   address nor a group (multicast/broadcast) address. */
+static_always_inline u8
+is_dmac_bad (u64 dmac, u64 hwaddr)
+{
+  u64 r0 = dmac & DMAC_MASK;
+  return (r0 != hwaddr) && ((r0 & DMAC_IGBIT) == 0);
+}
+#endif
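+
+/* Illustration (not part of the original patch): a frame addressed to
+   01:00:5e:00:00:fb (the mDNS multicast MAC) has the I/G bit set in its
+   first octet, so the DMAC_IGBIT test accepts it even though it cannot
+   match the interface address; only unmatched *unicast* DMACs are flagged
+   as bad. */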
+
+/* Secondary-address check: exact match only. Group addresses were already
+   accepted by the primary check, so the I/G bit test is not repeated. */
+static_always_inline u8
+is_sec_dmac_bad (u64 dmac, u64 hwaddr)
+{
+  return ((dmac & DMAC_MASK) != hwaddr);
+}
+
+#ifdef CLIB_HAVE_VEC256
+/* Vector variant of is_sec_dmac_bad covering four packets at once. */
+static_always_inline u32
+is_sec_dmac_bad_x4 (u64 * dmacs, u64 hwaddr)
+{
+ u64x4 r0 = u64x4_load_unaligned (dmacs) & u64x4_splat (DMAC_MASK);
+ r0 = (r0 != u64x4_splat (hwaddr));
+ return u8x32_msb_mask ((u8x32) (r0));
+}
+#endif
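+
+/* Note: u8x32_msb_mask () returns one bit per byte lane, so each u64
+   compare result contributes 8 identical bits (0xff bad / 0x00 good) per
+   packet. The helpers below rely on this to update four u8 flags with a
+   single u32 operation. */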
+
+/* Re-check one packet against a secondary interface address; the bad flag
+   is cleared (via &=) as soon as any address matches. */
+static_always_inline u8
+eth_input_sec_dmac_check_x1 (u64 hwaddr, u64 * dmac, u8 * dmac_bad)
+{
+  dmac_bad[0] &= is_sec_dmac_bad (dmac[0], hwaddr);
+  return dmac_bad[0];
+}
+
+/* Re-check four packets against a secondary interface address. The four
+   consecutive u8 flags are overlaid with a u32 so the 8-bits-per-packet
+   mask from is_sec_dmac_bad_x4 can be applied in one operation. */
+static_always_inline u32
+eth_input_sec_dmac_check_x4 (u64 hwaddr, u64 * dmac, u8 * dmac_bad)
+{
+#ifdef CLIB_HAVE_VEC256
+  *(u32 *) (dmac_bad + 0) &= is_sec_dmac_bad_x4 (dmac + 0, hwaddr);
+#else
+  dmac_bad[0] &= is_sec_dmac_bad (dmac[0], hwaddr);
+  dmac_bad[1] &= is_sec_dmac_bad (dmac[1], hwaddr);
+  dmac_bad[2] &= is_sec_dmac_bad (dmac[2], hwaddr);
+  dmac_bad[3] &= is_sec_dmac_bad (dmac[3], hwaddr);
+#endif
+  /* nonzero iff at least one of the four packets is still marked bad */
+  return *(u32 *) dmac_bad;
+}
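+
+/* Hypothetical caller sketch (assumption; sec_hwaddr and n_pkts are
+   illustrative names, not from this patch): after a primary-address pass
+   seeds dmac_bad[] with nonzero flags for unmatched unicast DMACs, each
+   additional interface address clears the flags of packets it matches:
+
+     for (i = 0; i + 4 <= n_pkts; i += 4)
+       eth_input_sec_dmac_check_x4 (sec_hwaddr, dmacs + i, dmac_bad + i);
+     for (; i < n_pkts; i++)
+       eth_input_sec_dmac_check_x1 (sec_hwaddr, dmacs + i, dmac_bad + i);
+*/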
+