#undef _
} ip_multicast_group_t;
+
+/**
+ * The set of RFC defined DSCP values.
+ * Class selectors CSn per RFC 2474 (n << 3), assured forwarding AFxy
+ * per RFC 2597, expedited forwarding EF per RFC 3246.
+ */
+#define foreach_ip_dscp \
+ _(0, CS0) \
+ _(8, CS1) \
+ _(10, AF11) \
+ _(12, AF12) \
+ _(14, AF13) \
+ _(16, CS2) \
+ _(18, AF21) \
+ _(20, AF22) \
+ _(22, AF23) \
+ _(24, CS3) \
+ _(26, AF31) \
+ _(28, AF32) \
+ _(30, AF33) \
+ _(32, CS4) \
+ _(34, AF41) \
+ _(36, AF42) \
+ _(38, AF43) \
+ _(40, CS5) \
+ _(46, EF) \
+ _(48, CS6) \
+ _(56, CS7)
+
+/* DSCP code points as a packed enum: the X-macro above expands to
+ * IP_DSCP_<name> = <value> entries (e.g. IP_DSCP_EF = 46). */
+typedef enum ip_dscp_t_
+{
+#define _(n,f) IP_DSCP_##f = n,
+ foreach_ip_dscp
+#undef _
+} __clib_packed ip_dscp_t;
+
+/* the packed attribute must yield a 1-byte enum; assert it at compile time */
+STATIC_ASSERT_SIZEOF (ip_dscp_t, 1);
+
+/* format() helper: renders an ip_dscp_t (va_list arg) into vector s */
+extern u8 *format_ip_dscp (u8 * s, va_list * va);
+
/* IP checksum support. */
+/**
+ * Compute the RFC 1071 internet checksum over n_left bytes of data.
+ *
+ * Sums 16-bit words in network byte order, folds the carries and returns
+ * the ones-complement result ready to be stored into a header field.
+ * An odd trailing byte is treated as if zero-padded on the right.
+ */
+static_always_inline u16
+ip_csum (void *data, u16 n_left)
+{
+  u32 sum;
+#ifdef CLIB_HAVE_VEC256
+  u16x16 v1, v2;
+  u32x8 zero = { 0 };
+  u32x8 sum8 = { 0 };
+  u32x4 sum4;
+#endif
+
+  /* if there is odd number of bytes, pad by zero and store in sum */
+  sum = (n_left & 1) ? ((u8 *) data)[n_left - 1] << 8 : 0;
+
+  /* we deal with words */
+  n_left >>= 1;
+
+#ifdef CLIB_HAVE_VEC256
+  /* main loop: 32 words (64 bytes) per iteration */
+  while (n_left >= 32)
+    {
+      v1 = u16x16_load_unaligned (data);
+      v2 = u16x16_load_unaligned (data + 32);
+
+#ifdef CLIB_ARCH_IS_LITTLE_ENDIAN
+      v1 = u16x16_byte_swap (v1);
+      v2 = u16x16_byte_swap (v2);
+#endif
+      sum8 += u16x8_extend_to_u32x8 (u16x16_extract_lo (v1));
+      sum8 += u16x8_extend_to_u32x8 (u16x16_extract_hi (v1));
+      sum8 += u16x8_extend_to_u32x8 (u16x16_extract_lo (v2));
+      sum8 += u16x8_extend_to_u32x8 (u16x16_extract_hi (v2));
+      n_left -= 32;
+      data += 64;
+    }
+
+  if (n_left >= 16)
+    {
+      v1 = u16x16_load_unaligned (data);
+#ifdef CLIB_ARCH_IS_LITTLE_ENDIAN
+      /* swap exactly once, and only on little-endian targets */
+      v1 = u16x16_byte_swap (v1);
+#endif
+      sum8 += u16x8_extend_to_u32x8 (u16x16_extract_lo (v1));
+      sum8 += u16x8_extend_to_u32x8 (u16x16_extract_hi (v1));
+      n_left -= 16;
+      data += 32;
+    }
+
+  if (n_left)
+    {
+      /* NOTE(review): reads a full 32 bytes even when fewer remain —
+       * assumes callers provide sufficiently padded buffers; confirm */
+      v1 = u16x16_load_unaligned (data);
+#ifdef CLIB_ARCH_IS_LITTLE_ENDIAN
+      v1 = u16x16_byte_swap (v1);
+#endif
+      /* zero the words past the end of the buffer before accumulating */
+      v1 = u16x16_mask_last (v1, 16 - n_left);
+      sum8 += u16x8_extend_to_u32x8 (u16x16_extract_lo (v1));
+      sum8 += u16x8_extend_to_u32x8 (u16x16_extract_hi (v1));
+    }
+
+  sum8 = u32x8_hadd (sum8, zero);
+  sum4 = u32x8_extract_lo (sum8) + u32x8_extract_hi (sum8);
+  /* accumulate into sum — must not overwrite the odd-byte pad stored above */
+  sum += sum4[0] + sum4[1];
+
+#else
+  /* scalar version */
+  while (n_left >= 8)
+    {
+      sum += clib_net_to_host_u16 (*((u16 *) data + 0));
+      sum += clib_net_to_host_u16 (*((u16 *) data + 1));
+      sum += clib_net_to_host_u16 (*((u16 *) data + 2));
+      sum += clib_net_to_host_u16 (*((u16 *) data + 3));
+      sum += clib_net_to_host_u16 (*((u16 *) data + 4));
+      sum += clib_net_to_host_u16 (*((u16 *) data + 5));
+      sum += clib_net_to_host_u16 (*((u16 *) data + 6));
+      sum += clib_net_to_host_u16 (*((u16 *) data + 7));
+      n_left -= 8;
+      data += 16;
+    }
+  while (n_left)
+    {
+      sum += clib_net_to_host_u16 (*(u16 *) data);
+      n_left -= 1;
+      data += 2;
+    }
+#endif
+
+  /* fold 32-bit sum to 16 bits; two rounds cover the worst-case carry */
+  sum = (sum & 0xffff) + (sum >> 16);
+  sum = (sum & 0xffff) + (sum >> 16);
+  return ~((u16) sum);
+}
+
/* Incremental checksum update. */
typedef uword ip_csum_t;
/* Fold in carry from high bit. */
d -= d > c;
- ASSERT (ip_csum_with_carry (d, x) == c);
+ ip_csum_t t = ip_csum_with_carry (d, x);
+ ASSERT ((t - c == 0) || (t - c == ~0));
return d;
}
return c;
}
-/* Copy data and checksum at the same time. */
-ip_csum_t ip_csum_and_memcpy (ip_csum_t sum, void *dst, void *src,
- uword n_bytes);
+extern ip_csum_t (*vnet_incremental_checksum_fp) (ip_csum_t, void *, uword);
+
+/* Incrementally extend checksum 'sum' over n_bytes at _data by
+ * dispatching through the pluggable implementation pointer. */
+always_inline ip_csum_t
+ip_incremental_checksum (ip_csum_t sum, void *_data, uword n_bytes)
+{
+  return vnet_incremental_checksum_fp (sum, _data, n_bytes);
+}
always_inline u16
ip_csum_and_memcpy_fold (ip_csum_t sum, void *dst)