#include <vnet/ip/ip.h>
#include <vnet/ethernet/ethernet.h>
#include <vnet/vxlan/vxlan.h>
+#include <vnet/qos/qos_types.h>
+#include <vnet/adj/rewrite.h>
/* Statistics (not all errors) */
#define foreach_vxlan_encap_error \
im->combined_sw_if_counters + VNET_INTERFACE_COUNTER_TX;
u32 pkts_encapsulated = 0;
u32 thread_index = vlib_get_thread_index();
- u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;
u32 sw_if_index0 = 0, sw_if_index1 = 0;
u32 next0 = 0, next1 = 0;
vxlan_tunnel_t * t0 = NULL, * t1 = NULL;
n_left_from = from_frame->n_vectors;
next_index = node->cached_next_index;
- stats_sw_if_index = node->runtime_data[0];
- stats_n_packets = stats_n_bytes = 0;
STATIC_ASSERT_SIZEOF(ip6_vxlan_header_t, 56);
STATIC_ASSERT_SIZEOF(ip4_vxlan_header_t, 36);
- word const underlay_hdr_len = is_ip4 ?
+ u8 const underlay_hdr_len = is_ip4 ?
sizeof(ip4_vxlan_header_t) : sizeof(ip6_vxlan_header_t);
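+ /* The tunnel rewrite is stored right-aligned at the end of the fixed-size
+ * rewrite_data array, so this offset locates the first byte of the
+ * underlay header within it. */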
+ u8 const rw_hdr_offset = sizeof t0->rewrite_data - underlay_hdr_len;
u16 const l3_len = is_ip4 ? sizeof(ip4_header_t) : sizeof(ip6_header_t);
u32 const csum_flags = is_ip4 ?
VNET_BUFFER_F_OFFLOAD_IP_CKSUM | VNET_BUFFER_F_IS_IP4 |
vlib_prefetch_buffer_header (p2, LOAD);
vlib_prefetch_buffer_header (p3, LOAD);
- CLIB_PREFETCH (p2->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
- CLIB_PREFETCH (p3->data, 2*CLIB_CACHE_LINE_BYTES, LOAD);
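+ /* Start the prefetch one cache line before the packet data so the
+ * headroom that will receive the encap header (the buffer is advanced by
+ * -underlay_hdr_len below) is already warm when it is written. */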
+ CLIB_PREFETCH (p2->data - CLIB_CACHE_LINE_BYTES, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
+ CLIB_PREFETCH (p3->data - CLIB_CACHE_LINE_BYTES, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
}
- u32 bi0 = from[0];
- u32 bi1 = from[1];
+ u32 bi0 = to_next[0] = from[0];
+ u32 bi1 = to_next[1] = from[1];
+ from += 2;
+ to_next += 2;
+ n_left_to_next -= 2;
+ n_left_from -= 2;
vlib_buffer_t * b0 = vlib_get_buffer (vm, bi0);
vlib_buffer_t * b1 = vlib_get_buffer (vm, bi1);
u32 flow_hash0 = vnet_l2_compute_flow_hash (b0);
u32 flow_hash1 = vnet_l2_compute_flow_hash (b1);
- to_next[0] = bi0;
- to_next[1] = bi1;
- from += 2;
- to_next += 2;
- n_left_to_next -= 2;
- n_left_from -= 2;
-
/* Get next node index and adj index from tunnel next_dpo */
if (sw_if_index0 != vnet_buffer(b0)->sw_if_index[VLIB_TX])
{
vnet_buffer(b0)->ip.adj_index[VLIB_TX] = dpoi_idx0;
vnet_buffer(b1)->ip.adj_index[VLIB_TX] = dpoi_idx1;
- ASSERT(vec_len(t0->rewrite) == underlay_hdr_len);
- ASSERT(vec_len(t1->rewrite) == underlay_hdr_len);
+ ASSERT(t0->rewrite_header.data_bytes == underlay_hdr_len);
+ ASSERT(t1->rewrite_header.data_bytes == underlay_hdr_len);
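+ /* The tunnel rewrite must be exactly one underlay header; expose that
+ * much space in front of the inner packet before copying it in. */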
vlib_buffer_advance (b0, -underlay_hdr_len);
vlib_buffer_advance (b1, -underlay_hdr_len);
u16 payload_l0 = clib_host_to_net_u16 (len0 - l3_len);
u16 payload_l1 = clib_host_to_net_u16 (len1 - l3_len);
+ void * underlay0 = vlib_buffer_get_current(b0);
+ void * underlay1 = vlib_buffer_get_current(b1);
+
+ /* vnet_rewrite_two_headers writes only in (uword) 8-byte chunks
+ * and would discard the first 4 bytes of the (36-byte ip4 underlay) rewrite;
+ * use memcpy as a workaround */
+ clib_memcpy_fast(underlay0, t0->rewrite_header.data + rw_hdr_offset, underlay_hdr_len);
+ clib_memcpy_fast(underlay1, t1->rewrite_header.data + rw_hdr_offset, underlay_hdr_len);
+
ip4_header_t * ip4_0, * ip4_1;
+ qos_bits_t ip4_0_tos = 0, ip4_1_tos = 0;
ip6_header_t * ip6_0, * ip6_1;
udp_header_t * udp0, * udp1;
u8 * l3_0, * l3_1;
if (is_ip4)
{
- ip4_vxlan_header_t * hdr0 = vlib_buffer_get_current(b0);
- ip4_vxlan_header_t * rewrite0 = (void *)t0->rewrite;
- ip4_vxlan_header_t * hdr1 = vlib_buffer_get_current(b1);
- ip4_vxlan_header_t * rewrite1 = (void *)t1->rewrite;
- *hdr0 = *rewrite0;
- *hdr1 = *rewrite1;
+ ip4_vxlan_header_t * hdr0 = underlay0;
+ ip4_vxlan_header_t * hdr1 = underlay1;
/* Fix the IP4 checksum and length */
ip4_0 = &hdr0->ip4;
ip4_0->length = clib_host_to_net_u16 (len0);
ip4_1 = &hdr1->ip4;
ip4_1->length = clib_host_to_net_u16 (len1);
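+ /* If the buffer carries a valid QoS record, mark the underlay IP4 TOS
+ * with it; remember the value so the checksum update below can account
+ * for the change. */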
+ if (PREDICT_FALSE (b0->flags & VNET_BUFFER_F_QOS_DATA_VALID))
+ {
+ ip4_0_tos = vnet_buffer2 (b0)->qos.bits;
+ ip4_0->tos = ip4_0_tos;
+ }
+ if (PREDICT_FALSE (b1->flags & VNET_BUFFER_F_QOS_DATA_VALID))
+ {
+ ip4_1_tos = vnet_buffer2 (b1)->qos.bits;
+ ip4_1->tos = ip4_1_tos;
+ }
+
l3_0 = (u8 *)ip4_0;
l3_1 = (u8 *)ip4_1;
udp0 = &hdr0->udp;
}
else /* ipv6 */
{
- ip6_vxlan_header_t * hdr0 = vlib_buffer_get_current(b0);
- ip6_vxlan_header_t * rewrite0 = (void *) t0->rewrite;
- ip6_vxlan_header_t * hdr1 = vlib_buffer_get_current(b0);
- ip6_vxlan_header_t * rewrite1 = (void *) t1->rewrite;
- *hdr0 = *rewrite0;
- *hdr1 = *rewrite1;
+ ip6_vxlan_header_t * hdr0 = underlay0;
+ ip6_vxlan_header_t * hdr1 = underlay1;
/* Fix IP6 payload length */
ip6_0 = &hdr0->ip6;
ip_csum_t sum0 = ip4_0->checksum;
sum0 = ip_csum_update (sum0, 0, ip4_0->length, ip4_header_t,
length /* changed member */);
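+ /* The precomputed rewrite checksum assumes tos = 0; if a QoS value was
+ * written above, fold the new tos into the incremental update as well. */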
+ if (PREDICT_FALSE (ip4_0_tos))
+ {
+ sum0 = ip_csum_update (sum0, 0, ip4_0_tos, ip4_header_t,
+ tos /* changed member */);
+ }
ip4_0->checksum = ip_csum_fold (sum0);
ip_csum_t sum1 = ip4_1->checksum;
sum1 = ip_csum_update (sum1, 0, ip4_1->length, ip4_header_t,
length /* changed member */);
+ if (PREDICT_FALSE (ip4_1_tos))
+ {
+ sum1 = ip_csum_update (sum1, 0, ip4_1_tos, ip4_header_t,
+ tos /* changed member */);
+ }
ip4_1->checksum = ip_csum_fold (sum1);
}
/* IPv6 UDP checksum is mandatory */
udp1->checksum = 0xffff;
}
- /* Batch stats increment on the same vxlan tunnel so counter is not
- incremented per packet. Note stats are still incremented for deleted
- and admin-down tunnel where packets are dropped. It is not worthwhile
- to check for this rare case and affect normal path performance. */
- if (sw_if_index0 == sw_if_index1)
- {
- if (PREDICT_FALSE(sw_if_index0 != stats_sw_if_index))
- {
- if (stats_n_packets)
- {
- vlib_increment_combined_counter (tx_counter, thread_index,
- stats_sw_if_index, stats_n_packets, stats_n_bytes);
- stats_n_packets = stats_n_bytes = 0;
- }
- stats_sw_if_index = sw_if_index0;
- }
- stats_n_packets += 2;
- stats_n_bytes += len0 + len1;
- }
- else
- {
- vlib_increment_combined_counter (tx_counter, thread_index,
- sw_if_index0, 1, len0);
- vlib_increment_combined_counter (tx_counter, thread_index,
- sw_if_index1, 1, len1);
- }
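+ /* Per-tunnel tx counters are now bumped directly; when both packets use
+ * the same tunnel a single combined increment covers them. */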
+ if (sw_if_index0 == sw_if_index1)
+ {
+ vlib_increment_combined_counter (tx_counter, thread_index,
+ sw_if_index0, 2, len0 + len1);
+ }
+ else
+ {
+ vlib_increment_combined_counter (tx_counter, thread_index,
+ sw_if_index0, 1, len0);
+ vlib_increment_combined_counter (tx_counter, thread_index,
+ sw_if_index1, 1, len1);
+ }
pkts_encapsulated += 2;
if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
while (n_left_from > 0 && n_left_to_next > 0)
{
- u32 bi0 = from[0];
- vlib_buffer_t * b0 = vlib_get_buffer (vm, bi0);
- u32 flow_hash0 = vnet_l2_compute_flow_hash(b0);
-
- to_next[0] = bi0;
+ u32 bi0 = to_next[0] = from[0];
from += 1;
to_next += 1;
n_left_from -= 1;
n_left_to_next -= 1;
+ vlib_buffer_t * b0 = vlib_get_buffer (vm, bi0);
+ u32 flow_hash0 = vnet_l2_compute_flow_hash(b0);
+
/* Get next node index and adj index from tunnel next_dpo */
if (sw_if_index0 != vnet_buffer(b0)->sw_if_index[VLIB_TX])
{
}
vnet_buffer(b0)->ip.adj_index[VLIB_TX] = dpoi_idx0;
- ASSERT(vec_len(t0->rewrite) == underlay_hdr_len);
+ ASSERT(t0->rewrite_header.data_bytes == underlay_hdr_len);
+
vlib_buffer_advance (b0, -underlay_hdr_len);
+ void * underlay0 = vlib_buffer_get_current(b0);
+
+ /* vnet_rewrite_one_header writes only in (uword) 8-byte chunks
+ * and would discard the first 4 bytes of the (36-byte ip4 underlay) rewrite;
+ * use memcpy as a workaround */
+ clib_memcpy_fast(underlay0, t0->rewrite_header.data + rw_hdr_offset, underlay_hdr_len);
u32 len0 = vlib_buffer_length_in_chain (vm, b0);
u16 payload_l0 = clib_host_to_net_u16 (len0 - l3_len);
udp_header_t * udp0;
ip4_header_t * ip4_0;
+ qos_bits_t ip4_0_tos = 0;
ip6_header_t * ip6_0;
u8 * l3_0;
if (is_ip4)
{
- ip4_vxlan_header_t * rewrite = (void *)t0->rewrite;
- ip4_vxlan_header_t * hdr = vlib_buffer_get_current(b0);
- *hdr = *rewrite;
+ ip4_vxlan_header_t * hdr = underlay0;
/* Fix the IP4 checksum and length */
ip4_0 = &hdr->ip4;
ip4_0->length = clib_host_to_net_u16 (len0);
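+ /* Single-packet path: apply the buffer's QoS marking to the underlay
+ * TOS, as in the dual loop above. */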
+ if (PREDICT_FALSE (b0->flags & VNET_BUFFER_F_QOS_DATA_VALID))
+ {
+ ip4_0_tos = vnet_buffer2 (b0)->qos.bits;
+ ip4_0->tos = ip4_0_tos;
+ }
+
l3_0 = (u8*)ip4_0;
udp0 = &hdr->udp;
}
else /* ip6 path */
{
- ip6_vxlan_header_t * hdr = vlib_buffer_get_current(b0);
- ip6_vxlan_header_t * rewrite = (void *) t0->rewrite;
- *hdr = *rewrite;
+ ip6_vxlan_header_t * hdr = underlay0;
/* Fix IP6 payload length */
ip6_0 = &hdr->ip6;
ip_csum_t sum0 = ip4_0->checksum;
sum0 = ip_csum_update (sum0, 0, ip4_0->length, ip4_header_t,
length /* changed member */);
+ if (PREDICT_FALSE (ip4_0_tos))
+ {
+ sum0 = ip_csum_update (sum0, 0, ip4_0_tos, ip4_header_t,
+ tos /* changed member */);
+ }
ip4_0->checksum = ip_csum_fold (sum0);
}
/* IPv6 UDP checksum is mandatory */
udp0->checksum = 0xffff;
}
- /* Batch stats increment on the same vxlan tunnel so counter is not
- incremented per packet. Note stats are still incremented for deleted
- and admin-down tunnel where packets are dropped. It is not worthwhile
- to check for this rare case and affect normal path performance. */
- if (PREDICT_FALSE (sw_if_index0 != stats_sw_if_index))
- {
- if (stats_n_packets)
- {
- vlib_increment_combined_counter (tx_counter, thread_index,
- stats_sw_if_index, stats_n_packets, stats_n_bytes);
- stats_n_bytes = stats_n_packets = 0;
- }
- stats_sw_if_index = sw_if_index0;
- }
- stats_n_packets += 1;
- stats_n_bytes += len0;
+ vlib_increment_combined_counter (tx_counter, thread_index,
+ sw_if_index0, 1, len0);
pkts_encapsulated ++;
if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
VXLAN_ENCAP_ERROR_ENCAPSULATED,
pkts_encapsulated);
- /* Increment any remaining batch stats */
- if (stats_n_packets)
- {
- vlib_increment_combined_counter (tx_counter, thread_index,
- stats_sw_if_index, stats_n_packets, stats_n_bytes);
- node->runtime_data[0] = stats_sw_if_index;
- }
-
return from_frame->n_vectors;
}