#include <vnet/ethernet/ethernet.h>
#include <vnet/feature/feature.h>
#include <vnet/gso/gso.h>
+#include <vnet/gso/hdr_offset_parser.h>
#include <vnet/ip/icmp46_packet.h>
#include <vnet/ip/ip4.h>
#include <vnet/ip/ip6.h>
u32 flags;
u16 gso_size;
u8 gso_l4_hdr_sz;
+ generic_header_offset_t gho;
} gso_trace_t;
static u8 *
if (t->flags & VNET_BUFFER_F_GSO)
{
- s = format (s, "gso_sz %d gso_l4_hdr_sz %d",
- t->gso_size, t->gso_l4_hdr_sz);
+ s = format (s, "gso_sz %d gso_l4_hdr_sz %d %U",
+ t->gso_size, t->gso_l4_hdr_sz, format_generic_header_offset,
+ &t->gho);
}
else
{
- s = format (s, "non-gso buffer");
+ s =
+ format (s, "non-gso buffer %U", format_generic_header_offset,
+ &t->gho);
}
return s;
}
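+/*
+ * Fix up the segments produced from a GSO buffer carried inside an IPIP
+ * tunnel: re-copy the saved outer headers from the original buffer sb0 in
+ * front of every segment and patch the outer IPv4/IPv6 length (and the
+ * outer IPv4 checksum). Returns the total outer-header bytes added.
+ */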
+static_always_inline u16
+tso_segment_ipip_tunnel_fixup (vlib_main_t * vm,
+ vnet_interface_per_thread_data_t * ptd,
+ vlib_buffer_t * sb0,
+ generic_header_offset_t * gho)
+{
+ u16 n_tx_bufs = vec_len (ptd->split_buffers);
+ u16 i = 0, n_tx_bytes = 0;
+
+ while (i < n_tx_bufs)
+ {
+ vlib_buffer_t *b0 = vlib_get_buffer (vm, ptd->split_buffers[i]);
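+ /* vnet_get_outer_header() positions the buffer at the outer header;
+ * then restore the original outer headers from sb0 in front of this
+ * segment */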
+ vnet_get_outer_header (b0, gho);
+ clib_memcpy_fast (vlib_buffer_get_current (b0),
+ vlib_buffer_get_current (sb0), gho->outer_hdr_sz);
+
+ ip4_header_t *ip4 =
+ (ip4_header_t *) (vlib_buffer_get_current (b0) +
+ gho->outer_l3_hdr_offset);
+ ip6_header_t *ip6 =
+ (ip6_header_t *) (vlib_buffer_get_current (b0) +
+ gho->outer_l3_hdr_offset);
+
+ if (gho->gho_flags & GHO_F_OUTER_IP4)
+ {
+ ip4->length =
+ clib_host_to_net_u16 (b0->current_length -
+ gho->outer_l3_hdr_offset);
+ ip4->checksum = ip4_header_checksum (ip4);
+ }
+ else if (gho->gho_flags & GHO_F_OUTER_IP6)
+ {
+ ip6->payload_length =
+ clib_host_to_net_u16 (b0->current_length -
+ gho->outer_l4_hdr_offset);
+ }
+
+ n_tx_bytes += gho->outer_hdr_sz;
+ i++;
+ }
+ return n_tx_bytes;
+}
+
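+/*
+ * Rewrite the outer headers of a single VXLAN-encapsulated segment: patch
+ * the outer IPv4/IPv6 length (and outer IPv4 checksum) and recompute the
+ * outer UDP length and checksum for the new segment size.
+ */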
+static_always_inline void
+tso_segment_vxlan_tunnel_headers_fixup (vlib_main_t * vm, vlib_buffer_t * b,
+ generic_header_offset_t * gho)
+{
+ u8 proto = 0;
+ ip4_header_t *ip4 = 0;
+ ip6_header_t *ip6 = 0;
+ udp_header_t *udp = 0;
+
+ ip4 =
+ (ip4_header_t *) (vlib_buffer_get_current (b) + gho->outer_l3_hdr_offset);
+ ip6 =
+ (ip6_header_t *) (vlib_buffer_get_current (b) + gho->outer_l3_hdr_offset);
+ udp =
+ (udp_header_t *) (vlib_buffer_get_current (b) + gho->outer_l4_hdr_offset);
+
+ if (gho->gho_flags & GHO_F_OUTER_IP4)
+ {
+ proto = ip4->protocol;
+ ip4->length =
+ clib_host_to_net_u16 (b->current_length - gho->outer_l3_hdr_offset);
+ ip4->checksum = ip4_header_checksum (ip4);
+ }
+ else if (gho->gho_flags & GHO_F_OUTER_IP6)
+ {
+ proto = ip6->protocol;
+ ip6->payload_length =
+ clib_host_to_net_u16 (b->current_length - gho->outer_l4_hdr_offset);
+ }
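+ /* the outer UDP length and checksum must be recomputed per segment;
+ * once done in software, the UDP checksum offload request is dropped */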
+ if (proto == IP_PROTOCOL_UDP)
+ {
+ int bogus;
+ udp->length =
+ clib_host_to_net_u16 (b->current_length - gho->outer_l4_hdr_offset);
+ udp->checksum = 0;
+ if (gho->gho_flags & GHO_F_OUTER_IP6)
+ {
+ udp->checksum =
+ ip6_tcp_udp_icmp_compute_checksum (vm, b, ip6, &bogus);
+ }
+ else if (gho->gho_flags & GHO_F_OUTER_IP4)
+ {
+ udp->checksum = ip4_tcp_udp_compute_checksum (vm, b, ip4);
+ }
+ b->flags &= ~VNET_BUFFER_F_OFFLOAD_UDP_CKSUM;
+ }
+}
+
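+/*
+ * Apply the VXLAN outer-header fixup to every segment produced from the
+ * original buffer sb0: restore the saved outer headers and rewrite the
+ * outer IP/UDP fields. Returns the total outer-header bytes added.
+ */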
+static_always_inline u16
+tso_segment_vxlan_tunnel_fixup (vlib_main_t * vm,
+ vnet_interface_per_thread_data_t * ptd,
+ vlib_buffer_t * sb0,
+ generic_header_offset_t * gho)
+{
+ u16 n_tx_bufs = vec_len (ptd->split_buffers);
+ u16 i = 0, n_tx_bytes = 0;
+
+ while (i < n_tx_bufs)
+ {
+ vlib_buffer_t *b0 = vlib_get_buffer (vm, ptd->split_buffers[i]);
+ vnet_get_outer_header (b0, gho);
+ clib_memcpy_fast (vlib_buffer_get_current (b0),
+ vlib_buffer_get_current (sb0), gho->outer_hdr_sz);
+
+ tso_segment_vxlan_tunnel_headers_fixup (vm, b0, gho);
+ n_tx_bytes += gho->outer_hdr_sz;
+ i++;
+ }
+ return n_tx_bytes;
+}
+
static_always_inline u16
tso_alloc_tx_bufs (vlib_main_t * vm,
vnet_interface_per_thread_data_t * ptd,
vlib_buffer_t * b0, u32 n_bytes_b0, u16 l234_sz,
- u16 gso_size, gso_header_offset_t * gho)
+ u16 gso_size, u16 first_data_size,
+ generic_header_offset_t * gho)
{
- u16 size =
+ u16 n_alloc, size;
+ u16 first_packet_length = l234_sz + first_data_size;
+
+ /*
+ * size is the amount of data per segmented buffer, except for the 1st
+ * segmented buffer.
+ * l2_hdr_offset is an offset equal to current_data of the vlib_buffer_t.
+ * l234_sz is the total l2/l3/l4 header size measured from l2_hdr_offset.
+ */
+ size =
clib_min (gso_size, vlib_buffer_get_default_data_size (vm) - l234_sz
- gho->l2_hdr_offset);
- /* rounded-up division */
- u16 n_bufs = (n_bytes_b0 - l234_sz + (size - 1)) / size;
- u16 n_alloc;
+ /*
+ * The first segmented buffer's length is calculated separately, as it
+ * may contain less data than gso_size (when gso_size is greater than
+ * the current_length of the 1st buffer of the GSO chain) and/or less
+ * than the size calculated above.
+ */
+ u16 n_bufs = 1;
+
+ /*
+ * Rounded-up division of the remaining bytes: the total packet length
+ * minus the first packet length (which includes the l234 header).
+ */
+ ASSERT (n_bytes_b0 > first_packet_length);
+ n_bufs += ((n_bytes_b0 - first_packet_length + (size - 1)) / size);
- ASSERT (n_bufs > 0);
vec_validate (ptd->split_buffers, n_bufs - 1);
n_alloc = vlib_buffer_alloc (vm, ptd->split_buffers, n_bufs);
tso_init_buf_from_template_base (vlib_buffer_t * nb0, vlib_buffer_t * b0,
u32 flags, u16 length)
{
+ /* copying objects from cacheline 0 */
nb0->current_data = b0->current_data;
- nb0->total_length_not_including_first_buffer = 0;
+ nb0->current_length = length;
nb0->flags = VLIB_BUFFER_TOTAL_LENGTH_VALID | flags;
- nb0->trace_handle = b0->trace_handle;
+ nb0->flow_id = b0->flow_id;
+ nb0->error = b0->error;
+ nb0->current_config_index = b0->current_config_index;
clib_memcpy_fast (&nb0->opaque, &b0->opaque, sizeof (nb0->opaque));
+
+ /* copying objects from cacheline 1 */
+ nb0->trace_handle = b0->trace_handle;
+ nb0->total_length_not_including_first_buffer = 0;
+
+ /* copying data */
clib_memcpy_fast (vlib_buffer_get_current (nb0),
vlib_buffer_get_current (b0), length);
- nb0->current_length = length;
}
static_always_inline void
vlib_buffer_t * b0, u16 template_data_sz,
u16 gso_size, u8 ** p_dst_ptr, u16 * p_dst_left,
u32 next_tcp_seq, u32 flags,
- gso_header_offset_t * gho)
+ generic_header_offset_t * gho)
{
tso_init_buf_from_template_base (nb0, b0, flags, template_data_sz);
}
static_always_inline void
-tso_fixup_segmented_buf (vlib_buffer_t * b0, u8 tcp_flags, int is_ip6,
- gso_header_offset_t * gho)
+tso_fixup_segmented_buf (vlib_main_t * vm, vlib_buffer_t * b0, u8 tcp_flags,
+ int is_ip6, generic_header_offset_t * gho)
{
ip4_header_t *ip4 =
(ip4_header_t *) (vlib_buffer_get_current (b0) + gho->l3_hdr_offset);
tcp->flags = tcp_flags;
if (is_ip6)
- ip6->payload_length =
- clib_host_to_net_u16 (b0->current_length -
- (gho->l4_hdr_offset - gho->l2_hdr_offset));
+ {
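+ /* patch the IPv6 payload length for this segment; for TCP the checksum
+ * is recomputed in software, so the offload request is cleared */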
+ ip6->payload_length =
+ clib_host_to_net_u16 (b0->current_length - gho->l4_hdr_offset);
+ if (gho->gho_flags & GHO_F_TCP)
+ {
+ int bogus = 0;
+ tcp->checksum = 0;
+ tcp->checksum =
+ ip6_tcp_udp_icmp_compute_checksum (vm, b0, ip6, &bogus);
+ b0->flags &= ~VNET_BUFFER_F_OFFLOAD_TCP_CKSUM;
+ }
+ }
else
- ip4->length =
- clib_host_to_net_u16 (b0->current_length -
- (gho->l3_hdr_offset - gho->l2_hdr_offset));
+ {
+ ip4->length =
+ clib_host_to_net_u16 (b0->current_length - gho->l3_hdr_offset);
+ if (gho->gho_flags & GHO_F_IP4)
+ ip4->checksum = ip4_header_checksum (ip4);
+ if (gho->gho_flags & GHO_F_TCP)
+ {
+ tcp->checksum = 0;
+ tcp->checksum = ip4_tcp_udp_compute_checksum (vm, b0, ip4);
+ }
+ b0->flags &= ~VNET_BUFFER_F_OFFLOAD_TCP_CKSUM;
+ b0->flags &= ~VNET_BUFFER_F_OFFLOAD_IP_CKSUM;
+ }
+
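+ /* a buffer that is not a parsed tunnel may still be heading into a
+ * midchain adjacency (e.g. an ipip tunnel); apply its fixup to each
+ * segment as well */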
+ if ((gho->gho_flags & GHO_F_TUNNEL) == 0)
+ {
+ u32 adj_index0 = vnet_buffer (b0)->ip.adj_index[VLIB_TX];
+
+ ip_adjacency_t *adj0 = adj_get (adj_index0);
+
+ if (adj0->lookup_next_index == IP_LOOKUP_NEXT_MIDCHAIN &&
+ adj0->sub_type.midchain.fixup_func)
+ /* calls e.g. ipip44_fixup */
+ adj0->sub_type.midchain.fixup_func
+ (vm, adj0, b0, adj0->sub_type.midchain.fixup_data);
+ }
}
/**
static_always_inline u32
tso_segment_buffer (vlib_main_t * vm, vnet_interface_per_thread_data_t * ptd,
- u32 sbi0, vlib_buffer_t * sb0, gso_header_offset_t * gho,
- u32 n_bytes_b0, int is_ip6)
+ u32 sbi0, vlib_buffer_t * sb0,
+ generic_header_offset_t * gho, u32 n_bytes_b0, int is_ip6)
{
u32 n_tx_bytes = 0;
u16 gso_size = vnet_buffer2 (sb0)->gso_size;
- int l4_hdr_sz = gho->l4_hdr_sz;
u8 save_tcp_flags = 0;
u8 tcp_flags_no_fin_psh = 0;
u32 next_tcp_seq = 0;
u32 default_bflags =
sb0->flags & ~(VNET_BUFFER_F_GSO | VLIB_BUFFER_NEXT_PRESENT);
- u16 l234_sz = gho->l4_hdr_offset + l4_hdr_sz - gho->l2_hdr_offset;
+ u16 l234_sz = gho->hdr_sz;
int first_data_size = clib_min (gso_size, sb0->current_length - l234_sz);
next_tcp_seq += first_data_size;
if (PREDICT_FALSE
- (!tso_alloc_tx_bufs (vm, ptd, sb0, n_bytes_b0, l234_sz, gso_size, gho)))
+ (!tso_alloc_tx_bufs
+ (vm, ptd, sb0, n_bytes_b0, l234_sz, gso_size, first_data_size, gho)))
return 0;
vlib_buffer_t *b0 = vlib_get_buffer (vm, ptd->split_buffers[0]);
src_ptr = vlib_buffer_get_current (sb0) + l234_sz + first_data_size;
src_left = sb0->current_length - l234_sz - first_data_size;
- tso_fixup_segmented_buf (b0, tcp_flags_no_fin_psh, is_ip6, gho);
+ tso_fixup_segmented_buf (vm, b0, tcp_flags_no_fin_psh, is_ip6, gho);
/* grab a second buffer and prepare the loop */
ASSERT (dbi < vec_len (ptd->split_buffers));
if (0 == dst_left && total_src_left)
{
n_tx_bytes += cdb0->current_length;
- tso_fixup_segmented_buf (cdb0, tcp_flags_no_fin_psh, is_ip6,
+ tso_fixup_segmented_buf (vm, cdb0, tcp_flags_no_fin_psh, is_ip6,
gho);
ASSERT (dbi < vec_len (ptd->split_buffers));
cdb0 = vlib_get_buffer (vm, ptd->split_buffers[dbi++]);
}
}
- tso_fixup_segmented_buf (cdb0, save_tcp_flags, is_ip6, gho);
+ tso_fixup_segmented_buf (vm, cdb0, save_tcp_flags, is_ip6, gho);
n_tx_bytes += cdb0->current_length;
}
vlib_frame_t * frame,
vnet_main_t * vnm,
vnet_hw_interface_t * hi,
- int is_ip6, int do_segmentation)
+ int is_l2, int is_ip4, int is_ip6, int do_segmentation)
{
u32 *to_next;
u32 next_index = node->cached_next_index;
t0->flags = b[0]->flags & VNET_BUFFER_F_GSO;
t0->gso_size = vnet_buffer2 (b[0])->gso_size;
t0->gso_l4_hdr_sz = vnet_buffer2 (b[0])->gso_l4_hdr_sz;
+ vnet_generic_header_offset_parser (b[0], &t0->gho, is_l2,
+ is_ip4, is_ip6);
}
if (b[1]->flags & VLIB_BUFFER_IS_TRACED)
{
t1->flags = b[1]->flags & VNET_BUFFER_F_GSO;
t1->gso_size = vnet_buffer2 (b[1])->gso_size;
t1->gso_l4_hdr_sz = vnet_buffer2 (b[1])->gso_l4_hdr_sz;
+ vnet_generic_header_offset_parser (b[1], &t1->gho, is_l2,
+ is_ip4, is_ip6);
}
if (b[2]->flags & VLIB_BUFFER_IS_TRACED)
{
t2->flags = b[2]->flags & VNET_BUFFER_F_GSO;
t2->gso_size = vnet_buffer2 (b[2])->gso_size;
t2->gso_l4_hdr_sz = vnet_buffer2 (b[2])->gso_l4_hdr_sz;
+ vnet_generic_header_offset_parser (b[2], &t2->gho, is_l2,
+ is_ip4, is_ip6);
}
if (b[3]->flags & VLIB_BUFFER_IS_TRACED)
{
t3->flags = b[3]->flags & VNET_BUFFER_F_GSO;
t3->gso_size = vnet_buffer2 (b[3])->gso_size;
t3->gso_l4_hdr_sz = vnet_buffer2 (b[3])->gso_l4_hdr_sz;
+ vnet_generic_header_offset_parser (b[3], &t3->gho, is_l2,
+ is_ip4, is_ip6);
}
from += 4;
t0->flags = b[0]->flags & VNET_BUFFER_F_GSO;
t0->gso_size = vnet_buffer2 (b[0])->gso_size;
t0->gso_l4_hdr_sz = vnet_buffer2 (b[0])->gso_l4_hdr_sz;
+ vnet_generic_header_offset_parser (b[0], &t0->gho, is_l2,
+ is_ip4, is_ip6);
}
if (do_segmentation0)
to_next -= 1;
n_left_to_next += 1;
/* undo the counting. */
- gso_header_offset_t gho;
+ generic_header_offset_t gho = { 0 };
u32 n_bytes_b0 = vlib_buffer_length_in_chain (vm, b[0]);
u32 n_tx_bytes = 0;
- gho = vnet_gso_header_offset_parser (b[0], is_ip6);
+ vnet_generic_header_offset_parser (b[0], &gho, is_l2,
+ is_ip4, is_ip6);
+
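+ /* tunnelled GSO: segment against the inner headers first, then fix the
+ * outer headers up afterwards; GRE and GENEVE encapsulations are not
+ * handled yet and such buffers are dropped */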
+ if (PREDICT_FALSE (gho.gho_flags & GHO_F_TUNNEL))
+ {
+ if (PREDICT_FALSE
+ (gho.gho_flags & (GHO_F_GRE_TUNNEL |
+ GHO_F_GENEVE_TUNNEL)))
+ {
+ /* not supported yet */
+ drop_one_buffer_and_count (vm, vnm, node, from - 1,
+ hi->sw_if_index,
+ VNET_INTERFACE_OUTPUT_ERROR_UNHANDLED_GSO_TYPE);
+ b += 1;
+ continue;
+ }
+
+ vnet_get_inner_header (b[0], &gho);
+
+ n_bytes_b0 -= gho.outer_hdr_sz;
+ is_ip6 = (gho.gho_flags & GHO_F_IP6) != 0;
+ }
+
n_tx_bytes =
tso_segment_buffer (vm, ptd, bi0, b[0], &gho, n_bytes_b0,
is_ip6);
continue;
}
+
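+ /* re-attach and fix up the outer tunnel headers on every segment just
+ * produced by tso_segment_buffer() */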
+ if (PREDICT_FALSE (gho.gho_flags & GHO_F_VXLAN_TUNNEL))
+ {
+ vnet_get_outer_header (b[0], &gho);
+ n_tx_bytes +=
+ tso_segment_vxlan_tunnel_fixup (vm, ptd, b[0], &gho);
+ }
+ else
+ if (PREDICT_FALSE
+ (gho.gho_flags & (GHO_F_IPIP_TUNNEL |
+ GHO_F_IPIP6_TUNNEL)))
+ {
+ vnet_get_outer_header (b[0], &gho);
+ n_tx_bytes +=
+ tso_segment_ipip_tunnel_fixup (vm, ptd, b[0], &gho);
+ }
+
u16 n_tx_bufs = vec_len (ptd->split_buffers);
u32 *from_seg = ptd->split_buffers;
{
u32 sbi0;
vlib_buffer_t *sb0;
- if (n_tx_bufs >= n_left_to_next)
- {
- while (n_left_to_next > 0)
- {
- sbi0 = to_next[0] = from_seg[0];
- sb0 = vlib_get_buffer (vm, sbi0);
- to_next += 1;
- from_seg += 1;
- n_left_to_next -= 1;
- n_tx_bufs -= 1;
- vnet_feature_next (&next0, sb0);
- vlib_validate_buffer_enqueue_x1 (vm, node,
- next_index,
- to_next,
- n_left_to_next,
- sbi0, next0);
- }
- vlib_put_next_frame (vm, node, next_index,
- n_left_to_next);
- vlib_get_new_next_frame (vm, node, next_index,
- to_next, n_left_to_next);
- }
- while (n_tx_bufs > 0)
+ while (n_tx_bufs > 0 && n_left_to_next > 0)
{
sbi0 = to_next[0] = from_seg[0];
sb0 = vlib_get_buffer (vm, sbi0);
+ ASSERT (sb0->current_length > 0);
to_next += 1;
from_seg += 1;
n_left_to_next -= 1;
n_tx_bufs -= 1;
+ next0 = 0;
vnet_feature_next (&next0, sb0);
vlib_validate_buffer_enqueue_x1 (vm, node,
next_index,
n_left_to_next,
sbi0, next0);
}
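+ /* the frame may have filled up before all segments were enqueued:
+ * flush it and, if segments remain, continue with a fresh frame */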
+ vlib_put_next_frame (vm, node, next_index,
+ n_left_to_next);
+ if (n_tx_bufs > 0)
+ vlib_get_next_frame (vm, node, next_index,
+ to_next, n_left_to_next);
}
/* The buffers were enqueued. Reset the length */
_vec_len (ptd->split_buffers) = 0;
static_always_inline uword
vnet_gso_inline (vlib_main_t * vm,
- vlib_node_runtime_t * node, vlib_frame_t * frame, int is_ip6)
+ vlib_node_runtime_t * node, vlib_frame_t * frame, int is_l2,
+ int is_ip4, int is_ip6)
{
vnet_main_t *vnm = vnet_get_main ();
vnet_hw_interface_t *hi;
if (hi->flags & VNET_HW_INTERFACE_FLAG_SUPPORTS_GSO)
return vnet_gso_node_inline (vm, node, frame, vnm, hi,
- is_ip6, /* do_segmentation */ 0);
+ is_l2, is_ip4, is_ip6,
+ /* do_segmentation */ 0);
else
return vnet_gso_node_inline (vm, node, frame, vnm, hi,
- is_ip6, /* do_segmentation */ 1);
+ is_l2, is_ip4, is_ip6,
+ /* do_segmentation */ 1);
}
return 0;
}
VLIB_NODE_FN (gso_l2_ip4_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
vlib_frame_t * frame)
{
- return vnet_gso_inline (vm, node, frame, 0 /* ip6 */ );
+ return vnet_gso_inline (vm, node, frame, 1 /* l2 */ , 1 /* ip4 */ ,
+ 0 /* ip6 */ );
}
VLIB_NODE_FN (gso_l2_ip6_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
vlib_frame_t * frame)
{
- return vnet_gso_inline (vm, node, frame, 1 /* ip6 */ );
+ return vnet_gso_inline (vm, node, frame, 1 /* l2 */ , 0 /* ip4 */ ,
+ 1 /* ip6 */ );
}
VLIB_NODE_FN (gso_ip4_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
vlib_frame_t * frame)
{
- return vnet_gso_inline (vm, node, frame, 0 /* ip6 */ );
+ return vnet_gso_inline (vm, node, frame, 0 /* l2 */ , 1 /* ip4 */ ,
+ 0 /* ip6 */ );
}
VLIB_NODE_FN (gso_ip6_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
vlib_frame_t * frame)
{
- return vnet_gso_inline (vm, node, frame, 1 /* ip6 */ );
+ return vnet_gso_inline (vm, node, frame, 0 /* l2 */ , 0 /* ip4 */ ,
+ 1 /* ip6 */ );
}
/* *INDENT-OFF* */
VNET_FEATURE_INIT (gso_ip4_node, static) = {
.arc_name = "ip4-output",
.node_name = "gso-ip4",
- .runs_after = VNET_FEATURES ("ipsec4-output-feature"),
- .runs_before = VNET_FEATURES ("interface-output"),
+ .runs_before = VNET_FEATURES ("esp4-encrypt-tun", "ipsec4-output-feature"),
};
VNET_FEATURE_INIT (gso_ip6_node, static) = {
.arc_name = "ip6-output",
.node_name = "gso-ip6",
- .runs_after = VNET_FEATURES ("ipsec6-output-feature"),
- .runs_before = VNET_FEATURES ("interface-output"),
+ .runs_before = VNET_FEATURES ("esp6-encrypt-tun", "ipsec6-output-feature"),
};
/*