#include <vnet/ip/ip6.h>
#include <vnet/udp/udp_packet.h>
/*
 * Error table for the gso nodes.  Each _() invocation expands twice:
 * once into the GSO_ERROR_* enum entry and once into the matching
 * human-readable counter string, keeping the two in lock step.
 */
#define foreach_gso_error                                                     \
  _ (NO_BUFFERS, "no buffers to segment GSO")                                 \
  _ (UNHANDLED_TYPE, "unhandled gso type")

/* Counter strings, indexed by gso_error_t value. */
static char *gso_error_strings[] = {
#define _(f, s) s,
  foreach_gso_error
#undef _
};

/* Error codes reported via the node error counters. */
typedef enum
{
#define _(f, s) GSO_ERROR_##f,
  foreach_gso_error
#undef _
    GSO_N_ERROR,
} gso_error_t;

/* Next-node indices used by the gso nodes (drop path only). */
typedef enum
{
  GSO_NEXT_DROP,
  GSO_N_NEXT,
} gso_next_t;
+
typedef struct
{
u32 flags;
if (t->flags & VNET_BUFFER_F_GSO)
{
- s = format (s, "gso_sz %d gso_l4_hdr_sz %d %U",
+ s = format (s, "gso_sz %d gso_l4_hdr_sz %d\n%U",
t->gso_size, t->gso_l4_hdr_sz, format_generic_header_offset,
&t->gho);
}
else
{
s =
- format (s, "non-gso buffer %U", format_generic_header_offset,
+ format (s, "non-gso buffer\n%U", format_generic_header_offset,
&t->gho);
}
while (i < n_tx_bufs)
{
vlib_buffer_t *b0 = vlib_get_buffer (vm, ptd->split_buffers[i]);
- vnet_get_outer_header (b0, gho);
- clib_memcpy_fast (vlib_buffer_get_current (b0),
- vlib_buffer_get_current (sb0), gho->outer_hdr_sz);
ip4_header_t *ip4 =
(ip4_header_t *) (vlib_buffer_get_current (b0) +
i++;
}
return n_tx_bytes;
-
}
static_always_inline void
{
udp->checksum = ip4_tcp_udp_compute_checksum (vm, b, ip4);
}
- b->flags &= ~VNET_BUFFER_F_OFFLOAD_UDP_CKSUM;
+ /* FIXME: it should be OUTER_UDP_CKSUM */
+ vnet_buffer_offload_flags_clear (b, VNET_BUFFER_OFFLOAD_F_UDP_CKSUM);
}
}
while (i < n_tx_bufs)
{
vlib_buffer_t *b0 = vlib_get_buffer (vm, ptd->split_buffers[i]);
- vnet_get_outer_header (b0, gho);
- clib_memcpy_fast (vlib_buffer_get_current (b0),
- vlib_buffer_get_current (sb0), gho->outer_hdr_sz);
tso_segment_vxlan_tunnel_headers_fixup (vm, b0, gho);
n_tx_bytes += gho->outer_hdr_sz;
tcp->checksum = 0;
tcp->checksum =
ip6_tcp_udp_icmp_compute_checksum (vm, b0, ip6, &bogus);
- b0->flags &= ~VNET_BUFFER_F_OFFLOAD_TCP_CKSUM;
+ vnet_buffer_offload_flags_clear (b0,
+ VNET_BUFFER_OFFLOAD_F_TCP_CKSUM);
}
}
else
tcp->checksum = 0;
tcp->checksum = ip4_tcp_udp_compute_checksum (vm, b0, ip4);
}
- b0->flags &= ~VNET_BUFFER_F_OFFLOAD_TCP_CKSUM;
- b0->flags &= ~VNET_BUFFER_F_OFFLOAD_IP_CKSUM;
+ vnet_buffer_offload_flags_clear (b0, (VNET_BUFFER_OFFLOAD_F_IP_CKSUM |
+ VNET_BUFFER_OFFLOAD_F_TCP_CKSUM));
}
if (!is_l2 && ((gho->gho_flags & GHO_F_TUNNEL) == 0))
return n_tx_bytes;
}
+__clib_unused u32
+gso_segment_buffer (vlib_main_t *vm, vnet_interface_per_thread_data_t *ptd,
+ u32 bi, vlib_buffer_t *b, generic_header_offset_t *gho,
+ u32 n_bytes_b, u8 is_l2, u8 is_ip6)
+{
+
+ return tso_segment_buffer (vm, ptd, bi, b, gho, n_bytes_b, is_l2, is_ip6);
+}
+
static_always_inline void
drop_one_buffer_and_count (vlib_main_t * vm, vnet_main_t * vnm,
vlib_node_runtime_t * node, u32 * pbi0,
vlib_error_drop_buffers (vm, node, pbi0,
/* buffer stride */ 1,
- /* n_buffers */ 1,
- VNET_INTERFACE_OUTPUT_NEXT_DROP,
- node->node_index, drop_error_code);
+ /* n_buffers */ 1, GSO_NEXT_DROP, node->node_index,
+ drop_error_code);
}
static_always_inline uword
if (PREDICT_FALSE (hi->sw_if_index != swif0))
{
hi0 = vnet_get_sup_hw_interface (vnm, swif0);
- if ((hi0->flags & VNET_HW_INTERFACE_FLAG_SUPPORTS_GSO) == 0 &&
+ if ((hi0->caps & VNET_HW_IF_CAP_TCP_GSO) == 0 &&
(b[0]->flags & VNET_BUFFER_F_GSO))
break;
}
if (PREDICT_FALSE (hi->sw_if_index != swif1))
{
hi1 = vnet_get_sup_hw_interface (vnm, swif1);
- if (!(hi1->flags & VNET_HW_INTERFACE_FLAG_SUPPORTS_GSO) &&
+ if (!(hi1->caps & VNET_HW_IF_CAP_TCP_GSO) &&
(b[1]->flags & VNET_BUFFER_F_GSO))
break;
}
if (PREDICT_FALSE (hi->sw_if_index != swif2))
{
hi2 = vnet_get_sup_hw_interface (vnm, swif2);
- if ((hi2->flags & VNET_HW_INTERFACE_FLAG_SUPPORTS_GSO) == 0 &&
+ if ((hi2->caps & VNET_HW_IF_CAP_TCP_GSO) == 0 &&
(b[2]->flags & VNET_BUFFER_F_GSO))
break;
}
if (PREDICT_FALSE (hi->sw_if_index != swif3))
{
hi3 = vnet_get_sup_hw_interface (vnm, swif3);
- if (!(hi3->flags & VNET_HW_INTERFACE_FLAG_SUPPORTS_GSO) &&
+ if (!(hi3->caps & VNET_HW_IF_CAP_TCP_GSO) &&
(b[3]->flags & VNET_BUFFER_F_GSO))
break;
}
if (PREDICT_FALSE (hi->sw_if_index != swif0))
{
hi0 = vnet_get_sup_hw_interface (vnm, swif0);
- if ((hi0->flags & VNET_HW_INTERFACE_FLAG_SUPPORTS_GSO) == 0 &&
+ if ((hi0->caps & VNET_HW_IF_CAP_TCP_GSO) == 0 &&
(b[0]->flags & VNET_BUFFER_F_GSO))
do_segmentation0 = 1;
}
n_left_to_next += 1;
/* undo the counting. */
generic_header_offset_t gho = { 0 };
- u32 n_bytes_b0 = vlib_buffer_length_in_chain (vm, b[0]);
u32 n_tx_bytes = 0;
u32 inner_is_ip6 = is_ip6;
/* not supported yet */
drop_one_buffer_and_count (vm, vnm, node, from - 1,
hi->sw_if_index,
- VNET_INTERFACE_OUTPUT_ERROR_UNHANDLED_GSO_TYPE);
+ GSO_ERROR_UNHANDLED_TYPE);
b += 1;
continue;
}
- vnet_get_inner_header (b[0], &gho);
-
- n_bytes_b0 -= gho.outer_hdr_sz;
inner_is_ip6 = (gho.gho_flags & GHO_F_IP6) != 0;
}
- n_tx_bytes =
- tso_segment_buffer (vm, ptd, bi0, b[0], &gho, n_bytes_b0,
- is_l2, inner_is_ip6);
+ n_tx_bytes = gso_segment_buffer_inline (vm, ptd, b[0], &gho,
+ is_l2, inner_is_ip6);
if (PREDICT_FALSE (n_tx_bytes == 0))
{
drop_one_buffer_and_count (vm, vnm, node, from - 1,
hi->sw_if_index,
- VNET_INTERFACE_OUTPUT_ERROR_NO_BUFFERS_FOR_GSO);
+ GSO_ERROR_NO_BUFFERS);
b += 1;
continue;
}
if (PREDICT_FALSE (gho.gho_flags & GHO_F_VXLAN_TUNNEL))
{
- vnet_get_outer_header (b[0], &gho);
n_tx_bytes +=
tso_segment_vxlan_tunnel_fixup (vm, ptd, b[0], &gho);
}
(gho.gho_flags & (GHO_F_IPIP_TUNNEL |
GHO_F_IPIP6_TUNNEL)))
{
- vnet_get_outer_header (b[0], &gho);
n_tx_bytes +=
tso_segment_ipip_tunnel_fixup (vm, ptd, b[0], &gho);
}
to_next, n_left_to_next);
}
/* The buffers were enqueued. Reset the length */
- _vec_len (ptd->split_buffers) = 0;
+ vec_set_len (ptd->split_buffers, 0);
/* Free the now segmented buffer */
vlib_buffer_free_one (vm, bi0);
b += 1;
hi = vnet_get_sup_hw_interface (vnm,
vnet_buffer (b)->sw_if_index[VLIB_TX]);
- if (hi->flags & VNET_HW_INTERFACE_FLAG_SUPPORTS_GSO)
+ if (hi->caps & (VNET_HW_IF_CAP_TCP_GSO | VNET_HW_IF_CAP_VXLAN_TNL_GSO))
return vnet_gso_node_inline (vm, node, frame, vnm, hi,
is_l2, is_ip4, is_ip6,
/* do_segmentation */ 0);
.vector_size = sizeof (u32),
.format_trace = format_gso_trace,
.type = VLIB_NODE_TYPE_INTERNAL,
- .n_errors = 0,
- .n_next_nodes = 0,
+ .n_errors = ARRAY_LEN(gso_error_strings),
+ .error_strings = gso_error_strings,
+ .n_next_nodes = GSO_N_NEXT,
+ .next_nodes = {
+ [GSO_NEXT_DROP] = "error-drop",
+ },
.name = "gso-l2-ip4",
};
.vector_size = sizeof (u32),
.format_trace = format_gso_trace,
.type = VLIB_NODE_TYPE_INTERNAL,
- .n_errors = 0,
- .n_next_nodes = 0,
+ .n_errors = ARRAY_LEN(gso_error_strings),
+ .error_strings = gso_error_strings,
+ .n_next_nodes = GSO_N_NEXT,
+ .next_nodes = {
+ [GSO_NEXT_DROP] = "error-drop",
+ },
.name = "gso-l2-ip6",
};
.vector_size = sizeof (u32),
.format_trace = format_gso_trace,
.type = VLIB_NODE_TYPE_INTERNAL,
- .n_errors = 0,
- .n_next_nodes = 0,
+ .n_errors = ARRAY_LEN(gso_error_strings),
+ .error_strings = gso_error_strings,
+ .n_next_nodes = GSO_N_NEXT,
+ .next_nodes = {
+ [GSO_NEXT_DROP] = "error-drop",
+ },
.name = "gso-ip4",
};
.vector_size = sizeof (u32),
.format_trace = format_gso_trace,
.type = VLIB_NODE_TYPE_INTERNAL,
- .n_errors = 0,
- .n_next_nodes = 0,
+ .n_errors = ARRAY_LEN(gso_error_strings),
+ .error_strings = gso_error_strings,
+ .n_next_nodes = GSO_N_NEXT,
+ .next_nodes = {
+ [GSO_NEXT_DROP] = "error-drop",
+ },
.name = "gso-ip6",
};