{ RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM, VNET_HW_IF_CAP_TX_IP4_OUTER_CKSUM },
{ RTE_ETH_TX_OFFLOAD_OUTER_UDP_CKSUM, VNET_HW_IF_CAP_TX_UDP_OUTER_CKSUM },
{ RTE_ETH_TX_OFFLOAD_TCP_TSO, VNET_HW_IF_CAP_TCP_GSO },
- { RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO, VNET_HW_IF_CAP_VXLAN_TNL_GSO }
+ { RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO, VNET_HW_IF_CAP_VXLAN_TNL_GSO },
+ { RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO, VNET_HW_IF_CAP_IPIP_TNL_GSO }
};
/* per-device offload config */
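+ /* when TSO is enabled, also request tunnel TSO (VXLAN and, with this
+    change, IPIP) so encapsulated flows can be segmented in hardware */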
if (xd->conf.enable_tso)
txo |= RTE_ETH_TX_OFFLOAD_TCP_CKSUM | RTE_ETH_TX_OFFLOAD_TCP_TSO |
- RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO;
+ RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO | RTE_ETH_TX_OFFLOAD_IPIP_TNL_TSO;
if (xd->conf.disable_rx_scatter)
rxo &= ~RTE_ETH_RX_OFFLOAD_SCATTER;
int is_ip4 = b->flags & VNET_BUFFER_F_IS_IP4;
u32 tso = b->flags & VNET_BUFFER_F_GSO, max_pkt_len;
u32 ip_cksum, tcp_cksum, udp_cksum, outer_hdr_len = 0;
- u32 outer_ip_cksum, vxlan_tunnel;
+ u32 outer_ip_cksum, vxlan_tunnel, ipip_tunnel;
u64 ol_flags;
vnet_buffer_oflags_t oflags = 0;
udp_cksum = oflags & VNET_BUFFER_OFFLOAD_F_UDP_CKSUM;
outer_ip_cksum = oflags & VNET_BUFFER_OFFLOAD_F_OUTER_IP_CKSUM;
vxlan_tunnel = oflags & VNET_BUFFER_OFFLOAD_F_TNL_VXLAN;
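+ /* IPIP tunnel offload request, extracted the same way as the VXLAN flag above */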
+ ipip_tunnel = oflags & VNET_BUFFER_OFFLOAD_F_TNL_IPIP;
ol_flags = is_ip4 ? RTE_MBUF_F_TX_IPV4 : RTE_MBUF_F_TX_IPV6;
ol_flags |= ip_cksum ? RTE_MBUF_F_TX_IP_CKSUM : 0;
mb->outer_l3_len =
vnet_buffer (b)->l3_hdr_offset - vnet_buffer2 (b)->outer_l3_hdr_offset;
outer_hdr_len = mb->outer_l2_len + mb->outer_l3_len;
}
+ else if (ipip_tunnel)
+ {
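+ /* IPIP carries the inner IP packet directly, so there is no inner L2
+    header: l2_len is 0 and the outer header is outer L2 plus outer IP */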
+ ol_flags |= outer_ip_cksum ?
+ RTE_MBUF_F_TX_OUTER_IPV4 | RTE_MBUF_F_TX_OUTER_IP_CKSUM :
+ RTE_MBUF_F_TX_OUTER_IPV6;
+ ol_flags |= RTE_MBUF_F_TX_TUNNEL_IPIP;
+ mb->l2_len = 0;
+ mb->l3_len =
+ vnet_buffer (b)->l4_hdr_offset - vnet_buffer (b)->l3_hdr_offset;
+ mb->outer_l2_len =
+ vnet_buffer2 (b)->outer_l3_hdr_offset - b->current_data;
+ mb->outer_l3_len =
+ vnet_buffer (b)->l3_hdr_offset - vnet_buffer2 (b)->outer_l3_hdr_offset;
+ outer_hdr_len = mb->outer_l2_len + mb->outer_l3_len;
+ }
else
{
mb->l2_len = vnet_buffer (b)->l3_hdr_offset - b->current_data;
}
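+ /* supported_caps is the full set of GSO capabilities the caller checked
+    on the parent interface; sub-interfaces that lack any of them may fall
+    back to software segmentation below */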
static_always_inline uword
-vnet_gso_node_inline (vlib_main_t * vm,
- vlib_node_runtime_t * node,
- vlib_frame_t * frame,
- vnet_main_t * vnm,
- vnet_hw_interface_t * hi,
- int is_l2, int is_ip4, int is_ip6, int do_segmentation)
+vnet_gso_node_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
+ vlib_frame_t *frame, vnet_main_t *vnm,
+ vnet_hw_interface_t *hi, u32 supported_caps, int is_l2,
+ int is_ip4, int is_ip6, int do_segmentation)
{
u32 *to_next;
u32 next_index = node->cached_next_index;
if (PREDICT_FALSE (hi->sw_if_index != swif0))
{
hi0 = vnet_get_sup_hw_interface (vnm, swif0);
- if ((hi0->caps & VNET_HW_IF_CAP_TCP_GSO) == 0 &&
- (b[0]->flags & VNET_BUFFER_F_GSO))
+ if ((hi0->caps & supported_caps) != supported_caps)
break;
}
if (PREDICT_FALSE (hi->sw_if_index != swif1))
{
hi1 = vnet_get_sup_hw_interface (vnm, swif1);
- if (!(hi1->caps & VNET_HW_IF_CAP_TCP_GSO) &&
- (b[1]->flags & VNET_BUFFER_F_GSO))
+ if ((hi1->caps & supported_caps) != supported_caps)
break;
}
if (PREDICT_FALSE (hi->sw_if_index != swif2))
{
hi2 = vnet_get_sup_hw_interface (vnm, swif2);
- if ((hi2->caps & VNET_HW_IF_CAP_TCP_GSO) == 0 &&
- (b[2]->flags & VNET_BUFFER_F_GSO))
+ if ((hi2->caps & supported_caps) != supported_caps)
break;
}
if (PREDICT_FALSE (hi->sw_if_index != swif3))
{
hi3 = vnet_get_sup_hw_interface (vnm, swif3);
- if (!(hi3->caps & VNET_HW_IF_CAP_TCP_GSO) &&
- (b[3]->flags & VNET_BUFFER_F_GSO))
+ if ((hi3->caps & supported_caps) != supported_caps)
break;
}
vnet_hw_interface_t *hi0;
u32 next0 = 0;
u32 do_segmentation0 = 0;
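+ /* start from the parent interface caps; refined per sub-interface below */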
+ u32 caps = hi->caps;
swif0 = vnet_buffer (b[0])->sw_if_index[VLIB_TX];
if (PREDICT_FALSE (hi->sw_if_index != swif0))
{
hi0 = vnet_get_sup_hw_interface (vnm, swif0);
- if ((hi0->caps & VNET_HW_IF_CAP_TCP_GSO) == 0 &&
- (b[0]->flags & VNET_BUFFER_F_GSO))
+ caps = hi0->caps;
+ if ((hi0->caps & supported_caps) != supported_caps)
do_segmentation0 = 1;
}
else
do_segmentation0 = do_segmentation;
+ if (do_segmentation0)
+ {
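+ /* if the sub-interface still supports plain TCP GSO and the buffer is
+    not VXLAN- or IPIP-encapsulated, hardware can segment it: undo the
+    software-segmentation decision */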
+ vnet_buffer_oflags_t oflags = vnet_buffer (b[0])->oflags;
+ if ((caps & VNET_HW_IF_CAP_TCP_GSO) &&
+ !(oflags & (VNET_BUFFER_OFFLOAD_F_TNL_VXLAN |
+ VNET_BUFFER_OFFLOAD_F_TNL_IPIP)))
+ do_segmentation0 = 0;
+ }
/* speculatively enqueue b0 to the current next frame */
to_next[0] = bi0 = from[0];
to_next += 1;
{
u32 *from = vlib_frame_vector_args (frame);
vlib_buffer_t *b = vlib_get_buffer (vm, from[0]);
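+ /* every GSO capability the inline fast path relies on */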
+ u32 supported_caps =
+ (VNET_HW_IF_CAP_TCP_GSO | VNET_HW_IF_CAP_VXLAN_TNL_GSO |
+ VNET_HW_IF_CAP_IPIP_TNL_GSO);
+
hi = vnet_get_sup_hw_interface (vnm,
vnet_buffer (b)->sw_if_index[VLIB_TX]);
- if (hi->caps & (VNET_HW_IF_CAP_TCP_GSO | VNET_HW_IF_CAP_VXLAN_TNL_GSO))
- return vnet_gso_node_inline (vm, node, frame, vnm, hi,
+ if ((hi->caps & supported_caps) == supported_caps)
+ return vnet_gso_node_inline (vm, node, frame, vnm, hi, supported_caps,
is_l2, is_ip4, is_ip6,
/* do_segmentation */ 0);
else
- return vnet_gso_node_inline (vm, node, frame, vnm, hi,
+ return vnet_gso_node_inline (vm, node, frame, vnm, hi, supported_caps,
is_l2, is_ip4, is_ip6,
/* do_segmentation */ 1);
}