}
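+/* Parse the L3/L4 headers of a packet that the virtio header marks as
+   needing checksum offload, set the matching offload flags and header
+   offsets on the buffer, and return the L4 protocol and header size. */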
static_always_inline void
-fill_gso_buffer_flags (vlib_buffer_t * b0, struct virtio_net_hdr_v1 *hdr)
+virtio_needs_csum (vlib_buffer_t * b0, struct virtio_net_hdr_v1 *hdr,
+ u8 * l4_proto, u8 * l4_hdr_sz)
{
- u8 l4_proto = 0;
- u8 l4_hdr_sz = 0;
if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
{
ip4_header_t *ip4 =
(ip4_header_t *) (vlib_buffer_get_current (b0) + l2hdr_sz);
vnet_buffer (b0)->l4_hdr_offset = l2hdr_sz + ip4_header_bytes (ip4);
- l4_proto = ip4->protocol;
+ *l4_proto = ip4->protocol;
b0->flags |=
(VNET_BUFFER_F_IS_IP4 | VNET_BUFFER_F_OFFLOAD_IP_CKSUM);
b0->flags |=
(VNET_BUFFER_F_L2_HDR_OFFSET_VALID
| VNET_BUFFER_F_L3_HDR_OFFSET_VALID |
VNET_BUFFER_F_L4_HDR_OFFSET_VALID);
}
else if (PREDICT_TRUE (ethertype == ETHERNET_TYPE_IP6))
{
ip6_header_t *ip6 =
(ip6_header_t *) (vlib_buffer_get_current (b0) + l2hdr_sz);
vnet_buffer (b0)->l4_hdr_offset = l2hdr_sz + sizeof (ip6_header_t);
/* FIXME IPv6 EH traversal */
- l4_proto = ip6->protocol;
+ *l4_proto = ip6->protocol;
b0->flags |= (VNET_BUFFER_F_IS_IP6 |
VNET_BUFFER_F_L2_HDR_OFFSET_VALID
| VNET_BUFFER_F_L3_HDR_OFFSET_VALID |
VNET_BUFFER_F_L4_HDR_OFFSET_VALID);
}
- if (l4_proto == IP_PROTOCOL_TCP)
+ if (*l4_proto == IP_PROTOCOL_TCP)
{
b0->flags |= VNET_BUFFER_F_OFFLOAD_TCP_CKSUM;
tcp_header_t *tcp = (tcp_header_t *) (vlib_buffer_get_current (b0) +
vnet_buffer
(b0)->l4_hdr_offset);
- l4_hdr_sz = tcp_header_bytes (tcp);
+ *l4_hdr_sz = tcp_header_bytes (tcp);
tcp->checksum = 0;
}
- else if (l4_proto == IP_PROTOCOL_UDP)
+ else if (*l4_proto == IP_PROTOCOL_UDP)
{
b0->flags |= VNET_BUFFER_F_OFFLOAD_UDP_CKSUM;
udp_header_t *udp = (udp_header_t *) (vlib_buffer_get_current (b0) +
vnet_buffer
(b0)->l4_hdr_offset);
- l4_hdr_sz = sizeof (*udp);
+ *l4_hdr_sz = sizeof (*udp);
udp->checksum = 0;
}
}
+}
+
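+/* GSO flags are filled from the l4_proto / l4_hdr_sz values computed
+   by virtio_needs_csum() above. */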
+static_always_inline void
+fill_gso_buffer_flags (vlib_buffer_t * b0, struct virtio_net_hdr_v1 *hdr,
+ u8 l4_proto, u8 l4_hdr_sz)
+{
if (hdr->gso_type == VIRTIO_NET_HDR_GSO_TCPV4)
{
ASSERT (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM);
static_always_inline uword
virtio_device_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
vlib_frame_t * frame, virtio_if_t * vif, u16 qid,
- int gso_enabled)
+ int gso_enabled, int checksum_offload_enabled)
{
vnet_main_t *vnm = vnet_get_main ();
u32 thread_index = vm->thread_index;
while (n_left && n_left_to_next)
{
+ u8 l4_proto = 0, l4_hdr_sz = 0;
u16 num_buffers = 1;
struct vring_used_elem *e = &vring->used->ring[last & mask];
struct virtio_net_hdr_v1 *hdr;
b0->total_length_not_including_first_buffer = 0;
b0->flags = VLIB_BUFFER_TOTAL_LENGTH_VALID;
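+ /* parse the virtio header checksum hints and set the corresponding
+ offload flags on the buffer */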
+ if (checksum_offload_enabled)
+ virtio_needs_csum (b0, hdr, &l4_proto, &l4_hdr_sz);
+
if (gso_enabled)
- fill_gso_buffer_flags (b0, hdr);
+ fill_gso_buffer_flags (b0, hdr, l4_proto, l4_hdr_sz);
vnet_buffer (b0)->sw_if_index[VLIB_RX] = vif->sw_if_index;
vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
{
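+ /* pick the input function variant: GSO-enabled interfaces also get
+ checksum offload */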
if (vif->gso_enabled)
n_rx += virtio_device_input_inline (vm, node, frame, vif,
- dq->queue_id, 1);
+ dq->queue_id, 1, 1);
+ else if (vif->csum_offload_enabled)
+ n_rx += virtio_device_input_inline (vm, node, frame, vif,
+ dq->queue_id, 0, 1);
else
n_rx += virtio_device_input_inline (vm, node, frame, vif,
- dq->queue_id, 0);
+ dq->queue_id, 0, 0);
}
}