set_checksum_offsets (vlib_buffer_t * b, virtio_net_hdr_v1_t * hdr,
const int is_l2)
{
+ u32 oflags = vnet_buffer2 (b)->oflags;
+
if (b->flags & VNET_BUFFER_F_IS_IP4)
{
ip4_header_t *ip4;
0 /* ip6 */ );
hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
hdr->csum_start = gho.l4_hdr_offset; // 0x22;
- if (b->flags & VNET_BUFFER_F_OFFLOAD_TCP_CKSUM)
+ if (oflags & VNET_BUFFER_OFFLOAD_F_TCP_CKSUM)
{
hdr->csum_offset = STRUCT_OFFSET_OF (tcp_header_t, checksum);
}
- else if (b->flags & VNET_BUFFER_F_OFFLOAD_UDP_CKSUM)
+ else if (oflags & VNET_BUFFER_OFFLOAD_F_UDP_CKSUM)
{
hdr->csum_offset = STRUCT_OFFSET_OF (udp_header_t, checksum);
}
*/
ip4 =
(ip4_header_t *) (vlib_buffer_get_current (b) + gho.l3_hdr_offset);
- if (b->flags & VNET_BUFFER_F_OFFLOAD_IP_CKSUM)
+ if (oflags & VNET_BUFFER_OFFLOAD_F_IP_CKSUM)
ip4->checksum = ip4_header_checksum (ip4);
}
else if (b->flags & VNET_BUFFER_F_IS_IP6)
1 /* ip6 */ );
hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
hdr->csum_start = gho.l4_hdr_offset; // 0x36;
- if (b->flags & VNET_BUFFER_F_OFFLOAD_TCP_CKSUM)
+ if (oflags & VNET_BUFFER_OFFLOAD_F_TCP_CKSUM)
{
hdr->csum_offset = STRUCT_OFFSET_OF (tcp_header_t, checksum);
}
- else if (b->flags & VNET_BUFFER_F_OFFLOAD_UDP_CKSUM)
+ else if (oflags & VNET_BUFFER_OFFLOAD_F_UDP_CKSUM)
{
hdr->csum_offset = STRUCT_OFFSET_OF (udp_header_t, checksum);
}
set_gso_offsets (vlib_buffer_t * b, virtio_net_hdr_v1_t * hdr,
const int is_l2)
{
+ u32 oflags = vnet_buffer2 (b)->oflags;
+
if (b->flags & VNET_BUFFER_F_IS_IP4)
{
ip4_header_t *ip4;
* virtio devices do not support IP4 checksum offload. So driver takes care
* of it while doing tx.
*/
- if (b->flags & VNET_BUFFER_F_OFFLOAD_IP_CKSUM)
+ if (oflags & VNET_BUFFER_OFFLOAD_F_IP_CKSUM)
ip4->checksum = ip4_header_checksum (ip4);
}
else if (b->flags & VNET_BUFFER_F_IS_IP6)
goto done;
}
}
- else if (b->flags & (VNET_BUFFER_F_OFFLOAD_TCP_CKSUM |
- VNET_BUFFER_F_OFFLOAD_UDP_CKSUM))
+ else if (b->flags & VNET_BUFFER_F_OFFLOAD)
{
if (csum_offload)
set_checksum_offsets (b, hdr, is_l2);
goto done;
}
}
- else if (b->flags & (VNET_BUFFER_F_OFFLOAD_TCP_CKSUM |
- VNET_BUFFER_F_OFFLOAD_UDP_CKSUM))
+ else if (b->flags & VNET_BUFFER_F_OFFLOAD)
{
if (csum_offload)
set_checksum_offsets (b, hdr, is_l2);
if (n_left != n_vectors || n_buffers != n_buffers_left)
{
- CLIB_MEMORY_STORE_BARRIER ();
- vring->avail->idx = avail;
+ clib_atomic_store_seq_cst (&vring->avail->idx, avail);
vring->desc_next = next;
vring->desc_in_use = used;
- if ((vring->used->flags & VRING_USED_F_NO_NOTIFY) == 0)
+ if ((clib_atomic_load_seq_cst (&vring->used->flags) &
+ VRING_USED_F_NO_NOTIFY) == 0)
virtio_kick (vm, vring, vif);
}
if (packed)
return virtio_interface_tx_packed_gso_inline (vm, node, vif, type, vring,
buffers, n_left,
- 1 /* do_gso */ ,
- 1 /* checksum offload */ );
+ do_gso, csum_offload);
else
return virtio_interface_tx_split_gso_inline (vm, node, vif, type, vring,
buffers, n_left,
- 1 /* do_gso */ ,
- 1 /* checksum offload */ );
+ do_gso, csum_offload);
}
static_always_inline u16
u32 *buffers = vlib_frame_vector_args (frame);
u32 to[GRO_TO_VECTOR_SIZE (n_left)];
int packed = vif->is_packed;
+ u16 n_vectors = frame->n_vectors;
clib_spinlock_lock_if_init (&vring->lockp);
- if ((vring->used->flags & VRING_USED_F_NO_NOTIFY) == 0 &&
- (vring->last_kick_avail_idx != vring->avail->idx))
- virtio_kick (vm, vring, vif);
-
if (vif->packet_coalesce)
{
- n_left = vnet_gro_inline (vm, vring->flow_table, buffers, n_left, to);
+ n_vectors = n_left =
+ vnet_gro_inline (vm, vring->flow_table, buffers, n_left, to);
buffers = to;
}
if (vif->type == VIRTIO_IF_TYPE_TAP)
n_left = virtio_interface_tx_inline (vm, node, vif, vring,
VIRTIO_IF_TYPE_TAP,
- &buffers[frame->n_vectors - n_left],
+ &buffers[n_vectors - n_left],
n_left, packed);
else if (vif->type == VIRTIO_IF_TYPE_PCI)
n_left = virtio_interface_tx_inline (vm, node, vif, vring,
VIRTIO_IF_TYPE_PCI,
- &buffers[frame->n_vectors - n_left],
+ &buffers[n_vectors - n_left],
n_left, packed);
else if (vif->type == VIRTIO_IF_TYPE_TUN)
n_left = virtio_interface_tx_inline (vm, node, vif, vring,
VIRTIO_IF_TYPE_TUN,
- &buffers[frame->n_vectors - n_left],
+ &buffers[n_vectors - n_left],
n_left, packed);
else
ASSERT (0);
{
u16 n_buffered = virtio_vring_buffering_store_packets (vring->buffering,
&buffers
- [frame->n_vectors
+ [n_vectors
- n_left],
n_left);
n_left -= n_buffered;
}
if (n_left)
virtio_interface_drop_inline (vm, node->node_index,
- &buffers[frame->n_vectors - n_left], n_left,
+ &buffers[n_vectors - n_left], n_left,
VIRTIO_TX_ERROR_NO_FREE_SLOTS);
clib_spinlock_unlock_if_init (&vring->lockp);
/* Nothing for now */
}
+/* Enable device-to-driver notifications (interrupts) on an RX vring by
+ * clearing the suppression bit appropriate to the ring layout: the packed
+ * ring uses the driver event area's VRING_EVENT_F_DISABLE bit, the split
+ * ring uses VRING_AVAIL_F_NO_INTERRUPT in the avail ring's flags.
+ * NOTE(review): plain RMW on a field the device also reads — presumably
+ * ordering is handled by the caller/virtio_kick path; confirm. */
+static_always_inline void
+virtio_set_rx_interrupt (virtio_if_t * vif, virtio_vring_t * vring)
+{
+ if (vif->is_packed)
+ vring->driver_event->flags &= ~VRING_EVENT_F_DISABLE;
+ else
+ vring->avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
+}
+
+/* Suppress device-to-driver notifications on an RX vring (polling mode) by
+ * setting the layout-specific suppression bit: VRING_EVENT_F_DISABLE in the
+ * driver event area for packed rings, VRING_AVAIL_F_NO_INTERRUPT in the
+ * avail ring's flags for split rings.  Mirror image of
+ * virtio_set_rx_interrupt(). */
+static_always_inline void
+virtio_set_rx_polling (virtio_if_t * vif, virtio_vring_t * vring)
+{
+ if (vif->is_packed)
+ vring->driver_event->flags |= VRING_EVENT_F_DISABLE;
+ else
+ vring->avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
+}
+
static clib_error_t *
virtio_interface_rx_mode_change (vnet_main_t * vnm, u32 hw_if_index, u32 qid,
vnet_hw_if_rx_mode mode)
if (vif->type == VIRTIO_IF_TYPE_PCI && !(vif->support_int_mode))
{
- rx_vring->avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
+ virtio_set_rx_polling (vif, rx_vring);
return clib_error_return (0, "interrupt mode is not supported");
}
virtio_send_interrupt_node.index,
VIRTIO_EVENT_STOP_TIMER, 0);
}
- rx_vring->avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
+ virtio_set_rx_polling (vif, rx_vring);
}
else
{
virtio_send_interrupt_node.index,
VIRTIO_EVENT_START_TIMER, 0);
}
- rx_vring->avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
+ virtio_set_rx_interrupt (vif, rx_vring);
}
rx_vring->mode = mode;