ASSERT (vui->virtio_net_hdr_sz < VLIB_BUFFER_PRE_DATA_SIZE);
vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, vui->hw_if_index);
if (vui->enable_gso &&
- (vui->features & (1ULL << FEAT_VIRTIO_NET_F_GUEST_CSUM)))
- hw->flags |= VNET_HW_INTERFACE_FLAG_SUPPORTS_GSO;
+ ((vui->features & FEATURE_VIRTIO_NET_F_HOST_GUEST_TSO_FEATURE_BITS)
+ == FEATURE_VIRTIO_NET_F_HOST_GUEST_TSO_FEATURE_BITS))
+ hw->flags |=
+ (VNET_HW_INTERFACE_FLAG_SUPPORTS_GSO |
+ VNET_HW_INTERFACE_FLAG_SUPPORTS_TX_L4_CKSUM_OFFLOAD);
else
- hw->flags &= ~VNET_HW_INTERFACE_FLAG_SUPPORTS_GSO;
+ hw->flags &= ~(VNET_HW_INTERFACE_FLAG_SUPPORTS_GSO |
+ VNET_HW_INTERFACE_FLAG_SUPPORTS_TX_L4_CKSUM_OFFLOAD);
vnet_hw_interface_set_flags (vnm, vui->hw_if_index, 0);
vui->is_ready = 0;
vhost_user_update_iface_state (vui);
vui->nregions++;
}
+
+ /*
+ * Re-compute the desc, used, and avail descriptor table pointers for
+ * each vring whose desc, used, and avail addresses are all set.
+ */
+ for (q = 0; q < VHOST_VRING_MAX_N; q++)
+ {
+ if (vui->vrings[q].desc_user_addr &&
+ vui->vrings[q].used_user_addr && vui->vrings[q].avail_user_addr)
+ {
+ vui->vrings[q].desc =
+ map_user_mem (vui, vui->vrings[q].desc_user_addr);
+ vui->vrings[q].used =
+ map_user_mem (vui, vui->vrings[q].used_user_addr);
+ vui->vrings[q].avail =
+ map_user_mem (vui, vui->vrings[q].avail_user_addr);
+ }
+ }
vlib_worker_thread_barrier_release (vm);
break;
goto close_socket;
}
+ vui->vrings[msg.state.index].desc_user_addr = msg.addr.desc_user_addr;
+ vui->vrings[msg.state.index].used_user_addr = msg.addr.used_user_addr;
+ vui->vrings[msg.state.index].avail_user_addr = msg.addr.avail_user_addr;
+
vlib_worker_thread_barrier_sync (vm);
vui->vrings[msg.state.index].desc = desc;
vui->vrings[msg.state.index].used = used;