}
}
- memset(mz->addr, 0, sizeof(mz->len));
+ memset(mz->addr, 0, mz->len);
vq->vq_ring_mem = mz->phys_addr;
vq->vq_ring_virt_mem = mz->addr;
int queue_type;
uint16_t i;
+ if (hw->vqs == NULL)
+ return;
+
for (i = 0; i < nr_vq; i++) {
vq = hw->vqs[i];
if (!vq)
}
rte_free(vq);
+ hw->vqs[i] = NULL;
}
rte_free(hw->vqs);
+ hw->vqs = NULL;
}
static int
rte_eth_copy_pci_info(eth_dev, pci_dev);
- /* If host does not support status then disable LSC */
- if (!vtpci_with_feature(hw, VIRTIO_NET_F_STATUS))
- eth_dev->data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC;
- else
+ /* If host does not support both status and MSI-X then disable LSC */
+ if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS) && hw->use_msix)
eth_dev->data->dev_flags |= RTE_ETH_DEV_INTR_LSC;
+ else
+ eth_dev->data->dev_flags &= ~RTE_ETH_DEV_INTR_LSC;
rx_func_get(eth_dev);
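
Note that the LSC flag now also depends on MSI-X being usable (hw->use_msix), so an application should only request link-state-change interrupts when the port actually advertises them, and poll the link otherwise. A minimal sketch against the 16.11-era ethdev API; request_lsc_if_supported() and the direct dev_flags check are illustrative assumptions, not part of this patch:

#include <rte_ethdev.h>

/* Illustrative helper: ask for LSC interrupts only when the port
 * advertises RTE_ETH_DEV_INTR_LSC (virtio clears it without MSI-X);
 * callers would fall back to polling rte_eth_link_get_nowait() otherwise. */
static void
request_lsc_if_supported(uint8_t port_id, struct rte_eth_conf *conf)
{
	if (rte_eth_devices[port_id].data->dev_flags & RTE_ETH_DEV_INTR_LSC)
		conf->intr_conf.lsc = 1;
	else
		conf->intr_conf.lsc = 0;
}
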
{
const struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
struct virtio_hw *hw = dev->data->dev_private;
- uint64_t req_features;
- int ret;
PMD_INIT_LOG(DEBUG, "configure");
- req_features = VIRTIO_PMD_DEFAULT_GUEST_FEATURES;
- if (rxmode->hw_ip_checksum)
- req_features |= (1ULL << VIRTIO_NET_F_GUEST_CSUM);
- if (rxmode->enable_lro)
- req_features |=
- (1ULL << VIRTIO_NET_F_GUEST_TSO4) |
- (1ULL << VIRTIO_NET_F_GUEST_TSO6);
-
- /* if request features changed, reinit the device */
- if (req_features != hw->req_guest_features) {
- ret = virtio_init_device(dev, req_features);
- if (ret < 0)
- return ret;
- }
- if (rxmode->hw_ip_checksum &&
- !vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_CSUM)) {
+ /* Virtio does L4 checksum but not L3! */
+ if (rxmode->hw_ip_checksum) {
PMD_DRV_LOG(NOTICE,
- "rx ip checksum not available on this host");
+ "virtio does not support IP checksum");
return -ENOTSUP;
}
- if (rxmode->enable_lro &&
- (!vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO4) ||
- !vtpci_with_feature(hw, VIRTIO_NET_F_GUEST_TSO4))) {
+ if (rxmode->enable_lro) {
PMD_DRV_LOG(NOTICE,
- "lro not available on this host");
+ "virtio does not support Large Receive Offload");
return -ENOTSUP;
}
}
}
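
With the checks above, configuring a virtio port now fails with -ENOTSUP whenever the Rx mode asks for IP checksum offload or LRO, so both must stay disabled in the port configuration. A minimal sketch; configure_virtio_port() and its parameters are illustrative names, not part of this patch:

#include <rte_ethdev.h>

/* Illustrative only: configure a virtio port with the Rx offloads that
 * this change rejects (IP checksum, LRO) left disabled. */
static int
configure_virtio_port(uint8_t port_id, uint16_t nb_rxq, uint16_t nb_txq)
{
	static const struct rte_eth_conf port_conf = {
		.rxmode = {
			.hw_ip_checksum = 0,	/* virtio cannot do L3 checksum */
			.enable_lro     = 0,	/* LRO no longer claimed */
		},
	};

	return rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &port_conf);
}
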
- /* Initialize Link state */
- virtio_dev_link_update(dev, 0);
-
/*Notify the backend
*Otherwise the tap backend might already stop its queue due to fullness.
*vhost backend will have no chance to be waked up
VIRTQUEUE_DUMP(txvq->vq);
}
+ hw->started = 1;
+
+ /* Initialize Link state */
+ virtio_dev_link_update(dev, 0);
+
return 0;
}
static void
virtio_dev_stop(struct rte_eth_dev *dev)
{
+ struct virtio_hw *hw = dev->data->dev_private;
struct rte_eth_link link;
PMD_INIT_LOG(DEBUG, "stop");
if (dev->data->dev_conf.intr_conf.lsc)
rte_intr_disable(&dev->pci_dev->intr_handle);
+ hw->started = 0;
memset(&link, 0, sizeof(link));
virtio_dev_atomic_write_link_status(dev, &link);
}
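
Because stop now clears hw->started (and start sets it only once the queues are ready), link_update can report the link as down from software state alone after the port has been stopped. A minimal sketch of the visible behaviour; stop_and_check_link() and port_id are illustrative and error handling is omitted:

#include <rte_ethdev.h>

/* Illustrative only: after rte_eth_dev_stop(), the virtio PMD is expected
 * to report ETH_LINK_DOWN without querying device config space. */
static void
stop_and_check_link(uint8_t port_id)
{
	struct rte_eth_link link;

	rte_eth_dev_stop(port_id);
	rte_eth_link_get_nowait(port_id, &link);
	/* link.link_status should now read ETH_LINK_DOWN */
}
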
link.link_duplex = ETH_LINK_FULL_DUPLEX;
link.link_speed = SPEED_10G;
- if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS)) {
+ if (hw->started == 0) {
+ link.link_status = ETH_LINK_DOWN;
+ } else if (vtpci_with_feature(hw, VIRTIO_NET_F_STATUS)) {
PMD_INIT_LOG(DEBUG, "Get link status from hw");
vtpci_read_dev_config(hw,
offsetof(struct virtio_net_config, status),
};
dev_info->rx_offload_capa =
DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_UDP_CKSUM |
- DEV_RX_OFFLOAD_TCP_LRO;
+ DEV_RX_OFFLOAD_UDP_CKSUM;
+
dev_info->tx_offload_capa = 0;
if (hw->guest_features & (1ULL << VIRTIO_NET_F_CSUM)) {