#include <rte_ethdev_vdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
-#include <rte_vdev.h>
+#include <rte_bus_vdev.h>
#include <rte_kvargs.h>
#include <rte_vhost.h>
#include <rte_spinlock.h>
#define ETH_VHOST_QUEUES_ARG "queues"
#define ETH_VHOST_CLIENT_ARG "client"
#define ETH_VHOST_DEQUEUE_ZERO_COPY "dequeue-zero-copy"
+#define ETH_VHOST_IOMMU_SUPPORT "iommu-support"
#define VHOST_MAX_PKT_BURST 32
static const char *valid_arguments[] = {
	ETH_VHOST_QUEUES_ARG,
	ETH_VHOST_CLIENT_ARG,
	ETH_VHOST_DEQUEUE_ZERO_COPY,
+	ETH_VHOST_IOMMU_SUPPORT,
	NULL
};
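+/*
+ * Device argument summary (as documented for this PMD):
+ *   iface=<path>             path of the vhost-user unix socket
+ *   queues=<int>             number of queue pairs
+ *   client=<0|1>             run as a vhost-user client instead of server
+ *   dequeue-zero-copy=<0|1>  enable dequeue zero copy
+ *   iommu-support=<0|1>      enable vhost IOMMU (vIOMMU) support
+ */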
	rte_atomic32_t while_queuing;
	struct pmd_internal *internal;
	struct rte_mempool *mb_pool;
-	uint8_t port;
+	uint16_t port;
	uint16_t virtqueue_id;
	struct vhost_stats stats;
};
	char *dev_name;
	char *iface_name;
	uint16_t max_queues;
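+	/* vhost device id; -1 until a connection is established */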
+	int vid;
	rte_atomic32_t started;
};
	unsigned int i;
	int allow_queuing = 1;
+	if (!dev->data->rx_queues || !dev->data->tx_queues)
+		return;
+
	if (rte_atomic32_read(&internal->started) == 0 ||
	    rte_atomic32_read(&internal->dev_attached) == 0)
		allow_queuing = 0;
	}
}
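+/*
+ * Bind each configured RX/TX queue to the current vhost device id and
+ * owning port so the datapath can find them.
+ */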
+static void
+queue_setup(struct rte_eth_dev *eth_dev, struct pmd_internal *internal)
+{
+	struct vhost_queue *vq;
+	int i;
+
+	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
+		vq = eth_dev->data->rx_queues[i];
+		if (!vq)
+			continue;
+		vq->vid = internal->vid;
+		vq->internal = internal;
+		vq->port = eth_dev->data->port_id;
+	}
+	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
+		vq = eth_dev->data->tx_queues[i];
+		if (!vq)
+			continue;
+		vq->vid = internal->vid;
+		vq->internal = internal;
+		vq->port = eth_dev->data->port_id;
+	}
+}
+
static int
new_device(int vid)
{
	struct rte_eth_dev *eth_dev;
	struct internal_list *list;
	struct pmd_internal *internal;
-	struct vhost_queue *vq;
	unsigned i;
	char ifname[PATH_MAX];
#ifdef RTE_LIBRTE_VHOST_NUMA
	eth_dev->data->numa_node = newnode;
#endif
-	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
-		vq = eth_dev->data->rx_queues[i];
-		if (vq == NULL)
-			continue;
-		vq->vid = vid;
-		vq->internal = internal;
-		vq->port = eth_dev->data->port_id;
-	}
-	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
-		vq = eth_dev->data->tx_queues[i];
-		if (vq == NULL)
-			continue;
-		vq->vid = vid;
-		vq->internal = internal;
-		vq->port = eth_dev->data->port_id;
-	}
+	internal->vid = vid;
+	if (rte_atomic32_read(&internal->started) == 1)
+		queue_setup(eth_dev, internal);
+	else
+		RTE_LOG(INFO, PMD, "RX/TX queues do not exist yet\n");
	for (i = 0; i < rte_vhost_get_vring_num(vid); i++)
		rte_vhost_enable_guest_notification(vid, i, 0);
	RTE_LOG(INFO, PMD, "New connection established\n");
-	_rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);
+	_rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC,
+		NULL, NULL);
	return 0;
}
	eth_dev->data->dev_link.link_status = ETH_LINK_DOWN;
-	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
-		vq = eth_dev->data->rx_queues[i];
-		if (vq == NULL)
-			continue;
-		vq->vid = -1;
-	}
-	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
-		vq = eth_dev->data->tx_queues[i];
-		if (vq == NULL)
-			continue;
-		vq->vid = -1;
+	if (eth_dev->data->rx_queues && eth_dev->data->tx_queues) {
+		for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
+			vq = eth_dev->data->rx_queues[i];
+			if (!vq)
+				continue;
+			vq->vid = -1;
+		}
+		for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
+			vq = eth_dev->data->tx_queues[i];
+			if (!vq)
+				continue;
+			vq->vid = -1;
+		}
	}
	state = vring_states[eth_dev->data->port_id];
	RTE_LOG(INFO, PMD, "Connection closed\n");
-	_rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC, NULL);
+	_rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_INTR_LSC,
+		NULL, NULL);
}
static int
RTE_LOG(INFO, PMD, "vring%u is %s\n",
vring, enable ? "enabled" : "disabled");
- _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_QUEUE_STATE, NULL);
+ _rte_eth_dev_callback_process(eth_dev, RTE_ETH_EVENT_QUEUE_STATE,
+ NULL, NULL);
return 0;
}
};
int
-rte_eth_vhost_get_queue_event(uint8_t port_id,
+rte_eth_vhost_get_queue_event(uint16_t port_id,
	struct rte_eth_vhost_queue_event *event)
{
	struct rte_vhost_vring_state *state;
}
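+/*
+ * Typical consumer (a sketch, not part of this file): from an application
+ * callback registered for RTE_ETH_EVENT_QUEUE_STATE, drain all pending
+ * events; handle_queue_event() is a hypothetical application hook.
+ *
+ *	struct rte_eth_vhost_queue_event ev;
+ *
+ *	while (rte_eth_vhost_get_queue_event(port_id, &ev) == 0)
+ *		handle_queue_event(ev.queue_id, ev.rx, ev.enable);
+ */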
int
-rte_eth_vhost_get_vid_from_port_id(uint8_t port_id)
+rte_eth_vhost_get_vid_from_port_id(uint16_t port_id)
{
	struct internal_list *list;
	struct rte_eth_dev *eth_dev;
}
static int
-eth_dev_start(struct rte_eth_dev *dev)
+eth_dev_start(struct rte_eth_dev *eth_dev)
{
-	struct pmd_internal *internal = dev->data->dev_private;
+	struct pmd_internal *internal = eth_dev->data->dev_private;
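+	/*
+	 * Queues can be created after the vhost connection is already up
+	 * (e.g. a vdev attached at run time), so (re)bind them to the
+	 * device on start.
+	 */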
+	queue_setup(eth_dev, internal);
	rte_atomic32_set(&internal->started, 1);
-	update_queuing_status(dev);
+	update_queuing_status(eth_dev);
	return 0;
}
	pthread_mutex_unlock(&internal_list_lock);
	rte_free(list);
-	for (i = 0; i < dev->data->nb_rx_queues; i++)
-		rte_free(dev->data->rx_queues[i]);
-	for (i = 0; i < dev->data->nb_tx_queues; i++)
-		rte_free(dev->data->tx_queues[i]);
+	if (dev->data->rx_queues)
+		for (i = 0; i < dev->data->nb_rx_queues; i++)
+			rte_free(dev->data->rx_queues[i]);
+
+	if (dev->data->tx_queues)
+		for (i = 0; i < dev->data->nb_tx_queues; i++)
+			rte_free(dev->data->tx_queues[i]);
	rte_free(dev->data->mac_addrs);
	free(internal->dev_name);
	dev_info->min_rx_bufsize = 0;
}
-static void
+static int
eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
	unsigned i;
	stats->oerrors = tx_missed_total;
	stats->ibytes = rx_total_bytes;
	stats->obytes = tx_total_bytes;
+
+	return 0;
}
static void
	return 0;
}
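+/*
+ * Backs rte_eth_rx_queue_count(): report how many packets are pending in
+ * the vhost virtqueue behind the given RX queue.
+ */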
+static uint32_t
+eth_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+	struct vhost_queue *vq;
+
+	vq = dev->data->rx_queues[rx_queue_id];
+	if (vq == NULL)
+		return 0;
+
+	return rte_vhost_rx_queue_count(vq->vid, vq->virtqueue_id);
+}
+
static const struct eth_dev_ops ops = {
	.dev_start = eth_dev_start,
	.dev_stop = eth_dev_stop,
	.rx_queue_release = eth_queue_release,
	.tx_queue_release = eth_queue_release,
	.tx_done_cleanup = eth_tx_done_cleanup,
+	.rx_queue_count = eth_rx_queue_count,
	.link_update = eth_link_update,
	.stats_get = eth_stats_get,
	.stats_reset = eth_stats_reset,
	data->nb_rx_queues = queues;
	data->nb_tx_queues = queues;
	internal->max_queues = queues;
+	internal->vid = -1;
	data->dev_link = pmd_link;
	data->mac_addrs = eth_addr;
-	data->dev_flags =
-		RTE_ETH_DEV_DETACHABLE | RTE_ETH_DEV_INTR_LSC;
+	data->dev_flags = RTE_ETH_DEV_INTR_LSC;
	eth_dev->dev_ops = &ops;
	uint64_t flags = 0;
	int client_mode = 0;
	int dequeue_zero_copy = 0;
+	int iommu_support = 0;
	RTE_LOG(INFO, PMD, "Initializing pmd_vhost for %s\n",
		rte_vdev_device_name(dev));
		flags |= RTE_VHOST_USER_DEQUEUE_ZERO_COPY;
	}
+	if (rte_kvargs_count(kvlist, ETH_VHOST_IOMMU_SUPPORT) == 1) {
+		ret = rte_kvargs_process(kvlist, ETH_VHOST_IOMMU_SUPPORT,
+					 &open_int, &iommu_support);
+		if (ret < 0)
+			goto out_free;
+
+		if (iommu_support)
+			flags |= RTE_VHOST_USER_IOMMU_SUPPORT;
+	}
+
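+	/*
+	 * The accumulated flags (client mode, zero copy, IOMMU support) are
+	 * later passed to rte_vhost_driver_register() when the socket is
+	 * registered for this device.
+	 */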
	if (dev->device.numa_node == SOCKET_ID_ANY)
		dev->device.numa_node = rte_socket_id();
RTE_PMD_REGISTER_ALIAS(net_vhost, eth_vhost);
RTE_PMD_REGISTER_PARAM_STRING(net_vhost,
"iface=<ifc> "
- "queues=<int>");
+ "queues=<int> "
+ "client=<0|1> "
+ "dequeue-zero-copy=<0|1> "
+ "iommu-support=<0|1>");