#define DBG_SOCK(args...)
#endif
+#if DPDK_VHOST_USER
+
+/* Human-readable names for vhost-user protocol message ids, used only for
+ * debug logging (hence __attribute__((unused)) when DBG_SOCK is compiled out).
+ * The patch unconditionally keeps the multiqueue/protocol-feature entries
+ * that were previously gated on RTE_VERSION >= 2.2. */
static const char *vhost_message_str[] __attribute__((unused)) = {
[VHOST_USER_NONE] = "VHOST_USER_NONE",
[VHOST_USER_GET_FEATURES] = "VHOST_USER_GET_FEATURES",
[VHOST_USER_SET_VRING_KICK] = "VHOST_USER_SET_VRING_KICK",
[VHOST_USER_SET_VRING_CALL] = "VHOST_USER_SET_VRING_CALL",
[VHOST_USER_SET_VRING_ERR] = "VHOST_USER_SET_VRING_ERR",
-#if RTE_VERSION >= RTE_VERSION_NUM(2, 2, 0, 0)
[VHOST_USER_GET_PROTOCOL_FEATURES] = "VHOST_USER_GET_PROTOCOL_FEATURES",
[VHOST_USER_SET_PROTOCOL_FEATURES] = "VHOST_USER_SET_PROTOCOL_FEATURES",
[VHOST_USER_GET_QUEUE_NUM] = "VHOST_USER_GET_QUEUE_NUM",
[VHOST_USER_SET_VRING_ENABLE] = "VHOST_USER_SET_VRING_ENABLE",
-#endif
};
-#if RTE_VERSION >= RTE_VERSION_NUM(2, 2, 0, 0)
static int dpdk_vhost_user_set_vring_enable(u32 hw_if_index,
u8 idx, int enable);
-#endif
/*
* DPDK vhost-user functions
dpdk_device_t *xd =
dpdk_vhost_user_device_from_hw_if_index(hw_if_index);
assert(xd);
-#if RTE_VERSION >= RTE_VERSION_NUM(2, 2, 0, 0)
xd->vu_vhost_dev.virtqueue[idx]->enabled = 0;
-#else
- xd->vu_is_running = 0;
-#endif
}
+/* Disable all virtqueues of the device and clear its running flag.
+ * numqs = virt_qp_nb queue pairs * VIRTIO_QNUM (RX+TX per pair), so every
+ * ring of a multiqueue device is covered; the old RTE_VERSION guard is
+ * dropped by this patch, making the per-ring disable unconditional. */
static void disable_interface(dpdk_device_t * xd)
{
-#if RTE_VERSION >= RTE_VERSION_NUM(2, 2, 0, 0)
u8 idx;
int numqs = xd->vu_vhost_dev.virt_qp_nb * VIRTIO_QNUM;
for (idx = 0; idx < numqs; idx++)
xd->vu_vhost_dev.virtqueue[idx]->enabled = 0;
-#endif
xd->vu_is_running = 0;
}
int num_qpairs = 1;
dpdk_vu_intf_t *vui = NULL;
-#if RTE_VERSION >= RTE_VERSION_NUM(2, 2, 0, 0)
num_qpairs = dm->use_rss < 1 ? 1 : tm->n_vlib_mains;
-#endif
dpdk_device_t * xd = NULL;
u8 addr[6];
xd->dev_type = VNET_DPDK_DEV_VHOST_USER;
xd->rx_q_used = num_qpairs;
xd->tx_q_used = num_qpairs;
-#if RTE_VERSION >= RTE_VERSION_NUM(2, 2, 0, 0)
xd->vu_vhost_dev.virt_qp_nb = num_qpairs;
-#endif
vec_validate_aligned (xd->rx_vectors, xd->rx_q_used, CLIB_CACHE_LINE_BYTES);
if (tm->n_vlib_mains == 1 && dpdk_input_node.state != VLIB_NODE_STATE_POLLING)
vlib_node_set_state (vm, dpdk_input_node.index, VLIB_NODE_STATE_POLLING);
- if (tm->n_vlib_mains > 1 && tm->main_thread_is_io_node)
- vlib_node_set_state (vm, dpdk_io_input_node.index, VLIB_NODE_STATE_POLLING);
-
- if (tm->n_vlib_mains > 1 && !tm->main_thread_is_io_node)
+ if (tm->n_vlib_mains > 1)
vlib_node_set_state (vlib_mains[cpu], dpdk_input_node.index,
VLIB_NODE_STATE_POLLING);
next_cpu++;
}
#endif
-#if RTE_VERSION >= RTE_VERSION_NUM(2, 2, 0, 0)
static clib_error_t *
dpdk_vhost_user_set_protocol_features(u32 hw_if_index, u64 prot_features)
{
xd->vu_vhost_dev.protocol_features = prot_features;
return 0;
}
-#endif
static clib_error_t *
dpdk_vhost_user_get_features(u32 hw_if_index, u64 * features)
int numqs = VIRTIO_QNUM;
u8 idx;
-#if RTE_VERSION >= RTE_VERSION_NUM(2, 2, 0, 0)
int prot_feature = features &
(1ULL << VHOST_USER_F_PROTOCOL_FEATURES);
numqs = xd->vu_vhost_dev.virt_qp_nb * VIRTIO_QNUM;
-#endif
for (idx = 0; idx < numqs; idx++) {
xd->vu_vhost_dev.virtqueue[idx]->vhost_hlen = hdr_len;
-#if RTE_VERSION >= RTE_VERSION_NUM(2, 2, 0, 0)
/*
* Spec says, if F_PROTOCOL_FEATURE is not set by the
* slave, then all the vrings should start off as
*/
if (! prot_feature)
dpdk_vhost_user_set_vring_enable(hw_if_index, idx, 1);
-#endif
}
return 0;
mapped_address += vum->regions[i].mmap_offset;
vui->region_addr[i] = mapped_address;
vui->region_fd[i] = fd[i];
+ vui->region_offset[i] = vum->regions[i].mmap_offset;
mem->regions[i].address_offset = mapped_address - mem->regions[i].guest_phys_address;
+ DBG_SOCK("map memory region %d addr 0x%lx off 0x%lx len 0x%lx",
+ i, vui->region_addr[i], vui->region_offset[i], mapped_size);
+
if (vum->regions[i].guest_phys_addr == 0) {
mem->base_address = vum->regions[i].userspace_addr;
mem->mapped_address = mem->regions[i].address_offset;
* and stop ring upon receiving VHOST_USER_GET_VRING_BASE.
*/
DBG_SOCK("Stopping vring Q %u of device %d", idx, hw_if_index);
-#if RTE_VERSION >= RTE_VERSION_NUM(2, 2, 0, 0)
dpdk_vu_intf_t *vui = xd->vu_intf;
vui->vrings[idx].enabled = 0; /* Reset local copy */
vui->vrings[idx].callfd = -1; /* Reset FD */
DBG_SOCK("Device %d disabled", hw_if_index);
xd->vu_is_running = 0;
}
-#else
- vq->desc = NULL;
- vq->used = NULL;
- vq->avail = NULL;
- xd->vu_is_running = 0;
-#endif
return 0;
}
{
dpdk_main_t * dm = &dpdk_main;
dpdk_device_t * xd;
-#if RTE_VERSION >= RTE_VERSION_NUM(2, 2, 0, 0)
dpdk_vu_vring *vring;
-#endif
struct vhost_virtqueue *vq0, *vq1, *vq;
int index, vu_is_running = 0;
vq = xd->vu_vhost_dev.virtqueue[idx];
vq->kickfd = fd;
-#if RTE_VERSION >= RTE_VERSION_NUM(2, 2, 0, 0)
vring = &xd->vu_intf->vrings[idx];
vq->enabled = (vq->desc && vq->avail && vq->used && vring->enabled) ? 1 : 0;
-#endif
/*
* Set xd->vu_is_running if at least one pair of
* RX/TX queues are enabled.
*/
int numqs = VIRTIO_QNUM;
-#if RTE_VERSION >= RTE_VERSION_NUM(2, 2, 0, 0)
numqs = xd->vu_vhost_dev.virt_qp_nb * VIRTIO_QNUM;
-#endif
for (index = 0; index < numqs; index += 2) {
vq0 = xd->vu_vhost_dev.virtqueue[index]; /* RX */
vq1 = xd->vu_vhost_dev.virtqueue[index + 1]; /* TX */
-#if RTE_VERSION >= RTE_VERSION_NUM(2, 2, 0, 0)
if (vq0->enabled && vq1->enabled)
-#else
- if (vq0->desc && vq0->avail && vq0->used &&
- vq1->desc && vq1->avail && vq1->used)
-#endif
{
vu_is_running = 1;
break;
return 0;
}
-#if RTE_VERSION >= RTE_VERSION_NUM(2, 2, 0, 0)
static int
dpdk_vhost_user_set_vring_enable(u32 hw_if_index, u8 idx, int enable)
{
return 0;
}
-#endif
static clib_error_t * dpdk_vhost_user_callfd_read_ready (unix_file_t * uf)
{
memset(vui, 0, sizeof(*vui));
vui->unix_fd = sockfd;
-#if RTE_VERSION >= RTE_VERSION_NUM(2, 2, 0, 0)
vui->num_vrings = xd->vu_vhost_dev.virt_qp_nb * VIRTIO_QNUM;
-#else
- vui->num_vrings = VIRTIO_QNUM;
-#endif
DBG_SOCK("dpdk_vhost_user_vui_init VRINGS: %d", vui->num_vrings);
vui->sock_is_server = is_server;
strncpy(vui->sock_filename, sock_filename, ARRAY_LEN(vui->sock_filename)-1);
xd->vlib_sw_if_index);
}
+/* Unmap and close every guest memory region previously mmap'ed for this
+ * vhost-user device, then reset the region count to zero.  Called on
+ * disconnect so a reconnecting guest can renegotiate its memory table.
+ * region_addr[i] == -1 marks a slot that was never mapped (or already
+ * unmapped), so it is skipped. */
+static void dpdk_unmap_all_mem_regions(dpdk_device_t * xd)
+{
+ int i, r;
+ dpdk_vu_intf_t *vui = xd->vu_intf;
+ struct virtio_memory * mem = xd->vu_vhost_dev.mem;
+
+ for (i=0; i<mem->nregions; i++) {
+ if (vui->region_addr[i] != -1) {
+
+ long page_sz = get_huge_page_size(vui->region_fd[i]);
+
+ /* The original mapping started at the page-aligned base
+ * (region_addr - mmap_offset); round the length up to the
+ * (huge-)page size so munmap covers the whole mapping.
+ * NOTE(review): assumes mmap_offset < page_sz so one added
+ * page suffices — matches how the region was mapped; confirm. */
+ ssize_t map_sz = (mem->regions[i].memory_size +
+ vui->region_offset[i] + page_sz) & ~(page_sz - 1);
+
+ r = munmap((void *)(vui->region_addr[i] - vui->region_offset[i]), map_sz);
+
+ /* NOTE(review): page_sz is long but printed with %x — harmless while
+ * DBG_SOCK expands to nothing, but %lx would be correct. */
+ DBG_SOCK("unmap memory region %d addr 0x%lx off 0x%lx len 0x%lx page_sz 0x%x",
+ i, vui->region_addr[i], vui->region_offset[i], map_sz, page_sz);
+
+ /* Mark slot unmapped before error handling so we never retry it. */
+ vui->region_addr[i]= -1;
+
+ if (r == -1) {
+ clib_unix_warning("failed to unmap memory region");
+ }
+ close(vui->region_fd[i]);
+ }
+ }
+ mem->nregions = 0;
+}
+
+/* Tear down the vhost-user session state for a device whose socket peer
+ * went away: bring the interface down, reset every vring (both the local
+ * shadow copy in vui->vrings and the DPDK vhost_virtqueue), clear the
+ * running flag, and release all mapped guest memory regions so a later
+ * reconnect starts from a clean slate. */
static inline void
dpdk_vhost_user_if_disconnect(dpdk_device_t * xd)
{
dpdk_vu_intf_t *vui = xd->vu_intf;
vnet_main_t * vnm = vnet_get_main();
dpdk_main_t * dm = &dpdk_main;
+ struct vhost_virtqueue *vq;
+ int q;
xd->admin_up = 0;
vnet_hw_interface_set_flags (vnm, xd->vlib_hw_if_index, 0);
vui->unix_fd = -1;
vui->is_up = 0;
+ for (q = 0; q < vui->num_vrings; q++) {
+ vq = xd->vu_vhost_dev.virtqueue[q];
+ vui->vrings[q].enabled = 0; /* Reset local copy */
+ vui->vrings[q].callfd = -1; /* Reset FD */
+ vq->enabled = 0;
+#if RTE_VERSION >= RTE_VERSION_NUM(16, 4, 0, 0)
+ /* Field only exists once DPDK gained vhost live-migration logging. */
+ vq->log_guest_addr = 0;
+#endif
+ /* Ring pointers referenced guest memory that is about to be unmapped. */
+ vq->desc = NULL;
+ vq->used = NULL;
+ vq->avail = NULL;
+ }
+ xd->vu_is_running = 0;
+
+ dpdk_unmap_all_mem_regions(xd);
DBG_SOCK("interface ifindex %d disconnected", xd->vlib_sw_if_index);
}
xd->vlib_hw_if_index);
break;
-#if RTE_VERSION >= RTE_VERSION_NUM(2, 2, 0, 0)
case VHOST_USER_GET_PROTOCOL_FEATURES:
DBG_SOCK("if %d msg VHOST_USER_GET_PROTOCOL_FEATURES",
xd->vlib_hw_if_index);
msg.u64 = xd->vu_vhost_dev.virt_qp_nb;
msg.size = sizeof(msg.u64);
break;
-#endif
default:
DBG_SOCK("unknown vhost-user message %d received. closing socket",
// init server socket on specified sock_filename
static int dpdk_vhost_user_init_server_sock(const char * sock_filename, int *sockfd)
{
- int rv = 0, len;
- struct sockaddr_un un;
+ int rv = 0;
+ struct sockaddr_un un = {};
int fd;
/* create listening socket */
fd = socket(AF_UNIX, SOCK_STREAM, 0);
/* remove if exists */
unlink( (char *) sock_filename);
- len = strlen((char *) un.sun_path) + strlen((char *) sock_filename);
-
- if (bind(fd, (struct sockaddr *) &un, len) == -1) {
+ if (bind(fd, (struct sockaddr *) &un, sizeof(un)) == -1) {
rv = VNET_API_ERROR_SYSCALL_ERROR_2;
goto error;
}
vlib_cli_output(vm, " avail.flags %x avail.idx %d used.flags %x used.idx %d\n",
vq->avail->flags, vq->avail->idx, vq->used->flags, vq->used->idx);
-#if RTE_VERSION >= RTE_VERSION_NUM(2, 2, 0, 0)
vlib_cli_output(vm, " kickfd %d callfd %d errfd %d enabled %d\n",
vq->kickfd, vq->callfd, vui->vrings[q].errfd, vq->enabled);
if (show_descr && vq->enabled) {
-#else
- vlib_cli_output(vm, " kickfd %d callfd %d errfd\n",
- vq->kickfd, vq->callfd, vui->vrings[q].errfd);
-
- if (show_descr) {
-#endif
vlib_cli_output(vm, "\n descriptor table:\n");
vlib_cli_output(vm, " id addr len flags next user_addr\n");
vlib_cli_output(vm, " ===== ================== ===== ====== ===== ==================\n");
.short_help = "show vhost-user interface",
.function = show_dpdk_vhost_user_command_fn,
};
+#endif