New upstream version 18.08
[deb_dpdk.git] / lib/librte_vhost/socket.c
index 83befdc..d630317 100644
@@ -4,7 +4,6 @@
 
 #include <stdint.h>
 #include <stdio.h>
-#include <stdbool.h>
 #include <limits.h>
 #include <stdlib.h>
 #include <unistd.h>
@@ -52,6 +51,13 @@ struct vhost_user_socket {
        uint64_t supported_features;
        uint64_t features;
 
+       /*
+        * Device id to identify a specific backend device.
+        * It is set to -1 for the default software implementation.
+        * If valid, one socket can have only one connection.
+        */
+       int vdpa_dev_id;
+
        struct vhost_device_ops const *notify_ops;
 };
 
@@ -97,6 +103,7 @@ read_fd_message(int sockfd, char *buf, int buflen, int *fds, int fd_num)
        size_t fdsize = fd_num * sizeof(int);
        char control[CMSG_SPACE(fdsize)];
        struct cmsghdr *cmsg;
+       int got_fds = 0;
        int ret;
 
        memset(&msgh, 0, sizeof(msgh));
@@ -123,11 +130,16 @@ read_fd_message(int sockfd, char *buf, int buflen, int *fds, int fd_num)
                cmsg = CMSG_NXTHDR(&msgh, cmsg)) {
                if ((cmsg->cmsg_level == SOL_SOCKET) &&
                        (cmsg->cmsg_type == SCM_RIGHTS)) {
-                       memcpy(fds, CMSG_DATA(cmsg), fdsize);
+                       got_fds = (cmsg->cmsg_len - CMSG_LEN(0)) / sizeof(int);
+                       memcpy(fds, CMSG_DATA(cmsg), got_fds * sizeof(int));
                        break;
                }
        }
 
+       /* Clear out unused file descriptors */
+       while (got_fds < fd_num)
+               fds[got_fds++] = -1;
+
        return ret;
 }
 
@@ -153,6 +165,11 @@ send_fd_message(int sockfd, char *buf, int buflen, int *fds, int fd_num)
                msgh.msg_control = control;
                msgh.msg_controllen = sizeof(control);
                cmsg = CMSG_FIRSTHDR(&msgh);
+               if (cmsg == NULL) {
+                       RTE_LOG(ERR, VHOST_CONFIG, "cmsg == NULL\n");
+                       errno = EINVAL;
+                       return -1;
+               }
                cmsg->cmsg_len = CMSG_LEN(fdsize);
                cmsg->cmsg_level = SOL_SOCKET;
                cmsg->cmsg_type = SCM_RIGHTS;
@@ -163,7 +180,7 @@ send_fd_message(int sockfd, char *buf, int buflen, int *fds, int fd_num)
        }
 
        do {
-               ret = sendmsg(sockfd, &msgh, 0);
+               ret = sendmsg(sockfd, &msgh, MSG_NOSIGNAL);
        } while (ret < 0 && errno == EINTR);
 
        if (ret < 0) {
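For context, MSG_NOSIGNAL only changes how a broken peer is reported: without it, writing to a UNIX socket whose peer has closed raises SIGPIPE and kills the process unless the signal is handled; with it, the call simply fails with EPIPE and the caller can tear the connection down. A minimal, standalone sketch (not part of the patch):

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <sys/uio.h>

    static int
    send_nosigpipe(int sockfd, const void *buf, size_t len)
    {
            struct msghdr msgh;
            struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
            ssize_t ret;

            memset(&msgh, 0, sizeof(msgh));
            msgh.msg_iov = &iov;
            msgh.msg_iovlen = 1;

            do {
                    /* MSG_NOSIGNAL: a peer that has closed the connection is
                     * reported as EPIPE instead of a fatal SIGPIPE. */
                    ret = sendmsg(sockfd, &msgh, MSG_NOSIGNAL);
            } while (ret < 0 && errno == EINTR);

            if (ret < 0 && errno == EPIPE)
                    fprintf(stderr, "peer closed the connection\n");

            return ret < 0 ? -1 : 0;
    }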
@@ -182,6 +199,9 @@ vhost_user_add_connection(int fd, struct vhost_user_socket *vsocket)
        struct vhost_user_connection *conn;
        int ret;
 
+       if (vsocket == NULL)
+               return;
+
        conn = malloc(sizeof(*conn));
        if (conn == NULL) {
                close(fd);
@@ -198,6 +218,8 @@ vhost_user_add_connection(int fd, struct vhost_user_socket *vsocket)
 
        vhost_set_builtin_virtio_net(vid, vsocket->use_builtin_virtio_net);
 
+       vhost_attach_vdpa_device(vid, vsocket->vdpa_dev_id);
+
        if (vsocket->dequeue_zero_copy)
                vhost_enable_dequeue_zero_copy(vid);
 
@@ -232,6 +254,8 @@ vhost_user_add_connection(int fd, struct vhost_user_socket *vsocket)
        pthread_mutex_lock(&vsocket->conn_mutex);
        TAILQ_INSERT_TAIL(&vsocket->conn_list, conn, next);
        pthread_mutex_unlock(&vsocket->conn_mutex);
+
+       fdset_pipe_notify(&vhost_user.fdset);
        return;
 
 err:
@@ -318,6 +342,16 @@ vhost_user_start_server(struct vhost_user_socket *vsocket)
        int fd = vsocket->socket_fd;
        const char *path = vsocket->path;
 
+       /*
+        * bind() may fail if a socket file with the same name already
+        * exists. But the library obviously should not delete the file
+        * provided by the user, since we cannot be sure that it is not
+        * being used by other applications. Moreover, many applications form
+        * socket names based on user input, which is prone to errors.
+        *
+        * The user must ensure that the socket does not exist before
+        * registering the vhost driver in server mode.
+        */
        ret = bind(fd, (struct sockaddr *)&vsocket->un, sizeof(vsocket->un));
        if (ret < 0) {
                RTE_LOG(ERR, VHOST_CONFIG,
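Since the library deliberately never unlinks a pre-existing socket file, a server-mode application has to remove stale files itself before registering. A minimal sketch, assuming the application is the sole owner of the path:

    #include <errno.h>
    #include <unistd.h>
    #include <rte_vhost.h>

    static int
    register_server_socket(const char *path)
    {
            /* Remove a stale socket file left over from a previous run;
             * ENOENT simply means there is nothing to clean up. */
            if (unlink(path) < 0 && errno != ENOENT)
                    return -1;

            /* No RTE_VHOST_USER_CLIENT flag: server mode, so bind()
             * will create the socket file at 'path'. */
            return rte_vhost_driver_register(path, 0);
    }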
@@ -436,7 +470,6 @@ static int
 vhost_user_reconnect_init(void)
 {
        int ret;
-       char thread_name[RTE_MAX_THREAD_NAME_LEN];
 
        ret = pthread_mutex_init(&reconn_list.mutex, NULL);
        if (ret < 0) {
@@ -445,7 +478,7 @@ vhost_user_reconnect_init(void)
        }
        TAILQ_INIT(&reconn_list.head);
 
-       ret = pthread_create(&reconn_tid, NULL,
+       ret = rte_ctrl_thread_create(&reconn_tid, "vhost_reconn", NULL,
                             vhost_user_client_reconnect, NULL);
        if (ret != 0) {
                RTE_LOG(ERR, VHOST_CONFIG, "failed to create reconnect thread");
@@ -453,14 +486,6 @@ vhost_user_reconnect_init(void)
                        RTE_LOG(ERR, VHOST_CONFIG,
                                "failed to destroy reconnect mutex");
                }
-       } else {
-               snprintf(thread_name, RTE_MAX_THREAD_NAME_LEN,
-                        "vhost-reconn");
-
-               if (rte_thread_setname(reconn_tid, thread_name)) {
-                       RTE_LOG(DEBUG, VHOST_CONFIG,
-                               "failed to set reconnect thread name");
-               }
        }
 
        return ret;
@@ -523,6 +548,52 @@ find_vhost_user_socket(const char *path)
        return NULL;
 }
 
+int
+rte_vhost_driver_attach_vdpa_device(const char *path, int did)
+{
+       struct vhost_user_socket *vsocket;
+
+       if (rte_vdpa_get_device(did) == NULL)
+               return -1;
+
+       pthread_mutex_lock(&vhost_user.mutex);
+       vsocket = find_vhost_user_socket(path);
+       if (vsocket)
+               vsocket->vdpa_dev_id = did;
+       pthread_mutex_unlock(&vhost_user.mutex);
+
+       return vsocket ? 0 : -1;
+}
+
+int
+rte_vhost_driver_detach_vdpa_device(const char *path)
+{
+       struct vhost_user_socket *vsocket;
+
+       pthread_mutex_lock(&vhost_user.mutex);
+       vsocket = find_vhost_user_socket(path);
+       if (vsocket)
+               vsocket->vdpa_dev_id = -1;
+       pthread_mutex_unlock(&vhost_user.mutex);
+
+       return vsocket ? 0 : -1;
+}
+
+int
+rte_vhost_driver_get_vdpa_device_id(const char *path)
+{
+       struct vhost_user_socket *vsocket;
+       int did = -1;
+
+       pthread_mutex_lock(&vhost_user.mutex);
+       vsocket = find_vhost_user_socket(path);
+       if (vsocket)
+               did = vsocket->vdpa_dev_id;
+       pthread_mutex_unlock(&vhost_user.mutex);
+
+       return did;
+}
+
 int
 rte_vhost_driver_disable_features(const char *path, uint64_t features)
 {
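Taken together, the attach/detach/get_vdpa_device_id helpers added above let an application pin a socket to a vDPA backend before any guest connects. A minimal sketch; obtaining the device id 'did' from the vDPA driver's registration is assumed to happen elsewhere:

    #include <rte_vhost.h>

    static int
    setup_vdpa_socket(const char *path, int did)
    {
            if (rte_vhost_driver_register(path, 0) < 0)
                    return -1;

            /* Fails if 'did' does not name a registered vDPA device. */
            if (rte_vhost_driver_attach_vdpa_device(path, did) < 0) {
                    rte_vhost_driver_unregister(path);
                    return -1;
            }

            /* -1 would mean the socket is served by the software datapath. */
            if (rte_vhost_driver_get_vdpa_device_id(path) != did)
                    return -1;

            return rte_vhost_driver_start(path);
    }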
@@ -591,19 +662,136 @@ int
 rte_vhost_driver_get_features(const char *path, uint64_t *features)
 {
        struct vhost_user_socket *vsocket;
+       uint64_t vdpa_features;
+       struct rte_vdpa_device *vdpa_dev;
+       int did = -1;
+       int ret = 0;
 
        pthread_mutex_lock(&vhost_user.mutex);
        vsocket = find_vhost_user_socket(path);
-       if (vsocket)
+       if (!vsocket) {
+               RTE_LOG(ERR, VHOST_CONFIG,
+                       "socket file %s is not registered yet.\n", path);
+               ret = -1;
+               goto unlock_exit;
+       }
+
+       did = vsocket->vdpa_dev_id;
+       vdpa_dev = rte_vdpa_get_device(did);
+       if (!vdpa_dev || !vdpa_dev->ops->get_features) {
                *features = vsocket->features;
+               goto unlock_exit;
+       }
+
+       if (vdpa_dev->ops->get_features(did, &vdpa_features) < 0) {
+               RTE_LOG(ERR, VHOST_CONFIG,
+                               "failed to get vdpa features "
+                               "for socket file %s.\n", path);
+               ret = -1;
+               goto unlock_exit;
+       }
+
+       *features = vsocket->features & vdpa_features;
+
+unlock_exit:
        pthread_mutex_unlock(&vhost_user.mutex);
+       return ret;
+}
+
+int
+rte_vhost_driver_get_protocol_features(const char *path,
+               uint64_t *protocol_features)
+{
+       struct vhost_user_socket *vsocket;
+       uint64_t vdpa_protocol_features;
+       struct rte_vdpa_device *vdpa_dev;
+       int did = -1;
+       int ret = 0;
 
+       pthread_mutex_lock(&vhost_user.mutex);
+       vsocket = find_vhost_user_socket(path);
        if (!vsocket) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "socket file %s is not registered yet.\n", path);
-               return -1;
-       } else {
-               return 0;
+               ret = -1;
+               goto unlock_exit;
+       }
+
+       did = vsocket->vdpa_dev_id;
+       vdpa_dev = rte_vdpa_get_device(did);
+       if (!vdpa_dev || !vdpa_dev->ops->get_protocol_features) {
+               *protocol_features = VHOST_USER_PROTOCOL_FEATURES;
+               goto unlock_exit;
+       }
+
+       if (vdpa_dev->ops->get_protocol_features(did,
+                               &vdpa_protocol_features) < 0) {
+               RTE_LOG(ERR, VHOST_CONFIG,
+                               "failed to get vdpa protocol features "
+                               "for socket file %s.\n", path);
+               ret = -1;
+               goto unlock_exit;
+       }
+
+       *protocol_features = VHOST_USER_PROTOCOL_FEATURES
+               & vdpa_protocol_features;
+
+unlock_exit:
+       pthread_mutex_unlock(&vhost_user.mutex);
+       return ret;
+}
+
+int
+rte_vhost_driver_get_queue_num(const char *path, uint32_t *queue_num)
+{
+       struct vhost_user_socket *vsocket;
+       uint32_t vdpa_queue_num;
+       struct rte_vdpa_device *vdpa_dev;
+       int did = -1;
+       int ret = 0;
+
+       pthread_mutex_lock(&vhost_user.mutex);
+       vsocket = find_vhost_user_socket(path);
+       if (!vsocket) {
+               RTE_LOG(ERR, VHOST_CONFIG,
+                       "socket file %s is not registered yet.\n", path);
+               ret = -1;
+               goto unlock_exit;
+       }
+
+       did = vsocket->vdpa_dev_id;
+       vdpa_dev = rte_vdpa_get_device(did);
+       if (!vdpa_dev || !vdpa_dev->ops->get_queue_num) {
+               *queue_num = VHOST_MAX_QUEUE_PAIRS;
+               goto unlock_exit;
+       }
+
+       if (vdpa_dev->ops->get_queue_num(did, &vdpa_queue_num) < 0) {
+               RTE_LOG(ERR, VHOST_CONFIG,
+                               "failed to get vdpa queue number "
+                               "for socket file %s.\n", path);
+               ret = -1;
+               goto unlock_exit;
+       }
+
+       *queue_num = RTE_MIN((uint32_t)VHOST_MAX_QUEUE_PAIRS, vdpa_queue_num);
+
+unlock_exit:
+       pthread_mutex_unlock(&vhost_user.mutex);
+       return ret;
+}
+
+static void
+vhost_user_socket_mem_free(struct vhost_user_socket *vsocket)
+{
+       if (vsocket && vsocket->path) {
+               free(vsocket->path);
+               vsocket->path = NULL;
+       }
+
+       if (vsocket) {
+               free(vsocket);
+               vsocket = NULL;
        }
 }
 
@@ -637,7 +825,7 @@ rte_vhost_driver_register(const char *path, uint64_t flags)
        if (vsocket->path == NULL) {
                RTE_LOG(ERR, VHOST_CONFIG,
                        "error: failed to copy socket path string\n");
-               free(vsocket);
+               vhost_user_socket_mem_free(vsocket);
                goto out;
        }
        TAILQ_INIT(&vsocket->conn_list);
@@ -665,6 +853,12 @@ rte_vhost_driver_register(const char *path, uint64_t flags)
        vsocket->supported_features = VIRTIO_NET_SUPPORTED_FEATURES;
        vsocket->features           = VIRTIO_NET_SUPPORTED_FEATURES;
 
+       /* Dequeue zero copy cannot guarantee descriptors are returned in order */
+       if (vsocket->dequeue_zero_copy) {
+               vsocket->supported_features &= ~(1ULL << VIRTIO_F_IN_ORDER);
+               vsocket->features &= ~(1ULL << VIRTIO_F_IN_ORDER);
+       }
+
        if (!(flags & RTE_VHOST_USER_IOMMU_SUPPORT)) {
                vsocket->supported_features &= ~(1ULL << VIRTIO_F_IOMMU_PLATFORM);
                vsocket->features &= ~(1ULL << VIRTIO_F_IOMMU_PLATFORM);
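The library now masks VIRTIO_F_IN_ORDER automatically when dequeue zero copy is requested. The sketch below shows the equivalent, explicit masking an application could do through the public API; it is redundant with this patch and only illustrates rte_vhost_driver_disable_features():

    #include <rte_vhost.h>

    /* Bit 35 in the virtio 1.1 spec; older kernel headers may not define it. */
    #ifndef VIRTIO_F_IN_ORDER
    #define VIRTIO_F_IN_ORDER 35
    #endif

    static int
    register_zero_copy_socket(const char *path)
    {
            if (rte_vhost_driver_register(path,
                            RTE_VHOST_USER_DEQUEUE_ZERO_COPY) < 0)
                    return -1;

            /* Dequeue zero copy returns descriptors out of order, so make
             * sure VIRTIO_F_IN_ORDER is never offered to the guest. */
            return rte_vhost_driver_disable_features(path,
                            1ULL << VIRTIO_F_IN_ORDER);
    }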
@@ -695,8 +889,7 @@ out_mutex:
                        "error: failed to destroy connection mutex\n");
        }
 out_free:
-       free(vsocket->path);
-       free(vsocket);
+       vhost_user_socket_mem_free(vsocket);
 out:
        pthread_mutex_unlock(&vhost_user.mutex);
 
@@ -743,21 +936,25 @@ rte_vhost_driver_unregister(const char *path)
                struct vhost_user_socket *vsocket = vhost_user.vsockets[i];
 
                if (!strcmp(vsocket->path, path)) {
-                       if (vsocket->is_server) {
-                               fdset_del(&vhost_user.fdset, vsocket->socket_fd);
-                               close(vsocket->socket_fd);
-                               unlink(path);
-                       } else if (vsocket->reconnect) {
-                               vhost_user_remove_reconnect(vsocket);
-                       }
-
+again:
                        pthread_mutex_lock(&vsocket->conn_mutex);
                        for (conn = TAILQ_FIRST(&vsocket->conn_list);
                             conn != NULL;
                             conn = next) {
                                next = TAILQ_NEXT(conn, next);
 
-                               fdset_del(&vhost_user.fdset, conn->connfd);
+                               /*
+                                * If the read/write callback is executing,
+                                * release conn_mutex and retry, since the
+                                * callback may itself take conn_mutex.
+                                */
+                               if (fdset_try_del(&vhost_user.fdset,
+                                                 conn->connfd) == -1) {
+                                       pthread_mutex_unlock(
+                                                       &vsocket->conn_mutex);
+                                       goto again;
+                               }
+
                                RTE_LOG(INFO, VHOST_CONFIG,
                                        "free connfd = %d for device '%s'\n",
                                        conn->connfd, path);
@@ -768,9 +965,17 @@ rte_vhost_driver_unregister(const char *path)
                        }
                        pthread_mutex_unlock(&vsocket->conn_mutex);
 
+                       if (vsocket->is_server) {
+                               fdset_del(&vhost_user.fdset,
+                                               vsocket->socket_fd);
+                               close(vsocket->socket_fd);
+                               unlink(path);
+                       } else if (vsocket->reconnect) {
+                               vhost_user_remove_reconnect(vsocket);
+                       }
+
                        pthread_mutex_destroy(&vsocket->conn_mutex);
-                       free(vsocket->path);
-                       free(vsocket);
+                       vhost_user_socket_mem_free(vsocket);
 
                        count = --vhost_user.vsocket_cnt;
                        vhost_user.vsockets[i] = vhost_user.vsockets[count];
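The goto-again loop above is the usual way to break a lock-order inversion: this path holds conn_mutex and wants to delete an fd entry, while the dispatch thread may be running the read/write callback on that entry and may itself need conn_mutex. A generic, self-contained sketch of the pattern; the entry/try_remove names are hypothetical and only illustrate the idea:

    #include <pthread.h>
    #include <stdbool.h>

    /* Hypothetical entry whose callback may run concurrently in another thread. */
    struct entry {
            pthread_mutex_t lock;
            bool busy;      /* set while a callback is executing on the entry */
            bool removed;
    };

    /* Returns -1 while a callback is still using the entry, 0 once it has
     * been marked removed (similar in spirit to fdset_try_del()). */
    static int
    try_remove(struct entry *e)
    {
            int ret = 0;

            pthread_mutex_lock(&e->lock);
            if (e->busy)
                    ret = -1;
            else
                    e->removed = true;
            pthread_mutex_unlock(&e->lock);
            return ret;
    }

    static void
    remove_entry(pthread_mutex_t *list_lock, struct entry *e)
    {
    again:
            pthread_mutex_lock(list_lock);
            if (try_remove(e) == -1) {
                    /* The callback may block on list_lock, so drop it and
                     * retry instead of waiting while holding it. */
                    pthread_mutex_unlock(list_lock);
                    goto again;
            }
            /* ... unlink 'e' from the list while still holding list_lock ... */
            pthread_mutex_unlock(list_lock);
    }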
@@ -829,11 +1034,26 @@ rte_vhost_driver_start(const char *path)
                return -1;
 
        if (fdset_tid == 0) {
-               int ret = pthread_create(&fdset_tid, NULL, fdset_event_dispatch,
-                                    &vhost_user.fdset);
-               if (ret != 0)
+               /**
+                * Create a pipe that poll() waits on; writing to it wakes
+                * the dispatch thread so it can rebuild its poll wait list.
+                */
+               if (fdset_pipe_init(&vhost_user.fdset) < 0) {
+                       RTE_LOG(ERR, VHOST_CONFIG,
+                               "failed to create pipe for vhost fdset\n");
+                       return -1;
+               }
+
+               int ret = rte_ctrl_thread_create(&fdset_tid,
+                       "vhost-events", NULL, fdset_event_dispatch,
+                       &vhost_user.fdset);
+               if (ret != 0) {
                        RTE_LOG(ERR, VHOST_CONFIG,
                                "failed to create fdset handling thread");
+
+                       fdset_pipe_uninit(&vhost_user.fdset);
+                       return -1;
+               }
        }
 
        if (vsocket->is_server)
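For completeness, a minimal bring-up sequence from the application side; the first rte_vhost_driver_start() call is what creates the notification pipe and the "vhost-events" control thread set up above. A sketch with no-op callbacks:

    #include <rte_vhost.h>

    static int
    new_device(int vid)
    {
            (void)vid;              /* datapath setup would go here */
            return 0;
    }

    static void
    destroy_device(int vid)
    {
            (void)vid;              /* datapath teardown would go here */
    }

    static const struct vhost_device_ops ops = {
            .new_device     = new_device,
            .destroy_device = destroy_device,
    };

    static int
    bring_up(const char *path)
    {
            if (rte_vhost_driver_register(path, 0) < 0)
                    return -1;

            if (rte_vhost_driver_callback_register(path, &ops) < 0)
                    return -1;

            return rte_vhost_driver_start(path);
    }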