diff --git a/lib/librte_vhost/vhost_user.c b/lib/librte_vhost/vhost_user.c
index f4c7ce46..0eb5e0d6 100644
--- a/lib/librte_vhost/vhost_user.c
+++ b/lib/librte_vhost/vhost_user.c
@@ -329,21 +329,26 @@ numa_realloc(struct virtio_net *dev, int index __rte_unused)
 
 /* Converts QEMU virtual address to Vhost virtual address. */
 static uint64_t
-qva_to_vva(struct virtio_net *dev, uint64_t qva)
+qva_to_vva(struct virtio_net *dev, uint64_t qva, uint64_t *len)
 {
-	struct rte_vhost_mem_region *reg;
+	struct rte_vhost_mem_region *r;
 	uint32_t i;
 
 	/* Find the region where the address lives. */
 	for (i = 0; i < dev->mem->nregions; i++) {
-		reg = &dev->mem->regions[i];
+		r = &dev->mem->regions[i];
+
+		if (qva >= r->guest_user_addr &&
+		    qva < r->guest_user_addr + r->size) {
 
-		if (qva >= reg->guest_user_addr &&
-		    qva < reg->guest_user_addr + reg->size) {
-			return qva - reg->guest_user_addr +
-			       reg->host_user_addr;
+			if (unlikely(*len > r->guest_user_addr + r->size - qva))
+				*len = r->guest_user_addr + r->size - qva;
+
+			return qva - r->guest_user_addr +
+			       r->host_user_addr;
 		}
 	}
+	*len = 0;
 
 	return 0;
 }
@@ -356,20 +361,20 @@ qva_to_vva(struct virtio_net *dev, uint64_t qva)
  */
 static uint64_t
 ring_addr_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
-		uint64_t ra, uint64_t size)
+		uint64_t ra, uint64_t *size)
 {
 	if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)) {
 		uint64_t vva;
 
 		vva = vhost_user_iotlb_cache_find(vq, ra,
-					&size, VHOST_ACCESS_RW);
+					size, VHOST_ACCESS_RW);
 		if (!vva)
 			vhost_user_iotlb_miss(dev, ra, VHOST_ACCESS_RW);
 
 		return vva;
 	}
 
-	return qva_to_vva(dev, ra);
+	return qva_to_vva(dev, ra, size);
 }
 
 static struct virtio_net *
@@ -377,16 +382,18 @@ translate_ring_addresses(struct virtio_net *dev, int vq_index)
 {
 	struct vhost_virtqueue *vq = dev->virtqueue[vq_index];
 	struct vhost_vring_addr *addr = &vq->ring_addrs;
+	uint64_t len;
 
 	/* The addresses are converted from QEMU virtual to Vhost virtual. */
 	if (vq->desc && vq->avail && vq->used)
 		return dev;
 
+	len = sizeof(struct vring_desc) * vq->size;
 	vq->desc = (struct vring_desc *)(uintptr_t)ring_addr_to_vva(dev,
-			vq, addr->desc_user_addr, sizeof(struct vring_desc));
-	if (vq->desc == 0) {
+			vq, addr->desc_user_addr, &len);
+	if (vq->desc == 0 || len != sizeof(struct vring_desc) * vq->size) {
 		RTE_LOG(DEBUG, VHOST_CONFIG,
-			"(%d) failed to find desc ring address.\n",
+			"(%d) failed to map desc ring.\n",
 			dev->vid);
 		return dev;
 	}
@@ -395,20 +402,26 @@ translate_ring_addresses(struct virtio_net *dev, int vq_index)
 	vq = dev->virtqueue[vq_index];
 	addr = &vq->ring_addrs;
 
+	len = sizeof(struct vring_avail) + sizeof(uint16_t) * vq->size;
 	vq->avail = (struct vring_avail *)(uintptr_t)ring_addr_to_vva(dev,
-			vq, addr->avail_user_addr, sizeof(struct vring_avail));
-	if (vq->avail == 0) {
+			vq, addr->avail_user_addr, &len);
+	if (vq->avail == 0 ||
+			len != sizeof(struct vring_avail) +
+			sizeof(uint16_t) * vq->size) {
 		RTE_LOG(DEBUG, VHOST_CONFIG,
-			"(%d) failed to find avail ring address.\n",
+			"(%d) failed to map avail ring.\n",
 			dev->vid);
 		return dev;
 	}
 
+	len = sizeof(struct vring_used) +
+		sizeof(struct vring_used_elem) * vq->size;
 	vq->used = (struct vring_used *)(uintptr_t)ring_addr_to_vva(dev,
-			vq, addr->used_user_addr, sizeof(struct vring_used));
-	if (vq->used == 0) {
+			vq, addr->used_user_addr, &len);
+	if (vq->used == 0 || len != sizeof(struct vring_used) +
+			sizeof(struct vring_used_elem) * vq->size) {
 		RTE_LOG(DEBUG, VHOST_CONFIG,
-			"(%d) failed to find used ring address.\n",
+			"(%d) failed to map used ring.\n",
 			dev->vid);
 		return dev;
 	}
@@ -463,7 +476,7 @@ vhost_user_set_vring_addr(struct virtio_net **pdev, VhostUserMsg *msg)
 
 	if (vq->enabled && (dev->features &
 				(1ULL << VHOST_USER_F_PROTOCOL_FEATURES))) {
-		dev = translate_ring_addresses(dev, msg->payload.state.index);
+		dev = translate_ring_addresses(dev, msg->payload.addr.index);
 		if (!dev)
 			return -1;
 
@@ -488,7 +501,7 @@ vhost_user_set_vring_base(struct virtio_net *dev,
 	return 0;
 }
 
-static void
+static int
 add_one_guest_page(struct virtio_net *dev, uint64_t guest_phys_addr,
 		   uint64_t host_phys_addr, uint64_t size)
 {
@@ -498,6 +511,10 @@ add_one_guest_page(struct virtio_net *dev, uint64_t guest_phys_addr,
 		dev->max_guest_pages *= 2;
 		dev->guest_pages = realloc(dev->guest_pages,
 					dev->max_guest_pages * sizeof(*page));
+		if (!dev->guest_pages) {
+			RTE_LOG(ERR, VHOST_CONFIG, "cannot realloc guest_pages\n");
+			return -1;
+		}
 	}
 
 	if (dev->nr_guest_pages > 0) {
@@ -506,7 +523,7 @@ add_one_guest_page(struct virtio_net *dev, uint64_t guest_phys_addr,
 		if (host_phys_addr == last_page->host_phys_addr +
 				      last_page->size) {
 			last_page->size += size;
-			return;
+			return 0;
 		}
 	}
 
@@ -514,9 +531,11 @@ add_one_guest_page(struct virtio_net *dev, uint64_t guest_phys_addr,
 	page->guest_phys_addr = guest_phys_addr;
 	page->host_phys_addr = host_phys_addr;
 	page->size = size;
+
+	return 0;
 }
 
-static void
+static int
 add_guest_pages(struct virtio_net *dev, struct rte_vhost_mem_region *reg,
 		uint64_t page_size)
 {
@@ -530,7 +549,9 @@ add_guest_pages(struct virtio_net *dev, struct rte_vhost_mem_region *reg,
 	size = page_size - (guest_phys_addr & (page_size - 1));
 	size = RTE_MIN(size, reg_size);
 
-	add_one_guest_page(dev, guest_phys_addr, host_phys_addr, size);
+	if (add_one_guest_page(dev, guest_phys_addr, host_phys_addr, size) < 0)
+		return -1;
+
 	host_user_addr += size;
 	guest_phys_addr += size;
 	reg_size -= size;
@@ -539,12 +560,16 @@ add_guest_pages(struct virtio_net *dev, struct rte_vhost_mem_region *reg,
 		size = RTE_MIN(reg_size, page_size);
 		host_phys_addr = rte_mem_virt2iova((void *)(uintptr_t)
 						host_user_addr);
-		add_one_guest_page(dev, guest_phys_addr, host_phys_addr, size);
+		if (add_one_guest_page(dev, guest_phys_addr, host_phys_addr,
+				size) < 0)
+			return -1;
 
 		host_user_addr += size;
 		guest_phys_addr += size;
 		reg_size -= size;
 	}
+
+	return 0;
 }
 
 #ifdef RTE_LIBRTE_VHOST_DEBUG
@@ -573,9 +598,34 @@ dump_guest_pages(struct virtio_net *dev)
 #define dump_guest_pages(dev)
 #endif
 
+static bool
+vhost_memory_changed(struct VhostUserMemory *new,
+		     struct rte_vhost_memory *old)
+{
+	uint32_t i;
+
+	if (new->nregions != old->nregions)
+		return true;
+
+	for (i = 0; i < new->nregions; ++i) {
+		VhostUserMemoryRegion *new_r = &new->regions[i];
+		struct rte_vhost_mem_region *old_r = &old->regions[i];
+
+		if (new_r->guest_phys_addr != old_r->guest_phys_addr)
+			return true;
+		if (new_r->memory_size != old_r->size)
+			return true;
+		if (new_r->userspace_addr != old_r->guest_user_addr)
+			return true;
+	}
+
+	return false;
+}
+
 static int
-vhost_user_set_mem_table(struct virtio_net *dev, struct VhostUserMsg *pmsg)
+vhost_user_set_mem_table(struct virtio_net **pdev, struct VhostUserMsg *pmsg)
 {
+	struct virtio_net *dev = *pdev;
 	struct VhostUserMemory memory = pmsg->payload.memory;
 	struct rte_vhost_mem_region *reg;
 	void *mmap_addr;
@@ -585,12 +635,27 @@ vhost_user_set_mem_table(struct virtio_net *dev, struct VhostUserMsg *pmsg)
 	uint32_t i;
 	int fd;
 
+	if (dev->mem && !vhost_memory_changed(&memory, dev->mem)) {
+		RTE_LOG(INFO, VHOST_CONFIG,
+			"(%d) memory regions not changed\n", dev->vid);
+
+		for (i = 0; i < memory.nregions; i++)
+			close(pmsg->fds[i]);
+
+		return 0;
+	}
+
 	if (dev->mem) {
 		free_mem_region(dev);
 		rte_free(dev->mem);
 		dev->mem = NULL;
 	}
 
+	/* Flush IOTLB cache as previous HVAs are now invalid */
+	if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
+		for (i = 0; i < dev->nr_vring; i++)
+			vhost_user_iotlb_flush_all(dev->virtqueue[i]);
+
 	dev->nr_guest_pages = 0;
 	if (!dev->guest_pages) {
 		dev->max_guest_pages = 8;
@@ -658,7 +723,12 @@ vhost_user_set_mem_table(struct virtio_net *dev, struct VhostUserMsg *pmsg)
 			mmap_offset;
 
 		if (dev->dequeue_zero_copy)
-			add_guest_pages(dev, reg, alignment);
+			if (add_guest_pages(dev, reg, alignment) < 0) {
+				RTE_LOG(ERR, VHOST_CONFIG,
+					"adding guest pages to region %u failed.\n",
+					i);
+				goto err_mmap;
+			}
 
 		RTE_LOG(INFO, VHOST_CONFIG,
 			"guest memory region %u, size: 0x%" PRIx64 "\n"
@@ -679,6 +749,25 @@ vhost_user_set_mem_table(struct virtio_net *dev, struct VhostUserMsg *pmsg)
 			mmap_offset);
 	}
 
+	for (i = 0; i < dev->nr_vring; i++) {
+		struct vhost_virtqueue *vq = dev->virtqueue[i];
+
+		if (vq->desc || vq->avail || vq->used) {
+			/*
+			 * If the memory table got updated, the ring addresses
+			 * need to be translated again as virtual addresses have
+			 * changed.
+			 */
+ */ + vring_invalidate(dev, vq); + + dev = translate_ring_addresses(dev, i); + if (!dev) + return -1; + + *pdev = dev; + } + } + dump_guest_pages(dev); return 0; @@ -810,8 +899,8 @@ vhost_user_get_vring_base(struct virtio_net *dev, dev->flags &= ~VIRTIO_DEV_READY; - /* Here we are safe to get the last used index */ - msg->payload.state.num = vq->last_used_idx; + /* Here we are safe to get the last avail index */ + msg->payload.state.num = vq->last_avail_idx; RTE_LOG(INFO, VHOST_CONFIG, "vring base idx:%d file:%d\n", msg->payload.state.index, @@ -826,6 +915,11 @@ vhost_user_get_vring_base(struct virtio_net *dev, vq->kickfd = VIRTIO_UNINITIALIZED_EVENTFD; + if (vq->callfd >= 0) + close(vq->callfd); + + vq->callfd = VIRTIO_UNINITIALIZED_EVENTFD; + if (dev->dequeue_zero_copy) free_zmbufs(vq); rte_free(vq->shadow_used_ring); @@ -920,7 +1014,7 @@ vhost_user_set_log_base(struct virtio_net *dev, struct VhostUserMsg *msg) * mmap from 0 to workaround a hugepage mmap bug: mmap will * fail when offset is not page size aligned. */ - addr = mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0); + addr = mmap(0, size + off, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0); close(fd); if (addr == MAP_FAILED) { RTE_LOG(ERR, VHOST_CONFIG, "mmap log base failed!\n"); @@ -1060,11 +1154,12 @@ vhost_user_iotlb_msg(struct virtio_net **pdev, struct VhostUserMsg *msg) struct virtio_net *dev = *pdev; struct vhost_iotlb_msg *imsg = &msg->payload.iotlb; uint16_t i; - uint64_t vva; + uint64_t vva, len; switch (imsg->type) { case VHOST_IOTLB_UPDATE: - vva = qva_to_vva(dev, imsg->uaddr); + len = imsg->size; + vva = qva_to_vva(dev, imsg->uaddr, &len); if (!vva) return -1; @@ -1072,7 +1167,7 @@ vhost_user_iotlb_msg(struct virtio_net **pdev, struct VhostUserMsg *msg) struct vhost_virtqueue *vq = dev->virtqueue[i]; vhost_user_iotlb_cache_insert(vq, imsg->iova, vva, - imsg->size, imsg->perm); + len, imsg->perm); if (is_vring_iotlb_update(vq, imsg)) *pdev = dev = translate_ring_addresses(dev, i); @@ -1190,12 +1285,47 @@ vhost_user_check_and_alloc_queue_pair(struct virtio_net *dev, VhostUserMsg *msg) return alloc_vring_queue(dev, vring_idx); } +static void +vhost_user_lock_all_queue_pairs(struct virtio_net *dev) +{ + unsigned int i = 0; + unsigned int vq_num = 0; + + while (vq_num < dev->nr_vring) { + struct vhost_virtqueue *vq = dev->virtqueue[i]; + + if (vq) { + rte_spinlock_lock(&vq->access_lock); + vq_num++; + } + i++; + } +} + +static void +vhost_user_unlock_all_queue_pairs(struct virtio_net *dev) +{ + unsigned int i = 0; + unsigned int vq_num = 0; + + while (vq_num < dev->nr_vring) { + struct vhost_virtqueue *vq = dev->virtqueue[i]; + + if (vq) { + rte_spinlock_unlock(&vq->access_lock); + vq_num++; + } + i++; + } +} + int vhost_user_msg_handler(int vid, int fd) { struct virtio_net *dev; struct VhostUserMsg msg; int ret; + int unlock_required = 0; dev = get_device(vid); if (dev == NULL) @@ -1241,6 +1371,38 @@ vhost_user_msg_handler(int vid, int fd) return -1; } + /* + * Note: we don't lock all queues on VHOST_USER_GET_VRING_BASE, + * since it is sent when virtio stops and device is destroyed. + * destroy_device waits for queues to be inactive, so it is safe. + * Otherwise taking the access_lock would cause a dead lock. 
+ */ + switch (msg.request.master) { + case VHOST_USER_SET_FEATURES: + case VHOST_USER_SET_PROTOCOL_FEATURES: + case VHOST_USER_SET_OWNER: + case VHOST_USER_RESET_OWNER: + case VHOST_USER_SET_MEM_TABLE: + case VHOST_USER_SET_LOG_BASE: + case VHOST_USER_SET_LOG_FD: + case VHOST_USER_SET_VRING_NUM: + case VHOST_USER_SET_VRING_ADDR: + case VHOST_USER_SET_VRING_BASE: + case VHOST_USER_SET_VRING_KICK: + case VHOST_USER_SET_VRING_CALL: + case VHOST_USER_SET_VRING_ERR: + case VHOST_USER_SET_VRING_ENABLE: + case VHOST_USER_SEND_RARP: + case VHOST_USER_NET_SET_MTU: + case VHOST_USER_SET_SLAVE_REQ_FD: + vhost_user_lock_all_queue_pairs(dev); + unlock_required = 1; + break; + default: + break; + + } + switch (msg.request.master) { case VHOST_USER_GET_FEATURES: msg.payload.u64 = vhost_user_get_features(dev); @@ -1267,7 +1429,7 @@ vhost_user_msg_handler(int vid, int fd) break; case VHOST_USER_SET_MEM_TABLE: - ret = vhost_user_set_mem_table(dev, &msg); + ret = vhost_user_set_mem_table(&dev, &msg); break; case VHOST_USER_SET_LOG_BASE: @@ -1342,6 +1504,9 @@ vhost_user_msg_handler(int vid, int fd) } + if (unlock_required) + vhost_user_unlock_all_queue_pairs(dev); + if (msg.flags & VHOST_USER_NEED_REPLY) { msg.payload.u64 = !!ret; msg.size = sizeof(msg.payload.u64);