X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=lib%2Flibrte_vhost%2Fvhost.h;h=11ce2711889a4724bff2cfa1985c31f96a8e6dd5;hb=6e7cbd63706f3435b9d9a2057a37db1da01db9a7;hp=6fe72aeb61df2ac977c633f46a4d425aa7294e2c;hpb=f239aed5e674965691846e8ce3f187dd47523689;p=deb_dpdk.git

diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h
index 6fe72aeb..11ce2711 100644
--- a/lib/librte_vhost/vhost.h
+++ b/lib/librte_vhost/vhost.h
@@ -45,6 +45,7 @@
 #include <rte_log.h>
 #include <rte_ether.h>
+#include <rte_rwlock.h>
 
 #include "rte_vhost.h"
 
@@ -58,6 +59,8 @@
 
 #define BUF_VECTOR_MAX 256
 
+#define VHOST_LOG_CACHE_NR 32
+
 /**
  * Structure contains buffer address, length and descriptor index
  * from vring to do scatter RX.
@@ -81,6 +84,24 @@ struct zcopy_mbuf {
 };
 TAILQ_HEAD(zcopy_mbuf_list, zcopy_mbuf);
 
+/*
+ * Structure contains the info for each batched memory copy.
+ */
+struct batch_copy_elem {
+	void *dst;
+	void *src;
+	uint32_t len;
+	uint64_t log_addr;
+};
+
+/*
+ * Structure that contains the info for batched dirty logging.
+ */
+struct log_cache_entry {
+	uint32_t offset;
+	unsigned long val;
+};
+
 /**
  * Structure contains variables relevant to RX/TX virtqueues.
  */
@@ -97,11 +118,14 @@ struct vhost_virtqueue {
 
 	/* Backend value to determine if device should started/stopped */
 	int			backend;
+	int			enabled;
+	int			access_ok;
+	rte_spinlock_t		access_lock;
+
 	/* Used to notify the guest (trigger interrupt) */
 	int			callfd;
 	/* Currently unused as polling mode is enabled */
 	int			kickfd;
-	int			enabled;
 
 	/* Physical address of used ring, for logging */
 	uint64_t		log_guest_addr;
@@ -114,6 +138,20 @@ struct vhost_virtqueue {
 	struct vring_used_elem  *shadow_used_ring;
 	uint16_t                shadow_used_idx;
+	struct vhost_vring_addr	ring_addrs;
+
+	struct batch_copy_elem	*batch_copy_elems;
+	uint16_t		batch_copy_nb_elems;
+
+	struct log_cache_entry	log_cache[VHOST_LOG_CACHE_NR];
+	uint16_t		log_cache_nb_elem;
+
+	rte_rwlock_t		iotlb_lock;
+	rte_rwlock_t		iotlb_pending_lock;
+	struct rte_mempool	*iotlb_pool;
+	TAILQ_HEAD(, vhost_iotlb_entry) iotlb_list;
+	int			iotlb_cache_nr;
+	TAILQ_HEAD(, vhost_iotlb_entry) iotlb_pending_list;
 } __rte_cache_aligned;
 
 /* Old kernels have no such macros defined */
@@ -132,6 +170,37 @@ struct vhost_virtqueue {
 #define VIRTIO_NET_F_MTU 3
 #endif
 
+/* Declare IOMMU related bits for older kernels */
+#ifndef VIRTIO_F_IOMMU_PLATFORM
+
+#define VIRTIO_F_IOMMU_PLATFORM 33
+
+struct vhost_iotlb_msg {
+	__u64 iova;
+	__u64 size;
+	__u64 uaddr;
+#define VHOST_ACCESS_RO      0x1
+#define VHOST_ACCESS_WO      0x2
+#define VHOST_ACCESS_RW      0x3
+	__u8 perm;
+#define VHOST_IOTLB_MISS           1
+#define VHOST_IOTLB_UPDATE         2
+#define VHOST_IOTLB_INVALIDATE     3
+#define VHOST_IOTLB_ACCESS_FAIL    4
+	__u8 type;
+};
+
+#define VHOST_IOTLB_MSG 0x1
+
+struct vhost_msg {
+	int type;
+	union {
+		struct vhost_iotlb_msg iotlb;
+		__u8 padding[64];
+	};
+};
+#endif
+
 /*
  * Define virtio 1.0 for older kernels
  */
@@ -157,7 +226,8 @@ struct vhost_virtqueue {
 				(1ULL << VIRTIO_NET_F_GUEST_TSO4) | \
 				(1ULL << VIRTIO_NET_F_GUEST_TSO6) | \
 				(1ULL << VIRTIO_RING_F_INDIRECT_DESC) | \
-				(1ULL << VIRTIO_NET_F_MTU))
+				(1ULL << VIRTIO_NET_F_MTU) | \
+				(1ULL << VIRTIO_F_IOMMU_PLATFORM))
 
 struct guest_page {
@@ -196,6 +266,8 @@ struct virtio_net {
 	uint32_t		nr_guest_pages;
 	uint32_t		max_guest_pages;
 	struct guest_page       *guest_pages;
+
+	int			slave_req_fd;
 } __rte_cache_aligned;
 
@@ -207,7 +279,15 @@ struct virtio_net {
 static __rte_always_inline void
 vhost_set_bit(unsigned int nr, volatile uint8_t *addr)
 {
-	__sync_fetch_and_or_8(addr, (1U << nr));
+#if defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION < 70100)
+	/*
+	 * __sync_ built-ins are deprecated, but __atomic_ ones
+	 * are sub-optimized in older GCC versions.
+	 */
+	__sync_fetch_and_or_1(addr, (1U << nr));
+#else
+	__atomic_fetch_or(addr, (1U << nr), __ATOMIC_RELAXED);
+#endif
 }
 
 static __rte_always_inline void
@@ -238,6 +318,103 @@ vhost_log_write(struct virtio_net *dev, uint64_t addr, uint64_t len)
 	}
 }
 
+static __rte_always_inline void
+vhost_log_cache_sync(struct virtio_net *dev, struct vhost_virtqueue *vq)
+{
+	unsigned long *log_base;
+	int i;
+
+	if (likely(((dev->features & (1ULL << VHOST_F_LOG_ALL)) == 0) ||
+		   !dev->log_base))
+		return;
+
+	log_base = (unsigned long *)(uintptr_t)dev->log_base;
+
+	/*
+	 * It is expected a write memory barrier has been issued
+	 * before this function is called.
+	 */
+
+	for (i = 0; i < vq->log_cache_nb_elem; i++) {
+		struct log_cache_entry *elem = vq->log_cache + i;
+
+#if defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION < 70100)
+		/*
+		 * '__sync' builtins are deprecated, but '__atomic' ones
+		 * are sub-optimized in older GCC versions.
+		 */
+		__sync_fetch_and_or(log_base + elem->offset, elem->val);
+#else
+		__atomic_fetch_or(log_base + elem->offset, elem->val,
+				__ATOMIC_RELAXED);
+#endif
+	}
+
+	rte_smp_wmb();
+
+	vq->log_cache_nb_elem = 0;
+}
+
+static __rte_always_inline void
+vhost_log_cache_page(struct virtio_net *dev, struct vhost_virtqueue *vq,
+			uint64_t page)
+{
+	uint32_t bit_nr = page % (sizeof(unsigned long) << 3);
+	uint32_t offset = page / (sizeof(unsigned long) << 3);
+	int i;
+
+	for (i = 0; i < vq->log_cache_nb_elem; i++) {
+		struct log_cache_entry *elem = vq->log_cache + i;
+
+		if (elem->offset == offset) {
+			elem->val |= (1UL << bit_nr);
+			return;
+		}
+	}
+
+	if (unlikely(i >= VHOST_LOG_CACHE_NR)) {
+		/*
+		 * No more room for a new log cache entry,
+		 * so write the dirty log map directly.
+		 */
+		rte_smp_wmb();
+		vhost_log_page((uint8_t *)(uintptr_t)dev->log_base, page);
+
+		return;
+	}
+
+	vq->log_cache[i].offset = offset;
+	vq->log_cache[i].val = (1UL << bit_nr);
+	vq->log_cache_nb_elem++;
+}
+
+static __rte_always_inline void
+vhost_log_cache_write(struct virtio_net *dev, struct vhost_virtqueue *vq,
+			uint64_t addr, uint64_t len)
+{
+	uint64_t page;
+
+	if (likely(((dev->features & (1ULL << VHOST_F_LOG_ALL)) == 0) ||
+		   !dev->log_base || !len))
+		return;
+
+	if (unlikely(dev->log_size <= ((addr + len - 1) / VHOST_LOG_PAGE / 8)))
+		return;
+
+	page = addr / VHOST_LOG_PAGE;
+	while (page * VHOST_LOG_PAGE < addr + len) {
+		vhost_log_cache_page(dev, vq, page);
+		page += 1;
+	}
+}
+
+static __rte_always_inline void
+vhost_log_cache_used_vring(struct virtio_net *dev, struct vhost_virtqueue *vq,
+			uint64_t offset, uint64_t len)
+{
+	vhost_log_cache_write(dev, vq, vq->log_guest_addr + offset, len);
+}
+
 static __rte_always_inline void
 vhost_log_used_vring(struct virtio_net *dev, struct vhost_virtqueue *vq,
 			uint64_t offset, uint64_t len)
@@ -281,7 +458,7 @@ extern uint64_t VHOST_FEATURES;
 extern struct virtio_net *vhost_devices[MAX_VHOST_DEVICE];
 
 /* Convert guest physical address to host physical address */
-static __rte_always_inline phys_addr_t
+static __rte_always_inline rte_iova_t
 gpa_to_hpa(struct virtio_net *dev, uint64_t gpa, uint64_t size)
 {
 	uint32_t i;
@@ -321,4 +498,19 @@ struct vhost_device_ops const *vhost_driver_callback_get(const char *path);
  */
 void vhost_backend_cleanup(struct virtio_net *dev);
 
+uint64_t __vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
+			uint64_t iova, uint64_t *len, uint8_t perm);
+int vring_translate(struct virtio_net *dev, struct vhost_virtqueue *vq);
+void vring_invalidate(struct virtio_net *dev, struct vhost_virtqueue *vq);
+
+static __rte_always_inline uint64_t
+vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
+			uint64_t iova, uint64_t *len, uint8_t perm)
+{
+	if (!(dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)))
+		return rte_vhost_va_from_guest_pa(dev->mem, iova, len);
+
+	return __vhost_iova_to_vva(dev, vq, iova, len, perm);
+}
+
 #endif /* _VHOST_NET_CDEV_H_ */
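
Note, not part of the patch: the dirty-page log cache added above keeps up to VHOST_LOG_CACHE_NR entries per virtqueue, each pairing a bitmap word offset with the bits accumulated for that word, so the shared live-migration log is updated with one atomic OR per word (in vhost_log_cache_sync()) instead of one write per dirty page; the cache is filled on the datapath through vhost_log_cache_write()/vhost_log_cache_used_vring(). The standalone sketch below only illustrates that coalescing scheme: the demo_* names, the fixed-size global cache and the atomic fallback path are invented for the example and are not DPDK APIs.

/*
 * Sketch of the per-word dirty-log cache idea (illustration only).
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DEMO_LOG_CACHE_NR 32
#define BITS_PER_WORD     (sizeof(unsigned long) * 8)

struct demo_log_cache_entry {
	uint32_t offset;	/* word index into the dirty bitmap */
	unsigned long val;	/* bits accumulated for that word */
};

static struct demo_log_cache_entry cache[DEMO_LOG_CACHE_NR];
static unsigned int cache_nb;

/* Mark one page dirty, coalescing into an existing cache entry if possible. */
static void
demo_log_cache_write(unsigned long *log_base, uint64_t page)
{
	uint32_t offset = page / BITS_PER_WORD;
	uint32_t bit_nr = page % BITS_PER_WORD;
	unsigned int i;

	for (i = 0; i < cache_nb; i++) {
		if (cache[i].offset == offset) {
			cache[i].val |= 1UL << bit_nr;
			return;
		}
	}

	if (cache_nb == DEMO_LOG_CACHE_NR) {
		/* Cache full: update the shared bitmap directly. */
		__atomic_fetch_or(&log_base[offset], 1UL << bit_nr,
				  __ATOMIC_RELAXED);
		return;
	}

	cache[cache_nb].offset = offset;
	cache[cache_nb].val = 1UL << bit_nr;
	cache_nb++;
}

/* Flush the cache: one atomic OR per dirty bitmap word. */
static void
demo_log_cache_sync(unsigned long *log_base)
{
	unsigned int i;

	for (i = 0; i < cache_nb; i++)
		__atomic_fetch_or(&log_base[cache[i].offset], cache[i].val,
				  __ATOMIC_RELAXED);
	cache_nb = 0;
}

int
main(void)
{
	unsigned long bitmap[4];

	memset(bitmap, 0, sizeof(bitmap));
	demo_log_cache_write(bitmap, 3);
	demo_log_cache_write(bitmap, 5);	/* coalesced with page 3 */
	demo_log_cache_write(bitmap, 70);	/* second cache entry */
	demo_log_cache_sync(bitmap);
	printf("word0=%lx word1=%lx\n", bitmap[0], bitmap[1]);
	return 0;
}

Built with gcc, this prints "word0=28 word1=40": pages 3 and 5 share one bitmap word and one flush, while page 70 lands in a second cache entry.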
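
Note, not part of the patch: vhost_iova_to_vva(), added at the bottom of the header, only takes the new IOTLB path (__vhost_iova_to_vva()) when VIRTIO_F_IOMMU_PLATFORM has been negotiated; otherwise the addresses found in the descriptors are guest physical addresses and are resolved against the memory regions the master registered, which is what rte_vhost_va_from_guest_pa() does, shrinking *len if the requested range crosses a region boundary. The sketch below shows that fallback translation in isolation; struct demo_mem_region, demo_gpa_to_vva() and the example region table are invented for illustration.

/*
 * Sketch of guest-physical to host-virtual translation over a region
 * table (illustration only, not the DPDK implementation).
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

struct demo_mem_region {
	uint64_t guest_phys_addr;
	uint64_t size;
	uint64_t host_user_addr;	/* where the region is mmap'ed */
};

/*
 * Return the host virtual address covering [gpa, gpa + *len), shrinking
 * *len when the range runs past the end of the containing region.
 */
static uint64_t
demo_gpa_to_vva(const struct demo_mem_region *regions, int nr_regions,
		uint64_t gpa, uint64_t *len)
{
	int i;

	for (i = 0; i < nr_regions; i++) {
		const struct demo_mem_region *r = &regions[i];

		if (gpa >= r->guest_phys_addr &&
		    gpa < r->guest_phys_addr + r->size) {
			uint64_t avail = r->guest_phys_addr + r->size - gpa;

			if (*len > avail)
				*len = avail;
			return r->host_user_addr + (gpa - r->guest_phys_addr);
		}
	}

	*len = 0;
	return 0;
}

int
main(void)
{
	struct demo_mem_region regions[] = {
		{ 0x0ULL,         0x40000000ULL, 0x7f0000000000ULL },
		{ 0x100000000ULL, 0x40000000ULL, 0x7f8000000000ULL },
	};
	uint64_t len = 4096;
	uint64_t vva = demo_gpa_to_vva(regions, 2, 0x100001000ULL, &len);

	printf("vva=0x%" PRIx64 " len=%" PRIu64 "\n", vva, len);
	return 0;
}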