New upstream version 17.11.4
[deb_dpdk.git] / lib / librte_vhost / vhost.h
index 1cc81c1..11ce271 100644
@@ -59,6 +59,8 @@
 
 #define BUF_VECTOR_MAX 256
 
+#define VHOST_LOG_CACHE_NR 32
+
 /**
  * Structure contains buffer address, length and descriptor index
  * from vring to do scatter RX.
@@ -92,6 +94,14 @@ struct batch_copy_elem {
        uint64_t log_addr;
 };
 
+/*
+ * Structure that contains the info for batched dirty logging.
+ */
+struct log_cache_entry {
+       uint32_t offset;
+       unsigned long val;
+};
+
 /**
  * Structure contains variables relevant to RX/TX virtqueues.
  */
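
The log_cache_entry structure introduced above shadows one machine word of the guest dirty bitmap: offset is the word's index in the bitmap and val accumulates the dirty bits for the pages that word covers. A minimal stand-alone sketch of that mapping (the main() driver and the example page number are illustrative, not part of vhost.h):

    #include <stdint.h>
    #include <stdio.h>

    #define BITS_PER_LONG (sizeof(unsigned long) * 8)

    struct log_cache_entry {
        uint32_t offset;      /* word index in the dirty bitmap */
        unsigned long val;    /* accumulated dirty bits for that word */
    };

    int main(void)
    {
        struct log_cache_entry e = { 0, 0 };
        uint64_t page = 1027;    /* example dirty page number */

        e.offset = page / BITS_PER_LONG;
        e.val |= 1UL << (page % BITS_PER_LONG);

        /* on a 64-bit target: page 1027 -> word 16, mask 0x8 */
        printf("page %llu -> word %u, mask 0x%lx\n",
               (unsigned long long)page, e.offset, e.val);
        return 0;
    }
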
@@ -108,12 +118,14 @@ struct vhost_virtqueue {
 
        /* Backend value to determine if device should be started/stopped */
        int                     backend;
+       int                     enabled;
+       int                     access_ok;
+       rte_spinlock_t          access_lock;
+
        /* Used to notify the guest (trigger interrupt) */
        int                     callfd;
        /* Currently unused as polling mode is enabled */
        int                     kickfd;
-       int                     enabled;
-       int                     access_ok;
 
        /* Physical address of used ring, for logging */
        uint64_t                log_guest_addr;
@@ -131,6 +143,9 @@ struct vhost_virtqueue {
        struct batch_copy_elem  *batch_copy_elems;
        uint16_t                batch_copy_nb_elems;
 
+       struct log_cache_entry log_cache[VHOST_LOG_CACHE_NR];
+       uint16_t log_cache_nb_elem;
+
        rte_rwlock_t    iotlb_lock;
        rte_rwlock_t    iotlb_pending_lock;
        struct rte_mempool *iotlb_pool;
@@ -264,7 +279,15 @@ struct virtio_net {
 static __rte_always_inline void
 vhost_set_bit(unsigned int nr, volatile uint8_t *addr)
 {
-       __sync_fetch_and_or_8(addr, (1U << nr));
+#if defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION < 70100)
+       /*
+        * The __sync_ built-ins are deprecated, but the __atomic_
+        * ones generate poorly optimized code on GCC older than 7.1.
+        */
+       __sync_fetch_and_or_1(addr, (1U << nr));
+#else
+       __atomic_fetch_or(addr, (1U << nr), __ATOMIC_RELAXED);
+#endif
 }
 
 static __rte_always_inline void
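
The rewritten vhost_set_bit() above fixes two things: the __sync built-in's size suffix (the suffix is the operand size in bytes, so _1 matches the uint8_t operand while the previous _8 was the 8-byte variant), and it prefers __atomic_fetch_or() with relaxed ordering on newer compilers. A stand-alone sketch of the same byte-wide relaxed OR (GCC/Clang built-ins assumed; the helper name is made up):

    #include <stdint.h>
    #include <stdio.h>

    static void set_bit_relaxed(unsigned int nr, volatile uint8_t *addr)
    {
        /* byte-wide atomic OR; relaxed ordering is enough because the
         * callers issue their own memory barriers around log updates */
        __atomic_fetch_or(addr, (uint8_t)(1U << nr), __ATOMIC_RELAXED);
    }

    int main(void)
    {
        volatile uint8_t b = 0;

        set_bit_relaxed(3, &b);
        printf("0x%02x\n", (unsigned)b);    /* prints 0x08 */
        return 0;
    }
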
@@ -295,6 +318,103 @@ vhost_log_write(struct virtio_net *dev, uint64_t addr, uint64_t len)
        }
 }
 
+static __rte_always_inline void
+vhost_log_cache_sync(struct virtio_net *dev, struct vhost_virtqueue *vq)
+{
+       unsigned long *log_base;
+       int i;
+
+       if (likely(((dev->features & (1ULL << VHOST_F_LOG_ALL)) == 0) ||
+                  !dev->log_base))
+               return;
+
+       log_base = (unsigned long *)(uintptr_t)dev->log_base;
+
+       /*
+        * The caller is expected to have issued a write memory
+        * barrier before this function is called.
+        */
+
+       for (i = 0; i < vq->log_cache_nb_elem; i++) {
+               struct log_cache_entry *elem = vq->log_cache + i;
+
+#if defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION < 70100)
+               /*
+                * The __sync_ built-ins are deprecated, but the __atomic_
+                * ones generate poorly optimized code on GCC older than 7.1.
+                */
+               __sync_fetch_and_or(log_base + elem->offset, elem->val);
+#else
+               __atomic_fetch_or(log_base + elem->offset, elem->val,
+                               __ATOMIC_RELAXED);
+#endif
+       }
+
+       rte_smp_wmb();
+
+       vq->log_cache_nb_elem = 0;
+}
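
vhost_log_cache_sync() drains the per-virtqueue cache into the shared log with one atomic OR per cached word, then resets the element counter. A stand-alone model of the flush step, with a plain array standing in for the shared log (no atomics, illustrative only):

    #include <stdio.h>

    struct entry { unsigned int offset; unsigned long val; };

    int main(void)
    {
        unsigned long log[4] = { 0 };
        struct entry cache[] = { { 1, 0x5UL }, { 3, 0x80UL } };
        unsigned int nb_elem = 2;
        unsigned int i;

        for (i = 0; i < nb_elem; i++)
            log[cache[i].offset] |= cache[i].val;    /* OR word into log */
        nb_elem = 0;    /* cache emptied, like vq->log_cache_nb_elem = 0 */

        for (i = 0; i < 4; i++)
            printf("log[%u] = 0x%lx (cache entries left: %u)\n",
                   i, log[i], nb_elem);
        return 0;
    }
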
+
+static __rte_always_inline void
+vhost_log_cache_page(struct virtio_net *dev, struct vhost_virtqueue *vq,
+                       uint64_t page)
+{
+       uint32_t bit_nr = page % (sizeof(unsigned long) << 3);
+       uint32_t offset = page / (sizeof(unsigned long) << 3);
+       int i;
+
+       for (i = 0; i < vq->log_cache_nb_elem; i++) {
+               struct log_cache_entry *elem = vq->log_cache + i;
+
+               if (elem->offset == offset) {
+                       elem->val |= (1UL << bit_nr);
+                       return;
+               }
+       }
+
+       if (unlikely(i >= VHOST_LOG_CACHE_NR)) {
+               /*
+                * No more room for a new log cache entry,
+                * so write the dirty log map directly.
+                */
+               rte_smp_wmb();
+               vhost_log_page((uint8_t *)(uintptr_t)dev->log_base, page);
+
+               return;
+       }
+
+       vq->log_cache[i].offset = offset;
+       vq->log_cache[i].val = (1UL << bit_nr);
+       vq->log_cache_nb_elem++;
+}
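
vhost_log_cache_page() first tries to merge the new bit into an entry that already covers the same bitmap word; failing that, it appends a new entry, and only when the cache is full does it fall back to writing the shared log directly. A compact stand-alone model of that merge-or-append policy (cache size shrunk to 2 for illustration; the real code uses VHOST_LOG_CACHE_NR, i.e. 32):

    #include <stdint.h>
    #include <stdio.h>

    #define CACHE_NR 2
    #define BITS_PER_LONG (sizeof(unsigned long) * 8)

    struct entry { uint32_t offset; unsigned long val; };

    static struct entry cache[CACHE_NR];
    static unsigned int nb_elem;

    static void cache_page(uint64_t page)
    {
        uint32_t offset = page / BITS_PER_LONG;
        unsigned int i;

        for (i = 0; i < nb_elem; i++) {
            if (cache[i].offset == offset) {    /* merge into entry */
                cache[i].val |= 1UL << (page % BITS_PER_LONG);
                return;
            }
        }
        if (i >= CACHE_NR) {    /* full: real code writes log directly */
            printf("cache full, page %lu logged directly\n",
                   (unsigned long)page);
            return;
        }
        cache[i].offset = offset;    /* append a new entry */
        cache[i].val = 1UL << (page % BITS_PER_LONG);
        nb_elem++;
    }

    int main(void)
    {
        cache_page(0);      /* new entry for word 0 */
        cache_page(1);      /* merges into word 0 */
        cache_page(64);     /* new entry for word 1 */
        cache_page(128);    /* cache full -> direct write */
        printf("%u entries cached\n", nb_elem);
        return 0;
    }
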
+
+static __rte_always_inline void
+vhost_log_cache_write(struct virtio_net *dev, struct vhost_virtqueue *vq,
+                       uint64_t addr, uint64_t len)
+{
+       uint64_t page;
+
+       if (likely(((dev->features & (1ULL << VHOST_F_LOG_ALL)) == 0) ||
+                  !dev->log_base || !len))
+               return;
+
+       if (unlikely(dev->log_size <= ((addr + len - 1) / VHOST_LOG_PAGE / 8)))
+               return;
+
+       page = addr / VHOST_LOG_PAGE;
+       while (page * VHOST_LOG_PAGE < addr + len) {
+               vhost_log_cache_page(dev, vq, page);
+               page += 1;
+       }
+}
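
vhost_log_cache_write() walks every VHOST_LOG_PAGE-sized page overlapped by the guest range [addr, addr + len) and marks each one dirty in the cache. A stand-alone sketch of that walk (the example range is illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define VHOST_LOG_PAGE 4096

    int main(void)
    {
        uint64_t addr = 8100, len = 300;    /* spans pages 1 and 2 */
        uint64_t page = addr / VHOST_LOG_PAGE;

        while (page * VHOST_LOG_PAGE < addr + len) {
            printf("mark page %llu dirty\n", (unsigned long long)page);
            page += 1;
        }
        return 0;
    }
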
+
+static __rte_always_inline void
+vhost_log_cache_used_vring(struct virtio_net *dev, struct vhost_virtqueue *vq,
+                       uint64_t offset, uint64_t len)
+{
+       vhost_log_cache_write(dev, vq, vq->log_guest_addr + offset, len);
+}
+
 static __rte_always_inline void
 vhost_log_used_vring(struct virtio_net *dev, struct vhost_virtqueue *vq,
                     uint64_t offset, uint64_t len)
@@ -379,18 +499,18 @@ struct vhost_device_ops const *vhost_driver_callback_get(const char *path);
 void vhost_backend_cleanup(struct virtio_net *dev);
 
 uint64_t __vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
-                       uint64_t iova, uint64_t size, uint8_t perm);
+                       uint64_t iova, uint64_t *len, uint8_t perm);
 int vring_translate(struct virtio_net *dev, struct vhost_virtqueue *vq);
 void vring_invalidate(struct virtio_net *dev, struct vhost_virtqueue *vq);
 
 static __rte_always_inline uint64_t
 vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
-                       uint64_t iova, uint64_t size, uint8_t perm)
+                       uint64_t iova, uint64_t *len, uint8_t perm)
 {
        if (!(dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)))
-               return rte_vhost_gpa_to_vva(dev->mem, iova);
+               return rte_vhost_va_from_guest_pa(dev->mem, iova, len);
 
-       return __vhost_iova_to_vva(dev, vq, iova, size, perm);
+       return __vhost_iova_to_vva(dev, vq, iova, len, perm);
 }
 
 #endif /* _VHOST_NET_CDEV_H_ */
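
The last hunk changes __vhost_iova_to_vva() and vhost_iova_to_vva() from taking a plain size to an in/out length: the caller passes how much it wants mapped, and on return *len holds how much is actually contiguous in host virtual memory, so callers can detect regions split across mappings. A toy stand-alone model of that convention (all names below are illustrative, not the DPDK API):

    #include <stdint.h>
    #include <stdio.h>

    static uint8_t region[4096];    /* the only contiguously mapped area */

    /* toy translator: shrinks *len when the request crosses the mapping end */
    static uint64_t toy_iova_to_vva(uint64_t iova, uint64_t *len)
    {
        if (iova >= sizeof(region))
            return 0;    /* nothing mapped at this address */
        if (iova + *len > sizeof(region))
            *len = sizeof(region) - iova;
        return (uint64_t)(uintptr_t)(region + iova);
    }

    int main(void)
    {
        uint64_t len = 8192;    /* caller asks for 8 KiB */
        uint64_t vva = toy_iova_to_vva(1024, &len);

        /* only 3072 bytes are contiguous past offset 1024 */
        printf("vva=%p mapped_len=%llu\n",
               (void *)(uintptr_t)vva, (unsigned long long)len);
        return 0;
    }
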