New upstream version 17.11.4
[deb_dpdk.git] / lib / librte_vhost / vhost.h
index 22564f1..11ce271 100644
 #include <sys/queue.h>
 #include <unistd.h>
 #include <linux/vhost.h>
+#include <linux/virtio_net.h>
+#include <sys/socket.h>
+#include <linux/if.h>
 
 #include <rte_log.h>
+#include <rte_ether.h>
+#include <rte_rwlock.h>
 
-#include "rte_virtio_net.h"
+#include "rte_vhost.h"
 
 /* Used to indicate that the device is running on a data core */
 #define VIRTIO_DEV_RUNNING 1
+/* Used to indicate that the device is ready to operate */
+#define VIRTIO_DEV_READY 2
 
 /* Backend value set by guest. */
 #define VIRTIO_DEV_STOPPED -1
 
 #define BUF_VECTOR_MAX 256
 
+#define VHOST_LOG_CACHE_NR 32
+
 /**
  * Structure that contains the buffer address, length and descriptor
  * index from the vring, used for scatter RX.
@@ -75,6 +84,24 @@ struct zcopy_mbuf {
 };
 TAILQ_HEAD(zcopy_mbuf_list, zcopy_mbuf);
 
+/*
+ * Structure that contains the info for each batched memory copy.
+ */
+struct batch_copy_elem {
+       void *dst;
+       void *src;
+       uint32_t len;
+       uint64_t log_addr;
+};
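+
+/*
+ * Usage sketch (illustrative): the datapath is expected to queue
+ * copies into vq->batch_copy_elems and flush them in a single pass,
+ * e.g.:
+ *
+ *     struct batch_copy_elem *e = &vq->batch_copy_elems[i];
+ *
+ *     rte_memcpy(e->dst, e->src, e->len);
+ *     vhost_log_cache_write(dev, vq, e->log_addr, e->len);
+ *
+ * log_addr carries the guest physical address used for dirty page
+ * logging during live migration.
+ */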
+
+/*
+ * Structure that contains the info for batched dirty logging.
+ */
+struct log_cache_entry {
+       uint32_t offset;
+       unsigned long val;
+};
+
 /**
  * Structure contains variables relevant to RX/TX virtqueues.
  */
@@ -91,11 +118,14 @@ struct vhost_virtqueue {
 
        /* Backend value to determine if the device should be started/stopped */
        int                     backend;
+       int                     enabled;
+       int                     access_ok;
+       rte_spinlock_t          access_lock;
+
        /* Used to notify the guest (trigger interrupt) */
        int                     callfd;
        /* Currently unused as polling mode is enabled */
        int                     kickfd;
-       int                     enabled;
 
        /* Physical address of used ring, for logging */
        uint64_t                log_guest_addr;
@@ -108,26 +138,67 @@ struct vhost_virtqueue {
 
        struct vring_used_elem  *shadow_used_ring;
        uint16_t                shadow_used_idx;
+       struct vhost_vring_addr ring_addrs;
+
+       struct batch_copy_elem  *batch_copy_elems;
+       uint16_t                batch_copy_nb_elems;
+
+       struct log_cache_entry log_cache[VHOST_LOG_CACHE_NR];
+       uint16_t log_cache_nb_elem;
+
+       rte_rwlock_t    iotlb_lock;
+       rte_rwlock_t    iotlb_pending_lock;
+       struct rte_mempool *iotlb_pool;
+       TAILQ_HEAD(, vhost_iotlb_entry) iotlb_list;
+       int                             iotlb_cache_nr;
+       TAILQ_HEAD(, vhost_iotlb_entry) iotlb_pending_list;
 } __rte_cache_aligned;
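+
+/*
+ * Locking sketch (illustrative): the control path can invalidate the
+ * ring addresses at any time, so a datapath burst takes access_lock
+ * and re-translates when access_ok is cleared:
+ *
+ *     rte_spinlock_lock(&vq->access_lock);
+ *     if (unlikely(vq->access_ok == 0))
+ *             vring_translate(dev, vq);
+ *     ...
+ *     rte_spinlock_unlock(&vq->access_lock);
+ */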
 
-/* Old kernels have no such macro defined */
+/* Old kernels have no such macros defined */
 #ifndef VIRTIO_NET_F_GUEST_ANNOUNCE
  #define VIRTIO_NET_F_GUEST_ANNOUNCE 21
 #endif
 
+#ifndef VIRTIO_NET_F_MQ
+ #define VIRTIO_NET_F_MQ               22
+#endif
 
-/*
- * Make an extra wrapper for VIRTIO_NET_F_MQ and
- * VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX as they are
- * introduced since kernel v3.8. This makes our
- * code buildable for older kernel.
- */
-#ifdef VIRTIO_NET_F_MQ
- #define VHOST_MAX_QUEUE_PAIRS VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX
- #define VHOST_SUPPORTS_MQ     (1ULL << VIRTIO_NET_F_MQ)
-#else
- #define VHOST_MAX_QUEUE_PAIRS 1
- #define VHOST_SUPPORTS_MQ     0
+#define VHOST_MAX_VRING                        0x100
+#define VHOST_MAX_QUEUE_PAIRS          0x80
+
+#ifndef VIRTIO_NET_F_MTU
+ #define VIRTIO_NET_F_MTU 3
+#endif
+
+/* Declare IOMMU-related bits for older kernels */
+#ifndef VIRTIO_F_IOMMU_PLATFORM
+
+#define VIRTIO_F_IOMMU_PLATFORM 33
+
+struct vhost_iotlb_msg {
+       __u64 iova;
+       __u64 size;
+       __u64 uaddr;
+#define VHOST_ACCESS_RO      0x1
+#define VHOST_ACCESS_WO      0x2
+#define VHOST_ACCESS_RW      0x3
+       __u8 perm;
+#define VHOST_IOTLB_MISS           1
+#define VHOST_IOTLB_UPDATE         2
+#define VHOST_IOTLB_INVALIDATE     3
+#define VHOST_IOTLB_ACCESS_FAIL    4
+       __u8 type;
+};
+
+#define VHOST_IOTLB_MSG 0x1
+
+struct vhost_msg {
+       int type;
+       union {
+               struct vhost_iotlb_msg iotlb;
+               __u8 padding[64];
+       };
+};
 #endif
 
 /*
@@ -137,6 +208,28 @@ struct vhost_virtqueue {
  #define VIRTIO_F_VERSION_1 32
 #endif
 
+#define VHOST_USER_F_PROTOCOL_FEATURES 30
+
+/* Features supported by this built-in vhost-user net driver. */
+#define VIRTIO_NET_SUPPORTED_FEATURES ((1ULL << VIRTIO_NET_F_MRG_RXBUF) | \
+                               (1ULL << VIRTIO_NET_F_CTRL_VQ) | \
+                               (1ULL << VIRTIO_NET_F_CTRL_RX) | \
+                               (1ULL << VIRTIO_NET_F_GUEST_ANNOUNCE) | \
+                               (1ULL << VIRTIO_NET_F_MQ)      | \
+                               (1ULL << VIRTIO_F_VERSION_1)   | \
+                               (1ULL << VHOST_F_LOG_ALL)      | \
+                               (1ULL << VHOST_USER_F_PROTOCOL_FEATURES) | \
+                               (1ULL << VIRTIO_NET_F_HOST_TSO4) | \
+                               (1ULL << VIRTIO_NET_F_HOST_TSO6) | \
+                               (1ULL << VIRTIO_NET_F_CSUM)    | \
+                               (1ULL << VIRTIO_NET_F_GUEST_CSUM) | \
+                               (1ULL << VIRTIO_NET_F_GUEST_TSO4) | \
+                               (1ULL << VIRTIO_NET_F_GUEST_TSO6) | \
+                               (1ULL << VIRTIO_RING_F_INDIRECT_DESC) | \
+                               (1ULL << VIRTIO_NET_F_MTU) | \
+                               (1ULL << VIRTIO_F_IOMMU_PLATFORM))
+
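+/*
+ * Negotiation sketch (illustrative): a device ends up with the
+ * intersection of this mask and whatever the frontend acknowledges;
+ * 'frontend_features' here is a hypothetical variable:
+ *
+ *     dev->features = VIRTIO_NET_SUPPORTED_FEATURES & frontend_features;
+ */
+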
 struct guest_page {
        uint64_t guest_phys_addr;
        uint64_t host_phys_addr;
@@ -149,7 +242,7 @@ struct guest_page {
  */
 struct virtio_net {
        /* Frontend (QEMU) memory and memory region information */
-       struct virtio_memory    *mem;
+       struct rte_vhost_memory *mem;
        uint64_t                features;
        uint64_t                protocol_features;
        int                     vid;
@@ -157,7 +250,7 @@ struct virtio_net {
        uint16_t                vhost_hlen;
        /* to tell if we need broadcast rarp packet */
        rte_atomic16_t          broadcast_rarp;
-       uint32_t                virt_qp_nb;
+       uint32_t                nr_vring;
        int                     dequeue_zero_copy;
        struct vhost_virtqueue  *virtqueue[VHOST_MAX_QUEUE_PAIRS * 2];
 #define IF_NAME_SZ (PATH_MAX > IFNAMSIZ ? PATH_MAX : IFNAMSIZ)
@@ -166,35 +259,168 @@ struct virtio_net {
        uint64_t                log_base;
        uint64_t                log_addr;
        struct ether_addr       mac;
+       uint16_t                mtu;
+
+       struct vhost_device_ops const *notify_ops;
 
        uint32_t                nr_guest_pages;
        uint32_t                max_guest_pages;
        struct guest_page       *guest_pages;
+
+       int                     slave_req_fd;
 } __rte_cache_aligned;
 
-/**
- * Information relating to memory regions including offsets to
- * addresses in QEMUs memory file.
- */
-struct virtio_memory_region {
-       uint64_t guest_phys_addr;
-       uint64_t guest_user_addr;
-       uint64_t host_user_addr;
-       uint64_t size;
-       void     *mmap_addr;
-       uint64_t mmap_size;
-       int fd;
-};
 
+#define VHOST_LOG_PAGE 4096
 
-/**
- * Memory structure includes region and mapping information.
+/*
+ * Atomically set a bit in memory.
  */
-struct virtio_memory {
-       uint32_t nregions;
-       struct virtio_memory_region regions[0];
-};
+static __rte_always_inline void
+vhost_set_bit(unsigned int nr, volatile uint8_t *addr)
+{
+#if defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION < 70100)
+       /*
+        * The __sync_ built-ins are deprecated, but the __atomic_ ones
+        * are poorly optimized in older GCC versions.
+        */
+       __sync_fetch_and_or_1(addr, (1U << nr));
+#else
+       __atomic_fetch_or(addr, (1U << nr), __ATOMIC_RELAXED);
+#endif
+}
+
+static __rte_always_inline void
+vhost_log_page(uint8_t *log_base, uint64_t page)
+{
+       vhost_set_bit(page % 8, &log_base[page / 8]);
+}
+
+static __rte_always_inline void
+vhost_log_write(struct virtio_net *dev, uint64_t addr, uint64_t len)
+{
+       uint64_t page;
+
+       if (likely(((dev->features & (1ULL << VHOST_F_LOG_ALL)) == 0) ||
+                  !dev->log_base || !len))
+               return;
+
+       if (unlikely(dev->log_size <= ((addr + len - 1) / VHOST_LOG_PAGE / 8)))
+               return;
+
+       /* To make sure guest memory updates are committed before logging */
+       rte_smp_wmb();
+
+       page = addr / VHOST_LOG_PAGE;
+       while (page * VHOST_LOG_PAGE < addr + len) {
+               vhost_log_page((uint8_t *)(uintptr_t)dev->log_base, page);
+               page += 1;
+       }
+}
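+
+/*
+ * Usage sketch (illustrative): after writing len bytes into guest
+ * memory at guest physical address gpa, mark the touched pages dirty
+ * so live migration picks them up:
+ *
+ *     rte_memcpy(dst, src, len);
+ *     vhost_log_write(dev, gpa, len);
+ */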
+
+static __rte_always_inline void
+vhost_log_cache_sync(struct virtio_net *dev, struct vhost_virtqueue *vq)
+{
+       unsigned long *log_base;
+       int i;
+
+       if (likely(((dev->features & (1ULL << VHOST_F_LOG_ALL)) == 0) ||
+                  !dev->log_base))
+               return;
+
+       log_base = (unsigned long *)(uintptr_t)dev->log_base;
+
+       /*
+        * It is expected that a write memory barrier has been issued
+        * before this function is called.
+        */
+
+       for (i = 0; i < vq->log_cache_nb_elem; i++) {
+               struct log_cache_entry *elem = vq->log_cache + i;
+
+#if defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION < 70100)
+               /*
+                * The '__sync' built-ins are deprecated, but the '__atomic'
+                * ones are poorly optimized in older GCC versions.
+                */
+               __sync_fetch_and_or(log_base + elem->offset, elem->val);
+#else
+               __atomic_fetch_or(log_base + elem->offset, elem->val,
+                               __ATOMIC_RELAXED);
+#endif
+       }
+
+       rte_smp_wmb();
+
+       vq->log_cache_nb_elem = 0;
+}
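+
+/*
+ * Flush sketch (illustrative): the per-queue cache is meant to be
+ * merged into the shared log once per burst, after the used ring has
+ * been updated and a write barrier has been issued:
+ *
+ *     rte_smp_wmb();
+ *     vhost_log_cache_sync(dev, vq);
+ */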
+
+static __rte_always_inline void
+vhost_log_cache_page(struct virtio_net *dev, struct vhost_virtqueue *vq,
+                       uint64_t page)
+{
+       uint32_t bit_nr = page % (sizeof(unsigned long) << 3);
+       uint32_t offset = page / (sizeof(unsigned long) << 3);
+       int i;
+
+       for (i = 0; i < vq->log_cache_nb_elem; i++) {
+               struct log_cache_entry *elem = vq->log_cache + i;
+
+               if (elem->offset == offset) {
+                       elem->val |= (1UL << bit_nr);
+                       return;
+               }
+       }
+
+       if (unlikely(i >= VHOST_LOG_CACHE_NR)) {
+               /*
+                * No more room for a new log cache entry,
+                * so write the dirty log map directly.
+                */
+               rte_smp_wmb();
+               vhost_log_page((uint8_t *)(uintptr_t)dev->log_base, page);
+
+               return;
+       }
+
+       vq->log_cache[i].offset = offset;
+       vq->log_cache[i].val = (1UL << bit_nr);
+       vq->log_cache_nb_elem++;
+}
+
+static __rte_always_inline void
+vhost_log_cache_write(struct virtio_net *dev, struct vhost_virtqueue *vq,
+                       uint64_t addr, uint64_t len)
+{
+       uint64_t page;
+
+       if (likely(((dev->features & (1ULL << VHOST_F_LOG_ALL)) == 0) ||
+                  !dev->log_base || !len))
+               return;
 
+       if (unlikely(dev->log_size <= ((addr + len - 1) / VHOST_LOG_PAGE / 8)))
+               return;
+
+       page = addr / VHOST_LOG_PAGE;
+       while (page * VHOST_LOG_PAGE < addr + len) {
+               vhost_log_cache_page(dev, vq, page);
+               page += 1;
+       }
+}
+
+static __rte_always_inline void
+vhost_log_cache_used_vring(struct virtio_net *dev, struct vhost_virtqueue *vq,
+                       uint64_t offset, uint64_t len)
+{
+       vhost_log_cache_write(dev, vq, vq->log_guest_addr + offset, len);
+}
+
+static __rte_always_inline void
+vhost_log_used_vring(struct virtio_net *dev, struct vhost_virtqueue *vq,
+                    uint64_t offset, uint64_t len)
+{
+       vhost_log_write(dev, vq->log_guest_addr + offset, len);
+}
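+
+/*
+ * Usage sketch (illustrative): after updating used ring entry i, log
+ * the modified bytes relative to the used ring base:
+ *
+ *     vhost_log_cache_used_vring(dev, vq,
+ *                     offsetof(struct vring_used, ring[i]),
+ *                     sizeof(struct vring_used_elem));
+ */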
 
 /* Macros for printing using RTE_LOG */
 #define RTE_LOGTYPE_VHOST_CONFIG RTE_LOGTYPE_USER1
@@ -231,27 +457,8 @@ extern uint64_t VHOST_FEATURES;
 #define MAX_VHOST_DEVICE       1024
 extern struct virtio_net *vhost_devices[MAX_VHOST_DEVICE];
 
-/* Convert guest physical Address to host virtual address */
-static inline uint64_t __attribute__((always_inline))
-gpa_to_vva(struct virtio_net *dev, uint64_t gpa)
-{
-       struct virtio_memory_region *reg;
-       uint32_t i;
-
-       for (i = 0; i < dev->mem->nregions; i++) {
-               reg = &dev->mem->regions[i];
-               if (gpa >= reg->guest_phys_addr &&
-                   gpa <  reg->guest_phys_addr + reg->size) {
-                       return gpa - reg->guest_phys_addr +
-                              reg->host_user_addr;
-               }
-       }
-
-       return 0;
-}
-
 /* Convert guest physical address to host physical address */
-static inline phys_addr_t __attribute__((always_inline))
+static __rte_always_inline rte_iova_t
 gpa_to_hpa(struct virtio_net *dev, uint64_t gpa, uint64_t size)
 {
        uint32_t i;
@@ -270,7 +477,6 @@ gpa_to_hpa(struct virtio_net *dev, uint64_t gpa, uint64_t size)
        return 0;
 }
 
-struct virtio_net_device_ops const *notify_ops;
 struct virtio_net *get_device(int vid);
 
 int vhost_new_device(void);
@@ -278,11 +484,13 @@ void cleanup_device(struct virtio_net *dev, int destroy);
 void reset_device(struct virtio_net *dev);
 void vhost_destroy_device(int);
 
-int alloc_vring_queue_pair(struct virtio_net *dev, uint32_t qp_idx);
+int alloc_vring_queue(struct virtio_net *dev, uint32_t vring_idx);
 
 void vhost_set_ifname(int, const char *if_name, unsigned int if_len);
 void vhost_enable_dequeue_zero_copy(int vid);
 
+struct vhost_device_ops const *vhost_driver_callback_get(const char *path);
+
 /*
  * Backend-specific cleanup.
  *
@@ -290,4 +498,19 @@ void vhost_enable_dequeue_zero_copy(int vid);
  */
 void vhost_backend_cleanup(struct virtio_net *dev);
 
+uint64_t __vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
+                       uint64_t iova, uint64_t *len, uint8_t perm);
+int vring_translate(struct virtio_net *dev, struct vhost_virtqueue *vq);
+void vring_invalidate(struct virtio_net *dev, struct vhost_virtqueue *vq);
+
+static __rte_always_inline uint64_t
+vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
+                       uint64_t iova, uint64_t *len, uint8_t perm)
+{
+       if (!(dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)))
+               return rte_vhost_va_from_guest_pa(dev->mem, iova, len);
+
+       return __vhost_iova_to_vva(dev, vq, iova, len, perm);
+}
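+
+/*
+ * Translation sketch (illustrative): to read a descriptor buffer,
+ * request a read-only mapping and check that the whole length was
+ * translated; a zero or short result means an IOTLB miss:
+ *
+ *     uint64_t len = desc->len;
+ *     uint64_t vva = vhost_iova_to_vva(dev, vq, desc->addr,
+ *                     &len, VHOST_ACCESS_RO);
+ *     if (unlikely(!vva || len < desc->len))
+ *             return -1;
+ */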
+
 #endif /* _VHOST_NET_CDEV_H_ */