/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _VHOST_NET_CDEV_H_
#define _VHOST_NET_CDEV_H_

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>
#include <limits.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/queue.h>
#include <linux/vhost.h>
#include <linux/virtio_net.h>
#include <sys/socket.h>
#include <linux/if.h>

#include <rte_log.h>
#include <rte_ether.h>
#include <rte_rwlock.h>
#include <rte_atomic.h>
#include <rte_spinlock.h>

#include "rte_vhost.h"

/* Used to indicate that the device is running on a data core */
#define VIRTIO_DEV_RUNNING 1
/* Used to indicate that the device is ready to operate */
#define VIRTIO_DEV_READY 2

/* Backend value set by guest. */
#define VIRTIO_DEV_STOPPED -1

#define BUF_VECTOR_MAX 256
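
/*
 * Max number of per-virtqueue dirty-log cache entries: each entry caches
 * one unsigned long worth of bits from the shared dirty-log bitmap (see
 * struct log_cache_entry and vhost_log_cache_page() below). When the
 * cache is full, pages are logged directly to the bitmap instead.
 */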
#define VHOST_LOG_CACHE_NR 32

/**
 * Structure contains buffer address, length and descriptor index
 * from vring to do scatter RX.
 */
struct buf_vector {
	uint64_t buf_addr;
	uint32_t buf_len;
	uint32_t desc_idx;
};

/*
 * A structure to hold some fields needed in zero copy code path,
 * mainly for associating an mbuf with the right desc_idx.
 */
struct zcopy_mbuf {
	struct rte_mbuf *mbuf;
	uint32_t desc_idx;
	uint16_t in_use;

	TAILQ_ENTRY(zcopy_mbuf) next;
};
TAILQ_HEAD(zcopy_mbuf_list, zcopy_mbuf);
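
/*
 * Note on the dequeue zero-copy path: transmitted mbufs reference guest
 * buffers directly instead of copying them, so a zcopy_mbuf keeps the
 * mbuf-to-desc_idx association until the application frees the mbuf and
 * the descriptor can be returned to the used ring.
 */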

/*
 * Structure contains the info for each batched memory copy.
 */
struct batch_copy_elem {
	void *dst;
	void *src;
	uint32_t len;
	uint64_t log_addr;
};

/*
 * Structure that contains the info for batched dirty logging.
 */
struct log_cache_entry {
	uint32_t offset;
	unsigned long val;
};

/**
 * Structure contains variables relevant to RX/TX virtqueues.
 */
struct vhost_virtqueue {
	struct vring_desc	*desc;
	struct vring_avail	*avail;
	struct vring_used	*used;
	uint32_t		size;

	uint16_t		last_avail_idx;
	uint16_t		last_used_idx;
#define VIRTIO_INVALID_EVENTFD		(-1)
#define VIRTIO_UNINITIALIZED_EVENTFD	(-2)

	/* Backend value to determine if device should be started/stopped */
	int			backend;
	int			enabled;
	int			access_ok;
	rte_spinlock_t		access_lock;

	/* Used to notify the guest (trigger interrupt) */
	int			callfd;
	/* Currently unused as polling mode is enabled */
	int			kickfd;

	/* Physical address of used ring, for logging */
	uint64_t		log_guest_addr;

	uint16_t		nr_zmbuf;
	uint16_t		zmbuf_size;
	uint16_t		last_zmbuf_idx;
	struct zcopy_mbuf	*zmbufs;
	struct zcopy_mbuf_list	zmbuf_list;

	struct vring_used_elem	*shadow_used_ring;
	uint16_t		shadow_used_idx;
	struct vhost_vring_addr	ring_addrs;

	struct batch_copy_elem	*batch_copy_elems;
	uint16_t		batch_copy_nb_elems;

	struct log_cache_entry	log_cache[VHOST_LOG_CACHE_NR];
	uint16_t		log_cache_nb_elem;

	rte_rwlock_t		iotlb_lock;
	rte_rwlock_t		iotlb_pending_lock;
	struct rte_mempool	*iotlb_pool;
	TAILQ_HEAD(, vhost_iotlb_entry) iotlb_list;
	TAILQ_HEAD(, vhost_iotlb_entry) iotlb_pending_list;
} __rte_cache_aligned;

/* Old kernels have no such macros defined */
#ifndef VIRTIO_NET_F_GUEST_ANNOUNCE
 #define VIRTIO_NET_F_GUEST_ANNOUNCE 21
#endif

#ifndef VIRTIO_NET_F_MQ
 #define VIRTIO_NET_F_MQ	22
#endif

#define VHOST_MAX_VRING		0x100
#define VHOST_MAX_QUEUE_PAIRS	0x80

#ifndef VIRTIO_NET_F_MTU
 #define VIRTIO_NET_F_MTU 3
#endif

/* Declare IOMMU related bits for older kernels */
#ifndef VIRTIO_F_IOMMU_PLATFORM

#define VIRTIO_F_IOMMU_PLATFORM 33

struct vhost_iotlb_msg {
	__u64 iova;
	__u64 size;
	__u64 uaddr;
#define VHOST_ACCESS_RO      0x1
#define VHOST_ACCESS_WO      0x2
#define VHOST_ACCESS_RW      0x3
	__u8 perm;
#define VHOST_IOTLB_MISS           1
#define VHOST_IOTLB_UPDATE         2
#define VHOST_IOTLB_INVALIDATE     3
#define VHOST_IOTLB_ACCESS_FAIL    4
	__u8 type;
};

#define VHOST_IOTLB_MSG 0x1

struct vhost_msg {
	int type;
	union {
		struct vhost_iotlb_msg iotlb;
		__u8 padding[64];
	};
};
#endif

/*
 * Define virtio 1.0 for older kernels
 */
#ifndef VIRTIO_F_VERSION_1
 #define VIRTIO_F_VERSION_1 32
#endif

#define VHOST_USER_F_PROTOCOL_FEATURES	30

/* Features supported by this builtin vhost-user net driver. */
#define VIRTIO_NET_SUPPORTED_FEATURES ((1ULL << VIRTIO_NET_F_MRG_RXBUF) | \
				(1ULL << VIRTIO_NET_F_CTRL_VQ) | \
				(1ULL << VIRTIO_NET_F_CTRL_RX) | \
				(1ULL << VIRTIO_NET_F_GUEST_ANNOUNCE) | \
				(1ULL << VIRTIO_NET_F_MQ) | \
				(1ULL << VIRTIO_F_VERSION_1) | \
				(1ULL << VHOST_F_LOG_ALL) | \
				(1ULL << VHOST_USER_F_PROTOCOL_FEATURES) | \
				(1ULL << VIRTIO_NET_F_HOST_TSO4) | \
				(1ULL << VIRTIO_NET_F_HOST_TSO6) | \
				(1ULL << VIRTIO_NET_F_CSUM) | \
				(1ULL << VIRTIO_NET_F_GUEST_CSUM) | \
				(1ULL << VIRTIO_NET_F_GUEST_TSO4) | \
				(1ULL << VIRTIO_NET_F_GUEST_TSO6) | \
				(1ULL << VIRTIO_RING_F_INDIRECT_DESC) | \
				(1ULL << VIRTIO_NET_F_MTU) | \
				(1ULL << VIRTIO_F_IOMMU_PLATFORM))
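
/*
 * The features a device actually runs with are the intersection of the
 * set above and the bits acked by the frontend during negotiation;
 * conceptually (illustrative sketch only, "frontend_features" is not a
 * symbol defined in this file):
 *
 *	dev->features = VIRTIO_NET_SUPPORTED_FEATURES & frontend_features;
 */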

struct guest_page {
	uint64_t guest_phys_addr;
	uint64_t host_phys_addr;
	uint64_t size;
};

/**
 * Device structure contains all configuration information relating
 * to the device.
 */
struct virtio_net {
	/* Frontend (QEMU) memory and memory region information */
	struct rte_vhost_memory	*mem;
	uint64_t		features;
	uint64_t		protocol_features;
	int			vid;
	uint32_t		flags;
	uint16_t		vhost_hlen;
	/* to tell if we need to broadcast a RARP packet */
	rte_atomic16_t		broadcast_rarp;
	uint32_t		nr_vring;
	int			dequeue_zero_copy;
	struct vhost_virtqueue	*virtqueue[VHOST_MAX_QUEUE_PAIRS * 2];
#define IF_NAME_SZ (PATH_MAX > IFNAMSIZ ? PATH_MAX : IFNAMSIZ)
	char			ifname[IF_NAME_SZ];
	uint64_t		log_size;
	uint64_t		log_base;
	uint64_t		log_addr;
	struct ether_addr	mac;
	uint16_t		mtu;

	struct vhost_device_ops const *notify_ops;

	uint32_t		nr_guest_pages;
	uint32_t		max_guest_pages;
	struct guest_page	*guest_pages;

	int			slave_req_fd;
} __rte_cache_aligned;

#define VHOST_LOG_PAGE	4096
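
/*
 * Dirty-page logging for live migration: the frontend shares a bitmap
 * with one bit per VHOST_LOG_PAGE-sized page of guest memory (dev->log_base,
 * dev->log_size bytes long). While VHOST_F_LOG_ALL is negotiated, every
 * guest-memory write the backend performs must set the matching bit(s);
 * e.g. a write of len 0x1100 at guest-physical addr 0x3000 dirties
 * pages 3 and 4.
 */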

/*
 * Atomically set a bit in memory.
 */
static __rte_always_inline void
vhost_set_bit(unsigned int nr, volatile uint8_t *addr)
{
#if defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION < 70100)
	/*
	 * __sync_ built-ins are deprecated, but __atomic_ ones
	 * generate suboptimal code in older GCC versions.
	 */
	__sync_fetch_and_or_1(addr, (1U << nr));
#else
	__atomic_fetch_or(addr, (1U << nr), __ATOMIC_RELAXED);
#endif
}

static __rte_always_inline void
vhost_log_page(uint8_t *log_base, uint64_t page)
{
	/* One bit per page: byte page/8, bit page%8 within that byte. */
	vhost_set_bit(page % 8, &log_base[page / 8]);
}
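
/*
 * Mark the guest-physical range [addr, addr + len) dirty in the shared
 * log bitmap, one bit per VHOST_LOG_PAGE page.
 */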
static __rte_always_inline void
vhost_log_write(struct virtio_net *dev, uint64_t addr, uint64_t len)
{
	uint64_t page;

	if (likely(((dev->features & (1ULL << VHOST_F_LOG_ALL)) == 0) ||
		   !dev->log_base || !len))
		return;

	/* Refuse to log past the end of the shared bitmap. */
	if (unlikely(dev->log_size <= ((addr + len - 1) / VHOST_LOG_PAGE / 8)))
		return;

	/* To make sure guest memory updates are committed before logging */
	rte_smp_wmb();

	page = addr / VHOST_LOG_PAGE;
	while (page * VHOST_LOG_PAGE < addr + len) {
		vhost_log_page((uint8_t *)(uintptr_t)dev->log_base, page);
		page += 1;
	}
}
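
/*
 * Flush the per-virtqueue dirty-log cache to the shared bitmap, OR-ing in
 * one unsigned long word per cache entry. The atomics are needed because
 * several virtqueues may update the same bitmap words concurrently.
 */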
static __rte_always_inline void
vhost_log_cache_sync(struct virtio_net *dev, struct vhost_virtqueue *vq)
{
	unsigned long *log_base;
	int i;

	if (likely(((dev->features & (1ULL << VHOST_F_LOG_ALL)) == 0) ||
		   !dev->log_base))
		return;

	log_base = (unsigned long *)(uintptr_t)dev->log_base;

	/*
	 * It is expected a write memory barrier has been issued
	 * before this function is called.
	 */
	for (i = 0; i < vq->log_cache_nb_elem; i++) {
		struct log_cache_entry *elem = vq->log_cache + i;

#if defined(RTE_TOOLCHAIN_GCC) && (GCC_VERSION < 70100)
		/*
		 * '__sync' builtins are deprecated, but '__atomic' ones
		 * generate suboptimal code in older GCC versions.
		 */
		__sync_fetch_and_or(log_base + elem->offset, elem->val);
#else
		__atomic_fetch_or(log_base + elem->offset, elem->val,
				__ATOMIC_RELAXED);
#endif
	}

	rte_smp_wmb();
	vq->log_cache_nb_elem = 0;
}
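
/*
 * Record a single dirty page in the virtqueue-local cache: the page number
 * is split into a word offset into the bitmap plus a bit index within that
 * word, and a hit on an already-cached word just ORs in the new bit.
 */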
static __rte_always_inline void
vhost_log_cache_page(struct virtio_net *dev, struct vhost_virtqueue *vq,
			uint64_t page)
{
	uint32_t bit_nr = page % (sizeof(unsigned long) << 3);
	uint32_t offset = page / (sizeof(unsigned long) << 3);
	int i;

	for (i = 0; i < vq->log_cache_nb_elem; i++) {
		struct log_cache_entry *elem = vq->log_cache + i;

		if (elem->offset == offset) {
			elem->val |= (1UL << bit_nr);
			return;
		}
	}

	if (unlikely(i >= VHOST_LOG_CACHE_NR)) {
		/*
		 * No more room for a new log cache entry,
		 * so write the dirty log map directly.
		 */
		rte_smp_wmb();
		vhost_log_page((uint8_t *)(uintptr_t)dev->log_base, page);
		return;
	}

	vq->log_cache[i].offset = offset;
	vq->log_cache[i].val = (1UL << bit_nr);
	vq->log_cache_nb_elem++;
}

static __rte_always_inline void
vhost_log_cache_write(struct virtio_net *dev, struct vhost_virtqueue *vq,
			uint64_t addr, uint64_t len)
{
	uint64_t page;

	if (likely(((dev->features & (1ULL << VHOST_F_LOG_ALL)) == 0) ||
		   !dev->log_base || !len))
		return;

	if (unlikely(dev->log_size <= ((addr + len - 1) / VHOST_LOG_PAGE / 8)))
		return;

	page = addr / VHOST_LOG_PAGE;
	while (page * VHOST_LOG_PAGE < addr + len) {
		vhost_log_cache_page(dev, vq, page);
		page += 1;
	}
}
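
/*
 * Used-ring variants: callers pass an offset into the used ring, which is
 * turned into a guest physical address via log_guest_addr before logging.
 */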
static __rte_always_inline void
vhost_log_cache_used_vring(struct virtio_net *dev, struct vhost_virtqueue *vq,
			uint64_t offset, uint64_t len)
{
	vhost_log_cache_write(dev, vq, vq->log_guest_addr + offset, len);
}

static __rte_always_inline void
vhost_log_used_vring(struct virtio_net *dev, struct vhost_virtqueue *vq,
			uint64_t offset, uint64_t len)
{
	vhost_log_write(dev, vq->log_guest_addr + offset, len);
}

/* Macros for printing using RTE_LOG */
#define RTE_LOGTYPE_VHOST_CONFIG RTE_LOGTYPE_USER1
#define RTE_LOGTYPE_VHOST_DATA   RTE_LOGTYPE_USER1

#ifdef RTE_LIBRTE_VHOST_DEBUG
#define VHOST_MAX_PRINT_BUFF 6072
#define LOG_LEVEL RTE_LOG_DEBUG
#define LOG_DEBUG(log_type, fmt, args...) RTE_LOG(DEBUG, log_type, fmt, ##args)
#define PRINT_PACKET(device, addr, size, header) do { \
	char *pkt_addr = (char *)(addr); \
	unsigned int index; \
	char packet[VHOST_MAX_PRINT_BUFF]; \
	\
	if ((header)) \
		snprintf(packet, VHOST_MAX_PRINT_BUFF, \
			"(%d) Header size %d: ", (device->vid), (size)); \
	else \
		snprintf(packet, VHOST_MAX_PRINT_BUFF, \
			"(%d) Packet size %d: ", (device->vid), (size)); \
	for (index = 0; index < (size); index++) { \
		snprintf(packet + strnlen(packet, VHOST_MAX_PRINT_BUFF), \
			VHOST_MAX_PRINT_BUFF - strnlen(packet, VHOST_MAX_PRINT_BUFF), \
			"%02hhx ", pkt_addr[index]); \
	} \
	snprintf(packet + strnlen(packet, VHOST_MAX_PRINT_BUFF), \
		VHOST_MAX_PRINT_BUFF - strnlen(packet, VHOST_MAX_PRINT_BUFF), "\n"); \
	\
	LOG_DEBUG(VHOST_DATA, "%s", packet); \
} while (0)
#else
#define LOG_LEVEL RTE_LOG_INFO
#define LOG_DEBUG(log_type, fmt, args...) do {} while (0)
#define PRINT_PACKET(device, addr, size, header) do {} while (0)
#endif

extern uint64_t VHOST_FEATURES;
#define MAX_VHOST_DEVICE	1024
extern struct virtio_net *vhost_devices[MAX_VHOST_DEVICE];

/* Convert guest physical address to host physical address */
static __rte_always_inline rte_iova_t
gpa_to_hpa(struct virtio_net *dev, uint64_t gpa, uint64_t size)
{
	uint32_t i;
	struct guest_page *page;

	for (i = 0; i < dev->nr_guest_pages; i++) {
		page = &dev->guest_pages[i];

		if (gpa >= page->guest_phys_addr &&
		    gpa + size < page->guest_phys_addr + page->size) {
			return gpa - page->guest_phys_addr +
			       page->host_phys_addr;
		}
	}

	return 0;
}
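
/*
 * Note: gpa_to_hpa() returns 0 when no guest page fully covers
 * [gpa, gpa + size), so callers must treat 0 as a failed translation.
 */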

struct virtio_net *get_device(int vid);

int vhost_new_device(void);
void cleanup_device(struct virtio_net *dev, int destroy);
void reset_device(struct virtio_net *dev);
void vhost_destroy_device(int vid);

int alloc_vring_queue(struct virtio_net *dev, uint32_t vring_idx);

void vhost_set_ifname(int vid, const char *if_name, unsigned int if_len);
void vhost_enable_dequeue_zero_copy(int vid);

struct vhost_device_ops const *vhost_driver_callback_get(const char *path);

/*
 * Backend-specific cleanup.
 *
 * TODO: fix it; we have one backend now
 */
void vhost_backend_cleanup(struct virtio_net *dev);

uint64_t __vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
			uint64_t iova, uint64_t *len, uint8_t perm);
int vring_translate(struct virtio_net *dev, struct vhost_virtqueue *vq);
void vring_invalidate(struct virtio_net *dev, struct vhost_virtqueue *vq);

static __rte_always_inline uint64_t
vhost_iova_to_vva(struct virtio_net *dev, struct vhost_virtqueue *vq,
		  uint64_t iova, uint64_t *len, uint8_t perm)
{
	if (!(dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)))
		return rte_vhost_va_from_guest_pa(dev->mem, iova, len);

	return __vhost_iova_to_vva(dev, vq, iova, len, perm);
}
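
/*
 * When VIRTIO_F_IOMMU_PLATFORM is not negotiated, the I/O virtual address
 * is a plain guest physical address and is translated from the memory map
 * directly; otherwise __vhost_iova_to_vva() does an IOTLB lookup and, on a
 * miss, asks the frontend for the missing mapping.
 */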

#endif /* _VHOST_NET_CDEV_H_ */