/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef _VHOST_NET_CDEV_H_
#define _VHOST_NET_CDEV_H_

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <limits.h>
#include <sys/types.h>
#include <sys/queue.h>
#include <unistd.h>
#include <linux/vhost.h>

#include <rte_log.h>
#include <rte_spinlock.h>

#include "rte_virtio_net.h"
/* Used to indicate that the device is running on a data core */
#define VIRTIO_DEV_RUNNING 1

/* Backend value set by guest. */
#define VIRTIO_DEV_STOPPED -1

#define BUF_VECTOR_MAX 256
/**
 * Structure contains buffer address, length and descriptor index
 * from vring to do scatter RX.
 */
struct buf_vector {
	uint64_t buf_addr;
	uint32_t buf_len;
	uint32_t desc_idx;
};
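
/*
 * Illustrative sketch, not part of this header's API: how a scatter-RX
 * path might walk a guest descriptor chain and collect it into a
 * buf_vector array. The helper name is hypothetical; byte-order handling
 * of the descriptor fields is omitted for brevity.
 */
static inline uint32_t
example_fill_buf_vec(struct vring_desc *ring, uint16_t desc_idx,
		     struct buf_vector *buf_vec)
{
	uint32_t vec_id = 0;

	while (vec_id < BUF_VECTOR_MAX) {
		buf_vec[vec_id].buf_addr = ring[desc_idx].addr;
		buf_vec[vec_id].buf_len  = ring[desc_idx].len;
		buf_vec[vec_id].desc_idx = desc_idx;
		vec_id++;

		if (!(ring[desc_idx].flags & VRING_DESC_F_NEXT))
			break;
		desc_idx = ring[desc_idx].next;
	}

	return vec_id; /* number of vector entries filled */
}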
/*
 * A structure to hold some fields needed in zero copy code path,
 * mainly for associating an mbuf with the right desc_idx.
 */
struct zcopy_mbuf {
	struct rte_mbuf *mbuf;
	uint32_t desc_idx;
	uint16_t in_use;

	TAILQ_ENTRY(zcopy_mbuf) next;
};
TAILQ_HEAD(zcopy_mbuf_list, zcopy_mbuf);
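
/*
 * Illustrative sketch (hypothetical helper): the zero-copy path keeps a
 * pre-allocated array of zcopy_mbuf entries and cycles through it with a
 * last-allocated index. A free entry is marked in_use and handed out;
 * the caller links it onto the zmbuf_list until the guest releases the
 * underlying buffer.
 */
static inline struct zcopy_mbuf *
example_get_zmbuf(struct zcopy_mbuf *pool, uint16_t pool_size,
		  uint16_t *last_idx)
{
	uint16_t i = *last_idx;
	uint16_t tried;

	for (tried = 0; tried < pool_size; tried++) {
		if (pool[i].in_use == 0) {
			pool[i].in_use = 1;
			*last_idx = (i + 1) % pool_size;
			return &pool[i];
		}
		i = (i + 1) % pool_size;
	}

	return NULL; /* every entry is still held by the guest */
}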
/**
 * Structure contains variables relevant to RX/TX virtqueues.
 */
struct vhost_virtqueue {
	struct vring_desc	*desc;
	struct vring_avail	*avail;
	struct vring_used	*used;
	uint32_t		size;

	uint16_t		last_avail_idx;
	uint16_t		last_used_idx;
#define VIRTIO_INVALID_EVENTFD		(-1)
#define VIRTIO_UNINITIALIZED_EVENTFD	(-2)

	/* Backend value to determine if device should be started/stopped */
	int			backend;

	rte_spinlock_t		access_lock;

	/* Used to notify the guest (trigger interrupt) */
	int			callfd;
	/* Currently unused as polling mode is enabled */
	int			kickfd;
	int			enabled;

	/* Physical address of used ring, for logging */
	uint64_t		log_guest_addr;

	uint16_t		nr_zmbuf;
	uint16_t		zmbuf_size;
	uint16_t		last_zmbuf_idx;
	struct zcopy_mbuf	*zmbufs;
	struct zcopy_mbuf_list	zmbuf_list;

	struct vring_used_elem	*shadow_used_ring;
	uint16_t		shadow_used_idx;
} __rte_cache_aligned;
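
/*
 * Illustrative sketch: the free-running index pair above is how the data
 * path tracks ring progress. avail->idx is written by the guest and
 * last_avail_idx records how far the host has consumed; both are 16-bit
 * and wrap, so plain subtraction yields the number of new entries.
 */
static inline uint16_t
example_avail_entries(struct vhost_virtqueue *vq)
{
	return (uint16_t)(vq->avail->idx - vq->last_avail_idx);
}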
/* Old kernels have no such macros defined */
#ifndef VIRTIO_NET_F_GUEST_ANNOUNCE
#define VIRTIO_NET_F_GUEST_ANNOUNCE 21
#endif

#ifndef VIRTIO_NET_F_MQ
#define VIRTIO_NET_F_MQ 22
#endif

#define VHOST_MAX_QUEUE_PAIRS 0x80

/*
 * Define virtio 1.0 for older kernels
 */
#ifndef VIRTIO_F_VERSION_1
#define VIRTIO_F_VERSION_1 32
#endif
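
/*
 * Illustrative sketch: the VIRTIO_* values above are bit positions in
 * the 64-bit feature words negotiated between host and guest, so a
 * feature test is a single shift and mask.
 */
static inline int
example_has_feature(uint64_t features, unsigned int bit)
{
	return (features & (1ULL << bit)) != 0;
}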
struct guest_page {
	uint64_t guest_phys_addr;
	uint64_t host_phys_addr;
	uint64_t size;
};
/**
 * Device structure contains all configuration information relating
 * to the device.
 */
struct virtio_net {
	/* Frontend (QEMU) memory and memory region information */
	struct virtio_memory	*mem;
	uint64_t		features;
	uint64_t		protocol_features;
	int			vid;
	uint32_t		flags;
	uint16_t		vhost_hlen;
	/* to tell if we need broadcast rarp packet */
	rte_atomic16_t		broadcast_rarp;
	uint32_t		virt_qp_nb;
	int			dequeue_zero_copy;
	struct vhost_virtqueue	*virtqueue[VHOST_MAX_QUEUE_PAIRS * 2];
#define IF_NAME_SZ (PATH_MAX > IFNAMSIZ ? PATH_MAX : IFNAMSIZ)
	char			ifname[IF_NAME_SZ];
	uint64_t		log_size;
	uint64_t		log_base;
	uint64_t		log_addr;
	struct ether_addr	mac;

	uint32_t		nr_guest_pages;
	uint32_t		max_guest_pages;
	struct guest_page	*guest_pages;
} __rte_cache_aligned;
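
/*
 * Illustrative sketch: the virtqueue array holds RX and TX rings
 * interleaved per queue pair, which is why it is sized
 * VHOST_MAX_QUEUE_PAIRS * 2. VIRTIO_RXQ, VIRTIO_TXQ and VIRTIO_QNUM
 * come from rte_virtio_net.h.
 */
static inline struct vhost_virtqueue *
example_get_rxq(struct virtio_net *dev, uint32_t qp_idx)
{
	return dev->virtqueue[qp_idx * VIRTIO_QNUM + VIRTIO_RXQ];
}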
/**
 * Information relating to memory regions including offsets to
 * addresses in QEMU's memory file.
 */
struct virtio_memory_region {
	uint64_t guest_phys_addr;
	uint64_t guest_user_addr;
	uint64_t host_user_addr;
	uint64_t size;
	void	 *mmap_addr;
	uint64_t mmap_size;
	int	 fd;
};
/**
 * Memory structure includes region and mapping information.
 */
struct virtio_memory {
	uint32_t nregions;
	struct virtio_memory_region regions[0];
};
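
/*
 * Illustrative sketch: regions[0] is a flexible array member, so a
 * virtio_memory instance is allocated as one block sized for the actual
 * region count reported by the frontend.
 */
static inline size_t
example_virtio_memory_size(uint32_t nregions)
{
	return sizeof(struct virtio_memory) +
	       nregions * sizeof(struct virtio_memory_region);
}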
/* Macros for printing using RTE_LOG */
#define RTE_LOGTYPE_VHOST_CONFIG RTE_LOGTYPE_USER1
#define RTE_LOGTYPE_VHOST_DATA   RTE_LOGTYPE_USER1
#ifdef RTE_LIBRTE_VHOST_DEBUG
#define VHOST_MAX_PRINT_BUFF 6072
#define LOG_LEVEL RTE_LOG_DEBUG
#define LOG_DEBUG(log_type, fmt, args...) RTE_LOG(DEBUG, log_type, fmt, ##args)
#define PRINT_PACKET(device, addr, size, header) do { \
	char *pkt_addr = (char *)(addr); \
	unsigned int index; \
	char packet[VHOST_MAX_PRINT_BUFF]; \
	\
	if ((header)) \
		snprintf(packet, VHOST_MAX_PRINT_BUFF, "(%d) Header size %d: ", (device->vid), (size)); \
	else \
		snprintf(packet, VHOST_MAX_PRINT_BUFF, "(%d) Packet size %d: ", (device->vid), (size)); \
	for (index = 0; index < (size); index++) { \
		snprintf(packet + strnlen(packet, VHOST_MAX_PRINT_BUFF), VHOST_MAX_PRINT_BUFF - strnlen(packet, VHOST_MAX_PRINT_BUFF), \
			"%02hhx ", pkt_addr[index]); \
	} \
	snprintf(packet + strnlen(packet, VHOST_MAX_PRINT_BUFF), VHOST_MAX_PRINT_BUFF - strnlen(packet, VHOST_MAX_PRINT_BUFF), "\n"); \
	\
	LOG_DEBUG(VHOST_DATA, "%s", packet); \
} while (0)
#else
#define LOG_LEVEL RTE_LOG_INFO
#define LOG_DEBUG(log_type, fmt, args...) do {} while (0)
#define PRINT_PACKET(device, addr, size, header) do {} while (0)
#endif
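
/*
 * Illustrative sketch: how a data-path function would dump a buffer with
 * PRINT_PACKET. The call compiles away entirely unless
 * RTE_LIBRTE_VHOST_DEBUG is defined.
 */
static inline void
example_trace_packet(struct virtio_net *dev, void *buf, uint16_t len)
{
	PRINT_PACKET(dev, (uintptr_t)buf, len, 0);

	(void)dev; /* silence unused-parameter warnings in non-debug builds */
	(void)buf;
	(void)len;
}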
extern uint64_t VHOST_FEATURES;
#define MAX_VHOST_DEVICE 1024
extern struct virtio_net *vhost_devices[MAX_VHOST_DEVICE];
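
/*
 * Illustrative sketch of the lookup that get_device() (declared below)
 * performs: a vid is simply an index into the global vhost_devices table.
 */
static inline struct virtio_net *
example_lookup_device(int vid)
{
	if (vid < 0 || vid >= MAX_VHOST_DEVICE)
		return NULL;

	return vhost_devices[vid];
}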
/* Convert guest physical address to host virtual address */
static inline uint64_t __attribute__((always_inline))
gpa_to_vva(struct virtio_net *dev, uint64_t gpa, uint64_t *len)
{
	struct virtio_memory_region *r;
	uint32_t i;

	for (i = 0; i < dev->mem->nregions; i++) {
		r = &dev->mem->regions[i];
		if (gpa >= r->guest_phys_addr &&
		    gpa <  r->guest_phys_addr + r->size) {

			if (unlikely(*len > r->guest_phys_addr + r->size - gpa))
				*len = r->guest_phys_addr + r->size - gpa;

			return gpa - r->guest_phys_addr +
			       r->host_user_addr;
		}
	}
	*len = 0;

	return 0;
}
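
/*
 * Illustrative sketch (hypothetical helper): a safe copy from guest
 * memory built on gpa_to_vva(). A buffer may straddle two regions, so
 * the caller must honor the clamped *len and resume translation at
 * gpa + len instead of trusting one mapping for the whole range.
 */
static inline uint64_t
example_copy_from_guest(struct virtio_net *dev, void *dst,
			uint64_t gpa, uint64_t size)
{
	uint64_t done = 0;

	while (done < size) {
		uint64_t len = size - done;
		uint64_t vva = gpa_to_vva(dev, gpa + done, &len);

		if (vva == 0 || len == 0)
			break; /* unmapped guest physical address */

		memcpy((char *)dst + done, (void *)(uintptr_t)vva, len);
		done += len;
	}

	return done; /* bytes actually copied */
}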
/* Convert guest physical address to host physical address */
static inline phys_addr_t __attribute__((always_inline))
gpa_to_hpa(struct virtio_net *dev, uint64_t gpa, uint64_t size)
{
	uint32_t i;
	struct guest_page *page;

	for (i = 0; i < dev->nr_guest_pages; i++) {
		page = &dev->guest_pages[i];

		if (gpa >= page->guest_phys_addr &&
		    gpa + size < page->guest_phys_addr + page->size) {
			return gpa - page->guest_phys_addr +
			       page->host_phys_addr;
		}
	}

	return 0;
}
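
/*
 * Illustrative sketch: gpa_to_hpa() only resolves ranges that sit inside
 * a single guest page entry, so a caller needing a DMA-able address must
 * treat a zero return as "not physically contiguous" and fall back to a
 * copy.
 */
static inline int
example_range_is_dma_able(struct virtio_net *dev, uint64_t gpa,
			  uint64_t size)
{
	return gpa_to_hpa(dev, gpa, size) != 0;
}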
extern struct virtio_net_device_ops const *notify_ops;

struct virtio_net *get_device(int vid);

int vhost_new_device(void);
void cleanup_device(struct virtio_net *dev, int destroy);
void reset_device(struct virtio_net *dev);
void vhost_destroy_device(int vid);

int alloc_vring_queue_pair(struct virtio_net *dev, uint32_t qp_idx);

void vhost_set_ifname(int vid, const char *if_name, unsigned int if_len);
void vhost_enable_dequeue_zero_copy(int vid);
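
/*
 * Illustrative sketch (hypothetical driver code, error handling
 * abbreviated): the typical order in which a vhost backend uses the
 * lifecycle functions declared above.
 */
static inline int
example_device_setup(const char *if_name, uint32_t nr_queue_pairs)
{
	struct virtio_net *dev;
	uint32_t qp;
	int vid;

	vid = vhost_new_device();
	if (vid < 0)
		return -1;

	vhost_set_ifname(vid, if_name, strlen(if_name));

	dev = get_device(vid);
	for (qp = 0; qp < nr_queue_pairs; qp++) {
		if (dev == NULL || alloc_vring_queue_pair(dev, qp) < 0) {
			vhost_destroy_device(vid);
			return -1;
		}
	}

	return vid;
}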
/*
 * Backend-specific cleanup.
 *
 * TODO: fix it; we have one backend now
 */
void vhost_backend_cleanup(struct virtio_net *dev);
#endif /* _VHOST_NET_CDEV_H_ */