/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdint.h>
#include <stdbool.h>
#include <linux/virtio_net.h>

#include <rte_mbuf.h>
#include <rte_memcpy.h>
#include <rte_vhost.h>

#include "main.h"
/*
 * A very simple vhost-user net driver implementation, without
 * any extra features being enabled, such as TSO and mrg-Rx.
 */
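/*
 * vs_enqueue_pkts() and vs_dequeue_pkts() below implement the host-to-guest
 * (Rx) and guest-to-host (Tx) data paths on top of the split virtqueue
 * layout.  struct vhost_dev, struct vhost_queue and MAX_PKT_BURST are
 * expected to come from the example's main.h.
 */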
void
vs_vhost_net_setup(struct vhost_dev *dev)
{
	uint16_t i;
	int vid = dev->vid;
	struct vhost_queue *queue;

	RTE_LOG(INFO, VHOST_CONFIG,
		"setting builtin vhost-user net driver\n");

	rte_vhost_get_negotiated_features(vid, &dev->features);
	if (dev->features & (1 << VIRTIO_NET_F_MRG_RXBUF))
		dev->hdr_len = sizeof(struct virtio_net_hdr_mrg_rxbuf);
	else
		dev->hdr_len = sizeof(struct virtio_net_hdr);

	rte_vhost_get_mem_table(vid, &dev->mem);

	dev->nr_vrings = rte_vhost_get_vring_num(vid);
	for (i = 0; i < dev->nr_vrings; i++) {
		queue = &dev->queues[i];

		queue->last_used_idx  = 0;
		queue->last_avail_idx = 0;
		rte_vhost_get_vhost_vring(vid, i, &queue->vr);
	}
}
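/*
 * Tear down the per-device state created by vs_vhost_net_setup().
 */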
void
vs_vhost_net_remove(struct vhost_dev *dev)
{
	/* Release the guest memory-table copy obtained in vs_vhost_net_setup(). */
	free(dev->mem);
}
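/*
 * Copy one mbuf chain (host -> guest) into the descriptor chain headed by
 * desc_idx, prepending a zeroed virtio-net header.  Guest-physical addresses
 * are translated chunk by chunk with rte_vhost_va_from_guest_pa(), since a
 * descriptor may map to host-virtually non-contiguous regions.
 * Returns 0 on success, -1 on failure.
 */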
static __rte_always_inline int
enqueue_pkt(struct vhost_dev *dev, struct rte_vhost_vring *vr,
	    struct rte_mbuf *m, uint16_t desc_idx)
{
	uint32_t desc_avail, desc_offset;
	uint64_t desc_chunck_len;
	uint32_t mbuf_avail, mbuf_offset;
	uint32_t cpy_len;
	struct vring_desc *desc;
	uint64_t desc_addr, desc_gaddr;
	struct virtio_net_hdr virtio_hdr = {0, 0, 0, 0, 0, 0};
	/* A counter to avoid desc dead loop chain */
	uint16_t nr_desc = 1;

	desc = &vr->desc[desc_idx];
	desc_chunck_len = desc->len;
	desc_gaddr = desc->addr;
	desc_addr = rte_vhost_va_from_guest_pa(
			dev->mem, desc_gaddr, &desc_chunck_len);
	/*
	 * Checking of 'desc_addr' placed outside of 'unlikely' macro to avoid
	 * performance issue with some versions of gcc (4.8.4 and 5.3.0) which
	 * otherwise stores offset on the stack instead of in a register.
	 */
	if (unlikely(desc->len < dev->hdr_len) || !desc_addr)
		return -1;

	rte_prefetch0((void *)(uintptr_t)desc_addr);

	/* write virtio-net header */
	if (likely(desc_chunck_len >= dev->hdr_len)) {
		*(struct virtio_net_hdr *)(uintptr_t)desc_addr = virtio_hdr;
		desc_offset = dev->hdr_len;
		desc_chunck_len -= dev->hdr_len;
	} else {
		/* Header spans several host-contiguous chunks: copy it piecewise. */
		uint64_t remain = dev->hdr_len;
		uint64_t src = (uint64_t)(uintptr_t)&virtio_hdr, dst;
		uint64_t guest_addr = desc_gaddr;

		while (remain) {
			uint64_t len = remain;

			dst = rte_vhost_va_from_guest_pa(dev->mem,
					guest_addr, &len);
			if (unlikely(!dst || !len))
				return -1;

			rte_memcpy((void *)(uintptr_t)dst,
					(void *)(uintptr_t)src,
					len);

			remain -= len;
			guest_addr += len;
			src += len;
		}

		desc_chunck_len = desc->len - dev->hdr_len;
		desc_gaddr += dev->hdr_len;
		desc_addr = rte_vhost_va_from_guest_pa(
				dev->mem, desc_gaddr,
				&desc_chunck_len);
		if (unlikely(!desc_addr))
			return -1;

		desc_offset = 0;
	}

	desc_avail = desc->len - dev->hdr_len;

	mbuf_avail  = rte_pktmbuf_data_len(m);
	mbuf_offset = 0;
	while (mbuf_avail != 0 || m->next != NULL) {
		/* done with current mbuf, fetch next */
		if (mbuf_avail == 0) {
			m = m->next;

			mbuf_offset = 0;
			mbuf_avail  = rte_pktmbuf_data_len(m);
		}

		/* done with current desc buf, fetch next */
		if (desc_avail == 0) {
			if ((desc->flags & VRING_DESC_F_NEXT) == 0) {
				/* Room in vring buffer is not enough */
				return -1;
			}
			if (unlikely(desc->next >= vr->size ||
				     ++nr_desc > vr->size))
				return -1;

			desc = &vr->desc[desc->next];
			desc_chunck_len = desc->len;
			desc_gaddr = desc->addr;
			desc_addr = rte_vhost_va_from_guest_pa(
					dev->mem, desc_gaddr, &desc_chunck_len);
			if (unlikely(!desc_addr))
				return -1;

			desc_offset = 0;
			desc_avail  = desc->len;
		} else if (unlikely(desc_chunck_len == 0)) {
			desc_chunck_len = desc_avail;
			desc_gaddr += desc_offset;
			desc_addr = rte_vhost_va_from_guest_pa(dev->mem,
					desc_gaddr,
					&desc_chunck_len);
			if (unlikely(!desc_addr))
				return -1;

			desc_offset = 0;
		}

		cpy_len = RTE_MIN(desc_chunck_len, mbuf_avail);
		rte_memcpy((void *)((uintptr_t)(desc_addr + desc_offset)),
			rte_pktmbuf_mtod_offset(m, void *, mbuf_offset),
			cpy_len);

		mbuf_avail  -= cpy_len;
		mbuf_offset += cpy_len;
		desc_avail  -= cpy_len;
		desc_offset += cpy_len;
		desc_chunck_len -= cpy_len;
	}

	return 0;
}
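/*
 * Burst Rx path: reserve `count` used-ring entries up front, copy each mbuf
 * into its descriptor chain with enqueue_pkt(), publish used->idx and kick
 * the guest through callfd unless it has disabled notifications.
 */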
uint16_t
vs_enqueue_pkts(struct vhost_dev *dev, uint16_t queue_id,
		struct rte_mbuf **pkts, uint32_t count)
{
	struct vhost_queue *queue;
	struct rte_vhost_vring *vr;
	uint16_t avail_idx, free_entries, start_idx;
	uint16_t desc_indexes[MAX_PKT_BURST];
	uint16_t used_idx;
	uint32_t i;

	queue = &dev->queues[queue_id];
	vr    = &queue->vr;

	avail_idx = *((volatile uint16_t *)&vr->avail->idx);
	start_idx = queue->last_used_idx;
	free_entries = avail_idx - start_idx;
	count = RTE_MIN(count, free_entries);
	count = RTE_MIN(count, (uint32_t)MAX_PKT_BURST);
	if (count == 0)
		return 0;

	/* Retrieve all of the desc indexes first to avoid caching issues. */
	rte_prefetch0(&vr->avail->ring[start_idx & (vr->size - 1)]);
	for (i = 0; i < count; i++) {
		used_idx = (start_idx + i) & (vr->size - 1);
		desc_indexes[i] = vr->avail->ring[used_idx];
		vr->used->ring[used_idx].id = desc_indexes[i];
		vr->used->ring[used_idx].len = pkts[i]->pkt_len +
			dev->hdr_len;
	}

	rte_prefetch0(&vr->desc[desc_indexes[0]]);
	for (i = 0; i < count; i++) {
		uint16_t desc_idx = desc_indexes[i];
		int err;

		err = enqueue_pkt(dev, vr, pkts[i], desc_idx);
		if (unlikely(err)) {
			used_idx = (start_idx + i) & (vr->size - 1);
			vr->used->ring[used_idx].len = dev->hdr_len;
		}

		if (i + 1 < count)
			rte_prefetch0(&vr->desc[desc_indexes[i + 1]]);
	}

	rte_smp_wmb();

	*(volatile uint16_t *)&vr->used->idx += count;
	queue->last_used_idx += count;

	/* flush used->idx update before we read avail->flags. */
	rte_mb();

	/* Kick the guest if necessary. */
	if (!(vr->avail->flags & VRING_AVAIL_F_NO_INTERRUPT)
			&& (vr->callfd >= 0))
		eventfd_write(vr->callfd, (eventfd_t)1);

	return count;
}
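/*
 * Copy one guest Tx descriptor chain into an mbuf chain, allocating extra
 * mbufs from mbuf_pool as needed.  The leading virtio-net header descriptor
 * is skipped, since TSO and ANY_LAYOUT are not negotiated by this driver.
 * Returns 0 on success, -1 on failure.
 */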
static __rte_always_inline int
dequeue_pkt(struct vhost_dev *dev, struct rte_vhost_vring *vr,
	    struct rte_mbuf *m, uint16_t desc_idx,
	    struct rte_mempool *mbuf_pool)
{
	struct vring_desc *desc;
	uint64_t desc_addr, desc_gaddr;
	uint32_t desc_avail, desc_offset;
	uint64_t desc_chunck_len;
	uint32_t mbuf_avail, mbuf_offset;
	uint32_t cpy_len;
	struct rte_mbuf *cur = m, *prev = m;
	/* A counter to avoid desc dead loop chain */
	uint32_t nr_desc = 1;

	desc = &vr->desc[desc_idx];
	if (unlikely((desc->len < dev->hdr_len)) ||
			(desc->flags & VRING_DESC_F_INDIRECT))
		return -1;

	desc_chunck_len = desc->len;
	desc_gaddr = desc->addr;
	desc_addr = rte_vhost_va_from_guest_pa(
			dev->mem, desc_gaddr, &desc_chunck_len);
	if (unlikely(!desc_addr))
		return -1;

	/*
	 * We don't support ANY_LAYOUT, neither VERSION_1, meaning
	 * a Tx packet from guest must have 2 desc buffers at least:
	 * the first for storing the header and the others for
	 * storing the data.
	 *
	 * And since we don't support TSO, we could simply skip the
	 * header.
	 */
	desc = &vr->desc[desc->next];
	desc_chunck_len = desc->len;
	desc_gaddr = desc->addr;
	desc_addr = rte_vhost_va_from_guest_pa(
			dev->mem, desc_gaddr, &desc_chunck_len);
	if (unlikely(!desc_addr))
		return -1;
	rte_prefetch0((void *)(uintptr_t)desc_addr);

	desc_offset = 0;
	desc_avail  = desc->len;
	nr_desc    += 1;

	mbuf_offset = 0;
	mbuf_avail  = m->buf_len - RTE_PKTMBUF_HEADROOM;
	while (1) {
		cpy_len = RTE_MIN(desc_chunck_len, mbuf_avail);
		rte_memcpy(rte_pktmbuf_mtod_offset(cur, void *,
						   mbuf_offset),
			(void *)((uintptr_t)(desc_addr + desc_offset)),
			cpy_len);

		mbuf_avail  -= cpy_len;
		mbuf_offset += cpy_len;
		desc_avail  -= cpy_len;
		desc_offset += cpy_len;
		desc_chunck_len -= cpy_len;

		/* This desc reaches to its end, get the next one */
		if (desc_avail == 0) {
			if ((desc->flags & VRING_DESC_F_NEXT) == 0)
				break;

			if (unlikely(desc->next >= vr->size ||
				     ++nr_desc > vr->size))
				return -1;
			desc = &vr->desc[desc->next];

			desc_chunck_len = desc->len;
			desc_gaddr = desc->addr;
			desc_addr = rte_vhost_va_from_guest_pa(
					dev->mem, desc_gaddr, &desc_chunck_len);
			if (unlikely(!desc_addr))
				return -1;
			rte_prefetch0((void *)(uintptr_t)desc_addr);

			desc_offset = 0;
			desc_avail  = desc->len;
		} else if (unlikely(desc_chunck_len == 0)) {
			desc_chunck_len = desc_avail;
			desc_gaddr += desc_offset;
			desc_addr = rte_vhost_va_from_guest_pa(dev->mem,
					desc_gaddr,
					&desc_chunck_len);
			if (unlikely(!desc_addr))
				return -1;

			desc_offset = 0;
		}

		/*
		 * This mbuf reaches to its end, get a new one
		 * to hold more data.
		 */
		if (mbuf_avail == 0) {
			cur = rte_pktmbuf_alloc(mbuf_pool);
			if (unlikely(cur == NULL)) {
				RTE_LOG(ERR, VHOST_DATA, "Failed to "
					"allocate memory for mbuf.\n");
				return -1;
			}

			prev->next = cur;
			prev->data_len = mbuf_offset;
			m->nb_segs += 1;
			m->pkt_len += mbuf_offset;
			prev = cur;

			mbuf_offset = 0;
			mbuf_avail  = cur->buf_len - RTE_PKTMBUF_HEADROOM;
		}
	}

	prev->data_len = mbuf_offset;
	m->pkt_len    += mbuf_offset;

	return 0;
}
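/*
 * Burst Tx path: pre-publish used entries for the avail-ring heads we are
 * about to consume, convert each descriptor chain into an mbuf chain with
 * dequeue_pkt(), then advance used->idx and kick the guest if it still has
 * interrupts enabled.
 */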
uint16_t
vs_dequeue_pkts(struct vhost_dev *dev, uint16_t queue_id,
	struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
{
	struct vhost_queue *queue;
	struct rte_vhost_vring *vr;
	uint32_t desc_indexes[MAX_PKT_BURST];
	uint32_t used_idx;
	uint32_t i = 0;
	uint16_t free_entries;
	uint16_t avail_idx;

	queue = &dev->queues[queue_id];
	vr    = &queue->vr;

	free_entries = *((volatile uint16_t *)&vr->avail->idx) -
			queue->last_avail_idx;
	if (free_entries == 0)
		return 0;

	/* Prefetch available and used ring */
	avail_idx = queue->last_avail_idx & (vr->size - 1);
	used_idx  = queue->last_used_idx  & (vr->size - 1);
	rte_prefetch0(&vr->avail->ring[avail_idx]);
	rte_prefetch0(&vr->used->ring[used_idx]);

	count = RTE_MIN(count, MAX_PKT_BURST);
	count = RTE_MIN(count, free_entries);

	if (unlikely(count == 0))
		return 0;

	/*
	 * Retrieve all of the head indexes first and pre-update used entries
	 * to avoid caching issues.
	 */
	for (i = 0; i < count; i++) {
		avail_idx = (queue->last_avail_idx + i) & (vr->size - 1);
		used_idx  = (queue->last_used_idx  + i) & (vr->size - 1);
		desc_indexes[i] = vr->avail->ring[avail_idx];

		vr->used->ring[used_idx].id  = desc_indexes[i];
		vr->used->ring[used_idx].len = 0;
	}

	/* Prefetch descriptor index. */
	rte_prefetch0(&vr->desc[desc_indexes[0]]);
	for (i = 0; i < count; i++) {
		int err;

		if (likely(i + 1 < count))
			rte_prefetch0(&vr->desc[desc_indexes[i + 1]]);

		pkts[i] = rte_pktmbuf_alloc(mbuf_pool);
		if (unlikely(pkts[i] == NULL)) {
			RTE_LOG(ERR, VHOST_DATA,
				"Failed to allocate memory for mbuf.\n");
			break;
		}

		err = dequeue_pkt(dev, vr, pkts[i], desc_indexes[i], mbuf_pool);
		if (unlikely(err)) {
			rte_pktmbuf_free(pkts[i]);
			break;
		}
	}

	queue->last_avail_idx += i;
	queue->last_used_idx += i;

	rte_smp_wmb();
	rte_smp_rmb();

	vr->used->idx += i;

	if (!(vr->avail->flags & VRING_AVAIL_F_NO_INTERRUPT)
			&& (vr->callfd >= 0))
		eventfd_write(vr->callfd, (eventfd_t)1);

	return i;
}
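/*
 * Usage sketch (illustrative only, not part of this driver): the example's
 * switching core would pair these helpers roughly as below, assuming the
 * VIRTIO_RXQ/VIRTIO_TXQ queue ids, the vhost_dev pointer and the mbuf pool
 * come from the example's main.c:
 *
 *	uint16_t n;
 *	struct rte_mbuf *pkts[MAX_PKT_BURST];
 *
 *	n = vs_dequeue_pkts(vdev, VIRTIO_TXQ, mbuf_pool, pkts, MAX_PKT_BURST);
 *	// ... forward the n packets, then inject traffic back with:
 *	vs_enqueue_pkts(vdev, VIRTIO_RXQ, pkts, n);
 */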