commit 745cc53f43bd106e1f55f9c204fe60070970e76b
deb_dpdk.git: lib/librte_vhost/virtio_net.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <stdint.h>
35 #include <stdbool.h>
36 #include <linux/virtio_net.h>
37
38 #include <rte_mbuf.h>
39 #include <rte_memcpy.h>
40 #include <rte_ether.h>
41 #include <rte_ip.h>
42 #include <rte_virtio_net.h>
43 #include <rte_tcp.h>
44 #include <rte_udp.h>
45 #include <rte_sctp.h>
46 #include <rte_arp.h>
47 #include <rte_spinlock.h>
48 #include <rte_malloc.h>
49
50 #include "vhost.h"
51
52 #define MAX_PKT_BURST 32
53 #define VHOST_LOG_PAGE  4096
54
55 /*
56  * Atomically set a bit in memory.
57  */
58 static inline void __attribute__((always_inline))
59 vhost_set_bit(unsigned int nr, volatile uint8_t *addr)
60 {
61         __sync_fetch_and_or_8(addr, (1U << nr));
62 }
63
64 static inline void __attribute__((always_inline))
65 vhost_log_page(uint8_t *log_base, uint64_t page)
66 {
67         vhost_set_bit(page % 8, &log_base[page / 8]);
68 }
69
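/*
 * Mark the guest physical pages touched in [addr, addr + len) as dirty in the
 * log used for live migration. No-op unless VHOST_F_LOG_ALL was negotiated
 * and a log base has been set up.
 */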
70 static inline void __attribute__((always_inline))
71 vhost_log_write(struct virtio_net *dev, uint64_t addr, uint64_t len)
72 {
73         uint64_t page;
74
75         if (likely(((dev->features & (1ULL << VHOST_F_LOG_ALL)) == 0) ||
76                    !dev->log_base || !len))
77                 return;
78
79         if (unlikely(dev->log_size <= ((addr + len - 1) / VHOST_LOG_PAGE / 8)))
80                 return;
81
82         /* To make sure guest memory updates are committed before logging */
83         rte_smp_wmb();
84
85         page = addr / VHOST_LOG_PAGE;
86         while (page * VHOST_LOG_PAGE < addr + len) {
87                 vhost_log_page((uint8_t *)(uintptr_t)dev->log_base, page);
88                 page += 1;
89         }
90 }
91
92 static inline void __attribute__((always_inline))
93 vhost_log_used_vring(struct virtio_net *dev, struct vhost_virtqueue *vq,
94                      uint64_t offset, uint64_t len)
95 {
96         vhost_log_write(dev, vq->log_guest_addr + offset, len);
97 }
98
99 static bool
100 is_valid_virt_queue_idx(uint32_t idx, int is_tx, uint32_t qp_nb)
101 {
102         return (is_tx ^ (idx & 1)) == 0 && idx < qp_nb * VIRTIO_QNUM;
103 }
104
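/*
 * The indirect descriptor table may not be contiguous in the host virtual
 * address space; copy it chunk by chunk into a freshly allocated contiguous
 * buffer. Returns NULL on allocation or translation failure; the caller must
 * release the copy with free_ind_table().
 */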
105 static inline struct vring_desc *__attribute__((always_inline))
106 alloc_copy_ind_table(struct virtio_net *dev, struct vring_desc *desc)
107 {
108         struct vring_desc *idesc;
109         uint64_t src, dst;
110         uint64_t len, remain = desc->len;
111         uint64_t desc_addr = desc->addr;
112
113         idesc = rte_malloc(__func__, desc->len, 0);
114         if (unlikely(!idesc))
115                 return 0;
116
117         dst = (uint64_t)(uintptr_t)idesc;
118
119         while (remain) {
120                 len = remain;
121                 src = gpa_to_vva(dev, desc_addr, &len);
122                 if (unlikely(!src || !len)) {
123                         rte_free(idesc);
124                         return 0;
125                 }
126
127                 rte_memcpy((void *)(uintptr_t)dst, (void *)(uintptr_t)src, len);
128
129                 remain -= len;
130                 dst += len;
131                 desc_addr += len;
132         }
133
134         return idesc;
135 }
136
137 static inline void __attribute__((always_inline))
138 free_ind_table(struct vring_desc *idesc)
139 {
140         rte_free(idesc);
141 }
142
143 static inline void __attribute__((always_inline))
144 do_flush_shadow_used_ring(struct virtio_net *dev, struct vhost_virtqueue *vq,
145                           uint16_t to, uint16_t from, uint16_t size)
146 {
147         rte_memcpy(&vq->used->ring[to],
148                         &vq->shadow_used_ring[from],
149                         size * sizeof(struct vring_used_elem));
150         vhost_log_used_vring(dev, vq,
151                         offsetof(struct vring_used, ring[to]),
152                         size * sizeof(struct vring_used_elem));
153 }
154
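/*
 * Flush the locally batched (shadow) used entries into the guest-visible used
 * ring, handling wrap-around, then publish the new used index.
 */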
155 static inline void __attribute__((always_inline))
156 flush_shadow_used_ring(struct virtio_net *dev, struct vhost_virtqueue *vq)
157 {
158         uint16_t used_idx = vq->last_used_idx & (vq->size - 1);
159
160         if (used_idx + vq->shadow_used_idx <= vq->size) {
161                 do_flush_shadow_used_ring(dev, vq, used_idx, 0,
162                                           vq->shadow_used_idx);
163         } else {
164                 uint16_t size;
165
166                 /* update the used ring interval [used_idx, vq->size) */
167                 size = vq->size - used_idx;
168                 do_flush_shadow_used_ring(dev, vq, used_idx, 0, size);
169
170                 /* then wrap around and update the interval [0, shadow_used_idx - size) */
171                 do_flush_shadow_used_ring(dev, vq, 0, size,
172                                           vq->shadow_used_idx - size);
173         }
174         vq->last_used_idx += vq->shadow_used_idx;
175
176         rte_smp_wmb();
177
178         *(volatile uint16_t *)&vq->used->idx += vq->shadow_used_idx;
179         vhost_log_used_vring(dev, vq, offsetof(struct vring_used, idx),
180                 sizeof(vq->used->idx));
181 }
182
183 static inline void __attribute__((always_inline))
184 update_shadow_used_ring(struct vhost_virtqueue *vq,
185                          uint16_t desc_idx, uint16_t len)
186 {
187         uint16_t i = vq->shadow_used_idx++;
188
189         vq->shadow_used_ring[i].id  = desc_idx;
190         vq->shadow_used_ring[i].len = len;
191 }
192
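/*
 * Translate the Tx offload flags carried by the mbuf (L4 checksum, TSO) into
 * the virtio-net header of the packet being placed into the guest RX ring.
 */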
193 static void
194 virtio_enqueue_offload(struct rte_mbuf *m_buf, struct virtio_net_hdr *net_hdr)
195 {
196         uint64_t csum_l4 = m_buf->ol_flags & PKT_TX_L4_MASK;
197
198         if (m_buf->ol_flags & PKT_TX_TCP_SEG)
199                 csum_l4 |= PKT_TX_TCP_CKSUM;
200
201         if (csum_l4) {
202                 net_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
203                 net_hdr->csum_start = m_buf->l2_len + m_buf->l3_len;
204
205                 switch (csum_l4) {
206                 case PKT_TX_TCP_CKSUM:
207                         net_hdr->csum_offset = (offsetof(struct tcp_hdr,
208                                                 cksum));
209                         break;
210                 case PKT_TX_UDP_CKSUM:
211                         net_hdr->csum_offset = (offsetof(struct udp_hdr,
212                                                 dgram_cksum));
213                         break;
214                 case PKT_TX_SCTP_CKSUM:
215                         net_hdr->csum_offset = (offsetof(struct sctp_hdr,
216                                                 cksum));
217                         break;
218                 }
219         }
220
221         /* IP cksum verification cannot be bypassed, so calculate it here */
222         if (m_buf->ol_flags & PKT_TX_IP_CKSUM) {
223                 struct ipv4_hdr *ipv4_hdr;
224
225                 ipv4_hdr = rte_pktmbuf_mtod_offset(m_buf, struct ipv4_hdr *,
226                                                    m_buf->l2_len);
227                 ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr);
228         }
229
230         if (m_buf->ol_flags & PKT_TX_TCP_SEG) {
231                 if (m_buf->ol_flags & PKT_TX_IPV4)
232                         net_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
233                 else
234                         net_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
235                 net_hdr->gso_size = m_buf->tso_segsz;
236                 net_hdr->hdr_len = m_buf->l2_len + m_buf->l3_len
237                                         + m_buf->l4_len;
238         }
239 }
240
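/*
 * Write the virtio-net header at the given descriptor address, using the
 * mergeable-rxbuf layout or the legacy layout depending on the negotiated
 * vhost header length.
 */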
241 static inline void
242 copy_virtio_net_hdr(struct virtio_net *dev, uint64_t desc_addr,
243                     struct virtio_net_hdr_mrg_rxbuf hdr)
244 {
245         if (dev->vhost_hlen == sizeof(struct virtio_net_hdr_mrg_rxbuf))
246                 *(struct virtio_net_hdr_mrg_rxbuf *)(uintptr_t)desc_addr = hdr;
247         else
248                 *(struct virtio_net_hdr *)(uintptr_t)desc_addr = hdr.hdr;
249 }
250
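/*
 * Copy one mbuf chain into a single descriptor chain, writing the virtio-net
 * header first. Returns 0 on success, -1 if the chain is malformed or too
 * small for the packet.
 */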
251 static inline int __attribute__((always_inline))
252 copy_mbuf_to_desc(struct virtio_net *dev, struct vring_desc *descs,
253                   struct rte_mbuf *m, uint16_t desc_idx, uint32_t size)
254 {
255         uint32_t desc_avail, desc_offset;
256         uint32_t mbuf_avail, mbuf_offset;
257         uint32_t cpy_len;
258         uint64_t desc_chunck_len;
259         struct vring_desc *desc;
260         uint64_t desc_addr, desc_gaddr;
261         struct virtio_net_hdr_mrg_rxbuf virtio_hdr = {{0, 0, 0, 0, 0, 0}, 0};
262         /* A counter to guard against an endless desc chain loop */
263         uint16_t nr_desc = 1;
264
265         desc = &descs[desc_idx];
266         desc_chunck_len = desc->len;
267         desc_gaddr = desc->addr;
268         desc_addr = gpa_to_vva(dev, desc_gaddr, &desc_chunck_len);
269         /*
270          * The check on 'desc_addr' is placed outside the 'unlikely' macro to
271          * avoid a performance issue with some gcc versions (4.8.4 and 5.3.0),
272          * which otherwise store the offset on the stack instead of in a register.
273          */
274         if (unlikely(desc->len < dev->vhost_hlen) || !desc_addr)
275                 return -1;
276
277         rte_prefetch0((void *)(uintptr_t)desc_addr);
278
279         virtio_enqueue_offload(m, &virtio_hdr.hdr);
280         if (likely(desc_chunck_len >= dev->vhost_hlen)) {
281                 copy_virtio_net_hdr(dev, desc_addr, virtio_hdr);
282
283                 virtio_enqueue_offload(m,
284                                 (struct virtio_net_hdr *)(uintptr_t)desc_addr);
285                 PRINT_PACKET(dev, (uintptr_t)desc_addr, dev->vhost_hlen, 0);
286         } else {
287                 uint64_t remain = dev->vhost_hlen;
288                 uint64_t len;
289                 uint64_t src = (uint64_t)(uintptr_t)&virtio_hdr, dst;
290                 uint64_t guest_addr = desc_gaddr;
291
292                 while (remain) {
293                         len = remain;
294                         dst = gpa_to_vva(dev, guest_addr, &len);
295                         if (unlikely(!dst || !len))
296                                 return -1;
297
298                         rte_memcpy((void *)(uintptr_t)dst,
299                                         (void *)(uintptr_t)src, len);
300
301                         PRINT_PACKET(dev, (uintptr_t)dst, len, 0);
302                         remain -= len;
303                         guest_addr += len;
304                         dst += len;
305                 }
306         }
307
308         vhost_log_write(dev, desc_gaddr, dev->vhost_hlen);
309
310         desc_avail  = desc->len - dev->vhost_hlen;
311         if (unlikely(desc_chunck_len < dev->vhost_hlen)) {
312                 desc_chunck_len = desc_avail;
313                 desc_gaddr += dev->vhost_hlen;
314                 desc_addr = gpa_to_vva(dev,
315                                 desc_gaddr,
316                                 &desc_chunck_len);
317                 if (unlikely(!desc_addr))
318                         return -1;
319
320                 desc_offset = 0;
321         } else {
322                 desc_offset = dev->vhost_hlen;
323                 desc_chunck_len -= dev->vhost_hlen;
324         }
325
326         mbuf_avail  = rte_pktmbuf_data_len(m);
327         mbuf_offset = 0;
328         while (mbuf_avail != 0 || m->next != NULL) {
329                 /* done with current mbuf, fetch next */
330                 if (mbuf_avail == 0) {
331                         m = m->next;
332
333                         mbuf_offset = 0;
334                         mbuf_avail  = rte_pktmbuf_data_len(m);
335                 }
336
337                 /* done with current desc buf, fetch next */
338                 if (desc_avail == 0) {
339                         if ((desc->flags & VRING_DESC_F_NEXT) == 0) {
340                                 /* Not enough room in the vring buffer */
341                                 return -1;
342                         }
343                         if (unlikely(desc->next >= size || ++nr_desc > size))
344                                 return -1;
345
346                         desc = &descs[desc->next];
347                         desc_chunck_len = desc->len;
348                         desc_gaddr = desc->addr;
349                         desc_addr = gpa_to_vva(dev,
350                                         desc_gaddr, &desc_chunck_len);
351                         if (unlikely(!desc_addr))
352                                 return -1;
353
354                         desc_offset = 0;
355                         desc_avail  = desc->len;
356                 } else if (unlikely(desc_chunck_len == 0)) {
357                         desc_chunck_len = desc_avail;
358                         desc_gaddr += desc_offset;
359                         desc_addr = gpa_to_vva(dev,
360                                         desc_gaddr, &desc_chunck_len);
361                         if (unlikely(!desc_addr))
362                                 return -1;
363
364                         desc_offset = 0;
365                 }
366
367                 cpy_len = RTE_MIN(desc_avail, mbuf_avail);
368                 rte_memcpy((void *)((uintptr_t)(desc_addr + desc_offset)),
369                         rte_pktmbuf_mtod_offset(m, void *, mbuf_offset),
370                         cpy_len);
371                 vhost_log_write(dev, desc_gaddr + desc_offset, cpy_len);
372                 PRINT_PACKET(dev, (uintptr_t)(desc_addr + desc_offset),
373                              cpy_len, 0);
374
375                 mbuf_avail  -= cpy_len;
376                 mbuf_offset += cpy_len;
377                 desc_avail  -= cpy_len;
378                 desc_offset += cpy_len;
379                 desc_chunck_len -= cpy_len;
380         }
381
382         return 0;
383 }
384
385 /**
386  * This function adds buffers to the virtio device's RX virtqueue. Buffers can
387  * be received from the physical port or from another virtio device. A packet
388  * count is returned to indicate the number of packets that were successfully
389  * added to the RX queue. This function handles scattered mbufs, but it does
390  * not support the mergeable feature.
391  */
392 static inline uint32_t __attribute__((always_inline))
393 virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
394               struct rte_mbuf **pkts, uint32_t count)
395 {
396         struct vhost_virtqueue *vq;
397         uint16_t avail_idx, free_entries, start_idx;
398         uint16_t desc_indexes[MAX_PKT_BURST];
399         struct vring_desc *descs;
400         uint16_t used_idx;
401         uint32_t i, sz;
402         uint64_t dlen;
403
404         LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);
405         if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->virt_qp_nb))) {
406                 RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n",
407                         dev->vid, __func__, queue_id);
408                 return 0;
409         }
410
411         vq = dev->virtqueue[queue_id];
412
413         rte_spinlock_lock(&vq->access_lock);
414
415         if (unlikely(vq->enabled == 0))
416                 goto out_access_unlock;
417
418         avail_idx = *((volatile uint16_t *)&vq->avail->idx);
419         start_idx = vq->last_used_idx;
420         free_entries = avail_idx - start_idx;
421         count = RTE_MIN(count, free_entries);
422         count = RTE_MIN(count, (uint32_t)MAX_PKT_BURST);
423         if (count == 0)
424                 goto out_access_unlock;
425
426         LOG_DEBUG(VHOST_DATA, "(%d) start_idx %d | end_idx %d\n",
427                 dev->vid, start_idx, start_idx + count);
428
429         /* Retrieve all of the desc indexes first to avoid caching issues. */
430         rte_prefetch0(&vq->avail->ring[start_idx & (vq->size - 1)]);
431         for (i = 0; i < count; i++) {
432                 used_idx = (start_idx + i) & (vq->size - 1);
433                 desc_indexes[i] = vq->avail->ring[used_idx];
434                 vq->used->ring[used_idx].id = desc_indexes[i];
435                 vq->used->ring[used_idx].len = pkts[i]->pkt_len +
436                                                dev->vhost_hlen;
437                 vhost_log_used_vring(dev, vq,
438                         offsetof(struct vring_used, ring[used_idx]),
439                         sizeof(vq->used->ring[used_idx]));
440         }
441
442         rte_prefetch0(&vq->desc[desc_indexes[0]]);
443         for (i = 0; i < count; i++) {
444                 struct vring_desc *idesc = NULL;
445                 uint16_t desc_idx = desc_indexes[i];
446                 int err;
447
448                 if (vq->desc[desc_idx].flags & VRING_DESC_F_INDIRECT) {
449                         dlen = vq->desc[desc_idx].len;
450                         descs = (struct vring_desc *)(uintptr_t)gpa_to_vva(dev,
451                                         vq->desc[desc_idx].addr, &dlen);
452                         if (unlikely(!descs)) {
453                                 count = i;
454                                 break;
455                         }
456
457                         if (unlikely(dlen < vq->desc[desc_idx].len)) {
458                                 /*
459                                  * The indirect desc table is not contiguous
460                                  * in the process VA space, so we have to copy it.
461                                  */
462                                 idesc = alloc_copy_ind_table(dev,
463                                                         &vq->desc[desc_idx]);
464                                 if (unlikely(!idesc))
465                                         break;
466
467                                 descs = idesc;
468                         }
469
470                         sz = vq->desc[desc_idx].len / sizeof(*descs);
471                         desc_idx = 0;
472                 } else {
473                         descs = vq->desc;
474                         sz = vq->size;
475                 }
476
477                 err = copy_mbuf_to_desc(dev, descs, pkts[i], desc_idx, sz);
478                 if (unlikely(err)) {
479                         used_idx = (start_idx + i) & (vq->size - 1);
480                         vq->used->ring[used_idx].len = dev->vhost_hlen;
481                         vhost_log_used_vring(dev, vq,
482                                 offsetof(struct vring_used, ring[used_idx]),
483                                 sizeof(vq->used->ring[used_idx]));
484                 }
485
486                 if (i + 1 < count)
487                         rte_prefetch0(&vq->desc[desc_indexes[i+1]]);
488
489                 if (unlikely(!!idesc))
490                         free_ind_table(idesc);
491         }
492
493         rte_smp_wmb();
494
495         *(volatile uint16_t *)&vq->used->idx += count;
496         vq->last_used_idx += count;
497         vhost_log_used_vring(dev, vq,
498                 offsetof(struct vring_used, idx),
499                 sizeof(vq->used->idx));
500
501         /* flush used->idx update before we read avail->flags. */
502         rte_mb();
503
504         /* Kick the guest if necessary. */
505         if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT)
506                         && (vq->callfd >= 0))
507                 eventfd_write(vq->callfd, (eventfd_t)1);
508
509 out_access_unlock:
510         rte_spinlock_unlock(&vq->access_lock);
511
512         return count;
513 }
514
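/*
 * Collect the buffers of one descriptor chain (direct or indirect) starting
 * at the given avail index into buf_vec. The chain head index and its total
 * buffer length are returned through the output parameters; returns -1 on a
 * malformed chain.
 */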
515 static inline int __attribute__((always_inline))
516 fill_vec_buf(struct virtio_net *dev, struct vhost_virtqueue *vq,
517                          uint32_t avail_idx, uint32_t *vec_idx,
518                          struct buf_vector *buf_vec, uint16_t *desc_chain_head,
519                          uint16_t *desc_chain_len)
520 {
521         uint16_t idx = vq->avail->ring[avail_idx & (vq->size - 1)];
522         uint32_t vec_id = *vec_idx;
523         uint32_t len    = 0;
524         uint64_t dlen;
525         struct vring_desc *descs = vq->desc;
526         struct vring_desc *idesc = NULL;
527
528         *desc_chain_head = idx;
529
530         if (vq->desc[idx].flags & VRING_DESC_F_INDIRECT) {
531                 dlen = vq->desc[idx].len;
532                 descs = (struct vring_desc *)(uintptr_t)
533                                         gpa_to_vva(dev, vq->desc[idx].addr,
534                                                            &dlen);
535                 if (unlikely(!descs))
536                         return -1;
537
538                 if (unlikely(dlen < vq->desc[idx].len)) {
539                         /*
540                          * The indirect desc table is not contiguous
541                          * in the process VA space, so we have to copy it.
542                          */
543                         idesc = alloc_copy_ind_table(dev, &vq->desc[idx]);
544                         if (unlikely(!idesc))
545                                 return -1;
546
547                         descs = idesc;
548                 }
549
550                 idx = 0;
551         }
552
553         while (1) {
554                 if (unlikely(vec_id >= BUF_VECTOR_MAX || idx >= vq->size)) {
555                         free_ind_table(idesc);
556                         return -1;
557                 }
558
559                 len += descs[idx].len;
560                 buf_vec[vec_id].buf_addr = descs[idx].addr;
561                 buf_vec[vec_id].buf_len  = descs[idx].len;
562                 buf_vec[vec_id].desc_idx = idx;
563                 vec_id++;
564
565                 if ((descs[idx].flags & VRING_DESC_F_NEXT) == 0)
566                         break;
567
568                 idx = descs[idx].next;
569         }
570
571         *desc_chain_len = len;
572         *vec_idx = vec_id;
573
574         if (unlikely(!!idesc))
575                 free_ind_table(idesc);
576
577         return 0;
578 }
579
580 /*
581  * Returns -1 on fail, 0 on success
582  */
583 static inline int
584 reserve_avail_buf_mergeable(struct virtio_net *dev, struct vhost_virtqueue *vq,
585                                 uint32_t size, struct buf_vector *buf_vec,
586                                 uint16_t *num_buffers, uint16_t avail_head)
587 {
588         uint16_t cur_idx;
589         uint32_t vec_idx = 0;
590         uint16_t tries = 0;
591
592         uint16_t head_idx = 0;
593         uint16_t len = 0;
594
595         *num_buffers = 0;
596         cur_idx  = vq->last_avail_idx;
597
598         while (size > 0) {
599                 if (unlikely(cur_idx == avail_head))
600                         return -1;
601
602                 if (unlikely(fill_vec_buf(dev, vq, cur_idx, &vec_idx, buf_vec,
603                                                 &head_idx, &len) < 0))
604                         return -1;
605                 len = RTE_MIN(len, size);
606                 update_shadow_used_ring(vq, head_idx, len);
607                 size -= len;
608
609                 cur_idx++;
610                 tries++;
611                 *num_buffers += 1;
612
613                 /*
614                  * If we have tried all available ring items and still
615                  * cannot get enough buffers, something abnormal has
616                  * happened.
617                  */
618                 if (unlikely(tries >= vq->size))
619                         return -1;
620         }
621
622         return 0;
623 }
624
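/*
 * Copy one mbuf chain into the guest buffers previously reserved in buf_vec,
 * filling in the mergeable virtio-net header (including num_buffers) along
 * the way.
 */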
625 static inline int __attribute__((always_inline))
626 copy_mbuf_to_desc_mergeable(struct virtio_net *dev, struct rte_mbuf *m,
627                             struct buf_vector *buf_vec, uint16_t num_buffers)
628 {
629         struct virtio_net_hdr_mrg_rxbuf virtio_hdr = {{0, 0, 0, 0, 0, 0}, 0};
630         struct virtio_net_hdr_mrg_rxbuf *hdr;
631         uint32_t vec_idx = 0;
632         uint64_t desc_addr, desc_gaddr;
633         uint64_t desc_chunck_len;
634         uint32_t mbuf_offset, mbuf_avail;
635         uint32_t desc_offset, desc_avail;
636         uint32_t cpy_len;
637         uint64_t hdr_addr, hdr_phys_addr;
638         struct rte_mbuf *hdr_mbuf;
639
640         if (unlikely(m == NULL))
641                 return -1;
642
643         desc_chunck_len = buf_vec[vec_idx].buf_len;
644         desc_gaddr = buf_vec[vec_idx].buf_addr;
645         desc_addr = gpa_to_vva(dev, desc_gaddr, &desc_chunck_len);
646         if (buf_vec[vec_idx].buf_len < dev->vhost_hlen ||
647                         !desc_addr)
648                 return -1;
649
650         hdr_mbuf = m;
651         hdr_addr = desc_addr;
652         if (unlikely(desc_chunck_len < dev->vhost_hlen))
653                 hdr = &virtio_hdr;
654         else
655                 hdr = (struct virtio_net_hdr_mrg_rxbuf *)(uintptr_t)hdr_addr;
656         hdr_phys_addr = buf_vec[vec_idx].buf_addr;
657         rte_prefetch0((void *)(uintptr_t)hdr_addr);
658
659         virtio_hdr.num_buffers = num_buffers;
660         LOG_DEBUG(VHOST_DATA, "(%d) RX: num merge buffers %d\n",
661                 dev->vid, num_buffers);
662
663         desc_avail  = buf_vec[vec_idx].buf_len - dev->vhost_hlen;
664         if (unlikely(desc_chunck_len < dev->vhost_hlen)) {
665                 desc_chunck_len = desc_avail;
666                 desc_gaddr += dev->vhost_hlen;
667                 desc_addr = gpa_to_vva(dev,
668                                 desc_gaddr,
669                                 &desc_chunck_len);
670                 if (unlikely(!desc_addr))
671                         return -1;
672
673                 desc_offset = 0;
674         } else {
675                 desc_offset = dev->vhost_hlen;
676                 desc_chunck_len -= dev->vhost_hlen;
677         }
678
679
680         mbuf_avail  = rte_pktmbuf_data_len(m);
681         mbuf_offset = 0;
682         while (mbuf_avail != 0 || m->next != NULL) {
683                 /* done with current desc buf, get the next one */
684                 if (desc_avail == 0) {
685                         vec_idx++;
686                         desc_gaddr = buf_vec[vec_idx].buf_addr;
687                         desc_chunck_len = buf_vec[vec_idx].buf_len;
688                         desc_addr = gpa_to_vva(dev, desc_gaddr,
689                                         &desc_chunck_len);
690                         if (unlikely(!desc_addr))
691                                 return -1;
692
693                         /* Prefetch buffer address. */
694                         rte_prefetch0((void *)(uintptr_t)desc_addr);
695                         desc_offset = 0;
696                         desc_avail  = buf_vec[vec_idx].buf_len;
697                 } else if (unlikely(desc_chunck_len == 0)) {
698                         desc_chunck_len = desc_avail;
699                         desc_gaddr += desc_offset;
700                         desc_addr = gpa_to_vva(dev,
701                                         desc_gaddr,
702                                         &desc_chunck_len);
703                         if (unlikely(!desc_addr))
704                                 return -1;
705
706                         desc_offset = 0;
707                 }
708
709                 /* done with current mbuf, get the next one */
710                 if (mbuf_avail == 0) {
711                         m = m->next;
712
713                         mbuf_offset = 0;
714                         mbuf_avail  = rte_pktmbuf_data_len(m);
715                 }
716
717                 if (hdr_addr) {
718                         virtio_enqueue_offload(hdr_mbuf, &virtio_hdr.hdr);
719                         if (likely(hdr != &virtio_hdr)) {
720                                 copy_virtio_net_hdr(dev, hdr_addr, virtio_hdr);
721                         } else {
722                                 uint64_t len;
723                                 uint64_t remain = dev->vhost_hlen;
724                                 uint64_t src = (uint64_t)(uintptr_t)&virtio_hdr;
725                                 uint64_t dst;
726                                 uint64_t guest_addr = hdr_phys_addr;
727
728                                 while (remain) {
729                                         len = remain;
730                                         dst = gpa_to_vva(dev, guest_addr, &len);
731                                         if (unlikely(!dst || !len))
732                                                 return -1;
733
734                                         rte_memcpy((void *)(uintptr_t)dst,
735                                                         (void *)(uintptr_t)src,
736                                                         len);
737
738                                         PRINT_PACKET(dev, (uintptr_t)dst,
739                                                         len, 0);
740
741                                         remain -= len;
742                                         guest_addr += len;
743                                         dst += len;
744                                 }
745                         }
746                         vhost_log_write(dev, hdr_phys_addr, dev->vhost_hlen);
747                         PRINT_PACKET(dev, (uintptr_t)hdr_addr,
748                                      dev->vhost_hlen, 0);
749
750                         hdr_addr = 0;
751                 }
752
753                 cpy_len = RTE_MIN(desc_chunck_len, mbuf_avail);
754                 rte_memcpy((void *)((uintptr_t)(desc_addr + desc_offset)),
755                         rte_pktmbuf_mtod_offset(m, void *, mbuf_offset),
756                         cpy_len);
757                 vhost_log_write(dev, desc_gaddr + desc_offset, cpy_len);
758                 PRINT_PACKET(dev, (uintptr_t)(desc_addr + desc_offset),
759                         cpy_len, 0);
760
761                 mbuf_avail  -= cpy_len;
762                 mbuf_offset += cpy_len;
763                 desc_avail  -= cpy_len;
764                 desc_offset += cpy_len;
765                 desc_chunck_len -= cpy_len;
766         }
767
768         return 0;
769 }
770
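/*
 * Enqueue path used when VIRTIO_NET_F_MRG_RXBUF has been negotiated: a single
 * packet may be spread across several descriptor chains.
 */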
771 static inline uint32_t __attribute__((always_inline))
772 virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id,
773         struct rte_mbuf **pkts, uint32_t count)
774 {
775         struct vhost_virtqueue *vq;
776         uint32_t pkt_idx = 0;
777         uint16_t num_buffers;
778         struct buf_vector buf_vec[BUF_VECTOR_MAX];
779         uint16_t avail_head;
780
781         LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);
782         if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->virt_qp_nb))) {
783                 RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n",
784                         dev->vid, __func__, queue_id);
785                 return 0;
786         }
787
788         vq = dev->virtqueue[queue_id];
789
790         rte_spinlock_lock(&vq->access_lock);
791
792         if (unlikely(vq->enabled == 0))
793                 goto out_access_unlock;
794
795         count = RTE_MIN((uint32_t)MAX_PKT_BURST, count);
796         if (count == 0)
797                 goto out_access_unlock;
798
799         rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
800
801         vq->shadow_used_idx = 0;
802         avail_head = *((volatile uint16_t *)&vq->avail->idx);
803         for (pkt_idx = 0; pkt_idx < count; pkt_idx++) {
804                 uint32_t pkt_len = pkts[pkt_idx]->pkt_len + dev->vhost_hlen;
805
806                 if (unlikely(reserve_avail_buf_mergeable(dev, vq,
807                                                 pkt_len, buf_vec, &num_buffers,
808                                                 avail_head) < 0)) {
809                         LOG_DEBUG(VHOST_DATA,
810                                 "(%d) failed to get enough desc from vring\n",
811                                 dev->vid);
812                         vq->shadow_used_idx -= num_buffers;
813                         break;
814                 }
815
816                 LOG_DEBUG(VHOST_DATA, "(%d) current index %d | end index %d\n",
817                         dev->vid, vq->last_avail_idx,
818                         vq->last_avail_idx + num_buffers);
819
820                 if (copy_mbuf_to_desc_mergeable(dev, pkts[pkt_idx],
821                                                 buf_vec, num_buffers) < 0) {
822                         vq->shadow_used_idx -= num_buffers;
823                         break;
824                 }
825
826                 vq->last_avail_idx += num_buffers;
827         }
828
829         if (likely(vq->shadow_used_idx)) {
830                 flush_shadow_used_ring(dev, vq);
831
832                 /* flush used->idx update before we read avail->flags. */
833                 rte_mb();
834
835                 /* Kick the guest if necessary. */
836                 if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT)
837                                 && (vq->callfd >= 0))
838                         eventfd_write(vq->callfd, (eventfd_t)1);
839         }
840
841 out_access_unlock:
842         rte_spinlock_unlock(&vq->access_lock);
843
844         return pkt_idx;
845 }
846
847 uint16_t
848 rte_vhost_enqueue_burst(int vid, uint16_t queue_id,
849         struct rte_mbuf **pkts, uint16_t count)
850 {
851         struct virtio_net *dev = get_device(vid);
852
853         if (!dev)
854                 return 0;
855
856         if (dev->features & (1 << VIRTIO_NET_F_MRG_RXBUF))
857                 return virtio_dev_merge_rx(dev, queue_id, pkts, count);
858         else
859                 return virtio_dev_rx(dev, queue_id, pkts, count);
860 }
861
862 static inline bool
863 virtio_net_with_host_offload(struct virtio_net *dev)
864 {
865         if (dev->features &
866                         ((1ULL << VIRTIO_NET_F_CSUM) |
867                          (1ULL << VIRTIO_NET_F_HOST_ECN) |
868                          (1ULL << VIRTIO_NET_F_HOST_TSO4) |
869                          (1ULL << VIRTIO_NET_F_HOST_TSO6) |
870                          (1ULL << VIRTIO_NET_F_HOST_UFO)))
871                 return true;
872
873         return false;
874 }
875
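/*
 * Parse the Ethernet (and optional VLAN) and IP headers of the mbuf to set
 * l2_len/l3_len and report the L4 protocol and the location of the L4 header.
 */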
876 static void
877 parse_ethernet(struct rte_mbuf *m, uint16_t *l4_proto, void **l4_hdr)
878 {
879         struct ipv4_hdr *ipv4_hdr;
880         struct ipv6_hdr *ipv6_hdr;
881         void *l3_hdr = NULL;
882         struct ether_hdr *eth_hdr;
883         uint16_t ethertype;
884
885         eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
886
887         m->l2_len = sizeof(struct ether_hdr);
888         ethertype = rte_be_to_cpu_16(eth_hdr->ether_type);
889
890         if (ethertype == ETHER_TYPE_VLAN) {
891                 struct vlan_hdr *vlan_hdr = (struct vlan_hdr *)(eth_hdr + 1);
892
893                 m->l2_len += sizeof(struct vlan_hdr);
894                 ethertype = rte_be_to_cpu_16(vlan_hdr->eth_proto);
895         }
896
897         l3_hdr = (char *)eth_hdr + m->l2_len;
898
899         switch (ethertype) {
900         case ETHER_TYPE_IPv4:
901                 ipv4_hdr = (struct ipv4_hdr *)l3_hdr;
902                 *l4_proto = ipv4_hdr->next_proto_id;
903                 m->l3_len = (ipv4_hdr->version_ihl & 0x0f) * 4;
904                 *l4_hdr = (char *)l3_hdr + m->l3_len;
905                 m->ol_flags |= PKT_TX_IPV4;
906                 break;
907         case ETHER_TYPE_IPv6:
908                 ipv6_hdr = (struct ipv6_hdr *)l3_hdr;
909                 *l4_proto = ipv6_hdr->proto;
910                 m->l3_len = sizeof(struct ipv6_hdr);
911                 *l4_hdr = (char *)l3_hdr + m->l3_len;
912                 m->ol_flags |= PKT_TX_IPV6;
913                 break;
914         default:
915                 m->l3_len = 0;
916                 *l4_proto = 0;
917                 *l4_hdr = NULL;
918                 break;
919         }
920 }
921
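/*
 * Convert the offload requests found in the virtio-net header of a packet
 * sent by the guest into the corresponding mbuf ol_flags (checksum, TSO).
 */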
922 static inline void __attribute__((always_inline))
923 vhost_dequeue_offload(struct virtio_net_hdr *hdr, struct rte_mbuf *m)
924 {
925         uint16_t l4_proto = 0;
926         void *l4_hdr = NULL;
927         struct tcp_hdr *tcp_hdr = NULL;
928
929         if (hdr->flags == 0 && hdr->gso_type == VIRTIO_NET_HDR_GSO_NONE)
930                 return;
931
932         parse_ethernet(m, &l4_proto, &l4_hdr);
933         if (hdr->flags == VIRTIO_NET_HDR_F_NEEDS_CSUM) {
934                 if (hdr->csum_start == (m->l2_len + m->l3_len)) {
935                         switch (hdr->csum_offset) {
936                         case (offsetof(struct tcp_hdr, cksum)):
937                                 if (l4_proto == IPPROTO_TCP)
938                                         m->ol_flags |= PKT_TX_TCP_CKSUM;
939                                 break;
940                         case (offsetof(struct udp_hdr, dgram_cksum)):
941                                 if (l4_proto == IPPROTO_UDP)
942                                         m->ol_flags |= PKT_TX_UDP_CKSUM;
943                                 break;
944                         case (offsetof(struct sctp_hdr, cksum)):
945                                 if (l4_proto == IPPROTO_SCTP)
946                                         m->ol_flags |= PKT_TX_SCTP_CKSUM;
947                                 break;
948                         default:
949                                 break;
950                         }
951                 }
952         }
953
954         if (l4_hdr && hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
955                 switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
956                 case VIRTIO_NET_HDR_GSO_TCPV4:
957                 case VIRTIO_NET_HDR_GSO_TCPV6:
958                         tcp_hdr = (struct tcp_hdr *)l4_hdr;
959                         m->ol_flags |= PKT_TX_TCP_SEG;
960                         m->tso_segsz = hdr->gso_size;
961                         m->l4_len = (tcp_hdr->data_off & 0xf0) >> 2;
962                         break;
963                 default:
964                         RTE_LOG(WARNING, VHOST_DATA,
965                                 "unsupported gso type %u.\n", hdr->gso_type);
966                         break;
967                 }
968         }
969 }
970
971 #define RARP_PKT_SIZE   64
972
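/*
 * Build a RARP broadcast frame announcing the given MAC address into the
 * supplied mbuf (used to advertise the guest after live migration).
 */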
973 static int
974 make_rarp_packet(struct rte_mbuf *rarp_mbuf, const struct ether_addr *mac)
975 {
976         struct ether_hdr *eth_hdr;
977         struct arp_hdr  *rarp;
978
979         if (rarp_mbuf->buf_len < RARP_PKT_SIZE) {
980                 RTE_LOG(WARNING, VHOST_DATA,
981                         "failed to make RARP; mbuf size too small %u (< %d)\n",
982                         rarp_mbuf->buf_len, RARP_PKT_SIZE);
983                 return -1;
984         }
985
986         /* Ethernet header. */
987         eth_hdr = rte_pktmbuf_mtod_offset(rarp_mbuf, struct ether_hdr *, 0);
988         memset(eth_hdr->d_addr.addr_bytes, 0xff, ETHER_ADDR_LEN);
989         ether_addr_copy(mac, &eth_hdr->s_addr);
990         eth_hdr->ether_type = htons(ETHER_TYPE_RARP);
991
992         /* RARP header. */
993         rarp = (struct arp_hdr *)(eth_hdr + 1);
994         rarp->arp_hrd = htons(ARP_HRD_ETHER);
995         rarp->arp_pro = htons(ETHER_TYPE_IPv4);
996         rarp->arp_hln = ETHER_ADDR_LEN;
997         rarp->arp_pln = 4;
998         rarp->arp_op  = htons(ARP_OP_REVREQUEST);
999
1000         ether_addr_copy(mac, &rarp->arp_data.arp_sha);
1001         ether_addr_copy(mac, &rarp->arp_data.arp_tha);
1002         memset(&rarp->arp_data.arp_sip, 0x00, 4);
1003         memset(&rarp->arp_data.arp_tip, 0x00, 4);
1004
1005         rarp_mbuf->pkt_len  = rarp_mbuf->data_len = RARP_PKT_SIZE;
1006
1007         return 0;
1008 }
1009
1010 static inline void __attribute__((always_inline))
1011 put_zmbuf(struct zcopy_mbuf *zmbuf)
1012 {
1013         zmbuf->in_use = 0;
1014 }
1015
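/*
 * Copy one descriptor chain into an mbuf chain, allocating extra mbufs from
 * mbuf_pool as needed. In dequeue zero-copy mode the mbufs are made to point
 * directly at guest memory where possible. Returns 0 on success, -1 on a
 * malformed chain or allocation failure.
 */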
1016 static inline int __attribute__((always_inline))
1017 copy_desc_to_mbuf(struct virtio_net *dev, struct vring_desc *descs,
1018                   uint16_t max_desc, struct rte_mbuf *m, uint16_t desc_idx,
1019                   struct rte_mempool *mbuf_pool)
1020 {
1021         struct vring_desc *desc;
1022         uint64_t desc_addr, desc_gaddr;
1023         uint32_t desc_avail, desc_offset;
1024         uint32_t mbuf_avail, mbuf_offset;
1025         uint32_t cpy_len;
1026         uint64_t desc_chunck_len;
1027         struct rte_mbuf *cur = m, *prev = m;
1028         struct virtio_net_hdr tmp_hdr;
1029         struct virtio_net_hdr *hdr = NULL;
1030         /* A counter to guard against an endless desc chain loop */
1031         uint32_t nr_desc = 1;
1032
1033         desc = &descs[desc_idx];
1034         if (unlikely((desc->len < dev->vhost_hlen)) ||
1035                         (desc->flags & VRING_DESC_F_INDIRECT))
1036                 return -1;
1037
1038         desc_chunck_len = desc->len;
1039         desc_gaddr = desc->addr;
1040         desc_addr = gpa_to_vva(dev, desc_gaddr, &desc_chunck_len);
1041         if (unlikely(!desc_addr))
1042                 return -1;
1043
1044         if (virtio_net_with_host_offload(dev)) {
1045                 if (unlikely(desc_chunck_len < sizeof(struct virtio_net_hdr))) {
1046                         uint64_t len = desc_chunck_len;
1047                         uint64_t remain = sizeof(struct virtio_net_hdr);
1048                         uint64_t src = desc_addr;
1049                         uint64_t dst = (uint64_t)(uintptr_t)&tmp_hdr;
1050                         uint64_t guest_addr = desc_gaddr;
1051
1052                         /*
1053                          * No luck, the virtio-net header doesn't fit
1054                          * in a contiguous virtual area.
1055                          */
1056                         while (remain) {
1057                                 len = remain;
1058                                 src = gpa_to_vva(dev, guest_addr, &len);
1059                                 if (unlikely(!src || !len))
1060                                         return -1;
1061
1062                                 rte_memcpy((void *)(uintptr_t)dst,
1063                                                    (void *)(uintptr_t)src, len);
1064
1065                                 guest_addr += len;
1066                                 remain -= len;
1067                                 dst += len;
1068                         }
1069
1070                         hdr = &tmp_hdr;
1071                 } else {
1072                         hdr = (struct virtio_net_hdr *)((uintptr_t)desc_addr);
1073                         rte_prefetch0(hdr);
1074                 }
1075         }
1076
1077         /*
1078          * A virtio driver normally uses at least 2 desc buffers
1079          * for Tx: the first for storing the header, and others
1080          * for storing the data.
1081          */
1082         if (likely((desc->len == dev->vhost_hlen) &&
1083                    (desc->flags & VRING_DESC_F_NEXT) != 0)) {
1084                 desc = &descs[desc->next];
1085                 if (unlikely(desc->flags & VRING_DESC_F_INDIRECT))
1086                         return -1;
1087
1088                 desc_chunck_len = desc->len;
1089                 desc_gaddr = desc->addr;
1090                 desc_addr = gpa_to_vva(dev, desc_gaddr, &desc_chunck_len);
1091                 if (unlikely(!desc_addr))
1092                         return -1;
1093
1094                 desc_offset = 0;
1095                 desc_avail  = desc->len;
1096                 nr_desc    += 1;
1097         } else {
1098                 desc_avail  = desc->len - dev->vhost_hlen;
1099
1100                 if (unlikely(desc_chunck_len < dev->vhost_hlen)) {
1101                         desc_chunck_len = desc_avail;
1102                         desc_gaddr += dev->vhost_hlen;
1103                         desc_addr = gpa_to_vva(dev,
1104                                         desc_gaddr,
1105                                         &desc_chunck_len);
1106                         if (unlikely(!desc_addr))
1107                                 return -1;
1108
1109                         desc_offset = 0;
1110                 } else {
1111                         desc_offset = dev->vhost_hlen;
1112                         desc_chunck_len -= dev->vhost_hlen;
1113                 }
1114         }
1115
1116         rte_prefetch0((void *)(uintptr_t)(desc_addr + desc_offset));
1117
1118         PRINT_PACKET(dev, (uintptr_t)(desc_addr + desc_offset),
1119                         desc_chunck_len, 0);
1120
1121         mbuf_offset = 0;
1122         mbuf_avail  = m->buf_len - RTE_PKTMBUF_HEADROOM;
1123         while (1) {
1124                 uint64_t hpa;
1125
1126                 cpy_len = RTE_MIN(desc_chunck_len, mbuf_avail);
1127
1128                 /*
1129                  * A desc buf might span two host physical pages that are
1130                  * not contiguous. In that case (gpa_to_hpa returns 0), the
1131                  * data is copied even though zero copy is enabled.
1132                  */
1133                 if (unlikely(dev->dequeue_zero_copy && (hpa = gpa_to_hpa(dev,
1134                                         desc_gaddr + desc_offset, cpy_len)))) {
1135                         cur->data_len = cpy_len;
1136                         cur->data_off = 0;
1137                         cur->buf_addr = (void *)(uintptr_t)(desc_gaddr
1138                                         + desc_offset);
1139                         cur->buf_physaddr = hpa;
1140
1141                         /*
1142                          * In zero copy mode, one mbuf can only reference data
1143                          * from a single desc buf, or part of one.
1144                          */
1145                         mbuf_avail = cpy_len;
1146                 } else {
1147                         rte_memcpy(rte_pktmbuf_mtod_offset(cur, void *,
1148                                                            mbuf_offset),
1149                                 (void *)((uintptr_t)(desc_addr + desc_offset)),
1150                                 cpy_len);
1151                 }
1152
1153                 mbuf_avail  -= cpy_len;
1154                 mbuf_offset += cpy_len;
1155                 desc_avail  -= cpy_len;
1156                 desc_chunck_len -= cpy_len;
1157                 desc_offset += cpy_len;
1158
1159                 /* This desc buf is exhausted, get the next one */
1160                 if (desc_avail == 0) {
1161                         if ((desc->flags & VRING_DESC_F_NEXT) == 0)
1162                                 break;
1163
1164                         if (unlikely(desc->next >= max_desc ||
1165                                      ++nr_desc > max_desc))
1166                                 return -1;
1167                         desc = &descs[desc->next];
1168                         if (unlikely(desc->flags & VRING_DESC_F_INDIRECT))
1169                                 return -1;
1170
1171                         desc_chunck_len = desc->len;
1172                         desc_gaddr = desc->addr;
1173                         desc_addr = gpa_to_vva(dev, desc_gaddr,
1174                                         &desc_chunck_len);
1175                         if (unlikely(!desc_addr))
1176                                 return -1;
1177
1178                         rte_prefetch0((void *)(uintptr_t)desc_addr);
1179
1180                         desc_offset = 0;
1181                         desc_avail  = desc->len;
1182
1183                         PRINT_PACKET(dev, (uintptr_t)desc_addr,
1184                                         desc_chunck_len, 0);
1185                 } else if (unlikely(desc_chunck_len == 0)) {
1186                         desc_chunck_len = desc_avail;
1187                         desc_gaddr += desc_offset;
1188                         desc_addr = gpa_to_vva(dev,
1189                                         desc_gaddr,
1190                                         &desc_chunck_len);
1191                         if (unlikely(!desc_addr))
1192                                 return -1;
1193
1194                         desc_offset = 0;
1195
1196                         PRINT_PACKET(dev, (uintptr_t)desc_addr,
1197                                         desc_chunck_len, 0);
1198                 }
1199
1200                 /*
1201                  * This mbuf is full, get a new one
1202                  * to hold more data.
1203                  */
1204                 if (mbuf_avail == 0) {
1205                         cur = rte_pktmbuf_alloc(mbuf_pool);
1206                         if (unlikely(cur == NULL)) {
1207                                 RTE_LOG(ERR, VHOST_DATA, "Failed to "
1208                                         "allocate memory for mbuf.\n");
1209                                 return -1;
1210                         }
1211                         if (unlikely(dev->dequeue_zero_copy))
1212                                 rte_mbuf_refcnt_update(cur, 1);
1213
1214                         prev->next = cur;
1215                         prev->data_len = mbuf_offset;
1216                         m->nb_segs += 1;
1217                         m->pkt_len += mbuf_offset;
1218                         prev = cur;
1219
1220                         mbuf_offset = 0;
1221                         mbuf_avail  = cur->buf_len - RTE_PKTMBUF_HEADROOM;
1222                 }
1223         }
1224
1225         prev->data_len = mbuf_offset;
1226         m->pkt_len    += mbuf_offset;
1227
1228         if (hdr)
1229                 vhost_dequeue_offload(hdr, m);
1230
1231         return 0;
1232 }
1233
1234 static inline void __attribute__((always_inline))
1235 update_used_ring(struct virtio_net *dev, struct vhost_virtqueue *vq,
1236                  uint32_t used_idx, uint32_t desc_idx)
1237 {
1238         vq->used->ring[used_idx].id  = desc_idx;
1239         vq->used->ring[used_idx].len = 0;
1240         vhost_log_used_vring(dev, vq,
1241                         offsetof(struct vring_used, ring[used_idx]),
1242                         sizeof(vq->used->ring[used_idx]));
1243 }
1244
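/*
 * Publish 'count' newly filled used entries to the guest and kick it through
 * the callfd eventfd unless interrupts are suppressed.
 */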
1245 static inline void __attribute__((always_inline))
1246 update_used_idx(struct virtio_net *dev, struct vhost_virtqueue *vq,
1247                 uint32_t count)
1248 {
1249         if (unlikely(count == 0))
1250                 return;
1251
1252         rte_smp_wmb();
1253         rte_smp_rmb();
1254
1255         vq->used->idx += count;
1256         vhost_log_used_vring(dev, vq, offsetof(struct vring_used, idx),
1257                         sizeof(vq->used->idx));
1258
1259         /* Kick guest if required. */
1260         if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT)
1261                         && (vq->callfd >= 0))
1262                 eventfd_write(vq->callfd, (eventfd_t)1);
1263 }
1264
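/*
 * Find a free zero-copy mbuf tracking slot, scanning circularly from the last
 * allocation point. Returns NULL when all slots are in use.
 */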
1265 static inline struct zcopy_mbuf *__attribute__((always_inline))
1266 get_zmbuf(struct vhost_virtqueue *vq)
1267 {
1268         uint16_t i;
1269         uint16_t last;
1270         int tries = 0;
1271
1272         /* search [last_zmbuf_idx, zmbuf_size) */
1273         i = vq->last_zmbuf_idx;
1274         last = vq->zmbuf_size;
1275
1276 again:
1277         for (; i < last; i++) {
1278                 if (vq->zmbufs[i].in_use == 0) {
1279                         vq->last_zmbuf_idx = i + 1;
1280                         vq->zmbufs[i].in_use = 1;
1281                         return &vq->zmbufs[i];
1282                 }
1283         }
1284
1285         tries++;
1286         if (tries == 1) {
1287                 /* search [0, last_zmbuf_idx) */
1288                 i = 0;
1289                 last = vq->last_zmbuf_idx;
1290                 goto again;
1291         }
1292
1293         return NULL;
1294 }
1295
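/*
 * A zero-copy mbuf chain may be recycled only once the reference count of
 * every segment has dropped back to one, i.e. the application has released it.
 */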
1296 static inline bool __attribute__((always_inline))
1297 mbuf_is_consumed(struct rte_mbuf *m)
1298 {
1299         while (m) {
1300                 if (rte_mbuf_refcnt_read(m) > 1)
1301                         return false;
1302                 m = m->next;
1303         }
1304
1305         return true;
1306 }
1307
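/*
 * Reset buf_addr/buf_physaddr of every segment to its own embedded data room,
 * undoing the redirection to guest memory done in zero-copy mode.
 */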
1308 static inline void __attribute__((always_inline))
1309 restore_mbuf(struct rte_mbuf *m)
1310 {
1311         uint32_t mbuf_size, priv_size;
1312
1313         while (m) {
1314                 priv_size = rte_pktmbuf_priv_size(m->pool);
1315                 mbuf_size = sizeof(struct rte_mbuf) + priv_size;
1316                 /* start of buffer is after mbuf structure and priv data */
1317
1318                 m->buf_addr = (char *)m + mbuf_size;
1319                 m->buf_physaddr = rte_mempool_virt2phy(NULL, m) + mbuf_size;
1320                 m = m->next;
1321         }
1322 }
1323
1324 uint16_t
1325 rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
1326         struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
1327 {
1328         struct virtio_net *dev;
1329         struct rte_mbuf *rarp_mbuf = NULL;
1330         struct vhost_virtqueue *vq;
1331         uint32_t desc_indexes[MAX_PKT_BURST];
1332         uint32_t used_idx;
1333         uint32_t i = 0;
1334         uint16_t free_entries;
1335         uint16_t avail_idx;
1336
1337         dev = get_device(vid);
1338         if (!dev)
1339                 return 0;
1340
1341         if (unlikely(!is_valid_virt_queue_idx(queue_id, 1, dev->virt_qp_nb))) {
1342                 RTE_LOG(ERR, VHOST_DATA, "(%d) %s: invalid virtqueue idx %d.\n",
1343                         dev->vid, __func__, queue_id);
1344                 return 0;
1345         }
1346
1347         vq = dev->virtqueue[queue_id];
1348
1349         if (unlikely(rte_spinlock_trylock(&vq->access_lock) == 0))
1350                 return 0;
1351
1352         if (unlikely(vq->enabled == 0))
1353                 goto out_access_unlock;
1354
1355         if (unlikely(dev->dequeue_zero_copy)) {
1356                 struct zcopy_mbuf *zmbuf, *next;
1357                 int nr_updated = 0;
1358
1359                 for (zmbuf = TAILQ_FIRST(&vq->zmbuf_list);
1360                      zmbuf != NULL; zmbuf = next) {
1361                         next = TAILQ_NEXT(zmbuf, next);
1362
1363                         if (mbuf_is_consumed(zmbuf->mbuf)) {
1364                                 used_idx = vq->last_used_idx++ & (vq->size - 1);
1365                                 update_used_ring(dev, vq, used_idx,
1366                                                  zmbuf->desc_idx);
1367                                 nr_updated += 1;
1368
1369                                 TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next);
1370                                 restore_mbuf(zmbuf->mbuf);
1371                                 rte_pktmbuf_free(zmbuf->mbuf);
1372                                 put_zmbuf(zmbuf);
1373                                 vq->nr_zmbuf -= 1;
1374                         }
1375                 }
1376
1377                 update_used_idx(dev, vq, nr_updated);
1378         }
1379
1380         /*
1381          * Construct a RARP broadcast packet and inject it into the "pkts"
1382          * array, so it looks like the guest actually sent such a packet.
1383          *
1384          * See user_send_rarp() for more information.
1385          *
1386          * broadcast_rarp shares a cacheline in the virtio_net structure
1387          * with some fields that are accessed during enqueue; cmpxchg used by
1388          * rte_atomic16_cmpset() writes even when the compare fails. This could
1389          * result in false sharing between enqueue and dequeue.
1390          *
1391          * Prevent unnecessary false sharing by reading broadcast_rarp first
1392          * and only performing cmpset if the read indicates it is likely to
1393          * be set.
1394          */
1395
1396         if (unlikely(rte_atomic16_read(&dev->broadcast_rarp) &&
1397                         rte_atomic16_cmpset((volatile uint16_t *)
1398                                 &dev->broadcast_rarp.cnt, 1, 0))) {
1399
1400                 rarp_mbuf = rte_pktmbuf_alloc(mbuf_pool);
1401                 if (rarp_mbuf == NULL) {
1402                         RTE_LOG(ERR, VHOST_DATA,
1403                                 "Failed to allocate memory for mbuf.\n");
1404                         goto out_access_unlock;
1405                 }
1406
1407                 if (make_rarp_packet(rarp_mbuf, &dev->mac)) {
1408                         rte_pktmbuf_free(rarp_mbuf);
1409                         rarp_mbuf = NULL;
1410                 } else {
1411                         count -= 1;
1412                 }
1413         }
1414
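             /*
              * avail->idx is advanced by the guest; the volatile read fetches
              * its latest value. free_entries is the number of available
              * descriptors we have not consumed yet.
              */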
1415         free_entries = *((volatile uint16_t *)&vq->avail->idx) -
1416                         vq->last_avail_idx;
1417         if (free_entries == 0)
1418                 goto out_access_unlock;
1419
1420         LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);
1421
1422         /* Prefetch available and used ring */
1423         avail_idx = vq->last_avail_idx & (vq->size - 1);
1424         used_idx  = vq->last_used_idx  & (vq->size - 1);
1425         rte_prefetch0(&vq->avail->ring[avail_idx]);
1426         rte_prefetch0(&vq->used->ring[used_idx]);
1427
1428         count = RTE_MIN(count, MAX_PKT_BURST);
1429         count = RTE_MIN(count, free_entries);
1430         LOG_DEBUG(VHOST_DATA, "(%d) about to dequeue %u buffers\n",
1431                         dev->vid, count);
1432
1433         /* Retrieve all of the head indexes first to avoid caching issues. */
1434         for (i = 0; i < count; i++) {
1435                 avail_idx = (vq->last_avail_idx + i) & (vq->size - 1);
1436                 used_idx  = (vq->last_used_idx  + i) & (vq->size - 1);
1437                 desc_indexes[i] = vq->avail->ring[avail_idx];
1438
1439                 if (likely(dev->dequeue_zero_copy == 0))
1440                         update_used_ring(dev, vq, used_idx, desc_indexes[i]);
1441         }
1442
1443         /* Prefetch descriptor index. */
1444         rte_prefetch0(&vq->desc[desc_indexes[0]]);
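             /*
              * Main dequeue loop: resolve indirect descriptor tables when
              * present, allocate one mbuf per packet and copy the descriptor
              * chain into it. In zero-copy mode the mbuf is additionally
              * pinned and tracked on the zmbuf list until the consumer is
              * done with it.
              */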
1445         for (i = 0; i < count; i++) {
1446                 struct vring_desc *desc, *idesc = NULL;
1447                 uint16_t sz, idx;
1448                 uint64_t dlen;
1449                 int err;
1450
1451                 if (likely(i + 1 < count))
1452                         rte_prefetch0(&vq->desc[desc_indexes[i + 1]]);
1453
1454                 if (vq->desc[desc_indexes[i]].flags & VRING_DESC_F_INDIRECT) {
1455                         dlen = vq->desc[desc_indexes[i]].len;
1456                         desc = (struct vring_desc *)(uintptr_t)gpa_to_vva(dev,
1457                                         vq->desc[desc_indexes[i]].addr,
1458                                         &dlen);
1459                         if (unlikely(!desc))
1460                                 break;
1461
1462                         if (unlikely(dlen < vq->desc[desc_indexes[i]].len)) {
1463                                 /*
1464                                  * The indirect desc table is not contiguous
1465                                  * in the process VA space, so we have to copy it.
1466                                  */
1467                                 idesc = alloc_copy_ind_table(dev,
1468                                                 &vq->desc[desc_indexes[i]]);
1469                                 if (unlikely(!idesc))
1470                                         break;
1471
1472                                 desc = idesc;
1473                         }
1474
1475                         rte_prefetch0(desc);
1476                         sz = vq->desc[desc_indexes[i]].len / sizeof(*desc);
1477                         idx = 0;
1478                 } else {
1479                         desc = vq->desc;
1480                         sz = vq->size;
1481                         idx = desc_indexes[i];
1482                 }
1483
1484                 pkts[i] = rte_pktmbuf_alloc(mbuf_pool);
1485                 if (unlikely(pkts[i] == NULL)) {
1486                         RTE_LOG(ERR, VHOST_DATA,
1487                                 "Failed to allocate memory for mbuf.\n");
1488                         free_ind_table(idesc);
1489                         break;
1490                 }
1491
1492                 err = copy_desc_to_mbuf(dev, desc, sz, pkts[i], idx, mbuf_pool);
1493                 if (unlikely(err)) {
1494                         rte_pktmbuf_free(pkts[i]);
1495                         free_ind_table(idesc);
1496                         break;
1497                 }
1498
1499                 if (unlikely(dev->dequeue_zero_copy)) {
1500                         struct zcopy_mbuf *zmbuf;
1501
1502                         zmbuf = get_zmbuf(vq);
1503                         if (!zmbuf) {
1504                                 rte_pktmbuf_free(pkts[i]);
1505                                 free_ind_table(idesc);
1506                                 break;
1507                         }
1508                         zmbuf->mbuf = pkts[i];
1509                         zmbuf->desc_idx = desc_indexes[i];
1510
1511                         /*
1512                          * Pin the mbuf by taking an extra reference; we check
1513                          * later whether it has been freed by the consumer,
1514                          * i.e. whether we are the last user. Only then can the
1515                          * used ring be updated safely.
1516                          */
1517                         rte_mbuf_refcnt_update(pkts[i], 1);
1518
1519                         vq->nr_zmbuf += 1;
1520                         TAILQ_INSERT_TAIL(&vq->zmbuf_list, zmbuf, next);
1521                 }
1522
1523                 if (unlikely(!!idesc))
1524                         free_ind_table(idesc);
1525         }
1526         vq->last_avail_idx += i;
1527
1528         if (likely(dev->dequeue_zero_copy == 0)) {
1529                 vq->last_used_idx += i;
1530                 update_used_idx(dev, vq, i);
1531         }
1532
1533 out_access_unlock:
1534         rte_spinlock_unlock(&vq->access_lock);
1535
1536         if (unlikely(rarp_mbuf != NULL)) {
1537                 /*
1538                  * Inject it at the head of the "pkts" array, so that the
1539                  * switch's MAC learning table gets updated first.
1540                  */
1541                 memmove(&pkts[1], pkts, i * sizeof(struct rte_mbuf *));
1542                 pkts[0] = rarp_mbuf;
1543                 i += 1;
1544         }
1545
1546         return i;
1547 }