#include <rte_udp.h>
#include <rte_sctp.h>
#include <rte_arp.h>
+#include <rte_spinlock.h>
+#include <rte_malloc.h>
+#include "iotlb.h"
#include "vhost.h"
#define MAX_PKT_BURST 32
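+/*
+ * Copies of at most this many bytes are deferred into the per-virtqueue
+ * batch_copy array and flushed in a single pass by
+ * do_data_copy_enqueue()/do_data_copy_dequeue().
+ */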
+#define MAX_BATCH_LEN 256
+
static bool
is_valid_virt_queue_idx(uint32_t idx, int is_tx, uint32_t nr_vring)
{
return (is_tx ^ (idx & 1)) == 0 && idx < nr_vring;
}
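+
+/*
+ * Allocate a copy of the indirect descriptor table when its guest
+ * mapping is not contiguous in the host process VA space. The copy is
+ * rebuilt chunk by chunk via vhost_iova_to_vva(); NULL is returned on
+ * allocation or translation failure, and callers must release the copy
+ * with free_ind_table().
+ */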
+static __rte_always_inline struct vring_desc *
+alloc_copy_ind_table(struct virtio_net *dev, struct vhost_virtqueue *vq,
+ struct vring_desc *desc)
+{
+ struct vring_desc *idesc;
+ uint64_t src, dst;
+ uint64_t len, remain = desc->len;
+ uint64_t desc_addr = desc->addr;
+
+ idesc = rte_malloc(__func__, desc->len, 0);
+	if (unlikely(!idesc))
+		return NULL;
+
+ dst = (uint64_t)(uintptr_t)idesc;
+
+ while (remain) {
+ len = remain;
+ src = vhost_iova_to_vva(dev, vq, desc_addr, &len,
+ VHOST_ACCESS_RO);
+ if (unlikely(!src || !len)) {
+ rte_free(idesc);
+			return NULL;
+ }
+
+ rte_memcpy((void *)(uintptr_t)dst, (void *)(uintptr_t)src, len);
+
+ remain -= len;
+ dst += len;
+ desc_addr += len;
+ }
+
+ return idesc;
+}
+
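+/* Release an indirect table copy; rte_free() accepts NULL as a no-op. */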
+static __rte_always_inline void
+free_ind_table(struct vring_desc *idesc)
+{
+ rte_free(idesc);
+}
+
static __rte_always_inline void
do_flush_shadow_used_ring(struct virtio_net *dev, struct vhost_virtqueue *vq,
uint16_t to, uint16_t from, uint16_t size)
rte_memcpy(&vq->used->ring[to],
&vq->shadow_used_ring[from],
size * sizeof(struct vring_used_elem));
- vhost_log_used_vring(dev, vq,
+ vhost_log_cache_used_vring(dev, vq,
offsetof(struct vring_used, ring[to]),
size * sizeof(struct vring_used_elem));
}
rte_smp_wmb();
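+	/* Flush cached dirty-log entries before updating used->idx. */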
+ vhost_log_cache_sync(dev, vq);
+
*(volatile uint16_t *)&vq->used->idx += vq->shadow_used_idx;
vhost_log_used_vring(dev, vq, offsetof(struct vring_used, idx),
sizeof(vq->used->idx));
static __rte_always_inline void
update_shadow_used_ring(struct vhost_virtqueue *vq,
- uint16_t desc_idx, uint16_t len)
+ uint16_t desc_idx, uint32_t len)
{
uint16_t i = vq->shadow_used_idx++;
vq->shadow_used_ring[i].len = len;
}
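+
+/*
+ * Flush the copies batched while walking the descriptors: perform the
+ * deferred memcpys, log the dirty guest pages and, when debugging is
+ * enabled, dump the copied packets.
+ */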
+static inline void
+do_data_copy_enqueue(struct virtio_net *dev, struct vhost_virtqueue *vq)
+{
+ struct batch_copy_elem *elem = vq->batch_copy_elems;
+ uint16_t count = vq->batch_copy_nb_elems;
+ int i;
+
+ for (i = 0; i < count; i++) {
+ rte_memcpy(elem[i].dst, elem[i].src, elem[i].len);
+ vhost_log_cache_write(dev, vq, elem[i].log_addr, elem[i].len);
+ PRINT_PACKET(dev, (uintptr_t)elem[i].dst, elem[i].len, 0);
+ }
+}
+
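+/*
+ * Dequeue flush: only the memcpys are performed, since the
+ * destinations are mbufs rather than guest memory, so there are no
+ * dirty pages to log.
+ */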
+static inline void
+do_data_copy_dequeue(struct vhost_virtqueue *vq)
+{
+ struct batch_copy_elem *elem = vq->batch_copy_elems;
+ uint16_t count = vq->batch_copy_nb_elems;
+ int i;
+
+ for (i = 0; i < count; i++)
+ rte_memcpy(elem[i].dst, elem[i].src, elem[i].len);
+}
+
/* skip the write when the value is unchanged, to lessen cache issues */
#define ASSIGN_UNLESS_EQUAL(var, val) do { \
if ((var) != (val)) \
}
static __rte_always_inline int
-copy_mbuf_to_desc(struct virtio_net *dev, struct vring_desc *descs,
- struct rte_mbuf *m, uint16_t desc_idx, uint32_t size)
+copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq,
+ struct vring_desc *descs, struct rte_mbuf *m,
+ uint16_t desc_idx, uint32_t size)
{
uint32_t desc_avail, desc_offset;
uint32_t mbuf_avail, mbuf_offset;
uint32_t cpy_len;
+ uint64_t desc_chunck_len;
struct vring_desc *desc;
- uint64_t desc_addr;
+ uint64_t desc_addr, desc_gaddr;
	/* A counter to avoid a dead descriptor chain loop */
uint16_t nr_desc = 1;
+ struct batch_copy_elem *batch_copy = vq->batch_copy_elems;
+ uint16_t copy_nb = vq->batch_copy_nb_elems;
+ int error = 0;
desc = &descs[desc_idx];
- desc_addr = rte_vhost_gpa_to_vva(dev->mem, desc->addr);
+ desc_chunck_len = desc->len;
+ desc_gaddr = desc->addr;
+ desc_addr = vhost_iova_to_vva(dev, vq, desc_gaddr,
+ &desc_chunck_len, VHOST_ACCESS_RW);
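+	/*
+	 * vhost_iova_to_vva() may shrink desc_chunck_len to the largest
+	 * chunk of the buffer that is contiguous in process VA space; the
+	 * code below re-translates whenever the current chunk is used up.
+	 */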
/*
* Checking of 'desc_addr' placed outside of 'unlikely' macro to avoid
* performance issue with some versions of gcc (4.8.4 and 5.3.0) which
* otherwise stores offset on the stack instead of in a register.
*/
- if (unlikely(desc->len < dev->vhost_hlen) || !desc_addr)
- return -1;
+ if (unlikely(desc->len < dev->vhost_hlen) || !desc_addr) {
+ error = -1;
+ goto out;
+ }
rte_prefetch0((void *)(uintptr_t)desc_addr);
- virtio_enqueue_offload(m, (struct virtio_net_hdr *)(uintptr_t)desc_addr);
- vhost_log_write(dev, desc->addr, dev->vhost_hlen);
- PRINT_PACKET(dev, (uintptr_t)desc_addr, dev->vhost_hlen, 0);
+ if (likely(desc_chunck_len >= dev->vhost_hlen)) {
+ virtio_enqueue_offload(m,
+ (struct virtio_net_hdr *)(uintptr_t)desc_addr);
+ PRINT_PACKET(dev, (uintptr_t)desc_addr, dev->vhost_hlen, 0);
+ vhost_log_cache_write(dev, vq, desc_gaddr, dev->vhost_hlen);
+ } else {
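+		/*
+		 * The virtio-net header itself spans non-contiguous chunks:
+		 * build it on the stack, then write it out through per-chunk
+		 * translations.
+		 */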
+ struct virtio_net_hdr vnet_hdr;
+ uint64_t remain = dev->vhost_hlen;
+ uint64_t len;
+ uint64_t src = (uint64_t)(uintptr_t)&vnet_hdr, dst;
+ uint64_t guest_addr = desc_gaddr;
+
+ virtio_enqueue_offload(m, &vnet_hdr);
+
+ while (remain) {
+ len = remain;
+ dst = vhost_iova_to_vva(dev, vq, guest_addr,
+ &len, VHOST_ACCESS_RW);
+ if (unlikely(!dst || !len)) {
+ error = -1;
+ goto out;
+ }
+
+ rte_memcpy((void *)(uintptr_t)dst,
+ (void *)(uintptr_t)src, len);
+
+ PRINT_PACKET(dev, (uintptr_t)dst, (uint32_t)len, 0);
+ vhost_log_cache_write(dev, vq, guest_addr, len);
+ remain -= len;
+ guest_addr += len;
+ dst += len;
+ }
+ }
- desc_offset = dev->vhost_hlen;
desc_avail = desc->len - dev->vhost_hlen;
+ if (unlikely(desc_chunck_len < dev->vhost_hlen)) {
+ desc_chunck_len = desc_avail;
+ desc_gaddr = desc->addr + dev->vhost_hlen;
+ desc_addr = vhost_iova_to_vva(dev,
+ vq, desc_gaddr,
+ &desc_chunck_len,
+ VHOST_ACCESS_RW);
+ if (unlikely(!desc_addr)) {
+ error = -1;
+ goto out;
+ }
+
+ desc_offset = 0;
+ } else {
+ desc_offset = dev->vhost_hlen;
+ desc_chunck_len -= dev->vhost_hlen;
+ }
mbuf_avail = rte_pktmbuf_data_len(m);
mbuf_offset = 0;
if (desc_avail == 0) {
if ((desc->flags & VRING_DESC_F_NEXT) == 0) {
			/* Not enough room in the vring buffer */
- return -1;
+ error = -1;
+ goto out;
+ }
+ if (unlikely(desc->next >= size || ++nr_desc > size)) {
+ error = -1;
+ goto out;
}
- if (unlikely(desc->next >= size || ++nr_desc > size))
- return -1;
desc = &descs[desc->next];
- desc_addr = rte_vhost_gpa_to_vva(dev->mem, desc->addr);
- if (unlikely(!desc_addr))
- return -1;
+ desc_chunck_len = desc->len;
+ desc_gaddr = desc->addr;
+ desc_addr = vhost_iova_to_vva(dev, vq, desc_gaddr,
+ &desc_chunck_len,
+ VHOST_ACCESS_RW);
+ if (unlikely(!desc_addr)) {
+ error = -1;
+ goto out;
+ }
desc_offset = 0;
desc_avail = desc->len;
+ } else if (unlikely(desc_chunck_len == 0)) {
+ desc_chunck_len = desc_avail;
+ desc_gaddr += desc_offset;
+ desc_addr = vhost_iova_to_vva(dev,
+ vq, desc_gaddr,
+ &desc_chunck_len, VHOST_ACCESS_RW);
+ if (unlikely(!desc_addr)) {
+ error = -1;
+ goto out;
+ }
+ desc_offset = 0;
}
- cpy_len = RTE_MIN(desc_avail, mbuf_avail);
- rte_memcpy((void *)((uintptr_t)(desc_addr + desc_offset)),
- rte_pktmbuf_mtod_offset(m, void *, mbuf_offset),
- cpy_len);
- vhost_log_write(dev, desc->addr + desc_offset, cpy_len);
- PRINT_PACKET(dev, (uintptr_t)(desc_addr + desc_offset),
- cpy_len, 0);
+ cpy_len = RTE_MIN(desc_chunck_len, mbuf_avail);
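+		/*
+		 * Copy large segments immediately, or fall back to a direct
+		 * copy once the batch array (vq->size entries) is full;
+		 * small segments are queued for the deferred flush.
+		 */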
+ if (likely(cpy_len > MAX_BATCH_LEN || copy_nb >= vq->size)) {
+ rte_memcpy((void *)((uintptr_t)(desc_addr +
+ desc_offset)),
+ rte_pktmbuf_mtod_offset(m, void *, mbuf_offset),
+ cpy_len);
+ vhost_log_cache_write(dev, vq, desc_gaddr + desc_offset,
+ cpy_len);
+ PRINT_PACKET(dev, (uintptr_t)(desc_addr + desc_offset),
+ cpy_len, 0);
+ } else {
+ batch_copy[copy_nb].dst =
+ (void *)((uintptr_t)(desc_addr + desc_offset));
+ batch_copy[copy_nb].src =
+ rte_pktmbuf_mtod_offset(m, void *, mbuf_offset);
+ batch_copy[copy_nb].log_addr = desc_gaddr + desc_offset;
+ batch_copy[copy_nb].len = cpy_len;
+ copy_nb++;
+ }
mbuf_avail -= cpy_len;
mbuf_offset += cpy_len;
desc_avail -= cpy_len;
desc_offset += cpy_len;
+ desc_chunck_len -= cpy_len;
}
- return 0;
+out:
+ vq->batch_copy_nb_elems = copy_nb;
+
+ return error;
}
/**
}
vq = dev->virtqueue[queue_id];
+
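+	/*
+	 * Serialize the datapath against control-path operations that
+	 * modify the virtqueue, presumably vhost-user requests that
+	 * reallocate or invalidate the rings.
+	 */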
+ rte_spinlock_lock(&vq->access_lock);
+
if (unlikely(vq->enabled == 0))
- return 0;
+ goto out_access_unlock;
+
+ if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
+ vhost_user_iotlb_rd_lock(vq);
+
+ if (unlikely(vq->access_ok == 0)) {
+ if (unlikely(vring_translate(dev, vq) < 0)) {
+ count = 0;
+ goto out;
+ }
+ }
avail_idx = *((volatile uint16_t *)&vq->avail->idx);
start_idx = vq->last_used_idx;
count = RTE_MIN(count, free_entries);
count = RTE_MIN(count, (uint32_t)MAX_PKT_BURST);
if (count == 0)
- return 0;
+ goto out;
LOG_DEBUG(VHOST_DATA, "(%d) start_idx %d | end_idx %d\n",
dev->vid, start_idx, start_idx + count);
+ vq->batch_copy_nb_elems = 0;
+
/* Retrieve all of the desc indexes first to avoid caching issues. */
rte_prefetch0(&vq->avail->ring[start_idx & (vq->size - 1)]);
for (i = 0; i < count; i++) {
vq->used->ring[used_idx].id = desc_indexes[i];
vq->used->ring[used_idx].len = pkts[i]->pkt_len +
dev->vhost_hlen;
- vhost_log_used_vring(dev, vq,
+ vhost_log_cache_used_vring(dev, vq,
offsetof(struct vring_used, ring[used_idx]),
sizeof(vq->used->ring[used_idx]));
}
rte_prefetch0(&vq->desc[desc_indexes[0]]);
for (i = 0; i < count; i++) {
+ struct vring_desc *idesc = NULL;
uint16_t desc_idx = desc_indexes[i];
int err;
if (vq->desc[desc_idx].flags & VRING_DESC_F_INDIRECT) {
+ uint64_t dlen = vq->desc[desc_idx].len;
descs = (struct vring_desc *)(uintptr_t)
- rte_vhost_gpa_to_vva(dev->mem,
- vq->desc[desc_idx].addr);
+ vhost_iova_to_vva(dev,
+ vq, vq->desc[desc_idx].addr,
+ &dlen, VHOST_ACCESS_RO);
if (unlikely(!descs)) {
count = i;
break;
}
+ if (unlikely(dlen < vq->desc[desc_idx].len)) {
+ /*
+ * The indirect desc table is not contiguous
+			 * in process VA space, so we have to copy it.
+ */
+ idesc = alloc_copy_ind_table(dev, vq,
+ &vq->desc[desc_idx]);
+			if (unlikely(!idesc)) {
+				count = i;
+				break;
+			}
+
+ descs = idesc;
+ }
+
		sz = vq->desc[desc_idx].len / sizeof(*descs);
		desc_idx = 0;
} else {
sz = vq->size;
}
- err = copy_mbuf_to_desc(dev, descs, pkts[i], desc_idx, sz);
+ err = copy_mbuf_to_desc(dev, vq, descs, pkts[i], desc_idx, sz);
if (unlikely(err)) {
- used_idx = (start_idx + i) & (vq->size - 1);
- vq->used->ring[used_idx].len = dev->vhost_hlen;
- vhost_log_used_vring(dev, vq,
- offsetof(struct vring_used, ring[used_idx]),
- sizeof(vq->used->ring[used_idx]));
+ count = i;
+ free_ind_table(idesc);
+ break;
}
if (i + 1 < count)
rte_prefetch0(&vq->desc[desc_indexes[i+1]]);
+
+ if (unlikely(!!idesc))
+ free_ind_table(idesc);
}
+ do_data_copy_enqueue(dev, vq);
+
rte_smp_wmb();
+ vhost_log_cache_sync(dev, vq);
+
*(volatile uint16_t *)&vq->used->idx += count;
vq->last_used_idx += count;
vhost_log_used_vring(dev, vq,
if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT)
&& (vq->callfd >= 0))
eventfd_write(vq->callfd, (eventfd_t)1);
+out:
+ if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
+ vhost_user_iotlb_rd_unlock(vq);
+
+out_access_unlock:
+ rte_spinlock_unlock(&vq->access_lock);
+
return count;
}
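+
+/*
+ * Note that desc_chain_len is accumulated in 32 bits: a long
+ * descriptor chain can exceed what a 16-bit length would hold.
+ */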
fill_vec_buf(struct virtio_net *dev, struct vhost_virtqueue *vq,
uint32_t avail_idx, uint32_t *vec_idx,
struct buf_vector *buf_vec, uint16_t *desc_chain_head,
- uint16_t *desc_chain_len)
+ uint32_t *desc_chain_len)
{
uint16_t idx = vq->avail->ring[avail_idx & (vq->size - 1)];
uint32_t vec_id = *vec_idx;
uint32_t len = 0;
+ uint64_t dlen;
struct vring_desc *descs = vq->desc;
+ struct vring_desc *idesc = NULL;
*desc_chain_head = idx;
if (vq->desc[idx].flags & VRING_DESC_F_INDIRECT) {
+ dlen = vq->desc[idx].len;
descs = (struct vring_desc *)(uintptr_t)
- rte_vhost_gpa_to_vva(dev->mem, vq->desc[idx].addr);
+ vhost_iova_to_vva(dev, vq, vq->desc[idx].addr,
+ &dlen,
+ VHOST_ACCESS_RO);
if (unlikely(!descs))
return -1;
+ if (unlikely(dlen < vq->desc[idx].len)) {
+ /*
+ * The indirect desc table is not contiguous
+			 * in process VA space, so we have to copy it.
+ */
+ idesc = alloc_copy_ind_table(dev, vq, &vq->desc[idx]);
+ if (unlikely(!idesc))
+ return -1;
+
+ descs = idesc;
+ }
+
idx = 0;
}
while (1) {
- if (unlikely(vec_id >= BUF_VECTOR_MAX || idx >= vq->size))
+ if (unlikely(vec_id >= BUF_VECTOR_MAX || idx >= vq->size)) {
+ free_ind_table(idesc);
return -1;
+ }
len += descs[idx].len;
buf_vec[vec_id].buf_addr = descs[idx].addr;
*desc_chain_len = len;
*vec_idx = vec_id;
+ if (unlikely(!!idesc))
+ free_ind_table(idesc);
+
return 0;
}
uint16_t tries = 0;
uint16_t head_idx = 0;
- uint16_t len = 0;
+ uint32_t len = 0;
*num_buffers = 0;
cur_idx = vq->last_avail_idx;
}
static __rte_always_inline int
-copy_mbuf_to_desc_mergeable(struct virtio_net *dev, struct rte_mbuf *m,
- struct buf_vector *buf_vec, uint16_t num_buffers)
+copy_mbuf_to_desc_mergeable(struct virtio_net *dev, struct vhost_virtqueue *vq,
+ struct rte_mbuf *m, struct buf_vector *buf_vec,
+ uint16_t num_buffers)
{
uint32_t vec_idx = 0;
- uint64_t desc_addr;
+ uint64_t desc_addr, desc_gaddr;
uint32_t mbuf_offset, mbuf_avail;
uint32_t desc_offset, desc_avail;
uint32_t cpy_len;
+ uint64_t desc_chunck_len;
uint64_t hdr_addr, hdr_phys_addr;
struct rte_mbuf *hdr_mbuf;
+ struct batch_copy_elem *batch_copy = vq->batch_copy_elems;
+ struct virtio_net_hdr_mrg_rxbuf tmp_hdr, *hdr = NULL;
+ uint16_t copy_nb = vq->batch_copy_nb_elems;
+ int error = 0;
- if (unlikely(m == NULL))
- return -1;
+ if (unlikely(m == NULL)) {
+ error = -1;
+ goto out;
+ }
- desc_addr = rte_vhost_gpa_to_vva(dev->mem, buf_vec[vec_idx].buf_addr);
- if (buf_vec[vec_idx].buf_len < dev->vhost_hlen || !desc_addr)
- return -1;
+ desc_chunck_len = buf_vec[vec_idx].buf_len;
+ desc_gaddr = buf_vec[vec_idx].buf_addr;
+ desc_addr = vhost_iova_to_vva(dev, vq,
+ desc_gaddr,
+ &desc_chunck_len,
+ VHOST_ACCESS_RW);
+ if (buf_vec[vec_idx].buf_len < dev->vhost_hlen || !desc_addr) {
+ error = -1;
+ goto out;
+ }
hdr_mbuf = m;
hdr_addr = desc_addr;
- hdr_phys_addr = buf_vec[vec_idx].buf_addr;
+ if (unlikely(desc_chunck_len < dev->vhost_hlen))
+ hdr = &tmp_hdr;
+ else
+ hdr = (struct virtio_net_hdr_mrg_rxbuf *)(uintptr_t)hdr_addr;
+ hdr_phys_addr = desc_gaddr;
rte_prefetch0((void *)(uintptr_t)hdr_addr);
LOG_DEBUG(VHOST_DATA, "(%d) RX: num merge buffers %d\n",
dev->vid, num_buffers);
desc_avail = buf_vec[vec_idx].buf_len - dev->vhost_hlen;
- desc_offset = dev->vhost_hlen;
+ if (unlikely(desc_chunck_len < dev->vhost_hlen)) {
+ desc_chunck_len = desc_avail;
+ desc_gaddr += dev->vhost_hlen;
+ desc_addr = vhost_iova_to_vva(dev, vq,
+ desc_gaddr,
+ &desc_chunck_len,
+ VHOST_ACCESS_RW);
+ if (unlikely(!desc_addr)) {
+ error = -1;
+ goto out;
+ }
+
+ desc_offset = 0;
+ } else {
+ desc_offset = dev->vhost_hlen;
+ desc_chunck_len -= dev->vhost_hlen;
+ }
+
mbuf_avail = rte_pktmbuf_data_len(m);
mbuf_offset = 0;
/* done with current desc buf, get the next one */
if (desc_avail == 0) {
vec_idx++;
- desc_addr = rte_vhost_gpa_to_vva(dev->mem,
- buf_vec[vec_idx].buf_addr);
- if (unlikely(!desc_addr))
- return -1;
+ desc_chunck_len = buf_vec[vec_idx].buf_len;
+ desc_gaddr = buf_vec[vec_idx].buf_addr;
+ desc_addr =
+ vhost_iova_to_vva(dev, vq,
+ desc_gaddr,
+ &desc_chunck_len,
+ VHOST_ACCESS_RW);
+ if (unlikely(!desc_addr)) {
+ error = -1;
+ goto out;
+ }
/* Prefetch buffer address. */
rte_prefetch0((void *)(uintptr_t)desc_addr);
desc_offset = 0;
desc_avail = buf_vec[vec_idx].buf_len;
+ } else if (unlikely(desc_chunck_len == 0)) {
+ desc_chunck_len = desc_avail;
+ desc_gaddr += desc_offset;
+ desc_addr = vhost_iova_to_vva(dev, vq,
+ desc_gaddr,
+ &desc_chunck_len, VHOST_ACCESS_RW);
+ if (unlikely(!desc_addr)) {
+ error = -1;
+ goto out;
+ }
+ desc_offset = 0;
}
/* done with current mbuf, get the next one */
}
if (hdr_addr) {
- struct virtio_net_hdr_mrg_rxbuf *hdr;
-
- hdr = (struct virtio_net_hdr_mrg_rxbuf *)(uintptr_t)
- hdr_addr;
virtio_enqueue_offload(hdr_mbuf, &hdr->hdr);
ASSIGN_UNLESS_EQUAL(hdr->num_buffers, num_buffers);
- vhost_log_write(dev, hdr_phys_addr, dev->vhost_hlen);
- PRINT_PACKET(dev, (uintptr_t)hdr_addr,
- dev->vhost_hlen, 0);
+ if (unlikely(hdr == &tmp_hdr)) {
+ uint64_t len;
+ uint64_t remain = dev->vhost_hlen;
+ uint64_t src = (uint64_t)(uintptr_t)hdr, dst;
+ uint64_t guest_addr = hdr_phys_addr;
+
+ while (remain) {
+ len = remain;
+ dst = vhost_iova_to_vva(dev, vq,
+ guest_addr, &len,
+ VHOST_ACCESS_RW);
+ if (unlikely(!dst || !len)) {
+ error = -1;
+ goto out;
+ }
+
+ rte_memcpy((void *)(uintptr_t)dst,
+ (void *)(uintptr_t)src,
+ len);
+
+ PRINT_PACKET(dev, (uintptr_t)dst,
+ (uint32_t)len, 0);
+ vhost_log_cache_write(dev, vq,
+ guest_addr, len);
+
+ remain -= len;
+ guest_addr += len;
+ dst += len;
+ }
+ } else {
+ PRINT_PACKET(dev, (uintptr_t)hdr_addr,
+ dev->vhost_hlen, 0);
+ vhost_log_cache_write(dev, vq, hdr_phys_addr,
+ dev->vhost_hlen);
+ }
hdr_addr = 0;
}
- cpy_len = RTE_MIN(desc_avail, mbuf_avail);
- rte_memcpy((void *)((uintptr_t)(desc_addr + desc_offset)),
- rte_pktmbuf_mtod_offset(m, void *, mbuf_offset),
- cpy_len);
- vhost_log_write(dev, buf_vec[vec_idx].buf_addr + desc_offset,
- cpy_len);
- PRINT_PACKET(dev, (uintptr_t)(desc_addr + desc_offset),
- cpy_len, 0);
+ cpy_len = RTE_MIN(desc_chunck_len, mbuf_avail);
+
+ if (likely(cpy_len > MAX_BATCH_LEN || copy_nb >= vq->size)) {
+ rte_memcpy((void *)((uintptr_t)(desc_addr +
+ desc_offset)),
+ rte_pktmbuf_mtod_offset(m, void *, mbuf_offset),
+ cpy_len);
+ vhost_log_cache_write(dev, vq, desc_gaddr + desc_offset,
+ cpy_len);
+ PRINT_PACKET(dev, (uintptr_t)(desc_addr + desc_offset),
+ cpy_len, 0);
+ } else {
+ batch_copy[copy_nb].dst =
+ (void *)((uintptr_t)(desc_addr + desc_offset));
+ batch_copy[copy_nb].src =
+ rte_pktmbuf_mtod_offset(m, void *, mbuf_offset);
+ batch_copy[copy_nb].log_addr = desc_gaddr + desc_offset;
+ batch_copy[copy_nb].len = cpy_len;
+ copy_nb++;
+ }
mbuf_avail -= cpy_len;
mbuf_offset += cpy_len;
desc_avail -= cpy_len;
desc_offset += cpy_len;
+ desc_chunck_len -= cpy_len;
}
- return 0;
+out:
+ vq->batch_copy_nb_elems = copy_nb;
+
+ return error;
}
static __rte_always_inline uint32_t
}
vq = dev->virtqueue[queue_id];
+
+ rte_spinlock_lock(&vq->access_lock);
+
if (unlikely(vq->enabled == 0))
- return 0;
+ goto out_access_unlock;
+
+ if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
+ vhost_user_iotlb_rd_lock(vq);
+
+ if (unlikely(vq->access_ok == 0))
+ if (unlikely(vring_translate(dev, vq) < 0))
+ goto out;
count = RTE_MIN((uint32_t)MAX_PKT_BURST, count);
if (count == 0)
- return 0;
+ goto out;
+
+ vq->batch_copy_nb_elems = 0;
rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
dev->vid, vq->last_avail_idx,
vq->last_avail_idx + num_buffers);
- if (copy_mbuf_to_desc_mergeable(dev, pkts[pkt_idx],
+ if (copy_mbuf_to_desc_mergeable(dev, vq, pkts[pkt_idx],
buf_vec, num_buffers) < 0) {
vq->shadow_used_idx -= num_buffers;
break;
vq->last_avail_idx += num_buffers;
}
+ do_data_copy_enqueue(dev, vq);
+
if (likely(vq->shadow_used_idx)) {
flush_shadow_used_ring(dev, vq);
eventfd_write(vq->callfd, (eventfd_t)1);
}
+out:
+ if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
+ vhost_user_iotlb_rd_unlock(vq);
+
+out_access_unlock:
+ rte_spinlock_unlock(&vq->access_lock);
+
return pkt_idx;
}
}
static __rte_always_inline int
-copy_desc_to_mbuf(struct virtio_net *dev, struct vring_desc *descs,
- uint16_t max_desc, struct rte_mbuf *m, uint16_t desc_idx,
+copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq,
+ struct vring_desc *descs, uint16_t max_desc,
+ struct rte_mbuf *m, uint16_t desc_idx,
struct rte_mempool *mbuf_pool)
{
struct vring_desc *desc;
- uint64_t desc_addr;
+ uint64_t desc_addr, desc_gaddr;
uint32_t desc_avail, desc_offset;
uint32_t mbuf_avail, mbuf_offset;
uint32_t cpy_len;
+ uint64_t desc_chunck_len;
struct rte_mbuf *cur = m, *prev = m;
+ struct virtio_net_hdr tmp_hdr;
struct virtio_net_hdr *hdr = NULL;
	/* A counter to avoid a dead descriptor chain loop */
uint32_t nr_desc = 1;
+ struct batch_copy_elem *batch_copy = vq->batch_copy_elems;
+ uint16_t copy_nb = vq->batch_copy_nb_elems;
+ int error = 0;
desc = &descs[desc_idx];
if (unlikely((desc->len < dev->vhost_hlen)) ||
- (desc->flags & VRING_DESC_F_INDIRECT))
- return -1;
+ (desc->flags & VRING_DESC_F_INDIRECT)) {
+ error = -1;
+ goto out;
+ }
- desc_addr = rte_vhost_gpa_to_vva(dev->mem, desc->addr);
- if (unlikely(!desc_addr))
- return -1;
+ desc_chunck_len = desc->len;
+ desc_gaddr = desc->addr;
+ desc_addr = vhost_iova_to_vva(dev,
+ vq, desc_gaddr,
+ &desc_chunck_len,
+ VHOST_ACCESS_RO);
+ if (unlikely(!desc_addr)) {
+ error = -1;
+ goto out;
+ }
if (virtio_net_with_host_offload(dev)) {
- hdr = (struct virtio_net_hdr *)((uintptr_t)desc_addr);
- rte_prefetch0(hdr);
+ if (unlikely(desc_chunck_len < sizeof(struct virtio_net_hdr))) {
+ uint64_t len = desc_chunck_len;
+ uint64_t remain = sizeof(struct virtio_net_hdr);
+ uint64_t src = desc_addr;
+ uint64_t dst = (uint64_t)(uintptr_t)&tmp_hdr;
+ uint64_t guest_addr = desc_gaddr;
+
+ /*
+ * No luck, the virtio-net header doesn't fit
+ * in a contiguous virtual area.
+ */
+ while (remain) {
+ len = remain;
+ src = vhost_iova_to_vva(dev, vq,
+ guest_addr, &len,
+ VHOST_ACCESS_RO);
+ if (unlikely(!src || !len)) {
+ error = -1;
+ goto out;
+ }
+
+ rte_memcpy((void *)(uintptr_t)dst,
+ (void *)(uintptr_t)src, len);
+
+ guest_addr += len;
+ remain -= len;
+ dst += len;
+ }
+
+ hdr = &tmp_hdr;
+ } else {
+ hdr = (struct virtio_net_hdr *)((uintptr_t)desc_addr);
+ rte_prefetch0(hdr);
+ }
}
/*
if (likely((desc->len == dev->vhost_hlen) &&
(desc->flags & VRING_DESC_F_NEXT) != 0)) {
desc = &descs[desc->next];
- if (unlikely(desc->flags & VRING_DESC_F_INDIRECT))
- return -1;
+ if (unlikely(desc->flags & VRING_DESC_F_INDIRECT)) {
+ error = -1;
+ goto out;
+ }
- desc_addr = rte_vhost_gpa_to_vva(dev->mem, desc->addr);
- if (unlikely(!desc_addr))
- return -1;
+ desc_chunck_len = desc->len;
+ desc_gaddr = desc->addr;
+ desc_addr = vhost_iova_to_vva(dev,
+ vq, desc_gaddr,
+ &desc_chunck_len,
+ VHOST_ACCESS_RO);
+ if (unlikely(!desc_addr)) {
+ error = -1;
+ goto out;
+ }
desc_offset = 0;
desc_avail = desc->len;
nr_desc += 1;
} else {
desc_avail = desc->len - dev->vhost_hlen;
- desc_offset = dev->vhost_hlen;
+
+ if (unlikely(desc_chunck_len < dev->vhost_hlen)) {
+ desc_chunck_len = desc_avail;
+ desc_gaddr += dev->vhost_hlen;
+ desc_addr = vhost_iova_to_vva(dev,
+ vq, desc_gaddr,
+ &desc_chunck_len,
+ VHOST_ACCESS_RO);
+ if (unlikely(!desc_addr)) {
+ error = -1;
+ goto out;
+ }
+
+ desc_offset = 0;
+ } else {
+ desc_offset = dev->vhost_hlen;
+ desc_chunck_len -= dev->vhost_hlen;
+ }
}
rte_prefetch0((void *)(uintptr_t)(desc_addr + desc_offset));
- PRINT_PACKET(dev, (uintptr_t)(desc_addr + desc_offset), desc_avail, 0);
+ PRINT_PACKET(dev, (uintptr_t)(desc_addr + desc_offset),
+ (uint32_t)desc_chunck_len, 0);
mbuf_offset = 0;
mbuf_avail = m->buf_len - RTE_PKTMBUF_HEADROOM;
while (1) {
uint64_t hpa;
- cpy_len = RTE_MIN(desc_avail, mbuf_avail);
+ cpy_len = RTE_MIN(desc_chunck_len, mbuf_avail);
/*
	 * A desc buf might span two host physical pages that are
	 * not continuous. In such case (gpa_to_hpa returns 0), data
	 * will be copied even though zero copy is enabled.
*/
if (unlikely(dev->dequeue_zero_copy && (hpa = gpa_to_hpa(dev,
- desc->addr + desc_offset, cpy_len)))) {
+ desc_gaddr + desc_offset, cpy_len)))) {
cur->data_len = cpy_len;
cur->data_off = 0;
- cur->buf_addr = (void *)(uintptr_t)desc_addr;
- cur->buf_physaddr = hpa;
+ cur->buf_addr = (void *)(uintptr_t)(desc_addr
+ + desc_offset);
+ cur->buf_iova = hpa;
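+			/*
+			 * The mbuf now points into guest memory;
+			 * restore_mbuf() resets the buffer fields before
+			 * the mbuf returns to its mempool.
+			 */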
/*
* In zero copy mode, one mbuf can only reference data
*/
mbuf_avail = cpy_len;
} else {
- rte_memcpy(rte_pktmbuf_mtod_offset(cur, void *,
- mbuf_offset),
- (void *)((uintptr_t)(desc_addr + desc_offset)),
- cpy_len);
+ if (likely(cpy_len > MAX_BATCH_LEN ||
+ copy_nb >= vq->size ||
+ (hdr && cur == m) ||
+ desc->len != desc_chunck_len)) {
+ rte_memcpy(rte_pktmbuf_mtod_offset(cur, void *,
+ mbuf_offset),
+ (void *)((uintptr_t)(desc_addr +
+ desc_offset)),
+ cpy_len);
+ } else {
+ batch_copy[copy_nb].dst =
+ rte_pktmbuf_mtod_offset(cur, void *,
+ mbuf_offset);
+ batch_copy[copy_nb].src =
+ (void *)((uintptr_t)(desc_addr +
+ desc_offset));
+ batch_copy[copy_nb].len = cpy_len;
+ copy_nb++;
+ }
}
mbuf_avail -= cpy_len;
mbuf_offset += cpy_len;
desc_avail -= cpy_len;
+ desc_chunck_len -= cpy_len;
desc_offset += cpy_len;
/* This desc reaches to its end, get the next one */
break;
if (unlikely(desc->next >= max_desc ||
- ++nr_desc > max_desc))
- return -1;
+ ++nr_desc > max_desc)) {
+ error = -1;
+ goto out;
+ }
desc = &descs[desc->next];
- if (unlikely(desc->flags & VRING_DESC_F_INDIRECT))
- return -1;
+ if (unlikely(desc->flags & VRING_DESC_F_INDIRECT)) {
+ error = -1;
+ goto out;
+ }
- desc_addr = rte_vhost_gpa_to_vva(dev->mem, desc->addr);
- if (unlikely(!desc_addr))
- return -1;
+ desc_chunck_len = desc->len;
+ desc_gaddr = desc->addr;
+ desc_addr = vhost_iova_to_vva(dev,
+ vq, desc_gaddr,
+ &desc_chunck_len,
+ VHOST_ACCESS_RO);
+ if (unlikely(!desc_addr)) {
+ error = -1;
+ goto out;
+ }
rte_prefetch0((void *)(uintptr_t)desc_addr);
desc_offset = 0;
desc_avail = desc->len;
- PRINT_PACKET(dev, (uintptr_t)desc_addr, desc->len, 0);
+ PRINT_PACKET(dev, (uintptr_t)desc_addr,
+ (uint32_t)desc_chunck_len, 0);
+ } else if (unlikely(desc_chunck_len == 0)) {
+ desc_chunck_len = desc_avail;
+ desc_gaddr += desc_offset;
+ desc_addr = vhost_iova_to_vva(dev, vq,
+ desc_gaddr,
+ &desc_chunck_len,
+ VHOST_ACCESS_RO);
+ if (unlikely(!desc_addr)) {
+ error = -1;
+ goto out;
+ }
+ desc_offset = 0;
+
+ PRINT_PACKET(dev, (uintptr_t)desc_addr,
+ (uint32_t)desc_chunck_len, 0);
}
/*
if (unlikely(cur == NULL)) {
RTE_LOG(ERR, VHOST_DATA, "Failed to "
"allocate memory for mbuf.\n");
- return -1;
+ error = -1;
+ goto out;
}
if (unlikely(dev->dequeue_zero_copy))
rte_mbuf_refcnt_update(cur, 1);
if (hdr)
vhost_dequeue_offload(hdr, m);
- return 0;
+out:
+ vq->batch_copy_nb_elems = copy_nb;
+
+ return error;
}
static __rte_always_inline void
{
vq->used->ring[used_idx].id = desc_idx;
vq->used->ring[used_idx].len = 0;
- vhost_log_used_vring(dev, vq,
+ vhost_log_cache_used_vring(dev, vq,
offsetof(struct vring_used, ring[used_idx]),
sizeof(vq->used->ring[used_idx]));
}
rte_smp_wmb();
rte_smp_rmb();
+ vhost_log_cache_sync(dev, vq);
+
vq->used->idx += count;
vhost_log_used_vring(dev, vq, offsetof(struct vring_used, idx),
sizeof(vq->used->idx));
return true;
}
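+
+/*
+ * Zero-copy dequeue made buf_addr/buf_iova point into guest memory;
+ * reset them to the mbuf's own buffer, which starts right after the
+ * mbuf structure and its private area.
+ */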
+static __rte_always_inline void
+restore_mbuf(struct rte_mbuf *m)
+{
+ uint32_t mbuf_size, priv_size;
+
+ while (m) {
+ priv_size = rte_pktmbuf_priv_size(m->pool);
+ mbuf_size = sizeof(struct rte_mbuf) + priv_size;
+ /* start of buffer is after mbuf structure and priv data */
+
+ m->buf_addr = (char *)m + mbuf_size;
+ m->buf_iova = rte_mempool_virt2iova(m) + mbuf_size;
+ m = m->next;
+ }
+}
+
uint16_t
rte_vhost_dequeue_burst(int vid, uint16_t queue_id,
struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count)
}
vq = dev->virtqueue[queue_id];
- if (unlikely(vq->enabled == 0))
+
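+	/*
+	 * Never block the polling thread: if the lock is contended,
+	 * return an empty burst instead.
+	 */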
+ if (unlikely(rte_spinlock_trylock(&vq->access_lock) == 0))
return 0;
+ if (unlikely(vq->enabled == 0))
+ goto out_access_unlock;
+
+ vq->batch_copy_nb_elems = 0;
+
+ if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
+ vhost_user_iotlb_rd_lock(vq);
+
+ if (unlikely(vq->access_ok == 0))
+ if (unlikely(vring_translate(dev, vq) < 0))
+ goto out;
+
if (unlikely(dev->dequeue_zero_copy)) {
struct zcopy_mbuf *zmbuf, *next;
int nr_updated = 0;
nr_updated += 1;
TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next);
+ restore_mbuf(zmbuf->mbuf);
rte_pktmbuf_free(zmbuf->mbuf);
put_zmbuf(zmbuf);
vq->nr_zmbuf -= 1;
if (rarp_mbuf == NULL) {
RTE_LOG(ERR, VHOST_DATA,
"Failed to allocate memory for mbuf.\n");
- return 0;
+ goto out;
}
if (make_rarp_packet(rarp_mbuf, &dev->mac)) {
/* Prefetch descriptor index. */
rte_prefetch0(&vq->desc[desc_indexes[0]]);
for (i = 0; i < count; i++) {
- struct vring_desc *desc;
+ struct vring_desc *desc, *idesc = NULL;
uint16_t sz, idx;
+ uint64_t dlen;
int err;
if (likely(i + 1 < count))
rte_prefetch0(&vq->desc[desc_indexes[i + 1]]);
if (vq->desc[desc_indexes[i]].flags & VRING_DESC_F_INDIRECT) {
+ dlen = vq->desc[desc_indexes[i]].len;
desc = (struct vring_desc *)(uintptr_t)
- rte_vhost_gpa_to_vva(dev->mem,
- vq->desc[desc_indexes[i]].addr);
+ vhost_iova_to_vva(dev, vq,
+ vq->desc[desc_indexes[i]].addr,
+ &dlen,
+ VHOST_ACCESS_RO);
if (unlikely(!desc))
break;
+ if (unlikely(dlen < vq->desc[desc_indexes[i]].len)) {
+ /*
+ * The indirect desc table is not contiguous
+				 * in process VA space, so we have to copy it.
+ */
+ idesc = alloc_copy_ind_table(dev, vq,
+ &vq->desc[desc_indexes[i]]);
+ if (unlikely(!idesc))
+ break;
+
+ desc = idesc;
+ }
+
rte_prefetch0(desc);
sz = vq->desc[desc_indexes[i]].len / sizeof(*desc);
idx = 0;
if (unlikely(pkts[i] == NULL)) {
RTE_LOG(ERR, VHOST_DATA,
"Failed to allocate memory for mbuf.\n");
+ free_ind_table(idesc);
break;
}
- err = copy_desc_to_mbuf(dev, desc, sz, pkts[i], idx, mbuf_pool);
+ err = copy_desc_to_mbuf(dev, vq, desc, sz, pkts[i], idx,
+ mbuf_pool);
if (unlikely(err)) {
rte_pktmbuf_free(pkts[i]);
+ free_ind_table(idesc);
break;
}
zmbuf = get_zmbuf(vq);
if (!zmbuf) {
rte_pktmbuf_free(pkts[i]);
+ free_ind_table(idesc);
break;
}
zmbuf->mbuf = pkts[i];
vq->nr_zmbuf += 1;
TAILQ_INSERT_TAIL(&vq->zmbuf_list, zmbuf, next);
}
+
+ if (unlikely(!!idesc))
+ free_ind_table(idesc);
}
vq->last_avail_idx += i;
if (likely(dev->dequeue_zero_copy == 0)) {
+ do_data_copy_dequeue(vq);
vq->last_used_idx += i;
update_used_idx(dev, vq, i);
}
out:
+ if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
+ vhost_user_iotlb_rd_unlock(vq);
+
+out_access_unlock:
+ rte_spinlock_unlock(&vq->access_lock);
+
if (unlikely(rarp_mbuf != NULL)) {
/*
* Inject it to the head of "pkts" array, so that switch's mac