X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=lib%2Flibrte_vhost%2Fvirtio_net.c;h=bde360f3d925a004f69daf152613dc4f13da6924;hb=6e7cbd63706f3435b9d9a2057a37db1da01db9a7;hp=48219e0509c381d8b7754cdc9e130bdfe132e2d7;hpb=7595afa4d30097c1177b69257118d8ad89a539be;p=deb_dpdk.git diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c index 48219e05..bde360f3 100644 --- a/lib/librte_vhost/virtio_net.c +++ b/lib/librte_vhost/virtio_net.c @@ -44,30 +44,75 @@ #include #include #include +#include +#include +#include "iotlb.h" #include "vhost.h" #define MAX_PKT_BURST 32 +#define MAX_BATCH_LEN 256 + static bool is_valid_virt_queue_idx(uint32_t idx, int is_tx, uint32_t nr_vring) { return (is_tx ^ (idx & 1)) == 0 && idx < nr_vring; } -static inline void __attribute__((always_inline)) +static __rte_always_inline struct vring_desc * +alloc_copy_ind_table(struct virtio_net *dev, struct vhost_virtqueue *vq, + struct vring_desc *desc) +{ + struct vring_desc *idesc; + uint64_t src, dst; + uint64_t len, remain = desc->len; + uint64_t desc_addr = desc->addr; + + idesc = rte_malloc(__func__, desc->len, 0); + if (unlikely(!idesc)) + return 0; + + dst = (uint64_t)(uintptr_t)idesc; + + while (remain) { + len = remain; + src = vhost_iova_to_vva(dev, vq, desc_addr, &len, + VHOST_ACCESS_RO); + if (unlikely(!src || !len)) { + rte_free(idesc); + return 0; + } + + rte_memcpy((void *)(uintptr_t)dst, (void *)(uintptr_t)src, len); + + remain -= len; + dst += len; + desc_addr += len; + } + + return idesc; +} + +static __rte_always_inline void +free_ind_table(struct vring_desc *idesc) +{ + rte_free(idesc); +} + +static __rte_always_inline void do_flush_shadow_used_ring(struct virtio_net *dev, struct vhost_virtqueue *vq, uint16_t to, uint16_t from, uint16_t size) { rte_memcpy(&vq->used->ring[to], &vq->shadow_used_ring[from], size * sizeof(struct vring_used_elem)); - vhost_log_used_vring(dev, vq, + vhost_log_cache_used_vring(dev, vq, offsetof(struct vring_used, ring[to]), size * sizeof(struct vring_used_elem)); } -static inline void __attribute__((always_inline)) +static __rte_always_inline void flush_shadow_used_ring(struct virtio_net *dev, struct vhost_virtqueue *vq) { uint16_t used_idx = vq->last_used_idx & (vq->size - 1); @@ -90,12 +135,14 @@ flush_shadow_used_ring(struct virtio_net *dev, struct vhost_virtqueue *vq) rte_smp_wmb(); + vhost_log_cache_sync(dev, vq); + *(volatile uint16_t *)&vq->used->idx += vq->shadow_used_idx; vhost_log_used_vring(dev, vq, offsetof(struct vring_used, idx), sizeof(vq->used->idx)); } -static inline void __attribute__((always_inline)) +static __rte_always_inline void update_shadow_used_ring(struct vhost_virtqueue *vq, uint16_t desc_idx, uint16_t len) { @@ -105,6 +152,31 @@ update_shadow_used_ring(struct vhost_virtqueue *vq, vq->shadow_used_ring[i].len = len; } +static inline void +do_data_copy_enqueue(struct virtio_net *dev, struct vhost_virtqueue *vq) +{ + struct batch_copy_elem *elem = vq->batch_copy_elems; + uint16_t count = vq->batch_copy_nb_elems; + int i; + + for (i = 0; i < count; i++) { + rte_memcpy(elem[i].dst, elem[i].src, elem[i].len); + vhost_log_cache_write(dev, vq, elem[i].log_addr, elem[i].len); + PRINT_PACKET(dev, (uintptr_t)elem[i].dst, elem[i].len, 0); + } +} + +static inline void +do_data_copy_dequeue(struct vhost_virtqueue *vq) +{ + struct batch_copy_elem *elem = vq->batch_copy_elems; + uint16_t count = vq->batch_copy_nb_elems; + int i; + + for (i = 0; i < count; i++) + rte_memcpy(elem[i].dst, elem[i].src, elem[i].len); +} + /* avoid write 
operation when necessary, to lessen cache issues */ #define ASSIGN_UNLESS_EQUAL(var, val) do { \ if ((var) != (val)) \ @@ -114,11 +186,16 @@ update_shadow_used_ring(struct vhost_virtqueue *vq, static void virtio_enqueue_offload(struct rte_mbuf *m_buf, struct virtio_net_hdr *net_hdr) { - if (m_buf->ol_flags & PKT_TX_L4_MASK) { + uint64_t csum_l4 = m_buf->ol_flags & PKT_TX_L4_MASK; + + if (m_buf->ol_flags & PKT_TX_TCP_SEG) + csum_l4 |= PKT_TX_TCP_CKSUM; + + if (csum_l4) { net_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM; net_hdr->csum_start = m_buf->l2_len + m_buf->l3_len; - switch (m_buf->ol_flags & PKT_TX_L4_MASK) { + switch (csum_l4) { case PKT_TX_TCP_CKSUM: net_hdr->csum_offset = (offsetof(struct tcp_hdr, cksum)); @@ -138,6 +215,15 @@ virtio_enqueue_offload(struct rte_mbuf *m_buf, struct virtio_net_hdr *net_hdr) ASSIGN_UNLESS_EQUAL(net_hdr->flags, 0); } + /* IP cksum verification cannot be bypassed, then calculate here */ + if (m_buf->ol_flags & PKT_TX_IP_CKSUM) { + struct ipv4_hdr *ipv4_hdr; + + ipv4_hdr = rte_pktmbuf_mtod_offset(m_buf, struct ipv4_hdr *, + m_buf->l2_len); + ipv4_hdr->hdr_checksum = rte_ipv4_cksum(ipv4_hdr); + } + if (m_buf->ol_flags & PKT_TX_TCP_SEG) { if (m_buf->ol_flags & PKT_TX_IPV4) net_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4; @@ -153,36 +239,92 @@ virtio_enqueue_offload(struct rte_mbuf *m_buf, struct virtio_net_hdr *net_hdr) } } -static inline int __attribute__((always_inline)) -copy_mbuf_to_desc(struct virtio_net *dev, struct vring_desc *descs, - struct rte_mbuf *m, uint16_t desc_idx, uint32_t size) +static __rte_always_inline int +copy_mbuf_to_desc(struct virtio_net *dev, struct vhost_virtqueue *vq, + struct vring_desc *descs, struct rte_mbuf *m, + uint16_t desc_idx, uint32_t size) { uint32_t desc_avail, desc_offset; uint32_t mbuf_avail, mbuf_offset; uint32_t cpy_len; + uint64_t desc_chunck_len; struct vring_desc *desc; - uint64_t desc_addr; + uint64_t desc_addr, desc_gaddr; /* A counter to avoid desc dead loop chain */ uint16_t nr_desc = 1; + struct batch_copy_elem *batch_copy = vq->batch_copy_elems; + uint16_t copy_nb = vq->batch_copy_nb_elems; + int error = 0; desc = &descs[desc_idx]; - desc_addr = rte_vhost_gpa_to_vva(dev->mem, desc->addr); + desc_chunck_len = desc->len; + desc_gaddr = desc->addr; + desc_addr = vhost_iova_to_vva(dev, vq, desc_gaddr, + &desc_chunck_len, VHOST_ACCESS_RW); /* * Checking of 'desc_addr' placed outside of 'unlikely' macro to avoid * performance issue with some versions of gcc (4.8.4 and 5.3.0) which * otherwise stores offset on the stack instead of in a register. 
*/ - if (unlikely(desc->len < dev->vhost_hlen) || !desc_addr) - return -1; + if (unlikely(desc->len < dev->vhost_hlen) || !desc_addr) { + error = -1; + goto out; + } rte_prefetch0((void *)(uintptr_t)desc_addr); - virtio_enqueue_offload(m, (struct virtio_net_hdr *)(uintptr_t)desc_addr); - vhost_log_write(dev, desc->addr, dev->vhost_hlen); - PRINT_PACKET(dev, (uintptr_t)desc_addr, dev->vhost_hlen, 0); + if (likely(desc_chunck_len >= dev->vhost_hlen)) { + virtio_enqueue_offload(m, + (struct virtio_net_hdr *)(uintptr_t)desc_addr); + PRINT_PACKET(dev, (uintptr_t)desc_addr, dev->vhost_hlen, 0); + vhost_log_cache_write(dev, vq, desc_gaddr, dev->vhost_hlen); + } else { + struct virtio_net_hdr vnet_hdr; + uint64_t remain = dev->vhost_hlen; + uint64_t len; + uint64_t src = (uint64_t)(uintptr_t)&vnet_hdr, dst; + uint64_t guest_addr = desc_gaddr; + + virtio_enqueue_offload(m, &vnet_hdr); + + while (remain) { + len = remain; + dst = vhost_iova_to_vva(dev, vq, guest_addr, + &len, VHOST_ACCESS_RW); + if (unlikely(!dst || !len)) { + error = -1; + goto out; + } + + rte_memcpy((void *)(uintptr_t)dst, + (void *)(uintptr_t)src, len); + + PRINT_PACKET(dev, (uintptr_t)dst, (uint32_t)len, 0); + vhost_log_cache_write(dev, vq, guest_addr, len); + remain -= len; + guest_addr += len; + dst += len; + } + } - desc_offset = dev->vhost_hlen; desc_avail = desc->len - dev->vhost_hlen; + if (unlikely(desc_chunck_len < dev->vhost_hlen)) { + desc_chunck_len = desc_avail; + desc_gaddr = desc->addr + dev->vhost_hlen; + desc_addr = vhost_iova_to_vva(dev, + vq, desc_gaddr, + &desc_chunck_len, + VHOST_ACCESS_RW); + if (unlikely(!desc_addr)) { + error = -1; + goto out; + } + + desc_offset = 0; + } else { + desc_offset = dev->vhost_hlen; + desc_chunck_len -= dev->vhost_hlen; + } mbuf_avail = rte_pktmbuf_data_len(m); mbuf_offset = 0; @@ -199,45 +341,81 @@ copy_mbuf_to_desc(struct virtio_net *dev, struct vring_desc *descs, if (desc_avail == 0) { if ((desc->flags & VRING_DESC_F_NEXT) == 0) { /* Room in vring buffer is not enough */ - return -1; + error = -1; + goto out; + } + if (unlikely(desc->next >= size || ++nr_desc > size)) { + error = -1; + goto out; } - if (unlikely(desc->next >= size || ++nr_desc > size)) - return -1; desc = &descs[desc->next]; - desc_addr = rte_vhost_gpa_to_vva(dev->mem, desc->addr); - if (unlikely(!desc_addr)) - return -1; + desc_chunck_len = desc->len; + desc_gaddr = desc->addr; + desc_addr = vhost_iova_to_vva(dev, vq, desc_gaddr, + &desc_chunck_len, + VHOST_ACCESS_RW); + if (unlikely(!desc_addr)) { + error = -1; + goto out; + } desc_offset = 0; desc_avail = desc->len; + } else if (unlikely(desc_chunck_len == 0)) { + desc_chunck_len = desc_avail; + desc_gaddr += desc_offset; + desc_addr = vhost_iova_to_vva(dev, + vq, desc_gaddr, + &desc_chunck_len, VHOST_ACCESS_RW); + if (unlikely(!desc_addr)) { + error = -1; + goto out; + } + desc_offset = 0; } - cpy_len = RTE_MIN(desc_avail, mbuf_avail); - rte_memcpy((void *)((uintptr_t)(desc_addr + desc_offset)), - rte_pktmbuf_mtod_offset(m, void *, mbuf_offset), - cpy_len); - vhost_log_write(dev, desc->addr + desc_offset, cpy_len); - PRINT_PACKET(dev, (uintptr_t)(desc_addr + desc_offset), - cpy_len, 0); + cpy_len = RTE_MIN(desc_chunck_len, mbuf_avail); + if (likely(cpy_len > MAX_BATCH_LEN || copy_nb >= vq->size)) { + rte_memcpy((void *)((uintptr_t)(desc_addr + + desc_offset)), + rte_pktmbuf_mtod_offset(m, void *, mbuf_offset), + cpy_len); + vhost_log_cache_write(dev, vq, desc_gaddr + desc_offset, + cpy_len); + PRINT_PACKET(dev, (uintptr_t)(desc_addr + desc_offset), + 
cpy_len, 0); + } else { + batch_copy[copy_nb].dst = + (void *)((uintptr_t)(desc_addr + desc_offset)); + batch_copy[copy_nb].src = + rte_pktmbuf_mtod_offset(m, void *, mbuf_offset); + batch_copy[copy_nb].log_addr = desc_gaddr + desc_offset; + batch_copy[copy_nb].len = cpy_len; + copy_nb++; + } mbuf_avail -= cpy_len; mbuf_offset += cpy_len; desc_avail -= cpy_len; desc_offset += cpy_len; + desc_chunck_len -= cpy_len; } - return 0; +out: + vq->batch_copy_nb_elems = copy_nb; + + return error; } /** * This function adds buffers to the virtio devices RX virtqueue. Buffers can * be received from the physical port or from another virtio device. A packet - * count is returned to indicate the number of packets that are succesfully + * count is returned to indicate the number of packets that are successfully * added to the RX queue. This function works when the mbuf is scattered, but * it doesn't support the mergeable feature. */ -static inline uint32_t __attribute__((always_inline)) +static __rte_always_inline uint32_t virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id, struct rte_mbuf **pkts, uint32_t count) { @@ -256,8 +434,21 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id, } vq = dev->virtqueue[queue_id]; + + rte_spinlock_lock(&vq->access_lock); + if (unlikely(vq->enabled == 0)) - return 0; + goto out_access_unlock; + + if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)) + vhost_user_iotlb_rd_lock(vq); + + if (unlikely(vq->access_ok == 0)) { + if (unlikely(vring_translate(dev, vq) < 0)) { + count = 0; + goto out; + } + } avail_idx = *((volatile uint16_t *)&vq->avail->idx); start_idx = vq->last_used_idx; @@ -265,11 +456,13 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id, count = RTE_MIN(count, free_entries); count = RTE_MIN(count, (uint32_t)MAX_PKT_BURST); if (count == 0) - return 0; + goto out; LOG_DEBUG(VHOST_DATA, "(%d) start_idx %d | end_idx %d\n", dev->vid, start_idx, start_idx + count); + vq->batch_copy_nb_elems = 0; + /* Retrieve all of the desc indexes first to avoid caching issues. */ rte_prefetch0(&vq->avail->ring[start_idx & (vq->size - 1)]); for (i = 0; i < count; i++) { @@ -278,25 +471,41 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id, vq->used->ring[used_idx].id = desc_indexes[i]; vq->used->ring[used_idx].len = pkts[i]->pkt_len + dev->vhost_hlen; - vhost_log_used_vring(dev, vq, + vhost_log_cache_used_vring(dev, vq, offsetof(struct vring_used, ring[used_idx]), sizeof(vq->used->ring[used_idx])); } rte_prefetch0(&vq->desc[desc_indexes[0]]); for (i = 0; i < count; i++) { + struct vring_desc *idesc = NULL; uint16_t desc_idx = desc_indexes[i]; int err; if (vq->desc[desc_idx].flags & VRING_DESC_F_INDIRECT) { + uint64_t dlen = vq->desc[desc_idx].len; descs = (struct vring_desc *)(uintptr_t) - rte_vhost_gpa_to_vva(dev->mem, - vq->desc[desc_idx].addr); + vhost_iova_to_vva(dev, + vq, vq->desc[desc_idx].addr, + &dlen, VHOST_ACCESS_RO); if (unlikely(!descs)) { count = i; break; } + if (unlikely(dlen < vq->desc[desc_idx].len)) { + /* + * The indirect desc table is not contiguous + * in process VA space, we have to copy it. 
+ */ + idesc = alloc_copy_ind_table(dev, vq, + &vq->desc[desc_idx]); + if (unlikely(!idesc)) + break; + + descs = idesc; + } + desc_idx = 0; sz = vq->desc[desc_idx].len / sizeof(*descs); } else { @@ -304,21 +513,26 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id, sz = vq->size; } - err = copy_mbuf_to_desc(dev, descs, pkts[i], desc_idx, sz); + err = copy_mbuf_to_desc(dev, vq, descs, pkts[i], desc_idx, sz); if (unlikely(err)) { - used_idx = (start_idx + i) & (vq->size - 1); - vq->used->ring[used_idx].len = dev->vhost_hlen; - vhost_log_used_vring(dev, vq, - offsetof(struct vring_used, ring[used_idx]), - sizeof(vq->used->ring[used_idx])); + count = i; + free_ind_table(idesc); + break; } if (i + 1 < count) rte_prefetch0(&vq->desc[desc_indexes[i+1]]); + + if (unlikely(!!idesc)) + free_ind_table(idesc); } + do_data_copy_enqueue(dev, vq); + rte_smp_wmb(); + vhost_log_cache_sync(dev, vq); + *(volatile uint16_t *)&vq->used->idx += count; vq->last_used_idx += count; vhost_log_used_vring(dev, vq, @@ -332,10 +546,17 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id, if (!(vq->avail->flags & VRING_AVAIL_F_NO_INTERRUPT) && (vq->callfd >= 0)) eventfd_write(vq->callfd, (eventfd_t)1); +out: + if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)) + vhost_user_iotlb_rd_unlock(vq); + +out_access_unlock: + rte_spinlock_unlock(&vq->access_lock); + return count; } -static inline int __attribute__((always_inline)) +static __rte_always_inline int fill_vec_buf(struct virtio_net *dev, struct vhost_virtqueue *vq, uint32_t avail_idx, uint32_t *vec_idx, struct buf_vector *buf_vec, uint16_t *desc_chain_head, @@ -344,22 +565,41 @@ fill_vec_buf(struct virtio_net *dev, struct vhost_virtqueue *vq, uint16_t idx = vq->avail->ring[avail_idx & (vq->size - 1)]; uint32_t vec_id = *vec_idx; uint32_t len = 0; + uint64_t dlen; struct vring_desc *descs = vq->desc; + struct vring_desc *idesc = NULL; *desc_chain_head = idx; if (vq->desc[idx].flags & VRING_DESC_F_INDIRECT) { + dlen = vq->desc[idx].len; descs = (struct vring_desc *)(uintptr_t) - rte_vhost_gpa_to_vva(dev->mem, vq->desc[idx].addr); + vhost_iova_to_vva(dev, vq, vq->desc[idx].addr, + &dlen, + VHOST_ACCESS_RO); if (unlikely(!descs)) return -1; + if (unlikely(dlen < vq->desc[idx].len)) { + /* + * The indirect desc table is not contiguous + * in process VA space, we have to copy it. 
+ */ + idesc = alloc_copy_ind_table(dev, vq, &vq->desc[idx]); + if (unlikely(!idesc)) + return -1; + + descs = idesc; + } + idx = 0; } while (1) { - if (unlikely(vec_id >= BUF_VECTOR_MAX || idx >= vq->size)) + if (unlikely(vec_id >= BUF_VECTOR_MAX || idx >= vq->size)) { + free_ind_table(idesc); return -1; + } len += descs[idx].len; buf_vec[vec_id].buf_addr = descs[idx].addr; @@ -376,6 +616,9 @@ fill_vec_buf(struct virtio_net *dev, struct vhost_virtqueue *vq, *desc_chain_len = len; *vec_idx = vec_id; + if (unlikely(!!idesc)) + free_ind_table(idesc); + return 0; } @@ -424,35 +667,71 @@ reserve_avail_buf_mergeable(struct virtio_net *dev, struct vhost_virtqueue *vq, return 0; } -static inline int __attribute__((always_inline)) -copy_mbuf_to_desc_mergeable(struct virtio_net *dev, struct rte_mbuf *m, - struct buf_vector *buf_vec, uint16_t num_buffers) +static __rte_always_inline int +copy_mbuf_to_desc_mergeable(struct virtio_net *dev, struct vhost_virtqueue *vq, + struct rte_mbuf *m, struct buf_vector *buf_vec, + uint16_t num_buffers) { uint32_t vec_idx = 0; - uint64_t desc_addr; + uint64_t desc_addr, desc_gaddr; uint32_t mbuf_offset, mbuf_avail; uint32_t desc_offset, desc_avail; uint32_t cpy_len; + uint64_t desc_chunck_len; uint64_t hdr_addr, hdr_phys_addr; struct rte_mbuf *hdr_mbuf; + struct batch_copy_elem *batch_copy = vq->batch_copy_elems; + struct virtio_net_hdr_mrg_rxbuf tmp_hdr, *hdr = NULL; + uint16_t copy_nb = vq->batch_copy_nb_elems; + int error = 0; - if (unlikely(m == NULL)) - return -1; + if (unlikely(m == NULL)) { + error = -1; + goto out; + } - desc_addr = rte_vhost_gpa_to_vva(dev->mem, buf_vec[vec_idx].buf_addr); - if (buf_vec[vec_idx].buf_len < dev->vhost_hlen || !desc_addr) - return -1; + desc_chunck_len = buf_vec[vec_idx].buf_len; + desc_gaddr = buf_vec[vec_idx].buf_addr; + desc_addr = vhost_iova_to_vva(dev, vq, + desc_gaddr, + &desc_chunck_len, + VHOST_ACCESS_RW); + if (buf_vec[vec_idx].buf_len < dev->vhost_hlen || !desc_addr) { + error = -1; + goto out; + } hdr_mbuf = m; hdr_addr = desc_addr; - hdr_phys_addr = buf_vec[vec_idx].buf_addr; + if (unlikely(desc_chunck_len < dev->vhost_hlen)) + hdr = &tmp_hdr; + else + hdr = (struct virtio_net_hdr_mrg_rxbuf *)(uintptr_t)hdr_addr; + hdr_phys_addr = desc_gaddr; rte_prefetch0((void *)(uintptr_t)hdr_addr); LOG_DEBUG(VHOST_DATA, "(%d) RX: num merge buffers %d\n", dev->vid, num_buffers); desc_avail = buf_vec[vec_idx].buf_len - dev->vhost_hlen; - desc_offset = dev->vhost_hlen; + if (unlikely(desc_chunck_len < dev->vhost_hlen)) { + desc_chunck_len = desc_avail; + desc_gaddr += dev->vhost_hlen; + desc_addr = vhost_iova_to_vva(dev, vq, + desc_gaddr, + &desc_chunck_len, + VHOST_ACCESS_RW); + if (unlikely(!desc_addr)) { + error = -1; + goto out; + } + + desc_offset = 0; + } else { + desc_offset = dev->vhost_hlen; + desc_chunck_len -= dev->vhost_hlen; + } + mbuf_avail = rte_pktmbuf_data_len(m); mbuf_offset = 0; @@ -460,15 +739,33 @@ copy_mbuf_to_desc_mergeable(struct virtio_net *dev, struct rte_mbuf *m, /* done with current desc buf, get the next one */ if (desc_avail == 0) { vec_idx++; - desc_addr = rte_vhost_gpa_to_vva(dev->mem, - buf_vec[vec_idx].buf_addr); - if (unlikely(!desc_addr)) - return -1; + desc_chunck_len = buf_vec[vec_idx].buf_len; + desc_gaddr = buf_vec[vec_idx].buf_addr; + desc_addr = + vhost_iova_to_vva(dev, vq, + desc_gaddr, + &desc_chunck_len, + VHOST_ACCESS_RW); + if (unlikely(!desc_addr)) { + error = -1; + goto out; + } /* Prefetch buffer address. 
*/ rte_prefetch0((void *)(uintptr_t)desc_addr); desc_offset = 0; desc_avail = buf_vec[vec_idx].buf_len; + } else if (unlikely(desc_chunck_len == 0)) { + desc_chunck_len = desc_avail; + desc_gaddr += desc_offset; + desc_addr = vhost_iova_to_vva(dev, vq, + desc_gaddr, + &desc_chunck_len, VHOST_ACCESS_RW); + if (unlikely(!desc_addr)) { + error = -1; + goto out; + } + desc_offset = 0; } /* done with current mbuf, get the next one */ @@ -480,39 +777,83 @@ copy_mbuf_to_desc_mergeable(struct virtio_net *dev, struct rte_mbuf *m, } if (hdr_addr) { - struct virtio_net_hdr_mrg_rxbuf *hdr; - - hdr = (struct virtio_net_hdr_mrg_rxbuf *)(uintptr_t) - hdr_addr; virtio_enqueue_offload(hdr_mbuf, &hdr->hdr); ASSIGN_UNLESS_EQUAL(hdr->num_buffers, num_buffers); - vhost_log_write(dev, hdr_phys_addr, dev->vhost_hlen); - PRINT_PACKET(dev, (uintptr_t)hdr_addr, - dev->vhost_hlen, 0); + if (unlikely(hdr == &tmp_hdr)) { + uint64_t len; + uint64_t remain = dev->vhost_hlen; + uint64_t src = (uint64_t)(uintptr_t)hdr, dst; + uint64_t guest_addr = hdr_phys_addr; + + while (remain) { + len = remain; + dst = vhost_iova_to_vva(dev, vq, + guest_addr, &len, + VHOST_ACCESS_RW); + if (unlikely(!dst || !len)) { + error = -1; + goto out; + } + + rte_memcpy((void *)(uintptr_t)dst, + (void *)(uintptr_t)src, + len); + + PRINT_PACKET(dev, (uintptr_t)dst, + (uint32_t)len, 0); + vhost_log_cache_write(dev, vq, + guest_addr, len); + + remain -= len; + guest_addr += len; + dst += len; + } + } else { + PRINT_PACKET(dev, (uintptr_t)hdr_addr, + dev->vhost_hlen, 0); + vhost_log_cache_write(dev, vq, hdr_phys_addr, + dev->vhost_hlen); + } hdr_addr = 0; } - cpy_len = RTE_MIN(desc_avail, mbuf_avail); - rte_memcpy((void *)((uintptr_t)(desc_addr + desc_offset)), - rte_pktmbuf_mtod_offset(m, void *, mbuf_offset), - cpy_len); - vhost_log_write(dev, buf_vec[vec_idx].buf_addr + desc_offset, - cpy_len); - PRINT_PACKET(dev, (uintptr_t)(desc_addr + desc_offset), - cpy_len, 0); + cpy_len = RTE_MIN(desc_chunck_len, mbuf_avail); + + if (likely(cpy_len > MAX_BATCH_LEN || copy_nb >= vq->size)) { + rte_memcpy((void *)((uintptr_t)(desc_addr + + desc_offset)), + rte_pktmbuf_mtod_offset(m, void *, mbuf_offset), + cpy_len); + vhost_log_cache_write(dev, vq, desc_gaddr + desc_offset, + cpy_len); + PRINT_PACKET(dev, (uintptr_t)(desc_addr + desc_offset), + cpy_len, 0); + } else { + batch_copy[copy_nb].dst = + (void *)((uintptr_t)(desc_addr + desc_offset)); + batch_copy[copy_nb].src = + rte_pktmbuf_mtod_offset(m, void *, mbuf_offset); + batch_copy[copy_nb].log_addr = desc_gaddr + desc_offset; + batch_copy[copy_nb].len = cpy_len; + copy_nb++; + } mbuf_avail -= cpy_len; mbuf_offset += cpy_len; desc_avail -= cpy_len; desc_offset += cpy_len; + desc_chunck_len -= cpy_len; } - return 0; +out: + vq->batch_copy_nb_elems = copy_nb; + + return error; } -static inline uint32_t __attribute__((always_inline)) +static __rte_always_inline uint32_t virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id, struct rte_mbuf **pkts, uint32_t count) { @@ -530,12 +871,24 @@ virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id, } vq = dev->virtqueue[queue_id]; + + rte_spinlock_lock(&vq->access_lock); + if (unlikely(vq->enabled == 0)) - return 0; + goto out_access_unlock; + + if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)) + vhost_user_iotlb_rd_lock(vq); + + if (unlikely(vq->access_ok == 0)) + if (unlikely(vring_translate(dev, vq) < 0)) + goto out; count = RTE_MIN((uint32_t)MAX_PKT_BURST, count); if (count == 0) - return 0; + goto out; + + vq->batch_copy_nb_elems = 0; 
rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]); @@ -558,7 +911,7 @@ virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id, dev->vid, vq->last_avail_idx, vq->last_avail_idx + num_buffers); - if (copy_mbuf_to_desc_mergeable(dev, pkts[pkt_idx], + if (copy_mbuf_to_desc_mergeable(dev, vq, pkts[pkt_idx], buf_vec, num_buffers) < 0) { vq->shadow_used_idx -= num_buffers; break; @@ -567,6 +920,8 @@ virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id, vq->last_avail_idx += num_buffers; } + do_data_copy_enqueue(dev, vq); + if (likely(vq->shadow_used_idx)) { flush_shadow_used_ring(dev, vq); @@ -579,6 +934,13 @@ virtio_dev_merge_rx(struct virtio_net *dev, uint16_t queue_id, eventfd_write(vq->callfd, (eventfd_t)1); } +out: + if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)) + vhost_user_iotlb_rd_unlock(vq); + +out_access_unlock: + rte_spinlock_unlock(&vq->access_lock); + return pkt_idx; } @@ -601,9 +963,11 @@ static inline bool virtio_net_with_host_offload(struct virtio_net *dev) { if (dev->features & - (VIRTIO_NET_F_CSUM | VIRTIO_NET_F_HOST_ECN | - VIRTIO_NET_F_HOST_TSO4 | VIRTIO_NET_F_HOST_TSO6 | - VIRTIO_NET_F_HOST_UFO)) + ((1ULL << VIRTIO_NET_F_CSUM) | + (1ULL << VIRTIO_NET_F_HOST_ECN) | + (1ULL << VIRTIO_NET_F_HOST_TSO4) | + (1ULL << VIRTIO_NET_F_HOST_TSO6) | + (1ULL << VIRTIO_NET_F_HOST_UFO))) return true; return false; @@ -655,7 +1019,7 @@ parse_ethernet(struct rte_mbuf *m, uint16_t *l4_proto, void **l4_hdr) } } -static inline void __attribute__((always_inline)) +static __rte_always_inline void vhost_dequeue_offload(struct virtio_net_hdr *hdr, struct rte_mbuf *m) { uint16_t l4_proto = 0; @@ -743,39 +1107,86 @@ make_rarp_packet(struct rte_mbuf *rarp_mbuf, const struct ether_addr *mac) return 0; } -static inline void __attribute__((always_inline)) +static __rte_always_inline void put_zmbuf(struct zcopy_mbuf *zmbuf) { zmbuf->in_use = 0; } -static inline int __attribute__((always_inline)) -copy_desc_to_mbuf(struct virtio_net *dev, struct vring_desc *descs, - uint16_t max_desc, struct rte_mbuf *m, uint16_t desc_idx, +static __rte_always_inline int +copy_desc_to_mbuf(struct virtio_net *dev, struct vhost_virtqueue *vq, + struct vring_desc *descs, uint16_t max_desc, + struct rte_mbuf *m, uint16_t desc_idx, struct rte_mempool *mbuf_pool) { struct vring_desc *desc; - uint64_t desc_addr; + uint64_t desc_addr, desc_gaddr; uint32_t desc_avail, desc_offset; uint32_t mbuf_avail, mbuf_offset; uint32_t cpy_len; + uint64_t desc_chunck_len; struct rte_mbuf *cur = m, *prev = m; + struct virtio_net_hdr tmp_hdr; struct virtio_net_hdr *hdr = NULL; /* A counter to avoid desc dead loop chain */ uint32_t nr_desc = 1; + struct batch_copy_elem *batch_copy = vq->batch_copy_elems; + uint16_t copy_nb = vq->batch_copy_nb_elems; + int error = 0; desc = &descs[desc_idx]; if (unlikely((desc->len < dev->vhost_hlen)) || - (desc->flags & VRING_DESC_F_INDIRECT)) - return -1; + (desc->flags & VRING_DESC_F_INDIRECT)) { + error = -1; + goto out; + } - desc_addr = rte_vhost_gpa_to_vva(dev->mem, desc->addr); - if (unlikely(!desc_addr)) - return -1; + desc_chunck_len = desc->len; + desc_gaddr = desc->addr; + desc_addr = vhost_iova_to_vva(dev, + vq, desc_gaddr, + &desc_chunck_len, + VHOST_ACCESS_RO); + if (unlikely(!desc_addr)) { + error = -1; + goto out; + } if (virtio_net_with_host_offload(dev)) { - hdr = (struct virtio_net_hdr *)((uintptr_t)desc_addr); - rte_prefetch0(hdr); + if (unlikely(desc_chunck_len < sizeof(struct virtio_net_hdr))) { + uint64_t len = desc_chunck_len; + uint64_t remain = 
sizeof(struct virtio_net_hdr); + uint64_t src = desc_addr; + uint64_t dst = (uint64_t)(uintptr_t)&tmp_hdr; + uint64_t guest_addr = desc_gaddr; + + /* + * No luck, the virtio-net header doesn't fit + * in a contiguous virtual area. + */ + while (remain) { + len = remain; + src = vhost_iova_to_vva(dev, vq, + guest_addr, &len, + VHOST_ACCESS_RO); + if (unlikely(!src || !len)) { + error = -1; + goto out; + } + + rte_memcpy((void *)(uintptr_t)dst, + (void *)(uintptr_t)src, len); + + guest_addr += len; + remain -= len; + dst += len; + } + + hdr = &tmp_hdr; + } else { + hdr = (struct virtio_net_hdr *)((uintptr_t)desc_addr); + rte_prefetch0(hdr); + } } /* @@ -786,31 +1197,58 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vring_desc *descs, if (likely((desc->len == dev->vhost_hlen) && (desc->flags & VRING_DESC_F_NEXT) != 0)) { desc = &descs[desc->next]; - if (unlikely(desc->flags & VRING_DESC_F_INDIRECT)) - return -1; + if (unlikely(desc->flags & VRING_DESC_F_INDIRECT)) { + error = -1; + goto out; + } - desc_addr = rte_vhost_gpa_to_vva(dev->mem, desc->addr); - if (unlikely(!desc_addr)) - return -1; + desc_chunck_len = desc->len; + desc_gaddr = desc->addr; + desc_addr = vhost_iova_to_vva(dev, + vq, desc_gaddr, + &desc_chunck_len, + VHOST_ACCESS_RO); + if (unlikely(!desc_addr)) { + error = -1; + goto out; + } desc_offset = 0; desc_avail = desc->len; nr_desc += 1; } else { desc_avail = desc->len - dev->vhost_hlen; - desc_offset = dev->vhost_hlen; + + if (unlikely(desc_chunck_len < dev->vhost_hlen)) { + desc_chunck_len = desc_avail; + desc_gaddr += dev->vhost_hlen; + desc_addr = vhost_iova_to_vva(dev, + vq, desc_gaddr, + &desc_chunck_len, + VHOST_ACCESS_RO); + if (unlikely(!desc_addr)) { + error = -1; + goto out; + } + + desc_offset = 0; + } else { + desc_offset = dev->vhost_hlen; + desc_chunck_len -= dev->vhost_hlen; + } } rte_prefetch0((void *)(uintptr_t)(desc_addr + desc_offset)); - PRINT_PACKET(dev, (uintptr_t)(desc_addr + desc_offset), desc_avail, 0); + PRINT_PACKET(dev, (uintptr_t)(desc_addr + desc_offset), + (uint32_t)desc_chunck_len, 0); mbuf_offset = 0; mbuf_avail = m->buf_len - RTE_PKTMBUF_HEADROOM; while (1) { uint64_t hpa; - cpy_len = RTE_MIN(desc_avail, mbuf_avail); + cpy_len = RTE_MIN(desc_chunck_len, mbuf_avail); /* * A desc buf might across two host physical pages that are @@ -818,11 +1256,12 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vring_desc *descs, * will be copied even though zero copy is enabled. 
*/ if (unlikely(dev->dequeue_zero_copy && (hpa = gpa_to_hpa(dev, - desc->addr + desc_offset, cpy_len)))) { + desc_gaddr + desc_offset, cpy_len)))) { cur->data_len = cpy_len; cur->data_off = 0; - cur->buf_addr = (void *)(uintptr_t)desc_addr; - cur->buf_physaddr = hpa; + cur->buf_addr = (void *)(uintptr_t)(desc_addr + + desc_offset); + cur->buf_iova = hpa; /* * In zero copy mode, one mbuf can only reference data @@ -830,15 +1269,31 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vring_desc *descs, */ mbuf_avail = cpy_len; } else { - rte_memcpy(rte_pktmbuf_mtod_offset(cur, void *, - mbuf_offset), - (void *)((uintptr_t)(desc_addr + desc_offset)), - cpy_len); + if (likely(cpy_len > MAX_BATCH_LEN || + copy_nb >= vq->size || + (hdr && cur == m) || + desc->len != desc_chunck_len)) { + rte_memcpy(rte_pktmbuf_mtod_offset(cur, void *, + mbuf_offset), + (void *)((uintptr_t)(desc_addr + + desc_offset)), + cpy_len); + } else { + batch_copy[copy_nb].dst = + rte_pktmbuf_mtod_offset(cur, void *, + mbuf_offset); + batch_copy[copy_nb].src = + (void *)((uintptr_t)(desc_addr + + desc_offset)); + batch_copy[copy_nb].len = cpy_len; + copy_nb++; + } } mbuf_avail -= cpy_len; mbuf_offset += cpy_len; desc_avail -= cpy_len; + desc_chunck_len -= cpy_len; desc_offset += cpy_len; /* This desc reaches to its end, get the next one */ @@ -847,22 +1302,49 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vring_desc *descs, break; if (unlikely(desc->next >= max_desc || - ++nr_desc > max_desc)) - return -1; + ++nr_desc > max_desc)) { + error = -1; + goto out; + } desc = &descs[desc->next]; - if (unlikely(desc->flags & VRING_DESC_F_INDIRECT)) - return -1; + if (unlikely(desc->flags & VRING_DESC_F_INDIRECT)) { + error = -1; + goto out; + } - desc_addr = rte_vhost_gpa_to_vva(dev->mem, desc->addr); - if (unlikely(!desc_addr)) - return -1; + desc_chunck_len = desc->len; + desc_gaddr = desc->addr; + desc_addr = vhost_iova_to_vva(dev, + vq, desc_gaddr, + &desc_chunck_len, + VHOST_ACCESS_RO); + if (unlikely(!desc_addr)) { + error = -1; + goto out; + } rte_prefetch0((void *)(uintptr_t)desc_addr); desc_offset = 0; desc_avail = desc->len; - PRINT_PACKET(dev, (uintptr_t)desc_addr, desc->len, 0); + PRINT_PACKET(dev, (uintptr_t)desc_addr, + (uint32_t)desc_chunck_len, 0); + } else if (unlikely(desc_chunck_len == 0)) { + desc_chunck_len = desc_avail; + desc_gaddr += desc_offset; + desc_addr = vhost_iova_to_vva(dev, vq, + desc_gaddr, + &desc_chunck_len, + VHOST_ACCESS_RO); + if (unlikely(!desc_addr)) { + error = -1; + goto out; + } + desc_offset = 0; + + PRINT_PACKET(dev, (uintptr_t)desc_addr, + (uint32_t)desc_chunck_len, 0); } /* @@ -874,7 +1356,8 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vring_desc *descs, if (unlikely(cur == NULL)) { RTE_LOG(ERR, VHOST_DATA, "Failed to " "allocate memory for mbuf.\n"); - return -1; + error = -1; + goto out; } if (unlikely(dev->dequeue_zero_copy)) rte_mbuf_refcnt_update(cur, 1); @@ -896,21 +1379,24 @@ copy_desc_to_mbuf(struct virtio_net *dev, struct vring_desc *descs, if (hdr) vhost_dequeue_offload(hdr, m); - return 0; +out: + vq->batch_copy_nb_elems = copy_nb; + + return error; } -static inline void __attribute__((always_inline)) +static __rte_always_inline void update_used_ring(struct virtio_net *dev, struct vhost_virtqueue *vq, uint32_t used_idx, uint32_t desc_idx) { vq->used->ring[used_idx].id = desc_idx; vq->used->ring[used_idx].len = 0; - vhost_log_used_vring(dev, vq, + vhost_log_cache_used_vring(dev, vq, offsetof(struct vring_used, ring[used_idx]), sizeof(vq->used->ring[used_idx])); } 
-static inline void __attribute__((always_inline)) +static __rte_always_inline void update_used_idx(struct virtio_net *dev, struct vhost_virtqueue *vq, uint32_t count) { @@ -920,6 +1406,8 @@ update_used_idx(struct virtio_net *dev, struct vhost_virtqueue *vq, rte_smp_wmb(); rte_smp_rmb(); + vhost_log_cache_sync(dev, vq); + vq->used->idx += count; vhost_log_used_vring(dev, vq, offsetof(struct vring_used, idx), sizeof(vq->used->idx)); @@ -930,7 +1418,7 @@ update_used_idx(struct virtio_net *dev, struct vhost_virtqueue *vq, eventfd_write(vq->callfd, (eventfd_t)1); } -static inline struct zcopy_mbuf *__attribute__((always_inline)) +static __rte_always_inline struct zcopy_mbuf * get_zmbuf(struct vhost_virtqueue *vq) { uint16_t i; @@ -961,7 +1449,7 @@ again: return NULL; } -static inline bool __attribute__((always_inline)) +static __rte_always_inline bool mbuf_is_consumed(struct rte_mbuf *m) { while (m) { @@ -973,6 +1461,22 @@ mbuf_is_consumed(struct rte_mbuf *m) return true; } +static __rte_always_inline void +restore_mbuf(struct rte_mbuf *m) +{ + uint32_t mbuf_size, priv_size; + + while (m) { + priv_size = rte_pktmbuf_priv_size(m->pool); + mbuf_size = sizeof(struct rte_mbuf) + priv_size; + /* start of buffer is after mbuf structure and priv data */ + + m->buf_addr = (char *)m + mbuf_size; + m->buf_iova = rte_mempool_virt2iova(m) + mbuf_size; + m = m->next; + } +} + uint16_t rte_vhost_dequeue_burst(int vid, uint16_t queue_id, struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count) @@ -997,9 +1501,22 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id, } vq = dev->virtqueue[queue_id]; - if (unlikely(vq->enabled == 0)) + + if (unlikely(rte_spinlock_trylock(&vq->access_lock) == 0)) return 0; + if (unlikely(vq->enabled == 0)) + goto out_access_unlock; + + vq->batch_copy_nb_elems = 0; + + if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)) + vhost_user_iotlb_rd_lock(vq); + + if (unlikely(vq->access_ok == 0)) + if (unlikely(vring_translate(dev, vq) < 0)) + goto out; + if (unlikely(dev->dequeue_zero_copy)) { struct zcopy_mbuf *zmbuf, *next; int nr_updated = 0; @@ -1015,6 +1532,7 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id, nr_updated += 1; TAILQ_REMOVE(&vq->zmbuf_list, zmbuf, next); + restore_mbuf(zmbuf->mbuf); rte_pktmbuf_free(zmbuf->mbuf); put_zmbuf(zmbuf); vq->nr_zmbuf -= 1; @@ -1048,7 +1566,7 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id, if (rarp_mbuf == NULL) { RTE_LOG(ERR, VHOST_DATA, "Failed to allocate memory for mbuf.\n"); - return 0; + goto out; } if (make_rarp_packet(rarp_mbuf, &dev->mac)) { @@ -1090,20 +1608,37 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id, /* Prefetch descriptor index. */ rte_prefetch0(&vq->desc[desc_indexes[0]]); for (i = 0; i < count; i++) { - struct vring_desc *desc; + struct vring_desc *desc, *idesc = NULL; uint16_t sz, idx; + uint64_t dlen; int err; if (likely(i + 1 < count)) rte_prefetch0(&vq->desc[desc_indexes[i + 1]]); if (vq->desc[desc_indexes[i]].flags & VRING_DESC_F_INDIRECT) { + dlen = vq->desc[desc_indexes[i]].len; desc = (struct vring_desc *)(uintptr_t) - rte_vhost_gpa_to_vva(dev->mem, - vq->desc[desc_indexes[i]].addr); + vhost_iova_to_vva(dev, vq, + vq->desc[desc_indexes[i]].addr, + &dlen, + VHOST_ACCESS_RO); if (unlikely(!desc)) break; + if (unlikely(dlen < vq->desc[desc_indexes[i]].len)) { + /* + * The indirect desc table is not contiguous + * in process VA space, we have to copy it. 
+ */ + idesc = alloc_copy_ind_table(dev, vq, + &vq->desc[desc_indexes[i]]); + if (unlikely(!idesc)) + break; + + desc = idesc; + } + rte_prefetch0(desc); sz = vq->desc[desc_indexes[i]].len / sizeof(*desc); idx = 0; @@ -1117,12 +1652,15 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id, if (unlikely(pkts[i] == NULL)) { RTE_LOG(ERR, VHOST_DATA, "Failed to allocate memory for mbuf.\n"); + free_ind_table(idesc); break; } - err = copy_desc_to_mbuf(dev, desc, sz, pkts[i], idx, mbuf_pool); + err = copy_desc_to_mbuf(dev, vq, desc, sz, pkts[i], idx, + mbuf_pool); if (unlikely(err)) { rte_pktmbuf_free(pkts[i]); + free_ind_table(idesc); break; } @@ -1132,6 +1670,7 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id, zmbuf = get_zmbuf(vq); if (!zmbuf) { rte_pktmbuf_free(pkts[i]); + free_ind_table(idesc); break; } zmbuf->mbuf = pkts[i]; @@ -1148,15 +1687,25 @@ rte_vhost_dequeue_burst(int vid, uint16_t queue_id, vq->nr_zmbuf += 1; TAILQ_INSERT_TAIL(&vq->zmbuf_list, zmbuf, next); } + + if (unlikely(!!idesc)) + free_ind_table(idesc); } vq->last_avail_idx += i; if (likely(dev->dequeue_zero_copy == 0)) { + do_data_copy_dequeue(vq); vq->last_used_idx += i; update_used_idx(dev, vq, i); } out: + if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM)) + vhost_user_iotlb_rd_unlock(vq); + +out_access_unlock: + rte_spinlock_unlock(&vq->access_lock); + if (unlikely(rarp_mbuf != NULL)) { /* * Inject it to the head of "pkts" array, so that switch's mac
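
The change that dominates this diff is the switch from rte_vhost_gpa_to_vva() to vhost_iova_to_vva(): once VIRTIO_F_IOMMU_PLATFORM is negotiated, a buffer that is contiguous in guest address space may translate to several non-contiguous chunks of host virtual memory, so every access re-translates and copies chunk by chunk (the while (remain) loops around the virtio-net header, the indirect descriptor table and the payload above). A minimal sketch of that pattern follows, assuming a hypothetical translate() callback in place of vhost_iova_to_vva() (the real function also takes the device, the virtqueue and an access-permission flag):

#include <stdint.h>
#include <stddef.h>
#include <string.h>

/*
 * Stand-in for vhost_iova_to_vva(): returns the host VA backing 'iova',
 * shrinks *len to the size of the contiguous chunk starting there, and
 * returns NULL on a translation miss.  Hypothetical signature.
 */
typedef void *(*iova_translate_t)(uint64_t iova, uint64_t *len);

static int
copy_from_guest(void *dst, uint64_t guest_addr, uint64_t size,
		iova_translate_t translate)
{
	uint64_t remain = size;
	uint8_t *out = dst;

	while (remain) {
		uint64_t len = remain;		/* ask for everything that is left */
		void *src = translate(guest_addr, &len);

		if (src == NULL || len == 0)
			return -1;		/* miss: the caller gives up on the packet */

		memcpy(out, src, len);		/* copy one contiguous chunk */

		remain     -= len;
		out        += len;
		guest_addr += len;		/* resume at the start of the next chunk */
	}

	return 0;
}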
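The other recurring addition is the deferred copy path guarded by MAX_BATCH_LEN: copies of at most 256 bytes are queued in vq->batch_copy_elems and executed together by do_data_copy_enqueue()/do_data_copy_dequeue() before the used ring index is updated, while larger copies are still performed inline. A sketch of the same idea with local names (copy_batch, queue_or_copy and flush_copies are illustrative, not the vhost structures):

#include <stdint.h>
#include <string.h>

#define BATCH_THRESHOLD	256	/* mirrors MAX_BATCH_LEN in the patch */
#define BATCH_SLOTS	64	/* illustrative; the patch bounds the batch at vq->size */

struct copy_elem {
	void		*dst;
	const void	*src;
	uint32_t	 len;
};

struct copy_batch {
	struct copy_elem elem[BATCH_SLOTS];
	uint16_t	 nb;
};

static void
queue_or_copy(struct copy_batch *b, void *dst, const void *src, uint32_t len)
{
	if (len > BATCH_THRESHOLD || b->nb >= BATCH_SLOTS) {
		memcpy(dst, src, len);	/* large copy, or batch full: do it now */
		return;
	}

	b->elem[b->nb].dst = dst;	/* small copy: defer it */
	b->elem[b->nb].src = src;
	b->elem[b->nb].len = len;
	b->nb++;
}

static void
flush_copies(struct copy_batch *b)
{
	uint16_t i;

	for (i = 0; i < b->nb; i++)	/* one tight loop once the descriptors are set up */
		memcpy(b->elem[i].dst, b->elem[i].src, b->elem[i].len);

	b->nb = 0;
}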