New upstream version 18.11-rc1
diff --git a/lib/librte_vhost/virtio_net.c b/lib/librte_vhost/virtio_net.c
index 99c7afc..8ad30c9 100644
--- a/lib/librte_vhost/virtio_net.c
+++ b/lib/librte_vhost/virtio_net.c
@@ -122,7 +122,7 @@ flush_shadow_used_ring_split(struct virtio_net *dev, struct vhost_virtqueue *vq)
 
 static __rte_always_inline void
 update_shadow_used_ring_split(struct vhost_virtqueue *vq,
-                        uint16_t desc_idx, uint16_t len)
+                        uint16_t desc_idx, uint32_t len)
 {
        uint16_t i = vq->shadow_used_idx++;
 
@@ -186,7 +186,7 @@ flush_shadow_used_ring_packed(struct virtio_net *dev,
 
 static __rte_always_inline void
 update_shadow_used_ring_packed(struct vhost_virtqueue *vq,
-                        uint16_t desc_idx, uint16_t len, uint16_t count)
+                        uint16_t desc_idx, uint32_t len, uint16_t count)
 {
        uint16_t i = vq->shadow_used_idx++;
 
@@ -329,7 +329,7 @@ static __rte_always_inline int
 fill_vec_buf_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
                         uint32_t avail_idx, uint16_t *vec_idx,
                         struct buf_vector *buf_vec, uint16_t *desc_chain_head,
-                        uint16_t *desc_chain_len, uint8_t perm)
+                        uint32_t *desc_chain_len, uint8_t perm)
 {
        uint16_t idx = vq->avail->ring[avail_idx & (vq->size - 1)];
        uint16_t vec_id = *vec_idx;
@@ -409,7 +409,7 @@ reserve_avail_buf_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
        uint16_t max_tries, tries = 0;
 
        uint16_t head_idx = 0;
-       uint16_t len = 0;
+       uint32_t len = 0;
 
        *num_buffers = 0;
        cur_idx  = vq->last_avail_idx;
@@ -452,7 +452,7 @@ static __rte_always_inline int
 fill_vec_buf_packed_indirect(struct virtio_net *dev,
                        struct vhost_virtqueue *vq,
                        struct vring_packed_desc *desc, uint16_t *vec_idx,
-                       struct buf_vector *buf_vec, uint16_t *len, uint8_t perm)
+                       struct buf_vector *buf_vec, uint32_t *len, uint8_t perm)
 {
        uint16_t i;
        uint32_t nr_descs;
@@ -508,7 +508,7 @@ static __rte_always_inline int
 fill_vec_buf_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
                                uint16_t avail_idx, uint16_t *desc_count,
                                struct buf_vector *buf_vec, uint16_t *vec_idx,
-                               uint16_t *buf_id, uint16_t *len, uint8_t perm)
+                               uint16_t *buf_id, uint32_t *len, uint8_t perm)
 {
        bool wrap_counter = vq->avail_wrap_counter;
        struct vring_packed_desc *descs = vq->desc_packed;
@@ -521,6 +521,7 @@ fill_vec_buf_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
                return -1;
 
        *desc_count = 0;
+       *len = 0;
 
        while (1) {
                if (unlikely(vec_id >= BUF_VECTOR_MAX))
@@ -573,7 +574,7 @@ reserve_avail_buf_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
        uint16_t max_tries, tries = 0;
 
        uint16_t buf_id = 0;
-       uint16_t len = 0;
+       uint32_t len = 0;
        uint16_t desc_count;
 
        *num_buffers = 0;
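
Note on the hunks above: they widen the descriptor-chain length bookkeeping from uint16_t to uint32_t (and zero *len before the packed-ring descriptor loop). Virtio descriptor lengths are 32-bit, so summing a chain into a 16-bit variable silently wraps once the buffer exceeds 64 KiB. A minimal standalone sketch of that truncation, not taken from this file and using made-up values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t desc_len = 40000;	/* hypothetical length of one descriptor */
	uint16_t len16 = 0;		/* the old accumulator type */
	uint32_t len32 = 0;		/* the new accumulator type */
	int i;

	for (i = 0; i < 2; i++) {	/* a chain of two such descriptors */
		len16 += desc_len;	/* wraps modulo 65536: ends up as 14464 */
		len32 += desc_len;	/* ends up as 80000 */
	}
	printf("uint16_t total: %u, uint32_t total: %u\n",
		(unsigned)len16, (unsigned)len32);
	return 0;
}
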
@@ -888,6 +889,7 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
        struct rte_mbuf **pkts, uint32_t count)
 {
        struct vhost_virtqueue *vq;
+       uint32_t nb_tx = 0;
 
        VHOST_LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);
        if (unlikely(!is_valid_virt_queue_idx(queue_id, 0, dev->nr_vring))) {
@@ -915,9 +917,9 @@ virtio_dev_rx(struct virtio_net *dev, uint16_t queue_id,
                goto out;
 
        if (vq_is_packed(dev))
-               count = virtio_dev_rx_packed(dev, vq, pkts, count);
+               nb_tx = virtio_dev_rx_packed(dev, vq, pkts, count);
        else
-               count = virtio_dev_rx_split(dev, vq, pkts, count);
+               nb_tx = virtio_dev_rx_split(dev, vq, pkts, count);
 
 out:
        if (dev->features & (1ULL << VIRTIO_F_IOMMU_PLATFORM))
@@ -926,7 +928,7 @@ out:
 out_access_unlock:
        rte_spinlock_unlock(&vq->access_lock);
 
-       return count;
+       return nb_tx;
 }
 
 uint16_t
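
Note on the three virtio_dev_rx() hunks above: the result is accumulated in a separate nb_tx variable instead of overwriting count, so the early-exit paths that jump to out/out_access_unlock before any packets are processed return 0 rather than echoing the caller's burst size back. A simplified sketch of the pattern, with illustrative names that are not from this file:

#include <stdint.h>
#include <stdio.h>

/* Returns how many packets were actually processed; early exits report 0. */
static uint32_t
enqueue_burst(uint32_t count, int queue_ready)
{
	uint32_t nb_tx = 0;		/* result, instead of reusing 'count' */

	if (!queue_ready)
		goto out;		/* nothing was enqueued */

	nb_tx = count;			/* stand-in for the real enqueue loop */
out:
	return nb_tx;
}

int main(void)
{
	printf("ready: %u, not ready: %u\n",
		(unsigned)enqueue_burst(32, 1), (unsigned)enqueue_burst(32, 0));
	return 0;
}
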
@@ -1358,8 +1360,10 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
                        }
                }
 
-               flush_shadow_used_ring_split(dev, vq);
-               vhost_vring_call_split(dev, vq);
+               if (likely(vq->shadow_used_idx)) {
+                       flush_shadow_used_ring_split(dev, vq);
+                       vhost_vring_call_split(dev, vq);
+               }
        }
 
        rte_prefetch0(&vq->avail->ring[vq->last_avail_idx & (vq->size - 1)]);
@@ -1378,7 +1382,8 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
 
        for (i = 0; i < count; i++) {
                struct buf_vector buf_vec[BUF_VECTOR_MAX];
-               uint16_t head_idx, dummy_len;
+               uint16_t head_idx;
+               uint32_t dummy_len;
                uint16_t nr_vec = 0;
                int err;
 
@@ -1437,8 +1442,10 @@ virtio_dev_tx_split(struct virtio_net *dev, struct vhost_virtqueue *vq,
                do_data_copy_dequeue(vq);
                if (unlikely(i < count))
                        vq->shadow_used_idx = i;
-               flush_shadow_used_ring_split(dev, vq);
-               vhost_vring_call_split(dev, vq);
+               if (likely(vq->shadow_used_idx)) {
+                       flush_shadow_used_ring_split(dev, vq);
+                       vhost_vring_call_split(dev, vq);
+               }
        }
 
        return i;
@@ -1473,8 +1480,10 @@ virtio_dev_tx_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
                        }
                }
 
-               flush_shadow_used_ring_packed(dev, vq);
-               vhost_vring_call_packed(dev, vq);
+               if (likely(vq->shadow_used_idx)) {
+                       flush_shadow_used_ring_packed(dev, vq);
+                       vhost_vring_call_packed(dev, vq);
+               }
        }
 
        VHOST_LOG_DEBUG(VHOST_DATA, "(%d) %s\n", dev->vid, __func__);
@@ -1485,7 +1494,8 @@ virtio_dev_tx_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
 
        for (i = 0; i < count; i++) {
                struct buf_vector buf_vec[BUF_VECTOR_MAX];
-               uint16_t buf_id, dummy_len;
+               uint16_t buf_id;
+               uint32_t dummy_len;
                uint16_t desc_count, nr_vec = 0;
                int err;
 
@@ -1551,8 +1561,10 @@ virtio_dev_tx_packed(struct virtio_net *dev, struct vhost_virtqueue *vq,
                do_data_copy_dequeue(vq);
                if (unlikely(i < count))
                        vq->shadow_used_idx = i;
-               flush_shadow_used_ring_packed(dev, vq);
-               vhost_vring_call_packed(dev, vq);
+               if (likely(vq->shadow_used_idx)) {
+                       flush_shadow_used_ring_packed(dev, vq);
+                       vhost_vring_call_packed(dev, vq);
+               }
        }
 
        return i;
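
Note on the remaining hunks: both dequeue paths (virtio_dev_tx_split and virtio_dev_tx_packed) now flush the shadow used ring and call the guest only when vq->shadow_used_idx is non-zero, i.e. only when at least one descriptor was actually consumed in that iteration. The guard skips the used-ring update and a potentially needless guest notification when nothing was dequeued; the shape of the check is identical in the split and packed variants, differing only in the _split/_packed helpers they call.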