X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=drivers%2Fnet%2Fvirtio%2Fvirtio_rxtx.c;h=8dbf2a30e0b42e432e2f82ce7d1b7399b4845098;hb=ca33590b6af032bff57d9cc70455660466a654b2;hp=22d97a4ea5b3e447a27d3488754ed43eb7ee5d11;hpb=6b3e017e5d25f15da73f7700f7f2ac553ef1a2e9;p=deb_dpdk.git

diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c
index 22d97a4e..8dbf2a30 100644
--- a/drivers/net/virtio/virtio_rxtx.c
+++ b/drivers/net/virtio/virtio_rxtx.c
@@ -1,34 +1,5 @@
-/*-
- *   BSD LICENSE
- *
- *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- *   All rights reserved.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Intel Corporation nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2010-2014 Intel Corporation
  */
 
 #include <stdint.h>
@@ -39,18 +10,16 @@
 
 #include <rte_cycles.h>
 #include <rte_memory.h>
-#include <rte_memzone.h>
 #include <rte_branch_prediction.h>
 #include <rte_mempool.h>
 #include <rte_malloc.h>
 #include <rte_mbuf.h>
 #include <rte_ether.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
 #include <rte_prefetch.h>
 #include <rte_string_fns.h>
 #include <rte_errno.h>
 #include <rte_byteorder.h>
-#include <rte_cpuflags.h>
 #include <rte_net.h>
 #include <rte_ip.h>
 #include <rte_udp.h>
@@ -61,6 +30,7 @@
 #include "virtio_pci.h"
 #include "virtqueue.h"
 #include "virtio_rxtx.h"
+#include "virtio_rxtx_simple.h"
 
 #ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP
 #define VIRTIO_DUMP_PACKET(m, len) rte_pktmbuf_dump(stdout, m, len)
@@ -72,7 +42,16 @@
 #define VIRTIO_SIMPLE_FLAGS ((uint32_t)ETH_TXQ_FLAGS_NOMULTSEGS | \
 	ETH_TXQ_FLAGS_NOOFFLOADS)
 
-static void
+int
+virtio_dev_rx_queue_done(void *rxq, uint16_t offset)
+{
+	struct virtnet_rx *rxvq = rxq;
+	struct virtqueue *vq = rxvq->vq;
+
+	return VIRTQUEUE_NUSED(vq) >= offset;
+}
+
+void
 vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
 {
 	struct vring_desc *dp, *dp_tail;
@@ -124,7 +103,7 @@ virtqueue_dequeue_burst_rx(struct virtqueue *vq, struct rte_mbuf **rx_pkts,
 		cookie = (struct rte_mbuf *)vq->vq_descx[desc_idx].cookie;
 
 		if (unlikely(cookie == NULL)) {
-			PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u\n",
+			PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
 				vq->vq_used_cons_idx);
 			break;
 		}
@@ -258,6 +237,12 @@ tx_offload_enabled(struct virtio_hw *hw)
 		vtpci_with_feature(hw, VIRTIO_NET_F_HOST_TSO6);
 }
 
+/* avoid write operation when necessary, to lessen cache issues */
+#define ASSIGN_UNLESS_EQUAL(var, val) do {	\
+	if ((var) != (val))			\
+		(var) = (val);			\
+} while (0)
+
 static inline void
 virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
 		       uint16_t needed, int use_indirect, int can_push)
@@ -285,9 +270,19 @@ virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
 		/* prepend cannot fail, checked by caller */
 		hdr = (struct virtio_net_hdr *)
 			rte_pktmbuf_prepend(cookie, head_size);
+		/* rte_pktmbuf_prepend() counts the hdr size to the pkt length,
+		 * which is wrong. Below subtract restores correct pkt size.
+		 */
+		cookie->pkt_len -= head_size;
 		/* if offload disabled, it is not zeroed below, do it now */
-		if (offload == 0)
-			memset(hdr, 0, head_size);
+		if (offload == 0) {
+			ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0);
+			ASSIGN_UNLESS_EQUAL(hdr->csum_offset, 0);
+			ASSIGN_UNLESS_EQUAL(hdr->flags, 0);
+			ASSIGN_UNLESS_EQUAL(hdr->gso_type, 0);
+			ASSIGN_UNLESS_EQUAL(hdr->gso_size, 0);
+			ASSIGN_UNLESS_EQUAL(hdr->hdr_len, 0);
+		}
 	} else if (use_indirect) {
 		/* setup tx ring slot to point to indirect
 		 * descriptor list stored in reserved region.
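A side note on the ASSIGN_UNLESS_EQUAL hunk above: the old path cleared the virtio-net header with memset() on every can_push transmit, dirtying that cache line even when it already held zeros. The macro issues a store only when a field's value actually differs. Below is a minimal standalone sketch of the pattern; the net_hdr layout and the hdr_zero() helper are illustrative stand-ins for this note, not definitions from the PMD.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* avoid write operation when necessary, to lessen cache issues */
#define ASSIGN_UNLESS_EQUAL(var, val) do {	\
	if ((var) != (val))			\
		(var) = (val);			\
} while (0)

/* rough shape of a legacy virtio-net header, for illustration only */
struct net_hdr {
	uint8_t flags;
	uint8_t gso_type;
	uint16_t hdr_len;
	uint16_t gso_size;
	uint16_t csum_start;
	uint16_t csum_offset;
};

/* hypothetical helper mirroring the offload-disabled Tx branch:
 * every field ends up zero, but only non-zero fields are written */
static void
hdr_zero(struct net_hdr *hdr)
{
	ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0);
	ASSIGN_UNLESS_EQUAL(hdr->csum_offset, 0);
	ASSIGN_UNLESS_EQUAL(hdr->flags, 0);
	ASSIGN_UNLESS_EQUAL(hdr->gso_type, 0);
	ASSIGN_UNLESS_EQUAL(hdr->gso_size, 0);
	ASSIGN_UNLESS_EQUAL(hdr->hdr_len, 0);
}

int
main(void)
{
	struct net_hdr hdr;

	memset(&hdr, 0, sizeof(hdr));
	hdr.csum_start = 14;	/* pretend an earlier send used checksum offload */
	hdr_zero(&hdr);		/* only csum_start is actually stored to */
	printf("csum_start=%u flags=%u\n",
	       (unsigned)hdr.csum_start, (unsigned)hdr.flags);
	return 0;
}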
@@ -337,9 +332,9 @@ virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
 			break;
 
 		default:
-			hdr->csum_start = 0;
-			hdr->csum_offset = 0;
-			hdr->flags = 0;
+			ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0);
+			ASSIGN_UNLESS_EQUAL(hdr->csum_offset, 0);
+			ASSIGN_UNLESS_EQUAL(hdr->flags, 0);
 			break;
 		}
 
@@ -355,9 +350,9 @@ virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
 				cookie->l3_len +
 				cookie->l4_len;
 		} else {
-			hdr->gso_type = 0;
-			hdr->gso_size = 0;
-			hdr->hdr_len = 0;
+			ASSIGN_UNLESS_EQUAL(hdr->gso_type, 0);
+			ASSIGN_UNLESS_EQUAL(hdr->gso_size, 0);
+			ASSIGN_UNLESS_EQUAL(hdr->hdr_len, 0);
 		}
 	}
 
@@ -384,6 +379,7 @@ virtio_dev_cq_start(struct rte_eth_dev *dev)
 	struct virtio_hw *hw = dev->data->dev_private;
 
 	if (hw->cvq && hw->cvq->vq) {
+		rte_spinlock_init(&hw->cvq->lock);
 		VIRTQUEUE_DUMP((struct virtqueue *)hw->cvq->vq);
 	}
 }
@@ -400,9 +396,6 @@ virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	struct virtio_hw *hw = dev->data->dev_private;
 	struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
 	struct virtnet_rx *rxvq;
-	int error, nbufs;
-	struct rte_mbuf *m;
-	uint16_t desc_idx;
 
 	PMD_INIT_FUNC_TRACE();
 
@@ -419,18 +412,34 @@
 	}
 	dev->data->rx_queues[queue_idx] = rxvq;
 
+	return 0;
+}
+
+int
+virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
+{
+	uint16_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;
+	struct virtio_hw *hw = dev->data->dev_private;
+	struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
+	struct virtnet_rx *rxvq = &vq->rxq;
+	struct rte_mbuf *m;
+	uint16_t desc_idx;
+	int error, nbufs;
+
+	PMD_INIT_FUNC_TRACE();
+
 	/* Allocate blank mbufs for the each rx descriptor */
 	nbufs = 0;
-	error = ENOSPC;
 
-	if (hw->use_simple_rxtx) {
+	if (hw->use_simple_rx) {
 		for (desc_idx = 0; desc_idx < vq->vq_nentries;
 		     desc_idx++) {
 			vq->vq_ring.avail->ring[desc_idx] = desc_idx;
 			vq->vq_ring.desc[desc_idx].flags =
 				VRING_DESC_F_WRITE;
 		}
+
+		virtio_rxq_vec_setup(rxvq);
 	}
 
 	memset(&rxvq->fake_mbuf, 0, sizeof(rxvq->fake_mbuf));
@@ -440,60 +449,36 @@
 			&rxvq->fake_mbuf;
 	}
 
-	while (!virtqueue_full(vq)) {
-		m = rte_mbuf_raw_alloc(rxvq->mpool);
-		if (m == NULL)
-			break;
+	if (hw->use_simple_rx) {
+		while (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
+			virtio_rxq_rearm_vec(rxvq);
+			nbufs += RTE_VIRTIO_VPMD_RX_REARM_THRESH;
+		}
+	} else {
+		while (!virtqueue_full(vq)) {
+			m = rte_mbuf_raw_alloc(rxvq->mpool);
+			if (m == NULL)
+				break;
 
-		/* Enqueue allocated buffers */
-		if (hw->use_simple_rxtx)
-			error = virtqueue_enqueue_recv_refill_simple(vq, m);
-		else
+			/* Enqueue allocated buffers */
 			error = virtqueue_enqueue_recv_refill(vq, m);
-
-		if (error) {
-			rte_pktmbuf_free(m);
-			break;
+			if (error) {
+				rte_pktmbuf_free(m);
+				break;
+			}
+			nbufs++;
 		}
-		nbufs++;
-	}
 
-	vq_update_avail_idx(vq);
+		vq_update_avail_idx(vq);
+	}
 
 	PMD_INIT_LOG(DEBUG, "Allocated %d bufs", nbufs);
 
-	virtio_rxq_vec_setup(rxvq);
-
 	VIRTQUEUE_DUMP(vq);
 
 	return 0;
 }
 
-static void
-virtio_update_rxtx_handler(struct rte_eth_dev *dev,
-			   const struct rte_eth_txconf *tx_conf)
-{
-	uint8_t use_simple_rxtx = 0;
-	struct virtio_hw *hw = dev->data->dev_private;
-
-#if defined RTE_ARCH_X86
-	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE3))
-		use_simple_rxtx = 1;
-#elif defined RTE_ARCH_ARM64 || defined CONFIG_RTE_ARCH_ARM
-	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON))
-		use_simple_rxtx = 1;
-#endif
-	/* Use simple rx/tx func if single segment and no offloads */
-	if (use_simple_rxtx &&
-	    (tx_conf->txq_flags & VIRTIO_SIMPLE_FLAGS) == VIRTIO_SIMPLE_FLAGS &&
-	    !vtpci_with_feature(hw, VIRTIO_NET_F_MRG_RXBUF)) {
-		PMD_INIT_LOG(INFO, "Using simple rx/tx path");
-		dev->tx_pkt_burst = virtio_xmit_pkts_simple;
-		dev->rx_pkt_burst = virtio_recv_pkts_vec;
-		hw->use_simple_rxtx = use_simple_rxtx;
-	}
-}
-
 /*
  * struct rte_eth_dev *dev: Used to update dev
  * uint16_t nb_desc: Defaults to values read from config space
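A side note on the Rx queue hunks above: descriptor prefill now happens in virtio_dev_rx_queue_setup_finish(), after the device has been fully reconfigured, and the simple (vectorized) branch pins avail-ring slot i to descriptor i so virtio_rxq_rearm_vec() can refill in fixed-stride batches instead of walking a free chain. A toy model of that identity mapping follows, using simplified ring structures rather than the real vring types.

#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 8		/* toy ring; real virtqueues are larger */
#define VRING_DESC_F_WRITE 2	/* descriptor is device-writable (virtio spec) */

struct toy_desc {
	uint64_t addr;
	uint32_t len;
	uint16_t flags;
	uint16_t next;
};

int
main(void)
{
	struct toy_desc desc[RING_SIZE] = { 0 };
	uint16_t avail_ring[RING_SIZE];
	uint16_t i;

	/* Fixed 1:1 slot mapping, as in the simple-Rx branch above:
	 * avail entry i always names descriptor i, and every descriptor
	 * is marked device-writable once at setup time. Rearm then only
	 * rewrites buffer addresses, never the ring wiring. */
	for (i = 0; i < RING_SIZE; i++) {
		avail_ring[i] = i;
		desc[i].flags = VRING_DESC_F_WRITE;
	}

	for (i = 0; i < RING_SIZE; i++)
		printf("avail[%u] -> desc %u (flags 0x%x)\n",
		       (unsigned)i, (unsigned)avail_ring[i],
		       (unsigned)desc[i].flags);
	return 0;
}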
@@ -513,11 +498,12 @@ virtio_dev_tx_queue_setup(struct rte_eth_dev *dev,
 	struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
 	struct virtnet_tx *txvq;
 	uint16_t tx_free_thresh;
-	uint16_t desc_idx;
 
 	PMD_INIT_FUNC_TRACE();
 
-	virtio_update_rxtx_handler(dev, tx_conf);
+	/* cannot use simple rxtx funcs with multisegs or offloads */
+	if ((tx_conf->txq_flags & VIRTIO_SIMPLE_FLAGS) != VIRTIO_SIMPLE_FLAGS)
+		hw->use_simple_tx = 0;
 
 	if (nb_desc == 0 || nb_desc > vq->vq_nentries)
 		nb_desc = vq->vq_nentries;
@@ -542,9 +528,24 @@ virtio_dev_tx_queue_setup(struct rte_eth_dev *dev,
 
 	vq->vq_free_thresh = tx_free_thresh;
 
-	if (hw->use_simple_rxtx) {
-		uint16_t mid_idx = vq->vq_nentries >> 1;
+	dev->data->tx_queues[queue_idx] = txvq;
+	return 0;
+}
+
+int
+virtio_dev_tx_queue_setup_finish(struct rte_eth_dev *dev,
+				 uint16_t queue_idx)
+{
+	uint8_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_TQ_QUEUE_IDX;
+	struct virtio_hw *hw = dev->data->dev_private;
+	struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
+	uint16_t mid_idx = vq->vq_nentries >> 1;
+	struct virtnet_tx *txvq = &vq->txq;
+	uint16_t desc_idx;
+
+	PMD_INIT_FUNC_TRACE();
 
+	if (hw->use_simple_tx) {
 		for (desc_idx = 0; desc_idx < mid_idx; desc_idx++) {
 			vq->vq_ring.avail->ring[desc_idx] =
 				desc_idx + mid_idx;
@@ -566,7 +567,6 @@ virtio_dev_tx_queue_setup(struct rte_eth_dev *dev,
 
 	VIRTQUEUE_DUMP(vq);
 
-	dev->data->tx_queues[queue_idx] = txvq;
 	return 0;
 }
 
@@ -649,7 +649,7 @@ virtio_rx_offload(struct rte_mbuf *m, struct virtio_net_hdr *hdr)
 		 * In case of SCTP, this will be wrong since it's a CRC
 		 * but there's nothing we can do.
 		 */
-		uint16_t csum, off;
+		uint16_t csum = 0, off;
 
 		rte_raw_cksum_mbuf(m, hdr->csum_start,
 			rte_pktmbuf_pkt_len(m) - hdr->csum_start,
@@ -704,7 +704,7 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 {
 	struct virtnet_rx *rxvq = rx_queue;
 	struct virtqueue *vq = rxvq->vq;
-	struct virtio_hw *hw;
+	struct virtio_hw *hw = vq->hw;
 	struct rte_mbuf *rxm, *new_mbuf;
 	uint16_t nb_used, num, nb_rx;
 	uint32_t len[VIRTIO_MBUF_BURST_SZ];
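A side note on the Tx setup hunks above: per-CPU handler selection (virtio_update_rxtx_handler) is gone from this file; queue setup now only vetoes the simple Tx path when the queue configuration asks for multi-segment sends or offloads. A small sketch of that gate follows; the flag values are made-up stand-ins, not the real ETH_TXQ_FLAGS_* constants from rte_ethdev.h.

#include <stdint.h>
#include <stdio.h>

/* illustrative values only; see rte_ethdev.h for the real definitions */
#define TXQ_FLAGS_NOMULTSEGS 0x0001
#define TXQ_FLAGS_NOOFFLOADS 0x0f00
#define SIMPLE_FLAGS ((uint32_t)TXQ_FLAGS_NOMULTSEGS | TXQ_FLAGS_NOOFFLOADS)

int
main(void)
{
	uint32_t txq_flags = TXQ_FLAGS_NOMULTSEGS;	/* offloads still wanted */
	uint8_t use_simple_tx = 1;	/* optimistic default, set elsewhere */

	/* Same shape as the gate in virtio_dev_tx_queue_setup(): the
	 * simple Tx path handles neither multi-segment mbufs nor Tx
	 * offloads, so both "no-..." flags must be present to keep it. */
	if ((txq_flags & SIMPLE_FLAGS) != SIMPLE_FLAGS)
		use_simple_tx = 0;

	printf("use_simple_tx = %u\n", (unsigned)use_simple_tx);
	return 0;
}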
@@ -715,20 +715,23 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 	int offload;
 	struct virtio_net_hdr *hdr;
 
+	nb_rx = 0;
+	if (unlikely(hw->started == 0))
+		return nb_rx;
+
 	nb_used = VIRTQUEUE_NUSED(vq);
 
 	virtio_rmb();
 
-	num = (uint16_t)(likely(nb_used <= nb_pkts) ? nb_used : nb_pkts);
-	num = (uint16_t)(likely(num <= VIRTIO_MBUF_BURST_SZ) ? num : VIRTIO_MBUF_BURST_SZ);
+	num = likely(nb_used <= nb_pkts) ? nb_used : nb_pkts;
+	if (unlikely(num > VIRTIO_MBUF_BURST_SZ))
+		num = VIRTIO_MBUF_BURST_SZ;
 	if (likely(num > DESC_PER_CACHELINE))
 		num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);
 
 	num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len, num);
 	PMD_RX_LOG(DEBUG, "used:%d dequeue:%d", nb_used, num);
 
-	hw = vq->hw;
-	nb_rx = 0;
 	nb_enqueued = 0;
 	hdr_size = hw->vtnet_hdr_size;
 	offload = rx_offload_enabled(hw);
@@ -751,8 +754,6 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 
 		rxm->ol_flags = 0;
 		rxm->vlan_tci = 0;
-		rxm->nb_segs = 1;
-		rxm->next = NULL;
 		rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
 		rxm->data_len = (uint16_t)(len[i] - hdr_size);
 
@@ -772,7 +773,7 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 
 		rx_pkts[nb_rx++] = rxm;
 
-		rxvq->stats.bytes += rx_pkts[nb_rx - 1]->pkt_len;
+		rxvq->stats.bytes += rxm->pkt_len;
 		virtio_update_packet_stats(&rxvq->stats, rxm);
 	}
 
@@ -815,7 +816,7 @@ virtio_recv_mergeable_pkts(void *rx_queue,
 {
 	struct virtnet_rx *rxvq = rx_queue;
 	struct virtqueue *vq = rxvq->vq;
-	struct virtio_hw *hw;
+	struct virtio_hw *hw = vq->hw;
 	struct rte_mbuf *rxm, *new_mbuf;
 	uint16_t nb_used, num, nb_rx;
 	uint32_t len[VIRTIO_MBUF_BURST_SZ];
@@ -829,14 +830,16 @@ virtio_recv_mergeable_pkts(void *rx_queue,
 	uint32_t hdr_size;
 	int offload;
 
+	nb_rx = 0;
+	if (unlikely(hw->started == 0))
+		return nb_rx;
+
 	nb_used = VIRTQUEUE_NUSED(vq);
 
 	virtio_rmb();
 
 	PMD_RX_LOG(DEBUG, "used:%d", nb_used);
 
-	hw = vq->hw;
-	nb_rx = 0;
 	i = 0;
 	nb_enqueued = 0;
 	seg_num = 0;
@@ -879,7 +882,6 @@ virtio_recv_mergeable_pkts(void *rx_queue,
 
 		rxm->data_off = RTE_PKTMBUF_HEADROOM;
 		rxm->nb_segs = seg_num;
-		rxm->next = NULL;
 		rxm->ol_flags = 0;
 		rxm->vlan_tci = 0;
 		rxm->pkt_len = (uint32_t)(len[0] - hdr_size);
@@ -924,7 +926,6 @@ virtio_recv_mergeable_pkts(void *rx_queue,
 			rxm = rcv_pkts[extra_idx];
 
 			rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
-			rxm->next = NULL;
 			rxm->pkt_len = (uint32_t)(len[extra_idx]);
 			rxm->data_len = (uint16_t)(len[extra_idx]);
 
@@ -988,9 +989,12 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 	struct virtqueue *vq = txvq->vq;
 	struct virtio_hw *hw = vq->hw;
 	uint16_t hdr_size = hw->vtnet_hdr_size;
-	uint16_t nb_used, nb_tx;
+	uint16_t nb_used, nb_tx = 0;
 	int error;
 
+	if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
+		return nb_tx;
+
 	if (unlikely(nb_pkts < 1))
 		return nb_pkts;
 
@@ -1015,7 +1019,8 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 	}
 
 	/* optimize ring usage */
-	if (vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) &&
+	if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
+	     vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
 		rte_mbuf_refcnt_read(txm) == 1 &&
 		RTE_MBUF_DIRECT(txm) &&
 		txm->nb_segs == 1 &&
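A closing note on the receive-burst trimming kept in the @@ -715 hunk: after clamping num to the ring occupancy and the burst size, virtio_recv_pkts() shortens the burst so it ends exactly on a cache-line boundary of the used ring, so the next poll starts on a fresh line rather than re-reading a partially consumed one. A self-contained illustration of the arithmetic, assuming the usual 64-byte line holding eight 8-byte used-ring entries (DESC_PER_CACHELINE == 8):

#include <stdint.h>
#include <stdio.h>

#define DESC_PER_CACHELINE 8	/* 64-byte line / 8-byte used-ring element */

int
main(void)
{
	uint16_t cons_idx = 13;	/* current used-ring consumer index */
	uint16_t num = 30;	/* entries we are otherwise allowed to take */

	/* Same trimming as in virtio_recv_pkts(): drop the tail of the
	 * burst so that cons_idx + num lands on a cache-line boundary. */
	if (num > DESC_PER_CACHELINE)
		num = num - ((cons_idx + num) % DESC_PER_CACHELINE);

	/* prints: dequeue 27 entries, next consumer index 40 */
	printf("dequeue %u entries, next consumer index %u\n",
	       (unsigned)num, (unsigned)(cons_idx + num));
	return 0;
}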