4 * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
37 #include "virtqueue.h"
38 #include "virtio_logs.h"
39 #include "virtio_pci.h"
40 #include "virtio_rxtx_simple.h"
/*
 * Detach and return the first unused mbuf still attached to the virtqueue.
 * ("detatch" spelling is the historical public symbol name; do not rename.)
 *
 * Two types of mbuf to be cleaned:
 * 1) mbuf that has been consumed by backend but not used by virtio.
 * 2) mbuf that hasn't been consumed by backend.
 *
 * NOTE(review): this is an elided view of the function -- the return type,
 * the declarations of hw/start/end/type/idx, the `continue` statements, the
 * if/else braces and the final returns are not visible in this chunk.
 */
48 virtqueue_detatch_unused(struct virtqueue *vq)
50 struct rte_mbuf *cookie;
/* Classify the queue (RX/TX/control) so the simple-RX branch below is
 * taken only for receive queues. */
59 type = virtio_get_queue_type(hw, vq->vq_queue_index);
/* [start, end) is the region of slot indices whose mbufs are still free
 * (posted as available, not yet consumed).  Masking with vq_nentries - 1
 * assumes the ring size is a power of two. */
60 start = vq->vq_avail_idx & (vq->vq_nentries - 1);
61 end = (vq->vq_avail_idx + vq->vq_free_cnt) & (vq->vq_nentries - 1);
/* Scan every slot in the ring for a leftover mbuf. */
63 for (idx = 0; idx < vq->vq_nentries; idx++) {
/* Simple (vectorized) RX path keeps its mbuf pointers in sw_ring[]. */
64 if (hw->use_simple_rx && type == VTNET_RQ) {
/* Skip indices inside the free region, covering both the
 * non-wrapped (start <= end) and wrapped (start > end) layouts. */
65 if (start <= end && idx >= start && idx < end)
67 if (start > end && (idx >= start || idx < end))
69 cookie = vq->sw_ring[idx];
/* Clear the slot so the same mbuf cannot be detached twice. */
71 vq->sw_ring[idx] = NULL;
/* Generic path: the mbuf pointer is stashed in the per-descriptor
 * extra data (vq_descx[].cookie) when the buffer is enqueued. */
75 cookie = vq->vq_descx[idx].cookie;
77 vq->vq_descx[idx].cookie = NULL;
/*
 * Flush the elements in the used ring: free every mbuf the device has
 * already handed back, then (simple-RX only) rearm the ring with fresh
 * buffers and notify the device.
 *
 * NOTE(review): elided view -- the return type, the declarations of
 * nb_used/i, several braces and the function's closing lines are not
 * visible in this chunk; the definition also continues past the last
 * visible line.
 */
88 virtqueue_rxvq_flush(struct virtqueue *vq)
90 struct virtnet_rx *rxq = &vq->rxq;
91 struct virtio_hw *hw = vq->hw;
92 struct vring_used_elem *uep;
93 struct vq_desc_extra *dxp;
94 uint16_t used_idx, desc_idx;
/* Entries the device has placed in the used ring since we last consumed
 * (presumably used->idx minus vq_used_cons_idx -- macro body not visible
 * here; confirm against virtqueue.h). */
97 nb_used = VIRTQUEUE_NUSED(vq);
99 for (i = 0; i < nb_used; i++) {
/* Mask assumes vq_nentries is a power of two. */
100 used_idx = vq->vq_used_cons_idx & (vq->vq_nentries - 1);
101 uep = &vq->vq_ring.used->ring[used_idx];
/* Simple (vectorized) RX: mbufs live in sw_ring[]; free directly.
 * (The assignment of desc_idx for this branch is elided here.) */
102 if (hw->use_simple_rx) {
104 rte_pktmbuf_free(vq->sw_ring[desc_idx]);
/* Generic path: free the cookie recorded at enqueue time and return
 * the descriptor chain to the free list. */
107 desc_idx = (uint16_t)uep->id;
108 dxp = &vq->vq_descx[desc_idx];
109 if (dxp->cookie != NULL) {
110 rte_pktmbuf_free(dxp->cookie);
113 vq_ring_free_chain(vq, desc_idx);
/* Advance the consumer index past the entry just processed. */
115 vq->vq_used_cons_idx++;
/* Simple RX keeps the ring full: refill in batches of the vectorized
 * rearm threshold, kicking the device only when it asks for it. */
118 if (hw->use_simple_rx) {
119 while (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
120 virtio_rxq_rearm_vec(rxq);
121 if (virtqueue_kick_prepare(vq))
122 virtqueue_notify(vq);