1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2014 Intel Corporation
11 #include <rte_cycles.h>
12 #include <rte_memory.h>
13 #include <rte_branch_prediction.h>
14 #include <rte_mempool.h>
15 #include <rte_malloc.h>
17 #include <rte_ether.h>
18 #include <rte_ethdev_driver.h>
19 #include <rte_prefetch.h>
20 #include <rte_string_fns.h>
21 #include <rte_errno.h>
22 #include <rte_byteorder.h>
28 #include "virtio_logs.h"
29 #include "virtio_ethdev.h"
30 #include "virtio_pci.h"
31 #include "virtqueue.h"
32 #include "virtio_rxtx.h"
33 #include "virtio_rxtx_simple.h"
35 #ifdef RTE_LIBRTE_VIRTIO_DEBUG_DUMP
36 #define VIRTIO_DUMP_PACKET(m, len) rte_pktmbuf_dump(stdout, m, len)
38 #define VIRTIO_DUMP_PACKET(m, len) do { } while (0)
42 virtio_dev_rx_queue_done(void *rxq, uint16_t offset)
44 struct virtnet_rx *rxvq = rxq;
45 struct virtqueue *vq = rxvq->vq;
47 return VIRTQUEUE_NUSED(vq) >= offset;
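/*
 * In-order descriptor release: when buffers are consumed by the device in
 * ring order, freeing them only requires bumping the free counter and
 * moving the tail index forward; no per-chain bookkeeping is needed.
 */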
51 vq_ring_free_inorder(struct virtqueue *vq, uint16_t desc_idx, uint16_t num)
53 vq->vq_free_cnt += num;
54 vq->vq_desc_tail_idx = desc_idx & (vq->vq_nentries - 1);
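/*
 * Return a descriptor chain to the free list: walk the VRING_DESC_F_NEXT
 * links to find the last descriptor of the chain, append the chain to the
 * current free list (or make it the new head if the list was empty) and
 * terminate it with VQ_RING_DESC_CHAIN_END.
 */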
58 vq_ring_free_chain(struct virtqueue *vq, uint16_t desc_idx)
60 struct vring_desc *dp, *dp_tail;
61 struct vq_desc_extra *dxp;
62 uint16_t desc_idx_last = desc_idx;
64 dp = &vq->vq_ring.desc[desc_idx];
65 dxp = &vq->vq_descx[desc_idx];
66 vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt + dxp->ndescs);
67 if ((dp->flags & VRING_DESC_F_INDIRECT) == 0) {
68 while (dp->flags & VRING_DESC_F_NEXT) {
69 desc_idx_last = dp->next;
70 dp = &vq->vq_ring.desc[dp->next];
76 * We must append the existing free chain, if any, to the end of
77 * the newly freed chain. If the virtqueue was completely used, then
78 * head would be VQ_RING_DESC_CHAIN_END (ASSERTed above).
80 if (vq->vq_desc_tail_idx == VQ_RING_DESC_CHAIN_END) {
81 vq->vq_desc_head_idx = desc_idx;
83 dp_tail = &vq->vq_ring.desc[vq->vq_desc_tail_idx];
84 dp_tail->next = desc_idx;
87 vq->vq_desc_tail_idx = desc_idx_last;
88 dp->next = VQ_RING_DESC_CHAIN_END;
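/*
 * Dequeue up to `num` received buffers from the used ring. Each used
 * element carries the id of the head descriptor, which indexes vq_descx[]
 * to recover the mbuf ("cookie") posted there; the descriptor chain is
 * then returned to the free list.
 */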
92 virtqueue_dequeue_burst_rx(struct virtqueue *vq, struct rte_mbuf **rx_pkts,
93 uint32_t *len, uint16_t num)
95 struct vring_used_elem *uep;
96 struct rte_mbuf *cookie;
97 uint16_t used_idx, desc_idx;
100 /* Caller does the check */
101 for (i = 0; i < num ; i++) {
102 used_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
103 uep = &vq->vq_ring.used->ring[used_idx];
104 desc_idx = (uint16_t) uep->id;
106 cookie = (struct rte_mbuf *)vq->vq_descx[desc_idx].cookie;
108 if (unlikely(cookie == NULL)) {
109 PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
110 vq->vq_used_cons_idx);
114 rte_prefetch0(cookie);
115 rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));
117 vq->vq_used_cons_idx++;
118 vq_ring_free_chain(vq, desc_idx);
119 vq->vq_descx[desc_idx].cookie = NULL;
126 virtqueue_dequeue_rx_inorder(struct virtqueue *vq,
127 struct rte_mbuf **rx_pkts,
131 struct vring_used_elem *uep;
132 struct rte_mbuf *cookie;
133 uint16_t used_idx = 0;
136 if (unlikely(num == 0))
139 for (i = 0; i < num; i++) {
140 used_idx = vq->vq_used_cons_idx & (vq->vq_nentries - 1);
141 /* Desc idx same as used idx */
142 uep = &vq->vq_ring.used->ring[used_idx];
144 cookie = (struct rte_mbuf *)vq->vq_descx[used_idx].cookie;
146 if (unlikely(cookie == NULL)) {
147 PMD_DRV_LOG(ERR, "vring descriptor with no mbuf cookie at %u",
148 vq->vq_used_cons_idx);
152 rte_prefetch0(cookie);
153 rte_packet_prefetch(rte_pktmbuf_mtod(cookie, void *));
155 vq->vq_used_cons_idx++;
156 vq->vq_descx[used_idx].cookie = NULL;
159 vq_ring_free_inorder(vq, used_idx, i);
163 #ifndef DEFAULT_TX_FREE_THRESH
164 #define DEFAULT_TX_FREE_THRESH 32
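/*
 * DEFAULT_TX_FREE_THRESH is only a fallback: virtio_dev_tx_queue_setup()
 * uses RTE_MIN(vq_nentries / 4, DEFAULT_TX_FREE_THRESH) when the
 * application passes tx_free_thresh == 0, and the transmit paths start
 * reclaiming descriptors once the number of used entries exceeds
 * vq_nentries - vq_free_thresh.
 */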
167 /* Cleanup from completed transmits. */
169 virtio_xmit_cleanup(struct virtqueue *vq, uint16_t num)
171 uint16_t i, used_idx, desc_idx;
172 for (i = 0; i < num; i++) {
173 struct vring_used_elem *uep;
174 struct vq_desc_extra *dxp;
176 used_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1));
177 uep = &vq->vq_ring.used->ring[used_idx];
179 desc_idx = (uint16_t) uep->id;
180 dxp = &vq->vq_descx[desc_idx];
181 vq->vq_used_cons_idx++;
182 vq_ring_free_chain(vq, desc_idx);
184 if (dxp->cookie != NULL) {
185 rte_pktmbuf_free(dxp->cookie);
191 /* Cleanup from completed inorder transmits. */
193 virtio_xmit_cleanup_inorder(struct virtqueue *vq, uint16_t num)
195 uint16_t i, idx = vq->vq_used_cons_idx;
196 int16_t free_cnt = 0;
197 struct vq_desc_extra *dxp = NULL;
199 if (unlikely(num == 0))
202 for (i = 0; i < num; i++) {
203 dxp = &vq->vq_descx[idx++ & (vq->vq_nentries - 1)];
204 free_cnt += dxp->ndescs;
205 if (dxp->cookie != NULL) {
206 rte_pktmbuf_free(dxp->cookie);
211 vq->vq_free_cnt += free_cnt;
212 vq->vq_used_cons_idx = idx;
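/*
 * Post `num` receive mbufs for the in-order path. Descriptors are taken
 * sequentially from vq_desc_head_idx, and each buffer is offered with the
 * virtio net header carved out of the mbuf headroom, i.e. the descriptor
 * address starts vtnet_hdr_size bytes before the packet data.
 */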
216 virtqueue_enqueue_refill_inorder(struct virtqueue *vq,
217 struct rte_mbuf **cookies,
220 struct vq_desc_extra *dxp;
221 struct virtio_hw *hw = vq->hw;
222 struct vring_desc *start_dp;
223 uint16_t head_idx, idx, i = 0;
225 if (unlikely(vq->vq_free_cnt == 0))
227 if (unlikely(vq->vq_free_cnt < num))
230 head_idx = vq->vq_desc_head_idx & (vq->vq_nentries - 1);
231 start_dp = vq->vq_ring.desc;
234 idx = head_idx & (vq->vq_nentries - 1);
235 dxp = &vq->vq_descx[idx];
236 dxp->cookie = (void *)cookies[i];
240 VIRTIO_MBUF_ADDR(cookies[i], vq) +
241 RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
243 cookies[i]->buf_len -
244 RTE_PKTMBUF_HEADROOM +
246 start_dp[idx].flags = VRING_DESC_F_WRITE;
248 vq_update_avail_ring(vq, idx);
253 vq->vq_desc_head_idx += num;
254 vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
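/*
 * Post a single receive mbuf on the split ring. The descriptor address is
 * moved back by vtnet_hdr_size so the device writes the virtio net header
 * into the mbuf headroom, and the descriptor is marked VRING_DESC_F_WRITE
 * since it is device-writable.
 */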
259 virtqueue_enqueue_recv_refill(struct virtqueue *vq, struct rte_mbuf *cookie)
261 struct vq_desc_extra *dxp;
262 struct virtio_hw *hw = vq->hw;
263 struct vring_desc *start_dp;
265 uint16_t head_idx, idx;
267 if (unlikely(vq->vq_free_cnt == 0))
269 if (unlikely(vq->vq_free_cnt < needed))
272 head_idx = vq->vq_desc_head_idx;
273 if (unlikely(head_idx >= vq->vq_nentries))
277 dxp = &vq->vq_descx[idx];
278 dxp->cookie = (void *)cookie;
279 dxp->ndescs = needed;
281 start_dp = vq->vq_ring.desc;
283 VIRTIO_MBUF_ADDR(cookie, vq) +
284 RTE_PKTMBUF_HEADROOM - hw->vtnet_hdr_size;
286 cookie->buf_len - RTE_PKTMBUF_HEADROOM + hw->vtnet_hdr_size;
287 start_dp[idx].flags = VRING_DESC_F_WRITE;
288 idx = start_dp[idx].next;
289 vq->vq_desc_head_idx = idx;
290 if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
291 vq->vq_desc_tail_idx = idx;
292 vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed);
293 vq_update_avail_ring(vq, head_idx);
298 /* When doing TSO, the IP length is not included in the pseudo header
299 * checksum of the packet given to the PMD, but for virtio it is expected.
303 virtio_tso_fix_cksum(struct rte_mbuf *m)
305 /* common case: header is not fragmented */
306 if (likely(rte_pktmbuf_data_len(m) >= m->l2_len + m->l3_len +
308 struct ipv4_hdr *iph;
309 struct ipv6_hdr *ip6h;
311 uint16_t prev_cksum, new_cksum, ip_len, ip_paylen;
314 iph = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *, m->l2_len);
315 th = RTE_PTR_ADD(iph, m->l3_len);
316 if ((iph->version_ihl >> 4) == 4) {
317 iph->hdr_checksum = 0;
318 iph->hdr_checksum = rte_ipv4_cksum(iph);
319 ip_len = iph->total_length;
320 ip_paylen = rte_cpu_to_be_16(rte_be_to_cpu_16(ip_len) -
323 ip6h = (struct ipv6_hdr *)iph;
324 ip_paylen = ip6h->payload_len;
327 /* calculate the new phdr checksum, this time including ip_paylen */
328 prev_cksum = th->cksum;
331 tmp = (tmp & 0xffff) + (tmp >> 16);
334 /* replace it in the packet */
335 th->cksum = new_cksum;
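/*
 * The fix-up folds the IP payload length into the existing pseudo-header
 * checksum with an end-around carry. Illustrative numbers: 0xfff0 + 0x0020
 * = 0x10010, and (0x10010 & 0xffff) + (0x10010 >> 16) = 0x0010 + 0x1 =
 * 0x0011, which becomes the new checksum written back to the TCP header.
 */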
340 /* avoid the write when the value is unchanged, to lessen cache issues */
341 #define ASSIGN_UNLESS_EQUAL(var, val) do { \
342 if ((var) != (val)) \
347 virtqueue_xmit_offload(struct virtio_net_hdr *hdr,
348 struct rte_mbuf *cookie,
352 if (cookie->ol_flags & PKT_TX_TCP_SEG)
353 cookie->ol_flags |= PKT_TX_TCP_CKSUM;
355 switch (cookie->ol_flags & PKT_TX_L4_MASK) {
356 case PKT_TX_UDP_CKSUM:
357 hdr->csum_start = cookie->l2_len + cookie->l3_len;
358 hdr->csum_offset = offsetof(struct udp_hdr,
360 hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
363 case PKT_TX_TCP_CKSUM:
364 hdr->csum_start = cookie->l2_len + cookie->l3_len;
365 hdr->csum_offset = offsetof(struct tcp_hdr, cksum);
366 hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
370 ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0);
371 ASSIGN_UNLESS_EQUAL(hdr->csum_offset, 0);
372 ASSIGN_UNLESS_EQUAL(hdr->flags, 0);
376 /* TCP Segmentation Offload */
377 if (cookie->ol_flags & PKT_TX_TCP_SEG) {
378 virtio_tso_fix_cksum(cookie);
379 hdr->gso_type = (cookie->ol_flags & PKT_TX_IPV6) ?
380 VIRTIO_NET_HDR_GSO_TCPV6 :
381 VIRTIO_NET_HDR_GSO_TCPV4;
382 hdr->gso_size = cookie->tso_segsz;
388 ASSIGN_UNLESS_EQUAL(hdr->gso_type, 0);
389 ASSIGN_UNLESS_EQUAL(hdr->gso_size, 0);
390 ASSIGN_UNLESS_EQUAL(hdr->hdr_len, 0);
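/*
 * virtqueue_xmit_offload() above translates mbuf TX flags into the virtio
 * net header (csum_start/csum_offset/NEEDS_CSUM for checksum offload,
 * gso_type/gso_size for TSO), clearing unused fields with
 * ASSIGN_UNLESS_EQUAL() so an already-zero header is not dirtied.
 */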
396 virtqueue_enqueue_xmit_inorder(struct virtnet_tx *txvq,
397 struct rte_mbuf **cookies,
400 struct vq_desc_extra *dxp;
401 struct virtqueue *vq = txvq->vq;
402 struct vring_desc *start_dp;
403 struct virtio_net_hdr *hdr;
405 uint16_t head_size = vq->hw->vtnet_hdr_size;
408 idx = vq->vq_desc_head_idx;
409 start_dp = vq->vq_ring.desc;
412 idx = idx & (vq->vq_nentries - 1);
413 dxp = &vq->vq_descx[vq->vq_avail_idx & (vq->vq_nentries - 1)];
414 dxp->cookie = (void *)cookies[i];
417 hdr = (struct virtio_net_hdr *)
418 rte_pktmbuf_prepend(cookies[i], head_size);
419 cookies[i]->pkt_len -= head_size;
421 /* if offload is disabled, the header is not zeroed below, so do it now */
422 if (!vq->hw->has_tx_offload) {
423 ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0);
424 ASSIGN_UNLESS_EQUAL(hdr->csum_offset, 0);
425 ASSIGN_UNLESS_EQUAL(hdr->flags, 0);
426 ASSIGN_UNLESS_EQUAL(hdr->gso_type, 0);
427 ASSIGN_UNLESS_EQUAL(hdr->gso_size, 0);
428 ASSIGN_UNLESS_EQUAL(hdr->hdr_len, 0);
431 virtqueue_xmit_offload(hdr, cookies[i],
432 vq->hw->has_tx_offload);
434 start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookies[i], vq);
435 start_dp[idx].len = cookies[i]->data_len;
436 start_dp[idx].flags = 0;
438 vq_update_avail_ring(vq, idx);
444 vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - num);
445 vq->vq_desc_head_idx = idx & (vq->vq_nentries - 1);
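/*
 * General transmit enqueue. Three descriptor layouts are possible:
 * - can_push:     the virtio net header is prepended into the mbuf
 *                 headroom, so only data descriptors are consumed;
 * - use_indirect: one VRING_DESC_F_INDIRECT slot points at the per-slot
 *                 indirect table in the reserved tx region, whose first
 *                 entry is preset to the header;
 * - default:      the first slot points at the header kept in the
 *                 reserved region and the data segments are chained
 *                 behind it with VRING_DESC_F_NEXT.
 */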
449 virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie,
450 uint16_t needed, int use_indirect, int can_push,
453 struct virtio_tx_region *txr = txvq->virtio_net_hdr_mz->addr;
454 struct vq_desc_extra *dxp;
455 struct virtqueue *vq = txvq->vq;
456 struct vring_desc *start_dp;
457 uint16_t seg_num = cookie->nb_segs;
458 uint16_t head_idx, idx;
459 uint16_t head_size = vq->hw->vtnet_hdr_size;
460 struct virtio_net_hdr *hdr;
462 head_idx = vq->vq_desc_head_idx;
465 dxp = &vq->vq_descx[vq->vq_avail_idx & (vq->vq_nentries - 1)];
467 dxp = &vq->vq_descx[idx];
468 dxp->cookie = (void *)cookie;
469 dxp->ndescs = needed;
471 start_dp = vq->vq_ring.desc;
474 /* prepend cannot fail, checked by caller */
475 hdr = (struct virtio_net_hdr *)
476 rte_pktmbuf_prepend(cookie, head_size);
477 /* rte_pktmbuf_prepend() adds the header size to the packet length,
478 * which we do not want here. The subtraction below restores the original pkt size.
480 cookie->pkt_len -= head_size;
482 /* if offload is disabled, the header is not zeroed below, so do it now */
483 if (!vq->hw->has_tx_offload) {
484 ASSIGN_UNLESS_EQUAL(hdr->csum_start, 0);
485 ASSIGN_UNLESS_EQUAL(hdr->csum_offset, 0);
486 ASSIGN_UNLESS_EQUAL(hdr->flags, 0);
487 ASSIGN_UNLESS_EQUAL(hdr->gso_type, 0);
488 ASSIGN_UNLESS_EQUAL(hdr->gso_size, 0);
489 ASSIGN_UNLESS_EQUAL(hdr->hdr_len, 0);
491 } else if (use_indirect) {
492 /* setup tx ring slot to point to indirect
493 * descriptor list stored in reserved region.
495 * the first slot in indirect ring is already preset
496 * to point to the header in reserved region
498 start_dp[idx].addr = txvq->virtio_net_hdr_mem +
499 RTE_PTR_DIFF(&txr[idx].tx_indir, txr);
500 start_dp[idx].len = (seg_num + 1) * sizeof(struct vring_desc);
501 start_dp[idx].flags = VRING_DESC_F_INDIRECT;
502 hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;
504 /* the loop below fills in the rest of the indirect descriptors */
505 start_dp = txr[idx].tx_indir;
508 /* setup first tx ring slot to point to header
509 * stored in reserved region.
511 start_dp[idx].addr = txvq->virtio_net_hdr_mem +
512 RTE_PTR_DIFF(&txr[idx].tx_hdr, txr);
513 start_dp[idx].len = vq->hw->vtnet_hdr_size;
514 start_dp[idx].flags = VRING_DESC_F_NEXT;
515 hdr = (struct virtio_net_hdr *)&txr[idx].tx_hdr;
517 idx = start_dp[idx].next;
520 virtqueue_xmit_offload(hdr, cookie, vq->hw->has_tx_offload);
523 start_dp[idx].addr = VIRTIO_MBUF_DATA_DMA_ADDR(cookie, vq);
524 start_dp[idx].len = cookie->data_len;
525 start_dp[idx].flags = cookie->next ? VRING_DESC_F_NEXT : 0;
526 idx = start_dp[idx].next;
527 } while ((cookie = cookie->next) != NULL);
530 idx = vq->vq_ring.desc[head_idx].next;
532 vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed);
534 vq->vq_desc_head_idx = idx;
535 vq_update_avail_ring(vq, head_idx);
538 if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END)
539 vq->vq_desc_tail_idx = idx;
544 virtio_dev_cq_start(struct rte_eth_dev *dev)
546 struct virtio_hw *hw = dev->data->dev_private;
548 if (hw->cvq && hw->cvq->vq) {
549 rte_spinlock_init(&hw->cvq->lock);
550 VIRTQUEUE_DUMP((struct virtqueue *)hw->cvq->vq);
555 virtio_dev_rx_queue_setup(struct rte_eth_dev *dev,
558 unsigned int socket_id __rte_unused,
559 const struct rte_eth_rxconf *rx_conf __rte_unused,
560 struct rte_mempool *mp)
562 uint16_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;
563 struct virtio_hw *hw = dev->data->dev_private;
564 struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
565 struct virtnet_rx *rxvq;
567 PMD_INIT_FUNC_TRACE();
569 if (nb_desc == 0 || nb_desc > vq->vq_nentries)
570 nb_desc = vq->vq_nentries;
571 vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc);
574 rxvq->queue_id = queue_idx;
576 if (rxvq->mpool == NULL) {
577 rte_exit(EXIT_FAILURE,
578 "Cannot allocate mbufs for rx virtqueue");
581 dev->data->rx_queues[queue_idx] = rxvq;
587 virtio_dev_rx_queue_setup_finish(struct rte_eth_dev *dev, uint16_t queue_idx)
589 uint16_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX;
590 struct virtio_hw *hw = dev->data->dev_private;
591 struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
592 struct virtnet_rx *rxvq = &vq->rxq;
597 PMD_INIT_FUNC_TRACE();
599 /* Allocate blank mbufs for each rx descriptor */
602 if (hw->use_simple_rx) {
603 for (desc_idx = 0; desc_idx < vq->vq_nentries;
605 vq->vq_ring.avail->ring[desc_idx] = desc_idx;
606 vq->vq_ring.desc[desc_idx].flags =
610 virtio_rxq_vec_setup(rxvq);
613 memset(&rxvq->fake_mbuf, 0, sizeof(rxvq->fake_mbuf));
614 for (desc_idx = 0; desc_idx < RTE_PMD_VIRTIO_RX_MAX_BURST;
616 vq->sw_ring[vq->vq_nentries + desc_idx] =
620 if (hw->use_simple_rx) {
621 while (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) {
622 virtio_rxq_rearm_vec(rxvq);
623 nbufs += RTE_VIRTIO_VPMD_RX_REARM_THRESH;
625 } else if (hw->use_inorder_rx) {
626 if ((!virtqueue_full(vq))) {
627 uint16_t free_cnt = vq->vq_free_cnt;
628 struct rte_mbuf *pkts[free_cnt];
630 if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, pkts,
632 error = virtqueue_enqueue_refill_inorder(vq,
635 if (unlikely(error)) {
636 for (i = 0; i < free_cnt; i++)
637 rte_pktmbuf_free(pkts[i]);
642 vq_update_avail_idx(vq);
645 while (!virtqueue_full(vq)) {
646 m = rte_mbuf_raw_alloc(rxvq->mpool);
650 /* Enqueue allocated buffers */
651 error = virtqueue_enqueue_recv_refill(vq, m);
659 vq_update_avail_idx(vq);
662 PMD_INIT_LOG(DEBUG, "Allocated %d bufs", nbufs);
670 * struct rte_eth_dev *dev: Used to update dev
671 * uint16_t nb_desc: Defaults to values read from config space
672 * unsigned int socket_id: Used to allocate memzone
673 * const struct rte_eth_txconf *tx_conf: Used to setup tx engine
674 * uint16_t queue_idx: Just used as an index in dev txq list
677 virtio_dev_tx_queue_setup(struct rte_eth_dev *dev,
680 unsigned int socket_id __rte_unused,
681 const struct rte_eth_txconf *tx_conf)
683 uint8_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_TQ_QUEUE_IDX;
684 struct virtio_hw *hw = dev->data->dev_private;
685 struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
686 struct virtnet_tx *txvq;
687 uint16_t tx_free_thresh;
689 PMD_INIT_FUNC_TRACE();
691 if (nb_desc == 0 || nb_desc > vq->vq_nentries)
692 nb_desc = vq->vq_nentries;
693 vq->vq_free_cnt = RTE_MIN(vq->vq_free_cnt, nb_desc);
696 txvq->queue_id = queue_idx;
698 tx_free_thresh = tx_conf->tx_free_thresh;
699 if (tx_free_thresh == 0)
701 RTE_MIN(vq->vq_nentries / 4, DEFAULT_TX_FREE_THRESH);
703 if (tx_free_thresh >= (vq->vq_nentries - 3)) {
704 RTE_LOG(ERR, PMD, "tx_free_thresh must be less than the "
705 "number of TX entries minus 3 (%u)."
706 " (tx_free_thresh=%u port=%u queue=%u)\n",
708 tx_free_thresh, dev->data->port_id, queue_idx);
712 vq->vq_free_thresh = tx_free_thresh;
714 dev->data->tx_queues[queue_idx] = txvq;
719 virtio_dev_tx_queue_setup_finish(struct rte_eth_dev *dev,
722 uint8_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_TQ_QUEUE_IDX;
723 struct virtio_hw *hw = dev->data->dev_private;
724 struct virtqueue *vq = hw->vqs[vtpci_queue_idx];
726 PMD_INIT_FUNC_TRACE();
728 if (hw->use_inorder_tx)
729 vq->vq_ring.desc[vq->vq_nentries - 1].next = 0;
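/*
 * Pointing the last descriptor's next back at index 0 above closes the
 * descriptor ring into one circular chain, which is sufficient for the
 * in-order transmit path since slots are always consumed and reclaimed
 * sequentially.
 */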
737 virtio_discard_rxbuf(struct virtqueue *vq, struct rte_mbuf *m)
741 * Requeue the discarded mbuf. This should always be
742 * successful since it was just dequeued.
744 error = virtqueue_enqueue_recv_refill(vq, m);
746 if (unlikely(error)) {
747 RTE_LOG(ERR, PMD, "cannot requeue discarded mbuf");
753 virtio_discard_rxbuf_inorder(struct virtqueue *vq, struct rte_mbuf *m)
757 error = virtqueue_enqueue_refill_inorder(vq, &m, 1);
758 if (unlikely(error)) {
759 RTE_LOG(ERR, PMD, "cannot requeue discarded mbuf");
765 virtio_update_packet_stats(struct virtnet_stats *stats, struct rte_mbuf *mbuf)
767 uint32_t s = mbuf->pkt_len;
768 struct ether_addr *ea;
771 stats->size_bins[1]++;
772 } else if (s > 64 && s < 1024) {
775 /* count zeros, and offset into correct bin */
776 bin = (sizeof(s) * 8) - __builtin_clz(s) - 5;
777 stats->size_bins[bin]++;
780 stats->size_bins[0]++;
782 stats->size_bins[6]++;
784 stats->size_bins[7]++;
787 ea = rte_pktmbuf_mtod(mbuf, struct ether_addr *);
788 if (is_multicast_ether_addr(ea)) {
789 if (is_broadcast_ether_addr(ea))
797 virtio_rx_stats_updated(struct virtnet_rx *rxvq, struct rte_mbuf *m)
799 VIRTIO_DUMP_PACKET(m, m->data_len);
801 rxvq->stats.bytes += m->pkt_len;
802 virtio_update_packet_stats(&rxvq->stats, m);
805 /* Optionally fill offload information from the virtio net header into the mbuf */
807 virtio_rx_offload(struct rte_mbuf *m, struct virtio_net_hdr *hdr)
809 struct rte_net_hdr_lens hdr_lens;
810 uint32_t hdrlen, ptype;
811 int l4_supported = 0;
814 if (hdr->flags == 0 && hdr->gso_type == VIRTIO_NET_HDR_GSO_NONE)
817 m->ol_flags |= PKT_RX_IP_CKSUM_UNKNOWN;
819 ptype = rte_net_get_ptype(m, &hdr_lens, RTE_PTYPE_ALL_MASK);
820 m->packet_type = ptype;
821 if ((ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP ||
822 (ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_UDP ||
823 (ptype & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_SCTP)
826 if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
827 hdrlen = hdr_lens.l2_len + hdr_lens.l3_len + hdr_lens.l4_len;
828 if (hdr->csum_start <= hdrlen && l4_supported) {
829 m->ol_flags |= PKT_RX_L4_CKSUM_NONE;
831 /* Unknown proto or tunnel, do sw cksum. We can assume
832 * the cksum field is in the first segment since the
833 * buffers we provided to the host are large enough.
834 * In case of SCTP, this will be wrong since it's a CRC
835 * but there's nothing we can do.
837 uint16_t csum = 0, off;
839 rte_raw_cksum_mbuf(m, hdr->csum_start,
840 rte_pktmbuf_pkt_len(m) - hdr->csum_start,
842 if (likely(csum != 0xffff))
844 off = hdr->csum_offset + hdr->csum_start;
845 if (rte_pktmbuf_data_len(m) >= off + 1)
846 *rte_pktmbuf_mtod_offset(m, uint16_t *,
849 } else if (hdr->flags & VIRTIO_NET_HDR_F_DATA_VALID && l4_supported) {
850 m->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
853 /* GSO request, save required information in mbuf */
854 if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
855 /* Check unsupported modes */
856 if ((hdr->gso_type & VIRTIO_NET_HDR_GSO_ECN) ||
857 (hdr->gso_size == 0)) {
861 /* Update mss length in mbuf */
862 m->tso_segsz = hdr->gso_size;
863 switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
864 case VIRTIO_NET_HDR_GSO_TCPV4:
865 case VIRTIO_NET_HDR_GSO_TCPV6:
866 m->ol_flags |= PKT_RX_LRO | \
867 PKT_RX_L4_CKSUM_NONE;
877 #define VIRTIO_MBUF_BURST_SZ 64
878 #define DESC_PER_CACHELINE (RTE_CACHE_LINE_SIZE / sizeof(struct vring_desc))
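/*
 * With a 64-byte cache line and a 16-byte struct vring_desc this yields 4
 * descriptors per cache line; virtio_recv_pkts() trims its burst so that
 * vq_used_cons_idx stops on such a boundary, keeping used-ring accesses
 * cache-line aligned.
 */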
880 virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
882 struct virtnet_rx *rxvq = rx_queue;
883 struct virtqueue *vq = rxvq->vq;
884 struct virtio_hw *hw = vq->hw;
885 struct rte_mbuf *rxm, *new_mbuf;
886 uint16_t nb_used, num, nb_rx;
887 uint32_t len[VIRTIO_MBUF_BURST_SZ];
888 struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
890 uint32_t i, nb_enqueued;
892 struct virtio_net_hdr *hdr;
895 if (unlikely(hw->started == 0))
898 nb_used = VIRTQUEUE_NUSED(vq);
902 num = likely(nb_used <= nb_pkts) ? nb_used : nb_pkts;
903 if (unlikely(num > VIRTIO_MBUF_BURST_SZ))
904 num = VIRTIO_MBUF_BURST_SZ;
905 if (likely(num > DESC_PER_CACHELINE))
906 num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE);
908 num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len, num);
909 PMD_RX_LOG(DEBUG, "used:%d dequeue:%d", nb_used, num);
912 hdr_size = hw->vtnet_hdr_size;
914 for (i = 0; i < num ; i++) {
917 PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
919 if (unlikely(len[i] < hdr_size + ETHER_HDR_LEN)) {
920 PMD_RX_LOG(ERR, "Packet drop");
922 virtio_discard_rxbuf(vq, rxm);
923 rxvq->stats.errors++;
927 rxm->port = rxvq->port_id;
928 rxm->data_off = RTE_PKTMBUF_HEADROOM;
932 rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
933 rxm->data_len = (uint16_t)(len[i] - hdr_size);
935 hdr = (struct virtio_net_hdr *)((char *)rxm->buf_addr +
936 RTE_PKTMBUF_HEADROOM - hdr_size);
941 if (hw->has_rx_offload && virtio_rx_offload(rxm, hdr) < 0) {
942 virtio_discard_rxbuf(vq, rxm);
943 rxvq->stats.errors++;
947 virtio_rx_stats_updated(rxvq, rxm);
949 rx_pkts[nb_rx++] = rxm;
952 rxvq->stats.packets += nb_rx;
954 /* Allocate new mbuf for the used descriptor */
955 while (likely(!virtqueue_full(vq))) {
956 new_mbuf = rte_mbuf_raw_alloc(rxvq->mpool);
957 if (unlikely(new_mbuf == NULL)) {
958 struct rte_eth_dev *dev
959 = &rte_eth_devices[rxvq->port_id];
960 dev->data->rx_mbuf_alloc_failed++;
963 error = virtqueue_enqueue_recv_refill(vq, new_mbuf);
964 if (unlikely(error)) {
965 rte_pktmbuf_free(new_mbuf);
971 if (likely(nb_enqueued)) {
972 vq_update_avail_idx(vq);
974 if (unlikely(virtqueue_kick_prepare(vq))) {
975 virtqueue_notify(vq);
976 PMD_RX_LOG(DEBUG, "Notified");
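/*
 * Mergeable in-order receive: the first buffer of every packet starts with
 * a virtio_net_hdr_mrg_rxbuf whose num_buffers field tells how many used
 * entries belong to the packet; the remaining buffers are appended to the
 * head mbuf (pkt_len/data_len accumulated) until seg_res reaches zero.
 */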
984 virtio_recv_mergeable_pkts_inorder(void *rx_queue,
985 struct rte_mbuf **rx_pkts,
988 struct virtnet_rx *rxvq = rx_queue;
989 struct virtqueue *vq = rxvq->vq;
990 struct virtio_hw *hw = vq->hw;
991 struct rte_mbuf *rxm;
992 struct rte_mbuf *prev;
993 uint16_t nb_used, num, nb_rx;
994 uint32_t len[VIRTIO_MBUF_BURST_SZ];
995 struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
997 uint32_t nb_enqueued;
1004 if (unlikely(hw->started == 0))
1007 nb_used = VIRTQUEUE_NUSED(vq);
1008 nb_used = RTE_MIN(nb_used, nb_pkts);
1009 nb_used = RTE_MIN(nb_used, VIRTIO_MBUF_BURST_SZ);
1013 PMD_RX_LOG(DEBUG, "used:%d", nb_used);
1018 hdr_size = hw->vtnet_hdr_size;
1020 num = virtqueue_dequeue_rx_inorder(vq, rcv_pkts, len, nb_used);
1022 for (i = 0; i < num; i++) {
1023 struct virtio_net_hdr_mrg_rxbuf *header;
1025 PMD_RX_LOG(DEBUG, "dequeue:%d", num);
1026 PMD_RX_LOG(DEBUG, "packet len:%d", len[i]);
1030 if (unlikely(len[i] < hdr_size + ETHER_HDR_LEN)) {
1031 PMD_RX_LOG(ERR, "Packet drop");
1033 virtio_discard_rxbuf_inorder(vq, rxm);
1034 rxvq->stats.errors++;
1038 header = (struct virtio_net_hdr_mrg_rxbuf *)
1039 ((char *)rxm->buf_addr + RTE_PKTMBUF_HEADROOM
1041 seg_num = header->num_buffers;
1046 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1047 rxm->nb_segs = seg_num;
1050 rxm->pkt_len = (uint32_t)(len[i] - hdr_size);
1051 rxm->data_len = (uint16_t)(len[i] - hdr_size);
1053 rxm->port = rxvq->port_id;
1055 rx_pkts[nb_rx] = rxm;
1058 if (vq->hw->has_rx_offload &&
1059 virtio_rx_offload(rxm, &header->hdr) < 0) {
1060 virtio_discard_rxbuf_inorder(vq, rxm);
1061 rxvq->stats.errors++;
1066 rte_vlan_strip(rx_pkts[nb_rx]);
1068 seg_res = seg_num - 1;
1070 /* Merge remaining segments */
1071 while (seg_res != 0 && i < (num - 1)) {
1075 rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
1076 rxm->pkt_len = (uint32_t)(len[i]);
1077 rxm->data_len = (uint16_t)(len[i]);
1079 rx_pkts[nb_rx]->pkt_len += (uint32_t)(len[i]);
1080 rx_pkts[nb_rx]->data_len += (uint16_t)(len[i]);
1090 virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1095 /* The last packet may still need more segments merged */
1096 while (seg_res != 0) {
1097 uint16_t rcv_cnt = RTE_MIN((uint16_t)seg_res,
1098 VIRTIO_MBUF_BURST_SZ);
1100 prev = rcv_pkts[nb_rx];
1101 if (likely(VIRTQUEUE_NUSED(vq) >= rcv_cnt)) {
1103 num = virtqueue_dequeue_rx_inorder(vq, rcv_pkts, len,
1105 uint16_t extra_idx = 0;
1108 while (extra_idx < rcv_cnt) {
1109 rxm = rcv_pkts[extra_idx];
1111 RTE_PKTMBUF_HEADROOM - hdr_size;
1112 rxm->pkt_len = (uint32_t)(len[extra_idx]);
1113 rxm->data_len = (uint16_t)(len[extra_idx]);
1116 rx_pkts[nb_rx]->pkt_len += len[extra_idx];
1117 rx_pkts[nb_rx]->data_len += len[extra_idx];
1123 virtio_rx_stats_updated(rxvq, rx_pkts[nb_rx]);
1128 "No enough segments for packet.");
1129 virtio_discard_rxbuf_inorder(vq, prev);
1130 rxvq->stats.errors++;
1135 rxvq->stats.packets += nb_rx;
1137 /* Allocate new mbuf for the used descriptor */
1139 if (likely(!virtqueue_full(vq))) {
1140 /* free_cnt may include mrg descs */
1141 uint16_t free_cnt = vq->vq_free_cnt;
1142 struct rte_mbuf *new_pkts[free_cnt];
1144 if (!rte_pktmbuf_alloc_bulk(rxvq->mpool, new_pkts, free_cnt)) {
1145 error = virtqueue_enqueue_refill_inorder(vq, new_pkts,
1147 if (unlikely(error)) {
1148 for (i = 0; i < free_cnt; i++)
1149 rte_pktmbuf_free(new_pkts[i]);
1151 nb_enqueued += free_cnt;
1153 struct rte_eth_dev *dev =
1154 &rte_eth_devices[rxvq->port_id];
1155 dev->data->rx_mbuf_alloc_failed += free_cnt;
1159 if (likely(nb_enqueued)) {
1160 vq_update_avail_idx(vq);
1162 if (unlikely(virtqueue_kick_prepare(vq))) {
1163 virtqueue_notify(vq);
1164 PMD_RX_LOG(DEBUG, "Notified");
1172 virtio_recv_mergeable_pkts(void *rx_queue,
1173 struct rte_mbuf **rx_pkts,
1176 struct virtnet_rx *rxvq = rx_queue;
1177 struct virtqueue *vq = rxvq->vq;
1178 struct virtio_hw *hw = vq->hw;
1179 struct rte_mbuf *rxm, *new_mbuf;
1180 uint16_t nb_used, num, nb_rx;
1181 uint32_t len[VIRTIO_MBUF_BURST_SZ];
1182 struct rte_mbuf *rcv_pkts[VIRTIO_MBUF_BURST_SZ];
1183 struct rte_mbuf *prev;
1185 uint32_t i, nb_enqueued;
1192 if (unlikely(hw->started == 0))
1195 nb_used = VIRTQUEUE_NUSED(vq);
1199 PMD_RX_LOG(DEBUG, "used:%d", nb_used);
1206 hdr_size = hw->vtnet_hdr_size;
1208 while (i < nb_used) {
1209 struct virtio_net_hdr_mrg_rxbuf *header;
1211 if (nb_rx == nb_pkts)
1214 num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len, 1);
1220 PMD_RX_LOG(DEBUG, "dequeue:%d", num);
1221 PMD_RX_LOG(DEBUG, "packet len:%d", len[0]);
1225 if (unlikely(len[0] < hdr_size + ETHER_HDR_LEN)) {
1226 PMD_RX_LOG(ERR, "Packet drop");
1228 virtio_discard_rxbuf(vq, rxm);
1229 rxvq->stats.errors++;
1233 header = (struct virtio_net_hdr_mrg_rxbuf *)((char *)rxm->buf_addr +
1234 RTE_PKTMBUF_HEADROOM - hdr_size);
1235 seg_num = header->num_buffers;
1240 rxm->data_off = RTE_PKTMBUF_HEADROOM;
1241 rxm->nb_segs = seg_num;
1244 rxm->pkt_len = (uint32_t)(len[0] - hdr_size);
1245 rxm->data_len = (uint16_t)(len[0] - hdr_size);
1247 rxm->port = rxvq->port_id;
1248 rx_pkts[nb_rx] = rxm;
1251 if (hw->has_rx_offload &&
1252 virtio_rx_offload(rxm, &header->hdr) < 0) {
1253 virtio_discard_rxbuf(vq, rxm);
1254 rxvq->stats.errors++;
1258 seg_res = seg_num - 1;
1260 while (seg_res != 0) {
1262 * Get extra segments for the current incomplete packet.
1265 RTE_MIN(seg_res, RTE_DIM(rcv_pkts));
1266 if (likely(VIRTQUEUE_NUSED(vq) >= rcv_cnt)) {
1269 virtqueue_dequeue_burst_rx(vq,
1270 rcv_pkts, len, rcv_cnt);
1275 "No enough segments for packet.");
1277 virtio_discard_rxbuf(vq, rxm);
1278 rxvq->stats.errors++;
1284 while (extra_idx < rcv_cnt) {
1285 rxm = rcv_pkts[extra_idx];
1287 rxm->data_off = RTE_PKTMBUF_HEADROOM - hdr_size;
1288 rxm->pkt_len = (uint32_t)(len[extra_idx]);
1289 rxm->data_len = (uint16_t)(len[extra_idx]);
1295 rx_pkts[nb_rx]->pkt_len += rxm->pkt_len;
1302 rte_vlan_strip(rx_pkts[nb_rx]);
1304 VIRTIO_DUMP_PACKET(rx_pkts[nb_rx],
1305 rx_pkts[nb_rx]->data_len);
1307 rxvq->stats.bytes += rx_pkts[nb_rx]->pkt_len;
1308 virtio_update_packet_stats(&rxvq->stats, rx_pkts[nb_rx]);
1312 rxvq->stats.packets += nb_rx;
1314 /* Allocate new mbuf for the used descriptor */
1315 while (likely(!virtqueue_full(vq))) {
1316 new_mbuf = rte_mbuf_raw_alloc(rxvq->mpool);
1317 if (unlikely(new_mbuf == NULL)) {
1318 struct rte_eth_dev *dev
1319 = &rte_eth_devices[rxvq->port_id];
1320 dev->data->rx_mbuf_alloc_failed++;
1323 error = virtqueue_enqueue_recv_refill(vq, new_mbuf);
1324 if (unlikely(error)) {
1325 rte_pktmbuf_free(new_mbuf);
1331 if (likely(nb_enqueued)) {
1332 vq_update_avail_idx(vq);
1334 if (unlikely(virtqueue_kick_prepare(vq))) {
1335 virtqueue_notify(vq);
1336 PMD_RX_LOG(DEBUG, "Notified");
1344 virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
1346 struct virtnet_tx *txvq = tx_queue;
1347 struct virtqueue *vq = txvq->vq;
1348 struct virtio_hw *hw = vq->hw;
1349 uint16_t hdr_size = hw->vtnet_hdr_size;
1350 uint16_t nb_used, nb_tx = 0;
1353 if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
1356 if (unlikely(nb_pkts < 1))
1359 PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
1360 nb_used = VIRTQUEUE_NUSED(vq);
1363 if (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh))
1364 virtio_xmit_cleanup(vq, nb_used);
1366 for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
1367 struct rte_mbuf *txm = tx_pkts[nb_tx];
1368 int can_push = 0, use_indirect = 0, slots, need;
1370 /* Do VLAN tag insertion */
1371 if (unlikely(txm->ol_flags & PKT_TX_VLAN_PKT)) {
1372 error = rte_vlan_insert(&txm);
1373 if (unlikely(error)) {
1374 rte_pktmbuf_free(txm);
1377 /* vlan_insert may add a header mbuf */
1378 tx_pkts[nb_tx] = txm;
1381 /* optimize ring usage */
1382 if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
1383 vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
1384 rte_mbuf_refcnt_read(txm) == 1 &&
1385 RTE_MBUF_DIRECT(txm) &&
1386 txm->nb_segs == 1 &&
1387 rte_pktmbuf_headroom(txm) >= hdr_size &&
1388 rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
1389 __alignof__(struct virtio_net_hdr_mrg_rxbuf)))
1391 else if (vtpci_with_feature(hw, VIRTIO_RING_F_INDIRECT_DESC) &&
1392 txm->nb_segs < VIRTIO_MAX_TX_INDIRECT)
1395 /* How many main ring entries are needed for this Tx?
1396 * any_layout => number of segments
1398 * default => number of segments + 1
1400 slots = use_indirect ? 1 : (txm->nb_segs + !can_push);
1401 need = slots - vq->vq_free_cnt;
1403 /* A positive value indicates more free vring descriptors are needed */
1404 if (unlikely(need > 0)) {
1405 nb_used = VIRTQUEUE_NUSED(vq);
1407 need = RTE_MIN(need, (int)nb_used);
1409 virtio_xmit_cleanup(vq, need);
1410 need = slots - vq->vq_free_cnt;
1411 if (unlikely(need > 0)) {
1413 "No free tx descriptors to transmit");
1418 /* Enqueue Packet buffers */
1419 virtqueue_enqueue_xmit(txvq, txm, slots, use_indirect,
1422 txvq->stats.bytes += txm->pkt_len;
1423 virtio_update_packet_stats(&txvq->stats, txm);
1426 txvq->stats.packets += nb_tx;
1428 if (likely(nb_tx)) {
1429 vq_update_avail_idx(vq);
1431 if (unlikely(virtqueue_kick_prepare(vq))) {
1432 virtqueue_notify(vq);
1433 PMD_TX_LOG(DEBUG, "Notified backend after xmit");
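/*
 * In-order transmit burst: packets that can carry the virtio net header in
 * their own headroom (single direct segment, refcnt 1, enough aligned
 * headroom) are batched into inorder_pkts[] and flushed through
 * virtqueue_enqueue_xmit_inorder(); everything else falls back to
 * virtqueue_enqueue_xmit() with an extra slot for the separate header.
 */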
1441 virtio_xmit_pkts_inorder(void *tx_queue,
1442 struct rte_mbuf **tx_pkts,
1445 struct virtnet_tx *txvq = tx_queue;
1446 struct virtqueue *vq = txvq->vq;
1447 struct virtio_hw *hw = vq->hw;
1448 uint16_t hdr_size = hw->vtnet_hdr_size;
1449 uint16_t nb_used, nb_avail, nb_tx = 0, nb_inorder_pkts = 0;
1450 struct rte_mbuf *inorder_pkts[nb_pkts];
1453 if (unlikely(hw->started == 0 && tx_pkts != hw->inject_pkts))
1456 if (unlikely(nb_pkts < 1))
1460 PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts);
1461 nb_used = VIRTQUEUE_NUSED(vq);
1464 if (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh))
1465 virtio_xmit_cleanup_inorder(vq, nb_used);
1467 if (unlikely(!vq->vq_free_cnt))
1468 virtio_xmit_cleanup_inorder(vq, nb_used);
1470 nb_avail = RTE_MIN(vq->vq_free_cnt, nb_pkts);
1472 for (nb_tx = 0; nb_tx < nb_avail; nb_tx++) {
1473 struct rte_mbuf *txm = tx_pkts[nb_tx];
1476 /* Do VLAN tag insertion */
1477 if (unlikely(txm->ol_flags & PKT_TX_VLAN_PKT)) {
1478 error = rte_vlan_insert(&txm);
1479 if (unlikely(error)) {
1480 rte_pktmbuf_free(txm);
1483 /* vlan_insert may add a header mbuf */
1484 tx_pkts[nb_tx] = txm;
1487 /* optimize ring usage */
1488 if ((vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) ||
1489 vtpci_with_feature(hw, VIRTIO_F_VERSION_1)) &&
1490 rte_mbuf_refcnt_read(txm) == 1 &&
1491 RTE_MBUF_DIRECT(txm) &&
1492 txm->nb_segs == 1 &&
1493 rte_pktmbuf_headroom(txm) >= hdr_size &&
1494 rte_is_aligned(rte_pktmbuf_mtod(txm, char *),
1495 __alignof__(struct virtio_net_hdr_mrg_rxbuf))) {
1496 inorder_pkts[nb_inorder_pkts] = txm;
1499 txvq->stats.bytes += txm->pkt_len;
1500 virtio_update_packet_stats(&txvq->stats, txm);
1504 if (nb_inorder_pkts) {
1505 virtqueue_enqueue_xmit_inorder(txvq, inorder_pkts,
1507 nb_inorder_pkts = 0;
1510 slots = txm->nb_segs + 1;
1511 need = slots - vq->vq_free_cnt;
1512 if (unlikely(need > 0)) {
1513 nb_used = VIRTQUEUE_NUSED(vq);
1515 need = RTE_MIN(need, (int)nb_used);
1517 virtio_xmit_cleanup_inorder(vq, need);
1519 need = slots - vq->vq_free_cnt;
1521 if (unlikely(need > 0)) {
1523 "No free tx descriptors to transmit");
1527 /* Enqueue Packet buffers */
1528 virtqueue_enqueue_xmit(txvq, txm, slots, 0, 0, 1);
1530 txvq->stats.bytes += txm->pkt_len;
1531 virtio_update_packet_stats(&txvq->stats, txm);
1534 /* Transmit all inorder packets */
1535 if (nb_inorder_pkts)
1536 virtqueue_enqueue_xmit_inorder(txvq, inorder_pkts,
1539 txvq->stats.packets += nb_tx;
1541 if (likely(nb_tx)) {
1542 vq_update_avail_idx(vq);
1544 if (unlikely(virtqueue_kick_prepare(vq))) {
1545 virtqueue_notify(vq);
1546 PMD_TX_LOG(DEBUG, "Notified backend after xmit");