X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=drivers%2Fnet%2Fthunderx%2Fnicvf_rxtx.c;h=ac30552b3c34b64c6f95f79346d80de9565bd86a;hb=f7a9461e29147c47ce2bb81bd157ac1833cf5eb1;hp=e15c730317bb6f7ac3b670c0722fe8ba19d37df1;hpb=32e04ea00cd159613e04acef75e52bfca6eeff2f;p=deb_dpdk.git

diff --git a/drivers/net/thunderx/nicvf_rxtx.c b/drivers/net/thunderx/nicvf_rxtx.c
index e15c7303..ac30552b 100644
--- a/drivers/net/thunderx/nicvf_rxtx.c
+++ b/drivers/net/thunderx/nicvf_rxtx.c
@@ -89,6 +89,14 @@ fill_sq_desc_header(union sq_entry_t *entry, struct rte_mbuf *pkt)
 	entry->buff[0] = sqe.buff[0];
 }
 
+static inline void __hot
+fill_sq_desc_header_zero_w1(union sq_entry_t *entry,
+			    struct rte_mbuf *pkt)
+{
+	fill_sq_desc_header(entry, pkt);
+	entry->buff[1] = 0ULL;
+}
+
 void __hot
 nicvf_single_pool_free_xmited_buffers(struct nicvf_txq *sq)
 {
@@ -190,12 +198,14 @@ nicvf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 		free_desc -= TX_DESC_PER_PKT;
 	}
 
-	sq->tail = tail;
-	sq->xmit_bufs += i;
-	rte_wmb();
+	if (likely(i)) {
+		sq->tail = tail;
+		sq->xmit_bufs += i;
+		rte_wmb();
 
-	/* Inform HW to xmit the packets */
-	nicvf_addr_write(sq->sq_door, i * TX_DESC_PER_PKT);
+		/* Inform HW to xmit the packets */
+		nicvf_addr_write(sq->sq_door, i * TX_DESC_PER_PKT);
+	}
 
 	return i;
 }
@@ -230,7 +240,7 @@ nicvf_xmit_pkts_multiseg(void *tx_queue, struct rte_mbuf **tx_pkts,
 		used_bufs += nb_segs;
 
 		txbuffs[tail] = NULL;
-		fill_sq_desc_header(desc_ptr + tail, pkt);
+		fill_sq_desc_header_zero_w1(desc_ptr + tail, pkt);
 		tail = (tail + 1) & qlen_mask;
 
 		txbuffs[tail] = pkt;
@@ -246,13 +256,15 @@ nicvf_xmit_pkts_multiseg(void *tx_queue, struct rte_mbuf **tx_pkts,
 		}
 	}
 
-	sq->tail = tail;
-	sq->xmit_bufs += used_bufs;
-	rte_wmb();
+	if (likely(used_desc)) {
+		sq->tail = tail;
+		sq->xmit_bufs += used_bufs;
+		rte_wmb();
 
-	/* Inform HW to xmit the packets */
-	nicvf_addr_write(sq->sq_door, used_desc);
-	return nb_pkts;
+		/* Inform HW to xmit the packets */
+		nicvf_addr_write(sq->sq_door, used_desc);
+	}
+	return i;
 }
 
 static const uint32_t ptype_table[16][16] __rte_cache_aligned = {
@@ -368,7 +380,8 @@ nicvf_fill_rbdr(struct nicvf_rxq *rxq, int to_fill)
 	void *obj_p[NICVF_MAX_RX_FREE_THRESH] __rte_cache_aligned;
 
 	if (unlikely(rte_mempool_get_bulk(rxq->pool, obj_p, to_fill) < 0)) {
-		rxq->nic->eth_dev->data->rx_mbuf_alloc_failed += to_fill;
+		rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
+			to_fill;
 		return 0;
 	}
 
@@ -468,11 +481,10 @@ nicvf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 		rxq->head = cqe_head;
 		nicvf_addr_write(rxq->cq_door, to_process);
 		rxq->recv_buffers += to_process;
-		if (rxq->recv_buffers > rxq->rx_free_thresh) {
-			rxq->recv_buffers -= nicvf_fill_rbdr(rxq,
-						rxq->rx_free_thresh);
-			NICVF_RX_ASSERT(rxq->recv_buffers >= 0);
-		}
+	}
+	if (rxq->recv_buffers > rxq->rx_free_thresh) {
+		rxq->recv_buffers -= nicvf_fill_rbdr(rxq, rxq->rx_free_thresh);
+		NICVF_RX_ASSERT(rxq->recv_buffers >= 0);
 	}
 
 	return to_process;
@@ -562,11 +574,10 @@ nicvf_recv_pkts_multiseg(void *rx_queue, struct rte_mbuf **rx_pkts,
 		rxq->head = cqe_head;
 		nicvf_addr_write(rxq->cq_door, to_process);
 		rxq->recv_buffers += buffers_consumed;
-		if (rxq->recv_buffers > rxq->rx_free_thresh) {
-			rxq->recv_buffers -=
-				nicvf_fill_rbdr(rxq, rxq->rx_free_thresh);
-			NICVF_RX_ASSERT(rxq->recv_buffers >= 0);
-		}
+	}
+	if (rxq->recv_buffers > rxq->rx_free_thresh) {
+		rxq->recv_buffers -= nicvf_fill_rbdr(rxq, rxq->rx_free_thresh);
+		NICVF_RX_ASSERT(rxq->recv_buffers >= 0);
 	}
 
 	return to_process;
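
The recurring change in both transmit paths above is to update queue state, issue the write barrier and ring the send-queue doorbell only when at least one descriptor was actually queued. The toy program below is a minimal, self-contained sketch of that pattern, not driver code; every name in it (toy_queue, toy_xmit, toy_doorbell_write) is hypothetical, and rte_wmb()/nicvf_addr_write() appear only in comments.

/*
 * Sketch of the "guard the doorbell" pattern: skip the barrier and the
 * doorbell write entirely when no descriptors were queued this call.
 */
#include <stdint.h>
#include <stdio.h>

struct toy_queue {
	uint32_t tail;
	uint32_t xmit_bufs;
};

/* Stand-in for the MMIO doorbell write; the real driver does
 * nicvf_addr_write(sq->sq_door, ndesc) after rte_wmb(). */
static void
toy_doorbell_write(struct toy_queue *q, uint32_t ndesc)
{
	printf("doorbell: %u descriptors (tail=%u)\n", ndesc, q->tail);
}

/* Pretend to enqueue up to nb_pkts packets; returns how many fit. */
static uint16_t
toy_enqueue(struct toy_queue *q, uint16_t nb_pkts, uint16_t free_desc)
{
	uint16_t i = nb_pkts < free_desc ? nb_pkts : free_desc;

	q->tail += i;
	return i;
}

static uint16_t
toy_xmit(struct toy_queue *q, uint16_t nb_pkts, uint16_t free_desc)
{
	uint16_t i = toy_enqueue(q, nb_pkts, free_desc);

	if (i) {	/* nothing queued: no state update, no doorbell */
		q->xmit_bufs += i;
		/* rte_wmb() would go here in the real driver */
		toy_doorbell_write(q, i);
	}
	return i;
}

int main(void)
{
	struct toy_queue q = { 0, 0 };

	toy_xmit(&q, 4, 8);	/* rings the doorbell */
	toy_xmit(&q, 4, 0);	/* queue full: no doorbell, no barrier */
	return 0;
}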