X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=drivers%2Fnet%2Fthunderx%2Fnicvf_rxtx.c;h=247c356854b6f63416344bf48ac85b940cac3d65;hb=b63264c8342e6a1b6971c79550d2af2024b6a4de;hp=e27776e6d71fc63a86779a1b2298120436534131;hpb=f239aed5e674965691846e8ce3f187dd47523689;p=deb_dpdk.git

diff --git a/drivers/net/thunderx/nicvf_rxtx.c b/drivers/net/thunderx/nicvf_rxtx.c
index e27776e6..247c3568 100644
--- a/drivers/net/thunderx/nicvf_rxtx.c
+++ b/drivers/net/thunderx/nicvf_rxtx.c
@@ -1,33 +1,5 @@
-/*
- *   BSD LICENSE
- *
- *   Copyright (C) Cavium, Inc. 2016.
- *
- *   Redistribution and use in source and binary forms, with or without
- *   modification, are permitted provided that the following conditions
- *   are met:
- *
- *     * Redistributions of source code must retain the above copyright
- *       notice, this list of conditions and the following disclaimer.
- *     * Redistributions in binary form must reproduce the above copyright
- *       notice, this list of conditions and the following disclaimer in
- *       the documentation and/or other materials provided with the
- *       distribution.
- *     * Neither the name of Cavium, Inc nor the names of its
- *       contributors may be used to endorse or promote products derived
- *       from this software without specific prior written permission.
- *
- *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2016 Cavium, Inc
  */
 
 #include <unistd.h>
@@ -41,7 +13,7 @@
 #include <rte_common.h>
 #include <rte_cycles.h>
 #include <rte_debug.h>
-#include <rte_ethdev.h>
+#include <rte_ethdev_driver.h>
 #include <rte_ether.h>
 #include <rte_log.h>
 #include <rte_mbuf.h>
@@ -190,12 +162,14 @@ nicvf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 		free_desc -= TX_DESC_PER_PKT;
 	}
 
-	sq->tail = tail;
-	sq->xmit_bufs += i;
-	rte_wmb();
+	if (likely(i)) {
+		sq->tail = tail;
+		sq->xmit_bufs += i;
+		rte_wmb();
 
-	/* Inform HW to xmit the packets */
-	nicvf_addr_write(sq->sq_door, i * TX_DESC_PER_PKT);
+		/* Inform HW to xmit the packets */
+		nicvf_addr_write(sq->sq_door, i * TX_DESC_PER_PKT);
+	}
 
 	return i;
 }
@@ -246,13 +220,15 @@ nicvf_xmit_pkts_multiseg(void *tx_queue, struct rte_mbuf **tx_pkts,
 		}
 	}
 
-	sq->tail = tail;
-	sq->xmit_bufs += used_bufs;
-	rte_wmb();
+	if (likely(used_desc)) {
+		sq->tail = tail;
+		sq->xmit_bufs += used_bufs;
+		rte_wmb();
 
-	/* Inform HW to xmit the packets */
-	nicvf_addr_write(sq->sq_door, used_desc);
-	return nb_pkts;
+		/* Inform HW to xmit the packets */
+		nicvf_addr_write(sq->sq_door, used_desc);
+	}
+	return i;
 }
 
 static const uint32_t ptype_table[16][16] __rte_cache_aligned = {
@@ -355,6 +331,20 @@ nicvf_rx_classify_pkt(cqe_rx_word0_t cqe_rx_w0)
 	return ptype_table[cqe_rx_w0.l3_type][cqe_rx_w0.l4_type];
 }
 
+static inline uint64_t __hot
+nicvf_set_olflags(const cqe_rx_word0_t cqe_rx_w0)
+{
+	static const uint64_t flag_table[3] __rte_cache_aligned = {
+		PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD,
+		PKT_RX_IP_CKSUM_BAD | PKT_RX_L4_CKSUM_UNKNOWN,
+		PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_BAD,
+	};
+
+	const uint8_t idx = (cqe_rx_w0.err_opcode == CQE_RX_ERR_L4_CHK) << 1 |
+		(cqe_rx_w0.err_opcode == CQE_RX_ERR_IP_CHK);
+	return flag_table[idx];
+}
+
 static inline int __hot
 nicvf_fill_rbdr(struct nicvf_rxq *rxq, int to_fill)
 {
@@ -413,11 +403,13 @@ nicvf_rx_offload(cqe_rx_word0_t cqe_rx_w0, cqe_rx_word2_t cqe_rx_w2,
 	if (likely(cqe_rx_w0.rss_alg)) {
 		pkt->hash.rss = cqe_rx_w2.rss_tag;
 		pkt->ol_flags |= PKT_RX_RSS_HASH;
+	}
 }
 
 
-uint16_t __hot
-nicvf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+static __rte_always_inline uint16_t
+nicvf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts,
+		const uint32_t flag)
 {
 	uint32_t i, to_process;
 	struct cqe_rx_t *cqe_rx;
@@ -448,7 +440,19 @@ nicvf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 		rb0_ptr = *((uint64_t *)cqe_rx + rbptr_offset);
 		pkt = (struct rte_mbuf *)nicvf_mbuff_phy2virt
 				(rb0_ptr - cqe_rx_w1.align_pad, mbuf_phys_off);
-		pkt->ol_flags = 0;
+
+		if (flag & NICVF_RX_OFFLOAD_NONE)
+			pkt->ol_flags = 0;
+		if (flag & NICVF_RX_OFFLOAD_CKSUM)
+			pkt->ol_flags = nicvf_set_olflags(cqe_rx_w0);
+		if (flag & NICVF_RX_OFFLOAD_VLAN_STRIP) {
+			if (unlikely(cqe_rx_w0.vlan_stripped)) {
+				pkt->ol_flags |= PKT_RX_VLAN
+							| PKT_RX_VLAN_STRIPPED;
+				pkt->vlan_tci =
+					rte_cpu_to_be_16(cqe_rx_w2.vlan_tci);
+			}
+		}
 		pkt->data_len = cqe_rx_w3.rb0_sz;
 		pkt->pkt_len = cqe_rx_w3.rb0_sz;
 		pkt->packet_type = nicvf_rx_classify_pkt(cqe_rx_w0);
@@ -473,11 +477,43 @@ nicvf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 	return to_process;
 }
 
-static inline uint16_t __hot
+uint16_t __hot
+nicvf_recv_pkts_no_offload(void *rx_queue, struct rte_mbuf **rx_pkts,
+			   uint16_t nb_pkts)
+{
+	return nicvf_recv_pkts(rx_queue, rx_pkts, nb_pkts,
+			NICVF_RX_OFFLOAD_NONE);
+}
+
+uint16_t __hot
+nicvf_recv_pkts_cksum(void *rx_queue, struct rte_mbuf **rx_pkts,
+		      uint16_t nb_pkts)
+{
+	return nicvf_recv_pkts(rx_queue, rx_pkts, nb_pkts,
+			NICVF_RX_OFFLOAD_CKSUM);
+}
+
+uint16_t __hot
+nicvf_recv_pkts_vlan_strip(void *rx_queue, struct rte_mbuf **rx_pkts,
+			   uint16_t nb_pkts)
+{
+	return nicvf_recv_pkts(rx_queue, rx_pkts, nb_pkts,
+		NICVF_RX_OFFLOAD_NONE | NICVF_RX_OFFLOAD_VLAN_STRIP);
+}
+
+uint16_t __hot
+nicvf_recv_pkts_cksum_vlan_strip(void *rx_queue, struct rte_mbuf **rx_pkts,
+		uint16_t nb_pkts)
+{
+	return nicvf_recv_pkts(rx_queue, rx_pkts, nb_pkts,
+		NICVF_RX_OFFLOAD_CKSUM | NICVF_RX_OFFLOAD_VLAN_STRIP);
+}
+
+static __rte_always_inline uint16_t __hot
 nicvf_process_cq_mseg_entry(struct cqe_rx_t *cqe_rx,
 			    uint64_t mbuf_phys_off,
 			    struct rte_mbuf **rx_pkt, uint8_t rbptr_offset,
-			    uint64_t mbuf_init)
+			    uint64_t mbuf_init, const uint32_t flag)
 {
 	struct rte_mbuf *pkt, *seg, *prev;
 	cqe_rx_word0_t cqe_rx_w0;
@@ -495,12 +531,22 @@ nicvf_process_cq_mseg_entry(struct cqe_rx_t *cqe_rx,
 
 	pkt = (struct rte_mbuf *)nicvf_mbuff_phy2virt
 			(rb_ptr[0] - cqe_rx_w1.align_pad, mbuf_phys_off);
-	pkt->ol_flags = 0;
 	pkt->pkt_len = cqe_rx_w1.pkt_len;
 	pkt->data_len = rb_sz[nicvf_frag_num(0)];
 	nicvf_mbuff_init_mseg_update(
 				pkt, mbuf_init, cqe_rx_w1.align_pad, nb_segs);
 	pkt->packet_type = nicvf_rx_classify_pkt(cqe_rx_w0);
+	if (flag & NICVF_RX_OFFLOAD_NONE)
+		pkt->ol_flags = 0;
+	if (flag & NICVF_RX_OFFLOAD_CKSUM)
+		pkt->ol_flags = nicvf_set_olflags(cqe_rx_w0);
+	if (flag & NICVF_RX_OFFLOAD_VLAN_STRIP) {
+		if (unlikely(cqe_rx_w0.vlan_stripped)) {
+			pkt->ol_flags |= PKT_RX_VLAN
+						| PKT_RX_VLAN_STRIPPED;
+			pkt->vlan_tci = rte_cpu_to_be_16(cqe_rx_w2.vlan_tci);
+		}
+	}
 	nicvf_rx_offload(cqe_rx_w0, cqe_rx_w2, pkt);
 
 	*rx_pkt = pkt;
@@ -519,9 +565,9 @@ nicvf_process_cq_mseg_entry(struct cqe_rx_t *cqe_rx,
 	return nb_segs;
 }
 
-uint16_t __hot
+static __rte_always_inline uint16_t __hot
 nicvf_recv_pkts_multiseg(void *rx_queue, struct rte_mbuf **rx_pkts,
-			 uint16_t nb_pkts)
+			 uint16_t nb_pkts, const uint32_t flag)
 {
 	union cq_entry_t *cq_entry;
 	struct cqe_rx_t *cqe_rx;
@@ -543,7 +589,7 @@ nicvf_recv_pkts_multiseg(void *rx_queue, struct rte_mbuf **rx_pkts,
 		cq_entry = &desc[cqe_head];
 		cqe_rx = (struct cqe_rx_t *)cq_entry;
 		nb_segs = nicvf_process_cq_mseg_entry(cqe_rx, mbuf_phys_off,
-			rx_pkts + i, rbptr_offset, mbuf_init);
+			rx_pkts + i, rbptr_offset, mbuf_init, flag);
 		buffers_consumed += nb_segs;
 		cqe_head = (cqe_head + 1) & cqe_mask;
 		nicvf_prefetch_store_keep(rx_pkts[i]);
@@ -563,6 +609,38 @@ nicvf_recv_pkts_multiseg(void *rx_queue, struct rte_mbuf **rx_pkts,
 	return to_process;
 }
 
+uint16_t __hot
+nicvf_recv_pkts_multiseg_no_offload(void *rx_queue, struct rte_mbuf **rx_pkts,
+				    uint16_t nb_pkts)
+{
+	return nicvf_recv_pkts_multiseg(rx_queue, rx_pkts, nb_pkts,
+			NICVF_RX_OFFLOAD_NONE);
+}
+
+uint16_t __hot
+nicvf_recv_pkts_multiseg_cksum(void *rx_queue, struct rte_mbuf **rx_pkts,
+			       uint16_t nb_pkts)
+{
+	return nicvf_recv_pkts_multiseg(rx_queue, rx_pkts, nb_pkts,
+			NICVF_RX_OFFLOAD_CKSUM);
+}
+
+uint16_t __hot
+nicvf_recv_pkts_multiseg_vlan_strip(void *rx_queue, struct rte_mbuf **rx_pkts,
+				    uint16_t nb_pkts)
+{
+	return nicvf_recv_pkts_multiseg(rx_queue, rx_pkts, nb_pkts,
+		NICVF_RX_OFFLOAD_NONE | NICVF_RX_OFFLOAD_VLAN_STRIP);
+}
+
+uint16_t __hot
+nicvf_recv_pkts_multiseg_cksum_vlan_strip(void *rx_queue,
+		struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+	return nicvf_recv_pkts_multiseg(rx_queue, rx_pkts, nb_pkts,
+		NICVF_RX_OFFLOAD_CKSUM | NICVF_RX_OFFLOAD_VLAN_STRIP);
+}
+
 uint32_t
 nicvf_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_idx)
 {
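Note on the pattern above (an illustration, not part of the patch): the diff turns nicvf_recv_pkts() and nicvf_recv_pkts_multiseg() into static __rte_always_inline workers parameterized by a const uint32_t flag, then exports one thin wrapper per offload combination (no-offload, cksum, vlan-strip, cksum + vlan-strip). Since every wrapper passes a compile-time constant, each inlined copy of the worker keeps only its own `if (flag & ...)` branches, so the per-packet hot loop pays nothing for offloads it does not use. A minimal standalone sketch of that technique follows; the RX_F_* flags, struct pkt, and rx_burst*() names are hypothetical stand-ins (only the NICVF_* and nicvf_* identifiers above are from the patch):

/* Sketch: compile-time flag specialization of one Rx worker. */
#include <stdint.h>
#include <stdio.h>

#define RX_F_NONE       (1u << 0)	/* only clear ol_flags */
#define RX_F_CKSUM      (1u << 1)	/* fill checksum result flags */
#define RX_F_VLAN_STRIP (1u << 2)	/* copy the stripped VLAN tag */

struct pkt {
	uint64_t ol_flags;
	uint16_t vlan_tci;
};

/* One worker; "flag" is a compile-time constant at every call site,
 * so the compiler drops the untaken branches in each specialization. */
static inline uint16_t
rx_burst(struct pkt *pkts, uint16_t nb_pkts, const uint32_t flag)
{
	uint16_t i;

	for (i = 0; i < nb_pkts; i++) {
		if (flag & RX_F_NONE)
			pkts[i].ol_flags = 0;
		if (flag & RX_F_CKSUM)
			pkts[i].ol_flags = 0x3;	/* stand-in for nicvf_set_olflags() */
		if (flag & RX_F_VLAN_STRIP)
			pkts[i].vlan_tci = 100;	/* stand-in for the CQE VLAN tag */
	}
	return nb_pkts;
}

/* Thin exported wrappers, one per offload combination, mirroring
 * nicvf_recv_pkts_no_offload() etc. in the patch. */
uint16_t
rx_burst_no_offload(struct pkt *pkts, uint16_t nb_pkts)
{
	return rx_burst(pkts, nb_pkts, RX_F_NONE);
}

uint16_t
rx_burst_cksum_vlan_strip(struct pkt *pkts, uint16_t nb_pkts)
{
	return rx_burst(pkts, nb_pkts, RX_F_CKSUM | RX_F_VLAN_STRIP);
}

int main(void)
{
	struct pkt burst[4] = {{0, 0}};

	rx_burst_cksum_vlan_strip(burst, 4);
	printf("ol_flags=%#llx vlan=%u\n",
	       (unsigned long long)burst[0].ol_flags,
	       (unsigned)burst[0].vlan_tci);
	return 0;
}

The driver can then pick the matching wrapper once at configure time, based on the offloads the application enabled, instead of testing them per packet.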