/*
 *   BSD LICENSE
 *
 *   Copyright (C) Cavium, Inc. 2016.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Cavium, Inc nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_errno.h>
#include <rte_ethdev.h>
#include <rte_ether.h>
#include <rte_log.h>
#include <rte_mbuf.h>
#include <rte_prefetch.h>

#include "base/nicvf_plat.h"

#include "nicvf_ethdev.h"
#include "nicvf_rxtx.h"
#include "nicvf_logs.h"

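/*
 * TX path
 * -------
 * Each packet posted to the send queue (SQ) consumes one HEADER
 * sub-descriptor followed by one GATHER sub-descriptor per mbuf segment
 * (hence TX_DESC_PER_PKT == 2 on the single-segment fast path below).
 * Completed descriptors are reclaimed lazily from the hardware head
 * pointer rather than per packet.
 */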
static inline void __hot
fill_sq_desc_header(union sq_entry_t *entry, struct rte_mbuf *pkt)
{
	/* Local variable sqe to avoid read from sq desc memory */
	union sq_entry_t sqe;
	uint64_t ol_flags;

	/* Fill SQ header descriptor */
	sqe.buff[0] = 0;
	sqe.hdr.subdesc_type = SQ_DESC_TYPE_HEADER;
	/* Number of sub-descriptors following this one */
	sqe.hdr.subdesc_cnt = pkt->nb_segs;
	sqe.hdr.tot_len = pkt->pkt_len;

	ol_flags = pkt->ol_flags & NICVF_TX_OFFLOAD_MASK;
	if (unlikely(ol_flags)) {
		/* L4 checksum offload */
		uint64_t l4_flags = ol_flags & PKT_TX_L4_MASK;

		if (l4_flags == PKT_TX_TCP_CKSUM)
			sqe.hdr.csum_l4 = SEND_L4_CSUM_TCP;
		else if (l4_flags == PKT_TX_UDP_CKSUM)
			sqe.hdr.csum_l4 = SEND_L4_CSUM_UDP;
		else
			sqe.hdr.csum_l4 = SEND_L4_CSUM_DISABLE;

		sqe.hdr.l3_offset = pkt->l2_len;
		sqe.hdr.l4_offset = pkt->l3_len + pkt->l2_len;

		/* L3 checksum offload */
		if (ol_flags & PKT_TX_IP_CKSUM)
			sqe.hdr.csum_l3 = 1;
	}

	entry->buff[0] = sqe.buff[0];
}

static inline void __hot
fill_sq_desc_header_zero_w1(union sq_entry_t *entry,
			    struct rte_mbuf *pkt)
{
	fill_sq_desc_header(entry, pkt);
	entry->buff[1] = 0ULL;
}

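/*
 * Reclaim transmitted mbufs in the single-mempool case: every mbuf on the
 * queue is assumed to come from the same mempool (sq->pool), so the whole
 * batch can be returned with one rte_mempool_put_bulk() call instead of
 * per-mbuf frees. The ">> 4" turns what appears to be a byte offset in the
 * SQ head register into a descriptor index (SQ entries are 16 bytes).
 */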
void __hot
nicvf_single_pool_free_xmited_buffers(struct nicvf_txq *sq)
{
	int j = 0;
	uint32_t curr_head;
	uint32_t head = sq->head;
	struct rte_mbuf **txbuffs = sq->txbuffs;
	void *obj_p[NICVF_MAX_TX_FREE_THRESH] __rte_cache_aligned;

	curr_head = nicvf_addr_read(sq->sq_head) >> 4;
	while (head != curr_head) {
		if (txbuffs[head])
			obj_p[j++] = txbuffs[head];

		head = (head + 1) & sq->qlen_mask;
	}

	rte_mempool_put_bulk(sq->pool, obj_p, j);
	sq->head = curr_head;
	sq->xmit_bufs -= j;
	NICVF_TX_ASSERT(sq->xmit_bufs >= 0);
}

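/*
 * Generic reclaim variant for queues whose mbufs may originate from
 * different mempools: each buffer is handed back individually via
 * rte_pktmbuf_free_seg(), which routes it to its own pool.
 */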
void __hot
nicvf_multi_pool_free_xmited_buffers(struct nicvf_txq *sq)
{
	uint32_t n = 0;
	uint32_t curr_head;
	uint32_t head = sq->head;
	struct rte_mbuf **txbuffs = sq->txbuffs;

	curr_head = nicvf_addr_read(sq->sq_head) >> 4;
	while (head != curr_head) {
		if (txbuffs[head]) {
			rte_pktmbuf_free_seg(txbuffs[head]);
			n++;
		}
		head = (head + 1) & sq->qlen_mask;
	}

	sq->head = curr_head;
	sq->xmit_bufs -= n;
	NICVF_TX_ASSERT(sq->xmit_bufs >= 0);
}

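/*
 * Number of free SQ slots, computed with power-of-two ring arithmetic
 * (qlen_mask == ring size - 1). One slot is kept unused so that a full
 * ring can be told apart from an empty one. For example, with a
 * 1024-entry ring (qlen_mask = 1023), head = 5 and tail = 9:
 * (5 - 9 - 1) & 1023 = 1019 free descriptors.
 */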
static inline uint32_t __hot
nicvf_free_tx_desc(struct nicvf_txq *sq)
{
	return ((sq->head - sq->tail - 1) & sq->qlen_mask);
}

/* Send Header + Packet */
#define TX_DESC_PER_PKT 2

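/*
 * Reclaim completed buffers only when descriptors run short for this
 * burst or the outstanding-buffer count crosses tx_free_thresh. sq->pool
 * is latched from the first packet seen so that the single-pool reclaim
 * path knows which mempool to return mbufs to.
 */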
static inline uint32_t __hot
nicvf_free_xmitted_buffers(struct nicvf_txq *sq, struct rte_mbuf **tx_pkts,
			   uint16_t nb_pkts)
{
	uint32_t free_desc = nicvf_free_tx_desc(sq);

	if (free_desc < nb_pkts * TX_DESC_PER_PKT ||
			sq->xmit_bufs > sq->tx_free_thresh) {
		if (unlikely(sq->pool == NULL))
			sq->pool = tx_pkts[0]->pool;

		sq->pool_free(sq);
		/* Freed now, let's see the number of free descs again */
		free_desc = nicvf_free_tx_desc(sq);
	}
	return free_desc;
}

uint16_t __hot
nicvf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	int i;
	uint32_t free_desc;
	uint32_t tail;
	struct nicvf_txq *sq = tx_queue;
	union sq_entry_t *desc_ptr = sq->desc;
	struct rte_mbuf **txbuffs = sq->txbuffs;
	struct rte_mbuf *pkt;
	uint32_t qlen_mask = sq->qlen_mask;

	tail = sq->tail;
	free_desc = nicvf_free_xmitted_buffers(sq, tx_pkts, nb_pkts);

	for (i = 0; i < nb_pkts && (int)free_desc >= TX_DESC_PER_PKT; i++) {
		pkt = tx_pkts[i];

		txbuffs[tail] = NULL;
		fill_sq_desc_header(desc_ptr + tail, pkt);
		tail = (tail + 1) & qlen_mask;

		txbuffs[tail] = pkt;
		fill_sq_desc_gather(desc_ptr + tail, pkt);
		tail = (tail + 1) & qlen_mask;
		free_desc -= TX_DESC_PER_PKT;
	}

	if (likely(i)) {
		sq->tail = tail;
		sq->xmit_bufs += i;
		rte_wmb();

		/* Inform HW to xmit the packets */
		nicvf_addr_write(sq->sq_door, i * TX_DESC_PER_PKT);
	}
	return i;
}

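/*
 * Multi-segment variant: each packet needs nb_segs + 1 descriptors (one
 * header plus one gather entry per segment), so availability is checked
 * per packet instead of assuming TX_DESC_PER_PKT. Applications reach
 * either handler through the normal ethdev burst API, e.g.
 * (illustrative only):
 *
 *	nb_sent = rte_eth_tx_burst(port_id, queue_id, pkts, nb_pkts);
 */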
uint16_t __hot
nicvf_xmit_pkts_multiseg(void *tx_queue, struct rte_mbuf **tx_pkts,
			 uint16_t nb_pkts)
{
	int i, k;
	uint32_t used_desc, next_used_desc, used_bufs, free_desc, tail;
	struct nicvf_txq *sq = tx_queue;
	union sq_entry_t *desc_ptr = sq->desc;
	struct rte_mbuf **txbuffs = sq->txbuffs;
	struct rte_mbuf *pkt, *seg;
	uint32_t qlen_mask = sq->qlen_mask;
	uint16_t nb_segs;

	tail = sq->tail;
	used_desc = 0;
	used_bufs = 0;

	free_desc = nicvf_free_xmitted_buffers(sq, tx_pkts, nb_pkts);

	for (i = 0; i < nb_pkts; i++) {
		pkt = tx_pkts[i];

		nb_segs = pkt->nb_segs;

		next_used_desc = used_desc + nb_segs + 1;
		if (next_used_desc > free_desc)
			break;
		used_desc = next_used_desc;
		used_bufs += nb_segs;

		txbuffs[tail] = NULL;
		fill_sq_desc_header_zero_w1(desc_ptr + tail, pkt);
		tail = (tail + 1) & qlen_mask;

		txbuffs[tail] = pkt;
		fill_sq_desc_gather(desc_ptr + tail, pkt);
		tail = (tail + 1) & qlen_mask;

		seg = pkt->next;
		for (k = 1; k < nb_segs; k++) {
			txbuffs[tail] = seg;
			fill_sq_desc_gather(desc_ptr + tail, seg);
			tail = (tail + 1) & qlen_mask;
			seg = seg->next;
		}
	}

	if (likely(used_desc)) {
		sq->tail = tail;
		sq->xmit_bufs += used_bufs;
		rte_wmb();

		/* Inform HW to xmit the packets */
		nicvf_addr_write(sq->sq_door, used_desc);
	}
	return i;
}

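/*
 * CQE_RX l3_type/l4_type to rte_mbuf packet_type translation table,
 * indexed directly by the L3 and L4 type fields reported by hardware
 * (the table is sized 16x16 to cover the 4-bit type codes). Unlisted
 * combinations fall through to RTE_PTYPE_UNKNOWN (0); both UDP pass-1
 * and pass-2 codes map to RTE_PTYPE_L4_UDP.
 */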
static const uint32_t ptype_table[16][16] __rte_cache_aligned = {
	[L3_NONE][L4_NONE] = RTE_PTYPE_UNKNOWN,
	[L3_NONE][L4_IPSEC_ESP] = RTE_PTYPE_UNKNOWN,
	[L3_NONE][L4_IPFRAG] = RTE_PTYPE_L4_FRAG,
	[L3_NONE][L4_IPCOMP] = RTE_PTYPE_UNKNOWN,
	[L3_NONE][L4_TCP] = RTE_PTYPE_L4_TCP,
	[L3_NONE][L4_UDP_PASS1] = RTE_PTYPE_L4_UDP,
	[L3_NONE][L4_GRE] = RTE_PTYPE_TUNNEL_GRE,
	[L3_NONE][L4_UDP_PASS2] = RTE_PTYPE_L4_UDP,
	[L3_NONE][L4_UDP_GENEVE] = RTE_PTYPE_TUNNEL_GENEVE,
	[L3_NONE][L4_UDP_VXLAN] = RTE_PTYPE_TUNNEL_VXLAN,
	[L3_NONE][L4_NVGRE] = RTE_PTYPE_TUNNEL_NVGRE,

	[L3_IPV4][L4_NONE] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_UNKNOWN,
	[L3_IPV4][L4_IPSEC_ESP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L3_IPV4,
	[L3_IPV4][L4_IPFRAG] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_FRAG,
	[L3_IPV4][L4_IPCOMP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_UNKNOWN,
	[L3_IPV4][L4_TCP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
	[L3_IPV4][L4_UDP_PASS1] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
	[L3_IPV4][L4_GRE] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_GRE,
	[L3_IPV4][L4_UDP_PASS2] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
	[L3_IPV4][L4_UDP_GENEVE] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_GENEVE,
	[L3_IPV4][L4_UDP_VXLAN] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_VXLAN,
	[L3_IPV4][L4_NVGRE] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_NVGRE,

	[L3_IPV4_OPT][L4_NONE] = RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_UNKNOWN,
	[L3_IPV4_OPT][L4_IPSEC_ESP] = RTE_PTYPE_L3_IPV4_EXT |
				RTE_PTYPE_L3_IPV4,
	[L3_IPV4_OPT][L4_IPFRAG] = RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_FRAG,
	[L3_IPV4_OPT][L4_IPCOMP] = RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_UNKNOWN,
	[L3_IPV4_OPT][L4_TCP] = RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_TCP,
	[L3_IPV4_OPT][L4_UDP_PASS1] = RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_UDP,
	[L3_IPV4_OPT][L4_GRE] = RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_GRE,
	[L3_IPV4_OPT][L4_UDP_PASS2] = RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_UDP,
	[L3_IPV4_OPT][L4_UDP_GENEVE] = RTE_PTYPE_L3_IPV4_EXT |
				RTE_PTYPE_TUNNEL_GENEVE,
	[L3_IPV4_OPT][L4_UDP_VXLAN] = RTE_PTYPE_L3_IPV4_EXT |
				RTE_PTYPE_TUNNEL_VXLAN,
	[L3_IPV4_OPT][L4_NVGRE] = RTE_PTYPE_L3_IPV4_EXT |
				RTE_PTYPE_TUNNEL_NVGRE,

	[L3_IPV6][L4_NONE] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_UNKNOWN,
	[L3_IPV6][L4_IPSEC_ESP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L3_IPV4,
	[L3_IPV6][L4_IPFRAG] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_FRAG,
	[L3_IPV6][L4_IPCOMP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_UNKNOWN,
	[L3_IPV6][L4_TCP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
	[L3_IPV6][L4_UDP_PASS1] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
	[L3_IPV6][L4_GRE] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_GRE,
	[L3_IPV6][L4_UDP_PASS2] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
	[L3_IPV6][L4_UDP_GENEVE] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_GENEVE,
	[L3_IPV6][L4_UDP_VXLAN] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_VXLAN,
	[L3_IPV6][L4_NVGRE] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_NVGRE,

	[L3_IPV6_OPT][L4_NONE] = RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_UNKNOWN,
	[L3_IPV6_OPT][L4_IPSEC_ESP] = RTE_PTYPE_L3_IPV6_EXT |
				RTE_PTYPE_L3_IPV4,
	[L3_IPV6_OPT][L4_IPFRAG] = RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_FRAG,
	[L3_IPV6_OPT][L4_IPCOMP] = RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_UNKNOWN,
	[L3_IPV6_OPT][L4_TCP] = RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP,
	[L3_IPV6_OPT][L4_UDP_PASS1] = RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP,
	[L3_IPV6_OPT][L4_GRE] = RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_GRE,
	[L3_IPV6_OPT][L4_UDP_PASS2] = RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP,
	[L3_IPV6_OPT][L4_UDP_GENEVE] = RTE_PTYPE_L3_IPV6_EXT |
				RTE_PTYPE_TUNNEL_GENEVE,
	[L3_IPV6_OPT][L4_UDP_VXLAN] = RTE_PTYPE_L3_IPV6_EXT |
				RTE_PTYPE_TUNNEL_VXLAN,
	[L3_IPV6_OPT][L4_NVGRE] = RTE_PTYPE_L3_IPV6_EXT |
				RTE_PTYPE_TUNNEL_NVGRE,

	[L3_ET_STOP][L4_NONE] = RTE_PTYPE_UNKNOWN,
	[L3_ET_STOP][L4_IPSEC_ESP] = RTE_PTYPE_UNKNOWN,
	[L3_ET_STOP][L4_IPFRAG] = RTE_PTYPE_L4_FRAG,
	[L3_ET_STOP][L4_IPCOMP] = RTE_PTYPE_UNKNOWN,
	[L3_ET_STOP][L4_TCP] = RTE_PTYPE_L4_TCP,
	[L3_ET_STOP][L4_UDP_PASS1] = RTE_PTYPE_L4_UDP,
	[L3_ET_STOP][L4_GRE] = RTE_PTYPE_TUNNEL_GRE,
	[L3_ET_STOP][L4_UDP_PASS2] = RTE_PTYPE_L4_UDP,
	[L3_ET_STOP][L4_UDP_GENEVE] = RTE_PTYPE_TUNNEL_GENEVE,
	[L3_ET_STOP][L4_UDP_VXLAN] = RTE_PTYPE_TUNNEL_VXLAN,
	[L3_ET_STOP][L4_NVGRE] = RTE_PTYPE_TUNNEL_NVGRE,

	[L3_OTHER][L4_NONE] = RTE_PTYPE_UNKNOWN,
	[L3_OTHER][L4_IPSEC_ESP] = RTE_PTYPE_UNKNOWN,
	[L3_OTHER][L4_IPFRAG] = RTE_PTYPE_L4_FRAG,
	[L3_OTHER][L4_IPCOMP] = RTE_PTYPE_UNKNOWN,
	[L3_OTHER][L4_TCP] = RTE_PTYPE_L4_TCP,
	[L3_OTHER][L4_UDP_PASS1] = RTE_PTYPE_L4_UDP,
	[L3_OTHER][L4_GRE] = RTE_PTYPE_TUNNEL_GRE,
	[L3_OTHER][L4_UDP_PASS2] = RTE_PTYPE_L4_UDP,
	[L3_OTHER][L4_UDP_GENEVE] = RTE_PTYPE_TUNNEL_GENEVE,
	[L3_OTHER][L4_UDP_VXLAN] = RTE_PTYPE_TUNNEL_VXLAN,
	[L3_OTHER][L4_NVGRE] = RTE_PTYPE_TUNNEL_NVGRE,
};

static inline uint32_t __hot
nicvf_rx_classify_pkt(cqe_rx_word0_t cqe_rx_w0)
{
	return ptype_table[cqe_rx_w0.l3_type][cqe_rx_w0.l4_type];
}

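/*
 * Refill the receive buffer descriptor ring (RBDR) with to_fill fresh
 * mbufs. The RBDR is shared between queues, so a slot range is reserved
 * with an atomic fetch-add on next_tail; each writer then waits until
 * tail catches up to the start of its own range before publishing it
 * with a release store and ringing the doorbell. This keeps doorbell
 * updates in reservation order across concurrent lcores.
 */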
static inline int __hot
nicvf_fill_rbdr(struct nicvf_rxq *rxq, int to_fill)
{
	int i;
	uint32_t ltail, next_tail;
	struct nicvf_rbdr *rbdr = rxq->shared_rbdr;
	uint64_t mbuf_phys_off = rxq->mbuf_phys_off;
	struct rbdr_entry_t *desc = rbdr->desc;
	uint32_t qlen_mask = rbdr->qlen_mask;
	uintptr_t door = rbdr->rbdr_door;
	void *obj_p[NICVF_MAX_RX_FREE_THRESH] __rte_cache_aligned;

	if (unlikely(rte_mempool_get_bulk(rxq->pool, obj_p, to_fill) < 0)) {
		rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
			to_fill;
		return 0;
	}

	NICVF_RX_ASSERT((unsigned int)to_fill <= (qlen_mask -
		(nicvf_addr_read(rbdr->rbdr_status) & NICVF_RBDR_COUNT_MASK)));

	next_tail = __atomic_fetch_add(&rbdr->next_tail, to_fill,
					__ATOMIC_ACQUIRE);
	ltail = next_tail;
	for (i = 0; i < to_fill; i++) {
		struct rbdr_entry_t *entry = desc + (ltail & qlen_mask);

		entry->full_addr = nicvf_mbuff_virt2phy((uintptr_t)obj_p[i],
							mbuf_phys_off);
		ltail++;
	}

	while (__atomic_load_n(&rbdr->tail, __ATOMIC_RELAXED) != next_tail)
		rte_pause();

	__atomic_store_n(&rbdr->tail, ltail, __ATOMIC_RELEASE);
	nicvf_addr_write(door, to_fill);
	return to_fill;
}

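/*
 * Cheap burst sizing: trust the cached CQE count and only re-read the
 * hardware CQ status register when the cache cannot cover the request;
 * the refreshed value is picked up on the next call.
 */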
static inline int32_t __hot
nicvf_rx_pkts_to_process(struct nicvf_rxq *rxq, uint16_t nb_pkts,
			 int32_t available_space)
{
	if (unlikely(available_space < nb_pkts))
		rxq->available_space = nicvf_addr_read(rxq->cq_status)
						& NICVF_CQ_CQE_COUNT_MASK;

	return RTE_MIN(nb_pkts, available_space);
}

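/* Propagate RX offload results: currently only the RSS hash from CQE word2. */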
static inline void __hot
nicvf_rx_offload(cqe_rx_word0_t cqe_rx_w0, cqe_rx_word2_t cqe_rx_w2,
		 struct rte_mbuf *pkt)
{
	if (likely(cqe_rx_w0.rss_alg)) {
		pkt->hash.rss = cqe_rx_w2.rss_tag;
		pkt->ol_flags |= PKT_RX_RSS_HASH;
	}
}

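/*
 * Single-segment receive handler. The mbuf virtual address is recovered
 * from the receive-buffer physical pointer in the CQE (minus the
 * alignment pad) using the constant virt-to-phys offset of the mempool.
 * Invoked through the ethdev burst API, e.g. (illustrative only):
 *
 *	nb_rx = rte_eth_rx_burst(port_id, queue_id, pkts, nb_pkts);
 */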
uint16_t __hot
nicvf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	uint32_t i, to_process;
	struct cqe_rx_t *cqe_rx;
	struct rte_mbuf *pkt;
	cqe_rx_word0_t cqe_rx_w0;
	cqe_rx_word1_t cqe_rx_w1;
	cqe_rx_word2_t cqe_rx_w2;
	cqe_rx_word3_t cqe_rx_w3;
	struct nicvf_rxq *rxq = rx_queue;
	union cq_entry_t *desc = rxq->desc;
	const uint64_t cqe_mask = rxq->qlen_mask;
	uint64_t rb0_ptr, mbuf_phys_off = rxq->mbuf_phys_off;
	const uint64_t mbuf_init = rxq->mbuf_initializer.value;
	uint32_t cqe_head = rxq->head & cqe_mask;
	int32_t available_space = rxq->available_space;
	const uint8_t rbptr_offset = rxq->rbptr_offset;

	to_process = nicvf_rx_pkts_to_process(rxq, nb_pkts, available_space);

	for (i = 0; i < to_process; i++) {
		rte_prefetch_non_temporal(&desc[cqe_head + 2]);
		cqe_rx = (struct cqe_rx_t *)&desc[cqe_head];
		NICVF_RX_ASSERT(((struct cq_entry_type_t *)cqe_rx)->cqe_type
						== CQE_TYPE_RX);

		NICVF_LOAD_PAIR(cqe_rx_w0.u64, cqe_rx_w1.u64, cqe_rx);
		NICVF_LOAD_PAIR(cqe_rx_w2.u64, cqe_rx_w3.u64, &cqe_rx->word2);
		rb0_ptr = *((uint64_t *)cqe_rx + rbptr_offset);
		pkt = (struct rte_mbuf *)nicvf_mbuff_phy2virt
			(rb0_ptr - cqe_rx_w1.align_pad, mbuf_phys_off);

		pkt->ol_flags = 0;
		pkt->data_len = cqe_rx_w3.rb0_sz;
		pkt->pkt_len = cqe_rx_w3.rb0_sz;
		pkt->packet_type = nicvf_rx_classify_pkt(cqe_rx_w0);
		nicvf_mbuff_init_update(pkt, mbuf_init, cqe_rx_w1.align_pad);
		nicvf_rx_offload(cqe_rx_w0, cqe_rx_w2, pkt);
		rx_pkts[i] = pkt;
		cqe_head = (cqe_head + 1) & cqe_mask;
		nicvf_prefetch_store_keep(pkt);
	}

	if (likely(to_process)) {
		rxq->available_space -= to_process;
		rxq->head = cqe_head;
		nicvf_addr_write(rxq->cq_door, to_process);
		rxq->recv_buffers += to_process;
	}
	if (rxq->recv_buffers > rxq->rx_free_thresh) {
		rxq->recv_buffers -= nicvf_fill_rbdr(rxq, rxq->rx_free_thresh);
		NICVF_RX_ASSERT(rxq->recv_buffers >= 0);
	}

	return to_process;
}

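/*
 * Parse one multi-segment CQE_RX entry: the CQE carries rb_cnt buffer
 * pointers and sizes, which are chained here into a single mbuf chain.
 * nicvf_frag_num() maps a logical segment index to the position of its
 * size field within CQE word3 (the ordering is platform dependent; see
 * its definition).
 */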
static inline uint16_t __hot
nicvf_process_cq_mseg_entry(struct cqe_rx_t *cqe_rx,
			uint64_t mbuf_phys_off,
			struct rte_mbuf **rx_pkt, uint8_t rbptr_offset,
			uint64_t mbuf_init)
{
	struct rte_mbuf *pkt, *seg, *prev;
	cqe_rx_word0_t cqe_rx_w0;
	cqe_rx_word1_t cqe_rx_w1;
	cqe_rx_word2_t cqe_rx_w2;
	uint16_t *rb_sz, nb_segs, seg_idx;
	uint64_t *rb_ptr;

	NICVF_LOAD_PAIR(cqe_rx_w0.u64, cqe_rx_w1.u64, cqe_rx);
	NICVF_RX_ASSERT(cqe_rx_w0.cqe_type == CQE_TYPE_RX);
	cqe_rx_w2 = cqe_rx->word2;
	rb_sz = &cqe_rx->word3.rb0_sz;
	rb_ptr = (uint64_t *)cqe_rx + rbptr_offset;
	nb_segs = cqe_rx_w0.rb_cnt;
	pkt = (struct rte_mbuf *)nicvf_mbuff_phy2virt
		(rb_ptr[0] - cqe_rx_w1.align_pad, mbuf_phys_off);

	pkt->ol_flags = 0;
	pkt->pkt_len = cqe_rx_w1.pkt_len;
	pkt->data_len = rb_sz[nicvf_frag_num(0)];
	nicvf_mbuff_init_mseg_update(
				pkt, mbuf_init, cqe_rx_w1.align_pad, nb_segs);
	pkt->packet_type = nicvf_rx_classify_pkt(cqe_rx_w0);
	nicvf_rx_offload(cqe_rx_w0, cqe_rx_w2, pkt);

	*rx_pkt = pkt;
	prev = pkt;
	for (seg_idx = 1; seg_idx < nb_segs; seg_idx++) {
		seg = (struct rte_mbuf *)nicvf_mbuff_phy2virt
			(rb_ptr[seg_idx], mbuf_phys_off);

		prev->next = seg;
		seg->data_len = rb_sz[nicvf_frag_num(seg_idx)];
		nicvf_mbuff_init_update(seg, mbuf_init, 0);

		prev = seg;
	}
	prev->next = NULL;
	return nb_segs;
}

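/*
 * Multi-segment receive handler: same flow as nicvf_recv_pkts(), but one
 * CQE may consume several receive buffers, so RBDR accounting tracks
 * buffers_consumed rather than the number of completions.
 */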
uint16_t __hot
nicvf_recv_pkts_multiseg(void *rx_queue, struct rte_mbuf **rx_pkts,
			 uint16_t nb_pkts)
{
	union cq_entry_t *cq_entry;
	struct cqe_rx_t *cqe_rx;
	struct nicvf_rxq *rxq = rx_queue;
	union cq_entry_t *desc = rxq->desc;
	const uint64_t cqe_mask = rxq->qlen_mask;
	uint64_t mbuf_phys_off = rxq->mbuf_phys_off;
	uint32_t i, to_process, cqe_head, buffers_consumed = 0;
	int32_t available_space = rxq->available_space;
	uint16_t nb_segs;
	const uint64_t mbuf_init = rxq->mbuf_initializer.value;
	const uint8_t rbptr_offset = rxq->rbptr_offset;

	cqe_head = rxq->head & cqe_mask;
	to_process = nicvf_rx_pkts_to_process(rxq, nb_pkts, available_space);

	for (i = 0; i < to_process; i++) {
		rte_prefetch_non_temporal(&desc[cqe_head + 2]);
		cq_entry = &desc[cqe_head];
		cqe_rx = (struct cqe_rx_t *)cq_entry;
		nb_segs = nicvf_process_cq_mseg_entry(cqe_rx, mbuf_phys_off,
				rx_pkts + i, rbptr_offset, mbuf_init);
		buffers_consumed += nb_segs;
		cqe_head = (cqe_head + 1) & cqe_mask;
		nicvf_prefetch_store_keep(rx_pkts[i]);
	}

	if (likely(to_process)) {
		rxq->available_space -= to_process;
		rxq->head = cqe_head;
		nicvf_addr_write(rxq->cq_door, to_process);
		rxq->recv_buffers += buffers_consumed;
	}
	if (rxq->recv_buffers > rxq->rx_free_thresh) {
		rxq->recv_buffers -= nicvf_fill_rbdr(rxq, rxq->rx_free_thresh);
		NICVF_RX_ASSERT(rxq->recv_buffers >= 0);
	}

	return to_process;
}

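/* Number of pending CQEs, read straight from the CQ status register. */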
uint32_t
nicvf_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_idx)
{
	struct nicvf_rxq *rxq;

	rxq = dev->data->rx_queues[queue_idx];
	return nicvf_addr_read(rxq->cq_status) & NICVF_CQ_CQE_COUNT_MASK;
}

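/*
 * Drain all outstanding buffer credits into the RBDR in chunks of at
 * most NICVF_MAX_RX_FREE_THRESH (the refill scratch array size); returns
 * the number of buffers posted.
 */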
uint32_t
nicvf_dev_rbdr_refill(struct rte_eth_dev *dev, uint16_t queue_idx)
{
	struct nicvf_rxq *rxq;
	uint32_t to_process;
	uint32_t rx_free;

	rxq = dev->data->rx_queues[queue_idx];
	to_process = rxq->recv_buffers;
	while (rxq->recv_buffers > 0) {
		rx_free = RTE_MIN(rxq->recv_buffers, NICVF_MAX_RX_FREE_THRESH);
		rxq->recv_buffers -= nicvf_fill_rbdr(rxq, rx_free);
	}

	assert(rxq->recv_buffers == 0);
	return to_process;
}