/*
* BSD LICENSE
*
- * Copyright (C) Cavium networks Ltd. 2016.
+ * Copyright (C) Cavium, Inc. 2016.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
- * * Neither the name of Cavium networks nor the names of its
+ * * Neither the name of Cavium, Inc nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
entry->buff[0] = sqe.buff[0];
}
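+/* The SQ header descriptor spans two 64-bit words, but
+ * fill_sq_desc_header() writes only word0 (entry->buff[0] above);
+ * clear word1 so a reused ring slot cannot carry stale data into
+ * the new header.
+ */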
+static inline void __hot
+fill_sq_desc_header_zero_w1(union sq_entry_t *entry,
+ struct rte_mbuf *pkt)
+{
+ fill_sq_desc_header(entry, pkt);
+ entry->buff[1] = 0ULL;
+}
+
void __hot
nicvf_single_pool_free_xmited_buffers(struct nicvf_txq *sq)
{
free_desc -= TX_DESC_PER_PKT;
}
- sq->tail = tail;
- sq->xmit_bufs += i;
- rte_wmb();
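+ /* Skip the barrier and doorbell write when no packets were queued */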
+ if (likely(i)) {
+ sq->tail = tail;
+ sq->xmit_bufs += i;
+ rte_wmb();
- /* Inform HW to xmit the packets */
- nicvf_addr_write(sq->sq_door, i * TX_DESC_PER_PKT);
+ /* Inform HW to xmit the packets */
+ nicvf_addr_write(sq->sq_door, i * TX_DESC_PER_PKT);
+ }
return i;
}
used_bufs += nb_segs;
txbuffs[tail] = NULL;
- fill_sq_desc_header(desc_ptr + tail, pkt);
+ fill_sq_desc_header_zero_w1(desc_ptr + tail, pkt);
tail = (tail + 1) & qlen_mask;
txbuffs[tail] = pkt;
}
}
- sq->tail = tail;
- sq->xmit_bufs += used_bufs;
- rte_wmb();
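+ /* Ring the doorbell only when descriptors were actually queued */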
+ if (likely(used_desc)) {
+ sq->tail = tail;
+ sq->xmit_bufs += used_bufs;
+ rte_wmb();
- /* Inform HW to xmit the packets */
- nicvf_addr_write(sq->sq_door, used_desc);
- return nb_pkts;
+ /* Inform HW to xmit the packets */
+ nicvf_addr_write(sq->sq_door, used_desc);
+ }
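+ /* Return the count actually queued, which may be less than nb_pkts */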
+ return i;
}
static const uint32_t ptype_table[16][16] __rte_cache_aligned = {
union cq_entry_t *desc = rxq->desc;
const uint64_t cqe_mask = rxq->qlen_mask;
uint64_t rb0_ptr, mbuf_phys_off = rxq->mbuf_phys_off;
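+ /* Precomputed rearm template: data_off, refcnt, nb_segs and port in
+ * one 64-bit word, applied to each mbuf with a single store.
+ */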
+ const uint64_t mbuf_init = rxq->mbuf_initializer.value;
uint32_t cqe_head = rxq->head & cqe_mask;
int32_t available_space = rxq->available_space;
- uint8_t port_id = rxq->port_id;
const uint8_t rbptr_offset = rxq->rbptr_offset;
to_process = nicvf_rx_pkts_to_process(rxq, nb_pkts, available_space);
rb0_ptr = *((uint64_t *)cqe_rx + rbptr_offset);
pkt = (struct rte_mbuf *)nicvf_mbuff_phy2virt
(rb0_ptr - cqe_rx_w1.align_pad, mbuf_phys_off);
-
pkt->ol_flags = 0;
- pkt->port = port_id;
pkt->data_len = cqe_rx_w3.rb0_sz;
- pkt->data_off = RTE_PKTMBUF_HEADROOM + cqe_rx_w1.align_pad;
- pkt->nb_segs = 1;
pkt->pkt_len = cqe_rx_w3.rb0_sz;
pkt->packet_type = nicvf_rx_classify_pkt(cqe_rx_w0);
-
+ nicvf_mbuff_init_update(pkt, mbuf_init, cqe_rx_w1.align_pad);
nicvf_rx_offload(cqe_rx_w0, cqe_rx_w2, pkt);
- rte_mbuf_refcnt_set(pkt, 1);
rx_pkts[i] = pkt;
cqe_head = (cqe_head + 1) & cqe_mask;
nicvf_prefetch_store_keep(pkt);
rxq->head = cqe_head;
nicvf_addr_write(rxq->cq_door, to_process);
rxq->recv_buffers += to_process;
- if (rxq->recv_buffers > rxq->rx_free_thresh) {
- rxq->recv_buffers -= nicvf_fill_rbdr(rxq,
- rxq->rx_free_thresh);
- NICVF_RX_ASSERT(rxq->recv_buffers >= 0);
- }
+ }
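+ /* Refill now runs outside the completion-processing block, so the
+ * ring is replenished even when a poll finds no packets.
+ */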
+ if (rxq->recv_buffers > rxq->rx_free_thresh) {
+ rxq->recv_buffers -= nicvf_fill_rbdr(rxq, rxq->rx_free_thresh);
+ NICVF_RX_ASSERT(rxq->recv_buffers >= 0);
}
return to_process;
static inline uint16_t __hot
nicvf_process_cq_mseg_entry(struct cqe_rx_t *cqe_rx,
- uint64_t mbuf_phys_off, uint8_t port_id,
- struct rte_mbuf **rx_pkt, uint8_t rbptr_offset)
+ uint64_t mbuf_phys_off,
+ struct rte_mbuf **rx_pkt, uint8_t rbptr_offset,
+ uint64_t mbuf_init)
{
struct rte_mbuf *pkt, *seg, *prev;
cqe_rx_word0_t cqe_rx_w0;
(rb_ptr[0] - cqe_rx_w1.align_pad, mbuf_phys_off);
pkt->ol_flags = 0;
- pkt->port = port_id;
- pkt->data_off = RTE_PKTMBUF_HEADROOM + cqe_rx_w1.align_pad;
- pkt->nb_segs = nb_segs;
pkt->pkt_len = cqe_rx_w1.pkt_len;
pkt->data_len = rb_sz[nicvf_frag_num(0)];
- rte_mbuf_refcnt_set(pkt, 1);
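+ /* One store applies the rearm template plus the real segment count */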
+ nicvf_mbuff_init_mseg_update(
+ pkt, mbuf_init, cqe_rx_w1.align_pad, nb_segs);
pkt->packet_type = nicvf_rx_classify_pkt(cqe_rx_w0);
nicvf_rx_offload(cqe_rx_w0, cqe_rx_w2, pkt);
prev->next = seg;
seg->data_len = rb_sz[nicvf_frag_num(seg_idx)];
- seg->port = port_id;
- seg->data_off = RTE_PKTMBUF_HEADROOM;
- rte_mbuf_refcnt_set(seg, 1);
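+ /* Trailing segments take the template as-is, with no pad offset */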
+ nicvf_mbuff_init_update(seg, mbuf_init, 0);
prev = seg;
}
uint32_t i, to_process, cqe_head, buffers_consumed = 0;
int32_t available_space = rxq->available_space;
uint16_t nb_segs;
- const uint8_t port_id = rxq->port_id;
+ const uint64_t mbuf_init = rxq->mbuf_initializer.value;
const uint8_t rbptr_offset = rxq->rbptr_offset;
cqe_head = rxq->head & cqe_mask;
cq_entry = &desc[cqe_head];
cqe_rx = (struct cqe_rx_t *)cq_entry;
nb_segs = nicvf_process_cq_mseg_entry(cqe_rx, mbuf_phys_off,
- port_id, rx_pkts + i, rbptr_offset);
+ rx_pkts + i, rbptr_offset, mbuf_init);
buffers_consumed += nb_segs;
cqe_head = (cqe_head + 1) & cqe_mask;
nicvf_prefetch_store_keep(rx_pkts[i]);
rxq->head = cqe_head;
nicvf_addr_write(rxq->cq_door, to_process);
rxq->recv_buffers += buffers_consumed;
- if (rxq->recv_buffers > rxq->rx_free_thresh) {
- rxq->recv_buffers -=
- nicvf_fill_rbdr(rxq, rxq->rx_free_thresh);
- NICVF_RX_ASSERT(rxq->recv_buffers >= 0);
- }
+ }
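+ /* As in the single-segment path: refill even on an idle poll */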
+ if (rxq->recv_buffers > rxq->rx_free_thresh) {
+ rxq->recv_buffers -= nicvf_fill_rbdr(rxq, rxq->rx_free_thresh);
+ NICVF_RX_ASSERT(rxq->recv_buffers >= 0);
}
return to_process;