From 057358356e7d05f07ab2df37c12b1cce37a3cca9 Mon Sep 17 00:00:00 2001
From: John Daley <johndale@cisco.com>
Date: Fri, 4 Mar 2016 13:09:00 -0800
Subject: [PATCH 11/22] enic: improve Rx performance
This is a wholesale replacement of the Enic PMD receive path in order
to improve performance and code clarity. The changes are:
- Simplify and reduce the code path length of the receive function.
- Put most of the fast-path receive functions in one file.
- Reduce the number of posted_index updates (pay attention to
  rx_free_thresh).
- Remove the unneeded container structure around the RQ mbuf ring.
- Prefetch the next mbuf and descriptors while processing the current one.
- Use a lookup table for converting CQ flags to mbuf flags.

Signed-off-by: John Daley <johndale@cisco.com>
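---

Note: the posted_index batching referred to above trades a little ring
slack for far fewer PIO writes. A rough sketch of the scheme, using the
names this patch introduces (not the exact driver code):

	nb_hold += nb_rx;	/* descriptors consumed since the last post */
	if (nb_hold > rq->rx_free_thresh) {
		rq->posted_index = enic_ring_add(rq->ring.desc_count,
						 rq->posted_index, nb_hold);
		nb_hold = 0;
		iowrite32(rq->posted_index, &rq->ctrl->posted_index);
	}

The register write is then paid once per rx_free_thresh packets rather
than once per received descriptor.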
 drivers/net/enic/Makefile       |   1 +
 drivers/net/enic/base/vnic_rq.c |  99 ++---------
 drivers/net/enic/base/vnic_rq.h | 147 +---------------
 drivers/net/enic/enic.h         |  16 +-
 drivers/net/enic/enic_ethdev.c  |  27 ++-
 drivers/net/enic/enic_main.c    | 321 ++++++++++------------------------
 drivers/net/enic/enic_res.h     |  16 +-
 drivers/net/enic/enic_rx.c      | 370 ++++++++++++++++++++++++++++++++++++++++
 8 files changed, 511 insertions(+), 486 deletions(-)
 create mode 100644 drivers/net/enic/enic_rx.c
diff --git a/drivers/net/enic/Makefile b/drivers/net/enic/Makefile
index f0ee093..f316274 100644
--- a/drivers/net/enic/Makefile
+++ b/drivers/net/enic/Makefile
@@ -53,6 +53,7 @@ VPATH += $(SRCDIR)/src
 SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_ethdev.c
 SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_main.c
+SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_rx.c
 SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_clsf.c
 SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_res.c
 SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += base/vnic_cq.c
diff --git a/drivers/net/enic/base/vnic_rq.c b/drivers/net/enic/base/vnic_rq.c
index 1441604..cb62c5e 100644
--- a/drivers/net/enic/base/vnic_rq.c
+++ b/drivers/net/enic/base/vnic_rq.c
-static int vnic_rq_alloc_bufs(struct vnic_rq *rq)
-	struct vnic_rq_buf *buf;
-	unsigned int i, j, count = rq->ring.desc_count;
-	unsigned int blks = VNIC_RQ_BUF_BLKS_NEEDED(count);
-	for (i = 0; i < blks; i++) {
-		rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ(count), GFP_ATOMIC);
-	for (i = 0; i < blks; i++) {
-		for (j = 0; j < VNIC_RQ_BUF_BLK_ENTRIES(count); j++) {
-			buf->index = i * VNIC_RQ_BUF_BLK_ENTRIES(count) + j;
-			buf->desc = (u8 *)rq->ring.descs +
-				rq->ring.desc_size * buf->index;
-			if (buf->index + 1 == count) {
-				buf->next = rq->bufs[0];
-			} else if (j + 1 == VNIC_RQ_BUF_BLK_ENTRIES(count)) {
-				buf->next = rq->bufs[i + 1];
-				buf->next = buf + 1;
-	rq->to_use = rq->to_clean = rq->bufs[0];
-int vnic_rq_mem_size(struct vnic_rq *rq, unsigned int desc_count,
-	unsigned int desc_size)
-	mem_size += vnic_dev_desc_ring_size(&rq->ring, desc_count, desc_size);
-	mem_size += VNIC_RQ_BUF_BLKS_NEEDED(rq->ring.desc_count) *
-		VNIC_RQ_BUF_BLK_SZ(rq->ring.desc_count);
 void vnic_rq_free(struct vnic_rq *rq)
 	struct vnic_dev *vdev;
 	vnic_dev_free_desc_ring(vdev, &rq->ring);
-	for (i = 0; i < VNIC_RQ_BUF_BLKS_MAX; i++) {
-		kfree(rq->bufs[i]);
-		rq->bufs[i] = NULL;
 int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index,
 	unsigned int desc_count, unsigned int desc_size)
 	char res_name[NAME_MAX];
@@ -121,18 +65,9 @@ int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index,
 	snprintf(res_name, sizeof(res_name), "%d-rq-%d", instance++, index);
-	err = vnic_dev_alloc_desc_ring(vdev, &rq->ring, desc_count, desc_size,
+	rc = vnic_dev_alloc_desc_ring(vdev, &rq->ring, desc_count, desc_size,
 		rq->socket_id, res_name);
-	err = vnic_rq_alloc_bufs(rq);
 void vnic_rq_init_start(struct vnic_rq *rq, unsigned int cq_index,
@@ -154,9 +89,6 @@ void vnic_rq_init_start(struct vnic_rq *rq, unsigned int cq_index,
 	iowrite32(fetch_index, &rq->ctrl->fetch_index);
 	iowrite32(posted_index, &rq->ctrl->posted_index);
-	rq->to_use = rq->to_clean =
-		&rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES(count)]
-		[fetch_index % VNIC_RQ_BUF_BLK_ENTRIES(count)];
 void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
@@ -176,6 +108,8 @@ void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
 		fetch_index, fetch_index,
 		error_interrupt_enable,
 		error_interrupt_offset);
 void vnic_rq_error_out(struct vnic_rq *rq, unsigned int error)
@@ -212,21 +146,20 @@ int vnic_rq_disable(struct vnic_rq *rq)
 void vnic_rq_clean(struct vnic_rq *rq,
-	void (*buf_clean)(struct vnic_rq *rq, struct vnic_rq_buf *buf))
+	void (*buf_clean)(struct rte_mbuf **buf))
-	struct vnic_rq_buf *buf;
+	struct rte_mbuf **buf;
+	u32 fetch_index, i;
 	unsigned int count = rq->ring.desc_count;
-	buf = rq->to_clean;
-	while (vnic_rq_desc_used(rq) > 0) {
+	buf = &rq->mbuf_ring[0];
-		(*buf_clean)(rq, buf);
-		buf = rq->to_clean = buf->next;
-		rq->ring.desc_avail++;
+	for (i = 0; i < count; i++) {
+		(*buf_clean)(buf);
+		buf++;
+	}
+	rq->ring.desc_avail = count - 1;
+	rq->rx_nb_hold = 0;
 	/* Use current fetch_index as the ring starting point */
 	fetch_index = ioread32(&rq->ctrl->fetch_index);
@@ -235,9 +168,7 @@ void vnic_rq_clean(struct vnic_rq *rq,
 	/* Hardware surprise removal: reset fetch_index */
-	rq->to_use = rq->to_clean =
-		&rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES(count)]
-		[fetch_index % VNIC_RQ_BUF_BLK_ENTRIES(count)];
 	iowrite32(fetch_index, &rq->ctrl->posted_index);
 	vnic_dev_clear_desc_ring(&rq->ring);
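
(Aside: the buf_clean callback contract changes here: it now receives only
a pointer into the mbuf ring. A minimal conforming callback, much like
enic_free_rq_buf later in this patch; the name example_buf_clean is
illustrative:

	static void
	example_buf_clean(struct rte_mbuf **mbuf)
	{
		rte_pktmbuf_free(*mbuf);	/* release the held mbuf */
		*mbuf = NULL;			/* clear the ring slot */
	}

vnic_rq_clean() now walks the whole mbuf_ring array instead of chasing the
old vnic_rq_buf linked list.)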
diff --git a/drivers/net/enic/base/vnic_rq.h b/drivers/net/enic/base/vnic_rq.h
index 0f5c3c1..e083ccc 100644
--- a/drivers/net/enic/base/vnic_rq.h
+++ b/drivers/net/enic/base/vnic_rq.h
@@ -66,42 +66,22 @@ struct vnic_rq_ctrl {
-/* Break the vnic_rq_buf allocations into blocks of 32/64 entries */
-#define VNIC_RQ_BUF_MIN_BLK_ENTRIES 32
-#define VNIC_RQ_BUF_DFLT_BLK_ENTRIES 64
-#define VNIC_RQ_BUF_BLK_ENTRIES(entries) \
-	((unsigned int)((entries < VNIC_RQ_BUF_DFLT_BLK_ENTRIES) ? \
-	VNIC_RQ_BUF_MIN_BLK_ENTRIES : VNIC_RQ_BUF_DFLT_BLK_ENTRIES))
-#define VNIC_RQ_BUF_BLK_SZ(entries) \
-	(VNIC_RQ_BUF_BLK_ENTRIES(entries) * sizeof(struct vnic_rq_buf))
-#define VNIC_RQ_BUF_BLKS_NEEDED(entries) \
-	DIV_ROUND_UP(entries, VNIC_RQ_BUF_BLK_ENTRIES(entries))
-#define VNIC_RQ_BUF_BLKS_MAX VNIC_RQ_BUF_BLKS_NEEDED(4096)
-struct vnic_rq_buf {
-	struct vnic_rq_buf *next;
-	dma_addr_t dma_addr;
-	unsigned int os_buf_index;
-	unsigned int index;
+	unsigned int posted_index;
 	struct vnic_dev *vdev;
-	struct vnic_rq_ctrl __iomem *ctrl;	/* memory-mapped */
+	struct vnic_rq_ctrl __iomem *ctrl;	/* memory-mapped */
 	struct vnic_dev_ring ring;
-	struct vnic_rq_buf *bufs[VNIC_RQ_BUF_BLKS_MAX];
-	struct vnic_rq_buf *to_use;
-	struct vnic_rq_buf *to_clean;
+	struct rte_mbuf **mbuf_ring;		/* array of allocated mbufs */
+	unsigned int mbuf_next_idx;		/* next mb to consume */
 	unsigned int pkts_outstanding;
+	uint16_t rx_nb_hold;
+	uint16_t rx_free_thresh;
 	unsigned int socket_id;
 	struct rte_mempool *mp;
 static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq)
@@ -116,119 +96,13 @@ static inline unsigned int vnic_rq_desc_used(struct vnic_rq *rq)
 	return rq->ring.desc_count - rq->ring.desc_avail - 1;
-static inline void *vnic_rq_next_desc(struct vnic_rq *rq)
-	return rq->to_use->desc;
-static inline unsigned int vnic_rq_next_index(struct vnic_rq *rq)
-	return rq->to_use->index;
-static inline void vnic_rq_post(struct vnic_rq *rq,
-	void *os_buf, unsigned int os_buf_index,
-	dma_addr_t dma_addr, unsigned int len,
-	struct vnic_rq_buf *buf = rq->to_use;
-	buf->os_buf = os_buf;
-	buf->os_buf_index = os_buf_index;
-	buf->dma_addr = dma_addr;
-	rq->ring.desc_avail--;
-	/* Move the posted_index every nth descriptor
-#ifndef VNIC_RQ_RETURN_RATE
-#define VNIC_RQ_RETURN_RATE 0xf	/* keep 2^n - 1 */
-	if ((buf->index & VNIC_RQ_RETURN_RATE) == 0) {
-		/* Adding write memory barrier prevents compiler and/or CPU
-		 * reordering, thus avoiding descriptor posting before
-		 * descriptor is initialized. Otherwise, hardware can read
-		 * stale descriptor fields.
-		iowrite32(buf->index, &rq->ctrl->posted_index);
-static inline void vnic_rq_post_commit(struct vnic_rq *rq,
-	void *os_buf, unsigned int os_buf_index,
-	dma_addr_t dma_addr, unsigned int len)
-	struct vnic_rq_buf *buf = rq->to_use;
-	buf->os_buf = os_buf;
-	buf->os_buf_index = os_buf_index;
-	buf->dma_addr = dma_addr;
-	rq->ring.desc_avail--;
-	/* Move the posted_index every descriptor
-	/* Adding write memory barrier prevents compiler and/or CPU
-	 * reordering, thus avoiding descriptor posting before
-	 * descriptor is initialized. Otherwise, hardware can read
-	 * stale descriptor fields.
-	iowrite32(buf->index, &rq->ctrl->posted_index);
-static inline void vnic_rq_return_descs(struct vnic_rq *rq, unsigned int count)
-	rq->ring.desc_avail += count;
 enum desc_return_options {
 	VNIC_RQ_RETURN_DESC,
 	VNIC_RQ_DEFER_RETURN_DESC,
 };
-static inline int vnic_rq_service(struct vnic_rq *rq,
-	struct cq_desc *cq_desc, u16 completed_index,
-	int desc_return, int (*buf_service)(struct vnic_rq *rq,
-	struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
-	int skipped, void *opaque), void *opaque)
-	struct vnic_rq_buf *buf;
-	buf = rq->to_clean;
-	skipped = (buf->index != completed_index);
-	if ((*buf_service)(rq, cq_desc, buf, skipped, opaque))
-	if (desc_return == VNIC_RQ_RETURN_DESC)
-		rq->ring.desc_avail++;
-	rq->to_clean = buf->next;
-	buf = rq->to_clean;
 static inline int vnic_rq_fill(struct vnic_rq *rq,
 	int (*buf_fill)(struct vnic_rq *rq))
@@ -274,8 +148,5 @@ unsigned int vnic_rq_error_status(struct vnic_rq *rq);
 void vnic_rq_enable(struct vnic_rq *rq);
 int vnic_rq_disable(struct vnic_rq *rq);
 void vnic_rq_clean(struct vnic_rq *rq,
-	void (*buf_clean)(struct vnic_rq *rq, struct vnic_rq_buf *buf));
-int vnic_rq_mem_size(struct vnic_rq *rq, unsigned int desc_count,
-	unsigned int desc_size);
+	void (*buf_clean)(struct rte_mbuf **buf));
 #endif /* _VNIC_RQ_H_ */
diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h
index 9e78305..8c914f5 100644
--- a/drivers/net/enic/enic.h
+++ b/drivers/net/enic/enic.h
 #include "vnic_nic.h"
 #include "vnic_rss.h"
 #include "enic_res.h"
+#include "cq_enet_desc.h"
 
 #define DRV_NAME		"enic_pmd"
 #define DRV_DESCRIPTION		"Cisco VIC Ethernet NIC Poll-mode Driver"
@@ -154,6 +155,16 @@ static inline struct enic *pmd_priv(struct rte_eth_dev *eth_dev)
 	return (struct enic *)eth_dev->data->dev_private;
 }
 
+#define RTE_LIBRTE_ENIC_ASSERT_ENABLE
+#ifdef RTE_LIBRTE_ENIC_ASSERT_ENABLE
+#define ASSERT(x) do {				\
+	if (!(x))				\
+		rte_panic("ENIC: %s\n", #x);	\
+} while (0)
+#else
+#define ASSERT(x)
+#endif
+
 extern void enic_fdir_stats_get(struct enic *enic,
 	struct rte_eth_fdir_stats *stats);
 extern int enic_fdir_add_fltr(struct enic *enic,
@@ -193,9 +204,10 @@ extern void enic_send_pkt(struct enic *enic, struct vnic_wq *wq,
 	uint16_t ol_flags, uint16_t vlan_tag);
 
 extern void enic_post_wq_index(struct vnic_wq *wq);
-extern int enic_poll(struct vnic_rq *rq, struct rte_mbuf **rx_pkts,
-	unsigned int budget, unsigned int *work_done);
 extern int enic_probe(struct enic *enic);
 extern int enic_clsf_init(struct enic *enic);
 extern void enic_clsf_destroy(struct enic *enic);
+uint16_t enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+	uint16_t nb_pkts);
 
 #endif /* _ENIC_H_ */
diff --git a/drivers/net/enic/enic_ethdev.c b/drivers/net/enic/enic_ethdev.c
index 2a88043..6f2ada5 100644
--- a/drivers/net/enic/enic_ethdev.c
+++ b/drivers/net/enic/enic_ethdev.c
@@ -255,7 +255,7 @@ static int enicpmd_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
 	unsigned int socket_id,
-	__rte_unused const struct rte_eth_rxconf *rx_conf,
+	const struct rte_eth_rxconf *rx_conf,
 	struct rte_mempool *mp)
@@ -270,6 +270,10 @@ static int enicpmd_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
 
+	enic->rq[queue_idx].rx_free_thresh = rx_conf->rx_free_thresh;
+	dev_debug(enic, "Set queue_id:%u free thresh:%u\n", queue_idx,
+		enic->rq[queue_idx].rx_free_thresh);
+
 	return enicpmd_dev_setup_intr(enic);
 
@@ -429,6 +433,9 @@ static void enicpmd_dev_info_get(struct rte_eth_dev *eth_dev,
 		DEV_TX_OFFLOAD_IPV4_CKSUM |
 		DEV_TX_OFFLOAD_UDP_CKSUM |
 		DEV_TX_OFFLOAD_TCP_CKSUM;
+	device_info->default_rxconf = (struct rte_eth_rxconf) {
+		.rx_free_thresh = ENIC_DEFAULT_RX_FREE_THRESH
+	};
 
 static void enicpmd_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
@@ -538,18 +545,6 @@ static uint16_t enicpmd_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 
-static uint16_t enicpmd_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
-	uint16_t nb_pkts)
-{
-	struct vnic_rq *rq = (struct vnic_rq *)rx_queue;
-	unsigned int work_done;
-
-	if (enic_poll(rq, rx_pkts, (unsigned int)nb_pkts, &work_done))
-		dev_err(enic, "error in enicpmd poll\n");
-
-	return work_done;
-}
-
 static const struct eth_dev_ops enicpmd_eth_dev_ops = {
 	.dev_configure = enicpmd_dev_configure,
 	.dev_start = enicpmd_dev_start,
@@ -606,7 +601,7 @@ static int eth_enicpmd_dev_init(struct rte_eth_dev *eth_dev)
 	enic->port_id = eth_dev->data->port_id;
 	enic->rte_dev = eth_dev;
 	eth_dev->dev_ops = &enicpmd_eth_dev_ops;
-	eth_dev->rx_pkt_burst = &enicpmd_recv_pkts;
+	eth_dev->rx_pkt_burst = &enic_recv_pkts;
 	eth_dev->tx_pkt_burst = &enicpmd_xmit_pkts;
 
 	pdev = eth_dev->pci_dev;
@@ -635,8 +630,8 @@ static struct eth_driver rte_enic_pmd = {
  * Register as the [Poll Mode] Driver of Cisco ENIC device.
  */
-rte_enic_pmd_init(const char *name __rte_unused,
-	const char *params __rte_unused)
+rte_enic_pmd_init(__rte_unused const char *name,
+	__rte_unused const char *params)
 {
 	ENICPMD_FUNC_TRACE();
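
(Aside: a minimal sketch of how an application would pick up the new
rx_free_thresh knob; enic supplies the default through default_rxconf
above, the rest is the standard ethdev API, and the variable names are
illustrative:

	struct rte_eth_dev_info dev_info;
	struct rte_eth_rxconf rxconf;

	rte_eth_dev_info_get(port_id, &dev_info);
	rxconf = dev_info.default_rxconf;	/* ENIC_DEFAULT_RX_FREE_THRESH */
	rxconf.rx_free_thresh = 64;		/* or override it */
	rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(),
			&rxconf, mb_pool);
)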
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index f818c32..9fff020 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
 #include "vnic_nic.h"
 #include "enic_vnic_wq.h"
 
+static inline struct rte_mbuf *
+rte_rxmbuf_alloc(struct rte_mempool *mp)
+{
+	struct rte_mbuf *m;
+
+	m = __rte_mbuf_raw_alloc(mp);
+	__rte_mbuf_sanity_check_raw(m, 0);
+	return m;
+}
+
 static inline int enic_is_sriov_vf(struct enic *enic)
 {
 	return enic->pdev->id.device_id == PCI_DEVICE_ID_CISCO_VIC_ENET_VF;
@@ -80,16 +91,25 @@ static int is_eth_addr_valid(uint8_t *addr)
 	return !is_mcast_addr(addr) && !is_zero_addr(addr);
 }
 
-static inline struct rte_mbuf *
-enic_rxmbuf_alloc(struct rte_mempool *mp)
+static void
+enic_rxmbuf_queue_release(struct enic *enic, struct vnic_rq *rq)
 {
-	struct rte_mbuf *m;
+	uint16_t i;
 
-	m = __rte_mbuf_raw_alloc(mp);
-	__rte_mbuf_sanity_check_raw(m, 0);
-	return m;
+	if (!rq || !rq->mbuf_ring) {
+		dev_debug(enic, "Pointer to rq or mbuf_ring is NULL");
+		return;
+	}
+
+	for (i = 0; i < enic->config.rq_desc_count; i++) {
+		if (rq->mbuf_ring[i]) {
+			rte_pktmbuf_free_seg(rq->mbuf_ring[i]);
+			rq->mbuf_ring[i] = NULL;
+		}
+	}
+}
 
 void enic_set_hdr_split_size(struct enic *enic, u16 split_hdr_size)
 	vnic_set_hdr_split_size(enic->vdev, split_hdr_size);
@@ -262,13 +282,13 @@ void enic_set_mac_address(struct enic *enic, uint8_t *mac_addr)
 
-enic_free_rq_buf(__rte_unused struct vnic_rq *rq, struct vnic_rq_buf *buf)
+enic_free_rq_buf(struct rte_mbuf **mbuf)
 {
-	rte_pktmbuf_free((struct rte_mbuf *)buf->os_buf);
-	buf->os_buf = NULL;
+	rte_pktmbuf_free(*mbuf);
+	*mbuf = NULL;
 }
 
 void enic_init_vnic_resources(struct enic *enic)
@@ -314,221 +334,47 @@ void enic_init_vnic_resources(struct enic *enic)
 
-static int enic_rq_alloc_buf(struct vnic_rq *rq)
+static int
+enic_alloc_rx_queue_mbufs(struct enic *enic, struct vnic_rq *rq)
 {
-	struct enic *enic = vnic_dev_priv(rq->vdev);
+	struct rte_mbuf *mb;
+	struct rq_enet_desc *rqd = rq->ring.descs;
+	unsigned i;
+	dma_addr_t dma_addr;
-	struct rq_enet_desc *desc = vnic_rq_next_desc(rq);
-	uint8_t type = RQ_ENET_TYPE_ONLY_SOP;
-	u16 split_hdr_size = vnic_get_hdr_split_size(enic->vdev);
-	struct rte_mbuf *mbuf = enic_rxmbuf_alloc(rq->mp);
-	struct rte_mbuf *hdr_mbuf = NULL;
-		dev_err(enic, "mbuf alloc in enic_rq_alloc_buf failed\n");
-	if (unlikely(split_hdr_size)) {
-		if (vnic_rq_desc_avail(rq) < 2) {
-			rte_mempool_put(mbuf->pool, mbuf);
-		hdr_mbuf = enic_rxmbuf_alloc(rq->mp);
-			rte_mempool_put(mbuf->pool, mbuf);
-				"hdr_mbuf alloc in enic_rq_alloc_buf failed\n");
-		hdr_mbuf->data_off = RTE_PKTMBUF_HEADROOM;
-		hdr_mbuf->nb_segs = 2;
-		hdr_mbuf->port = enic->port_id;
-		hdr_mbuf->next = mbuf;
-		dma_addr = (dma_addr_t)
-			(hdr_mbuf->buf_physaddr + hdr_mbuf->data_off);
-		rq_enet_desc_enc(desc, dma_addr, type, split_hdr_size);
-		vnic_rq_post(rq, (void *)hdr_mbuf, 0 /*os_buf_index*/, dma_addr,
-			(unsigned int)split_hdr_size, 0 /*wrid*/);
+	dev_debug(enic, "queue %u, allocating %u rx queue mbufs", rq->index,
+		rq->ring.desc_count);
-		desc = vnic_rq_next_desc(rq);
-		type = RQ_ENET_TYPE_NOT_SOP;
-	mbuf->port = enic->port_id;
-	mbuf->data_off = RTE_PKTMBUF_HEADROOM;
-	dma_addr = (dma_addr_t)
-		(mbuf->buf_physaddr + mbuf->data_off);
-	rq_enet_desc_enc(desc, dma_addr, type, mbuf->buf_len);
-	vnic_rq_post(rq, (void *)mbuf, 0 /*os_buf_index*/, dma_addr,
-		(unsigned int)mbuf->buf_len, 0 /*wrid*/);
-static int enic_rq_indicate_buf(struct vnic_rq *rq,
-	struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
-	int skipped, void *opaque)
-	struct enic *enic = vnic_dev_priv(rq->vdev);
-	struct rte_mbuf **rx_pkt_bucket = (struct rte_mbuf **)opaque;
-	struct rte_mbuf *rx_pkt = NULL;
-	struct rte_mbuf *hdr_rx_pkt = NULL;
-	u8 type, color, eop, sop, ingress_port, vlan_stripped;
-	u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
-	u8 tcp_udp_csum_ok, udp, tcp, ipv4_csum_ok;
-	u8 ipv6, ipv4, ipv4_fragment, fcs_ok, rss_type, csum_not_calc;
-	u16 q_number, completed_index, bytes_written, vlan_tci, checksum;
-	cq_enet_rq_desc_dec((struct cq_enet_rq_desc *)cq_desc,
-		&type, &color, &q_number, &completed_index,
-		&ingress_port, &fcoe, &eop, &sop, &rss_type,
-		&csum_not_calc, &rss_hash, &bytes_written,
-		&packet_error, &vlan_stripped, &vlan_tci, &checksum,
-		&fcoe_sof, &fcoe_fc_crc_ok, &fcoe_enc_error,
-		&fcoe_eof, &tcp_udp_csum_ok, &udp, &tcp,
-		&ipv4_csum_ok, &ipv6, &ipv4, &ipv4_fragment,
-	rx_pkt = (struct rte_mbuf *)buf->os_buf;
-	buf->os_buf = NULL;
-	if (unlikely(packet_error)) {
-		dev_err(enic, "packet error\n");
-		rx_pkt->data_len = 0;
-	if (unlikely(skipped)) {
-		rx_pkt->data_len = 0;
-	if (likely(!vnic_get_hdr_split_size(enic->vdev))) {
-		/* No header split configured */
-		*rx_pkt_bucket = rx_pkt;
-		rx_pkt->pkt_len = bytes_written;
-			rx_pkt->packet_type = RTE_PTYPE_L3_IPV4;
-			if (!csum_not_calc) {
-				if (unlikely(!ipv4_csum_ok))
-					rx_pkt->ol_flags |= PKT_RX_IP_CKSUM_BAD;
-				if ((tcp || udp) && (!tcp_udp_csum_ok))
-					rx_pkt->ol_flags |= PKT_RX_L4_CKSUM_BAD;
-			rx_pkt->packet_type = RTE_PTYPE_L3_IPV6;
-		/* This piece is header */
-		*rx_pkt_bucket = rx_pkt;
-		rx_pkt->pkt_len = bytes_written;
-			/* The packet is smaller than split_hdr_size */
-			*rx_pkt_bucket = rx_pkt;
-			rx_pkt->pkt_len = bytes_written;
-				rx_pkt->packet_type = RTE_PTYPE_L3_IPV4;
-				if (!csum_not_calc) {
-					if (unlikely(!ipv4_csum_ok))
-						rx_pkt->ol_flags |=
-							PKT_RX_IP_CKSUM_BAD;
-					if ((tcp || udp) &&
-						(!tcp_udp_csum_ok))
-						rx_pkt->ol_flags |=
-							PKT_RX_L4_CKSUM_BAD;
-				rx_pkt->packet_type = RTE_PTYPE_L3_IPV6;
-			hdr_rx_pkt = *rx_pkt_bucket;
-			hdr_rx_pkt->pkt_len += bytes_written;
-			hdr_rx_pkt->packet_type =
-			if (!csum_not_calc) {
-				if (unlikely(!ipv4_csum_ok))
-					hdr_rx_pkt->ol_flags |=
-						PKT_RX_IP_CKSUM_BAD;
-				if ((tcp || udp) &&
-					(!tcp_udp_csum_ok))
-					hdr_rx_pkt->ol_flags |=
-						PKT_RX_L4_CKSUM_BAD;
-			hdr_rx_pkt->packet_type =
+	for (i = 0; i < rq->ring.desc_count; i++, rqd++) {
+		mb = rte_rxmbuf_alloc(rq->mp);
+		if (mb == NULL) {
+			dev_err(enic, "RX mbuf alloc failed queue_id=%u",
+				(unsigned)rq->index);
+			return -ENOMEM;
+		}
-	rx_pkt->data_len = bytes_written;
+		dma_addr = (dma_addr_t)(mb->buf_physaddr + mb->data_off);
-	rx_pkt->ol_flags |= PKT_RX_RSS_HASH;
-	rx_pkt->hash.rss = rss_hash;
+		rq_enet_desc_enc(rqd, dma_addr, RQ_ENET_TYPE_ONLY_SOP,
+				mb->buf_len);
+		rq->mbuf_ring[i] = mb;
+	}
-	rx_pkt->ol_flags |= PKT_RX_VLAN_PKT;
-	rx_pkt->vlan_tci = vlan_tci;
+	/* make sure all prior writes are complete before doing the PIO write */
+	rte_rmb();
+
+	/* Post all but the last 2 cache lines' worth of descriptors */
+	rq->posted_index = rq->ring.desc_count - (2 * RTE_CACHE_LINE_SIZE
+			/ sizeof(struct rq_enet_desc));
+	rq->rx_nb_hold = 0;
-static int enic_rq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
-	__rte_unused u8 type, u16 q_number, u16 completed_index, void *opaque)
-	struct enic *enic = vnic_dev_priv(vdev);
-	return vnic_rq_service(&enic->rq[q_number], cq_desc,
-		completed_index, VNIC_RQ_RETURN_DESC,
-		enic_rq_indicate_buf, opaque);
+	dev_debug(enic, "port=%u, qidx=%u, Write %u posted idx, %u sw held\n",
+		enic->port_id, rq->index, rq->posted_index, rq->rx_nb_hold);
+	iowrite32(rq->posted_index, &rq->ctrl->posted_index);
+
+	return 0;
+}
-int enic_poll(struct vnic_rq *rq, struct rte_mbuf **rx_pkts,
-	unsigned int budget, unsigned int *work_done)
-	struct enic *enic = vnic_dev_priv(rq->vdev);
-	unsigned int cq = enic_cq_rq(enic, rq->index);
-	*work_done = vnic_cq_service(&enic->cq[cq],
-		budget, enic_rq_service, (void *)rx_pkts);
-	vnic_rq_fill(rq, enic_rq_alloc_buf);
-	/* Need at least one buffer on ring to get going */
-	if (vnic_rq_desc_used(rq) == 0) {
-		dev_err(enic, "Unable to alloc receive buffers\n");
@@ -576,6 +422,7 @@ enic_intr_handler(__rte_unused struct rte_intr_handle *handle,
 int enic_enable(struct enic *enic)
 {
 	unsigned int index;
+	int err;
 	struct rte_eth_dev *eth_dev = enic->rte_dev;
 
 	eth_dev->data->dev_link.link_speed = vnic_dev_port_speed(enic->vdev);
@@ -586,15 +433,11 @@ int enic_enable(struct enic *enic)
 		dev_warning(enic, "Init of hash table for clsf failed."\
 			"Flow director feature will not work\n");
 
 	for (index = 0; index < enic->rq_count; index++) {
-		vnic_rq_fill(&enic->rq[index], enic_rq_alloc_buf);
-
-		/* Need at least one buffer on ring to get going */
-		if (vnic_rq_desc_used(&enic->rq[index]) == 0) {
-			dev_err(enic, "Unable to alloc receive buffers\n");
+		err = enic_alloc_rx_queue_mbufs(enic, &enic->rq[index]);
+		if (err) {
+			dev_err(enic, "Failed to alloc RX queue mbufs\n");
+			return err;
+		}
 	}
 
@@ -636,6 +479,9 @@ void enic_free_rq(void *rxq)
 	struct vnic_rq *rq = (struct vnic_rq *)rxq;
 	struct enic *enic = vnic_dev_priv(rq->vdev);
 
+	enic_rxmbuf_queue_release(enic, rq);
+	rte_free(rq->mbuf_ring);
+	rq->mbuf_ring = NULL;
+
 	vnic_cq_free(&enic->cq[rq->index]);
 
@@ -664,7 +510,7 @@ int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
 	unsigned int socket_id, struct rte_mempool *mp,
 	struct vnic_rq *rq = &enic->rq[queue_idx];
 
 	rq->socket_id = socket_id;
@@ -687,23 +533,35 @@ int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
 
 	/* Allocate queue resources */
-	err = vnic_rq_alloc(enic->vdev, &enic->rq[queue_idx], queue_idx,
-		enic->config.rq_desc_count,
-		sizeof(struct rq_enet_desc));
+	rc = vnic_rq_alloc(enic->vdev, rq, queue_idx,
+		enic->config.rq_desc_count, sizeof(struct rq_enet_desc));
+	if (rc) {
 		dev_err(enic, "error in allocation of rq\n");
+		return rc;
+	}
 
-	err = vnic_cq_alloc(enic->vdev, &enic->cq[queue_idx], queue_idx,
+	rc = vnic_cq_alloc(enic->vdev, &enic->cq[queue_idx], queue_idx,
 		socket_id, enic->config.rq_desc_count,
 		sizeof(struct cq_enet_rq_desc));
+	if (rc) {
 		dev_err(enic, "error in allocation of cq for rq\n");
+		goto err_free_rq_exit;
+	}
 
+	/* Allocate the mbuf ring */
+	rq->mbuf_ring = (struct rte_mbuf **)rte_zmalloc_socket("rq->mbuf_ring",
+		sizeof(struct rte_mbuf *) * enic->config.rq_desc_count,
+		RTE_CACHE_LINE_SIZE, rq->socket_id);
+
+	if (rq->mbuf_ring != NULL)
+		return 0;
+
+	/* cleanup on error */
+	vnic_cq_free(&enic->cq[queue_idx]);
+err_free_rq_exit:
+	vnic_rq_free(rq);
+	return -ENOMEM;
+}
 
 void enic_free_wq(void *txq)
@@ -790,6 +648,7 @@ int enic_disable(struct enic *enic)
 
 	for (i = 0; i < enic->wq_count; i++)
 		vnic_wq_clean(&enic->wq[i], enic_free_wq_buf);
+
 	for (i = 0; i < enic->rq_count; i++)
 		vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
 	for (i = 0; i < enic->cq_count; i++)
@@ -1074,7 +933,7 @@ int enic_probe(struct enic *enic)
 
 	/* Set ingress vlan rewrite mode before vnic initialization */
 	err = vnic_dev_set_ig_vlan_rewrite_mode(enic->vdev,
-		IG_VLAN_REWRITE_MODE_PRIORITY_TAG_DEFAULT_VLAN);
+		IG_VLAN_REWRITE_MODE_PASS_THRU);
 	if (err) {
 		dev_err(enic,
 			"Failed to set ingress vlan rewrite mode, aborting.\n");
diff --git a/drivers/net/enic/enic_res.h b/drivers/net/enic/enic_res.h
index 49f7e22..33f2e84 100644
--- a/drivers/net/enic/enic_res.h
+++ b/drivers/net/enic/enic_res.h
 #define ENIC_UNICAST_PERFECT_FILTERS	32
 
 #define ENIC_NON_TSO_MAX_DESC		16
+#define ENIC_DEFAULT_RX_FREE_THRESH	32
 
 #define ENIC_SETTING(enic, f) ((enic->config.flags & VENETF_##f) ? 1 : 0)
 
@@ -133,21 +134,6 @@ static inline void enic_queue_wq_desc_tso(struct vnic_wq *wq,
 		WQ_ENET_OFFLOAD_MODE_TSO,
 		eop, 1 /* SOP */, eop, loopback);
 }
 
-static inline void enic_queue_rq_desc(struct vnic_rq *rq,
-	void *os_buf, unsigned int os_buf_index,
-	dma_addr_t dma_addr, unsigned int len)
-{
-	struct rq_enet_desc *desc = vnic_rq_next_desc(rq);
-	u8 type = os_buf_index ?
-		RQ_ENET_TYPE_NOT_SOP : RQ_ENET_TYPE_ONLY_SOP;
-
-	rq_enet_desc_enc(desc,
-		(u64)dma_addr | VNIC_PADDR_TARGET,
-		type, (u16)len);
-
-	vnic_rq_post(rq, os_buf, os_buf_index, dma_addr, len, wrid);
-}
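
(Aside: ENIC_DEFAULT_RX_FREE_THRESH is 32, so at the default setting the
receive loop re-arms the RQ only once more than 32 descriptors are held
back, i.e. roughly one posted_index register write per 33 packets instead
of one per packet.)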
diff --git a/drivers/net/enic/enic_rx.c b/drivers/net/enic/enic_rx.c
new file mode 100644
index 0000000..945a60f
--- /dev/null
+++ b/drivers/net/enic/enic_rx.c
+/*
+ * Copyright 2008-2014 Cisco Systems, Inc.  All rights reserved.
+ * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
+ *
+ * Copyright (c) 2014, Cisco Systems, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_mbuf.h>
+#include <rte_ethdev.h>
+#include <rte_prefetch.h>
+
+#include "enic_compat.h"
+#include "rq_enet_desc.h"
+#include "enic.h"
+
+#define RTE_PMD_USE_PREFETCH
+
+#ifdef RTE_PMD_USE_PREFETCH
+/*
+ * Prefetch a cache line into all cache levels.
+ */
+#define rte_enic_prefetch(p) rte_prefetch0(p)
+#else
+#define rte_enic_prefetch(p) do {} while (0)
+#endif
+
+#ifdef RTE_PMD_PACKET_PREFETCH
+#define rte_packet_prefetch(p) rte_prefetch1(p)
+#else
+#define rte_packet_prefetch(p) do {} while (0)
+#endif
+
+static inline struct rte_mbuf *
+rte_rxmbuf_alloc(struct rte_mempool *mp)
+{
+	struct rte_mbuf *m;
+
+	m = __rte_mbuf_raw_alloc(mp);
+	__rte_mbuf_sanity_check_raw(m, 0);
+	return m;
+}
+
+static inline uint16_t
+enic_cq_rx_desc_ciflags(struct cq_enet_rq_desc *crd)
+{
+	return le16_to_cpu(crd->completed_index_flags) & ~CQ_DESC_COMP_NDX_MASK;
+}
+
+static inline uint16_t
+enic_cq_rx_desc_bwflags(struct cq_enet_rq_desc *crd)
+{
+	return(le16_to_cpu(crd->bytes_written_flags) &
+		~CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK);
+}
+
+static inline uint8_t
+enic_cq_rx_desc_packet_error(uint16_t bwflags)
+{
+	return((bwflags & CQ_ENET_RQ_DESC_FLAGS_TRUNCATED) ==
+		CQ_ENET_RQ_DESC_FLAGS_TRUNCATED);
+}
+
+static inline uint8_t
+enic_cq_rx_desc_eop(uint16_t ciflags)
+{
+	return (ciflags & CQ_ENET_RQ_DESC_FLAGS_EOP)
+		== CQ_ENET_RQ_DESC_FLAGS_EOP;
+}
+
+static inline uint8_t
+enic_cq_rx_desc_csum_not_calc(struct cq_enet_rq_desc *cqrd)
+{
+	return ((le16_to_cpu(cqrd->q_number_rss_type_flags) &
+		CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) ==
+		CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC);
+}
+
+static inline uint8_t
+enic_cq_rx_desc_ipv4_csum_ok(struct cq_enet_rq_desc *cqrd)
+{
+	return ((cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) ==
+		CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK);
+}
+
+static inline uint8_t
+enic_cq_rx_desc_tcp_udp_csum_ok(struct cq_enet_rq_desc *cqrd)
+{
+	return((cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) ==
+		CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK);
+}
+
+static inline uint8_t
+enic_cq_rx_desc_rss_type(struct cq_enet_rq_desc *cqrd)
+{
+	return (uint8_t)((le16_to_cpu(cqrd->q_number_rss_type_flags) >>
+		CQ_DESC_Q_NUM_BITS) & CQ_ENET_RQ_DESC_RSS_TYPE_MASK);
+}
+
+static inline uint32_t
+enic_cq_rx_desc_rss_hash(struct cq_enet_rq_desc *cqrd)
+{
+	return le32_to_cpu(cqrd->rss_hash);
+}
+
+static inline uint8_t
+enic_cq_rx_desc_fcs_ok(struct cq_enet_rq_desc *cqrd)
+{
+	return ((cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_FCS_OK) ==
+		CQ_ENET_RQ_DESC_FLAGS_FCS_OK);
+}
+
+static inline uint16_t
+enic_cq_rx_desc_vlan(struct cq_enet_rq_desc *cqrd)
+{
+	return le16_to_cpu(cqrd->vlan);
+}
+
+static inline uint16_t
+enic_cq_rx_desc_n_bytes(struct cq_desc *cqd)
+{
+	struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
+	return le16_to_cpu(cqrd->bytes_written_flags) &
+		CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK;
+}
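+
+/*
+ * The accessors above replace the monolithic cq_enet_rq_desc_dec() used
+ * by the old receive path: each one extracts a single field, so the hot
+ * loop only pays for the flags it actually consumes.
+ */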
+
+static inline uint64_t
+enic_cq_rx_to_pkt_err_flags(struct cq_desc *cqd)
+{
+	struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
+	uint16_t bwflags;
+	uint64_t pkt_err_flags = 0;
+
+	bwflags = enic_cq_rx_desc_bwflags(cqrd);
+
+	/* Check for packet error. Can't be more specific than MAC error */
+	if (enic_cq_rx_desc_packet_error(bwflags)) {
+		pkt_err_flags |= PKT_RX_MAC_ERR;
+	}
+
+	/* Check for bad FCS. MAC error isn't quite right, but no other choice */
+	if (!enic_cq_rx_desc_fcs_ok(cqrd)) {
+		pkt_err_flags |= PKT_RX_MAC_ERR;
+	}
+	return pkt_err_flags;
+}
+
+/*
+ * Lookup table to translate RX CQ flags to mbuf flags.
+ */
+static inline uint32_t
+enic_cq_rx_flags_to_pkt_type(struct cq_desc *cqd)
+{
+	struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
+	uint8_t cqrd_flags = cqrd->flags;
+	static const uint32_t cq_type_table[128] __rte_cache_aligned = {
+		[32] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4,
+		[34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4
+			| RTE_PTYPE_L4_UDP,
+		[36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4
+			| RTE_PTYPE_L4_TCP,
+		[96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4
+			| RTE_PTYPE_L4_FRAG,
+		[16] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6,
+		[18] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6
+			| RTE_PTYPE_L4_UDP,
+		[20] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6
+			| RTE_PTYPE_L4_TCP,
+		[80] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6
+			| RTE_PTYPE_L4_FRAG,
+		/* All others reserved */
+	};
+	cqrd_flags &= CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT
+		| CQ_ENET_RQ_DESC_FLAGS_IPV4 | CQ_ENET_RQ_DESC_FLAGS_IPV6
+		| CQ_ENET_RQ_DESC_FLAGS_TCP | CQ_ENET_RQ_DESC_FLAGS_UDP;
+	return cq_type_table[cqrd_flags];
+}
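+
+/*
+ * Example, assuming the flag layout in cq_enet_desc.h (FLAGS_IPV4 is
+ * bit 5 = 0x20, FLAGS_UDP is bit 1 = 0x02): an IPv4/UDP completion
+ * masks down to 0x22, so cq_type_table[34] yields RTE_PTYPE_L2_ETHER |
+ * RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP. Unlisted indexes read 0, i.e.
+ * RTE_PTYPE_UNKNOWN.
+ */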
+
+static inline void
+enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct rte_mbuf *mbuf)
+{
+	struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd;
+	uint16_t ciflags, bwflags, pkt_flags = 0;
+	ciflags = enic_cq_rx_desc_ciflags(cqrd);
+	bwflags = enic_cq_rx_desc_bwflags(cqrd);
+
+	ASSERT(mbuf->ol_flags == 0);
+
+	/* flags are meaningless if !EOP */
+	if (unlikely(!enic_cq_rx_desc_eop(ciflags)))
+		goto mbuf_flags_done;
+
+	/* VLAN stripping */
+	if (bwflags & CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) {
+		pkt_flags |= PKT_RX_VLAN_PKT;
+		mbuf->vlan_tci = enic_cq_rx_desc_vlan(cqrd);
+	} else {
+		mbuf->vlan_tci = 0;
+	}
+
+	/* RSS flag */
+	if (enic_cq_rx_desc_rss_type(cqrd)) {
+		pkt_flags |= PKT_RX_RSS_HASH;
+		mbuf->hash.rss = enic_cq_rx_desc_rss_hash(cqrd);
+	}
+
+	/* checksum flags */
+	if (!enic_cq_rx_desc_csum_not_calc(cqrd) &&
+		(mbuf->packet_type & RTE_PTYPE_L3_IPV4)) {
+		if (unlikely(!enic_cq_rx_desc_ipv4_csum_ok(cqrd)))
+			pkt_flags |= PKT_RX_IP_CKSUM_BAD;
+		if (mbuf->packet_type & (RTE_PTYPE_L4_UDP | RTE_PTYPE_L4_TCP)) {
+			if (unlikely(!enic_cq_rx_desc_tcp_udp_csum_ok(cqrd)))
+				pkt_flags |= PKT_RX_L4_CKSUM_BAD;
+		}
+	}
+
+mbuf_flags_done:
+	mbuf->ol_flags = pkt_flags;
+}
+
+static inline uint32_t
+enic_ring_add(uint32_t n_descriptors, uint32_t i0, uint32_t i1)
+{
+	uint32_t d = i0 + i1;
+	ASSERT(i0 < n_descriptors);
+	ASSERT(i1 < n_descriptors);
+	d -= (d >= n_descriptors) ? n_descriptors : 0;
+	return d;
+}
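+
+/*
+ * Example: enic_ring_add(512, 500, 40) == 28. Both inputs are already
+ * smaller than the ring size, so a single conditional subtract is
+ * enough to handle the wrap.
+ */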
+
+uint16_t
+enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+	uint16_t nb_pkts)
+{
+	struct vnic_rq *rq = rx_queue;
+	struct enic *enic = vnic_dev_priv(rq->vdev);
+	unsigned int rx_id;
+	struct rte_mbuf *nmb, *rxmb;
+	uint16_t nb_rx = 0;
+	uint16_t nb_hold;
+	struct vnic_cq *cq;
+	volatile struct cq_desc *cqd_ptr;
+	uint8_t color;
+
+	cq = &enic->cq[enic_cq_rq(enic, rq->index)];
+	rx_id = cq->to_clean;		/* index of cqd, rqd, mbuf_table */
+	cqd_ptr = (struct cq_desc *)(cq->ring.descs) + rx_id;
+
+	nb_hold = rq->rx_nb_hold;	/* mbufs held by software */
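+
+	/*
+	 * Each CQ descriptor carries a color bit that the NIC inverts on
+	 * every pass over the ring. An entry whose color still equals
+	 * cq->last_color has not been written this pass, so the loop
+	 * below can detect new completions without reading an index
+	 * register.
+	 */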
+	while (nb_rx < nb_pkts) {
+		uint16_t rx_pkt_len;
+		volatile struct rq_enet_desc *rqd_ptr;
+		dma_addr_t dma_addr;
+		struct cq_desc cqd;
+		uint64_t ol_err_flags;
+
+		/* Check for pkts available */
+		color = (cqd_ptr->type_color >> CQ_DESC_COLOR_SHIFT)
+			& CQ_DESC_COLOR_MASK;
+		if (color == cq->last_color)
+			break;
+
+		/* Get the cq descriptor and rq pointer */
+		cqd = *cqd_ptr;
+		rqd_ptr = (struct rq_enet_desc *)(rq->ring.descs) + rx_id;
+
+		/* allocate a new mbuf */
+		nmb = rte_rxmbuf_alloc(rq->mp);
+		if (nmb == NULL) {
+			dev_err(enic, "RX mbuf alloc failed port=%u qid=%u",
+				enic->port_id, (unsigned)rq->index);
+			rte_eth_devices[enic->port_id].
+					data->rx_mbuf_alloc_failed++;
+			break;
+		}
+
+		/* Check for FCS or packet errors */
+		ol_err_flags = enic_cq_rx_to_pkt_err_flags(&cqd);
+		if (ol_err_flags == 0)
+			rx_pkt_len = enic_cq_rx_desc_n_bytes(&cqd);
+		else
+			rx_pkt_len = 0;
+
+		/* Get the mbuf to return and replace with one just allocated */
+		rxmb = rq->mbuf_ring[rx_id];
+		rq->mbuf_ring[rx_id] = nmb;
+
+		/* Increment cqd, rqd, mbuf_table index */
+		rx_id++;
+		if (unlikely(rx_id == rq->ring.desc_count)) {
+			rx_id = 0;
+			cq->last_color = cq->last_color ? 0 : 1;
+		}
+
+		/* Prefetch next mbuf & desc while processing current one */
+		cqd_ptr = (struct cq_desc *)(cq->ring.descs) + rx_id;
+		rte_enic_prefetch(cqd_ptr);
+		rte_enic_prefetch(rq->mbuf_ring[rx_id]);
+		rte_enic_prefetch((struct rq_enet_desc *)(rq->ring.descs)
+				+ rx_id);
+
+		/* Push descriptor for newly allocated mbuf */
+		dma_addr = (dma_addr_t)(nmb->buf_physaddr + nmb->data_off);
+		rqd_ptr->address = rte_cpu_to_le_64(dma_addr);
+		rqd_ptr->length_type = cpu_to_le16(nmb->buf_len);
+
+		/* Fill in the rest of the mbuf */
+		rxmb->data_off = RTE_PKTMBUF_HEADROOM;
+		rxmb->nb_segs = 1;
+		rxmb->next = NULL;
+		rxmb->pkt_len = rx_pkt_len;
+		rxmb->data_len = rx_pkt_len;
+		rxmb->port = enic->port_id;
+		rxmb->packet_type = enic_cq_rx_flags_to_pkt_type(&cqd);
+		rxmb->ol_flags = ol_err_flags;
+		if (!ol_err_flags)
+			enic_cq_rx_to_pkt_flags(&cqd, rxmb);
+
+		/* prefetch mbuf data for caller */
+		rte_packet_prefetch(RTE_PTR_ADD(rxmb->buf_addr,
+				RTE_PKTMBUF_HEADROOM));
+
+		/* store the mbuf address into the next entry of the array */
+		rx_pkts[nb_rx++] = rxmb;
+	}
+
+	nb_hold += nb_rx;
+	cq->to_clean = rx_id;
+
+	if (nb_hold > rq->rx_free_thresh) {
+		rq->posted_index = enic_ring_add(rq->ring.desc_count,
+				rq->posted_index, nb_hold);
+		nb_hold = 0;
+		rte_mb();
+		iowrite32(rq->posted_index, &rq->ctrl->posted_index);
+	}
+
+	rq->rx_nb_hold = nb_hold;
+
+	return nb_rx;
+}