From f03d5a02fc2b3cc24bf059a273ea1473cdb9993b Mon Sep 17 00:00:00 2001
From: John Lo <loj@cisco.com>
Date: Tue, 7 Jun 2016 12:40:07 +0200
Subject: [PATCH 16/17] net/enic: add scattered Rx support
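
Receive frames larger than one mbuf by enabling scattered Rx. Each
user-visible receive queue is backed by two hardware queues: a
start-of-packet (SOP) RQ and a data RQ. When the configured MTU does
not fit in a single mbuf data buffer, the data RQ is put into use and
the requested descriptor count is split between the two rings; the
completion queue is sized to cover both. On receive, the driver chains
the SOP buffer and any data buffers of a frame into a multi-segment
mbuf and hands it up on the end-of-packet completion.

For example, with mbuf_size = 1920 and mtu = 9000, mbufs_per_pkt =
ceil(9000 / 1920) = 5, so each received frame consumes one SOP
descriptor and four data descriptors.
---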
 drivers/net/enic/base/rq_enet_desc.h |   2 +-
 drivers/net/enic/base/vnic_rq.c      |  12 +-
 drivers/net/enic/base/vnic_rq.h      |  18 ++-
 drivers/net/enic/enic.h              |  10 ++
 drivers/net/enic/enic_main.c         | 236 +++++++++++++++++++++++++++--------
 drivers/net/enic/enic_rxtx.c         | 139 ++++++++++++++-------
 6 files changed, 313 insertions(+), 104 deletions(-)
diff --git a/drivers/net/enic/base/rq_enet_desc.h b/drivers/net/enic/base/rq_enet_desc.h
index 7292d9d..13e24b4 100644
--- a/drivers/net/enic/base/rq_enet_desc.h
+++ b/drivers/net/enic/base/rq_enet_desc.h
@@ -55,7 +55,7 @@ enum rq_enet_type_types {
 #define RQ_ENET_TYPE_BITS		2
 #define RQ_ENET_TYPE_MASK		((1 << RQ_ENET_TYPE_BITS) - 1)
-static inline void rq_enet_desc_enc(struct rq_enet_desc *desc,
+static inline void rq_enet_desc_enc(volatile struct rq_enet_desc *desc,
 	u64 address, u8 type, u16 length)
 	desc->address = cpu_to_le64(address);
diff --git a/drivers/net/enic/base/vnic_rq.c b/drivers/net/enic/base/vnic_rq.c
index cb62c5e..d97f93e 100644
--- a/drivers/net/enic/base/vnic_rq.c
+++ b/drivers/net/enic/base/vnic_rq.c
@@ -84,11 +84,16 @@ void vnic_rq_init_start(struct vnic_rq *rq, unsigned int cq_index,
 	iowrite32(cq_index, &rq->ctrl->cq_index);
 	iowrite32(error_interrupt_enable, &rq->ctrl->error_interrupt_enable);
 	iowrite32(error_interrupt_offset, &rq->ctrl->error_interrupt_offset);
-	iowrite32(0, &rq->ctrl->dropped_packet_count);
 	iowrite32(0, &rq->ctrl->error_status);
 	iowrite32(fetch_index, &rq->ctrl->fetch_index);
 	iowrite32(posted_index, &rq->ctrl->posted_index);
+	iowrite32(((rq->is_sop << 10) | rq->data_queue_idx),
+		  &rq->ctrl->data_ring);
 void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
@@ -96,6 +101,7 @@ void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
 	unsigned int error_interrupt_offset)
 	/* Use current fetch_index as the ring starting point */
 	fetch_index = ioread32(&rq->ctrl->fetch_index);
@@ -110,6 +116,8 @@ void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
 		error_interrupt_offset);
+	rq->pkt_first_seg = NULL;
+	rq->pkt_last_seg = NULL;
 void vnic_rq_error_out(struct vnic_rq *rq, unsigned int error)
diff --git a/drivers/net/enic/base/vnic_rq.h b/drivers/net/enic/base/vnic_rq.h
index 424415c..d1e2f52 100644
--- a/drivers/net/enic/base/vnic_rq.h
+++ b/drivers/net/enic/base/vnic_rq.h
@@ -60,10 +60,18 @@ struct vnic_rq_ctrl {
 	u32 error_status;		/* 0x48 */
-	u32 dropped_packet_count;	/* 0x50 */
+	u32 tcp_sn;			/* 0x50 */
-	u32 dropped_packet_count_rc;	/* 0x58 */
+	u32 unused;			/* 0x58 */
+	u32 dca_select;			/* 0x60 */
+	u32 dca_value;			/* 0x68 */
+	u32 data_ring;			/* 0x70 */
+	u32 header_split;		/* 0x78 */
@@ -82,6 +90,12 @@ struct vnic_rq {
 	struct rte_mempool *mp;
+	uint16_t data_queue_idx;	/* index of the paired data RQ (valid on the SOP RQ) */
+	struct rte_mbuf *pkt_first_seg;	/* first segment of the packet being reassembled */
+	struct rte_mbuf *pkt_last_seg;	/* last segment of the packet being reassembled */
+	unsigned int max_mbufs_per_pkt;	/* worst-case mbufs needed per received packet */
 static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq)
diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h
index 7c1b5c9..d2de6ee 100644
--- a/drivers/net/enic/enic.h
+++ b/drivers/net/enic/enic.h
@@ -142,6 +142,16 @@ struct enic {
 	struct enic_soft_stats soft_stats;
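+/* Each user-visible RQ is backed by two hardware RQs: a start-of-packet
+ * (SOP) RQ and a data RQ. These helpers map a user RQ index to the
+ * corresponding hardware RQ indexes.
+ */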
+static inline unsigned int enic_sop_rq(__rte_unused struct enic *enic, unsigned int rq)
+static inline unsigned int enic_data_rq(__rte_unused struct enic *enic, unsigned int rq)
 static inline unsigned int enic_cq_rq(__rte_unused struct enic *enic, unsigned int rq)
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index a00565a..be17707 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -247,15 +247,23 @@ void enic_init_vnic_resources(struct enic *enic)
 	unsigned int error_interrupt_offset = 0;
 	unsigned int index = 0;
+	struct vnic_rq *data_rq;
 	vnic_dev_stats_clear(enic->vdev);
 	for (index = 0; index < enic->rq_count; index++) {
-		vnic_rq_init(&enic->rq[index],
+		vnic_rq_init(&enic->rq[enic_sop_rq(enic, index)],
 			enic_cq_rq(enic, index),
 			error_interrupt_enable,
 			error_interrupt_offset);
+		data_rq = &enic->rq[enic_data_rq(enic, index)];
+		if (data_rq->in_use)
+			vnic_rq_init(data_rq,
+				enic_cq_rq(enic, index),
+				error_interrupt_enable,
+				error_interrupt_offset);
 		cq_idx = enic_cq_rq(enic, index);
 		vnic_cq_init(&enic->cq[cq_idx],
 			0 /* flow_control_enable */,
@@ -305,6 +313,9 @@ enic_alloc_rx_queue_mbufs(struct enic *enic, struct vnic_rq *rq)
 	dev_debug(enic, "queue %u, allocating %u rx queue mbufs\n", rq->index,
 		  rq->ring.desc_count);
@@ -316,20 +327,20 @@ enic_alloc_rx_queue_mbufs(struct enic *enic, struct vnic_rq *rq)
-		dma_addr = (dma_addr_t)(mb->buf_physaddr
-			   + RTE_PKTMBUF_HEADROOM);
-		rq_enet_desc_enc(rqd, dma_addr, RQ_ENET_TYPE_ONLY_SOP,
-				 mb->buf_len - RTE_PKTMBUF_HEADROOM);
+		dma_addr = (dma_addr_t)(mb->buf_physaddr + RTE_PKTMBUF_HEADROOM);
+		rq_enet_desc_enc(rqd, dma_addr,
+				 (rq->is_sop ? RQ_ENET_TYPE_ONLY_SOP
+				  : RQ_ENET_TYPE_NOT_SOP),
+				 mb->buf_len - RTE_PKTMBUF_HEADROOM);
 		rq->mbuf_ring[i] = mb;
 	/* make sure all prior writes are complete before doing the PIO write */
-	/* Post all but the last 2 cache lines' worth of descriptors */
-	rq->posted_index = rq->ring.desc_count - (2 * RTE_CACHE_LINE_SIZE
-			/ sizeof(struct rq_enet_desc));
+	/* Post all but the last buffer to VIC. */
+	rq->posted_index = rq->ring.desc_count - 1;
 	dev_debug(enic, "port=%u, qidx=%u, Write %u posted idx, %u sw held\n",
@@ -337,6 +348,8 @@ enic_alloc_rx_queue_mbufs(struct enic *enic, struct vnic_rq *rq)
 	iowrite32(rq->posted_index, &rq->ctrl->posted_index);
@@ -398,17 +411,25 @@ int enic_enable(struct enic *enic)
 			"Flow director feature will not work\n");
 	for (index = 0; index < enic->rq_count; index++) {
-		err = enic_alloc_rx_queue_mbufs(enic, &enic->rq[index]);
+		err = enic_alloc_rx_queue_mbufs(enic,
+			&enic->rq[enic_sop_rq(enic, index)]);
-			dev_err(enic, "Failed to alloc RX queue mbufs\n");
+			dev_err(enic, "Failed to alloc sop RX queue mbufs\n");
+		err = enic_alloc_rx_queue_mbufs(enic,
+			&enic->rq[enic_data_rq(enic, index)]);
+			/* release the previously allocated mbufs for the sop rq */
+			enic_rxmbuf_queue_release(enic,
+				&enic->rq[enic_sop_rq(enic, index)]);
+			dev_err(enic, "Failed to alloc data RX queue mbufs\n");
 	for (index = 0; index < enic->wq_count; index++)
-		vnic_wq_enable(&enic->wq[index]);
+		enic_start_wq(enic, index);
 	for (index = 0; index < enic->rq_count; index++)
-		vnic_rq_enable(&enic->rq[index]);
+		enic_start_rq(enic, index);
 	vnic_dev_enable_wait(enic->vdev);
@@ -440,14 +461,26 @@ int enic_alloc_intr_resources(struct enic *enic)
 void enic_free_rq(void *rxq)
-	struct vnic_rq *rq = (struct vnic_rq *)rxq;
-	struct enic *enic = vnic_dev_priv(rq->vdev);
+	struct vnic_rq *rq_sop = (struct vnic_rq *)rxq;
+	struct enic *enic = vnic_dev_priv(rq_sop->vdev);
+	struct vnic_rq *rq_data = &enic->rq[rq_sop->data_queue_idx];
-	enic_rxmbuf_queue_release(enic, rq);
-	rte_free(rq->mbuf_ring);
-	rq->mbuf_ring = NULL;
-	vnic_cq_free(&enic->cq[rq->index]);
+	enic_rxmbuf_queue_release(enic, rq_sop);
+	if (rq_data->in_use)
+		enic_rxmbuf_queue_release(enic, rq_data);
+	rte_free(rq_sop->mbuf_ring);
+	if (rq_data->in_use)
+		rte_free(rq_data->mbuf_ring);
+	rq_sop->mbuf_ring = NULL;
+	rq_data->mbuf_ring = NULL;
+	vnic_rq_free(rq_sop);
+	if (rq_data->in_use)
+		vnic_rq_free(rq_data);
+	vnic_cq_free(&enic->cq[rq_sop->index]);
 void enic_start_wq(struct enic *enic, uint16_t queue_idx)
@@ -462,12 +495,32 @@ int enic_stop_wq(struct enic *enic, uint16_t queue_idx)
 void enic_start_rq(struct enic *enic, uint16_t queue_idx)
-	vnic_rq_enable(&enic->rq[queue_idx]);
+	struct vnic_rq *rq_sop = &enic->rq[enic_sop_rq(enic, queue_idx)];
+	struct vnic_rq *rq_data = &enic->rq[rq_sop->data_queue_idx];
+	/* enable the data RQ before its SOP RQ */
+	if (rq_data->in_use)
+		vnic_rq_enable(rq_data);
+	vnic_rq_enable(rq_sop);
 int enic_stop_rq(struct enic *enic, uint16_t queue_idx)
-	return vnic_rq_disable(&enic->rq[queue_idx]);
+	int ret1 = 0, ret2 = 0;
+	struct vnic_rq *rq_sop = &enic->rq[enic_sop_rq(enic, queue_idx)];
+	struct vnic_rq *rq_data = &enic->rq[rq_sop->data_queue_idx];
+	ret2 = vnic_rq_disable(rq_sop);
+	if (rq_data->in_use)
+		ret1 = vnic_rq_disable(rq_data);
 int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
@@ -475,53 +528,128 @@ int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
-	struct vnic_rq *rq = &enic->rq[queue_idx];
-	rq->socket_id = socket_id;
+	uint16_t sop_queue_idx = enic_sop_rq(enic, queue_idx);
+	uint16_t data_queue_idx = enic_data_rq(enic, queue_idx);
+	struct vnic_rq *rq_sop = &enic->rq[sop_queue_idx];
+	struct vnic_rq *rq_data = &enic->rq[data_queue_idx];
+	unsigned int mbuf_size, mbufs_per_pkt;
+	unsigned int nb_sop_desc, nb_data_desc;
+	uint16_t min_sop, max_sop, min_data, max_data;
+	/* pair the SOP RQ with its data RQ */
+	rq_sop->is_sop = 1;
+	rq_sop->data_queue_idx = data_queue_idx;
+	rq_data->is_sop = 0;
+	rq_data->data_queue_idx = 0;
+	rq_sop->socket_id = socket_id;
+	rq_data->socket_id = socket_id;
+	rq_sop->in_use = 1;
+	mbuf_size = (uint16_t)(rte_pktmbuf_data_room_size(mp) -
+			       RTE_PKTMBUF_HEADROOM);
+	/* ceil(mtu / mbuf_size) */
+	mbufs_per_pkt = (enic->config.mtu + (mbuf_size - 1)) / mbuf_size;
+	/* the data RQ is needed only when one mbuf cannot hold a full frame */
+	if (mbufs_per_pkt > 1)
+		rq_data->in_use = 1;
+	else
+		rq_data->in_use = 0;
+	/* the number of descriptors must be a multiple of 32 */
+	nb_sop_desc = (nb_desc / mbufs_per_pkt) & ~0x1F;
+	nb_data_desc = (nb_desc - nb_sop_desc) & ~0x1F;
+	rq_sop->max_mbufs_per_pkt = mbufs_per_pkt;
+	rq_data->max_mbufs_per_pkt = mbufs_per_pkt;
+	if (mbufs_per_pkt > 1) {
+		/* each frame takes one SOP descriptor plus
+		 * (mbufs_per_pkt - 1) data descriptors, which bounds
+		 * how the ring may be split between the two queues
+		 */
+		max_sop = ((enic->config.rq_desc_count /
+			    (mbufs_per_pkt - 1)) & ~0x1F);
+		min_data = min_sop * (mbufs_per_pkt - 1);
+		max_data = enic->config.rq_desc_count;
+	} else {
+		max_sop = enic->config.rq_desc_count;
-	if (nb_desc > enic->config.rq_desc_count) {
-		"RQ %d - number of rx desc in cmd line (%d)"\
-		"is greater than that in the UCSM/CIMC adapter"\
-		"policy. Applying the value in the adapter "\
-		queue_idx, nb_desc, enic->config.rq_desc_count);
-		nb_desc = enic->config.rq_desc_count;
-	dev_info(enic, "RX Queues - effective number of descs:%d\n",
+	if (nb_desc < (min_sop + min_data)) {
+		dev_info(enic,
+			 "Number of rx descs too low, adjusting to minimum\n");
+		nb_sop_desc = min_sop;
+		nb_data_desc = min_data;
+	} else if (nb_desc > (max_sop + max_data)) {
+		dev_info(enic,
+			 "Number of rx descs too high, adjusting to maximum\n");
+		nb_sop_desc = max_sop;
+		nb_data_desc = max_data;
+	dev_info(enic, "For mtu %d and mbuf size %d valid rx descriptor range is %d to %d\n",
+		 enic->config.mtu, mbuf_size, min_sop + min_data,
+		 max_sop + max_data);
-	/* Allocate queue resources */
-	rc = vnic_rq_alloc(enic->vdev, rq, queue_idx,
-		nb_desc, sizeof(struct rq_enet_desc));
+	dev_info(enic, "Using %d rx descriptors (sop %d, data %d)\n",
+		 nb_sop_desc + nb_data_desc, nb_sop_desc, nb_data_desc);
+	/* Allocate sop queue resources */
+	rc = vnic_rq_alloc(enic->vdev, rq_sop, sop_queue_idx,
+		nb_sop_desc, sizeof(struct rq_enet_desc));
-		dev_err(enic, "error in allocation of rq\n");
+		dev_err(enic, "error in allocation of sop rq\n");
+	nb_sop_desc = rq_sop->ring.desc_count;
+	if (rq_data->in_use) {
+		/* Allocate data queue resources */
+		rc = vnic_rq_alloc(enic->vdev, rq_data, data_queue_idx,
+			nb_data_desc,
+			sizeof(struct rq_enet_desc));
+			dev_err(enic, "error in allocation of data rq\n");
+			goto err_free_rq_sop;
+		nb_data_desc = rq_data->ring.desc_count;
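+	/* a single completion queue serves both the sop and data RQs,
+	 * so it must be sized for the sum of the two rings
+	 */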
 	rc = vnic_cq_alloc(enic->vdev, &enic->cq[queue_idx], queue_idx,
-		socket_id, nb_desc,
-		sizeof(struct cq_enet_rq_desc));
+		socket_id, nb_sop_desc + nb_data_desc,
+		sizeof(struct cq_enet_rq_desc));
 		dev_err(enic, "error in allocation of cq for rq\n");
-		goto err_free_rq_exit;
+		goto err_free_rq_data;
-	/* Allocate the mbuf ring */
-	rq->mbuf_ring = (struct rte_mbuf **)rte_zmalloc_socket("rq->mbuf_ring",
-		sizeof(struct rte_mbuf *) * nb_desc,
-		RTE_CACHE_LINE_SIZE, rq->socket_id);
+	/* Allocate the mbuf rings */
+	rq_sop->mbuf_ring = (struct rte_mbuf **)
+		rte_zmalloc_socket("rq->mbuf_ring",
+				   sizeof(struct rte_mbuf *) * nb_sop_desc,
+				   RTE_CACHE_LINE_SIZE, rq_sop->socket_id);
+	if (rq_sop->mbuf_ring == NULL)
+	if (rq_data->in_use) {
+		rq_data->mbuf_ring = (struct rte_mbuf **)
+			rte_zmalloc_socket("rq->mbuf_ring",
+					   sizeof(struct rte_mbuf *) * nb_data_desc,
+					   RTE_CACHE_LINE_SIZE, rq_data->socket_id);
+		if (rq_data->mbuf_ring == NULL)
+			goto err_free_sop_mbuf;
-	if (rq->mbuf_ring != NULL)
+	rte_free(rq_sop->mbuf_ring);
 	/* cleanup on error */
 	vnic_cq_free(&enic->cq[queue_idx]);
+	if (rq_data->in_use)
+		vnic_rq_free(rq_data);
+	vnic_rq_free(rq_sop);
diff --git a/drivers/net/enic/enic_rxtx.c b/drivers/net/enic/enic_rxtx.c
index 174486b..463b954 100644
--- a/drivers/net/enic/enic_rxtx.c
+++ b/drivers/net/enic/enic_rxtx.c
@@ -242,22 +242,27 @@ uint16_t
 enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
-	struct vnic_rq *rq = rx_queue;
-	struct enic *enic = vnic_dev_priv(rq->vdev);
-	unsigned int rx_id;
+	struct vnic_rq *sop_rq = rx_queue;
+	struct vnic_rq *data_rq;
+	struct vnic_rq *rq;
+	struct enic *enic = vnic_dev_priv(sop_rq->vdev);
 	struct rte_mbuf *nmb, *rxmb;
 	volatile struct cq_desc *cqd_ptr;
-	uint16_t nb_err = 0;
+	uint16_t seg_length;
+	struct rte_mbuf *first_seg = sop_rq->pkt_first_seg;
+	struct rte_mbuf *last_seg = sop_rq->pkt_last_seg;
-	cq = &enic->cq[enic_cq_rq(enic, rq->index)];
-	rx_id = cq->to_clean; /* index of cqd, rqd, mbuf_table */
-	cqd_ptr = (struct cq_desc *)(cq->ring.descs) + rx_id;
+	cq = &enic->cq[enic_cq_rq(enic, sop_rq->index)];
+	cq_idx = cq->to_clean; /* index of cqd, rqd, mbuf_table */
+	cqd_ptr = (struct cq_desc *)(cq->ring.descs) + cq_idx;
-	nb_hold = rq->rx_nb_hold; /* mbufs held by software */
+	data_rq = &enic->rq[sop_rq->data_queue_idx];
 	while (nb_rx < nb_pkts) {
 		volatile struct rq_enet_desc *rqd_ptr;
@@ -265,6 +270,7 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		uint64_t ol_err_flags;
 		uint8_t packet_error;
 		/* Check for pkts available */
 		color = (cqd_ptr->type_color >> CQ_DESC_COLOR_SHIFT)
@@ -272,9 +278,13 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		if (color == cq->last_color)
-		/* Get the cq descriptor and rq pointer */
+		/* Get the cq descriptor and extract rq info from it */
-		rqd_ptr = (struct rq_enet_desc *)(rq->ring.descs) + rx_id;
+		rq_num = cqd.q_number & CQ_DESC_Q_NUM_MASK;
+		rq_idx = cqd.completed_index & CQ_DESC_COMP_NDX_MASK;
+		rq = &enic->rq[rq_num];
+		rqd_ptr = ((struct rq_enet_desc *)rq->ring.descs) + rq_idx;
 		/* allocate a new mbuf */
 		nmb = rte_rxmbuf_alloc(rq->mp);
@@ -287,67 +297,106 @@ enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		packet_error = enic_cq_rx_to_pkt_err_flags(&cqd, &ol_err_flags);
 		/* Get the mbuf to return and replace with one just allocated */
-		rxmb = rq->mbuf_ring[rx_id];
-		rq->mbuf_ring[rx_id] = nmb;
+		rxmb = rq->mbuf_ring[rq_idx];
+		rq->mbuf_ring[rq_idx] = nmb;
 		/* Increment cqd, rqd, mbuf_table index */
-		if (unlikely(rx_id == rq->ring.desc_count)) {
+		if (unlikely(cq_idx == cq->ring.desc_count)) {
 			cq->last_color = cq->last_color ? 0 : 1;
 		/* Prefetch next mbuf & desc while processing current one */
-		cqd_ptr = (struct cq_desc *)(cq->ring.descs) + rx_id;
+		cqd_ptr = (struct cq_desc *)(cq->ring.descs) + cq_idx;
 		rte_enic_prefetch(cqd_ptr);
-		rte_enic_prefetch(rq->mbuf_ring[rx_id]);
-		rte_enic_prefetch((struct rq_enet_desc *)(rq->ring.descs)
+		ciflags = enic_cq_rx_desc_ciflags((struct cq_enet_rq_desc *)&cqd);
 		/* Push descriptor for newly allocated mbuf */
-		dma_addr = (dma_addr_t)(nmb->buf_physaddr
-			   + RTE_PKTMBUF_HEADROOM);
-		rqd_ptr->address = rte_cpu_to_le_64(dma_addr);
-		rqd_ptr->length_type = cpu_to_le16(nmb->buf_len
-				       - RTE_PKTMBUF_HEADROOM);
+		dma_addr = (dma_addr_t)(nmb->buf_physaddr + RTE_PKTMBUF_HEADROOM);
+		rq_enet_desc_enc(rqd_ptr, dma_addr,
+				 (rq->is_sop ? RQ_ENET_TYPE_ONLY_SOP
+				  : RQ_ENET_TYPE_NOT_SOP),
+				 nmb->buf_len - RTE_PKTMBUF_HEADROOM);
 		/* Fill in the rest of the mbuf */
-		rxmb->data_off = RTE_PKTMBUF_HEADROOM;
+		seg_length = enic_cq_rx_desc_n_bytes(&cqd);
+		rxmb->packet_type = enic_cq_rx_flags_to_pkt_type(&cqd);
+		enic_cq_rx_to_pkt_flags(&cqd, rxmb);
+			first_seg->nb_segs = 1;
+			first_seg->pkt_len = seg_length;
+			first_seg->pkt_len = (uint16_t)(first_seg->pkt_len
+							+ seg_length);
+			first_seg->nb_segs++;
+			last_seg->next = rxmb;
 		rxmb->port = enic->port_id;
-		if (!packet_error) {
-			rxmb->pkt_len = enic_cq_rx_desc_n_bytes(&cqd);
-			rxmb->packet_type = enic_cq_rx_flags_to_pkt_type(&cqd);
-			enic_cq_rx_to_pkt_flags(&cqd, rxmb);
-			rte_pktmbuf_free(rxmb);
+		rxmb->data_len = seg_length;
+		if (!(enic_cq_rx_desc_eop(ciflags))) {
+		if (unlikely(packet_error)) {
+			rte_pktmbuf_free(first_seg);
 			rte_atomic64_inc(&enic->soft_stats.rx_packet_errors);
-		rxmb->data_len = rxmb->pkt_len;
 		/* prefetch mbuf data for caller */
-		rte_packet_prefetch(RTE_PTR_ADD(rxmb->buf_addr,
+		rte_packet_prefetch(RTE_PTR_ADD(first_seg->buf_addr,
 				    RTE_PKTMBUF_HEADROOM));
 		/* store the mbuf address into the next entry of the array */
-		rx_pkts[nb_rx++] = rxmb;
+		rx_pkts[nb_rx++] = first_seg;
-	nb_hold += nb_rx + nb_err;
-	cq->to_clean = rx_id;
+	/* save reassembly state for the next burst */
+	sop_rq->pkt_first_seg = first_seg;
+	sop_rq->pkt_last_seg = last_seg;
+	cq->to_clean = cq_idx;
+	if ((sop_rq->rx_nb_hold + data_rq->rx_nb_hold) >
+	    sop_rq->rx_free_thresh) {
+		if (data_rq->in_use) {
+			data_rq->posted_index =
+				enic_ring_add(data_rq->ring.desc_count,
+					      data_rq->posted_index,
+					      data_rq->rx_nb_hold);
+			data_rq->rx_nb_hold = 0;
+		sop_rq->posted_index = enic_ring_add(sop_rq->ring.desc_count,
+						     sop_rq->posted_index,
+						     sop_rq->rx_nb_hold);
+		sop_rq->rx_nb_hold = 0;
-	if (nb_hold > rq->rx_free_thresh) {
-		rq->posted_index = enic_ring_add(rq->ring.desc_count,
-				rq->posted_index, nb_hold);
-		iowrite32(rq->posted_index, &rq->ctrl->posted_index);
+		if (data_rq->in_use)
+			iowrite32(data_rq->posted_index,
+				  &data_rq->ctrl->posted_index);
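+		/* keep the compiler from reordering the two doorbell
+		 * writes: data RQ first, then SOP RQ
+		 */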
+		rte_compiler_barrier();
+		iowrite32(sop_rq->posted_index, &sop_rq->ctrl->posted_index);
-		rq->rx_nb_hold = nb_hold;