diff -r -u dpdk-16.04.orig2/drivers/net/enic/base/rq_enet_desc.h dpdk-16.04/drivers/net/enic/base/rq_enet_desc.h
--- dpdk-16.04.orig2/drivers/net/enic/base/rq_enet_desc.h 2016-05-13 18:09:07.523938072 -0700
+++ dpdk-16.04/drivers/net/enic/base/rq_enet_desc.h 2016-05-13 18:09:54.359743075 -0700
[...]
 #define RQ_ENET_TYPE_BITS 2
 #define RQ_ENET_TYPE_MASK ((1 << RQ_ENET_TYPE_BITS) - 1)
 
-static inline void rq_enet_desc_enc(struct rq_enet_desc *desc,
+static inline void rq_enet_desc_enc(volatile struct rq_enet_desc *desc,
     u64 address, u8 type, u16 length)
 {
     desc->address = cpu_to_le64(address);
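The `volatile` qualifier matters because RQ descriptors live in a DMA ring that the VIC reads asynchronously, so the compiler must not cache or reorder stores through the pointer. For context, a minimal sketch of the full encoder after this change; the `length_type` packing is an assumption based on the RQ_ENET_TYPE_* macros above (the usual vnic layout keeps a 14-bit length below the 2-bit type), not text from the patch:

    /* Sketch, not patch text: assumes RQ_ENET_LEN_BITS (14) and
     * RQ_ENET_LEN_MASK are defined next to the TYPE macros above. */
    static inline void rq_enet_desc_enc(volatile struct rq_enet_desc *desc,
        u64 address, u8 type, u16 length)
    {
        desc->address = cpu_to_le64(address);
        desc->length_type = cpu_to_le16((length & RQ_ENET_LEN_MASK) |
            ((type & RQ_ENET_TYPE_MASK) << RQ_ENET_LEN_BITS));
    }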
diff -r -u dpdk-16.04.orig2/drivers/net/enic/base/vnic_rq.c dpdk-16.04/drivers/net/enic/base/vnic_rq.c
--- dpdk-16.04.orig2/drivers/net/enic/base/vnic_rq.c 2016-05-13 18:09:07.533938883 -0700
+++ dpdk-16.04/drivers/net/enic/base/vnic_rq.c 2016-05-13 18:09:54.360743158 -0700
[...]
     iowrite32(cq_index, &rq->ctrl->cq_index);
     iowrite32(error_interrupt_enable, &rq->ctrl->error_interrupt_enable);
     iowrite32(error_interrupt_offset, &rq->ctrl->error_interrupt_offset);
-    iowrite32(0, &rq->ctrl->dropped_packet_count);
     iowrite32(0, &rq->ctrl->error_status);
     iowrite32(fetch_index, &rq->ctrl->fetch_index);
     iowrite32(posted_index, &rq->ctrl->posted_index);
[...]
+//  printf("Writing 0x%x to %s rq\n",
+//      ((rq->is_sop << 10) | rq->data_queue_idx),
+//      rq->is_sop ? "sop" : "data");
+    iowrite32(((rq->is_sop << 10) | rq->data_queue_idx),
+        &rq->ctrl->data_ring);
[...]
 void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index,
     unsigned int error_interrupt_enable,
     unsigned int error_interrupt_offset)
[...]
     /* Use current fetch_index as the ring starting point */
     fetch_index = ioread32(&rq->ctrl->fetch_index);
[...]
         error_interrupt_offset);
[...]
+    rq->pkt_first_seg = NULL;
+    rq->pkt_last_seg = NULL;
[...]
 void vnic_rq_error_out(struct vnic_rq *rq, unsigned int error)
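The new data_ring write binds the two halves of a queue pair together in hardware: bit 10 flags the ring as a start-of-packet (sop) ring, and the low bits name the companion data ring. A hypothetical helper, only to make the register layout explicit (the patch writes the expression inline):

    /* Illustrative only: value layout taken from the iowrite32() above. */
    static inline u32 vnic_rq_data_ring_val(u8 is_sop, u16 data_queue_idx)
    {
        return ((u32)is_sop << 10) | data_queue_idx;
    }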
diff -r -u dpdk-16.04.orig2/drivers/net/enic/base/vnic_rq.h dpdk-16.04/drivers/net/enic/base/vnic_rq.h
--- dpdk-16.04.orig2/drivers/net/enic/base/vnic_rq.h 2016-05-13 18:09:07.540939452 -0700
+++ dpdk-16.04/drivers/net/enic/base/vnic_rq.h 2016-05-13 18:09:54.362743322 -0700
[...]
     u32 error_status;       /* 0x48 */
     u32 pad8;
-    u32 dropped_packet_count;   /* 0x50 */
+    u32 tcp_sn;         /* 0x50 */
     u32 pad9;
-    u32 dropped_packet_count_rc;    /* 0x58 */
+    u32 unused;         /* 0x58 */
     u32 pad10;
+    u32 dca_select;         /* 0x60 */
+    u32 pad11;
+    u32 dca_value;          /* 0x68 */
+    u32 pad12;
+    u32 data_ring;          /* 0x70 */
+    u32 pad13;
+    u32 header_split;       /* 0x78 */
[...]
     struct rte_mempool *mp;
[...]
+    uint16_t data_queue_idx;
+    uint8_t is_sop;
+    uint8_t in_use;
+    struct rte_mbuf *pkt_first_seg;
+    struct rte_mbuf *pkt_last_seg;
+    unsigned int max_mbufs_per_pkt;
 };
 
 static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq)
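Two separate things change in this header. In the register block, the dropped-packet counters at 0x50/0x58 are reinterpreted and the map is extended through 0x78, which is where the data_ring register written by the vnic_rq.c change above lives. In struct vnic_rq, the driver gains per-ring scatter bookkeeping: is_sop and in_use classify the ring, data_queue_idx points at the companion data ring, and pkt_first_seg/pkt_last_seg carry a partially reassembled packet across enic_recv_pkts() calls. A sketch of the intended invariant, assumed from how enic_alloc_rq() below sets these fields:

    /* Illustrative invariant, not patch text: a data ring is only
     * populated when one packet can span several mbufs. */
    static inline int enic_rq_uses_data_ring(const struct vnic_rq *rq_sop)
    {
        return rq_sop->max_mbufs_per_pkt > 1;   /* mirrors rq_data->in_use */
    }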
diff -r -u dpdk-16.04.orig2/drivers/net/enic/enic.h dpdk-16.04/drivers/net/enic/enic.h
--- dpdk-16.04.orig2/drivers/net/enic/enic.h 2016-05-13 18:09:07.553940507 -0700
+++ dpdk-16.04/drivers/net/enic/enic.h 2016-05-13 18:09:54.365743565 -0700
[...]
     struct enic_soft_stats soft_stats;
[...]
+static inline unsigned int enic_sop_rq(__rte_unused struct enic *enic, unsigned int rq)
[...]
+static inline unsigned int enic_data_rq(__rte_unused struct enic *enic, unsigned int rq)
[...]
 static inline unsigned int enic_cq_rq(__rte_unused struct enic *enic, unsigned int rq)
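The bodies of enic_sop_rq() and enic_data_rq() are elided above. Judging by how they are used throughout the patch (each user-visible queue owns one sop RQ and one optional data RQ in the same enic->rq[] array), a plausible implementation is the even/odd interleaving below; treat it as an assumption, not the patch's literal text:

    /* Assumed mapping: user queue N -> hw sop RQ 2N, hw data RQ 2N+1. */
    static inline unsigned int enic_sop_rq(__rte_unused struct enic *enic,
        unsigned int rq)
    {
        return rq * 2;
    }

    static inline unsigned int enic_data_rq(__rte_unused struct enic *enic,
        unsigned int rq)
    {
        return rq * 2 + 1;
    }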
diff -r -u dpdk-16.04.orig2/drivers/net/enic/enic_main.c dpdk-16.04/drivers/net/enic/enic_main.c
--- dpdk-16.04.orig2/drivers/net/enic/enic_main.c 2016-05-13 18:09:07.557940834 -0700
+++ dpdk-16.04/drivers/net/enic/enic_main.c 2016-05-13 18:10:40.099459001 -0700
@@ -248,15 +248,23 @@
     unsigned int error_interrupt_offset = 0;
     unsigned int index = 0;
     unsigned int cq_idx;
+    struct vnic_rq *data_rq;
 
     vnic_dev_stats_clear(enic->vdev);
 
     for (index = 0; index < enic->rq_count; index++) {
-        vnic_rq_init(&enic->rq[index],
+        vnic_rq_init(&enic->rq[enic_sop_rq(enic, index)],
             enic_cq_rq(enic, index),
             error_interrupt_enable,
             error_interrupt_offset);
+
+        data_rq = &enic->rq[enic_data_rq(enic, index)];
+        if (data_rq->in_use)
+            vnic_rq_init(data_rq,
+                enic_cq_rq(enic, index),
+                error_interrupt_enable,
+                error_interrupt_offset);
 
         cq_idx = enic_cq_rq(enic, index);
         vnic_cq_init(&enic->cq[cq_idx],
             0 /* flow_control_enable */,
[...]
     dev_debug(enic, "queue %u, allocating %u rx queue mbufs\n", rq->index,
         rq->ring.desc_count);
[...]
@@ -317,20 +328,20 @@
[...]
-        dma_addr = (dma_addr_t)(mb->buf_physaddr
-            + RTE_PKTMBUF_HEADROOM);
[...]
-        rq_enet_desc_enc(rqd, dma_addr, RQ_ENET_TYPE_ONLY_SOP,
-            mb->buf_len - RTE_PKTMBUF_HEADROOM);
+        dma_addr = (dma_addr_t)(mb->buf_physaddr + RTE_PKTMBUF_HEADROOM);
+        rq_enet_desc_enc(rqd, dma_addr,
+            (rq->is_sop ? RQ_ENET_TYPE_ONLY_SOP
+            : RQ_ENET_TYPE_NOT_SOP),
+            mb->buf_len - RTE_PKTMBUF_HEADROOM);
         rq->mbuf_ring[i] = mb;
     }
 
     /* make sure all prior writes are complete before doing the PIO write */
[...]
-    /* Post all but the last 2 cache lines' worth of descriptors */
-    rq->posted_index = rq->ring.desc_count - (2 * RTE_CACHE_LINE_SIZE
-        / sizeof(struct rq_enet_desc));
+    /* Post all but the last buffer to VIC. */
+    rq->posted_index = rq->ring.desc_count - 1;
[...]
     dev_debug(enic, "port=%u, qidx=%u, Write %u posted idx, %u sw held\n",
[...]
     iowrite32(rq->posted_index, &rq->ctrl->posted_index);
[...]
+//  printf("posted %d buffers to %s rq\n", rq->ring.desc_count,
+//      rq->is_sop ? "sop" : "data");
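Posting desc_count - 1 buffers instead of holding back two cache lines' worth follows from classic ring arithmetic: software can never post every slot, because posted_index would then wrap onto fetch_index and the VIC could not tell a full ring from an empty one. Holding back exactly one buffer is presumably the minimum that preserves that invariant. A one-line illustration (name assumed, not from the patch):

    /* Sketch: maximum buffers software may post on a ring of desc_count
     * slots before posted_index would alias fetch_index. For a 512-entry
     * ring, that is 511. */
    static inline unsigned int enic_max_postable(unsigned int desc_count)
    {
        return desc_count - 1;
    }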
@@ -399,17 +412,25 @@
             "Flow director feature will not work\n");
[...]
     for (index = 0; index < enic->rq_count; index++) {
-        err = enic_alloc_rx_queue_mbufs(enic, &enic->rq[index]);
+        err = enic_alloc_rx_queue_mbufs(enic, &enic->rq[enic_sop_rq(enic, index)]);
         if (err) {
-            dev_err(enic, "Failed to alloc RX queue mbufs\n");
+            dev_err(enic, "Failed to alloc sop RX queue mbufs\n");
             return err;
         }
+        err = enic_alloc_rx_queue_mbufs(enic, &enic->rq[enic_data_rq(enic, index)]);
+        if (err) {
+            /* release the previously allocated mbufs for the sop rq */
+            enic_rxmbuf_queue_release(enic, &enic->rq[enic_sop_rq(enic, index)]);
+
+            dev_err(enic, "Failed to alloc data RX queue mbufs\n");
+            return err;
+        }
     }
 
     for (index = 0; index < enic->wq_count; index++)
-        vnic_wq_enable(&enic->wq[index]);
+        enic_start_wq(enic, index);
     for (index = 0; index < enic->rq_count; index++)
-        vnic_rq_enable(&enic->rq[index]);
+        enic_start_rq(enic, index);
 
     vnic_dev_enable_wait(enic->vdev);
 
@@ -441,14 +462,26 @@
[...]
 void enic_free_rq(void *rxq)
 {
-    struct vnic_rq *rq = (struct vnic_rq *)rxq;
-    struct enic *enic = vnic_dev_priv(rq->vdev);
+    struct vnic_rq *rq_sop = (struct vnic_rq *)rxq;
+    struct enic *enic = vnic_dev_priv(rq_sop->vdev);
+    struct vnic_rq *rq_data = &enic->rq[rq_sop->data_queue_idx];
+
+    enic_rxmbuf_queue_release(enic, rq_sop);
+    if (rq_data->in_use)
+        enic_rxmbuf_queue_release(enic, rq_data);
+
+    rte_free(rq_sop->mbuf_ring);
+    if (rq_data->in_use)
+        rte_free(rq_data->mbuf_ring);
+
+    rq_sop->mbuf_ring = NULL;
+    rq_data->mbuf_ring = NULL;
+
+    vnic_rq_free(rq_sop);
+    if (rq_data->in_use)
+        vnic_rq_free(rq_data);
+
-    enic_rxmbuf_queue_release(enic, rq);
-    rte_free(rq->mbuf_ring);
-    rq->mbuf_ring = NULL;
[...]
-    vnic_cq_free(&enic->cq[rq->index]);
+    vnic_cq_free(&enic->cq[rq_sop->index]);
[...]
 void enic_start_wq(struct enic *enic, uint16_t queue_idx)
@@ -463,12 +496,32 @@
[...]
 void enic_start_rq(struct enic *enic, uint16_t queue_idx)
 {
-    vnic_rq_enable(&enic->rq[queue_idx]);
+    struct vnic_rq *rq_sop = &enic->rq[enic_sop_rq(enic, queue_idx)];
+    struct vnic_rq *rq_data = &enic->rq[rq_sop->data_queue_idx];
+
+    if (rq_data->in_use)
+        vnic_rq_enable(rq_data);
+
+    vnic_rq_enable(rq_sop);
[...]
 int enic_stop_rq(struct enic *enic, uint16_t queue_idx)
 {
-    return vnic_rq_disable(&enic->rq[queue_idx]);
+    int ret1 = 0, ret2 = 0;
+
+    struct vnic_rq *rq_sop = &enic->rq[enic_sop_rq(enic, queue_idx)];
+    struct vnic_rq *rq_data = &enic->rq[rq_sop->data_queue_idx];
+
+    ret2 = vnic_rq_disable(rq_sop);
+
+    if (rq_data->in_use)
+        ret1 = vnic_rq_disable(rq_data);
[...]
 int enic_alloc_rq(struct enic *enic, uint16_t queue_idx,
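The tail of enic_stop_rq() is elided above; given the ret1/ret2 pair, it presumably folds the two disable statuses into one return value, along the lines of the fragment below (an assumption, not patch text):

    /* Assumed: report the sop ring's failure first, otherwise
     * whatever the data ring returned. */
    return ret2 ? ret2 : ret1;

Note the ordering in enic_start_rq() above: the data ring is enabled before the sop ring, so the VIC never sees a live sop ring whose companion data ring is still disabled.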
@@ -476,53 +529,128 @@
[...]
-    struct vnic_rq *rq = &enic->rq[queue_idx];
-
-    rq->socket_id = socket_id;
-    rq->mp = mp;
+    uint16_t sop_queue_idx = enic_sop_rq(enic, queue_idx);
+    uint16_t data_queue_idx = enic_data_rq(enic, queue_idx);
+    struct vnic_rq *rq_sop = &enic->rq[sop_queue_idx];
+    struct vnic_rq *rq_data = &enic->rq[data_queue_idx];
+    unsigned int mbuf_size, mbufs_per_pkt;
+    unsigned int nb_sop_desc, nb_data_desc;
+    uint16_t min_sop, max_sop, min_data, max_data;
+
+    rq_sop->is_sop = 1;
+    rq_sop->data_queue_idx = data_queue_idx;
+    rq_data->is_sop = 0;
+    rq_data->data_queue_idx = 0;
+    rq_sop->socket_id = socket_id;
+    rq_sop->mp = mp;
+    rq_data->socket_id = socket_id;
+    rq_data->mp = mp;
+    rq_sop->in_use = 1;
+
+    mbuf_size = (uint16_t)(rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM);
+
+    /* ceil(mtu/mbuf_size) */
+    mbufs_per_pkt = (enic->config.mtu + (mbuf_size - 1)) / mbuf_size;
+
+    if (mbufs_per_pkt > 1)
+        rq_data->in_use = 1;
+    else
+        rq_data->in_use = 0;
+
+    /* the number of descriptors must be a multiple of 32 */
+    nb_sop_desc = (nb_desc / mbufs_per_pkt) & ~0x1F;
+    nb_data_desc = (nb_desc - nb_sop_desc) & ~0x1F;
+
+    rq_sop->max_mbufs_per_pkt = mbufs_per_pkt;
+    rq_data->max_mbufs_per_pkt = mbufs_per_pkt;
+
+    //printf("mtu = %u, mbuf_size = %u, mbuf_per_pkt = %u\n",
+    //  enic->config.mtu, mbuf_size, mbufs_per_pkt);
+
+    if (mbufs_per_pkt > 1) {
[...]
+        max_sop = ((enic->config.rq_desc_count / (mbufs_per_pkt - 1)) & ~0x1F);
+        min_data = min_sop * (mbufs_per_pkt - 1);
+        max_data = enic->config.rq_desc_count;
[...]
+        max_sop = enic->config.rq_desc_count;
[...]
-    if (nb_desc > enic->config.rq_desc_count) {
-        dev_info(enic,
-            "RQ %d - number of rx desc in cmd line (%d)"\
-            "is greater than that in the UCSM/CIMC adapter"\
-            "policy. Applying the value in the adapter "\
-            "policy (%d)\n",
-            queue_idx, nb_desc, enic->config.rq_desc_count);
-        nb_desc = enic->config.rq_desc_count;
-    }
-    dev_info(enic, "RX Queues - effective number of descs:%d\n",
-        nb_desc);
+    if (nb_desc < (min_sop + min_data)) {
+        dev_info(enic,
+            "Number of rx descs too low, adjusting to minimum\n");
+        nb_sop_desc = min_sop;
+        nb_data_desc = min_data;
+    } else if (nb_desc > (max_sop + max_data)) {
+        dev_info(enic,
+            "Number of rx_descs too high, adjusting to maximum\n");
+        nb_sop_desc = max_sop;
+        nb_data_desc = max_data;
+    }
+
+    dev_info(enic, "For mtu %d and mbuf size %d valid rx descriptor range is %d to %d\n",
+        enic->config.mtu, mbuf_size, min_sop + min_data, max_sop + max_data);
+
+    dev_info(enic, "Using %d rx descriptors (sop %d, data %d)\n",
+        nb_sop_desc + nb_data_desc, nb_sop_desc, nb_data_desc);
+
-    /* Allocate queue resources */
-    rc = vnic_rq_alloc(enic->vdev, rq, queue_idx,
-        nb_desc, sizeof(struct rq_enet_desc));
+    /* Allocate sop queue resources */
+    rc = vnic_rq_alloc(enic->vdev, rq_sop, sop_queue_idx,
+        nb_sop_desc, sizeof(struct rq_enet_desc));
     if (rc) {
-        dev_err(enic, "error in allocation of rq\n");
+        dev_err(enic, "error in allocation of sop rq\n");
[...]
+    nb_sop_desc = rq_sop->ring.desc_count;
+
+    if (rq_data->in_use) {
+        /* Allocate data queue resources */
+        rc = vnic_rq_alloc(enic->vdev, rq_data, data_queue_idx,
+            nb_data_desc,
+            sizeof(struct rq_enet_desc));
+        if (rc) {
+            dev_err(enic, "error in allocation of data rq\n");
+            goto err_free_rq_sop;
+        }
+        nb_data_desc = rq_data->ring.desc_count;
+    }
     rc = vnic_cq_alloc(enic->vdev, &enic->cq[queue_idx], queue_idx,
-        socket_id, nb_desc,
-        sizeof(struct cq_enet_rq_desc));
+        socket_id, nb_sop_desc + nb_data_desc,
+        sizeof(struct cq_enet_rq_desc));
     if (rc) {
         dev_err(enic, "error in allocation of cq for rq\n");
-        goto err_free_rq_exit;
+        goto err_free_rq_data;
     }
 
-    /* Allocate the mbuf ring */
-    rq->mbuf_ring = (struct rte_mbuf **)rte_zmalloc_socket("rq->mbuf_ring",
-        sizeof(struct rte_mbuf *) * nb_desc,
-        RTE_CACHE_LINE_SIZE, rq->socket_id);
+    /* Allocate the mbuf rings */
+    rq_sop->mbuf_ring = (struct rte_mbuf **)rte_zmalloc_socket("rq->mbuf_ring",
+        sizeof(struct rte_mbuf *) * nb_sop_desc,
+        RTE_CACHE_LINE_SIZE, rq_sop->socket_id);
+    if (rq_sop->mbuf_ring == NULL)
[...]
+    if (rq_data->in_use) {
+        rq_data->mbuf_ring = (struct rte_mbuf **)rte_zmalloc_socket("rq->mbuf_ring",
+            sizeof(struct rte_mbuf *) * nb_data_desc,
+            RTE_CACHE_LINE_SIZE, rq_sop->socket_id);
+        if (rq_data->mbuf_ring == NULL)
+            goto err_free_sop_mbuf;
+    }
 
-    if (rq->mbuf_ring != NULL)
[...]
+    rte_free(rq_sop->mbuf_ring);
[...]
     /* cleanup on error */
     vnic_cq_free(&enic->cq[queue_idx]);
[...]
+    if (rq_data->in_use)
+        vnic_rq_free(rq_data);
[...]
+    vnic_rq_free(rq_sop);
[...]
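To make the sizing logic concrete, here is a worked example using illustrative numbers (mtu 9000, a 2048-byte mbuf data room, and a user request of 512 descriptors; none of these values come from the patch):

    /* Worked example of the descriptor split in enic_alloc_rq() above. */
    static void example_rq_split(void)
    {
        unsigned int mtu = 9000, mbuf_size = 2048, nb_desc = 512;
        unsigned int mbufs_per_pkt = (mtu + (mbuf_size - 1)) / mbuf_size; /* 5 */
        unsigned int nb_sop_desc = (nb_desc / mbufs_per_pkt) & ~0x1F;     /* 96 */
        unsigned int nb_data_desc = (nb_desc - nb_sop_desc) & ~0x1F;      /* 416 */
        /* The CQ is sized for completions from both rings: 96 + 416 = 512. */
        (void)nb_sop_desc; (void)nb_data_desc;
    }

Masking with ~0x1F rounds each count down to a multiple of 32, matching the comment in the code.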
diff -r -u dpdk-16.04.orig2/drivers/net/enic/enic_rxtx.c dpdk-16.04/drivers/net/enic/enic_rxtx.c
--- dpdk-16.04.orig2/drivers/net/enic/enic_rxtx.c 2016-05-13 18:09:07.556940752 -0700
+++ dpdk-16.04/drivers/net/enic/enic_rxtx.c 2016-05-13 18:12:22.225755674 -0700
@@ -242,22 +242,27 @@
 enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
     uint16_t nb_pkts)
 {
-    struct vnic_rq *rq = rx_queue;
-    struct enic *enic = vnic_dev_priv(rq->vdev);
-    unsigned int rx_id;
+    struct vnic_rq *sop_rq = rx_queue;
+    struct vnic_rq *data_rq;
+    struct vnic_rq *rq;
+    struct enic *enic = vnic_dev_priv(sop_rq->vdev);
[...]
     struct rte_mbuf *nmb, *rxmb;
[...]
     volatile struct cq_desc *cqd_ptr;
[...]
-    uint16_t nb_err = 0;
+    uint16_t seg_length;
+    struct rte_mbuf *first_seg = sop_rq->pkt_first_seg;
+    struct rte_mbuf *last_seg = sop_rq->pkt_last_seg;
+
+    cq = &enic->cq[enic_cq_rq(enic, sop_rq->index)];
+    cq_idx = cq->to_clean;      /* index of cqd, rqd, mbuf_table */
+    cqd_ptr = (struct cq_desc *)(cq->ring.descs) + cq_idx;
 
-    cq = &enic->cq[enic_cq_rq(enic, rq->index)];
-    rx_id = cq->to_clean;       /* index of cqd, rqd, mbuf_table */
-    cqd_ptr = (struct cq_desc *)(cq->ring.descs) + rx_id;
 
-    nb_hold = rq->rx_nb_hold;   /* mbufs held by software */
+    data_rq = &enic->rq[sop_rq->data_queue_idx];
 
     while (nb_rx < nb_pkts) {
         volatile struct rq_enet_desc *rqd_ptr;
[...]
         uint64_t ol_err_flags;
         uint8_t packet_error;
[...]
         /* Check for pkts available */
         color = (cqd_ptr->type_color >> CQ_DESC_COLOR_SHIFT)
             & CQ_DESC_COLOR_MASK;
         if (color == cq->last_color)
             break;
 
-        /* Get the cq descriptor and rq pointer */
+        /* Get the cq descriptor and extract rq info from it */
         cqd = *cqd_ptr;
-        rqd_ptr = (struct rq_enet_desc *)(rq->ring.descs) + rx_id;
+        rq_num = cqd.q_number & CQ_DESC_Q_NUM_MASK;
+        rq_idx = cqd.completed_index & CQ_DESC_COMP_NDX_MASK;
+
+        rq = &enic->rq[rq_num];
+        rqd_ptr = ((struct rq_enet_desc *)rq->ring.descs) + rq_idx;
 
         /* allocate a new mbuf */
         nmb = rte_rxmbuf_alloc(rq->mp);
@@ -287,67 +297,106 @@
         packet_error = enic_cq_rx_to_pkt_err_flags(&cqd, &ol_err_flags);
 
         /* Get the mbuf to return and replace with one just allocated */
-        rxmb = rq->mbuf_ring[rx_id];
-        rq->mbuf_ring[rx_id] = nmb;
+        rxmb = rq->mbuf_ring[rq_idx];
+        rq->mbuf_ring[rq_idx] = nmb;
 
         /* Increment cqd, rqd, mbuf_table index */
[...]
-        if (unlikely(rx_id == rq->ring.desc_count)) {
[...]
+        if (unlikely(cq_idx == cq->ring.desc_count)) {
+            cq_idx = 0;
             cq->last_color = cq->last_color ? 0 : 1;
         }
 
         /* Prefetch next mbuf & desc while processing current one */
-        cqd_ptr = (struct cq_desc *)(cq->ring.descs) + rx_id;
+        cqd_ptr = (struct cq_desc *)(cq->ring.descs) + cq_idx;
         rte_enic_prefetch(cqd_ptr);
-        rte_enic_prefetch(rq->mbuf_ring[rx_id]);
-        rte_enic_prefetch((struct rq_enet_desc *)(rq->ring.descs)
-            + rx_id);
+//      rte_enic_prefetch(rq->mbuf_ring[rx_id]);
+//      rte_enic_prefetch((struct rq_enet_desc *)(rq->ring.descs)
+//          + rx_id);
 
+        ciflags = enic_cq_rx_desc_ciflags((struct cq_enet_rq_desc *)&cqd);
 
         /* Push descriptor for newly allocated mbuf */
-        dma_addr = (dma_addr_t)(nmb->buf_physaddr
-            + RTE_PKTMBUF_HEADROOM);
-        rqd_ptr->address = rte_cpu_to_le_64(dma_addr);
-        rqd_ptr->length_type = cpu_to_le16(nmb->buf_len
-            - RTE_PKTMBUF_HEADROOM);
[...]
+        dma_addr = (dma_addr_t)(nmb->buf_physaddr + RTE_PKTMBUF_HEADROOM);
+        rq_enet_desc_enc(rqd_ptr, dma_addr,
+            (rq->is_sop ? RQ_ENET_TYPE_ONLY_SOP
+            : RQ_ENET_TYPE_NOT_SOP),
+            nmb->buf_len - RTE_PKTMBUF_HEADROOM);
 
         /* Fill in the rest of the mbuf */
-        rxmb->data_off = RTE_PKTMBUF_HEADROOM;
[...]
+        seg_length = enic_cq_rx_desc_n_bytes(&cqd);
+        rxmb->packet_type = enic_cq_rx_flags_to_pkt_type(&cqd);
+        enic_cq_rx_to_pkt_flags(&cqd, rxmb);
+        if (rq->is_sop) {
+            first_seg = rxmb;
+            first_seg->nb_segs = 1;
+            first_seg->pkt_len = seg_length;
+        } else {
+            first_seg->pkt_len = (uint16_t)(first_seg->pkt_len
+                + seg_length);
+            first_seg->nb_segs++;
+            last_seg->next = rxmb;
+        }
[...]
         rxmb->port = enic->port_id;
-        if (!packet_error) {
-            rxmb->pkt_len = enic_cq_rx_desc_n_bytes(&cqd);
-            rxmb->packet_type = enic_cq_rx_flags_to_pkt_type(&cqd);
-            enic_cq_rx_to_pkt_flags(&cqd, rxmb);
[...]
-            rte_pktmbuf_free(rxmb);
+        rxmb->data_len = seg_length;
+
+        rq->rx_nb_hold++;
+
+        if (!(enic_cq_rx_desc_eop(ciflags))) {
+            last_seg = rxmb;
+            continue;
+        }
+
+        if (unlikely(packet_error)) {
+            rte_pktmbuf_free(first_seg);
             rte_atomic64_inc(&enic->soft_stats.rx_packet_errors);
-            nb_err++;
             continue;
         }
 
-        rxmb->data_len = rxmb->pkt_len;
[...]
+//      printf("EOP: final packet length is %d\n", first_seg->pkt_len);
+//      rte_pktmbuf_dump(stdout, first_seg, 64);
 
         /* prefetch mbuf data for caller */
-        rte_packet_prefetch(RTE_PTR_ADD(rxmb->buf_addr,
+        rte_packet_prefetch(RTE_PTR_ADD(first_seg->buf_addr,
             RTE_PKTMBUF_HEADROOM));
 
         /* store the mbuf address into the next entry of the array */
-        rx_pkts[nb_rx++] = rxmb;
+        rx_pkts[nb_rx++] = first_seg;
     }
 
-    nb_hold += nb_rx + nb_err;
-    cq->to_clean = rx_id;
+    sop_rq->pkt_first_seg = first_seg;
+    sop_rq->pkt_last_seg = last_seg;
+
+    cq->to_clean = cq_idx;
+
+    if ((sop_rq->rx_nb_hold + data_rq->rx_nb_hold) > sop_rq->rx_free_thresh) {
+        if (data_rq->in_use) {
+            data_rq->posted_index = enic_ring_add(data_rq->ring.desc_count,
+                data_rq->posted_index,
+                data_rq->rx_nb_hold);
+            //printf("Processed %d data descs. Posted index now %d\n",
+            //  data_rq->rx_nb_hold, data_rq->posted_index);
+            data_rq->rx_nb_hold = 0;
+        }
+        sop_rq->posted_index = enic_ring_add(sop_rq->ring.desc_count,
+            sop_rq->posted_index,
+            sop_rq->rx_nb_hold);
+        //printf("Processed %d sop descs. Posted index now %d\n",
+        //  sop_rq->rx_nb_hold, sop_rq->posted_index);
+        sop_rq->rx_nb_hold = 0;
+
-    if (nb_hold > rq->rx_free_thresh) {
-        rq->posted_index = enic_ring_add(rq->ring.desc_count,
-            rq->posted_index, nb_hold);
-        nb_hold = 0;
         rte_mb();
-        iowrite32(rq->posted_index, &rq->ctrl->posted_index);
+        if (data_rq->in_use)
+            iowrite32(data_rq->posted_index, &data_rq->ctrl->posted_index);
+        rte_compiler_barrier();
+        iowrite32(sop_rq->posted_index, &sop_rq->ctrl->posted_index);
     }
 
-    rq->rx_nb_hold = nb_hold;
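Taken together, the receive loop now works per completion rather than per ring: the CQ entry itself identifies which RQ (sop or data) a buffer came from, segments accumulate on first_seg/last_seg until the EOP flag arrives, and only then does a finished packet reach rx_pkts[]. On replenish, the data ring's posted_index is written before the sop ring's, with a compiler barrier between the two MMIO writes, so the VIC never starts a packet whose continuation buffers are not yet posted. enic_ring_add() itself is not shown in the patch; a sketch consistent with its call sites, offered as an assumption:

    /* Assumed wrap-around index advance, matching the calls above:
     * move index i0 forward by i1 slots on a ring of n_descriptors. */
    static inline uint32_t enic_ring_add(uint32_t n_descriptors,
        uint32_t i0, uint32_t i1)
    {
        uint32_t d = i0 + i1;
        if (d >= n_descriptors)
            d -= n_descriptors;
        return d;
    }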