From 3d10b7f1332d3f1326c182d3b7fa13669a528592 Mon Sep 17 00:00:00 2001
From: Leyi Rong <leyi.rong@intel.com>
Date: Wed, 8 Apr 2020 14:22:02 +0800
Subject: [DPDK 09/17] net/iavf: flexible Rx descriptor support in normal path

Support flexible Rx descriptor format in normal path.

Signed-off-by: Leyi Rong <leyi.rong@intel.com>
---
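For reviewers: the descriptor-format negotiation this series adds condenses
to the sketch below. It is illustrative only; iavf_select_rxdid() is a
hypothetical helper, and the real logic is open-coded in
iavf_dev_rx_queue_setup() and iavf_configure_queues() in the hunks that
follow.

	/* Hypothetical helper condensing the rxdid selection below. */
	static inline uint8_t
	iavf_select_rxdid(struct iavf_info *vf)
	{
		/* Flexible descriptors require both the PF capability flag
		 * and PF support for the COMMS OVS descriptor profile.
		 */
		if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC &&
		    vf->supported_rxdid & BIT(IAVF_RXDID_COMMS_OVS_1))
			return IAVF_RXDID_COMMS_OVS_1;
		return IAVF_RXDID_LEGACY_1; /* legacy 32B descriptor format */
	}
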
 drivers/net/iavf/iavf.h        |   2 +
 drivers/net/iavf/iavf_ethdev.c |   8 +
 drivers/net/iavf/iavf_rxtx.c   | 479 ++++++++++++++++++++++++++++++---
 drivers/net/iavf/iavf_rxtx.h   |   8 +
 drivers/net/iavf/iavf_vchnl.c  |  42 ++-
 5 files changed, 501 insertions(+), 38 deletions(-)
diff --git a/drivers/net/iavf/iavf.h b/drivers/net/iavf/iavf.h
index 526040c6e..67d625053 100644
--- a/drivers/net/iavf/iavf.h
+++ b/drivers/net/iavf/iavf.h
@@ -97,6 +97,7 @@ struct iavf_info {
 	struct virtchnl_version_info virtchnl_version;
 	struct virtchnl_vf_resource *vf_res; /* VF resource */
 	struct virtchnl_vsi_resource *vsi_res; /* LAN VSI */
+	uint64_t supported_rxdid;
 
 	volatile enum virtchnl_ops pend_cmd; /* pending command not finished */
 	uint32_t cmd_retval; /* return value of the cmd response from PF */
@@ -225,6 +226,7 @@ int iavf_disable_queues(struct iavf_adapter *adapter);
 int iavf_configure_rss_lut(struct iavf_adapter *adapter);
 int iavf_configure_rss_key(struct iavf_adapter *adapter);
 int iavf_configure_queues(struct iavf_adapter *adapter);
+int iavf_get_supported_rxdid(struct iavf_adapter *adapter);
 int iavf_config_irq_map(struct iavf_adapter *adapter);
 void iavf_add_del_all_mac_addr(struct iavf_adapter *adapter, bool add);
 int iavf_dev_link_update(struct rte_eth_dev *dev,
diff --git a/drivers/net/iavf/iavf_ethdev.c b/drivers/net/iavf/iavf_ethdev.c
index ee9f82249..d3a121eac 100644
--- a/drivers/net/iavf/iavf_ethdev.c
+++ b/drivers/net/iavf/iavf_ethdev.c
@@ -1236,6 +1236,14 @@ iavf_init_vf(struct rte_eth_dev *dev)
+	if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC) {
+		if (iavf_get_supported_rxdid(adapter) != 0) {
+			PMD_INIT_LOG(ERR, "failed to get supported rxdid");
+			goto err_rss;
+		}
+	}
 
 	return 0;
 err_rss:
 	rte_free(vf->rss_key);
diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index 9eccb7c41..67297dcb7 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -346,6 +346,14 @@ iavf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+	if (vf->vf_res->vf_cap_flags &
+	    VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC &&
+	    vf->supported_rxdid & BIT(IAVF_RXDID_COMMS_OVS_1)) {
+		rxq->rxdid = IAVF_RXDID_COMMS_OVS_1;
+	} else {
+		rxq->rxdid = IAVF_RXDID_LEGACY_1;
+	}
+
 	rxq->nb_rx_desc = nb_desc;
 	rxq->rx_free_thresh = rx_free_thresh;
@@ -720,6 +728,20 @@ iavf_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union iavf_rx_desc *rxdp)
 	}
 }
 
+static inline void
+iavf_flex_rxd_to_vlan_tci(struct rte_mbuf *mb,
+			  volatile union iavf_rx_flex_desc *rxdp)
+{
+	if (rte_le_to_cpu_64(rxdp->wb.status_error0) &
+	    (1 << IAVF_RX_FLEX_DESC_STATUS0_L2TAG1P_S)) {
+		mb->ol_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
+		mb->vlan_tci =
+			rte_le_to_cpu_16(rxdp->wb.l2tag1);
+	} else {
+		mb->vlan_tci = 0;
+	}
+}
+
 /* Translate the rx descriptor status and error fields to pkt flags */
 static inline uint64_t
 iavf_rxd_to_pkt_flags(uint64_t qword)
@@ -754,6 +776,87 @@ iavf_rxd_to_pkt_flags(uint64_t qword)
 	return flags;
 }
 
+/* Translate the rx flex descriptor status to pkt flags */
+static inline void
+iavf_rxd_to_pkt_fields(struct rte_mbuf *mb,
+		       volatile union iavf_rx_flex_desc *rxdp)
+{
+	volatile struct iavf_32b_rx_flex_desc_comms_ovs *desc =
+		(volatile struct iavf_32b_rx_flex_desc_comms_ovs *)rxdp;
+	uint16_t stat_err;
+
+#ifndef RTE_LIBRTE_IAVF_16BYTE_RX_DESC
+	stat_err = rte_le_to_cpu_16(desc->status_error0);
+	if (likely(stat_err & (1 << IAVF_RX_FLEX_DESC_STATUS0_RSS_VALID_S))) {
+		mb->ol_flags |= PKT_RX_RSS_HASH;
+		mb->hash.rss = rte_le_to_cpu_32(desc->rss_hash);
+	}
+#endif
+}
+
+#define IAVF_RX_FLEX_ERR0_BITS	\
+	((1 << IAVF_RX_FLEX_DESC_STATUS0_HBO_S) |	\
+	 (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_IPE_S) |	\
+	 (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_L4E_S) |	\
+	 (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S) |	\
+	 (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_EUDPE_S) |	\
+	 (1 << IAVF_RX_FLEX_DESC_STATUS0_RXE_S))
+
+/* Rx L3/L4 checksum */
+static inline uint64_t
+iavf_flex_rxd_error_to_pkt_flags(uint16_t stat_err0)
+{
+	uint64_t flags = 0;
+
+	/* check if HW has decoded the packet and checksum */
+	if (unlikely(!(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_L3L4P_S))))
+		return 0;
+
+	if (likely(!(stat_err0 & IAVF_RX_FLEX_ERR0_BITS))) {
+		flags |= (PKT_RX_IP_CKSUM_GOOD | PKT_RX_L4_CKSUM_GOOD);
+		return flags;
+	}
+
+	if (unlikely(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_IPE_S)))
+		flags |= PKT_RX_IP_CKSUM_BAD;
+	else
+		flags |= PKT_RX_IP_CKSUM_GOOD;
+
+	if (unlikely(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_L4E_S)))
+		flags |= PKT_RX_L4_CKSUM_BAD;
+	else
+		flags |= PKT_RX_L4_CKSUM_GOOD;
+
+	if (unlikely(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_XSUM_EIPE_S)))
+		flags |= PKT_RX_EIP_CKSUM_BAD;
+
+	return flags;
+}
+
+/* If the number of free RX descriptors is greater than the RX free
+ * threshold of the queue, advance the Receive Descriptor Tail (RDT)
+ * register. Update the RDT with the value of the last processed RX
+ * descriptor minus 1, to guarantee that the RDT register is never
+ * equal to the RDH register, which creates a "full" ring situation
+ * from the hardware point of view.
+ */
+static inline void
+iavf_update_rx_tail(struct iavf_rx_queue *rxq, uint16_t nb_hold, uint16_t rx_id)
+{
+	nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
+
+	if (nb_hold > rxq->rx_free_thresh) {
+		PMD_RX_LOG(DEBUG,
+			   "port_id=%u queue_id=%u rx_tail=%u nb_hold=%u",
+			   rxq->port_id, rxq->queue_id, rx_id, nb_hold);
+		rx_id = (uint16_t)((rx_id == 0) ?
+				   (rxq->nb_rx_desc - 1) : (rx_id - 1));
+		IAVF_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
+		nb_hold = 0;
+	}
+	rxq->nb_rx_hold = nb_hold;
+}
+
 /* implement recv_pkts */
 uint16_t
 iavf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
@@ -854,23 +957,256 @@ iavf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 
 	rxq->rx_tail = rx_id;
 
-	/* If the number of free RX descriptors is greater than the RX free
-	 * threshold of the queue, advance the receive tail register of queue.
-	 * Update that register with the value of the last processed RX
-	 * descriptor minus 1.
-	 */
-	nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
-	if (nb_hold > rxq->rx_free_thresh) {
-		PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
-			   "nb_hold=%u nb_rx=%u",
-			   rxq->port_id, rxq->queue_id,
-			   rx_id, nb_hold, nb_rx);
-		rx_id = (uint16_t)((rx_id == 0) ?
-				   (rxq->nb_rx_desc - 1) : (rx_id - 1));
-		IAVF_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
-		nb_hold = 0;
+	iavf_update_rx_tail(rxq, nb_hold, rx_id);
+
+	return nb_rx;
+}
+
+/* implement recv_pkts for flexible Rx descriptor */
+uint16_t
+iavf_recv_pkts_flex_rxd(void *rx_queue,
+			struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
+{
+	volatile union iavf_rx_desc *rx_ring;
+	volatile union iavf_rx_flex_desc *rxdp;
+	struct iavf_rx_queue *rxq;
+	union iavf_rx_flex_desc rxd;
+	struct rte_mbuf *rxe;
+	struct rte_eth_dev *dev;
+	struct rte_mbuf *rxm;
+	struct rte_mbuf *nmb;
+	uint16_t nb_rx;
+	uint16_t rx_stat_err0;
+	uint16_t rx_packet_len;
+	uint16_t rx_id, nb_hold;
+	uint64_t dma_addr;
+	uint64_t pkt_flags;
+	const uint32_t *ptype_tbl;
+
+	nb_rx = 0;
+	nb_hold = 0;
+	rxq = rx_queue;
+	rx_id = rxq->rx_tail;
+	rx_ring = rxq->rx_ring;
+	ptype_tbl = rxq->vsi->adapter->ptype_tbl;
+
+	while (nb_rx < nb_pkts) {
+		rxdp = (volatile union iavf_rx_flex_desc *)&rx_ring[rx_id];
+		rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
+
+		/* Check the DD bit first */
+		if (!(rx_stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S)))
+			break;
+		IAVF_DUMP_RX_DESC(rxq, rxdp, rx_id);
+
+		nmb = rte_mbuf_raw_alloc(rxq->mp);
+		if (unlikely(!nmb)) {
+			dev = &rte_eth_devices[rxq->port_id];
+			dev->data->rx_mbuf_alloc_failed++;
+			PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
+				   "queue_id=%u", rxq->port_id, rxq->queue_id);
+			break;
+		}
+
+		rxd = *rxdp;
+		nb_hold++;
+		rxe = rxq->sw_ring[rx_id];
+		rxq->sw_ring[rx_id] = nmb;
+		rx_id++;
+		if (unlikely(rx_id == rxq->nb_rx_desc))
+			rx_id = 0;
+
+		/* Prefetch next mbuf */
+		rte_prefetch0(rxq->sw_ring[rx_id]);
+
+		/* When next RX descriptor is on a cache line boundary,
+		 * prefetch the next 4 RX descriptors and next 8 pointers
+		 * to mbufs.
+		 */
+		if ((rx_id & 0x3) == 0) {
+			rte_prefetch0(&rx_ring[rx_id]);
+			rte_prefetch0(rxq->sw_ring[rx_id]);
+		}
+		rxm = rxe;
+		dma_addr =
+			rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
+		rxdp->read.hdr_addr = 0;
+		rxdp->read.pkt_addr = dma_addr;
+
+		rx_packet_len = (rte_le_to_cpu_16(rxd.wb.pkt_len) &
+				IAVF_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
+
+		rxm->data_off = RTE_PKTMBUF_HEADROOM;
+		rte_prefetch0(RTE_PTR_ADD(rxm->buf_addr, RTE_PKTMBUF_HEADROOM));
+		rxm->nb_segs = 1;
+		rxm->next = NULL;
+		rxm->pkt_len = rx_packet_len;
+		rxm->data_len = rx_packet_len;
+		rxm->port = rxq->port_id;
+		rxm->ol_flags = 0;
+		rxm->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
+			rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
+		iavf_flex_rxd_to_vlan_tci(rxm, &rxd);
+		iavf_rxd_to_pkt_fields(rxm, &rxd);
+		pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
+		rxm->ol_flags |= pkt_flags;
+
+		rx_pkts[nb_rx++] = rxm;
 	}
-	rxq->nb_rx_hold = nb_hold;
+	rxq->rx_tail = rx_id;
+
+	iavf_update_rx_tail(rxq, nb_hold, rx_id);
 
 	return nb_rx;
 }
 
+/* implement recv_scattered_pkts for flexible Rx descriptor */
+uint16_t
+iavf_recv_scattered_pkts_flex_rxd(void *rx_queue, struct rte_mbuf **rx_pkts,
+				  uint16_t nb_pkts)
+{
+	struct iavf_rx_queue *rxq = rx_queue;
+	union iavf_rx_flex_desc rxd;
+	struct rte_mbuf *rxe;
+	struct rte_mbuf *first_seg = rxq->pkt_first_seg;
+	struct rte_mbuf *last_seg = rxq->pkt_last_seg;
+	struct rte_mbuf *nmb, *rxm;
+	uint16_t rx_id = rxq->rx_tail;
+	uint16_t nb_rx = 0, nb_hold = 0, rx_packet_len;
+	struct rte_eth_dev *dev;
+	uint16_t rx_stat_err0;
+	uint64_t dma_addr;
+	uint64_t pkt_flags;
+
+	volatile union iavf_rx_desc *rx_ring = rxq->rx_ring;
+	volatile union iavf_rx_flex_desc *rxdp;
+	const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
+
+	while (nb_rx < nb_pkts) {
+		rxdp = (volatile union iavf_rx_flex_desc *)&rx_ring[rx_id];
+		rx_stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
+
+		/* Check the DD bit */
+		if (!(rx_stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S)))
+			break;
+		IAVF_DUMP_RX_DESC(rxq, rxdp, rx_id);
+
+		nmb = rte_mbuf_raw_alloc(rxq->mp);
+		if (unlikely(!nmb)) {
+			PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u "
+				   "queue_id=%u", rxq->port_id, rxq->queue_id);
+			dev = &rte_eth_devices[rxq->port_id];
+			dev->data->rx_mbuf_alloc_failed++;
+			break;
+		}
+
+		rxd = *rxdp;
+		nb_hold++;
+		rxe = rxq->sw_ring[rx_id];
+		rxq->sw_ring[rx_id] = nmb;
+		rx_id++;
+		if (rx_id == rxq->nb_rx_desc)
+			rx_id = 0;
+
+		/* Prefetch next mbuf */
+		rte_prefetch0(rxq->sw_ring[rx_id]);
+
+		/* When next RX descriptor is on a cache line boundary,
+		 * prefetch the next 4 RX descriptors and next 8 pointers
+		 * to mbufs.
+		 */
+		if ((rx_id & 0x3) == 0) {
+			rte_prefetch0(&rx_ring[rx_id]);
+			rte_prefetch0(rxq->sw_ring[rx_id]);
+		}
+
+		rxm = rxe;
+		dma_addr =
+			rte_cpu_to_le_64(rte_mbuf_data_iova_default(nmb));
+
+		/* Set data buffer address and data length of the mbuf */
+		rxdp->read.hdr_addr = 0;
+		rxdp->read.pkt_addr = dma_addr;
+		rx_packet_len = rte_le_to_cpu_16(rxd.wb.pkt_len) &
+				IAVF_RX_FLX_DESC_PKT_LEN_M;
+		rxm->data_len = rx_packet_len;
+		rxm->data_off = RTE_PKTMBUF_HEADROOM;
+
+		/* If this is the first buffer of the received packet, set the
+		 * pointer to the first mbuf of the packet and initialize its
+		 * context. Otherwise, update the total length and the number
+		 * of segments of the current scattered packet, and update the
+		 * pointer to the last mbuf of the current packet.
+		 */
+		if (!first_seg) {
+			first_seg = rxm;
+			first_seg->nb_segs = 1;
+			first_seg->pkt_len = rx_packet_len;
+		} else {
+			first_seg->pkt_len =
+				(uint16_t)(first_seg->pkt_len +
+					   rx_packet_len);
+			first_seg->nb_segs++;
+			last_seg->next = rxm;
+		}
+
+		/* If this is not the last buffer of the received packet,
+		 * update the pointer to the last mbuf of the current scattered
+		 * packet and continue to parse the RX ring.
+		 */
+		if (!(rx_stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_EOF_S))) {
+			last_seg = rxm;
+			continue;
+		}
+
+		/* This is the last buffer of the received packet. If the CRC
+		 * is not stripped by the hardware:
+		 *  - Subtract the CRC length from the total packet length.
+		 *  - If the last buffer only contains the whole CRC or a part
+		 *  of it, free the mbuf associated to the last buffer. If part
+		 *  of the CRC is also contained in the previous mbuf, subtract
+		 *  the length of that CRC part from the data length of the
+		 *  previous mbuf.
+		 */
+		rxm->next = NULL;
+		if (unlikely(rxq->crc_len > 0)) {
+			first_seg->pkt_len -= RTE_ETHER_CRC_LEN;
+			if (rx_packet_len <= RTE_ETHER_CRC_LEN) {
+				rte_pktmbuf_free_seg(rxm);
+				first_seg->nb_segs--;
+				last_seg->data_len =
+					(uint16_t)(last_seg->data_len -
+					(RTE_ETHER_CRC_LEN - rx_packet_len));
+				last_seg->next = NULL;
+			} else {
+				rxm->data_len = (uint16_t)(rx_packet_len -
+							   RTE_ETHER_CRC_LEN);
+			}
+		}
+
+		first_seg->port = rxq->port_id;
+		first_seg->ol_flags = 0;
+		first_seg->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
+			rte_le_to_cpu_16(rxd.wb.ptype_flex_flags0)];
+		iavf_flex_rxd_to_vlan_tci(first_seg, &rxd);
+		iavf_rxd_to_pkt_fields(first_seg, &rxd);
+		pkt_flags = iavf_flex_rxd_error_to_pkt_flags(rx_stat_err0);
+
+		first_seg->ol_flags |= pkt_flags;
+
+		/* Prefetch data of first segment, if configured to do so. */
+		rte_prefetch0(RTE_PTR_ADD(first_seg->buf_addr,
+					  first_seg->data_off));
+		rx_pkts[nb_rx++] = first_seg;
+		first_seg = NULL;
+	}
+
+	/* Record index of the next RX descriptor to probe. */
+	rxq->rx_tail = rx_id;
+	rxq->pkt_first_seg = first_seg;
+	rxq->pkt_last_seg = last_seg;
+
+	iavf_update_rx_tail(rxq, nb_hold, rx_id);
+
+	return nb_rx;
+}
+
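The CRC corner case above is easier to see with concrete numbers. A worked
example as a sketch (illustrative values only; RTE_ETHER_CRC_LEN is 4):

	/* Case 1: the last segment holds rx_packet_len = 2 bytes, i.e. only
	 * CRC bytes. That mbuf is freed, and the 4 - 2 = 2 CRC bytes that
	 * spilled into the previous segment are trimmed from its data_len;
	 * the previous segment then terminates the chain.
	 *
	 * Case 2: the last segment holds rx_packet_len = 6 bytes. It keeps
	 * 6 - 4 = 2 bytes of payload and stays in the chain.
	 */
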
@@ -1027,30 +1363,88 @@ iavf_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 	rxq->pkt_first_seg = first_seg;
 	rxq->pkt_last_seg = last_seg;
 
-	/* If the number of free RX descriptors is greater than the RX free
-	 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
-	 * register. Update the RDT with the value of the last processed RX
-	 * descriptor minus 1, to guarantee that the RDT register is never
-	 * equal to the RDH register, which creates a "full" ring situtation
-	 * from the hardware point of view.
-	 */
+	iavf_update_rx_tail(rxq, nb_hold, rx_id);
+
+	return nb_rx;
+}
+
+#define IAVF_LOOK_AHEAD 8
+static inline int
+iavf_rx_scan_hw_ring_flex_rxd(struct iavf_rx_queue *rxq)
+{
+	volatile union iavf_rx_flex_desc *rxdp;
+	struct rte_mbuf **rxep;
+	struct rte_mbuf *mb;
+	uint16_t stat_err0;
+	uint16_t pkt_len;
+	int32_t s[IAVF_LOOK_AHEAD], nb_dd;
+	int32_t i, j, nb_rx = 0;
+	uint64_t pkt_flags;
+	const uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
+
+	rxdp = (volatile union iavf_rx_flex_desc *)&rxq->rx_ring[rxq->rx_tail];
+	rxep = &rxq->sw_ring[rxq->rx_tail];
+
+	stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);
+
+	/* Make sure there is at least 1 packet to receive */
+	if (!(stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S)))
+		return 0;
+
+	/* Scan LOOK_AHEAD descriptors at a time to determine which
+	 * descriptors reference packets that are ready to be received.
+	 */
-	nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
-	if (nb_hold > rxq->rx_free_thresh) {
-		PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
-			   "nb_hold=%u nb_rx=%u",
-			   rxq->port_id, rxq->queue_id,
-			   rx_id, nb_hold, nb_rx);
-		rx_id = (uint16_t)(rx_id == 0 ?
-			(rxq->nb_rx_desc - 1) : (rx_id - 1));
-		IAVF_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
-		nb_hold = 0;
-	}
-	rxq->nb_rx_hold = nb_hold;
+	for (i = 0; i < IAVF_RX_MAX_BURST; i += IAVF_LOOK_AHEAD,
+	     rxdp += IAVF_LOOK_AHEAD, rxep += IAVF_LOOK_AHEAD) {
+		/* Read desc statuses backwards to avoid race condition */
+		for (j = IAVF_LOOK_AHEAD - 1; j >= 0; j--)
+			s[j] = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
+
+		rte_smp_rmb();
+
+		/* Compute how many status bits were set */
+		for (j = 0, nb_dd = 0; j < IAVF_LOOK_AHEAD; j++)
+			nb_dd += s[j] & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S);
+
+		nb_rx += nb_dd;
+
+		/* Translate descriptor info to mbuf parameters */
+		for (j = 0; j < nb_dd; j++) {
+			IAVF_DUMP_RX_DESC(rxq, &rxdp[j],
+					  rxq->rx_tail +
+					  i * IAVF_LOOK_AHEAD + j);
+
+			mb = rxep[j];
+			pkt_len = (rte_le_to_cpu_16(rxdp[j].wb.pkt_len) &
+				IAVF_RX_FLX_DESC_PKT_LEN_M) - rxq->crc_len;
+			mb->data_len = pkt_len;
+			mb->pkt_len = pkt_len;
+			mb->ol_flags = 0;
+
+			mb->packet_type = ptype_tbl[IAVF_RX_FLEX_DESC_PTYPE_M &
+				rte_le_to_cpu_16(rxdp[j].wb.ptype_flex_flags0)];
+			iavf_flex_rxd_to_vlan_tci(mb, &rxdp[j]);
+			iavf_rxd_to_pkt_fields(mb, &rxdp[j]);
+			stat_err0 = rte_le_to_cpu_16(rxdp[j].wb.status_error0);
+			pkt_flags = iavf_flex_rxd_error_to_pkt_flags(stat_err0);
+
+			mb->ol_flags |= pkt_flags;
+		}
+
+		for (j = 0; j < IAVF_LOOK_AHEAD; j++)
+			rxq->rx_stage[i + j] = rxep[j];
+
+		if (nb_dd != IAVF_LOOK_AHEAD)
+			break;
+	}
+
+	/* Clear software ring entries */
+	for (i = 0; i < nb_rx; i++)
+		rxq->sw_ring[rxq->rx_tail + i] = NULL;
 
 	return nb_rx;
 }
 
-#define IAVF_LOOK_AHEAD 8
 static inline int
 iavf_rx_scan_hw_ring(struct iavf_rx_queue *rxq)
@@ -1219,7 +1613,10 @@ rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 	if (rxq->rx_nb_avail)
 		return iavf_rx_fill_from_stage(rxq, rx_pkts, nb_pkts);
 
-	nb_rx = (uint16_t)iavf_rx_scan_hw_ring(rxq);
+	if (rxq->rxdid == IAVF_RXDID_COMMS_OVS_1)
+		nb_rx = (uint16_t)iavf_rx_scan_hw_ring_flex_rxd(rxq);
+	else
+		nb_rx = (uint16_t)iavf_rx_scan_hw_ring(rxq);
 	rxq->rx_next_avail = 0;
 	rxq->rx_nb_avail = nb_rx;
 	rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_rx);
@@ -1663,6 +2060,7 @@ iavf_set_rx_function(struct rte_eth_dev *dev)
 {
 	struct iavf_adapter *adapter =
 		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
 #ifdef RTE_ARCH_X86
 	struct iavf_rx_queue *rxq;
@@ -1702,7 +2100,10 @@ iavf_set_rx_function(struct rte_eth_dev *dev)
 	if (dev->data->scattered_rx) {
 		PMD_DRV_LOG(DEBUG, "Using a Scattered Rx callback (port=%d).",
 			    dev->data->port_id);
-		dev->rx_pkt_burst = iavf_recv_scattered_pkts;
+		if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
+			dev->rx_pkt_burst = iavf_recv_scattered_pkts_flex_rxd;
+		else
+			dev->rx_pkt_burst = iavf_recv_scattered_pkts;
 	} else if (adapter->rx_bulk_alloc_allowed) {
 		PMD_DRV_LOG(DEBUG, "Using bulk Rx callback (port=%d).",
 			    dev->data->port_id);
@@ -1710,7 +2111,10 @@ iavf_set_rx_function(struct rte_eth_dev *dev)
 	} else {
 		PMD_DRV_LOG(DEBUG, "Using Basic Rx callback (port=%d).",
 			    dev->data->port_id);
-		dev->rx_pkt_burst = iavf_recv_pkts;
+		if (vf->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC)
+			dev->rx_pkt_burst = iavf_recv_pkts_flex_rxd;
+		else
+			dev->rx_pkt_burst = iavf_recv_pkts;
 	}
@@ -1797,6 +2201,7 @@ iavf_dev_rxq_count(struct rte_eth_dev *dev, uint16_t queue_id)
 
 	rxq = dev->data->rx_queues[queue_id];
 	rxdp = &rxq->rx_ring[rxq->rx_tail];
 	while ((desc < rxq->nb_rx_desc) &&
 	       ((rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) &
 		 IAVF_RXD_QW1_STATUS_MASK) >> IAVF_RXD_QW1_STATUS_SHIFT) &
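Putting the normal-path pieces together, one flexible-descriptor writeback is
parsed roughly as follows (condensed sketch, not a verbatim excerpt of the
hunks above):

	volatile union iavf_rx_flex_desc *rxdp = ...; /* descriptor to parse */
	uint16_t stat_err0 = rte_le_to_cpu_16(rxdp->wb.status_error0);

	if (stat_err0 & (1 << IAVF_RX_FLEX_DESC_STATUS0_DD_S)) {
		/* the packet length is masked out of pkt_len ... */
		uint16_t len = rte_le_to_cpu_16(rxdp->wb.pkt_len) &
			       IAVF_RX_FLX_DESC_PKT_LEN_M;
		/* ... and the ptype table is indexed via ptype_flex_flags0 */
		uint32_t ptype = IAVF_RX_FLEX_DESC_PTYPE_M &
				 rte_le_to_cpu_16(rxdp->wb.ptype_flex_flags0);
		/* VLAN, RSS hash and checksum flags are then filled in by
		 * iavf_flex_rxd_to_vlan_tci(), iavf_rxd_to_pkt_fields() and
		 * iavf_flex_rxd_error_to_pkt_flags(), respectively.
		 */
	}
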
diff --git a/drivers/net/iavf/iavf_rxtx.h b/drivers/net/iavf/iavf_rxtx.h
index 5e309631e..f33d1df41 100644
--- a/drivers/net/iavf/iavf_rxtx.h
+++ b/drivers/net/iavf/iavf_rxtx.h
 #define iavf_rx_desc iavf_16byte_rx_desc
 #else
 #define iavf_rx_desc iavf_32byte_rx_desc
+#define iavf_rx_flex_desc iavf_32b_rx_flex_desc
 #endif
 
 struct iavf_rxq_ops {
@@ -87,6 +88,7 @@ struct iavf_rx_queue {
 	struct rte_mbuf *pkt_first_seg; /* first segment of current packet */
 	struct rte_mbuf *pkt_last_seg;  /* last segment of current packet */
 	struct rte_mbuf fake_mbuf;      /* dummy mbuf */
+	uint8_t rxdid;
 
 	/* used for VPMD */
 	uint16_t rxrearm_nb; /* number of remaining to be re-armed */
@@ -379,9 +381,15 @@ void iavf_dev_tx_queue_release(void *txq);
 void iavf_stop_queues(struct rte_eth_dev *dev);
 uint16_t iavf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
			uint16_t nb_pkts);
+uint16_t iavf_recv_pkts_flex_rxd(void *rx_queue,
+				 struct rte_mbuf **rx_pkts,
+				 uint16_t nb_pkts);
 uint16_t iavf_recv_scattered_pkts(void *rx_queue,
				  struct rte_mbuf **rx_pkts,
				  uint16_t nb_pkts);
+uint16_t iavf_recv_scattered_pkts_flex_rxd(void *rx_queue,
+					   struct rte_mbuf **rx_pkts,
+					   uint16_t nb_pkts);
 uint16_t iavf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
			uint16_t nb_pkts);
 uint16_t iavf_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
diff --git a/drivers/net/iavf/iavf_vchnl.c b/drivers/net/iavf/iavf_vchnl.c
index b7fb05d32..3f0d23a92 100644
--- a/drivers/net/iavf/iavf_vchnl.c
+++ b/drivers/net/iavf/iavf_vchnl.c
@@ -88,6 +88,7 @@ iavf_execute_vf_cmd(struct iavf_adapter *adapter, struct iavf_cmd_info *args)
 	switch (args->ops) {
 	case VIRTCHNL_OP_VERSION:
 	case VIRTCHNL_OP_GET_VF_RESOURCES:
+	case VIRTCHNL_OP_GET_SUPPORTED_RXDIDS:
 		/* for init virtchnl ops, need to poll the response */
 		do {
 			ret = iavf_read_msg_from_pf(adapter, args->out_size,
@@ -338,7 +339,8 @@ iavf_get_vf_resource(struct iavf_adapter *adapter)
 	 * add advanced/optional offload capabilities
 	 */
 
-	caps = IAVF_BASIC_OFFLOAD_CAPS | VIRTCHNL_VF_CAP_ADV_LINK_SPEED;
+	caps = IAVF_BASIC_OFFLOAD_CAPS | VIRTCHNL_VF_CAP_ADV_LINK_SPEED |
+	       VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC;
 
 	args.in_args = (uint8_t *)&caps;
 	args.in_args_size = sizeof(caps);
@@ -375,6 +377,32 @@ iavf_get_vf_resource(struct iavf_adapter *adapter)
 	return 0;
 }
 
+int
+iavf_get_supported_rxdid(struct iavf_adapter *adapter)
+{
+	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(adapter);
+	struct iavf_cmd_info args;
+	int ret;
+
+	args.ops = VIRTCHNL_OP_GET_SUPPORTED_RXDIDS;
+	args.in_args = NULL;
+	args.in_args_size = 0;
+	args.out_buffer = vf->aq_resp;
+	args.out_size = IAVF_AQ_BUF_SZ;
+
+	ret = iavf_execute_vf_cmd(adapter, &args);
+	if (ret) {
+		PMD_DRV_LOG(ERR,
+			    "Failed to execute command of OP_GET_SUPPORTED_RXDIDS");
+		return ret;
+	}
+
+	vf->supported_rxdid =
+		((struct virtchnl_supported_rxdids *)args.out_buffer)->supported_rxdids;
+
+	return 0;
+}
+
 int
 iavf_enable_queues(struct iavf_adapter *adapter)
 {
@@ -567,6 +595,18 @@ iavf_configure_queues(struct iavf_adapter *adapter)
 		vc_qp->rxq.ring_len = rxq[i]->nb_rx_desc;
 		vc_qp->rxq.dma_ring_addr = rxq[i]->rx_ring_phys_addr;
 		vc_qp->rxq.databuffer_size = rxq[i]->rx_buf_len;
+
+		if (vf->vf_res->vf_cap_flags &
+		    VIRTCHNL_VF_OFFLOAD_RX_FLEX_DESC &&
+		    vf->supported_rxdid & BIT(IAVF_RXDID_COMMS_OVS_1)) {
+			vc_qp->rxq.rxdid = IAVF_RXDID_COMMS_OVS_1;
+			PMD_DRV_LOG(NOTICE, "request RXDID == %d in "
+				    "Queue[%d]", vc_qp->rxq.rxdid, i);
+		} else {
+			vc_qp->rxq.rxdid = IAVF_RXDID_LEGACY_1;
+			PMD_DRV_LOG(NOTICE, "request RXDID == %d in "
+				    "Queue[%d]", vc_qp->rxq.rxdid, i);
+		}