dpdk: fix arm iavf rx vector path on 22.03
build/external/patches/dpdk_22.03/0001-net-iavf-add-basic-neon-rx.patch
From 0581aae1d59fb28ed7ddf8b8dd3700df1b95e051 Mon Sep 17 00:00:00 2001
From: Kathleen Capella <kathleen.capella@arm.com>
Date: Fri, 17 Jun 2022 18:21:34 +0000
Subject: [PATCH] net/iavf: add basic NEON Rx

This patch adds the basic NEON Rx path to the iavf driver. It does not
include scatter or flex varieties.

Tested on N1SDP platform with Intel XL710 NIC and 40G connection.
Tested with a single core and testpmd rxonly mode. Saw no significant
performance difference between scalar and Arm vPMD paths using this test
in iavf and saw the same results when comparing scalar and Arm vPMD
path in i40e.

Signed-off-by: Kathleen Capella <kathleen.capella@arm.com>
Reviewed-by: Ruifeng Wang <ruifeng.wang@arm.com>
Reviewed-by: Qi Zhang <qi.z.zhang@intel.com>
---
 drivers/net/iavf/iavf_rxtx.c          |  14 +
 drivers/net/iavf/iavf_rxtx_vec_neon.c | 415 ++++++++++++++++++++++++++
 drivers/net/iavf/meson.build          |   2 +
 3 files changed, 431 insertions(+)
 create mode 100644 drivers/net/iavf/iavf_rxtx_vec_neon.c

diff --git a/drivers/net/iavf/iavf_rxtx.c b/drivers/net/iavf/iavf_rxtx.c
index 14d4dbe9670..109ba756f83 100644
--- a/drivers/net/iavf/iavf_rxtx.c
+++ b/drivers/net/iavf/iavf_rxtx.c
@@ -3059,7 +3059,23 @@ iavf_set_rx_function(struct rte_eth_dev *dev)
 
                return;
        }
+#elif defined RTE_ARCH_ARM
+       int check_ret;
+       int i;
+       struct iavf_rx_queue *rxq;
 
+       check_ret = iavf_rx_vec_dev_check(dev);
+       if (check_ret >= 0 &&
+           rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128) {
+               PMD_DRV_LOG(DEBUG, "Using a Vector Rx callback (port=%d).",
+                           dev->data->port_id);
+               for (i = 0; i < dev->data->nb_rx_queues; i++) {
+                       rxq = dev->data->rx_queues[i];
+                       (void)iavf_rxq_vec_setup(rxq);
+               }
+               dev->rx_pkt_burst = iavf_recv_pkts_vec;
+               return;
+       }
 #endif
        if (dev->data->scattered_rx) {
                PMD_DRV_LOG(DEBUG, "Using a Scattered Rx callback (port=%d).",
diff --git a/drivers/net/iavf/iavf_rxtx_vec_neon.c b/drivers/net/iavf/iavf_rxtx_vec_neon.c
new file mode 100644
index 00000000000..83825aa427a
--- /dev/null
+++ b/drivers/net/iavf/iavf_rxtx_vec_neon.c
@@ -0,0 +1,415 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2022 Intel Corporation
+ * Copyright(c) 2022 Arm Limited
+ */
+
+#include <stdint.h>
+#include <ethdev_driver.h>
+#include <rte_malloc.h>
+#include <rte_vect.h>
+
+#include "iavf.h"
+#include "iavf_rxtx.h"
+#include "iavf_rxtx_vec_common.h"
+
+static inline void
+iavf_rxq_rearm(struct iavf_rx_queue *rxq)
+{
+       int i;
+       uint16_t rx_id;
+       volatile union iavf_rx_desc *rxdp;
+       struct rte_mbuf **rxep = &rxq->sw_ring[rxq->rxrearm_start];
+       struct rte_mbuf *mb0, *mb1;
+       uint64x2_t dma_addr0, dma_addr1;
+       uint64x2_t zero = vdupq_n_u64(0);
+       uint64_t paddr;
+
+       rxdp = rxq->rx_ring + rxq->rxrearm_start;
+
+       /* Pull 'n' more MBUFs into the software ring */
+       if (unlikely(rte_mempool_get_bulk(rxq->mp,
+                                         (void *)rxep,
+                                         IAVF_RXQ_REARM_THRESH) < 0)) {
+               if (rxq->rxrearm_nb + IAVF_RXQ_REARM_THRESH >=
+                   rxq->nb_rx_desc) {
+                       for (i = 0; i < IAVF_VPMD_DESCS_PER_LOOP; i++) {
+                               rxep[i] = &rxq->fake_mbuf;
+                               vst1q_u64((uint64_t *)&rxdp[i].read, zero);
+                       }
+               }
+               rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
+                       IAVF_RXQ_REARM_THRESH;
+               return;
+       }
+
+       /* Initialize the mbufs in vector, process 2 mbufs in one loop */
+       for (i = 0; i < IAVF_RXQ_REARM_THRESH; i += 2, rxep += 2) {
+               mb0 = rxep[0];
+               mb1 = rxep[1];
+
+               paddr = mb0->buf_iova + RTE_PKTMBUF_HEADROOM;
+               dma_addr0 = vdupq_n_u64(paddr);
+
+               /* flush desc with pa dma_addr */
+               vst1q_u64((uint64_t *)&rxdp++->read, dma_addr0);
+
+               paddr = mb1->buf_iova + RTE_PKTMBUF_HEADROOM;
+               dma_addr1 = vdupq_n_u64(paddr);
+               vst1q_u64((uint64_t *)&rxdp++->read, dma_addr1);
+       }
+
+       rxq->rxrearm_start += IAVF_RXQ_REARM_THRESH;
+       if (rxq->rxrearm_start >= rxq->nb_rx_desc)
+               rxq->rxrearm_start = 0;
+
+       rxq->rxrearm_nb -= IAVF_RXQ_REARM_THRESH;
+
+       rx_id = (uint16_t)((rxq->rxrearm_start == 0) ?
+                            (rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));
+
+       rte_io_wmb();
+       /* Update the tail pointer on the NIC */
+       IAVF_PCI_REG_WRITE_RELAXED(rxq->qrx_tail, rx_id);
+}
+
+static inline void
+desc_to_olflags_v(struct iavf_rx_queue *rxq, volatile union iavf_rx_desc *rxdp,
+                 uint64x2_t descs[4], struct rte_mbuf **rx_pkts)
+{
+       RTE_SET_USED(rxdp);
+
+       uint32x4_t vlan0, vlan1, rss, l3_l4e;
+       const uint64x2_t mbuf_init = {rxq->mbuf_initializer, 0};
+       uint64x2_t rearm0, rearm1, rearm2, rearm3;
+
+       /* mask everything except RSS, flow director and VLAN flags
+        * bit2 is for VLAN tag, bit11 for flow director indication
+        * bit13:12 for RSS indication.
+        */
+       const uint32x4_t rss_vlan_msk = {
+                       0x1c03804, 0x1c03804, 0x1c03804, 0x1c03804};
+
+       const uint32x4_t cksum_mask = {
+                       RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD |
+                       RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
+                       RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD,
+                       RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD |
+                       RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
+                       RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD,
+                       RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD |
+                       RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
+                       RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD,
+                       RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_IP_CKSUM_BAD |
+                       RTE_MBUF_F_RX_L4_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
+                       RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD};
+
+       /* map rss and vlan type to rss hash and vlan flag */
+       const uint8x16_t vlan_flags = {
+                       0, 0, 0, 0,
+                       RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED, 0, 0, 0,
+                       0, 0, 0, 0,
+                       0, 0, 0, 0};
+
+       const uint8x16_t rss_flags = {
+                       0, RTE_MBUF_F_RX_FDIR, 0, 0,
+                       0, 0, RTE_MBUF_F_RX_RSS_HASH, RTE_MBUF_F_RX_RSS_HASH | RTE_MBUF_F_RX_FDIR,
+                       0, 0, 0, 0,
+                       0, 0, 0, 0};
+
+       const uint8x16_t l3_l4e_flags = {
+                       (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD) >> 1,
+                       RTE_MBUF_F_RX_IP_CKSUM_BAD >> 1,
+                       (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD) >> 1,
+                       (RTE_MBUF_F_RX_L4_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+                       (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD) >> 1,
+                       (RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+                       (RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD |
+                        RTE_MBUF_F_RX_L4_CKSUM_BAD) >> 1,
+                       (RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD |
+                        RTE_MBUF_F_RX_IP_CKSUM_BAD) >> 1,
+                       0, 0, 0, 0, 0, 0, 0, 0};
+
+       vlan0 = vzipq_u32(vreinterpretq_u32_u64(descs[0]),
+                         vreinterpretq_u32_u64(descs[2])).val[1];
+       vlan1 = vzipq_u32(vreinterpretq_u32_u64(descs[1]),
+                         vreinterpretq_u32_u64(descs[3])).val[1];
+       vlan0 = vzipq_u32(vlan0, vlan1).val[0];
+
+       vlan1 = vandq_u32(vlan0, rss_vlan_msk);
+       vlan0 = vreinterpretq_u32_u8(vqtbl1q_u8(vlan_flags,
+                                               vreinterpretq_u8_u32(vlan1)));
+
+       const uint32x4_t desc_fltstat = vshrq_n_u32(vlan1, 11);
+       rss = vreinterpretq_u32_u8(vqtbl1q_u8(rss_flags,
+                                             vreinterpretq_u8_u32(desc_fltstat)));
+
+       l3_l4e = vshrq_n_u32(vlan1, 22);
+       l3_l4e = vreinterpretq_u32_u8(vqtbl1q_u8(l3_l4e_flags,
+                                             vreinterpretq_u8_u32(l3_l4e)));
+       /* then we shift left 1 bit */
+       l3_l4e = vshlq_n_u32(l3_l4e, 1);
+       /* we need to mask out the redundant bits */
+       l3_l4e = vandq_u32(l3_l4e, cksum_mask);
+
+       vlan0 = vorrq_u32(vlan0, rss);
+       vlan0 = vorrq_u32(vlan0, l3_l4e);
+
+       rearm0 = vsetq_lane_u64(vgetq_lane_u32(vlan0, 0), mbuf_init, 1);
+       rearm1 = vsetq_lane_u64(vgetq_lane_u32(vlan0, 1), mbuf_init, 1);
+       rearm2 = vsetq_lane_u64(vgetq_lane_u32(vlan0, 2), mbuf_init, 1);
+       rearm3 = vsetq_lane_u64(vgetq_lane_u32(vlan0, 3), mbuf_init, 1);
+
+       vst1q_u64((uint64_t *)&rx_pkts[0]->rearm_data, rearm0);
+       vst1q_u64((uint64_t *)&rx_pkts[1]->rearm_data, rearm1);
+       vst1q_u64((uint64_t *)&rx_pkts[2]->rearm_data, rearm2);
+       vst1q_u64((uint64_t *)&rx_pkts[3]->rearm_data, rearm3);
+}
+
+#define PKTLEN_SHIFT     10
+#define IAVF_UINT16_BIT (CHAR_BIT * sizeof(uint16_t))
+
+static inline void
+desc_to_ptype_v(uint64x2_t descs[4], struct rte_mbuf **__rte_restrict rx_pkts,
+               uint32_t *__rte_restrict ptype_tbl)
+{
+       int i;
+       uint8_t ptype;
+       uint8x16_t tmp;
+
+       for (i = 0; i < 4; i++) {
+               tmp = vreinterpretq_u8_u64(vshrq_n_u64(descs[i], 30));
+               ptype = vgetq_lane_u8(tmp, 8);
+               rx_pkts[i]->packet_type = ptype_tbl[ptype];
+       }
+}
+
+/**
+ * vPMD raw receive routine, only accept(nb_pkts >= IAVF_VPMD_DESCS_PER_LOOP)
+ *
+ * Notice:
+ * - nb_pkts < IAVF_VPMD_DESCS_PER_LOOP, just return no packet
+ * - floor align nb_pkts to a IAVF_VPMD_DESCS_PER_LOOP power-of-two
+ */
+static inline uint16_t
+_recv_raw_pkts_vec(struct iavf_rx_queue *__rte_restrict rxq,
+                  struct rte_mbuf **__rte_restrict rx_pkts,
+                  uint16_t nb_pkts, uint8_t *split_packet)
+{
+       RTE_SET_USED(split_packet);
+
+       volatile union iavf_rx_desc *rxdp;
+       struct rte_mbuf **sw_ring;
+       uint16_t nb_pkts_recd;
+       int pos;
+       uint32_t *ptype_tbl = rxq->vsi->adapter->ptype_tbl;
+
+       /* mask to shuffle from desc. to mbuf */
+       uint8x16_t shuf_msk = {
+               0xFF, 0xFF,   /* pkt_type set as unknown */
+               0xFF, 0xFF,   /* pkt_type set as unknown */
+               14, 15,       /* octet 15~14, low 16 bits pkt_len */
+               0xFF, 0xFF,   /* skip high 16 bits pkt_len, zero out */
+               14, 15,       /* octet 15~14, 16 bits data_len */
+               2, 3,         /* octet 2~3, low 16 bits vlan_macip */
+               4, 5, 6, 7    /* octet 4~7, 32bits rss */
+               };
+
+       uint16x8_t crc_adjust = {
+               0, 0,         /* ignore pkt_type field */
+               rxq->crc_len, /* sub crc on pkt_len */
+               0,            /* ignore high-16bits of pkt_len */
+               rxq->crc_len, /* sub crc on data_len */
+               0, 0, 0       /* ignore non-length fields */
+               };
+       /* nb_pkts has to be floor-aligned to IAVF_VPMD_DESCS_PER_LOOP */
+       nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, IAVF_VPMD_DESCS_PER_LOOP);
+
+       rxdp = rxq->rx_ring + rxq->rx_tail;
+
+       rte_prefetch_non_temporal(rxdp);
+
+       /* See if we need to rearm the RX queue - gives the prefetch a bit
+        * of time to act
+        */
+       if (rxq->rxrearm_nb > IAVF_RXQ_REARM_THRESH)
+               iavf_rxq_rearm(rxq);
+
+       /* Before we start moving massive data around, check to see if
+        * there is actually a packet available
+        */
+       if (!(rxdp->wb.qword1.status_error_len &
+                       rte_cpu_to_le_32(1 << IAVF_RX_DESC_STATUS_DD_SHIFT)))
+               return 0;
+
+       /* Cache is empty -> need to scan the buffer rings, but first move
+        * the next 'n' mbufs into the cache
+        */
+       sw_ring = &rxq->sw_ring[rxq->rx_tail];
+       /* A. load 4 packet in one loop
+        * [A*. mask out 4 unused dirty field in desc]
+        * B. copy 4 mbuf point from swring to rx_pkts
+        * C. calc the number of DD bits among the 4 packets
+        * [C*. extract the end-of-packet bit, if requested]
+        * D. fill info. from desc to mbuf
+        */
+
+       for (pos = 0, nb_pkts_recd = 0; pos < nb_pkts;
+                       pos += IAVF_VPMD_DESCS_PER_LOOP,
+                       rxdp += IAVF_VPMD_DESCS_PER_LOOP) {
+               uint64x2_t descs[IAVF_VPMD_DESCS_PER_LOOP];
+               uint8x16_t pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4;
+               uint16x8x2_t sterr_tmp1, sterr_tmp2;
+               uint64x2_t mbp1, mbp2;
+               uint16x8_t staterr;
+               uint16x8_t tmp;
+               uint64_t stat;
+
+               int32x4_t len_shl = {0, 0, 0, PKTLEN_SHIFT};
+
+               /* A.1 load desc[3-0] */
+               descs[3] =  vld1q_u64((uint64_t *)(rxdp + 3));
+               descs[2] =  vld1q_u64((uint64_t *)(rxdp + 2));
+               descs[1] =  vld1q_u64((uint64_t *)(rxdp + 1));
+               descs[0] =  vld1q_u64((uint64_t *)(rxdp));
+
+               /* Use acquire fence to order loads of descriptor qwords */
+               rte_atomic_thread_fence(__ATOMIC_ACQUIRE);
+               /* A.2 reload qword0 to make it ordered after qword1 load */
+               descs[3] = vld1q_lane_u64((uint64_t *)(rxdp + 3), descs[3], 0);
+               descs[2] = vld1q_lane_u64((uint64_t *)(rxdp + 2), descs[2], 0);
+               descs[1] = vld1q_lane_u64((uint64_t *)(rxdp + 1), descs[1], 0);
+               descs[0] = vld1q_lane_u64((uint64_t *)(rxdp), descs[0], 0);
+
+               /* B.1 load 4 mbuf point */
+               mbp1 = vld1q_u64((uint64_t *)&sw_ring[pos]);
+               mbp2 = vld1q_u64((uint64_t *)&sw_ring[pos + 2]);
+
+               /* B.2 copy 4 mbuf point into rx_pkts  */
+               vst1q_u64((uint64_t *)&rx_pkts[pos], mbp1);
+               vst1q_u64((uint64_t *)&rx_pkts[pos + 2], mbp2);
+
+               /* pkts shift the pktlen field to be 16-bit aligned*/
+               uint32x4_t len3 = vshlq_u32(vreinterpretq_u32_u64(descs[3]),
+                                           len_shl);
+               descs[3] = vreinterpretq_u64_u16(vsetq_lane_u16
+                               (vgetq_lane_u16(vreinterpretq_u16_u32(len3), 7),
+                                vreinterpretq_u16_u64(descs[3]),
+                                7));
+               uint32x4_t len2 = vshlq_u32(vreinterpretq_u32_u64(descs[2]),
+                                           len_shl);
+               descs[2] = vreinterpretq_u64_u16(vsetq_lane_u16
+                               (vgetq_lane_u16(vreinterpretq_u16_u32(len2), 7),
+                                vreinterpretq_u16_u64(descs[2]),
+                                7));
+               uint32x4_t len1 = vshlq_u32(vreinterpretq_u32_u64(descs[1]),
+                                           len_shl);
+               descs[1] = vreinterpretq_u64_u16(vsetq_lane_u16
+                               (vgetq_lane_u16(vreinterpretq_u16_u32(len1), 7),
+                                vreinterpretq_u16_u64(descs[1]),
+                                7));
+               uint32x4_t len0 = vshlq_u32(vreinterpretq_u32_u64(descs[0]),
+                                           len_shl);
+               descs[0] = vreinterpretq_u64_u16(vsetq_lane_u16
+                               (vgetq_lane_u16(vreinterpretq_u16_u32(len0), 7),
+                                vreinterpretq_u16_u64(descs[0]),
+                                7));
+               desc_to_olflags_v(rxq, rxdp, descs, &rx_pkts[pos]);
+
+               /* D.1 pkts convert format from desc to pktmbuf */
+               pkt_mb4 = vqtbl1q_u8(vreinterpretq_u8_u64(descs[3]), shuf_msk);
+               pkt_mb3 = vqtbl1q_u8(vreinterpretq_u8_u64(descs[2]), shuf_msk);
+               pkt_mb2 = vqtbl1q_u8(vreinterpretq_u8_u64(descs[1]), shuf_msk);
+               pkt_mb1 = vqtbl1q_u8(vreinterpretq_u8_u64(descs[0]), shuf_msk);
+
+               /* D.2 pkts set in_port/nb_seg and remove crc */
+               tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb4), crc_adjust);
+               pkt_mb4 = vreinterpretq_u8_u16(tmp);
+               tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb3), crc_adjust);
+               pkt_mb3 = vreinterpretq_u8_u16(tmp);
+               tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb2), crc_adjust);
+               pkt_mb2 = vreinterpretq_u8_u16(tmp);
+               tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb1), crc_adjust);
+               pkt_mb1 = vreinterpretq_u8_u16(tmp);
+
+               /* D.3 copy final data to rx_pkts */
+               vst1q_u8((void *)&rx_pkts[pos + 3]->rx_descriptor_fields1,
+                               pkt_mb4);
+               vst1q_u8((void *)&rx_pkts[pos + 2]->rx_descriptor_fields1,
+                               pkt_mb3);
+               vst1q_u8((void *)&rx_pkts[pos + 1]->rx_descriptor_fields1,
+                               pkt_mb2);
+               vst1q_u8((void *)&rx_pkts[pos]->rx_descriptor_fields1,
+                               pkt_mb1);
+
+               desc_to_ptype_v(descs, &rx_pkts[pos], ptype_tbl);
+
+               if (likely(pos + IAVF_VPMD_DESCS_PER_LOOP < nb_pkts))
+                       rte_prefetch_non_temporal(rxdp + IAVF_VPMD_DESCS_PER_LOOP);
+
+               /* C.1 4=>2 filter staterr info only */
+               sterr_tmp2 = vzipq_u16(vreinterpretq_u16_u64(descs[1]),
+                                      vreinterpretq_u16_u64(descs[3]));
+               sterr_tmp1 = vzipq_u16(vreinterpretq_u16_u64(descs[0]),
+                                      vreinterpretq_u16_u64(descs[2]));
+
+               /* C.2 get 4 pkts staterr value  */
+               staterr = vzipq_u16(sterr_tmp1.val[1],
+                                   sterr_tmp2.val[1]).val[0];
+
+               staterr = vshlq_n_u16(staterr, IAVF_UINT16_BIT - 1);
+               staterr = vreinterpretq_u16_s16(
+                               vshrq_n_s16(vreinterpretq_s16_u16(staterr),
+                                           IAVF_UINT16_BIT - 1));
+               stat = ~vgetq_lane_u64(vreinterpretq_u64_u16(staterr), 0);
+
+               /* C.4 calc available number of desc */
+               if (unlikely(stat == 0)) {
+                       nb_pkts_recd += IAVF_VPMD_DESCS_PER_LOOP;
+               } else {
+                       nb_pkts_recd += __builtin_ctzl(stat) / IAVF_UINT16_BIT;
+                       break;
+               }
+       }
+
+       /* Update our internal tail pointer */
+       rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_pkts_recd);
+       rxq->rx_tail = (uint16_t)(rxq->rx_tail & (rxq->nb_rx_desc - 1));
+       rxq->rxrearm_nb = (uint16_t)(rxq->rxrearm_nb + nb_pkts_recd);
+       return nb_pkts_recd;
+}
+
+/*
+ * Notice:
+ * - nb_pkts < IAVF_VPMD_DESCS_PER_LOOP, just return no packet
+ * - nb_pkts > IAVF_VPMD_RX_BURST, only scan IAVF_VPMD_RX_BURST
+ *   numbers of DD bits
+ */
+uint16_t
+iavf_recv_pkts_vec(void *__rte_restrict rx_queue,
+               struct rte_mbuf **__rte_restrict rx_pkts, uint16_t nb_pkts)
+{
+       return _recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL);
+}
+
+static void __rte_cold
+iavf_rx_queue_release_mbufs_neon(struct iavf_rx_queue *rxq)
+{
+       _iavf_rx_queue_release_mbufs_vec(rxq);
+}
+
+static const struct iavf_rxq_ops neon_vec_rxq_ops = {
+       .release_mbufs = iavf_rx_queue_release_mbufs_neon,
+};
+
+int __rte_cold
+iavf_rxq_vec_setup(struct iavf_rx_queue *rxq)
+{
+       rxq->ops = &neon_vec_rxq_ops;
+       return iavf_rxq_vec_setup_default(rxq);
+}
+
+int __rte_cold
+iavf_rx_vec_dev_check(struct rte_eth_dev *dev)
+{
+       return iavf_rx_vec_dev_check_default(dev);
+}
diff --git a/drivers/net/iavf/meson.build b/drivers/net/iavf/meson.build
index 5eb230f6870..2da37de6629 100644
--- a/drivers/net/iavf/meson.build
+++ b/drivers/net/iavf/meson.build
@@ -65,6 +65,8 @@ if arch_subdir == 'x86'
                 c_args: avx512_args)
         objs += iavf_avx512_lib.extract_objects('iavf_rxtx_vec_avx512.c')
     endif
+elif arch_subdir == 'arm'
+    sources += files('iavf_rxtx_vec_neon.c')
 endif
 
 headers = files('rte_pmd_iavf.h')
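
For context: the dispatch added to iavf_set_rx_function() above installs the NEON Rx burst only when the runtime SIMD bitwidth limit allows at least 128-bit vectors. Below is a minimal, hypothetical sketch, not part of this patch or of upstream DPDK, of how an application could log whether that condition holds; it assumes only the public <rte_vect.h> API (rte_vect_get_max_simd_bitwidth() and RTE_VECT_SIMD_128, available since DPDK 20.11), and the helper name is illustrative.

/* Hypothetical helper (not part of this patch): mirrors the check used by
 * iavf_set_rx_function(). Assumes DPDK's <rte_vect.h>.
 */
#include <stdio.h>
#include <rte_vect.h>

static void
log_iavf_rx_path_hint(void)
{
        uint16_t simd = rte_vect_get_max_simd_bitwidth();

        /* The NEON Rx burst is selected only when at least 128-bit SIMD is
         * permitted; for example, starting EAL with
         * --force-max-simd-bitwidth=64 keeps the scalar path.
         */
        if (simd >= RTE_VECT_SIMD_128)
                printf("iavf: NEON vector Rx eligible (max SIMD bitwidth %u)\n", simd);
        else
                printf("iavf: scalar Rx will be used (max SIMD bitwidth %u)\n", simd);
}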