1 Description: i40e: implement vector PMD for altivec
3 From: Gowrishankar Muthukrishnan <gowrishankar.m@linux.vnet.ibm.com>
5 This patch enables the i40e driver on powerpc along with its altivec
9 v4 - docs and config update.
10 v3 - minor coding style corrections.
11 v2 - minor corrections for gcc strict aliasing and coding style.
13 Signed-off-by: Gowrishankar Muthukrishnan <gowrishankar.m@linux.vnet.ibm.com>
15 Note: can be dropped with DPDK >= 17.05
17 Forwarded: yes, http://dpdk.org/dev/patchwork/patch/20680/
18 Original-Author: Gowrishankar Muthukrishnan <gowrishankar.m@linux.vnet.ibm.com>
19 Bug-Ubuntu: https://bugs.launchpad.net/ubuntu/+source/dpdk/+bug/1670686
20 Author: Christian Ehrhardt <christian.ehrhardt@canonical.com>
21 Last-Update: 2017-03-07
26 M: Chao Zhu <chaozhu@linux.vnet.ibm.com>
27 F: lib/librte_eal/common/arch/ppc_64/
28 F: lib/librte_eal/common/include/arch/ppc_64/
29 +F: drivers/net/i40e/i40e_rxtx_vec_altivec.c
32 M: Bruce Richardson <bruce.richardson@intel.com>
33 --- a/config/defconfig_ppc_64-power8-linuxapp-gcc
34 +++ b/config/defconfig_ppc_64-power8-linuxapp-gcc
36 # Note: Initially, all of the PMD drivers compilation are turned off on Power
37 # Will turn on them only after the successful testing on Power
38 CONFIG_RTE_LIBRTE_IXGBE_PMD=n
39 -CONFIG_RTE_LIBRTE_I40E_PMD=n
+CONFIG_RTE_LIBRTE_I40E_PMD=y
40 CONFIG_RTE_LIBRTE_VIRTIO_PMD=y
41 CONFIG_RTE_LIBRTE_VMXNET3_PMD=n
42 CONFIG_RTE_LIBRTE_ENIC_PMD=n
43 --- a/doc/guides/nics/features/i40e.ini
44 +++ b/doc/guides/nics/features/i40e.ini
50 --- a/doc/guides/nics/features/i40e_vec.ini
51 +++ b/doc/guides/nics/features/i40e_vec.ini
57 --- a/drivers/net/i40e/Makefile
58 +++ b/drivers/net/i40e/Makefile
60 SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_rxtx.c
61 ifeq ($(CONFIG_RTE_ARCH_ARM64),y)
62 SRCS-$(CONFIG_RTE_LIBRTE_I40E_INC_VECTOR) += i40e_rxtx_vec_neon.c
63 +else ifeq ($(CONFIG_RTE_ARCH_PPC_64),y)
64 +SRCS-$(CONFIG_RTE_LIBRTE_I40E_INC_VECTOR) += i40e_rxtx_vec_altivec.c
66 SRCS-$(CONFIG_RTE_LIBRTE_I40E_INC_VECTOR) += i40e_rxtx_vec_sse.c
69 +++ b/drivers/net/i40e/i40e_rxtx_vec_altivec.c
74 + * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
75 + * Copyright(c) 2017 IBM Corporation.
76 + * All rights reserved.
78 + * Redistribution and use in source and binary forms, with or without
79 + * modification, are permitted provided that the following conditions
82 + * * Redistributions of source code must retain the above copyright
83 + * notice, this list of conditions and the following disclaimer.
84 + * * Redistributions in binary form must reproduce the above copyright
85 + * notice, this list of conditions and the following disclaimer in
86 + * the documentation and/or other materials provided with the
88 + * * Neither the name of Intel Corporation nor the names of its
89 + * contributors may be used to endorse or promote products derived
90 + * from this software without specific prior written permission.
92 + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
93 + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
94 + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
95 + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
96 + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
97 + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
98 + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
99 + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
100 + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
101 + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
102 + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
106 +#include <rte_ethdev.h>
107 +#include <rte_malloc.h>
109 +#include "base/i40e_prototype.h"
110 +#include "base/i40e_type.h"
111 +#include "i40e_ethdev.h"
112 +#include "i40e_rxtx.h"
113 +#include "i40e_rxtx_vec_common.h"
115 +#include <altivec.h>
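+/* The vec_ld()/vec_st() calls below cast the volatile descriptor ring to
+ * plain vector pointers, which would otherwise trip -Wcast-qual; hence the
+ * pragma that follows.
+ */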
117 +#pragma GCC diagnostic ignored "-Wcast-qual"
120 +i40e_rxq_rearm(struct i40e_rx_queue *rxq)
124 + volatile union i40e_rx_desc *rxdp;
126 + struct i40e_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start];
127 + struct rte_mbuf *mb0, *mb1;
129 + vector unsigned long hdr_room = (vector unsigned long){
130 + RTE_PKTMBUF_HEADROOM,
131 + RTE_PKTMBUF_HEADROOM};
132 + vector unsigned long dma_addr0, dma_addr1;
134 + rxdp = rxq->rx_ring + rxq->rxrearm_start;
136 + /* Pull 'n' more MBUFs into the software ring */
137 + if (rte_mempool_get_bulk(rxq->mp,
139 + RTE_I40E_RXQ_REARM_THRESH) < 0) {
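+		/* Allocation failed: if the ring is about to run dry, park the
+		 * next descriptors on the fake mbuf with zeroed DMA addresses
+		 * so the hardware never sees stale pointers; the failure is
+		 * counted and rearming retried on a later call.
+		 */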
140 + if (rxq->rxrearm_nb + RTE_I40E_RXQ_REARM_THRESH >=
142 + dma_addr0 = (vector unsigned long){};
143 + for (i = 0; i < RTE_I40E_DESCS_PER_LOOP; i++) {
144 + rxep[i].mbuf = &rxq->fake_mbuf;
145 + vec_st(dma_addr0, 0,
146 + (vector unsigned long *)&rxdp[i].read);
149 + rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed +=
150 + RTE_I40E_RXQ_REARM_THRESH;
154 + /* Initialize the mbufs in vector, process 2 mbufs in one loop */
155 + for (i = 0; i < RTE_I40E_RXQ_REARM_THRESH; i += 2, rxep += 2) {
156 + vector unsigned long vaddr0, vaddr1;
159 + mb0 = rxep[0].mbuf;
160 + mb1 = rxep[1].mbuf;
162 + /* Flush mbuf with pkt template.
163 + * Data to be rearmed is 6 bytes long.
164 +		 * RX will overwrite ol_flags, which comes next, anyway,
165 +		 * so overwrite the whole 8 bytes with one store:
166 +		 * 6 bytes of rearm_data plus the first 2 bytes of ol_flags.
168 + p0 = (uintptr_t)&mb0->rearm_data;
169 + *(uint64_t *)p0 = rxq->mbuf_initializer;
170 + p1 = (uintptr_t)&mb1->rearm_data;
171 + *(uint64_t *)p1 = rxq->mbuf_initializer;
173 + /* load buf_addr(lo 64bit) and buf_physaddr(hi 64bit) */
174 + vaddr0 = vec_ld(0, (vector unsigned long *)&mb0->buf_addr);
175 + vaddr1 = vec_ld(0, (vector unsigned long *)&mb1->buf_addr);
177 + /* convert pa to dma_addr hdr/data */
178 + dma_addr0 = vec_mergel(vaddr0, vaddr0);
179 + dma_addr1 = vec_mergel(vaddr1, vaddr1);
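+		/* vaddr holds {buf_addr, buf_physaddr}; the merge duplicates
+		 * buf_physaddr into both 64-bit lanes, so a single 16-byte
+		 * store below fills both the pkt_addr and hdr_addr words of
+		 * the descriptor.
+		 */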
181 + /* add headroom to pa values */
182 + dma_addr0 = vec_add(dma_addr0, hdr_room);
183 + dma_addr1 = vec_add(dma_addr1, hdr_room);
185 + /* flush desc with pa dma_addr */
186 + vec_st(dma_addr0, 0, (vector unsigned long *)&rxdp++->read);
187 + vec_st(dma_addr1, 0, (vector unsigned long *)&rxdp++->read);
190 + rxq->rxrearm_start += RTE_I40E_RXQ_REARM_THRESH;
191 + if (rxq->rxrearm_start >= rxq->nb_rx_desc)
192 + rxq->rxrearm_start = 0;
194 + rxq->rxrearm_nb -= RTE_I40E_RXQ_REARM_THRESH;
196 + rx_id = (uint16_t)((rxq->rxrearm_start == 0) ?
197 + (rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1));
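+	/* tail is the last rearmed descriptor, one slot behind
+	 * rxrearm_start (with wrap-around)
+	 */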
199 + /* Update the tail pointer on the NIC */
200 + I40E_PCI_REG_WRITE(rxq->qrx_tail, rx_id);
203 +/* Handling the offload flags (olflags) field takes computation
204 + * time when receiving packets. Therefore we provide a flag to disable
205 + * the processing of the olflags field when they are not needed. This
206 + * gives improved performance, at the cost of losing the offload info
207 + * in the received packet
209 +#ifdef RTE_LIBRTE_I40E_RX_OLFLAGS_ENABLE
212 +desc_to_olflags_v(vector unsigned long descs[4], struct rte_mbuf **rx_pkts)
214 + vector unsigned int vlan0, vlan1, rss, l3_l4e;
216 +	/* mask everything except the RSS, flow director, VLAN and L3/L4
217 +	 * checksum error bits: bit 2 is the VLAN tag, bit 11 the flow
218 +	 * director indication, bits 13:12 the RSS indication and bits
+	 * 24:22 the checksum errors.
220 + const vector unsigned int rss_vlan_msk = (vector unsigned int){
221 + (int32_t)0x1c03804, (int32_t)0x1c03804,
222 + (int32_t)0x1c03804, (int32_t)0x1c03804};
224 + /* map rss and vlan type to rss hash and vlan flag */
225 + const vector unsigned char vlan_flags = (vector unsigned char){
227 + PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED, 0, 0, 0,
231 + const vector unsigned char rss_flags = (vector unsigned char){
232 + 0, PKT_RX_FDIR, 0, 0,
233 + 0, 0, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH | PKT_RX_FDIR,
237 + const vector unsigned char l3_l4e_flags = (vector unsigned char){
239 + PKT_RX_IP_CKSUM_BAD,
240 + PKT_RX_L4_CKSUM_BAD,
241 + PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD,
242 + PKT_RX_EIP_CKSUM_BAD,
243 + PKT_RX_EIP_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD,
244 + PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD,
245 + PKT_RX_EIP_CKSUM_BAD | PKT_RX_L4_CKSUM_BAD
246 + | PKT_RX_IP_CKSUM_BAD,
247 + 0, 0, 0, 0, 0, 0, 0, 0};
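+	/* the table above is indexed by the three checksum error bits
+	 * taken from bits 24:22 of qword1 (see the 22-bit shift below):
+	 * bit 0 = IPE, bit 1 = L4E, bit 2 = EIPE
+	 */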
249 + vlan0 = (vector unsigned int)vec_mergel(descs[0], descs[1]);
250 + vlan1 = (vector unsigned int)vec_mergel(descs[2], descs[3]);
251 + vlan0 = (vector unsigned int)vec_mergeh(vlan0, vlan1);
253 + vlan1 = vec_and(vlan0, rss_vlan_msk);
254 + vlan0 = (vector unsigned int)vec_perm(vlan_flags,
255 + (vector unsigned char){},
256 + *(vector unsigned char *)&vlan1);
258 + rss = vec_sr(vlan1, (vector unsigned int){11, 11, 11, 11});
259 + rss = (vector unsigned int)vec_perm(rss_flags, (vector unsigned char){},
260 + *(vector unsigned char *)&rss);
262 + l3_l4e = vec_sr(vlan1, (vector unsigned int){22, 22, 22, 22});
263 + l3_l4e = (vector unsigned int)vec_perm(l3_l4e_flags,
264 + (vector unsigned char){},
265 + *(vector unsigned char *)&l3_l4e);
267 + vlan0 = vec_or(vlan0, rss);
268 + vlan0 = vec_or(vlan0, l3_l4e);
270 + rx_pkts[0]->ol_flags = (uint64_t)vlan0[2];
271 + rx_pkts[1]->ol_flags = (uint64_t)vlan0[3];
272 + rx_pkts[2]->ol_flags = (uint64_t)vlan0[0];
273 + rx_pkts[3]->ol_flags = (uint64_t)vlan0[1];
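+	/* note: the lane indices above differ from the x86 version because
+	 * the vec_mergeh()/vec_mergel() element ordering on POWER places the
+	 * flags of packets 0-1 in elements 2-3
+	 */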
276 +#define desc_to_olflags_v(desc, rx_pkts) do {} while (0)
279 +#define PKTLEN_SHIFT 10
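+/* pkt_len occupies bits 51:38 of the receive descriptor's qword1; shifting
+ * the word that holds it left by PKTLEN_SHIFT aligns the length on a 16-bit
+ * boundary so the shuffle mask can extract it as two whole bytes.
+ */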
282 +desc_to_ptype_v(vector unsigned long descs[4], struct rte_mbuf **rx_pkts)
284 + vector unsigned long ptype0 = vec_mergel(descs[0], descs[1]);
285 + vector unsigned long ptype1 = vec_mergel(descs[2], descs[3]);
287 + ptype0 = vec_sr(ptype0, (vector unsigned long){30, 30});
288 + ptype1 = vec_sr(ptype1, (vector unsigned long){30, 30});
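+	/* ptype is the 8-bit field at bits 37:30 of qword1; the 30-bit
+	 * shift leaves it ready to be picked out of each merged vector and
+	 * translated through the device's ptype table
+	 */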
290 + rx_pkts[0]->packet_type = i40e_rxd_pkt_type_mapping(
291 + (*(vector unsigned char *)&ptype0)[0]);
292 + rx_pkts[1]->packet_type = i40e_rxd_pkt_type_mapping(
293 + (*(vector unsigned char *)&ptype0)[8]);
294 + rx_pkts[2]->packet_type = i40e_rxd_pkt_type_mapping(
295 + (*(vector unsigned char *)&ptype1)[0]);
296 + rx_pkts[3]->packet_type = i40e_rxd_pkt_type_mapping(
297 + (*(vector unsigned char *)&ptype1)[8]);
301 + * - if nb_pkts < RTE_I40E_DESCS_PER_LOOP, no packets are returned
302 + * - if nb_pkts > RTE_I40E_VPMD_RX_BURST, only RTE_I40E_VPMD_RX_BURST
303 + *   DD bits are scanned
305 +static inline uint16_t
306 +_recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts,
307 + uint16_t nb_pkts, uint8_t *split_packet)
309 + volatile union i40e_rx_desc *rxdp;
310 + struct i40e_rx_entry *sw_ring;
311 + uint16_t nb_pkts_recd;
314 + vector unsigned char shuf_msk;
316 + vector unsigned short crc_adjust = (vector unsigned short){
317 + 0, 0, /* ignore pkt_type field */
318 + rxq->crc_len, /* sub crc on pkt_len */
319 + 0, /* ignore high-16bits of pkt_len */
320 + rxq->crc_len, /* sub crc on data_len */
321 + 0, 0, 0 /* ignore non-length fields */
323 + vector unsigned long dd_check, eop_check;
325 +	/* nb_pkts must be less than or equal to RTE_I40E_MAX_RX_BURST */
326 + nb_pkts = RTE_MIN(nb_pkts, RTE_I40E_MAX_RX_BURST);
328 + /* nb_pkts has to be floor-aligned to RTE_I40E_DESCS_PER_LOOP */
329 + nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_I40E_DESCS_PER_LOOP);
331 + /* Just the act of getting into the function from the application is
332 + * going to cost about 7 cycles
334 + rxdp = rxq->rx_ring + rxq->rx_tail;
336 + rte_prefetch0(rxdp);
338 + /* See if we need to rearm the RX queue - gives the prefetch a bit
341 + if (rxq->rxrearm_nb > RTE_I40E_RXQ_REARM_THRESH)
342 + i40e_rxq_rearm(rxq);
344 + /* Before we start moving massive data around, check to see if
345 + * there is actually a packet available
347 + if (!(rxdp->wb.qword1.status_error_len &
348 + rte_cpu_to_le_32(1 << I40E_RX_DESC_STATUS_DD_SHIFT)))
351 + /* 4 packets DD mask */
352 + dd_check = (vector unsigned long){0x0000000100000001ULL,
353 + 0x0000000100000001ULL};
355 + /* 4 packets EOP mask */
356 + eop_check = (vector unsigned long){0x0000000200000002ULL,
357 + 0x0000000200000002ULL};
359 + /* mask to shuffle from desc. to mbuf */
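+	/* vec_perm() reads each selector byte modulo 32, so the 0xFF
+	 * entries below select a byte of the all-zero second operand and
+	 * zero out the corresponding field
+	 */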
360 + shuf_msk = (vector unsigned char){
361 + 0xFF, 0xFF, /* pkt_type set as unknown */
362 + 0xFF, 0xFF, /* pkt_type set as unknown */
363 + 14, 15, /* octet 15~14, low 16 bits pkt_len */
364 + 0xFF, 0xFF, /* skip high 16 bits pkt_len, zero out */
365 + 14, 15, /* octet 15~14, 16 bits data_len */
366 + 2, 3, /* octet 2~3, low 16 bits vlan_macip */
367 + 4, 5, 6, 7 /* octet 4~7, 32bits rss */
370 + /* Cache is empty -> need to scan the buffer rings, but first move
371 + * the next 'n' mbufs into the cache
373 + sw_ring = &rxq->sw_ring[rxq->rx_tail];
375 +	/* A. load 4 packets in one loop
376 +	 * [A*. mask out 4 unused dirty fields in desc]
377 +	 * B. copy 4 mbuf pointers from sw_ring to rx_pkts
378 + * C. calc the number of DD bits among the 4 packets
379 + * [C*. extract the end-of-packet bit, if requested]
380 + * D. fill info. from desc to mbuf
383 + for (pos = 0, nb_pkts_recd = 0; pos < nb_pkts;
384 + pos += RTE_I40E_DESCS_PER_LOOP,
385 + rxdp += RTE_I40E_DESCS_PER_LOOP) {
386 + vector unsigned long descs[RTE_I40E_DESCS_PER_LOOP];
387 + vector unsigned char pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4;
388 + vector unsigned short staterr, sterr_tmp1, sterr_tmp2;
389 +		vector unsigned long mbp1, mbp2; /* two mbuf pointers
393 +		/* B.1 load 2 mbuf pointers */
394 + mbp1 = *(vector unsigned long *)&sw_ring[pos];
395 + /* Read desc statuses backwards to avoid race condition */
396 + /* A.1 load 4 pkts desc */
397 + descs[3] = *(vector unsigned long *)(rxdp + 3);
398 + rte_compiler_barrier();
400 +		/* B.2 copy 2 mbuf pointers into rx_pkts */
401 + *(vector unsigned long *)&rx_pkts[pos] = mbp1;
403 +		/* B.1 load 2 more mbuf pointers */
404 + mbp2 = *(vector unsigned long *)&sw_ring[pos + 2];
406 + descs[2] = *(vector unsigned long *)(rxdp + 2);
407 + rte_compiler_barrier();
408 +		/* A.1 load the remaining pkts desc */
409 + descs[1] = *(vector unsigned long *)(rxdp + 1);
410 + rte_compiler_barrier();
411 + descs[0] = *(vector unsigned long *)(rxdp);
413 +		/* B.2 copy 2 mbuf pointers into rx_pkts */
414 + *(vector unsigned long *)&rx_pkts[pos + 2] = mbp2;
416 + if (split_packet) {
417 + rte_mbuf_prefetch_part2(rx_pkts[pos]);
418 + rte_mbuf_prefetch_part2(rx_pkts[pos + 1]);
419 + rte_mbuf_prefetch_part2(rx_pkts[pos + 2]);
420 + rte_mbuf_prefetch_part2(rx_pkts[pos + 3]);
423 + /* avoid compiler reorder optimization */
424 + rte_compiler_barrier();
426 +		/* pkt 3,4 shift the pktlen field to be 16-bit aligned */
427 + const vector unsigned int len3 = vec_sl(
428 + vec_ld(0, (vector unsigned int *)&descs[3]),
429 + (vector unsigned int){0, 0, 0, PKTLEN_SHIFT});
431 + const vector unsigned int len2 = vec_sl(
432 + vec_ld(0, (vector unsigned int *)&descs[2]),
433 + (vector unsigned int){0, 0, 0, PKTLEN_SHIFT});
435 + /* merge the now-aligned packet length fields back in */
436 + descs[3] = (vector unsigned long)len3;
437 + descs[2] = (vector unsigned long)len2;
439 + /* D.1 pkt 3,4 convert format from desc to pktmbuf */
440 + pkt_mb4 = vec_perm((vector unsigned char)descs[3],
441 + (vector unsigned char){}, shuf_msk);
442 + pkt_mb3 = vec_perm((vector unsigned char)descs[2],
443 + (vector unsigned char){}, shuf_msk);
445 + /* C.1 4=>2 filter staterr info only */
446 + sterr_tmp2 = vec_mergel((vector unsigned short)descs[3],
447 + (vector unsigned short)descs[2]);
448 + /* C.1 4=>2 filter staterr info only */
449 + sterr_tmp1 = vec_mergel((vector unsigned short)descs[1],
450 + (vector unsigned short)descs[0]);
451 + /* D.2 pkt 3,4 set in_port/nb_seg and remove crc */
452 + pkt_mb4 = (vector unsigned char)vec_sub(
453 + (vector unsigned short)pkt_mb4, crc_adjust);
454 + pkt_mb3 = (vector unsigned char)vec_sub(
455 + (vector unsigned short)pkt_mb3, crc_adjust);
457 +		/* pkt 1,2 shift the pktlen field to be 16-bit aligned */
458 + const vector unsigned int len1 = vec_sl(
459 + vec_ld(0, (vector unsigned int *)&descs[1]),
460 + (vector unsigned int){0, 0, 0, PKTLEN_SHIFT});
461 + const vector unsigned int len0 = vec_sl(
462 + vec_ld(0, (vector unsigned int *)&descs[0]),
463 + (vector unsigned int){0, 0, 0, PKTLEN_SHIFT});
465 + /* merge the now-aligned packet length fields back in */
466 + descs[1] = (vector unsigned long)len1;
467 + descs[0] = (vector unsigned long)len0;
469 + /* D.1 pkt 1,2 convert format from desc to pktmbuf */
470 + pkt_mb2 = vec_perm((vector unsigned char)descs[1],
471 + (vector unsigned char){}, shuf_msk);
472 + pkt_mb1 = vec_perm((vector unsigned char)descs[0],
473 + (vector unsigned char){}, shuf_msk);
475 + /* C.2 get 4 pkts staterr value */
476 + staterr = (vector unsigned short)vec_mergeh(
477 + sterr_tmp1, sterr_tmp2);
479 + /* D.3 copy final 3,4 data to rx_pkts */
481 + (vector unsigned char *)&rx_pkts[pos + 3]
482 + ->rx_descriptor_fields1
485 + (vector unsigned char *)&rx_pkts[pos + 2]
486 + ->rx_descriptor_fields1
489 + /* D.2 pkt 1,2 set in_port/nb_seg and remove crc */
490 + pkt_mb2 = (vector unsigned char)vec_sub(
491 + (vector unsigned short)pkt_mb2, crc_adjust);
492 + pkt_mb1 = (vector unsigned char)vec_sub(
493 + (vector unsigned short)pkt_mb1, crc_adjust);
495 + /* C* extract and record EOP bit */
496 + if (split_packet) {
497 + vector unsigned char eop_shuf_mask =
498 + (vector unsigned char){
499 + 0xFF, 0xFF, 0xFF, 0xFF,
500 + 0xFF, 0xFF, 0xFF, 0xFF,
501 + 0xFF, 0xFF, 0xFF, 0xFF,
502 + 0x04, 0x0C, 0x00, 0x08
505 + /* and with mask to extract bits, flipping 1-0 */
506 + vector unsigned char eop_bits = vec_and(
507 + (vector unsigned char)vec_nor(staterr, staterr),
508 + (vector unsigned char)eop_check);
509 +			/* the staterr values are not in order, as the count
510 +			 * of dd bits doesn't care. However, for end of
511 + * packet tracking, we do care, so shuffle. This also
512 + * compresses the 32-bit values to 8-bit
514 + eop_bits = vec_perm(eop_bits, (vector unsigned char){},
516 + /* store the resulting 32-bit value */
517 + *split_packet = (vec_ld(0,
518 + (vector unsigned int *)&eop_bits))[0];
519 + split_packet += RTE_I40E_DESCS_PER_LOOP;
521 + /* zero-out next pointers */
522 + rx_pkts[pos]->next = NULL;
523 + rx_pkts[pos + 1]->next = NULL;
524 + rx_pkts[pos + 2]->next = NULL;
525 + rx_pkts[pos + 3]->next = NULL;
528 + /* C.3 calc available number of desc */
529 + staterr = vec_and(staterr, (vector unsigned short)dd_check);
531 + /* D.3 copy final 1,2 data to rx_pkts */
533 + (vector unsigned char *)&rx_pkts[pos + 1]
534 + ->rx_descriptor_fields1
537 + (vector unsigned char *)&rx_pkts[pos]->rx_descriptor_fields1
539 + desc_to_ptype_v(descs, &rx_pkts[pos]);
540 + desc_to_olflags_v(descs, &rx_pkts[pos]);
542 +		/* C.4 calc available number of desc */
543 + var = __builtin_popcountll((vec_ld(0,
544 + (vector unsigned long *)&staterr)[0]));
545 + nb_pkts_recd += var;
546 + if (likely(var != RTE_I40E_DESCS_PER_LOOP))
550 + /* Update our internal tail pointer */
551 + rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_pkts_recd);
552 + rxq->rx_tail = (uint16_t)(rxq->rx_tail & (rxq->nb_rx_desc - 1));
553 + rxq->rxrearm_nb = (uint16_t)(rxq->rxrearm_nb + nb_pkts_recd);
555 + return nb_pkts_recd;
559 + * - if nb_pkts < RTE_I40E_DESCS_PER_LOOP, no packets are returned
560 + * - if nb_pkts > RTE_I40E_VPMD_RX_BURST, only RTE_I40E_VPMD_RX_BURST
561 + *   DD bits are scanned
564 +i40e_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
567 + return _recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL);
570 + /* vPMD receive routine that reassembles scattered packets
572 +  * - if nb_pkts < RTE_I40E_DESCS_PER_LOOP, no packets are returned
573 +  * - if nb_pkts > RTE_I40E_VPMD_RX_BURST, only RTE_I40E_VPMD_RX_BURST
574 +  *   DD bits are scanned
577 +i40e_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts,
580 + struct i40e_rx_queue *rxq = rx_queue;
581 + uint8_t split_flags[RTE_I40E_VPMD_RX_BURST] = {0};
583 + /* get some new buffers */
584 + uint16_t nb_bufs = _recv_raw_pkts_vec(rxq, rx_pkts, nb_pkts,
589 + /* happy day case, full burst + no packets to be joined */
590 + const uint64_t *split_fl64 = (uint64_t *)split_flags;
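+	/* the 32 per-packet split flags are scanned eight at a time as four
+	 * 64-bit words; all-zero words plus no pending first segment mean
+	 * there is nothing to reassemble
+	 */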
592 + if (rxq->pkt_first_seg == NULL &&
593 + split_fl64[0] == 0 && split_fl64[1] == 0 &&
594 + split_fl64[2] == 0 && split_fl64[3] == 0)
597 +	/* reassemble any packets that need reassembly */
598 + unsigned int i = 0;
600 + if (!rxq->pkt_first_seg) {
601 +		/* find the first split flag and only reassemble from there */
602 + while (i < nb_bufs && !split_flags[i])
607 + return i + reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i,
612 +vtx1(volatile struct i40e_tx_desc *txdp,
613 + struct rte_mbuf *pkt, uint64_t flags)
615 + uint64_t high_qw = (I40E_TX_DESC_DTYPE_DATA |
616 + ((uint64_t)flags << I40E_TXD_QW1_CMD_SHIFT) |
617 + ((uint64_t)pkt->data_len << I40E_TXD_QW1_TX_BUF_SZ_SHIFT));
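+	/* qword0 of the data descriptor carries the buffer DMA address;
+	 * high_qw above packs the descriptor type, command flags and data
+	 * length into qword1
+	 */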
619 + vector unsigned long descriptor = (vector unsigned long){
620 + pkt->buf_physaddr + pkt->data_off, high_qw};
621 + *(vector unsigned long *)txdp = descriptor;
625 +vtx(volatile struct i40e_tx_desc *txdp,
626 + struct rte_mbuf **pkt, uint16_t nb_pkts, uint64_t flags)
630 + for (i = 0; i < nb_pkts; ++i, ++txdp, ++pkt)
631 + vtx1(txdp, *pkt, flags);
635 +i40e_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
638 + struct i40e_tx_queue *txq = (struct i40e_tx_queue *)tx_queue;
639 + volatile struct i40e_tx_desc *txdp;
640 + struct i40e_tx_entry *txep;
641 + uint16_t n, nb_commit, tx_id;
642 + uint64_t flags = I40E_TD_CMD;
643 + uint64_t rs = I40E_TX_DESC_CMD_RS | I40E_TD_CMD;
646 +	/* crossing the tx_rs_thresh boundary is not allowed */
647 + nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh);
649 + if (txq->nb_tx_free < txq->tx_free_thresh)
650 + i40e_tx_free_bufs(txq);
652 + nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts);
653 + nb_commit = nb_pkts;
654 + if (unlikely(nb_pkts == 0))
657 + tx_id = txq->tx_tail;
658 + txdp = &txq->tx_ring[tx_id];
659 + txep = &txq->sw_ring[tx_id];
661 + txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts);
663 + n = (uint16_t)(txq->nb_tx_desc - tx_id);
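+	/* if the burst wraps past the end of the ring, fill the n remaining
+	 * slots with RS requested on the last one, then continue from the
+	 * ring base
+	 */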
664 + if (nb_commit >= n) {
665 + tx_backlog_entry(txep, tx_pkts, n);
667 + for (i = 0; i < n - 1; ++i, ++tx_pkts, ++txdp)
668 + vtx1(txdp, *tx_pkts, flags);
670 + vtx1(txdp, *tx_pkts++, rs);
672 + nb_commit = (uint16_t)(nb_commit - n);
675 + txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1);
677 +		/* avoid running past the end of the ring */
678 + txdp = &txq->tx_ring[tx_id];
679 + txep = &txq->sw_ring[tx_id];
682 + tx_backlog_entry(txep, tx_pkts, nb_commit);
684 + vtx(txdp, tx_pkts, nb_commit, flags);
686 + tx_id = (uint16_t)(tx_id + nb_commit);
687 + if (tx_id > txq->tx_next_rs) {
688 + txq->tx_ring[txq->tx_next_rs].cmd_type_offset_bsz |=
689 + rte_cpu_to_le_64(((uint64_t)I40E_TX_DESC_CMD_RS) <<
690 + I40E_TXD_QW1_CMD_SHIFT);
692 + (uint16_t)(txq->tx_next_rs + txq->tx_rs_thresh);
695 + txq->tx_tail = tx_id;
697 + I40E_PCI_REG_WRITE(txq->qtx_tail, txq->tx_tail);
702 +void __attribute__((cold))
703 +i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue *rxq)
705 + _i40e_rx_queue_release_mbufs_vec(rxq);
708 +int __attribute__((cold))
709 +i40e_rxq_vec_setup(struct i40e_rx_queue *rxq)
711 + return i40e_rxq_vec_setup_default(rxq);
714 +int __attribute__((cold))
715 +i40e_txq_vec_setup(struct i40e_tx_queue __rte_unused *txq)
720 +int __attribute__((cold))
721 +i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev)
723 + return i40e_rx_vec_dev_conf_condition_check_default(dev);