/* drivers/net/dpaa2/dpaa2_rxtx.c */
/*-
 *   BSD LICENSE
 *
 *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 *   Copyright 2016 NXP.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Freescale Semiconductor, Inc nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <time.h>
#include <net/if.h>

#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_dev.h>

#include <fslmc_logs.h>
#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_dpio.h>
#include <dpaa2_hw_mempool.h>

#include "dpaa2_ethdev.h"
#include "base/dpaa2_hw_dpni_annot.h"

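/* Translate the WRIOP parse results, stored in the hardware annotation
 * area ahead of the frame data, into an RTE_PTYPE_* packet type value.
 */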
static inline uint32_t __attribute__((hot))
dpaa2_dev_rx_parse(uint64_t hw_annot_addr)
{
        uint32_t pkt_type = RTE_PTYPE_UNKNOWN;
        struct dpaa2_annot_hdr *annotation =
                        (struct dpaa2_annot_hdr *)hw_annot_addr;

        PMD_RX_LOG(DEBUG, "annotation = 0x%lx", annotation->word4);

        if (BIT_ISSET_AT_POS(annotation->word3, L2_ARP_PRESENT)) {
                pkt_type = RTE_PTYPE_L2_ETHER_ARP;
                goto parse_done;
        } else if (BIT_ISSET_AT_POS(annotation->word3, L2_ETH_MAC_PRESENT)) {
                pkt_type = RTE_PTYPE_L2_ETHER;
        } else {
                goto parse_done;
        }

        if (BIT_ISSET_AT_POS(annotation->word4, L3_IPV4_1_PRESENT |
                             L3_IPV4_N_PRESENT)) {
                pkt_type |= RTE_PTYPE_L3_IPV4;
                if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_OPT_PRESENT |
                    L3_IP_N_OPT_PRESENT))
                        pkt_type |= RTE_PTYPE_L3_IPV4_EXT;
        } else if (BIT_ISSET_AT_POS(annotation->word4, L3_IPV6_1_PRESENT |
                   L3_IPV6_N_PRESENT)) {
                pkt_type |= RTE_PTYPE_L3_IPV6;
                if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_OPT_PRESENT |
                    L3_IP_N_OPT_PRESENT))
                        pkt_type |= RTE_PTYPE_L3_IPV6_EXT;
        } else {
                goto parse_done;
        }

        if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_FIRST_FRAGMENT |
            L3_IP_1_MORE_FRAGMENT |
            L3_IP_N_FIRST_FRAGMENT |
            L3_IP_N_MORE_FRAGMENT)) {
                pkt_type |= RTE_PTYPE_L4_FRAG;
                goto parse_done;
        } else {
                pkt_type |= RTE_PTYPE_L4_NONFRAG;
        }

        if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_UDP_PRESENT))
                pkt_type |= RTE_PTYPE_L4_UDP;
        else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_TCP_PRESENT))
                pkt_type |= RTE_PTYPE_L4_TCP;
        else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_SCTP_PRESENT))
                pkt_type |= RTE_PTYPE_L4_SCTP;
        else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_ICMP_PRESENT))
                pkt_type |= RTE_PTYPE_L4_ICMP;
        else if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_UNKNOWN_PROTOCOL))
                pkt_type |= RTE_PTYPE_UNKNOWN;

parse_done:
        return pkt_type;
}

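/* Set Rx offload flags on the mbuf (VLAN presence, L3/L4 checksum errors)
 * based on the frame annotation status words.
 */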
static inline void __attribute__((hot))
dpaa2_dev_rx_offload(uint64_t hw_annot_addr, struct rte_mbuf *mbuf)
{
        struct dpaa2_annot_hdr *annotation =
                (struct dpaa2_annot_hdr *)hw_annot_addr;

        if (BIT_ISSET_AT_POS(annotation->word3,
                             L2_VLAN_1_PRESENT | L2_VLAN_N_PRESENT))
                mbuf->ol_flags |= PKT_RX_VLAN_PKT;

        if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L3CE))
                mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;

        if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE))
                mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
}

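/* Convert a scatter/gather frame descriptor into a chained mbuf. Each SG
 * entry becomes one segment; the buffer that carried the SG table itself
 * is released back to its pool once the chain is built.
 */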
static inline struct rte_mbuf *__attribute__((hot))
eth_sg_fd_to_mbuf(const struct qbman_fd *fd)
{
        struct qbman_sge *sgt, *sge;
        dma_addr_t sg_addr;
        int i = 0;
        uint64_t fd_addr;
        struct rte_mbuf *first_seg, *next_seg, *cur_seg, *temp;

        fd_addr = (uint64_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));

        /* Get the scatter/gather table address */
        sgt = (struct qbman_sge *)(fd_addr + DPAA2_GET_FD_OFFSET(fd));

        sge = &sgt[i++];
        sg_addr = (uint64_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FLE_ADDR(sge));

        /* First scatter/gather entry */
        first_seg = DPAA2_INLINE_MBUF_FROM_BUF(sg_addr,
                rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
        /* Prepare all the metadata for the first segment */
        first_seg->buf_addr = (uint8_t *)sg_addr;
        first_seg->ol_flags = 0;
        first_seg->data_off = DPAA2_GET_FLE_OFFSET(sge);
        first_seg->data_len = sge->length & 0x1FFFF;
        first_seg->pkt_len = DPAA2_GET_FD_LEN(fd);
        first_seg->nb_segs = 1;
        first_seg->next = NULL;

        first_seg->packet_type = dpaa2_dev_rx_parse(
                         (uint64_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
                         + DPAA2_FD_PTA_SIZE);
        dpaa2_dev_rx_offload((uint64_t)DPAA2_IOVA_TO_VADDR(
                        DPAA2_GET_FD_ADDR(fd)) +
                        DPAA2_FD_PTA_SIZE, first_seg);
        rte_mbuf_refcnt_set(first_seg, 1);
        cur_seg = first_seg;
        while (!DPAA2_SG_IS_FINAL(sge)) {
                sge = &sgt[i++];
                sg_addr = (uint64_t)DPAA2_IOVA_TO_VADDR(
                                DPAA2_GET_FLE_ADDR(sge));
                next_seg = DPAA2_INLINE_MBUF_FROM_BUF(sg_addr,
                        rte_dpaa2_bpid_info[DPAA2_GET_FLE_BPID(sge)].meta_data_size);
                next_seg->buf_addr = (uint8_t *)sg_addr;
                next_seg->data_off = DPAA2_GET_FLE_OFFSET(sge);
                next_seg->data_len = sge->length & 0x1FFFF;
                first_seg->nb_segs += 1;
                rte_mbuf_refcnt_set(next_seg, 1);
                cur_seg->next = next_seg;
                next_seg->next = NULL;
                cur_seg = next_seg;
        }
        /* Free the buffer that held the FD and SG table */
        temp = DPAA2_INLINE_MBUF_FROM_BUF(fd_addr,
                rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
        rte_mbuf_refcnt_set(temp, 1);
        rte_pktmbuf_free_seg(temp);

        return first_seg;
}

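/* Convert a simple (single-buffer) frame descriptor into an mbuf. The mbuf
 * metadata lives inline in the buffer, ahead of the frame data.
 */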
static inline struct rte_mbuf *__attribute__((hot))
eth_fd_to_mbuf(const struct qbman_fd *fd)
{
        struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(
                DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)),
                     rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);

        /* Need to repopulate some of the fields,
         * as they may have changed during the last transmission.
         */
        mbuf->nb_segs = 1;
        mbuf->ol_flags = 0;
        mbuf->data_off = DPAA2_GET_FD_OFFSET(fd);
        mbuf->data_len = DPAA2_GET_FD_LEN(fd);
        mbuf->pkt_len = mbuf->data_len;

        /* Parse the packet; the parse results follow the private
         * (SW annotation) area.
         */
        mbuf->packet_type = dpaa2_dev_rx_parse(
                        (uint64_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
                         + DPAA2_FD_PTA_SIZE);

        dpaa2_dev_rx_offload((uint64_t)DPAA2_IOVA_TO_VADDR(
                             DPAA2_GET_FD_ADDR(fd)) +
                             DPAA2_FD_PTA_SIZE, mbuf);

        mbuf->next = NULL;
        rte_mbuf_refcnt_set(mbuf, 1);

        PMD_RX_LOG(DEBUG, "to mbuf - mbuf =%p, mbuf->buf_addr =%p, off = %d,"
                " fd_off=%d fd =%lx, meta = %d  bpid =%d, len=%d\n",
                mbuf, mbuf->buf_addr, mbuf->data_off,
                DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd),
                rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
                DPAA2_GET_FD_BPID(fd), DPAA2_GET_FD_LEN(fd));

        return mbuf;
}

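/* Build a scatter/gather frame descriptor from a multi-segment mbuf. A
 * fresh buffer is allocated from the mbuf's pool to hold the SG table,
 * and reference counts decide whether hardware may free each segment.
 */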
static int __attribute__ ((noinline)) __attribute__((hot))
eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
                  struct qbman_fd *fd, uint16_t bpid)
{
        struct rte_mbuf *cur_seg = mbuf, *prev_seg, *mi, *temp;
        struct qbman_sge *sgt, *sge = NULL;
        int i;

        /* First prepare the FD to be transmitted */
        /* Reset the buffer pool id and offset field */
        fd->simple.bpid_offset = 0;

        temp = rte_pktmbuf_alloc(mbuf->pool);
        if (temp == NULL) {
                PMD_TX_LOG(ERR, "No memory to allocate S/G table");
                return -ENOMEM;
        }

        DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(temp));
        DPAA2_SET_FD_LEN(fd, mbuf->pkt_len);
        DPAA2_SET_FD_OFFSET(fd, temp->data_off);
        DPAA2_SET_FD_BPID(fd, bpid);
        DPAA2_SET_FD_ASAL(fd, DPAA2_ASAL_VAL);
        DPAA2_FD_SET_FORMAT(fd, qbman_fd_sg);
        /* Set up the scatter/gather table and its entries */
        sgt = (struct qbman_sge *)(
                        (uint64_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
                        + DPAA2_GET_FD_OFFSET(fd));

        for (i = 0; i < mbuf->nb_segs; i++) {
                sge = &sgt[i];
                /* Reset the buffer pool id and offset field */
                sge->fin_bpid_offset = 0;
                DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(cur_seg));
                DPAA2_SET_FLE_OFFSET(sge, cur_seg->data_off);
                sge->length = cur_seg->data_len;
                if (RTE_MBUF_DIRECT(cur_seg)) {
                        if (rte_mbuf_refcnt_read(cur_seg) > 1) {
                                /* If refcnt > 1, invalid bpid is set to ensure
                                 * the buffer is not freed by HW
                                 */
                                DPAA2_SET_FLE_IVP(sge);
                                rte_mbuf_refcnt_update(cur_seg, -1);
                        } else {
                                DPAA2_SET_FLE_BPID(sge,
                                                mempool_to_bpid(cur_seg->pool));
                        }
                        cur_seg = cur_seg->next;
                } else {
                        /* Get the owner mbuf of the indirect buffer */
                        mi = rte_mbuf_from_indirect(cur_seg);
                        if (rte_mbuf_refcnt_read(mi) > 1) {
                                /* If refcnt > 1, invalid bpid is set to ensure
                                 * the owner buffer is not freed by HW
                                 */
                                DPAA2_SET_FLE_IVP(sge);
                        } else {
                                DPAA2_SET_FLE_BPID(sge,
                                                   mempool_to_bpid(mi->pool));
                                rte_mbuf_refcnt_update(mi, 1);
                        }
                        prev_seg = cur_seg;
                        cur_seg = cur_seg->next;
                        prev_seg->next = NULL;
                        rte_pktmbuf_free(prev_seg);
                }
        }
        DPAA2_SG_SET_FINAL(sge, true);
        return 0;
}

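/* Build a simple frame descriptor from a single-segment mbuf. For direct
 * mbufs with refcnt > 1, and for indirect mbufs whose owner is still
 * referenced, the invalid-BPID bit keeps hardware from freeing the buffer.
 */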
static void
eth_mbuf_to_fd(struct rte_mbuf *mbuf,
               struct qbman_fd *fd, uint16_t bpid) __attribute__((unused));

static void __attribute__ ((noinline)) __attribute__((hot))
eth_mbuf_to_fd(struct rte_mbuf *mbuf,
               struct qbman_fd *fd, uint16_t bpid)
{
        /* Reset the buffer pool id and offset field */
        fd->simple.bpid_offset = 0;

        DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
        DPAA2_SET_FD_LEN(fd, mbuf->data_len);
        DPAA2_SET_FD_BPID(fd, bpid);
        DPAA2_SET_FD_OFFSET(fd, mbuf->data_off);
        DPAA2_SET_FD_ASAL(fd, DPAA2_ASAL_VAL);

        PMD_TX_LOG(DEBUG, "mbuf =%p, mbuf->buf_addr =%p, off = %d,"
                " fd_off=%d fd =%lx, meta = %d  bpid =%d, len=%d\n",
                mbuf, mbuf->buf_addr, mbuf->data_off,
                DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd),
                rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
                DPAA2_GET_FD_BPID(fd), DPAA2_GET_FD_LEN(fd));
        if (RTE_MBUF_DIRECT(mbuf)) {
                if (rte_mbuf_refcnt_read(mbuf) > 1) {
                        DPAA2_SET_FD_IVP(fd);
                        rte_mbuf_refcnt_update(mbuf, -1);
                }
        } else {
                struct rte_mbuf *mi;

                mi = rte_mbuf_from_indirect(mbuf);
                if (rte_mbuf_refcnt_read(mi) > 1)
                        DPAA2_SET_FD_IVP(fd);
                else
                        rte_mbuf_refcnt_update(mi, 1);
                rte_pktmbuf_free(mbuf);
        }
}

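/* Fallback for mbufs that were not allocated from a DPAA2 hardware pool:
 * copy the packet into a freshly allocated hardware buffer, build the FD
 * from the copy, and free the original mbuf.
 */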
static inline int __attribute__((hot))
eth_copy_mbuf_to_fd(struct rte_mbuf *mbuf,
                    struct qbman_fd *fd, uint16_t bpid)
{
        struct rte_mbuf *m;
        void *mb = NULL;

        if (rte_dpaa2_mbuf_alloc_bulk(
                rte_dpaa2_bpid_info[bpid].bp_list->mp, &mb, 1)) {
                PMD_TX_LOG(WARNING, "Unable to allocate DPAA2 buffer");
                rte_pktmbuf_free(mbuf);
                return -1;
        }
        m = (struct rte_mbuf *)mb;
        memcpy((char *)m->buf_addr + mbuf->data_off,
               (void *)((char *)mbuf->buf_addr + mbuf->data_off),
                mbuf->pkt_len);

        /* Copy the required fields */
        m->data_off = mbuf->data_off;
        m->ol_flags = mbuf->ol_flags;
        m->packet_type = mbuf->packet_type;
        m->tx_offload = mbuf->tx_offload;

        /* Reset the buffer pool id and offset field */
        fd->simple.bpid_offset = 0;

        DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(m));
        DPAA2_SET_FD_LEN(fd, mbuf->data_len);
        DPAA2_SET_FD_BPID(fd, bpid);
        DPAA2_SET_FD_OFFSET(fd, mbuf->data_off);
        DPAA2_SET_FD_ASAL(fd, DPAA2_ASAL_VAL);

        PMD_TX_LOG(DEBUG, " mbuf %p BMAN buf addr %p",
                   (void *)mbuf, mbuf->buf_addr);

        PMD_TX_LOG(DEBUG, " fdaddr =%lx bpid =%d meta =%d off =%d, len =%d",
                   DPAA2_GET_FD_ADDR(fd),
                DPAA2_GET_FD_BPID(fd),
                rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
                DPAA2_GET_FD_OFFSET(fd),
                DPAA2_GET_FD_LEN(fd));
        /* Free the original packet */
        rte_pktmbuf_free(mbuf);

        return 0;
}

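/* Rx burst callback. Uses two alternating ("toggled") dequeue storages so
 * that one volatile dequeue (pull) command is always in flight: frames from
 * the previously issued pull are processed while the next one is prepared.
 */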
uint16_t
dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
        /* Function receives frames for a given device and VQ */
        struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
        struct qbman_result *dq_storage;
        uint32_t fqid = dpaa2_q->fqid;
        int ret, num_rx = 0;
        uint8_t is_last = 0, status;
        struct qbman_swp *swp;
        const struct qbman_fd *fd[DPAA2_DQRR_RING_SIZE];
        struct qbman_pull_desc pulldesc;
        struct queue_storage_info_t *q_storage = dpaa2_q->q_storage;
        struct rte_eth_dev *dev = dpaa2_q->dev;

        if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
                ret = dpaa2_affine_qbman_swp();
                if (ret) {
                        RTE_LOG(ERR, PMD, "Failure in affining portal\n");
                        return 0;
                }
        }
        swp = DPAA2_PER_LCORE_PORTAL;
        if (!q_storage->active_dqs) {
                q_storage->toggle = 0;
                dq_storage = q_storage->dq_storage[q_storage->toggle];
                qbman_pull_desc_clear(&pulldesc);
                qbman_pull_desc_set_numframes(&pulldesc,
                                              (nb_pkts > DPAA2_DQRR_RING_SIZE) ?
                                               DPAA2_DQRR_RING_SIZE : nb_pkts);
                qbman_pull_desc_set_fq(&pulldesc, fqid);
                qbman_pull_desc_set_storage(&pulldesc, dq_storage,
                        (dma_addr_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
                if (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) {
                        while (!qbman_check_command_complete(swp,
                               get_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)))
                                ;
                        clear_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index);
                }
                while (1) {
                        if (qbman_swp_pull(swp, &pulldesc)) {
                                PMD_RX_LOG(WARNING, "VDQ command is not issued."
                                           " QBMAN is busy\n");
                                /* Portal was busy, try again */
                                continue;
                        }
                        break;
                }
                q_storage->active_dqs = dq_storage;
                q_storage->active_dpio_id = DPAA2_PER_LCORE_DPIO->index;
                set_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index, dq_storage);
        }
        dq_storage = q_storage->active_dqs;
        /* Check if the previously issued command is completed.
         * Also seems like the SWP is shared between the Ethernet Driver
         * and the SEC driver.
         */
        while (!qbman_check_command_complete(swp, dq_storage))
                ;
        if (dq_storage == get_swp_active_dqs(q_storage->active_dpio_id))
                clear_swp_active_dqs(q_storage->active_dpio_id);
        while (!is_last) {
                /* Loop until the dq_storage is updated with
                 * new token by QBMAN
                 */
                while (!qbman_result_has_new_result(swp, dq_storage))
                        ;
                rte_prefetch0((void *)((uint64_t)(dq_storage + 1)));
                /* Check whether the last pull command has expired,
                 * setting the condition for loop termination
                 */
                if (qbman_result_DQ_is_pull_complete(dq_storage)) {
                        is_last = 1;
                        /* Check for a valid frame. */
                        status = (uint8_t)qbman_result_DQ_flags(dq_storage);
                        if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0))
                                continue;
                }
                fd[num_rx] = qbman_result_DQ_fd(dq_storage);

                /* Prefetch the annotation address for the parse results */
                rte_prefetch0((void *)((uint64_t)DPAA2_GET_FD_ADDR(fd[num_rx])
                                + DPAA2_FD_PTA_SIZE + 16));

                if (unlikely(DPAA2_FD_GET_FORMAT(fd[num_rx]) == qbman_fd_sg))
                        bufs[num_rx] = eth_sg_fd_to_mbuf(fd[num_rx]);
                else
                        bufs[num_rx] = eth_fd_to_mbuf(fd[num_rx]);
                bufs[num_rx]->port = dev->data->port_id;

                if (dev->data->dev_conf.rxmode.hw_vlan_strip)
                        rte_vlan_strip(bufs[num_rx]);

                dq_storage++;
                num_rx++;
        }

        if (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) {
                while (!qbman_check_command_complete(swp,
                       get_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)))
                        ;
                clear_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index);
        }
        /* Switch to the other storage and issue the next pull before
         * returning, so results are ready for the next burst call.
         */
        q_storage->toggle ^= 1;
        dq_storage = q_storage->dq_storage[q_storage->toggle];
        qbman_pull_desc_clear(&pulldesc);
        qbman_pull_desc_set_numframes(&pulldesc, DPAA2_DQRR_RING_SIZE);
        qbman_pull_desc_set_fq(&pulldesc, fqid);
        qbman_pull_desc_set_storage(&pulldesc, dq_storage,
                        (dma_addr_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
        /* Issue a volatile dequeue command. */
        while (1) {
                if (qbman_swp_pull(swp, &pulldesc)) {
                        PMD_RX_LOG(WARNING, "VDQ command is not issued."
                                   " QBMAN is busy\n");
                        continue;
                }
                break;
        }
        q_storage->active_dqs = dq_storage;
        q_storage->active_dpio_id = DPAA2_PER_LCORE_DPIO->index;
        set_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index, dq_storage);

        dpaa2_q->rx_pkts += num_rx;

        /* Return the total number of packets received to the DPAA2 app */
        return num_rx;
}

/*
 * Callback to handle sending packets through a WRIOP based interface
 */
uint16_t
dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
        /* Function to transmit frames to the given device and VQ */
        uint32_t loop, retry_count;
        int32_t ret;
        struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
        struct rte_mbuf *mi;
        uint32_t frames_to_send;
        struct rte_mempool *mp;
        struct qbman_eq_desc eqdesc;
        struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
        struct qbman_swp *swp;
        uint16_t num_tx = 0;
        uint16_t bpid;
        struct rte_eth_dev *dev = dpaa2_q->dev;
        struct dpaa2_dev_priv *priv = dev->data->dev_private;

        if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
                ret = dpaa2_affine_qbman_swp();
                if (ret) {
                        RTE_LOG(ERR, PMD, "Failure in affining portal\n");
                        return 0;
                }
        }
        swp = DPAA2_PER_LCORE_PORTAL;

        PMD_TX_LOG(DEBUG, "===> dev =%p, fqid =%d", dev, dpaa2_q->fqid);

        /* Prepare the enqueue descriptor */
        qbman_eq_desc_clear(&eqdesc);
        qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
        qbman_eq_desc_set_response(&eqdesc, 0, 0);
        qbman_eq_desc_set_qd(&eqdesc, priv->qdid,
                             dpaa2_q->flow_id, dpaa2_q->tc_index);

        /* Clear the unused FD fields before sending */
        while (nb_pkts) {
                /* Check if the queue is congested; retry for some time
                 * before giving up
                 */
                retry_count = 0;
                while (qbman_result_SCN_state_in_mem(dpaa2_q->cscn)) {
                        retry_count++;
                        if (retry_count > CONG_RETRY_COUNT)
                                goto skip_tx;
                }

                frames_to_send = (nb_pkts >> 3) ? MAX_TX_RING_SLOTS : nb_pkts;

                for (loop = 0; loop < frames_to_send; loop++) {
                        fd_arr[loop].simple.frc = 0;
                        DPAA2_RESET_FD_CTRL((&fd_arr[loop]));
                        DPAA2_SET_FD_FLC((&fd_arr[loop]), NULL);
                        if (RTE_MBUF_DIRECT(*bufs)) {
                                mp = (*bufs)->pool;
                        } else {
                                mi = rte_mbuf_from_indirect(*bufs);
                                mp = mi->pool;
                        }
                        /* Not a frame allocated from a hw_pkt pool */
                        if (!mp) {
                                PMD_TX_LOG(ERR, "err: no bpool attached");
                                goto skip_tx;
                        }
                        if (mp->ops_index != priv->bp_list->dpaa2_ops_index) {
                                PMD_TX_LOG(ERR, "non hw offload buffer");
                                /* alloc should be from the default buffer pool
                                 * attached to this interface
                                 */
                                if (priv->bp_list) {
                                        bpid = priv->bp_list->buf_pool.bpid;
                                } else {
                                        PMD_TX_LOG(ERR,
                                                   "err: no bpool attached");
                                        num_tx = 0;
                                        goto skip_tx;
                                }
                                if (unlikely((*bufs)->nb_segs > 1)) {
                                        PMD_TX_LOG(ERR, "S/G support not added"
                                                " for non hw offload buffer");
                                        goto skip_tx;
                                }
                                if (eth_copy_mbuf_to_fd(*bufs,
                                                        &fd_arr[loop], bpid)) {
                                        bufs++;
                                        continue;
                                }
                        } else {
                                bpid = mempool_to_bpid(mp);
                                if (unlikely((*bufs)->nb_segs > 1)) {
                                        if (eth_mbuf_to_sg_fd(*bufs,
                                                        &fd_arr[loop], bpid))
                                                goto skip_tx;
                                } else {
                                        eth_mbuf_to_fd(*bufs,
                                                       &fd_arr[loop], bpid);
                                }
                        }
                        bufs++;
                }
                loop = 0;
                while (loop < frames_to_send) {
                        loop += qbman_swp_send_multiple(swp, &eqdesc,
                                        &fd_arr[loop], frames_to_send - loop);
                }

                num_tx += frames_to_send;
                dpaa2_q->tx_pkts += frames_to_send;
                nb_pkts -= frames_to_send;
        }
skip_tx:
        return num_tx;
}

/**
 * Dummy DPDK callback for TX.
 *
 * This function is used to temporarily replace the real callback during
 * unsafe control operations on the queue, or in case of error.
 *
 * @param queue
 *   Generic pointer to TX queue structure.
 * @param[in] bufs
 *   Packets to transmit.
 * @param nb_pkts
 *   Number of packets in array.
 *
 * @return
 *   Number of packets successfully transmitted (<= nb_pkts).
 */
uint16_t
dummy_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
        (void)queue;
        (void)bufs;
        (void)nb_pkts;
        return 0;
}