/*-
 *   BSD LICENSE
 *
 *   Copyright (c) 2016 Freescale Semiconductor, Inc. All rights reserved.
 *   Copyright 2016 NXP.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Freescale Semiconductor, Inc nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <time.h>
#include <net/if.h>

#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_dev.h>

#include <fslmc_logs.h>
#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_dpio.h>
#include <dpaa2_hw_mempool.h>

#include "dpaa2_ethdev.h"
#include "base/dpaa2_hw_dpni_annot.h"

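/* Translate the hardware parse results in the frame annotation area into an
 * mbuf packet_type value (L2/L3/L4 classification). hw_annot_addr points to
 * the annotation header prepended to the received frame.
 */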
static inline uint32_t __attribute__((hot))
dpaa2_dev_rx_parse(uint64_t hw_annot_addr)
{
        uint32_t pkt_type = RTE_PTYPE_UNKNOWN;
        struct dpaa2_annot_hdr *annotation =
                        (struct dpaa2_annot_hdr *)hw_annot_addr;

        PMD_RX_LOG(DEBUG, "annotation = 0x%lx   ", annotation->word4);

        if (BIT_ISSET_AT_POS(annotation->word3, L2_ARP_PRESENT)) {
                pkt_type = RTE_PTYPE_L2_ETHER_ARP;
                goto parse_done;
        } else if (BIT_ISSET_AT_POS(annotation->word3, L2_ETH_MAC_PRESENT)) {
                pkt_type = RTE_PTYPE_L2_ETHER;
        } else {
                goto parse_done;
        }

        if (BIT_ISSET_AT_POS(annotation->word4, L3_IPV4_1_PRESENT |
                             L3_IPV4_N_PRESENT)) {
                pkt_type |= RTE_PTYPE_L3_IPV4;
                if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_OPT_PRESENT |
                        L3_IP_N_OPT_PRESENT))
                        pkt_type |= RTE_PTYPE_L3_IPV4_EXT;

        } else if (BIT_ISSET_AT_POS(annotation->word4, L3_IPV6_1_PRESENT |
                  L3_IPV6_N_PRESENT)) {
                pkt_type |= RTE_PTYPE_L3_IPV6;
                if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_OPT_PRESENT |
                    L3_IP_N_OPT_PRESENT))
                        pkt_type |= RTE_PTYPE_L3_IPV6_EXT;
        } else {
                goto parse_done;
        }

        if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_1_FIRST_FRAGMENT |
            L3_IP_1_MORE_FRAGMENT |
            L3_IP_N_FIRST_FRAGMENT |
            L3_IP_N_MORE_FRAGMENT)) {
                pkt_type |= RTE_PTYPE_L4_FRAG;
                goto parse_done;
        } else {
                pkt_type |= RTE_PTYPE_L4_NONFRAG;
        }

        if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_UDP_PRESENT))
                pkt_type |= RTE_PTYPE_L4_UDP;

        else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_TCP_PRESENT))
                pkt_type |= RTE_PTYPE_L4_TCP;

        else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_SCTP_PRESENT))
                pkt_type |= RTE_PTYPE_L4_SCTP;

        else if (BIT_ISSET_AT_POS(annotation->word4, L3_PROTO_ICMP_PRESENT))
                pkt_type |= RTE_PTYPE_L4_ICMP;

        else if (BIT_ISSET_AT_POS(annotation->word4, L3_IP_UNKNOWN_PROTOCOL))
                pkt_type |= RTE_PTYPE_UNKNOWN;

parse_done:
        return pkt_type;
}

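/* Set RX offload flags on the mbuf from the frame annotation: VLAN presence
 * and L3/L4 checksum errors reported by hardware.
 */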
static inline void __attribute__((hot))
dpaa2_dev_rx_offload(uint64_t hw_annot_addr, struct rte_mbuf *mbuf)
{
        struct dpaa2_annot_hdr *annotation =
                (struct dpaa2_annot_hdr *)hw_annot_addr;

        if (BIT_ISSET_AT_POS(annotation->word3,
                             L2_VLAN_1_PRESENT | L2_VLAN_N_PRESENT))
                mbuf->ol_flags |= PKT_RX_VLAN;

        if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L3CE))
                mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;

        if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE))
                mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
}

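/* Convert a scatter/gather frame descriptor received from hardware into a
 * chained mbuf. Each SG entry becomes one segment; parse results from the
 * annotation area fill packet_type and offload flags of the first segment,
 * and the buffer holding the SG table itself is released back to its pool.
 */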
static inline struct rte_mbuf *__attribute__((hot))
eth_sg_fd_to_mbuf(const struct qbman_fd *fd)
{
        struct qbman_sge *sgt, *sge;
        dma_addr_t sg_addr;
        int i = 0;
        uint64_t fd_addr;
        struct rte_mbuf *first_seg, *next_seg, *cur_seg, *temp;

        fd_addr = (uint64_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd));

        /* Get Scatter gather table address */
        sgt = (struct qbman_sge *)(fd_addr + DPAA2_GET_FD_OFFSET(fd));

        sge = &sgt[i++];
        sg_addr = (uint64_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FLE_ADDR(sge));

        /* First Scatter gather entry */
        first_seg = DPAA2_INLINE_MBUF_FROM_BUF(sg_addr,
                rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
        /* Prepare all the metadata for first segment */
        first_seg->buf_addr = (uint8_t *)sg_addr;
        first_seg->ol_flags = 0;
        first_seg->data_off = DPAA2_GET_FLE_OFFSET(sge);
        first_seg->data_len = sge->length & 0x1FFFF;
        first_seg->pkt_len = DPAA2_GET_FD_LEN(fd);
        first_seg->nb_segs = 1;
        first_seg->next = NULL;

        first_seg->packet_type = dpaa2_dev_rx_parse(
                         (uint64_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
                         + DPAA2_FD_PTA_SIZE);
        dpaa2_dev_rx_offload((uint64_t)DPAA2_IOVA_TO_VADDR(
                        DPAA2_GET_FD_ADDR(fd)) +
                        DPAA2_FD_PTA_SIZE, first_seg);
        rte_mbuf_refcnt_set(first_seg, 1);
        cur_seg = first_seg;
        while (!DPAA2_SG_IS_FINAL(sge)) {
                sge = &sgt[i++];
                sg_addr = (uint64_t)DPAA2_IOVA_TO_VADDR(
                                DPAA2_GET_FLE_ADDR(sge));
                next_seg = DPAA2_INLINE_MBUF_FROM_BUF(sg_addr,
                        rte_dpaa2_bpid_info[DPAA2_GET_FLE_BPID(sge)].meta_data_size);
                next_seg->buf_addr = (uint8_t *)sg_addr;
                next_seg->data_off = DPAA2_GET_FLE_OFFSET(sge);
                next_seg->data_len = sge->length & 0x1FFFF;
                first_seg->nb_segs += 1;
                rte_mbuf_refcnt_set(next_seg, 1);
                cur_seg->next = next_seg;
                next_seg->next = NULL;
                cur_seg = next_seg;
        }
        temp = DPAA2_INLINE_MBUF_FROM_BUF(fd_addr,
                rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);
        rte_mbuf_refcnt_set(temp, 1);
        rte_pktmbuf_free_seg(temp);

        return (void *)first_seg;
}

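/* Convert a simple (single buffer) frame descriptor into an mbuf. The mbuf
 * metadata lives inline in the same hardware buffer, so only the fields that
 * may have been overwritten are repopulated here.
 */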
static inline struct rte_mbuf *__attribute__((hot))
eth_fd_to_mbuf(const struct qbman_fd *fd)
{
        struct rte_mbuf *mbuf = DPAA2_INLINE_MBUF_FROM_BUF(
                DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd)),
                     rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size);

        /* Some of the fields need to be repopulated,
         * as they may have changed during the last transmission.
         */
        mbuf->nb_segs = 1;
        mbuf->ol_flags = 0;
        mbuf->data_off = DPAA2_GET_FD_OFFSET(fd);
        mbuf->data_len = DPAA2_GET_FD_LEN(fd);
        mbuf->pkt_len = mbuf->data_len;

        /* Parse the packet: the parse results follow the private SW
         * annotation area.
         */
        mbuf->packet_type = dpaa2_dev_rx_parse(
                        (uint64_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
                         + DPAA2_FD_PTA_SIZE);

        dpaa2_dev_rx_offload((uint64_t)DPAA2_IOVA_TO_VADDR(
                             DPAA2_GET_FD_ADDR(fd)) +
                             DPAA2_FD_PTA_SIZE, mbuf);

        mbuf->next = NULL;
        rte_mbuf_refcnt_set(mbuf, 1);

        PMD_RX_LOG(DEBUG, "to mbuf - mbuf =%p, mbuf->buf_addr =%p, off = %d,"
                "fd_off=%d fd =%lx, meta = %d  bpid =%d, len=%d\n",
                mbuf, mbuf->buf_addr, mbuf->data_off,
                DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd),
                rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
                DPAA2_GET_FD_BPID(fd), DPAA2_GET_FD_LEN(fd));

        return mbuf;
}

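/* Build a scatter/gather frame descriptor for a multi-segment mbuf. A buffer
 * is allocated from the mbuf's pool to hold the SG table and one SG entry is
 * filled per segment. Segments whose underlying buffer must not be released
 * by hardware are marked with an invalid bpid.
 */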
static int __attribute__ ((noinline)) __attribute__((hot))
eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
                  struct qbman_fd *fd, uint16_t bpid)
{
        struct rte_mbuf *cur_seg = mbuf, *prev_seg, *mi, *temp;
        struct qbman_sge *sgt, *sge = NULL;
        int i;

        /* First prepare the FD to be transmitted */
        /* Resetting the buffer pool id and offset field */
        fd->simple.bpid_offset = 0;

        temp = rte_pktmbuf_alloc(mbuf->pool);
        if (temp == NULL) {
                PMD_TX_LOG(ERR, "No memory to allocate S/G table");
                return -ENOMEM;
        }

        DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(temp));
        DPAA2_SET_FD_LEN(fd, mbuf->pkt_len);
        DPAA2_SET_FD_OFFSET(fd, temp->data_off);
        DPAA2_SET_FD_BPID(fd, bpid);
        DPAA2_SET_FD_ASAL(fd, DPAA2_ASAL_VAL);
        DPAA2_FD_SET_FORMAT(fd, qbman_fd_sg);
        /* Set up the scatter gather table and its entries */
        sgt = (struct qbman_sge *)(
                        (uint64_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
                        + DPAA2_GET_FD_OFFSET(fd));

        for (i = 0; i < mbuf->nb_segs; i++) {
                sge = &sgt[i];
                /* Resetting the buffer pool id and offset field */
                sge->fin_bpid_offset = 0;
                DPAA2_SET_FLE_ADDR(sge, DPAA2_MBUF_VADDR_TO_IOVA(cur_seg));
                DPAA2_SET_FLE_OFFSET(sge, cur_seg->data_off);
                sge->length = cur_seg->data_len;
                if (RTE_MBUF_DIRECT(cur_seg)) {
                        if (rte_mbuf_refcnt_read(cur_seg) > 1) {
                                /* If refcnt > 1, invalid bpid is set to ensure
                                 * buffer is not freed by HW
                                 */
                                DPAA2_SET_FLE_IVP(sge);
                                rte_mbuf_refcnt_update(cur_seg, -1);
                        } else
                                DPAA2_SET_FLE_BPID(sge,
                                                mempool_to_bpid(cur_seg->pool));
                        cur_seg = cur_seg->next;
                } else {
                        /* Get owner MBUF from indirect buffer */
                        mi = rte_mbuf_from_indirect(cur_seg);
                        if (rte_mbuf_refcnt_read(mi) > 1) {
                                /* If refcnt > 1, invalid bpid is set to ensure
                                 * owner buffer is not freed by HW
                                 */
                                DPAA2_SET_FLE_IVP(sge);
                        } else {
                                DPAA2_SET_FLE_BPID(sge,
                                                   mempool_to_bpid(mi->pool));
                                rte_mbuf_refcnt_update(mi, 1);
                        }
                        prev_seg = cur_seg;
                        cur_seg = cur_seg->next;
                        prev_seg->next = NULL;
                        rte_pktmbuf_free(prev_seg);
                }
        }
        DPAA2_SG_SET_FINAL(sge, true);
        return 0;
}

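/* Build a simple frame descriptor for a single-segment mbuf. Ownership of the
 * data buffer passes to hardware unless it is still referenced elsewhere, in
 * which case an invalid bpid is set so hardware does not release it.
 */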
static void
eth_mbuf_to_fd(struct rte_mbuf *mbuf,
               struct qbman_fd *fd, uint16_t bpid) __attribute__((unused));

static void __attribute__ ((noinline)) __attribute__((hot))
eth_mbuf_to_fd(struct rte_mbuf *mbuf,
               struct qbman_fd *fd, uint16_t bpid)
{
        /* Resetting the buffer pool id and offset field */
        fd->simple.bpid_offset = 0;

        DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(mbuf));
        DPAA2_SET_FD_LEN(fd, mbuf->data_len);
        DPAA2_SET_FD_BPID(fd, bpid);
        DPAA2_SET_FD_OFFSET(fd, mbuf->data_off);
        DPAA2_SET_FD_ASAL(fd, DPAA2_ASAL_VAL);

        PMD_TX_LOG(DEBUG, "mbuf =%p, mbuf->buf_addr =%p, off = %d,"
                "fd_off=%d fd =%lx, meta = %d  bpid =%d, len=%d\n",
                mbuf, mbuf->buf_addr, mbuf->data_off,
                DPAA2_GET_FD_OFFSET(fd), DPAA2_GET_FD_ADDR(fd),
                rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
                DPAA2_GET_FD_BPID(fd), DPAA2_GET_FD_LEN(fd));
        if (RTE_MBUF_DIRECT(mbuf)) {
                if (rte_mbuf_refcnt_read(mbuf) > 1) {
                        DPAA2_SET_FD_IVP(fd);
                        rte_mbuf_refcnt_update(mbuf, -1);
                }
        } else {
                struct rte_mbuf *mi;

                mi = rte_mbuf_from_indirect(mbuf);
                if (rte_mbuf_refcnt_read(mi) > 1)
                        DPAA2_SET_FD_IVP(fd);
                else
                        rte_mbuf_refcnt_update(mi, 1);
                rte_pktmbuf_free(mbuf);
        }
}

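/* Fallback TX path for mbufs that do not come from a DPAA2-backed mempool:
 * allocate a buffer from the hardware pool identified by bpid, copy the
 * packet data and relevant metadata into it, and build the FD from the copy.
 */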
static inline int __attribute__((hot))
eth_copy_mbuf_to_fd(struct rte_mbuf *mbuf,
                    struct qbman_fd *fd, uint16_t bpid)
{
        struct rte_mbuf *m;
        void *mb = NULL;

        if (rte_dpaa2_mbuf_alloc_bulk(
                rte_dpaa2_bpid_info[bpid].bp_list->mp, &mb, 1)) {
                PMD_TX_LOG(WARNING, "Unable to allocate DPAA2 buffer");
                return -1;
        }
        m = (struct rte_mbuf *)mb;
        memcpy((char *)m->buf_addr + mbuf->data_off,
               (void *)((char *)mbuf->buf_addr + mbuf->data_off),
                mbuf->pkt_len);

        /* Copy required fields */
        m->data_off = mbuf->data_off;
        m->ol_flags = mbuf->ol_flags;
        m->packet_type = mbuf->packet_type;
        m->tx_offload = mbuf->tx_offload;

        /* Resetting the buffer pool id and offset field */
        fd->simple.bpid_offset = 0;

        DPAA2_SET_FD_ADDR(fd, DPAA2_MBUF_VADDR_TO_IOVA(m));
        DPAA2_SET_FD_LEN(fd, mbuf->data_len);
        DPAA2_SET_FD_BPID(fd, bpid);
        DPAA2_SET_FD_OFFSET(fd, mbuf->data_off);
        DPAA2_SET_FD_ASAL(fd, DPAA2_ASAL_VAL);

        PMD_TX_LOG(DEBUG, " mbuf %p BMAN buf addr %p",
                   (void *)mbuf, mbuf->buf_addr);

        PMD_TX_LOG(DEBUG, " fdaddr =%lx bpid =%d meta =%d off =%d, len =%d",
                   DPAA2_GET_FD_ADDR(fd),
                DPAA2_GET_FD_BPID(fd),
                rte_dpaa2_bpid_info[DPAA2_GET_FD_BPID(fd)].meta_data_size,
                DPAA2_GET_FD_OFFSET(fd),
                DPAA2_GET_FD_LEN(fd));

        return 0;
}

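/* RX burst function. Dequeues frames for the given queue using a volatile
 * pull from QBMAN: the results of the previously issued pull command are
 * processed into mbufs, and a new pull command is issued at the end so the
 * next call finds results already in flight.
 */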
uint16_t
dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
        /* Function receives frames for a given device and VQ */
        struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
        struct qbman_result *dq_storage;
        uint32_t fqid = dpaa2_q->fqid;
        int ret, num_rx = 0;
        uint8_t is_last = 0, status;
        struct qbman_swp *swp;
        const struct qbman_fd *fd[DPAA2_DQRR_RING_SIZE];
        struct qbman_pull_desc pulldesc;
        struct queue_storage_info_t *q_storage = dpaa2_q->q_storage;
        struct rte_eth_dev *dev = dpaa2_q->dev;

        if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
                ret = dpaa2_affine_qbman_swp();
                if (ret) {
                        RTE_LOG(ERR, PMD, "Failure in affining portal\n");
                        return 0;
                }
        }
        swp = DPAA2_PER_LCORE_PORTAL;
        if (!q_storage->active_dqs) {
                q_storage->toggle = 0;
                dq_storage = q_storage->dq_storage[q_storage->toggle];
                qbman_pull_desc_clear(&pulldesc);
                qbman_pull_desc_set_numframes(&pulldesc,
                                              (nb_pkts > DPAA2_DQRR_RING_SIZE) ?
                                               DPAA2_DQRR_RING_SIZE : nb_pkts);
                qbman_pull_desc_set_fq(&pulldesc, fqid);
                qbman_pull_desc_set_storage(&pulldesc, dq_storage,
                        (dma_addr_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
                if (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) {
                        while (!qbman_check_command_complete(
                               get_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)))
                                ;
                        clear_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index);
                }
                while (1) {
                        if (qbman_swp_pull(swp, &pulldesc)) {
                                PMD_RX_LOG(WARNING, "VDQ command is not issued."
                                           " QBMAN is busy\n");
                                /* Portal was busy, try again */
                                continue;
                        }
                        break;
                }
                q_storage->active_dqs = dq_storage;
                q_storage->active_dpio_id = DPAA2_PER_LCORE_DPIO->index;
                set_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index, dq_storage);
        }
        dq_storage = q_storage->active_dqs;
        /* Check if the previously issued command is completed.
         * It also seems the SWP is shared between the Ethernet driver
         * and the SEC driver.
         */
        while (!qbman_check_command_complete(dq_storage))
                ;
        if (dq_storage == get_swp_active_dqs(q_storage->active_dpio_id))
                clear_swp_active_dqs(q_storage->active_dpio_id);
        while (!is_last) {
                /* Loop until the dq_storage is updated with
                 * new token by QBMAN
                 */
                while (!qbman_check_new_result(dq_storage))
                        ;
                rte_prefetch0((void *)((uint64_t)(dq_storage + 1)));
                /* Check whether the last pull command has expired and
                 * set the condition for loop termination
                 */
                if (qbman_result_DQ_is_pull_complete(dq_storage)) {
                        is_last = 1;
                        /* Check for valid frame. */
                        status = (uint8_t)qbman_result_DQ_flags(dq_storage);
                        if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0))
                                continue;
                }
                fd[num_rx] = qbman_result_DQ_fd(dq_storage);

                /* Prefetch annotation address for the parse results */
                rte_prefetch0((void *)((uint64_t)DPAA2_GET_FD_ADDR(fd[num_rx])
                                + DPAA2_FD_PTA_SIZE + 16));

                if (unlikely(DPAA2_FD_GET_FORMAT(fd[num_rx]) == qbman_fd_sg))
                        bufs[num_rx] = eth_sg_fd_to_mbuf(fd[num_rx]);
                else
                        bufs[num_rx] = eth_fd_to_mbuf(fd[num_rx]);
                bufs[num_rx]->port = dev->data->port_id;

                if (dev->data->dev_conf.rxmode.hw_vlan_strip)
                        rte_vlan_strip(bufs[num_rx]);

                dq_storage++;
                num_rx++;
        }

        if (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) {
                while (!qbman_check_command_complete(
                       get_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)))
                        ;
                clear_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index);
        }
        q_storage->toggle ^= 1;
        dq_storage = q_storage->dq_storage[q_storage->toggle];
        qbman_pull_desc_clear(&pulldesc);
        qbman_pull_desc_set_numframes(&pulldesc, DPAA2_DQRR_RING_SIZE);
        qbman_pull_desc_set_fq(&pulldesc, fqid);
        qbman_pull_desc_set_storage(&pulldesc, dq_storage,
                        (dma_addr_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
        /* Issue a volatile dequeue command. */
        while (1) {
                if (qbman_swp_pull(swp, &pulldesc)) {
                        PMD_RX_LOG(WARNING, "VDQ command is not issued."
                                   " QBMAN is busy\n");
                        continue;
                }
                break;
        }
        q_storage->active_dqs = dq_storage;
        q_storage->active_dpio_id = DPAA2_PER_LCORE_DPIO->index;
        set_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index, dq_storage);

        dpaa2_q->rx_pkts += num_rx;

        /* Return the total number of packets received to DPAA2 app */
        return num_rx;
}

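/* Fill an rte_event from a received frame descriptor for the DPAA2 event
 * device (parallel queues) and consume the DQRR entry.
 */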
void __attribute__((hot))
dpaa2_dev_process_parallel_event(struct qbman_swp *swp,
                                 const struct qbman_fd *fd,
                                 const struct qbman_result *dq,
                                 struct dpaa2_queue *rxq,
                                 struct rte_event *ev)
{
        ev->mbuf = eth_fd_to_mbuf(fd);

        ev->flow_id = rxq->ev.flow_id;
        ev->sub_event_type = rxq->ev.sub_event_type;
        ev->event_type = RTE_EVENT_TYPE_ETHDEV;
        ev->op = RTE_EVENT_OP_NEW;
        ev->sched_type = rxq->ev.sched_type;
        ev->queue_id = rxq->ev.queue_id;
        ev->priority = rxq->ev.priority;

        qbman_swp_dqrr_consume(swp, dq);
}

/*
 * Callback to handle sending packets through a WRIOP based interface
 */
uint16_t
dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
        /* Function to transmit the frames to the given device and VQ */
        uint32_t loop, retry_count;
        int32_t ret;
        struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
        struct rte_mbuf *mi;
        uint32_t frames_to_send;
        struct rte_mempool *mp;
        struct qbman_eq_desc eqdesc;
        struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
        struct qbman_swp *swp;
        uint16_t num_tx = 0;
        uint16_t bpid;
        struct rte_eth_dev *dev = dpaa2_q->dev;
        struct dpaa2_dev_priv *priv = dev->data->dev_private;

        if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
                ret = dpaa2_affine_qbman_swp();
                if (ret) {
                        RTE_LOG(ERR, PMD, "Failure in affining portal\n");
                        return 0;
                }
        }
        swp = DPAA2_PER_LCORE_PORTAL;

        PMD_TX_LOG(DEBUG, "===> dev =%p, fqid =%d", dev, dpaa2_q->fqid);

        /* Prepare enqueue descriptor */
        qbman_eq_desc_clear(&eqdesc);
        qbman_eq_desc_set_no_orp(&eqdesc, DPAA2_EQ_RESP_ERR_FQ);
        qbman_eq_desc_set_response(&eqdesc, 0, 0);
        qbman_eq_desc_set_qd(&eqdesc, priv->qdid,
                             dpaa2_q->flow_id, dpaa2_q->tc_index);

        /* Clear the unused FD fields before sending */
        while (nb_pkts) {
                /* Check if the queue is congested */
                retry_count = 0;
                while (qbman_result_SCN_state(dpaa2_q->cscn)) {
                        retry_count++;
                        /* Retry for some time before giving up */
                        if (retry_count > CONG_RETRY_COUNT)
                                goto skip_tx;
                }

                frames_to_send = (nb_pkts >> 3) ? MAX_TX_RING_SLOTS : nb_pkts;

                for (loop = 0; loop < frames_to_send; loop++) {
                        fd_arr[loop].simple.frc = 0;
                        DPAA2_RESET_FD_CTRL((&fd_arr[loop]));
                        DPAA2_SET_FD_FLC((&fd_arr[loop]), NULL);
                        if (RTE_MBUF_DIRECT(*bufs)) {
                                mp = (*bufs)->pool;
                        } else {
                                mi = rte_mbuf_from_indirect(*bufs);
                                mp = mi->pool;
                        }
                        /* Not a hw_pkt pool allocated frame */
                        if (unlikely(!mp || !priv->bp_list)) {
                                PMD_TX_LOG(ERR, "err: no bpool attached");
                                goto send_n_return;
                        }

                        if (mp->ops_index != priv->bp_list->dpaa2_ops_index) {
                                PMD_TX_LOG(ERR, "non hw offload buffer");
                                /* alloc should be from the default buffer pool
                                 * attached to this interface
                                 */
                                bpid = priv->bp_list->buf_pool.bpid;

                                if (unlikely((*bufs)->nb_segs > 1)) {
                                        PMD_TX_LOG(ERR, "S/G support not added"
                                                " for non hw offload buffer");
                                        goto send_n_return;
                                }
                                if (eth_copy_mbuf_to_fd(*bufs,
                                                        &fd_arr[loop], bpid)) {
                                        goto send_n_return;
                                }
                                /* free the original packet */
                                rte_pktmbuf_free(*bufs);
                        } else {
                                bpid = mempool_to_bpid(mp);
                                if (unlikely((*bufs)->nb_segs > 1)) {
                                        if (eth_mbuf_to_sg_fd(*bufs,
                                                        &fd_arr[loop], bpid))
                                                goto send_n_return;
                                } else {
                                        eth_mbuf_to_fd(*bufs,
                                                       &fd_arr[loop], bpid);
                                }
                        }
                        bufs++;
                }
                loop = 0;
                while (loop < frames_to_send) {
                        loop += qbman_swp_enqueue_multiple(swp, &eqdesc,
                                        &fd_arr[loop], frames_to_send - loop);
                }

                num_tx += frames_to_send;
                dpaa2_q->tx_pkts += frames_to_send;
                nb_pkts -= frames_to_send;
        }
        return num_tx;

send_n_return:
        /* send any already prepared fd */
        if (loop) {
                unsigned int i = 0;

                while (i < loop) {
                        i += qbman_swp_enqueue_multiple(swp, &eqdesc,
                                                        &fd_arr[i], loop - i);
                }
                num_tx += loop;
                dpaa2_q->tx_pkts += loop;
        }
skip_tx:
        return num_tx;
}

/**
 * Dummy DPDK callback for TX.
 *
 * This function is used to temporarily replace the real callback during
 * unsafe control operations on the queue, or in case of error.
 *
 * @param queue
 *   Generic pointer to TX queue structure.
 * @param[in] bufs
 *   Packets to transmit.
 * @param nb_pkts
 *   Number of packets in array.
 *
 * @return
 *   Number of packets successfully transmitted (<= nb_pkts).
 */
uint16_t
dummy_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
{
        (void)queue;
        (void)bufs;
        (void)nb_pkts;
        return 0;
}