/*
 * Copyright (c) 2016 QLogic Corporation.
 * All rights reserved.
 * www.qlogic.com
 *
 * See LICENSE.qede_pmd for copyright and licensing details.
 */

#include "qede_rxtx.h"

static bool gro_disable = true;	/* mod_param */

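/*
 * Allocate one mbuf from the queue's mempool and publish its DMA address in
 * the next Rx buffer descriptor (BD). sw_rx_prod is a free-running counter;
 * masking with NUM_RX_BDS() (ring size - 1, rings are power of 2) yields the
 * slot index in the parallel software ring.
 */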
static inline int qede_alloc_rx_buffer(struct qede_rx_queue *rxq)
{
	struct rte_mbuf *new_mb = NULL;
	struct eth_rx_bd *rx_bd;
	dma_addr_t mapping;
	uint16_t idx = rxq->sw_rx_prod & NUM_RX_BDS(rxq);

	new_mb = rte_mbuf_raw_alloc(rxq->mb_pool);
	if (unlikely(!new_mb)) {
		PMD_RX_LOG(ERR, rxq,
			   "Failed to allocate rx buffer "
			   "sw_rx_prod %u sw_rx_cons %u mp entries %u free %u",
			   idx, rxq->sw_rx_cons & NUM_RX_BDS(rxq),
			   rte_mempool_avail_count(rxq->mb_pool),
			   rte_mempool_in_use_count(rxq->mb_pool));
		return -ENOMEM;
	}
	rxq->sw_rx_ring[idx].mbuf = new_mb;
	rxq->sw_rx_ring[idx].page_offset = 0;
	mapping = rte_mbuf_data_dma_addr_default(new_mb);
	/* Advance PROD and get BD pointer */
	rx_bd = (struct eth_rx_bd *)ecore_chain_produce(&rxq->rx_bd_ring);
	rx_bd->addr.hi = rte_cpu_to_le_32(U64_HI(mapping));
	rx_bd->addr.lo = rte_cpu_to_le_32(U64_LO(mapping));
	rxq->sw_rx_prod++;
	return 0;
}

static void qede_rx_queue_release_mbufs(struct qede_rx_queue *rxq)
{
	uint16_t i;

	if (rxq->sw_rx_ring != NULL) {
		for (i = 0; i < rxq->nb_rx_desc; i++) {
			if (rxq->sw_rx_ring[i].mbuf != NULL) {
				rte_pktmbuf_free(rxq->sw_rx_ring[i].mbuf);
				rxq->sw_rx_ring[i].mbuf = NULL;
			}
		}
	}
}

void qede_rx_queue_release(void *rx_queue)
{
	struct qede_rx_queue *rxq = rx_queue;

	if (rxq != NULL) {
		qede_rx_queue_release_mbufs(rxq);
		rte_free(rxq->sw_rx_ring);
		rxq->sw_rx_ring = NULL;
		rte_free(rxq);
		rxq = NULL;
	}
}

static void qede_tx_queue_release_mbufs(struct qede_tx_queue *txq)
{
	unsigned int i;

	PMD_TX_LOG(DEBUG, txq, "releasing %u mbufs\n", txq->nb_tx_desc);

	if (txq->sw_tx_ring) {
		for (i = 0; i < txq->nb_tx_desc; i++) {
			if (txq->sw_tx_ring[i].mbuf) {
				rte_pktmbuf_free(txq->sw_tx_ring[i].mbuf);
				txq->sw_tx_ring[i].mbuf = NULL;
			}
		}
	}
}

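/*
 * Rx queue setup: validates the descriptor count, sizes Rx buffers against
 * max_rx_pkt_len (forcing scatter-gather mode when a frame cannot fit in a
 * single mbuf), then allocates the software ring, the hardware BD ring and
 * the completion (CQE) ring before pre-filling all Rx buffers.
 */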
int
qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
		    uint16_t nb_desc, unsigned int socket_id,
		    const struct rte_eth_rxconf *rx_conf,
		    struct rte_mempool *mp)
{
	struct qede_dev *qdev = dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
	struct qede_rx_queue *rxq;
	uint16_t max_rx_pkt_len;
	uint16_t bufsz;
	size_t size;
	int rc;
	int i;

	PMD_INIT_FUNC_TRACE(edev);

	/* Note: Ring size/align is controlled by struct rte_eth_desc_lim */
	if (!rte_is_power_of_2(nb_desc)) {
		DP_ERR(edev, "Ring size %u is not power of 2\n",
		       nb_desc);
		return -EINVAL;
	}

	/* Free memory prior to re-allocation if needed... */
	if (dev->data->rx_queues[queue_idx] != NULL) {
		qede_rx_queue_release(dev->data->rx_queues[queue_idx]);
		dev->data->rx_queues[queue_idx] = NULL;
	}

	/* First allocate the rx queue data structure */
	rxq = rte_zmalloc_socket("qede_rx_queue", sizeof(struct qede_rx_queue),
				 RTE_CACHE_LINE_SIZE, socket_id);

	if (!rxq) {
		DP_ERR(edev, "Unable to allocate memory for rxq on socket %u",
		       socket_id);
		return -ENOMEM;
	}

	rxq->qdev = qdev;
	rxq->mb_pool = mp;
	rxq->nb_rx_desc = nb_desc;
	rxq->queue_id = queue_idx;
	rxq->port_id = dev->data->port_id;
	max_rx_pkt_len = (uint16_t)rxmode->max_rx_pkt_len;

	/* Fix up RX buffer size */
	bufsz = (uint16_t)rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
	if (rxmode->enable_scatter ||
	    (max_rx_pkt_len + QEDE_ETH_OVERHEAD) > bufsz) {
		if (!dev->data->scattered_rx) {
			DP_INFO(edev, "Forcing scatter-gather mode\n");
			dev->data->scattered_rx = 1;
		}
	}
	if (dev->data->scattered_rx)
		rxq->rx_buf_size = bufsz + ETHER_HDR_LEN +
				   ETHER_CRC_LEN + QEDE_ETH_OVERHEAD;
	else
		rxq->rx_buf_size = max_rx_pkt_len + QEDE_ETH_OVERHEAD;
	/* Align to cache-line size if needed */
	rxq->rx_buf_size = QEDE_CEIL_TO_CACHE_LINE_SIZE(rxq->rx_buf_size);

	DP_INFO(edev, "mtu %u mbufsz %u bd_max_bytes %u scatter_mode %d\n",
		qdev->mtu, bufsz, rxq->rx_buf_size, dev->data->scattered_rx);

	/* Allocate the parallel driver ring for Rx buffers */
	size = sizeof(*rxq->sw_rx_ring) * rxq->nb_rx_desc;
	rxq->sw_rx_ring = rte_zmalloc_socket("sw_rx_ring", size,
					     RTE_CACHE_LINE_SIZE, socket_id);
	if (!rxq->sw_rx_ring) {
		DP_NOTICE(edev, false,
			  "Unable to alloc memory for sw_rx_ring on socket %u\n",
			  socket_id);
		rte_free(rxq);
		rxq = NULL;
		return -ENOMEM;
	}

	/* Allocate FW Rx ring */
	rc = qdev->ops->common->chain_alloc(edev,
					    ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
					    ECORE_CHAIN_MODE_NEXT_PTR,
					    ECORE_CHAIN_CNT_TYPE_U16,
					    rxq->nb_rx_desc,
					    sizeof(struct eth_rx_bd),
					    &rxq->rx_bd_ring);

	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(edev, false,
			  "Unable to alloc memory for rxbd ring on socket %u\n",
			  socket_id);
		rte_free(rxq->sw_rx_ring);
		rxq->sw_rx_ring = NULL;
		rte_free(rxq);
		rxq = NULL;
		return -ENOMEM;
	}

	/* Allocate FW completion ring */
	rc = qdev->ops->common->chain_alloc(edev,
					    ECORE_CHAIN_USE_TO_CONSUME,
					    ECORE_CHAIN_MODE_PBL,
					    ECORE_CHAIN_CNT_TYPE_U16,
					    rxq->nb_rx_desc,
					    sizeof(union eth_rx_cqe),
					    &rxq->rx_comp_ring);

	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(edev, false,
			  "Unable to alloc memory for cqe ring on socket %u\n",
			  socket_id);
		/* TBD: Freeing RX BD ring */
		rte_free(rxq->sw_rx_ring);
		rxq->sw_rx_ring = NULL;
		rte_free(rxq);
		return -ENOMEM;
	}

	/* Allocate buffers for the Rx ring */
	for (i = 0; i < rxq->nb_rx_desc; i++) {
		rc = qede_alloc_rx_buffer(rxq);
		if (rc) {
			DP_NOTICE(edev, false,
				  "RX buffer allocation failed at idx=%d\n", i);
			goto err4;
		}
	}

	dev->data->rx_queues[queue_idx] = rxq;

	DP_INFO(edev, "rxq %d num_desc %u rx_buf_size=%u socket %u\n",
		queue_idx, nb_desc, rxq->rx_buf_size, socket_id);

	return 0;
err4:
	qede_rx_queue_release(rxq);
	return -ENOMEM;
}

void qede_tx_queue_release(void *tx_queue)
{
	struct qede_tx_queue *txq = tx_queue;

	if (txq != NULL) {
		qede_tx_queue_release_mbufs(txq);
		if (txq->sw_tx_ring) {
			rte_free(txq->sw_tx_ring);
			txq->sw_tx_ring = NULL;
		}
		rte_free(txq);
	}
	txq = NULL;
}

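/*
 * Tx queue setup: allocates the queue structure, the firmware PBL chain for
 * Tx BDs and the parallel software ring, and derives tx_free_thresh from the
 * user configuration or the driver default.
 */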
int
qede_tx_queue_setup(struct rte_eth_dev *dev,
		    uint16_t queue_idx,
		    uint16_t nb_desc,
		    unsigned int socket_id,
		    const struct rte_eth_txconf *tx_conf)
{
	struct qede_dev *qdev = dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	struct qede_tx_queue *txq;
	int rc;

	PMD_INIT_FUNC_TRACE(edev);

	if (!rte_is_power_of_2(nb_desc)) {
		DP_ERR(edev, "Ring size %u is not power of 2\n",
		       nb_desc);
		return -EINVAL;
	}

	/* Free memory prior to re-allocation if needed... */
	if (dev->data->tx_queues[queue_idx] != NULL) {
		qede_tx_queue_release(dev->data->tx_queues[queue_idx]);
		dev->data->tx_queues[queue_idx] = NULL;
	}

	txq = rte_zmalloc_socket("qede_tx_queue", sizeof(struct qede_tx_queue),
				 RTE_CACHE_LINE_SIZE, socket_id);

	if (txq == NULL) {
		DP_ERR(edev,
		       "Unable to allocate memory for txq on socket %u",
		       socket_id);
		return -ENOMEM;
	}

	txq->nb_tx_desc = nb_desc;
	txq->qdev = qdev;
	txq->port_id = dev->data->port_id;

	rc = qdev->ops->common->chain_alloc(edev,
					    ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
					    ECORE_CHAIN_MODE_PBL,
					    ECORE_CHAIN_CNT_TYPE_U16,
					    txq->nb_tx_desc,
					    sizeof(union eth_tx_bd_types),
					    &txq->tx_pbl);
	if (rc != ECORE_SUCCESS) {
		DP_ERR(edev,
		       "Unable to allocate memory for txbd ring on socket %u",
		       socket_id);
		qede_tx_queue_release(txq);
		return -ENOMEM;
	}

	/* Allocate software ring */
	txq->sw_tx_ring = rte_zmalloc_socket("txq->sw_tx_ring",
					     (sizeof(struct qede_tx_entry) *
					      txq->nb_tx_desc),
					     RTE_CACHE_LINE_SIZE, socket_id);

	if (!txq->sw_tx_ring) {
		DP_ERR(edev,
		       "Unable to allocate memory for sw tx ring on socket %u",
		       socket_id);
		qede_tx_queue_release(txq);
		return -ENOMEM;
	}

	txq->queue_id = queue_idx;

	txq->nb_tx_avail = txq->nb_tx_desc;

	txq->tx_free_thresh =
	    tx_conf->tx_free_thresh ? tx_conf->tx_free_thresh :
	    (txq->nb_tx_desc - QEDE_DEFAULT_TX_FREE_THRESH);

	dev->data->tx_queues[queue_idx] = txq;

	DP_INFO(edev,
		"txq %u num_desc %u tx_free_thresh %u socket %u\n",
		queue_idx, nb_desc, txq->tx_free_thresh, socket_id);

	return 0;
}

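/*
 * Fastpath layout: the first fp_num_rx entries are marked as Rx fastpaths
 * and the remainder as Tx, so each queue maps to its own fp_array slot and
 * status block.
 */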
/* This function inits fp content and resets the SB, RXQ and TXQ arrays */
static void qede_init_fp(struct qede_dev *qdev)
{
	struct qede_fastpath *fp;
	uint8_t i;
	int fp_rx = qdev->fp_num_rx;

	memset((void *)qdev->fp_array, 0, (QEDE_QUEUE_CNT(qdev) *
					   sizeof(*qdev->fp_array)));
	memset((void *)qdev->sb_array, 0, (QEDE_QUEUE_CNT(qdev) *
					   sizeof(*qdev->sb_array)));
	for_each_queue(i) {
		fp = &qdev->fp_array[i];
		if (fp_rx) {
			fp->type = QEDE_FASTPATH_RX;
			fp_rx--;
		} else {
			fp->type = QEDE_FASTPATH_TX;
		}
		fp->qdev = qdev;
		fp->id = i;
		fp->sb_info = &qdev->sb_array[i];
		snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", "qdev", i);
	}

	qdev->gro_disable = gro_disable;
}

void qede_free_fp_arrays(struct qede_dev *qdev)
{
	/* It assumes qede_free_mem_load() is called beforehand */
	if (qdev->fp_array != NULL) {
		rte_free(qdev->fp_array);
		qdev->fp_array = NULL;
	}

	if (qdev->sb_array != NULL) {
		rte_free(qdev->sb_array);
		qdev->sb_array = NULL;
	}
}

int qede_alloc_fp_array(struct qede_dev *qdev)
{
	struct ecore_dev *edev = &qdev->edev;

	qdev->fp_array = rte_calloc("fp", QEDE_QUEUE_CNT(qdev),
				    sizeof(*qdev->fp_array),
				    RTE_CACHE_LINE_SIZE);

	if (!qdev->fp_array) {
		DP_ERR(edev, "fp array allocation failed\n");
		return -ENOMEM;
	}

	qdev->sb_array = rte_calloc("sb", QEDE_QUEUE_CNT(qdev),
				    sizeof(*qdev->sb_array),
				    RTE_CACHE_LINE_SIZE);

	if (!qdev->sb_array) {
		DP_ERR(edev, "sb array allocation failed\n");
		rte_free(qdev->fp_array);
		return -ENOMEM;
	}

	return 0;
}

/* This function allocates fast-path status block memory */
static int
qede_alloc_mem_sb(struct qede_dev *qdev, struct ecore_sb_info *sb_info,
		  uint16_t sb_id)
{
	struct ecore_dev *edev = &qdev->edev;
	struct status_block *sb_virt;
	dma_addr_t sb_phys;
	int rc;

	sb_virt = OSAL_DMA_ALLOC_COHERENT(edev, &sb_phys, sizeof(*sb_virt));

	if (!sb_virt) {
		DP_ERR(edev, "Status block allocation failed\n");
		return -ENOMEM;
	}

	rc = qdev->ops->common->sb_init(edev, sb_info,
					sb_virt, sb_phys, sb_id,
					QED_SB_TYPE_L2_QUEUE);
	if (rc) {
		DP_ERR(edev, "Status block initialization failed\n");
		/* TBD: No dma_free_coherent possible */
		return rc;
	}

	return 0;
}

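/*
 * Allocate per-queue fastpath resources: the fp/sb arrays plus one status
 * block per queue. VFs may have fewer status blocks than queues, in which
 * case status blocks are shared round-robin (i % num_sbs).
 */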
int qede_alloc_fp_resc(struct qede_dev *qdev)
{
	struct ecore_dev *edev = &qdev->edev;
	struct qede_fastpath *fp;
	uint32_t num_sbs;
	uint16_t i;
	uint16_t sb_idx;
	int rc;

	if (IS_VF(edev))
		ecore_vf_get_num_sbs(ECORE_LEADING_HWFN(edev), &num_sbs);
	else
		num_sbs = ecore_cxt_get_proto_cid_count
			  (ECORE_LEADING_HWFN(edev), PROTOCOLID_ETH, NULL);

	if (num_sbs == 0) {
		DP_ERR(edev, "No status blocks available\n");
		return -EINVAL;
	}

	if (qdev->fp_array)
		qede_free_fp_arrays(qdev);

	rc = qede_alloc_fp_array(qdev);
	if (rc != 0)
		return rc;

	qede_init_fp(qdev);

	for (i = 0; i < QEDE_QUEUE_CNT(qdev); i++) {
		fp = &qdev->fp_array[i];
		if (IS_VF(edev))
			sb_idx = i % num_sbs;
		else
			sb_idx = i;
		if (qede_alloc_mem_sb(qdev, fp->sb_info, sb_idx)) {
			qede_free_fp_arrays(qdev);
			return -ENOMEM;
		}
	}

	return 0;
}

void qede_dealloc_fp_resc(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);

	qede_free_mem_load(eth_dev);
	qede_free_fp_arrays(qdev);
}

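/*
 * Publish the current BD and CQE producer values to the device. The write
 * barriers order the BD/SGE writes against the producer update, and the
 * producer update against any subsequent doorbell.
 */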
static inline void
qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq)
{
	uint16_t bd_prod = ecore_chain_get_prod_idx(&rxq->rx_bd_ring);
	uint16_t cqe_prod = ecore_chain_get_prod_idx(&rxq->rx_comp_ring);
	struct eth_rx_prod_data rx_prods = { 0 };

	/* Update producers */
	rx_prods.bd_prod = rte_cpu_to_le_16(bd_prod);
	rx_prods.cqe_prod = rte_cpu_to_le_16(cqe_prod);

	/* Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 */
	rte_wmb();

	internal_ram_wr(rxq->hw_rxq_prod_addr, sizeof(rx_prods),
			(uint32_t *)&rx_prods);

	/* The second barrier synchronizes doorbell writes issued from more
	 * than one core. It guarantees that this write arrives at the device
	 * before a subsequent doorbell (possibly from another core); without
	 * it the next doorbell could bypass this one.
	 */
	rte_wmb();

	PMD_RX_LOG(DEBUG, rxq, "bd_prod %u  cqe_prod %u\n", bd_prod, cqe_prod);
}

static inline uint32_t
qede_rxfh_indir_default(uint32_t index, uint32_t n_rx_rings)
{
	return index % n_rx_rings;
}

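/*
 * Fill the RSS hash key with pseudo-random words. rand() seeded with the
 * wall clock is sufficient here since the key only needs to differ across
 * runs, not be cryptographically strong.
 */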
static void qede_prandom_bytes(uint32_t *buff, size_t bytes)
{
	unsigned int i;

	srand((unsigned int)time(NULL));

	for (i = 0; i < bytes / sizeof(uint32_t); i++)
		buff[i] = rand();
}

static bool
qede_check_vport_rss_enable(struct rte_eth_dev *eth_dev,
			    struct qed_update_vport_rss_params *rss_params)
{
	struct rte_eth_rss_conf rss_conf;
	enum rte_eth_rx_mq_mode mode = eth_dev->data->dev_conf.rxmode.mq_mode;
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	uint8_t rss_caps;
	unsigned int i;
	uint64_t hf;
	uint32_t *key;

	PMD_INIT_FUNC_TRACE(edev);

	rss_conf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
	key = (uint32_t *)rss_conf.rss_key;
	hf = rss_conf.rss_hf;

	/* Check if RSS conditions are met.
	 * Note: Even though it's meaningless to enable RSS with one queue, it
	 * could still be used to produce an RSS hash, so that check is skipped.
	 */
	if (!(mode & ETH_MQ_RX_RSS)) {
		DP_INFO(edev, "RSS flag is not set\n");
		return false;
	}

	if (hf == 0) {
		DP_INFO(edev, "Request to disable RSS\n");
		return false;
	}

	memset(rss_params, 0, sizeof(*rss_params));

	for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++)
		rss_params->rss_ind_table[i] = qede_rxfh_indir_default(i,
							QEDE_RSS_COUNT(qdev));

	if (!key)
		qede_prandom_bytes(rss_params->rss_key,
				   sizeof(rss_params->rss_key));
	else
		memcpy(rss_params->rss_key, rss_conf.rss_key,
		       rss_conf.rss_key_len);

	qede_init_rss_caps(&rss_caps, hf);

	rss_params->rss_caps = rss_caps;

	DP_INFO(edev, "RSS conditions are met\n");

	return true;
}

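/*
 * Bring up all Rx and Tx queues and activate the vport: program each Rx
 * BD/CQE ring and Tx PBL into the firmware, hook up the HW consumer
 * pointers from the status blocks, configure the Tx doorbell data, and
 * finally send a vport-update carrying MTU, activity and RSS configuration.
 */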
static int qede_start_queues(struct rte_eth_dev *eth_dev, bool clear_stats)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	struct ecore_queue_start_common_params q_params;
	struct qed_update_vport_rss_params *rss_params = &qdev->rss_params;
	struct qed_dev_info *qed_info = &qdev->dev_info.common;
	struct qed_update_vport_params vport_update_params;
	struct qede_tx_queue *txq;
	struct qede_fastpath *fp;
	dma_addr_t p_phys_table;
	int txq_index;
	uint16_t page_cnt;
	int rc, tc, i;

	for_each_queue(i) {
		fp = &qdev->fp_array[i];
		if (fp->type & QEDE_FASTPATH_RX) {
			p_phys_table = ecore_chain_get_pbl_phys(&fp->rxq->
								rx_comp_ring);
			page_cnt = ecore_chain_get_page_cnt(&fp->rxq->
								rx_comp_ring);

			memset(&q_params, 0, sizeof(q_params));
			q_params.queue_id = i;
			q_params.vport_id = 0;
			q_params.sb = fp->sb_info->igu_sb_id;
			q_params.sb_idx = RX_PI;

			ecore_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0);

			rc = qdev->ops->q_rx_start(edev, i, &q_params,
					   fp->rxq->rx_buf_size,
					   fp->rxq->rx_bd_ring.p_phys_addr,
					   p_phys_table,
					   page_cnt,
					   &fp->rxq->hw_rxq_prod_addr);
			if (rc) {
				DP_ERR(edev, "Start rxq #%d failed %d\n",
				       fp->rxq->queue_id, rc);
				return rc;
			}

			fp->rxq->hw_cons_ptr =
					&fp->sb_info->sb_virt->pi_array[RX_PI];

			qede_update_rx_prod(qdev, fp->rxq);
		}

		if (!(fp->type & QEDE_FASTPATH_TX))
			continue;
		for (tc = 0; tc < qdev->num_tc; tc++) {
			txq = fp->txqs[tc];
			txq_index = tc * QEDE_RSS_COUNT(qdev) + i;

			p_phys_table = ecore_chain_get_pbl_phys(&txq->tx_pbl);
			page_cnt = ecore_chain_get_page_cnt(&txq->tx_pbl);

			memset(&q_params, 0, sizeof(q_params));
			q_params.queue_id = txq->queue_id;
			q_params.vport_id = 0;
			q_params.sb = fp->sb_info->igu_sb_id;
			q_params.sb_idx = TX_PI(tc);

			rc = qdev->ops->q_tx_start(edev, i, &q_params,
						   p_phys_table,
						   page_cnt, /* **pp_doorbell */
						   &txq->doorbell_addr);
			if (rc) {
				DP_ERR(edev, "Start txq %u failed %d\n",
				       txq_index, rc);
				return rc;
			}

			txq->hw_cons_ptr =
			    &fp->sb_info->sb_virt->pi_array[TX_PI(tc)];
			SET_FIELD(txq->tx_db.data.params,
				  ETH_DB_DATA_DEST, DB_DEST_XCM);
			SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD,
				  DB_AGG_CMD_SET);
			SET_FIELD(txq->tx_db.data.params,
				  ETH_DB_DATA_AGG_VAL_SEL,
				  DQ_XCM_ETH_TX_BD_PROD_CMD);

			txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;
		}
	}

	/* Prepare and send the vport enable */
	memset(&vport_update_params, 0, sizeof(vport_update_params));
	/* Update MTU via vport update */
	vport_update_params.mtu = qdev->mtu;
	vport_update_params.vport_id = 0;
	vport_update_params.update_vport_active_flg = 1;
	vport_update_params.vport_active_flg = 1;

	/* @DPDK */
	if (qed_info->mf_mode == MF_NPAR && qed_info->tx_switching) {
		/* TBD: Check SRIOV enabled for VF */
		vport_update_params.update_tx_switching_flg = 1;
		vport_update_params.tx_switching_flg = 1;
	}

	if (qede_check_vport_rss_enable(eth_dev, rss_params)) {
		vport_update_params.update_rss_flg = 1;
		qdev->rss_enabled = 1;
	} else {
		qdev->rss_enabled = 0;
	}

	rte_memcpy(&vport_update_params.rss_params, rss_params,
		   sizeof(*rss_params));

	rc = qdev->ops->vport_update(edev, &vport_update_params);
	if (rc) {
		DP_ERR(edev, "Update V-PORT failed %d\n", rc);
		return rc;
	}

	return 0;
}

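/*
 * Checksum validation helpers. The parsing flags from the fast-path CQE
 * encode whether L3/L4 checksums were calculated and whether they failed.
 * Tunnel-aware variants are only compiled in when ENC_SUPPORTED is set;
 * otherwise packets are always validated via the non-tunnel path.
 */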
#ifdef ENC_SUPPORTED
static inline uint8_t qede_tunn_exist(uint16_t flag)
{
	return !!((PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK <<
		    PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT) & flag);
}

static inline uint8_t qede_check_tunn_csum(uint16_t flag)
{
	uint8_t tcsum = 0;
	uint16_t csum_flag = 0;

	if ((PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK <<
	     PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT) & flag)
		csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK <<
		    PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT;

	if ((PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
	     PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT) & flag) {
		csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
		    PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
		tcsum = QEDE_TUNN_CSUM_UNNECESSARY;
	}

	csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK <<
	    PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT |
	    PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
	    PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;

	if (csum_flag & flag)
		return QEDE_CSUM_ERROR;

	return QEDE_CSUM_UNNECESSARY | tcsum;
}
#else
static inline uint8_t qede_tunn_exist(uint16_t flag)
{
	return 0;
}

static inline uint8_t qede_check_tunn_csum(uint16_t flag)
{
	return 0;
}
#endif

static inline uint8_t qede_check_notunn_csum(uint16_t flag)
{
	uint8_t csum = 0;
	uint16_t csum_flag = 0;

	if ((PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
	     PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT) & flag) {
		csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
		    PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
		csum = QEDE_CSUM_UNNECESSARY;
	}

	csum_flag |= PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
	    PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;

	if (csum_flag & flag)
		return QEDE_CSUM_ERROR;

	return csum;
}

static inline uint8_t qede_check_csum(uint16_t flag)
{
	if (likely(!qede_tunn_exist(flag)))
		return qede_check_notunn_csum(flag);
	else
		return qede_check_tunn_csum(flag);
}

static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq)
{
	ecore_chain_consume(&rxq->rx_bd_ring);
	rxq->sw_rx_cons++;
}

static inline void
qede_reuse_page(struct qede_dev *qdev,
		struct qede_rx_queue *rxq, struct qede_rx_entry *curr_cons)
{
	struct eth_rx_bd *rx_bd_prod = ecore_chain_produce(&rxq->rx_bd_ring);
	uint16_t idx = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
	struct qede_rx_entry *curr_prod;
	dma_addr_t new_mapping;

	curr_prod = &rxq->sw_rx_ring[idx];
	*curr_prod = *curr_cons;

	new_mapping = rte_mbuf_data_dma_addr_default(curr_prod->mbuf) +
		      curr_prod->page_offset;

	rx_bd_prod->addr.hi = rte_cpu_to_le_32(U64_HI(new_mapping));
	rx_bd_prod->addr.lo = rte_cpu_to_le_32(U64_LO(new_mapping));

	rxq->sw_rx_prod++;
}

static inline void
qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq,
			struct qede_dev *qdev, uint8_t count)
{
	struct qede_rx_entry *curr_cons;

	for (; count > 0; count--) {
		curr_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS(rxq)];
		qede_reuse_page(qdev, rxq, curr_cons);
		qede_rx_bd_ring_consume(rxq);
	}
}

static inline uint32_t qede_rx_cqe_to_pkt_type(uint16_t flags)
{
	uint32_t p_type = RTE_PTYPE_UNKNOWN;
	/* TBD - L4 indications needed ? */
	uint16_t protocol = ((PARSING_AND_ERR_FLAGS_L3TYPE_MASK <<
			      PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) & flags);

	/* protocol == 0 is unknown; protocol == 3 means LLC/SNAP over
	 * Ethernet - both stay RTE_PTYPE_UNKNOWN.
	 */
	if (protocol == 1)
		p_type = RTE_PTYPE_L3_IPV4;
	else if (protocol == 2)
		p_type = RTE_PTYPE_L3_IPV6;

	return RTE_PTYPE_L2_ETHER | p_type;
}

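/*
 * Chain the remaining BDs of a multi-BD (scattered) packet onto the first
 * mbuf. A replacement Rx buffer is allocated for every BD consumed; if the
 * packet length and BD count disagree, the remaining BDs are recycled.
 */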
int qede_process_sg_pkts(void *p_rxq, struct rte_mbuf *rx_mb,
			 int num_segs, uint16_t pkt_len)
{
	struct qede_rx_queue *rxq = p_rxq;
	struct qede_dev *qdev = rxq->qdev;
	uint16_t sw_rx_index, cur_size;

	register struct rte_mbuf *seg1 = NULL;
	register struct rte_mbuf *seg2 = NULL;

	seg1 = rx_mb;
	while (num_segs) {
		cur_size = pkt_len > rxq->rx_buf_size ?
				rxq->rx_buf_size : pkt_len;
		if (!cur_size) {
			PMD_RX_LOG(DEBUG, rxq,
				   "SG packet, len and num BD mismatch\n");
			qede_recycle_rx_bd_ring(rxq, qdev, num_segs);
			return -EINVAL;
		}

		if (qede_alloc_rx_buffer(rxq)) {
			uint8_t index;

			PMD_RX_LOG(DEBUG, rxq, "Buffer allocation failed\n");
			index = rxq->port_id;
			rte_eth_devices[index].data->rx_mbuf_alloc_failed++;
			rxq->rx_alloc_errors++;
			return -ENOMEM;
		}

		sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
		seg2 = rxq->sw_rx_ring[sw_rx_index].mbuf;
		qede_rx_bd_ring_consume(rxq);
		pkt_len -= cur_size;
		seg2->data_len = cur_size;
		seg1->next = seg2;
		seg1 = seg1->next;
		num_segs--;
		rxq->rx_segs++;
	}
	seg1 = NULL;

	if (pkt_len)
		PMD_RX_LOG(DEBUG, rxq,
			   "Mapped all BDs of jumbo, but still have %d bytes\n",
			   pkt_len);

	return ECORE_SUCCESS;
}

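/*
 * Rx burst handler. Walks the completion ring between the software and
 * hardware consumer indices, drops error packets, replenishes a buffer for
 * every packet taken, fills in mbuf metadata (packet type, RSS hash, VLAN)
 * and finally republishes the Rx producers to the device.
 */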
uint16_t
qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	struct qede_rx_queue *rxq = p_rxq;
	struct qede_dev *qdev = rxq->qdev;
	struct ecore_dev *edev = &qdev->edev;
	struct qede_fastpath *fp = &qdev->fp_array[rxq->queue_id];
	uint16_t hw_comp_cons, sw_comp_cons, sw_rx_index;
	uint16_t rx_pkt = 0;
	union eth_rx_cqe *cqe;
	struct eth_fast_path_rx_reg_cqe *fp_cqe;
	register struct rte_mbuf *rx_mb = NULL;
	register struct rte_mbuf *seg1 = NULL;
	enum eth_rx_cqe_type cqe_type;
	uint16_t len, pad, preload_idx, pkt_len, parse_flag;
	uint8_t csum_flag, num_segs;
	enum rss_hash_type htype;
	int ret;

	hw_comp_cons = rte_le_to_cpu_16(*rxq->hw_cons_ptr);
	sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);

	rte_rmb();

	if (hw_comp_cons == sw_comp_cons)
		return 0;

	while (sw_comp_cons != hw_comp_cons) {
		/* Get the CQE from the completion ring */
		cqe =
		    (union eth_rx_cqe *)ecore_chain_consume(&rxq->rx_comp_ring);
		cqe_type = cqe->fast_path_regular.type;

		if (unlikely(cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH)) {
			PMD_RX_LOG(DEBUG, rxq, "Got a slowpath CQE\n");

			qdev->ops->eth_cqe_completion(edev, fp->id,
				(struct eth_slow_path_rx_cqe *)cqe);
			goto next_cqe;
		}

		/* Get the data from the SW ring */
		sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
		rx_mb = rxq->sw_rx_ring[sw_rx_index].mbuf;
		assert(rx_mb != NULL);

		/* non GRO */
		fp_cqe = &cqe->fast_path_regular;

		len = rte_le_to_cpu_16(fp_cqe->len_on_first_bd);
		pad = fp_cqe->placement_offset;
		assert((len + pad) <= rx_mb->buf_len);

		PMD_RX_LOG(DEBUG, rxq,
			   "CQE type = 0x%x, flags = 0x%x, vlan = 0x%x"
			   " len = %u, parsing_flags = %d\n",
			   cqe_type, fp_cqe->bitfields,
			   rte_le_to_cpu_16(fp_cqe->vlan_tag),
			   len, rte_le_to_cpu_16(fp_cqe->pars_flags.flags));

		/* If this is an error packet then drop it */
		parse_flag =
		    rte_le_to_cpu_16(cqe->fast_path_regular.pars_flags.flags);
		csum_flag = qede_check_csum(parse_flag);
		if (unlikely(csum_flag == QEDE_CSUM_ERROR)) {
			PMD_RX_LOG(ERR, rxq,
				   "CQE in CONS = %u has error, flags = 0x%x, "
				   "dropping incoming packet\n",
				   sw_comp_cons, parse_flag);
			rxq->rx_hw_errors++;
			qede_recycle_rx_bd_ring(rxq, qdev, fp_cqe->bd_num);
			goto next_cqe;
		}

		if (unlikely(qede_alloc_rx_buffer(rxq) != 0)) {
			PMD_RX_LOG(ERR, rxq,
				   "New buffer allocation failed, "
				   "dropping incoming packet\n");
			qede_recycle_rx_bd_ring(rxq, qdev, fp_cqe->bd_num);
			rte_eth_devices[rxq->port_id].
			    data->rx_mbuf_alloc_failed++;
			rxq->rx_alloc_errors++;
			break;
		}

		qede_rx_bd_ring_consume(rxq);

		if (fp_cqe->bd_num > 1) {
			pkt_len = rte_le_to_cpu_16(fp_cqe->pkt_len);
			num_segs = fp_cqe->bd_num - 1;

			rxq->rx_segs++;

			pkt_len -= len;
			seg1 = rx_mb;
			ret = qede_process_sg_pkts(p_rxq, seg1, num_segs,
						   pkt_len);
			if (ret != ECORE_SUCCESS) {
				qede_recycle_rx_bd_ring(rxq, qdev,
							fp_cqe->bd_num);
				goto next_cqe;
			}
		}

		/* Prefetch next mbuf while processing current one. */
		preload_idx = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
		rte_prefetch0(rxq->sw_rx_ring[preload_idx].mbuf);

		/* Update MBUF fields */
		rx_mb->ol_flags = 0;
		rx_mb->data_off = pad + RTE_PKTMBUF_HEADROOM;
		rx_mb->nb_segs = fp_cqe->bd_num;
		rx_mb->data_len = len;
		rx_mb->pkt_len = fp_cqe->pkt_len;
		rx_mb->port = rxq->port_id;
		rx_mb->packet_type = qede_rx_cqe_to_pkt_type(parse_flag);

		htype = (uint8_t)GET_FIELD(fp_cqe->bitfields,
				ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE);
		if (qdev->rss_enabled && htype) {
			rx_mb->ol_flags |= PKT_RX_RSS_HASH;
			rx_mb->hash.rss = rte_le_to_cpu_32(fp_cqe->rss_hash);
			PMD_RX_LOG(DEBUG, rxq, "Hash result 0x%x\n",
				   rx_mb->hash.rss);
		}

		rte_prefetch1(rte_pktmbuf_mtod(rx_mb, void *));

		if (CQE_HAS_VLAN(parse_flag)) {
			rx_mb->vlan_tci = rte_le_to_cpu_16(fp_cqe->vlan_tag);
			rx_mb->ol_flags |= PKT_RX_VLAN_PKT;
		}

		if (CQE_HAS_OUTER_VLAN(parse_flag)) {
			/* FW does not provide indication of Outer VLAN tag,
			 * which is always stripped, so vlan_tci_outer is set
			 * to 0. Here vlan_tag represents inner VLAN tag.
			 */
			rx_mb->vlan_tci = rte_le_to_cpu_16(fp_cqe->vlan_tag);
			rx_mb->ol_flags |= PKT_RX_QINQ_PKT;
			rx_mb->vlan_tci_outer = 0;
		}

		rx_pkts[rx_pkt] = rx_mb;
		rx_pkt++;
next_cqe:
		ecore_chain_recycle_consumed(&rxq->rx_comp_ring);
		sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
		if (rx_pkt == nb_pkts) {
			PMD_RX_LOG(DEBUG, rxq,
				   "Budget reached nb_pkts=%u received=%u\n",
				   rx_pkt, nb_pkts);
			break;
		}
	}

	qede_update_rx_prod(qdev, rxq);

	rxq->rcv_pkts += rx_pkt;

	PMD_RX_LOG(DEBUG, rxq, "rx_pkts=%u core=%d\n", rx_pkt, rte_lcore_id());

	return rx_pkt;
}

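/*
 * Tx completion helpers: qede_free_tx_pkt() releases one transmitted mbuf
 * chain and returns its BDs to the PBL; qede_process_tx_compl() walks the
 * chain until the hardware consumer index is reached, freeing descriptors
 * back to the pool.
 */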
static inline int
qede_free_tx_pkt(struct ecore_dev *edev, struct qede_tx_queue *txq)
{
	uint16_t nb_segs, idx = TX_CONS(txq);
	struct rte_mbuf *mbuf = txq->sw_tx_ring[idx].mbuf;

	if (unlikely(!mbuf)) {
		PMD_TX_LOG(ERR, txq, "null mbuf\n");
		PMD_TX_LOG(ERR, txq,
			   "tx_desc %u tx_avail %u tx_cons %u tx_prod %u\n",
			   txq->nb_tx_desc, txq->nb_tx_avail, idx,
			   TX_PROD(txq));
		return -1;
	}

	nb_segs = mbuf->nb_segs;
	while (nb_segs) {
		/* It's like consuming rxbuf in recv() */
		ecore_chain_consume(&txq->tx_pbl);
		txq->nb_tx_avail++;
		nb_segs--;
	}
	rte_pktmbuf_free(mbuf);
	txq->sw_tx_ring[idx].mbuf = NULL;

	return 0;
}

static inline uint16_t
qede_process_tx_compl(struct ecore_dev *edev, struct qede_tx_queue *txq)
{
	uint16_t tx_compl = 0;
	uint16_t hw_bd_cons;

	hw_bd_cons = rte_le_to_cpu_16(*txq->hw_cons_ptr);
	rte_compiler_barrier();

	while (hw_bd_cons != ecore_chain_get_cons_idx(&txq->tx_pbl)) {
		if (qede_free_tx_pkt(edev, txq)) {
			PMD_TX_LOG(ERR, txq,
				   "hw_bd_cons = %u, chain_cons = %u\n",
				   hw_bd_cons,
				   ecore_chain_get_cons_idx(&txq->tx_pbl));
			break;
		}
		txq->sw_tx_cons++;	/* Making TXD available */
		tx_compl++;
	}

	PMD_TX_LOG(DEBUG, txq, "Tx compl %u sw_tx_cons %u avail %u\n",
		   tx_compl, txq->sw_tx_cons, txq->nb_tx_avail);
	return tx_compl;
}

/* Populate scatter gather buffer descriptor fields */
static inline uint16_t qede_encode_sg_bd(struct qede_tx_queue *p_txq,
					 struct rte_mbuf *m_seg,
					 uint16_t count,
					 struct eth_tx_1st_bd *bd1)
{
	struct qede_tx_queue *txq = p_txq;
	struct eth_tx_2nd_bd *bd2 = NULL;
	struct eth_tx_3rd_bd *bd3 = NULL;
	struct eth_tx_bd *tx_bd = NULL;
	uint16_t nb_segs = count;
	dma_addr_t mapping;

	/* Check for scattered buffers */
	while (m_seg) {
		if (nb_segs == 1) {
			bd2 = (struct eth_tx_2nd_bd *)
				ecore_chain_produce(&txq->tx_pbl);
			memset(bd2, 0, sizeof(*bd2));
			mapping = rte_mbuf_data_dma_addr(m_seg);
			bd2->addr.hi = rte_cpu_to_le_32(U64_HI(mapping));
			bd2->addr.lo = rte_cpu_to_le_32(U64_LO(mapping));
			bd2->nbytes = rte_cpu_to_le_16(m_seg->data_len);
		} else if (nb_segs == 2) {
			bd3 = (struct eth_tx_3rd_bd *)
				ecore_chain_produce(&txq->tx_pbl);
			memset(bd3, 0, sizeof(*bd3));
			mapping = rte_mbuf_data_dma_addr(m_seg);
			bd3->addr.hi = rte_cpu_to_le_32(U64_HI(mapping));
			bd3->addr.lo = rte_cpu_to_le_32(U64_LO(mapping));
			bd3->nbytes = rte_cpu_to_le_16(m_seg->data_len);
		} else {
			tx_bd = (struct eth_tx_bd *)
				ecore_chain_produce(&txq->tx_pbl);
			memset(tx_bd, 0, sizeof(*tx_bd));
			mapping = rte_mbuf_data_dma_addr(m_seg);
			tx_bd->addr.hi = rte_cpu_to_le_32(U64_HI(mapping));
			tx_bd->addr.lo = rte_cpu_to_le_32(U64_LO(mapping));
			tx_bd->nbytes = rte_cpu_to_le_16(m_seg->data_len);
		}
		nb_segs++;
		bd1->data.nbds = nb_segs;
		m_seg = m_seg->next;
	}

	/* Return total number of BDs used for this packet */
	return nb_segs;
}

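/*
 * Tx burst handler. Reclaims completed descriptors when running low,
 * limits the burst to the BDs available, builds the first BD (VLAN and
 * checksum offload flags) plus any scatter-gather BDs per packet, and
 * rings the doorbell once with the final producer index.
 */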
uint16_t
qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct qede_tx_queue *txq = p_txq;
	struct qede_dev *qdev = txq->qdev;
	struct ecore_dev *edev = &qdev->edev;
	struct eth_tx_1st_bd *bd1;
	struct rte_mbuf *m_seg = NULL;
	uint16_t nb_tx_pkts;
	uint16_t nb_pkt_sent = 0;
	uint16_t bd_prod;
	uint16_t idx;
	uint16_t tx_count;
	uint16_t nb_segs = 0;

	if (unlikely(txq->nb_tx_avail < txq->tx_free_thresh)) {
		PMD_TX_LOG(DEBUG, txq, "send=%u avail=%u free_thresh=%u\n",
			   nb_pkts, txq->nb_tx_avail, txq->tx_free_thresh);
		(void)qede_process_tx_compl(edev, txq);
	}

	nb_tx_pkts = RTE_MIN(nb_pkts, (txq->nb_tx_avail /
			ETH_TX_MAX_BDS_PER_NON_LSO_PACKET));
	if (unlikely(nb_tx_pkts == 0)) {
		PMD_TX_LOG(DEBUG, txq, "Out of BDs nb_pkts=%u avail=%u\n",
			   nb_pkts, txq->nb_tx_avail);
		return 0;
	}

	tx_count = nb_tx_pkts;
	while (nb_tx_pkts--) {
		/* Fill the entry in the SW ring and the BDs in the FW ring */
		idx = TX_PROD(txq);
		struct rte_mbuf *mbuf = *tx_pkts++;

		txq->sw_tx_ring[idx].mbuf = mbuf;
		bd1 = (struct eth_tx_1st_bd *)ecore_chain_produce(&txq->tx_pbl);
		/* Zero init struct fields */
		bd1->data.bd_flags.bitfields = 0;
		bd1->data.bitfields = 0;

		bd1->data.bd_flags.bitfields =
			1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
		/* Map MBUF linear data for DMA and set in the first BD */
		QEDE_BD_SET_ADDR_LEN(bd1, rte_mbuf_data_dma_addr(mbuf),
				     mbuf->pkt_len);

		/* Descriptor based VLAN insertion */
		if (mbuf->ol_flags & (PKT_TX_VLAN_PKT | PKT_TX_QINQ_PKT)) {
			bd1->data.vlan = rte_cpu_to_le_16(mbuf->vlan_tci);
			bd1->data.bd_flags.bitfields |=
			    1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT;
		}

		/* Offload the IP checksum in the hardware */
		if (mbuf->ol_flags & PKT_TX_IP_CKSUM) {
			bd1->data.bd_flags.bitfields |=
			    1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
		}

		/* L4 checksum offload (tcp or udp) */
		if (mbuf->ol_flags & (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) {
			bd1->data.bd_flags.bitfields |=
			    1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
			/* IPv6 + extn. -> later */
		}

		/* Handle fragmented MBUF */
		m_seg = mbuf->next;
		nb_segs++;
		bd1->data.nbds = nb_segs;
		/* Encode scatter gather buffer descriptors if required */
		nb_segs = qede_encode_sg_bd(txq, m_seg, nb_segs, bd1);
		txq->nb_tx_avail = txq->nb_tx_avail - nb_segs;
		nb_segs = 0;
		txq->sw_tx_prod++;
		rte_prefetch0(txq->sw_tx_ring[TX_PROD(txq)].mbuf);
		bd_prod =
		    rte_cpu_to_le_16(ecore_chain_get_prod_idx(&txq->tx_pbl));
		nb_pkt_sent++;
		txq->xmit_pkts++;
	}

	/* Write value of prod idx into bd_prod */
	txq->tx_db.data.bd_prod = bd_prod;
	rte_wmb();
	rte_compiler_barrier();
	DIRECT_REG_WR(edev, txq->doorbell_addr, txq->tx_db.raw);
	rte_wmb();

	/* Check again for Tx completions */
	(void)qede_process_tx_compl(edev, txq);

	PMD_TX_LOG(DEBUG, txq, "to_send=%u can_send=%u sent=%u core=%d\n",
		   nb_pkts, tx_count, nb_pkt_sent, rte_lcore_id());

	return nb_pkt_sent;
}

static void qede_init_fp_queue(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct qede_fastpath *fp;
	uint8_t i, txq_index, tc;
	int rxq = 0, txq = 0;

	for_each_queue(i) {
		fp = &qdev->fp_array[i];
		if (fp->type & QEDE_FASTPATH_RX) {
			fp->rxq = eth_dev->data->rx_queues[i];
			fp->rxq->queue_id = rxq++;
		}

		if (fp->type & QEDE_FASTPATH_TX) {
			for (tc = 0; tc < qdev->num_tc; tc++) {
				txq_index = tc * QEDE_TSS_COUNT(qdev) + txq;
				fp->txqs[tc] =
					eth_dev->data->tx_queues[txq_index];
				fp->txqs[tc]->queue_id = txq_index;
			}
			txq++;
		}
	}
}

int qede_dev_start(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;
	int rc;

	DP_INFO(edev, "Device state is %d\n", qdev->state);

	if (qdev->state == QEDE_DEV_START) {
		DP_INFO(edev, "Port is already started\n");
		return 0;
	}

	if (qdev->state == QEDE_DEV_CONFIG)
		qede_init_fp_queue(eth_dev);

	rc = qede_start_queues(eth_dev, true);
	if (rc) {
		DP_ERR(edev, "Failed to start queues\n");
		/* TBD: free */
		return rc;
	}

	/* Bring-up the link */
	qede_dev_set_link_state(eth_dev, true);

	/* Start/resume traffic */
	qdev->ops->fastpath_start(edev);

	qdev->state = QEDE_DEV_START;

	DP_INFO(edev, "dev_state is QEDE_DEV_START\n");

	return 0;
}

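/*
 * Wait for a Tx queue to drain (up to ~1s of polling), processing
 * completions as we go. If the queue appears stuck, ask the MCP to drain
 * once and retry; a second timeout is reported as a failure.
 */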
static int qede_drain_txq(struct qede_dev *qdev,
			  struct qede_tx_queue *txq, bool allow_drain)
{
	struct ecore_dev *edev = &qdev->edev;
	int rc, cnt = 1000;

	while (txq->sw_tx_cons != txq->sw_tx_prod) {
		qede_process_tx_compl(edev, txq);
		if (!cnt) {
			if (allow_drain) {
				DP_NOTICE(edev, false,
					  "Tx queue[%u] is stuck, "
					  "requesting MCP to drain\n",
					  txq->queue_id);
				rc = qdev->ops->common->drain(edev);
				if (rc)
					return rc;
				return qede_drain_txq(qdev, txq, false);
			}

			DP_NOTICE(edev, false,
				  "Timeout waiting for tx queue[%d]: "
				  "PROD=%d, CONS=%d\n",
				  txq->queue_id, txq->sw_tx_prod,
				  txq->sw_tx_cons);
			return -ENODEV;
		}
		cnt--;
		DELAY(1000);
		rte_compiler_barrier();
	}

	/* FW finished processing, wait for HW to transmit all tx packets */
	DELAY(2000);

	return 0;
}

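/*
 * Queue teardown: deactivate the vport first so no new packets arrive,
 * drain every Tx queue (with MCP assistance if needed), then stop all Tx
 * and Rx queues in reverse order.
 */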
static int qede_stop_queues(struct qede_dev *qdev)
{
	struct qed_update_vport_params vport_update_params;
	struct ecore_dev *edev = &qdev->edev;
	int rc, tc, i;

	/* Disable the vport */
	memset(&vport_update_params, 0, sizeof(vport_update_params));
	vport_update_params.vport_id = 0;
	vport_update_params.update_vport_active_flg = 1;
	vport_update_params.vport_active_flg = 0;
	vport_update_params.update_rss_flg = 0;

	DP_INFO(edev, "Deactivate vport\n");

	rc = qdev->ops->vport_update(edev, &vport_update_params);
	if (rc) {
		DP_ERR(edev, "Failed to update vport\n");
		return rc;
	}

	DP_INFO(edev, "Flushing tx queues\n");

	/* Flush Tx queues. If needed, request drain from MCP */
	for_each_queue(i) {
		struct qede_fastpath *fp = &qdev->fp_array[i];

		if (fp->type & QEDE_FASTPATH_TX) {
			for (tc = 0; tc < qdev->num_tc; tc++) {
				struct qede_tx_queue *txq = fp->txqs[tc];

				rc = qede_drain_txq(qdev, txq, true);
				if (rc)
					return rc;
			}
		}
	}

	/* Stop all Queues in reverse order */
	for (i = QEDE_QUEUE_CNT(qdev) - 1; i >= 0; i--) {
		struct qed_stop_rxq_params rx_params;

		/* Stop the Tx Queue(s) */
		if (qdev->fp_array[i].type & QEDE_FASTPATH_TX) {
			for (tc = 0; tc < qdev->num_tc; tc++) {
				struct qed_stop_txq_params tx_params;
				u8 val;

				tx_params.rss_id = i;
				val = qdev->fp_array[i].txqs[tc]->queue_id;
				tx_params.tx_queue_id = val;

				DP_INFO(edev, "Stopping tx queues\n");
				rc = qdev->ops->q_tx_stop(edev, &tx_params);
				if (rc) {
					DP_ERR(edev, "Failed to stop TXQ #%d\n",
					       tx_params.tx_queue_id);
					return rc;
				}
			}
		}

		/* Stop the Rx Queue */
		if (qdev->fp_array[i].type & QEDE_FASTPATH_RX) {
			memset(&rx_params, 0, sizeof(rx_params));
			rx_params.rss_id = i;
			rx_params.rx_queue_id = qdev->fp_array[i].rxq->queue_id;
			rx_params.eq_completion_only = 1;

			DP_INFO(edev, "Stopping rx queues\n");

			rc = qdev->ops->q_rx_stop(edev, &rx_params);
			if (rc) {
				DP_ERR(edev, "Failed to stop RXQ #%d\n", i);
				return rc;
			}
		}
	}

	return 0;
}

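/*
 * Reset all fastpath rings to their post-allocation state: free and
 * re-allocate Rx buffers, clear the chains and rewind the software and
 * hardware producer/consumer indices.
 */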
int qede_reset_fp_rings(struct qede_dev *qdev)
{
	struct qede_fastpath *fp;
	struct qede_tx_queue *txq;
	uint8_t tc;
	uint16_t id, i;

	for_each_queue(id) {
		fp = &qdev->fp_array[id];

		if (fp->type & QEDE_FASTPATH_RX) {
			DP_INFO(&qdev->edev,
				"Reset FP chain for RSS %u\n", id);
			qede_rx_queue_release_mbufs(fp->rxq);
			ecore_chain_reset(&fp->rxq->rx_bd_ring);
			ecore_chain_reset(&fp->rxq->rx_comp_ring);
			fp->rxq->sw_rx_prod = 0;
			fp->rxq->sw_rx_cons = 0;
			*fp->rxq->hw_cons_ptr = 0;
			for (i = 0; i < fp->rxq->nb_rx_desc; i++) {
				if (qede_alloc_rx_buffer(fp->rxq)) {
					DP_ERR(&qdev->edev,
					       "RX buffer allocation failed\n");
					return -ENOMEM;
				}
			}
		}
		if (fp->type & QEDE_FASTPATH_TX) {
			for (tc = 0; tc < qdev->num_tc; tc++) {
				txq = fp->txqs[tc];
				qede_tx_queue_release_mbufs(txq);
				ecore_chain_reset(&txq->tx_pbl);
				txq->sw_tx_cons = 0;
				txq->sw_tx_prod = 0;
				*txq->hw_cons_ptr = 0;
			}
		}
	}

	return 0;
}

/* This function frees all memory allocated for the fastpath queues */
void qede_free_mem_load(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct qede_fastpath *fp;
	uint16_t txq_idx;
	uint8_t id;
	uint8_t tc;

	for_each_queue(id) {
		fp = &qdev->fp_array[id];
		if (fp->type & QEDE_FASTPATH_RX) {
			qede_rx_queue_release(fp->rxq);
			eth_dev->data->rx_queues[id] = NULL;
		} else {
			for (tc = 0; tc < qdev->num_tc; tc++) {
				txq_idx = fp->txqs[tc]->queue_id;
				qede_tx_queue_release(fp->txqs[tc]);
				eth_dev->data->tx_queues[txq_idx] = NULL;
			}
		}
	}
}

void qede_dev_stop(struct rte_eth_dev *eth_dev)
{
	struct qede_dev *qdev = eth_dev->data->dev_private;
	struct ecore_dev *edev = &qdev->edev;

	DP_INFO(edev, "port %u\n", eth_dev->data->port_id);

	if (qdev->state != QEDE_DEV_START) {
		DP_INFO(edev, "Device not yet started\n");
		return;
	}

	if (qede_stop_queues(qdev))
		DP_ERR(edev, "Failed to stop queues\n");

	DP_INFO(edev, "Stopped queues\n");

	qdev->ops->fastpath_stop(edev);

	/* Bring the link down */
	qede_dev_set_link_state(eth_dev, false);

	qdev->state = QEDE_DEV_STOP;

	DP_INFO(edev, "dev_state is QEDE_DEV_STOP\n");
}

uint16_t
qede_rxtx_pkts_dummy(__rte_unused void *p_rxq,
		     __rte_unused struct rte_mbuf **pkts,
		     __rte_unused uint16_t nb_pkts)
{
	return 0;
}