 * Copyright (c) 2016 QLogic Corporation.
 * See LICENSE.qede_pmd for copyright and licensing details.
static bool gro_disable = true;	/* mod_param */
static inline int qede_alloc_rx_buffer(struct qede_rx_queue *rxq)
        struct rte_mbuf *new_mb = NULL;
        struct eth_rx_bd *rx_bd;
        uint16_t idx = rxq->sw_rx_prod & NUM_RX_BDS(rxq);

        new_mb = rte_mbuf_raw_alloc(rxq->mb_pool);
        if (unlikely(!new_mb)) {
                   "Failed to allocate rx buffer "
                   "sw_rx_prod %u sw_rx_cons %u mp entries %u free %u",
                   idx, rxq->sw_rx_cons & NUM_RX_BDS(rxq),
                   rte_mempool_avail_count(rxq->mb_pool),
                   rte_mempool_in_use_count(rxq->mb_pool));

        rxq->sw_rx_ring[idx].mbuf = new_mb;
        rxq->sw_rx_ring[idx].page_offset = 0;
        mapping = rte_mbuf_data_dma_addr_default(new_mb);
        /* Advance PROD and get BD pointer */
        rx_bd = (struct eth_rx_bd *)ecore_chain_produce(&rxq->rx_bd_ring);
        rx_bd->addr.hi = rte_cpu_to_le_32(U64_HI(mapping));
        rx_bd->addr.lo = rte_cpu_to_le_32(U64_LO(mapping));
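/*
 * Note on the BD address encoding above: the hardware BD carries the mbuf's
 * IOVA as two little-endian 32-bit halves, so the 64-bit mapping is split
 * with U64_HI()/U64_LO() and byte-swapped per word. A rough sketch of the
 * same idea with plain stdint types (illustration only, not driver code):
 *
 *     uint64_t iova = rte_mbuf_data_dma_addr_default(mbuf);
 *     bd->addr.hi = rte_cpu_to_le_32((uint32_t)(iova >> 32));
 *     bd->addr.lo = rte_cpu_to_le_32((uint32_t)(iova & 0xffffffff));
 */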
static void qede_rx_queue_release_mbufs(struct qede_rx_queue *rxq)
        if (rxq->sw_rx_ring != NULL) {
                for (i = 0; i < rxq->nb_rx_desc; i++) {
                        if (rxq->sw_rx_ring[i].mbuf != NULL) {
                                rte_pktmbuf_free(rxq->sw_rx_ring[i].mbuf);
                                rxq->sw_rx_ring[i].mbuf = NULL;
void qede_rx_queue_release(void *rx_queue)
        struct qede_rx_queue *rxq = rx_queue;

        qede_rx_queue_release_mbufs(rxq);
        rte_free(rxq->sw_rx_ring);
        rxq->sw_rx_ring = NULL;
static void qede_tx_queue_release_mbufs(struct qede_tx_queue *txq)
        PMD_TX_LOG(DEBUG, txq, "releasing %u mbufs\n", txq->nb_tx_desc);

        if (txq->sw_tx_ring) {
                for (i = 0; i < txq->nb_tx_desc; i++) {
                        if (txq->sw_tx_ring[i].mbuf) {
                                rte_pktmbuf_free(txq->sw_tx_ring[i].mbuf);
                                txq->sw_tx_ring[i].mbuf = NULL;
qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
                    uint16_t nb_desc, unsigned int socket_id,
                    const struct rte_eth_rxconf *rx_conf,
                    struct rte_mempool *mp)
        struct qede_dev *qdev = dev->data->dev_private;
        struct ecore_dev *edev = &qdev->edev;
        struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
        struct qede_rx_queue *rxq;
        uint16_t max_rx_pkt_len;

        PMD_INIT_FUNC_TRACE(edev);

        /* Note: Ring size/align is controlled by struct rte_eth_desc_lim */
        if (!rte_is_power_of_2(nb_desc)) {
                DP_ERR(edev, "Ring size %u is not power of 2\n",

        /* Free memory prior to re-allocation if needed... */
        if (dev->data->rx_queues[queue_idx] != NULL) {
                qede_rx_queue_release(dev->data->rx_queues[queue_idx]);
                dev->data->rx_queues[queue_idx] = NULL;

        /* First allocate the rx queue data structure */
        rxq = rte_zmalloc_socket("qede_rx_queue", sizeof(struct qede_rx_queue),
                                 RTE_CACHE_LINE_SIZE, socket_id);
                DP_ERR(edev, "Unable to allocate memory for rxq on socket %u",

        rxq->nb_rx_desc = nb_desc;
        rxq->queue_id = queue_idx;
        rxq->port_id = dev->data->port_id;
        max_rx_pkt_len = (uint16_t)rxmode->max_rx_pkt_len;

        /* Fix up RX buffer size */
        bufsz = (uint16_t)rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
        if ((rxmode->enable_scatter) ||
            (max_rx_pkt_len + QEDE_ETH_OVERHEAD) > bufsz) {
                if (!dev->data->scattered_rx) {
                        DP_INFO(edev, "Forcing scatter-gather mode\n");
                        dev->data->scattered_rx = 1;

        if (dev->data->scattered_rx)
                rxq->rx_buf_size = bufsz + ETHER_HDR_LEN +
                                   ETHER_CRC_LEN + QEDE_ETH_OVERHEAD;
        else
                rxq->rx_buf_size = max_rx_pkt_len + QEDE_ETH_OVERHEAD;
        /* Align to cache-line size if needed */
        rxq->rx_buf_size = QEDE_CEIL_TO_CACHE_LINE_SIZE(rxq->rx_buf_size);

        DP_INFO(edev, "mtu %u mbufsz %u bd_max_bytes %u scatter_mode %d\n",
                qdev->mtu, bufsz, rxq->rx_buf_size, dev->data->scattered_rx);
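/*
 * Sizing note (illustration only): with, say, a 2048-byte mbuf data room and
 * the default 128-byte RTE_PKTMBUF_HEADROOM, bufsz above works out to 1920
 * bytes. A max_rx_pkt_len that (plus QEDE_ETH_OVERHEAD) exceeds that value,
 * or an explicit enable_scatter request, flips the port into scatter-gather
 * mode, and the per-BD buffer size is then derived from the mbuf size rather
 * than from max_rx_pkt_len. The exact overhead constant comes from the qede
 * headers and is not repeated here.
 */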
        /* Allocate the parallel driver ring for Rx buffers */
        size = sizeof(*rxq->sw_rx_ring) * rxq->nb_rx_desc;
        rxq->sw_rx_ring = rte_zmalloc_socket("sw_rx_ring", size,
                                             RTE_CACHE_LINE_SIZE, socket_id);
        if (!rxq->sw_rx_ring) {
                DP_NOTICE(edev, false,
                          "Unable to alloc memory for sw_rx_ring on socket %u\n",

        /* Allocate FW Rx ring */
        rc = qdev->ops->common->chain_alloc(edev,
                                            ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
                                            ECORE_CHAIN_MODE_NEXT_PTR,
                                            ECORE_CHAIN_CNT_TYPE_U16,
                                            sizeof(struct eth_rx_bd),
        if (rc != ECORE_SUCCESS) {
                DP_NOTICE(edev, false,
                          "Unable to alloc memory for rxbd ring on socket %u\n",
                rte_free(rxq->sw_rx_ring);
                rxq->sw_rx_ring = NULL;

        /* Allocate FW completion ring */
        rc = qdev->ops->common->chain_alloc(edev,
                                            ECORE_CHAIN_USE_TO_CONSUME,
                                            ECORE_CHAIN_MODE_PBL,
                                            ECORE_CHAIN_CNT_TYPE_U16,
                                            sizeof(union eth_rx_cqe),
        if (rc != ECORE_SUCCESS) {
                DP_NOTICE(edev, false,
                          "Unable to alloc memory for cqe ring on socket %u\n",
                /* TBD: Freeing RX BD ring */
                rte_free(rxq->sw_rx_ring);
                rxq->sw_rx_ring = NULL;

        /* Allocate buffers for the Rx ring */
        for (i = 0; i < rxq->nb_rx_desc; i++) {
                rc = qede_alloc_rx_buffer(rxq);
                        DP_NOTICE(edev, false,
                                  "RX buffer allocation failed at idx=%d\n", i);

        dev->data->rx_queues[queue_idx] = rxq;

        DP_INFO(edev, "rxq %d num_desc %u rx_buf_size=%u socket %u\n",
                queue_idx, nb_desc, rxq->rx_buf_size, socket_id);

        qede_rx_queue_release(rxq);
void qede_tx_queue_release(void *tx_queue)
        struct qede_tx_queue *txq = tx_queue;

        qede_tx_queue_release_mbufs(txq);
        if (txq->sw_tx_ring) {
                rte_free(txq->sw_tx_ring);
                txq->sw_tx_ring = NULL;
qede_tx_queue_setup(struct rte_eth_dev *dev,
                    unsigned int socket_id,
                    const struct rte_eth_txconf *tx_conf)
        struct qede_dev *qdev = dev->data->dev_private;
        struct ecore_dev *edev = &qdev->edev;
        struct qede_tx_queue *txq;

        PMD_INIT_FUNC_TRACE(edev);

        if (!rte_is_power_of_2(nb_desc)) {
                DP_ERR(edev, "Ring size %u is not power of 2\n",

        /* Free memory prior to re-allocation if needed... */
        if (dev->data->tx_queues[queue_idx] != NULL) {
                qede_tx_queue_release(dev->data->tx_queues[queue_idx]);
                dev->data->tx_queues[queue_idx] = NULL;

        txq = rte_zmalloc_socket("qede_tx_queue", sizeof(struct qede_tx_queue),
                                 RTE_CACHE_LINE_SIZE, socket_id);
                "Unable to allocate memory for txq on socket %u",

        txq->nb_tx_desc = nb_desc;
        txq->port_id = dev->data->port_id;

        rc = qdev->ops->common->chain_alloc(edev,
                                            ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
                                            ECORE_CHAIN_MODE_PBL,
                                            ECORE_CHAIN_CNT_TYPE_U16,
                                            sizeof(union eth_tx_bd_types),
        if (rc != ECORE_SUCCESS) {
                "Unable to allocate memory for txbd ring on socket %u",
                qede_tx_queue_release(txq);

        /* Allocate software ring */
        txq->sw_tx_ring = rte_zmalloc_socket("txq->sw_tx_ring",
                                             (sizeof(struct qede_tx_entry) *
                                             RTE_CACHE_LINE_SIZE, socket_id);

        if (!txq->sw_tx_ring) {
                "Unable to allocate memory for txbd ring on socket %u",
                qede_tx_queue_release(txq);

        txq->queue_id = queue_idx;

        txq->nb_tx_avail = txq->nb_tx_desc;

        txq->tx_free_thresh =
            tx_conf->tx_free_thresh ? tx_conf->tx_free_thresh :
            (txq->nb_tx_desc - QEDE_DEFAULT_TX_FREE_THRESH);
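/*
 * The free threshold decides when qede_xmit_pkts() starts reclaiming
 * completed descriptors: a user-supplied tx_conf->tx_free_thresh wins,
 * otherwise the driver falls back to nb_tx_desc minus
 * QEDE_DEFAULT_TX_FREE_THRESH. As an illustration (the constant's actual
 * value lives in the qede headers), a 512-entry ring with a default slack of
 * 32 yields a threshold of 480, so completions are polled once fewer than
 * 480 descriptors remain available.
 */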
        dev->data->tx_queues[queue_idx] = txq;

                "txq %u num_desc %u tx_free_thresh %u socket %u\n",
                queue_idx, nb_desc, txq->tx_free_thresh, socket_id);
/* This function inits fp content and resets the SB, RXQ and TXQ arrays */
static void qede_init_fp(struct qede_dev *qdev)
        struct qede_fastpath *fp;
        uint8_t i, rss_id, tc;
        int fp_rx = qdev->fp_num_rx, rxq = 0, txq = 0;

        memset((void *)qdev->fp_array, 0, (QEDE_QUEUE_CNT(qdev) *
                                           sizeof(*qdev->fp_array)));
        memset((void *)qdev->sb_array, 0, (QEDE_QUEUE_CNT(qdev) *
                                           sizeof(*qdev->sb_array)));

                fp = &qdev->fp_array[i];
                        fp->type = QEDE_FASTPATH_RX;
                        fp->type = QEDE_FASTPATH_TX;
                fp->sb_info = &qdev->sb_array[i];
                snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", "qdev", i);

        qdev->gro_disable = gro_disable;
void qede_free_fp_arrays(struct qede_dev *qdev)
        /* It assumes qede_free_mem_load() has been called before */
        if (qdev->fp_array != NULL) {
                rte_free(qdev->fp_array);
                qdev->fp_array = NULL;

        if (qdev->sb_array != NULL) {
                rte_free(qdev->sb_array);
                qdev->sb_array = NULL;
int qede_alloc_fp_array(struct qede_dev *qdev)
        struct qede_fastpath *fp;
        struct ecore_dev *edev = &qdev->edev;

        qdev->fp_array = rte_calloc("fp", QEDE_QUEUE_CNT(qdev),
                                    sizeof(*qdev->fp_array),
                                    RTE_CACHE_LINE_SIZE);

        if (!qdev->fp_array) {
                DP_ERR(edev, "fp array allocation failed\n");

        qdev->sb_array = rte_calloc("sb", QEDE_QUEUE_CNT(qdev),
                                    sizeof(*qdev->sb_array),
                                    RTE_CACHE_LINE_SIZE);

        if (!qdev->sb_array) {
                DP_ERR(edev, "sb array allocation failed\n");
                rte_free(qdev->fp_array);
/* This function allocates fast-path status block memory */
qede_alloc_mem_sb(struct qede_dev *qdev, struct ecore_sb_info *sb_info,
        struct ecore_dev *edev = &qdev->edev;
        struct status_block *sb_virt;

        sb_virt = OSAL_DMA_ALLOC_COHERENT(edev, &sb_phys, sizeof(*sb_virt));
                DP_ERR(edev, "Status block allocation failed\n");

        rc = qdev->ops->common->sb_init(edev, sb_info,
                                        sb_virt, sb_phys, sb_id,
                                        QED_SB_TYPE_L2_QUEUE);
                DP_ERR(edev, "Status block initialization failed\n");
                /* TBD: No dma_free_coherent possible */
int qede_alloc_fp_resc(struct qede_dev *qdev)
        struct ecore_dev *edev = &qdev->edev;
        struct qede_fastpath *fp;

                ecore_vf_get_num_sbs(ECORE_LEADING_HWFN(edev), &num_sbs);

                num_sbs = ecore_cxt_get_proto_cid_count
                          (ECORE_LEADING_HWFN(edev), PROTOCOLID_ETH, NULL);

                DP_ERR(edev, "No status blocks available\n");

                qede_free_fp_arrays(qdev);

        rc = qede_alloc_fp_array(qdev);

        for (i = 0; i < QEDE_QUEUE_CNT(qdev); i++) {
                fp = &qdev->fp_array[i];

                sb_idx = i % num_sbs;

                if (qede_alloc_mem_sb(qdev, fp->sb_info, sb_idx)) {
                        qede_free_fp_arrays(qdev);
void qede_dealloc_fp_resc(struct rte_eth_dev *eth_dev)
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);

        qede_free_mem_load(eth_dev);
        qede_free_fp_arrays(qdev);
qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq)
        uint16_t bd_prod = ecore_chain_get_prod_idx(&rxq->rx_bd_ring);
        uint16_t cqe_prod = ecore_chain_get_prod_idx(&rxq->rx_comp_ring);
        struct eth_rx_prod_data rx_prods = { 0 };

        /* Update producers */
        rx_prods.bd_prod = rte_cpu_to_le_16(bd_prod);
        rx_prods.cqe_prod = rte_cpu_to_le_16(cqe_prod);

        /* Make sure that the BD and SGE data is updated before updating the
         * producers since FW might read the BD/SGE right after the producer
         * is updated.
         */

        internal_ram_wr(rxq->hw_rxq_prod_addr, sizeof(rx_prods),
                        (uint32_t *)&rx_prods);

        /* mmiowb is needed to synchronize doorbell writes from more than one
         * processor. It guarantees that the write arrives to the device before
         * the napi lock is released and another qede_poll is called (possibly
         * on another CPU). Without this barrier, the next doorbell can bypass
         * this doorbell. This is applicable to IA64/Altix systems.
         */

        PMD_RX_LOG(DEBUG, rxq, "bd_prod %u cqe_prod %u\n", bd_prod, cqe_prod);
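/*
 * Ordering note for the producer update above: the BD ring writes must be
 * globally visible before the producer value lands in the device's internal
 * RAM, otherwise the firmware could fetch a stale BD. A store barrier between
 * filling the BDs and writing the producers is the usual way to express this
 * in DPDK; a minimal sketch (assuming the surrounding structures are already
 * populated) would be:
 *
 *     rte_wmb();                       // flush BD/producer stores first
 *     internal_ram_wr(rxq->hw_rxq_prod_addr,
 *                     sizeof(rx_prods), (uint32_t *)&rx_prods);
 */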
static inline uint32_t
qede_rxfh_indir_default(uint32_t index, uint32_t n_rx_rings)
        return index % n_rx_rings;
static void qede_prandom_bytes(uint32_t *buff, size_t bytes)
        srand((unsigned int)time(NULL));

        for (i = 0; i < ECORE_RSS_KEY_SIZE; i++)
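/*
 * The RSS hash key only needs to spread flows across queues, so seeding
 * libc's rand() with the current time is adequate here; it is not a source
 * of cryptographic randomness and should not be treated as one. Callers that
 * need a reproducible key can instead pass an explicit key through
 * rte_eth_rss_conf.rss_key, which the setup path below prefers when present.
 */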
qede_check_vport_rss_enable(struct rte_eth_dev *eth_dev,
                            struct qed_update_vport_rss_params *rss_params)
        struct rte_eth_rss_conf rss_conf;
        enum rte_eth_rx_mq_mode mode = eth_dev->data->dev_conf.rxmode.mq_mode;
        struct qede_dev *qdev = eth_dev->data->dev_private;
        struct ecore_dev *edev = &qdev->edev;

        PMD_INIT_FUNC_TRACE(edev);

        rss_conf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
        key = (uint32_t *)rss_conf.rss_key;
        hf = rss_conf.rss_hf;

        /* Check if RSS conditions are met.
         * Note: Even though it's meaningless to enable RSS with one queue, it
         * could be used to produce RSS Hash, so skipping that check.
         */
        if (!(mode & ETH_MQ_RX_RSS)) {
                DP_INFO(edev, "RSS flag is not set\n");

                DP_INFO(edev, "Request to disable RSS\n");

        memset(rss_params, 0, sizeof(*rss_params));

        for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++)
                rss_params->rss_ind_table[i] = qede_rxfh_indir_default(i,
                                                        QEDE_RSS_COUNT(qdev));

                qede_prandom_bytes(rss_params->rss_key,
                                   sizeof(rss_params->rss_key));

                memcpy(rss_params->rss_key, rss_conf.rss_key,
                       rss_conf.rss_key_len);

        qede_init_rss_caps(&rss_caps, hf);

        rss_params->rss_caps = rss_caps;

        DP_INFO(edev, "RSS conditions are met\n");
static int qede_start_queues(struct rte_eth_dev *eth_dev, bool clear_stats)
        struct qede_dev *qdev = eth_dev->data->dev_private;
        struct ecore_dev *edev = &qdev->edev;
        struct ecore_queue_start_common_params q_params;
        struct qed_update_vport_rss_params *rss_params = &qdev->rss_params;
        struct qed_dev_info *qed_info = &qdev->dev_info.common;
        struct qed_update_vport_params vport_update_params;
        struct qede_tx_queue *txq;
        struct qede_fastpath *fp;
        dma_addr_t p_phys_table;
        int vlan_removal_en = 1;

                fp = &qdev->fp_array[i];
                if (fp->type & QEDE_FASTPATH_RX) {
                        p_phys_table = ecore_chain_get_pbl_phys(&fp->rxq->
                        page_cnt = ecore_chain_get_page_cnt(&fp->rxq->

                        memset(&q_params, 0, sizeof(q_params));
                        q_params.queue_id = i;
                        q_params.vport_id = 0;
                        q_params.sb = fp->sb_info->igu_sb_id;
                        q_params.sb_idx = RX_PI;

                        ecore_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0);

                        rc = qdev->ops->q_rx_start(edev, i, &q_params,
                                                   fp->rxq->rx_buf_size,
                                                   fp->rxq->rx_bd_ring.p_phys_addr,
                                                   &fp->rxq->hw_rxq_prod_addr);
                                DP_ERR(edev, "Start rxq #%d failed %d\n",
                                       fp->rxq->queue_id, rc);

                        fp->rxq->hw_cons_ptr =
                            &fp->sb_info->sb_virt->pi_array[RX_PI];

                        qede_update_rx_prod(qdev, fp->rxq);

                if (!(fp->type & QEDE_FASTPATH_TX))

                for (tc = 0; tc < qdev->num_tc; tc++) {
                        txq_index = tc * QEDE_RSS_COUNT(qdev) + i;

                        p_phys_table = ecore_chain_get_pbl_phys(&txq->tx_pbl);
                        page_cnt = ecore_chain_get_page_cnt(&txq->tx_pbl);

                        memset(&q_params, 0, sizeof(q_params));
                        q_params.queue_id = txq->queue_id;
                        q_params.vport_id = 0;
                        q_params.sb = fp->sb_info->igu_sb_id;
                        q_params.sb_idx = TX_PI(tc);

                        rc = qdev->ops->q_tx_start(edev, i, &q_params,
                                                   page_cnt, /* **pp_doorbell */
                                                   &txq->doorbell_addr);
                                DP_ERR(edev, "Start txq %u failed %d\n",

                            &fp->sb_info->sb_virt->pi_array[TX_PI(tc)];
                        SET_FIELD(txq->tx_db.data.params,
                                  ETH_DB_DATA_DEST, DB_DEST_XCM);
                        SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD,
                        SET_FIELD(txq->tx_db.data.params,
                                  ETH_DB_DATA_AGG_VAL_SEL,
                                  DQ_XCM_ETH_TX_BD_PROD_CMD);

                        txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;
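/*
 * The doorbell data assembled above is what qede_xmit_pkts() later rings:
 * DEST selects the XCM block, the aggregate command/selector tell the
 * hardware that the value being written is the Tx BD producer index, and
 * agg_flags carries the DQ completion-flag command. Only bd_prod changes at
 * transmit time; the rest of the doorbell word stays as prepared here.
 */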
        /* Prepare and send the vport enable */
        memset(&vport_update_params, 0, sizeof(vport_update_params));
        /* Update MTU via vport update */
        vport_update_params.mtu = qdev->mtu;
        vport_update_params.vport_id = 0;
        vport_update_params.update_vport_active_flg = 1;
        vport_update_params.vport_active_flg = 1;

        if (qed_info->mf_mode == MF_NPAR && qed_info->tx_switching) {
                /* TBD: Check SRIOV enabled for VF */
                vport_update_params.update_tx_switching_flg = 1;
                vport_update_params.tx_switching_flg = 1;

        if (qede_check_vport_rss_enable(eth_dev, rss_params)) {
                vport_update_params.update_rss_flg = 1;
                qdev->rss_enabled = 1;
                qdev->rss_enabled = 0;

        rte_memcpy(&vport_update_params.rss_params, rss_params,
                   sizeof(*rss_params));

        rc = qdev->ops->vport_update(edev, &vport_update_params);
                DP_ERR(edev, "Update V-PORT failed %d\n", rc);
static bool qede_tunn_exist(uint16_t flag)
        return !!((PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK <<
                   PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT) & flag);

static inline uint8_t qede_check_tunn_csum(uint16_t flag)
        uint16_t csum_flag = 0;

        if ((PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK <<
             PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT) & flag)
                csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK <<
                             PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT;

        if ((PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
             PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT) & flag) {
                csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
                             PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
                tcsum = QEDE_TUNN_CSUM_UNNECESSARY;

        csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK <<
                     PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT |
                     PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
                     PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;

        if (csum_flag & flag)
                return QEDE_CSUM_ERROR;

        return QEDE_CSUM_UNNECESSARY | tcsum;
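/*
 * The pattern above reads as follows: whenever the parser reports that a
 * checksum *was calculated*, the corresponding *error* bit becomes relevant
 * and is accumulated into csum_flag; a single AND against the CQE flags then
 * decides between QEDE_CSUM_ERROR and "unnecessary" (already verified by
 * hardware). The tunnel variant tracks the inner and outer checksums
 * independently, which is why tcsum is OR-ed into the return value.
 */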
static inline uint8_t qede_tunn_exist(uint16_t flag)

static inline uint8_t qede_check_tunn_csum(uint16_t flag)

static inline uint8_t qede_check_notunn_csum(uint16_t flag)
        uint16_t csum_flag = 0;

        if ((PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
             PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT) & flag) {
                csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
                             PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT;
                csum = QEDE_CSUM_UNNECESSARY;

        csum_flag |= PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
                     PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT;

        if (csum_flag & flag)
                return QEDE_CSUM_ERROR;

static inline uint8_t qede_check_csum(uint16_t flag)
        if (likely(!qede_tunn_exist(flag)))
                return qede_check_notunn_csum(flag);

        return qede_check_tunn_csum(flag);
static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq)
        ecore_chain_consume(&rxq->rx_bd_ring);

qede_reuse_page(struct qede_dev *qdev,
                struct qede_rx_queue *rxq, struct qede_rx_entry *curr_cons)
        struct eth_rx_bd *rx_bd_prod = ecore_chain_produce(&rxq->rx_bd_ring);
        uint16_t idx = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
        struct qede_rx_entry *curr_prod;
        dma_addr_t new_mapping;

        curr_prod = &rxq->sw_rx_ring[idx];
        *curr_prod = *curr_cons;

        new_mapping = rte_mbuf_data_dma_addr_default(curr_prod->mbuf) +
                      curr_prod->page_offset;

        rx_bd_prod->addr.hi = rte_cpu_to_le_32(U64_HI(new_mapping));
        rx_bd_prod->addr.lo = rte_cpu_to_le_32(U64_LO(new_mapping));
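/*
 * qede_reuse_page() is the drop/recycle path: instead of handing the mbuf to
 * the application and allocating a replacement, the same buffer is re-posted
 * at the current producer slot, so an errored or unwanted CQE costs no mbuf
 * allocation. qede_recycle_rx_bd_ring() below simply repeats this for every
 * BD that the dropped packet consumed.
 */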
qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq,
                        struct qede_dev *qdev, uint8_t count)
        struct qede_rx_entry *curr_cons;

        for (; count > 0; count--) {
                curr_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS(rxq)];
                qede_reuse_page(qdev, rxq, curr_cons);
                qede_rx_bd_ring_consume(rxq);
static inline uint32_t qede_rx_cqe_to_pkt_type(uint16_t flags)
        /* TBD - L4 indications needed ? */
        uint16_t protocol = ((PARSING_AND_ERR_FLAGS_L3TYPE_MASK <<
                              PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) & flags);

        /* protocol = 3 means LLC/SNAP over Ethernet */
        if (unlikely(protocol == 0 || protocol == 3))
                p_type = RTE_PTYPE_UNKNOWN;
        else if (protocol == 1)
                p_type = RTE_PTYPE_L3_IPV4;
        else if (protocol == 2)
                p_type = RTE_PTYPE_L3_IPV6;

        return RTE_PTYPE_L2_ETHER | p_type;
int qede_process_sg_pkts(void *p_rxq, struct rte_mbuf *rx_mb,
                         int num_segs, uint16_t pkt_len)
        struct qede_rx_queue *rxq = p_rxq;
        struct qede_dev *qdev = rxq->qdev;
        struct ecore_dev *edev = &qdev->edev;
        uint16_t sw_rx_index, cur_size;
        register struct rte_mbuf *seg1 = NULL;
        register struct rte_mbuf *seg2 = NULL;

                cur_size = pkt_len > rxq->rx_buf_size ?
                           rxq->rx_buf_size : pkt_len;
                        PMD_RX_LOG(DEBUG, rxq,
                                   "SG packet, len and num BD mismatch\n");
                        qede_recycle_rx_bd_ring(rxq, qdev, num_segs);

                if (qede_alloc_rx_buffer(rxq)) {
                        PMD_RX_LOG(DEBUG, rxq, "Buffer allocation failed\n");
                        index = rxq->port_id;
                        rte_eth_devices[index].data->rx_mbuf_alloc_failed++;
                        rxq->rx_alloc_errors++;

                sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
                seg2 = rxq->sw_rx_ring[sw_rx_index].mbuf;
                qede_rx_bd_ring_consume(rxq);

                seg2->data_len = cur_size;

                PMD_RX_LOG(DEBUG, rxq,
                           "Mapped all BDs of jumbo, but still have %d bytes\n",

        return ECORE_SUCCESS;
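/*
 * Assembly of a scattered packet, in short: the first BD (already owned by
 * the caller as rx_mb) holds len_on_first_bd bytes, and each additional BD
 * contributes at most rx_buf_size bytes until pkt_len is exhausted. For
 * every continuation BD consumed here a fresh buffer is posted back so the
 * ring never runs dry; the continuation mbufs are linked into rx_mb's
 * segment chain with data_len trimmed to the remaining byte count.
 */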
qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
        struct qede_rx_queue *rxq = p_rxq;
        struct qede_dev *qdev = rxq->qdev;
        struct ecore_dev *edev = &qdev->edev;
        struct qede_fastpath *fp = &qdev->fp_array[rxq->queue_id];
        uint16_t hw_comp_cons, sw_comp_cons, sw_rx_index;
        union eth_rx_cqe *cqe;
        struct eth_fast_path_rx_reg_cqe *fp_cqe;
        register struct rte_mbuf *rx_mb = NULL;
        register struct rte_mbuf *seg1 = NULL;
        enum eth_rx_cqe_type cqe_type;
        uint16_t len, pad, preload_idx, pkt_len, parse_flag;
        uint8_t csum_flag, num_segs;
        enum rss_hash_type htype;

        hw_comp_cons = rte_le_to_cpu_16(*rxq->hw_cons_ptr);
        sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);

        if (hw_comp_cons == sw_comp_cons)

        while (sw_comp_cons != hw_comp_cons) {
                /* Get the CQE from the completion ring */
                    (union eth_rx_cqe *)ecore_chain_consume(&rxq->rx_comp_ring);
                cqe_type = cqe->fast_path_regular.type;

                if (unlikely(cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH)) {
                        PMD_RX_LOG(DEBUG, rxq, "Got a slowpath CQE\n");

                        qdev->ops->eth_cqe_completion(edev, fp->id,
                                (struct eth_slow_path_rx_cqe *)cqe);

                /* Get the data from the SW ring */
                sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
                rx_mb = rxq->sw_rx_ring[sw_rx_index].mbuf;
                assert(rx_mb != NULL);

                fp_cqe = &cqe->fast_path_regular;

                len = rte_le_to_cpu_16(fp_cqe->len_on_first_bd);
                pad = fp_cqe->placement_offset;
                assert((len + pad) <= rx_mb->buf_len);

                PMD_RX_LOG(DEBUG, rxq,
                           "CQE type = 0x%x, flags = 0x%x, vlan = 0x%x"
                           " len = %u, parsing_flags = %d\n",
                           cqe_type, fp_cqe->bitfields,
                           rte_le_to_cpu_16(fp_cqe->vlan_tag),
                           len, rte_le_to_cpu_16(fp_cqe->pars_flags.flags));

                /* If this is an error packet then drop it */
                    rte_le_to_cpu_16(cqe->fast_path_regular.pars_flags.flags);
                csum_flag = qede_check_csum(parse_flag);
                if (unlikely(csum_flag == QEDE_CSUM_ERROR)) {
                                   "CQE in CONS = %u has error, flags = 0x%x "
                                   "dropping incoming packet\n",
                                   sw_comp_cons, parse_flag);

                        qede_recycle_rx_bd_ring(rxq, qdev, fp_cqe->bd_num);

                if (unlikely(qede_alloc_rx_buffer(rxq) != 0)) {
                                   "New buffer allocation failed, "
                                   "dropping incoming packet\n");
                        qede_recycle_rx_bd_ring(rxq, qdev, fp_cqe->bd_num);
                        rte_eth_devices[rxq->port_id].
                                data->rx_mbuf_alloc_failed++;
                        rxq->rx_alloc_errors++;

                qede_rx_bd_ring_consume(rxq);

                if (fp_cqe->bd_num > 1) {
                        pkt_len = rte_le_to_cpu_16(fp_cqe->pkt_len);
                        num_segs = fp_cqe->bd_num - 1;

                        ret = qede_process_sg_pkts(p_rxq, seg1, num_segs,
                        if (ret != ECORE_SUCCESS) {
                                qede_recycle_rx_bd_ring(rxq, qdev,

                /* Prefetch next mbuf while processing current one. */
                preload_idx = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
                rte_prefetch0(rxq->sw_rx_ring[preload_idx].mbuf);

                /* Update MBUF fields */
                rx_mb->data_off = pad + RTE_PKTMBUF_HEADROOM;
                rx_mb->nb_segs = fp_cqe->bd_num;
                rx_mb->data_len = len;
                rx_mb->pkt_len = fp_cqe->pkt_len;
                rx_mb->port = rxq->port_id;
                rx_mb->packet_type = qede_rx_cqe_to_pkt_type(parse_flag);

                htype = (uint8_t)GET_FIELD(fp_cqe->bitfields,
                                ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE);
                if (qdev->rss_enabled && htype) {
                        rx_mb->ol_flags |= PKT_RX_RSS_HASH;
                        rx_mb->hash.rss = rte_le_to_cpu_32(fp_cqe->rss_hash);
                        PMD_RX_LOG(DEBUG, rxq, "Hash result 0x%x\n",

                rte_prefetch1(rte_pktmbuf_mtod(rx_mb, void *));

                if (CQE_HAS_VLAN(parse_flag)) {
                        rx_mb->vlan_tci = rte_le_to_cpu_16(fp_cqe->vlan_tag);
                        rx_mb->ol_flags |= PKT_RX_VLAN_PKT;

                if (CQE_HAS_OUTER_VLAN(parse_flag)) {
                        /* FW does not provide indication of Outer VLAN tag,
                         * which is always stripped, so vlan_tci_outer is set
                         * to 0. Here vlan_tag represents inner VLAN tag.
                         */
                        rx_mb->vlan_tci = rte_le_to_cpu_16(fp_cqe->vlan_tag);
                        rx_mb->ol_flags |= PKT_RX_QINQ_PKT;
                        rx_mb->vlan_tci_outer = 0;

                rx_pkts[rx_pkt] = rx_mb;

                ecore_chain_recycle_consumed(&rxq->rx_comp_ring);
                sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
                if (rx_pkt == nb_pkts) {
                        PMD_RX_LOG(DEBUG, rxq,
                                   "Budget reached nb_pkts=%u received=%u\n",

        qede_update_rx_prod(qdev, rxq);

        rxq->rcv_pkts += rx_pkt;

        PMD_RX_LOG(DEBUG, rxq, "rx_pkts=%u core=%d\n", rx_pkt, rte_lcore_id());
qede_free_tx_pkt(struct ecore_dev *edev, struct qede_tx_queue *txq)
        uint16_t nb_segs, idx = TX_CONS(txq);
        struct eth_tx_bd *tx_data_bd;
        struct rte_mbuf *mbuf = txq->sw_tx_ring[idx].mbuf;

        if (unlikely(!mbuf)) {
                PMD_TX_LOG(ERR, txq, "null mbuf\n");
                PMD_TX_LOG(ERR, txq,
                           "tx_desc %u tx_avail %u tx_cons %u tx_prod %u\n",
                           txq->nb_tx_desc, txq->nb_tx_avail, idx,

        nb_segs = mbuf->nb_segs;

        /* It's like consuming rxbuf in recv() */
        ecore_chain_consume(&txq->tx_pbl);

        rte_pktmbuf_free(mbuf);
        txq->sw_tx_ring[idx].mbuf = NULL;
static inline uint16_t
qede_process_tx_compl(struct ecore_dev *edev, struct qede_tx_queue *txq)
        uint16_t tx_compl = 0;
        uint16_t hw_bd_cons;

        hw_bd_cons = rte_le_to_cpu_16(*txq->hw_cons_ptr);
        rte_compiler_barrier();

        while (hw_bd_cons != ecore_chain_get_cons_idx(&txq->tx_pbl)) {
                if (qede_free_tx_pkt(edev, txq)) {
                        PMD_TX_LOG(ERR, txq,
                                   "hw_bd_cons = %u, chain_cons = %u\n",
                                   ecore_chain_get_cons_idx(&txq->tx_pbl));
                txq->sw_tx_cons++;	/* Making TXD available */

        PMD_TX_LOG(DEBUG, txq, "Tx compl %u sw_tx_cons %u avail %u\n",
                   tx_compl, txq->sw_tx_cons, txq->nb_tx_avail);
/* Populate scatter gather buffer descriptor fields */
static inline uint16_t qede_encode_sg_bd(struct qede_tx_queue *p_txq,
                                         struct rte_mbuf *m_seg,
                                         struct eth_tx_1st_bd *bd1)
        struct qede_tx_queue *txq = p_txq;
        struct eth_tx_2nd_bd *bd2 = NULL;
        struct eth_tx_3rd_bd *bd3 = NULL;
        struct eth_tx_bd *tx_bd = NULL;
        uint16_t nb_segs = count;

        /* Check for scattered buffers */
                        bd2 = (struct eth_tx_2nd_bd *)
                                ecore_chain_produce(&txq->tx_pbl);
                        memset(bd2, 0, sizeof(*bd2));
                        mapping = rte_mbuf_data_dma_addr(m_seg);
                        bd2->addr.hi = rte_cpu_to_le_32(U64_HI(mapping));
                        bd2->addr.lo = rte_cpu_to_le_32(U64_LO(mapping));
                        bd2->nbytes = rte_cpu_to_le_16(m_seg->data_len);
                } else if (nb_segs == 2) {
                        bd3 = (struct eth_tx_3rd_bd *)
                                ecore_chain_produce(&txq->tx_pbl);
                        memset(bd3, 0, sizeof(*bd3));
                        mapping = rte_mbuf_data_dma_addr(m_seg);
                        bd3->addr.hi = rte_cpu_to_le_32(U64_HI(mapping));
                        bd3->addr.lo = rte_cpu_to_le_32(U64_LO(mapping));
                        bd3->nbytes = rte_cpu_to_le_16(m_seg->data_len);

                        tx_bd = (struct eth_tx_bd *)
                                ecore_chain_produce(&txq->tx_pbl);
                        memset(tx_bd, 0, sizeof(*tx_bd));
                        mapping = rte_mbuf_data_dma_addr(m_seg);
                        tx_bd->addr.hi = rte_cpu_to_le_32(U64_HI(mapping));
                        tx_bd->addr.lo = rte_cpu_to_le_32(U64_LO(mapping));
                        tx_bd->nbytes = rte_cpu_to_le_16(m_seg->data_len);

                bd1->data.nbds = nb_segs;
                m_seg = m_seg->next;

        /* Return total scattered buffers */
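/*
 * Layout reminder for multi-segment transmits: the firmware interface has
 * dedicated 2nd and 3rd BD structures, so the first two continuation
 * segments land in eth_tx_2nd_bd/eth_tx_3rd_bd and every further segment
 * uses the generic eth_tx_bd. Each BD only needs the segment's IOVA and
 * byte count; bd1->data.nbds is updated so the device knows how many BDs
 * belong to this packet.
 */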
qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
        struct qede_tx_queue *txq = p_txq;
        struct qede_dev *qdev = txq->qdev;
        struct ecore_dev *edev = &qdev->edev;
        struct qede_fastpath *fp;
        struct eth_tx_1st_bd *bd1;
        struct rte_mbuf *m_seg = NULL;
        uint16_t nb_tx_pkts;
        uint16_t nb_pkt_sent = 0;
        uint16_t nb_segs = 0;

        fp = &qdev->fp_array[QEDE_RSS_COUNT(qdev) + txq->queue_id];

        if (unlikely(txq->nb_tx_avail < txq->tx_free_thresh)) {
                PMD_TX_LOG(DEBUG, txq, "send=%u avail=%u free_thresh=%u\n",
                           nb_pkts, txq->nb_tx_avail, txq->tx_free_thresh);
                (void)qede_process_tx_compl(edev, txq);

        nb_tx_pkts = RTE_MIN(nb_pkts, (txq->nb_tx_avail /
                                       ETH_TX_MAX_BDS_PER_NON_LSO_PACKET));
        if (unlikely(nb_tx_pkts == 0)) {
                PMD_TX_LOG(DEBUG, txq, "Out of BDs nb_pkts=%u avail=%u\n",
                           nb_pkts, txq->nb_tx_avail);

        tx_count = nb_tx_pkts;
        while (nb_tx_pkts--) {
                /* Fill the entry in the SW ring and the BDs in the FW ring */
                struct rte_mbuf *mbuf = *tx_pkts++;

                txq->sw_tx_ring[idx].mbuf = mbuf;
                bd1 = (struct eth_tx_1st_bd *)ecore_chain_produce(&txq->tx_pbl);
                /* Zero init struct fields */
                bd1->data.bd_flags.bitfields = 0;
                bd1->data.bitfields = 0;

                bd1->data.bd_flags.bitfields =
                        1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
                /* Map MBUF linear data for DMA and set in the first BD */
                QEDE_BD_SET_ADDR_LEN(bd1, rte_mbuf_data_dma_addr(mbuf),

                /* Descriptor based VLAN insertion */
                if (mbuf->ol_flags & (PKT_TX_VLAN_PKT | PKT_TX_QINQ_PKT)) {
                        bd1->data.vlan = rte_cpu_to_le_16(mbuf->vlan_tci);
                        bd1->data.bd_flags.bitfields |=
                                1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT;

                /* Offload the IP checksum in the hardware */
                if (mbuf->ol_flags & PKT_TX_IP_CKSUM) {
                        bd1->data.bd_flags.bitfields |=
                                1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;

                /* L4 checksum offload (tcp or udp) */
                if (mbuf->ol_flags & (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) {
                        bd1->data.bd_flags.bitfields |=
                                1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
                        /* IPv6 + extn. -> later */

                /* Handle fragmented MBUF */
                        bd1->data.nbds = nb_segs;
                        /* Encode scatter gather buffer descriptors if required */
                        nb_segs = qede_encode_sg_bd(txq, m_seg, nb_segs, bd1);
                txq->nb_tx_avail = txq->nb_tx_avail - nb_segs;

                rte_prefetch0(txq->sw_tx_ring[TX_PROD(txq)].mbuf);
                    rte_cpu_to_le_16(ecore_chain_get_prod_idx(&txq->tx_pbl));

        /* Write value of prod idx into bd_prod */
        txq->tx_db.data.bd_prod = bd_prod;
        rte_compiler_barrier();
        DIRECT_REG_WR(edev, txq->doorbell_addr, txq->tx_db.raw);
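/*
 * Doorbell sequence above, spelled out: bd_prod (the new producer index) is
 * folded into the pre-built doorbell word and then written once to the
 * device's doorbell address. The rte_compiler_barrier() keeps the compiler
 * from reordering the bd_prod store past the register write; on
 * architectures with weaker memory ordering an rte_wmb() before the doorbell
 * would be the more conservative choice, at a small cost per burst.
 */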
        /* Check again for Tx completions */
        (void)qede_process_tx_compl(edev, txq);

        PMD_TX_LOG(DEBUG, txq, "to_send=%u can_send=%u sent=%u core=%d\n",
                   nb_pkts, tx_count, nb_pkt_sent, rte_lcore_id());
static void qede_init_fp_queue(struct rte_eth_dev *eth_dev)
        struct qede_dev *qdev = eth_dev->data->dev_private;
        struct qede_fastpath *fp;
        uint8_t i, rss_id, txq_index, tc;
        int rxq = 0, txq = 0;

                fp = &qdev->fp_array[i];
                if (fp->type & QEDE_FASTPATH_RX) {
                        fp->rxq = eth_dev->data->rx_queues[i];
                        fp->rxq->queue_id = rxq++;

                if (fp->type & QEDE_FASTPATH_TX) {
                        for (tc = 0; tc < qdev->num_tc; tc++) {
                                txq_index = tc * QEDE_TSS_COUNT(qdev) + txq;
                                        eth_dev->data->tx_queues[txq_index];
                                fp->txqs[tc]->queue_id = txq_index;
int qede_dev_start(struct rte_eth_dev *eth_dev)
        struct qede_dev *qdev = eth_dev->data->dev_private;
        struct ecore_dev *edev = &qdev->edev;
        struct qed_link_output link_output;
        struct qede_fastpath *fp;

        DP_INFO(edev, "Device state is %d\n", qdev->state);

        if (qdev->state == QEDE_DEV_START) {
                DP_INFO(edev, "Port is already started\n");

        if (qdev->state == QEDE_DEV_CONFIG)
                qede_init_fp_queue(eth_dev);

        rc = qede_start_queues(eth_dev, true);
                DP_ERR(edev, "Failed to start queues\n");

        /* Bring-up the link */
        qede_dev_set_link_state(eth_dev, true);

        /* Start/resume traffic */
        qdev->ops->fastpath_start(edev);

        qdev->state = QEDE_DEV_START;

        DP_INFO(edev, "dev_state is QEDE_DEV_START\n");
static int qede_drain_txq(struct qede_dev *qdev,
                          struct qede_tx_queue *txq, bool allow_drain)
        struct ecore_dev *edev = &qdev->edev;

        while (txq->sw_tx_cons != txq->sw_tx_prod) {
                qede_process_tx_compl(edev, txq);
                        DP_NOTICE(edev, false,
                                  "Tx queue[%u] is stuck, "
                                  "requesting MCP to drain\n",
                        rc = qdev->ops->common->drain(edev);
                                return qede_drain_txq(qdev, txq, false);
                        DP_NOTICE(edev, false,
                                  "Timeout waiting for tx queue[%d]: "
                                  "PROD=%d, CONS=%d\n",
                                  txq->queue_id, txq->sw_tx_prod,

        rte_compiler_barrier();

        /* FW finished processing, wait for HW to transmit all tx packets */
static int qede_stop_queues(struct qede_dev *qdev)
        struct qed_update_vport_params vport_update_params;
        struct ecore_dev *edev = &qdev->edev;

        /* Disable the vport */
        memset(&vport_update_params, 0, sizeof(vport_update_params));
        vport_update_params.vport_id = 0;
        vport_update_params.update_vport_active_flg = 1;
        vport_update_params.vport_active_flg = 0;
        vport_update_params.update_rss_flg = 0;

        DP_INFO(edev, "Deactivate vport\n");

        rc = qdev->ops->vport_update(edev, &vport_update_params);
                DP_ERR(edev, "Failed to update vport\n");

        DP_INFO(edev, "Flushing tx queues\n");

        /* Flush Tx queues. If needed, request drain from MCP */
                struct qede_fastpath *fp = &qdev->fp_array[i];

                if (fp->type & QEDE_FASTPATH_TX) {
                        for (tc = 0; tc < qdev->num_tc; tc++) {
                                struct qede_tx_queue *txq = fp->txqs[tc];

                                rc = qede_drain_txq(qdev, txq, true);

        /* Stop all Queues in reverse order */
        for (i = QEDE_QUEUE_CNT(qdev) - 1; i >= 0; i--) {
                struct qed_stop_rxq_params rx_params;

                /* Stop the Tx Queue(s) */
                if (qdev->fp_array[i].type & QEDE_FASTPATH_TX) {
                        for (tc = 0; tc < qdev->num_tc; tc++) {
                                struct qed_stop_txq_params tx_params;

                                tx_params.rss_id = i;
                                val = qdev->fp_array[i].txqs[tc]->queue_id;
                                tx_params.tx_queue_id = val;

                                DP_INFO(edev, "Stopping tx queues\n");
                                rc = qdev->ops->q_tx_stop(edev, &tx_params);
                                        DP_ERR(edev, "Failed to stop TXQ #%d\n",
                                               tx_params.tx_queue_id);

                /* Stop the Rx Queue */
                if (qdev->fp_array[i].type & QEDE_FASTPATH_RX) {
                        memset(&rx_params, 0, sizeof(rx_params));
                        rx_params.rss_id = i;
                        rx_params.rx_queue_id = qdev->fp_array[i].rxq->queue_id;
                        rx_params.eq_completion_only = 1;

                        DP_INFO(edev, "Stopping rx queues\n");

                        rc = qdev->ops->q_rx_stop(edev, &rx_params);
                                DP_ERR(edev, "Failed to stop RXQ #%d\n", i);
int qede_reset_fp_rings(struct qede_dev *qdev)
        struct qede_fastpath *fp;
        struct qede_tx_queue *txq;

        for_each_queue(id) {
                fp = &qdev->fp_array[id];

                if (fp->type & QEDE_FASTPATH_RX) {
                        DP_INFO(&qdev->edev,
                                "Reset FP chain for RSS %u\n", id);
                        qede_rx_queue_release_mbufs(fp->rxq);
                        ecore_chain_reset(&fp->rxq->rx_bd_ring);
                        ecore_chain_reset(&fp->rxq->rx_comp_ring);
                        fp->rxq->sw_rx_prod = 0;
                        fp->rxq->sw_rx_cons = 0;
                        *fp->rxq->hw_cons_ptr = 0;
                        for (i = 0; i < fp->rxq->nb_rx_desc; i++) {
                                if (qede_alloc_rx_buffer(fp->rxq)) {
                                           "RX buffer allocation failed\n");

                if (fp->type & QEDE_FASTPATH_TX) {
                        for (tc = 0; tc < qdev->num_tc; tc++) {
                                qede_tx_queue_release_mbufs(txq);
                                ecore_chain_reset(&txq->tx_pbl);
                                txq->sw_tx_cons = 0;
                                txq->sw_tx_prod = 0;
                                *txq->hw_cons_ptr = 0;

        qede_reset_fp_rings(qdev);
/* This function frees the memory allocated for the fastpath queues */
void qede_free_mem_load(struct rte_eth_dev *eth_dev)
        struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
        struct qede_fastpath *fp;

        for_each_queue(id) {
                fp = &qdev->fp_array[id];
                if (fp->type & QEDE_FASTPATH_RX) {
                        qede_rx_queue_release(fp->rxq);
                        eth_dev->data->rx_queues[id] = NULL;

                        for (tc = 0; tc < qdev->num_tc; tc++) {
                                txq_idx = fp->txqs[tc]->queue_id;
                                qede_tx_queue_release(fp->txqs[tc]);
                                eth_dev->data->tx_queues[txq_idx] = NULL;
void qede_dev_stop(struct rte_eth_dev *eth_dev)
        struct qede_dev *qdev = eth_dev->data->dev_private;
        struct ecore_dev *edev = &qdev->edev;

        DP_INFO(edev, "port %u\n", eth_dev->data->port_id);

        if (qdev->state != QEDE_DEV_START) {
                DP_INFO(edev, "Device not yet started\n");

        if (qede_stop_queues(qdev))
                DP_ERR(edev, "Failed to stop queues\n");

        DP_INFO(edev, "Stopped queues\n");

        qdev->ops->fastpath_stop(edev);

        /* Bring the link down */
        qede_dev_set_link_state(eth_dev, false);

        qdev->state = QEDE_DEV_STOP;

        DP_INFO(edev, "dev_state is QEDE_DEV_STOP\n");
qede_rxtx_pkts_dummy(__rte_unused void *p_rxq,
                     __rte_unused struct rte_mbuf **pkts,
                     __rte_unused uint16_t nb_pkts)