2 * Copyright (c) 2016 QLogic Corporation.
6 * See LICENSE.qede_pmd for copyright and licensing details.
10 #include "qede_rxtx.h"
12 static inline int qede_alloc_rx_buffer(struct qede_rx_queue *rxq)
14 struct rte_mbuf *new_mb = NULL;
15 struct eth_rx_bd *rx_bd;
17 uint16_t idx = rxq->sw_rx_prod & NUM_RX_BDS(rxq);
19 new_mb = rte_mbuf_raw_alloc(rxq->mb_pool);
20 if (unlikely(!new_mb)) {
22 "Failed to allocate rx buffer "
23 "sw_rx_prod %u sw_rx_cons %u mp entries %u free %u",
24 idx, rxq->sw_rx_cons & NUM_RX_BDS(rxq),
25 rte_mempool_avail_count(rxq->mb_pool),
26 rte_mempool_in_use_count(rxq->mb_pool));
29 rxq->sw_rx_ring[idx].mbuf = new_mb;
30 rxq->sw_rx_ring[idx].page_offset = 0;
31 mapping = rte_mbuf_data_iova_default(new_mb);
32 /* Advance PROD and get BD pointer */
33 rx_bd = (struct eth_rx_bd *)ecore_chain_produce(&rxq->rx_bd_ring);
34 rx_bd->addr.hi = rte_cpu_to_le_32(U64_HI(mapping));
35 rx_bd->addr.lo = rte_cpu_to_le_32(U64_LO(mapping));
40 /* Criteria for calculating Rx buffer size -
41 * 1) rx_buf_size should not exceed the size of mbuf
42 * 2) In scattered_rx mode - minimum rx_buf_size should be
43 * (MTU + Maximum L2 Header Size + 2) / ETH_RX_MAX_BUFF_PER_PKT
44 * 3) In regular mode - minimum rx_buf_size should be
45 * (MTU + Maximum L2 Header Size + 2)
46 * In the above cases +2 corresponds to 2 bytes of padding in front of L2
48 * 4) rx_buf_size should be cacheline-size aligned. So considering
49 * criterion 1, we need to round the size down (floor) instead of up (ceil),
50 * so that aligning rx_buf_size never makes it exceed the mbuf size.
53 qede_calc_rx_buf_size(struct rte_eth_dev *dev, uint16_t mbufsz,
54 uint16_t max_frame_size)
56 struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
57 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
60 if (dev->data->scattered_rx) {
61 /* per HW limitation, only ETH_RX_MAX_BUFF_PER_PKT number of
62 * buffers can be used for a single packet. So we need to make sure
63 * the mbuf size is sufficient for this.
65 if ((mbufsz * ETH_RX_MAX_BUFF_PER_PKT) <
66 (max_frame_size + QEDE_ETH_OVERHEAD)) {
67 DP_ERR(edev, "mbuf %d size is not enough to hold max fragments (%d) for max rx packet length (%d)\n",
68 mbufsz, ETH_RX_MAX_BUFF_PER_PKT, max_frame_size);
72 rx_buf_size = RTE_MAX(mbufsz,
73 (max_frame_size + QEDE_ETH_OVERHEAD) /
74 ETH_RX_MAX_BUFF_PER_PKT);
76 rx_buf_size = max_frame_size + QEDE_ETH_OVERHEAD;
79 /* Align to cache-line size if needed */
80 return QEDE_FLOOR_TO_CACHE_LINE_SIZE(rx_buf_size);
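/* Worked example (illustrative only; the concrete constants below are
 * assumptions, not values taken from qede_rxtx.h): with a 2048-byte mbuf
 * data room and the default 128-byte RTE_PKTMBUF_HEADROOM, the usable mbuf
 * size is 1920 bytes, already a multiple of a 64-byte cache line. Assuming
 * ETH_RX_MAX_BUFF_PER_PKT is 5 and QEDE_ETH_OVERHEAD is a few tens of
 * bytes, a 9000-byte max frame in scattered_rx mode needs roughly
 * (9000 + overhead) / 5 ~= 1.8 KB per buffer, so RTE_MAX() keeps
 * rx_buf_size at the 1920-byte mbuf size and the frame is spread across
 * multiple BDs. In regular (non-scattered) mode the mbuf itself would have
 * to hold the whole 9000 + QEDE_ETH_OVERHEAD bytes.
 */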
84 qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
85 uint16_t nb_desc, unsigned int socket_id,
86 __rte_unused const struct rte_eth_rxconf *rx_conf,
87 struct rte_mempool *mp)
89 struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
90 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
91 struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
92 struct qede_rx_queue *rxq;
93 uint16_t max_rx_pkt_len;
98 PMD_INIT_FUNC_TRACE(edev);
100 /* Note: Ring size/align is controlled by struct rte_eth_desc_lim */
101 if (!rte_is_power_of_2(nb_desc)) {
102 DP_ERR(edev, "Ring size %u is not power of 2\n",
107 /* Free memory prior to re-allocation if needed... */
108 if (dev->data->rx_queues[queue_idx] != NULL) {
109 qede_rx_queue_release(dev->data->rx_queues[queue_idx]);
110 dev->data->rx_queues[queue_idx] = NULL;
113 /* First allocate the rx queue data structure */
114 rxq = rte_zmalloc_socket("qede_rx_queue", sizeof(struct qede_rx_queue),
115 RTE_CACHE_LINE_SIZE, socket_id);
118 DP_ERR(edev, "Unable to allocate memory for rxq on socket %u",
125 rxq->nb_rx_desc = nb_desc;
126 rxq->queue_id = queue_idx;
127 rxq->port_id = dev->data->port_id;
129 max_rx_pkt_len = (uint16_t)rxmode->max_rx_pkt_len;
131 /* Fix up RX buffer size */
132 bufsz = (uint16_t)rte_pktmbuf_data_room_size(mp) - RTE_PKTMBUF_HEADROOM;
133 /* cache align the mbuf size to simplify rx_buf_size calculation */
134 bufsz = QEDE_FLOOR_TO_CACHE_LINE_SIZE(bufsz);
136 if ((rxmode->enable_scatter) ||
137 (max_rx_pkt_len + QEDE_ETH_OVERHEAD) > bufsz) {
138 if (!dev->data->scattered_rx) {
139 DP_INFO(edev, "Forcing scatter-gather mode\n");
140 dev->data->scattered_rx = 1;
144 rc = qede_calc_rx_buf_size(dev, bufsz, max_rx_pkt_len);
150 rxq->rx_buf_size = rc;
152 DP_INFO(edev, "mtu %u mbufsz %u bd_max_bytes %u scatter_mode %d\n",
153 qdev->mtu, bufsz, rxq->rx_buf_size, dev->data->scattered_rx);
155 /* Allocate the parallel driver ring for Rx buffers */
156 size = sizeof(*rxq->sw_rx_ring) * rxq->nb_rx_desc;
157 rxq->sw_rx_ring = rte_zmalloc_socket("sw_rx_ring", size,
158 RTE_CACHE_LINE_SIZE, socket_id);
159 if (!rxq->sw_rx_ring) {
160 DP_ERR(edev, "Memory allocation fails for sw_rx_ring on"
161 " socket %u\n", socket_id);
166 /* Allocate FW Rx ring */
167 rc = qdev->ops->common->chain_alloc(edev,
168 ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
169 ECORE_CHAIN_MODE_NEXT_PTR,
170 ECORE_CHAIN_CNT_TYPE_U16,
172 sizeof(struct eth_rx_bd),
176 if (rc != ECORE_SUCCESS) {
177 DP_ERR(edev, "Memory allocation fails for RX BD ring"
178 " on socket %u\n", socket_id);
179 rte_free(rxq->sw_rx_ring);
184 /* Allocate FW completion ring */
185 rc = qdev->ops->common->chain_alloc(edev,
186 ECORE_CHAIN_USE_TO_CONSUME,
187 ECORE_CHAIN_MODE_PBL,
188 ECORE_CHAIN_CNT_TYPE_U16,
190 sizeof(union eth_rx_cqe),
194 if (rc != ECORE_SUCCESS) {
195 DP_ERR(edev, "Memory allocation fails for RX CQE ring"
196 " on socket %u\n", socket_id);
197 qdev->ops->common->chain_free(edev, &rxq->rx_bd_ring);
198 rte_free(rxq->sw_rx_ring);
203 dev->data->rx_queues[queue_idx] = rxq;
204 qdev->fp_array[queue_idx].rxq = rxq;
206 DP_INFO(edev, "rxq %d num_desc %u rx_buf_size=%u socket %u\n",
207 queue_idx, nb_desc, rxq->rx_buf_size, socket_id);
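/*
 * Illustrative sketch only (not part of the driver): how an application
 * typically reaches qede_rx_queue_setup() through the generic ethdev API.
 * The helper name, descriptor count and mempool sizing are assumptions
 * chosen for the example, and the ethdev/mbuf declarations are assumed to
 * be visible via qede_rxtx.h.
 */
static __rte_unused int
example_setup_rx_queue(uint16_t port_id, uint16_t queue_id,
		       unsigned int socket_id)
{
	struct rte_mempool *mp;
	const uint16_t nb_desc = 1024;	/* must be a power of 2, see above */

	mp = rte_pktmbuf_pool_create("example_rx_pool", 8192, 256, 0,
				     RTE_MBUF_DEFAULT_BUF_SIZE, socket_id);
	if (mp == NULL)
		return -1;

	/* A NULL rx_conf selects the defaults reported by the driver */
	return rte_eth_rx_queue_setup(port_id, queue_id, nb_desc,
				      socket_id, NULL, mp);
}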
213 qede_rx_queue_reset(__rte_unused struct qede_dev *qdev,
214 struct qede_rx_queue *rxq)
216 DP_INFO(&qdev->edev, "Reset RX queue %u\n", rxq->queue_id);
217 ecore_chain_reset(&rxq->rx_bd_ring);
218 ecore_chain_reset(&rxq->rx_comp_ring);
221 *rxq->hw_cons_ptr = 0;
224 static void qede_rx_queue_release_mbufs(struct qede_rx_queue *rxq)
228 if (rxq->sw_rx_ring) {
229 for (i = 0; i < rxq->nb_rx_desc; i++) {
230 if (rxq->sw_rx_ring[i].mbuf) {
231 rte_pktmbuf_free(rxq->sw_rx_ring[i].mbuf);
232 rxq->sw_rx_ring[i].mbuf = NULL;
238 void qede_rx_queue_release(void *rx_queue)
240 struct qede_rx_queue *rxq = rx_queue;
241 struct qede_dev *qdev;
242 struct ecore_dev *edev;
246 edev = QEDE_INIT_EDEV(qdev);
247 PMD_INIT_FUNC_TRACE(edev);
248 qede_rx_queue_release_mbufs(rxq);
249 qdev->ops->common->chain_free(edev, &rxq->rx_bd_ring);
250 qdev->ops->common->chain_free(edev, &rxq->rx_comp_ring);
251 rte_free(rxq->sw_rx_ring);
256 /* Stops a given RX queue in the HW */
257 static int qede_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
259 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
260 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
261 struct ecore_hwfn *p_hwfn;
262 struct qede_rx_queue *rxq;
266 if (rx_queue_id < eth_dev->data->nb_rx_queues) {
267 rxq = eth_dev->data->rx_queues[rx_queue_id];
268 hwfn_index = rx_queue_id % edev->num_hwfns;
269 p_hwfn = &edev->hwfns[hwfn_index];
270 rc = ecore_eth_rx_queue_stop(p_hwfn, rxq->handle,
272 if (rc != ECORE_SUCCESS) {
273 DP_ERR(edev, "RX queue %u stop fails\n", rx_queue_id);
276 qede_rx_queue_release_mbufs(rxq);
277 qede_rx_queue_reset(qdev, rxq);
278 eth_dev->data->rx_queue_state[rx_queue_id] =
279 RTE_ETH_QUEUE_STATE_STOPPED;
280 DP_INFO(edev, "RX queue %u stopped\n", rx_queue_id);
282 DP_ERR(edev, "RX queue %u is not in range\n", rx_queue_id);
290 qede_tx_queue_setup(struct rte_eth_dev *dev,
293 unsigned int socket_id,
294 const struct rte_eth_txconf *tx_conf)
296 struct qede_dev *qdev = dev->data->dev_private;
297 struct ecore_dev *edev = &qdev->edev;
298 struct qede_tx_queue *txq;
301 PMD_INIT_FUNC_TRACE(edev);
303 if (!rte_is_power_of_2(nb_desc)) {
304 DP_ERR(edev, "Ring size %u is not power of 2\n",
309 /* Free memory prior to re-allocation if needed... */
310 if (dev->data->tx_queues[queue_idx] != NULL) {
311 qede_tx_queue_release(dev->data->tx_queues[queue_idx]);
312 dev->data->tx_queues[queue_idx] = NULL;
315 txq = rte_zmalloc_socket("qede_tx_queue", sizeof(struct qede_tx_queue),
316 RTE_CACHE_LINE_SIZE, socket_id);
320 "Unable to allocate memory for txq on socket %u",
325 txq->nb_tx_desc = nb_desc;
327 txq->port_id = dev->data->port_id;
329 rc = qdev->ops->common->chain_alloc(edev,
330 ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
331 ECORE_CHAIN_MODE_PBL,
332 ECORE_CHAIN_CNT_TYPE_U16,
334 sizeof(union eth_tx_bd_types),
337 if (rc != ECORE_SUCCESS) {
339 "Unable to allocate memory for txbd ring on socket %u",
341 qede_tx_queue_release(txq);
345 /* Allocate software ring */
346 txq->sw_tx_ring = rte_zmalloc_socket("txq->sw_tx_ring",
347 (sizeof(struct qede_tx_entry) *
349 RTE_CACHE_LINE_SIZE, socket_id);
351 if (!txq->sw_tx_ring) {
353 "Unable to allocate memory for txbd ring on socket %u",
355 qdev->ops->common->chain_free(edev, &txq->tx_pbl);
356 qede_tx_queue_release(txq);
360 txq->queue_id = queue_idx;
362 txq->nb_tx_avail = txq->nb_tx_desc;
364 txq->tx_free_thresh =
365 tx_conf->tx_free_thresh ? tx_conf->tx_free_thresh :
366 (txq->nb_tx_desc - QEDE_DEFAULT_TX_FREE_THRESH);
368 dev->data->tx_queues[queue_idx] = txq;
369 qdev->fp_array[queue_idx].txq = txq;
372 "txq %u num_desc %u tx_free_thresh %u socket %u\n",
373 queue_idx, nb_desc, txq->tx_free_thresh, socket_id);
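/*
 * Illustrative sketch only: application-side Tx queue setup that ends up in
 * qede_tx_queue_setup(). Passing tx_free_thresh = 0 in tx_conf lets the
 * driver pick its default, as computed above. The helper name and the ring
 * size are assumptions for the example.
 */
static __rte_unused int
example_setup_tx_queue(uint16_t port_id, uint16_t queue_id,
		       unsigned int socket_id)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_txconf txconf;

	rte_eth_dev_info_get(port_id, &dev_info);
	txconf = dev_info.default_txconf;
	txconf.tx_free_thresh = 0;	/* 0 == let the PMD choose */

	return rte_eth_tx_queue_setup(port_id, queue_id, 4096,
				      socket_id, &txconf);
}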
379 qede_tx_queue_reset(__rte_unused struct qede_dev *qdev,
380 struct qede_tx_queue *txq)
382 DP_INFO(&qdev->edev, "Reset TX queue %u\n", txq->queue_id);
383 ecore_chain_reset(&txq->tx_pbl);
386 *txq->hw_cons_ptr = 0;
389 static void qede_tx_queue_release_mbufs(struct qede_tx_queue *txq)
393 if (txq->sw_tx_ring) {
394 for (i = 0; i < txq->nb_tx_desc; i++) {
395 if (txq->sw_tx_ring[i].mbuf) {
396 rte_pktmbuf_free(txq->sw_tx_ring[i].mbuf);
397 txq->sw_tx_ring[i].mbuf = NULL;
403 void qede_tx_queue_release(void *tx_queue)
405 struct qede_tx_queue *txq = tx_queue;
406 struct qede_dev *qdev;
407 struct ecore_dev *edev;
411 edev = QEDE_INIT_EDEV(qdev);
412 PMD_INIT_FUNC_TRACE(edev);
413 qede_tx_queue_release_mbufs(txq);
414 qdev->ops->common->chain_free(edev, &txq->tx_pbl);
415 rte_free(txq->sw_tx_ring);
420 /* This function allocates fast-path status block memory */
422 qede_alloc_mem_sb(struct qede_dev *qdev, struct ecore_sb_info *sb_info,
425 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
426 struct status_block_e4 *sb_virt;
430 sb_virt = OSAL_DMA_ALLOC_COHERENT(edev, &sb_phys,
431 sizeof(struct status_block_e4));
433 DP_ERR(edev, "Status block allocation failed\n");
436 rc = qdev->ops->common->sb_init(edev, sb_info, sb_virt,
439 DP_ERR(edev, "Status block initialization failed\n");
440 OSAL_DMA_FREE_COHERENT(edev, sb_virt, sb_phys,
441 sizeof(struct status_block_e4));
448 int qede_alloc_fp_resc(struct qede_dev *qdev)
450 struct ecore_dev *edev = &qdev->edev;
451 struct qede_fastpath *fp;
456 ecore_vf_get_num_sbs(ECORE_LEADING_HWFN(edev), &num_sbs);
458 num_sbs = ecore_cxt_get_proto_cid_count
459 (ECORE_LEADING_HWFN(edev), PROTOCOLID_ETH, NULL);
462 DP_ERR(edev, "No status blocks available\n");
466 qdev->fp_array = rte_calloc("fp", QEDE_RXTX_MAX(qdev),
467 sizeof(*qdev->fp_array), RTE_CACHE_LINE_SIZE);
469 if (!qdev->fp_array) {
470 DP_ERR(edev, "fp array allocation failed\n");
474 memset((void *)qdev->fp_array, 0, QEDE_RXTX_MAX(qdev) *
475 sizeof(*qdev->fp_array));
477 for (sb_idx = 0; sb_idx < QEDE_RXTX_MAX(qdev); sb_idx++) {
478 fp = &qdev->fp_array[sb_idx];
481 fp->sb_info = rte_calloc("sb", 1, sizeof(struct ecore_sb_info),
482 RTE_CACHE_LINE_SIZE);
484 DP_ERR(edev, "FP sb_info allocation fails\n");
487 if (qede_alloc_mem_sb(qdev, fp->sb_info, sb_idx)) {
488 DP_ERR(edev, "FP status block allocation fails\n");
491 DP_INFO(edev, "sb_info idx 0x%x initialized\n",
492 fp->sb_info->igu_sb_id);
498 void qede_dealloc_fp_resc(struct rte_eth_dev *eth_dev)
500 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
501 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
502 struct qede_fastpath *fp;
506 PMD_INIT_FUNC_TRACE(edev);
508 for (sb_idx = 0; sb_idx < QEDE_RXTX_MAX(qdev); sb_idx++) {
509 fp = &qdev->fp_array[sb_idx];
512 DP_INFO(edev, "Free sb_info index 0x%x\n",
513 fp->sb_info->igu_sb_id);
515 OSAL_DMA_FREE_COHERENT(edev, fp->sb_info->sb_virt,
516 fp->sb_info->sb_phys,
517 sizeof(struct status_block_e4));
518 rte_free(fp->sb_info);
523 /* Free packet buffers and ring memories */
524 for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
525 if (eth_dev->data->rx_queues[i]) {
526 qede_rx_queue_release(eth_dev->data->rx_queues[i]);
527 eth_dev->data->rx_queues[i] = NULL;
531 for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
532 if (eth_dev->data->tx_queues[i]) {
533 qede_tx_queue_release(eth_dev->data->tx_queues[i]);
534 eth_dev->data->tx_queues[i] = NULL;
539 rte_free(qdev->fp_array);
540 qdev->fp_array = NULL;
544 qede_update_rx_prod(__rte_unused struct qede_dev *edev,
545 struct qede_rx_queue *rxq)
547 uint16_t bd_prod = ecore_chain_get_prod_idx(&rxq->rx_bd_ring);
548 uint16_t cqe_prod = ecore_chain_get_prod_idx(&rxq->rx_comp_ring);
549 struct eth_rx_prod_data rx_prods = { 0 };
551 /* Update producers */
552 rx_prods.bd_prod = rte_cpu_to_le_16(bd_prod);
553 rx_prods.cqe_prod = rte_cpu_to_le_16(cqe_prod);
555 /* Make sure that the BD and SGE data is updated before updating the
556 * producers since FW might read the BD/SGE right after the producer
561 internal_ram_wr(rxq->hw_rxq_prod_addr, sizeof(rx_prods),
562 (uint32_t *)&rx_prods);
564 /* mmiowb is needed to synchronize doorbell writes from more than one
565 * processor. It guarantees that the write arrives at the device before
566 * the napi lock is released and another qede_poll is called (possibly
567 * on another CPU). Without this barrier, the next doorbell can bypass
568 * this doorbell. This is applicable to IA64/Altix systems.
572 PMD_RX_LOG(DEBUG, rxq, "bd_prod %u cqe_prod %u", bd_prod, cqe_prod);
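/*
 * A minimal sketch of the ordering the comments above rely on (illustrative
 * only; the driver's own barrier placement is authoritative):
 *
 *     write BD/SGE entries           <- plain stores
 *     rte_wmb()                      <- make BD contents globally visible
 *     internal_ram_wr(producers)     <- publish bd_prod/cqe_prod to HW
 *     rte_wmb()                      <- order the doorbell vs. later stores
 *
 * rte_wmb() is the generic DPDK store barrier used for this purpose.
 */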
575 /* Starts a given RX queue in HW */
577 qede_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
579 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
580 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
581 struct ecore_queue_start_common_params params;
582 struct ecore_rxq_start_ret_params ret_params;
583 struct qede_rx_queue *rxq;
584 struct qede_fastpath *fp;
585 struct ecore_hwfn *p_hwfn;
586 dma_addr_t p_phys_table;
592 if (rx_queue_id < eth_dev->data->nb_rx_queues) {
593 fp = &qdev->fp_array[rx_queue_id];
594 rxq = eth_dev->data->rx_queues[rx_queue_id];
595 /* Allocate buffers for the Rx ring */
596 for (j = 0; j < rxq->nb_rx_desc; j++) {
597 rc = qede_alloc_rx_buffer(rxq);
599 DP_ERR(edev, "RX buffer allocation failed"
600 " for rxq = %u\n", rx_queue_id);
604 /* disable interrupts */
605 ecore_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0);
607 memset(&params, 0, sizeof(params));
608 params.queue_id = rx_queue_id / edev->num_hwfns;
610 params.stats_id = params.vport_id;
611 params.p_sb = fp->sb_info;
612 DP_INFO(edev, "rxq %u igu_sb_id 0x%x\n",
613 fp->rxq->queue_id, fp->sb_info->igu_sb_id);
614 params.sb_idx = RX_PI;
615 hwfn_index = rx_queue_id % edev->num_hwfns;
616 p_hwfn = &edev->hwfns[hwfn_index];
617 p_phys_table = ecore_chain_get_pbl_phys(&fp->rxq->rx_comp_ring);
618 page_cnt = ecore_chain_get_page_cnt(&fp->rxq->rx_comp_ring);
619 memset(&ret_params, 0, sizeof(ret_params));
620 rc = ecore_eth_rx_queue_start(p_hwfn,
621 p_hwfn->hw_info.opaque_fid,
622 &params, fp->rxq->rx_buf_size,
623 fp->rxq->rx_bd_ring.p_phys_addr,
624 p_phys_table, page_cnt,
627 DP_ERR(edev, "RX queue %u could not be started, rc = %d\n",
631 /* Update with the returned parameters */
632 fp->rxq->hw_rxq_prod_addr = ret_params.p_prod;
633 fp->rxq->handle = ret_params.p_handle;
635 fp->rxq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[RX_PI];
636 qede_update_rx_prod(qdev, fp->rxq);
637 eth_dev->data->rx_queue_state[rx_queue_id] =
638 RTE_ETH_QUEUE_STATE_STARTED;
639 DP_INFO(edev, "RX queue %u started\n", rx_queue_id);
641 DP_ERR(edev, "RX queue %u is not in range\n", rx_queue_id);
649 qede_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
651 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
652 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
653 struct ecore_queue_start_common_params params;
654 struct ecore_txq_start_ret_params ret_params;
655 struct ecore_hwfn *p_hwfn;
656 dma_addr_t p_phys_table;
657 struct qede_tx_queue *txq;
658 struct qede_fastpath *fp;
663 if (tx_queue_id < eth_dev->data->nb_tx_queues) {
664 txq = eth_dev->data->tx_queues[tx_queue_id];
665 fp = &qdev->fp_array[tx_queue_id];
666 memset(&params, 0, sizeof(params));
667 params.queue_id = tx_queue_id / edev->num_hwfns;
669 params.stats_id = params.vport_id;
670 params.p_sb = fp->sb_info;
671 DP_INFO(edev, "txq %u igu_sb_id 0x%x\n",
672 fp->txq->queue_id, fp->sb_info->igu_sb_id);
673 params.sb_idx = TX_PI(0); /* tc = 0 */
674 p_phys_table = ecore_chain_get_pbl_phys(&txq->tx_pbl);
675 page_cnt = ecore_chain_get_page_cnt(&txq->tx_pbl);
676 hwfn_index = tx_queue_id % edev->num_hwfns;
677 p_hwfn = &edev->hwfns[hwfn_index];
678 if (qdev->dev_info.is_legacy)
679 fp->txq->is_legacy = true;
680 rc = ecore_eth_tx_queue_start(p_hwfn,
681 p_hwfn->hw_info.opaque_fid,
683 p_phys_table, page_cnt,
685 if (rc != ECORE_SUCCESS) {
686 DP_ERR(edev, "TX queue %u couldn't be started, rc=%d\n",
690 txq->doorbell_addr = ret_params.p_doorbell;
691 txq->handle = ret_params.p_handle;
693 txq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[TX_PI(0)];
694 SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_DEST,
696 SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD,
698 SET_FIELD(txq->tx_db.data.params,
699 ETH_DB_DATA_AGG_VAL_SEL,
700 DQ_XCM_ETH_TX_BD_PROD_CMD);
701 txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD;
702 eth_dev->data->tx_queue_state[tx_queue_id] =
703 RTE_ETH_QUEUE_STATE_STARTED;
704 DP_INFO(edev, "TX queue %u started\n", tx_queue_id);
706 DP_ERR(edev, "TX queue %u is not in range\n", tx_queue_id);
714 qede_free_tx_pkt(struct qede_tx_queue *txq)
716 struct rte_mbuf *mbuf;
721 mbuf = txq->sw_tx_ring[idx].mbuf;
723 nb_segs = mbuf->nb_segs;
724 PMD_TX_LOG(DEBUG, txq, "nb_segs to free %u\n", nb_segs);
726 /* It's like consuming rxbuf in recv() */
727 ecore_chain_consume(&txq->tx_pbl);
731 rte_pktmbuf_free(mbuf);
732 txq->sw_tx_ring[idx].mbuf = NULL;
734 PMD_TX_LOG(DEBUG, txq, "Freed tx packet\n");
736 ecore_chain_consume(&txq->tx_pbl);
742 qede_process_tx_compl(__rte_unused struct ecore_dev *edev,
743 struct qede_tx_queue *txq)
746 #ifdef RTE_LIBRTE_QEDE_DEBUG_TX
750 rte_compiler_barrier();
751 hw_bd_cons = rte_le_to_cpu_16(*txq->hw_cons_ptr);
752 #ifdef RTE_LIBRTE_QEDE_DEBUG_TX
753 sw_tx_cons = ecore_chain_get_cons_idx(&txq->tx_pbl);
754 PMD_TX_LOG(DEBUG, txq, "Tx Completions = %u\n",
755 abs(hw_bd_cons - sw_tx_cons));
757 while (hw_bd_cons != ecore_chain_get_cons_idx(&txq->tx_pbl))
758 qede_free_tx_pkt(txq);
761 static int qede_drain_txq(struct qede_dev *qdev,
762 struct qede_tx_queue *txq, bool allow_drain)
764 struct ecore_dev *edev = &qdev->edev;
767 while (txq->sw_tx_cons != txq->sw_tx_prod) {
768 qede_process_tx_compl(edev, txq);
771 DP_ERR(edev, "Tx queue[%u] is stuck, "
772 "requesting MCP to drain\n",
774 rc = qdev->ops->common->drain(edev);
777 return qede_drain_txq(qdev, txq, false);
779 DP_ERR(edev, "Timeout waiting for tx queue[%d]: "
780 "PROD=%d, CONS=%d\n",
781 txq->queue_id, txq->sw_tx_prod,
787 rte_compiler_barrier();
790 /* FW finished processing, wait for HW to transmit all tx packets */
796 /* Stops a given TX queue in the HW */
797 static int qede_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
799 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
800 struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
801 struct ecore_hwfn *p_hwfn;
802 struct qede_tx_queue *txq;
806 if (tx_queue_id < eth_dev->data->nb_tx_queues) {
807 txq = eth_dev->data->tx_queues[tx_queue_id];
809 if (qede_drain_txq(qdev, txq, true))
810 return -1; /* For the lack of retcodes */
812 hwfn_index = tx_queue_id % edev->num_hwfns;
813 p_hwfn = &edev->hwfns[hwfn_index];
814 rc = ecore_eth_tx_queue_stop(p_hwfn, txq->handle);
815 if (rc != ECORE_SUCCESS) {
816 DP_ERR(edev, "TX queue %u stop fails\n", tx_queue_id);
819 qede_tx_queue_release_mbufs(txq);
820 qede_tx_queue_reset(qdev, txq);
821 eth_dev->data->tx_queue_state[tx_queue_id] =
822 RTE_ETH_QUEUE_STATE_STOPPED;
823 DP_INFO(edev, "TX queue %u stopped\n", tx_queue_id);
825 DP_ERR(edev, "TX queue %u is not in range\n", tx_queue_id);
832 int qede_start_queues(struct rte_eth_dev *eth_dev)
834 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
839 rc = qede_rx_queue_start(eth_dev, id);
840 if (rc != ECORE_SUCCESS)
845 rc = qede_tx_queue_start(eth_dev, id);
846 if (rc != ECORE_SUCCESS)
853 void qede_stop_queues(struct rte_eth_dev *eth_dev)
855 struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
858 /* Stopping RX/TX queues */
860 qede_tx_queue_stop(eth_dev, id);
864 qede_rx_queue_stop(eth_dev, id);
868 static inline bool qede_tunn_exist(uint16_t flag)
870 return !!((PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK <<
871 PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT) & flag);
874 static inline uint8_t qede_check_tunn_csum_l3(uint16_t flag)
876 return !!((PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK <<
877 PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT) & flag);
881 * qede_check_tunn_csum_l4:
883 * Returns 1 if L4 csum is enabled AND the validation has failed, 0 otherwise.
886 static inline uint8_t qede_check_tunn_csum_l4(uint16_t flag)
888 if ((PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK <<
889 PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT) & flag)
890 return !!((PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK <<
891 PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT) & flag);
896 static inline uint8_t qede_check_notunn_csum_l4(uint16_t flag)
898 if ((PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK <<
899 PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT) & flag)
900 return !!((PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK <<
901 PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT) & flag);
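/*
 * Usage note (derived from the receive path below): these helpers return a
 * non-zero value only when HW reports that it actually computed the checksum
 * AND the corresponding error bit is set; qede_recv_pkts() then marks the
 * mbuf with PKT_RX_L4_CKSUM_BAD, and with PKT_RX_L4_CKSUM_GOOD otherwise.
 */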
906 /* Returns outer L2, L3 and L4 packet_type for tunneled packets */
907 static inline uint32_t qede_rx_cqe_to_pkt_type_outer(struct rte_mbuf *m)
909 uint32_t packet_type = RTE_PTYPE_UNKNOWN;
910 struct ether_hdr *eth_hdr;
911 struct ipv4_hdr *ipv4_hdr;
912 struct ipv6_hdr *ipv6_hdr;
913 struct vlan_hdr *vlan_hdr;
915 bool vlan_tagged = 0;
918 eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
919 len = sizeof(struct ether_hdr);
920 ethertype = rte_cpu_to_be_16(eth_hdr->ether_type);
922 /* Note: Valid only if VLAN stripping is disabled */
923 if (ethertype == ETHER_TYPE_VLAN) {
925 vlan_hdr = (struct vlan_hdr *)(eth_hdr + 1);
926 len += sizeof(struct vlan_hdr);
927 ethertype = rte_cpu_to_be_16(vlan_hdr->eth_proto);
930 if (ethertype == ETHER_TYPE_IPv4) {
931 packet_type |= RTE_PTYPE_L3_IPV4;
932 ipv4_hdr = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *, len);
933 if (ipv4_hdr->next_proto_id == IPPROTO_TCP)
934 packet_type |= RTE_PTYPE_L4_TCP;
935 else if (ipv4_hdr->next_proto_id == IPPROTO_UDP)
936 packet_type |= RTE_PTYPE_L4_UDP;
937 } else if (ethertype == ETHER_TYPE_IPv6) {
938 packet_type |= RTE_PTYPE_L3_IPV6;
939 ipv6_hdr = rte_pktmbuf_mtod_offset(m, struct ipv6_hdr *, len);
940 if (ipv6_hdr->proto == IPPROTO_TCP)
941 packet_type |= RTE_PTYPE_L4_TCP;
942 else if (ipv6_hdr->proto == IPPROTO_UDP)
943 packet_type |= RTE_PTYPE_L4_UDP;
947 packet_type |= RTE_PTYPE_L2_ETHER_VLAN;
949 packet_type |= RTE_PTYPE_L2_ETHER;
954 static inline uint32_t qede_rx_cqe_to_pkt_type_inner(uint16_t flags)
959 static const uint32_t
960 ptype_lkup_tbl[QEDE_PKT_TYPE_MAX] __rte_cache_aligned = {
961 [QEDE_PKT_TYPE_IPV4] = RTE_PTYPE_INNER_L3_IPV4 |
962 RTE_PTYPE_INNER_L2_ETHER,
963 [QEDE_PKT_TYPE_IPV6] = RTE_PTYPE_INNER_L3_IPV6 |
964 RTE_PTYPE_INNER_L2_ETHER,
965 [QEDE_PKT_TYPE_IPV4_TCP] = RTE_PTYPE_INNER_L3_IPV4 |
966 RTE_PTYPE_INNER_L4_TCP |
967 RTE_PTYPE_INNER_L2_ETHER,
968 [QEDE_PKT_TYPE_IPV6_TCP] = RTE_PTYPE_INNER_L3_IPV6 |
969 RTE_PTYPE_INNER_L4_TCP |
970 RTE_PTYPE_INNER_L2_ETHER,
971 [QEDE_PKT_TYPE_IPV4_UDP] = RTE_PTYPE_INNER_L3_IPV4 |
972 RTE_PTYPE_INNER_L4_UDP |
973 RTE_PTYPE_INNER_L2_ETHER,
974 [QEDE_PKT_TYPE_IPV6_UDP] = RTE_PTYPE_INNER_L3_IPV6 |
975 RTE_PTYPE_INNER_L4_UDP |
976 RTE_PTYPE_INNER_L2_ETHER,
977 /* Frags with no VLAN */
978 [QEDE_PKT_TYPE_IPV4_FRAG] = RTE_PTYPE_INNER_L3_IPV4 |
979 RTE_PTYPE_INNER_L4_FRAG |
980 RTE_PTYPE_INNER_L2_ETHER,
981 [QEDE_PKT_TYPE_IPV6_FRAG] = RTE_PTYPE_INNER_L3_IPV6 |
982 RTE_PTYPE_INNER_L4_FRAG |
983 RTE_PTYPE_INNER_L2_ETHER,
985 [QEDE_PKT_TYPE_IPV4_VLAN] = RTE_PTYPE_INNER_L3_IPV4 |
986 RTE_PTYPE_INNER_L2_ETHER_VLAN,
987 [QEDE_PKT_TYPE_IPV6_VLAN] = RTE_PTYPE_INNER_L3_IPV6 |
988 RTE_PTYPE_INNER_L2_ETHER_VLAN,
989 [QEDE_PKT_TYPE_IPV4_TCP_VLAN] = RTE_PTYPE_INNER_L3_IPV4 |
990 RTE_PTYPE_INNER_L4_TCP |
991 RTE_PTYPE_INNER_L2_ETHER_VLAN,
992 [QEDE_PKT_TYPE_IPV6_TCP_VLAN] = RTE_PTYPE_INNER_L3_IPV6 |
993 RTE_PTYPE_INNER_L4_TCP |
994 RTE_PTYPE_INNER_L2_ETHER_VLAN,
995 [QEDE_PKT_TYPE_IPV4_UDP_VLAN] = RTE_PTYPE_INNER_L3_IPV4 |
996 RTE_PTYPE_INNER_L4_UDP |
997 RTE_PTYPE_INNER_L2_ETHER_VLAN,
998 [QEDE_PKT_TYPE_IPV6_UDP_VLAN] = RTE_PTYPE_INNER_L3_IPV6 |
999 RTE_PTYPE_INNER_L4_UDP |
1000 RTE_PTYPE_INNER_L2_ETHER_VLAN,
1001 /* Frags with VLAN */
1002 [QEDE_PKT_TYPE_IPV4_VLAN_FRAG] = RTE_PTYPE_INNER_L3_IPV4 |
1003 RTE_PTYPE_INNER_L4_FRAG |
1004 RTE_PTYPE_INNER_L2_ETHER_VLAN,
1005 [QEDE_PKT_TYPE_IPV6_VLAN_FRAG] = RTE_PTYPE_INNER_L3_IPV6 |
1006 RTE_PTYPE_INNER_L4_FRAG |
1007 RTE_PTYPE_INNER_L2_ETHER_VLAN,
1010 /* Bits (0..3) provide the L3/L4 protocol type */
1011 /* Bits (4,5) provide frag and VLAN info */
1012 val = ((PARSING_AND_ERR_FLAGS_L3TYPE_MASK <<
1013 PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) |
1014 (PARSING_AND_ERR_FLAGS_L4PROTOCOL_MASK <<
1015 PARSING_AND_ERR_FLAGS_L4PROTOCOL_SHIFT) |
1016 (PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK <<
1017 PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT) |
1018 (PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK <<
1019 PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT)) & flags;
1021 if (val < QEDE_PKT_TYPE_MAX)
1022 return ptype_lkup_tbl[val];
1024 return RTE_PTYPE_UNKNOWN;
1027 static inline uint32_t qede_rx_cqe_to_pkt_type(uint16_t flags)
1032 static const uint32_t
1033 ptype_lkup_tbl[QEDE_PKT_TYPE_MAX] __rte_cache_aligned = {
1034 [QEDE_PKT_TYPE_IPV4] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L2_ETHER,
1035 [QEDE_PKT_TYPE_IPV6] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L2_ETHER,
1036 [QEDE_PKT_TYPE_IPV4_TCP] = RTE_PTYPE_L3_IPV4 |
1039 [QEDE_PKT_TYPE_IPV6_TCP] = RTE_PTYPE_L3_IPV6 |
1042 [QEDE_PKT_TYPE_IPV4_UDP] = RTE_PTYPE_L3_IPV4 |
1045 [QEDE_PKT_TYPE_IPV6_UDP] = RTE_PTYPE_L3_IPV6 |
1048 /* Frags with no VLAN */
1049 [QEDE_PKT_TYPE_IPV4_FRAG] = RTE_PTYPE_L3_IPV4 |
1052 [QEDE_PKT_TYPE_IPV6_FRAG] = RTE_PTYPE_L3_IPV6 |
1056 [QEDE_PKT_TYPE_IPV4_VLAN] = RTE_PTYPE_L3_IPV4 |
1057 RTE_PTYPE_L2_ETHER_VLAN,
1058 [QEDE_PKT_TYPE_IPV6_VLAN] = RTE_PTYPE_L3_IPV6 |
1059 RTE_PTYPE_L2_ETHER_VLAN,
1060 [QEDE_PKT_TYPE_IPV4_TCP_VLAN] = RTE_PTYPE_L3_IPV4 |
1062 RTE_PTYPE_L2_ETHER_VLAN,
1063 [QEDE_PKT_TYPE_IPV6_TCP_VLAN] = RTE_PTYPE_L3_IPV6 |
1065 RTE_PTYPE_L2_ETHER_VLAN,
1066 [QEDE_PKT_TYPE_IPV4_UDP_VLAN] = RTE_PTYPE_L3_IPV4 |
1068 RTE_PTYPE_L2_ETHER_VLAN,
1069 [QEDE_PKT_TYPE_IPV6_UDP_VLAN] = RTE_PTYPE_L3_IPV6 |
1071 RTE_PTYPE_L2_ETHER_VLAN,
1072 /* Frags with VLAN */
1073 [QEDE_PKT_TYPE_IPV4_VLAN_FRAG] = RTE_PTYPE_L3_IPV4 |
1075 RTE_PTYPE_L2_ETHER_VLAN,
1076 [QEDE_PKT_TYPE_IPV6_VLAN_FRAG] = RTE_PTYPE_L3_IPV6 |
1078 RTE_PTYPE_L2_ETHER_VLAN,
1081 /* Bits (0..3) provide the L3/L4 protocol type */
1082 /* Bits (4,5) provide frag and VLAN info */
1083 val = ((PARSING_AND_ERR_FLAGS_L3TYPE_MASK <<
1084 PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) |
1085 (PARSING_AND_ERR_FLAGS_L4PROTOCOL_MASK <<
1086 PARSING_AND_ERR_FLAGS_L4PROTOCOL_SHIFT) |
1087 (PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK <<
1088 PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT) |
1089 (PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK <<
1090 PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT)) & flags;
1092 if (val < QEDE_PKT_TYPE_MAX)
1093 return ptype_lkup_tbl[val];
1095 return RTE_PTYPE_UNKNOWN;
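/*
 * Example of the two lookups above (illustrative): the low-order parsing
 * flag bits select the table entry directly, so a non-fragmented IPv4/TCP
 * packet carrying a VLAN tag indexes QEDE_PKT_TYPE_IPV4_TCP_VLAN, while the
 * same packet without 802.1q tagging indexes QEDE_PKT_TYPE_IPV4_TCP. Any
 * index at or beyond QEDE_PKT_TYPE_MAX falls back to RTE_PTYPE_UNKNOWN.
 */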
1098 static inline uint8_t
1099 qede_check_notunn_csum_l3(struct rte_mbuf *m, uint16_t flag)
1101 struct ipv4_hdr *ip;
1106 val = ((PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK <<
1107 PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT) & flag);
1109 if (unlikely(val)) {
1110 m->packet_type = qede_rx_cqe_to_pkt_type(flag);
1111 if (RTE_ETH_IS_IPV4_HDR(m->packet_type)) {
1112 ip = rte_pktmbuf_mtod_offset(m, struct ipv4_hdr *,
1113 sizeof(struct ether_hdr));
1114 pkt_csum = ip->hdr_checksum;
1115 ip->hdr_checksum = 0;
1116 calc_csum = rte_ipv4_cksum(ip);
1117 ip->hdr_checksum = pkt_csum;
1118 return (calc_csum != pkt_csum);
1119 } else if (RTE_ETH_IS_IPV6_HDR(m->packet_type)) {
1126 static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq)
1128 ecore_chain_consume(&rxq->rx_bd_ring);
1133 qede_reuse_page(__rte_unused struct qede_dev *qdev,
1134 struct qede_rx_queue *rxq, struct qede_rx_entry *curr_cons)
1136 struct eth_rx_bd *rx_bd_prod = ecore_chain_produce(&rxq->rx_bd_ring);
1137 uint16_t idx = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
1138 struct qede_rx_entry *curr_prod;
1139 dma_addr_t new_mapping;
1141 curr_prod = &rxq->sw_rx_ring[idx];
1142 *curr_prod = *curr_cons;
1144 new_mapping = rte_mbuf_data_iova_default(curr_prod->mbuf) +
1145 curr_prod->page_offset;
1147 rx_bd_prod->addr.hi = rte_cpu_to_le_32(U64_HI(new_mapping));
1148 rx_bd_prod->addr.lo = rte_cpu_to_le_32(U64_LO(new_mapping));
1154 qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq,
1155 struct qede_dev *qdev, uint8_t count)
1157 struct qede_rx_entry *curr_cons;
1159 for (; count > 0; count--) {
1160 curr_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS(rxq)];
1161 qede_reuse_page(qdev, rxq, curr_cons);
1162 qede_rx_bd_ring_consume(rxq);
1167 qede_rx_process_tpa_cmn_cont_end_cqe(__rte_unused struct qede_dev *qdev,
1168 struct qede_rx_queue *rxq,
1169 uint8_t agg_index, uint16_t len)
1171 struct qede_agg_info *tpa_info;
1172 struct rte_mbuf *curr_frag; /* Pointer to currently filled TPA seg */
1175 /* Under certain conditions it is possible that FW may not consume
1176 * an additional or new BD. So the decision to consume the BD must be
1177 * made based on len_list[0].
1179 if (rte_le_to_cpu_16(len)) {
1180 tpa_info = &rxq->tpa_info[agg_index];
1181 cons_idx = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
1182 curr_frag = rxq->sw_rx_ring[cons_idx].mbuf;
1184 curr_frag->nb_segs = 1;
1185 curr_frag->pkt_len = rte_le_to_cpu_16(len);
1186 curr_frag->data_len = curr_frag->pkt_len;
1187 tpa_info->tpa_tail->next = curr_frag;
1188 tpa_info->tpa_tail = curr_frag;
1189 qede_rx_bd_ring_consume(rxq);
1190 if (unlikely(qede_alloc_rx_buffer(rxq) != 0)) {
1191 PMD_RX_LOG(ERR, rxq, "mbuf allocation fails\n");
1192 rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
1193 rxq->rx_alloc_errors++;
1199 qede_rx_process_tpa_cont_cqe(struct qede_dev *qdev,
1200 struct qede_rx_queue *rxq,
1201 struct eth_fast_path_rx_tpa_cont_cqe *cqe)
1203 PMD_RX_LOG(INFO, rxq, "TPA cont[%d] - len [%d]\n",
1204 cqe->tpa_agg_index, rte_le_to_cpu_16(cqe->len_list[0]));
1205 /* only len_list[0] will have value */
1206 qede_rx_process_tpa_cmn_cont_end_cqe(qdev, rxq, cqe->tpa_agg_index,
1211 qede_rx_process_tpa_end_cqe(struct qede_dev *qdev,
1212 struct qede_rx_queue *rxq,
1213 struct eth_fast_path_rx_tpa_end_cqe *cqe)
1215 struct rte_mbuf *rx_mb; /* Pointer to head of the chained agg */
1217 qede_rx_process_tpa_cmn_cont_end_cqe(qdev, rxq, cqe->tpa_agg_index,
1219 /* Update total length and frags based on end TPA */
1220 rx_mb = rxq->tpa_info[cqe->tpa_agg_index].tpa_head;
1221 /* TODO: Add Sanity Checks */
1222 rx_mb->nb_segs = cqe->num_of_bds;
1223 rx_mb->pkt_len = cqe->total_packet_len;
1225 PMD_RX_LOG(INFO, rxq, "TPA End[%d] reason %d cqe_len %d nb_segs %d"
1226 " pkt_len %d\n", cqe->tpa_agg_index, cqe->end_reason,
1227 rte_le_to_cpu_16(cqe->len_list[0]), rx_mb->nb_segs,
1231 static inline uint32_t qede_rx_cqe_to_tunn_pkt_type(uint16_t flags)
1236 static const uint32_t
1237 ptype_tunn_lkup_tbl[QEDE_PKT_TYPE_TUNN_MAX_TYPE] __rte_cache_aligned = {
1238 [QEDE_PKT_TYPE_UNKNOWN] = RTE_PTYPE_UNKNOWN,
1239 [QEDE_PKT_TYPE_TUNN_GENEVE] = RTE_PTYPE_TUNNEL_GENEVE,
1240 [QEDE_PKT_TYPE_TUNN_GRE] = RTE_PTYPE_TUNNEL_GRE,
1241 [QEDE_PKT_TYPE_TUNN_VXLAN] = RTE_PTYPE_TUNNEL_VXLAN,
1242 [QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_GENEVE] =
1243 RTE_PTYPE_TUNNEL_GENEVE,
1244 [QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_GRE] =
1245 RTE_PTYPE_TUNNEL_GRE,
1246 [QEDE_PKT_TYPE_TUNN_L2_TENID_NOEXIST_VXLAN] =
1247 RTE_PTYPE_TUNNEL_VXLAN,
1248 [QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_GENEVE] =
1249 RTE_PTYPE_TUNNEL_GENEVE,
1250 [QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_GRE] =
1251 RTE_PTYPE_TUNNEL_GRE,
1252 [QEDE_PKT_TYPE_TUNN_L2_TENID_EXIST_VXLAN] =
1253 RTE_PTYPE_TUNNEL_VXLAN,
1254 [QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_GENEVE] =
1255 RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L3_IPV4,
1256 [QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_GRE] =
1257 RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L3_IPV4,
1258 [QEDE_PKT_TYPE_TUNN_IPV4_TENID_NOEXIST_VXLAN] =
1259 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L3_IPV4,
1260 [QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_GENEVE] =
1261 RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L3_IPV4,
1262 [QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_GRE] =
1263 RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L3_IPV4,
1264 [QEDE_PKT_TYPE_TUNN_IPV4_TENID_EXIST_VXLAN] =
1265 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L3_IPV4,
1266 [QEDE_PKT_TYPE_TUNN_IPV6_TENID_NOEXIST_GENEVE] =
1267 RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L3_IPV6,
1268 [QEDE_PKT_TYPE_TUNN_IPV6_TENID_NOEXIST_GRE] =
1269 RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L3_IPV6,
1270 [QEDE_PKT_TYPE_TUNN_IPV6_TENID_NOEXIST_VXLAN] =
1271 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L3_IPV6,
1272 [QEDE_PKT_TYPE_TUNN_IPV6_TENID_EXIST_GENEVE] =
1273 RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L3_IPV6,
1274 [QEDE_PKT_TYPE_TUNN_IPV6_TENID_EXIST_GRE] =
1275 RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_L3_IPV6,
1276 [QEDE_PKT_TYPE_TUNN_IPV6_TENID_EXIST_VXLAN] =
1277 RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L3_IPV6,
1280 /* Cover bits[4-0] to include tunn_type and next protocol */
1281 val = ((ETH_TUNNEL_PARSING_FLAGS_TYPE_MASK <<
1282 ETH_TUNNEL_PARSING_FLAGS_TYPE_SHIFT) |
1283 (ETH_TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_MASK <<
1284 ETH_TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_SHIFT)) & flags;
1286 if (val < QEDE_PKT_TYPE_TUNN_MAX_TYPE)
1287 return ptype_tunn_lkup_tbl[val];
1289 return RTE_PTYPE_UNKNOWN;
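/*
 * Example (illustrative): bits [4:0] of the tunnel parsing flags combine
 * the tunnel type with the next-protocol field, so a VXLAN-encapsulated
 * frame whose outer header is IPv4 resolves to
 * RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L3_IPV4 through the table above, and
 * anything outside the table reports RTE_PTYPE_UNKNOWN.
 */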
1293 qede_process_sg_pkts(void *p_rxq, struct rte_mbuf *rx_mb,
1294 uint8_t num_segs, uint16_t pkt_len)
1296 struct qede_rx_queue *rxq = p_rxq;
1297 struct qede_dev *qdev = rxq->qdev;
1298 register struct rte_mbuf *seg1 = NULL;
1299 register struct rte_mbuf *seg2 = NULL;
1300 uint16_t sw_rx_index;
1305 cur_size = pkt_len > rxq->rx_buf_size ? rxq->rx_buf_size :
1307 if (unlikely(!cur_size)) {
1308 PMD_RX_LOG(ERR, rxq, "Length is 0 while %u BDs"
1309 " left for mapping jumbo\n", num_segs);
1310 qede_recycle_rx_bd_ring(rxq, qdev, num_segs);
1313 sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
1314 seg2 = rxq->sw_rx_ring[sw_rx_index].mbuf;
1315 qede_rx_bd_ring_consume(rxq);
1316 pkt_len -= cur_size;
1317 seg2->data_len = cur_size;
1327 #ifdef RTE_LIBRTE_QEDE_DEBUG_RX
1329 print_rx_bd_info(struct rte_mbuf *m, struct qede_rx_queue *rxq,
1332 PMD_RX_LOG(INFO, rxq,
1333 "len 0x%04x bf 0x%04x hash_val 0x%x"
1334 " ol_flags 0x%04lx l2=%s l3=%s l4=%s tunn=%s"
1335 " inner_l2=%s inner_l3=%s inner_l4=%s\n",
1336 m->data_len, bitfield, m->hash.rss,
1337 (unsigned long)m->ol_flags,
1338 rte_get_ptype_l2_name(m->packet_type),
1339 rte_get_ptype_l3_name(m->packet_type),
1340 rte_get_ptype_l4_name(m->packet_type),
1341 rte_get_ptype_tunnel_name(m->packet_type),
1342 rte_get_ptype_inner_l2_name(m->packet_type),
1343 rte_get_ptype_inner_l3_name(m->packet_type),
1344 rte_get_ptype_inner_l4_name(m->packet_type));
1349 qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
1351 struct qede_rx_queue *rxq = p_rxq;
1352 struct qede_dev *qdev = rxq->qdev;
1353 struct ecore_dev *edev = &qdev->edev;
1354 uint16_t hw_comp_cons, sw_comp_cons, sw_rx_index;
1355 uint16_t rx_pkt = 0;
1356 union eth_rx_cqe *cqe;
1357 struct eth_fast_path_rx_reg_cqe *fp_cqe = NULL;
1358 register struct rte_mbuf *rx_mb = NULL;
1359 register struct rte_mbuf *seg1 = NULL;
1360 enum eth_rx_cqe_type cqe_type;
1361 uint16_t pkt_len = 0; /* Sum of all BD segments */
1362 uint16_t len; /* Length of first BD */
1363 uint8_t num_segs = 1;
1364 uint16_t preload_idx;
1365 uint16_t parse_flag;
1366 #ifdef RTE_LIBRTE_QEDE_DEBUG_RX
1367 uint8_t bitfield_val;
1369 uint8_t tunn_parse_flag;
1371 struct eth_fast_path_rx_tpa_start_cqe *cqe_start_tpa;
1373 uint32_t packet_type;
1376 uint8_t offset, tpa_agg_idx, flags;
1377 struct qede_agg_info *tpa_info = NULL;
1380 hw_comp_cons = rte_le_to_cpu_16(*rxq->hw_cons_ptr);
1381 sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
1385 if (hw_comp_cons == sw_comp_cons)
1388 while (sw_comp_cons != hw_comp_cons) {
1390 packet_type = RTE_PTYPE_UNKNOWN;
1392 tpa_start_flg = false;
1395 /* Get the CQE from the completion ring */
1397 (union eth_rx_cqe *)ecore_chain_consume(&rxq->rx_comp_ring);
1398 cqe_type = cqe->fast_path_regular.type;
1399 PMD_RX_LOG(INFO, rxq, "Rx CQE type %d\n", cqe_type);
1402 case ETH_RX_CQE_TYPE_REGULAR:
1403 fp_cqe = &cqe->fast_path_regular;
1405 case ETH_RX_CQE_TYPE_TPA_START:
1406 cqe_start_tpa = &cqe->fast_path_tpa_start;
1407 tpa_info = &rxq->tpa_info[cqe_start_tpa->tpa_agg_index];
1408 tpa_start_flg = true;
1409 /* Mark it as LRO packet */
1410 ol_flags |= PKT_RX_LRO;
1411 /* In split mode, seg_len is same as len_on_first_bd
1412 * and ext_bd_len_list will be empty since there are
1413 * no additional buffers
1415 PMD_RX_LOG(INFO, rxq,
1416 "TPA start[%d] - len_on_first_bd %d header %d"
1417 " [bd_list[0] %d], [seg_len %d]\n",
1418 cqe_start_tpa->tpa_agg_index,
1419 rte_le_to_cpu_16(cqe_start_tpa->len_on_first_bd),
1420 cqe_start_tpa->header_len,
1421 rte_le_to_cpu_16(cqe_start_tpa->ext_bd_len_list[0]),
1422 rte_le_to_cpu_16(cqe_start_tpa->seg_len));
1425 case ETH_RX_CQE_TYPE_TPA_CONT:
1426 qede_rx_process_tpa_cont_cqe(qdev, rxq,
1427 &cqe->fast_path_tpa_cont);
1429 case ETH_RX_CQE_TYPE_TPA_END:
1430 qede_rx_process_tpa_end_cqe(qdev, rxq,
1431 &cqe->fast_path_tpa_end);
1432 tpa_agg_idx = cqe->fast_path_tpa_end.tpa_agg_index;
1433 tpa_info = &rxq->tpa_info[tpa_agg_idx];
1434 rx_mb = rxq->tpa_info[tpa_agg_idx].tpa_head;
1436 case ETH_RX_CQE_TYPE_SLOW_PATH:
1437 PMD_RX_LOG(INFO, rxq, "Got unexpected slowpath CQE\n");
1438 ecore_eth_cqe_completion(
1439 &edev->hwfns[rxq->queue_id % edev->num_hwfns],
1440 (struct eth_slow_path_rx_cqe *)cqe);
1446 /* Get the data from the SW ring */
1447 sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
1448 rx_mb = rxq->sw_rx_ring[sw_rx_index].mbuf;
1449 assert(rx_mb != NULL);
1451 /* Handle regular CQE or TPA start CQE */
1452 if (!tpa_start_flg) {
1453 parse_flag = rte_le_to_cpu_16(fp_cqe->pars_flags.flags);
1454 offset = fp_cqe->placement_offset;
1455 len = rte_le_to_cpu_16(fp_cqe->len_on_first_bd);
1456 pkt_len = rte_le_to_cpu_16(fp_cqe->pkt_len);
1457 vlan_tci = rte_le_to_cpu_16(fp_cqe->vlan_tag);
1458 rss_hash = rte_le_to_cpu_32(fp_cqe->rss_hash);
1459 #ifdef RTE_LIBRTE_QEDE_DEBUG_RX
1460 bitfield_val = fp_cqe->bitfields;
1464 rte_le_to_cpu_16(cqe_start_tpa->pars_flags.flags);
1465 offset = cqe_start_tpa->placement_offset;
1466 /* seg_len = len_on_first_bd */
1467 len = rte_le_to_cpu_16(cqe_start_tpa->len_on_first_bd);
1468 vlan_tci = rte_le_to_cpu_16(cqe_start_tpa->vlan_tag);
1469 #ifdef RTE_LIBRTE_QEDE_DEBUG_RX
1470 bitfield_val = cqe_start_tpa->bitfields;
1472 rss_hash = rte_le_to_cpu_32(cqe_start_tpa->rss_hash);
1474 if (qede_tunn_exist(parse_flag)) {
1475 PMD_RX_LOG(INFO, rxq, "Rx tunneled packet\n");
1476 if (unlikely(qede_check_tunn_csum_l4(parse_flag))) {
1477 PMD_RX_LOG(ERR, rxq,
1478 "L4 csum failed, flags = 0x%x\n",
1480 rxq->rx_hw_errors++;
1481 ol_flags |= PKT_RX_L4_CKSUM_BAD;
1483 ol_flags |= PKT_RX_L4_CKSUM_GOOD;
1486 if (unlikely(qede_check_tunn_csum_l3(parse_flag))) {
1487 PMD_RX_LOG(ERR, rxq,
1488 "Outer L3 csum failed, flags = 0x%x\n",
1490 rxq->rx_hw_errors++;
1491 ol_flags |= PKT_RX_EIP_CKSUM_BAD;
1493 ol_flags |= PKT_RX_IP_CKSUM_GOOD;
1497 flags = cqe_start_tpa->tunnel_pars_flags.flags;
1499 flags = fp_cqe->tunnel_pars_flags.flags;
1500 tunn_parse_flag = flags;
1504 qede_rx_cqe_to_tunn_pkt_type(tunn_parse_flag);
1508 qede_rx_cqe_to_pkt_type_inner(parse_flag);
1510 /* Outer L3/L4 types are not available in the CQE */
1511 packet_type |= qede_rx_cqe_to_pkt_type_outer(rx_mb);
1513 /* Outer L3/L4 types are not available in the CQE.
1514 * Need to add the offset to parse correctly,
1516 rx_mb->data_off = offset + RTE_PKTMBUF_HEADROOM;
1517 packet_type |= qede_rx_cqe_to_pkt_type_outer(rx_mb);
1519 packet_type |= qede_rx_cqe_to_pkt_type(parse_flag);
1522 /* Common handling for non-tunnel packets and for inner
1523 * headers in the case of tunnel.
1525 if (unlikely(qede_check_notunn_csum_l4(parse_flag))) {
1526 PMD_RX_LOG(ERR, rxq,
1527 "L4 csum failed, flags = 0x%x\n",
1529 rxq->rx_hw_errors++;
1530 ol_flags |= PKT_RX_L4_CKSUM_BAD;
1532 ol_flags |= PKT_RX_L4_CKSUM_GOOD;
1534 if (unlikely(qede_check_notunn_csum_l3(rx_mb, parse_flag))) {
1535 PMD_RX_LOG(ERR, rxq, "IP csum failed, flags = 0x%x\n",
1537 rxq->rx_hw_errors++;
1538 ol_flags |= PKT_RX_IP_CKSUM_BAD;
1540 ol_flags |= PKT_RX_IP_CKSUM_GOOD;
1543 if (CQE_HAS_VLAN(parse_flag) ||
1544 CQE_HAS_OUTER_VLAN(parse_flag)) {
1545 /* Note: FW doesn't indicate Q-in-Q packet */
1546 ol_flags |= PKT_RX_VLAN;
1547 if (qdev->vlan_strip_flg) {
1548 ol_flags |= PKT_RX_VLAN_STRIPPED;
1549 rx_mb->vlan_tci = vlan_tci;
1554 if (qdev->rss_enable) {
1555 ol_flags |= PKT_RX_RSS_HASH;
1556 rx_mb->hash.rss = rss_hash;
1559 if (unlikely(qede_alloc_rx_buffer(rxq) != 0)) {
1560 PMD_RX_LOG(ERR, rxq,
1561 "New buffer allocation failed, "
1562 "dropping incoming packet\n");
1563 qede_recycle_rx_bd_ring(rxq, qdev, fp_cqe->bd_num);
1564 rte_eth_devices[rxq->port_id].
1565 data->rx_mbuf_alloc_failed++;
1566 rxq->rx_alloc_errors++;
1569 qede_rx_bd_ring_consume(rxq);
1571 if (!tpa_start_flg && fp_cqe->bd_num > 1) {
1572 PMD_RX_LOG(DEBUG, rxq, "Jumbo-over-BD packet: %02x BDs"
1573 " len on first: %04x Total Len: %04x",
1574 fp_cqe->bd_num, len, pkt_len);
1575 num_segs = fp_cqe->bd_num - 1;
1577 if (qede_process_sg_pkts(p_rxq, seg1, num_segs,
1580 for (j = 0; j < num_segs; j++) {
1581 if (qede_alloc_rx_buffer(rxq)) {
1582 PMD_RX_LOG(ERR, rxq,
1583 "Buffer allocation failed");
1584 rte_eth_devices[rxq->port_id].
1585 data->rx_mbuf_alloc_failed++;
1586 rxq->rx_alloc_errors++;
1592 rxq->rx_segs++; /* for the first segment */
1594 /* Prefetch next mbuf while processing current one. */
1595 preload_idx = rxq->sw_rx_cons & NUM_RX_BDS(rxq);
1596 rte_prefetch0(rxq->sw_rx_ring[preload_idx].mbuf);
1598 /* Update rest of the MBUF fields */
1599 rx_mb->data_off = offset + RTE_PKTMBUF_HEADROOM;
1600 rx_mb->port = rxq->port_id;
1601 rx_mb->ol_flags = ol_flags;
1602 rx_mb->data_len = len;
1603 rx_mb->packet_type = packet_type;
1604 #ifdef RTE_LIBRTE_QEDE_DEBUG_RX
1605 print_rx_bd_info(rx_mb, rxq, bitfield_val);
1607 if (!tpa_start_flg) {
1608 rx_mb->nb_segs = fp_cqe->bd_num;
1609 rx_mb->pkt_len = pkt_len;
1611 /* store ref to the updated mbuf */
1612 tpa_info->tpa_head = rx_mb;
1613 tpa_info->tpa_tail = tpa_info->tpa_head;
1615 rte_prefetch1(rte_pktmbuf_mtod(rx_mb, void *));
1617 if (!tpa_start_flg) {
1618 rx_pkts[rx_pkt] = rx_mb;
1622 ecore_chain_recycle_consumed(&rxq->rx_comp_ring);
1623 sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring);
1624 if (rx_pkt == nb_pkts) {
1625 PMD_RX_LOG(DEBUG, rxq,
1626 "Budget reached nb_pkts=%u received=%u",
1632 qede_update_rx_prod(qdev, rxq);
1634 rxq->rcv_pkts += rx_pkt;
1636 PMD_RX_LOG(DEBUG, rxq, "rx_pkts=%u core=%d", rx_pkt, rte_lcore_id());
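/*
 * Illustrative sketch only: an application-side receive loop that reaches
 * qede_recv_pkts() through the burst API. The helper name, burst size and
 * the trivial "free everything" processing are assumptions for the example.
 */
static __rte_unused void
example_rx_loop(uint16_t port_id, uint16_t queue_id)
{
	struct rte_mbuf *pkts[32];
	uint16_t nb, i;

	nb = rte_eth_rx_burst(port_id, queue_id, pkts, RTE_DIM(pkts));
	for (i = 0; i < nb; i++)
		rte_pktmbuf_free(pkts[i]);	/* a real app would process the packet */
}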
1642 /* Populate scatter gather buffer descriptor fields */
1643 static inline uint16_t
1644 qede_encode_sg_bd(struct qede_tx_queue *p_txq, struct rte_mbuf *m_seg,
1645 struct eth_tx_2nd_bd **bd2, struct eth_tx_3rd_bd **bd3,
1648 struct qede_tx_queue *txq = p_txq;
1649 struct eth_tx_bd *tx_bd = NULL;
1651 uint16_t nb_segs = 0;
1653 /* Check for scattered buffers */
1655 if (start_seg == 0) {
1657 *bd2 = (struct eth_tx_2nd_bd *)
1658 ecore_chain_produce(&txq->tx_pbl);
1659 memset(*bd2, 0, sizeof(struct eth_tx_2nd_bd));
1662 mapping = rte_mbuf_data_iova(m_seg);
1663 QEDE_BD_SET_ADDR_LEN(*bd2, mapping, m_seg->data_len);
1664 PMD_TX_LOG(DEBUG, txq, "BD2 len %04x", m_seg->data_len);
1665 } else if (start_seg == 1) {
1667 *bd3 = (struct eth_tx_3rd_bd *)
1668 ecore_chain_produce(&txq->tx_pbl);
1669 memset(*bd3, 0, sizeof(struct eth_tx_3rd_bd));
1672 mapping = rte_mbuf_data_iova(m_seg);
1673 QEDE_BD_SET_ADDR_LEN(*bd3, mapping, m_seg->data_len);
1674 PMD_TX_LOG(DEBUG, txq, "BD3 len %04x", m_seg->data_len);
1676 tx_bd = (struct eth_tx_bd *)
1677 ecore_chain_produce(&txq->tx_pbl);
1678 memset(tx_bd, 0, sizeof(*tx_bd));
1680 mapping = rte_mbuf_data_iova(m_seg);
1681 QEDE_BD_SET_ADDR_LEN(tx_bd, mapping, m_seg->data_len);
1682 PMD_TX_LOG(DEBUG, txq, "BD len %04x", m_seg->data_len);
1685 m_seg = m_seg->next;
1688 /* Return total scattered buffers */
1692 #ifdef RTE_LIBRTE_QEDE_DEBUG_TX
1694 print_tx_bd_info(struct qede_tx_queue *txq,
1695 struct eth_tx_1st_bd *bd1,
1696 struct eth_tx_2nd_bd *bd2,
1697 struct eth_tx_3rd_bd *bd3,
1698 uint64_t tx_ol_flags)
1700 char ol_buf[256] = { 0 }; /* for verbose prints */
1703 PMD_TX_LOG(INFO, txq,
1704 "BD1: nbytes=0x%04x nbds=0x%04x bd_flags=0x%04x bf=0x%04x",
1705 rte_cpu_to_le_16(bd1->nbytes), bd1->data.nbds,
1706 bd1->data.bd_flags.bitfields,
1707 rte_cpu_to_le_16(bd1->data.bitfields));
1709 PMD_TX_LOG(INFO, txq,
1710 "BD2: nbytes=0x%04x bf1=0x%04x bf2=0x%04x tunn_ip=0x%04x\n",
1711 rte_cpu_to_le_16(bd2->nbytes), bd2->data.bitfields1,
1712 bd2->data.bitfields2, bd2->data.tunn_ip_size);
1714 PMD_TX_LOG(INFO, txq,
1715 "BD3: nbytes=0x%04x bf=0x%04x MSS=0x%04x "
1716 "tunn_l4_hdr_start_offset_w=0x%04x tunn_hdr_size=0x%04x\n",
1717 rte_cpu_to_le_16(bd3->nbytes),
1718 rte_cpu_to_le_16(bd3->data.bitfields),
1719 rte_cpu_to_le_16(bd3->data.lso_mss),
1720 bd3->data.tunn_l4_hdr_start_offset_w,
1721 bd3->data.tunn_hdr_size_w);
1723 rte_get_tx_ol_flag_list(tx_ol_flags, ol_buf, sizeof(ol_buf));
1724 PMD_TX_LOG(INFO, txq, "TX offloads = %s\n", ol_buf);
1728 /* TX prepare to check that packets meet TX conditions */
1730 #ifdef RTE_LIBRTE_QEDE_DEBUG_TX
1731 qede_xmit_prep_pkts(void *p_txq, struct rte_mbuf **tx_pkts,
1734 struct qede_tx_queue *txq = p_txq;
1736 qede_xmit_prep_pkts(__rte_unused void *p_txq, struct rte_mbuf **tx_pkts,
1743 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
1747 for (i = 0; i < nb_pkts; i++) {
1749 ol_flags = m->ol_flags;
1750 if (ol_flags & PKT_TX_TCP_SEG) {
1751 if (m->nb_segs >= ETH_TX_MAX_BDS_PER_LSO_PACKET) {
1752 rte_errno = EINVAL;
1755 /* TBD: confirm it's ~9700B for both? */
1756 if (m->tso_segsz > ETH_TX_MAX_NON_LSO_PKT_LEN) {
1757 rte_errno = EINVAL;
1761 if (m->nb_segs >= ETH_TX_MAX_BDS_PER_NON_LSO_PACKET) {
1762 rte_errno = EINVAL;
1766 if (ol_flags & QEDE_TX_OFFLOAD_NOTSUP_MASK) {
1767 /* We support only limited tunnel protocols */
1768 if (ol_flags & PKT_TX_TUNNEL_MASK) {
1771 temp = ol_flags & PKT_TX_TUNNEL_MASK;
1772 if (temp == PKT_TX_TUNNEL_VXLAN ||
1773 temp == PKT_TX_TUNNEL_MPLSINUDP)
1777 rte_errno = ENOTSUP;
1781 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
1782 ret = rte_validate_tx_offload(m);
1790 #ifdef RTE_LIBRTE_QEDE_DEBUG_TX
1791 if (unlikely(i != nb_pkts))
1792 PMD_TX_LOG(ERR, txq, "TX prepare failed for %u\n",
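/*
 * Illustrative sketch only: how an application is expected to pair the
 * prepare and burst calls so that the checks above run before transmission.
 * Helper and variable names are assumptions for the example.
 */
static __rte_unused uint16_t
example_tx_burst(uint16_t port_id, uint16_t queue_id,
		 struct rte_mbuf **pkts, uint16_t nb_pkts)
{
	uint16_t nb_prep;

	/* qede_xmit_prep_pkts() flags unsupported packets via rte_errno */
	nb_prep = rte_eth_tx_prepare(port_id, queue_id, pkts, nb_pkts);

	/* Only transmit the packets that passed the prepare checks */
	return rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);
}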
1798 #define MPLSINUDP_HDR_SIZE (12)
1800 #ifdef RTE_LIBRTE_QEDE_DEBUG_TX
1802 qede_mpls_tunn_tx_sanity_check(struct rte_mbuf *mbuf,
1803 struct qede_tx_queue *txq)
1805 if (((mbuf->outer_l2_len + mbuf->outer_l3_len) / 2) > 0xff)
1806 PMD_TX_LOG(ERR, txq, "tunn_l4_hdr_start_offset overflow\n");
1807 if (((mbuf->outer_l2_len + mbuf->outer_l3_len +
1808 MPLSINUDP_HDR_SIZE) / 2) > 0xff)
1809 PMD_TX_LOG(ERR, txq, "tunn_hdr_size overflow\n");
1810 if (((mbuf->l2_len - MPLSINUDP_HDR_SIZE) / 2) >
1811 ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK)
1812 PMD_TX_LOG(ERR, txq, "inner_l2_hdr_size overflow\n");
1813 if (((mbuf->l2_len - MPLSINUDP_HDR_SIZE + mbuf->l3_len) / 2) >
1814 ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK)
1815 PMD_TX_LOG(ERR, txq, "inner_l4_hdr_offset overflow\n");
1820 qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
1822 struct qede_tx_queue *txq = p_txq;
1823 struct qede_dev *qdev = txq->qdev;
1824 struct ecore_dev *edev = &qdev->edev;
1825 struct rte_mbuf *mbuf;
1826 struct rte_mbuf *m_seg = NULL;
1827 uint16_t nb_tx_pkts;
1831 uint16_t nb_pkt_sent = 0;
1835 __rte_unused bool tunn_flg;
1836 bool tunn_ipv6_ext_flg;
1837 struct eth_tx_1st_bd *bd1;
1838 struct eth_tx_2nd_bd *bd2;
1839 struct eth_tx_3rd_bd *bd3;
1840 uint64_t tx_ol_flags;
1844 uint8_t bd1_bd_flags_bf;
1853 uint8_t tunn_l4_hdr_start_offset;
1854 uint8_t tunn_hdr_size;
1855 uint8_t inner_l2_hdr_size;
1856 uint16_t inner_l4_hdr_offset;
1858 if (unlikely(txq->nb_tx_avail < txq->tx_free_thresh)) {
1859 PMD_TX_LOG(DEBUG, txq, "send=%u avail=%u free_thresh=%u",
1860 nb_pkts, txq->nb_tx_avail, txq->tx_free_thresh);
1861 qede_process_tx_compl(edev, txq);
1864 nb_tx_pkts = nb_pkts;
1865 bd_prod = rte_cpu_to_le_16(ecore_chain_get_prod_idx(&txq->tx_pbl));
1866 while (nb_tx_pkts--) {
1867 /* Init flags/values */
1877 bd1_bd_flags_bf = 0;
1882 mplsoudp_flg = false;
1883 tunn_ipv6_ext_flg = false;
1885 tunn_l4_hdr_start_offset = 0;
1890 /* Check minimum TX BDS availability against available BDs */
1891 if (unlikely(txq->nb_tx_avail < mbuf->nb_segs))
1894 tx_ol_flags = mbuf->ol_flags;
1895 bd1_bd_flags_bf |= 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
1897 /* TX prepare would have already checked supported tunnel Tx
1898 * offloads. Don't rely on pkt_type marked by Rx, instead use
1899 * tx_ol_flags to decide.
1901 if (((tx_ol_flags & PKT_TX_TUNNEL_MASK) ==
1902 PKT_TX_TUNNEL_VXLAN) ||
1903 ((tx_ol_flags & PKT_TX_TUNNEL_MASK) ==
1904 PKT_TX_TUNNEL_MPLSINUDP)) {
1905 /* Check against max which is Tunnel IPv6 + ext */
1906 if (unlikely(txq->nb_tx_avail <
1907 ETH_TX_MIN_BDS_PER_TUNN_IPV6_WITH_EXT_PKT))
1910 /* First indicate its a tunnel pkt */
1911 bd1_bf |= ETH_TX_DATA_1ST_BD_TUNN_FLAG_MASK <<
1912 ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
1913 /* Legacy FW had flipped behavior with regard to this bit,
1914 * i.e. it needed to be set to prevent FW from touching
1915 * encapsulated packets when it didn't need to.
1917 if (unlikely(txq->is_legacy)) {
1919 ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT;
1922 /* Outer IP checksum offload */
1923 if (tx_ol_flags & (PKT_TX_OUTER_IP_CKSUM |
1924 PKT_TX_OUTER_IPV4)) {
1926 ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_MASK <<
1927 ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT;
1931 * Currently, only inner checksum offload in MPLS-in-UDP
1932 * tunnel with one MPLS label is supported. Both outer
1933 * and inner layer lengths need to be provided in the mbuf.
1936 if ((tx_ol_flags & PKT_TX_TUNNEL_MASK) ==
1937 PKT_TX_TUNNEL_MPLSINUDP) {
1938 mplsoudp_flg = true;
1939 #ifdef RTE_LIBRTE_QEDE_DEBUG_TX
1940 qede_mpls_tunn_tx_sanity_check(mbuf, txq);
1942 /* Outer L4 offset in two byte words */
1943 tunn_l4_hdr_start_offset =
1944 (mbuf->outer_l2_len + mbuf->outer_l3_len) / 2;
1945 /* Tunnel header size in two byte words */
1946 tunn_hdr_size = (mbuf->outer_l2_len +
1947 mbuf->outer_l3_len +
1948 MPLSINUDP_HDR_SIZE) / 2;
1949 /* Inner L2 header size in two byte words */
1950 inner_l2_hdr_size = (mbuf->l2_len -
1951 MPLSINUDP_HDR_SIZE) / 2;
1952 /* Inner L4 header offset from the beginning
1953 * of the inner packet in two byte words
1955 inner_l4_hdr_offset = (mbuf->l2_len -
1956 MPLSINUDP_HDR_SIZE + mbuf->l3_len) / 2;
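/* Worked example (illustrative, assuming the usual DPDK convention that
 * l2_len covers the 12-byte UDP+MPLS tunnel header plus the inner
 * Ethernet header): outer Ethernet (14) + outer IPv4 (20) gives
 * tunn_l4_hdr_start_offset = 34 / 2 = 17 words and
 * tunn_hdr_size = (34 + 12) / 2 = 23 words; with l2_len = 26 and a
 * 20-byte inner IPv4 header, inner_l2_hdr_size = (26 - 12) / 2 = 7 words
 * and inner_l4_hdr_offset = (26 - 12 + 20) / 2 = 17 words.
 */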
1958 /* Inner L2 size and address type */
1959 bd2_bf1 |= (inner_l2_hdr_size &
1960 ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK) <<
1961 ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_SHIFT;
1962 bd2_bf1 |= (UNICAST_ADDRESS &
1963 ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_MASK) <<
1964 ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_SHIFT;
1965 /* Treated as IPv6+Ext */
1967 1 << ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_SHIFT;
1969 /* Mark inner IPv6 if present */
1970 if (tx_ol_flags & PKT_TX_IPV6)
1972 1 << ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_SHIFT;
1974 /* Inner L4 offsets */
1975 if ((tx_ol_flags & (PKT_TX_IPV4 | PKT_TX_IPV6)) &&
1976 (tx_ol_flags & (PKT_TX_UDP_CKSUM |
1977 PKT_TX_TCP_CKSUM))) {
1978 /* Determines if BD3 is needed */
1979 tunn_ipv6_ext_flg = true;
1980 if ((tx_ol_flags & PKT_TX_L4_MASK) ==
1983 1 << ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT;
1986 /* TODO other pseudo checksum modes are
1990 ETH_L4_PSEUDO_CSUM_CORRECT_LENGTH <<
1991 ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT;
1992 bd2_bf2 |= (inner_l4_hdr_offset &
1993 ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK) <<
1994 ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT;
1996 } /* End MPLSoUDP */
1997 } /* End Tunnel handling */
1999 if (tx_ol_flags & PKT_TX_TCP_SEG) {
2001 if (unlikely(txq->nb_tx_avail <
2002 ETH_TX_MIN_BDS_PER_LSO_PKT))
2004 /* For LSO, packet header and payload must reside on
2005 * buffers pointed to by different BDs. Using BD1 for HDR
2006 * and BD2 onwards for data.
2008 hdr_size = mbuf->l2_len + mbuf->l3_len + mbuf->l4_len;
2010 hdr_size += mbuf->outer_l2_len +
2013 bd1_bd_flags_bf |= 1 << ETH_TX_1ST_BD_FLAGS_LSO_SHIFT;
2015 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
2016 /* PKT_TX_TCP_SEG implies PKT_TX_TCP_CKSUM */
2018 1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
2019 mss = rte_cpu_to_le_16(mbuf->tso_segsz);
2020 /* Using one header BD */
2021 bd3_bf |= rte_cpu_to_le_16(1 <<
2022 ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT);
2024 if (unlikely(txq->nb_tx_avail <
2025 ETH_TX_MIN_BDS_PER_NON_LSO_PKT))
2028 (mbuf->pkt_len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK)
2029 << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
2032 /* Descriptor based VLAN insertion */
2033 if (tx_ol_flags & (PKT_TX_VLAN_PKT | PKT_TX_QINQ_PKT)) {
2034 vlan = rte_cpu_to_le_16(mbuf->vlan_tci);
2036 1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT;
2039 /* Offload the IP checksum in the hardware */
2040 if (tx_ol_flags & PKT_TX_IP_CKSUM) {
2042 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT;
2043 /* There's no DPDK flag to request outer-L4 csum
2044 * offload. But in the case of tunnels, if inner L3 or L4
2045 * csum offload is requested then we need to force
2046 * recalculation of the L4 tunnel header csum as well.
2050 ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK <<
2051 ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT;
2055 /* L4 checksum offload (tcp or udp) */
2056 if ((tx_ol_flags & (PKT_TX_IPV4 | PKT_TX_IPV6)) &&
2057 (tx_ol_flags & (PKT_TX_UDP_CKSUM | PKT_TX_TCP_CKSUM))) {
2059 1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT;
2060 /* There's no DPDK flag to request outer-L4 csum
2061 * offload. But in the case of tunnels, if inner L3 or L4
2062 * csum offload is requested then we need to force
2063 * recalculation of the L4 tunnel header csum as well.
2067 ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK <<
2068 ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT;
2072 /* Fill the entry in the SW ring and the BDs in the FW ring */
2074 txq->sw_tx_ring[idx].mbuf = mbuf;
2077 bd1 = (struct eth_tx_1st_bd *)ecore_chain_produce(&txq->tx_pbl);
2078 memset(bd1, 0, sizeof(struct eth_tx_1st_bd));
2081 /* Map MBUF linear data for DMA and set in the BD1 */
2082 QEDE_BD_SET_ADDR_LEN(bd1, rte_mbuf_data_iova(mbuf),
2084 bd1->data.bitfields = rte_cpu_to_le_16(bd1_bf);
2085 bd1->data.bd_flags.bitfields = bd1_bd_flags_bf;
2086 bd1->data.vlan = vlan;
2088 if (lso_flg || mplsoudp_flg) {
2089 bd2 = (struct eth_tx_2nd_bd *)ecore_chain_produce
2091 memset(bd2, 0, sizeof(struct eth_tx_2nd_bd));
2095 QEDE_BD_SET_ADDR_LEN(bd1, rte_mbuf_data_iova(mbuf),
2098 QEDE_BD_SET_ADDR_LEN(bd2, (hdr_size +
2099 rte_mbuf_data_iova(mbuf)),
2100 mbuf->data_len - hdr_size);
2101 bd2->data.bitfields1 = rte_cpu_to_le_16(bd2_bf1);
2103 bd2->data.bitfields2 =
2104 rte_cpu_to_le_16(bd2_bf2);
2106 bd2->data.tunn_ip_size =
2107 rte_cpu_to_le_16(mbuf->outer_l3_len);
2110 if (lso_flg || (mplsoudp_flg && tunn_ipv6_ext_flg)) {
2111 bd3 = (struct eth_tx_3rd_bd *)
2112 ecore_chain_produce(&txq->tx_pbl);
2113 memset(bd3, 0, sizeof(struct eth_tx_3rd_bd));
2115 bd3->data.bitfields = rte_cpu_to_le_16(bd3_bf);
2117 bd3->data.lso_mss = mss;
2119 bd3->data.tunn_l4_hdr_start_offset_w =
2120 tunn_l4_hdr_start_offset;
2121 bd3->data.tunn_hdr_size_w =
2127 /* Handle fragmented MBUF */
2130 /* Encode scatter gather buffer descriptors if required */
2131 nb_frags = qede_encode_sg_bd(txq, m_seg, &bd2, &bd3, nbds - 1);
2132 bd1->data.nbds = nbds + nb_frags;
2134 txq->nb_tx_avail -= bd1->data.nbds;
2136 rte_prefetch0(txq->sw_tx_ring[TX_PROD(txq)].mbuf);
2138 rte_cpu_to_le_16(ecore_chain_get_prod_idx(&txq->tx_pbl));
2139 #ifdef RTE_LIBRTE_QEDE_DEBUG_TX
2140 print_tx_bd_info(txq, bd1, bd2, bd3, tx_ol_flags);
2146 /* Write value of prod idx into bd_prod */
2147 txq->tx_db.data.bd_prod = bd_prod;
2149 rte_compiler_barrier();
2150 DIRECT_REG_WR_RELAXED(edev, txq->doorbell_addr, txq->tx_db.raw);
2153 /* Check again for Tx completions */
2154 qede_process_tx_compl(edev, txq);
2156 PMD_TX_LOG(DEBUG, txq, "to_send=%u sent=%u bd_prod=%u core=%d",
2157 nb_pkts, nb_pkt_sent, TX_PROD(txq), rte_lcore_id());
2163 qede_rxtx_pkts_dummy(__rte_unused void *p_rxq,
2164 __rte_unused struct rte_mbuf **pkts,
2165 __rte_unused uint16_t nb_pkts)