X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;ds=sidebyside;f=drivers%2Fnet%2Fi40e%2Fi40e_rxtx.c;h=2a28ee348c5e080b5537d03414035893135053ab;hb=b63264c8342e6a1b6971c79550d2af2024b6a4de;hp=1217e5a619a19afae4316422ece74881f71710bb;hpb=ca33590b6af032bff57d9cc70455660466a654b2;p=deb_dpdk.git

diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 1217e5a6..2a28ee34 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -40,9 +40,6 @@
 /* Base address of the HW descriptor ring should be 128B aligned. */
 #define I40E_RING_BASE_ALIGN	128

-#define I40E_SIMPLE_FLAGS ((uint32_t)ETH_TXQ_FLAGS_NOMULTSEGS | \
-					ETH_TXQ_FLAGS_NOOFFLOADS)
-
 #define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)

 #ifdef RTE_LIBRTE_IEEE1588
@@ -1240,7 +1237,7 @@ i40e_tx_free_bufs(struct i40e_tx_queue *txq)
 	for (i = 0; i < txq->tx_rs_thresh; i++)
 		rte_prefetch0((txep + i)->mbuf);

-	if (txq->txq_flags & (uint32_t)ETH_TXQ_FLAGS_NOREFCOUNT) {
+	if (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) {
 		for (i = 0; i < txq->tx_rs_thresh; ++i, ++txep) {
 			rte_mempool_put(txep->mbuf->pool, txep->mbuf);
 			txep->mbuf = NULL;
@@ -1442,13 +1439,15 @@ i40e_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,

 		/* Check for m->nb_segs to not exceed the limits. */
 		if (!(ol_flags & PKT_TX_TCP_SEG)) {
-			if (m->nb_segs > I40E_TX_MAX_SEG ||
-			    m->nb_segs > I40E_TX_MAX_MTU_SEG) {
+			if (m->nb_segs > I40E_TX_MAX_MTU_SEG ||
+			    m->pkt_len > I40E_FRAME_SIZE_MAX) {
 				rte_errno = -EINVAL;
 				return i;
 			}
-		} else if ((m->tso_segsz < I40E_MIN_TSO_MSS) ||
-				(m->tso_segsz > I40E_MAX_TSO_MSS)) {
+		} else if (m->nb_segs > I40E_TX_MAX_SEG ||
+			   m->tso_segsz < I40E_MIN_TSO_MSS ||
+			   m->tso_segsz > I40E_MAX_TSO_MSS ||
+			   m->pkt_len > I40E_TSO_FRAME_SIZE_MAX) {
 			/* MSS outside the range (256B - 9674B) are considered
 			 * malicious
 			 */
@@ -1461,6 +1460,12 @@ i40e_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
 			return i;
 		}

+		/* check the packet size */
+		if (m->pkt_len < I40E_TX_MIN_PKT_LEN) {
+			rte_errno = -EINVAL;
+			return i;
+		}
+
 #ifdef RTE_LIBRTE_ETHDEV_DEBUG
 		ret = rte_validate_tx_offload(m);
 		if (ret != 0) {
@@ -1527,38 +1532,36 @@ int
 i40e_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 {
 	struct i40e_rx_queue *rxq;
-	int err = -1;
+	int err;
 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);

 	PMD_INIT_FUNC_TRACE();

-	if (rx_queue_id < dev->data->nb_rx_queues) {
-		rxq = dev->data->rx_queues[rx_queue_id];
-
-		err = i40e_alloc_rx_queue_mbufs(rxq);
-		if (err) {
-			PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
-			return err;
-		}
+	rxq = dev->data->rx_queues[rx_queue_id];

-		rte_wmb();
+	err = i40e_alloc_rx_queue_mbufs(rxq);
+	if (err) {
+		PMD_DRV_LOG(ERR, "Failed to allocate RX queue mbuf");
+		return err;
+	}

-		/* Init the RX tail regieter. */
-		I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);
+	rte_wmb();

-		err = i40e_switch_rx_queue(hw, rxq->reg_idx, TRUE);
+	/* Init the RX tail register. */
+	I40E_PCI_REG_WRITE(rxq->qrx_tail, rxq->nb_rx_desc - 1);

-		if (err) {
-			PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
-				    rx_queue_id);
+	err = i40e_switch_rx_queue(hw, rxq->reg_idx, TRUE);
+	if (err) {
+		PMD_DRV_LOG(ERR, "Failed to switch RX queue %u on",
+			    rx_queue_id);

-			i40e_rx_queue_release_mbufs(rxq);
-			i40e_reset_rx_queue(rxq);
-		} else
-			dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+		i40e_rx_queue_release_mbufs(rxq);
+		i40e_reset_rx_queue(rxq);
+		return err;
 	}
+	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

-	return err;
+	return 0;
 }

 int
@@ -1568,24 +1571,21 @@ i40e_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 	int err;
 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);

-	if (rx_queue_id < dev->data->nb_rx_queues) {
-		rxq = dev->data->rx_queues[rx_queue_id];
+	rxq = dev->data->rx_queues[rx_queue_id];

-		/*
-		 * rx_queue_id is queue id application refers to, while
-		 * rxq->reg_idx is the real queue index.
-		 */
-		err = i40e_switch_rx_queue(hw, rxq->reg_idx, FALSE);
-
-		if (err) {
-			PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
-				    rx_queue_id);
-			return err;
-		}
-		i40e_rx_queue_release_mbufs(rxq);
-		i40e_reset_rx_queue(rxq);
-		dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+	/*
+	 * rx_queue_id is the queue id the application refers to, while
+	 * rxq->reg_idx is the real queue index.
+	 */
+	err = i40e_switch_rx_queue(hw, rxq->reg_idx, FALSE);
+	if (err) {
+		PMD_DRV_LOG(ERR, "Failed to switch RX queue %u off",
+			    rx_queue_id);
+		return err;
 	}
+	i40e_rx_queue_release_mbufs(rxq);
+	i40e_reset_rx_queue(rxq);
+	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

 	return 0;
 }
@@ -1593,28 +1593,27 @@ i40e_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
 int
 i40e_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 {
-	int err = -1;
+	int err;
 	struct i40e_tx_queue *txq;
 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);

 	PMD_INIT_FUNC_TRACE();

-	if (tx_queue_id < dev->data->nb_tx_queues) {
-		txq = dev->data->tx_queues[tx_queue_id];
+	txq = dev->data->tx_queues[tx_queue_id];

-		/*
-		 * tx_queue_id is queue id application refers to, while
-		 * rxq->reg_idx is the real queue index.
-		 */
-		err = i40e_switch_tx_queue(hw, txq->reg_idx, TRUE);
-		if (err)
-			PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
-				    tx_queue_id);
-		else
-			dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+	/*
+	 * tx_queue_id is the queue id the application refers to, while
+	 * txq->reg_idx is the real queue index.
+	 */
+	err = i40e_switch_tx_queue(hw, txq->reg_idx, TRUE);
+	if (err) {
+		PMD_DRV_LOG(ERR, "Failed to switch TX queue %u on",
+			    tx_queue_id);
+		return err;
 	}
+	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

-	return err;
+	return 0;
 }

 int
@@ -1624,26 +1623,23 @@ i40e_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
 	int err;
 	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);

-	if (tx_queue_id < dev->data->nb_tx_queues) {
-		txq = dev->data->tx_queues[tx_queue_id];
+	txq = dev->data->tx_queues[tx_queue_id];

-		/*
-		 * tx_queue_id is queue id application refers to, while
-		 * txq->reg_idx is the real queue index.
-		 */
-		err = i40e_switch_tx_queue(hw, txq->reg_idx, FALSE);
-
-		if (err) {
-			PMD_DRV_LOG(ERR, "Failed to switch TX queue %u of",
-				    tx_queue_id);
-			return err;
-		}
-
-		i40e_tx_queue_release_mbufs(txq);
-		i40e_reset_tx_queue(txq);
-		dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+	/*
+	 * tx_queue_id is the queue id the application refers to, while
+	 * txq->reg_idx is the real queue index.
+	 */
+	err = i40e_switch_tx_queue(hw, txq->reg_idx, FALSE);
+	if (err) {
+		PMD_DRV_LOG(ERR, "Failed to switch TX queue %u off",
+			    tx_queue_id);
+		return err;
 	}
+	i40e_tx_queue_release_mbufs(txq);
+	i40e_reset_tx_queue(txq);
+	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+
 	return 0;
 }

@@ -1692,6 +1688,75 @@ i40e_dev_supported_ptypes_get(struct rte_eth_dev *dev)
 	return NULL;
 }

+static int
+i40e_dev_first_queue(uint16_t idx, void **queues, int num)
+{
+	uint16_t i;
+
+	for (i = 0; i < num; i++) {
+		if (i != idx && queues[i])
+			return 0;
+	}
+
+	return 1;
+}
+
+static int
+i40e_dev_rx_queue_setup_runtime(struct rte_eth_dev *dev,
+				struct i40e_rx_queue *rxq)
+{
+	struct i40e_adapter *ad =
+		I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+	int use_def_burst_func =
+		check_rx_burst_bulk_alloc_preconditions(rxq);
+	uint16_t buf_size =
+		(uint16_t)(rte_pktmbuf_data_room_size(rxq->mp) -
+			   RTE_PKTMBUF_HEADROOM);
+	int use_scattered_rx =
+		((rxq->max_pkt_len + 2 * I40E_VLAN_TAG_SIZE) > buf_size);
+
+	if (i40e_rx_queue_init(rxq) != I40E_SUCCESS) {
+		PMD_DRV_LOG(ERR,
+			    "Failed to do RX queue initialization");
+		return -EINVAL;
+	}
+
+	if (i40e_dev_first_queue(rxq->queue_id,
+				 dev->data->rx_queues,
+				 dev->data->nb_rx_queues)) {
+		/**
+		 * If it is the first queue to setup,
+		 * set all flags to default and call
+		 * i40e_set_rx_function.
+		 */
+		ad->rx_bulk_alloc_allowed = true;
+		ad->rx_vec_allowed = true;
+		dev->data->scattered_rx = use_scattered_rx;
+		if (use_def_burst_func)
+			ad->rx_bulk_alloc_allowed = false;
+		i40e_set_rx_function(dev);
+		return 0;
+	}
+
+	/* check bulk alloc conflict */
+	if (ad->rx_bulk_alloc_allowed && use_def_burst_func) {
+		PMD_DRV_LOG(ERR, "Can't use default burst.");
+		return -EINVAL;
+	}
+	/* check scattered conflict */
+	if (!dev->data->scattered_rx && use_scattered_rx) {
+		PMD_DRV_LOG(ERR, "Scattered rx is required.");
+		return -EINVAL;
+	}
+	/* check vector conflict */
+	if (ad->rx_vec_allowed && i40e_rxq_vec_setup(rxq)) {
+		PMD_DRV_LOG(ERR, "Failed vector rx setup.");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
 int
 i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
 			uint16_t queue_idx,
@@ -1712,6 +1777,9 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	uint16_t len, i;
 	uint16_t reg_idx, base, bsf, tc_mapping;
 	int q_offset, use_def_burst_func = 1;
+	uint64_t offloads;
+
+	offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;

 	if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF) {
 		vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private);
@@ -1760,11 +1828,14 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	rxq->queue_id = queue_idx;
 	rxq->reg_idx = reg_idx;
 	rxq->port_id = dev->data->port_id;
-	rxq->crc_len = (uint8_t) ((dev->data->dev_conf.rxmode.hw_strip_crc) ?
-							0 : ETHER_CRC_LEN);
+	if (rte_eth_dev_must_keep_crc(dev->data->dev_conf.rxmode.offloads))
+		rxq->crc_len = ETHER_CRC_LEN;
+	else
+		rxq->crc_len = 0;
 	rxq->drop_en = rx_conf->rx_drop_en;
 	rxq->vsi = vsi;
 	rxq->rx_deferred_start = rx_conf->rx_deferred_start;
+	rxq->offloads = offloads;

 	/* Allocate the maximun number of RX ring hardware descriptor. */
 	len = I40E_MAX_RING_DESC;

@@ -1808,25 +1879,6 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	i40e_reset_rx_queue(rxq);
 	rxq->q_set = TRUE;
-	dev->data->rx_queues[queue_idx] = rxq;
-
-	use_def_burst_func = check_rx_burst_bulk_alloc_preconditions(rxq);
-
-	if (!use_def_burst_func) {
-#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
-		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
-			     "satisfied. Rx Burst Bulk Alloc function will be "
-			     "used on port=%d, queue=%d.",
-			     rxq->port_id, rxq->queue_id);
-#endif /* RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC */
-	} else {
-		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
-			     "not satisfied, Scattered Rx is requested, "
-			     "or RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC is "
-			     "not enabled on port=%d, queue=%d.",
-			     rxq->port_id, rxq->queue_id);
-		ad->rx_bulk_alloc_allowed = false;
-	}

 	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
 		if (!(vsi->enabled_tc & (1 << i)))
@@ -1841,6 +1893,34 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
 		rxq->dcb_tc = i;
 	}

+	if (dev->data->dev_started) {
+		if (i40e_dev_rx_queue_setup_runtime(dev, rxq)) {
+			i40e_dev_rx_queue_release(rxq);
+			return -EINVAL;
+		}
+	} else {
+		use_def_burst_func =
+			check_rx_burst_bulk_alloc_preconditions(rxq);
+		if (!use_def_burst_func) {
+#ifdef RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC
+			PMD_INIT_LOG(DEBUG,
+				     "Rx Burst Bulk Alloc Preconditions are "
+				     "satisfied. Rx Burst Bulk Alloc function will be "
+				     "used on port=%d, queue=%d.",
+				     rxq->port_id, rxq->queue_id);
+#endif /* RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC */
+		} else {
+			PMD_INIT_LOG(DEBUG,
+				     "Rx Burst Bulk Alloc Preconditions are "
+				     "not satisfied, Scattered Rx is requested, "
+				     "or RTE_LIBRTE_I40E_RX_ALLOW_BULK_ALLOC is "
+				     "not enabled on port=%d, queue=%d.",
+				     rxq->port_id, rxq->queue_id);
+			ad->rx_bulk_alloc_allowed = false;
+		}
+	}
+
+	dev->data->rx_queues[queue_idx] = rxq;

 	return 0;
 }

@@ -1972,6 +2052,52 @@ i40e_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
 	return RTE_ETH_TX_DESC_FULL;
 }

+static int
+i40e_dev_tx_queue_setup_runtime(struct rte_eth_dev *dev,
+				struct i40e_tx_queue *txq)
+{
+	struct i40e_adapter *ad =
+		I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
+
+	if (i40e_tx_queue_init(txq) != I40E_SUCCESS) {
+		PMD_DRV_LOG(ERR,
+			    "Failed to do TX queue initialization");
+		return -EINVAL;
+	}
+
+	if (i40e_dev_first_queue(txq->queue_id,
+				 dev->data->tx_queues,
+				 dev->data->nb_tx_queues)) {
+		/**
+		 * If it is the first queue to setup,
+		 * set all flags and call
+		 * i40e_set_tx_function.
+		 */
+ */ + i40e_set_tx_function_flag(dev, txq); + i40e_set_tx_function(dev); + return 0; + } + + /* check vector conflict */ + if (ad->tx_vec_allowed) { + if (txq->tx_rs_thresh > RTE_I40E_TX_MAX_FREE_BUF_SZ || + i40e_txq_vec_setup(txq)) { + PMD_DRV_LOG(ERR, "Failed vector tx setup."); + return -EINVAL; + } + } + /* check simple tx conflict */ + if (ad->tx_simple_allowed) { + if ((txq->offloads & ~DEV_TX_OFFLOAD_MBUF_FAST_FREE) != 0 || + txq->tx_rs_thresh < RTE_PMD_I40E_TX_MAX_BURST) { + PMD_DRV_LOG(ERR, "No-simple tx is required."); + return -EINVAL; + } + } + + return 0; +} + int i40e_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, @@ -1989,6 +2115,9 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_rs_thresh, tx_free_thresh; uint16_t reg_idx, i, base, bsf, tc_mapping; int q_offset; + uint64_t offloads; + + offloads = tx_conf->offloads | dev->data->dev_conf.txmode.offloads; if (hw->mac.type == I40E_MAC_VF || hw->mac.type == I40E_MAC_X722_VF) { vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); @@ -2123,7 +2252,7 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev, txq->queue_id = queue_idx; txq->reg_idx = reg_idx; txq->port_id = dev->data->port_id; - txq->txq_flags = tx_conf->txq_flags; + txq->offloads = offloads; txq->vsi = vsi; txq->tx_deferred_start = tx_conf->tx_deferred_start; @@ -2144,10 +2273,6 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev, i40e_reset_tx_queue(txq); txq->q_set = TRUE; - dev->data->tx_queues[queue_idx] = txq; - - /* Use a simple TX queue without offloads or multi segs if possible */ - i40e_set_tx_function_flag(dev, txq); for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { if (!(vsi->enabled_tc & (1 << i))) @@ -2162,6 +2287,20 @@ i40e_dev_tx_queue_setup(struct rte_eth_dev *dev, txq->dcb_tc = i; } + if (dev->data->dev_started) { + if (i40e_dev_tx_queue_setup_runtime(dev, txq)) { + i40e_dev_tx_queue_release(txq); + return -EINVAL; + } + } else { + /** + * Use a simple TX queue without offloads or + * multi segs if possible + */ + i40e_set_tx_function_flag(dev, txq); + } + dev->data->tx_queues[queue_idx] = txq; + return 0; } @@ -2189,8 +2328,8 @@ i40e_memzone_reserve(const char *name, uint32_t len, int socket_id) if (mz) return mz; - mz = rte_memzone_reserve_aligned(name, len, - socket_id, 0, I40E_RING_BASE_ALIGN); + mz = rte_memzone_reserve_aligned(name, len, socket_id, + RTE_MEMZONE_IOVA_CONTIG, I40E_RING_BASE_ALIGN); return mz; } @@ -2469,7 +2608,7 @@ i40e_rx_queue_config(struct i40e_rx_queue *rxq) len = hw->func_caps.rx_buf_chain_len * rxq->rx_buf_len; rxq->max_pkt_len = RTE_MIN(len, data->dev_conf.rxmode.max_rx_pkt_len); - if (data->dev_conf.rxmode.jumbo_frame == 1) { + if (data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_JUMBO_FRAME) { if (rxq->max_pkt_len <= ETHER_MAX_LEN || rxq->max_pkt_len > I40E_FRAME_SIZE_MAX) { PMD_DRV_LOG(ERR, "maximum packet length must " @@ -2747,6 +2886,7 @@ i40e_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, qinfo->conf.rx_free_thresh = rxq->rx_free_thresh; qinfo->conf.rx_drop_en = rxq->drop_en; qinfo->conf.rx_deferred_start = rxq->rx_deferred_start; + qinfo->conf.offloads = rxq->offloads; } void @@ -2765,8 +2905,8 @@ i40e_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, qinfo->conf.tx_free_thresh = txq->tx_free_thresh; qinfo->conf.tx_rs_thresh = txq->tx_rs_thresh; - qinfo->conf.txq_flags = txq->txq_flags; qinfo->conf.tx_deferred_start = txq->tx_deferred_start; + qinfo->conf.offloads = txq->offloads; } void __attribute__((cold)) @@ -2889,19 +3029,24 @@ i40e_set_tx_function_flag(struct 
 	struct i40e_adapter *ad =
 		I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);

-	/* Use a simple Tx queue (no offloads, no multi segs) if possible */
-	if (((txq->txq_flags & I40E_SIMPLE_FLAGS) == I40E_SIMPLE_FLAGS)
-	    && (txq->tx_rs_thresh >= RTE_PMD_I40E_TX_MAX_BURST)) {
-		if (txq->tx_rs_thresh <= RTE_I40E_TX_MAX_FREE_BUF_SZ) {
-			PMD_INIT_LOG(DEBUG, "Vector tx"
-				     " can be enabled on this txq.");
-
-		} else {
-			ad->tx_vec_allowed = false;
-		}
-	} else {
-		ad->tx_simple_allowed = false;
-	}
+	/* Use a simple Tx queue if possible (only fast free is allowed) */
+	ad->tx_simple_allowed =
+		(txq->offloads ==
+		 (txq->offloads & DEV_TX_OFFLOAD_MBUF_FAST_FREE) &&
+		 txq->tx_rs_thresh >= RTE_PMD_I40E_TX_MAX_BURST);
+	ad->tx_vec_allowed = (ad->tx_simple_allowed &&
+			      txq->tx_rs_thresh <= RTE_I40E_TX_MAX_FREE_BUF_SZ);
+
+	if (ad->tx_vec_allowed)
+		PMD_INIT_LOG(DEBUG, "Vector Tx can be enabled on Tx queue %u.",
+			     txq->queue_id);
+	else if (ad->tx_simple_allowed)
+		PMD_INIT_LOG(DEBUG, "Simple Tx can be enabled on Tx queue %u.",
+			     txq->queue_id);
+	else
+		PMD_INIT_LOG(DEBUG,
+			     "Neither simple nor vector Tx enabled on Tx queue %u.",
+			     txq->queue_id);
 }

 void __attribute__((cold))
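
Editor's sketch, not part of the patch: since the patch replaces the ETH_TXQ_FLAGS_* interface with per-queue offloads, an application now requests the fast-free Tx path exercised by i40e_tx_free_bufs() via DEV_TX_OFFLOAD_MBUF_FAST_FREE. The sketch below assumes the DPDK 18.05/18.08-era ethdev API; setup_fast_free_txq() and the port_id/queue_id/nb_txd parameters are hypothetical application-side names.

#include <rte_ethdev.h>

static int
setup_fast_free_txq(uint16_t port_id, uint16_t queue_id, uint16_t nb_txd)
{
	struct rte_eth_dev_info dev_info;
	struct rte_eth_txconf txconf;

	rte_eth_dev_info_get(port_id, &dev_info);

	/* Start from the PMD's defaults, then request mbuf fast free.
	 * The application must guarantee that all transmitted mbufs come
	 * from a single mempool and have a reference count of 1. */
	txconf = dev_info.default_txconf;
	/* On releases that still carry txq_flags, it must be ignored
	 * explicitly for the offloads field below to take effect. */
	txconf.txq_flags = ETH_TXQ_FLAGS_IGNORE;
	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
		txconf.offloads = DEV_TX_OFFLOAD_MBUF_FAST_FREE;

	/* The driver ORs these per-queue offloads with
	 * dev_conf.txmode.offloads (see i40e_dev_tx_queue_setup() above);
	 * requesting only MBUF_FAST_FREE keeps tx_simple_allowed true in
	 * i40e_set_tx_function_flag(). */
	return rte_eth_tx_queue_setup(port_id, queue_id, nb_txd,
				      rte_eth_dev_socket_id(port_id),
				      &txconf);
}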