X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=drivers%2Fnet%2Fi40e%2Fi40e_rxtx.c;h=e1152ff0ea58f22ac64093182efddc9badf43d0d;hb=219737bfb6f9200e4c9189ab1668029c76a9be62;hp=2a28ee348c5e080b5537d03414035893135053ab;hpb=b63264c8342e6a1b6971c79550d2af2024b6a4de;p=deb_dpdk.git

diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c
index 2a28ee34..e1152ff0 100644
--- a/drivers/net/i40e/i40e_rxtx.c
+++ b/drivers/net/i40e/i40e_rxtx.c
@@ -55,6 +55,10 @@
 		PKT_TX_OUTER_IP_CKSUM)
 
 #define I40E_TX_OFFLOAD_MASK ( \
+		PKT_TX_OUTER_IPV4 |	\
+		PKT_TX_OUTER_IPV6 |	\
+		PKT_TX_IPV4 |		\
+		PKT_TX_IPV6 |		\
 		PKT_TX_IP_CKSUM |	\
 		PKT_TX_L4_MASK |	\
 		PKT_TX_OUTER_IP_CKSUM |	\
@@ -65,7 +69,7 @@
 		I40E_TX_IEEE1588_TMST)
 
 #define I40E_TX_OFFLOAD_NOTSUP_MASK \
-		(PKT_TX_OFFLOAD_MASK ^ I40E_TX_OFFLOAD_MASK)
+		~(PKT_TX_OFFLOAD_MASK & I40E_TX_OFFLOAD_MASK)
 
 static inline void
 i40e_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union i40e_rx_desc *rxdp)
@@ -83,7 +87,8 @@ i40e_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union i40e_rx_desc *rxdp)
 #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC
 	if (rte_le_to_cpu_16(rxdp->wb.qword2.ext_status) &
 	    (1 << I40E_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT)) {
-		mb->ol_flags |= PKT_RX_QINQ_STRIPPED;
+		mb->ol_flags |= PKT_RX_QINQ_STRIPPED | PKT_RX_QINQ |
+			PKT_RX_VLAN_STRIPPED | PKT_RX_VLAN;
 		mb->vlan_tci_outer = mb->vlan_tci;
 		mb->vlan_tci = rte_le_to_cpu_16(rxdp->wb.qword2.l2tag2_2);
 		PMD_RX_LOG(DEBUG, "Descriptor l2tag2_1: %u, l2tag2_2: %u",
@@ -1736,6 +1741,11 @@ i40e_dev_rx_queue_setup_runtime(struct rte_eth_dev *dev,
 			ad->rx_bulk_alloc_allowed = false;
 		i40e_set_rx_function(dev);
 		return 0;
+	} else if (ad->rx_vec_allowed && !rte_is_power_of_2(rxq->nb_rx_desc)) {
+		PMD_DRV_LOG(ERR, "Vector mode is allowed, but descriptor"
+			" number %d of queue %d isn't power of 2",
+			rxq->nb_rx_desc, rxq->queue_id);
+		return -EINVAL;
 	}
 
 	/* check bulk alloc conflict */
@@ -1828,7 +1838,7 @@ i40e_dev_rx_queue_setup(struct rte_eth_dev *dev,
 	rxq->queue_id = queue_idx;
 	rxq->reg_idx = reg_idx;
 	rxq->port_id = dev->data->port_id;
-	if (rte_eth_dev_must_keep_crc(dev->data->dev_conf.rxmode.offloads))
+	if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
 		rxq->crc_len = ETHER_CRC_LEN;
 	else
 		rxq->crc_len = 0;
@@ -2909,6 +2919,35 @@ i40e_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
 	qinfo->conf.offloads = txq->offloads;
 }
 
+static eth_rx_burst_t
+i40e_get_latest_rx_vec(bool scatter)
+{
+#ifdef RTE_ARCH_X86
+	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
+		return scatter ? i40e_recv_scattered_pkts_vec_avx2 :
+				 i40e_recv_pkts_vec_avx2;
+#endif
+	return scatter ? i40e_recv_scattered_pkts_vec :
+			 i40e_recv_pkts_vec;
+}
+
+static eth_rx_burst_t
+i40e_get_recommend_rx_vec(bool scatter)
+{
+#ifdef RTE_ARCH_X86
+	/*
+	 * since AVX frequency can be different from base frequency, limit
+	 * use of AVX2 version to later platforms, not all those that could
+	 * theoretically run it.
+	 */
+	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F))
+		return scatter ? i40e_recv_scattered_pkts_vec_avx2 :
+				 i40e_recv_pkts_vec_avx2;
+#endif
+	return scatter ? i40e_recv_scattered_pkts_vec :
+			 i40e_recv_pkts_vec;
+}
+
 void __attribute__((cold))
 i40e_set_rx_function(struct rte_eth_dev *dev)
 {
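Two of the hunks above work together. I40E_TX_OFFLOAD_MASK now lists PKT_TX_IPV4/PKT_TX_IPV6 and the outer-IP variants, which the driver's offload path genuinely handles, so the tx_prepare check no longer flags packets carrying those bits as unsupported. And I40E_TX_OFFLOAD_NOTSUP_MASK changes from an XOR to the complement of the intersection: XOR only sets bits present in exactly one mask, so an ol_flags bit outside both masks was silently accepted, whereas ~(A & B) rejects everything not known-supported by both the mbuf layer and the PMD. A minimal standalone sketch of the difference; the EXAMPLE_* values are illustrative stand-ins, not the real PKT_TX_* definitions:

#include <assert.h>
#include <stdint.h>

#define EXAMPLE_TX_IPV4   (1ULL << 0) /* mbuf-defined, PMD-supported */
#define EXAMPLE_TX_TSO    (1ULL << 1) /* mbuf-defined, PMD-supported */
#define EXAMPLE_TX_MACSEC (1ULL << 2) /* mbuf-defined, not PMD-supported */

#define EXAMPLE_MBUF_MASK (EXAMPLE_TX_IPV4 | EXAMPLE_TX_TSO | EXAMPLE_TX_MACSEC)
#define EXAMPLE_PMD_MASK  (EXAMPLE_TX_IPV4 | EXAMPLE_TX_TSO)

int main(void)
{
	uint64_t stray = 1ULL << 40; /* a bit defined in neither mask */

	/* Old form: bits set in exactly one mask. The stray bit is in
	 * neither, so it is absent from the XOR and slips through. */
	uint64_t notsup_xor = EXAMPLE_MBUF_MASK ^ EXAMPLE_PMD_MASK;
	assert((stray & notsup_xor) == 0);       /* wrongly accepted */

	/* New form: everything outside the intersection of the two
	 * masks is "not supported", so the stray bit is caught. */
	uint64_t notsup_and = ~(EXAMPLE_MBUF_MASK & EXAMPLE_PMD_MASK);
	assert((stray & notsup_and) != 0);       /* correctly rejected */

	/* Flags supported by both sides still pass; unsupported
	 * mbuf flags are rejected by either form. */
	assert((EXAMPLE_TX_IPV4 & notsup_and) == 0);
	assert((EXAMPLE_TX_MACSEC & notsup_and) != 0);
	return 0;
}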
@@ -2940,57 +2979,17 @@ i40e_set_rx_function(struct rte_eth_dev *dev)
 		}
 	}
 
-	if (dev->data->scattered_rx) {
-		/* Set the non-LRO scattered callback: there are Vector and
-		 * single allocation versions.
-		 */
-		if (ad->rx_vec_allowed) {
-			PMD_INIT_LOG(DEBUG, "Using Vector Scattered Rx "
-				     "callback (port=%d).",
-				     dev->data->port_id);
-
-			dev->rx_pkt_burst = i40e_recv_scattered_pkts_vec;
-#ifdef RTE_ARCH_X86
-			/*
-			 * since AVX frequency can be different to base
-			 * frequency, limit use of AVX2 version to later
-			 * plaforms, not all those that could theoretically
-			 * run it.
-			 */
-			if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F))
-				dev->rx_pkt_burst =
-					i40e_recv_scattered_pkts_vec_avx2;
-#endif
-		} else {
-			PMD_INIT_LOG(DEBUG, "Using a Scattered with bulk "
-				     "allocation callback (port=%d).",
-				     dev->data->port_id);
-			dev->rx_pkt_burst = i40e_recv_scattered_pkts;
-		}
-	/* If parameters allow we are going to choose between the following
-	 * callbacks:
-	 *    - Vector
-	 *    - Bulk Allocation
-	 *    - Single buffer allocation (the simplest one)
-	 */
-	} else if (ad->rx_vec_allowed) {
-		PMD_INIT_LOG(DEBUG, "Vector rx enabled, please make sure RX "
-			     "burst size no less than %d (port=%d).",
-			     RTE_I40E_DESCS_PER_LOOP,
-			     dev->data->port_id);
-
-		dev->rx_pkt_burst = i40e_recv_pkts_vec;
-#ifdef RTE_ARCH_X86
-		/*
-		 * since AVX frequency can be different to base
-		 * frequency, limit use of AVX2 version to later
-		 * plaforms, not all those that could theoretically
-		 * run it.
-		 */
-		if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F))
-			dev->rx_pkt_burst = i40e_recv_pkts_vec_avx2;
-#endif
-	} else if (ad->rx_bulk_alloc_allowed) {
+	if (ad->rx_vec_allowed) {
+		/* Vec Rx path */
+		PMD_INIT_LOG(DEBUG, "Vector Rx path will be used on port=%d.",
+			     dev->data->port_id);
+		if (ad->use_latest_vec)
+			dev->rx_pkt_burst =
+			i40e_get_latest_rx_vec(dev->data->scattered_rx);
+		else
+			dev->rx_pkt_burst =
+			i40e_get_recommend_rx_vec(dev->data->scattered_rx);
+	} else if (!dev->data->scattered_rx && ad->rx_bulk_alloc_allowed) {
 		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are "
 			     "satisfied. Rx Burst Bulk Alloc function "
 			     "will be used on port=%d.",
@@ -2998,12 +2997,12 @@ i40e_set_rx_function(struct rte_eth_dev *dev)
 
 		dev->rx_pkt_burst = i40e_recv_pkts_bulk_alloc;
 	} else {
-		PMD_INIT_LOG(DEBUG, "Rx Burst Bulk Alloc Preconditions are not "
-			     "satisfied, or Scattered Rx is requested "
-			     "(port=%d).",
+		/* Simple Rx Path. */
+		PMD_INIT_LOG(DEBUG, "Simple Rx path will be used on port=%d.",
 			     dev->data->port_id);
-
-		dev->rx_pkt_burst = i40e_recv_pkts;
+		dev->rx_pkt_burst = dev->data->scattered_rx ?
+					i40e_recv_scattered_pkts :
+					i40e_recv_pkts;
 	}
 
 	/* Propagate information about RX function choice through all queues. */
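The i40e_set_rx_function() rework above collapses the old scattered/non-scattered duplication into a single priority ladder: vector Rx first (with i40e_get_latest_rx_vec()/i40e_get_recommend_rx_vec() choosing the scattered and AVX2 variants internally, depending on use_latest_vec), then bulk allocation, then the simple path, which now absorbs the scattered fallback. A minimal sketch of that dispatch order, with hypothetical stub names standing in for the driver's burst functions:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint16_t (*rx_burst_t)(void *rxq, void **pkts, uint16_t n);

/* Stubs standing in for the real i40e Rx burst callbacks. */
static uint16_t recv_vec(void *q, void **p, uint16_t n)        { (void)q; (void)p; return n; }
static uint16_t recv_bulk_alloc(void *q, void **p, uint16_t n) { (void)q; (void)p; return n; }
static uint16_t recv_scattered(void *q, void **p, uint16_t n)  { (void)q; (void)p; return n; }
static uint16_t recv_simple(void *q, void **p, uint16_t n)     { (void)q; (void)p; return n; }

/* One decision point instead of the old scattered/non-scattered split:
 * vector > bulk alloc > simple, with scatter handled inside each branch. */
static rx_burst_t
pick_rx_burst(bool vec_allowed, bool bulk_alloc_allowed, bool scattered_rx)
{
	if (vec_allowed)
		return recv_vec; /* selector picks scattered/AVX2 variant */
	if (!scattered_rx && bulk_alloc_allowed)
		return recv_bulk_alloc;
	return scattered_rx ? recv_scattered : recv_simple;
}

int main(void)
{
	/* Each line prints 1: the ladder resolves as described above. */
	printf("%d\n", pick_rx_burst(true, false, true) == recv_vec);
	printf("%d\n", pick_rx_burst(false, true, false) == recv_bulk_alloc);
	printf("%d\n", pick_rx_burst(false, true, true) == recv_scattered);
	return 0;
}

Keeping the scatter decision inside the vector selector is what lets the ladder stay flat; the Tx side below applies the same pattern.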
@@ -3049,6 +3048,31 @@ i40e_set_tx_function_flag(struct rte_eth_dev *dev, struct i40e_tx_queue *txq)
 			txq->queue_id);
 }
 
+static eth_tx_burst_t
+i40e_get_latest_tx_vec(void)
+{
+#ifdef RTE_ARCH_X86
+	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2))
+		return i40e_xmit_pkts_vec_avx2;
+#endif
+	return i40e_xmit_pkts_vec;
+}
+
+static eth_tx_burst_t
+i40e_get_recommend_tx_vec(void)
+{
+#ifdef RTE_ARCH_X86
+	/*
+	 * since AVX frequency can be different from base frequency, limit
+	 * use of AVX2 version to later platforms, not all those that could
+	 * theoretically run it.
+	 */
+	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F))
+		return i40e_xmit_pkts_vec_avx2;
+#endif
+	return i40e_xmit_pkts_vec;
+}
+
 void __attribute__((cold))
 i40e_set_tx_function(struct rte_eth_dev *dev)
 {
@@ -3073,17 +3097,12 @@ i40e_set_tx_function(struct rte_eth_dev *dev)
 	if (ad->tx_simple_allowed) {
 		if (ad->tx_vec_allowed) {
 			PMD_INIT_LOG(DEBUG, "Vector tx finally be used.");
-			dev->tx_pkt_burst = i40e_xmit_pkts_vec;
-#ifdef RTE_ARCH_X86
-			/*
-			 * since AVX frequency can be different to base
-			 * frequency, limit use of AVX2 version to later
-			 * plaforms, not all those that could theoretically
-			 * run it.
-			 */
-			if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F))
-				dev->tx_pkt_burst = i40e_xmit_pkts_vec_avx2;
-#endif
+			if (ad->use_latest_vec)
+				dev->tx_pkt_burst =
+					i40e_get_latest_tx_vec();
+			else
+				dev->tx_pkt_burst =
+					i40e_get_recommend_tx_vec();
 		} else {
 			PMD_INIT_LOG(DEBUG, "Simple tx finally be used.");
 			dev->tx_pkt_burst = i40e_xmit_pkts_simple;
@@ -3166,13 +3185,13 @@ i40e_set_default_pctype_table(struct rte_eth_dev *dev)
 }
 
 /* Stubs needed for linkage when CONFIG_RTE_I40E_INC_VECTOR is set to 'n' */
-int __attribute__((weak))
+__rte_weak int
 i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev __rte_unused *dev)
 {
 	return -1;
 }
 
-uint16_t __attribute__((weak))
+__rte_weak uint16_t
 i40e_recv_pkts_vec(
 	void __rte_unused *rx_queue,
 	struct rte_mbuf __rte_unused **rx_pkts,
@@ -3181,7 +3200,7 @@ i40e_recv_pkts_vec(
 	return 0;
 }
 
-uint16_t __attribute__((weak))
+__rte_weak uint16_t
 i40e_recv_scattered_pkts_vec(
 	void __rte_unused *rx_queue,
 	struct rte_mbuf __rte_unused **rx_pkts,
@@ -3190,7 +3209,7 @@ i40e_recv_scattered_pkts_vec(
 	return 0;
 }
 
-uint16_t __attribute__((weak))
+__rte_weak uint16_t
 i40e_recv_pkts_vec_avx2(void __rte_unused *rx_queue,
 	struct rte_mbuf __rte_unused **rx_pkts,
 	uint16_t __rte_unused nb_pkts)
@@ -3198,7 +3217,7 @@ i40e_recv_pkts_vec_avx2(void __rte_unused *rx_queue,
 	return 0;
 }
 
-uint16_t __attribute__((weak))
+__rte_weak uint16_t
 i40e_recv_scattered_pkts_vec_avx2(void __rte_unused *rx_queue,
 	struct rte_mbuf __rte_unused **rx_pkts,
 	uint16_t __rte_unused nb_pkts)
@@ -3206,25 +3225,25 @@ i40e_recv_scattered_pkts_vec_avx2(void __rte_unused *rx_queue,
 	return 0;
 }
 
-int __attribute__((weak))
+__rte_weak int
 i40e_rxq_vec_setup(struct i40e_rx_queue __rte_unused *rxq)
 {
 	return -1;
 }
 
-int __attribute__((weak))
+__rte_weak int
 i40e_txq_vec_setup(struct i40e_tx_queue __rte_unused *txq)
 {
 	return -1;
}
 
-void __attribute__((weak))
+__rte_weak void
 i40e_rx_queue_release_mbufs_vec(struct i40e_rx_queue __rte_unused*rxq)
 {
 	return;
 }
 
-uint16_t __attribute__((weak))
+__rte_weak uint16_t
 i40e_xmit_fixed_burst_vec(void __rte_unused * tx_queue,
 	struct rte_mbuf __rte_unused **tx_pkts,
 	uint16_t __rte_unused nb_pkts)
@@ -3232,7 +3251,7 @@ i40e_xmit_fixed_burst_vec(void __rte_unused * tx_queue,
 	return 0;
 }
 
-uint16_t __attribute__((weak))
+__rte_weak uint16_t
 i40e_xmit_pkts_vec_avx2(void __rte_unused * tx_queue,
 	struct rte_mbuf __rte_unused **tx_pkts,
 	uint16_t __rte_unused nb_pkts)
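The final hunks switch the no-vector stubs from the spelled-out __attribute__((weak)) to DPDK's __rte_weak macro, which expands to the same attribute. The stubs exist so that common code referencing i40e_recv_pkts_vec() and friends still links when CONFIG_RTE_I40E_INC_VECTOR is set to 'n'; when the vector objects are built, their strong definitions override these weak defaults at link time. A minimal sketch of the mechanism, with a hypothetical demo_recv_pkts_vec() in place of the driver symbols:

#include <stdint.h>
#include <stdio.h>

/* Weak default, mirroring the stubs above: callers see an empty
 * burst instead of an unresolved-symbol link error. */
__attribute__((weak)) uint16_t
demo_recv_pkts_vec(void *rxq, void **pkts, uint16_t n)
{
	(void)rxq; (void)pkts; (void)n;
	return 0;
}

int main(void)
{
	/* With no strong definition linked in, the weak stub runs
	 * and this prints 0. */
	printf("%u\n", demo_recv_pkts_vec(NULL, NULL, 32));
	return 0;
}

Linking a second object that provides a strong (non-weak) demo_recv_pkts_vec() would silently replace the stub, which is how the real vector implementations take over when they are compiled in.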