New upstream version 18.11-rc1
diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c
index f82b74a..2f0262a 100644
 #endif
 /* Bit Mask to indicate what bits required for building TX context */
 #define IXGBE_TX_OFFLOAD_MASK (                         \
+               PKT_TX_OUTER_IPV6 |              \
+               PKT_TX_OUTER_IPV4 |              \
+               PKT_TX_IPV6 |                    \
+               PKT_TX_IPV4 |                    \
                PKT_TX_VLAN_PKT |                \
                PKT_TX_IP_CKSUM |                \
                PKT_TX_L4_MASK |                 \
@@ -2057,8 +2061,7 @@ next_desc:
                 * of the ixgbe PMD.
                 *
                 * TODO:
-                *    - Get rid of "volatile" crap and let the compiler do its
-                *      job.
+                *    - Get rid of "volatile" and let the compiler do its job.
                 *    - Use the proper memory barrier (rte_rmb()) to ensure the
                 *      memory ordering below.
                 */
@@ -2848,7 +2851,6 @@ ixgbe_get_rx_port_offloads(struct rte_eth_dev *dev)
        offloads = DEV_RX_OFFLOAD_IPV4_CKSUM  |
                   DEV_RX_OFFLOAD_UDP_CKSUM   |
                   DEV_RX_OFFLOAD_TCP_CKSUM   |
-                  DEV_RX_OFFLOAD_CRC_STRIP   |
                   DEV_RX_OFFLOAD_KEEP_CRC    |
                   DEV_RX_OFFLOAD_JUMBO_FRAME |
                   DEV_RX_OFFLOAD_SCATTER;
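
With DEV_RX_OFFLOAD_CRC_STRIP removed from the advertised Rx offloads, CRC stripping becomes the default behaviour and an application only has to express the opposite intent. A rough configure-time sketch, assuming the 18.11 ethdev API; the port id and helper name are illustrative, not taken from the driver:

#include <rte_ethdev.h>

/* Illustrative only: ask ixgbe to keep the Ethernet CRC on received frames
 * by requesting the KEEP_CRC offload before rte_eth_dev_configure(). */
static int
example_configure_keep_crc(uint16_t port_id)
{
        struct rte_eth_conf conf = { 0 };

        conf.rxmode.offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
        return rte_eth_dev_configure(port_id, 1, 1, &conf);
}

In practice the application would first check the rx_offload_capa reported by rte_eth_dev_info_get() for DEV_RX_OFFLOAD_KEEP_CRC, which is exactly the capability this hunk advertises.
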
@@ -2936,7 +2938,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev,
        rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
                queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
        rxq->port_id = dev->data->port_id;
-       if (rte_eth_dev_must_keep_crc(dev->data->dev_conf.rxmode.offloads))
+       if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
                rxq->crc_len = ETHER_CRC_LEN;
        else
                rxq->crc_len = 0;
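
The rte_eth_dev_must_keep_crc() helper used here previously was dropped together with the CRC_STRIP flag, so the driver now tests DEV_RX_OFFLOAD_KEEP_CRC directly, as in this and the following hunks. A minimal sketch of the resulting pattern; the helper name is illustrative, not part of the driver:

#include <rte_ethdev.h>
#include <rte_ether.h>

/* Illustrative helper: the CRC length stored per Rx queue is derived straight
 * from the offload mask instead of going through the removed ethdev helper. */
static inline uint16_t
example_crc_len(uint64_t rx_offloads)
{
        return (rx_offloads & DEV_RX_OFFLOAD_KEEP_CRC) ? ETHER_CRC_LEN : 0;
}
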
@@ -4705,7 +4707,7 @@ ixgbe_set_rsc(struct rte_eth_dev *dev)
 
        /* RSC global configuration (chapter 4.6.7.2.1 of 82599 Spec) */
 
-       if (rte_eth_dev_must_keep_crc(rx_conf->offloads) &&
+       if ((rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC) &&
             (rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
                /*
                 * According to chapter of 4.6.7.2.1 of the Spec Rev.
@@ -4854,7 +4856,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
         * Configure CRC stripping, if any.
         */
        hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
-       if (rte_eth_dev_must_keep_crc(rx_conf->offloads))
+       if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
                hlreg0 &= ~IXGBE_HLREG0_RXCRCSTRP;
        else
                hlreg0 |= IXGBE_HLREG0_RXCRCSTRP;
@@ -4895,8 +4897,10 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
                 * Reset crc_len in case it was changed after queue setup by a
                 * call to configure.
                 */
-               rxq->crc_len = rte_eth_dev_must_keep_crc(rx_conf->offloads) ?
-                               ETHER_CRC_LEN : 0;
+               if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+                       rxq->crc_len = ETHER_CRC_LEN;
+               else
+                       rxq->crc_len = 0;
 
                /* Setup the Base and Length of the Rx Descriptor Rings */
                bus_addr = rxq->rx_ring_phys_addr;
@@ -4965,7 +4969,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev)
        if (hw->mac.type == ixgbe_mac_82599EB ||
            hw->mac.type == ixgbe_mac_X540) {
                rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
-               if (rte_eth_dev_must_keep_crc(rx_conf->offloads))
+               if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
                        rdrxctl &= ~IXGBE_RDRXCTL_CRCSTRIP;
                else
                        rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
@@ -5702,7 +5706,7 @@ ixgbe_config_rss_filter(struct rte_eth_dev *dev,
         */
        if ((rss_conf.rss_hf & IXGBE_RSS_OFFLOAD_ALL) == 0) {
                ixgbe_rss_disable(dev);
-               return -EINVAL;
+               return 0;
        }
        if (rss_conf.rss_key == NULL)
                rss_conf.rss_key = rss_intel_key; /* Default hash key */
@@ -5715,13 +5719,13 @@ ixgbe_config_rss_filter(struct rte_eth_dev *dev,
 }
 
 /* Stubs needed for linkage when CONFIG_RTE_IXGBE_INC_VECTOR is set to 'n' */
-int __attribute__((weak))
+__rte_weak int
 ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev __rte_unused *dev)
 {
        return -1;
 }
 
-uint16_t __attribute__((weak))
+__rte_weak uint16_t
 ixgbe_recv_pkts_vec(
        void __rte_unused *rx_queue,
        struct rte_mbuf __rte_unused **rx_pkts,
@@ -5730,7 +5734,7 @@ ixgbe_recv_pkts_vec(
        return 0;
 }
 
-uint16_t __attribute__((weak))
+__rte_weak uint16_t
 ixgbe_recv_scattered_pkts_vec(
        void __rte_unused *rx_queue,
        struct rte_mbuf __rte_unused **rx_pkts,
@@ -5739,7 +5743,7 @@ ixgbe_recv_scattered_pkts_vec(
        return 0;
 }
 
-int __attribute__((weak))
+__rte_weak int
 ixgbe_rxq_vec_setup(struct ixgbe_rx_queue __rte_unused *rxq)
 {
        return -1;
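
The stubs above switch from the open-coded GCC attribute to the __rte_weak macro introduced in DPDK 18.11. A small sketch of that linkage pattern, assuming rte_common.h provides the macro; the function name below is illustrative, not taken from the driver:

#include <stdint.h>
#include <rte_common.h>

/* Illustrative weak fallback: this definition links when no vector
 * implementation is compiled in; a non-weak definition of the same symbol in
 * a vector source file (SSE/NEON) silently takes precedence at link time. */
__rte_weak uint16_t
example_recv_pkts_vec(void *rx_queue, void **rx_pkts, uint16_t nb_pkts)
{
        RTE_SET_USED(rx_queue);
        RTE_SET_USED(rx_pkts);
        RTE_SET_USED(nb_pkts);
        return 0;   /* no vector path available: receive nothing */
}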