#endif
/* Bit mask to indicate which bits are required for building a TX context */
#define IXGBE_TX_OFFLOAD_MASK ( \
+ PKT_TX_OUTER_IPV6 | \
+ PKT_TX_OUTER_IPV4 | \
+ PKT_TX_IPV6 | \
+ PKT_TX_IPV4 | \
PKT_TX_VLAN_PKT | \
PKT_TX_IP_CKSUM | \
PKT_TX_L4_MASK | \
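For context, a minimal sketch of how the transmit path consumes this mask; the helper name is hypothetical, but the masking mirrors what ixgbe_xmit_pkts() does with ol_flags:

#include <stdint.h>
#include <rte_mbuf.h>

/* Hypothetical helper (not driver code): nonzero when any bit of
 * IXGBE_TX_OFFLOAD_MASK is set in the packet's offload flags, i.e.
 * when a TX context descriptor must be built before the data
 * descriptor. */
static inline uint64_t
needs_tx_ctx(const struct rte_mbuf *tx_pkt)
{
	return tx_pkt->ol_flags & IXGBE_TX_OFFLOAD_MASK;
}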
* of the ixgbe PMD.
*
* TODO:
- * - Get rid of "volatile" crap and let the compiler do its
- * job.
+ * - Get rid of "volatile" and let the compiler do its job.
* - Use the proper memory barrier (rte_rmb()) to ensure the
* memory ordering below.
*/
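As a sketch of the barrier this TODO asks for (illustrative only; taken to be inside the descriptor scan loop, with rxdp pointing at the current ixgbe advanced RX descriptor):

	staterr = rte_le_to_cpu_32(rxdp->wb.upper.status_error);
	if (!(staterr & IXGBE_RXDADV_STAT_DD))
		break;	/* descriptor not yet written back by hardware */
	/* Read barrier: do not load the remaining descriptor fields
	 * before the DD bit was observed set. */
	rte_rmb();
	pkt_len = rte_le_to_cpu_16(rxdp->wb.upper.length);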
offloads = DEV_RX_OFFLOAD_IPV4_CKSUM |
DEV_RX_OFFLOAD_UDP_CKSUM |
DEV_RX_OFFLOAD_TCP_CKSUM |
- DEV_RX_OFFLOAD_CRC_STRIP |
DEV_RX_OFFLOAD_KEEP_CRC |
DEV_RX_OFFLOAD_JUMBO_FRAME |
DEV_RX_OFFLOAD_SCATTER;
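/*
 * Map the queue to its hardware register index: 1:1 when SR-IOV is
 * inactive; otherwise PF queues start at the default pool's first
 * queue index.
 */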
rxq->reg_idx = (uint16_t)((RTE_ETH_DEV_SRIOV(dev).active == 0) ?
queue_idx : RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx + queue_idx);
rxq->port_id = dev->data->port_id;
- if (rte_eth_dev_must_keep_crc(dev->data->dev_conf.rxmode.offloads))
+ if (dev->data->dev_conf.rxmode.offloads & DEV_RX_OFFLOAD_KEEP_CRC)
rxq->crc_len = ETHER_CRC_LEN;
else
rxq->crc_len = 0;
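For reference, a minimal application-side sketch of requesting this behaviour; port_id, nb_rxq and nb_txq are assumed to be set up elsewhere:

	struct rte_eth_conf port_conf = { 0 };
	int ret;

	/* Keep the Ethernet CRC on received packets; the queue setup
	 * above then accounts for it via rxq->crc_len. */
	port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_KEEP_CRC;
	ret = rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &port_conf);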
/* RSC global configuration (chapter 4.6.7.2.1 of 82599 Spec) */
- if (rte_eth_dev_must_keep_crc(rx_conf->offloads) &&
+ if ((rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC) &&
(rx_conf->offloads & DEV_RX_OFFLOAD_TCP_LRO)) {
/*
 * According to chapter 4.6.7.2.1 of the 82599 Spec, RSC requires CRC
 * stripping to be enabled, so requesting both KEEP_CRC and LRO is an
 * invalid configuration.
 */

/*
 * Configure CRC stripping, if any.
 */
hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0);
- if (rte_eth_dev_must_keep_crc(rx_conf->offloads))
+ if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
hlreg0 &= ~IXGBE_HLREG0_RXCRCSTRP;
else
hlreg0 |= IXGBE_HLREG0_RXCRCSTRP;
/*
 * Reset crc_len in case it was changed after queue setup by a
* call to configure.
*/
- rxq->crc_len = rte_eth_dev_must_keep_crc(rx_conf->offloads) ?
- ETHER_CRC_LEN : 0;
+ if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
+ rxq->crc_len = ETHER_CRC_LEN;
+ else
+ rxq->crc_len = 0;
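Why the reset matters, sketched from the scalar receive path (rxd, rxm and rxq are in scope there): the length the NIC writes back includes the CRC whenever stripping is disabled, and the driver subtracts crc_len so the mbuf length always excludes it:

	pkt_len = (uint16_t)(rte_le_to_cpu_16(rxd.wb.upper.length) -
			     rxq->crc_len);
	rxm->pkt_len = pkt_len;
	rxm->data_len = pkt_len;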
/* Setup the Base and Length of the Rx Descriptor Rings */
bus_addr = rxq->rx_ring_phys_addr;
if (hw->mac.type == ixgbe_mac_82599EB ||
hw->mac.type == ixgbe_mac_X540) {
rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
- if (rte_eth_dev_must_keep_crc(rx_conf->offloads))
+ if (rx_conf->offloads & DEV_RX_OFFLOAD_KEEP_CRC)
rdrxctl &= ~IXGBE_RDRXCTL_CRCSTRIP;
else
rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP;
*/
if ((rss_conf.rss_hf & IXGBE_RSS_OFFLOAD_ALL) == 0) {
ixgbe_rss_disable(dev);
- return -EINVAL;
+ return 0;
}
if (rss_conf.rss_key == NULL)
rss_conf.rss_key = rss_intel_key; /* Default hash key */
}
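A hedged usage sketch of the changed semantics, assuming this hunk sits on the rte_eth_dev_rss_hash_update() path: clearing every hash-function bit now disables RSS and succeeds instead of returning an error, and a NULL key selects the default Intel key:

	struct rte_eth_rss_conf rss_conf = {
		.rss_key = NULL,	/* NULL: driver falls back to rss_intel_key */
		.rss_hf = 0,		/* no hash functions: RSS is disabled */
	};
	int ret = rte_eth_dev_rss_hash_update(port_id, &rss_conf);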
/* Stubs needed for linkage when CONFIG_RTE_IXGBE_INC_VECTOR is set to 'n' */
-int __attribute__((weak))
+__rte_weak int
ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev __rte_unused *dev)
{
return -1;
}
-uint16_t __attribute__((weak))
+__rte_weak uint16_t
ixgbe_recv_pkts_vec(
void __rte_unused *rx_queue,
struct rte_mbuf __rte_unused **rx_pkts,
return 0;
}
-uint16_t __attribute__((weak))
+__rte_weak uint16_t
ixgbe_recv_scattered_pkts_vec(
void __rte_unused *rx_queue,
struct rte_mbuf __rte_unused **rx_pkts,
return 0;
}
-int __attribute__((weak))
+__rte_weak int
ixgbe_rxq_vec_setup(struct ixgbe_rx_queue __rte_unused *rxq)
{
return -1;
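For context, __rte_weak is the portable spelling of the weak attribute, defined in rte_common.h approximately as:

	#define __rte_weak __attribute__((__weak__))

A strong definition of the same symbol in the vector object file (SSE or NEON build) overrides these stubs at link time; when CONFIG_RTE_IXGBE_INC_VECTOR is 'n', the weak stubs are the only definitions and the driver falls back to the scalar paths.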