#include "base/ixgbe_phy.h"
#include "ixgbe_regs.h"
+#include "rte_pmd_ixgbe.h"
+
/*
 * High threshold controlling when to start sending XOFF frames. Must be at
 * least 8 bytes less than receive packet buffer size. This value is in units
 * of 1024 bytes.
 */
/* Timer value included in XOFF frames. */
#define IXGBE_FC_PAUSE 0x680
+/* Default value of Max Rx Queue */
+#define IXGBE_MAX_RX_QUEUE_NUM 128
+
#define IXGBE_LINK_DOWN_CHECK_TIMEOUT 4000 /* ms */
#define IXGBE_LINK_UP_CHECK_TIMEOUT 1000 /* ms */
#define IXGBE_VMDQ_NUM_UC_MAC 4096 /* Maximum nb. of UC MAC addr. */
#define IXGBE_VMVIR_TAGA_ETAG_INSERT 0x08000000
#define IXGBE_VMTIR(_i) (0x00017000 + ((_i) * 4)) /* 64 of these (0-63) */
#define IXGBE_QDE_STRIP_TAG 0x00000004
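+/* The VF exposes only 3 interrupt causes, hence a 3-bit event cause mask */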
+#define IXGBE_VTEICR_MASK 0x07
enum ixgbevf_xcast_modes {
IXGBEVF_XCAST_MODE_NONE = 0,
IXGBEVF_XCAST_MODE_ALLMULTI,
};
+#define IXGBE_EXVET_VET_EXT_SHIFT 16
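+/* DMATXCTL: the outer VLAN EtherType occupies the upper 16 bits */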
+#define IXGBE_DMATXCTL_VT_MASK 0xFFFF0000
+
static int eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev);
static int eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev);
static int ixgbe_dev_configure(struct rte_eth_dev *dev);
static void ixgbe_dev_stats_get(struct rte_eth_dev *dev,
struct rte_eth_stats *stats);
static int ixgbe_dev_xstats_get(struct rte_eth_dev *dev,
- struct rte_eth_xstats *xstats, unsigned n);
+ struct rte_eth_xstat *xstats, unsigned n);
static int ixgbevf_dev_xstats_get(struct rte_eth_dev *dev,
- struct rte_eth_xstats *xstats, unsigned n);
+ struct rte_eth_xstat *xstats, unsigned n);
static void ixgbe_dev_stats_reset(struct rte_eth_dev *dev);
static void ixgbe_dev_xstats_reset(struct rte_eth_dev *dev);
+static int ixgbe_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned limit);
+static int ixgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned limit);
static int ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev,
uint16_t queue_id,
uint8_t stat_idx,
static void ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index);
static void ixgbe_set_default_mac_addr(struct rte_eth_dev *dev,
struct ether_addr *mac_addr);
-static void ixgbe_dcb_init(struct ixgbe_hw *hw,struct ixgbe_dcb_config *dcb_config);
+static void ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config);
/* For Virtual Function support */
static int eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev);
static int eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev);
static int ixgbevf_dev_configure(struct rte_eth_dev *dev);
static int ixgbevf_dev_start(struct rte_eth_dev *dev);
+static int ixgbevf_dev_link_update(struct rte_eth_dev *dev,
+ int wait_to_complete);
static void ixgbevf_dev_stop(struct rte_eth_dev *dev);
static void ixgbevf_dev_close(struct rte_eth_dev *dev);
static void ixgbevf_intr_disable(struct ixgbe_hw *hw);
/* For Eth VMDQ APIs support */
static int ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct
- ether_addr* mac_addr,uint8_t on);
-static int ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev,uint8_t on);
+ ether_addr *mac_addr, uint8_t on);
+static int ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on);
static int ixgbe_set_pool_rx_mode(struct rte_eth_dev *dev, uint16_t pool,
uint16_t rx_mask, uint8_t on);
-static int ixgbe_set_pool_rx(struct rte_eth_dev *dev,uint16_t pool,uint8_t on);
-static int ixgbe_set_pool_tx(struct rte_eth_dev *dev,uint16_t pool,uint8_t on);
+static int ixgbe_set_pool_rx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on);
+static int ixgbe_set_pool_tx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on);
static int ixgbe_set_pool_vlan_filter(struct rte_eth_dev *dev, uint16_t vlan,
- uint64_t pool_mask,uint8_t vlan_on);
+ uint64_t pool_mask, uint8_t vlan_on);
static int ixgbe_mirror_rule_set(struct rte_eth_dev *dev,
struct rte_eth_mirror_conf *mirror_conf,
uint8_t rule_id, uint8_t on);
struct timespec *timestamp);
static int ixgbe_timesync_write_time(struct rte_eth_dev *dev,
const struct timespec *timestamp);
+static void ixgbevf_dev_interrupt_handler(struct rte_intr_handle *handle,
+ void *param);
static int ixgbe_dev_l2_tunnel_eth_type_conf
(struct rte_eth_dev *dev, struct rte_eth_l2_tunnel_conf *l2_tunnel);
last = latest; \
}
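+/* Track the per-queue HW VLAN-strip setting in a bitmap so it can be
+ * restored later: queue q maps to word q / (word bits) and bit
+ * q % (word bits), with NBBY bits per byte of each bitmap word.
+ */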
-#define IXGBE_SET_HWSTRIP(h, q) do{\
- uint32_t idx = (q) / (sizeof ((h)->bitmap[0]) * NBBY); \
- uint32_t bit = (q) % (sizeof ((h)->bitmap[0]) * NBBY); \
+#define IXGBE_SET_HWSTRIP(h, q) do {\
+ uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
+ uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
(h)->bitmap[idx] |= 1 << bit;\
} while (0)
-#define IXGBE_CLEAR_HWSTRIP(h, q) do{\
- uint32_t idx = (q) / (sizeof ((h)->bitmap[0]) * NBBY); \
- uint32_t bit = (q) % (sizeof ((h)->bitmap[0]) * NBBY); \
+#define IXGBE_CLEAR_HWSTRIP(h, q) do {\
+ uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
+ uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
(h)->bitmap[idx] &= ~(1 << bit);\
} while (0)
-#define IXGBE_GET_HWSTRIP(h, q, r) do{\
- uint32_t idx = (q) / (sizeof ((h)->bitmap[0]) * NBBY); \
- uint32_t bit = (q) % (sizeof ((h)->bitmap[0]) * NBBY); \
+#define IXGBE_GET_HWSTRIP(h, q, r) do {\
+ uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \
+ uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \
(r) = (h)->bitmap[idx] >> bit & 1;\
} while (0)
/*
 * The set of PCI devices this driver supports
*/
static const struct rte_pci_id pci_id_ixgbe_map[] = {
-
-#define RTE_PCI_DEV_ID_DECL_IXGBE(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
-#include "rte_pci_dev_ids.h"
-
-{ .vendor_id = 0, /* sentinel */ },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_BX) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_DUAL_PORT) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AF_SINGLE_PORT) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598AT2) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_SFP_LOM) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_CX4) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_CX4_DUAL_PORT) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_DA_DUAL_PORT) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82598EB_XF_LR) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KX4_MEZZ) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_KR) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_COMBO_BACKPLANE) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_CX4) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_SUBDEV_ID_82599_SFP) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_SUBDEV_ID_82599_RNDC) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_SUBDEV_ID_82599_560FLR) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_SUBDEV_ID_82599_ECNA_DP) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BACKPLANE_FCOE) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_FCOE) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_EM) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF2) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_SFP_SF_QP) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_QSFP_SF_QP) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599EN_SFP) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_XAUI_LOM) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_T3_LOM) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_LS) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540T1) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_SFP) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_10G_T) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_1G_T) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550T1) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_KR_L) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP_N) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SGMII_L) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_10G_T) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_QSFP_N) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_SFP) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_1G_T_L) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KX4) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_KR) },
+#ifdef RTE_NIC_BYPASS
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_BYPASS) },
+#endif
+ { .vendor_id = 0, /* sentinel */ },
};
-
/*
* The set of PCI devices this driver supports (for 82599 VF)
*/
static const struct rte_pci_id pci_id_ixgbevf_map[] = {
-
-#define RTE_PCI_DEV_ID_DECL_IXGBEVF(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
-#include "rte_pci_dev_ids.h"
-{ .vendor_id = 0, /* sentinel */ },
-
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_82599_VF_HV) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X540_VF_HV) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF_HV) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550_VF) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_A_VF_HV) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF) },
+ { RTE_PCI_DEVICE(IXGBE_INTEL_VENDOR_ID, IXGBE_DEV_ID_X550EM_X_VF_HV) },
+ { .vendor_id = 0, /* sentinel */ },
};
static const struct rte_eth_desc_lim rx_desc_lim = {
.xstats_get = ixgbe_dev_xstats_get,
.stats_reset = ixgbe_dev_stats_reset,
.xstats_reset = ixgbe_dev_xstats_reset,
+ .xstats_get_names = ixgbe_dev_xstats_get_names,
.queue_stats_mapping_set = ixgbe_dev_queue_stats_mapping_set,
.dev_infos_get = ixgbe_dev_info_get,
.dev_supported_ptypes_get = ixgbe_dev_supported_ptypes_get,
.timesync_disable = ixgbe_timesync_disable,
.timesync_read_rx_timestamp = ixgbe_timesync_read_rx_timestamp,
.timesync_read_tx_timestamp = ixgbe_timesync_read_tx_timestamp,
- .get_reg_length = ixgbe_get_reg_length,
.get_reg = ixgbe_get_regs,
.get_eeprom_length = ixgbe_get_eeprom_length,
.get_eeprom = ixgbe_get_eeprom,
.dev_configure = ixgbevf_dev_configure,
.dev_start = ixgbevf_dev_start,
.dev_stop = ixgbevf_dev_stop,
- .link_update = ixgbe_dev_link_update,
+ .link_update = ixgbevf_dev_link_update,
.stats_get = ixgbevf_dev_stats_get,
.xstats_get = ixgbevf_dev_xstats_get,
.stats_reset = ixgbevf_dev_stats_reset,
.xstats_reset = ixgbevf_dev_stats_reset,
+ .xstats_get_names = ixgbevf_dev_xstats_get_names,
.dev_close = ixgbevf_dev_close,
.allmulticast_enable = ixgbevf_dev_allmulticast_enable,
.allmulticast_disable = ixgbevf_dev_allmulticast_disable,
.rxq_info_get = ixgbe_rxq_info_get,
.txq_info_get = ixgbe_txq_info_get,
.mac_addr_set = ixgbevf_set_default_mac_addr,
- .get_reg_length = ixgbevf_get_reg_length,
.get_reg = ixgbevf_get_regs,
.reta_update = ixgbe_dev_rss_reta_update,
.reta_query = ixgbe_dev_rss_reta_query,
#define IXGBE_NB_RXQ_PRIO_STATS (sizeof(rte_ixgbe_rxq_strings) / \
sizeof(rte_ixgbe_rxq_strings[0]))
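+/* The per-priority stats registers have one copy per 802.1p user priority */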
+#define IXGBE_NB_RXQ_PRIO_VALUES 8
static const struct rte_ixgbe_xstats_name_off rte_ixgbe_txq_strings[] = {
{"xon_packets", offsetof(struct ixgbe_hw_stats, pxontxc)},
#define IXGBE_NB_TXQ_PRIO_STATS (sizeof(rte_ixgbe_txq_strings) / \
sizeof(rte_ixgbe_txq_strings[0]))
+#define IXGBE_NB_TXQ_PRIO_VALUES 8
static const struct rte_ixgbe_xstats_name_off rte_ixgbevf_stats_strings[] = {
{"rx_multicast_packets", offsetof(struct ixgbevf_hw_stats, vfmprc)},
PMD_INIT_LOG(DEBUG, "Write 0x%x to RX IXGBE stat mapping reg:%d",
stat_mappings->rqsmr[n], n);
IXGBE_WRITE_REG(hw, IXGBE_RQSMR(n), stat_mappings->rqsmr[n]);
- }
- else {
+ } else {
PMD_INIT_LOG(DEBUG, "Write 0x%x to TX IXGBE stat mapping reg:%d",
stat_mappings->tqsm[n], n);
IXGBE_WRITE_REG(hw, IXGBE_TQSM(n), stat_mappings->tqsm[n]);
}
static void
-ixgbe_restore_statistics_mapping(struct rte_eth_dev * dev)
+ixgbe_restore_statistics_mapping(struct rte_eth_dev *dev)
{
struct ixgbe_stat_mapping_registers *stat_mappings =
IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(dev->data->dev_private);
}
static void
-ixgbe_dcb_init(struct ixgbe_hw *hw,struct ixgbe_dcb_config *dcb_config)
+ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config)
{
uint8_t i;
struct ixgbe_dcb_tc_config *tc;
tc = &dcb_config->tc_config[0];
tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF;
tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF;
- for (i = 0; i< IXGBE_DCB_MAX_BW_GROUP; i++) {
+ for (i = 0; i < IXGBE_DCB_MAX_BW_GROUP; i++) {
dcb_config->bw_percentage[IXGBE_DCB_TX_CONFIG][i] = 100;
dcb_config->bw_percentage[IXGBE_DCB_RX_CONFIG][i] = 100;
}
struct rte_pci_device *pci_dev;
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
- struct ixgbe_vfta * shadow_vfta =
+ struct ixgbe_vfta *shadow_vfta =
IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
struct ixgbe_hwstrip *hwstrip =
IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private);
* has already done this work. Only check we don't need a different
* RX and TX function.
*/
- if (rte_eal_process_type() != RTE_PROC_PRIMARY){
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
struct ixgbe_tx_queue *txq;
/* TX queue function in primary, set by last queue initialized
- * Tx queue may not initialized by primary process */
+ * Tx queue may not be initialized by the primary process
+ */
if (eth_dev->data->tx_queues) {
txq = eth_dev->data->tx_queues[eth_dev->data->nb_tx_queues-1];
ixgbe_set_tx_function(eth_dev, txq);
} else {
/* Use default TX function if we get here */
PMD_INIT_LOG(NOTICE, "No TX queues configured yet. "
- "Using default TX function.");
+ "Using default TX function.");
}
ixgbe_set_rx_function(eth_dev);
/* Initialize DCB configuration*/
memset(dcb_config, 0, sizeof(struct ixgbe_dcb_config));
- ixgbe_dcb_init(hw,dcb_config);
+ ixgbe_dcb_init(hw, dcb_config);
/* Get Hardware Flow Control setting */
hw->fc.requested_mode = ixgbe_fc_full;
hw->fc.current_mode = ixgbe_fc_full;
if (diag == IXGBE_ERR_EEPROM_VERSION) {
PMD_INIT_LOG(ERR, "This device is a pre-production adapter/"
- "LOM. Please be aware there may be issues associated "
- "with your hardware.");
+ "LOM. Please be aware there may be issues associated "
+ "with your hardware.");
PMD_INIT_LOG(ERR, "If you are experiencing problems "
- "please contact your Intel or hardware representative "
- "who provided you with this hardware.");
+ "please contact your Intel or hardware representative "
+ "who provided you with this hardware.");
} else if (diag == IXGBE_ERR_SFP_NOT_SUPPORTED)
PMD_INIT_LOG(ERR, "Unsupported SFP+ Module");
if (diag) {
/* Allocate memory for storing MAC addresses */
eth_dev->data->mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN *
- hw->mac.num_rar_entries, 0);
+ hw->mac.num_rar_entries, 0);
if (eth_dev->data->mac_addrs == NULL) {
PMD_INIT_LOG(ERR,
- "Failed to allocate %u bytes needed to store "
- "MAC addresses",
- ETHER_ADDR_LEN * hw->mac.num_rar_entries);
+ "Failed to allocate %u bytes needed to store "
+ "MAC addresses",
+ ETHER_ADDR_LEN * hw->mac.num_rar_entries);
return -ENOMEM;
}
/* Copy the permanent MAC address */
/* Allocate memory for storing hash filter MAC addresses */
eth_dev->data->hash_mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN *
- IXGBE_VMDQ_NUM_UC_MAC, 0);
+ IXGBE_VMDQ_NUM_UC_MAC, 0);
if (eth_dev->data->hash_mac_addrs == NULL) {
PMD_INIT_LOG(ERR,
- "Failed to allocate %d bytes needed to store MAC addresses",
- ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC);
+ "Failed to allocate %d bytes needed to store MAC addresses",
+ ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC);
return -ENOMEM;
}
(int) hw->mac.type, (int) hw->phy.type);
PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
- eth_dev->data->port_id, pci_dev->id.vendor_id,
- pci_dev->id.device_id);
+ eth_dev->data->port_id, pci_dev->id.vendor_id,
+ pci_dev->id.device_id);
rte_intr_callback_register(&pci_dev->intr_handle,
ixgbe_dev_interrupt_handler,
/* initialize 5tuple filter list */
TAILQ_INIT(&filter_info->fivetuple_list);
memset(filter_info->fivetuple_mask, 0,
- sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE);
+ sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE);
return 0;
}
struct rte_pci_device *pci_dev;
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
- struct ixgbe_vfta * shadow_vfta =
+ struct ixgbe_vfta *shadow_vfta =
IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private);
struct ixgbe_hwstrip *hwstrip =
IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private);
/* for secondary processes, we don't initialise any further as primary
* has already done this work. Only check we don't need a different
- * RX function */
- if (rte_eal_process_type() != RTE_PROC_PRIMARY){
+ * RX function
+ */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
struct ixgbe_tx_queue *txq;
/* TX queue function in primary, set by last queue initialized
* Tx queue may not be initialized by the primary process
} else {
/* Use default TX function if we get here */
PMD_INIT_LOG(NOTICE,
- "No TX queues configured yet. Using default TX function.");
+ "No TX queues configured yet. Using default TX function.");
}
ixgbe_set_rx_function(eth_dev);
/* Allocate memory for storing MAC addresses */
eth_dev->data->mac_addrs = rte_zmalloc("ixgbevf", ETHER_ADDR_LEN *
- hw->mac.num_rar_entries, 0);
+ hw->mac.num_rar_entries, 0);
if (eth_dev->data->mac_addrs == NULL) {
PMD_INIT_LOG(ERR,
- "Failed to allocate %u bytes needed to store "
- "MAC addresses",
- ETHER_ADDR_LEN * hw->mac.num_rar_entries);
+ "Failed to allocate %u bytes needed to store "
+ "MAC addresses",
+ ETHER_ADDR_LEN * hw->mac.num_rar_entries);
return -ENOMEM;
}
/* reset the hardware with the new settings */
diag = hw->mac.ops.start_hw(hw);
switch (diag) {
- case 0:
- break;
+ case 0:
+ break;
- default:
- PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
- return -EIO;
+ default:
+ PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag);
+ return -EIO;
}
+ rte_intr_callback_register(&pci_dev->intr_handle,
+ ixgbevf_dev_interrupt_handler,
+ (void *)eth_dev);
+ rte_intr_enable(&pci_dev->intr_handle);
+ ixgbevf_intr_enable(hw);
+
PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x mac.type=%s",
eth_dev->data->port_id, pci_dev->id.vendor_id,
pci_dev->id.device_id, "ixgbe_mac_82599_vf");
eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev)
{
struct ixgbe_hw *hw;
+ struct rte_pci_device *pci_dev = eth_dev->pci_dev;
PMD_INIT_FUNC_TRACE();
rte_free(eth_dev->data->mac_addrs);
eth_dev->data->mac_addrs = NULL;
+ rte_intr_disable(&pci_dev->intr_handle);
+ rte_intr_callback_unregister(&pci_dev->intr_handle,
+ ixgbevf_dev_interrupt_handler,
+ (void *)eth_dev);
+
return 0;
}
static struct eth_driver rte_ixgbe_pmd = {
.pci_drv = {
- .name = "rte_ixgbe_pmd",
.id_table = pci_id_ixgbe_map,
.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
RTE_PCI_DRV_DETACHABLE,
+ .probe = rte_eth_dev_pci_probe,
+ .remove = rte_eth_dev_pci_remove,
},
.eth_dev_init = eth_ixgbe_dev_init,
.eth_dev_uninit = eth_ixgbe_dev_uninit,
*/
static struct eth_driver rte_ixgbevf_pmd = {
.pci_drv = {
- .name = "rte_ixgbevf_pmd",
.id_table = pci_id_ixgbevf_map,
.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_DETACHABLE,
+ .probe = rte_eth_dev_pci_probe,
+ .remove = rte_eth_dev_pci_remove,
},
.eth_dev_init = eth_ixgbevf_dev_init,
.eth_dev_uninit = eth_ixgbevf_dev_uninit,
.dev_private_size = sizeof(struct ixgbe_adapter),
};
-/*
- * Driver initialization routine.
- * Invoked once at EAL init time.
- * Register itself as the [Poll Mode] Driver of PCI IXGBE devices.
- */
-static int
-rte_ixgbe_pmd_init(const char *name __rte_unused, const char *params __rte_unused)
-{
- PMD_INIT_FUNC_TRACE();
-
- rte_eth_driver_register(&rte_ixgbe_pmd);
- return 0;
-}
-
-/*
- * VF Driver initialization routine.
- * Invoked one at EAL init time.
- * Register itself as the [Virtual Poll Mode] Driver of PCI niantic devices.
- */
-static int
-rte_ixgbevf_pmd_init(const char *name __rte_unused, const char *param __rte_unused)
-{
- PMD_INIT_FUNC_TRACE();
-
- rte_eth_driver_register(&rte_ixgbevf_pmd);
- return 0;
-}
-
static int
ixgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on)
{
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct ixgbe_vfta * shadow_vfta =
+ struct ixgbe_vfta *shadow_vfta =
IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
uint32_t vfta;
uint32_t vid_idx;
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
int ret = 0;
+ uint32_t reg;
+ uint32_t qinq;
+
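+ /* DMATXCTL.GDV set means double VLAN (QinQ) is enabled */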
+ qinq = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
+ qinq &= IXGBE_DMATXCTL_GDV;
switch (vlan_type) {
case ETH_VLAN_TYPE_INNER:
- /* Only the high 16-bits is valid */
- IXGBE_WRITE_REG(hw, IXGBE_EXVET, tpid << 16);
+ if (qinq) {
+ reg = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+ reg = (reg & (~IXGBE_VLNCTRL_VET)) | (uint32_t)tpid;
+ IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, reg);
+ reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
+ reg = (reg & (~IXGBE_DMATXCTL_VT_MASK))
+ | ((uint32_t)tpid << IXGBE_DMATXCTL_VT_SHIFT);
+ IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg);
+ } else {
+ ret = -ENOTSUP;
+ PMD_DRV_LOG(ERR, "Inner type is not supported"
+ " by single VLAN");
+ }
+ break;
+ case ETH_VLAN_TYPE_OUTER:
+ if (qinq) {
+ /* Only the high 16 bits are valid */
+ IXGBE_WRITE_REG(hw, IXGBE_EXVET, (uint32_t)tpid <<
+ IXGBE_EXVET_VET_EXT_SHIFT);
+ } else {
+ reg = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+ reg = (reg & (~IXGBE_VLNCTRL_VET)) | (uint32_t)tpid;
+ IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, reg);
+ reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
+ reg = (reg & (~IXGBE_DMATXCTL_VT_MASK))
+ | ((uint32_t)tpid << IXGBE_DMATXCTL_VT_SHIFT);
+ IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg);
+ }
+
break;
default:
ret = -EINVAL;
- PMD_DRV_LOG(ERR, "Unsupported vlan type %d\n", vlan_type);
+ PMD_DRV_LOG(ERR, "Unsupported VLAN type %d", vlan_type);
break;
}
{
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct ixgbe_vfta * shadow_vfta =
+ struct ixgbe_vfta *shadow_vfta =
IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
uint32_t vlnctrl;
uint16_t i;
{
struct ixgbe_hwstrip *hwstrip =
IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(dev->data->dev_private);
+ struct ixgbe_rx_queue *rxq;
if (queue >= IXGBE_MAX_RX_QUEUE_NUM)
return;
IXGBE_SET_HWSTRIP(hwstrip, queue);
else
IXGBE_CLEAR_HWSTRIP(hwstrip, queue);
+
+ if (queue >= dev->data->nb_rx_queues)
+ return;
+
+ rxq = dev->data->rx_queues[queue];
+
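+ /* Cache the mbuf VLAN flags the Rx path will set for this queue */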
+ if (on)
+ rxq->vlan_flags = PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED;
+ else
+ rxq->vlan_flags = PKT_RX_VLAN_PKT;
}
static void
PMD_INIT_LOG(NOTICE, "82598EB does not support queue-level hw strip");
return;
}
- else {
- /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
- ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
- ctrl &= ~IXGBE_RXDCTL_VME;
- IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
- }
+
+ /* On other 10G NICs, VLAN strip can be set up per queue in RXDCTL */
+ ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
+ ctrl &= ~IXGBE_RXDCTL_VME;
+ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
+
/* record those setting for HW strip per queue */
ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 0);
}
PMD_INIT_LOG(NOTICE, "82598EB does not support queue-level hw strip");
return;
}
- else {
- /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */
- ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
- ctrl |= IXGBE_RXDCTL_VME;
- IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
- }
+
+ /* On other 10G NICs, VLAN strip can be set up per queue in RXDCTL */
+ ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue));
+ ctrl |= IXGBE_RXDCTL_VME;
+ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
+
/* record those setting for HW strip per queue */
ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 1);
}
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint32_t ctrl;
uint16_t i;
+ struct ixgbe_rx_queue *rxq;
PMD_INIT_FUNC_TRACE();
ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
ctrl &= ~IXGBE_VLNCTRL_VME;
IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
- }
- else {
+ } else {
/* On other 10G NICs, VLAN strip can be set up per queue in RXDCTL */
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
+ rxq = dev->data->rx_queues[i];
+ ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
ctrl &= ~IXGBE_RXDCTL_VME;
- IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), ctrl);
+ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), ctrl);
/* record those setting for HW strip per queue */
ixgbe_vlan_hw_strip_bitmap_set(dev, i, 0);
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
uint32_t ctrl;
uint16_t i;
+ struct ixgbe_rx_queue *rxq;
PMD_INIT_FUNC_TRACE();
ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
ctrl |= IXGBE_VLNCTRL_VME;
IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl);
- }
- else {
+ } else {
/* On other 10G NICs, VLAN strip can be set up per queue in RXDCTL */
for (i = 0; i < dev->data->nb_rx_queues; i++) {
- ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
+ rxq = dev->data->rx_queues[i];
+ ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx));
ctrl |= IXGBE_RXDCTL_VME;
- IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(i), ctrl);
+ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), ctrl);
/* record those setting for HW strip per queue */
ixgbe_vlan_hw_strip_bitmap_set(dev, i, 1);
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
/* VLNCTRL: enable vlan filtering and allow all vlan tags through */
uint32_t vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+
vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */
IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl);
}
return -EINVAL;
}
- RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = nb_rx_q;
- RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx = dev->pci_dev->max_vfs * nb_rx_q;
-
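+ /* Spread the 128 Rx queues evenly over the active pools; e.g. with
+ * 32 active pools each pool gets 4 queues, and the PF default pool
+ * starts right after the VF queues.
+ */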
+ RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool =
+ IXGBE_MAX_RX_QUEUE_NUM / RTE_ETH_DEV_SRIOV(dev).active;
+ RTE_ETH_DEV_SRIOV(dev).def_pool_q_idx =
+ dev->pci_dev->max_vfs * RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool;
return 0;
}
/* check multi-queue mode */
switch (dev_conf->rxmode.mq_mode) {
case ETH_MQ_RX_VMDQ_DCB:
+ PMD_INIT_LOG(INFO, "ETH_MQ_RX_VMDQ_DCB mode supported in SRIOV");
+ break;
case ETH_MQ_RX_VMDQ_DCB_RSS:
/* DCB/RSS VMDQ in SRIOV mode, not implement yet */
PMD_INIT_LOG(ERR, "SRIOV active,"
case ETH_MQ_RX_NONE:
/* if nothing mq mode configure, use default scheme */
dev->data->dev_conf.rxmode.mq_mode = ETH_MQ_RX_VMDQ_ONLY;
- if (RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool > 1)
- RTE_ETH_DEV_SRIOV(dev).nb_q_per_pool = 1;
break;
default: /* ETH_MQ_RX_DCB, ETH_MQ_RX_DCB_RSS or ETH_MQ_TX_DCB*/
/* SRIOV only works in VMDq enable mode */
switch (dev_conf->txmode.mq_mode) {
case ETH_MQ_TX_VMDQ_DCB:
- /* DCB VMDQ in SRIOV mode, not implement yet */
- PMD_INIT_LOG(ERR, "SRIOV is active,"
- " unsupported VMDQ mq_mode tx %d.",
- dev_conf->txmode.mq_mode);
- return -EINVAL;
+ PMD_INIT_LOG(INFO, "ETH_MQ_TX_VMDQ_DCB mode supported in SRIOV");
+ dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
+ break;
default: /* ETH_MQ_TX_VMDQ_ONLY or ETH_MQ_TX_NONE */
dev->data->dev_conf.txmode.mq_mode = ETH_MQ_TX_VMDQ_ONLY;
break;
ixgbe_stop_adapter(hw);
/* reinitialize adapter
- * this calls reset and start */
+ * this calls reset and start
+ */
status = ixgbe_pf_reset_hw(hw);
if (status != 0)
return -1;
goto error;
}
+ mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
+ ETH_VLAN_EXTEND_MASK;
+ ixgbe_vlan_offload_set(dev, mask);
+
+ if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
+ /* Enable vlan filtering for VMDq */
+ ixgbe_vmdq_vlan_hw_filter_enable(dev);
+ }
+
+ /* Configure DCB hw */
+ ixgbe_configure_dcb(dev);
+
+ if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE) {
+ err = ixgbe_fdir_configure(dev);
+ if (err)
+ goto error;
+ }
+
+ /* Restore vf rate limit */
+ if (vfinfo != NULL) {
+ for (vf = 0; vf < dev->pci_dev->max_vfs; vf++)
+ for (idx = 0; idx < IXGBE_MAX_QUEUE_NUM_PER_VF; idx++)
+ if (vfinfo[vf].tx_rate[idx] != 0)
+ ixgbe_set_vf_rate_limit(dev, vf,
+ vfinfo[vf].tx_rate[idx],
+ 1 << idx);
+ }
+
+ ixgbe_restore_statistics_mapping(dev);
+
err = ixgbe_dev_rxtx_start(dev);
if (err < 0) {
PMD_INIT_LOG(ERR, "Unable to start rxtx queues");
/* resume enabled intr since hw reset */
ixgbe_enable_intr(dev);
- mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | \
- ETH_VLAN_EXTEND_MASK;
- ixgbe_vlan_offload_set(dev, mask);
-
- if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_VMDQ_ONLY) {
- /* Enable vlan filtering for VMDq */
- ixgbe_vmdq_vlan_hw_filter_enable(dev);
- }
-
- /* Configure DCB hw */
- ixgbe_configure_dcb(dev);
-
- if (dev->data->dev_conf.fdir_conf.mode != RTE_FDIR_MODE_NONE) {
- err = ixgbe_fdir_configure(dev);
- if (err)
- goto error;
- }
-
- /* Restore vf rate limit */
- if (vfinfo != NULL) {
- for (vf = 0; vf < dev->pci_dev->max_vfs; vf++)
- for (idx = 0; idx < IXGBE_MAX_QUEUE_NUM_PER_VF; idx++)
- if (vfinfo[vf].tx_rate[idx] != 0)
- ixgbe_set_vf_rate_limit(dev, vf,
- vfinfo[vf].tx_rate[idx],
- 1 << idx);
- }
-
- ixgbe_restore_statistics_mapping(dev);
-
return 0;
error:
hw_stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC);
for (i = 0; i < 8; i++) {
- uint32_t mp;
- mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
+ uint32_t mp = IXGBE_READ_REG(hw, IXGBE_MPC(i));
+
/* global total per queue */
hw_stats->mpc[i] += mp;
/* Running comprehensive total for stats display */
/* Rx Errors */
stats->imissed = total_missed_rx;
stats->ierrors = hw_stats->crcerrs +
- hw_stats->mspdc +
- hw_stats->rlec +
- hw_stats->ruc +
- hw_stats->roc +
- hw_stats->illerrc +
- hw_stats->errbc +
- hw_stats->rfc +
- hw_stats->fccrc +
- hw_stats->fclast;
+ hw_stats->mspdc +
+ hw_stats->rlec +
+ hw_stats->ruc +
+ hw_stats->roc +
+ hw_stats->illerrc +
+ hw_stats->errbc +
+ hw_stats->rfc +
+ hw_stats->fccrc +
+ hw_stats->fclast;
/* Tx Errors */
stats->oerrors = 0;
/* This function calculates the number of xstats based on the current config */
static unsigned
ixgbe_xstats_calc_num(void) {
- return IXGBE_NB_HW_STATS + (IXGBE_NB_RXQ_PRIO_STATS * 8) +
- (IXGBE_NB_TXQ_PRIO_STATS * 8);
+ return IXGBE_NB_HW_STATS +
+ (IXGBE_NB_RXQ_PRIO_STATS * IXGBE_NB_RXQ_PRIO_VALUES) +
+ (IXGBE_NB_TXQ_PRIO_STATS * IXGBE_NB_TXQ_PRIO_VALUES);
+}
+
+static int ixgbe_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned limit)
+{
+ const unsigned cnt_stats = ixgbe_xstats_calc_num();
+ unsigned stat, i, count;
+
+ if (xstats_names != NULL) {
+ count = 0;
+
+ /* Note: limit >= cnt_stats checked upstream
+ * in rte_eth_xstats_get_names()
+ */
+
+ /* Extended stats from ixgbe_hw_stats */
+ for (i = 0; i < IXGBE_NB_HW_STATS; i++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "%s",
+ rte_ixgbe_stats_strings[i].name);
+ count++;
+ }
+
+ /* RX Priority Stats */
+ for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
+ for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "rx_priority%u_%s", i,
+ rte_ixgbe_rxq_strings[stat].name);
+ count++;
+ }
+ }
+
+ /* TX Priority Stats */
+ for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) {
+ for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) {
+ snprintf(xstats_names[count].name,
+ sizeof(xstats_names[count].name),
+ "tx_priority%u_%s", i,
+ rte_ixgbe_txq_strings[stat].name);
+ count++;
+ }
+ }
+ }
+ return cnt_stats;
+}
+
+static int ixgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
+ struct rte_eth_xstat_name *xstats_names, unsigned limit)
+{
+ unsigned i;
+
+ if (limit < IXGBEVF_NB_XSTATS && xstats_names != NULL)
+ return -ENOMEM;
+
+ if (xstats_names != NULL)
+ for (i = 0; i < IXGBEVF_NB_XSTATS; i++)
+ snprintf(xstats_names[i].name,
+ sizeof(xstats_names[i].name),
+ "%s", rte_ixgbevf_stats_strings[i].name);
+ return IXGBEVF_NB_XSTATS;
}
static int
-ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats,
+ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
unsigned n)
{
struct ixgbe_hw *hw =
/* Extended stats from ixgbe_hw_stats */
count = 0;
for (i = 0; i < IXGBE_NB_HW_STATS; i++) {
- snprintf(xstats[count].name, sizeof(xstats[count].name), "%s",
- rte_ixgbe_stats_strings[i].name);
xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
rte_ixgbe_stats_strings[i].offset);
+ xstats[count].id = count;
count++;
}
/* RX Priority Stats */
for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) {
- for (i = 0; i < 8; i++) {
- snprintf(xstats[count].name, sizeof(xstats[count].name),
- "rx_priority%u_%s", i,
- rte_ixgbe_rxq_strings[stat].name);
+ for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) {
xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
rte_ixgbe_rxq_strings[stat].offset +
(sizeof(uint64_t) * i));
+ xstats[count].id = count;
count++;
}
}
/* TX Priority Stats */
for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) {
- for (i = 0; i < 8; i++) {
- snprintf(xstats[count].name, sizeof(xstats[count].name),
- "tx_priority%u_%s", i,
- rte_ixgbe_txq_strings[stat].name);
+ for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) {
xstats[count].value = *(uint64_t *)(((char *)hw_stats) +
rte_ixgbe_txq_strings[stat].offset +
(sizeof(uint64_t) * i));
+ xstats[count].id = count;
count++;
}
}
-
return count;
}
ixgbevf_update_stats(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats*)
+ struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
/* Good Rx packet, include VF loopback */
}
static int
-ixgbevf_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats,
+ixgbevf_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
unsigned n)
{
struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
/* Extended stats */
for (i = 0; i < IXGBEVF_NB_XSTATS; i++) {
- snprintf(xstats[i].name, sizeof(xstats[i].name),
- "%s", rte_ixgbevf_stats_strings[i].name);
+ xstats[i].id = i;
xstats[i].value = *(uint64_t *)(((char *)hw_stats) +
rte_ixgbevf_stats_strings[i].offset);
}
stats->ibytes = hw_stats->vfgorc;
stats->opackets = hw_stats->vfgptc;
stats->obytes = hw_stats->vfgotc;
- stats->imcasts = hw_stats->vfmprc;
- /* stats->imcasts should be removed as imcasts is deprecated */
}
static void
ixgbevf_dev_stats_reset(struct rte_eth_dev *dev)
{
- struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats*)
+ struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *)
IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private);
/* Sync HW register to the last stats */
hw_stats->vfgorc = 0;
hw_stats->vfgptc = 0;
hw_stats->vfgotc = 0;
- hw_stats->vfmprc = 0;
-
}
static void
dev_info->max_rx_queues = (uint16_t)hw->mac.max_rx_queues;
dev_info->max_tx_queues = (uint16_t)hw->mac.max_tx_queues;
dev_info->min_rx_bufsize = 1024; /* cf BSIZEPACKET in SRRCTL reg */
- dev_info->max_rx_pktlen = 15872; /* includes CRC, cf MAXFRS reg */
+ dev_info->max_rx_pktlen = 9728; /* includes CRC, cf MAXFRS reg */
dev_info->max_mac_addrs = hw->mac.num_rar_entries;
dev_info->max_hash_mac_addrs = IXGBE_VMDQ_NUM_UC_MAC;
dev_info->max_vfs = dev->pci_dev->max_vfs;
dev_info->tx_desc_lim = tx_desc_lim;
}
+static int
+ixgbevf_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
+ int *link_up, int wait_to_complete)
+{
+ /*
+ * For a quick link status check (wait_to_complete == 0),
+ * skip the PF link status check.
+ */
+ bool no_pflink_check = wait_to_complete == 0;
+ struct ixgbe_mbx_info *mbx = &hw->mbx;
+ struct ixgbe_mac_info *mac = &hw->mac;
+ uint32_t links_reg, in_msg;
+ int ret_val = 0;
+
+ /* If we were hit with a reset, drop the link */
+ if (!mbx->ops.check_for_rst(hw, 0) || !mbx->timeout)
+ mac->get_link_status = true;
+
+ if (!mac->get_link_status)
+ goto out;
+
+ /* if link status is down no point in checking to see if pf is up */
+ links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
+ if (!(links_reg & IXGBE_LINKS_UP))
+ goto out;
+
+ /* for SFP+ modules and DA cables on 82599 it can take up to 500usecs
+ * before the link status is correct
+ */
+ if (mac->type == ixgbe_mac_82599_vf) {
+ int i;
+
+ for (i = 0; i < 5; i++) {
+ rte_delay_us(100);
+ links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
+
+ if (!(links_reg & IXGBE_LINKS_UP))
+ goto out;
+ }
+ }
+
+ switch (links_reg & IXGBE_LINKS_SPEED_82599) {
+ case IXGBE_LINKS_SPEED_10G_82599:
+ *speed = IXGBE_LINK_SPEED_10GB_FULL;
+ if (hw->mac.type >= ixgbe_mac_X550) {
+ if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
+ *speed = IXGBE_LINK_SPEED_2_5GB_FULL;
+ }
+ break;
+ case IXGBE_LINKS_SPEED_1G_82599:
+ *speed = IXGBE_LINK_SPEED_1GB_FULL;
+ break;
+ case IXGBE_LINKS_SPEED_100_82599:
+ *speed = IXGBE_LINK_SPEED_100_FULL;
+ if (hw->mac.type == ixgbe_mac_X550) {
+ if (links_reg & IXGBE_LINKS_SPEED_NON_STD)
+ *speed = IXGBE_LINK_SPEED_5GB_FULL;
+ }
+ break;
+ case IXGBE_LINKS_SPEED_10_X550EM_A:
+ *speed = IXGBE_LINK_SPEED_UNKNOWN;
+ /* Reserved in older MACs */
+ if (hw->mac.type >= ixgbe_mac_X550)
+ *speed = IXGBE_LINK_SPEED_10_FULL;
+ break;
+ default:
+ *speed = IXGBE_LINK_SPEED_UNKNOWN;
+ }
+
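+ /* In the no-wait case, trust the VFLINKS reading and skip the PF
+ * mailbox handshake below.
+ */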
+ if (no_pflink_check) {
+ if (*speed == IXGBE_LINK_SPEED_UNKNOWN)
+ mac->get_link_status = true;
+ else
+ mac->get_link_status = false;
+
+ goto out;
+ }
+ /* if the read failed it could just be a mailbox collision, best wait
+ * until we are called again and don't report an error
+ */
+ if (mbx->ops.read(hw, &in_msg, 1, 0))
+ goto out;
+
+ if (!(in_msg & IXGBE_VT_MSGTYPE_CTS)) {
+ /* msg is not CTS and is NACK we must have lost CTS status */
+ if (in_msg & IXGBE_VT_MSGTYPE_NACK)
+ ret_val = -1;
+ goto out;
+ }
+
+ /* the pf is talking, if we timed out in the past we reinit */
+ if (!mbx->timeout) {
+ ret_val = -1;
+ goto out;
+ }
+
+ /* if we passed all the tests above then the link is up and we no
+ * longer need to check for link
+ */
+ mac->get_link_status = false;
+
+out:
+ *link_up = !mac->get_link_status;
+ return ret_val;
+}
+
/* return 0 means link status changed, -1 means not changed */
static int
-ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
+ixgbe_dev_link_update_share(struct rte_eth_dev *dev,
+ int wait_to_complete, int vf)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct rte_eth_link link, old;
ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
int link_up;
int diag;
+ int wait = 1;
link.link_status = ETH_LINK_DOWN;
link.link_speed = 0;
link.link_duplex = ETH_LINK_HALF_DUPLEX;
+ link.link_autoneg = ETH_LINK_AUTONEG;
memset(&old, 0, sizeof(old));
rte_ixgbe_dev_atomic_read_link_status(dev, &old);
/* check if it needs to wait to complete, if lsc interrupt is enabled */
if (wait_to_complete == 0 || dev->data->dev_conf.intr_conf.lsc != 0)
- diag = ixgbe_check_link(hw, &link_speed, &link_up, 0);
+ wait = 0;
+
+ if (vf)
+ diag = ixgbevf_check_link(hw, &link_speed, &link_up, wait);
else
- diag = ixgbe_check_link(hw, &link_speed, &link_up, 1);
+ diag = ixgbe_check_link(hw, &link_speed, &link_up, wait);
if (diag != 0) {
link.link_speed = ETH_SPEED_NUM_100M;
return 0;
}
+static int
+ixgbe_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
+{
+ return ixgbe_dev_link_update_share(dev, wait_to_complete, 0);
+}
+
+static int
+ixgbevf_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
+{
+ return ixgbe_dev_link_update_share(dev, wait_to_complete, 1);
+}
+
static void
ixgbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
PMD_INIT_LOG(INFO, " Port %d: Link Down",
(int)(dev->data->port_id));
}
- PMD_INIT_LOG(DEBUG, "PCI Address: %04d:%02d:%02d:%d",
+ PMD_INIT_LOG(DEBUG, "PCI Address: " PCI_PRI_FMT,
dev->pci_dev->addr.domain,
dev->pci_dev->addr.bus,
dev->pci_dev->addr.devid,
IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
int64_t timeout;
struct rte_eth_link link;
- int intr_enable_delay = false;
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
timeout = IXGBE_LINK_DOWN_CHECK_TIMEOUT;
ixgbe_dev_link_status_print(dev);
-
- intr_enable_delay = true;
- }
-
- if (intr_enable_delay) {
+ intr->mask_original = intr->mask;
+ /* only disable lsc interrupt */
+ intr->mask &= ~IXGBE_EIMS_LSC;
if (rte_eal_alarm_set(timeout * 1000,
- ixgbe_dev_interrupt_delayed_handler, (void*)dev) < 0)
+ ixgbe_dev_interrupt_delayed_handler, (void *)dev) < 0)
PMD_DRV_LOG(ERR, "Error setting alarm");
- } else {
- PMD_DRV_LOG(DEBUG, "enable intr immediately");
- ixgbe_enable_intr(dev);
- rte_intr_enable(&(dev->pci_dev->intr_handle));
+ else
+ intr->mask = intr->mask_original;
}
+ PMD_DRV_LOG(DEBUG, "enable intr immediately");
+ ixgbe_enable_intr(dev);
+ rte_intr_enable(&dev->pci_dev->intr_handle);
return 0;
}
ixgbe_dev_link_update(dev, 0);
intr->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE;
ixgbe_dev_link_status_print(dev);
- _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC);
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
}
PMD_DRV_LOG(DEBUG, "enable intr in delayed handler S[%08x]", eicr);
* Enable flow control according to the current settings.
*/
static int
-ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw,uint8_t tc_num)
+ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw, uint8_t tc_num)
{
int ret_val = 0;
uint32_t mflcn_reg, fccfg_reg;
* and the TX pause can not be disabled
*/
nb_rx_en = 0;
- for (i =0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+ for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
if (reg & IXGBE_FCRTH_FCEN)
nb_rx_en++;
}
if (nb_rx_en > 1)
- fccfg_reg |=IXGBE_FCCFG_TFCE_PRIORITY;
+ fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
break;
case ixgbe_fc_rx_pause:
/*
* and the TX pause can not be disabled
*/
nb_rx_en = 0;
- for (i =0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
+ for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
if (reg & IXGBE_FCRTH_FCEN)
nb_rx_en++;
}
if (nb_rx_en > 1)
- fccfg_reg |=IXGBE_FCCFG_TFCE_PRIORITY;
+ fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
break;
case ixgbe_fc_tx_pause:
/*
* Tx Flow control is enabled, and Rx Flow control is
* disabled by software override.
*/
- fccfg_reg |=IXGBE_FCCFG_TFCE_PRIORITY;
+ fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY;
break;
case ixgbe_fc_full:
/* Flow control (both Rx and Tx) is enabled by SW override. */
PMD_DRV_LOG(DEBUG, "Flow control param set incorrectly");
ret_val = IXGBE_ERR_CONFIG;
goto out;
- break;
}
/* Set 802.3x based flow control settings. */
}
static int
-ixgbe_dcb_pfc_enable(struct rte_eth_dev *dev,uint8_t tc_num)
+ixgbe_dcb_pfc_enable(struct rte_eth_dev *dev, uint8_t tc_num)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
int32_t ret_val = IXGBE_NOT_IMPLEMENTED;
if (hw->mac.type != ixgbe_mac_82598EB) {
- ret_val = ixgbe_dcb_pfc_enable_generic(hw,tc_num);
+ ret_val = ixgbe_dcb_pfc_enable_generic(hw, tc_num);
}
return ret_val;
}
uint8_t tc_num;
uint8_t map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 };
struct ixgbe_hw *hw =
- IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
struct ixgbe_dcb_config *dcb_config =
- IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
+ IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = {
ixgbe_fc_none,
hw->fc.low_water[tc_num] = pfc_conf->fc.low_water;
hw->fc.high_water[tc_num] = pfc_conf->fc.high_water;
- err = ixgbe_dcb_pfc_enable(dev,tc_num);
+ err = ixgbe_dcb_pfc_enable(dev, tc_num);
/* Not negotiated is not an error case */
if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED))
ixgbe_add_rar(dev, addr, 0, 0);
}
+int
+rte_pmd_ixgbe_set_vf_mac_addr(uint8_t port, uint16_t vf,
+ struct ether_addr *mac_addr)
+{
+ struct ixgbe_hw *hw;
+ struct ixgbe_vf_info *vfinfo;
+ int rar_entry;
+ uint8_t *new_mac = (uint8_t *)(mac_addr);
+ struct rte_eth_dev *dev;
+ struct rte_eth_dev_info dev_info;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+ rte_eth_dev_info_get(port, &dev_info);
+
+ if (vf >= dev_info.max_vfs)
+ return -EINVAL;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ vfinfo = *(IXGBE_DEV_PRIVATE_TO_P_VFDATA(dev->data->dev_private));
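+ /* VF MAC addresses are stored from the top of the RAR table down */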
+ rar_entry = hw->mac.num_rar_entries - (vf + 1);
+
+ if (is_valid_assigned_ether_addr((struct ether_addr *)new_mac)) {
+ rte_memcpy(vfinfo[vf].vf_mac_addresses, new_mac,
+ ETHER_ADDR_LEN);
+ return hw->mac.ops.set_rar(hw, rar_entry, new_mac, vf,
+ IXGBE_RAH_AV);
+ }
+ return -EINVAL;
+}
+
static int
ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
{
struct ixgbe_hw *hw;
struct rte_eth_dev_info dev_info;
uint32_t frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+ struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
ixgbe_dev_info_get(dev, &dev_info);
return -EINVAL;
/* refuse mtu that requires the support of scattered packets when this
- * feature has not been enabled before. */
- if (!dev->data->scattered_rx &&
+ * feature has not been enabled before.
+ */
+ if (!rx_conf->enable_scatter &&
(frame_size + 2 * IXGBE_VLAN_TAG_SIZE >
dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))
return -EINVAL;
static int
ixgbevf_dev_configure(struct rte_eth_dev *dev)
{
- struct rte_eth_conf* conf = &dev->data->dev_conf;
+ struct rte_eth_conf *conf = &dev->data->dev_conf;
struct ixgbe_adapter *adapter =
(struct ixgbe_adapter *)dev->data->dev_private;
PMD_INIT_FUNC_TRACE();
- hw->mac.ops.reset_hw(hw);
+ err = hw->mac.ops.reset_hw(hw);
+ if (err) {
+ PMD_INIT_LOG(ERR, "Unable to reset vf hardware (%d)", err);
+ return err;
+ }
hw->mac.get_link_status = true;
/* negotiate mailbox API version to use with the PF. */
}
/* Set vfta */
- ixgbevf_set_vfta_all(dev,1);
+ ixgbevf_set_vfta_all(dev, 1);
/* Set HW strip */
- mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | \
+ mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK |
ETH_VLAN_EXTEND_MASK;
ixgbevf_vlan_offload_set(dev, mask);
ixgbevf_dev_rxtx_start(dev);
/* check and configure queue intr-vector mapping */
- if (dev->data->dev_conf.intr_conf.rxq != 0) {
+ if (rte_intr_cap_multiple(intr_handle) &&
+ dev->data->dev_conf.intr_conf.rxq != 0) {
intr_vector = dev->data->nb_rx_queues;
if (rte_intr_efd_enable(intr_handle, intr_vector))
return -1;
}
ixgbevf_configure_msix(dev);
+ /* When a VF port is bound to VFIO-PCI, only the miscellaneous
+ * interrupt is mapped to VFIO vector 0 in eth_ixgbevf_dev_init().
+ * If that mapping is not cleared, the rte_intr_enable() below will
+ * fail when it tries to map Rx queue interrupts to other VFIO
+ * vectors.
+ * So clear the uio/vfio intr/eventfd first to avoid failure.
+ */
+ rte_intr_disable(intr_handle);
+
rte_intr_enable(intr_handle);
/* Re-enable interrupt for VF */
PMD_INIT_FUNC_TRACE();
+ ixgbevf_intr_disable(hw);
+
hw->adapter_stopped = 1;
ixgbe_stop_adapter(hw);
* Clear what we set, but we still keep shadow_vfta to
* restore after device starts
*/
- ixgbevf_set_vfta_all(dev,0);
+ ixgbevf_set_vfta_all(dev, 0);
/* Clear stored conf */
dev->data->scattered_rx = 0;
static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct ixgbe_vfta * shadow_vfta =
+ struct ixgbe_vfta *shadow_vfta =
IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
int i = 0, j = 0, vfta = 0, mask = 1;
- for (i = 0; i < IXGBE_VFTA_SIZE; i++){
+ for (i = 0; i < IXGBE_VFTA_SIZE; i++) {
vfta = shadow_vfta->vfta[i];
if (vfta) {
mask = 1;
- for (j = 0; j < 32; j++){
+ for (j = 0; j < 32; j++) {
if (vfta & mask)
- ixgbe_set_vfta(hw, (i<<5)+j, 0, on);
- mask<<=1;
+ ixgbe_set_vfta(hw, (i<<5)+j, 0,
+ on, false);
+ mask <<= 1;
}
}
}
{
struct ixgbe_hw *hw =
IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- struct ixgbe_vfta * shadow_vfta =
+ struct ixgbe_vfta *shadow_vfta =
IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private);
uint32_t vid_idx = 0;
uint32_t vid_bit = 0;
PMD_INIT_FUNC_TRACE();
/* vind is not used in VF driver, set to 0, check ixgbe_set_vfta_vf */
- ret = ixgbe_set_vfta(hw, vlan_id, 0, !!on);
+ ret = ixgbe_set_vfta(hw, vlan_id, 0, !!on, false);
if (ret) {
PMD_INIT_LOG(ERR, "Unable to set VF vlan");
return ret;
ctrl &= ~IXGBE_RXDCTL_VME;
IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl);
- ixgbe_vlan_hw_strip_bitmap_set( dev, queue, on);
+ ixgbe_vlan_hw_strip_bitmap_set(dev, queue, on);
}
static void
on = !!(dev->data->dev_conf.rxmode.hw_vlan_strip);
for (i = 0; i < hw->mac.max_rx_queues; i++)
- ixgbevf_vlan_strip_queue_set(dev,i,on);
+ ixgbevf_vlan_strip_queue_set(dev, i, on);
}
}
}
static uint32_t
-ixgbe_uta_vector(struct ixgbe_hw *hw, struct ether_addr* uc_addr)
+ixgbe_uta_vector(struct ixgbe_hw *hw, struct ether_addr *uc_addr)
{
uint32_t vector = 0;
+
switch (hw->mac.mc_filter_type) {
case 0: /* use bits [47:36] of the address */
vector = ((uc_addr->addr_bytes[4] >> 4) |
}
static int
-ixgbe_uc_hash_table_set(struct rte_eth_dev *dev,struct ether_addr* mac_addr,
- uint8_t on)
+ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr,
+ uint8_t on)
{
uint32_t vector;
uint32_t uta_idx;
if (hw->mac.type < ixgbe_mac_82599EB)
return -ENOTSUP;
- vector = ixgbe_uta_vector(hw,mac_addr);
+ vector = ixgbe_uta_vector(hw, mac_addr);
uta_idx = (vector >> ixgbe_uta_bit_shift) & ixgbe_uta_idx_mask;
uta_shift = vector & ixgbe_uta_bit_mask;
IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
else
- IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,hw->mac.mc_filter_type);
+ IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type);
return 0;
}
static int
ixgbe_set_pool_rx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on)
{
- uint32_t reg,addr;
+ uint32_t reg, addr;
uint32_t val;
const uint8_t bit1 = 0x1;
if (ixgbe_vmdq_mode_check(hw) < 0)
return -ENOTSUP;
- addr = IXGBE_VFRE(pool >= ETH_64_POOLS/2);
+ if (pool >= ETH_64_POOLS)
+ return -EINVAL;
+
+ /* for pool >= 32, set bit in PFVFRE[1], otherwise PFVFRE[0] */
+ if (pool >= 32) {
+ addr = IXGBE_VFRE(1);
+ val = bit1 << (pool - 32);
+ } else {
+ addr = IXGBE_VFRE(0);
+ val = bit1 << pool;
+ }
+
reg = IXGBE_READ_REG(hw, addr);
- val = bit1 << pool;
if (on)
reg |= val;
else
reg &= ~val;
- IXGBE_WRITE_REG(hw, addr,reg);
+ IXGBE_WRITE_REG(hw, addr, reg);
return 0;
}
static int
ixgbe_set_pool_tx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on)
{
- uint32_t reg,addr;
+ uint32_t reg, addr;
uint32_t val;
const uint8_t bit1 = 0x1;
if (ixgbe_vmdq_mode_check(hw) < 0)
return -ENOTSUP;
- addr = IXGBE_VFTE(pool >= ETH_64_POOLS/2);
+ if (pool >= ETH_64_POOLS)
+ return -EINVAL;
+
+ /* for pool >= 32, set bit in PFVFTE[1], otherwise PFVFTE[0] */
+ if (pool >= 32) {
+ addr = IXGBE_VFTE(1);
+ val = bit1 << (pool - 32);
+ } else {
+ addr = IXGBE_VFTE(0);
+ val = bit1 << pool;
+ }
+
reg = IXGBE_READ_REG(hw, addr);
- val = bit1 << pool;
if (on)
reg |= val;
else
reg &= ~val;
- IXGBE_WRITE_REG(hw, addr,reg);
+ IXGBE_WRITE_REG(hw, addr, reg);
return 0;
}
return -ENOTSUP;
for (pool_idx = 0; pool_idx < ETH_64_POOLS; pool_idx++) {
if (pool_mask & ((uint64_t)(1ULL << pool_idx))) {
- ret = hw->mac.ops.set_vfta(hw,vlan,pool_idx,vlan_on);
+ ret = hw->mac.ops.set_vfta(hw, vlan, pool_idx,
+ vlan_on, false);
if (ret < 0)
return ret;
}
return ret;
}
+int
+rte_pmd_ixgbe_set_vf_vlan_anti_spoof(uint8_t port, uint16_t vf, uint8_t on)
+{
+ struct ixgbe_hw *hw;
+ struct ixgbe_mac_info *mac;
+ struct rte_eth_dev *dev;
+ struct rte_eth_dev_info dev_info;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+ rte_eth_dev_info_get(port, &dev_info);
+
+ if (vf >= dev_info.max_vfs)
+ return -EINVAL;
+
+ if (on > 1)
+ return -EINVAL;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ mac = &hw->mac;
+
+ mac->ops.set_vlan_anti_spoofing(hw, on, vf);
+
+ return 0;
+}
+
+int
+rte_pmd_ixgbe_set_vf_mac_anti_spoof(uint8_t port, uint16_t vf, uint8_t on)
+{
+ struct ixgbe_hw *hw;
+ struct ixgbe_mac_info *mac;
+ struct rte_eth_dev *dev;
+ struct rte_eth_dev_info dev_info;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+ rte_eth_dev_info_get(port, &dev_info);
+
+ if (vf >= dev_info.max_vfs)
+ return -EINVAL;
+
+ if (on > 1)
+ return -EINVAL;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ mac = &hw->mac;
+ mac->ops.set_mac_anti_spoofing(hw, on, vf);
+
+ return 0;
+}
+
+int
+rte_pmd_ixgbe_set_vf_vlan_insert(uint8_t port, uint16_t vf, uint16_t vlan_id)
+{
+ struct ixgbe_hw *hw;
+ uint32_t ctrl;
+ struct rte_eth_dev *dev;
+ struct rte_eth_dev_info dev_info;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+ rte_eth_dev_info_get(port, &dev_info);
+
+ if (vf >= dev_info.max_vfs)
+ return -EINVAL;
+
+ if (vlan_id > 4095)
+ return -EINVAL;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ ctrl = IXGBE_READ_REG(hw, IXGBE_VMVIR(vf));
+ if (vlan_id) {
+ ctrl = vlan_id;
+ ctrl |= IXGBE_VMVIR_VLANA_DEFAULT;
+ } else {
+ ctrl = 0;
+ }
+
+ IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), ctrl);
+
+ return 0;
+}
+
+int
+rte_pmd_ixgbe_set_tx_loopback(uint8_t port, uint8_t on)
+{
+ struct ixgbe_hw *hw;
+ uint32_t ctrl;
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (on > 1)
+ return -EINVAL;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ ctrl = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC);
+ /* enable or disable VMDQ loopback */
+ if (on)
+ ctrl |= IXGBE_PFDTXGSWC_VT_LBEN;
+ else
+ ctrl &= ~IXGBE_PFDTXGSWC_VT_LBEN;
+
+ IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, ctrl);
+
+ return 0;
+}
+
+int
+rte_pmd_ixgbe_set_all_queues_drop_en(uint8_t port, uint8_t on)
+{
+ struct ixgbe_hw *hw;
+ uint32_t reg_value;
+ int i;
+ int num_queues = (int)(IXGBE_QDE_IDX_MASK >> IXGBE_QDE_IDX_SHIFT);
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+
+ if (on > 1)
+ return -EINVAL;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
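+ /* QDE is accessed indirectly: each write latches the drop-enable bit
+ * for the queue selected by the index field.
+ */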
+ for (i = 0; i <= num_queues; i++) {
+ reg_value = IXGBE_QDE_WRITE |
+ (i << IXGBE_QDE_IDX_SHIFT) |
+ (on & IXGBE_QDE_ENABLE);
+ IXGBE_WRITE_REG(hw, IXGBE_QDE, reg_value);
+ }
+
+ return 0;
+}
+
+int
+rte_pmd_ixgbe_set_vf_split_drop_en(uint8_t port, uint16_t vf, uint8_t on)
+{
+ struct ixgbe_hw *hw;
+ uint32_t reg_value;
+ struct rte_eth_dev *dev;
+ struct rte_eth_dev_info dev_info;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+ rte_eth_dev_info_get(port, &dev_info);
+
+ /* only support VF's 0 to 63 */
+ if ((vf >= dev_info.max_vfs) || (vf > 63))
+ return -EINVAL;
+
+ if (on > 1)
+ return -EINVAL;
+
+ hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ reg_value = IXGBE_READ_REG(hw, IXGBE_SRRCTL(vf));
+ if (on)
+ reg_value |= IXGBE_SRRCTL_DROP_EN;
+ else
+ reg_value &= ~IXGBE_SRRCTL_DROP_EN;
+
+ IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(vf), reg_value);
+
+ return 0;
+}
+
+int
+rte_pmd_ixgbe_set_vf_vlan_stripq(uint8_t port, uint16_t vf, uint8_t on)
+{
+ struct rte_eth_dev *dev;
+ struct rte_eth_dev_info dev_info;
+ uint16_t queues_per_pool;
+ uint32_t q;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port, -ENODEV);
+
+ dev = &rte_eth_devices[port];
+ rte_eth_dev_info_get(port, &dev_info);
+
+ if (vf >= dev_info.max_vfs)
+ return -EINVAL;
+
+ if (on > 1)
+ return -EINVAL;
+
+ RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->vlan_strip_queue_set, -ENOTSUP);
+
+ /* The PF has 128 queue pairs and in SRIOV configuration
+ * those queues will be assigned to VFs, so RXDCTL
+ * registers will be dealing with queues which will be
+ * assigned to VFs.
+ * Say SRIOV is configured with 31 VFs: the first 124 queues
+ * (0-123) are allocated to the VFs and only the last 4
+ * queues (124-127) are assigned to the PF.
+ */
+
+ queues_per_pool = dev_info.vmdq_queue_num / dev_info.max_vmdq_pools;
+
+ for (q = 0; q < queues_per_pool; q++)
+ (*dev->dev_ops->vlan_strip_queue_set)(dev,
+ q + vf * queues_per_pool, on);
+ return 0;
+}
+
#define IXGBE_MRCTL_VPME 0x01 /* Virtual Pool Mirroring. */
#define IXGBE_MRCTL_UPME 0x02 /* Uplink Port Mirroring. */
#define IXGBE_MRCTL_DPME 0x04 /* Downlink Port Mirroring. */
struct rte_eth_mirror_conf *mirror_conf,
uint8_t rule_id, uint8_t on)
{
- uint32_t mr_ctl,vlvf;
+ uint32_t mr_ctl, vlvf;
uint32_t mp_lsb = 0;
uint32_t mv_msb = 0;
uint32_t mv_lsb = 0;
const uint8_t vlan_mask_offset = 32;
const uint8_t dst_pool_offset = 8;
const uint8_t rule_mr_offset = 4;
- const uint8_t mirror_rule_mask= 0x0F;
+ const uint8_t mirror_rule_mask = 0x0F;
struct ixgbe_mirror_info *mr_info =
(IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private));
if (mirror_conf->rule_type & ETH_MIRROR_VLAN) {
mirror_type |= IXGBE_MRCTL_VLME;
/* Check if vlan id is valid and find corresponding VLAN ID index in VLVF */
- for (i = 0;i < IXGBE_VLVF_ENTRIES; i++) {
+ for (i = 0; i < IXGBE_VLVF_ENTRIES; i++) {
if (mirror_conf->vlan.vlan_mask & (1ULL << i)) {
/* search vlan id related pool vlan filter index */
reg_index = ixgbe_find_vlvf_slot(hw,
- mirror_conf->vlan.vlan_id[i]);
+ mirror_conf->vlan.vlan_id[i],
+ false);
if (reg_index < 0)
return -EINVAL;
vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(reg_index));
IXGBE_WRITE_REG(hw, IXGBE_MRCTL(rule_id), mr_ctl);
/* write pool mirror control register */
- if (mirror_conf->rule_type == ETH_MIRROR_VIRTUAL_POOL_UP) {
+ if (mirror_conf->rule_type & ETH_MIRROR_VIRTUAL_POOL_UP) {
IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id), mp_lsb);
IXGBE_WRITE_REG(hw, IXGBE_VMRVM(rule_id + rule_mr_offset),
mp_msb);
}
/* write VLAN mirror control register */
- if (mirror_conf->rule_type == ETH_MIRROR_VLAN) {
+ if (mirror_conf->rule_type & ETH_MIRROR_VLAN) {
IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id), mv_lsb);
IXGBE_WRITE_REG(hw, IXGBE_VMRVLAN(rule_id + rule_mr_offset),
mv_msb);
if (ixgbe_vmdq_mode_check(hw) < 0)
return -ENOTSUP;
+ if (rule_id >= IXGBE_MAX_MIRROR_RULES)
+ return -EINVAL;
+
memset(&mr_info->mr_conf[rule_id], 0,
sizeof(struct rte_eth_mirror_conf));
tmp |= (msix_vector << (8 * (queue & 0x3)));
IXGBE_WRITE_REG(hw, IXGBE_IVAR(idx), tmp);
} else if ((hw->mac.type == ixgbe_mac_82599EB) ||
- (hw->mac.type == ixgbe_mac_X540)) {
+ (hw->mac.type == ixgbe_mac_X540) ||
+ (hw->mac.type == ixgbe_mac_X550)) {
if (direction == -1) {
/* other causes */
idx = ((queue & 1) * 8);
uint32_t q_idx;
uint32_t vector_idx = IXGBE_MISC_VEC_ID;
+ /* Configure VF other cause ivar */
+ ixgbevf_set_ivar_map(hw, -1, 1, vector_idx);
+
/* won't configure msix register if no mapping is done
* between intr vector and event fd.
*/
ixgbevf_set_ivar_map(hw, 0, q_idx, vector_idx);
intr_handle->intr_vec[q_idx] = vector_idx;
}
-
- /* Configure VF other cause ivar */
- ixgbevf_set_ivar_map(hw, -1, 1, vector_idx);
}
/**
break;
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
+ case ixgbe_mac_X550:
ixgbe_set_ivar_map(hw, -1, 1, IXGBE_MISC_VEC_ID);
break;
default:
{
struct ixgbe_hw *hw;
uint32_t max_frame = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+ struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode;
hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
return -EINVAL;
/* refuse mtu that requires the support of scattered packets when this
- * feature has not been enabled before. */
- if (!dev->data->scattered_rx &&
+ * feature has not been enabled before.
+ */
+ if (!rx_conf->enable_scatter &&
(max_frame + 2 * IXGBE_VLAN_TAG_SIZE >
dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM))
return -EINVAL;
const struct reg_info **reg_set = (hw->mac.type == ixgbe_mac_82598EB) ?
ixgbe_regs_mac_82598EB : ixgbe_regs_others;
+ if (data == NULL) {
+ regs->length = ixgbe_get_reg_length(dev);
+ regs->width = sizeof(uint32_t);
+ return 0;
+ }
+
/* Support only full register dump */
if ((regs->length == 0) ||
(regs->length == (uint32_t)ixgbe_get_reg_length(dev))) {
int count = 0;
const struct reg_info *reg_group;
+ if (data == NULL) {
+ regs->length = ixgbevf_get_reg_length(dev);
+ regs->width = sizeof(uint32_t);
+ return 0;
+ }
+
/* Support only full register dump */
if ((regs->length == 0) ||
(regs->length == (uint32_t)ixgbevf_get_reg_length(dev))) {
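The data == NULL branches added above follow the usual ethdev register-dump convention: a first call only reports the required length and word width so the caller can size its buffer, then a second call performs the dump. A minimal caller-side sketch, assuming a valid port id and eliding error logging:

#include <stdlib.h>
#include <string.h>
#include <rte_ethdev.h>

static uint32_t *
dump_port_regs(uint8_t port)
{
	struct rte_eth_dev_reg_info info;
	uint32_t *buf;

	memset(&info, 0, sizeof(info));
	/* first call: info.data == NULL, driver fills length and width */
	if (rte_eth_dev_get_reg_info(port, &info) != 0)
		return NULL;

	buf = malloc((size_t)info.length * info.width);
	if (buf == NULL)
		return NULL;

	/* second call with the reported length does the full dump */
	info.data = buf;
	if (rte_eth_dev_get_reg_info(port, &info) != 0) {
		free(buf);
		return NULL;
	}
	return buf;
}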
struct ixgbe_dcb_config *dcb_config =
IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private);
struct ixgbe_dcb_tc_config *tc;
+ struct rte_eth_dcb_tc_queue_mapping *tc_queue;
+ uint8_t nb_tcs;
uint8_t i, j;
if (dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_DCB_FLAG)
else
dcb_info->nb_tcs = 1;
+ tc_queue = &dcb_info->tc_queue;
+ nb_tcs = dcb_info->nb_tcs;
+
if (dcb_config->vt_mode) { /* vt is enabled */
struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
&dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf;
for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++)
dcb_info->prio_tc[i] = vmdq_rx_conf->dcb_tc[i];
- for (i = 0; i < vmdq_rx_conf->nb_queue_pools; i++) {
- for (j = 0; j < dcb_info->nb_tcs; j++) {
- dcb_info->tc_queue.tc_rxq[i][j].base =
- i * dcb_info->nb_tcs + j;
- dcb_info->tc_queue.tc_rxq[i][j].nb_queue = 1;
- dcb_info->tc_queue.tc_txq[i][j].base =
- i * dcb_info->nb_tcs + j;
- dcb_info->tc_queue.tc_txq[i][j].nb_queue = 1;
+ if (RTE_ETH_DEV_SRIOV(dev).active > 0) {
+ for (j = 0; j < nb_tcs; j++) {
+ tc_queue->tc_rxq[0][j].base = j;
+ tc_queue->tc_rxq[0][j].nb_queue = 1;
+ tc_queue->tc_txq[0][j].base = j;
+ tc_queue->tc_txq[0][j].nb_queue = 1;
+ }
+ } else {
+ for (i = 0; i < vmdq_rx_conf->nb_queue_pools; i++) {
+ for (j = 0; j < nb_tcs; j++) {
+ tc_queue->tc_rxq[i][j].base =
+ i * nb_tcs + j;
+ tc_queue->tc_rxq[i][j].nb_queue = 1;
+ tc_queue->tc_txq[i][j].base =
+ i * nb_tcs + j;
+ tc_queue->tc_txq[i][j].nb_queue = 1;
+ }
}
}
} else { /* vt is disabled */
return ret;
}
-/* ixgbevf_update_xcast_mode - Update Multicast mode
- * @hw: pointer to the HW structure
- * @netdev: pointer to net device structure
- * @xcast_mode: new multicast mode
- *
- * Updates the Multicast Mode of VF.
- */
-static int ixgbevf_update_xcast_mode(struct ixgbe_hw *hw,
- int xcast_mode)
+static void
+ixgbevf_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
- struct ixgbe_mbx_info *mbx = &hw->mbx;
- u32 msgbuf[2];
- s32 err;
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- switch (hw->api_version) {
- case ixgbe_mbox_api_12:
- break;
- default:
- return -EOPNOTSUPP;
- }
+ hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_ALLMULTI);
+}
- msgbuf[0] = IXGBE_VF_UPDATE_XCAST_MODE;
- msgbuf[1] = xcast_mode;
+static void
+ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
- err = mbx->ops.write_posted(hw, msgbuf, 2, 0);
- if (err)
- return err;
+ hw->mac.ops.update_xcast_mode(hw, IXGBEVF_XCAST_MODE_MULTI);
+}
- err = mbx->ops.read_posted(hw, msgbuf, 2, 0);
- if (err)
- return err;
+static void ixgbevf_mbx_process(struct rte_eth_dev *dev)
+{
+ struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ u32 in_msg = 0;
- msgbuf[0] &= ~IXGBE_VT_MSGTYPE_CTS;
- if (msgbuf[0] == (IXGBE_VF_UPDATE_XCAST_MODE | IXGBE_VT_MSGTYPE_NACK))
- return -EPERM;
+ /* peek the message first */
+ in_msg = IXGBE_READ_REG(hw, IXGBE_VFMBMEM);
- return 0;
+ /* PF reset VF event */
+ if (in_msg == IXGBE_PF_CONTROL_MSG) {
+ /* dummy mailbox read to ack the PF */
+ if (ixgbe_read_mbx(hw, &in_msg, 1, 0))
+ return;
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET,
+ NULL);
+ }
}
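ixgbevf_mbx_process() turns a PF control message into an RTE_ETH_EVENT_INTR_RESET callback, so an application that wants to survive a PF-initiated VF reset has to register for that event. A minimal sketch (the callback body is a placeholder; real recovery is application specific):

#include <stdio.h>
#include <rte_common.h>
#include <rte_ethdev.h>

static void
vf_reset_cb(uint8_t port_id, enum rte_eth_event_type event, void *cb_arg)
{
	RTE_SET_USED(cb_arg);

	if (event != RTE_ETH_EVENT_INTR_RESET)
		return;

	/* stop, reconfigure and restart the port here */
	printf("port %u: reset requested by PF\n", port_id);
}

static int
watch_vf_reset(uint8_t port_id)
{
	return rte_eth_dev_callback_register(port_id,
			RTE_ETH_EVENT_INTR_RESET, vf_reset_cb, NULL);
}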
-static void
-ixgbevf_dev_allmulticast_enable(struct rte_eth_dev *dev)
+static int
+ixgbevf_dev_interrupt_get_status(struct rte_eth_dev *dev)
{
+ uint32_t eicr;
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_interrupt *intr =
+ IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+ ixgbevf_intr_disable(hw);
+
+ /* read-on-clear nic registers here */
+ eicr = IXGBE_READ_REG(hw, IXGBE_VTEICR);
+ intr->flags = 0;
- ixgbevf_update_xcast_mode(hw, IXGBEVF_XCAST_MODE_ALLMULTI);
+ /* only one misc vector supported - mailbox */
+ eicr &= IXGBE_VTEICR_MASK;
+ if (eicr == IXGBE_MISC_VEC_ID)
+ intr->flags |= IXGBE_FLAG_MAILBOX;
+
+ return 0;
}
-static void
-ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev)
+static int
+ixgbevf_dev_interrupt_action(struct rte_eth_dev *dev)
{
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+ struct ixgbe_interrupt *intr =
+ IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+
+ if (intr->flags & IXGBE_FLAG_MAILBOX) {
+ ixgbevf_mbx_process(dev);
+ intr->flags &= ~IXGBE_FLAG_MAILBOX;
+ }
+
+ ixgbevf_intr_enable(hw);
- ixgbevf_update_xcast_mode(hw, IXGBEVF_XCAST_MODE_NONE);
+ return 0;
}
-static struct rte_driver rte_ixgbe_driver = {
- .type = PMD_PDEV,
- .init = rte_ixgbe_pmd_init,
-};
+static void
+ixgbevf_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle,
+ void *param)
+{
+ struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
-static struct rte_driver rte_ixgbevf_driver = {
- .type = PMD_PDEV,
- .init = rte_ixgbevf_pmd_init,
-};
+ ixgbevf_dev_interrupt_get_status(dev);
+ ixgbevf_dev_interrupt_action(dev);
+}
-PMD_REGISTER_DRIVER(rte_ixgbe_driver);
-PMD_REGISTER_DRIVER(rte_ixgbevf_driver);
+RTE_PMD_REGISTER_PCI(net_ixgbe, rte_ixgbe_pmd.pci_drv);
+RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe, pci_id_ixgbe_map);
+RTE_PMD_REGISTER_PCI(net_ixgbe_vf, rte_ixgbevf_pmd.pci_drv);
+RTE_PMD_REGISTER_PCI_TABLE(net_ixgbe_vf, pci_id_ixgbevf_map);