New upstream version 17.11.4
[deb_dpdk.git] / drivers / net / bnx2x / bnx2x.c
index 6edb2f9..e58684d 100644 (file)
@@ -22,7 +22,6 @@
 #include "ecore_init_ops.h"
 
 #include "rte_version.h"
-#include "rte_pci_dev_ids.h"
 
 #include <sys/types.h>
 #include <sys/stat.h>
 #define BNX2X_PMD_VER_PREFIX "BNX2X PMD"
 #define BNX2X_PMD_VERSION_MAJOR 1
 #define BNX2X_PMD_VERSION_MINOR 0
-#define BNX2X_PMD_VERSION_PATCH 0
+#define BNX2X_PMD_VERSION_REVISION 5
+#define BNX2X_PMD_VERSION_PATCH 1
 
 static inline const char *
 bnx2x_pmd_version(void)
 {
        static char version[32];
 
-       snprintf(version, sizeof(version), "%s %s_%d.%d.%d",
+       snprintf(version, sizeof(version), "%s %s_%d.%d.%d.%d",
                        BNX2X_PMD_VER_PREFIX,
                        BNX2X_DRIVER_VERSION,
                        BNX2X_PMD_VERSION_MAJOR,
                        BNX2X_PMD_VERSION_MINOR,
+                       BNX2X_PMD_VERSION_REVISION,
                        BNX2X_PMD_VERSION_PATCH);
 
        return version;
@@ -119,12 +120,11 @@ static int bnx2x_alloc_mem(struct bnx2x_softc *sc);
 static void bnx2x_free_mem(struct bnx2x_softc *sc);
 static int bnx2x_alloc_fw_stats_mem(struct bnx2x_softc *sc);
 static void bnx2x_free_fw_stats_mem(struct bnx2x_softc *sc);
-static __attribute__ ((noinline))
+static __rte_noinline
 int bnx2x_nic_load(struct bnx2x_softc *sc);
 
 static int bnx2x_handle_sp_tq(struct bnx2x_softc *sc);
 static void bnx2x_handle_fp_tq(struct bnx2x_fastpath *fp, int scan_fp);
-static void bnx2x_periodic_stop(struct bnx2x_softc *sc);
 static void bnx2x_ack_sb(struct bnx2x_softc *sc, uint8_t igu_sb_id,
                         uint8_t storm, uint16_t index, uint8_t op,
                         uint8_t update);
@@ -169,21 +169,21 @@ bnx2x_dma_alloc(struct bnx2x_softc *sc, size_t size, struct bnx2x_dma *dma,
 
        dma->sc = sc;
        if (IS_PF(sc))
-               sprintf(mz_name, "bnx2x%d_%s_%" PRIx64, SC_ABS_FUNC(sc), msg,
+               snprintf(mz_name, sizeof(mz_name), "bnx2x%d_%s_%" PRIx64, SC_ABS_FUNC(sc), msg,
                        rte_get_timer_cycles());
        else
-               sprintf(mz_name, "bnx2x%d_%s_%" PRIx64, sc->pcie_device, msg,
+               snprintf(mz_name, sizeof(mz_name), "bnx2x%d_%s_%" PRIx64, sc->pcie_device, msg,
                        rte_get_timer_cycles());
 
        /* Caller must take care that strlen(mz_name) < RTE_MEMZONE_NAMESIZE */
        z = rte_memzone_reserve_aligned(mz_name, (uint64_t) (size),
-                                       rte_lcore_to_socket_id(rte_lcore_id()),
+                                       SOCKET_ID_ANY,
                                        0, align);
        if (z == NULL) {
                PMD_DRV_LOG(ERR, "DMA alloc failed for %s", msg);
                return -ENOMEM;
        }
-       dma->paddr = (uint64_t) z->phys_addr;
+       dma->paddr = (uint64_t) z->iova;
        dma->vaddr = z->addr;
 
        PMD_DRV_LOG(DEBUG, "%s: virt=%p phys=%" PRIx64, msg, dma->vaddr, dma->paddr);
@@ -418,7 +418,7 @@ void bnx2x_read_dmae(struct bnx2x_softc *sc, uint32_t src_addr, uint32_t len32)
 }
 
 void
-bnx2x_write_dmae(struct bnx2x_softc *sc, phys_addr_t dma_addr, uint32_t dst_addr,
+bnx2x_write_dmae(struct bnx2x_softc *sc, rte_iova_t dma_addr, uint32_t dst_addr,
               uint32_t len32)
 {
        struct dmae_command dmae;
@@ -446,7 +446,7 @@ bnx2x_write_dmae(struct bnx2x_softc *sc, phys_addr_t dma_addr, uint32_t dst_addr
 }
 
 static void
-bnx2x_write_dmae_phys_len(struct bnx2x_softc *sc, phys_addr_t phys_addr,
+bnx2x_write_dmae_phys_len(struct bnx2x_softc *sc, rte_iova_t phys_addr,
                        uint32_t addr, uint32_t len)
 {
        uint32_t dmae_wr_max = DMAE_LEN32_WR_MAX(sc);
@@ -822,14 +822,14 @@ bnx2x_fw_command(struct bnx2x_softc *sc, uint32_t command, uint32_t param)
 
 static void
 __storm_memset_dma_mapping(struct bnx2x_softc *sc, uint32_t addr,
-                          phys_addr_t mapping)
+                          rte_iova_t mapping)
 {
        REG_WR(sc, addr, U64_LO(mapping));
        REG_WR(sc, (addr + 4), U64_HI(mapping));
 }
 
 static void
-storm_memset_spq_addr(struct bnx2x_softc *sc, phys_addr_t mapping,
+storm_memset_spq_addr(struct bnx2x_softc *sc, rte_iova_t mapping,
                      uint16_t abs_fid)
 {
        uint32_t addr = (XSEM_REG_FAST_MEMORY +
@@ -886,7 +886,7 @@ storm_memset_eq_prod(struct bnx2x_softc *sc, uint16_t eq_prod, uint16_t pfid)
 /*
  * Post a slowpath command.
  *
- * A slowpath command is used to propogate a configuration change through
+ * A slowpath command is used to propagate a configuration change through
  * the controller in a controlled manner, allowing each STORM processor and
  * other H/W blocks to phase in the change.  The commands sent on the
  * slowpath are referred to as ramrods.  Depending on the ramrod used the
@@ -1293,7 +1293,7 @@ bnx2x_free_tx_pkt(__rte_unused struct bnx2x_fastpath *fp, struct bnx2x_tx_queue
        struct rte_mbuf *tx_mbuf = txq->sw_ring[TX_BD(pkt_idx, txq)];
 
        if (likely(tx_mbuf != NULL)) {
-               rte_pktmbuf_free(tx_mbuf);
+               rte_pktmbuf_free_seg(tx_mbuf);
        } else {
                PMD_RX_LOG(ERR, "fp[%02d] lost mbuf %lu",
                           fp->index, (unsigned long)TX_BD(pkt_idx, txq));
@@ -1396,10 +1396,10 @@ bnx2x_del_all_macs(struct bnx2x_softc *sc, struct ecore_vlan_mac_obj *mac_obj,
        return rc;
 }
 
-int
+static int
 bnx2x_fill_accept_flags(struct bnx2x_softc *sc, uint32_t rx_mode,
-                     unsigned long *rx_accept_flags,
-                     unsigned long *tx_accept_flags)
+                       unsigned long *rx_accept_flags,
+                       unsigned long *tx_accept_flags)
 {
        /* Clear the flags first */
        *rx_accept_flags = 0;
@@ -1437,6 +1437,7 @@ bnx2x_fill_accept_flags(struct bnx2x_softc *sc, uint32_t rx_mode,
 
                break;
 
+       case BNX2X_RX_MODE_ALLMULTI_PROMISC:
        case BNX2X_RX_MODE_PROMISC:
                /*
                 * According to deffinition of SI mode, iface in promisc mode
@@ -1496,7 +1497,7 @@ bnx2x_set_q_rx_mode(struct bnx2x_softc *sc, uint8_t cl_id,
 
        ramrod_param.rdata = BNX2X_SP(sc, rx_mode_rdata);
        ramrod_param.rdata_mapping =
-           (phys_addr_t)BNX2X_SP_MAPPING(sc, rx_mode_rdata),
+           (rte_iova_t)BNX2X_SP_MAPPING(sc, rx_mode_rdata),
            bnx2x_set_bit(ECORE_FILTER_RX_MODE_PENDING, &sc->sp_state);
 
        ramrod_param.ramrod_flags = ramrod_flags;
@@ -1960,7 +1961,7 @@ static void bnx2x_squeeze_objects(struct bnx2x_softc *sc)
 }
 
 /* stop the controller */
-__attribute__ ((noinline))
+__rte_noinline
 int
 bnx2x_nic_unload(struct bnx2x_softc *sc, uint32_t unload_mode, uint8_t keep_link)
 {
@@ -1969,9 +1970,6 @@ bnx2x_nic_unload(struct bnx2x_softc *sc, uint32_t unload_mode, uint8_t keep_link
 
        PMD_DRV_LOG(DEBUG, "Starting NIC unload...");
 
-       /* stop the periodic callout */
-       bnx2x_periodic_stop(sc);
-
        /* mark driver as unloaded in shmem2 */
        if (IS_PF(sc) && SHMEM2_HAS(sc, drv_capabilities_flag)) {
                val = SHMEM2_RD(sc, drv_capabilities_flag[SC_FW_MB_IDX(sc)]);
@@ -2000,7 +1998,7 @@ bnx2x_nic_unload(struct bnx2x_softc *sc, uint32_t unload_mode, uint8_t keep_link
 
        /*
         * Nothing to do during unload if previous bnx2x_nic_load()
-        * did not completed succesfully - all resourses are released.
+        * did not complete successfully - all resources are released.
         */
        if ((sc->state == BNX2X_STATE_CLOSED) || (sc->state == BNX2X_STATE_ERROR)) {
                return 0;
@@ -2113,147 +2111,127 @@ bnx2x_nic_unload(struct bnx2x_softc *sc, uint32_t unload_mode, uint8_t keep_link
  * the mbuf and return to the caller.
  *
  * Returns:
- *   0 = Success, !0 = Failure
+ *     int: Number of TX BDs used for the mbuf
+ *
  *   Note the side effect that an mbuf may be freed if it causes a problem.
  */
-int bnx2x_tx_encap(struct bnx2x_tx_queue *txq, struct rte_mbuf **m_head, int m_pkts)
+int bnx2x_tx_encap(struct bnx2x_tx_queue *txq, struct rte_mbuf *m0)
 {
-       struct rte_mbuf *m0;
        struct eth_tx_start_bd *tx_start_bd;
        uint16_t bd_prod, pkt_prod;
-       int m_tx;
        struct bnx2x_softc *sc;
        uint32_t nbds = 0;
-       struct bnx2x_fastpath *fp;
 
        sc = txq->sc;
-       fp = &sc->fp[txq->queue_id];
-
        bd_prod = txq->tx_bd_tail;
        pkt_prod = txq->tx_pkt_tail;
 
-       for (m_tx = 0; m_tx < m_pkts; m_tx++) {
-
-               m0 = *m_head++;
-
-               if (unlikely(txq->nb_tx_avail < 3)) {
-                       PMD_TX_LOG(ERR, "no enough bds %d/%d",
-                                  bd_prod, txq->nb_tx_avail);
-                       return -ENOMEM;
-               }
+       txq->sw_ring[TX_BD(pkt_prod, txq)] = m0;
 
-               txq->sw_ring[TX_BD(pkt_prod, txq)] = m0;
+       tx_start_bd = &txq->tx_ring[TX_BD(bd_prod, txq)].start_bd;
 
-               tx_start_bd = &txq->tx_ring[TX_BD(bd_prod, txq)].start_bd;
+       tx_start_bd->addr =
+           rte_cpu_to_le_64(rte_mbuf_data_iova(m0));
+       tx_start_bd->nbytes = rte_cpu_to_le_16(m0->data_len);
+       tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
+       tx_start_bd->general_data =
+           (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
 
-               tx_start_bd->addr =
-                   rte_cpu_to_le_64(rte_mbuf_data_dma_addr(m0));
-               tx_start_bd->nbytes = rte_cpu_to_le_16(m0->data_len);
-               tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
-               tx_start_bd->general_data =
-                   (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
+       tx_start_bd->nbd = rte_cpu_to_le_16(2);
 
-               tx_start_bd->nbd = rte_cpu_to_le_16(2);
+       if (m0->ol_flags & PKT_TX_VLAN_PKT) {
+               tx_start_bd->vlan_or_ethertype =
+                   rte_cpu_to_le_16(m0->vlan_tci);
+               tx_start_bd->bd_flags.as_bitfield |=
+                   (X_ETH_OUTBAND_VLAN <<
+                    ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
+       } else {
+               if (IS_PF(sc))
+                       tx_start_bd->vlan_or_ethertype =
+                           rte_cpu_to_le_16(pkt_prod);
+               else {
+                       struct ether_hdr *eh =
+                           rte_pktmbuf_mtod(m0, struct ether_hdr *);
 
-               if (m0->ol_flags & PKT_TX_VLAN_PKT) {
                        tx_start_bd->vlan_or_ethertype =
-                           rte_cpu_to_le_16(m0->vlan_tci);
-                       tx_start_bd->bd_flags.as_bitfield |=
-                           (X_ETH_OUTBAND_VLAN <<
-                            ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
-               } else {
-                       if (IS_PF(sc))
-                               tx_start_bd->vlan_or_ethertype =
-                                   rte_cpu_to_le_16(pkt_prod);
-                       else {
-                               struct ether_hdr *eh
-                                   = rte_pktmbuf_mtod(m0, struct ether_hdr *);
-
-                               tx_start_bd->vlan_or_ethertype
-                                   = rte_cpu_to_le_16(rte_be_to_cpu_16(eh->ether_type));
-                       }
+                           rte_cpu_to_le_16(rte_be_to_cpu_16(eh->ether_type));
                }
+       }
 
-               bd_prod = NEXT_TX_BD(bd_prod);
-               if (IS_VF(sc)) {
-                       struct eth_tx_parse_bd_e2 *tx_parse_bd;
-                       const struct ether_hdr *eh = rte_pktmbuf_mtod(m0, struct ether_hdr *);
-                       uint8_t mac_type = UNICAST_ADDRESS;
-
-                       tx_parse_bd =
-                           &txq->tx_ring[TX_BD(bd_prod, txq)].parse_bd_e2;
-                       if (is_multicast_ether_addr(&eh->d_addr)) {
-                               if (is_broadcast_ether_addr(&eh->d_addr))
-                                       mac_type = BROADCAST_ADDRESS;
-                               else
-                                       mac_type = MULTICAST_ADDRESS;
-                       }
-                       tx_parse_bd->parsing_data =
-                           (mac_type << ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE_SHIFT);
-
-                       rte_memcpy(&tx_parse_bd->data.mac_addr.dst_hi,
-                                  &eh->d_addr.addr_bytes[0], 2);
-                       rte_memcpy(&tx_parse_bd->data.mac_addr.dst_mid,
-                                  &eh->d_addr.addr_bytes[2], 2);
-                       rte_memcpy(&tx_parse_bd->data.mac_addr.dst_lo,
-                                  &eh->d_addr.addr_bytes[4], 2);
-                       rte_memcpy(&tx_parse_bd->data.mac_addr.src_hi,
-                                  &eh->s_addr.addr_bytes[0], 2);
-                       rte_memcpy(&tx_parse_bd->data.mac_addr.src_mid,
-                                  &eh->s_addr.addr_bytes[2], 2);
-                       rte_memcpy(&tx_parse_bd->data.mac_addr.src_lo,
-                                  &eh->s_addr.addr_bytes[4], 2);
-
-                       tx_parse_bd->data.mac_addr.dst_hi =
-                           rte_cpu_to_be_16(tx_parse_bd->data.mac_addr.dst_hi);
-                       tx_parse_bd->data.mac_addr.dst_mid =
-                           rte_cpu_to_be_16(tx_parse_bd->data.
-                                            mac_addr.dst_mid);
-                       tx_parse_bd->data.mac_addr.dst_lo =
-                           rte_cpu_to_be_16(tx_parse_bd->data.mac_addr.dst_lo);
-                       tx_parse_bd->data.mac_addr.src_hi =
-                           rte_cpu_to_be_16(tx_parse_bd->data.mac_addr.src_hi);
-                       tx_parse_bd->data.mac_addr.src_mid =
-                           rte_cpu_to_be_16(tx_parse_bd->data.
-                                            mac_addr.src_mid);
-                       tx_parse_bd->data.mac_addr.src_lo =
-                           rte_cpu_to_be_16(tx_parse_bd->data.mac_addr.src_lo);
-
-                       PMD_TX_LOG(DEBUG,
-                                  "PBD dst %x %x %x src %x %x %x p_data %x",
-                                  tx_parse_bd->data.mac_addr.dst_hi,
-                                  tx_parse_bd->data.mac_addr.dst_mid,
-                                  tx_parse_bd->data.mac_addr.dst_lo,
-                                  tx_parse_bd->data.mac_addr.src_hi,
-                                  tx_parse_bd->data.mac_addr.src_mid,
-                                  tx_parse_bd->data.mac_addr.src_lo,
-                                  tx_parse_bd->parsing_data);
-               }
+       bd_prod = NEXT_TX_BD(bd_prod);
+       if (IS_VF(sc)) {
+               struct eth_tx_parse_bd_e2 *tx_parse_bd;
+               const struct ether_hdr *eh =
+                   rte_pktmbuf_mtod(m0, struct ether_hdr *);
+               uint8_t mac_type = UNICAST_ADDRESS;
+
+               tx_parse_bd =
+                   &txq->tx_ring[TX_BD(bd_prod, txq)].parse_bd_e2;
+               if (is_multicast_ether_addr(&eh->d_addr)) {
+                       if (is_broadcast_ether_addr(&eh->d_addr))
+                               mac_type = BROADCAST_ADDRESS;
+                       else
+                               mac_type = MULTICAST_ADDRESS;
+               }
+               tx_parse_bd->parsing_data =
+                   (mac_type << ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE_SHIFT);
+
+               rte_memcpy(&tx_parse_bd->data.mac_addr.dst_hi,
+                          &eh->d_addr.addr_bytes[0], 2);
+               rte_memcpy(&tx_parse_bd->data.mac_addr.dst_mid,
+                          &eh->d_addr.addr_bytes[2], 2);
+               rte_memcpy(&tx_parse_bd->data.mac_addr.dst_lo,
+                          &eh->d_addr.addr_bytes[4], 2);
+               rte_memcpy(&tx_parse_bd->data.mac_addr.src_hi,
+                          &eh->s_addr.addr_bytes[0], 2);
+               rte_memcpy(&tx_parse_bd->data.mac_addr.src_mid,
+                          &eh->s_addr.addr_bytes[2], 2);
+               rte_memcpy(&tx_parse_bd->data.mac_addr.src_lo,
+                          &eh->s_addr.addr_bytes[4], 2);
+
+               tx_parse_bd->data.mac_addr.dst_hi =
+                   rte_cpu_to_be_16(tx_parse_bd->data.mac_addr.dst_hi);
+               tx_parse_bd->data.mac_addr.dst_mid =
+                   rte_cpu_to_be_16(tx_parse_bd->data.
+                                    mac_addr.dst_mid);
+               tx_parse_bd->data.mac_addr.dst_lo =
+                   rte_cpu_to_be_16(tx_parse_bd->data.mac_addr.dst_lo);
+               tx_parse_bd->data.mac_addr.src_hi =
+                   rte_cpu_to_be_16(tx_parse_bd->data.mac_addr.src_hi);
+               tx_parse_bd->data.mac_addr.src_mid =
+                   rte_cpu_to_be_16(tx_parse_bd->data.
+                                    mac_addr.src_mid);
+               tx_parse_bd->data.mac_addr.src_lo =
+                   rte_cpu_to_be_16(tx_parse_bd->data.mac_addr.src_lo);
 
                PMD_TX_LOG(DEBUG,
-                          "start bd: nbytes %d flags %x vlan %x\n",
-                          tx_start_bd->nbytes,
-                          tx_start_bd->bd_flags.as_bitfield,
-                          tx_start_bd->vlan_or_ethertype);
+                          "PBD dst %x %x %x src %x %x %x p_data %x",
+                          tx_parse_bd->data.mac_addr.dst_hi,
+                          tx_parse_bd->data.mac_addr.dst_mid,
+                          tx_parse_bd->data.mac_addr.dst_lo,
+                          tx_parse_bd->data.mac_addr.src_hi,
+                          tx_parse_bd->data.mac_addr.src_mid,
+                          tx_parse_bd->data.mac_addr.src_lo,
+                          tx_parse_bd->parsing_data);
+       }
 
-               bd_prod = NEXT_TX_BD(bd_prod);
-               pkt_prod++;
+       PMD_TX_LOG(DEBUG,
+                  "start bd: nbytes %d flags %x vlan %x",
+                  tx_start_bd->nbytes,
+                  tx_start_bd->bd_flags.as_bitfield,
+                  tx_start_bd->vlan_or_ethertype);
 
-               if (TX_IDX(bd_prod) < 2) {
-                       nbds++;
-               }
-       }
+       bd_prod = NEXT_TX_BD(bd_prod);
+       pkt_prod++;
+
+       if (TX_IDX(bd_prod) < 2)
+               nbds++;
 
-       txq->nb_tx_avail -= m_pkts << 1;
+       txq->nb_tx_avail -= 2;
        txq->tx_bd_tail = bd_prod;
        txq->tx_pkt_tail = pkt_prod;
 
-       mb();
-       fp->tx_db.data.prod += (m_pkts << 1) + nbds;
-       DOORBELL(sc, txq->queue_id, fp->tx_db.raw);
-       mb();
-
-       return 0;
+       return nbds + 2;
 }
 
 static uint16_t bnx2x_cid_ilt_lines(struct bnx2x_softc *sc)
@@ -3949,7 +3927,7 @@ static void bnx2x_attn_int_deasserted2(struct bnx2x_softc *sc, uint32_t attn)
                        mask1 = REG_RD(sc, PXP2_REG_PXP2_INT_MASK_1);
                        val0 = REG_RD(sc, PXP2_REG_PXP2_INT_STS_0);
                        /*
-                        * If the olny PXP2_EOP_ERROR_BIT is set in
+                        * If the only PXP2_EOP_ERROR_BIT is set in
                         * STS0 and STS1 - clear it
                         *
                         * probably we lose additional attentions between
@@ -4510,6 +4488,8 @@ static void bnx2x_handle_fp_tq(struct bnx2x_fastpath *fp, int scan_fp)
        struct bnx2x_softc *sc = fp->sc;
        uint8_t more_rx = FALSE;
 
+       PMD_DRV_LOG(DEBUG, "---> FP TASK QUEUE (%d) <--", fp->index);
+
        /* update the fastpath index */
        bnx2x_update_fp_sb_idx(fp);
 
@@ -4617,9 +4597,9 @@ static void bnx2x_init_func_obj(struct bnx2x_softc *sc)
        ecore_init_func_obj(sc,
                            &sc->func_obj,
                            BNX2X_SP(sc, func_rdata),
-                           (phys_addr_t)BNX2X_SP_MAPPING(sc, func_rdata),
+                           (rte_iova_t)BNX2X_SP_MAPPING(sc, func_rdata),
                            BNX2X_SP(sc, func_afex_rdata),
-                           (phys_addr_t)BNX2X_SP_MAPPING(sc, func_afex_rdata),
+                           (rte_iova_t)BNX2X_SP_MAPPING(sc, func_afex_rdata),
                            &bnx2x_func_sp_drv);
 }
 
@@ -4790,7 +4770,7 @@ static void bnx2x_map_sb_state_machines(struct hc_index_data *index_data)
 }
 
 static void
-bnx2x_init_sb(struct bnx2x_softc *sc, phys_addr_t busaddr, int vfid,
+bnx2x_init_sb(struct bnx2x_softc *sc, rte_iova_t busaddr, int vfid,
            uint8_t vf_valid, int fw_sb_id, int igu_sb_id)
 {
        struct hc_status_block_data_e2 sb_data_e2;
@@ -4936,7 +4916,7 @@ static void bnx2x_init_eth_fp(struct bnx2x_softc *sc, int idx)
                             sc->max_cos,
                             SC_FUNC(sc),
                             BNX2X_SP(sc, q_rdata),
-                            (phys_addr_t)BNX2X_SP_MAPPING(sc, q_rdata),
+                            (rte_iova_t)BNX2X_SP_MAPPING(sc, q_rdata),
                             q_type);
 
        /* configure classification DBs */
@@ -4946,7 +4926,7 @@ static void bnx2x_init_eth_fp(struct bnx2x_softc *sc, int idx)
                           idx,
                           SC_FUNC(sc),
                           BNX2X_SP(sc, mac_rdata),
-                          (phys_addr_t)BNX2X_SP_MAPPING(sc, mac_rdata),
+                          (rte_iova_t)BNX2X_SP_MAPPING(sc, mac_rdata),
                           ECORE_FILTER_MAC_PENDING, &sc->sp_state,
                           ECORE_OBJ_TYPE_RX_TX, &sc->macs_pool);
 }
@@ -5046,7 +5026,7 @@ static void bnx2x_init_tx_rings(struct bnx2x_softc *sc)
 static void bnx2x_init_def_sb(struct bnx2x_softc *sc)
 {
        struct host_sp_status_block *def_sb = sc->def_sb;
-       phys_addr_t mapping = sc->def_sb_dma.paddr;
+       rte_iova_t mapping = sc->def_sb_dma.paddr;
        int igu_sp_sb_index;
        int igu_seg_id;
        int port = SC_PORT(sc);
@@ -5718,7 +5698,7 @@ static void bnx2x_init_objs(struct bnx2x_softc *sc)
                             SC_FUNC(sc),
                             SC_FUNC(sc),
                             BNX2X_SP(sc, mcast_rdata),
-                            (phys_addr_t)BNX2X_SP_MAPPING(sc, mcast_rdata),
+                            (rte_iova_t)BNX2X_SP_MAPPING(sc, mcast_rdata),
                             ECORE_FILTER_MCAST_PENDING,
                             &sc->sp_state, o_type);
 
@@ -5742,7 +5722,7 @@ static void bnx2x_init_objs(struct bnx2x_softc *sc)
                                  SC_FUNC(sc),
                                  SC_FUNC(sc),
                                  BNX2X_SP(sc, rss_rdata),
-                                 (phys_addr_t)BNX2X_SP_MAPPING(sc, rss_rdata),
+                                 (rte_iova_t)BNX2X_SP_MAPPING(sc, rss_rdata),
                                  ECORE_FILTER_RSS_CONF_PENDING,
                                  &sc->sp_state, ECORE_OBJ_TYPE_RX);
 }
@@ -5928,7 +5908,7 @@ static void bnx2x_set_234_gates(struct bnx2x_softc *sc, uint8_t close)
                               (val | HC_CONFIG_0_REG_BLOCK_DISABLE_0));
 
        } else {
-/* Prevent incomming interrupts in IGU */
+/* Prevent incoming interrupts in IGU */
                val = REG_RD(sc, IGU_REG_BLOCK_CONFIGURATION);
 
                if (close)
@@ -6463,9 +6443,9 @@ bnx2x_pf_rx_q_prep(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp,
        pause->pri_map = 1;
 
        /* rxq setup */
-       rxq_init->dscr_map = (phys_addr_t)rxq->rx_ring_phys_addr;
-       rxq_init->rcq_map = (phys_addr_t)rxq->cq_ring_phys_addr;
-       rxq_init->rcq_np_map = (phys_addr_t)(rxq->cq_ring_phys_addr +
+       rxq_init->dscr_map = (rte_iova_t)rxq->rx_ring_phys_addr;
+       rxq_init->rcq_map = (rte_iova_t)rxq->cq_ring_phys_addr;
+       rxq_init->rcq_np_map = (rte_iova_t)(rxq->cq_ring_phys_addr +
                                              BNX2X_PAGE_SIZE);
 
        /*
@@ -6504,7 +6484,7 @@ bnx2x_pf_tx_q_prep(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp,
                PMD_TX_LOG(ERR, "ERROR: TX queue is NULL");
                return;
        }
-       txq_init->dscr_map = (phys_addr_t)txq->tx_ring_phys_addr;
+       txq_init->dscr_map = (rte_iova_t)txq->tx_ring_phys_addr;
        txq_init->sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos;
        txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
        txq_init->fw_sb_id = fp->fw_sb_id;
@@ -6622,7 +6602,7 @@ bnx2x_config_rss_pf(struct bnx2x_softc *sc, struct ecore_rss_config_obj *rss_obj
        /* Hash bits */
        params.rss_result_mask = MULTI_MASK;
 
-       (void)rte_memcpy(params.ind_table, rss_obj->ind_table,
+       rte_memcpy(params.ind_table, rss_obj->ind_table,
                         sizeof(params.ind_table));
 
        if (config_hash) {
@@ -6689,7 +6669,7 @@ bnx2x_set_mac_one(struct bnx2x_softc *sc, uint8_t * mac,
 
        /* fill a user request section if needed */
        if (!bnx2x_test_bit(RAMROD_CONT, ramrod_flags)) {
-               (void)rte_memcpy(ramrod_param.user_req.u.mac.mac, mac,
+               rte_memcpy(ramrod_param.user_req.u.mac.mac, mac,
                                 ETH_ALEN);
 
                bnx2x_set_bit(mac_type, &ramrod_param.user_req.vlan_mac_flags);
@@ -6897,7 +6877,7 @@ static void bnx2x_link_report(struct bnx2x_softc *sc)
        sc->link_cnt++;
 
        /* report new link params and remember the state for the next time */
-       (void)rte_memcpy(&sc->last_reported_link, &cur_data, sizeof(cur_data));
+       rte_memcpy(&sc->last_reported_link, &cur_data, sizeof(cur_data));
 
        if (bnx2x_test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
                         &cur_data.link_report_flags)) {
@@ -7017,16 +6997,6 @@ void bnx2x_link_status_update(struct bnx2x_softc *sc)
        }
 }
 
-static void bnx2x_periodic_start(struct bnx2x_softc *sc)
-{
-       atomic_store_rel_long(&sc->periodic_flags, PERIODIC_GO);
-}
-
-static void bnx2x_periodic_stop(struct bnx2x_softc *sc)
-{
-       atomic_store_rel_long(&sc->periodic_flags, PERIODIC_STOP);
-}
-
 static int bnx2x_initial_phy_init(struct bnx2x_softc *sc, int load_mode)
 {
        int rc, cfg_idx = bnx2x_get_link_cfg_idx(sc);
@@ -7035,34 +7005,6 @@ static int bnx2x_initial_phy_init(struct bnx2x_softc *sc, int load_mode)
 
        bnx2x_set_requested_fc(sc);
 
-       if (CHIP_REV_IS_SLOW(sc)) {
-               uint32_t bond = CHIP_BOND_ID(sc);
-               uint32_t feat = 0;
-
-               if (CHIP_IS_E2(sc) && CHIP_IS_MODE_4_PORT(sc)) {
-                       feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_BMAC;
-               } else if (bond & 0x4) {
-                       if (CHIP_IS_E3(sc)) {
-                               feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_XMAC;
-                       } else {
-                               feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_BMAC;
-                       }
-               } else if (bond & 0x8) {
-                       if (CHIP_IS_E3(sc)) {
-                               feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_UMAC;
-                       } else {
-                               feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_EMAC;
-                       }
-               }
-
-/* disable EMAC for E3 and above */
-               if (bond & 0x2) {
-                       feat |= ELINK_FEATURE_CONFIG_EMUL_DISABLE_EMAC;
-               }
-
-               sc->link_params.feature_config_flags |= feat;
-       }
-
        if (load_mode == LOAD_DIAG) {
                lp->loopback_mode = ELINK_LOOPBACK_XGXS;
 /* Prefer doing PHY loopback at 10G speed, if possible */
@@ -7089,10 +7031,6 @@ static int bnx2x_initial_phy_init(struct bnx2x_softc *sc, int load_mode)
                bnx2x_link_report(sc);
        }
 
-       if (!CHIP_REV_IS_SLOW(sc)) {
-               bnx2x_periodic_start(sc);
-       }
-
        sc->link_params.req_line_speed[cfg_idx] = req_line_speed;
        return rc;
 }
@@ -7124,7 +7062,7 @@ void bnx2x_periodic_callout(struct bnx2x_softc *sc)
 {
        if ((sc->state != BNX2X_STATE_OPEN) ||
            (atomic_load_acq_long(&sc->periodic_flags) == PERIODIC_STOP)) {
-               PMD_DRV_LOG(WARNING, "periodic callout exit (state=0x%x)",
+               PMD_DRV_LOG(INFO, "periodic callout exit (state=0x%x)",
                            sc->state);
                return;
        }
@@ -7170,7 +7108,7 @@ void bnx2x_periodic_callout(struct bnx2x_softc *sc)
 }
 
 /* start the controller */
-static __attribute__ ((noinline))
+static __rte_noinline
 int bnx2x_nic_load(struct bnx2x_softc *sc)
 {
        uint32_t val;
@@ -8335,16 +8273,6 @@ static int bnx2x_get_device_info(struct bnx2x_softc *sc)
                        REG_WR(sc, PXP2_REG_PGL_ADDR_90_F1, 0);
                        REG_WR(sc, PXP2_REG_PGL_ADDR_94_F1, 0);
                }
-
-/*
- * Enable internal target-read (in case we are probed after PF
- * FLR). Must be done prior to any BAR read access. Only for
- * 57712 and up
- */
-               if (!CHIP_IS_E1x(sc)) {
-                       REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ,
-                              1);
-               }
        }
 
        /* get the nvram size */
@@ -8905,7 +8833,7 @@ int bnx2x_alloc_hsi_mem(struct bnx2x_softc *sc)
 /***************************/
 
                if (bnx2x_dma_alloc(sc, FW_BUF_SIZE, &sc->gz_buf_dma,
-                                 "fw_dec_buf", RTE_CACHE_LINE_SIZE) != 0) {
+                                 "fw_buf", RTE_CACHE_LINE_SIZE) != 0) {
                        sc->spq = NULL;
                        sc->sp = NULL;
                        sc->eq = NULL;
@@ -9570,11 +9498,13 @@ static int bnx2x_pci_get_caps(struct bnx2x_softc *sc)
 static void bnx2x_init_rte(struct bnx2x_softc *sc)
 {
        if (IS_VF(sc)) {
-               sc->max_tx_queues = BNX2X_VF_MAX_QUEUES_PER_VF;
-               sc->max_rx_queues = BNX2X_VF_MAX_QUEUES_PER_VF;
+               sc->max_tx_queues = min(BNX2X_VF_MAX_QUEUES_PER_VF,
+                                       sc->igu_sb_cnt);
+               sc->max_rx_queues = min(BNX2X_VF_MAX_QUEUES_PER_VF,
+                                       sc->igu_sb_cnt);
        } else {
-               sc->max_tx_queues = 128;
-               sc->max_rx_queues = 128;
+               sc->max_rx_queues = BNX2X_MAX_RSS_COUNT(sc);
+               sc->max_tx_queues = sc->max_rx_queues;
        }
 }
 
@@ -9588,7 +9518,7 @@ void bnx2x_load_firmware(struct bnx2x_softc *sc)
        int f;
        struct stat st;
 
-       fwname = sc->devinfo.device_id == BNX2X_DEV_ID_57711
+       fwname = sc->devinfo.device_id == CHIP_NUM_57711
                ? FW_NAME_57711 : FW_NAME_57810;
        f = open(fwname, O_RDONLY);
        if (f < 0) {
@@ -9698,9 +9628,6 @@ int bnx2x_attach(struct bnx2x_softc *sc)
 
        sc->state = BNX2X_STATE_CLOSED;
 
-       /* Init RTE stuff */
-       bnx2x_init_rte(sc);
-
        pci_write_long(sc, PCICFG_GRC_ADDRESS, PCICFG_VENDOR_ID_OFFSET);
 
        sc->igu_base_addr = IS_VF(sc) ? PXP_VF_ADDR_IGU_START : BAR_IGU_INTMEM;
@@ -9713,13 +9640,26 @@ int bnx2x_attach(struct bnx2x_softc *sc)
                pci_read(sc,
                         (sc->devinfo.pcie_msix_cap_reg + PCIR_MSIX_CTRL), &val,
                         2);
-               sc->igu_sb_cnt = (val & PCIM_MSIXCTRL_TABLE_SIZE);
+               sc->igu_sb_cnt = (val & PCIM_MSIXCTRL_TABLE_SIZE) + 1;
        } else {
                sc->igu_sb_cnt = 1;
        }
 
+       /* Init RTE stuff */
+       bnx2x_init_rte(sc);
+
        if (IS_PF(sc)) {
-/* get device info and set params */
+               /* Enable internal target-read (in case we are probed after PF
+                * FLR). Must be done prior to any BAR read access. Only for
+                * 57712 and up
+                */
+               if (!CHIP_IS_E1x(sc)) {
+                       REG_WR(sc, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ,
+                              1);
+                       DELAY(200000);
+               }
+
+               /* get device info and set params */
                if (bnx2x_get_device_info(sc) != 0) {
                        PMD_DRV_LOG(NOTICE, "getting device info");
                        return -ENXIO;
@@ -9728,7 +9668,7 @@ int bnx2x_attach(struct bnx2x_softc *sc)
 /* get phy settings from shmem and 'and' against admin settings */
                bnx2x_get_phy_info(sc);
        } else {
-/* Left mac of VF unfilled, PF should set it for VF */
+               /* Left mac of VF unfilled, PF should set it for VF */
                memset(sc->link_params.mac_addr, 0, ETHER_ADDR_LEN);
        }
 
@@ -11103,7 +11043,7 @@ static int bnx2x_init_hw_func(struct bnx2x_softc *sc)
        for (i = 0; i < L2_ILT_LINES(sc); i++) {
                ilt->lines[cdu_ilt_start + i].page = sc->context[i].vcxt;
                ilt->lines[cdu_ilt_start + i].page_mapping =
-                   (phys_addr_t)sc->context[i].vcxt_dma.paddr;
+                   (rte_iova_t)sc->context[i].vcxt_dma.paddr;
                ilt->lines[cdu_ilt_start + i].size = sc->context[i].size;
        }
        ecore_ilt_init_op(sc, INITOP_SET);
@@ -11401,7 +11341,7 @@ static void bnx2x_reset_port(struct bnx2x_softc *sc)
        }
 }
 
-static void bnx2x_ilt_wr(struct bnx2x_softc *sc, uint32_t index, phys_addr_t addr)
+static void bnx2x_ilt_wr(struct bnx2x_softc *sc, uint32_t index, rte_iova_t addr)
 {
        int reg;
        uint32_t wb_write[2];
@@ -11631,7 +11571,7 @@ static int ecore_gunzip(struct bnx2x_softc *sc, const uint8_t * zbuf, int len)
 }
 
 static void
-ecore_write_dmae_phys_len(struct bnx2x_softc *sc, phys_addr_t phys_addr,
+ecore_write_dmae_phys_len(struct bnx2x_softc *sc, rte_iova_t phys_addr,
                          uint32_t addr, uint32_t len)
 {
        bnx2x_write_dmae_phys_len(sc, phys_addr, addr, len);