New upstream version 17.11.3
[deb_dpdk.git] diff --git a/drivers/net/qede/base/ecore_sriov.c b/drivers/net/qede/base/ecore_sriov.c
index 1b3119d..68f40f8 100644
 #include "ecore_cxt.h"
 #include "ecore_vf.h"
 #include "ecore_init_fw_funcs.h"
+#include "ecore_sp_commands.h"
+
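+/* Forward declaration of the async completion handler registered for
+ * PROTOCOLID_COMMON in ecore_iov_alloc(); it processes VF-related
+ * event-ring entries on behalf of the PF.
+ */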
+static enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn *p_hwfn,
+                                                 u8 opcode,
+                                                 __le16 echo,
+                                                 union event_ring_data *data,
+                                                 u8 fw_return_code);
 
-/* TEMPORARY until we implement print_enums... */
 const char *ecore_channel_tlvs_string[] = {
        "CHANNEL_TLV_NONE",     /* ends tlv sequence */
        "CHANNEL_TLV_ACQUIRE",
@@ -51,20 +57,288 @@ const char *ecore_channel_tlvs_string[] = {
        "CHANNEL_TLV_VPORT_UPDATE_RSS",
        "CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN",
        "CHANNEL_TLV_VPORT_UPDATE_SGE_TPA",
+       "CHANNEL_TLV_UPDATE_TUNN_PARAM",
+       "CHANNEL_TLV_COALESCE_UPDATE",
+       "CHANNEL_TLV_QID",
+       "CHANNEL_TLV_COALESCE_READ",
        "CHANNEL_TLV_MAX"
 };
 
-/* TODO - this is linux crc32; Need a way to ifdef it out for linux */
-u32 ecore_crc32(u32 crc, u8 *ptr, u32 length)
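+/* Derive the 'legacy' behavior flags for a VF from its ACQUIRE request:
+ * an old fastpath HSI implies the legacy Rx-producer placement, and
+ * lack of the QUEUE_QIDS capability implies the legacy CID scheme.
+ */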
+static u8 ecore_vf_calculate_legacy(struct ecore_vf_info *p_vf)
+{
+       u8 legacy = 0;
+
+       if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
+           ETH_HSI_VER_NO_PKT_LEN_TUNN)
+               legacy |= ECORE_QCID_LEGACY_VF_RX_PROD;
+
+       if (!(p_vf->acquire.vfdev_info.capabilities &
+            VFPF_ACQUIRE_CAP_QUEUE_QIDS))
+               legacy |= ECORE_QCID_LEGACY_VF_CID;
+
+       return legacy;
+}
+
+/* IOV ramrods */
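+/* Post a VF_START ramrod on the slowpath queue, informing firmware of
+ * the VF's personality and the fastpath HSI version it will be using.
+ */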
+static enum _ecore_status_t ecore_sp_vf_start(struct ecore_hwfn *p_hwfn,
+                                             struct ecore_vf_info *p_vf)
+{
+       struct vf_start_ramrod_data *p_ramrod = OSAL_NULL;
+       struct ecore_spq_entry *p_ent = OSAL_NULL;
+       struct ecore_sp_init_data init_data;
+       enum _ecore_status_t rc = ECORE_NOTIMPL;
+       u8 fp_minor;
+
+       /* Get SPQ entry */
+       OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+       init_data.cid = ecore_spq_get_cid(p_hwfn);
+       init_data.opaque_fid = p_vf->opaque_fid;
+       init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
+
+       rc = ecore_sp_init_request(p_hwfn, &p_ent,
+                                  COMMON_RAMROD_VF_START,
+                                  PROTOCOLID_COMMON, &init_data);
+       if (rc != ECORE_SUCCESS)
+               return rc;
+
+       p_ramrod = &p_ent->ramrod.vf_start;
+
+       p_ramrod->vf_id = GET_FIELD(p_vf->concrete_fid, PXP_CONCRETE_FID_VFID);
+       p_ramrod->opaque_fid = OSAL_CPU_TO_LE16(p_vf->opaque_fid);
+
+       switch (p_hwfn->hw_info.personality) {
+       case ECORE_PCI_ETH:
+               p_ramrod->personality = PERSONALITY_ETH;
+               break;
+       case ECORE_PCI_ETH_ROCE:
+       case ECORE_PCI_ETH_IWARP:
+               p_ramrod->personality = PERSONALITY_RDMA_AND_ETH;
+               break;
+       default:
+               DP_NOTICE(p_hwfn, true, "Unknown VF personality %d\n",
+                         p_hwfn->hw_info.personality);
+               return ECORE_INVAL;
+       }
+
+       fp_minor = p_vf->acquire.vfdev_info.eth_fp_hsi_minor;
+       if (fp_minor > ETH_HSI_VER_MINOR &&
+           fp_minor != ETH_HSI_VER_NO_PKT_LEN_TUNN) {
+               DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                          "VF [%d] - Requested fp hsi %02x.%02x which is"
+                          " slightly newer than PF's %02x.%02x; Configuring"
+                          " PF's version\n",
+                          p_vf->abs_vf_id,
+                          ETH_HSI_VER_MAJOR, fp_minor,
+                          ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);
+               fp_minor = ETH_HSI_VER_MINOR;
+       }
+
+       p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR;
+       p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = fp_minor;
+
+       DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                  "VF[%d] - Starting using HSI %02x.%02x\n",
+                  p_vf->abs_vf_id, ETH_HSI_VER_MAJOR, fp_minor);
+
+       return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+}
+
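+/* Post a VF_STOP ramrod, instructing firmware to tear down the VF
+ * context identified by the concrete VFID.
+ */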
+static enum _ecore_status_t ecore_sp_vf_stop(struct ecore_hwfn *p_hwfn,
+                                            u32 concrete_vfid,
+                                            u16 opaque_vfid)
+{
+       struct vf_stop_ramrod_data *p_ramrod = OSAL_NULL;
+       struct ecore_spq_entry *p_ent = OSAL_NULL;
+       struct ecore_sp_init_data init_data;
+       enum _ecore_status_t rc = ECORE_NOTIMPL;
+
+       /* Get SPQ entry */
+       OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+       init_data.cid = ecore_spq_get_cid(p_hwfn);
+       init_data.opaque_fid = opaque_vfid;
+       init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
+
+       rc = ecore_sp_init_request(p_hwfn, &p_ent,
+                                  COMMON_RAMROD_VF_STOP,
+                                  PROTOCOLID_COMMON, &init_data);
+       if (rc != ECORE_SUCCESS)
+               return rc;
+
+       p_ramrod = &p_ent->ramrod.vf_stop;
+
+       p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID);
+
+       return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+}
+
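+/* Validate a relative VF index; callers may additionally require that
+ * the VF is initialized (b_enabled_only) and/or that it isn't marked
+ * as malicious (b_non_malicious).
+ */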
+bool ecore_iov_is_valid_vfid(struct ecore_hwfn *p_hwfn, int rel_vf_id,
+                            bool b_enabled_only, bool b_non_malicious)
+{
+       if (!p_hwfn->pf_iov_info) {
+               DP_NOTICE(p_hwfn->p_dev, true, "No iov info\n");
+               return false;
+       }
+
+       if ((rel_vf_id >= p_hwfn->p_dev->p_iov_info->total_vfs) ||
+           (rel_vf_id < 0))
+               return false;
+
+       if ((!p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_init) &&
+           b_enabled_only)
+               return false;
+
+       if ((p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_malicious) &&
+           b_non_malicious)
+               return false;
+
+       return true;
+}
+
+struct ecore_vf_info *ecore_iov_get_vf_info(struct ecore_hwfn *p_hwfn,
+                                           u16 relative_vf_id,
+                                           bool b_enabled_only)
+{
+       struct ecore_vf_info *vf = OSAL_NULL;
+
+       if (!p_hwfn->pf_iov_info) {
+               DP_NOTICE(p_hwfn->p_dev, true, "No iov info\n");
+               return OSAL_NULL;
+       }
+
+       if (ecore_iov_is_valid_vfid(p_hwfn, relative_vf_id,
+                                   b_enabled_only, false))
+               vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id];
+       else
+               DP_ERR(p_hwfn, "ecore_iov_get_vf_info: VF[%d] is not enabled\n",
+                      relative_vf_id);
+
+       return vf;
+}
+
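+/* Return the first Rx queue-cid opened in a VF's queue-zone, if any */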
+static struct ecore_queue_cid *
+ecore_iov_get_vf_rx_queue_cid(struct ecore_vf_queue *p_queue)
+{
+       int i;
+
+       for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
+               if (p_queue->cids[i].p_cid &&
+                   !p_queue->cids[i].b_is_tx)
+                       return p_queue->cids[i].p_cid;
+       }
+
+       return OSAL_NULL;
+}
+
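+/* Expected queue state for validation purposes: N/A skips the check,
+ * while ENABLE/DISABLE require that a matching CID does/doesn't exist.
+ */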
+enum ecore_iov_validate_q_mode {
+       ECORE_IOV_VALIDATE_Q_NA,
+       ECORE_IOV_VALIDATE_Q_ENABLE,
+       ECORE_IOV_VALIDATE_Q_DISABLE,
+};
+
+static bool ecore_iov_validate_queue_mode(struct ecore_vf_info *p_vf,
+                                         u16 qid,
+                                         enum ecore_iov_validate_q_mode mode,
+                                         bool b_is_tx)
 {
        int i;
 
-       while (length--) {
-               crc ^= *ptr++;
-               for (i = 0; i < 8; i++)
-                       crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
+       if (mode == ECORE_IOV_VALIDATE_Q_NA)
+               return true;
+
+       for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
+               struct ecore_vf_queue_cid *p_qcid;
+
+               p_qcid = &p_vf->vf_queues[qid].cids[i];
+
+               if (p_qcid->p_cid == OSAL_NULL)
+                       continue;
+
+               if (p_qcid->b_is_tx != b_is_tx)
+                       continue;
+
+               /* Found. It's enabled. */
+               return (mode == ECORE_IOV_VALIDATE_Q_ENABLE);
+       }
+
+       /* In case we haven't found any valid CID, then it's disabled */
+       return (mode == ECORE_IOV_VALIDATE_Q_DISABLE);
+}
+
+static bool ecore_iov_validate_rxq(struct ecore_hwfn *p_hwfn,
+                                  struct ecore_vf_info *p_vf,
+                                  u16 rx_qid,
+                                  enum ecore_iov_validate_q_mode mode)
+{
+       if (rx_qid >= p_vf->num_rxqs) {
+               DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                          "VF[0x%02x] - can't touch Rx queue[%04x];"
+                          " Only 0x%04x are allocated\n",
+                          p_vf->abs_vf_id, rx_qid, p_vf->num_rxqs);
+               return false;
+       }
+
+       return ecore_iov_validate_queue_mode(p_vf, rx_qid, mode, false);
+}
+
+static bool ecore_iov_validate_txq(struct ecore_hwfn *p_hwfn,
+                                  struct ecore_vf_info *p_vf,
+                                  u16 tx_qid,
+                                  enum ecore_iov_validate_q_mode mode)
+{
+       if (tx_qid >= p_vf->num_txqs) {
+               DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                          "VF[0x%02x] - can't touch Tx queue[%04x];"
+                          " Only 0x%04x are allocated\n",
+                          p_vf->abs_vf_id, tx_qid, p_vf->num_txqs);
+               return false;
        }
-       return crc;
+
+       return ecore_iov_validate_queue_mode(p_vf, tx_qid, mode, true);
+}
+
+static bool ecore_iov_validate_sb(struct ecore_hwfn *p_hwfn,
+                                 struct ecore_vf_info *p_vf,
+                                 u16 sb_idx)
+{
+       int i;
+
+       for (i = 0; i < p_vf->num_sbs; i++)
+               if (p_vf->igu_sbs[i] == sb_idx)
+                       return true;
+
+       DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                  "VF[0x%02x] - tried using sb_idx %04x which doesn't exist as"
+                  " one of its 0x%02x SBs\n",
+                  p_vf->abs_vf_id, sb_idx, p_vf->num_sbs);
+
+       return false;
+}
+
+/* Is there at least 1 queue open? */
+static bool ecore_iov_validate_active_rxq(struct ecore_vf_info *p_vf)
+{
+       u8 i;
+
+       for (i = 0; i < p_vf->num_rxqs; i++)
+               if (ecore_iov_validate_queue_mode(p_vf, i,
+                                                 ECORE_IOV_VALIDATE_Q_ENABLE,
+                                                 false))
+                       return true;
+
+       return false;
+}
+
+static bool ecore_iov_validate_active_txq(struct ecore_vf_info *p_vf)
+{
+       u8 i;
+
+       for (i = 0; i < p_vf->num_txqs; i++)
+               if (ecore_iov_validate_queue_mode(p_vf, i,
+                                                 ECORE_IOV_VALIDATE_Q_ENABLE,
+                                                 true))
+                       return true;
+
+       return false;
 }
 
 enum _ecore_status_t ecore_iov_post_vf_bulletin(struct ecore_hwfn *p_hwfn,
@@ -72,9 +346,9 @@ enum _ecore_status_t ecore_iov_post_vf_bulletin(struct ecore_hwfn *p_hwfn,
                                                struct ecore_ptt *p_ptt)
 {
        struct ecore_bulletin_content *p_bulletin;
+       int crc_size = sizeof(p_bulletin->crc);
        struct ecore_dmae_params params;
        struct ecore_vf_info *p_vf;
-       int crc_size = sizeof(p_bulletin->crc);
 
        p_vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
        if (!p_vf)
@@ -88,8 +362,8 @@ enum _ecore_status_t ecore_iov_post_vf_bulletin(struct ecore_hwfn *p_hwfn,
 
        /* Increment bulletin board version and compute crc */
        p_bulletin->version++;
-       p_bulletin->crc = ecore_crc32(0, (u8 *)p_bulletin + crc_size,
-                                     p_vf->bulletin.size - crc_size);
+       p_bulletin->crc = OSAL_CRC32(0, (u8 *)p_bulletin + crc_size,
+                                    p_vf->bulletin.size - crc_size);
 
        DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
                   "Posting Bulletin 0x%08x to VF[%d] (CRC 0x%08x)\n",
@@ -106,7 +380,7 @@ enum _ecore_status_t ecore_iov_post_vf_bulletin(struct ecore_hwfn *p_hwfn,
 
 static enum _ecore_status_t ecore_iov_pci_cfg_info(struct ecore_dev *p_dev)
 {
-       struct ecore_hw_sriov_info *iov = &p_dev->sriov_info;
+       struct ecore_hw_sriov_info *iov = p_dev->p_iov_info;
        int pos = iov->pos;
 
        DP_VERBOSE(p_dev, ECORE_MSG_IOV, "sriov ext pos %d\n", pos);
@@ -145,9 +419,9 @@ static enum _ecore_status_t ecore_iov_pci_cfg_info(struct ecore_dev *p_dev)
 
        OSAL_PCI_READ_CONFIG_BYTE(p_dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);
 
-       DP_VERBOSE(p_dev, ECORE_MSG_IOV, "IOV info[%d]: nres %d, cap 0x%x,"
+       DP_VERBOSE(p_dev, ECORE_MSG_IOV, "IOV info: nres %d, cap 0x%x, "
                   "ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d,"
-                  " stride %d, page size 0x%x\n", 0,
+                  " stride %d, page size 0x%x\n",
                   iov->nres, iov->cap, iov->ctrl,
                   iov->total_vfs, iov->initial_vfs, iov->nr_virtfn,
                   iov->offset, iov->stride, iov->pgsz);
@@ -171,45 +445,16 @@ static enum _ecore_status_t ecore_iov_pci_cfg_info(struct ecore_dev *p_dev)
        return ECORE_SUCCESS;
 }
 
-static void ecore_iov_clear_vf_igu_blocks(struct ecore_hwfn *p_hwfn,
-                                         struct ecore_ptt *p_ptt)
-{
-       struct ecore_igu_block *p_sb;
-       u16 sb_id;
-       u32 val;
-
-       if (!p_hwfn->hw_info.p_igu_info) {
-               DP_ERR(p_hwfn,
-                      "ecore_iov_clear_vf_igu_blocks IGU Info not inited\n");
-               return;
-       }
-
-       for (sb_id = 0;
-            sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev); sb_id++) {
-               p_sb = &p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks[sb_id];
-               if ((p_sb->status & ECORE_IGU_STATUS_FREE) &&
-                   !(p_sb->status & ECORE_IGU_STATUS_PF)) {
-                       val = ecore_rd(p_hwfn, p_ptt,
-                                      IGU_REG_MAPPING_MEMORY + sb_id * 4);
-                       SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
-                       ecore_wr(p_hwfn, p_ptt,
-                                IGU_REG_MAPPING_MEMORY + 4 * sb_id, val);
-               }
-       }
-}
-
 static void ecore_iov_setup_vfdb(struct ecore_hwfn *p_hwfn)
 {
-       u16 num_vfs = p_hwfn->p_dev->sriov_info.total_vfs;
-       union pfvf_tlvs *p_reply_virt_addr;
-       union vfpf_tlvs *p_req_virt_addr;
+       struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info;
+       struct ecore_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
        struct ecore_bulletin_content *p_bulletin_virt;
-       struct ecore_pf_iov *p_iov_info;
        dma_addr_t req_p, rply_p, bulletin_p;
+       union pfvf_tlvs *p_reply_virt_addr;
+       union vfpf_tlvs *p_req_virt_addr;
        u8 idx = 0;
 
-       p_iov_info = p_hwfn->pf_iov_info;
-
        OSAL_MEMSET(p_iov_info->vfs_array, 0, sizeof(p_iov_info->vfs_array));
 
        p_req_virt_addr = p_iov_info->mbx_msg_virt_addr;
@@ -224,9 +469,7 @@ static void ecore_iov_setup_vfdb(struct ecore_hwfn *p_hwfn)
                return;
        }
 
-       p_iov_info->base_vport_id = 1;  /* @@@TBD resource allocation */
-
-       for (idx = 0; idx < num_vfs; idx++) {
+       for (idx = 0; idx < p_iov->total_vfs; idx++) {
                struct ecore_vf_info *vf = &p_iov_info->vfs_array[idx];
                u32 concrete;
 
@@ -240,6 +483,7 @@ static void ecore_iov_setup_vfdb(struct ecore_hwfn *p_hwfn)
                vf->vf_mbx.sw_mbx.mbx_state = VF_PF_WAIT_FOR_START_REQUEST;
 #endif
                vf->state = VF_STOPPED;
+               vf->b_init = false;
 
                vf->bulletin.phys = idx *
                    sizeof(struct ecore_bulletin_content) + bulletin_p;
@@ -247,14 +491,15 @@ static void ecore_iov_setup_vfdb(struct ecore_hwfn *p_hwfn)
                vf->bulletin.size = sizeof(struct ecore_bulletin_content);
 
                vf->relative_vf_id = idx;
-               vf->abs_vf_id = idx + p_hwfn->hw_info.first_vf_in_pf;
+               vf->abs_vf_id = idx + p_iov->first_vf_in_pf;
                concrete = ecore_vfid_to_concrete(p_hwfn, vf->abs_vf_id);
                vf->concrete_fid = concrete;
                /* TODO - need to devise a better way of getting opaque */
                vf->opaque_fid = (p_hwfn->hw_info.opaque_fid & 0xff) |
                    (vf->abs_vf_id << 8);
-               /* @@TBD MichalK - add base vport_id of VFs to equation */
-               vf->vport_id = p_iov_info->base_vport_id + idx;
+
+               vf->num_mac_filters = ECORE_ETH_VF_NUM_MAC_FILTERS;
+               vf->num_vlan_filters = ECORE_ETH_VF_NUM_VLAN_FILTERS;
        }
 }
 
@@ -264,7 +509,7 @@ static enum _ecore_status_t ecore_iov_allocate_vfdb(struct ecore_hwfn *p_hwfn)
        void **p_v_addr;
        u16 num_vfs = 0;
 
-       num_vfs = p_hwfn->p_dev->sriov_info.total_vfs;
+       num_vfs = p_hwfn->p_dev->p_iov_info->total_vfs;
 
        DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
                   "ecore_iov_allocate_vfdb for %d VFs\n", num_vfs);
@@ -297,16 +542,15 @@ static enum _ecore_status_t ecore_iov_allocate_vfdb(struct ecore_hwfn *p_hwfn)
                return ECORE_NOMEM;
 
        DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
-                  "PF's Requests mailbox [%p virt 0x%" PRIx64 " phys], "
-                  "Response mailbox [%p virt 0x%" PRIx64 " phys] Bulletins"
-                  " [%p virt 0x%" PRIx64 " phys]\n",
+                  "PF's Requests mailbox [%p virt 0x%lx phys], "
+                  "Response mailbox [%p virt 0x%lx phys] Bulletins"
+                  " [%p virt 0x%lx phys]\n",
                   p_iov_info->mbx_msg_virt_addr,
-                  (u64)p_iov_info->mbx_msg_phys_addr,
+                  (unsigned long)p_iov_info->mbx_msg_phys_addr,
                   p_iov_info->mbx_reply_virt_addr,
-                  (u64)p_iov_info->mbx_reply_phys_addr,
-                  p_iov_info->p_bulletins, (u64)p_iov_info->bulletins_phys);
-
-       /* @@@TBD MichalK - statistics / RSS */
+                  (unsigned long)p_iov_info->mbx_reply_phys_addr,
+                  p_iov_info->p_bulletins,
+                  (unsigned long)p_iov_info->bulletins_phys);
 
        return ECORE_SUCCESS;
 }
@@ -332,141 +576,185 @@ static void ecore_iov_free_vfdb(struct ecore_hwfn *p_hwfn)
                                       p_iov_info->p_bulletins,
                                       p_iov_info->bulletins_phys,
                                       p_iov_info->bulletins_size);
-
-       /* @@@TBD MichalK - statistics / RSS */
 }
 
 enum _ecore_status_t ecore_iov_alloc(struct ecore_hwfn *p_hwfn)
 {
-       enum _ecore_status_t rc = ECORE_SUCCESS;
        struct ecore_pf_iov *p_sriov;
 
        if (!IS_PF_SRIOV(p_hwfn)) {
                DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
                           "No SR-IOV - no need for IOV db\n");
-               return rc;
+               return ECORE_SUCCESS;
        }
 
        p_sriov = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_sriov));
        if (!p_sriov) {
                DP_NOTICE(p_hwfn, true,
-                         "Failed to allocate `struct ecore_sriov'");
+                         "Failed to allocate `struct ecore_sriov'\n");
                return ECORE_NOMEM;
        }
 
        p_hwfn->pf_iov_info = p_sriov;
 
-       rc = ecore_iov_allocate_vfdb(p_hwfn);
+       ecore_spq_register_async_cb(p_hwfn, PROTOCOLID_COMMON,
+                                   ecore_sriov_eqe_event);
 
-       return rc;
+       return ecore_iov_allocate_vfdb(p_hwfn);
 }
 
-void ecore_iov_setup(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
+void ecore_iov_setup(struct ecore_hwfn *p_hwfn)
 {
-       if (!IS_PF_SRIOV(p_hwfn) || !p_hwfn->pf_iov_info)
+       if (!IS_PF_SRIOV(p_hwfn) || !IS_PF_SRIOV_ALLOC(p_hwfn))
                return;
 
        ecore_iov_setup_vfdb(p_hwfn);
-       ecore_iov_clear_vf_igu_blocks(p_hwfn, p_ptt);
 }
 
 void ecore_iov_free(struct ecore_hwfn *p_hwfn)
 {
-       if (p_hwfn->pf_iov_info) {
+       ecore_spq_unregister_async_cb(p_hwfn, PROTOCOLID_COMMON);
+
+       if (IS_PF_SRIOV_ALLOC(p_hwfn)) {
                ecore_iov_free_vfdb(p_hwfn);
                OSAL_FREE(p_hwfn->p_dev, p_hwfn->pf_iov_info);
        }
 }
 
-enum _ecore_status_t ecore_iov_hw_info(struct ecore_hwfn *p_hwfn,
-                                      struct ecore_ptt *p_ptt)
+void ecore_iov_free_hw_info(struct ecore_dev *p_dev)
+{
+       OSAL_FREE(p_dev, p_dev->p_iov_info);
+}
+
+enum _ecore_status_t ecore_iov_hw_info(struct ecore_hwfn *p_hwfn)
 {
+       struct ecore_dev *p_dev = p_hwfn->p_dev;
+       int pos;
        enum _ecore_status_t rc;
 
-       /* @@@ TBD get this information from shmem / pci cfg */
        if (IS_VF(p_hwfn->p_dev))
                return ECORE_SUCCESS;
 
-       /* First hwfn should learn the PCI configuration */
-       if (IS_LEAD_HWFN(p_hwfn)) {
-               struct ecore_dev *p_dev = p_hwfn->p_dev;
-               int *pos = &p_hwfn->p_dev->sriov_info.pos;
+       /* Learn the PCI configuration */
+       pos = OSAL_PCI_FIND_EXT_CAPABILITY(p_hwfn->p_dev,
+                                          PCI_EXT_CAP_ID_SRIOV);
+       if (!pos) {
+               DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, "No PCIe IOV support\n");
+               return ECORE_SUCCESS;
+       }
 
-               *pos = OSAL_PCI_FIND_EXT_CAPABILITY(p_hwfn->p_dev,
-                                                   PCI_EXT_CAP_ID_SRIOV);
-               if (!*pos) {
-                       DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
-                                  "No PCIe IOV support\n");
-                       return ECORE_SUCCESS;
-               }
+       /* Allocate a new struct for IOV information */
+       /* TODO - can change to VALLOC when its available */
+       p_dev->p_iov_info = OSAL_ZALLOC(p_dev, GFP_KERNEL,
+                                       sizeof(*p_dev->p_iov_info));
+       if (!p_dev->p_iov_info) {
+               DP_NOTICE(p_hwfn, true,
+                         "Can't support IOV due to lack of memory\n");
+               return ECORE_NOMEM;
+       }
+       p_dev->p_iov_info->pos = pos;
 
-               rc = ecore_iov_pci_cfg_info(p_dev);
-               if (rc)
-                       return rc;
-       } else if (!p_hwfn->p_dev->sriov_info.pos) {
-               DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, "No PCIe IOV support\n");
+       rc = ecore_iov_pci_cfg_info(p_dev);
+       if (rc)
+               return rc;
+
+       /* We want PF IOV to be synonymous with the existence of p_iov_info;
+        * In case the capability is published but there are no VFs, simply
+        * de-allocate the struct.
+        */
+       if (!p_dev->p_iov_info->total_vfs) {
+               DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                          "IOV capabilities, but no VFs are published\n");
+               OSAL_FREE(p_dev, p_dev->p_iov_info);
                return ECORE_SUCCESS;
        }
 
-       /* Calculate the first VF index - this is a bit tricky; Basically,
-        * VFs start at offset 16 relative to PF0, and 2nd engine VFs begin
-        * after the first engine's VFs.
+       /* First VF index based on offset is tricky:
+        *  - If ARI is supported [likely], offset - (16 - pf_id) would
+        *    provide the number for eng0. 2nd engine VFs would begin
+        *    after the first engine's VFs.
+        *  - If !ARI, VFs would start on the next device,
+        *    so offset - (256 - pf_id) would provide the number.
+        * Utilize the fact that (256 - pf_id) is reached only by the
+        * latter to differentiate between the two.
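+        * Illustrative arithmetic (example offsets only): with ARI, PF0
+        * publishing offset 16 gets first_vf_in_pf = 16 + 0 - 16 = 0;
+        * without ARI, PF2 publishing offset 254 gets 254 + 2 - 256 = 0.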
         */
-       p_hwfn->hw_info.first_vf_in_pf = p_hwfn->p_dev->sriov_info.offset +
-           p_hwfn->abs_pf_id - 16;
-       if (ECORE_PATH_ID(p_hwfn))
-               p_hwfn->hw_info.first_vf_in_pf -= MAX_NUM_VFS_BB;
+
+       if (p_hwfn->p_dev->p_iov_info->offset < (256 - p_hwfn->abs_pf_id)) {
+               u32 first = p_hwfn->p_dev->p_iov_info->offset +
+                           p_hwfn->abs_pf_id - 16;
+
+               p_dev->p_iov_info->first_vf_in_pf = first;
+
+               if (ECORE_PATH_ID(p_hwfn))
+                       p_dev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB;
+       } else {
+               u32 first = p_hwfn->p_dev->p_iov_info->offset +
+                           p_hwfn->abs_pf_id - 256;
+
+               p_dev->p_iov_info->first_vf_in_pf = first;
+       }
 
        DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
-                  "First VF in hwfn 0x%08x\n", p_hwfn->hw_info.first_vf_in_pf);
+                  "First VF in hwfn 0x%08x\n",
+                  p_dev->p_iov_info->first_vf_in_pf);
 
        return ECORE_SUCCESS;
 }
 
-struct ecore_vf_info *ecore_iov_get_vf_info(struct ecore_hwfn *p_hwfn,
-                                           u16 relative_vf_id,
-                                           bool b_enabled_only)
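+/* Common sanity check before serving a VF request - the PF must have
+ * SR-IOV enabled and its database allocated, and the VF index must be
+ * valid (and, optionally, not flagged as malicious).
+ */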
+static bool _ecore_iov_pf_sanity_check(struct ecore_hwfn *p_hwfn, int vfid,
+                                      bool b_fail_malicious)
 {
-       struct ecore_vf_info *vf = OSAL_NULL;
+       /* Check PF supports sriov */
+       if (IS_VF(p_hwfn->p_dev) || !IS_ECORE_SRIOV(p_hwfn->p_dev) ||
+           !IS_PF_SRIOV_ALLOC(p_hwfn))
+               return false;
 
-       if (!p_hwfn->pf_iov_info) {
-               DP_NOTICE(p_hwfn->p_dev, true, "No iov info\n");
-               return OSAL_NULL;
-       }
+       /* Check VF validity */
+       if (!ecore_iov_is_valid_vfid(p_hwfn, vfid, true, b_fail_malicious))
+               return false;
 
-       if (ecore_iov_is_valid_vfid(p_hwfn, relative_vf_id, b_enabled_only))
-               vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id];
-       else
-               DP_ERR(p_hwfn, "ecore_iov_get_vf_info: VF[%d] is not enabled\n",
-                      relative_vf_id);
+       return true;
+}
 
-       return vf;
+bool ecore_iov_pf_sanity_check(struct ecore_hwfn *p_hwfn, int vfid)
+{
+       return _ecore_iov_pf_sanity_check(p_hwfn, vfid, true);
 }
 
-void ecore_iov_set_vf_to_disable(struct ecore_hwfn *p_hwfn,
+void ecore_iov_set_vf_to_disable(struct ecore_dev *p_dev,
                                 u16 rel_vf_id, u8 to_disable)
 {
        struct ecore_vf_info *vf;
+       int i;
 
-       vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, false);
-       if (!vf)
-               return;
+       for_each_hwfn(p_dev, i) {
+               struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+
+               vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, false);
+               if (!vf)
+                       continue;
 
-       vf->to_disable = to_disable;
+               vf->to_disable = to_disable;
+       }
 }
 
-void ecore_iov_set_vfs_to_disable(struct ecore_hwfn *p_hwfn, u8 to_disable)
+void ecore_iov_set_vfs_to_disable(struct ecore_dev *p_dev,
+                                 u8 to_disable)
 {
        u16 i;
 
-       for (i = 0; i < p_hwfn->p_dev->sriov_info.total_vfs; i++)
-               ecore_iov_set_vf_to_disable(p_hwfn, i, to_disable);
+       if (!IS_ECORE_SRIOV(p_dev))
+               return;
+
+       for (i = 0; i < p_dev->p_iov_info->total_vfs; i++)
+               ecore_iov_set_vf_to_disable(p_dev, i, to_disable);
 }
 
 #ifndef LINUX_REMOVE
 /* @@@TBD Consider taking outside of ecore... */
 enum _ecore_status_t ecore_iov_set_vf_ctx(struct ecore_hwfn *p_hwfn,
-                                         u16 vf_id, void *ctx)
+                                         u16               vf_id,
+                                         void              *ctx)
 {
        enum _ecore_status_t rc = ECORE_SUCCESS;
        struct ecore_vf_info *vf = ecore_iov_get_vf_info(p_hwfn, vf_id, true);
@@ -483,29 +771,9 @@ enum _ecore_status_t ecore_iov_set_vf_ctx(struct ecore_hwfn *p_hwfn,
 }
 #endif
 
-/**
- * VF enable primitives
- *
- * when pretend is required the caller is reponsible
- * for calling pretend prioir to calling these routines
- */
-
-/* clears vf error in all semi blocks
- * Assumption: called under VF pretend...
- */
-static OSAL_INLINE void ecore_iov_vf_semi_clear_err(struct ecore_hwfn *p_hwfn,
-                                                   struct ecore_ptt *p_ptt)
-{
-       ecore_wr(p_hwfn, p_ptt, TSEM_REG_VF_ERROR, 1);
-       ecore_wr(p_hwfn, p_ptt, USEM_REG_VF_ERROR, 1);
-       ecore_wr(p_hwfn, p_ptt, MSEM_REG_VF_ERROR, 1);
-       ecore_wr(p_hwfn, p_ptt, XSEM_REG_VF_ERROR, 1);
-       ecore_wr(p_hwfn, p_ptt, YSEM_REG_VF_ERROR, 1);
-       ecore_wr(p_hwfn, p_ptt, PSEM_REG_VF_ERROR, 1);
-}
-
-static void ecore_iov_vf_pglue_clear_err(struct ecore_hwfn *p_hwfn,
-                                        struct ecore_ptt *p_ptt, u8 abs_vfid)
+static void ecore_iov_vf_pglue_clear_err(struct ecore_hwfn      *p_hwfn,
+                                        struct ecore_ptt       *p_ptt,
+                                        u8                     abs_vfid)
 {
        ecore_wr(p_hwfn, p_ptt,
                 PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR + (abs_vfid >> 5) * 4,
@@ -517,30 +785,20 @@ static void ecore_iov_vf_igu_reset(struct ecore_hwfn *p_hwfn,
                                   struct ecore_vf_info *vf)
 {
        int i;
-       u16 igu_sb_id;
 
        /* Set VF masks and configuration - pretend */
        ecore_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);
 
        ecore_wr(p_hwfn, p_ptt, IGU_REG_STATISTIC_NUM_VF_MSG_SENT, 0);
 
-       DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
-                  "value in VF_CONFIGURATION of vf %d after write %x\n",
-                  vf->abs_vf_id,
-                  ecore_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION));
-
        /* unpretend */
        ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
 
-       /* iterate ove all queues, clear sb consumer */
-       for (i = 0; i < vf->num_sbs; i++) {
-               igu_sb_id = vf->igu_sbs[i];
-               /* Set then clear... */
-               ecore_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 1,
-                                        vf->opaque_fid);
-               ecore_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 0,
-                                        vf->opaque_fid);
-       }
+       /* iterate over all queues, clear sb consumer */
+       for (i = 0; i < vf->num_sbs; i++)
+               ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
+                                                 vf->igu_sbs[i],
+                                                 vf->opaque_fid, true);
 }
 
 static void ecore_iov_vf_igu_set_int(struct ecore_hwfn *p_hwfn,
@@ -564,12 +822,53 @@ static void ecore_iov_vf_igu_set_int(struct ecore_hwfn *p_hwfn,
        ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
 }
 
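+/* Ask the MCP to configure the number of MSI-X vectors for a VF.
+ * On BB this is a per-VF setting; from AH onward it is per-PF, so the
+ * value is only raised when a VF needs more SBs than any sibling VF.
+ */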
+static enum _ecore_status_t
+ecore_iov_enable_vf_access_msix(struct ecore_hwfn *p_hwfn,
+                               struct ecore_ptt *p_ptt,
+                               u8 abs_vf_id,
+                               u8 num_sbs)
+{
+       u8 current_max = 0;
+       int i;
+
+       /* If client overrides this, don't do anything */
+       if (p_hwfn->p_dev->b_dont_override_vf_msix)
+               return ECORE_SUCCESS;
+
+       /* For AH onward, configuration is per-PF. Find maximum of all
+        * the currently enabled child VFs, and set the number to be that.
+        */
+       if (!ECORE_IS_BB(p_hwfn->p_dev)) {
+               ecore_for_each_vf(p_hwfn, i) {
+                       struct ecore_vf_info *p_vf;
+
+                       p_vf  = ecore_iov_get_vf_info(p_hwfn, (u16)i, true);
+                       if (!p_vf)
+                               continue;
+
+                       current_max = OSAL_MAX_T(u8, current_max,
+                                                p_vf->num_sbs);
+               }
+       }
+
+       if (num_sbs > current_max)
+               return ecore_mcp_config_vf_msix(p_hwfn, p_ptt,
+                                               abs_vf_id, num_sbs);
+
+       return ECORE_SUCCESS;
+}
+
 static enum _ecore_status_t
 ecore_iov_enable_vf_access(struct ecore_hwfn *p_hwfn,
                           struct ecore_ptt *p_ptt, struct ecore_vf_info *vf)
 {
        u32 igu_vf_conf = IGU_VF_CONF_FUNC_EN;
-       enum _ecore_status_t rc;
+       enum _ecore_status_t rc = ECORE_SUCCESS;
+
+       /* It's possible VF was previously considered malicious -
+        * clear the indication even if we're only going to disable VF.
+        */
+       vf->b_malicious = false;
 
        if (vf->to_disable)
                return ECORE_SUCCESS;
@@ -581,9 +880,11 @@ ecore_iov_enable_vf_access(struct ecore_hwfn *p_hwfn,
        ecore_iov_vf_pglue_clear_err(p_hwfn, p_ptt,
                                     ECORE_VF_ABS_ID(p_hwfn, vf));
 
-       rc = ecore_mcp_config_vf_msix(p_hwfn, p_ptt,
-                                     vf->abs_vf_id, vf->num_sbs);
-       if (rc)
+       ecore_iov_vf_igu_reset(p_hwfn, p_ptt, vf);
+
+       rc = ecore_iov_enable_vf_access_msix(p_hwfn, p_ptt,
+                                            vf->abs_vf_id, vf->num_sbs);
+       if (rc != ECORE_SUCCESS)
                return rc;
 
        ecore_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);
@@ -597,18 +898,6 @@ ecore_iov_enable_vf_access(struct ecore_hwfn *p_hwfn,
        /* unpretend */
        ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
 
-       if (vf->state != VF_STOPPED) {
-               DP_NOTICE(p_hwfn, true, "VF[%02x] is already started\n",
-                         vf->abs_vf_id);
-               return ECORE_INVAL;
-       }
-
-       /* Start VF */
-       rc = ecore_sp_vf_start(p_hwfn, vf->concrete_fid, vf->opaque_fid);
-       if (rc != ECORE_SUCCESS)
-               DP_NOTICE(p_hwfn, true, "Failed to start VF[%02x]\n",
-                         vf->abs_vf_id);
-
        vf->state = VF_FREE;
 
        return rc;
@@ -631,8 +920,7 @@ static void ecore_iov_config_perm_table(struct ecore_hwfn *p_hwfn,
                                        struct ecore_ptt *p_ptt,
                                        struct ecore_vf_info *vf, u8 enable)
 {
-       u32 reg_addr;
-       u32 val;
+       u32 reg_addr, val;
        u16 qzone_id = 0;
        int qid;
 
@@ -650,13 +938,13 @@ static void ecore_iov_enable_vf_traffic(struct ecore_hwfn *p_hwfn,
                                        struct ecore_ptt *p_ptt,
                                        struct ecore_vf_info *vf)
 {
-       /* Reset vf in IGU interrupts are still disabled */
+       /* Reset vf in IGU - interrupts are still disabled */
        ecore_iov_vf_igu_reset(p_hwfn, p_ptt, vf);
 
-       ecore_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 1 /* enable */);
+       ecore_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 1);
 
        /* Permission Table */
-       ecore_iov_config_perm_table(p_hwfn, p_ptt, vf, true /* enable */);
+       ecore_iov_config_perm_table(p_hwfn, p_ptt, vf, true);
 }
 
 static u8 ecore_iov_alloc_vf_igu_sbs(struct ecore_hwfn *p_hwfn,
@@ -664,46 +952,38 @@ static u8 ecore_iov_alloc_vf_igu_sbs(struct ecore_hwfn *p_hwfn,
                                     struct ecore_vf_info *vf,
                                     u16 num_rx_queues)
 {
-       int igu_id = 0;
+       struct ecore_igu_block *p_block;
+       struct cau_sb_entry sb_entry;
        int qid = 0;
        u32 val = 0;
-       struct ecore_igu_block *igu_blocks =
-           p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks;
-
-       if (num_rx_queues > p_hwfn->hw_info.p_igu_info->free_blks)
-               num_rx_queues = p_hwfn->hw_info.p_igu_info->free_blks;
 
-       p_hwfn->hw_info.p_igu_info->free_blks -= num_rx_queues;
+       if (num_rx_queues > p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov)
+               num_rx_queues =
+               (u16)p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov;
+       p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov -= num_rx_queues;
 
        SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, vf->abs_vf_id);
        SET_FIELD(val, IGU_MAPPING_LINE_VALID, 1);
        SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, 0);
 
-       while ((qid < num_rx_queues) &&
-              (igu_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev))) {
-               if (igu_blocks[igu_id].status & ECORE_IGU_STATUS_FREE) {
-                       struct cau_sb_entry sb_entry;
-
-                       vf->igu_sbs[qid] = (u16)igu_id;
-                       igu_blocks[igu_id].status &= ~ECORE_IGU_STATUS_FREE;
-
-                       SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid);
-
-                       ecore_wr(p_hwfn, p_ptt,
-                                IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id,
-                                val);
-
-                       /* Configure igu sb in CAU which were marked valid */
-                       ecore_init_cau_sb_entry(p_hwfn, &sb_entry,
-                                               p_hwfn->rel_pf_id,
-                                               vf->abs_vf_id, 1);
-                       ecore_dmae_host2grc(p_hwfn, p_ptt,
-                                           (u64)(osal_uintptr_t)&sb_entry,
-                                           CAU_REG_SB_VAR_MEMORY +
-                                           igu_id * sizeof(u64), 2, 0);
-                       qid++;
-               }
-               igu_id++;
+       for (qid = 0; qid < num_rx_queues; qid++) {
+               p_block = ecore_get_igu_free_sb(p_hwfn, false);
+               vf->igu_sbs[qid] = p_block->igu_sb_id;
+               p_block->status &= ~ECORE_IGU_STATUS_FREE;
+               SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid);
+
+               ecore_wr(p_hwfn, p_ptt,
+                        IGU_REG_MAPPING_MEMORY +
+                        sizeof(u32) * p_block->igu_sb_id, val);
+
+               /* Configure igu sb in CAU which were marked valid */
+               ecore_init_cau_sb_entry(p_hwfn, &sb_entry,
+                                       p_hwfn->rel_pf_id,
+                                       vf->abs_vf_id, 1);
+               ecore_dmae_host2grc(p_hwfn, p_ptt,
+                                   (u64)(osal_uintptr_t)&sb_entry,
+                                   CAU_REG_SB_VAR_MEMORY +
+                                   p_block->igu_sb_id * sizeof(u64), 2, 0);
        }
 
        vf->num_sbs = (u8)num_rx_queues;
@@ -739,35 +1019,127 @@ static void ecore_iov_free_vf_igu_sbs(struct ecore_hwfn *p_hwfn,
                SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
                ecore_wr(p_hwfn, p_ptt, addr, val);
 
-               p_info->igu_map.igu_blocks[igu_id].status |=
-                   ECORE_IGU_STATUS_FREE;
-
-               p_hwfn->hw_info.p_igu_info->free_blks++;
+               p_info->entry[igu_id].status |= ECORE_IGU_STATUS_FREE;
+               p_hwfn->hw_info.p_igu_info->usage.free_cnt_iov++;
        }
 
        vf->num_sbs = 0;
 }
 
-enum _ecore_status_t ecore_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn,
-                                             struct ecore_ptt *p_ptt,
-                                             u16 rel_vf_id, u16 num_rx_queues)
-{
-       enum _ecore_status_t rc = ECORE_SUCCESS;
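+/* Mirror the PF's link parameters, state and capabilities into a VF's
+ * bulletin board, from which the VF driver later reads them.
+ */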
+void ecore_iov_set_link(struct ecore_hwfn *p_hwfn,
+                       u16 vfid,
+                       struct ecore_mcp_link_params *params,
+                       struct ecore_mcp_link_state *link,
+                       struct ecore_mcp_link_capabilities *p_caps)
+{
+       struct ecore_vf_info *p_vf = ecore_iov_get_vf_info(p_hwfn, vfid, false);
+       struct ecore_bulletin_content *p_bulletin;
+
+       if (!p_vf)
+               return;
+
+       p_bulletin = p_vf->bulletin.p_virt;
+       p_bulletin->req_autoneg = params->speed.autoneg;
+       p_bulletin->req_adv_speed = params->speed.advertised_speeds;
+       p_bulletin->req_forced_speed = params->speed.forced_speed;
+       p_bulletin->req_autoneg_pause = params->pause.autoneg;
+       p_bulletin->req_forced_rx = params->pause.forced_rx;
+       p_bulletin->req_forced_tx = params->pause.forced_tx;
+       p_bulletin->req_loopback = params->loopback_mode;
+
+       p_bulletin->link_up = link->link_up;
+       p_bulletin->speed = link->speed;
+       p_bulletin->full_duplex = link->full_duplex;
+       p_bulletin->autoneg = link->an;
+       p_bulletin->autoneg_complete = link->an_complete;
+       p_bulletin->parallel_detection = link->parallel_detection;
+       p_bulletin->pfc_enabled = link->pfc_enabled;
+       p_bulletin->partner_adv_speed = link->partner_adv_speed;
+       p_bulletin->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en;
+       p_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en;
+       p_bulletin->partner_adv_pause = link->partner_adv_pause;
+       p_bulletin->sfp_tx_fault = link->sfp_tx_fault;
+
+       p_bulletin->capability_speed = p_caps->speed_capabilities;
+}
+
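+/* One-time initialization of a VF in HW: validates the requested
+ * vport/RSS-engine and queue ranges, allocates IGU SBs, seeds the
+ * bulletin with the current link configuration and enables the VF.
+ */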
+enum _ecore_status_t
+ecore_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn,
+                        struct ecore_ptt *p_ptt,
+                        struct ecore_iov_vf_init_params *p_params)
+{
+       struct ecore_mcp_link_capabilities link_caps;
+       struct ecore_mcp_link_params link_params;
+       struct ecore_mcp_link_state link_state;
+       u8 num_of_vf_available_chains = 0;
        struct ecore_vf_info *vf = OSAL_NULL;
-       u8 num_of_vf_available_chains = 0;
+       u16 qid, num_irqs;
+       enum _ecore_status_t rc = ECORE_SUCCESS;
        u32 cids;
        u8 i;
 
-       if (ECORE_IS_VF_ACTIVE(p_hwfn->p_dev, rel_vf_id)) {
+       vf = ecore_iov_get_vf_info(p_hwfn, p_params->rel_vf_id, false);
+       if (!vf) {
+               DP_ERR(p_hwfn, "ecore_iov_init_hw_for_vf : vf is OSAL_NULL\n");
+               return ECORE_UNKNOWN_ERROR;
+       }
+
+       if (vf->b_init) {
                DP_NOTICE(p_hwfn, true, "VF[%d] is already active.\n",
-                         rel_vf_id);
+                         p_params->rel_vf_id);
                return ECORE_INVAL;
        }
 
-       vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, false);
-       if (!vf) {
-               DP_ERR(p_hwfn, "ecore_iov_init_hw_for_vf : vf is OSAL_NULL\n");
-               return ECORE_UNKNOWN_ERROR;
+       /* Perform sanity checking on the requested vport/rss */
+       if (p_params->vport_id >= RESC_NUM(p_hwfn, ECORE_VPORT)) {
+               DP_NOTICE(p_hwfn, true, "VF[%d] - can't use VPORT %02x\n",
+                         p_params->rel_vf_id, p_params->vport_id);
+               return ECORE_INVAL;
+       }
+
+       if ((p_params->num_queues > 1) &&
+           (p_params->rss_eng_id >= RESC_NUM(p_hwfn, ECORE_RSS_ENG))) {
+               DP_NOTICE(p_hwfn, true, "VF[%d] - can't use RSS_ENG %02x\n",
+                         p_params->rel_vf_id, p_params->rss_eng_id);
+               return ECORE_INVAL;
+       }
+
+       /* TODO - remove this once we get confidence of change */
+       if (!p_params->vport_id) {
+               DP_NOTICE(p_hwfn, false,
+                         "VF[%d] - Unlikely that VF uses vport0. Forgotten?\n",
+                         p_params->rel_vf_id);
+       }
+       if ((!p_params->rss_eng_id) && (p_params->num_queues > 1)) {
+               DP_NOTICE(p_hwfn, false,
+                         "VF[%d] - Unlikely that VF uses RSS_eng0. Forgotten?\n",
+                         p_params->rel_vf_id);
+       }
+       vf->vport_id = p_params->vport_id;
+       vf->rss_eng_id = p_params->rss_eng_id;
+
+       /* Since it's possible to relocate SBs, it's a bit difficult to check
+        * things here. Simply check whether the index falls in the range
+        * belonging to the PF.
+        */
+       for (i = 0; i < p_params->num_queues; i++) {
+               qid = p_params->req_rx_queue[i];
+               if (qid > (u16)RESC_NUM(p_hwfn, ECORE_L2_QUEUE)) {
+                       DP_NOTICE(p_hwfn, true,
+                                 "Can't enable Rx qid [%04x] for VF[%d]: qids [0,...,0x%04x] available\n",
+                                 qid, p_params->rel_vf_id,
+                                 (u16)RESC_NUM(p_hwfn, ECORE_L2_QUEUE));
+                       return ECORE_INVAL;
+               }
+
+               qid = p_params->req_tx_queue[i];
+               if (qid > (u16)RESC_NUM(p_hwfn, ECORE_L2_QUEUE)) {
+                       DP_NOTICE(p_hwfn, true,
+                                 "Can't enable Tx qid [%04x] for VF[%d]: qids [0,...,0x%04x] available\n",
+                                 qid, p_params->rel_vf_id,
+                                 (u16)RESC_NUM(p_hwfn, ECORE_L2_QUEUE));
+                       return ECORE_INVAL;
+               }
        }
 
        /* Limit number of queues according to number of CIDs */
@@ -775,13 +1147,13 @@ enum _ecore_status_t ecore_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn,
        DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
                   "VF[%d] - requesting to initialize for 0x%04x queues"
                   " [0x%04x CIDs available]\n",
-                  vf->relative_vf_id, num_rx_queues, (u16)cids);
-       num_rx_queues = OSAL_MIN_T(u16, num_rx_queues, ((u16)cids));
+                  vf->relative_vf_id, p_params->num_queues, (u16)cids);
+       num_irqs = OSAL_MIN_T(u16, p_params->num_queues, ((u16)cids));
 
        num_of_vf_available_chains = ecore_iov_alloc_vf_igu_sbs(p_hwfn,
                                                               p_ptt,
                                                               vf,
-                                                              num_rx_queues);
+                                                              num_irqs);
        if (num_of_vf_available_chains == 0) {
                DP_ERR(p_hwfn, "no available igu sbs\n");
                return ECORE_NOMEM;
@@ -792,36 +1164,37 @@ enum _ecore_status_t ecore_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn,
        vf->num_txqs = num_of_vf_available_chains;
 
        for (i = 0; i < vf->num_rxqs; i++) {
-               u16 queue_id = ecore_int_queue_id_from_sb_id(p_hwfn,
-                                                            vf->igu_sbs[i]);
-
-               if (queue_id > RESC_NUM(p_hwfn, ECORE_L2_QUEUE)) {
-                       DP_NOTICE(p_hwfn, true,
-                                 "VF[%d] will require utilizing of"
-                                 " out-of-bounds queues - %04x\n",
-                                 vf->relative_vf_id, queue_id);
-                       /* TODO - cleanup the already allocate SBs */
-                       return ECORE_INVAL;
-               }
+               struct ecore_vf_queue *p_queue = &vf->vf_queues[i];
 
-               /* CIDs are per-VF, so no problem having them 0-based. */
-               vf->vf_queues[i].fw_rx_qid = queue_id;
-               vf->vf_queues[i].fw_tx_qid = queue_id;
-               vf->vf_queues[i].fw_cid = i;
+               p_queue->fw_rx_qid = p_params->req_rx_queue[i];
+               p_queue->fw_tx_qid = p_params->req_tx_queue[i];
 
                DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
-                          "VF[%d] - [%d] SB %04x, Tx/Rx queue %04x CID %04x\n",
-                          vf->relative_vf_id, i, vf->igu_sbs[i], queue_id, i);
+                          "VF[%d] - Q[%d] SB %04x, qid [Rx %04x Tx %04x]\n",
+                          vf->relative_vf_id, i, vf->igu_sbs[i],
+                          p_queue->fw_rx_qid, p_queue->fw_tx_qid);
        }
 
+       /* Update the link configuration in bulletin */
+       OSAL_MEMCPY(&link_params, ecore_mcp_get_link_params(p_hwfn),
+                   sizeof(link_params));
+       OSAL_MEMCPY(&link_state, ecore_mcp_get_link_state(p_hwfn),
+                   sizeof(link_state));
+       OSAL_MEMCPY(&link_caps, ecore_mcp_get_link_capabilities(p_hwfn),
+                   sizeof(link_caps));
+       ecore_iov_set_link(p_hwfn, p_params->rel_vf_id,
+                          &link_params, &link_state, &link_caps);
+
        rc = ecore_iov_enable_vf_access(p_hwfn, p_ptt, vf);
 
        if (rc == ECORE_SUCCESS) {
-               struct ecore_hw_sriov_info *p_iov = &p_hwfn->p_dev->sriov_info;
-               u16 vf_id = vf->relative_vf_id;
+               vf->b_init = true;
+               p_hwfn->pf_iov_info->active_vfs[vf->relative_vf_id / 64] |=
+                       (1ULL << (vf->relative_vf_id % 64));
 
-               p_iov->num_vfs++;
-               p_iov->active_vfs[vf_id / 64] |= (1ULL << (vf_id % 64));
+               if (IS_LEAD_HWFN(p_hwfn))
+                       p_hwfn->p_dev->p_iov_info->num_vfs++;
        }
 
        return rc;
@@ -831,8 +1204,10 @@ enum _ecore_status_t ecore_iov_release_hw_for_vf(struct ecore_hwfn *p_hwfn,
                                                 struct ecore_ptt *p_ptt,
                                                 u16 rel_vf_id)
 {
+       struct ecore_mcp_link_capabilities caps;
+       struct ecore_mcp_link_params params;
+       struct ecore_mcp_link_state link;
        struct ecore_vf_info *vf = OSAL_NULL;
-       enum _ecore_status_t rc = ECORE_SUCCESS;
 
        vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
        if (!vf) {
@@ -840,38 +1215,45 @@ enum _ecore_status_t ecore_iov_release_hw_for_vf(struct ecore_hwfn *p_hwfn,
                return ECORE_UNKNOWN_ERROR;
        }
 
-       if (vf->state != VF_STOPPED) {
-               /* Stopping the VF */
-               rc = ecore_sp_vf_stop(p_hwfn, vf->concrete_fid, vf->opaque_fid);
+       if (vf->bulletin.p_virt)
+               OSAL_MEMSET(vf->bulletin.p_virt, 0,
+                           sizeof(*vf->bulletin.p_virt));
 
-               if (rc != ECORE_SUCCESS) {
-                       DP_ERR(p_hwfn, "ecore_sp_vf_stop returned error %d\n",
-                              rc);
-                       return rc;
-               }
+       OSAL_MEMSET(&vf->p_vf_info, 0, sizeof(vf->p_vf_info));
 
-               vf->state = VF_STOPPED;
-       }
+       /* Get the link configuration back in bulletin so
+        * that when VFs are re-enabled they get the actual
+        * link configuration.
+        */
+       OSAL_MEMCPY(&params, ecore_mcp_get_link_params(p_hwfn), sizeof(params));
+       OSAL_MEMCPY(&link, ecore_mcp_get_link_state(p_hwfn), sizeof(link));
+       OSAL_MEMCPY(&caps, ecore_mcp_get_link_capabilities(p_hwfn),
+                   sizeof(caps));
+       ecore_iov_set_link(p_hwfn, rel_vf_id, &params, &link, &caps);
+
+       /* Forget the VF's acquisition message */
+       OSAL_MEMSET(&vf->acquire, 0, sizeof(vf->acquire));
 
        /* disabling interrupts and resetting permission table was done during
         * vf-close, however, we could get here without going through vf_close
         */
        /* Disable Interrupts for VF */
-       ecore_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0 /* disable */);
+       ecore_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);
 
        /* Reset Permission table */
-       ecore_iov_config_perm_table(p_hwfn, p_ptt, vf, 0 /* disable */);
+       ecore_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);
 
        vf->num_rxqs = 0;
        vf->num_txqs = 0;
        ecore_iov_free_vf_igu_sbs(p_hwfn, p_ptt, vf);
 
-       if (ECORE_IS_VF_ACTIVE(p_hwfn->p_dev, rel_vf_id)) {
-               struct ecore_hw_sriov_info *p_iov = &p_hwfn->p_dev->sriov_info;
-               u16 vf_id = vf->relative_vf_id;
+       if (vf->b_init) {
+               vf->b_init = false;
+               p_hwfn->pf_iov_info->active_vfs[vf->relative_vf_id / 64] &=
+                                       ~(1ULL << (vf->relative_vf_id % 64));
 
-               p_iov->num_vfs--;
-               p_iov->active_vfs[vf_id / 64] &= ~(1ULL << (vf_id % 64));
+               if (IS_LEAD_HWFN(p_hwfn))
+                       p_hwfn->p_dev->p_iov_info->num_vfs--;
        }
 
        return ECORE_SUCCESS;
@@ -885,10 +1267,6 @@ static bool ecore_iov_tlv_supported(u16 tlvtype)
 static void ecore_iov_lock_vf_pf_channel(struct ecore_hwfn *p_hwfn,
                                         struct ecore_vf_info *vf, u16 tlv)
 {
-       /* we don't lock the channel for unsupported tlvs */
-       if (!ecore_iov_tlv_supported(tlv))
-               return;
-
        /* lock the channel */
        /* mutex_lock(&vf->op_mutex); @@@TBD MichalK - add lock... */
 
@@ -896,43 +1274,42 @@ static void ecore_iov_lock_vf_pf_channel(struct ecore_hwfn *p_hwfn,
        /* vf->op_current = tlv; @@@TBD MichalK */
 
        /* log the lock */
-       DP_VERBOSE(p_hwfn,
-                  ECORE_MSG_IOV,
-                  "VF[%d]: vf pf channel locked by     %s\n",
-                  vf->abs_vf_id, ecore_channel_tlvs_string[tlv]);
+       if (ecore_iov_tlv_supported(tlv))
+               DP_VERBOSE(p_hwfn,
+                          ECORE_MSG_IOV,
+                          "VF[%d]: vf pf channel locked by %s\n",
+                          vf->abs_vf_id,
+                          ecore_channel_tlvs_string[tlv]);
+       else
+               DP_VERBOSE(p_hwfn,
+                          ECORE_MSG_IOV,
+                          "VF[%d]: vf pf channel locked by %04x\n",
+                          vf->abs_vf_id, tlv);
 }
 
 static void ecore_iov_unlock_vf_pf_channel(struct ecore_hwfn *p_hwfn,
                                           struct ecore_vf_info *vf,
                                           u16 expected_tlv)
 {
-       /* we don't unlock the channel for unsupported tlvs */
-       if (!ecore_iov_tlv_supported(expected_tlv))
-               return;
-
-       /* WARN(expected_tlv != vf->op_current,
-        * "lock mismatch: expected %s found %s",
-        * channel_tlvs_string[expected_tlv],
-        * channel_tlvs_string[vf->op_current]);
-        * @@@TBD MichalK
-        */
-
-       /* lock the channel */
-       /* mutex_unlock(&vf->op_mutex); @@@TBD MichalK add the lock */
-
        /* log the unlock */
-       DP_VERBOSE(p_hwfn,
-                  ECORE_MSG_IOV,
-                  "VF[%d]: vf pf channel unlocked by %s\n",
-                  vf->abs_vf_id, ecore_channel_tlvs_string[expected_tlv]);
+       if (ecore_iov_tlv_supported(expected_tlv))
+               DP_VERBOSE(p_hwfn,
+                          ECORE_MSG_IOV,
+                          "VF[%d]: vf pf channel unlocked by %s\n",
+                          vf->abs_vf_id,
+                          ecore_channel_tlvs_string[expected_tlv]);
+       else
+               DP_VERBOSE(p_hwfn,
+                          ECORE_MSG_IOV,
+                          "VF[%d]: vf pf channel unlocked by %04x\n",
+                          vf->abs_vf_id, expected_tlv);
 
        /* record the locking op */
        /* vf->op_current = CHANNEL_TLV_NONE; */
 }
 
 /* place a given tlv on the tlv buffer, continuing current tlv list */
-void *ecore_add_tlv(struct ecore_hwfn *p_hwfn,
-                   u8 **offset, u16 type, u16 length)
+void *ecore_add_tlv(u8 **offset, u16 type, u16 length)
 {
        struct channel_tlv *tl = (struct channel_tlv *)*offset;
 
@@ -988,7 +1365,12 @@ void ecore_dp_tlv_list(struct ecore_hwfn *p_hwfn, void *tlvs_list)
 static void ecore_iov_send_response(struct ecore_hwfn *p_hwfn,
                                    struct ecore_ptt *p_ptt,
                                    struct ecore_vf_info *p_vf,
-                                   u16 length, u8 status)
+#ifdef CONFIG_ECORE_SW_CHANNEL
+                                   u16 length,
+#else
+                                   u16 OSAL_UNUSED length,
+#endif
+                                   u8 status)
 {
        struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
        struct ecore_dmae_params params;
@@ -996,15 +1378,15 @@ static void ecore_iov_send_response(struct ecore_hwfn *p_hwfn,
 
        mbx->reply_virt->default_resp.hdr.status = status;
 
+       ecore_dp_tlv_list(p_hwfn, mbx->reply_virt);
+
 #ifdef CONFIG_ECORE_SW_CHANNEL
        mbx->sw_mbx.response_size =
            length + sizeof(struct channel_list_end_tlv);
-#endif
-
-       ecore_dp_tlv_list(p_hwfn, mbx->reply_virt);
 
-       if (!p_hwfn->p_dev->sriov_info.b_hw_channel)
+       if (!p_vf->b_hw_channel)
                return;
+#endif
 
        eng_vf_id = p_vf->abs_vf_id;
 
@@ -1018,17 +1400,22 @@ static void ecore_iov_send_response(struct ecore_hwfn *p_hwfn,
                             (sizeof(union pfvf_tlvs) - sizeof(u64)) / 4,
                             &params);
 
+       /* Once the PF copies the rc to the VF, the latter can continue
+        * and send an additional message. So we have to make sure the
+        * channel is re-set to ready prior to that.
+        */
+       REG_WR(p_hwfn,
+              GTT_BAR0_MAP_REG_USDM_RAM +
+              USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1);
+
        ecore_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys,
                             mbx->req_virt->first_tlv.reply_address,
                             sizeof(u64) / 4, &params);
 
-       REG_WR(p_hwfn,
-              GTT_BAR0_MAP_REG_USDM_RAM +
-              USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1);
+       OSAL_IOV_PF_RESP_TYPE(p_hwfn, p_vf->relative_vf_id, status);
 }
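 
 /* Editorial sketch, not part of this patch: the re-arm ordering above
  * matters because the VF side typically spins on the reply status and
  * issues its next request as soon as the rc becomes visible. Assuming
  * a VF-side wait loop of roughly this shape:
  *
  *     while (resp->hdr.status == PFVF_STATUS_WAITING)
  *             OSAL_UDELAY(10);
  *     // safe to build the next request here only if the PF already
  *     // re-armed USTORM_VF_PF_CHANNEL_READY before DMA-ing the rc
  *
  * the channel-ready write must precede the reply-address copy.
  */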
 
-static u16 ecore_iov_vport_to_tlv(struct ecore_hwfn *p_hwfn,
-                                 enum ecore_iov_vport_update_flag flag)
+static u16 ecore_iov_vport_to_tlv(enum ecore_iov_vport_update_flag flag)
 {
        switch (flag) {
        case ECORE_IOV_VP_UPDATE_ACTIVATE:
@@ -1062,19 +1449,19 @@ static u16 ecore_iov_prep_vp_update_resp_tlvs(struct ecore_hwfn *p_hwfn,
        u16 size, total_len, i;
 
        OSAL_MEMSET(p_mbx->reply_virt, 0, sizeof(union pfvf_tlvs));
-       p_mbx->offset = (u8 *)(p_mbx->reply_virt);
+       p_mbx->offset = (u8 *)p_mbx->reply_virt;
        size = sizeof(struct pfvf_def_resp_tlv);
        total_len = size;
 
-       ecore_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_VPORT_UPDATE, size);
+       ecore_add_tlv(&p_mbx->offset, CHANNEL_TLV_VPORT_UPDATE, size);
 
        /* Prepare response for all extended tlvs if they are found by PF */
        for (i = 0; i < ECORE_IOV_VP_UPDATE_MAX; i++) {
                if (!(tlvs_mask & (1 << i)))
                        continue;
 
-               resp = ecore_add_tlv(p_hwfn, &p_mbx->offset,
-                                    ecore_iov_vport_to_tlv(p_hwfn, i), size);
+               resp = ecore_add_tlv(&p_mbx->offset, ecore_iov_vport_to_tlv(i),
+                                    size);
 
                if (tlvs_accepted & (1 << i))
                        resp->hdr.status = status;
@@ -1084,12 +1471,13 @@ static u16 ecore_iov_prep_vp_update_resp_tlvs(struct ecore_hwfn *p_hwfn,
                DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
                           "VF[%d] - vport_update resp: TLV %d, status %02x\n",
                           p_vf->relative_vf_id,
-                          ecore_iov_vport_to_tlv(p_hwfn, i), resp->hdr.status);
+                          ecore_iov_vport_to_tlv(i),
+                          resp->hdr.status);
 
                total_len += size;
        }
 
-       ecore_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_LIST_END,
+       ecore_add_tlv(&p_mbx->offset, CHANNEL_TLV_LIST_END,
                      sizeof(struct channel_list_end_tlv));
 
        return total_len;
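 
 /* Worked example (editorial note): if a VF requested the ACTIVATE and
  * RSS updates, tlvs_mask has the ECORE_IOV_VP_UPDATE_ACTIVATE and
  * ECORE_IOV_VP_UPDATE_RSS bits set. If the PF accepted only the former,
  * tlvs_accepted lacks the RSS bit, so the loop above emits two extended
  * TLVs - ACTIVATE carrying 'status', RSS carrying a failure code -
  * letting the VF see per-feature results in a single response.
  */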
@@ -1102,23 +1490,35 @@ static void ecore_iov_prepare_resp(struct ecore_hwfn *p_hwfn,
 {
        struct ecore_iov_vf_mbx *mbx = &vf_info->vf_mbx;
 
-       mbx->offset = (u8 *)(mbx->reply_virt);
+       mbx->offset = (u8 *)mbx->reply_virt;
 
-       ecore_add_tlv(p_hwfn, &mbx->offset, type, length);
-       ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
+       ecore_add_tlv(&mbx->offset, type, length);
+       ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
                      sizeof(struct channel_list_end_tlv));
 
        ecore_iov_send_response(p_hwfn, p_ptt, vf_info, length, status);
 }
 
+struct ecore_public_vf_info
+*ecore_iov_get_public_vf_info(struct ecore_hwfn *p_hwfn,
+                             u16 relative_vf_id,
+                             bool b_enabled_only)
+{
+       struct ecore_vf_info *vf = OSAL_NULL;
+
+       vf = ecore_iov_get_vf_info(p_hwfn, relative_vf_id, b_enabled_only);
+       if (!vf)
+               return OSAL_NULL;
+
+       return &vf->p_vf_info;
+}
+
 static void ecore_iov_vf_cleanup(struct ecore_hwfn *p_hwfn,
                                 struct ecore_vf_info *p_vf)
 {
+       u32 i, j;
+
        p_vf->vf_bulletin = 0;
        p_vf->vport_instance = 0;
-       p_vf->num_mac_filters = 0;
-       p_vf->num_vlan_filters = 0;
-       p_vf->num_mc_filters = 0;
        p_vf->configured_features = 0;
 
        /* If VF previously requested less resources, go back to default */
@@ -1127,39 +1527,246 @@ static void ecore_iov_vf_cleanup(struct ecore_hwfn *p_hwfn,
 
        p_vf->num_active_rxqs = 0;
 
+       for (i = 0; i < ECORE_MAX_VF_CHAINS_PER_PF; i++) {
+               struct ecore_vf_queue *p_queue = &p_vf->vf_queues[i];
+
+               for (j = 0; j < MAX_QUEUES_PER_QZONE; j++) {
+                       if (!p_queue->cids[j].p_cid)
+                               continue;
+
+                       ecore_eth_queue_cid_release(p_hwfn,
+                                                   p_queue->cids[j].p_cid);
+                       p_queue->cids[j].p_cid = OSAL_NULL;
+               }
+       }
+
        OSAL_MEMSET(&p_vf->shadow_config, 0, sizeof(p_vf->shadow_config));
+       OSAL_MEMSET(&p_vf->acquire, 0, sizeof(p_vf->acquire));
        OSAL_IOV_VF_CLEANUP(p_hwfn, p_vf->relative_vf_id);
 }
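 
 /* Data-structure sketch (editorial; simplified and partly assumed from
  * the ecore_sriov headers): the two-level walk above exists because
  * each VF queue-zone can host multiple queue-cids:
  *
  *     struct ecore_vf_queue {
  *             u16 fw_rx_qid;
  *             u16 fw_tx_qid;
  *             struct {
  *                     struct ecore_queue_cid *p_cid;
  *                     bool b_is_tx;
  *             } cids[MAX_QUEUES_PER_QZONE];
  *     };
  *
  * Only the cids[] shape is relied upon by the cleanup loop.
  */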
 
-static void ecore_iov_vf_mbx_acquire(struct ecore_hwfn *p_hwfn,
-                                    struct ecore_ptt *p_ptt,
-                                    struct ecore_vf_info *vf)
+/* Returns either 0, or log2(size) */
+static u32 ecore_iov_vf_db_bar_size(struct ecore_hwfn *p_hwfn,
+                                   struct ecore_ptt *p_ptt)
+{
+       u32 val = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_VF_BAR1_SIZE);
+
+       if (val)
+               return val + 11;
+       return 0;
+}
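+
+/* Worked example (editorial note): a non-zero PGLUE_B_REG_VF_BAR1_SIZE
+ * value v is interpreted as log2(bytes) = v + 11, e.g. v == 1 yields
+ * 1 << 12 == 4 KB; v == 0 means no doorbell bar was mapped. Callers
+ * therefore expand it as:
+ *
+ *     bar_size = ecore_iov_vf_db_bar_size(p_hwfn, p_ptt);
+ *     if (bar_size)
+ *             bar_size = 1 << bar_size;       // size in bytes
+ */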
+
+static void
+ecore_iov_vf_mbx_acquire_resc_cids(struct ecore_hwfn *p_hwfn,
+                                  struct ecore_ptt *p_ptt,
+                                  struct ecore_vf_info *p_vf,
+                                  struct vf_pf_resc_request *p_req,
+                                  struct pf_vf_resc *p_resp)
+{
+       u8 num_vf_cons = p_hwfn->pf_params.eth_pf_params.num_vf_cons;
+       u8 db_size = DB_ADDR_VF(1, DQ_DEMS_LEGACY) -
+                    DB_ADDR_VF(0, DQ_DEMS_LEGACY);
+       u32 bar_size;
+
+       p_resp->num_cids = OSAL_MIN_T(u8, p_req->num_cids, num_vf_cons);
+
+       /* If the VF didn't bother asking for QIDs then don't bother
+        * limiting the number of CIDs. The VF doesn't care about the
+        * number, and this would likely just cause an extra acquisition.
+        */
+       if (!(p_vf->acquire.vfdev_info.capabilities &
+             VFPF_ACQUIRE_CAP_QUEUE_QIDS))
+               return;
+
+       /* If the doorbell bar was mapped by the VF, limit the VF CIDs to
+        * an amount that guarantees doorbells for all CIDs fall within
+        * the bar. Otherwise, make sure the regview window is sufficient.
+        */
+       if (p_vf->acquire.vfdev_info.capabilities &
+           VFPF_ACQUIRE_CAP_PHYSICAL_BAR) {
+               bar_size = ecore_iov_vf_db_bar_size(p_hwfn, p_ptt);
+               if (bar_size)
+                       bar_size = 1 << bar_size;
+
+               if (ECORE_IS_CMT(p_hwfn->p_dev))
+                       bar_size /= 2;
+       } else {
+               bar_size = PXP_VF_BAR0_DQ_LENGTH;
+       }
+
+       if (bar_size / db_size < 256)
+               p_resp->num_cids = OSAL_MIN_T(u8, p_resp->num_cids,
+                                             (u8)(bar_size / db_size));
+}
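+
+/* Worked example (editorial; the 16-byte stride below is an assumption
+ * for illustration only): db_size is the distance between two adjacent
+ * VF doorbells. With a 16-byte stride and a 4 KB doorbell bar on a
+ * non-CMT device, 4096 / 16 == 256 doorbells fit, so num_cids is left
+ * untouched; a 2 KB bar fits only 128, and num_cids would be clamped
+ * to 128 by the check above.
+ */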
+
+static u8 ecore_iov_vf_mbx_acquire_resc(struct ecore_hwfn *p_hwfn,
+                                       struct ecore_ptt *p_ptt,
+                                       struct ecore_vf_info *p_vf,
+                                       struct vf_pf_resc_request *p_req,
+                                       struct pf_vf_resc *p_resp)
+{
+       u8 i;
+
+       /* Queue related information */
+       p_resp->num_rxqs = p_vf->num_rxqs;
+       p_resp->num_txqs = p_vf->num_txqs;
+       p_resp->num_sbs = p_vf->num_sbs;
+
+       for (i = 0; i < p_resp->num_sbs; i++) {
+               p_resp->hw_sbs[i].hw_sb_id = p_vf->igu_sbs[i];
+               /* TODO - what's this sb_qid field? Is it deprecated,
+                * or is there an ecore client that looks at it?
+                */
+               p_resp->hw_sbs[i].sb_qid = 0;
+       }
+
+       /* These fields are filled for backward compatibility.
+        * Unused by modern VFs.
+        */
+       for (i = 0; i < p_resp->num_rxqs; i++) {
+               ecore_fw_l2_queue(p_hwfn, p_vf->vf_queues[i].fw_rx_qid,
+                                 (u16 *)&p_resp->hw_qid[i]);
+               p_resp->cid[i] = i;
+       }
+
+       /* Filter related information */
+       p_resp->num_mac_filters = OSAL_MIN_T(u8, p_vf->num_mac_filters,
+                                            p_req->num_mac_filters);
+       p_resp->num_vlan_filters = OSAL_MIN_T(u8, p_vf->num_vlan_filters,
+                                             p_req->num_vlan_filters);
+
+       ecore_iov_vf_mbx_acquire_resc_cids(p_hwfn, p_ptt, p_vf, p_req, p_resp);
+
+       /* This isn't really needed/enforced, but some legacy VFs might depend
+        * on the correct filling of this field.
+        */
+       p_resp->num_mc_filters = ECORE_MAX_MC_ADDRS;
+
+       /* Validate sufficient resources for VF */
+       if (p_resp->num_rxqs < p_req->num_rxqs ||
+           p_resp->num_txqs < p_req->num_txqs ||
+           p_resp->num_sbs < p_req->num_sbs ||
+           p_resp->num_mac_filters < p_req->num_mac_filters ||
+           p_resp->num_vlan_filters < p_req->num_vlan_filters ||
+           p_resp->num_mc_filters < p_req->num_mc_filters ||
+           p_resp->num_cids < p_req->num_cids) {
+               DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                          "VF[%d] - Insufficient resources: rxq [%02x/%02x] txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x] vlan [%02x/%02x] mc [%02x/%02x] cids [%02x/%02x]\n",
+                          p_vf->abs_vf_id,
+                          p_req->num_rxqs, p_resp->num_rxqs,
+                          p_req->num_txqs, p_resp->num_txqs,
+                          p_req->num_sbs, p_resp->num_sbs,
+                          p_req->num_mac_filters, p_resp->num_mac_filters,
+                          p_req->num_vlan_filters, p_resp->num_vlan_filters,
+                          p_req->num_mc_filters, p_resp->num_mc_filters,
+                          p_req->num_cids, p_resp->num_cids);
+
+               /* Some legacy OSes are incapable of correctly handling this
+                * failure.
+                */
+               if ((p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
+                    ETH_HSI_VER_NO_PKT_LEN_TUNN) &&
+                   (p_vf->acquire.vfdev_info.os_type ==
+                    VFPF_ACQUIRE_OS_WINDOWS))
+                       return PFVF_STATUS_SUCCESS;
+
+               return PFVF_STATUS_NO_RESOURCE;
+       }
+
+       return PFVF_STATUS_SUCCESS;
+}
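+
+/* Example flow (editorial note): a VF asking for 16 MAC filters from a
+ * PF that reserved only 8 gets num_mac_filters = OSAL_MIN_T(u8, 8, 16),
+ * i.e. 8, which fails the validation above with PFVF_STATUS_NO_RESOURCE
+ * so the VF can retry with a smaller request - unless it is a legacy
+ * Windows VF, which historically mishandles that status and is
+ * therefore answered with success instead.
+ */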
+
+static void ecore_iov_vf_mbx_acquire_stats(struct pfvf_stats_info *p_stats)
+{
+       p_stats->mstats.address = PXP_VF_BAR0_START_MSDM_ZONE_B +
+                                 OFFSETOF(struct mstorm_vf_zone,
+                                          non_trigger.eth_queue_stat);
+       p_stats->mstats.len = sizeof(struct eth_mstorm_per_queue_stat);
+       p_stats->ustats.address = PXP_VF_BAR0_START_USDM_ZONE_B +
+                                 OFFSETOF(struct ustorm_vf_zone,
+                                          non_trigger.eth_queue_stat);
+       p_stats->ustats.len = sizeof(struct eth_ustorm_per_queue_stat);
+       p_stats->pstats.address = PXP_VF_BAR0_START_PSDM_ZONE_B +
+                                 OFFSETOF(struct pstorm_vf_zone,
+                                          non_trigger.eth_queue_stat);
+       p_stats->pstats.len = sizeof(struct eth_pstorm_per_queue_stat);
+       p_stats->tstats.address = 0;
+       p_stats->tstats.len = 0;
+}
+
+static void ecore_iov_vf_mbx_acquire(struct ecore_hwfn       *p_hwfn,
+                                    struct ecore_ptt        *p_ptt,
+                                    struct ecore_vf_info    *vf)
 {
        struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
-       struct vfpf_acquire_tlv *req = &mbx->req_virt->acquire;
        struct pfvf_acquire_resp_tlv *resp = &mbx->reply_virt->acquire_resp;
-       struct pf_vf_resc *resc = &resp->resc;
        struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
-       u16 length;
-       u8 i, vfpf_status = PFVF_STATUS_SUCCESS;
+       struct vfpf_acquire_tlv *req = &mbx->req_virt->acquire;
+       u8 vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
+       struct pf_vf_resc *resc = &resp->resc;
+       enum _ecore_status_t rc;
+
+       OSAL_MEMSET(resp, 0, sizeof(*resp));
+
+       /* Write the PF version so the VF knows which version is
+        * supported - it might be overridden later. This guarantees a VF
+        * can recognize a legacy PF by the lack of versions in the reply.
+        */
+       pfdev_info->major_fp_hsi = ETH_HSI_VER_MAJOR;
+       pfdev_info->minor_fp_hsi = ETH_HSI_VER_MINOR;
+
+       /* TODO - not doing anything is bad since we'll assert, but this isn't
+        * necessarily the right behavior - perhaps we should have allowed some
+        * versatility here.
+        */
+       if (vf->state != VF_FREE &&
+           vf->state != VF_STOPPED) {
+               DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                          "VF[%d] sent ACQUIRE but is already in state %d - fail request\n",
+                          vf->abs_vf_id, vf->state);
+               goto out;
+       }
 
        /* Validate FW compatibility */
-       if (req->vfdev_info.fw_major != FW_MAJOR_VERSION ||
-           req->vfdev_info.fw_minor != FW_MINOR_VERSION ||
-           req->vfdev_info.fw_revision != FW_REVISION_VERSION ||
-           req->vfdev_info.fw_engineering != FW_ENGINEERING_VERSION) {
+       if (req->vfdev_info.eth_fp_hsi_major != ETH_HSI_VER_MAJOR) {
+               if (req->vfdev_info.capabilities &
+                   VFPF_ACQUIRE_CAP_PRE_FP_HSI) {
+                       struct vf_pf_vfdev_info *p_vfdev = &req->vfdev_info;
+
+                       /* This legacy support would need to be removed once
+                        * the major has changed.
+                        */
+                       OSAL_BUILD_BUG_ON(ETH_HSI_VER_MAJOR != 3);
+
+                       DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                                  "VF[%d] is pre-fastpath HSI\n",
+                                  vf->abs_vf_id);
+                       p_vfdev->eth_fp_hsi_major = ETH_HSI_VER_MAJOR;
+                       p_vfdev->eth_fp_hsi_minor = ETH_HSI_VER_NO_PKT_LEN_TUNN;
+               } else {
+                       DP_INFO(p_hwfn,
+                               "VF[%d] needs fastpath HSI %02x.%02x, which is"
+                               " incompatible with loaded FW's faspath"
+                               " HSI %02x.%02x\n",
+                               vf->abs_vf_id,
+                               req->vfdev_info.eth_fp_hsi_major,
+                               req->vfdev_info.eth_fp_hsi_minor,
+                               ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);
+
+                       goto out;
+               }
+       }
+
+       /* On 100g PFs, prevent old VFs from loading */
+       if (ECORE_IS_CMT(p_hwfn->p_dev) &&
+           !(req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_100G)) {
                DP_INFO(p_hwfn,
-                       "VF[%d] is running an incompatible driver [VF needs"
-                       " FW %02x:%02x:%02x:%02x but Hypervisor is"
-                       " using %02x:%02x:%02x:%02x]\n",
-                       vf->abs_vf_id, req->vfdev_info.fw_major,
-                       req->vfdev_info.fw_minor, req->vfdev_info.fw_revision,
-                       req->vfdev_info.fw_engineering, FW_MAJOR_VERSION,
-                       FW_MINOR_VERSION, FW_REVISION_VERSION,
-                       FW_ENGINEERING_VERSION);
-               vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
+                       "VF[%d] is running an old driver that doesn't support"
+                       " 100g\n",
+                       vf->abs_vf_id);
                goto out;
        }
+
 #ifndef __EXTRACT__LINUX__
        if (OSAL_IOV_VF_ACQUIRE(p_hwfn, vf->relative_vf_id) != ECORE_SUCCESS) {
                vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
@@ -1167,13 +1774,10 @@ static void ecore_iov_vf_mbx_acquire(struct ecore_hwfn *p_hwfn,
        }
 #endif
 
-       OSAL_MEMSET(resp, 0, sizeof(*resp));
+       /* Store the acquire message */
+       OSAL_MEMCPY(&vf->acquire, req, sizeof(vf->acquire));
 
-       /* Fill in vf info stuff : @@@TBD MichalK Hard Coded for now... */
        vf->opaque_fid = req->vfdev_info.opaque_fid;
-       vf->num_mac_filters = 1;
-       vf->num_vlan_filters = ECORE_ETH_VF_NUM_VLAN_FILTERS;
-       vf->num_mc_filters = ECORE_MAX_MC_ADDRS;
 
        vf->vf_bulletin = req->bulletin_addr;
        vf->bulletin.size = (vf->bulletin.size < req->bulletin_size) ?
@@ -1182,29 +1786,24 @@ static void ecore_iov_vf_mbx_acquire(struct ecore_hwfn *p_hwfn,
        /* fill in pfdev info */
        pfdev_info->chip_num = p_hwfn->p_dev->chip_num;
        pfdev_info->db_size = 0;        /* @@@ TBD MichalK Vf Doorbells */
-       pfdev_info->indices_per_sb = PIS_PER_SB;
-       pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED;
-
-       pfdev_info->stats_info.mstats.address =
-           PXP_VF_BAR0_START_MSDM_ZONE_B +
-           OFFSETOF(struct mstorm_vf_zone, non_trigger.eth_queue_stat);
-       pfdev_info->stats_info.mstats.len =
-           sizeof(struct eth_mstorm_per_queue_stat);
-
-       pfdev_info->stats_info.ustats.address =
-           PXP_VF_BAR0_START_USDM_ZONE_B +
-           OFFSETOF(struct ustorm_vf_zone, non_trigger.eth_queue_stat);
-       pfdev_info->stats_info.ustats.len =
-           sizeof(struct eth_ustorm_per_queue_stat);
-
-       pfdev_info->stats_info.pstats.address =
-           PXP_VF_BAR0_START_PSDM_ZONE_B +
-           OFFSETOF(struct pstorm_vf_zone, non_trigger.eth_queue_stat);
-       pfdev_info->stats_info.pstats.len =
-           sizeof(struct eth_pstorm_per_queue_stat);
-
-       pfdev_info->stats_info.tstats.address = 0;
-       pfdev_info->stats_info.tstats.len = 0;
+       pfdev_info->indices_per_sb = PIS_PER_SB_E4;
+
+       pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED |
+                                  PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE;
+       if (ECORE_IS_CMT(p_hwfn->p_dev))
+               pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_100G;
+
+       /* Share our ability to use multiple queue-ids only with VFs
+        * that request it.
+        */
+       if (req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_QUEUE_QIDS)
+               pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_QUEUE_QIDS;
+
+       /* Share the sizes of the bars with the VF */
+       resp->pfdev_info.bar_size = (u8)ecore_iov_vf_db_bar_size(p_hwfn,
+                                                                p_ptt);
+
+       ecore_iov_vf_mbx_acquire_stats(&pfdev_info->stats_info);
 
        OSAL_MEMCPY(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr,
                    ETH_ALEN);
@@ -1213,35 +1812,36 @@ static void ecore_iov_vf_mbx_acquire(struct ecore_hwfn *p_hwfn,
        pfdev_info->fw_minor = FW_MINOR_VERSION;
        pfdev_info->fw_rev = FW_REVISION_VERSION;
        pfdev_info->fw_eng = FW_ENGINEERING_VERSION;
+
+       /* Incorrect for a legacy VF, but it doesn't matter since legacy
+        * VFs don't read this field.
+        */
+       pfdev_info->minor_fp_hsi = OSAL_MIN_T(u8, ETH_HSI_VER_MINOR,
+                                             req->vfdev_info.eth_fp_hsi_minor);
        pfdev_info->os_type = OSAL_IOV_GET_OS_TYPE();
-       ecore_mcp_get_mfw_ver(p_hwfn->p_dev, p_ptt, &pfdev_info->mfw_ver,
+       ecore_mcp_get_mfw_ver(p_hwfn, p_ptt, &pfdev_info->mfw_ver,
                              OSAL_NULL);
 
        pfdev_info->dev_type = p_hwfn->p_dev->type;
        pfdev_info->chip_rev = p_hwfn->p_dev->chip_rev;
 
-       /* Fill in resc : @@@TBD MichalK Hard Coded for now... */
-       resc->num_rxqs = vf->num_rxqs;
-       resc->num_txqs = vf->num_txqs;
-       resc->num_sbs = vf->num_sbs;
-       for (i = 0; i < resc->num_sbs; i++) {
-               resc->hw_sbs[i].hw_sb_id = vf->igu_sbs[i];
-               resc->hw_sbs[i].sb_qid = 0;
-       }
+       /* Fill in the resources available to the VF; make sure there are
+        * enough to satisfy the VF's request.
+        */
+       vfpf_status = ecore_iov_vf_mbx_acquire_resc(p_hwfn, p_ptt, vf,
+                                                   &req->resc_request, resc);
+       if (vfpf_status != PFVF_STATUS_SUCCESS)
+               goto out;
 
-       for (i = 0; i < resc->num_rxqs; i++) {
-               ecore_fw_l2_queue(p_hwfn, vf->vf_queues[i].fw_rx_qid,
-                                 (u16 *)&resc->hw_qid[i]);
-               resc->cid[i] = vf->vf_queues[i].fw_cid;
+       /* Start the VF in FW */
+       rc = ecore_sp_vf_start(p_hwfn, vf);
+       if (rc != ECORE_SUCCESS) {
+               DP_NOTICE(p_hwfn, true, "Failed to start VF[%02x]\n",
+                         vf->abs_vf_id);
+               vfpf_status = PFVF_STATUS_FAILURE;
+               goto out;
        }
 
-       resc->num_mac_filters = OSAL_MIN_T(u8, vf->num_mac_filters,
-                                          req->resc_request.num_mac_filters);
-       resc->num_vlan_filters = OSAL_MIN_T(u8, vf->num_vlan_filters,
-                                           req->resc_request.num_vlan_filters);
-       resc->num_mc_filters = OSAL_MIN_T(u8, vf->num_mc_filters,
-                                         req->resc_request.num_mc_filters);
-
        /* Fill agreed size of bulletin board in response, and post
         * an initial image to the bulletin board.
         */
@@ -1250,25 +1850,22 @@ static void ecore_iov_vf_mbx_acquire(struct ecore_hwfn *p_hwfn,
 
        DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
                   "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x,"
-                  " db_size=%d, idx_per_sb=%d, pf_cap=0x%" PRIx64 "\n"
+                  " db_size=%d, idx_per_sb=%d, pf_cap=0x%lx\n"
                   "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d,"
-                  " n_vlans-%d, n_mcs-%d\n",
+                  " n_vlans-%d\n",
                   vf->abs_vf_id, resp->pfdev_info.chip_num,
                   resp->pfdev_info.db_size, resp->pfdev_info.indices_per_sb,
-                  resp->pfdev_info.capabilities, resc->num_rxqs,
+                  (unsigned long)resp->pfdev_info.capabilities, resc->num_rxqs,
                   resc->num_txqs, resc->num_sbs, resc->num_mac_filters,
-                  resc->num_vlan_filters, resc->num_mc_filters);
+                  resc->num_vlan_filters);
 
        vf->state = VF_ACQUIRED;
 
-       /* Prepare Response */
-       length = sizeof(struct pfvf_acquire_resp_tlv);
-
 out:
+       /* Prepare Response */
        ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_ACQUIRE,
-                              length, vfpf_status);
-
-       /* @@@TBD Bulletin */
+                              sizeof(struct pfvf_acquire_resp_tlv),
+                              vfpf_status);
 }
 
 static enum _ecore_status_t
@@ -1310,8 +1907,8 @@ static enum _ecore_status_t
 ecore_iov_reconfigure_unicast_vlan(struct ecore_hwfn *p_hwfn,
                                   struct ecore_vf_info *p_vf)
 {
-       enum _ecore_status_t rc = ECORE_SUCCESS;
        struct ecore_filter_ucast filter;
+       enum _ecore_status_t rc = ECORE_SUCCESS;
        int i;
 
        OSAL_MEMSET(&filter, 0, sizeof(filter));
@@ -1322,24 +1919,23 @@ ecore_iov_reconfigure_unicast_vlan(struct ecore_hwfn *p_hwfn,
 
        /* Reconfigure vlans */
        for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
-               if (p_vf->shadow_config.vlans[i].used) {
-                       filter.type = ECORE_FILTER_VLAN;
-                       filter.vlan = p_vf->shadow_config.vlans[i].vid;
-                       DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
-                                  "Reconfig VLAN [0x%04x] for VF [%04x]\n",
-                                  filter.vlan, p_vf->relative_vf_id);
-                       rc = ecore_sp_eth_filter_ucast(p_hwfn,
-                                                      p_vf->opaque_fid,
-                                                      &filter,
-                                                      ECORE_SPQ_MODE_CB,
-                                                      OSAL_NULL);
-                       if (rc) {
-                               DP_NOTICE(p_hwfn, true,
-                                         "Failed to configure VLAN [%04x]"
-                                         " to VF [%04x]\n",
-                                         filter.vlan, p_vf->relative_vf_id);
-                               break;
-                       }
+               if (!p_vf->shadow_config.vlans[i].used)
+                       continue;
+
+               filter.type = ECORE_FILTER_VLAN;
+               filter.vlan = p_vf->shadow_config.vlans[i].vid;
+               DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                          "Reconfiguring VLAN [0x%04x] for VF [%04x]\n",
+                          filter.vlan, p_vf->relative_vf_id);
+               rc = ecore_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
+                                              &filter, ECORE_SPQ_MODE_CB,
+                                              OSAL_NULL);
+               if (rc) {
+                       DP_NOTICE(p_hwfn, true,
+                                 "Failed to configure VLAN [%04x]"
+                                 " to VF [%04x]\n",
+                                 filter.vlan, p_vf->relative_vf_id);
+                       break;
                }
        }
 
@@ -1361,9 +1957,10 @@ ecore_iov_reconfigure_unicast_shadow(struct ecore_hwfn *p_hwfn,
        return rc;
 }
 
-static int ecore_iov_configure_vport_forced(struct ecore_hwfn *p_hwfn,
-                                           struct ecore_vf_info *p_vf,
-                                           u64 events)
+static enum _ecore_status_t
+ecore_iov_configure_vport_forced(struct ecore_hwfn *p_hwfn,
+                                struct ecore_vf_info *p_vf,
+                                u64 events)
 {
        enum _ecore_status_t rc = ECORE_SUCCESS;
        struct ecore_filter_ucast filter;
@@ -1443,22 +2040,24 @@ static int ecore_iov_configure_vport_forced(struct ecore_hwfn *p_hwfn,
 
                /* Update all the Rx queues */
                for (i = 0; i < ECORE_MAX_VF_CHAINS_PER_PF; i++) {
-                       u16 qid;
+                       struct ecore_vf_queue *p_queue = &p_vf->vf_queues[i];
+                       struct ecore_queue_cid *p_cid = OSAL_NULL;
 
-                       if (!p_vf->vf_queues[i].rxq_active)
+                       /* There can be at most one Rx queue per qzone - find it */
+                       p_cid = ecore_iov_get_vf_rx_queue_cid(p_queue);
+                       if (p_cid == OSAL_NULL)
                                continue;
 
-                       qid = p_vf->vf_queues[i].fw_rx_qid;
-
-                       rc = ecore_sp_eth_rx_queues_update(p_hwfn, qid,
+                       rc = ecore_sp_eth_rx_queues_update(p_hwfn,
+                                                          (void **)&p_cid,
                                                   1, 0, 1,
                                                   ECORE_SPQ_MODE_EBLOCK,
                                                   OSAL_NULL);
                        if (rc) {
                                DP_NOTICE(p_hwfn, true,
                                          "Failed to send Rx update"
-                                         " queue[0x%04x]\n",
-                                         qid);
+                                         " fo queue[0x%04x]\n",
+                                         p_cid->rel.queue_id);
                                return rc;
                        }
                }
@@ -1482,14 +2081,14 @@ static void ecore_iov_vf_mbx_start_vport(struct ecore_hwfn *p_hwfn,
                                         struct ecore_ptt *p_ptt,
                                         struct ecore_vf_info *vf)
 {
-       struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
-       struct vfpf_vport_start_tlv *start = &mbx->req_virt->start_vport;
        struct ecore_sp_vport_start_params params = { 0 };
+       struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
+       struct vfpf_vport_start_tlv *start;
        u8 status = PFVF_STATUS_SUCCESS;
        struct ecore_vf_info *vf_info;
-       enum _ecore_status_t rc;
        u64 *p_bitmap;
        int sb_id;
+       enum _ecore_status_t rc;
 
        vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vf->relative_vf_id, true);
        if (!vf_info) {
@@ -1500,6 +2099,9 @@ static void ecore_iov_vf_mbx_start_vport(struct ecore_hwfn *p_hwfn,
        }
 
        vf->state = VF_ENABLED;
+       start = &mbx->req_virt->start_vport;
+
+       ecore_iov_enable_vf_traffic(p_hwfn, p_ptt, vf);
 
        /* Initialize Status block in CAU */
        for (sb_id = 0; sb_id < vf->num_sbs; sb_id++) {
@@ -1513,9 +2115,8 @@ static void ecore_iov_vf_mbx_start_vport(struct ecore_hwfn *p_hwfn,
                ecore_int_cau_conf_sb(p_hwfn, p_ptt,
                                      start->sb_addr[sb_id],
                                      vf->igu_sbs[sb_id],
-                                     vf->abs_vf_id, 1 /* VF Valid */);
+                                     vf->abs_vf_id, 1);
        }
-       ecore_iov_enable_vf_traffic(p_hwfn, p_ptt, vf);
 
        vf->mtu = start->mtu;
        vf->shadow_config.inner_vlan_removal = start->inner_vlan_removal;
@@ -1539,7 +2140,7 @@ static void ecore_iov_vf_mbx_start_vport(struct ecore_hwfn *p_hwfn,
 #ifndef ASIC_ONLY
        if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
                DP_NOTICE(p_hwfn, false,
-                         "FPGA: Don't confi VF for Tx-switching [no pVFC]\n");
+                         "FPGA: Don't config VF for Tx-switching [no pVFC]\n");
                params.tx_switching = false;
        }
 #endif
@@ -1551,6 +2152,7 @@ static void ecore_iov_vf_mbx_start_vport(struct ecore_hwfn *p_hwfn,
        params.vport_id = vf->vport_id;
        params.max_buffers_per_cqe = start->max_buffers_per_cqe;
        params.mtu = vf->mtu;
+       params.check_mac = true;
 
        rc = ecore_sp_eth_vport_start(p_hwfn, &params);
        if (rc != ECORE_SUCCESS) {
@@ -1578,9 +2180,21 @@ static void ecore_iov_vf_mbx_stop_vport(struct ecore_hwfn *p_hwfn,
        u8 status = PFVF_STATUS_SUCCESS;
        enum _ecore_status_t rc;
 
+       OSAL_IOV_VF_VPORT_STOP(p_hwfn, vf);
        vf->vport_instance--;
        vf->spoof_chk = false;
 
+       if (ecore_iov_validate_active_rxq(vf) ||
+           ecore_iov_validate_active_txq(vf)) {
+               vf->b_malicious = true;
+               DP_NOTICE(p_hwfn, false,
+                         "VF [%02x] - considered malicious;"
+                         " unable to stop RX/TX queues\n",
+                         vf->abs_vf_id);
+               status = PFVF_STATUS_MALICIOUS;
+               goto out;
+       }
+
        rc = ecore_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id);
        if (rc != ECORE_SUCCESS) {
                DP_ERR(p_hwfn,
@@ -1592,151 +2206,539 @@ static void ecore_iov_vf_mbx_stop_vport(struct ecore_hwfn *p_hwfn,
        vf->configured_features = 0;
        OSAL_MEMSET(&vf->shadow_config, 0, sizeof(vf->shadow_config));
 
+out:
        ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_TEARDOWN,
                               sizeof(struct pfvf_def_resp_tlv), status);
 }
 
-static void ecore_iov_vf_mbx_start_rxq(struct ecore_hwfn *p_hwfn,
-                                      struct ecore_ptt *p_ptt,
-                                      struct ecore_vf_info *vf)
+static void ecore_iov_vf_mbx_start_rxq_resp(struct ecore_hwfn *p_hwfn,
+                                           struct ecore_ptt *p_ptt,
+                                           struct ecore_vf_info *vf,
+                                           u8 status, bool b_legacy)
 {
        struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
-       struct vfpf_start_rxq_tlv *req = &mbx->req_virt->start_rxq;
-       u16 length = sizeof(struct pfvf_def_resp_tlv);
-       u8 status = PFVF_STATUS_SUCCESS;
-       enum _ecore_status_t rc;
+       struct pfvf_start_queue_resp_tlv *p_tlv;
+       struct vfpf_start_rxq_tlv *req;
+       u16 length;
 
-       rc = ecore_sp_eth_rxq_start_ramrod(p_hwfn, vf->opaque_fid,
-                                          vf->vf_queues[req->rx_qid].fw_cid,
-                                          vf->vf_queues[req->rx_qid].fw_rx_qid,
-                                          vf->vport_id,
-                                          vf->abs_vf_id + 0x10,
-                                          req->hw_sb,
-                                          req->sb_index,
-                                          req->bd_max_bytes,
-                                          req->rxq_addr,
-                                          req->cqe_pbl_addr,
-                                          req->cqe_pbl_size);
-
-       if (rc) {
-               status = PFVF_STATUS_FAILURE;
-       } else {
-               vf->vf_queues[req->rx_qid].rxq_active = true;
-               vf->num_active_rxqs++;
-       }
+       mbx->offset = (u8 *)mbx->reply_virt;
 
-       ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_START_RXQ,
-                              length, status);
+       /* Taking a bigger struct instead of adding a TLV to the list was a
+        * mistake, but one which we're now stuck with, as some older
+        * clients assume the size of the previous response.
+        */
+       if (!b_legacy)
+               length = sizeof(*p_tlv);
+       else
+               length = sizeof(struct pfvf_def_resp_tlv);
+
+       p_tlv = ecore_add_tlv(&mbx->offset, CHANNEL_TLV_START_RXQ, length);
+       ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
+                     sizeof(struct channel_list_end_tlv));
+
+       /* Update the TLV with the response */
+       if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) {
+               req = &mbx->req_virt->start_rxq;
+               p_tlv->offset = PXP_VF_BAR0_START_MSDM_ZONE_B +
+                               OFFSETOF(struct mstorm_vf_zone,
+                                        non_trigger.eth_rx_queue_producers) +
+                               sizeof(struct eth_rx_prod_data) * req->rx_qid;
+       }
+
+       ecore_iov_send_response(p_hwfn, p_ptt, vf, length, status);
 }
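 
 /* Worked example (editorial note): for a modern (non-legacy) VF starting
  * rx_qid 2, the returned producer offset resolves to:
  *
  *     PXP_VF_BAR0_START_MSDM_ZONE_B
  *         + OFFSETOF(struct mstorm_vf_zone,
  *                    non_trigger.eth_rx_queue_producers)
  *         + sizeof(struct eth_rx_prod_data) * 2;
  *
  * Legacy VFs never read this field, which is why their response shrinks
  * back to a plain pfvf_def_resp_tlv.
  */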
 
-static void ecore_iov_vf_mbx_start_txq(struct ecore_hwfn *p_hwfn,
+static u8 ecore_iov_vf_mbx_qid(struct ecore_hwfn *p_hwfn,
+                              struct ecore_vf_info *p_vf, bool b_is_tx)
+{
+       struct ecore_iov_vf_mbx *p_mbx = &p_vf->vf_mbx;
+       struct vfpf_qid_tlv *p_qid_tlv;
+
+       /* Search for the qid the VF published, if it's going to provide it */
+       if (!(p_vf->acquire.vfdev_info.capabilities &
+             VFPF_ACQUIRE_CAP_QUEUE_QIDS)) {
+               if (b_is_tx)
+                       return ECORE_IOV_LEGACY_QID_TX;
+               else
+                       return ECORE_IOV_LEGACY_QID_RX;
+       }
+
+       p_qid_tlv = (struct vfpf_qid_tlv *)
+                   ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
+                                              CHANNEL_TLV_QID);
+       if (p_qid_tlv == OSAL_NULL) {
+               DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                          "VF[%2x]: Failed to provide qid\n",
+                          p_vf->relative_vf_id);
+
+               return ECORE_IOV_QID_INVALID;
+       }
+
+       if (p_qid_tlv->qid >= MAX_QUEUES_PER_QZONE) {
+               DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                          "VF[%02x]: Provided qid out-of-bounds %02x\n",
+                          p_vf->relative_vf_id, p_qid_tlv->qid);
+               return ECORE_IOV_QID_INVALID;
+       }
+
+       return p_qid_tlv->qid;
+}
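+
+/* Companion sketch (editorial; variable names assumed - the actual VF
+ * counterpart lives in ecore_vf.c): a QIDS-capable VF appends the extra
+ * TLV right after its request, roughly:
+ *
+ *     struct vfpf_qid_tlv *p_qid_tlv;
+ *
+ *     p_qid_tlv = ecore_add_tlv(&p_iov->offset, CHANNEL_TLV_QID,
+ *                               sizeof(*p_qid_tlv));
+ *     p_qid_tlv->qid = p_cid->qid_usage_idx;
+ *
+ * The search above only runs once VFPF_ACQUIRE_CAP_QUEUE_QIDS was
+ * negotiated; otherwise the fixed legacy Rx/Tx indices are used.
+ */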
+
+static void ecore_iov_vf_mbx_start_rxq(struct ecore_hwfn *p_hwfn,
                                       struct ecore_ptt *p_ptt,
                                       struct ecore_vf_info *vf)
 {
+       struct ecore_queue_start_common_params params;
+       struct ecore_queue_cid_vf_params vf_params;
        struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
-       struct vfpf_start_txq_tlv *req = &mbx->req_virt->start_txq;
-       u16 length = sizeof(struct pfvf_def_resp_tlv);
-       union ecore_qm_pq_params pq_params;
-       u8 status = PFVF_STATUS_SUCCESS;
+       u8 status = PFVF_STATUS_NO_RESOURCE;
+       u8 qid_usage_idx, vf_legacy = 0;
+       struct ecore_vf_queue *p_queue;
+       struct vfpf_start_rxq_tlv *req;
+       struct ecore_queue_cid *p_cid;
+       struct ecore_sb_info sb_dummy;
        enum _ecore_status_t rc;
 
-       /* Prepare the parameters which would choose the right PQ */
-       OSAL_MEMSET(&pq_params, 0, sizeof(pq_params));
-       pq_params.eth.is_vf = 1;
-       pq_params.eth.vf_id = vf->relative_vf_id;
-
-       rc = ecore_sp_eth_txq_start_ramrod(p_hwfn,
-                                          vf->opaque_fid,
-                                          vf->vf_queues[req->tx_qid].fw_tx_qid,
-                                          vf->vf_queues[req->tx_qid].fw_cid,
-                                          vf->vport_id,
-                                          vf->abs_vf_id + 0x10,
-                                          req->hw_sb,
-                                          req->sb_index,
-                                          req->pbl_addr,
-                                          req->pbl_size, &pq_params);
+       req = &mbx->req_virt->start_rxq;
 
-       if (rc)
+       if (!ecore_iov_validate_rxq(p_hwfn, vf, req->rx_qid,
+                                   ECORE_IOV_VALIDATE_Q_DISABLE) ||
+           !ecore_iov_validate_sb(p_hwfn, vf, req->hw_sb))
+               goto out;
+
+       qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, false);
+       if (qid_usage_idx == ECORE_IOV_QID_INVALID)
+               goto out;
+
+       p_queue = &vf->vf_queues[req->rx_qid];
+       if (p_queue->cids[qid_usage_idx].p_cid)
+               goto out;
+
+       vf_legacy = ecore_vf_calculate_legacy(vf);
+
+       /* Acquire a new queue-cid */
+       OSAL_MEMSET(&params, 0, sizeof(params));
+       params.queue_id = (u8)p_queue->fw_rx_qid;
+       params.vport_id = vf->vport_id;
+       params.stats_id = vf->abs_vf_id + 0x10;
+
+       /* Since IGU index is passed via sb_info, construct a dummy one */
+       OSAL_MEM_ZERO(&sb_dummy, sizeof(sb_dummy));
+       sb_dummy.igu_sb_id = req->hw_sb;
+       params.p_sb = &sb_dummy;
+       params.sb_idx = req->sb_index;
+
+       OSAL_MEM_ZERO(&vf_params, sizeof(vf_params));
+       vf_params.vfid = vf->relative_vf_id;
+       vf_params.vf_qid = (u8)req->rx_qid;
+       vf_params.vf_legacy = vf_legacy;
+       vf_params.qid_usage_idx = qid_usage_idx;
+
+       p_cid = ecore_eth_queue_to_cid(p_hwfn, vf->opaque_fid,
+                                      &params, true, &vf_params);
+       if (p_cid == OSAL_NULL)
+               goto out;
+
+       /* Legacy VFs have their producers in a different location, which
+        * they calculate on their own and clean prior to this.
+        */
+       if (!(vf_legacy & ECORE_QCID_LEGACY_VF_RX_PROD))
+               REG_WR(p_hwfn,
+                      GTT_BAR0_MAP_REG_MSDM_RAM +
+                      MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id, req->rx_qid),
+                      0);
+
+       rc = ecore_eth_rxq_start_ramrod(p_hwfn, p_cid,
+                                       req->bd_max_bytes,
+                                       req->rxq_addr,
+                                       req->cqe_pbl_addr,
+                                       req->cqe_pbl_size);
+       if (rc != ECORE_SUCCESS) {
                status = PFVF_STATUS_FAILURE;
+               ecore_eth_queue_cid_release(p_hwfn, p_cid);
+       } else {
+               p_queue->cids[qid_usage_idx].p_cid = p_cid;
+               p_queue->cids[qid_usage_idx].b_is_tx = false;
+               status = PFVF_STATUS_SUCCESS;
+               vf->num_active_rxqs++;
+       }
+
+out:
+       ecore_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status,
+                                       !!(vf_legacy &
+                                          ECORE_QCID_LEGACY_VF_RX_PROD));
+}
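+
+/* Editorial note: the producer clear above is needed only for modern
+ * VFs, whose Rx producer lives in the PF-visible MSTORM RAM; e.g. for a
+ * VF with absolute id 5 starting rx_qid 0, the cleared address is
+ *
+ *     GTT_BAR0_MAP_REG_MSDM_RAM + MSTORM_ETH_VF_PRODS_OFFSET(5, 0)
+ *
+ * Legacy VFs compute a producer inside their own bar and zero it
+ * themselves before posting buffers.
+ */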
+
+static void
+ecore_iov_pf_update_tun_response(struct pfvf_update_tunn_param_tlv *p_resp,
+                                struct ecore_tunnel_info *p_tun,
+                                u16 tunn_feature_mask)
+{
+       p_resp->tunn_feature_mask = tunn_feature_mask;
+       p_resp->vxlan_mode = p_tun->vxlan.b_mode_enabled;
+       p_resp->l2geneve_mode = p_tun->l2_geneve.b_mode_enabled;
+       p_resp->ipgeneve_mode = p_tun->ip_geneve.b_mode_enabled;
+       p_resp->l2gre_mode = p_tun->l2_gre.b_mode_enabled;
+       p_resp->ipgre_mode = p_tun->l2_gre.b_mode_enabled;
+       p_resp->vxlan_clss = p_tun->vxlan.tun_cls;
+       p_resp->l2gre_clss = p_tun->l2_gre.tun_cls;
+       p_resp->ipgre_clss = p_tun->ip_gre.tun_cls;
+       p_resp->l2geneve_clss = p_tun->l2_geneve.tun_cls;
+       p_resp->ipgeneve_clss = p_tun->ip_geneve.tun_cls;
+       p_resp->geneve_udp_port = p_tun->geneve_port.port;
+       p_resp->vxlan_udp_port = p_tun->vxlan_port.port;
+}
+
+static void
+__ecore_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req,
+                               struct ecore_tunn_update_type *p_tun,
+                               enum ecore_tunn_mode mask, u8 tun_cls)
+{
+       if (p_req->tun_mode_update_mask & (1 << mask)) {
+               p_tun->b_update_mode = true;
+
+               if (p_req->tunn_mode & (1 << mask))
+                       p_tun->b_mode_enabled = true;
+       }
+
+       p_tun->tun_cls = tun_cls;
+}
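+
+/* Worked example (editorial note): for VXLAN, mask is
+ * ECORE_MODE_VXLAN_TUNN. If the VF set that bit in tun_mode_update_mask,
+ * b_update_mode becomes true, and b_mode_enabled then mirrors the same
+ * bit in tunn_mode. "Update and disable" is thus expressed as the mask
+ * bit set while the tunn_mode bit is clear; tun_cls is copied through
+ * unconditionally.
+ */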
+
+static void
+ecore_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req,
+                             struct ecore_tunn_update_type *p_tun,
+                             struct ecore_tunn_update_udp_port *p_port,
+                             enum ecore_tunn_mode mask,
+                             u8 tun_cls, u8 update_port, u16 port)
+{
+       if (update_port) {
+               p_port->b_update_port = true;
+               p_port->port = port;
+       }
+
+       __ecore_iov_pf_update_tun_param(p_req, p_tun, mask, tun_cls);
+}
+
+static bool
+ecore_iov_pf_validate_tunn_param(struct vfpf_update_tunn_param_tlv *p_req)
+{
+       bool b_update_requested = false;
+
+       if (p_req->tun_mode_update_mask || p_req->update_tun_cls ||
+           p_req->update_geneve_port || p_req->update_vxlan_port)
+               b_update_requested = true;
+
+       return b_update_requested;
+}
+
+static void ecore_iov_vf_mbx_update_tunn_param(struct ecore_hwfn *p_hwfn,
+                                              struct ecore_ptt *p_ptt,
+                                              struct ecore_vf_info *p_vf)
+{
+       struct ecore_tunnel_info *p_tun = &p_hwfn->p_dev->tunnel;
+       struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
+       struct pfvf_update_tunn_param_tlv *p_resp;
+       struct vfpf_update_tunn_param_tlv *p_req;
+       enum _ecore_status_t rc = ECORE_SUCCESS;
+       u8 status = PFVF_STATUS_SUCCESS;
+       bool b_update_required = false;
+       struct ecore_tunnel_info tunn;
+       u16 tunn_feature_mask = 0;
+       int i;
+
+       mbx->offset = (u8 *)mbx->reply_virt;
+
+       OSAL_MEM_ZERO(&tunn, sizeof(tunn));
+       p_req = &mbx->req_virt->tunn_param_update;
+
+       if (!ecore_iov_pf_validate_tunn_param(p_req)) {
+               DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                          "No tunnel update requested by VF\n");
+               status = PFVF_STATUS_FAILURE;
+               goto send_resp;
+       }
+
+       tunn.b_update_rx_cls = p_req->update_tun_cls;
+       tunn.b_update_tx_cls = p_req->update_tun_cls;
+
+       ecore_iov_pf_update_tun_param(p_req, &tunn.vxlan, &tunn.vxlan_port,
+                                     ECORE_MODE_VXLAN_TUNN, p_req->vxlan_clss,
+                                     p_req->update_vxlan_port,
+                                     p_req->vxlan_port);
+       ecore_iov_pf_update_tun_param(p_req, &tunn.l2_geneve, &tunn.geneve_port,
+                                     ECORE_MODE_L2GENEVE_TUNN,
+                                     p_req->l2geneve_clss,
+                                     p_req->update_geneve_port,
+                                     p_req->geneve_port);
+       __ecore_iov_pf_update_tun_param(p_req, &tunn.ip_geneve,
+                                       ECORE_MODE_IPGENEVE_TUNN,
+                                       p_req->ipgeneve_clss);
+       __ecore_iov_pf_update_tun_param(p_req, &tunn.l2_gre,
+                                       ECORE_MODE_L2GRE_TUNN,
+                                       p_req->l2gre_clss);
+       __ecore_iov_pf_update_tun_param(p_req, &tunn.ip_gre,
+                                       ECORE_MODE_IPGRE_TUNN,
+                                       p_req->ipgre_clss);
+
+       /* Even if the PF modifies the VF's request, it should still
+        * return an error for a partial or modified configuration, as
+        * opposed to the requested one.
+        */
+       rc = OSAL_PF_VALIDATE_MODIFY_TUNN_CONFIG(p_hwfn, &tunn_feature_mask,
+                                                &b_update_required, &tunn);
+
+       if (rc != ECORE_SUCCESS)
+               status = PFVF_STATUS_FAILURE;
+
+       /* Is the ECORE client willing to update anything? */
+       if (b_update_required) {
+               u16 geneve_port;
+
+               rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, p_ptt, &tunn,
+                                                ECORE_SPQ_MODE_EBLOCK,
+                                                OSAL_NULL);
+               if (rc != ECORE_SUCCESS)
+                       status = PFVF_STATUS_FAILURE;
+
+               geneve_port = p_tun->geneve_port.port;
+               ecore_for_each_vf(p_hwfn, i) {
+                       ecore_iov_bulletin_set_udp_ports(p_hwfn, i,
+                                                        p_tun->vxlan_port.port,
+                                                        geneve_port);
+               }
+       }
+
+send_resp:
+       p_resp = ecore_add_tlv(&mbx->offset,
+                              CHANNEL_TLV_UPDATE_TUNN_PARAM, sizeof(*p_resp));
+
+       ecore_iov_pf_update_tun_response(p_resp, p_tun, tunn_feature_mask);
+       ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
+                     sizeof(struct channel_list_end_tlv));
+
+       ecore_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status);
+}
+
+static void ecore_iov_vf_mbx_start_txq_resp(struct ecore_hwfn *p_hwfn,
+                                           struct ecore_ptt *p_ptt,
+                                           struct ecore_vf_info *p_vf,
+                                           u32 cid,
+                                           u8 status)
+{
+       struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
+       struct pfvf_start_queue_resp_tlv *p_tlv;
+       bool b_legacy = false;
+       u16 length;
+
+       mbx->offset = (u8 *)mbx->reply_virt;
+
+       /* Taking a bigger struct instead of adding a TLV to the list was a
+        * mistake, but one which we're now stuck with, as some older
+        * clients assume the size of the previous response.
+        */
+       if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
+           ETH_HSI_VER_NO_PKT_LEN_TUNN)
+               b_legacy = true;
+
+       if (!b_legacy)
+               length = sizeof(*p_tlv);
        else
-               vf->vf_queues[req->tx_qid].txq_active = true;
+               length = sizeof(struct pfvf_def_resp_tlv);
 
-       ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_START_TXQ,
-                              length, status);
+       p_tlv = ecore_add_tlv(&mbx->offset, CHANNEL_TLV_START_TXQ, length);
+       ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
+                     sizeof(struct channel_list_end_tlv));
+
+       /* Update the TLV with the response */
+       if ((status == PFVF_STATUS_SUCCESS) && !b_legacy)
+               p_tlv->offset = DB_ADDR_VF(cid, DQ_DEMS_LEGACY);
+
+       ecore_iov_send_response(p_hwfn, p_ptt, p_vf, length, status);
+}
+
+static void ecore_iov_vf_mbx_start_txq(struct ecore_hwfn *p_hwfn,
+                                      struct ecore_ptt *p_ptt,
+                                      struct ecore_vf_info *vf)
+{
+       struct ecore_queue_start_common_params params;
+       struct ecore_queue_cid_vf_params vf_params;
+       struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
+       u8 status = PFVF_STATUS_NO_RESOURCE;
+       struct ecore_vf_queue *p_queue;
+       struct vfpf_start_txq_tlv *req;
+       struct ecore_queue_cid *p_cid;
+       struct ecore_sb_info sb_dummy;
+       u8 qid_usage_idx, vf_legacy;
+       u32 cid = 0;
+       enum _ecore_status_t rc;
+       u16 pq;
+
+       OSAL_MEMSET(&params, 0, sizeof(params));
+       req = &mbx->req_virt->start_txq;
+
+       if (!ecore_iov_validate_txq(p_hwfn, vf, req->tx_qid,
+                                   ECORE_IOV_VALIDATE_Q_NA) ||
+           !ecore_iov_validate_sb(p_hwfn, vf, req->hw_sb))
+               goto out;
+
+       qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, true);
+       if (qid_usage_idx == ECORE_IOV_QID_INVALID)
+               goto out;
+
+       p_queue = &vf->vf_queues[req->tx_qid];
+       if (p_queue->cids[qid_usage_idx].p_cid)
+               goto out;
+
+       vf_legacy = ecore_vf_calculate_legacy(vf);
+
+       /* Acquire a new queue-cid */
+       params.queue_id = p_queue->fw_tx_qid;
+       params.vport_id = vf->vport_id;
+       params.stats_id = vf->abs_vf_id + 0x10;
+
+       /* Since IGU index is passed via sb_info, construct a dummy one */
+       OSAL_MEM_ZERO(&sb_dummy, sizeof(sb_dummy));
+       sb_dummy.igu_sb_id = req->hw_sb;
+       params.p_sb = &sb_dummy;
+       params.sb_idx = req->sb_index;
+
+       OSAL_MEM_ZERO(&vf_params, sizeof(vf_params));
+       vf_params.vfid = vf->relative_vf_id;
+       vf_params.vf_qid = (u8)req->tx_qid;
+       vf_params.vf_legacy = vf_legacy;
+       vf_params.qid_usage_idx = qid_usage_idx;
+
+       p_cid = ecore_eth_queue_to_cid(p_hwfn, vf->opaque_fid,
+                                      &params, false, &vf_params);
+       if (p_cid == OSAL_NULL)
+               goto out;
+
+       pq = ecore_get_cm_pq_idx_vf(p_hwfn,
+                                   vf->relative_vf_id);
+       rc = ecore_eth_txq_start_ramrod(p_hwfn, p_cid,
+                                       req->pbl_addr, req->pbl_size, pq);
+       if (rc != ECORE_SUCCESS) {
+               status = PFVF_STATUS_FAILURE;
+               ecore_eth_queue_cid_release(p_hwfn, p_cid);
+       } else {
+               status = PFVF_STATUS_SUCCESS;
+               p_queue->cids[qid_usage_idx].p_cid = p_cid;
+               p_queue->cids[qid_usage_idx].b_is_tx = true;
+               cid = p_cid->cid;
+       }
+
+out:
+       ecore_iov_vf_mbx_start_txq_resp(p_hwfn, p_ptt, vf,
+                                       cid, status);
 }
 
 static enum _ecore_status_t ecore_iov_vf_stop_rxqs(struct ecore_hwfn *p_hwfn,
                                                   struct ecore_vf_info *vf,
                                                   u16 rxq_id,
-                                                  u8 num_rxqs,
+                                                  u8 qid_usage_idx,
                                                   bool cqe_completion)
 {
+       struct ecore_vf_queue *p_queue;
        enum _ecore_status_t rc = ECORE_SUCCESS;
-       int qid;
 
-       if (rxq_id + num_rxqs > OSAL_ARRAY_SIZE(vf->vf_queues))
+       if (!ecore_iov_validate_rxq(p_hwfn, vf, rxq_id,
+                                   ECORE_IOV_VALIDATE_Q_NA)) {
+               DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                          "VF[%d] Tried Closing Rx 0x%04x.%02x which is inactive\n",
+                          vf->relative_vf_id, rxq_id, qid_usage_idx);
                return ECORE_INVAL;
+       }
 
-       for (qid = rxq_id; qid < rxq_id + num_rxqs; qid++) {
-               if (vf->vf_queues[qid].rxq_active) {
-                       rc = ecore_sp_eth_rx_queue_stop(p_hwfn,
-                                                       vf->vf_queues[qid].
-                                                       fw_rx_qid, false,
-                                                       cqe_completion);
+       p_queue = &vf->vf_queues[rxq_id];
 
-                       if (rc)
-                               return rc;
-               }
-               vf->vf_queues[qid].rxq_active = false;
-               vf->num_active_rxqs--;
+       /* We've validated the index and the existence of the active RXQ -
+        * now we need to make sure that it's using the correct qid.
+        */
+       if (!p_queue->cids[qid_usage_idx].p_cid ||
+           p_queue->cids[qid_usage_idx].b_is_tx) {
+               struct ecore_queue_cid *p_cid;
+
+               p_cid = ecore_iov_get_vf_rx_queue_cid(p_queue);
+               DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                          "VF[%d] - Tried Closing Rx 0x%04x.%02x, but Rx is at %04x.%02x\n",
+                          vf->relative_vf_id, rxq_id, qid_usage_idx,
+                          rxq_id, p_cid->qid_usage_idx);
+               return ECORE_INVAL;
        }
 
-       return rc;
+       /* Now that we know we have a valid Rx-queue - close it */
+       rc = ecore_eth_rx_queue_stop(p_hwfn,
+                                    p_queue->cids[qid_usage_idx].p_cid,
+                                    false, cqe_completion);
+       if (rc != ECORE_SUCCESS)
+               return rc;
+
+       p_queue->cids[qid_usage_idx].p_cid = OSAL_NULL;
+       vf->num_active_rxqs--;
+
+       return ECORE_SUCCESS;
 }
 
 static enum _ecore_status_t ecore_iov_vf_stop_txqs(struct ecore_hwfn *p_hwfn,
                                                   struct ecore_vf_info *vf,
-                                                  u16 txq_id, u8 num_txqs)
+                                                  u16 txq_id,
+                                                  u8 qid_usage_idx)
 {
+       struct ecore_vf_queue *p_queue;
        enum _ecore_status_t rc = ECORE_SUCCESS;
-       int qid;
 
-       if (txq_id + num_txqs > OSAL_ARRAY_SIZE(vf->vf_queues))
+       if (!ecore_iov_validate_txq(p_hwfn, vf, txq_id,
+                                   ECORE_IOV_VALIDATE_Q_NA))
                return ECORE_INVAL;
 
-       for (qid = txq_id; qid < txq_id + num_txqs; qid++) {
-               if (vf->vf_queues[qid].txq_active) {
-                       rc = ecore_sp_eth_tx_queue_stop(p_hwfn,
-                                                       vf->vf_queues[qid].
-                                                       fw_tx_qid);
+       p_queue = &vf->vf_queues[txq_id];
+       if (!p_queue->cids[qid_usage_idx].p_cid ||
+           !p_queue->cids[qid_usage_idx].b_is_tx)
+               return ECORE_INVAL;
 
-                       if (rc)
-                               return rc;
-               }
-               vf->vf_queues[qid].txq_active = false;
-       }
-       return rc;
+       rc = ecore_eth_tx_queue_stop(p_hwfn,
+                                    p_queue->cids[qid_usage_idx].p_cid);
+       if (rc != ECORE_SUCCESS)
+               return rc;
+
+       p_queue->cids[qid_usage_idx].p_cid = OSAL_NULL;
+       return ECORE_SUCCESS;
 }
 
 static void ecore_iov_vf_mbx_stop_rxqs(struct ecore_hwfn *p_hwfn,
                                       struct ecore_ptt *p_ptt,
                                       struct ecore_vf_info *vf)
 {
-       struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
-       struct vfpf_stop_rxqs_tlv *req = &mbx->req_virt->stop_rxqs;
        u16 length = sizeof(struct pfvf_def_resp_tlv);
-       u8 status = PFVF_STATUS_SUCCESS;
+       struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
+       u8 status = PFVF_STATUS_FAILURE;
+       struct vfpf_stop_rxqs_tlv *req;
+       u8 qid_usage_idx;
        enum _ecore_status_t rc;
 
-       /* We give the option of starting from qid != 0, in this case we
-        * need to make sure that qid + num_qs doesn't exceed the actual
-        * amount of queues that exist.
+       /* Starting with CHANNEL_TLV_QID, 'num_rxqs' is assumed to be one.
+        * Since no older ecore ever passed multiple queues through this
+        * API, sanity-check the value.
         */
-       rc = ecore_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid,
-                                   req->num_rxqs, req->cqe_completion);
-       if (rc)
-               status = PFVF_STATUS_FAILURE;
+       req = &mbx->req_virt->stop_rxqs;
+       if (req->num_rxqs != 1) {
+               DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                          "Odd; VF[%d] tried stopping multiple Rx queues\n",
+                          vf->relative_vf_id);
+               status = PFVF_STATUS_NOT_SUPPORTED;
+               goto out;
+       }
 
+       /* Find which qid-index is associated with the queue */
+       qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, false);
+       if (qid_usage_idx == ECORE_IOV_QID_INVALID)
+               goto out;
+
+       rc = ecore_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid,
+                                   qid_usage_idx, req->cqe_completion);
+       if (rc == ECORE_SUCCESS)
+               status = PFVF_STATUS_SUCCESS;
+out:
        ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_RXQS,
                               length, status);
 }
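
The num_rxqs sanity check above mirrors the wire contract. A sketch of the request layout assumed here, per this tree's ecore_vfpf_if.h:

    struct vfpf_stop_rxqs_tlv {
            struct vfpf_first_tlv first_tlv;

            u16 rx_qid;
            /* While the API supports multiple Rx-queues on a single TLV
             * message, in practice older VFs always used it as one [qede].
             * The latest versions of the protocol limit this to 1.
             */
            u8 num_rxqs;
            u8 cqe_completion;
            u8 padding[4];
    };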
@@ -1745,20 +2747,37 @@ static void ecore_iov_vf_mbx_stop_txqs(struct ecore_hwfn *p_hwfn,
                                       struct ecore_ptt *p_ptt,
                                       struct ecore_vf_info *vf)
 {
-       struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
-       struct vfpf_stop_txqs_tlv *req = &mbx->req_virt->stop_txqs;
        u16 length = sizeof(struct pfvf_def_resp_tlv);
-       u8 status = PFVF_STATUS_SUCCESS;
+       struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
+       u8 status = PFVF_STATUS_FAILURE;
+       struct vfpf_stop_txqs_tlv *req;
+       u8 qid_usage_idx;
        enum _ecore_status_t rc;
 
-       /* We give the option of starting from qid != 0, in this case we
-        * need to make sure that qid + num_qs doesn't exceed the actual
-        * amount of queues that exist.
+       /* Starting with CHANNEL_TLV_QID, 'num_txqs' is expected to be 1.
+        * Since no older ecore version passed multiple queues using this
+        * API, sanity-check the value.
         */
-       rc = ecore_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid, req->num_txqs);
-       if (rc)
-               status = PFVF_STATUS_FAILURE;
+       req = &mbx->req_virt->stop_txqs;
+       if (req->num_txqs != 1) {
+               DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                          "Odd; VF[%d] tried stopping multiple Tx queues\n",
+                          vf->relative_vf_id);
+               status = PFVF_STATUS_NOT_SUPPORTED;
+               goto out;
+       }
+
+       /* Find which qid-index is associated with the queue */
+       qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, true);
+       if (qid_usage_idx == ECORE_IOV_QID_INVALID)
+               goto out;
 
+       rc = ecore_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid,
+                                   qid_usage_idx);
+       if (rc == ECORE_SUCCESS)
+               status = PFVF_STATUS_SUCCESS;
+
+out:
        ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_TXQS,
                               length, status);
 }
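
Both stop handlers resolve qid_usage_idx through ecore_iov_vf_mbx_qid(), defined earlier in this file. A simplified reconstruction of its expected behavior (error logging and some checks omitted; treat this as a sketch, not the exact body):

    static u8 ecore_iov_vf_mbx_qid(struct ecore_hwfn *p_hwfn,
                                   struct ecore_vf_info *p_vf, bool b_is_tx)
    {
            struct ecore_iov_vf_mbx *p_mbx = &p_vf->vf_mbx;
            struct vfpf_qid_tlv *p_qid_tlv;

            /* Legacy VFs never send CHANNEL_TLV_QID; use the fixed indices */
            if (!(p_vf->acquire.vfdev_info.capabilities &
                  VFPF_ACQUIRE_CAP_QUEUE_QIDS))
                    return b_is_tx ? ECORE_IOV_LEGACY_QID_TX
                                   : ECORE_IOV_LEGACY_QID_RX;

            p_qid_tlv = (struct vfpf_qid_tlv *)
                        ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt,
                                                   CHANNEL_TLV_QID);
            if (!p_qid_tlv || p_qid_tlv->qid >= MAX_QUEUES_PER_QZONE)
                    return ECORE_IOV_QID_INVALID;

            return p_qid_tlv->qid;
    }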
@@ -1767,43 +2786,70 @@ static void ecore_iov_vf_mbx_update_rxqs(struct ecore_hwfn *p_hwfn,
                                         struct ecore_ptt *p_ptt,
                                         struct ecore_vf_info *vf)
 {
-       struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
-       struct vfpf_update_rxq_tlv *req = &mbx->req_virt->update_rxq;
+       struct ecore_queue_cid *handlers[ECORE_MAX_VF_CHAINS_PER_PF];
        u16 length = sizeof(struct pfvf_def_resp_tlv);
-       u8 status = PFVF_STATUS_SUCCESS;
+       struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
+       struct vfpf_update_rxq_tlv *req;
+       u8 status = PFVF_STATUS_FAILURE;
        u8 complete_event_flg;
        u8 complete_cqe_flg;
+       u8 qid_usage_idx;
        enum _ecore_status_t rc;
-       u16 qid;
-       u8 i;
+       u16 i;
 
+       req = &mbx->req_virt->update_rxq;
        complete_cqe_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_CQE_FLAG);
        complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG);
 
-       for (i = 0; i < req->num_rxqs; i++) {
-               qid = req->rx_qid + i;
+       qid_usage_idx = ecore_iov_vf_mbx_qid(p_hwfn, vf, false);
+       if (qid_usage_idx == ECORE_IOV_QID_INVALID)
+               goto out;
 
-               if (!vf->vf_queues[qid].rxq_active) {
-                       DP_NOTICE(p_hwfn, true,
-                                 "VF rx_qid = %d isn`t active!\n", qid);
-                       status = PFVF_STATUS_FAILURE;
-                       break;
+       /* Starting with the addition of CHANNEL_TLV_QID, this API started
+        * expecting a single queue at a time. Validate this.
+        */
+       if ((vf->acquire.vfdev_info.capabilities &
+            VFPF_ACQUIRE_CAP_QUEUE_QIDS) &&
+            req->num_rxqs != 1) {
+               DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                          "VF[%d] supports QIDs but sends multiple queues\n",
+                          vf->relative_vf_id);
+               goto out;
+       }
+
+       /* Validate inputs; this also holds for the legacy case, since
+        * there qid_usage_idx for every Rx queue is LEGACY_QID_RX.
+        */
+       for (i = req->rx_qid; i < req->rx_qid + req->num_rxqs; i++) {
+               if (!ecore_iov_validate_rxq(p_hwfn, vf, i,
+                                           ECORE_IOV_VALIDATE_Q_NA) ||
+                   !vf->vf_queues[i].cids[qid_usage_idx].p_cid ||
+                   vf->vf_queues[i].cids[qid_usage_idx].b_is_tx) {
+                       DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                                  "VF[%d]: Incorrect Rxqs [%04x, %02x]\n",
+                                  vf->relative_vf_id, req->rx_qid,
+                                  req->num_rxqs);
+                       goto out;
                }
+       }
 
-               rc = ecore_sp_eth_rx_queues_update(p_hwfn,
-                                                  vf->vf_queues[qid].fw_rx_qid,
-                                                  1,
-                                                  complete_cqe_flg,
-                                                  complete_event_flg,
-                                                  ECORE_SPQ_MODE_EBLOCK,
-                                                  OSAL_NULL);
+       for (i = 0; i < req->num_rxqs; i++) {
+               u16 qid = req->rx_qid + i;
 
-               if (rc) {
-                       status = PFVF_STATUS_FAILURE;
-                       break;
-               }
+               handlers[i] = vf->vf_queues[qid].cids[qid_usage_idx].p_cid;
        }
 
+       rc = ecore_sp_eth_rx_queues_update(p_hwfn, (void **)&handlers,
+                                          req->num_rxqs,
+                                          complete_cqe_flg,
+                                          complete_event_flg,
+                                          ECORE_SPQ_MODE_EBLOCK,
+                                          OSAL_NULL);
+       if (rc != ECORE_SUCCESS)
+               goto out;
+
+       status = PFVF_STATUS_SUCCESS;
+out:
        ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UPDATE_RXQ,
                               length, status);
 }
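
The handler rejects the whole request before touching any state, then gathers the cid handles into handlers[] so a single batched ramrod covers every queue. A standalone illustration of that validate-then-collect pattern, independent of the ecore types:

    #include <stdbool.h>
    #include <stddef.h>

    struct handle { int id; };

    /* Pass 1 validates every entry before any side effect takes place;
     * pass 2 collects the handles for one batched operation.
     */
    static bool collect_handles(struct handle *table[], size_t table_len,
                                size_t first, size_t num,
                                struct handle *out[], size_t out_cap)
    {
            size_t i;

            if (num > out_cap || num > table_len || first > table_len - num)
                    return false;

            for (i = first; i < first + num; i++)
                    if (table[i] == NULL)
                            return false;

            for (i = 0; i < num; i++)
                    out[i] = table[first + i];

            return true;
    }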
@@ -1851,13 +2897,14 @@ ecore_iov_vp_update_act_param(struct ecore_hwfn *p_hwfn,
 
        p_act_tlv = (struct vfpf_vport_update_activate_tlv *)
            ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
-       if (p_act_tlv) {
-               p_data->update_vport_active_rx_flg = p_act_tlv->update_rx;
-               p_data->vport_active_rx_flg = p_act_tlv->active_rx;
-               p_data->update_vport_active_tx_flg = p_act_tlv->update_tx;
-               p_data->vport_active_tx_flg = p_act_tlv->active_tx;
-               *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_ACTIVATE;
-       }
+       if (!p_act_tlv)
+               return;
+
+       p_data->update_vport_active_rx_flg = p_act_tlv->update_rx;
+       p_data->vport_active_rx_flg = p_act_tlv->active_rx;
+       p_data->update_vport_active_tx_flg = p_act_tlv->update_tx;
+       p_data->vport_active_tx_flg = p_act_tlv->active_tx;
+       *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_ACTIVATE;
 }
 
 static void
@@ -1895,20 +2942,21 @@ ecore_iov_vp_update_tx_switch(struct ecore_hwfn *p_hwfn,
 
        p_tx_switch_tlv = (struct vfpf_vport_update_tx_switch_tlv *)
            ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
+       if (!p_tx_switch_tlv)
+               return;
 
 #ifndef ASIC_ONLY
        if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
                DP_NOTICE(p_hwfn, false,
-                         "FPGA: Ignore tx-switching configuration originating from VFs\n");
+                         "FPGA: Ignore tx-switching configuration originating"
+                         " from VFs\n");
                return;
        }
 #endif
 
-       if (p_tx_switch_tlv) {
-               p_data->update_tx_switching_flg = 1;
-               p_data->tx_switching_flg = p_tx_switch_tlv->tx_switching;
-               *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_TX_SWITCH;
-       }
+       p_data->update_tx_switching_flg = 1;
+       p_data->tx_switching_flg = p_tx_switch_tlv->tx_switching;
+       *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_TX_SWITCH;
 }
 
 static void
@@ -1922,14 +2970,13 @@ ecore_iov_vp_update_mcast_bin_param(struct ecore_hwfn *p_hwfn,
 
        p_mcast_tlv = (struct vfpf_vport_update_mcast_bin_tlv *)
            ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
+       if (!p_mcast_tlv)
+               return;
 
-       if (p_mcast_tlv) {
-               p_data->update_approx_mcast_flg = 1;
-               OSAL_MEMCPY(p_data->bins, p_mcast_tlv->bins,
-                           sizeof(unsigned long) *
-                           ETH_MULTICAST_MAC_BINS_IN_REGS);
-               *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_MCAST;
-       }
+       p_data->update_approx_mcast_flg = 1;
+       OSAL_MEMCPY(p_data->bins, p_mcast_tlv->bins,
+                   sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS);
+       *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_MCAST;
 }
 
 static void
@@ -1937,23 +2984,20 @@ ecore_iov_vp_update_accept_flag(struct ecore_hwfn *p_hwfn,
                                struct ecore_sp_vport_update_params *p_data,
                                struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
 {
+       struct ecore_filter_accept_flags *p_flags = &p_data->accept_flags;
        struct vfpf_vport_update_accept_param_tlv *p_accept_tlv;
        u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
 
        p_accept_tlv = (struct vfpf_vport_update_accept_param_tlv *)
            ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
+       if (!p_accept_tlv)
+               return;
 
-       if (p_accept_tlv) {
-               p_data->accept_flags.update_rx_mode_config =
-                   p_accept_tlv->update_rx_mode;
-               p_data->accept_flags.rx_accept_filter =
-                   p_accept_tlv->rx_accept_filter;
-               p_data->accept_flags.update_tx_mode_config =
-                   p_accept_tlv->update_tx_mode;
-               p_data->accept_flags.tx_accept_filter =
-                   p_accept_tlv->tx_accept_filter;
-               *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_ACCEPT_PARAM;
-       }
+       p_flags->update_rx_mode_config = p_accept_tlv->update_rx_mode;
+       p_flags->rx_accept_filter = p_accept_tlv->rx_accept_filter;
+       p_flags->update_tx_mode_config = p_accept_tlv->update_tx_mode;
+       p_flags->tx_accept_filter = p_accept_tlv->tx_accept_filter;
+       *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_ACCEPT_PARAM;
 }
 
 static void
@@ -1967,13 +3011,13 @@ ecore_iov_vp_update_accept_any_vlan(struct ecore_hwfn *p_hwfn,
 
        p_accept_any_vlan = (struct vfpf_vport_update_accept_any_vlan_tlv *)
            ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
+       if (!p_accept_any_vlan)
+               return;
 
-       if (p_accept_any_vlan) {
-               p_data->accept_any_vlan = p_accept_any_vlan->accept_any_vlan;
-               p_data->update_accept_any_vlan_flg =
-                   p_accept_any_vlan->update_accept_any_vlan_flg;
-               *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_ACCEPT_ANY_VLAN;
-       }
+       p_data->accept_any_vlan = p_accept_any_vlan->accept_any_vlan;
+       p_data->update_accept_any_vlan_flg =
+                       p_accept_any_vlan->update_accept_any_vlan_flg;
+       *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_ACCEPT_ANY_VLAN;
 }
 
 static void
@@ -1981,77 +3025,73 @@ ecore_iov_vp_update_rss_param(struct ecore_hwfn *p_hwfn,
                              struct ecore_vf_info *vf,
                              struct ecore_sp_vport_update_params *p_data,
                              struct ecore_rss_params *p_rss,
-                             struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
+                             struct ecore_iov_vf_mbx *p_mbx,
+                             u16 *tlvs_mask, u16 *tlvs_accepted)
 {
        struct vfpf_vport_update_rss_tlv *p_rss_tlv;
        u16 tlv = CHANNEL_TLV_VPORT_UPDATE_RSS;
+       bool b_reject = false;
        u16 table_size;
-       u16 i, q_idx, max_q_idx;
+       u16 i, q_idx;
 
        p_rss_tlv = (struct vfpf_vport_update_rss_tlv *)
            ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
-       if (p_rss_tlv) {
-               OSAL_MEMSET(p_rss, 0, sizeof(struct ecore_rss_params));
-
-               p_rss->update_rss_config =
-                   !!(p_rss_tlv->update_rss_flags &
-                       VFPF_UPDATE_RSS_CONFIG_FLAG);
-               p_rss->update_rss_capabilities =
-                   !!(p_rss_tlv->update_rss_flags &
-                       VFPF_UPDATE_RSS_CAPS_FLAG);
-               p_rss->update_rss_ind_table =
-                   !!(p_rss_tlv->update_rss_flags &
-                       VFPF_UPDATE_RSS_IND_TABLE_FLAG);
-               p_rss->update_rss_key =
-                   !!(p_rss_tlv->update_rss_flags & VFPF_UPDATE_RSS_KEY_FLAG);
-
-               p_rss->rss_enable = p_rss_tlv->rss_enable;
-               p_rss->rss_eng_id = vf->relative_vf_id + 1;
-               p_rss->rss_caps = p_rss_tlv->rss_caps;
-               p_rss->rss_table_size_log = p_rss_tlv->rss_table_size_log;
-               OSAL_MEMCPY(p_rss->rss_ind_table, p_rss_tlv->rss_ind_table,
-                           sizeof(p_rss->rss_ind_table));
-               OSAL_MEMCPY(p_rss->rss_key, p_rss_tlv->rss_key,
-                           sizeof(p_rss->rss_key));
-
-               table_size = OSAL_MIN_T(u16,
-                                       OSAL_ARRAY_SIZE(p_rss->rss_ind_table),
-                                       (1 << p_rss_tlv->rss_table_size_log));
-
-               max_q_idx = OSAL_ARRAY_SIZE(vf->vf_queues);
-
-               for (i = 0; i < table_size; i++) {
-                       q_idx = p_rss->rss_ind_table[i];
-                       if (q_idx >= max_q_idx) {
-                               DP_NOTICE(p_hwfn, true,
-                                         "rss_ind_table[%d] = %d, rxq is out of range\n",
-                                         i, q_idx);
-                               /* TBD: fail the request mark VF as malicious */
-                               p_rss->rss_ind_table[i] =
-                                   vf->vf_queues[0].fw_rx_qid;
-                       } else if (!vf->vf_queues[q_idx].rxq_active) {
-                               DP_NOTICE(p_hwfn, true,
-                                         "rss_ind_table[%d] = %d, rxq is not active\n",
-                                         i, q_idx);
-                               /* TBD: fail the request mark VF as malicious */
-                               p_rss->rss_ind_table[i] =
-                                   vf->vf_queues[0].fw_rx_qid;
-                       } else {
-                               p_rss->rss_ind_table[i] =
-                                   vf->vf_queues[q_idx].fw_rx_qid;
-                       }
+       if (!p_rss_tlv) {
+               p_data->rss_params = OSAL_NULL;
+               return;
+       }
+
+       OSAL_MEMSET(p_rss, 0, sizeof(struct ecore_rss_params));
+
+       p_rss->update_rss_config =
+           !!(p_rss_tlv->update_rss_flags &
+               VFPF_UPDATE_RSS_CONFIG_FLAG);
+       p_rss->update_rss_capabilities =
+           !!(p_rss_tlv->update_rss_flags &
+               VFPF_UPDATE_RSS_CAPS_FLAG);
+       p_rss->update_rss_ind_table =
+           !!(p_rss_tlv->update_rss_flags &
+               VFPF_UPDATE_RSS_IND_TABLE_FLAG);
+       p_rss->update_rss_key =
+           !!(p_rss_tlv->update_rss_flags &
+               VFPF_UPDATE_RSS_KEY_FLAG);
+
+       p_rss->rss_enable = p_rss_tlv->rss_enable;
+       p_rss->rss_eng_id = vf->rss_eng_id;
+       p_rss->rss_caps = p_rss_tlv->rss_caps;
+       p_rss->rss_table_size_log = p_rss_tlv->rss_table_size_log;
+       OSAL_MEMCPY(p_rss->rss_key, p_rss_tlv->rss_key,
+                   sizeof(p_rss->rss_key));
+
+       table_size = OSAL_MIN_T(u16, OSAL_ARRAY_SIZE(p_rss->rss_ind_table),
+                               (1 << p_rss_tlv->rss_table_size_log));
+
+       for (i = 0; i < table_size; i++) {
+               struct ecore_queue_cid *p_cid;
+
+               q_idx = p_rss_tlv->rss_ind_table[i];
+               if (!ecore_iov_validate_rxq(p_hwfn, vf, q_idx,
+                                           ECORE_IOV_VALIDATE_Q_ENABLE)) {
+                       DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                                  "VF[%d]: Omitting RSS due to wrong queue %04x\n",
+                                  vf->relative_vf_id, q_idx);
+                       b_reject = true;
+                       goto out;
                }
 
-               p_data->rss_params = p_rss;
-               *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_RSS;
-       } else {
-               p_data->rss_params = OSAL_NULL;
+               p_cid = ecore_iov_get_vf_rx_queue_cid(&vf->vf_queues[q_idx]);
+               p_rss->rss_ind_table[i] = p_cid;
        }
+
+       p_data->rss_params = p_rss;
+out:
+       *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_RSS;
+       if (!b_reject)
+               *tlvs_accepted |= 1 << ECORE_IOV_VP_UPDATE_RSS;
 }
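
Note the indirection table now carries struct ecore_queue_cid pointers rather than firmware queue ids, and a bad entry rejects the TLV instead of being silently remapped to queue 0. The lookup used above can be pictured as returning the first active Rx cid in the qzone; a simplified sketch:

    static struct ecore_queue_cid *
    ecore_iov_get_vf_rx_queue_cid(struct ecore_vf_queue *p_queue)
    {
            int i;

            for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
                    if (p_queue->cids[i].p_cid && !p_queue->cids[i].b_is_tx)
                            return p_queue->cids[i].p_cid;
            }

            return OSAL_NULL;
    }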
 
 static void
 ecore_iov_vp_update_sge_tpa_param(struct ecore_hwfn *p_hwfn,
-                                 struct ecore_vf_info *vf,
                                  struct ecore_sp_vport_update_params *p_data,
                                  struct ecore_sge_tpa_params *p_sge_tpa,
                                  struct ecore_iov_vf_mbx *p_mbx,
@@ -2102,14 +3142,30 @@ static void ecore_iov_vf_mbx_vport_update(struct ecore_hwfn *p_hwfn,
                                          struct ecore_ptt *p_ptt,
                                          struct ecore_vf_info *vf)
 {
+       struct ecore_rss_params *p_rss_params = OSAL_NULL;
        struct ecore_sp_vport_update_params params;
        struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
        struct ecore_sge_tpa_params sge_tpa_params;
-       struct ecore_rss_params rss_params;
+       u16 tlvs_mask = 0, tlvs_accepted = 0;
        u8 status = PFVF_STATUS_SUCCESS;
-       enum _ecore_status_t rc;
-       u16 tlvs_mask = 0, tlvs_accepted;
        u16 length;
+       enum _ecore_status_t rc;
+
+       /* Validate that the VF has a vport instance to update */
+       if (!vf->vport_instance) {
+               DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                          "No VPORT instance available for VF[%d],"
+                          " failing vport update\n",
+                          vf->abs_vf_id);
+               status = PFVF_STATUS_FAILURE;
+               goto out;
+       }
+
+       p_rss_params = OSAL_VZALLOC(p_hwfn->p_dev, sizeof(*p_rss_params));
+       if (p_rss_params == OSAL_NULL) {
+               status = PFVF_STATUS_FAILURE;
+               goto out;
+       }
 
        OSAL_MEMSET(&params, 0, sizeof(params));
        params.opaque_fid = vf->opaque_fid;
@@ -2124,20 +3180,24 @@ static void ecore_iov_vf_mbx_vport_update(struct ecore_hwfn *p_hwfn,
        ecore_iov_vp_update_tx_switch(p_hwfn, &params, mbx, &tlvs_mask);
        ecore_iov_vp_update_mcast_bin_param(p_hwfn, &params, mbx, &tlvs_mask);
        ecore_iov_vp_update_accept_flag(p_hwfn, &params, mbx, &tlvs_mask);
-       ecore_iov_vp_update_rss_param(p_hwfn, vf, &params, &rss_params,
-                                     mbx, &tlvs_mask);
        ecore_iov_vp_update_accept_any_vlan(p_hwfn, &params, mbx, &tlvs_mask);
-       ecore_iov_vp_update_sge_tpa_param(p_hwfn, vf, &params,
+       ecore_iov_vp_update_sge_tpa_param(p_hwfn, &params,
                                          &sge_tpa_params, mbx, &tlvs_mask);
 
+       tlvs_accepted = tlvs_mask;
+
+       /* Some of the extended TLVs need to be validated first; in that
+        * case they update the mask without updating 'accepted', so that
+        * the PF can communicate to the VF that it rejected the request.
+        */
+       ecore_iov_vp_update_rss_param(p_hwfn, vf, &params, p_rss_params,
+                                     mbx, &tlvs_mask, &tlvs_accepted);
+
        /* Just log a message if there is no single extended tlv in buffer.
         * When all features of vport update ramrod would be requested by VF
         * as extended TLVs in buffer then an error can be returned in response
         * if there is no extended TLV present in buffer.
         */
-       tlvs_accepted = tlvs_mask;
-
-#ifndef __EXTRACT__LINUX__
        if (OSAL_IOV_VF_VPORT_UPDATE(p_hwfn, vf->relative_vf_id,
                                     &params, &tlvs_accepted) !=
            ECORE_SUCCESS) {
@@ -2145,15 +3205,15 @@ static void ecore_iov_vf_mbx_vport_update(struct ecore_hwfn *p_hwfn,
                status = PFVF_STATUS_NOT_SUPPORTED;
                goto out;
        }
-#endif
 
        if (!tlvs_accepted) {
                if (tlvs_mask)
                        DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
-                                  "Upper-layer prevents said VF configuration\n");
+                                  "Upper-layer prevents said VF"
+                                  " configuration\n");
                else
-                       DP_NOTICE(p_hwfn, true,
-                                 "No feature tlvs found for vport update\n");
+                       DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                                  "No feature tlvs found for vport update\n");
                status = PFVF_STATUS_NOT_SUPPORTED;
                goto out;
        }
@@ -2165,22 +3225,19 @@ static void ecore_iov_vf_mbx_vport_update(struct ecore_hwfn *p_hwfn,
                status = PFVF_STATUS_FAILURE;
 
 out:
+       OSAL_VFREE(p_hwfn->p_dev, p_rss_params);
        length = ecore_iov_prep_vp_update_resp_tlvs(p_hwfn, vf, mbx, status,
                                                    tlvs_mask, tlvs_accepted);
        ecore_iov_send_response(p_hwfn, p_ptt, vf, length, status);
 }
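
tlvs_mask records every feature TLV the VF sent; tlvs_accepted drops the ones the PF or the upper layer rejected. The response is then built per-feature, conceptually along these lines (a sketch, not the exact ecore_iov_prep_vp_update_resp_tlvs() body):

    u16 feature;
    u8 status;

    for (feature = 0; feature < ECORE_IOV_VP_UPDATE_MAX; feature++) {
            if (!(tlvs_mask & (1 << feature)))
                    continue;       /* VF didn't request this feature */

            /* One response TLV per requested feature; its status tells
             * the VF whether that part of the update was applied.
             */
            status = (tlvs_accepted & (1 << feature)) ?
                     PFVF_STATUS_SUCCESS : PFVF_STATUS_NOT_SUPPORTED;
    }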
 
 static enum _ecore_status_t
-ecore_iov_vf_update_unicast_shadow(struct ecore_hwfn *p_hwfn,
-                                  struct ecore_vf_info *p_vf,
-                                  struct ecore_filter_ucast *p_params)
+ecore_iov_vf_update_vlan_shadow(struct ecore_hwfn *p_hwfn,
+                               struct ecore_vf_info *p_vf,
+                               struct ecore_filter_ucast *p_params)
 {
        int i;
 
-       /* TODO - do we need a MAC shadow registery? */
-       if (p_params->type == ECORE_FILTER_MAC)
-               return ECORE_SUCCESS;
-
        /* First remove entries and then add new ones */
        if (p_params->opcode == ECORE_FILTER_REMOVE) {
                for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
@@ -2192,7 +3249,8 @@ ecore_iov_vf_update_unicast_shadow(struct ecore_hwfn *p_hwfn,
                        }
                if (i == ECORE_ETH_VF_NUM_VLAN_FILTERS + 1) {
                        DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
-                                  "VF [%d] - Tries to remove a non-existing vlan\n",
+                                  "VF [%d] - Tries to remove a non-existing"
+                                  " vlan\n",
                                   p_vf->relative_vf_id);
                        return ECORE_INVAL;
                }
@@ -2210,16 +3268,19 @@ ecore_iov_vf_update_unicast_shadow(struct ecore_hwfn *p_hwfn,
 
        if (p_params->opcode == ECORE_FILTER_ADD ||
            p_params->opcode == ECORE_FILTER_REPLACE) {
-               for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
-                       if (!p_vf->shadow_config.vlans[i].used) {
-                               p_vf->shadow_config.vlans[i].used = true;
-                               p_vf->shadow_config.vlans[i].vid =
-                                   p_params->vlan;
-                               break;
-                       }
+               for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
+                       if (p_vf->shadow_config.vlans[i].used)
+                               continue;
+
+                       p_vf->shadow_config.vlans[i].used = true;
+                       p_vf->shadow_config.vlans[i].vid = p_params->vlan;
+                       break;
+               }
+
                if (i == ECORE_ETH_VF_NUM_VLAN_FILTERS + 1) {
                        DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
-                                  "VF [%d] - Tries to configure more than %d vlan filters\n",
+                                  "VF [%d] - Tries to configure more than %d"
+                                  " vlan filters\n",
                                   p_vf->relative_vf_id,
                                   ECORE_ETH_VF_NUM_VLAN_FILTERS + 1);
                        return ECORE_INVAL;
@@ -2229,19 +3290,104 @@ ecore_iov_vf_update_unicast_shadow(struct ecore_hwfn *p_hwfn,
        return ECORE_SUCCESS;
 }
 
+static enum _ecore_status_t
+ecore_iov_vf_update_mac_shadow(struct ecore_hwfn *p_hwfn,
+                              struct ecore_vf_info *p_vf,
+                              struct ecore_filter_ucast *p_params)
+{
+       char empty_mac[ETH_ALEN];
+       int i;
+
+       OSAL_MEM_ZERO(empty_mac, ETH_ALEN);
+
+       /* If we're in forced-mode, we don't allow any change */
+       /* TODO - this would change if we were ever to implement logic for
+        * removing a forced MAC altogether [in which case, like for vlans,
+        * we should be able to re-trace the previous configuration].
+        */
+       if (p_vf->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED))
+               return ECORE_SUCCESS;
+
+       /* First remove entries and then add new ones */
+       if (p_params->opcode == ECORE_FILTER_REMOVE) {
+               for (i = 0; i < ECORE_ETH_VF_NUM_MAC_FILTERS; i++) {
+                       if (!OSAL_MEMCMP(p_vf->shadow_config.macs[i],
+                                        p_params->mac, ETH_ALEN)) {
+                               OSAL_MEM_ZERO(p_vf->shadow_config.macs[i],
+                                             ETH_ALEN);
+                               break;
+                       }
+               }
+
+               if (i == ECORE_ETH_VF_NUM_MAC_FILTERS) {
+                       DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                                  "MAC isn't configured\n");
+                       return ECORE_INVAL;
+               }
+       } else if (p_params->opcode == ECORE_FILTER_REPLACE ||
+                  p_params->opcode == ECORE_FILTER_FLUSH) {
+               for (i = 0; i < ECORE_ETH_VF_NUM_MAC_FILTERS; i++)
+                       OSAL_MEM_ZERO(p_vf->shadow_config.macs[i], ETH_ALEN);
+       }
+
+       /* List the new MAC address */
+       if (p_params->opcode != ECORE_FILTER_ADD &&
+           p_params->opcode != ECORE_FILTER_REPLACE)
+               return ECORE_SUCCESS;
+
+       for (i = 0; i < ECORE_ETH_VF_NUM_MAC_FILTERS; i++) {
+               if (!OSAL_MEMCMP(p_vf->shadow_config.macs[i],
+                                empty_mac, ETH_ALEN)) {
+                       OSAL_MEMCPY(p_vf->shadow_config.macs[i],
+                                   p_params->mac, ETH_ALEN);
+                       DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                                  "Added MAC at %d entry in shadow\n", i);
+                       break;
+               }
+       }
+
+       if (i == ECORE_ETH_VF_NUM_MAC_FILTERS) {
+               DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                          "No available place for MAC\n");
+               return ECORE_INVAL;
+       }
+
+       return ECORE_SUCCESS;
+}
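
Taken together, the rules above give the shadow table simple slot semantics. A worked example with hypothetical addresses, assuming ECORE_ETH_VF_NUM_MAC_FILTERS is 2:

    /* start:                [00..00] [00..00]
     * ADD     aa:..:01  ->  [aa..01] [00..00]  first empty slot taken
     * ADD     aa:..:02  ->  [aa..01] [aa..02]
     * ADD     aa:..:03  ->  ECORE_INVAL        "No available place for MAC"
     * REMOVE  aa:..:01  ->  [00..00] [aa..02]  matching slot zeroed
     * REPLACE aa:..:04  ->  [aa..04] [00..00]  table flushed, then added
     */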
+
+static enum _ecore_status_t
+ecore_iov_vf_update_unicast_shadow(struct ecore_hwfn *p_hwfn,
+                                  struct ecore_vf_info *p_vf,
+                                  struct ecore_filter_ucast *p_params)
+{
+       enum _ecore_status_t rc = ECORE_SUCCESS;
+
+       if (p_params->type == ECORE_FILTER_MAC) {
+               rc = ecore_iov_vf_update_mac_shadow(p_hwfn, p_vf, p_params);
+               if (rc != ECORE_SUCCESS)
+                       return rc;
+       }
+
+       if (p_params->type == ECORE_FILTER_VLAN)
+               rc = ecore_iov_vf_update_vlan_shadow(p_hwfn, p_vf, p_params);
+
+       return rc;
+}
+
 static void ecore_iov_vf_mbx_ucast_filter(struct ecore_hwfn *p_hwfn,
                                          struct ecore_ptt *p_ptt,
                                          struct ecore_vf_info *vf)
 {
-       struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
-       struct vfpf_ucast_filter_tlv *req = &mbx->req_virt->ucast_filter;
        struct ecore_bulletin_content *p_bulletin = vf->bulletin.p_virt;
-       struct ecore_filter_ucast params;
+       struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
+       struct vfpf_ucast_filter_tlv *req;
        u8 status = PFVF_STATUS_SUCCESS;
+       struct ecore_filter_ucast params;
        enum _ecore_status_t rc;
 
        /* Prepare the unicast filter params */
        OSAL_MEMSET(&params, 0, sizeof(struct ecore_filter_ucast));
+       req = &mbx->req_virt->ucast_filter;
        params.opcode = (enum ecore_filter_opcode)req->opcode;
        params.type = (enum ecore_filter_ucast_type)req->type;
 
@@ -2254,7 +3400,8 @@ static void ecore_iov_vf_mbx_ucast_filter(struct ecore_hwfn *p_hwfn,
        params.vlan = req->vlan;
 
        DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
-                  "VF[%d]: opcode 0x%02x type 0x%02x [%s %s] [vport 0x%02x] MAC %02x:%02x:%02x:%02x:%02x:%02x, vlan 0x%04x\n",
+                  "VF[%d]: opcode 0x%02x type 0x%02x [%s %s] [vport 0x%02x]"
+                  " MAC %02x:%02x:%02x:%02x:%02x:%02x, vlan 0x%04x\n",
                   vf->abs_vf_id, params.opcode, params.type,
                   params.is_rx_filter ? "RX" : "",
                   params.is_tx_filter ? "TX" : "",
@@ -2264,18 +3411,20 @@ static void ecore_iov_vf_mbx_ucast_filter(struct ecore_hwfn *p_hwfn,
 
        if (!vf->vport_instance) {
                DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
-                          "No VPORT instance available for VF[%d], failing ucast MAC configuration\n",
+                          "No VPORT instance available for VF[%d],"
+                          " failing ucast MAC configuration\n",
                           vf->abs_vf_id);
                status = PFVF_STATUS_FAILURE;
                goto out;
        }
 
-       /* Update shadow copy of the VF configuration */
+       /* Update the shadow copy of the VF configuration. If the shadow
+        * indicates the action should be blocked, return success to the
+        * VF to imitate the firmware behaviour in such a case.
+        */
        if (ecore_iov_vf_update_unicast_shadow(p_hwfn, vf, &params) !=
-           ECORE_SUCCESS) {
-               status = PFVF_STATUS_FAILURE;
+           ECORE_SUCCESS)
                goto out;
-       }
 
        /* Determine if the unicast filtering is acceptable by the PF */
        if ((p_bulletin->valid_bitmap & (1 << VLAN_ADDR_FORCED)) &&
@@ -2343,10 +3492,10 @@ static void ecore_iov_vf_mbx_close(struct ecore_hwfn *p_hwfn,
        u8 status = PFVF_STATUS_SUCCESS;
 
        /* Disable Interrupts for VF */
-       ecore_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0 /* disable */);
+       ecore_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);
 
        /* Reset Permission table */
-       ecore_iov_config_perm_table(p_hwfn, p_ptt, vf, 0 /* disable */);
+       ecore_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);
 
        ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_CLOSE,
                               length, status);
@@ -2357,11 +3506,266 @@ static void ecore_iov_vf_mbx_release(struct ecore_hwfn *p_hwfn,
                                     struct ecore_vf_info *p_vf)
 {
        u16 length = sizeof(struct pfvf_def_resp_tlv);
+       u8 status = PFVF_STATUS_SUCCESS;
+       enum _ecore_status_t rc = ECORE_SUCCESS;
 
        ecore_iov_vf_cleanup(p_hwfn, p_vf);
 
+       if (p_vf->state != VF_STOPPED && p_vf->state != VF_FREE) {
+               /* Stopping the VF */
+               rc = ecore_sp_vf_stop(p_hwfn, p_vf->concrete_fid,
+                                     p_vf->opaque_fid);
+
+               if (rc != ECORE_SUCCESS) {
+                       DP_ERR(p_hwfn, "ecore_sp_vf_stop returned error %d\n",
+                              rc);
+                       status = PFVF_STATUS_FAILURE;
+               }
+
+               p_vf->state = VF_STOPPED;
+       }
+
        ecore_iov_prepare_resp(p_hwfn, p_ptt, p_vf, CHANNEL_TLV_RELEASE,
-                              length, PFVF_STATUS_SUCCESS);
+                              length, status);
+}
+
+static void ecore_iov_vf_pf_get_coalesce(struct ecore_hwfn *p_hwfn,
+                                        struct ecore_ptt *p_ptt,
+                                        struct ecore_vf_info *p_vf)
+{
+       struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
+       struct pfvf_read_coal_resp_tlv *p_resp;
+       struct vfpf_read_coal_req_tlv *req;
+       u8 status = PFVF_STATUS_FAILURE;
+       struct ecore_vf_queue *p_queue;
+       struct ecore_queue_cid *p_cid;
+       enum _ecore_status_t rc = ECORE_SUCCESS;
+       u16 coal = 0, qid, i;
+       bool b_is_rx;
+
+       mbx->offset = (u8 *)mbx->reply_virt;
+       req = &mbx->req_virt->read_coal_req;
+
+       qid = req->qid;
+       b_is_rx = req->is_rx ? true : false;
+
+       if (b_is_rx) {
+               if (!ecore_iov_validate_rxq(p_hwfn, p_vf, qid,
+                                           ECORE_IOV_VALIDATE_Q_ENABLE)) {
+                       DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                                  "VF[%d]: Invalid Rx queue_id = %d\n",
+                                  p_vf->abs_vf_id, qid);
+                       goto send_resp;
+               }
+
+               p_cid = ecore_iov_get_vf_rx_queue_cid(&p_vf->vf_queues[qid]);
+               rc = ecore_get_rxq_coalesce(p_hwfn, p_ptt, p_cid, &coal);
+               if (rc != ECORE_SUCCESS)
+                       goto send_resp;
+       } else {
+               if (!ecore_iov_validate_txq(p_hwfn, p_vf, qid,
+                                           ECORE_IOV_VALIDATE_Q_ENABLE)) {
+                       DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                                  "VF[%d]: Invalid Tx queue_id = %d\n",
+                                  p_vf->abs_vf_id, qid);
+                       goto send_resp;
+               }
+               for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
+                       p_queue = &p_vf->vf_queues[qid];
+                       if ((p_queue->cids[i].p_cid == OSAL_NULL) ||
+                           (!p_queue->cids[i].b_is_tx))
+                               continue;
+
+                       p_cid = p_queue->cids[i].p_cid;
+
+                       rc = ecore_get_txq_coalesce(p_hwfn, p_ptt,
+                                                   p_cid, &coal);
+                       if (rc != ECORE_SUCCESS)
+                               goto send_resp;
+                       break;
+               }
+       }
+
+       status = PFVF_STATUS_SUCCESS;
+
+send_resp:
+       p_resp = ecore_add_tlv(&mbx->offset, CHANNEL_TLV_COALESCE_READ,
+                              sizeof(*p_resp));
+       p_resp->coal = coal;
+
+       ecore_add_tlv(&mbx->offset, CHANNEL_TLV_LIST_END,
+                     sizeof(struct channel_list_end_tlv));
+
+       ecore_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status);
+}
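
The read value travels back in a dedicated TLV; the layout assumed above, per this tree's ecore_vfpf_if.h, is small. Note that for Tx the handler samples the first active Tx cid in the qzone, which matches the set path applying one value to all Tx cids:

    struct pfvf_read_coal_resp_tlv {
            struct pfvf_tlv hdr;
            u16 coal;
            u8 padding[6];
    };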
+
+static void ecore_iov_vf_pf_set_coalesce(struct ecore_hwfn *p_hwfn,
+                                        struct ecore_ptt *p_ptt,
+                                        struct ecore_vf_info *vf)
+{
+       struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
+       enum _ecore_status_t rc = ECORE_SUCCESS;
+       struct vfpf_update_coalesce *req;
+       u8 status = PFVF_STATUS_FAILURE;
+       struct ecore_queue_cid *p_cid;
+       u16 rx_coal, tx_coal;
+       u16 qid;
+       int i;
+
+       req = &mbx->req_virt->update_coalesce;
+
+       rx_coal = req->rx_coal;
+       tx_coal = req->tx_coal;
+       qid = req->qid;
+
+       if (!ecore_iov_validate_rxq(p_hwfn, vf, qid,
+                                   ECORE_IOV_VALIDATE_Q_ENABLE) &&
+           rx_coal) {
+               DP_ERR(p_hwfn, "VF[%d]: Invalid Rx queue_id = %d\n",
+                      vf->abs_vf_id, qid);
+               goto out;
+       }
+
+       if (!ecore_iov_validate_txq(p_hwfn, vf, qid,
+                                   ECORE_IOV_VALIDATE_Q_ENABLE) &&
+           tx_coal) {
+               DP_ERR(p_hwfn, "VF[%d]: Invalid Tx queue_id = %d\n",
+                      vf->abs_vf_id, qid);
+               goto out;
+       }
+
+       DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                  "VF[%d]: Setting coalesce for VF rx_coal = %d, tx_coal = %d at queue = %d\n",
+                  vf->abs_vf_id, rx_coal, tx_coal, qid);
+
+       if (rx_coal) {
+               p_cid = ecore_iov_get_vf_rx_queue_cid(&vf->vf_queues[qid]);
+
+               rc = ecore_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid);
+               if (rc != ECORE_SUCCESS) {
+                       DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                                  "VF[%d]: Unable to set rx queue = %d coalesce\n",
+                                  vf->abs_vf_id, vf->vf_queues[qid].fw_rx_qid);
+                       goto out;
+               }
+               vf->rx_coal = rx_coal;
+       }
+
+       /* TODO - in future, it might be possible to pass this at per-cid
+        * granularity. For now, do this for all Tx queues.
+        */
+       if (tx_coal) {
+               struct ecore_vf_queue *p_queue = &vf->vf_queues[qid];
+
+               for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
+                       if (p_queue->cids[i].p_cid == OSAL_NULL)
+                               continue;
+
+                       if (!p_queue->cids[i].b_is_tx)
+                               continue;
+
+                       rc = ecore_set_txq_coalesce(p_hwfn, p_ptt, tx_coal,
+                                                   p_queue->cids[i].p_cid);
+                       if (rc != ECORE_SUCCESS) {
+                               DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                                          "VF[%d]: Unable to set tx queue coalesce\n",
+                                          vf->abs_vf_id);
+                               goto out;
+                       }
+               }
+               vf->tx_coal = tx_coal;
+       }
+
+       status = PFVF_STATUS_SUCCESS;
+out:
+       ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_COALESCE_UPDATE,
+                              sizeof(struct pfvf_def_resp_tlv), status);
+}
+
+enum _ecore_status_t
+ecore_iov_pf_configure_vf_queue_coalesce(struct ecore_hwfn *p_hwfn,
+                                        u16 rx_coal, u16 tx_coal,
+                                        u16 vf_id, u16 qid)
+{
+       struct ecore_queue_cid *p_cid;
+       struct ecore_vf_info *vf;
+       struct ecore_ptt *p_ptt;
+       int i, rc = 0;
+
+       if (!ecore_iov_is_valid_vfid(p_hwfn, vf_id, true, true)) {
+               DP_NOTICE(p_hwfn, true,
+                         "VF[%d] - Can not set coalescing: VF is not active\n",
+                         vf_id);
+               return ECORE_INVAL;
+       }
+
+       vf = &p_hwfn->pf_iov_info->vfs_array[vf_id];
+       p_ptt = ecore_ptt_acquire(p_hwfn);
+       if (!p_ptt)
+               return ECORE_AGAIN;
+
+       if (!ecore_iov_validate_rxq(p_hwfn, vf, qid,
+                                   ECORE_IOV_VALIDATE_Q_ENABLE) &&
+           rx_coal) {
+               DP_ERR(p_hwfn, "VF[%d]: Invalid Rx queue_id = %d\n",
+                      vf->abs_vf_id, qid);
+               goto out;
+       }
+
+       if (!ecore_iov_validate_txq(p_hwfn, vf, qid,
+                                   ECORE_IOV_VALIDATE_Q_ENABLE) &&
+           tx_coal) {
+               DP_ERR(p_hwfn, "VF[%d]: Invalid Tx queue_id = %d\n",
+                      vf->abs_vf_id, qid);
+               goto out;
+       }
+
+       DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                  "VF[%d]: Setting coalesce for VF rx_coal = %d, tx_coal = %d at queue = %d\n",
+                  vf->abs_vf_id, rx_coal, tx_coal, qid);
+
+       if (rx_coal) {
+               p_cid = ecore_iov_get_vf_rx_queue_cid(&vf->vf_queues[qid]);
+
+               rc = ecore_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid);
+               if (rc != ECORE_SUCCESS) {
+                       DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                                  "VF[%d]: Unable to set rx queue = %d coalesce\n",
+                                  vf->abs_vf_id, vf->vf_queues[qid].fw_rx_qid);
+                       goto out;
+               }
+               vf->rx_coal = rx_coal;
+       }
+
+       /* TODO - in future, it might be possible to pass this at per-cid
+        * granularity. For now, do this for all Tx queues.
+        */
+       if (tx_coal) {
+               struct ecore_vf_queue *p_queue = &vf->vf_queues[qid];
+
+               for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
+                       if (p_queue->cids[i].p_cid == OSAL_NULL)
+                               continue;
+
+                       if (!p_queue->cids[i].b_is_tx)
+                               continue;
+
+                       rc = ecore_set_txq_coalesce(p_hwfn, p_ptt, tx_coal,
+                                                   p_queue->cids[i].p_cid);
+                       if (rc != ECORE_SUCCESS) {
+                               DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                                          "VF[%d]: Unable to set tx queue coalesce\n",
+                                          vf->abs_vf_id);
+                               goto out;
+                       }
+               }
+               vf->tx_coal = tx_coal;
+       }
+
+out:
+       ecore_ptt_release(p_hwfn, p_ptt);
+
+       return rc;
 }
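
A hypothetical PF-side usage of the API above, with illustrative values and assuming the microsecond units of the ecore coalescing calls; a zero value leaves that direction unchanged:

    /* Example only: 64us Rx / 128us Tx coalescing on the VF's queue 0 */
    rc = ecore_iov_pf_configure_vf_queue_coalesce(p_hwfn, 64, 128,
                                                  vf_id, 0);
    if (rc != ECORE_SUCCESS)
            DP_NOTICE(p_hwfn, false,
                      "VF[%d]: failed to configure coalescing\n", vf_id);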
 
 static enum _ecore_status_t
@@ -2395,11 +3799,11 @@ static enum _ecore_status_t
 ecore_iov_vf_flr_poll_pbf(struct ecore_hwfn *p_hwfn,
                          struct ecore_vf_info *p_vf, struct ecore_ptt *p_ptt)
 {
-       u32 cons[MAX_NUM_VOQS], distance[MAX_NUM_VOQS];
+       u32 cons[MAX_NUM_VOQS_E4], distance[MAX_NUM_VOQS_E4];
        int i, cnt;
 
        /* Read initial consumers & producers */
-       for (i = 0; i < MAX_NUM_VOQS; i++) {
+       for (i = 0; i < MAX_NUM_VOQS_E4; i++) {
                u32 prod;
 
                cons[i] = ecore_rd(p_hwfn, p_ptt,
@@ -2414,7 +3818,7 @@ ecore_iov_vf_flr_poll_pbf(struct ecore_hwfn *p_hwfn,
        /* Wait for consumers to pass the producers */
        i = 0;
        for (cnt = 0; cnt < 50; cnt++) {
-               for (; i < MAX_NUM_VOQS; i++) {
+               for (; i < MAX_NUM_VOQS_E4; i++) {
                        u32 tmp;
 
                        tmp = ecore_rd(p_hwfn, p_ptt,
@@ -2424,7 +3828,7 @@ ecore_iov_vf_flr_poll_pbf(struct ecore_hwfn *p_hwfn,
                                break;
                }
 
-               if (i == MAX_NUM_VOQS)
+               if (i == MAX_NUM_VOQS_E4)
                        break;
 
                OSAL_MSLEEP(20);
@@ -2439,61 +3843,6 @@ ecore_iov_vf_flr_poll_pbf(struct ecore_hwfn *p_hwfn,
        return ECORE_SUCCESS;
 }
 
-static enum _ecore_status_t
-ecore_iov_vf_flr_poll_prs(struct ecore_hwfn *p_hwfn,
-                         struct ecore_vf_info *p_vf, struct ecore_ptt *p_ptt)
-{
-       u16 tc_cons[NUM_OF_TCS], tc_lb_cons[NUM_OF_TCS];
-       u16 prod[NUM_OF_TCS];
-       int i, cnt;
-
-       /* Read initial consumers & producers */
-       for (i = 0; i < NUM_OF_TCS; i++) {
-               tc_cons[i] = (u16)ecore_rd(p_hwfn, p_ptt,
-                                          PRS_REG_MSG_CT_MAIN_0 + i * 0x4);
-               tc_lb_cons[i] = (u16)ecore_rd(p_hwfn, p_ptt,
-                                             PRS_REG_MSG_CT_LB_0 + i * 0x4);
-               prod[i] = (u16)ecore_rd(p_hwfn, p_ptt,
-                                       BRB_REG_PER_TC_COUNTERS +
-                                       p_hwfn->port_id * 0x20 + i * 0x4);
-       }
-
-       /* Wait for consumers to pass the producers */
-       i = 0;
-       for (cnt = 0; cnt < 50; cnt++) {
-               for (; i < NUM_OF_TCS; i++) {
-                       u16 cons;
-
-                       cons = (u16)ecore_rd(p_hwfn, p_ptt,
-                                            PRS_REG_MSG_CT_MAIN_0 + i * 0x4);
-                       if (prod[i] - tc_cons[i] > cons - tc_cons[i])
-                               break;
-
-                       cons = (u16)ecore_rd(p_hwfn, p_ptt,
-                                            PRS_REG_MSG_CT_LB_0 + i * 0x4);
-                       if (prod[i] - tc_lb_cons[i] > cons - tc_lb_cons[i])
-                               break;
-               }
-
-               if (i == NUM_OF_TCS)
-                       break;
-
-               /* 16-bit counters; Delay instead of sleep... */
-               OSAL_UDELAY(10);
-       }
-
-       /* This is only optional polling for BB, since registers are only
-        * 16-bit wide and guarantee is not good enough. Don't fail things
-        * if polling didn't return the expected results.
-        */
-       if (cnt == 50)
-               DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
-                          "VF[%d] - prs polling failed on TC %d\n",
-                          p_vf->abs_vf_id, i);
-
-       return ECORE_SUCCESS;
-}
-
 static enum _ecore_status_t ecore_iov_vf_flr_poll(struct ecore_hwfn *p_hwfn,
                                                  struct ecore_vf_info *p_vf,
                                                  struct ecore_ptt *p_ptt)
@@ -2510,10 +3859,6 @@ static enum _ecore_status_t ecore_iov_vf_flr_poll(struct ecore_hwfn *p_hwfn,
        if (rc)
                return rc;
 
-       rc = ecore_iov_vf_flr_poll_prs(p_hwfn, p_vf, p_ptt);
-       if (rc)
-               return rc;
-
        return ECORE_SUCCESS;
 }
 
@@ -2522,8 +3867,8 @@ ecore_iov_execute_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
                                 struct ecore_ptt *p_ptt,
                                 u16 rel_vf_id, u32 *ack_vfs)
 {
-       enum _ecore_status_t rc = ECORE_SUCCESS;
        struct ecore_vf_info *p_vf;
+       enum _ecore_status_t rc = ECORE_SUCCESS;
 
        p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, false);
        if (!p_vf)
@@ -2541,7 +3886,7 @@ ecore_iov_execute_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
                ecore_iov_vf_cleanup(p_hwfn, p_vf);
 
                /* If VF isn't active, no need for anything but SW */
-               if (!ECORE_IS_VF_ACTIVE(p_hwfn->p_dev, p_vf->relative_vf_id))
+               if (!p_vf->b_init)
                        goto cleanup;
 
                /* TODO - what to do in case of failure? */
@@ -2556,6 +3901,13 @@ ecore_iov_execute_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
                        return rc;
                }
 
+               /* Workaround to make VF-PF channel ready, as FW
+                * doesn't do that as a part of FLR.
+                */
+               REG_WR(p_hwfn,
+                      GTT_BAR0_MAP_REG_USDM_RAM +
+                      USTORM_VF_PF_CHANNEL_READY_OFFSET(vfid), 1);
+
                /* VF_STOPPED has to be set only after final cleanup
                 * but prior to re-enabling the VF.
                 */
@@ -2575,8 +3927,7 @@ cleanup:
                ack_vfs[vfid / 32] |= (1 << (vfid % 32));
                p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &=
                    ~(1ULL << (rel_vf_id % 64));
-               p_hwfn->pf_iov_info->pending_events[rel_vf_id / 64] &=
-                   ~(1ULL << (rel_vf_id % 64));
+               p_vf->vf_mbx.b_pending_msg = false;
        }
 
        return rc;
@@ -2591,7 +3942,13 @@ enum _ecore_status_t ecore_iov_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
 
        OSAL_MEMSET(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32));
 
-       for (i = 0; i < p_hwfn->p_dev->sriov_info.total_vfs; i++)
+       /* Since the BRB <-> PRS interface can't be tested as part of the
+        * FLR polling due to HW limitations, simply sleep a bit. And since
+        * there's no need to wait per-VF, do it once before looping.
+        */
+       OSAL_MSLEEP(100);
+
+       for (i = 0; i < p_hwfn->p_dev->p_iov_info->total_vfs; i++)
                ecore_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, i, ack_vfs);
 
        rc = ecore_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs);
@@ -2607,15 +3964,19 @@ ecore_iov_single_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
 
        OSAL_MEMSET(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32));
 
+       /* Wait instead of polling the BRB <-> PRS interface */
+       OSAL_MSLEEP(100);
+
        ecore_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, rel_vf_id, ack_vfs);
 
        rc = ecore_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs);
        return rc;
 }
 
-int ecore_iov_mark_vf_flr(struct ecore_hwfn *p_hwfn, u32 *p_disabled_vfs)
+bool ecore_iov_mark_vf_flr(struct ecore_hwfn *p_hwfn, u32 *p_disabled_vfs)
 {
-       u16 i, found = 0;
+       bool found = false;
+       u16 i;
 
        DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, "Marking FLR-ed VFs\n");
        for (i = 0; i < (VF_MAX_STATIC / 32); i++)
@@ -2623,8 +3984,13 @@ int ecore_iov_mark_vf_flr(struct ecore_hwfn *p_hwfn, u32 *p_disabled_vfs)
                           "[%08x,...,%08x]: %08x\n",
                           i * 32, (i + 1) * 32 - 1, p_disabled_vfs[i]);
 
+       if (!p_hwfn->p_dev->p_iov_info) {
+               DP_NOTICE(p_hwfn, true, "VF flr but no IOV\n");
+               return false;
+       }
+
        /* Mark VFs */
-       for (i = 0; i < p_hwfn->p_dev->sriov_info.total_vfs; i++) {
+       for (i = 0; i < p_hwfn->p_dev->p_iov_info->total_vfs; i++) {
                struct ecore_vf_info *p_vf;
                u8 vfid;
 
@@ -2649,50 +4015,13 @@ int ecore_iov_mark_vf_flr(struct ecore_hwfn *p_hwfn, u32 *p_disabled_vfs)
                         * VF flr until ACKs, we're safe.
                         */
                        p_flr[rel_vf_id / 64] |= 1ULL << (rel_vf_id % 64);
-                       found = 1;
+                       found = true;
                }
        }
 
        return found;
 }
 
-void ecore_iov_set_link(struct ecore_hwfn *p_hwfn,
-                       u16 vfid,
-                       struct ecore_mcp_link_params *params,
-                       struct ecore_mcp_link_state *link,
-                       struct ecore_mcp_link_capabilities *p_caps)
-{
-       struct ecore_vf_info *p_vf = ecore_iov_get_vf_info(p_hwfn, vfid, false);
-       struct ecore_bulletin_content *p_bulletin;
-
-       if (!p_vf)
-               return;
-
-       p_bulletin = p_vf->bulletin.p_virt;
-       p_bulletin->req_autoneg = params->speed.autoneg;
-       p_bulletin->req_adv_speed = params->speed.advertised_speeds;
-       p_bulletin->req_forced_speed = params->speed.forced_speed;
-       p_bulletin->req_autoneg_pause = params->pause.autoneg;
-       p_bulletin->req_forced_rx = params->pause.forced_rx;
-       p_bulletin->req_forced_tx = params->pause.forced_tx;
-       p_bulletin->req_loopback = params->loopback_mode;
-
-       p_bulletin->link_up = link->link_up;
-       p_bulletin->speed = link->speed;
-       p_bulletin->full_duplex = link->full_duplex;
-       p_bulletin->autoneg = link->an;
-       p_bulletin->autoneg_complete = link->an_complete;
-       p_bulletin->parallel_detection = link->parallel_detection;
-       p_bulletin->pfc_enabled = link->pfc_enabled;
-       p_bulletin->partner_adv_speed = link->partner_adv_speed;
-       p_bulletin->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en;
-       p_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en;
-       p_bulletin->partner_adv_pause = link->partner_adv_pause;
-       p_bulletin->sfp_tx_fault = link->sfp_tx_fault;
-
-       p_bulletin->capability_speed = p_caps->speed_capabilities;
-}
-
 void ecore_iov_get_link(struct ecore_hwfn *p_hwfn,
                        u16 vfid,
                        struct ecore_mcp_link_params *p_params,
@@ -2708,11 +4037,11 @@ void ecore_iov_get_link(struct ecore_hwfn *p_hwfn,
        p_bulletin = p_vf->bulletin.p_virt;
 
        if (p_params)
-               __ecore_vf_get_link_params(p_hwfn, p_params, p_bulletin);
+               __ecore_vf_get_link_params(p_params, p_bulletin);
        if (p_link)
-               __ecore_vf_get_link_state(p_hwfn, p_link, p_bulletin);
+               __ecore_vf_get_link_state(p_link, p_bulletin);
        if (p_caps)
-               __ecore_vf_get_link_caps(p_hwfn, p_caps, p_bulletin);
+               __ecore_vf_get_link_caps(p_caps, p_bulletin);
 }
 
 void ecore_iov_process_mbx_req(struct ecore_hwfn *p_hwfn,
@@ -2720,7 +4049,6 @@ void ecore_iov_process_mbx_req(struct ecore_hwfn *p_hwfn,
 {
        struct ecore_iov_vf_mbx *mbx;
        struct ecore_vf_info *p_vf;
-       int i;
 
        p_vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
        if (!p_vf)
@@ -2729,20 +4057,35 @@ void ecore_iov_process_mbx_req(struct ecore_hwfn *p_hwfn,
        mbx = &p_vf->vf_mbx;
 
        /* ecore_iov_process_mbx_request */
-       DP_VERBOSE(p_hwfn,
-                  ECORE_MSG_IOV,
-                  "ecore_iov_process_mbx_req vfid %d\n", p_vf->abs_vf_id);
+#ifndef CONFIG_ECORE_SW_CHANNEL
+       if (!mbx->b_pending_msg) {
+               DP_NOTICE(p_hwfn, true,
+                         "VF[%02x]: Trying to process mailbox message when none is pending\n",
+                         p_vf->abs_vf_id);
+               return;
+       }
+       mbx->b_pending_msg = false;
+#endif
 
        mbx->first_tlv = mbx->req_virt->first_tlv;
 
-       /* check if tlv type is known */
-       if (ecore_iov_tlv_supported(mbx->first_tlv.tl.type)) {
-               /* Lock the per vf op mutex and note the locker's identity.
-                * The unlock will take place in mbx response.
-                */
-               ecore_iov_lock_vf_pf_channel(p_hwfn,
-                                            p_vf, mbx->first_tlv.tl.type);
+       DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                  "VF[%02x]: Processing mailbox message [type %04x]\n",
+                  p_vf->abs_vf_id, mbx->first_tlv.tl.type);
+
+       OSAL_IOV_VF_MSG_TYPE(p_hwfn,
+                            p_vf->relative_vf_id,
+                            mbx->first_tlv.tl.type);
+
+       /* Lock the per vf op mutex and note the locker's identity.
+        * The unlock will take place in mbx response.
+        */
+       ecore_iov_lock_vf_pf_channel(p_hwfn,
+                                    p_vf, mbx->first_tlv.tl.type);
 
+       /* check if tlv type is known */
+       if (ecore_iov_tlv_supported(mbx->first_tlv.tl.type) &&
+           !p_vf->b_malicious) {
                /* switch on the opcode */
                switch (mbx->first_tlv.tl.type) {
                case CHANNEL_TLV_ACQUIRE:
@@ -2784,11 +4127,37 @@ void ecore_iov_process_mbx_req(struct ecore_hwfn *p_hwfn,
                case CHANNEL_TLV_RELEASE:
                        ecore_iov_vf_mbx_release(p_hwfn, p_ptt, p_vf);
                        break;
+               case CHANNEL_TLV_UPDATE_TUNN_PARAM:
+                       ecore_iov_vf_mbx_update_tunn_param(p_hwfn, p_ptt, p_vf);
+                       break;
+               case CHANNEL_TLV_COALESCE_UPDATE:
+                       ecore_iov_vf_pf_set_coalesce(p_hwfn, p_ptt, p_vf);
+                       break;
+               case CHANNEL_TLV_COALESCE_READ:
+                       ecore_iov_vf_pf_get_coalesce(p_hwfn, p_ptt, p_vf);
+                       break;
+               }
+       } else if (ecore_iov_tlv_supported(mbx->first_tlv.tl.type)) {
+               /* If we've received a message from a VF we consider
+                * malicious, we ignore the message unless it's a RELEASE,
+                * in which case we give it the benefit of the doubt,
+                * allowing the next loaded driver to start again.
+                */
+               if (mbx->first_tlv.tl.type == CHANNEL_TLV_RELEASE) {
+                       /* TODO - initiate FLR, remove malicious indication */
+                       DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                                  "VF [%02x] - considered malicious, but wanted to RELEASE. TODO\n",
+                                  p_vf->abs_vf_id);
+               } else {
+                       DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                                  "VF [%02x] - considered malicious; Ignoring TLV [%04x]\n",
+                                  p_vf->abs_vf_id, mbx->first_tlv.tl.type);
                }
 
-               ecore_iov_unlock_vf_pf_channel(p_hwfn,
-                                              p_vf, mbx->first_tlv.tl.type);
-
+               ecore_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
+                                      mbx->first_tlv.tl.type,
+                                      sizeof(struct pfvf_def_resp_tlv),
+                                      PFVF_STATUS_MALICIOUS);
        } else {
                /* unknown TLV - this may belong to a VF driver from the future
                 * - a version written after this PF driver was written, which
@@ -2796,76 +4165,135 @@ void ecore_iov_process_mbx_req(struct ecore_hwfn *p_hwfn,
                 * support them. Or this may be because someone wrote a crappy
                 * VF driver and is sending garbage over the channel.
                 */
-               DP_ERR(p_hwfn,
-                      "unknown TLV. type %d length %d. first 20 bytes of mailbox buffer:\n",
-                      mbx->first_tlv.tl.type, mbx->first_tlv.tl.length);
-
-               for (i = 0; i < 20; i++) {
-                       DP_VERBOSE(p_hwfn,
-                                  ECORE_MSG_IOV,
-                                  "%x ",
-                                  mbx->req_virt->tlv_buf_size.tlv_buffer[i]);
-               }
-
-               /* test whether we can respond to the VF (do we have an address
-                * for it?)
+               DP_NOTICE(p_hwfn, false,
+                         "VF[%02x]: unknown TLV. type %04x length %04x"
+                         " padding %08x reply address %lu\n",
+                         p_vf->abs_vf_id,
+                         mbx->first_tlv.tl.type,
+                         mbx->first_tlv.tl.length,
+                         mbx->first_tlv.padding,
+                         (unsigned long)mbx->first_tlv.reply_address);
+
+               /* Try replying in case reply address matches the acquisition's
+                * posted address.
                 */
-               if (p_vf->state == VF_ACQUIRED)
-                       DP_ERR(p_hwfn, "UNKNOWN TLV Not supported yet\n");
+               if (p_vf->acquire.first_tlv.reply_address &&
+                   (mbx->first_tlv.reply_address ==
+                    p_vf->acquire.first_tlv.reply_address))
+                       ecore_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
+                                              mbx->first_tlv.tl.type,
+                                              sizeof(struct pfvf_def_resp_tlv),
+                                              PFVF_STATUS_NOT_SUPPORTED);
+               else
+                       DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                                  "VF[%02x]: Can't respond to TLV -"
+                                  " no valid reply address\n",
+                                  p_vf->abs_vf_id);
        }
 
+       ecore_iov_unlock_vf_pf_channel(p_hwfn, p_vf,
+                                      mbx->first_tlv.tl.type);
+
 #ifdef CONFIG_ECORE_SW_CHANNEL
        mbx->sw_mbx.mbx_state = VF_PF_RESPONSE_READY;
        mbx->sw_mbx.response_offset = 0;
 #endif
 }
 
-static enum _ecore_status_t ecore_sriov_vfpf_msg(struct ecore_hwfn *p_hwfn,
-                                                __le16 vfid,
-                                                struct regpair *vf_msg)
+void ecore_iov_pf_get_pending_events(struct ecore_hwfn *p_hwfn,
+                                    u64 *events)
 {
-       struct ecore_vf_info *p_vf;
-       u8 min, max;
+       int i;
 
-       if (!p_hwfn->pf_iov_info || !p_hwfn->pf_iov_info->vfs_array) {
-               DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
-                          "Got a message from VF while PF is not initialized for IOV support\n");
-               return ECORE_SUCCESS;
+       OSAL_MEM_ZERO(events, sizeof(u64) * ECORE_VF_ARRAY_LENGTH);
+
+       ecore_for_each_vf(p_hwfn, i) {
+               struct ecore_vf_info *p_vf;
+
+               p_vf = &p_hwfn->pf_iov_info->vfs_array[i];
+               if (p_vf->vf_mbx.b_pending_msg)
+                       events[i / 64] |= 1ULL << (i % 64);
        }
+}
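
The pending-events bitmap packs one bit per relative VF id into u64 words
(ECORE_VF_ARRAY_LENGTH of them), so VF i lands in word i / 64, bit i % 64.
Below is a minimal sketch of the deferred sweep a consumer of this helper
would run; the example_ function is illustrative only, and the actual
dispatch context is OSAL-specific (whatever sits behind OSAL_PF_VF_MSG()):

	/* Sketch only: sweep the bitmap filled by
	 * ecore_iov_pf_get_pending_events() and dispatch each pending
	 * mailbox to the handler above.
	 */
	static void example_iov_sweep_pending(struct ecore_hwfn *p_hwfn,
					      struct ecore_ptt *p_ptt)
	{
		u64 events[ECORE_VF_ARRAY_LENGTH];
		int i;

		ecore_iov_pf_get_pending_events(p_hwfn, events);

		ecore_for_each_vf(p_hwfn, i)
			if (events[i / 64] & (1ULL << (i % 64)))
				ecore_iov_process_mbx_req(p_hwfn, p_ptt, i);
	}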
 
-       /* Find the VF record - message comes with realtive [engine] vfid */
-       min = (u8)p_hwfn->hw_info.first_vf_in_pf;
-       max = min + p_hwfn->p_dev->sriov_info.total_vfs;
-       /* @@@TBD - for BE machines, should echo field be reversed? */
-       if ((u8)vfid < min || (u8)vfid >= max) {
-               DP_INFO(p_hwfn,
-                       "Got a message from VF with relative id 0x%08x, but PF's range is [0x%02x,...,0x%02x)\n",
-                       (u8)vfid, min, max);
-               return ECORE_INVAL;
+static struct ecore_vf_info *
+ecore_sriov_get_vf_from_absid(struct ecore_hwfn *p_hwfn, u16 abs_vfid)
+{
+       u8 min = (u8)p_hwfn->p_dev->p_iov_info->first_vf_in_pf;
+
+       if (!_ecore_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min, false)) {
+               DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                          "Got indication for VF [abs 0x%08x] that cannot be"
+                          " handled by PF\n",
+                          abs_vfid);
+               return OSAL_NULL;
        }
-       p_vf = &p_hwfn->pf_iov_info->vfs_array[(u8)vfid - min];
+
+       return &p_hwfn->pf_iov_info->vfs_array[(u8)abs_vfid - min];
+}
+
+static enum _ecore_status_t ecore_sriov_vfpf_msg(struct ecore_hwfn *p_hwfn,
+                                                u16 abs_vfid,
+                                                struct regpair *vf_msg)
+{
+       struct ecore_vf_info *p_vf = ecore_sriov_get_vf_from_absid(p_hwfn,
+                                                                  abs_vfid);
+
+       if (!p_vf)
+               return ECORE_SUCCESS;
 
        /* List the physical address of the request so that handler
         * could later on copy the message from it.
         */
        p_vf->vf_mbx.pending_req = (((u64)vf_msg->hi) << 32) | vf_msg->lo;
 
+       p_vf->vf_mbx.b_pending_msg = true;
+
        return OSAL_PF_VF_MSG(p_hwfn, p_vf->relative_vf_id);
 }
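
The VF's request address arrives from the event ring as a regpair split
into two 32-bit halves; the handler above stitches it back into the
64-bit DMA address it will later copy the message from. As a sketch
(the example_ helper is illustrative, not part of the API):

	/* Sketch: reassemble a 64-bit DMA address from an event-ring
	 * regpair, exactly as done for vf_mbx.pending_req above.
	 */
	static u64 example_regpair_to_u64(const struct regpair *p_addr)
	{
		return ((u64)p_addr->hi << 32) | p_addr->lo;
	}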
 
-enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn *p_hwfn,
-                                          u8 opcode,
-                                          __le16 echo,
-                                          union event_ring_data *data)
+static void ecore_sriov_vfpf_malicious(struct ecore_hwfn *p_hwfn,
+                                      struct malicious_vf_eqe_data *p_data)
+{
+       struct ecore_vf_info *p_vf;
+
+       p_vf = ecore_sriov_get_vf_from_absid(p_hwfn, p_data->vf_id);
+
+       if (!p_vf)
+               return;
+
+       if (!p_vf->b_malicious) {
+               DP_NOTICE(p_hwfn, false,
+                         "VF [%d] - Malicious behavior [%02x]\n",
+                         p_vf->abs_vf_id, p_data->err_id);
+
+               p_vf->b_malicious = true;
+       } else {
+               DP_INFO(p_hwfn,
+                       "VF [%d] - Malicious behavior [%02x]\n",
+                       p_vf->abs_vf_id, p_data->err_id);
+       }
+
+       OSAL_PF_VF_MALICIOUS(p_hwfn, p_vf->relative_vf_id);
+}
+
+static enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn *p_hwfn,
+                                                 u8 opcode,
+                                                 __le16 echo,
+                                                 union event_ring_data *data,
+                                                 u8 OSAL_UNUSED fw_return_code)
 {
        switch (opcode) {
        case COMMON_EVENT_VF_PF_CHANNEL:
-               return ecore_sriov_vfpf_msg(p_hwfn, echo,
+               return ecore_sriov_vfpf_msg(p_hwfn, OSAL_LE16_TO_CPU(echo),
                                            &data->vf_pf_channel.msg_addr);
        case COMMON_EVENT_VF_FLR:
                DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
                           "VF-FLR is still not supported\n");
                return ECORE_SUCCESS;
+       case COMMON_EVENT_MALICIOUS_VF:
+               ecore_sriov_vfpf_malicious(p_hwfn, &data->malicious_vf);
+               return ECORE_SUCCESS;
        default:
                DP_INFO(p_hwfn->p_dev, "Unknown sriov eqe event 0x%02x\n",
                        opcode);
@@ -2879,51 +4307,20 @@ bool ecore_iov_is_vf_pending_flr(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
                   (1ULL << (rel_vf_id % 64)));
 }
 
-bool ecore_iov_is_valid_vfid(struct ecore_hwfn *p_hwfn, int rel_vf_id,
-                            bool b_enabled_only)
-{
-       if (!p_hwfn->pf_iov_info) {
-               DP_NOTICE(p_hwfn->p_dev, true, "No iov info\n");
-               return false;
-       }
-
-       return b_enabled_only ? ECORE_IS_VF_ACTIVE(p_hwfn->p_dev, rel_vf_id) :
-           (rel_vf_id < p_hwfn->p_dev->sriov_info.total_vfs);
-}
-
-struct ecore_public_vf_info *ecore_iov_get_public_vf_info(struct ecore_hwfn
-                                                         *p_hwfn,
-                                                         u16 relative_vf_id,
-                                                         bool b_enabled_only)
-{
-       struct ecore_vf_info *vf = OSAL_NULL;
-
-       vf = ecore_iov_get_vf_info(p_hwfn, relative_vf_id, b_enabled_only);
-       if (!vf)
-               return OSAL_NULL;
-
-       return &vf->p_vf_info;
-}
-
-void ecore_iov_pf_add_pending_events(struct ecore_hwfn *p_hwfn, u8 vfid)
+u16 ecore_iov_get_next_active_vf(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
 {
-       u64 add_bit = 1ULL << (vfid % 64);
+       struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info;
+       u16 i;
 
-       /* TODO - add locking mechanisms [no atomics in ecore, so we can't
-        * add the lock inside the ecore_pf_iov struct].
-        */
-       p_hwfn->pf_iov_info->pending_events[vfid / 64] |= add_bit;
-}
+       if (!p_iov)
+               goto out;
 
-void ecore_iov_pf_get_and_clear_pending_events(struct ecore_hwfn *p_hwfn,
-                                              u64 *events)
-{
-       u64 *p_pending_events = p_hwfn->pf_iov_info->pending_events;
+       for (i = rel_vf_id; i < p_iov->total_vfs; i++)
+               if (ecore_iov_is_valid_vfid(p_hwfn, i, true, false))
+                       return i;
 
-       /* TODO - Take a lock */
-       OSAL_MEMCPY(events, p_pending_events,
-                   sizeof(u64) * ECORE_VF_ARRAY_LENGTH);
-       OSAL_MEMSET(p_pending_events, 0, sizeof(u64) * ECORE_VF_ARRAY_LENGTH);
+out:
+       return MAX_NUM_VFS_E4;
 }
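
With the loop above testing the iterator rather than the fixed starting
id, this helper backs the driver's walk-the-active-VFs idiom. The macro
below sketches the shape of the ecore_for_each_vf() macro used earlier
in this file; the example_ name marks it as illustrative:

	/* Sketch: iterate active VFs until the helper reports
	 * MAX_NUM_VFS_E4, i.e. no further active VF.
	 */
	#define example_for_each_active_vf(_p_hwfn, _i)                  \
		for ((_i) = ecore_iov_get_next_active_vf(_p_hwfn, 0);    \
		     (_i) < MAX_NUM_VFS_E4;                              \
		     (_i) = ecore_iov_get_next_active_vf(_p_hwfn, (_i) + 1))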
 
 enum _ecore_status_t ecore_iov_copy_vf_msg(struct ecore_hwfn *p_hwfn,
@@ -2965,6 +4362,12 @@ void ecore_iov_bulletin_set_forced_mac(struct ecore_hwfn *p_hwfn,
                          "Can not set forced MAC, invalid vfid [%d]\n", vfid);
                return;
        }
+       if (vf_info->b_malicious) {
+               DP_NOTICE(p_hwfn->p_dev, false,
+                         "Can't set forced MAC to malicious VF [%d]\n",
+                         vfid);
+               return;
+       }
 
        feature = 1 << MAC_ADDR_FORCED;
        OSAL_MEMCPY(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN);
@@ -2989,6 +4392,12 @@ enum _ecore_status_t ecore_iov_bulletin_set_mac(struct ecore_hwfn *p_hwfn,
                          "Can not set MAC, invalid vfid [%d]\n", vfid);
                return ECORE_INVAL;
        }
+       if (vf_info->b_malicious) {
+               DP_NOTICE(p_hwfn->p_dev, false,
+                         "Can't set MAC to malicious VF [%d]\n",
+                         vfid);
+               return ECORE_INVAL;
+       }
 
        if (vf_info->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED)) {
                DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
@@ -3004,29 +4413,6 @@ enum _ecore_status_t ecore_iov_bulletin_set_mac(struct ecore_hwfn *p_hwfn,
        return ECORE_SUCCESS;
 }
 
-void ecore_iov_bulletin_set_forced_vlan(struct ecore_hwfn *p_hwfn,
-                                       u16 pvid, int vfid)
-{
-       struct ecore_vf_info *vf_info;
-       u64 feature;
-
-       vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
-       if (!vf_info) {
-               DP_NOTICE(p_hwfn->p_dev, true,
-                         "Can not set forced MAC, invalid vfid [%d]\n", vfid);
-               return;
-       }
-
-       feature = 1 << VLAN_ADDR_FORCED;
-       vf_info->bulletin.p_virt->pvid = pvid;
-       if (pvid)
-               vf_info->bulletin.p_virt->valid_bitmap |= feature;
-       else
-               vf_info->bulletin.p_virt->valid_bitmap &= ~feature;
-
-       ecore_iov_configure_vport_forced(p_hwfn, vf_info, feature);
-}
-
 enum _ecore_status_t
 ecore_iov_bulletin_set_forced_untagged_default(struct ecore_hwfn *p_hwfn,
                                               bool b_untagged_only, int vfid)
@@ -3037,7 +4423,14 @@ ecore_iov_bulletin_set_forced_untagged_default(struct ecore_hwfn *p_hwfn,
        vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
        if (!vf_info) {
                DP_NOTICE(p_hwfn->p_dev, true,
-                         "Can not set forced MAC, invalid vfid [%d]\n", vfid);
+                         "Can not set untagged default, invalid vfid [%d]\n",
+                         vfid);
+               return ECORE_INVAL;
+       }
+       if (vf_info->b_malicious) {
+               DP_NOTICE(p_hwfn->p_dev, false,
+                         "Can't set untagged default to malicious VF [%d]\n",
+                         vfid);
                return ECORE_INVAL;
        }
 
@@ -3046,7 +4439,8 @@ ecore_iov_bulletin_set_forced_untagged_default(struct ecore_hwfn *p_hwfn,
         */
        if (vf_info->state == VF_ENABLED) {
                DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
-                          "Can't support untagged change for vfid[%d] - VF is already active\n",
+                          "Can't support untagged change for vfid[%d] -"
+                          " VF is already active\n",
                           vfid);
                return ECORE_INVAL;
        }
@@ -3076,16 +4470,57 @@ void ecore_iov_get_vfs_opaque_fid(struct ecore_hwfn *p_hwfn, int vfid,
        *opaque_fid = vf_info->opaque_fid;
 }
 
-void ecore_iov_get_vfs_vport_id(struct ecore_hwfn *p_hwfn, int vfid,
-                               u8 *p_vort_id)
+void ecore_iov_bulletin_set_forced_vlan(struct ecore_hwfn *p_hwfn,
+                                       u16 pvid, int vfid)
 {
        struct ecore_vf_info *vf_info;
+       u64 feature;
 
        vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
-       if (!vf_info)
+       if (!vf_info) {
+               DP_NOTICE(p_hwfn->p_dev, true,
+                         "Can not set forced vlan, invalid vfid [%d]\n",
+                         vfid);
+               return;
+       }
+       if (vf_info->b_malicious) {
+               DP_NOTICE(p_hwfn->p_dev, false,
+                         "Can't set forced vlan to malicious VF [%d]\n",
+                         vfid);
+               return;
+       }
+
+       feature = 1 << VLAN_ADDR_FORCED;
+       vf_info->bulletin.p_virt->pvid = pvid;
+       if (pvid)
+               vf_info->bulletin.p_virt->valid_bitmap |= feature;
+       else
+               vf_info->bulletin.p_virt->valid_bitmap &= ~feature;
+
+       ecore_iov_configure_vport_forced(p_hwfn, vf_info, feature);
+}
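
The forced-MAC and forced-vlan setters share one bulletin pattern:
refuse malicious VFs, publish the value, flip the feature bit in
valid_bitmap, then apply it through ecore_iov_configure_vport_forced().
A condensed sketch of the bitmap step (helper name is illustrative):

	/* Sketch: the valid_bitmap toggle common to the forced-feature
	 * setters above; b_enable mirrors "pvid != 0" in the vlan case.
	 */
	static void example_bulletin_toggle(struct ecore_vf_info *p_vf,
					    u64 feature, bool b_enable)
	{
		if (b_enable)
			p_vf->bulletin.p_virt->valid_bitmap |= feature;
		else
			p_vf->bulletin.p_virt->valid_bitmap &= ~feature;
	}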
+
+void ecore_iov_bulletin_set_udp_ports(struct ecore_hwfn *p_hwfn,
+                                     int vfid, u16 vxlan_port, u16 geneve_port)
+{
+       struct ecore_vf_info *vf_info;
+
+       vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
+       if (!vf_info) {
+               DP_NOTICE(p_hwfn->p_dev, true,
+                         "Can not set udp ports, invalid vfid [%d]\n", vfid);
+               return;
+       }
+
+       if (vf_info->b_malicious) {
+               DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                          "Can not set udp ports to malicious VF [%d]\n",
+                          vfid);
                return;
+       }
 
-       *p_vort_id = vf_info->vport_id;
+       vf_info->bulletin.p_virt->vxlan_udp_port = vxlan_port;
+       vf_info->bulletin.p_virt->geneve_udp_port = geneve_port;
 }
 
 bool ecore_iov_vf_has_vport_instance(struct ecore_hwfn *p_hwfn, int vfid)
@@ -3104,6 +4539,8 @@ bool ecore_iov_is_vf_stopped(struct ecore_hwfn *p_hwfn, int vfid)
        struct ecore_vf_info *p_vf_info;
 
        p_vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
+       if (!p_vf_info)
+               return true;
 
        return p_vf_info->state == VF_STOPPED;
 }
@@ -3119,21 +4556,11 @@ bool ecore_iov_spoofchk_get(struct ecore_hwfn *p_hwfn, int vfid)
        return vf_info->spoof_chk;
 }
 
-bool ecore_iov_pf_sanity_check(struct ecore_hwfn *p_hwfn, int vfid)
-{
-       if (IS_VF(p_hwfn->p_dev) || !IS_ECORE_SRIOV(p_hwfn->p_dev) ||
-           !IS_PF_SRIOV_ALLOC(p_hwfn) ||
-           !ECORE_IS_VF_ACTIVE(p_hwfn->p_dev, vfid))
-               return false;
-       else
-               return true;
-}
-
 enum _ecore_status_t ecore_iov_spoofchk_set(struct ecore_hwfn *p_hwfn,
                                            int vfid, bool val)
 {
-       enum _ecore_status_t rc = ECORE_INVAL;
        struct ecore_vf_info *vf;
+       enum _ecore_status_t rc = ECORE_INVAL;
 
        if (!ecore_iov_pf_sanity_check(p_hwfn, vfid)) {
                DP_NOTICE(p_hwfn, true,
@@ -3262,9 +4689,10 @@ enum _ecore_status_t ecore_iov_configure_tx_rate(struct ecore_hwfn *p_hwfn,
                                                 struct ecore_ptt *p_ptt,
                                                 int vfid, int val)
 {
+       struct ecore_mcp_link_state *p_link;
        struct ecore_vf_info *vf;
-       enum _ecore_status_t rc;
        u8 abs_vp_id = 0;
+       enum _ecore_status_t rc;
 
        vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
 
@@ -3275,35 +4703,10 @@ enum _ecore_status_t ecore_iov_configure_tx_rate(struct ecore_hwfn *p_hwfn,
        if (rc != ECORE_SUCCESS)
                return rc;
 
-       rc = ecore_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val);
-
-       return rc;
-}
-
-enum _ecore_status_t ecore_iov_configure_min_tx_rate(struct ecore_dev *p_dev,
-                                                    int vfid, u32 rate)
-{
-       struct ecore_vf_info *vf;
-       enum _ecore_status_t rc;
-       u8 vport_id;
-       int i;
-
-       for_each_hwfn(p_dev, i) {
-               struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
-
-               if (!ecore_iov_pf_sanity_check(p_hwfn, vfid)) {
-                       DP_NOTICE(p_hwfn, true,
-                                 "SR-IOV sanity check failed, can't set min rate\n");
-                       return ECORE_INVAL;
-               }
-       }
-
-       vf = ecore_iov_get_vf_info(ECORE_LEADING_HWFN(p_dev), (u16)vfid, true);
-       vport_id = vf->vport_id;
-
-       rc = ecore_configure_vport_wfq(p_dev, vport_id, rate);
+       p_link = &ECORE_LEADING_HWFN(p_hwfn->p_dev)->mcp_info->link_output;
 
-       return rc;
+       return ecore_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val,
+                                  p_link->speed);
 }
 
 enum _ecore_status_t ecore_iov_get_vf_stats(struct ecore_hwfn *p_hwfn,
@@ -3404,7 +4807,20 @@ bool ecore_iov_is_vf_initialized(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
        return (p_vf->state == VF_ENABLED);
 }
 
-int ecore_iov_get_vf_min_rate(struct ecore_hwfn *p_hwfn, int vfid)
+bool ecore_iov_is_vf_started(struct ecore_hwfn *p_hwfn,
+                            u16 rel_vf_id)
+{
+       struct ecore_vf_info *p_vf;
+
+       p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
+       if (!p_vf)
+               return false;
+
+       return (p_vf->state != VF_FREE && p_vf->state != VF_STOPPED);
+}
+
+int ecore_iov_get_vf_min_rate(struct ecore_hwfn *p_hwfn, int vfid)
 {
        struct ecore_wfq_data *vf_vp_wfq;
        struct ecore_vf_info *vf_info;
@@ -3420,3 +4836,17 @@ int ecore_iov_get_vf_min_rate(struct ecore_hwfn *p_hwfn, int vfid)
        else
                return 0;
 }
+
+#ifdef CONFIG_ECORE_SW_CHANNEL
+void ecore_iov_set_vf_hw_channel(struct ecore_hwfn *p_hwfn, int vfid,
+                                bool b_is_hw)
+{
+       struct ecore_vf_info *vf_info;
+
+       vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
+       if (!vf_info)
+               return;
+
+       vf_info->b_hw_channel = b_is_hw;
+}
+#endif