Imported Upstream version 17.05
[deb_dpdk.git] / drivers / net / qede / base / ecore_sp_commands.c
index b3736a8..8fd64d7 100644 (file)
@@ -22,6 +22,7 @@
 #include "ecore_hw.h"
 #include "ecore_dcbx.h"
 #include "ecore_sriov.h"
+#include "ecore_vf.h"
 
 enum _ecore_status_t ecore_sp_init_request(struct ecore_hwfn *p_hwfn,
                                           struct ecore_spq_entry **pp_ent,
@@ -31,7 +32,7 @@ enum _ecore_status_t ecore_sp_init_request(struct ecore_hwfn *p_hwfn,
 {
        u32 opaque_cid = p_data->opaque_fid << 16 | p_data->cid;
        struct ecore_spq_entry *p_ent = OSAL_NULL;
-       enum _ecore_status_t rc = ECORE_NOTIMPL;
+       enum _ecore_status_t rc;
 
        if (!pp_ent)
                return ECORE_INVAL;
@@ -88,7 +89,7 @@ enum _ecore_status_t ecore_sp_init_request(struct ecore_hwfn *p_hwfn,
        return ECORE_SUCCESS;
 }
 
-static enum tunnel_clss ecore_tunn_get_clss_type(u8 type)
+static enum tunnel_clss ecore_tunn_clss_to_fw_clss(u8 type)
 {
        switch (type) {
        case ECORE_TUNN_CLSS_MAC_VLAN:
@@ -107,224 +108,208 @@ static enum tunnel_clss ecore_tunn_get_clss_type(u8 type)
 }
 
 static void
-ecore_tunn_set_pf_fix_tunn_mode(struct ecore_hwfn *p_hwfn,
-                               struct ecore_tunn_update_params *p_src,
-                               struct pf_update_tunnel_config *p_tunn_cfg)
+ecore_set_pf_update_tunn_mode(struct ecore_tunnel_info *p_tun,
+                             struct ecore_tunnel_info *p_src,
+                             bool b_pf_start)
 {
-       unsigned long cached_tunn_mode = p_hwfn->p_dev->tunn_mode;
-       unsigned long update_mask = p_src->tunn_mode_update_mask;
-       unsigned long tunn_mode = p_src->tunn_mode;
-       unsigned long new_tunn_mode = 0;
-
-       if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &update_mask)) {
-               if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &tunn_mode))
-                       OSAL_SET_BIT(ECORE_MODE_L2GRE_TUNN, &new_tunn_mode);
-       } else {
-               if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &cached_tunn_mode))
-                       OSAL_SET_BIT(ECORE_MODE_L2GRE_TUNN, &new_tunn_mode);
-       }
-
-       if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &update_mask)) {
-               if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &tunn_mode))
-                       OSAL_SET_BIT(ECORE_MODE_IPGRE_TUNN, &new_tunn_mode);
-       } else {
-               if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &cached_tunn_mode))
-                       OSAL_SET_BIT(ECORE_MODE_IPGRE_TUNN, &new_tunn_mode);
-       }
-
-       if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &update_mask)) {
-               if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &tunn_mode))
-                       OSAL_SET_BIT(ECORE_MODE_VXLAN_TUNN, &new_tunn_mode);
-       } else {
-               if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &cached_tunn_mode))
-                       OSAL_SET_BIT(ECORE_MODE_VXLAN_TUNN, &new_tunn_mode);
-       }
-
-       if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
-               if (p_src->update_geneve_udp_port)
-                       DP_NOTICE(p_hwfn, true, "Geneve not supported\n");
-               p_src->update_geneve_udp_port = 0;
-               p_src->tunn_mode = new_tunn_mode;
-               return;
-       }
+       if (p_src->vxlan.b_update_mode || b_pf_start)
+               p_tun->vxlan.b_mode_enabled = p_src->vxlan.b_mode_enabled;
 
-       if (p_src->update_geneve_udp_port) {
-               p_tunn_cfg->set_geneve_udp_port_flg = 1;
-               p_tunn_cfg->geneve_udp_port =
-                   OSAL_CPU_TO_LE16(p_src->geneve_udp_port);
-       }
+       if (p_src->l2_gre.b_update_mode || b_pf_start)
+               p_tun->l2_gre.b_mode_enabled = p_src->l2_gre.b_mode_enabled;
 
-       if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &update_mask)) {
-               if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &tunn_mode))
-                       OSAL_SET_BIT(ECORE_MODE_L2GENEVE_TUNN, &new_tunn_mode);
-       } else {
-               if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &cached_tunn_mode))
-                       OSAL_SET_BIT(ECORE_MODE_L2GENEVE_TUNN, &new_tunn_mode);
-       }
+       if (p_src->ip_gre.b_update_mode || b_pf_start)
+               p_tun->ip_gre.b_mode_enabled = p_src->ip_gre.b_mode_enabled;
 
-       if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &update_mask)) {
-               if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &tunn_mode))
-                       OSAL_SET_BIT(ECORE_MODE_IPGENEVE_TUNN, &new_tunn_mode);
-       } else {
-               if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &cached_tunn_mode))
-                       OSAL_SET_BIT(ECORE_MODE_IPGENEVE_TUNN, &new_tunn_mode);
-       }
+       if (p_src->l2_geneve.b_update_mode || b_pf_start)
+               p_tun->l2_geneve.b_mode_enabled =
+                               p_src->l2_geneve.b_mode_enabled;
 
-       p_src->tunn_mode = new_tunn_mode;
+       if (p_src->ip_geneve.b_update_mode || b_pf_start)
+               p_tun->ip_geneve.b_mode_enabled =
+                               p_src->ip_geneve.b_mode_enabled;
 }
 
-static void
-ecore_tunn_set_pf_update_params(struct ecore_hwfn *p_hwfn,
-                               struct ecore_tunn_update_params *p_src,
-                               struct pf_update_tunnel_config *p_tunn_cfg)
+static void ecore_set_tunn_cls_info(struct ecore_tunnel_info *p_tun,
+                                   struct ecore_tunnel_info *p_src)
 {
-       unsigned long tunn_mode = p_src->tunn_mode;
        enum tunnel_clss type;
 
-       ecore_tunn_set_pf_fix_tunn_mode(p_hwfn, p_src, p_tunn_cfg);
-       p_tunn_cfg->update_rx_pf_clss = p_src->update_rx_pf_clss;
-       p_tunn_cfg->update_tx_pf_clss = p_src->update_tx_pf_clss;
-
-       type = ecore_tunn_get_clss_type(p_src->tunn_clss_vxlan);
-       p_tunn_cfg->tunnel_clss_vxlan = type;
-       type = ecore_tunn_get_clss_type(p_src->tunn_clss_l2gre);
-       p_tunn_cfg->tunnel_clss_l2gre = type;
-       type = ecore_tunn_get_clss_type(p_src->tunn_clss_ipgre);
-       p_tunn_cfg->tunnel_clss_ipgre = type;
-
-       if (p_src->update_vxlan_udp_port) {
-               p_tunn_cfg->set_vxlan_udp_port_flg = 1;
-               p_tunn_cfg->vxlan_udp_port =
-                   OSAL_CPU_TO_LE16(p_src->vxlan_udp_port);
-       }
-
-       if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &tunn_mode))
-               p_tunn_cfg->tx_enable_l2gre = 1;
+       p_tun->b_update_rx_cls = p_src->b_update_rx_cls;
+       p_tun->b_update_tx_cls = p_src->b_update_tx_cls;
+
+       /* @DPDK - typecast tunnel class */
+       type = ecore_tunn_clss_to_fw_clss(p_src->vxlan.tun_cls);
+       p_tun->vxlan.tun_cls = (enum ecore_tunn_clss)type;
+       type = ecore_tunn_clss_to_fw_clss(p_src->l2_gre.tun_cls);
+       p_tun->l2_gre.tun_cls = (enum ecore_tunn_clss)type;
+       type = ecore_tunn_clss_to_fw_clss(p_src->ip_gre.tun_cls);
+       p_tun->ip_gre.tun_cls = (enum ecore_tunn_clss)type;
+       type = ecore_tunn_clss_to_fw_clss(p_src->l2_geneve.tun_cls);
+       p_tun->l2_geneve.tun_cls = (enum ecore_tunn_clss)type;
+       type = ecore_tunn_clss_to_fw_clss(p_src->ip_geneve.tun_cls);
+       p_tun->ip_geneve.tun_cls = (enum ecore_tunn_clss)type;
+}
 
-       if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &tunn_mode))
-               p_tunn_cfg->tx_enable_ipgre = 1;
+static void ecore_set_tunn_ports(struct ecore_tunnel_info *p_tun,
+                                struct ecore_tunnel_info *p_src)
+{
+       p_tun->geneve_port.b_update_port = p_src->geneve_port.b_update_port;
+       p_tun->vxlan_port.b_update_port = p_src->vxlan_port.b_update_port;
 
-       if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &tunn_mode))
-               p_tunn_cfg->tx_enable_vxlan = 1;
+       if (p_src->geneve_port.b_update_port)
+               p_tun->geneve_port.port = p_src->geneve_port.port;
 
-       if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
-               if (p_src->update_geneve_udp_port)
-                       DP_NOTICE(p_hwfn, true, "Geneve not supported\n");
-               p_src->update_geneve_udp_port = 0;
-               return;
-       }
+       if (p_src->vxlan_port.b_update_port)
+               p_tun->vxlan_port.port = p_src->vxlan_port.port;
+}
 
-       if (p_src->update_geneve_udp_port) {
-               p_tunn_cfg->set_geneve_udp_port_flg = 1;
-               p_tunn_cfg->geneve_udp_port =
-                   OSAL_CPU_TO_LE16(p_src->geneve_udp_port);
-       }
+static void
+__ecore_set_ramrod_tunnel_param(u8 *p_tunn_cls, u8 *p_enable_tx_clas,
+                               struct ecore_tunn_update_type *tun_type)
+{
+       *p_tunn_cls = tun_type->tun_cls;
 
-       if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &tunn_mode))
-               p_tunn_cfg->tx_enable_l2geneve = 1;
+       if (tun_type->b_mode_enabled)
+               *p_enable_tx_clas = 1;
+}
 
-       if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &tunn_mode))
-               p_tunn_cfg->tx_enable_ipgeneve = 1;
+static void
+ecore_set_ramrod_tunnel_param(u8 *p_tunn_cls, u8 *p_enable_tx_clas,
+                             struct ecore_tunn_update_type *tun_type,
+                             u8 *p_update_port, __le16 *p_port,
+                             struct ecore_tunn_update_udp_port *p_udp_port)
+{
+       __ecore_set_ramrod_tunnel_param(p_tunn_cls, p_enable_tx_clas,
+                                       tun_type);
+       if (p_udp_port->b_update_port) {
+               *p_update_port = 1;
+               *p_port = OSAL_CPU_TO_LE16(p_udp_port->port);
+       }
+}
 
-       type = ecore_tunn_get_clss_type(p_src->tunn_clss_l2geneve);
-       p_tunn_cfg->tunnel_clss_l2geneve = type;
-       type = ecore_tunn_get_clss_type(p_src->tunn_clss_ipgeneve);
-       p_tunn_cfg->tunnel_clss_ipgeneve = type;
+static void
+ecore_tunn_set_pf_update_params(struct ecore_hwfn              *p_hwfn,
+                               struct ecore_tunnel_info *p_src,
+                               struct pf_update_tunnel_config  *p_tunn_cfg)
+{
+       struct ecore_tunnel_info *p_tun = &p_hwfn->p_dev->tunnel;
+
+       ecore_set_pf_update_tunn_mode(p_tun, p_src, false);
+       ecore_set_tunn_cls_info(p_tun, p_src);
+       ecore_set_tunn_ports(p_tun, p_src);
+
+       ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_vxlan,
+                                     &p_tunn_cfg->tx_enable_vxlan,
+                                     &p_tun->vxlan,
+                                     &p_tunn_cfg->set_vxlan_udp_port_flg,
+                                     &p_tunn_cfg->vxlan_udp_port,
+                                     &p_tun->vxlan_port);
+
+       ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2geneve,
+                                     &p_tunn_cfg->tx_enable_l2geneve,
+                                     &p_tun->l2_geneve,
+                                     &p_tunn_cfg->set_geneve_udp_port_flg,
+                                     &p_tunn_cfg->geneve_udp_port,
+                                     &p_tun->geneve_port);
+
+       __ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgeneve,
+                                       &p_tunn_cfg->tx_enable_ipgeneve,
+                                       &p_tun->ip_geneve);
+
+       __ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2gre,
+                                       &p_tunn_cfg->tx_enable_l2gre,
+                                       &p_tun->l2_gre);
+
+       __ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgre,
+                                       &p_tunn_cfg->tx_enable_ipgre,
+                                       &p_tun->ip_gre);
+
+       p_tunn_cfg->update_rx_pf_clss = p_tun->b_update_rx_cls;
+       p_tunn_cfg->update_tx_pf_clss = p_tun->b_update_tx_cls;
 }
 
 static void ecore_set_hw_tunn_mode(struct ecore_hwfn *p_hwfn,
                                   struct ecore_ptt *p_ptt,
-                                  unsigned long tunn_mode)
+                                  struct ecore_tunnel_info *p_tun)
 {
-       u8 l2gre_enable = 0, ipgre_enable = 0, vxlan_enable = 0;
-       u8 l2geneve_enable = 0, ipgeneve_enable = 0;
-
-       if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &tunn_mode))
-               l2gre_enable = 1;
-
-       if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &tunn_mode))
-               ipgre_enable = 1;
-
-       if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &tunn_mode))
-               vxlan_enable = 1;
+       ecore_set_gre_enable(p_hwfn, p_ptt, p_tun->l2_gre.b_mode_enabled,
+                            p_tun->ip_gre.b_mode_enabled);
+       ecore_set_vxlan_enable(p_hwfn, p_ptt, p_tun->vxlan.b_mode_enabled);
 
-       ecore_set_gre_enable(p_hwfn, p_ptt, l2gre_enable, ipgre_enable);
-       ecore_set_vxlan_enable(p_hwfn, p_ptt, vxlan_enable);
+       ecore_set_geneve_enable(p_hwfn, p_ptt, p_tun->l2_geneve.b_mode_enabled,
+                               p_tun->ip_geneve.b_mode_enabled);
+}
 
-       if (ECORE_IS_BB_A0(p_hwfn->p_dev))
+static void ecore_set_hw_tunn_mode_port(struct ecore_hwfn *p_hwfn,
+                                       struct ecore_tunnel_info *p_tunn)
+{
+       if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
+               DP_NOTICE(p_hwfn, true,
+                         "A0 chip: tunnel hw config is not supported\n");
                return;
+       }
 
-       if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &tunn_mode))
-               l2geneve_enable = 1;
+       if (p_tunn->vxlan_port.b_update_port)
+               ecore_set_vxlan_dest_port(p_hwfn, p_hwfn->p_main_ptt,
+                                         p_tunn->vxlan_port.port);
 
-       if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &tunn_mode))
-               ipgeneve_enable = 1;
+       if (p_tunn->geneve_port.b_update_port)
+               ecore_set_geneve_dest_port(p_hwfn, p_hwfn->p_main_ptt,
+                                          p_tunn->geneve_port.port);
 
-       ecore_set_geneve_enable(p_hwfn, p_ptt, l2geneve_enable,
-                               ipgeneve_enable);
+       ecore_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt, p_tunn);
 }
 
 static void
 ecore_tunn_set_pf_start_params(struct ecore_hwfn *p_hwfn,
-                              struct ecore_tunn_start_params *p_src,
+                              struct ecore_tunnel_info         *p_src,
                               struct pf_start_tunnel_config *p_tunn_cfg)
 {
-       unsigned long tunn_mode;
-       enum tunnel_clss type;
-
-       if (!p_src)
-               return;
-
-       tunn_mode = p_src->tunn_mode;
-       type = ecore_tunn_get_clss_type(p_src->tunn_clss_vxlan);
-       p_tunn_cfg->tunnel_clss_vxlan = type;
-       type = ecore_tunn_get_clss_type(p_src->tunn_clss_l2gre);
-       p_tunn_cfg->tunnel_clss_l2gre = type;
-       type = ecore_tunn_get_clss_type(p_src->tunn_clss_ipgre);
-       p_tunn_cfg->tunnel_clss_ipgre = type;
-
-       if (p_src->update_vxlan_udp_port) {
-               p_tunn_cfg->set_vxlan_udp_port_flg = 1;
-               p_tunn_cfg->vxlan_udp_port =
-                   OSAL_CPU_TO_LE16(p_src->vxlan_udp_port);
-       }
-
-       if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &tunn_mode))
-               p_tunn_cfg->tx_enable_l2gre = 1;
-
-       if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &tunn_mode))
-               p_tunn_cfg->tx_enable_ipgre = 1;
-
-       if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &tunn_mode))
-               p_tunn_cfg->tx_enable_vxlan = 1;
+       struct ecore_tunnel_info *p_tun = &p_hwfn->p_dev->tunnel;
 
        if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
-               if (p_src->update_geneve_udp_port)
-                       DP_NOTICE(p_hwfn, true, "Geneve not supported\n");
-               p_src->update_geneve_udp_port = 0;
+               DP_NOTICE(p_hwfn, true,
+                         "A0 chip: tunnel pf start config is not supported\n");
                return;
        }
 
-       if (p_src->update_geneve_udp_port) {
-               p_tunn_cfg->set_geneve_udp_port_flg = 1;
-               p_tunn_cfg->geneve_udp_port =
-                   OSAL_CPU_TO_LE16(p_src->geneve_udp_port);
-       }
-
-       if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &tunn_mode))
-               p_tunn_cfg->tx_enable_l2geneve = 1;
-
-       if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &tunn_mode))
-               p_tunn_cfg->tx_enable_ipgeneve = 1;
+       if (!p_src)
+               return;
 
-       type = ecore_tunn_get_clss_type(p_src->tunn_clss_l2geneve);
-       p_tunn_cfg->tunnel_clss_l2geneve = type;
-       type = ecore_tunn_get_clss_type(p_src->tunn_clss_ipgeneve);
-       p_tunn_cfg->tunnel_clss_ipgeneve = type;
+       ecore_set_pf_update_tunn_mode(p_tun, p_src, true);
+       ecore_set_tunn_cls_info(p_tun, p_src);
+       ecore_set_tunn_ports(p_tun, p_src);
+
+       ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_vxlan,
+                                     &p_tunn_cfg->tx_enable_vxlan,
+                                     &p_tun->vxlan,
+                                     &p_tunn_cfg->set_vxlan_udp_port_flg,
+                                     &p_tunn_cfg->vxlan_udp_port,
+                                     &p_tun->vxlan_port);
+
+       ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2geneve,
+                                     &p_tunn_cfg->tx_enable_l2geneve,
+                                     &p_tun->l2_geneve,
+                                     &p_tunn_cfg->set_geneve_udp_port_flg,
+                                     &p_tunn_cfg->geneve_udp_port,
+                                     &p_tun->geneve_port);
+
+       __ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgeneve,
+                                       &p_tunn_cfg->tx_enable_ipgeneve,
+                                       &p_tun->ip_geneve);
+
+       __ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2gre,
+                                       &p_tunn_cfg->tx_enable_l2gre,
+                                       &p_tun->l2_gre);
+
+       __ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgre,
+                                       &p_tunn_cfg->tx_enable_ipgre,
+                                       &p_tun->ip_gre);
 }
 
 enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
-                                      struct ecore_tunn_start_params *p_tunn,
+                                      struct ecore_tunnel_info *p_tunn,
                                       enum ecore_mf_mode mode,
                                       bool allow_npar_tx_switch)
 {
@@ -379,11 +364,11 @@ enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
 
        /* Place EQ address in RAMROD */
        DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr,
-                      p_hwfn->p_eq->chain.pbl.p_phys_table);
+                      p_hwfn->p_eq->chain.pbl_sp.p_phys_table);
        page_cnt = (u8)ecore_chain_get_page_cnt(&p_hwfn->p_eq->chain);
        p_ramrod->event_ring_num_pages = page_cnt;
        DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_addr,
-                      p_hwfn->p_consq->chain.pbl.p_phys_table);
+                      p_hwfn->p_consq->chain.pbl_sp.p_phys_table);
 
        ecore_tunn_set_pf_start_params(p_hwfn, p_tunn,
                                       &p_ramrod->tunnel_config);
@@ -419,11 +404,8 @@ enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
 
        rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
 
-       if (p_tunn) {
-               ecore_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt,
-                                      p_tunn->tunn_mode);
-               p_hwfn->p_dev->tunn_mode = p_tunn->tunn_mode;
-       }
+       if (p_tunn)
+               ecore_set_hw_tunn_mode_port(p_hwfn, &p_hwfn->p_dev->tunnel);
 
        return rc;
 }
@@ -498,7 +480,7 @@ enum _ecore_status_t ecore_sp_rl_update(struct ecore_hwfn *p_hwfn,
 /* Set pf update ramrod command params */
 enum _ecore_status_t
 ecore_sp_pf_update_tunn_cfg(struct ecore_hwfn *p_hwfn,
-                           struct ecore_tunn_update_params *p_tunn,
+                           struct ecore_tunnel_info *p_tunn,
                            enum spq_mode comp_mode,
                            struct ecore_spq_comp_cb *p_comp_data)
 {
@@ -506,6 +488,18 @@ ecore_sp_pf_update_tunn_cfg(struct ecore_hwfn *p_hwfn,
        struct ecore_sp_init_data init_data;
        enum _ecore_status_t rc = ECORE_NOTIMPL;
 
+       if (IS_VF(p_hwfn->p_dev))
+               return ecore_vf_pf_tunnel_param_update(p_hwfn, p_tunn);
+
+       if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
+               DP_NOTICE(p_hwfn, true,
+                         "A0 chip: tunnel pf update config is not supported\n");
+               return rc;
+       }
+
+       if (!p_tunn)
+               return ECORE_INVAL;
+
        /* Get SPQ entry */
        OSAL_MEMSET(&init_data, 0, sizeof(init_data));
        init_data.cid = ecore_spq_get_cid(p_hwfn);
@@ -526,15 +520,7 @@ ecore_sp_pf_update_tunn_cfg(struct ecore_hwfn *p_hwfn,
        if (rc != ECORE_SUCCESS)
                return rc;
 
-       if (p_tunn->update_vxlan_udp_port)
-               ecore_set_vxlan_dest_port(p_hwfn, p_hwfn->p_main_ptt,
-                                         p_tunn->vxlan_udp_port);
-       if (p_tunn->update_geneve_udp_port)
-               ecore_set_geneve_dest_port(p_hwfn, p_hwfn->p_main_ptt,
-                                          p_tunn->geneve_udp_port);
-
-       ecore_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt, p_tunn->tunn_mode);
-       p_hwfn->p_dev->tunn_mode = p_tunn->tunn_mode;
+       ecore_set_hw_tunn_mode_port(p_hwfn, &p_hwfn->p_dev->tunnel);
 
        return rc;
 }
@@ -564,7 +550,7 @@ enum _ecore_status_t ecore_sp_heartbeat_ramrod(struct ecore_hwfn *p_hwfn)
 {
        struct ecore_spq_entry *p_ent = OSAL_NULL;
        struct ecore_sp_init_data init_data;
-       enum _ecore_status_t rc = ECORE_NOTIMPL;
+       enum _ecore_status_t rc;
 
        /* Get SPQ entry */
        OSAL_MEMSET(&init_data, 0, sizeof(init_data));