New upstream version 17.11.3
diff --git a/drivers/net/qede/base/ecore_sp_commands.c b/drivers/net/qede/base/ecore_sp_commands.c
index b3736a8..83705b8 100644
--- a/drivers/net/qede/base/ecore_sp_commands.c
+++ b/drivers/net/qede/base/ecore_sp_commands.c
@@ -22,6 +22,7 @@
 #include "ecore_hw.h"
 #include "ecore_dcbx.h"
 #include "ecore_sriov.h"
+#include "ecore_vf.h"
 
 enum _ecore_status_t ecore_sp_init_request(struct ecore_hwfn *p_hwfn,
                                           struct ecore_spq_entry **pp_ent,
@@ -31,7 +32,7 @@ enum _ecore_status_t ecore_sp_init_request(struct ecore_hwfn *p_hwfn,
 {
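        /* The opaque CID packs the opaque FID into the upper 16 bits,
         * above the connection ID.
         */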
        u32 opaque_cid = p_data->opaque_fid << 16 | p_data->cid;
        struct ecore_spq_entry *p_ent = OSAL_NULL;
-       enum _ecore_status_t rc = ECORE_NOTIMPL;
+       enum _ecore_status_t rc;
 
        if (!pp_ent)
                return ECORE_INVAL;
@@ -88,7 +89,7 @@ enum _ecore_status_t ecore_sp_init_request(struct ecore_hwfn *p_hwfn,
        return ECORE_SUCCESS;
 }
 
-static enum tunnel_clss ecore_tunn_get_clss_type(u8 type)
+static enum tunnel_clss ecore_tunn_clss_to_fw_clss(u8 type)
 {
        switch (type) {
        case ECORE_TUNN_CLSS_MAC_VLAN:
@@ -107,225 +108,198 @@ static enum tunnel_clss ecore_tunn_get_clss_type(u8 type)
 }
 
 static void
-ecore_tunn_set_pf_fix_tunn_mode(struct ecore_hwfn *p_hwfn,
-                               struct ecore_tunn_update_params *p_src,
-                               struct pf_update_tunnel_config *p_tunn_cfg)
+ecore_set_pf_update_tunn_mode(struct ecore_tunnel_info *p_tun,
+                             struct ecore_tunnel_info *p_src,
+                             bool b_pf_start)
 {
-       unsigned long cached_tunn_mode = p_hwfn->p_dev->tunn_mode;
-       unsigned long update_mask = p_src->tunn_mode_update_mask;
-       unsigned long tunn_mode = p_src->tunn_mode;
-       unsigned long new_tunn_mode = 0;
-
-       if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &update_mask)) {
-               if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &tunn_mode))
-                       OSAL_SET_BIT(ECORE_MODE_L2GRE_TUNN, &new_tunn_mode);
-       } else {
-               if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &cached_tunn_mode))
-                       OSAL_SET_BIT(ECORE_MODE_L2GRE_TUNN, &new_tunn_mode);
-       }
+       if (p_src->vxlan.b_update_mode || b_pf_start)
+               p_tun->vxlan.b_mode_enabled = p_src->vxlan.b_mode_enabled;
 
-       if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &update_mask)) {
-               if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &tunn_mode))
-                       OSAL_SET_BIT(ECORE_MODE_IPGRE_TUNN, &new_tunn_mode);
-       } else {
-               if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &cached_tunn_mode))
-                       OSAL_SET_BIT(ECORE_MODE_IPGRE_TUNN, &new_tunn_mode);
-       }
+       if (p_src->l2_gre.b_update_mode || b_pf_start)
+               p_tun->l2_gre.b_mode_enabled = p_src->l2_gre.b_mode_enabled;
 
-       if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &update_mask)) {
-               if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &tunn_mode))
-                       OSAL_SET_BIT(ECORE_MODE_VXLAN_TUNN, &new_tunn_mode);
-       } else {
-               if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &cached_tunn_mode))
-                       OSAL_SET_BIT(ECORE_MODE_VXLAN_TUNN, &new_tunn_mode);
-       }
+       if (p_src->ip_gre.b_update_mode || b_pf_start)
+               p_tun->ip_gre.b_mode_enabled = p_src->ip_gre.b_mode_enabled;
 
-       if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
-               if (p_src->update_geneve_udp_port)
-                       DP_NOTICE(p_hwfn, true, "Geneve not supported\n");
-               p_src->update_geneve_udp_port = 0;
-               p_src->tunn_mode = new_tunn_mode;
-               return;
-       }
+       if (p_src->l2_geneve.b_update_mode || b_pf_start)
+               p_tun->l2_geneve.b_mode_enabled =
+                               p_src->l2_geneve.b_mode_enabled;
 
-       if (p_src->update_geneve_udp_port) {
-               p_tunn_cfg->set_geneve_udp_port_flg = 1;
-               p_tunn_cfg->geneve_udp_port =
-                   OSAL_CPU_TO_LE16(p_src->geneve_udp_port);
-       }
+       if (p_src->ip_geneve.b_update_mode || b_pf_start)
+               p_tun->ip_geneve.b_mode_enabled =
+                               p_src->ip_geneve.b_mode_enabled;
+}
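+
+/* Illustrative sketch (not from the upstream change): a caller that only
+ * wants to flip VXLAN leaves the other b_update_mode flags clear, so the
+ * cached state of the remaining tunnel types is preserved:
+ *
+ *     struct ecore_tunnel_info tunn;
+ *
+ *     OSAL_MEMSET(&tunn, 0, sizeof(tunn));
+ *     tunn.vxlan.b_update_mode = true;
+ *     tunn.vxlan.b_mode_enabled = true;
+ *     ecore_set_pf_update_tunn_mode(&p_hwfn->p_dev->tunnel, &tunn, false);
+ *
+ * With b_pf_start set, every mode is copied regardless of the update flags.
+ */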
 
-       if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &update_mask)) {
-               if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &tunn_mode))
-                       OSAL_SET_BIT(ECORE_MODE_L2GENEVE_TUNN, &new_tunn_mode);
-       } else {
-               if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &cached_tunn_mode))
-                       OSAL_SET_BIT(ECORE_MODE_L2GENEVE_TUNN, &new_tunn_mode);
-       }
+static void ecore_set_tunn_cls_info(struct ecore_tunnel_info *p_tun,
+                                   struct ecore_tunnel_info *p_src)
+{
+       enum tunnel_clss type;
 
-       if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &update_mask)) {
-               if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &tunn_mode))
-                       OSAL_SET_BIT(ECORE_MODE_IPGENEVE_TUNN, &new_tunn_mode);
-       } else {
-               if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &cached_tunn_mode))
-                       OSAL_SET_BIT(ECORE_MODE_IPGENEVE_TUNN, &new_tunn_mode);
-       }
+       p_tun->b_update_rx_cls = p_src->b_update_rx_cls;
+       p_tun->b_update_tx_cls = p_src->b_update_tx_cls;
+
+       /* @DPDK - typecast tunnel class */
+       type = ecore_tunn_clss_to_fw_clss(p_src->vxlan.tun_cls);
+       p_tun->vxlan.tun_cls = (enum ecore_tunn_clss)type;
+       type = ecore_tunn_clss_to_fw_clss(p_src->l2_gre.tun_cls);
+       p_tun->l2_gre.tun_cls = (enum ecore_tunn_clss)type;
+       type = ecore_tunn_clss_to_fw_clss(p_src->ip_gre.tun_cls);
+       p_tun->ip_gre.tun_cls = (enum ecore_tunn_clss)type;
+       type = ecore_tunn_clss_to_fw_clss(p_src->l2_geneve.tun_cls);
+       p_tun->l2_geneve.tun_cls = (enum ecore_tunn_clss)type;
+       type = ecore_tunn_clss_to_fw_clss(p_src->ip_geneve.tun_cls);
+       p_tun->ip_geneve.tun_cls = (enum ecore_tunn_clss)type;
+}
+
+static void ecore_set_tunn_ports(struct ecore_tunnel_info *p_tun,
+                                struct ecore_tunnel_info *p_src)
+{
+       p_tun->geneve_port.b_update_port = p_src->geneve_port.b_update_port;
+       p_tun->vxlan_port.b_update_port = p_src->vxlan_port.b_update_port;
 
-       p_src->tunn_mode = new_tunn_mode;
+       if (p_src->geneve_port.b_update_port)
+               p_tun->geneve_port.port = p_src->geneve_port.port;
+
+       if (p_src->vxlan_port.b_update_port)
+               p_tun->vxlan_port.port = p_src->vxlan_port.port;
 }
 
 static void
-ecore_tunn_set_pf_update_params(struct ecore_hwfn *p_hwfn,
-                               struct ecore_tunn_update_params *p_src,
-                               struct pf_update_tunnel_config *p_tunn_cfg)
+__ecore_set_ramrod_tunnel_param(u8 *p_tunn_cls,
+                               struct ecore_tunn_update_type *tun_type)
 {
-       unsigned long tunn_mode = p_src->tunn_mode;
-       enum tunnel_clss type;
+       *p_tunn_cls = tun_type->tun_cls;
+}
 
-       ecore_tunn_set_pf_fix_tunn_mode(p_hwfn, p_src, p_tunn_cfg);
-       p_tunn_cfg->update_rx_pf_clss = p_src->update_rx_pf_clss;
-       p_tunn_cfg->update_tx_pf_clss = p_src->update_tx_pf_clss;
-
-       type = ecore_tunn_get_clss_type(p_src->tunn_clss_vxlan);
-       p_tunn_cfg->tunnel_clss_vxlan = type;
-       type = ecore_tunn_get_clss_type(p_src->tunn_clss_l2gre);
-       p_tunn_cfg->tunnel_clss_l2gre = type;
-       type = ecore_tunn_get_clss_type(p_src->tunn_clss_ipgre);
-       p_tunn_cfg->tunnel_clss_ipgre = type;
-
-       if (p_src->update_vxlan_udp_port) {
-               p_tunn_cfg->set_vxlan_udp_port_flg = 1;
-               p_tunn_cfg->vxlan_udp_port =
-                   OSAL_CPU_TO_LE16(p_src->vxlan_udp_port);
+static void
+ecore_set_ramrod_tunnel_param(u8 *p_tunn_cls,
+                             struct ecore_tunn_update_type *tun_type,
+                             u8 *p_update_port, __le16 *p_port,
+                             struct ecore_tunn_update_udp_port *p_udp_port)
+{
+       __ecore_set_ramrod_tunnel_param(p_tunn_cls, tun_type);
+       if (p_udp_port->b_update_port) {
+               *p_update_port = 1;
+               *p_port = OSAL_CPU_TO_LE16(p_udp_port->port);
        }
+}
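+
+/* __ecore_set_ramrod_tunnel_param() sets only the tunnel class; the wrapper
+ * above additionally latches the UDP destination port into the ramrod when
+ * b_update_port is set. Only VXLAN and L2-GENEVE expose a configurable port,
+ * so the GRE and IP-GENEVE classes below go through the bare helper.
+ */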
 
-       if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &tunn_mode))
-               p_tunn_cfg->tx_enable_l2gre = 1;
+static void
+ecore_tunn_set_pf_update_params(struct ecore_hwfn *p_hwfn,
+                               struct ecore_tunnel_info *p_src,
+                               struct pf_update_tunnel_config *p_tunn_cfg)
+{
+       struct ecore_tunnel_info *p_tun = &p_hwfn->p_dev->tunnel;
 
-       if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &tunn_mode))
-               p_tunn_cfg->tx_enable_ipgre = 1;
+       ecore_set_pf_update_tunn_mode(p_tun, p_src, false);
+       ecore_set_tunn_cls_info(p_tun, p_src);
+       ecore_set_tunn_ports(p_tun, p_src);
 
-       if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &tunn_mode))
-               p_tunn_cfg->tx_enable_vxlan = 1;
+       ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_vxlan,
+                                     &p_tun->vxlan,
+                                     &p_tunn_cfg->set_vxlan_udp_port_flg,
+                                     &p_tunn_cfg->vxlan_udp_port,
+                                     &p_tun->vxlan_port);
 
-       if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
-               if (p_src->update_geneve_udp_port)
-                       DP_NOTICE(p_hwfn, true, "Geneve not supported\n");
-               p_src->update_geneve_udp_port = 0;
-               return;
-       }
+       ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2geneve,
+                                     &p_tun->l2_geneve,
+                                     &p_tunn_cfg->set_geneve_udp_port_flg,
+                                     &p_tunn_cfg->geneve_udp_port,
+                                     &p_tun->geneve_port);
 
-       if (p_src->update_geneve_udp_port) {
-               p_tunn_cfg->set_geneve_udp_port_flg = 1;
-               p_tunn_cfg->geneve_udp_port =
-                   OSAL_CPU_TO_LE16(p_src->geneve_udp_port);
-       }
+       __ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgeneve,
+                                       &p_tun->ip_geneve);
 
-       if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &tunn_mode))
-               p_tunn_cfg->tx_enable_l2geneve = 1;
+       __ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2gre,
+                                       &p_tun->l2_gre);
 
-       if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &tunn_mode))
-               p_tunn_cfg->tx_enable_ipgeneve = 1;
+       __ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgre,
+                                       &p_tun->ip_gre);
 
-       type = ecore_tunn_get_clss_type(p_src->tunn_clss_l2geneve);
-       p_tunn_cfg->tunnel_clss_l2geneve = type;
-       type = ecore_tunn_get_clss_type(p_src->tunn_clss_ipgeneve);
-       p_tunn_cfg->tunnel_clss_ipgeneve = type;
+       p_tunn_cfg->update_rx_pf_clss = p_tun->b_update_rx_cls;
 }
 
 static void ecore_set_hw_tunn_mode(struct ecore_hwfn *p_hwfn,
                                   struct ecore_ptt *p_ptt,
-                                  unsigned long tunn_mode)
+                                  struct ecore_tunnel_info *p_tun)
 {
-       u8 l2gre_enable = 0, ipgre_enable = 0, vxlan_enable = 0;
-       u8 l2geneve_enable = 0, ipgeneve_enable = 0;
-
-       if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &tunn_mode))
-               l2gre_enable = 1;
-
-       if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &tunn_mode))
-               ipgre_enable = 1;
-
-       if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &tunn_mode))
-               vxlan_enable = 1;
+       ecore_set_gre_enable(p_hwfn, p_ptt, p_tun->l2_gre.b_mode_enabled,
+                            p_tun->ip_gre.b_mode_enabled);
+       ecore_set_vxlan_enable(p_hwfn, p_ptt, p_tun->vxlan.b_mode_enabled);
 
-       ecore_set_gre_enable(p_hwfn, p_ptt, l2gre_enable, ipgre_enable);
-       ecore_set_vxlan_enable(p_hwfn, p_ptt, vxlan_enable);
+       ecore_set_geneve_enable(p_hwfn, p_ptt, p_tun->l2_geneve.b_mode_enabled,
+                               p_tun->ip_geneve.b_mode_enabled);
+}
 
-       if (ECORE_IS_BB_A0(p_hwfn->p_dev))
+static void ecore_set_hw_tunn_mode_port(struct ecore_hwfn *p_hwfn,
+                                       struct ecore_ptt  *p_ptt,
+                                       struct ecore_tunnel_info *p_tunn)
+{
+       if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
+               DP_NOTICE(p_hwfn, true,
+                         "A0 chip: tunnel hw config is not supported\n");
                return;
+       }
 
-       if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &tunn_mode))
-               l2geneve_enable = 1;
+       if (p_tunn->vxlan_port.b_update_port)
+               ecore_set_vxlan_dest_port(p_hwfn, p_ptt,
+                                         p_tunn->vxlan_port.port);
 
-       if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &tunn_mode))
-               ipgeneve_enable = 1;
+       if (p_tunn->geneve_port.b_update_port)
+               ecore_set_geneve_dest_port(p_hwfn, p_ptt,
+                                          p_tunn->geneve_port.port);
 
-       ecore_set_geneve_enable(p_hwfn, p_ptt, l2geneve_enable,
-                               ipgeneve_enable);
+       ecore_set_hw_tunn_mode(p_hwfn, p_ptt, p_tunn);
 }
 
 static void
 ecore_tunn_set_pf_start_params(struct ecore_hwfn *p_hwfn,
-                              struct ecore_tunn_start_params *p_src,
+                              struct ecore_tunnel_info *p_src,
                               struct pf_start_tunnel_config *p_tunn_cfg)
 {
-       unsigned long tunn_mode;
-       enum tunnel_clss type;
+       struct ecore_tunnel_info *p_tun = &p_hwfn->p_dev->tunnel;
 
-       if (!p_src)
+       if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
+               DP_NOTICE(p_hwfn, true,
+                         "A0 chip: tunnel pf start config is not supported\n");
                return;
-
-       tunn_mode = p_src->tunn_mode;
-       type = ecore_tunn_get_clss_type(p_src->tunn_clss_vxlan);
-       p_tunn_cfg->tunnel_clss_vxlan = type;
-       type = ecore_tunn_get_clss_type(p_src->tunn_clss_l2gre);
-       p_tunn_cfg->tunnel_clss_l2gre = type;
-       type = ecore_tunn_get_clss_type(p_src->tunn_clss_ipgre);
-       p_tunn_cfg->tunnel_clss_ipgre = type;
-
-       if (p_src->update_vxlan_udp_port) {
-               p_tunn_cfg->set_vxlan_udp_port_flg = 1;
-               p_tunn_cfg->vxlan_udp_port =
-                   OSAL_CPU_TO_LE16(p_src->vxlan_udp_port);
        }
 
-       if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &tunn_mode))
-               p_tunn_cfg->tx_enable_l2gre = 1;
+       if (!p_src)
+               return;
 
-       if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &tunn_mode))
-               p_tunn_cfg->tx_enable_ipgre = 1;
+       ecore_set_pf_update_tunn_mode(p_tun, p_src, true);
+       ecore_set_tunn_cls_info(p_tun, p_src);
+       ecore_set_tunn_ports(p_tun, p_src);
 
-       if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &tunn_mode))
-               p_tunn_cfg->tx_enable_vxlan = 1;
+       ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_vxlan,
+                                     &p_tun->vxlan,
+                                     &p_tunn_cfg->set_vxlan_udp_port_flg,
+                                     &p_tunn_cfg->vxlan_udp_port,
+                                     &p_tun->vxlan_port);
 
-       if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
-               if (p_src->update_geneve_udp_port)
-                       DP_NOTICE(p_hwfn, true, "Geneve not supported\n");
-               p_src->update_geneve_udp_port = 0;
-               return;
-       }
+       ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2geneve,
+                                     &p_tun->l2_geneve,
+                                     &p_tunn_cfg->set_geneve_udp_port_flg,
+                                     &p_tunn_cfg->geneve_udp_port,
+                                     &p_tun->geneve_port);
 
-       if (p_src->update_geneve_udp_port) {
-               p_tunn_cfg->set_geneve_udp_port_flg = 1;
-               p_tunn_cfg->geneve_udp_port =
-                   OSAL_CPU_TO_LE16(p_src->geneve_udp_port);
-       }
+       __ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgeneve,
+                                       &p_tun->ip_geneve);
 
-       if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &tunn_mode))
-               p_tunn_cfg->tx_enable_l2geneve = 1;
+       __ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2gre,
+                                       &p_tun->l2_gre);
 
-       if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &tunn_mode))
-               p_tunn_cfg->tx_enable_ipgeneve = 1;
-
-       type = ecore_tunn_get_clss_type(p_src->tunn_clss_l2geneve);
-       p_tunn_cfg->tunnel_clss_l2geneve = type;
-       type = ecore_tunn_get_clss_type(p_src->tunn_clss_ipgeneve);
-       p_tunn_cfg->tunnel_clss_ipgeneve = type;
+       __ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgre,
+                                       &p_tun->ip_gre);
 }
 
+#define ETH_P_8021Q 0x8100
+#define ETH_P_8021AD 0x88A8 /* 802.1ad Service VLAN */
+
 enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
-                                      struct ecore_tunn_start_params *p_tunn,
-                                      enum ecore_mf_mode mode,
+                                      struct ecore_ptt *p_ptt,
+                                      struct ecore_tunnel_info *p_tunn,
                                       bool allow_npar_tx_switch)
 {
        struct pf_start_ramrod_data *p_ramrod = OSAL_NULL;
@@ -335,6 +309,7 @@ enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
        struct ecore_sp_init_data init_data;
        enum _ecore_status_t rc = ECORE_NOTIMPL;
        u8 page_cnt;
+       u8 i;
 
        /* update initial eq producer */
        ecore_eq_prod_update(p_hwfn,
@@ -360,35 +335,51 @@ enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
 
        /* For easier debugging */
        p_ramrod->dont_log_ramrods = 0;
-       p_ramrod->log_type_mask = OSAL_CPU_TO_LE16(0xf);
+       p_ramrod->log_type_mask = OSAL_CPU_TO_LE16(0x8f);
 
-       switch (mode) {
-       case ECORE_MF_DEFAULT:
-       case ECORE_MF_NPAR:
-               p_ramrod->mf_mode = MF_NPAR;
-               break;
-       case ECORE_MF_OVLAN:
+       if (OSAL_TEST_BIT(ECORE_MF_OVLAN_CLSS, &p_hwfn->p_dev->mf_bits))
                p_ramrod->mf_mode = MF_OVLAN;
-               break;
-       default:
-               DP_NOTICE(p_hwfn, true,
-                         "Unsupported MF mode, init as DEFAULT\n");
+       else
                p_ramrod->mf_mode = MF_NPAR;
+
+       p_ramrod->outer_tag_config.outer_tag.tci =
+               OSAL_CPU_TO_LE16(p_hwfn->hw_info.ovlan);
+       if (OSAL_TEST_BIT(ECORE_MF_8021Q_TAGGING, &p_hwfn->p_dev->mf_bits)) {
+               p_ramrod->outer_tag_config.outer_tag.tpid = ETH_P_8021Q;
+       } else if (OSAL_TEST_BIT(ECORE_MF_8021AD_TAGGING,
+                                &p_hwfn->p_dev->mf_bits)) {
+               p_ramrod->outer_tag_config.outer_tag.tpid = ETH_P_8021AD;
+               p_ramrod->outer_tag_config.enable_stag_pri_change = 1;
+       }
+
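+       /* Worked example (illustrative): with hw_info.ovlan == 100 and
+        * ECORE_MF_8021AD_TAGGING set, the firmware builds an outer tag of
+        * TPID 0x88A8 and TCI 0x0064, i.e. an 802.1ad service tag for
+        * VLAN 100 with a PCP/DEI of zero.
+        */
+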
+       p_ramrod->outer_tag_config.pri_map_valid = 1;
+       for (i = 0; i < ECORE_MAX_PFC_PRIORITIES; i++)
+               p_ramrod->outer_tag_config.inner_to_outer_pri_map[i] = i;
+
+       /* enable_stag_pri_change should be set if the port is in BD mode,
+        * in UFP mode with Host Control, or in UFP mode with DCB over the
+        * base interface.
+        */
+       if (OSAL_TEST_BIT(ECORE_MF_UFP_SPECIFIC, &p_hwfn->p_dev->mf_bits)) {
+               if ((p_hwfn->ufp_info.pri_type == ECORE_UFP_PRI_OS) ||
+                   (p_hwfn->p_dcbx_info->results.dcbx_enabled))
+                       p_ramrod->outer_tag_config.enable_stag_pri_change = 1;
+               else
+                       p_ramrod->outer_tag_config.enable_stag_pri_change = 0;
        }
-       p_ramrod->outer_tag = p_hwfn->hw_info.ovlan;
 
        /* Place EQ address in RAMROD */
        DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr,
-                      p_hwfn->p_eq->chain.pbl.p_phys_table);
+                      p_hwfn->p_eq->chain.pbl_sp.p_phys_table);
        page_cnt = (u8)ecore_chain_get_page_cnt(&p_hwfn->p_eq->chain);
        p_ramrod->event_ring_num_pages = page_cnt;
        DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_addr,
-                      p_hwfn->p_consq->chain.pbl.p_phys_table);
+                      p_hwfn->p_consq->chain.pbl_sp.p_phys_table);
 
        ecore_tunn_set_pf_start_params(p_hwfn, p_tunn,
                                       &p_ramrod->tunnel_config);
 
-       if (IS_MF_SI(p_hwfn))
+       if (OSAL_TEST_BIT(ECORE_MF_INTER_PF_SWITCH,
+                         &p_hwfn->p_dev->mf_bits))
                p_ramrod->allow_npar_tx_switching = allow_npar_tx_switch;
 
        switch (p_hwfn->hw_info.personality) {
@@ -414,21 +405,20 @@ enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
        p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MINOR;
 
        DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
-                  "Setting event_ring_sb [id %04x index %02x], outer_tag [%d]\n",
-                  sb, sb_index, p_ramrod->outer_tag);
+                  "Setting event_ring_sb [id %04x index %02x], outer_tag.tpid [%d], outer_tag.tci [%d]\n",
+                  sb, sb_index, p_ramrod->outer_tag_config.outer_tag.tpid,
+                  p_ramrod->outer_tag_config.outer_tag.tci);
 
        rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
 
-       if (p_tunn) {
-               ecore_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt,
-                                      p_tunn->tunn_mode);
-               p_hwfn->p_dev->tunn_mode = p_tunn->tunn_mode;
-       }
+       if (p_tunn)
+               ecore_set_hw_tunn_mode_port(p_hwfn, p_ptt,
+                                           &p_hwfn->p_dev->tunnel);
 
        return rc;
 }
 
-enum _ecore_status_t ecore_sp_pf_update(struct ecore_hwfn *p_hwfn)
+enum _ecore_status_t ecore_sp_pf_update_dcbx(struct ecore_hwfn *p_hwfn)
 {
        struct ecore_spq_entry *p_ent = OSAL_NULL;
        struct ecore_sp_init_data init_data;
@@ -452,6 +442,51 @@ enum _ecore_status_t ecore_sp_pf_update(struct ecore_hwfn *p_hwfn)
        return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
 }
 
+enum _ecore_status_t ecore_sp_pf_update_ufp(struct ecore_hwfn *p_hwfn)
+{
+       struct ecore_spq_entry *p_ent = OSAL_NULL;
+       struct ecore_sp_init_data init_data;
+       enum _ecore_status_t rc = ECORE_NOTIMPL;
+
+       /* Get SPQ entry */
+       OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+       init_data.cid = ecore_spq_get_cid(p_hwfn);
+       init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+       init_data.comp_mode = ECORE_SPQ_MODE_CB;
+
+       rc = ecore_sp_init_request(p_hwfn, &p_ent,
+                                  COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
+                                  &init_data);
+       if (rc != ECORE_SUCCESS)
+               return rc;
+
+       p_ent->ramrod.pf_update.update_enable_stag_pri_change = true;
+       if ((p_hwfn->ufp_info.pri_type == ECORE_UFP_PRI_OS) ||
+           (p_hwfn->p_dcbx_info->results.dcbx_enabled))
+               p_ent->ramrod.pf_update.enable_stag_pri_change = 1;
+       else
+               p_ent->ramrod.pf_update.enable_stag_pri_change = 0;
+
+       return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+}
+
+/* QM rate limiter resolution is 1.6Mbps */
+#define QM_RL_RESOLUTION(mb_val)       ((mb_val) * 10 / 16)
+
+/* FW uses 1/64k to express gd */
+#define FW_GD_RESOLUTION(gd)           (64 * 1024 / (gd))
+
+u16 ecore_sp_rl_mb_to_qm(u32 mb_val)
+{
+       return (u16)OSAL_MIN_T(u32, (u16)(~0U), QM_RL_RESOLUTION(mb_val));
+}
+
+u16 ecore_sp_rl_gd_denom(u32 gd)
+{
+       return gd ? (u16)OSAL_MIN_T(u32, (u16)(~0U), FW_GD_RESOLUTION(gd)) : 0;
+}
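+
+/* Worked example (illustrative): QM_RL_RESOLUTION(10000) = 10000 * 10 / 16
+ * = 6250, i.e. a 10,000 Mbps cap becomes 6250 units of 1.6 Mbps each.
+ * ecore_sp_rl_mb_to_qm() saturates at 0xffff, so rates above roughly
+ * 104 Gbps (65535 * 1.6 Mbps) are clamped rather than wrapped. Likewise,
+ * a gd denominator of 2 gives FW_GD_RESOLUTION(2) = 64 * 1024 / 2 = 32768,
+ * i.e. g = 1/2 in the firmware's 1/64K fixed-point format.
+ */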
+
 enum _ecore_status_t ecore_sp_rl_update(struct ecore_hwfn *p_hwfn,
                                        struct ecore_rl_update_params *params)
 {
@@ -483,22 +518,38 @@ enum _ecore_status_t ecore_sp_rl_update(struct ecore_hwfn *p_hwfn,
        rl_update->rl_id_last = params->rl_id_last;
        rl_update->rl_dc_qcn_flg = params->rl_dc_qcn_flg;
        rl_update->rl_bc_rate = OSAL_CPU_TO_LE32(params->rl_bc_rate);
-       rl_update->rl_max_rate = OSAL_CPU_TO_LE16(params->rl_max_rate);
-       rl_update->rl_r_ai = OSAL_CPU_TO_LE16(params->rl_r_ai);
-       rl_update->rl_r_hai = OSAL_CPU_TO_LE16(params->rl_r_hai);
-       rl_update->dcqcn_g = OSAL_CPU_TO_LE16(params->dcqcn_g);
+       rl_update->rl_max_rate =
+               OSAL_CPU_TO_LE16(ecore_sp_rl_mb_to_qm(params->rl_max_rate));
+       rl_update->rl_r_ai =
+               OSAL_CPU_TO_LE16(ecore_sp_rl_mb_to_qm(params->rl_r_ai));
+       rl_update->rl_r_hai =
+               OSAL_CPU_TO_LE16(ecore_sp_rl_mb_to_qm(params->rl_r_hai));
+       rl_update->dcqcn_g =
+               OSAL_CPU_TO_LE16(ecore_sp_rl_gd_denom(params->dcqcn_gd));
        rl_update->dcqcn_k_us = OSAL_CPU_TO_LE32(params->dcqcn_k_us);
-       rl_update->dcqcn_timeuot_us = OSAL_CPU_TO_LE32(
-               params->dcqcn_timeuot_us);
+       rl_update->dcqcn_timeuot_us =
+               OSAL_CPU_TO_LE32(params->dcqcn_timeuot_us);
        rl_update->qcn_timeuot_us = OSAL_CPU_TO_LE32(params->qcn_timeuot_us);
 
+       DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "rl_params: qcn_update_param_flg %x, dcqcn_update_param_flg %x, rl_init_flg %x, rl_start_flg %x, rl_stop_flg %x, rl_id_first %x, rl_id_last %x, rl_dc_qcn_flg %x, rl_bc_rate %x, rl_max_rate %x, rl_r_ai %x, rl_r_hai %x, dcqcn_g %x, dcqcn_k_us %x, dcqcn_timeuot_us %x, qcn_timeuot_us %x\n",
+                  rl_update->qcn_update_param_flg,
+                  rl_update->dcqcn_update_param_flg,
+                  rl_update->rl_init_flg, rl_update->rl_start_flg,
+                  rl_update->rl_stop_flg, rl_update->rl_id_first,
+                  rl_update->rl_id_last, rl_update->rl_dc_qcn_flg,
+                  rl_update->rl_bc_rate, rl_update->rl_max_rate,
+                  rl_update->rl_r_ai, rl_update->rl_r_hai,
+                  rl_update->dcqcn_g, rl_update->dcqcn_k_us,
+                  rl_update->dcqcn_timeuot_us, rl_update->qcn_timeuot_us);
+
        return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
 }
 
 /* Set pf update ramrod command params */
 enum _ecore_status_t
 ecore_sp_pf_update_tunn_cfg(struct ecore_hwfn *p_hwfn,
-                           struct ecore_tunn_update_params *p_tunn,
+                           struct ecore_ptt *p_ptt,
+                           struct ecore_tunnel_info *p_tunn,
                            enum spq_mode comp_mode,
                            struct ecore_spq_comp_cb *p_comp_data)
 {
@@ -506,6 +557,18 @@ ecore_sp_pf_update_tunn_cfg(struct ecore_hwfn *p_hwfn,
        struct ecore_sp_init_data init_data;
        enum _ecore_status_t rc = ECORE_NOTIMPL;
 
+       if (IS_VF(p_hwfn->p_dev))
+               return ecore_vf_pf_tunnel_param_update(p_hwfn, p_tunn);
+
+       if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
+               DP_NOTICE(p_hwfn, true,
+                         "A0 chip: tunnel pf update config is not supported\n");
+               return rc;
+       }
+
+       if (!p_tunn)
+               return ECORE_INVAL;
+
        /* Get SPQ entry */
        OSAL_MEMSET(&init_data, 0, sizeof(init_data));
        init_data.cid = ecore_spq_get_cid(p_hwfn);
@@ -526,15 +589,7 @@ ecore_sp_pf_update_tunn_cfg(struct ecore_hwfn *p_hwfn,
        if (rc != ECORE_SUCCESS)
                return rc;
 
-       if (p_tunn->update_vxlan_udp_port)
-               ecore_set_vxlan_dest_port(p_hwfn, p_hwfn->p_main_ptt,
-                                         p_tunn->vxlan_udp_port);
-       if (p_tunn->update_geneve_udp_port)
-               ecore_set_geneve_dest_port(p_hwfn, p_hwfn->p_main_ptt,
-                                          p_tunn->geneve_udp_port);
-
-       ecore_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt, p_tunn->tunn_mode);
-       p_hwfn->p_dev->tunn_mode = p_tunn->tunn_mode;
+       ecore_set_hw_tunn_mode_port(p_hwfn, p_ptt, &p_hwfn->p_dev->tunnel);
 
        return rc;
 }
@@ -564,7 +619,7 @@ enum _ecore_status_t ecore_sp_heartbeat_ramrod(struct ecore_hwfn *p_hwfn)
 {
        struct ecore_spq_entry *p_ent = OSAL_NULL;
        struct ecore_sp_init_data init_data;
-       enum _ecore_status_t rc = ECORE_NOTIMPL;
+       enum _ecore_status_t rc;
 
        /* Get SPQ entry */
        OSAL_MEMSET(&init_data, 0, sizeof(init_data));
@@ -580,3 +635,28 @@ enum _ecore_status_t ecore_sp_heartbeat_ramrod(struct ecore_hwfn *p_hwfn)
 
        return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
 }
+
+enum _ecore_status_t ecore_sp_pf_update_stag(struct ecore_hwfn *p_hwfn)
+{
+       struct ecore_spq_entry *p_ent = OSAL_NULL;
+       struct ecore_sp_init_data init_data;
+       enum _ecore_status_t rc = ECORE_NOTIMPL;
+
+       /* Get SPQ entry */
+       OSAL_MEMSET(&init_data, 0, sizeof(init_data));
+       init_data.cid = ecore_spq_get_cid(p_hwfn);
+       init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
+       init_data.comp_mode = ECORE_SPQ_MODE_CB;
+
+       rc = ecore_sp_init_request(p_hwfn, &p_ent,
+                                  COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
+                                  &init_data);
+       if (rc != ECORE_SUCCESS)
+               return rc;
+
+       p_ent->ramrod.pf_update.update_mf_vlan_flag = true;
+       p_ent->ramrod.pf_update.mf_vlan =
+                               OSAL_CPU_TO_LE16(p_hwfn->hw_info.ovlan);
+
+       return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
+}