/*
 * Copyright (c) 2016 QLogic Corporation.
 * All rights reserved.
 * www.qlogic.com
 *
 * See LICENSE.qede_pmd for copyright and licensing details.
 */

#include "bcm_osal.h"

#include "ecore.h"
#include "ecore_status.h"
#include "ecore_chain.h"
#include "ecore_spq.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_cxt.h"
#include "ecore_sp_commands.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore_iro.h"
#include "reg_addr.h"
#include "ecore_int.h"
#include "ecore_hw.h"
#include "ecore_dcbx.h"
#include "ecore_sriov.h"
#include "ecore_vf.h"

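/* Acquire a slow-path queue (SPQ) entry and initialize its ramrod header
 * with the given CID, command and protocol IDs, then record the requested
 * completion mode.  The ramrod payload is zeroed here; the caller fills it
 * in before posting.
 */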
enum _ecore_status_t ecore_sp_init_request(struct ecore_hwfn *p_hwfn,
                                           struct ecore_spq_entry **pp_ent,
                                           u8 cmd,
                                           u8 protocol,
                                           struct ecore_sp_init_data *p_data)
{
        u32 opaque_cid = p_data->opaque_fid << 16 | p_data->cid;
        struct ecore_spq_entry *p_ent = OSAL_NULL;
        enum _ecore_status_t rc;

        if (!pp_ent)
                return ECORE_INVAL;

        /* Get an SPQ entry */
        rc = ecore_spq_get_entry(p_hwfn, pp_ent);
        if (rc != ECORE_SUCCESS)
                return rc;

        /* Fill the SPQ entry */
        p_ent = *pp_ent;
        p_ent->elem.hdr.cid = OSAL_CPU_TO_LE32(opaque_cid);
        p_ent->elem.hdr.cmd_id = cmd;
        p_ent->elem.hdr.protocol_id = protocol;
        p_ent->priority = ECORE_SPQ_PRIORITY_NORMAL;
        p_ent->comp_mode = p_data->comp_mode;
        p_ent->comp_done.done = 0;

        switch (p_ent->comp_mode) {
        case ECORE_SPQ_MODE_EBLOCK:
                p_ent->comp_cb.cookie = &p_ent->comp_done;
                break;

        case ECORE_SPQ_MODE_BLOCK:
                if (!p_data->p_comp_data)
                        return ECORE_INVAL;

                p_ent->comp_cb.cookie = p_data->p_comp_data->cookie;
                break;

        case ECORE_SPQ_MODE_CB:
                if (!p_data->p_comp_data)
                        p_ent->comp_cb.function = OSAL_NULL;
                else
                        p_ent->comp_cb = *p_data->p_comp_data;
                break;

        default:
                DP_NOTICE(p_hwfn, true, "Unknown SPQE completion mode %d\n",
                          p_ent->comp_mode);
                return ECORE_INVAL;
        }

        DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
                   "Initialized: CID %08x cmd %02x protocol %02x data_addr %lu comp_mode [%s]\n",
                   opaque_cid, cmd, protocol,
                   (unsigned long)&p_ent->ramrod,
                   D_TRINE(p_ent->comp_mode, ECORE_SPQ_MODE_EBLOCK,
                           ECORE_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
                           "MODE_CB"));

        OSAL_MEMSET(&p_ent->ramrod, 0, sizeof(p_ent->ramrod));

        return ECORE_SUCCESS;
}

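/* Translate an ecore tunnel classification type to its FW tunnel_clss
 * counterpart; unrecognized values fall back to MAC/VLAN classification.
 */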
static enum tunnel_clss ecore_tunn_clss_to_fw_clss(u8 type)
{
        switch (type) {
        case ECORE_TUNN_CLSS_MAC_VLAN:
                return TUNNEL_CLSS_MAC_VLAN;
        case ECORE_TUNN_CLSS_MAC_VNI:
                return TUNNEL_CLSS_MAC_VNI;
        case ECORE_TUNN_CLSS_INNER_MAC_VLAN:
                return TUNNEL_CLSS_INNER_MAC_VLAN;
        case ECORE_TUNN_CLSS_INNER_MAC_VNI:
                return TUNNEL_CLSS_INNER_MAC_VNI;
        case ECORE_TUNN_CLSS_MAC_VLAN_DUAL_STAGE:
                return TUNNEL_CLSS_MAC_VLAN_DUAL_STAGE;
        default:
                return TUNNEL_CLSS_MAC_VLAN;
        }
}

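/* Latch the per-tunnel enable state from the request into the device-wide
 * tunnel info.  On PF start (b_pf_start) every mode is taken; on PF update
 * only the modes flagged with b_update_mode are copied.
 */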
static void
ecore_set_pf_update_tunn_mode(struct ecore_tunnel_info *p_tun,
                              struct ecore_tunnel_info *p_src,
                              bool b_pf_start)
{
        if (p_src->vxlan.b_update_mode || b_pf_start)
                p_tun->vxlan.b_mode_enabled = p_src->vxlan.b_mode_enabled;

        if (p_src->l2_gre.b_update_mode || b_pf_start)
                p_tun->l2_gre.b_mode_enabled = p_src->l2_gre.b_mode_enabled;

        if (p_src->ip_gre.b_update_mode || b_pf_start)
                p_tun->ip_gre.b_mode_enabled = p_src->ip_gre.b_mode_enabled;

        if (p_src->l2_geneve.b_update_mode || b_pf_start)
                p_tun->l2_geneve.b_mode_enabled =
                                p_src->l2_geneve.b_mode_enabled;

        if (p_src->ip_geneve.b_update_mode || b_pf_start)
                p_tun->ip_geneve.b_mode_enabled =
                                p_src->ip_geneve.b_mode_enabled;
}

static void ecore_set_tunn_cls_info(struct ecore_tunnel_info *p_tun,
                                    struct ecore_tunnel_info *p_src)
{
        enum tunnel_clss type;

        p_tun->b_update_rx_cls = p_src->b_update_rx_cls;
        p_tun->b_update_tx_cls = p_src->b_update_tx_cls;

        /* @DPDK - typecast tunnel class */
        type = ecore_tunn_clss_to_fw_clss(p_src->vxlan.tun_cls);
        p_tun->vxlan.tun_cls = (enum ecore_tunn_clss)type;
        type = ecore_tunn_clss_to_fw_clss(p_src->l2_gre.tun_cls);
        p_tun->l2_gre.tun_cls = (enum ecore_tunn_clss)type;
        type = ecore_tunn_clss_to_fw_clss(p_src->ip_gre.tun_cls);
        p_tun->ip_gre.tun_cls = (enum ecore_tunn_clss)type;
        type = ecore_tunn_clss_to_fw_clss(p_src->l2_geneve.tun_cls);
        p_tun->l2_geneve.tun_cls = (enum ecore_tunn_clss)type;
        type = ecore_tunn_clss_to_fw_clss(p_src->ip_geneve.tun_cls);
        p_tun->ip_geneve.tun_cls = (enum ecore_tunn_clss)type;
}

static void ecore_set_tunn_ports(struct ecore_tunnel_info *p_tun,
                                 struct ecore_tunnel_info *p_src)
{
        p_tun->geneve_port.b_update_port = p_src->geneve_port.b_update_port;
        p_tun->vxlan_port.b_update_port = p_src->vxlan_port.b_update_port;

        if (p_src->geneve_port.b_update_port)
                p_tun->geneve_port.port = p_src->geneve_port.port;

        if (p_src->vxlan_port.b_update_port)
                p_tun->vxlan_port.port = p_src->vxlan_port.port;
}

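/* Helpers that copy the cached tunnel state into ramrod fields: the
 * double-underscore variant sets only the classification byte, while
 * ecore_set_ramrod_tunnel_param() additionally latches a UDP destination
 * port when a port update was requested.
 */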
static void
__ecore_set_ramrod_tunnel_param(u8 *p_tunn_cls,
                                struct ecore_tunn_update_type *tun_type)
{
        *p_tunn_cls = tun_type->tun_cls;
}

static void
ecore_set_ramrod_tunnel_param(u8 *p_tunn_cls,
                              struct ecore_tunn_update_type *tun_type,
                              u8 *p_update_port, __le16 *p_port,
                              struct ecore_tunn_update_udp_port *p_udp_port)
{
        __ecore_set_ramrod_tunnel_param(p_tunn_cls, tun_type);
        if (p_udp_port->b_update_port) {
                *p_update_port = 1;
                *p_port = OSAL_CPU_TO_LE16(p_udp_port->port);
        }
}

static void
ecore_tunn_set_pf_update_params(struct ecore_hwfn *p_hwfn,
                                struct ecore_tunnel_info *p_src,
                                struct pf_update_tunnel_config *p_tunn_cfg)
{
        struct ecore_tunnel_info *p_tun = &p_hwfn->p_dev->tunnel;

        ecore_set_pf_update_tunn_mode(p_tun, p_src, false);
        ecore_set_tunn_cls_info(p_tun, p_src);
        ecore_set_tunn_ports(p_tun, p_src);

        ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_vxlan,
                                      &p_tun->vxlan,
                                      &p_tunn_cfg->set_vxlan_udp_port_flg,
                                      &p_tunn_cfg->vxlan_udp_port,
                                      &p_tun->vxlan_port);

        ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2geneve,
                                      &p_tun->l2_geneve,
                                      &p_tunn_cfg->set_geneve_udp_port_flg,
                                      &p_tunn_cfg->geneve_udp_port,
                                      &p_tun->geneve_port);

        __ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgeneve,
                                        &p_tun->ip_geneve);

        __ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2gre,
                                        &p_tun->l2_gre);

        __ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgre,
                                        &p_tun->ip_gre);

        p_tunn_cfg->update_rx_pf_clss = p_tun->b_update_rx_cls;
}

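/* Apply the enabled GRE, VXLAN and GENEVE modes to the hardware through the
 * ecore_set_*_enable() init helpers.
 */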
static void ecore_set_hw_tunn_mode(struct ecore_hwfn *p_hwfn,
                                   struct ecore_ptt *p_ptt,
                                   struct ecore_tunnel_info *p_tun)
{
        ecore_set_gre_enable(p_hwfn, p_ptt, p_tun->l2_gre.b_mode_enabled,
                             p_tun->ip_gre.b_mode_enabled);
        ecore_set_vxlan_enable(p_hwfn, p_ptt, p_tun->vxlan.b_mode_enabled);

        ecore_set_geneve_enable(p_hwfn, p_ptt, p_tun->l2_geneve.b_mode_enabled,
                                p_tun->ip_geneve.b_mode_enabled);
}

static void ecore_set_hw_tunn_mode_port(struct ecore_hwfn *p_hwfn,
                                        struct ecore_ptt *p_ptt,
                                        struct ecore_tunnel_info *p_tunn)
{
        if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
                DP_NOTICE(p_hwfn, true,
                          "A0 chip: tunnel hw config is not supported\n");
                return;
        }

        if (p_tunn->vxlan_port.b_update_port)
                ecore_set_vxlan_dest_port(p_hwfn, p_ptt,
                                          p_tunn->vxlan_port.port);

        if (p_tunn->geneve_port.b_update_port)
                ecore_set_geneve_dest_port(p_hwfn, p_ptt,
                                           p_tunn->geneve_port.port);

        ecore_set_hw_tunn_mode(p_hwfn, p_ptt, p_tunn);
}

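/* Fill the tunnel section of the PF start ramrod from the requested
 * configuration.  Not supported on BB A0 hardware.
 */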
static void
ecore_tunn_set_pf_start_params(struct ecore_hwfn *p_hwfn,
                               struct ecore_tunnel_info *p_src,
                               struct pf_start_tunnel_config *p_tunn_cfg)
{
        struct ecore_tunnel_info *p_tun = &p_hwfn->p_dev->tunnel;

        if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
                DP_NOTICE(p_hwfn, true,
                          "A0 chip: tunnel pf start config is not supported\n");
                return;
        }

        if (!p_src)
                return;

        ecore_set_pf_update_tunn_mode(p_tun, p_src, true);
        ecore_set_tunn_cls_info(p_tun, p_src);
        ecore_set_tunn_ports(p_tun, p_src);

        ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_vxlan,
                                      &p_tun->vxlan,
                                      &p_tunn_cfg->set_vxlan_udp_port_flg,
                                      &p_tunn_cfg->vxlan_udp_port,
                                      &p_tun->vxlan_port);

        ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2geneve,
                                      &p_tun->l2_geneve,
                                      &p_tunn_cfg->set_geneve_udp_port_flg,
                                      &p_tunn_cfg->geneve_udp_port,
                                      &p_tun->geneve_port);

        __ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgeneve,
                                        &p_tun->ip_geneve);

        __ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_l2gre,
                                        &p_tun->l2_gre);

        __ecore_set_ramrod_tunnel_param(&p_tunn_cfg->tunnel_clss_ipgre,
                                        &p_tun->ip_gre);
}

#define ETH_P_8021Q 0x8100

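/* Post the COMMON_RAMROD_PF_START ramrod: program the event-queue and
 * consolidation-queue PBL addresses, the MF mode and outer-tag (S-tag)
 * configuration, tunnel start parameters, SRIOV VF range and the ETH HSI
 * version, then apply any requested tunnel configuration to the hardware.
 */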
enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
                                       struct ecore_ptt *p_ptt,
                                       struct ecore_tunnel_info *p_tunn,
                                       bool allow_npar_tx_switch)
{
        struct pf_start_ramrod_data *p_ramrod = OSAL_NULL;
        u16 sb = ecore_int_get_sp_sb_id(p_hwfn);
        u8 sb_index = p_hwfn->p_eq->eq_sb_index;
        struct ecore_spq_entry *p_ent = OSAL_NULL;
        struct ecore_sp_init_data init_data;
        enum _ecore_status_t rc = ECORE_NOTIMPL;
        u8 page_cnt;
        int i;

        /* update initial eq producer */
        ecore_eq_prod_update(p_hwfn,
                             ecore_chain_get_prod_idx(&p_hwfn->p_eq->chain));

        /* Initialize the SPQ entry for the ramrod */
        OSAL_MEMSET(&init_data, 0, sizeof(init_data));
        init_data.cid = ecore_spq_get_cid(p_hwfn);
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

        rc = ecore_sp_init_request(p_hwfn, &p_ent,
                                   COMMON_RAMROD_PF_START,
                                   PROTOCOLID_COMMON, &init_data);
        if (rc != ECORE_SUCCESS)
                return rc;

        /* Fill the ramrod data */
        p_ramrod = &p_ent->ramrod.pf_start;
        p_ramrod->event_ring_sb_id = OSAL_CPU_TO_LE16(sb);
        p_ramrod->event_ring_sb_index = sb_index;
        p_ramrod->path_id = ECORE_PATH_ID(p_hwfn);

        /* For easier debugging */
        p_ramrod->dont_log_ramrods = 0;
        p_ramrod->log_type_mask = OSAL_CPU_TO_LE16(0x8f);

        if (OSAL_TEST_BIT(ECORE_MF_OVLAN_CLSS, &p_hwfn->p_dev->mf_bits))
                p_ramrod->mf_mode = MF_OVLAN;
        else
                p_ramrod->mf_mode = MF_NPAR;

        p_ramrod->outer_tag_config.outer_tag.tci =
                OSAL_CPU_TO_LE16(p_hwfn->hw_info.ovlan);

        if (OSAL_TEST_BIT(ECORE_MF_UFP_SPECIFIC, &p_hwfn->p_dev->mf_bits)) {
                p_ramrod->outer_tag_config.outer_tag.tpid =
                        OSAL_CPU_TO_LE16(ETH_P_8021Q);
                if (p_hwfn->ufp_info.pri_type == ECORE_UFP_PRI_OS)
                        p_ramrod->outer_tag_config.enable_stag_pri_change = 1;
                else
                        p_ramrod->outer_tag_config.enable_stag_pri_change = 0;
                p_ramrod->outer_tag_config.pri_map_valid = 1;
                for (i = 0; i < 8; i++)
                        p_ramrod->outer_tag_config.inner_to_outer_pri_map[i] =
                                                                          (u8)i;
        }

        /* Place EQ address in RAMROD */
        DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr,
                       p_hwfn->p_eq->chain.pbl_sp.p_phys_table);
        page_cnt = (u8)ecore_chain_get_page_cnt(&p_hwfn->p_eq->chain);
        p_ramrod->event_ring_num_pages = page_cnt;
        DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_addr,
                       p_hwfn->p_consq->chain.pbl_sp.p_phys_table);

        ecore_tunn_set_pf_start_params(p_hwfn, p_tunn,
                                       &p_ramrod->tunnel_config);

        if (OSAL_TEST_BIT(ECORE_MF_INTER_PF_SWITCH,
                          &p_hwfn->p_dev->mf_bits))
                p_ramrod->allow_npar_tx_switching = allow_npar_tx_switch;

        switch (p_hwfn->hw_info.personality) {
        case ECORE_PCI_ETH:
                p_ramrod->personality = PERSONALITY_ETH;
                break;
        default:
                DP_NOTICE(p_hwfn, true, "Unknown personality %d\n",
                          p_hwfn->hw_info.personality);
                p_ramrod->personality = PERSONALITY_ETH;
        }

        if (p_hwfn->p_dev->p_iov_info) {
                struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info;

                p_ramrod->base_vf_id = (u8)p_iov->first_vf_in_pf;
                p_ramrod->num_vfs = (u8)p_iov->total_vfs;
        }
        /* @@@TBD - update also the "ROCE_VER_KEY" entries when the FW RoCE HSI
         * version is available.
         */
        p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR;
        p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MINOR;

        DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
                   "Setting event_ring_sb [id %04x index %02x], outer_tag.tpid [%d], outer_tag.tci [%d]\n",
                   sb, sb_index, p_ramrod->outer_tag_config.outer_tag.tpid,
                   p_ramrod->outer_tag_config.outer_tag.tci);

        rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);

        if (p_tunn)
                ecore_set_hw_tunn_mode_port(p_hwfn, p_ptt,
                                            &p_hwfn->p_dev->tunnel);

        return rc;
}

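/* Post a PF update ramrod carrying the current DCBx negotiation results. */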
enum _ecore_status_t ecore_sp_pf_update_dcbx(struct ecore_hwfn *p_hwfn)
{
        struct ecore_spq_entry *p_ent = OSAL_NULL;
        struct ecore_sp_init_data init_data;
        enum _ecore_status_t rc = ECORE_NOTIMPL;

        /* Get SPQ entry */
        OSAL_MEMSET(&init_data, 0, sizeof(init_data));
        init_data.cid = ecore_spq_get_cid(p_hwfn);
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = ECORE_SPQ_MODE_CB;

        rc = ecore_sp_init_request(p_hwfn, &p_ent,
                                   COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
                                   &init_data);
        if (rc != ECORE_SUCCESS)
                return rc;

        ecore_dcbx_set_pf_update_params(&p_hwfn->p_dcbx_info->results,
                                        &p_ent->ramrod.pf_update);

        return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

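/* Post a PF update ramrod that updates whether S-tag priority change is
 * enabled, based on the UFP priority type.
 */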
enum _ecore_status_t ecore_sp_pf_update_ufp(struct ecore_hwfn *p_hwfn)
{
        struct ecore_spq_entry *p_ent = OSAL_NULL;
        struct ecore_sp_init_data init_data;
        enum _ecore_status_t rc = ECORE_NOTIMPL;

        /* Get SPQ entry */
        OSAL_MEMSET(&init_data, 0, sizeof(init_data));
        init_data.cid = ecore_spq_get_cid(p_hwfn);
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = ECORE_SPQ_MODE_CB;

        rc = ecore_sp_init_request(p_hwfn, &p_ent,
                                   COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
                                   &init_data);
        if (rc != ECORE_SUCCESS)
                return rc;

        p_ent->ramrod.pf_update.update_enable_stag_pri_change = true;
        if (p_hwfn->ufp_info.pri_type == ECORE_UFP_PRI_OS)
                p_ent->ramrod.pf_update.enable_stag_pri_change = 1;
        else
                p_ent->ramrod.pf_update.enable_stag_pri_change = 0;

        return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

/* QM rate limiter resolution is 1.6Mbps */
#define QM_RL_RESOLUTION(mb_val)        ((mb_val) * 10 / 16)

/* FW uses 1/64k to express gd */
#define FW_GD_RESOLUTION(gd)            (64 * 1024 / (gd))

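/* Example (illustration only): a 16 Mbps rate maps to QM_RL_RESOLUTION(16)
 * == 10 units of 1.6 Mbps, and a gd of 1024 maps to FW_GD_RESOLUTION(1024)
 * == 64 (i.e. 64/64k = 1/1024).  The helpers below clamp the results to
 * 16 bits for the ramrod fields.
 */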
u16 ecore_sp_rl_mb_to_qm(u32 mb_val)
{
        return (u16)OSAL_MIN_T(u32, (u16)(~0U), QM_RL_RESOLUTION(mb_val));
}

u16 ecore_sp_rl_gd_denom(u32 gd)
{
        return gd ? (u16)OSAL_MIN_T(u32, (u16)(~0U), FW_GD_RESOLUTION(gd)) : 0;
}

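/* Post a rate-limiter update ramrod, converting the caller-supplied rates
 * and DCQCN parameters into FW units before sending.
 */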
enum _ecore_status_t ecore_sp_rl_update(struct ecore_hwfn *p_hwfn,
                                        struct ecore_rl_update_params *params)
{
        struct ecore_spq_entry *p_ent = OSAL_NULL;
        enum _ecore_status_t rc = ECORE_NOTIMPL;
        struct rl_update_ramrod_data *rl_update;
        struct ecore_sp_init_data init_data;

        /* Get SPQ entry */
        OSAL_MEMSET(&init_data, 0, sizeof(init_data));
        init_data.cid = ecore_spq_get_cid(p_hwfn);
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

        rc = ecore_sp_init_request(p_hwfn, &p_ent,
                                   COMMON_RAMROD_RL_UPDATE, PROTOCOLID_COMMON,
                                   &init_data);
        if (rc != ECORE_SUCCESS)
                return rc;

        rl_update = &p_ent->ramrod.rl_update;

        rl_update->qcn_update_param_flg = params->qcn_update_param_flg;
        rl_update->dcqcn_update_param_flg = params->dcqcn_update_param_flg;
        rl_update->rl_init_flg = params->rl_init_flg;
        rl_update->rl_start_flg = params->rl_start_flg;
        rl_update->rl_stop_flg = params->rl_stop_flg;
        rl_update->rl_id_first = params->rl_id_first;
        rl_update->rl_id_last = params->rl_id_last;
        rl_update->rl_dc_qcn_flg = params->rl_dc_qcn_flg;
        rl_update->rl_bc_rate = OSAL_CPU_TO_LE32(params->rl_bc_rate);
        rl_update->rl_max_rate =
                OSAL_CPU_TO_LE16(ecore_sp_rl_mb_to_qm(params->rl_max_rate));
        rl_update->rl_r_ai =
                OSAL_CPU_TO_LE16(ecore_sp_rl_mb_to_qm(params->rl_r_ai));
        rl_update->rl_r_hai =
                OSAL_CPU_TO_LE16(ecore_sp_rl_mb_to_qm(params->rl_r_hai));
        rl_update->dcqcn_g =
                OSAL_CPU_TO_LE16(ecore_sp_rl_gd_denom(params->dcqcn_gd));
        rl_update->dcqcn_k_us = OSAL_CPU_TO_LE32(params->dcqcn_k_us);
        rl_update->dcqcn_timeuot_us =
                OSAL_CPU_TO_LE32(params->dcqcn_timeuot_us);
        rl_update->qcn_timeuot_us = OSAL_CPU_TO_LE32(params->qcn_timeuot_us);

        DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "rl_params: qcn_update_param_flg %x, dcqcn_update_param_flg %x, rl_init_flg %x, rl_start_flg %x, rl_stop_flg %x, rl_id_first %x, rl_id_last %x, rl_dc_qcn_flg %x, rl_bc_rate %x, rl_max_rate %x, rl_r_ai %x, rl_r_hai %x, dcqcn_g %x, dcqcn_k_us %x, dcqcn_timeuot_us %x, qcn_timeuot_us %x\n",
                   rl_update->qcn_update_param_flg,
                   rl_update->dcqcn_update_param_flg,
                   rl_update->rl_init_flg, rl_update->rl_start_flg,
                   rl_update->rl_stop_flg, rl_update->rl_id_first,
                   rl_update->rl_id_last, rl_update->rl_dc_qcn_flg,
                   rl_update->rl_bc_rate, rl_update->rl_max_rate,
                   rl_update->rl_r_ai, rl_update->rl_r_hai,
                   rl_update->dcqcn_g, rl_update->dcqcn_k_us,
                   rl_update->dcqcn_timeuot_us, rl_update->qcn_timeuot_us);

        return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

/* Set pf update ramrod command params */
enum _ecore_status_t
ecore_sp_pf_update_tunn_cfg(struct ecore_hwfn *p_hwfn,
                            struct ecore_ptt *p_ptt,
                            struct ecore_tunnel_info *p_tunn,
                            enum spq_mode comp_mode,
                            struct ecore_spq_comp_cb *p_comp_data)
{
        struct ecore_spq_entry *p_ent = OSAL_NULL;
        struct ecore_sp_init_data init_data;
        enum _ecore_status_t rc = ECORE_NOTIMPL;

        if (IS_VF(p_hwfn->p_dev))
                return ecore_vf_pf_tunnel_param_update(p_hwfn, p_tunn);

        if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
                DP_NOTICE(p_hwfn, true,
                          "A0 chip: tunnel pf update config is not supported\n");
                return rc;
        }

        if (!p_tunn)
                return ECORE_INVAL;

        /* Get SPQ entry */
        OSAL_MEMSET(&init_data, 0, sizeof(init_data));
        init_data.cid = ecore_spq_get_cid(p_hwfn);
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = comp_mode;
        init_data.p_comp_data = p_comp_data;

        rc = ecore_sp_init_request(p_hwfn, &p_ent,
                                   COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
                                   &init_data);
        if (rc != ECORE_SUCCESS)
                return rc;

        ecore_tunn_set_pf_update_params(p_hwfn, p_tunn,
                                        &p_ent->ramrod.pf_update.tunnel_config);

        rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
        if (rc != ECORE_SUCCESS)
                return rc;

        ecore_set_hw_tunn_mode_port(p_hwfn, p_ptt, &p_hwfn->p_dev->tunnel);

        return rc;
}

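/* Post the COMMON_RAMROD_PF_STOP ramrod using EBLOCK completion mode. */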
enum _ecore_status_t ecore_sp_pf_stop(struct ecore_hwfn *p_hwfn)
{
        struct ecore_spq_entry *p_ent = OSAL_NULL;
        struct ecore_sp_init_data init_data;
        enum _ecore_status_t rc = ECORE_NOTIMPL;

        /* Get SPQ entry */
        OSAL_MEMSET(&init_data, 0, sizeof(init_data));
        init_data.cid = ecore_spq_get_cid(p_hwfn);
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

        rc = ecore_sp_init_request(p_hwfn, &p_ent,
                                   COMMON_RAMROD_PF_STOP, PROTOCOLID_COMMON,
                                   &init_data);
        if (rc != ECORE_SUCCESS)
                return rc;

        return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

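/* Post an empty (COMMON_RAMROD_EMPTY) ramrod; serves as a heartbeat check
 * of the slow-path channel.
 */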
enum _ecore_status_t ecore_sp_heartbeat_ramrod(struct ecore_hwfn *p_hwfn)
{
        struct ecore_spq_entry *p_ent = OSAL_NULL;
        struct ecore_sp_init_data init_data;
        enum _ecore_status_t rc;

        /* Get SPQ entry */
        OSAL_MEMSET(&init_data, 0, sizeof(init_data));
        init_data.cid = ecore_spq_get_cid(p_hwfn);
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

        rc = ecore_sp_init_request(p_hwfn, &p_ent,
                                   COMMON_RAMROD_EMPTY, PROTOCOLID_COMMON,
                                   &init_data);
        if (rc != ECORE_SUCCESS)
                return rc;

        return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

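/* Post a PF update ramrod that refreshes the MF outer VLAN (S-tag) with the
 * current hw_info.ovlan value.
 */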
enum _ecore_status_t ecore_sp_pf_update_stag(struct ecore_hwfn *p_hwfn)
{
        struct ecore_spq_entry *p_ent = OSAL_NULL;
        struct ecore_sp_init_data init_data;
        enum _ecore_status_t rc = ECORE_NOTIMPL;

        /* Get SPQ entry */
        OSAL_MEMSET(&init_data, 0, sizeof(init_data));
        init_data.cid = ecore_spq_get_cid(p_hwfn);
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = ECORE_SPQ_MODE_CB;

        rc = ecore_sp_init_request(p_hwfn, &p_ent,
                                   COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
                                   &init_data);
        if (rc != ECORE_SUCCESS)
                return rc;

        p_ent->ramrod.pf_update.update_mf_vlan_flag = true;
        p_ent->ramrod.pf_update.mf_vlan =
                                OSAL_CPU_TO_LE16(p_hwfn->hw_info.ovlan);

        return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}