/*
 * Copyright (c) 2016 QLogic Corporation.
 * All rights reserved.
 * www.qlogic.com
 *
 * See LICENSE.qede_pmd for copyright and licensing details.
 */

#include "bcm_osal.h"

#include "ecore.h"
#include "ecore_status.h"
#include "ecore_chain.h"
#include "ecore_spq.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_cxt.h"
#include "ecore_sp_commands.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore_iro.h"
#include "reg_addr.h"
#include "ecore_int.h"
#include "ecore_hw.h"
#include "ecore_dcbx.h"

/* Acquire a slowpath queue (SPQ) entry and initialize its common header
 * and completion semantics from p_data. On success *pp_ent points at the
 * acquired entry with its ramrod data zeroed, ready for the caller to
 * fill and post. EBLOCK completions busy-wait internally on comp_done;
 * BLOCK requires a caller-supplied cookie; CB takes an optional callback.
 */
enum _ecore_status_t ecore_sp_init_request(struct ecore_hwfn *p_hwfn,
                                           struct ecore_spq_entry **pp_ent,
                                           u8 cmd,
                                           u8 protocol,
                                           struct ecore_sp_init_data *p_data)
{
        u32 opaque_cid = p_data->opaque_fid << 16 | p_data->cid;
        struct ecore_spq_entry *p_ent = OSAL_NULL;
        enum _ecore_status_t rc = ECORE_NOTIMPL;

        /* Get an SPQ entry */
        rc = ecore_spq_get_entry(p_hwfn, pp_ent);
        if (rc != ECORE_SUCCESS)
                return rc;

        /* Fill the SPQ entry */
        p_ent = *pp_ent;
        p_ent->elem.hdr.cid = OSAL_CPU_TO_LE32(opaque_cid);
        p_ent->elem.hdr.cmd_id = cmd;
        p_ent->elem.hdr.protocol_id = protocol;
        p_ent->priority = ECORE_SPQ_PRIORITY_NORMAL;
        p_ent->comp_mode = p_data->comp_mode;
        p_ent->comp_done.done = 0;

        switch (p_ent->comp_mode) {
        case ECORE_SPQ_MODE_EBLOCK:
                p_ent->comp_cb.cookie = &p_ent->comp_done;
                break;

        case ECORE_SPQ_MODE_BLOCK:
                if (!p_data->p_comp_data)
                        return ECORE_INVAL;

                p_ent->comp_cb.cookie = p_data->p_comp_data->cookie;
                break;

        case ECORE_SPQ_MODE_CB:
                if (!p_data->p_comp_data)
                        p_ent->comp_cb.function = OSAL_NULL;
                else
                        p_ent->comp_cb = *p_data->p_comp_data;
                break;

        default:
                DP_NOTICE(p_hwfn, true, "Unknown SPQE completion mode %d\n",
                          p_ent->comp_mode);
                return ECORE_INVAL;
        }

        DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
                   "Initialized: CID %08x cmd %02x protocol %02x data_addr %lu comp_mode [%s]\n",
                   opaque_cid, cmd, protocol,
                   (unsigned long)&p_ent->ramrod,
                   D_TRINE(p_ent->comp_mode, ECORE_SPQ_MODE_EBLOCK,
                           ECORE_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
                           "MODE_CB"));

        OSAL_MEMSET(&p_ent->ramrod, 0, sizeof(p_ent->ramrod));

        return ECORE_SUCCESS;
}
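
/* Typical call pattern (an illustrative sketch; the concrete callers are
 * the ramrod wrappers later in this file, e.g. ecore_sp_heartbeat_ramrod,
 * which this mirrors):
 *
 *      struct ecore_spq_entry *p_ent = OSAL_NULL;
 *      struct ecore_sp_init_data init_data;
 *      enum _ecore_status_t rc;
 *
 *      OSAL_MEMSET(&init_data, 0, sizeof(init_data));
 *      init_data.cid = ecore_spq_get_cid(p_hwfn);
 *      init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
 *      init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
 *
 *      rc = ecore_sp_init_request(p_hwfn, &p_ent, COMMON_RAMROD_EMPTY,
 *                                 PROTOCOLID_COMMON, &init_data);
 *      if (rc != ECORE_SUCCESS)
 *              return rc;
 *      return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
 */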

/* Map an ecore tunnel classification type onto the FW's tunnel_clss
 * enumeration, defaulting to outer-MAC/VLAN classification for any
 * unrecognized value.
 */
static enum tunnel_clss ecore_tunn_get_clss_type(u8 type)
{
        switch (type) {
        case ECORE_TUNN_CLSS_MAC_VLAN:
                return TUNNEL_CLSS_MAC_VLAN;
        case ECORE_TUNN_CLSS_MAC_VNI:
                return TUNNEL_CLSS_MAC_VNI;
        case ECORE_TUNN_CLSS_INNER_MAC_VLAN:
                return TUNNEL_CLSS_INNER_MAC_VLAN;
        case ECORE_TUNN_CLSS_INNER_MAC_VNI:
                return TUNNEL_CLSS_INNER_MAC_VNI;
        default:
                return TUNNEL_CLSS_MAC_VLAN;
        }
}

/* Merge the requested tunnel mode into the cached one: for each tunnel
 * type whose bit is set in the caller's update mask, take the requested
 * state from p_src->tunn_mode; for all others keep the state cached in
 * p_dev->tunn_mode. The merged bitmap is written back to p_src->tunn_mode.
 * BB A0 hardware lacks GENEVE support, so GENEVE port updates are dropped
 * there with a notice.
 */
static void
ecore_tunn_set_pf_fix_tunn_mode(struct ecore_hwfn *p_hwfn,
                                struct ecore_tunn_update_params *p_src,
                                struct pf_update_tunnel_config *p_tunn_cfg)
{
        unsigned long cached_tunn_mode = p_hwfn->p_dev->tunn_mode;
        unsigned long update_mask = p_src->tunn_mode_update_mask;
        unsigned long tunn_mode = p_src->tunn_mode;
        unsigned long new_tunn_mode = 0;

        if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &update_mask)) {
                if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &tunn_mode))
                        OSAL_SET_BIT(ECORE_MODE_L2GRE_TUNN, &new_tunn_mode);
        } else {
                if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &cached_tunn_mode))
                        OSAL_SET_BIT(ECORE_MODE_L2GRE_TUNN, &new_tunn_mode);
        }

        if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &update_mask)) {
                if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &tunn_mode))
                        OSAL_SET_BIT(ECORE_MODE_IPGRE_TUNN, &new_tunn_mode);
        } else {
                if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &cached_tunn_mode))
                        OSAL_SET_BIT(ECORE_MODE_IPGRE_TUNN, &new_tunn_mode);
        }

        if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &update_mask)) {
                if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &tunn_mode))
                        OSAL_SET_BIT(ECORE_MODE_VXLAN_TUNN, &new_tunn_mode);
        } else {
                if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &cached_tunn_mode))
                        OSAL_SET_BIT(ECORE_MODE_VXLAN_TUNN, &new_tunn_mode);
        }

        if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
                if (p_src->update_geneve_udp_port)
                        DP_NOTICE(p_hwfn, true, "Geneve not supported\n");
                p_src->update_geneve_udp_port = 0;
                p_src->tunn_mode = new_tunn_mode;
                return;
        }

        if (p_src->update_geneve_udp_port) {
                p_tunn_cfg->set_geneve_udp_port_flg = 1;
                p_tunn_cfg->geneve_udp_port =
                    OSAL_CPU_TO_LE16(p_src->geneve_udp_port);
        }

        if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &update_mask)) {
                if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &tunn_mode))
                        OSAL_SET_BIT(ECORE_MODE_L2GENEVE_TUNN, &new_tunn_mode);
        } else {
                if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &cached_tunn_mode))
                        OSAL_SET_BIT(ECORE_MODE_L2GENEVE_TUNN, &new_tunn_mode);
        }

        if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &update_mask)) {
                if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &tunn_mode))
                        OSAL_SET_BIT(ECORE_MODE_IPGENEVE_TUNN, &new_tunn_mode);
        } else {
                if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &cached_tunn_mode))
                        OSAL_SET_BIT(ECORE_MODE_IPGENEVE_TUNN, &new_tunn_mode);
        }

        p_src->tunn_mode = new_tunn_mode;
}
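
/* Worked example of the merge above (hypothetical values): with VXLAN set
 * in both update_mask and the requested tunn_mode, and L2GRE set only in
 * the cached p_dev->tunn_mode, the resulting bitmap has VXLAN enabled
 * (explicitly requested) and L2GRE enabled (carried over unchanged),
 * while IPGRE and the GENEVE modes remain clear.
 */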

/* Translate tunnel update parameters into the PF_UPDATE ramrod's tunnel
 * configuration: fixed-up tunnel mode, RX/TX classification update flags,
 * per-type classification values, optional VXLAN/GENEVE UDP destination
 * ports and per-type TX enable bits.
 */
static void
ecore_tunn_set_pf_update_params(struct ecore_hwfn *p_hwfn,
                                struct ecore_tunn_update_params *p_src,
                                struct pf_update_tunnel_config *p_tunn_cfg)
{
        unsigned long tunn_mode = p_src->tunn_mode;
        enum tunnel_clss type;

        ecore_tunn_set_pf_fix_tunn_mode(p_hwfn, p_src, p_tunn_cfg);
        p_tunn_cfg->update_rx_pf_clss = p_src->update_rx_pf_clss;
        p_tunn_cfg->update_tx_pf_clss = p_src->update_tx_pf_clss;

        type = ecore_tunn_get_clss_type(p_src->tunn_clss_vxlan);
        p_tunn_cfg->tunnel_clss_vxlan = type;
        type = ecore_tunn_get_clss_type(p_src->tunn_clss_l2gre);
        p_tunn_cfg->tunnel_clss_l2gre = type;
        type = ecore_tunn_get_clss_type(p_src->tunn_clss_ipgre);
        p_tunn_cfg->tunnel_clss_ipgre = type;

        if (p_src->update_vxlan_udp_port) {
                p_tunn_cfg->set_vxlan_udp_port_flg = 1;
                p_tunn_cfg->vxlan_udp_port =
                    OSAL_CPU_TO_LE16(p_src->vxlan_udp_port);
        }

        if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &tunn_mode))
                p_tunn_cfg->tx_enable_l2gre = 1;

        if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &tunn_mode))
                p_tunn_cfg->tx_enable_ipgre = 1;

        if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &tunn_mode))
                p_tunn_cfg->tx_enable_vxlan = 1;

        if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
                if (p_src->update_geneve_udp_port)
                        DP_NOTICE(p_hwfn, true, "Geneve not supported\n");
                p_src->update_geneve_udp_port = 0;
                return;
        }

        if (p_src->update_geneve_udp_port) {
                p_tunn_cfg->set_geneve_udp_port_flg = 1;
                p_tunn_cfg->geneve_udp_port =
                    OSAL_CPU_TO_LE16(p_src->geneve_udp_port);
        }

        if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &tunn_mode))
                p_tunn_cfg->tx_enable_l2geneve = 1;

        if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &tunn_mode))
                p_tunn_cfg->tx_enable_ipgeneve = 1;

        type = ecore_tunn_get_clss_type(p_src->tunn_clss_l2geneve);
        p_tunn_cfg->tunnel_clss_l2geneve = type;
        type = ecore_tunn_get_clss_type(p_src->tunn_clss_ipgeneve);
        p_tunn_cfg->tunnel_clss_ipgeneve = type;
}

/* Program a tunnel mode bitmap into hardware: GRE and VXLAN parsing are
 * configured unconditionally, GENEVE only on devices that support it
 * (i.e. not BB A0).
 */
static void ecore_set_hw_tunn_mode(struct ecore_hwfn *p_hwfn,
                                   struct ecore_ptt *p_ptt,
                                   unsigned long tunn_mode)
{
        u8 l2gre_enable = 0, ipgre_enable = 0, vxlan_enable = 0;
        u8 l2geneve_enable = 0, ipgeneve_enable = 0;

        if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &tunn_mode))
                l2gre_enable = 1;

        if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &tunn_mode))
                ipgre_enable = 1;

        if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &tunn_mode))
                vxlan_enable = 1;

        ecore_set_gre_enable(p_hwfn, p_ptt, l2gre_enable, ipgre_enable);
        ecore_set_vxlan_enable(p_hwfn, p_ptt, vxlan_enable);

        if (ECORE_IS_BB_A0(p_hwfn->p_dev))
                return;

        if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &tunn_mode))
                l2geneve_enable = 1;

        if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &tunn_mode))
                ipgeneve_enable = 1;

        ecore_set_geneve_enable(p_hwfn, p_ptt, l2geneve_enable,
                                ipgeneve_enable);
}

/* Fill the PF_START ramrod's tunnel configuration from the optional start
 * parameters; an OSAL_NULL p_src leaves the configuration zeroed. The
 * layout mirrors the PF_UPDATE variant above, minus the update flags.
 */
static void
ecore_tunn_set_pf_start_params(struct ecore_hwfn *p_hwfn,
                               struct ecore_tunn_start_params *p_src,
                               struct pf_start_tunnel_config *p_tunn_cfg)
{
        unsigned long tunn_mode;
        enum tunnel_clss type;

        if (!p_src)
                return;

        tunn_mode = p_src->tunn_mode;
        type = ecore_tunn_get_clss_type(p_src->tunn_clss_vxlan);
        p_tunn_cfg->tunnel_clss_vxlan = type;
        type = ecore_tunn_get_clss_type(p_src->tunn_clss_l2gre);
        p_tunn_cfg->tunnel_clss_l2gre = type;
        type = ecore_tunn_get_clss_type(p_src->tunn_clss_ipgre);
        p_tunn_cfg->tunnel_clss_ipgre = type;

        if (p_src->update_vxlan_udp_port) {
                p_tunn_cfg->set_vxlan_udp_port_flg = 1;
                p_tunn_cfg->vxlan_udp_port =
                    OSAL_CPU_TO_LE16(p_src->vxlan_udp_port);
        }

        if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &tunn_mode))
                p_tunn_cfg->tx_enable_l2gre = 1;

        if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &tunn_mode))
                p_tunn_cfg->tx_enable_ipgre = 1;

        if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &tunn_mode))
                p_tunn_cfg->tx_enable_vxlan = 1;

        if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
                if (p_src->update_geneve_udp_port)
                        DP_NOTICE(p_hwfn, true, "Geneve not supported\n");
                p_src->update_geneve_udp_port = 0;
                return;
        }

        if (p_src->update_geneve_udp_port) {
                p_tunn_cfg->set_geneve_udp_port_flg = 1;
                p_tunn_cfg->geneve_udp_port =
                    OSAL_CPU_TO_LE16(p_src->geneve_udp_port);
        }

        if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &tunn_mode))
                p_tunn_cfg->tx_enable_l2geneve = 1;

        if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &tunn_mode))
                p_tunn_cfg->tx_enable_ipgeneve = 1;

        type = ecore_tunn_get_clss_type(p_src->tunn_clss_l2geneve);
        p_tunn_cfg->tunnel_clss_l2geneve = type;
        type = ecore_tunn_get_clss_type(p_src->tunn_clss_ipgeneve);
        p_tunn_cfg->tunnel_clss_ipgeneve = type;
}

/* Post the COMMON_RAMROD_PF_START ramrod: advertise the event-ring and
 * consolidation-queue PBL addresses, status block, MF mode, personality,
 * VF range and tunnel start configuration to the FW. Posted in EBLOCK
 * mode, so the function returns only once the ramrod completes; on
 * success the requested tunnel mode is also committed to HW and cached.
 */
enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn,
                                       struct ecore_tunn_start_params *p_tunn,
                                       enum ecore_mf_mode mode,
                                       bool allow_npar_tx_switch)
{
        struct pf_start_ramrod_data *p_ramrod = OSAL_NULL;
        struct ecore_spq_entry *p_ent = OSAL_NULL;
        u16 sb = ecore_int_get_sp_sb_id(p_hwfn);
        u8 sb_index = p_hwfn->p_eq->eq_sb_index;
        enum _ecore_status_t rc = ECORE_NOTIMPL;
        struct ecore_sp_init_data init_data;
        u8 page_cnt;

        /* Update the initial EQ producer */
        ecore_eq_prod_update(p_hwfn,
                             ecore_chain_get_prod_idx(&p_hwfn->p_eq->chain));

        /* Initialize the SPQ entry for the ramrod */
        OSAL_MEMSET(&init_data, 0, sizeof(init_data));
        init_data.cid = ecore_spq_get_cid(p_hwfn);
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

        rc = ecore_sp_init_request(p_hwfn, &p_ent,
                                   COMMON_RAMROD_PF_START,
                                   PROTOCOLID_COMMON, &init_data);
        if (rc != ECORE_SUCCESS)
                return rc;

        /* Fill the ramrod data */
        p_ramrod = &p_ent->ramrod.pf_start;
        p_ramrod->event_ring_sb_id = OSAL_CPU_TO_LE16(sb);
        p_ramrod->event_ring_sb_index = sb_index;
        p_ramrod->path_id = ECORE_PATH_ID(p_hwfn);
        p_ramrod->outer_tag = p_hwfn->hw_info.ovlan;

        /* For easier debugging */
        p_ramrod->dont_log_ramrods = 0;
        p_ramrod->log_type_mask = OSAL_CPU_TO_LE16(0xf);

        switch (mode) {
        case ECORE_MF_DEFAULT:
        case ECORE_MF_NPAR:
                p_ramrod->mf_mode = MF_NPAR;
                break;
        case ECORE_MF_OVLAN:
                p_ramrod->mf_mode = MF_OVLAN;
                break;
        default:
                DP_NOTICE(p_hwfn, true,
                          "Unsupported MF mode, init as DEFAULT\n");
                p_ramrod->mf_mode = MF_NPAR;
        }

        /* Place EQ address in RAMROD */
        DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr,
                       p_hwfn->p_eq->chain.pbl.p_phys_table);
        page_cnt = (u8)ecore_chain_get_page_cnt(&p_hwfn->p_eq->chain);
        p_ramrod->event_ring_num_pages = page_cnt;
        DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_addr,
                       p_hwfn->p_consq->chain.pbl.p_phys_table);

        ecore_tunn_set_pf_start_params(p_hwfn, p_tunn,
                                       &p_ramrod->tunnel_config);

        if (IS_MF_SI(p_hwfn))
                p_ramrod->allow_npar_tx_switching = allow_npar_tx_switch;

        switch (p_hwfn->hw_info.personality) {
        case ECORE_PCI_ETH:
                p_ramrod->personality = PERSONALITY_ETH;
                break;
        default:
                DP_NOTICE(p_hwfn, true, "Unknown personality %d\n",
                          p_hwfn->hw_info.personality);
                p_ramrod->personality = PERSONALITY_ETH;
        }

        p_ramrod->base_vf_id = (u8)p_hwfn->hw_info.first_vf_in_pf;
        p_ramrod->num_vfs = (u8)p_hwfn->p_dev->sriov_info.total_vfs;

        DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
                   "Setting event_ring_sb [id %04x index %02x], outer_tag [%d]\n",
                   sb, sb_index, p_ramrod->outer_tag);

        rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);

        if (p_tunn) {
                ecore_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt,
                                       p_tunn->tunn_mode);
                p_hwfn->p_dev->tunn_mode = p_tunn->tunn_mode;
        }

        return rc;
}
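
/* Illustrative call (a sketch; in the PMD the arguments come from the
 * hw-init flow and device configuration, so the values here are only
 * placeholders):
 *
 *      struct ecore_tunn_start_params tunn_info;
 *      enum _ecore_status_t rc;
 *
 *      OSAL_MEMSET(&tunn_info, 0, sizeof(tunn_info));
 *      tunn_info.tunn_clss_vxlan = ECORE_TUNN_CLSS_MAC_VLAN;
 *      rc = ecore_sp_pf_start(p_hwfn, &tunn_info, ECORE_MF_DEFAULT, true);
 */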

/* Post a PF_UPDATE ramrod carrying the current DCBX negotiation results.
 * Uses CB mode, so completion is reported asynchronously through the EQ.
 */
enum _ecore_status_t ecore_sp_pf_update(struct ecore_hwfn *p_hwfn)
{
        struct ecore_spq_entry *p_ent = OSAL_NULL;
        enum _ecore_status_t rc = ECORE_NOTIMPL;
        struct ecore_sp_init_data init_data;

        /* Get SPQ entry */
        OSAL_MEMSET(&init_data, 0, sizeof(init_data));
        init_data.cid = ecore_spq_get_cid(p_hwfn);
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = ECORE_SPQ_MODE_CB;

        rc = ecore_sp_init_request(p_hwfn, &p_ent,
                                   COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
                                   &init_data);
        if (rc != ECORE_SUCCESS)
                return rc;

        ecore_dcbx_set_pf_update_params(&p_hwfn->p_dcbx_info->results,
                                        &p_ent->ramrod.pf_update);

        return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}
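
/* Usage note (an assumption based on the DCBX flow, not spelled out in
 * this file): this is typically invoked after ecore_dcbx has refreshed
 * p_hwfn->p_dcbx_info->results, e.g. on a negotiation-complete MFW event:
 *
 *      rc = ecore_sp_pf_update(p_hwfn);
 */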

/* Set the PF-update ramrod's tunnel command params and post it: fix up
 * the requested tunnel mode against the cached one, fill the ramrod's
 * tunnel configuration, and on successful posting commit the new
 * VXLAN/GENEVE UDP ports and tunnel mode to HW and to the device cache.
 */
enum _ecore_status_t
ecore_sp_pf_update_tunn_cfg(struct ecore_hwfn *p_hwfn,
                            struct ecore_tunn_update_params *p_tunn,
                            enum spq_mode comp_mode,
                            struct ecore_spq_comp_cb *p_comp_data)
{
        struct ecore_spq_entry *p_ent = OSAL_NULL;
        enum _ecore_status_t rc = ECORE_NOTIMPL;
        struct ecore_sp_init_data init_data;

        /* Get SPQ entry */
        OSAL_MEMSET(&init_data, 0, sizeof(init_data));
        init_data.cid = ecore_spq_get_cid(p_hwfn);
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = comp_mode;
        init_data.p_comp_data = p_comp_data;

        rc = ecore_sp_init_request(p_hwfn, &p_ent,
                                   COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON,
                                   &init_data);
        if (rc != ECORE_SUCCESS)
                return rc;

        ecore_tunn_set_pf_update_params(p_hwfn, p_tunn,
                                        &p_ent->ramrod.pf_update.tunnel_config);

        rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);

        if ((rc == ECORE_SUCCESS) && p_tunn) {
                if (p_tunn->update_vxlan_udp_port)
                        ecore_set_vxlan_dest_port(p_hwfn, p_hwfn->p_main_ptt,
                                                  p_tunn->vxlan_udp_port);
                if (p_tunn->update_geneve_udp_port)
                        ecore_set_geneve_dest_port(p_hwfn, p_hwfn->p_main_ptt,
                                                   p_tunn->geneve_udp_port);

                ecore_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt,
                                       p_tunn->tunn_mode);
                p_hwfn->p_dev->tunn_mode = p_tunn->tunn_mode;
        }

        return rc;
}
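
/* Illustrative sketch of enabling VXLAN with a new UDP destination port
 * (hypothetical values; the PMD's tunnel-update path derives them from
 * user configuration):
 *
 *      struct ecore_tunn_update_params params;
 *      enum _ecore_status_t rc;
 *
 *      OSAL_MEMSET(&params, 0, sizeof(params));
 *      OSAL_SET_BIT(ECORE_MODE_VXLAN_TUNN, &params.tunn_mode);
 *      OSAL_SET_BIT(ECORE_MODE_VXLAN_TUNN, &params.tunn_mode_update_mask);
 *      params.update_vxlan_udp_port = 1;
 *      params.vxlan_udp_port = 4789;        [the IANA VXLAN port]
 *      rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, &params,
 *                                       ECORE_SPQ_MODE_EBLOCK, OSAL_NULL);
 */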

/* Post a COMMON_RAMROD_PF_STOP ramrod in blocking (EBLOCK) mode; issued
 * during PF teardown, after the protocol-level resources have been closed.
 */
enum _ecore_status_t ecore_sp_pf_stop(struct ecore_hwfn *p_hwfn)
{
        enum _ecore_status_t rc = ECORE_NOTIMPL;
        struct ecore_spq_entry *p_ent = OSAL_NULL;
        struct ecore_sp_init_data init_data;

        /* Get SPQ entry */
        OSAL_MEMSET(&init_data, 0, sizeof(init_data));
        init_data.cid = ecore_spq_get_cid(p_hwfn);
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

        rc = ecore_sp_init_request(p_hwfn, &p_ent,
                                   COMMON_RAMROD_PF_STOP, PROTOCOLID_COMMON,
                                   &init_data);
        if (rc != ECORE_SUCCESS)
                return rc;

        return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

/* Post an empty (COMMON_RAMROD_EMPTY) ramrod in EBLOCK mode. Acts as a
 * FW liveness check: successful completion proves the SPQ and EQ are
 * still being processed.
 */
enum _ecore_status_t ecore_sp_heartbeat_ramrod(struct ecore_hwfn *p_hwfn)
{
        struct ecore_spq_entry *p_ent = OSAL_NULL;
        enum _ecore_status_t rc = ECORE_NOTIMPL;
        struct ecore_sp_init_data init_data;

        /* Get SPQ entry */
        OSAL_MEMSET(&init_data, 0, sizeof(init_data));
        init_data.cid = ecore_spq_get_cid(p_hwfn);
        init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
        init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

        rc = ecore_sp_init_request(p_hwfn, &p_ent,
                                   COMMON_RAMROD_EMPTY, PROTOCOLID_COMMON,
                                   &init_data);
        if (rc != ECORE_SUCCESS)
                return rc;

        return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}
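
/* PF slowpath lifecycle as exercised by the functions above (a sketch;
 * error handling and the surrounding init/teardown steps are omitted):
 *
 *      rc = ecore_sp_pf_start(p_hwfn, p_tunn, ECORE_MF_DEFAULT, true);
 *      ...
 *      rc = ecore_sp_pf_update(p_hwfn);               [on DCBX change]
 *      rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, p_tunn,
 *                                       ECORE_SPQ_MODE_EBLOCK, OSAL_NULL);
 *      rc = ecore_sp_heartbeat_ramrod(p_hwfn);        [liveness check]
 *      ...
 *      rc = ecore_sp_pf_stop(p_hwfn);                 [on unload]
 */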