Imported Upstream version 16.07-rc1
[deb_dpdk.git] / drivers / net / qede / base / ecore_mcp.c
1 /*
2  * Copyright (c) 2016 QLogic Corporation.
3  * All rights reserved.
4  * www.qlogic.com
5  *
6  * See LICENSE.qede_pmd for copyright and licensing details.
7  */
8
9 #include "bcm_osal.h"
10 #include "ecore.h"
11 #include "ecore_status.h"
12 #include "ecore_mcp.h"
13 #include "mcp_public.h"
14 #include "reg_addr.h"
15 #include "ecore_hw.h"
16 #include "ecore_init_fw_funcs.h"
17 #include "ecore_sriov.h"
18 #include "ecore_iov_api.h"
19 #include "ecore_gtt_reg_addr.h"
20 #include "ecore_iro.h"
21 #include "ecore_dcbx.h"
22
23 #define CHIP_MCP_RESP_ITER_US 10
24 #define EMUL_MCP_RESP_ITER_US (1000 * 1000)
25
26 #define ECORE_DRV_MB_MAX_RETRIES (500 * 1000)   /* Account for 5 sec */
27 #define ECORE_MCP_RESET_RETRIES (50 * 1000)     /* Account for 500 msec */
28
29 #define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val) \
30         ecore_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset), \
31                  _val)
32
33 #define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \
34         ecore_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset))
35
36 #define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val) \
37         DRV_INNER_WR(p_hwfn, _p_ptt, drv_mb_addr, \
38                      OFFSETOF(struct public_drv_mb, _field), _val)
39
40 #define DRV_MB_RD(_p_hwfn, _p_ptt, _field) \
41         DRV_INNER_RD(_p_hwfn, _p_ptt, drv_mb_addr, \
42                      OFFSETOF(struct public_drv_mb, _field))
43
44 #define PDA_COMP (((FW_MAJOR_VERSION) + (FW_MINOR_VERSION << 8)) << \
45         DRV_ID_PDA_COMP_VER_SHIFT)
46
47 #define MCP_BYTES_PER_MBIT_SHIFT 17
48
49 #ifndef ASIC_ONLY
50 static int loaded;
51 static int loaded_port[MAX_NUM_PORTS] = { 0 };
52 #endif
53
54 bool ecore_mcp_is_init(struct ecore_hwfn *p_hwfn)
55 {
56         if (!p_hwfn->mcp_info || !p_hwfn->mcp_info->public_base)
57                 return false;
58         return true;
59 }
60
61 void ecore_mcp_cmd_port_init(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
62 {
63         u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
64                                         PUBLIC_PORT);
65         u32 mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt, addr);
66
67         p_hwfn->mcp_info->port_addr = SECTION_ADDR(mfw_mb_offsize,
68                                                    MFW_PORT(p_hwfn));
69         DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
70                    "port_addr = 0x%x, port_id 0x%02x\n",
71                    p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn));
72 }
73
/* Copy the MFW message mailbox from device shmem into the local
 * 'mfw_mb_cur' shadow, converting each dword from big-endian.
 * The number of dwords is derived from the shmem-reported mailbox length.
 */
void ecore_mcp_read_mb(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length);
	OSAL_BE32 tmp;
	u32 i;

#ifndef ASIC_ONLY
	/* TEDIBEAR emulation has no MFW mailbox to read */
	if (CHIP_REV_IS_TEDIBEAR(p_hwfn->p_dev))
		return;
#endif

	/* public_base == 0 means the MCP/MFW was never initialized */
	if (!p_hwfn->mcp_info->public_base)
		return;

	for (i = 0; i < length; i++) {
		/* Data dwords follow the leading length dword at
		 * mfw_mb_addr, hence the extra sizeof(u32) offset.
		 */
		tmp = ecore_rd(p_hwfn, p_ptt,
			       p_hwfn->mcp_info->mfw_mb_addr +
			       (i << 2) + sizeof(u32));

		((u32 *)p_hwfn->mcp_info->mfw_mb_cur)[i] =
		    OSAL_BE32_TO_CPU(tmp);
	}
}
97
98 enum _ecore_status_t ecore_mcp_free(struct ecore_hwfn *p_hwfn)
99 {
100         if (p_hwfn->mcp_info) {
101                 OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_cur);
102                 OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_shadow);
103                 OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->mcp_info->lock);
104         }
105         OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info);
106         p_hwfn->mcp_info = OSAL_NULL;
107
108         return ECORE_SUCCESS;
109 }
110
/* Discover the shmem layout and cache the driver/MFW mailbox addresses
 * plus the initial sequence numbers in p_hwfn->mcp_info.
 * Returns ECORE_INVAL when no MFW is present (public_base == 0).
 */
static enum _ecore_status_t ecore_load_mcp_offsets(struct ecore_hwfn *p_hwfn,
                                                   struct ecore_ptt *p_ptt)
{
        struct ecore_mcp_info *p_info = p_hwfn->mcp_info;
        u32 drv_mb_offsize, mfw_mb_offsize;
        u32 mcp_pf_id = MCP_PF_ID(p_hwfn);

#ifndef ASIC_ONLY
        if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
                DP_NOTICE(p_hwfn, false, "Emulation - assume no MFW\n");
                p_info->public_base = 0;
                return ECORE_INVAL;
        }
#endif

        /* A zero shared-memory address means the MFW never set it up */
        p_info->public_base = ecore_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
        if (!p_info->public_base)
                return ECORE_INVAL;

        p_info->public_base |= GRCBASE_MCP;

        /* Calculate the driver and MFW mailbox address */
        drv_mb_offsize = ecore_rd(p_hwfn, p_ptt,
                                  SECTION_OFFSIZE_ADDR(p_info->public_base,
                                                       PUBLIC_DRV_MB));
        p_info->drv_mb_addr = SECTION_ADDR(drv_mb_offsize, mcp_pf_id);
        DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                   "drv_mb_offsiz = 0x%x, drv_mb_addr = 0x%x mcp_pf_id = 0x%x\n",
                   drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);

        /* Set the MFW MB address; the first dword at that address holds
         * the mailbox length (see ecore_mcp_read_mb)
         */
        mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt,
                                  SECTION_OFFSIZE_ADDR(p_info->public_base,
                                                       PUBLIC_MFW_MB));
        p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
        p_info->mfw_mb_length = (u16)ecore_rd(p_hwfn, p_ptt,
                                               p_info->mfw_mb_addr);

        /* Get the current driver mailbox sequence before sending
         * the first command
         */
        p_info->drv_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
            DRV_MSG_SEQ_NUMBER_MASK;

        /* Get current FW pulse sequence */
        p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) &
            DRV_PULSE_SEQ_MASK;

        /* Sample the MCP reset-history register; a later mismatch means the
         * MCP was reset and the offsets must be re-read (ecore_do_mcp_cmd)
         */
        p_info->mcp_hist = (u16)ecore_rd(p_hwfn, p_ptt,
                                          MISCS_REG_GENERIC_POR_0);

        return ECORE_SUCCESS;
}
164
165 enum _ecore_status_t ecore_mcp_cmd_init(struct ecore_hwfn *p_hwfn,
166                                         struct ecore_ptt *p_ptt)
167 {
168         struct ecore_mcp_info *p_info;
169         u32 size;
170
171         /* Allocate mcp_info structure */
172         p_hwfn->mcp_info = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
173                                        sizeof(*p_hwfn->mcp_info));
174         if (!p_hwfn->mcp_info)
175                 goto err;
176         p_info = p_hwfn->mcp_info;
177
178         if (ecore_load_mcp_offsets(p_hwfn, p_ptt) != ECORE_SUCCESS) {
179                 DP_NOTICE(p_hwfn, false, "MCP is not initialized\n");
180                 /* Do not free mcp_info here, since public_base indicate that
181                  * the MCP is not initialized
182                  */
183                 return ECORE_SUCCESS;
184         }
185
186         size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
187         p_info->mfw_mb_cur = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size);
188         p_info->mfw_mb_shadow = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size);
189         if (!p_info->mfw_mb_shadow || !p_info->mfw_mb_addr)
190                 goto err;
191
192         /* Initialize the MFW spinlock */
193         OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->lock);
194         OSAL_SPIN_LOCK_INIT(&p_info->lock);
195
196         return ECORE_SUCCESS;
197
198 err:
199         DP_NOTICE(p_hwfn, true, "Failed to allocate mcp memory\n");
200         ecore_mcp_free(p_hwfn);
201         return ECORE_NOMEM;
202 }
203
/* Ask the MFW to reset the MCP, then poll the reset-history register
 * (MISCS_REG_GENERIC_POR_0) until its value changes - proof the reset
 * actually happened. Returns ECORE_AGAIN on timeout.
 */
enum _ecore_status_t ecore_mcp_reset(struct ecore_hwfn *p_hwfn,
                                     struct ecore_ptt *p_ptt)
{
        u32 seq = ++p_hwfn->mcp_info->drv_mb_seq;
        u32 delay = CHIP_MCP_RESP_ITER_US;
        u32 org_mcp_reset_seq, cnt = 0;
        enum _ecore_status_t rc = ECORE_SUCCESS;

#ifndef ASIC_ONLY
        /* Emulated MCP responds far slower - poll at 1 sec intervals */
        if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
                delay = EMUL_MCP_RESP_ITER_US;
#endif

        /* Serialize mailbox access against concurrent commands */
        OSAL_SPIN_LOCK(&p_hwfn->mcp_info->lock);

        /* Set drv command along with the updated sequence */
        org_mcp_reset_seq = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
        DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (DRV_MSG_CODE_MCP_RESET | seq));

        do {
                /* Wait for MFW response */
                OSAL_UDELAY(delay);
                /* Give the FW up to 500 msec (50*1000 polls of 10 usec) */
        } while ((org_mcp_reset_seq == ecore_rd(p_hwfn, p_ptt,
                                                MISCS_REG_GENERIC_POR_0)) &&
                 (cnt++ < ECORE_MCP_RESET_RETRIES));

        /* A changed history value confirms the MCP went through reset */
        if (org_mcp_reset_seq !=
            ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
                DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                           "MCP was reset after %d usec\n", cnt * delay);
        } else {
                DP_ERR(p_hwfn, "Failed to reset MCP\n");
                rc = ECORE_AGAIN;
        }

        OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->lock);

        return rc;
}
244
/* Should be called while the dedicated spinlock is acquired.
 * Writes 'cmd'/'param' to the driver mailbox, then polls the FW mailbox
 * header until the echoed sequence number matches ours. On success the FW
 * response code and parameter are returned via 'o_mcp_resp'/'o_mcp_param';
 * on timeout returns ECORE_AGAIN and raises an MFW_RESP_FAIL HW error.
 */
static enum _ecore_status_t ecore_do_mcp_cmd(struct ecore_hwfn *p_hwfn,
                                             struct ecore_ptt *p_ptt,
                                             u32 cmd, u32 param,
                                             u32 *o_mcp_resp,
                                             u32 *o_mcp_param)
{
        u32 delay = CHIP_MCP_RESP_ITER_US;
        u32 seq, cnt = 1, actual_mb_seq;
        enum _ecore_status_t rc = ECORE_SUCCESS;

#ifndef ASIC_ONLY
        if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
                delay = EMUL_MCP_RESP_ITER_US;
#endif

        /* Get actual driver mailbox sequence.
         * NOTE(review): 'actual_mb_seq' is read but never used afterwards.
         */
        actual_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
            DRV_MSG_SEQ_NUMBER_MASK;

        /* Use MCP history register to check if MCP reset occurred between
         * init time and now.
         */
        if (p_hwfn->mcp_info->mcp_hist !=
            ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
                DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Rereading MCP offsets\n");
                ecore_load_mcp_offsets(p_hwfn, p_ptt);
                ecore_mcp_cmd_port_init(p_hwfn, p_ptt);
        }
        seq = ++p_hwfn->mcp_info->drv_mb_seq;

        /* Set drv param */
        DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, param);

        /* Set drv command along with the updated sequence */
        DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (cmd | seq));

        DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                   "wrote command (%x) to MFW MB param 0x%08x\n",
                   (cmd | seq), param);

        do {
                /* Wait for MFW response */
                OSAL_UDELAY(delay);
                *o_mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);

                /* Give the FW up to 5 seconds (500*1000 polls of 10 usec) */
        } while ((seq != (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) &&
                 (cnt++ < ECORE_DRV_MB_MAX_RETRIES));

        /* NOTE(review): 'cnt * delay' is in usec although the format
         * string says "ms".
         */
        DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                   "[after %d ms] read (%x) seq is (%x) from FW MB\n",
                   cnt * delay, *o_mcp_resp, seq);

        /* Is this a reply to our command? */
        if (seq == (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) {
                *o_mcp_resp &= FW_MSG_CODE_MASK;
                /* Get the MCP param */
                *o_mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);
        } else {
                /* FW BUG! */
                DP_ERR(p_hwfn, "MFW failed to respond [cmd 0x%x param 0x%x]\n",
                       cmd, param);
                *o_mcp_resp = 0;
                rc = ECORE_AGAIN;
                ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_MFW_RESP_FAIL);
        }
        return rc;
}
314
/* Send a mailbox command with no payload to the MFW and return its
 * response code and parameter. On emulation there is no MFW; only the
 * emulated load counters are maintained.
 */
enum _ecore_status_t ecore_mcp_cmd(struct ecore_hwfn *p_hwfn,
                                   struct ecore_ptt *p_ptt, u32 cmd, u32 param,
                                   u32 *o_mcp_resp, u32 *o_mcp_param)
{
#ifndef ASIC_ONLY
        if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
                /* Mirror the bookkeeping done by ecore_mcp_mf_workaround() */
                if (cmd == DRV_MSG_CODE_UNLOAD_REQ) {
                        loaded--;
                        loaded_port[p_hwfn->port_id]--;
                        DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Unload cnt: 0x%x\n",
                                   loaded);
                }
                return ECORE_SUCCESS;
        }
#endif

        /* NULL union data - nothing to copy into the mailbox payload area */
        return ecore_mcp_cmd_and_union(p_hwfn, p_ptt, cmd, param, OSAL_NULL,
                                       o_mcp_resp, o_mcp_param);
}
334
/* Send a mailbox command, optionally copying 'p_union_data' into the
 * mailbox union_data area first. All mailbox traffic is serialized with
 * the mcp_info spinlock. Returns ECORE_BUSY when the MFW is not
 * initialized, otherwise the result of ecore_do_mcp_cmd().
 */
enum _ecore_status_t ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn,
                                             struct ecore_ptt *p_ptt,
                                             u32 cmd, u32 param,
                                             union drv_union_data *p_union_data,
                                             u32 *o_mcp_resp,
                                             u32 *o_mcp_param)
{
        u32 union_data_addr;
        enum _ecore_status_t rc;

        /* MCP not initialized */
        if (!ecore_mcp_is_init(p_hwfn)) {
                DP_NOTICE(p_hwfn, true, "MFW is not initialized !\n");
                return ECORE_BUSY;
        }

        /* Acquiring a spinlock is needed to ensure that only a single thread
         * is accessing the mailbox at a certain time.
         */
        OSAL_SPIN_LOCK(&p_hwfn->mcp_info->lock);

        /* Stage the payload in shmem before issuing the command */
        if (p_union_data != OSAL_NULL) {
                union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
                    OFFSETOF(struct public_drv_mb, union_data);
                ecore_memcpy_to(p_hwfn, p_ptt, union_data_addr, p_union_data,
                                sizeof(*p_union_data));
        }

        rc = ecore_do_mcp_cmd(p_hwfn, p_ptt, cmd, param, o_mcp_resp,
                              o_mcp_param);

        OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->lock);

        return rc;
}
370
/* Send a mailbox command with a raw write payload: 'i_txn_size' bytes
 * from 'i_buf' are staged in the mailbox union_data area.
 * NOTE(review): 'i_txn_size' is not validated against
 * sizeof(union_data.raw_data) - callers are presumed to stay within the
 * union's size; confirm.
 */
enum _ecore_status_t ecore_mcp_nvm_wr_cmd(struct ecore_hwfn *p_hwfn,
                                          struct ecore_ptt *p_ptt,
                                          u32 cmd,
                                          u32 param,
                                          u32 *o_mcp_resp,
                                          u32 *o_mcp_param,
                                          u32 i_txn_size, u32 *i_buf)
{
        union drv_union_data union_data;

        OSAL_MEMCPY((u32 *)&union_data.raw_data, i_buf, i_txn_size);

        return ecore_mcp_cmd_and_union(p_hwfn, p_ptt, cmd, param, &union_data,
                                       o_mcp_resp, o_mcp_param);
}
386
/* Send a mailbox command and read back a raw payload: after the command
 * completes, the MFW-reported size (mcp param) is returned in
 * '*o_txn_size' and that many bytes are copied dword-by-dword from the
 * mailbox union_data area into 'o_buf'.
 * NOTE(review): assumes 'o_buf' can hold '*o_mcp_param' bytes - confirm
 * against callers.
 */
enum _ecore_status_t ecore_mcp_nvm_rd_cmd(struct ecore_hwfn *p_hwfn,
                                          struct ecore_ptt *p_ptt,
                                          u32 cmd,
                                          u32 param,
                                          u32 *o_mcp_resp,
                                          u32 *o_mcp_param,
                                          u32 *o_txn_size, u32 *o_buf)
{
        enum _ecore_status_t rc;
        u32 i;

        /* MCP not initialized */
        if (!ecore_mcp_is_init(p_hwfn)) {
                DP_NOTICE(p_hwfn, true, "MFW is not initialized !\n");
                return ECORE_BUSY;
        }

        /* Hold the mailbox lock across both the command and the read-back
         * so no other command can overwrite union_data in between.
         */
        OSAL_SPIN_LOCK(&p_hwfn->mcp_info->lock);
        rc = ecore_do_mcp_cmd(p_hwfn, p_ptt, cmd, param, o_mcp_resp,
                              o_mcp_param);
        if (rc != ECORE_SUCCESS)
                goto out;

        /* Get payload after operation completes successfully */
        *o_txn_size = *o_mcp_param;
        for (i = 0; i < *o_txn_size; i += 4)
                o_buf[i / sizeof(u32)] = DRV_MB_RD(p_hwfn, p_ptt,
                                                   union_data.raw_data[i]);

out:
        OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->lock);
        return rc;
}
420
421 #ifndef ASIC_ONLY
422 static void ecore_mcp_mf_workaround(struct ecore_hwfn *p_hwfn,
423                                     u32 *p_load_code)
424 {
425         static int load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
426
427         if (!loaded)
428                 load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
429         else if (!loaded_port[p_hwfn->port_id])
430                 load_phase = FW_MSG_CODE_DRV_LOAD_PORT;
431         else
432                 load_phase = FW_MSG_CODE_DRV_LOAD_FUNCTION;
433
434         /* On CMT, always tell that it's engine */
435         if (p_hwfn->p_dev->num_hwfns > 1)
436                 load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE;
437
438         *p_load_code = load_phase;
439         loaded++;
440         loaded_port[p_hwfn->port_id]++;
441
442         DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
443                    "Load phase: %x load cnt: 0x%x port id=%d port_load=%d\n",
444                    *p_load_code, loaded, p_hwfn->port_id,
445                    loaded_port[p_hwfn->port_id]);
446 }
447 #endif
448
/* Issue DRV_MSG_CODE_LOAD_REQ to the MFW, passing the driver version
 * string, PDA compatibility version, HSI version and driver type. The
 * MFW-granted load phase (engine/port/function) is returned through
 * 'p_load_code'. Returns ECORE_BUSY if the MFW refuses the load.
 */
enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn,
                                        struct ecore_ptt *p_ptt,
                                        u32 *p_load_code)
{
        struct ecore_dev *p_dev = p_hwfn->p_dev;
        union drv_union_data union_data;
        u32 param;
        enum _ecore_status_t rc;

#ifndef ASIC_ONLY
        /* No MFW on emulation - arbitrate the load phase locally */
        if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
                ecore_mcp_mf_workaround(p_hwfn, p_load_code);
                return ECORE_SUCCESS;
        }
#endif

        /* The driver version string travels in the mailbox payload */
        OSAL_MEMCPY(&union_data.ver_str, p_dev->ver_str, MCP_DRV_VER_STR_SIZE);

        rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, DRV_MSG_CODE_LOAD_REQ,
                                     (PDA_COMP | DRV_ID_MCP_HSI_VER_CURRENT |
                                      p_dev->drv_type),
                                     &union_data, p_load_code, &param);

        /* if mcp fails to respond we must abort */
        if (rc != ECORE_SUCCESS) {
                DP_ERR(p_hwfn, "MCP response failure, aborting\n");
                return rc;
        }

        /* If MFW refused (e.g. other port is in diagnostic mode) we
         * must abort. This can happen in the following cases:
         * - Other port is in diagnostic mode
         * - Previously loaded function on the engine is not compliant with
         *   the requester.
         * - MFW cannot cope with the requester's DRV_MFW_HSI_VERSION.
         *      -
         */
        if (!(*p_load_code) ||
            ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI) ||
            ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_PDA) ||
            ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG)) {
                DP_ERR(p_hwfn, "MCP refused load request, aborting\n");
                return ECORE_BUSY;
        }

        return ECORE_SUCCESS;
}
496
/* Handle an MFW "VF disabled" notification: read the per-path
 * mcp_vf_disabled bitmap from shmem, mark the corresponding VFs as
 * FLR-ed, and notify the OS layer when any were marked.
 */
static void ecore_mcp_handle_vf_flr(struct ecore_hwfn *p_hwfn,
                                    struct ecore_ptt *p_ptt)
{
        u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
                                        PUBLIC_PATH);
        u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
        u32 path_addr = SECTION_ADDR(mfw_path_offsize,
                                     ECORE_PATH_ID(p_hwfn));
        u32 disabled_vfs[VF_MAX_STATIC / 32];   /* one bit per VF */
        int i;

        DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                   "Reading Disabled VF information from [offset %08x],"
                   " path_addr %08x\n",
                   mfw_path_offsize, path_addr);

        /* Gather the bitmap one dword (32 VFs) at a time */
        for (i = 0; i < (VF_MAX_STATIC / 32); i++) {
                disabled_vfs[i] = ecore_rd(p_hwfn, p_ptt,
                                           path_addr +
                                           OFFSETOF(struct public_path,
                                                    mcp_vf_disabled) +
                                           sizeof(u32) * i);
                DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
                           "FLR-ed VFs [%08x,...,%08x] - %08x\n",
                           i * 32, (i + 1) * 32 - 1, disabled_vfs[i]);
        }

        if (ecore_iov_mark_vf_flr(p_hwfn, disabled_vfs))
                OSAL_VF_FLR_UPDATE(p_hwfn);
}
527
/* Acknowledge completed VF FLRs to the MFW: send the 'vfs_to_ack' bitmap
 * via DRV_MSG_CODE_VF_DISABLED_DONE, then clear the per-function ack bits
 * in shmem (a temporary workaround - the MFW should do this itself).
 * Returns ECORE_TIMEOUT if the MFW does not respond.
 */
enum _ecore_status_t ecore_mcp_ack_vf_flr(struct ecore_hwfn *p_hwfn,
                                          struct ecore_ptt *p_ptt,
                                          u32 *vfs_to_ack)
{
        u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
                                        PUBLIC_FUNC);
        u32 mfw_func_offsize = ecore_rd(p_hwfn, p_ptt, addr);
        u32 func_addr = SECTION_ADDR(mfw_func_offsize,
                                     MCP_PF_ID(p_hwfn));
        union drv_union_data union_data;
        u32 resp, param;
        enum _ecore_status_t rc;
        int i;

        for (i = 0; i < (VF_MAX_STATIC / 32); i++)
                DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
                           "Acking VFs [%08x,...,%08x] - %08x\n",
                           i * 32, (i + 1) * 32 - 1, vfs_to_ack[i]);

        /* VF_MAX_STATIC bits == VF_MAX_STATIC / 8 bytes of bitmap */
        OSAL_MEMCPY(&union_data.ack_vf_disabled, vfs_to_ack, VF_MAX_STATIC / 8);

        rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt,
                                     DRV_MSG_CODE_VF_DISABLED_DONE, 0,
                                     &union_data, &resp, &param);
        if (rc != ECORE_SUCCESS) {
                DP_NOTICE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV),
                          "Failed to pass ACK for VF flr to MFW\n");
                return ECORE_TIMEOUT;
        }

        /* TMP - clear the ACK bits; should be done by MFW */
        for (i = 0; i < (VF_MAX_STATIC / 32); i++)
                ecore_wr(p_hwfn, p_ptt,
                         func_addr +
                         OFFSETOF(struct public_func, drv_ack_vf_disabled) +
                         i * sizeof(u32), 0);

        return rc;
}
567
/* Handle an MFW transceiver-change notification: read the port's
 * transceiver_data from shmem and log whether a module is present.
 */
static void ecore_mcp_handle_transceiver_change(struct ecore_hwfn *p_hwfn,
                                                struct ecore_ptt *p_ptt)
{
        u32 transceiver_state;

        transceiver_state = ecore_rd(p_hwfn, p_ptt,
                                     p_hwfn->mcp_info->port_addr +
                                     OFFSETOF(struct public_port,
                                              transceiver_data));

        /* NOTE(review): the concatenated format string lacks a space
         * between "mfw" and "[Addr".
         */
        DP_VERBOSE(p_hwfn, (ECORE_MSG_HW | ECORE_MSG_SP),
                   "Received transceiver state update [0x%08x] from mfw"
                   "[Addr 0x%x]\n",
                   transceiver_state, (u32)(p_hwfn->mcp_info->port_addr +
                                            OFFSETOF(struct public_port,
                                                     transceiver_data)));

        transceiver_state = GET_FIELD(transceiver_state, PMM_TRANSCEIVER_STATE);

        if (transceiver_state == PMM_TRANSCEIVER_STATE_PRESENT)
                DP_NOTICE(p_hwfn, false, "Transceiver is present.\n");
        else
                DP_NOTICE(p_hwfn, false, "Transceiver is unplugged.\n");
}
592
/* Handle an MFW link-change notification (or a reset request):
 * re-populate p_hwfn->mcp_info->link_output from the port's shmem
 * link_status dword - link up/down, speed/duplex, bandwidth-corrected
 * line speed, autoneg/PFC state and partner advertisements - then notify
 * the OS layer. With b_reset the state is simply zeroed.
 */
static void ecore_mcp_handle_link_change(struct ecore_hwfn *p_hwfn,
                                         struct ecore_ptt *p_ptt, bool b_reset)
{
        struct ecore_mcp_link_state *p_link;
        u32 status = 0;

        /* Start from a clean state on every update */
        p_link = &p_hwfn->mcp_info->link_output;
        OSAL_MEMSET(p_link, 0, sizeof(*p_link));
        if (!b_reset) {
                status = ecore_rd(p_hwfn, p_ptt,
                                  p_hwfn->mcp_info->port_addr +
                                  OFFSETOF(struct public_port, link_status));
                DP_VERBOSE(p_hwfn, (ECORE_MSG_LINK | ECORE_MSG_SP),
                           "Received link update [0x%08x] from mfw"
                           " [Addr 0x%x]\n",
                           status, (u32)(p_hwfn->mcp_info->port_addr +
                                          OFFSETOF(struct public_port,
                                                   link_status)));
        } else {
                /* Reset request: leave link_output zeroed, skip the OS
                 * notification entirely
                 */
                DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
                           "Resetting link indications\n");
                return;
        }

        /* Only report link-up once the driver itself initialized the link */
        if (p_hwfn->b_drv_link_init)
                p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
        else
                p_link->link_up = false;

        /* Decode speed/duplex; only the 1G half-duplex code clears
         * full_duplex (note the deliberate fall-through)
         */
        p_link->full_duplex = true;
        switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) {
        case LINK_STATUS_SPEED_AND_DUPLEX_100G:
                p_link->speed = 100000;
                break;
        case LINK_STATUS_SPEED_AND_DUPLEX_50G:
                p_link->speed = 50000;
                break;
        case LINK_STATUS_SPEED_AND_DUPLEX_40G:
                p_link->speed = 40000;
                break;
        case LINK_STATUS_SPEED_AND_DUPLEX_25G:
                p_link->speed = 25000;
                break;
        case LINK_STATUS_SPEED_AND_DUPLEX_20G:
                p_link->speed = 20000;
                break;
        case LINK_STATUS_SPEED_AND_DUPLEX_10G:
                p_link->speed = 10000;
                break;
        case LINK_STATUS_SPEED_AND_DUPLEX_1000THD:
                p_link->full_duplex = false;
                /* Fall-through */
        case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD:
                p_link->speed = 1000;
                break;
        default:
                p_link->speed = 0;
        }

        /* We never store total line speed as p_link->speed is
         * again changes according to bandwidth allocation.
         */
        if (p_link->link_up && p_link->speed)
                p_link->line_speed = p_link->speed;
        else
                p_link->line_speed = 0;

        /* Correct speed according to bandwidth allocation */
        if (p_hwfn->mcp_info->func_info.bandwidth_max && p_link->speed) {
                u8 max_bw = p_hwfn->mcp_info->func_info.bandwidth_max;

                __ecore_configure_pf_max_bandwidth(p_hwfn, p_ptt,
                                                   p_link, max_bw);
        }

        if (p_hwfn->mcp_info->func_info.bandwidth_min && p_link->speed) {
                u8 min_bw = p_hwfn->mcp_info->func_info.bandwidth_min;

                __ecore_configure_pf_min_bandwidth(p_hwfn, p_ptt,
                                                   p_link, min_bw);

                /* Propagate the new minimum rate to the VPORT WFQ config */
                ecore_configure_vp_wfq_on_link_change(p_hwfn->p_dev,
                                                      p_link->min_pf_rate);
        }

        /* Autoneg / detection / PFC status bits */
        p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED);
        p_link->an_complete = !!(status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE);
        p_link->parallel_detection = !!(status &
                                         LINK_STATUS_PARALLEL_DETECTION_USED);
        p_link->pfc_enabled = !!(status & LINK_STATUS_PFC_ENABLED);

        /* Accumulate the link partner's advertised speed capabilities */
        p_link->partner_adv_speed |=
            (status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) ?
            ECORE_LINK_PARTNER_SPEED_1G_FD : 0;
        p_link->partner_adv_speed |=
            (status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE) ?
            ECORE_LINK_PARTNER_SPEED_1G_HD : 0;
        p_link->partner_adv_speed |=
            (status & LINK_STATUS_LINK_PARTNER_10G_CAPABLE) ?
            ECORE_LINK_PARTNER_SPEED_10G : 0;
        p_link->partner_adv_speed |=
            (status & LINK_STATUS_LINK_PARTNER_20G_CAPABLE) ?
            ECORE_LINK_PARTNER_SPEED_20G : 0;
        p_link->partner_adv_speed |=
            (status & LINK_STATUS_LINK_PARTNER_25G_CAPABLE) ?
            ECORE_LINK_PARTNER_SPEED_25G : 0;
        p_link->partner_adv_speed |=
            (status & LINK_STATUS_LINK_PARTNER_40G_CAPABLE) ?
            ECORE_LINK_PARTNER_SPEED_40G : 0;
        p_link->partner_adv_speed |=
            (status & LINK_STATUS_LINK_PARTNER_50G_CAPABLE) ?
            ECORE_LINK_PARTNER_SPEED_50G : 0;
        p_link->partner_adv_speed |=
            (status & LINK_STATUS_LINK_PARTNER_100G_CAPABLE) ?
            ECORE_LINK_PARTNER_SPEED_100G : 0;

        p_link->partner_tx_flow_ctrl_en =
            !!(status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED);
        p_link->partner_rx_flow_ctrl_en =
            !!(status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED);

        /* Decode the partner's pause (flow-control) advertisement */
        switch (status & LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK) {
        case LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE:
                p_link->partner_adv_pause = ECORE_LINK_PARTNER_SYMMETRIC_PAUSE;
                break;
        case LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE:
                p_link->partner_adv_pause = ECORE_LINK_PARTNER_ASYMMETRIC_PAUSE;
                break;
        case LINK_STATUS_LINK_PARTNER_BOTH_PAUSE:
                p_link->partner_adv_pause = ECORE_LINK_PARTNER_BOTH_PAUSE;
                break;
        default:
                p_link->partner_adv_pause = 0;
        }

        p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT);

        /* Apply the DCBX Eagle workaround whenever the link comes up */
        if (p_link->link_up)
                ecore_dcbx_eagle_workaround(p_hwfn, p_ptt, p_link->pfc_enabled);

        OSAL_LINK_UPDATE(p_hwfn);
}
735
/* Configure the PHY through the MFW according to mcp_info->link_input:
 * with b_up send DRV_MSG_CODE_INIT_PHY (speed, pause, advertised speeds,
 * loopback), otherwise DRV_MSG_CODE_LINK_RESET followed by a local reset
 * of the link indications.
 */
enum _ecore_status_t ecore_mcp_set_link(struct ecore_hwfn *p_hwfn,
                                        struct ecore_ptt *p_ptt, bool b_up)
{
        struct ecore_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
        union drv_union_data union_data;
        struct pmm_phy_cfg *p_phy_cfg;
        u32 param = 0, reply = 0, cmd;
        enum _ecore_status_t rc = ECORE_SUCCESS;

#ifndef ASIC_ONLY
        /* No MFW/PHY on emulation */
        if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
                return ECORE_SUCCESS;
#endif

        /* Set the shmem configuration according to params */
        p_phy_cfg = &union_data.drv_phy_cfg;
        OSAL_MEMSET(p_phy_cfg, 0, sizeof(*p_phy_cfg));
        cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
        /* A forced speed is only meaningful with autoneg disabled */
        if (!params->speed.autoneg)
                p_phy_cfg->speed = params->speed.forced_speed;
        p_phy_cfg->pause |= (params->pause.autoneg) ? PMM_PAUSE_AUTONEG : 0;
        p_phy_cfg->pause |= (params->pause.forced_rx) ? PMM_PAUSE_RX : 0;
        p_phy_cfg->pause |= (params->pause.forced_tx) ? PMM_PAUSE_TX : 0;
        p_phy_cfg->adv_speed = params->speed.advertised_speeds;
        p_phy_cfg->loopback_mode = params->loopback_mode;

#ifndef ASIC_ONLY
        if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
                DP_INFO(p_hwfn,
                        "Link on FPGA - Ask for loopback mode '5' at 10G\n");
                p_phy_cfg->loopback_mode = 5;
                p_phy_cfg->speed = 10000;
        }
#endif

        /* Remember whether the driver initialized the link; consulted by
         * ecore_mcp_handle_link_change() before reporting link-up
         */
        p_hwfn->b_drv_link_init = b_up;

        if (b_up)
                DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
                           "Configuring Link: Speed 0x%08x, Pause 0x%08x,"
                           " adv_speed 0x%08x, loopback 0x%08x,"
                           " features 0x%08x\n",
                           p_phy_cfg->speed, p_phy_cfg->pause,
                           p_phy_cfg->adv_speed, p_phy_cfg->loopback_mode,
                           p_phy_cfg->feature_config_flags);
        else
                DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, "Resetting link\n");

        rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, cmd, 0, &union_data, &reply,
                                     &param);

        /* if mcp fails to respond we must abort */
        if (rc != ECORE_SUCCESS) {
                DP_ERR(p_hwfn, "MCP response failure, aborting\n");
                return rc;
        }

        /* Reset the link status if needed */
        if (!b_up)
                ecore_mcp_handle_link_change(p_hwfn, p_ptt, true);

        return rc;
}
799
800 u32 ecore_get_process_kill_counter(struct ecore_hwfn *p_hwfn,
801                                    struct ecore_ptt *p_ptt)
802 {
803         u32 path_offsize_addr, path_offsize, path_addr, proc_kill_cnt;
804
805         /* TODO - Add support for VFs */
806         if (IS_VF(p_hwfn->p_dev))
807                 return ECORE_INVAL;
808
809         path_offsize_addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
810                                                  PUBLIC_PATH);
811         path_offsize = ecore_rd(p_hwfn, p_ptt, path_offsize_addr);
812         path_addr = SECTION_ADDR(path_offsize, ECORE_PATH_ID(p_hwfn));
813
814         proc_kill_cnt = ecore_rd(p_hwfn, p_ptt,
815                                  path_addr +
816                                  OFFSETOF(struct public_path, process_kill)) &
817             PROCESS_KILL_COUNTER_MASK;
818
819         return proc_kill_cnt;
820 }
821
822 static void ecore_mcp_handle_process_kill(struct ecore_hwfn *p_hwfn,
823                                           struct ecore_ptt *p_ptt)
824 {
825         struct ecore_dev *p_dev = p_hwfn->p_dev;
826         u32 proc_kill_cnt;
827
828         /* Prevent possible attentions/interrupts during the recovery handling
829          * and till its load phase, during which they will be re-enabled.
830          */
831         ecore_int_igu_disable_int(p_hwfn, p_ptt);
832
833         DP_NOTICE(p_hwfn, false, "Received a process kill indication\n");
834
835         /* The following operations should be done once, and thus in CMT mode
836          * are carried out by only the first HW function.
837          */
838         if (p_hwfn != ECORE_LEADING_HWFN(p_dev))
839                 return;
840
841         if (p_dev->recov_in_prog) {
842                 DP_NOTICE(p_hwfn, false,
843                           "Ignoring the indication since a recovery"
844                           " process is already in progress\n");
845                 return;
846         }
847
848         p_dev->recov_in_prog = true;
849
850         proc_kill_cnt = ecore_get_process_kill_counter(p_hwfn, p_ptt);
851         DP_NOTICE(p_hwfn, false, "Process kill counter: %d\n", proc_kill_cnt);
852
853         OSAL_SCHEDULE_RECOVERY_HANDLER(p_hwfn);
854 }
855
856 static void ecore_mcp_send_protocol_stats(struct ecore_hwfn *p_hwfn,
857                                           struct ecore_ptt *p_ptt,
858                                           enum MFW_DRV_MSG_TYPE type)
859 {
860         enum ecore_mcp_protocol_type stats_type;
861         union ecore_mcp_protocol_stats stats;
862         u32 hsi_param, param = 0, reply = 0;
863         union drv_union_data union_data;
864
865         switch (type) {
866         case MFW_DRV_MSG_GET_LAN_STATS:
867                 stats_type = ECORE_MCP_LAN_STATS;
868                 hsi_param = DRV_MSG_CODE_STATS_TYPE_LAN;
869                 break;
870         default:
871                 DP_NOTICE(p_hwfn, false, "Invalid protocol type %d\n", type);
872                 return;
873         }
874
875         OSAL_GET_PROTOCOL_STATS(p_hwfn->p_dev, stats_type, &stats);
876
877         OSAL_MEMCPY(&union_data, &stats, sizeof(stats));
878
879         ecore_mcp_cmd_and_union(p_hwfn, p_ptt, DRV_MSG_CODE_GET_STATS,
880                                 hsi_param, &union_data, &reply, &param);
881 }
882
883 static u32 ecore_mcp_get_shmem_func(struct ecore_hwfn *p_hwfn,
884                                     struct ecore_ptt *p_ptt,
885                                     struct public_func *p_data, int pfid)
886 {
887         u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
888                                         PUBLIC_FUNC);
889         u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr);
890         u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
891         u32 i, size;
892
893         OSAL_MEM_ZERO(p_data, sizeof(*p_data));
894
895         size = OSAL_MIN_T(u32, sizeof(*p_data), SECTION_SIZE(mfw_path_offsize));
896         for (i = 0; i < size / sizeof(u32); i++)
897                 ((u32 *)p_data)[i] = ecore_rd(p_hwfn, p_ptt,
898                                               func_addr + (i << 2));
899
900         return size;
901 }
902
903 static void
904 ecore_read_pf_bandwidth(struct ecore_hwfn *p_hwfn,
905                         struct public_func *p_shmem_info)
906 {
907         struct ecore_mcp_function_info *p_info;
908
909         p_info = &p_hwfn->mcp_info->func_info;
910
911         /* TODO - bandwidth min/max should have valid values of 1-100,
912          * as well as some indication that the feature is disabled.
913          * Until MFW/qlediag enforce those limitations, Assume THERE IS ALWAYS
914          * limit and correct value to min `1' and max `100' if limit isn't in
915          * range.
916          */
917         p_info->bandwidth_min = (p_shmem_info->config &
918                                  FUNC_MF_CFG_MIN_BW_MASK) >>
919             FUNC_MF_CFG_MIN_BW_SHIFT;
920         if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) {
921                 DP_INFO(p_hwfn,
922                         "bandwidth minimum out of bounds [%02x]. Set to 1\n",
923                         p_info->bandwidth_min);
924                 p_info->bandwidth_min = 1;
925         }
926
927         p_info->bandwidth_max = (p_shmem_info->config &
928                                  FUNC_MF_CFG_MAX_BW_MASK) >>
929             FUNC_MF_CFG_MAX_BW_SHIFT;
930         if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) {
931                 DP_INFO(p_hwfn,
932                         "bandwidth maximum out of bounds [%02x]. Set to 100\n",
933                         p_info->bandwidth_max);
934                 p_info->bandwidth_max = 100;
935         }
936 }
937
938 static void
939 ecore_mcp_update_bw(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
940 {
941         struct ecore_mcp_function_info *p_info;
942         struct public_func shmem_info;
943         u32 resp = 0, param = 0;
944
945         ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
946
947         ecore_read_pf_bandwidth(p_hwfn, &shmem_info);
948
949         p_info = &p_hwfn->mcp_info->func_info;
950
951         ecore_configure_pf_min_bandwidth(p_hwfn->p_dev, p_info->bandwidth_min);
952
953         ecore_configure_pf_max_bandwidth(p_hwfn->p_dev, p_info->bandwidth_max);
954
955         /* Acknowledge the MFW */
956         ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0, &resp,
957                       &param);
958 }
959
960 static void ecore_mcp_handle_fan_failure(struct ecore_hwfn *p_hwfn,
961                                          struct ecore_ptt *p_ptt)
962 {
963         /* A single notification should be sent to upper driver in CMT mode */
964         if (p_hwfn != ECORE_LEADING_HWFN(p_hwfn->p_dev))
965                 return;
966
967         DP_NOTICE(p_hwfn, false,
968                   "Fan failure was detected on the network interface card"
969                   " and it's going to be shut down.\n");
970
971         ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_FAN_FAIL);
972 }
973
/* Main MFW -> driver message dispatcher. Reads the MFW mailbox, invokes a
 * handler for every command dword that changed relative to the shadow
 * copy, acknowledges all messages back to the MFW (in big-endian), and
 * refreshes the shadow. Returns ECORE_INVAL if an unimplemented message
 * was received or if no message actually changed.
 */
enum _ecore_status_t ecore_mcp_handle_events(struct ecore_hwfn *p_hwfn,
					     struct ecore_ptt *p_ptt)
{
	struct ecore_mcp_info *info = p_hwfn->mcp_info;
	enum _ecore_status_t rc = ECORE_SUCCESS;
	bool found = false;
	u16 i;

	DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Received message from MFW\n");

	/* Read Messages from MFW */
	ecore_mcp_read_mb(p_hwfn, p_ptt);

	/* Compare current messages to old ones */
	for (i = 0; i < info->mfw_mb_length; i++) {
		if (info->mfw_mb_cur[i] == info->mfw_mb_shadow[i])
			continue;

		found = true;

		DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
			   "Msg [%d] - old CMD 0x%02x, new CMD 0x%02x\n",
			   i, info->mfw_mb_shadow[i], info->mfw_mb_cur[i]);

		/* The mailbox index identifies the message type */
		switch (i) {
		case MFW_DRV_MSG_LINK_CHANGE:
			ecore_mcp_handle_link_change(p_hwfn, p_ptt, false);
			break;
		case MFW_DRV_MSG_VF_DISABLED:
			ecore_mcp_handle_vf_flr(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_LLDP_DATA_UPDATED:
			ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
						    ECORE_DCBX_REMOTE_LLDP_MIB);
			break;
		case MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED:
			ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
						    ECORE_DCBX_REMOTE_MIB);
			break;
		case MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED:
			ecore_dcbx_mib_update_event(p_hwfn, p_ptt,
						    ECORE_DCBX_OPERATIONAL_MIB);
			break;
		case MFW_DRV_MSG_ERROR_RECOVERY:
			ecore_mcp_handle_process_kill(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_GET_LAN_STATS:
		case MFW_DRV_MSG_GET_FCOE_STATS:
		case MFW_DRV_MSG_GET_ISCSI_STATS:
		case MFW_DRV_MSG_GET_RDMA_STATS:
			/* Only LAN is handled; others are rejected inside */
			ecore_mcp_send_protocol_stats(p_hwfn, p_ptt, i);
			break;
		case MFW_DRV_MSG_BW_UPDATE:
			ecore_mcp_update_bw(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
			ecore_mcp_handle_transceiver_change(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_FAILURE_DETECTED:
			ecore_mcp_handle_fan_failure(p_hwfn, p_ptt);
			break;
		default:
			/* @DPDK */
			DP_NOTICE(p_hwfn, false,
				  "Unimplemented MFW message %d\n", i);
			rc = ECORE_INVAL;
		}
	}

	/* ACK everything */
	for (i = 0; i < MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length); i++) {
		OSAL_BE32 val = OSAL_CPU_TO_BE32(((u32 *)info->mfw_mb_cur)[i]);

		/* MFW expect answer in BE, so we force write in that format */
		/* The ack area follows the header dword and the message
		 * dwords in the mailbox, hence the address arithmetic below.
		 */
		ecore_wr(p_hwfn, p_ptt,
			 info->mfw_mb_addr + sizeof(u32) +
			 MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length) *
			 sizeof(u32) + i * sizeof(u32), val);
	}

	if (!found) {
		DP_NOTICE(p_hwfn, false,
			  "Received an MFW message indication but no"
			  " new message!\n");
		rc = ECORE_INVAL;
	}

	/* Copy the new mfw messages into the shadow */
	OSAL_MEMCPY(info->mfw_mb_shadow, info->mfw_mb_cur, info->mfw_mb_length);

	return rc;
}
1066
1067 enum _ecore_status_t ecore_mcp_get_mfw_ver(struct ecore_dev *p_dev,
1068                                            struct ecore_ptt *p_ptt,
1069                                            u32 *p_mfw_ver,
1070                                            u32 *p_running_bundle_id)
1071 {
1072         struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
1073         u32 global_offsize;
1074
1075 #ifndef ASIC_ONLY
1076         if (CHIP_REV_IS_EMUL(p_dev)) {
1077                 DP_NOTICE(p_dev, false, "Emulation - can't get MFW version\n");
1078                 return ECORE_SUCCESS;
1079         }
1080 #endif
1081
1082         if (IS_VF(p_dev)) {
1083                 if (p_hwfn->vf_iov_info) {
1084                         struct pfvf_acquire_resp_tlv *p_resp;
1085
1086                         p_resp = &p_hwfn->vf_iov_info->acquire_resp;
1087                         *p_mfw_ver = p_resp->pfdev_info.mfw_ver;
1088                         return ECORE_SUCCESS;
1089                 }
1090
1091                 DP_VERBOSE(p_dev, ECORE_MSG_IOV,
1092                            "VF requested MFW vers prior to ACQUIRE\n");
1093                 return ECORE_INVAL;
1094         }
1095
1096         global_offsize = ecore_rd(p_hwfn, p_ptt,
1097                                   SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->
1098                                                        public_base,
1099                                                        PUBLIC_GLOBAL));
1100         *p_mfw_ver =
1101             ecore_rd(p_hwfn, p_ptt,
1102                      SECTION_ADDR(global_offsize,
1103                                   0) + OFFSETOF(struct public_global, mfw_ver));
1104
1105         if (p_running_bundle_id != OSAL_NULL) {
1106                 *p_running_bundle_id = ecore_rd(p_hwfn, p_ptt,
1107                                                 SECTION_ADDR(global_offsize,
1108                                                              0) +
1109                                                 OFFSETOF(struct public_global,
1110                                                          running_bundle_id));
1111         }
1112
1113         return ECORE_SUCCESS;
1114 }
1115
1116 enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_dev *p_dev,
1117                                               u32 *p_media_type)
1118 {
1119         struct ecore_hwfn *p_hwfn = &p_dev->hwfns[0];
1120         struct ecore_ptt *p_ptt;
1121
1122         /* TODO - Add support for VFs */
1123         if (IS_VF(p_dev))
1124                 return ECORE_INVAL;
1125
1126         if (!ecore_mcp_is_init(p_hwfn)) {
1127                 DP_NOTICE(p_hwfn, true, "MFW is not initialized !\n");
1128                 return ECORE_BUSY;
1129         }
1130
1131         *p_media_type = MEDIA_UNSPECIFIED;
1132
1133         p_ptt = ecore_ptt_acquire(p_hwfn);
1134         if (!p_ptt)
1135                 return ECORE_BUSY;
1136
1137         *p_media_type = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
1138                                  OFFSETOF(struct public_port, media_type));
1139
1140         ecore_ptt_release(p_hwfn, p_ptt);
1141
1142         return ECORE_SUCCESS;
1143 }
1144
1145 static enum _ecore_status_t
1146 ecore_mcp_get_shmem_proto(struct ecore_hwfn *p_hwfn,
1147                           struct public_func *p_info,
1148                           enum ecore_pci_personality *p_proto)
1149 {
1150         enum _ecore_status_t rc = ECORE_SUCCESS;
1151
1152         switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) {
1153         case FUNC_MF_CFG_PROTOCOL_ETHERNET:
1154                 *p_proto = ECORE_PCI_ETH;
1155                 break;
1156         default:
1157                 rc = ECORE_INVAL;
1158         }
1159
1160         return rc;
1161 }
1162
/* Populate p_hwfn->mcp_info->func_info from this PF's shmem public_func
 * section: pause-on-host flag, PCI personality, bandwidth limits, MAC
 * address and outer VLAN. Returns ECORE_INVAL on an unknown personality.
 */
enum _ecore_status_t ecore_mcp_fill_shmem_func_info(struct ecore_hwfn *p_hwfn,
						    struct ecore_ptt *p_ptt)
{
	struct ecore_mcp_function_info *info;
	struct public_func shmem_info;

	ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
	info = &p_hwfn->mcp_info->func_info;

	info->pause_on_host = (shmem_info.config &
			       FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0;

	if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info, &info->protocol)) {
		DP_ERR(p_hwfn, "Unknown personality %08x\n",
		       (u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK));
		return ECORE_INVAL;
	}

	/* Fills bandwidth_min/max, clamped into [1, 100] */
	ecore_read_pf_bandwidth(p_hwfn, &shmem_info);

	if (shmem_info.mac_upper || shmem_info.mac_lower) {
		/* MAC is stored as two dwords: upper 2 bytes + lower 4 bytes */
		info->mac[0] = (u8)(shmem_info.mac_upper >> 8);
		info->mac[1] = (u8)(shmem_info.mac_upper);
		info->mac[2] = (u8)(shmem_info.mac_lower >> 24);
		info->mac[3] = (u8)(shmem_info.mac_lower >> 16);
		info->mac[4] = (u8)(shmem_info.mac_lower >> 8);
		info->mac[5] = (u8)(shmem_info.mac_lower);
	} else {
		/* TODO - are there protocols for which there's no MAC? */
		DP_NOTICE(p_hwfn, false, "MAC is 0 in shmem\n");
	}

	info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK);

	DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IFUP),
		   "Read configuration from shmem: pause_on_host %02x"
		    " protocol %02x BW [%02x - %02x]"
		    " MAC %02x:%02x:%02x:%02x:%02x:%02x wwn port %" PRIx64
		    " node %" PRIx64 " ovlan %04x\n",
		   info->pause_on_host, info->protocol,
		   info->bandwidth_min, info->bandwidth_max,
		   info->mac[0], info->mac[1], info->mac[2],
		   info->mac[3], info->mac[4], info->mac[5],
		   info->wwn_port, info->wwn_node, info->ovlan);

	return ECORE_SUCCESS;
}
1210
1211 struct ecore_mcp_link_params
1212 *ecore_mcp_get_link_params(struct ecore_hwfn *p_hwfn)
1213 {
1214         if (!p_hwfn || !p_hwfn->mcp_info)
1215                 return OSAL_NULL;
1216         return &p_hwfn->mcp_info->link_input;
1217 }
1218
1219 struct ecore_mcp_link_state
1220 *ecore_mcp_get_link_state(struct ecore_hwfn *p_hwfn)
1221 {
1222         if (!p_hwfn || !p_hwfn->mcp_info)
1223                 return OSAL_NULL;
1224
1225 #ifndef ASIC_ONLY
1226         if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
1227                 DP_INFO(p_hwfn, "Non-ASIC - always notify that link is up\n");
1228                 p_hwfn->mcp_info->link_output.link_up = true;
1229         }
1230 #endif
1231
1232         return &p_hwfn->mcp_info->link_output;
1233 }
1234
1235 struct ecore_mcp_link_capabilities
1236 *ecore_mcp_get_link_capabilities(struct ecore_hwfn *p_hwfn)
1237 {
1238         if (!p_hwfn || !p_hwfn->mcp_info)
1239                 return OSAL_NULL;
1240         return &p_hwfn->mcp_info->link_capabilities;
1241 }
1242
1243 enum _ecore_status_t ecore_mcp_drain(struct ecore_hwfn *p_hwfn,
1244                                      struct ecore_ptt *p_ptt)
1245 {
1246         enum _ecore_status_t rc;
1247         u32 resp = 0, param = 0;
1248
1249         rc = ecore_mcp_cmd(p_hwfn, p_ptt,
1250                            DRV_MSG_CODE_NIG_DRAIN, 100, &resp, &param);
1251
1252         /* Wait for the drain to complete before returning */
1253         OSAL_MSLEEP(120);
1254
1255         return rc;
1256 }
1257
1258 const struct ecore_mcp_function_info
1259 *ecore_mcp_get_function_info(struct ecore_hwfn *p_hwfn)
1260 {
1261         if (!p_hwfn || !p_hwfn->mcp_info)
1262                 return OSAL_NULL;
1263         return &p_hwfn->mcp_info->func_info;
1264 }
1265
1266 enum _ecore_status_t ecore_mcp_nvm_command(struct ecore_hwfn *p_hwfn,
1267                                            struct ecore_ptt *p_ptt,
1268                                            struct ecore_mcp_nvm_params *params)
1269 {
1270         enum _ecore_status_t rc;
1271
1272         switch (params->type) {
1273         case ECORE_MCP_NVM_RD:
1274                 rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, params->nvm_common.cmd,
1275                                           params->nvm_common.offset,
1276                                           &params->nvm_common.resp,
1277                                           &params->nvm_common.param,
1278                                           params->nvm_rd.buf_size,
1279                                           params->nvm_rd.buf);
1280                 break;
1281         case ECORE_MCP_CMD:
1282                 rc = ecore_mcp_cmd(p_hwfn, p_ptt, params->nvm_common.cmd,
1283                                    params->nvm_common.offset,
1284                                    &params->nvm_common.resp,
1285                                    &params->nvm_common.param);
1286                 break;
1287         case ECORE_MCP_NVM_WR:
1288                 rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt, params->nvm_common.cmd,
1289                                           params->nvm_common.offset,
1290                                           &params->nvm_common.resp,
1291                                           &params->nvm_common.param,
1292                                           params->nvm_wr.buf_size,
1293                                           params->nvm_wr.buf);
1294                 break;
1295         default:
1296                 rc = ECORE_NOTIMPL;
1297                 break;
1298         }
1299         return rc;
1300 }
1301
1302 int ecore_mcp_get_personality_cnt(struct ecore_hwfn *p_hwfn,
1303                                   struct ecore_ptt *p_ptt, u32 personalities)
1304 {
1305         enum ecore_pci_personality protocol = ECORE_PCI_DEFAULT;
1306         struct public_func shmem_info;
1307         int i, count = 0, num_pfs;
1308
1309         num_pfs = NUM_OF_ENG_PFS(p_hwfn->p_dev);
1310
1311         for (i = 0; i < num_pfs; i++) {
1312                 ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
1313                                          MCP_PF_ID_BY_REL(p_hwfn, i));
1314                 if (shmem_info.config & FUNC_MF_CFG_FUNC_HIDE)
1315                         continue;
1316
1317                 if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info,
1318                                               &protocol) != ECORE_SUCCESS)
1319                         continue;
1320
1321                 if ((1 << ((u32)protocol)) & personalities)
1322                         count++;
1323         }
1324
1325         return count;
1326 }
1327
1328 enum _ecore_status_t ecore_mcp_get_flash_size(struct ecore_hwfn *p_hwfn,
1329                                               struct ecore_ptt *p_ptt,
1330                                               u32 *p_flash_size)
1331 {
1332         u32 flash_size;
1333
1334 #ifndef ASIC_ONLY
1335         if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
1336                 DP_NOTICE(p_hwfn, false, "Emulation - can't get flash size\n");
1337                 return ECORE_INVAL;
1338         }
1339 #endif
1340
1341         if (IS_VF(p_hwfn->p_dev))
1342                 return ECORE_INVAL;
1343
1344         flash_size = ecore_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4);
1345         flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >>
1346             MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT;
1347         flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_SHIFT));
1348
1349         *p_flash_size = flash_size;
1350
1351         return ECORE_SUCCESS;
1352 }
1353
1354 enum _ecore_status_t ecore_start_recovery_process(struct ecore_hwfn *p_hwfn,
1355                                                   struct ecore_ptt *p_ptt)
1356 {
1357         struct ecore_dev *p_dev = p_hwfn->p_dev;
1358
1359         if (p_dev->recov_in_prog) {
1360                 DP_NOTICE(p_hwfn, false,
1361                           "Avoid triggering a recovery since such a process"
1362                           " is already in progress\n");
1363                 return ECORE_AGAIN;
1364         }
1365
1366         DP_NOTICE(p_hwfn, false, "Triggering a recovery process\n");
1367         ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_GENERAL_ATTN_35, 0x1);
1368
1369         return ECORE_SUCCESS;
1370 }
1371
1372 enum _ecore_status_t ecore_mcp_config_vf_msix(struct ecore_hwfn *p_hwfn,
1373                                               struct ecore_ptt *p_ptt,
1374                                               u8 vf_id, u8 num)
1375 {
1376         u32 resp = 0, param = 0, rc_param = 0;
1377         enum _ecore_status_t rc;
1378
1379         param |= (vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT) &
1380             DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK;
1381         param |= (num << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT) &
1382             DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK;
1383
1384         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_VF_MSIX, param,
1385                            &resp, &rc_param);
1386
1387         if (resp != FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE) {
1388                 DP_NOTICE(p_hwfn, true, "VF[%d]: MFW failed to set MSI-X\n",
1389                           vf_id);
1390                 rc = ECORE_INVAL;
1391         }
1392
1393         return rc;
1394 }
1395
/* Report the driver version (number + name string) to the MFW via the
 * SET_VERSION mailbox command. The name is copied one dword at a time,
 * byte-swapped to big-endian.
 */
enum _ecore_status_t
ecore_mcp_send_drv_version(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
			   struct ecore_mcp_drv_version *p_ver)
{
	u32 param = 0, reply = 0, num_words, i;
	struct drv_version_stc *p_drv_version;
	union drv_union_data union_data;
	void *p_name;
	OSAL_BE32 val;
	enum _ecore_status_t rc;

#ifndef ASIC_ONLY
	/* Emulation/FPGA platforms don't report a version */
	if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
		return ECORE_SUCCESS;
#endif

	p_drv_version = &union_data.drv_version;
	p_drv_version->version = p_ver->version;
	/* NOTE(review): the last 4 bytes of the name buffer are not copied -
	 * presumably reserved (e.g. for NUL termination); confirm against
	 * the drv_version_stc definition.
	 */
	num_words = (MCP_DRV_VER_STR_SIZE - 4) / 4;
	for (i = 0; i < num_words; i++) {
		p_name = &p_ver->name[i * sizeof(u32)];
		val = OSAL_CPU_TO_BE32(*(u32 *)p_name);
		*(u32 *)&p_drv_version->name[i * sizeof(u32)] = val;
	}

	rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, DRV_MSG_CODE_SET_VERSION, 0,
				     &union_data, &reply, &param);
	if (rc != ECORE_SUCCESS)
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");

	return rc;
}
1428
1429 enum _ecore_status_t ecore_mcp_halt(struct ecore_hwfn *p_hwfn,
1430                                     struct ecore_ptt *p_ptt)
1431 {
1432         enum _ecore_status_t rc;
1433         u32 resp = 0, param = 0;
1434
1435         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp,
1436                            &param);
1437         if (rc != ECORE_SUCCESS)
1438                 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
1439
1440         return rc;
1441 }
1442
/* Resume a previously halted MCP: clear the CPU state register and the
 * soft-halt bit in the CPU mode register, then verify the bit cleared.
 */
enum _ecore_status_t ecore_mcp_resume(struct ecore_hwfn *p_hwfn,
				      struct ecore_ptt *p_ptt)
{
	u32 value, cpu_mode;

	/* Presumably clears all latched CPU state bits - TODO confirm
	 * against the MCP register spec.
	 */
	ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff);

	value = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
	value &= ~MCP_REG_CPU_MODE_SOFT_HALT;
	ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, value);
	/* Read back to verify the soft-halt bit actually cleared */
	cpu_mode = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);

	/* NOTE(review): returns raw -1/0 rather than _ecore_status_t
	 * constants - relies on ECORE_SUCCESS being 0; verify against
	 * ecore_status.h.
	 */
	return (cpu_mode & MCP_REG_CPU_MODE_SOFT_HALT) ? -1 : 0;
}
1457
1458 enum _ecore_status_t
1459 ecore_mcp_ov_update_current_config(struct ecore_hwfn *p_hwfn,
1460                                    struct ecore_ptt *p_ptt,
1461                                    enum ecore_ov_config_method config,
1462                                    enum ecore_ov_client client)
1463 {
1464         enum _ecore_status_t rc;
1465         u32 resp = 0, param = 0;
1466         u32 drv_mb_param;
1467
1468         switch (config) {
1469         case ECORE_OV_CLIENT_DRV:
1470                 drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OS;
1471                 break;
1472         case ECORE_OV_CLIENT_USER:
1473                 drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OTHER;
1474                 break;
1475         default:
1476                 DP_NOTICE(p_hwfn, true, "Invalid client type %d\n", config);
1477                 return ECORE_INVAL;
1478         }
1479
1480         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_CURR_CFG,
1481                            drv_mb_param, &resp, &param);
1482         if (rc != ECORE_SUCCESS)
1483                 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
1484
1485         return rc;
1486 }
1487
1488 enum _ecore_status_t
1489 ecore_mcp_ov_update_driver_state(struct ecore_hwfn *p_hwfn,
1490                                  struct ecore_ptt *p_ptt,
1491                                  enum ecore_ov_driver_state drv_state)
1492 {
1493         enum _ecore_status_t rc;
1494         u32 resp = 0, param = 0;
1495         u32 drv_mb_param;
1496
1497         switch (drv_state) {
1498         case ECORE_OV_DRIVER_STATE_NOT_LOADED:
1499                 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED;
1500                 break;
1501         case ECORE_OV_DRIVER_STATE_DISABLED:
1502                 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED;
1503                 break;
1504         case ECORE_OV_DRIVER_STATE_ACTIVE:
1505                 drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE;
1506                 break;
1507         default:
1508                 DP_NOTICE(p_hwfn, true, "Invalid driver state %d\n", drv_state);
1509                 return ECORE_INVAL;
1510         }
1511
1512         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE,
1513                            drv_state, &resp, &param);
1514         if (rc != ECORE_SUCCESS)
1515                 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
1516
1517         return rc;
1518 }
1519
/* Stub - FC NPIV table retrieval is not implemented in this driver;
 * always returns success (0) without touching p_table.
 */
enum _ecore_status_t
ecore_mcp_ov_get_fc_npiv(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
			 struct ecore_fc_npiv_tbl *p_table)
{
	return 0;
}
1526
/* Stub - MTU update notification to the MFW is not implemented in this
 * driver; always returns success (0).
 */
enum _ecore_status_t
ecore_mcp_ov_update_mtu(struct ecore_hwfn *p_hwfn,
			struct ecore_ptt *p_ptt, u16 mtu)
{
	return 0;
}
1533
1534 enum _ecore_status_t ecore_mcp_set_led(struct ecore_hwfn *p_hwfn,
1535                                        struct ecore_ptt *p_ptt,
1536                                        enum ecore_led_mode mode)
1537 {
1538         u32 resp = 0, param = 0, drv_mb_param;
1539         enum _ecore_status_t rc;
1540
1541         switch (mode) {
1542         case ECORE_LED_MODE_ON:
1543                 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_ON;
1544                 break;
1545         case ECORE_LED_MODE_OFF:
1546                 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OFF;
1547                 break;
1548         case ECORE_LED_MODE_RESTORE:
1549                 drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OPER;
1550                 break;
1551         default:
1552                 DP_NOTICE(p_hwfn, true, "Invalid LED mode %d\n", mode);
1553                 return ECORE_INVAL;
1554         }
1555
1556         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_LED_MODE,
1557                            drv_mb_param, &resp, &param);
1558         if (rc != ECORE_SUCCESS)
1559                 DP_ERR(p_hwfn, "MCP response failure, aborting\n");
1560
1561         return rc;
1562 }
1563
1564 enum _ecore_status_t ecore_mcp_mask_parities(struct ecore_hwfn *p_hwfn,
1565                                              struct ecore_ptt *p_ptt,
1566                                              u32 mask_parities)
1567 {
1568         enum _ecore_status_t rc;
1569         u32 resp = 0, param = 0;
1570
1571         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MASK_PARITIES,
1572                            mask_parities, &resp, &param);
1573
1574         if (rc != ECORE_SUCCESS) {
1575                 DP_ERR(p_hwfn,
1576                        "MCP response failure for mask parities, aborting\n");
1577         } else if (resp != FW_MSG_CODE_OK) {
1578                 DP_ERR(p_hwfn,
1579                        "MCP did not ack mask parity request. Old MFW?\n");
1580                 rc = ECORE_INVAL;
1581         }
1582
1583         return rc;
1584 }
1585
/* Read @len bytes from NVRAM at @addr into @p_buf, in chunks of at most
 * MCP_DRV_NVM_BUF_LEN bytes per mailbox transaction.
 *
 * The raw mailbox response is cached in p_dev->mcp_nvm_resp for later
 * retrieval via ecore_mcp_nvm_resp(). Returns ECORE_BUSY if no PTT window
 * is available, otherwise the status of the last mailbox command.
 */
enum _ecore_status_t ecore_mcp_nvm_read(struct ecore_dev *p_dev, u32 addr,
					u8 *p_buf, u32 len)
{
	struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
	u32 bytes_left, offset, bytes_to_copy, buf_size;
	struct ecore_mcp_nvm_params params;
	struct ecore_ptt *p_ptt;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	p_ptt = ecore_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return ECORE_BUSY;

	OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
	bytes_left = len;
	offset = 0;
	params.type = ECORE_MCP_NVM_RD;
	/* buf_size is written back by ecore_mcp_nvm_command with the number
	 * of bytes actually read in each iteration.
	 */
	params.nvm_rd.buf_size = &buf_size;
	params.nvm_common.cmd = DRV_MSG_CODE_NVM_READ_NVRAM;
	while (bytes_left > 0) {
		bytes_to_copy = OSAL_MIN_T(u32, bytes_left,
					   MCP_DRV_NVM_BUF_LEN);
		/* Mailbox offset word encodes both the NVRAM address (low
		 * bits) and the requested chunk length (high bits).
		 */
		params.nvm_common.offset = (addr + offset) |
		    (bytes_to_copy << DRV_MB_PARAM_NVM_LEN_SHIFT);
		params.nvm_rd.buf = (u32 *)(p_buf + offset);
		rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
		if (rc != ECORE_SUCCESS || (params.nvm_common.resp !=
					    FW_MSG_CODE_NVM_OK)) {
			DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
			break;
		}
		/* Advance by what the MFW reports it actually read */
		offset += *params.nvm_rd.buf_size;
		bytes_left -= *params.nvm_rd.buf_size;
	}

	p_dev->mcp_nvm_resp = params.nvm_common.resp;
	ecore_ptt_release(p_hwfn, p_ptt);

	return rc;
}
1626
1627 enum _ecore_status_t ecore_mcp_phy_read(struct ecore_dev *p_dev, u32 cmd,
1628                                         u32 addr, u8 *p_buf, u32 len)
1629 {
1630         struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
1631         struct ecore_mcp_nvm_params params;
1632         struct ecore_ptt *p_ptt;
1633         enum _ecore_status_t rc;
1634
1635         p_ptt = ecore_ptt_acquire(p_hwfn);
1636         if (!p_ptt)
1637                 return ECORE_BUSY;
1638
1639         OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
1640         params.type = ECORE_MCP_NVM_RD;
1641         params.nvm_rd.buf_size = &len;
1642         params.nvm_common.cmd = (cmd == ECORE_PHY_CORE_READ) ?
1643             DRV_MSG_CODE_PHY_CORE_READ : DRV_MSG_CODE_PHY_RAW_READ;
1644         params.nvm_common.offset = addr;
1645         params.nvm_rd.buf = (u32 *)p_buf;
1646         rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
1647         if (rc != ECORE_SUCCESS)
1648                 DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
1649
1650         p_dev->mcp_nvm_resp = params.nvm_common.resp;
1651         ecore_ptt_release(p_hwfn, p_ptt);
1652
1653         return rc;
1654 }
1655
1656 enum _ecore_status_t ecore_mcp_nvm_resp(struct ecore_dev *p_dev, u8 *p_buf)
1657 {
1658         struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
1659         struct ecore_mcp_nvm_params params;
1660         struct ecore_ptt *p_ptt;
1661
1662         p_ptt = ecore_ptt_acquire(p_hwfn);
1663         if (!p_ptt)
1664                 return ECORE_BUSY;
1665
1666         OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
1667         OSAL_MEMCPY(p_buf, &p_dev->mcp_nvm_resp, sizeof(p_dev->mcp_nvm_resp));
1668         ecore_ptt_release(p_hwfn, p_ptt);
1669
1670         return ECORE_SUCCESS;
1671 }
1672
1673 enum _ecore_status_t ecore_mcp_nvm_del_file(struct ecore_dev *p_dev, u32 addr)
1674 {
1675         struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
1676         struct ecore_mcp_nvm_params params;
1677         struct ecore_ptt *p_ptt;
1678         enum _ecore_status_t rc;
1679
1680         p_ptt = ecore_ptt_acquire(p_hwfn);
1681         if (!p_ptt)
1682                 return ECORE_BUSY;
1683         OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
1684         params.type = ECORE_MCP_CMD;
1685         params.nvm_common.cmd = DRV_MSG_CODE_NVM_DEL_FILE;
1686         params.nvm_common.offset = addr;
1687         rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
1688         p_dev->mcp_nvm_resp = params.nvm_common.resp;
1689         ecore_ptt_release(p_hwfn, p_ptt);
1690
1691         return rc;
1692 }
1693
1694 enum _ecore_status_t ecore_mcp_nvm_put_file_begin(struct ecore_dev *p_dev,
1695                                                   u32 addr)
1696 {
1697         struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
1698         struct ecore_mcp_nvm_params params;
1699         struct ecore_ptt *p_ptt;
1700         enum _ecore_status_t rc;
1701
1702         p_ptt = ecore_ptt_acquire(p_hwfn);
1703         if (!p_ptt)
1704                 return ECORE_BUSY;
1705         OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
1706         params.type = ECORE_MCP_CMD;
1707         params.nvm_common.cmd = DRV_MSG_CODE_NVM_PUT_FILE_BEGIN;
1708         params.nvm_common.offset = addr;
1709         rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
1710         p_dev->mcp_nvm_resp = params.nvm_common.resp;
1711         ecore_ptt_release(p_hwfn, p_ptt);
1712
1713         return rc;
1714 }
1715
1716 /* rc receives ECORE_INVAL as default parameter because
1717  * it might not enter the while loop if the len is 0
1718  */
1719 enum _ecore_status_t ecore_mcp_nvm_write(struct ecore_dev *p_dev, u32 cmd,
1720                                          u32 addr, u8 *p_buf, u32 len)
1721 {
1722         struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
1723         enum _ecore_status_t rc = ECORE_INVAL;
1724         struct ecore_mcp_nvm_params params;
1725         struct ecore_ptt *p_ptt;
1726         u32 buf_idx, buf_size;
1727
1728         p_ptt = ecore_ptt_acquire(p_hwfn);
1729         if (!p_ptt)
1730                 return ECORE_BUSY;
1731
1732         OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
1733         params.type = ECORE_MCP_NVM_WR;
1734         if (cmd == ECORE_PUT_FILE_DATA)
1735                 params.nvm_common.cmd = DRV_MSG_CODE_NVM_PUT_FILE_DATA;
1736         else
1737                 params.nvm_common.cmd = DRV_MSG_CODE_NVM_WRITE_NVRAM;
1738         buf_idx = 0;
1739         while (buf_idx < len) {
1740                 buf_size = OSAL_MIN_T(u32, (len - buf_idx),
1741                                       MCP_DRV_NVM_BUF_LEN);
1742                 params.nvm_common.offset = ((buf_size <<
1743                                              DRV_MB_PARAM_NVM_LEN_SHIFT)
1744                                             | addr) + buf_idx;
1745                 params.nvm_wr.buf_size = buf_size;
1746                 params.nvm_wr.buf = (u32 *)&p_buf[buf_idx];
1747                 rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
1748                 if (rc != ECORE_SUCCESS ||
1749                     ((params.nvm_common.resp != FW_MSG_CODE_NVM_OK) &&
1750                      (params.nvm_common.resp !=
1751                       FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK)))
1752                         DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
1753
1754                 buf_idx += buf_size;
1755         }
1756
1757         p_dev->mcp_nvm_resp = params.nvm_common.resp;
1758         ecore_ptt_release(p_hwfn, p_ptt);
1759
1760         return rc;
1761 }
1762
1763 enum _ecore_status_t ecore_mcp_phy_write(struct ecore_dev *p_dev, u32 cmd,
1764                                          u32 addr, u8 *p_buf, u32 len)
1765 {
1766         struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
1767         struct ecore_mcp_nvm_params params;
1768         struct ecore_ptt *p_ptt;
1769         enum _ecore_status_t rc;
1770
1771         p_ptt = ecore_ptt_acquire(p_hwfn);
1772         if (!p_ptt)
1773                 return ECORE_BUSY;
1774
1775         OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
1776         params.type = ECORE_MCP_NVM_WR;
1777         params.nvm_wr.buf_size = len;
1778         params.nvm_common.cmd = (cmd == ECORE_PHY_CORE_WRITE) ?
1779             DRV_MSG_CODE_PHY_CORE_WRITE : DRV_MSG_CODE_PHY_RAW_WRITE;
1780         params.nvm_common.offset = addr;
1781         params.nvm_wr.buf = (u32 *)p_buf;
1782         rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
1783         if (rc != ECORE_SUCCESS)
1784                 DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc);
1785         p_dev->mcp_nvm_resp = params.nvm_common.resp;
1786         ecore_ptt_release(p_hwfn, p_ptt);
1787
1788         return rc;
1789 }
1790
1791 enum _ecore_status_t ecore_mcp_nvm_set_secure_mode(struct ecore_dev *p_dev,
1792                                                    u32 addr)
1793 {
1794         struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
1795         struct ecore_mcp_nvm_params params;
1796         struct ecore_ptt *p_ptt;
1797         enum _ecore_status_t rc;
1798
1799         p_ptt = ecore_ptt_acquire(p_hwfn);
1800         if (!p_ptt)
1801                 return ECORE_BUSY;
1802
1803         OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
1804         params.type = ECORE_MCP_CMD;
1805         params.nvm_common.cmd = DRV_MSG_CODE_SET_SECURE_MODE;
1806         params.nvm_common.offset = addr;
1807         rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
1808         p_dev->mcp_nvm_resp = params.nvm_common.resp;
1809         ecore_ptt_release(p_hwfn, p_ptt);
1810
1811         return rc;
1812 }
1813
1814 enum _ecore_status_t ecore_mcp_phy_sfp_read(struct ecore_hwfn *p_hwfn,
1815                                             struct ecore_ptt *p_ptt,
1816                                             u32 port, u32 addr, u32 offset,
1817                                             u32 len, u8 *p_buf)
1818 {
1819         struct ecore_mcp_nvm_params params;
1820         enum _ecore_status_t rc;
1821         u32 bytes_left, bytes_to_copy, buf_size;
1822
1823         OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
1824         SET_FIELD(params.nvm_common.offset,
1825                   DRV_MB_PARAM_TRANSCEIVER_PORT, port);
1826         SET_FIELD(params.nvm_common.offset,
1827                   DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS, addr);
1828         addr = offset;
1829         offset = 0;
1830         bytes_left = len;
1831         params.type = ECORE_MCP_NVM_RD;
1832         params.nvm_rd.buf_size = &buf_size;
1833         params.nvm_common.cmd = DRV_MSG_CODE_TRANSCEIVER_READ;
1834         while (bytes_left > 0) {
1835                 bytes_to_copy = OSAL_MIN_T(u32, bytes_left,
1836                                            MAX_I2C_TRANSACTION_SIZE);
1837                 params.nvm_rd.buf = (u32 *)(p_buf + offset);
1838                 SET_FIELD(params.nvm_common.offset,
1839                           DRV_MB_PARAM_TRANSCEIVER_OFFSET, addr + offset);
1840                 SET_FIELD(params.nvm_common.offset,
1841                           DRV_MB_PARAM_TRANSCEIVER_SIZE, bytes_to_copy);
1842                 rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
1843                 if ((params.nvm_common.resp & FW_MSG_CODE_MASK) ==
1844                     FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT) {
1845                         return ECORE_NODEV;
1846                 } else if ((params.nvm_common.resp & FW_MSG_CODE_MASK) !=
1847                            FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
1848                         return ECORE_UNKNOWN_ERROR;
1849
1850                 offset += *params.nvm_rd.buf_size;
1851                 bytes_left -= *params.nvm_rd.buf_size;
1852         }
1853
1854         return ECORE_SUCCESS;
1855 }
1856
1857 enum _ecore_status_t ecore_mcp_phy_sfp_write(struct ecore_hwfn *p_hwfn,
1858                                              struct ecore_ptt *p_ptt,
1859                                              u32 port, u32 addr, u32 offset,
1860                                              u32 len, u8 *p_buf)
1861 {
1862         struct ecore_mcp_nvm_params params;
1863         enum _ecore_status_t rc;
1864         u32 buf_idx, buf_size;
1865
1866         OSAL_MEMSET(&params, 0, sizeof(struct ecore_mcp_nvm_params));
1867         SET_FIELD(params.nvm_common.offset,
1868                   DRV_MB_PARAM_TRANSCEIVER_PORT, port);
1869         SET_FIELD(params.nvm_common.offset,
1870                   DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS, addr);
1871         params.type = ECORE_MCP_NVM_WR;
1872         params.nvm_common.cmd = DRV_MSG_CODE_TRANSCEIVER_WRITE;
1873         buf_idx = 0;
1874         while (buf_idx < len) {
1875                 buf_size = OSAL_MIN_T(u32, (len - buf_idx),
1876                                       MAX_I2C_TRANSACTION_SIZE);
1877                 SET_FIELD(params.nvm_common.offset,
1878                           DRV_MB_PARAM_TRANSCEIVER_OFFSET, offset + buf_idx);
1879                 SET_FIELD(params.nvm_common.offset,
1880                           DRV_MB_PARAM_TRANSCEIVER_SIZE, buf_size);
1881                 params.nvm_wr.buf_size = buf_size;
1882                 params.nvm_wr.buf = (u32 *)&p_buf[buf_idx];
1883                 rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, &params);
1884                 if ((params.nvm_common.resp & FW_MSG_CODE_MASK) ==
1885                     FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT) {
1886                         return ECORE_NODEV;
1887                 } else if ((params.nvm_common.resp & FW_MSG_CODE_MASK) !=
1888                            FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
1889                         return ECORE_UNKNOWN_ERROR;
1890
1891                 buf_idx += buf_size;
1892         }
1893
1894         return ECORE_SUCCESS;
1895 }
1896
1897 enum _ecore_status_t ecore_mcp_gpio_read(struct ecore_hwfn *p_hwfn,
1898                                          struct ecore_ptt *p_ptt,
1899                                          u16 gpio, u32 *gpio_val)
1900 {
1901         enum _ecore_status_t rc = ECORE_SUCCESS;
1902         u32 drv_mb_param = 0, rsp;
1903
1904         SET_FIELD(drv_mb_param, DRV_MB_PARAM_GPIO_NUMBER, gpio);
1905
1906         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_READ,
1907                            drv_mb_param, &rsp, gpio_val);
1908
1909         if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
1910                 return ECORE_UNKNOWN_ERROR;
1911
1912         return ECORE_SUCCESS;
1913 }
1914
1915 enum _ecore_status_t ecore_mcp_gpio_write(struct ecore_hwfn *p_hwfn,
1916                                           struct ecore_ptt *p_ptt,
1917                                           u16 gpio, u16 gpio_val)
1918 {
1919         enum _ecore_status_t rc = ECORE_SUCCESS;
1920         u32 drv_mb_param = 0, param, rsp;
1921
1922         SET_FIELD(drv_mb_param, DRV_MB_PARAM_GPIO_NUMBER, gpio);
1923         SET_FIELD(drv_mb_param, DRV_MB_PARAM_GPIO_VALUE, gpio_val);
1924
1925         rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_WRITE,
1926                            drv_mb_param, &rsp, &param);
1927
1928         if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK)
1929                 return ECORE_UNKNOWN_ERROR;
1930
1931         return ECORE_SUCCESS;
1932 }