deb_dpdk.git - drivers/net/qede/base/ecore_dev.c (Imported Upstream version 16.07-rc1)
1 /*
2  * Copyright (c) 2016 QLogic Corporation.
3  * All rights reserved.
4  * www.qlogic.com
5  *
6  * See LICENSE.qede_pmd for copyright and licensing details.
7  */
8
9 #include "bcm_osal.h"
10 #include "reg_addr.h"
11 #include "ecore_gtt_reg_addr.h"
12 #include "ecore.h"
13 #include "ecore_chain.h"
14 #include "ecore_status.h"
15 #include "ecore_hw.h"
16 #include "ecore_rt_defs.h"
17 #include "ecore_init_ops.h"
18 #include "ecore_int.h"
19 #include "ecore_cxt.h"
20 #include "ecore_spq.h"
21 #include "ecore_init_fw_funcs.h"
22 #include "ecore_sp_commands.h"
23 #include "ecore_dev_api.h"
24 #include "ecore_sriov.h"
25 #include "ecore_vf.h"
26 #include "ecore_mcp.h"
27 #include "ecore_hw_defs.h"
28 #include "mcp_public.h"
29 #include "ecore_iro.h"
30 #include "nvm_cfg.h"
31 #include "ecore_dev_api.h"
32 #include "ecore_attn_values.h"
33 #include "ecore_dcbx.h"
34
35 /* Configurable */
36 #define ECORE_MIN_DPIS          (4)     /* The minimal number of DPIs required
37                                          * to load the driver. The number was
38                                          * arbitrarily set.
39                                          */
40
41 /* Derived */
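/* i.e. the PWM doorbell region must hold at least ECORE_MIN_DPIS DPIs of
 * ECORE_WID_SIZE bytes each; see the check in ecore_hw_init_pf_doorbell_bar().
 */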
42 #define ECORE_MIN_PWM_REGION    ((ECORE_WID_SIZE) * (ECORE_MIN_DPIS))
43
44 enum BAR_ID {
45         BAR_ID_0,               /* used for GRC */
46         BAR_ID_1                /* Used for doorbells */
47 };
48
49 static u32 ecore_hw_bar_size(struct ecore_hwfn *p_hwfn, enum BAR_ID bar_id)
50 {
51         u32 bar_reg = (bar_id == BAR_ID_0 ?
52                        PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE);
53         u32 val = ecore_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg);
54
55         /* The above registers were updated in the past only in CMT mode. Since
56          * they were found to be useful, MFW started updating them from 8.7.7.0.
57          * In older MFW versions they are set to 0, which means disabled.
58          */
59         if (!val) {
60                 if (p_hwfn->p_dev->num_hwfns > 1) {
61                         DP_NOTICE(p_hwfn, false,
62                                   "BAR size not configured. Assuming BAR"
63                                   " size of 256kB for GRC and 512kB for DB\n");
64                         return (bar_id == BAR_ID_0) ? 256 * 1024 : 512 * 1024;
65                 }
66
67                 DP_NOTICE(p_hwfn, false,
68                           "BAR size not configured. Assuming BAR"
69                           " size of 512kB for GRC and 512kB for DB\n");
70                 return 512 * 1024;
71         }
72
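        /* The register encodes the BAR size as a power-of-two exponent:
         * size = 2^(val + 15) bytes, e.g. val == 4 corresponds to 512kB.
         */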
73         return 1 << (val + 15);
74 }
75
76 void ecore_init_dp(struct ecore_dev *p_dev,
77                    u32 dp_module, u8 dp_level, void *dp_ctx)
78 {
79         u32 i;
80
81         p_dev->dp_level = dp_level;
82         p_dev->dp_module = dp_module;
83         p_dev->dp_ctx = dp_ctx;
84         for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
85                 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
86
87                 p_hwfn->dp_level = dp_level;
88                 p_hwfn->dp_module = dp_module;
89                 p_hwfn->dp_ctx = dp_ctx;
90         }
91 }
92
93 void ecore_init_struct(struct ecore_dev *p_dev)
94 {
95         u8 i;
96
97         for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) {
98                 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
99
100                 p_hwfn->p_dev = p_dev;
101                 p_hwfn->my_id = i;
102                 p_hwfn->b_active = false;
103
104                 OSAL_MUTEX_ALLOC(p_hwfn, &p_hwfn->dmae_info.mutex);
105                 OSAL_MUTEX_INIT(&p_hwfn->dmae_info.mutex);
106         }
107
108         /* hwfn 0 is always active */
109         p_dev->hwfns[0].b_active = true;
110
111         /* set the default cache alignment to 128 (may be overridden later) */
112         p_dev->cache_shift = 7;
113 }
114
115 static void ecore_qm_info_free(struct ecore_hwfn *p_hwfn)
116 {
117         struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
118
119         OSAL_FREE(p_hwfn->p_dev, qm_info->qm_pq_params);
120         qm_info->qm_pq_params = OSAL_NULL;
121         OSAL_FREE(p_hwfn->p_dev, qm_info->qm_vport_params);
122         qm_info->qm_vport_params = OSAL_NULL;
123         OSAL_FREE(p_hwfn->p_dev, qm_info->qm_port_params);
124         qm_info->qm_port_params = OSAL_NULL;
125         OSAL_FREE(p_hwfn->p_dev, qm_info->wfq_data);
126         qm_info->wfq_data = OSAL_NULL;
127 }
128
129 void ecore_resc_free(struct ecore_dev *p_dev)
130 {
131         int i;
132
133         if (IS_VF(p_dev))
134                 return;
135
136         OSAL_FREE(p_dev, p_dev->fw_data);
137         p_dev->fw_data = OSAL_NULL;
138
139         OSAL_FREE(p_dev, p_dev->reset_stats);
140
141         for_each_hwfn(p_dev, i) {
142                 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
143
144                 OSAL_FREE(p_dev, p_hwfn->p_tx_cids);
145                 p_hwfn->p_tx_cids = OSAL_NULL;
146                 OSAL_FREE(p_dev, p_hwfn->p_rx_cids);
147                 p_hwfn->p_rx_cids = OSAL_NULL;
148         }
149
150         for_each_hwfn(p_dev, i) {
151                 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
152
153                 ecore_cxt_mngr_free(p_hwfn);
154                 ecore_qm_info_free(p_hwfn);
155                 ecore_spq_free(p_hwfn);
156                 ecore_eq_free(p_hwfn, p_hwfn->p_eq);
157                 ecore_consq_free(p_hwfn, p_hwfn->p_consq);
158                 ecore_int_free(p_hwfn);
159                 ecore_iov_free(p_hwfn);
160                 ecore_dmae_info_free(p_hwfn);
161                 ecore_dcbx_info_free(p_hwfn, p_hwfn->p_dcbx_info);
162                 /* @@@TBD Flush work-queue ? */
163         }
164 }
165
166 static enum _ecore_status_t ecore_init_qm_info(struct ecore_hwfn *p_hwfn,
167                                                bool b_sleepable)
168 {
169         u8 num_vports, vf_offset = 0, i, vport_id, num_ports;
170         struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
171         struct init_qm_port_params *p_qm_port;
172         u16 num_pqs, multi_cos_tcs = 1;
173 #ifdef CONFIG_ECORE_SRIOV
174         u16 num_vfs = p_hwfn->p_dev->sriov_info.total_vfs;
175 #else
176         u16 num_vfs = 0;
177 #endif
178
179         OSAL_MEM_ZERO(qm_info, sizeof(*qm_info));
180
181 #ifndef ASIC_ONLY
182         /* @TMP - Don't allocate QM queues for VFs on emulation */
183         if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
184                 DP_NOTICE(p_hwfn, false,
185                           "Emulation - skip configuring QM queues for VFs\n");
186                 num_vfs = 0;
187         }
188 #endif
189
190         num_pqs = multi_cos_tcs + num_vfs + 1;  /* The '1' is for pure-LB */
191         num_vports = (u8)RESC_NUM(p_hwfn, ECORE_VPORT);
192
193         /* Sanity checking that setup requires legal number of resources */
194         if (num_pqs > RESC_NUM(p_hwfn, ECORE_PQ)) {
195                 DP_ERR(p_hwfn,
196                        "Need too many Physical queues - 0x%04x when"
197                         " only 0x%04x are available\n",
198                        num_pqs, RESC_NUM(p_hwfn, ECORE_PQ));
199                 return ECORE_INVAL;
200         }
201
202         /* PQs will be arranged as follows: First per-TC PQ, then pure-LB queue,
203          * then special queues, then per-VF PQ.
204          */
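        /* Illustrative layout only (hypothetical numbers): with multi_cos_tcs
         * of 1 and two VFs, the array holds PQ0 = per-TC, PQ1 = pure-LB and
         * PQ2-PQ3 = per-VF, i.e. num_pqs = 1 + 2 + 1 = 4.
         */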
205         qm_info->qm_pq_params = OSAL_ZALLOC(p_hwfn->p_dev,
206                                             b_sleepable ? GFP_KERNEL :
207                                             GFP_ATOMIC,
208                                             sizeof(struct init_qm_pq_params) *
209                                             num_pqs);
210         if (!qm_info->qm_pq_params)
211                 goto alloc_err;
212
213         qm_info->qm_vport_params = OSAL_ZALLOC(p_hwfn->p_dev,
214                                                b_sleepable ? GFP_KERNEL :
215                                                GFP_ATOMIC,
216                                                sizeof(struct
217                                                       init_qm_vport_params) *
218                                                num_vports);
219         if (!qm_info->qm_vport_params)
220                 goto alloc_err;
221
222         qm_info->qm_port_params = OSAL_ZALLOC(p_hwfn->p_dev,
223                                               b_sleepable ? GFP_KERNEL :
224                                               GFP_ATOMIC,
225                                               sizeof(struct init_qm_port_params)
226                                               * MAX_NUM_PORTS);
227         if (!qm_info->qm_port_params)
228                 goto alloc_err;
229
230         qm_info->wfq_data = OSAL_ZALLOC(p_hwfn->p_dev,
231                                         b_sleepable ? GFP_KERNEL :
232                                         GFP_ATOMIC,
233                                         sizeof(struct ecore_wfq_data) *
234                                         num_vports);
235
236         if (!qm_info->wfq_data)
237                 goto alloc_err;
238
239         vport_id = (u8)RESC_START(p_hwfn, ECORE_VPORT);
240
241         /* First init per-TC PQs */
242         for (i = 0; i < multi_cos_tcs; i++) {
243                 struct init_qm_pq_params *params = &qm_info->qm_pq_params[i];
244
245                 if (p_hwfn->hw_info.personality == ECORE_PCI_ETH) {
246                         params->vport_id = vport_id;
247                         params->tc_id = p_hwfn->hw_info.non_offload_tc;
248                         params->wrr_group = 1;  /* @@@TBD ECORE_WRR_MEDIUM */
249                 } else {
250                         params->vport_id = vport_id;
251                         params->tc_id = p_hwfn->hw_info.offload_tc;
252                         params->wrr_group = 1;  /* @@@TBD ECORE_WRR_MEDIUM */
253                 }
254         }
255
256         /* Then init pure-LB PQ */
257         qm_info->pure_lb_pq = i;
258         qm_info->qm_pq_params[i].vport_id =
259             (u8)RESC_START(p_hwfn, ECORE_VPORT);
260         qm_info->qm_pq_params[i].tc_id = PURE_LB_TC;
261         qm_info->qm_pq_params[i].wrr_group = 1;
262         i++;
263
264         /* Then init per-VF PQs */
265         vf_offset = i;
266         for (i = 0; i < num_vfs; i++) {
267                 /* First vport is used by the PF */
268                 qm_info->qm_pq_params[vf_offset + i].vport_id = vport_id +
269                     i + 1;
270                 qm_info->qm_pq_params[vf_offset + i].tc_id =
271                     p_hwfn->hw_info.non_offload_tc;
272                 qm_info->qm_pq_params[vf_offset + i].wrr_group = 1;
273         }
274
275         qm_info->vf_queues_offset = vf_offset;
276         qm_info->num_pqs = num_pqs;
277         qm_info->num_vports = num_vports;
278
279         /* Initialize qm port parameters */
280         num_ports = p_hwfn->p_dev->num_ports_in_engines;
281         for (i = 0; i < num_ports; i++) {
282                 p_qm_port = &qm_info->qm_port_params[i];
283                 p_qm_port->active = 1;
284                 /* @@@TMP - was NUM_OF_PHYS_TCS; Changed until dcbx will
285                  * be in place
286                  */
287                 if (num_ports == 4)
288                         p_qm_port->num_active_phys_tcs = 2;
289                 else
290                         p_qm_port->num_active_phys_tcs = 5;
291                 p_qm_port->num_pbf_cmd_lines = PBF_MAX_CMD_LINES / num_ports;
292                 p_qm_port->num_btb_blocks = BTB_MAX_BLOCKS / num_ports;
293         }
294
295         if (ECORE_IS_AH(p_hwfn->p_dev) && (num_ports == 4))
296                 qm_info->max_phys_tcs_per_port = NUM_PHYS_TCS_4PORT_K2;
297         else
298                 qm_info->max_phys_tcs_per_port = NUM_OF_PHYS_TCS;
299
300         qm_info->start_pq = (u16)RESC_START(p_hwfn, ECORE_PQ);
301
302         qm_info->num_vf_pqs = num_vfs;
303         qm_info->start_vport = (u8)RESC_START(p_hwfn, ECORE_VPORT);
304
305         for (i = 0; i < qm_info->num_vports; i++)
306                 qm_info->qm_vport_params[i].vport_wfq = 1;
307
308         qm_info->pf_wfq = 0;
309         qm_info->pf_rl = 0;
310         qm_info->vport_rl_en = 1;
311         qm_info->vport_wfq_en = 1;
312
313         return ECORE_SUCCESS;
314
315 alloc_err:
316         DP_NOTICE(p_hwfn, false, "Failed to allocate memory for QM params\n");
317         ecore_qm_info_free(p_hwfn);
318         return ECORE_NOMEM;
319 }
320
321 /* This function reconfigures the QM PF on the fly.
322  * For this purpose we:
323  * 1. reconfigure the QM database
324  * 2. set new values to the runtime array
325  * 3. send an sdm_qm_cmd through the rbc interface to stop the QM
326  * 4. activate init tool in QM_PF stage
327  * 5. send an sdm_qm_cmd through the rbc interface to release the QM
328  */
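/* Note: steps 3 and 5 above map to the two ecore_send_qm_stop_cmd() calls
 * below (stop and then re-enable), and step 4 to the PHASE_QM_PF init run.
 */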
329 enum _ecore_status_t ecore_qm_reconf(struct ecore_hwfn *p_hwfn,
330                                      struct ecore_ptt *p_ptt)
331 {
332         struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
333         enum _ecore_status_t rc;
334         bool b_rc;
335
336         /* qm_info is allocated in ecore_init_qm_info() which is already called
337          * from ecore_resc_alloc() or previous call of ecore_qm_reconf().
338          * The allocated size may change each init, so we free it before next
339          * allocation.
340          */
341         ecore_qm_info_free(p_hwfn);
342
343         /* initialize ecore's qm data structure */
344         rc = ecore_init_qm_info(p_hwfn, false);
345         if (rc != ECORE_SUCCESS)
346                 return rc;
347
348         /* stop PF's qm queues */
349         b_rc = ecore_send_qm_stop_cmd(p_hwfn, p_ptt, false, true,
350                                       qm_info->start_pq, qm_info->num_pqs);
351         if (!b_rc)
352                 return ECORE_INVAL;
353
354         /* clear the QM_PF runtime phase leftovers from previous init */
355         ecore_init_clear_rt_data(p_hwfn);
356
357         /* prepare QM portion of runtime array */
358         ecore_qm_init_pf(p_hwfn);
359
360         /* activate init tool on runtime array */
361         rc = ecore_init_run(p_hwfn, p_ptt, PHASE_QM_PF, p_hwfn->rel_pf_id,
362                             p_hwfn->hw_info.hw_mode);
363         if (rc != ECORE_SUCCESS)
364                 return rc;
365
366         /* start PF's qm queues */
367         b_rc = ecore_send_qm_stop_cmd(p_hwfn, p_ptt, true, true,
368                                       qm_info->start_pq, qm_info->num_pqs);
369         if (!b_rc)
370                 return ECORE_INVAL;
371
372         return ECORE_SUCCESS;
373 }
374
375 enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev)
376 {
377         enum _ecore_status_t rc = ECORE_SUCCESS;
378         struct ecore_consq *p_consq;
379         struct ecore_eq *p_eq;
380         int i;
381
382         if (IS_VF(p_dev))
383                 return rc;
384
385         p_dev->fw_data = OSAL_ZALLOC(p_dev, GFP_KERNEL,
386                                      sizeof(struct ecore_fw_data));
387         if (!p_dev->fw_data)
388                 return ECORE_NOMEM;
389
390         /* Allocate Memory for the Queue->CID mapping */
391         for_each_hwfn(p_dev, i) {
392                 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
393
394                 /* @@@TMP - resc management, change to actual required size */
395                 int tx_size = sizeof(struct ecore_hw_cid_data) *
396                     RESC_NUM(p_hwfn, ECORE_L2_QUEUE);
397                 int rx_size = sizeof(struct ecore_hw_cid_data) *
398                     RESC_NUM(p_hwfn, ECORE_L2_QUEUE);
399
400                 p_hwfn->p_tx_cids = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
401                                                 tx_size);
402                 if (!p_hwfn->p_tx_cids) {
403                         DP_NOTICE(p_hwfn, true,
404                                   "Failed to allocate memory for Tx Cids\n");
405                         goto alloc_no_mem;
406                 }
407
408                 p_hwfn->p_rx_cids = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
409                                                 rx_size);
410                 if (!p_hwfn->p_rx_cids) {
411                         DP_NOTICE(p_hwfn, true,
412                                   "Failed to allocate memory for Rx Cids\n");
413                         goto alloc_no_mem;
414                 }
415         }
416
417         for_each_hwfn(p_dev, i) {
418                 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
419
420                 /* First allocate the context manager structure */
421                 rc = ecore_cxt_mngr_alloc(p_hwfn);
422                 if (rc)
423                         goto alloc_err;
424
425                 /* Set the HW cid/tid numbers (in the context manager)
426                  * Must be done prior to any further computations.
427                  */
428                 rc = ecore_cxt_set_pf_params(p_hwfn);
429                 if (rc)
430                         goto alloc_err;
431
432                 /* Prepare and process QM requirements */
433                 rc = ecore_init_qm_info(p_hwfn, true);
434                 if (rc)
435                         goto alloc_err;
436
437                 /* Compute the ILT client partition */
438                 rc = ecore_cxt_cfg_ilt_compute(p_hwfn);
439                 if (rc)
440                         goto alloc_err;
441
442                 /* CID map / ILT shadow table / T2
443                  * The table sizes are determined by the computations above
444                  */
445                 rc = ecore_cxt_tables_alloc(p_hwfn);
446                 if (rc)
447                         goto alloc_err;
448
449                 /* SPQ, must follow ILT because it initializes the SPQ context */
450                 rc = ecore_spq_alloc(p_hwfn);
451                 if (rc)
452                         goto alloc_err;
453
454                 /* SP status block allocation */
455                 p_hwfn->p_dpc_ptt = ecore_get_reserved_ptt(p_hwfn,
456                                                            RESERVED_PTT_DPC);
457
458                 rc = ecore_int_alloc(p_hwfn, p_hwfn->p_main_ptt);
459                 if (rc)
460                         goto alloc_err;
461
462                 rc = ecore_iov_alloc(p_hwfn);
463                 if (rc)
464                         goto alloc_err;
465
466                 /* EQ */
467                 p_eq = ecore_eq_alloc(p_hwfn, 256);
468                 if (!p_eq)
469                         goto alloc_no_mem;
470                 p_hwfn->p_eq = p_eq;
471
472                 p_consq = ecore_consq_alloc(p_hwfn);
473                 if (!p_consq)
474                         goto alloc_no_mem;
475                 p_hwfn->p_consq = p_consq;
476
477                 /* DMA info initialization */
478                 rc = ecore_dmae_info_alloc(p_hwfn);
479                 if (rc) {
480                         DP_NOTICE(p_hwfn, true,
481                                   "Failed to allocate memory for"
482                                   " dmae_info structure\n");
483                         goto alloc_err;
484                 }
485
486                 /* DCBX initialization */
487                 rc = ecore_dcbx_info_alloc(p_hwfn);
488                 if (rc) {
489                         DP_NOTICE(p_hwfn, true,
490                                   "Failed to allocate memory for dcbx struct\n");
491                         goto alloc_err;
492                 }
493         }
494
495         p_dev->reset_stats = OSAL_ZALLOC(p_dev, GFP_KERNEL,
496                                          sizeof(struct ecore_eth_stats));
497         if (!p_dev->reset_stats) {
498                 DP_NOTICE(p_dev, true, "Failed to allocate reset statistics\n");
499                 goto alloc_no_mem;
500         }
501
502         return ECORE_SUCCESS;
503
504 alloc_no_mem:
505         rc = ECORE_NOMEM;
506 alloc_err:
507         ecore_resc_free(p_dev);
508         return rc;
509 }
510
511 void ecore_resc_setup(struct ecore_dev *p_dev)
512 {
513         int i;
514
515         if (IS_VF(p_dev))
516                 return;
517
518         for_each_hwfn(p_dev, i) {
519                 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
520
521                 ecore_cxt_mngr_setup(p_hwfn);
522                 ecore_spq_setup(p_hwfn);
523                 ecore_eq_setup(p_hwfn, p_hwfn->p_eq);
524                 ecore_consq_setup(p_hwfn, p_hwfn->p_consq);
525
526                 /* Read shadow of current MFW mailbox */
527                 ecore_mcp_read_mb(p_hwfn, p_hwfn->p_main_ptt);
528                 OSAL_MEMCPY(p_hwfn->mcp_info->mfw_mb_shadow,
529                             p_hwfn->mcp_info->mfw_mb_cur,
530                             p_hwfn->mcp_info->mfw_mb_length);
531
532                 ecore_int_setup(p_hwfn, p_hwfn->p_main_ptt);
533
534                 ecore_iov_setup(p_hwfn, p_hwfn->p_main_ptt);
535         }
536 }
537
538 #define FINAL_CLEANUP_POLL_CNT  (100)
539 #define FINAL_CLEANUP_POLL_TIME (10)
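/* The poll loop in ecore_final_cleanup() below thus waits up to
 * ~100 * 10ms = 1 second for the FW acknowledgment.
 */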
540 enum _ecore_status_t ecore_final_cleanup(struct ecore_hwfn *p_hwfn,
541                                          struct ecore_ptt *p_ptt,
542                                          u16 id, bool is_vf)
543 {
544         u32 command = 0, addr, count = FINAL_CLEANUP_POLL_CNT;
545         enum _ecore_status_t rc = ECORE_TIMEOUT;
546
547 #ifndef ASIC_ONLY
548         if (CHIP_REV_IS_TEDIBEAR(p_hwfn->p_dev) ||
549             CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
550                 DP_INFO(p_hwfn, "Skipping final cleanup for non-ASIC\n");
551                 return ECORE_SUCCESS;
552         }
553 #endif
554
555         addr = GTT_BAR0_MAP_REG_USDM_RAM +
556             USTORM_FLR_FINAL_ACK_OFFSET(p_hwfn->rel_pf_id);
557
558         if (is_vf)
559                 id += 0x10;
560
561         command |= X_FINAL_CLEANUP_AGG_INT <<
562             SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT;
563         command |= 1 << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT;
564         command |= id << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT;
565         command |= SDM_COMP_TYPE_AGG_INT << SDM_OP_GEN_COMP_TYPE_SHIFT;
566
567         /* Make sure notification is not set before initiating final cleanup */
568         if (REG_RD(p_hwfn, addr)) {
569                 DP_NOTICE(p_hwfn, false,
570                           "Unexpected; Found final cleanup notification "
571                           "before initiating final cleanup\n");
572                 REG_WR(p_hwfn, addr, 0);
573         }
574
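        /* The OP_GEN write below requests an aggregated-interrupt completion;
         * judging from the flow here, the FW acknowledges it by setting the
         * per-function ack word in USDM RAM, which is polled further down.
         */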
575         DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
576                    "Sending final cleanup for PFVF[%d] [Command %08x]\n",
577                    id, OSAL_CPU_TO_LE32(command));
578
579         ecore_wr(p_hwfn, p_ptt, XSDM_REG_OPERATION_GEN,
580                  OSAL_CPU_TO_LE32(command));
581
582         /* Poll until completion */
583         while (!REG_RD(p_hwfn, addr) && count--)
584                 OSAL_MSLEEP(FINAL_CLEANUP_POLL_TIME);
585
586         if (REG_RD(p_hwfn, addr))
587                 rc = ECORE_SUCCESS;
588         else
589                 DP_NOTICE(p_hwfn, true,
590                           "Failed to receive FW final cleanup notification\n");
591
592         /* Cleanup afterwards */
593         REG_WR(p_hwfn, addr, 0);
594
595         return rc;
596 }
597
598 static void ecore_calc_hw_mode(struct ecore_hwfn *p_hwfn)
599 {
600         int hw_mode = 0;
601
602         switch (ECORE_GET_TYPE(p_hwfn->p_dev)) {
603         case CHIP_BB_A0:
604                 hw_mode |= 1 << MODE_BB_A0;
605                 break;
606         case CHIP_BB_B0:
607                 hw_mode |= 1 << MODE_BB_B0;
608                 break;
609         case CHIP_K2:
610                 hw_mode |= 1 << MODE_K2;
611                 break;
612         default:
613                 DP_NOTICE(p_hwfn, true, "Can't initialize chip ID %d\n",
614                           ECORE_GET_TYPE(p_hwfn->p_dev));
615                 return;
616         }
617
618         /* Ports per engine is based on the values in CNIG_REG_NW_PORT_MODE */
619         switch (p_hwfn->p_dev->num_ports_in_engines) {
620         case 1:
621                 hw_mode |= 1 << MODE_PORTS_PER_ENG_1;
622                 break;
623         case 2:
624                 hw_mode |= 1 << MODE_PORTS_PER_ENG_2;
625                 break;
626         case 4:
627                 hw_mode |= 1 << MODE_PORTS_PER_ENG_4;
628                 break;
629         default:
630                 DP_NOTICE(p_hwfn, true,
631                           "num_ports_in_engine = %d not supported\n",
632                           p_hwfn->p_dev->num_ports_in_engines);
633                 return;
634         }
635
636         switch (p_hwfn->p_dev->mf_mode) {
637         case ECORE_MF_DEFAULT:
638         case ECORE_MF_NPAR:
639                 hw_mode |= 1 << MODE_MF_SI;
640                 break;
641         case ECORE_MF_OVLAN:
642                 hw_mode |= 1 << MODE_MF_SD;
643                 break;
644         default:
645                 DP_NOTICE(p_hwfn, true,
646                           "Unsupported MF mode, init as DEFAULT\n");
647                 hw_mode |= 1 << MODE_MF_SI;
648         }
649
650 #ifndef ASIC_ONLY
651         if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
652                 if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
653                         hw_mode |= 1 << MODE_FPGA;
654                 } else {
655                         if (p_hwfn->p_dev->b_is_emul_full)
656                                 hw_mode |= 1 << MODE_EMUL_FULL;
657                         else
658                                 hw_mode |= 1 << MODE_EMUL_REDUCED;
659                 }
660         } else
661 #endif
662                 hw_mode |= 1 << MODE_ASIC;
663
664         if (ENABLE_EAGLE_ENG1_WORKAROUND(p_hwfn))
665                 hw_mode |= 1 << MODE_EAGLE_ENG1_WORKAROUND;
666
667         if (p_hwfn->p_dev->num_hwfns > 1)
668                 hw_mode |= 1 << MODE_100G;
669
670         p_hwfn->hw_info.hw_mode = hw_mode;
671
672         DP_VERBOSE(p_hwfn, (ECORE_MSG_PROBE | ECORE_MSG_IFUP),
673                    "Configuring function for hw_mode: 0x%08x\n",
674                    p_hwfn->hw_info.hw_mode);
675 }
676
677 #ifndef ASIC_ONLY
678 /* MFW-replacement initializations for non-ASIC */
679 static void ecore_hw_init_chip(struct ecore_hwfn *p_hwfn,
680                                struct ecore_ptt *p_ptt)
681 {
682         u32 pl_hv = 1;
683         int i;
684
685         if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && ECORE_IS_AH(p_hwfn->p_dev))
686                 pl_hv |= 0x600;
687
688         ecore_wr(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV + 4, pl_hv);
689
690         if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && ECORE_IS_AH(p_hwfn->p_dev))
691                 ecore_wr(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV_2, 0x3ffffff);
692
693         /* initialize interrupt masks */
694         for (i = 0;
695              i <
696              attn_blocks[BLOCK_MISCS].chip_regs[ECORE_GET_TYPE(p_hwfn->p_dev)].
697              num_of_int_regs; i++)
698                 ecore_wr(p_hwfn, p_ptt,
699                          attn_blocks[BLOCK_MISCS].
700                          chip_regs[ECORE_GET_TYPE(p_hwfn->p_dev)].int_regs[i]->
701                          mask_addr, 0);
702
703         if (!CHIP_REV_IS_EMUL(p_hwfn->p_dev) || !ECORE_IS_AH(p_hwfn->p_dev))
704                 ecore_wr(p_hwfn, p_ptt,
705                          attn_blocks[BLOCK_CNIG].
706                          chip_regs[ECORE_GET_TYPE(p_hwfn->p_dev)].int_regs[0]->
707                          mask_addr, 0);
708         ecore_wr(p_hwfn, p_ptt,
709                  attn_blocks[BLOCK_PGLCS].
710                  chip_regs[ECORE_GET_TYPE(p_hwfn->p_dev)].int_regs[0]->
711                  mask_addr, 0);
712         ecore_wr(p_hwfn, p_ptt,
713                  attn_blocks[BLOCK_CPMU].
714                  chip_regs[ECORE_GET_TYPE(p_hwfn->p_dev)].int_regs[0]->
715                  mask_addr, 0);
716         /* Currently A0 and B0 interrupt bits are the same in pglue_b;
717          * If this changes, need to set this according to chip type. <14/09/23>
718          */
719         ecore_wr(p_hwfn, p_ptt,
720                  attn_blocks[BLOCK_PGLUE_B].
721                  chip_regs[ECORE_GET_TYPE(p_hwfn->p_dev)].int_regs[0]->
722                  mask_addr, 0x80000);
723
724         /* initialize port mode to 4x10G_E (10G with 4x10 SERDES) */
725         /* CNIG_REG_NW_PORT_MODE is same for A0 and B0 */
726         if (!CHIP_REV_IS_EMUL(p_hwfn->p_dev) || !ECORE_IS_AH(p_hwfn->p_dev))
727                 ecore_wr(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB_B0, 4);
728
729         if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && ECORE_IS_AH(p_hwfn->p_dev)) {
730                 /* 2 for 4-port, 1 for 2-port, 0 for 1-port */
731                 ecore_wr(p_hwfn, p_ptt, MISC_REG_PORT_MODE,
732                          (p_hwfn->p_dev->num_ports_in_engines >> 1));
733
734                 ecore_wr(p_hwfn, p_ptt, MISC_REG_BLOCK_256B_EN,
735                          p_hwfn->p_dev->num_ports_in_engines == 4 ? 0 : 3);
736         }
737
738         /* Poll on RBC */
739         ecore_wr(p_hwfn, p_ptt, PSWRQ2_REG_RBC_DONE, 1);
740         for (i = 0; i < 100; i++) {
741                 OSAL_UDELAY(50);
742                 if (ecore_rd(p_hwfn, p_ptt, PSWRQ2_REG_CFG_DONE) == 1)
743                         break;
744         }
745         if (i == 100)
746                 DP_NOTICE(p_hwfn, true,
747                           "RBC done failed to complete in PSWRQ2\n");
748 }
749 #endif
750
751 /* Init run time data for all PFs and their VFs on an engine.
752  * TBD - for VFs - Once we have parent PF info for each VF in
753  * shmem available as CAU requires knowledge of parent PF for each VF.
754  */
755 static void ecore_init_cau_rt_data(struct ecore_dev *p_dev)
756 {
757         u32 offset = CAU_REG_SB_VAR_MEMORY_RT_OFFSET;
758         int i, sb_id;
759
760         for_each_hwfn(p_dev, i) {
761                 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
762                 struct ecore_igu_info *p_igu_info;
763                 struct ecore_igu_block *p_block;
764                 struct cau_sb_entry sb_entry;
765
766                 p_igu_info = p_hwfn->hw_info.p_igu_info;
767
768                 for (sb_id = 0; sb_id < ECORE_MAPPING_MEMORY_SIZE(p_dev);
769                      sb_id++) {
770                         p_block = &p_igu_info->igu_map.igu_blocks[sb_id];
771
772                         if (!p_block->is_pf)
773                                 continue;
774
775                         ecore_init_cau_sb_entry(p_hwfn, &sb_entry,
776                                                 p_block->function_id, 0, 0);
777                         STORE_RT_REG_AGG(p_hwfn, offset + sb_id * 2, sb_entry);
778                 }
779         }
780 }
781
782 static enum _ecore_status_t ecore_hw_init_common(struct ecore_hwfn *p_hwfn,
783                                                  struct ecore_ptt *p_ptt,
784                                                  int hw_mode)
785 {
786         struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
787         enum _ecore_status_t rc = ECORE_SUCCESS;
788         struct ecore_dev *p_dev = p_hwfn->p_dev;
789         u8 vf_id, max_num_vfs;
790         u16 num_pfs, pf_id;
791         u32 concrete_fid;
792
793         ecore_init_cau_rt_data(p_dev);
794
795         /* Program GTT windows */
796         ecore_gtt_init(p_hwfn);
797
798 #ifndef ASIC_ONLY
799         if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
800                 ecore_hw_init_chip(p_hwfn, p_hwfn->p_main_ptt);
801 #endif
802
803         if (p_hwfn->mcp_info) {
804                 if (p_hwfn->mcp_info->func_info.bandwidth_max)
805                         qm_info->pf_rl_en = 1;
806                 if (p_hwfn->mcp_info->func_info.bandwidth_min)
807                         qm_info->pf_wfq_en = 1;
808         }
809
810         ecore_qm_common_rt_init(p_hwfn,
811                                 p_hwfn->p_dev->num_ports_in_engines,
812                                 qm_info->max_phys_tcs_per_port,
813                                 qm_info->pf_rl_en, qm_info->pf_wfq_en,
814                                 qm_info->vport_rl_en, qm_info->vport_wfq_en,
815                                 qm_info->qm_port_params);
816
817         ecore_cxt_hw_init_common(p_hwfn);
818
819         /* Close gate from NIG to BRB/Storm; By default they are open, but
820          * we close them to prevent NIG from passing data to reset blocks.
821          * Should have been done in the ENGINE phase, but init-tool lacks
822          * proper port-pretend capabilities.
823          */
824         ecore_wr(p_hwfn, p_ptt, NIG_REG_RX_BRB_OUT_EN, 0);
825         ecore_wr(p_hwfn, p_ptt, NIG_REG_STORM_OUT_EN, 0);
826         ecore_port_pretend(p_hwfn, p_ptt, p_hwfn->port_id ^ 1);
827         ecore_wr(p_hwfn, p_ptt, NIG_REG_RX_BRB_OUT_EN, 0);
828         ecore_wr(p_hwfn, p_ptt, NIG_REG_STORM_OUT_EN, 0);
829         ecore_port_unpretend(p_hwfn, p_ptt);
830
831         rc = ecore_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, hw_mode);
832         if (rc != ECORE_SUCCESS)
833                 return rc;
834
835         /* @@TBD MichalK - should add VALIDATE_VFID to init tool...
836          * need to decide with which value, maybe runtime
837          */
838         ecore_wr(p_hwfn, p_ptt, PSWRQ2_REG_L2P_VALIDATE_VFID, 0);
839         ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_USE_CLIENTID_IN_TAG, 1);
840
841         if (ECORE_IS_BB(p_hwfn->p_dev)) {
842                 num_pfs = NUM_OF_ENG_PFS(p_hwfn->p_dev);
843                 if (num_pfs == 1)
844                         return rc;
845                 /* pretend to original PF */
846                 ecore_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
847         }
848
849         /* Workaround for avoiding CCFC execution error when getting packets
850          * with CRC errors, and allowing instead the invoking of the FW error
851          * handler.
852          * This is not done inside the init tool since it currently can't
853          * perform a pretending to VFs.
854          */
855         max_num_vfs = ECORE_IS_AH(p_hwfn->p_dev) ? MAX_NUM_VFS_K2
856             : MAX_NUM_VFS_BB;
857         for (vf_id = 0; vf_id < max_num_vfs; vf_id++) {
858                 concrete_fid = ecore_vfid_to_concrete(p_hwfn, vf_id);
859                 ecore_fid_pretend(p_hwfn, p_ptt, (u16)concrete_fid);
860                 ecore_wr(p_hwfn, p_ptt, CCFC_REG_STRONG_ENABLE_VF, 0x1);
861         }
862         /* pretend to original PF */
863         ecore_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
864
865         return rc;
866 }
867
868 #ifndef ASIC_ONLY
869 #define MISC_REG_RESET_REG_2_XMAC_BIT (1 << 4)
870 #define MISC_REG_RESET_REG_2_XMAC_SOFT_BIT (1 << 5)
871
872 #define PMEG_IF_BYTE_COUNT      8
873
874 static void ecore_wr_nw_port(struct ecore_hwfn *p_hwfn,
875                              struct ecore_ptt *p_ptt,
876                              u32 addr, u64 data, u8 reg_type, u8 port)
877 {
878         DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
879                    "CMD: %08x, ADDR: 0x%08x, DATA: %08x:%08x\n",
880                    ecore_rd(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_CMD_BB_B0) |
881                    (8 << PMEG_IF_BYTE_COUNT),
882                    (reg_type << 25) | (addr << 8) | port,
883                    (u32)((data >> 32) & 0xffffffff),
884                    (u32)(data & 0xffffffff));
885
886         ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_CMD_BB_B0,
887                  (ecore_rd(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_CMD_BB_B0) &
888                   0xffff00fe) | (8 << PMEG_IF_BYTE_COUNT));
889         ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_ADDR_BB_B0,
890                  (reg_type << 25) | (addr << 8) | port);
891         ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_WRDATA_BB_B0,
892                  data & 0xffffffff);
893         ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_WRDATA_BB_B0,
894                  (data >> 32) & 0xffffffff);
895 }
896
897 #define XLPORT_MODE_REG (0x20a)
898 #define XLPORT_MAC_CONTROL (0x210)
899 #define XLPORT_FLOW_CONTROL_CONFIG (0x207)
900 #define XLPORT_ENABLE_REG (0x20b)
901
902 #define XLMAC_CTRL (0x600)
903 #define XLMAC_MODE (0x601)
904 #define XLMAC_RX_MAX_SIZE (0x608)
905 #define XLMAC_TX_CTRL (0x604)
906 #define XLMAC_PAUSE_CTRL (0x60d)
907 #define XLMAC_PFC_CTRL (0x60e)
908
909 static void ecore_emul_link_init_ah(struct ecore_hwfn *p_hwfn,
910                                     struct ecore_ptt *p_ptt)
911 {
912         u8 port = p_hwfn->port_id;
913         u32 mac_base = NWM_REG_MAC0 + (port << 2) * NWM_REG_MAC0_SIZE;
914
915         ecore_wr(p_hwfn, p_ptt, CNIG_REG_NIG_PORT0_CONF_K2 + (port << 2),
916                  (1 << CNIG_REG_NIG_PORT0_CONF_NIG_PORT_ENABLE_0_SHIFT) |
917                  (port << CNIG_REG_NIG_PORT0_CONF_NIG_PORT_NWM_PORT_MAP_0_SHIFT)
918                  | (0 << CNIG_REG_NIG_PORT0_CONF_NIG_PORT_RATE_0_SHIFT));
919
920         ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_XIF_MODE,
921                  1 << ETH_MAC_REG_XIF_MODE_XGMII_SHIFT);
922
923         ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_FRM_LENGTH,
924                  9018 << ETH_MAC_REG_FRM_LENGTH_FRM_LENGTH_SHIFT);
925
926         ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_TX_IPG_LENGTH,
927                  0xc << ETH_MAC_REG_TX_IPG_LENGTH_TXIPG_SHIFT);
928
929         ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_RX_FIFO_SECTIONS,
930                  8 << ETH_MAC_REG_RX_FIFO_SECTIONS_RX_SECTION_FULL_SHIFT);
931
932         ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_TX_FIFO_SECTIONS,
933                  (0xA << ETH_MAC_REG_TX_FIFO_SECTIONS_TX_SECTION_EMPTY_SHIFT) |
934                  (8 << ETH_MAC_REG_TX_FIFO_SECTIONS_TX_SECTION_FULL_SHIFT));
935
936         ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_COMMAND_CONFIG, 0xa853);
937 }
938
939 static void ecore_emul_link_init(struct ecore_hwfn *p_hwfn,
940                                  struct ecore_ptt *p_ptt)
941 {
942         u8 loopback = 0, port = p_hwfn->port_id * 2;
943
944         DP_INFO(p_hwfn->p_dev, "Configuring Emulation Link %02x\n", port);
945
946         if (ECORE_IS_AH(p_hwfn->p_dev)) {
947                 ecore_emul_link_init_ah(p_hwfn, p_ptt);
948                 return;
949         }
950
951         ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_MODE_REG, (0x4 << 4) | 0x4, 1,
952                                 port);
953         ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_MAC_CONTROL, 0, 1, port);
954         ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_CTRL, 0x40, 0, port);
955         ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_MODE, 0x40, 0, port);
956         ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_RX_MAX_SIZE, 0x3fff, 0, port);
957         ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_TX_CTRL,
958                          0x01000000800ULL | (0xa << 12) | ((u64)1 << 38),
959                          0, port);
960         ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_PAUSE_CTRL, 0x7c000, 0, port);
961         ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_PFC_CTRL,
962                          0x30ffffc000ULL, 0, port);
963         ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_CTRL, 0x3 | (loopback << 2), 0,
964                         port);
965         ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_CTRL, 0x1003 | (loopback << 2),
966                         0, port);
967         ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_FLOW_CONTROL_CONFIG, 1, 0, port);
968         ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_ENABLE_REG, 0xf, 1, port);
969 }
970
971 static void ecore_link_init(struct ecore_hwfn *p_hwfn,
972                             struct ecore_ptt *p_ptt, u8 port)
973 {
974         int port_offset = port ? 0x800 : 0;
975         u32 xmac_rxctrl = 0;
976
977         /* Reset of XMAC */
978         /* FIXME: move to common start */
979         ecore_wr(p_hwfn, p_ptt, MISC_REG_RESET_PL_PDA_VAUX + 2 * sizeof(u32),
980                 MISC_REG_RESET_REG_2_XMAC_BIT); /* Clear */
981         OSAL_MSLEEP(1);
982         ecore_wr(p_hwfn, p_ptt, MISC_REG_RESET_PL_PDA_VAUX + sizeof(u32),
983                 MISC_REG_RESET_REG_2_XMAC_BIT); /* Set */
984
985         ecore_wr(p_hwfn, p_ptt, MISC_REG_XMAC_CORE_PORT_MODE, 1);
986
987         /* Set the number of ports on the Warp Core to 10G */
988         ecore_wr(p_hwfn, p_ptt, MISC_REG_XMAC_PHY_PORT_MODE, 3);
989
990         /* Soft reset of XMAC */
991         ecore_wr(p_hwfn, p_ptt, MISC_REG_RESET_PL_PDA_VAUX + 2 * sizeof(u32),
992                  MISC_REG_RESET_REG_2_XMAC_SOFT_BIT);
993         OSAL_MSLEEP(1);
994         ecore_wr(p_hwfn, p_ptt, MISC_REG_RESET_PL_PDA_VAUX + sizeof(u32),
995                  MISC_REG_RESET_REG_2_XMAC_SOFT_BIT);
996
997         /* FIXME: move to common end */
998         if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
999                 ecore_wr(p_hwfn, p_ptt, XMAC_REG_MODE + port_offset, 0x20);
1000
1001         /* Set Max packet size: initialize XMAC block register for port 0 */
1002         ecore_wr(p_hwfn, p_ptt, XMAC_REG_RX_MAX_SIZE + port_offset, 0x2710);
1003
1004         /* CRC append for Tx packets: init XMAC block register for port 1 */
1005         ecore_wr(p_hwfn, p_ptt, XMAC_REG_TX_CTRL_LO + port_offset, 0xC800);
1006
1007         /* Enable TX and RX: initialize XMAC block register for port 1 */
1008         ecore_wr(p_hwfn, p_ptt, XMAC_REG_CTRL + port_offset,
1009                  XMAC_REG_CTRL_TX_EN | XMAC_REG_CTRL_RX_EN);
1010         xmac_rxctrl = ecore_rd(p_hwfn, p_ptt, XMAC_REG_RX_CTRL + port_offset);
1011         xmac_rxctrl |= XMAC_REG_RX_CTRL_PROCESS_VARIABLE_PREAMBLE;
1012         ecore_wr(p_hwfn, p_ptt, XMAC_REG_RX_CTRL + port_offset, xmac_rxctrl);
1013 }
1014 #endif
1015
1016 static enum _ecore_status_t ecore_hw_init_port(struct ecore_hwfn *p_hwfn,
1017                                                struct ecore_ptt *p_ptt,
1018                                                int hw_mode)
1019 {
1020         enum _ecore_status_t rc = ECORE_SUCCESS;
1021
1022         /* Init sequence */
1023         rc = ecore_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id,
1024                             hw_mode);
1025         if (rc != ECORE_SUCCESS)
1026                 return rc;
1027
1028 #ifndef ASIC_ONLY
1029         if (CHIP_REV_IS_ASIC(p_hwfn->p_dev))
1030                 return ECORE_SUCCESS;
1031
1032         if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
1033                 if (ECORE_IS_AH(p_hwfn->p_dev))
1034                         return ECORE_SUCCESS;
1035                 ecore_link_init(p_hwfn, p_ptt, p_hwfn->port_id);
1036         } else if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
1037                 if (p_hwfn->p_dev->num_hwfns > 1) {
1038                         /* Activate OPTE in CMT */
1039                         u32 val;
1040
1041                         val = ecore_rd(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV);
1042                         val |= 0x10;
1043                         ecore_wr(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV, val);
1044                         ecore_wr(p_hwfn, p_ptt, MISC_REG_CLK_100G_MODE, 1);
1045                         ecore_wr(p_hwfn, p_ptt, MISCS_REG_CLK_100G_MODE, 1);
1046                         ecore_wr(p_hwfn, p_ptt, MISC_REG_OPTE_MODE, 1);
1047                         ecore_wr(p_hwfn, p_ptt,
1048                                  NIG_REG_LLH_ENG_CLS_TCP_4_TUPLE_SEARCH, 1);
1049                         ecore_wr(p_hwfn, p_ptt,
1050                                  NIG_REG_LLH_ENG_CLS_ENG_ID_TBL, 0x55555555);
1051                         ecore_wr(p_hwfn, p_ptt,
1052                                  NIG_REG_LLH_ENG_CLS_ENG_ID_TBL + 0x4,
1053                                  0x55555555);
1054                 }
1055
1056                 ecore_emul_link_init(p_hwfn, p_ptt);
1057         } else {
1058                 DP_INFO(p_hwfn->p_dev, "link is not being configured\n");
1059         }
1060 #endif
1061
1062         return rc;
1063 }
1064
1065 static enum _ecore_status_t
1066 ecore_hw_init_pf_doorbell_bar(struct ecore_hwfn *p_hwfn,
1067                               struct ecore_ptt *p_ptt)
1068 {
1069         u32 pwm_regsize, norm_regsize;
1070         u32 non_pwm_conn, min_addr_reg1;
1071         u32 db_bar_size, n_cpus;
1072         u32 pf_dems_shift;
1073         int rc = ECORE_SUCCESS;
1074
1075         db_bar_size = ecore_hw_bar_size(p_hwfn, BAR_ID_1);
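        /* In CMT (more than one hwfn per device), each hwfn uses half of the
         * doorbell BAR.
         */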
1076         if (p_hwfn->p_dev->num_hwfns > 1)
1077                 db_bar_size /= 2;
1078
1079         /* Calculate doorbell regions
1080          * -----------------------------------
1081          * The doorbell BAR is made of two regions. The first is called normal
1082          * region and the second is called PWM region. In the normal region
1083          * each ICID has its own set of addresses so that writing to that
1084          * specific address identifies the ICID. In the Process Window Mode
1085          * region the ICID is given in the data written to the doorbell. The
1086          * above per PF register denotes the offset in the doorbell BAR in which
1087          * the PWM region begins.
1088          * The normal region has ECORE_PF_DEMS_SIZE bytes per ICID, that is per
1089          * non-PWM connection. The calculation below computes the total non-PWM
1090          * connections. The DORQ_REG_PF_MIN_ADDR_REG1 register is
1091          * in units of 4,096 bytes.
1092          */
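        /* Illustration (hypothetical numbers, assuming ECORE_PF_DEMS_SIZE is 4):
         * with 1000 non-PWM connections, norm_regsize = ROUNDUP(4 * 1000, 4096)
         * = 4096, min_addr_reg1 = 4096 / 4096 = 1, and the PWM region gets the
         * remainder of the doorbell BAR.
         */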
1093         non_pwm_conn = ecore_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_CORE) +
1094             ecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_CORE,
1095                                           OSAL_NULL) +
1096             ecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, OSAL_NULL);
1097         norm_regsize = ROUNDUP(ECORE_PF_DEMS_SIZE * non_pwm_conn, 4096);
1098         min_addr_reg1 = norm_regsize / 4096;
1099         pwm_regsize = db_bar_size - norm_regsize;
1100
1101         /* Check that the normal and PWM sizes are valid */
1102         if (db_bar_size < norm_regsize) {
1103                 DP_ERR(p_hwfn->p_dev,
1104                        "Doorbell BAR size 0x%x is too"
1105                        " small (normal region is 0x%0x )\n",
1106                        db_bar_size, norm_regsize);
1107                 return ECORE_NORESOURCES;
1108         }
1109         if (pwm_regsize < ECORE_MIN_PWM_REGION) {
1110                 DP_ERR(p_hwfn->p_dev,
1111                        "PWM region size 0x%0x is too small."
1112                        " Should be at least 0x%0x (Doorbell BAR size"
1113                        " is 0x%x and normal region size is 0x%0x)\n",
1114                        pwm_regsize, ECORE_MIN_PWM_REGION, db_bar_size,
1115                        norm_regsize);
1116                 return ECORE_NORESOURCES;
1117         }
1118
1119         /* Update hwfn */
1120         p_hwfn->dpi_start_offset = norm_regsize; /* this is later used to
1121                                                   * calculate the doorbell
1122                                                   * address
1123                                                   */
1124
1125         /* Update registers */
1126         /* DEMS size is configured log2 of DWORDs, hence the division by 4 */
1127         pf_dems_shift = OSAL_LOG2(ECORE_PF_DEMS_SIZE / 4);
1128         ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_ICID_BIT_SHIFT_NORM, pf_dems_shift);
1129         ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_MIN_ADDR_REG1, min_addr_reg1);
1130
1131         DP_INFO(p_hwfn,
1132                 "Doorbell size 0x%x, Normal region 0x%x, PWM region 0x%x\n",
1133                 db_bar_size, norm_regsize, pwm_regsize);
1134         DP_INFO(p_hwfn, "DPI size 0x%x, DPI count 0x%x\n", p_hwfn->dpi_size,
1135                 p_hwfn->dpi_count);
1136
1137         return ECORE_SUCCESS;
1138 }
1139
1140 static enum _ecore_status_t
1141 ecore_hw_init_pf(struct ecore_hwfn *p_hwfn,
1142                  struct ecore_ptt *p_ptt,
1143                  struct ecore_tunn_start_params *p_tunn,
1144                  int hw_mode,
1145                  bool b_hw_start,
1146                  enum ecore_int_mode int_mode, bool allow_npar_tx_switch)
1147 {
1148         enum _ecore_status_t rc = ECORE_SUCCESS;
1149         u8 rel_pf_id = p_hwfn->rel_pf_id;
1150         u32 prs_reg;
1151         u16 ctrl;
1152         int pos;
1153
1154         /* ILT/DQ/CM/QM */
1155         if (p_hwfn->mcp_info) {
1156                 struct ecore_mcp_function_info *p_info;
1157
1158                 p_info = &p_hwfn->mcp_info->func_info;
1159                 if (p_info->bandwidth_min)
1160                         p_hwfn->qm_info.pf_wfq = p_info->bandwidth_min;
1161
1162                 /* Update rate limit once we'll actually have a link */
1163                 p_hwfn->qm_info.pf_rl = 100;
1164         }
1165         ecore_cxt_hw_init_pf(p_hwfn);
1166
1167         ecore_int_igu_init_rt(p_hwfn);  /* @@@TBD TODO MichalS multi hwfn ?? */
1168
1169         /* Set VLAN in NIG if needed */
1170         if (hw_mode & (1 << MODE_MF_SD)) {
1171                 DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "Configuring LLH_FUNC_TAG\n");
1172                 STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET, 1);
1173                 STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET,
1174                              p_hwfn->hw_info.ovlan);
1175         }
1176
1177         /* Enable classification by MAC if needed */
1178         if (hw_mode & (1 << MODE_MF_SI)) {
1179                 DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
1180                            "Configuring TAGMAC_CLS_TYPE\n");
1181                 STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET,
1182                              1);
1183         }
1184
1185         /* Protocol Configuration - @@@TBD - should we set 0 otherwise? */
1186         STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET, 0);
1187
1188         /* perform debug configuration when chip is out of reset */
1189         OSAL_BEFORE_PF_START((void *)p_hwfn->p_dev, p_hwfn->my_id);
1190
1191         /* Clean up chip from a previous driver if such remains exist */
1192         rc = ecore_final_cleanup(p_hwfn, p_ptt, rel_pf_id, false);
1193         if (rc != ECORE_SUCCESS) {
1194                 ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_RAMROD_FAIL);
1195                 return rc;
1196         }
1197
1198         /* PF Init sequence */
1199         rc = ecore_init_run(p_hwfn, p_ptt, PHASE_PF, rel_pf_id, hw_mode);
1200         if (rc)
1201                 return rc;
1202
1203         /* QM_PF Init sequence (may be invoked separately e.g. for DCB) */
1204         rc = ecore_init_run(p_hwfn, p_ptt, PHASE_QM_PF, rel_pf_id, hw_mode);
1205         if (rc)
1206                 return rc;
1207
1208         /* Pure runtime initializations - directly to the HW  */
1209         ecore_int_igu_init_pure_rt(p_hwfn, p_ptt, true, true);
1210
1211         /* PCI relaxed ordering causes a decrease in the performance on some
1212          * systems. Till a root cause is found, disable this attribute in the
1213          * PCI config space.
1214          */
1215         /* Not in use @DPDK
1216          * pos = OSAL_PCI_FIND_CAPABILITY(p_hwfn->p_dev, PCI_CAP_ID_EXP);
1217          * if (!pos) {
1218          *      DP_NOTICE(p_hwfn, true,
1219          *                "Failed to find the PCI Express"
1220          *                " Capability structure in the PCI config space\n");
1221          *      return ECORE_IO;
1222          * }
1223          * OSAL_PCI_READ_CONFIG_WORD(p_hwfn->p_dev, pos + PCI_EXP_DEVCTL,
1224          *                           &ctrl);
1225          * ctrl &= ~PCI_EXP_DEVCTL_RELAX_EN;
1226          * OSAL_PCI_WRITE_CONFIG_WORD(p_hwfn->p_dev, pos + PCI_EXP_DEVCTL,
1227          *                           &ctrl);
1228          */
1229
1230 #ifndef ASIC_ONLY
1231         /*@@TMP - On B0 build 1, need to mask the datapath_registers parity */
1232         if (CHIP_REV_IS_EMUL_B0(p_hwfn->p_dev) &&
1233             (p_hwfn->p_dev->chip_metal == 1)) {
1234                 u32 reg_addr, tmp;
1235
1236                 reg_addr =
1237                     attn_blocks[BLOCK_PGLUE_B].
1238                     chip_regs[ECORE_GET_TYPE(p_hwfn->p_dev)].prty_regs[0]->
1239                     mask_addr;
1240                 DP_NOTICE(p_hwfn, false,
1241                           "Masking datapath registers parity on"
1242                           " B0 emulation [build 1]\n");
1243                 tmp = ecore_rd(p_hwfn, p_ptt, reg_addr);
1244                 tmp |= (1 << 0);        /* Was PRTY_MASK_DATAPATH_REGISTERS */
1245                 ecore_wr(p_hwfn, p_ptt, reg_addr, tmp);
1246         }
1247 #endif
1248
1249         rc = ecore_hw_init_pf_doorbell_bar(p_hwfn, p_ptt);
1250         if (rc)
1251                 return rc;
1252
1253         if (b_hw_start) {
1254                 /* enable interrupts */
1255                 ecore_int_igu_enable(p_hwfn, p_ptt, int_mode);
1256
1257                 /* send function start command */
1258                 rc = ecore_sp_pf_start(p_hwfn, p_tunn, p_hwfn->p_dev->mf_mode,
1259                                        allow_npar_tx_switch);
1260                 if (rc) {
1261                         DP_NOTICE(p_hwfn, true,
1262                                   "Function start ramrod failed\n");
1263                 } else {
1264                         prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1);
1265                         DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
1266                                    "PRS_REG_SEARCH_TAG1: %x\n", prs_reg);
1267
1268                         DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
1269                                    "PRS_REG_SEARCH register after start PFn\n");
1270                         prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP);
1271                         DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
1272                                    "PRS_REG_SEARCH_TCP: %x\n", prs_reg);
1273                         prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP);
1274                         DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
1275                                    "PRS_REG_SEARCH_UDP: %x\n", prs_reg);
1276                         prs_reg = ecore_rd(p_hwfn, p_ptt,
1277                                            PRS_REG_SEARCH_TCP_FIRST_FRAG);
1278                         DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
1279                                    "PRS_REG_SEARCH_TCP_FIRST_FRAG: %x\n",
1280                                    prs_reg);
1281                         prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1);
1282                         DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE,
1283                                    "PRS_REG_SEARCH_TAG1: %x\n", prs_reg);
1284                 }
1285         }
1286         return rc;
1287 }
1288
1289 static enum _ecore_status_t
1290 ecore_change_pci_hwfn(struct ecore_hwfn *p_hwfn,
1291                       struct ecore_ptt *p_ptt, u8 enable)
1292 {
1293         u32 delay_idx = 0, val, set_val = enable ? 1 : 0;
1294
1295         /* Change PF in PXP */
1296         ecore_wr(p_hwfn, p_ptt,
1297                  PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, set_val);
1298
1299         /* wait until value is set - try for 1 second every 50us */
1300         for (delay_idx = 0; delay_idx < 20000; delay_idx++) {
1301                 val = ecore_rd(p_hwfn, p_ptt,
1302                                PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
1303                 if (val == set_val)
1304                         break;
1305
1306                 OSAL_UDELAY(50);
1307         }
1308
1309         if (val != set_val) {
1310                 DP_NOTICE(p_hwfn, true,
1311                           "PFID_ENABLE_MASTER wasn't changed after a second\n");
1312                 return ECORE_UNKNOWN_ERROR;
1313         }
1314
1315         return ECORE_SUCCESS;
1316 }
1317
1318 static void ecore_reset_mb_shadow(struct ecore_hwfn *p_hwfn,
1319                                   struct ecore_ptt *p_main_ptt)
1320 {
1321         /* Read shadow of current MFW mailbox */
1322         ecore_mcp_read_mb(p_hwfn, p_main_ptt);
1323         OSAL_MEMCPY(p_hwfn->mcp_info->mfw_mb_shadow,
1324                     p_hwfn->mcp_info->mfw_mb_cur,
1325                     p_hwfn->mcp_info->mfw_mb_length);
1326 }
1327
1328 enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
1329                                    struct ecore_tunn_start_params *p_tunn,
1330                                    bool b_hw_start,
1331                                    enum ecore_int_mode int_mode,
1332                                    bool allow_npar_tx_switch,
1333                                    const u8 *bin_fw_data)
1334 {
1335         enum _ecore_status_t rc, mfw_rc;
1336         u32 load_code, param;
1337         int i, j;
1338
1339         if (IS_PF(p_dev)) {
1340                 rc = ecore_init_fw_data(p_dev, bin_fw_data);
1341                 if (rc != ECORE_SUCCESS)
1342                         return rc;
1343         }
1344
1345         for_each_hwfn(p_dev, i) {
1346                 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
1347
1348                 if (IS_VF(p_dev)) {
1349                         rc = ecore_vf_pf_init(p_hwfn);
1350                         if (rc)
1351                                 return rc;
1352                         continue;
1353                 }
1354
1355                 /* Enable DMAE in PXP */
1356                 rc = ecore_change_pci_hwfn(p_hwfn, p_hwfn->p_main_ptt, true);
1357
1358                 ecore_calc_hw_mode(p_hwfn);
1359                 /* @@@TBD need to add here:
1360                  * Check for fan failure
1361                  * Prev_unload
1362                  */
1363                 rc = ecore_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt, &load_code);
1364                 if (rc) {
1365                         DP_NOTICE(p_hwfn, true,
1366                                   "Failed sending LOAD_REQ command\n");
1367                         return rc;
1368                 }
1369
1370                 /* CQ75580:
1371                  * When coming back from hibernate state, the registers from
1372                  * which shadow is read initially are not initialized. It turns
1373                  * out that these registers get initialized during the call to
1374                  * ecore_mcp_load_req, so we need to reread them here
1375                  * to get the proper shadow register value.
1376                  * Note: This is a workaround for the missing MFW
1377                  * initialization. It may be removed once the implementation
1378                  * is done.
1379                  */
1380                 ecore_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt);
1381
1382                 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
1383                            "Load request was sent. Resp:0x%x, Load code: 0x%x\n",
1384                            rc, load_code);
1385
1386                 /* Only relevant for recovery:
1387                  * Clear the indication after the LOAD_REQ command is responded
1388                  * by the MFW.
1389                  */
1390                 p_dev->recov_in_prog = false;
1391
1392                 p_hwfn->first_on_engine = (load_code ==
1393                                            FW_MSG_CODE_DRV_LOAD_ENGINE);
1394
1395                 switch (load_code) {
1396                 case FW_MSG_CODE_DRV_LOAD_ENGINE:
1397                         rc = ecore_hw_init_common(p_hwfn, p_hwfn->p_main_ptt,
1398                                                   p_hwfn->hw_info.hw_mode);
1399                         if (rc)
1400                                 break;
1401                         /* fallthrough */
1402                 case FW_MSG_CODE_DRV_LOAD_PORT:
1403                         rc = ecore_hw_init_port(p_hwfn, p_hwfn->p_main_ptt,
1404                                                 p_hwfn->hw_info.hw_mode);
1405                         if (rc)
1406                                 break;
1407
1408                         if (ENABLE_EAGLE_ENG1_WORKAROUND(p_hwfn)) {
1409                                 struct init_nig_pri_tc_map_req tc_map;
1410
1411                                 OSAL_MEM_ZERO(&tc_map, sizeof(tc_map));
1412
1413                                 /* remove this once flow control is
1414                                  * implemented
1415                                  */
1416                                 for (j = 0; j < NUM_OF_VLAN_PRIORITIES; j++) {
1417                                         tc_map.pri[j].tc_id = 0;
1418                                         tc_map.pri[j].valid = 1;
1419                                 }
1420                                 ecore_init_nig_pri_tc_map(p_hwfn,
1421                                                           p_hwfn->p_main_ptt,
1422                                                           &tc_map);
1423                         }
1424                         /* fallthrough */
1425                 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
1426                         rc = ecore_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt,
1427                                               p_tunn, p_hwfn->hw_info.hw_mode,
1428                                               b_hw_start, int_mode,
1429                                               allow_npar_tx_switch);
1430                         break;
1431                 default:
1432                         rc = ECORE_NOTIMPL;
1433                         break;
1434                 }
1435
1436                 if (rc != ECORE_SUCCESS)
1437                         DP_NOTICE(p_hwfn, true,
1438                                   "init phase failed loadcode 0x%x (rc %d)\n",
1439                                   load_code, rc);
1440
1441                 /* ACK mfw regardless of success or failure of initialization */
1442                 mfw_rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
1443                                        DRV_MSG_CODE_LOAD_DONE,
1444                                        0, &load_code, &param);
1445                 if (rc != ECORE_SUCCESS)
1446                         return rc;
1447                 if (mfw_rc != ECORE_SUCCESS) {
1448                         DP_NOTICE(p_hwfn, true,
1449                                   "Failed sending LOAD_DONE command\n");
1450                         return mfw_rc;
1451                 }
1452
1453                 /* send DCBX attention request command */
1454                 DP_VERBOSE(p_hwfn, ECORE_MSG_DCB,
1455                            "sending phony dcbx set command to trigger DCBx"
1456                            " attention handling\n");
1457                 mfw_rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
1458                                        DRV_MSG_CODE_SET_DCBX,
1459                                        1 << DRV_MB_PARAM_DCBX_NOTIFY_SHIFT,
1460                                        &load_code, &param);
1461                 if (mfw_rc != ECORE_SUCCESS) {
1462                         DP_NOTICE(p_hwfn, true,
1463                                   "Failed to send DCBX attention request\n");
1464                         return mfw_rc;
1465                 }
1466
1467                 p_hwfn->hw_init_done = true;
1468         }
1469
1470         return ECORE_SUCCESS;
1471 }
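
/* A minimal caller sketch for ecore_hw_init(), illustrative only and with
 * error handling elided. It assumes the device was already prepared via
 * ecore_hw_prepare(), that bin_fw_data points at the firmware image, and
 * that ECORE_INT_MODE_MSIX is a valid enum ecore_int_mode value.
 */
#if 0
	struct ecore_tunn_start_params tunn_params;
	enum _ecore_status_t rc;

	OSAL_MEM_ZERO(&tunn_params, sizeof(tunn_params));
	rc = ecore_hw_init(p_dev, &tunn_params, true /* b_hw_start */,
			   ECORE_INT_MODE_MSIX, true /* allow_npar_tx_switch */,
			   bin_fw_data);
	if (rc != ECORE_SUCCESS)
		DP_NOTICE(p_dev, true, "ecore_hw_init failed (rc %d)\n", rc);
#endif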
1472
1473 #define ECORE_HW_STOP_RETRY_LIMIT       (10)
1474 static OSAL_INLINE void ecore_hw_timers_stop(struct ecore_dev *p_dev,
1475                                              struct ecore_hwfn *p_hwfn,
1476                                              struct ecore_ptt *p_ptt)
1477 {
1478         int i;
1479
1480         /* close timers */
1481         ecore_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0);
1482         ecore_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0);
1483         for (i = 0; i < ECORE_HW_STOP_RETRY_LIMIT &&
1484                                         !p_dev->recov_in_prog; i++) {
1485                 if ((!ecore_rd(p_hwfn, p_ptt,
1486                                TM_REG_PF_SCAN_ACTIVE_CONN)) &&
1487                     (!ecore_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK)))
1488                         break;
1489
1490                 /* Depending on the number of connections/tasks, a 1ms sleep
1491                  * may be required between polls
1492                  */
1493                 OSAL_MSLEEP(1);
1494         }
1495         if (i == ECORE_HW_STOP_RETRY_LIMIT)
1496                 DP_NOTICE(p_hwfn, true,
1497                           "Timers linear scans are not over"
1498                           " [Connection %02x Tasks %02x]\n",
1499                           (u8)ecore_rd(p_hwfn, p_ptt,
1500                                        TM_REG_PF_SCAN_ACTIVE_CONN),
1501                           (u8)ecore_rd(p_hwfn, p_ptt,
1502                                        TM_REG_PF_SCAN_ACTIVE_TASK));
1503 }
1504
1505 void ecore_hw_timers_stop_all(struct ecore_dev *p_dev)
1506 {
1507         int j;
1508
1509         for_each_hwfn(p_dev, j) {
1510                 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[j];
1511                 struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt;
1512
1513                 ecore_hw_timers_stop(p_dev, p_hwfn, p_ptt);
1514         }
1515 }
1516
1517 enum _ecore_status_t ecore_hw_stop(struct ecore_dev *p_dev)
1518 {
1519         enum _ecore_status_t rc = ECORE_SUCCESS, t_rc;
1520         int j;
1521
1522         for_each_hwfn(p_dev, j) {
1523                 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[j];
1524                 struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt;
1525
1526                 DP_VERBOSE(p_hwfn, ECORE_MSG_IFDOWN, "Stopping hw/fw\n");
1527
1528                 if (IS_VF(p_dev)) {
1529                         ecore_vf_pf_int_cleanup(p_hwfn);
1530                         continue;
1531                 }
1532
1533                 /* mark the hw as uninitialized... */
1534                 p_hwfn->hw_init_done = false;
1535
1536                 rc = ecore_sp_pf_stop(p_hwfn);
1537                 if (rc)
1538                         DP_NOTICE(p_hwfn, true,
1539                                   "Failed to close PF against FW. Continue to"
1540                                   " stop HW to prevent illegal host access"
1541                                   " by the device\n");
1542
1543                 /* perform debug action after PF stop was sent */
1544                 OSAL_AFTER_PF_STOP((void *)p_hwfn->p_dev, p_hwfn->my_id);
1545
1546                 /* close NIG to BRB gate */
1547                 ecore_wr(p_hwfn, p_ptt,
1548                          NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);
1549
1550                 /* close parser */
1551                 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
1552                 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0);
1553                 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);
1554
1555                 /* @@@TBD - clean transmission queues (5.b) */
1556                 /* @@@TBD - clean BTB (5.c) */
1557
1558                 ecore_hw_timers_stop(p_dev, p_hwfn, p_ptt);
1559
1560                 /* @@@TBD - verify DMAE requests are done (8) */
1561
1562                 /* Disable Attention Generation */
1563                 ecore_int_igu_disable_int(p_hwfn, p_ptt);
1564                 ecore_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0);
1565                 ecore_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0);
1566                 ecore_int_igu_init_pure_rt(p_hwfn, p_ptt, false, true);
1567                 /* Need to wait 1ms to guarantee SBs are cleared */
1568                 OSAL_MSLEEP(1);
1569         }
1570
1571         if (IS_PF(p_dev)) {
1572                 /* Disable DMAE in PXP - in CMT, this should only be done for
1573                  * first hw-function, and only after all transactions have
1574                  * stopped for all active hw-functions.
1575                  */
1576                 t_rc = ecore_change_pci_hwfn(&p_dev->hwfns[0],
1577                                              p_dev->hwfns[0].p_main_ptt, false);
1578                 if (t_rc != ECORE_SUCCESS)
1579                         rc = t_rc;
1580         }
1581
1582         return rc;
1583 }
1584
1585 void ecore_hw_stop_fastpath(struct ecore_dev *p_dev)
1586 {
1587         int j;
1588
1589         for_each_hwfn(p_dev, j) {
1590                 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[j];
1591                 struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt;
1592
1593                 if (IS_VF(p_dev)) {
1594                         ecore_vf_pf_int_cleanup(p_hwfn);
1595                         continue;
1596                 }
1597
1598                 DP_VERBOSE(p_hwfn, ECORE_MSG_IFDOWN,
1599                            "Shutting down the fastpath\n");
1600
1601                 ecore_wr(p_hwfn, p_ptt,
1602                          NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1);
1603
1604                 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0);
1605                 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0);
1606                 ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0);
1607
1608                 /* @@@TBD - clean transmission queues (5.b) */
1609                 /* @@@TBD - clean BTB (5.c) */
1610
1611                 /* @@@TBD - verify DMAE requests are done (8) */
1612
1613                 ecore_int_igu_init_pure_rt(p_hwfn, p_ptt, false, false);
1614                 /* Need to wait 1ms to guarantee SBs are cleared */
1615                 OSAL_MSLEEP(1);
1616         }
1617 }
1618
1619 void ecore_hw_start_fastpath(struct ecore_hwfn *p_hwfn)
1620 {
1621         struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt;
1622
1623         if (IS_VF(p_hwfn->p_dev))
1624                 return;
1625
1626         /* Re-open incoming traffic */
1627         ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
1628                  NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0);
1629 }
1630
1631 static enum _ecore_status_t ecore_reg_assert(struct ecore_hwfn *p_hwfn,
1632                                              struct ecore_ptt *p_ptt, u32 reg,
1633                                              bool expected)
1634 {
1635         u32 assert_val = ecore_rd(p_hwfn, p_ptt, reg);
1636
1637         if (assert_val != expected) {
1638                 DP_NOTICE(p_hwfn, true, "Value at address 0x%08x != 0x%08x\n",
1639                           reg, expected);
1640                 return ECORE_UNKNOWN_ERROR;
1641         }
1642
1643         return ECORE_SUCCESS;
1644 }
1645
1646 enum _ecore_status_t ecore_hw_reset(struct ecore_dev *p_dev)
1647 {
1648         enum _ecore_status_t rc = ECORE_SUCCESS;
1649         u32 unload_resp, unload_param;
1650         int i;
1651
1652         for_each_hwfn(p_dev, i) {
1653                 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
1654
1655                 if (IS_VF(p_dev)) {
1656                         rc = ecore_vf_pf_reset(p_hwfn);
1657                         if (rc)
1658                                 return rc;
1659                         continue;
1660                 }
1661
1662                 DP_VERBOSE(p_hwfn, ECORE_MSG_IFDOWN, "Resetting hw/fw\n");
1663
1664                 /* Check for incorrect states */
1665                 if (!p_dev->recov_in_prog) {
1666                         ecore_reg_assert(p_hwfn, p_hwfn->p_main_ptt,
1667                                          QM_REG_USG_CNT_PF_TX, 0);
1668                         ecore_reg_assert(p_hwfn, p_hwfn->p_main_ptt,
1669                                          QM_REG_USG_CNT_PF_OTHER, 0);
1670                         /* @@@TBD - assert on incorrect xCFC values (10.b) */
1671                 }
1672
1673                 /* Disable PF in HW blocks */
1674                 ecore_wr(p_hwfn, p_hwfn->p_main_ptt, DORQ_REG_PF_DB_ENABLE, 0);
1675                 ecore_wr(p_hwfn, p_hwfn->p_main_ptt, QM_REG_PF_EN, 0);
1676                 ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
1677                          TCFC_REG_STRONG_ENABLE_PF, 0);
1678                 ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
1679                          CCFC_REG_STRONG_ENABLE_PF, 0);
1680
1681                 if (p_dev->recov_in_prog) {
1682                         DP_VERBOSE(p_hwfn, ECORE_MSG_IFDOWN,
1683                                    "Recovery is in progress -> skip "
1684                                    "sending unload_req/done\n");
1685                         break;
1686                 }
1687
1688                 /* Send unload command to MCP */
1689                 rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
1690                                    DRV_MSG_CODE_UNLOAD_REQ,
1691                                    DRV_MB_PARAM_UNLOAD_WOL_MCP,
1692                                    &unload_resp, &unload_param);
1693                 if (rc != ECORE_SUCCESS) {
1694                         DP_NOTICE(p_hwfn, true,
1695                                   "ecore_hw_reset: UNLOAD_REQ failed\n");
1696                         /* @@TBD - what to do? for now, assume ENG. */
1697                         unload_resp = FW_MSG_CODE_DRV_UNLOAD_ENGINE;
1698                 }
1699
1700                 rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
1701                                    DRV_MSG_CODE_UNLOAD_DONE,
1702                                    0, &unload_resp, &unload_param);
1703                 if (rc != ECORE_SUCCESS) {
1704                         DP_NOTICE(p_hwfn,
1705                                   true, "ecore_hw_reset: UNLOAD_DONE failed\n");
1706                         /* @@@TBD - Should it really ASSERT here ? */
1707                         return rc;
1708                 }
1709         }
1710
1711         return rc;
1712 }
1713
1714 /* Free hwfn memory and resources acquired in hw_hwfn_prepare */
1715 static void ecore_hw_hwfn_free(struct ecore_hwfn *p_hwfn)
1716 {
1717         ecore_ptt_pool_free(p_hwfn);
1718         OSAL_FREE(p_hwfn->p_dev, p_hwfn->hw_info.p_igu_info);
1719 }
1720
1721 /* Setup bar access */
1722 static void ecore_hw_hwfn_prepare(struct ecore_hwfn *p_hwfn)
1723 {
1724         /* clear indirect access */
1725         ecore_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_88_F0, 0);
1726         ecore_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_8C_F0, 0);
1727         ecore_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_90_F0, 0);
1728         ecore_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_94_F0, 0);
1729
1730         /* Clean Previous errors if such exist */
1731         ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
1732                  PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR, 1 << p_hwfn->abs_pf_id);
1733
1734         /* enable internal target-read */
1735         ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
1736                  PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
1737 }
1738
1739 static void get_function_id(struct ecore_hwfn *p_hwfn)
1740 {
1741         /* ME Register */
1742         p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn,
1743                                                  PXP_PF_ME_OPAQUE_ADDR);
1744
1745         p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, PXP_PF_ME_CONCRETE_ADDR);
1746
1747         /* Bits 16-19 from the ME registers are the pf_num */
1748         /* @@ @TBD - check, may be wrong after B0 implementation for CMT */
1749         p_hwfn->abs_pf_id = (p_hwfn->hw_info.concrete_fid >> 16) & 0xf;
1750         p_hwfn->rel_pf_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
1751                                       PXP_CONCRETE_FID_PFID);
1752         p_hwfn->port_id = GET_FIELD(p_hwfn->hw_info.concrete_fid,
1753                                     PXP_CONCRETE_FID_PORT);
1754
1755         DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE,
1756                    "Read ME register: Concrete 0x%08x Opaque 0x%04x\n",
1757                    p_hwfn->hw_info.concrete_fid, p_hwfn->hw_info.opaque_fid);
1758 }
1759
1760 static void ecore_hw_set_feat(struct ecore_hwfn *p_hwfn)
1761 {
1762         u32 *feat_num = p_hwfn->hw_info.feat_num;
1763         int num_features = 1;
1764
1765         /* L2 Queues require each: 1 status block. 1 L2 queue */
1766         feat_num[ECORE_PF_L2_QUE] =
1767             OSAL_MIN_T(u32,
1768                        RESC_NUM(p_hwfn, ECORE_SB) / num_features,
1769                        RESC_NUM(p_hwfn, ECORE_L2_QUEUE));
1770
1771         DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE,
1772                    "#PF_L2_QUEUES=%d #SBS=%d num_features=%d\n",
1773                    feat_num[ECORE_PF_L2_QUE],
1774                    RESC_NUM(p_hwfn, ECORE_SB), num_features);
1775 }
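
/* Worked example (hypothetical numbers, for illustration): with
 * RESC_NUM(ECORE_SB) = 64, RESC_NUM(ECORE_L2_QUEUE) = 128 and
 * num_features = 1, the calculation above yields
 * feat_num[ECORE_PF_L2_QUE] = min(64 / 1, 128) = 64 - i.e. the L2 queue
 * count is capped by the number of available status blocks.
 */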
1776
1777 /* @@@TBD MK RESC: This info is currently hard-coded and set as if we were MF;
1778  * need to read it from shmem...
1779  */
1780 static enum _ecore_status_t ecore_hw_get_resc(struct ecore_hwfn *p_hwfn)
1781 {
1782         u32 *resc_start = p_hwfn->hw_info.resc_start;
1783         u8 num_funcs = p_hwfn->num_funcs_on_engine;
1784         u32 *resc_num = p_hwfn->hw_info.resc_num;
1785         int i, max_vf_vlan_filters;
1786         struct ecore_sb_cnt_info sb_cnt_info;
1787         bool b_ah = ECORE_IS_AH(p_hwfn->p_dev);
1788
1789         OSAL_MEM_ZERO(&sb_cnt_info, sizeof(sb_cnt_info));
1790
1791 #ifdef CONFIG_ECORE_SRIOV
1792         max_vf_vlan_filters = ECORE_ETH_MAX_VF_NUM_VLAN_FILTERS;
1793 #else
1794         max_vf_vlan_filters = 0;
1795 #endif
1796
1797         ecore_int_get_num_sbs(p_hwfn, &sb_cnt_info);
1798         resc_num[ECORE_SB] = OSAL_MIN_T(u32,
1799                                         (MAX_SB_PER_PATH_BB / num_funcs),
1800                                         sb_cnt_info.sb_cnt);
1801
1802         resc_num[ECORE_L2_QUEUE] = (b_ah ? MAX_NUM_L2_QUEUES_K2 :
1803                                     MAX_NUM_L2_QUEUES_BB) / num_funcs;
1804         resc_num[ECORE_VPORT] = (b_ah ? MAX_NUM_VPORTS_K2 :
1805                                  MAX_NUM_VPORTS_BB) / num_funcs;
1806         resc_num[ECORE_RSS_ENG] = (b_ah ? ETH_RSS_ENGINE_NUM_K2 :
1807                                    ETH_RSS_ENGINE_NUM_BB) / num_funcs;
1808         resc_num[ECORE_PQ] = (b_ah ? MAX_QM_TX_QUEUES_K2 :
1809                               MAX_QM_TX_QUEUES_BB) / num_funcs;
1810         resc_num[ECORE_RL] = 8;
1811         resc_num[ECORE_MAC] = ETH_NUM_MAC_FILTERS / num_funcs;
1812         resc_num[ECORE_VLAN] = (ETH_NUM_VLAN_FILTERS -
1813                                 max_vf_vlan_filters +
1814                                 1 /*For vlan0 */) / num_funcs;
1815
1816         /* TODO - there will be a problem in AH - there are only 11k lines */
1817         resc_num[ECORE_ILT] = (b_ah ? PXP_NUM_ILT_RECORDS_K2 :
1818                                PXP_NUM_ILT_RECORDS_BB) / num_funcs;
1819
1820 #ifndef ASIC_ONLY
1821         if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
1822                 /* Reduced build contains less PQs */
1823                 if (!(p_hwfn->p_dev->b_is_emul_full))
1824                         resc_num[ECORE_PQ] = 32;
1825
1826                 /* For AH emulation, since we have a possible maximal number of
1827                  * 16 enabled PFs, in case there are not enough ILT lines -
1828                  * allocate only the first PF as RoCE and have all the others as
1829                  * ETH only, with fewer ILT lines.
1830                  */
1831                 if (!p_hwfn->rel_pf_id && p_hwfn->p_dev->b_is_emul_full)
1832                         resc_num[ECORE_ILT] *= 2; /* double PF0's share */
1833         }
1834 #endif
1835
1836         for (i = 0; i < ECORE_MAX_RESC; i++)
1837                 resc_start[i] = resc_num[i] * p_hwfn->rel_pf_id;
1838
1839 #ifndef ASIC_ONLY
1840         /* Correct the common ILT calculation if PF0 has more */
1841         if (CHIP_REV_IS_SLOW(p_hwfn->p_dev) &&
1842             p_hwfn->p_dev->b_is_emul_full &&
1843             p_hwfn->rel_pf_id && resc_num[ECORE_ILT])
1844                 resc_start[ECORE_ILT] += resc_num[ECORE_ILT];
1845 #endif
1846
1847         /* Sanity for ILT */
1848         if ((b_ah && (RESC_END(p_hwfn, ECORE_ILT) > PXP_NUM_ILT_RECORDS_K2)) ||
1849             (!b_ah && (RESC_END(p_hwfn, ECORE_ILT) > PXP_NUM_ILT_RECORDS_BB))) {
1850                 DP_NOTICE(p_hwfn, true,
1851                           "Can't assign ILT pages [%08x,...,%08x]\n",
1852                           RESC_START(p_hwfn, ECORE_ILT), RESC_END(p_hwfn,
1853                                                                   ECORE_ILT) -
1854                           1);
1855                 return ECORE_INVAL;
1856         }
1857
1858         ecore_hw_set_feat(p_hwfn);
1859
1860         DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE,
1861                    "The numbers for each resource are:\n"
1862                    "SB = %d start = %d\n"
1863                    "L2_QUEUE = %d start = %d\n"
1864                    "VPORT = %d start = %d\n"
1865                    "PQ = %d start = %d\n"
1866                    "RL = %d start = %d\n"
1867                    "MAC = %d start = %d\n"
1868                    "VLAN = %d start = %d\n"
1869                    "ILT = %d start = %d\n"
1870                    "CMDQS_CQS = %d start = %d\n",
1871                    RESC_NUM(p_hwfn, ECORE_SB), RESC_START(p_hwfn, ECORE_SB),
1872                    RESC_NUM(p_hwfn, ECORE_L2_QUEUE),
1873                    RESC_START(p_hwfn, ECORE_L2_QUEUE),
1874                    RESC_NUM(p_hwfn, ECORE_VPORT),
1875                    RESC_START(p_hwfn, ECORE_VPORT),
1876                    RESC_NUM(p_hwfn, ECORE_PQ), RESC_START(p_hwfn, ECORE_PQ),
1877                    RESC_NUM(p_hwfn, ECORE_RL), RESC_START(p_hwfn, ECORE_RL),
1878                    RESC_NUM(p_hwfn, ECORE_MAC), RESC_START(p_hwfn, ECORE_MAC),
1879                    RESC_NUM(p_hwfn, ECORE_VLAN),
1880                    RESC_START(p_hwfn, ECORE_VLAN),
1881                    RESC_NUM(p_hwfn, ECORE_ILT), RESC_START(p_hwfn, ECORE_ILT),
1882                    RESC_NUM(p_hwfn, ECORE_CMDQS_CQS),
1883                    RESC_START(p_hwfn, ECORE_CMDQS_CQS));
1884
1885         return ECORE_SUCCESS;
1886 }
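
/* Worked example (hypothetical numbers, for illustration): with
 * num_funcs = 8 and 512 L2 queues available per engine, every PF gets
 * resc_num[ECORE_L2_QUEUE] = 512 / 8 = 64, and the PF with rel_pf_id = 3
 * starts at resc_start[ECORE_L2_QUEUE] = 64 * 3 = 192, so the resources are
 * carved into equal, non-overlapping per-PF windows.
 */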
1887
1888 static enum _ecore_status_t ecore_hw_get_nvm_info(struct ecore_hwfn *p_hwfn,
1889                                                   struct ecore_ptt *p_ptt)
1890 {
1891         u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg;
1892         u32 port_cfg_addr, link_temp, device_capabilities;
1893         struct ecore_mcp_link_params *link;
1894
1895         /* Read global nvm_cfg address */
1896         u32 nvm_cfg_addr = ecore_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);
1897
1898         /* Verify MCP has initialized it */
1899         if (nvm_cfg_addr == 0) {
1900                 DP_NOTICE(p_hwfn, false, "Shared memory not initialized\n");
1901                 return ECORE_INVAL;
1902         }
1903
1904         /* Read nvm_cfg1 (note: this is just the offset, not the offsize - TBD) */
1905         nvm_cfg1_offset = ecore_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);
1906
1907         addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
1908             OFFSETOF(struct nvm_cfg1, glob) + OFFSETOF(struct nvm_cfg1_glob,
1909                                                        core_cfg);
1910
1911         core_cfg = ecore_rd(p_hwfn, p_ptt, addr);
1912
1913         switch ((core_cfg & NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK) >>
1914                 NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET) {
1915         case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X40G:
1916                 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_2X40G;
1917                 break;
1918         case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X50G:
1919                 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_2X50G;
1920                 break;
1921         case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X100G:
1922                 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_1X100G;
1923                 break;
1924         case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_F:
1925                 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_4X10G_F;
1926                 break;
1927         case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_E:
1928                 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_4X10G_E;
1929                 break;
1930         case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X20G:
1931                 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_4X20G;
1932                 break;
1933         case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X40G:
1934                 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_1X40G;
1935                 break;
1936         case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X25G:
1937                 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_2X25G;
1938                 break;
1939         case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X25G:
1940                 p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_1X25G;
1941                 break;
1942         default:
1943                 DP_NOTICE(p_hwfn, true, "Unknown port mode in 0x%08x\n",
1944                           core_cfg);
1945                 break;
1946         }
1947
1948         /* Read default link configuration */
1949         link = &p_hwfn->mcp_info->link_input;
1950         port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
1951             OFFSETOF(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]);
1952         link_temp = ecore_rd(p_hwfn, p_ptt,
1953                              port_cfg_addr +
1954                              OFFSETOF(struct nvm_cfg1_port, speed_cap_mask));
1955         link_temp &= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK;
1956         link->speed.advertised_speeds = link_temp;
1957
1958         link_temp = link->speed.advertised_speeds;
1959         p_hwfn->mcp_info->link_capabilities.speed_capabilities = link_temp;
1960
1961         link_temp = ecore_rd(p_hwfn, p_ptt,
1962                              port_cfg_addr +
1963                              OFFSETOF(struct nvm_cfg1_port, link_settings));
1964         switch ((link_temp & NVM_CFG1_PORT_DRV_LINK_SPEED_MASK) >>
1965                 NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET) {
1966         case NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG:
1967                 link->speed.autoneg = true;
1968                 break;
1969         case NVM_CFG1_PORT_DRV_LINK_SPEED_1G:
1970                 link->speed.forced_speed = 1000;
1971                 break;
1972         case NVM_CFG1_PORT_DRV_LINK_SPEED_10G:
1973                 link->speed.forced_speed = 10000;
1974                 break;
1975         case NVM_CFG1_PORT_DRV_LINK_SPEED_25G:
1976                 link->speed.forced_speed = 25000;
1977                 break;
1978         case NVM_CFG1_PORT_DRV_LINK_SPEED_40G:
1979                 link->speed.forced_speed = 40000;
1980                 break;
1981         case NVM_CFG1_PORT_DRV_LINK_SPEED_50G:
1982                 link->speed.forced_speed = 50000;
1983                 break;
1984         case NVM_CFG1_PORT_DRV_LINK_SPEED_100G:
1985                 link->speed.forced_speed = 100000;
1986                 break;
1987         default:
1988                 DP_NOTICE(p_hwfn, true, "Unknown Speed in 0x%08x\n", link_temp);
1989         }
1990
1991         link_temp &= NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK;
1992         link_temp >>= NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET;
1993         link->pause.autoneg = !!(link_temp &
1994                                   NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG);
1995         link->pause.forced_rx = !!(link_temp &
1996                                     NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX);
1997         link->pause.forced_tx = !!(link_temp &
1998                                     NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX);
1999         link->loopback_mode = 0;
2000
2001         DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
2002                    "Read default link: Speed 0x%08x, Adv. Speed 0x%08x,"
2003                    " AN: 0x%02x, PAUSE AN: 0x%02x\n",
2004                    link->speed.forced_speed, link->speed.advertised_speeds,
2005                    link->speed.autoneg, link->pause.autoneg);
2006
2007         /* Read Multi-function information from shmem */
2008         addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
2009             OFFSETOF(struct nvm_cfg1, glob) +
2010             OFFSETOF(struct nvm_cfg1_glob, generic_cont0);
2011
2012         generic_cont0 = ecore_rd(p_hwfn, p_ptt, addr);
2013
2014         mf_mode = (generic_cont0 & NVM_CFG1_GLOB_MF_MODE_MASK) >>
2015             NVM_CFG1_GLOB_MF_MODE_OFFSET;
2016
2017         switch (mf_mode) {
2018         case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED:
2019                 p_hwfn->p_dev->mf_mode = ECORE_MF_OVLAN;
2020                 break;
2021         case NVM_CFG1_GLOB_MF_MODE_NPAR1_0:
2022                 p_hwfn->p_dev->mf_mode = ECORE_MF_NPAR;
2023                 break;
2024         case NVM_CFG1_GLOB_MF_MODE_DEFAULT:
2025                 p_hwfn->p_dev->mf_mode = ECORE_MF_DEFAULT;
2026                 break;
2027         }
2028         DP_INFO(p_hwfn, "Multi function mode is %08x\n",
2029                 p_hwfn->p_dev->mf_mode);
2030
2031         /* Read device capabilities information from shmem */
2032         addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
2033             OFFSETOF(struct nvm_cfg1, glob) +
2034             OFFSETOF(struct nvm_cfg1_glob, device_capabilities);
2035
2036         device_capabilities = ecore_rd(p_hwfn, p_ptt, addr);
2037         if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET)
2038                 OSAL_SET_BIT(ECORE_DEV_CAP_ETH,
2039                              &p_hwfn->hw_info.device_capabilities);
2040
2041         return ecore_mcp_fill_shmem_func_info(p_hwfn, p_ptt);
2042 }
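
/* Addressing sketch for the reads above: every nvm_cfg1 field lives in the
 * MCP scratchpad, at MCP_REG_SCRATCH + nvm_cfg1_offset + the field's offset
 * within struct nvm_cfg1, where nvm_cfg1_offset itself is read from
 * nvm_cfg_addr + 4. E.g. the per-port link_settings field used above is at:
 *
 *	addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
 *	       OFFSETOF(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]) +
 *	       OFFSETOF(struct nvm_cfg1_port, link_settings);
 *	link_temp = ecore_rd(p_hwfn, p_ptt, addr);
 */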
2043
2044 static void ecore_get_num_funcs(struct ecore_hwfn *p_hwfn,
2045                                 struct ecore_ptt *p_ptt)
2046 {
2047         u8 num_funcs;
2048         u32 tmp, mask;
2049
2050         num_funcs = ECORE_IS_AH(p_hwfn->p_dev) ? MAX_NUM_PFS_K2
2051             : MAX_NUM_PFS_BB;
2052
2053         /* Bit 0 of MISCS_REG_FUNCTION_HIDE indicates whether the bypass values
2054          * in the other bits are selected.
2055          * Bits 1-15 are for functions 1-15, respectively, and their value is
2056          * '0' only for enabled functions (function 0 always exists and is
2057          * enabled).
2058          * In case of CMT, only the "even" functions are enabled, and thus the
2059          * number of functions for both hwfns is learnt from the same bits.
2060          */
2061
2062         tmp = ecore_rd(p_hwfn, p_ptt, MISCS_REG_FUNCTION_HIDE);
2063         if (tmp & 0x1) {
2064                 if (ECORE_PATH_ID(p_hwfn) && p_hwfn->p_dev->num_hwfns == 1) {
2065                         num_funcs = 0;
2066                         mask = 0xaaaa;
2067                 } else {
2068                         num_funcs = 1;
2069                         mask = 0x5554;
2070                 }
2071
2072                 tmp = (tmp ^ 0xffffffff) & mask;
2073                 while (tmp) {
2074                         if (tmp & 0x1)
2075                                 num_funcs++;
2076                         tmp >>= 0x1;
2077                 }
2078         }
2079
2080         p_hwfn->num_funcs_on_engine = num_funcs;
2081
2082 #ifndef ASIC_ONLY
2083         if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
2084                 DP_NOTICE(p_hwfn, false,
2085                           "FPGA: Limit number of PFs to 4 [would affect"
2086                           " resource allocation, needed for IOV]\n");
2087                 p_hwfn->num_funcs_on_engine = 4;
2088         }
2089 #endif
2090
2091         DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE, "num_funcs_on_engine = %d\n",
2092                    p_hwfn->num_funcs_on_engine);
2093 }
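
/* Worked example (hypothetical register value): on a path using the
 * mask = 0x5554 branch, reading MISCS_REG_FUNCTION_HIDE = 0x0000fff1 means
 * bit 0 is set (the other bits are valid), functions 1-3 are enabled and
 * functions 4-15 are hidden. Then (~0x0000fff1) & 0x5554 = 0x0004, a single
 * set bit, so num_funcs = 1 (function 0) + 1 = 2 functions on this engine -
 * only the even-numbered functions are counted by this mask.
 */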
2094
2095 static void ecore_hw_info_port_num_bb(struct ecore_hwfn *p_hwfn,
2096                                       struct ecore_ptt *p_ptt)
2097 {
2098         u32 port_mode;
2099
2100 #ifndef ASIC_ONLY
2101         /* Read the port mode */
2102         if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
2103                 port_mode = 4;
2104         else if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) &&
2105                  (p_hwfn->p_dev->num_hwfns > 1))
2106                 /* In CMT on emulation, assume 1 port */
2107                 port_mode = 1;
2108         else
2109 #endif
2110                 port_mode = ecore_rd(p_hwfn, p_ptt,
2111                                      CNIG_REG_NW_PORT_MODE_BB_B0);
2112
2113         if (port_mode < 3) {
2114                 p_hwfn->p_dev->num_ports_in_engines = 1;
2115         } else if (port_mode <= 5) {
2116                 p_hwfn->p_dev->num_ports_in_engines = 2;
2117         } else {
2118                 DP_NOTICE(p_hwfn, true, "PORT MODE: %d not supported\n",
2119                           port_mode);
2120 
2121                 /* Default num_ports_in_engines to a single port */
2122                 p_hwfn->p_dev->num_ports_in_engines = 1;
2123         }
2124 }
2125
2126 static void ecore_hw_info_port_num_ah(struct ecore_hwfn *p_hwfn,
2127                                       struct ecore_ptt *p_ptt)
2128 {
2129         u32 port;
2130         int i;
2131
2132         p_hwfn->p_dev->num_ports_in_engines = 0;
2133
2134         for (i = 0; i < MAX_NUM_PORTS_K2; i++) {
2135                 port = ecore_rd(p_hwfn, p_ptt,
2136                                 CNIG_REG_NIG_PORT0_CONF_K2 + (i * 4));
2137                 if (port & 1)
2138                         p_hwfn->p_dev->num_ports_in_engines++;
2139         }
2140 }
2141
2142 static void ecore_hw_info_port_num(struct ecore_hwfn *p_hwfn,
2143                                    struct ecore_ptt *p_ptt)
2144 {
2145         if (ECORE_IS_BB(p_hwfn->p_dev))
2146                 ecore_hw_info_port_num_bb(p_hwfn, p_ptt);
2147         else
2148                 ecore_hw_info_port_num_ah(p_hwfn, p_ptt);
2149 }
2150
2151 static enum _ecore_status_t
2152 ecore_get_hw_info(struct ecore_hwfn *p_hwfn,
2153                   struct ecore_ptt *p_ptt,
2154                   enum ecore_pci_personality personality)
2155 {
2156         enum _ecore_status_t rc;
2157
2158         rc = ecore_iov_hw_info(p_hwfn, p_hwfn->p_main_ptt);
2159         if (rc)
2160                 return rc;
2161
2162         /* TODO In get_hw_info, amongst others:
2163          * Get MCP FW revision and determine according to it the supported
2164          * features (e.g. DCB)
2165          * Get boot mode
2166          * ecore_get_pcie_width_speed, WOL capability.
2167          * Number of global CQ-s (for storage)
2168          */
2169         ecore_hw_info_port_num(p_hwfn, p_ptt);
2170
2171 #ifndef ASIC_ONLY
2172         if (CHIP_REV_IS_ASIC(p_hwfn->p_dev))
2173 #endif
2174                 ecore_hw_get_nvm_info(p_hwfn, p_ptt);
2175
2176         rc = ecore_int_igu_read_cam(p_hwfn, p_ptt);
2177         if (rc)
2178                 return rc;
2179
2180 #ifndef ASIC_ONLY
2181         if (CHIP_REV_IS_ASIC(p_hwfn->p_dev) && ecore_mcp_is_init(p_hwfn)) {
2182 #endif
2183                 OSAL_MEMCPY(p_hwfn->hw_info.hw_mac_addr,
2184                             p_hwfn->mcp_info->func_info.mac, ETH_ALEN);
2185 #ifndef ASIC_ONLY
2186         } else {
2187                 static u8 mcp_hw_mac[6] = { 0, 2, 3, 4, 5, 6 };
2188
2189                 OSAL_MEMCPY(p_hwfn->hw_info.hw_mac_addr, mcp_hw_mac, ETH_ALEN);
2190                 p_hwfn->hw_info.hw_mac_addr[5] = p_hwfn->abs_pf_id;
2191         }
2192 #endif
2193
2194         if (ecore_mcp_is_init(p_hwfn)) {
2195                 if (p_hwfn->mcp_info->func_info.ovlan != ECORE_MCP_VLAN_UNSET)
2196                         p_hwfn->hw_info.ovlan =
2197                             p_hwfn->mcp_info->func_info.ovlan;
2198
2199                 ecore_mcp_cmd_port_init(p_hwfn, p_ptt);
2200         }
2201
2202         if (personality != ECORE_PCI_DEFAULT)
2203                 p_hwfn->hw_info.personality = personality;
2204         else if (ecore_mcp_is_init(p_hwfn))
2205                 p_hwfn->hw_info.personality =
2206                     p_hwfn->mcp_info->func_info.protocol;
2207
2208 #ifndef ASIC_ONLY
2209         /* To overcome ILT lack for emulation, at least until we have
2210          * a definite answer from the system about it, allow only PF0 to be RoCE.
2211          */
2212         if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && ECORE_IS_AH(p_hwfn->p_dev))
2213                 p_hwfn->hw_info.personality = ECORE_PCI_ETH;
2214 #endif
2215
2216         ecore_get_num_funcs(p_hwfn, p_ptt);
2217
2218         /* Feat num is dependent on personality and on the number of functions
2219          * on the engine. Therefore it should come after personality
2220          * initialization and after getting the number of functions.
2221          */
2222         return ecore_hw_get_resc(p_hwfn);
2223 }
2224
2225 /* @TMP - this should move to a proper .h */
2226 #define CHIP_NUM_AH                     0x8070
2227
2228 static enum _ecore_status_t ecore_get_dev_info(struct ecore_dev *p_dev)
2229 {
2230         struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2231         u32 tmp;
2232
2233         /* Read Vendor Id / Device Id */
2234         OSAL_PCI_READ_CONFIG_WORD(p_dev, PCICFG_VENDOR_ID_OFFSET,
2235                                   &p_dev->vendor_id);
2236         OSAL_PCI_READ_CONFIG_WORD(p_dev, PCICFG_DEVICE_ID_OFFSET,
2237                                   &p_dev->device_id);
2238
2239         p_dev->chip_num = (u16)ecore_rd(p_hwfn, p_hwfn->p_main_ptt,
2240                                          MISCS_REG_CHIP_NUM);
2241         p_dev->chip_rev = (u16)ecore_rd(p_hwfn, p_hwfn->p_main_ptt,
2242                                         MISCS_REG_CHIP_REV);
2243
2244         MASK_FIELD(CHIP_REV, p_dev->chip_rev);
2245
2246         /* Determine type */
2247         if (p_dev->device_id == CHIP_NUM_AH)
2248                 p_dev->type = ECORE_DEV_TYPE_AH;
2249         else
2250                 p_dev->type = ECORE_DEV_TYPE_BB;
2251
2252         /* Learn number of HW-functions */
2253         tmp = ecore_rd(p_hwfn, p_hwfn->p_main_ptt,
2254                        MISCS_REG_CMT_ENABLED_FOR_PAIR);
2255
2256         if (tmp & (1 << p_hwfn->rel_pf_id)) {
2257                 DP_NOTICE(p_dev->hwfns, false, "device in CMT mode\n");
2258                 p_dev->num_hwfns = 2;
2259         } else {
2260                 p_dev->num_hwfns = 1;
2261         }
2262
2263 #ifndef ASIC_ONLY
2264         if (CHIP_REV_IS_EMUL(p_dev)) {
2265                 /* For some reason we have problems with this register
2266                  * in B0 emulation; Simply assume no CMT
2267                  */
2268                 DP_NOTICE(p_dev->hwfns, false,
2269                           "device on emul - assume no CMT\n");
2270                 p_dev->num_hwfns = 1;
2271         }
2272 #endif
2273
2274         p_dev->chip_bond_id = ecore_rd(p_hwfn, p_hwfn->p_main_ptt,
2275                                        MISCS_REG_CHIP_TEST_REG) >> 4;
2276         MASK_FIELD(CHIP_BOND_ID, p_dev->chip_bond_id);
2277         p_dev->chip_metal = (u16)ecore_rd(p_hwfn, p_hwfn->p_main_ptt,
2278                                           MISCS_REG_CHIP_METAL);
2279         MASK_FIELD(CHIP_METAL, p_dev->chip_metal);
2280         DP_INFO(p_dev->hwfns,
2281                 "Chip details - %s%d, Num: %04x Rev: %04x Bond id: %04x"
2282                 " Metal: %04x\n",
2283                 ECORE_IS_BB(p_dev) ? "BB" : "AH",
2284                 CHIP_REV_IS_A0(p_dev) ? 0 : 1,
2285                 p_dev->chip_num, p_dev->chip_rev, p_dev->chip_bond_id,
2286                 p_dev->chip_metal);
2287
2288         if (ECORE_IS_BB(p_dev) && CHIP_REV_IS_A0(p_dev)) {
2289                 DP_NOTICE(p_dev->hwfns, false,
2290                           "The chip type/rev (BB A0) is not supported!\n");
2291                 return ECORE_ABORTED;
2292         }
2293 #ifndef ASIC_ONLY
2294         if (CHIP_REV_IS_EMUL(p_dev) && ECORE_IS_AH(p_dev))
2295                 ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
2296                          MISCS_REG_PLL_MAIN_CTRL_4, 0x1);
2297
2298         if (CHIP_REV_IS_EMUL(p_dev)) {
2299                 tmp = ecore_rd(p_hwfn, p_hwfn->p_main_ptt,
2300                                MISCS_REG_ECO_RESERVED);
2301                 if (tmp & (1 << 29)) {
2302                         DP_NOTICE(p_hwfn, false,
2303                                   "Emulation: Running on a FULL build\n");
2304                         p_dev->b_is_emul_full = true;
2305                 } else {
2306                         DP_NOTICE(p_hwfn, false,
2307                                   "Emulation: Running on a REDUCED build\n");
2308                 }
2309         }
2310 #endif
2311
2312         return ECORE_SUCCESS;
2313 }
2314
2315 void ecore_prepare_hibernate(struct ecore_dev *p_dev)
2316 {
2317         int j;
2318
2319         if (IS_VF(p_dev))
2320                 return;
2321
2322         for_each_hwfn(p_dev, j) {
2323                 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[j];
2324
2325                 DP_VERBOSE(p_hwfn, ECORE_MSG_IFDOWN,
2326                            "Mark hw/fw uninitialized\n");
2327
2328                 p_hwfn->hw_init_done = false;
2329                 p_hwfn->first_on_engine = false;
2330         }
2331 }
2332
2333 static enum _ecore_status_t
2334 ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn,
2335                         void OSAL_IOMEM *p_regview,
2336                         void OSAL_IOMEM *p_doorbells,
2337                         enum ecore_pci_personality personality)
2338 {
2339         enum _ecore_status_t rc = ECORE_SUCCESS;
2340
2341         /* Split PCI bars evenly between hwfns */
2342         p_hwfn->regview = p_regview;
2343         p_hwfn->doorbells = p_doorbells;
2344
2345         /* Validate that chip access is feasible */
2346         if (REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR) == 0xffffffff) {
2347                 DP_ERR(p_hwfn,
2348                        "Reading the ME register returns all Fs;"
2349                        " Preventing further chip access\n");
2350                 return ECORE_INVAL;
2351         }
2352
2353         get_function_id(p_hwfn);
2354
2355         /* Allocate PTT pool */
2356         rc = ecore_ptt_pool_alloc(p_hwfn);
2357         if (rc) {
2358                 DP_NOTICE(p_hwfn, true, "Failed to prepare hwfn's hw\n");
2359                 goto err0;
2360         }
2361
2362         /* Allocate the main PTT */
2363         p_hwfn->p_main_ptt = ecore_get_reserved_ptt(p_hwfn, RESERVED_PTT_MAIN);
2364
2365         /* First hwfn learns basic information, e.g., number of hwfns */
2366         if (!p_hwfn->my_id) {
2367                 rc = ecore_get_dev_info(p_hwfn->p_dev);
2368                 if (rc != ECORE_SUCCESS)
2369                         goto err1;
2370         }
2371
2372         ecore_hw_hwfn_prepare(p_hwfn);
2373
2374         /* Initialize MCP structure */
2375         rc = ecore_mcp_cmd_init(p_hwfn, p_hwfn->p_main_ptt);
2376         if (rc) {
2377                 DP_NOTICE(p_hwfn, true, "Failed initializing mcp command\n");
2378                 goto err1;
2379         }
2380
2381         /* Read the device configuration information from the HW and SHMEM */
2382         rc = ecore_get_hw_info(p_hwfn, p_hwfn->p_main_ptt, personality);
2383         if (rc) {
2384                 DP_NOTICE(p_hwfn, true, "Failed to get HW information\n");
2385                 goto err2;
2386         }
2387
2388         /* Allocate the init RT array and initialize the init-ops engine */
2389         rc = ecore_init_alloc(p_hwfn);
2390         if (rc) {
2391                 DP_NOTICE(p_hwfn, true, "Failed to allocate the init array\n");
2392                 goto err2;
2393         }
2394 #ifndef ASIC_ONLY
2395         if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
2396                 DP_NOTICE(p_hwfn, false,
2397                           "FPGA: workaround; Prevent DMAE parities\n");
2398                 ecore_wr(p_hwfn, p_hwfn->p_main_ptt, PCIE_REG_PRTY_MASK, 7);
2399
2400                 DP_NOTICE(p_hwfn, false,
2401                           "FPGA: workaround: Set VF bar0 size\n");
2402                 ecore_wr(p_hwfn, p_hwfn->p_main_ptt,
2403                          PGLUE_B_REG_VF_BAR0_SIZE, 4);
2404         }
2405 #endif
2406
2407         return rc;
2408 err2:
2409         ecore_mcp_free(p_hwfn);
2410 err1:
2411         ecore_hw_hwfn_free(p_hwfn);
2412 err0:
2413         return rc;
2414 }
2415
2416 enum _ecore_status_t ecore_hw_prepare(struct ecore_dev *p_dev, int personality)
2417 {
2418         struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev);
2419         enum _ecore_status_t rc;
2420
2421         if (IS_VF(p_dev))
2422                 return ecore_vf_hw_prepare(p_dev);
2423
2424         /* Store the precompiled init data ptrs */
2425         ecore_init_iro_array(p_dev);
2426
2427         /* Initialize the first hwfn - will learn number of hwfns */
2428         rc = ecore_hw_prepare_single(p_hwfn,
2429                                      p_dev->regview,
2430                                      p_dev->doorbells, personality);
2431         if (rc != ECORE_SUCCESS)
2432                 return rc;
2433
2434         personality = p_hwfn->hw_info.personality;
2435
2436         /* initialize the 2nd hwfn if necessary */
2437         if (p_dev->num_hwfns > 1) {
2438                 void OSAL_IOMEM *p_regview, *p_doorbell;
2439                 u8 OSAL_IOMEM *addr;
2440
2441                 /* adjust bar offset for second engine */
2442                 addr = (u8 OSAL_IOMEM *)p_dev->regview +
2443                     ecore_hw_bar_size(p_hwfn, BAR_ID_0) / 2;
2444                 p_regview = (void OSAL_IOMEM *)addr;
2445
2446                 addr = (u8 OSAL_IOMEM *)p_dev->doorbells +
2447                     ecore_hw_bar_size(p_hwfn, BAR_ID_1) / 2;
2448                 p_doorbell = (void OSAL_IOMEM *)addr;
2449
2450                 /* prepare second hw function */
2451                 rc = ecore_hw_prepare_single(&p_dev->hwfns[1], p_regview,
2452                                              p_doorbell, personality);
2453
2454                 /* in case of error, need to free the previously
2455                  * initialized hwfn 0
2456                  */
2457                 if (rc != ECORE_SUCCESS) {
2458                         ecore_init_free(p_hwfn);
2459                         ecore_mcp_free(p_hwfn);
2460                         ecore_hw_hwfn_free(p_hwfn);
2461                         return rc;
2462                 }
2463         }
2464
2465         return ECORE_SUCCESS;
2466 }
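
/* Lifecycle sketch (illustrative; assumes the usual PF flow of the upper
 * layer). The entry points implemented in this file are expected to be
 * paired roughly as:
 *
 *	ecore_hw_prepare(p_dev, ECORE_PCI_DEFAULT);
 *	ecore_hw_init(p_dev, ...);
 *		...
 *	ecore_hw_stop_fastpath(p_dev);
 *	ecore_hw_stop(p_dev);
 *	ecore_hw_remove(p_dev);
 *
 * ecore_hw_reset() is the variant that also exchanges UNLOAD_REQ/UNLOAD_DONE
 * with the MFW, as shown earlier in this file.
 */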
2467
2468 void ecore_hw_remove(struct ecore_dev *p_dev)
2469 {
2470         int i;
2471
2472         for_each_hwfn(p_dev, i) {
2473                 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
2474
2475                 if (IS_VF(p_dev)) {
2476                         ecore_vf_pf_release(p_hwfn);
2477                         continue;
2478                 }
2479
2480                 ecore_init_free(p_hwfn);
2481                 ecore_hw_hwfn_free(p_hwfn);
2482                 ecore_mcp_free(p_hwfn);
2483
2484                 OSAL_MUTEX_DEALLOC(&p_hwfn->dmae_info.mutex);
2485         }
2486 }
2487
2488 static void ecore_chain_free_next_ptr(struct ecore_dev *p_dev,
2489                                       struct ecore_chain *p_chain)
2490 {
2491         void *p_virt = p_chain->p_virt_addr, *p_virt_next = OSAL_NULL;
2492         dma_addr_t p_phys = p_chain->p_phys_addr, p_phys_next = 0;
2493         struct ecore_chain_next *p_next;
2494         u32 size, i;
2495
2496         if (!p_virt)
2497                 return;
2498
2499         size = p_chain->elem_size * p_chain->usable_per_page;
2500
2501         for (i = 0; i < p_chain->page_cnt; i++) {
2502                 if (!p_virt)
2503                         break;
2504
2505                 p_next = (struct ecore_chain_next *)((u8 *)p_virt + size);
2506                 p_virt_next = p_next->next_virt;
2507                 p_phys_next = HILO_DMA_REGPAIR(p_next->next_phys);
2508
2509                 OSAL_DMA_FREE_COHERENT(p_dev, p_virt, p_phys,
2510                                        ECORE_CHAIN_PAGE_SIZE);
2511
2512                 p_virt = p_virt_next;
2513                 p_phys = p_phys_next;
2514         }
2515 }
2516
2517 static void ecore_chain_free_single(struct ecore_dev *p_dev,
2518                                     struct ecore_chain *p_chain)
2519 {
2520         if (!p_chain->p_virt_addr)
2521                 return;
2522
2523         OSAL_DMA_FREE_COHERENT(p_dev, p_chain->p_virt_addr,
2524                                p_chain->p_phys_addr, ECORE_CHAIN_PAGE_SIZE);
2525 }
2526
2527 static void ecore_chain_free_pbl(struct ecore_dev *p_dev,
2528                                  struct ecore_chain *p_chain)
2529 {
2530         void **pp_virt_addr_tbl = p_chain->pbl.pp_virt_addr_tbl;
2531         u8 *p_pbl_virt = (u8 *)p_chain->pbl.p_virt_table;
2532         u32 page_cnt = p_chain->page_cnt, i, pbl_size;
2533
2534         if (!pp_virt_addr_tbl)
2535                 return;
2536
2537         if (!p_chain->pbl.p_virt_table)
2538                 goto out;
2539
2540         for (i = 0; i < page_cnt; i++) {
2541                 if (!pp_virt_addr_tbl[i])
2542                         break;
2543
2544                 OSAL_DMA_FREE_COHERENT(p_dev, pp_virt_addr_tbl[i],
2545                                        *(dma_addr_t *)p_pbl_virt,
2546                                        ECORE_CHAIN_PAGE_SIZE);
2547
2548                 p_pbl_virt += ECORE_CHAIN_PBL_ENTRY_SIZE;
2549         }
2550
2551         pbl_size = page_cnt * ECORE_CHAIN_PBL_ENTRY_SIZE;
2552         OSAL_DMA_FREE_COHERENT(p_dev, p_chain->pbl.p_virt_table,
2553                                p_chain->pbl.p_phys_table, pbl_size);
2554 out:
2555         OSAL_VFREE(p_dev, p_chain->pbl.pp_virt_addr_tbl);
2556 }
2557
2558 void ecore_chain_free(struct ecore_dev *p_dev, struct ecore_chain *p_chain)
2559 {
2560         switch (p_chain->mode) {
2561         case ECORE_CHAIN_MODE_NEXT_PTR:
2562                 ecore_chain_free_next_ptr(p_dev, p_chain);
2563                 break;
2564         case ECORE_CHAIN_MODE_SINGLE:
2565                 ecore_chain_free_single(p_dev, p_chain);
2566                 break;
2567         case ECORE_CHAIN_MODE_PBL:
2568                 ecore_chain_free_pbl(p_dev, p_chain);
2569                 break;
2570         }
2571 }
2572
2573 static enum _ecore_status_t
2574 ecore_chain_alloc_sanity_check(struct ecore_dev *p_dev,
2575                                enum ecore_chain_cnt_type cnt_type,
2576                                osal_size_t elem_size, u32 page_cnt)
2577 {
2578         u64 chain_size = ELEMS_PER_PAGE(elem_size) * page_cnt;
2579
2580         /* The actual chain size can be larger than the maximal possible value
2581          * after rounding up the requested elements number to pages, and after
2582          * taking into account the unusable elements (next-ptr elements).
2583          * The size of a "u16" chain can be (U16_MAX + 1) since the chain
2584          * size/capacity fields are of a u32 type.
2585          */
2586         if ((cnt_type == ECORE_CHAIN_CNT_TYPE_U16 &&
2587              chain_size > ((u32)ECORE_U16_MAX + 1)) ||
2588             (cnt_type == ECORE_CHAIN_CNT_TYPE_U32 &&
2589              chain_size > ECORE_U32_MAX)) {
2590                 DP_NOTICE(p_dev, true,
2591                           "The actual chain size (0x%lx) is larger than"
2592                           " the maximal possible value\n",
2593                           (unsigned long)chain_size);
2594                 return ECORE_INVAL;
2595         }
2596
2597         return ECORE_SUCCESS;
2598 }
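
/* Worked example (assuming ECORE_CHAIN_PAGE_SIZE is 4KB): with 8-byte
 * elements, ELEMS_PER_PAGE(8) = 4096 / 8 = 512, so 200 pages give
 * chain_size = 512 * 200 = 102400 elements. That exceeds U16_MAX + 1 = 65536,
 * so such a chain is rejected for ECORE_CHAIN_CNT_TYPE_U16 but accepted for
 * ECORE_CHAIN_CNT_TYPE_U32.
 */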
2599
2600 static enum _ecore_status_t
2601 ecore_chain_alloc_next_ptr(struct ecore_dev *p_dev, struct ecore_chain *p_chain)
2602 {
2603         void *p_virt = OSAL_NULL, *p_virt_prev = OSAL_NULL;
2604         dma_addr_t p_phys = 0;
2605         u32 i;
2606
2607         for (i = 0; i < p_chain->page_cnt; i++) {
2608                 p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys,
2609                                                  ECORE_CHAIN_PAGE_SIZE);
2610                 if (!p_virt) {
2611                         DP_NOTICE(p_dev, true,
2612                                   "Failed to allocate chain memory\n");
2613                         return ECORE_NOMEM;
2614                 }
2615
2616                 if (i == 0) {
2617                         ecore_chain_init_mem(p_chain, p_virt, p_phys);
2618                         ecore_chain_reset(p_chain);
2619                 } else {
2620                         ecore_chain_init_next_ptr_elem(p_chain, p_virt_prev,
2621                                                        p_virt, p_phys);
2622                 }
2623
2624                 p_virt_prev = p_virt;
2625         }
2626         /* Last page's next element should point to the beginning of the
2627          * chain.
2628          */
2629         ecore_chain_init_next_ptr_elem(p_chain, p_virt_prev,
2630                                        p_chain->p_virt_addr,
2631                                        p_chain->p_phys_addr);
2632
2633         return ECORE_SUCCESS;
2634 }
2635
2636 static enum _ecore_status_t
2637 ecore_chain_alloc_single(struct ecore_dev *p_dev, struct ecore_chain *p_chain)
2638 {
2639         void *p_virt = OSAL_NULL;
2640         dma_addr_t p_phys = 0;
2641
2642         p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys, ECORE_CHAIN_PAGE_SIZE);
2643         if (!p_virt) {
2644                 DP_NOTICE(p_dev, true, "Failed to allocate chain memory\n");
2645                 return ECORE_NOMEM;
2646         }
2647
2648         ecore_chain_init_mem(p_chain, p_virt, p_phys);
2649         ecore_chain_reset(p_chain);
2650
2651         return ECORE_SUCCESS;
2652 }
2653
2654 static enum _ecore_status_t ecore_chain_alloc_pbl(struct ecore_dev *p_dev,
2655                                                   struct ecore_chain *p_chain)
2656 {
2657         void *p_virt = OSAL_NULL;
2658         u8 *p_pbl_virt = OSAL_NULL;
2659         void **pp_virt_addr_tbl = OSAL_NULL;
2660         dma_addr_t p_phys = 0, p_pbl_phys = 0;
2661         u32 page_cnt = p_chain->page_cnt, size, i;
2662
2663         size = page_cnt * sizeof(*pp_virt_addr_tbl);
2664         pp_virt_addr_tbl = (void **)OSAL_VALLOC(p_dev, size);
2665         if (!pp_virt_addr_tbl) {
2666                 DP_NOTICE(p_dev, true,
2667                           "Failed to allocate memory for the chain"
2668                           " virtual addresses table\n");
2669                 return ECORE_NOMEM;
2670         }
2671         OSAL_MEM_ZERO(pp_virt_addr_tbl, size);
2672
2673         /* The allocation of the PBL table is done with its full size, since it
2674          * is expected to be physically contiguous.
2675          */
2676         size = page_cnt * ECORE_CHAIN_PBL_ENTRY_SIZE;
2677         p_pbl_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_pbl_phys, size);
2678         if (!p_pbl_virt) {
2679                 DP_NOTICE(p_dev, true, "Failed to allocate chain pbl memory\n");
2680                 return ECORE_NOMEM;
2681         }
2682
2683         ecore_chain_init_pbl_mem(p_chain, p_pbl_virt, p_pbl_phys,
2684                                  pp_virt_addr_tbl);
2685
2686         for (i = 0; i < page_cnt; i++) {
2687                 p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys,
2688                                                  ECORE_CHAIN_PAGE_SIZE);
2689                 if (!p_virt) {
2690                         DP_NOTICE(p_dev, true,
2691                                   "Failed to allocate chain memory\n");
2692                         return ECORE_NOMEM;
2693                 }
2694
2695                 if (i == 0) {
2696                         ecore_chain_init_mem(p_chain, p_virt, p_phys);
2697                         ecore_chain_reset(p_chain);
2698                 }
2699
2700                 /* Fill the PBL table with the physical address of the page */
2701                 *(dma_addr_t *)p_pbl_virt = p_phys;
2702                 /* Keep the virtual address of the page */
2703                 p_chain->pbl.pp_virt_addr_tbl[i] = p_virt;
2704
2705                 p_pbl_virt += ECORE_CHAIN_PBL_ENTRY_SIZE;
2706         }
2707
2708         return ECORE_SUCCESS;
2709 }
2710
2711 enum _ecore_status_t ecore_chain_alloc(struct ecore_dev *p_dev,
2712                                        enum ecore_chain_use_mode intended_use,
2713                                        enum ecore_chain_mode mode,
2714                                        enum ecore_chain_cnt_type cnt_type,
2715                                        u32 num_elems, osal_size_t elem_size,
2716                                        struct ecore_chain *p_chain)
2717 {
2718         u32 page_cnt;
2719         enum _ecore_status_t rc = ECORE_SUCCESS;
2720
2721         if (mode == ECORE_CHAIN_MODE_SINGLE)
2722                 page_cnt = 1;
2723         else
2724                 page_cnt = ECORE_CHAIN_PAGE_CNT(num_elems, elem_size, mode);
2725
2726         rc = ecore_chain_alloc_sanity_check(p_dev, cnt_type, elem_size,
2727                                             page_cnt);
2728         if (rc) {
2729                 DP_NOTICE(p_dev, true,
2730                           "Cannot allocate a chain with the given arguments:\n"
2731                           " [use_mode %d, mode %d, cnt_type %d, num_elems %d,"
2732                           " elem_size %zu]\n",
2733                           intended_use, mode, cnt_type, num_elems, elem_size);
2734                 return rc;
2735         }
2736
2737         ecore_chain_init_params(p_chain, page_cnt, (u8)elem_size, intended_use,
2738                                 mode, cnt_type);
2739
2740         switch (mode) {
2741         case ECORE_CHAIN_MODE_NEXT_PTR:
2742                 rc = ecore_chain_alloc_next_ptr(p_dev, p_chain);
2743                 break;
2744         case ECORE_CHAIN_MODE_SINGLE:
2745                 rc = ecore_chain_alloc_single(p_dev, p_chain);
2746                 break;
2747         case ECORE_CHAIN_MODE_PBL:
2748                 rc = ecore_chain_alloc_pbl(p_dev, p_chain);
2749                 break;
2750         }
2751         if (rc)
2752                 goto nomem;
2753
2754         return ECORE_SUCCESS;
2755
2756 nomem:
2757         ecore_chain_free(p_dev, p_chain);
2758         return rc;
2759 }
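/* Illustrative call sketch (a minimal example, not taken from this file; the
 * use-mode value and the sizes below are assumptions, not requirements of
 * this API):
 *
 *      struct ecore_chain chain;
 *      enum _ecore_status_t rc;
 *
 *      rc = ecore_chain_alloc(p_dev, ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,
 *                             ECORE_CHAIN_MODE_PBL, ECORE_CHAIN_CNT_TYPE_U16,
 *                             256, sizeof(u64), &chain);
 *      if (rc != ECORE_SUCCESS)
 *              return rc;
 *      ...
 *      ecore_chain_free(p_dev, &chain);
 */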
2760
2761 enum _ecore_status_t ecore_fw_l2_queue(struct ecore_hwfn *p_hwfn,
2762                                        u16 src_id, u16 *dst_id)
2763 {
2764         if (src_id >= RESC_NUM(p_hwfn, ECORE_L2_QUEUE)) {
2765                 u16 min, max;
2766
2767                 min = (u16)RESC_START(p_hwfn, ECORE_L2_QUEUE);
2768                 max = min + RESC_NUM(p_hwfn, ECORE_L2_QUEUE);
2769                 DP_NOTICE(p_hwfn, true,
2770                           "l2_queue id [%d] is not valid, available"
2771                           " indices [%d - %d]\n",
2772                           src_id, min, max);
2773
2774                 return ECORE_INVAL;
2775         }
2776
2777         *dst_id = RESC_START(p_hwfn, ECORE_L2_QUEUE) + src_id;
2778
2779         return ECORE_SUCCESS;
2780 }
2781
2782 enum _ecore_status_t ecore_fw_vport(struct ecore_hwfn *p_hwfn,
2783                                     u8 src_id, u8 *dst_id)
2784 {
2785         if (src_id >= RESC_NUM(p_hwfn, ECORE_VPORT)) {
2786                 u8 min, max;
2787
2788                 min = (u8)RESC_START(p_hwfn, ECORE_VPORT);
2789                 max = min + RESC_NUM(p_hwfn, ECORE_VPORT);
2790                 DP_NOTICE(p_hwfn, true,
2791                           "vport id [%d] is not valid, available"
2792                           " indices [%d - %d]\n",
2793                           src_id, min, max);
2794
2795                 return ECORE_INVAL;
2796         }
2797
2798         *dst_id = RESC_START(p_hwfn, ECORE_VPORT) + src_id;
2799
2800         return ECORE_SUCCESS;
2801 }
2802
2803 enum _ecore_status_t ecore_fw_rss_eng(struct ecore_hwfn *p_hwfn,
2804                                       u8 src_id, u8 *dst_id)
2805 {
2806         if (src_id >= RESC_NUM(p_hwfn, ECORE_RSS_ENG)) {
2807                 u8 min, max;
2808
2809                 min = (u8)RESC_START(p_hwfn, ECORE_RSS_ENG);
2810                 max = min + RESC_NUM(p_hwfn, ECORE_RSS_ENG);
2811                 DP_NOTICE(p_hwfn, true,
2812                           "rss_eng id [%d] is not valid, avail idx [%d - %d]\n",
2813                           src_id, min, max);
2814
2815                 return ECORE_INVAL;
2816         }
2817
2818         *dst_id = RESC_START(p_hwfn, ECORE_RSS_ENG) + src_id;
2819
2820         return ECORE_SUCCESS;
2821 }
2822
2823 enum _ecore_status_t ecore_llh_add_mac_filter(struct ecore_hwfn *p_hwfn,
2824                                               struct ecore_ptt *p_ptt,
2825                                               u8 *p_filter)
2826 {
2827         u32 high, low, en;
2828         int i;
2829
2830         if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
2831                 return ECORE_SUCCESS;
2832
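        /* Split the 6-byte MAC into the layout used by the LLH filter value
         * registers: bytes 0-1 form the upper 16 bits ("high") and bytes 2-5
         * form the lower 32 bits ("low").
         */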
2833         high = p_filter[1] | (p_filter[0] << 8);
2834         low = p_filter[5] | (p_filter[4] << 8) |
2835             (p_filter[3] << 16) | (p_filter[2] << 24);
2836
2837         /* Find a free entry and utilize it */
2838         for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
2839                 en = ecore_rd(p_hwfn, p_ptt,
2840                               NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32));
2841                 if (en)
2842                         continue;
2843                 ecore_wr(p_hwfn, p_ptt,
2844                          NIG_REG_LLH_FUNC_FILTER_VALUE +
2845                          2 * i * sizeof(u32), low);
2846                 ecore_wr(p_hwfn, p_ptt,
2847                          NIG_REG_LLH_FUNC_FILTER_VALUE +
2848                          (2 * i + 1) * sizeof(u32), high);
2849                 ecore_wr(p_hwfn, p_ptt,
2850                          NIG_REG_LLH_FUNC_FILTER_MODE + i * sizeof(u32), 0);
2851                 ecore_wr(p_hwfn, p_ptt,
2852                          NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE +
2853                          i * sizeof(u32), 0);
2854                 ecore_wr(p_hwfn, p_ptt,
2855                          NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 1);
2856                 break;
2857         }
2858         if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) {
2859                 DP_NOTICE(p_hwfn, false,
2860                           "Failed to find an empty LLH filter to utilize\n");
2861                 return ECORE_INVAL;
2862         }
2863
2864         DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
2865                    "MAC: %x:%x:%x:%x:%x:%x is added at %d\n",
2866                    p_filter[0], p_filter[1], p_filter[2],
2867                    p_filter[3], p_filter[4], p_filter[5], i);
2868
2869         return ECORE_SUCCESS;
2870 }
2871
2872 void ecore_llh_remove_mac_filter(struct ecore_hwfn *p_hwfn,
2873                                  struct ecore_ptt *p_ptt, u8 *p_filter)
2874 {
2875         u32 high, low;
2876         int i;
2877
2878         if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
2879                 return;
2880
2881         high = p_filter[1] | (p_filter[0] << 8);
2882         low = p_filter[5] | (p_filter[4] << 8) |
2883             (p_filter[3] << 16) | (p_filter[2] << 24);
2884
2885         /* Find the entry and clean it */
2886         for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
2887                 if (ecore_rd(p_hwfn, p_ptt,
2888                              NIG_REG_LLH_FUNC_FILTER_VALUE +
2889                              2 * i * sizeof(u32)) != low)
2890                         continue;
2891                 if (ecore_rd(p_hwfn, p_ptt,
2892                              NIG_REG_LLH_FUNC_FILTER_VALUE +
2893                              (2 * i + 1) * sizeof(u32)) != high)
2894                         continue;
2895
2896                 ecore_wr(p_hwfn, p_ptt,
2897                          NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 0);
2898                 ecore_wr(p_hwfn, p_ptt,
2899                          NIG_REG_LLH_FUNC_FILTER_VALUE +
2900                          2 * i * sizeof(u32), 0);
2901                 ecore_wr(p_hwfn, p_ptt,
2902                          NIG_REG_LLH_FUNC_FILTER_VALUE +
2903                          (2 * i + 1) * sizeof(u32), 0);
2904                 break;
2905         }
2906         if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE)
2907                 DP_NOTICE(p_hwfn, false,
2908                           "Tried to remove a non-configured filter\n");
2909 }
2910
2911 enum _ecore_status_t ecore_llh_add_ethertype_filter(struct ecore_hwfn *p_hwfn,
2912                                                     struct ecore_ptt *p_ptt,
2913                                                     u16 filter)
2914 {
2915         u32 high, low, en;
2916         int i;
2917
2918         if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
2919                 return ECORE_SUCCESS;
2920
2921         high = filter;
2922         low = 0;
2923
2924         /* Find a free entry and utilize it */
2925         for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
2926                 en = ecore_rd(p_hwfn, p_ptt,
2927                               NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32));
2928                 if (en)
2929                         continue;
2930                 ecore_wr(p_hwfn, p_ptt,
2931                          NIG_REG_LLH_FUNC_FILTER_VALUE +
2932                          2 * i * sizeof(u32), low);
2933                 ecore_wr(p_hwfn, p_ptt,
2934                          NIG_REG_LLH_FUNC_FILTER_VALUE +
2935                          (2 * i + 1) * sizeof(u32), high);
2936                 ecore_wr(p_hwfn, p_ptt,
2937                          NIG_REG_LLH_FUNC_FILTER_MODE + i * sizeof(u32), 1);
2938                 ecore_wr(p_hwfn, p_ptt,
2939                          NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE +
2940                          i * sizeof(u32), 1);
2941                 ecore_wr(p_hwfn, p_ptt,
2942                          NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 1);
2943                 break;
2944         }
2945         if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) {
2946                 DP_NOTICE(p_hwfn, false,
2947                           "Failed to find an empty LLH filter to utilize\n");
2948                 return ECORE_INVAL;
2949         }
2950
2951         DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
2952                    "ETH type: %x is added at %d\n", filter, i);
2953
2954         return ECORE_SUCCESS;
2955 }
2956
2957 void ecore_llh_remove_ethertype_filter(struct ecore_hwfn *p_hwfn,
2958                                        struct ecore_ptt *p_ptt, u16 filter)
2959 {
2960         u32 high, low;
2961         int i;
2962
2963         if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
2964                 return;
2965
2966         high = filter;
2967         low = 0;
2968
2969         /* Find the entry and clean it */
2970         for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
2971                 if (ecore_rd(p_hwfn, p_ptt,
2972                              NIG_REG_LLH_FUNC_FILTER_VALUE +
2973                              2 * i * sizeof(u32)) != low)
2974                         continue;
2975                 if (ecore_rd(p_hwfn, p_ptt,
2976                              NIG_REG_LLH_FUNC_FILTER_VALUE +
2977                              (2 * i + 1) * sizeof(u32)) != high)
2978                         continue;
2979
2980                 ecore_wr(p_hwfn, p_ptt,
2981                          NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 0);
2982                 ecore_wr(p_hwfn, p_ptt,
2983                          NIG_REG_LLH_FUNC_FILTER_VALUE +
2984                          2 * i * sizeof(u32), 0);
2985                 ecore_wr(p_hwfn, p_ptt,
2986                          NIG_REG_LLH_FUNC_FILTER_VALUE +
2987                          (2 * i + 1) * sizeof(u32), 0);
2988                 break;
2989         }
2990         if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE)
2991                 DP_NOTICE(p_hwfn, false,
2992                           "Tried to remove a non-configured filter\n");
2993 }
2994
2995 void ecore_llh_clear_all_filters(struct ecore_hwfn *p_hwfn,
2996                                  struct ecore_ptt *p_ptt)
2997 {
2998         int i;
2999
3000         if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn)))
3001                 return;
3002
3003         for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) {
3004                 ecore_wr(p_hwfn, p_ptt,
3005                          NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 0);
3006                 ecore_wr(p_hwfn, p_ptt,
3007                          NIG_REG_LLH_FUNC_FILTER_VALUE +
3008                          2 * i * sizeof(u32), 0);
3009                 ecore_wr(p_hwfn, p_ptt,
3010                          NIG_REG_LLH_FUNC_FILTER_VALUE +
3011                          (2 * i + 1) * sizeof(u32), 0);
3012         }
3013 }
3014
3015 enum _ecore_status_t ecore_test_registers(struct ecore_hwfn *p_hwfn,
3016                                           struct ecore_ptt *p_ptt)
3017 {
3018         u32 reg_tbl[] = {
3019                 BRB_REG_HEADER_SIZE,
3020                 BTB_REG_HEADER_SIZE,
3021                 CAU_REG_LONG_TIMEOUT_THRESHOLD,
3022                 CCFC_REG_ACTIVITY_COUNTER,
3023                 CDU_REG_CID_ADDR_PARAMS,
3024                 DBG_REG_CLIENT_ENABLE,
3025                 DMAE_REG_INIT,
3026                 DORQ_REG_IFEN,
3027                 GRC_REG_TIMEOUT_EN,
3028                 IGU_REG_BLOCK_CONFIGURATION,
3029                 MCM_REG_INIT,
3030                 MCP2_REG_DBG_DWORD_ENABLE,
3031                 MISC_REG_PORT_MODE,
3032                 MISCS_REG_CLK_100G_MODE,
3033                 MSDM_REG_ENABLE_IN1,
3034                 MSEM_REG_ENABLE_IN,
3035                 NIG_REG_CM_HDR,
3036                 NCSI_REG_CONFIG,
3037                 PBF_REG_INIT,
3038                 PTU_REG_ATC_INIT_ARRAY,
3039                 PCM_REG_INIT,
3040                 PGLUE_B_REG_ADMIN_PER_PF_REGION,
3041                 PRM_REG_DISABLE_PRM,
3042                 PRS_REG_SOFT_RST,
3043                 PSDM_REG_ENABLE_IN1,
3044                 PSEM_REG_ENABLE_IN,
3045                 PSWRQ_REG_DBG_SELECT,
3046                 PSWRQ2_REG_CDUT_P_SIZE,
3047                 PSWHST_REG_DISCARD_INTERNAL_WRITES,
3048                 PSWHST2_REG_DBGSYN_ALMOST_FULL_THR,
3049                 PSWRD_REG_DBG_SELECT,
3050                 PSWRD2_REG_CONF11,
3051                 PSWWR_REG_USDM_FULL_TH,
3052                 PSWWR2_REG_CDU_FULL_TH2,
3053                 QM_REG_MAXPQSIZE_0,
3054                 RSS_REG_RSS_INIT_EN,
3055                 RDIF_REG_STOP_ON_ERROR,
3056                 SRC_REG_SOFT_RST,
3057                 TCFC_REG_ACTIVITY_COUNTER,
3058                 TCM_REG_INIT,
3059                 TM_REG_PXP_READ_DATA_FIFO_INIT,
3060                 TSDM_REG_ENABLE_IN1,
3061                 TSEM_REG_ENABLE_IN,
3062                 TDIF_REG_STOP_ON_ERROR,
3063                 UCM_REG_INIT,
3064                 UMAC_REG_IPG_HD_BKP_CNTL_BB_B0,
3065                 USDM_REG_ENABLE_IN1,
3066                 USEM_REG_ENABLE_IN,
3067                 XCM_REG_INIT,
3068                 XSDM_REG_ENABLE_IN1,
3069                 XSEM_REG_ENABLE_IN,
3070                 YCM_REG_INIT,
3071                 YSDM_REG_ENABLE_IN1,
3072                 YSEM_REG_ENABLE_IN,
3073                 XYLD_REG_SCBD_STRICT_PRIO,
3074                 TMLD_REG_SCBD_STRICT_PRIO,
3075                 MULD_REG_SCBD_STRICT_PRIO,
3076                 YULD_REG_SCBD_STRICT_PRIO,
3077         };
3078         u32 test_val[] = { 0x0, 0x1 };
3079         u32 val, save_val, i, j;
3080
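        /* For each test pattern, write it to every register in the table,
         * read it back and restore the original value; a mismatch means the
         * register is not accessible as expected.
         */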
3081         for (i = 0; i < OSAL_ARRAY_SIZE(test_val); i++) {
3082                 for (j = 0; j < OSAL_ARRAY_SIZE(reg_tbl); j++) {
3083                         save_val = ecore_rd(p_hwfn, p_ptt, reg_tbl[j]);
3084                         ecore_wr(p_hwfn, p_ptt, reg_tbl[j], test_val[i]);
3085                         val = ecore_rd(p_hwfn, p_ptt, reg_tbl[j]);
3086                         /* Restore the original register's value */
3087                         ecore_wr(p_hwfn, p_ptt, reg_tbl[j], save_val);
3088                         if (val != test_val[i]) {
3089                                 DP_INFO(p_hwfn->p_dev,
3090                                         "offset 0x%x: val 0x%x != 0x%x\n",
3091                                         reg_tbl[j], val, test_val[i]);
3092                                 return ECORE_AGAIN;
3093                         }
3094                 }
3095         }
3096         return ECORE_SUCCESS;
3097 }
3098
3099 static enum _ecore_status_t ecore_set_coalesce(struct ecore_hwfn *p_hwfn,
3100                                                struct ecore_ptt *p_ptt,
3101                                                u32 hw_addr, void *p_qzone,
3102                                                osal_size_t qzone_size,
3103                                                u8 timeset)
3104 {
3105         struct coalescing_timeset *p_coalesce_timeset;
3106
3107         if (IS_VF(p_hwfn->p_dev)) {
3108                 DP_NOTICE(p_hwfn, true, "VF coalescing config not supported\n");
3109                 return ECORE_INVAL;
3110         }
3111
3112         if (p_hwfn->p_dev->int_coalescing_mode != ECORE_COAL_MODE_ENABLE) {
3113                 DP_NOTICE(p_hwfn, true,
3114                           "Coalescing configuration not enabled\n");
3115                 return ECORE_INVAL;
3116         }
3117
3118         OSAL_MEMSET(p_qzone, 0, qzone_size);
3119         p_coalesce_timeset = p_qzone;
3120         p_coalesce_timeset->timeset = timeset;
3121         p_coalesce_timeset->valid = 1;
3122         ecore_memcpy_to(p_hwfn, p_ptt, hw_addr, p_qzone, qzone_size);
3123
3124         return ECORE_SUCCESS;
3125 }
3126
3127 enum _ecore_status_t ecore_set_rxq_coalesce(struct ecore_hwfn *p_hwfn,
3128                                             struct ecore_ptt *p_ptt,
3129                                             u8 coalesce, u8 qid)
3130 {
3131         struct ustorm_eth_queue_zone qzone;
3132         u16 fw_qid = 0;
3133         u32 address;
3134         u8 timeset;
3135         enum _ecore_status_t rc;
3136
3137         rc = ecore_fw_l2_queue(p_hwfn, (u16)qid, &fw_qid);
3138         if (rc != ECORE_SUCCESS)
3139                 return rc;
3140
3141         address = BAR0_MAP_REG_USDM_RAM + USTORM_ETH_QUEUE_ZONE_OFFSET(fw_qid);
3142         /* Translate the coalescing time into a timeset, according to:
3143          * Timeout[Rx] = TimeSet[Rx] << (TimerRes[Rx] + 1)
3144          */
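        /* For example, assuming ECORE_CAU_DEF_RX_TIMER_RES is 0 (an
         * assumption - the actual value is defined elsewhere), a requested
         * coalescing time of 24 usec yields timeset = 24 >> 1 = 12.
         */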
3145         timeset = coalesce >> (ECORE_CAU_DEF_RX_TIMER_RES + 1);
3146
3147         rc = ecore_set_coalesce(p_hwfn, p_ptt, address, &qzone,
3148                                 sizeof(struct ustorm_eth_queue_zone), timeset);
3149         if (rc != ECORE_SUCCESS)
3150                 goto out;
3151
3152         p_hwfn->p_dev->rx_coalesce_usecs = coalesce;
3153 out:
3154         return rc;
3155 }
3156
3157 enum _ecore_status_t ecore_set_txq_coalesce(struct ecore_hwfn *p_hwfn,
3158                                             struct ecore_ptt *p_ptt,
3159                                             u8 coalesce, u8 qid)
3160 {
3161         struct ystorm_eth_queue_zone qzone;
3162         u16 fw_qid = 0;
3163         u32 address;
3164         u8 timeset;
3165         enum _ecore_status_t rc;
3166
3167         rc = ecore_fw_l2_queue(p_hwfn, (u16)qid, &fw_qid);
3168         if (rc != ECORE_SUCCESS)
3169                 return rc;
3170
3171         address = BAR0_MAP_REG_YSDM_RAM + YSTORM_ETH_QUEUE_ZONE_OFFSET(fw_qid);
3172         /* Translate the coalescing time into a timeset, according to:
3173          * Timeout[Tx] = TimeSet[Tx] << (TimerRes[Tx] + 1)
3174          */
3175         timeset = coalesce >> (ECORE_CAU_DEF_TX_TIMER_RES + 1);
3176
3177         rc = ecore_set_coalesce(p_hwfn, p_ptt, address, &qzone,
3178                                 sizeof(struct ystorm_eth_queue_zone), timeset);
3179         if (rc != ECORE_SUCCESS)
3180                 goto out;
3181
3182         p_hwfn->p_dev->tx_coalesce_usecs = coalesce;
3183 out:
3184         return rc;
3185 }
3186
3187 /* Calculate final WFQ values for all vports and configure them.
3188  * After this configuration each vport gets an approximate min rate of
3189  * vport_wfq * min_pf_rate / ECORE_WFQ_UNIT.
3190  */
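/* For example, assuming ECORE_WFQ_UNIT is 10000 (an assumption - the actual
 * value is defined elsewhere), a vport with vport_wfq = 400 under a PF min
 * rate of 25000 Mbps is guaranteed roughly 400 * 25000 / 10000 = 1000 Mbps.
 */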
3191 static void ecore_configure_wfq_for_all_vports(struct ecore_hwfn *p_hwfn,
3192                                                struct ecore_ptt *p_ptt,
3193                                                u32 min_pf_rate)
3194 {
3195         struct init_qm_vport_params *vport_params;
3196         int i, num_vports;
3197
3198         vport_params = p_hwfn->qm_info.qm_vport_params;
3199         num_vports = p_hwfn->qm_info.num_vports;
3200
3201         for (i = 0; i < num_vports; i++) {
3202                 u32 wfq_speed = p_hwfn->qm_info.wfq_data[i].min_speed;
3203
3204                 vport_params[i].vport_wfq =
3205                     (wfq_speed * ECORE_WFQ_UNIT) / min_pf_rate;
3206                 ecore_init_vport_wfq(p_hwfn, p_ptt,
3207                                      vport_params[i].first_tx_pq_id,
3208                                      vport_params[i].vport_wfq);
3209         }
3210 }
3211
3212 static void
3213 ecore_init_wfq_default_param(struct ecore_hwfn *p_hwfn, u32 min_pf_rate)
3214 {
3215         int i, num_vports;
3216         u32 min_speed;
3217
3218         num_vports = p_hwfn->qm_info.num_vports;
3219         min_speed = min_pf_rate / num_vports;
3220
3221         for (i = 0; i < num_vports; i++) {
3222                 p_hwfn->qm_info.qm_vport_params[i].vport_wfq = 1;
3223                 p_hwfn->qm_info.wfq_data[i].default_min_speed = min_speed;
3224         }
3225 }
3226
3227 static void ecore_disable_wfq_for_all_vports(struct ecore_hwfn *p_hwfn,
3228                                              struct ecore_ptt *p_ptt,
3229                                              u32 min_pf_rate)
3230 {
3231         struct init_qm_vport_params *vport_params;
3232         int i, num_vports;
3233
3234         vport_params = p_hwfn->qm_info.qm_vport_params;
3235         num_vports = p_hwfn->qm_info.num_vports;
3236
3237         for (i = 0; i < num_vports; i++) {
3238                 ecore_init_wfq_default_param(p_hwfn, min_pf_rate);
3239                 ecore_init_vport_wfq(p_hwfn, p_ptt,
3240                                      vport_params[i].first_tx_pq_id,
3241                                      vport_params[i].vport_wfq);
3242         }
3243 }
3244
3245 /* validate wfq for a given vport and required min rate */
3246 static enum _ecore_status_t ecore_init_wfq_param(struct ecore_hwfn *p_hwfn,
3247                                                  u16 vport_id, u32 req_rate,
3248                                                  u32 min_pf_rate)
3249 {
3250         u32 total_req_min_rate = 0, total_left_rate = 0, left_rate_per_vp = 0;
3251         int non_requested_count = 0, req_count = 0, i, num_vports;
3252
3253         num_vports = p_hwfn->qm_info.num_vports;
3254
3255         /* Check pre-set data for some of the vports */
3256         for (i = 0; i < num_vports; i++) {
3257                 u32 tmp_speed;
3258
3259                 if ((i != vport_id) && p_hwfn->qm_info.wfq_data[i].configured) {
3260                         req_count++;
3261                         tmp_speed = p_hwfn->qm_info.wfq_data[i].min_speed;
3262                         total_req_min_rate += tmp_speed;
3263                 }
3264         }
3265
3266         /* Include current vport data as well */
3267         req_count++;
3268         total_req_min_rate += req_rate;
3269         non_requested_count = p_hwfn->qm_info.num_vports - req_count;
3270
3271         /* validate possible error cases */
3272         if (req_rate > min_pf_rate) {
3273                 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
3274                            "Vport [%d] - Requested rate[%d Mbps] is greater"
3275                            " than configured PF min rate[%d Mbps]\n",
3276                            vport_id, req_rate, min_pf_rate);
3277                 return ECORE_INVAL;
3278         }
3279
3280         if (req_rate * ECORE_WFQ_UNIT / min_pf_rate < 1) {
3281                 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
3282                            "Vport [%d] - Requested rate[%d Mbps] is less than"
3283                            " one percent of configured PF min rate[%d Mbps]\n",
3284                            vport_id, req_rate, min_pf_rate);
3285                 return ECORE_INVAL;
3286         }
3287
3288         /* TBD - for number of vports greater than 100 */
3289         if (ECORE_WFQ_UNIT / p_hwfn->qm_info.num_vports < 1) {
3290                 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
3291                            "Number of vports is greater than 100\n");
3292                 return ECORE_INVAL;
3293         }
3294
3295         if (total_req_min_rate > min_pf_rate) {
3296                 DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
3297                            "Total requested min rate for all vports[%d Mbps]"
3298                            " is greater than configured PF min rate[%d Mbps]\n",
3299                            total_req_min_rate, min_pf_rate);
3300                 return ECORE_INVAL;
3301         }
3302
3303         /* Rate left over for the non-requested vports */
3304         total_left_rate = min_pf_rate - total_req_min_rate;
3305         left_rate_per_vp = total_left_rate / non_requested_count;
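        /* E.g., with min_pf_rate = 10000 Mbps, 4 vports and a single
         * configured request of 4000 Mbps, each of the 3 remaining vports is
         * left with (10000 - 4000) / 3 = 2000 Mbps.
         */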
3306
3307         /* Fail if the non-requested vports would get < 1% of min bw each */
3308         if (left_rate_per_vp * ECORE_WFQ_UNIT / min_pf_rate < 1)
3309                 return ECORE_INVAL;
3310
3311         /* Now that req_rate for the given vport passes all checks,
3312          * assign the final wfq rates to all vports.
3313          */
3314         p_hwfn->qm_info.wfq_data[vport_id].min_speed = req_rate;
3315         p_hwfn->qm_info.wfq_data[vport_id].configured = true;
3316
3317         for (i = 0; i < num_vports; i++) {
3318                 if (p_hwfn->qm_info.wfq_data[i].configured)
3319                         continue;
3320
3321                 p_hwfn->qm_info.wfq_data[i].min_speed = left_rate_per_vp;
3322         }
3323
3324         return ECORE_SUCCESS;
3325 }
3326
3327 static int __ecore_configure_vport_wfq(struct ecore_hwfn *p_hwfn,
3328                                        struct ecore_ptt *p_ptt,
3329                                        u16 vp_id, u32 rate)
3330 {
3331         struct ecore_mcp_link_state *p_link;
3332         int rc = ECORE_SUCCESS;
3333
3334         p_link = &p_hwfn->p_dev->hwfns[0].mcp_info->link_output;
3335
3336         if (!p_link->min_pf_rate) {
3337                 p_hwfn->qm_info.wfq_data[vp_id].min_speed = rate;
3338                 p_hwfn->qm_info.wfq_data[vp_id].configured = true;
3339                 return rc;
3340         }
3341
3342         rc = ecore_init_wfq_param(p_hwfn, vp_id, rate, p_link->min_pf_rate);
3343
3344         if (rc == ECORE_SUCCESS)
3345                 ecore_configure_wfq_for_all_vports(p_hwfn, p_ptt,
3346                                                    p_link->min_pf_rate);
3347         else
3348                 DP_NOTICE(p_hwfn, false,
3349                           "Validation failed while configuring min rate\n");
3350
3351         return rc;
3352 }
3353
3354 static int __ecore_configure_vp_wfq_on_link_change(struct ecore_hwfn *p_hwfn,
3355                                                    struct ecore_ptt *p_ptt,
3356                                                    u32 min_pf_rate)
3357 {
3358         int rc = ECORE_SUCCESS;
3359         bool use_wfq = false;
3360         u16 i, num_vports;
3361
3362         num_vports = p_hwfn->qm_info.num_vports;
3363
3364         /* Validate all pre configured vports for wfq */
3365         for (i = 0; i < num_vports; i++) {
3366                 if (p_hwfn->qm_info.wfq_data[i].configured) {
3367                         u32 rate = p_hwfn->qm_info.wfq_data[i].min_speed;
3368
3369                         use_wfq = true;
3370                         rc = ecore_init_wfq_param(p_hwfn, i, rate, min_pf_rate);
3371                         if (rc == ECORE_INVAL) {
3372                                 DP_NOTICE(p_hwfn, false,
3373                                           "Validation failed while"
3374                                           " configuring min rate\n");
3375                                 break;
3376                         }
3377                 }
3378         }
3379
3380         if (rc == ECORE_SUCCESS && use_wfq)
3381                 ecore_configure_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate);
3382         else
3383                 ecore_disable_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate);
3384
3385         return rc;
3386 }
3387
3388 /* Main API for ecore clients to configure vport min rate.
3389  * vp_id - vport id within the PF, range [0 - (total_num_vports_per_pf - 1)]
3390  * rate - Speed in Mbps to be assigned to the given vport.
3391  */
3392 int ecore_configure_vport_wfq(struct ecore_dev *p_dev, u16 vp_id, u32 rate)
3393 {
3394         int i, rc = ECORE_INVAL;
3395
3396         /* TBD - for multiple hardware functions - that is 100 gig */
3397         if (p_dev->num_hwfns > 1) {
3398                 DP_NOTICE(p_dev, false,
3399                           "WFQ configuration is not supported for this dev\n");
3400                 return rc;
3401         }
3402
3403         for_each_hwfn(p_dev, i) {
3404                 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
3405                 struct ecore_ptt *p_ptt;
3406
3407                 p_ptt = ecore_ptt_acquire(p_hwfn);
3408                 if (!p_ptt)
3409                         return ECORE_TIMEOUT;
3410
3411                 rc = __ecore_configure_vport_wfq(p_hwfn, p_ptt, vp_id, rate);
3412
3413                 if (rc != ECORE_SUCCESS) {
3414                         ecore_ptt_release(p_hwfn, p_ptt);
3415                         return rc;
3416                 }
3417
3418                 ecore_ptt_release(p_hwfn, p_ptt);
3419         }
3420
3421         return rc;
3422 }
3423
3424 /* API to configure WFQ from mcp link change */
3425 void ecore_configure_vp_wfq_on_link_change(struct ecore_dev *p_dev,
3426                                            u32 min_pf_rate)
3427 {
3428         int i;
3429
3430         /* TBD - for multiple hardware functions - that is 100 gig */
3431         if (p_dev->num_hwfns > 1) {
3432                 DP_VERBOSE(p_dev, ECORE_MSG_LINK,
3433                            "WFQ configuration is not supported for this dev\n");
3434                 return;
3435         }
3436
3437         for_each_hwfn(p_dev, i) {
3438                 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
3439
3440                 __ecore_configure_vp_wfq_on_link_change(p_hwfn,
3441                                                         p_hwfn->p_dpc_ptt,
3442                                                         min_pf_rate);
3443         }
3444 }
3445
3446 int __ecore_configure_pf_max_bandwidth(struct ecore_hwfn *p_hwfn,
3447                                        struct ecore_ptt *p_ptt,
3448                                        struct ecore_mcp_link_state *p_link,
3449                                        u8 max_bw)
3450 {
3451         int rc = ECORE_SUCCESS;
3452
3453         p_hwfn->mcp_info->func_info.bandwidth_max = max_bw;
3454
3455         if (!p_link->line_speed)
3456                 return rc;
3457
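        /* max_bw is a percentage of the current line speed; e.g. max_bw = 40
         * on a 25000 Mbps link caps the PF at 10000 Mbps.
         */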
3458         p_link->speed = (p_link->line_speed * max_bw) / 100;
3459
3460         rc = ecore_init_pf_rl(p_hwfn, p_ptt, p_hwfn->rel_pf_id, p_link->speed);
3461
3462         DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
3463                    "Configured MAX bandwidth to be %08x Mb/sec\n",
3464                    p_link->speed);
3465
3466         return rc;
3467 }
3468
3469 /* Main API to configure PF max bandwidth where bw range is [1 - 100] */
3470 int ecore_configure_pf_max_bandwidth(struct ecore_dev *p_dev, u8 max_bw)
3471 {
3472         int i, rc = ECORE_INVAL;
3473
3474         if (max_bw < 1 || max_bw > 100) {
3475                 DP_NOTICE(p_dev, false, "PF max bw valid range is [1-100]\n");
3476                 return rc;
3477         }
3478
3479         for_each_hwfn(p_dev, i) {
3480                 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
3481                 struct ecore_hwfn *p_lead = ECORE_LEADING_HWFN(p_dev);
3482                 struct ecore_mcp_link_state *p_link;
3483                 struct ecore_ptt *p_ptt;
3484
3485                 p_link = &p_lead->mcp_info->link_output;
3486
3487                 p_ptt = ecore_ptt_acquire(p_hwfn);
3488                 if (!p_ptt)
3489                         return ECORE_TIMEOUT;
3490
3491                 rc = __ecore_configure_pf_max_bandwidth(p_hwfn, p_ptt,
3492                                                         p_link, max_bw);
3493                 if (rc != ECORE_SUCCESS) {
3494                         ecore_ptt_release(p_hwfn, p_ptt);
3495                         return rc;
3496                 }
3497
3498                 ecore_ptt_release(p_hwfn, p_ptt);
3499         }
3500
3501         return rc;
3502 }
3503
3504 int __ecore_configure_pf_min_bandwidth(struct ecore_hwfn *p_hwfn,
3505                                        struct ecore_ptt *p_ptt,
3506                                        struct ecore_mcp_link_state *p_link,
3507                                        u8 min_bw)
3508 {
3509         int rc = ECORE_SUCCESS;
3510
3511         p_hwfn->mcp_info->func_info.bandwidth_min = min_bw;
3512
3513         if (!p_link->line_speed)
3514                 return rc;
3515
3516         p_link->min_pf_rate = (p_link->line_speed * min_bw) / 100;
3517
3518         rc = ecore_init_pf_wfq(p_hwfn, p_ptt, p_hwfn->rel_pf_id, min_bw);
3519
3520         DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
3521                    "Configured MIN bandwidth to be %d Mb/sec\n",
3522                    p_link->min_pf_rate);
3523
3524         return rc;
3525 }
3526
3527 /* Main API to configure PF min bandwidth where bw range is [1-100] */
3528 int ecore_configure_pf_min_bandwidth(struct ecore_dev *p_dev, u8 min_bw)
3529 {
3530         int i, rc = ECORE_INVAL;
3531
3532         if (min_bw < 1 || min_bw > 100) {
3533                 DP_NOTICE(p_dev, false, "PF min bw valid range is [1-100]\n");
3534                 return rc;
3535         }
3536
3537         for_each_hwfn(p_dev, i) {
3538                 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
3539                 struct ecore_hwfn *p_lead = ECORE_LEADING_HWFN(p_dev);
3540                 struct ecore_mcp_link_state *p_link;
3541                 struct ecore_ptt *p_ptt;
3542
3543                 p_link = &p_lead->mcp_info->link_output;
3544
3545                 p_ptt = ecore_ptt_acquire(p_hwfn);
3546                 if (!p_ptt)
3547                         return ECORE_TIMEOUT;
3548
3549                 rc = __ecore_configure_pf_min_bandwidth(p_hwfn, p_ptt,
3550                                                         p_link, min_bw);
3551                 if (rc != ECORE_SUCCESS) {
3552                         ecore_ptt_release(p_hwfn, p_ptt);
3553                         return rc;
3554                 }
3555
3556                 if (p_link->min_pf_rate) {
3557                         u32 min_rate = p_link->min_pf_rate;
3558
3559                         rc = __ecore_configure_vp_wfq_on_link_change(p_hwfn,
3560                                                                      p_ptt,
3561                                                                      min_rate);
3562                 }
3563
3564                 ecore_ptt_release(p_hwfn, p_ptt);
3565         }
3566
3567         return rc;
3568 }
3569
3570 void ecore_clean_wfq_db(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
3571 {
3572         struct ecore_mcp_link_state *p_link;
3573
3574         p_link = &p_hwfn->mcp_info->link_output;
3575
3576         if (p_link->min_pf_rate)
3577                 ecore_disable_wfq_for_all_vports(p_hwfn, p_ptt,
3578                                                  p_link->min_pf_rate);
3579
3580         OSAL_MEMSET(p_hwfn->qm_info.wfq_data, 0,
3581                     sizeof(*p_hwfn->qm_info.wfq_data) *
3582                     p_hwfn->qm_info.num_vports);
3583 }
3584
3585 int ecore_device_num_engines(struct ecore_dev *p_dev)
3586 {
3587         return ECORE_IS_BB(p_dev) ? 2 : 1;
3588 }
3589
3590 int ecore_device_num_ports(struct ecore_dev *p_dev)
3591 {
3592         /* In CMT mode there is always only one port */
3593         if (p_dev->num_hwfns > 1)
3594                 return 1;
3595
3596         return p_dev->num_ports_in_engines * ecore_device_num_engines(p_dev);
3597 }