/* dpdk: drivers/net/qede/base/ecore_init_ops.c (upstream version 18.08) */
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright (c) 2016 - 2018 Cavium Inc.
3  * All rights reserved.
4  * www.cavium.com
5  */
6
7 /* include the precompiled configuration values - only once */
8 #include "bcm_osal.h"
9 #include "ecore_hsi_common.h"
10 #include "ecore.h"
11 #include "ecore_hw.h"
12 #include "ecore_status.h"
13 #include "ecore_rt_defs.h"
14 #include "ecore_init_fw_funcs.h"
15
16 #include "ecore_iro_values.h"
17 #include "ecore_sriov.h"
18 #include "ecore_gtt_values.h"
19 #include "reg_addr.h"
20 #include "ecore_init_ops.h"
21
22 #define ECORE_INIT_MAX_POLL_COUNT       100
23 #define ECORE_INIT_POLL_PERIOD_US       500
24
/* Point the device at the precompiled IRO (internal RAM offsets) table
 * provided by ecore_iro_values.h.
 */
void ecore_init_iro_array(struct ecore_dev *p_dev)
{
	p_dev->iro_arr = iro_arr;
}
29
30 /* Runtime configuration helpers */
31 void ecore_init_clear_rt_data(struct ecore_hwfn *p_hwfn)
32 {
33         int i;
34
35         for (i = 0; i < RUNTIME_ARRAY_SIZE; i++)
36                 p_hwfn->rt_data.b_valid[i] = false;
37 }
38
39 void ecore_init_store_rt_reg(struct ecore_hwfn *p_hwfn, u32 rt_offset, u32 val)
40 {
41         if (rt_offset >= RUNTIME_ARRAY_SIZE) {
42                 DP_ERR(p_hwfn,
43                        "Avoid storing %u in rt_data at index %u since RUNTIME_ARRAY_SIZE is %u!\n",
44                        val, rt_offset, RUNTIME_ARRAY_SIZE);
45                 return;
46         }
47
48         p_hwfn->rt_data.init_val[rt_offset] = val;
49         p_hwfn->rt_data.b_valid[rt_offset] = true;
50 }
51
52 void ecore_init_store_rt_agg(struct ecore_hwfn *p_hwfn,
53                              u32 rt_offset, u32 *p_val, osal_size_t size)
54 {
55         osal_size_t i;
56
57         if ((rt_offset + size - 1) >= RUNTIME_ARRAY_SIZE) {
58                 DP_ERR(p_hwfn,
59                        "Avoid storing values in rt_data at indices %u-%u since RUNTIME_ARRAY_SIZE is %u!\n",
60                        rt_offset, (u32)(rt_offset + size - 1),
61                        RUNTIME_ARRAY_SIZE);
62                 return;
63         }
64
65         for (i = 0; i < size / sizeof(u32); i++) {
66                 p_hwfn->rt_data.init_val[rt_offset + i] = p_val[i];
67                 p_hwfn->rt_data.b_valid[rt_offset + i] = true;
68         }
69 }
70
/* Write the runtime-array slice [rt_offset, rt_offset + size) to the chip
 * at GRC address @addr. Only entries flagged valid are written. When
 * @b_must_dmae is set (wide-bus registers), contiguous runs of valid
 * entries are coalesced into single DMAE transactions; otherwise each
 * valid entry is written individually over GRC.
 */
static enum _ecore_status_t ecore_init_rt(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt,
					  u32 addr,
					  u16 rt_offset,
					  u16 size, bool b_must_dmae)
{
	u32 *p_init_val = &p_hwfn->rt_data.init_val[rt_offset];
	bool *p_valid = &p_hwfn->rt_data.b_valid[rt_offset];
	u16 i, segment;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	/* Since not all RT entries are initialized, go over the RT and
	 * for each segment of initialized values use DMA.
	 */
	for (i = 0; i < size; i++) {
		if (!p_valid[i])
			continue;

		/* In case there isn't any wide-bus configuration here,
		 * simply write the data instead of using dmae.
		 */
		if (!b_must_dmae) {
			ecore_wr(p_hwfn, p_ptt, addr + (i << 2), p_init_val[i]);
			continue;
		}

		/* Find the length of the run of valid entries starting at i;
		 * segment stops at the first invalid entry (or end of slice).
		 */
		for (segment = 1; i + segment < size; segment++)
			if (!p_valid[i + segment])
				break;

		rc = ecore_dmae_host2grc(p_hwfn, p_ptt,
					 (osal_uintptr_t)(p_init_val + i),
					 addr + (i << 2), segment, 0);
		if (rc != ECORE_SUCCESS)
			return rc;

		/* Jump over the entire segment, including invalid entry;
		 * the loop's own i++ accounts for the invalid slot that
		 * terminated the run.
		 */
		i += segment;
	}

	return rc;
}
114
115 enum _ecore_status_t ecore_init_alloc(struct ecore_hwfn *p_hwfn)
116 {
117         struct ecore_rt_data *rt_data = &p_hwfn->rt_data;
118
119         if (IS_VF(p_hwfn->p_dev))
120                 return ECORE_SUCCESS;
121
122         rt_data->b_valid = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
123                                        sizeof(bool) * RUNTIME_ARRAY_SIZE);
124         if (!rt_data->b_valid)
125                 return ECORE_NOMEM;
126
127         rt_data->init_val = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
128                                         sizeof(u32) * RUNTIME_ARRAY_SIZE);
129         if (!rt_data->init_val) {
130                 OSAL_FREE(p_hwfn->p_dev, rt_data->b_valid);
131                 return ECORE_NOMEM;
132         }
133
134         return ECORE_SUCCESS;
135 }
136
137 void ecore_init_free(struct ecore_hwfn *p_hwfn)
138 {
139         OSAL_FREE(p_hwfn->p_dev, p_hwfn->rt_data.init_val);
140         OSAL_FREE(p_hwfn->p_dev, p_hwfn->rt_data.b_valid);
141 }
142
/* Copy @size dwords from @p_buf + @dmae_data_offset to GRC address @addr.
 * Short sections (< 16 dwords) that aren't wide-bus, or any section when
 * DMAE is disallowed, are written dword-by-dword over GRC; everything
 * else goes through a single DMAE transaction.
 */
static enum _ecore_status_t ecore_init_array_dmae(struct ecore_hwfn *p_hwfn,
						  struct ecore_ptt *p_ptt,
						  u32 addr,
						  u32 dmae_data_offset,
						  u32 size, const u32 *p_buf,
						  bool b_must_dmae,
						  bool b_can_dmae)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;

	/* Perform DMAE only for lengthy enough sections or for wide-bus */
#ifndef ASIC_ONLY
	/* On FPGA/EMUL, short sections always avoid DMAE regardless of
	 * the wide-bus requirement.
	 */
	if ((CHIP_REV_IS_SLOW(p_hwfn->p_dev) && (size < 16)) ||
	    !b_can_dmae || (!b_must_dmae && (size < 16))) {
#else
	if (!b_can_dmae || (!b_must_dmae && (size < 16))) {
#endif
		const u32 *data = p_buf + dmae_data_offset;
		u32 i;

		for (i = 0; i < size; i++)
			ecore_wr(p_hwfn, p_ptt, addr + (i << 2), data[i]);
	} else {
		rc = ecore_dmae_host2grc(p_hwfn, p_ptt,
					 (osal_uintptr_t)(p_buf +
							   dmae_data_offset),
					 addr, size, 0);
	}

	return rc;
}
174
/* Zero @fill_count dwords at GRC address @addr with one DMAE transaction.
 * ECORE_DMAE_FLAG_RW_REPL_SRC presumably makes DMAE replicate the zeroed
 * source rather than advance through host memory — confirm in ecore_hw.
 * NOTE(review): zero_buffer is a shared static scratch buffer, re-zeroed
 * defensively on every call; not safe for concurrent callers — confirm
 * init-ops execution is serialized.
 */
static enum _ecore_status_t ecore_init_fill_dmae(struct ecore_hwfn *p_hwfn,
						 struct ecore_ptt *p_ptt,
						 u32 addr, u32 fill_count)
{
	static u32 zero_buffer[DMAE_MAX_RW_SIZE];

	OSAL_MEMSET(zero_buffer, 0, sizeof(u32) * DMAE_MAX_RW_SIZE);

	return ecore_dmae_host2grc(p_hwfn, p_ptt,
				   (osal_uintptr_t)&zero_buffer[0],
				   addr, fill_count,
				   ECORE_DMAE_FLAG_RW_REPL_SRC);
}
188
189 static void ecore_init_fill(struct ecore_hwfn *p_hwfn,
190                             struct ecore_ptt *p_ptt,
191                             u32 addr, u32 fill, u32 fill_count)
192 {
193         u32 i;
194
195         for (i = 0; i < fill_count; i++, addr += sizeof(u32))
196                 ecore_wr(p_hwfn, p_ptt, addr, fill);
197 }
198
/* Execute the array-sourced variant of an init WRITE op: the payload
 * lives in the fw-data array blob at @cmd->args.array_offset and may be
 * zipped, a repeated pattern, or a plain dword array.
 */
static enum _ecore_status_t ecore_init_cmd_array(struct ecore_hwfn *p_hwfn,
						 struct ecore_ptt *p_ptt,
						 struct init_write_op *cmd,
						 bool b_must_dmae,
						 bool b_can_dmae)
{
	u32 dmae_array_offset = OSAL_LE32_TO_CPU(cmd->args.array_offset);
	u32 data = OSAL_LE32_TO_CPU(cmd->data);
	u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
#ifdef CONFIG_ECORE_ZIPPED_FW
	u32 offset, output_len, input_len, max_size;
#endif
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	union init_array_hdr *hdr;
	const u32 *array_data;
	enum _ecore_status_t rc = ECORE_SUCCESS;
	u32 size;

	array_data = p_dev->fw_data->arr_data;

	/* The dword at the array offset is the array header describing
	 * the payload type and size.
	 */
	hdr = (union init_array_hdr *)
		(uintptr_t)(array_data + dmae_array_offset);
	data = OSAL_LE32_TO_CPU(hdr->raw.data);
	switch (GET_FIELD(data, INIT_ARRAY_RAW_HDR_TYPE)) {
	case INIT_ARR_ZIPPED:
#ifdef CONFIG_ECORE_ZIPPED_FW
		/* Payload starts right after the header dword */
		offset = dmae_array_offset + 1;
		input_len = GET_FIELD(data, INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE);
		max_size = MAX_ZIPPED_SIZE * 4;
		OSAL_MEMSET(p_hwfn->unzip_buf, 0, max_size);

		/* Decompress into the per-hwfn scratch buffer allocated by
		 * ecore_init_run(); output_len == 0 signals failure.
		 */
		output_len = OSAL_UNZIP_DATA(p_hwfn, input_len,
				(u8 *)(uintptr_t)&array_data[offset],
				max_size,
				(u8 *)p_hwfn->unzip_buf);
		if (output_len) {
			rc = ecore_init_array_dmae(p_hwfn, p_ptt, addr, 0,
						   output_len,
						   p_hwfn->unzip_buf,
						   b_must_dmae, b_can_dmae);
		} else {
			DP_NOTICE(p_hwfn, true, "Failed to unzip dmae data\n");
			rc = ECORE_INVAL;
		}
#else
		DP_NOTICE(p_hwfn, true,
			  "Using zipped firmware without config enabled\n");
		rc = ECORE_INVAL;
#endif
		break;
	case INIT_ARR_PATTERN:
		{
			u32 repeats = GET_FIELD(data,
					INIT_ARRAY_PATTERN_HDR_REPETITIONS);
			u32 i;

			size = GET_FIELD(data,
					 INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE);

			/* Write the same pattern @repeats times, advancing
			 * the destination by the pattern size each pass.
			 */
			for (i = 0; i < repeats; i++, addr += size << 2) {
				rc = ecore_init_array_dmae(p_hwfn, p_ptt, addr,
							   dmae_array_offset +
							   1, size, array_data,
							   b_must_dmae,
							   b_can_dmae);
				if (rc)
					break;
		}
		break;
	}
	case INIT_ARR_STANDARD:
		size = GET_FIELD(data, INIT_ARRAY_STANDARD_HDR_SIZE);
		rc = ecore_init_array_dmae(p_hwfn, p_ptt, addr,
					   dmae_array_offset + 1,
					   size, array_data,
					   b_must_dmae, b_can_dmae);
		break;
	}

	return rc;
}
280
281 /* init_ops write command */
282 static enum _ecore_status_t ecore_init_cmd_wr(struct ecore_hwfn *p_hwfn,
283                                               struct ecore_ptt *p_ptt,
284                                               struct init_write_op *p_cmd,
285                                               bool b_can_dmae)
286 {
287         u32 data = OSAL_LE32_TO_CPU(p_cmd->data);
288         bool b_must_dmae = GET_FIELD(data, INIT_WRITE_OP_WIDE_BUS);
289         u32 addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2;
290         enum _ecore_status_t rc = ECORE_SUCCESS;
291
292         /* Sanitize */
293         if (b_must_dmae && !b_can_dmae) {
294                 DP_NOTICE(p_hwfn, true,
295                           "Need to write to %08x for Wide-bus but DMAE isn't"
296                           " allowed\n",
297                           addr);
298                 return ECORE_INVAL;
299         }
300
301         switch (GET_FIELD(data, INIT_WRITE_OP_SOURCE)) {
302         case INIT_SRC_INLINE:
303                 data = OSAL_LE32_TO_CPU(p_cmd->args.inline_val);
304                 ecore_wr(p_hwfn, p_ptt, addr, data);
305                 break;
306         case INIT_SRC_ZEROS:
307                 data = OSAL_LE32_TO_CPU(p_cmd->args.zeros_count);
308                 if (b_must_dmae || (b_can_dmae && (data >= 64)))
309                         rc = ecore_init_fill_dmae(p_hwfn, p_ptt, addr, data);
310                 else
311                         ecore_init_fill(p_hwfn, p_ptt, addr, 0, data);
312                 break;
313         case INIT_SRC_ARRAY:
314                 rc = ecore_init_cmd_array(p_hwfn, p_ptt, p_cmd,
315                                           b_must_dmae, b_can_dmae);
316                 break;
317         case INIT_SRC_RUNTIME:
318                 rc = ecore_init_rt(p_hwfn, p_ptt, addr,
319                                    OSAL_LE16_TO_CPU(p_cmd->args.runtime.offset),
320                                    OSAL_LE16_TO_CPU(p_cmd->args.runtime.size),
321                                    b_must_dmae);
322                 break;
323         }
324
325         return rc;
326 }
327
328 static OSAL_INLINE bool comp_eq(u32 val, u32 expected_val)
329 {
330         return (val == expected_val);
331 }
332
333 static OSAL_INLINE bool comp_and(u32 val, u32 expected_val)
334 {
335         return (val & expected_val) == expected_val;
336 }
337
338 static OSAL_INLINE bool comp_or(u32 val, u32 expected_val)
339 {
340         return (val | expected_val) > 0;
341 }
342
343 /* init_ops read/poll commands */
/* init_ops read/poll commands: read the op's register once and, for
 * polling ops, re-read until the comparison against the expected value
 * succeeds or ECORE_INIT_MAX_POLL_COUNT attempts elapse. A timeout is
 * logged but not propagated.
 */
static void ecore_init_cmd_rd(struct ecore_hwfn *p_hwfn,
			      struct ecore_ptt *p_ptt, struct init_read_op *cmd)
{
	bool (*comp_check)(u32 val, u32 expected_val);
	u32 delay = ECORE_INIT_POLL_PERIOD_US, val;
	u32 data, addr, poll;
	int i;

	data = OSAL_LE32_TO_CPU(cmd->op_data);
	addr = GET_FIELD(data, INIT_READ_OP_ADDRESS) << 2;
	poll = GET_FIELD(data, INIT_READ_OP_POLL_TYPE);

#ifndef ASIC_ONLY
	/* Emulation is far slower than silicon; stretch the poll period */
	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
		delay *= 100;
#endif

	/* The read itself is performed even for non-polling ops */
	val = ecore_rd(p_hwfn, p_ptt, addr);

	if (poll == INIT_POLL_NONE)
		return;

	/* Select the comparator matching the op's poll type */
	switch (poll) {
	case INIT_POLL_EQ:
		comp_check = comp_eq;
		break;
	case INIT_POLL_OR:
		comp_check = comp_or;
		break;
	case INIT_POLL_AND:
		comp_check = comp_and;
		break;
	default:
		DP_ERR(p_hwfn, "Invalid poll comparison type %08x\n",
		       cmd->op_data);
		return;
	}

	data = OSAL_LE32_TO_CPU(cmd->expected_val);
	for (i = 0;
	     i < ECORE_INIT_MAX_POLL_COUNT && !comp_check(val, data); i++) {
		OSAL_UDELAY(delay);
		val = ecore_rd(p_hwfn, p_ptt, addr);
	}

	if (i == ECORE_INIT_MAX_POLL_COUNT)
		DP_ERR(p_hwfn, "Timeout when polling reg: 0x%08x [ Waiting-for: %08x Got: %08x (comparison %08x)]\n",
		       addr, OSAL_LE32_TO_CPU(cmd->expected_val), val,
		       OSAL_LE32_TO_CPU(cmd->op_data));
}
394
395 /* init_ops callbacks entry point */
396 static enum _ecore_status_t ecore_init_cmd_cb(struct ecore_hwfn *p_hwfn,
397                                               struct ecore_ptt *p_ptt,
398                                               struct init_callback_op *p_cmd)
399 {
400         enum _ecore_status_t rc;
401
402         switch (p_cmd->callback_id) {
403         case DMAE_READY_CB:
404                 rc = ecore_dmae_sanity(p_hwfn, p_ptt, "engine_phase");
405                 break;
406         default:
407                 DP_NOTICE(p_hwfn, false, "Unexpected init op callback ID %d\n",
408                           p_cmd->callback_id);
409                 return ECORE_INVAL;
410         }
411
412         return rc;
413 }
414
415 static u8 ecore_init_cmd_mode_match(struct ecore_hwfn *p_hwfn,
416                                     u16 *p_offset, int modes)
417 {
418         struct ecore_dev *p_dev = p_hwfn->p_dev;
419         const u8 *modes_tree_buf;
420         u8 arg1, arg2, tree_val;
421
422         modes_tree_buf = p_dev->fw_data->modes_tree_buf;
423         tree_val = modes_tree_buf[(*p_offset)++];
424         switch (tree_val) {
425         case INIT_MODE_OP_NOT:
426                 return ecore_init_cmd_mode_match(p_hwfn, p_offset, modes) ^ 1;
427         case INIT_MODE_OP_OR:
428                 arg1 = ecore_init_cmd_mode_match(p_hwfn, p_offset, modes);
429                 arg2 = ecore_init_cmd_mode_match(p_hwfn, p_offset, modes);
430                 return arg1 | arg2;
431         case INIT_MODE_OP_AND:
432                 arg1 = ecore_init_cmd_mode_match(p_hwfn, p_offset, modes);
433                 arg2 = ecore_init_cmd_mode_match(p_hwfn, p_offset, modes);
434                 return arg1 & arg2;
435         default:
436                 tree_val -= MAX_INIT_MODE_OPS;
437                 return (modes & (1 << tree_val)) ? 1 : 0;
438         }
439 }
440
441 static u32 ecore_init_cmd_mode(struct ecore_hwfn *p_hwfn,
442                                struct init_if_mode_op *p_cmd, int modes)
443 {
444         u16 offset = OSAL_LE16_TO_CPU(p_cmd->modes_buf_offset);
445
446         if (ecore_init_cmd_mode_match(p_hwfn, &offset, modes))
447                 return 0;
448         else
449                 return GET_FIELD(OSAL_LE32_TO_CPU(p_cmd->op_data),
450                                  INIT_IF_MODE_OP_CMD_OFFSET);
451 }
452
453 static u32 ecore_init_cmd_phase(struct init_if_phase_op *p_cmd,
454                                 u32 phase, u32 phase_id)
455 {
456         u32 data = OSAL_LE32_TO_CPU(p_cmd->phase_data);
457         u32 op_data = OSAL_LE32_TO_CPU(p_cmd->op_data);
458
459         if (!(GET_FIELD(data, INIT_IF_PHASE_OP_PHASE) == phase &&
460               (GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == ANY_PHASE_ID ||
461                GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == phase_id)))
462                 return GET_FIELD(op_data, INIT_IF_PHASE_OP_CMD_OFFSET);
463         else
464                 return 0;
465 }
466
/* init_ops entry point: execute all firmware init commands that apply to
 * the given @phase/@phase_id under the @modes mask. IF_MODE/IF_PHASE ops
 * advance cmd_num to skip non-applicable command ranges. Stops on the
 * first failing command and returns its status.
 */
enum _ecore_status_t ecore_init_run(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt,
				    int phase, int phase_id, int modes)
{
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	u32 cmd_num, num_init_ops;
	union init_op *init_ops;
	bool b_dmae = false;
	enum _ecore_status_t rc = ECORE_SUCCESS;

	num_init_ops = p_dev->fw_data->init_ops_size;
	init_ops = p_dev->fw_data->init_ops;

#ifdef CONFIG_ECORE_ZIPPED_FW
	/* Scratch buffer used by ecore_init_cmd_array() for decompressing
	 * INIT_ARR_ZIPPED payloads; freed below.
	 */
	p_hwfn->unzip_buf = OSAL_ZALLOC(p_hwfn->p_dev, GFP_ATOMIC,
					MAX_ZIPPED_SIZE * 4);
	if (!p_hwfn->unzip_buf) {
		DP_NOTICE(p_hwfn, true, "Failed to allocate unzip buffer\n");
		return ECORE_NOMEM;
	}
#endif

	for (cmd_num = 0; cmd_num < num_init_ops; cmd_num++) {
		union init_op *cmd = &init_ops[cmd_num];
		u32 data = OSAL_LE32_TO_CPU(cmd->raw.op_data);

		switch (GET_FIELD(data, INIT_CALLBACK_OP_OP)) {
		case INIT_OP_WRITE:
			rc = ecore_init_cmd_wr(p_hwfn, p_ptt, &cmd->write,
					       b_dmae);
			break;

		case INIT_OP_READ:
			ecore_init_cmd_rd(p_hwfn, p_ptt, &cmd->read);
			break;

		case INIT_OP_IF_MODE:
			/* Returns number of commands to skip on mismatch */
			cmd_num += ecore_init_cmd_mode(p_hwfn, &cmd->if_mode,
						       modes);
			break;
		case INIT_OP_IF_PHASE:
			cmd_num += ecore_init_cmd_phase(&cmd->if_phase, phase,
							phase_id);
			/* DMAE availability is (re)declared per phase op */
			b_dmae = GET_FIELD(data, INIT_IF_PHASE_OP_DMAE_ENABLE);
			break;
		case INIT_OP_DELAY:
			/* ecore_init_run is always invoked from
			 * sleep-able context
			 */
			OSAL_UDELAY(cmd->delay.delay);
			break;

		case INIT_OP_CALLBACK:
			rc = ecore_init_cmd_cb(p_hwfn, p_ptt, &cmd->callback);
			break;
		}

		if (rc)
			break;
	}
#ifdef CONFIG_ECORE_ZIPPED_FW
	OSAL_FREE(p_hwfn->p_dev, p_hwfn->unzip_buf);
#endif
	return rc;
}
532
/* GTT setup: on FPGA/EMUL trigger PTT/GTT initialization in PGLUE_B and
 * poll for completion (done by the MFW on ASIC), then program the global
 * PXP windows from the precompiled pxp_global_win table.
 */
void ecore_gtt_init(struct ecore_hwfn *p_hwfn,
		    struct ecore_ptt *p_ptt)
{
	u32 gtt_base;
	u32 i;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
		/* This is done by MFW on ASIC; regardless, this should only
		 * be done once per chip [i.e., common]. Implementation is
		 * not too bright, but it should work on the simple FPGA/EMUL
		 * scenarios.
		 */
		static bool initialized;
		int poll_cnt = 500;
		u32 val;

		/* initialize PTT/GTT (poll for completion) */
		if (!initialized) {
			ecore_wr(p_hwfn, p_ptt,
				 PGLUE_B_REG_START_INIT_PTT_GTT, 1);
			initialized = true;
		}

		do {
			/* ptt might be overrided by HW until this is done */
			OSAL_UDELAY(10);
			ecore_ptt_invalidate(p_hwfn);
			val = ecore_rd(p_hwfn, p_ptt,
				       PGLUE_B_REG_INIT_DONE_PTT_GTT);
		} while ((val != 1) && --poll_cnt);

		if (!poll_cnt)
			DP_ERR(p_hwfn,
			       "PGLUE_B_REG_INIT_DONE didn't complete\n");
	}
#endif

	/* Set the global windows */
	gtt_base = PXP_PF_WINDOW_ADMIN_START + PXP_PF_WINDOW_ADMIN_GLOBAL_START;

	/* Only non-zero table entries describe a window to program */
	for (i = 0; i < OSAL_ARRAY_SIZE(pxp_global_win); i++)
		if (pxp_global_win[i])
			REG_WR(p_hwfn, gtt_base + i * PXP_GLOBAL_ENTRY_SIZE,
			       pxp_global_win[i]);
}
579
/* Populate the device's fw_data pointers (init ops, value array, modes
 * tree, version info). With CONFIG_ECORE_BINARY_FW the offsets come from
 * the binary firmware buffer headers in @fw_data; otherwise the
 * statically linked tables are used and @fw_data is ignored.
 */
enum _ecore_status_t ecore_init_fw_data(struct ecore_dev *p_dev,
#ifdef CONFIG_ECORE_BINARY_FW
					const u8 *fw_data)
#else
					const u8 OSAL_UNUSED * fw_data)
#endif
{
	struct ecore_fw_data *fw = p_dev->fw_data;

#ifdef CONFIG_ECORE_BINARY_FW
	struct bin_buffer_hdr *buf_hdr;
	u32 offset, len;

	if (!fw_data) {
		DP_NOTICE(p_dev, true, "Invalid fw data\n");
		return ECORE_INVAL;
	}

	/* The blob starts with an array of buffer headers indexed by
	 * BIN_BUF_* giving each section's offset/length.
	 */
	buf_hdr = (struct bin_buffer_hdr *)(uintptr_t)fw_data;

	offset = buf_hdr[BIN_BUF_INIT_FW_VER_INFO].offset;
	fw->fw_ver_info = (struct fw_ver_info *)((uintptr_t)(fw_data + offset));

	offset = buf_hdr[BIN_BUF_INIT_CMD].offset;
	fw->init_ops = (union init_op *)((uintptr_t)(fw_data + offset));

	offset = buf_hdr[BIN_BUF_INIT_VAL].offset;
	fw->arr_data = (u32 *)((uintptr_t)(fw_data + offset));

	offset = buf_hdr[BIN_BUF_INIT_MODE_TREE].offset;
	fw->modes_tree_buf = (u8 *)((uintptr_t)(fw_data + offset));
	/* init_ops_size counts ops, not bytes */
	len = buf_hdr[BIN_BUF_INIT_CMD].length;
	fw->init_ops_size = len / sizeof(struct init_raw_op);
#else
	fw->init_ops = (union init_op *)init_ops;
	fw->arr_data = (u32 *)init_val;
	fw->modes_tree_buf = (u8 *)modes_tree_buf;
	fw->init_ops_size = init_ops_size;
#endif

	return ECORE_SUCCESS;
}