Imported Upstream version 16.07-rc1
[deb_dpdk.git] / drivers / net / qede / base / ecore_hw.c
1 /*
2  * Copyright (c) 2016 QLogic Corporation.
3  * All rights reserved.
4  * www.qlogic.com
5  *
6  * See LICENSE.qede_pmd for copyright and licensing details.
7  */
8
9 #include "bcm_osal.h"
10 #include "ecore_hsi_common.h"
11 #include "ecore_status.h"
12 #include "ecore.h"
13 #include "ecore_hw.h"
14 #include "reg_addr.h"
15 #include "ecore_utils.h"
16 #include "ecore_iov_api.h"
17
18 #ifndef ASIC_ONLY
19 #define ECORE_EMUL_FACTOR 2000
20 #define ECORE_FPGA_FACTOR 200
21 #endif
22
23 #define ECORE_BAR_ACQUIRE_TIMEOUT 1000
24
25 /* Invalid values */
26 #define ECORE_BAR_INVALID_OFFSET                -1
27
/* A single PTT (PF translation table) window descriptor.
 * One exists per external BAR PF window; free ones are kept on the
 * pool's free_list (see ecore_ptt_pool_alloc / ecore_ptt_acquire).
 */
struct ecore_ptt {
	osal_list_entry_t list_entry;	/* linkage on the pool's free list */
	unsigned int idx;		/* window index; selects admin/BAR addresses */
	struct pxp_ptt_entry pxp;	/* cached HW image: window offset + pretend */
};
33
/* Per-hwfn pool of all PTT windows; `lock` guards `free_list`. */
struct ecore_ptt_pool {
	osal_list_t free_list;		/* PTTs currently available for acquire */
	osal_spinlock_t lock;		/* protects free_list manipulation */
	struct ecore_ptt ptts[PXP_EXTERNAL_BAR_PF_WINDOW_NUM];
};
39
40 enum _ecore_status_t ecore_ptt_pool_alloc(struct ecore_hwfn *p_hwfn)
41 {
42         struct ecore_ptt_pool *p_pool;
43         int i;
44
45         p_pool = OSAL_ALLOC(p_hwfn->p_dev, GFP_KERNEL,
46                             sizeof(struct ecore_ptt_pool));
47         if (!p_pool)
48                 return ECORE_NOMEM;
49
50         OSAL_LIST_INIT(&p_pool->free_list);
51         for (i = 0; i < PXP_EXTERNAL_BAR_PF_WINDOW_NUM; i++) {
52                 p_pool->ptts[i].idx = i;
53                 p_pool->ptts[i].pxp.offset = ECORE_BAR_INVALID_OFFSET;
54                 p_pool->ptts[i].pxp.pretend.control = 0;
55
56                 /* There are special PTT entries that are taken only by design.
57                  * The rest are added ot the list for general usage.
58                  */
59                 if (i >= RESERVED_PTT_MAX)
60                         OSAL_LIST_PUSH_HEAD(&p_pool->ptts[i].list_entry,
61                                             &p_pool->free_list);
62         }
63
64         p_hwfn->p_ptt_pool = p_pool;
65         OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_pool->lock);
66         OSAL_SPIN_LOCK_INIT(&p_pool->lock);
67
68         return ECORE_SUCCESS;
69 }
70
71 void ecore_ptt_invalidate(struct ecore_hwfn *p_hwfn)
72 {
73         struct ecore_ptt *p_ptt;
74         int i;
75
76         for (i = 0; i < PXP_EXTERNAL_BAR_PF_WINDOW_NUM; i++) {
77                 p_ptt = &p_hwfn->p_ptt_pool->ptts[i];
78                 p_ptt->pxp.offset = ECORE_BAR_INVALID_OFFSET;
79         }
80 }
81
82 void ecore_ptt_pool_free(struct ecore_hwfn *p_hwfn)
83 {
84         if (p_hwfn->p_ptt_pool)
85                 OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->p_ptt_pool->lock);
86         OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_ptt_pool);
87         p_hwfn->p_ptt_pool = OSAL_NULL;
88 }
89
90 struct ecore_ptt *ecore_ptt_acquire(struct ecore_hwfn *p_hwfn)
91 {
92         struct ecore_ptt *p_ptt;
93         unsigned int i;
94
95         /* Take the free PTT from the list */
96         for (i = 0; i < ECORE_BAR_ACQUIRE_TIMEOUT; i++) {
97                 OSAL_SPIN_LOCK(&p_hwfn->p_ptt_pool->lock);
98                 if (!OSAL_LIST_IS_EMPTY(&p_hwfn->p_ptt_pool->free_list))
99                         break;
100                 OSAL_SPIN_UNLOCK(&p_hwfn->p_ptt_pool->lock);
101                 OSAL_MSLEEP(1);
102         }
103
104         /* We should not time-out, but it can happen... --> Lock isn't held */
105         if (i == ECORE_BAR_ACQUIRE_TIMEOUT) {
106                 DP_NOTICE(p_hwfn, true, "Failed to allocate PTT\n");
107                 return OSAL_NULL;
108         }
109
110         p_ptt = OSAL_LIST_FIRST_ENTRY(&p_hwfn->p_ptt_pool->free_list,
111                                       struct ecore_ptt, list_entry);
112         OSAL_LIST_REMOVE_ENTRY(&p_ptt->list_entry,
113                                &p_hwfn->p_ptt_pool->free_list);
114         OSAL_SPIN_UNLOCK(&p_hwfn->p_ptt_pool->lock);
115
116         DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "allocated ptt %d\n", p_ptt->idx);
117
118         return p_ptt;
119 }
120
121 void ecore_ptt_release(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
122 {
123         /* This PTT should not be set to pretend if it is being released */
124
125         OSAL_SPIN_LOCK(&p_hwfn->p_ptt_pool->lock);
126         OSAL_LIST_PUSH_HEAD(&p_ptt->list_entry, &p_hwfn->p_ptt_pool->free_list);
127         OSAL_SPIN_UNLOCK(&p_hwfn->p_ptt_pool->lock);
128 }
129
130 u32 ecore_ptt_get_hw_addr(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
131 {
132         /* The HW is using DWORDS and we need to translate it to Bytes */
133         return p_ptt->pxp.offset << 2;
134 }
135
136 static u32 ecore_ptt_config_addr(struct ecore_ptt *p_ptt)
137 {
138         return PXP_PF_WINDOW_ADMIN_PER_PF_START +
139             p_ptt->idx * sizeof(struct pxp_ptt_entry);
140 }
141
142 u32 ecore_ptt_get_bar_addr(struct ecore_ptt *p_ptt)
143 {
144         return PXP_EXTERNAL_BAR_PF_WINDOW_START +
145             p_ptt->idx * PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE;
146 }
147
148 void ecore_ptt_set_win(struct ecore_hwfn *p_hwfn,
149                        struct ecore_ptt *p_ptt, u32 new_hw_addr)
150 {
151         u32 prev_hw_addr;
152
153         prev_hw_addr = ecore_ptt_get_hw_addr(p_hwfn, p_ptt);
154
155         if (new_hw_addr == prev_hw_addr)
156                 return;
157
158         /* Update PTT entery in admin window */
159         DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
160                    "Updating PTT entry %d to offset 0x%x\n",
161                    p_ptt->idx, new_hw_addr);
162
163         /* The HW is using DWORDS and the address is in Bytes */
164         p_ptt->pxp.offset = new_hw_addr >> 2;
165
166         REG_WR(p_hwfn,
167                ecore_ptt_config_addr(p_ptt) +
168                OFFSETOF(struct pxp_ptt_entry, offset), p_ptt->pxp.offset);
169 }
170
171 static u32 ecore_set_ptt(struct ecore_hwfn *p_hwfn,
172                          struct ecore_ptt *p_ptt, u32 hw_addr)
173 {
174         u32 win_hw_addr = ecore_ptt_get_hw_addr(p_hwfn, p_ptt);
175         u32 offset;
176
177         offset = hw_addr - win_hw_addr;
178
179         /* Verify the address is within the window */
180         if (hw_addr < win_hw_addr ||
181             offset >= PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE) {
182                 ecore_ptt_set_win(p_hwfn, p_ptt, hw_addr);
183                 offset = 0;
184         }
185
186         return ecore_ptt_get_bar_addr(p_ptt) + offset;
187 }
188
189 struct ecore_ptt *ecore_get_reserved_ptt(struct ecore_hwfn *p_hwfn,
190                                          enum reserved_ptts ptt_idx)
191 {
192         if (ptt_idx >= RESERVED_PTT_MAX) {
193                 DP_NOTICE(p_hwfn, true,
194                           "Requested PTT %d is out of range\n", ptt_idx);
195                 return OSAL_NULL;
196         }
197
198         return &p_hwfn->p_ptt_pool->ptts[ptt_idx];
199 }
200
201 void ecore_wr(struct ecore_hwfn *p_hwfn,
202               struct ecore_ptt *p_ptt, u32 hw_addr, u32 val)
203 {
204         u32 bar_addr = ecore_set_ptt(p_hwfn, p_ptt, hw_addr);
205
206         REG_WR(p_hwfn, bar_addr, val);
207         DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
208                    "bar_addr 0x%x, hw_addr 0x%x, val 0x%x\n",
209                    bar_addr, hw_addr, val);
210
211 #ifndef ASIC_ONLY
212         if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
213                 OSAL_UDELAY(100);
214 #endif
215 }
216
217 u32 ecore_rd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 hw_addr)
218 {
219         u32 bar_addr = ecore_set_ptt(p_hwfn, p_ptt, hw_addr);
220         u32 val = REG_RD(p_hwfn, bar_addr);
221
222         DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
223                    "bar_addr 0x%x, hw_addr 0x%x, val 0x%x\n",
224                    bar_addr, hw_addr, val);
225
226 #ifndef ASIC_ONLY
227         if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
228                 OSAL_UDELAY(100);
229 #endif
230
231         return val;
232 }
233
/* Copy `n` bytes between host memory and the device, one BAR window
 * chunk at a time, 32 bits per access.
 *
 * @param addr      host-side buffer (source when to_device, dest otherwise)
 * @param hw_addr   device-side GRC byte address
 * @param n         byte count; note dw_count = quota / 4, so any trailing
 *                  non-dword remainder is silently dropped -- assumes
 *                  callers pass dword-multiple sizes (TODO confirm)
 * @param to_device true: host -> device writes; false: device -> host reads
 *
 * For a PF the PTT window is re-aimed at each chunk boundary; a VF uses
 * hw_addr directly as a BAR offset.
 */
static void ecore_memcpy_hw(struct ecore_hwfn *p_hwfn,
                            struct ecore_ptt *p_ptt,
                            void *addr,
                            u32 hw_addr, osal_size_t n, bool to_device)
{
        u32 dw_count, *host_addr, hw_offset;
        osal_size_t quota, done = 0;
        u32 OSAL_IOMEM *reg_addr;

        while (done < n) {
                /* Each pass is capped at one BAR window's worth */
                quota = OSAL_MIN_T(osal_size_t, n - done,
                                   PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE);

                if (IS_PF(p_hwfn->p_dev)) {
                        /* Point the PTT window at the current chunk */
                        ecore_ptt_set_win(p_hwfn, p_ptt, hw_addr + done);
                        hw_offset = ecore_ptt_get_bar_addr(p_ptt);
                } else {
                        hw_offset = hw_addr + done;
                }

                dw_count = quota / 4;
                host_addr = (u32 *)((u8 *)addr + done);
                reg_addr = (u32 OSAL_IOMEM *)OSAL_REG_ADDR(p_hwfn, hw_offset);

                if (to_device)
                        while (dw_count--)
                                DIRECT_REG_WR(p_hwfn, reg_addr++, *host_addr++);
                else
                        while (dw_count--)
                                *host_addr++ = DIRECT_REG_RD(p_hwfn,
                                                             reg_addr++);

                done += quota;
        }
}
269
/* Read n bytes from GRC byte address hw_addr into host buffer dest.
 * NOTE(review): the log format prints hw_addr twice -- looks like a
 * copy/paste of the ecore_memcpy_to() format; confirm before changing.
 */
void ecore_memcpy_from(struct ecore_hwfn *p_hwfn,
                       struct ecore_ptt *p_ptt,
                       void *dest, u32 hw_addr, osal_size_t n)
{
        DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
                   "hw_addr 0x%x, dest %p hw_addr 0x%x, size %lu\n",
                   hw_addr, dest, hw_addr, (unsigned long)n);

        ecore_memcpy_hw(p_hwfn, p_ptt, dest, hw_addr, n, false);
}
280
/* Write n bytes from host buffer src to GRC byte address hw_addr.
 * NOTE(review): the log format prints hw_addr twice -- presumably a
 * copy/paste slip; confirm before changing.
 */
void ecore_memcpy_to(struct ecore_hwfn *p_hwfn,
                     struct ecore_ptt *p_ptt,
                     u32 hw_addr, void *src, osal_size_t n)
{
        DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
                   "hw_addr 0x%x, hw_addr 0x%x, src %p size %lu\n",
                   hw_addr, hw_addr, src, (unsigned long)n);

        ecore_memcpy_hw(p_hwfn, p_ptt, src, hw_addr, n, true);
}
291
/* Set this PTT window to pretend accesses come from function `fid`
 * (a concrete fid value), and program the pretend section of the
 * window's HW entry via the admin window.
 *
 * The control word and fid are stored little-endian in the HW image
 * (OSAL_CPU_TO_LE16). When the VF-valid bit of fid is clear, only the
 * PF-id field is kept.
 */
void ecore_fid_pretend(struct ecore_hwfn *p_hwfn,
                       struct ecore_ptt *p_ptt, u16 fid)
{
        void *p_pretend;
        u16 control = 0;

        SET_FIELD(control, PXP_PRETEND_CMD_IS_CONCRETE, 1);
        SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_FUNCTION, 1);

        /* Every pretend undos prev pretends, including previous port pretend */
        SET_FIELD(control, PXP_PRETEND_CMD_PORT, 0);
        SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 0);
        SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);
        p_ptt->pxp.pretend.control = OSAL_CPU_TO_LE16(control);

        if (!GET_FIELD(fid, PXP_CONCRETE_FID_VFVALID))
                fid = GET_FIELD(fid, PXP_CONCRETE_FID_PFID);

        p_ptt->pxp.pretend.fid.concrete_fid.fid = OSAL_CPU_TO_LE16(fid);

        /* Push the whole pretend section to HW in one 32-bit write */
        p_pretend = &p_ptt->pxp.pretend;
        REG_WR(p_hwfn,
               ecore_ptt_config_addr(p_ptt) +
               OFFSETOF(struct pxp_ptt_entry, pretend), *(u32 *)p_pretend);
}
317
318 void ecore_port_pretend(struct ecore_hwfn *p_hwfn,
319                         struct ecore_ptt *p_ptt, u8 port_id)
320 {
321         void *p_pretend;
322         u16 control = 0;
323
324         SET_FIELD(control, PXP_PRETEND_CMD_PORT, port_id);
325         SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 1);
326         SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);
327         p_ptt->pxp.pretend.control = control;
328
329         p_pretend = &p_ptt->pxp.pretend;
330         REG_WR(p_hwfn,
331                ecore_ptt_config_addr(p_ptt) +
332                OFFSETOF(struct pxp_ptt_entry, pretend), *(u32 *)p_pretend);
333 }
334
335 void ecore_port_unpretend(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
336 {
337         void *p_pretend;
338         u16 control = 0;
339
340         SET_FIELD(control, PXP_PRETEND_CMD_PORT, 0);
341         SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 0);
342         SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);
343         p_ptt->pxp.pretend.control = control;
344
345         p_pretend = &p_ptt->pxp.pretend;
346         REG_WR(p_hwfn,
347                ecore_ptt_config_addr(p_ptt) +
348                OFFSETOF(struct pxp_ptt_entry, pretend), *(u32 *)p_pretend);
349 }
350
351 u32 ecore_vfid_to_concrete(struct ecore_hwfn *p_hwfn, u8 vfid)
352 {
353         u32 concrete_fid = 0;
354
355         SET_FIELD(concrete_fid, PXP_CONCRETE_FID_PFID, p_hwfn->rel_pf_id);
356         SET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFID, vfid);
357         SET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFVALID, 1);
358
359         return concrete_fid;
360 }
361
/* Not in use @DPDK
 * Ecore HW lock
 * =============
 * Although the implementation is ready, today we don't have any flow that
 * utilizes said locks - and we want to keep it this way.
 * If this changes, this needs to be revisited.
 */
369
370 /* Ecore DMAE
371  * =============
372  */
373 static void ecore_dmae_opcode(struct ecore_hwfn *p_hwfn,
374                               const u8 is_src_type_grc,
375                               const u8 is_dst_type_grc,
376                               struct ecore_dmae_params *p_params)
377 {
378         u16 opcode_b = 0;
379         u32 opcode = 0;
380
381         /* Whether the source is the PCIe or the GRC.
382          * 0- The source is the PCIe
383          * 1- The source is the GRC.
384          */
385         opcode |= (is_src_type_grc ? DMAE_CMD_SRC_MASK_GRC
386                    : DMAE_CMD_SRC_MASK_PCIE) << DMAE_CMD_SRC_SHIFT;
387         opcode |= (p_hwfn->rel_pf_id & DMAE_CMD_SRC_PF_ID_MASK) <<
388             DMAE_CMD_SRC_PF_ID_SHIFT;
389
390         /* The destination of the DMA can be: 0-None 1-PCIe 2-GRC 3-None */
391         opcode |= (is_dst_type_grc ? DMAE_CMD_DST_MASK_GRC
392                    : DMAE_CMD_DST_MASK_PCIE) << DMAE_CMD_DST_SHIFT;
393         opcode |= (p_hwfn->rel_pf_id & DMAE_CMD_DST_PF_ID_MASK) <<
394             DMAE_CMD_DST_PF_ID_SHIFT;
395
396         /* DMAE_E4_TODO need to check which value to specifiy here. */
397         /* opcode |= (!b_complete_to_host)<< DMAE_CMD_C_DST_SHIFT; */
398
399         /* Whether to write a completion word to the completion destination:
400          * 0-Do not write a completion word
401          * 1-Write the completion word
402          */
403         opcode |= DMAE_CMD_COMP_WORD_EN_MASK << DMAE_CMD_COMP_WORD_EN_SHIFT;
404         opcode |= DMAE_CMD_SRC_ADDR_RESET_MASK << DMAE_CMD_SRC_ADDR_RESET_SHIFT;
405
406         if (p_params->flags & ECORE_DMAE_FLAG_COMPLETION_DST)
407                 opcode |= 1 << DMAE_CMD_COMP_FUNC_SHIFT;
408
409         /* swapping mode 3 - big endian there should be a define ifdefed in
410          * the HSI somewhere. Since it is currently
411          */
412         opcode |= DMAE_CMD_ENDIANITY << DMAE_CMD_ENDIANITY_MODE_SHIFT;
413
414         opcode |= p_hwfn->port_id << DMAE_CMD_PORT_ID_SHIFT;
415
416         /* reset source address in next go */
417         opcode |= DMAE_CMD_SRC_ADDR_RESET_MASK << DMAE_CMD_SRC_ADDR_RESET_SHIFT;
418
419         /* reset dest address in next go */
420         opcode |= DMAE_CMD_DST_ADDR_RESET_MASK << DMAE_CMD_DST_ADDR_RESET_SHIFT;
421
422         /* SRC/DST VFID: all 1's - pf, otherwise VF id */
423         if (p_params->flags & ECORE_DMAE_FLAG_VF_SRC) {
424                 opcode |= (1 << DMAE_CMD_SRC_VF_ID_VALID_SHIFT);
425                 opcode_b |= (p_params->src_vfid << DMAE_CMD_SRC_VF_ID_SHIFT);
426         } else {
427                 opcode_b |= (DMAE_CMD_SRC_VF_ID_MASK <<
428                              DMAE_CMD_SRC_VF_ID_SHIFT);
429         }
430         if (p_params->flags & ECORE_DMAE_FLAG_VF_DST) {
431                 opcode |= 1 << DMAE_CMD_DST_VF_ID_VALID_SHIFT;
432                 opcode_b |= p_params->dst_vfid << DMAE_CMD_DST_VF_ID_SHIFT;
433         } else {
434                 opcode_b |= DMAE_CMD_DST_VF_ID_MASK << DMAE_CMD_DST_VF_ID_SHIFT;
435         }
436
437         p_hwfn->dmae_info.p_dmae_cmd->opcode = OSAL_CPU_TO_LE32(opcode);
438         p_hwfn->dmae_info.p_dmae_cmd->opcode_b = OSAL_CPU_TO_LE16(opcode_b);
439 }
440
441 static u32 ecore_dmae_idx_to_go_cmd(u8 idx)
442 {
443         OSAL_BUILD_BUG_ON((DMAE_REG_GO_C31 - DMAE_REG_GO_C0) != 31 * 4);
444
445         return DMAE_REG_GO_C0 + idx * 4;
446 }
447
/* Copy the prepared DMAE command into the engine's command memory and
 * start it by writing the GO register for this hwfn's channel.
 *
 * Returns ECORE_INVAL when the command's source or destination address
 * is zero (both hi and lo parts); ECORE_SUCCESS otherwise. This only
 * posts the command -- completion is polled separately in
 * ecore_dmae_operation_wait().
 */
static enum _ecore_status_t
ecore_dmae_post_command(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
        struct dmae_cmd *p_command = p_hwfn->dmae_info.p_dmae_cmd;
        enum _ecore_status_t ecore_status = ECORE_SUCCESS;
        u8 idx_cmd = p_hwfn->dmae_info.channel, i;

        /* verify address is not OSAL_NULL */
        if ((((!p_command->dst_addr_lo) && (!p_command->dst_addr_hi)) ||
             ((!p_command->src_addr_lo) && (!p_command->src_addr_hi)))) {
                DP_NOTICE(p_hwfn, true,
                          "source or destination address 0 idx_cmd=%d\n"
                          "opcode = [0x%08x,0x%04x] len=0x%x"
                          " src=0x%x:%x dst=0x%x:%x\n",
                          idx_cmd, (u32)p_command->opcode,
                          (u16)p_command->opcode_b,
                          (int)p_command->length,
                          (int)p_command->src_addr_hi,
                          (int)p_command->src_addr_lo,
                          (int)p_command->dst_addr_hi,
                          (int)p_command->dst_addr_lo);

                return ECORE_INVAL;
        }

        DP_VERBOSE(p_hwfn, ECORE_MSG_HW,
                   "Posting DMAE command [idx %d]: opcode = [0x%08x,0x%04x]"
                   "len=0x%x src=0x%x:%x dst=0x%x:%x\n",
                   idx_cmd, (u32)p_command->opcode,
                   (u16)p_command->opcode_b,
                   (int)p_command->length,
                   (int)p_command->src_addr_hi,
                   (int)p_command->src_addr_lo,
                   (int)p_command->dst_addr_hi, (int)p_command->dst_addr_lo);

        /* Copy the command to DMAE - need to do it before every call
         * for source/dest address no reset.
         * The number of commands have been increased to 16 (previous was 14)
         * The first 9 DWs are the command registers, the 10 DW is the
         * GO register, and
         * the rest are result registers (which are read only by the client).
         */
        for (i = 0; i < DMAE_CMD_SIZE; i++) {
                /* DWs past DMAE_CMD_SIZE_TO_FILL are written as zero */
                u32 data = (i < DMAE_CMD_SIZE_TO_FILL) ?
                    *(((u32 *)p_command) + i) : 0;

                ecore_wr(p_hwfn, p_ptt,
                         DMAE_REG_CMD_MEM +
                         (idx_cmd * DMAE_CMD_SIZE * sizeof(u32)) +
                         (i * sizeof(u32)), data);
        }

        /* Kick off execution of the posted command */
        ecore_wr(p_hwfn, p_ptt,
                 ecore_dmae_idx_to_go_cmd(idx_cmd), DMAE_GO_VALUE);

        return ecore_status;
}
505
506 enum _ecore_status_t ecore_dmae_info_alloc(struct ecore_hwfn *p_hwfn)
507 {
508         dma_addr_t *p_addr = &p_hwfn->dmae_info.completion_word_phys_addr;
509         struct dmae_cmd **p_cmd = &p_hwfn->dmae_info.p_dmae_cmd;
510         u32 **p_buff = &p_hwfn->dmae_info.p_intermediate_buffer;
511         u32 **p_comp = &p_hwfn->dmae_info.p_completion_word;
512
513         *p_comp = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, p_addr, sizeof(u32));
514         if (*p_comp == OSAL_NULL) {
515                 DP_NOTICE(p_hwfn, true,
516                           "Failed to allocate `p_completion_word'\n");
517                 ecore_dmae_info_free(p_hwfn);
518                 return ECORE_NOMEM;
519         }
520
521         p_addr = &p_hwfn->dmae_info.dmae_cmd_phys_addr;
522         *p_cmd = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, p_addr,
523                                          sizeof(struct dmae_cmd));
524         if (*p_cmd == OSAL_NULL) {
525                 DP_NOTICE(p_hwfn, true,
526                           "Failed to allocate `struct dmae_cmd'\n");
527                 ecore_dmae_info_free(p_hwfn);
528                 return ECORE_NOMEM;
529         }
530
531         p_addr = &p_hwfn->dmae_info.intermediate_buffer_phys_addr;
532         *p_buff = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, p_addr,
533                                           sizeof(u32) * DMAE_MAX_RW_SIZE);
534         if (*p_buff == OSAL_NULL) {
535                 DP_NOTICE(p_hwfn, true,
536                           "Failed to allocate `intermediate_buffer'\n");
537                 ecore_dmae_info_free(p_hwfn);
538                 return ECORE_NOMEM;
539         }
540
541         /* DMAE_E4_TODO : Need to change this to reflect proper channel */
542         p_hwfn->dmae_info.channel = p_hwfn->rel_pf_id;
543
544         return ECORE_SUCCESS;
545 }
546
547 void ecore_dmae_info_free(struct ecore_hwfn *p_hwfn)
548 {
549         dma_addr_t p_phys;
550
551         /* Just make sure no one is in the middle */
552         OSAL_MUTEX_ACQUIRE(&p_hwfn->dmae_info.mutex);
553
554         if (p_hwfn->dmae_info.p_completion_word != OSAL_NULL) {
555                 p_phys = p_hwfn->dmae_info.completion_word_phys_addr;
556                 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
557                                        p_hwfn->dmae_info.p_completion_word,
558                                        p_phys, sizeof(u32));
559                 p_hwfn->dmae_info.p_completion_word = OSAL_NULL;
560         }
561
562         if (p_hwfn->dmae_info.p_dmae_cmd != OSAL_NULL) {
563                 p_phys = p_hwfn->dmae_info.dmae_cmd_phys_addr;
564                 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
565                                        p_hwfn->dmae_info.p_dmae_cmd,
566                                        p_phys, sizeof(struct dmae_cmd));
567                 p_hwfn->dmae_info.p_dmae_cmd = OSAL_NULL;
568         }
569
570         if (p_hwfn->dmae_info.p_intermediate_buffer != OSAL_NULL) {
571                 p_phys = p_hwfn->dmae_info.intermediate_buffer_phys_addr;
572                 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
573                                        p_hwfn->dmae_info.p_intermediate_buffer,
574                                        p_phys, sizeof(u32) * DMAE_MAX_RW_SIZE);
575                 p_hwfn->dmae_info.p_intermediate_buffer = OSAL_NULL;
576         }
577
578         OSAL_MUTEX_RELEASE(&p_hwfn->dmae_info.mutex);
579 }
580
/* Poll the DMA-coherent completion word until the DMAE engine writes
 * DMAE_COMPLETION_VAL into it, waiting DMAE_MIN_WAIT_TIME microseconds
 * between polls, up to wait_cnt_limit attempts.
 *
 * Returns ECORE_SUCCESS (and clears the completion word for the next
 * operation) or ECORE_TIMEOUT.
 */
static enum _ecore_status_t ecore_dmae_operation_wait(struct ecore_hwfn *p_hwfn)
{
        enum _ecore_status_t ecore_status = ECORE_SUCCESS;
        u32 wait_cnt_limit = 10000, wait_cnt = 0;

#ifndef ASIC_ONLY
        /* Emulation/FPGA platforms run much slower -- stretch the timeout */
        u32 factor = (CHIP_REV_IS_EMUL(p_hwfn->p_dev) ?
                      ECORE_EMUL_FACTOR :
                      (CHIP_REV_IS_FPGA(p_hwfn->p_dev) ?
                       ECORE_FPGA_FACTOR : 1));

        wait_cnt_limit *= factor;
#endif

        /* DMAE_E4_TODO : TODO check if we have to call any other function
         * other than BARRIER to sync the completion_word since we are not
         * using the volatile keyword for this
         */
        OSAL_BARRIER(p_hwfn->p_dev);
        while (*p_hwfn->dmae_info.p_completion_word != DMAE_COMPLETION_VAL) {
                /* DMAE_E4_TODO : using OSAL_MSLEEP instead of mm_wait since mm
                 * functions are getting deprecated. Need to review for future.
                 */
                OSAL_UDELAY(DMAE_MIN_WAIT_TIME);
                if (++wait_cnt > wait_cnt_limit) {
                        /* NOTE(review): DP_NOTICE's second argument is a bool
                         * everywhere else in this file; ECORE_MSG_HW is a
                         * verbosity flag (nonzero, so it behaves as true) --
                         * confirm this is intended.
                         */
                        DP_NOTICE(p_hwfn->p_dev, ECORE_MSG_HW,
                                  "Timed-out waiting for operation to"
                                  " complete. Completion word is 0x%08x"
                                  " expected 0x%08x.\n",
                                  *p_hwfn->dmae_info.p_completion_word,
                                  DMAE_COMPLETION_VAL);
                        ecore_status = ECORE_TIMEOUT;
                        break;
                }
                /* to sync the completion_word since we are not
                 * using the volatile keyword for p_completion_word
                 */
                OSAL_BARRIER(p_hwfn->p_dev);
        }

        if (ecore_status == ECORE_SUCCESS)
                *p_hwfn->dmae_info.p_completion_word = 0;

        return ecore_status;
}
626
/* Run one DMAE transfer of `length` dwords (must fit in a single
 * command; callers split larger transfers -- see
 * ecore_dmae_execute_command()).
 *
 * Host-virtual addresses are bounced through the pre-allocated
 * intermediate buffer: copied in before posting for a virt source, and
 * copied out after completion for a virt destination. GRC and
 * host-physical addresses are programmed directly into the command.
 *
 * Returns ECORE_INVAL for an unknown address type, the wait status on
 * timeout, or ECORE_SUCCESS.
 */
static enum _ecore_status_t
ecore_dmae_execute_sub_operation(struct ecore_hwfn *p_hwfn,
                                 struct ecore_ptt *p_ptt,
                                 u64 src_addr,
                                 u64 dst_addr,
                                 u8 src_type, u8 dst_type, u32 length)
{
        dma_addr_t phys = p_hwfn->dmae_info.intermediate_buffer_phys_addr;
        struct dmae_cmd *cmd = p_hwfn->dmae_info.p_dmae_cmd;
        enum _ecore_status_t ecore_status = ECORE_SUCCESS;

        switch (src_type) {
        case ECORE_DMAE_ADDRESS_GRC:
        case ECORE_DMAE_ADDRESS_HOST_PHYS:
                cmd->src_addr_hi = DMA_HI(src_addr);
                cmd->src_addr_lo = DMA_LO(src_addr);
                break;
                /* for virt source addresses we use the intermediate buffer. */
        case ECORE_DMAE_ADDRESS_HOST_VIRT:
                cmd->src_addr_hi = DMA_HI(phys);
                cmd->src_addr_lo = DMA_LO(phys);
                OSAL_MEMCPY(&p_hwfn->dmae_info.p_intermediate_buffer[0],
                            (void *)(osal_uintptr_t)src_addr,
                            length * sizeof(u32));
                break;
        default:
                return ECORE_INVAL;
        }

        switch (dst_type) {
        case ECORE_DMAE_ADDRESS_GRC:
        case ECORE_DMAE_ADDRESS_HOST_PHYS:
                cmd->dst_addr_hi = DMA_HI(dst_addr);
                cmd->dst_addr_lo = DMA_LO(dst_addr);
                break;
                /* for virt destination address we use the intermediate buff. */
        case ECORE_DMAE_ADDRESS_HOST_VIRT:
                cmd->dst_addr_hi = DMA_HI(phys);
                cmd->dst_addr_lo = DMA_LO(phys);
                break;
        default:
                return ECORE_INVAL;
        }

        cmd->length = (u16)length;

        /* Sync host memory to the device before the DMA reads it */
        if (src_type == ECORE_DMAE_ADDRESS_HOST_VIRT ||
            src_type == ECORE_DMAE_ADDRESS_HOST_PHYS)
                OSAL_DMA_SYNC(p_hwfn->p_dev,
                              (void *)HILO_U64(cmd->src_addr_hi,
                                               cmd->src_addr_lo),
                              length * sizeof(u32), false);

        ecore_dmae_post_command(p_hwfn, p_ptt);

        ecore_status = ecore_dmae_operation_wait(p_hwfn);

        /* TODO - is it true ? */
        if (src_type == ECORE_DMAE_ADDRESS_HOST_VIRT ||
            src_type == ECORE_DMAE_ADDRESS_HOST_PHYS)
                OSAL_DMA_SYNC(p_hwfn->p_dev,
                              (void *)HILO_U64(cmd->src_addr_hi,
                                               cmd->src_addr_lo),
                              length * sizeof(u32), true);

        if (ecore_status != ECORE_SUCCESS) {
                DP_NOTICE(p_hwfn, ECORE_MSG_HW,
                          "ecore_dmae_host2grc: Wait Failed. source_addr"
                          " 0x%lx, grc_addr 0x%lx, size_in_dwords 0x%x\n",
                          (unsigned long)src_addr, (unsigned long)dst_addr,
                          length);
                return ecore_status;
        }

        /* Virt destination: copy the DMA'd data out of the bounce buffer */
        if (dst_type == ECORE_DMAE_ADDRESS_HOST_VIRT)
                OSAL_MEMCPY((void *)(osal_uintptr_t)(dst_addr),
                            &p_hwfn->dmae_info.p_intermediate_buffer[0],
                            length * sizeof(u32));

        return ECORE_SUCCESS;
}
708
/* Execute a DMAE transfer of `size_in_dwords` dwords, splitting it into
 * sub-operations of at most DMAE_MAX_RW_SIZE dwords each.
 *
 * GRC addresses advance in dwords; host addresses in bytes (offset * 4).
 * When ECORE_DMAE_FLAG_RW_REPL_SRC is set the source address is not
 * advanced between chunks (the same source is replicated).
 *
 * On a sub-operation failure, notifies ECORE_HW_ERR_DMAE_FAIL and
 * returns that status; otherwise ECORE_SUCCESS.
 */
static enum _ecore_status_t
ecore_dmae_execute_command(struct ecore_hwfn *p_hwfn,
                           struct ecore_ptt *p_ptt,
                           u64 src_addr,
                           u64 dst_addr,
                           u8 src_type,
                           u8 dst_type,
                           u32 size_in_dwords,
                           struct ecore_dmae_params *p_params)
{
        dma_addr_t phys = p_hwfn->dmae_info.completion_word_phys_addr;
        u16 length_cur = 0, i = 0, cnt_split = 0, length_mod = 0;
        struct dmae_cmd *cmd = p_hwfn->dmae_info.p_dmae_cmd;
        enum _ecore_status_t ecore_status = ECORE_SUCCESS;
        u64 src_addr_split = 0, dst_addr_split = 0;
        u16 length_limit = DMAE_MAX_RW_SIZE;
        u32 offset = 0;

        ecore_dmae_opcode(p_hwfn,
                          (src_type == ECORE_DMAE_ADDRESS_GRC),
                          (dst_type == ECORE_DMAE_ADDRESS_GRC), p_params);

        /* Completion word is DMA-written by the engine on command end */
        cmd->comp_addr_lo = DMA_LO(phys);
        cmd->comp_addr_hi = DMA_HI(phys);
        cmd->comp_val = DMAE_COMPLETION_VAL;

        /* Check if the grc_addr is valid like < MAX_GRC_OFFSET */
        cnt_split = size_in_dwords / length_limit;
        length_mod = size_in_dwords % length_limit;

        src_addr_split = src_addr;
        dst_addr_split = dst_addr;

        for (i = 0; i <= cnt_split; i++) {
                offset = length_limit * i;

                if (!(p_params->flags & ECORE_DMAE_FLAG_RW_REPL_SRC)) {
                        /* GRC offsets are dword-granular; host ones byte-granular */
                        if (src_type == ECORE_DMAE_ADDRESS_GRC)
                                src_addr_split = src_addr + offset;
                        else
                                src_addr_split = src_addr + (offset * 4);
                }

                if (dst_type == ECORE_DMAE_ADDRESS_GRC)
                        dst_addr_split = dst_addr + offset;
                else
                        dst_addr_split = dst_addr + (offset * 4);

                length_cur = (cnt_split == i) ? length_mod : length_limit;

                /* might be zero on last iteration */
                if (!length_cur)
                        continue;

                ecore_status = ecore_dmae_execute_sub_operation(p_hwfn,
                                                                p_ptt,
                                                                src_addr_split,
                                                                dst_addr_split,
                                                                src_type,
                                                                dst_type,
                                                                length_cur);
                if (ecore_status != ECORE_SUCCESS) {
                        DP_NOTICE(p_hwfn, false,
                                  "ecore_dmae_execute_sub_operation Failed"
                                  " with error 0x%x. source_addr 0x%lx,"
                                  " dest addr 0x%lx, size_in_dwords 0x%x\n",
                                  ecore_status, (unsigned long)src_addr,
                                  (unsigned long)dst_addr, length_cur);

                        ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_DMAE_FAIL);
                        break;
                }
        }

        return ecore_status;
}
785
786 enum _ecore_status_t
787 ecore_dmae_host2grc(struct ecore_hwfn *p_hwfn,
788                     struct ecore_ptt *p_ptt,
789                     u64 source_addr,
790                     u32 grc_addr, u32 size_in_dwords, u32 flags)
791 {
792         u32 grc_addr_in_dw = grc_addr / sizeof(u32);
793         struct ecore_dmae_params params;
794         enum _ecore_status_t rc;
795
796         OSAL_MEMSET(&params, 0, sizeof(struct ecore_dmae_params));
797         params.flags = flags;
798
799         OSAL_MUTEX_ACQUIRE(&p_hwfn->dmae_info.mutex);
800
801         rc = ecore_dmae_execute_command(p_hwfn, p_ptt, source_addr,
802                                         grc_addr_in_dw,
803                                         ECORE_DMAE_ADDRESS_HOST_VIRT,
804                                         ECORE_DMAE_ADDRESS_GRC,
805                                         size_in_dwords, &params);
806
807         OSAL_MUTEX_RELEASE(&p_hwfn->dmae_info.mutex);
808
809         return rc;
810 }
811
812 enum _ecore_status_t
813 ecore_dmae_grc2host(struct ecore_hwfn *p_hwfn,
814                     struct ecore_ptt *p_ptt,
815                     u32 grc_addr,
816                     dma_addr_t dest_addr, u32 size_in_dwords, u32 flags)
817 {
818         u32 grc_addr_in_dw = grc_addr / sizeof(u32);
819         struct ecore_dmae_params params;
820         enum _ecore_status_t rc;
821
822         OSAL_MEMSET(&params, 0, sizeof(struct ecore_dmae_params));
823         params.flags = flags;
824
825         OSAL_MUTEX_ACQUIRE(&p_hwfn->dmae_info.mutex);
826
827         rc = ecore_dmae_execute_command(p_hwfn, p_ptt, grc_addr_in_dw,
828                                         dest_addr, ECORE_DMAE_ADDRESS_GRC,
829                                         ECORE_DMAE_ADDRESS_HOST_VIRT,
830                                         size_in_dwords, &params);
831
832         OSAL_MUTEX_RELEASE(&p_hwfn->dmae_info.mutex);
833
834         return rc;
835 }
836
837 enum _ecore_status_t
838 ecore_dmae_host2host(struct ecore_hwfn *p_hwfn,
839                      struct ecore_ptt *p_ptt,
840                      dma_addr_t source_addr,
841                      dma_addr_t dest_addr,
842                      u32 size_in_dwords, struct ecore_dmae_params *p_params)
843 {
844         enum _ecore_status_t rc;
845
846         OSAL_MUTEX_ACQUIRE(&p_hwfn->dmae_info.mutex);
847
848         rc = ecore_dmae_execute_command(p_hwfn, p_ptt, source_addr,
849                                         dest_addr,
850                                         ECORE_DMAE_ADDRESS_HOST_PHYS,
851                                         ECORE_DMAE_ADDRESS_HOST_PHYS,
852                                         size_in_dwords, p_params);
853
854         OSAL_MUTEX_RELEASE(&p_hwfn->dmae_info.mutex);
855
856         return rc;
857 }
858
859 u16 ecore_get_qm_pq(struct ecore_hwfn *p_hwfn,
860                     enum protocol_type proto,
861                     union ecore_qm_pq_params *p_params)
862 {
863         u16 pq_id = 0;
864
865         if ((proto == PROTOCOLID_CORE ||
866              proto == PROTOCOLID_ETH) && !p_params) {
867                 DP_NOTICE(p_hwfn, true,
868                           "Protocol %d received NULL PQ params\n", proto);
869                 return 0;
870         }
871
872         switch (proto) {
873         case PROTOCOLID_CORE:
874                 if (p_params->core.tc == LB_TC)
875                         pq_id = p_hwfn->qm_info.pure_lb_pq;
876                 else if (p_params->core.tc == OOO_LB_TC)
877                         pq_id = p_hwfn->qm_info.ooo_pq;
878                 else
879                         pq_id = p_hwfn->qm_info.offload_pq;
880                 break;
881         case PROTOCOLID_ETH:
882                 pq_id = p_params->eth.tc;
883                 /* TODO - multi-CoS for VFs? */
884                 if (p_params->eth.is_vf)
885                         pq_id += p_hwfn->qm_info.vf_queues_offset +
886                             p_params->eth.vf_id;
887                 break;
888         default:
889                 pq_id = 0;
890         }
891
892         pq_id = CM_TX_PQ_BASE + pq_id + RESC_START(p_hwfn, ECORE_PQ);
893
894         return pq_id;
895 }
896
897 void ecore_hw_err_notify(struct ecore_hwfn *p_hwfn,
898                          enum ecore_hw_err_type err_type)
899 {
900         /* Fan failure cannot be masked by handling of another HW error */
901         if (p_hwfn->p_dev->recov_in_prog && err_type != ECORE_HW_ERR_FAN_FAIL) {
902                 DP_VERBOSE(p_hwfn, ECORE_MSG_DRV,
903                            "Recovery is in progress."
904                            "Avoid notifying about HW error %d.\n",
905                            err_type);
906                 return;
907         }
908
909         OSAL_HW_ERROR_OCCURRED(p_hwfn, err_type);
910 }