Imported Upstream version 16.07-rc1
[deb_dpdk.git] / drivers / net / qede / base / ecore_int.c
1 /*
2  * Copyright (c) 2016 QLogic Corporation.
3  * All rights reserved.
4  * www.qlogic.com
5  *
6  * See LICENSE.qede_pmd for copyright and licensing details.
7  */
8
9 #include "bcm_osal.h"
10 #include "ecore.h"
11 #include "ecore_spq.h"
12 #include "reg_addr.h"
13 #include "ecore_gtt_reg_addr.h"
14 #include "ecore_init_ops.h"
15 #include "ecore_rt_defs.h"
16 #include "ecore_int.h"
17 #include "reg_addr.h"
18 #include "ecore_hw.h"
19 #include "ecore_sriov.h"
20 #include "ecore_vf.h"
21 #include "ecore_hw_defs.h"
22 #include "ecore_hsi_common.h"
23 #include "ecore_mcp.h"
24 #include "ecore_attn_values.h"
25
/* Per-protocol-index (PI) completion routing for a status block */
struct ecore_pi_info {
        ecore_int_comp_cb_t comp_cb;    /* Invoked when the PI advances */
        void *cookie;           /* Will be sent to the compl cb function */
};
30
/* Slow-path status block bundled with its per-protocol-index callbacks */
struct ecore_sb_sp_info {
        struct ecore_sb_info sb_info;
        /* per protocol index data */
        struct ecore_pi_info pi_info_arr[PIS_PER_SB];
};
36
/* Distinguishes ordinary attention bits from parity-error bits */
enum ecore_attention_type {
        ECORE_ATTN_TYPE_ATTN,
        ECORE_ATTN_TYPE_PARITY,
};
41
/* Aligned allocation size for this hwfn's attention status block */
#define SB_ATTN_ALIGNED_SIZE(p_hwfn) \
        ALIGNED_TYPE_SIZE(struct atten_status_block, p_hwfn)
44
/* Description of a single bit (or run of bits) in an AEU "after invert"
 * register: a printable name, flags encoding the bit's type/length/offset,
 * an optional attention callback and the HW block it belongs to.
 */
struct aeu_invert_reg_bit {
        char bit_name[30];

#define ATTENTION_PARITY                (1 << 0)

/* Number of consecutive bits this entry covers, encoded in 'flags' */
#define ATTENTION_LENGTH_MASK           (0x00000ff0)
#define ATTENTION_LENGTH_SHIFT          (4)
#define ATTENTION_LENGTH(flags)         (((flags) & ATTENTION_LENGTH_MASK) >> \
                                         ATTENTION_LENGTH_SHIFT)
#define ATTENTION_SINGLE                (1 << ATTENTION_LENGTH_SHIFT)
#define ATTENTION_PAR                   (ATTENTION_SINGLE | ATTENTION_PARITY)
#define ATTENTION_PAR_INT               ((2 << ATTENTION_LENGTH_SHIFT) | \
                                         ATTENTION_PARITY)

/* Multiple bits start with this offset */
#define ATTENTION_OFFSET_MASK           (0x000ff000)
#define ATTENTION_OFFSET_SHIFT          (12)

#define ATTENTION_CLEAR_ENABLE          (1 << 28)
#define ATTENTION_FW_DUMP               (1 << 29)
#define ATTENTION_PANIC_DUMP            (1 << 30)
        unsigned int flags;

        /* Callback to call if attention will be triggered */
        enum _ecore_status_t (*cb)(struct ecore_hwfn *p_hwfn);

        enum block_id block_index;
};
73
/* One full 32-bit AEU "after invert" register worth of bit descriptions */
struct aeu_invert_reg {
        struct aeu_invert_reg_bit bits[32];
};
77
/* Number of attention groups and of AEU "after invert" registers */
#define MAX_ATTN_GRPS           (8)
#define NUM_ATTN_REGS           (9)
80
81 static enum _ecore_status_t ecore_mcp_attn_cb(struct ecore_hwfn *p_hwfn)
82 {
83         u32 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_STATE);
84
85         DP_INFO(p_hwfn->p_dev, "MCP_REG_CPU_STATE: %08x - Masking...\n", tmp);
86         ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_EVENT_MASK, 0xffffffff);
87
88         return ECORE_SUCCESS;
89 }
90
91 #define ECORE_PSWHST_ATTENTION_DISABLED_PF_MASK         (0x3c000)
92 #define ECORE_PSWHST_ATTENTION_DISABLED_PF_SHIFT        (14)
93 #define ECORE_PSWHST_ATTENTION_DISABLED_VF_MASK         (0x03fc0)
94 #define ECORE_PSWHST_ATTENTION_DISABLED_VF_SHIFT        (6)
95 #define ECORE_PSWHST_ATTENTION_DISABLED_VALID_MASK      (0x00020)
96 #define ECORE_PSWHST_ATTENTION_DISABLED_VALID_SHIFT     (5)
97 #define ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_MASK     (0x0001e)
98 #define ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_SHIFT    (1)
99 #define ECORE_PSWHST_ATTENTION_DISABLED_WRITE_MASK      (0x1)
100 #define ECORE_PSWHST_ATTNETION_DISABLED_WRITE_SHIFT     (0)
101 #define ECORE_PSWHST_ATTENTION_VF_DISABLED              (0x1)
102 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS         (0x1)
103 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_MASK (0x1)
104 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_SHIFT        (0)
105 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_MASK     (0x1e)
106 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_SHIFT    (1)
107 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_MASK   (0x20)
108 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_SHIFT  (5)
109 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_MASK      (0x3fc0)
110 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_SHIFT     (6)
111 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_MASK      (0x3c000)
112 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_SHIFT     (14)
113 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_MASK    (0x3fc0000)
114 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_SHIFT   (18)
115 static enum _ecore_status_t ecore_pswhst_attn_cb(struct ecore_hwfn *p_hwfn)
116 {
117         u32 tmp =
118             ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
119                      PSWHST_REG_VF_DISABLED_ERROR_VALID);
120
121         /* Disabled VF access */
122         if (tmp & ECORE_PSWHST_ATTENTION_VF_DISABLED) {
123                 u32 addr, data;
124
125                 addr = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
126                                 PSWHST_REG_VF_DISABLED_ERROR_ADDRESS);
127                 data = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
128                                 PSWHST_REG_VF_DISABLED_ERROR_DATA);
129                 DP_INFO(p_hwfn->p_dev,
130                         "PF[0x%02x] VF [0x%02x] [Valid 0x%02x] Client [0x%02x]"
131                         " Write [0x%02x] Addr [0x%08x]\n",
132                         (u8)((data & ECORE_PSWHST_ATTENTION_DISABLED_PF_MASK)
133                              >> ECORE_PSWHST_ATTENTION_DISABLED_PF_SHIFT),
134                         (u8)((data & ECORE_PSWHST_ATTENTION_DISABLED_VF_MASK)
135                              >> ECORE_PSWHST_ATTENTION_DISABLED_VF_SHIFT),
136                         (u8)((data &
137                               ECORE_PSWHST_ATTENTION_DISABLED_VALID_MASK) >>
138                               ECORE_PSWHST_ATTENTION_DISABLED_VALID_SHIFT),
139                         (u8)((data &
140                               ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_MASK) >>
141                               ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_SHIFT),
142                         (u8)((data &
143                               ECORE_PSWHST_ATTENTION_DISABLED_WRITE_MASK) >>
144                               ECORE_PSWHST_ATTNETION_DISABLED_WRITE_SHIFT),
145                         addr);
146         }
147
148         tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
149                        PSWHST_REG_INCORRECT_ACCESS_VALID);
150         if (tmp & ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS) {
151                 u32 addr, data, length;
152
153                 addr = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
154                                 PSWHST_REG_INCORRECT_ACCESS_ADDRESS);
155                 data = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
156                                 PSWHST_REG_INCORRECT_ACCESS_DATA);
157                 length = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
158                                   PSWHST_REG_INCORRECT_ACCESS_LENGTH);
159
160                 DP_INFO(p_hwfn->p_dev,
161                         "Incorrect access to %08x of length %08x - PF [%02x]"
162                         " VF [%04x] [valid %02x] client [%02x] write [%02x]"
163                         " Byte-Enable [%04x] [%08x]\n",
164                         addr, length,
165                         (u8)((data &
166                       ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_MASK) >>
167                       ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_SHIFT),
168                         (u8)((data &
169                       ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_MASK) >>
170                       ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_SHIFT),
171                         (u8)((data &
172                       ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_MASK) >>
173                       ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_SHIFT),
174                         (u8)((data &
175                       ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_MASK) >>
176                       ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_SHIFT),
177                         (u8)((data &
178                       ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_MASK) >>
179                       ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_SHIFT),
180                         (u8)((data &
181                       ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_MASK) >>
182                       ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_SHIFT),
183                         data);
184         }
185
186         /* TODO - We know 'some' of these are legal due to virtualization,
187          * but is it true for all of them?
188          */
189         return ECORE_SUCCESS;
190 }
191
/* Field layout of the GRC timeout attention data registers */
#define ECORE_GRC_ATTENTION_VALID_BIT           (1 << 0)
#define ECORE_GRC_ATTENTION_ADDRESS_MASK        (0x7fffff << 0)
#define ECORE_GRC_ATTENTION_RDWR_BIT            (1 << 23)
#define ECORE_GRC_ATTENTION_MASTER_MASK         (0xf << 24)
#define ECORE_GRC_ATTENTION_MASTER_SHIFT        (24)
#define ECORE_GRC_ATTENTION_PF_MASK             (0xf)
#define ECORE_GRC_ATTENTION_VF_MASK             (0xff << 4)
#define ECORE_GRC_ATTENTION_VF_SHIFT            (4)
#define ECORE_GRC_ATTENTION_PRIV_MASK           (0x3 << 14)
#define ECORE_GRC_ATTENTION_PRIV_SHIFT          (14)
#define ECORE_GRC_ATTENTION_PRIV_VF             (0)
203 static const char *grc_timeout_attn_master_to_str(u8 master)
204 {
205         switch (master) {
206         case 1:
207                 return "PXP";
208         case 2:
209                 return "MCP";
210         case 3:
211                 return "MSDM";
212         case 4:
213                 return "PSDM";
214         case 5:
215                 return "YSDM";
216         case 6:
217                 return "USDM";
218         case 7:
219                 return "TSDM";
220         case 8:
221                 return "XSDM";
222         case 9:
223                 return "DBU";
224         case 10:
225                 return "DMAE";
226         default:
227                 return "Unknown";
228         }
229 }
230
231 static enum _ecore_status_t ecore_grc_attn_cb(struct ecore_hwfn *p_hwfn)
232 {
233         u32 tmp, tmp2;
234
235         /* We've already cleared the timeout interrupt register, so we learn
236          * of interrupts via the validity register
237          */
238         tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
239                        GRC_REG_TIMEOUT_ATTN_ACCESS_VALID);
240         if (!(tmp & ECORE_GRC_ATTENTION_VALID_BIT))
241                 goto out;
242
243         /* Read the GRC timeout information */
244         tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
245                        GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_0);
246         tmp2 = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
247                         GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_1);
248
249         DP_INFO(p_hwfn->p_dev,
250                 "GRC timeout [%08x:%08x] - %s Address [%08x] [Master %s]"
251                 " [PF: %02x %s %02x]\n",
252                 tmp2, tmp,
253                 (tmp & ECORE_GRC_ATTENTION_RDWR_BIT) ? "Write to" : "Read from",
254                 (tmp & ECORE_GRC_ATTENTION_ADDRESS_MASK) << 2,
255                 grc_timeout_attn_master_to_str((tmp &
256                                         ECORE_GRC_ATTENTION_MASTER_MASK) >>
257                                        ECORE_GRC_ATTENTION_MASTER_SHIFT),
258                 (tmp2 & ECORE_GRC_ATTENTION_PF_MASK),
259                 (((tmp2 & ECORE_GRC_ATTENTION_PRIV_MASK) >>
260                   ECORE_GRC_ATTENTION_PRIV_SHIFT) ==
261                  ECORE_GRC_ATTENTION_PRIV_VF) ? "VF" : "(Irrelevant:)",
262                 (tmp2 & ECORE_GRC_ATTENTION_VF_MASK) >>
263                 ECORE_GRC_ATTENTION_VF_SHIFT);
264
265 out:
266         /* Regardles of anything else, clean the validity bit */
267         ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt,
268                  GRC_REG_TIMEOUT_ATTN_ACCESS_VALID, 0);
269         return ECORE_SUCCESS;
270 }
271
272 #define ECORE_PGLUE_ATTENTION_VALID (1 << 29)
273 #define ECORE_PGLUE_ATTENTION_RD_VALID (1 << 26)
274 #define ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK (0xf << 20)
275 #define ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT (20)
276 #define ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID (1 << 19)
277 #define ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK (0xff << 24)
278 #define ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT (24)
279 #define ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR (1 << 21)
280 #define ECORE_PGLUE_ATTENTION_DETAILS2_BME      (1 << 22)
281 #define ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN (1 << 23)
282 #define ECORE_PGLUE_ATTENTION_ICPL_VALID (1 << 23)
283 #define ECORE_PGLUE_ATTENTION_ZLR_VALID (1 << 25)
284 #define ECORE_PGLUE_ATTENTION_ILT_VALID (1 << 23)
285 static enum _ecore_status_t ecore_pglub_rbc_attn_cb(struct ecore_hwfn *p_hwfn)
286 {
287         u32 tmp, reg_addr;
288
289         reg_addr =
290             attn_blocks[BLOCK_PGLUE_B].chip_regs[ECORE_GET_TYPE(p_hwfn->p_dev)].
291             int_regs[0]->mask_addr;
292
293         /* Mask unnecessary attentions -@TBD move to MFW */
294         tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, reg_addr);
295         tmp |= (1 << 19);       /* Was PGL_PCIE_ATTN */
296         ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, reg_addr, tmp);
297
298         tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
299                        PGLUE_B_REG_TX_ERR_WR_DETAILS2);
300         if (tmp & ECORE_PGLUE_ATTENTION_VALID) {
301                 u32 addr_lo, addr_hi, details;
302
303                 addr_lo = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
304                                    PGLUE_B_REG_TX_ERR_WR_ADD_31_0);
305                 addr_hi = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
306                                    PGLUE_B_REG_TX_ERR_WR_ADD_63_32);
307                 details = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
308                                    PGLUE_B_REG_TX_ERR_WR_DETAILS);
309
310                 DP_INFO(p_hwfn,
311                         "Illegal write by chip to [%08x:%08x] blocked."
312                         "Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]"
313                         " Details2 %08x [Was_error %02x BME deassert %02x"
314                         " FID_enable deassert %02x]\n",
315                         addr_hi, addr_lo, details,
316                         (u8)((details &
317                               ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK) >>
318                              ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT),
319                         (u8)((details &
320                               ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK) >>
321                              ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT),
322                         (u8)((details & ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID)
323                              ? 1 : 0), tmp,
324                         (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1
325                              : 0),
326                         (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_BME) ? 1 :
327                              0),
328                         (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1
329                              : 0));
330         }
331
332         tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
333                        PGLUE_B_REG_TX_ERR_RD_DETAILS2);
334         if (tmp & ECORE_PGLUE_ATTENTION_RD_VALID) {
335                 u32 addr_lo, addr_hi, details;
336
337                 addr_lo = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
338                                    PGLUE_B_REG_TX_ERR_RD_ADD_31_0);
339                 addr_hi = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
340                                    PGLUE_B_REG_TX_ERR_RD_ADD_63_32);
341                 details = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
342                                    PGLUE_B_REG_TX_ERR_RD_DETAILS);
343
344                 DP_INFO(p_hwfn,
345                         "Illegal read by chip from [%08x:%08x] blocked."
346                         " Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]"
347                         " Details2 %08x [Was_error %02x BME deassert %02x"
348                         " FID_enable deassert %02x]\n",
349                         addr_hi, addr_lo, details,
350                         (u8)((details &
351                               ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK) >>
352                              ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT),
353                         (u8)((details &
354                               ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK) >>
355                              ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT),
356                         (u8)((details & ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID)
357                              ? 1 : 0), tmp,
358                         (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1
359                              : 0),
360                         (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_BME) ? 1 :
361                              0),
362                         (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1
363                              : 0));
364         }
365
366         tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
367                        PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL);
368         if (tmp & ECORE_PGLUE_ATTENTION_ICPL_VALID)
369                 DP_INFO(p_hwfn, "ICPL error - %08x\n", tmp);
370
371         tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
372                        PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS);
373         if (tmp & ECORE_PGLUE_ATTENTION_ZLR_VALID) {
374                 u32 addr_hi, addr_lo;
375
376                 addr_lo = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
377                                    PGLUE_B_REG_MASTER_ZLR_ERR_ADD_31_0);
378                 addr_hi = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
379                                    PGLUE_B_REG_MASTER_ZLR_ERR_ADD_63_32);
380
381                 DP_INFO(p_hwfn, "ICPL error - %08x [Address %08x:%08x]\n",
382                         tmp, addr_hi, addr_lo);
383         }
384
385         tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
386                        PGLUE_B_REG_VF_ILT_ERR_DETAILS2);
387         if (tmp & ECORE_PGLUE_ATTENTION_ILT_VALID) {
388                 u32 addr_hi, addr_lo, details;
389
390                 addr_lo = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
391                                    PGLUE_B_REG_VF_ILT_ERR_ADD_31_0);
392                 addr_hi = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
393                                    PGLUE_B_REG_VF_ILT_ERR_ADD_63_32);
394                 details = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
395                                    PGLUE_B_REG_VF_ILT_ERR_DETAILS);
396
397                 DP_INFO(p_hwfn,
398                         "ILT error - Details %08x Details2 %08x"
399                         " [Address %08x:%08x]\n",
400                         details, tmp, addr_hi, addr_lo);
401         }
402
403         /* Clear the indications */
404         ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt,
405                  PGLUE_B_REG_LATCHED_ERRORS_CLR, (1 << 2));
406
407         return ECORE_SUCCESS;
408 }
409
410 static enum _ecore_status_t ecore_nig_attn_cb(struct ecore_hwfn *p_hwfn)
411 {
412         u32 tmp, reg_addr;
413
414         /* Mask unnecessary attentions -@TBD move to MFW */
415         reg_addr =
416             attn_blocks[BLOCK_NIG].chip_regs[ECORE_GET_TYPE(p_hwfn->p_dev)].
417             int_regs[3]->mask_addr;
418         tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, reg_addr);
419         tmp |= (1 << 0);        /* Was 3_P0_TX_PAUSE_TOO_LONG_INT */
420         tmp |= NIG_REG_INT_MASK_3_P0_LB_TC1_PAUSE_TOO_LONG_INT;
421         ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, reg_addr, tmp);
422
423         reg_addr =
424             attn_blocks[BLOCK_NIG].chip_regs[ECORE_GET_TYPE(p_hwfn->p_dev)].
425             int_regs[5]->mask_addr;
426         tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, reg_addr);
427         tmp |= (1 << 0);        /* Was 5_P1_TX_PAUSE_TOO_LONG_INT */
428         ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, reg_addr, tmp);
429
430         /* TODO - a bit risky to return success here; But alternative is to
431          * actually read the multitdue of interrupt register of the block.
432          */
433         return ECORE_SUCCESS;
434 }
435
/* Handler for General Attention 32 - a firmware assertion.
 * Notifies the HW-error infrastructure and returns failure, since a FW
 * assertion indicates a fatal condition.
 */
static enum _ecore_status_t ecore_fw_assertion(struct ecore_hwfn *p_hwfn)
{
        DP_NOTICE(p_hwfn, false, "FW assertion!\n");

        ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_FW_ASSERT);

        return ECORE_INVAL;
}
444
/* Handler for General Attention 35 - informational only */
static enum _ecore_status_t
ecore_general_attention_35(struct ecore_hwfn *p_hwfn)
{
        DP_INFO(p_hwfn, "General attention 35!\n");

        return ECORE_SUCCESS;
}
452
453 #define ECORE_DORQ_ATTENTION_REASON_MASK (0xfffff)
454 #define ECORE_DORQ_ATTENTION_OPAQUE_MASK (0xffff)
455 #define ECORE_DORQ_ATTENTION_SIZE_MASK   (0x7f)
456 #define ECORE_DORQ_ATTENTION_SIZE_SHIFT  (16)
457
458 static enum _ecore_status_t ecore_dorq_attn_cb(struct ecore_hwfn *p_hwfn)
459 {
460         u32 reason;
461
462         reason = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, DORQ_REG_DB_DROP_REASON) &
463             ECORE_DORQ_ATTENTION_REASON_MASK;
464         if (reason) {
465                 u32 details = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
466                                        DORQ_REG_DB_DROP_DETAILS);
467
468                 DP_INFO(p_hwfn->p_dev,
469                         "DORQ db_drop: address 0x%08x Opaque FID 0x%04x"
470                         " Size [bytes] 0x%08x Reason: 0x%08x\n",
471                         ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
472                                  DORQ_REG_DB_DROP_DETAILS_ADDRESS),
473                         (u16)(details & ECORE_DORQ_ATTENTION_OPAQUE_MASK),
474                         ((details & ECORE_DORQ_ATTENTION_SIZE_MASK) >>
475                          ECORE_DORQ_ATTENTION_SIZE_SHIFT) * 4, reason);
476         }
477
478         return ECORE_INVAL;
479 }
480
/* Attention callback for the TM block.
 * On B0 emulation, pending-scan interrupts are a known artifact
 * (attributed here to clock ratios): log, mask them and report success.
 * Any other TM attention - and all TM attentions on ASIC - is an error.
 */
static enum _ecore_status_t ecore_tm_attn_cb(struct ecore_hwfn *p_hwfn)
{
#ifndef ASIC_ONLY
        if (CHIP_REV_IS_EMUL_B0(p_hwfn->p_dev)) {
                u32 val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
                                   TM_REG_INT_STS_1);

                /* Any bit besides the two pending-scan ones is fatal */
                if (val & ~(TM_REG_INT_STS_1_PEND_TASK_SCAN |
                            TM_REG_INT_STS_1_PEND_CONN_SCAN))
                        return ECORE_INVAL;

                if (val & (TM_REG_INT_STS_1_PEND_TASK_SCAN |
                           TM_REG_INT_STS_1_PEND_CONN_SCAN))
                        DP_INFO(p_hwfn,
                                "TM attention on emulation - most likely"
                                " results of clock-ratios\n");
                /* Mask the pending-scan sources so they stop recurring */
                val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, TM_REG_INT_MASK_1);
                val |= TM_REG_INT_MASK_1_PEND_CONN_SCAN |
                    TM_REG_INT_MASK_1_PEND_TASK_SCAN;
                ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, TM_REG_INT_MASK_1, val);

                return ECORE_SUCCESS;
        }
#endif

        return ECORE_INVAL;
}
508
/* Notice aeu_invert_reg must be defined in the same order of bits as HW;
 * one entry per AEU "after invert" register. Each bit entry carries a
 * printable name (possibly with a %d placeholder for multi-bit runs),
 * flags (type/length/offset), an optional callback and its HW block.
 */
static struct aeu_invert_reg aeu_descs[NUM_ATTN_REGS] = {
        {
         {                      /* After Invert 1 */
          {"GPIO0 function%d", (32 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,
           MAX_BLOCK_ID},
          }
         },

        {
         {                      /* After Invert 2 */
          {"PGLUE config_space", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
          {"PGLUE misc_flr", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
          {"PGLUE B RBC", ATTENTION_PAR_INT, ecore_pglub_rbc_attn_cb,
           BLOCK_PGLUE_B},
          {"PGLUE misc_mctp", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
          {"Flash event", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
          {"SMB event", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
          {"Main Power", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
          {"SW timers #%d",
           (8 << ATTENTION_LENGTH_SHIFT) | (1 << ATTENTION_OFFSET_SHIFT),
           OSAL_NULL, MAX_BLOCK_ID},
          {"PCIE glue/PXP VPD %d", (16 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,
           BLOCK_PGLCS},
          }
         },

        {
         {                      /* After Invert 3 */
          {"General Attention %d", (32 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,
           MAX_BLOCK_ID},
          }
         },

        {
         {                      /* After Invert 4 */
          {"General Attention 32", ATTENTION_SINGLE | ATTENTION_CLEAR_ENABLE,
           ecore_fw_assertion, MAX_BLOCK_ID},
          {"General Attention %d",
           (2 << ATTENTION_LENGTH_SHIFT) | (33 << ATTENTION_OFFSET_SHIFT),
           OSAL_NULL, MAX_BLOCK_ID},
          {"General Attention 35", ATTENTION_SINGLE | ATTENTION_CLEAR_ENABLE,
           ecore_general_attention_35, MAX_BLOCK_ID},
          {"CNIG port %d", (4 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,
           BLOCK_CNIG},
          {"MCP CPU", ATTENTION_SINGLE, ecore_mcp_attn_cb, MAX_BLOCK_ID},
          {"MCP Watchdog timer", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
          {"MCP M2P", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
          {"AVS stop status ready", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
          {"MSTAT", ATTENTION_PAR_INT, OSAL_NULL, MAX_BLOCK_ID},
          {"MSTAT per-path", ATTENTION_PAR_INT, OSAL_NULL, MAX_BLOCK_ID},
          {"Reserved %d", (6 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,
           MAX_BLOCK_ID},
          {"NIG", ATTENTION_PAR_INT, ecore_nig_attn_cb, BLOCK_NIG},
          {"BMB/OPTE/MCP", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BMB},
          {"BTB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BTB},
          {"BRB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BRB},
          {"PRS", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PRS},
          }
         },

        {
         {                      /* After Invert 5 */
          {"SRC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_SRC},
          {"PB Client1", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PBF_PB1},
          {"PB Client2", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PBF_PB2},
          {"RPB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_RPB},
          {"PBF", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PBF},
          {"QM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_QM},
          {"TM", ATTENTION_PAR_INT, ecore_tm_attn_cb, BLOCK_TM},
          {"MCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MCM},
          {"MSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MSDM},
          {"MSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MSEM},
          {"PCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PCM},
          {"PSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSDM},
          {"PSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSEM},
          {"TCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TCM},
          {"TSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TSDM},
          {"TSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TSEM},
          }
         },

        {
         {                      /* After Invert 6 */
          {"UCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_UCM},
          {"USDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_USDM},
          {"USEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_USEM},
          {"XCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XCM},
          {"XSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XSDM},
          {"XSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XSEM},
          {"YCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YCM},
          {"YSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YSDM},
          {"YSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YSEM},
          {"XYLD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XYLD},
          {"TMLD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TMLD},
          {"MYLD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MULD},
          {"YULD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YULD},
          {"DORQ", ATTENTION_PAR_INT, ecore_dorq_attn_cb, BLOCK_DORQ},
          {"DBG", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_DBG},
          {"IPC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_IPC},
          }
         },

        {
         {                      /* After Invert 7 */
          {"CCFC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CCFC},
          {"CDU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CDU},
          {"DMAE", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_DMAE},
          {"IGU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_IGU},
          {"ATC", ATTENTION_PAR_INT, OSAL_NULL, MAX_BLOCK_ID},
          {"CAU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CAU},
          {"PTU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PTU},
          {"PRM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PRM},
          {"TCFC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TCFC},
          {"RDIF", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_RDIF},
          {"TDIF", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TDIF},
          {"RSS", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_RSS},
          {"MISC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MISC},
          {"MISCS", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MISCS},
          {"PCIE", ATTENTION_PAR, OSAL_NULL, BLOCK_PCIE},
          {"Vaux PCI core", ATTENTION_SINGLE, OSAL_NULL, BLOCK_PGLCS},
          {"PSWRQ", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRQ},
          }
         },

        {
         {                      /* After Invert 8 */
          {"PSWRQ (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRQ2},
          {"PSWWR", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWWR},
          {"PSWWR (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWWR2},
          {"PSWRD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRD},
          {"PSWRD (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRD2},
          {"PSWHST", ATTENTION_PAR_INT, ecore_pswhst_attn_cb, BLOCK_PSWHST},
          {"PSWHST (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWHST2},
          {"GRC", ATTENTION_PAR_INT, ecore_grc_attn_cb, BLOCK_GRC},
          {"CPMU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CPMU},
          {"NCSI", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_NCSI},
          {"MSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
          {"PSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
          {"TSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
          {"USEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
          {"XSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
          {"YSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
          {"pxp_misc_mps", ATTENTION_PAR, OSAL_NULL, BLOCK_PGLCS},
          {"PCIE glue/PXP Exp. ROM", ATTENTION_SINGLE, OSAL_NULL, BLOCK_PGLCS},
          {"PERST_B assertion", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
          {"PERST_B deassertion", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
          {"Reserved %d", (2 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,
           MAX_BLOCK_ID},
          }
         },

        {
         {                      /* After Invert 9 */
          {"MCP Latched memory", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
          {"MCP Latched scratchpad cache", ATTENTION_SINGLE, OSAL_NULL,
           MAX_BLOCK_ID},
          {"MCP Latched ump_tx", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
          {"MCP Latched scratchpad", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
          {"Reserved %d", (28 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,
           MAX_BLOCK_ID},
          }
         },

};
674
/* Attention lines represented in the attention status block */
#define ATTN_STATE_BITS         (0xfff)
/* Subset of the attention lines which can be masked in the IGU */
#define ATTN_BITS_MASKABLE      (0x3ff)
/* Software state tracking the per-hwfn attention status block */
struct ecore_sb_attn_info {
        /* Virtual & Physical address of the SB */
        struct atten_status_block *sb_attn;
        dma_addr_t sb_phys;

        /* Last seen running index */
        u16 index;

        /* A mask of the AEU bits resulting in a parity error */
        u32 parity_mask[NUM_ATTN_REGS];

        /* A pointer to the attention description structure */
        struct aeu_invert_reg *p_aeu_desc;

        /* Previously asserted attentions, which are still unasserted */
        u16 known_attn;

        /* Cleanup address for the link's general hw attention */
        u32 mfw_attn_addr;
};
697
698 static u16 ecore_attn_update_idx(struct ecore_hwfn *p_hwfn,
699                                  struct ecore_sb_attn_info *p_sb_desc)
700 {
701         u16 rc = 0, index;
702
703         OSAL_MMIOWB(p_hwfn->p_dev);
704
705         index = OSAL_LE16_TO_CPU(p_sb_desc->sb_attn->sb_index);
706         if (p_sb_desc->index != index) {
707                 p_sb_desc->index = index;
708                 rc = ECORE_SB_ATT_IDX;
709         }
710
711         OSAL_MMIOWB(p_hwfn->p_dev);
712
713         return rc;
714 }
715
716 /**
717  * @brief ecore_int_assertion - handles asserted attention bits
718  *
719  * @param p_hwfn
720  * @param asserted_bits newly asserted bits
721  * @return enum _ecore_status_t
722  */
723 static enum _ecore_status_t ecore_int_assertion(struct ecore_hwfn *p_hwfn,
724                                                 u16 asserted_bits)
725 {
726         struct ecore_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
727         u32 igu_mask;
728
729         /* Mask the source of the attention in the IGU */
730         igu_mask = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
731                             IGU_REG_ATTENTION_ENABLE);
732         DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "IGU mask: 0x%08x --> 0x%08x\n",
733                    igu_mask, igu_mask & ~(asserted_bits & ATTN_BITS_MASKABLE));
734         igu_mask &= ~(asserted_bits & ATTN_BITS_MASKABLE);
735         ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, igu_mask);
736
737         DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
738                    "inner known ATTN state: 0x%04x --> 0x%04x\n",
739                    sb_attn_sw->known_attn,
740                    sb_attn_sw->known_attn | asserted_bits);
741         sb_attn_sw->known_attn |= asserted_bits;
742
743         /* Handle MCP events */
744         if (asserted_bits & 0x100) {
745                 ecore_mcp_handle_events(p_hwfn, p_hwfn->p_dpc_ptt);
746                 /* Clean the MCP attention */
747                 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt,
748                          sb_attn_sw->mfw_attn_addr, 0);
749         }
750
751         /* FIXME - this will change once we'll have GOOD gtt definitions */
752         DIRECT_REG_WR(p_hwfn,
753                       (u8 OSAL_IOMEM *) p_hwfn->regview +
754                       GTT_BAR0_MAP_REG_IGU_CMD +
755                       ((IGU_CMD_ATTN_BIT_SET_UPPER -
756                         IGU_CMD_INT_ACK_BASE) << 3), (u32)asserted_bits);
757
758         DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "set cmd IGU: 0x%04x\n",
759                    asserted_bits);
760
761         return ECORE_SUCCESS;
762 }
763
/* Log each set bit of @val for the given interrupt/parity status register,
 * tagging bits that are also set in the matching mask register as [MASKED].
 * When compiled with ATTN_DESC, the per-bit textual description is printed
 * as well.
 */
static void ecore_int_deassertion_print_bit(struct ecore_hwfn *p_hwfn,
                                            struct attn_hw_reg *p_reg_desc,
                                            struct attn_hw_block *p_block,
                                            enum ecore_attention_type type,
                                            u32 val, u32 mask)
{
        int j;
#ifdef ATTN_DESC
        const char **description;

        /* Pick the interrupt or parity description table for this block */
        if (type == ECORE_ATTN_TYPE_ATTN)
                description = p_block->int_desc;
        else
                description = p_block->prty_desc;
#endif

        for (j = 0; j < p_reg_desc->num_of_bits; j++) {
                if (val & (1 << j)) {
#ifdef ATTN_DESC
                        DP_NOTICE(p_hwfn, false,
                                  "%s (%s): %s [reg %d [0x%08x], bit %d]%s\n",
                                  p_block->name,
                                  type == ECORE_ATTN_TYPE_ATTN ? "Interrupt" :
                                  "Parity",
                                  description[p_reg_desc->bit_attn_idx[j]],
                                  p_reg_desc->reg_idx,
                                  p_reg_desc->sts_addr, j,
                                  (mask & (1 << j)) ? " [MASKED]" : "");
#else
                        /* NOTE(review): this branch passes p_hwfn->p_dev while
                         * the ATTN_DESC branch passes p_hwfn - confirm which
                         * DP_NOTICE argument flavor is intended.
                         */
                        DP_NOTICE(p_hwfn->p_dev, false,
                                  "%s (%s): [reg %d [0x%08x], bit %d]%s\n",
                                  p_block->name,
                                  type == ECORE_ATTN_TYPE_ATTN ? "Interrupt" :
                                  "Parity",
                                  p_reg_desc->reg_idx,
                                  p_reg_desc->sts_addr, j,
                                  (mask & (1 << j)) ? " [MASKED]" : "");
#endif
                }
        }
}
805
806 /**
807  * @brief ecore_int_deassertion_aeu_bit - handles the effects of a single
808  * cause of the attention
809  *
810  * @param p_hwfn
811  * @param p_aeu - descriptor of an AEU bit which caused the attention
812  * @param aeu_en_reg - register offset of the AEU enable reg. which configured
813  *  this bit to this group.
814  * @param bit_index - index of this bit in the aeu_en_reg
815  *
816  * @return enum _ecore_status_t
817  */
818 static enum _ecore_status_t
819 ecore_int_deassertion_aeu_bit(struct ecore_hwfn *p_hwfn,
820                               struct aeu_invert_reg_bit *p_aeu,
821                               u32 aeu_en_reg, u32 bitmask)
822 {
823         enum _ecore_status_t rc = ECORE_INVAL;
824         u32 val, mask;
825
826 #ifndef REMOVE_DBG
827         u32 interrupts[20];     /* TODO- change into HSI define once supplied */
828
829         OSAL_MEMSET(interrupts, 0, sizeof(u32) * 20);   /* FIXME real size) */
830 #endif
831
832         DP_INFO(p_hwfn, "Deasserted attention `%s'[%08x]\n",
833                 p_aeu->bit_name, bitmask);
834
835         /* Call callback before clearing the interrupt status */
836         if (p_aeu->cb) {
837                 DP_INFO(p_hwfn, "`%s (attention)': Calling Callback function\n",
838                         p_aeu->bit_name);
839                 rc = p_aeu->cb(p_hwfn);
840         }
841
842         /* Handle HW block interrupt registers */
843         if (p_aeu->block_index != MAX_BLOCK_ID) {
844                 u16 chip_type = ECORE_GET_TYPE(p_hwfn->p_dev);
845                 struct attn_hw_block *p_block;
846                 int i;
847
848                 p_block = &attn_blocks[p_aeu->block_index];
849
850                 /* Handle each interrupt register */
851                 for (i = 0;
852                      i < p_block->chip_regs[chip_type].num_of_int_regs; i++) {
853                         struct attn_hw_reg *p_reg_desc;
854                         u32 sts_addr;
855
856                         p_reg_desc = p_block->chip_regs[chip_type].int_regs[i];
857
858                         /* In case of fatal attention, don't clear the status
859                          * so it would appear in idle check.
860                          */
861                         if (rc == ECORE_SUCCESS)
862                                 sts_addr = p_reg_desc->sts_clr_addr;
863                         else
864                                 sts_addr = p_reg_desc->sts_addr;
865
866                         val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, sts_addr);
867                         mask = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
868                                         p_reg_desc->mask_addr);
869                         ecore_int_deassertion_print_bit(p_hwfn, p_reg_desc,
870                                                         p_block,
871                                                         ECORE_ATTN_TYPE_ATTN,
872                                                         val, mask);
873
874 #ifndef REMOVE_DBG
875                         interrupts[i] = val;
876 #endif
877                 }
878         }
879
880         /* Reach assertion if attention is fatal */
881         if (rc != ECORE_SUCCESS) {
882                 DP_NOTICE(p_hwfn, true, "`%s': Fatal attention\n",
883                           p_aeu->bit_name);
884
885                 ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_HW_ATTN);
886         }
887
888         /* Prevent this Attention from being asserted in the future */
889         if (p_aeu->flags & ATTENTION_CLEAR_ENABLE) {
890                 u32 val;
891                 u32 mask = ~bitmask;
892                 val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg);
893                 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, (val & mask));
894                 DP_INFO(p_hwfn, "`%s' - Disabled future attentions\n",
895                         p_aeu->bit_name);
896         }
897
898         if (p_aeu->flags & (ATTENTION_FW_DUMP | ATTENTION_PANIC_DUMP)) {
899                 /* @@@TODO - what to dump? <yuvalmin 04/02/13> */
900                 DP_ERR(p_hwfn->p_dev, "`%s' - Dumps aren't implemented yet\n",
901                        p_aeu->bit_name);
902                 return ECORE_NOTIMPL;
903         }
904
905         return rc;
906 }
907
908 static void ecore_int_parity_print(struct ecore_hwfn *p_hwfn,
909                                    struct aeu_invert_reg_bit *p_aeu,
910                                    struct attn_hw_block *p_block, u8 bit_index)
911 {
912         u16 chip_type = ECORE_GET_TYPE(p_hwfn->p_dev);
913         int i;
914
915         for (i = 0; i < p_block->chip_regs[chip_type].num_of_prty_regs; i++) {
916                 struct attn_hw_reg *p_reg_desc;
917                 u32 val, mask;
918
919                 p_reg_desc = p_block->chip_regs[chip_type].prty_regs[i];
920
921                 val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
922                                p_reg_desc->sts_clr_addr);
923                 mask = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
924                                 p_reg_desc->mask_addr);
925                 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
926                            "%s[%d] - parity register[%d] is %08x [mask is %08x]\n",
927                            p_aeu->bit_name, bit_index, i, val, mask);
928                 ecore_int_deassertion_print_bit(p_hwfn, p_reg_desc,
929                                                 p_block,
930                                                 ECORE_ATTN_TYPE_PARITY,
931                                                 val, mask);
932         }
933 }
934
935 /**
936  * @brief ecore_int_deassertion_parity - handle a single parity AEU source
937  *
938  * @param p_hwfn
939  * @param p_aeu - descriptor of an AEU bit which caused the
940  *              parity
941  * @param bit_index
942  */
943 static void ecore_int_deassertion_parity(struct ecore_hwfn *p_hwfn,
944                                          struct aeu_invert_reg_bit *p_aeu,
945                                          u8 bit_index)
946 {
947         u32 block_id = p_aeu->block_index;
948
949         DP_INFO(p_hwfn->p_dev, "%s[%d] parity attention is set\n",
950                 p_aeu->bit_name, bit_index);
951
952         if (block_id != MAX_BLOCK_ID) {
953                 ecore_int_parity_print(p_hwfn, p_aeu, &attn_blocks[block_id],
954                                        bit_index);
955
956                 /* In A0, there's a single parity bit for several blocks */
957                 if (block_id == BLOCK_BTB) {
958                         ecore_int_parity_print(p_hwfn, p_aeu,
959                                                &attn_blocks[BLOCK_OPTE],
960                                                bit_index);
961                         ecore_int_parity_print(p_hwfn, p_aeu,
962                                                &attn_blocks[BLOCK_MCP],
963                                                bit_index);
964                 }
965         }
966 }
967
968 /**
969  * @brief - handles deassertion of previously asserted attentions.
970  *
971  * @param p_hwfn
972  * @param deasserted_bits - newly deasserted bits
973  * @return enum _ecore_status_t
974  *
975  */
976 static enum _ecore_status_t ecore_int_deassertion(struct ecore_hwfn *p_hwfn,
977                                                   u16 deasserted_bits)
978 {
979         struct ecore_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
980         u32 aeu_inv_arr[NUM_ATTN_REGS], aeu_mask;
981         bool b_parity = false;
982         u8 i, j, k, bit_idx;
983         enum _ecore_status_t rc = ECORE_SUCCESS;
984
985         /* Read the attention registers in the AEU */
986         for (i = 0; i < NUM_ATTN_REGS; i++) {
987                 aeu_inv_arr[i] = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
988                                           MISC_REG_AEU_AFTER_INVERT_1_IGU +
989                                           i * 0x4);
990                 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
991                            "Deasserted bits [%d]: %08x\n", i, aeu_inv_arr[i]);
992         }
993
994         /* Handle parity attentions first */
995         for (i = 0; i < NUM_ATTN_REGS; i++) {
996                 struct aeu_invert_reg *p_aeu = &sb_attn_sw->p_aeu_desc[i];
997                 u32 en = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
998                                   MISC_REG_AEU_ENABLE1_IGU_OUT_0 +
999                                   i * sizeof(u32));
1000
1001                 u32 parities = sb_attn_sw->parity_mask[i] & aeu_inv_arr[i] & en;
1002
1003                 /* Skip register in which no parity bit is currently set */
1004                 if (!parities)
1005                         continue;
1006
1007                 for (j = 0, bit_idx = 0; bit_idx < 32; j++) {
1008                         struct aeu_invert_reg_bit *p_bit = &p_aeu->bits[j];
1009
1010                         if ((p_bit->flags & ATTENTION_PARITY) &&
1011                             !!(parities & (1 << bit_idx))) {
1012                                 ecore_int_deassertion_parity(p_hwfn, p_bit,
1013                                                              bit_idx);
1014                                 b_parity = true;
1015                         }
1016
1017                         bit_idx += ATTENTION_LENGTH(p_bit->flags);
1018                 }
1019         }
1020
1021         /* Find non-parity cause for attention and act */
1022         for (k = 0; k < MAX_ATTN_GRPS; k++) {
1023                 struct aeu_invert_reg_bit *p_aeu;
1024
1025                 /* Handle only groups whose attention is currently deasserted */
1026                 if (!(deasserted_bits & (1 << k)))
1027                         continue;
1028
1029                 for (i = 0; i < NUM_ATTN_REGS; i++) {
1030                         u32 aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 +
1031                             i * sizeof(u32) + k * sizeof(u32) * NUM_ATTN_REGS;
1032                         u32 en = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en);
1033                         u32 bits = aeu_inv_arr[i] & en;
1034
1035                         /* Skip if no bit from this group is currently set */
1036                         if (!bits)
1037                                 continue;
1038
1039                         /* Find all set bits from current register which belong
1040                          * to current group, making them responsible for the
1041                          * previous assertion.
1042                          */
1043                         for (j = 0, bit_idx = 0; bit_idx < 32; j++) {
1044                                 u8 bit, bit_len;
1045                                 u32 bitmask;
1046
1047                                 p_aeu = &sb_attn_sw->p_aeu_desc[i].bits[j];
1048
1049                                 /* No need to handle attention-only bits */
1050                                 if (p_aeu->flags == ATTENTION_PAR)
1051                                         continue;
1052
1053                                 bit = bit_idx;
1054                                 bit_len = ATTENTION_LENGTH(p_aeu->flags);
1055                                 if (p_aeu->flags & ATTENTION_PAR_INT) {
1056                                         /* Skip Parity */
1057                                         bit++;
1058                                         bit_len--;
1059                                 }
1060
1061                                 bitmask = bits & (((1 << bit_len) - 1) << bit);
1062                                 if (bitmask) {
1063                                         /* Handle source of the attention */
1064                                         ecore_int_deassertion_aeu_bit(p_hwfn,
1065                                                                       p_aeu,
1066                                                                       aeu_en,
1067                                                                       bitmask);
1068                                 }
1069
1070                                 bit_idx += ATTENTION_LENGTH(p_aeu->flags);
1071                         }
1072                 }
1073         }
1074
1075         /* Clear IGU indication for the deasserted bits */
1076         /* FIXME - this will change once we'll have GOOD gtt definitions */
1077         DIRECT_REG_WR(p_hwfn,
1078                       (u8 OSAL_IOMEM *) p_hwfn->regview +
1079                       GTT_BAR0_MAP_REG_IGU_CMD +
1080                       ((IGU_CMD_ATTN_BIT_CLR_UPPER -
1081                         IGU_CMD_INT_ACK_BASE) << 3), ~((u32)deasserted_bits));
1082
1083         /* Unmask deasserted attentions in IGU */
1084         aeu_mask = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
1085                             IGU_REG_ATTENTION_ENABLE);
1086         aeu_mask |= (deasserted_bits & ATTN_BITS_MASKABLE);
1087         ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, aeu_mask);
1088
1089         /* Clear deassertion from inner state */
1090         sb_attn_sw->known_attn &= ~deasserted_bits;
1091
1092         return rc;
1093 }
1094
/* Top-level attention handler: snapshot the attention SB, derive which
 * attention lines were newly asserted/deasserted relative to the known SW
 * state, and dispatch to the assertion/deassertion handlers.
 */
static enum _ecore_status_t ecore_int_attentions(struct ecore_hwfn *p_hwfn)
{
        struct ecore_sb_attn_info *p_sb_attn_sw = p_hwfn->p_sb_attn;
        struct atten_status_block *p_sb_attn = p_sb_attn_sw->sb_attn;
        u16 index = 0, asserted_bits, deasserted_bits;
        enum _ecore_status_t rc = ECORE_SUCCESS;
        u32 attn_bits = 0, attn_acks = 0;

        /* Read current attention bits/acks - safeguard against attentions
         * by guaranting work on a synchronized timeframe
         */
        do {
                index = OSAL_LE16_TO_CPU(p_sb_attn->sb_index);
                attn_bits = OSAL_LE32_TO_CPU(p_sb_attn->atten_bits);
                attn_acks = OSAL_LE32_TO_CPU(p_sb_attn->atten_ack);
        } while (index != OSAL_LE16_TO_CPU(p_sb_attn->sb_index));
        /* NOTE(review): this stores the CPU-order value back into what is
         * read above as a little-endian field - confirm intent on
         * big-endian hosts.
         */
        p_sb_attn->sb_index = index;

        /* Attention / Deassertion are meaningful (and in correct state)
         * only when they differ and consistent with known state - deassertion
         * when previous attention & current ack, and assertion when current
         * attention with no previous attention
         */
        asserted_bits = (attn_bits & ~attn_acks & ATTN_STATE_BITS) &
            ~p_sb_attn_sw->known_attn;
        deasserted_bits = (~attn_bits & attn_acks & ATTN_STATE_BITS) &
            p_sb_attn_sw->known_attn;

        /* Bit 0x100 alone is an MFW indication; anything else is logged
         * more loudly.
         */
        if ((asserted_bits & ~0x100) || (deasserted_bits & ~0x100))
                DP_INFO(p_hwfn,
                        "Attention: Index: 0x%04x, Bits: 0x%08x, Acks: 0x%08x, asserted: 0x%04x, De-asserted 0x%04x [Prev. known: 0x%04x]\n",
                        index, attn_bits, attn_acks, asserted_bits,
                        deasserted_bits, p_sb_attn_sw->known_attn);
        else if (asserted_bits == 0x100)
                DP_INFO(p_hwfn, "MFW indication via attention\n");
        else
                DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
                           "MFW indication [deassertion]\n");

        if (asserted_bits) {
                rc = ecore_int_assertion(p_hwfn, asserted_bits);
                if (rc)
                        return rc;
        }

        if (deasserted_bits)
                rc = ecore_int_deassertion(p_hwfn, deasserted_bits);

        return rc;
}
1145
1146 static void ecore_sb_ack_attn(struct ecore_hwfn *p_hwfn,
1147                               void OSAL_IOMEM *igu_addr, u32 ack_cons)
1148 {
1149         struct igu_prod_cons_update igu_ack = { 0 };
1150
1151         igu_ack.sb_id_and_flags =
1152             ((ack_cons << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) |
1153              (1 << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) |
1154              (IGU_INT_NOP << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) |
1155              (IGU_SEG_ACCESS_ATTN <<
1156               IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT));
1157
1158         DIRECT_REG_WR(p_hwfn, igu_addr, igu_ack.sb_id_and_flags);
1159
1160         /* Both segments (interrupts & acks) are written to same place address;
1161          * Need to guarantee all commands will be received (in-order) by HW.
1162          */
1163         OSAL_MMIOWB(p_hwfn->p_dev);
1164         OSAL_BARRIER(p_hwfn->p_dev);
1165 }
1166
1167 void ecore_int_sp_dpc(osal_int_ptr_t hwfn_cookie)
1168 {
1169         struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)hwfn_cookie;
1170         struct ecore_pi_info *pi_info = OSAL_NULL;
1171         struct ecore_sb_attn_info *sb_attn;
1172         struct ecore_sb_info *sb_info;
1173         static int arr_size;
1174         u16 rc = 0;
1175
1176         if (!p_hwfn) {
1177                 DP_ERR(p_hwfn->p_dev, "DPC called - no hwfn!\n");
1178                 return;
1179         }
1180
1181         if (!p_hwfn->p_sp_sb) {
1182                 DP_ERR(p_hwfn->p_dev, "DPC called - no p_sp_sb\n");
1183                 return;
1184         }
1185
1186         sb_info = &p_hwfn->p_sp_sb->sb_info;
1187         arr_size = OSAL_ARRAY_SIZE(p_hwfn->p_sp_sb->pi_info_arr);
1188         if (!sb_info) {
1189                 DP_ERR(p_hwfn->p_dev,
1190                        "Status block is NULL - cannot ack interrupts\n");
1191                 return;
1192         }
1193
1194         if (!p_hwfn->p_sb_attn) {
1195                 DP_ERR(p_hwfn->p_dev, "DPC called - no p_sb_attn");
1196                 return;
1197         }
1198         sb_attn = p_hwfn->p_sb_attn;
1199
1200         DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "DPC Called! (hwfn %p %d)\n",
1201                    p_hwfn, p_hwfn->my_id);
1202
1203         /* Disable ack for def status block. Required both for msix +
1204          * inta in non-mask mode, in inta does no harm.
1205          */
1206         ecore_sb_ack(sb_info, IGU_INT_DISABLE, 0);
1207
1208         /* Gather Interrupts/Attentions information */
1209         if (!sb_info->sb_virt) {
1210                 DP_ERR(p_hwfn->p_dev,
1211                        "Interrupt Status block is NULL -"
1212                        " cannot check for new interrupts!\n");
1213         } else {
1214                 u32 tmp_index = sb_info->sb_ack;
1215                 rc = ecore_sb_update_sb_idx(sb_info);
1216                 DP_VERBOSE(p_hwfn->p_dev, ECORE_MSG_INTR,
1217                            "Interrupt indices: 0x%08x --> 0x%08x\n",
1218                            tmp_index, sb_info->sb_ack);
1219         }
1220
1221         if (!sb_attn || !sb_attn->sb_attn) {
1222                 DP_ERR(p_hwfn->p_dev,
1223                        "Attentions Status block is NULL -"
1224                        " cannot check for new attentions!\n");
1225         } else {
1226                 u16 tmp_index = sb_attn->index;
1227
1228                 rc |= ecore_attn_update_idx(p_hwfn, sb_attn);
1229                 DP_VERBOSE(p_hwfn->p_dev, ECORE_MSG_INTR,
1230                            "Attention indices: 0x%08x --> 0x%08x\n",
1231                            tmp_index, sb_attn->index);
1232         }
1233
1234         /* Check if we expect interrupts at this time. if not just ack them */
1235         if (!(rc & ECORE_SB_EVENT_MASK)) {
1236                 ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1);
1237                 return;
1238         }
1239
1240         /* Check the validity of the DPC ptt. If not ack interrupts and fail */
1241         if (!p_hwfn->p_dpc_ptt) {
1242                 DP_NOTICE(p_hwfn->p_dev, true, "Failed to allocate PTT\n");
1243                 ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1);
1244                 return;
1245         }
1246
1247         if (rc & ECORE_SB_ATT_IDX)
1248                 ecore_int_attentions(p_hwfn);
1249
1250         if (rc & ECORE_SB_IDX) {
1251                 int pi;
1252
1253                 /* Since we only looked at the SB index, it's possible more
1254                  * than a single protocol-index on the SB incremented.
1255                  * Iterate over all configured protocol indices and check
1256                  * whether something happened for each.
1257                  */
1258                 for (pi = 0; pi < arr_size; pi++) {
1259                         pi_info = &p_hwfn->p_sp_sb->pi_info_arr[pi];
1260                         if (pi_info->comp_cb != OSAL_NULL)
1261                                 pi_info->comp_cb(p_hwfn, pi_info->cookie);
1262                 }
1263         }
1264
1265         if (sb_attn && (rc & ECORE_SB_ATT_IDX)) {
1266                 /* This should be done before the interrupts are enabled,
1267                  * since otherwise a new attention will be generated.
1268                  */
1269                 ecore_sb_ack_attn(p_hwfn, sb_info->igu_addr, sb_attn->index);
1270         }
1271
1272         ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1);
1273 }
1274
1275 static void ecore_int_sb_attn_free(struct ecore_hwfn *p_hwfn)
1276 {
1277         struct ecore_sb_attn_info *p_sb = p_hwfn->p_sb_attn;
1278
1279         if (!p_sb)
1280                 return;
1281
1282         if (p_sb->sb_attn) {
1283                 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_sb->sb_attn,
1284                                        p_sb->sb_phys,
1285                                        SB_ATTN_ALIGNED_SIZE(p_hwfn));
1286         }
1287         OSAL_FREE(p_hwfn->p_dev, p_sb);
1288 }
1289
1290 static void ecore_int_sb_attn_setup(struct ecore_hwfn *p_hwfn,
1291                                     struct ecore_ptt *p_ptt)
1292 {
1293         struct ecore_sb_attn_info *sb_info = p_hwfn->p_sb_attn;
1294
1295         OSAL_MEMSET(sb_info->sb_attn, 0, sizeof(*sb_info->sb_attn));
1296
1297         sb_info->index = 0;
1298         sb_info->known_attn = 0;
1299
1300         /* Configure Attention Status Block in IGU */
1301         ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_L,
1302                  DMA_LO(p_hwfn->p_sb_attn->sb_phys));
1303         ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_H,
1304                  DMA_HI(p_hwfn->p_sb_attn->sb_phys));
1305 }
1306
1307 static void ecore_int_sb_attn_init(struct ecore_hwfn *p_hwfn,
1308                                    struct ecore_ptt *p_ptt,
1309                                    void *sb_virt_addr, dma_addr_t sb_phy_addr)
1310 {
1311         struct ecore_sb_attn_info *sb_info = p_hwfn->p_sb_attn;
1312         int i, j, k;
1313
1314         sb_info->sb_attn = sb_virt_addr;
1315         sb_info->sb_phys = sb_phy_addr;
1316
1317         /* Set the pointer to the AEU descriptors */
1318         sb_info->p_aeu_desc = aeu_descs;
1319
1320         /* Calculate Parity Masks */
1321         OSAL_MEMSET(sb_info->parity_mask, 0, sizeof(u32) * NUM_ATTN_REGS);
1322         for (i = 0; i < NUM_ATTN_REGS; i++) {
1323                 /* j is array index, k is bit index */
1324                 for (j = 0, k = 0; k < 32; j++) {
1325                         unsigned int flags = aeu_descs[i].bits[j].flags;
1326
1327                         if (flags & ATTENTION_PARITY)
1328                                 sb_info->parity_mask[i] |= 1 << k;
1329
1330                         k += ATTENTION_LENGTH(flags);
1331                 }
1332                 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
1333                            "Attn Mask [Reg %d]: 0x%08x\n",
1334                            i, sb_info->parity_mask[i]);
1335         }
1336
1337         /* Set the address of cleanup for the mcp attention */
1338         sb_info->mfw_attn_addr = (p_hwfn->rel_pf_id << 3) +
1339             MISC_REG_AEU_GENERAL_ATTN_0;
1340
1341         ecore_int_sb_attn_setup(p_hwfn, p_ptt);
1342 }
1343
1344 static enum _ecore_status_t ecore_int_sb_attn_alloc(struct ecore_hwfn *p_hwfn,
1345                                                     struct ecore_ptt *p_ptt)
1346 {
1347         struct ecore_dev *p_dev = p_hwfn->p_dev;
1348         struct ecore_sb_attn_info *p_sb;
1349         dma_addr_t p_phys = 0;
1350         void *p_virt;
1351
1352         /* SB struct */
1353         p_sb = OSAL_ALLOC(p_dev, GFP_KERNEL, sizeof(struct ecore_sb_attn_info));
1354         if (!p_sb) {
1355                 DP_NOTICE(p_dev, true,
1356                           "Failed to allocate `struct ecore_sb_attn_info'");
1357                 return ECORE_NOMEM;
1358         }
1359
1360         /* SB ring  */
1361         p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys,
1362                                          SB_ATTN_ALIGNED_SIZE(p_hwfn));
1363         if (!p_virt) {
1364                 DP_NOTICE(p_dev, true,
1365                           "Failed to allocate status block (attentions)");
1366                 OSAL_FREE(p_dev, p_sb);
1367                 return ECORE_NOMEM;
1368         }
1369
1370         /* Attention setup */
1371         p_hwfn->p_sb_attn = p_sb;
1372         ecore_int_sb_attn_init(p_hwfn, p_ptt, p_virt, p_phys);
1373
1374         return ECORE_SUCCESS;
1375 }
1376
/* coalescing timeout = timeset << (timer_res + 1) */
#ifdef RTE_LIBRTE_QEDE_RX_COAL_US
/* Default Rx interrupt coalescing period [usec]; build-time override */
#define ECORE_CAU_DEF_RX_USECS RTE_LIBRTE_QEDE_RX_COAL_US
#else
#define ECORE_CAU_DEF_RX_USECS 24
#endif

#ifdef RTE_LIBRTE_QEDE_TX_COAL_US
/* Default Tx interrupt coalescing period [usec]; build-time override */
#define ECORE_CAU_DEF_TX_USECS RTE_LIBRTE_QEDE_TX_COAL_US
#else
#define ECORE_CAU_DEF_TX_USECS 48
#endif
1389
/* Build a CAU status-block entry in host memory for the given PF (and
 * optionally VF). Timer resolution is fixed at build time, timeset is
 * set to the 0x7F default, and the host-coalescing state is chosen per
 * the device-wide int_coalescing_mode. The caller is responsible for
 * writing the entry to HW (see ecore_int_cau_conf_sb()).
 */
void ecore_init_cau_sb_entry(struct ecore_hwfn *p_hwfn,
                             struct cau_sb_entry *p_sb_entry,
                             u8 pf_id, u16 vf_number, u8 vf_valid)
{
        struct ecore_dev *p_dev = p_hwfn->p_dev;
        u32 cau_state;

        OSAL_MEMSET(p_sb_entry, 0, sizeof(*p_sb_entry));

        SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_PF_NUMBER, pf_id);
        SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_NUMBER, vf_number);
        SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_VALID, vf_valid);
        /* Default timeset for both the RX and TX state machines */
        SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET0, 0x7F);
        SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET1, 0x7F);

        /* setting the timer resolution to a fixed build-time value */
        SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES0,
                  ECORE_CAU_DEF_RX_TIMER_RES);
        SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES1,
                  ECORE_CAU_DEF_TX_TIMER_RES);

        cau_state = CAU_HC_DISABLE_STATE;

        if (p_dev->int_coalescing_mode == ECORE_COAL_MODE_ENABLE) {
                cau_state = CAU_HC_ENABLE_STATE;
                /* Fall back to the build-time defaults when the user
                 * provided no explicit coalescing values.
                 */
                if (!p_dev->rx_coalesce_usecs) {
                        p_dev->rx_coalesce_usecs = ECORE_CAU_DEF_RX_USECS;
                        DP_INFO(p_dev, "Coalesce params rx-usecs=%u\n",
                                p_dev->rx_coalesce_usecs);
                }
                if (!p_dev->tx_coalesce_usecs) {
                        p_dev->tx_coalesce_usecs = ECORE_CAU_DEF_TX_USECS;
                        DP_INFO(p_dev, "Coalesce params tx-usecs=%u\n",
                                p_dev->tx_coalesce_usecs);
                }
        }

        SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE0, cau_state);
        SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE1, cau_state);
}
1430
/* Program the CAU for a single status block: write its host DMA address
 * and CAU variable entry, then configure PI coalescing when enabled.
 * After hw-init the writes go directly to HW via DMAE; before hw-init
 * they are staged in the runtime-init array and loaded by the init tool.
 */
void ecore_int_cau_conf_sb(struct ecore_hwfn *p_hwfn,
                           struct ecore_ptt *p_ptt,
                           dma_addr_t sb_phys, u16 igu_sb_id,
                           u16 vf_number, u8 vf_valid)
{
        struct cau_sb_entry sb_entry;

        ecore_init_cau_sb_entry(p_hwfn, &sb_entry, p_hwfn->rel_pf_id,
                                vf_number, vf_valid);

        if (p_hwfn->hw_init_done) {
                /* Wide-bus, initialize via DMAE */
                u64 phys_addr = (u64)sb_phys;

                /* NOTE(review): the "2" length presumably counts 32b
                 * dwords (2 dwords == the u64 stride used below) -
                 * confirm against ecore_dmae_host2grc().
                 */
                ecore_dmae_host2grc(p_hwfn, p_ptt,
                                    (u64)(osal_uintptr_t)&phys_addr,
                                    CAU_REG_SB_ADDR_MEMORY +
                                    igu_sb_id * sizeof(u64), 2, 0);
                ecore_dmae_host2grc(p_hwfn, p_ptt,
                                    (u64)(osal_uintptr_t)&sb_entry,
                                    CAU_REG_SB_VAR_MEMORY +
                                    igu_sb_id * sizeof(u64), 2, 0);
        } else {
                /* Initialize Status Block Address */
                STORE_RT_REG_AGG(p_hwfn,
                                 CAU_REG_SB_ADDR_MEMORY_RT_OFFSET +
                                 igu_sb_id * 2, sb_phys);

                STORE_RT_REG_AGG(p_hwfn,
                                 CAU_REG_SB_VAR_MEMORY_RT_OFFSET +
                                 igu_sb_id * 2, sb_entry);
        }

        /* Configure pi coalescing if set */
        if (p_hwfn->p_dev->int_coalescing_mode == ECORE_COAL_MODE_ENABLE) {
                u8 num_tc = 1;  /* @@@TBD aelior ECORE_MULTI_COS */
                /* timeset = usecs >> (timer_res + 1); see the comment
                 * above the ECORE_CAU_DEF_*_USECS defines.
                 */
                u8 timeset = p_hwfn->p_dev->rx_coalesce_usecs >>
                    (ECORE_CAU_DEF_RX_TIMER_RES + 1);
                u8 i;

                ecore_int_cau_conf_pi(p_hwfn, p_ptt, igu_sb_id, RX_PI,
                                      ECORE_COAL_RX_STATE_MACHINE, timeset);

                timeset = p_hwfn->p_dev->tx_coalesce_usecs >>
                    (ECORE_CAU_DEF_TX_TIMER_RES + 1);

                /* One TX PI is configured per traffic class */
                for (i = 0; i < num_tc; i++) {
                        ecore_int_cau_conf_pi(p_hwfn, p_ptt,
                                              igu_sb_id, TX_PI(i),
                                              ECORE_COAL_TX_STATE_MACHINE,
                                              timeset);
                }
        }
}
1485
/* Write a single CAU PI (protocol index) entry, selecting the RX or TX
 * coalescing state machine and the given timeset. No-op for VFs.
 */
void ecore_int_cau_conf_pi(struct ecore_hwfn *p_hwfn,
                           struct ecore_ptt *p_ptt,
                           u16 igu_sb_id, u32 pi_index,
                           enum ecore_coalescing_fsm coalescing_fsm, u8 timeset)
{
        struct cau_pi_entry pi_entry;
        u32 sb_offset, pi_offset;

        if (IS_VF(p_hwfn->p_dev))
                return;         /* @@@TBD MichalK- VF CAU... */

        /* PI entries of one SB are consecutive in CAU PI memory */
        sb_offset = igu_sb_id * PIS_PER_SB;
        OSAL_MEMSET(&pi_entry, 0, sizeof(struct cau_pi_entry));

        SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_PI_TIMESET, timeset);
        /* FSM_SEL: 0 selects the RX state machine, 1 the TX one */
        if (coalescing_fsm == ECORE_COAL_RX_STATE_MACHINE)
                SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 0);
        else
                SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 1);

        pi_offset = sb_offset + pi_index;
        /* NOTE(review): the (u32 *)&pi_entry pun assumes cau_pi_entry is
         * exactly one 32b register image - verify against the HSI layout.
         */
        if (p_hwfn->hw_init_done) {
                ecore_wr(p_hwfn, p_ptt,
                         CAU_REG_PI_MEMORY + pi_offset * sizeof(u32),
                         *((u32 *)&(pi_entry)));
        } else {
                STORE_RT_REG(p_hwfn,
                             CAU_REG_PI_MEMORY_RT_OFFSET + pi_offset,
                             *((u32 *)&(pi_entry)));
        }
}
1517
1518 void ecore_int_sb_setup(struct ecore_hwfn *p_hwfn,
1519                         struct ecore_ptt *p_ptt, struct ecore_sb_info *sb_info)
1520 {
1521         /* zero status block and ack counter */
1522         sb_info->sb_ack = 0;
1523         OSAL_MEMSET(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));
1524
1525         if (IS_PF(p_hwfn->p_dev))
1526                 ecore_int_cau_conf_sb(p_hwfn, p_ptt, sb_info->sb_phys,
1527                                       sb_info->igu_sb_id, 0, 0);
1528 }
1529
1530 /**
1531  * @brief ecore_get_igu_sb_id - given a sw sb_id return the
1532  *        igu_sb_id
1533  *
1534  * @param p_hwfn
1535  * @param sb_id
1536  *
1537  * @return u16
1538  */
1539 static u16 ecore_get_igu_sb_id(struct ecore_hwfn *p_hwfn, u16 sb_id)
1540 {
1541         u16 igu_sb_id;
1542
1543         /* Assuming continuous set of IGU SBs dedicated for given PF */
1544         if (sb_id == ECORE_SP_SB_ID)
1545                 igu_sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
1546         else if (IS_PF(p_hwfn->p_dev))
1547                 igu_sb_id = sb_id + p_hwfn->hw_info.p_igu_info->igu_base_sb;
1548         else
1549                 igu_sb_id = ecore_vf_get_igu_sb_id(p_hwfn, sb_id);
1550
1551         if (sb_id == ECORE_SP_SB_ID)
1552                 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
1553                            "Slowpath SB index in IGU is 0x%04x\n", igu_sb_id);
1554         else
1555                 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
1556                            "SB [%04x] <--> IGU SB [%04x]\n", sb_id, igu_sb_id);
1557
1558         return igu_sb_id;
1559 }
1560
/* Initialize an SB info structure: resolve its IGU SB id, register it
 * with the hwfn (non-slowpath SBs only), compute the absolute ack
 * address in the BAR and finally program the SB via ecore_int_sb_setup().
 *
 * @return ECORE_SUCCESS (always).
 */
enum _ecore_status_t ecore_int_sb_init(struct ecore_hwfn *p_hwfn,
                                       struct ecore_ptt *p_ptt,
                                       struct ecore_sb_info *sb_info,
                                       void *sb_virt_addr,
                                       dma_addr_t sb_phy_addr, u16 sb_id)
{
        sb_info->sb_virt = sb_virt_addr;
        sb_info->sb_phys = sb_phy_addr;

        sb_info->igu_sb_id = ecore_get_igu_sb_id(p_hwfn, sb_id);

        /* The slowpath SB is tracked separately via p_hwfn->p_sp_sb */
        if (sb_id != ECORE_SP_SB_ID) {
                p_hwfn->sbs_info[sb_id] = sb_info;
                p_hwfn->num_sbs++;
        }
#ifdef ECORE_CONFIG_DIRECT_HWFN
        sb_info->p_hwfn = p_hwfn;
#endif
        sb_info->p_dev = p_hwfn->p_dev;

        /* The igu address will hold the absolute address that needs to be
         * written to for a specific status block
         */
        if (IS_PF(p_hwfn->p_dev)) {
                sb_info->igu_addr = (u8 OSAL_IOMEM *)p_hwfn->regview +
                    GTT_BAR0_MAP_REG_IGU_CMD + (sb_info->igu_sb_id << 3);

        } else {
                /* VFs reach the IGU through their own BAR0 window */
                sb_info->igu_addr =
                    (u8 OSAL_IOMEM *)p_hwfn->regview +
                    PXP_VF_BAR0_START_IGU +
                    ((IGU_CMD_INT_ACK_BASE + sb_info->igu_sb_id) << 3);
        }

        sb_info->flags |= ECORE_SB_INFO_INIT;

        ecore_int_sb_setup(p_hwfn, p_ptt, sb_info);

        return ECORE_SUCCESS;
}
1601
1602 enum _ecore_status_t ecore_int_sb_release(struct ecore_hwfn *p_hwfn,
1603                                           struct ecore_sb_info *sb_info,
1604                                           u16 sb_id)
1605 {
1606         if (sb_id == ECORE_SP_SB_ID) {
1607                 DP_ERR(p_hwfn, "Do Not free sp sb using this function");
1608                 return ECORE_INVAL;
1609         }
1610
1611         /* zero status block and ack counter */
1612         sb_info->sb_ack = 0;
1613         OSAL_MEMSET(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));
1614
1615         if (p_hwfn->sbs_info[sb_id] != OSAL_NULL) {
1616                 p_hwfn->sbs_info[sb_id] = OSAL_NULL;
1617                 p_hwfn->num_sbs--;
1618         }
1619
1620         return ECORE_SUCCESS;
1621 }
1622
1623 static void ecore_int_sp_sb_free(struct ecore_hwfn *p_hwfn)
1624 {
1625         struct ecore_sb_sp_info *p_sb = p_hwfn->p_sp_sb;
1626
1627         if (!p_sb)
1628                 return;
1629
1630         if (p_sb->sb_info.sb_virt) {
1631                 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
1632                                        p_sb->sb_info.sb_virt,
1633                                        p_sb->sb_info.sb_phys,
1634                                        SB_ALIGNED_SIZE(p_hwfn));
1635         }
1636
1637         OSAL_FREE(p_hwfn->p_dev, p_sb);
1638 }
1639
/* Allocate the slowpath status block: the tracking structure plus a
 * DMA-coherent SB ring, then initialize it under ECORE_SP_SB_ID.
 *
 * @return ECORE_SUCCESS or ECORE_NOMEM on allocation failure.
 */
static enum _ecore_status_t ecore_int_sp_sb_alloc(struct ecore_hwfn *p_hwfn,
                                                  struct ecore_ptt *p_ptt)
{
        struct ecore_sb_sp_info *p_sb;
        dma_addr_t p_phys = 0;
        void *p_virt;

        /* SB struct */
        p_sb =
            OSAL_ALLOC(p_hwfn->p_dev, GFP_KERNEL,
                       sizeof(struct ecore_sb_sp_info));
        if (!p_sb) {
                DP_NOTICE(p_hwfn, true,
                          "Failed to allocate `struct ecore_sb_info'");
                return ECORE_NOMEM;
        }

        /* SB ring  */
        p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
                                         &p_phys, SB_ALIGNED_SIZE(p_hwfn));
        if (!p_virt) {
                DP_NOTICE(p_hwfn, true, "Failed to allocate status block");
                /* Undo the struct allocation before bailing out */
                OSAL_FREE(p_hwfn->p_dev, p_sb);
                return ECORE_NOMEM;
        }

        /* Status Block setup */
        p_hwfn->p_sp_sb = p_sb;
        ecore_int_sb_init(p_hwfn, p_ptt, &p_sb->sb_info,
                          p_virt, p_phys, ECORE_SP_SB_ID);

        /* No protocol-index callbacks are registered yet */
        OSAL_MEMSET(p_sb->pi_info_arr, 0, sizeof(p_sb->pi_info_arr));

        return ECORE_SUCCESS;
}
1675
1676 enum _ecore_status_t ecore_int_register_cb(struct ecore_hwfn *p_hwfn,
1677                                            ecore_int_comp_cb_t comp_cb,
1678                                            void *cookie,
1679                                            u8 *sb_idx, __le16 **p_fw_cons)
1680 {
1681         struct ecore_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;
1682         enum _ecore_status_t rc = ECORE_NOMEM;
1683         u8 pi;
1684
1685         /* Look for a free index */
1686         for (pi = 0; pi < OSAL_ARRAY_SIZE(p_sp_sb->pi_info_arr); pi++) {
1687                 if (p_sp_sb->pi_info_arr[pi].comp_cb != OSAL_NULL)
1688                         continue;
1689
1690                 p_sp_sb->pi_info_arr[pi].comp_cb = comp_cb;
1691                 p_sp_sb->pi_info_arr[pi].cookie = cookie;
1692                 *sb_idx = pi;
1693                 *p_fw_cons = &p_sp_sb->sb_info.sb_virt->pi_array[pi];
1694                 rc = ECORE_SUCCESS;
1695                 break;
1696         }
1697
1698         return rc;
1699 }
1700
1701 enum _ecore_status_t ecore_int_unregister_cb(struct ecore_hwfn *p_hwfn, u8 pi)
1702 {
1703         struct ecore_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;
1704
1705         if (p_sp_sb->pi_info_arr[pi].comp_cb == OSAL_NULL)
1706                 return ECORE_NOMEM;
1707
1708         p_sp_sb->pi_info_arr[pi].comp_cb = OSAL_NULL;
1709         p_sp_sb->pi_info_arr[pi].cookie = OSAL_NULL;
1710         return ECORE_SUCCESS;
1711 }
1712
1713 u16 ecore_int_get_sp_sb_id(struct ecore_hwfn *p_hwfn)
1714 {
1715         return p_hwfn->p_sp_sb->sb_info.igu_sb_id;
1716 }
1717
/* Program IGU_REG_PF_CONFIGURATION according to the requested interrupt
 * mode (INTa/MSI/MSI-X/polling) and record the mode in the device.
 */
void ecore_int_igu_enable_int(struct ecore_hwfn *p_hwfn,
                              struct ecore_ptt *p_ptt,
                              enum ecore_int_mode int_mode)
{
        u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN;

#ifndef ASIC_ONLY
        if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
                DP_INFO(p_hwfn, "FPGA - don't enable ATTN generation in IGU\n");
        else
#endif
                igu_pf_conf |= IGU_PF_CONF_ATTN_BIT_EN;

        p_hwfn->p_dev->int_mode = int_mode;
        switch (p_hwfn->p_dev->int_mode) {
        case ECORE_INT_MODE_INTA:
                igu_pf_conf |= IGU_PF_CONF_INT_LINE_EN;
                igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
                break;

        case ECORE_INT_MODE_MSI:
                igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN;
                igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
                break;

        case ECORE_INT_MODE_MSIX:
                igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN;
                break;
        case ECORE_INT_MODE_POLL:
                /* Polling: only the function-enable bit is set */
                break;
        }

        ecore_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, igu_pf_conf);
}
1752
/* Enable attention generation: program the AEU edge latches and the
 * attention-enable mask in IGU, then unmask AEU signals toward IGU.
 * Skipped entirely on FPGA emulation.
 */
static void ecore_int_igu_enable_attn(struct ecore_hwfn *p_hwfn,
                                      struct ecore_ptt *p_ptt)
{
#ifndef ASIC_ONLY
        if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
                DP_INFO(p_hwfn,
                        "FPGA - Don't enable Attentions in IGU and MISC\n");
                return;
        }
#endif

        /* Configure AEU signal change to produce attentions */
        ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0);
        ecore_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0xfff);
        ecore_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0xfff);
        ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0xfff);

        /* Ensure the IGU writes are visible before unmasking in MISC */
        OSAL_MMIOWB(p_hwfn->p_dev);

        /* Unmask AEU signals toward IGU */
        ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff);
}
1775
/* Enable interrupts and attentions for this hwfn: apply temporary AEU
 * masking workarounds, enable attention generation, request the
 * slowpath IRQ and finally enable interrupt generation in the
 * requested mode.
 *
 * @return ECORE_SUCCESS, or ECORE_NORESOURCES if the IRQ request fails.
 */
enum _ecore_status_t
ecore_int_igu_enable(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
                     enum ecore_int_mode int_mode)
{
        enum _ecore_status_t rc = ECORE_SUCCESS;
        u32 tmp, reg_addr;

        /* @@@tmp - Mask General HW attentions 0-31, Enable 32-36 */
        tmp = ecore_rd(p_hwfn, p_ptt, MISC_REG_AEU_ENABLE4_IGU_OUT_0);
        tmp |= 0xf;
        ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_ENABLE3_IGU_OUT_0, 0);
        ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_ENABLE4_IGU_OUT_0, tmp);

        /* @@@tmp - Starting with MFW 8.2.1.0 we've started hitting AVS stop
         * attentions. Since we're waiting for BRCM answer regarding this
         * attention, in the meanwhile we simply mask it.
         */
        tmp = ecore_rd(p_hwfn, p_ptt, MISC_REG_AEU_ENABLE4_IGU_OUT_0);
        tmp &= ~0x800;
        ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_ENABLE4_IGU_OUT_0, tmp);

        /* @@@tmp - Mask interrupt sources - should move to init tool;
         * Also, correct for A0 [might still change in B0.
         */
        reg_addr =
            attn_blocks[BLOCK_BRB].chip_regs[ECORE_GET_TYPE(p_hwfn->p_dev)].
            int_regs[0]->mask_addr;
        tmp = ecore_rd(p_hwfn, p_ptt, reg_addr);
        tmp |= (1 << 21);       /* Was PKT4_LEN_ERROR */
        ecore_wr(p_hwfn, p_ptt, reg_addr, tmp);

        ecore_int_igu_enable_attn(p_hwfn, p_ptt);

        /* In INTa mode only the lead hwfn services the shared interrupt
         * line, so only it requests the slowpath IRQ.
         */
        if ((int_mode != ECORE_INT_MODE_INTA) || IS_LEAD_HWFN(p_hwfn)) {
                rc = OSAL_SLOWPATH_IRQ_REQ(p_hwfn);
                if (rc != ECORE_SUCCESS) {
                        DP_NOTICE(p_hwfn, true,
                                  "Slowpath IRQ request failed\n");
                        return ECORE_NORESOURCES;
                }
                p_hwfn->b_int_requested = true;
        }

        /* Enable interrupt Generation */
        ecore_int_igu_enable_int(p_hwfn, p_ptt, int_mode);

        p_hwfn->b_int_enabled = 1;

        return rc;
}
1826
1827 void ecore_int_igu_disable_int(struct ecore_hwfn *p_hwfn,
1828                                struct ecore_ptt *p_ptt)
1829 {
1830         p_hwfn->b_int_enabled = 0;
1831
1832         if (IS_VF(p_hwfn->p_dev))
1833                 return;
1834
1835         ecore_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, 0);
1836 }
1837
1838 #define IGU_CLEANUP_SLEEP_LENGTH                (1000)
/* Issue an IGU SB cleanup command through the command-control registers
 * (this path also works for VFs, unlike the IGU BAR) and poll the
 * matching cleanup-status bit until HW reflects the requested state or
 * the retry budget is exhausted.
 */
void ecore_int_igu_cleanup_sb(struct ecore_hwfn *p_hwfn,
                              struct ecore_ptt *p_ptt,
                              u32 sb_id, bool cleanup_set, u16 opaque_fid)
{
        u32 cmd_ctrl = 0, val = 0, sb_bit = 0, sb_bit_addr = 0, data = 0;
        u32 pxp_addr = IGU_CMD_INT_ACK_BASE + sb_id;
        u32 sleep_cnt = IGU_CLEANUP_SLEEP_LENGTH;
        u8 type = 0;            /* FIXME MichalS type??? */

        /* Status registers are expected 0x80 apart per cleanup type */
        OSAL_BUILD_BUG_ON((IGU_REG_CLEANUP_STATUS_4 -
                           IGU_REG_CLEANUP_STATUS_0) != 0x200);

        /* USE Control Command Register to perform cleanup. There is an
         * option to do this using IGU bar, but then it can't be used for VFs.
         */

        /* Set the data field */
        SET_FIELD(data, IGU_CLEANUP_CLEANUP_SET, cleanup_set ? 1 : 0);
        SET_FIELD(data, IGU_CLEANUP_CLEANUP_TYPE, type);
        SET_FIELD(data, IGU_CLEANUP_COMMAND_TYPE, IGU_COMMAND_TYPE_SET);

        /* Set the control register */
        SET_FIELD(cmd_ctrl, IGU_CTRL_REG_PXP_ADDR, pxp_addr);
        SET_FIELD(cmd_ctrl, IGU_CTRL_REG_FID, opaque_fid);
        SET_FIELD(cmd_ctrl, IGU_CTRL_REG_TYPE, IGU_CTRL_CMD_TYPE_WR);

        ecore_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_32LSB_DATA, data);

        /* The data word must be visible before the control write that
         * triggers command execution.
         */
        OSAL_BARRIER(p_hwfn->p_dev);

        ecore_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_CTRL, cmd_ctrl);

        OSAL_MMIOWB(p_hwfn->p_dev);

        /* calculate where to read the status bit from */
        sb_bit = 1 << (sb_id % 32);
        sb_bit_addr = sb_id / 32 * sizeof(u32);

        sb_bit_addr += IGU_REG_CLEANUP_STATUS_0 + (0x80 * type);

        /* Now wait for the command to complete */
        while (--sleep_cnt) {
                val = ecore_rd(p_hwfn, p_ptt, sb_bit_addr);
                if ((val & sb_bit) == (cleanup_set ? sb_bit : 0))
                        break;
                OSAL_MSLEEP(5);
        }

        if (!sleep_cnt)
                DP_NOTICE(p_hwfn, true,
                          "Timeout waiting for clear status 0x%08x [for sb %d]\n",
                          val, sb_id);
}
1892
/* Run the cleanup command on one SB (optionally set first, then always
 * clear) and zero out its CAU protocol-index memory.
 */
void ecore_int_igu_init_pure_rt_single(struct ecore_hwfn *p_hwfn,
                                       struct ecore_ptt *p_ptt,
                                       u32 sb_id, u16 opaque, bool b_set)
{
        int pi;

        /* Set */
        if (b_set)
                ecore_int_igu_cleanup_sb(p_hwfn, p_ptt, sb_id, 1, opaque);

        /* Clear */
        ecore_int_igu_cleanup_sb(p_hwfn, p_ptt, sb_id, 0, opaque);

        /* Clear the CAU for the SB */
        /* NOTE(review): the magic 12 looks like PIS_PER_SB - confirm
         * and replace with the named constant if so.
         */
        for (pi = 0; pi < 12; pi++)
                ecore_wr(p_hwfn, p_ptt,
                         CAU_REG_PI_MEMORY + (sb_id * 12 + pi) * 4, 0);
}
1911
/* Run the SB cleanup flow on every protocol SB of this PF and, when
 * @b_slowpath is set, on the slowpath (default) SB as well. Also
 * temporarily tweaks the IGU block configuration until that logic
 * moves into the init tool.
 */
void ecore_int_igu_init_pure_rt(struct ecore_hwfn *p_hwfn,
                                struct ecore_ptt *p_ptt,
                                bool b_set, bool b_slowpath)
{
        u32 igu_base_sb = p_hwfn->hw_info.p_igu_info->igu_base_sb;
        u32 igu_sb_cnt = p_hwfn->hw_info.p_igu_info->igu_sb_cnt;
        u32 sb_id = 0, val = 0;

        /* @@@TBD MichalK temporary... should be moved to init-tool... */
        val = ecore_rd(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION);
        val |= IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN;
        val &= ~IGU_REG_BLOCK_CONFIGURATION_PXP_TPH_INTERFACE_EN;
        ecore_wr(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION, val);
        /* end temporary */

        DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
                   "IGU cleaning SBs [%d,...,%d]\n",
                   igu_base_sb, igu_base_sb + igu_sb_cnt - 1);

        for (sb_id = igu_base_sb; sb_id < igu_base_sb + igu_sb_cnt; sb_id++)
                ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt, sb_id,
                                                  p_hwfn->hw_info.opaque_fid,
                                                  b_set);

        if (!b_slowpath)
                return;

        /* The default SB is outside the [igu_base_sb, +cnt) range */
        sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
        DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
                   "IGU cleaning slowpath SB [%d]\n", sb_id);
        ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt, sb_id,
                                          p_hwfn->hw_info.opaque_fid, b_set);
}
1945
/* Read one IGU CAM line and, if it is a valid entry, record its
 * function/vector details in the hwfn's IGU map.
 *
 * @return the raw CAM value (callers also use it for stop conditions).
 */
static u32 ecore_int_igu_read_cam_block(struct ecore_hwfn *p_hwfn,
                                        struct ecore_ptt *p_ptt, u16 sb_id)
{
        u32 val = ecore_rd(p_hwfn, p_ptt,
                           IGU_REG_MAPPING_MEMORY + sizeof(u32) * sb_id);
        struct ecore_igu_block *p_block;

        p_block = &p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks[sb_id];

        /* stop scanning when hit first invalid PF entry */
        if (!GET_FIELD(val, IGU_MAPPING_LINE_VALID) &&
            GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID))
                goto out;

        /* Fill the block information */
        p_block->status = ECORE_IGU_STATUS_VALID;
        p_block->function_id = GET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER);
        p_block->is_pf = GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID);
        p_block->vector_number = GET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER);

        DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
                   "IGU_BLOCK: [SB 0x%04x, Value in CAM 0x%08x] func_id = %d"
                   " is_pf = %d vector_num = 0x%x\n",
                   sb_id, val, p_block->function_id, p_block->is_pf,
                   p_block->vector_number);

out:
        return val;
}
1975
/* Scan the IGU CAM and build this hwfn's IGU info: default (slowpath)
 * SB id, the base/count of the PF's protocol SBs and the pool of SBs
 * usable by this PF's VFs. Expects the PF's SBs (and each VF range) to
 * be laid out consecutively in the CAM.
 *
 * @return ECORE_SUCCESS, ECORE_NOMEM on allocation failure, or
 *         ECORE_INVAL if the CAM did not yield a usable mapping.
 */
enum _ecore_status_t ecore_int_igu_read_cam(struct ecore_hwfn *p_hwfn,
                                            struct ecore_ptt *p_ptt)
{
        struct ecore_igu_info *p_igu_info;
        struct ecore_igu_block *p_block;
        u16 sb_id, last_iov_sb_id = 0;
        u32 min_vf, max_vf, val;
        u16 prev_sb_id = 0xFF;

        p_hwfn->hw_info.p_igu_info = OSAL_ALLOC(p_hwfn->p_dev,
                                                GFP_KERNEL,
                                                sizeof(*p_igu_info));
        if (!p_hwfn->hw_info.p_igu_info)
                return ECORE_NOMEM;

        OSAL_MEMSET(p_hwfn->hw_info.p_igu_info, 0, sizeof(*p_igu_info));

        p_igu_info = p_hwfn->hw_info.p_igu_info;

        /* Initialize base sb / sb cnt for PFs and VFs */
        p_igu_info->igu_base_sb = 0xffff;
        p_igu_info->igu_sb_cnt = 0;
        p_igu_info->igu_dsb_id = 0xffff;
        p_igu_info->igu_base_sb_iov = 0xffff;

/* The VF id range owned by this PF; empty without SR-IOV support */
#ifdef CONFIG_ECORE_SRIOV
        min_vf = p_hwfn->hw_info.first_vf_in_pf;
        max_vf = p_hwfn->hw_info.first_vf_in_pf +
            p_hwfn->p_dev->sriov_info.total_vfs;
#else
        min_vf = 0;
        max_vf = 0;
#endif

        for (sb_id = 0; sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
             sb_id++) {
                p_block = &p_igu_info->igu_map.igu_blocks[sb_id];
                val = ecore_int_igu_read_cam_block(p_hwfn, p_ptt, sb_id);
                /* Stop at the first invalid PF entry (same condition
                 * ecore_int_igu_read_cam_block() bails out on).
                 */
                if (!GET_FIELD(val, IGU_MAPPING_LINE_VALID) &&
                    GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID))
                        break;

                if (p_block->is_pf) {
                        if (p_block->function_id == p_hwfn->rel_pf_id) {
                                p_block->status |= ECORE_IGU_STATUS_PF;

                                if (p_block->vector_number == 0) {
                                        /* Vector 0 is the default SB */
                                        if (p_igu_info->igu_dsb_id == 0xffff)
                                                p_igu_info->igu_dsb_id = sb_id;
                                } else {
                                        if (p_igu_info->igu_base_sb == 0xffff) {
                                                p_igu_info->igu_base_sb = sb_id;
                                        } else if (prev_sb_id != sb_id - 1) {
                                                DP_NOTICE(p_hwfn->p_dev, false,
                                                          "consecutive igu"
                                                          " vectors for HWFN"
                                                          " %x broken",
                                                          p_hwfn->rel_pf_id);
                                                break;
                                        }
                                        prev_sb_id = sb_id;
                                        /* we don't count the default */
                                        (p_igu_info->igu_sb_cnt)++;
                                }
                        }
                } else {
                        if ((p_block->function_id >= min_vf) &&
                            (p_block->function_id < max_vf)) {
                                /* Available for VFs of this PF */
                                if (p_igu_info->igu_base_sb_iov == 0xffff) {
                                        p_igu_info->igu_base_sb_iov = sb_id;
                                } else if (last_iov_sb_id != sb_id - 1) {
                                        if (!val)
                                                DP_VERBOSE(p_hwfn->p_dev,
                                                           ECORE_MSG_INTR,
                                                           "First uninited IGU"
                                                           " CAM entry at"
                                                           " index 0x%04x\n",
                                                           sb_id);
                                        else
                                                DP_NOTICE(p_hwfn->p_dev, false,
                                                          "Consecutive igu"
                                                          " vectors for HWFN"
                                                          " %x vfs is broken"
                                                          " [jumps from %04x"
                                                          " to %04x]\n",
                                                          p_hwfn->rel_pf_id,
                                                          last_iov_sb_id,
                                                          sb_id);
                                        break;
                                }
                                p_block->status |= ECORE_IGU_STATUS_FREE;
                                p_hwfn->hw_info.p_igu_info->free_blks++;
                                last_iov_sb_id = sb_id;
                        }
                }
        }
        p_igu_info->igu_sb_cnt_iov = p_igu_info->free_blks;

        DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
                   "IGU igu_base_sb=0x%x [IOV 0x%x] igu_sb_cnt=%d [IOV 0x%x] "
                   "igu_dsb_id=0x%x\n",
                   p_igu_info->igu_base_sb, p_igu_info->igu_base_sb_iov,
                   p_igu_info->igu_sb_cnt, p_igu_info->igu_sb_cnt_iov,
                   p_igu_info->igu_dsb_id);

        if (p_igu_info->igu_base_sb == 0xffff ||
            p_igu_info->igu_dsb_id == 0xffff || p_igu_info->igu_sb_cnt == 0) {
                DP_NOTICE(p_hwfn, true,
                          "IGU CAM returned invalid values igu_base_sb=0x%x "
                          "igu_sb_cnt=%d igu_dsb_id=0x%x\n",
                          p_igu_info->igu_base_sb, p_igu_info->igu_sb_cnt,
                          p_igu_info->igu_dsb_id);
                return ECORE_INVAL;
        }

        return ECORE_SUCCESS;
}
2094
2095 /**
2096  * @brief Initialize igu runtime registers
2097  *
2098  * @param p_hwfn
2099  */
2100 void ecore_int_igu_init_rt(struct ecore_hwfn *p_hwfn)
2101 {
2102         u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN;
2103
2104         STORE_RT_REG(p_hwfn, IGU_REG_PF_CONFIGURATION_RT_OFFSET, igu_pf_conf);
2105 }
2106
2107 #define LSB_IGU_CMD_ADDR (IGU_REG_SISR_MDPC_WMASK_LSB_UPPER - \
2108                           IGU_CMD_INT_ACK_BASE)
2109 #define MSB_IGU_CMD_ADDR (IGU_REG_SISR_MDPC_WMASK_MSB_UPPER - \
2110                           IGU_CMD_INT_ACK_BASE)
2111 u64 ecore_int_igu_read_sisr_reg(struct ecore_hwfn *p_hwfn)
2112 {
2113         u32 intr_status_hi = 0, intr_status_lo = 0;
2114         u64 intr_status = 0;
2115
2116         intr_status_lo = REG_RD(p_hwfn,
2117                                 GTT_BAR0_MAP_REG_IGU_CMD +
2118                                 LSB_IGU_CMD_ADDR * 8);
2119         intr_status_hi = REG_RD(p_hwfn,
2120                                 GTT_BAR0_MAP_REG_IGU_CMD +
2121                                 MSB_IGU_CMD_ADDR * 8);
2122         intr_status = ((u64)intr_status_hi << 32) + (u64)intr_status_lo;
2123
2124         return intr_status;
2125 }
2126
/* Initialize the slowpath DPC object and mark it as usable */
static void ecore_int_sp_dpc_setup(struct ecore_hwfn *p_hwfn)
{
        OSAL_DPC_INIT(p_hwfn->sp_dpc, p_hwfn);
        p_hwfn->b_sp_dpc_enabled = true;
}
2132
2133 static enum _ecore_status_t ecore_int_sp_dpc_alloc(struct ecore_hwfn *p_hwfn)
2134 {
2135         p_hwfn->sp_dpc = OSAL_DPC_ALLOC(p_hwfn);
2136         if (!p_hwfn->sp_dpc)
2137                 return ECORE_NOMEM;
2138
2139         return ECORE_SUCCESS;
2140 }
2141
/* Free the slowpath DPC allocated by ecore_int_sp_dpc_alloc() */
static void ecore_int_sp_dpc_free(struct ecore_hwfn *p_hwfn)
{
        OSAL_FREE(p_hwfn->p_dev, p_hwfn->sp_dpc);
}
2146
/* Allocate all interrupt-path resources: slowpath DPC, slowpath status
 * block and the attention status block.
 *
 * @return ECORE_SUCCESS or the first failing allocation's error code.
 */
enum _ecore_status_t ecore_int_alloc(struct ecore_hwfn *p_hwfn,
                                     struct ecore_ptt *p_ptt)
{
        enum _ecore_status_t rc = ECORE_SUCCESS;

        rc = ecore_int_sp_dpc_alloc(p_hwfn);
        if (rc != ECORE_SUCCESS) {
                DP_ERR(p_hwfn->p_dev, "Failed to allocate sp dpc mem\n");
                return rc;
        }

        rc = ecore_int_sp_sb_alloc(p_hwfn, p_ptt);
        if (rc != ECORE_SUCCESS) {
                DP_ERR(p_hwfn->p_dev, "Failed to allocate sp sb mem\n");
                return rc;
        }

        rc = ecore_int_sb_attn_alloc(p_hwfn, p_ptt);
        if (rc != ECORE_SUCCESS)
                DP_ERR(p_hwfn->p_dev, "Failed to allocate sb attn mem\n");

        return rc;
}
2170
/* Release all interrupt-related resources allocated by ecore_int_alloc():
 * slow-path status block, attention status block and the slow-path DPC.
 */
void ecore_int_free(struct ecore_hwfn *p_hwfn)
{
	ecore_int_sp_sb_free(p_hwfn);
	ecore_int_sb_attn_free(p_hwfn);
	ecore_int_sp_dpc_free(p_hwfn);
}
2177
/* Set up the slow-path status block, the attention status block and the
 * slow-path DPC. Silently does nothing if the hwfn or either status block
 * has not been allocated yet.
 */
void ecore_int_setup(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	if (!p_hwfn || !p_hwfn->p_sp_sb || !p_hwfn->p_sb_attn)
		return;

	ecore_int_sb_setup(p_hwfn, p_ptt, &p_hwfn->p_sp_sb->sb_info);
	ecore_int_sb_attn_setup(p_hwfn, p_ptt);
	ecore_int_sp_dpc_setup(p_hwfn);
}
2187
2188 void ecore_int_get_num_sbs(struct ecore_hwfn *p_hwfn,
2189                            struct ecore_sb_cnt_info *p_sb_cnt_info)
2190 {
2191         struct ecore_igu_info *info = p_hwfn->hw_info.p_igu_info;
2192
2193         if (!info || !p_sb_cnt_info)
2194                 return;
2195
2196         p_sb_cnt_info->sb_cnt = info->igu_sb_cnt;
2197         p_sb_cnt_info->sb_iov_cnt = info->igu_sb_cnt_iov;
2198         p_sb_cnt_info->sb_free_blk = info->free_blks;
2199 }
2200
2201 u16 ecore_int_queue_id_from_sb_id(struct ecore_hwfn *p_hwfn, u16 sb_id)
2202 {
2203         struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
2204
2205         /* Determine origin of SB id */
2206         if ((sb_id >= p_info->igu_base_sb) &&
2207             (sb_id < p_info->igu_base_sb + p_info->igu_sb_cnt)) {
2208                 return sb_id - p_info->igu_base_sb;
2209         } else if ((sb_id >= p_info->igu_base_sb_iov) &&
2210                    (sb_id < p_info->igu_base_sb_iov + p_info->igu_sb_cnt_iov)) {
2211                 return sb_id - p_info->igu_base_sb_iov + p_info->igu_sb_cnt;
2212         }
2213
2214         DP_NOTICE(p_hwfn, true, "SB %d not in range for function\n",
2215                   sb_id);
2216         return 0;
2217 }
2218
/* Clear the interrupt-requested flag on every hwfn of the device,
 * preventing further interrupt requests after the ISR is released.
 */
void ecore_int_disable_post_isr_release(struct ecore_dev *p_dev)
{
	int i;

	for_each_hwfn(p_dev, i)
		p_dev->hwfns[i].b_int_requested = false;
}