New upstream version 17.11.4
[deb_dpdk.git] / drivers / net / qede / base / ecore_int.c
1 /*
2  * Copyright (c) 2016 QLogic Corporation.
3  * All rights reserved.
4  * www.qlogic.com
5  *
6  * See LICENSE.qede_pmd for copyright and licensing details.
7  */
8
9 #include "bcm_osal.h"
10 #include "ecore.h"
11 #include "ecore_spq.h"
12 #include "ecore_gtt_reg_addr.h"
13 #include "ecore_init_ops.h"
14 #include "ecore_rt_defs.h"
15 #include "ecore_int.h"
16 #include "reg_addr.h"
17 #include "ecore_hw.h"
18 #include "ecore_sriov.h"
19 #include "ecore_vf.h"
20 #include "ecore_hw_defs.h"
21 #include "ecore_hsi_common.h"
22 #include "ecore_mcp.h"
23
/* Per protocol-index callback registration for a status block entry. */
struct ecore_pi_info {
	ecore_int_comp_cb_t comp_cb;	/* Completion callback to invoke */
	void *cookie;		/* Will be sent to the compl cb function */
};
28
/* Slowpath status block together with its per-protocol-index callbacks. */
struct ecore_sb_sp_info {
	struct ecore_sb_info sb_info;
	/* per protocol index data */
	struct ecore_pi_info pi_info_arr[PIS_PER_SB_E4];
};
34
/* Kind of AEU attention source: regular attention vs. parity error. */
enum ecore_attention_type {
	ECORE_ATTN_TYPE_ATTN,
	ECORE_ATTN_TYPE_PARITY,
};
39
40 #define SB_ATTN_ALIGNED_SIZE(p_hwfn) \
41         ALIGNED_TYPE_SIZE(struct atten_status_block, p_hwfn)
42
/* Descriptor of one entry in an AEU "after invert" register: a printable
 * name, flags encoding how many consecutive sources the entry covers and
 * how to treat them, an optional handler callback and the HW block the
 * source belongs to.
 */
struct aeu_invert_reg_bit {
	char bit_name[30];

/* Source is a parity error rather than a regular attention */
#define ATTENTION_PARITY                (1 << 0)

/* Number of consecutive attention bits covered by this entry */
#define ATTENTION_LENGTH_MASK           (0x00000ff0)
#define ATTENTION_LENGTH_SHIFT          (4)
#define ATTENTION_LENGTH(flags)         (((flags) & ATTENTION_LENGTH_MASK) >> \
                                         ATTENTION_LENGTH_SHIFT)
#define ATTENTION_SINGLE                (1 << ATTENTION_LENGTH_SHIFT)
#define ATTENTION_PAR                   (ATTENTION_SINGLE | ATTENTION_PARITY)
#define ATTENTION_PAR_INT               ((2 << ATTENTION_LENGTH_SHIFT) | \
                                         ATTENTION_PARITY)

/* Multiple bits start with this offset */
#define ATTENTION_OFFSET_MASK           (0x000ff000)
#define ATTENTION_OFFSET_SHIFT          (12)

/* Encodes an aeu_invert_reg_special_type value for sources whose meaning
 * differs between adapter variants (see ATTENTION_BB_DIFFERENT users).
 */
#define ATTENTION_BB_MASK               (0x00700000)
#define ATTENTION_BB_SHIFT              (20)
#define ATTENTION_BB(value)             ((value) << ATTENTION_BB_SHIFT)
#define ATTENTION_BB_DIFFERENT          (1 << 23)

#define ATTENTION_CLEAR_ENABLE          (1 << 28)
	unsigned int flags;

	/* Callback to call if attention will be triggered */
	enum _ecore_status_t (*cb)(struct ecore_hwfn *p_hwfn);

	enum block_id block_index;
};
74
/* One AEU "after invert" register: descriptors for each of its 32 bits. */
struct aeu_invert_reg {
	struct aeu_invert_reg_bit bits[32];
};
78
79 #define MAX_ATTN_GRPS           (8)
80 #define NUM_ATTN_REGS           (9)
81
/* MCP attention callback - log the MCP CPU state and mask all further
 * MCP events so the attention does not fire repeatedly.
 */
static enum _ecore_status_t ecore_mcp_attn_cb(struct ecore_hwfn *p_hwfn)
{
	u32 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_STATE);

	DP_INFO(p_hwfn->p_dev, "MCP_REG_CPU_STATE: %08x - Masking...\n", tmp);
	ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_EVENT_MASK, 0xffffffff);

	return ECORE_SUCCESS;
}
91
92 #define ECORE_PSWHST_ATTENTION_DISABLED_PF_MASK         (0x3c000)
93 #define ECORE_PSWHST_ATTENTION_DISABLED_PF_SHIFT        (14)
94 #define ECORE_PSWHST_ATTENTION_DISABLED_VF_MASK         (0x03fc0)
95 #define ECORE_PSWHST_ATTENTION_DISABLED_VF_SHIFT        (6)
96 #define ECORE_PSWHST_ATTENTION_DISABLED_VALID_MASK      (0x00020)
97 #define ECORE_PSWHST_ATTENTION_DISABLED_VALID_SHIFT     (5)
98 #define ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_MASK     (0x0001e)
99 #define ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_SHIFT    (1)
100 #define ECORE_PSWHST_ATTENTION_DISABLED_WRITE_MASK      (0x1)
101 #define ECORE_PSWHST_ATTNETION_DISABLED_WRITE_SHIFT     (0)
102 #define ECORE_PSWHST_ATTENTION_VF_DISABLED              (0x1)
103 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS         (0x1)
104 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_MASK         (0x1)
105 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_SHIFT        (0)
106 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_MASK     (0x1e)
107 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_SHIFT    (1)
108 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_MASK   (0x20)
109 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_SHIFT  (5)
110 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_MASK      (0x3fc0)
111 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_SHIFT     (6)
112 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_MASK      (0x3c000)
113 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_SHIFT     (14)
114 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_MASK    (0x3fc0000)
115 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_SHIFT   (18)
116 static enum _ecore_status_t ecore_pswhst_attn_cb(struct ecore_hwfn *p_hwfn)
117 {
118         u32 tmp =
119             ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
120                      PSWHST_REG_VF_DISABLED_ERROR_VALID);
121
122         /* Disabled VF access */
123         if (tmp & ECORE_PSWHST_ATTENTION_VF_DISABLED) {
124                 u32 addr, data;
125
126                 addr = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
127                                 PSWHST_REG_VF_DISABLED_ERROR_ADDRESS);
128                 data = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
129                                 PSWHST_REG_VF_DISABLED_ERROR_DATA);
130                 DP_INFO(p_hwfn->p_dev,
131                         "PF[0x%02x] VF [0x%02x] [Valid 0x%02x] Client [0x%02x]"
132                         " Write [0x%02x] Addr [0x%08x]\n",
133                         (u8)((data & ECORE_PSWHST_ATTENTION_DISABLED_PF_MASK)
134                              >> ECORE_PSWHST_ATTENTION_DISABLED_PF_SHIFT),
135                         (u8)((data & ECORE_PSWHST_ATTENTION_DISABLED_VF_MASK)
136                              >> ECORE_PSWHST_ATTENTION_DISABLED_VF_SHIFT),
137                         (u8)((data &
138                               ECORE_PSWHST_ATTENTION_DISABLED_VALID_MASK) >>
139                               ECORE_PSWHST_ATTENTION_DISABLED_VALID_SHIFT),
140                         (u8)((data &
141                               ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_MASK) >>
142                               ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_SHIFT),
143                         (u8)((data &
144                               ECORE_PSWHST_ATTENTION_DISABLED_WRITE_MASK) >>
145                               ECORE_PSWHST_ATTNETION_DISABLED_WRITE_SHIFT),
146                         addr);
147         }
148
149         tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
150                        PSWHST_REG_INCORRECT_ACCESS_VALID);
151         if (tmp & ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS) {
152                 u32 addr, data, length;
153
154                 addr = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
155                                 PSWHST_REG_INCORRECT_ACCESS_ADDRESS);
156                 data = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
157                                 PSWHST_REG_INCORRECT_ACCESS_DATA);
158                 length = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
159                                   PSWHST_REG_INCORRECT_ACCESS_LENGTH);
160
161                 DP_INFO(p_hwfn->p_dev,
162                         "Incorrect access to %08x of length %08x - PF [%02x]"
163                         " VF [%04x] [valid %02x] client [%02x] write [%02x]"
164                         " Byte-Enable [%04x] [%08x]\n",
165                         addr, length,
166                         (u8)((data &
167                       ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_MASK) >>
168                       ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_SHIFT),
169                         (u8)((data &
170                       ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_MASK) >>
171                       ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_SHIFT),
172                         (u8)((data &
173                       ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_MASK) >>
174                       ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_SHIFT),
175                         (u8)((data &
176                       ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_MASK) >>
177                       ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_SHIFT),
178                         (u8)((data &
179                       ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_MASK) >>
180                       ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_SHIFT),
181                         (u8)((data &
182                       ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_MASK) >>
183                       ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_SHIFT),
184                         data);
185         }
186
187         /* TODO - We know 'some' of these are legal due to virtualization,
188          * but is it true for all of them?
189          */
190         return ECORE_SUCCESS;
191 }
192
193 #define ECORE_GRC_ATTENTION_VALID_BIT           (1 << 0)
194 #define ECORE_GRC_ATTENTION_ADDRESS_MASK        (0x7fffff << 0)
195 #define ECORE_GRC_ATTENTION_RDWR_BIT            (1 << 23)
196 #define ECORE_GRC_ATTENTION_MASTER_MASK         (0xf << 24)
197 #define ECORE_GRC_ATTENTION_MASTER_SHIFT        (24)
198 #define ECORE_GRC_ATTENTION_PF_MASK             (0xf)
199 #define ECORE_GRC_ATTENTION_VF_MASK             (0xff << 4)
200 #define ECORE_GRC_ATTENTION_VF_SHIFT            (4)
201 #define ECORE_GRC_ATTENTION_PRIV_MASK           (0x3 << 14)
202 #define ECORE_GRC_ATTENTION_PRIV_SHIFT          (14)
203 #define ECORE_GRC_ATTENTION_PRIV_VF             (0)
204 static const char *grc_timeout_attn_master_to_str(u8 master)
205 {
206         switch (master) {
207         case 1:
208                 return "PXP";
209         case 2:
210                 return "MCP";
211         case 3:
212                 return "MSDM";
213         case 4:
214                 return "PSDM";
215         case 5:
216                 return "YSDM";
217         case 6:
218                 return "USDM";
219         case 7:
220                 return "TSDM";
221         case 8:
222                 return "XSDM";
223         case 9:
224                 return "DBU";
225         case 10:
226                 return "DMAE";
227         default:
228                 return "Unknown";
229         }
230 }
231
/* GRC attention callback - if a GRC timeout event is latched, decode and
 * log it (address, master, PF/VF) and clear the validity bit. Returns
 * ECORE_INVAL when no timeout event was valid (i.e. the attention was for
 * something other than a timeout, which is treated as fatal by the caller
 * chain), ECORE_SUCCESS otherwise.
 */
static enum _ecore_status_t ecore_grc_attn_cb(struct ecore_hwfn *p_hwfn)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;
	u32 tmp, tmp2;

	/* We've already cleared the timeout interrupt register, so we learn
	 * of interrupts via the validity register.
	 * Any attention which is not for a timeout event is treated as fatal.
	 */
	tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
		       GRC_REG_TIMEOUT_ATTN_ACCESS_VALID);
	if (!(tmp & ECORE_GRC_ATTENTION_VALID_BIT)) {
		rc = ECORE_INVAL;
		goto out;
	}

	/* Read the GRC timeout information */
	tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
		       GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_0);
	tmp2 = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
			GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_1);

	/* The address field is in dwords; shift left by 2 for a byte address */
	DP_NOTICE(p_hwfn->p_dev, false,
		  "GRC timeout [%08x:%08x] - %s Address [%08x] [Master %s] [PF: %02x %s %02x]\n",
		  tmp2, tmp,
		  (tmp & ECORE_GRC_ATTENTION_RDWR_BIT) ? "Write to"
						       : "Read from",
		  (tmp & ECORE_GRC_ATTENTION_ADDRESS_MASK) << 2,
		  grc_timeout_attn_master_to_str(
			(tmp & ECORE_GRC_ATTENTION_MASTER_MASK) >>
			 ECORE_GRC_ATTENTION_MASTER_SHIFT),
		  (tmp2 & ECORE_GRC_ATTENTION_PF_MASK),
		  (((tmp2 & ECORE_GRC_ATTENTION_PRIV_MASK) >>
		  ECORE_GRC_ATTENTION_PRIV_SHIFT) ==
		  ECORE_GRC_ATTENTION_PRIV_VF) ? "VF" : "(Irrelevant:)",
		  (tmp2 & ECORE_GRC_ATTENTION_VF_MASK) >>
		  ECORE_GRC_ATTENTION_VF_SHIFT);

	/* Clean the validity bit */
	ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt,
		 GRC_REG_TIMEOUT_ATTN_ACCESS_VALID, 0);
out:
	return rc;
}
276
277 #define ECORE_PGLUE_ATTENTION_VALID (1 << 29)
278 #define ECORE_PGLUE_ATTENTION_RD_VALID (1 << 26)
279 #define ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK (0xf << 20)
280 #define ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT (20)
281 #define ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID (1 << 19)
282 #define ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK (0xff << 24)
283 #define ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT (24)
284 #define ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR (1 << 21)
285 #define ECORE_PGLUE_ATTENTION_DETAILS2_BME      (1 << 22)
286 #define ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN (1 << 23)
287 #define ECORE_PGLUE_ATTENTION_ICPL_VALID (1 << 23)
288 #define ECORE_PGLUE_ATTENTION_ZLR_VALID (1 << 25)
289 #define ECORE_PGLUE_ATTENTION_ILT_VALID (1 << 23)
290
291 enum _ecore_status_t ecore_pglueb_rbc_attn_handler(struct ecore_hwfn *p_hwfn,
292                                                    struct ecore_ptt *p_ptt)
293 {
294         u32 tmp;
295
296         tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS2);
297         if (tmp & ECORE_PGLUE_ATTENTION_VALID) {
298                 u32 addr_lo, addr_hi, details;
299
300                 addr_lo = ecore_rd(p_hwfn, p_ptt,
301                                    PGLUE_B_REG_TX_ERR_WR_ADD_31_0);
302                 addr_hi = ecore_rd(p_hwfn, p_ptt,
303                                    PGLUE_B_REG_TX_ERR_WR_ADD_63_32);
304                 details = ecore_rd(p_hwfn, p_ptt,
305                                    PGLUE_B_REG_TX_ERR_WR_DETAILS);
306
307                 DP_NOTICE(p_hwfn, false,
308                           "Illegal write by chip to [%08x:%08x] blocked. Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x] Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n",
309                           addr_hi, addr_lo, details,
310                           (u8)((details &
311                                 ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK) >>
312                                ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT),
313                           (u8)((details &
314                                 ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK) >>
315                                ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT),
316                           (u8)((details &
317                                ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0),
318                           tmp,
319                           (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR) ?
320                                 1 : 0),
321                           (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_BME) ?
322                                 1 : 0),
323                           (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN) ?
324                                 1 : 0));
325         }
326
327         tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_RD_DETAILS2);
328         if (tmp & ECORE_PGLUE_ATTENTION_RD_VALID) {
329                 u32 addr_lo, addr_hi, details;
330
331                 addr_lo = ecore_rd(p_hwfn, p_ptt,
332                                    PGLUE_B_REG_TX_ERR_RD_ADD_31_0);
333                 addr_hi = ecore_rd(p_hwfn, p_ptt,
334                                    PGLUE_B_REG_TX_ERR_RD_ADD_63_32);
335                 details = ecore_rd(p_hwfn, p_ptt,
336                                    PGLUE_B_REG_TX_ERR_RD_DETAILS);
337
338                 DP_NOTICE(p_hwfn, false,
339                           "Illegal read by chip from [%08x:%08x] blocked. Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x] Details2 %08x [Was_error %02x BME deassert %02x FID_enable deassert %02x]\n",
340                           addr_hi, addr_lo, details,
341                           (u8)((details &
342                                 ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK) >>
343                                ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT),
344                           (u8)((details &
345                                 ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK) >>
346                                ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT),
347                           (u8)((details &
348                                ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID) ? 1 : 0),
349                           tmp,
350                           (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR) ?
351                                 1 : 0),
352                           (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_BME) ?
353                                 1 : 0),
354                           (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN) ?
355                                 1 : 0));
356         }
357
358         tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL);
359         if (tmp & ECORE_PGLUE_ATTENTION_ICPL_VALID)
360                 DP_NOTICE(p_hwfn, false, "ICPL erorr - %08x\n", tmp);
361
362         tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS);
363         if (tmp & ECORE_PGLUE_ATTENTION_ZLR_VALID) {
364                 u32 addr_hi, addr_lo;
365
366                 addr_lo = ecore_rd(p_hwfn, p_ptt,
367                                    PGLUE_B_REG_MASTER_ZLR_ERR_ADD_31_0);
368                 addr_hi = ecore_rd(p_hwfn, p_ptt,
369                                    PGLUE_B_REG_MASTER_ZLR_ERR_ADD_63_32);
370
371                 DP_NOTICE(p_hwfn, false,
372                           "ICPL erorr - %08x [Address %08x:%08x]\n",
373                           tmp, addr_hi, addr_lo);
374         }
375
376         tmp = ecore_rd(p_hwfn, p_ptt, PGLUE_B_REG_VF_ILT_ERR_DETAILS2);
377         if (tmp & ECORE_PGLUE_ATTENTION_ILT_VALID) {
378                 u32 addr_hi, addr_lo, details;
379
380                 addr_lo = ecore_rd(p_hwfn, p_ptt,
381                                    PGLUE_B_REG_VF_ILT_ERR_ADD_31_0);
382                 addr_hi = ecore_rd(p_hwfn, p_ptt,
383                                    PGLUE_B_REG_VF_ILT_ERR_ADD_63_32);
384                 details = ecore_rd(p_hwfn, p_ptt,
385                                    PGLUE_B_REG_VF_ILT_ERR_DETAILS);
386
387                 DP_NOTICE(p_hwfn, false,
388                           "ILT error - Details %08x Details2 %08x [Address %08x:%08x]\n",
389                           details, tmp, addr_hi, addr_lo);
390         }
391
392         /* Clear the indications */
393         ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_LATCHED_ERRORS_CLR, (1 << 2));
394
395         return ECORE_SUCCESS;
396 }
397
/* DPC-context wrapper: handle PGLUE B RBC attention using the DPC PTT. */
static enum _ecore_status_t ecore_pglueb_rbc_attn_cb(struct ecore_hwfn *p_hwfn)
{
	return ecore_pglueb_rbc_attn_handler(p_hwfn, p_hwfn->p_dpc_ptt);
}
402
/* General attention 32 - FW assertion. Notify the OS abstraction layer
 * of the HW error and return ECORE_INVAL to report the failure.
 */
static enum _ecore_status_t ecore_fw_assertion(struct ecore_hwfn *p_hwfn)
{
	DP_NOTICE(p_hwfn, false, "FW assertion!\n");

	ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_FW_ASSERT);

	return ECORE_INVAL;
}
411
/* General attention 35 - informational only; just log its arrival. */
static enum _ecore_status_t
ecore_general_attention_35(struct ecore_hwfn *p_hwfn)
{
	DP_INFO(p_hwfn, "General attention 35!\n");

	return ECORE_SUCCESS;
}
419
420 #define ECORE_DORQ_ATTENTION_REASON_MASK        (0xfffff)
421 #define ECORE_DORQ_ATTENTION_OPAQUE_MASK        (0xffff)
422 #define ECORE_DORQ_ATTENTION_OPAQUE_SHIFT       (0x0)
423 #define ECORE_DORQ_ATTENTION_SIZE_MASK          (0x7f)
424 #define ECORE_DORQ_ATTENTION_SIZE_SHIFT         (16)
425
426 #define ECORE_DB_REC_COUNT                      10
427 #define ECORE_DB_REC_INTERVAL                   100
428
/* Recover from a doorbell overflow on this PF. Assumes the sticky overflow
 * indication was set for this PF. Drains the doorbell queue, forces an
 * abort of any in-flight (E)DPM, releases the sticky drop indication and
 * re-issues the recorded doorbells. Returns ECORE_TIMEOUT if the queue
 * usage never reaches zero, ECORE_SUCCESS otherwise.
 */
static enum _ecore_status_t ecore_db_rec_attn(struct ecore_hwfn *p_hwfn,
					      struct ecore_ptt *p_ptt)
{
	u8 count = ECORE_DB_REC_COUNT;
	u32 usage = 1;

	/* wait for usage to zero or count to run out. This is necessary since
	 * EDPM doorbell transactions can take multiple 64b cycles, and as such
	 * can "split" over the pci. Possibly, the doorbell drop can happen with
	 * half an EDPM in the queue and other half dropped. Another EDPM
	 * doorbell to the same address (from doorbell recovery mechanism or
	 * from the doorbelling entity) could have first half dropped and second
	 * half interpreted as continuation of the first. To prevent such
	 * malformed doorbells from reaching the device, flush the queue before
	 * releasing the overflow sticky indication.
	 */
	while (count-- && usage) {
		usage = ecore_rd(p_hwfn, p_ptt, DORQ_REG_PF_USAGE_CNT);
		OSAL_UDELAY(ECORE_DB_REC_INTERVAL);
	}

	/* should have been depleted by now */
	if (usage) {
		DP_NOTICE(p_hwfn->p_dev, false,
			  "DB recovery: doorbell usage failed to zero after %d usec. usage was %x\n",
			  ECORE_DB_REC_INTERVAL * ECORE_DB_REC_COUNT, usage);
		return ECORE_TIMEOUT;
	}

	/* flush any pending (e)dpm as they may never arrive */
	ecore_wr(p_hwfn, p_ptt, DORQ_REG_DPM_FORCE_ABORT, 0x1);

	/* release overflow sticky indication (stop silently dropping
	 * everything)
	 */
	ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY, 0x0);

	/* repeat all last doorbells (doorbell drop recovery) */
	ecore_db_recovery_execute(p_hwfn, DB_REC_REAL_DEAL);

	return ECORE_SUCCESS;
}
472
/* DORQ attention callback - decode and log doorbell drop / FIFO overflow
 * events, run overflow recovery for this PF when it caused the overflow,
 * and clear the handled indications. Returns ECORE_SUCCESS when only drop
 * related indications were present (or the interrupt was already handled
 * by another PF), ECORE_INVAL for any other, non-recoverable indication.
 */
static enum _ecore_status_t ecore_dorq_attn_cb(struct ecore_hwfn *p_hwfn)
{
	u32 int_sts, first_drop_reason, details, address, overflow,
		all_drops_reason;
	struct ecore_ptt *p_ptt = p_hwfn->p_dpc_ptt;
	enum _ecore_status_t rc;

	int_sts = ecore_rd(p_hwfn, p_ptt, DORQ_REG_INT_STS);
	DP_NOTICE(p_hwfn->p_dev, false, "DORQ attention. int_sts was %x\n",
		  int_sts);

	/* int_sts may be zero since all PFs were interrupted for doorbell
	 * overflow but another one already handled it. Can abort here. If
	 * This PF also requires overflow recovery we will be interrupted again
	 */
	if (!int_sts)
		return ECORE_SUCCESS;

	/* check if db_drop or overflow happened */
	if (int_sts & (DORQ_REG_INT_STS_DB_DROP |
		       DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR)) {
		/* obtain data about db drop/overflow */
		first_drop_reason = ecore_rd(p_hwfn, p_ptt,
				  DORQ_REG_DB_DROP_REASON) &
				  ECORE_DORQ_ATTENTION_REASON_MASK;
		details = ecore_rd(p_hwfn, p_ptt,
				   DORQ_REG_DB_DROP_DETAILS);
		address = ecore_rd(p_hwfn, p_ptt,
				   DORQ_REG_DB_DROP_DETAILS_ADDRESS);
		overflow = ecore_rd(p_hwfn, p_ptt,
				    DORQ_REG_PF_OVFL_STICKY);
		all_drops_reason = ecore_rd(p_hwfn, p_ptt,
					    DORQ_REG_DB_DROP_DETAILS_REASON);

		/* log info */
		DP_NOTICE(p_hwfn->p_dev, false,
			  "Doorbell drop occurred\n"
			  "Address\t\t0x%08x\t(second BAR address)\n"
			  "FID\t\t0x%04x\t\t(Opaque FID)\n"
			  "Size\t\t0x%04x\t\t(in bytes)\n"
			  "1st drop reason\t0x%08x\t(details on first drop since last handling)\n"
			  "Sticky reasons\t0x%08x\t(all drop reasons since last handling)\n"
			  "Overflow\t0x%x\t\t(a per PF indication)\n",
			  address,
			  GET_FIELD(details, ECORE_DORQ_ATTENTION_OPAQUE),
			  GET_FIELD(details, ECORE_DORQ_ATTENTION_SIZE) * 4,
			  first_drop_reason, all_drops_reason, overflow);

		/* if this PF caused overflow, initiate recovery */
		if (overflow) {
			rc = ecore_db_rec_attn(p_hwfn, p_ptt);
			if (rc != ECORE_SUCCESS)
				return rc;
		}

		/* clear the doorbell drop details and prepare for next drop */
		ecore_wr(p_hwfn, p_ptt, DORQ_REG_DB_DROP_DETAILS_REL, 0);

		/* mark interrupt as handled (note: even if drop was due to a
		 * different reason than overflow we mark as handled)
		 */
		ecore_wr(p_hwfn, p_ptt, DORQ_REG_INT_STS_WR,
			 DORQ_REG_INT_STS_DB_DROP |
			 DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR);

		/* if there are no indications other than drop indications,
		 * success
		 */
		if ((int_sts & ~(DORQ_REG_INT_STS_DB_DROP |
				 DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR |
				 DORQ_REG_INT_STS_DORQ_FIFO_AFULL)) == 0)
			return ECORE_SUCCESS;
	}

	/* some other indication was present - non recoverable */
	DP_INFO(p_hwfn, "DORQ fatal attention\n");

	return ECORE_INVAL;
}
552
/* TM attention callback. On emulation (B0) the pending scan indications
 * are expected side effects of clock ratios - log, mask them and return
 * success; any other indication, and all indications on real HW, are
 * treated as fatal (ECORE_INVAL).
 */
static enum _ecore_status_t ecore_tm_attn_cb(struct ecore_hwfn *p_hwfn)
{
#ifndef ASIC_ONLY
	if (CHIP_REV_IS_EMUL_B0(p_hwfn->p_dev)) {
		u32 val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
				   TM_REG_INT_STS_1);

		/* anything beyond the pending-scan bits is a real error */
		if (val & ~(TM_REG_INT_STS_1_PEND_TASK_SCAN |
			    TM_REG_INT_STS_1_PEND_CONN_SCAN))
			return ECORE_INVAL;

		if (val & (TM_REG_INT_STS_1_PEND_TASK_SCAN |
			   TM_REG_INT_STS_1_PEND_CONN_SCAN))
			DP_INFO(p_hwfn,
				"TM attention on emulation - most likely"
				" results of clock-ratios\n");
		/* mask the pending-scan sources so they stop interrupting */
		val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, TM_REG_INT_MASK_1);
		val |= TM_REG_INT_MASK_1_PEND_CONN_SCAN |
		    TM_REG_INT_MASK_1_PEND_TASK_SCAN;
		ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, TM_REG_INT_MASK_1, val);

		return ECORE_SUCCESS;
	}
#endif

	return ECORE_INVAL;
}
580
/* Instead of major changes to the data-structure, we have a some 'special'
 * identifiers for sources that changed meaning between adapters.
 * These values are encoded into an entry's flags via ATTENTION_BB() and
 * index the aeu_descs_special[] table.
 */
enum aeu_invert_reg_special_type {
	AEU_INVERT_REG_SPECIAL_CNIG_0,
	AEU_INVERT_REG_SPECIAL_CNIG_1,
	AEU_INVERT_REG_SPECIAL_CNIG_2,
	AEU_INVERT_REG_SPECIAL_CNIG_3,
	AEU_INVERT_REG_SPECIAL_MAX,
};
591
/* Descriptors for the 'special' sources above, indexed by
 * enum aeu_invert_reg_special_type.
 */
static struct aeu_invert_reg_bit
aeu_descs_special[AEU_INVERT_REG_SPECIAL_MAX] = {
	{"CNIG port 0", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG},
	{"CNIG port 1", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG},
	{"CNIG port 2", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG},
	{"CNIG port 3", ATTENTION_SINGLE, OSAL_NULL, BLOCK_CNIG},
};
599
600 /* Notice aeu_invert_reg must be defined in the same order of bits as HW; */
601 static struct aeu_invert_reg aeu_descs[NUM_ATTN_REGS] = {
602         {
603          {                      /* After Invert 1 */
604           {"GPIO0 function%d", (32 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,
605            MAX_BLOCK_ID},
606           }
607          },
608
609         {
610          {                      /* After Invert 2 */
611           {"PGLUE config_space", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
612           {"PGLUE misc_flr", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
613           {"PGLUE B RBC", ATTENTION_PAR_INT, ecore_pglueb_rbc_attn_cb,
614            BLOCK_PGLUE_B},
615           {"PGLUE misc_mctp", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
616           {"Flash event", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
617           {"SMB event", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
618           {"Main Power", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
619           {"SW timers #%d",
620            (8 << ATTENTION_LENGTH_SHIFT) | (1 << ATTENTION_OFFSET_SHIFT),
621            OSAL_NULL, MAX_BLOCK_ID},
622           {"PCIE glue/PXP VPD %d", (16 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,
623            BLOCK_PGLCS},
624           }
625          },
626
627         {
628          {                      /* After Invert 3 */
629           {"General Attention %d", (32 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,
630            MAX_BLOCK_ID},
631           }
632          },
633
634         {
635          {                      /* After Invert 4 */
636           {"General Attention 32", ATTENTION_SINGLE | ATTENTION_CLEAR_ENABLE,
637            ecore_fw_assertion, MAX_BLOCK_ID},
638           {"General Attention %d",
639            (2 << ATTENTION_LENGTH_SHIFT) | (33 << ATTENTION_OFFSET_SHIFT),
640            OSAL_NULL, MAX_BLOCK_ID},
641           {"General Attention 35", ATTENTION_SINGLE | ATTENTION_CLEAR_ENABLE,
642            ecore_general_attention_35, MAX_BLOCK_ID},
643           {"NWS Parity", ATTENTION_PAR | ATTENTION_BB_DIFFERENT |
644                          ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_0),
645                          OSAL_NULL, BLOCK_NWS},
646           {"NWS Interrupt", ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT |
647                             ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_1),
648                             OSAL_NULL, BLOCK_NWS},
649           {"NWM Parity", ATTENTION_PAR | ATTENTION_BB_DIFFERENT |
650                          ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_2),
651                          OSAL_NULL, BLOCK_NWM},
652           {"NWM Interrupt", ATTENTION_SINGLE | ATTENTION_BB_DIFFERENT |
653                             ATTENTION_BB(AEU_INVERT_REG_SPECIAL_CNIG_3),
654                             OSAL_NULL, BLOCK_NWM},
655           {"MCP CPU", ATTENTION_SINGLE, ecore_mcp_attn_cb, MAX_BLOCK_ID},
656           {"MCP Watchdog timer", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
657           {"MCP M2P", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
658           {"AVS stop status ready", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
659           {"MSTAT", ATTENTION_PAR_INT, OSAL_NULL, MAX_BLOCK_ID},
660           {"MSTAT per-path", ATTENTION_PAR_INT, OSAL_NULL, MAX_BLOCK_ID},
661           {"Reserved %d", (6 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,
662            MAX_BLOCK_ID},
663           {"NIG", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_NIG},
664           {"BMB/OPTE/MCP", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BMB},
665           {"BTB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BTB},
666           {"BRB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BRB},
667           {"PRS", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PRS},
668           }
669          },
670
671         {
672          {                      /* After Invert 5 */
673           {"SRC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_SRC},
674           {"PB Client1", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PBF_PB1},
675           {"PB Client2", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PBF_PB2},
676           {"RPB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_RPB},
677           {"PBF", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PBF},
678           {"QM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_QM},
679           {"TM", ATTENTION_PAR_INT, ecore_tm_attn_cb, BLOCK_TM},
680           {"MCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MCM},
681           {"MSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MSDM},
682           {"MSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MSEM},
683           {"PCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PCM},
684           {"PSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSDM},
685           {"PSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSEM},
686           {"TCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TCM},
687           {"TSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TSDM},
688           {"TSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TSEM},
689           }
690          },
691
692         {
693          {                      /* After Invert 6 */
694           {"UCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_UCM},
695           {"USDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_USDM},
696           {"USEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_USEM},
697           {"XCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XCM},
698           {"XSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XSDM},
699           {"XSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XSEM},
700           {"YCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YCM},
701           {"YSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YSDM},
702           {"YSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YSEM},
703           {"XYLD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XYLD},
704           {"TMLD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TMLD},
705           {"MYLD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MULD},
706           {"YULD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YULD},
707           {"DORQ", ATTENTION_PAR_INT, ecore_dorq_attn_cb, BLOCK_DORQ},
708           {"DBG", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_DBG},
709           {"IPC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_IPC},
710           }
711          },
712
713         {
714          {                      /* After Invert 7 */
715           {"CCFC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CCFC},
716           {"CDU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CDU},
717           {"DMAE", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_DMAE},
718           {"IGU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_IGU},
719           {"ATC", ATTENTION_PAR_INT, OSAL_NULL, MAX_BLOCK_ID},
720           {"CAU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CAU},
721           {"PTU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PTU},
722           {"PRM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PRM},
723           {"TCFC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TCFC},
724           {"RDIF", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_RDIF},
725           {"TDIF", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TDIF},
726           {"RSS", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_RSS},
727           {"MISC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MISC},
728           {"MISCS", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MISCS},
729           {"PCIE", ATTENTION_PAR, OSAL_NULL, BLOCK_PCIE},
730           {"Vaux PCI core", ATTENTION_SINGLE, OSAL_NULL, BLOCK_PGLCS},
731           {"PSWRQ", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRQ},
732           }
733          },
734
735         {
736          {                      /* After Invert 8 */
737           {"PSWRQ (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRQ2},
738           {"PSWWR", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWWR},
739           {"PSWWR (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWWR2},
740           {"PSWRD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRD},
741           {"PSWRD (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRD2},
742           {"PSWHST", ATTENTION_PAR_INT, ecore_pswhst_attn_cb, BLOCK_PSWHST},
743           {"PSWHST (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWHST2},
744           {"GRC", ATTENTION_PAR_INT, ecore_grc_attn_cb, BLOCK_GRC},
745           {"CPMU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CPMU},
746           {"NCSI", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_NCSI},
747           {"MSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
748           {"PSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
749           {"TSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
750           {"USEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
751           {"XSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
752           {"YSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
753           {"pxp_misc_mps", ATTENTION_PAR, OSAL_NULL, BLOCK_PGLCS},
754           {"PCIE glue/PXP Exp. ROM", ATTENTION_SINGLE, OSAL_NULL, BLOCK_PGLCS},
755           {"PERST_B assertion", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
756           {"PERST_B deassertion", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
757           {"Reserved %d", (2 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,
758            MAX_BLOCK_ID},
759           }
760          },
761
762         {
763          {                      /* After Invert 9 */
764           {"MCP Latched memory", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
765           {"MCP Latched scratchpad cache", ATTENTION_SINGLE, OSAL_NULL,
766            MAX_BLOCK_ID},
767           {"MCP Latched ump_tx", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
768           {"MCP Latched scratchpad", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
769           {"Reserved %d", (28 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,
770            MAX_BLOCK_ID},
771           }
772          },
773
774 };
775
776 static struct aeu_invert_reg_bit *
777 ecore_int_aeu_translate(struct ecore_hwfn *p_hwfn,
778                         struct aeu_invert_reg_bit *p_bit)
779 {
780         if (!ECORE_IS_BB(p_hwfn->p_dev))
781                 return p_bit;
782
783         if (!(p_bit->flags & ATTENTION_BB_DIFFERENT))
784                 return p_bit;
785
786         return &aeu_descs_special[(p_bit->flags & ATTENTION_BB_MASK) >>
787                                   ATTENTION_BB_SHIFT];
788 }
789
790 static bool ecore_int_is_parity_flag(struct ecore_hwfn *p_hwfn,
791                                      struct aeu_invert_reg_bit *p_bit)
792 {
793         return !!(ecore_int_aeu_translate(p_hwfn, p_bit)->flags &
794                   ATTENTION_PARITY);
795 }
796
/* Mask of the attention bits considered valid attention state */
#define ATTN_STATE_BITS         (0xfff)
/* Subset of the attention bits that may be masked/unmasked in the IGU */
#define ATTN_BITS_MASKABLE      (0x3ff)
/* Driver-side context for the attention status block */
struct ecore_sb_attn_info {
        /* Virtual & Physical address of the SB */
        struct atten_status_block *sb_attn;
        dma_addr_t sb_phys;

        /* Last seen running index */
        u16 index;

        /* A mask of the AEU bits resulting in a parity error */
        u32 parity_mask[NUM_ATTN_REGS];

        /* A pointer to the attention description structure */
        struct aeu_invert_reg *p_aeu_desc;

        /* Previously asserted attentions, which are still unasserted */
        u16 known_attn;

        /* Cleanup address for the link's general hw attention */
        u32 mfw_attn_addr;
};
819
820 static u16 ecore_attn_update_idx(struct ecore_hwfn *p_hwfn,
821                                  struct ecore_sb_attn_info *p_sb_desc)
822 {
823         u16 rc = 0, index;
824
825         OSAL_MMIOWB(p_hwfn->p_dev);
826
827         index = OSAL_LE16_TO_CPU(p_sb_desc->sb_attn->sb_index);
828         if (p_sb_desc->index != index) {
829                 p_sb_desc->index = index;
830                 rc = ECORE_SB_ATT_IDX;
831         }
832
833         OSAL_MMIOWB(p_hwfn->p_dev);
834
835         return rc;
836 }
837
838 /**
839  * @brief ecore_int_assertion - handles asserted attention bits
840  *
841  * @param p_hwfn
842  * @param asserted_bits newly asserted bits
843  * @return enum _ecore_status_t
844  */
845 static enum _ecore_status_t ecore_int_assertion(struct ecore_hwfn *p_hwfn,
846                                                 u16 asserted_bits)
847 {
848         struct ecore_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
849         u32 igu_mask;
850
851         /* Mask the source of the attention in the IGU */
852         igu_mask = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
853                             IGU_REG_ATTENTION_ENABLE);
854         DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "IGU mask: 0x%08x --> 0x%08x\n",
855                    igu_mask, igu_mask & ~(asserted_bits & ATTN_BITS_MASKABLE));
856         igu_mask &= ~(asserted_bits & ATTN_BITS_MASKABLE);
857         ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, igu_mask);
858
859         DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
860                    "inner known ATTN state: 0x%04x --> 0x%04x\n",
861                    sb_attn_sw->known_attn,
862                    sb_attn_sw->known_attn | asserted_bits);
863         sb_attn_sw->known_attn |= asserted_bits;
864
865         /* Handle MCP events */
866         if (asserted_bits & 0x100) {
867                 ecore_mcp_handle_events(p_hwfn, p_hwfn->p_dpc_ptt);
868                 /* Clean the MCP attention */
869                 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt,
870                          sb_attn_sw->mfw_attn_addr, 0);
871         }
872
873         /* FIXME - this will change once we'll have GOOD gtt definitions */
874         DIRECT_REG_WR(p_hwfn,
875                       (u8 OSAL_IOMEM *) p_hwfn->regview +
876                       GTT_BAR0_MAP_REG_IGU_CMD +
877                       ((IGU_CMD_ATTN_BIT_SET_UPPER -
878                         IGU_CMD_INT_ACK_BASE) << 3), (u32)asserted_bits);
879
880         DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "set cmd IGU: 0x%04x\n",
881                    asserted_bits);
882
883         return ECORE_SUCCESS;
884 }
885
/* Log information about an attention for a specific HW block.
 * @DPDK - only the block id and attention type are logged here;
 * b_clear is currently unused in this implementation.
 */
static void ecore_int_attn_print(struct ecore_hwfn *p_hwfn,
                                 enum block_id id, enum dbg_attn_type type,
                                 bool b_clear)
{
        /* @DPDK */
        DP_NOTICE(p_hwfn->p_dev, false, "[block_id %d type %d]\n", id, type);
}
893
894 /**
895  * @brief ecore_int_deassertion_aeu_bit - handles the effects of a single
896  * cause of the attention
897  *
898  * @param p_hwfn
899  * @param p_aeu - descriptor of an AEU bit which caused the attention
900  * @param aeu_en_reg - register offset of the AEU enable reg. which configured
901  *  this bit to this group.
902  * @param bit_index - index of this bit in the aeu_en_reg
903  *
904  * @return enum _ecore_status_t
905  */
906 static enum _ecore_status_t
907 ecore_int_deassertion_aeu_bit(struct ecore_hwfn *p_hwfn,
908                               struct aeu_invert_reg_bit *p_aeu,
909                               u32 aeu_en_reg,
910                               const char *p_bit_name,
911                               u32 bitmask)
912 {
913         enum _ecore_status_t rc = ECORE_INVAL;
914         bool b_fatal = false;
915
916         DP_INFO(p_hwfn, "Deasserted attention `%s'[%08x]\n",
917                 p_bit_name, bitmask);
918
919         /* Call callback before clearing the interrupt status */
920         if (p_aeu->cb) {
921                 DP_INFO(p_hwfn, "`%s (attention)': Calling Callback function\n",
922                         p_bit_name);
923                 rc = p_aeu->cb(p_hwfn);
924         }
925
926         if (rc != ECORE_SUCCESS)
927                 b_fatal = true;
928
929         /* Print HW block interrupt registers */
930         if (p_aeu->block_index != MAX_BLOCK_ID) {
931                 ecore_int_attn_print(p_hwfn, p_aeu->block_index,
932                                      ATTN_TYPE_INTERRUPT, !b_fatal);
933 }
934
935         /* @DPDK */
936         /* Reach assertion if attention is fatal */
937         if (b_fatal || (strcmp(p_bit_name, "PGLUE B RBC") == 0)) {
938                 DP_NOTICE(p_hwfn, true, "`%s': Fatal attention\n",
939                           p_bit_name);
940
941                 ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_HW_ATTN);
942         }
943
944         /* Prevent this Attention from being asserted in the future */
945         if (p_aeu->flags & ATTENTION_CLEAR_ENABLE ||
946             p_hwfn->p_dev->attn_clr_en) {
947                 u32 val;
948                 u32 mask = ~bitmask;
949                 val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg);
950                 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, (val & mask));
951                 DP_ERR(p_hwfn, "`%s' - Disabled future attentions\n",
952                         p_bit_name);
953         }
954
955         return rc;
956 }
957
958 /**
959  * @brief ecore_int_deassertion_parity - handle a single parity AEU source
960  *
961  * @param p_hwfn
962  * @param p_aeu - descriptor of an AEU bit which caused the parity
963  * @param aeu_en_reg - address of the AEU enable register
964  * @param bit_index
965  */
966 static void ecore_int_deassertion_parity(struct ecore_hwfn *p_hwfn,
967                                          struct aeu_invert_reg_bit *p_aeu,
968                                          u32 aeu_en_reg, u8 bit_index)
969 {
970         u32 block_id = p_aeu->block_index, mask, val;
971
972         DP_NOTICE(p_hwfn->p_dev, false,
973                   "%s parity attention is set [address 0x%08x, bit %d]\n",
974                   p_aeu->bit_name, aeu_en_reg, bit_index);
975
976         if (block_id != MAX_BLOCK_ID) {
977                 ecore_int_attn_print(p_hwfn, block_id, ATTN_TYPE_PARITY, false);
978
979                 /* In A0, there's a single parity bit for several blocks */
980                 if (block_id == BLOCK_BTB) {
981                         ecore_int_attn_print(p_hwfn, BLOCK_OPTE,
982                                              ATTN_TYPE_PARITY, false);
983                         ecore_int_attn_print(p_hwfn, BLOCK_MCP,
984                                              ATTN_TYPE_PARITY, false);
985                 }
986         }
987
988         /* Prevent this parity error from being re-asserted */
989         mask = ~(0x1 << bit_index);
990         val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg);
991         ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, val & mask);
992         DP_INFO(p_hwfn, "`%s' - Disabled future parity errors\n",
993                 p_aeu->bit_name);
994 }
995
996 /**
997  * @brief - handles deassertion of previously asserted attentions.
998  *
999  * @param p_hwfn
1000  * @param deasserted_bits - newly deasserted bits
1001  * @return enum _ecore_status_t
1002  *
1003  */
1004 static enum _ecore_status_t ecore_int_deassertion(struct ecore_hwfn *p_hwfn,
1005                                                   u16 deasserted_bits)
1006 {
1007         struct ecore_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
1008         u32 aeu_inv_arr[NUM_ATTN_REGS], aeu_mask, aeu_en, en;
1009         u8 i, j, k, bit_idx;
1010         enum _ecore_status_t rc = ECORE_SUCCESS;
1011
1012         /* Read the attention registers in the AEU */
1013         for (i = 0; i < NUM_ATTN_REGS; i++) {
1014                 aeu_inv_arr[i] = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
1015                                           MISC_REG_AEU_AFTER_INVERT_1_IGU +
1016                                           i * 0x4);
1017                 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
1018                            "Deasserted bits [%d]: %08x\n", i, aeu_inv_arr[i]);
1019         }
1020
1021         /* Handle parity attentions first */
1022         for (i = 0; i < NUM_ATTN_REGS; i++) {
1023                 struct aeu_invert_reg *p_aeu = &sb_attn_sw->p_aeu_desc[i];
1024                 u32 parities;
1025
1026                 aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 + i * sizeof(u32);
1027                 en = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en);
1028                 parities = sb_attn_sw->parity_mask[i] & aeu_inv_arr[i] & en;
1029
1030                 /* Skip register in which no parity bit is currently set */
1031                 if (!parities)
1032                         continue;
1033
1034                 for (j = 0, bit_idx = 0; bit_idx < 32; j++) {
1035                         struct aeu_invert_reg_bit *p_bit = &p_aeu->bits[j];
1036
1037                         if (ecore_int_is_parity_flag(p_hwfn, p_bit) &&
1038                             !!(parities & (1 << bit_idx)))
1039                                 ecore_int_deassertion_parity(p_hwfn, p_bit,
1040                                                              aeu_en, bit_idx);
1041
1042                         bit_idx += ATTENTION_LENGTH(p_bit->flags);
1043                 }
1044         }
1045
1046         /* Find non-parity cause for attention and act */
1047         for (k = 0; k < MAX_ATTN_GRPS; k++) {
1048                 struct aeu_invert_reg_bit *p_aeu;
1049
1050                 /* Handle only groups whose attention is currently deasserted */
1051                 if (!(deasserted_bits & (1 << k)))
1052                         continue;
1053
1054                 for (i = 0; i < NUM_ATTN_REGS; i++) {
1055                         u32 bits;
1056
1057                         aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 +
1058                                  i * sizeof(u32) +
1059                                  k * sizeof(u32) * NUM_ATTN_REGS;
1060                         en = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en);
1061                         bits = aeu_inv_arr[i] & en;
1062
1063                         /* Skip if no bit from this group is currently set */
1064                         if (!bits)
1065                                 continue;
1066
1067                         /* Find all set bits from current register which belong
1068                          * to current group, making them responsible for the
1069                          * previous assertion.
1070                          */
1071                         for (j = 0, bit_idx = 0; bit_idx < 32; j++) {
1072                                 unsigned long int bitmask;
1073                                 u8 bit, bit_len;
1074
1075                                 /* Need to account bits with changed meaning */
1076                                 p_aeu = &sb_attn_sw->p_aeu_desc[i].bits[j];
1077
1078                                 bit = bit_idx;
1079                                 bit_len = ATTENTION_LENGTH(p_aeu->flags);
1080                                 if (ecore_int_is_parity_flag(p_hwfn, p_aeu)) {
1081                                         /* Skip Parity */
1082                                         bit++;
1083                                         bit_len--;
1084                                 }
1085
1086                                 /* Find the bits relating to HW-block, then
1087                                  * shift so they'll become LSB.
1088                                  */
1089                                 bitmask = bits & (((1 << bit_len) - 1) << bit);
1090                                 bitmask >>= bit;
1091
1092                                 if (bitmask) {
1093                                         u32 flags = p_aeu->flags;
1094                                         char bit_name[30];
1095                                         u8 num;
1096
1097                                         num = (u8)OSAL_FIND_FIRST_BIT(&bitmask,
1098                                                                 bit_len);
1099
1100                                         /* Some bits represent more than a
1101                                          * a single interrupt. Correctly print
1102                                          * their name.
1103                                          */
1104                                         if (ATTENTION_LENGTH(flags) > 2 ||
1105                                             ((flags & ATTENTION_PAR_INT) &&
1106                                             ATTENTION_LENGTH(flags) > 1))
1107                                                 OSAL_SNPRINTF(bit_name, 30,
1108                                                               p_aeu->bit_name,
1109                                                               num);
1110                                         else
1111                                                 OSAL_STRNCPY(bit_name,
1112                                                              p_aeu->bit_name,
1113                                                              30);
1114
1115                                         /* We now need to pass bitmask in its
1116                                          * correct position.
1117                                          */
1118                                         bitmask <<= bit;
1119
1120                                         /* Handle source of the attention */
1121                                         ecore_int_deassertion_aeu_bit(p_hwfn,
1122                                                                       p_aeu,
1123                                                                       aeu_en,
1124                                                                       bit_name,
1125                                                                       bitmask);
1126                                 }
1127
1128                                 bit_idx += ATTENTION_LENGTH(p_aeu->flags);
1129                         }
1130                 }
1131         }
1132
1133         /* Clear IGU indication for the deasserted bits */
1134         /* FIXME - this will change once we'll have GOOD gtt definitions */
1135         DIRECT_REG_WR(p_hwfn,
1136                       (u8 OSAL_IOMEM *) p_hwfn->regview +
1137                       GTT_BAR0_MAP_REG_IGU_CMD +
1138                       ((IGU_CMD_ATTN_BIT_CLR_UPPER -
1139                         IGU_CMD_INT_ACK_BASE) << 3), ~((u32)deasserted_bits));
1140
1141         /* Unmask deasserted attentions in IGU */
1142         aeu_mask = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
1143                             IGU_REG_ATTENTION_ENABLE);
1144         aeu_mask |= (deasserted_bits & ATTN_BITS_MASKABLE);
1145         ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, aeu_mask);
1146
1147         /* Clear deassertion from inner state */
1148         sb_attn_sw->known_attn &= ~deasserted_bits;
1149
1150         return rc;
1151 }
1152
/* Top-level attention handler: samples the attention status block, derives
 * which bits were newly asserted/deasserted relative to the known state and
 * dispatches to the assertion/deassertion handlers.
 */
static enum _ecore_status_t ecore_int_attentions(struct ecore_hwfn *p_hwfn)
{
        struct ecore_sb_attn_info *p_sb_attn_sw = p_hwfn->p_sb_attn;
        struct atten_status_block *p_sb_attn = p_sb_attn_sw->sb_attn;
        u16 index = 0, asserted_bits, deasserted_bits;
        u32 attn_bits = 0, attn_acks = 0;
        enum _ecore_status_t rc = ECORE_SUCCESS;

        /* Read current attention bits/acks - safeguard against attentions
         * by guaranting work on a synchronized timeframe
         */
        do {
                index = OSAL_LE16_TO_CPU(p_sb_attn->sb_index);
                attn_bits = OSAL_LE32_TO_CPU(p_sb_attn->atten_bits);
                attn_acks = OSAL_LE32_TO_CPU(p_sb_attn->atten_ack);
        } while (index != OSAL_LE16_TO_CPU(p_sb_attn->sb_index));
        p_sb_attn->sb_index = index;

        /* Attention / Deassertion are meaningful (and in correct state)
         * only when they differ and consistent with known state - deassertion
         * when previous attention & current ack, and assertion when current
         * attention with no previous attention
         */
        asserted_bits = (attn_bits & ~attn_acks & ATTN_STATE_BITS) &
            ~p_sb_attn_sw->known_attn;
        deasserted_bits = (~attn_bits & attn_acks & ATTN_STATE_BITS) &
            p_sb_attn_sw->known_attn;

        /* Bit 0x100 alone indicates an MFW event; anything else is logged
         * at INFO level with the full state.
         */
        if ((asserted_bits & ~0x100) || (deasserted_bits & ~0x100))
                DP_INFO(p_hwfn,
                        "Attention: Index: 0x%04x, Bits: 0x%08x, Acks: 0x%08x, asserted: 0x%04x, De-asserted 0x%04x [Prev. known: 0x%04x]\n",
                        index, attn_bits, attn_acks, asserted_bits,
                        deasserted_bits, p_sb_attn_sw->known_attn);
        else if (asserted_bits == 0x100)
                DP_INFO(p_hwfn, "MFW indication via attention\n");
        else
                DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
                           "MFW indication [deassertion]\n");

        if (asserted_bits) {
                rc = ecore_int_assertion(p_hwfn, asserted_bits);
                if (rc)
                        return rc;
        }

        if (deasserted_bits)
                rc = ecore_int_deassertion(p_hwfn, deasserted_bits);

        return rc;
}
1203
1204 static void ecore_sb_ack_attn(struct ecore_hwfn *p_hwfn,
1205                               void OSAL_IOMEM *igu_addr, u32 ack_cons)
1206 {
1207         struct igu_prod_cons_update igu_ack = { 0 };
1208
1209         igu_ack.sb_id_and_flags =
1210             ((ack_cons << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) |
1211              (1 << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) |
1212              (IGU_INT_NOP << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) |
1213              (IGU_SEG_ACCESS_ATTN <<
1214               IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT));
1215
1216         DIRECT_REG_WR(p_hwfn, igu_addr, igu_ack.sb_id_and_flags);
1217
1218         /* Both segments (interrupts & acks) are written to same place address;
1219          * Need to guarantee all commands will be received (in-order) by HW.
1220          */
1221         OSAL_MMIOWB(p_hwfn->p_dev);
1222         OSAL_BARRIER(p_hwfn->p_dev);
1223 }
1224
1225 void ecore_int_sp_dpc(osal_int_ptr_t hwfn_cookie)
1226 {
1227         struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)hwfn_cookie;
1228         struct ecore_pi_info *pi_info = OSAL_NULL;
1229         struct ecore_sb_attn_info *sb_attn;
1230         struct ecore_sb_info *sb_info;
1231         int arr_size;
1232         u16 rc = 0;
1233
1234         if (!p_hwfn)
1235                 return;
1236
1237         if (!p_hwfn->p_sp_sb) {
1238                 DP_ERR(p_hwfn->p_dev, "DPC called - no p_sp_sb\n");
1239                 return;
1240         }
1241
1242         sb_info = &p_hwfn->p_sp_sb->sb_info;
1243         arr_size = OSAL_ARRAY_SIZE(p_hwfn->p_sp_sb->pi_info_arr);
1244         if (!sb_info) {
1245                 DP_ERR(p_hwfn->p_dev,
1246                        "Status block is NULL - cannot ack interrupts\n");
1247                 return;
1248         }
1249
1250         if (!p_hwfn->p_sb_attn) {
1251                 DP_ERR(p_hwfn->p_dev, "DPC called - no p_sb_attn");
1252                 return;
1253         }
1254         sb_attn = p_hwfn->p_sb_attn;
1255
1256         DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "DPC Called! (hwfn %p %d)\n",
1257                    p_hwfn, p_hwfn->my_id);
1258
1259         /* Disable ack for def status block. Required both for msix +
1260          * inta in non-mask mode, in inta does no harm.
1261          */
1262         ecore_sb_ack(sb_info, IGU_INT_DISABLE, 0);
1263
1264         /* Gather Interrupts/Attentions information */
1265         if (!sb_info->sb_virt) {
1266                 DP_ERR(p_hwfn->p_dev,
1267                        "Interrupt Status block is NULL -"
1268                        " cannot check for new interrupts!\n");
1269         } else {
1270                 u32 tmp_index = sb_info->sb_ack;
1271                 rc = ecore_sb_update_sb_idx(sb_info);
1272                 DP_VERBOSE(p_hwfn->p_dev, ECORE_MSG_INTR,
1273                            "Interrupt indices: 0x%08x --> 0x%08x\n",
1274                            tmp_index, sb_info->sb_ack);
1275         }
1276
1277         if (!sb_attn || !sb_attn->sb_attn) {
1278                 DP_ERR(p_hwfn->p_dev,
1279                        "Attentions Status block is NULL -"
1280                        " cannot check for new attentions!\n");
1281         } else {
1282                 u16 tmp_index = sb_attn->index;
1283
1284                 rc |= ecore_attn_update_idx(p_hwfn, sb_attn);
1285                 DP_VERBOSE(p_hwfn->p_dev, ECORE_MSG_INTR,
1286                            "Attention indices: 0x%08x --> 0x%08x\n",
1287                            tmp_index, sb_attn->index);
1288         }
1289
1290         /* Check if we expect interrupts at this time. if not just ack them */
1291         if (!(rc & ECORE_SB_EVENT_MASK)) {
1292                 ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1);
1293                 return;
1294         }
1295
1296 /* Check the validity of the DPC ptt. If not ack interrupts and fail */
1297
1298         if (!p_hwfn->p_dpc_ptt) {
1299                 DP_NOTICE(p_hwfn->p_dev, true, "Failed to allocate PTT\n");
1300                 ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1);
1301                 return;
1302         }
1303
1304         if (rc & ECORE_SB_ATT_IDX)
1305                 ecore_int_attentions(p_hwfn);
1306
1307         if (rc & ECORE_SB_IDX) {
1308                 int pi;
1309
1310                 /* Since we only looked at the SB index, it's possible more
1311                  * than a single protocol-index on the SB incremented.
1312                  * Iterate over all configured protocol indices and check
1313                  * whether something happened for each.
1314                  */
1315                 for (pi = 0; pi < arr_size; pi++) {
1316                         pi_info = &p_hwfn->p_sp_sb->pi_info_arr[pi];
1317                         if (pi_info->comp_cb != OSAL_NULL)
1318                                 pi_info->comp_cb(p_hwfn, pi_info->cookie);
1319                 }
1320         }
1321
1322         if (sb_attn && (rc & ECORE_SB_ATT_IDX)) {
1323                 /* This should be done before the interrupts are enabled,
1324                  * since otherwise a new attention will be generated.
1325                  */
1326                 ecore_sb_ack_attn(p_hwfn, sb_info->igu_addr, sb_attn->index);
1327         }
1328
1329         ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1);
1330 }
1331
1332 static void ecore_int_sb_attn_free(struct ecore_hwfn *p_hwfn)
1333 {
1334         struct ecore_sb_attn_info *p_sb = p_hwfn->p_sb_attn;
1335
1336         if (!p_sb)
1337                 return;
1338
1339         if (p_sb->sb_attn) {
1340                 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_sb->sb_attn,
1341                                        p_sb->sb_phys,
1342                                        SB_ATTN_ALIGNED_SIZE(p_hwfn));
1343         }
1344         OSAL_FREE(p_hwfn->p_dev, p_sb);
1345 }
1346
1347 static void ecore_int_sb_attn_setup(struct ecore_hwfn *p_hwfn,
1348                                     struct ecore_ptt *p_ptt)
1349 {
1350         struct ecore_sb_attn_info *sb_info = p_hwfn->p_sb_attn;
1351
1352         OSAL_MEMSET(sb_info->sb_attn, 0, sizeof(*sb_info->sb_attn));
1353
1354         sb_info->index = 0;
1355         sb_info->known_attn = 0;
1356
1357         /* Configure Attention Status Block in IGU */
1358         ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_L,
1359                  DMA_LO(p_hwfn->p_sb_attn->sb_phys));
1360         ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_H,
1361                  DMA_HI(p_hwfn->p_sb_attn->sb_phys));
1362 }
1363
/* Initialize the attention status block info: record the ring's
 * virtual/physical addresses, attach the static AEU descriptor table,
 * and pre-compute a per-register parity bit-mask so the ISR can cheaply
 * distinguish parity attentions from regular ones.
 */
static void ecore_int_sb_attn_init(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt,
				   void *sb_virt_addr, dma_addr_t sb_phy_addr)
{
	struct ecore_sb_attn_info *sb_info = p_hwfn->p_sb_attn;
	int i, j, k;

	sb_info->sb_attn = sb_virt_addr;
	sb_info->sb_phys = sb_phy_addr;

	/* Set the pointer to the AEU descriptors */
	sb_info->p_aeu_desc = aeu_descs;

	/* Calculate Parity Masks */
	OSAL_MEMSET(sb_info->parity_mask, 0, sizeof(u32) * NUM_ATTN_REGS);
	for (i = 0; i < NUM_ATTN_REGS; i++) {
		/* j is array index, k is bit index; one descriptor may
		 * span several consecutive bits (ATTENTION_LENGTH), so
		 * k advances by the descriptor's length each step.
		 */
		for (j = 0, k = 0; k < 32; j++) {
			struct aeu_invert_reg_bit *p_aeu;

			p_aeu = &aeu_descs[i].bits[j];
			if (ecore_int_is_parity_flag(p_hwfn, p_aeu))
				sb_info->parity_mask[i] |= 1 << k;

			k += ATTENTION_LENGTH(p_aeu->flags);
		}
		DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
			   "Attn Mask [Reg %d]: 0x%08x\n",
			   i, sb_info->parity_mask[i]);
	}

	/* Set the address of cleanup for the mcp attention */
	sb_info->mfw_attn_addr = (p_hwfn->rel_pf_id << 3) +
	    MISC_REG_AEU_GENERAL_ATTN_0;

	ecore_int_sb_attn_setup(p_hwfn, p_ptt);
}
1401
1402 static enum _ecore_status_t ecore_int_sb_attn_alloc(struct ecore_hwfn *p_hwfn,
1403                                                     struct ecore_ptt *p_ptt)
1404 {
1405         struct ecore_dev *p_dev = p_hwfn->p_dev;
1406         struct ecore_sb_attn_info *p_sb;
1407         dma_addr_t p_phys = 0;
1408         void *p_virt;
1409
1410         /* SB struct */
1411         p_sb = OSAL_ALLOC(p_dev, GFP_KERNEL, sizeof(*p_sb));
1412         if (!p_sb) {
1413                 DP_NOTICE(p_dev, true,
1414                           "Failed to allocate `struct ecore_sb_attn_info'\n");
1415                 return ECORE_NOMEM;
1416         }
1417
1418         /* SB ring  */
1419         p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys,
1420                                          SB_ATTN_ALIGNED_SIZE(p_hwfn));
1421         if (!p_virt) {
1422                 DP_NOTICE(p_dev, true,
1423                           "Failed to allocate status block (attentions)\n");
1424                 OSAL_FREE(p_dev, p_sb);
1425                 return ECORE_NOMEM;
1426         }
1427
1428         /* Attention setup */
1429         p_hwfn->p_sb_attn = p_sb;
1430         ecore_int_sb_attn_init(p_hwfn, p_ptt, p_virt, p_phys);
1431
1432         return ECORE_SUCCESS;
1433 }
1434
1435 /* coalescing timeout = timeset << (timer_res + 1) */
1436 #define ECORE_CAU_DEF_RX_USECS 24
1437 #define ECORE_CAU_DEF_TX_USECS 48
1438
/* Fill a CAU SB entry for the given PF (and optionally VF) owner.
 * Both timesets default to the maximum (0x7F); the coalescing state and
 * timer resolutions are derived from the device-level coalescing
 * configuration (rx/tx_coalesce_usecs).
 */
void ecore_init_cau_sb_entry(struct ecore_hwfn *p_hwfn,
			     struct cau_sb_entry *p_sb_entry,
			     u8 pf_id, u16 vf_number, u8 vf_valid)
{
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	u32 cau_state;
	u8 timer_res;

	OSAL_MEMSET(p_sb_entry, 0, sizeof(*p_sb_entry));

	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_PF_NUMBER, pf_id);
	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_NUMBER, vf_number);
	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_VALID, vf_valid);
	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET0, 0x7F);
	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET1, 0x7F);

	cau_state = CAU_HC_DISABLE_STATE;

	if (p_dev->int_coalescing_mode == ECORE_COAL_MODE_ENABLE) {
		cau_state = CAU_HC_ENABLE_STATE;
		/* Fall back to driver defaults when unset */
		if (!p_dev->rx_coalesce_usecs)
			p_dev->rx_coalesce_usecs = ECORE_CAU_DEF_RX_USECS;
		if (!p_dev->tx_coalesce_usecs)
			p_dev->tx_coalesce_usecs = ECORE_CAU_DEF_TX_USECS;
	}

	/* Coalesce = (timeset << timer-res), timeset is 7bit wide */
	if (p_dev->rx_coalesce_usecs <= 0x7F)
		timer_res = 0;
	else if (p_dev->rx_coalesce_usecs <= 0xFF)
		timer_res = 1;
	else
		timer_res = 2;
	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES0, timer_res);

	/* Same resolution selection, independently for the Tx direction */
	if (p_dev->tx_coalesce_usecs <= 0x7F)
		timer_res = 0;
	else if (p_dev->tx_coalesce_usecs <= 0xFF)
		timer_res = 1;
	else
		timer_res = 2;
	SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES1, timer_res);

	SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE0, cau_state);
	SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE1, cau_state);
}
1485
/* Write a single CAU protocol-index (PI) entry for an IGU SB.
 * Before hw-init completes the value is staged into the runtime (RT)
 * array to be loaded by the init tool; afterwards it is written directly
 * into CAU PI memory. VFs cannot access CAU, so this is a no-op for them.
 */
static void _ecore_int_cau_conf_pi(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt,
				   u16 igu_sb_id, u32 pi_index,
				   enum ecore_coalescing_fsm coalescing_fsm,
				   u8 timeset)
{
	struct cau_pi_entry pi_entry;
	u32 sb_offset, pi_offset;

	if (IS_VF(p_hwfn->p_dev))
		return;/* @@@TBD MichalK- VF CAU... */

	/* PI entries of all SBs are laid out consecutively in CAU memory */
	sb_offset = igu_sb_id * PIS_PER_SB_E4;
	OSAL_MEMSET(&pi_entry, 0, sizeof(struct cau_pi_entry));

	SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_PI_TIMESET, timeset);
	/* FSM 0 serves the Rx coalescing state machine, FSM 1 the Tx one */
	if (coalescing_fsm == ECORE_COAL_RX_STATE_MACHINE)
		SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 0);
	else
		SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 1);

	pi_offset = sb_offset + pi_index;
	if (p_hwfn->hw_init_done) {
		ecore_wr(p_hwfn, p_ptt,
			 CAU_REG_PI_MEMORY + pi_offset * sizeof(u32),
			 *((u32 *)&(pi_entry)));
	} else {
		STORE_RT_REG(p_hwfn,
			     CAU_REG_PI_MEMORY_RT_OFFSET + pi_offset,
			     *((u32 *)&(pi_entry)));
	}
}
1518
/* Public wrapper - configure a CAU PI entry using the client's SB info
 * to resolve the IGU SB id.
 */
void ecore_int_cau_conf_pi(struct ecore_hwfn *p_hwfn,
			   struct ecore_ptt *p_ptt,
			   struct ecore_sb_info *p_sb, u32 pi_index,
			   enum ecore_coalescing_fsm coalescing_fsm,
			   u8 timeset)
{
	_ecore_int_cau_conf_pi(p_hwfn, p_ptt, p_sb->igu_sb_id,
			       pi_index, coalescing_fsm, timeset);
}
1528
/* Configure the CAU entry for a status block: its host address, its
 * owner (PF/VF), and - when coalescing is enabled - the Rx/Tx PI
 * coalescing parameters. After hw-init the wide-bus CAU memories are
 * written via DMAE; beforehand the values are staged in the RT array.
 */
void ecore_int_cau_conf_sb(struct ecore_hwfn *p_hwfn,
			   struct ecore_ptt *p_ptt,
			   dma_addr_t sb_phys, u16 igu_sb_id,
			   u16 vf_number, u8 vf_valid)
{
	struct cau_sb_entry sb_entry;

	ecore_init_cau_sb_entry(p_hwfn, &sb_entry, p_hwfn->rel_pf_id,
				vf_number, vf_valid);

	if (p_hwfn->hw_init_done) {
		/* Wide-bus, initialize via DMAE */
		u64 phys_addr = (u64)sb_phys;

		ecore_dmae_host2grc(p_hwfn, p_ptt,
				    (u64)(osal_uintptr_t)&phys_addr,
				    CAU_REG_SB_ADDR_MEMORY +
				    igu_sb_id * sizeof(u64), 2, 0);
		ecore_dmae_host2grc(p_hwfn, p_ptt,
				    (u64)(osal_uintptr_t)&sb_entry,
				    CAU_REG_SB_VAR_MEMORY +
				    igu_sb_id * sizeof(u64), 2, 0);
	} else {
		/* Initialize Status Block Address */
		STORE_RT_REG_AGG(p_hwfn,
				 CAU_REG_SB_ADDR_MEMORY_RT_OFFSET +
				 igu_sb_id * 2, sb_phys);

		STORE_RT_REG_AGG(p_hwfn,
				 CAU_REG_SB_VAR_MEMORY_RT_OFFSET +
				 igu_sb_id * 2, sb_entry);
	}

	/* Configure pi coalescing if set */
	if (p_hwfn->p_dev->int_coalescing_mode == ECORE_COAL_MODE_ENABLE) {
		/* eth will open queues for all tcs, so configure all of them
		 * properly, rather than just the active ones
		 */
		u8 num_tc = p_hwfn->hw_info.num_hw_tc;

		u8 timeset, timer_res;
		u8 i;

		/* timeset = (coalesce >> timer-res), timeset is 7bit wide */
		if (p_hwfn->p_dev->rx_coalesce_usecs <= 0x7F)
			timer_res = 0;
		else if (p_hwfn->p_dev->rx_coalesce_usecs <= 0xFF)
			timer_res = 1;
		else
			timer_res = 2;
		timeset = (u8)(p_hwfn->p_dev->rx_coalesce_usecs >> timer_res);
		_ecore_int_cau_conf_pi(p_hwfn, p_ptt, igu_sb_id, RX_PI,
				       ECORE_COAL_RX_STATE_MACHINE,
				       timeset);

		/* Same derivation for the Tx side, applied per TC */
		if (p_hwfn->p_dev->tx_coalesce_usecs <= 0x7F)
			timer_res = 0;
		else if (p_hwfn->p_dev->tx_coalesce_usecs <= 0xFF)
			timer_res = 1;
		else
			timer_res = 2;
		timeset = (u8)(p_hwfn->p_dev->tx_coalesce_usecs >> timer_res);
		for (i = 0; i < num_tc; i++) {
			_ecore_int_cau_conf_pi(p_hwfn, p_ptt,
					       igu_sb_id, TX_PI(i),
					       ECORE_COAL_TX_STATE_MACHINE,
					       timeset);
		}
	}
}
1599
/* (Re)initialize a status block: clear its memory and ack counter.
 * PFs also program the matching CAU SB entry; VFs cannot access CAU
 * directly, so only the host-side state is touched for them.
 */
void ecore_int_sb_setup(struct ecore_hwfn *p_hwfn,
			struct ecore_ptt *p_ptt, struct ecore_sb_info *sb_info)
{
	/* zero status block and ack counter */
	sb_info->sb_ack = 0;
	OSAL_MEMSET(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));

	if (IS_PF(p_hwfn->p_dev))
		ecore_int_cau_conf_sb(p_hwfn, p_ptt, sb_info->sb_phys,
				      sb_info->igu_sb_id, 0, 0);
}
1611
1612 struct ecore_igu_block *
1613 ecore_get_igu_free_sb(struct ecore_hwfn *p_hwfn, bool b_is_pf)
1614 {
1615         struct ecore_igu_block *p_block;
1616         u16 igu_id;
1617
1618         for (igu_id = 0; igu_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
1619              igu_id++) {
1620                 p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_id];
1621
1622                 if (!(p_block->status & ECORE_IGU_STATUS_VALID) ||
1623                     !(p_block->status & ECORE_IGU_STATUS_FREE))
1624                         continue;
1625
1626                 if (!!(p_block->status & ECORE_IGU_STATUS_PF) ==
1627                     b_is_pf)
1628                         return p_block;
1629         }
1630
1631         return OSAL_NULL;
1632 }
1633
1634 static u16 ecore_get_pf_igu_sb_id(struct ecore_hwfn *p_hwfn,
1635                                   u16 vector_id)
1636 {
1637         struct ecore_igu_block *p_block;
1638         u16 igu_id;
1639
1640         for (igu_id = 0; igu_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
1641              igu_id++) {
1642                 p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_id];
1643
1644                 if (!(p_block->status & ECORE_IGU_STATUS_VALID) ||
1645                     !p_block->is_pf ||
1646                     p_block->vector_number != vector_id)
1647                         continue;
1648
1649                 return igu_id;
1650         }
1651
1652         return ECORE_SB_INVALID_IDX;
1653 }
1654
1655 u16 ecore_get_igu_sb_id(struct ecore_hwfn *p_hwfn, u16 sb_id)
1656 {
1657         u16 igu_sb_id;
1658
1659         /* Assuming continuous set of IGU SBs dedicated for given PF */
1660         if (sb_id == ECORE_SP_SB_ID)
1661                 igu_sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
1662         else if (IS_PF(p_hwfn->p_dev))
1663                 igu_sb_id = ecore_get_pf_igu_sb_id(p_hwfn, sb_id + 1);
1664         else
1665                 igu_sb_id = ecore_vf_get_igu_sb_id(p_hwfn, sb_id);
1666
1667         if (igu_sb_id == ECORE_SB_INVALID_IDX)
1668                 DP_NOTICE(p_hwfn, true,
1669                           "Slowpath SB vector %04x doesn't exist\n",
1670                           sb_id);
1671         else if (sb_id == ECORE_SP_SB_ID)
1672                 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
1673                            "Slowpath SB index in IGU is 0x%04x\n", igu_sb_id);
1674         else
1675                 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
1676                            "SB [%04x] <--> IGU SB [%04x]\n", sb_id, igu_sb_id);
1677
1678         return igu_sb_id;
1679 }
1680
/* Initialize a client status block: resolve its IGU SB id, register it
 * in the IGU bookkeeping (PF CAM shadow or VF channel), compute the
 * doorbell-ack address in the BAR, and program the hardware via
 * ecore_int_sb_setup(). Returns ECORE_INVAL if no IGU SB matches sb_id.
 */
enum _ecore_status_t ecore_int_sb_init(struct ecore_hwfn *p_hwfn,
				       struct ecore_ptt *p_ptt,
				       struct ecore_sb_info *sb_info,
				       void *sb_virt_addr,
				       dma_addr_t sb_phy_addr, u16 sb_id)
{
	sb_info->sb_virt = sb_virt_addr;
	sb_info->sb_phys = sb_phy_addr;

	sb_info->igu_sb_id = ecore_get_igu_sb_id(p_hwfn, sb_id);

	if (sb_info->igu_sb_id == ECORE_SB_INVALID_IDX)
		return ECORE_INVAL;

	/* Let the igu info reference the client's SB info */
	if (sb_id != ECORE_SP_SB_ID) {
		if (IS_PF(p_hwfn->p_dev)) {
			struct ecore_igu_info *p_info;
			struct ecore_igu_block *p_block;

			/* Mark the CAM entry as in-use and track the client */
			p_info = p_hwfn->hw_info.p_igu_info;
			p_block = &p_info->entry[sb_info->igu_sb_id];

			p_block->sb_info = sb_info;
			p_block->status &= ~ECORE_IGU_STATUS_FREE;
			p_info->usage.free_cnt--;
		} else {
			ecore_vf_set_sb_info(p_hwfn, sb_id, sb_info);
		}
	}
#ifdef ECORE_CONFIG_DIRECT_HWFN
	sb_info->p_hwfn = p_hwfn;
#endif
	sb_info->p_dev = p_hwfn->p_dev;

	/* The igu address will hold the absolute address that needs to be
	 * written to for a specific status block
	 */
	if (IS_PF(p_hwfn->p_dev)) {
		sb_info->igu_addr = (u8 OSAL_IOMEM *)p_hwfn->regview +
		    GTT_BAR0_MAP_REG_IGU_CMD + (sb_info->igu_sb_id << 3);

	} else {
		/* VFs ack through their own BAR0 IGU window */
		sb_info->igu_addr =
		    (u8 OSAL_IOMEM *)p_hwfn->regview +
		    PXP_VF_BAR0_START_IGU +
		    ((IGU_CMD_INT_ACK_BASE + sb_info->igu_sb_id) << 3);
	}

	sb_info->flags |= ECORE_SB_INFO_INIT;

	ecore_int_sb_setup(p_hwfn, p_ptt, sb_info);

	return ECORE_SUCCESS;
}
1736
1737 enum _ecore_status_t ecore_int_sb_release(struct ecore_hwfn *p_hwfn,
1738                                           struct ecore_sb_info *sb_info,
1739                                           u16 sb_id)
1740 {
1741         struct ecore_igu_info *p_info;
1742         struct ecore_igu_block *p_block;
1743
1744         if (sb_info == OSAL_NULL)
1745                 return ECORE_SUCCESS;
1746
1747         /* zero status block and ack counter */
1748         sb_info->sb_ack = 0;
1749         OSAL_MEMSET(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));
1750
1751         if (IS_VF(p_hwfn->p_dev)) {
1752                 ecore_vf_set_sb_info(p_hwfn, sb_id, OSAL_NULL);
1753                 return ECORE_SUCCESS;
1754         }
1755
1756         p_info = p_hwfn->hw_info.p_igu_info;
1757         p_block = &p_info->entry[sb_info->igu_sb_id];
1758
1759         /* Vector 0 is reserved to Default SB */
1760         if (p_block->vector_number == 0) {
1761                 DP_ERR(p_hwfn, "Do Not free sp sb using this function");
1762                 return ECORE_INVAL;
1763         }
1764
1765         /* Lose reference to client's SB info, and fix counters */
1766         p_block->sb_info = OSAL_NULL;
1767         p_block->status |= ECORE_IGU_STATUS_FREE;
1768         p_info->usage.free_cnt++;
1769
1770         return ECORE_SUCCESS;
1771 }
1772
1773 static void ecore_int_sp_sb_free(struct ecore_hwfn *p_hwfn)
1774 {
1775         struct ecore_sb_sp_info *p_sb = p_hwfn->p_sp_sb;
1776
1777         if (!p_sb)
1778                 return;
1779
1780         if (p_sb->sb_info.sb_virt) {
1781                 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
1782                                        p_sb->sb_info.sb_virt,
1783                                        p_sb->sb_info.sb_phys,
1784                                        SB_ALIGNED_SIZE(p_hwfn));
1785         }
1786
1787         OSAL_FREE(p_hwfn->p_dev, p_sb);
1788 }
1789
1790 static enum _ecore_status_t ecore_int_sp_sb_alloc(struct ecore_hwfn *p_hwfn,
1791                                                   struct ecore_ptt *p_ptt)
1792 {
1793         struct ecore_sb_sp_info *p_sb;
1794         dma_addr_t p_phys = 0;
1795         void *p_virt;
1796
1797         /* SB struct */
1798         p_sb =
1799             OSAL_ALLOC(p_hwfn->p_dev, GFP_KERNEL,
1800                        sizeof(*p_sb));
1801         if (!p_sb) {
1802                 DP_NOTICE(p_hwfn, true,
1803                           "Failed to allocate `struct ecore_sb_info'\n");
1804                 return ECORE_NOMEM;
1805         }
1806
1807         /* SB ring  */
1808         p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
1809                                          &p_phys, SB_ALIGNED_SIZE(p_hwfn));
1810         if (!p_virt) {
1811                 DP_NOTICE(p_hwfn, true, "Failed to allocate status block\n");
1812                 OSAL_FREE(p_hwfn->p_dev, p_sb);
1813                 return ECORE_NOMEM;
1814         }
1815
1816         /* Status Block setup */
1817         p_hwfn->p_sp_sb = p_sb;
1818         ecore_int_sb_init(p_hwfn, p_ptt, &p_sb->sb_info,
1819                           p_virt, p_phys, ECORE_SP_SB_ID);
1820
1821         OSAL_MEMSET(p_sb->pi_info_arr, 0, sizeof(p_sb->pi_info_arr));
1822
1823         return ECORE_SUCCESS;
1824 }
1825
1826 enum _ecore_status_t ecore_int_register_cb(struct ecore_hwfn *p_hwfn,
1827                                            ecore_int_comp_cb_t comp_cb,
1828                                            void *cookie,
1829                                            u8 *sb_idx, __le16 **p_fw_cons)
1830 {
1831         struct ecore_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;
1832         enum _ecore_status_t rc = ECORE_NOMEM;
1833         u8 pi;
1834
1835         /* Look for a free index */
1836         for (pi = 0; pi < OSAL_ARRAY_SIZE(p_sp_sb->pi_info_arr); pi++) {
1837                 if (p_sp_sb->pi_info_arr[pi].comp_cb != OSAL_NULL)
1838                         continue;
1839
1840                 p_sp_sb->pi_info_arr[pi].comp_cb = comp_cb;
1841                 p_sp_sb->pi_info_arr[pi].cookie = cookie;
1842                 *sb_idx = pi;
1843                 *p_fw_cons = &p_sp_sb->sb_info.sb_virt->pi_array[pi];
1844                 rc = ECORE_SUCCESS;
1845                 break;
1846         }
1847
1848         return rc;
1849 }
1850
1851 enum _ecore_status_t ecore_int_unregister_cb(struct ecore_hwfn *p_hwfn, u8 pi)
1852 {
1853         struct ecore_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;
1854
1855         if (p_sp_sb->pi_info_arr[pi].comp_cb == OSAL_NULL)
1856                 return ECORE_NOMEM;
1857
1858         p_sp_sb->pi_info_arr[pi].comp_cb = OSAL_NULL;
1859         p_sp_sb->pi_info_arr[pi].cookie = OSAL_NULL;
1860         return ECORE_SUCCESS;
1861 }
1862
1863 u16 ecore_int_get_sp_sb_id(struct ecore_hwfn *p_hwfn)
1864 {
1865         return p_hwfn->p_sp_sb->sb_info.igu_sb_id;
1866 }
1867
/* Program the IGU PF configuration register for the requested interrupt
 * mode (INTa / MSI / MSI-X / polling) and enable attention generation
 * (except on FPGA emulation, where attentions stay off).
 */
void ecore_int_igu_enable_int(struct ecore_hwfn *p_hwfn,
			      struct ecore_ptt *p_ptt,
			      enum ecore_int_mode int_mode)
{
	u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN | IGU_PF_CONF_ATTN_BIT_EN;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
		DP_INFO(p_hwfn, "FPGA - don't enable ATTN generation in IGU\n");
		igu_pf_conf &= ~IGU_PF_CONF_ATTN_BIT_EN;
	}
#endif

	p_hwfn->p_dev->int_mode = int_mode;
	switch (p_hwfn->p_dev->int_mode) {
	case ECORE_INT_MODE_INTA:
		/* Legacy line interrupt - single ISR serves all SBs */
		igu_pf_conf |= IGU_PF_CONF_INT_LINE_EN;
		igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
		break;

	case ECORE_INT_MODE_MSI:
		/* Single-message MSI - also a single ISR */
		igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN;
		igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
		break;

	case ECORE_INT_MODE_MSIX:
		igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN;
		break;
	case ECORE_INT_MODE_POLL:
		/* No interrupt delivery; function remains enabled */
		break;
	}

	ecore_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, igu_pf_conf);
}
1902
/* Arm the attention path: configure the IGU edge latches and attention
 * enable register, then unmask AEU signals toward the IGU. Skipped on
 * FPGA emulation. The MMIO flush ensures the IGU writes land before the
 * AEU mask is lifted.
 */
static void ecore_int_igu_enable_attn(struct ecore_hwfn *p_hwfn,
				      struct ecore_ptt *p_ptt)
{
#ifndef ASIC_ONLY
	if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
		DP_INFO(p_hwfn,
			"FPGA - Don't enable Attentions in IGU and MISC\n");
		return;
	}
#endif

	/* Configure AEU signal change to produce attentions */
	ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0);
	ecore_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0xfff);
	ecore_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0xfff);
	ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0xfff);

	/* Flush the writes to IGU */
	OSAL_MMIOWB(p_hwfn->p_dev);

	/* Unmask AEU signals toward IGU */
	ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff);
}
1926
/* Enable interrupts for this function: arm attentions, request the
 * slowpath IRQ from the OSAL layer (for INTa only the lead hwfn owns the
 * line), and program the IGU for the requested mode. Returns
 * ECORE_NORESOURCES if the IRQ request fails.
 */
enum _ecore_status_t
ecore_int_igu_enable(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
			  enum ecore_int_mode int_mode)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;

	ecore_int_igu_enable_attn(p_hwfn, p_ptt);

	if ((int_mode != ECORE_INT_MODE_INTA) || IS_LEAD_HWFN(p_hwfn)) {
		rc = OSAL_SLOWPATH_IRQ_REQ(p_hwfn);
		if (rc != ECORE_SUCCESS) {
			DP_NOTICE(p_hwfn, true,
				  "Slowpath IRQ request failed\n");
			return ECORE_NORESOURCES;
		}
		p_hwfn->b_int_requested = true;
	}

	/* Enable interrupt Generation */
	ecore_int_igu_enable_int(p_hwfn, p_ptt, int_mode);

	p_hwfn->b_int_enabled = 1;

	return rc;
}
1952
/* Disable interrupt generation for this function. PFs also clear their
 * IGU configuration register; VFs have no access to it and only drop
 * the software flag.
 */
void ecore_int_igu_disable_int(struct ecore_hwfn *p_hwfn,
			       struct ecore_ptt *p_ptt)
{
	p_hwfn->b_int_enabled = 0;

	if (IS_VF(p_hwfn->p_dev))
		return;

	ecore_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, 0);
}
1963
1964 #define IGU_CLEANUP_SLEEP_LENGTH                (1000)
/* Issue an IGU cleanup command for one SB through the command-control
 * register interface (usable on behalf of VFs, unlike the IGU BAR) and
 * poll the matching cleanup-status bit until it reflects the requested
 * set/clear state or IGU_CLEANUP_SLEEP_LENGTH iterations elapse.
 */
static void ecore_int_igu_cleanup_sb(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt,
				     u32 igu_sb_id,
				     bool cleanup_set,
				     u16 opaque_fid)
{
	u32 cmd_ctrl = 0, val = 0, sb_bit = 0, sb_bit_addr = 0, data = 0;
	u32 pxp_addr = IGU_CMD_INT_ACK_BASE + igu_sb_id;
	u32 sleep_cnt = IGU_CLEANUP_SLEEP_LENGTH;
	u8 type = 0;		/* FIXME MichalS type??? */

	/* Status registers are expected 0x80 words (0x200 bytes) apart */
	OSAL_BUILD_BUG_ON((IGU_REG_CLEANUP_STATUS_4 -
			   IGU_REG_CLEANUP_STATUS_0) != 0x200);

	/* USE Control Command Register to perform cleanup. There is an
	 * option to do this using IGU bar, but then it can't be used for VFs.
	 */

	/* Set the data field */
	SET_FIELD(data, IGU_CLEANUP_CLEANUP_SET, cleanup_set ? 1 : 0);
	SET_FIELD(data, IGU_CLEANUP_CLEANUP_TYPE, type);
	SET_FIELD(data, IGU_CLEANUP_COMMAND_TYPE, IGU_COMMAND_TYPE_SET);

	/* Set the control register */
	SET_FIELD(cmd_ctrl, IGU_CTRL_REG_PXP_ADDR, pxp_addr);
	SET_FIELD(cmd_ctrl, IGU_CTRL_REG_FID, opaque_fid);
	SET_FIELD(cmd_ctrl, IGU_CTRL_REG_TYPE, IGU_CTRL_CMD_TYPE_WR);

	ecore_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_32LSB_DATA, data);

	/* Data must be visible before the control write triggers the cmd */
	OSAL_BARRIER(p_hwfn->p_dev);

	ecore_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_CTRL, cmd_ctrl);

	/* Flush the write to IGU */
	OSAL_MMIOWB(p_hwfn->p_dev);

	/* calculate where to read the status bit from */
	sb_bit = 1 << (igu_sb_id % 32);
	sb_bit_addr = igu_sb_id / 32 * sizeof(u32);

	sb_bit_addr += IGU_REG_CLEANUP_STATUS_0 + (0x80 * type);

	/* Now wait for the command to complete */
	while (--sleep_cnt) {
		val = ecore_rd(p_hwfn, p_ptt, sb_bit_addr);
		if ((val & sb_bit) == (cleanup_set ? sb_bit : 0))
			break;
		OSAL_MSLEEP(5);
	}

	if (!sleep_cnt)
		DP_NOTICE(p_hwfn, true,
			  "Timeout waiting for clear status 0x%08x [for sb %d]\n",
			  val, igu_sb_id);
}
2021
/* Clean one IGU SB: optionally issue a cleanup 'set', always issue a
 * cleanup 'clear', wait until the SB drops out of WRITE_DONE_PENDING,
 * then zero its CAU PI memory.
 */
void ecore_int_igu_init_pure_rt_single(struct ecore_hwfn *p_hwfn,
				       struct ecore_ptt *p_ptt,
				       u16 igu_sb_id, u16 opaque, bool b_set)
{
	struct ecore_igu_block *p_block;
	int pi, i;

	p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_sb_id];
	DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
		   "Cleaning SB [%04x]: func_id= %d is_pf = %d vector_num = 0x%0x\n",
		   igu_sb_id, p_block->function_id, p_block->is_pf,
		   p_block->vector_number);

	/* Set */
	if (b_set)
		ecore_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 1, opaque);

	/* Clear */
	ecore_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 0, opaque);

	/* Wait for the IGU SB to cleanup */
	for (i = 0; i < IGU_CLEANUP_SLEEP_LENGTH; i++) {
		u32 val;

		val = ecore_rd(p_hwfn, p_ptt,
			       IGU_REG_WRITE_DONE_PENDING +
			       ((igu_sb_id / 32) * 4));
		if (val & (1 << (igu_sb_id % 32)))
			OSAL_UDELAY(10);
		else
			break;
	}
	if (i == IGU_CLEANUP_SLEEP_LENGTH)
		DP_NOTICE(p_hwfn, true,
			  "Failed SB[0x%08x] still appearing in WRITE_DONE_PENDING\n",
			  igu_sb_id);

	/* Clear the CAU for the SB */
	/* NOTE(review): the literal 12 looks like the PI-per-SB count
	 * (cf. PIS_PER_SB_E4 used elsewhere) - confirm before changing.
	 */
	for (pi = 0; pi < 12; pi++)
		ecore_wr(p_hwfn, p_ptt,
			 CAU_REG_PI_MEMORY + (igu_sb_id * 12 + pi) * 4, 0);
}
2064
/* Clean all of this PF's IGU SBs (skipping VF-owned and the default SB),
 * optionally including the slowpath/default SB when b_slowpath is set.
 * Also tweaks the IGU block configuration (VF cleanup enable, TPH
 * interface disable) - marked as temporary pending an init-tool move.
 */
void ecore_int_igu_init_pure_rt(struct ecore_hwfn *p_hwfn,
				struct ecore_ptt *p_ptt,
				bool b_set, bool b_slowpath)
{
	struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
	struct ecore_igu_block *p_block;
	u16 igu_sb_id = 0;
	u32 val = 0;

	/* @@@TBD MichalK temporary... should be moved to init-tool... */
	val = ecore_rd(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION);
	val |= IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN;
	val &= ~IGU_REG_BLOCK_CONFIGURATION_PXP_TPH_INTERFACE_EN;
	ecore_wr(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION, val);
	/* end temporary */

	for (igu_sb_id = 0;
	     igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
	     igu_sb_id++) {
		p_block = &p_info->entry[igu_sb_id];

		/* Only this PF's non-default entries are cleaned here */
		if (!(p_block->status & ECORE_IGU_STATUS_VALID) ||
		    !p_block->is_pf ||
		    (p_block->status & ECORE_IGU_STATUS_DSB))
			continue;

		ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt, igu_sb_id,
						  p_hwfn->hw_info.opaque_fid,
						  b_set);
	}

	if (b_slowpath)
		ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
						  p_info->igu_dsb_id,
						  p_hwfn->hw_info.opaque_fid,
						  b_set);
}
2102
/* Reset the IGU CAM to the driver's initial PF/VF SB distribution.
 *
 * Derives the PF/VF SB split (from MFW resource information when available,
 * otherwise from what was previously read out of the CAM), marks all SBs as
 * free, then rewrites every valid CAM line: default SB first, then PF SBs,
 * then VF SBs.
 *
 * @param p_hwfn
 * @param p_ptt
 *
 * @return 0 on success, ECORE_INVAL if there aren't enough SBs for the VFs.
 */
int ecore_int_igu_reset_cam(struct ecore_hwfn *p_hwfn,
                            struct ecore_ptt *p_ptt)
{
        struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
        struct ecore_igu_block *p_block;
        int pf_sbs, vf_sbs;
        u16 igu_sb_id;
        u32 val, rval;

        if (!RESC_NUM(p_hwfn, ECORE_SB)) {
                /* We're using an old MFW - have to prevent any switching
                 * of SBs between PF and VFs as later driver wouldn't be
                 * able to tell which belongs to which.
                 */
                p_info->b_allow_pf_vf_change = false;
        } else {
                /* Use the numbers the MFW have provided -
                 * don't forget MFW accounts for the default SB as well.
                 */
                p_info->b_allow_pf_vf_change = true;

                if (p_info->usage.cnt != RESC_NUM(p_hwfn, ECORE_SB) - 1) {
                        DP_INFO(p_hwfn,
                                "MFW notifies of 0x%04x PF SBs; IGU indicates of only 0x%04x\n",
                                RESC_NUM(p_hwfn, ECORE_SB) - 1,
                                p_info->usage.cnt);
                        /* Trust the MFW figure over what was read from IGU */
                        p_info->usage.cnt = RESC_NUM(p_hwfn, ECORE_SB) - 1;
                }

                /* TODO - how do we learn about VF SBs from MFW? */
                if (IS_PF_SRIOV(p_hwfn)) {
                        u16 vfs = p_hwfn->p_dev->p_iov_info->total_vfs;

                        if (vfs != p_info->usage.iov_cnt)
                                DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
                                           "0x%04x VF SBs in IGU CAM != PCI configuration 0x%04x\n",
                                           p_info->usage.iov_cnt, vfs);

                        /* At this point we know how many SBs we have totally
                         * in IGU + number of PF SBs. So we can validate that
                         * we'd have sufficient for VF.
                         */
                        if (vfs > p_info->usage.free_cnt +
                                  p_info->usage.free_cnt_iov -
                                  p_info->usage.cnt) {
                                DP_NOTICE(p_hwfn, true,
                                          "Not enough SBs for VFs - 0x%04x SBs, from which %04x PFs and %04x are required\n",
                                          p_info->usage.free_cnt +
                                          p_info->usage.free_cnt_iov,
                                          p_info->usage.cnt, vfs);
                                return ECORE_INVAL;
                        }
                }
        }

        /* Cap the number of VFs SBs by the number of VFs */
        if (IS_PF_SRIOV(p_hwfn))
                p_info->usage.iov_cnt = p_hwfn->p_dev->p_iov_info->total_vfs;

        /* Mark all SBs as free, now in the right PF/VFs division */
        p_info->usage.free_cnt = p_info->usage.cnt;
        p_info->usage.free_cnt_iov = p_info->usage.iov_cnt;
        p_info->usage.orig = p_info->usage.cnt;
        p_info->usage.iov_orig = p_info->usage.iov_cnt;

        /* We now proceed to re-configure the IGU cam to reflect the initial
         * configuration. We can start with the Default SB.
         */
        pf_sbs = p_info->usage.cnt;
        vf_sbs = p_info->usage.iov_cnt;

        for (igu_sb_id = p_info->igu_dsb_id;
             igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
             igu_sb_id++) {
                p_block = &p_info->entry[igu_sb_id];
                val = 0;

                /* Skip CAM lines that don't belong to this PF or its VFs */
                if (!(p_block->status & ECORE_IGU_STATUS_VALID))
                        continue;

                if (p_block->status & ECORE_IGU_STATUS_DSB) {
                        /* Default SB - owned by the PF, vector 0 */
                        p_block->function_id = p_hwfn->rel_pf_id;
                        p_block->is_pf = 1;
                        p_block->vector_number = 0;
                        p_block->status = ECORE_IGU_STATUS_VALID |
                                          ECORE_IGU_STATUS_PF |
                                          ECORE_IGU_STATUS_DSB;
                } else if (pf_sbs) {
                        pf_sbs--;
                        p_block->function_id = p_hwfn->rel_pf_id;
                        p_block->is_pf = 1;
                        p_block->vector_number = p_info->usage.cnt - pf_sbs;
                        p_block->status = ECORE_IGU_STATUS_VALID |
                                          ECORE_IGU_STATUS_PF |
                                          ECORE_IGU_STATUS_FREE;
                } else if (vf_sbs) {
                        p_block->function_id =
                                p_hwfn->p_dev->p_iov_info->first_vf_in_pf +
                                p_info->usage.iov_cnt - vf_sbs;
                        p_block->is_pf = 0;
                        p_block->vector_number = 0;
                        p_block->status = ECORE_IGU_STATUS_VALID |
                                          ECORE_IGU_STATUS_FREE;
                        vf_sbs--;
                } else {
                        /* Surplus line - disown it */
                        p_block->function_id = 0;
                        p_block->is_pf = 0;
                        p_block->vector_number = 0;
                }

                SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER,
                          p_block->function_id);
                SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, p_block->is_pf);
                SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER,
                          p_block->vector_number);

                /* VF entries would be enabled when VF is initialized */
                SET_FIELD(val, IGU_MAPPING_LINE_VALID, p_block->is_pf);

                /* Avoid the register write when the line already matches */
                rval = ecore_rd(p_hwfn, p_ptt,
                                IGU_REG_MAPPING_MEMORY +
                                sizeof(u32) * igu_sb_id);

                if (rval != val) {
                        ecore_wr(p_hwfn, p_ptt,
                                 IGU_REG_MAPPING_MEMORY +
                                 sizeof(u32) * igu_sb_id,
                                 val);

                        DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
                                   "IGU reset: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x [%08x -> %08x]\n",
                                   igu_sb_id, p_block->function_id,
                                   p_block->is_pf, p_block->vector_number,
                                   rval, val);
                }
        }

        return 0;
}
2242
2243 int ecore_int_igu_reset_cam_default(struct ecore_hwfn *p_hwfn,
2244                                     struct ecore_ptt *p_ptt)
2245 {
2246         struct ecore_sb_cnt_info *p_cnt = &p_hwfn->hw_info.p_igu_info->usage;
2247
2248         /* Return all the usage indications to default prior to the reset;
2249          * The reset expects the !orig to reflect the initial status of the
2250          * SBs, and would re-calculate the originals based on those.
2251          */
2252         p_cnt->cnt = p_cnt->orig;
2253         p_cnt->free_cnt = p_cnt->orig;
2254         p_cnt->iov_cnt = p_cnt->iov_orig;
2255         p_cnt->free_cnt_iov = p_cnt->iov_orig;
2256         p_cnt->orig = 0;
2257         p_cnt->iov_orig = 0;
2258
2259         /* TODO - we probably need to re-configure the CAU as well... */
2260         return ecore_int_igu_reset_cam(p_hwfn, p_ptt);
2261 }
2262
2263 static void ecore_int_igu_read_cam_block(struct ecore_hwfn *p_hwfn,
2264                                          struct ecore_ptt *p_ptt,
2265                                          u16 igu_sb_id)
2266 {
2267         u32 val = ecore_rd(p_hwfn, p_ptt,
2268                            IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_sb_id);
2269         struct ecore_igu_block *p_block;
2270
2271         p_block = &p_hwfn->hw_info.p_igu_info->entry[igu_sb_id];
2272
2273         /* Fill the block information */
2274         p_block->function_id = GET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER);
2275         p_block->is_pf = GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID);
2276         p_block->vector_number = GET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER);
2277
2278         p_block->igu_sb_id = igu_sb_id;
2279 }
2280
/* Allocate and populate the shadow IGU CAM information.
 *
 * Reads every IGU CAM line, marks the lines belonging to this PF and its
 * VFs, selects the first such line as the default SB, and counts the
 * available PF/VF SBs.
 *
 * @param p_hwfn
 * @param p_ptt
 *
 * @return ECORE_SUCCESS; ECORE_NOMEM on allocation failure; ECORE_INVAL if
 *         no CAM line belonging to this PF (or its VFs) was found.
 */
enum _ecore_status_t ecore_int_igu_read_cam(struct ecore_hwfn *p_hwfn,
                                            struct ecore_ptt *p_ptt)
{
        struct ecore_igu_info *p_igu_info;
        struct ecore_igu_block *p_block;
        u32 min_vf = 0, max_vf = 0;
        u16 igu_sb_id;

        p_hwfn->hw_info.p_igu_info = OSAL_ZALLOC(p_hwfn->p_dev,
                                                 GFP_KERNEL,
                                                 sizeof(*p_igu_info));
        if (!p_hwfn->hw_info.p_igu_info)
                return ECORE_NOMEM;
        p_igu_info = p_hwfn->hw_info.p_igu_info;

        /* Distinguish between existent and non-existent default SB */
        p_igu_info->igu_dsb_id = ECORE_SB_INVALID_IDX;

        /* Find the range of VF ids whose SB belong to this PF */
        if (p_hwfn->p_dev->p_iov_info) {
                struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info;

                min_vf = p_iov->first_vf_in_pf;
                max_vf = p_iov->first_vf_in_pf + p_iov->total_vfs;
        }

        for (igu_sb_id = 0;
             igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
             igu_sb_id++) {
                /* Read current entry; Notice it might not belong to this PF */
                ecore_int_igu_read_cam_block(p_hwfn, p_ptt, igu_sb_id);
                p_block = &p_igu_info->entry[igu_sb_id];

                if ((p_block->is_pf) &&
                    (p_block->function_id == p_hwfn->rel_pf_id)) {
                        p_block->status = ECORE_IGU_STATUS_PF |
                                          ECORE_IGU_STATUS_VALID |
                                          ECORE_IGU_STATUS_FREE;

                        /* Default SB isn't counted as a regular PF SB */
                        if (p_igu_info->igu_dsb_id != ECORE_SB_INVALID_IDX)
                                p_igu_info->usage.cnt++;
                } else if (!(p_block->is_pf) &&
                           (p_block->function_id >= min_vf) &&
                           (p_block->function_id < max_vf)) {
                        /* Available for VFs of this PF */
                        p_block->status = ECORE_IGU_STATUS_VALID |
                                          ECORE_IGU_STATUS_FREE;

                        if (p_igu_info->igu_dsb_id != ECORE_SB_INVALID_IDX)
                                p_igu_info->usage.iov_cnt++;
                }

                /* Mark the First entry belonging to the PF or its VFs
                 * as the default SB [we'll reset IGU prior to first usage].
                 */
                if ((p_block->status & ECORE_IGU_STATUS_VALID) &&
                    (p_igu_info->igu_dsb_id == ECORE_SB_INVALID_IDX)) {
                        p_igu_info->igu_dsb_id = igu_sb_id;
                        p_block->status |= ECORE_IGU_STATUS_DSB;
                }

                /* While this isn't suitable for all clients, limit number
                 * of prints by having each PF print only its entries with the
                 * exception of PF0 which would print everything.
                 */
                if ((p_block->status & ECORE_IGU_STATUS_VALID) ||
                    (p_hwfn->abs_pf_id == 0))
                        DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
                                   "IGU_BLOCK: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x\n",
                                   igu_sb_id, p_block->function_id,
                                   p_block->is_pf, p_block->vector_number);
        }

        if (p_igu_info->igu_dsb_id == ECORE_SB_INVALID_IDX) {
                DP_NOTICE(p_hwfn, true,
                          "IGU CAM returned invalid values igu_dsb_id=0x%x\n",
                          p_igu_info->igu_dsb_id);
                return ECORE_INVAL;
        }

        /* All non default SB are considered free at this point */
        p_igu_info->usage.free_cnt = p_igu_info->usage.cnt;
        p_igu_info->usage.free_cnt_iov = p_igu_info->usage.iov_cnt;

        DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
                   "igu_dsb_id=0x%x, num Free SBs - PF: %04x VF: %04x [might change after resource allocation]\n",
                   p_igu_info->igu_dsb_id, p_igu_info->usage.cnt,
                   p_igu_info->usage.iov_cnt);

        return ECORE_SUCCESS;
}
2372
/* Move a single status block between PF and VF ownership.
 *
 * Finds a suitable free IGU CAM line, re-assigns it, updates the usage
 * accounting and L2-queue feature counts, and writes the new configuration
 * to the IGU and CAU.
 *
 * @param p_hwfn
 * @param p_ptt
 * @param sb_id - when moving PF->VF, the PF vector to donate; when moving
 *                VF->PF, the vector the relocated SB will serve.
 * @param b_to_vf - true for PF->VF, false for VF->PF.
 *
 * @return ECORE_SUCCESS, or ECORE_INVAL if the relocation isn't possible.
 */
enum _ecore_status_t
ecore_int_igu_relocate_sb(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
                          u16 sb_id, bool b_to_vf)
{
        struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
        struct ecore_igu_block *p_block = OSAL_NULL;
        u16 igu_sb_id = 0, vf_num = 0;
        u32 val = 0;

        /* Only a PF with SRIOV support can shuffle SBs between PF and VFs */
        if (IS_VF(p_hwfn->p_dev) || !IS_PF_SRIOV(p_hwfn))
                return ECORE_INVAL;

        /* The slowpath SB must stay with the PF */
        if (sb_id == ECORE_SP_SB_ID)
                return ECORE_INVAL;

        if (!p_info->b_allow_pf_vf_change) {
                DP_INFO(p_hwfn, "Can't relocate SBs as MFW is too old.\n");
                return ECORE_INVAL;
        }

        /* If we're moving a SB from PF to VF, the client had to specify
         * which vector it wants to move.
         */
        if (b_to_vf) {
                igu_sb_id = ecore_get_pf_igu_sb_id(p_hwfn, sb_id + 1);
                if (igu_sb_id == ECORE_SB_INVALID_IDX)
                        return ECORE_INVAL;
        }

        /* If we're moving a SB from VF to PF, need to validate there isn't
         * already a line configured for that vector.
         */
        if (!b_to_vf) {
                if (ecore_get_pf_igu_sb_id(p_hwfn, sb_id + 1) !=
                    ECORE_SB_INVALID_IDX)
                        return ECORE_INVAL;
        }

        /* We need to validate that the SB can actually be relocated.
         * This would also handle the previous case where we've explicitly
         * stated which IGU SB needs to move.
         */
        for (; igu_sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
             igu_sb_id++) {
                p_block = &p_info->entry[igu_sb_id];

                /* A candidate must be valid, free, and on the side we're
                 * moving FROM (PF-owned when b_to_vf, VF-owned otherwise).
                 */
                if (!(p_block->status & ECORE_IGU_STATUS_VALID) ||
                    !(p_block->status & ECORE_IGU_STATUS_FREE) ||
                    (!!(p_block->status & ECORE_IGU_STATUS_PF) != b_to_vf)) {
                        if (b_to_vf)
                                return ECORE_INVAL;
                        else
                                continue;
                }

                break;
        }

        if (igu_sb_id == ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev)) {
                DP_VERBOSE(p_hwfn, (ECORE_MSG_INTR | ECORE_MSG_IOV),
                           "Failed to find a free SB to move\n");
                return ECORE_INVAL;
        }

        /* At this point, p_block points to the SB we want to relocate */
        if (b_to_vf) {
                p_block->status &= ~ECORE_IGU_STATUS_PF;

                /* It doesn't matter which VF number we choose, since we're
                 * going to disable the line; But let's keep it in range.
                 */
                vf_num = (u16)p_hwfn->p_dev->p_iov_info->first_vf_in_pf;

                p_block->function_id = (u8)vf_num;
                p_block->is_pf = 0;
                p_block->vector_number = 0;

                p_info->usage.cnt--;
                p_info->usage.free_cnt--;
                p_info->usage.iov_cnt++;
                p_info->usage.free_cnt_iov++;

                /* TODO - if SBs aren't really the limiting factor,
                 * then it might not be accurate [in the sense that
                 * we might not need decrement the feature].
                 */
                p_hwfn->hw_info.feat_num[ECORE_PF_L2_QUE]--;
                p_hwfn->hw_info.feat_num[ECORE_VF_L2_QUE]++;
        } else {
                p_block->status |= ECORE_IGU_STATUS_PF;
                p_block->function_id = p_hwfn->rel_pf_id;
                p_block->is_pf = 1;
                p_block->vector_number = sb_id + 1;

                p_info->usage.cnt++;
                p_info->usage.free_cnt++;
                p_info->usage.iov_cnt--;
                p_info->usage.free_cnt_iov--;

                p_hwfn->hw_info.feat_num[ECORE_PF_L2_QUE]++;
                p_hwfn->hw_info.feat_num[ECORE_VF_L2_QUE]--;
        }

        /* Update the IGU and CAU with the new configuration */
        SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER,
                  p_block->function_id);
        SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, p_block->is_pf);
        SET_FIELD(val, IGU_MAPPING_LINE_VALID, p_block->is_pf);
        SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER,
                  p_block->vector_number);

        ecore_wr(p_hwfn, p_ptt,
                 IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_sb_id,
                 val);

        ecore_int_cau_conf_sb(p_hwfn, p_ptt, 0,
                              igu_sb_id, vf_num,
                              p_block->is_pf ? 0 : 1);

        DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
                   "Relocation: [SB 0x%04x] func_id = %d is_pf = %d vector_num = 0x%x\n",
                   igu_sb_id, p_block->function_id,
                   p_block->is_pf, p_block->vector_number);

        return ECORE_SUCCESS;
}
2499
2500 /**
2501  * @brief Initialize igu runtime registers
2502  *
2503  * @param p_hwfn
2504  */
2505 void ecore_int_igu_init_rt(struct ecore_hwfn *p_hwfn)
2506 {
2507         u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN;
2508
2509         STORE_RT_REG(p_hwfn, IGU_REG_PF_CONFIGURATION_RT_OFFSET, igu_pf_conf);
2510 }
2511
2512 #define LSB_IGU_CMD_ADDR (IGU_REG_SISR_MDPC_WMASK_LSB_UPPER - \
2513                           IGU_CMD_INT_ACK_BASE)
2514 #define MSB_IGU_CMD_ADDR (IGU_REG_SISR_MDPC_WMASK_MSB_UPPER - \
2515                           IGU_CMD_INT_ACK_BASE)
2516 u64 ecore_int_igu_read_sisr_reg(struct ecore_hwfn *p_hwfn)
2517 {
2518         u32 intr_status_hi = 0, intr_status_lo = 0;
2519         u64 intr_status = 0;
2520
2521         intr_status_lo = REG_RD(p_hwfn,
2522                                 GTT_BAR0_MAP_REG_IGU_CMD +
2523                                 LSB_IGU_CMD_ADDR * 8);
2524         intr_status_hi = REG_RD(p_hwfn,
2525                                 GTT_BAR0_MAP_REG_IGU_CMD +
2526                                 MSB_IGU_CMD_ADDR * 8);
2527         intr_status = ((u64)intr_status_hi << 32) + (u64)intr_status_lo;
2528
2529         return intr_status;
2530 }
2531
/* Initialize the slowpath DPC object and mark it as enabled */
static void ecore_int_sp_dpc_setup(struct ecore_hwfn *p_hwfn)
{
        OSAL_DPC_INIT(p_hwfn->sp_dpc, p_hwfn);
        p_hwfn->b_sp_dpc_enabled = true;
}
2537
2538 static enum _ecore_status_t ecore_int_sp_dpc_alloc(struct ecore_hwfn *p_hwfn)
2539 {
2540         p_hwfn->sp_dpc = OSAL_DPC_ALLOC(p_hwfn);
2541         if (!p_hwfn->sp_dpc)
2542                 return ECORE_NOMEM;
2543
2544         return ECORE_SUCCESS;
2545 }
2546
/* Release the slowpath DPC allocated by ecore_int_sp_dpc_alloc() */
static void ecore_int_sp_dpc_free(struct ecore_hwfn *p_hwfn)
{
        OSAL_FREE(p_hwfn->p_dev, p_hwfn->sp_dpc);
}
2551
2552 enum _ecore_status_t ecore_int_alloc(struct ecore_hwfn *p_hwfn,
2553                                      struct ecore_ptt *p_ptt)
2554 {
2555         enum _ecore_status_t rc = ECORE_SUCCESS;
2556
2557         rc = ecore_int_sp_dpc_alloc(p_hwfn);
2558         if (rc != ECORE_SUCCESS) {
2559                 DP_ERR(p_hwfn->p_dev, "Failed to allocate sp dpc mem\n");
2560                 return rc;
2561         }
2562
2563         rc = ecore_int_sp_sb_alloc(p_hwfn, p_ptt);
2564         if (rc != ECORE_SUCCESS) {
2565                 DP_ERR(p_hwfn->p_dev, "Failed to allocate sp sb mem\n");
2566                 return rc;
2567         }
2568
2569         rc = ecore_int_sb_attn_alloc(p_hwfn, p_ptt);
2570         if (rc != ECORE_SUCCESS)
2571                 DP_ERR(p_hwfn->p_dev, "Failed to allocate sb attn mem\n");
2572
2573         return rc;
2574 }
2575
/* Free all interrupt-related resources allocated by ecore_int_alloc() */
void ecore_int_free(struct ecore_hwfn *p_hwfn)
{
        ecore_int_sp_sb_free(p_hwfn);
        ecore_int_sb_attn_free(p_hwfn);
        ecore_int_sp_dpc_free(p_hwfn);
}
2582
2583 void ecore_int_setup(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
2584 {
2585         if (!p_hwfn || !p_hwfn->p_sp_sb || !p_hwfn->p_sb_attn)
2586                 return;
2587
2588         ecore_int_sb_setup(p_hwfn, p_ptt, &p_hwfn->p_sp_sb->sb_info);
2589         ecore_int_sb_attn_setup(p_hwfn, p_ptt);
2590         ecore_int_sp_dpc_setup(p_hwfn);
2591 }
2592
2593 void ecore_int_get_num_sbs(struct ecore_hwfn *p_hwfn,
2594                            struct ecore_sb_cnt_info *p_sb_cnt_info)
2595 {
2596         struct ecore_igu_info *p_igu_info = p_hwfn->hw_info.p_igu_info;
2597
2598         if (!p_igu_info || !p_sb_cnt_info)
2599                 return;
2600
2601         OSAL_MEMCPY(p_sb_cnt_info, &p_igu_info->usage,
2602                     sizeof(*p_sb_cnt_info));
2603 }
2604
/* Clear the 'interrupts requested' indication on every hw-function of the
 * device; intended to be called after the ISRs have been released.
 */
void ecore_int_disable_post_isr_release(struct ecore_dev *p_dev)
{
        int i;

        for_each_hwfn(p_dev, i)
                p_dev->hwfns[i].b_int_requested = false;
}
2612
/* Cache the attention-clear enable policy on the device; read elsewhere
 * by the attention handling flows.
 */
void ecore_int_attn_clr_enable(struct ecore_dev *p_dev, bool clr_enable)
{
        p_dev->attn_clr_en = clr_enable;
}
2617
/* Update the coalescing timer resolution of a single status block.
 *
 * Reads the SB's CAU variable entry from HW via DMAE, updates the
 * TIMER_RES1 (tx) or TIMER_RES0 (!tx) field, and writes it back.
 *
 * @param p_hwfn
 * @param p_ptt
 * @param timer_res - new timer resolution value
 * @param sb_id - status block whose CAU entry should be updated
 * @param tx - true to set TIMER_RES1, false to set TIMER_RES0
 *
 * @return ECORE_SUCCESS; ECORE_INVAL if HW isn't initialized yet;
 *         otherwise the status of the failing DMAE transfer.
 */
enum _ecore_status_t ecore_int_set_timer_res(struct ecore_hwfn *p_hwfn,
                                             struct ecore_ptt *p_ptt,
                                             u8 timer_res, u16 sb_id, bool tx)
{
        struct cau_sb_entry sb_entry;
        enum _ecore_status_t rc;

        if (!p_hwfn->hw_init_done) {
                DP_ERR(p_hwfn, "hardware not initialized yet\n");
                return ECORE_INVAL;
        }

        /* Fetch the current CAU entry (2 dwords) for this SB */
        rc = ecore_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY +
                                 sb_id * sizeof(u64),
                                 (u64)(osal_uintptr_t)&sb_entry, 2, 0);
        if (rc != ECORE_SUCCESS) {
                DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc);
                return rc;
        }

        if (tx)
                SET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES1, timer_res);
        else
                SET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES0, timer_res);

        /* Write the modified entry back to HW */
        rc = ecore_dmae_host2grc(p_hwfn, p_ptt,
                                 (u64)(osal_uintptr_t)&sb_entry,
                                 CAU_REG_SB_VAR_MEMORY +
                                 sb_id * sizeof(u64), 2, 0);
        if (rc != ECORE_SUCCESS) {
                DP_ERR(p_hwfn, "dmae_host2grc failed %d\n", rc);
                return rc;
        }

        return rc;
}
2654
2655 enum _ecore_status_t ecore_int_get_sb_dbg(struct ecore_hwfn *p_hwfn,
2656                                           struct ecore_ptt *p_ptt,
2657                                           struct ecore_sb_info *p_sb,
2658                                           struct ecore_sb_info_dbg *p_info)
2659 {
2660         u16 sbid = p_sb->igu_sb_id;
2661         int i;
2662
2663         if (IS_VF(p_hwfn->p_dev))
2664                 return ECORE_INVAL;
2665
2666         if (sbid > NUM_OF_SBS(p_hwfn->p_dev))
2667                 return ECORE_INVAL;
2668
2669         p_info->igu_prod = ecore_rd(p_hwfn, p_ptt,
2670                                     IGU_REG_PRODUCER_MEMORY + sbid * 4);
2671         p_info->igu_cons = ecore_rd(p_hwfn, p_ptt,
2672                                     IGU_REG_CONSUMER_MEM + sbid * 4);
2673
2674         for (i = 0; i < PIS_PER_SB_E4; i++)
2675                 p_info->pi[i] = (u16)ecore_rd(p_hwfn, p_ptt,
2676                                               CAU_REG_PI_MEMORY +
2677                                               sbid * 4 * PIS_PER_SB_E4 +
2678                                               i * 4);
2679
2680         return ECORE_SUCCESS;
2681 }