2 * Copyright (c) 2016 QLogic Corporation.
6 * See LICENSE.qede_pmd for copyright and licensing details.
9 #include <rte_string_fns.h>
13 #include "ecore_spq.h"
15 #include "ecore_gtt_reg_addr.h"
16 #include "ecore_init_ops.h"
17 #include "ecore_rt_defs.h"
18 #include "ecore_int.h"
21 #include "ecore_sriov.h"
23 #include "ecore_hw_defs.h"
24 #include "ecore_hsi_common.h"
25 #include "ecore_mcp.h"
27 struct ecore_pi_info {
28 ecore_int_comp_cb_t comp_cb;
29 void *cookie; /* Will be sent to the compl cb function */
32 struct ecore_sb_sp_info {
33 struct ecore_sb_info sb_info;
34 /* per protocol index data */
35 struct ecore_pi_info pi_info_arr[PIS_PER_SB];
38 enum ecore_attention_type {
40 ECORE_ATTN_TYPE_PARITY,
43 #define SB_ATTN_ALIGNED_SIZE(p_hwfn) \
44 ALIGNED_TYPE_SIZE(struct atten_status_block, p_hwfn)
46 struct aeu_invert_reg_bit {
49 #define ATTENTION_PARITY (1 << 0)
51 #define ATTENTION_LENGTH_MASK (0x00000ff0)
52 #define ATTENTION_LENGTH_SHIFT (4)
53 #define ATTENTION_LENGTH(flags) (((flags) & ATTENTION_LENGTH_MASK) >> \
54 ATTENTION_LENGTH_SHIFT)
55 #define ATTENTION_SINGLE (1 << ATTENTION_LENGTH_SHIFT)
56 #define ATTENTION_PAR (ATTENTION_SINGLE | ATTENTION_PARITY)
57 #define ATTENTION_PAR_INT ((2 << ATTENTION_LENGTH_SHIFT) | \
60 /* Multiple bits start with this offset */
61 #define ATTENTION_OFFSET_MASK (0x000ff000)
62 #define ATTENTION_OFFSET_SHIFT (12)
64 #define ATTENTION_CLEAR_ENABLE (1 << 28)
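/* Illustrative note (not in the original sources): the flags word of an
 * aeu_invert_reg_bit packs a parity indication in bit 0, the number of
 * consecutive AEU bits covered by the entry in bits [11:4], and an optional
 * starting offset in bits [19:12]. For example, ATTENTION_PAR_INT describes
 * a parity bit followed by an interrupt bit, so
 * ATTENTION_LENGTH(ATTENTION_PAR_INT) evaluates to 2, while a plain
 * ATTENTION_SINGLE entry has length 1 and no parity indication.
 */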
67 /* Callback to call if attention will be triggered */
68 enum _ecore_status_t (*cb)(struct ecore_hwfn *p_hwfn);
70 enum block_id block_index;
73 struct aeu_invert_reg {
74 struct aeu_invert_reg_bit bits[32];
77 #define MAX_ATTN_GRPS (8)
78 #define NUM_ATTN_REGS (9)
80 static enum _ecore_status_t ecore_mcp_attn_cb(struct ecore_hwfn *p_hwfn)
82 u32 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_STATE);
84 DP_INFO(p_hwfn->p_dev, "MCP_REG_CPU_STATE: %08x - Masking...\n", tmp);
85 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_EVENT_MASK, 0xffffffff);
90 #define ECORE_PSWHST_ATTENTION_DISABLED_PF_MASK (0x3c000)
91 #define ECORE_PSWHST_ATTENTION_DISABLED_PF_SHIFT (14)
92 #define ECORE_PSWHST_ATTENTION_DISABLED_VF_MASK (0x03fc0)
93 #define ECORE_PSWHST_ATTENTION_DISABLED_VF_SHIFT (6)
94 #define ECORE_PSWHST_ATTENTION_DISABLED_VALID_MASK (0x00020)
95 #define ECORE_PSWHST_ATTENTION_DISABLED_VALID_SHIFT (5)
96 #define ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_MASK (0x0001e)
97 #define ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_SHIFT (1)
98 #define ECORE_PSWHST_ATTENTION_DISABLED_WRITE_MASK (0x1)
99 #define ECORE_PSWHST_ATTENTION_DISABLED_WRITE_SHIFT (0)
100 #define ECORE_PSWHST_ATTENTION_VF_DISABLED (0x1)
101 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS (0x1)
102 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_MASK (0x1)
103 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_SHIFT (0)
104 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_MASK (0x1e)
105 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_SHIFT (1)
106 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_MASK (0x20)
107 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_SHIFT (5)
108 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_MASK (0x3fc0)
109 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_SHIFT (6)
110 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_MASK (0x3c000)
111 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_SHIFT (14)
112 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_MASK (0x3fc0000)
113 #define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_SHIFT (18)
114 static enum _ecore_status_t ecore_pswhst_attn_cb(struct ecore_hwfn *p_hwfn)
117 ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
118 PSWHST_REG_VF_DISABLED_ERROR_VALID);
120 /* Disabled VF access */
121 if (tmp & ECORE_PSWHST_ATTENTION_VF_DISABLED) {
124 addr = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
125 PSWHST_REG_VF_DISABLED_ERROR_ADDRESS);
126 data = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
127 PSWHST_REG_VF_DISABLED_ERROR_DATA);
128 DP_INFO(p_hwfn->p_dev,
129 "PF[0x%02x] VF [0x%02x] [Valid 0x%02x] Client [0x%02x]"
130 " Write [0x%02x] Addr [0x%08x]\n",
131 (u8)((data & ECORE_PSWHST_ATTENTION_DISABLED_PF_MASK)
132 >> ECORE_PSWHST_ATTENTION_DISABLED_PF_SHIFT),
133 (u8)((data & ECORE_PSWHST_ATTENTION_DISABLED_VF_MASK)
134 >> ECORE_PSWHST_ATTENTION_DISABLED_VF_SHIFT),
136 ECORE_PSWHST_ATTENTION_DISABLED_VALID_MASK) >>
137 ECORE_PSWHST_ATTENTION_DISABLED_VALID_SHIFT),
139 ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_MASK) >>
140 ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_SHIFT),
142 ECORE_PSWHST_ATTENTION_DISABLED_WRITE_MASK) >>
143 ECORE_PSWHST_ATTENTION_DISABLED_WRITE_SHIFT),
147 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
148 PSWHST_REG_INCORRECT_ACCESS_VALID);
149 if (tmp & ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS) {
150 u32 addr, data, length;
152 addr = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
153 PSWHST_REG_INCORRECT_ACCESS_ADDRESS);
154 data = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
155 PSWHST_REG_INCORRECT_ACCESS_DATA);
156 length = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
157 PSWHST_REG_INCORRECT_ACCESS_LENGTH);
159 DP_INFO(p_hwfn->p_dev,
160 "Incorrect access to %08x of length %08x - PF [%02x]"
161 " VF [%04x] [valid %02x] client [%02x] write [%02x]"
162 " Byte-Enable [%04x] [%08x]\n",
165 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_MASK) >>
166 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_SHIFT),
168 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_MASK) >>
169 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_SHIFT),
171 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_MASK) >>
172 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_SHIFT),
174 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_MASK) >>
175 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_SHIFT),
177 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_MASK) >>
178 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_SHIFT),
180 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_MASK) >>
181 ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_SHIFT),
185 /* TODO - We know 'some' of these are legal due to virtualization,
186 * but is it true for all of them?
188 return ECORE_SUCCESS;
191 #define ECORE_GRC_ATTENTION_VALID_BIT (1 << 0)
192 #define ECORE_GRC_ATTENTION_ADDRESS_MASK (0x7fffff << 0)
193 #define ECORE_GRC_ATTENTION_RDWR_BIT (1 << 23)
194 #define ECORE_GRC_ATTENTION_MASTER_MASK (0xf << 24)
195 #define ECORE_GRC_ATTENTION_MASTER_SHIFT (24)
196 #define ECORE_GRC_ATTENTION_PF_MASK (0xf)
197 #define ECORE_GRC_ATTENTION_VF_MASK (0xff << 4)
198 #define ECORE_GRC_ATTENTION_VF_SHIFT (4)
199 #define ECORE_GRC_ATTENTION_PRIV_MASK (0x3 << 14)
200 #define ECORE_GRC_ATTENTION_PRIV_SHIFT (14)
201 #define ECORE_GRC_ATTENTION_PRIV_VF (0)
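/* Illustrative note (not in the original sources): DATA_0 carries the
 * offending address in 32-bit dwords in bits [22:0] and a read/write flag in
 * bit 23, which is why the handler below prints
 * (tmp & ECORE_GRC_ATTENTION_ADDRESS_MASK) << 2 as a byte address, while
 * DATA_1 carries the PF/VF and privilege fields.
 */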
202 static const char *grc_timeout_attn_master_to_str(u8 master)
230 static enum _ecore_status_t ecore_grc_attn_cb(struct ecore_hwfn *p_hwfn)
232 enum _ecore_status_t rc = ECORE_SUCCESS;
235 /* We've already cleared the timeout interrupt register, so we learn
236 * of interrupts via the validity register.
237 * Any attention which is not for a timeout event is treated as fatal.
239 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
240 GRC_REG_TIMEOUT_ATTN_ACCESS_VALID);
241 if (!(tmp & ECORE_GRC_ATTENTION_VALID_BIT)) {
246 /* Read the GRC timeout information */
247 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
248 GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_0);
249 tmp2 = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
250 GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_1);
252 DP_INFO(p_hwfn->p_dev,
253 "GRC timeout [%08x:%08x] - %s Address [%08x] [Master %s]"
254 " [PF: %02x %s %02x]\n",
256 (tmp & ECORE_GRC_ATTENTION_RDWR_BIT) ? "Write to" : "Read from",
257 (tmp & ECORE_GRC_ATTENTION_ADDRESS_MASK) << 2,
258 grc_timeout_attn_master_to_str((tmp &
259 ECORE_GRC_ATTENTION_MASTER_MASK) >>
260 ECORE_GRC_ATTENTION_MASTER_SHIFT),
261 (tmp2 & ECORE_GRC_ATTENTION_PF_MASK),
262 (((tmp2 & ECORE_GRC_ATTENTION_PRIV_MASK) >>
263 ECORE_GRC_ATTENTION_PRIV_SHIFT) ==
264 ECORE_GRC_ATTENTION_PRIV_VF) ? "VF" : "(Irrelevant:)",
265 (tmp2 & ECORE_GRC_ATTENTION_VF_MASK) >>
266 ECORE_GRC_ATTENTION_VF_SHIFT);
268 /* Clean the validity bit */
269 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt,
270 GRC_REG_TIMEOUT_ATTN_ACCESS_VALID, 0);
275 #define ECORE_PGLUE_ATTENTION_VALID (1 << 29)
276 #define ECORE_PGLUE_ATTENTION_RD_VALID (1 << 26)
277 #define ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK (0xf << 20)
278 #define ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT (20)
279 #define ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID (1 << 19)
280 #define ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK (0xff << 24)
281 #define ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT (24)
282 #define ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR (1 << 21)
283 #define ECORE_PGLUE_ATTENTION_DETAILS2_BME (1 << 22)
284 #define ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN (1 << 23)
285 #define ECORE_PGLUE_ATTENTION_ICPL_VALID (1 << 23)
286 #define ECORE_PGLUE_ATTENTION_ZLR_VALID (1 << 25)
287 #define ECORE_PGLUE_ATTENTION_ILT_VALID (1 << 23)
288 static enum _ecore_status_t ecore_pglub_rbc_attn_cb(struct ecore_hwfn *p_hwfn)
292 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
293 PGLUE_B_REG_TX_ERR_WR_DETAILS2);
294 if (tmp & ECORE_PGLUE_ATTENTION_VALID) {
295 u32 addr_lo, addr_hi, details;
297 addr_lo = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
298 PGLUE_B_REG_TX_ERR_WR_ADD_31_0);
299 addr_hi = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
300 PGLUE_B_REG_TX_ERR_WR_ADD_63_32);
301 details = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
302 PGLUE_B_REG_TX_ERR_WR_DETAILS);
305 "Illegal write by chip to [%08x:%08x] blocked."
306 "Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]"
307 " Details2 %08x [Was_error %02x BME deassert %02x"
308 " FID_enable deassert %02x]\n",
309 addr_hi, addr_lo, details,
311 ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK) >>
312 ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT),
314 ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK) >>
315 ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT),
316 (u8)((details & ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID)
318 (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1
320 (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_BME) ? 1 :
322 (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1
326 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
327 PGLUE_B_REG_TX_ERR_RD_DETAILS2);
328 if (tmp & ECORE_PGLUE_ATTENTION_RD_VALID) {
329 u32 addr_lo, addr_hi, details;
331 addr_lo = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
332 PGLUE_B_REG_TX_ERR_RD_ADD_31_0);
333 addr_hi = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
334 PGLUE_B_REG_TX_ERR_RD_ADD_63_32);
335 details = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
336 PGLUE_B_REG_TX_ERR_RD_DETAILS);
339 "Illegal read by chip from [%08x:%08x] blocked."
340 " Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]"
341 " Details2 %08x [Was_error %02x BME deassert %02x"
342 " FID_enable deassert %02x]\n",
343 addr_hi, addr_lo, details,
345 ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK) >>
346 ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT),
348 ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK) >>
349 ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT),
350 (u8)((details & ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID)
352 (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1
354 (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_BME) ? 1 :
356 (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1
360 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
361 PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL);
362 if (tmp & ECORE_PGLUE_ATTENTION_ICPL_VALID)
363 DP_INFO(p_hwfn, "ICPL error - %08x\n", tmp);
365 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
366 PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS);
367 if (tmp & ECORE_PGLUE_ATTENTION_ZLR_VALID) {
368 u32 addr_hi, addr_lo;
370 addr_lo = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
371 PGLUE_B_REG_MASTER_ZLR_ERR_ADD_31_0);
372 addr_hi = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
373 PGLUE_B_REG_MASTER_ZLR_ERR_ADD_63_32);
375 DP_INFO(p_hwfn, "ICPL error - %08x [Address %08x:%08x]\n",
376 tmp, addr_hi, addr_lo);
379 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
380 PGLUE_B_REG_VF_ILT_ERR_DETAILS2);
381 if (tmp & ECORE_PGLUE_ATTENTION_ILT_VALID) {
382 u32 addr_hi, addr_lo, details;
384 addr_lo = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
385 PGLUE_B_REG_VF_ILT_ERR_ADD_31_0);
386 addr_hi = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
387 PGLUE_B_REG_VF_ILT_ERR_ADD_63_32);
388 details = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
389 PGLUE_B_REG_VF_ILT_ERR_DETAILS);
392 "ILT error - Details %08x Details2 %08x"
393 " [Address %08x:%08x]\n",
394 details, tmp, addr_hi, addr_lo);
397 /* Clear the indications */
398 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt,
399 PGLUE_B_REG_LATCHED_ERRORS_CLR, (1 << 2));
401 return ECORE_SUCCESS;
404 static enum _ecore_status_t ecore_fw_assertion(struct ecore_hwfn *p_hwfn)
406 DP_NOTICE(p_hwfn, false, "FW assertion!\n");
408 ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_FW_ASSERT);
413 static enum _ecore_status_t
414 ecore_general_attention_35(struct ecore_hwfn *p_hwfn)
416 DP_INFO(p_hwfn, "General attention 35!\n");
418 return ECORE_SUCCESS;
421 #define ECORE_DORQ_ATTENTION_REASON_MASK (0xfffff)
422 #define ECORE_DORQ_ATTENTION_OPAQUE_MASK (0xffff)
423 #define ECORE_DORQ_ATTENTION_SIZE_MASK (0x7f)
424 #define ECORE_DORQ_ATTENTION_SIZE_SHIFT (16)
426 static enum _ecore_status_t ecore_dorq_attn_cb(struct ecore_hwfn *p_hwfn)
430 reason = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, DORQ_REG_DB_DROP_REASON) &
431 ECORE_DORQ_ATTENTION_REASON_MASK;
433 u32 details = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
434 DORQ_REG_DB_DROP_DETAILS);
436 DP_INFO(p_hwfn->p_dev,
437 "DORQ db_drop: address 0x%08x Opaque FID 0x%04x"
438 " Size [bytes] 0x%08x Reason: 0x%08x\n",
439 ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
440 DORQ_REG_DB_DROP_DETAILS_ADDRESS),
441 (u16)(details & ECORE_DORQ_ATTENTION_OPAQUE_MASK),
442 ((details & ECORE_DORQ_ATTENTION_SIZE_MASK) >>
443 ECORE_DORQ_ATTENTION_SIZE_SHIFT) * 4, reason);
449 static enum _ecore_status_t ecore_tm_attn_cb(struct ecore_hwfn *p_hwfn)
452 if (CHIP_REV_IS_EMUL_B0(p_hwfn->p_dev)) {
453 u32 val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
456 if (val & ~(TM_REG_INT_STS_1_PEND_TASK_SCAN |
457 TM_REG_INT_STS_1_PEND_CONN_SCAN))
460 if (val & (TM_REG_INT_STS_1_PEND_TASK_SCAN |
461 TM_REG_INT_STS_1_PEND_CONN_SCAN))
463 "TM attention on emulation - most likely"
464 " results of clock-ratios\n");
465 val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, TM_REG_INT_MASK_1);
466 val |= TM_REG_INT_MASK_1_PEND_CONN_SCAN |
467 TM_REG_INT_MASK_1_PEND_TASK_SCAN;
468 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, TM_REG_INT_MASK_1, val);
470 return ECORE_SUCCESS;
477 /* Notice aeu_invert_reg must be defined in the same order of bits as HW; */
478 static struct aeu_invert_reg aeu_descs[NUM_ATTN_REGS] = {
480 { /* After Invert 1 */
481 {"GPIO0 function%d", (32 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,
487 { /* After Invert 2 */
488 {"PGLUE config_space", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
489 {"PGLUE misc_flr", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
490 {"PGLUE B RBC", ATTENTION_PAR_INT, ecore_pglub_rbc_attn_cb,
492 {"PGLUE misc_mctp", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
493 {"Flash event", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
494 {"SMB event", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
495 {"Main Power", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
497 (8 << ATTENTION_LENGTH_SHIFT) | (1 << ATTENTION_OFFSET_SHIFT),
498 OSAL_NULL, MAX_BLOCK_ID},
499 {"PCIE glue/PXP VPD %d", (16 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,
505 { /* After Invert 3 */
506 {"General Attention %d", (32 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,
512 { /* After Invert 4 */
513 {"General Attention 32", ATTENTION_SINGLE | ATTENTION_CLEAR_ENABLE,
514 ecore_fw_assertion, MAX_BLOCK_ID},
515 {"General Attention %d",
516 (2 << ATTENTION_LENGTH_SHIFT) | (33 << ATTENTION_OFFSET_SHIFT),
517 OSAL_NULL, MAX_BLOCK_ID},
518 {"General Attention 35", ATTENTION_SINGLE | ATTENTION_CLEAR_ENABLE,
519 ecore_general_attention_35, MAX_BLOCK_ID},
520 {"CNIG port %d", (4 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,
522 {"MCP CPU", ATTENTION_SINGLE, ecore_mcp_attn_cb, MAX_BLOCK_ID},
523 {"MCP Watchdog timer", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
524 {"MCP M2P", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
525 {"AVS stop status ready", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
526 {"MSTAT", ATTENTION_PAR_INT, OSAL_NULL, MAX_BLOCK_ID},
527 {"MSTAT per-path", ATTENTION_PAR_INT, OSAL_NULL, MAX_BLOCK_ID},
528 {"Reserved %d", (6 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,
530 {"NIG", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_NIG},
531 {"BMB/OPTE/MCP", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BMB},
532 {"BTB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BTB},
533 {"BRB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BRB},
534 {"PRS", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PRS},
539 { /* After Invert 5 */
540 {"SRC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_SRC},
541 {"PB Client1", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PBF_PB1},
542 {"PB Client2", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PBF_PB2},
543 {"RPB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_RPB},
544 {"PBF", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PBF},
545 {"QM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_QM},
546 {"TM", ATTENTION_PAR_INT, ecore_tm_attn_cb, BLOCK_TM},
547 {"MCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MCM},
548 {"MSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MSDM},
549 {"MSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MSEM},
550 {"PCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PCM},
551 {"PSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSDM},
552 {"PSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSEM},
553 {"TCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TCM},
554 {"TSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TSDM},
555 {"TSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TSEM},
560 { /* After Invert 6 */
561 {"UCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_UCM},
562 {"USDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_USDM},
563 {"USEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_USEM},
564 {"XCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XCM},
565 {"XSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XSDM},
566 {"XSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XSEM},
567 {"YCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YCM},
568 {"YSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YSDM},
569 {"YSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YSEM},
570 {"XYLD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XYLD},
571 {"TMLD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TMLD},
572 {"MYLD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MULD},
573 {"YULD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YULD},
574 {"DORQ", ATTENTION_PAR_INT, ecore_dorq_attn_cb, BLOCK_DORQ},
575 {"DBG", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_DBG},
576 {"IPC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_IPC},
581 { /* After Invert 7 */
582 {"CCFC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CCFC},
583 {"CDU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CDU},
584 {"DMAE", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_DMAE},
585 {"IGU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_IGU},
586 {"ATC", ATTENTION_PAR_INT, OSAL_NULL, MAX_BLOCK_ID},
587 {"CAU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CAU},
588 {"PTU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PTU},
589 {"PRM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PRM},
590 {"TCFC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TCFC},
591 {"RDIF", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_RDIF},
592 {"TDIF", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TDIF},
593 {"RSS", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_RSS},
594 {"MISC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MISC},
595 {"MISCS", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MISCS},
596 {"PCIE", ATTENTION_PAR, OSAL_NULL, BLOCK_PCIE},
597 {"Vaux PCI core", ATTENTION_SINGLE, OSAL_NULL, BLOCK_PGLCS},
598 {"PSWRQ", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRQ},
603 { /* After Invert 8 */
604 {"PSWRQ (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRQ2},
605 {"PSWWR", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWWR},
606 {"PSWWR (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWWR2},
607 {"PSWRD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRD},
608 {"PSWRD (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRD2},
609 {"PSWHST", ATTENTION_PAR_INT, ecore_pswhst_attn_cb, BLOCK_PSWHST},
610 {"PSWHST (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWHST2},
611 {"GRC", ATTENTION_PAR_INT, ecore_grc_attn_cb, BLOCK_GRC},
612 {"CPMU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CPMU},
613 {"NCSI", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_NCSI},
614 {"MSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
615 {"PSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
616 {"TSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
617 {"USEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
618 {"XSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
619 {"YSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
620 {"pxp_misc_mps", ATTENTION_PAR, OSAL_NULL, BLOCK_PGLCS},
621 {"PCIE glue/PXP Exp. ROM", ATTENTION_SINGLE, OSAL_NULL, BLOCK_PGLCS},
622 {"PERST_B assertion", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
623 {"PERST_B deassertion", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID},
624 {"Reserved %d", (2 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,
630 { /* After Invert 9 */
631 {"MCP Latched memory", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
632 {"MCP Latched scratchpad cache", ATTENTION_SINGLE, OSAL_NULL,
634 {"MCP Latched ump_tx", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
635 {"MCP Latched scratchpad", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID},
636 {"Reserved %d", (28 << ATTENTION_LENGTH_SHIFT), OSAL_NULL,
643 #define ATTN_STATE_BITS (0xfff)
644 #define ATTN_BITS_MASKABLE (0x3ff)
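/* Illustrative note (not in the original sources): with these masks the
 * attention status block exposes 12 state bits (0xfff), of which only the
 * lower 10 (0x3ff) may be masked in IGU_REG_ATTENTION_ENABLE; bit 8 (0x100)
 * is the MFW/MCP indication that is handled separately below.
 */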
645 struct ecore_sb_attn_info {
646 /* Virtual & Physical address of the SB */
647 struct atten_status_block *sb_attn;
650 /* Last seen running index */
653 /* A mask of the AEU bits resulting in a parity error */
654 u32 parity_mask[NUM_ATTN_REGS];
656 /* A pointer to the attention description structure */
657 struct aeu_invert_reg *p_aeu_desc;
659 /* Previously asserted attentions that have not yet been deasserted */
662 /* Cleanup address for the link's general hw attention */
666 static u16 ecore_attn_update_idx(struct ecore_hwfn *p_hwfn,
667 struct ecore_sb_attn_info *p_sb_desc)
671 OSAL_MMIOWB(p_hwfn->p_dev);
673 index = OSAL_LE16_TO_CPU(p_sb_desc->sb_attn->sb_index);
674 if (p_sb_desc->index != index) {
675 p_sb_desc->index = index;
676 rc = ECORE_SB_ATT_IDX;
679 OSAL_MMIOWB(p_hwfn->p_dev);
685 * @brief ecore_int_assertion - handles asserted attention bits
688 * @param asserted_bits newly asserted bits
689 * @return enum _ecore_status_t
691 static enum _ecore_status_t ecore_int_assertion(struct ecore_hwfn *p_hwfn,
694 struct ecore_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
697 /* Mask the source of the attention in the IGU */
698 igu_mask = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
699 IGU_REG_ATTENTION_ENABLE);
700 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "IGU mask: 0x%08x --> 0x%08x\n",
701 igu_mask, igu_mask & ~(asserted_bits & ATTN_BITS_MASKABLE));
702 igu_mask &= ~(asserted_bits & ATTN_BITS_MASKABLE);
703 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, igu_mask);
705 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
706 "inner known ATTN state: 0x%04x --> 0x%04x\n",
707 sb_attn_sw->known_attn,
708 sb_attn_sw->known_attn | asserted_bits);
709 sb_attn_sw->known_attn |= asserted_bits;
711 /* Handle MCP events */
712 if (asserted_bits & 0x100) {
713 ecore_mcp_handle_events(p_hwfn, p_hwfn->p_dpc_ptt);
714 /* Clean the MCP attention */
715 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt,
716 sb_attn_sw->mfw_attn_addr, 0);
719 /* FIXME - this will change once we have good GTT definitions */
720 DIRECT_REG_WR(p_hwfn,
721 (u8 OSAL_IOMEM *) p_hwfn->regview +
722 GTT_BAR0_MAP_REG_IGU_CMD +
723 ((IGU_CMD_ATTN_BIT_SET_UPPER -
724 IGU_CMD_INT_ACK_BASE) << 3), (u32)asserted_bits);
726 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "set cmd IGU: 0x%04x\n",
729 return ECORE_SUCCESS;
732 static void ecore_int_attn_print(struct ecore_hwfn *p_hwfn,
733 enum block_id id, enum dbg_attn_type type,
737 DP_NOTICE(p_hwfn->p_dev, false, "[block_id %d type %d]\n", id, type);
741 * @brief ecore_int_deassertion_aeu_bit - handles the effects of a single
742 * cause of the attention
745 * @param p_aeu - descriptor of an AEU bit which caused the attention
746 * @param aeu_en_reg - register offset of the AEU enable reg. which configured
747 * this bit to this group.
748 * @param bit_index - index of this bit in the aeu_en_reg
750 * @return enum _ecore_status_t
752 static enum _ecore_status_t
753 ecore_int_deassertion_aeu_bit(struct ecore_hwfn *p_hwfn,
754 struct aeu_invert_reg_bit *p_aeu,
756 const char *p_bit_name,
759 enum _ecore_status_t rc = ECORE_INVAL;
760 bool b_fatal = false;
762 DP_INFO(p_hwfn, "Deasserted attention `%s'[%08x]\n",
763 p_bit_name, bitmask);
765 /* Call callback before clearing the interrupt status */
767 DP_INFO(p_hwfn, "`%s (attention)': Calling Callback function\n",
769 rc = p_aeu->cb(p_hwfn);
772 if (rc != ECORE_SUCCESS)
775 /* Print HW block interrupt registers */
776 if (p_aeu->block_index != MAX_BLOCK_ID) {
777 ecore_int_attn_print(p_hwfn, p_aeu->block_index,
778 ATTN_TYPE_INTERRUPT, !b_fatal);
781 /* Reach assertion if attention is fatal */
783 DP_NOTICE(p_hwfn, true, "`%s': Fatal attention\n",
786 ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_HW_ATTN);
789 /* Prevent this Attention from being asserted in the future */
790 if (p_aeu->flags & ATTENTION_CLEAR_ENABLE ||
791 p_hwfn->p_dev->attn_clr_en) {
794 val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg);
795 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, (val & mask));
796 DP_INFO(p_hwfn, "`%s' - Disabled future attentions\n",
804 * @brief ecore_int_deassertion_parity - handle a single parity AEU source
807 * @param p_aeu - descriptor of an AEU bit which caused the
811 static void ecore_int_deassertion_parity(struct ecore_hwfn *p_hwfn,
812 struct aeu_invert_reg_bit *p_aeu,
815 u32 block_id = p_aeu->block_index;
817 DP_INFO(p_hwfn->p_dev, "%s[%d] parity attention is set\n",
818 p_aeu->bit_name, bit_index);
820 if (block_id == MAX_BLOCK_ID)
823 ecore_int_attn_print(p_hwfn, block_id,
824 ATTN_TYPE_PARITY, false);
826 /* In A0, there's a single parity bit for several blocks */
827 if (block_id == BLOCK_BTB) {
828 ecore_int_attn_print(p_hwfn, BLOCK_OPTE,
829 ATTN_TYPE_PARITY, false);
830 ecore_int_attn_print(p_hwfn, BLOCK_MCP,
831 ATTN_TYPE_PARITY, false);
836 * @brief - handles deassertion of previously asserted attentions.
839 * @param deasserted_bits - newly deasserted bits
840 * @return enum _ecore_status_t
843 static enum _ecore_status_t ecore_int_deassertion(struct ecore_hwfn *p_hwfn,
846 struct ecore_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn;
847 u32 aeu_inv_arr[NUM_ATTN_REGS], aeu_mask;
848 bool b_parity = false;
850 enum _ecore_status_t rc = ECORE_SUCCESS;
852 /* Read the attention registers in the AEU */
853 for (i = 0; i < NUM_ATTN_REGS; i++) {
854 aeu_inv_arr[i] = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
855 MISC_REG_AEU_AFTER_INVERT_1_IGU +
857 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
858 "Deasserted bits [%d]: %08x\n", i, aeu_inv_arr[i]);
861 /* Handle parity attentions first */
862 for (i = 0; i < NUM_ATTN_REGS; i++) {
863 struct aeu_invert_reg *p_aeu = &sb_attn_sw->p_aeu_desc[i];
864 u32 en = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
865 MISC_REG_AEU_ENABLE1_IGU_OUT_0 +
868 u32 parities = sb_attn_sw->parity_mask[i] & aeu_inv_arr[i] & en;
870 /* Skip register in which no parity bit is currently set */
874 for (j = 0, bit_idx = 0; bit_idx < 32; j++) {
875 struct aeu_invert_reg_bit *p_bit = &p_aeu->bits[j];
877 if ((p_bit->flags & ATTENTION_PARITY) &&
878 !!(parities & (1 << bit_idx))) {
879 ecore_int_deassertion_parity(p_hwfn, p_bit,
884 bit_idx += ATTENTION_LENGTH(p_bit->flags);
888 /* Find non-parity cause for attention and act */
889 for (k = 0; k < MAX_ATTN_GRPS; k++) {
890 struct aeu_invert_reg_bit *p_aeu;
892 /* Handle only groups whose attention is currently deasserted */
893 if (!(deasserted_bits & (1 << k)))
896 for (i = 0; i < NUM_ATTN_REGS; i++) {
897 u32 aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 +
898 i * sizeof(u32) + k * sizeof(u32) * NUM_ATTN_REGS;
899 u32 en = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en);
900 u32 bits = aeu_inv_arr[i] & en;
902 /* Skip if no bit from this group is currently set */
906 /* Find all set bits from current register which belong
907 * to current group, making them responsible for the
908 * previous assertion.
910 for (j = 0, bit_idx = 0; bit_idx < 32; j++) {
911 unsigned long int bitmask;
914 p_aeu = &sb_attn_sw->p_aeu_desc[i].bits[j];
916 /* No need to handle attention-only bits */
917 if (p_aeu->flags == ATTENTION_PAR)
921 bit_len = ATTENTION_LENGTH(p_aeu->flags);
922 if (p_aeu->flags & ATTENTION_PAR_INT) {
928 bitmask = bits & (((1 << bit_len) - 1) << bit);
930 u32 flags = p_aeu->flags;
933 bit = (u8)OSAL_FIND_FIRST_BIT(&bitmask,
936 /* Some bits represent more than a
937 * single interrupt. Correctly print
940 if (ATTENTION_LENGTH(flags) > 2 ||
941 ((flags & ATTENTION_PAR_INT) &&
942 ATTENTION_LENGTH(flags) > 1))
943 OSAL_SNPRINTF(bit_name, 30,
951 /* Handle source of the attention */
952 ecore_int_deassertion_aeu_bit(p_hwfn,
959 bit_idx += ATTENTION_LENGTH(p_aeu->flags);
964 /* Clear IGU indication for the deasserted bits */
965 /* FIXME - this will change once we have good GTT definitions */
966 DIRECT_REG_WR(p_hwfn,
967 (u8 OSAL_IOMEM *) p_hwfn->regview +
968 GTT_BAR0_MAP_REG_IGU_CMD +
969 ((IGU_CMD_ATTN_BIT_CLR_UPPER -
970 IGU_CMD_INT_ACK_BASE) << 3), ~((u32)deasserted_bits));
972 /* Unmask deasserted attentions in IGU */
973 aeu_mask = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt,
974 IGU_REG_ATTENTION_ENABLE);
975 aeu_mask |= (deasserted_bits & ATTN_BITS_MASKABLE);
976 ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, aeu_mask);
978 /* Clear deassertion from inner state */
979 sb_attn_sw->known_attn &= ~deasserted_bits;
984 static enum _ecore_status_t ecore_int_attentions(struct ecore_hwfn *p_hwfn)
986 struct ecore_sb_attn_info *p_sb_attn_sw = p_hwfn->p_sb_attn;
987 struct atten_status_block *p_sb_attn = p_sb_attn_sw->sb_attn;
988 u16 index = 0, asserted_bits, deasserted_bits;
989 u32 attn_bits = 0, attn_acks = 0;
990 enum _ecore_status_t rc = ECORE_SUCCESS;
992 /* Read current attention bits/acks - safeguard against attentions
993 * by guaranteeing work on a synchronized timeframe
996 index = OSAL_LE16_TO_CPU(p_sb_attn->sb_index);
997 attn_bits = OSAL_LE32_TO_CPU(p_sb_attn->atten_bits);
998 attn_acks = OSAL_LE32_TO_CPU(p_sb_attn->atten_ack);
999 } while (index != OSAL_LE16_TO_CPU(p_sb_attn->sb_index));
1000 p_sb_attn->sb_index = index;
1002 /* Attention / Deassertion are meaningful (and in correct state)
1003 * only when they differ and are consistent with the known state - deassertion
1004 * when previous attention & current ack, and assertion when current
1005 * attention with no previous attention
1007 asserted_bits = (attn_bits & ~attn_acks & ATTN_STATE_BITS) &
1008 ~p_sb_attn_sw->known_attn;
1009 deasserted_bits = (~attn_bits & attn_acks & ATTN_STATE_BITS) &
1010 p_sb_attn_sw->known_attn;
1012 if ((asserted_bits & ~0x100) || (deasserted_bits & ~0x100))
1014 "Attention: Index: 0x%04x, Bits: 0x%08x, Acks: 0x%08x, asserted: 0x%04x, De-asserted 0x%04x [Prev. known: 0x%04x]\n",
1015 index, attn_bits, attn_acks, asserted_bits,
1016 deasserted_bits, p_sb_attn_sw->known_attn);
1017 else if (asserted_bits == 0x100)
1018 DP_INFO(p_hwfn, "MFW indication via attention\n");
1020 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
1021 "MFW indication [deassertion]\n");
1023 if (asserted_bits) {
1024 rc = ecore_int_assertion(p_hwfn, asserted_bits);
1029 if (deasserted_bits)
1030 rc = ecore_int_deassertion(p_hwfn, deasserted_bits);
1035 static void ecore_sb_ack_attn(struct ecore_hwfn *p_hwfn,
1036 void OSAL_IOMEM *igu_addr, u32 ack_cons)
1038 struct igu_prod_cons_update igu_ack = { 0 };
1040 igu_ack.sb_id_and_flags =
1041 ((ack_cons << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) |
1042 (1 << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) |
1043 (IGU_INT_NOP << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) |
1044 (IGU_SEG_ACCESS_ATTN <<
1045 IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT));
1047 DIRECT_REG_WR(p_hwfn, igu_addr, igu_ack.sb_id_and_flags);
1049 /* Both segments (interrupts & acks) are written to the same address;
1050 * Need to guarantee all commands will be received (in-order) by HW.
1052 OSAL_MMIOWB(p_hwfn->p_dev);
1053 OSAL_BARRIER(p_hwfn->p_dev);
1056 void ecore_int_sp_dpc(osal_int_ptr_t hwfn_cookie)
1058 struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)hwfn_cookie;
1059 struct ecore_pi_info *pi_info = OSAL_NULL;
1060 struct ecore_sb_attn_info *sb_attn;
1061 struct ecore_sb_info *sb_info;
1068 if (!p_hwfn->p_sp_sb) {
1069 DP_ERR(p_hwfn->p_dev, "DPC called - no p_sp_sb\n");
1073 sb_info = &p_hwfn->p_sp_sb->sb_info;
1074 arr_size = OSAL_ARRAY_SIZE(p_hwfn->p_sp_sb->pi_info_arr);
1076 DP_ERR(p_hwfn->p_dev,
1077 "Status block is NULL - cannot ack interrupts\n");
1081 if (!p_hwfn->p_sb_attn) {
1082 DP_ERR(p_hwfn->p_dev, "DPC called - no p_sb_attn");
1085 sb_attn = p_hwfn->p_sb_attn;
1087 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "DPC Called! (hwfn %p %d)\n",
1088 p_hwfn, p_hwfn->my_id);
1090 /* Disable ack for def status block. Required both for msix and
1091 * inta in non-mask mode; in inta it does no harm.
1093 ecore_sb_ack(sb_info, IGU_INT_DISABLE, 0);
1095 /* Gather Interrupts/Attentions information */
1096 if (!sb_info->sb_virt) {
1097 DP_ERR(p_hwfn->p_dev,
1098 "Interrupt Status block is NULL -"
1099 " cannot check for new interrupts!\n");
1101 u32 tmp_index = sb_info->sb_ack;
1102 rc = ecore_sb_update_sb_idx(sb_info);
1103 DP_VERBOSE(p_hwfn->p_dev, ECORE_MSG_INTR,
1104 "Interrupt indices: 0x%08x --> 0x%08x\n",
1105 tmp_index, sb_info->sb_ack);
1108 if (!sb_attn || !sb_attn->sb_attn) {
1109 DP_ERR(p_hwfn->p_dev,
1110 "Attentions Status block is NULL -"
1111 " cannot check for new attentions!\n");
1113 u16 tmp_index = sb_attn->index;
1115 rc |= ecore_attn_update_idx(p_hwfn, sb_attn);
1116 DP_VERBOSE(p_hwfn->p_dev, ECORE_MSG_INTR,
1117 "Attention indices: 0x%08x --> 0x%08x\n",
1118 tmp_index, sb_attn->index);
1121 /* Check if we expect interrupts at this time. If not, just ack them */
1122 if (!(rc & ECORE_SB_EVENT_MASK)) {
1123 ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1);
1127 /* Check the validity of the DPC ptt. If not valid, ack interrupts and fail */
1129 if (!p_hwfn->p_dpc_ptt) {
1130 DP_NOTICE(p_hwfn->p_dev, true, "Failed to allocate PTT\n");
1131 ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1);
1135 if (rc & ECORE_SB_ATT_IDX)
1136 ecore_int_attentions(p_hwfn);
1138 if (rc & ECORE_SB_IDX) {
1141 /* Since we only looked at the SB index, it's possible more
1142 * than a single protocol-index on the SB incremented.
1143 * Iterate over all configured protocol indices and check
1144 * whether something happened for each.
1146 for (pi = 0; pi < arr_size; pi++) {
1147 pi_info = &p_hwfn->p_sp_sb->pi_info_arr[pi];
1148 if (pi_info->comp_cb != OSAL_NULL)
1149 pi_info->comp_cb(p_hwfn, pi_info->cookie);
1153 if (sb_attn && (rc & ECORE_SB_ATT_IDX)) {
1154 /* This should be done before the interrupts are enabled,
1155 * since otherwise a new attention will be generated.
1157 ecore_sb_ack_attn(p_hwfn, sb_info->igu_addr, sb_attn->index);
1160 ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1);
1163 static void ecore_int_sb_attn_free(struct ecore_hwfn *p_hwfn)
1165 struct ecore_sb_attn_info *p_sb = p_hwfn->p_sb_attn;
1170 if (p_sb->sb_attn) {
1171 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_sb->sb_attn,
1173 SB_ATTN_ALIGNED_SIZE(p_hwfn));
1175 OSAL_FREE(p_hwfn->p_dev, p_sb);
1178 static void ecore_int_sb_attn_setup(struct ecore_hwfn *p_hwfn,
1179 struct ecore_ptt *p_ptt)
1181 struct ecore_sb_attn_info *sb_info = p_hwfn->p_sb_attn;
1183 OSAL_MEMSET(sb_info->sb_attn, 0, sizeof(*sb_info->sb_attn));
1186 sb_info->known_attn = 0;
1188 /* Configure Attention Status Block in IGU */
1189 ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_L,
1190 DMA_LO(p_hwfn->p_sb_attn->sb_phys));
1191 ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_H,
1192 DMA_HI(p_hwfn->p_sb_attn->sb_phys));
1195 static void ecore_int_sb_attn_init(struct ecore_hwfn *p_hwfn,
1196 struct ecore_ptt *p_ptt,
1197 void *sb_virt_addr, dma_addr_t sb_phy_addr)
1199 struct ecore_sb_attn_info *sb_info = p_hwfn->p_sb_attn;
1202 sb_info->sb_attn = sb_virt_addr;
1203 sb_info->sb_phys = sb_phy_addr;
1205 /* Set the pointer to the AEU descriptors */
1206 sb_info->p_aeu_desc = aeu_descs;
1208 /* Calculate Parity Masks */
1209 OSAL_MEMSET(sb_info->parity_mask, 0, sizeof(u32) * NUM_ATTN_REGS);
1210 for (i = 0; i < NUM_ATTN_REGS; i++) {
1211 /* j is array index, k is bit index */
1212 for (j = 0, k = 0; k < 32; j++) {
1213 unsigned int flags = aeu_descs[i].bits[j].flags;
1215 if (flags & ATTENTION_PARITY)
1216 sb_info->parity_mask[i] |= 1 << k;
1218 k += ATTENTION_LENGTH(flags);
1220 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
1221 "Attn Mask [Reg %d]: 0x%08x\n",
1222 i, sb_info->parity_mask[i]);
1225 /* Set the address of cleanup for the mcp attention */
1226 sb_info->mfw_attn_addr = (p_hwfn->rel_pf_id << 3) +
1227 MISC_REG_AEU_GENERAL_ATTN_0;
1229 ecore_int_sb_attn_setup(p_hwfn, p_ptt);
1232 static enum _ecore_status_t ecore_int_sb_attn_alloc(struct ecore_hwfn *p_hwfn,
1233 struct ecore_ptt *p_ptt)
1235 struct ecore_dev *p_dev = p_hwfn->p_dev;
1236 struct ecore_sb_attn_info *p_sb;
1237 dma_addr_t p_phys = 0;
1241 p_sb = OSAL_ALLOC(p_dev, GFP_KERNEL, sizeof(*p_sb));
1243 DP_NOTICE(p_dev, true,
1244 "Failed to allocate `struct ecore_sb_attn_info'");
1249 p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys,
1250 SB_ATTN_ALIGNED_SIZE(p_hwfn));
1252 DP_NOTICE(p_dev, true,
1253 "Failed to allocate status block (attentions)");
1254 OSAL_FREE(p_dev, p_sb);
1258 /* Attention setup */
1259 p_hwfn->p_sb_attn = p_sb;
1260 ecore_int_sb_attn_init(p_hwfn, p_ptt, p_virt, p_phys);
1262 return ECORE_SUCCESS;
1265 /* coalescing timeout = timeset << (timer_res + 1) */
1266 #define ECORE_CAU_DEF_RX_USECS 24
1267 #define ECORE_CAU_DEF_TX_USECS 48
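/* Illustrative example (not in the original sources, and assuming the usual
 * resolution steps of 0/1/2 for the <= 0x7F / <= 0xFF / larger ranges checked
 * below): the default Rx value of 24 usecs fits in 7 bits, so a timer
 * resolution of 0 is used and the timeset programmed into the CAU entry is
 * simply 24; a 200 usec setting would instead use resolution 1 and a timeset
 * of 200 >> 1 = 100.
 */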
1269 void ecore_init_cau_sb_entry(struct ecore_hwfn *p_hwfn,
1270 struct cau_sb_entry *p_sb_entry,
1271 u8 pf_id, u16 vf_number, u8 vf_valid)
1273 struct ecore_dev *p_dev = p_hwfn->p_dev;
1277 OSAL_MEMSET(p_sb_entry, 0, sizeof(*p_sb_entry));
1279 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_PF_NUMBER, pf_id);
1280 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_NUMBER, vf_number);
1281 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_VALID, vf_valid);
1282 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET0, 0x7F);
1283 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET1, 0x7F);
1285 cau_state = CAU_HC_DISABLE_STATE;
1287 if (p_dev->int_coalescing_mode == ECORE_COAL_MODE_ENABLE) {
1288 cau_state = CAU_HC_ENABLE_STATE;
1289 if (!p_dev->rx_coalesce_usecs)
1290 p_dev->rx_coalesce_usecs = ECORE_CAU_DEF_RX_USECS;
1291 if (!p_dev->tx_coalesce_usecs)
1292 p_dev->tx_coalesce_usecs = ECORE_CAU_DEF_TX_USECS;
1295 /* Coalesce = (timeset << timer-res), timeset is 7bit wide */
1296 if (p_dev->rx_coalesce_usecs <= 0x7F)
1298 else if (p_dev->rx_coalesce_usecs <= 0xFF)
1302 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES0, timer_res);
1304 if (p_dev->tx_coalesce_usecs <= 0x7F)
1306 else if (p_dev->tx_coalesce_usecs <= 0xFF)
1310 SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES1, timer_res);
1312 SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE0, cau_state);
1313 SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE1, cau_state);
1316 void ecore_int_cau_conf_sb(struct ecore_hwfn *p_hwfn,
1317 struct ecore_ptt *p_ptt,
1318 dma_addr_t sb_phys, u16 igu_sb_id,
1319 u16 vf_number, u8 vf_valid)
1321 struct cau_sb_entry sb_entry;
1323 ecore_init_cau_sb_entry(p_hwfn, &sb_entry, p_hwfn->rel_pf_id,
1324 vf_number, vf_valid);
1326 if (p_hwfn->hw_init_done) {
1327 /* Wide-bus, initialize via DMAE */
1328 u64 phys_addr = (u64)sb_phys;
1330 ecore_dmae_host2grc(p_hwfn, p_ptt,
1331 (u64)(osal_uintptr_t)&phys_addr,
1332 CAU_REG_SB_ADDR_MEMORY +
1333 igu_sb_id * sizeof(u64), 2, 0);
1334 ecore_dmae_host2grc(p_hwfn, p_ptt,
1335 (u64)(osal_uintptr_t)&sb_entry,
1336 CAU_REG_SB_VAR_MEMORY +
1337 igu_sb_id * sizeof(u64), 2, 0);
1339 /* Initialize Status Block Address */
1340 STORE_RT_REG_AGG(p_hwfn,
1341 CAU_REG_SB_ADDR_MEMORY_RT_OFFSET +
1342 igu_sb_id * 2, sb_phys);
1344 STORE_RT_REG_AGG(p_hwfn,
1345 CAU_REG_SB_VAR_MEMORY_RT_OFFSET +
1346 igu_sb_id * 2, sb_entry);
1349 /* Configure pi coalescing if set */
1350 if (p_hwfn->p_dev->int_coalescing_mode == ECORE_COAL_MODE_ENABLE) {
1351 /* eth will open queues for all tcs, so configure all of them
1352 * properly, rather than just the active ones
1354 u8 num_tc = p_hwfn->hw_info.num_hw_tc;
1356 u8 timeset, timer_res;
1359 /* timeset = (coalesce >> timer-res), timeset is 7bit wide */
1360 if (p_hwfn->p_dev->rx_coalesce_usecs <= 0x7F)
1362 else if (p_hwfn->p_dev->rx_coalesce_usecs <= 0xFF)
1366 timeset = (u8)(p_hwfn->p_dev->rx_coalesce_usecs >> timer_res);
1367 ecore_int_cau_conf_pi(p_hwfn, p_ptt, igu_sb_id, RX_PI,
1368 ECORE_COAL_RX_STATE_MACHINE, timeset);
1370 if (p_hwfn->p_dev->tx_coalesce_usecs <= 0x7F)
1372 else if (p_hwfn->p_dev->tx_coalesce_usecs <= 0xFF)
1376 timeset = (u8)(p_hwfn->p_dev->tx_coalesce_usecs >> timer_res);
1377 for (i = 0; i < num_tc; i++) {
1378 ecore_int_cau_conf_pi(p_hwfn, p_ptt,
1379 igu_sb_id, TX_PI(i),
1380 ECORE_COAL_TX_STATE_MACHINE,
1386 void ecore_int_cau_conf_pi(struct ecore_hwfn *p_hwfn,
1387 struct ecore_ptt *p_ptt,
1388 u16 igu_sb_id, u32 pi_index,
1389 enum ecore_coalescing_fsm coalescing_fsm, u8 timeset)
1391 struct cau_pi_entry pi_entry;
1392 u32 sb_offset, pi_offset;
1394 if (IS_VF(p_hwfn->p_dev))
1395 return; /* @@@TBD MichalK- VF CAU... */
1397 sb_offset = igu_sb_id * PIS_PER_SB;
1398 OSAL_MEMSET(&pi_entry, 0, sizeof(struct cau_pi_entry));
1400 SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_PI_TIMESET, timeset);
1401 if (coalescing_fsm == ECORE_COAL_RX_STATE_MACHINE)
1402 SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 0);
1404 SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 1);
1406 pi_offset = sb_offset + pi_index;
1407 if (p_hwfn->hw_init_done) {
1408 ecore_wr(p_hwfn, p_ptt,
1409 CAU_REG_PI_MEMORY + pi_offset * sizeof(u32),
1410 *((u32 *)&(pi_entry)));
1412 STORE_RT_REG(p_hwfn,
1413 CAU_REG_PI_MEMORY_RT_OFFSET + pi_offset,
1414 *((u32 *)&(pi_entry)));
1418 void ecore_int_sb_setup(struct ecore_hwfn *p_hwfn,
1419 struct ecore_ptt *p_ptt, struct ecore_sb_info *sb_info)
1421 /* zero status block and ack counter */
1422 sb_info->sb_ack = 0;
1423 OSAL_MEMSET(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));
1425 if (IS_PF(p_hwfn->p_dev))
1426 ecore_int_cau_conf_sb(p_hwfn, p_ptt, sb_info->sb_phys,
1427 sb_info->igu_sb_id, 0, 0);
1431 * @brief ecore_get_igu_sb_id - given a sw sb_id return the
1439 static u16 ecore_get_igu_sb_id(struct ecore_hwfn *p_hwfn, u16 sb_id)
1443 /* Assuming continuous set of IGU SBs dedicated for given PF */
1444 if (sb_id == ECORE_SP_SB_ID)
1445 igu_sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
1446 else if (IS_PF(p_hwfn->p_dev))
1447 igu_sb_id = sb_id + p_hwfn->hw_info.p_igu_info->igu_base_sb;
1449 igu_sb_id = ecore_vf_get_igu_sb_id(p_hwfn, sb_id);
1451 if (sb_id == ECORE_SP_SB_ID)
1452 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
1453 "Slowpath SB index in IGU is 0x%04x\n", igu_sb_id);
1455 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
1456 "SB [%04x] <--> IGU SB [%04x]\n", sb_id, igu_sb_id);
1461 enum _ecore_status_t ecore_int_sb_init(struct ecore_hwfn *p_hwfn,
1462 struct ecore_ptt *p_ptt,
1463 struct ecore_sb_info *sb_info,
1465 dma_addr_t sb_phy_addr, u16 sb_id)
1467 sb_info->sb_virt = sb_virt_addr;
1468 sb_info->sb_phys = sb_phy_addr;
1470 sb_info->igu_sb_id = ecore_get_igu_sb_id(p_hwfn, sb_id);
1472 if (sb_id != ECORE_SP_SB_ID) {
1473 p_hwfn->sbs_info[sb_id] = sb_info;
1476 #ifdef ECORE_CONFIG_DIRECT_HWFN
1477 sb_info->p_hwfn = p_hwfn;
1479 sb_info->p_dev = p_hwfn->p_dev;
1481 /* The igu address will hold the absolute address that needs to be
1482 * written to for a specific status block
1484 if (IS_PF(p_hwfn->p_dev)) {
1485 sb_info->igu_addr = (u8 OSAL_IOMEM *)p_hwfn->regview +
1486 GTT_BAR0_MAP_REG_IGU_CMD + (sb_info->igu_sb_id << 3);
1490 (u8 OSAL_IOMEM *)p_hwfn->regview +
1491 PXP_VF_BAR0_START_IGU +
1492 ((IGU_CMD_INT_ACK_BASE + sb_info->igu_sb_id) << 3);
1495 sb_info->flags |= ECORE_SB_INFO_INIT;
1497 ecore_int_sb_setup(p_hwfn, p_ptt, sb_info);
1499 return ECORE_SUCCESS;
1502 enum _ecore_status_t ecore_int_sb_release(struct ecore_hwfn *p_hwfn,
1503 struct ecore_sb_info *sb_info,
1506 if (sb_id == ECORE_SP_SB_ID) {
1507 DP_ERR(p_hwfn, "Do Not free sp sb using this function");
1511 /* zero status block and ack counter */
1512 sb_info->sb_ack = 0;
1513 OSAL_MEMSET(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt));
1515 if (p_hwfn->sbs_info[sb_id] != OSAL_NULL) {
1516 p_hwfn->sbs_info[sb_id] = OSAL_NULL;
1520 return ECORE_SUCCESS;
1523 static void ecore_int_sp_sb_free(struct ecore_hwfn *p_hwfn)
1525 struct ecore_sb_sp_info *p_sb = p_hwfn->p_sp_sb;
1530 if (p_sb->sb_info.sb_virt) {
1531 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
1532 p_sb->sb_info.sb_virt,
1533 p_sb->sb_info.sb_phys,
1534 SB_ALIGNED_SIZE(p_hwfn));
1537 OSAL_FREE(p_hwfn->p_dev, p_sb);
1540 static enum _ecore_status_t ecore_int_sp_sb_alloc(struct ecore_hwfn *p_hwfn,
1541 struct ecore_ptt *p_ptt)
1543 struct ecore_sb_sp_info *p_sb;
1544 dma_addr_t p_phys = 0;
1549 OSAL_ALLOC(p_hwfn->p_dev, GFP_KERNEL,
1552 DP_NOTICE(p_hwfn, true,
1553 "Failed to allocate `struct ecore_sb_info'\n");
1558 p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
1559 &p_phys, SB_ALIGNED_SIZE(p_hwfn));
1561 DP_NOTICE(p_hwfn, true, "Failed to allocate status block\n");
1562 OSAL_FREE(p_hwfn->p_dev, p_sb);
1566 /* Status Block setup */
1567 p_hwfn->p_sp_sb = p_sb;
1568 ecore_int_sb_init(p_hwfn, p_ptt, &p_sb->sb_info,
1569 p_virt, p_phys, ECORE_SP_SB_ID);
1571 OSAL_MEMSET(p_sb->pi_info_arr, 0, sizeof(p_sb->pi_info_arr));
1573 return ECORE_SUCCESS;
1576 enum _ecore_status_t ecore_int_register_cb(struct ecore_hwfn *p_hwfn,
1577 ecore_int_comp_cb_t comp_cb,
1579 u8 *sb_idx, __le16 **p_fw_cons)
1581 struct ecore_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;
1582 enum _ecore_status_t rc = ECORE_NOMEM;
1585 /* Look for a free index */
1586 for (pi = 0; pi < OSAL_ARRAY_SIZE(p_sp_sb->pi_info_arr); pi++) {
1587 if (p_sp_sb->pi_info_arr[pi].comp_cb != OSAL_NULL)
1590 p_sp_sb->pi_info_arr[pi].comp_cb = comp_cb;
1591 p_sp_sb->pi_info_arr[pi].cookie = cookie;
1593 *p_fw_cons = &p_sp_sb->sb_info.sb_virt->pi_array[pi];
1601 enum _ecore_status_t ecore_int_unregister_cb(struct ecore_hwfn *p_hwfn, u8 pi)
1603 struct ecore_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb;
1605 if (p_sp_sb->pi_info_arr[pi].comp_cb == OSAL_NULL)
1608 p_sp_sb->pi_info_arr[pi].comp_cb = OSAL_NULL;
1609 p_sp_sb->pi_info_arr[pi].cookie = OSAL_NULL;
1610 return ECORE_SUCCESS;
1613 u16 ecore_int_get_sp_sb_id(struct ecore_hwfn *p_hwfn)
1615 return p_hwfn->p_sp_sb->sb_info.igu_sb_id;
1618 void ecore_int_igu_enable_int(struct ecore_hwfn *p_hwfn,
1619 struct ecore_ptt *p_ptt,
1620 enum ecore_int_mode int_mode)
1622 u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN | IGU_PF_CONF_ATTN_BIT_EN;
1625 if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
1626 DP_INFO(p_hwfn, "FPGA - don't enable ATTN generation in IGU\n");
1627 igu_pf_conf &= ~IGU_PF_CONF_ATTN_BIT_EN;
1631 p_hwfn->p_dev->int_mode = int_mode;
1632 switch (p_hwfn->p_dev->int_mode) {
1633 case ECORE_INT_MODE_INTA:
1634 igu_pf_conf |= IGU_PF_CONF_INT_LINE_EN;
1635 igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
1638 case ECORE_INT_MODE_MSI:
1639 igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN;
1640 igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
1643 case ECORE_INT_MODE_MSIX:
1644 igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN;
1646 case ECORE_INT_MODE_POLL:
1650 ecore_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, igu_pf_conf);
1653 static void ecore_int_igu_enable_attn(struct ecore_hwfn *p_hwfn,
1654 struct ecore_ptt *p_ptt)
1657 if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
1659 "FPGA - Don't enable Attentions in IGU and MISC\n");
1664 /* Configure AEU signal change to produce attentions */
1665 ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0);
1666 ecore_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0xfff);
1667 ecore_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0xfff);
1668 ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0xfff);
1670 /* Flush the writes to IGU */
1671 OSAL_MMIOWB(p_hwfn->p_dev);
1673 /* Unmask AEU signals toward IGU */
1674 ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff);
1677 enum _ecore_status_t
1678 ecore_int_igu_enable(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1679 enum ecore_int_mode int_mode)
1681 enum _ecore_status_t rc = ECORE_SUCCESS;
1684 /* @@@tmp - Starting with MFW 8.2.1.0 we've started hitting AVS stop
685 * attentions. Since we're waiting for a BRCM answer regarding this
686 * attention, we simply mask it in the meanwhile.
1688 tmp = ecore_rd(p_hwfn, p_ptt, MISC_REG_AEU_ENABLE4_IGU_OUT_0);
1690 ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_ENABLE4_IGU_OUT_0, tmp);
1692 ecore_int_igu_enable_attn(p_hwfn, p_ptt);
1694 if ((int_mode != ECORE_INT_MODE_INTA) || IS_LEAD_HWFN(p_hwfn)) {
1695 rc = OSAL_SLOWPATH_IRQ_REQ(p_hwfn);
1696 if (rc != ECORE_SUCCESS) {
1697 DP_NOTICE(p_hwfn, true,
1698 "Slowpath IRQ request failed\n");
1699 return ECORE_NORESOURCES;
1701 p_hwfn->b_int_requested = true;
1704 /* Enable interrupt Generation */
1705 ecore_int_igu_enable_int(p_hwfn, p_ptt, int_mode);
1707 p_hwfn->b_int_enabled = 1;
1712 void ecore_int_igu_disable_int(struct ecore_hwfn *p_hwfn,
1713 struct ecore_ptt *p_ptt)
1715 p_hwfn->b_int_enabled = 0;
1717 if (IS_VF(p_hwfn->p_dev))
1720 ecore_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, 0);
1723 #define IGU_CLEANUP_SLEEP_LENGTH (1000)
1724 static void ecore_int_igu_cleanup_sb(struct ecore_hwfn *p_hwfn,
1725 struct ecore_ptt *p_ptt,
1726 u32 sb_id, bool cleanup_set, u16 opaque_fid)
1728 u32 cmd_ctrl = 0, val = 0, sb_bit = 0, sb_bit_addr = 0, data = 0;
1729 u32 pxp_addr = IGU_CMD_INT_ACK_BASE + sb_id;
1730 u32 sleep_cnt = IGU_CLEANUP_SLEEP_LENGTH;
1731 u8 type = 0; /* FIXME MichalS type??? */
1733 OSAL_BUILD_BUG_ON((IGU_REG_CLEANUP_STATUS_4 -
1734 IGU_REG_CLEANUP_STATUS_0) != 0x200);
1736 /* Use the Control Command Register to perform cleanup. There is an
1737 * option to do this using IGU bar, but then it can't be used for VFs.
1740 /* Set the data field */
1741 SET_FIELD(data, IGU_CLEANUP_CLEANUP_SET, cleanup_set ? 1 : 0);
1742 SET_FIELD(data, IGU_CLEANUP_CLEANUP_TYPE, type);
1743 SET_FIELD(data, IGU_CLEANUP_COMMAND_TYPE, IGU_COMMAND_TYPE_SET);
1745 /* Set the control register */
1746 SET_FIELD(cmd_ctrl, IGU_CTRL_REG_PXP_ADDR, pxp_addr);
1747 SET_FIELD(cmd_ctrl, IGU_CTRL_REG_FID, opaque_fid);
1748 SET_FIELD(cmd_ctrl, IGU_CTRL_REG_TYPE, IGU_CTRL_CMD_TYPE_WR);
1750 ecore_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_32LSB_DATA, data);
1752 OSAL_BARRIER(p_hwfn->p_dev);
1754 ecore_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_CTRL, cmd_ctrl);
1756 /* Flush the write to IGU */
1757 OSAL_MMIOWB(p_hwfn->p_dev);
1759 /* calculate where to read the status bit from */
1760 sb_bit = 1 << (sb_id % 32);
1761 sb_bit_addr = sb_id / 32 * sizeof(u32);
1763 sb_bit_addr += IGU_REG_CLEANUP_STATUS_0 + (0x80 * type);
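/* Illustrative example (not in the original sources): for sb_id 37 and
 * type 0 this polls bit 5 (37 % 32) of the second 32-bit word (37 / 32 = 1)
 * of the cleanup status area, i.e. offset IGU_REG_CLEANUP_STATUS_0 + 4.
 */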
1765 /* Now wait for the command to complete */
1766 while (--sleep_cnt) {
1767 val = ecore_rd(p_hwfn, p_ptt, sb_bit_addr);
1768 if ((val & sb_bit) == (cleanup_set ? sb_bit : 0))
1774 DP_NOTICE(p_hwfn, true,
1775 "Timeout waiting for clear status 0x%08x [for sb %d]\n",
1779 void ecore_int_igu_init_pure_rt_single(struct ecore_hwfn *p_hwfn,
1780 struct ecore_ptt *p_ptt,
1781 u32 sb_id, u16 opaque, bool b_set)
1787 ecore_int_igu_cleanup_sb(p_hwfn, p_ptt, sb_id, 1, opaque);
1790 ecore_int_igu_cleanup_sb(p_hwfn, p_ptt, sb_id, 0, opaque);
1792 /* Wait for the IGU SB to cleanup */
1793 for (i = 0; i < IGU_CLEANUP_SLEEP_LENGTH; i++) {
1796 val = ecore_rd(p_hwfn, p_ptt,
1797 IGU_REG_WRITE_DONE_PENDING +
1798 ((sb_id / 32) * 4));
1799 if (val & (1 << (sb_id % 32)))
1804 if (i == IGU_CLEANUP_SLEEP_LENGTH)
1805 DP_NOTICE(p_hwfn, true,
1806 "Failed SB[0x%08x] still appearing in WRITE_DONE_PENDING\n",
1809 /* Clear the CAU for the SB */
1810 for (pi = 0; pi < 12; pi++)
1811 ecore_wr(p_hwfn, p_ptt,
1812 CAU_REG_PI_MEMORY + (sb_id * 12 + pi) * 4, 0);
1815 void ecore_int_igu_init_pure_rt(struct ecore_hwfn *p_hwfn,
1816 struct ecore_ptt *p_ptt,
1817 bool b_set, bool b_slowpath)
1819 u32 igu_base_sb = p_hwfn->hw_info.p_igu_info->igu_base_sb;
1820 u32 igu_sb_cnt = p_hwfn->hw_info.p_igu_info->igu_sb_cnt;
1821 u32 sb_id = 0, val = 0;
1823 /* @@@TBD MichalK temporary... should be moved to init-tool... */
1824 val = ecore_rd(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION);
1825 val |= IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN;
1826 val &= ~IGU_REG_BLOCK_CONFIGURATION_PXP_TPH_INTERFACE_EN;
1827 ecore_wr(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION, val);
1830 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
1831 "IGU cleaning SBs [%d,...,%d]\n",
1832 igu_base_sb, igu_base_sb + igu_sb_cnt - 1);
1834 for (sb_id = igu_base_sb; sb_id < igu_base_sb + igu_sb_cnt; sb_id++)
1835 ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt, sb_id,
1836 p_hwfn->hw_info.opaque_fid,
1842 sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id;
1843 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
1844 "IGU cleaning slowpath SB [%d]\n", sb_id);
1845 ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt, sb_id,
1846 p_hwfn->hw_info.opaque_fid, b_set);
1849 static u32 ecore_int_igu_read_cam_block(struct ecore_hwfn *p_hwfn,
1850 struct ecore_ptt *p_ptt, u16 sb_id)
1852 u32 val = ecore_rd(p_hwfn, p_ptt,
1853 IGU_REG_MAPPING_MEMORY + sizeof(u32) * sb_id);
1854 struct ecore_igu_block *p_block;
1856 p_block = &p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks[sb_id];
1858 /* stop scanning when we hit the first invalid PF entry */
1859 if (!GET_FIELD(val, IGU_MAPPING_LINE_VALID) &&
1860 GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID))
1863 /* Fill the block information */
1864 p_block->status = ECORE_IGU_STATUS_VALID;
1865 p_block->function_id = GET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER);
1866 p_block->is_pf = GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID);
1867 p_block->vector_number = GET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER);
1869 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
1870 "IGU_BLOCK: [SB 0x%04x, Value in CAM 0x%08x] func_id = %d"
1871 " is_pf = %d vector_num = 0x%x\n",
1872 sb_id, val, p_block->function_id, p_block->is_pf,
1873 p_block->vector_number);
1879 enum _ecore_status_t ecore_int_igu_read_cam(struct ecore_hwfn *p_hwfn,
1880 struct ecore_ptt *p_ptt)
1882 struct ecore_igu_info *p_igu_info;
1883 struct ecore_igu_block *p_block;
1884 u32 min_vf = 0, max_vf = 0, val;
1885 u16 sb_id, last_iov_sb_id = 0;
1886 u16 prev_sb_id = 0xFF;
1888 p_hwfn->hw_info.p_igu_info = OSAL_ALLOC(p_hwfn->p_dev,
1890 sizeof(*p_igu_info));
1891 if (!p_hwfn->hw_info.p_igu_info)
1894 OSAL_MEMSET(p_hwfn->hw_info.p_igu_info, 0, sizeof(*p_igu_info));
1896 p_igu_info = p_hwfn->hw_info.p_igu_info;
1898 /* Initialize base sb / sb cnt for PFs and VFs */
1899 p_igu_info->igu_base_sb = 0xffff;
1900 p_igu_info->igu_sb_cnt = 0;
1901 p_igu_info->igu_dsb_id = 0xffff;
1902 p_igu_info->igu_base_sb_iov = 0xffff;
1904 if (p_hwfn->p_dev->p_iov_info) {
1905 struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info;
1907 min_vf = p_iov->first_vf_in_pf;
1908 max_vf = p_iov->first_vf_in_pf + p_iov->total_vfs;
1911 sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev);
1913 p_block = &p_igu_info->igu_map.igu_blocks[sb_id];
1914 val = ecore_int_igu_read_cam_block(p_hwfn, p_ptt, sb_id);
1915 if (!GET_FIELD(val, IGU_MAPPING_LINE_VALID) &&
1916 GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID))
1919 if (p_block->is_pf) {
1920 if (p_block->function_id == p_hwfn->rel_pf_id) {
1921 p_block->status |= ECORE_IGU_STATUS_PF;
1923 if (p_block->vector_number == 0) {
1924 if (p_igu_info->igu_dsb_id == 0xffff)
1925 p_igu_info->igu_dsb_id = sb_id;
1927 if (p_igu_info->igu_base_sb == 0xffff) {
1928 p_igu_info->igu_base_sb = sb_id;
1929 } else if (prev_sb_id != sb_id - 1) {
1930 DP_NOTICE(p_hwfn->p_dev, false,
1938 /* we don't count the default */
1939 (p_igu_info->igu_sb_cnt)++;
1943 if ((p_block->function_id >= min_vf) &&
1944 (p_block->function_id < max_vf)) {
1945 /* Available for VFs of this PF */
1946 if (p_igu_info->igu_base_sb_iov == 0xffff) {
1947 p_igu_info->igu_base_sb_iov = sb_id;
1948 } else if (last_iov_sb_id != sb_id - 1) {
1950 DP_VERBOSE(p_hwfn->p_dev,
1952 "First uninited IGU"
1957 DP_NOTICE(p_hwfn->p_dev, false,
1968 p_block->status |= ECORE_IGU_STATUS_FREE;
1969 p_hwfn->hw_info.p_igu_info->free_blks++;
1970 last_iov_sb_id = sb_id;
1974 p_igu_info->igu_sb_cnt_iov = p_igu_info->free_blks;
1976 DP_VERBOSE(p_hwfn, ECORE_MSG_INTR,
1977 "IGU igu_base_sb=0x%x [IOV 0x%x] igu_sb_cnt=%d [IOV 0x%x] "
1978 "igu_dsb_id=0x%x\n",
1979 p_igu_info->igu_base_sb, p_igu_info->igu_base_sb_iov,
1980 p_igu_info->igu_sb_cnt, p_igu_info->igu_sb_cnt_iov,
1981 p_igu_info->igu_dsb_id);
1983 if (p_igu_info->igu_base_sb == 0xffff ||
1984 p_igu_info->igu_dsb_id == 0xffff || p_igu_info->igu_sb_cnt == 0) {
1985 DP_NOTICE(p_hwfn, true,
1986 "IGU CAM returned invalid values igu_base_sb=0x%x "
1987 "igu_sb_cnt=%d igu_dsb_id=0x%x\n",
1988 p_igu_info->igu_base_sb, p_igu_info->igu_sb_cnt,
1989 p_igu_info->igu_dsb_id);
1993 return ECORE_SUCCESS;
1997 * @brief Initialize igu runtime registers
2001 void ecore_int_igu_init_rt(struct ecore_hwfn *p_hwfn)
2003 u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN;
2005 STORE_RT_REG(p_hwfn, IGU_REG_PF_CONFIGURATION_RT_OFFSET, igu_pf_conf);
2008 #define LSB_IGU_CMD_ADDR (IGU_REG_SISR_MDPC_WMASK_LSB_UPPER - \
2009 IGU_CMD_INT_ACK_BASE)
2010 #define MSB_IGU_CMD_ADDR (IGU_REG_SISR_MDPC_WMASK_MSB_UPPER - \
2011 IGU_CMD_INT_ACK_BASE)
2012 u64 ecore_int_igu_read_sisr_reg(struct ecore_hwfn *p_hwfn)
2014 u32 intr_status_hi = 0, intr_status_lo = 0;
2015 u64 intr_status = 0;
2017 intr_status_lo = REG_RD(p_hwfn,
2018 GTT_BAR0_MAP_REG_IGU_CMD +
2019 LSB_IGU_CMD_ADDR * 8);
2020 intr_status_hi = REG_RD(p_hwfn,
2021 GTT_BAR0_MAP_REG_IGU_CMD +
2022 MSB_IGU_CMD_ADDR * 8);
2023 intr_status = ((u64)intr_status_hi << 32) + (u64)intr_status_lo;
2028 static void ecore_int_sp_dpc_setup(struct ecore_hwfn *p_hwfn)
2030 OSAL_DPC_INIT(p_hwfn->sp_dpc, p_hwfn);
2031 p_hwfn->b_sp_dpc_enabled = true;
2034 static enum _ecore_status_t ecore_int_sp_dpc_alloc(struct ecore_hwfn *p_hwfn)
2036 p_hwfn->sp_dpc = OSAL_DPC_ALLOC(p_hwfn);
2037 if (!p_hwfn->sp_dpc)
2040 return ECORE_SUCCESS;
2043 static void ecore_int_sp_dpc_free(struct ecore_hwfn *p_hwfn)
2045 OSAL_FREE(p_hwfn->p_dev, p_hwfn->sp_dpc);
2048 enum _ecore_status_t ecore_int_alloc(struct ecore_hwfn *p_hwfn,
2049 struct ecore_ptt *p_ptt)
2051 enum _ecore_status_t rc = ECORE_SUCCESS;
2053 rc = ecore_int_sp_dpc_alloc(p_hwfn);
2054 if (rc != ECORE_SUCCESS) {
2055 DP_ERR(p_hwfn->p_dev, "Failed to allocate sp dpc mem\n");
2059 rc = ecore_int_sp_sb_alloc(p_hwfn, p_ptt);
2060 if (rc != ECORE_SUCCESS) {
2061 DP_ERR(p_hwfn->p_dev, "Failed to allocate sp sb mem\n");
2065 rc = ecore_int_sb_attn_alloc(p_hwfn, p_ptt);
2066 if (rc != ECORE_SUCCESS)
2067 DP_ERR(p_hwfn->p_dev, "Failed to allocate sb attn mem\n");
2072 void ecore_int_free(struct ecore_hwfn *p_hwfn)
2074 ecore_int_sp_sb_free(p_hwfn);
2075 ecore_int_sb_attn_free(p_hwfn);
2076 ecore_int_sp_dpc_free(p_hwfn);
2079 void ecore_int_setup(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
2081 if (!p_hwfn || !p_hwfn->p_sp_sb || !p_hwfn->p_sb_attn)
2084 ecore_int_sb_setup(p_hwfn, p_ptt, &p_hwfn->p_sp_sb->sb_info);
2085 ecore_int_sb_attn_setup(p_hwfn, p_ptt);
2086 ecore_int_sp_dpc_setup(p_hwfn);
2089 void ecore_int_get_num_sbs(struct ecore_hwfn *p_hwfn,
2090 struct ecore_sb_cnt_info *p_sb_cnt_info)
2092 struct ecore_igu_info *info = p_hwfn->hw_info.p_igu_info;
2094 if (!info || !p_sb_cnt_info)
2097 p_sb_cnt_info->sb_cnt = info->igu_sb_cnt;
2098 p_sb_cnt_info->sb_iov_cnt = info->igu_sb_cnt_iov;
2099 p_sb_cnt_info->sb_free_blk = info->free_blks;
2102 u16 ecore_int_queue_id_from_sb_id(struct ecore_hwfn *p_hwfn, u16 sb_id)
2104 struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
2106 /* Determine origin of SB id */
2107 if ((sb_id >= p_info->igu_base_sb) &&
2108 (sb_id < p_info->igu_base_sb + p_info->igu_sb_cnt)) {
2109 return sb_id - p_info->igu_base_sb;
2110 } else if ((sb_id >= p_info->igu_base_sb_iov) &&
2111 (sb_id < p_info->igu_base_sb_iov + p_info->igu_sb_cnt_iov)) {
2112 return sb_id - p_info->igu_base_sb_iov + p_info->igu_sb_cnt;
2114 DP_NOTICE(p_hwfn, true, "SB %d not in range for function\n",
2120 void ecore_int_disable_post_isr_release(struct ecore_dev *p_dev)
2124 for_each_hwfn(p_dev, i)
2125 p_dev->hwfns[i].b_int_requested = false;
2128 void ecore_int_attn_clr_enable(struct ecore_dev *p_dev, bool clr_enable)
2130 p_dev->attn_clr_en = clr_enable;
2133 enum _ecore_status_t ecore_int_set_timer_res(struct ecore_hwfn *p_hwfn,
2134 struct ecore_ptt *p_ptt,
2135 u8 timer_res, u16 sb_id, bool tx)
2137 enum _ecore_status_t rc;
2138 struct cau_sb_entry sb_entry;
2140 if (!p_hwfn->hw_init_done) {
2141 DP_ERR(p_hwfn, "hardware not initialized yet\n");
2145 rc = ecore_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY +
2146 sb_id * sizeof(u64),
2147 (u64)(osal_uintptr_t)&sb_entry, 2, 0);
2148 if (rc != ECORE_SUCCESS) {
2149 DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc);
2154 SET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES1, timer_res);
2156 SET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES0, timer_res);
2158 rc = ecore_dmae_host2grc(p_hwfn, p_ptt,
2159 (u64)(osal_uintptr_t)&sb_entry,
2160 CAU_REG_SB_VAR_MEMORY +
2161 sb_id * sizeof(u64), 2, 0);
2162 if (rc != ECORE_SUCCESS) {
2163 DP_ERR(p_hwfn, "dmae_host2grc failed %d\n", rc);