diff --git a/drivers/net/qede/base/ecore_spq.c b/drivers/net/qede/base/ecore_spq.c
index b263693f..88ad961e 100644
--- a/drivers/net/qede/base/ecore_spq.c
+++ b/drivers/net/qede/base/ecore_spq.c
@@ -1,9 +1,7 @@
-/*
- * Copyright (c) 2016 QLogic Corporation.
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright (c) 2016 - 2018 Cavium Inc.
  * All rights reserved.
- * www.qlogic.com
- *
- * See LICENSE.qede_pmd for copyright and licensing details.
+ * www.cavium.com
  */
 
 #include "bcm_osal.h"
@@ -27,14 +25,17 @@
 ***************************************************************************/
 
 #define SPQ_HIGH_PRI_RESERVE_DEFAULT	(1)
-#define SPQ_BLOCK_SLEEP_LENGTH		(1000)
+
+#define SPQ_BLOCK_DELAY_MAX_ITER	(10)
+#define SPQ_BLOCK_DELAY_US		(10)
+#define SPQ_BLOCK_SLEEP_MAX_ITER	(200)
+#define SPQ_BLOCK_SLEEP_MS		(5)
 
 /***************************************************************************
  * Blocking Imp. (BLOCK/EBLOCK mode)
 ***************************************************************************/
-static void ecore_spq_blocking_cb(struct ecore_hwfn *p_hwfn,
-				  void *cookie,
-				  union event_ring_data *data,
+static void ecore_spq_blocking_cb(struct ecore_hwfn *p_hwfn, void *cookie,
+				  union event_ring_data OSAL_UNUSED * data,
 				  u8 fw_return_code)
 {
 	struct ecore_spq_comp_done *comp_done;
@@ -48,53 +49,86 @@ static void ecore_spq_blocking_cb(struct ecore_hwfn *p_hwfn,
 	OSAL_SMP_WMB(p_hwfn->p_dev);
 }
 
-static enum _ecore_status_t ecore_spq_block(struct ecore_hwfn *p_hwfn,
-					    struct ecore_spq_entry *p_ent,
-					    u8 *p_fw_ret)
+static enum _ecore_status_t __ecore_spq_block(struct ecore_hwfn *p_hwfn,
+					      struct ecore_spq_entry *p_ent,
+					      u8 *p_fw_ret,
+					      bool sleep_between_iter)
 {
-	int sleep_count = SPQ_BLOCK_SLEEP_LENGTH;
 	struct ecore_spq_comp_done *comp_done;
-	enum _ecore_status_t rc;
+	u32 iter_cnt;
 
 	comp_done = (struct ecore_spq_comp_done *)p_ent->comp_cb.cookie;
-	while (sleep_count) {
+	iter_cnt = sleep_between_iter ? p_hwfn->p_spq->block_sleep_max_iter
+				      : SPQ_BLOCK_DELAY_MAX_ITER;
+#ifndef ASIC_ONLY
+	if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && sleep_between_iter)
+		iter_cnt *= 5;
+#endif
+
+	while (iter_cnt--) {
 		OSAL_POLL_MODE_DPC(p_hwfn);
-		/* validate we receive completion update */
 		OSAL_SMP_RMB(p_hwfn->p_dev);
 		if (comp_done->done == 1) {
 			if (p_fw_ret)
 				*p_fw_ret = comp_done->fw_return_code;
 			return ECORE_SUCCESS;
 		}
-		OSAL_MSLEEP(5);
-		sleep_count--;
+
+		if (sleep_between_iter)
+			OSAL_MSLEEP(SPQ_BLOCK_SLEEP_MS);
+		else
+			OSAL_UDELAY(SPQ_BLOCK_DELAY_US);
+	}
+
+	return ECORE_TIMEOUT;
+}
+
+static enum _ecore_status_t ecore_spq_block(struct ecore_hwfn *p_hwfn,
+					    struct ecore_spq_entry *p_ent,
+					    u8 *p_fw_ret, bool skip_quick_poll)
+{
+	struct ecore_spq_comp_done *comp_done;
+	struct ecore_ptt *p_ptt;
+	enum _ecore_status_t rc;
+
+	/* A relatively short polling period w/o sleeping, to allow the FW to
+	 * complete the ramrod and thus possibly to avoid the following sleeps.
+	 */
+	if (!skip_quick_poll) {
+		rc = __ecore_spq_block(p_hwfn, p_ent, p_fw_ret, false);
+		if (rc == ECORE_SUCCESS)
+			return ECORE_SUCCESS;
 	}
 
+	/* Move to polling with a sleeping period between iterations */
+	rc = __ecore_spq_block(p_hwfn, p_ent, p_fw_ret, true);
+	if (rc == ECORE_SUCCESS)
+		return ECORE_SUCCESS;
+
+	p_ptt = ecore_ptt_acquire(p_hwfn);
+	if (!p_ptt)
+		return ECORE_AGAIN;
+
 	DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
-	rc = ecore_mcp_drain(p_hwfn, p_hwfn->p_main_ptt);
-	if (rc != ECORE_SUCCESS)
+	rc = ecore_mcp_drain(p_hwfn, p_ptt);
+	ecore_ptt_release(p_hwfn, p_ptt);
+	if (rc != ECORE_SUCCESS) {
 		DP_NOTICE(p_hwfn, true, "MCP drain failed\n");
+		goto err;
+	}
 
 	/* Retry after drain */
-	sleep_count = SPQ_BLOCK_SLEEP_LENGTH;
-	while (sleep_count) {
-		/* validate we receive completion update */
-		OSAL_SMP_RMB(p_hwfn->p_dev);
-		if (comp_done->done == 1) {
-			if (p_fw_ret)
-				*p_fw_ret = comp_done->fw_return_code;
-			return ECORE_SUCCESS;
-		}
-		OSAL_MSLEEP(5);
-		sleep_count--;
-	}
+	rc = __ecore_spq_block(p_hwfn, p_ent, p_fw_ret, true);
+	if (rc == ECORE_SUCCESS)
+		return ECORE_SUCCESS;
 
+	comp_done = (struct ecore_spq_comp_done *)p_ent->comp_cb.cookie;
 	if (comp_done->done == 1) {
 		if (p_fw_ret)
 			*p_fw_ret = comp_done->fw_return_code;
 		return ECORE_SUCCESS;
 	}
-
+err:
 	DP_NOTICE(p_hwfn, true,
 		  "Ramrod is stuck [CID %08x cmd %02x proto %02x echo %04x]\n",
 		  OSAL_LE32_TO_CPU(p_ent->elem.hdr.cid),
@@ -106,6 +140,14 @@ static enum _ecore_status_t ecore_spq_block(struct ecore_hwfn *p_hwfn,
 	return ECORE_BUSY;
 }
 
+void ecore_set_spq_block_timeout(struct ecore_hwfn *p_hwfn,
+				 u32 spq_timeout_ms)
+{
+	p_hwfn->p_spq->block_sleep_max_iter = spq_timeout_ms ?
+		spq_timeout_ms / SPQ_BLOCK_SLEEP_MS :
+		SPQ_BLOCK_SLEEP_MAX_ITER;
+}
+
 /***************************************************************************
  * SPQ entries inner API
 ***************************************************************************/
@@ -146,10 +188,9 @@ ecore_spq_fill_entry(struct ecore_hwfn *p_hwfn, struct ecore_spq_entry *p_ent)
 static void ecore_spq_hw_initialize(struct ecore_hwfn *p_hwfn,
 				    struct ecore_spq *p_spq)
 {
-	u16 pq;
+	struct e4_core_conn_context *p_cxt;
 	struct ecore_cxt_info cxt_info;
-	struct core_conn_context *p_cxt;
-	union ecore_qm_pq_params pq_params;
+	u16 physical_q;
 	enum _ecore_status_t rc;
 
 	cxt_info.iid = p_spq->cid;
@@ -157,40 +198,41 @@ static void ecore_spq_hw_initialize(struct ecore_hwfn *p_hwfn,
 	rc = ecore_cxt_get_cid_info(p_hwfn, &cxt_info);
 
 	if (rc < 0) {
-		DP_NOTICE(p_hwfn, true, "Cannot find context info for cid=%d",
+		DP_NOTICE(p_hwfn, true, "Cannot find context info for cid=%d\n",
 			  p_spq->cid);
 		return;
 	}
 
 	p_cxt = cxt_info.p_cxt;
 
-	SET_FIELD(p_cxt->xstorm_ag_context.flags10,
-		  XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
-	SET_FIELD(p_cxt->xstorm_ag_context.flags1,
-		  XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
-	/* SET_FIELD(p_cxt->xstorm_ag_context.flags10,
-	 *	  XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN, 1);
-	 */
-	SET_FIELD(p_cxt->xstorm_ag_context.flags9,
-		  XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);
+	/* @@@TBD we zero the context until we have ilt_reset implemented. */
+	OSAL_MEM_ZERO(p_cxt, sizeof(*p_cxt));
+
+	if (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev)) {
+		SET_FIELD(p_cxt->xstorm_ag_context.flags10,
+			  E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
+		SET_FIELD(p_cxt->xstorm_ag_context.flags1,
+			  E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
+		/* SET_FIELD(p_cxt->xstorm_ag_context.flags10,
+		 *	  E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN, 1);
+		 */
+		SET_FIELD(p_cxt->xstorm_ag_context.flags9,
+			  E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);
+	}
 
 	/* CDU validation - FIXME currently disabled */
 
 	/* QM physical queue */
-	OSAL_MEMSET(&pq_params, 0, sizeof(pq_params));
-	pq_params.core.tc = LB_TC;
-	pq = ecore_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
-	p_cxt->xstorm_ag_context.physical_q0 = OSAL_CPU_TO_LE16(pq);
+	physical_q = ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
+	p_cxt->xstorm_ag_context.physical_q0 = OSAL_CPU_TO_LE16(physical_q);
 
 	p_cxt->xstorm_st_context.spq_base_lo =
 	    DMA_LO_LE(p_spq->chain.p_phys_addr);
 	p_cxt->xstorm_st_context.spq_base_hi =
 	    DMA_HI_LE(p_spq->chain.p_phys_addr);
 
-	p_cxt->xstorm_st_context.consolid_base_addr.lo =
-	    DMA_LO_LE(p_hwfn->p_consq->chain.p_phys_addr);
-	p_cxt->xstorm_st_context.consolid_base_addr.hi =
-	    DMA_HI_LE(p_hwfn->p_consq->chain.p_phys_addr);
+	DMA_REGPAIR_LE(p_cxt->xstorm_st_context.consolid_base_addr,
+		       p_hwfn->p_consq->chain.p_phys_addr);
 }
 
 static enum _ecore_status_t ecore_spq_hw_post(struct ecore_hwfn *p_hwfn,
@@ -198,9 +240,9 @@
 					      struct ecore_spq_entry *p_ent)
 {
 	struct ecore_chain *p_chain = &p_hwfn->p_spq->chain;
+	struct core_db_data *p_db_data = &p_spq->db_data;
 	u16 echo = ecore_chain_get_prod_idx(p_chain);
 	struct slow_path_element *elem;
-	struct core_db_data db;
 
 	p_ent->elem.hdr.echo = OSAL_CPU_TO_LE16(echo);
 	elem = ecore_chain_produce(p_chain);
@@ -209,34 +251,24 @@
 		return ECORE_INVAL;
 	}
 
-	*elem = p_ent->elem;	/* struct assignment */
+	*elem = p_ent->elem;	/* Struct assignment */
 
-	/* send a doorbell on the slow hwfn session */
-	OSAL_MEMSET(&db, 0, sizeof(db));
-	SET_FIELD(db.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
-	SET_FIELD(db.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
-	SET_FIELD(db.params, CORE_DB_DATA_AGG_VAL_SEL,
-		  DQ_XCM_CORE_SPQ_PROD_CMD);
-	db.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
-
-	/* validate producer is up to-date */
-	OSAL_RMB(p_hwfn->p_dev);
+	p_db_data->spq_prod =
+		OSAL_CPU_TO_LE16(ecore_chain_get_prod_idx(p_chain));
 
-	db.spq_prod = OSAL_CPU_TO_LE16(ecore_chain_get_prod_idx(p_chain));
+	/* Make sure the SPQE is updated before the doorbell */
+	OSAL_WMB(p_hwfn->p_dev);
 
-	/* do not reorder */
-	OSAL_BARRIER(p_hwfn->p_dev);
+	DOORBELL(p_hwfn, p_spq->db_addr_offset, *(u32 *)p_db_data);
 
-	DOORBELL(p_hwfn, DB_ADDR(p_spq->cid, DQ_DEMS_LEGACY), *(u32 *)&db);
-
-	/* make sure doorbell is rang */
-	OSAL_MMIOWB(p_hwfn->p_dev);
+	/* Make sure doorbell is rang */
+	OSAL_WMB(p_hwfn->p_dev);
 
 	DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
 		   "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x"
 		   " agg_params: %02x, prod: %04x\n",
-		   DB_ADDR(p_spq->cid, DQ_DEMS_LEGACY), p_spq->cid, db.params,
-		   db.agg_flags, ecore_chain_get_prod_idx(p_chain));
+		   p_spq->db_addr_offset, p_spq->cid, p_db_data->params,
+		   p_db_data->agg_flags, ecore_chain_get_prod_idx(p_chain));
 
 	return ECORE_SUCCESS;
 }
@@ -249,17 +281,53 @@
 static enum _ecore_status_t
 ecore_async_event_completion(struct ecore_hwfn *p_hwfn,
 			     struct event_ring_entry *p_eqe)
 {
-	switch (p_eqe->protocol_id) {
-	case PROTOCOLID_COMMON:
-		return ecore_sriov_eqe_event(p_hwfn,
-					     p_eqe->opcode,
-					     p_eqe->echo, &p_eqe->data);
-	default:
+	ecore_spq_async_comp_cb cb;
+	enum _ecore_status_t rc;
+
+	if (p_eqe->protocol_id >= MAX_PROTOCOL_TYPE) {
+		DP_ERR(p_hwfn, "Wrong protocol: %d\n", p_eqe->protocol_id);
+		return ECORE_INVAL;
+	}
+
+	cb = p_hwfn->p_spq->async_comp_cb[p_eqe->protocol_id];
+	if (!cb) {
 		DP_NOTICE(p_hwfn, true,
 			  "Unknown Async completion for protocol: %d\n",
 			  p_eqe->protocol_id);
 		return ECORE_INVAL;
 	}
+
+	rc = cb(p_hwfn, p_eqe->opcode, p_eqe->echo,
+		&p_eqe->data, p_eqe->fw_return_code);
+	if (rc != ECORE_SUCCESS)
+		DP_NOTICE(p_hwfn, true,
+			  "Async completion callback failed, rc = %d [opcode %x, echo %x, fw_return_code %x]",
+			  rc, p_eqe->opcode, p_eqe->echo,
+			  p_eqe->fw_return_code);
+
+	return rc;
+}
+
+enum _ecore_status_t
+ecore_spq_register_async_cb(struct ecore_hwfn *p_hwfn,
+			    enum protocol_type protocol_id,
+			    ecore_spq_async_comp_cb cb)
+{
+	if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE))
+		return ECORE_INVAL;
+
+	p_hwfn->p_spq->async_comp_cb[protocol_id] = cb;
+	return ECORE_SUCCESS;
+}
+
+void
+ecore_spq_unregister_async_cb(struct ecore_hwfn *p_hwfn,
+			      enum protocol_type protocol_id)
+{
+	if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE))
+		return;
+
+	p_hwfn->p_spq->async_comp_cb[protocol_id] = OSAL_NULL;
 }
 
 /***************************************************************************
@@ -281,10 +349,16 @@ enum _ecore_status_t ecore_eq_completion(struct ecore_hwfn *p_hwfn,
 {
 	struct ecore_eq *p_eq = cookie;
 	struct ecore_chain *p_chain = &p_eq->chain;
-	enum _ecore_status_t rc = 0;
+	u16 fw_cons_idx = 0;
+	enum _ecore_status_t rc = ECORE_SUCCESS;
+
+	if (!p_hwfn->p_spq) {
+		DP_ERR(p_hwfn, "Unexpected NULL p_spq\n");
+		return ECORE_INVAL;
+	}
 
 	/* take a snapshot of the FW consumer */
-	u16 fw_cons_idx = OSAL_LE16_TO_CPU(*p_eq->p_fw_cons);
+	fw_cons_idx = OSAL_LE16_TO_CPU(*p_eq->p_fw_cons);
 
 	DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "fw_cons_idx %x\n", fw_cons_idx);
@@ -300,30 +374,30 @@ enum _ecore_status_t ecore_eq_completion(struct ecore_hwfn *p_hwfn,
 	while (fw_cons_idx != ecore_chain_get_cons_idx(p_chain)) {
 		struct event_ring_entry *p_eqe = ecore_chain_consume(p_chain);
 		if (!p_eqe) {
-			rc = ECORE_INVAL;
+			DP_ERR(p_hwfn,
+			       "Unexpected NULL chain consumer entry\n");
 			break;
 		}
 
 		DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
-			   "op %x prot %x res0 %x echo %x "
-			   "fwret %x flags %x\n", p_eqe->opcode,
+			   "op %x prot %x res0 %x echo %x fwret %x flags %x\n",
+			   p_eqe->opcode,	     /* Event Opcode */
 			   p_eqe->protocol_id,	/* Event Protocol ID */
 			   p_eqe->reserved0,	/* Reserved */
+			   /* Echo value from ramrod data on the host */
 			   OSAL_LE16_TO_CPU(p_eqe->echo),
-			   p_eqe->fw_return_code,	/* FW return code for SP
-							 * ramrods
-							 */
+			   p_eqe->fw_return_code,    /* FW return code for SP
+						      * ramrods
+						      */
 			   p_eqe->flags);
 
-		if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC)) {
-			if (ecore_async_event_completion(p_hwfn, p_eqe))
-				rc = ECORE_INVAL;
-		} else if (ecore_spq_completion(p_hwfn,
-						p_eqe->echo,
-						p_eqe->fw_return_code,
-						&p_eqe->data)) {
-			rc = ECORE_INVAL;
-		}
+		if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC))
+			ecore_async_event_completion(p_hwfn, p_eqe);
+		else
+			ecore_spq_completion(p_hwfn,
+					     p_eqe->echo,
+					     p_eqe->fw_return_code,
+					     &p_eqe->data);
 
 		ecore_chain_recycle_consumed(p_chain);
 	}
@@ -333,52 +407,56 @@ enum _ecore_status_t ecore_eq_completion(struct ecore_hwfn *p_hwfn,
 	return rc;
 }
 
-struct ecore_eq *ecore_eq_alloc(struct ecore_hwfn *p_hwfn, u16 num_elem)
+enum _ecore_status_t ecore_eq_alloc(struct ecore_hwfn *p_hwfn, u16 num_elem)
 {
 	struct ecore_eq *p_eq;
 
 	/* Allocate EQ struct */
-	p_eq = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(struct ecore_eq));
+	p_eq = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_eq));
 	if (!p_eq) {
-		DP_NOTICE(p_hwfn, true,
+		DP_NOTICE(p_hwfn, false,
 			  "Failed to allocate `struct ecore_eq'\n");
-		return OSAL_NULL;
+		return ECORE_NOMEM;
 	}
 
-	/* Allocate and initialize EQ chain */
+	/* Allocate and initialize EQ chain*/
 	if (ecore_chain_alloc(p_hwfn->p_dev,
 			      ECORE_CHAIN_USE_TO_PRODUCE,
 			      ECORE_CHAIN_MODE_PBL,
 			      ECORE_CHAIN_CNT_TYPE_U16,
 			      num_elem,
-			      sizeof(union event_ring_element), &p_eq->chain)) {
-		DP_NOTICE(p_hwfn, true, "Failed to allocate eq chain");
+			      sizeof(union event_ring_element),
+			      &p_eq->chain, OSAL_NULL) != ECORE_SUCCESS) {
+		DP_NOTICE(p_hwfn, false, "Failed to allocate eq chain\n");
 		goto eq_allocate_fail;
 	}
 
 	/* register EQ completion on the SP SB */
-	ecore_int_register_cb(p_hwfn,
-			      ecore_eq_completion,
+	ecore_int_register_cb(p_hwfn, ecore_eq_completion,
 			      p_eq, &p_eq->eq_sb_index, &p_eq->p_fw_cons);
 
-	return p_eq;
+	p_hwfn->p_eq = p_eq;
+	return ECORE_SUCCESS;
 
 eq_allocate_fail:
-	ecore_eq_free(p_hwfn, p_eq);
-	return OSAL_NULL;
+	OSAL_FREE(p_hwfn->p_dev, p_eq);
+	return ECORE_NOMEM;
 }
 
-void ecore_eq_setup(struct ecore_hwfn *p_hwfn, struct ecore_eq *p_eq)
+void ecore_eq_setup(struct ecore_hwfn *p_hwfn)
 {
-	ecore_chain_reset(&p_eq->chain);
+	ecore_chain_reset(&p_hwfn->p_eq->chain);
 }
 
-void ecore_eq_free(struct ecore_hwfn *p_hwfn, struct ecore_eq *p_eq)
+void ecore_eq_free(struct ecore_hwfn *p_hwfn)
 {
-	if (!p_eq)
+	if (!p_hwfn->p_eq)
 		return;
-	ecore_chain_free(p_hwfn->p_dev, &p_eq->chain);
-	OSAL_FREE(p_hwfn->p_dev, p_eq);
+
+	ecore_chain_free(p_hwfn->p_dev, &p_hwfn->p_eq->chain);
+
+	OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_eq);
+	p_hwfn->p_eq = OSAL_NULL;
 }
 
 /***************************************************************************
@@ -419,10 +497,13 @@ enum _ecore_status_t ecore_eth_cqe_completion(struct ecore_hwfn *p_hwfn,
 ***************************************************************************/
 void ecore_spq_setup(struct ecore_hwfn *p_hwfn)
 {
-	struct ecore_spq_entry *p_virt = OSAL_NULL;
 	struct ecore_spq *p_spq = p_hwfn->p_spq;
+	struct ecore_spq_entry *p_virt = OSAL_NULL;
+	struct core_db_data *p_db_data;
+	void OSAL_IOMEM *db_addr;
 	dma_addr_t p_phys = 0;
 	u32 i, capacity;
+	enum _ecore_status_t rc;
 
 	OSAL_LIST_INIT(&p_spq->pending);
 	OSAL_LIST_INIT(&p_spq->completion_pending);
@@ -436,8 +517,7 @@ void ecore_spq_setup(struct ecore_hwfn *p_hwfn)
 
 	capacity = ecore_chain_get_capacity(&p_spq->chain);
 	for (i = 0; i < capacity; i++) {
-		p_virt->elem.data_ptr.hi = DMA_HI_LE(p_phys);
-		p_virt->elem.data_ptr.lo = DMA_LO_LE(p_phys);
+		DMA_REGPAIR_LE(p_virt->elem.data_ptr, p_phys);
 
 		OSAL_LIST_PUSH_TAIL(&p_virt->list, &p_spq->free_pool);
 
@@ -461,6 +541,24 @@ void ecore_spq_setup(struct ecore_hwfn *p_hwfn)
 
 	/* reset the chain itself */
 	ecore_chain_reset(&p_spq->chain);
+
+	/* Initialize the address/data of the SPQ doorbell */
+	p_spq->db_addr_offset = DB_ADDR(p_spq->cid, DQ_DEMS_LEGACY);
+	p_db_data = &p_spq->db_data;
+	OSAL_MEM_ZERO(p_db_data, sizeof(*p_db_data));
+	SET_FIELD(p_db_data->params, CORE_DB_DATA_DEST, DB_DEST_XCM);
+	SET_FIELD(p_db_data->params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_MAX);
+	SET_FIELD(p_db_data->params, CORE_DB_DATA_AGG_VAL_SEL,
+		  DQ_XCM_CORE_SPQ_PROD_CMD);
+	p_db_data->agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
+
+	/* Register the SPQ doorbell with the doorbell recovery mechanism */
+	db_addr = (void *)((u8 *)p_hwfn->doorbells + p_spq->db_addr_offset);
+	rc = ecore_db_recovery_add(p_hwfn->p_dev, db_addr, &p_spq->db_data,
+				   DB_REC_WIDTH_32B, DB_REC_KERNEL);
+	if (rc != ECORE_SUCCESS)
+		DP_INFO(p_hwfn,
+			"Failed to register the SPQ doorbell with the doorbell recovery mechanism\n");
 }
 
 enum _ecore_status_t ecore_spq_alloc(struct ecore_hwfn *p_hwfn)
@@ -474,17 +572,19 @@
 	p_spq =
 	    OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(struct ecore_spq));
 	if (!p_spq) {
-		DP_NOTICE(p_hwfn, true,
-			  "Failed to allocate `struct ecore_spq'");
+		DP_NOTICE(p_hwfn, false, "Failed to allocate `struct ecore_spq'\n");
 		return ECORE_NOMEM;
 	}
 
 	/* SPQ ring */
-	if (ecore_chain_alloc(p_hwfn->p_dev, ECORE_CHAIN_USE_TO_PRODUCE,
-			      ECORE_CHAIN_MODE_SINGLE, ECORE_CHAIN_CNT_TYPE_U16, 0,
-			      /* N/A when the mode is SINGLE */
-			      sizeof(struct slow_path_element), &p_spq->chain)) {
-		DP_NOTICE(p_hwfn, true, "Failed to allocate spq chain");
+	if (ecore_chain_alloc(p_hwfn->p_dev,
+			      ECORE_CHAIN_USE_TO_PRODUCE,
+			      ECORE_CHAIN_MODE_SINGLE,
+			      ECORE_CHAIN_CNT_TYPE_U16,
+			      0, /* N/A when the mode is SINGLE */
+			      sizeof(struct slow_path_element),
+			      &p_spq->chain, OSAL_NULL)) {
+		DP_NOTICE(p_hwfn, false, "Failed to allocate spq chain\n");
 		goto spq_allocate_fail;
 	}
 
@@ -499,7 +599,10 @@ enum _ecore_status_t ecore_spq_alloc(struct ecore_hwfn *p_hwfn)
 	p_spq->p_virt = p_virt;
 	p_spq->p_phys = p_phys;
 
-	OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_spq->lock);
+#ifdef CONFIG_ECORE_LOCK_ALLOC
+	if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_spq->lock))
+		goto spq_allocate_fail;
+#endif
 
 	p_hwfn->p_spq = p_spq;
 	return ECORE_SUCCESS;
@@ -513,11 +616,16 @@ spq_allocate_fail:
 void ecore_spq_free(struct ecore_hwfn *p_hwfn)
 {
 	struct ecore_spq *p_spq = p_hwfn->p_spq;
+	void OSAL_IOMEM *db_addr;
 	u32 capacity;
 
 	if (!p_spq)
 		return;
 
+	/* Delete the SPQ doorbell from the doorbell recovery mechanism */
+	db_addr = (void *)((u8 *)p_hwfn->doorbells + p_spq->db_addr_offset);
+	ecore_db_recovery_del(p_hwfn->p_dev, db_addr, &p_spq->db_data);
+
 	if (p_spq->p_virt) {
 		capacity = ecore_chain_get_capacity(&p_spq->chain);
 		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
@@ -528,7 +636,10 @@ void ecore_spq_free(struct ecore_hwfn *p_hwfn)
 	}
 
 	ecore_chain_free(p_hwfn->p_dev, &p_spq->chain);
+#ifdef CONFIG_ECORE_LOCK_ALLOC
 	OSAL_SPIN_LOCK_DEALLOC(&p_spq->lock);
+#endif
+
 	OSAL_FREE(p_hwfn->p_dev, p_spq);
 }
 
@@ -537,18 +648,16 @@ ecore_spq_get_entry(struct ecore_hwfn *p_hwfn, struct ecore_spq_entry **pp_ent)
 {
 	struct ecore_spq *p_spq = p_hwfn->p_spq;
 	struct ecore_spq_entry *p_ent = OSAL_NULL;
+	enum _ecore_status_t rc = ECORE_SUCCESS;
 
 	OSAL_SPIN_LOCK(&p_spq->lock);
 
 	if (OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) {
-		p_ent = OSAL_ZALLOC(p_hwfn->p_dev, GFP_ATOMIC,
-				    sizeof(struct ecore_spq_entry));
+		p_ent = OSAL_ZALLOC(p_hwfn->p_dev, GFP_ATOMIC, sizeof(*p_ent));
 		if (!p_ent) {
-			OSAL_SPIN_UNLOCK(&p_spq->lock);
-			DP_NOTICE(p_hwfn, true,
-				  "Failed to allocate an SPQ entry"
-				  " for a pending ramrod\n");
-			return ECORE_NOMEM;
+			DP_NOTICE(p_hwfn, false, "Failed to allocate an SPQ entry for a pending ramrod\n");
+			rc = ECORE_NOMEM;
+			goto out_unlock;
 		}
 		p_ent->queue = &p_spq->unlimited_pending;
 	} else {
@@ -560,9 +669,9 @@ ecore_spq_get_entry(struct ecore_hwfn *p_hwfn, struct ecore_spq_entry **pp_ent)
 
 	*pp_ent = p_ent;
 
+out_unlock:
 	OSAL_SPIN_UNLOCK(&p_spq->lock);
-
-	return ECORE_SUCCESS;
+	return rc;
 }
 
 /* Locked variant; Should be called while the SPQ lock is taken */
@@ -607,32 +716,29 @@ ecore_spq_add_entry(struct ecore_hwfn *p_hwfn,
 
 		p_spq->unlimited_pending_count++;
 
 		return ECORE_SUCCESS;
-	}
-
-	struct ecore_spq_entry *p_en2;
-
-	p_en2 = OSAL_LIST_FIRST_ENTRY(&p_spq->free_pool,
-				      struct ecore_spq_entry,
-				      list);
-	OSAL_LIST_REMOVE_ENTRY(&p_en2->list, &p_spq->free_pool);
+	} else {
+		struct ecore_spq_entry *p_en2;
 
-	/* Copy the ring element physical pointer to the new
-	 * entry, since we are about to override the entire ring
-	 * entry and don't want to lose the pointer.
-	 */
-	p_ent->elem.data_ptr = p_en2->elem.data_ptr;
+		p_en2 = OSAL_LIST_FIRST_ENTRY(&p_spq->free_pool,
+					      struct ecore_spq_entry,
+					      list);
+		OSAL_LIST_REMOVE_ENTRY(&p_en2->list, &p_spq->free_pool);
 
-	/* Setting the cookie to the comp_done of the
-	 * new element.
-	 */
-	if (p_ent->comp_cb.cookie == &p_ent->comp_done)
-		p_ent->comp_cb.cookie = &p_en2->comp_done;
+		/* Copy the ring element physical pointer to the new
+		 * entry, since we are about to override the entire ring
+		 * entry and don't want to lose the pointer.
+		 */
+		p_ent->elem.data_ptr = p_en2->elem.data_ptr;
 
-	*p_en2 = *p_ent;
+		*p_en2 = *p_ent;
 
-	OSAL_FREE(p_hwfn->p_dev, p_ent);
+		/* EBLOCK responsible to free the allocated p_ent */
+		if (p_ent->comp_mode != ECORE_SPQ_MODE_EBLOCK)
+			OSAL_FREE(p_hwfn->p_dev, p_ent);
 
-	p_ent = p_en2;
+		p_ent = p_en2;
+	}
 
 	/* entry is to be placed in 'pending' queue */
@@ -682,16 +788,22 @@ static enum _ecore_status_t ecore_spq_post_list(struct ecore_hwfn *p_hwfn,
 	       !OSAL_LIST_IS_EMPTY(head)) {
 		struct ecore_spq_entry *p_ent =
 		    OSAL_LIST_FIRST_ENTRY(head, struct ecore_spq_entry, list);
-		OSAL_LIST_REMOVE_ENTRY(&p_ent->list, head);
-		OSAL_LIST_PUSH_TAIL(&p_ent->list, &p_spq->completion_pending);
-		p_spq->comp_sent_count++;
-
-		rc = ecore_spq_hw_post(p_hwfn, p_spq, p_ent);
-		if (rc) {
-			OSAL_LIST_REMOVE_ENTRY(&p_ent->list,
-					       &p_spq->completion_pending);
-			__ecore_spq_return_entry(p_hwfn, p_ent);
-			return rc;
+		if (p_ent != OSAL_NULL) {
+#if defined(_NTDDK_)
+#pragma warning(suppress : 6011 28182)
+#endif
+			OSAL_LIST_REMOVE_ENTRY(&p_ent->list, head);
+			OSAL_LIST_PUSH_TAIL(&p_ent->list,
+					    &p_spq->completion_pending);
+			p_spq->comp_sent_count++;
+
+			rc = ecore_spq_hw_post(p_hwfn, p_spq, p_ent);
+			if (rc) {
+				OSAL_LIST_REMOVE_ENTRY(&p_ent->list,
+						       &p_spq->completion_pending);
+				__ecore_spq_return_entry(p_hwfn, p_ent);
+				return rc;
+			}
 		}
 	}
 
@@ -700,7 +812,6 @@
 static enum _ecore_status_t ecore_spq_pend_post(struct ecore_hwfn *p_hwfn)
 {
-	enum _ecore_status_t rc = ECORE_NOTIMPL;
 	struct ecore_spq *p_spq = p_hwfn->p_spq;
 	struct ecore_spq_entry *p_ent = OSAL_NULL;
 
@@ -713,17 +824,16 @@ static enum _ecore_status_t ecore_spq_pend_post(struct ecore_hwfn *p_hwfn)
 		if (!p_ent)
 			return ECORE_INVAL;
 
+#if defined(_NTDDK_)
+#pragma warning(suppress : 6011)
+#endif
 		OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->unlimited_pending);
 
 		ecore_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
 	}
 
-	rc = ecore_spq_post_list(p_hwfn,
+	return ecore_spq_post_list(p_hwfn,
 				 &p_spq->pending, SPQ_HIGH_PRI_RESERVE_DEFAULT);
-	if (rc)
-		return rc;
-
-	return ECORE_SUCCESS;
 }
 
 enum _ecore_status_t ecore_spq_post(struct ecore_hwfn *p_hwfn,
@@ -745,7 +855,7 @@
 	if (p_hwfn->p_dev->recov_in_prog) {
 		DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
 			   "Recovery is in progress -> skip spq post"
-			   " [cmd %02x protocol %02x]",
+			   " [cmd %02x protocol %02x]\n",
 			   p_ent->elem.hdr.cmd_id, p_ent->elem.hdr.protocol_id);
 		/* Return success to let the flows to be completed successfully
 		 * w/o any error handling.
@@ -785,7 +895,21 @@ enum _ecore_status_t ecore_spq_post(struct ecore_hwfn *p_hwfn,
 		 * access p_ent here to see whether it's successful or not.
 		 * Thus, after gaining the answer perform the cleanup here.
 		 */
-		rc = ecore_spq_block(p_hwfn, p_ent, fw_return_code);
+		rc = ecore_spq_block(p_hwfn, p_ent, fw_return_code,
+				     p_ent->queue == &p_spq->unlimited_pending);
+
+		if (p_ent->queue == &p_spq->unlimited_pending) {
+			/* This is an allocated p_ent which does not need to
+			 * return to pool.
+			 */
+			OSAL_FREE(p_hwfn->p_dev, p_ent);
+
+			/* TBD: handle error flow and remove p_ent from
+			 * completion pending
+			 */
+			return rc;
+		}
+
 		if (rc)
 			goto spq_post_fail2;
 
@@ -819,12 +943,11 @@ enum _ecore_status_t ecore_spq_completion(struct ecore_hwfn *p_hwfn,
 	struct ecore_spq_entry *found = OSAL_NULL;
 	enum _ecore_status_t rc;
 
-	if (!p_hwfn)
-		return ECORE_INVAL;
-
 	p_spq = p_hwfn->p_spq;
-	if (!p_spq)
+	if (!p_spq) {
+		DP_ERR(p_hwfn, "Unexpected NULL p_spq\n");
 		return ECORE_INVAL;
+	}
 
 	OSAL_SPIN_LOCK(&p_spq->lock);
 	OSAL_LIST_FOR_EACH_ENTRY_SAFE(p_ent,
@@ -884,11 +1007,17 @@ enum _ecore_status_t ecore_spq_completion(struct ecore_hwfn *p_hwfn,
 	if (found->comp_cb.function)
 		found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
 					fw_return_code);
+	else
+		DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
+			   "Got a completion without a callback function\n");
 
-	if (found->comp_mode != ECORE_SPQ_MODE_EBLOCK) {
-		/* EBLOCK is responsible for freeing its own entry */
+	if ((found->comp_mode != ECORE_SPQ_MODE_EBLOCK) ||
+	    (found->queue == &p_spq->unlimited_pending))
+		/* EBLOCK is responsible for returning its own entry into the
+		 * free list, unless it originally added the entry into the
+		 * unlimited pending list.
+		 */
 		ecore_spq_return_entry(p_hwfn, found);
-	}
 
 	/* Attempt to post pending requests */
 	OSAL_SPIN_LOCK(&p_spq->lock);
@@ -898,17 +1027,17 @@ enum _ecore_status_t ecore_spq_completion(struct ecore_hwfn *p_hwfn,
 	return rc;
 }
 
-struct ecore_consq *ecore_consq_alloc(struct ecore_hwfn *p_hwfn)
+enum _ecore_status_t ecore_consq_alloc(struct ecore_hwfn *p_hwfn)
 {
 	struct ecore_consq *p_consq;
 
 	/* Allocate ConsQ struct */
 	p_consq =
-	    OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(struct ecore_consq));
+	    OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_consq));
 	if (!p_consq) {
-		DP_NOTICE(p_hwfn, true,
+		DP_NOTICE(p_hwfn, false,
 			  "Failed to allocate `struct ecore_consq'\n");
-		return OSAL_NULL;
+		return ECORE_NOMEM;
 	}
 
 	/* Allocate and initialize EQ chain */
@@ -917,27 +1046,30 @@ struct ecore_consq *ecore_consq_alloc(struct ecore_hwfn *p_hwfn)
 			      ECORE_CHAIN_MODE_PBL,
 			      ECORE_CHAIN_CNT_TYPE_U16,
 			      ECORE_CHAIN_PAGE_SIZE / 0x80,
-			      0x80, &p_consq->chain)) {
-		DP_NOTICE(p_hwfn, true, "Failed to allocate consq chain");
+			      0x80,
+			      &p_consq->chain, OSAL_NULL) != ECORE_SUCCESS) {
+		DP_NOTICE(p_hwfn, false, "Failed to allocate consq chain");
 		goto consq_allocate_fail;
 	}
 
-	return p_consq;
+	p_hwfn->p_consq = p_consq;
+	return ECORE_SUCCESS;
 
 consq_allocate_fail:
-	ecore_consq_free(p_hwfn, p_consq);
-	return OSAL_NULL;
+	OSAL_FREE(p_hwfn->p_dev, p_consq);
+	return ECORE_NOMEM;
 }
 
-void ecore_consq_setup(struct ecore_hwfn *p_hwfn, struct ecore_consq *p_consq)
+void ecore_consq_setup(struct ecore_hwfn *p_hwfn)
 {
-	ecore_chain_reset(&p_consq->chain);
+	ecore_chain_reset(&p_hwfn->p_consq->chain);
}
 
-void ecore_consq_free(struct ecore_hwfn *p_hwfn, struct ecore_consq *p_consq)
+void ecore_consq_free(struct ecore_hwfn *p_hwfn)
 {
-	if (!p_consq)
+	if (!p_hwfn->p_consq)
 		return;
-	ecore_chain_free(p_hwfn->p_dev, &p_consq->chain);
-	OSAL_FREE(p_hwfn->p_dev, p_consq);
+
+	ecore_chain_free(p_hwfn->p_dev, &p_hwfn->p_consq->chain);
+	OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_consq);
 }
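Editor's note on the blocking rework above: __ecore_spq_block() now waits in two phases, a short busy-wait (SPQ_BLOCK_DELAY_MAX_ITER iterations of 10 us each) and, only if that times out, a sleeping poll (5 ms per iteration), with an MCP drain plus one final sleeping poll as the last resort. The standalone C sketch below models only that control flow; every name in it (poll_done, completer) is hypothetical, plain usleep() stands in for OSAL_UDELAY()/OSAL_MSLEEP(), and a volatile flag stands in for comp_done->done (a real driver uses the OSAL_SMP_RMB/WMB barriers, not volatile).

/* Hypothetical standalone model of the patch's two-phase SPQ wait:
 * busy-poll briefly, then poll with sleeps, mirroring
 * __ecore_spq_block(..., false) followed by __ecore_spq_block(..., true).
 * Build with: cc model.c -lpthread
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define DELAY_MAX_ITER 10   /* mirrors SPQ_BLOCK_DELAY_MAX_ITER */
#define DELAY_US       10   /* mirrors SPQ_BLOCK_DELAY_US */
#define SLEEP_MAX_ITER 200  /* mirrors SPQ_BLOCK_SLEEP_MAX_ITER */
#define SLEEP_MS       5    /* mirrors SPQ_BLOCK_SLEEP_MS */

static volatile bool done; /* stands in for comp_done->done */

/* Returns 0 on completion (ECORE_SUCCESS), -1 on timeout (ECORE_TIMEOUT). */
static int poll_done(unsigned int iters, bool sleep_between_iter)
{
	while (iters--) {
		if (done)
			return 0;
		usleep(sleep_between_iter ? SLEEP_MS * 1000 : DELAY_US);
	}
	return -1;
}

/* Plays the role of the firmware: completes the "ramrod" after 2 ms. */
static void *completer(void *arg)
{
	(void)arg;
	usleep(2000);
	done = true;
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, completer, NULL);

	/* Phase 1: quick busy-poll, hoping to avoid sleeping at all;
	 * phase 2: poll with sleeps between iterations.
	 */
	if (poll_done(DELAY_MAX_ITER, false) &&
	    poll_done(SLEEP_MAX_ITER, true))
		puts("timed out (the driver would request an MCP drain here)");
	else
		puts("completed");

	pthread_join(t, NULL);
	return 0;
}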
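On the new ecore_set_spq_block_timeout(): a caller-supplied timeout in milliseconds is converted into a count of 5 ms sleep iterations, with 0 selecting the default of SPQ_BLOCK_SLEEP_MAX_ITER (200 iterations, i.e. 1 s; the removed SPQ_BLOCK_SLEEP_LENGTH default was 1000 iterations of 5 ms, i.e. 5 s). A minimal model of just that arithmetic, with hypothetical names:

/* Hypothetical model of ecore_set_spq_block_timeout()'s conversion:
 * milliseconds -> number of 5 ms sleep iterations, 0 -> default.
 */
#include <assert.h>
#include <stdio.h>

#define SLEEP_MAX_ITER 200 /* mirrors SPQ_BLOCK_SLEEP_MAX_ITER */
#define SLEEP_MS       5   /* mirrors SPQ_BLOCK_SLEEP_MS */

static unsigned int block_sleep_max_iter(unsigned int timeout_ms)
{
	return timeout_ms ? timeout_ms / SLEEP_MS : SLEEP_MAX_ITER;
}

int main(void)
{
	assert(block_sleep_max_iter(0) == 200);     /* default: 1 s total */
	assert(block_sleep_max_iter(5000) == 1000); /* the old 5 s behaviour */
	printf("100 ms -> %u iterations\n", block_sleep_max_iter(100));
	return 0;
}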
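On the async-completion rework: the hard-wired switch on PROTOCOLID_COMMON in ecore_async_event_completion() is replaced by a per-protocol callback table, populated through ecore_spq_register_async_cb() and cleared through ecore_spq_unregister_async_cb(). A self-contained model of that dispatch pattern; all names here (MAX_PROTO, register_cb, dispatch, on_common) are hypothetical stand-ins, not the driver's API:

/* Hypothetical model of the per-protocol async-completion table:
 * register a handler per protocol id, dispatch by id, and report
 * unknown ids instead of switching on a fixed enum.
 */
#include <stdio.h>

#define MAX_PROTO 8 /* stands in for MAX_PROTOCOL_TYPE */

typedef int (*async_cb)(unsigned char opcode, unsigned short echo);

static async_cb cb_table[MAX_PROTO];

static int register_cb(unsigned int proto, async_cb cb)
{
	if (proto >= MAX_PROTO)
		return -1; /* the driver returns ECORE_INVAL here */
	cb_table[proto] = cb;
	return 0;
}

static int dispatch(unsigned int proto, unsigned char opcode,
		    unsigned short echo)
{
	if (proto >= MAX_PROTO || !cb_table[proto]) {
		fprintf(stderr, "unknown async completion, proto %u\n", proto);
		return -1;
	}
	return cb_table[proto](opcode, echo);
}

static int on_common(unsigned char opcode, unsigned short echo)
{
	printf("common event: op %u echo %u\n", opcode, echo);
	return 0;
}

int main(void)
{
	register_cb(0, on_common); /* e.g. the COMMON protocol slot */
	dispatch(0, 3, 42);        /* handled by on_common */
	dispatch(5, 1, 7);         /* rejected: no callback registered */
	return 0;
}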
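On the doorbell change: ecore_spq_setup() now fills the invariant part of core_db_data once (destination, aggregation command and value selector) and registers the doorbell address with the recovery mechanism, so ecore_spq_hw_post() only refreshes spq_prod and writes the 32-bit payload after a write barrier. A hypothetical model of that "precompute once, patch the producer per post" pattern; the struct layout and field values below are illustrative only, not the hardware's format:

/* Hypothetical model of the precomputed doorbell payload: setup() builds
 * the constant fields once, post() only refreshes the producer index and
 * performs the (modelled) 32-bit doorbell write.
 */
#include <stdint.h>
#include <stdio.h>

struct db_data {            /* stands in for struct core_db_data */
	uint8_t  params;    /* dest/agg_cmd/agg_val_sel, set once at setup */
	uint8_t  agg_flags; /* set once at setup */
	uint16_t spq_prod;  /* refreshed on every post */
};

static struct db_data db;   /* plays the role of p_spq->db_data */

static void setup(void)
{
	db.params = 0x2a;   /* placeholder for the SET_FIELD() sequence */
	db.agg_flags = 0x01;
}

static void post(uint16_t prod)
{
	db.spq_prod = prod;
	/* The driver issues OSAL_WMB() then DOORBELL() with the struct
	 * viewed as a u32; modelled here as composing and printing it.
	 */
	unsigned int v = (unsigned int)db.params |
			 (unsigned int)db.agg_flags << 8 |
			 (unsigned int)db.spq_prod << 16;
	printf("doorbell write: 0x%08x\n", v);
}

int main(void)
{
	setup();
	post(1);
	post(2);
	return 0;
}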