/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2016 - 2018 Cavium Inc.
 * All rights reserved.
 * www.cavium.com
 */

#include "bcm_osal.h"
#include "reg_addr.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore_hsi_common.h"
#include "ecore.h"
#include "ecore_sp_api.h"
#include "ecore_spq.h"
#include "ecore_iro.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_cxt.h"
#include "ecore_int.h"
#include "ecore_dev_api.h"
#include "ecore_mcp.h"
#include "ecore_hw.h"
#include "ecore_sriov.h"

/***************************************************************************
 * Structures & Definitions
 ***************************************************************************/

#define SPQ_HIGH_PRI_RESERVE_DEFAULT    (1)

#define SPQ_BLOCK_DELAY_MAX_ITER        (10)
#define SPQ_BLOCK_DELAY_US              (10)
#define SPQ_BLOCK_SLEEP_MAX_ITER        (200)
#define SPQ_BLOCK_SLEEP_MS              (5)

/***************************************************************************
 * Blocking Imp. (BLOCK/EBLOCK mode)
 ***************************************************************************/
static void ecore_spq_blocking_cb(struct ecore_hwfn *p_hwfn, void *cookie,
                                  union event_ring_data OSAL_UNUSED * data,
                                  u8 fw_return_code)
{
        struct ecore_spq_comp_done *comp_done;

        comp_done = (struct ecore_spq_comp_done *)cookie;

        comp_done->done = 0x1;
        comp_done->fw_return_code = fw_return_code;

        /* make update visible to waiting thread */
        OSAL_SMP_WMB(p_hwfn->p_dev);
}

static enum _ecore_status_t __ecore_spq_block(struct ecore_hwfn *p_hwfn,
                                              struct ecore_spq_entry *p_ent,
                                              u8 *p_fw_ret,
                                              bool sleep_between_iter)
{
        struct ecore_spq_comp_done *comp_done;
        u32 iter_cnt;

        comp_done = (struct ecore_spq_comp_done *)p_ent->comp_cb.cookie;
        iter_cnt = sleep_between_iter ? p_hwfn->p_spq->block_sleep_max_iter
                                      : SPQ_BLOCK_DELAY_MAX_ITER;
#ifndef ASIC_ONLY
        if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && sleep_between_iter)
                iter_cnt *= 5;
#endif

        while (iter_cnt--) {
                OSAL_POLL_MODE_DPC(p_hwfn);
                OSAL_SMP_RMB(p_hwfn->p_dev);
                if (comp_done->done == 1) {
                        if (p_fw_ret)
                                *p_fw_ret = comp_done->fw_return_code;
                        return ECORE_SUCCESS;
                }

                if (sleep_between_iter)
                        OSAL_MSLEEP(SPQ_BLOCK_SLEEP_MS);
                else
                        OSAL_UDELAY(SPQ_BLOCK_DELAY_US);
        }

        return ECORE_TIMEOUT;
}

static enum _ecore_status_t ecore_spq_block(struct ecore_hwfn *p_hwfn,
                                            struct ecore_spq_entry *p_ent,
                                            u8 *p_fw_ret, bool skip_quick_poll)
{
        struct ecore_spq_comp_done *comp_done;
        struct ecore_ptt *p_ptt;
        enum _ecore_status_t rc;

        /* A relatively short polling period w/o sleeping, to allow the FW to
         * complete the ramrod and thus possibly avoid the sleeping polls
         * that follow.
         */
        if (!skip_quick_poll) {
                rc = __ecore_spq_block(p_hwfn, p_ent, p_fw_ret, false);
                if (rc == ECORE_SUCCESS)
                        return ECORE_SUCCESS;
        }

        /* Move to polling with a sleeping period between iterations */
        rc = __ecore_spq_block(p_hwfn, p_ent, p_fw_ret, true);
        if (rc == ECORE_SUCCESS)
                return ECORE_SUCCESS;

        p_ptt = ecore_ptt_acquire(p_hwfn);
        if (!p_ptt)
                return ECORE_AGAIN;

        DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
        rc = ecore_mcp_drain(p_hwfn, p_ptt);
        ecore_ptt_release(p_hwfn, p_ptt);
        if (rc != ECORE_SUCCESS) {
                DP_NOTICE(p_hwfn, true, "MCP drain failed\n");
                goto err;
        }

        /* Retry after drain */
        rc = __ecore_spq_block(p_hwfn, p_ent, p_fw_ret, true);
        if (rc == ECORE_SUCCESS)
                return ECORE_SUCCESS;

        comp_done = (struct ecore_spq_comp_done *)p_ent->comp_cb.cookie;
        if (comp_done->done == 1) {
                if (p_fw_ret)
                        *p_fw_ret = comp_done->fw_return_code;
                return ECORE_SUCCESS;
        }
err:
        DP_NOTICE(p_hwfn, true,
                  "Ramrod is stuck [CID %08x cmd %02x proto %02x echo %04x]\n",
                  OSAL_LE32_TO_CPU(p_ent->elem.hdr.cid),
                  p_ent->elem.hdr.cmd_id, p_ent->elem.hdr.protocol_id,
                  OSAL_LE16_TO_CPU(p_ent->elem.hdr.echo));

        ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_RAMROD_FAIL);

        return ECORE_BUSY;
}
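
/* With the constants above, the quick poll phase waits at most
 * 10 iterations * 10us = 100us, and each sleeping phase waits up to
 * 200 iterations * 5ms = 1s by default (once before and once after the MCP
 * drain), so a ramrod is reported as stuck only after roughly two seconds.
 */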

void ecore_set_spq_block_timeout(struct ecore_hwfn *p_hwfn,
                                 u32 spq_timeout_ms)
{
        p_hwfn->p_spq->block_sleep_max_iter = spq_timeout_ms ?
                spq_timeout_ms / SPQ_BLOCK_SLEEP_MS :
                SPQ_BLOCK_SLEEP_MAX_ITER;
}
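
/* A minimal usage sketch (assumed caller, not a call made by this file):
 * a 1000ms budget translates into 1000 / SPQ_BLOCK_SLEEP_MS = 200 sleeping
 * iterations, while passing 0 restores the SPQ_BLOCK_SLEEP_MAX_ITER default.
 */
#if 0
ecore_set_spq_block_timeout(p_hwfn, 1000);
#endif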

/***************************************************************************
 * SPQ entries inner API
 ***************************************************************************/
static enum _ecore_status_t
ecore_spq_fill_entry(struct ecore_hwfn *p_hwfn, struct ecore_spq_entry *p_ent)
{
        p_ent->flags = 0;

        switch (p_ent->comp_mode) {
        case ECORE_SPQ_MODE_EBLOCK:
        case ECORE_SPQ_MODE_BLOCK:
                p_ent->comp_cb.function = ecore_spq_blocking_cb;
                break;
        case ECORE_SPQ_MODE_CB:
                break;
        default:
                DP_NOTICE(p_hwfn, true, "Unknown SPQE completion mode %d\n",
                          p_ent->comp_mode);
                return ECORE_INVAL;
        }

        DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
                   "Ramrod header: [CID 0x%08x CMD 0x%02x protocol 0x%02x]"
                   " Data pointer: [%08x:%08x] Completion Mode: %s\n",
                   p_ent->elem.hdr.cid, p_ent->elem.hdr.cmd_id,
                   p_ent->elem.hdr.protocol_id,
                   p_ent->elem.data_ptr.hi, p_ent->elem.data_ptr.lo,
                   D_TRINE(p_ent->comp_mode, ECORE_SPQ_MODE_EBLOCK,
                           ECORE_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
                           "MODE_CB"));

        return ECORE_SUCCESS;
}
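
/* Sketch of how a caller selects a completion mode (assumed usage; the
 * handler and cookie names are hypothetical). EBLOCK makes ecore_spq_post()
 * wait in ecore_spq_block() until the EQE arrives, while CB returns
 * immediately and the caller-supplied callback runs from EQ context.
 */
#if 0
static void my_comp_handler(struct ecore_hwfn *p_hwfn, void *cookie,
                            union event_ring_data *data, u8 fw_return_code)
{
        /* hypothetical: consume the completion */
}

p_ent->comp_mode = ECORE_SPQ_MODE_CB;
p_ent->comp_cb.function = my_comp_handler;
p_ent->comp_cb.cookie = my_cookie; /* hypothetical cookie */
#endif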

/***************************************************************************
 * HSI access
 ***************************************************************************/
static void ecore_spq_hw_initialize(struct ecore_hwfn *p_hwfn,
                                    struct ecore_spq *p_spq)
{
        struct e4_core_conn_context *p_cxt;
        struct ecore_cxt_info cxt_info;
        u16 physical_q;
        enum _ecore_status_t rc;

        cxt_info.iid = p_spq->cid;

        rc = ecore_cxt_get_cid_info(p_hwfn, &cxt_info);

        if (rc < 0) {
                DP_NOTICE(p_hwfn, true, "Cannot find context info for cid=%d\n",
                          p_spq->cid);
                return;
        }

        p_cxt = cxt_info.p_cxt;

        /* @@@TBD we zero the context until we have ilt_reset implemented. */
        OSAL_MEM_ZERO(p_cxt, sizeof(*p_cxt));

        if (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev)) {
                SET_FIELD(p_cxt->xstorm_ag_context.flags10,
                          E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
                SET_FIELD(p_cxt->xstorm_ag_context.flags1,
                          E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
                /* SET_FIELD(p_cxt->xstorm_ag_context.flags10,
                 *        E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN, 1);
                 */
                SET_FIELD(p_cxt->xstorm_ag_context.flags9,
                          E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);
        }

        /* CDU validation - FIXME currently disabled */

        /* QM physical queue */
        physical_q = ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
        p_cxt->xstorm_ag_context.physical_q0 = OSAL_CPU_TO_LE16(physical_q);

        p_cxt->xstorm_st_context.spq_base_lo =
            DMA_LO_LE(p_spq->chain.p_phys_addr);
        p_cxt->xstorm_st_context.spq_base_hi =
            DMA_HI_LE(p_spq->chain.p_phys_addr);

        DMA_REGPAIR_LE(p_cxt->xstorm_st_context.consolid_base_addr,
                       p_hwfn->p_consq->chain.p_phys_addr);
}

static enum _ecore_status_t ecore_spq_hw_post(struct ecore_hwfn *p_hwfn,
                                              struct ecore_spq *p_spq,
                                              struct ecore_spq_entry *p_ent)
{
        struct ecore_chain *p_chain = &p_hwfn->p_spq->chain;
        struct core_db_data *p_db_data = &p_spq->db_data;
        u16 echo = ecore_chain_get_prod_idx(p_chain);
        struct slow_path_element *elem;

        p_ent->elem.hdr.echo = OSAL_CPU_TO_LE16(echo);
        elem = ecore_chain_produce(p_chain);
        if (!elem) {
                DP_NOTICE(p_hwfn, true, "Failed to produce from SPQ chain\n");
                return ECORE_INVAL;
        }

        *elem = p_ent->elem;    /* Struct assignment */

        p_db_data->spq_prod =
                OSAL_CPU_TO_LE16(ecore_chain_get_prod_idx(p_chain));

        /* Make sure the SPQE is updated before the doorbell */
        OSAL_WMB(p_hwfn->p_dev);

        DOORBELL(p_hwfn, p_spq->db_addr_offset, *(u32 *)p_db_data);

        /* Make sure the doorbell is rung */
        OSAL_WMB(p_hwfn->p_dev);

        DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
                   "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x"
                   " agg_params: %02x, prod: %04x\n",
                   p_spq->db_addr_offset, p_spq->cid, p_db_data->params,
                   p_db_data->agg_flags, ecore_chain_get_prod_idx(p_chain));

        return ECORE_SUCCESS;
}
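
/* Worked example of the echo mechanism above: if the chain producer index
 * is 7 when a ramrod is posted, hdr.echo is set to 7 before the element is
 * produced. The EQE the FW later generates carries the same echo value,
 * which ecore_spq_completion() uses to locate this entry in the
 * completion_pending list.
 */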

/***************************************************************************
 * Asynchronous events
 ***************************************************************************/

static enum _ecore_status_t
ecore_async_event_completion(struct ecore_hwfn *p_hwfn,
                             struct event_ring_entry *p_eqe)
{
        ecore_spq_async_comp_cb cb;
        enum _ecore_status_t rc;

        if (p_eqe->protocol_id >= MAX_PROTOCOL_TYPE) {
                DP_ERR(p_hwfn, "Wrong protocol: %d\n", p_eqe->protocol_id);
                return ECORE_INVAL;
        }

        cb = p_hwfn->p_spq->async_comp_cb[p_eqe->protocol_id];
        if (!cb) {
                DP_NOTICE(p_hwfn,
                          true, "Unknown Async completion for protocol: %d\n",
                          p_eqe->protocol_id);
                return ECORE_INVAL;
        }

        rc = cb(p_hwfn, p_eqe->opcode, p_eqe->echo,
                &p_eqe->data, p_eqe->fw_return_code);
        if (rc != ECORE_SUCCESS)
                DP_NOTICE(p_hwfn, true,
                          "Async completion callback failed, rc = %d [opcode %x, echo %x, fw_return_code %x]",
                          rc, p_eqe->opcode, p_eqe->echo,
                          p_eqe->fw_return_code);

        return rc;
}

enum _ecore_status_t
ecore_spq_register_async_cb(struct ecore_hwfn *p_hwfn,
                            enum protocol_type protocol_id,
                            ecore_spq_async_comp_cb cb)
{
        if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE))
                return ECORE_INVAL;

        p_hwfn->p_spq->async_comp_cb[protocol_id] = cb;
        return ECORE_SUCCESS;
}

void
ecore_spq_unregister_async_cb(struct ecore_hwfn *p_hwfn,
                              enum protocol_type protocol_id)
{
        if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE))
                return;

        p_hwfn->p_spq->async_comp_cb[protocol_id] = OSAL_NULL;
}
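
/* Sketch of a protocol module using the registration API above (assumed
 * usage; the handler name and the chosen protocol ID are illustrative).
 * The callback signature mirrors the invocation in
 * ecore_async_event_completion().
 */
#if 0
static enum _ecore_status_t
my_async_handler(struct ecore_hwfn *p_hwfn, u8 opcode, __le16 echo,
                 union event_ring_data *data, u8 fw_return_code)
{
        /* hypothetical: dispatch on opcode */
        return ECORE_SUCCESS;
}

ecore_spq_register_async_cb(p_hwfn, PROTOCOLID_COMMON, my_async_handler);
/* ... on teardown ... */
ecore_spq_unregister_async_cb(p_hwfn, PROTOCOLID_COMMON);
#endif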

/***************************************************************************
 * EQ API
 ***************************************************************************/
void ecore_eq_prod_update(struct ecore_hwfn *p_hwfn, u16 prod)
{
        u32 addr = GTT_BAR0_MAP_REG_USDM_RAM +
            USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id);

        REG_WR16(p_hwfn, addr, prod);

        /* keep prod updates ordered */
        OSAL_MMIOWB(p_hwfn->p_dev);
}

enum _ecore_status_t ecore_eq_completion(struct ecore_hwfn *p_hwfn,
                                         void *cookie)
{
        struct ecore_eq *p_eq = cookie;
        struct ecore_chain *p_chain = &p_eq->chain;
        u16 fw_cons_idx             = 0;
        enum _ecore_status_t rc = ECORE_SUCCESS;

        if (!p_hwfn->p_spq) {
                DP_ERR(p_hwfn, "Unexpected NULL p_spq\n");
                return ECORE_INVAL;
        }

        /* take a snapshot of the FW consumer */
        fw_cons_idx = OSAL_LE16_TO_CPU(*p_eq->p_fw_cons);

        DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "fw_cons_idx %x\n", fw_cons_idx);

        /* Need to guarantee the fw_cons index we use points to a usable
         * element (to comply with our chain), so that the chain macros
         * remain consistent
         */
        if ((fw_cons_idx & ecore_chain_get_usable_per_page(p_chain)) ==
            ecore_chain_get_usable_per_page(p_chain)) {
                fw_cons_idx += ecore_chain_get_unusable_per_page(p_chain);
        }

        /* Complete current segment of eq entries */
        while (fw_cons_idx != ecore_chain_get_cons_idx(p_chain)) {
                struct event_ring_entry *p_eqe = ecore_chain_consume(p_chain);
                if (!p_eqe) {
                        DP_ERR(p_hwfn,
                               "Unexpected NULL chain consumer entry\n");
                        break;
                }

                DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
                           "op %x prot %x res0 %x echo %x fwret %x flags %x\n",
                           p_eqe->opcode,            /* Event Opcode */
                           p_eqe->protocol_id,  /* Event Protocol ID */
                           p_eqe->reserved0,    /* Reserved */
                           /* Echo value from ramrod data on the host */
                           OSAL_LE16_TO_CPU(p_eqe->echo),
                           p_eqe->fw_return_code,    /* FW return code for SP
                                                      * ramrods
                                                      */
                           p_eqe->flags);

                if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC))
                        ecore_async_event_completion(p_hwfn, p_eqe);
                else
                        ecore_spq_completion(p_hwfn,
                                             p_eqe->echo,
                                             p_eqe->fw_return_code,
                                             &p_eqe->data);

                ecore_chain_recycle_consumed(p_chain);
        }

        ecore_eq_prod_update(p_hwfn, ecore_chain_get_prod_idx(p_chain));

        return rc;
}

enum _ecore_status_t ecore_eq_alloc(struct ecore_hwfn *p_hwfn, u16 num_elem)
{
        struct ecore_eq *p_eq;

        /* Allocate EQ struct */
        p_eq = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_eq));
        if (!p_eq) {
                DP_NOTICE(p_hwfn, false,
                          "Failed to allocate `struct ecore_eq'\n");
                return ECORE_NOMEM;
        }

        /* Allocate and initialize EQ chain */
        if (ecore_chain_alloc(p_hwfn->p_dev,
                              ECORE_CHAIN_USE_TO_PRODUCE,
                              ECORE_CHAIN_MODE_PBL,
                              ECORE_CHAIN_CNT_TYPE_U16,
                              num_elem,
                              sizeof(union event_ring_element),
                              &p_eq->chain, OSAL_NULL) != ECORE_SUCCESS) {
                DP_NOTICE(p_hwfn, false, "Failed to allocate eq chain\n");
                goto eq_allocate_fail;
        }

        /* register EQ completion on the SP SB */
        ecore_int_register_cb(p_hwfn, ecore_eq_completion,
                              p_eq, &p_eq->eq_sb_index, &p_eq->p_fw_cons);

        p_hwfn->p_eq = p_eq;
        return ECORE_SUCCESS;

eq_allocate_fail:
        OSAL_FREE(p_hwfn->p_dev, p_eq);
        return ECORE_NOMEM;
}

void ecore_eq_setup(struct ecore_hwfn *p_hwfn)
{
        ecore_chain_reset(&p_hwfn->p_eq->chain);
}

void ecore_eq_free(struct ecore_hwfn *p_hwfn)
{
        if (!p_hwfn->p_eq)
                return;

        ecore_chain_free(p_hwfn->p_dev, &p_hwfn->p_eq->chain);

        OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_eq);
        p_hwfn->p_eq = OSAL_NULL;
}
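
/* Minimal lifecycle sketch for the EQ helpers above (assumed sequencing;
 * the element count is an arbitrary example). The SPQ and ConsQ helpers
 * later in this file follow the same alloc/setup/free pattern.
 */
#if 0
static enum _ecore_status_t example_eq_lifecycle(struct ecore_hwfn *p_hwfn)
{
        enum _ecore_status_t rc;

        rc = ecore_eq_alloc(p_hwfn, 256); /* example element count */
        if (rc != ECORE_SUCCESS)
                return rc;

        ecore_eq_setup(p_hwfn); /* reset chain indices before use */

        /* ... EQ in service; ecore_eq_completion() runs off the SP SB ... */

        ecore_eq_free(p_hwfn);
        return ECORE_SUCCESS;
}
#endif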

/***************************************************************************
 * CQE API - manipulate EQ functionality
 ***************************************************************************/
static enum _ecore_status_t ecore_cqe_completion(struct ecore_hwfn *p_hwfn,
                                                 struct eth_slow_path_rx_cqe
                                                 *cqe,
                                                 enum protocol_type protocol)
{
        if (IS_VF(p_hwfn->p_dev))
                return OSAL_VF_CQE_COMPLETION(p_hwfn, cqe, protocol);

        /* @@@tmp - it's possible we'll eventually want to handle some
         * actual commands that can arrive here, but for now this is only
         * used to complete the ramrod using the echo value on the cqe
         */
        return ecore_spq_completion(p_hwfn, cqe->echo, 0, OSAL_NULL);
}

enum _ecore_status_t ecore_eth_cqe_completion(struct ecore_hwfn *p_hwfn,
                                              struct eth_slow_path_rx_cqe *cqe)
{
        enum _ecore_status_t rc;

        rc = ecore_cqe_completion(p_hwfn, cqe, PROTOCOLID_ETH);
        if (rc) {
                DP_NOTICE(p_hwfn, true,
                          "Failed to handle RXQ CQE [cmd 0x%02x]\n",
                          cqe->ramrod_cmd_id);
        }

        return rc;
}

/***************************************************************************
 * Slow hwfn Queue (spq)
 ***************************************************************************/
void ecore_spq_setup(struct ecore_hwfn *p_hwfn)
{
        struct ecore_spq *p_spq = p_hwfn->p_spq;
        struct ecore_spq_entry *p_virt = OSAL_NULL;
        struct core_db_data *p_db_data;
        void OSAL_IOMEM *db_addr;
        dma_addr_t p_phys = 0;
        u32 i, capacity;
        enum _ecore_status_t rc;

        OSAL_LIST_INIT(&p_spq->pending);
        OSAL_LIST_INIT(&p_spq->completion_pending);
        OSAL_LIST_INIT(&p_spq->free_pool);
        OSAL_LIST_INIT(&p_spq->unlimited_pending);
        OSAL_SPIN_LOCK_INIT(&p_spq->lock);

        /* SPQ empty pool */
        p_phys = p_spq->p_phys + OFFSETOF(struct ecore_spq_entry, ramrod);
        p_virt = p_spq->p_virt;

        capacity = ecore_chain_get_capacity(&p_spq->chain);
        for (i = 0; i < capacity; i++) {
                DMA_REGPAIR_LE(p_virt->elem.data_ptr, p_phys);

                OSAL_LIST_PUSH_TAIL(&p_virt->list, &p_spq->free_pool);

                p_virt++;
                p_phys += sizeof(struct ecore_spq_entry);
        }

        /* Statistics */
        p_spq->normal_count = 0;
        p_spq->comp_count = 0;
        p_spq->comp_sent_count = 0;
        p_spq->unlimited_pending_count = 0;

        OSAL_MEM_ZERO(p_spq->p_comp_bitmap,
                      SPQ_COMP_BMAP_SIZE * sizeof(unsigned long));
        p_spq->comp_bitmap_idx = 0;

        /* SPQ cid, cannot fail */
        ecore_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid);
        ecore_spq_hw_initialize(p_hwfn, p_spq);

        /* reset the chain itself */
        ecore_chain_reset(&p_spq->chain);

        /* Initialize the address/data of the SPQ doorbell */
        p_spq->db_addr_offset = DB_ADDR(p_spq->cid, DQ_DEMS_LEGACY);
        p_db_data = &p_spq->db_data;
        OSAL_MEM_ZERO(p_db_data, sizeof(*p_db_data));
        SET_FIELD(p_db_data->params, CORE_DB_DATA_DEST, DB_DEST_XCM);
        SET_FIELD(p_db_data->params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_MAX);
        SET_FIELD(p_db_data->params, CORE_DB_DATA_AGG_VAL_SEL,
                  DQ_XCM_CORE_SPQ_PROD_CMD);
        p_db_data->agg_flags = DQ_XCM_CORE_DQ_CF_CMD;

        /* Register the SPQ doorbell with the doorbell recovery mechanism */
        db_addr = (void *)((u8 *)p_hwfn->doorbells + p_spq->db_addr_offset);
        rc = ecore_db_recovery_add(p_hwfn->p_dev, db_addr, &p_spq->db_data,
                                   DB_REC_WIDTH_32B, DB_REC_KERNEL);
        if (rc != ECORE_SUCCESS)
                DP_INFO(p_hwfn,
                        "Failed to register the SPQ doorbell with the doorbell recovery mechanism\n");
}

enum _ecore_status_t ecore_spq_alloc(struct ecore_hwfn *p_hwfn)
{
        struct ecore_spq_entry *p_virt = OSAL_NULL;
        struct ecore_spq *p_spq = OSAL_NULL;
        dma_addr_t p_phys = 0;
        u32 capacity;

        /* SPQ struct */
        p_spq =
            OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(struct ecore_spq));
        if (!p_spq) {
                DP_NOTICE(p_hwfn, false, "Failed to allocate `struct ecore_spq'\n");
                return ECORE_NOMEM;
        }

        /* SPQ ring */
        if (ecore_chain_alloc(p_hwfn->p_dev,
                              ECORE_CHAIN_USE_TO_PRODUCE,
                              ECORE_CHAIN_MODE_SINGLE,
                              ECORE_CHAIN_CNT_TYPE_U16,
                              0, /* N/A when the mode is SINGLE */
                              sizeof(struct slow_path_element),
                              &p_spq->chain, OSAL_NULL)) {
                DP_NOTICE(p_hwfn, false, "Failed to allocate spq chain\n");
                goto spq_allocate_fail;
        }

        /* allocate and fill the SPQ elements (incl. ramrod data list) */
        capacity = ecore_chain_get_capacity(&p_spq->chain);
        p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, &p_phys,
                                         capacity *
                                         sizeof(struct ecore_spq_entry));
        if (!p_virt)
                goto spq_allocate_fail;

        p_spq->p_virt = p_virt;
        p_spq->p_phys = p_phys;

#ifdef CONFIG_ECORE_LOCK_ALLOC
        if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_spq->lock))
                goto spq_allocate_fail;
#endif

        p_hwfn->p_spq = p_spq;
        return ECORE_SUCCESS;

spq_allocate_fail:
        ecore_chain_free(p_hwfn->p_dev, &p_spq->chain);
        OSAL_FREE(p_hwfn->p_dev, p_spq);
        return ECORE_NOMEM;
}

void ecore_spq_free(struct ecore_hwfn *p_hwfn)
{
        struct ecore_spq *p_spq = p_hwfn->p_spq;
        void OSAL_IOMEM *db_addr;
        u32 capacity;

        if (!p_spq)
                return;

        /* Delete the SPQ doorbell from the doorbell recovery mechanism */
        db_addr = (void *)((u8 *)p_hwfn->doorbells + p_spq->db_addr_offset);
        ecore_db_recovery_del(p_hwfn->p_dev, db_addr, &p_spq->db_data);

        if (p_spq->p_virt) {
                capacity = ecore_chain_get_capacity(&p_spq->chain);
                OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
                                       p_spq->p_virt,
                                       p_spq->p_phys,
                                       capacity *
                                       sizeof(struct ecore_spq_entry));
        }

        ecore_chain_free(p_hwfn->p_dev, &p_spq->chain);
#ifdef CONFIG_ECORE_LOCK_ALLOC
        OSAL_SPIN_LOCK_DEALLOC(&p_spq->lock);
#endif

        OSAL_FREE(p_hwfn->p_dev, p_spq);
}
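
/* Assumed init/teardown ordering for the SPQ (a sketch, not driver code):
 * ecore_spq_alloc() creates the struct, chain and DMA pool once,
 * ecore_spq_setup() (re)initializes the lists, CID and doorbell on every
 * load, and ecore_spq_free() releases everything on final teardown.
 */
#if 0
rc = ecore_spq_alloc(p_hwfn);
if (rc == ECORE_SUCCESS) {
        ecore_spq_setup(p_hwfn);
        /* ... post ramrods via ecore_spq_post() ... */
        ecore_spq_free(p_hwfn);
}
#endif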

enum _ecore_status_t
ecore_spq_get_entry(struct ecore_hwfn *p_hwfn, struct ecore_spq_entry **pp_ent)
{
        struct ecore_spq *p_spq = p_hwfn->p_spq;
        struct ecore_spq_entry *p_ent = OSAL_NULL;
        enum _ecore_status_t rc = ECORE_SUCCESS;

        OSAL_SPIN_LOCK(&p_spq->lock);

        if (OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) {
                p_ent = OSAL_ZALLOC(p_hwfn->p_dev, GFP_ATOMIC, sizeof(*p_ent));
                if (!p_ent) {
                        DP_NOTICE(p_hwfn, false, "Failed to allocate an SPQ entry for a pending ramrod\n");
                        rc = ECORE_NOMEM;
                        goto out_unlock;
                }
                p_ent->queue = &p_spq->unlimited_pending;
        } else {
                p_ent = OSAL_LIST_FIRST_ENTRY(&p_spq->free_pool,
                                              struct ecore_spq_entry, list);
                OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->free_pool);
                p_ent->queue = &p_spq->pending;
        }

        *pp_ent = p_ent;

out_unlock:
        OSAL_SPIN_UNLOCK(&p_spq->lock);
        return rc;
}

/* Locked variant; Should be called while the SPQ lock is taken */
static void __ecore_spq_return_entry(struct ecore_hwfn *p_hwfn,
                                     struct ecore_spq_entry *p_ent)
{
        OSAL_LIST_PUSH_TAIL(&p_ent->list, &p_hwfn->p_spq->free_pool);
}

void ecore_spq_return_entry(struct ecore_hwfn *p_hwfn,
                            struct ecore_spq_entry *p_ent)
{
        OSAL_SPIN_LOCK(&p_hwfn->p_spq->lock);
        __ecore_spq_return_entry(p_hwfn, p_ent);
        OSAL_SPIN_UNLOCK(&p_hwfn->p_spq->lock);
}
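
/* Sketch of the acquire/abort pairing (assumed usage; the validation
 * condition is hypothetical): a caller that acquired an entry but bails out
 * before posting must either free it (if it was allocated for the
 * unlimited_pending path) or return it to the free pool.
 */
#if 0
rc = ecore_spq_get_entry(p_hwfn, &p_ent);
if (rc == ECORE_SUCCESS && validation_failed /* hypothetical */) {
        if (p_ent->queue == &p_hwfn->p_spq->unlimited_pending)
                OSAL_FREE(p_hwfn->p_dev, p_ent); /* dynamically allocated */
        else
                ecore_spq_return_entry(p_hwfn, p_ent);
}
#endif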

/**
 * @brief ecore_spq_add_entry - adds a new entry to the pending
 *        list. Should be called while the SPQ lock is held.
 *
 * Adds an entry to the pending list if there is room (an empty
 * element is available in the free_pool), or else places the
 * entry in the unlimited_pending pool.
 *
 * @param p_hwfn
 * @param p_ent
 * @param priority
 *
 * @return enum _ecore_status_t
 */
static enum _ecore_status_t
ecore_spq_add_entry(struct ecore_hwfn *p_hwfn,
                    struct ecore_spq_entry *p_ent, enum spq_priority priority)
{
        struct ecore_spq *p_spq = p_hwfn->p_spq;

        if (p_ent->queue == &p_spq->unlimited_pending) {
                if (OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) {
                        OSAL_LIST_PUSH_TAIL(&p_ent->list,
                                            &p_spq->unlimited_pending);
                        p_spq->unlimited_pending_count++;

                        return ECORE_SUCCESS;

                } else {
                        struct ecore_spq_entry *p_en2;

                        p_en2 = OSAL_LIST_FIRST_ENTRY(&p_spq->free_pool,
                                                     struct ecore_spq_entry,
                                                     list);
                        OSAL_LIST_REMOVE_ENTRY(&p_en2->list, &p_spq->free_pool);

                        /* Copy the ring element physical pointer to the new
                         * entry, since we are about to override the entire ring
                         * entry and don't want to lose the pointer.
                         */
                        p_ent->elem.data_ptr = p_en2->elem.data_ptr;

                        *p_en2 = *p_ent;

                        /* EBLOCK is responsible for freeing the allocated
                         * p_ent
                         */
                        if (p_ent->comp_mode != ECORE_SPQ_MODE_EBLOCK)
                                OSAL_FREE(p_hwfn->p_dev, p_ent);

                        p_ent = p_en2;
                }
        }

        /* entry is to be placed in 'pending' queue */
        switch (priority) {
        case ECORE_SPQ_PRIORITY_NORMAL:
                OSAL_LIST_PUSH_TAIL(&p_ent->list, &p_spq->pending);
                p_spq->normal_count++;
                break;
        case ECORE_SPQ_PRIORITY_HIGH:
                OSAL_LIST_PUSH_HEAD(&p_ent->list, &p_spq->pending);
                p_spq->high_count++;
                break;
        default:
                return ECORE_INVAL;
        }

        return ECORE_SUCCESS;
}

/***************************************************************************
 * Accessor
 ***************************************************************************/

u32 ecore_spq_get_cid(struct ecore_hwfn *p_hwfn)
{
        if (!p_hwfn->p_spq)
                return 0xffffffff;      /* illegal */
        return p_hwfn->p_spq->cid;
}

/***************************************************************************
 * Posting new Ramrods
 ***************************************************************************/

static enum _ecore_status_t ecore_spq_post_list(struct ecore_hwfn *p_hwfn,
                                                osal_list_t *head,
                                                u32 keep_reserve)
{
        struct ecore_spq *p_spq = p_hwfn->p_spq;
        enum _ecore_status_t rc;

        /* TODO - implementation might be wasteful; will always keep room
         * for an additional high priority ramrod (even if one is already
         * pending in the FW)
         */
        while (ecore_chain_get_elem_left(&p_spq->chain) > keep_reserve &&
               !OSAL_LIST_IS_EMPTY(head)) {
                struct ecore_spq_entry *p_ent =
                    OSAL_LIST_FIRST_ENTRY(head, struct ecore_spq_entry, list);
                if (p_ent != OSAL_NULL) {
#if defined(_NTDDK_)
#pragma warning(suppress : 6011 28182)
#endif
                        OSAL_LIST_REMOVE_ENTRY(&p_ent->list, head);
                        OSAL_LIST_PUSH_TAIL(&p_ent->list,
                                            &p_spq->completion_pending);
                        p_spq->comp_sent_count++;

                        rc = ecore_spq_hw_post(p_hwfn, p_spq, p_ent);
                        if (rc) {
                                OSAL_LIST_REMOVE_ENTRY(&p_ent->list,
                                                    &p_spq->completion_pending);
                                __ecore_spq_return_entry(p_hwfn, p_ent);
                                return rc;
                        }
                }
        }

        return ECORE_SUCCESS;
}

static enum _ecore_status_t ecore_spq_pend_post(struct ecore_hwfn *p_hwfn)
{
        struct ecore_spq *p_spq = p_hwfn->p_spq;
        struct ecore_spq_entry *p_ent = OSAL_NULL;

        while (!OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) {
                if (OSAL_LIST_IS_EMPTY(&p_spq->unlimited_pending))
                        break;

                p_ent = OSAL_LIST_FIRST_ENTRY(&p_spq->unlimited_pending,
                                              struct ecore_spq_entry, list);
                if (!p_ent)
                        return ECORE_INVAL;

#if defined(_NTDDK_)
#pragma warning(suppress : 6011)
#endif
                OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->unlimited_pending);

                ecore_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
        }

        return ecore_spq_post_list(p_hwfn,
                                 &p_spq->pending, SPQ_HIGH_PRI_RESERVE_DEFAULT);
}

enum _ecore_status_t ecore_spq_post(struct ecore_hwfn *p_hwfn,
                                    struct ecore_spq_entry *p_ent,
                                    u8 *fw_return_code)
{
        enum _ecore_status_t rc = ECORE_SUCCESS;
        struct ecore_spq *p_spq = p_hwfn ? p_hwfn->p_spq : OSAL_NULL;
        bool b_ret_ent = true;

        if (!p_hwfn)
                return ECORE_INVAL;

        if (!p_ent) {
                DP_NOTICE(p_hwfn, true, "Got a NULL pointer\n");
                return ECORE_INVAL;
        }

        if (p_hwfn->p_dev->recov_in_prog) {
                DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
                           "Recovery is in progress -> skip spq post"
                           " [cmd %02x protocol %02x]\n",
                           p_ent->elem.hdr.cmd_id, p_ent->elem.hdr.protocol_id);
                /* Return success to let the flows complete successfully
                 * w/o any error handling.
                 */
                return ECORE_SUCCESS;
        }

        OSAL_SPIN_LOCK(&p_spq->lock);

        /* Complete the entry */
        rc = ecore_spq_fill_entry(p_hwfn, p_ent);

        /* Check return value after LOCK is taken for cleaner error flow */
        if (rc)
                goto spq_post_fail;

        /* Add the request to the pending queue */
        rc = ecore_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
        if (rc)
                goto spq_post_fail;

        rc = ecore_spq_pend_post(p_hwfn);
        if (rc) {
                /* Since it's possible that pending failed for a different
                 * entry [although unlikely], the failed entry was already
                 * dealt with; No need to return it here.
                 */
                b_ret_ent = false;
                goto spq_post_fail;
        }

        OSAL_SPIN_UNLOCK(&p_spq->lock);

        if (p_ent->comp_mode == ECORE_SPQ_MODE_EBLOCK) {
                /* For entries in ECORE BLOCK mode, the completion code cannot
                 * perform the necessary cleanup - if it did, we couldn't
                 * access p_ent here to see whether it's successful or not.
                 * Thus, after gaining the answer perform the cleanup here.
                 */
                rc = ecore_spq_block(p_hwfn, p_ent, fw_return_code,
                                     p_ent->queue == &p_spq->unlimited_pending);

                if (p_ent->queue == &p_spq->unlimited_pending) {
                        /* This is an allocated p_ent which does not need to
                         * return to pool.
                         */
                        OSAL_FREE(p_hwfn->p_dev, p_ent);

                        /* TBD: handle error flow and remove p_ent from
                         * completion pending
                         */
                        return rc;
                }

                if (rc)
                        goto spq_post_fail2;

                /* return to pool */
                ecore_spq_return_entry(p_hwfn, p_ent);
        }
        return rc;

spq_post_fail2:
        OSAL_SPIN_LOCK(&p_spq->lock);
        OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->completion_pending);
        ecore_chain_return_produced(&p_spq->chain);

spq_post_fail:
        /* return to the free pool */
        if (b_ret_ent)
                __ecore_spq_return_entry(p_hwfn, p_ent);
        OSAL_SPIN_UNLOCK(&p_spq->lock);

        return rc;
}
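
/* Condensed sketch of the ramrod flow built on top of ecore_spq_post()
 * (assumed usage; real callers go through the ecore_sp_* helpers, and the
 * header/ramrod fields are left as placeholders).
 */
#if 0
static enum _ecore_status_t example_post_ramrod(struct ecore_hwfn *p_hwfn)
{
        struct ecore_spq_entry *p_ent = OSAL_NULL;
        enum _ecore_status_t rc;
        u8 fw_return_code;

        rc = ecore_spq_get_entry(p_hwfn, &p_ent);
        if (rc != ECORE_SUCCESS)
                return rc;

        p_ent->comp_mode = ECORE_SPQ_MODE_EBLOCK;
        p_ent->priority = ECORE_SPQ_PRIORITY_NORMAL;
        /* ... fill p_ent->elem.hdr and the ramrod data here ... */

        /* Blocks until the FW completes the ramrod or polling times out */
        return ecore_spq_post(p_hwfn, p_ent, &fw_return_code);
}
#endif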

enum _ecore_status_t ecore_spq_completion(struct ecore_hwfn *p_hwfn,
                                          __le16 echo,
                                          u8 fw_return_code,
                                          union event_ring_data *p_data)
{
        struct ecore_spq *p_spq;
        struct ecore_spq_entry *p_ent = OSAL_NULL;
        struct ecore_spq_entry *tmp;
        struct ecore_spq_entry *found = OSAL_NULL;
        enum _ecore_status_t rc;

        p_spq = p_hwfn->p_spq;
        if (!p_spq) {
                DP_ERR(p_hwfn, "Unexpected NULL p_spq\n");
                return ECORE_INVAL;
        }

        OSAL_SPIN_LOCK(&p_spq->lock);
        OSAL_LIST_FOR_EACH_ENTRY_SAFE(p_ent,
                                      tmp,
                                      &p_spq->completion_pending,
                                      list, struct ecore_spq_entry) {
                if (p_ent->elem.hdr.echo == echo) {
                        OSAL_LIST_REMOVE_ENTRY(&p_ent->list,
                                               &p_spq->completion_pending);

                        /* Avoid overriding of SPQ entries when getting
                         * out-of-order completions, by marking the completions
                         * in a bitmap and increasing the chain consumer only
                         * for the first successive completed entries.
                         */
                        SPQ_COMP_BMAP_SET_BIT(p_spq, echo);
                        while (SPQ_COMP_BMAP_TEST_BIT(p_spq,
                                                      p_spq->comp_bitmap_idx)) {
                                SPQ_COMP_BMAP_CLEAR_BIT(p_spq,
                                                        p_spq->comp_bitmap_idx);
                                p_spq->comp_bitmap_idx++;
                                ecore_chain_return_produced(&p_spq->chain);
                        }

                        p_spq->comp_count++;
                        found = p_ent;
                        break;
                }

                /* This is debug and should be relatively uncommon - depends
                 * on scenarios which have multiple ramrods sent per PF.
                 */
                DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
                           "Got completion for echo %04x - doesn't match"
                           " echo %04x in completion pending list\n",
                           OSAL_LE16_TO_CPU(echo),
                           OSAL_LE16_TO_CPU(p_ent->elem.hdr.echo));
        }

        /* Release lock before callback, as callback may post
         * an additional ramrod.
         */
        OSAL_SPIN_UNLOCK(&p_spq->lock);

        if (!found) {
                DP_NOTICE(p_hwfn, true,
                          "Failed to find an entry this"
                          " EQE [echo %04x] completes\n",
                          OSAL_LE16_TO_CPU(echo));
                return ECORE_EXISTS;
        }

        DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
                   "Complete EQE [echo %04x]: func %p cookie %p\n",
                   OSAL_LE16_TO_CPU(echo),
                   p_ent->comp_cb.function, p_ent->comp_cb.cookie);
        if (found->comp_cb.function)
                found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
                                        fw_return_code);
        else
                DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
                           "Got a completion without a callback function\n");

        if ((found->comp_mode != ECORE_SPQ_MODE_EBLOCK) ||
            (found->queue == &p_spq->unlimited_pending))
                /* EBLOCK is responsible for returning its own entry into the
                 * free list, unless it originally added the entry into the
                 * unlimited pending list.
                 */
                ecore_spq_return_entry(p_hwfn, found);

        /* Attempt to post pending requests */
        OSAL_SPIN_LOCK(&p_spq->lock);
        rc = ecore_spq_pend_post(p_hwfn);
        OSAL_SPIN_UNLOCK(&p_spq->lock);

        return rc;
}
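
/* Worked example of the out-of-order handling above (illustrative): assume
 * ramrods with echo 4 and 5 are in flight and comp_bitmap_idx is 4. If echo
 * 5 completes first, its bitmap bit is set but the loop stops at bit 4, so
 * no chain element is returned yet. When echo 4 then completes, bits 4 and
 * 5 are both cleared, comp_bitmap_idx advances to 6, and two produced
 * elements are returned to the chain in order.
 */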

enum _ecore_status_t ecore_consq_alloc(struct ecore_hwfn *p_hwfn)
{
        struct ecore_consq *p_consq;

        /* Allocate ConsQ struct */
        p_consq =
            OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_consq));
        if (!p_consq) {
                DP_NOTICE(p_hwfn, false,
                          "Failed to allocate `struct ecore_consq'\n");
                return ECORE_NOMEM;
        }

        /* Allocate and initialize ConsQ chain */
        if (ecore_chain_alloc(p_hwfn->p_dev,
                              ECORE_CHAIN_USE_TO_PRODUCE,
                              ECORE_CHAIN_MODE_PBL,
                              ECORE_CHAIN_CNT_TYPE_U16,
                              ECORE_CHAIN_PAGE_SIZE / 0x80,
                              0x80,
                              &p_consq->chain, OSAL_NULL) != ECORE_SUCCESS) {
                DP_NOTICE(p_hwfn, false, "Failed to allocate consq chain");
                goto consq_allocate_fail;
        }

        p_hwfn->p_consq = p_consq;
        return ECORE_SUCCESS;

consq_allocate_fail:
        OSAL_FREE(p_hwfn->p_dev, p_consq);
        return ECORE_NOMEM;
}

void ecore_consq_setup(struct ecore_hwfn *p_hwfn)
{
        ecore_chain_reset(&p_hwfn->p_consq->chain);
}

void ecore_consq_free(struct ecore_hwfn *p_hwfn)
{
        if (!p_hwfn->p_consq)
                return;

        ecore_chain_free(p_hwfn->p_dev, &p_hwfn->p_consq->chain);
        OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_consq);
}