drivers/net/qede/base/ecore_spq.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright (c) 2016 - 2018 Cavium Inc.
3  * All rights reserved.
4  * www.cavium.com
5  */
6
7 #include "bcm_osal.h"
8 #include "reg_addr.h"
9 #include "ecore_gtt_reg_addr.h"
10 #include "ecore_hsi_common.h"
11 #include "ecore.h"
12 #include "ecore_sp_api.h"
13 #include "ecore_spq.h"
14 #include "ecore_iro.h"
15 #include "ecore_init_fw_funcs.h"
16 #include "ecore_cxt.h"
17 #include "ecore_int.h"
18 #include "ecore_dev_api.h"
19 #include "ecore_mcp.h"
20 #include "ecore_hw.h"
21 #include "ecore_sriov.h"
22
23 /***************************************************************************
24  * Structures & Definitions
25  ***************************************************************************/
26
27 #define SPQ_HIGH_PRI_RESERVE_DEFAULT    (1)
28
29 #define SPQ_BLOCK_DELAY_MAX_ITER        (10)
30 #define SPQ_BLOCK_DELAY_US              (10)
31 #define SPQ_BLOCK_SLEEP_MAX_ITER        (200)
32 #define SPQ_BLOCK_SLEEP_MS              (5)
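/* With these values, the non-sleeping poll phase lasts roughly 100us
 * (10 iterations x 10us) and the sleeping phase, at its default budget,
 * roughly one second (200 iterations x 5ms); ecore_set_spq_block_timeout()
 * can adjust the sleeping budget.
 */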
33
34 /***************************************************************************
35  * Blocking Implementation (BLOCK/EBLOCK mode)
36  ***************************************************************************/
37 static void ecore_spq_blocking_cb(struct ecore_hwfn *p_hwfn, void *cookie,
38                                   union event_ring_data OSAL_UNUSED * data,
39                                   u8 fw_return_code)
40 {
41         struct ecore_spq_comp_done *comp_done;
42
43         comp_done = (struct ecore_spq_comp_done *)cookie;
44
45         comp_done->done = 0x1;
46         comp_done->fw_return_code = fw_return_code;
47
48         /* make update visible to waiting thread */
49         OSAL_SMP_WMB(p_hwfn->p_dev);
50 }
51
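/* Poll until the blocking completion callback marks the entry as done, or
 * until the iteration budget is exhausted; optionally sleep between
 * iterations.
 */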
52 static enum _ecore_status_t __ecore_spq_block(struct ecore_hwfn *p_hwfn,
53                                               struct ecore_spq_entry *p_ent,
54                                               u8 *p_fw_ret,
55                                               bool sleep_between_iter)
56 {
57         struct ecore_spq_comp_done *comp_done;
58         u32 iter_cnt;
59
60         comp_done = (struct ecore_spq_comp_done *)p_ent->comp_cb.cookie;
61         iter_cnt = sleep_between_iter ? p_hwfn->p_spq->block_sleep_max_iter
62                                       : SPQ_BLOCK_DELAY_MAX_ITER;
63 #ifndef ASIC_ONLY
64         if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && sleep_between_iter)
65                 iter_cnt *= 5;
66 #endif
67
68         while (iter_cnt--) {
69                 OSAL_POLL_MODE_DPC(p_hwfn);
70                 OSAL_SMP_RMB(p_hwfn->p_dev);
71                 if (comp_done->done == 1) {
72                         if (p_fw_ret)
73                                 *p_fw_ret = comp_done->fw_return_code;
74                         return ECORE_SUCCESS;
75                 }
76
77                 if (sleep_between_iter)
78                         OSAL_MSLEEP(SPQ_BLOCK_SLEEP_MS);
79                 else
80                         OSAL_UDELAY(SPQ_BLOCK_DELAY_US);
81         }
82
83         return ECORE_TIMEOUT;
84 }
85
86 static enum _ecore_status_t ecore_spq_block(struct ecore_hwfn *p_hwfn,
87                                             struct ecore_spq_entry *p_ent,
88                                             u8 *p_fw_ret, bool skip_quick_poll)
89 {
90         struct ecore_spq_comp_done *comp_done;
91         struct ecore_ptt *p_ptt;
92         enum _ecore_status_t rc;
93
94         /* A relatively short polling period w/o sleeping, to allow the FW to
95          * complete the ramrod and thus possibly to avoid the following sleeps.
96          */
97         if (!skip_quick_poll) {
98                 rc = __ecore_spq_block(p_hwfn, p_ent, p_fw_ret, false);
99                 if (rc == ECORE_SUCCESS)
100                         return ECORE_SUCCESS;
101         }
102
103         /* Move to polling with a sleeping period between iterations */
104         rc = __ecore_spq_block(p_hwfn, p_ent, p_fw_ret, true);
105         if (rc == ECORE_SUCCESS)
106                 return ECORE_SUCCESS;
107
108         p_ptt = ecore_ptt_acquire(p_hwfn);
109         if (!p_ptt)
110                 return ECORE_AGAIN;
111
112         DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
113         rc = ecore_mcp_drain(p_hwfn, p_ptt);
114         ecore_ptt_release(p_hwfn, p_ptt);
115         if (rc != ECORE_SUCCESS) {
116                 DP_NOTICE(p_hwfn, true, "MCP drain failed\n");
117                 goto err;
118         }
119
120         /* Retry after drain */
121         rc = __ecore_spq_block(p_hwfn, p_ent, p_fw_ret, true);
122         if (rc == ECORE_SUCCESS)
123                 return ECORE_SUCCESS;
124
125         comp_done = (struct ecore_spq_comp_done *)p_ent->comp_cb.cookie;
126         if (comp_done->done == 1) {
127                 if (p_fw_ret)
128                         *p_fw_ret = comp_done->fw_return_code;
129                 return ECORE_SUCCESS;
130         }
131 err:
132         DP_NOTICE(p_hwfn, true,
133                   "Ramrod is stuck [CID %08x cmd %02x proto %02x echo %04x]\n",
134                   OSAL_LE32_TO_CPU(p_ent->elem.hdr.cid),
135                   p_ent->elem.hdr.cmd_id, p_ent->elem.hdr.protocol_id,
136                   OSAL_LE16_TO_CPU(p_ent->elem.hdr.echo));
137
138         ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_RAMROD_FAIL);
139
140         return ECORE_BUSY;
141 }
142
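/* Convert the requested SPQ block timeout from milliseconds into sleep-poll
 * iterations; a value of 0 restores the default iteration budget.
 */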
143 void ecore_set_spq_block_timeout(struct ecore_hwfn *p_hwfn,
144                                  u32 spq_timeout_ms)
145 {
146         p_hwfn->p_spq->block_sleep_max_iter = spq_timeout_ms ?
147                 spq_timeout_ms / SPQ_BLOCK_SLEEP_MS :
148                 SPQ_BLOCK_SLEEP_MAX_ITER;
149 }
150
151 /***************************************************************************
152  * SPQ entries inner API
153  ***************************************************************************/
154 static enum _ecore_status_t
155 ecore_spq_fill_entry(struct ecore_hwfn *p_hwfn, struct ecore_spq_entry *p_ent)
156 {
157         p_ent->flags = 0;
158
159         switch (p_ent->comp_mode) {
160         case ECORE_SPQ_MODE_EBLOCK:
161         case ECORE_SPQ_MODE_BLOCK:
162                 p_ent->comp_cb.function = ecore_spq_blocking_cb;
163                 break;
164         case ECORE_SPQ_MODE_CB:
165                 break;
166         default:
167                 DP_NOTICE(p_hwfn, true, "Unknown SPQE completion mode %d\n",
168                           p_ent->comp_mode);
169                 return ECORE_INVAL;
170         }
171
172         DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
173                    "Ramrod header: [CID 0x%08x CMD 0x%02x protocol 0x%02x]"
174                    " Data pointer: [%08x:%08x] Completion Mode: %s\n",
175                    p_ent->elem.hdr.cid, p_ent->elem.hdr.cmd_id,
176                    p_ent->elem.hdr.protocol_id,
177                    p_ent->elem.data_ptr.hi, p_ent->elem.data_ptr.lo,
178                    D_TRINE(p_ent->comp_mode, ECORE_SPQ_MODE_EBLOCK,
179                            ECORE_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
180                            "MODE_CB"));
181
182         return ECORE_SUCCESS;
183 }
184
185 /***************************************************************************
186  * HSI access
187  ***************************************************************************/
188 static void ecore_spq_hw_initialize(struct ecore_hwfn *p_hwfn,
189                                     struct ecore_spq *p_spq)
190 {
191         struct e4_core_conn_context *p_cxt;
192         struct ecore_cxt_info cxt_info;
193         u16 physical_q;
194         enum _ecore_status_t rc;
195
196         cxt_info.iid = p_spq->cid;
197
198         rc = ecore_cxt_get_cid_info(p_hwfn, &cxt_info);
199
200         if (rc < 0) {
201                 DP_NOTICE(p_hwfn, true, "Cannot find context info for cid=%d\n",
202                           p_spq->cid);
203                 return;
204         }
205
206         p_cxt = cxt_info.p_cxt;
207
208         /* @@@TBD we zero the context until we have ilt_reset implemented. */
209         OSAL_MEM_ZERO(p_cxt, sizeof(*p_cxt));
210
211         if (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev)) {
212                 SET_FIELD(p_cxt->xstorm_ag_context.flags10,
213                           E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
214                 SET_FIELD(p_cxt->xstorm_ag_context.flags1,
215                           E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
216                 /* SET_FIELD(p_cxt->xstorm_ag_context.flags10,
217                  *        E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN, 1);
218                  */
219                 SET_FIELD(p_cxt->xstorm_ag_context.flags9,
220                           E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);
221         }
222
223         /* CDU validation - FIXME currently disabled */
224
225         /* QM physical queue */
226         physical_q = ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
227         p_cxt->xstorm_ag_context.physical_q0 = OSAL_CPU_TO_LE16(physical_q);
228
229         p_cxt->xstorm_st_context.spq_base_lo =
230             DMA_LO_LE(p_spq->chain.p_phys_addr);
231         p_cxt->xstorm_st_context.spq_base_hi =
232             DMA_HI_LE(p_spq->chain.p_phys_addr);
233
234         DMA_REGPAIR_LE(p_cxt->xstorm_st_context.consolid_base_addr,
235                        p_hwfn->p_consq->chain.p_phys_addr);
236 }
237
238 static enum _ecore_status_t ecore_spq_hw_post(struct ecore_hwfn *p_hwfn,
239                                               struct ecore_spq *p_spq,
240                                               struct ecore_spq_entry *p_ent)
241 {
242         struct ecore_chain *p_chain = &p_hwfn->p_spq->chain;
243         struct core_db_data *p_db_data = &p_spq->db_data;
244         u16 echo = ecore_chain_get_prod_idx(p_chain);
245         struct slow_path_element *elem;
246
247         p_ent->elem.hdr.echo = OSAL_CPU_TO_LE16(echo);
248         elem = ecore_chain_produce(p_chain);
249         if (!elem) {
250                 DP_NOTICE(p_hwfn, true, "Failed to produce from SPQ chain\n");
251                 return ECORE_INVAL;
252         }
253
254         *elem = p_ent->elem;    /* Struct assignment */
255
256         p_db_data->spq_prod =
257                 OSAL_CPU_TO_LE16(ecore_chain_get_prod_idx(p_chain));
258
259         /* Make sure the SPQE is updated before the doorbell */
260         OSAL_WMB(p_hwfn->p_dev);
261
262         DOORBELL(p_hwfn, p_spq->db_addr_offset, *(u32 *)p_db_data);
263
264         /* Make sure the doorbell is rung */
265         OSAL_WMB(p_hwfn->p_dev);
266
267         DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
268                    "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x"
269                    " agg_params: %02x, prod: %04x\n",
270                    p_spq->db_addr_offset, p_spq->cid, p_db_data->params,
271                    p_db_data->agg_flags, ecore_chain_get_prod_idx(p_chain));
272
273         return ECORE_SUCCESS;
274 }
275
276 /***************************************************************************
277  * Asynchronous events
278  ***************************************************************************/
279
280 static enum _ecore_status_t
281 ecore_async_event_completion(struct ecore_hwfn *p_hwfn,
282                              struct event_ring_entry *p_eqe)
283 {
284         ecore_spq_async_comp_cb cb;
285
286         if (!p_hwfn->p_spq || (p_eqe->protocol_id >= MAX_PROTOCOL_TYPE))
287                 return ECORE_INVAL;
288
289         cb = p_hwfn->p_spq->async_comp_cb[p_eqe->protocol_id];
290         if (cb) {
291                 return cb(p_hwfn, p_eqe->opcode, p_eqe->echo,
292                           &p_eqe->data, p_eqe->fw_return_code);
293         } else {
294                 DP_NOTICE(p_hwfn,
295                           true, "Unknown Async completion for protocol: %d\n",
296                           p_eqe->protocol_id);
297                 return ECORE_INVAL;
298         }
299 }
300
301 enum _ecore_status_t
302 ecore_spq_register_async_cb(struct ecore_hwfn *p_hwfn,
303                             enum protocol_type protocol_id,
304                             ecore_spq_async_comp_cb cb)
305 {
306         if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE))
307                 return ECORE_INVAL;
308
309         p_hwfn->p_spq->async_comp_cb[protocol_id] = cb;
310         return ECORE_SUCCESS;
311 }
312
313 void
314 ecore_spq_unregister_async_cb(struct ecore_hwfn *p_hwfn,
315                               enum protocol_type protocol_id)
316 {
317         if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE))
318                 return;
319
320         p_hwfn->p_spq->async_comp_cb[protocol_id] = OSAL_NULL;
321 }
322
323 /***************************************************************************
324  * EQ API
325  ***************************************************************************/
326 void ecore_eq_prod_update(struct ecore_hwfn *p_hwfn, u16 prod)
327 {
328         u32 addr = GTT_BAR0_MAP_REG_USDM_RAM +
329             USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id);
330
331         REG_WR16(p_hwfn, addr, prod);
332
333         /* keep prod updates ordered */
334         OSAL_MMIOWB(p_hwfn->p_dev);
335 }
336
337 enum _ecore_status_t ecore_eq_completion(struct ecore_hwfn *p_hwfn,
338                                          void *cookie)
339 {
340         struct ecore_eq *p_eq = cookie;
341         struct ecore_chain *p_chain = &p_eq->chain;
342         enum _ecore_status_t rc = 0;
343
344         /* take a snapshot of the FW consumer */
345         u16 fw_cons_idx = OSAL_LE16_TO_CPU(*p_eq->p_fw_cons);
346
347         DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "fw_cons_idx %x\n", fw_cons_idx);
348
349         /* Need to guarantee the fw_cons index we use points to a usable
350          * element (to comply with our chain), so our macros would comply
351          */
352         if ((fw_cons_idx & ecore_chain_get_usable_per_page(p_chain)) ==
353             ecore_chain_get_usable_per_page(p_chain)) {
354                 fw_cons_idx += ecore_chain_get_unusable_per_page(p_chain);
355         }
356
357         /* Complete current segment of eq entries */
358         while (fw_cons_idx != ecore_chain_get_cons_idx(p_chain)) {
359                 struct event_ring_entry *p_eqe = ecore_chain_consume(p_chain);
360                 if (!p_eqe) {
361                         rc = ECORE_INVAL;
362                         break;
363                 }
364
365                 DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
366                            "op %x prot %x res0 %x echo %x fwret %x flags %x\n",
367                            p_eqe->opcode,            /* Event Opcode */
368                            p_eqe->protocol_id,  /* Event Protocol ID */
369                            p_eqe->reserved0,    /* Reserved */
370                            /* Echo value from ramrod data on the host */
371                            OSAL_LE16_TO_CPU(p_eqe->echo),
372                            p_eqe->fw_return_code,    /* FW return code for SP
373                                                       * ramrods
374                                                       */
375                            p_eqe->flags);
376
377                 if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC)) {
378                         if (ecore_async_event_completion(p_hwfn, p_eqe))
379                                 rc = ECORE_INVAL;
380                 } else if (ecore_spq_completion(p_hwfn,
381                                                 p_eqe->echo,
382                                                 p_eqe->fw_return_code,
383                                                 &p_eqe->data)) {
384                         rc = ECORE_INVAL;
385                 }
386
387                 ecore_chain_recycle_consumed(p_chain);
388         }
389
390         ecore_eq_prod_update(p_hwfn, ecore_chain_get_prod_idx(p_chain));
391
392         return rc;
393 }
394
395 enum _ecore_status_t ecore_eq_alloc(struct ecore_hwfn *p_hwfn, u16 num_elem)
396 {
397         struct ecore_eq *p_eq;
398
399         /* Allocate EQ struct */
400         p_eq = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_eq));
401         if (!p_eq) {
402                 DP_NOTICE(p_hwfn, false,
403                           "Failed to allocate `struct ecore_eq'\n");
404                 return ECORE_NOMEM;
405         }
406
407         /* Allocate and initialize EQ chain*/
408         if (ecore_chain_alloc(p_hwfn->p_dev,
409                               ECORE_CHAIN_USE_TO_PRODUCE,
410                               ECORE_CHAIN_MODE_PBL,
411                               ECORE_CHAIN_CNT_TYPE_U16,
412                               num_elem,
413                               sizeof(union event_ring_element),
414                               &p_eq->chain, OSAL_NULL) != ECORE_SUCCESS) {
415                 DP_NOTICE(p_hwfn, false, "Failed to allocate eq chain\n");
416                 goto eq_allocate_fail;
417         }
418
419         /* register EQ completion on the SP SB */
420         ecore_int_register_cb(p_hwfn, ecore_eq_completion,
421                               p_eq, &p_eq->eq_sb_index, &p_eq->p_fw_cons);
422
423         p_hwfn->p_eq = p_eq;
424         return ECORE_SUCCESS;
425
426 eq_allocate_fail:
427         OSAL_FREE(p_hwfn->p_dev, p_eq);
428         return ECORE_NOMEM;
429 }
430
431 void ecore_eq_setup(struct ecore_hwfn *p_hwfn)
432 {
433         ecore_chain_reset(&p_hwfn->p_eq->chain);
434 }
435
436 void ecore_eq_free(struct ecore_hwfn *p_hwfn)
437 {
438         if (!p_hwfn->p_eq)
439                 return;
440
441         ecore_chain_free(p_hwfn->p_dev, &p_hwfn->p_eq->chain);
442
443         OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_eq);
444         p_hwfn->p_eq = OSAL_NULL;
445 }
446
447 /***************************************************************************
448  * CQE API - manipulate EQ functionality
449  ***************************************************************************/
450 static enum _ecore_status_t ecore_cqe_completion(struct ecore_hwfn *p_hwfn,
451                                                  struct eth_slow_path_rx_cqe
452                                                  *cqe,
453                                                  enum protocol_type protocol)
454 {
455         if (IS_VF(p_hwfn->p_dev))
456                 return OSAL_VF_CQE_COMPLETION(p_hwfn, cqe, protocol);
457
458         /* @@@tmp - it's possible we'll eventually want to handle some
459          * actual commands that can arrive here, but for now this is only
460          * used to complete the ramrod using the echo value on the cqe
461          */
462         return ecore_spq_completion(p_hwfn, cqe->echo, 0, OSAL_NULL);
463 }
464
465 enum _ecore_status_t ecore_eth_cqe_completion(struct ecore_hwfn *p_hwfn,
466                                               struct eth_slow_path_rx_cqe *cqe)
467 {
468         enum _ecore_status_t rc;
469
470         rc = ecore_cqe_completion(p_hwfn, cqe, PROTOCOLID_ETH);
471         if (rc) {
472                 DP_NOTICE(p_hwfn, true,
473                           "Failed to handle RXQ CQE [cmd 0x%02x]\n",
474                           cqe->ramrod_cmd_id);
475         }
476
477         return rc;
478 }
479
480 /***************************************************************************
481  * Slow hwfn Queue (spq)
482  ***************************************************************************/
483 void ecore_spq_setup(struct ecore_hwfn *p_hwfn)
484 {
485         struct ecore_spq *p_spq = p_hwfn->p_spq;
486         struct ecore_spq_entry *p_virt = OSAL_NULL;
487         struct core_db_data *p_db_data;
488         void OSAL_IOMEM *db_addr;
489         dma_addr_t p_phys = 0;
490         u32 i, capacity;
491         enum _ecore_status_t rc;
492
493         OSAL_LIST_INIT(&p_spq->pending);
494         OSAL_LIST_INIT(&p_spq->completion_pending);
495         OSAL_LIST_INIT(&p_spq->free_pool);
496         OSAL_LIST_INIT(&p_spq->unlimited_pending);
497         OSAL_SPIN_LOCK_INIT(&p_spq->lock);
498
499         /* SPQ empty pool */
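        /* Point each entry's elem.data_ptr at the physical address of the
         * ramrod data embedded in the entry itself.
         */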
500         p_phys = p_spq->p_phys + OFFSETOF(struct ecore_spq_entry, ramrod);
501         p_virt = p_spq->p_virt;
502
503         capacity = ecore_chain_get_capacity(&p_spq->chain);
504         for (i = 0; i < capacity; i++) {
505                 DMA_REGPAIR_LE(p_virt->elem.data_ptr, p_phys);
506
507                 OSAL_LIST_PUSH_TAIL(&p_virt->list, &p_spq->free_pool);
508
509                 p_virt++;
510                 p_phys += sizeof(struct ecore_spq_entry);
511         }
512
513         /* Statistics */
514         p_spq->normal_count = 0;
515         p_spq->comp_count = 0;
516         p_spq->comp_sent_count = 0;
517         p_spq->unlimited_pending_count = 0;
518
519         OSAL_MEM_ZERO(p_spq->p_comp_bitmap,
520                       SPQ_COMP_BMAP_SIZE * sizeof(unsigned long));
521         p_spq->comp_bitmap_idx = 0;
522
523         /* SPQ cid, cannot fail */
524         ecore_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid);
525         ecore_spq_hw_initialize(p_hwfn, p_spq);
526
527         /* reset the chain itself */
528         ecore_chain_reset(&p_spq->chain);
529
530         /* Initialize the address/data of the SPQ doorbell */
531         p_spq->db_addr_offset = DB_ADDR(p_spq->cid, DQ_DEMS_LEGACY);
532         p_db_data = &p_spq->db_data;
533         OSAL_MEM_ZERO(p_db_data, sizeof(*p_db_data));
534         SET_FIELD(p_db_data->params, CORE_DB_DATA_DEST, DB_DEST_XCM);
535         SET_FIELD(p_db_data->params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_MAX);
536         SET_FIELD(p_db_data->params, CORE_DB_DATA_AGG_VAL_SEL,
537                   DQ_XCM_CORE_SPQ_PROD_CMD);
538         p_db_data->agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
539
540         /* Register the SPQ doorbell with the doorbell recovery mechanism */
541         db_addr = (void *)((u8 *)p_hwfn->doorbells + p_spq->db_addr_offset);
542         rc = ecore_db_recovery_add(p_hwfn->p_dev, db_addr, &p_spq->db_data,
543                                    DB_REC_WIDTH_32B, DB_REC_KERNEL);
544         if (rc != ECORE_SUCCESS)
545                 DP_INFO(p_hwfn,
546                         "Failed to register the SPQ doorbell with the doorbell recovery mechanism\n");
547 }
548
549 enum _ecore_status_t ecore_spq_alloc(struct ecore_hwfn *p_hwfn)
550 {
551         struct ecore_spq_entry *p_virt = OSAL_NULL;
552         struct ecore_spq *p_spq = OSAL_NULL;
553         dma_addr_t p_phys = 0;
554         u32 capacity;
555
556         /* SPQ struct */
557         p_spq =
558             OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(struct ecore_spq));
559         if (!p_spq) {
560                 DP_NOTICE(p_hwfn, false, "Failed to allocate `struct ecore_spq'\n");
561                 return ECORE_NOMEM;
562         }
563
564         /* SPQ ring  */
565         if (ecore_chain_alloc(p_hwfn->p_dev,
566                               ECORE_CHAIN_USE_TO_PRODUCE,
567                               ECORE_CHAIN_MODE_SINGLE,
568                               ECORE_CHAIN_CNT_TYPE_U16,
569                               0, /* N/A when the mode is SINGLE */
570                               sizeof(struct slow_path_element),
571                               &p_spq->chain, OSAL_NULL)) {
572                 DP_NOTICE(p_hwfn, false, "Failed to allocate spq chain\n");
573                 goto spq_allocate_fail;
574         }
575
576         /* allocate and fill the SPQ elements (incl. ramrod data list) */
577         capacity = ecore_chain_get_capacity(&p_spq->chain);
578         p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, &p_phys,
579                                          capacity *
580                                          sizeof(struct ecore_spq_entry));
581         if (!p_virt)
582                 goto spq_allocate_fail;
583
584         p_spq->p_virt = p_virt;
585         p_spq->p_phys = p_phys;
586
587 #ifdef CONFIG_ECORE_LOCK_ALLOC
588         if (OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_spq->lock))
589                 goto spq_allocate_fail;
590 #endif
591
592         p_hwfn->p_spq = p_spq;
593         return ECORE_SUCCESS;
594
595 spq_allocate_fail:
596         ecore_chain_free(p_hwfn->p_dev, &p_spq->chain);
597         OSAL_FREE(p_hwfn->p_dev, p_spq);
598         return ECORE_NOMEM;
599 }
600
601 void ecore_spq_free(struct ecore_hwfn *p_hwfn)
602 {
603         struct ecore_spq *p_spq = p_hwfn->p_spq;
604         void OSAL_IOMEM *db_addr;
605         u32 capacity;
606
607         if (!p_spq)
608                 return;
609
610         /* Delete the SPQ doorbell from the doorbell recovery mechanism */
611         db_addr = (void *)((u8 *)p_hwfn->doorbells + p_spq->db_addr_offset);
612         ecore_db_recovery_del(p_hwfn->p_dev, db_addr, &p_spq->db_data);
613
614         if (p_spq->p_virt) {
615                 capacity = ecore_chain_get_capacity(&p_spq->chain);
616                 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
617                                        p_spq->p_virt,
618                                        p_spq->p_phys,
619                                        capacity *
620                                        sizeof(struct ecore_spq_entry));
621         }
622
623         ecore_chain_free(p_hwfn->p_dev, &p_spq->chain);
624 #ifdef CONFIG_ECORE_LOCK_ALLOC
625         OSAL_SPIN_LOCK_DEALLOC(&p_spq->lock);
626 #endif
627
628         OSAL_FREE(p_hwfn->p_dev, p_spq);
629 }
630
631 enum _ecore_status_t
632 ecore_spq_get_entry(struct ecore_hwfn *p_hwfn, struct ecore_spq_entry **pp_ent)
633 {
634         struct ecore_spq *p_spq = p_hwfn->p_spq;
635         struct ecore_spq_entry *p_ent = OSAL_NULL;
636         enum _ecore_status_t rc = ECORE_SUCCESS;
637
638         OSAL_SPIN_LOCK(&p_spq->lock);
639
640         if (OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) {
641                 p_ent = OSAL_ZALLOC(p_hwfn->p_dev, GFP_ATOMIC, sizeof(*p_ent));
642                 if (!p_ent) {
643                         DP_NOTICE(p_hwfn, false, "Failed to allocate an SPQ entry for a pending ramrod\n");
644                         rc = ECORE_NOMEM;
645                         goto out_unlock;
646                 }
647                 p_ent->queue = &p_spq->unlimited_pending;
648         } else {
649                 p_ent = OSAL_LIST_FIRST_ENTRY(&p_spq->free_pool,
650                                               struct ecore_spq_entry, list);
651                 OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->free_pool);
652                 p_ent->queue = &p_spq->pending;
653         }
654
655         *pp_ent = p_ent;
656
657 out_unlock:
658         OSAL_SPIN_UNLOCK(&p_spq->lock);
659         return rc;
660 }
661
662 /* Locked variant; Should be called while the SPQ lock is taken */
663 static void __ecore_spq_return_entry(struct ecore_hwfn *p_hwfn,
664                                      struct ecore_spq_entry *p_ent)
665 {
666         OSAL_LIST_PUSH_TAIL(&p_ent->list, &p_hwfn->p_spq->free_pool);
667 }
668
669 void ecore_spq_return_entry(struct ecore_hwfn *p_hwfn,
670                             struct ecore_spq_entry *p_ent)
671 {
672         OSAL_SPIN_LOCK(&p_hwfn->p_spq->lock);
673         __ecore_spq_return_entry(p_hwfn, p_ent);
674         OSAL_SPIN_UNLOCK(&p_hwfn->p_spq->lock);
675 }
676
677 /**
678  * @brief ecore_spq_add_entry - adds a new entry to the pending
679  *        list. Should be used while the lock is held.
680  *
681  * Adds an entry to the pending list if there is room (an empty
682  * element is available in the free_pool), or else places the
683  * entry in the unlimited_pending pool.
684  *
685  * @param p_hwfn
686  * @param p_ent
687  * @param priority
688  *
689  * @return enum _ecore_status_t
690  */
691 static enum _ecore_status_t
692 ecore_spq_add_entry(struct ecore_hwfn *p_hwfn,
693                     struct ecore_spq_entry *p_ent, enum spq_priority priority)
694 {
695         struct ecore_spq *p_spq = p_hwfn->p_spq;
696
697         if (p_ent->queue == &p_spq->unlimited_pending) {
698                 if (OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) {
699                         OSAL_LIST_PUSH_TAIL(&p_ent->list,
700                                             &p_spq->unlimited_pending);
701                         p_spq->unlimited_pending_count++;
702
703                         return ECORE_SUCCESS;
704
705                 } else {
706                         struct ecore_spq_entry *p_en2;
707
708                         p_en2 = OSAL_LIST_FIRST_ENTRY(&p_spq->free_pool,
709                                                      struct ecore_spq_entry,
710                                                      list);
711                         OSAL_LIST_REMOVE_ENTRY(&p_en2->list, &p_spq->free_pool);
712
713                         /* Copy the ring element physical pointer to the new
714                          * entry, since we are about to overwrite the entire ring
715                          * entry and don't want to lose the pointer.
716                          */
717                         p_ent->elem.data_ptr = p_en2->elem.data_ptr;
718
719                         *p_en2 = *p_ent;
720
721                         /* EBLOCK is responsible for freeing the allocated p_ent */
722                         if (p_ent->comp_mode != ECORE_SPQ_MODE_EBLOCK)
723                                 OSAL_FREE(p_hwfn->p_dev, p_ent);
724
725                         p_ent = p_en2;
726                 }
727         }
728
729         /* entry is to be placed in 'pending' queue */
730         switch (priority) {
731         case ECORE_SPQ_PRIORITY_NORMAL:
732                 OSAL_LIST_PUSH_TAIL(&p_ent->list, &p_spq->pending);
733                 p_spq->normal_count++;
734                 break;
735         case ECORE_SPQ_PRIORITY_HIGH:
736                 OSAL_LIST_PUSH_HEAD(&p_ent->list, &p_spq->pending);
737                 p_spq->high_count++;
738                 break;
739         default:
740                 return ECORE_INVAL;
741         }
742
743         return ECORE_SUCCESS;
744 }
745
746 /***************************************************************************
747  * Accessor
748  ***************************************************************************/
749
750 u32 ecore_spq_get_cid(struct ecore_hwfn *p_hwfn)
751 {
752         if (!p_hwfn->p_spq)
753                 return 0xffffffff;      /* illegal */
754         return p_hwfn->p_spq->cid;
755 }
756
757 /***************************************************************************
758  * Posting new Ramrods
759  ***************************************************************************/
760
761 static enum _ecore_status_t ecore_spq_post_list(struct ecore_hwfn *p_hwfn,
762                                                 osal_list_t *head,
763                                                 u32 keep_reserve)
764 {
765         struct ecore_spq *p_spq = p_hwfn->p_spq;
766         enum _ecore_status_t rc;
767
768         /* TODO - implementation might be wasteful; will always keep room
769          * for an additional high priority ramrod (even if one is already
770          * pending in the FW)
771          */
772         while (ecore_chain_get_elem_left(&p_spq->chain) > keep_reserve &&
773                !OSAL_LIST_IS_EMPTY(head)) {
774                 struct ecore_spq_entry *p_ent =
775                     OSAL_LIST_FIRST_ENTRY(head, struct ecore_spq_entry, list);
776                 if (p_ent != OSAL_NULL) {
777 #if defined(_NTDDK_)
778 #pragma warning(suppress : 6011 28182)
779 #endif
780                         OSAL_LIST_REMOVE_ENTRY(&p_ent->list, head);
781                         OSAL_LIST_PUSH_TAIL(&p_ent->list,
782                                             &p_spq->completion_pending);
783                         p_spq->comp_sent_count++;
784
785                         rc = ecore_spq_hw_post(p_hwfn, p_spq, p_ent);
786                         if (rc) {
787                                 OSAL_LIST_REMOVE_ENTRY(&p_ent->list,
788                                                     &p_spq->completion_pending);
789                                 __ecore_spq_return_entry(p_hwfn, p_ent);
790                                 return rc;
791                         }
792                 }
793         }
794
795         return ECORE_SUCCESS;
796 }
797
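/* Move entries from the unlimited_pending list into the pending list while
 * free pool entries are available, then post the pending list to the chain,
 * keeping room for one high-priority ramrod.
 */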
798 static enum _ecore_status_t ecore_spq_pend_post(struct ecore_hwfn *p_hwfn)
799 {
800         struct ecore_spq *p_spq = p_hwfn->p_spq;
801         struct ecore_spq_entry *p_ent = OSAL_NULL;
802
803         while (!OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) {
804                 if (OSAL_LIST_IS_EMPTY(&p_spq->unlimited_pending))
805                         break;
806
807                 p_ent = OSAL_LIST_FIRST_ENTRY(&p_spq->unlimited_pending,
808                                               struct ecore_spq_entry, list);
809                 if (!p_ent)
810                         return ECORE_INVAL;
811
812 #if defined(_NTDDK_)
813 #pragma warning(suppress : 6011)
814 #endif
815                 OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->unlimited_pending);
816
817                 ecore_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
818         }
819
820         return ecore_spq_post_list(p_hwfn,
821                                  &p_spq->pending, SPQ_HIGH_PRI_RESERVE_DEFAULT);
822 }
823
824 enum _ecore_status_t ecore_spq_post(struct ecore_hwfn *p_hwfn,
825                                     struct ecore_spq_entry *p_ent,
826                                     u8 *fw_return_code)
827 {
828         enum _ecore_status_t rc = ECORE_SUCCESS;
829         struct ecore_spq *p_spq = p_hwfn ? p_hwfn->p_spq : OSAL_NULL;
830         bool b_ret_ent = true;
831
832         if (!p_hwfn)
833                 return ECORE_INVAL;
834
835         if (!p_ent) {
836                 DP_NOTICE(p_hwfn, true, "Got a NULL pointer\n");
837                 return ECORE_INVAL;
838         }
839
840         if (p_hwfn->p_dev->recov_in_prog) {
841                 DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
842                            "Recovery is in progress -> skip spq post"
843                            " [cmd %02x protocol %02x]\n",
844                            p_ent->elem.hdr.cmd_id, p_ent->elem.hdr.protocol_id);
845                 /* Return success to let the flows be completed successfully
846                  * w/o any error handling.
847                  */
848                 return ECORE_SUCCESS;
849         }
850
851         OSAL_SPIN_LOCK(&p_spq->lock);
852
853         /* Complete the entry */
854         rc = ecore_spq_fill_entry(p_hwfn, p_ent);
855
856         /* Check return value after LOCK is taken for cleaner error flow */
857         if (rc)
858                 goto spq_post_fail;
859
860         /* Add the request to the pending queue */
861         rc = ecore_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
862         if (rc)
863                 goto spq_post_fail;
864
865         rc = ecore_spq_pend_post(p_hwfn);
866         if (rc) {
867                 /* Since it's possible that the pending post failed for a
868                  * different entry [although unlikely], the failed entry was
869                  * already dealt with; no need to return it here.
870                  */
871                 b_ret_ent = false;
872                 goto spq_post_fail;
873         }
874
875         OSAL_SPIN_UNLOCK(&p_spq->lock);
876
877         if (p_ent->comp_mode == ECORE_SPQ_MODE_EBLOCK) {
878                 /* For entries in ECORE BLOCK mode, the completion code cannot
879                  * perform the necessary cleanup - if it did, we couldn't
880                  * access p_ent here to see whether it's successful or not.
881                  * Thus, after getting the answer, perform the cleanup here.
882                  */
883                 rc = ecore_spq_block(p_hwfn, p_ent, fw_return_code,
884                                      p_ent->queue == &p_spq->unlimited_pending);
885
886                 if (p_ent->queue == &p_spq->unlimited_pending) {
887                         /* This is an allocated p_ent which does not need to
888                          * return to pool.
889                          */
890                         OSAL_FREE(p_hwfn->p_dev, p_ent);
891
892                         /* TBD: handle error flow and remove p_ent from
893                          * completion pending
894                          */
895                         return rc;
896                 }
897
898                 if (rc)
899                         goto spq_post_fail2;
900
901                 /* return to pool */
902                 ecore_spq_return_entry(p_hwfn, p_ent);
903         }
904         return rc;
905
906 spq_post_fail2:
907         OSAL_SPIN_LOCK(&p_spq->lock);
908         OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->completion_pending);
909         ecore_chain_return_produced(&p_spq->chain);
910
911 spq_post_fail:
912         /* return to the free pool */
913         if (b_ret_ent)
914                 __ecore_spq_return_entry(p_hwfn, p_ent);
915         OSAL_SPIN_UNLOCK(&p_spq->lock);
916
917         return rc;
918 }
919
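/* Match an EQ completion, identified by its echo value, against the
 * completion_pending list, invoke the entry's completion callback and then
 * try to post any pending ramrods.
 */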
920 enum _ecore_status_t ecore_spq_completion(struct ecore_hwfn *p_hwfn,
921                                           __le16 echo,
922                                           u8 fw_return_code,
923                                           union event_ring_data *p_data)
924 {
925         struct ecore_spq *p_spq;
926         struct ecore_spq_entry *p_ent = OSAL_NULL;
927         struct ecore_spq_entry *tmp;
928         struct ecore_spq_entry *found = OSAL_NULL;
929         enum _ecore_status_t rc;
930
931         if (!p_hwfn)
932                 return ECORE_INVAL;
933
934         p_spq = p_hwfn->p_spq;
935         if (!p_spq)
936                 return ECORE_INVAL;
937
938         OSAL_SPIN_LOCK(&p_spq->lock);
939         OSAL_LIST_FOR_EACH_ENTRY_SAFE(p_ent,
940                                       tmp,
941                                       &p_spq->completion_pending,
942                                       list, struct ecore_spq_entry) {
943                 if (p_ent->elem.hdr.echo == echo) {
944                         OSAL_LIST_REMOVE_ENTRY(&p_ent->list,
945                                                &p_spq->completion_pending);
946
947                         /* Avoid overwriting of SPQ entries when getting
948                          * out-of-order completions, by marking the completions
949                          * in a bitmap and increasing the chain consumer only
950                          * for the first successive completed entries.
951                          */
952                         SPQ_COMP_BMAP_SET_BIT(p_spq, echo);
953                         while (SPQ_COMP_BMAP_TEST_BIT(p_spq,
954                                                       p_spq->comp_bitmap_idx)) {
955                                 SPQ_COMP_BMAP_CLEAR_BIT(p_spq,
956                                                         p_spq->comp_bitmap_idx);
957                                 p_spq->comp_bitmap_idx++;
958                                 ecore_chain_return_produced(&p_spq->chain);
959                         }
960
961                         p_spq->comp_count++;
962                         found = p_ent;
963                         break;
964                 }
965
966                 /* This is debug and should be relatively uncommon - depends
967                  * on scenarios which have multiple per-PF sent ramrods.
968                  */
969                 DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
970                            "Got completion for echo %04x - doesn't match"
971                            " echo %04x in completion pending list\n",
972                            OSAL_LE16_TO_CPU(echo),
973                            OSAL_LE16_TO_CPU(p_ent->elem.hdr.echo));
974         }
975
976         /* Release lock before callback, as callback may post
977          * an additional ramrod.
978          */
979         OSAL_SPIN_UNLOCK(&p_spq->lock);
980
981         if (!found) {
982                 DP_NOTICE(p_hwfn, true,
983                           "Failed to find an entry this"
984                           " EQE [echo %04x] completes\n",
985                           OSAL_LE16_TO_CPU(echo));
986                 return ECORE_EXISTS;
987         }
988
989         DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
990                    "Complete EQE [echo %04x]: func %p cookie %p)\n",
991                    OSAL_LE16_TO_CPU(echo),
992                    p_ent->comp_cb.function, p_ent->comp_cb.cookie);
993         if (found->comp_cb.function)
994                 found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
995                                         fw_return_code);
996         else
997                 DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
998                            "Got a completion without a callback function\n");
999
1000         if ((found->comp_mode != ECORE_SPQ_MODE_EBLOCK) ||
1001             (found->queue == &p_spq->unlimited_pending))
1002                 /* EBLOCK  is responsible for returning its own entry into the
1003                  * free list, unless it originally added the entry into the
1004                  * unlimited pending list.
1005                  */
1006                 ecore_spq_return_entry(p_hwfn, found);
1007
1008         /* Attempt to post pending requests */
1009         OSAL_SPIN_LOCK(&p_spq->lock);
1010         rc = ecore_spq_pend_post(p_hwfn);
1011         OSAL_SPIN_UNLOCK(&p_spq->lock);
1012
1013         return rc;
1014 }
1015
1016 enum _ecore_status_t ecore_consq_alloc(struct ecore_hwfn *p_hwfn)
1017 {
1018         struct ecore_consq *p_consq;
1019
1020         /* Allocate ConsQ struct */
1021         p_consq =
1022             OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_consq));
1023         if (!p_consq) {
1024                 DP_NOTICE(p_hwfn, false,
1025                           "Failed to allocate `struct ecore_consq'\n");
1026                 return ECORE_NOMEM;
1027         }
1028
1029         /* Allocate and initialize EQ chain */
1030         if (ecore_chain_alloc(p_hwfn->p_dev,
1031                               ECORE_CHAIN_USE_TO_PRODUCE,
1032                               ECORE_CHAIN_MODE_PBL,
1033                               ECORE_CHAIN_CNT_TYPE_U16,
1034                               ECORE_CHAIN_PAGE_SIZE / 0x80,
1035                               0x80,
1036                               &p_consq->chain, OSAL_NULL) != ECORE_SUCCESS) {
1037                 DP_NOTICE(p_hwfn, false, "Failed to allocate consq chain\n");
1038                 goto consq_allocate_fail;
1039         }
1040
1041         p_hwfn->p_consq = p_consq;
1042         return ECORE_SUCCESS;
1043
1044 consq_allocate_fail:
1045         OSAL_FREE(p_hwfn->p_dev, p_consq);
1046         return ECORE_NOMEM;
1047 }
1048
1049 void ecore_consq_setup(struct ecore_hwfn *p_hwfn)
1050 {
1051         ecore_chain_reset(&p_hwfn->p_consq->chain);
1052 }
1053
1054 void ecore_consq_free(struct ecore_hwfn *p_hwfn)
1055 {
1056         if (!p_hwfn->p_consq)
1057                 return;
1058
1059         ecore_chain_free(p_hwfn->p_dev, &p_hwfn->p_consq->chain);
1060         OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_consq);
1061 }