/*
 * Copyright (c) 2016 QLogic Corporation.
 * All rights reserved.
 * www.qlogic.com
 *
 * See LICENSE.qede_pmd for copyright and licensing details.
 */

#include "bcm_osal.h"
#include "reg_addr.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore_hsi_common.h"
#include "ecore.h"
#include "ecore_sp_api.h"
#include "ecore_spq.h"
#include "ecore_iro.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_cxt.h"
#include "ecore_int.h"
#include "ecore_dev_api.h"
#include "ecore_mcp.h"
#include "ecore_hw.h"
#include "ecore_sriov.h"

/***************************************************************************
 * Structures & Definitions
 ***************************************************************************/

#define SPQ_HIGH_PRI_RESERVE_DEFAULT    (1)
#define SPQ_BLOCK_SLEEP_LENGTH          (1000)
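/* Note: ecore_spq_block() sleeps 5 ms (OSAL_MSLEEP(5)) between iterations,
 * so SPQ_BLOCK_SLEEP_LENGTH iterations amount to roughly a 5 second wait per
 * polling pass - once before and once after the MCP drain.
 */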

/***************************************************************************
 * Blocking Imp. (BLOCK/EBLOCK mode)
 ***************************************************************************/
static void ecore_spq_blocking_cb(struct ecore_hwfn *p_hwfn,
                                  void *cookie,
                                  union event_ring_data *data,
                                  u8 fw_return_code)
{
        struct ecore_spq_comp_done *comp_done;

        comp_done = (struct ecore_spq_comp_done *)cookie;

        comp_done->done = 0x1;
        comp_done->fw_return_code = fw_return_code;

        /* make update visible to waiting thread */
        OSAL_SMP_WMB(p_hwfn->p_dev);
}

static enum _ecore_status_t ecore_spq_block(struct ecore_hwfn *p_hwfn,
                                            struct ecore_spq_entry *p_ent,
                                            u8 *p_fw_ret)
{
        int sleep_count = SPQ_BLOCK_SLEEP_LENGTH;
        struct ecore_spq_comp_done *comp_done;
        enum _ecore_status_t rc;

        comp_done = (struct ecore_spq_comp_done *)p_ent->comp_cb.cookie;
        while (sleep_count) {
                OSAL_POLL_MODE_DPC(p_hwfn);
                /* validate we receive completion update */
                OSAL_SMP_RMB(p_hwfn->p_dev);
                if (comp_done->done == 1) {
                        if (p_fw_ret)
                                *p_fw_ret = comp_done->fw_return_code;
                        return ECORE_SUCCESS;
                }
                OSAL_MSLEEP(5);
                sleep_count--;
        }

        DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
        rc = ecore_mcp_drain(p_hwfn, p_hwfn->p_main_ptt);
        if (rc != ECORE_SUCCESS)
                DP_NOTICE(p_hwfn, true, "MCP drain failed\n");

        /* Retry after drain */
        sleep_count = SPQ_BLOCK_SLEEP_LENGTH;
        while (sleep_count) {
                /* validate we receive completion update */
                OSAL_SMP_RMB(p_hwfn->p_dev);
                if (comp_done->done == 1) {
                        if (p_fw_ret)
                                *p_fw_ret = comp_done->fw_return_code;
                        return ECORE_SUCCESS;
                }
                OSAL_MSLEEP(5);
                sleep_count--;
        }

        if (comp_done->done == 1) {
                if (p_fw_ret)
                        *p_fw_ret = comp_done->fw_return_code;
                return ECORE_SUCCESS;
        }

        DP_NOTICE(p_hwfn, true,
                  "Ramrod is stuck [CID %08x cmd %02x proto %02x echo %04x]\n",
                  OSAL_LE32_TO_CPU(p_ent->elem.hdr.cid),
                  p_ent->elem.hdr.cmd_id, p_ent->elem.hdr.protocol_id,
                  OSAL_LE16_TO_CPU(p_ent->elem.hdr.echo));

        ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_RAMROD_FAIL);

        return ECORE_BUSY;
}

/***************************************************************************
 * SPQ entries inner API
 ***************************************************************************/
static enum _ecore_status_t
ecore_spq_fill_entry(struct ecore_hwfn *p_hwfn, struct ecore_spq_entry *p_ent)
{
        p_ent->flags = 0;

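        /* EBLOCK/BLOCK entries get the internal blocking callback so that
         * ecore_spq_block() can poll on their comp_done cookie; CB entries
         * keep the caller-supplied callback untouched.
         */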
        switch (p_ent->comp_mode) {
        case ECORE_SPQ_MODE_EBLOCK:
        case ECORE_SPQ_MODE_BLOCK:
                p_ent->comp_cb.function = ecore_spq_blocking_cb;
                break;
        case ECORE_SPQ_MODE_CB:
                break;
        default:
                DP_NOTICE(p_hwfn, true, "Unknown SPQE completion mode %d\n",
                          p_ent->comp_mode);
                return ECORE_INVAL;
        }

        DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
                   "Ramrod header: [CID 0x%08x CMD 0x%02x protocol 0x%02x]"
                   " Data pointer: [%08x:%08x] Completion Mode: %s\n",
                   p_ent->elem.hdr.cid, p_ent->elem.hdr.cmd_id,
                   p_ent->elem.hdr.protocol_id,
                   p_ent->elem.data_ptr.hi, p_ent->elem.data_ptr.lo,
                   D_TRINE(p_ent->comp_mode, ECORE_SPQ_MODE_EBLOCK,
                           ECORE_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
                           "MODE_CB"));

        return ECORE_SUCCESS;
}

/***************************************************************************
 * HSI access
 ***************************************************************************/
static void ecore_spq_hw_initialize(struct ecore_hwfn *p_hwfn,
                                    struct ecore_spq *p_spq)
{
        u16 pq;
        struct ecore_cxt_info cxt_info;
        struct core_conn_context *p_cxt;
        union ecore_qm_pq_params pq_params;
        enum _ecore_status_t rc;

        cxt_info.iid = p_spq->cid;

        rc = ecore_cxt_get_cid_info(p_hwfn, &cxt_info);

        if (rc < 0) {
                DP_NOTICE(p_hwfn, true, "Cannot find context info for cid=%d",
                          p_spq->cid);
                return;
        }

        p_cxt = cxt_info.p_cxt;

        SET_FIELD(p_cxt->xstorm_ag_context.flags10,
                  XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
        SET_FIELD(p_cxt->xstorm_ag_context.flags1,
                  XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
        /* SET_FIELD(p_cxt->xstorm_ag_context.flags10,
         *           XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN, 1);
         */
        SET_FIELD(p_cxt->xstorm_ag_context.flags9,
                  XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);

        /* CDU validation - FIXME currently disabled */

        /* QM physical queue */
        OSAL_MEMSET(&pq_params, 0, sizeof(pq_params));
        pq_params.core.tc = LB_TC;
        pq = ecore_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
        p_cxt->xstorm_ag_context.physical_q0 = OSAL_CPU_TO_LE16(pq);

        p_cxt->xstorm_st_context.spq_base_lo =
            DMA_LO_LE(p_spq->chain.p_phys_addr);
        p_cxt->xstorm_st_context.spq_base_hi =
            DMA_HI_LE(p_spq->chain.p_phys_addr);

        p_cxt->xstorm_st_context.consolid_base_addr.lo =
            DMA_LO_LE(p_hwfn->p_consq->chain.p_phys_addr);
        p_cxt->xstorm_st_context.consolid_base_addr.hi =
            DMA_HI_LE(p_hwfn->p_consq->chain.p_phys_addr);
}

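/* Copy the prepared element onto the SPQ chain and ring the XCM doorbell
 * with the updated producer index; the barriers below ensure the element
 * and producer value are visible before the doorbell write reaches the HW.
 */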
static enum _ecore_status_t ecore_spq_hw_post(struct ecore_hwfn *p_hwfn,
                                              struct ecore_spq *p_spq,
                                              struct ecore_spq_entry *p_ent)
{
        struct ecore_chain *p_chain = &p_hwfn->p_spq->chain;
        u16 echo = ecore_chain_get_prod_idx(p_chain);
        struct slow_path_element *elem;
        struct core_db_data db;

        p_ent->elem.hdr.echo = OSAL_CPU_TO_LE16(echo);
        elem = ecore_chain_produce(p_chain);
        if (!elem) {
                DP_NOTICE(p_hwfn, true, "Failed to produce from SPQ chain\n");
                return ECORE_INVAL;
        }

        *elem = p_ent->elem;    /* struct assignment */

        /* send a doorbell on the slow hwfn session */
        OSAL_MEMSET(&db, 0, sizeof(db));
        SET_FIELD(db.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
        SET_FIELD(db.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
        SET_FIELD(db.params, CORE_DB_DATA_AGG_VAL_SEL,
                  DQ_XCM_CORE_SPQ_PROD_CMD);
        db.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;

        /* validate producer is up to date */
        OSAL_RMB(p_hwfn->p_dev);

        db.spq_prod = OSAL_CPU_TO_LE16(ecore_chain_get_prod_idx(p_chain));

        /* do not reorder */
        OSAL_BARRIER(p_hwfn->p_dev);

        DOORBELL(p_hwfn, DB_ADDR(p_spq->cid, DQ_DEMS_LEGACY), *(u32 *)&db);

        /* make sure the doorbell was rung */
        OSAL_MMIOWB(p_hwfn->p_dev);

        DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
                   "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x"
                   " agg_params: %02x, prod: %04x\n",
                   DB_ADDR(p_spq->cid, DQ_DEMS_LEGACY), p_spq->cid, db.params,
                   db.agg_flags, ecore_chain_get_prod_idx(p_chain));

        return ECORE_SUCCESS;
}

/***************************************************************************
 * Asynchronous events
 ***************************************************************************/

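/* EQ entries flagged as asynchronous are dispatched here per protocol;
 * currently only PROTOCOLID_COMMON is handled, and it is routed to the
 * SR-IOV event handler.
 */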
static enum _ecore_status_t
ecore_async_event_completion(struct ecore_hwfn *p_hwfn,
                             struct event_ring_entry *p_eqe)
{
        switch (p_eqe->protocol_id) {
        case PROTOCOLID_COMMON:
                return ecore_sriov_eqe_event(p_hwfn,
                                             p_eqe->opcode,
                                             p_eqe->echo, &p_eqe->data);
        default:
                DP_NOTICE(p_hwfn,
                          true, "Unknown Async completion for protocol: %d\n",
                          p_eqe->protocol_id);
                return ECORE_INVAL;
        }
}

/***************************************************************************
 * EQ API
 ***************************************************************************/
void ecore_eq_prod_update(struct ecore_hwfn *p_hwfn, u16 prod)
{
        u32 addr = GTT_BAR0_MAP_REG_USDM_RAM +
            USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id);

        REG_WR16(p_hwfn, addr, prod);

        /* keep prod updates ordered */
        OSAL_MMIOWB(p_hwfn->p_dev);
}

enum _ecore_status_t ecore_eq_completion(struct ecore_hwfn *p_hwfn,
                                         void *cookie)
{
        struct ecore_eq *p_eq = cookie;
        struct ecore_chain *p_chain = &p_eq->chain;
        enum _ecore_status_t rc = 0;

        /* take a snapshot of the FW consumer */
        u16 fw_cons_idx = OSAL_LE16_TO_CPU(*p_eq->p_fw_cons);

        DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "fw_cons_idx %x\n", fw_cons_idx);

        /* Need to guarantee the fw_cons index we use points to a usable
         * element (to comply with our chain), so our macros would comply
         */
        if ((fw_cons_idx & ecore_chain_get_usable_per_page(p_chain)) ==
            ecore_chain_get_usable_per_page(p_chain)) {
                fw_cons_idx += ecore_chain_get_unusable_per_page(p_chain);
        }

        /* Complete current segment of eq entries */
        while (fw_cons_idx != ecore_chain_get_cons_idx(p_chain)) {
                struct event_ring_entry *p_eqe = ecore_chain_consume(p_chain);
                if (!p_eqe) {
                        rc = ECORE_INVAL;
                        break;
                }

                DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
                                "op %x prot %x res0 %x echo %x "
                                "fwret %x flags %x\n", p_eqe->opcode,
                           p_eqe->protocol_id,  /* Event Protocol ID */
                           p_eqe->reserved0,    /* Reserved */
                           OSAL_LE16_TO_CPU(p_eqe->echo),
                           p_eqe->fw_return_code,       /* FW return code for SP
                                                         * ramrods
                                                         */
                           p_eqe->flags);

                if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC)) {
                        if (ecore_async_event_completion(p_hwfn, p_eqe))
                                rc = ECORE_INVAL;
                } else if (ecore_spq_completion(p_hwfn,
                                                p_eqe->echo,
                                                p_eqe->fw_return_code,
                                                &p_eqe->data)) {
                        rc = ECORE_INVAL;
                }

                ecore_chain_recycle_consumed(p_chain);
        }

        ecore_eq_prod_update(p_hwfn, ecore_chain_get_prod_idx(p_chain));

        return rc;
}

struct ecore_eq *ecore_eq_alloc(struct ecore_hwfn *p_hwfn, u16 num_elem)
{
        struct ecore_eq *p_eq;

        /* Allocate EQ struct */
        p_eq = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(struct ecore_eq));
        if (!p_eq) {
                DP_NOTICE(p_hwfn, true,
                          "Failed to allocate `struct ecore_eq'\n");
                return OSAL_NULL;
        }

        /* Allocate and initialize EQ chain */
        if (ecore_chain_alloc(p_hwfn->p_dev,
                              ECORE_CHAIN_USE_TO_PRODUCE,
                              ECORE_CHAIN_MODE_PBL,
                              ECORE_CHAIN_CNT_TYPE_U16,
                              num_elem,
                              sizeof(union event_ring_element), &p_eq->chain)) {
                DP_NOTICE(p_hwfn, true, "Failed to allocate eq chain");
                goto eq_allocate_fail;
        }

        /* register EQ completion on the SP SB */
        ecore_int_register_cb(p_hwfn,
                              ecore_eq_completion,
                              p_eq, &p_eq->eq_sb_index, &p_eq->p_fw_cons);

        return p_eq;

eq_allocate_fail:
        ecore_eq_free(p_hwfn, p_eq);
        return OSAL_NULL;
}

void ecore_eq_setup(struct ecore_hwfn *p_hwfn, struct ecore_eq *p_eq)
{
        ecore_chain_reset(&p_eq->chain);
}

void ecore_eq_free(struct ecore_hwfn *p_hwfn, struct ecore_eq *p_eq)
{
        if (!p_eq)
                return;
        ecore_chain_free(p_hwfn->p_dev, &p_eq->chain);
        OSAL_FREE(p_hwfn->p_dev, p_eq);
}

/***************************************************************************
 * CQE API - manipulate EQ functionality
 ***************************************************************************/
static enum _ecore_status_t ecore_cqe_completion(struct ecore_hwfn *p_hwfn,
                                                 struct eth_slow_path_rx_cqe
                                                 *cqe,
                                                 enum protocol_type protocol)
{
        if (IS_VF(p_hwfn->p_dev))
                return OSAL_VF_CQE_COMPLETION(p_hwfn, cqe, protocol);

        /* @@@tmp - it's possible we'll eventually want to handle some
         * actual commands that can arrive here, but for now this is only
         * used to complete the ramrod using the echo value on the cqe
         */
        return ecore_spq_completion(p_hwfn, cqe->echo, 0, OSAL_NULL);
}

enum _ecore_status_t ecore_eth_cqe_completion(struct ecore_hwfn *p_hwfn,
                                              struct eth_slow_path_rx_cqe *cqe)
{
        enum _ecore_status_t rc;

        rc = ecore_cqe_completion(p_hwfn, cqe, PROTOCOLID_ETH);
        if (rc) {
                DP_NOTICE(p_hwfn, true,
                          "Failed to handle RXQ CQE [cmd 0x%02x]\n",
                          cqe->ramrod_cmd_id);
        }

        return rc;
}

/***************************************************************************
 * Slow hwfn Queue (spq)
 ***************************************************************************/
void ecore_spq_setup(struct ecore_hwfn *p_hwfn)
{
        struct ecore_spq_entry *p_virt = OSAL_NULL;
        struct ecore_spq *p_spq = p_hwfn->p_spq;
        dma_addr_t p_phys = 0;
        u32 i, capacity;

        OSAL_LIST_INIT(&p_spq->pending);
        OSAL_LIST_INIT(&p_spq->completion_pending);
        OSAL_LIST_INIT(&p_spq->free_pool);
        OSAL_LIST_INIT(&p_spq->unlimited_pending);
        OSAL_SPIN_LOCK_INIT(&p_spq->lock);

        /* SPQ empty pool */
        p_phys = p_spq->p_phys + OFFSETOF(struct ecore_spq_entry, ramrod);
        p_virt = p_spq->p_virt;

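        /* Each SPQ entry embeds its own ramrod data buffer ('ramrod' member);
         * point every element's data_ptr at the DMA address of that embedded
         * buffer before pushing the entry onto the free pool.
         */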
        capacity = ecore_chain_get_capacity(&p_spq->chain);
        for (i = 0; i < capacity; i++) {
                p_virt->elem.data_ptr.hi = DMA_HI_LE(p_phys);
                p_virt->elem.data_ptr.lo = DMA_LO_LE(p_phys);

                OSAL_LIST_PUSH_TAIL(&p_virt->list, &p_spq->free_pool);

                p_virt++;
                p_phys += sizeof(struct ecore_spq_entry);
        }

        /* Statistics */
        p_spq->normal_count = 0;
        p_spq->comp_count = 0;
        p_spq->comp_sent_count = 0;
        p_spq->unlimited_pending_count = 0;

        OSAL_MEM_ZERO(p_spq->p_comp_bitmap,
                      SPQ_COMP_BMAP_SIZE * sizeof(unsigned long));
        p_spq->comp_bitmap_idx = 0;

        /* SPQ cid, cannot fail */
        ecore_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid);
        ecore_spq_hw_initialize(p_hwfn, p_spq);

        /* reset the chain itself */
        ecore_chain_reset(&p_spq->chain);
}

enum _ecore_status_t ecore_spq_alloc(struct ecore_hwfn *p_hwfn)
{
        struct ecore_spq_entry *p_virt = OSAL_NULL;
        struct ecore_spq *p_spq = OSAL_NULL;
        dma_addr_t p_phys = 0;
        u32 capacity;

        /* SPQ struct */
        p_spq =
            OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(struct ecore_spq));
        if (!p_spq) {
                DP_NOTICE(p_hwfn, true,
                          "Failed to allocate `struct ecore_spq'");
                return ECORE_NOMEM;
        }

        /* SPQ ring */
        if (ecore_chain_alloc(p_hwfn->p_dev, ECORE_CHAIN_USE_TO_PRODUCE,
                        ECORE_CHAIN_MODE_SINGLE, ECORE_CHAIN_CNT_TYPE_U16, 0,
                        /* N/A when the mode is SINGLE */
                        sizeof(struct slow_path_element), &p_spq->chain)) {
                DP_NOTICE(p_hwfn, true, "Failed to allocate spq chain");
                goto spq_allocate_fail;
        }

        /* allocate and fill the SPQ elements (incl. ramrod data list) */
        capacity = ecore_chain_get_capacity(&p_spq->chain);
        p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, &p_phys,
                                         capacity *
                                         sizeof(struct ecore_spq_entry));
        if (!p_virt)
                goto spq_allocate_fail;

        p_spq->p_virt = p_virt;
        p_spq->p_phys = p_phys;

        OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_spq->lock);

        p_hwfn->p_spq = p_spq;
        return ECORE_SUCCESS;

spq_allocate_fail:
        ecore_chain_free(p_hwfn->p_dev, &p_spq->chain);
        OSAL_FREE(p_hwfn->p_dev, p_spq);
        return ECORE_NOMEM;
}

void ecore_spq_free(struct ecore_hwfn *p_hwfn)
{
        struct ecore_spq *p_spq = p_hwfn->p_spq;
        u32 capacity;

        if (!p_spq)
                return;

        if (p_spq->p_virt) {
                capacity = ecore_chain_get_capacity(&p_spq->chain);
                OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
                                       p_spq->p_virt,
                                       p_spq->p_phys,
                                       capacity *
                                       sizeof(struct ecore_spq_entry));
        }

        ecore_chain_free(p_hwfn->p_dev, &p_spq->chain);
        OSAL_SPIN_LOCK_DEALLOC(&p_spq->lock);
        OSAL_FREE(p_hwfn->p_dev, p_spq);
}

enum _ecore_status_t
ecore_spq_get_entry(struct ecore_hwfn *p_hwfn, struct ecore_spq_entry **pp_ent)
{
        struct ecore_spq *p_spq = p_hwfn->p_spq;
        struct ecore_spq_entry *p_ent = OSAL_NULL;

        OSAL_SPIN_LOCK(&p_spq->lock);

        if (OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) {
                p_ent = OSAL_ZALLOC(p_hwfn->p_dev, GFP_ATOMIC,
                                    sizeof(struct ecore_spq_entry));
                if (!p_ent) {
                        OSAL_SPIN_UNLOCK(&p_spq->lock);
                        DP_NOTICE(p_hwfn, true,
                                  "Failed to allocate an SPQ entry"
                                  " for a pending ramrod\n");
                        return ECORE_NOMEM;
                }
                p_ent->queue = &p_spq->unlimited_pending;
        } else {
                p_ent = OSAL_LIST_FIRST_ENTRY(&p_spq->free_pool,
                                              struct ecore_spq_entry, list);
                OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->free_pool);
                p_ent->queue = &p_spq->pending;
        }

        *pp_ent = p_ent;

        OSAL_SPIN_UNLOCK(&p_spq->lock);

        return ECORE_SUCCESS;
}

/* Locked variant; Should be called while the SPQ lock is taken */
static void __ecore_spq_return_entry(struct ecore_hwfn *p_hwfn,
                                     struct ecore_spq_entry *p_ent)
{
        OSAL_LIST_PUSH_TAIL(&p_ent->list, &p_hwfn->p_spq->free_pool);
}

void ecore_spq_return_entry(struct ecore_hwfn *p_hwfn,
                            struct ecore_spq_entry *p_ent)
{
        OSAL_SPIN_LOCK(&p_hwfn->p_spq->lock);
        __ecore_spq_return_entry(p_hwfn, p_ent);
        OSAL_SPIN_UNLOCK(&p_hwfn->p_spq->lock);
}

/**
 * @brief ecore_spq_add_entry - adds a new entry to the pending
 *        list. Should be used while the SPQ lock is held.
 *
 * Adds an entry to the pending list if there is room (an empty
 * element is available in the free_pool), or else places the
 * entry in the unlimited_pending pool.
 *
 * @param p_hwfn
 * @param p_ent
 * @param priority
 *
 * @return enum _ecore_status_t
 */
static enum _ecore_status_t
ecore_spq_add_entry(struct ecore_hwfn *p_hwfn,
                    struct ecore_spq_entry *p_ent, enum spq_priority priority)
{
        struct ecore_spq *p_spq = p_hwfn->p_spq;

        if (p_ent->queue == &p_spq->unlimited_pending) {
                if (OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) {
                        OSAL_LIST_PUSH_TAIL(&p_ent->list,
                                            &p_spq->unlimited_pending);
                        p_spq->unlimited_pending_count++;

                        return ECORE_SUCCESS;
                }

                struct ecore_spq_entry *p_en2;

                p_en2 = OSAL_LIST_FIRST_ENTRY(&p_spq->free_pool,
                                              struct ecore_spq_entry,
                                              list);
                OSAL_LIST_REMOVE_ENTRY(&p_en2->list, &p_spq->free_pool);

                /* Copy the ring element physical pointer to the new
                 * entry, since we are about to override the entire ring
                 * entry and don't want to lose the pointer.
                 */
                p_ent->elem.data_ptr = p_en2->elem.data_ptr;

                /* Setting the cookie to the comp_done of the
                 * new element.
                 */
                if (p_ent->comp_cb.cookie == &p_ent->comp_done)
                        p_ent->comp_cb.cookie = &p_en2->comp_done;

                *p_en2 = *p_ent;

                OSAL_FREE(p_hwfn->p_dev, p_ent);

                p_ent = p_en2;
        }

        /* entry is to be placed in 'pending' queue */
        switch (priority) {
        case ECORE_SPQ_PRIORITY_NORMAL:
                OSAL_LIST_PUSH_TAIL(&p_ent->list, &p_spq->pending);
                p_spq->normal_count++;
                break;
        case ECORE_SPQ_PRIORITY_HIGH:
                OSAL_LIST_PUSH_HEAD(&p_ent->list, &p_spq->pending);
                p_spq->high_count++;
                break;
        default:
                return ECORE_INVAL;
        }

        return ECORE_SUCCESS;
}

/***************************************************************************
 * Accessor
 ***************************************************************************/

u32 ecore_spq_get_cid(struct ecore_hwfn *p_hwfn)
{
        if (!p_hwfn->p_spq)
                return 0xffffffff;      /* illegal */
        return p_hwfn->p_spq->cid;
}

/***************************************************************************
 * Posting new Ramrods
 ***************************************************************************/

static enum _ecore_status_t ecore_spq_post_list(struct ecore_hwfn *p_hwfn,
                                                osal_list_t *head,
                                                u32 keep_reserve)
{
        struct ecore_spq *p_spq = p_hwfn->p_spq;
        enum _ecore_status_t rc;

        /* TODO - implementation might be wasteful; will always keep room
         * for an additional high priority ramrod (even if one is already
         * pending toward the FW)
         */
        while (ecore_chain_get_elem_left(&p_spq->chain) > keep_reserve &&
               !OSAL_LIST_IS_EMPTY(head)) {
                struct ecore_spq_entry *p_ent =
                    OSAL_LIST_FIRST_ENTRY(head, struct ecore_spq_entry, list);
                OSAL_LIST_REMOVE_ENTRY(&p_ent->list, head);
                OSAL_LIST_PUSH_TAIL(&p_ent->list, &p_spq->completion_pending);
                p_spq->comp_sent_count++;

                rc = ecore_spq_hw_post(p_hwfn, p_spq, p_ent);
                if (rc) {
                        OSAL_LIST_REMOVE_ENTRY(&p_ent->list,
                                               &p_spq->completion_pending);
                        __ecore_spq_return_entry(p_hwfn, p_ent);
                        return rc;
                }
        }

        return ECORE_SUCCESS;
}

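/* Drain unlimited_pending into the regular pending list while free-pool
 * entries are available, then post the pending list to the chain, keeping
 * SPQ_HIGH_PRI_RESERVE_DEFAULT element(s) in reserve for high priority
 * ramrods.
 */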
static enum _ecore_status_t ecore_spq_pend_post(struct ecore_hwfn *p_hwfn)
{
        enum _ecore_status_t rc = ECORE_NOTIMPL;
        struct ecore_spq *p_spq = p_hwfn->p_spq;
        struct ecore_spq_entry *p_ent = OSAL_NULL;

        while (!OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) {
                if (OSAL_LIST_IS_EMPTY(&p_spq->unlimited_pending))
                        break;

                p_ent = OSAL_LIST_FIRST_ENTRY(&p_spq->unlimited_pending,
                                              struct ecore_spq_entry, list);
                if (!p_ent)
                        return ECORE_INVAL;

                OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->unlimited_pending);

                ecore_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
        }

        rc = ecore_spq_post_list(p_hwfn,
                                 &p_spq->pending, SPQ_HIGH_PRI_RESERVE_DEFAULT);
        if (rc)
                return rc;

        return ECORE_SUCCESS;
}

enum _ecore_status_t ecore_spq_post(struct ecore_hwfn *p_hwfn,
                                    struct ecore_spq_entry *p_ent,
                                    u8 *fw_return_code)
{
        enum _ecore_status_t rc = ECORE_SUCCESS;
        struct ecore_spq *p_spq = p_hwfn ? p_hwfn->p_spq : OSAL_NULL;
        bool b_ret_ent = true;

        if (!p_hwfn)
                return ECORE_INVAL;

        if (!p_ent) {
                DP_NOTICE(p_hwfn, true, "Got a NULL pointer\n");
                return ECORE_INVAL;
        }

        if (p_hwfn->p_dev->recov_in_prog) {
                DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
                           "Recovery is in progress -> skip spq post"
                           " [cmd %02x protocol %02x]",
                           p_ent->elem.hdr.cmd_id, p_ent->elem.hdr.protocol_id);
                /* Return success to let the flows complete successfully
                 * w/o any error handling.
                 */
                return ECORE_SUCCESS;
        }

        OSAL_SPIN_LOCK(&p_spq->lock);

        /* Complete the entry */
        rc = ecore_spq_fill_entry(p_hwfn, p_ent);

        /* Check return value after LOCK is taken for cleaner error flow */
        if (rc)
                goto spq_post_fail;

        /* Add the request to the pending queue */
        rc = ecore_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
        if (rc)
                goto spq_post_fail;

        rc = ecore_spq_pend_post(p_hwfn);
        if (rc) {
                /* Since it's possible that pending failed for a different
                 * entry [although unlikely], the failed entry was already
                 * dealt with; No need to return it here.
                 */
                b_ret_ent = false;
                goto spq_post_fail;
        }

        OSAL_SPIN_UNLOCK(&p_spq->lock);

        if (p_ent->comp_mode == ECORE_SPQ_MODE_EBLOCK) {
                /* For entries in ECORE BLOCK mode, the completion code cannot
                 * perform the necessary cleanup - if it did, we couldn't
                 * access p_ent here to see whether it's successful or not.
                 * Thus, after gaining the answer perform the cleanup here.
                 */
                rc = ecore_spq_block(p_hwfn, p_ent, fw_return_code);
                if (rc)
                        goto spq_post_fail2;

                /* return to pool */
                ecore_spq_return_entry(p_hwfn, p_ent);
        }
        return rc;

spq_post_fail2:
        OSAL_SPIN_LOCK(&p_spq->lock);
        OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->completion_pending);
        ecore_chain_return_produced(&p_spq->chain);

spq_post_fail:
        /* return to the free pool */
        if (b_ret_ent)
                __ecore_spq_return_entry(p_hwfn, p_ent);
        OSAL_SPIN_UNLOCK(&p_spq->lock);

        return rc;
}

enum _ecore_status_t ecore_spq_completion(struct ecore_hwfn *p_hwfn,
                                          __le16 echo,
                                          u8 fw_return_code,
                                          union event_ring_data *p_data)
{
        struct ecore_spq *p_spq;
        struct ecore_spq_entry *p_ent = OSAL_NULL;
        struct ecore_spq_entry *tmp;
        struct ecore_spq_entry *found = OSAL_NULL;
        enum _ecore_status_t rc;

        if (!p_hwfn)
                return ECORE_INVAL;

        p_spq = p_hwfn->p_spq;
        if (!p_spq)
                return ECORE_INVAL;

        OSAL_SPIN_LOCK(&p_spq->lock);
        OSAL_LIST_FOR_EACH_ENTRY_SAFE(p_ent,
                                      tmp,
                                      &p_spq->completion_pending,
                                      list, struct ecore_spq_entry) {
                if (p_ent->elem.hdr.echo == echo) {
                        OSAL_LIST_REMOVE_ENTRY(&p_ent->list,
                                               &p_spq->completion_pending);

                        /* Avoid overriding of SPQ entries when getting
                         * out-of-order completions, by marking the completions
                         * in a bitmap and increasing the chain consumer only
                         * for the first successive completed entries.
                         */
                        SPQ_COMP_BMAP_SET_BIT(p_spq, echo);
                        while (SPQ_COMP_BMAP_TEST_BIT(p_spq,
                                                      p_spq->comp_bitmap_idx)) {
                                SPQ_COMP_BMAP_CLEAR_BIT(p_spq,
                                                        p_spq->comp_bitmap_idx);
                                p_spq->comp_bitmap_idx++;
                                ecore_chain_return_produced(&p_spq->chain);
                        }

                        p_spq->comp_count++;
                        found = p_ent;
                        break;
                }

                /* This is debug and should be relatively uncommon - depends
                 * on scenarios which have multiple per-PF sent ramrods.
                 */
                DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
                           "Got completion for echo %04x - doesn't match"
                           " echo %04x in completion pending list\n",
                           OSAL_LE16_TO_CPU(echo),
                           OSAL_LE16_TO_CPU(p_ent->elem.hdr.echo));
        }

        /* Release lock before callback, as callback may post
         * an additional ramrod.
         */
        OSAL_SPIN_UNLOCK(&p_spq->lock);

        if (!found) {
                DP_NOTICE(p_hwfn, true,
                          "Failed to find an entry this"
                          " EQE [echo %04x] completes\n",
                          OSAL_LE16_TO_CPU(echo));
                return ECORE_EXISTS;
        }

        DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
                   "Complete EQE [echo %04x]: func %p cookie %p\n",
                   OSAL_LE16_TO_CPU(echo),
                   found->comp_cb.function, found->comp_cb.cookie);
        if (found->comp_cb.function)
                found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
                                        fw_return_code);

        if (found->comp_mode != ECORE_SPQ_MODE_EBLOCK) {
                /* EBLOCK is responsible for freeing its own entry */
                ecore_spq_return_entry(p_hwfn, found);
        }

        /* Attempt to post pending requests */
        OSAL_SPIN_LOCK(&p_spq->lock);
        rc = ecore_spq_pend_post(p_hwfn);
        OSAL_SPIN_UNLOCK(&p_spq->lock);

        return rc;
}

struct ecore_consq *ecore_consq_alloc(struct ecore_hwfn *p_hwfn)
{
        struct ecore_consq *p_consq;

        /* Allocate ConsQ struct */
        p_consq =
            OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(struct ecore_consq));
        if (!p_consq) {
                DP_NOTICE(p_hwfn, true,
                          "Failed to allocate `struct ecore_consq'\n");
                return OSAL_NULL;
        }

        /* Allocate and initialize ConsQ chain */
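        /* One page worth of fixed 0x80-byte consolidation elements
         * (ECORE_CHAIN_PAGE_SIZE / 0x80 elements in total).
         */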
        if (ecore_chain_alloc(p_hwfn->p_dev,
                              ECORE_CHAIN_USE_TO_PRODUCE,
                              ECORE_CHAIN_MODE_PBL,
                              ECORE_CHAIN_CNT_TYPE_U16,
                              ECORE_CHAIN_PAGE_SIZE / 0x80,
                              0x80, &p_consq->chain)) {
                DP_NOTICE(p_hwfn, true, "Failed to allocate consq chain");
                goto consq_allocate_fail;
        }

        return p_consq;

consq_allocate_fail:
        ecore_consq_free(p_hwfn, p_consq);
        return OSAL_NULL;
}

void ecore_consq_setup(struct ecore_hwfn *p_hwfn, struct ecore_consq *p_consq)
{
        ecore_chain_reset(&p_consq->chain);
}

void ecore_consq_free(struct ecore_hwfn *p_hwfn, struct ecore_consq *p_consq)
{
        if (!p_consq)
                return;
        ecore_chain_free(p_hwfn->p_dev, &p_consq->chain);
        OSAL_FREE(p_hwfn->p_dev, p_consq);
}