1 /*
2  * Copyright (c) 2016 QLogic Corporation.
3  * All rights reserved.
4  * www.qlogic.com
5  *
6  * See LICENSE.qede_pmd for copyright and licensing details.
7  */
8
9 #include "bcm_osal.h"
10 #include "reg_addr.h"
11 #include "ecore_gtt_reg_addr.h"
12 #include "ecore_hsi_common.h"
13 #include "ecore.h"
14 #include "ecore_sp_api.h"
15 #include "ecore_spq.h"
16 #include "ecore_iro.h"
17 #include "ecore_init_fw_funcs.h"
18 #include "ecore_cxt.h"
19 #include "ecore_int.h"
20 #include "ecore_dev_api.h"
21 #include "ecore_mcp.h"
22 #include "ecore_hw.h"
23 #include "ecore_sriov.h"
24
25 /***************************************************************************
26  * Structures & Definitions
27  ***************************************************************************/
28
29 #define SPQ_HIGH_PRI_RESERVE_DEFAULT    (1)
30
31 #define SPQ_BLOCK_DELAY_MAX_ITER        (10)
32 #define SPQ_BLOCK_DELAY_US              (10)
33 #define SPQ_BLOCK_SLEEP_MAX_ITER        (1000)
34 #define SPQ_BLOCK_SLEEP_MS              (5)
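
/* For reference: with the values above, the non-sleeping pass polls for at
 * most SPQ_BLOCK_DELAY_MAX_ITER * SPQ_BLOCK_DELAY_US = 10 * 10us = ~100us,
 * and the sleeping pass for at most SPQ_BLOCK_SLEEP_MAX_ITER *
 * SPQ_BLOCK_SLEEP_MS = 1000 * 5ms = ~5s, before a ramrod is declared stuck.
 * Actual timing depends on the OSAL delay/sleep implementations.
 */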
35
36 /***************************************************************************
37  * Blocking Imp. (BLOCK/EBLOCK mode)
38  ***************************************************************************/
39 static void ecore_spq_blocking_cb(struct ecore_hwfn *p_hwfn, void *cookie,
40                                   union event_ring_data OSAL_UNUSED * data,
41                                   u8 fw_return_code)
42 {
43         struct ecore_spq_comp_done *comp_done;
44
45         comp_done = (struct ecore_spq_comp_done *)cookie;
46
47         comp_done->done = 0x1;
48         comp_done->fw_return_code = fw_return_code;
49
50         /* make update visible to waiting thread */
51         OSAL_SMP_WMB(p_hwfn->p_dev);
52 }
53
54 static enum _ecore_status_t __ecore_spq_block(struct ecore_hwfn *p_hwfn,
55                                               struct ecore_spq_entry *p_ent,
56                                               u8 *p_fw_ret,
57                                               bool sleep_between_iter)
58 {
59         struct ecore_spq_comp_done *comp_done;
60         u32 iter_cnt;
61
62         comp_done = (struct ecore_spq_comp_done *)p_ent->comp_cb.cookie;
63         iter_cnt = sleep_between_iter ? SPQ_BLOCK_SLEEP_MAX_ITER
64                                       : SPQ_BLOCK_DELAY_MAX_ITER;
65
66         while (iter_cnt--) {
67                 OSAL_POLL_MODE_DPC(p_hwfn);
68                 OSAL_SMP_RMB(p_hwfn->p_dev);
69                 if (comp_done->done == 1) {
70                         if (p_fw_ret)
71                                 *p_fw_ret = comp_done->fw_return_code;
72                         return ECORE_SUCCESS;
73                 }
74
75                 if (sleep_between_iter)
76                         OSAL_MSLEEP(SPQ_BLOCK_SLEEP_MS);
77                 else
78                         OSAL_UDELAY(SPQ_BLOCK_DELAY_US);
79         }
80
81         return ECORE_TIMEOUT;
82 }
83
84 static enum _ecore_status_t ecore_spq_block(struct ecore_hwfn *p_hwfn,
85                                             struct ecore_spq_entry *p_ent,
86                                             u8 *p_fw_ret, bool skip_quick_poll)
87 {
88         struct ecore_spq_comp_done *comp_done;
89         struct ecore_ptt *p_ptt;
90         enum _ecore_status_t rc;
91
92         /* A relatively short polling period w/o sleeping, to allow the FW to
93          * complete the ramrod and thus possibly to avoid the following sleeps.
94          */
95         if (!skip_quick_poll) {
96                 rc = __ecore_spq_block(p_hwfn, p_ent, p_fw_ret, false);
97                 if (rc == ECORE_SUCCESS)
98                         return ECORE_SUCCESS;
99         }
100
101         /* Move to polling with a sleeping period between iterations */
102         rc = __ecore_spq_block(p_hwfn, p_ent, p_fw_ret, true);
103         if (rc == ECORE_SUCCESS)
104                 return ECORE_SUCCESS;
105
106         p_ptt = ecore_ptt_acquire(p_hwfn);
107         if (!p_ptt)
108                 return ECORE_AGAIN;
109
110         DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
111         rc = ecore_mcp_drain(p_hwfn, p_ptt);
112         ecore_ptt_release(p_hwfn, p_ptt);
113         if (rc != ECORE_SUCCESS) {
114                 DP_NOTICE(p_hwfn, true, "MCP drain failed\n");
115                 goto err;
116         }
117
118         /* Retry after drain */
119         rc = __ecore_spq_block(p_hwfn, p_ent, p_fw_ret, true);
120         if (rc == ECORE_SUCCESS)
121                 return ECORE_SUCCESS;
122
123         comp_done = (struct ecore_spq_comp_done *)p_ent->comp_cb.cookie;
124         if (comp_done->done == 1) {
125                 if (p_fw_ret)
126                         *p_fw_ret = comp_done->fw_return_code;
127                 return ECORE_SUCCESS;
128         }
129 err:
130         DP_NOTICE(p_hwfn, true,
131                   "Ramrod is stuck [CID %08x cmd %02x proto %02x echo %04x]\n",
132                   OSAL_LE32_TO_CPU(p_ent->elem.hdr.cid),
133                   p_ent->elem.hdr.cmd_id, p_ent->elem.hdr.protocol_id,
134                   OSAL_LE16_TO_CPU(p_ent->elem.hdr.echo));
135
136         ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_RAMROD_FAIL);
137
138         return ECORE_BUSY;
139 }
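
/* Summary of the blocking flow implemented above, in escalation order:
 *   1. Quick poll       - __ecore_spq_block(..., false), short busy-wait
 *                         (skipped when skip_quick_poll is set, e.g. for
 *                         entries taken from the unlimited_pending pool;
 *                         see ecore_spq_post()).
 *   2. Sleeping poll    - __ecore_spq_block(..., true).
 *   3. MCP drain        - ecore_mcp_drain(), followed by another sleeping
 *                         poll and a final check of comp_done->done.
 *   4. Failure          - ecore_hw_err_notify(ECORE_HW_ERR_RAMROD_FAIL).
 */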
140
141 /***************************************************************************
142  * SPQ entries inner API
143  ***************************************************************************/
144 static enum _ecore_status_t
145 ecore_spq_fill_entry(struct ecore_hwfn *p_hwfn, struct ecore_spq_entry *p_ent)
146 {
147         p_ent->flags = 0;
148
149         switch (p_ent->comp_mode) {
150         case ECORE_SPQ_MODE_EBLOCK:
151         case ECORE_SPQ_MODE_BLOCK:
152                 p_ent->comp_cb.function = ecore_spq_blocking_cb;
153                 break;
154         case ECORE_SPQ_MODE_CB:
155                 break;
156         default:
157                 DP_NOTICE(p_hwfn, true, "Unknown SPQE completion mode %d\n",
158                           p_ent->comp_mode);
159                 return ECORE_INVAL;
160         }
161
162         DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
163                    "Ramrod header: [CID 0x%08x CMD 0x%02x protocol 0x%02x]"
164                    " Data pointer: [%08x:%08x] Completion Mode: %s\n",
165                    p_ent->elem.hdr.cid, p_ent->elem.hdr.cmd_id,
166                    p_ent->elem.hdr.protocol_id,
167                    p_ent->elem.data_ptr.hi, p_ent->elem.data_ptr.lo,
168                    D_TRINE(p_ent->comp_mode, ECORE_SPQ_MODE_EBLOCK,
169                            ECORE_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
170                            "MODE_CB"));
171
172         return ECORE_SUCCESS;
173 }
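
/* Note on completion modes: EBLOCK and BLOCK both use the internal
 * ecore_spq_blocking_cb and are waited on via ecore_spq_block(), while in CB
 * mode the callback already stored in p_ent->comp_cb is expected to have been
 * provided by the caller and is therefore left untouched here.
 */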
174
175 /***************************************************************************
176  * HSI access
177  ***************************************************************************/
178 static void ecore_spq_hw_initialize(struct ecore_hwfn *p_hwfn,
179                                     struct ecore_spq *p_spq)
180 {
181         struct e4_core_conn_context *p_cxt;
182         struct ecore_cxt_info cxt_info;
183         u16 physical_q;
184         enum _ecore_status_t rc;
185
186         cxt_info.iid = p_spq->cid;
187
188         rc = ecore_cxt_get_cid_info(p_hwfn, &cxt_info);
189
190         if (rc < 0) {
191                 DP_NOTICE(p_hwfn, true, "Cannot find context info for cid=%d\n",
192                           p_spq->cid);
193                 return;
194         }
195
196         p_cxt = cxt_info.p_cxt;
197
198         /* @@@TBD we zero the context until we have ilt_reset implemented. */
199         OSAL_MEM_ZERO(p_cxt, sizeof(*p_cxt));
200
201         if (ECORE_IS_BB(p_hwfn->p_dev) || ECORE_IS_AH(p_hwfn->p_dev)) {
202                 SET_FIELD(p_cxt->xstorm_ag_context.flags10,
203                           E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
204                 SET_FIELD(p_cxt->xstorm_ag_context.flags1,
205                           E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
206                 /* SET_FIELD(p_cxt->xstorm_ag_context.flags10,
207                  *        E4_XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN, 1);
208                  */
209                 SET_FIELD(p_cxt->xstorm_ag_context.flags9,
210                           E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);
211         }
212
213         /* CDU validation - FIXME currently disabled */
214
215         /* QM physical queue */
216         physical_q = ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
217         p_cxt->xstorm_ag_context.physical_q0 = OSAL_CPU_TO_LE16(physical_q);
218
219         p_cxt->xstorm_st_context.spq_base_lo =
220             DMA_LO_LE(p_spq->chain.p_phys_addr);
221         p_cxt->xstorm_st_context.spq_base_hi =
222             DMA_HI_LE(p_spq->chain.p_phys_addr);
223
224         DMA_REGPAIR_LE(p_cxt->xstorm_st_context.consolid_base_addr,
225                        p_hwfn->p_consq->chain.p_phys_addr);
226 }
227
228 static enum _ecore_status_t ecore_spq_hw_post(struct ecore_hwfn *p_hwfn,
229                                               struct ecore_spq *p_spq,
230                                               struct ecore_spq_entry *p_ent)
231 {
232         struct ecore_chain *p_chain = &p_hwfn->p_spq->chain;
233         struct core_db_data *p_db_data = &p_spq->db_data;
234         u16 echo = ecore_chain_get_prod_idx(p_chain);
235         struct slow_path_element *elem;
236
237         p_ent->elem.hdr.echo = OSAL_CPU_TO_LE16(echo);
238         elem = ecore_chain_produce(p_chain);
239         if (!elem) {
240                 DP_NOTICE(p_hwfn, true, "Failed to produce from SPQ chain\n");
241                 return ECORE_INVAL;
242         }
243
244         *elem = p_ent->elem;    /* Struct assignment */
245
246         p_db_data->spq_prod =
247                 OSAL_CPU_TO_LE16(ecore_chain_get_prod_idx(p_chain));
248
249         /* Make sure the SPQE is updated before the doorbell */
250         OSAL_WMB(p_hwfn->p_dev);
251
252         DOORBELL(p_hwfn, p_spq->db_addr_offset, *(u32 *)p_db_data);
253
254         /* Make sure the doorbell has been rung */
255         OSAL_WMB(p_hwfn->p_dev);
256
257         DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
258                    "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x"
259                    " agg_params: %02x, prod: %04x\n",
260                    p_spq->db_addr_offset, p_spq->cid, p_db_data->params,
261                    p_db_data->agg_flags, ecore_chain_get_prod_idx(p_chain));
262
263         return ECORE_SUCCESS;
264 }
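
/* The doorbell data rung above is prepared once in ecore_spq_setup():
 *   CORE_DB_DATA_DEST        = DB_DEST_XCM
 *   CORE_DB_DATA_AGG_CMD     = DB_AGG_CMD_MAX
 *   CORE_DB_DATA_AGG_VAL_SEL = DQ_XCM_CORE_SPQ_PROD_CMD
 *   agg_flags                = DQ_XCM_CORE_DQ_CF_CMD
 * Only spq_prod changes per post, which is why the whole structure can be
 * written to the doorbell BAR as a single 32-bit value.
 */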
265
266 /***************************************************************************
267  * Asynchronous events
268  ***************************************************************************/
269
270 static enum _ecore_status_t
271 ecore_async_event_completion(struct ecore_hwfn *p_hwfn,
272                              struct event_ring_entry *p_eqe)
273 {
274         ecore_spq_async_comp_cb cb;
275
276         if (!p_hwfn->p_spq || (p_eqe->protocol_id >= MAX_PROTOCOL_TYPE))
277                 return ECORE_INVAL;
278
279         cb = p_hwfn->p_spq->async_comp_cb[p_eqe->protocol_id];
280         if (cb) {
281                 return cb(p_hwfn, p_eqe->opcode, p_eqe->echo,
282                           &p_eqe->data, p_eqe->fw_return_code);
283         } else {
284                 DP_NOTICE(p_hwfn,
285                           true, "Unknown Async completion for protocol: %d\n",
286                           p_eqe->protocol_id);
287                 return ECORE_INVAL;
288         }
289 }
290
291 enum _ecore_status_t
292 ecore_spq_register_async_cb(struct ecore_hwfn *p_hwfn,
293                             enum protocol_type protocol_id,
294                             ecore_spq_async_comp_cb cb)
295 {
296         if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE))
297                 return ECORE_INVAL;
298
299         p_hwfn->p_spq->async_comp_cb[protocol_id] = cb;
300         return ECORE_SUCCESS;
301 }
302
303 void
304 ecore_spq_unregister_async_cb(struct ecore_hwfn *p_hwfn,
305                               enum protocol_type protocol_id)
306 {
307         if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE))
308                 return;
309
310         p_hwfn->p_spq->async_comp_cb[protocol_id] = OSAL_NULL;
311 }
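
/* Usage sketch (illustrative; my_async_cb is a hypothetical handler). A
 * protocol driver registers a single handler per protocol ID, e.g.:
 *
 *     static enum _ecore_status_t
 *     my_async_cb(struct ecore_hwfn *p_hwfn, u8 opcode, __le16 echo,
 *                 union event_ring_data *data, u8 fw_return_code)
 *     {
 *             return ECORE_SUCCESS;
 *     }
 *
 *     ecore_spq_register_async_cb(p_hwfn, PROTOCOLID_COMMON, my_async_cb);
 *     ...
 *     ecore_spq_unregister_async_cb(p_hwfn, PROTOCOLID_COMMON);
 *
 * The authoritative prototype is the ecore_spq_async_comp_cb typedef in
 * ecore_spq.h.
 */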
312
313 /***************************************************************************
314  * EQ API
315  ***************************************************************************/
316 void ecore_eq_prod_update(struct ecore_hwfn *p_hwfn, u16 prod)
317 {
318         u32 addr = GTT_BAR0_MAP_REG_USDM_RAM +
319             USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id);
320
321         REG_WR16(p_hwfn, addr, prod);
322
323         /* keep prod updates ordered */
324         OSAL_MMIOWB(p_hwfn->p_dev);
325 }
326
327 enum _ecore_status_t ecore_eq_completion(struct ecore_hwfn *p_hwfn,
328                                          void *cookie)
329 {
330         struct ecore_eq *p_eq = cookie;
331         struct ecore_chain *p_chain = &p_eq->chain;
332         enum _ecore_status_t rc = 0;
333
334         /* take a snapshot of the FW consumer */
335         u16 fw_cons_idx = OSAL_LE16_TO_CPU(*p_eq->p_fw_cons);
336
337         DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "fw_cons_idx %x\n", fw_cons_idx);
338
339         /* Need to guarantee that the fw_cons index we use points to a usable
340          * element (to comply with our chain), so our macros work correctly
341          */
342         if ((fw_cons_idx & ecore_chain_get_usable_per_page(p_chain)) ==
343             ecore_chain_get_usable_per_page(p_chain)) {
344                 fw_cons_idx += ecore_chain_get_unusable_per_page(p_chain);
345         }
346
347         /* Complete current segment of eq entries */
348         while (fw_cons_idx != ecore_chain_get_cons_idx(p_chain)) {
349                 struct event_ring_entry *p_eqe = ecore_chain_consume(p_chain);
350                 if (!p_eqe) {
351                         rc = ECORE_INVAL;
352                         break;
353                 }
354
355                 DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
356                            "op %x prot %x res0 %x echo %x fwret %x flags %x\n",
357                            p_eqe->opcode,            /* Event Opcode */
358                            p_eqe->protocol_id,  /* Event Protocol ID */
359                            p_eqe->reserved0,    /* Reserved */
360                            /* Echo value from ramrod data on the host */
361                            OSAL_LE16_TO_CPU(p_eqe->echo),
362                            p_eqe->fw_return_code,    /* FW return code for SP
363                                                       * ramrods
364                                                       */
365                            p_eqe->flags);
366
367                 if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC)) {
368                         if (ecore_async_event_completion(p_hwfn, p_eqe))
369                                 rc = ECORE_INVAL;
370                 } else if (ecore_spq_completion(p_hwfn,
371                                                 p_eqe->echo,
372                                                 p_eqe->fw_return_code,
373                                                 &p_eqe->data)) {
374                         rc = ECORE_INVAL;
375                 }
376
377                 ecore_chain_recycle_consumed(p_chain);
378         }
379
380         ecore_eq_prod_update(p_hwfn, ecore_chain_get_prod_idx(p_chain));
381
382         return rc;
383 }
384
385 enum _ecore_status_t ecore_eq_alloc(struct ecore_hwfn *p_hwfn, u16 num_elem)
386 {
387         struct ecore_eq *p_eq;
388
389         /* Allocate EQ struct */
390         p_eq = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_eq));
391         if (!p_eq) {
392                 DP_NOTICE(p_hwfn, true,
393                           "Failed to allocate `struct ecore_eq'\n");
394                 return ECORE_NOMEM;
395         }
396
397         /* Allocate and initialize EQ chain */
398         if (ecore_chain_alloc(p_hwfn->p_dev,
399                               ECORE_CHAIN_USE_TO_PRODUCE,
400                               ECORE_CHAIN_MODE_PBL,
401                               ECORE_CHAIN_CNT_TYPE_U16,
402                               num_elem,
403                               sizeof(union event_ring_element),
404                               &p_eq->chain, OSAL_NULL) != ECORE_SUCCESS) {
405                 DP_NOTICE(p_hwfn, true, "Failed to allocate eq chain\n");
406                 goto eq_allocate_fail;
407         }
408
409         /* register EQ completion on the SP SB */
410         ecore_int_register_cb(p_hwfn, ecore_eq_completion,
411                               p_eq, &p_eq->eq_sb_index, &p_eq->p_fw_cons);
412
413         p_hwfn->p_eq = p_eq;
414         return ECORE_SUCCESS;
415
416 eq_allocate_fail:
417         OSAL_FREE(p_hwfn->p_dev, p_eq);
418         return ECORE_NOMEM;
419 }
420
421 void ecore_eq_setup(struct ecore_hwfn *p_hwfn)
422 {
423         ecore_chain_reset(&p_hwfn->p_eq->chain);
424 }
425
426 void ecore_eq_free(struct ecore_hwfn *p_hwfn)
427 {
428         if (!p_hwfn->p_eq)
429                 return;
430
431         ecore_chain_free(p_hwfn->p_dev, &p_hwfn->p_eq->chain);
432
433         OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_eq);
434         p_hwfn->p_eq = OSAL_NULL;
435 }
436
437 /***************************************************************************
438 * CQE API - manipulate EQ functionality
439 ***************************************************************************/
440 static enum _ecore_status_t ecore_cqe_completion(struct ecore_hwfn *p_hwfn,
441                                                  struct eth_slow_path_rx_cqe
442                                                  *cqe,
443                                                  enum protocol_type protocol)
444 {
445         if (IS_VF(p_hwfn->p_dev))
446                 return OSAL_VF_CQE_COMPLETION(p_hwfn, cqe, protocol);
447
448         /* @@@tmp - it's possible we'll eventually want to handle some
449          * actual commands that can arrive here, but for now this is only
450          * used to complete the ramrod using the echo value on the cqe
451          */
452         return ecore_spq_completion(p_hwfn, cqe->echo, 0, OSAL_NULL);
453 }
454
455 enum _ecore_status_t ecore_eth_cqe_completion(struct ecore_hwfn *p_hwfn,
456                                               struct eth_slow_path_rx_cqe *cqe)
457 {
458         enum _ecore_status_t rc;
459
460         rc = ecore_cqe_completion(p_hwfn, cqe, PROTOCOLID_ETH);
461         if (rc) {
462                 DP_NOTICE(p_hwfn, true,
463                           "Failed to handle RXQ CQE [cmd 0x%02x]\n",
464                           cqe->ramrod_cmd_id);
465         }
466
467         return rc;
468 }
469
470 /***************************************************************************
471  * Slow hwfn Queue (spq)
472  ***************************************************************************/
473 void ecore_spq_setup(struct ecore_hwfn *p_hwfn)
474 {
475         struct ecore_spq *p_spq = p_hwfn->p_spq;
476         struct ecore_spq_entry *p_virt = OSAL_NULL;
477         struct core_db_data *p_db_data;
478         void OSAL_IOMEM *db_addr;
479         dma_addr_t p_phys = 0;
480         u32 i, capacity;
481         enum _ecore_status_t rc;
482
483         OSAL_LIST_INIT(&p_spq->pending);
484         OSAL_LIST_INIT(&p_spq->completion_pending);
485         OSAL_LIST_INIT(&p_spq->free_pool);
486         OSAL_LIST_INIT(&p_spq->unlimited_pending);
487         OSAL_SPIN_LOCK_INIT(&p_spq->lock);
488
489         /* SPQ empty pool */
490         p_phys = p_spq->p_phys + OFFSETOF(struct ecore_spq_entry, ramrod);
491         p_virt = p_spq->p_virt;
492
493         capacity = ecore_chain_get_capacity(&p_spq->chain);
494         for (i = 0; i < capacity; i++) {
495                 DMA_REGPAIR_LE(p_virt->elem.data_ptr, p_phys);
496
497                 OSAL_LIST_PUSH_TAIL(&p_virt->list, &p_spq->free_pool);
498
499                 p_virt++;
500                 p_phys += sizeof(struct ecore_spq_entry);
501         }
502
503         /* Statistics */
504         p_spq->normal_count = 0;
505         p_spq->comp_count = 0;
506         p_spq->comp_sent_count = 0;
507         p_spq->unlimited_pending_count = 0;
508
509         OSAL_MEM_ZERO(p_spq->p_comp_bitmap,
510                       SPQ_COMP_BMAP_SIZE * sizeof(unsigned long));
511         p_spq->comp_bitmap_idx = 0;
512
513         /* SPQ cid, cannot fail */
514         ecore_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid);
515         ecore_spq_hw_initialize(p_hwfn, p_spq);
516
517         /* reset the chain itself */
518         ecore_chain_reset(&p_spq->chain);
519
520         /* Initialize the address/data of the SPQ doorbell */
521         p_spq->db_addr_offset = DB_ADDR(p_spq->cid, DQ_DEMS_LEGACY);
522         p_db_data = &p_spq->db_data;
523         OSAL_MEM_ZERO(p_db_data, sizeof(*p_db_data));
524         SET_FIELD(p_db_data->params, CORE_DB_DATA_DEST, DB_DEST_XCM);
525         SET_FIELD(p_db_data->params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_MAX);
526         SET_FIELD(p_db_data->params, CORE_DB_DATA_AGG_VAL_SEL,
527                   DQ_XCM_CORE_SPQ_PROD_CMD);
528         p_db_data->agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
529
530         /* Register the SPQ doorbell with the doorbell recovery mechanism */
531         db_addr = (void *)((u8 *)p_hwfn->doorbells + p_spq->db_addr_offset);
532         rc = ecore_db_recovery_add(p_hwfn->p_dev, db_addr, &p_spq->db_data,
533                                    DB_REC_WIDTH_32B, DB_REC_KERNEL);
534         if (rc != ECORE_SUCCESS)
535                 DP_INFO(p_hwfn,
536                         "Failed to register the SPQ doorbell with the doorbell recovery mechanism\n");
537 }
538
539 enum _ecore_status_t ecore_spq_alloc(struct ecore_hwfn *p_hwfn)
540 {
541         struct ecore_spq_entry *p_virt = OSAL_NULL;
542         struct ecore_spq *p_spq = OSAL_NULL;
543         dma_addr_t p_phys = 0;
544         u32 capacity;
545
546         /* SPQ struct */
547         p_spq =
548             OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(struct ecore_spq));
549         if (!p_spq) {
550                 DP_NOTICE(p_hwfn, true,
551                           "Failed to allocate `struct ecore_spq'\n");
552                 return ECORE_NOMEM;
553         }
554
555         /* SPQ ring  */
556         if (ecore_chain_alloc(p_hwfn->p_dev,
557                               ECORE_CHAIN_USE_TO_PRODUCE,
558                               ECORE_CHAIN_MODE_SINGLE,
559                               ECORE_CHAIN_CNT_TYPE_U16,
560                               0, /* N/A when the mode is SINGLE */
561                               sizeof(struct slow_path_element),
562                               &p_spq->chain, OSAL_NULL)) {
563                 DP_NOTICE(p_hwfn, true, "Failed to allocate spq chain\n");
564                 goto spq_allocate_fail;
565         }
566
567         /* allocate and fill the SPQ elements (incl. ramrod data list) */
568         capacity = ecore_chain_get_capacity(&p_spq->chain);
569         p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, &p_phys,
570                                          capacity *
571                                          sizeof(struct ecore_spq_entry));
572         if (!p_virt)
573                 goto spq_allocate_fail;
574
575         p_spq->p_virt = p_virt;
576         p_spq->p_phys = p_phys;
577
578 #ifdef CONFIG_ECORE_LOCK_ALLOC
579         OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_spq->lock);
580 #endif
581
582         p_hwfn->p_spq = p_spq;
583         return ECORE_SUCCESS;
584
585 spq_allocate_fail:
586         ecore_chain_free(p_hwfn->p_dev, &p_spq->chain);
587         OSAL_FREE(p_hwfn->p_dev, p_spq);
588         return ECORE_NOMEM;
589 }
590
591 void ecore_spq_free(struct ecore_hwfn *p_hwfn)
592 {
593         struct ecore_spq *p_spq = p_hwfn->p_spq;
594         void OSAL_IOMEM *db_addr;
595         u32 capacity;
596
597         if (!p_spq)
598                 return;
599
600         /* Delete the SPQ doorbell from the doorbell recovery mechanism */
601         db_addr = (void *)((u8 *)p_hwfn->doorbells + p_spq->db_addr_offset);
602         ecore_db_recovery_del(p_hwfn->p_dev, db_addr, &p_spq->db_data);
603
604         if (p_spq->p_virt) {
605                 capacity = ecore_chain_get_capacity(&p_spq->chain);
606                 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
607                                        p_spq->p_virt,
608                                        p_spq->p_phys,
609                                        capacity *
610                                        sizeof(struct ecore_spq_entry));
611         }
612
613         ecore_chain_free(p_hwfn->p_dev, &p_spq->chain);
614 #ifdef CONFIG_ECORE_LOCK_ALLOC
615         OSAL_SPIN_LOCK_DEALLOC(&p_spq->lock);
616 #endif
617
618         OSAL_FREE(p_hwfn->p_dev, p_spq);
619 }
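
/* Typical SPQ lifecycle (illustrative summary of the functions above):
 *
 *     ecore_spq_alloc(p_hwfn);    - chain + DMA'ed entry array allocation
 *     ecore_spq_setup(p_hwfn);    - pools, CID, HW context, doorbell data
 *     ...                         - ecore_spq_get_entry() / ecore_spq_post()
 *     ecore_spq_free(p_hwfn);     - unregister doorbell, free DMA and chain
 */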
620
621 enum _ecore_status_t
622 ecore_spq_get_entry(struct ecore_hwfn *p_hwfn, struct ecore_spq_entry **pp_ent)
623 {
624         struct ecore_spq *p_spq = p_hwfn->p_spq;
625         struct ecore_spq_entry *p_ent = OSAL_NULL;
626         enum _ecore_status_t rc = ECORE_SUCCESS;
627
628         OSAL_SPIN_LOCK(&p_spq->lock);
629
630         if (OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) {
631                 p_ent = OSAL_ZALLOC(p_hwfn->p_dev, GFP_ATOMIC, sizeof(*p_ent));
632                 if (!p_ent) {
633                         DP_NOTICE(p_hwfn, true,
634                                  "Failed to allocate an SPQ entry for a pending"
635                                  " ramrod\n");
636                         rc = ECORE_NOMEM;
637                         goto out_unlock;
638                 }
639                 p_ent->queue = &p_spq->unlimited_pending;
640         } else {
641                 p_ent = OSAL_LIST_FIRST_ENTRY(&p_spq->free_pool,
642                                               struct ecore_spq_entry, list);
643                 OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->free_pool);
644                 p_ent->queue = &p_spq->pending;
645         }
646
647         *pp_ent = p_ent;
648
649 out_unlock:
650         OSAL_SPIN_UNLOCK(&p_spq->lock);
651         return rc;
652 }
653
654 /* Locked variant; should be called while the SPQ lock is held */
655 static void __ecore_spq_return_entry(struct ecore_hwfn *p_hwfn,
656                                      struct ecore_spq_entry *p_ent)
657 {
658         OSAL_LIST_PUSH_TAIL(&p_ent->list, &p_hwfn->p_spq->free_pool);
659 }
660
661 void ecore_spq_return_entry(struct ecore_hwfn *p_hwfn,
662                             struct ecore_spq_entry *p_ent)
663 {
664         OSAL_SPIN_LOCK(&p_hwfn->p_spq->lock);
665         __ecore_spq_return_entry(p_hwfn, p_ent);
666         OSAL_SPIN_UNLOCK(&p_hwfn->p_spq->lock);
667 }
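
/* Usage note: entries taken with ecore_spq_get_entry() are normally handed to
 * ecore_spq_post() and recycled by the completion flow (or by ecore_spq_post()
 * itself in EBLOCK mode); ecore_spq_return_entry() is mainly for error paths
 * where a prepared entry is abandoned before it is posted.
 */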
668
669 /**
670  * @brief ecore_spq_add_entry - adds a new entry to the pending
671  *        list. Should be used while lock is being held.
672  *
673  * Adds an entry to the pending list if there is room (an empty
674  * element is available in the free_pool), or else places the
675  * entry in the unlimited_pending pool.
676  *
677  * @param p_hwfn
678  * @param p_ent
679  * @param priority
680  *
681  * @return enum _ecore_status_t
682  */
683 static enum _ecore_status_t
684 ecore_spq_add_entry(struct ecore_hwfn *p_hwfn,
685                     struct ecore_spq_entry *p_ent, enum spq_priority priority)
686 {
687         struct ecore_spq *p_spq = p_hwfn->p_spq;
688
689         if (p_ent->queue == &p_spq->unlimited_pending) {
690                 if (OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) {
691                         OSAL_LIST_PUSH_TAIL(&p_ent->list,
692                                             &p_spq->unlimited_pending);
693                         p_spq->unlimited_pending_count++;
694
695                         return ECORE_SUCCESS;
696
697                 } else {
698                         struct ecore_spq_entry *p_en2;
699
700                         p_en2 = OSAL_LIST_FIRST_ENTRY(&p_spq->free_pool,
701                                                      struct ecore_spq_entry,
702                                                      list);
703                         OSAL_LIST_REMOVE_ENTRY(&p_en2->list, &p_spq->free_pool);
704
705                         /* Copy the ring element physical pointer to the new
706                          * entry, since we are about to override the entire ring
707                          * entry and don't want to lose the pointer.
708                          */
709                         p_ent->elem.data_ptr = p_en2->elem.data_ptr;
710
711                         *p_en2 = *p_ent;
712
713                         /* EBLOCK is responsible for freeing the allocated p_ent */
714                         if (p_ent->comp_mode != ECORE_SPQ_MODE_EBLOCK)
715                                 OSAL_FREE(p_hwfn->p_dev, p_ent);
716
717                         p_ent = p_en2;
718                 }
719         }
720
721         /* entry is to be placed in 'pending' queue */
722         switch (priority) {
723         case ECORE_SPQ_PRIORITY_NORMAL:
724                 OSAL_LIST_PUSH_TAIL(&p_ent->list, &p_spq->pending);
725                 p_spq->normal_count++;
726                 break;
727         case ECORE_SPQ_PRIORITY_HIGH:
728                 OSAL_LIST_PUSH_HEAD(&p_ent->list, &p_spq->pending);
729                 p_spq->high_count++;
730                 break;
731         default:
732                 return ECORE_INVAL;
733         }
734
735         return ECORE_SUCCESS;
736 }
737
738 /***************************************************************************
739  * Accessor
740  ***************************************************************************/
741
742 u32 ecore_spq_get_cid(struct ecore_hwfn *p_hwfn)
743 {
744         if (!p_hwfn->p_spq)
745                 return 0xffffffff;      /* illegal */
746         return p_hwfn->p_spq->cid;
747 }
748
749 /***************************************************************************
750  * Posting new Ramrods
751  ***************************************************************************/
752
753 static enum _ecore_status_t ecore_spq_post_list(struct ecore_hwfn *p_hwfn,
754                                                 osal_list_t *head,
755                                                 u32 keep_reserve)
756 {
757         struct ecore_spq *p_spq = p_hwfn->p_spq;
758         enum _ecore_status_t rc;
759
760         /* TODO - implementation might be wasteful; will always keep room
761          * for an additional high priority ramrod (even if one is already
762          * pending in FW)
763          */
764         while (ecore_chain_get_elem_left(&p_spq->chain) > keep_reserve &&
765                !OSAL_LIST_IS_EMPTY(head)) {
766                 struct ecore_spq_entry *p_ent =
767                     OSAL_LIST_FIRST_ENTRY(head, struct ecore_spq_entry, list);
768                 if (p_ent != OSAL_NULL) {
769 #if defined(_NTDDK_)
770 #pragma warning(suppress : 6011 28182)
771 #endif
772                         OSAL_LIST_REMOVE_ENTRY(&p_ent->list, head);
773                         OSAL_LIST_PUSH_TAIL(&p_ent->list,
774                                             &p_spq->completion_pending);
775                         p_spq->comp_sent_count++;
776
777                         rc = ecore_spq_hw_post(p_hwfn, p_spq, p_ent);
778                         if (rc) {
779                                 OSAL_LIST_REMOVE_ENTRY(&p_ent->list,
780                                                     &p_spq->completion_pending);
781                                 __ecore_spq_return_entry(p_hwfn, p_ent);
782                                 return rc;
783                         }
784                 }
785         }
786
787         return ECORE_SUCCESS;
788 }
789
790 static enum _ecore_status_t ecore_spq_pend_post(struct ecore_hwfn *p_hwfn)
791 {
792         struct ecore_spq *p_spq = p_hwfn->p_spq;
793         struct ecore_spq_entry *p_ent = OSAL_NULL;
794
795         while (!OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) {
796                 if (OSAL_LIST_IS_EMPTY(&p_spq->unlimited_pending))
797                         break;
798
799                 p_ent = OSAL_LIST_FIRST_ENTRY(&p_spq->unlimited_pending,
800                                               struct ecore_spq_entry, list);
801                 if (!p_ent)
802                         return ECORE_INVAL;
803
804 #if defined(_NTDDK_)
805 #pragma warning(suppress : 6011)
806 #endif
807                 OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->unlimited_pending);
808
809                 ecore_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
810         }
811
812         return ecore_spq_post_list(p_hwfn,
813                                  &p_spq->pending, SPQ_HIGH_PRI_RESERVE_DEFAULT);
814 }
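
/* Note: SPQ_HIGH_PRI_RESERVE_DEFAULT (1) passed above means one chain element
 * is always left unused by the regular posting flow, as headroom intended for
 * a high-priority ramrod; the TODO in ecore_spq_post_list() notes this may be
 * wasteful.
 */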
815
816 enum _ecore_status_t ecore_spq_post(struct ecore_hwfn *p_hwfn,
817                                     struct ecore_spq_entry *p_ent,
818                                     u8 *fw_return_code)
819 {
820         enum _ecore_status_t rc = ECORE_SUCCESS;
821         struct ecore_spq *p_spq = p_hwfn ? p_hwfn->p_spq : OSAL_NULL;
822         bool b_ret_ent = true;
823
824         if (!p_hwfn)
825                 return ECORE_INVAL;
826
827         if (!p_ent) {
828                 DP_NOTICE(p_hwfn, true, "Got a NULL pointer\n");
829                 return ECORE_INVAL;
830         }
831
832         if (p_hwfn->p_dev->recov_in_prog) {
833                 DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
834                            "Recovery is in progress -> skip spq post"
835                            " [cmd %02x protocol %02x]\n",
836                            p_ent->elem.hdr.cmd_id, p_ent->elem.hdr.protocol_id);
837                 /* Return success to let the flows complete successfully
838                  * w/o any error handling.
839                  */
840                 return ECORE_SUCCESS;
841         }
842
843         OSAL_SPIN_LOCK(&p_spq->lock);
844
845         /* Complete the entry */
846         rc = ecore_spq_fill_entry(p_hwfn, p_ent);
847
848         /* Check return value after LOCK is taken for cleaner error flow */
849         if (rc)
850                 goto spq_post_fail;
851
852         /* Add the request to the pending queue */
853         rc = ecore_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
854         if (rc)
855                 goto spq_post_fail;
856
857         rc = ecore_spq_pend_post(p_hwfn);
858         if (rc) {
859                 /* Since it's possible that the pending post failed for a
860                  * different entry [although unlikely], the failed entry was
861                  * already dealt with; no need to return it here.
862                  */
863                 b_ret_ent = false;
864                 goto spq_post_fail;
865         }
866
867         OSAL_SPIN_UNLOCK(&p_spq->lock);
868
869         if (p_ent->comp_mode == ECORE_SPQ_MODE_EBLOCK) {
870                 /* For entries in ECORE BLOCK mode, the completion code cannot
871                  * perform the necessary cleanup - if it did, we couldn't
872                  * access p_ent here to see whether it's successful or not.
873                  * Thus, once the answer is known, perform the cleanup here.
874                  */
875                 rc = ecore_spq_block(p_hwfn, p_ent, fw_return_code,
876                                      p_ent->queue == &p_spq->unlimited_pending);
877
878                 if (p_ent->queue == &p_spq->unlimited_pending) {
879                         /* This is an allocated p_ent which does not need to
880                          * return to pool.
881                          */
882                         OSAL_FREE(p_hwfn->p_dev, p_ent);
883
884                         /* TBD: handle error flow and remove p_ent from
885                          * completion pending
886                          */
887                         return rc;
888                 }
889
890                 if (rc)
891                         goto spq_post_fail2;
892
893                 /* return to pool */
894                 ecore_spq_return_entry(p_hwfn, p_ent);
895         }
896         return rc;
897
898 spq_post_fail2:
899         OSAL_SPIN_LOCK(&p_spq->lock);
900         OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->completion_pending);
901         ecore_chain_return_produced(&p_spq->chain);
902
903 spq_post_fail:
904         /* return to the free pool */
905         if (b_ret_ent)
906                 __ecore_spq_return_entry(p_hwfn, p_ent);
907         OSAL_SPIN_UNLOCK(&p_spq->lock);
908
909         return rc;
910 }
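
/* Posting sketch (illustrative; everything except the ecore_* calls and the
 * ecore_spq_entry fields is hypothetical). Clients normally go through a
 * helper such as ecore_sp_init_request(), but the underlying sequence is:
 *
 *     struct ecore_spq_entry *p_ent;
 *     u8 fw_ret;
 *
 *     ecore_spq_get_entry(p_hwfn, &p_ent);
 *     p_ent->elem.hdr.cid         = OSAL_CPU_TO_LE32(cid);
 *     p_ent->elem.hdr.cmd_id      = cmd_id;
 *     p_ent->elem.hdr.protocol_id = protocol_id;
 *     p_ent->comp_mode            = ECORE_SPQ_MODE_EBLOCK;
 *     (fill the ramrod-specific data in p_ent->ramrod)
 *     ecore_spq_post(p_hwfn, p_ent, &fw_ret);
 *
 * In EBLOCK mode the call returns only once the matching EQE has arrived, or
 * after ecore_spq_block() gives up.
 */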
911
912 enum _ecore_status_t ecore_spq_completion(struct ecore_hwfn *p_hwfn,
913                                           __le16 echo,
914                                           u8 fw_return_code,
915                                           union event_ring_data *p_data)
916 {
917         struct ecore_spq *p_spq;
918         struct ecore_spq_entry *p_ent = OSAL_NULL;
919         struct ecore_spq_entry *tmp;
920         struct ecore_spq_entry *found = OSAL_NULL;
921         enum _ecore_status_t rc;
922
923         if (!p_hwfn)
924                 return ECORE_INVAL;
925
926         p_spq = p_hwfn->p_spq;
927         if (!p_spq)
928                 return ECORE_INVAL;
929
930         OSAL_SPIN_LOCK(&p_spq->lock);
931         OSAL_LIST_FOR_EACH_ENTRY_SAFE(p_ent,
932                                       tmp,
933                                       &p_spq->completion_pending,
934                                       list, struct ecore_spq_entry) {
935                 if (p_ent->elem.hdr.echo == echo) {
936                         OSAL_LIST_REMOVE_ENTRY(&p_ent->list,
937                                                &p_spq->completion_pending);
938
939                         /* Avoid overwriting of SPQ entries when getting
940                          * out-of-order completions, by marking the completions
941                          * in a bitmap and increasing the chain consumer only
942                          * for the first successive completed entries.
943                          */
944                         SPQ_COMP_BMAP_SET_BIT(p_spq, echo);
945                         while (SPQ_COMP_BMAP_TEST_BIT(p_spq,
946                                                       p_spq->comp_bitmap_idx)) {
947                                 SPQ_COMP_BMAP_CLEAR_BIT(p_spq,
948                                                         p_spq->comp_bitmap_idx);
949                                 p_spq->comp_bitmap_idx++;
950                                 ecore_chain_return_produced(&p_spq->chain);
951                         }
952
953                         p_spq->comp_count++;
954                         found = p_ent;
955                         break;
956                 }
957
958                 /* This is debug and should be relatively uncommon - depends
959                  * on scenarios which have multiple ramrods sent per PF.
960                  */
961                 DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
962                            "Got completion for echo %04x - doesn't match"
963                            " echo %04x in completion pending list\n",
964                            OSAL_LE16_TO_CPU(echo),
965                            OSAL_LE16_TO_CPU(p_ent->elem.hdr.echo));
966         }
967
968         /* Release lock before callback, as callback may post
969          * an additional ramrod.
970          */
971         OSAL_SPIN_UNLOCK(&p_spq->lock);
972
973         if (!found) {
974                 DP_NOTICE(p_hwfn, true,
975                           "Failed to find an entry this"
976                           " EQE [echo %04x] completes\n",
977                           OSAL_LE16_TO_CPU(echo));
978                 return ECORE_EXISTS;
979         }
980
981         DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
982                    "Complete EQE [echo %04x]: func %p cookie %p\n",
983                    OSAL_LE16_TO_CPU(echo),
984                    p_ent->comp_cb.function, p_ent->comp_cb.cookie);
985         if (found->comp_cb.function)
986                 found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
987                                         fw_return_code);
988         else
989                 DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ,
990                            "Got a completion without a callback function\n");
991
992         if ((found->comp_mode != ECORE_SPQ_MODE_EBLOCK) ||
993             (found->queue == &p_spq->unlimited_pending))
994                 /* EBLOCK  is responsible for returning its own entry into the
995                  * free list, unless it originally added the entry into the
996                  * unlimited pending list.
997                  */
998                 ecore_spq_return_entry(p_hwfn, found);
999
1000         /* Attempt to post pending requests */
1001         OSAL_SPIN_LOCK(&p_spq->lock);
1002         rc = ecore_spq_pend_post(p_hwfn);
1003         OSAL_SPIN_UNLOCK(&p_spq->lock);
1004
1005         return rc;
1006 }
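
/* Worked example of the out-of-order handling above: if the EQE for echo 11
 * arrives before the one for echo 10, bit 11 is set in the bitmap but
 * comp_bitmap_idx (still at 10) does not advance, so no chain element is
 * returned yet. When echo 10 later completes, bits 10 and 11 are found set
 * and cleared in order and ecore_chain_return_produced() is called twice,
 * keeping the completed region of the chain contiguous.
 */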
1007
1008 enum _ecore_status_t ecore_consq_alloc(struct ecore_hwfn *p_hwfn)
1009 {
1010         struct ecore_consq *p_consq;
1011
1012         /* Allocate ConsQ struct */
1013         p_consq =
1014             OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_consq));
1015         if (!p_consq) {
1016                 DP_NOTICE(p_hwfn, true,
1017                           "Failed to allocate `struct ecore_consq'\n");
1018                 return ECORE_NOMEM;
1019         }
1020
1021         /* Allocate and initialize ConsQ chain */
1022         if (ecore_chain_alloc(p_hwfn->p_dev,
1023                               ECORE_CHAIN_USE_TO_PRODUCE,
1024                               ECORE_CHAIN_MODE_PBL,
1025                               ECORE_CHAIN_CNT_TYPE_U16,
1026                               ECORE_CHAIN_PAGE_SIZE / 0x80,
1027                               0x80,
1028                               &p_consq->chain, OSAL_NULL) != ECORE_SUCCESS) {
1029                 DP_NOTICE(p_hwfn, true, "Failed to allocate consq chain\n");
1030                 goto consq_allocate_fail;
1031         }
1032
1033         p_hwfn->p_consq = p_consq;
1034         return ECORE_SUCCESS;
1035
1036 consq_allocate_fail:
1037         OSAL_FREE(p_hwfn->p_dev, p_consq);
1038         return ECORE_NOMEM;
1039 }
1040
1041 void ecore_consq_setup(struct ecore_hwfn *p_hwfn)
1042 {
1043         ecore_chain_reset(&p_hwfn->p_consq->chain);
1044 }
1045
1046 void ecore_consq_free(struct ecore_hwfn *p_hwfn)
1047 {
1048         if (!p_hwfn->p_consq)
1049                 return;
1050
1051         ecore_chain_free(p_hwfn->p_dev, &p_hwfn->p_consq->chain);
1052         OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_consq);
1053 }