/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2016 - 2018 Cavium Inc.
 * All rights reserved.
 * www.cavium.com
 */

#ifndef __ECORE_CHAIN_H__
#define __ECORE_CHAIN_H__

#include <assert.h>             /* @DPDK */

#include "common_hsi.h"
#include "ecore_utils.h"

enum ecore_chain_mode {
        /* Each Page contains a next pointer at its end */
        ECORE_CHAIN_MODE_NEXT_PTR,

        /* Chain is a single page; a next pointer is not required */
        ECORE_CHAIN_MODE_SINGLE,

        /* Page pointers are located in a side list */
        ECORE_CHAIN_MODE_PBL,
};

enum ecore_chain_use_mode {
        ECORE_CHAIN_USE_TO_PRODUCE,     /* Chain starts empty */
        ECORE_CHAIN_USE_TO_CONSUME,     /* Chain starts full */
        ECORE_CHAIN_USE_TO_CONSUME_PRODUCE,     /* Chain starts empty */
};

enum ecore_chain_cnt_type {
        /* The chain's size/prod/cons are kept in 16-bit variables */
        ECORE_CHAIN_CNT_TYPE_U16,

        /* The chain's size/prod/cons are kept in 32-bit variables */
        ECORE_CHAIN_CNT_TYPE_U32,
};

struct ecore_chain_next {
        struct regpair next_phys;
        void *next_virt;
};

struct ecore_chain_pbl_u16 {
        u16 prod_page_idx;
        u16 cons_page_idx;
};

struct ecore_chain_pbl_u32 {
        u32 prod_page_idx;
        u32 cons_page_idx;
};

struct ecore_chain_ext_pbl {
        dma_addr_t p_pbl_phys;
        void *p_pbl_virt;
};

struct ecore_chain_u16 {
        /* Cyclic index of next element to produce/consume */
        u16 prod_idx;
        u16 cons_idx;
};

struct ecore_chain_u32 {
        /* Cyclic index of next element to produce/consume */
        u32 prod_idx;
        u32 cons_idx;
};

struct ecore_chain {
        /* Fastpath portion of the chain - required for commands such
         * as produce / consume.
         */
        /* Point to next element to produce/consume */
        void *p_prod_elem;
        void *p_cons_elem;

        /* Fastpath portions of the PBL [if exists] */

        struct {
                /* Table for keeping the virtual addresses of the chain pages,
                 * corresponding to the physical addresses in the pbl table.
                 */
                void            **pp_virt_addr_tbl;

                union {
                        struct ecore_chain_pbl_u16      u16;
                        struct ecore_chain_pbl_u32      u32;
                } c;
        } pbl;

        union {
                struct ecore_chain_u16 chain16;
                struct ecore_chain_u32 chain32;
        } u;

        /* Capacity counts only usable elements */
        u32                             capacity;
        u32                             page_cnt;

        /* A u8 would suffice for mode, but keeping the enum saves a lot of
         * headaches on castings & defaults.
         */
        enum ecore_chain_mode           mode;

        /* Elements information for fast calculations */
        u16 elem_per_page;
        u16 elem_per_page_mask;
        u16 elem_size;
        u16 next_page_mask;
        u16 usable_per_page;
        u8 elem_unusable;

        u8                              cnt_type;

        /* Slowpath of the chain - required for initialization and destruction,
         * but isn't involved in regular functionality.
         */

        /* Base address of a pre-allocated buffer for pbl */
        struct {
                dma_addr_t              p_phys_table;
                void                    *p_virt_table;
        } pbl_sp;

        /* Address of the first page of the chain - the address is required
         * for fastpath operation [consume/produce] but only for the SINGLE
         * flavour, which isn't considered fastpath [== SPQ].
         */
        void                            *p_virt_addr;
        dma_addr_t                      p_phys_addr;

        /* Total number of elements [for entire chain] */
        u32                             size;

        u8                              intended_use;

        /* TBD - do we really need this? Couldn't find usage for it */
        bool                            b_external_pbl;

        void *dp_ctx;
};

#define ECORE_CHAIN_PBL_ENTRY_SIZE      (8)
#define ECORE_CHAIN_PAGE_SIZE           (0x1000)
#define ELEMS_PER_PAGE(elem_size)       (ECORE_CHAIN_PAGE_SIZE / (elem_size))

#define UNUSABLE_ELEMS_PER_PAGE(elem_size, mode)                \
          ((mode == ECORE_CHAIN_MODE_NEXT_PTR) ?                \
           (u8)(1 + ((sizeof(struct ecore_chain_next) - 1) /    \
                     (elem_size))) : 0)

#define USABLE_ELEMS_PER_PAGE(elem_size, mode)          \
        ((u32)(ELEMS_PER_PAGE(elem_size) -                      \
        UNUSABLE_ELEMS_PER_PAGE(elem_size, mode)))

#define ECORE_CHAIN_PAGE_CNT(elem_cnt, elem_size, mode)         \
        DIV_ROUND_UP(elem_cnt, USABLE_ELEMS_PER_PAGE(elem_size, mode))
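
/* Illustrative sizing example (not part of the API): assuming 8-byte
 * elements, a 64-bit build (so sizeof(struct ecore_chain_next) == 16) and
 * ECORE_CHAIN_MODE_NEXT_PTR:
 *
 *      ELEMS_PER_PAGE(8)                        = 0x1000 / 8      = 512
 *      UNUSABLE_ELEMS_PER_PAGE(8, NEXT_PTR)     = 1 + (16 - 1) / 8 = 2
 *      USABLE_ELEMS_PER_PAGE(8, NEXT_PTR)       = 512 - 2          = 510
 *      ECORE_CHAIN_PAGE_CNT(1024, 8, NEXT_PTR)  = DIV_ROUND_UP(1024, 510) = 3
 *
 * In SINGLE/PBL mode no space is reserved for a next pointer, so all 512
 * elements of each page are usable.
 */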

#define is_chain_u16(p) ((p)->cnt_type == ECORE_CHAIN_CNT_TYPE_U16)
#define is_chain_u32(p) ((p)->cnt_type == ECORE_CHAIN_CNT_TYPE_U32)

/* Accessors */
static OSAL_INLINE u16 ecore_chain_get_prod_idx(struct ecore_chain *p_chain)
{
        OSAL_ASSERT(is_chain_u16(p_chain));
        return p_chain->u.chain16.prod_idx;
}

static OSAL_INLINE u32 ecore_chain_get_prod_idx_u32(struct ecore_chain *p_chain)
{
        OSAL_ASSERT(is_chain_u32(p_chain));
        return p_chain->u.chain32.prod_idx;
}

static OSAL_INLINE u16 ecore_chain_get_cons_idx(struct ecore_chain *p_chain)
{
        OSAL_ASSERT(is_chain_u16(p_chain));
        return p_chain->u.chain16.cons_idx;
}

static OSAL_INLINE u32 ecore_chain_get_cons_idx_u32(struct ecore_chain *p_chain)
{
        OSAL_ASSERT(is_chain_u32(p_chain));
        return p_chain->u.chain32.cons_idx;
}

/* FIXME:
 * Should create OSALs for the below definitions.
 * For Linux, replace them with the existing U16_MAX and U32_MAX, and handle
 * kernel versions that lack them.
 */
#define ECORE_U16_MAX   ((u16)~0U)
#define ECORE_U32_MAX   ((u32)~0U)

static OSAL_INLINE u16 ecore_chain_get_elem_left(struct ecore_chain *p_chain)
{
        u16 used;

        OSAL_ASSERT(is_chain_u16(p_chain));

        used = (u16)(((u32)ECORE_U16_MAX + 1 +
                      (u32)(p_chain->u.chain16.prod_idx)) -
                     (u32)p_chain->u.chain16.cons_idx);
        if (p_chain->mode == ECORE_CHAIN_MODE_NEXT_PTR)
                used -= p_chain->u.chain16.prod_idx / p_chain->elem_per_page -
                        p_chain->u.chain16.cons_idx / p_chain->elem_per_page;

        return (u16)(p_chain->capacity - used);
}

static OSAL_INLINE u32
ecore_chain_get_elem_left_u32(struct ecore_chain *p_chain)
{
        u32 used;

        OSAL_ASSERT(is_chain_u32(p_chain));

        used = (u32)(((u64)ECORE_U32_MAX + 1 +
                      (u64)(p_chain->u.chain32.prod_idx)) -
                     (u64)p_chain->u.chain32.cons_idx);
        if (p_chain->mode == ECORE_CHAIN_MODE_NEXT_PTR)
                used -= p_chain->u.chain32.prod_idx / p_chain->elem_per_page -
                        p_chain->u.chain32.cons_idx / p_chain->elem_per_page;

        return p_chain->capacity - used;
}
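
/* Illustrative arithmetic for the element counting above (hypothetical
 * values): with a u16 chain of capacity 1024, prod_idx == 5 (already wrapped
 * past 0xffff) and cons_idx == 65530, the number of in-flight elements is
 *
 *      used = (0x10000 + 5) - 65530 = 11
 *
 * so ecore_chain_get_elem_left() returns 1024 - 11 = 1013. The extra
 * subtraction for ECORE_CHAIN_MODE_NEXT_PTR compensates for the per-page
 * index adjustment that ecore_chain_advance_page() applies on page crossings.
 */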

static OSAL_INLINE u8 ecore_chain_is_full(struct ecore_chain *p_chain)
{
        if (is_chain_u16(p_chain))
                return (ecore_chain_get_elem_left(p_chain) ==
                        p_chain->capacity);
        else
                return (ecore_chain_get_elem_left_u32(p_chain) ==
                        p_chain->capacity);
}

static OSAL_INLINE u8 ecore_chain_is_empty(struct ecore_chain *p_chain)
{
        if (is_chain_u16(p_chain))
                return (ecore_chain_get_elem_left(p_chain) == 0);
        else
                return (ecore_chain_get_elem_left_u32(p_chain) == 0);
}

static OSAL_INLINE
u16 ecore_chain_get_elem_per_page(struct ecore_chain *p_chain)
{
        return p_chain->elem_per_page;
}

static OSAL_INLINE
u16 ecore_chain_get_usable_per_page(struct ecore_chain *p_chain)
{
        return p_chain->usable_per_page;
}

static OSAL_INLINE
u8 ecore_chain_get_unusable_per_page(struct ecore_chain *p_chain)
{
        return p_chain->elem_unusable;
}

static OSAL_INLINE u32 ecore_chain_get_size(struct ecore_chain *p_chain)
{
        return p_chain->size;
}

static OSAL_INLINE u32 ecore_chain_get_page_cnt(struct ecore_chain *p_chain)
{
        return p_chain->page_cnt;
}

static OSAL_INLINE
dma_addr_t ecore_chain_get_pbl_phys(struct ecore_chain *p_chain)
{
        return p_chain->pbl_sp.p_phys_table;
}

/**
 * @brief ecore_chain_advance_page -
 *
 * Advance the next element across pages for a linked chain
 *
 * @param p_chain
 * @param p_next_elem
 * @param idx_to_inc
 * @param page_to_inc
 */
static OSAL_INLINE void
ecore_chain_advance_page(struct ecore_chain *p_chain, void **p_next_elem,
                         void *idx_to_inc, void *page_to_inc)
{
        struct ecore_chain_next *p_next = OSAL_NULL;
        u32 page_index = 0;

        switch (p_chain->mode) {
        case ECORE_CHAIN_MODE_NEXT_PTR:
                p_next = (struct ecore_chain_next *)(*p_next_elem);
                *p_next_elem = p_next->next_virt;
                if (is_chain_u16(p_chain))
                        *(u16 *)idx_to_inc += (u16)p_chain->elem_unusable;
                else
                        *(u32 *)idx_to_inc += (u16)p_chain->elem_unusable;
                break;
        case ECORE_CHAIN_MODE_SINGLE:
                *p_next_elem = p_chain->p_virt_addr;
                break;
        case ECORE_CHAIN_MODE_PBL:
                if (is_chain_u16(p_chain)) {
                        if (++(*(u16 *)page_to_inc) == p_chain->page_cnt)
                                *(u16 *)page_to_inc = 0;
                        page_index = *(u16 *)page_to_inc;
                } else {
                        if (++(*(u32 *)page_to_inc) == p_chain->page_cnt)
                                *(u32 *)page_to_inc = 0;
                        page_index = *(u32 *)page_to_inc;
                }
                *p_next_elem = p_chain->pbl.pp_virt_addr_tbl[page_index];
        }
}

#define is_unusable_idx(p, idx)                 \
        (((p)->u.chain16.idx & (p)->elem_per_page_mask) == (p)->usable_per_page)

#define is_unusable_idx_u32(p, idx)             \
        (((p)->u.chain32.idx & (p)->elem_per_page_mask) == (p)->usable_per_page)

#define is_unusable_next_idx(p, idx)            \
        ((((p)->u.chain16.idx + 1) &            \
        (p)->elem_per_page_mask) == (p)->usable_per_page)

#define is_unusable_next_idx_u32(p, idx)        \
        ((((p)->u.chain32.idx + 1) &            \
        (p)->elem_per_page_mask) == (p)->usable_per_page)

#define test_and_skip(p, idx)                                           \
        do {                                                            \
                if (is_chain_u16(p)) {                                  \
                        if (is_unusable_idx(p, idx))                    \
                                (p)->u.chain16.idx +=                   \
                                        (p)->elem_unusable;             \
                } else {                                                \
                        if (is_unusable_idx_u32(p, idx))                \
                                (p)->u.chain32.idx +=                   \
                                        (p)->elem_unusable;             \
                }                                                       \
        } while (0)

/**
 * @brief ecore_chain_return_multi_produced -
 *
 * A chain in which the driver "Produces" elements should use this API
 * to indicate that previously produced elements are now consumed.
 *
 * @param p_chain
 * @param num
 */
static OSAL_INLINE
void ecore_chain_return_multi_produced(struct ecore_chain *p_chain, u32 num)
{
        if (is_chain_u16(p_chain))
                p_chain->u.chain16.cons_idx += (u16)num;
        else
                p_chain->u.chain32.cons_idx += num;
        test_and_skip(p_chain, cons_idx);
}

/**
 * @brief ecore_chain_return_produced -
 *
 * A chain in which the driver "Produces" elements should use this API
 * to indicate that a previously produced element is now consumed.
 *
 * @param p_chain
 */
static OSAL_INLINE void ecore_chain_return_produced(struct ecore_chain *p_chain)
{
        if (is_chain_u16(p_chain))
                p_chain->u.chain16.cons_idx++;
        else
                p_chain->u.chain32.cons_idx++;
        test_and_skip(p_chain, cons_idx);
}

/**
 * @brief ecore_chain_produce -
 *
 * A chain in which the driver "Produces" elements should use this to get
 * a pointer to the next element which can be "Produced". It's the driver's
 * responsibility to validate that the chain has room for a new element.
 *
 * @param p_chain
 *
 * @return void*, a pointer to the next element
 */
static OSAL_INLINE void *ecore_chain_produce(struct ecore_chain *p_chain)
{
        void *p_ret = OSAL_NULL, *p_prod_idx, *p_prod_page_idx;

        if (is_chain_u16(p_chain)) {
                if ((p_chain->u.chain16.prod_idx &
                     p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
                        p_prod_idx = &p_chain->u.chain16.prod_idx;
                        p_prod_page_idx = &p_chain->pbl.c.u16.prod_page_idx;
                        ecore_chain_advance_page(p_chain, &p_chain->p_prod_elem,
                                                 p_prod_idx, p_prod_page_idx);
                }
                p_chain->u.chain16.prod_idx++;
        } else {
                if ((p_chain->u.chain32.prod_idx &
                     p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
                        p_prod_idx = &p_chain->u.chain32.prod_idx;
                        p_prod_page_idx = &p_chain->pbl.c.u32.prod_page_idx;
                        ecore_chain_advance_page(p_chain, &p_chain->p_prod_elem,
                                                 p_prod_idx, p_prod_page_idx);
                }
                p_chain->u.chain32.prod_idx++;
        }

        p_ret = p_chain->p_prod_elem;
        p_chain->p_prod_elem = (void *)(((u8 *)p_chain->p_prod_elem) +
                                        p_chain->elem_size);

        return p_ret;
}
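
/* Minimal produce-side usage sketch (illustrative only; "txq->chain",
 * "struct my_tx_bd" and "fill_tx_bd()" are hypothetical placeholders, not
 * part of this API). The caller must verify there is room before producing:
 *
 *      struct my_tx_bd *p_bd;
 *
 *      if (ecore_chain_get_elem_left(&txq->chain) == 0)
 *              return;                         // u16 chain is full
 *
 *      p_bd = (struct my_tx_bd *)ecore_chain_produce(&txq->chain);
 *      fill_tx_bd(p_bd);                       // write the element in place
 *
 *      // The updated producer index (ecore_chain_get_prod_idx()) is what the
 *      // driver would later write to the device doorbell.
 */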

/**
 * @brief ecore_chain_get_capacity -
 *
 * Get the maximum number of usable elements (BDs) in the chain
 *
 * @param p_chain
 *
 * @return u32, the chain capacity
 */
static OSAL_INLINE u32 ecore_chain_get_capacity(struct ecore_chain *p_chain)
{
        return p_chain->capacity;
}

/**
 * @brief ecore_chain_recycle_consumed -
 *
 * Returns an element which was previously consumed;
 * Increments the producer so it can be written to FW.
 *
 * @param p_chain
 */
static OSAL_INLINE
void ecore_chain_recycle_consumed(struct ecore_chain *p_chain)
{
        test_and_skip(p_chain, prod_idx);
        if (is_chain_u16(p_chain))
                p_chain->u.chain16.prod_idx++;
        else
                p_chain->u.chain32.prod_idx++;
}

/**
 * @brief ecore_chain_consume -
 *
 * A chain in which the driver utilizes data written by a different source
 * (i.e., FW) should use this to access passed buffers.
 *
 * @param p_chain
 *
 * @return void*, a pointer to the next buffer written
 */
static OSAL_INLINE void *ecore_chain_consume(struct ecore_chain *p_chain)
{
        void *p_ret = OSAL_NULL, *p_cons_idx, *p_cons_page_idx;

        if (is_chain_u16(p_chain)) {
                if ((p_chain->u.chain16.cons_idx &
                     p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
                        p_cons_idx = &p_chain->u.chain16.cons_idx;
                        p_cons_page_idx = &p_chain->pbl.c.u16.cons_page_idx;
                        ecore_chain_advance_page(p_chain, &p_chain->p_cons_elem,
                                                 p_cons_idx, p_cons_page_idx);
                }
                p_chain->u.chain16.cons_idx++;
        } else {
                if ((p_chain->u.chain32.cons_idx &
                     p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
                        p_cons_idx = &p_chain->u.chain32.cons_idx;
                        p_cons_page_idx = &p_chain->pbl.c.u32.cons_page_idx;
                        ecore_chain_advance_page(p_chain, &p_chain->p_cons_elem,
                                                 p_cons_idx, p_cons_page_idx);
                }
                p_chain->u.chain32.cons_idx++;
        }

        p_ret = p_chain->p_cons_elem;
        p_chain->p_cons_elem = (void *)(((u8 *)p_chain->p_cons_elem) +
                                        p_chain->elem_size);

        return p_ret;
}
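
/* Minimal consume-side usage sketch (illustrative only; "rxq->chain",
 * "struct my_cqe" and "hw_cons" are hypothetical placeholders). A typical
 * completion loop consumes entries up to the index reported by firmware and,
 * for a chain used in CONSUME mode, returns the slots by recycling them:
 *
 *      struct my_cqe *cqe;
 *
 *      while (ecore_chain_get_cons_idx(&rxq->chain) != hw_cons) {
 *              cqe = (struct my_cqe *)ecore_chain_consume(&rxq->chain);
 *              // ... process the completion entry ...
 *              ecore_chain_recycle_consumed(&rxq->chain);
 *      }
 */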

/**
 * @brief ecore_chain_reset -
 *
 * Resets the chain to its start state
 *
 * @param p_chain pointer to a previously allocated chain
 */
static OSAL_INLINE void ecore_chain_reset(struct ecore_chain *p_chain)
{
        u32 i;

        if (is_chain_u16(p_chain)) {
                p_chain->u.chain16.prod_idx = 0;
                p_chain->u.chain16.cons_idx = 0;
        } else {
                p_chain->u.chain32.prod_idx = 0;
                p_chain->u.chain32.cons_idx = 0;
        }
        p_chain->p_cons_elem = p_chain->p_virt_addr;
        p_chain->p_prod_elem = p_chain->p_virt_addr;

        if (p_chain->mode == ECORE_CHAIN_MODE_PBL) {
                /* Use "page_cnt-1" as a reset value for the prod/cons page's
                 * indices, to avoid unnecessary page advancing on the first
                 * call to ecore_chain_produce/consume. Instead, the indices
                 * will be advanced to page_cnt and then will be wrapped to 0.
                 */
                u32 reset_val = p_chain->page_cnt - 1;

                if (is_chain_u16(p_chain)) {
                        p_chain->pbl.c.u16.prod_page_idx = (u16)reset_val;
                        p_chain->pbl.c.u16.cons_page_idx = (u16)reset_val;
                } else {
                        p_chain->pbl.c.u32.prod_page_idx = reset_val;
                        p_chain->pbl.c.u32.cons_page_idx = reset_val;
                }
        }

        switch (p_chain->intended_use) {
        case ECORE_CHAIN_USE_TO_CONSUME:
                /* produce empty elements */
                for (i = 0; i < p_chain->capacity; i++)
                        ecore_chain_recycle_consumed(p_chain);
                break;

        case ECORE_CHAIN_USE_TO_CONSUME_PRODUCE:
        case ECORE_CHAIN_USE_TO_PRODUCE:
        default:
                /* Do nothing */
                break;
        }
}
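
/* Worked example for the PBL reset value above (illustrative): with
 * page_cnt == 4 the prod/cons page indices are reset to 3. The first
 * produce/consume call that reaches next_page_mask then increments the page
 * index to 4 inside ecore_chain_advance_page(), which equals page_cnt and is
 * therefore wrapped to 0 - i.e. the first page of the chain, as expected.
 */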

/**
 * @brief ecore_chain_init_params -
 *
 * Initializes a basic chain struct
 *
 * @param p_chain
 * @param page_cnt      number of pages in the allocated buffer
 * @param elem_size     size of each element in the chain
 * @param intended_use
 * @param mode
 * @param cnt_type
 * @param dp_ctx
 */
static OSAL_INLINE void
ecore_chain_init_params(struct ecore_chain *p_chain, u32 page_cnt, u8 elem_size,
                        enum ecore_chain_use_mode intended_use,
                        enum ecore_chain_mode mode,
                        enum ecore_chain_cnt_type cnt_type, void *dp_ctx)
{
        /* chain fixed parameters */
        p_chain->p_virt_addr = OSAL_NULL;
        p_chain->p_phys_addr = 0;
        p_chain->elem_size = elem_size;
        p_chain->intended_use = (u8)intended_use;
        p_chain->mode = mode;
        p_chain->cnt_type = (u8)cnt_type;

        p_chain->elem_per_page = ELEMS_PER_PAGE(elem_size);
        p_chain->usable_per_page = USABLE_ELEMS_PER_PAGE(elem_size, mode);
        p_chain->elem_per_page_mask = p_chain->elem_per_page - 1;
        p_chain->elem_unusable = UNUSABLE_ELEMS_PER_PAGE(elem_size, mode);
        p_chain->next_page_mask = (p_chain->usable_per_page &
                                   p_chain->elem_per_page_mask);

        p_chain->page_cnt = page_cnt;
        p_chain->capacity = p_chain->usable_per_page * page_cnt;
        p_chain->size = p_chain->elem_per_page * page_cnt;
        p_chain->b_external_pbl = false;
        p_chain->pbl_sp.p_phys_table = 0;
        p_chain->pbl_sp.p_virt_table = OSAL_NULL;
        p_chain->pbl.pp_virt_addr_tbl = OSAL_NULL;

        p_chain->dp_ctx = dp_ctx;
}
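
/* Illustrative initialization sequence (a sketch only; the driver's real
 * allocation path lives elsewhere, and "p_virt"/"p_phys" stand for a
 * caller-provided DMA-able page of ECORE_CHAIN_PAGE_SIZE bytes):
 *
 *      struct ecore_chain chain;
 *
 *      ecore_chain_init_params(&chain, 1, 8,           // one page, 8B elems
 *                              ECORE_CHAIN_USE_TO_PRODUCE,
 *                              ECORE_CHAIN_MODE_SINGLE,
 *                              ECORE_CHAIN_CNT_TYPE_U16, OSAL_NULL);
 *      ecore_chain_init_mem(&chain, p_virt, p_phys);
 *      ecore_chain_reset(&chain);
 *
 * After this, ecore_chain_produce()/ecore_chain_consume() can be used on the
 * chain.
 */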

/**
 * @brief ecore_chain_init_mem -
 *
 * Initializes a basic chain struct with its chain buffers
 *
 * @param p_chain
 * @param p_virt_addr   virtual address of allocated buffer's beginning
 * @param p_phys_addr   physical address of allocated buffer's beginning
 *
 */
static OSAL_INLINE void ecore_chain_init_mem(struct ecore_chain *p_chain,
                                             void *p_virt_addr,
                                             dma_addr_t p_phys_addr)
{
        p_chain->p_virt_addr = p_virt_addr;
        p_chain->p_phys_addr = p_phys_addr;
}

/**
 * @brief ecore_chain_init_pbl_mem -
 *
 * Initializes a basic chain struct with its pbl buffers
 *
 * @param p_chain
 * @param p_virt_pbl    pointer to a pre-allocated side table which will hold
 *                      virtual page addresses.
 * @param p_phys_pbl    pointer to a pre-allocated side table which will hold
 *                      physical page addresses.
 * @param pp_virt_addr_tbl
 *                      pointer to a pre-allocated side table which will hold
 *                      the virtual addresses of the chain pages.
 *
 */
static OSAL_INLINE void ecore_chain_init_pbl_mem(struct ecore_chain *p_chain,
                                                 void *p_virt_pbl,
                                                 dma_addr_t p_phys_pbl,
                                                 void **pp_virt_addr_tbl)
{
        p_chain->pbl_sp.p_phys_table = p_phys_pbl;
        p_chain->pbl_sp.p_virt_table = p_virt_pbl;
        p_chain->pbl.pp_virt_addr_tbl = pp_virt_addr_tbl;
}

/**
 * @brief ecore_chain_init_next_ptr_elem -
 *
 * Initializes a next pointer element
 *
 * @param p_chain
 * @param p_virt_curr   virtual address of a chain page of which the next
 *                      pointer element is initialized
 * @param p_virt_next   virtual address of the next chain page
 * @param p_phys_next   physical address of the next chain page
 *
 */
static OSAL_INLINE void
ecore_chain_init_next_ptr_elem(struct ecore_chain *p_chain, void *p_virt_curr,
                               void *p_virt_next, dma_addr_t p_phys_next)
{
        struct ecore_chain_next *p_next;
        u32 size;

        size = p_chain->elem_size * p_chain->usable_per_page;
        p_next = (struct ecore_chain_next *)((u8 *)p_virt_curr + size);

        DMA_REGPAIR_LE(p_next->next_phys, p_phys_next);

        p_next->next_virt = p_virt_next;
}
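
/* Linking sketch for ECORE_CHAIN_MODE_NEXT_PTR (illustrative; "page_virt[]"
 * and "page_phys[]" are hypothetical per-page address arrays provided by the
 * caller). Each page's trailing next-pointer element is pointed at the
 * following page, and the last page is linked back to the first:
 *
 *      u32 i;
 *
 *      for (i = 0; i < page_cnt; i++)
 *              ecore_chain_init_next_ptr_elem(&chain, page_virt[i],
 *                                             page_virt[(i + 1) % page_cnt],
 *                                             page_phys[(i + 1) % page_cnt]);
 */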

/**
 * @brief ecore_chain_get_last_elem -
 *
 * Returns a pointer to the last element of the chain
 *
 * @param p_chain
 *
 * @return void*
 */
static OSAL_INLINE void *ecore_chain_get_last_elem(struct ecore_chain *p_chain)
{
        struct ecore_chain_next *p_next = OSAL_NULL;
        void *p_virt_addr = OSAL_NULL;
        u32 size, last_page_idx;

        if (!p_chain->p_virt_addr)
                goto out;

        switch (p_chain->mode) {
        case ECORE_CHAIN_MODE_NEXT_PTR:
                size = p_chain->elem_size * p_chain->usable_per_page;
                p_virt_addr = p_chain->p_virt_addr;
                p_next = (struct ecore_chain_next *)((u8 *)p_virt_addr + size);
                while (p_next->next_virt != p_chain->p_virt_addr) {
                        p_virt_addr = p_next->next_virt;
                        p_next =
                            (struct ecore_chain_next *)((u8 *)p_virt_addr +
                                                        size);
                }
                break;
        case ECORE_CHAIN_MODE_SINGLE:
                p_virt_addr = p_chain->p_virt_addr;
                break;
        case ECORE_CHAIN_MODE_PBL:
                last_page_idx = p_chain->page_cnt - 1;
                p_virt_addr = p_chain->pbl.pp_virt_addr_tbl[last_page_idx];
                break;
        }
        /* At this stage p_virt_addr points to the last page of the chain */
        size = p_chain->elem_size * (p_chain->usable_per_page - 1);
        p_virt_addr = ((u8 *)p_virt_addr + size);
out:
        return p_virt_addr;
}

/**
 * @brief ecore_chain_set_prod - sets the prod to the given value
 *
 * @param p_chain
 * @param prod_idx
 * @param p_prod_elem
 */
static OSAL_INLINE void ecore_chain_set_prod(struct ecore_chain *p_chain,
                                             u32 prod_idx, void *p_prod_elem)
{
        if (p_chain->mode == ECORE_CHAIN_MODE_PBL) {
                /* Use "prod_idx-1" since ecore_chain_produce() advances the
                 * page index before the producer index when getting to
                 * "next_page_mask".
                 */
                u32 elem_idx =
                        (prod_idx - 1 + p_chain->capacity) % p_chain->capacity;
                u32 page_idx = elem_idx / p_chain->elem_per_page;

                if (is_chain_u16(p_chain))
                        p_chain->pbl.c.u16.prod_page_idx = (u16)page_idx;
                else
                        p_chain->pbl.c.u32.prod_page_idx = page_idx;
        }

        if (is_chain_u16(p_chain))
                p_chain->u.chain16.prod_idx = (u16)prod_idx;
        else
                p_chain->u.chain32.prod_idx = prod_idx;
        p_chain->p_prod_elem = p_prod_elem;
}
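
/* Worked example for the page-index derivation above (illustrative): for a
 * PBL chain with 2 pages of 512 elements (capacity 1024), setting
 * prod_idx == 700 gives elem_idx = (700 - 1 + 1024) % 1024 = 699 and
 * page_idx = 699 / 512 = 1. For prod_idx == 512 the result is page_idx == 0,
 * matching the fact that ecore_chain_produce() only advances the page index
 * when it actually produces the first element of the next page.
 */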

/**
 * @brief ecore_chain_set_cons - sets the cons to the given value
 *
 * @param p_chain
 * @param cons_idx
 * @param p_cons_elem
 */
static OSAL_INLINE void ecore_chain_set_cons(struct ecore_chain *p_chain,
                                             u32 cons_idx, void *p_cons_elem)
{
        if (p_chain->mode == ECORE_CHAIN_MODE_PBL) {
                /* Use "cons_idx-1" since ecore_chain_consume() advances the
                 * page index before the consumer index when getting to
                 * "next_page_mask".
                 */
                u32 elem_idx =
                        (cons_idx - 1 + p_chain->capacity) % p_chain->capacity;
                u32 page_idx = elem_idx / p_chain->elem_per_page;

                if (is_chain_u16(p_chain))
                        p_chain->pbl.c.u16.cons_page_idx = (u16)page_idx;
                else
                        p_chain->pbl.c.u32.cons_page_idx = page_idx;
        }

        if (is_chain_u16(p_chain))
                p_chain->u.chain16.cons_idx = (u16)cons_idx;
        else
                p_chain->u.chain32.cons_idx = cons_idx;

        p_chain->p_cons_elem = p_cons_elem;
}

/**
 * @brief ecore_chain_pbl_zero_mem - set chain memory to 0
 *
 * @param p_chain
 */
static OSAL_INLINE void ecore_chain_pbl_zero_mem(struct ecore_chain *p_chain)
{
        u32 i, page_cnt;

        if (p_chain->mode != ECORE_CHAIN_MODE_PBL)
                return;

        page_cnt = ecore_chain_get_page_cnt(p_chain);

        for (i = 0; i < page_cnt; i++)
                OSAL_MEM_ZERO(p_chain->pbl.pp_virt_addr_tbl[i],
                              ECORE_CHAIN_PAGE_SIZE);
}

int ecore_chain_print(struct ecore_chain *p_chain, char *buffer,
                      u32 buffer_size, u32 *element_indx, u32 stop_indx,
                      bool print_metadata,
                      int (*func_ptr_print_element)(struct ecore_chain *p_chain,
                                                    void *p_element,
                                                    char *buffer),
                      int (*func_ptr_print_metadata)(struct ecore_chain
                                                     *p_chain,
                                                     char *buffer));

#endif /* __ECORE_CHAIN_H__ */