1 /*
2  * Copyright (c) 2016 QLogic Corporation.
3  * All rights reserved.
4  * www.qlogic.com
5  *
6  * See LICENSE.qede_pmd for copyright and licensing details.
7  */
8
9 #include "bcm_osal.h"
10 #include "ecore_hw.h"
11 #include "ecore_init_ops.h"
12 #include "reg_addr.h"
13 #include "ecore_rt_defs.h"
14 #include "ecore_hsi_common.h"
15 #include "ecore_hsi_init_func.h"
16 #include "ecore_hsi_eth.h"
17 #include "ecore_hsi_init_tool.h"
18 #include "ecore_iro.h"
19 #include "ecore_init_fw_funcs.h"
20
21 #define CDU_VALIDATION_DEFAULT_CFG 61
22
23 static u16 con_region_offsets[3][NUM_OF_CONNECTION_TYPES_E4] = {
24         { 400,  336,  352,  304,  304,  384,  416,  352}, /* region 3 offsets */
25         { 528,  496,  416,  448,  448,  512,  544,  480}, /* region 4 offsets */
26         { 608,  544,  496,  512,  576,  592,  624,  560}  /* region 5 offsets */
27 };
28 static u16 task_region_offsets[1][NUM_OF_CONNECTION_TYPES_E4] = {
29         { 240,  240,  112,    0,    0,    0,    0,   96}  /* region 1 offsets */
30 };
31
32 /* General constants */
33 #define QM_PQ_MEM_4KB(pq_size) (pq_size ? DIV_ROUND_UP((pq_size + 1) * \
34                                 QM_PQ_ELEMENT_SIZE, 0x1000) : 0)
35 #define QM_PQ_SIZE_256B(pq_size) (pq_size ? DIV_ROUND_UP(pq_size, 0x100) - 1 : \
36                                   0)
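/* Worked example (illustrative only; assumes QM_PQ_ELEMENT_SIZE is 4 bytes,
 * a value defined outside this file): for a PQ sized for 2048 CIDs,
 * QM_PQ_MEM_4KB(2048) = DIV_ROUND_UP((2048 + 1) * 4, 0x1000) = 3 pages of 4KB,
 * while QM_PQ_SIZE_256B(2048) = DIV_ROUND_UP(2048, 0x100) - 1 = 7, i.e. the
 * size is programmed as "number of 256B units minus one".
 */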
37 #define QM_INVALID_PQ_ID                0xffff
38
39 /* Feature enable */
40 #define QM_BYPASS_EN                    1
41 #define QM_BYTE_CRD_EN                  1
42
43 /* Other PQ constants */
44 #define QM_OTHER_PQS_PER_PF             4
45
46 /* VOQ constants */
47 #define QM_E5_NUM_EXT_VOQ               (MAX_NUM_PORTS_E5 * NUM_OF_TCS)
48
49 /* WFQ constants: */
50
51 /* Upper bound in MB, 10 * burst size of 1ms in 50Gbps */
52 #define QM_WFQ_UPPER_BOUND              62500000
53
54 /* Bit  of VOQ in WFQ VP PQ map */
55 #define QM_WFQ_VP_PQ_VOQ_SHIFT          0
56
57 /* Bit  of PF in WFQ VP PQ map */
58 #define QM_WFQ_VP_PQ_PF_E4_SHIFT        5
59 #define QM_WFQ_VP_PQ_PF_E5_SHIFT        6
60
61 /* 0x9000 = 4*9*1024 */
62 #define QM_WFQ_INC_VAL(weight)          ((weight) * 0x9000)
63
64 /* Max WFQ increment value is 0.7 * upper bound */
65 #define QM_WFQ_MAX_INC_VAL              ((QM_WFQ_UPPER_BOUND * 7) / 10)
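/* Worked example (illustrative only): a WFQ weight of 1 yields an increment of
 * 0x9000 = 36864 credits, so the largest weight that still passes the
 * QM_WFQ_MAX_INC_VAL check (43750000) is 43750000 / 36864 = 1186.
 */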
66
67 /* Number of VOQs in E5 QmWfqCrd register */
68 #define QM_WFQ_CRD_E5_NUM_VOQS          16
69
70 /* RL constants: */
71
72 /* Period in us */
73 #define QM_RL_PERIOD                    5
74
75 /* Period in 25MHz cycles */
76 #define QM_RL_PERIOD_CLK_25M            (25 * QM_RL_PERIOD)
77
78 /* RL increment value - rate is specified in Mbps. The factor of 1.01 was
79 * added after seeing only a 99% rate reached on a 25Gbps port in a DPDK RFC
80 * 2544 test. In this scenario the PF RL was reducing the line rate to 99%,
81 * although the credit increment value was correct and the FW calculated
82 * correct packet sizes. The reason for the RL inaccuracy is unknown at
83 * this point.
84 */
85 #define QM_RL_INC_VAL(rate) \
86         OSAL_MAX_T(u32, (u32)(((rate ? rate : 100000) * QM_RL_PERIOD * 101) / \
87         (8 * 100)), 1)
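/* Worked example (illustrative only): QM_RL_INC_VAL(25000) for a 25G port is
 * (25000 * 5 * 101) / (8 * 100) = 15781 credits per RL period; a rate of 0
 * falls back to the 100000 Mbps default and gives 63125.
 */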
88
89 /* PF RL Upper bound is set to 10 * burst size of 1ms in 50Gbps */
90 #define QM_PF_RL_UPPER_BOUND            62500000
91
92 /* Max PF RL increment value is 0.7 * upper bound */
93 #define QM_PF_RL_MAX_INC_VAL            ((QM_PF_RL_UPPER_BOUND * 7) / 10)
94
95 /* Vport RL Upper bound, link speed is in Mbps */
96 #define QM_VP_RL_UPPER_BOUND(speed) \
97         ((u32)OSAL_MAX_T(u32, QM_RL_INC_VAL(speed), 9700 + 1000))
98
99 /* Max Vport RL increment value is the Vport RL upper bound */
100 #define QM_VP_RL_MAX_INC_VAL(speed)     QM_VP_RL_UPPER_BOUND(speed)
101
102 /* Vport RL credit threshold in case of QM bypass */
103 #define QM_VP_RL_BYPASS_THRESH_SPEED    (QM_VP_RL_UPPER_BOUND(10000) - 1)
104
105 /* AFullOprtnstcCrdMask constants */
106 #define QM_OPPOR_LINE_VOQ_DEF           1
107 #define QM_OPPOR_FW_STOP_DEF            0
108 #define QM_OPPOR_PQ_EMPTY_DEF           1
109
110 /* Command Queue constants: */
111
112 /* Pure LB CmdQ lines (+spare) */
113 #define PBF_CMDQ_PURE_LB_LINES          150
114
115 #define PBF_CMDQ_LINES_E5_RSVD_RATIO    8
116
117 #define PBF_CMDQ_LINES_RT_OFFSET(ext_voq) \
118         (PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + \
119          ext_voq * \
120          (PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET - \
121           PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET))
122
123 #define PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq) \
124         (PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET + \
125          ext_voq * \
126          (PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET - \
127           PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET))
128
129 #define QM_VOQ_LINE_CRD(pbf_cmd_lines) \
130         ((((pbf_cmd_lines) - 4) * 2) | QM_LINE_CRD_REG_SIGN_BIT)
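/* Worked example (illustrative only): the pure LB VOQ is given
 * PBF_CMDQ_PURE_LB_LINES = 150 lines, so QM_VOQ_LINE_CRD(150) =
 * (150 - 4) * 2 = 292 line credits, OR'd with QM_LINE_CRD_REG_SIGN_BIT
 * before being written to the credit registers.
 */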
131
132 /* BTB: blocks constants (block size = 256B) */
133
134 /* 256B blocks in 9700B packet */
135 #define BTB_JUMBO_PKT_BLOCKS            38
136
137 /* Headroom per-port */
138 #define BTB_HEADROOM_BLOCKS             BTB_JUMBO_PKT_BLOCKS
139 #define BTB_PURE_LB_FACTOR              10
140
141 /* Factored (hence really 0.7) */
142 #define BTB_PURE_LB_RATIO               7
143
144 /* QM stop command constants */
145 #define QM_STOP_PQ_MASK_WIDTH           32
146 #define QM_STOP_CMD_ADDR                2
147 #define QM_STOP_CMD_STRUCT_SIZE         2
148 #define QM_STOP_CMD_PAUSE_MASK_OFFSET   0
149 #define QM_STOP_CMD_PAUSE_MASK_SHIFT    0
150 #define QM_STOP_CMD_PAUSE_MASK_MASK     0xffffffff /* @DPDK */
151 #define QM_STOP_CMD_GROUP_ID_OFFSET     1
152 #define QM_STOP_CMD_GROUP_ID_SHIFT      16
153 #define QM_STOP_CMD_GROUP_ID_MASK       15
154 #define QM_STOP_CMD_PQ_TYPE_OFFSET      1
155 #define QM_STOP_CMD_PQ_TYPE_SHIFT       24
156 #define QM_STOP_CMD_PQ_TYPE_MASK        1
157 #define QM_STOP_CMD_MAX_POLL_COUNT      100
158 #define QM_STOP_CMD_POLL_PERIOD_US      500
159
160 /* QM command macros */
161 #define QM_CMD_STRUCT_SIZE(cmd) cmd##_STRUCT_SIZE
162 #define QM_CMD_SET_FIELD(var, cmd, field, value) \
163         SET_FIELD(var[cmd##_##field##_OFFSET], cmd##_##field, value)
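/* Worked example (illustrative only; assumes SET_FIELD() applies the matching
 * _SHIFT/_MASK pair): QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, GROUP_ID, 3)
 * expands to SET_FIELD(cmd_arr[QM_STOP_CMD_GROUP_ID_OFFSET],
 * QM_STOP_CMD_GROUP_ID, 3), i.e. it places group ID 3 in bits 16-19 of the
 * second command dword, per the offset/shift/mask constants above.
 */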
164
165 #define QM_INIT_TX_PQ_MAP(p_hwfn, map, chip, pq_id, rl_valid, \
166                           vp_pq_id, rl_id, ext_voq, wrr) \
167         do {                                            \
168                 OSAL_MEMSET(&map, 0, sizeof(map)); \
169                 SET_FIELD(map.reg, QM_RF_PQ_MAP_##chip##_PQ_VALID, 1); \
170                 SET_FIELD(map.reg, QM_RF_PQ_MAP_##chip##_RL_VALID, rl_valid); \
171                 SET_FIELD(map.reg, QM_RF_PQ_MAP_##chip##_VP_PQ_ID, vp_pq_id); \
172                 SET_FIELD(map.reg, QM_RF_PQ_MAP_##chip##_RL_ID, rl_id); \
173                 SET_FIELD(map.reg, QM_RF_PQ_MAP_##chip##_VOQ, ext_voq); \
174                 SET_FIELD(map.reg, \
175                           QM_RF_PQ_MAP_##chip##_WRR_WEIGHT_GROUP, wrr); \
176                 STORE_RT_REG(p_hwfn, QM_REG_TXPQMAP_RT_OFFSET + pq_id, \
177                              *((u32 *)&map)); \
178         } while (0)
179
180 #define WRITE_PQ_INFO_TO_RAM            1
181 #define PQ_INFO_ELEMENT(vp, pf, tc, port, rl_valid, rl) \
182         (((vp) << 0) | ((pf) << 12) | ((tc) << 16) |    \
183          ((port) << 20) | ((rl_valid) << 22) | ((rl) << 24))
184 #define PQ_INFO_RAM_GRC_ADDRESS(pq_id) \
185         (XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM + 21768 + (pq_id) * 4)
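/* Worked example (illustrative only): PQ_INFO_ELEMENT packs its arguments into
 * a single dword - VPORT in bits 0-11, PF in 12-15, TC in 16-19, port in
 * 20-21, rl_valid in bit 22 and the RL ID from bit 24. For instance,
 * PQ_INFO_ELEMENT(5, 2, 1, 0, 1, 5) evaluates to 0x05412005.
 */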
186
187 /******************** INTERNAL IMPLEMENTATION *********************/
188
189 /* Returns the external VOQ number */
190 static u8 ecore_get_ext_voq(struct ecore_hwfn *p_hwfn,
191                             u8 port_id,
192                             u8 tc,
193                             u8 max_phys_tcs_per_port)
194 {
195         if (tc == PURE_LB_TC)
196                 return NUM_OF_PHYS_TCS * (MAX_NUM_PORTS_BB) + port_id;
197         else
198                 return port_id * (max_phys_tcs_per_port) + tc;
199 }
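/* Worked example (illustrative only): with max_phys_tcs_per_port = 4, TC 2 of
 * port 1 maps to VOQ 1 * 4 + 2 = 6. Pure LB TCs are placed after all physical
 * VOQs; assuming NUM_OF_PHYS_TCS = 8 and MAX_NUM_PORTS_BB = 2 (values taken
 * from the common headers, not defined here), port 1 gets pure LB VOQ
 * 8 * 2 + 1 = 17.
 */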
200
201 /* Prepare PF RL enable/disable runtime init values */
202 static void ecore_enable_pf_rl(struct ecore_hwfn *p_hwfn, bool pf_rl_en)
203 {
204         STORE_RT_REG(p_hwfn, QM_REG_RLPFENABLE_RT_OFFSET, pf_rl_en ? 1 : 0);
205         if (pf_rl_en) {
206                 u8 num_ext_voqs = MAX_NUM_VOQS_E4;
207                 u64 voq_bit_mask = ((u64)1 << num_ext_voqs) - 1;
208
209                 /* Enable RLs for all VOQs */
210                 STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_RT_OFFSET,
211                              (u32)voq_bit_mask);
212 #ifdef QM_REG_RLPFVOQENABLE_MSB_RT_OFFSET
213                 if (num_ext_voqs >= 32)
214                         STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_MSB_RT_OFFSET,
215                                      (u32)(voq_bit_mask >> 32));
216 #endif
217
218                 /* Write RL period */
219                 STORE_RT_REG(p_hwfn, QM_REG_RLPFPERIOD_RT_OFFSET,
220                              QM_RL_PERIOD_CLK_25M);
221                 STORE_RT_REG(p_hwfn, QM_REG_RLPFPERIODTIMER_RT_OFFSET,
222                              QM_RL_PERIOD_CLK_25M);
223
224                 /* Set credit threshold for QM bypass flow */
225                 if (QM_BYPASS_EN)
226                         STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET,
227                                      QM_PF_RL_UPPER_BOUND);
228         }
229 }
230
231 /* Prepare PF WFQ enable/disable runtime init values */
232 static void ecore_enable_pf_wfq(struct ecore_hwfn *p_hwfn, bool pf_wfq_en)
233 {
234         STORE_RT_REG(p_hwfn, QM_REG_WFQPFENABLE_RT_OFFSET, pf_wfq_en ? 1 : 0);
235
236         /* Set credit threshold for QM bypass flow */
237         if (pf_wfq_en && QM_BYPASS_EN)
238                 STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET,
239                              QM_WFQ_UPPER_BOUND);
240 }
241
242 /* Prepare VPORT RL enable/disable runtime init values */
243 static void ecore_enable_vport_rl(struct ecore_hwfn *p_hwfn, bool vport_rl_en)
244 {
245         STORE_RT_REG(p_hwfn, QM_REG_RLGLBLENABLE_RT_OFFSET,
246                      vport_rl_en ? 1 : 0);
247         if (vport_rl_en) {
248                 /* Write RL period (use timer 0 only) */
249                 STORE_RT_REG(p_hwfn, QM_REG_RLGLBLPERIOD_0_RT_OFFSET,
250                              QM_RL_PERIOD_CLK_25M);
251                 STORE_RT_REG(p_hwfn, QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET,
252                              QM_RL_PERIOD_CLK_25M);
253
254                 /* Set credit threshold for QM bypass flow */
255                 if (QM_BYPASS_EN)
256                         STORE_RT_REG(p_hwfn,
257                                      QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET,
258                                      QM_VP_RL_BYPASS_THRESH_SPEED);
259         }
260 }
261
262 /* Prepare VPORT WFQ enable/disable runtime init values */
263 static void ecore_enable_vport_wfq(struct ecore_hwfn *p_hwfn, bool vport_wfq_en)
264 {
265         STORE_RT_REG(p_hwfn, QM_REG_WFQVPENABLE_RT_OFFSET,
266                      vport_wfq_en ? 1 : 0);
267
268         /* Set credit threshold for QM bypass flow */
269         if (vport_wfq_en && QM_BYPASS_EN)
270                 STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET,
271                              QM_WFQ_UPPER_BOUND);
272 }
273
274 /* Prepare runtime init values to allocate PBF command queue lines for
275  * the specified VOQ
276  */
277 static void ecore_cmdq_lines_voq_rt_init(struct ecore_hwfn *p_hwfn,
278                                          u8 ext_voq,
279                                          u16 cmdq_lines)
280 {
281         u32 qm_line_crd;
282
283         qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines);
284
285         OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(ext_voq),
286                          (u32)cmdq_lines);
287         STORE_RT_REG(p_hwfn, QM_REG_VOQCRDLINE_RT_OFFSET + ext_voq,
288                          qm_line_crd);
289         STORE_RT_REG(p_hwfn, QM_REG_VOQINITCRDLINE_RT_OFFSET + ext_voq,
290                          qm_line_crd);
291 }
292
293 /* Prepare runtime init values to allocate PBF command queue lines. */
294 static void ecore_cmdq_lines_rt_init(struct ecore_hwfn *p_hwfn,
295                                      u8 max_ports_per_engine,
296                                      u8 max_phys_tcs_per_port,
297                                      struct init_qm_port_params
298                                      port_params[MAX_NUM_PORTS])
299 {
300         u8 tc, ext_voq, port_id, num_tcs_in_port;
301         u8 num_ext_voqs = MAX_NUM_VOQS_E4;
302
303         /* Clear PBF lines of all VOQs */
304         for (ext_voq = 0; ext_voq < num_ext_voqs; ext_voq++)
305                 STORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(ext_voq), 0);
306
307         for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
308                 u16 phys_lines, phys_lines_per_tc;
309
310                 if (!port_params[port_id].active)
311                         continue;
312
313                 /* Find number of command queue lines to divide between the
314                  * active physical TCs. In E5, 1/8 of the lines are reserved.
315                  * The lines for the pure LB TC are subtracted.
316                  */
317                 phys_lines = port_params[port_id].num_pbf_cmd_lines;
318                 phys_lines -= PBF_CMDQ_PURE_LB_LINES;
319
320                 /* Find #lines per active physical TC */
321                 num_tcs_in_port = 0;
322                 for (tc = 0; tc < max_phys_tcs_per_port; tc++)
323                         if (((port_params[port_id].active_phys_tcs >> tc) &
324                               0x1) == 1)
325                                 num_tcs_in_port++;
326                 phys_lines_per_tc = phys_lines / num_tcs_in_port;
327
328                 /* Init registers per active TC */
329                 for (tc = 0; tc < max_phys_tcs_per_port; tc++) {
330                         ext_voq = ecore_get_ext_voq(p_hwfn, port_id, tc,
331                                                     max_phys_tcs_per_port);
332                         if (((port_params[port_id].active_phys_tcs >> tc) &
333                             0x1) == 1)
334                                 ecore_cmdq_lines_voq_rt_init(p_hwfn, ext_voq,
335                                                              phys_lines_per_tc);
336                 }
337
338                 /* Init registers for pure LB TC */
339                 ext_voq = ecore_get_ext_voq(p_hwfn, port_id, PURE_LB_TC,
340                                             max_phys_tcs_per_port);
341                 ecore_cmdq_lines_voq_rt_init(p_hwfn, ext_voq,
342                                              PBF_CMDQ_PURE_LB_LINES);
343         }
344 }
345
346 /*
347  * Prepare runtime init values to allocate guaranteed BTB blocks for the
348  * specified port. The guaranteed BTB space is divided between the TCs as
349  * follows (shared space is currently not used):
350  * 1. Parameters:
351  *     B - BTB blocks for this port
352  *     C - number of physical TCs for this port
353  * 2. Calculation:
354  *     a. 38 blocks (9700B jumbo frame) are allocated for global per port
355  *        headroom
356  *     b. B = B - 38 (remainder after global headroom allocation)
357  *     c. MAX(38, B / (C + 0.7)) blocks are allocated for the pure LB VOQ.
358  *     d. B = B - MAX(38, B / (C + 0.7)) (remainder after pure LB allocation).
359  *     e. B/C blocks are allocated for each physical TC.
360  * Assumptions:
361  * - MTU is up to 9700 bytes (38 blocks)
362  * - All TCs are considered symmetrical (same rate and packet size)
363  * - No optimization for lossy TC (all are considered lossless). Shared space is
364  *   not enabled and allocated for each TC.
365  */
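/* Worked example of the calculation above (illustrative only, hypothetical
 * numbers): for a port with 200 guaranteed BTB blocks and 4 active physical
 * TCs, 200 - 38 = 162 blocks remain after the headroom, the pure LB VOQ gets
 * its 38-block minimum, and each physical TC gets (162 - 38) / 4 = 31 blocks.
 */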
366 static void ecore_btb_blocks_rt_init(struct ecore_hwfn *p_hwfn,
367                                      u8 max_ports_per_engine,
368                                      u8 max_phys_tcs_per_port,
369                                      struct init_qm_port_params
370                                      port_params[MAX_NUM_PORTS])
371 {
372         u32 usable_blocks, pure_lb_blocks, phys_blocks;
373         u8 tc, ext_voq, port_id, num_tcs_in_port;
374
375         for (port_id = 0; port_id < max_ports_per_engine; port_id++) {
376                 if (!port_params[port_id].active)
377                         continue;
378
379                 /* Subtract headroom blocks */
380                 usable_blocks = port_params[port_id].num_btb_blocks -
381                                 BTB_HEADROOM_BLOCKS;
382
383                 /* Find blocks per physical TC. Use a factor to avoid
384                  * floating-point arithmetic.
385                  */
386                 num_tcs_in_port = 0;
387                 for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++)
388                         if (((port_params[port_id].active_phys_tcs >> tc) &
389                               0x1) == 1)
390                                 num_tcs_in_port++;
391
392                 pure_lb_blocks = (usable_blocks * BTB_PURE_LB_FACTOR) /
393                                   (num_tcs_in_port * BTB_PURE_LB_FACTOR +
394                                    BTB_PURE_LB_RATIO);
395                 pure_lb_blocks = OSAL_MAX_T(u32, BTB_JUMBO_PKT_BLOCKS,
396                                             pure_lb_blocks /
397                                             BTB_PURE_LB_FACTOR);
398                 phys_blocks = (usable_blocks - pure_lb_blocks) /
399                               num_tcs_in_port;
400
401                 /* Init physical TCs */
402                 for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
403                         if (((port_params[port_id].active_phys_tcs >> tc) &
404                              0x1) == 1) {
405                                 ext_voq = ecore_get_ext_voq(p_hwfn, port_id, tc,
406                                                          max_phys_tcs_per_port);
407                                 STORE_RT_REG(p_hwfn,
408                                         PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq),
409                                         phys_blocks);
410                         }
411                 }
412
413                 /* Init pure LB TC */
414                 ext_voq = ecore_get_ext_voq(p_hwfn, port_id, PURE_LB_TC,
415                                             max_phys_tcs_per_port);
416                 STORE_RT_REG(p_hwfn, PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq),
417                              pure_lb_blocks);
418         }
419 }
420
421 /* Prepare Tx PQ mapping runtime init values for the specified PF */
422 static void ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
423                                     struct ecore_ptt *p_ptt,
424                                     u8 port_id,
425                                     u8 pf_id,
426                                     u8 max_phys_tcs_per_port,
427                                     u32 num_pf_cids,
428                                     u32 num_vf_cids,
429                                     u16 start_pq,
430                                     u16 num_pf_pqs,
431                                     u16 num_vf_pqs,
432                                     u8 start_vport,
433                                     u32 base_mem_addr_4kb,
434                                     struct init_qm_pq_params *pq_params,
435                                     struct init_qm_vport_params *vport_params)
436 {
437         /* A bit per Tx PQ indicating if the PQ is associated with a VF */
438         u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 };
439         u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE;
440         u16 num_pqs, first_pq_group, last_pq_group, i, pq_id, pq_group;
441         u32 pq_mem_4kb, vport_pq_mem_4kb, mem_addr_4kb;
442
443         num_pqs = num_pf_pqs + num_vf_pqs;
444
445         first_pq_group = start_pq / QM_PF_QUEUE_GROUP_SIZE;
446         last_pq_group = (start_pq + num_pqs - 1) / QM_PF_QUEUE_GROUP_SIZE;
447
448         pq_mem_4kb = QM_PQ_MEM_4KB(num_pf_cids);
449         vport_pq_mem_4kb = QM_PQ_MEM_4KB(num_vf_cids);
450         mem_addr_4kb = base_mem_addr_4kb;
451
452         /* Set mapping from PQ group to PF */
453         for (pq_group = first_pq_group; pq_group <= last_pq_group; pq_group++)
454                 STORE_RT_REG(p_hwfn, QM_REG_PQTX2PF_0_RT_OFFSET + pq_group,
455                              (u32)(pf_id));
456
457         /* Set PQ sizes */
458         STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_0_RT_OFFSET,
459                      QM_PQ_SIZE_256B(num_pf_cids));
460         STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_1_RT_OFFSET,
461                      QM_PQ_SIZE_256B(num_vf_cids));
462
463         /* Go over all Tx PQs */
464         for (i = 0, pq_id = start_pq; i < num_pqs; i++, pq_id++) {
465                 u32 max_qm_global_rls = MAX_QM_GLOBAL_RLS;
466                 u8 ext_voq, vport_id_in_pf;
467                 bool is_vf_pq, rl_valid;
468                 u16 first_tx_pq_id;
469
470                 ext_voq = ecore_get_ext_voq(p_hwfn, port_id, pq_params[i].tc_id,
471                                             max_phys_tcs_per_port);
472                 is_vf_pq = (i >= num_pf_pqs);
473                 rl_valid = pq_params[i].rl_valid && pq_params[i].vport_id <
474                            max_qm_global_rls;
475
476                 /* Update first Tx PQ of VPORT/TC */
477                 vport_id_in_pf = pq_params[i].vport_id - start_vport;
478                 first_tx_pq_id =
479                 vport_params[vport_id_in_pf].first_tx_pq_id[pq_params[i].tc_id];
480                 if (first_tx_pq_id == QM_INVALID_PQ_ID) {
481                         u32 map_val = (ext_voq << QM_WFQ_VP_PQ_VOQ_SHIFT) |
482                                        (pf_id << (QM_WFQ_VP_PQ_PF_E4_SHIFT));
483
484                         /* Create new VP PQ */
485                         vport_params[vport_id_in_pf].
486                             first_tx_pq_id[pq_params[i].tc_id] = pq_id;
487                         first_tx_pq_id = pq_id;
488
489                         /* Map VP PQ to VOQ and PF */
490                         STORE_RT_REG(p_hwfn, QM_REG_WFQVPMAP_RT_OFFSET +
491                                      first_tx_pq_id, map_val);
492                 }
493
494                 /* Check RL ID */
495                 if (pq_params[i].rl_valid && pq_params[i].vport_id >=
496                                                         max_qm_global_rls)
497                         DP_NOTICE(p_hwfn, true,
498                                   "Invalid VPORT ID for rate limiter config\n");
499
500                 /* Prepare PQ map entry */
501                 struct qm_rf_pq_map_e4 tx_pq_map;
502                 QM_INIT_TX_PQ_MAP(p_hwfn, tx_pq_map, E4, pq_id, rl_valid ?
503                                   1 : 0,
504                                   first_tx_pq_id, rl_valid ?
505                                   pq_params[i].vport_id : 0,
506                                   ext_voq, pq_params[i].wrr_group);
507
508                 /* Set base address */
509                 STORE_RT_REG(p_hwfn, QM_REG_BASEADDRTXPQ_RT_OFFSET + pq_id,
510                              mem_addr_4kb);
511
512                 /* Write PQ info to RAM */
513                 if (WRITE_PQ_INFO_TO_RAM != 0) {
514                         u32 pq_info = 0;
515                         pq_info = PQ_INFO_ELEMENT(first_tx_pq_id, pf_id,
516                                                   pq_params[i].tc_id, port_id,
517                                                   rl_valid ? 1 : 0, rl_valid ?
518                                                   pq_params[i].vport_id : 0);
519                         ecore_wr(p_hwfn, p_ptt, PQ_INFO_RAM_GRC_ADDRESS(pq_id),
520                                  pq_info);
521                 }
522
523                 /* If VF PQ, add indication to PQ VF mask */
524                 if (is_vf_pq) {
525                         tx_pq_vf_mask[pq_id / QM_PF_QUEUE_GROUP_SIZE] |=
526                                 (1 << (pq_id % QM_PF_QUEUE_GROUP_SIZE));
527                         mem_addr_4kb += vport_pq_mem_4kb;
528                 } else {
529                         mem_addr_4kb += pq_mem_4kb;
530                 }
531         }
532
533         /* Store Tx PQ VF mask to size select register */
534         for (i = 0; i < num_tx_pq_vf_masks; i++)
535                 if (tx_pq_vf_mask[i])
536                         STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET +
537                                      i, tx_pq_vf_mask[i]);
538 }
539
540 /* Prepare Other PQ mapping runtime init values for the specified PF */
541 static void ecore_other_pq_map_rt_init(struct ecore_hwfn *p_hwfn,
542                                        u8 pf_id,
543                                        u32 num_pf_cids,
544                                        u32 num_tids,
545                                        u32 base_mem_addr_4kb)
546 {
547         u32 pq_size, pq_mem_4kb, mem_addr_4kb;
548         u16 i, pq_id, pq_group;
549
550         /* A single other PQ group is used in each PF, where PQ group i is used
551          * in PF i.
552          */
553         pq_group = pf_id;
554         pq_size = num_pf_cids + num_tids;
555         pq_mem_4kb = QM_PQ_MEM_4KB(pq_size);
556         mem_addr_4kb = base_mem_addr_4kb;
557
558         /* Map PQ group to PF */
559         STORE_RT_REG(p_hwfn, QM_REG_PQOTHER2PF_0_RT_OFFSET + pq_group,
560                      (u32)(pf_id));
561
562         /* Set PQ sizes */
563         STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_2_RT_OFFSET,
564                      QM_PQ_SIZE_256B(pq_size));
565
566         /* Set base address */
567         for (i = 0, pq_id = pf_id * QM_PF_QUEUE_GROUP_SIZE;
568              i < QM_OTHER_PQS_PER_PF; i++, pq_id++) {
569                 STORE_RT_REG(p_hwfn, QM_REG_BASEADDROTHERPQ_RT_OFFSET + pq_id,
570                              mem_addr_4kb);
571                 mem_addr_4kb += pq_mem_4kb;
572         }
573 }
574
575 /* Prepare PF WFQ runtime init values for the specified PF.
576  * Return -1 on error.
577  */
578 static int ecore_pf_wfq_rt_init(struct ecore_hwfn *p_hwfn,
579                                 u8 port_id,
580                                 u8 pf_id,
581                                 u16 pf_wfq,
582                                 u8 max_phys_tcs_per_port,
583                                 u16 num_tx_pqs,
584                                 struct init_qm_pq_params *pq_params)
585 {
586         u32 inc_val, crd_reg_offset;
587         u8 ext_voq;
588         u16 i;
589
590         inc_val = QM_WFQ_INC_VAL(pf_wfq);
591         if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
592                 DP_NOTICE(p_hwfn, true,
593                           "Invalid PF WFQ weight configuration\n");
594                 return -1;
595         }
596
597         for (i = 0; i < num_tx_pqs; i++) {
598                 ext_voq = ecore_get_ext_voq(p_hwfn, port_id, pq_params[i].tc_id,
599                                             max_phys_tcs_per_port);
600                 crd_reg_offset = (pf_id < MAX_NUM_PFS_BB ?
601                                   QM_REG_WFQPFCRD_RT_OFFSET :
602                                   QM_REG_WFQPFCRD_MSB_RT_OFFSET) +
603                                  ext_voq * MAX_NUM_PFS_BB +
604                                  (pf_id % MAX_NUM_PFS_BB);
605                 OVERWRITE_RT_REG(p_hwfn, crd_reg_offset,
606                                  (u32)QM_WFQ_CRD_REG_SIGN_BIT);
607                 STORE_RT_REG(p_hwfn, QM_REG_WFQPFUPPERBOUND_RT_OFFSET + pf_id,
608                              QM_WFQ_UPPER_BOUND | (u32)QM_WFQ_CRD_REG_SIGN_BIT);
609                 STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + pf_id,
610                              inc_val);
611         }
612
613         return 0;
614 }
615
616 /* Prepare PF RL runtime init values for the specified PF.
617  * Return -1 on error.
618  */
619 static int ecore_pf_rl_rt_init(struct ecore_hwfn *p_hwfn, u8 pf_id, u32 pf_rl)
620 {
621         u32 inc_val;
622
623         inc_val = QM_RL_INC_VAL(pf_rl);
624         if (inc_val > QM_PF_RL_MAX_INC_VAL) {
625                 DP_NOTICE(p_hwfn, true,
626                           "Invalid PF rate limit configuration\n");
627                 return -1;
628         }
629
630         STORE_RT_REG(p_hwfn, QM_REG_RLPFCRD_RT_OFFSET + pf_id,
631                      (u32)QM_RL_CRD_REG_SIGN_BIT);
632         STORE_RT_REG(p_hwfn, QM_REG_RLPFUPPERBOUND_RT_OFFSET + pf_id,
633                      QM_PF_RL_UPPER_BOUND | (u32)QM_RL_CRD_REG_SIGN_BIT);
634         STORE_RT_REG(p_hwfn, QM_REG_RLPFINCVAL_RT_OFFSET + pf_id, inc_val);
635
636         return 0;
637 }
638
639 /* Prepare VPORT WFQ runtime init values for the specified VPORTs.
640  * Return -1 on error.
641  */
642 static int ecore_vp_wfq_rt_init(struct ecore_hwfn *p_hwfn,
643                                 u8 num_vports,
644                                 struct init_qm_vport_params *vport_params)
645 {
646         u16 vport_pq_id;
647         u32 inc_val;
648         u8 tc, i;
649
650         /* Go over all PF VPORTs */
651         for (i = 0; i < num_vports; i++) {
652                 if (!vport_params[i].vport_wfq)
653                         continue;
654
655                 inc_val = QM_WFQ_INC_VAL(vport_params[i].vport_wfq);
656                 if (inc_val > QM_WFQ_MAX_INC_VAL) {
657                         DP_NOTICE(p_hwfn, true,
658                                   "Invalid VPORT WFQ weight configuration\n");
659                         return -1;
660                 }
661
662                 /* Each VPORT can have several VPORT PQ IDs for various TCs */
663                 for (tc = 0; tc < NUM_OF_TCS; tc++) {
664                         vport_pq_id = vport_params[i].first_tx_pq_id[tc];
665                         if (vport_pq_id != QM_INVALID_PQ_ID) {
666                                 STORE_RT_REG(p_hwfn, QM_REG_WFQVPCRD_RT_OFFSET +
667                                              vport_pq_id,
668                                              (u32)QM_WFQ_CRD_REG_SIGN_BIT);
669                                 STORE_RT_REG(p_hwfn,
670                                              QM_REG_WFQVPWEIGHT_RT_OFFSET +
671                                              vport_pq_id, inc_val);
672                         }
673                 }
674         }
675         return 0;
676 }
677
678 /* Prepare VPORT RL runtime init values for the specified VPORTs.
679  * Return -1 on error.
680  */
681 static int ecore_vport_rl_rt_init(struct ecore_hwfn *p_hwfn,
682                                   u8 start_vport,
683                                   u8 num_vports,
684                                   u32 link_speed,
685                                   struct init_qm_vport_params *vport_params)
686 {
687         u8 i, vport_id;
688         u32 inc_val;
689
690         if (start_vport + num_vports >= MAX_QM_GLOBAL_RLS) {
691                 DP_NOTICE(p_hwfn, true,
692                           "Invalid VPORT ID for rate limiter configuration\n");
693                 return -1;
694         }
695
696         /* Go over all PF VPORTs */
697         for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) {
698                 inc_val = QM_RL_INC_VAL(vport_params[i].vport_rl ?
699                           vport_params[i].vport_rl : link_speed);
700                 if (inc_val > QM_VP_RL_MAX_INC_VAL(link_speed)) {
701                         DP_NOTICE(p_hwfn, true,
702                                   "Invalid VPORT rate-limit configuration\n");
703                         return -1;
704                 }
705
706                 STORE_RT_REG(p_hwfn, QM_REG_RLGLBLCRD_RT_OFFSET + vport_id,
707                              (u32)QM_RL_CRD_REG_SIGN_BIT);
708                 STORE_RT_REG(p_hwfn,
709                              QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + vport_id,
710                              QM_VP_RL_UPPER_BOUND(link_speed) |
711                              (u32)QM_RL_CRD_REG_SIGN_BIT);
712                 STORE_RT_REG(p_hwfn, QM_REG_RLGLBLINCVAL_RT_OFFSET + vport_id,
713                              inc_val);
714         }
715
716         return 0;
717 }
718
719 static bool ecore_poll_on_qm_cmd_ready(struct ecore_hwfn *p_hwfn,
720                                        struct ecore_ptt *p_ptt)
721 {
722         u32 reg_val, i;
723
724         for (i = 0, reg_val = 0; i < QM_STOP_CMD_MAX_POLL_COUNT && !reg_val;
725              i++) {
726                 OSAL_UDELAY(QM_STOP_CMD_POLL_PERIOD_US);
727                 reg_val = ecore_rd(p_hwfn, p_ptt, QM_REG_SDMCMDREADY);
728         }
729
730         /* Check if a timeout occurred while waiting for SDM command ready */
731         if (i == QM_STOP_CMD_MAX_POLL_COUNT) {
732                 DP_VERBOSE(p_hwfn, ECORE_MSG_DEBUG,
733                            "Timeout waiting for QM SDM cmd ready signal\n");
734                 return false;
735         }
736
737         return true;
738 }
739
740 static bool ecore_send_qm_cmd(struct ecore_hwfn *p_hwfn,
741                               struct ecore_ptt *p_ptt,
742                               u32 cmd_addr,
743                               u32 cmd_data_lsb,
744                               u32 cmd_data_msb)
745 {
746         if (!ecore_poll_on_qm_cmd_ready(p_hwfn, p_ptt))
747                 return false;
748
749         ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDADDR, cmd_addr);
750         ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATALSB, cmd_data_lsb);
751         ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATAMSB, cmd_data_msb);
752         ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 1);
753         ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 0);
754
755         return ecore_poll_on_qm_cmd_ready(p_hwfn, p_ptt);
756 }
757
758
759 /******************** INTERFACE IMPLEMENTATION *********************/
760
761 u32 ecore_qm_pf_mem_size(u32 num_pf_cids,
762                          u32 num_vf_cids,
763                          u32 num_tids,
764                          u16 num_pf_pqs,
765                          u16 num_vf_pqs)
766 {
767         return QM_PQ_MEM_4KB(num_pf_cids) * num_pf_pqs +
768             QM_PQ_MEM_4KB(num_vf_cids) * num_vf_pqs +
769             QM_PQ_MEM_4KB(num_pf_cids + num_tids) * QM_OTHER_PQS_PER_PF;
770 }
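/* Worked example (illustrative only; hypothetical numbers, assuming
 * QM_PQ_ELEMENT_SIZE is 4 bytes): for num_pf_cids = 2048, num_vf_cids = 512,
 * num_tids = 1024, num_pf_pqs = 8 and num_vf_pqs = 16, the function returns
 * 3 * 8 + 1 * 16 + 4 * 4 = 56 pages of 4KB, i.e. 224KB of PQ memory.
 */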
771
772 int ecore_qm_common_rt_init(struct ecore_hwfn *p_hwfn,
773                             u8 max_ports_per_engine,
774                             u8 max_phys_tcs_per_port,
775                             bool pf_rl_en,
776                             bool pf_wfq_en,
777                             bool vport_rl_en,
778                             bool vport_wfq_en,
779                             struct init_qm_port_params
780                             port_params[MAX_NUM_PORTS])
781 {
782         u32 mask;
783
784         /* Init AFullOprtnstcCrdMask */
785         mask = (QM_OPPOR_LINE_VOQ_DEF <<
786                 QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT) |
787                 (QM_BYTE_CRD_EN << QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT) |
788                 (pf_wfq_en << QM_RF_OPPORTUNISTIC_MASK_PFWFQ_SHIFT) |
789                 (vport_wfq_en << QM_RF_OPPORTUNISTIC_MASK_VPWFQ_SHIFT) |
790                 (pf_rl_en << QM_RF_OPPORTUNISTIC_MASK_PFRL_SHIFT) |
791                 (vport_rl_en << QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_SHIFT) |
792                 (QM_OPPOR_FW_STOP_DEF <<
793                  QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_SHIFT) |
794                 (QM_OPPOR_PQ_EMPTY_DEF <<
795                  QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT);
796         STORE_RT_REG(p_hwfn, QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET, mask);
797
798         /* Enable/disable PF RL */
799         ecore_enable_pf_rl(p_hwfn, pf_rl_en);
800
801         /* Enable/disable PF WFQ */
802         ecore_enable_pf_wfq(p_hwfn, pf_wfq_en);
803
804         /* Enable/disable VPORT RL */
805         ecore_enable_vport_rl(p_hwfn, vport_rl_en);
806
807         /* Enable/disable VPORT WFQ */
808         ecore_enable_vport_wfq(p_hwfn, vport_wfq_en);
809
810         /* Init PBF CMDQ line credit */
811         ecore_cmdq_lines_rt_init(p_hwfn, max_ports_per_engine,
812                                  max_phys_tcs_per_port, port_params);
813
814         /* Init BTB blocks in PBF */
815         ecore_btb_blocks_rt_init(p_hwfn, max_ports_per_engine,
816                                  max_phys_tcs_per_port, port_params);
817
818         return 0;
819 }
820
821 int ecore_qm_pf_rt_init(struct ecore_hwfn *p_hwfn,
822                         struct ecore_ptt *p_ptt,
823                         u8 port_id,
824                         u8 pf_id,
825                         u8 max_phys_tcs_per_port,
826                         u32 num_pf_cids,
827                         u32 num_vf_cids,
828                         u32 num_tids,
829                         u16 start_pq,
830                         u16 num_pf_pqs,
831                         u16 num_vf_pqs,
832                         u8 start_vport,
833                         u8 num_vports,
834                         u16 pf_wfq,
835                         u32 pf_rl,
836                         u32 link_speed,
837                         struct init_qm_pq_params *pq_params,
838                         struct init_qm_vport_params *vport_params)
839 {
840         u32 other_mem_size_4kb;
841         u8 tc, i;
842
843         other_mem_size_4kb = QM_PQ_MEM_4KB(num_pf_cids + num_tids) *
844                              QM_OTHER_PQS_PER_PF;
845
846         /* Clear first Tx PQ ID array for each VPORT */
847         for (i = 0; i < num_vports; i++)
848                 for (tc = 0; tc < NUM_OF_TCS; tc++)
849                         vport_params[i].first_tx_pq_id[tc] = QM_INVALID_PQ_ID;
850
851         /* Map Other PQs (if any) */
852 #if QM_OTHER_PQS_PER_PF > 0
853         ecore_other_pq_map_rt_init(p_hwfn, pf_id, num_pf_cids, num_tids, 0);
854 #endif
855
856         /* Map Tx PQs */
857         ecore_tx_pq_map_rt_init(p_hwfn, p_ptt, port_id, pf_id,
858                                 max_phys_tcs_per_port, num_pf_cids, num_vf_cids,
859                                 start_pq, num_pf_pqs, num_vf_pqs, start_vport,
860                                 other_mem_size_4kb, pq_params, vport_params);
861
862         /* Init PF WFQ */
863         if (pf_wfq)
864                 if (ecore_pf_wfq_rt_init
865                     (p_hwfn, port_id, pf_id, pf_wfq, max_phys_tcs_per_port,
866                      num_pf_pqs + num_vf_pqs, pq_params))
867                         return -1;
868
869         /* Init PF RL */
870         if (ecore_pf_rl_rt_init(p_hwfn, pf_id, pf_rl))
871                 return -1;
872
873         /* Set VPORT WFQ */
874         if (ecore_vp_wfq_rt_init(p_hwfn, num_vports, vport_params))
875                 return -1;
876
877         /* Set VPORT RL */
878         if (ecore_vport_rl_rt_init
879             (p_hwfn, start_vport, num_vports, link_speed, vport_params))
880                 return -1;
881
882         return 0;
883 }
884
885 int ecore_init_pf_wfq(struct ecore_hwfn *p_hwfn,
886                       struct ecore_ptt *p_ptt, u8 pf_id, u16 pf_wfq)
887 {
888         u32 inc_val;
889
890         inc_val = QM_WFQ_INC_VAL(pf_wfq);
891         if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
892                 DP_NOTICE(p_hwfn, true,
893                           "Invalid PF WFQ weight configuration\n");
894                 return -1;
895         }
896
897         ecore_wr(p_hwfn, p_ptt, QM_REG_WFQPFWEIGHT + pf_id * 4, inc_val);
898
899         return 0;
900 }
901
902 int ecore_init_pf_rl(struct ecore_hwfn *p_hwfn,
903                      struct ecore_ptt *p_ptt, u8 pf_id, u32 pf_rl)
904 {
905         u32 inc_val;
906
907         inc_val = QM_RL_INC_VAL(pf_rl);
908         if (inc_val > QM_PF_RL_MAX_INC_VAL) {
909                 DP_NOTICE(p_hwfn, true,
910                           "Invalid PF rate limit configuration\n");
911                 return -1;
912         }
913
914         ecore_wr(p_hwfn, p_ptt, QM_REG_RLPFCRD + pf_id * 4,
915                  (u32)QM_RL_CRD_REG_SIGN_BIT);
916         ecore_wr(p_hwfn, p_ptt, QM_REG_RLPFINCVAL + pf_id * 4, inc_val);
917
918         return 0;
919 }
920
921 int ecore_init_vport_wfq(struct ecore_hwfn *p_hwfn,
922                          struct ecore_ptt *p_ptt,
923                          u16 first_tx_pq_id[NUM_OF_TCS], u16 vport_wfq)
924 {
925         u16 vport_pq_id;
926         u32 inc_val;
927         u8 tc;
928
929         inc_val = QM_WFQ_INC_VAL(vport_wfq);
930         if (!inc_val || inc_val > QM_WFQ_MAX_INC_VAL) {
931                 DP_NOTICE(p_hwfn, true,
932                           "Invalid VPORT WFQ weight configuration\n");
933                 return -1;
934         }
935
936         for (tc = 0; tc < NUM_OF_TCS; tc++) {
937                 vport_pq_id = first_tx_pq_id[tc];
938                 if (vport_pq_id != QM_INVALID_PQ_ID) {
939                         ecore_wr(p_hwfn, p_ptt,
940                                  QM_REG_WFQVPWEIGHT + vport_pq_id * 4, inc_val);
941                 }
942         }
943
944         return 0;
945 }
946
947 int ecore_init_vport_rl(struct ecore_hwfn *p_hwfn,
948                         struct ecore_ptt *p_ptt, u8 vport_id,
949                         u32 vport_rl,
950                         u32 link_speed)
951 {
952         u32 inc_val, max_qm_global_rls = MAX_QM_GLOBAL_RLS;
953
954         if (vport_id >= max_qm_global_rls) {
955                 DP_NOTICE(p_hwfn, true,
956                           "Invalid VPORT ID for rate limiter configuration\n");
957                 return -1;
958         }
959
960         inc_val = QM_RL_INC_VAL(vport_rl ? vport_rl : link_speed);
961         if (inc_val > QM_VP_RL_MAX_INC_VAL(link_speed)) {
962                 DP_NOTICE(p_hwfn, true,
963                           "Invalid VPORT rate-limit configuration\n");
964                 return -1;
965         }
966
967         ecore_wr(p_hwfn, p_ptt, QM_REG_RLGLBLCRD + vport_id * 4,
968                  (u32)QM_RL_CRD_REG_SIGN_BIT);
969         ecore_wr(p_hwfn, p_ptt, QM_REG_RLGLBLINCVAL + vport_id * 4, inc_val);
970
971         return 0;
972 }
973
974 bool ecore_send_qm_stop_cmd(struct ecore_hwfn *p_hwfn,
975                             struct ecore_ptt *p_ptt,
976                             bool is_release_cmd,
977                             bool is_tx_pq, u16 start_pq, u16 num_pqs)
978 {
979         u32 cmd_arr[QM_CMD_STRUCT_SIZE(QM_STOP_CMD)] = { 0 };
980         u32 pq_mask = 0, last_pq, pq_id;
981
982         last_pq = start_pq + num_pqs - 1;
983
984         /* Set command's PQ type */
985         QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PQ_TYPE, is_tx_pq ? 0 : 1);
986
987         /* Go over requested PQs */
988         for (pq_id = start_pq; pq_id <= last_pq; pq_id++) {
989                 /* Set PQ bit in mask (stop command only) */
990                 if (!is_release_cmd)
991                         pq_mask |= (1 << (pq_id % QM_STOP_PQ_MASK_WIDTH));
992
993                 /* If last PQ or end of PQ mask, write command */
994                 if ((pq_id == last_pq) ||
995                     (pq_id % QM_STOP_PQ_MASK_WIDTH ==
996                     (QM_STOP_PQ_MASK_WIDTH - 1))) {
997                         QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PAUSE_MASK,
998                                          pq_mask);
999                         QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, GROUP_ID,
1000                                          pq_id / QM_STOP_PQ_MASK_WIDTH);
1001                         if (!ecore_send_qm_cmd
1002                             (p_hwfn, p_ptt, QM_STOP_CMD_ADDR, cmd_arr[0],
1003                              cmd_arr[1]))
1004                                 return false;
1005                         pq_mask = 0;
1006                 }
1007         }
1008
1009         return true;
1010 }
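/* Worked example (illustrative only): stopping PQs 30-40 issues two commands -
 * one for group 0 with bits 30-31 set in the pause mask, and one for group 1
 * with bits 0-8 set (PQs 32-40), since each command covers a group of
 * QM_STOP_PQ_MASK_WIDTH = 32 PQs.
 */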
1011
1012
1013 /* NIG: ETS configuration constants */
1014 #define NIG_TX_ETS_CLIENT_OFFSET        4
1015 #define NIG_LB_ETS_CLIENT_OFFSET        1
1016 #define NIG_ETS_MIN_WFQ_BYTES           1600
1017
1018 /* NIG: ETS constants */
1019 #define NIG_ETS_UP_BOUND(weight, mtu) \
1020         (2 * ((weight) > (mtu) ? (weight) : (mtu)))
1021
1022 /* NIG: RL constants */
1023
1024 /* Byte base type value */
1025 #define NIG_RL_BASE_TYPE                1
1026
1027 /* Period in us */
1028 #define NIG_RL_PERIOD                   1
1029
1030 /* Period in 25MHz cycles */
1031 #define NIG_RL_PERIOD_CLK_25M           (25 * NIG_RL_PERIOD)
1032
1033 /* Rate in mbps */
1034 #define NIG_RL_INC_VAL(rate)            (((rate) * NIG_RL_PERIOD) / 8)
1035
1036 #define NIG_RL_MAX_VAL(inc_val, mtu) \
1037         (2 * ((inc_val) > (mtu) ? (inc_val) : (mtu)))
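/* Worked example (illustrative only; hypothetical values): for a 10000 Mbps
 * rate and a 9600-byte MTU, NIG_RL_INC_VAL(10000) = 10000 * 1 / 8 = 1250 bytes
 * per 1us period and NIG_RL_MAX_VAL(1250, 9600) = 2 * 9600 = 19200.
 */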
1038
1039 /* NIG: packet priority configuration constants */
1040 #define NIG_PRIORITY_MAP_TC_BITS        4
1041
1042
1043 void ecore_init_nig_ets(struct ecore_hwfn *p_hwfn,
1044                         struct ecore_ptt *p_ptt,
1045                         struct init_ets_req *req, bool is_lb)
1046 {
1047         u32 min_weight, tc_weight_base_addr, tc_weight_addr_diff;
1048         u32 tc_bound_base_addr, tc_bound_addr_diff;
1049         u8 sp_tc_map = 0, wfq_tc_map = 0;
1050         u8 tc, num_tc, tc_client_offset;
1051
1052         num_tc = is_lb ? NUM_OF_TCS : NUM_OF_PHYS_TCS;
1053         tc_client_offset = is_lb ? NIG_LB_ETS_CLIENT_OFFSET :
1054                                    NIG_TX_ETS_CLIENT_OFFSET;
1055         min_weight = 0xffffffff;
1056         tc_weight_base_addr = is_lb ? NIG_REG_LB_ARB_CREDIT_WEIGHT_0 :
1057                                       NIG_REG_TX_ARB_CREDIT_WEIGHT_0;
1058         tc_weight_addr_diff = is_lb ? NIG_REG_LB_ARB_CREDIT_WEIGHT_1 -
1059                                       NIG_REG_LB_ARB_CREDIT_WEIGHT_0 :
1060                                       NIG_REG_TX_ARB_CREDIT_WEIGHT_1 -
1061                                       NIG_REG_TX_ARB_CREDIT_WEIGHT_0;
1062         tc_bound_base_addr = is_lb ? NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_0 :
1063                                      NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_0;
1064         tc_bound_addr_diff = is_lb ? NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_1 -
1065                                      NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_0 :
1066                                      NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_1 -
1067                                      NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_0;
1068
1069         for (tc = 0; tc < num_tc; tc++) {
1070                 struct init_ets_tc_req *tc_req = &req->tc_req[tc];
1071
1072                 /* Update SP map */
1073                 if (tc_req->use_sp)
1074                         sp_tc_map |= (1 << tc);
1075
1076                 if (!tc_req->use_wfq)
1077                         continue;
1078
1079                 /* Update WFQ map */
1080                 wfq_tc_map |= (1 << tc);
1081
1082                 /* Find minimal weight */
1083                 if (tc_req->weight < min_weight)
1084                         min_weight = tc_req->weight;
1085         }
1086
1087         /* Write SP map */
1088         ecore_wr(p_hwfn, p_ptt,
1089                  is_lb ? NIG_REG_LB_ARB_CLIENT_IS_STRICT :
1090                  NIG_REG_TX_ARB_CLIENT_IS_STRICT,
1091                  (sp_tc_map << tc_client_offset));
1092
1093         /* Write WFQ map */
1094         ecore_wr(p_hwfn, p_ptt,
1095                  is_lb ? NIG_REG_LB_ARB_CLIENT_IS_SUBJECT2WFQ :
1096                  NIG_REG_TX_ARB_CLIENT_IS_SUBJECT2WFQ,
1097                  (wfq_tc_map << tc_client_offset));
1098         /* write WFQ weights */
1099         for (tc = 0; tc < num_tc; tc++, tc_client_offset++) {
1100                 struct init_ets_tc_req *tc_req = &req->tc_req[tc];
1101                 u32 byte_weight;
1102
1103                 if (!tc_req->use_wfq)
1104                         continue;
1105
1106                 /* Translate weight to bytes */
1107                 byte_weight = (NIG_ETS_MIN_WFQ_BYTES * tc_req->weight) /
1108                               min_weight;
1109
1110                 /* Write WFQ weight */
1111                 ecore_wr(p_hwfn, p_ptt, tc_weight_base_addr +
1112                          tc_weight_addr_diff * tc_client_offset, byte_weight);
1113
1114                 /* Write WFQ upper bound */
1115                 ecore_wr(p_hwfn, p_ptt, tc_bound_base_addr +
1116                          tc_bound_addr_diff * tc_client_offset,
1117                          NIG_ETS_UP_BOUND(byte_weight, req->mtu));
1118         }
1119 }
1120
1121 void ecore_init_nig_lb_rl(struct ecore_hwfn *p_hwfn,
1122                           struct ecore_ptt *p_ptt,
1123                           struct init_nig_lb_rl_req *req)
1124 {
1125         u32 ctrl, inc_val, reg_offset;
1126         u8 tc;
1127
1128         /* Disable global MAC+LB RL */
1129         ctrl =
1130             NIG_RL_BASE_TYPE <<
1131             NIG_REG_TX_LB_GLBRATELIMIT_CTRL_TX_LB_GLBRATELIMIT_BASE_TYPE_SHIFT;
1132         ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_CTRL, ctrl);
1133
1134         /* Configure and enable global MAC+LB RL */
1135         if (req->lb_mac_rate) {
1136                 /* Configure  */
1137                 ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_INC_PERIOD,
1138                          NIG_RL_PERIOD_CLK_25M);
1139                 inc_val = NIG_RL_INC_VAL(req->lb_mac_rate);
1140                 ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_INC_VALUE,
1141                          inc_val);
1142                 ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_MAX_VALUE,
1143                          NIG_RL_MAX_VAL(inc_val, req->mtu));
1144
1145                 /* Enable */
1146                 ctrl |=
1147                     1 <<
1148                     NIG_REG_TX_LB_GLBRATELIMIT_CTRL_TX_LB_GLBRATELIMIT_EN_SHIFT;
1149                 ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_CTRL, ctrl);
1150         }
1151
1152         /* Disable global LB-only RL */
1153         ctrl =
1154             NIG_RL_BASE_TYPE <<
1155             NIG_REG_LB_BRBRATELIMIT_CTRL_LB_BRBRATELIMIT_BASE_TYPE_SHIFT;
1156         ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_CTRL, ctrl);
1157
1158         /* Configure and enable global LB-only RL */
1159         if (req->lb_rate) {
1160                 /* Configure  */
1161                 ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_INC_PERIOD,
1162                          NIG_RL_PERIOD_CLK_25M);
1163                 inc_val = NIG_RL_INC_VAL(req->lb_rate);
1164                 ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_INC_VALUE,
1165                          inc_val);
1166                 ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_MAX_VALUE,
1167                          NIG_RL_MAX_VAL(inc_val, req->mtu));
1168
1169                 /* Enable */
1170                 ctrl |=
1171                     1 << NIG_REG_LB_BRBRATELIMIT_CTRL_LB_BRBRATELIMIT_EN_SHIFT;
1172                 ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_CTRL, ctrl);
1173         }
1174
1175         /* Per-TC RLs */
1176         for (tc = 0, reg_offset = 0; tc < NUM_OF_PHYS_TCS;
1177              tc++, reg_offset += 4) {
1178                 /* Disable TC RL */
1179                 ctrl =
1180                     NIG_RL_BASE_TYPE <<
1181                 NIG_REG_LB_TCRATELIMIT_CTRL_0_LB_TCRATELIMIT_BASE_TYPE_0_SHIFT;
1182                 ecore_wr(p_hwfn, p_ptt,
1183                          NIG_REG_LB_TCRATELIMIT_CTRL_0 + reg_offset, ctrl);
1184
1185                 /* Configure and enable TC RL */
1186                 if (!req->tc_rate[tc])
1187                         continue;
1188
1189                 /* Configure */
1190                 ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_INC_PERIOD_0 +
1191                          reg_offset, NIG_RL_PERIOD_CLK_25M);
1192                 inc_val = NIG_RL_INC_VAL(req->tc_rate[tc]);
1193                 ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_INC_VALUE_0 +
1194                          reg_offset, inc_val);
1195                 ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_MAX_VALUE_0 +
1196                          reg_offset, NIG_RL_MAX_VAL(inc_val, req->mtu));
1197
1198                 /* Enable */
1199                 ctrl |= 1 <<
1200                         NIG_REG_LB_TCRATELIMIT_CTRL_0_LB_TCRATELIMIT_EN_0_SHIFT;
1201                 ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_TCRATELIMIT_CTRL_0 +
1202                          reg_offset, ctrl);
1203         }
1204 }
1205
1206 void ecore_init_nig_pri_tc_map(struct ecore_hwfn *p_hwfn,
1207                                struct ecore_ptt *p_ptt,
1208                                struct init_nig_pri_tc_map_req *req)
1209 {
1210         u8 tc_pri_mask[NUM_OF_PHYS_TCS] = { 0 };
1211         u32 pri_tc_mask = 0;
1212         u8 pri, tc;
1213
1214         for (pri = 0; pri < NUM_OF_VLAN_PRIORITIES; pri++) {
1215                 if (!req->pri[pri].valid)
1216                         continue;
1217
1218                 pri_tc_mask |= (req->pri[pri].tc_id <<
1219                                 (pri * NIG_PRIORITY_MAP_TC_BITS));
1220                 tc_pri_mask[req->pri[pri].tc_id] |= (1 << pri);
1221         }
1222
1223         /* Write priority -> TC mask */
1224         ecore_wr(p_hwfn, p_ptt, NIG_REG_PKT_PRIORITY_TO_TC, pri_tc_mask);
1225
1226         /* Write TC -> priority mask */
1227         for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) {
1228                 ecore_wr(p_hwfn, p_ptt, NIG_REG_PRIORITY_FOR_TC_0 + tc * 4,
1229                          tc_pri_mask[tc]);
1230                 ecore_wr(p_hwfn, p_ptt, NIG_REG_RX_TC0_PRIORITY_MASK + tc * 4,
1231                          tc_pri_mask[tc]);
1232         }
1233 }
1234
1235
1236 /* PRS: ETS configuration constants */
1237 #define PRS_ETS_MIN_WFQ_BYTES           1600
1238 #define PRS_ETS_UP_BOUND(weight, mtu) \
1239         (2 * ((weight) > (mtu) ? (weight) : (mtu)))
1240
1241
1242 void ecore_init_prs_ets(struct ecore_hwfn *p_hwfn,
1243                         struct ecore_ptt *p_ptt, struct init_ets_req *req)
1244 {
1245         u32 tc_weight_addr_diff, tc_bound_addr_diff, min_weight = 0xffffffff;
1246         u8 tc, sp_tc_map = 0, wfq_tc_map = 0;
1247
1248         tc_weight_addr_diff = PRS_REG_ETS_ARB_CREDIT_WEIGHT_1 -
1249                               PRS_REG_ETS_ARB_CREDIT_WEIGHT_0;
1250         tc_bound_addr_diff = PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_1 -
1251                              PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_0;
1252
1253         for (tc = 0; tc < NUM_OF_TCS; tc++) {
1254                 struct init_ets_tc_req *tc_req = &req->tc_req[tc];
1255
1256                 /* Update SP map */
1257                 if (tc_req->use_sp)
1258                         sp_tc_map |= (1 << tc);
1259
1260                 if (!tc_req->use_wfq)
1261                         continue;
1262
1263                 /* Update WFQ map */
1264                 wfq_tc_map |= (1 << tc);
1265
1266                 /* Find minimal weight */
1267                 if (tc_req->weight < min_weight)
1268                         min_weight = tc_req->weight;
1269         }
1270
1271         /* write SP map */
1272         ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CLIENT_IS_STRICT, sp_tc_map);
1273
1274         /* write WFQ map */
1275         ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ,
1276                  wfq_tc_map);
1277
1278         /* write WFQ weights */
1279         for (tc = 0; tc < NUM_OF_TCS; tc++) {
1280                 struct init_ets_tc_req *tc_req = &req->tc_req[tc];
1281                 u32 byte_weight;
1282
1283                 if (!tc_req->use_wfq)
1284                         continue;
1285
1286                 /* Translate weight to bytes */
1287                 byte_weight = (PRS_ETS_MIN_WFQ_BYTES * tc_req->weight) /
1288                               min_weight;
1289
1290                 /* Write WFQ weight */
1291                 ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CREDIT_WEIGHT_0 + tc *
1292                          tc_weight_addr_diff, byte_weight);
1293
1294                 /* Write WFQ upper bound */
1295                 ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_0 +
1296                          tc * tc_bound_addr_diff, PRS_ETS_UP_BOUND(byte_weight,
1297                                                                    req->mtu));
1298         }
1299 }
1300
1301
1302 /* BRB: RAM configuration constants */
1303 #define BRB_TOTAL_RAM_BLOCKS_BB 4800
1304 #define BRB_TOTAL_RAM_BLOCKS_K2 5632
1305 #define BRB_BLOCK_SIZE          128
1306 #define BRB_MIN_BLOCKS_PER_TC   9
1307 #define BRB_HYST_BYTES          10240
1308 #define BRB_HYST_BLOCKS         (BRB_HYST_BYTES / BRB_BLOCK_SIZE)
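/* Illustrative arithmetic: with 128-byte blocks the 10240-byte hysteresis
 * translates to BRB_HYST_BLOCKS = 10240 / 128 = 80 blocks, and a 10000-byte
 * headroom_per_tc would come out to DIV_ROUND_UP(10000, 128) = 79 blocks.
 */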
1309
1310 /* Temporary big RAM allocation - should be updated */
1311 void ecore_init_brb_ram(struct ecore_hwfn *p_hwfn,
1312                         struct ecore_ptt *p_ptt, struct init_brb_ram_req *req)
1313 {
1314         u32 tc_headroom_blocks, min_pkt_size_blocks, total_blocks;
1315         u32 active_port_blocks, reg_offset = 0;
1316         u8 port, active_ports = 0;
1317
1318         tc_headroom_blocks = (u32)DIV_ROUND_UP(req->headroom_per_tc,
1319                                                BRB_BLOCK_SIZE);
1320         min_pkt_size_blocks = (u32)DIV_ROUND_UP(req->min_pkt_size,
1321                                                 BRB_BLOCK_SIZE);
1322         total_blocks = ECORE_IS_K2(p_hwfn->p_dev) ? BRB_TOTAL_RAM_BLOCKS_K2 :
1323                                                     BRB_TOTAL_RAM_BLOCKS_BB;
1324
1325         /* Find number of active ports */
1326         for (port = 0; port < MAX_NUM_PORTS; port++)
1327                 if (req->num_active_tcs[port])
1328                         active_ports++;
1329
1330         active_port_blocks = (u32)(total_blocks / active_ports);
1331
1332         for (port = 0; port < req->max_ports_per_engine; port++) {
1333                 u32 port_blocks, port_shared_blocks, port_guaranteed_blocks;
1334                 u32 full_xoff_th, full_xon_th, pause_xoff_th, pause_xon_th;
1335                 u32 tc_guaranteed_blocks;
1336                 u8 tc;
1337
1338                 /* Calculate per-port sizes */
1339                 tc_guaranteed_blocks = (u32)DIV_ROUND_UP(req->guranteed_per_tc,
1340                                                          BRB_BLOCK_SIZE);
1341                 port_blocks = req->num_active_tcs[port] ? active_port_blocks :
1342                                                           0;
1343                 port_guaranteed_blocks = req->num_active_tcs[port] *
1344                                          tc_guaranteed_blocks;
1345                 port_shared_blocks = port_blocks - port_guaranteed_blocks;
1346                 full_xoff_th = req->num_active_tcs[port] *
1347                                BRB_MIN_BLOCKS_PER_TC;
1348                 full_xon_th = full_xoff_th + min_pkt_size_blocks;
1349                 pause_xoff_th = tc_headroom_blocks;
1350                 pause_xon_th = pause_xoff_th + min_pkt_size_blocks;
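                /* Descriptive note on the thresholds above: the "full" XOFF
                 * point reserves BRB_MIN_BLOCKS_PER_TC blocks for every
                 * active TC on the port, the "pause" XOFF point is simply the
                 * per-TC headroom, and each XON point sits one min_pkt_size
                 * worth of blocks above its XOFF counterpart.
                 */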
1351
1352                 /* Init total size per port */
1353                 ecore_wr(p_hwfn, p_ptt, BRB_REG_TOTAL_MAC_SIZE + port * 4,
1354                          port_blocks);
1355
1356                 /* Init shared size per port */
1357                 ecore_wr(p_hwfn, p_ptt, BRB_REG_SHARED_HR_AREA + port * 4,
1358                          port_shared_blocks);
1359
1360                 for (tc = 0; tc < NUM_OF_TCS; tc++, reg_offset += 4) {
1361                         /* Clear init values for non-active TCs */
1362                         if (tc == req->num_active_tcs[port]) {
1363                                 tc_guaranteed_blocks = 0;
1364                                 full_xoff_th = 0;
1365                                 full_xon_th = 0;
1366                                 pause_xoff_th = 0;
1367                                 pause_xon_th = 0;
1368                         }
1369
1370                         /* Init guaranteed size per TC */
1371                         ecore_wr(p_hwfn, p_ptt,
1372                                  BRB_REG_TC_GUARANTIED_0 + reg_offset,
1373                                  tc_guaranteed_blocks);
1374                         ecore_wr(p_hwfn, p_ptt,
1375                                  BRB_REG_MAIN_TC_GUARANTIED_HYST_0 + reg_offset,
1376                                  BRB_HYST_BLOCKS);
1377
1378                         /* Init pause/full thresholds per physical TC - for
1379                          * loopback traffic.
1380                          */
1381                         ecore_wr(p_hwfn, p_ptt,
1382                                  BRB_REG_LB_TC_FULL_XOFF_THRESHOLD_0 +
1383                                  reg_offset, full_xoff_th);
1384                         ecore_wr(p_hwfn, p_ptt,
1385                                  BRB_REG_LB_TC_FULL_XON_THRESHOLD_0 +
1386                                  reg_offset, full_xon_th);
1387                         ecore_wr(p_hwfn, p_ptt,
1388                                  BRB_REG_LB_TC_PAUSE_XOFF_THRESHOLD_0 +
1389                                  reg_offset, pause_xoff_th);
1390                         ecore_wr(p_hwfn, p_ptt,
1391                                  BRB_REG_LB_TC_PAUSE_XON_THRESHOLD_0 +
1392                                  reg_offset, pause_xon_th);
1393
1394                         /* Init pause/full thresholds per physical TC - for
1395                          * main traffic.
1396                          */
1397                         ecore_wr(p_hwfn, p_ptt,
1398                                  BRB_REG_MAIN_TC_FULL_XOFF_THRESHOLD_0 +
1399                                  reg_offset, full_xoff_th);
1400                         ecore_wr(p_hwfn, p_ptt,
1401                                  BRB_REG_MAIN_TC_FULL_XON_THRESHOLD_0 +
1402                                  reg_offset, full_xon_th);
1403                         ecore_wr(p_hwfn, p_ptt,
1404                                  BRB_REG_MAIN_TC_PAUSE_XOFF_THRESHOLD_0 +
1405                                  reg_offset, pause_xoff_th);
1406                         ecore_wr(p_hwfn, p_ptt,
1407                                  BRB_REG_MAIN_TC_PAUSE_XON_THRESHOLD_0 +
1408                                  reg_offset, pause_xon_th);
1409                 }
1410         }
1411 }
1412
1413 /* In MF mode, should be called once per port to set the EtherType of the outer tag */
1414 void ecore_set_port_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn, u32 ethType)
1415 {
1416         /* Update DORQ register */
1417         STORE_RT_REG(p_hwfn, DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET, ethType);
1418 }
1419
1420 #define SET_TUNNEL_TYPE_ENABLE_BIT(var, offset, enable) \
1421 (var = ((var) & ~(1 << (offset))) | ((enable) ? (1 << (offset)) : 0))
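/* Descriptive note: the macro above first clears bit <offset> in <var> and
 * then sets it again only when <enable> is true. For example, with var = 0x5,
 * offset = 1 and enable = 1 the result is 0x7; with enable = 0 it stays 0x5.
 */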
1422 #define PRS_ETH_TUNN_FIC_FORMAT        -188897008
1423 void ecore_set_vxlan_dest_port(struct ecore_hwfn *p_hwfn,
1424                                struct ecore_ptt *p_ptt, u16 dest_port)
1425 {
1426         /* Update PRS register */
1427         ecore_wr(p_hwfn, p_ptt, PRS_REG_VXLAN_PORT, dest_port);
1428
1429         /* Update NIG register */
1430         ecore_wr(p_hwfn, p_ptt, NIG_REG_VXLAN_CTRL, dest_port);
1431
1432         /* Update PBF register */
1433         ecore_wr(p_hwfn, p_ptt, PBF_REG_VXLAN_PORT, dest_port);
1434 }
1435
1436 void ecore_set_vxlan_enable(struct ecore_hwfn *p_hwfn,
1437                             struct ecore_ptt *p_ptt, bool vxlan_enable)
1438 {
1439         u32 reg_val;
1440
1441         /* Update PRS register */
1442         reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
1443         SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
1444                            PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT,
1445                            vxlan_enable);
1446         ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
1447         if (reg_val) {
1448                 ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
1449                          (u32)PRS_ETH_TUNN_FIC_FORMAT);
1450         }
1451
1452         /* Update NIG register */
1453         reg_val = ecore_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
1454         SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
1455                                    NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT,
1456                                    vxlan_enable);
1457         ecore_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);
1458
1459         /* Update DORQ register */
1460         ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN,
1461                  vxlan_enable ? 1 : 0);
1462 }
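/* Usage sketch (illustrative): a PF init flow would typically program the UDP
 * destination port first and then turn the VXLAN parsing path on, e.g. with
 * the IANA-assigned port 4789:
 *
 *      ecore_set_vxlan_dest_port(p_hwfn, p_ptt, 4789);
 *      ecore_set_vxlan_enable(p_hwfn, p_ptt, true);
 */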
1463
1464 void ecore_set_gre_enable(struct ecore_hwfn *p_hwfn,
1465                           struct ecore_ptt *p_ptt,
1466                           bool eth_gre_enable, bool ip_gre_enable)
1467 {
1468         u32 reg_val;
1469
1470         /* Update PRS register */
1471         reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
1472         SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
1473                    PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT,
1474                    eth_gre_enable);
1475         SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
1476                    PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT,
1477                    ip_gre_enable);
1478         ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
1479         if (reg_val) {
1480                 ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
1481                          (u32)PRS_ETH_TUNN_FIC_FORMAT);
1482         }
1483
1484         /* Update NIG register */
1485         reg_val = ecore_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE);
1486         SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
1487                    NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE_SHIFT,
1488                    eth_gre_enable);
1489         SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
1490                    NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE_SHIFT,
1491                    ip_gre_enable);
1492         ecore_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val);
1493
1494         /* Update DORQ registers */
1495         ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN,
1496                  eth_gre_enable ? 1 : 0);
1497         ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN,
1498                  ip_gre_enable ? 1 : 0);
1499 }
1500
1501 void ecore_set_geneve_dest_port(struct ecore_hwfn *p_hwfn,
1502                                 struct ecore_ptt *p_ptt, u16 dest_port)
1503 {
1504         /* Update PRS register */
1505         ecore_wr(p_hwfn, p_ptt, PRS_REG_NGE_PORT, dest_port);
1506
1507         /* Update NIG register */
1508         ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_PORT, dest_port);
1509
1510         /* Update PBF register */
1511         ecore_wr(p_hwfn, p_ptt, PBF_REG_NGE_PORT, dest_port);
1512 }
1513
1514 void ecore_set_geneve_enable(struct ecore_hwfn *p_hwfn,
1515                              struct ecore_ptt *p_ptt,
1516                              bool eth_geneve_enable, bool ip_geneve_enable)
1517 {
1518         u32 reg_val;
1519
1520         /* Update PRS register */
1521         reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN);
1522         SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
1523                    PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT,
1524                    eth_geneve_enable);
1525         SET_TUNNEL_TYPE_ENABLE_BIT(reg_val,
1526                    PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT,
1527                    ip_geneve_enable);
1528         ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val);
1529         if (reg_val) {
1530                 ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0_BB_K2,
1531                          (u32)PRS_ETH_TUNN_FIC_FORMAT);
1532         }
1533
1534         /* Update NIG register */
1535         ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_ETH_ENABLE,
1536                  eth_geneve_enable ? 1 : 0);
1537         ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_IP_ENABLE,
1538                  ip_geneve_enable ? 1 : 0);
1539
1540         /* EDPM with geneve tunnel not supported in BB */
1541         if (ECORE_IS_BB_B0(p_hwfn->p_dev))
1542                 return;
1543
1544         /* Update DORQ registers */
1545         ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN_K2_E5,
1546                  eth_geneve_enable ? 1 : 0);
1547         ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN_K2_E5,
1548                  ip_geneve_enable ? 1 : 0);
1549 }
1550
1551
1552 #define T_ETH_PACKET_ACTION_GFT_EVENTID  23
1553 #define PARSER_ETH_CONN_GFT_ACTION_CM_HDR  272
1554 #define T_ETH_PACKET_MATCH_RFS_EVENTID 25
1555 #define PARSER_ETH_CONN_CM_HDR 0
1556 #define CAM_LINE_SIZE sizeof(u32)
1557 #define RAM_LINE_SIZE sizeof(u64)
1558 #define REG_SIZE sizeof(u32)
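/* Descriptive note: each PF owns a single u32 CAM line and a single u64
 * profile-mask RAM line; since ecore_wr() takes a 32-bit value, the RAM line
 * is always programmed (and cleared) as two REG_SIZE-wide halves.
 */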
1559
1560 void ecore_gft_disable(struct ecore_hwfn *p_hwfn,
1561                        struct ecore_ptt *p_ptt,
1562                        u16 pf_id)
1563 {
1564         /* Disable GFT search for PF */
1565         ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 0);
1566
1567         /* Clean RAM & CAM for the next GFT session */
1568
1569         /* Zero camline */
1570         ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id, 0);
1571
1572         /* Zero ramline */
1573         ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM +
1574                                 RAM_LINE_SIZE * pf_id, 0);
1575         ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM +
1576                                 RAM_LINE_SIZE * pf_id + REG_SIZE, 0);
1577 }
1578
1579
1580 void ecore_set_gft_event_id_cm_hdr(struct ecore_hwfn *p_hwfn,
1581                                    struct ecore_ptt *p_ptt)
1582 {
1583         u32 rfs_cm_hdr_event_id;
1584
1585         /* Set RFS event ID to be awakened in Tstorm by PRS */
1586         rfs_cm_hdr_event_id = ecore_rd(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT);
1587         rfs_cm_hdr_event_id |= T_ETH_PACKET_ACTION_GFT_EVENTID <<
1588             PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
1589         rfs_cm_hdr_event_id |= PARSER_ETH_CONN_GFT_ACTION_CM_HDR <<
1590             PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
1591         ecore_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, rfs_cm_hdr_event_id);
1592 }
1593
1594 void ecore_gft_config(struct ecore_hwfn *p_hwfn,
1595                                struct ecore_ptt *p_ptt,
1596                                u16 pf_id,
1597                                bool tcp,
1598                                bool udp,
1599                                bool ipv4,
1600                                bool ipv6,
1601                                enum gft_profile_type profile_type)
1602 {
1603         u32 reg_val, cam_line, ram_line_lo, ram_line_hi;
1604
1605         if (!ipv6 && !ipv4)
1606                 DP_NOTICE(p_hwfn, true, "gft_config: must accept at least one of ipv4 or ipv6\n");
1607         if (!tcp && !udp)
1608                 DP_NOTICE(p_hwfn, true, "gft_config: must accept at least one of udp or tcp\n");
1609         if (profile_type >= MAX_GFT_PROFILE_TYPE)
1610                 DP_NOTICE(p_hwfn, true, "gft_config: unsupported gft_profile_type\n");
1611
1612         /* Set RFS event ID to be awakened in Tstorm by PRS */
1613         reg_val = T_ETH_PACKET_MATCH_RFS_EVENTID <<
1614                   PRS_REG_CM_HDR_GFT_EVENT_ID_SHIFT;
1615         reg_val |= PARSER_ETH_CONN_CM_HDR << PRS_REG_CM_HDR_GFT_CM_HDR_SHIFT;
1616         ecore_wr(p_hwfn, p_ptt, PRS_REG_CM_HDR_GFT, reg_val);
1617
1618         /* Do not load context, only cid, in PRS on match. */
1619         ecore_wr(p_hwfn, p_ptt, PRS_REG_LOAD_L2_FILTER, 0);
1620
1621         /* Do not use tenant ID exist bit for GFT search */
1622         ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TENANT_ID, 0);
1623
1624         /* Set Cam */
1625         cam_line = 0;
1626         SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_VALID, 1);
1627
1628         /* Filters are per PF!! */
1629         SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_PF_ID_MASK,
1630                   GFT_CAM_LINE_MAPPED_PF_ID_MASK_MASK);
1631         SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_PF_ID, pf_id);
1632
1633         if (!(tcp && udp)) {
1634                 SET_FIELD(cam_line,
1635                           GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK,
1636                           GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_MASK);
1637                 if (tcp)
1638                         SET_FIELD(cam_line,
1639                                   GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE,
1640                                   GFT_PROFILE_TCP_PROTOCOL);
1641                 else
1642                         SET_FIELD(cam_line,
1643                                   GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE,
1644                                   GFT_PROFILE_UDP_PROTOCOL);
1645         }
1646
1647         if (!(ipv4 && ipv6)) {
1648                 SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_IP_VERSION_MASK, 1);
1649                 if (ipv4)
1650                         SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_IP_VERSION,
1651                                   GFT_PROFILE_IPV4);
1652                 else
1653                         SET_FIELD(cam_line, GFT_CAM_LINE_MAPPED_IP_VERSION,
1654                                   GFT_PROFILE_IPV6);
1655         }
1656
1657         /* Write characteristics to cam */
1658         ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id,
1659                  cam_line);
1660         cam_line = ecore_rd(p_hwfn, p_ptt,
1661                             PRS_REG_GFT_CAM + CAM_LINE_SIZE * pf_id);
1662
1663         /* Write line to RAM - compare to filter 4 tuple */
1664         ram_line_lo = 0;
1665         ram_line_hi = 0;
1666
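        /* Descriptive note: the RAM line acts as a per-PF field-enable mask -
         * each profile type below sets only the header fields that take part
         * in the lookup (e.g. the 4-tuple profile matches src/dst IP, IP
         * protocol, ethertype and src/dst L4 port).
         */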
1667         if (profile_type == GFT_PROFILE_TYPE_4_TUPLE) {
1668                 SET_FIELD(ram_line_hi, GFT_RAM_LINE_DST_IP, 1);
1669                 SET_FIELD(ram_line_hi, GFT_RAM_LINE_SRC_IP, 1);
1670                 SET_FIELD(ram_line_hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
1671                 SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
1672                 SET_FIELD(ram_line_lo, GFT_RAM_LINE_SRC_PORT, 1);
1673                 SET_FIELD(ram_line_lo, GFT_RAM_LINE_DST_PORT, 1);
1674         } else if (profile_type == GFT_PROFILE_TYPE_L4_DST_PORT) {
1675                 SET_FIELD(ram_line_hi, GFT_RAM_LINE_OVER_IP_PROTOCOL, 1);
1676                 SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
1677                 SET_FIELD(ram_line_lo, GFT_RAM_LINE_DST_PORT, 1);
1678         } else if (profile_type == GFT_PROFILE_TYPE_IP_DST_PORT) {
1679                 SET_FIELD(ram_line_hi, GFT_RAM_LINE_DST_IP, 1);
1680                 SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
1681         }
1682
1683         ecore_wr(p_hwfn, p_ptt,
1684                  PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id,
1685                  ram_line_lo);
1686         ecore_wr(p_hwfn, p_ptt,
1687                  PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id +
1688                  REG_SIZE, ram_line_hi);
1689
1690         /* Set default profile so that no filter match will happen */
1691         ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE *
1692                  PRS_GFT_CAM_LINES_NO_MATCH, 0xffffffff);
1693         ecore_wr(p_hwfn, p_ptt, PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE *
1694                  PRS_GFT_CAM_LINES_NO_MATCH + REG_SIZE, 0x3ff);
1695
1696         /* Enable gft search */
1697         ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_GFT, 1);
1698 }
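/* Usage sketch (illustrative): enabling 4-tuple TCP/IPv4 flow steering for a
 * PF and tearing it down again could look like:
 *
 *      ecore_gft_config(p_hwfn, p_ptt, pf_id,
 *                       true, false,   (tcp, udp)
 *                       true, false,   (ipv4, ipv6)
 *                       GFT_PROFILE_TYPE_4_TUPLE);
 *      ecore_gft_disable(p_hwfn, p_ptt, pf_id);
 */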
1699
1700 /* Configure VF zone size mode */
1701 void ecore_config_vf_zone_size_mode(struct ecore_hwfn *p_hwfn,
1702                                     struct ecore_ptt *p_ptt, u16 mode,
1703                                     bool runtime_init)
1704 {
1705         u32 msdm_vf_size_log = MSTORM_VF_ZONE_DEFAULT_SIZE_LOG;
1706         u32 msdm_vf_offset_mask;
1707
1708         if (mode == VF_ZONE_SIZE_MODE_DOUBLE)
1709                 msdm_vf_size_log += 1;
1710         else if (mode == VF_ZONE_SIZE_MODE_QUAD)
1711                 msdm_vf_size_log += 2;
1712
1713         msdm_vf_offset_mask = (1 << msdm_vf_size_log) - 1;
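        /* Worked example under an assumed MSTORM_VF_ZONE_DEFAULT_SIZE_LOG of
         * 7 (value not taken from this file): DOUBLE mode yields
         * msdm_vf_size_log = 8, i.e. a 256-byte zone with offset mask 0xFF,
         * while QUAD yields 9, i.e. a 512-byte zone with mask 0x1FF.
         */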
1714
1715         if (runtime_init) {
1716                 STORE_RT_REG(p_hwfn,
1717                              PGLUE_REG_B_MSDM_VF_SHIFT_B_RT_OFFSET,
1718                              msdm_vf_size_log);
1719                 STORE_RT_REG(p_hwfn,
1720                              PGLUE_REG_B_MSDM_OFFSET_MASK_B_RT_OFFSET,
1721                              msdm_vf_offset_mask);
1722         } else {
1723                 ecore_wr(p_hwfn, p_ptt,
1724                          PGLUE_B_REG_MSDM_VF_SHIFT_B, msdm_vf_size_log);
1725                 ecore_wr(p_hwfn, p_ptt,
1726                          PGLUE_B_REG_MSDM_OFFSET_MASK_B, msdm_vf_offset_mask);
1727         }
1728 }
1729
1730 /* Get mstorm statistics for offset by VF zone size mode */
1731 u32 ecore_get_mstorm_queue_stat_offset(struct ecore_hwfn *p_hwfn,
1732                                        u16 stat_cnt_id,
1733                                        u16 vf_zone_size_mode)
1734 {
1735         u32 offset = MSTORM_QUEUE_STAT_OFFSET(stat_cnt_id);
1736
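        /* Descriptive note: only VF stat zones (stat_cnt_id beyond the PF
         * range) are scaled - DOUBLE mode adds one extra default-sized zone
         * per preceding VF and QUAD mode adds three, on top of the default
         * layout already accounted for in MSTORM_QUEUE_STAT_OFFSET().
         */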
1737         if ((vf_zone_size_mode != VF_ZONE_SIZE_MODE_DEFAULT) &&
1738             (stat_cnt_id > MAX_NUM_PFS)) {
1739                 if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_DOUBLE)
1740                         offset += (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) *
1741                             (stat_cnt_id - MAX_NUM_PFS);
1742                 else if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_QUAD)
1743                         offset += 3 * (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) *
1744                             (stat_cnt_id - MAX_NUM_PFS);
1745         }
1746
1747         return offset;
1748 }
1749
1750 /* Get mstorm VF producer offset by VF zone size mode */
1751 u32 ecore_get_mstorm_eth_vf_prods_offset(struct ecore_hwfn *p_hwfn,
1752                                          u8 vf_id,
1753                                          u8 vf_queue_id,
1754                                          u16 vf_zone_size_mode)
1755 {
1756         u32 offset = MSTORM_ETH_VF_PRODS_OFFSET(vf_id, vf_queue_id);
1757
1758         if (vf_zone_size_mode != VF_ZONE_SIZE_MODE_DEFAULT) {
1759                 if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_DOUBLE)
1760                         offset += (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) *
1761                                    vf_id;
1762                 else if (vf_zone_size_mode == VF_ZONE_SIZE_MODE_QUAD)
1763                         offset += 3 * (1 << MSTORM_VF_ZONE_DEFAULT_SIZE_LOG) *
1764                                   vf_id;
1765         }
1766
1767         return offset;
1768 }
1769
1770 #ifndef LINUX_REMOVE
1771 #define CRC8_INIT_VALUE 0xFF
1772 #endif
1773 static u8 cdu_crc8_table[CRC8_TABLE_SIZE];
1774
1775 /* Calculate and return CDU validation byte per connection type / region /
1776  * cid
1777  */
1778 static u8 ecore_calc_cdu_validation_byte(u8 conn_type, u8 region, u32 cid)
1779 {
1780         const u8 validation_cfg = CDU_VALIDATION_DEFAULT_CFG;
1781
1782         static u8 crc8_table_valid;     /* automatically initialized to 0 */
1783         u8 crc, validation_byte = 0;
1784         u32 validation_string = 0;
1785         u32 data_to_crc;
1786
1787         if (crc8_table_valid == 0) {
1788                 OSAL_CRC8_POPULATE(cdu_crc8_table, 0x07);
1789                 crc8_table_valid = 1;
1790         }
1791
1792         /*
1793          * The CRC is calculated on the String-to-compress:
1794          * [31:8]  = {CID[31:20],CID[11:0]}
1795          * [7:4]   = Region
1796          * [3:0]   = Type
1797          */
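        /* Worked example (assuming the CID, region and type bits are all set
         * in the validation configuration): for cid = 0x12345, region = 3 and
         * conn_type = 1 the string becomes (0x12345 & 0xFFF00000) |
         * ((0x12345 & 0xFFF) << 8) | (3 << 4) | 1 = 0x00034531, which is then
         * converted to big-endian byte order and fed to the CRC8 below.
         */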
1798         if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_CID) & 1)
1799                 validation_string |= (cid & 0xFFF00000) | ((cid & 0xFFF) << 8);
1800
1801         if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_REGION) & 1)
1802                 validation_string |= ((region & 0xF) << 4);
1803
1804         if ((validation_cfg >> CDU_CONTEXT_VALIDATION_CFG_USE_TYPE) & 1)
1805                 validation_string |= (conn_type & 0xF);
1806
1807         /* Convert to big-endian and calculate CRC8 */
1808         data_to_crc = OSAL_BE32_TO_CPU(validation_string);
1809
1810         crc = OSAL_CRC8(cdu_crc8_table, (u8 *)&data_to_crc, sizeof(data_to_crc),
1811                         CRC8_INIT_VALUE);
1812
1813         /* The validation byte [7:0] is composed as follows:
1814          * for type A validation
1815          * [7]          = active configuration bit
1816          * [6:0]        = crc[6:0]
1817          *
1818          * for type B validation
1819          * [7]          = active configuration bit
1820          * [6:3]        = connection_type[3:0]
1821          * [2:0]        = crc[2:0]
1822          */
1823
1824         validation_byte |= ((validation_cfg >>
1825                              CDU_CONTEXT_VALIDATION_CFG_USE_ACTIVE) & 1) << 7;
1826
1827         if ((validation_cfg >>
1828              CDU_CONTEXT_VALIDATION_CFG_VALIDATION_TYPE_SHIFT) & 1)
1829                 validation_byte |= ((conn_type & 0xF) << 3) | (crc & 0x7);
1830         else
1831                 validation_byte |= crc & 0x7F;
1832
1833         return validation_byte;
1834 }
1835
1836 /* Calculate and set validation bytes for session context */
1837 void ecore_calc_session_ctx_validation(void *p_ctx_mem, u16 ctx_size,
1838                                        u8 ctx_type, u32 cid)
1839 {
1840         u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx;
1841
1842         p_ctx = (u8 *)p_ctx_mem;
1843         x_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]];
1844         t_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]];
1845         u_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]];
1846
1847         OSAL_MEMSET(p_ctx, 0, ctx_size);
1848
1849         *x_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 3, cid);
1850         *t_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 4, cid);
1851         *u_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 5, cid);
1852 }
1853
1854 /* Calculate and set validation bytes for task context */
1855 void ecore_calc_task_ctx_validation(void *p_ctx_mem, u16 ctx_size, u8 ctx_type,
1856                                     u32 tid)
1857 {
1858         u8 *p_ctx, *region1_val_ptr;
1859
1860         p_ctx = (u8 *)p_ctx_mem;
1861         region1_val_ptr = &p_ctx[task_region_offsets[0][ctx_type]];
1862
1863         OSAL_MEMSET(p_ctx, 0, ctx_size);
1864
1865         *region1_val_ptr = ecore_calc_cdu_validation_byte(ctx_type, 1, tid);
1866 }
1867
1868 /* Memset session context to 0 while preserving validation bytes */
1869 void ecore_memset_session_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type)
1870 {
1871         u8 *x_val_ptr, *t_val_ptr, *u_val_ptr, *p_ctx;
1872         u8 x_val, t_val, u_val;
1873
1874         p_ctx = (u8 *)p_ctx_mem;
1875         x_val_ptr = &p_ctx[con_region_offsets[0][ctx_type]];
1876         t_val_ptr = &p_ctx[con_region_offsets[1][ctx_type]];
1877         u_val_ptr = &p_ctx[con_region_offsets[2][ctx_type]];
1878
1879         x_val = *x_val_ptr;
1880         t_val = *t_val_ptr;
1881         u_val = *u_val_ptr;
1882
1883         OSAL_MEMSET(p_ctx, 0, ctx_size);
1884
1885         *x_val_ptr = x_val;
1886         *t_val_ptr = t_val;
1887         *u_val_ptr = u_val;
1888 }
1889
1890 /* Memset task context to 0 while preserving validation bytes */
1891 void ecore_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type)
1892 {
1893         u8 *p_ctx, *region1_val_ptr;
1894         u8 region1_val;
1895
1896         p_ctx = (u8 *)p_ctx_mem;
1897         region1_val_ptr = &p_ctx[task_region_offsets[0][ctx_type]];
1898
1899         region1_val = *region1_val_ptr;
1900
1901         OSAL_MEMSET(p_ctx, 0, ctx_size);
1902
1903         *region1_val_ptr = region1_val;
1904 }
1905
1906 /* Enable and configure context validation */
1907 void ecore_enable_context_validation(struct ecore_hwfn *p_hwfn,
1908                                      struct ecore_ptt *p_ptt)
1909 {
1910         u32 ctx_validation;
1911
1912         /* Enable validation for connection region 3 - bits [31:24] */
1913         ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 24;
1914         ecore_wr(p_hwfn, p_ptt, CDU_REG_CCFC_CTX_VALID0, ctx_validation);
1915
1916         /* Enable validation for connection region 5 - bits [15: 8] */
1917         ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 8;
1918         ecore_wr(p_hwfn, p_ptt, CDU_REG_CCFC_CTX_VALID1, ctx_validation);
1919
1920         /* Enable validation for connection region 1 - bits [15: 8] */
1921         ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 8;
1922         ecore_wr(p_hwfn, p_ptt, CDU_REG_TCFC_CTX_VALID0, ctx_validation);
1923 }