1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright (c) 2016 - 2018 Cavium Inc.
9 #include "common_hsi.h"
10 #include "ecore_hsi_common.h"
11 #include "ecore_hsi_eth.h"
12 #include "ecore_rt_defs.h"
13 #include "ecore_status.h"
15 #include "ecore_init_ops.h"
16 #include "ecore_init_fw_funcs.h"
17 #include "ecore_cxt.h"
19 #include "ecore_dev_api.h"
20 #include "ecore_sriov.h"
21 #include "ecore_mcp.h"
23 /* Max number of connection types in HW (DQ/CDU etc.) */
24 #define MAX_CONN_TYPES PROTOCOLID_COMMON
25 #define NUM_TASK_TYPES 2
26 #define NUM_TASK_PF_SEGMENTS 4
27 #define NUM_TASK_VF_SEGMENTS 1
29 /* Doorbell-Queue constants */
30 #define DQ_RANGE_SHIFT 4
31 #define DQ_RANGE_ALIGN (1 << DQ_RANGE_SHIFT)
33 /* Searcher constants */
34 #define SRC_MIN_NUM_ELEMS 256
36 /* Timers constants */
38 #define TM_ALIGN (1 << TM_SHIFT)
39 #define TM_ELEM_SIZE 4
42 #define ILT_DEFAULT_HW_P_SIZE 4
44 #define ILT_PAGE_IN_BYTES(hw_p_size) (1U << ((hw_p_size) + 12))
45 #define ILT_CFG_REG(cli, reg) PSWRQ2_REG_##cli##_##reg##_RT_OFFSET
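/* Worked example: with the default ILT_DEFAULT_HW_P_SIZE of 4,
 * ILT_PAGE_IN_BYTES(4) = 1U << (4 + 12) = 64K, which matches the default
 * page size programmed for every ILT client further below.
 */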
47 /* ILT entry structure */
48 #define ILT_ENTRY_PHY_ADDR_MASK 0x000FFFFFFFFFFFULL
49 #define ILT_ENTRY_PHY_ADDR_SHIFT 0
50 #define ILT_ENTRY_VALID_MASK 0x1ULL
51 #define ILT_ENTRY_VALID_SHIFT 52
52 #define ILT_ENTRY_IN_REGS 2
53 #define ILT_REG_SIZE_IN_BYTES 4
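/* Illustrative sketch (hypothetical helper, not part of the build): how the
 * ILT_ENTRY_* fields above pack into the 64-bit ILT entry that the init code
 * later builds with SET_FIELD() and writes via STORE_RT_REG_AGG()/DMAE.
 */
#if 0
static u64 example_pack_ilt_entry(u64 phys_addr)
{
	u64 entry = 0;

	/* the physical address is stored in 4K granularity (addr >> 12) */
	entry |= ((phys_addr >> 12) & ILT_ENTRY_PHY_ADDR_MASK) <<
		 ILT_ENTRY_PHY_ADDR_SHIFT;

	/* mark the entry as valid */
	entry |= ILT_ENTRY_VALID_MASK << ILT_ENTRY_VALID_SHIFT;

	/* the result spans ILT_ENTRY_IN_REGS (2) 32-bit runtime registers */
	return entry;
}
#endif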
55 /* connection context union */
57 struct e4_core_conn_context core_ctx;
58 struct e4_eth_conn_context eth_ctx;
61 /* TYPE-0 task context - iSCSI, FCOE */
62 union type0_task_context {
65 /* TYPE-1 task context - ROCE */
66 union type1_task_context {
67 struct regpair reserved; /* @DPDK */
75 #define CDUT_SEG_ALIGNMET 3 /* in 4k chunks */
76 #define CDUT_SEG_ALIGNMET_IN_BYTES (1 << (CDUT_SEG_ALIGNMET + 12))
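/* Worked example: with CDUT_SEG_ALIGNMET = 3 (i.e. 8 chunks of 4K),
 * CDUT_SEG_ALIGNMET_IN_BYTES = 1 << (3 + 12) = 32K per CDUT segment.
 */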
78 #define CONN_CXT_SIZE(p_hwfn) \
79 ALIGNED_TYPE_SIZE(union conn_context, p_hwfn)
81 #define SRQ_CXT_SIZE (sizeof(struct regpair) * 8) /* @DPDK */
83 #define TYPE0_TASK_CXT_SIZE(p_hwfn) \
84 ALIGNED_TYPE_SIZE(union type0_task_context, p_hwfn)
86 /* Alignment is inherent to the type1_task_context structure */
87 #define TYPE1_TASK_CXT_SIZE(p_hwfn) sizeof(union type1_task_context)
89 /* PF per protocol configuration object */
90 #define TASK_SEGMENTS (NUM_TASK_PF_SEGMENTS + NUM_TASK_VF_SEGMENTS)
91 #define TASK_SEGMENT_VF (NUM_TASK_PF_SEGMENTS)
93 struct ecore_tid_seg {
99 struct ecore_conn_type_cfg {
102 struct ecore_tid_seg tid_seg[TASK_SEGMENTS];
105 /* ILT client configuration:
106 * per connection type (protocol) resources (cids, tids, vf cids, etc.).
107 * One block for the connection context (CDUC), plus two blocks per task
108 * context: one for the regular task context and one for force-load memory
110 #define ILT_CLI_PF_BLOCKS (1 + NUM_TASK_PF_SEGMENTS * 2)
111 #define ILT_CLI_VF_BLOCKS (1 + NUM_TASK_VF_SEGMENTS * 2)
114 #define CDUT_SEG_BLK(n) (1 + (u8)(n))
115 #define CDUT_FL_SEG_BLK(n, X) (1 + (n) + NUM_TASK_##X##_SEGMENTS)
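/* Worked example of the resulting PF block layout (NUM_TASK_PF_SEGMENTS = 4):
 *   pf_blks[0]                         - connection context (CDUC)
 *   pf_blks[CDUT_SEG_BLK(0..3)]        - indices 1..4, 'working' task memory
 *   pf_blks[CDUT_FL_SEG_BLK(0..3, PF)] - indices 5..8, force-load memory
 * for a total of ILT_CLI_PF_BLOCKS = 1 + 4 * 2 = 9 entries.
 */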
127 struct ilt_cfg_pair {
132 struct ecore_ilt_cli_blk {
133 u32 total_size; /* 0 means not active */
134 u32 real_size_in_page;
136 u32 dynamic_line_cnt;
139 struct ecore_ilt_client_cfg {
143 struct ilt_cfg_pair first;
144 struct ilt_cfg_pair last;
145 struct ilt_cfg_pair p_size;
147 /* ILT client blocks for PF */
148 struct ecore_ilt_cli_blk pf_blks[ILT_CLI_PF_BLOCKS];
151 /* ILT client blocks for VFs */
152 struct ecore_ilt_cli_blk vf_blks[ILT_CLI_VF_BLOCKS];
158 * Protocol acquired CID lists
159 * PF start line in ILT
161 struct ecore_dma_mem {
167 #define MAP_WORD_SIZE sizeof(unsigned long)
168 #define BITS_PER_MAP_WORD (MAP_WORD_SIZE * 8)
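/* Worked example: on a 64-bit host MAP_WORD_SIZE is 8, so BITS_PER_MAP_WORD
 * is 64; a protocol with 1000 CIDs therefore needs
 * DIV_ROUND_UP(1000, 64) = 16 map words, i.e. 128 bytes of cid_map.
 */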
170 struct ecore_cid_acquired_map {
173 unsigned long *cid_map;
176 struct ecore_cxt_mngr {
177 /* Per protocol configuration */
178 struct ecore_conn_type_cfg conn_cfg[MAX_CONN_TYPES];
180 /* computed ILT structure */
181 struct ecore_ilt_client_cfg clients[ILT_CLI_MAX];
183 /* Task type sizes */
184 u32 task_type_size[NUM_TASK_TYPES];
186 /* total number of VFs for this hwfn -
187 * ALL VFs are symmetric in terms of HW resources
192 struct ecore_cid_acquired_map acquired[MAX_CONN_TYPES];
193 /* TBD - do we want this allocated to reserve space? */
194 struct ecore_cid_acquired_map
195 acquired_vf[MAX_CONN_TYPES][COMMON_MAX_NUM_VFS];
197 /* ILT shadow table */
198 struct ecore_dma_mem *ilt_shadow;
201 /* Mutex for a dynamic ILT allocation */
205 struct ecore_dma_mem *t2;
210 /* The infrastructure originally was very generic and context/task
211 * oriented - per connection-type we would set how many of those
212 * are needed, and later, when determining how much memory is
213 * needed for a given block, we'd iterate over all the relevant
215 * But since then we've had some additional resources, some of which
216 * require memory which is independent of the general context/task
217 * scheme. We add those here explicitly per-feature.
220 /* total number of SRQs for this hwfn */
223 /* Maximal number of L2 steering filters */
226 /* TODO - VF arfs filters ? */
229 static OSAL_INLINE bool tm_cid_proto(enum protocol_type type)
231 return type == PROTOCOLID_TOE;
234 static bool tm_tid_proto(enum protocol_type type)
236 return type == PROTOCOLID_FCOE;
239 /* counts the iids for the CDU/CDUC ILT client configuration */
240 struct ecore_cdu_iids {
245 static void ecore_cxt_cdu_iids(struct ecore_cxt_mngr *p_mngr,
246 struct ecore_cdu_iids *iids)
250 for (type = 0; type < MAX_CONN_TYPES; type++) {
251 iids->pf_cids += p_mngr->conn_cfg[type].cid_count;
252 iids->per_vf_cids += p_mngr->conn_cfg[type].cids_per_vf;
256 /* counts the iids for the Searcher block configuration */
257 struct ecore_src_iids {
262 static void ecore_cxt_src_iids(struct ecore_cxt_mngr *p_mngr,
263 struct ecore_src_iids *iids)
267 for (i = 0; i < MAX_CONN_TYPES; i++) {
268 iids->pf_cids += p_mngr->conn_cfg[i].cid_count;
269 iids->per_vf_cids += p_mngr->conn_cfg[i].cids_per_vf;
272 /* Add L2 filtering filters in addition */
273 iids->pf_cids += p_mngr->arfs_count;
276 /* counts the iids for the Timers block configuration */
277 struct ecore_tm_iids {
279 u32 pf_tids[NUM_TASK_PF_SEGMENTS]; /* per segment */
285 static void ecore_cxt_tm_iids(struct ecore_cxt_mngr *p_mngr,
286 struct ecore_tm_iids *iids)
288 bool tm_vf_required = false;
289 bool tm_required = false;
292 for (i = 0; i < MAX_CONN_TYPES; i++) {
293 struct ecore_conn_type_cfg *p_cfg = &p_mngr->conn_cfg[i];
295 if (tm_cid_proto(i) || tm_required) {
296 if (p_cfg->cid_count)
299 iids->pf_cids += p_cfg->cid_count;
302 if (tm_cid_proto(i) || tm_vf_required) {
303 if (p_cfg->cids_per_vf)
304 tm_vf_required = true;
308 if (tm_tid_proto(i)) {
309 struct ecore_tid_seg *segs = p_cfg->tid_seg;
311 /* for each segment there is at most one
312 * protocol for which count is not 0.
314 for (j = 0; j < NUM_TASK_PF_SEGMENTS; j++)
315 iids->pf_tids[j] += segs[j].count;
317 /* The last array element is for the VFs. As for PF
318 * segments there can be only one protocol for
319 * which this value is not 0.
321 iids->per_vf_tids += segs[NUM_TASK_PF_SEGMENTS].count;
325 iids->pf_cids = ROUNDUP(iids->pf_cids, TM_ALIGN);
326 iids->per_vf_cids = ROUNDUP(iids->per_vf_cids, TM_ALIGN);
327 iids->per_vf_tids = ROUNDUP(iids->per_vf_tids, TM_ALIGN);
329 for (iids->pf_tids_total = 0, j = 0; j < NUM_TASK_PF_SEGMENTS; j++) {
330 iids->pf_tids[j] = ROUNDUP(iids->pf_tids[j], TM_ALIGN);
331 iids->pf_tids_total += iids->pf_tids[j];
335 static void ecore_cxt_qm_iids(struct ecore_hwfn *p_hwfn,
336 struct ecore_qm_iids *iids)
338 struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
339 struct ecore_tid_seg *segs;
340 u32 vf_cids = 0, type, j;
343 for (type = 0; type < MAX_CONN_TYPES; type++) {
344 iids->cids += p_mngr->conn_cfg[type].cid_count;
345 vf_cids += p_mngr->conn_cfg[type].cids_per_vf;
347 segs = p_mngr->conn_cfg[type].tid_seg;
348 /* for each segment there is at most one
349 * protocol for which count is not 0.
351 for (j = 0; j < NUM_TASK_PF_SEGMENTS; j++)
352 iids->tids += segs[j].count;
354 /* The last array element is for the VFs. As for PF
355 * segments there can be only one protocol for
356 * which this value is not 0.
358 vf_tids += segs[NUM_TASK_PF_SEGMENTS].count;
361 iids->vf_cids += vf_cids * p_mngr->vf_count;
362 iids->tids += vf_tids * p_mngr->vf_count;
364 DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
365 "iids: CIDS %08x vf_cids %08x tids %08x vf_tids %08x\n",
366 iids->cids, iids->vf_cids, iids->tids, vf_tids);
369 static struct ecore_tid_seg *ecore_cxt_tid_seg_info(struct ecore_hwfn *p_hwfn,
372 struct ecore_cxt_mngr *p_cfg = p_hwfn->p_cxt_mngr;
375 /* Find the protocol with tid count > 0 for this segment.
376 * Note: there can only be one and this is already validated.
378 for (i = 0; i < MAX_CONN_TYPES; i++) {
379 if (p_cfg->conn_cfg[i].tid_seg[seg].count)
380 return &p_cfg->conn_cfg[i].tid_seg[seg];
385 static void ecore_cxt_set_srq_count(struct ecore_hwfn *p_hwfn, u32 num_srqs)
387 struct ecore_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
389 p_mgr->srq_count = num_srqs;
392 u32 ecore_cxt_get_srq_count(struct ecore_hwfn *p_hwfn)
394 struct ecore_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
396 return p_mgr->srq_count;
399 /* set the iids (cid/tid) count per protocol */
400 static void ecore_cxt_set_proto_cid_count(struct ecore_hwfn *p_hwfn,
401 enum protocol_type type,
402 u32 cid_count, u32 vf_cid_cnt)
404 struct ecore_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr;
405 struct ecore_conn_type_cfg *p_conn = &p_mgr->conn_cfg[type];
407 p_conn->cid_count = ROUNDUP(cid_count, DQ_RANGE_ALIGN);
408 p_conn->cids_per_vf = ROUNDUP(vf_cid_cnt, DQ_RANGE_ALIGN);
411 u32 ecore_cxt_get_proto_cid_count(struct ecore_hwfn *p_hwfn,
412 enum protocol_type type, u32 *vf_cid)
415 *vf_cid = p_hwfn->p_cxt_mngr->conn_cfg[type].cids_per_vf;
417 return p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count;
420 u32 ecore_cxt_get_proto_cid_start(struct ecore_hwfn *p_hwfn,
421 enum protocol_type type)
423 return p_hwfn->p_cxt_mngr->acquired[type].start_cid;
426 u32 ecore_cxt_get_proto_tid_count(struct ecore_hwfn *p_hwfn,
427 enum protocol_type type)
432 for (i = 0; i < TASK_SEGMENTS; i++)
433 cnt += p_hwfn->p_cxt_mngr->conn_cfg[type].tid_seg[i].count;
438 static OSAL_INLINE void
439 ecore_cxt_set_proto_tid_count(struct ecore_hwfn *p_hwfn,
440 enum protocol_type proto,
441 u8 seg, u8 seg_type, u32 count, bool has_fl)
443 struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
444 struct ecore_tid_seg *p_seg = &p_mngr->conn_cfg[proto].tid_seg[seg];
446 p_seg->count = count;
447 p_seg->has_fl_mem = has_fl;
448 p_seg->type = seg_type;
451 /* the *p_line parameter must be either 0 for the first invocation or the
452 * value returned in the previous invocation.
454 static void ecore_ilt_cli_blk_fill(struct ecore_ilt_client_cfg *p_cli,
455 struct ecore_ilt_cli_blk *p_blk,
457 u32 total_size, u32 elem_size)
459 u32 ilt_size = ILT_PAGE_IN_BYTES(p_cli->p_size.val);
461 /* verify that it's called once for each block */
462 if (p_blk->total_size)
465 p_blk->total_size = total_size;
466 p_blk->real_size_in_page = 0;
468 p_blk->real_size_in_page = (ilt_size / elem_size) * elem_size;
469 p_blk->start_line = start_line;
472 static void ecore_ilt_cli_adv_line(struct ecore_hwfn *p_hwfn,
473 struct ecore_ilt_client_cfg *p_cli,
474 struct ecore_ilt_cli_blk *p_blk,
475 u32 *p_line, enum ilt_clients client_id)
477 if (!p_blk->total_size)
481 p_cli->first.val = *p_line;
483 p_cli->active = true;
484 *p_line += DIV_ROUND_UP(p_blk->total_size, p_blk->real_size_in_page);
485 p_cli->last.val = *p_line - 1;
487 DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
488 "ILT[Client %d] - Lines: [%08x - %08x]. Block - Size %08x"
489 " [Real %08x] Start line %d\n",
490 client_id, p_cli->first.val, p_cli->last.val,
491 p_blk->total_size, p_blk->real_size_in_page,
495 static u32 ecore_ilt_get_dynamic_line_cnt(struct ecore_hwfn *p_hwfn,
496 enum ilt_clients ilt_client)
498 u32 cid_count = p_hwfn->p_cxt_mngr->conn_cfg[PROTOCOLID_ROCE].cid_count;
499 struct ecore_ilt_client_cfg *p_cli;
500 u32 lines_to_skip = 0;
503 /* TBD MK: ILT code should be simplified once PROTO enum is changed */
505 if (ilt_client == ILT_CLI_CDUC) {
506 p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
508 cxts_per_p = ILT_PAGE_IN_BYTES(p_cli->p_size.val) /
509 (u32)CONN_CXT_SIZE(p_hwfn);
511 lines_to_skip = cid_count / cxts_per_p;
514 return lines_to_skip;
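/* Worked example (illustrative numbers): with a 64K ILT page and a 512B
 * connection context, cxts_per_p = 65536 / 512 = 128; 1024 RoCE cids then
 * yield lines_to_skip = 1024 / 128 = 8 lines left for dynamic allocation.
 */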
517 enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn)
519 struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
520 u32 curr_line, total, i, task_size, line;
521 struct ecore_ilt_client_cfg *p_cli;
522 struct ecore_ilt_cli_blk *p_blk;
523 struct ecore_cdu_iids cdu_iids;
524 struct ecore_src_iids src_iids;
525 struct ecore_qm_iids qm_iids;
526 struct ecore_tm_iids tm_iids;
527 struct ecore_tid_seg *p_seg;
529 OSAL_MEM_ZERO(&qm_iids, sizeof(qm_iids));
530 OSAL_MEM_ZERO(&cdu_iids, sizeof(cdu_iids));
531 OSAL_MEM_ZERO(&src_iids, sizeof(src_iids));
532 OSAL_MEM_ZERO(&tm_iids, sizeof(tm_iids));
534 p_mngr->pf_start_line = RESC_START(p_hwfn, ECORE_ILT);
536 DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
537 "hwfn [%d] - Set context mngr starting line to be 0x%08x\n",
538 p_hwfn->my_id, p_hwfn->p_cxt_mngr->pf_start_line);
541 p_cli = &p_mngr->clients[ILT_CLI_CDUC];
543 curr_line = p_mngr->pf_start_line;
546 p_cli->pf_total_lines = 0;
548 /* get the counters for the CDUC, CDUT and QM clients */
549 ecore_cxt_cdu_iids(p_mngr, &cdu_iids);
551 p_blk = &p_cli->pf_blks[CDUC_BLK];
553 total = cdu_iids.pf_cids * CONN_CXT_SIZE(p_hwfn);
555 ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
556 total, CONN_CXT_SIZE(p_hwfn));
558 ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC);
559 p_cli->pf_total_lines = curr_line - p_blk->start_line;
561 p_blk->dynamic_line_cnt = ecore_ilt_get_dynamic_line_cnt(p_hwfn,
565 p_blk = &p_cli->vf_blks[CDUC_BLK];
566 total = cdu_iids.per_vf_cids * CONN_CXT_SIZE(p_hwfn);
568 ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
569 total, CONN_CXT_SIZE(p_hwfn));
571 ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC);
572 p_cli->vf_total_lines = curr_line - p_blk->start_line;
574 for (i = 1; i < p_mngr->vf_count; i++)
575 ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
579 p_cli = &p_mngr->clients[ILT_CLI_CDUT];
580 p_cli->first.val = curr_line;
582 /* first the 'working' task memory */
583 for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
584 p_seg = ecore_cxt_tid_seg_info(p_hwfn, i);
585 if (!p_seg || p_seg->count == 0)
588 p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(i)];
589 total = p_seg->count * p_mngr->task_type_size[p_seg->type];
590 ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line, total,
591 p_mngr->task_type_size[p_seg->type]);
593 ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
597 /* next the 'init' task memory (forced load memory) */
598 for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
599 p_seg = ecore_cxt_tid_seg_info(p_hwfn, i);
600 if (!p_seg || p_seg->count == 0)
603 p_blk = &p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)];
605 if (!p_seg->has_fl_mem) {
606 /* The segment is active (total size of 'working'
607 * memory is > 0) but has no FL (forced-load, init)
610 * 1. The total-size in the corresponding FL block of
611 * the ILT client is set to 0 - no ILT lines are
612 * provisioned and no ILT memory is allocated.
614 * 2. The start-line of said block is set to the
615 * start line of the matching working memory
616 * block in the ILT client. This is later used to
617 * configure the CDU segment offset registers, so
618 * that an FL command for TIDs of this
619 * segment behaves as a regular load command
620 * (loading TIDs from the working memory).
622 line = p_cli->pf_blks[CDUT_SEG_BLK(i)].start_line;
624 ecore_ilt_cli_blk_fill(p_cli, p_blk, line, 0, 0);
627 total = p_seg->count * p_mngr->task_type_size[p_seg->type];
629 ecore_ilt_cli_blk_fill(p_cli, p_blk,
631 p_mngr->task_type_size[p_seg->type]);
633 ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
636 p_cli->pf_total_lines = curr_line - p_cli->pf_blks[0].start_line;
639 p_seg = ecore_cxt_tid_seg_info(p_hwfn, TASK_SEGMENT_VF);
640 if (p_seg && p_seg->count) {
641 /* Strictly speaking we need to iterate over all VF
642 * task segment types, but a VF has only 1 segment
645 /* 'working' memory */
646 total = p_seg->count * p_mngr->task_type_size[p_seg->type];
648 p_blk = &p_cli->vf_blks[CDUT_SEG_BLK(0)];
649 ecore_ilt_cli_blk_fill(p_cli, p_blk,
651 p_mngr->task_type_size[p_seg->type]);
653 ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
657 p_blk = &p_cli->vf_blks[CDUT_FL_SEG_BLK(0, VF)];
658 if (!p_seg->has_fl_mem) {
659 /* see comment above */
660 line = p_cli->vf_blks[CDUT_SEG_BLK(0)].start_line;
661 ecore_ilt_cli_blk_fill(p_cli, p_blk, line, 0, 0);
663 task_size = p_mngr->task_type_size[p_seg->type];
664 ecore_ilt_cli_blk_fill(p_cli, p_blk,
665 curr_line, total, task_size);
666 ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
669 p_cli->vf_total_lines = curr_line -
670 p_cli->vf_blks[0].start_line;
672 /* Now for the rest of the VFs */
673 for (i = 1; i < p_mngr->vf_count; i++) {
674 p_blk = &p_cli->vf_blks[CDUT_SEG_BLK(0)];
675 ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
678 p_blk = &p_cli->vf_blks[CDUT_FL_SEG_BLK(0, VF)];
679 ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
685 p_cli = &p_mngr->clients[ILT_CLI_QM];
686 p_blk = &p_cli->pf_blks[0];
688 ecore_cxt_qm_iids(p_hwfn, &qm_iids);
689 total = ecore_qm_pf_mem_size(qm_iids.cids,
690 qm_iids.vf_cids, qm_iids.tids,
691 p_hwfn->qm_info.num_pqs,
692 p_hwfn->qm_info.num_vf_pqs);
694 DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
695 "QM ILT Info, (cids=%d, vf_cids=%d, tids=%d, num_pqs=%d,"
696 " num_vf_pqs=%d, memory_size=%d)\n",
697 qm_iids.cids, qm_iids.vf_cids, qm_iids.tids,
698 p_hwfn->qm_info.num_pqs, p_hwfn->qm_info.num_vf_pqs, total);
700 ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line, total * 0x1000,
703 ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_QM);
704 p_cli->pf_total_lines = curr_line - p_blk->start_line;
707 p_cli = &p_mngr->clients[ILT_CLI_SRC];
708 ecore_cxt_src_iids(p_mngr, &src_iids);
710 /* Both the PF and VFs searcher connections are stored in the per PF
711 * database. Thus sum the PF searcher cids and all the VFs searcher
714 total = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count;
716 u32 local_max = OSAL_MAX_T(u32, total,
719 total = OSAL_ROUNDUP_POW_OF_TWO(local_max);
721 p_blk = &p_cli->pf_blks[0];
722 ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
723 total * sizeof(struct src_ent),
724 sizeof(struct src_ent));
726 ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
728 p_cli->pf_total_lines = curr_line - p_blk->start_line;
732 p_cli = &p_mngr->clients[ILT_CLI_TM];
733 ecore_cxt_tm_iids(p_mngr, &tm_iids);
734 total = tm_iids.pf_cids + tm_iids.pf_tids_total;
736 p_blk = &p_cli->pf_blks[0];
737 ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
738 total * TM_ELEM_SIZE, TM_ELEM_SIZE);
740 ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
742 p_cli->pf_total_lines = curr_line - p_blk->start_line;
746 total = tm_iids.per_vf_cids + tm_iids.per_vf_tids;
748 p_blk = &p_cli->vf_blks[0];
749 ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
750 total * TM_ELEM_SIZE, TM_ELEM_SIZE);
752 ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
755 p_cli->vf_total_lines = curr_line - p_blk->start_line;
756 for (i = 1; i < p_mngr->vf_count; i++) {
757 ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
762 /* TSDM (SRQ CONTEXT) */
763 total = ecore_cxt_get_srq_count(p_hwfn);
766 p_cli = &p_mngr->clients[ILT_CLI_TSDM];
767 p_blk = &p_cli->pf_blks[SRQ_BLK];
768 ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line,
769 total * SRQ_CXT_SIZE, SRQ_CXT_SIZE);
771 ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line,
773 p_cli->pf_total_lines = curr_line - p_blk->start_line;
776 if (curr_line - p_hwfn->p_cxt_mngr->pf_start_line >
777 RESC_NUM(p_hwfn, ECORE_ILT)) {
778 DP_ERR(p_hwfn, "too many ilt lines...#lines=%d\n",
779 curr_line - p_hwfn->p_cxt_mngr->pf_start_line);
783 return ECORE_SUCCESS;
786 static void ecore_cxt_src_t2_free(struct ecore_hwfn *p_hwfn)
788 struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
794 for (i = 0; i < p_mngr->t2_num_pages; i++)
795 if (p_mngr->t2[i].p_virt)
796 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
797 p_mngr->t2[i].p_virt,
798 p_mngr->t2[i].p_phys,
801 OSAL_FREE(p_hwfn->p_dev, p_mngr->t2);
804 static enum _ecore_status_t ecore_cxt_src_t2_alloc(struct ecore_hwfn *p_hwfn)
806 struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
807 u32 conn_num, total_size, ent_per_page, psz, i;
808 struct ecore_ilt_client_cfg *p_src;
809 struct ecore_src_iids src_iids;
810 struct ecore_dma_mem *p_t2;
811 enum _ecore_status_t rc;
813 OSAL_MEM_ZERO(&src_iids, sizeof(src_iids));
815 /* if the SRC ILT client is inactive - there are no connections
816 * requiring the searcher, leave.
818 p_src = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_SRC];
820 return ECORE_SUCCESS;
822 ecore_cxt_src_iids(p_mngr, &src_iids);
823 conn_num = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count;
824 total_size = conn_num * sizeof(struct src_ent);
826 /* use the same page size as the SRC ILT client */
827 psz = ILT_PAGE_IN_BYTES(p_src->p_size.val);
828 p_mngr->t2_num_pages = DIV_ROUND_UP(total_size, psz);
831 p_mngr->t2 = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
832 p_mngr->t2_num_pages *
833 sizeof(struct ecore_dma_mem));
835 DP_NOTICE(p_hwfn, false, "Failed to allocate t2 table\n");
840 /* allocate t2 pages */
841 for (i = 0; i < p_mngr->t2_num_pages; i++) {
842 u32 size = OSAL_MIN_T(u32, total_size, psz);
843 void **p_virt = &p_mngr->t2[i].p_virt;
845 *p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
846 &p_mngr->t2[i].p_phys, size);
847 if (!p_mngr->t2[i].p_virt) {
851 OSAL_MEM_ZERO(*p_virt, size);
852 p_mngr->t2[i].size = size;
856 /* Set the t2 pointers */
858 /* entries per page - must be a power of two */
859 ent_per_page = psz / sizeof(struct src_ent);
861 p_mngr->first_free = (u64)p_mngr->t2[0].p_phys;
863 p_t2 = &p_mngr->t2[(conn_num - 1) / ent_per_page];
864 p_mngr->last_free = (u64)p_t2->p_phys +
865 ((conn_num - 1) & (ent_per_page - 1)) * sizeof(struct src_ent);
867 for (i = 0; i < p_mngr->t2_num_pages; i++) {
868 u32 ent_num = OSAL_MIN_T(u32, ent_per_page, conn_num);
869 struct src_ent *entries = p_mngr->t2[i].p_virt;
870 u64 p_ent_phys = (u64)p_mngr->t2[i].p_phys, val;
873 for (j = 0; j < ent_num - 1; j++) {
874 val = p_ent_phys + (j + 1) * sizeof(struct src_ent);
875 entries[j].next = OSAL_CPU_TO_BE64(val);
878 if (i < p_mngr->t2_num_pages - 1)
879 val = (u64)p_mngr->t2[i + 1].p_phys;
882 entries[j].next = OSAL_CPU_TO_BE64(val);
887 return ECORE_SUCCESS;
890 ecore_cxt_src_t2_free(p_hwfn);
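/* Illustrative sketch (hypothetical helper, not compiled): the t2 setup above
 * links all pages into a single free list - entry j points at the big-endian
 * physical address of entry j + 1, and the last entry of each page points at
 * the first entry of the next page.
 */
#if 0
static void example_chain_t2_page(struct src_ent *entries, u64 page_phys,
				  u32 ent_num, u64 next_page_phys)
{
	u32 j;

	for (j = 0; j < ent_num - 1; j++)
		entries[j].next =
		    OSAL_CPU_TO_BE64(page_phys + (j + 1) * sizeof(*entries));

	/* the final entry crosses into the following page (0 for the last) */
	entries[j].next = OSAL_CPU_TO_BE64(next_page_phys);
}
#endif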
894 #define for_each_ilt_valid_client(pos, clients) \
895 for (pos = 0; pos < ILT_CLI_MAX; pos++) \
896 if (!clients[pos].active) { \
901 /* Total number of ILT lines used by this PF */
902 static u32 ecore_cxt_ilt_shadow_size(struct ecore_ilt_client_cfg *ilt_clients)
907 for_each_ilt_valid_client(i, ilt_clients)
908 size += (ilt_clients[i].last.val -
909 ilt_clients[i].first.val + 1);
914 static void ecore_ilt_shadow_free(struct ecore_hwfn *p_hwfn)
916 struct ecore_ilt_client_cfg *p_cli = p_hwfn->p_cxt_mngr->clients;
917 struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
920 if (p_mngr->ilt_shadow == OSAL_NULL)
923 ilt_size = ecore_cxt_ilt_shadow_size(p_cli);
925 for (i = 0; p_mngr->ilt_shadow && i < ilt_size; i++) {
926 struct ecore_dma_mem *p_dma = &p_mngr->ilt_shadow[i];
929 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
931 p_dma->p_phys, p_dma->size);
932 p_dma->p_virt = OSAL_NULL;
934 OSAL_FREE(p_hwfn->p_dev, p_mngr->ilt_shadow);
935 p_mngr->ilt_shadow = OSAL_NULL;
938 static enum _ecore_status_t
939 ecore_ilt_blk_alloc(struct ecore_hwfn *p_hwfn,
940 struct ecore_ilt_cli_blk *p_blk,
941 enum ilt_clients ilt_client, u32 start_line_offset)
943 struct ecore_dma_mem *ilt_shadow = p_hwfn->p_cxt_mngr->ilt_shadow;
944 u32 lines, line, sz_left, lines_to_skip = 0;
946 /* Special handling for RoCE that supports dynamic allocation */
947 if (ilt_client == ILT_CLI_CDUT || ilt_client == ILT_CLI_TSDM)
948 return ECORE_SUCCESS;
950 lines_to_skip = p_blk->dynamic_line_cnt;
952 if (!p_blk->total_size)
953 return ECORE_SUCCESS;
955 sz_left = p_blk->total_size;
956 lines = DIV_ROUND_UP(sz_left, p_blk->real_size_in_page) - lines_to_skip;
957 line = p_blk->start_line + start_line_offset -
958 p_hwfn->p_cxt_mngr->pf_start_line + lines_to_skip;
960 for (; lines; lines--) {
965 size = OSAL_MIN_T(u32, sz_left, p_blk->real_size_in_page);
968 #define ILT_BLOCK_ALIGN_SIZE 0x1000
969 p_virt = OSAL_DMA_ALLOC_COHERENT_ALIGNED(p_hwfn->p_dev,
971 ILT_BLOCK_ALIGN_SIZE);
974 OSAL_MEM_ZERO(p_virt, size);
976 ilt_shadow[line].p_phys = p_phys;
977 ilt_shadow[line].p_virt = p_virt;
978 ilt_shadow[line].size = size;
980 DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
981 "ILT shadow: Line [%d] Physical 0x%lx"
982 " Virtual %p Size %d\n",
983 line, (unsigned long)p_phys, p_virt, size);
989 return ECORE_SUCCESS;
992 static enum _ecore_status_t ecore_ilt_shadow_alloc(struct ecore_hwfn *p_hwfn)
994 struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
995 struct ecore_ilt_client_cfg *clients = p_mngr->clients;
996 struct ecore_ilt_cli_blk *p_blk;
998 enum _ecore_status_t rc;
1000 size = ecore_cxt_ilt_shadow_size(clients);
1001 p_mngr->ilt_shadow = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
1002 size * sizeof(struct ecore_dma_mem));
1004 if (!p_mngr->ilt_shadow) {
1005 DP_NOTICE(p_hwfn, false, "Failed to allocate ilt shadow table\n");
1007 goto ilt_shadow_fail;
1010 DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
1011 "Allocated 0x%x bytes for ilt shadow\n",
1012 (u32)(size * sizeof(struct ecore_dma_mem)));
1014 for_each_ilt_valid_client(i, clients) {
1015 for (j = 0; j < ILT_CLI_PF_BLOCKS; j++) {
1016 p_blk = &clients[i].pf_blks[j];
1017 rc = ecore_ilt_blk_alloc(p_hwfn, p_blk, i, 0);
1018 if (rc != ECORE_SUCCESS)
1019 goto ilt_shadow_fail;
1021 for (k = 0; k < p_mngr->vf_count; k++) {
1022 for (j = 0; j < ILT_CLI_VF_BLOCKS; j++) {
1023 u32 lines = clients[i].vf_total_lines * k;
1025 p_blk = &clients[i].vf_blks[j];
1026 rc = ecore_ilt_blk_alloc(p_hwfn, p_blk,
1028 if (rc != ECORE_SUCCESS)
1029 goto ilt_shadow_fail;
1034 return ECORE_SUCCESS;
1037 ecore_ilt_shadow_free(p_hwfn);
1041 static void ecore_cid_map_free(struct ecore_hwfn *p_hwfn)
1043 struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
1046 for (type = 0; type < MAX_CONN_TYPES; type++) {
1047 OSAL_FREE(p_hwfn->p_dev, p_mngr->acquired[type].cid_map);
1048 p_mngr->acquired[type].cid_map = OSAL_NULL;
1049 p_mngr->acquired[type].max_count = 0;
1050 p_mngr->acquired[type].start_cid = 0;
1052 for (vf = 0; vf < COMMON_MAX_NUM_VFS; vf++) {
1053 OSAL_FREE(p_hwfn->p_dev,
1054 p_mngr->acquired_vf[type][vf].cid_map);
1055 p_mngr->acquired_vf[type][vf].cid_map = OSAL_NULL;
1056 p_mngr->acquired_vf[type][vf].max_count = 0;
1057 p_mngr->acquired_vf[type][vf].start_cid = 0;
1062 static enum _ecore_status_t
1063 ecore_cid_map_alloc_single(struct ecore_hwfn *p_hwfn, u32 type,
1064 u32 cid_start, u32 cid_count,
1065 struct ecore_cid_acquired_map *p_map)
1070 return ECORE_SUCCESS;
1072 size = MAP_WORD_SIZE * DIV_ROUND_UP(cid_count, BITS_PER_MAP_WORD);
1073 p_map->cid_map = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size);
1074 if (p_map->cid_map == OSAL_NULL)
1077 p_map->max_count = cid_count;
1078 p_map->start_cid = cid_start;
1080 DP_VERBOSE(p_hwfn, ECORE_MSG_CXT,
1081 "Type %08x start: %08x count %08x\n",
1082 type, p_map->start_cid, p_map->max_count);
1084 return ECORE_SUCCESS;
1087 static enum _ecore_status_t ecore_cid_map_alloc(struct ecore_hwfn *p_hwfn)
1089 struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
1090 u32 start_cid = 0, vf_start_cid = 0;
1093 for (type = 0; type < MAX_CONN_TYPES; type++) {
1094 struct ecore_conn_type_cfg *p_cfg = &p_mngr->conn_cfg[type];
1095 struct ecore_cid_acquired_map *p_map;
1097 /* Handle PF maps */
1098 p_map = &p_mngr->acquired[type];
1099 if (ecore_cid_map_alloc_single(p_hwfn, type, start_cid,
1100 p_cfg->cid_count, p_map))
1103 /* Handle VF maps */
1104 for (vf = 0; vf < COMMON_MAX_NUM_VFS; vf++) {
1105 p_map = &p_mngr->acquired_vf[type][vf];
1106 if (ecore_cid_map_alloc_single(p_hwfn, type,
1113 start_cid += p_cfg->cid_count;
1114 vf_start_cid += p_cfg->cids_per_vf;
1117 return ECORE_SUCCESS;
1120 ecore_cid_map_free(p_hwfn);
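/* Worked example: if conn type 0 has cid_count 64 and type 1 has 192, the PF
 * map of type 0 covers CIDs [0..63] and type 1 starts at start_cid 64;
 * vf_start_cid accumulates independently using cids_per_vf.
 */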
1124 enum _ecore_status_t ecore_cxt_mngr_alloc(struct ecore_hwfn *p_hwfn)
1126 struct ecore_ilt_client_cfg *clients;
1127 struct ecore_cxt_mngr *p_mngr;
1130 p_mngr = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_mngr));
1132 DP_NOTICE(p_hwfn, false, "Failed to allocate `struct ecore_cxt_mngr'\n");
1136 /* Initialize ILT client registers */
1137 clients = p_mngr->clients;
1138 clients[ILT_CLI_CDUC].first.reg = ILT_CFG_REG(CDUC, FIRST_ILT);
1139 clients[ILT_CLI_CDUC].last.reg = ILT_CFG_REG(CDUC, LAST_ILT);
1140 clients[ILT_CLI_CDUC].p_size.reg = ILT_CFG_REG(CDUC, P_SIZE);
1142 clients[ILT_CLI_QM].first.reg = ILT_CFG_REG(QM, FIRST_ILT);
1143 clients[ILT_CLI_QM].last.reg = ILT_CFG_REG(QM, LAST_ILT);
1144 clients[ILT_CLI_QM].p_size.reg = ILT_CFG_REG(QM, P_SIZE);
1146 clients[ILT_CLI_TM].first.reg = ILT_CFG_REG(TM, FIRST_ILT);
1147 clients[ILT_CLI_TM].last.reg = ILT_CFG_REG(TM, LAST_ILT);
1148 clients[ILT_CLI_TM].p_size.reg = ILT_CFG_REG(TM, P_SIZE);
1150 clients[ILT_CLI_SRC].first.reg = ILT_CFG_REG(SRC, FIRST_ILT);
1151 clients[ILT_CLI_SRC].last.reg = ILT_CFG_REG(SRC, LAST_ILT);
1152 clients[ILT_CLI_SRC].p_size.reg = ILT_CFG_REG(SRC, P_SIZE);
1154 clients[ILT_CLI_CDUT].first.reg = ILT_CFG_REG(CDUT, FIRST_ILT);
1155 clients[ILT_CLI_CDUT].last.reg = ILT_CFG_REG(CDUT, LAST_ILT);
1156 clients[ILT_CLI_CDUT].p_size.reg = ILT_CFG_REG(CDUT, P_SIZE);
1158 clients[ILT_CLI_TSDM].first.reg = ILT_CFG_REG(TSDM, FIRST_ILT);
1159 clients[ILT_CLI_TSDM].last.reg = ILT_CFG_REG(TSDM, LAST_ILT);
1160 clients[ILT_CLI_TSDM].p_size.reg = ILT_CFG_REG(TSDM, P_SIZE);
1162 /* default ILT page size for all clients is 64K */
1163 for (i = 0; i < ILT_CLI_MAX; i++)
1164 p_mngr->clients[i].p_size.val = ILT_DEFAULT_HW_P_SIZE;
1166 /* Due to the removal of the iSCSI/FCoE files, union type0_task_context
1167 * has size 0, so task_type_size is hardcoded for now.
1169 p_mngr->task_type_size[0] = 512; /* @DPDK */
1170 p_mngr->task_type_size[1] = 128; /* @DPDK */
1172 if (p_hwfn->p_dev->p_iov_info)
1173 p_mngr->vf_count = p_hwfn->p_dev->p_iov_info->total_vfs;
1175 /* Initialize the dynamic ILT allocation mutex */
1176 #ifdef CONFIG_ECORE_LOCK_ALLOC
1177 OSAL_MUTEX_ALLOC(p_hwfn, &p_mngr->mutex);
1179 OSAL_MUTEX_INIT(&p_mngr->mutex);
1181 /* Set the cxt manager pointer prior to further allocations */
1182 p_hwfn->p_cxt_mngr = p_mngr;
1184 return ECORE_SUCCESS;
1187 enum _ecore_status_t ecore_cxt_tables_alloc(struct ecore_hwfn *p_hwfn)
1189 enum _ecore_status_t rc;
1191 /* Allocate the ILT shadow table */
1192 rc = ecore_ilt_shadow_alloc(p_hwfn);
1194 DP_NOTICE(p_hwfn, false, "Failed to allocate ilt memory\n");
1195 goto tables_alloc_fail;
1198 /* Allocate the T2 table */
1199 rc = ecore_cxt_src_t2_alloc(p_hwfn);
1201 DP_NOTICE(p_hwfn, false, "Failed to allocate T2 memory\n");
1202 goto tables_alloc_fail;
1205 /* Allocate and initialize the acquired cids bitmaps */
1206 rc = ecore_cid_map_alloc(p_hwfn);
1208 DP_NOTICE(p_hwfn, false, "Failed to allocate cid maps\n");
1209 goto tables_alloc_fail;
1212 return ECORE_SUCCESS;
1215 ecore_cxt_mngr_free(p_hwfn);
1219 void ecore_cxt_mngr_free(struct ecore_hwfn *p_hwfn)
1221 if (!p_hwfn->p_cxt_mngr)
1224 ecore_cid_map_free(p_hwfn);
1225 ecore_cxt_src_t2_free(p_hwfn);
1226 ecore_ilt_shadow_free(p_hwfn);
1227 #ifdef CONFIG_ECORE_LOCK_ALLOC
1228 OSAL_MUTEX_DEALLOC(&p_hwfn->p_cxt_mngr->mutex);
1230 OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_cxt_mngr);
1233 void ecore_cxt_mngr_setup(struct ecore_hwfn *p_hwfn)
1235 struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
1236 struct ecore_cid_acquired_map *p_map;
1237 struct ecore_conn_type_cfg *p_cfg;
1241 /* Reset acquired cids */
1242 for (type = 0; type < MAX_CONN_TYPES; type++) {
1245 p_cfg = &p_mngr->conn_cfg[type];
1246 if (p_cfg->cid_count) {
1247 p_map = &p_mngr->acquired[type];
1248 len = DIV_ROUND_UP(p_map->max_count,
1249 BITS_PER_MAP_WORD) *
1251 OSAL_MEM_ZERO(p_map->cid_map, len);
1254 if (!p_cfg->cids_per_vf)
1257 for (vf = 0; vf < COMMON_MAX_NUM_VFS; vf++) {
1258 p_map = &p_mngr->acquired_vf[type][vf];
1259 len = DIV_ROUND_UP(p_map->max_count,
1260 BITS_PER_MAP_WORD) *
1262 OSAL_MEM_ZERO(p_map->cid_map, len);
1267 /* HW initialization helper (per Block, per phase) */
1270 #define CDUC_CXT_SIZE_SHIFT \
1271 CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE_SHIFT
1273 #define CDUC_CXT_SIZE_MASK \
1274 (CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE >> CDUC_CXT_SIZE_SHIFT)
1276 #define CDUC_BLOCK_WASTE_SHIFT \
1277 CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE_SHIFT
1279 #define CDUC_BLOCK_WASTE_MASK \
1280 (CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE >> CDUC_BLOCK_WASTE_SHIFT)
1282 #define CDUC_NCIB_SHIFT \
1283 CDU_REG_CID_ADDR_PARAMS_NCIB_SHIFT
1285 #define CDUC_NCIB_MASK \
1286 (CDU_REG_CID_ADDR_PARAMS_NCIB >> CDUC_NCIB_SHIFT)
1288 #define CDUT_TYPE0_CXT_SIZE_SHIFT \
1289 CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE_SHIFT
1291 #define CDUT_TYPE0_CXT_SIZE_MASK \
1292 (CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE >> \
1293 CDUT_TYPE0_CXT_SIZE_SHIFT)
1295 #define CDUT_TYPE0_BLOCK_WASTE_SHIFT \
1296 CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE_SHIFT
1298 #define CDUT_TYPE0_BLOCK_WASTE_MASK \
1299 (CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE >> \
1300 CDUT_TYPE0_BLOCK_WASTE_SHIFT)
1302 #define CDUT_TYPE0_NCIB_SHIFT \
1303 CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK_SHIFT
1305 #define CDUT_TYPE0_NCIB_MASK \
1306 (CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK >> \
1307 CDUT_TYPE0_NCIB_SHIFT)
1309 #define CDUT_TYPE1_CXT_SIZE_SHIFT \
1310 CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE_SHIFT
1312 #define CDUT_TYPE1_CXT_SIZE_MASK \
1313 (CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE >> \
1314 CDUT_TYPE1_CXT_SIZE_SHIFT)
1316 #define CDUT_TYPE1_BLOCK_WASTE_SHIFT \
1317 CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE_SHIFT
1319 #define CDUT_TYPE1_BLOCK_WASTE_MASK \
1320 (CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE >> \
1321 CDUT_TYPE1_BLOCK_WASTE_SHIFT)
1323 #define CDUT_TYPE1_NCIB_SHIFT \
1324 CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK_SHIFT
1326 #define CDUT_TYPE1_NCIB_MASK \
1327 (CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK >> \
1328 CDUT_TYPE1_NCIB_SHIFT)
1330 static void ecore_cdu_init_common(struct ecore_hwfn *p_hwfn)
1332 u32 page_sz, elems_per_page, block_waste, cxt_size, cdu_params = 0;
1334 /* CDUC - connection configuration */
1335 page_sz = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC].p_size.val;
1336 cxt_size = CONN_CXT_SIZE(p_hwfn);
1337 elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
1338 block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size;
1340 SET_FIELD(cdu_params, CDUC_CXT_SIZE, cxt_size);
1341 SET_FIELD(cdu_params, CDUC_BLOCK_WASTE, block_waste);
1342 SET_FIELD(cdu_params, CDUC_NCIB, elems_per_page);
1343 STORE_RT_REG(p_hwfn, CDU_REG_CID_ADDR_PARAMS_RT_OFFSET, cdu_params);
1345 /* CDUT - type-0 tasks configuration */
1346 page_sz = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT].p_size.val;
1347 cxt_size = p_hwfn->p_cxt_mngr->task_type_size[0];
1348 elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
1349 block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size;
1351 /* cxt size and block-waste are multiples of 8 */
1353 SET_FIELD(cdu_params, CDUT_TYPE0_CXT_SIZE, (cxt_size >> 3));
1354 SET_FIELD(cdu_params, CDUT_TYPE0_BLOCK_WASTE, (block_waste >> 3));
1355 SET_FIELD(cdu_params, CDUT_TYPE0_NCIB, elems_per_page);
1356 STORE_RT_REG(p_hwfn, CDU_REG_SEGMENT0_PARAMS_RT_OFFSET, cdu_params);
1358 /* CDUT - type-1 tasks configuration */
1359 cxt_size = p_hwfn->p_cxt_mngr->task_type_size[1];
1360 elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
1361 block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size;
1363 /* cxt size and block-waste are multiples of 8 */
1365 SET_FIELD(cdu_params, CDUT_TYPE1_CXT_SIZE, (cxt_size >> 3));
1366 SET_FIELD(cdu_params, CDUT_TYPE1_BLOCK_WASTE, (block_waste >> 3));
1367 SET_FIELD(cdu_params, CDUT_TYPE1_NCIB, elems_per_page);
1368 STORE_RT_REG(p_hwfn, CDU_REG_SEGMENT1_PARAMS_RT_OFFSET, cdu_params);
1372 #define CDU_SEG_REG_TYPE_SHIFT CDU_SEG_TYPE_OFFSET_REG_TYPE_SHIFT
1373 #define CDU_SEG_REG_TYPE_MASK 0x1
1374 #define CDU_SEG_REG_OFFSET_SHIFT 0
1375 #define CDU_SEG_REG_OFFSET_MASK CDU_SEG_TYPE_OFFSET_REG_OFFSET_MASK
1377 static void ecore_cdu_init_pf(struct ecore_hwfn *p_hwfn)
1379 struct ecore_ilt_client_cfg *p_cli;
1380 struct ecore_tid_seg *p_seg;
1381 u32 cdu_seg_params, offset;
1384 static const u32 rt_type_offset_arr[] = {
1385 CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET,
1386 CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET,
1387 CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET,
1388 CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET
1391 static const u32 rt_type_offset_fl_arr[] = {
1392 CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET,
1393 CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET,
1394 CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET,
1395 CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET
1398 p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
1400 /* There are initializations only for CDUT during the PF phase */
1401 for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
1403 p_seg = ecore_cxt_tid_seg_info(p_hwfn, i);
1407 /* Note: start_line is already adjusted for the CDU
1408 * segment register granularity, so we just need to
1409 * divide. Adjustment is implicit as we assume ILT
1410 * Page size is larger than 32K!
1412 offset = (ILT_PAGE_IN_BYTES(p_cli->p_size.val) *
1413 (p_cli->pf_blks[CDUT_SEG_BLK(i)].start_line -
1414 p_cli->first.val)) / CDUT_SEG_ALIGNMET_IN_BYTES;
1417 SET_FIELD(cdu_seg_params, CDU_SEG_REG_TYPE, p_seg->type);
1418 SET_FIELD(cdu_seg_params, CDU_SEG_REG_OFFSET, offset);
1419 STORE_RT_REG(p_hwfn, rt_type_offset_arr[i], cdu_seg_params);
1421 offset = (ILT_PAGE_IN_BYTES(p_cli->p_size.val) *
1422 (p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)].start_line -
1423 p_cli->first.val)) / CDUT_SEG_ALIGNMET_IN_BYTES;
1426 SET_FIELD(cdu_seg_params, CDU_SEG_REG_TYPE, p_seg->type);
1427 SET_FIELD(cdu_seg_params, CDU_SEG_REG_OFFSET, offset);
1428 STORE_RT_REG(p_hwfn, rt_type_offset_fl_arr[i], cdu_seg_params);
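/* Worked example for the offset math above (illustrative numbers): with a
 * 64K ILT page, a segment whose block starts 4 lines after p_cli->first.val
 * begins at byte 4 * 65536 = 262144, i.e. at offset
 * 262144 / CDUT_SEG_ALIGNMET_IN_BYTES = 262144 / 32768 = 8 alignment units.
 */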
1432 void ecore_qm_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
1435 struct ecore_qm_info *qm_info = &p_hwfn->qm_info;
1436 struct ecore_mcp_link_state *p_link;
1437 struct ecore_qm_iids iids;
1439 OSAL_MEM_ZERO(&iids, sizeof(iids));
1440 ecore_cxt_qm_iids(p_hwfn, &iids);
1442 p_link = &ECORE_LEADING_HWFN(p_hwfn->p_dev)->mcp_info->link_output;
1444 ecore_qm_pf_rt_init(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
1445 qm_info->max_phys_tcs_per_port,
1447 iids.cids, iids.vf_cids, iids.tids,
1449 qm_info->num_pqs - qm_info->num_vf_pqs,
1450 qm_info->num_vf_pqs,
1451 qm_info->start_vport,
1452 qm_info->num_vports, qm_info->pf_wfq,
1453 qm_info->pf_rl, p_link->speed,
1454 p_hwfn->qm_info.qm_pq_params,
1455 p_hwfn->qm_info.qm_vport_params);
1459 static void ecore_cm_init_pf(struct ecore_hwfn *p_hwfn)
1461 STORE_RT_REG(p_hwfn, XCM_REG_CON_PHY_Q3_RT_OFFSET,
1462 ecore_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB));
1466 static void ecore_dq_init_pf(struct ecore_hwfn *p_hwfn)
1468 struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
1469 u32 dq_pf_max_cid = 0, dq_vf_max_cid = 0;
1471 dq_pf_max_cid += (p_mngr->conn_cfg[0].cid_count >> DQ_RANGE_SHIFT);
1472 STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_0_RT_OFFSET, dq_pf_max_cid);
1474 dq_vf_max_cid += (p_mngr->conn_cfg[0].cids_per_vf >> DQ_RANGE_SHIFT);
1475 STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_0_RT_OFFSET, dq_vf_max_cid);
1477 dq_pf_max_cid += (p_mngr->conn_cfg[1].cid_count >> DQ_RANGE_SHIFT);
1478 STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_1_RT_OFFSET, dq_pf_max_cid);
1480 dq_vf_max_cid += (p_mngr->conn_cfg[1].cids_per_vf >> DQ_RANGE_SHIFT);
1481 STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_1_RT_OFFSET, dq_vf_max_cid);
1483 dq_pf_max_cid += (p_mngr->conn_cfg[2].cid_count >> DQ_RANGE_SHIFT);
1484 STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_2_RT_OFFSET, dq_pf_max_cid);
1486 dq_vf_max_cid += (p_mngr->conn_cfg[2].cids_per_vf >> DQ_RANGE_SHIFT);
1487 STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_2_RT_OFFSET, dq_vf_max_cid);
1489 dq_pf_max_cid += (p_mngr->conn_cfg[3].cid_count >> DQ_RANGE_SHIFT);
1490 STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_3_RT_OFFSET, dq_pf_max_cid);
1492 dq_vf_max_cid += (p_mngr->conn_cfg[3].cids_per_vf >> DQ_RANGE_SHIFT);
1493 STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_3_RT_OFFSET, dq_vf_max_cid);
1495 dq_pf_max_cid += (p_mngr->conn_cfg[4].cid_count >> DQ_RANGE_SHIFT);
1496 STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_4_RT_OFFSET, dq_pf_max_cid);
1498 dq_vf_max_cid += (p_mngr->conn_cfg[4].cids_per_vf >> DQ_RANGE_SHIFT);
1499 STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_4_RT_OFFSET, dq_vf_max_cid);
1501 dq_pf_max_cid += (p_mngr->conn_cfg[5].cid_count >> DQ_RANGE_SHIFT);
1502 STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_5_RT_OFFSET, dq_pf_max_cid);
1504 dq_vf_max_cid += (p_mngr->conn_cfg[5].cids_per_vf >> DQ_RANGE_SHIFT);
1505 STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_5_RT_OFFSET, dq_vf_max_cid);
1507 /* Connection types 6 & 7 are not in use, yet they must be configured
1508 * as the highest possible connection. Not configuring them means the
1509 * defaults will be used, and with a large number of cids a bug may
1510 * occur if the defaults are smaller than dq_pf_max_cid /
1513 STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_6_RT_OFFSET, dq_pf_max_cid);
1514 STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_6_RT_OFFSET, dq_vf_max_cid);
1516 STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_7_RT_OFFSET, dq_pf_max_cid);
1517 STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_7_RT_OFFSET, dq_vf_max_cid);
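/* Worked example: cid_count is rounded up to DQ_RANGE_ALIGN (16), so each
 * ">> DQ_RANGE_SHIFT" term counts 16-CID ranges; e.g. a type with 320 CIDs
 * adds 320 >> 4 = 20 to the running dq_pf_max_cid.
 */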
1520 static void ecore_ilt_bounds_init(struct ecore_hwfn *p_hwfn)
1522 struct ecore_ilt_client_cfg *ilt_clients;
1525 ilt_clients = p_hwfn->p_cxt_mngr->clients;
1526 for_each_ilt_valid_client(i, ilt_clients) {
1527 STORE_RT_REG(p_hwfn,
1528 ilt_clients[i].first.reg,
1529 ilt_clients[i].first.val);
1530 STORE_RT_REG(p_hwfn,
1531 ilt_clients[i].last.reg, ilt_clients[i].last.val);
1532 STORE_RT_REG(p_hwfn,
1533 ilt_clients[i].p_size.reg,
1534 ilt_clients[i].p_size.val);
1538 static void ecore_ilt_vf_bounds_init(struct ecore_hwfn *p_hwfn)
1540 struct ecore_ilt_client_cfg *p_cli;
1543 /* For simplicity we set the 'block' to be an ILT page */
1544 if (p_hwfn->p_dev->p_iov_info) {
1545 struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info;
1547 STORE_RT_REG(p_hwfn,
1548 PSWRQ2_REG_VF_BASE_RT_OFFSET,
1549 p_iov->first_vf_in_pf);
1550 STORE_RT_REG(p_hwfn,
1551 PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET,
1552 p_iov->first_vf_in_pf + p_iov->total_vfs);
1555 p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
1556 blk_factor = OSAL_LOG2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10);
1557 if (p_cli->active) {
1558 STORE_RT_REG(p_hwfn,
1559 PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET,
1561 STORE_RT_REG(p_hwfn,
1562 PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET,
1563 p_cli->pf_total_lines);
1564 STORE_RT_REG(p_hwfn,
1565 PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET,
1566 p_cli->vf_total_lines);
1569 p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
1570 blk_factor = OSAL_LOG2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10);
1571 if (p_cli->active) {
1572 STORE_RT_REG(p_hwfn,
1573 PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET,
1575 STORE_RT_REG(p_hwfn,
1576 PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET,
1577 p_cli->pf_total_lines);
1578 STORE_RT_REG(p_hwfn,
1579 PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET,
1580 p_cli->vf_total_lines);
1583 p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TM];
1584 blk_factor = OSAL_LOG2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10);
1585 if (p_cli->active) {
1586 STORE_RT_REG(p_hwfn,
1587 PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET, blk_factor);
1588 STORE_RT_REG(p_hwfn,
1589 PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET,
1590 p_cli->pf_total_lines);
1591 STORE_RT_REG(p_hwfn,
1592 PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET,
1593 p_cli->vf_total_lines);
1597 /* ILT (PSWRQ2) PF */
1598 static void ecore_ilt_init_pf(struct ecore_hwfn *p_hwfn)
1600 struct ecore_ilt_client_cfg *clients;
1601 struct ecore_cxt_mngr *p_mngr;
1602 struct ecore_dma_mem *p_shdw;
1603 u32 line, rt_offst, i;
1605 ecore_ilt_bounds_init(p_hwfn);
1606 ecore_ilt_vf_bounds_init(p_hwfn);
1608 p_mngr = p_hwfn->p_cxt_mngr;
1609 p_shdw = p_mngr->ilt_shadow;
1610 clients = p_hwfn->p_cxt_mngr->clients;
1612 for_each_ilt_valid_client(i, clients) {
1613 /* The client's first val and RT array are absolute; the ILT shadow's
1614 * lines are relative.
1616 line = clients[i].first.val - p_mngr->pf_start_line;
1617 rt_offst = PSWRQ2_REG_ILT_MEMORY_RT_OFFSET +
1618 clients[i].first.val * ILT_ENTRY_IN_REGS;
1620 for (; line <= clients[i].last.val - p_mngr->pf_start_line;
1621 line++, rt_offst += ILT_ENTRY_IN_REGS) {
1622 u64 ilt_hw_entry = 0;
1624 /* p_virt could be OSAL_NULL in case of dynamic
1627 if (p_shdw[line].p_virt != OSAL_NULL) {
1628 SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL);
1629 SET_FIELD(ilt_hw_entry, ILT_ENTRY_PHY_ADDR,
1630 (p_shdw[line].p_phys >> 12));
1632 DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
1633 "Setting RT[0x%08x] from"
1634 " ILT[0x%08x] [Client is %d] to"
1635 " Physical addr: 0x%lx\n",
1637 (unsigned long)(p_shdw[line].
1641 STORE_RT_REG_AGG(p_hwfn, rt_offst, ilt_hw_entry);
1646 /* SRC (Searcher) PF */
1647 static void ecore_src_init_pf(struct ecore_hwfn *p_hwfn)
1649 struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
1650 u32 rounded_conn_num, conn_num, conn_max;
1651 struct ecore_src_iids src_iids;
1653 OSAL_MEM_ZERO(&src_iids, sizeof(src_iids));
1654 ecore_cxt_src_iids(p_mngr, &src_iids);
1655 conn_num = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count;
1659 conn_max = OSAL_MAX_T(u32, conn_num, SRC_MIN_NUM_ELEMS);
1660 rounded_conn_num = OSAL_ROUNDUP_POW_OF_TWO(conn_max);
1662 STORE_RT_REG(p_hwfn, SRC_REG_COUNTFREE_RT_OFFSET, conn_num);
1663 STORE_RT_REG(p_hwfn, SRC_REG_NUMBER_HASH_BITS_RT_OFFSET,
1664 OSAL_LOG2(rounded_conn_num));
1666 STORE_RT_REG_AGG(p_hwfn, SRC_REG_FIRSTFREE_RT_OFFSET,
1667 p_hwfn->p_cxt_mngr->first_free);
1668 STORE_RT_REG_AGG(p_hwfn, SRC_REG_LASTFREE_RT_OFFSET,
1669 p_hwfn->p_cxt_mngr->last_free);
1670 DP_VERBOSE(p_hwfn, ECORE_MSG_ILT,
1671 "Configured SEARCHER for 0x%08x connections\n",
1676 #define TM_CFG_NUM_IDS_SHIFT 0
1677 #define TM_CFG_NUM_IDS_MASK 0xFFFFULL
1678 #define TM_CFG_PRE_SCAN_OFFSET_SHIFT 16
1679 #define TM_CFG_PRE_SCAN_OFFSET_MASK 0x1FFULL
1680 #define TM_CFG_PARENT_PF_SHIFT 25
1681 #define TM_CFG_PARENT_PF_MASK 0x7ULL
1683 #define TM_CFG_CID_PRE_SCAN_ROWS_SHIFT 30
1684 #define TM_CFG_CID_PRE_SCAN_ROWS_MASK 0x1FFULL
1686 #define TM_CFG_TID_OFFSET_SHIFT 30
1687 #define TM_CFG_TID_OFFSET_MASK 0x7FFFFULL
1688 #define TM_CFG_TID_PRE_SCAN_ROWS_SHIFT 49
1689 #define TM_CFG_TID_PRE_SCAN_ROWS_MASK 0x1FFULL
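/* Worked example of the connection cfg_word layout implied by the masks
 * above: NUM_IDS occupies bits [15:0], PRE_SCAN_OFFSET bits [24:16],
 * PARENT_PF bits [27:25] and CID_PRE_SCAN_ROWS bits [38:30]; the task
 * variant replaces the last field with TID_OFFSET at bits [48:30] and
 * TID_PRE_SCAN_ROWS at bits [57:49].
 */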
1691 static void ecore_tm_init_pf(struct ecore_hwfn *p_hwfn)
1693 struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
1694 u32 active_seg_mask = 0, tm_offset, rt_reg;
1695 struct ecore_tm_iids tm_iids;
1699 OSAL_MEM_ZERO(&tm_iids, sizeof(tm_iids));
1700 ecore_cxt_tm_iids(p_mngr, &tm_iids);
1702 /* @@@TBD No pre-scan for now */
1704 /* Note: We assume consecutive VFs for a PF */
1705 for (i = 0; i < p_mngr->vf_count; i++) {
1707 SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.per_vf_cids);
1708 SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
1709 SET_FIELD(cfg_word, TM_CFG_PARENT_PF, p_hwfn->rel_pf_id);
1710 SET_FIELD(cfg_word, TM_CFG_CID_PRE_SCAN_ROWS, 0); /* scan all */
1712 rt_reg = TM_REG_CONFIG_CONN_MEM_RT_OFFSET +
1713 (sizeof(cfg_word) / sizeof(u32)) *
1714 (p_hwfn->p_dev->p_iov_info->first_vf_in_pf + i);
1715 STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
1719 SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.pf_cids);
1720 SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
1721 SET_FIELD(cfg_word, TM_CFG_PARENT_PF, 0); /* n/a for PF */
1722 SET_FIELD(cfg_word, TM_CFG_CID_PRE_SCAN_ROWS, 0); /* scan all */
1724 rt_reg = TM_REG_CONFIG_CONN_MEM_RT_OFFSET +
1725 (sizeof(cfg_word) / sizeof(u32)) *
1726 (NUM_OF_VFS(p_hwfn->p_dev) + p_hwfn->rel_pf_id);
1727 STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
1730 STORE_RT_REG(p_hwfn, TM_REG_PF_ENABLE_CONN_RT_OFFSET,
1731 tm_iids.pf_cids ? 0x1 : 0x0);
1733 /* @@@TBD how to enable the scan for the VFs */
1735 tm_offset = tm_iids.per_vf_cids;
1737 /* Note: We assume consecutive VFs for a PF */
1738 for (i = 0; i < p_mngr->vf_count; i++) {
1740 SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.per_vf_tids);
1741 SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
1742 SET_FIELD(cfg_word, TM_CFG_PARENT_PF, p_hwfn->rel_pf_id);
1743 SET_FIELD(cfg_word, TM_CFG_TID_OFFSET, tm_offset);
1744 SET_FIELD(cfg_word, TM_CFG_TID_PRE_SCAN_ROWS, (u64)0);
1746 rt_reg = TM_REG_CONFIG_TASK_MEM_RT_OFFSET +
1747 (sizeof(cfg_word) / sizeof(u32)) *
1748 (p_hwfn->p_dev->p_iov_info->first_vf_in_pf + i);
1750 STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
1753 tm_offset = tm_iids.pf_cids;
1754 for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) {
1756 SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.pf_tids[i]);
1757 SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0);
1758 SET_FIELD(cfg_word, TM_CFG_PARENT_PF, 0);
1759 SET_FIELD(cfg_word, TM_CFG_TID_OFFSET, tm_offset);
1760 SET_FIELD(cfg_word, TM_CFG_TID_PRE_SCAN_ROWS, (u64)0);
1762 rt_reg = TM_REG_CONFIG_TASK_MEM_RT_OFFSET +
1763 (sizeof(cfg_word) / sizeof(u32)) *
1764 (NUM_OF_VFS(p_hwfn->p_dev) +
1765 p_hwfn->rel_pf_id * NUM_TASK_PF_SEGMENTS + i);
1767 STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word);
1768 active_seg_mask |= (tm_iids.pf_tids[i] ? (1 << i) : 0);
1770 tm_offset += tm_iids.pf_tids[i];
1773 STORE_RT_REG(p_hwfn, TM_REG_PF_ENABLE_TASK_RT_OFFSET, active_seg_mask);
1775 /* @@@TBD how to enable the scan for the VFs */
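/* Worked example: with pf_tids = {256, 0, 128, 0} only segments 0 and 2 are
 * active, so active_seg_mask = (1 << 0) | (1 << 2) = 0x5.
 */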
1778 static void ecore_prs_init_pf(struct ecore_hwfn *p_hwfn)
1780 struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
1781 struct ecore_conn_type_cfg *p_fcoe;
1782 struct ecore_tid_seg *p_tid;
1784 p_fcoe = &p_mngr->conn_cfg[PROTOCOLID_FCOE];
1786 /* If FCoE is active set the MAX OX_ID (tid) in the Parser */
1787 if (!p_fcoe->cid_count)
1790 p_tid = &p_fcoe->tid_seg[ECORE_CXT_FCOE_TID_SEG];
1791 STORE_RT_REG_AGG(p_hwfn,
1792 PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET,
1796 void ecore_cxt_hw_init_common(struct ecore_hwfn *p_hwfn)
1798 /* CDU configuration */
1799 ecore_cdu_init_common(p_hwfn);
1802 void ecore_cxt_hw_init_pf(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
1804 ecore_qm_init_pf(p_hwfn, p_ptt, true);
1805 ecore_cm_init_pf(p_hwfn);
1806 ecore_dq_init_pf(p_hwfn);
1807 ecore_cdu_init_pf(p_hwfn);
1808 ecore_ilt_init_pf(p_hwfn);
1809 ecore_src_init_pf(p_hwfn);
1810 ecore_tm_init_pf(p_hwfn);
1811 ecore_prs_init_pf(p_hwfn);
1814 enum _ecore_status_t _ecore_cxt_acquire_cid(struct ecore_hwfn *p_hwfn,
1815 enum protocol_type type,
1816 u32 *p_cid, u8 vfid)
1818 struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
1819 struct ecore_cid_acquired_map *p_map;
1822 if (type >= MAX_CONN_TYPES) {
1823 DP_NOTICE(p_hwfn, true, "Invalid protocol type %d", type);
1827 if (vfid >= COMMON_MAX_NUM_VFS && vfid != ECORE_CXT_PF_CID) {
1828 DP_NOTICE(p_hwfn, true, "VF [%02x] is out of range\n", vfid);
1832 /* Determine the right map to take this CID from */
1833 if (vfid == ECORE_CXT_PF_CID)
1834 p_map = &p_mngr->acquired[type];
1836 p_map = &p_mngr->acquired_vf[type][vfid];
1838 if (p_map->cid_map == OSAL_NULL) {
1839 DP_NOTICE(p_hwfn, true, "Invalid protocol type %d", type);
1843 rel_cid = OSAL_FIND_FIRST_ZERO_BIT(p_map->cid_map,
1846 if (rel_cid >= p_map->max_count) {
1847 DP_NOTICE(p_hwfn, false, "no CID available for protocol %d\n",
1849 return ECORE_NORESOURCES;
1852 OSAL_SET_BIT(rel_cid, p_map->cid_map);
1854 *p_cid = rel_cid + p_map->start_cid;
1856 DP_VERBOSE(p_hwfn, ECORE_MSG_CXT,
1857 "Acquired cid 0x%08x [rel. %08x] vfid %02x type %d\n",
1858 *p_cid, rel_cid, vfid, type);
1860 return ECORE_SUCCESS;
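/* Illustrative usage sketch (hypothetical caller, not compiled): a PF
 * acquires an ETH CID through the wrapper below and releases it when done.
 */
#if 0
static void example_cid_usage(struct ecore_hwfn *p_hwfn)
{
	u32 cid;

	if (ecore_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, &cid) !=
	    ECORE_SUCCESS)
		return;

	/* ... use the connection ... */

	ecore_cxt_release_cid(p_hwfn, cid);
}
#endif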
1863 enum _ecore_status_t ecore_cxt_acquire_cid(struct ecore_hwfn *p_hwfn,
1864 enum protocol_type type,
1867 return _ecore_cxt_acquire_cid(p_hwfn, type, p_cid, ECORE_CXT_PF_CID);
1870 static bool ecore_cxt_test_cid_acquired(struct ecore_hwfn *p_hwfn,
1872 enum protocol_type *p_type,
1873 struct ecore_cid_acquired_map **pp_map)
1875 struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
1878 /* Iterate over protocols and find matching cid range */
1879 for (*p_type = 0; *p_type < MAX_CONN_TYPES; (*p_type)++) {
1880 if (vfid == ECORE_CXT_PF_CID)
1881 *pp_map = &p_mngr->acquired[*p_type];
1883 *pp_map = &p_mngr->acquired_vf[*p_type][vfid];
1885 if (!((*pp_map)->cid_map))
1887 if (cid >= (*pp_map)->start_cid &&
1888 cid < (*pp_map)->start_cid + (*pp_map)->max_count) {
1892 if (*p_type == MAX_CONN_TYPES) {
1893 DP_NOTICE(p_hwfn, true, "Invalid CID %d vfid %02x", cid, vfid);
1897 rel_cid = cid - (*pp_map)->start_cid;
1898 if (!OSAL_TEST_BIT(rel_cid, (*pp_map)->cid_map)) {
1899 DP_NOTICE(p_hwfn, true,
1900 "CID %d [vfid %02x] not acquired", cid, vfid);
1906 *p_type = MAX_CONN_TYPES;
1907 *pp_map = OSAL_NULL;
1911 void _ecore_cxt_release_cid(struct ecore_hwfn *p_hwfn, u32 cid, u8 vfid)
1913 struct ecore_cid_acquired_map *p_map = OSAL_NULL;
1914 enum protocol_type type;
1918 if (vfid != ECORE_CXT_PF_CID && vfid >= COMMON_MAX_NUM_VFS) {
1919 DP_NOTICE(p_hwfn, true,
1920 "Trying to return incorrect CID belonging to VF %02x\n",
1925 /* Test acquired and find matching per-protocol map */
1926 b_acquired = ecore_cxt_test_cid_acquired(p_hwfn, cid, vfid,
1932 rel_cid = cid - p_map->start_cid;
1933 OSAL_CLEAR_BIT(rel_cid, p_map->cid_map);
1935 DP_VERBOSE(p_hwfn, ECORE_MSG_CXT,
1936 "Released CID 0x%08x [rel. %08x] vfid %02x type %d\n",
1937 cid, rel_cid, vfid, type);
1940 void ecore_cxt_release_cid(struct ecore_hwfn *p_hwfn, u32 cid)
1942 _ecore_cxt_release_cid(p_hwfn, cid, ECORE_CXT_PF_CID);
1945 enum _ecore_status_t ecore_cxt_get_cid_info(struct ecore_hwfn *p_hwfn,
1946 struct ecore_cxt_info *p_info)
1948 struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
1949 struct ecore_cid_acquired_map *p_map = OSAL_NULL;
1950 u32 conn_cxt_size, hw_p_size, cxts_per_p, line;
1951 enum protocol_type type;
1954 /* Test acquired and find matching per-protocol map */
1955 b_acquired = ecore_cxt_test_cid_acquired(p_hwfn, p_info->iid,
1962 /* set the protocol type */
1963 p_info->type = type;
1965 /* compute context virtual pointer */
1966 hw_p_size = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC].p_size.val;
1968 conn_cxt_size = CONN_CXT_SIZE(p_hwfn);
1969 cxts_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / conn_cxt_size;
1970 line = p_info->iid / cxts_per_p;
1972 /* Make sure context is allocated (dynamic allocation) */
1973 if (!p_mngr->ilt_shadow[line].p_virt)
1976 p_info->p_cxt = (u8 *)p_mngr->ilt_shadow[line].p_virt +
1977 p_info->iid % cxts_per_p * conn_cxt_size;
1979 DP_VERBOSE(p_hwfn, (ECORE_MSG_ILT | ECORE_MSG_CXT),
1980 "Accessing ILT shadow[%d]: CXT pointer is at %p (for iid %d)\n",
1981 (p_info->iid / cxts_per_p), p_info->p_cxt, p_info->iid);
1983 return ECORE_SUCCESS;
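/* Worked example for the lookup above (illustrative numbers): with a 64K
 * ILT page and a 512B connection context, cxts_per_p = 128, so iid 300 maps
 * to shadow line 300 / 128 = 2 at byte offset (300 % 128) * 512 = 22528.
 */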
1986 enum _ecore_status_t ecore_cxt_set_pf_params(struct ecore_hwfn *p_hwfn)
1988 /* Set the number of required CORE connections */
1989 u32 core_cids = 1; /* SPQ */
1991 ecore_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_CORE, core_cids, 0);
1993 switch (p_hwfn->hw_info.personality) {
1998 struct ecore_eth_pf_params *p_params =
1999 &p_hwfn->pf_params.eth_pf_params;
2001 if (!p_params->num_vf_cons)
2002 p_params->num_vf_cons = ETH_PF_PARAMS_VF_CONS_DEFAULT;
2003 ecore_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_ETH,
2005 p_params->num_vf_cons);
2007 count = p_params->num_arfs_filters;
2009 if (!OSAL_TEST_BIT(ECORE_MF_DISABLE_ARFS,
2010 &p_hwfn->p_dev->mf_bits))
2011 p_hwfn->p_cxt_mngr->arfs_count = count;
2019 return ECORE_SUCCESS;
2022 /* This function is very RoCE oriented; if another protocol wants this
2023 * feature in the future, we'll need to make the function more generic.
2025 enum _ecore_status_t
2026 ecore_cxt_dynamic_ilt_alloc(struct ecore_hwfn *p_hwfn,
2027 enum ecore_cxt_elem_type elem_type,
2030 u32 reg_offset, shadow_line, elem_size, hw_p_size, elems_per_p, line;
2031 struct ecore_ilt_client_cfg *p_cli;
2032 struct ecore_ilt_cli_blk *p_blk;
2033 struct ecore_ptt *p_ptt;
2037 enum _ecore_status_t rc = ECORE_SUCCESS;
2039 switch (elem_type) {
2040 case ECORE_ELEM_CXT:
2041 p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
2042 elem_size = CONN_CXT_SIZE(p_hwfn);
2043 p_blk = &p_cli->pf_blks[CDUC_BLK];
2045 case ECORE_ELEM_SRQ:
2046 p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TSDM];
2047 elem_size = SRQ_CXT_SIZE;
2048 p_blk = &p_cli->pf_blks[SRQ_BLK];
2050 case ECORE_ELEM_TASK:
2051 p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
2052 elem_size = TYPE1_TASK_CXT_SIZE(p_hwfn);
2053 p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(ECORE_CXT_ROCE_TID_SEG)];
2056 DP_NOTICE(p_hwfn, false,
2057 "ECORE_INVALID elem type = %d", elem_type);
2061 /* Calculate line in ilt */
2062 hw_p_size = p_cli->p_size.val;
2063 elems_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / elem_size;
2064 line = p_blk->start_line + (iid / elems_per_p);
2065 shadow_line = line - p_hwfn->p_cxt_mngr->pf_start_line;
2067 /* If line is already allocated, do nothing, otherwise allocate it and
2068 * write it to the PSWRQ2 registers.
2069 * This section can be run in parallel from different contexts and thus
2070 * mutex protection is needed.
2073 OSAL_MUTEX_ACQUIRE(&p_hwfn->p_cxt_mngr->mutex);
2075 if (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_virt)
2078 p_ptt = ecore_ptt_acquire(p_hwfn);
2080 DP_NOTICE(p_hwfn, false,
2081 "ECORE_TIME_OUT on ptt acquire - dynamic allocation");
2086 p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
2088 p_blk->real_size_in_page);
2093 OSAL_MEM_ZERO(p_virt, p_blk->real_size_in_page);
2095 p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_virt = p_virt;
2096 p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_phys = p_phys;
2097 p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].size =
2098 p_blk->real_size_in_page;
2100 /* compute absolute offset */
2101 reg_offset = PSWRQ2_REG_ILT_MEMORY +
2102 (line * ILT_REG_SIZE_IN_BYTES * ILT_ENTRY_IN_REGS);
2105 SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL);
2106 SET_FIELD(ilt_hw_entry,
2108 (p_hwfn->p_cxt_mngr->ilt_shadow[shadow_line].p_phys >> 12));
2110 /* Write via DMAE since the PSWRQ2_REG_ILT_MEMORY line is a wide-bus */
2112 ecore_dmae_host2grc(p_hwfn, p_ptt, (u64)(osal_uintptr_t)&ilt_hw_entry,
2113 reg_offset, sizeof(ilt_hw_entry) / sizeof(u32),
2116 if (elem_type == ECORE_ELEM_CXT) {
2117 u32 last_cid_allocated = (1 + (iid / elems_per_p)) *
2120 /* Update the relevant register in the parser */
2121 ecore_wr(p_hwfn, p_ptt, PRS_REG_ROCE_DEST_QP_MAX_PF,
2122 last_cid_allocated - 1);
2124 if (!p_hwfn->b_rdma_enabled_in_prs) {
2125 /* Enable RoCE search */
2126 ecore_wr(p_hwfn, p_ptt, p_hwfn->rdma_prs_search_reg, 1);
2127 p_hwfn->b_rdma_enabled_in_prs = true;
2132 ecore_ptt_release(p_hwfn, p_ptt);
2134 OSAL_MUTEX_RELEASE(&p_hwfn->p_cxt_mngr->mutex);
2139 /* This function is very RoCE oriented; if another protocol wants this
2140 * feature in the future, we'll need to make the function more generic.
2142 static enum _ecore_status_t
2143 ecore_cxt_free_ilt_range(struct ecore_hwfn *p_hwfn,
2144 enum ecore_cxt_elem_type elem_type,
2145 u32 start_iid, u32 count)
2147 u32 start_line, end_line, shadow_start_line, shadow_end_line;
2148 u32 reg_offset, elem_size, hw_p_size, elems_per_p;
2149 struct ecore_ilt_client_cfg *p_cli;
2150 struct ecore_ilt_cli_blk *p_blk;
2151 u32 end_iid = start_iid + count;
2152 struct ecore_ptt *p_ptt;
2153 u64 ilt_hw_entry = 0;
2156 switch (elem_type) {
2157 case ECORE_ELEM_CXT:
2158 p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC];
2159 elem_size = CONN_CXT_SIZE(p_hwfn);
2160 p_blk = &p_cli->pf_blks[CDUC_BLK];
2162 case ECORE_ELEM_SRQ:
2163 p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TSDM];
2164 elem_size = SRQ_CXT_SIZE;
2165 p_blk = &p_cli->pf_blks[SRQ_BLK];
2167 case ECORE_ELEM_TASK:
2168 p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT];
2169 elem_size = TYPE1_TASK_CXT_SIZE(p_hwfn);
2170 p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(ECORE_CXT_ROCE_TID_SEG)];
2173 DP_NOTICE(p_hwfn, false,
2174 "ECORE_INVALID elem type = %d", elem_type);
2178 /* Calculate line in ilt */
2179 hw_p_size = p_cli->p_size.val;
2180 elems_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / elem_size;
2181 start_line = p_blk->start_line + (start_iid / elems_per_p);
2182 end_line = p_blk->start_line + (end_iid / elems_per_p);
2183 if (((end_iid + 1) / elems_per_p) != (end_iid / elems_per_p))
2186 shadow_start_line = start_line - p_hwfn->p_cxt_mngr->pf_start_line;
2187 shadow_end_line = end_line - p_hwfn->p_cxt_mngr->pf_start_line;
2189 p_ptt = ecore_ptt_acquire(p_hwfn);
2191 DP_NOTICE(p_hwfn, false,
2192 "ECORE_TIME_OUT on ptt acquire - dynamic allocation");
2193 return ECORE_TIMEOUT;
2196 for (i = shadow_start_line; i < shadow_end_line; i++) {
2197 if (!p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt)
2200 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
2201 p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt,
2202 p_hwfn->p_cxt_mngr->ilt_shadow[i].p_phys,
2203 p_hwfn->p_cxt_mngr->ilt_shadow[i].size);
2205 p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt = OSAL_NULL;
2206 p_hwfn->p_cxt_mngr->ilt_shadow[i].p_phys = 0;
2207 p_hwfn->p_cxt_mngr->ilt_shadow[i].size = 0;
2209 /* compute absolute offset */
2210 reg_offset = PSWRQ2_REG_ILT_MEMORY +
2211 ((start_line++) * ILT_REG_SIZE_IN_BYTES *
2214 /* Write via DMAE since the PSWRQ2_REG_ILT_MEMORY line is a
2217 ecore_dmae_host2grc(p_hwfn, p_ptt,
2218 (u64)(osal_uintptr_t)&ilt_hw_entry,
2220 sizeof(ilt_hw_entry) / sizeof(u32),
2224 ecore_ptt_release(p_hwfn, p_ptt);
2226 return ECORE_SUCCESS;