/*
 * Copyright (c) 2016 QLogic Corporation.
 * See LICENSE.qede_pmd for copyright and licensing details.
 */
#include "ecore_sriov.h"
#include "ecore_status.h"
#include "ecore_hw_defs.h"
#include "ecore_int.h"
#include "ecore_hsi_eth.h"
#include "ecore_vfpf_if.h"
#include "ecore_rt_defs.h"
#include "ecore_init_ops.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore_iro.h"
#include "ecore_mcp.h"
#include "ecore_cxt.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_sp_commands.h"
const char *ecore_channel_tlvs_string[] = {
	"CHANNEL_TLV_NONE", /* ends tlv sequence */
	"CHANNEL_TLV_ACQUIRE",
	"CHANNEL_TLV_VPORT_START",
	"CHANNEL_TLV_VPORT_UPDATE",
	"CHANNEL_TLV_VPORT_TEARDOWN",
	"CHANNEL_TLV_START_RXQ",
	"CHANNEL_TLV_START_TXQ",
	"CHANNEL_TLV_STOP_RXQ",
	"CHANNEL_TLV_STOP_TXQ",
	"CHANNEL_TLV_UPDATE_RXQ",
	"CHANNEL_TLV_INT_CLEANUP",
	"CHANNEL_TLV_CLOSE",
	"CHANNEL_TLV_RELEASE",
	"CHANNEL_TLV_LIST_END",
	"CHANNEL_TLV_UCAST_FILTER",
	"CHANNEL_TLV_VPORT_UPDATE_ACTIVATE",
	"CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH",
	"CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP",
	"CHANNEL_TLV_VPORT_UPDATE_MCAST",
	"CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM",
	"CHANNEL_TLV_VPORT_UPDATE_RSS",
	"CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN",
	"CHANNEL_TLV_VPORT_UPDATE_SGE_TPA",
	"CHANNEL_TLV_MAX"
};
static enum _ecore_status_t ecore_sp_vf_start(struct ecore_hwfn *p_hwfn,
					      struct ecore_vf_info *p_vf)
{
	struct vf_start_ramrod_data *p_ramrod = OSAL_NULL;
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct ecore_sp_init_data init_data;
	enum _ecore_status_t rc = ECORE_NOTIMPL;
	u8 fp_minor;

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = ecore_spq_get_cid(p_hwfn);
	init_data.opaque_fid = p_vf->opaque_fid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   COMMON_RAMROD_VF_START,
				   PROTOCOLID_COMMON, &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	p_ramrod = &p_ent->ramrod.vf_start;

	p_ramrod->vf_id = GET_FIELD(p_vf->concrete_fid, PXP_CONCRETE_FID_VFID);
	p_ramrod->opaque_fid = OSAL_CPU_TO_LE16(p_vf->opaque_fid);

	switch (p_hwfn->hw_info.personality) {
	case ECORE_PCI_ETH:
		p_ramrod->personality = PERSONALITY_ETH;
		break;
	case ECORE_PCI_ETH_ROCE:
		p_ramrod->personality = PERSONALITY_RDMA_AND_ETH;
		break;
	default:
		DP_NOTICE(p_hwfn, true, "Unknown VF personality %d\n",
			  p_hwfn->hw_info.personality);
		return ECORE_INVAL;
	}

	fp_minor = p_vf->acquire.vfdev_info.eth_fp_hsi_minor;
	if (fp_minor > ETH_HSI_VER_MINOR &&
	    fp_minor != ETH_HSI_VER_NO_PKT_LEN_TUNN) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF [%d] - Requested fp hsi %02x.%02x which is"
			   " slightly newer than PF's %02x.%02x; Configuring"
			   " PF's version\n",
			   p_vf->abs_vf_id,
			   ETH_HSI_VER_MAJOR, fp_minor,
			   ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);
		fp_minor = ETH_HSI_VER_MINOR;
	}

	p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR;
	p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = fp_minor;

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "VF[%d] - Starting using HSI %02x.%02x\n",
		   p_vf->abs_vf_id, ETH_HSI_VER_MAJOR, fp_minor);

	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}
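/* Worked example of the negotiation above (version numbers illustrative):
 * if the PF was built against fastpath HSI 3.10 and a VF requests 3.12, the
 * PF quietly starts the VF with 3.10; a request of
 * ETH_HSI_VER_NO_PKT_LEN_TUNN is passed through untouched so legacy VFs keep
 * their pre-tunnelling semantics.
 */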
static enum _ecore_status_t ecore_sp_vf_stop(struct ecore_hwfn *p_hwfn,
					     u32 concrete_vfid,
					     u16 opaque_vfid)
{
	struct vf_stop_ramrod_data *p_ramrod = OSAL_NULL;
	struct ecore_spq_entry *p_ent = OSAL_NULL;
	struct ecore_sp_init_data init_data;
	enum _ecore_status_t rc = ECORE_NOTIMPL;

	/* Get SPQ entry */
	OSAL_MEMSET(&init_data, 0, sizeof(init_data));
	init_data.cid = ecore_spq_get_cid(p_hwfn);
	init_data.opaque_fid = opaque_vfid;
	init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

	rc = ecore_sp_init_request(p_hwfn, &p_ent,
				   COMMON_RAMROD_VF_STOP,
				   PROTOCOLID_COMMON, &init_data);
	if (rc != ECORE_SUCCESS)
		return rc;

	p_ramrod = &p_ent->ramrod.vf_stop;

	p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID);

	return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}
bool ecore_iov_is_valid_vfid(struct ecore_hwfn *p_hwfn, int rel_vf_id,
			     bool b_enabled_only)
{
	if (!p_hwfn->pf_iov_info) {
		DP_NOTICE(p_hwfn->p_dev, true, "No iov info\n");
		return false;
	}

	if ((rel_vf_id >= p_hwfn->p_dev->p_iov_info->total_vfs) ||
	    (rel_vf_id < 0))
		return false;

	if ((!p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_init) &&
	    b_enabled_only)
		return false;

	return true;
}
struct ecore_vf_info *ecore_iov_get_vf_info(struct ecore_hwfn *p_hwfn,
					    u16 relative_vf_id,
					    bool b_enabled_only)
{
	struct ecore_vf_info *vf = OSAL_NULL;

	if (!p_hwfn->pf_iov_info) {
		DP_NOTICE(p_hwfn->p_dev, true, "No iov info\n");
		return OSAL_NULL;
	}

	if (ecore_iov_is_valid_vfid(p_hwfn, relative_vf_id, b_enabled_only))
		vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id];
	else
		DP_ERR(p_hwfn,
		       "ecore_iov_get_vf_info: VF[%d] is not enabled\n",
		       relative_vf_id);

	return vf;
}
static bool ecore_iov_validate_rxq(struct ecore_hwfn *p_hwfn,
				   struct ecore_vf_info *p_vf,
				   u16 rx_qid)
{
	if (rx_qid >= p_vf->num_rxqs)
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF[0x%02x] - can't touch Rx queue[%04x];"
			   " Only 0x%04x are allocated\n",
			   p_vf->abs_vf_id, rx_qid, p_vf->num_rxqs);
	return rx_qid < p_vf->num_rxqs;
}
static bool ecore_iov_validate_txq(struct ecore_hwfn *p_hwfn,
				   struct ecore_vf_info *p_vf,
				   u16 tx_qid)
{
	if (tx_qid >= p_vf->num_txqs)
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF[0x%02x] - can't touch Tx queue[%04x];"
			   " Only 0x%04x are allocated\n",
			   p_vf->abs_vf_id, tx_qid, p_vf->num_txqs);
	return tx_qid < p_vf->num_txqs;
}
static bool ecore_iov_validate_sb(struct ecore_hwfn *p_hwfn,
				  struct ecore_vf_info *p_vf,
				  u16 sb_idx)
{
	int i;

	for (i = 0; i < p_vf->num_sbs; i++)
		if (p_vf->igu_sbs[i] == sb_idx)
			return true;

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "VF[0x%02x] - tried using sb_idx %04x which doesn't exist as"
		   " one of its 0x%02x SBs\n",
		   p_vf->abs_vf_id, sb_idx, p_vf->num_sbs);

	return false;
}
/* TODO - this is linux crc32; Need a way to ifdef it out for linux */
u32 ecore_crc32(u32 crc, u8 *ptr, u32 length)
{
	int i;

	/* Standard bitwise CRC-32 (reflected, polynomial 0xedb88320) */
	while (length--) {
		crc ^= *ptr++;
		for (i = 0; i < 8; i++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
	}

	return crc;
}
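/* Illustrative use of ecore_crc32() (hypothetical helper, kept out of the
 * build): seed with 0 and run over everything past a leading field, exactly
 * as the bulletin-board code below does when it skips the crc itself.
 */
#if 0
static u32 example_crc_skip_field(u8 *buf, u32 len, u32 skip)
{
	return ecore_crc32(0, buf + skip, len - skip);
}
#endif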
enum _ecore_status_t ecore_iov_post_vf_bulletin(struct ecore_hwfn *p_hwfn,
						int vfid,
						struct ecore_ptt *p_ptt)
{
	struct ecore_bulletin_content *p_bulletin;
	int crc_size = sizeof(p_bulletin->crc);
	struct ecore_dmae_params params;
	struct ecore_vf_info *p_vf;

	p_vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
	if (!p_vf)
		return ECORE_INVAL;

	/* TODO - check VF is in a state where it can accept message */
	if (!p_vf->vf_bulletin)
		return ECORE_INVAL;

	p_bulletin = p_vf->bulletin.p_virt;

	/* Increment bulletin board version and compute crc */
	p_bulletin->version++;
	p_bulletin->crc = ecore_crc32(0, (u8 *)p_bulletin + crc_size,
				      p_vf->bulletin.size - crc_size);

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "Posting Bulletin 0x%08x to VF[%d] (CRC 0x%08x)\n",
		   p_bulletin->version, p_vf->relative_vf_id, p_bulletin->crc);

	/* propagate bulletin board via dmae to vm memory */
	OSAL_MEMSET(&params, 0, sizeof(params));
	params.flags = ECORE_DMAE_FLAG_VF_DST;
	params.dst_vfid = p_vf->abs_vf_id;
	return ecore_dmae_host2host(p_hwfn, p_ptt, p_vf->bulletin.phys,
				    p_vf->vf_bulletin, p_vf->bulletin.size / 4,
				    &params);
}
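/* For reference, the consumer side is expected to mirror the producer logic
 * above: recompute the CRC over everything past the crc field and only
 * accept a snapshot whose CRC checks out. A sketch (hypothetical helper,
 * kept out of the build):
 */
#if 0
static bool example_bulletin_snapshot_valid(struct ecore_bulletin_content *p,
					    u32 size)
{
	int crc_size = sizeof(p->crc);

	/* A mismatch means the PF DMAed a newer bulletin mid-read */
	return p->crc == ecore_crc32(0, (u8 *)p + crc_size, size - crc_size);
}
#endif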
static enum _ecore_status_t ecore_iov_pci_cfg_info(struct ecore_dev *p_dev)
{
	struct ecore_hw_sriov_info *iov = p_dev->p_iov_info;
	int pos = iov->pos;

	DP_VERBOSE(p_dev, ECORE_MSG_IOV, "sriov ext pos %d\n", pos);
	OSAL_PCI_READ_CONFIG_WORD(p_dev, pos + PCI_SRIOV_CTRL, &iov->ctrl);

	OSAL_PCI_READ_CONFIG_WORD(p_dev,
				  pos + PCI_SRIOV_TOTAL_VF, &iov->total_vfs);
	OSAL_PCI_READ_CONFIG_WORD(p_dev,
				  pos + PCI_SRIOV_INITIAL_VF,
				  &iov->initial_vfs);

	OSAL_PCI_READ_CONFIG_WORD(p_dev, pos + PCI_SRIOV_NUM_VF, &iov->num_vfs);
	if (iov->num_vfs) {
		/* @@@TODO - in future we might want to add an OSAL here to
		 * allow each OS to decide on its own how to act.
		 */
		DP_VERBOSE(p_dev, ECORE_MSG_IOV,
			   "Number of VFs is already set to a non-zero value."
			   " Ignoring PCI configuration value\n");
		iov->num_vfs = 0;
	}

	OSAL_PCI_READ_CONFIG_WORD(p_dev,
				  pos + PCI_SRIOV_VF_OFFSET, &iov->offset);

	OSAL_PCI_READ_CONFIG_WORD(p_dev,
				  pos + PCI_SRIOV_VF_STRIDE, &iov->stride);

	OSAL_PCI_READ_CONFIG_WORD(p_dev,
				  pos + PCI_SRIOV_VF_DID, &iov->vf_device_id);

	OSAL_PCI_READ_CONFIG_DWORD(p_dev,
				   pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);

	OSAL_PCI_READ_CONFIG_DWORD(p_dev, pos + PCI_SRIOV_CAP, &iov->cap);

	OSAL_PCI_READ_CONFIG_BYTE(p_dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);

	DP_VERBOSE(p_dev, ECORE_MSG_IOV, "IOV info[%d]: nres %d, cap 0x%x,"
		   " ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d,"
		   " stride %d, page size 0x%x\n",
		   0, /* @@@TBD MichalK - function id */
		   iov->nres, iov->cap, iov->ctrl,
		   iov->total_vfs, iov->initial_vfs, iov->nr_virtfn,
		   iov->offset, iov->stride, iov->pgsz);

	/* Some sanity checks */
	if (iov->num_vfs > NUM_OF_VFS(p_dev) ||
	    iov->total_vfs > NUM_OF_VFS(p_dev)) {
		/* This can happen only due to a bug. In this case we set
		 * num_vfs to zero to avoid memory corruption in the code that
		 * assumes max number of vfs
		 */
		DP_NOTICE(p_dev, false,
			  "IOV: Unexpected number of vfs set: %d;"
			  " setting num_vfs to zero\n",
			  iov->num_vfs);

		iov->num_vfs = 0;
		iov->total_vfs = 0;
	}

	return ECORE_SUCCESS;
}
static void ecore_iov_clear_vf_igu_blocks(struct ecore_hwfn *p_hwfn,
					  struct ecore_ptt *p_ptt)
{
	struct ecore_igu_block *p_sb;
	u16 sb_id;
	u32 val;

	if (!p_hwfn->hw_info.p_igu_info) {
		DP_ERR(p_hwfn,
		       "ecore_iov_clear_vf_igu_blocks IGU Info not inited\n");
		return;
	}

	for (sb_id = 0;
	     sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev); sb_id++) {
		p_sb = &p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks[sb_id];
		if ((p_sb->status & ECORE_IGU_STATUS_FREE) &&
		    !(p_sb->status & ECORE_IGU_STATUS_PF)) {
			val = ecore_rd(p_hwfn, p_ptt,
				       IGU_REG_MAPPING_MEMORY + sb_id * 4);
			SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
			ecore_wr(p_hwfn, p_ptt,
				 IGU_REG_MAPPING_MEMORY + 4 * sb_id, val);
		}
	}
}
static void ecore_iov_setup_vfdb(struct ecore_hwfn *p_hwfn)
{
	struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info;
	struct ecore_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
	struct ecore_bulletin_content *p_bulletin_virt;
	dma_addr_t req_p, rply_p, bulletin_p;
	union pfvf_tlvs *p_reply_virt_addr;
	union vfpf_tlvs *p_req_virt_addr;
	u8 idx = 0;

	OSAL_MEMSET(p_iov_info->vfs_array, 0, sizeof(p_iov_info->vfs_array));

	p_req_virt_addr = p_iov_info->mbx_msg_virt_addr;
	req_p = p_iov_info->mbx_msg_phys_addr;
	p_reply_virt_addr = p_iov_info->mbx_reply_virt_addr;
	rply_p = p_iov_info->mbx_reply_phys_addr;
	p_bulletin_virt = p_iov_info->p_bulletins;
	bulletin_p = p_iov_info->bulletins_phys;
	if (!p_req_virt_addr || !p_reply_virt_addr || !p_bulletin_virt) {
		DP_ERR(p_hwfn,
		       "ecore_iov_setup_vfdb called without alloc mem first\n");
		return;
	}

	p_iov_info->base_vport_id = 1;	/* @@@TBD resource allocation */

	for (idx = 0; idx < p_iov->total_vfs; idx++) {
		struct ecore_vf_info *vf = &p_iov_info->vfs_array[idx];
		u32 concrete;

		vf->vf_mbx.req_virt = p_req_virt_addr + idx;
		vf->vf_mbx.req_phys = req_p + idx * sizeof(union vfpf_tlvs);
		vf->vf_mbx.reply_virt = p_reply_virt_addr + idx;
		vf->vf_mbx.reply_phys = rply_p + idx * sizeof(union pfvf_tlvs);

#ifdef CONFIG_ECORE_SW_CHANNEL
		vf->vf_mbx.sw_mbx.request_size = sizeof(union vfpf_tlvs);
		vf->vf_mbx.sw_mbx.mbx_state = VF_PF_WAIT_FOR_START_REQUEST;
#endif
		vf->state = VF_STOPPED;
		vf->b_init = false;

		vf->bulletin.phys = idx *
		    sizeof(struct ecore_bulletin_content) + bulletin_p;
		vf->bulletin.p_virt = p_bulletin_virt + idx;
		vf->bulletin.size = sizeof(struct ecore_bulletin_content);

		vf->relative_vf_id = idx;
		vf->abs_vf_id = idx + p_iov->first_vf_in_pf;
		concrete = ecore_vfid_to_concrete(p_hwfn, vf->abs_vf_id);
		vf->concrete_fid = concrete;
		/* TODO - need to devise a better way of getting opaque */
		vf->opaque_fid = (p_hwfn->hw_info.opaque_fid & 0xff) |
		    (vf->abs_vf_id << 8);
		/* @@TBD MichalK - add base vport_id of VFs to equation */
		vf->vport_id = p_iov_info->base_vport_id + idx;

		vf->num_mac_filters = ECORE_ETH_VF_NUM_MAC_FILTERS;
		vf->num_vlan_filters = ECORE_ETH_VF_NUM_VLAN_FILTERS;
	}
}
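/* The per-VF channel layout established above is plain array arithmetic over
 * three large DMA-coherent regions: VF idx owns element idx, e.g.
 *	vf->vf_mbx.req_virt = base_virt + idx;
 *	vf->vf_mbx.req_phys = base_phys + idx * sizeof(union vfpf_tlvs);
 * so a single allocation per region (see ecore_iov_allocate_vfdb below)
 * serves every VF.
 */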
static enum _ecore_status_t ecore_iov_allocate_vfdb(struct ecore_hwfn *p_hwfn)
{
	struct ecore_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
	void **p_v_addr;
	u16 num_vfs = 0;

	num_vfs = p_hwfn->p_dev->p_iov_info->total_vfs;

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "ecore_iov_allocate_vfdb for %d VFs\n", num_vfs);

	/* Allocate PF Mailbox buffer (per-VF) */
	p_iov_info->mbx_msg_size = sizeof(union vfpf_tlvs) * num_vfs;
	p_v_addr = &p_iov_info->mbx_msg_virt_addr;
	*p_v_addr = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
					    &p_iov_info->mbx_msg_phys_addr,
					    p_iov_info->mbx_msg_size);
	if (!*p_v_addr)
		return ECORE_NOMEM;

	/* Allocate PF Mailbox Reply buffer (per-VF) */
	p_iov_info->mbx_reply_size = sizeof(union pfvf_tlvs) * num_vfs;
	p_v_addr = &p_iov_info->mbx_reply_virt_addr;
	*p_v_addr = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
					    &p_iov_info->mbx_reply_phys_addr,
					    p_iov_info->mbx_reply_size);
	if (!*p_v_addr)
		return ECORE_NOMEM;

	p_iov_info->bulletins_size = sizeof(struct ecore_bulletin_content) *
				     num_vfs;
	p_v_addr = &p_iov_info->p_bulletins;
	*p_v_addr = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
					    &p_iov_info->bulletins_phys,
					    p_iov_info->bulletins_size);
	if (!*p_v_addr)
		return ECORE_NOMEM;

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "PF's Requests mailbox [%p virt 0x%lx phys], "
		   "Response mailbox [%p virt 0x%lx phys] Bulletins"
		   " [%p virt 0x%lx phys]\n",
		   p_iov_info->mbx_msg_virt_addr,
		   (unsigned long)p_iov_info->mbx_msg_phys_addr,
		   p_iov_info->mbx_reply_virt_addr,
		   (unsigned long)p_iov_info->mbx_reply_phys_addr,
		   p_iov_info->p_bulletins,
		   (unsigned long)p_iov_info->bulletins_phys);

	return ECORE_SUCCESS;
}
static void ecore_iov_free_vfdb(struct ecore_hwfn *p_hwfn)
{
	struct ecore_pf_iov *p_iov_info = p_hwfn->pf_iov_info;

	if (p_hwfn->pf_iov_info->mbx_msg_virt_addr)
		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
				       p_iov_info->mbx_msg_virt_addr,
				       p_iov_info->mbx_msg_phys_addr,
				       p_iov_info->mbx_msg_size);

	if (p_hwfn->pf_iov_info->mbx_reply_virt_addr)
		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
				       p_iov_info->mbx_reply_virt_addr,
				       p_iov_info->mbx_reply_phys_addr,
				       p_iov_info->mbx_reply_size);

	if (p_iov_info->p_bulletins)
		OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
				       p_iov_info->p_bulletins,
				       p_iov_info->bulletins_phys,
				       p_iov_info->bulletins_size);
}
enum _ecore_status_t ecore_iov_alloc(struct ecore_hwfn *p_hwfn)
{
	struct ecore_pf_iov *p_sriov;

	if (!IS_PF_SRIOV(p_hwfn)) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "No SR-IOV - no need for IOV db\n");
		return ECORE_SUCCESS;
	}

	p_sriov = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_sriov));
	if (!p_sriov) {
		DP_NOTICE(p_hwfn, true,
			  "Failed to allocate `struct ecore_sriov'\n");
		return ECORE_NOMEM;
	}

	p_hwfn->pf_iov_info = p_sriov;

	return ecore_iov_allocate_vfdb(p_hwfn);
}
void ecore_iov_setup(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
{
	if (!IS_PF_SRIOV(p_hwfn) || !IS_PF_SRIOV_ALLOC(p_hwfn))
		return;

	ecore_iov_setup_vfdb(p_hwfn);
	ecore_iov_clear_vf_igu_blocks(p_hwfn, p_ptt);
}

void ecore_iov_free(struct ecore_hwfn *p_hwfn)
{
	if (IS_PF_SRIOV_ALLOC(p_hwfn)) {
		ecore_iov_free_vfdb(p_hwfn);
		OSAL_FREE(p_hwfn->p_dev, p_hwfn->pf_iov_info);
	}
}

void ecore_iov_free_hw_info(struct ecore_dev *p_dev)
{
	OSAL_FREE(p_dev, p_dev->p_iov_info);
	p_dev->p_iov_info = OSAL_NULL;
}
enum _ecore_status_t ecore_iov_hw_info(struct ecore_hwfn *p_hwfn)
{
	struct ecore_dev *p_dev = p_hwfn->p_dev;
	int pos;
	enum _ecore_status_t rc;

	if (IS_VF(p_hwfn->p_dev))
		return ECORE_SUCCESS;

	/* Learn the PCI configuration */
	pos = OSAL_PCI_FIND_EXT_CAPABILITY(p_hwfn->p_dev,
					   PCI_EXT_CAP_ID_SRIOV);
	if (!pos) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, "No PCIe IOV support\n");
		return ECORE_SUCCESS;
	}

	/* Allocate a new struct for IOV information */
	/* TODO - can change to VALLOC when it's available */
	p_dev->p_iov_info = OSAL_ZALLOC(p_dev, GFP_KERNEL,
					sizeof(*p_dev->p_iov_info));
	if (!p_dev->p_iov_info) {
		DP_NOTICE(p_hwfn, true,
			  "Can't support IOV due to lack of memory\n");
		return ECORE_NOMEM;
	}
	p_dev->p_iov_info->pos = pos;

	rc = ecore_iov_pci_cfg_info(p_dev);
	if (rc)
		return rc;

	/* We want PF IOV to be synonymous with the existence of p_iov_info;
	 * In case the capability is published but there are no VFs, simply
	 * de-allocate the struct.
	 */
	if (!p_dev->p_iov_info->total_vfs) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "IOV capabilities, but no VFs are published\n");
		OSAL_FREE(p_dev, p_dev->p_iov_info);
		p_dev->p_iov_info = OSAL_NULL;
		return ECORE_SUCCESS;
	}

	/* Calculate the first VF index - this is a bit tricky; Basically,
	 * VFs start at offset 16 relative to PF0, and 2nd engine VFs begin
	 * after the first engine's VFs.
	 */
	p_dev->p_iov_info->first_vf_in_pf = p_hwfn->p_dev->p_iov_info->offset +
					    p_hwfn->abs_pf_id - 16;
	if (ECORE_PATH_ID(p_hwfn))
		p_dev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB;

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "First VF in hwfn 0x%08x\n",
		   p_dev->p_iov_info->first_vf_in_pf);

	return ECORE_SUCCESS;
}
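/* Example of the first_vf_in_pf math above (values illustrative): with the
 * standard VF offset of 16 and abs_pf_id 2 on the first engine, this PF's
 * VFs start at 16 + 2 - 16 = 2 in the 0-based VF space; on the second engine
 * MAX_NUM_VFS_BB is subtracted as well, since engine-1 VFs are numbered
 * after all of engine-0's.
 */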
bool ecore_iov_pf_sanity_check(struct ecore_hwfn *p_hwfn, int vfid)
{
	/* Check PF supports sriov */
	if (IS_VF(p_hwfn->p_dev) || !IS_ECORE_SRIOV(p_hwfn->p_dev) ||
	    !IS_PF_SRIOV_ALLOC(p_hwfn))
		return false;

	/* Check VF validity */
	if (!ecore_iov_is_valid_vfid(p_hwfn, vfid, true))
		return false;

	return true;
}
void ecore_iov_set_vf_to_disable(struct ecore_dev *p_dev,
				 u16 rel_vf_id, u8 to_disable)
{
	struct ecore_vf_info *vf;
	int i;

	for_each_hwfn(p_dev, i) {
		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];

		vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, false);
		if (!vf)
			continue;

		vf->to_disable = to_disable;
	}
}

void ecore_iov_set_vfs_to_disable(struct ecore_dev *p_dev,
				  u8 to_disable)
{
	u16 i;

	if (!IS_ECORE_SRIOV(p_dev))
		return;

	for (i = 0; i < p_dev->p_iov_info->total_vfs; i++)
		ecore_iov_set_vf_to_disable(p_dev, i, to_disable);
}
/* @@@TBD Consider taking outside of ecore... */
enum _ecore_status_t ecore_iov_set_vf_ctx(struct ecore_hwfn *p_hwfn,
					  u16 vf_id,
					  void *ctx)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;
	struct ecore_vf_info *vf = ecore_iov_get_vf_info(p_hwfn, vf_id, true);

	if (vf != OSAL_NULL) {
		vf->ctx = ctx;
#ifdef CONFIG_ECORE_SW_CHANNEL
		vf->vf_mbx.sw_mbx.mbx_state = VF_PF_WAIT_FOR_START_REQUEST;
#endif
	} else {
		rc = ECORE_UNKNOWN_ERROR;
	}

	return rc;
}
static void ecore_iov_vf_pglue_clear_err(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt,
					 u8 abs_vfid)
{
	ecore_wr(p_hwfn, p_ptt,
		 PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR + (abs_vfid >> 5) * 4,
		 1 << (abs_vfid & 0x1f));
}
static void ecore_iov_vf_igu_reset(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt,
				   struct ecore_vf_info *vf)
{
	int i;

	/* Set VF masks and configuration - pretend */
	ecore_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);

	ecore_wr(p_hwfn, p_ptt, IGU_REG_STATISTIC_NUM_VF_MSG_SENT, 0);

	/* unpretend */
	ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);

	/* iterate over all queues, clear sb consumer */
	for (i = 0; i < vf->num_sbs; i++)
		ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
						  vf->igu_sbs[i],
						  vf->opaque_fid, true);
}
static void ecore_iov_vf_igu_set_int(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt,
				     struct ecore_vf_info *vf, bool enable)
{
	u32 igu_vf_conf;

	ecore_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);

	igu_vf_conf = ecore_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION);

	if (enable)
		igu_vf_conf |= IGU_VF_CONF_MSI_MSIX_EN;
	else
		igu_vf_conf &= ~IGU_VF_CONF_MSI_MSIX_EN;

	ecore_wr(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION, igu_vf_conf);

	/* unpretend */
	ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
}
static enum _ecore_status_t
ecore_iov_enable_vf_access(struct ecore_hwfn *p_hwfn,
			   struct ecore_ptt *p_ptt, struct ecore_vf_info *vf)
{
	u32 igu_vf_conf = IGU_VF_CONF_FUNC_EN;
	enum _ecore_status_t rc;

	if (vf->to_disable)
		return ECORE_SUCCESS;

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "Enable internal access for vf %x [abs %x]\n", vf->abs_vf_id,
		   ECORE_VF_ABS_ID(p_hwfn, vf));

	ecore_iov_vf_pglue_clear_err(p_hwfn, p_ptt,
				     ECORE_VF_ABS_ID(p_hwfn, vf));

	ecore_iov_vf_igu_reset(p_hwfn, p_ptt, vf);

	rc = ecore_mcp_config_vf_msix(p_hwfn, p_ptt,
				      vf->abs_vf_id, vf->num_sbs);
	if (rc != ECORE_SUCCESS)
		return rc;

	ecore_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);

	SET_FIELD(igu_vf_conf, IGU_VF_CONF_PARENT, p_hwfn->rel_pf_id);
	STORE_RT_REG(p_hwfn, IGU_REG_VF_CONFIGURATION_RT_OFFSET, igu_vf_conf);

	ecore_init_run(p_hwfn, p_ptt, PHASE_VF, vf->abs_vf_id,
		       p_hwfn->hw_info.hw_mode);

	/* unpretend */
	ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);

	return rc;
}
/**
 * @brief ecore_iov_config_perm_table - configure the permission zone table.
 *
 * In E4, queue zone permission table size is 320x9. There
 * are 320 VF queues for single engine device (256 for dual
 * engine device), and each entry has the following format:
 * {Valid, VF[7:0]}
 *
 * @param p_hwfn
 * @param p_ptt
 * @param vf
 * @param enable
 */
static void ecore_iov_config_perm_table(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt,
					struct ecore_vf_info *vf, u8 enable)
{
	u32 reg_addr, val;
	u16 qzone_id = 0;
	int qid;

	for (qid = 0; qid < vf->num_rxqs; qid++) {
		ecore_fw_l2_queue(p_hwfn, vf->vf_queues[qid].fw_rx_qid,
				  &qzone_id);

		reg_addr = PSWHST_REG_ZONE_PERMISSION_TABLE + qzone_id * 4;
		val = enable ? (vf->abs_vf_id | (1 << 8)) : 0;
		ecore_wr(p_hwfn, p_ptt, reg_addr, val);
	}
}
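/* Each entry written above follows the {Valid, VF[7:0]} format from the
 * @brief: bit 8 is the valid bit and bits 7:0 carry the absolute VF id,
 * hence 'vf->abs_vf_id | (1 << 8)' when granting and 0 when revoking.
 */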
static void ecore_iov_enable_vf_traffic(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt,
					struct ecore_vf_info *vf)
{
	/* Reset vf in IGU - interrupts are still disabled */
	ecore_iov_vf_igu_reset(p_hwfn, p_ptt, vf);

	ecore_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 1);

	/* Permission Table */
	ecore_iov_config_perm_table(p_hwfn, p_ptt, vf, true);
}
static u8 ecore_iov_alloc_vf_igu_sbs(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt,
				     struct ecore_vf_info *vf,
				     u16 num_rx_queues)
{
	struct ecore_igu_block *igu_blocks;
	int qid = 0, igu_id = 0;
	u32 val = 0;

	igu_blocks = p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks;

	if (num_rx_queues > p_hwfn->hw_info.p_igu_info->free_blks)
		num_rx_queues = p_hwfn->hw_info.p_igu_info->free_blks;

	p_hwfn->hw_info.p_igu_info->free_blks -= num_rx_queues;

	SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, vf->abs_vf_id);
	SET_FIELD(val, IGU_MAPPING_LINE_VALID, 1);
	SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, 0);

	while ((qid < num_rx_queues) &&
	       (igu_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev))) {
		if (igu_blocks[igu_id].status & ECORE_IGU_STATUS_FREE) {
			struct cau_sb_entry sb_entry;

			vf->igu_sbs[qid] = (u16)igu_id;
			igu_blocks[igu_id].status &= ~ECORE_IGU_STATUS_FREE;

			SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid);

			ecore_wr(p_hwfn, p_ptt,
				 IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id,
				 val);

			/* Configure igu sb in CAU which was marked valid */
			ecore_init_cau_sb_entry(p_hwfn, &sb_entry,
						p_hwfn->rel_pf_id,
						vf->abs_vf_id, 1);
			ecore_dmae_host2grc(p_hwfn, p_ptt,
					    (u64)(osal_uintptr_t)&sb_entry,
					    CAU_REG_SB_VAR_MEMORY +
					    igu_id * sizeof(u64), 2, 0);
			qid++;
		}
		igu_id++;
	}

	vf->num_sbs = (u8)num_rx_queues;

	return vf->num_sbs;
}
/**
 * @brief The function invalidates all the VF entries,
 *        technically this isn't required, but added for
 *        cleanness and ease of debugging in case a VF attempts to
 *        produce an interrupt after it has been taken down.
 *
 * @param p_hwfn
 * @param p_ptt
 * @param vf
 */
static void ecore_iov_free_vf_igu_sbs(struct ecore_hwfn *p_hwfn,
				      struct ecore_ptt *p_ptt,
				      struct ecore_vf_info *vf)
{
	struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
	int idx, igu_id;
	u32 addr, val;

	/* Invalidate igu CAM lines and mark them as free */
	for (idx = 0; idx < vf->num_sbs; idx++) {
		igu_id = vf->igu_sbs[idx];
		addr = IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id;

		val = ecore_rd(p_hwfn, p_ptt, addr);
		SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
		ecore_wr(p_hwfn, p_ptt, addr, val);

		p_info->igu_map.igu_blocks[igu_id].status |=
		    ECORE_IGU_STATUS_FREE;

		p_hwfn->hw_info.p_igu_info->free_blks++;
	}

	vf->num_sbs = 0;
}
enum _ecore_status_t ecore_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn,
					      struct ecore_ptt *p_ptt,
					      u16 rel_vf_id, u16 num_rx_queues)
{
	u8 num_of_vf_available_chains = 0;
	struct ecore_vf_info *vf = OSAL_NULL;
	enum _ecore_status_t rc = ECORE_SUCCESS;
	u32 cids;
	u8 i;

	vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, false);
	if (!vf) {
		DP_ERR(p_hwfn, "ecore_iov_init_hw_for_vf : vf is OSAL_NULL\n");
		return ECORE_UNKNOWN_ERROR;
	}

	if (vf->b_init) {
		DP_NOTICE(p_hwfn, true, "VF[%d] is already active.\n",
			  rel_vf_id);
		return ECORE_INVAL;
	}

	/* Limit number of queues according to number of CIDs */
	ecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, &cids);
	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "VF[%d] - requesting to initialize for 0x%04x queues"
		   " [0x%04x CIDs available]\n",
		   vf->relative_vf_id, num_rx_queues, (u16)cids);
	num_rx_queues = OSAL_MIN_T(u16, num_rx_queues, ((u16)cids));

	num_of_vf_available_chains = ecore_iov_alloc_vf_igu_sbs(p_hwfn,
								p_ptt,
								vf,
								num_rx_queues);
	if (num_of_vf_available_chains == 0) {
		DP_ERR(p_hwfn, "no available igu sbs\n");
		return ECORE_NOMEM;
	}

	/* Choose queue number and index ranges */
	vf->num_rxqs = num_of_vf_available_chains;
	vf->num_txqs = num_of_vf_available_chains;

	for (i = 0; i < vf->num_rxqs; i++) {
		u16 queue_id = ecore_int_queue_id_from_sb_id(p_hwfn,
							     vf->igu_sbs[i]);

		if (queue_id > RESC_NUM(p_hwfn, ECORE_L2_QUEUE)) {
			DP_NOTICE(p_hwfn, true,
				  "VF[%d] will require utilizing"
				  " out-of-bounds queues - %04x\n",
				  vf->relative_vf_id, queue_id);
			/* TODO - cleanup the already allocated SBs */
			return ECORE_INVAL;
		}

		/* CIDs are per-VF, so no problem having them 0-based. */
		vf->vf_queues[i].fw_rx_qid = queue_id;
		vf->vf_queues[i].fw_tx_qid = queue_id;
		vf->vf_queues[i].fw_cid = i;

		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF[%d] - [%d] SB %04x, Tx/Rx queue %04x CID %04x\n",
			   vf->relative_vf_id, i, vf->igu_sbs[i], queue_id, i);
	}

	rc = ecore_iov_enable_vf_access(p_hwfn, p_ptt, vf);
	if (rc == ECORE_SUCCESS) {
		vf->b_init = true;
		p_hwfn->pf_iov_info->active_vfs[vf->relative_vf_id / 64] |=
		    (1ULL << (vf->relative_vf_id % 64));

		if (IS_LEAD_HWFN(p_hwfn))
			p_hwfn->p_dev->p_iov_info->num_vfs++;
	}

	return rc;
}
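/* active_vfs is an array of u64 bitmap words: VF N is tracked in word N / 64
 * at bit N % 64. Any path that clears the bit (see
 * ecore_iov_release_hw_for_vf below) must use the same indexing, or a stale
 * 'active' marking survives teardown.
 */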
void ecore_iov_set_link(struct ecore_hwfn *p_hwfn,
			u16 vfid,
			struct ecore_mcp_link_params *params,
			struct ecore_mcp_link_state *link,
			struct ecore_mcp_link_capabilities *p_caps)
{
	struct ecore_vf_info *p_vf = ecore_iov_get_vf_info(p_hwfn, vfid, false);
	struct ecore_bulletin_content *p_bulletin;

	if (!p_vf)
		return;

	p_bulletin = p_vf->bulletin.p_virt;
	p_bulletin->req_autoneg = params->speed.autoneg;
	p_bulletin->req_adv_speed = params->speed.advertised_speeds;
	p_bulletin->req_forced_speed = params->speed.forced_speed;
	p_bulletin->req_autoneg_pause = params->pause.autoneg;
	p_bulletin->req_forced_rx = params->pause.forced_rx;
	p_bulletin->req_forced_tx = params->pause.forced_tx;
	p_bulletin->req_loopback = params->loopback_mode;

	p_bulletin->link_up = link->link_up;
	p_bulletin->speed = link->speed;
	p_bulletin->full_duplex = link->full_duplex;
	p_bulletin->autoneg = link->an;
	p_bulletin->autoneg_complete = link->an_complete;
	p_bulletin->parallel_detection = link->parallel_detection;
	p_bulletin->pfc_enabled = link->pfc_enabled;
	p_bulletin->partner_adv_speed = link->partner_adv_speed;
	p_bulletin->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en;
	p_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en;
	p_bulletin->partner_adv_pause = link->partner_adv_pause;
	p_bulletin->sfp_tx_fault = link->sfp_tx_fault;

	p_bulletin->capability_speed = p_caps->speed_capabilities;
}
enum _ecore_status_t ecore_iov_release_hw_for_vf(struct ecore_hwfn *p_hwfn,
						 struct ecore_ptt *p_ptt,
						 u16 rel_vf_id)
{
	struct ecore_mcp_link_capabilities caps;
	struct ecore_mcp_link_params params;
	struct ecore_mcp_link_state link;
	struct ecore_vf_info *vf = OSAL_NULL;

	vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
	if (!vf) {
		DP_ERR(p_hwfn, "ecore_iov_release_hw_for_vf : vf is NULL\n");
		return ECORE_UNKNOWN_ERROR;
	}

	if (vf->bulletin.p_virt)
		OSAL_MEMSET(vf->bulletin.p_virt, 0,
			    sizeof(*vf->bulletin.p_virt));

	OSAL_MEMSET(&vf->p_vf_info, 0, sizeof(vf->p_vf_info));

	/* Get the link configuration back in bulletin so
	 * that when VFs are re-enabled they get the actual
	 * link configuration.
	 */
	OSAL_MEMCPY(&params, ecore_mcp_get_link_params(p_hwfn), sizeof(params));
	OSAL_MEMCPY(&link, ecore_mcp_get_link_state(p_hwfn), sizeof(link));
	OSAL_MEMCPY(&caps, ecore_mcp_get_link_capabilities(p_hwfn),
		    sizeof(caps));
	ecore_iov_set_link(p_hwfn, rel_vf_id, &params, &link, &caps);

	/* Forget the VF's acquisition message */
	OSAL_MEMSET(&vf->acquire, 0, sizeof(vf->acquire));

	/* disabling interrupts and resetting permission table was done during
	 * vf-close, however, we could get here without going through vf_close
	 */
	/* Disable Interrupts for VF */
	ecore_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);

	/* Reset Permission table */
	ecore_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);

	ecore_iov_free_vf_igu_sbs(p_hwfn, p_ptt, vf);

	if (vf->b_init) {
		vf->b_init = false;
		p_hwfn->pf_iov_info->active_vfs[vf->relative_vf_id / 64] &=
		    ~(1ULL << (vf->relative_vf_id % 64));

		if (IS_LEAD_HWFN(p_hwfn))
			p_hwfn->p_dev->p_iov_info->num_vfs--;
	}

	return ECORE_SUCCESS;
}
static bool ecore_iov_tlv_supported(u16 tlvtype)
{
	return tlvtype > CHANNEL_TLV_NONE && tlvtype < CHANNEL_TLV_MAX;
}

static void ecore_iov_lock_vf_pf_channel(struct ecore_hwfn *p_hwfn,
					 struct ecore_vf_info *vf, u16 tlv)
{
	/* lock the channel */
	/* mutex_lock(&vf->op_mutex); @@@TBD MichalK - add lock... */

	/* record the locking op */
	/* vf->op_current = tlv; @@@TBD MichalK */

	/* log the lock */
	if (ecore_iov_tlv_supported(tlv))
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF[%d]: vf pf channel locked by %s\n",
			   vf->abs_vf_id,
			   ecore_channel_tlvs_string[tlv]);
	else
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF[%d]: vf pf channel locked by %04x\n",
			   vf->abs_vf_id, tlv);
}

static void ecore_iov_unlock_vf_pf_channel(struct ecore_hwfn *p_hwfn,
					   struct ecore_vf_info *vf,
					   u16 expected_tlv)
{
	/* log the unlock */
	if (ecore_iov_tlv_supported(expected_tlv))
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF[%d]: vf pf channel unlocked by %s\n",
			   vf->abs_vf_id,
			   ecore_channel_tlvs_string[expected_tlv]);
	else
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF[%d]: vf pf channel unlocked by %04x\n",
			   vf->abs_vf_id, expected_tlv);

	/* record the locking op */
	/* vf->op_current = CHANNEL_TLV_NONE; */
}
/* place a given tlv on the tlv buffer, continuing current tlv list */
void *ecore_add_tlv(struct ecore_hwfn *p_hwfn,
		    u8 **offset, u16 type, u16 length)
{
	struct channel_tlv *tl = (struct channel_tlv *)*offset;

	tl->type = type;
	tl->length = length;

	/* Offset should keep pointing to next TLV (the end of the last) */
	*offset += length;

	/* Return a pointer to the start of the added tlv */
	return *offset - length;
}
/* list the types and lengths of the tlvs on the buffer */
void ecore_dp_tlv_list(struct ecore_hwfn *p_hwfn, void *tlvs_list)
{
	u16 i = 1, total_length = 0;
	struct channel_tlv *tlv;

	do {
		/* cast current tlv list entry to channel tlv header */
		tlv = (struct channel_tlv *)((u8 *)tlvs_list + total_length);

		/* output tlv */
		if (ecore_iov_tlv_supported(tlv->type))
			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "TLV number %d: type %s, length %d\n",
				   i, ecore_channel_tlvs_string[tlv->type],
				   tlv->length);
		else
			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "TLV number %d: type %d, length %d\n",
				   i, tlv->type, tlv->length);

		if (tlv->type == CHANNEL_TLV_LIST_END)
			return;

		/* Validate entry - protect against malicious VFs */
		if (!tlv->length) {
			DP_NOTICE(p_hwfn, false, "TLV of length 0 found\n");
			return;
		}

		total_length += tlv->length;

		if (total_length >= sizeof(struct tlv_buffer_size)) {
			DP_NOTICE(p_hwfn, false, "TLV ==> Buffer overflow\n");
			return;
		}

		i++;
	} while (1);
}
static void ecore_iov_send_response(struct ecore_hwfn *p_hwfn,
				    struct ecore_ptt *p_ptt,
				    struct ecore_vf_info *p_vf,
				    u16 length, u8 status)
{
	struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
	struct ecore_dmae_params params;
	u8 eng_vf_id;

	mbx->reply_virt->default_resp.hdr.status = status;

	ecore_dp_tlv_list(p_hwfn, mbx->reply_virt);

#ifdef CONFIG_ECORE_SW_CHANNEL
	mbx->sw_mbx.response_size =
	    length + sizeof(struct channel_list_end_tlv);

	if (!p_hwfn->p_dev->b_hw_channel)
		return;
#endif

	eng_vf_id = p_vf->abs_vf_id;

	OSAL_MEMSET(&params, 0, sizeof(struct ecore_dmae_params));
	params.flags = ECORE_DMAE_FLAG_VF_DST;
	params.dst_vfid = eng_vf_id;

	ecore_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys + sizeof(u64),
			     mbx->req_virt->first_tlv.reply_address +
			     sizeof(u64),
			     (sizeof(union pfvf_tlvs) - sizeof(u64)) / 4,
			     &params);

	/* Once PF copies the rc to the VF, the latter can continue
	 * and send an additional message. So we have to make sure the
	 * channel would be re-set to ready prior to that.
	 */
	REG_WR(p_hwfn,
	       GTT_BAR0_MAP_REG_USDM_RAM +
	       USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1);

	ecore_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys,
			     mbx->req_virt->first_tlv.reply_address,
			     sizeof(u64) / 4, &params);
}
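/* The reply above is deliberately DMAed in two stages: first everything past
 * the leading 8 bytes, then the channel-ready flag is raised, and only then
 * the first 8 bytes carrying the status header - so a polling VF can never
 * observe a completed header before the body has landed.
 */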
static u16 ecore_iov_vport_to_tlv(struct ecore_hwfn *p_hwfn,
				  enum ecore_iov_vport_update_flag flag)
{
	switch (flag) {
	case ECORE_IOV_VP_UPDATE_ACTIVATE:
		return CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
	case ECORE_IOV_VP_UPDATE_VLAN_STRIP:
		return CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
	case ECORE_IOV_VP_UPDATE_TX_SWITCH:
		return CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
	case ECORE_IOV_VP_UPDATE_MCAST:
		return CHANNEL_TLV_VPORT_UPDATE_MCAST;
	case ECORE_IOV_VP_UPDATE_ACCEPT_PARAM:
		return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
	case ECORE_IOV_VP_UPDATE_RSS:
		return CHANNEL_TLV_VPORT_UPDATE_RSS;
	case ECORE_IOV_VP_UPDATE_ACCEPT_ANY_VLAN:
		return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
	case ECORE_IOV_VP_UPDATE_SGE_TPA:
		return CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
	default:
		return 0;
	}
}
static u16 ecore_iov_prep_vp_update_resp_tlvs(struct ecore_hwfn *p_hwfn,
					      struct ecore_vf_info *p_vf,
					      struct ecore_iov_vf_mbx *p_mbx,
					      u8 status, u16 tlvs_mask,
					      u16 tlvs_accepted)
{
	struct pfvf_def_resp_tlv *resp;
	u16 size, total_len, i;

	OSAL_MEMSET(p_mbx->reply_virt, 0, sizeof(union pfvf_tlvs));
	p_mbx->offset = (u8 *)p_mbx->reply_virt;
	size = sizeof(struct pfvf_def_resp_tlv);
	total_len = size;

	ecore_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_VPORT_UPDATE, size);

	/* Prepare response for all extended tlvs if they are found by PF */
	for (i = 0; i < ECORE_IOV_VP_UPDATE_MAX; i++) {
		if (!(tlvs_mask & (1 << i)))
			continue;

		resp = ecore_add_tlv(p_hwfn, &p_mbx->offset,
				     ecore_iov_vport_to_tlv(p_hwfn, i), size);

		if (tlvs_accepted & (1 << i))
			resp->hdr.status = status;
		else
			resp->hdr.status = PFVF_STATUS_NOT_SUPPORTED;

		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF[%d] - vport_update resp: TLV %d, status %02x\n",
			   p_vf->relative_vf_id,
			   ecore_iov_vport_to_tlv(p_hwfn, i), resp->hdr.status);

		total_len += size;
	}

	ecore_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	return total_len;
}
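/* tlvs_mask marks which extended TLVs the VF supplied; tlvs_accepted is the
 * subset the PF actually applied. E.g. (bit positions illustrative) a mask
 * of 0x5 with 0x1 accepted yields one TLV echoing the real status and one
 * marked PFVF_STATUS_NOT_SUPPORTED, so the VF learns per-feature results.
 */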
static void ecore_iov_prepare_resp(struct ecore_hwfn *p_hwfn,
				   struct ecore_ptt *p_ptt,
				   struct ecore_vf_info *vf_info,
				   u16 type, u16 length, u8 status)
{
	struct ecore_iov_vf_mbx *mbx = &vf_info->vf_mbx;

	mbx->offset = (u8 *)mbx->reply_virt;

	ecore_add_tlv(p_hwfn, &mbx->offset, type, length);
	ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	ecore_iov_send_response(p_hwfn, p_ptt, vf_info, length, status);

	OSAL_IOV_PF_RESP_TYPE(p_hwfn, vf_info->relative_vf_id, status);
}
struct ecore_public_vf_info
*ecore_iov_get_public_vf_info(struct ecore_hwfn *p_hwfn,
			      u16 relative_vf_id,
			      bool b_enabled_only)
{
	struct ecore_vf_info *vf = OSAL_NULL;

	vf = ecore_iov_get_vf_info(p_hwfn, relative_vf_id, b_enabled_only);
	if (!vf)
		return OSAL_NULL;

	return &vf->p_vf_info;
}
static void ecore_iov_vf_cleanup(struct ecore_hwfn *p_hwfn,
				 struct ecore_vf_info *p_vf)
{
	u32 i;

	p_vf->vf_bulletin = 0;
	p_vf->vport_instance = 0;
	p_vf->configured_features = 0;

	/* If VF previously requested less resources, go back to default */
	p_vf->num_rxqs = p_vf->num_sbs;
	p_vf->num_txqs = p_vf->num_sbs;

	p_vf->num_active_rxqs = 0;

	for (i = 0; i < ECORE_MAX_VF_CHAINS_PER_PF; i++)
		p_vf->vf_queues[i].rxq_active = 0;

	OSAL_MEMSET(&p_vf->shadow_config, 0, sizeof(p_vf->shadow_config));
	OSAL_MEMSET(&p_vf->acquire, 0, sizeof(p_vf->acquire));
	OSAL_IOV_VF_CLEANUP(p_hwfn, p_vf->relative_vf_id);
}
static u8 ecore_iov_vf_mbx_acquire_resc(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt,
					struct ecore_vf_info *p_vf,
					struct vf_pf_resc_request *p_req,
					struct pf_vf_resc *p_resp)
{
	u8 i;

	/* Queue related information */
	p_resp->num_rxqs = p_vf->num_rxqs;
	p_resp->num_txqs = p_vf->num_txqs;
	p_resp->num_sbs = p_vf->num_sbs;

	for (i = 0; i < p_resp->num_sbs; i++) {
		p_resp->hw_sbs[i].hw_sb_id = p_vf->igu_sbs[i];
		/* TODO - what's this sb_qid field? Is it deprecated?
		 * or is there an ecore_client that looks at this?
		 */
		p_resp->hw_sbs[i].sb_qid = 0;
	}

	/* These fields are filled for backward compatibility.
	 * Unused by modern vfs.
	 */
	for (i = 0; i < p_resp->num_rxqs; i++) {
		ecore_fw_l2_queue(p_hwfn, p_vf->vf_queues[i].fw_rx_qid,
				  (u16 *)&p_resp->hw_qid[i]);
		p_resp->cid[i] = p_vf->vf_queues[i].fw_cid;
	}

	/* Filter related information */
	p_resp->num_mac_filters = OSAL_MIN_T(u8, p_vf->num_mac_filters,
					     p_req->num_mac_filters);
	p_resp->num_vlan_filters = OSAL_MIN_T(u8, p_vf->num_vlan_filters,
					      p_req->num_vlan_filters);

	/* This isn't really needed/enforced, but some legacy VFs might depend
	 * on the correct filling of this field.
	 */
	p_resp->num_mc_filters = ECORE_MAX_MC_ADDRS;

	/* Validate sufficient resources for VF */
	if (p_resp->num_rxqs < p_req->num_rxqs ||
	    p_resp->num_txqs < p_req->num_txqs ||
	    p_resp->num_sbs < p_req->num_sbs ||
	    p_resp->num_mac_filters < p_req->num_mac_filters ||
	    p_resp->num_vlan_filters < p_req->num_vlan_filters ||
	    p_resp->num_mc_filters < p_req->num_mc_filters) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "VF[%d] - Insufficient resources: rxq [%02x/%02x]"
			   " txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x]"
			   " vlan [%02x/%02x] mc [%02x/%02x]\n",
			   p_vf->abs_vf_id,
			   p_req->num_rxqs, p_resp->num_rxqs,
			   p_req->num_txqs, p_resp->num_txqs,
			   p_req->num_sbs, p_resp->num_sbs,
			   p_req->num_mac_filters, p_resp->num_mac_filters,
			   p_req->num_vlan_filters, p_resp->num_vlan_filters,
			   p_req->num_mc_filters, p_resp->num_mc_filters);

		/* Some legacy OSes are incapable of correctly handling this
		 * failure.
		 */
		if ((p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
		     ETH_HSI_VER_NO_PKT_LEN_TUNN) &&
		    (p_vf->acquire.vfdev_info.os_type ==
		     VFPF_ACQUIRE_OS_WINDOWS))
			return PFVF_STATUS_SUCCESS;

		return PFVF_STATUS_NO_RESOURCE;
	}

	return PFVF_STATUS_SUCCESS;
}
static void ecore_iov_vf_mbx_acquire_stats(struct ecore_hwfn *p_hwfn,
					   struct pfvf_stats_info *p_stats)
{
	p_stats->mstats.address = PXP_VF_BAR0_START_MSDM_ZONE_B +
				  OFFSETOF(struct mstorm_vf_zone,
					   non_trigger.eth_queue_stat);
	p_stats->mstats.len = sizeof(struct eth_mstorm_per_queue_stat);
	p_stats->ustats.address = PXP_VF_BAR0_START_USDM_ZONE_B +
				  OFFSETOF(struct ustorm_vf_zone,
					   non_trigger.eth_queue_stat);
	p_stats->ustats.len = sizeof(struct eth_ustorm_per_queue_stat);
	p_stats->pstats.address = PXP_VF_BAR0_START_PSDM_ZONE_B +
				  OFFSETOF(struct pstorm_vf_zone,
					   non_trigger.eth_queue_stat);
	p_stats->pstats.len = sizeof(struct eth_pstorm_per_queue_stat);
	p_stats->tstats.address = 0;
	p_stats->tstats.len = 0;
}
static void ecore_iov_vf_mbx_acquire(struct ecore_hwfn *p_hwfn,
				     struct ecore_ptt *p_ptt,
				     struct ecore_vf_info *vf)
{
	struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct pfvf_acquire_resp_tlv *resp = &mbx->reply_virt->acquire_resp;
	struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
	struct vfpf_acquire_tlv *req = &mbx->req_virt->acquire;
	u8 vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
	struct pf_vf_resc *resc = &resp->resc;
	enum _ecore_status_t rc;

	OSAL_MEMSET(resp, 0, sizeof(*resp));

	/* Write the PF version so that VF would know which version
	 * is supported - might be later overridden. This guarantees that
	 * VF could recognize legacy PF based on lack of versions in reply.
	 */
	pfdev_info->major_fp_hsi = ETH_HSI_VER_MAJOR;
	pfdev_info->minor_fp_hsi = ETH_HSI_VER_MINOR;

	/* Validate FW compatibility */
	if (req->vfdev_info.eth_fp_hsi_major != ETH_HSI_VER_MAJOR) {
		if (req->vfdev_info.capabilities &
		    VFPF_ACQUIRE_CAP_PRE_FP_HSI) {
			struct vf_pf_vfdev_info *p_vfdev = &req->vfdev_info;

			/* This legacy support would need to be removed once
			 * the major has changed.
			 */
			OSAL_BUILD_BUG_ON(ETH_HSI_VER_MAJOR != 3);

			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "VF[%d] is pre-fastpath HSI\n",
				   vf->abs_vf_id);
			p_vfdev->eth_fp_hsi_major = ETH_HSI_VER_MAJOR;
			p_vfdev->eth_fp_hsi_minor = ETH_HSI_VER_NO_PKT_LEN_TUNN;
		} else {
			DP_INFO(p_hwfn,
				"VF[%d] needs fastpath HSI %02x.%02x, which is"
				" incompatible with loaded FW's fastpath"
				" HSI %02x.%02x\n",
				vf->abs_vf_id,
				req->vfdev_info.eth_fp_hsi_major,
				req->vfdev_info.eth_fp_hsi_minor,
				ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);

			goto out;
		}
	}

	/* On 100g PFs, prevent old VFs from loading */
	if ((p_hwfn->p_dev->num_hwfns > 1) &&
	    !(req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_100G)) {
		DP_INFO(p_hwfn,
			"VF[%d] is running an old driver that doesn't support"
			" 100g\n",
			vf->abs_vf_id);
		goto out;
	}

#ifndef __EXTRACT__LINUX__
	if (OSAL_IOV_VF_ACQUIRE(p_hwfn, vf->relative_vf_id) != ECORE_SUCCESS) {
		vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
		goto out;
	}
#endif

	/* Store the acquire message */
	OSAL_MEMCPY(&vf->acquire, req, sizeof(vf->acquire));

	vf->opaque_fid = req->vfdev_info.opaque_fid;

	vf->vf_bulletin = req->bulletin_addr;
	vf->bulletin.size = (vf->bulletin.size < req->bulletin_size) ?
	    vf->bulletin.size : req->bulletin_size;

	/* fill in pfdev info */
	pfdev_info->chip_num = p_hwfn->p_dev->chip_num;
	pfdev_info->db_size = 0;	/* @@@ TBD MichalK Vf Doorbells */
	pfdev_info->indices_per_sb = PIS_PER_SB;

	pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED |
				   PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE;
	if (p_hwfn->p_dev->num_hwfns > 1)
		pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_100G;

	ecore_iov_vf_mbx_acquire_stats(p_hwfn, &pfdev_info->stats_info);

	OSAL_MEMCPY(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr,
		    ETH_ALEN);

	pfdev_info->fw_major = FW_MAJOR_VERSION;
	pfdev_info->fw_minor = FW_MINOR_VERSION;
	pfdev_info->fw_rev = FW_REVISION_VERSION;
	pfdev_info->fw_eng = FW_ENGINEERING_VERSION;

	/* Incorrect when legacy, but doesn't matter as legacy isn't reading
	 * this information.
	 */
	pfdev_info->minor_fp_hsi = OSAL_MIN_T(u8, ETH_HSI_VER_MINOR,
					      req->vfdev_info.eth_fp_hsi_minor);
	pfdev_info->os_type = OSAL_IOV_GET_OS_TYPE();
	ecore_mcp_get_mfw_ver(p_hwfn, p_ptt, &pfdev_info->mfw_ver,
			      OSAL_NULL);

	pfdev_info->dev_type = p_hwfn->p_dev->type;
	pfdev_info->chip_rev = p_hwfn->p_dev->chip_rev;

	/* Fill resources available to VF; Make sure there are enough to
	 * satisfy the VF's request.
	 */
	vfpf_status = ecore_iov_vf_mbx_acquire_resc(p_hwfn, p_ptt, vf,
						    &req->resc_request, resc);
	if (vfpf_status != PFVF_STATUS_SUCCESS)
		goto out;

	/* Start the VF in FW */
	rc = ecore_sp_vf_start(p_hwfn, vf);
	if (rc != ECORE_SUCCESS) {
		DP_NOTICE(p_hwfn, true, "Failed to start VF[%02x]\n",
			  vf->abs_vf_id);
		vfpf_status = PFVF_STATUS_FAILURE;
		goto out;
	}

	/* Fill agreed size of bulletin board in response, and post
	 * an initial image to the bulletin board.
	 */
	resp->bulletin_size = vf->bulletin.size;
	ecore_iov_post_vf_bulletin(p_hwfn, vf->relative_vf_id, p_ptt);

	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
		   "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x,"
		   " db_size=%d, idx_per_sb=%d, pf_cap=0x%lx\n"
		   "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d,"
		   " n_vlans-%d, n_mcs-%d\n",
		   vf->abs_vf_id, resp->pfdev_info.chip_num,
		   resp->pfdev_info.db_size, resp->pfdev_info.indices_per_sb,
		   (unsigned long)resp->pfdev_info.capabilities, resc->num_rxqs,
		   resc->num_txqs, resc->num_sbs, resc->num_mac_filters,
		   resc->num_vlan_filters, resc->num_mc_filters);

	vf->state = VF_ACQUIRED;

out:
	/* Prepare Response */
	ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_ACQUIRE,
			       sizeof(struct pfvf_acquire_resp_tlv),
			       vfpf_status);
}
static enum _ecore_status_t
__ecore_iov_spoofchk_set(struct ecore_hwfn *p_hwfn,
			 struct ecore_vf_info *p_vf, bool val)
{
	struct ecore_sp_vport_update_params params;
	enum _ecore_status_t rc;

	if (val == p_vf->spoof_chk) {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Spoofchk value[%d] is already configured\n", val);
		return ECORE_SUCCESS;
	}

	OSAL_MEMSET(&params, 0, sizeof(struct ecore_sp_vport_update_params));
	params.opaque_fid = p_vf->opaque_fid;
	params.vport_id = p_vf->vport_id;
	params.update_anti_spoofing_en_flg = 1;
	params.anti_spoofing_en = val;

	rc = ecore_sp_vport_update(p_hwfn, &params, ECORE_SPQ_MODE_EBLOCK,
				   OSAL_NULL);
	if (rc == ECORE_SUCCESS) {
		p_vf->spoof_chk = val;
		p_vf->req_spoofchk_val = p_vf->spoof_chk;
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Spoofchk val[%d] configured\n", val);
	} else {
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Spoofchk configuration[val:%d] failed for VF[%d]\n",
			   val, p_vf->relative_vf_id);
	}

	return rc;
}
static enum _ecore_status_t
ecore_iov_reconfigure_unicast_vlan(struct ecore_hwfn *p_hwfn,
				   struct ecore_vf_info *p_vf)
{
	struct ecore_filter_ucast filter;
	enum _ecore_status_t rc = ECORE_SUCCESS;
	int i;

	OSAL_MEMSET(&filter, 0, sizeof(filter));
	filter.is_rx_filter = 1;
	filter.is_tx_filter = 1;
	filter.vport_to_add_to = p_vf->vport_id;
	filter.opcode = ECORE_FILTER_ADD;

	/* Reconfigure vlans */
	for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
		if (!p_vf->shadow_config.vlans[i].used)
			continue;

		filter.type = ECORE_FILTER_VLAN;
		filter.vlan = p_vf->shadow_config.vlans[i].vid;
		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
			   "Reconfiguring VLAN [0x%04x] for VF [%04x]\n",
			   filter.vlan, p_vf->relative_vf_id);
		rc = ecore_sp_eth_filter_ucast(p_hwfn,
					       p_vf->opaque_fid,
					       &filter,
					       ECORE_SPQ_MODE_CB,
					       OSAL_NULL);
		if (rc) {
			DP_NOTICE(p_hwfn, true,
				  "Failed to configure VLAN [%04x]"
				  " to VF [%04x]\n",
				  filter.vlan, p_vf->relative_vf_id);
			break;
		}
	}

	return rc;
}
static enum _ecore_status_t
ecore_iov_reconfigure_unicast_shadow(struct ecore_hwfn *p_hwfn,
				     struct ecore_vf_info *p_vf, u64 events)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;

	/*TODO - what about MACs? */

	if ((events & (1 << VLAN_ADDR_FORCED)) &&
	    !(p_vf->configured_features & (1 << VLAN_ADDR_FORCED)))
		rc = ecore_iov_reconfigure_unicast_vlan(p_hwfn, p_vf);

	return rc;
}
static int ecore_iov_configure_vport_forced(struct ecore_hwfn *p_hwfn,
					    struct ecore_vf_info *p_vf,
					    u64 events)
{
	enum _ecore_status_t rc = ECORE_SUCCESS;
	struct ecore_filter_ucast filter;

	if (!p_vf->vport_instance)
		return ECORE_INVAL;

	if (events & (1 << MAC_ADDR_FORCED)) {
		/* Since there's no way [currently] of removing the MAC,
		 * we can always assume this means we need to force it.
		 */
		OSAL_MEMSET(&filter, 0, sizeof(filter));
		filter.type = ECORE_FILTER_MAC;
		filter.opcode = ECORE_FILTER_REPLACE;
		filter.is_rx_filter = 1;
		filter.is_tx_filter = 1;
		filter.vport_to_add_to = p_vf->vport_id;
		OSAL_MEMCPY(filter.mac, p_vf->bulletin.p_virt->mac, ETH_ALEN);

		rc = ecore_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
					       &filter,
					       ECORE_SPQ_MODE_CB, OSAL_NULL);
		if (rc) {
			DP_NOTICE(p_hwfn, true,
				  "PF failed to configure MAC for VF\n");
			return rc;
		}

		p_vf->configured_features |= 1 << MAC_ADDR_FORCED;
	}

	if (events & (1 << VLAN_ADDR_FORCED)) {
		struct ecore_sp_vport_update_params vport_update;
		u8 removal;
		int i;

		OSAL_MEMSET(&filter, 0, sizeof(filter));
		filter.type = ECORE_FILTER_VLAN;
		filter.is_rx_filter = 1;
		filter.is_tx_filter = 1;
		filter.vport_to_add_to = p_vf->vport_id;
		filter.vlan = p_vf->bulletin.p_virt->pvid;
		filter.opcode = filter.vlan ? ECORE_FILTER_REPLACE :
		    ECORE_FILTER_FLUSH;

		/* Send the ramrod */
		rc = ecore_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
					       &filter,
					       ECORE_SPQ_MODE_CB, OSAL_NULL);
		if (rc) {
			DP_NOTICE(p_hwfn, true,
				  "PF failed to configure VLAN for VF\n");
			return rc;
		}

		/* Update the default-vlan & silent vlan stripping */
		OSAL_MEMSET(&vport_update, 0, sizeof(vport_update));
		vport_update.opaque_fid = p_vf->opaque_fid;
		vport_update.vport_id = p_vf->vport_id;
		vport_update.update_default_vlan_enable_flg = 1;
		vport_update.default_vlan_enable_flg = filter.vlan ? 1 : 0;
		vport_update.update_default_vlan_flg = 1;
		vport_update.default_vlan = filter.vlan;

		vport_update.update_inner_vlan_removal_flg = 1;
		removal = filter.vlan ?
		    1 : p_vf->shadow_config.inner_vlan_removal;
		vport_update.inner_vlan_removal_flg = removal;
		vport_update.silent_vlan_removal_flg = filter.vlan ? 1 : 0;
		rc = ecore_sp_vport_update(p_hwfn, &vport_update,
					   ECORE_SPQ_MODE_EBLOCK, OSAL_NULL);
		if (rc) {
			DP_NOTICE(p_hwfn, true,
				  "PF failed to configure VF vport for vlan\n");
			return rc;
		}

		/* Update all the Rx queues */
		for (i = 0; i < ECORE_MAX_VF_CHAINS_PER_PF; i++) {
			u16 qid;

			if (!p_vf->vf_queues[i].rxq_active)
				continue;

			qid = p_vf->vf_queues[i].fw_rx_qid;

			rc = ecore_sp_eth_rx_queues_update(p_hwfn, qid,
							   1, 0, 1,
							   ECORE_SPQ_MODE_EBLOCK,
							   OSAL_NULL);
			if (rc) {
				DP_NOTICE(p_hwfn, true,
					  "Failed to send Rx update"
					  " for queue[0x%04x]\n",
					  qid);
				return rc;
			}
		}

		if (filter.vlan)
			p_vf->configured_features |= 1 << VLAN_ADDR_FORCED;
		else
			p_vf->configured_features &= ~(1 << VLAN_ADDR_FORCED);
	}

	/* If forced features are terminated, we need to configure the shadow
	 * configuration back again.
	 */
	if (events)
		ecore_iov_reconfigure_unicast_shadow(p_hwfn, p_vf, events);

	return rc;
}
static void ecore_iov_vf_mbx_start_vport(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt,
					 struct ecore_vf_info *vf)
{
	struct ecore_sp_vport_start_params params = { 0 };
	struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct vfpf_vport_start_tlv *start;
	u8 status = PFVF_STATUS_SUCCESS;
	struct ecore_vf_info *vf_info;
	u64 *p_bitmap;
	int sb_id;
	enum _ecore_status_t rc;

	vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vf->relative_vf_id, true);
	if (!vf_info) {
		DP_NOTICE(p_hwfn->p_dev, true,
			  "Failed to get VF info, invalid vfid [%d]\n",
			  vf->relative_vf_id);
		return;
	}

	vf->state = VF_ENABLED;
	start = &mbx->req_virt->start_vport;

	/* Initialize Status block in CAU */
	for (sb_id = 0; sb_id < vf->num_sbs; sb_id++) {
		if (!start->sb_addr[sb_id]) {
			DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
				   "VF[%d] did not fill the address of SB %d\n",
				   vf->relative_vf_id, sb_id);
			break;
		}

		ecore_int_cau_conf_sb(p_hwfn, p_ptt,
				      start->sb_addr[sb_id],
				      vf->igu_sbs[sb_id],
				      vf->abs_vf_id, 1);
	}
	ecore_iov_enable_vf_traffic(p_hwfn, p_ptt, vf);

	vf->mtu = start->mtu;
	vf->shadow_config.inner_vlan_removal = start->inner_vlan_removal;

	/* Take into consideration configuration forced by hypervisor;
	 * If none is configured, use the supplied VF values [for old
	 * vfs that would still be fine, since they passed '0' as padding].
	 */
	p_bitmap = &vf_info->bulletin.p_virt->valid_bitmap;
	if (!(*p_bitmap & (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED))) {
		u8 vf_req = start->only_untagged;

		vf_info->bulletin.p_virt->default_only_untagged = vf_req;
		*p_bitmap |= 1 << VFPF_BULLETIN_UNTAGGED_DEFAULT;
	}

	params.tpa_mode = start->tpa_mode;
	params.remove_inner_vlan = start->inner_vlan_removal;
	params.tx_switching = true;

#ifndef ASIC_ONLY
	if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
		DP_NOTICE(p_hwfn, false,
			  "FPGA: Don't config VF for Tx-switching [no pVFC]\n");
		params.tx_switching = false;
	}
#endif

	params.only_untagged = vf_info->bulletin.p_virt->default_only_untagged;
	params.drop_ttl0 = false;
	params.concrete_fid = vf->concrete_fid;
	params.opaque_fid = vf->opaque_fid;
	params.vport_id = vf->vport_id;
	params.max_buffers_per_cqe = start->max_buffers_per_cqe;
	params.mtu = vf->mtu;
	params.check_mac = true;

	rc = ecore_sp_eth_vport_start(p_hwfn, &params);
	if (rc != ECORE_SUCCESS) {
		DP_ERR(p_hwfn,
		       "ecore_iov_vf_mbx_start_vport returned error %d\n", rc);
		status = PFVF_STATUS_FAILURE;
	} else {
		vf->vport_instance++;

		/* Force configuration if needed on the newly opened vport */
		ecore_iov_configure_vport_forced(p_hwfn, vf, *p_bitmap);
		OSAL_IOV_POST_START_VPORT(p_hwfn, vf->relative_vf_id,
					  vf->vport_id, vf->opaque_fid);
		__ecore_iov_spoofchk_set(p_hwfn, vf, vf->req_spoofchk_val);
	}

	ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_START,
			       sizeof(struct pfvf_def_resp_tlv), status);
}
static void ecore_iov_vf_mbx_stop_vport(struct ecore_hwfn *p_hwfn,
					struct ecore_ptt *p_ptt,
					struct ecore_vf_info *vf)
{
	u8 status = PFVF_STATUS_SUCCESS;
	enum _ecore_status_t rc;

	vf->vport_instance--;
	vf->spoof_chk = false;

	rc = ecore_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id);
	if (rc != ECORE_SUCCESS) {
		DP_ERR(p_hwfn,
		       "ecore_iov_vf_mbx_stop_vport returned error %d\n", rc);
		status = PFVF_STATUS_FAILURE;
	}

	/* Forget the configuration on the vport */
	vf->configured_features = 0;
	OSAL_MEMSET(&vf->shadow_config, 0, sizeof(vf->shadow_config));

	ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_TEARDOWN,
			       sizeof(struct pfvf_def_resp_tlv), status);
}
static void ecore_iov_vf_mbx_start_rxq_resp(struct ecore_hwfn *p_hwfn,
					    struct ecore_ptt *p_ptt,
					    struct ecore_vf_info *vf,
					    u8 status, bool b_legacy)
{
	struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
	struct pfvf_start_queue_resp_tlv *p_tlv;
	struct vfpf_start_rxq_tlv *req;
	u16 length;

	mbx->offset = (u8 *)mbx->reply_virt;

	/* Taking a bigger struct instead of adding a TLV to list was a
	 * mistake, but one which we're now stuck with, as some older
	 * clients assume the size of the previous response.
	 */
	if (!b_legacy)
		length = sizeof(*p_tlv);
	else
		length = sizeof(struct pfvf_def_resp_tlv);

	p_tlv = ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_RXQ,
			      length);
	ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	/* Update the TLV with the response */
	if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) {
		req = &mbx->req_virt->start_rxq;
		p_tlv->offset = PXP_VF_BAR0_START_MSDM_ZONE_B +
				OFFSETOF(struct mstorm_vf_zone,
					 non_trigger.eth_rx_queue_producers) +
				sizeof(struct eth_rx_prod_data) * req->rx_qid;
	}

	ecore_iov_send_response(p_hwfn, p_ptt, vf, length, status);
}
static void ecore_iov_vf_mbx_start_rxq(struct ecore_hwfn *p_hwfn,
				       struct ecore_ptt *p_ptt,
				       struct ecore_vf_info *vf)
{
	struct ecore_queue_start_common_params p_params;
	struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
	u8 status = PFVF_STATUS_NO_RESOURCE;
	struct vfpf_start_rxq_tlv *req;
	bool b_legacy_vf = false;
	enum _ecore_status_t rc;

	req = &mbx->req_virt->start_rxq;
	OSAL_MEMSET(&p_params, 0, sizeof(p_params));
	p_params.queue_id = (u8)vf->vf_queues[req->rx_qid].fw_rx_qid;
	p_params.vf_qid = req->rx_qid;
	p_params.vport_id = vf->vport_id;
	p_params.stats_id = vf->abs_vf_id + 0x10;
	p_params.sb = req->hw_sb;
	p_params.sb_idx = req->sb_index;

	if (!ecore_iov_validate_rxq(p_hwfn, vf, req->rx_qid) ||
	    !ecore_iov_validate_sb(p_hwfn, vf, req->hw_sb))
		goto out;

	/* Legacy VFs have their Producers in a different location, which they
	 * calculate on their own and clean the producer prior to this.
	 */
	if (vf->acquire.vfdev_info.eth_fp_hsi_minor ==
	    ETH_HSI_VER_NO_PKT_LEN_TUNN)
		b_legacy_vf = true;
	else
		REG_WR(p_hwfn,
		       GTT_BAR0_MAP_REG_MSDM_RAM +
		       MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id, req->rx_qid),
		       0);

	rc = ecore_sp_eth_rxq_start_ramrod(p_hwfn, vf->opaque_fid,
					   vf->vf_queues[req->rx_qid].fw_cid,
					   &p_params,
					   vf->abs_vf_id + 0x10,
					   req->bd_max_bytes,
					   req->rxq_addr,
					   req->cqe_pbl_addr,
					   req->cqe_pbl_size,
					   b_legacy_vf);
	if (rc) {
		status = PFVF_STATUS_FAILURE;
	} else {
		status = PFVF_STATUS_SUCCESS;
		vf->vf_queues[req->rx_qid].rxq_active = true;
		vf->num_active_rxqs++;
	}

out:
	ecore_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf,
					status, b_legacy_vf);
}
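/* Legacy (pre-PKT_LEN_TUNN) VFs compute their Rx producer address inside
 * their own BAR and clean it themselves, which is why the PF above zeroes
 * the zone-B producer only for modern VFs and passes b_legacy_vf onward so
 * firmware knows which producer convention the queue uses.
 */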
static void ecore_iov_vf_mbx_start_txq_resp(struct ecore_hwfn *p_hwfn,
					    struct ecore_ptt *p_ptt,
					    struct ecore_vf_info *p_vf,
					    u8 status)
{
	struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
	struct pfvf_start_queue_resp_tlv *p_tlv;
	bool b_legacy = false;
	u16 length;

	mbx->offset = (u8 *)mbx->reply_virt;

	/* Taking a bigger struct instead of adding a TLV to list was a
	 * mistake, but one which we're now stuck with, as some older
	 * clients assume the size of the previous response.
	 */
	if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
	    ETH_HSI_VER_NO_PKT_LEN_TUNN)
		b_legacy = true;

	if (!b_legacy)
		length = sizeof(*p_tlv);
	else
		length = sizeof(struct pfvf_def_resp_tlv);

	p_tlv = ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_TXQ,
			      length);
	ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
		      sizeof(struct channel_list_end_tlv));

	/* Update the TLV with the response */
	if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) {
		u16 qid = mbx->req_virt->start_txq.tx_qid;

		p_tlv->offset = DB_ADDR_VF(p_vf->vf_queues[qid].fw_cid,
					   DQ_DEMS_LEGACY);
	}

	ecore_iov_send_response(p_hwfn, p_ptt, p_vf, length, status);
}
2063 static void ecore_iov_vf_mbx_start_txq(struct ecore_hwfn *p_hwfn,
2064 struct ecore_ptt *p_ptt,
2065 struct ecore_vf_info *vf)
2067 struct ecore_queue_start_common_params p_params;
2068 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2069 u8 status = PFVF_STATUS_NO_RESOURCE;
2070 union ecore_qm_pq_params pq_params;
2071 struct vfpf_start_txq_tlv *req;
2072 enum _ecore_status_t rc;
2074 /* Prepare the parameters which would choose the right PQ */
2075 OSAL_MEMSET(&pq_params, 0, sizeof(pq_params));
2076 pq_params.eth.is_vf = 1;
2077 pq_params.eth.vf_id = vf->relative_vf_id;
2079 req = &mbx->req_virt->start_txq;
2080 OSAL_MEMSET(&p_params, 0, sizeof(p_params));
2081 p_params.queue_id = (u8)vf->vf_queues[req->tx_qid].fw_tx_qid;
2082 p_params.vport_id = vf->vport_id;
2083 p_params.stats_id = vf->abs_vf_id + 0x10;
2084 p_params.sb = req->hw_sb;
2085 p_params.sb_idx = req->sb_index;
2087 if (!ecore_iov_validate_txq(p_hwfn, vf, req->tx_qid) ||
2088 !ecore_iov_validate_sb(p_hwfn, vf, req->hw_sb))
2089 goto out;
2091 rc = ecore_sp_eth_txq_start_ramrod(
2092 p_hwfn,
2093 vf->opaque_fid,
2094 vf->vf_queues[req->tx_qid].fw_cid,
2095 &p_params,
2096 req->pbl_addr,
2097 req->pbl_size,
2098 &pq_params);
2100 if (rc)
2101 status = PFVF_STATUS_FAILURE;
2102 else {
2103 status = PFVF_STATUS_SUCCESS;
2104 vf->vf_queues[req->tx_qid].txq_active = true;
2105 }
2107 out:
2108 ecore_iov_vf_mbx_start_txq_resp(p_hwfn, p_ptt, vf, status);
2109 }
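/* The pq_params filled above (eth.is_vf / eth.vf_id) steer the queue
 * manager toward a VF-specific physical queue for this Tx queue, which is
 * what later allows per-VF rate limiting to apply to the VF's traffic.
 * The exact PQ mapping is resolved outside this file.
 */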
2111 static enum _ecore_status_t ecore_iov_vf_stop_rxqs(struct ecore_hwfn *p_hwfn,
2112 struct ecore_vf_info *vf,
2113 u16 rxq_id,
2114 u8 num_rxqs,
2115 bool cqe_completion)
2116 {
2117 enum _ecore_status_t rc = ECORE_SUCCESS;
2118 int qid;
2120 if (rxq_id + num_rxqs > OSAL_ARRAY_SIZE(vf->vf_queues))
2121 return ECORE_INVAL;
2123 for (qid = rxq_id; qid < rxq_id + num_rxqs; qid++) {
2124 if (vf->vf_queues[qid].rxq_active) {
2125 rc = ecore_sp_eth_rx_queue_stop(p_hwfn,
2126 vf->vf_queues[qid].
2127 fw_rx_qid, false,
2128 cqe_completion);
2129 if (rc)
2130 return rc;
2131 }
2133 vf->vf_queues[qid].rxq_active = false;
2134 vf->num_active_rxqs--;
2135 }
2137 return rc;
2138 }
2140 static enum _ecore_status_t ecore_iov_vf_stop_txqs(struct ecore_hwfn *p_hwfn,
2141 struct ecore_vf_info *vf,
2142 u16 txq_id, u8 num_txqs)
2143 {
2144 enum _ecore_status_t rc = ECORE_SUCCESS;
2145 int qid;
2147 if (txq_id + num_txqs > OSAL_ARRAY_SIZE(vf->vf_queues))
2148 return ECORE_INVAL;
2150 for (qid = txq_id; qid < txq_id + num_txqs; qid++) {
2151 if (vf->vf_queues[qid].txq_active) {
2152 rc = ecore_sp_eth_tx_queue_stop(p_hwfn,
2153 vf->vf_queues[qid].
2154 fw_tx_qid);
2155 if (rc)
2156 return rc;
2157 }
2159 vf->vf_queues[qid].txq_active = false;
2160 }
2162 return rc;
2163 }
2164 static void ecore_iov_vf_mbx_stop_rxqs(struct ecore_hwfn *p_hwfn,
2165 struct ecore_ptt *p_ptt,
2166 struct ecore_vf_info *vf)
2167 {
2168 u16 length = sizeof(struct pfvf_def_resp_tlv);
2169 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2170 u8 status = PFVF_STATUS_SUCCESS;
2171 struct vfpf_stop_rxqs_tlv *req;
2172 enum _ecore_status_t rc;
2174 /* Starting from a qid other than 0 is allowed; in that case we need
2175 * to make sure that qid + num_qs doesn't exceed the number of queues
2176 * that actually exist.
2177 */
2178 req = &mbx->req_virt->stop_rxqs;
2179 rc = ecore_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid,
2180 req->num_rxqs, req->cqe_completion);
2181 if (rc)
2182 status = PFVF_STATUS_FAILURE;
2184 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_RXQS,
2185 length, status);
2186 }
2188 static void ecore_iov_vf_mbx_stop_txqs(struct ecore_hwfn *p_hwfn,
2189 struct ecore_ptt *p_ptt,
2190 struct ecore_vf_info *vf)
2191 {
2192 u16 length = sizeof(struct pfvf_def_resp_tlv);
2193 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2194 u8 status = PFVF_STATUS_SUCCESS;
2195 struct vfpf_stop_txqs_tlv *req;
2196 enum _ecore_status_t rc;
2198 /* Starting from a qid other than 0 is allowed; in that case we need
2199 * to make sure that qid + num_qs doesn't exceed the number of queues
2200 * that actually exist.
2201 */
2202 req = &mbx->req_virt->stop_txqs;
2203 rc = ecore_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid, req->num_txqs);
2204 if (rc)
2205 status = PFVF_STATUS_FAILURE;
2207 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_TXQS,
2208 length, status);
2209 }
2211 static void ecore_iov_vf_mbx_update_rxqs(struct ecore_hwfn *p_hwfn,
2212 struct ecore_ptt *p_ptt,
2213 struct ecore_vf_info *vf)
2214 {
2215 u16 length = sizeof(struct pfvf_def_resp_tlv);
2216 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2217 struct vfpf_update_rxq_tlv *req;
2218 u8 status = PFVF_STATUS_SUCCESS;
2219 u8 complete_event_flg;
2220 u8 complete_cqe_flg;
2221 u16 qid;
2222 enum _ecore_status_t rc;
2223 u8 i;
2225 req = &mbx->req_virt->update_rxq;
2226 complete_cqe_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_CQE_FLAG);
2227 complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG);
2229 for (i = 0; i < req->num_rxqs; i++) {
2230 qid = req->rx_qid + i;
2232 if (!vf->vf_queues[qid].rxq_active) {
2233 DP_NOTICE(p_hwfn, true,
2234 "VF rx_qid = %d isn't active!\n", qid);
2235 status = PFVF_STATUS_FAILURE;
2236 break;
2237 }
2239 rc = ecore_sp_eth_rx_queues_update(p_hwfn,
2240 vf->vf_queues[qid].fw_rx_qid,
2241 1,
2242 complete_cqe_flg,
2243 complete_event_flg,
2244 ECORE_SPQ_MODE_EBLOCK,
2245 OSAL_NULL);
2247 if (rc) {
2248 status = PFVF_STATUS_FAILURE;
2249 break;
2250 }
2251 }
2253 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UPDATE_RXQ,
2254 length, status);
2255 }
2257 void *ecore_iov_search_list_tlvs(struct ecore_hwfn *p_hwfn,
2258 void *p_tlvs_list, u16 req_type)
2259 {
2260 struct channel_tlv *p_tlv = (struct channel_tlv *)p_tlvs_list;
2261 int len = 0;
2263 do {
2264 if (!p_tlv->length) {
2265 DP_NOTICE(p_hwfn, true, "Zero length TLV found\n");
2266 return OSAL_NULL;
2267 }
2269 if (p_tlv->type == req_type) {
2270 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2271 "Extended tlv type %s, length %d found\n",
2272 ecore_channel_tlvs_string[p_tlv->type],
2273 p_tlv->length);
2274 return p_tlv;
2275 }
2277 len += p_tlv->length;
2278 p_tlv = (struct channel_tlv *)((u8 *)p_tlv + p_tlv->length);
2280 if ((len + p_tlv->length) > TLV_BUFFER_SIZE) {
2281 DP_NOTICE(p_hwfn, true,
2282 "TLVs have overrun the buffer size\n");
2283 return OSAL_NULL;
2284 }
2285 } while (p_tlv->type != CHANNEL_TLV_LIST_END);
2287 return OSAL_NULL;
2288 }
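/* The VF-PF channel buffer walked above is a packed TLV sequence, e.g. for
 * a vport-update request (sizes illustrative):
 *
 *	[first_tlv: VPORT_UPDATE][ACTIVATE tlv][RSS tlv][LIST_END tlv]
 *
 * Each header is a struct channel_tlv {type, length}, so advancing by
 * p_tlv->length lands on the next header until CHANNEL_TLV_LIST_END; the
 * running 'len' guards against walking past TLV_BUFFER_SIZE.
 */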
2290 static void
2291 ecore_iov_vp_update_act_param(struct ecore_hwfn *p_hwfn,
2292 struct ecore_sp_vport_update_params *p_data,
2293 struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2294 {
2295 struct vfpf_vport_update_activate_tlv *p_act_tlv;
2296 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
2298 p_act_tlv = (struct vfpf_vport_update_activate_tlv *)
2299 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2300 if (!p_act_tlv)
2301 return;
2303 p_data->update_vport_active_rx_flg = p_act_tlv->update_rx;
2304 p_data->vport_active_rx_flg = p_act_tlv->active_rx;
2305 p_data->update_vport_active_tx_flg = p_act_tlv->update_tx;
2306 p_data->vport_active_tx_flg = p_act_tlv->active_tx;
2307 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_ACTIVATE;
2308 }
2310 static void
2311 ecore_iov_vp_update_vlan_param(struct ecore_hwfn *p_hwfn,
2312 struct ecore_sp_vport_update_params *p_data,
2313 struct ecore_vf_info *p_vf,
2314 struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2315 {
2316 struct vfpf_vport_update_vlan_strip_tlv *p_vlan_tlv;
2317 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
2319 p_vlan_tlv = (struct vfpf_vport_update_vlan_strip_tlv *)
2320 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2321 if (!p_vlan_tlv)
2322 return;
2324 p_vf->shadow_config.inner_vlan_removal = p_vlan_tlv->remove_vlan;
2326 /* Ignore the VF request if we're forcing a vlan */
2327 if (!(p_vf->configured_features & (1 << VLAN_ADDR_FORCED))) {
2328 p_data->update_inner_vlan_removal_flg = 1;
2329 p_data->inner_vlan_removal_flg = p_vlan_tlv->remove_vlan;
2330 }
2332 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_VLAN_STRIP;
2333 }
2335 static void
2336 ecore_iov_vp_update_tx_switch(struct ecore_hwfn *p_hwfn,
2337 struct ecore_sp_vport_update_params *p_data,
2338 struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2339 {
2340 struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv;
2341 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
2343 p_tx_switch_tlv = (struct vfpf_vport_update_tx_switch_tlv *)
2344 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2345 if (!p_tx_switch_tlv)
2346 return;
2348 #ifndef ASIC_ONLY
2349 if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
2350 DP_NOTICE(p_hwfn, false,
2351 "FPGA: Ignore tx-switching configuration originating"
2352 " from VFs\n");
2353 return;
2354 }
2355 #endif
2357 p_data->update_tx_switching_flg = 1;
2358 p_data->tx_switching_flg = p_tx_switch_tlv->tx_switching;
2359 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_TX_SWITCH;
2360 }
2362 static void
2363 ecore_iov_vp_update_mcast_bin_param(struct ecore_hwfn *p_hwfn,
2364 struct ecore_sp_vport_update_params *p_data,
2365 struct ecore_iov_vf_mbx *p_mbx,
2366 u16 *tlvs_mask)
2367 {
2368 struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv;
2369 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_MCAST;
2371 p_mcast_tlv = (struct vfpf_vport_update_mcast_bin_tlv *)
2372 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2373 if (!p_mcast_tlv)
2374 return;
2376 p_data->update_approx_mcast_flg = 1;
2377 OSAL_MEMCPY(p_data->bins, p_mcast_tlv->bins,
2378 sizeof(unsigned long) *
2379 ETH_MULTICAST_MAC_BINS_IN_REGS);
2380 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_MCAST;
2381 }
2383 static void
2384 ecore_iov_vp_update_accept_flag(struct ecore_hwfn *p_hwfn,
2385 struct ecore_sp_vport_update_params *p_data,
2386 struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2387 {
2388 struct ecore_filter_accept_flags *p_flags = &p_data->accept_flags;
2389 struct vfpf_vport_update_accept_param_tlv *p_accept_tlv;
2390 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
2392 p_accept_tlv = (struct vfpf_vport_update_accept_param_tlv *)
2393 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2394 if (!p_accept_tlv)
2395 return;
2397 p_flags->update_rx_mode_config = p_accept_tlv->update_rx_mode;
2398 p_flags->rx_accept_filter = p_accept_tlv->rx_accept_filter;
2399 p_flags->update_tx_mode_config = p_accept_tlv->update_tx_mode;
2400 p_flags->tx_accept_filter = p_accept_tlv->tx_accept_filter;
2401 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_ACCEPT_PARAM;
2402 }
2404 static void
2405 ecore_iov_vp_update_accept_any_vlan(struct ecore_hwfn *p_hwfn,
2406 struct ecore_sp_vport_update_params *p_data,
2407 struct ecore_iov_vf_mbx *p_mbx,
2408 u16 *tlvs_mask)
2409 {
2410 struct vfpf_vport_update_accept_any_vlan_tlv *p_accept_any_vlan;
2411 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
2413 p_accept_any_vlan = (struct vfpf_vport_update_accept_any_vlan_tlv *)
2414 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2415 if (!p_accept_any_vlan)
2416 return;
2418 p_data->accept_any_vlan = p_accept_any_vlan->accept_any_vlan;
2419 p_data->update_accept_any_vlan_flg =
2420 p_accept_any_vlan->update_accept_any_vlan_flg;
2421 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_ACCEPT_ANY_VLAN;
2422 }
2424 static void
2425 ecore_iov_vp_update_rss_param(struct ecore_hwfn *p_hwfn,
2426 struct ecore_vf_info *vf,
2427 struct ecore_sp_vport_update_params *p_data,
2428 struct ecore_rss_params *p_rss,
2429 struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2430 {
2431 struct vfpf_vport_update_rss_tlv *p_rss_tlv;
2432 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_RSS;
2433 u16 i, q_idx, max_q_idx;
2434 u16 table_size;
2436 p_rss_tlv = (struct vfpf_vport_update_rss_tlv *)
2437 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2438 if (!p_rss_tlv) {
2439 p_data->rss_params = OSAL_NULL;
2440 return;
2441 }
2443 OSAL_MEMSET(p_rss, 0, sizeof(struct ecore_rss_params));
2445 p_rss->update_rss_config =
2446 !!(p_rss_tlv->update_rss_flags &
2447 VFPF_UPDATE_RSS_CONFIG_FLAG);
2448 p_rss->update_rss_capabilities =
2449 !!(p_rss_tlv->update_rss_flags &
2450 VFPF_UPDATE_RSS_CAPS_FLAG);
2451 p_rss->update_rss_ind_table =
2452 !!(p_rss_tlv->update_rss_flags &
2453 VFPF_UPDATE_RSS_IND_TABLE_FLAG);
2454 p_rss->update_rss_key =
2455 !!(p_rss_tlv->update_rss_flags &
2456 VFPF_UPDATE_RSS_KEY_FLAG);
2458 p_rss->rss_enable = p_rss_tlv->rss_enable;
2459 p_rss->rss_eng_id = vf->relative_vf_id + 1;
2460 p_rss->rss_caps = p_rss_tlv->rss_caps;
2461 p_rss->rss_table_size_log = p_rss_tlv->rss_table_size_log;
2462 OSAL_MEMCPY(p_rss->rss_ind_table, p_rss_tlv->rss_ind_table,
2463 sizeof(p_rss->rss_ind_table));
2464 OSAL_MEMCPY(p_rss->rss_key, p_rss_tlv->rss_key,
2465 sizeof(p_rss->rss_key));
2467 table_size = OSAL_MIN_T(u16, OSAL_ARRAY_SIZE(p_rss->rss_ind_table),
2468 (1 << p_rss_tlv->rss_table_size_log));
2470 max_q_idx = OSAL_ARRAY_SIZE(vf->vf_queues);
2472 for (i = 0; i < table_size; i++) {
2473 u16 index = vf->vf_queues[0].fw_rx_qid;
2475 q_idx = p_rss->rss_ind_table[i];
2476 if (q_idx >= max_q_idx)
2477 DP_NOTICE(p_hwfn, true,
2478 "rss_ind_table[%d] = %d,"
2479 " rxq is out of range\n",
2480 i, q_idx);
2481 else if (!vf->vf_queues[q_idx].rxq_active)
2482 DP_NOTICE(p_hwfn, true,
2483 "rss_ind_table[%d] = %d, rxq is not active\n",
2484 i, q_idx);
2485 else
2486 index = vf->vf_queues[q_idx].fw_rx_qid;
2487 p_rss->rss_ind_table[i] = index;
2488 }
2490 p_data->rss_params = p_rss;
2491 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_RSS;
2492 }
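/* The loop above translates VF-local queue indices into firmware Rx queue
 * ids. E.g., if a VF's indirection table holds local qids {0, 1, 0, 1},
 * each entry is replaced with vf_queues[qid].fw_rx_qid so the RSS engine
 * is programmed with absolute queue ids; out-of-range or inactive entries
 * fall back to the VF's first queue.
 */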
2494 static void
2495 ecore_iov_vp_update_sge_tpa_param(struct ecore_hwfn *p_hwfn,
2496 struct ecore_vf_info *vf,
2497 struct ecore_sp_vport_update_params *p_data,
2498 struct ecore_sge_tpa_params *p_sge_tpa,
2499 struct ecore_iov_vf_mbx *p_mbx,
2500 u16 *tlvs_mask)
2501 {
2502 struct vfpf_vport_update_sge_tpa_tlv *p_sge_tpa_tlv;
2503 u16 tlv = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
2505 p_sge_tpa_tlv = (struct vfpf_vport_update_sge_tpa_tlv *)
2506 ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2508 if (!p_sge_tpa_tlv) {
2509 p_data->sge_tpa_params = OSAL_NULL;
2510 return;
2511 }
2513 OSAL_MEMSET(p_sge_tpa, 0, sizeof(struct ecore_sge_tpa_params));
2515 p_sge_tpa->update_tpa_en_flg =
2516 !!(p_sge_tpa_tlv->update_sge_tpa_flags & VFPF_UPDATE_TPA_EN_FLAG);
2517 p_sge_tpa->update_tpa_param_flg =
2518 !!(p_sge_tpa_tlv->update_sge_tpa_flags &
2519 VFPF_UPDATE_TPA_PARAM_FLAG);
2521 p_sge_tpa->tpa_ipv4_en_flg =
2522 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV4_EN_FLAG);
2523 p_sge_tpa->tpa_ipv6_en_flg =
2524 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV6_EN_FLAG);
2525 p_sge_tpa->tpa_pkt_split_flg =
2526 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_PKT_SPLIT_FLAG);
2527 p_sge_tpa->tpa_hdr_data_split_flg =
2528 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_HDR_DATA_SPLIT_FLAG);
2529 p_sge_tpa->tpa_gro_consistent_flg =
2530 !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_GRO_CONSIST_FLAG);
2532 p_sge_tpa->tpa_max_aggs_num = p_sge_tpa_tlv->tpa_max_aggs_num;
2533 p_sge_tpa->tpa_max_size = p_sge_tpa_tlv->tpa_max_size;
2534 p_sge_tpa->tpa_min_size_to_start = p_sge_tpa_tlv->tpa_min_size_to_start;
2535 p_sge_tpa->tpa_min_size_to_cont = p_sge_tpa_tlv->tpa_min_size_to_cont;
2536 p_sge_tpa->max_buffers_per_cqe = p_sge_tpa_tlv->max_buffers_per_cqe;
2538 p_data->sge_tpa_params = p_sge_tpa;
2540 *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_SGE_TPA;
2541 }
2543 static void ecore_iov_vf_mbx_vport_update(struct ecore_hwfn *p_hwfn,
2544 struct ecore_ptt *p_ptt,
2545 struct ecore_vf_info *vf)
2546 {
2547 struct ecore_sp_vport_update_params params;
2548 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2549 struct ecore_sge_tpa_params sge_tpa_params;
2550 u16 tlvs_mask = 0, tlvs_accepted = 0;
2551 struct ecore_rss_params rss_params;
2552 u8 status = PFVF_STATUS_SUCCESS;
2553 u16 length;
2554 enum _ecore_status_t rc;
2556 /* Validate that the VF can send such a request */
2557 if (!vf->vport_instance) {
2558 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2559 "No VPORT instance available for VF[%d],"
2560 " failing vport update\n",
2561 vf->relative_vf_id);
2562 status = PFVF_STATUS_FAILURE;
2563 goto out;
2564 }
2566 OSAL_MEMSET(&params, 0, sizeof(params));
2567 params.opaque_fid = vf->opaque_fid;
2568 params.vport_id = vf->vport_id;
2569 params.rss_params = OSAL_NULL;
2571 /* Search for extended tlvs list and update values
2572 * from VF in struct ecore_sp_vport_update_params.
2573 */
2574 ecore_iov_vp_update_act_param(p_hwfn, &params, mbx, &tlvs_mask);
2575 ecore_iov_vp_update_vlan_param(p_hwfn, &params, vf, mbx, &tlvs_mask);
2576 ecore_iov_vp_update_tx_switch(p_hwfn, &params, mbx, &tlvs_mask);
2577 ecore_iov_vp_update_mcast_bin_param(p_hwfn, &params, mbx, &tlvs_mask);
2578 ecore_iov_vp_update_accept_flag(p_hwfn, &params, mbx, &tlvs_mask);
2579 ecore_iov_vp_update_rss_param(p_hwfn, vf, &params, &rss_params,
2580 mbx, &tlvs_mask);
2581 ecore_iov_vp_update_accept_any_vlan(p_hwfn, &params, mbx, &tlvs_mask);
2582 ecore_iov_vp_update_sge_tpa_param(p_hwfn, vf, &params,
2583 &sge_tpa_params, mbx, &tlvs_mask);
2585 /* Just log a message if there is no single extended tlv in buffer.
2586 * When all features of vport update ramrod would be requested by VF
2587 * as extended TLVs in buffer then an error can be returned in response
2588 * if there is no extended TLV present in buffer.
2589 */
2590 tlvs_accepted = tlvs_mask;
2592 #ifndef LINUX_REMOVE
2593 if (OSAL_IOV_VF_VPORT_UPDATE(p_hwfn, vf->relative_vf_id,
2594 &params, &tlvs_accepted) !=
2595 ECORE_SUCCESS) {
2597 status = PFVF_STATUS_NOT_SUPPORTED;
2598 goto out;
2599 }
2600 #endif
2602 if (!tlvs_accepted) {
2603 if (tlvs_mask)
2604 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2605 "Upper-layer prevents said VF"
2606 " configuration\n");
2607 else
2608 DP_NOTICE(p_hwfn, true,
2609 "No feature tlvs found for vport update\n");
2610 status = PFVF_STATUS_NOT_SUPPORTED;
2611 goto out;
2612 }
2614 rc = ecore_sp_vport_update(p_hwfn, &params, ECORE_SPQ_MODE_EBLOCK,
2615 OSAL_NULL);
2617 if (rc)
2618 status = PFVF_STATUS_FAILURE;
2620 out:
2621 length = ecore_iov_prep_vp_update_resp_tlvs(p_hwfn, vf, mbx, status,
2622 tlvs_mask, tlvs_accepted);
2623 ecore_iov_send_response(p_hwfn, p_ptt, vf, length, status);
2624 }
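/* Note the two masks used above: tlvs_mask records every extended TLV
 * found in the request, while tlvs_accepted starts as a copy of it and may
 * have bits cleared by the OSAL_IOV_VF_VPORT_UPDATE() hook. The response
 * built by ecore_iov_prep_vp_update_resp_tlvs() reports both, so the VF
 * can tell per-feature which parts of its request were actually applied.
 */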
2626 static enum _ecore_status_t
2627 ecore_iov_vf_update_vlan_shadow(struct ecore_hwfn *p_hwfn,
2628 struct ecore_vf_info *p_vf,
2629 struct ecore_filter_ucast *p_params)
2630 {
2631 int i;
2633 /* First remove entries and then add new ones */
2634 if (p_params->opcode == ECORE_FILTER_REMOVE) {
2635 for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
2636 if (p_vf->shadow_config.vlans[i].used &&
2637 p_vf->shadow_config.vlans[i].vid ==
2638 p_params->vlan) {
2639 p_vf->shadow_config.vlans[i].used = false;
2640 break;
2641 }
2642 if (i == ECORE_ETH_VF_NUM_VLAN_FILTERS + 1) {
2643 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2644 "VF [%d] - Tries to remove a non-existing"
2645 " vlan\n",
2646 p_vf->relative_vf_id);
2647 return ECORE_INVAL;
2648 }
2649 } else if (p_params->opcode == ECORE_FILTER_REPLACE ||
2650 p_params->opcode == ECORE_FILTER_FLUSH) {
2651 for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
2652 p_vf->shadow_config.vlans[i].used = false;
2653 }
2655 /* In forced mode, we're willing to remove entries - but we don't add
2656 * new ones.
2657 */
2658 if (p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED))
2659 return ECORE_SUCCESS;
2661 if (p_params->opcode == ECORE_FILTER_ADD ||
2662 p_params->opcode == ECORE_FILTER_REPLACE) {
2663 for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
2664 if (p_vf->shadow_config.vlans[i].used)
2665 continue;
2667 p_vf->shadow_config.vlans[i].used = true;
2668 p_vf->shadow_config.vlans[i].vid = p_params->vlan;
2669 break;
2670 }
2672 if (i == ECORE_ETH_VF_NUM_VLAN_FILTERS + 1) {
2673 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2674 "VF [%d] - Tries to configure more than %d"
2676 p_vf->relative_vf_id,
2677 ECORE_ETH_VF_NUM_VLAN_FILTERS + 1);
2678 return ECORE_INVAL;
2679 }
2680 }
2682 return ECORE_SUCCESS;
2683 }
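/* The shadow configuration maintained above lets the PF replay a VF's
 * VLAN filters later on - e.g., when a forced VLAN is removed and the
 * VF's own filtering needs to be re-instated - without asking the VF to
 * resend them.
 */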
2685 static enum _ecore_status_t
2686 ecore_iov_vf_update_mac_shadow(struct ecore_hwfn *p_hwfn,
2687 struct ecore_vf_info *p_vf,
2688 struct ecore_filter_ucast *p_params)
2689 {
2690 char empty_mac[ETH_ALEN];
2691 int i;
2693 OSAL_MEM_ZERO(empty_mac, ETH_ALEN);
2695 /* If we're in forced-mode, we don't allow any change */
2696 /* TODO - this would change if we were ever to implement logic for
2697 * removing a forced MAC altogether [in which case, like for vlans,
2698 * we should be able to re-trace previous configuration.
2699 */
2700 if (p_vf->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED))
2701 return ECORE_SUCCESS;
2703 /* First remove entries and then add new ones */
2704 if (p_params->opcode == ECORE_FILTER_REMOVE) {
2705 for (i = 0; i < ECORE_ETH_VF_NUM_MAC_FILTERS; i++) {
2706 if (!OSAL_MEMCMP(p_vf->shadow_config.macs[i],
2707 p_params->mac, ETH_ALEN)) {
2708 OSAL_MEM_ZERO(p_vf->shadow_config.macs[i],
2709 ETH_ALEN);
2710 break;
2711 }
2712 }
2714 if (i == ECORE_ETH_VF_NUM_MAC_FILTERS) {
2715 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2716 "MAC isn't configured\n");
2717 return ECORE_INVAL;
2718 }
2719 } else if (p_params->opcode == ECORE_FILTER_REPLACE ||
2720 p_params->opcode == ECORE_FILTER_FLUSH) {
2721 for (i = 0; i < ECORE_ETH_VF_NUM_MAC_FILTERS; i++)
2722 OSAL_MEM_ZERO(p_vf->shadow_config.macs[i], ETH_ALEN);
2723 }
2725 /* List the new MAC address */
2726 if (p_params->opcode != ECORE_FILTER_ADD &&
2727 p_params->opcode != ECORE_FILTER_REPLACE)
2728 return ECORE_SUCCESS;
2730 for (i = 0; i < ECORE_ETH_VF_NUM_MAC_FILTERS; i++) {
2731 if (!OSAL_MEMCMP(p_vf->shadow_config.macs[i],
2732 empty_mac, ETH_ALEN)) {
2733 OSAL_MEMCPY(p_vf->shadow_config.macs[i],
2734 p_params->mac, ETH_ALEN);
2735 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2736 "Added MAC at %d entry in shadow\n", i);
2737 break;
2738 }
2739 }
2741 if (i == ECORE_ETH_VF_NUM_MAC_FILTERS) {
2742 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2743 "No available place for MAC\n");
2744 return ECORE_INVAL;
2745 }
2747 return ECORE_SUCCESS;
2748 }
2750 static enum _ecore_status_t
2751 ecore_iov_vf_update_unicast_shadow(struct ecore_hwfn *p_hwfn,
2752 struct ecore_vf_info *p_vf,
2753 struct ecore_filter_ucast *p_params)
2754 {
2755 enum _ecore_status_t rc = ECORE_SUCCESS;
2757 if (p_params->type == ECORE_FILTER_MAC) {
2758 rc = ecore_iov_vf_update_mac_shadow(p_hwfn, p_vf, p_params);
2759 if (rc != ECORE_SUCCESS)
2760 return rc;
2761 }
2763 if (p_params->type == ECORE_FILTER_VLAN)
2764 rc = ecore_iov_vf_update_vlan_shadow(p_hwfn, p_vf, p_params);
2766 return rc;
2767 }
2769 static void ecore_iov_vf_mbx_ucast_filter(struct ecore_hwfn *p_hwfn,
2770 struct ecore_ptt *p_ptt,
2771 struct ecore_vf_info *vf)
2772 {
2773 struct ecore_bulletin_content *p_bulletin = vf->bulletin.p_virt;
2774 struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2775 struct vfpf_ucast_filter_tlv *req;
2776 u8 status = PFVF_STATUS_SUCCESS;
2777 struct ecore_filter_ucast params;
2778 enum _ecore_status_t rc;
2780 /* Prepare the unicast filter params */
2781 OSAL_MEMSET(&params, 0, sizeof(struct ecore_filter_ucast));
2782 req = &mbx->req_virt->ucast_filter;
2783 params.opcode = (enum ecore_filter_opcode)req->opcode;
2784 params.type = (enum ecore_filter_ucast_type)req->type;
2786 /* @@@TBD - We might need logic on HV side in determining this */
2787 params.is_rx_filter = 1;
2788 params.is_tx_filter = 1;
2789 params.vport_to_remove_from = vf->vport_id;
2790 params.vport_to_add_to = vf->vport_id;
2791 OSAL_MEMCPY(params.mac, req->mac, ETH_ALEN);
2792 params.vlan = req->vlan;
2794 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2795 "VF[%d]: opcode 0x%02x type 0x%02x [%s %s] [vport 0x%02x]"
2796 " MAC %02x:%02x:%02x:%02x:%02x:%02x, vlan 0x%04x\n",
2797 vf->abs_vf_id, params.opcode, params.type,
2798 params.is_rx_filter ? "RX" : "",
2799 params.is_tx_filter ? "TX" : "",
2800 params.vport_to_add_to,
2801 params.mac[0], params.mac[1], params.mac[2],
2802 params.mac[3], params.mac[4], params.mac[5], params.vlan);
2804 if (!vf->vport_instance) {
2805 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2806 "No VPORT instance available for VF[%d],"
2807 " failing ucast MAC configuration\n",
2808 vf->abs_vf_id);
2809 status = PFVF_STATUS_FAILURE;
2810 goto out;
2811 }
2813 /* Update shadow copy of the VF configuration. In case shadow indicates
2814 * the action should be blocked return success to VF to imitate the
2815 * firmware behaviour in such case.
2816 */
2817 if (ecore_iov_vf_update_unicast_shadow(p_hwfn, vf, &params) !=
2818 ECORE_SUCCESS)
2819 goto out;
2821 /* Determine if the unicast filtering is acceptable by PF */
2822 if ((p_bulletin->valid_bitmap & (1 << VLAN_ADDR_FORCED)) &&
2823 (params.type == ECORE_FILTER_VLAN ||
2824 params.type == ECORE_FILTER_MAC_VLAN)) {
2825 /* Once VLAN is forced or PVID is set, do not allow
2826 * to add/replace any further VLANs.
2828 if (params.opcode == ECORE_FILTER_ADD ||
2829 params.opcode == ECORE_FILTER_REPLACE)
2830 status = PFVF_STATUS_FORCED;
2831 goto out;
2832 }
2834 if ((p_bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)) &&
2835 (params.type == ECORE_FILTER_MAC ||
2836 params.type == ECORE_FILTER_MAC_VLAN)) {
2837 if (OSAL_MEMCMP(p_bulletin->mac, params.mac, ETH_ALEN) ||
2838 (params.opcode != ECORE_FILTER_ADD &&
2839 params.opcode != ECORE_FILTER_REPLACE))
2840 status = PFVF_STATUS_FORCED;
2841 goto out;
2842 }
2844 rc = OSAL_IOV_CHK_UCAST(p_hwfn, vf->relative_vf_id, &params);
2845 if (rc == ECORE_EXISTS) {
2846 goto out;
2847 } else if (rc == ECORE_INVAL) {
2848 status = PFVF_STATUS_FAILURE;
2849 goto out;
2850 }
2852 rc = ecore_sp_eth_filter_ucast(p_hwfn, vf->opaque_fid, &params,
2853 ECORE_SPQ_MODE_CB, OSAL_NULL);
2854 if (rc)
2855 status = PFVF_STATUS_FAILURE;
2857 out:
2858 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UCAST_FILTER,
2859 sizeof(struct pfvf_def_resp_tlv), status);
2860 }
2862 static void ecore_iov_vf_mbx_int_cleanup(struct ecore_hwfn *p_hwfn,
2863 struct ecore_ptt *p_ptt,
2864 struct ecore_vf_info *vf)
2865 {
2866 int i;
2868 /* Reset the SBs */
2869 for (i = 0; i < vf->num_sbs; i++)
2870 ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
2871 vf->igu_sbs[i],
2872 vf->opaque_fid, false);
2874 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_INT_CLEANUP,
2875 sizeof(struct pfvf_def_resp_tlv),
2876 PFVF_STATUS_SUCCESS);
2877 }
2879 static void ecore_iov_vf_mbx_close(struct ecore_hwfn *p_hwfn,
2880 struct ecore_ptt *p_ptt,
2881 struct ecore_vf_info *vf)
2882 {
2883 u16 length = sizeof(struct pfvf_def_resp_tlv);
2884 u8 status = PFVF_STATUS_SUCCESS;
2886 /* Disable Interrupts for VF */
2887 ecore_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);
2889 /* Reset Permission table */
2890 ecore_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);
2892 ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_CLOSE,
2893 length, status);
2894 }
2896 static void ecore_iov_vf_mbx_release(struct ecore_hwfn *p_hwfn,
2897 struct ecore_ptt *p_ptt,
2898 struct ecore_vf_info *p_vf)
2899 {
2900 u16 length = sizeof(struct pfvf_def_resp_tlv);
2901 u8 status = PFVF_STATUS_SUCCESS;
2902 enum _ecore_status_t rc = ECORE_SUCCESS;
2904 ecore_iov_vf_cleanup(p_hwfn, p_vf);
2906 if (p_vf->state != VF_STOPPED && p_vf->state != VF_FREE) {
2907 /* Stopping the VF */
2908 rc = ecore_sp_vf_stop(p_hwfn, p_vf->concrete_fid,
2909 p_vf->opaque_fid);
2911 if (rc != ECORE_SUCCESS) {
2912 DP_ERR(p_hwfn, "ecore_sp_vf_stop returned error %d\n",
2913 rc);
2914 status = PFVF_STATUS_FAILURE;
2915 }
2917 p_vf->state = VF_STOPPED;
2918 }
2920 ecore_iov_prepare_resp(p_hwfn, p_ptt, p_vf, CHANNEL_TLV_RELEASE,
2921 length, status);
2922 }
2924 static enum _ecore_status_t
2925 ecore_iov_vf_flr_poll_dorq(struct ecore_hwfn *p_hwfn,
2926 struct ecore_vf_info *p_vf, struct ecore_ptt *p_ptt)
2927 {
2928 int cnt;
2929 u32 val;
2931 ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_vf->concrete_fid);
2933 for (cnt = 0; cnt < 50; cnt++) {
2934 val = ecore_rd(p_hwfn, p_ptt, DORQ_REG_VF_USAGE_CNT);
2935 if (!val)
2936 break;
2937 OSAL_MSLEEP(20);
2938 }
2939 ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
2941 if (cnt == 50) {
2942 DP_ERR(p_hwfn,
2943 "VF[%d] - dorq failed to cleanup [usage 0x%08x]\n",
2944 p_vf->abs_vf_id, val);
2945 return ECORE_TIMEOUT;
2946 }
2948 return ECORE_SUCCESS;
2949 }
2951 static enum _ecore_status_t
2952 ecore_iov_vf_flr_poll_pbf(struct ecore_hwfn *p_hwfn,
2953 struct ecore_vf_info *p_vf, struct ecore_ptt *p_ptt)
2954 {
2955 u32 cons[MAX_NUM_VOQS], distance[MAX_NUM_VOQS];
2956 int i, cnt;
2958 /* Read initial consumers & producers */
2959 for (i = 0; i < MAX_NUM_VOQS; i++) {
2960 u32 prod;
2962 cons[i] = ecore_rd(p_hwfn, p_ptt,
2963 PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
2964 i * 0x40);
2965 prod = ecore_rd(p_hwfn, p_ptt,
2966 PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0 +
2967 i * 0x40);
2968 distance[i] = prod - cons[i];
2969 }
2971 /* Wait for consumers to pass the producers */
2972 i = 0;
2973 for (cnt = 0; cnt < 50; cnt++) {
2974 for (; i < MAX_NUM_VOQS; i++) {
2975 u32 tmp;
2977 tmp = ecore_rd(p_hwfn, p_ptt,
2978 PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
2979 i * 0x40);
2980 if (distance[i] > tmp - cons[i])
2981 break;
2982 }
2984 if (i == MAX_NUM_VOQS)
2985 break;
2987 OSAL_MSLEEP(20);
2988 }
2990 if (cnt == 50) {
2991 DP_ERR(p_hwfn, "VF[%d] - pbf polling failed on VOQ %d\n",
2992 p_vf->abs_vf_id, i);
2993 return ECORE_TIMEOUT;
2994 }
2996 return ECORE_SUCCESS;
2997 }
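/* The polling scheme above samples each VOQ's producer once and then waits
 * until the consumer has advanced by at least that distance, i.e. every
 * block queued before the FLR has drained. Note the unsigned arithmetic:
 * 'tmp - cons[i]' stays correct even if the counters wrap around.
 */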
2999 static enum _ecore_status_t ecore_iov_vf_flr_poll(struct ecore_hwfn *p_hwfn,
3000 struct ecore_vf_info *p_vf,
3001 struct ecore_ptt *p_ptt)
3002 {
3003 enum _ecore_status_t rc;
3005 /* TODO - add SRC and TM polling once we add storage IOV */
3007 rc = ecore_iov_vf_flr_poll_dorq(p_hwfn, p_vf, p_ptt);
3008 if (rc)
3009 return rc;
3011 rc = ecore_iov_vf_flr_poll_pbf(p_hwfn, p_vf, p_ptt);
3012 if (rc)
3013 return rc;
3015 return ECORE_SUCCESS;
3016 }
3018 static enum _ecore_status_t
3019 ecore_iov_execute_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
3020 struct ecore_ptt *p_ptt,
3021 u16 rel_vf_id, u32 *ack_vfs)
3022 {
3023 struct ecore_vf_info *p_vf;
3024 enum _ecore_status_t rc = ECORE_SUCCESS;
3026 p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, false);
3027 if (!p_vf)
3028 return ECORE_SUCCESS;
3030 if (p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &
3031 (1ULL << (rel_vf_id % 64))) {
3032 u16 vfid = p_vf->abs_vf_id;
3034 /* TODO - should we lock channel? */
3036 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3037 "VF[%d] - Handling FLR\n", vfid);
3039 ecore_iov_vf_cleanup(p_hwfn, p_vf);
3041 /* If VF isn't active, no need for anything but SW */
3042 if (!p_vf->b_init)
3043 goto cleanup;
3045 /* TODO - what to do in case of failure? */
3046 rc = ecore_iov_vf_flr_poll(p_hwfn, p_vf, p_ptt);
3047 if (rc != ECORE_SUCCESS)
3048 goto cleanup;
3050 rc = ecore_final_cleanup(p_hwfn, p_ptt, vfid, true);
3051 if (rc) {
3052 /* TODO - decide how to recover if final cleanup fails */
3053 DP_ERR(p_hwfn, "Failed to handle FLR of VF[%d]\n", vfid);
3054 return rc;
3055 }
3057 /* VF_STOPPED has to be set only after final cleanup
3058 * but prior to re-enabling the VF.
3059 */
3060 p_vf->state = VF_STOPPED;
3062 rc = ecore_iov_enable_vf_access(p_hwfn, p_ptt, p_vf);
3063 if (rc) {
3064 /* TODO - again, recovery here is unclear */
3065 DP_ERR(p_hwfn, "Failed to re-enable VF[%d] access\n",
3066 vfid);
3067 return rc;
3068 }
3069 cleanup:
3070 /* Mark VF for ack and clean pending state */
3071 if (p_vf->state == VF_RESET)
3072 p_vf->state = VF_STOPPED;
3073 ack_vfs[vfid / 32] |= (1 << (vfid % 32));
3074 p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &=
3075 ~(1ULL << (rel_vf_id % 64));
3076 p_hwfn->pf_iov_info->pending_events[rel_vf_id / 64] &=
3077 ~(1ULL << (rel_vf_id % 64));
3078 }
3080 return rc;
3081 }
3083 enum _ecore_status_t ecore_iov_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
3084 struct ecore_ptt *p_ptt)
3085 {
3086 u32 ack_vfs[VF_MAX_STATIC / 32];
3087 enum _ecore_status_t rc = ECORE_SUCCESS;
3088 u16 i;
3090 OSAL_MEMSET(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32));
3092 /* Since BRB <-> PRS interface can't be tested as part of the flr
3093 * polling due to HW limitations, simply sleep a bit. And since
3094 * there's no need to wait per-vf, do it before looping.
3095 */
3096 OSAL_MSLEEP(100);
3098 for (i = 0; i < p_hwfn->p_dev->p_iov_info->total_vfs; i++)
3099 ecore_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, i, ack_vfs);
3101 rc = ecore_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs);
3102 return rc;
3103 }
3105 enum _ecore_status_t
3106 ecore_iov_single_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
3107 struct ecore_ptt *p_ptt, u16 rel_vf_id)
3108 {
3109 u32 ack_vfs[VF_MAX_STATIC / 32];
3110 enum _ecore_status_t rc = ECORE_SUCCESS;
3112 OSAL_MEMSET(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32));
3114 /* Wait instead of polling the BRB <-> PRS interface */
3115 OSAL_MSLEEP(100);
3117 ecore_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, rel_vf_id, ack_vfs);
3119 rc = ecore_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs);
3120 return rc;
3121 }
3123 int ecore_iov_mark_vf_flr(struct ecore_hwfn *p_hwfn, u32 *p_disabled_vfs)
3124 {
3125 bool found = false;
3126 u16 i;
3127 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, "Marking FLR-ed VFs\n");
3128 for (i = 0; i < (VF_MAX_STATIC / 32); i++)
3129 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3130 "[%08x,...,%08x]: %08x\n",
3131 i * 32, (i + 1) * 32 - 1, p_disabled_vfs[i]);
3133 if (!p_hwfn->p_dev->p_iov_info) {
3134 DP_NOTICE(p_hwfn, true, "VF flr but no IOV\n");
3135 return false;
3136 }
3138 /* Mark VFs */
3139 for (i = 0; i < p_hwfn->p_dev->p_iov_info->total_vfs; i++) {
3140 struct ecore_vf_info *p_vf;
3141 u8 vfid;
3143 p_vf = ecore_iov_get_vf_info(p_hwfn, i, false);
3144 if (!p_vf)
3145 continue;
3147 vfid = p_vf->abs_vf_id;
3148 if ((1 << (vfid % 32)) & p_disabled_vfs[vfid / 32]) {
3149 u64 *p_flr = p_hwfn->pf_iov_info->pending_flr;
3150 u16 rel_vf_id = p_vf->relative_vf_id;
3152 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3153 "VF[%d] [rel %d] got FLR-ed\n",
3154 vfid, rel_vf_id);
3156 p_vf->state = VF_RESET;
3158 /* No need to lock here, since pending_flr should
3159 * only change here and before ACKing MFw. Since
3160 * MFW will not trigger an additional attention for
3161 * VF flr until ACKs, we're safe.
3162 */
3163 p_flr[rel_vf_id / 64] |= 1ULL << (rel_vf_id % 64);
3164 found = true;
3165 }
3166 }
3168 return found;
3169 }
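/* Example of the bitmap arithmetic above: if absolute VF 37 is reported
 * disabled, bit 5 of p_disabled_vfs[1] is set (37 = 32 + 5); the VF is
 * then marked VF_RESET and its relative id is recorded in the 64-bit
 * pending_flr[] map that ecore_iov_vf_flr_cleanup() consumes.
 */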
3171 void ecore_iov_get_link(struct ecore_hwfn *p_hwfn,
3172 u16 vfid,
3173 struct ecore_mcp_link_params *p_params,
3174 struct ecore_mcp_link_state *p_link,
3175 struct ecore_mcp_link_capabilities *p_caps)
3176 {
3177 struct ecore_vf_info *p_vf = ecore_iov_get_vf_info(p_hwfn, vfid, false);
3178 struct ecore_bulletin_content *p_bulletin;
3180 if (!p_vf)
3181 return;
3183 p_bulletin = p_vf->bulletin.p_virt;
3185 if (p_params)
3186 __ecore_vf_get_link_params(p_hwfn, p_params, p_bulletin);
3187 if (p_link)
3188 __ecore_vf_get_link_state(p_hwfn, p_link, p_bulletin);
3189 if (p_caps)
3190 __ecore_vf_get_link_caps(p_hwfn, p_caps, p_bulletin);
3191 }
3193 void ecore_iov_process_mbx_req(struct ecore_hwfn *p_hwfn,
3194 struct ecore_ptt *p_ptt, int vfid)
3195 {
3196 struct ecore_iov_vf_mbx *mbx;
3197 struct ecore_vf_info *p_vf;
3199 p_vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
3200 if (!p_vf)
3201 return;
3203 mbx = &p_vf->vf_mbx;
3205 /* ecore_iov_process_mbx_request */
3207 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3208 "VF[%02x]: Processing mailbox message\n", p_vf->abs_vf_id);
3210 mbx->first_tlv = mbx->req_virt->first_tlv;
3212 OSAL_IOV_VF_MSG_TYPE(p_hwfn,
3213 p_vf->relative_vf_id,
3214 mbx->first_tlv.tl.type);
3216 /* Lock the per vf op mutex and note the locker's identity.
3217 * The unlock will take place in mbx response.
3218 */
3219 ecore_iov_lock_vf_pf_channel(p_hwfn,
3220 p_vf, mbx->first_tlv.tl.type);
3222 /* check if tlv type is known */
3223 if (ecore_iov_tlv_supported(mbx->first_tlv.tl.type)) {
3224 /* switch on the opcode */
3225 switch (mbx->first_tlv.tl.type) {
3226 case CHANNEL_TLV_ACQUIRE:
3227 ecore_iov_vf_mbx_acquire(p_hwfn, p_ptt, p_vf);
3228 break;
3229 case CHANNEL_TLV_VPORT_START:
3230 ecore_iov_vf_mbx_start_vport(p_hwfn, p_ptt, p_vf);
3231 break;
3232 case CHANNEL_TLV_VPORT_TEARDOWN:
3233 ecore_iov_vf_mbx_stop_vport(p_hwfn, p_ptt, p_vf);
3234 break;
3235 case CHANNEL_TLV_START_RXQ:
3236 ecore_iov_vf_mbx_start_rxq(p_hwfn, p_ptt, p_vf);
3237 break;
3238 case CHANNEL_TLV_START_TXQ:
3239 ecore_iov_vf_mbx_start_txq(p_hwfn, p_ptt, p_vf);
3240 break;
3241 case CHANNEL_TLV_STOP_RXQS:
3242 ecore_iov_vf_mbx_stop_rxqs(p_hwfn, p_ptt, p_vf);
3243 break;
3244 case CHANNEL_TLV_STOP_TXQS:
3245 ecore_iov_vf_mbx_stop_txqs(p_hwfn, p_ptt, p_vf);
3246 break;
3247 case CHANNEL_TLV_UPDATE_RXQ:
3248 ecore_iov_vf_mbx_update_rxqs(p_hwfn, p_ptt, p_vf);
3249 break;
3250 case CHANNEL_TLV_VPORT_UPDATE:
3251 ecore_iov_vf_mbx_vport_update(p_hwfn, p_ptt, p_vf);
3252 break;
3253 case CHANNEL_TLV_UCAST_FILTER:
3254 ecore_iov_vf_mbx_ucast_filter(p_hwfn, p_ptt, p_vf);
3255 break;
3256 case CHANNEL_TLV_CLOSE:
3257 ecore_iov_vf_mbx_close(p_hwfn, p_ptt, p_vf);
3258 break;
3259 case CHANNEL_TLV_INT_CLEANUP:
3260 ecore_iov_vf_mbx_int_cleanup(p_hwfn, p_ptt, p_vf);
3261 break;
3262 case CHANNEL_TLV_RELEASE:
3263 ecore_iov_vf_mbx_release(p_hwfn, p_ptt, p_vf);
3264 break;
3265 }
3266 } else {
3267 /* unknown TLV - this may belong to a VF driver from the future
3268 * - a version written after this PF driver was written, which
3269 * supports features unknown as of yet. Too bad since we don't
3270 * support them. Or this may be because someone wrote a crappy
3271 * VF driver and is sending garbage over the channel.
3272 */
3273 DP_NOTICE(p_hwfn, false,
3274 "VF[%02x]: unknown TLV. type %04x length %04x"
3275 " padding %08x reply address %lu\n",
3276 p_vf->abs_vf_id,
3277 mbx->first_tlv.tl.type,
3278 mbx->first_tlv.tl.length,
3279 mbx->first_tlv.padding,
3280 (unsigned long)mbx->first_tlv.reply_address);
3282 /* Try replying in case reply address matches the acquisition's
3283 * reply address.
3284 */
3285 if (p_vf->acquire.first_tlv.reply_address &&
3286 (mbx->first_tlv.reply_address ==
3287 p_vf->acquire.first_tlv.reply_address))
3288 ecore_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
3289 mbx->first_tlv.tl.type,
3290 sizeof(struct pfvf_def_resp_tlv),
3291 PFVF_STATUS_NOT_SUPPORTED);
3292 else
3293 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3294 "VF[%02x]: Can't respond to TLV -"
3295 " no valid reply address\n",
3296 p_vf->abs_vf_id);
3297 }
3299 ecore_iov_unlock_vf_pf_channel(p_hwfn, p_vf,
3300 mbx->first_tlv.tl.type);
3302 #ifdef CONFIG_ECORE_SW_CHANNEL
3303 mbx->sw_mbx.mbx_state = VF_PF_RESPONSE_READY;
3304 mbx->sw_mbx.response_offset = 0;
3305 #endif
3306 }
3308 void ecore_iov_pf_add_pending_events(struct ecore_hwfn *p_hwfn, u8 vfid)
3309 {
3310 u64 add_bit = 1ULL << (vfid % 64);
3312 /* TODO - add locking mechanisms [no atomics in ecore, so we can't
3313 * add the lock inside the ecore_pf_iov struct].
3314 */
3315 p_hwfn->pf_iov_info->pending_events[vfid / 64] |= add_bit;
3316 }
3318 void ecore_iov_pf_get_and_clear_pending_events(struct ecore_hwfn *p_hwfn,
3319 u64 *events)
3320 {
3321 u64 *p_pending_events = p_hwfn->pf_iov_info->pending_events;
3323 /* TODO - Take a lock */
3324 OSAL_MEMCPY(events, p_pending_events,
3325 sizeof(u64) * ECORE_VF_ARRAY_LENGTH);
3326 OSAL_MEMSET(p_pending_events, 0,
3327 sizeof(u64) * ECORE_VF_ARRAY_LENGTH);
3328 }
3330 static enum _ecore_status_t ecore_sriov_vfpf_msg(struct ecore_hwfn *p_hwfn,
3331 u16 abs_vfid,
3332 struct regpair *vf_msg)
3333 {
3334 u8 min = (u8)p_hwfn->p_dev->p_iov_info->first_vf_in_pf;
3335 struct ecore_vf_info *p_vf;
3337 if (!ecore_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min)) {
3338 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3339 "Got a message from VF [abs 0x%08x] that cannot be"
3340 " handled by PF\n",
3341 abs_vfid);
3342 return ECORE_SUCCESS;
3343 }
3344 p_vf = &p_hwfn->pf_iov_info->vfs_array[(u8)abs_vfid - min];
3346 /* List the physical address of the request so that handler
3347 * could later on copy the message from it.
3348 */
3349 p_vf->vf_mbx.pending_req = (((u64)vf_msg->hi) << 32) | vf_msg->lo;
3351 return OSAL_PF_VF_MSG(p_hwfn, p_vf->relative_vf_id);
3352 }
3354 enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn *p_hwfn,
3355 u8 opcode,
3356 __le16 echo,
3357 union event_ring_data *data)
3358 {
3359 switch (opcode) {
3360 case COMMON_EVENT_VF_PF_CHANNEL:
3361 return ecore_sriov_vfpf_msg(p_hwfn, OSAL_LE16_TO_CPU(echo),
3362 &data->vf_pf_channel.msg_addr);
3363 case COMMON_EVENT_VF_FLR:
3364 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3365 "VF-FLR is still not supported\n");
3366 return ECORE_SUCCESS;
3367 default:
3368 DP_INFO(p_hwfn->p_dev, "Unknown sriov eqe event 0x%02x\n",
3369 opcode);
3370 return ECORE_INVAL;
3371 }
3372 }
3374 bool ecore_iov_is_vf_pending_flr(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
3375 {
3376 return !!(p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &
3377 (1ULL << (rel_vf_id % 64)));
3378 }
3380 u16 ecore_iov_get_next_active_vf(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
3381 {
3382 struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info;
3383 u16 i;
3385 if (!p_iov)
3386 goto out;
3388 for (i = rel_vf_id; i < p_iov->total_vfs; i++)
3389 if (ecore_iov_is_valid_vfid(p_hwfn, i, true))
3390 return i;
3392 out:
3393 return MAX_NUM_VFS;
3394 }
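/* Callers normally don't use this directly; the iteration helper in the
 * IOV API header is built on it, roughly as follows (a sketch - see
 * ecore_iov_api.h for the authoritative definition):
 *
 *	#define ecore_for_each_vf(_p_hwfn, _i)				\
 *		for (_i = ecore_iov_get_next_active_vf(_p_hwfn, 0);	\
 *		     _i < MAX_NUM_VFS;					\
 *		     _i = ecore_iov_get_next_active_vf(_p_hwfn, _i + 1))
 */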
3396 enum _ecore_status_t ecore_iov_copy_vf_msg(struct ecore_hwfn *p_hwfn,
3397 struct ecore_ptt *ptt, int vfid)
3398 {
3399 struct ecore_dmae_params params;
3400 struct ecore_vf_info *vf_info;
3402 vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
3403 if (!vf_info)
3404 return ECORE_INVAL;
3406 OSAL_MEMSET(&params, 0, sizeof(struct ecore_dmae_params));
3407 params.flags = ECORE_DMAE_FLAG_VF_SRC | ECORE_DMAE_FLAG_COMPLETION_DST;
3408 params.src_vfid = vf_info->abs_vf_id;
3410 if (ecore_dmae_host2host(p_hwfn, ptt,
3411 vf_info->vf_mbx.pending_req,
3412 vf_info->vf_mbx.req_phys,
3413 sizeof(union vfpf_tlvs) / 4, &params)) {
3414 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3415 "Failed to copy message from VF 0x%02x\n", vfid);
3417 return ECORE_IO;
3418 }
3420 return ECORE_SUCCESS;
3421 }
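/* This is the PF-side half of the channel: ecore_sriov_vfpf_msg() above
 * records the VF's request address in vf_mbx.pending_req, and this DMAE
 * copy pulls the message into the PF's pre-allocated req_phys buffer.
 * The DMAE size argument is given in dwords, hence
 * sizeof(union vfpf_tlvs) / 4.
 */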
3423 void ecore_iov_bulletin_set_forced_mac(struct ecore_hwfn *p_hwfn,
3424 u8 *mac, int vfid)
3425 {
3426 struct ecore_vf_info *vf_info;
3427 u64 feature;
3429 vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
3430 if (!vf_info) {
3431 DP_NOTICE(p_hwfn->p_dev, true,
3432 "Can not set forced MAC, invalid vfid [%d]\n", vfid);
3433 return;
3434 }
3436 feature = 1 << MAC_ADDR_FORCED;
3437 OSAL_MEMCPY(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN);
3439 vf_info->bulletin.p_virt->valid_bitmap |= feature;
3440 /* Forced MAC will disable MAC_ADDR */
3441 vf_info->bulletin.p_virt->valid_bitmap &=
3442 ~(1 << VFPF_BULLETIN_MAC_ADDR);
3444 ecore_iov_configure_vport_forced(p_hwfn, vf_info, feature);
3445 }
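/* A minimal PF-side usage sketch (the values are illustrative):
 *
 *	u8 mac[ETH_ALEN] = { 0x52, 0x54, 0x00, 0x12, 0x34, 0x56 };
 *
 *	ecore_iov_bulletin_set_forced_mac(p_hwfn, mac, vfid);
 *
 * The MAC is published via the VF's bulletin board with MAC_ADDR_FORCED
 * set, which also clears the soft VFPF_BULLETIN_MAC_ADDR hint; the VF
 * adopts it on its next bulletin poll.
 */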
3447 enum _ecore_status_t ecore_iov_bulletin_set_mac(struct ecore_hwfn *p_hwfn,
3448 u8 *mac, int vfid)
3449 {
3450 struct ecore_vf_info *vf_info;
3451 u64 feature;
3453 vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
3454 if (!vf_info) {
3455 DP_NOTICE(p_hwfn->p_dev, true,
3456 "Can not set MAC, invalid vfid [%d]\n", vfid);
3457 return ECORE_INVAL;
3458 }
3460 if (vf_info->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED)) {
3461 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3462 "Can not set MAC, Forced MAC is configured\n");
3463 return ECORE_INVAL;
3464 }
3466 feature = 1 << VFPF_BULLETIN_MAC_ADDR;
3467 OSAL_MEMCPY(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN);
3469 vf_info->bulletin.p_virt->valid_bitmap |= feature;
3471 return ECORE_SUCCESS;
3472 }
3474 enum _ecore_status_t
3475 ecore_iov_bulletin_set_forced_untagged_default(struct ecore_hwfn *p_hwfn,
3476 bool b_untagged_only, int vfid)
3477 {
3478 struct ecore_vf_info *vf_info;
3479 u64 feature;
3481 vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
3482 if (!vf_info) {
3483 DP_NOTICE(p_hwfn->p_dev, true,
3484 "Can not set untagged default, invalid vfid [%d]\n", vfid);
3485 return ECORE_INVAL;
3486 }
3488 /* Since this is configurable only during vport-start, don't take it
3489 * if we're past that point.
3490 */
3491 if (vf_info->state == VF_ENABLED) {
3492 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3493 "Can't support untagged change for vfid[%d] -"
3494 " VF is already active\n",
3495 vfid);
3496 return ECORE_INVAL;
3497 }
3499 /* Set configuration; this will later be taken into account during the
3500 * VF initialization.
3501 */
3502 feature = (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT) |
3503 (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED);
3504 vf_info->bulletin.p_virt->valid_bitmap |= feature;
3506 vf_info->bulletin.p_virt->default_only_untagged = b_untagged_only ? 1
3507 : 0;
3509 return ECORE_SUCCESS;
3510 }
3512 void ecore_iov_get_vfs_opaque_fid(struct ecore_hwfn *p_hwfn, int vfid,
3513 u16 *opaque_fid)
3514 {
3515 struct ecore_vf_info *vf_info;
3517 vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
3518 if (!vf_info)
3519 return;
3521 *opaque_fid = vf_info->opaque_fid;
3522 }
3524 void ecore_iov_get_vfs_vport_id(struct ecore_hwfn *p_hwfn, int vfid,
3525 u8 *p_vort_id)
3526 {
3527 struct ecore_vf_info *vf_info;
3529 vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
3530 if (!vf_info)
3531 return;
3533 *p_vort_id = vf_info->vport_id;
3534 }
3536 void ecore_iov_bulletin_set_forced_vlan(struct ecore_hwfn *p_hwfn,
3537 u16 pvid, int vfid)
3538 {
3539 struct ecore_vf_info *vf_info;
3540 u64 feature;
3542 vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
3543 if (!vf_info) {
3544 DP_NOTICE(p_hwfn->p_dev, true,
3545 "Can not set forced VLAN, invalid vfid [%d]\n",
3546 vfid);
3547 return;
3548 }
3550 feature = 1 << VLAN_ADDR_FORCED;
3551 vf_info->bulletin.p_virt->pvid = pvid;
3552 if (pvid)
3553 vf_info->bulletin.p_virt->valid_bitmap |= feature;
3554 else
3555 vf_info->bulletin.p_virt->valid_bitmap &= ~feature;
3557 ecore_iov_configure_vport_forced(p_hwfn, vf_info, feature);
3558 }
3560 bool ecore_iov_vf_has_vport_instance(struct ecore_hwfn *p_hwfn, int vfid)
3561 {
3562 struct ecore_vf_info *p_vf_info;
3564 p_vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
3565 if (!p_vf_info)
3566 return false;
3568 return !!p_vf_info->vport_instance;
3569 }
3571 bool ecore_iov_is_vf_stopped(struct ecore_hwfn *p_hwfn, int vfid)
3572 {
3573 struct ecore_vf_info *p_vf_info;
3575 p_vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
3576 if (!p_vf_info)
3577 return true;
3579 return p_vf_info->state == VF_STOPPED;
3580 }
3582 bool ecore_iov_spoofchk_get(struct ecore_hwfn *p_hwfn, int vfid)
3583 {
3584 struct ecore_vf_info *vf_info;
3586 vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
3587 if (!vf_info)
3588 return false;
3590 return vf_info->spoof_chk;
3591 }
3593 enum _ecore_status_t ecore_iov_spoofchk_set(struct ecore_hwfn *p_hwfn,
3594 int vfid, bool val)
3595 {
3596 struct ecore_vf_info *vf;
3597 enum _ecore_status_t rc = ECORE_INVAL;
3599 if (!ecore_iov_pf_sanity_check(p_hwfn, vfid)) {
3600 DP_NOTICE(p_hwfn, true,
3601 "SR-IOV sanity check failed, can't set spoofchk\n");
3602 goto out;
3603 }
3605 vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
3606 if (!vf)
3607 goto out;
3609 if (!ecore_iov_vf_has_vport_instance(p_hwfn, vfid)) {
3610 /* After VF VPORT start PF will configure spoof check */
3611 vf->req_spoofchk_val = val;
3612 rc = ECORE_SUCCESS;
3613 goto out;
3614 }
3616 rc = __ecore_iov_spoofchk_set(p_hwfn, vf, val);
3618 out:
3619 return rc;
3620 }
3622 u8 ecore_iov_vf_chains_per_pf(struct ecore_hwfn *p_hwfn)
3623 {
3624 u8 max_chains_per_vf = p_hwfn->hw_info.max_chains_per_vf;
3626 max_chains_per_vf = (max_chains_per_vf) ? max_chains_per_vf
3627 : ECORE_MAX_VF_CHAINS_PER_PF;
3629 return max_chains_per_vf;
3630 }
3632 void ecore_iov_get_vf_req_virt_mbx_params(struct ecore_hwfn *p_hwfn,
3633 u16 rel_vf_id,
3634 void **pp_req_virt_addr,
3635 u16 *p_req_virt_size)
3636 {
3637 struct ecore_vf_info *vf_info =
3638 ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
3640 if (!vf_info)
3641 return;
3643 if (pp_req_virt_addr)
3644 *pp_req_virt_addr = vf_info->vf_mbx.req_virt;
3646 if (p_req_virt_size)
3647 *p_req_virt_size = sizeof(*vf_info->vf_mbx.req_virt);
3648 }
3650 void ecore_iov_get_vf_reply_virt_mbx_params(struct ecore_hwfn *p_hwfn,
3651 u16 rel_vf_id,
3652 void **pp_reply_virt_addr,
3653 u16 *p_reply_virt_size)
3654 {
3655 struct ecore_vf_info *vf_info =
3656 ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
3658 if (!vf_info)
3659 return;
3661 if (pp_reply_virt_addr)
3662 *pp_reply_virt_addr = vf_info->vf_mbx.reply_virt;
3664 if (p_reply_virt_size)
3665 *p_reply_virt_size = sizeof(*vf_info->vf_mbx.reply_virt);
3666 }
3668 #ifdef CONFIG_ECORE_SW_CHANNEL
3669 struct ecore_iov_sw_mbx *ecore_iov_get_vf_sw_mbx(struct ecore_hwfn *p_hwfn,
3670 u16 rel_vf_id)
3671 {
3672 struct ecore_vf_info *vf_info =
3673 ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
3675 if (!vf_info)
3676 return OSAL_NULL;
3678 return &vf_info->vf_mbx.sw_mbx;
3679 }
3680 #endif
3682 bool ecore_iov_is_valid_vfpf_msg_length(u32 length)
3683 {
3684 return (length >= sizeof(struct vfpf_first_tlv) &&
3685 (length <= sizeof(union vfpf_tlvs)));
3686 }
3688 u32 ecore_iov_pfvf_msg_length(void)
3689 {
3690 return sizeof(union pfvf_tlvs);
3691 }
3693 u8 *ecore_iov_bulletin_get_forced_mac(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
3694 {
3695 struct ecore_vf_info *p_vf;
3697 p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
3698 if (!p_vf || !p_vf->bulletin.p_virt)
3699 return OSAL_NULL;
3701 if (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED)))
3702 return OSAL_NULL;
3704 return p_vf->bulletin.p_virt->mac;
3705 }
3707 u16 ecore_iov_bulletin_get_forced_vlan(struct ecore_hwfn *p_hwfn,
3708 u16 rel_vf_id)
3709 {
3710 struct ecore_vf_info *p_vf;
3712 p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
3713 if (!p_vf || !p_vf->bulletin.p_virt)
3714 return 0;
3716 if (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED)))
3717 return 0;
3719 return p_vf->bulletin.p_virt->pvid;
3720 }
3722 enum _ecore_status_t ecore_iov_configure_tx_rate(struct ecore_hwfn *p_hwfn,
3723 struct ecore_ptt *p_ptt,
3724 int vfid, int val)
3725 {
3726 struct ecore_vf_info *vf;
3727 u8 abs_vp_id = 0;
3728 enum _ecore_status_t rc;
3730 vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
3732 if (!vf)
3733 return ECORE_INVAL;
3735 rc = ecore_fw_vport(p_hwfn, vf->vport_id, &abs_vp_id);
3736 if (rc != ECORE_SUCCESS)
3737 return rc;
3739 return ecore_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val);
3740 }
3742 enum _ecore_status_t ecore_iov_configure_min_tx_rate(struct ecore_dev *p_dev,
3743 int vfid, u32 rate)
3744 {
3745 struct ecore_vf_info *vf;
3746 u8 vport_id;
3747 int i;
3749 for_each_hwfn(p_dev, i) {
3750 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
3752 if (!ecore_iov_pf_sanity_check(p_hwfn, vfid)) {
3753 DP_NOTICE(p_hwfn, true,
3754 "SR-IOV sanity check failed,"
3755 " can't set min rate\n");
3756 return ECORE_INVAL;
3757 }
3758 }
3760 vf = ecore_iov_get_vf_info(ECORE_LEADING_HWFN(p_dev), (u16)vfid, true);
3761 vport_id = vf->vport_id;
3763 return ecore_configure_vport_wfq(p_dev, vport_id, rate);
3764 }
3766 enum _ecore_status_t ecore_iov_get_vf_stats(struct ecore_hwfn *p_hwfn,
3767 struct ecore_ptt *p_ptt,
3768 int vfid,
3769 struct ecore_eth_stats *p_stats)
3770 {
3771 struct ecore_vf_info *vf;
3773 vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
3774 if (!vf)
3775 return ECORE_INVAL;
3777 if (vf->state != VF_ENABLED)
3778 return ECORE_INVAL;
3780 __ecore_get_vport_stats(p_hwfn, p_ptt, p_stats,
3781 vf->abs_vf_id + 0x10, false);
3783 return ECORE_SUCCESS;
3784 }
3786 u8 ecore_iov_get_vf_num_rxqs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
3787 {
3788 struct ecore_vf_info *p_vf;
3790 p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
3791 if (!p_vf)
3792 return 0;
3794 return p_vf->num_rxqs;
3795 }
3797 u8 ecore_iov_get_vf_num_active_rxqs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
3798 {
3799 struct ecore_vf_info *p_vf;
3801 p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
3802 if (!p_vf)
3803 return 0;
3805 return p_vf->num_active_rxqs;
3806 }
3808 void *ecore_iov_get_vf_ctx(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
3809 {
3810 struct ecore_vf_info *p_vf;
3812 p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
3813 if (!p_vf)
3814 return OSAL_NULL;
3816 return p_vf->ctx;
3817 }
3819 u8 ecore_iov_get_vf_num_sbs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
3820 {
3821 struct ecore_vf_info *p_vf;
3823 p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
3824 if (!p_vf)
3825 return 0;
3827 return p_vf->num_sbs;
3828 }
3830 bool ecore_iov_is_vf_wait_for_acquire(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
3831 {
3832 struct ecore_vf_info *p_vf;
3834 p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
3835 if (!p_vf)
3836 return false;
3838 return (p_vf->state == VF_FREE);
3839 }
3841 bool ecore_iov_is_vf_acquired_not_initialized(struct ecore_hwfn *p_hwfn,
3842 u16 rel_vf_id)
3843 {
3844 struct ecore_vf_info *p_vf;
3846 p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
3847 if (!p_vf)
3848 return false;
3850 return (p_vf->state == VF_ACQUIRED);
3851 }
3853 bool ecore_iov_is_vf_initialized(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
3854 {
3855 struct ecore_vf_info *p_vf;
3857 p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
3858 if (!p_vf)
3859 return false;
3861 return (p_vf->state == VF_ENABLED);
3862 }
3864 int ecore_iov_get_vf_min_rate(struct ecore_hwfn *p_hwfn, int vfid)
3865 {
3866 struct ecore_wfq_data *vf_vp_wfq;
3867 struct ecore_vf_info *vf_info;
3869 vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
3870 if (!vf_info)
3871 return 0;
3873 vf_vp_wfq = &p_hwfn->qm_info.wfq_data[vf_info->vport_id];
3875 if (vf_vp_wfq->configured)
3876 return vf_vp_wfq->min_speed;
3877 else
3878 return 0;
3879 }