drivers/net/qede/base/ecore_sriov.c (deb_dpdk.git, blob db2873e7f650883a7d557e0864fdf54960115149)
1 /*
2  * Copyright (c) 2016 QLogic Corporation.
3  * All rights reserved.
4  * www.qlogic.com
5  *
6  * See LICENSE.qede_pmd for copyright and licensing details.
7  */
8
9 #include "bcm_osal.h"
10 #include "ecore.h"
11 #include "reg_addr.h"
12 #include "ecore_sriov.h"
13 #include "ecore_status.h"
14 #include "ecore_hw.h"
15 #include "ecore_hw_defs.h"
16 #include "ecore_int.h"
17 #include "ecore_hsi_eth.h"
18 #include "ecore_l2.h"
19 #include "ecore_vfpf_if.h"
20 #include "ecore_rt_defs.h"
21 #include "ecore_init_ops.h"
22 #include "ecore_gtt_reg_addr.h"
23 #include "ecore_iro.h"
24 #include "ecore_mcp.h"
25 #include "ecore_cxt.h"
26 #include "ecore_vf.h"
27 #include "ecore_init_fw_funcs.h"
28 #include "ecore_sp_commands.h"
29
30 const char *ecore_channel_tlvs_string[] = {
31         "CHANNEL_TLV_NONE",     /* ends tlv sequence */
32         "CHANNEL_TLV_ACQUIRE",
33         "CHANNEL_TLV_VPORT_START",
34         "CHANNEL_TLV_VPORT_UPDATE",
35         "CHANNEL_TLV_VPORT_TEARDOWN",
36         "CHANNEL_TLV_START_RXQ",
37         "CHANNEL_TLV_START_TXQ",
38         "CHANNEL_TLV_STOP_RXQ",
39         "CHANNEL_TLV_STOP_TXQ",
40         "CHANNEL_TLV_UPDATE_RXQ",
41         "CHANNEL_TLV_INT_CLEANUP",
42         "CHANNEL_TLV_CLOSE",
43         "CHANNEL_TLV_RELEASE",
44         "CHANNEL_TLV_LIST_END",
45         "CHANNEL_TLV_UCAST_FILTER",
46         "CHANNEL_TLV_VPORT_UPDATE_ACTIVATE",
47         "CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH",
48         "CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP",
49         "CHANNEL_TLV_VPORT_UPDATE_MCAST",
50         "CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM",
51         "CHANNEL_TLV_VPORT_UPDATE_RSS",
52         "CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN",
53         "CHANNEL_TLV_VPORT_UPDATE_SGE_TPA",
54         "CHANNEL_TLV_UPDATE_TUNN_PARAM",
55         "CHANNEL_TLV_COALESCE_UPDATE",
56         "CHANNEL_TLV_MAX"
57 };
58
59 /* IOV ramrods */
60 static enum _ecore_status_t ecore_sp_vf_start(struct ecore_hwfn *p_hwfn,
61                                               struct ecore_vf_info *p_vf)
62 {
63         struct vf_start_ramrod_data *p_ramrod = OSAL_NULL;
64         struct ecore_spq_entry *p_ent = OSAL_NULL;
65         struct ecore_sp_init_data init_data;
66         enum _ecore_status_t rc = ECORE_NOTIMPL;
67         u8 fp_minor;
68
69         /* Get SPQ entry */
70         OSAL_MEMSET(&init_data, 0, sizeof(init_data));
71         init_data.cid = ecore_spq_get_cid(p_hwfn);
72         init_data.opaque_fid = p_vf->opaque_fid;
73         init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
74
75         rc = ecore_sp_init_request(p_hwfn, &p_ent,
76                                    COMMON_RAMROD_VF_START,
77                                    PROTOCOLID_COMMON, &init_data);
78         if (rc != ECORE_SUCCESS)
79                 return rc;
80
81         p_ramrod = &p_ent->ramrod.vf_start;
82
83         p_ramrod->vf_id = GET_FIELD(p_vf->concrete_fid, PXP_CONCRETE_FID_VFID);
84         p_ramrod->opaque_fid = OSAL_CPU_TO_LE16(p_vf->opaque_fid);
85
86         switch (p_hwfn->hw_info.personality) {
87         case ECORE_PCI_ETH:
88                 p_ramrod->personality = PERSONALITY_ETH;
89                 break;
90         case ECORE_PCI_ETH_ROCE:
91         case ECORE_PCI_ETH_IWARP:
92                 p_ramrod->personality = PERSONALITY_RDMA_AND_ETH;
93                 break;
94         default:
95                 DP_NOTICE(p_hwfn, true, "Unknown VF personality %d\n",
96                           p_hwfn->hw_info.personality);
97                 return ECORE_INVAL;
98         }
99
100         fp_minor = p_vf->acquire.vfdev_info.eth_fp_hsi_minor;
101         if (fp_minor > ETH_HSI_VER_MINOR &&
102             fp_minor != ETH_HSI_VER_NO_PKT_LEN_TUNN) {
103                 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
104                            "VF [%d] - Requested fp hsi %02x.%02x which is"
105                            " slightly newer than PF's %02x.%02x; Configuring"
106                            " PF's version\n",
107                            p_vf->abs_vf_id,
108                            ETH_HSI_VER_MAJOR, fp_minor,
109                            ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);
110                 fp_minor = ETH_HSI_VER_MINOR;
111         }
112
113         p_ramrod->hsi_fp_ver.major_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MAJOR;
114         p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = fp_minor;
115
116         DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
117                    "VF[%d] - Starting using HSI %02x.%02x\n",
118                    p_vf->abs_vf_id, ETH_HSI_VER_MAJOR, fp_minor);
119
120         return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
121 }
122
123 static enum _ecore_status_t ecore_sp_vf_stop(struct ecore_hwfn *p_hwfn,
124                                              u32 concrete_vfid,
125                                              u16 opaque_vfid)
126 {
127         struct vf_stop_ramrod_data *p_ramrod = OSAL_NULL;
128         struct ecore_spq_entry *p_ent = OSAL_NULL;
129         struct ecore_sp_init_data init_data;
130         enum _ecore_status_t rc = ECORE_NOTIMPL;
131
132         /* Get SPQ entry */
133         OSAL_MEMSET(&init_data, 0, sizeof(init_data));
134         init_data.cid = ecore_spq_get_cid(p_hwfn);
135         init_data.opaque_fid = opaque_vfid;
136         init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
137
138         rc = ecore_sp_init_request(p_hwfn, &p_ent,
139                                    COMMON_RAMROD_VF_STOP,
140                                    PROTOCOLID_COMMON, &init_data);
141         if (rc != ECORE_SUCCESS)
142                 return rc;
143
144         p_ramrod = &p_ent->ramrod.vf_stop;
145
146         p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID);
147
148         return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
149 }
150
151 bool ecore_iov_is_valid_vfid(struct ecore_hwfn *p_hwfn, int rel_vf_id,
152                              bool b_enabled_only, bool b_non_malicious)
153 {
154         if (!p_hwfn->pf_iov_info) {
155                 DP_NOTICE(p_hwfn->p_dev, true, "No iov info\n");
156                 return false;
157         }
158
159         if ((rel_vf_id >= p_hwfn->p_dev->p_iov_info->total_vfs) ||
160             (rel_vf_id < 0))
161                 return false;
162
163         if ((!p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_init) &&
164             b_enabled_only)
165                 return false;
166
167         if ((p_hwfn->pf_iov_info->vfs_array[rel_vf_id].b_malicious) &&
168             b_non_malicious)
169                 return false;
170
171         return true;
172 }
173
174 struct ecore_vf_info *ecore_iov_get_vf_info(struct ecore_hwfn *p_hwfn,
175                                             u16 relative_vf_id,
176                                             bool b_enabled_only)
177 {
178         struct ecore_vf_info *vf = OSAL_NULL;
179
180         if (!p_hwfn->pf_iov_info) {
181                 DP_NOTICE(p_hwfn->p_dev, true, "No iov info\n");
182                 return OSAL_NULL;
183         }
184
185         if (ecore_iov_is_valid_vfid(p_hwfn, relative_vf_id,
186                                     b_enabled_only, false))
187                 vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id];
188         else
189                 DP_ERR(p_hwfn, "ecore_iov_get_vf_info: VF[%d] is not enabled\n",
190                        relative_vf_id);
191
192         return vf;
193 }
194
195 static struct ecore_queue_cid *
196 ecore_iov_get_vf_rx_queue_cid(struct ecore_hwfn *p_hwfn,
197                               struct ecore_vf_info *p_vf,
198                               struct ecore_vf_queue *p_queue)
199 {
200         int i;
201
202         for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
203                 if (p_queue->cids[i].p_cid &&
204                     !p_queue->cids[i].b_is_tx)
205                         return p_queue->cids[i].p_cid;
206         }
207
208         return OSAL_NULL;
209 }
210
211 enum ecore_iov_validate_q_mode {
212         ECORE_IOV_VALIDATE_Q_NA,
213         ECORE_IOV_VALIDATE_Q_ENABLE,
214         ECORE_IOV_VALIDATE_Q_DISABLE,
215 };
216
217 static bool ecore_iov_validate_queue_mode(struct ecore_hwfn *p_hwfn,
218                                           struct ecore_vf_info *p_vf,
219                                           u16 qid,
220                                           enum ecore_iov_validate_q_mode mode,
221                                           bool b_is_tx)
222 {
223         int i;
224
225         if (mode == ECORE_IOV_VALIDATE_Q_NA)
226                 return true;
227
228         for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
229                 struct ecore_vf_queue_cid *p_qcid;
230
231                 p_qcid = &p_vf->vf_queues[qid].cids[i];
232
233                 if (p_qcid->p_cid == OSAL_NULL)
234                         continue;
235
236                 if (p_qcid->b_is_tx != b_is_tx)
237                         continue;
238
239                 /* Found. It's enabled. */
240                 return (mode == ECORE_IOV_VALIDATE_Q_ENABLE);
241         }
242
243         /* In case we haven't found any valid cid, then it's disabled */
244         return (mode == ECORE_IOV_VALIDATE_Q_DISABLE);
245 }
246
247 static bool ecore_iov_validate_rxq(struct ecore_hwfn *p_hwfn,
248                                    struct ecore_vf_info *p_vf,
249                                    u16 rx_qid,
250                                    enum ecore_iov_validate_q_mode mode)
251 {
252         if (rx_qid >= p_vf->num_rxqs) {
253                 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
254                            "VF[0x%02x] - can't touch Rx queue[%04x];"
255                            " Only 0x%04x are allocated\n",
256                            p_vf->abs_vf_id, rx_qid, p_vf->num_rxqs);
257                 return false;
258         }
259
260         return ecore_iov_validate_queue_mode(p_hwfn, p_vf, rx_qid,
261                                              mode, false);
262 }
263
264 static bool ecore_iov_validate_txq(struct ecore_hwfn *p_hwfn,
265                                    struct ecore_vf_info *p_vf,
266                                    u16 tx_qid,
267                                    enum ecore_iov_validate_q_mode mode)
268 {
269         if (tx_qid >= p_vf->num_txqs) {
270                 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
271                            "VF[0x%02x] - can't touch Tx queue[%04x];"
272                            " Only 0x%04x are allocated\n",
273                            p_vf->abs_vf_id, tx_qid, p_vf->num_txqs);
274                 return false;
275         }
276
277         return ecore_iov_validate_queue_mode(p_hwfn, p_vf, tx_qid,
278                                              mode, true);
279 }
280
281 static bool ecore_iov_validate_sb(struct ecore_hwfn *p_hwfn,
282                                   struct ecore_vf_info *p_vf,
283                                   u16 sb_idx)
284 {
285         int i;
286
287         for (i = 0; i < p_vf->num_sbs; i++)
288                 if (p_vf->igu_sbs[i] == sb_idx)
289                         return true;
290
291         DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
292                    "VF[0x%02x] - tried using sb_idx %04x which doesn't exist as"
293                    " one of its 0x%02x SBs\n",
294                    p_vf->abs_vf_id, sb_idx, p_vf->num_sbs);
295
296         return false;
297 }
298
299 /* Is there at least 1 queue open? */
300 static bool ecore_iov_validate_active_rxq(struct ecore_hwfn *p_hwfn,
301                                           struct ecore_vf_info *p_vf)
302 {
303         u8 i;
304
305         for (i = 0; i < p_vf->num_rxqs; i++)
306                 if (ecore_iov_validate_queue_mode(p_hwfn, p_vf, i,
307                                                   ECORE_IOV_VALIDATE_Q_ENABLE,
308                                                   false))
309                         return true;
310
311         return false;
312 }
313
314 static bool ecore_iov_validate_active_txq(struct ecore_hwfn *p_hwfn,
315                                           struct ecore_vf_info *p_vf)
316 {
317         u8 i;
318
319         for (i = 0; i < p_vf->num_txqs; i++)
320                 if (ecore_iov_validate_queue_mode(p_hwfn, p_vf, i,
321                                                   ECORE_IOV_VALIDATE_Q_ENABLE,
322                                                   true))
323                         return true;
324
325         return false;
326 }
327
328 /* TODO - this is the Linux crc32 implementation; need a way to #ifdef it out when building for Linux */
329 u32 ecore_crc32(u32 crc, u8 *ptr, u32 length)
330 {
331         int i;
332
333         while (length--) {
334                 crc ^= *ptr++;
335                 for (i = 0; i < 8; i++)
336                         crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
337         }
338         return crc;
339 }
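/* Note: ecore_crc32() above is the standard bit-reflected CRC-32
 * (polynomial 0xEDB88320), processed one byte at a time. As a minimal
 * sketch of how a consumer could validate a bulletin copy (hypothetical
 * helper, not part of the ecore API; the CRC covers everything after the
 * crc field itself, mirroring ecore_iov_post_vf_bulletin() below):
 *
 *	static bool bulletin_crc_ok(struct ecore_bulletin_content *p_b,
 *				    u32 size)
 *	{
 *		int crc_size = sizeof(p_b->crc);
 *
 *		return p_b->crc == ecore_crc32(0, (u8 *)p_b + crc_size,
 *					       size - crc_size);
 *	}
 */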
340
341 enum _ecore_status_t ecore_iov_post_vf_bulletin(struct ecore_hwfn *p_hwfn,
342                                                 int vfid,
343                                                 struct ecore_ptt *p_ptt)
344 {
345         struct ecore_bulletin_content *p_bulletin;
346         int crc_size = sizeof(p_bulletin->crc);
347         struct ecore_dmae_params params;
348         struct ecore_vf_info *p_vf;
349
350         p_vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
351         if (!p_vf)
352                 return ECORE_INVAL;
353
354         /* TODO - check VF is in a state where it can accept message */
355         if (!p_vf->vf_bulletin)
356                 return ECORE_INVAL;
357
358         p_bulletin = p_vf->bulletin.p_virt;
359
360         /* Increment bulletin board version and compute crc */
361         p_bulletin->version++;
362         p_bulletin->crc = ecore_crc32(0, (u8 *)p_bulletin + crc_size,
363                                       p_vf->bulletin.size - crc_size);
364
365         DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
366                    "Posting Bulletin 0x%08x to VF[%d] (CRC 0x%08x)\n",
367                    p_bulletin->version, p_vf->relative_vf_id, p_bulletin->crc);
368
369         /* propagate bulletin board via dmae to vm memory */
370         OSAL_MEMSET(&params, 0, sizeof(params));
371         params.flags = ECORE_DMAE_FLAG_VF_DST;
372         params.dst_vfid = p_vf->abs_vf_id;
373         return ecore_dmae_host2host(p_hwfn, p_ptt, p_vf->bulletin.phys,
374                                     p_vf->vf_bulletin, p_vf->bulletin.size / 4,
375                                     &params);
376 }
377
378 static enum _ecore_status_t ecore_iov_pci_cfg_info(struct ecore_dev *p_dev)
379 {
380         struct ecore_hw_sriov_info *iov = p_dev->p_iov_info;
381         int pos = iov->pos;
382
383         DP_VERBOSE(p_dev, ECORE_MSG_IOV, "sriov ext pos %d\n", pos);
384         OSAL_PCI_READ_CONFIG_WORD(p_dev, pos + PCI_SRIOV_CTRL, &iov->ctrl);
385
386         OSAL_PCI_READ_CONFIG_WORD(p_dev,
387                                   pos + PCI_SRIOV_TOTAL_VF, &iov->total_vfs);
388         OSAL_PCI_READ_CONFIG_WORD(p_dev,
389                                   pos + PCI_SRIOV_INITIAL_VF,
390                                   &iov->initial_vfs);
391
392         OSAL_PCI_READ_CONFIG_WORD(p_dev, pos + PCI_SRIOV_NUM_VF, &iov->num_vfs);
393         if (iov->num_vfs) {
394                 /* @@@TODO - in future we might want to add an OSAL here to
395                  * allow each OS to decide on its own how to act.
396                  */
397                 DP_VERBOSE(p_dev, ECORE_MSG_IOV,
398                            "Number of VFs is already set to a non-zero value."
399                            " Ignoring PCI configuration value\n");
400                 iov->num_vfs = 0;
401         }
402
403         OSAL_PCI_READ_CONFIG_WORD(p_dev,
404                                   pos + PCI_SRIOV_VF_OFFSET, &iov->offset);
405
406         OSAL_PCI_READ_CONFIG_WORD(p_dev,
407                                   pos + PCI_SRIOV_VF_STRIDE, &iov->stride);
408
409         OSAL_PCI_READ_CONFIG_WORD(p_dev,
410                                   pos + PCI_SRIOV_VF_DID, &iov->vf_device_id);
411
412         OSAL_PCI_READ_CONFIG_DWORD(p_dev,
413                                    pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz);
414
415         OSAL_PCI_READ_CONFIG_DWORD(p_dev, pos + PCI_SRIOV_CAP, &iov->cap);
416
417         OSAL_PCI_READ_CONFIG_BYTE(p_dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link);
418
419         DP_VERBOSE(p_dev, ECORE_MSG_IOV, "IOV info: nres %d, cap 0x%x,"
420                    " ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d,"
421                    " stride %d, page size 0x%x\n",
422                    iov->nres, iov->cap, iov->ctrl,
423                    iov->total_vfs, iov->initial_vfs, iov->nr_virtfn,
424                    iov->offset, iov->stride, iov->pgsz);
425
426         /* Some sanity checks */
427         if (iov->num_vfs > NUM_OF_VFS(p_dev) ||
428             iov->total_vfs > NUM_OF_VFS(p_dev)) {
429                 /* This can happen only due to a bug. In this case we set
430                  * num_vfs to zero to avoid memory corruption in the code that
431                  * assumes max number of vfs
432                  */
433                 DP_NOTICE(p_dev, false,
434                           "IOV: Unexpected number of vfs set: %d"
435                           "; setting num_vfs to zero\n",
436                           iov->num_vfs);
437
438                 iov->num_vfs = 0;
439                 iov->total_vfs = 0;
440         }
441
442         return ECORE_SUCCESS;
443 }
444
445 static void ecore_iov_clear_vf_igu_blocks(struct ecore_hwfn *p_hwfn,
446                                           struct ecore_ptt *p_ptt)
447 {
448         struct ecore_igu_block *p_sb;
449         u16 sb_id;
450         u32 val;
451
452         if (!p_hwfn->hw_info.p_igu_info) {
453                 DP_ERR(p_hwfn,
454                        "ecore_iov_clear_vf_igu_blocks IGU Info not inited\n");
455                 return;
456         }
457
458         for (sb_id = 0;
459              sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev); sb_id++) {
460                 p_sb = &p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks[sb_id];
461                 if ((p_sb->status & ECORE_IGU_STATUS_FREE) &&
462                     !(p_sb->status & ECORE_IGU_STATUS_PF)) {
463                         val = ecore_rd(p_hwfn, p_ptt,
464                                        IGU_REG_MAPPING_MEMORY + sb_id * 4);
465                         SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
466                         ecore_wr(p_hwfn, p_ptt,
467                                  IGU_REG_MAPPING_MEMORY + 4 * sb_id, val);
468                 }
469         }
470 }
471
472 static void ecore_iov_setup_vfdb(struct ecore_hwfn *p_hwfn)
473 {
474         struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info;
475         struct ecore_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
476         struct ecore_bulletin_content *p_bulletin_virt;
477         dma_addr_t req_p, rply_p, bulletin_p;
478         union pfvf_tlvs *p_reply_virt_addr;
479         union vfpf_tlvs *p_req_virt_addr;
480         u8 idx = 0;
481
482         OSAL_MEMSET(p_iov_info->vfs_array, 0, sizeof(p_iov_info->vfs_array));
483
484         p_req_virt_addr = p_iov_info->mbx_msg_virt_addr;
485         req_p = p_iov_info->mbx_msg_phys_addr;
486         p_reply_virt_addr = p_iov_info->mbx_reply_virt_addr;
487         rply_p = p_iov_info->mbx_reply_phys_addr;
488         p_bulletin_virt = p_iov_info->p_bulletins;
489         bulletin_p = p_iov_info->bulletins_phys;
490         if (!p_req_virt_addr || !p_reply_virt_addr || !p_bulletin_virt) {
491                 DP_ERR(p_hwfn,
492                        "ecore_iov_setup_vfdb called without allocating memory first\n");
493                 return;
494         }
495
496         for (idx = 0; idx < p_iov->total_vfs; idx++) {
497                 struct ecore_vf_info *vf = &p_iov_info->vfs_array[idx];
498                 u32 concrete;
499
500                 vf->vf_mbx.req_virt = p_req_virt_addr + idx;
501                 vf->vf_mbx.req_phys = req_p + idx * sizeof(union vfpf_tlvs);
502                 vf->vf_mbx.reply_virt = p_reply_virt_addr + idx;
503                 vf->vf_mbx.reply_phys = rply_p + idx * sizeof(union pfvf_tlvs);
504
505 #ifdef CONFIG_ECORE_SW_CHANNEL
506                 vf->vf_mbx.sw_mbx.request_size = sizeof(union vfpf_tlvs);
507                 vf->vf_mbx.sw_mbx.mbx_state = VF_PF_WAIT_FOR_START_REQUEST;
508 #endif
509                 vf->state = VF_STOPPED;
510                 vf->b_init = false;
511
512                 vf->bulletin.phys = idx *
513                     sizeof(struct ecore_bulletin_content) + bulletin_p;
514                 vf->bulletin.p_virt = p_bulletin_virt + idx;
515                 vf->bulletin.size = sizeof(struct ecore_bulletin_content);
516
517                 vf->relative_vf_id = idx;
518                 vf->abs_vf_id = idx + p_iov->first_vf_in_pf;
519                 concrete = ecore_vfid_to_concrete(p_hwfn, vf->abs_vf_id);
520                 vf->concrete_fid = concrete;
521                 /* TODO - need to devise a better way of getting opaque */
522                 vf->opaque_fid = (p_hwfn->hw_info.opaque_fid & 0xff) |
523                     (vf->abs_vf_id << 8);
524
525                 vf->num_mac_filters = ECORE_ETH_VF_NUM_MAC_FILTERS;
526                 vf->num_vlan_filters = ECORE_ETH_VF_NUM_VLAN_FILTERS;
527         }
528 }
529
530 static enum _ecore_status_t ecore_iov_allocate_vfdb(struct ecore_hwfn *p_hwfn)
531 {
532         struct ecore_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
533         void **p_v_addr;
534         u16 num_vfs = 0;
535
536         num_vfs = p_hwfn->p_dev->p_iov_info->total_vfs;
537
538         DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
539                    "ecore_iov_allocate_vfdb for %d VFs\n", num_vfs);
540
541         /* Allocate PF Mailbox buffer (per-VF) */
542         p_iov_info->mbx_msg_size = sizeof(union vfpf_tlvs) * num_vfs;
543         p_v_addr = &p_iov_info->mbx_msg_virt_addr;
544         *p_v_addr = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
545                                             &p_iov_info->mbx_msg_phys_addr,
546                                             p_iov_info->mbx_msg_size);
547         if (!*p_v_addr)
548                 return ECORE_NOMEM;
549
550         /* Allocate PF Mailbox Reply buffer (per-VF) */
551         p_iov_info->mbx_reply_size = sizeof(union pfvf_tlvs) * num_vfs;
552         p_v_addr = &p_iov_info->mbx_reply_virt_addr;
553         *p_v_addr = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
554                                             &p_iov_info->mbx_reply_phys_addr,
555                                             p_iov_info->mbx_reply_size);
556         if (!*p_v_addr)
557                 return ECORE_NOMEM;
558
559         p_iov_info->bulletins_size = sizeof(struct ecore_bulletin_content) *
560             num_vfs;
561         p_v_addr = &p_iov_info->p_bulletins;
562         *p_v_addr = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev,
563                                             &p_iov_info->bulletins_phys,
564                                             p_iov_info->bulletins_size);
565         if (!*p_v_addr)
566                 return ECORE_NOMEM;
567
568         DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
569                    "PF's Requests mailbox [%p virt 0x%lx phys], "
570                    "Response mailbox [%p virt 0x%lx phys], Bulletins"
571                    " [%p virt 0x%lx phys]\n",
572                    p_iov_info->mbx_msg_virt_addr,
573                    (unsigned long)p_iov_info->mbx_msg_phys_addr,
574                    p_iov_info->mbx_reply_virt_addr,
575                    (unsigned long)p_iov_info->mbx_reply_phys_addr,
576                    p_iov_info->p_bulletins,
577                    (unsigned long)p_iov_info->bulletins_phys);
578
579         return ECORE_SUCCESS;
580 }
581
582 static void ecore_iov_free_vfdb(struct ecore_hwfn *p_hwfn)
583 {
584         struct ecore_pf_iov *p_iov_info = p_hwfn->pf_iov_info;
585
586         if (p_hwfn->pf_iov_info->mbx_msg_virt_addr)
587                 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
588                                        p_iov_info->mbx_msg_virt_addr,
589                                        p_iov_info->mbx_msg_phys_addr,
590                                        p_iov_info->mbx_msg_size);
591
592         if (p_hwfn->pf_iov_info->mbx_reply_virt_addr)
593                 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
594                                        p_iov_info->mbx_reply_virt_addr,
595                                        p_iov_info->mbx_reply_phys_addr,
596                                        p_iov_info->mbx_reply_size);
597
598         if (p_iov_info->p_bulletins)
599                 OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev,
600                                        p_iov_info->p_bulletins,
601                                        p_iov_info->bulletins_phys,
602                                        p_iov_info->bulletins_size);
603 }
604
605 enum _ecore_status_t ecore_iov_alloc(struct ecore_hwfn *p_hwfn)
606 {
607         struct ecore_pf_iov *p_sriov;
608
609         if (!IS_PF_SRIOV(p_hwfn)) {
610                 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
611                            "No SR-IOV - no need for IOV db\n");
612                 return ECORE_SUCCESS;
613         }
614
615         p_sriov = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_sriov));
616         if (!p_sriov) {
617                 DP_NOTICE(p_hwfn, true,
618                           "Failed to allocate `struct ecore_sriov'\n");
619                 return ECORE_NOMEM;
620         }
621
622         p_hwfn->pf_iov_info = p_sriov;
623
624         return ecore_iov_allocate_vfdb(p_hwfn);
625 }
626
627 void ecore_iov_setup(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt)
628 {
629         if (!IS_PF_SRIOV(p_hwfn) || !IS_PF_SRIOV_ALLOC(p_hwfn))
630                 return;
631
632         ecore_iov_setup_vfdb(p_hwfn);
633         ecore_iov_clear_vf_igu_blocks(p_hwfn, p_ptt);
634 }
635
636 void ecore_iov_free(struct ecore_hwfn *p_hwfn)
637 {
638         if (IS_PF_SRIOV_ALLOC(p_hwfn)) {
639                 ecore_iov_free_vfdb(p_hwfn);
640                 OSAL_FREE(p_hwfn->p_dev, p_hwfn->pf_iov_info);
641         }
642 }
643
644 void ecore_iov_free_hw_info(struct ecore_dev *p_dev)
645 {
646         OSAL_FREE(p_dev, p_dev->p_iov_info);
647 }
648
649 enum _ecore_status_t ecore_iov_hw_info(struct ecore_hwfn *p_hwfn)
650 {
651         struct ecore_dev *p_dev = p_hwfn->p_dev;
652         int pos;
653         enum _ecore_status_t rc;
654
655         if (IS_VF(p_hwfn->p_dev))
656                 return ECORE_SUCCESS;
657
658         /* Learn the PCI configuration */
659         pos = OSAL_PCI_FIND_EXT_CAPABILITY(p_hwfn->p_dev,
660                                            PCI_EXT_CAP_ID_SRIOV);
661         if (!pos) {
662                 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, "No PCIe IOV support\n");
663                 return ECORE_SUCCESS;
664         }
665
666         /* Allocate a new struct for IOV information */
667         /* TODO - can change to VALLOC when its available */
668         p_dev->p_iov_info = OSAL_ZALLOC(p_dev, GFP_KERNEL,
669                                         sizeof(*p_dev->p_iov_info));
670         if (!p_dev->p_iov_info) {
671                 DP_NOTICE(p_hwfn, true,
672                           "Can't support IOV due to lack of memory\n");
673                 return ECORE_NOMEM;
674         }
675         p_dev->p_iov_info->pos = pos;
676
677         rc = ecore_iov_pci_cfg_info(p_dev);
678         if (rc)
679                 return rc;
680
681         /* We want PF IOV to be synonymous with the existence of p_iov_info;
682          * In case the capability is published but there are no VFs, simply
683          * de-allocate the struct.
684          */
685         if (!p_dev->p_iov_info->total_vfs) {
686                 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
687                            "IOV capabilities, but no VFs are published\n");
688                 OSAL_FREE(p_dev, p_dev->p_iov_info);
689                 return ECORE_SUCCESS;
690         }
691
692         /* First VF index based on offset is tricky:
693          *  - If ARI is supported [likely], offset - (16 - pf_id) would
694          *    provide the number for eng0. 2nd engine VFs would begin
695          *    after the first engine's VFs.
696          *  - If !ARI, VFs would start on next device.
697          *    so offset - (256 - pf_id) would provide the number.
698          * Utilize the fact that (256 - pf_id) is achieved only by the
699          * latter to differentiate between the two.
700          */
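        /* A worked example of the calculation below, with purely
         * illustrative numbers: with ARI, PF2 would publish offset 14,
         * giving first_vf_in_pf = 14 + 2 - 16 = 0. Without ARI, PF2 would
         * publish offset 254, and 254 + 2 - 256 = 0 yields the same index.
         * Only the !ARI case can reach offset >= (256 - pf_id), which is
         * what the check below keys on.
         */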
701
702         if (p_hwfn->p_dev->p_iov_info->offset < (256 - p_hwfn->abs_pf_id)) {
703                 u32 first = p_hwfn->p_dev->p_iov_info->offset +
704                             p_hwfn->abs_pf_id - 16;
705
706                 p_dev->p_iov_info->first_vf_in_pf = first;
707
708                 if (ECORE_PATH_ID(p_hwfn))
709                         p_dev->p_iov_info->first_vf_in_pf -= MAX_NUM_VFS_BB;
710         } else {
711                 u32 first = p_hwfn->p_dev->p_iov_info->offset +
712                             p_hwfn->abs_pf_id - 256;
713
714                 p_dev->p_iov_info->first_vf_in_pf = first;
715         }
716
717         DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
718                    "First VF in hwfn 0x%08x\n",
719                    p_dev->p_iov_info->first_vf_in_pf);
720
721         return ECORE_SUCCESS;
722 }
723
724 static bool _ecore_iov_pf_sanity_check(struct ecore_hwfn *p_hwfn, int vfid,
725                                        bool b_fail_malicious)
726 {
727         /* Check PF supports sriov */
728         if (IS_VF(p_hwfn->p_dev) || !IS_ECORE_SRIOV(p_hwfn->p_dev) ||
729             !IS_PF_SRIOV_ALLOC(p_hwfn))
730                 return false;
731
732         /* Check VF validity */
733         if (!ecore_iov_is_valid_vfid(p_hwfn, vfid, true, b_fail_malicious))
734                 return false;
735
736         return true;
737 }
738
739 bool ecore_iov_pf_sanity_check(struct ecore_hwfn *p_hwfn, int vfid)
740 {
741         return _ecore_iov_pf_sanity_check(p_hwfn, vfid, true);
742 }
743
744 void ecore_iov_set_vf_to_disable(struct ecore_dev *p_dev,
745                                  u16 rel_vf_id, u8 to_disable)
746 {
747         struct ecore_vf_info *vf;
748         int i;
749
750         for_each_hwfn(p_dev, i) {
751                 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
752
753                 vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, false);
754                 if (!vf)
755                         continue;
756
757                 vf->to_disable = to_disable;
758         }
759 }
760
761 void ecore_iov_set_vfs_to_disable(struct ecore_dev *p_dev,
762                                   u8 to_disable)
763 {
764         u16 i;
765
766         if (!IS_ECORE_SRIOV(p_dev))
767                 return;
768
769         for (i = 0; i < p_dev->p_iov_info->total_vfs; i++)
770                 ecore_iov_set_vf_to_disable(p_dev, i, to_disable);
771 }
772
773 #ifndef LINUX_REMOVE
774 /* @@@TBD Consider taking outside of ecore... */
775 enum _ecore_status_t ecore_iov_set_vf_ctx(struct ecore_hwfn *p_hwfn,
776                                           u16               vf_id,
777                                           void              *ctx)
778 {
779         enum _ecore_status_t rc = ECORE_SUCCESS;
780         struct ecore_vf_info *vf = ecore_iov_get_vf_info(p_hwfn, vf_id, true);
781
782         if (vf != OSAL_NULL) {
783                 vf->ctx = ctx;
784 #ifdef CONFIG_ECORE_SW_CHANNEL
785                 vf->vf_mbx.sw_mbx.mbx_state = VF_PF_WAIT_FOR_START_REQUEST;
786 #endif
787         } else {
788                 rc = ECORE_UNKNOWN_ERROR;
789         }
790         return rc;
791 }
792 #endif
793
794 static void ecore_iov_vf_pglue_clear_err(struct ecore_hwfn      *p_hwfn,
795                                          struct ecore_ptt       *p_ptt,
796                                          u8                     abs_vfid)
797 {
798         ecore_wr(p_hwfn, p_ptt,
799                  PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR + (abs_vfid >> 5) * 4,
800                  1 << (abs_vfid & 0x1f));
801 }
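/* For illustration: each group of 32 VFs shares one WAS_ERROR clear
 * register, so e.g. abs_vfid 37 selects the second register (byte offset
 * (37 >> 5) * 4 = 4 from PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR) and clears
 * bit 37 & 0x1f = 5 within it.
 */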
802
803 static void ecore_iov_vf_igu_reset(struct ecore_hwfn *p_hwfn,
804                                    struct ecore_ptt *p_ptt,
805                                    struct ecore_vf_info *vf)
806 {
807         int i;
808
809         /* Set VF masks and configuration - pretend */
810         ecore_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);
811
812         ecore_wr(p_hwfn, p_ptt, IGU_REG_STATISTIC_NUM_VF_MSG_SENT, 0);
813
814         /* unpretend */
815         ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
816
817         /* iterate over all queues, clear sb consumer */
818         for (i = 0; i < vf->num_sbs; i++)
819                 ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
820                                                   vf->igu_sbs[i],
821                                                   vf->opaque_fid, true);
822 }
823
824 static void ecore_iov_vf_igu_set_int(struct ecore_hwfn *p_hwfn,
825                                      struct ecore_ptt *p_ptt,
826                                      struct ecore_vf_info *vf, bool enable)
827 {
828         u32 igu_vf_conf;
829
830         ecore_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);
831
832         igu_vf_conf = ecore_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION);
833
834         if (enable)
835                 igu_vf_conf |= IGU_VF_CONF_MSI_MSIX_EN;
836         else
837                 igu_vf_conf &= ~IGU_VF_CONF_MSI_MSIX_EN;
838
839         ecore_wr(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION, igu_vf_conf);
840
841         /* unpretend */
842         ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
843 }
844
845 static enum _ecore_status_t
846 ecore_iov_enable_vf_access(struct ecore_hwfn *p_hwfn,
847                            struct ecore_ptt *p_ptt, struct ecore_vf_info *vf)
848 {
849         u32 igu_vf_conf = IGU_VF_CONF_FUNC_EN;
850         enum _ecore_status_t rc;
851
852         if (vf->to_disable)
853                 return ECORE_SUCCESS;
854
855         DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
856                    "Enable internal access for vf %x [abs %x]\n", vf->abs_vf_id,
857                    ECORE_VF_ABS_ID(p_hwfn, vf));
858
859         ecore_iov_vf_pglue_clear_err(p_hwfn, p_ptt,
860                                      ECORE_VF_ABS_ID(p_hwfn, vf));
861
862         ecore_iov_vf_igu_reset(p_hwfn, p_ptt, vf);
863
864         /* It's possible VF was previously considered malicious */
865         vf->b_malicious = false;
866
867         rc = ecore_mcp_config_vf_msix(p_hwfn, p_ptt,
868                                       vf->abs_vf_id, vf->num_sbs);
869         if (rc != ECORE_SUCCESS)
870                 return rc;
871
872         ecore_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid);
873
874         SET_FIELD(igu_vf_conf, IGU_VF_CONF_PARENT, p_hwfn->rel_pf_id);
875         STORE_RT_REG(p_hwfn, IGU_REG_VF_CONFIGURATION_RT_OFFSET, igu_vf_conf);
876
877         ecore_init_run(p_hwfn, p_ptt, PHASE_VF, vf->abs_vf_id,
878                        p_hwfn->hw_info.hw_mode);
879
880         /* unpretend */
881         ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
882
883         vf->state = VF_FREE;
884
885         return rc;
886 }
887
888 /**
889  *
890  * @brief ecore_iov_config_perm_table - configure the permission
891  *      zone table.
892  *      In E4, queue zone permission table size is 320x9. There
893  *      are 320 VF queues for single engine device (256 for dual
894  *      engine device), and each entry has the following format:
895  *      {Valid, VF[7:0]}
896  * @param p_hwfn
897  * @param p_ptt
898  * @param vf
899  * @param enable
900  */
901 static void ecore_iov_config_perm_table(struct ecore_hwfn *p_hwfn,
902                                         struct ecore_ptt *p_ptt,
903                                         struct ecore_vf_info *vf, u8 enable)
904 {
905         u32 reg_addr, val;
906         u16 qzone_id = 0;
907         int qid;
908
909         for (qid = 0; qid < vf->num_rxqs; qid++) {
910                 ecore_fw_l2_queue(p_hwfn, vf->vf_queues[qid].fw_rx_qid,
911                                   &qzone_id);
912
913                 reg_addr = PSWHST_REG_ZONE_PERMISSION_TABLE + qzone_id * 4;
914                 val = enable ? (vf->abs_vf_id | (1 << 8)) : 0;
915                 ecore_wr(p_hwfn, p_ptt, reg_addr, val);
916         }
917 }
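/* For illustration, given the {Valid, VF[7:0]} entry format described
 * above: enabling the zone for abs_vf_id 5 writes 0x105 (valid bit 8 set,
 * VF id 5) to the queue zone's entry, while disabling writes 0.
 */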
918
919 static void ecore_iov_enable_vf_traffic(struct ecore_hwfn *p_hwfn,
920                                         struct ecore_ptt *p_ptt,
921                                         struct ecore_vf_info *vf)
922 {
923         /* Reset vf in IGU - interrupts are still disabled */
924         ecore_iov_vf_igu_reset(p_hwfn, p_ptt, vf);
925
926         ecore_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 1);
927
928         /* Permission Table */
929         ecore_iov_config_perm_table(p_hwfn, p_ptt, vf, true);
930 }
931
932 static u8 ecore_iov_alloc_vf_igu_sbs(struct ecore_hwfn *p_hwfn,
933                                      struct ecore_ptt *p_ptt,
934                                      struct ecore_vf_info *vf,
935                                      u16 num_rx_queues)
936 {
937         struct ecore_igu_block *igu_blocks;
938         int qid = 0, igu_id = 0;
939         u32 val = 0;
940
941         igu_blocks = p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks;
942
943         if (num_rx_queues > p_hwfn->hw_info.p_igu_info->free_blks)
944                 num_rx_queues = p_hwfn->hw_info.p_igu_info->free_blks;
945
946         p_hwfn->hw_info.p_igu_info->free_blks -= num_rx_queues;
947
948         SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, vf->abs_vf_id);
949         SET_FIELD(val, IGU_MAPPING_LINE_VALID, 1);
950         SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, 0);
951
952         while ((qid < num_rx_queues) &&
953                (igu_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev))) {
954                 if (igu_blocks[igu_id].status & ECORE_IGU_STATUS_FREE) {
955                         struct cau_sb_entry sb_entry;
956
957                         vf->igu_sbs[qid] = (u16)igu_id;
958                         igu_blocks[igu_id].status &= ~ECORE_IGU_STATUS_FREE;
959
960                         SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid);
961
962                         ecore_wr(p_hwfn, p_ptt,
963                                  IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id,
964                                  val);
965
966                         /* Configure the IGU SBs in CAU that were marked valid */
967                         ecore_init_cau_sb_entry(p_hwfn, &sb_entry,
968                                                 p_hwfn->rel_pf_id,
969                                                 vf->abs_vf_id, 1);
970                         ecore_dmae_host2grc(p_hwfn, p_ptt,
971                                             (u64)(osal_uintptr_t)&sb_entry,
972                                             CAU_REG_SB_VAR_MEMORY +
973                                             igu_id * sizeof(u64), 2, 0);
974                         qid++;
975                 }
976                 igu_id++;
977         }
978
979         vf->num_sbs = (u8)num_rx_queues;
980
981         return vf->num_sbs;
982 }
983
984 /**
985  *
986  * @brief The function invalidates all the VF entries,
987  *        technically this isn't required, but added for
988  *        cleanliness and ease of debugging in case a VF attempts to
989  *        produce an interrupt after it has been taken down.
990  *
991  * @param p_hwfn
992  * @param p_ptt
993  * @param vf
994  */
995 static void ecore_iov_free_vf_igu_sbs(struct ecore_hwfn *p_hwfn,
996                                       struct ecore_ptt *p_ptt,
997                                       struct ecore_vf_info *vf)
998 {
999         struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info;
1000         int idx, igu_id;
1001         u32 addr, val;
1002
1003         /* Invalidate igu CAM lines and mark them as free */
1004         for (idx = 0; idx < vf->num_sbs; idx++) {
1005                 igu_id = vf->igu_sbs[idx];
1006                 addr = IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id;
1007
1008                 val = ecore_rd(p_hwfn, p_ptt, addr);
1009                 SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0);
1010                 ecore_wr(p_hwfn, p_ptt, addr, val);
1011
1012                 p_info->igu_map.igu_blocks[igu_id].status |=
1013                     ECORE_IGU_STATUS_FREE;
1014
1015                 p_hwfn->hw_info.p_igu_info->free_blks++;
1016         }
1017
1018         vf->num_sbs = 0;
1019 }
1020
1021 void ecore_iov_set_link(struct ecore_hwfn *p_hwfn,
1022                         u16 vfid,
1023                         struct ecore_mcp_link_params *params,
1024                         struct ecore_mcp_link_state *link,
1025                         struct ecore_mcp_link_capabilities *p_caps)
1026 {
1027         struct ecore_vf_info *p_vf = ecore_iov_get_vf_info(p_hwfn, vfid, false);
1028         struct ecore_bulletin_content *p_bulletin;
1029
1030         if (!p_vf)
1031                 return;
1032
1033         p_bulletin = p_vf->bulletin.p_virt;
1034         p_bulletin->req_autoneg = params->speed.autoneg;
1035         p_bulletin->req_adv_speed = params->speed.advertised_speeds;
1036         p_bulletin->req_forced_speed = params->speed.forced_speed;
1037         p_bulletin->req_autoneg_pause = params->pause.autoneg;
1038         p_bulletin->req_forced_rx = params->pause.forced_rx;
1039         p_bulletin->req_forced_tx = params->pause.forced_tx;
1040         p_bulletin->req_loopback = params->loopback_mode;
1041
1042         p_bulletin->link_up = link->link_up;
1043         p_bulletin->speed = link->speed;
1044         p_bulletin->full_duplex = link->full_duplex;
1045         p_bulletin->autoneg = link->an;
1046         p_bulletin->autoneg_complete = link->an_complete;
1047         p_bulletin->parallel_detection = link->parallel_detection;
1048         p_bulletin->pfc_enabled = link->pfc_enabled;
1049         p_bulletin->partner_adv_speed = link->partner_adv_speed;
1050         p_bulletin->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en;
1051         p_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en;
1052         p_bulletin->partner_adv_pause = link->partner_adv_pause;
1053         p_bulletin->sfp_tx_fault = link->sfp_tx_fault;
1054
1055         p_bulletin->capability_speed = p_caps->speed_capabilities;
1056 }
1057
1058 enum _ecore_status_t
1059 ecore_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn,
1060                          struct ecore_ptt *p_ptt,
1061                          struct ecore_iov_vf_init_params *p_params)
1062 {
1063         struct ecore_mcp_link_capabilities link_caps;
1064         struct ecore_mcp_link_params link_params;
1065         struct ecore_mcp_link_state link_state;
1066         u8 num_of_vf_available_chains  = 0;
1067         struct ecore_vf_info *vf = OSAL_NULL;
1068         u16 qid, num_irqs;
1069         enum _ecore_status_t rc = ECORE_SUCCESS;
1070         u32 cids;
1071         u8 i;
1072
1073         vf = ecore_iov_get_vf_info(p_hwfn, p_params->rel_vf_id, false);
1074         if (!vf) {
1075                 DP_ERR(p_hwfn, "ecore_iov_init_hw_for_vf : vf is OSAL_NULL\n");
1076                 return ECORE_UNKNOWN_ERROR;
1077         }
1078
1079         if (vf->b_init) {
1080                 DP_NOTICE(p_hwfn, true, "VF[%d] is already active.\n",
1081                           p_params->rel_vf_id);
1082                 return ECORE_INVAL;
1083         }
1084
1085         /* Perform sanity checking on the requested vport/rss */
1086         if (p_params->vport_id >= RESC_NUM(p_hwfn, ECORE_VPORT)) {
1087                 DP_NOTICE(p_hwfn, true, "VF[%d] - can't use VPORT %02x\n",
1088                           p_params->rel_vf_id, p_params->vport_id);
1089                 return ECORE_INVAL;
1090         }
1091
1092         if ((p_params->num_queues > 1) &&
1093             (p_params->rss_eng_id >= RESC_NUM(p_hwfn, ECORE_RSS_ENG))) {
1094                 DP_NOTICE(p_hwfn, true, "VF[%d] - can't use RSS_ENG %02x\n",
1095                           p_params->rel_vf_id, p_params->rss_eng_id);
1096                 return ECORE_INVAL;
1097         }
1098
1099         /* TODO - remove this once we get confidence of change */
1100         if (!p_params->vport_id) {
1101                 DP_NOTICE(p_hwfn, false,
1102                           "VF[%d] - Unlikely that VF uses vport0. Forgotten?\n",
1103                           p_params->rel_vf_id);
1104         }
1105         if ((!p_params->rss_eng_id) && (p_params->num_queues > 1)) {
1106                 DP_NOTICE(p_hwfn, false,
1107                           "VF[%d] - Unlikely that VF uses RSS_eng0. Forgotten?\n",
1108                           p_params->rel_vf_id);
1109         }
1110         vf->vport_id = p_params->vport_id;
1111         vf->rss_eng_id = p_params->rss_eng_id;
1112
1113         /* Perform sanity checking on the requested queue_id */
1114         for (i = 0; i < p_params->num_queues; i++) {
1115                 u16 min_vf_qzone = (u16)FEAT_NUM(p_hwfn, ECORE_PF_L2_QUE);
1116                 u16 max_vf_qzone = min_vf_qzone +
1117                                    FEAT_NUM(p_hwfn, ECORE_VF_L2_QUE) - 1;
1118
1119                 qid = p_params->req_rx_queue[i];
1120                 if (qid < min_vf_qzone || qid > max_vf_qzone) {
1121                         DP_NOTICE(p_hwfn, true,
1122                                   "Can't enable Rx qid [%04x] for VF[%d]: qids [0x%04x,...,0x%04x] available\n",
1123                                   qid, p_params->rel_vf_id,
1124                                   min_vf_qzone, max_vf_qzone);
1125                         return ECORE_INVAL;
1126                 }
1127
1128                 qid = p_params->req_tx_queue[i];
1129                 if (qid > max_vf_qzone) {
1130                         DP_NOTICE(p_hwfn, true,
1131                                   "Can't enable Tx qid [%04x] for VF[%d]: max qid 0x%04x\n",
1132                                   qid, p_params->rel_vf_id, max_vf_qzone);
1133                         return ECORE_INVAL;
1134                 }
1135
1136                 /* If client *really* wants, Tx qid can be shared with PF */
1137                 if (qid < min_vf_qzone)
1138                         DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1139                                    "VF[%d] is using PF qid [0x%04x] for Txq[0x%02x]\n",
1140                                    p_params->rel_vf_id, qid, i);
1141         }
1142
1143         /* Limit number of queues according to number of CIDs */
1144         ecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, &cids);
1145         DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1146                    "VF[%d] - requesting to initialize for 0x%04x queues"
1147                    " [0x%04x CIDs available]\n",
1148                    vf->relative_vf_id, p_params->num_queues, (u16)cids);
1149         num_irqs = OSAL_MIN_T(u16, p_params->num_queues, ((u16)cids));
1150
1151         num_of_vf_available_chains = ecore_iov_alloc_vf_igu_sbs(p_hwfn,
1152                                                                p_ptt,
1153                                                                vf,
1154                                                                num_irqs);
1155         if (num_of_vf_available_chains == 0) {
1156                 DP_ERR(p_hwfn, "no available igu sbs\n");
1157                 return ECORE_NOMEM;
1158         }
1159
1160         /* Choose queue number and index ranges */
1161         vf->num_rxqs = num_of_vf_available_chains;
1162         vf->num_txqs = num_of_vf_available_chains;
1163
1164         for (i = 0; i < vf->num_rxqs; i++) {
1165                 struct ecore_vf_queue *p_queue = &vf->vf_queues[i];
1166
1167                 p_queue->fw_rx_qid = p_params->req_rx_queue[i];
1168                 p_queue->fw_tx_qid = p_params->req_tx_queue[i];
1169
1170                 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1171                            "VF[%d] - Q[%d] SB %04x, qid [Rx %04x Tx %04x]\n",
1172                            vf->relative_vf_id, i, vf->igu_sbs[i],
1173                            p_queue->fw_rx_qid, p_queue->fw_tx_qid);
1174         }
1175
1176         /* Update the link configuration in bulletin.
1177          */
1178         OSAL_MEMCPY(&link_params, ecore_mcp_get_link_params(p_hwfn),
1179                     sizeof(link_params));
1180         OSAL_MEMCPY(&link_state, ecore_mcp_get_link_state(p_hwfn),
1181                     sizeof(link_state));
1182         OSAL_MEMCPY(&link_caps, ecore_mcp_get_link_capabilities(p_hwfn),
1183                     sizeof(link_caps));
1184         ecore_iov_set_link(p_hwfn, p_params->rel_vf_id,
1185                            &link_params, &link_state, &link_caps);
1186
1187         rc = ecore_iov_enable_vf_access(p_hwfn, p_ptt, vf);
1188
1189         if (rc == ECORE_SUCCESS) {
1190                 vf->b_init = true;
1191                 p_hwfn->pf_iov_info->active_vfs[vf->relative_vf_id / 64] |=
1192                         (1ULL << (vf->relative_vf_id % 64));
1193
1194                 if (IS_LEAD_HWFN(p_hwfn))
1195                         p_hwfn->p_dev->p_iov_info->num_vfs++;
1196         }
1197
1198         return rc;
1199 }
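/* Note on the active_vfs bitmap used above and in the release flow below:
 * it is an array of 64-bit words indexed by relative_vf_id / 64, with the
 * bit selected by relative_vf_id % 64. E.g. relative VF 70 maps to
 * active_vfs[1], bit 6; the release path must clear exactly the bit the
 * init path set.
 */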
1200
1201 enum _ecore_status_t ecore_iov_release_hw_for_vf(struct ecore_hwfn *p_hwfn,
1202                                                  struct ecore_ptt *p_ptt,
1203                                                  u16 rel_vf_id)
1204 {
1205         struct ecore_mcp_link_capabilities caps;
1206         struct ecore_mcp_link_params params;
1207         struct ecore_mcp_link_state link;
1208         struct ecore_vf_info *vf = OSAL_NULL;
1209
1210         vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
1211         if (!vf) {
1212                 DP_ERR(p_hwfn, "ecore_iov_release_hw_for_vf : vf is NULL\n");
1213                 return ECORE_UNKNOWN_ERROR;
1214         }
1215
1216         if (vf->bulletin.p_virt)
1217                 OSAL_MEMSET(vf->bulletin.p_virt, 0,
1218                             sizeof(*vf->bulletin.p_virt));
1219
1220         OSAL_MEMSET(&vf->p_vf_info, 0, sizeof(vf->p_vf_info));
1221
1222         /* Get the link configuration back in bulletin so
1223          * that when VFs are re-enabled they get the actual
1224          * link configuration.
1225          */
1226         OSAL_MEMCPY(&params, ecore_mcp_get_link_params(p_hwfn), sizeof(params));
1227         OSAL_MEMCPY(&link, ecore_mcp_get_link_state(p_hwfn), sizeof(link));
1228         OSAL_MEMCPY(&caps, ecore_mcp_get_link_capabilities(p_hwfn),
1229                     sizeof(caps));
1230         ecore_iov_set_link(p_hwfn, rel_vf_id, &params, &link, &caps);
1231
1232         /* Forget the VF's acquisition message */
1233         OSAL_MEMSET(&vf->acquire, 0, sizeof(vf->acquire));
1234
1235         /* Disabling interrupts and resetting the permission table were done
1236          * during vf-close; however, we could get here without going through vf_close.
1237          */
1238         /* Disable Interrupts for VF */
1239         ecore_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);
1240
1241         /* Reset Permission table */
1242         ecore_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);
1243
1244         vf->num_rxqs = 0;
1245         vf->num_txqs = 0;
1246         ecore_iov_free_vf_igu_sbs(p_hwfn, p_ptt, vf);
1247
1248         if (vf->b_init) {
1249                 vf->b_init = false;
1250                 p_hwfn->pf_iov_info->active_vfs[vf->relative_vf_id / 64] &=
1251                                         ~(1ULL << (vf->relative_vf_id % 64));
1252
1253                 if (IS_LEAD_HWFN(p_hwfn))
1254                         p_hwfn->p_dev->p_iov_info->num_vfs--;
1255         }
1256
1257         return ECORE_SUCCESS;
1258 }
1259
1260 static bool ecore_iov_tlv_supported(u16 tlvtype)
1261 {
1262         return tlvtype > CHANNEL_TLV_NONE && tlvtype < CHANNEL_TLV_MAX;
1263 }
1264
1265 static void ecore_iov_lock_vf_pf_channel(struct ecore_hwfn *p_hwfn,
1266                                          struct ecore_vf_info *vf, u16 tlv)
1267 {
1268         /* lock the channel */
1269         /* mutex_lock(&vf->op_mutex); @@@TBD MichalK - add lock... */
1270
1271         /* record the locking op */
1272         /* vf->op_current = tlv; @@@TBD MichalK */
1273
1274         /* log the lock */
1275         if (ecore_iov_tlv_supported(tlv))
1276                 DP_VERBOSE(p_hwfn,
1277                            ECORE_MSG_IOV,
1278                            "VF[%d]: vf pf channel locked by %s\n",
1279                            vf->abs_vf_id,
1280                            ecore_channel_tlvs_string[tlv]);
1281         else
1282                 DP_VERBOSE(p_hwfn,
1283                            ECORE_MSG_IOV,
1284                            "VF[%d]: vf pf channel locked by %04x\n",
1285                            vf->abs_vf_id, tlv);
1286 }
1287
1288 static void ecore_iov_unlock_vf_pf_channel(struct ecore_hwfn *p_hwfn,
1289                                            struct ecore_vf_info *vf,
1290                                            u16 expected_tlv)
1291 {
1292         /* log the unlock */
1293         if (ecore_iov_tlv_supported(expected_tlv))
1294                 DP_VERBOSE(p_hwfn,
1295                            ECORE_MSG_IOV,
1296                            "VF[%d]: vf pf channel unlocked by %s\n",
1297                            vf->abs_vf_id,
1298                            ecore_channel_tlvs_string[expected_tlv]);
1299         else
1300                 DP_VERBOSE(p_hwfn,
1301                            ECORE_MSG_IOV,
1302                            "VF[%d]: vf pf channel unlocked by %04x\n",
1303                            vf->abs_vf_id, expected_tlv);
1304
1305         /* record the locking op */
1306         /* vf->op_current = CHANNEL_TLV_NONE; */
1307 }
1308
1309 /* place a given tlv on the tlv buffer, continuing current tlv list */
1310 void *ecore_add_tlv(struct ecore_hwfn *p_hwfn,
1311                     u8 **offset, u16 type, u16 length)
1312 {
1313         struct channel_tlv *tl = (struct channel_tlv *)*offset;
1314
1315         tl->type = type;
1316         tl->length = length;
1317
1318         /* Offset should keep pointing to next TLV (the end of the last) */
1319         *offset += length;
1320
1321         /* Return a pointer to the start of the added tlv */
1322         return *offset - length;
1323 }
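/* A minimal usage sketch (hypothetical, but mirroring how replies are
 * built further down in this file): start at the reply buffer and let
 * ecore_add_tlv() advance the offset past each placed TLV, terminating
 * the chain with CHANNEL_TLV_LIST_END.
 *
 *	u8 *offset = (u8 *)p_mbx->reply_virt;
 *	struct pfvf_def_resp_tlv *resp;
 *
 *	resp = ecore_add_tlv(p_hwfn, &offset, CHANNEL_TLV_VPORT_UPDATE,
 *			     sizeof(*resp));
 *	ecore_add_tlv(p_hwfn, &offset, CHANNEL_TLV_LIST_END,
 *		      sizeof(struct channel_list_end_tlv));
 */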
1324
1325 /* list the types and lengths of the tlvs on the buffer */
1326 void ecore_dp_tlv_list(struct ecore_hwfn *p_hwfn, void *tlvs_list)
1327 {
1328         u16 i = 1, total_length = 0;
1329         struct channel_tlv *tlv;
1330
1331         do {
1332                 /* cast current tlv list entry to channel tlv header */
1333                 tlv = (struct channel_tlv *)((u8 *)tlvs_list + total_length);
1334
1335                 /* output tlv */
1336                 if (ecore_iov_tlv_supported(tlv->type))
1337                         DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1338                                    "TLV number %d: type %s, length %d\n",
1339                                    i, ecore_channel_tlvs_string[tlv->type],
1340                                    tlv->length);
1341                 else
1342                         DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1343                                    "TLV number %d: type %d, length %d\n",
1344                                    i, tlv->type, tlv->length);
1345
1346                 if (tlv->type == CHANNEL_TLV_LIST_END)
1347                         return;
1348
1349                 /* Validate entry - protect against malicious VFs */
1350                 if (!tlv->length) {
1351                         DP_NOTICE(p_hwfn, false, "TLV of length 0 found\n");
1352                         return;
1353                 }
1354                 total_length += tlv->length;
1355                 if (total_length >= sizeof(struct tlv_buffer_size)) {
1356                         DP_NOTICE(p_hwfn, false, "TLV ==> Buffer overflow\n");
1357                         return;
1358                 }
1359
1360                 i++;
1361         } while (1);
1362 }
1363
1364 static void ecore_iov_send_response(struct ecore_hwfn *p_hwfn,
1365                                     struct ecore_ptt *p_ptt,
1366                                     struct ecore_vf_info *p_vf,
1367                                     u16 length, u8 status)
1368 {
1369         struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
1370         struct ecore_dmae_params params;
1371         u8 eng_vf_id;
1372
1373         mbx->reply_virt->default_resp.hdr.status = status;
1374
1375         ecore_dp_tlv_list(p_hwfn, mbx->reply_virt);
1376
1377 #ifdef CONFIG_ECORE_SW_CHANNEL
1378         mbx->sw_mbx.response_size =
1379             length + sizeof(struct channel_list_end_tlv);
1380
1381         if (!p_hwfn->p_dev->b_hw_channel)
1382                 return;
1383 #endif
1384
1385         eng_vf_id = p_vf->abs_vf_id;
1386
1387         OSAL_MEMSET(&params, 0, sizeof(struct ecore_dmae_params));
1388         params.flags = ECORE_DMAE_FLAG_VF_DST;
1389         params.dst_vfid = eng_vf_id;
1390
1391         ecore_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys + sizeof(u64),
1392                              mbx->req_virt->first_tlv.reply_address +
1393                              sizeof(u64),
1394                              (sizeof(union pfvf_tlvs) - sizeof(u64)) / 4,
1395                              &params);
1396
1397         ecore_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys,
1398                              mbx->req_virt->first_tlv.reply_address,
1399                              sizeof(u64) / 4, &params);
1400
1401         REG_WR(p_hwfn,
1402                GTT_BAR0_MAP_REG_USDM_RAM +
1403                USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1);
1404 }
1405
1406 static u16 ecore_iov_vport_to_tlv(struct ecore_hwfn *p_hwfn,
1407                                   enum ecore_iov_vport_update_flag flag)
1408 {
1409         switch (flag) {
1410         case ECORE_IOV_VP_UPDATE_ACTIVATE:
1411                 return CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
1412         case ECORE_IOV_VP_UPDATE_VLAN_STRIP:
1413                 return CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
1414         case ECORE_IOV_VP_UPDATE_TX_SWITCH:
1415                 return CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
1416         case ECORE_IOV_VP_UPDATE_MCAST:
1417                 return CHANNEL_TLV_VPORT_UPDATE_MCAST;
1418         case ECORE_IOV_VP_UPDATE_ACCEPT_PARAM:
1419                 return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
1420         case ECORE_IOV_VP_UPDATE_RSS:
1421                 return CHANNEL_TLV_VPORT_UPDATE_RSS;
1422         case ECORE_IOV_VP_UPDATE_ACCEPT_ANY_VLAN:
1423                 return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
1424         case ECORE_IOV_VP_UPDATE_SGE_TPA:
1425                 return CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
1426         default:
1427                 return 0;
1428         }
1429 }
1430
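/* Build the VPORT_UPDATE response chain: one default-resp sized TLV per
 * extended TLV the PF found in the request (tlvs_mask), each carrying either
 * the overall status (if accepted per tlvs_accepted) or
 * PFVF_STATUS_NOT_SUPPORTED, terminated by CHANNEL_TLV_LIST_END.
 * Returns the accumulated response length.
 */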
1431 static u16 ecore_iov_prep_vp_update_resp_tlvs(struct ecore_hwfn *p_hwfn,
1432                                               struct ecore_vf_info *p_vf,
1433                                               struct ecore_iov_vf_mbx *p_mbx,
1434                                               u8 status, u16 tlvs_mask,
1435                                               u16 tlvs_accepted)
1436 {
1437         struct pfvf_def_resp_tlv *resp;
1438         u16 size, total_len, i;
1439
1440         OSAL_MEMSET(p_mbx->reply_virt, 0, sizeof(union pfvf_tlvs));
1441         p_mbx->offset = (u8 *)p_mbx->reply_virt;
1442         size = sizeof(struct pfvf_def_resp_tlv);
1443         total_len = size;
1444
1445         ecore_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_VPORT_UPDATE, size);
1446
1447         /* Prepare response for all extended TLVs if they are found by the PF */
1448         for (i = 0; i < ECORE_IOV_VP_UPDATE_MAX; i++) {
1449                 if (!(tlvs_mask & (1 << i)))
1450                         continue;
1451
1452                 resp = ecore_add_tlv(p_hwfn, &p_mbx->offset,
1453                                      ecore_iov_vport_to_tlv(p_hwfn, i), size);
1454
1455                 if (tlvs_accepted & (1 << i))
1456                         resp->hdr.status = status;
1457                 else
1458                         resp->hdr.status = PFVF_STATUS_NOT_SUPPORTED;
1459
1460                 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1461                            "VF[%d] - vport_update resp: TLV %d, status %02x\n",
1462                            p_vf->relative_vf_id,
1463                            ecore_iov_vport_to_tlv(p_hwfn, i), resp->hdr.status);
1464
1465                 total_len += size;
1466         }
1467
1468         ecore_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_LIST_END,
1469                       sizeof(struct channel_list_end_tlv));
1470
1471         return total_len;
1472 }
1473
1474 static void ecore_iov_prepare_resp(struct ecore_hwfn *p_hwfn,
1475                                    struct ecore_ptt *p_ptt,
1476                                    struct ecore_vf_info *vf_info,
1477                                    u16 type, u16 length, u8 status)
1478 {
1479         struct ecore_iov_vf_mbx *mbx = &vf_info->vf_mbx;
1480
1481         mbx->offset = (u8 *)mbx->reply_virt;
1482
1483         ecore_add_tlv(p_hwfn, &mbx->offset, type, length);
1484         ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
1485                       sizeof(struct channel_list_end_tlv));
1486
1487         ecore_iov_send_response(p_hwfn, p_ptt, vf_info, length, status);
1488
1489         OSAL_IOV_PF_RESP_TYPE(p_hwfn, vf_info->relative_vf_id, status);
1490 }
1491
1492 struct ecore_public_vf_info
1493 *ecore_iov_get_public_vf_info(struct ecore_hwfn *p_hwfn,
1494                               u16 relative_vf_id,
1495                               bool b_enabled_only)
1496 {
1497         struct ecore_vf_info *vf = OSAL_NULL;
1498
1499         vf = ecore_iov_get_vf_info(p_hwfn, relative_vf_id, b_enabled_only);
1500         if (!vf)
1501                 return OSAL_NULL;
1502
1503         return &vf->p_vf_info;
1504 }
1505
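/* Reset a VF's runtime state: drop the bulletin/vport references, restore the
 * default queue counts, release any queue-cids still held in the qzones and
 * clear the shadow/acquire state.
 */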
1506 static void ecore_iov_vf_cleanup(struct ecore_hwfn *p_hwfn,
1507                                  struct ecore_vf_info *p_vf)
1508 {
1509         u32 i, j;
1510         p_vf->vf_bulletin = 0;
1511         p_vf->vport_instance = 0;
1512         p_vf->configured_features = 0;
1513
1514         /* If VF previously requested less resources, go back to default */
1515         p_vf->num_rxqs = p_vf->num_sbs;
1516         p_vf->num_txqs = p_vf->num_sbs;
1517
1518         p_vf->num_active_rxqs = 0;
1519
1520         for (i = 0; i < ECORE_MAX_VF_CHAINS_PER_PF; i++) {
1521                 struct ecore_vf_queue *p_queue = &p_vf->vf_queues[i];
1522
1523                 for (j = 0; j < MAX_QUEUES_PER_QZONE; j++) {
1524                         if (!p_queue->cids[j].p_cid)
1525                                 continue;
1526
1527                         ecore_eth_queue_cid_release(p_hwfn,
1528                                                     p_queue->cids[j].p_cid);
1529                         p_queue->cids[j].p_cid = OSAL_NULL;
1530                 }
1531         }
1532
1533         OSAL_MEMSET(&p_vf->shadow_config, 0, sizeof(p_vf->shadow_config));
1534         OSAL_MEMSET(&p_vf->acquire, 0, sizeof(p_vf->acquire));
1535         OSAL_IOV_VF_CLEANUP(p_hwfn, p_vf->relative_vf_id);
1536 }
1537
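/* Fill the resources granted to the VF in the ACQUIRE response and verify
 * they satisfy the request; on shortage, legacy Windows VFs are still
 * answered with PFVF_STATUS_SUCCESS since they cannot correctly handle
 * PFVF_STATUS_NO_RESOURCE.
 */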
1538 static u8 ecore_iov_vf_mbx_acquire_resc(struct ecore_hwfn *p_hwfn,
1539                                         struct ecore_ptt *p_ptt,
1540                                         struct ecore_vf_info *p_vf,
1541                                         struct vf_pf_resc_request *p_req,
1542                                         struct pf_vf_resc *p_resp)
1543 {
1544         u8 i;
1545
1546         /* Queue related information */
1547         p_resp->num_rxqs = p_vf->num_rxqs;
1548         p_resp->num_txqs = p_vf->num_txqs;
1549         p_resp->num_sbs = p_vf->num_sbs;
1550
1551         for (i = 0; i < p_resp->num_sbs; i++) {
1552                 p_resp->hw_sbs[i].hw_sb_id = p_vf->igu_sbs[i];
1553                 /* TODO - what's this sb_qid field? Is it deprecated?
1554                  * or is there an ecore_client that looks at this?
1555                  */
1556                 p_resp->hw_sbs[i].sb_qid = 0;
1557         }
1558
1559         /* These fields are filled for backward compatibility.
1560          * Unused by modern VFs.
1561          */
1562         for (i = 0; i < p_resp->num_rxqs; i++) {
1563                 ecore_fw_l2_queue(p_hwfn, p_vf->vf_queues[i].fw_rx_qid,
1564                                   (u16 *)&p_resp->hw_qid[i]);
1565                 p_resp->cid[i] = i;
1566         }
1567
1568         /* Filter related information */
1569         p_resp->num_mac_filters = OSAL_MIN_T(u8, p_vf->num_mac_filters,
1570                                              p_req->num_mac_filters);
1571         p_resp->num_vlan_filters = OSAL_MIN_T(u8, p_vf->num_vlan_filters,
1572                                               p_req->num_vlan_filters);
1573
1574         /* This isn't really needed/enforced, but some legacy VFs might depend
1575          * on the correct filling of this field.
1576          */
1577         p_resp->num_mc_filters = ECORE_MAX_MC_ADDRS;
1578
1579         /* Validate sufficient resources for VF */
1580         if (p_resp->num_rxqs < p_req->num_rxqs ||
1581             p_resp->num_txqs < p_req->num_txqs ||
1582             p_resp->num_sbs < p_req->num_sbs ||
1583             p_resp->num_mac_filters < p_req->num_mac_filters ||
1584             p_resp->num_vlan_filters < p_req->num_vlan_filters ||
1585             p_resp->num_mc_filters < p_req->num_mc_filters) {
1586                 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1587                            "VF[%d] - Insufficient resources: rxq [%02x/%02x]"
1588                            " txq [%02x/%02x] sbs [%02x/%02x] mac [%02x/%02x]"
1589                            " vlan [%02x/%02x] mc [%02x/%02x]\n",
1590                            p_vf->abs_vf_id,
1591                            p_req->num_rxqs, p_resp->num_rxqs,
1592                            p_req->num_txqs, p_resp->num_txqs,
1593                            p_req->num_sbs, p_resp->num_sbs,
1594                            p_req->num_mac_filters, p_resp->num_mac_filters,
1595                            p_req->num_vlan_filters, p_resp->num_vlan_filters,
1596                            p_req->num_mc_filters, p_resp->num_mc_filters);
1597
1598                 /* Some legacy OSes are incapable of correctly handling this
1599                  * failure.
1600                  */
1601                 if ((p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
1602                      ETH_HSI_VER_NO_PKT_LEN_TUNN) &&
1603                     (p_vf->acquire.vfdev_info.os_type ==
1604                      VFPF_ACQUIRE_OS_WINDOWS))
1605                         return PFVF_STATUS_SUCCESS;
1606
1607                 return PFVF_STATUS_NO_RESOURCE;
1608         }
1609
1610         return PFVF_STATUS_SUCCESS;
1611 }
1612
1613 static void ecore_iov_vf_mbx_acquire_stats(struct ecore_hwfn *p_hwfn,
1614                                            struct pfvf_stats_info *p_stats)
1615 {
1616         p_stats->mstats.address = PXP_VF_BAR0_START_MSDM_ZONE_B +
1617                                   OFFSETOF(struct mstorm_vf_zone,
1618                                            non_trigger.eth_queue_stat);
1619         p_stats->mstats.len = sizeof(struct eth_mstorm_per_queue_stat);
1620         p_stats->ustats.address = PXP_VF_BAR0_START_USDM_ZONE_B +
1621                                   OFFSETOF(struct ustorm_vf_zone,
1622                                            non_trigger.eth_queue_stat);
1623         p_stats->ustats.len = sizeof(struct eth_ustorm_per_queue_stat);
1624         p_stats->pstats.address = PXP_VF_BAR0_START_PSDM_ZONE_B +
1625                                   OFFSETOF(struct pstorm_vf_zone,
1626                                            non_trigger.eth_queue_stat);
1627         p_stats->pstats.len = sizeof(struct eth_pstorm_per_queue_stat);
1628         p_stats->tstats.address = 0;
1629         p_stats->tstats.len = 0;
1630 }
1631
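/* Handle the VF ACQUIRE request. In order:
 * - advertise the PF fastpath HSI version and validate the VF's, allowing
 *   pre-HSI (legacy) VFs via VFPF_ACQUIRE_CAP_PRE_FP_HSI;
 * - reject old VF drivers on 100g (dual-hwfn) devices;
 * - store the request, fill pfdev info (FW/MFW versions, stats areas,
 *   port MAC, capabilities) and negotiate resources;
 * - send the VF_START ramrod and post an initial bulletin image;
 * - reply with CHANNEL_TLV_ACQUIRE carrying the negotiated response.
 */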
1632 static void ecore_iov_vf_mbx_acquire(struct ecore_hwfn       *p_hwfn,
1633                                      struct ecore_ptt        *p_ptt,
1634                                      struct ecore_vf_info    *vf)
1635 {
1636         struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
1637         struct pfvf_acquire_resp_tlv *resp = &mbx->reply_virt->acquire_resp;
1638         struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
1639         struct vfpf_acquire_tlv *req = &mbx->req_virt->acquire;
1640         u8 vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
1641         struct pf_vf_resc *resc = &resp->resc;
1642         enum _ecore_status_t rc;
1643
1644         OSAL_MEMSET(resp, 0, sizeof(*resp));
1645
1646         /* Write the PF version so that the VF knows which version is
1647          * supported - it might be overridden later. This guarantees that a
1648          * VF can recognize a legacy PF by the lack of versions in the reply.
1649          */
1650         pfdev_info->major_fp_hsi = ETH_HSI_VER_MAJOR;
1651         pfdev_info->minor_fp_hsi = ETH_HSI_VER_MINOR;
1652
1653         /* TODO - not doing anything is bad since we'll assert, but this isn't
1654          * necessarily the right behavior - perhaps we should have allowed some
1655          * versatility here.
1656          */
1657         if (vf->state != VF_FREE &&
1658             vf->state != VF_STOPPED) {
1659                 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1660                            "VF[%d] sent ACQUIRE but is already in state %d - fail request\n",
1661                            vf->abs_vf_id, vf->state);
1662                 goto out;
1663         }
1664
1665         /* Validate FW compatibility */
1666         if (req->vfdev_info.eth_fp_hsi_major != ETH_HSI_VER_MAJOR) {
1667                 if (req->vfdev_info.capabilities &
1668                     VFPF_ACQUIRE_CAP_PRE_FP_HSI) {
1669                         struct vf_pf_vfdev_info *p_vfdev = &req->vfdev_info;
1670
1671                         /* This legacy support would need to be removed once
1672                          * the major has changed.
1673                          */
1674                         OSAL_BUILD_BUG_ON(ETH_HSI_VER_MAJOR != 3);
1675
1676                         DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1677                                    "VF[%d] is pre-fastpath HSI\n",
1678                                    vf->abs_vf_id);
1679                         p_vfdev->eth_fp_hsi_major = ETH_HSI_VER_MAJOR;
1680                         p_vfdev->eth_fp_hsi_minor = ETH_HSI_VER_NO_PKT_LEN_TUNN;
1681                 } else {
1682                         DP_INFO(p_hwfn,
1683                                 "VF[%d] needs fastpath HSI %02x.%02x, which is"
1684                                 " incompatible with loaded FW's fastpath"
1685                                 " HSI %02x.%02x\n",
1686                                 vf->abs_vf_id,
1687                                 req->vfdev_info.eth_fp_hsi_major,
1688                                 req->vfdev_info.eth_fp_hsi_minor,
1689                                 ETH_HSI_VER_MAJOR, ETH_HSI_VER_MINOR);
1690
1691                         goto out;
1692                 }
1693         }
1694
1695         /* On 100g PFs, prevent old VFs from loading */
1696         if ((p_hwfn->p_dev->num_hwfns > 1) &&
1697             !(req->vfdev_info.capabilities & VFPF_ACQUIRE_CAP_100G)) {
1698                 DP_INFO(p_hwfn,
1699                         "VF[%d] is running an old driver that doesn't support"
1700                         " 100g\n",
1701                         vf->abs_vf_id);
1702                 goto out;
1703         }
1704
1705 #ifndef __EXTRACT__LINUX__
1706         if (OSAL_IOV_VF_ACQUIRE(p_hwfn, vf->relative_vf_id) != ECORE_SUCCESS) {
1707                 vfpf_status = PFVF_STATUS_NOT_SUPPORTED;
1708                 goto out;
1709         }
1710 #endif
1711
1712         /* Store the acquire message */
1713         OSAL_MEMCPY(&vf->acquire, req, sizeof(vf->acquire));
1714
1715         vf->opaque_fid = req->vfdev_info.opaque_fid;
1716
1717         vf->vf_bulletin = req->bulletin_addr;
1718         vf->bulletin.size = (vf->bulletin.size < req->bulletin_size) ?
1719             vf->bulletin.size : req->bulletin_size;
1720
1721         /* fill in pfdev info */
1722         pfdev_info->chip_num = p_hwfn->p_dev->chip_num;
1723         pfdev_info->db_size = 0;        /* @@@ TBD MichalK Vf Doorbells */
1724         pfdev_info->indices_per_sb = PIS_PER_SB;
1725
1726         pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED |
1727                                    PFVF_ACQUIRE_CAP_POST_FW_OVERRIDE;
1728         if (p_hwfn->p_dev->num_hwfns > 1)
1729                 pfdev_info->capabilities |= PFVF_ACQUIRE_CAP_100G;
1730
1731         ecore_iov_vf_mbx_acquire_stats(p_hwfn, &pfdev_info->stats_info);
1732
1733         OSAL_MEMCPY(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr,
1734                     ETH_ALEN);
1735
1736         pfdev_info->fw_major = FW_MAJOR_VERSION;
1737         pfdev_info->fw_minor = FW_MINOR_VERSION;
1738         pfdev_info->fw_rev = FW_REVISION_VERSION;
1739         pfdev_info->fw_eng = FW_ENGINEERING_VERSION;
1740
1741         /* Incorrect when legacy, but doesn't matter as legacy isn't reading
1742          * this field.
1743          */
1744         pfdev_info->minor_fp_hsi = OSAL_MIN_T(u8, ETH_HSI_VER_MINOR,
1745                                               req->vfdev_info.eth_fp_hsi_minor);
1746         pfdev_info->os_type = OSAL_IOV_GET_OS_TYPE();
1747         ecore_mcp_get_mfw_ver(p_hwfn, p_ptt, &pfdev_info->mfw_ver,
1748                               OSAL_NULL);
1749
1750         pfdev_info->dev_type = p_hwfn->p_dev->type;
1751         pfdev_info->chip_rev = p_hwfn->p_dev->chip_rev;
1752
1753         /* Fill resources available to VF; Make sure there are enough to
1754          * satisfy the VF's request.
1755          */
1756         vfpf_status = ecore_iov_vf_mbx_acquire_resc(p_hwfn, p_ptt, vf,
1757                                                     &req->resc_request, resc);
1758         if (vfpf_status != PFVF_STATUS_SUCCESS)
1759                 goto out;
1760
1761         /* Start the VF in FW */
1762         rc = ecore_sp_vf_start(p_hwfn, vf);
1763         if (rc != ECORE_SUCCESS) {
1764                 DP_NOTICE(p_hwfn, true, "Failed to start VF[%02x]\n",
1765                           vf->abs_vf_id);
1766                 vfpf_status = PFVF_STATUS_FAILURE;
1767                 goto out;
1768         }
1769
1770         /* Fill agreed size of bulletin board in response, and post
1771          * an initial image to the bulletin board.
1772          */
1773         resp->bulletin_size = vf->bulletin.size;
1774         ecore_iov_post_vf_bulletin(p_hwfn, vf->relative_vf_id, p_ptt);
1775
1776         DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1777                    "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x,"
1778                    " db_size=%d, idx_per_sb=%d, pf_cap=0x%lx\n"
1779                    "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d,"
1780                    " n_vlans-%d\n",
1781                    vf->abs_vf_id, resp->pfdev_info.chip_num,
1782                    resp->pfdev_info.db_size, resp->pfdev_info.indices_per_sb,
1783                    (unsigned long)resp->pfdev_info.capabilities, resc->num_rxqs,
1784                    resc->num_txqs, resc->num_sbs, resc->num_mac_filters,
1785                    resc->num_vlan_filters);
1786
1787         vf->state = VF_ACQUIRED;
1788
1789 out:
1790         /* Prepare Response */
1791         ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_ACQUIRE,
1792                                sizeof(struct pfvf_acquire_resp_tlv),
1793                                vfpf_status);
1794 }
1795
1796 static enum _ecore_status_t
1797 __ecore_iov_spoofchk_set(struct ecore_hwfn *p_hwfn,
1798                          struct ecore_vf_info *p_vf, bool val)
1799 {
1800         struct ecore_sp_vport_update_params params;
1801         enum _ecore_status_t rc;
1802
1803         if (val == p_vf->spoof_chk) {
1804                 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1805                            "Spoofchk value[%d] is already configured\n", val);
1806                 return ECORE_SUCCESS;
1807         }
1808
1809         OSAL_MEMSET(&params, 0, sizeof(struct ecore_sp_vport_update_params));
1810         params.opaque_fid = p_vf->opaque_fid;
1811         params.vport_id = p_vf->vport_id;
1812         params.update_anti_spoofing_en_flg = 1;
1813         params.anti_spoofing_en = val;
1814
1815         rc = ecore_sp_vport_update(p_hwfn, &params, ECORE_SPQ_MODE_EBLOCK,
1816                                    OSAL_NULL);
1817         if (rc == ECORE_SUCCESS) {
1818                 p_vf->spoof_chk = val;
1819                 p_vf->req_spoofchk_val = p_vf->spoof_chk;
1820                 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1821                            "Spoofchk val[%d] configured\n", val);
1822         } else {
1823                 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1824                            "Spoofchk configuration[val:%d] failed for VF[%d]\n",
1825                            val, p_vf->relative_vf_id);
1826         }
1827
1828         return rc;
1829 }
1830
1831 static enum _ecore_status_t
1832 ecore_iov_reconfigure_unicast_vlan(struct ecore_hwfn *p_hwfn,
1833                                    struct ecore_vf_info *p_vf)
1834 {
1835         struct ecore_filter_ucast filter;
1836         enum _ecore_status_t rc = ECORE_SUCCESS;
1837         int i;
1838
1839         OSAL_MEMSET(&filter, 0, sizeof(filter));
1840         filter.is_rx_filter = 1;
1841         filter.is_tx_filter = 1;
1842         filter.vport_to_add_to = p_vf->vport_id;
1843         filter.opcode = ECORE_FILTER_ADD;
1844
1845         /* Reconfigure vlans */
1846         for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
1847                 if (!p_vf->shadow_config.vlans[i].used)
1848                         continue;
1849
1850                 filter.type = ECORE_FILTER_VLAN;
1851                 filter.vlan = p_vf->shadow_config.vlans[i].vid;
1852                 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
1853                            "Reconfiguring VLAN [0x%04x] for VF [%04x]\n",
1854                            filter.vlan, p_vf->relative_vf_id);
1855                 rc = ecore_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
1856                                                &filter, ECORE_SPQ_MODE_CB,
1857                                                OSAL_NULL);
1858                 if (rc) {
1859                         DP_NOTICE(p_hwfn, true,
1860                                   "Failed to configure VLAN [%04x]"
1861                                   " to VF [%04x]\n",
1862                                   filter.vlan, p_vf->relative_vf_id);
1863                         break;
1864                 }
1865         }
1866
1867         return rc;
1868 }
1869
1870 static enum _ecore_status_t
1871 ecore_iov_reconfigure_unicast_shadow(struct ecore_hwfn *p_hwfn,
1872                                      struct ecore_vf_info *p_vf, u64 events)
1873 {
1874         enum _ecore_status_t rc = ECORE_SUCCESS;
1875
1876         /*TODO - what about MACs? */
1877
1878         if ((events & (1 << VLAN_ADDR_FORCED)) &&
1879             !(p_vf->configured_features & (1 << VLAN_ADDR_FORCED)))
1880                 rc = ecore_iov_reconfigure_unicast_vlan(p_hwfn, p_vf);
1881
1882         return rc;
1883 }
1884
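/* Apply hypervisor-forced unicast configuration on the VF's vport. A forced
 * MAC is installed with ECORE_FILTER_REPLACE; a forced VLAN (pvid)
 * installs/flushes the VLAN filter, reconfigures the vport's default-vlan
 * and silent/inner vlan stripping, and updates every active Rx queue.
 * Non-forced shadow configuration is restored afterwards via
 * ecore_iov_reconfigure_unicast_shadow().
 */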
1885 static enum _ecore_status_t
1886 ecore_iov_configure_vport_forced(struct ecore_hwfn *p_hwfn,
1887                                  struct ecore_vf_info *p_vf,
1888                                  u64 events)
1889 {
1890         enum _ecore_status_t rc = ECORE_SUCCESS;
1891         struct ecore_filter_ucast filter;
1892
1893         if (!p_vf->vport_instance)
1894                 return ECORE_INVAL;
1895
1896         if (events & (1 << MAC_ADDR_FORCED)) {
1897                 /* Since there's no way [currently] of removing the MAC,
1898                  * we can always assume this means we need to force it.
1899                  */
1900                 OSAL_MEMSET(&filter, 0, sizeof(filter));
1901                 filter.type = ECORE_FILTER_MAC;
1902                 filter.opcode = ECORE_FILTER_REPLACE;
1903                 filter.is_rx_filter = 1;
1904                 filter.is_tx_filter = 1;
1905                 filter.vport_to_add_to = p_vf->vport_id;
1906                 OSAL_MEMCPY(filter.mac, p_vf->bulletin.p_virt->mac, ETH_ALEN);
1907
1908                 rc = ecore_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
1909                                                &filter,
1910                                                ECORE_SPQ_MODE_CB, OSAL_NULL);
1911                 if (rc) {
1912                         DP_NOTICE(p_hwfn, true,
1913                                   "PF failed to configure MAC for VF\n");
1914                         return rc;
1915                 }
1916
1917                 p_vf->configured_features |= 1 << MAC_ADDR_FORCED;
1918         }
1919
1920         if (events & (1 << VLAN_ADDR_FORCED)) {
1921                 struct ecore_sp_vport_update_params vport_update;
1922                 u8 removal;
1923                 int i;
1924
1925                 OSAL_MEMSET(&filter, 0, sizeof(filter));
1926                 filter.type = ECORE_FILTER_VLAN;
1927                 filter.is_rx_filter = 1;
1928                 filter.is_tx_filter = 1;
1929                 filter.vport_to_add_to = p_vf->vport_id;
1930                 filter.vlan = p_vf->bulletin.p_virt->pvid;
1931                 filter.opcode = filter.vlan ? ECORE_FILTER_REPLACE :
1932                     ECORE_FILTER_FLUSH;
1933
1934                 /* Send the ramrod */
1935                 rc = ecore_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid,
1936                                                &filter,
1937                                                ECORE_SPQ_MODE_CB, OSAL_NULL);
1938                 if (rc) {
1939                         DP_NOTICE(p_hwfn, true,
1940                                   "PF failed to configure VLAN for VF\n");
1941                         return rc;
1942                 }
1943
1944                 /* Update the default-vlan & silent vlan stripping */
1945                 OSAL_MEMSET(&vport_update, 0, sizeof(vport_update));
1946                 vport_update.opaque_fid = p_vf->opaque_fid;
1947                 vport_update.vport_id = p_vf->vport_id;
1948                 vport_update.update_default_vlan_enable_flg = 1;
1949                 vport_update.default_vlan_enable_flg = filter.vlan ? 1 : 0;
1950                 vport_update.update_default_vlan_flg = 1;
1951                 vport_update.default_vlan = filter.vlan;
1952
1953                 vport_update.update_inner_vlan_removal_flg = 1;
1954                 removal = filter.vlan ?
1955                     1 : p_vf->shadow_config.inner_vlan_removal;
1956                 vport_update.inner_vlan_removal_flg = removal;
1957                 vport_update.silent_vlan_removal_flg = filter.vlan ? 1 : 0;
1958                 rc = ecore_sp_vport_update(p_hwfn, &vport_update,
1959                                            ECORE_SPQ_MODE_EBLOCK, OSAL_NULL);
1960                 if (rc) {
1961                         DP_NOTICE(p_hwfn, true,
1962                                   "PF failed to configure VF vport for vlan\n");
1963                         return rc;
1964                 }
1965
1966                 /* Update all the Rx queues */
1967                 for (i = 0; i < ECORE_MAX_VF_CHAINS_PER_PF; i++) {
1968                         struct ecore_vf_queue *p_queue = &p_vf->vf_queues[i];
1969                         struct ecore_queue_cid *p_cid = OSAL_NULL;
1970
1971                         /* There can be at most 1 Rx queue on a qzone. Find it */
1972                         p_cid = ecore_iov_get_vf_rx_queue_cid(p_hwfn, p_vf,
1973                                                               p_queue);
1974                         if (p_cid == OSAL_NULL)
1975                                 continue;
1976
1977                         rc = ecore_sp_eth_rx_queues_update(p_hwfn,
1978                                                            (void **)&p_cid,
1979                                                    1, 0, 1,
1980                                                    ECORE_SPQ_MODE_EBLOCK,
1981                                                    OSAL_NULL);
1982                         if (rc) {
1983                                 DP_NOTICE(p_hwfn, true,
1984                                           "Failed to send Rx update"
1985                                           " for queue[0x%04x]\n",
1986                                           p_cid->rel.queue_id);
1987                                 return rc;
1988                         }
1989                 }
1990
1991                 if (filter.vlan)
1992                         p_vf->configured_features |= 1 << VLAN_ADDR_FORCED;
1993                 else
1994                         p_vf->configured_features &= ~(1 << VLAN_ADDR_FORCED);
1995         }
1996
1997         /* If forced features are terminated, we need to configure the shadow
1998          * configuration back again.
1999          */
2000         if (events)
2001                 ecore_iov_reconfigure_unicast_shadow(p_hwfn, p_vf, events);
2002
2003         return rc;
2004 }
2005
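/* Handle the VF VPORT_START request: configure the VF's status blocks in
 * CAU, honour a hypervisor-forced untagged-default if present in the
 * bulletin, start the vport in FW (Tx-switching enabled except on FPGA),
 * apply any forced MAC/VLAN configuration and the requested spoof-check
 * value, and reply with the overall status.
 */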
2006 static void ecore_iov_vf_mbx_start_vport(struct ecore_hwfn *p_hwfn,
2007                                          struct ecore_ptt *p_ptt,
2008                                          struct ecore_vf_info *vf)
2009 {
2010         struct ecore_sp_vport_start_params params = { 0 };
2011         struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2012         struct vfpf_vport_start_tlv *start;
2013         u8 status = PFVF_STATUS_SUCCESS;
2014         struct ecore_vf_info *vf_info;
2015         u64 *p_bitmap;
2016         int sb_id;
2017         enum _ecore_status_t rc;
2018
2019         vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vf->relative_vf_id, true);
2020         if (!vf_info) {
2021                 DP_NOTICE(p_hwfn->p_dev, true,
2022                           "Failed to get VF info, invalid vfid [%d]\n",
2023                           vf->relative_vf_id);
2024                 return;
2025         }
2026
2027         vf->state = VF_ENABLED;
2028         start = &mbx->req_virt->start_vport;
2029
2030         ecore_iov_enable_vf_traffic(p_hwfn, p_ptt, vf);
2031
2032         /* Initialize Status block in CAU */
2033         for (sb_id = 0; sb_id < vf->num_sbs; sb_id++) {
2034                 if (!start->sb_addr[sb_id]) {
2035                         DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2036                                    "VF[%d] did not fill the address of SB %d\n",
2037                                    vf->relative_vf_id, sb_id);
2038                         break;
2039                 }
2040
2041                 ecore_int_cau_conf_sb(p_hwfn, p_ptt,
2042                                       start->sb_addr[sb_id],
2043                                       vf->igu_sbs[sb_id],
2044                                       vf->abs_vf_id, 1);
2045         }
2046
2047         vf->mtu = start->mtu;
2048         vf->shadow_config.inner_vlan_removal = start->inner_vlan_removal;
2049
2050         /* Take into consideration configuration forced by the hypervisor;
2051          * if none is configured, use the supplied VF values [for old
2052          * VFs that would still be fine, since they passed '0' as padding].
2053          */
2054         p_bitmap = &vf_info->bulletin.p_virt->valid_bitmap;
2055         if (!(*p_bitmap & (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED))) {
2056                 u8 vf_req = start->only_untagged;
2057
2058                 vf_info->bulletin.p_virt->default_only_untagged = vf_req;
2059                 *p_bitmap |= 1 << VFPF_BULLETIN_UNTAGGED_DEFAULT;
2060         }
2061
2062         params.tpa_mode = start->tpa_mode;
2063         params.remove_inner_vlan = start->inner_vlan_removal;
2064         params.tx_switching = true;
2065
2066 #ifndef ASIC_ONLY
2067         if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
2068                 DP_NOTICE(p_hwfn, false,
2069                           "FPGA: Don't config VF for Tx-switching [no pVFC]\n");
2070                 params.tx_switching = false;
2071         }
2072 #endif
2073
2074         params.only_untagged = vf_info->bulletin.p_virt->default_only_untagged;
2075         params.drop_ttl0 = false;
2076         params.concrete_fid = vf->concrete_fid;
2077         params.opaque_fid = vf->opaque_fid;
2078         params.vport_id = vf->vport_id;
2079         params.max_buffers_per_cqe = start->max_buffers_per_cqe;
2080         params.mtu = vf->mtu;
2081         params.check_mac = true;
2082
2083         rc = ecore_sp_eth_vport_start(p_hwfn, &params);
2084         if (rc != ECORE_SUCCESS) {
2085                 DP_ERR(p_hwfn,
2086                        "ecore_iov_vf_mbx_start_vport returned error %d\n", rc);
2087                 status = PFVF_STATUS_FAILURE;
2088         } else {
2089                 vf->vport_instance++;
2090
2091                 /* Force configuration if needed on the newly opened vport */
2092                 ecore_iov_configure_vport_forced(p_hwfn, vf, *p_bitmap);
2093                 OSAL_IOV_POST_START_VPORT(p_hwfn, vf->relative_vf_id,
2094                                           vf->vport_id, vf->opaque_fid);
2095                 __ecore_iov_spoofchk_set(p_hwfn, vf, vf->req_spoofchk_val);
2096         }
2097
2098         ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_START,
2099                                sizeof(struct pfvf_def_resp_tlv), status);
2100 }
2101
2102 static void ecore_iov_vf_mbx_stop_vport(struct ecore_hwfn *p_hwfn,
2103                                         struct ecore_ptt *p_ptt,
2104                                         struct ecore_vf_info *vf)
2105 {
2106         u8 status = PFVF_STATUS_SUCCESS;
2107         enum _ecore_status_t rc;
2108
2109         vf->vport_instance--;
2110         vf->spoof_chk = false;
2111
2112         if ((ecore_iov_validate_active_rxq(p_hwfn, vf)) ||
2113             (ecore_iov_validate_active_txq(p_hwfn, vf))) {
2114                 vf->b_malicious = true;
2115                 DP_NOTICE(p_hwfn, false,
2116                           "VF [%02x] - considered malicious;"
2117                           " Unable to stop RX/TX queues\n",
2118                           vf->abs_vf_id);
2119         }
2120
2121         rc = ecore_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id);
2122         if (rc != ECORE_SUCCESS) {
2123                 DP_ERR(p_hwfn,
2124                        "ecore_iov_vf_mbx_stop_vport returned error %d\n", rc);
2125                 status = PFVF_STATUS_FAILURE;
2126         }
2127
2128         /* Forget the configuration on the vport */
2129         vf->configured_features = 0;
2130         OSAL_MEMSET(&vf->shadow_config, 0, sizeof(vf->shadow_config));
2131
2132         ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_TEARDOWN,
2133                                sizeof(struct pfvf_def_resp_tlv), status);
2134 }
2135
2136 static void ecore_iov_vf_mbx_start_rxq_resp(struct ecore_hwfn *p_hwfn,
2137                                             struct ecore_ptt *p_ptt,
2138                                             struct ecore_vf_info *vf,
2139                                             u8 status, bool b_legacy)
2140 {
2141         struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2142         struct pfvf_start_queue_resp_tlv *p_tlv;
2143         struct vfpf_start_rxq_tlv *req;
2144         u16 length;
2145
2146         mbx->offset = (u8 *)mbx->reply_virt;
2147
2148         /* Taking a bigger struct instead of adding a TLV to the list was a
2149          * mistake, but one which we're now stuck with, as some older
2150          * clients assume the size of the previous response.
2151          */
2152         if (!b_legacy)
2153                 length = sizeof(*p_tlv);
2154         else
2155                 length = sizeof(struct pfvf_def_resp_tlv);
2156
2157         p_tlv = ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_RXQ,
2158                               length);
2159         ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
2160                       sizeof(struct channel_list_end_tlv));
2161
2162         /* Update the TLV with the response */
2163         if ((status == PFVF_STATUS_SUCCESS) && !b_legacy) {
2164                 req = &mbx->req_virt->start_rxq;
2165                 p_tlv->offset = PXP_VF_BAR0_START_MSDM_ZONE_B +
2166                                 OFFSETOF(struct mstorm_vf_zone,
2167                                          non_trigger.eth_rx_queue_producers) +
2168                                 sizeof(struct eth_rx_prod_data) * req->rx_qid;
2169         }
2170
2171         ecore_iov_send_response(p_hwfn, p_ptt, vf, length, status);
2172 }
2173
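/* Handle the VF START_RXQ request: validate the queue/SB indices, build a
 * queue-cid (marking pre-HSI VFs as legacy, since they assume queue X uses
 * CID X and compute their own producer location), zero the MSTORM producer
 * for non-legacy VFs and send the Rx start ramrod. qid_usage_idx is fixed
 * at '0' for Rx until the extended TLV is used.
 */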
2174 static void ecore_iov_vf_mbx_start_rxq(struct ecore_hwfn *p_hwfn,
2175                                        struct ecore_ptt *p_ptt,
2176                                        struct ecore_vf_info *vf)
2177 {
2178         struct ecore_queue_start_common_params params;
2179         struct ecore_queue_cid_vf_params vf_params;
2180         struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2181         u8 status = PFVF_STATUS_NO_RESOURCE;
2182         struct ecore_vf_queue *p_queue;
2183         struct vfpf_start_rxq_tlv *req;
2184         struct ecore_queue_cid *p_cid;
2185         bool b_legacy_vf = false;
2186         u8 qid_usage_idx;
2187         enum _ecore_status_t rc;
2188
2189         req = &mbx->req_virt->start_rxq;
2190
2191         if (!ecore_iov_validate_rxq(p_hwfn, vf, req->rx_qid,
2192                                     ECORE_IOV_VALIDATE_Q_DISABLE) ||
2193             !ecore_iov_validate_sb(p_hwfn, vf, req->hw_sb))
2194                 goto out;
2195
2196         /* Legacy VFs made assumptions on the CID their queues connected to,
2197          * assuming queue X used CID X.
2198          * TODO - need to validate that there was no official release post
2199          * the current legacy scheme that still made that assumption.
2200          */
2201         if (vf->acquire.vfdev_info.eth_fp_hsi_minor ==
2202             ETH_HSI_VER_NO_PKT_LEN_TUNN)
2203                 b_legacy_vf = true;
2204
2205         /* Acquire a new queue-cid */
2206         p_queue = &vf->vf_queues[req->rx_qid];
2207
2208         OSAL_MEMSET(&params, 0, sizeof(params));
2209         params.queue_id = (u8)p_queue->fw_rx_qid;
2210         params.vport_id = vf->vport_id;
2211         params.stats_id = vf->abs_vf_id + 0x10;
2212         params.sb = req->hw_sb;
2213         params.sb_idx = req->sb_index;
2214
2215         /* TODO - set qid_usage_idx according to extended TLV. For now, use
2216          * '0' for Rx.
2217          */
2218         qid_usage_idx = 0;
2219
2220         OSAL_MEM_ZERO(&vf_params, sizeof(vf_params));
2221         vf_params.vfid = vf->relative_vf_id;
2222         vf_params.vf_qid = (u8)req->rx_qid;
2223         vf_params.b_legacy = b_legacy_vf;
2224         vf_params.qid_usage_idx = qid_usage_idx;
2225
2226         p_cid = ecore_eth_queue_to_cid(p_hwfn, vf->opaque_fid,
2227                                        &params, &vf_params);
2228         if (p_cid == OSAL_NULL)
2229                 goto out;
2230
2231         /* Legacy VFs have their producers in a different location, which
2232          * they calculate on their own and clean prior to this.
2233          */
2234         if (!b_legacy_vf)
2235                 REG_WR(p_hwfn,
2236                        GTT_BAR0_MAP_REG_MSDM_RAM +
2237                        MSTORM_ETH_VF_PRODS_OFFSET(vf->abs_vf_id, req->rx_qid),
2238                        0);
2239
2240         rc = ecore_eth_rxq_start_ramrod(p_hwfn, p_cid,
2241                                         req->bd_max_bytes,
2242                                         req->rxq_addr,
2243                                         req->cqe_pbl_addr,
2244                                         req->cqe_pbl_size);
2245         if (rc != ECORE_SUCCESS) {
2246                 status = PFVF_STATUS_FAILURE;
2247                 ecore_eth_queue_cid_release(p_hwfn, p_cid);
2248         } else {
2249                 p_queue->cids[qid_usage_idx].p_cid = p_cid;
2250                 p_queue->cids[qid_usage_idx].b_is_tx = false;
2251                 status = PFVF_STATUS_SUCCESS;
2252                 vf->num_active_rxqs++;
2253         }
2254
2255 out:
2256         ecore_iov_vf_mbx_start_rxq_resp(p_hwfn, p_ptt, vf, status,
2257                                         b_legacy_vf);
2258 }
2259
2260 static void
2261 ecore_iov_pf_update_tun_response(struct pfvf_update_tunn_param_tlv *p_resp,
2262                                  struct ecore_tunnel_info *p_tun,
2263                                  u16 tunn_feature_mask)
2264 {
2265         p_resp->tunn_feature_mask = tunn_feature_mask;
2266         p_resp->vxlan_mode = p_tun->vxlan.b_mode_enabled;
2267         p_resp->l2geneve_mode = p_tun->l2_geneve.b_mode_enabled;
2268         p_resp->ipgeneve_mode = p_tun->ip_geneve.b_mode_enabled;
2269         p_resp->l2gre_mode = p_tun->l2_gre.b_mode_enabled;
2270         p_resp->ipgre_mode = p_tun->ip_gre.b_mode_enabled;
2271         p_resp->vxlan_clss = p_tun->vxlan.tun_cls;
2272         p_resp->l2gre_clss = p_tun->l2_gre.tun_cls;
2273         p_resp->ipgre_clss = p_tun->ip_gre.tun_cls;
2274         p_resp->l2geneve_clss = p_tun->l2_geneve.tun_cls;
2275         p_resp->ipgeneve_clss = p_tun->ip_geneve.tun_cls;
2276         p_resp->geneve_udp_port = p_tun->geneve_port.port;
2277         p_resp->vxlan_udp_port = p_tun->vxlan_port.port;
2278 }
2279
2280 static void
2281 __ecore_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req,
2282                                 struct ecore_tunn_update_type *p_tun,
2283                                 enum ecore_tunn_mode mask, u8 tun_cls)
2284 {
2285         if (p_req->tun_mode_update_mask & (1 << mask)) {
2286                 p_tun->b_update_mode = true;
2287
2288                 if (p_req->tunn_mode & (1 << mask))
2289                         p_tun->b_mode_enabled = true;
2290         }
2291
2292         p_tun->tun_cls = tun_cls;
2293 }
2294
2295 static void
2296 ecore_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req,
2297                               struct ecore_tunn_update_type *p_tun,
2298                               struct ecore_tunn_update_udp_port *p_port,
2299                               enum ecore_tunn_mode mask,
2300                               u8 tun_cls, u8 update_port, u16 port)
2301 {
2302         if (update_port) {
2303                 p_port->b_update_port = true;
2304                 p_port->port = port;
2305         }
2306
2307         __ecore_iov_pf_update_tun_param(p_req, p_tun, mask, tun_cls);
2308 }
2309
2310 static bool
2311 ecore_iov_pf_validate_tunn_param(struct vfpf_update_tunn_param_tlv *p_req)
2312 {
2313         bool b_update_requested = false;
2314
2315         if (p_req->tun_mode_update_mask || p_req->update_tun_cls ||
2316             p_req->update_geneve_port || p_req->update_vxlan_port)
2317                 b_update_requested = true;
2318
2319         return b_update_requested;
2320 }
2321
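/* Handle the VF UPDATE_TUNN_PARAM request: translate the requested tunnel
 * modes/classes and UDP ports into an ecore_tunnel_info, let the OSAL client
 * validate (and possibly modify) it, apply it through the PF-update ramrod
 * if required, propagate the resulting UDP ports to all VF bulletins, and
 * reply with the PF's current tunnel configuration.
 */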
2322 static void ecore_iov_vf_mbx_update_tunn_param(struct ecore_hwfn *p_hwfn,
2323                                                struct ecore_ptt *p_ptt,
2324                                                struct ecore_vf_info *p_vf)
2325 {
2326         struct ecore_tunnel_info *p_tun = &p_hwfn->p_dev->tunnel;
2327         struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
2328         struct pfvf_update_tunn_param_tlv *p_resp;
2329         struct vfpf_update_tunn_param_tlv *p_req;
2330         enum _ecore_status_t rc = ECORE_SUCCESS;
2331         u8 status = PFVF_STATUS_SUCCESS;
2332         bool b_update_required = false;
2333         struct ecore_tunnel_info tunn;
2334         u16 tunn_feature_mask = 0;
2335         int i;
2336
2337         mbx->offset = (u8 *)mbx->reply_virt;
2338
2339         OSAL_MEM_ZERO(&tunn, sizeof(tunn));
2340         p_req = &mbx->req_virt->tunn_param_update;
2341
2342         if (!ecore_iov_pf_validate_tunn_param(p_req)) {
2343                 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2344                            "No tunnel update requested by VF\n");
2345                 status = PFVF_STATUS_FAILURE;
2346                 goto send_resp;
2347         }
2348
2349         tunn.b_update_rx_cls = p_req->update_tun_cls;
2350         tunn.b_update_tx_cls = p_req->update_tun_cls;
2351
2352         ecore_iov_pf_update_tun_param(p_req, &tunn.vxlan, &tunn.vxlan_port,
2353                                       ECORE_MODE_VXLAN_TUNN, p_req->vxlan_clss,
2354                                       p_req->update_vxlan_port,
2355                                       p_req->vxlan_port);
2356         ecore_iov_pf_update_tun_param(p_req, &tunn.l2_geneve, &tunn.geneve_port,
2357                                       ECORE_MODE_L2GENEVE_TUNN,
2358                                       p_req->l2geneve_clss,
2359                                       p_req->update_geneve_port,
2360                                       p_req->geneve_port);
2361         __ecore_iov_pf_update_tun_param(p_req, &tunn.ip_geneve,
2362                                         ECORE_MODE_IPGENEVE_TUNN,
2363                                         p_req->ipgeneve_clss);
2364         __ecore_iov_pf_update_tun_param(p_req, &tunn.l2_gre,
2365                                         ECORE_MODE_L2GRE_TUNN,
2366                                         p_req->l2gre_clss);
2367         __ecore_iov_pf_update_tun_param(p_req, &tunn.ip_gre,
2368                                         ECORE_MODE_IPGRE_TUNN,
2369                                         p_req->ipgre_clss);
2370
2371         /* If the PF modifies the VF's request, it should still return an
2372          * error in case the applied configuration is partial or modified
2373          * as opposed to the one requested.
2374          */
2375         rc = OSAL_PF_VALIDATE_MODIFY_TUNN_CONFIG(p_hwfn, &tunn_feature_mask,
2376                                                  &b_update_required, &tunn);
2377
2378         if (rc != ECORE_SUCCESS)
2379                 status = PFVF_STATUS_FAILURE;
2380
2381         /* Does the ECORE client want anything to be updated? */
2382         if (b_update_required) {
2383                 u16 geneve_port;
2384
2385                 rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, &tunn,
2386                                                  ECORE_SPQ_MODE_EBLOCK,
2387                                                  OSAL_NULL);
2388                 if (rc != ECORE_SUCCESS)
2389                         status = PFVF_STATUS_FAILURE;
2390
2391                 geneve_port = p_tun->geneve_port.port;
2392                 ecore_for_each_vf(p_hwfn, i) {
2393                         ecore_iov_bulletin_set_udp_ports(p_hwfn, i,
2394                                                          p_tun->vxlan_port.port,
2395                                                          geneve_port);
2396                 }
2397         }
2398
2399 send_resp:
2400         p_resp = ecore_add_tlv(p_hwfn, &mbx->offset,
2401                                CHANNEL_TLV_UPDATE_TUNN_PARAM, sizeof(*p_resp));
2402
2403         ecore_iov_pf_update_tun_response(p_resp, p_tun, tunn_feature_mask);
2404         ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
2405                       sizeof(struct channel_list_end_tlv));
2406
2407         ecore_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status);
2408 }
2409
2410 static void ecore_iov_vf_mbx_start_txq_resp(struct ecore_hwfn *p_hwfn,
2411                                             struct ecore_ptt *p_ptt,
2412                                             struct ecore_vf_info *p_vf,
2413                                             u32 cid,
2414                                             u8 status)
2415 {
2416         struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
2417         struct pfvf_start_queue_resp_tlv *p_tlv;
2418         bool b_legacy = false;
2419         u16 length;
2420
2421         mbx->offset = (u8 *)mbx->reply_virt;
2422
2423         /* Taking a bigger struct instead of adding a TLV to the list was a
2424          * mistake, but one which we're now stuck with, as some older
2425          * clients assume the size of the previous response.
2426          */
2427         if (p_vf->acquire.vfdev_info.eth_fp_hsi_minor ==
2428             ETH_HSI_VER_NO_PKT_LEN_TUNN)
2429                 b_legacy = true;
2430
2431         if (!b_legacy)
2432                 length = sizeof(*p_tlv);
2433         else
2434                 length = sizeof(struct pfvf_def_resp_tlv);
2435
2436         p_tlv = ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_START_TXQ,
2437                               length);
2438         ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
2439                       sizeof(struct channel_list_end_tlv));
2440
2441         /* Update the TLV with the response */
2442         if ((status == PFVF_STATUS_SUCCESS) && !b_legacy)
2443                 p_tlv->offset = DB_ADDR_VF(cid, DQ_DEMS_LEGACY);
2444
2445         ecore_iov_send_response(p_hwfn, p_ptt, p_vf, length, status);
2446 }
2447
2448 static void ecore_iov_vf_mbx_start_txq(struct ecore_hwfn *p_hwfn,
2449                                        struct ecore_ptt *p_ptt,
2450                                        struct ecore_vf_info *vf)
2451 {
2452         struct ecore_queue_start_common_params params;
2453         struct ecore_queue_cid_vf_params vf_params;
2454         struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2455         u8 status = PFVF_STATUS_NO_RESOURCE;
2456         struct ecore_vf_queue *p_queue;
2457         struct vfpf_start_txq_tlv *req;
2458         struct ecore_queue_cid *p_cid;
2459         bool b_legacy_vf = false;
2460         u8 qid_usage_idx;
2461         u32 cid = 0;
2462         enum _ecore_status_t rc;
2463         u16 pq;
2464
2465         OSAL_MEMSET(&params, 0, sizeof(params));
2466         req = &mbx->req_virt->start_txq;
2467
2468         if (!ecore_iov_validate_txq(p_hwfn, vf, req->tx_qid,
2469                                     ECORE_IOV_VALIDATE_Q_NA) ||
2470             !ecore_iov_validate_sb(p_hwfn, vf, req->hw_sb))
2471                 goto out;
2472
2473         /* In case this is a legacy VF - need to know to use the right cids.
2474          * TODO - need to validate that there was no official release post
2475          * the current legacy scheme that still made that assumption.
2476          */
2477         if (vf->acquire.vfdev_info.eth_fp_hsi_minor ==
2478             ETH_HSI_VER_NO_PKT_LEN_TUNN)
2479                 b_legacy_vf = true;
2480
2481         /* Acquire a new queue-cid */
2482         p_queue = &vf->vf_queues[req->tx_qid];
2483
2484         params.queue_id = p_queue->fw_tx_qid;
2485         params.vport_id = vf->vport_id;
2486         params.stats_id = vf->abs_vf_id + 0x10;
2487         params.sb = req->hw_sb;
2488         params.sb_idx = req->sb_index;
2489
2490         /* TODO - set qid_usage_idx according to extended TLV. For now, use
2491          * '1' for Tx.
2492          */
2493         qid_usage_idx = 1;
2494
2495         if (p_queue->cids[qid_usage_idx].p_cid)
2496                 goto out;
2497
2498         OSAL_MEM_ZERO(&vf_params, sizeof(vf_params));
2499         vf_params.vfid = vf->relative_vf_id;
2500         vf_params.vf_qid = (u8)req->tx_qid;
2501         vf_params.b_legacy = b_legacy_vf;
2502         vf_params.qid_usage_idx = qid_usage_idx;
2503
2504         p_cid = ecore_eth_queue_to_cid(p_hwfn, vf->opaque_fid,
2505                                        &params, &vf_params);
2506         if (p_cid == OSAL_NULL)
2507                 goto out;
2508
2509         pq = ecore_get_cm_pq_idx_vf(p_hwfn,
2510                                     vf->relative_vf_id);
2511         rc = ecore_eth_txq_start_ramrod(p_hwfn, p_cid,
2512                                         req->pbl_addr, req->pbl_size, pq);
2513         if (rc != ECORE_SUCCESS) {
2514                 status = PFVF_STATUS_FAILURE;
2515                 ecore_eth_queue_cid_release(p_hwfn, p_cid);
2516         } else {
2517                 status = PFVF_STATUS_SUCCESS;
2518                 p_queue->cids[qid_usage_idx].p_cid = p_cid;
2519                 p_queue->cids[qid_usage_idx].b_is_tx = true;
2520                 cid = p_cid->cid;
2521         }
2522
2523 out:
2524         ecore_iov_vf_mbx_start_txq_resp(p_hwfn, p_ptt, vf,
2525                                         cid, status);
2526 }
2527
2528 static enum _ecore_status_t ecore_iov_vf_stop_rxqs(struct ecore_hwfn *p_hwfn,
2529                                                    struct ecore_vf_info *vf,
2530                                                    u16 rxq_id,
2531                                                    u8 num_rxqs,
2532                                                    bool cqe_completion)
2533 {
2534         enum _ecore_status_t rc = ECORE_SUCCESS;
2535         int qid, i;
2536
2537         /* TODO - improve validation [wrap around] */
2538         if (rxq_id + num_rxqs > OSAL_ARRAY_SIZE(vf->vf_queues))
2539                 return ECORE_INVAL;
2540
2541         for (qid = rxq_id; qid < rxq_id + num_rxqs; qid++) {
2542                 struct ecore_vf_queue *p_queue = &vf->vf_queues[qid];
2543                 struct ecore_queue_cid **pp_cid = OSAL_NULL;
2544
2545                 /* There can be at most a single Rx per qzone. Find it */
2546                 for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
2547                         if (p_queue->cids[i].p_cid &&
2548                             !p_queue->cids[i].b_is_tx) {
2549                                 pp_cid = &p_queue->cids[i].p_cid;
2550                                 break;
2551                         }
2552                 }
2553                 if (pp_cid == OSAL_NULL) {
2554                         DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2555                                    "Ignoring VF[%02x] request of closing Rx queue %04x - closed\n",
2556                                    vf->relative_vf_id, qid);
2557                         continue;
2558                 }
2559
2560                 rc = ecore_eth_rx_queue_stop(p_hwfn, *pp_cid,
2561                                              false, cqe_completion);
2562                 if (rc != ECORE_SUCCESS)
2563                         return rc;
2564
2565                 *pp_cid = OSAL_NULL;
2566                 vf->num_active_rxqs--;
2567         }
2568
2569         return rc;
2570 }
2571
2572 static enum _ecore_status_t ecore_iov_vf_stop_txqs(struct ecore_hwfn *p_hwfn,
2573                                                    struct ecore_vf_info *vf,
2574                                                    u16 txq_id, u8 num_txqs)
2575 {
2576         enum _ecore_status_t rc = ECORE_SUCCESS;
2577         struct ecore_vf_queue *p_queue;
2578         int qid, j;
2579
2580         if (!ecore_iov_validate_txq(p_hwfn, vf, txq_id,
2581                                     ECORE_IOV_VALIDATE_Q_NA) ||
2582             !ecore_iov_validate_txq(p_hwfn, vf, txq_id + num_txqs,
2583                                     ECORE_IOV_VALIDATE_Q_NA))
2584                 return ECORE_INVAL;
2585
2586         for (qid = txq_id; qid < txq_id + num_txqs; qid++) {
2587                 p_queue = &vf->vf_queues[qid];
2588                 for (j = 0; j < MAX_QUEUES_PER_QZONE; j++) {
2589                         if (p_queue->cids[j].p_cid == OSAL_NULL)
2590                                 continue;
2591
2592                         if (!p_queue->cids[j].b_is_tx)
2593                                 continue;
2594
2595                         rc = ecore_eth_tx_queue_stop(p_hwfn,
2596                                                      p_queue->cids[j].p_cid);
2597                         if (rc != ECORE_SUCCESS)
2598                                 return rc;
2599
2600                         p_queue->cids[j].p_cid = OSAL_NULL;
2601                 }
2602         }
2603
2604         return rc;
2605 }
2606
2607 static void ecore_iov_vf_mbx_stop_rxqs(struct ecore_hwfn *p_hwfn,
2608                                        struct ecore_ptt *p_ptt,
2609                                        struct ecore_vf_info *vf)
2610 {
2611         u16 length = sizeof(struct pfvf_def_resp_tlv);
2612         struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2613         u8 status = PFVF_STATUS_SUCCESS;
2614         struct vfpf_stop_rxqs_tlv *req;
2615         enum _ecore_status_t rc;
2616
2617         /* We give the option of starting from qid != 0; in that case we
2618          * need to make sure that qid + num_qs doesn't exceed the actual
2619          * number of queues that exist.
2620          */
2621         req = &mbx->req_virt->stop_rxqs;
2622         rc = ecore_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid,
2623                                     req->num_rxqs, req->cqe_completion);
2624         if (rc)
2625                 status = PFVF_STATUS_FAILURE;
2626
2627         ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_RXQS,
2628                                length, status);
2629 }
2630
2631 static void ecore_iov_vf_mbx_stop_txqs(struct ecore_hwfn *p_hwfn,
2632                                        struct ecore_ptt *p_ptt,
2633                                        struct ecore_vf_info *vf)
2634 {
2635         u16 length = sizeof(struct pfvf_def_resp_tlv);
2636         struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2637         u8 status = PFVF_STATUS_SUCCESS;
2638         struct vfpf_stop_txqs_tlv *req;
2639         enum _ecore_status_t rc;
2640
2641         /* We give the option of starting from qid != 0; in that case we
2642          * need to make sure that qid + num_qs doesn't exceed the actual
2643          * number of queues that exist.
2644          */
2645         req = &mbx->req_virt->stop_txqs;
2646         rc = ecore_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid, req->num_txqs);
2647         if (rc)
2648                 status = PFVF_STATUS_FAILURE;
2649
2650         ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_TXQS,
2651                                length, status);
2652 }
2653
2654 static void ecore_iov_vf_mbx_update_rxqs(struct ecore_hwfn *p_hwfn,
2655                                          struct ecore_ptt *p_ptt,
2656                                          struct ecore_vf_info *vf)
2657 {
2658         struct ecore_queue_cid *handlers[ECORE_MAX_VF_CHAINS_PER_PF];
2659         u16 length = sizeof(struct pfvf_def_resp_tlv);
2660         struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
2661         struct vfpf_update_rxq_tlv *req;
2662         u8 status = PFVF_STATUS_FAILURE;
2663         u8 complete_event_flg;
2664         u8 complete_cqe_flg;
2665         enum _ecore_status_t rc;
2666         u16 i;
2667
2668         req = &mbx->req_virt->update_rxq;
2669         complete_cqe_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_CQE_FLAG);
2670         complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG);
2671
2672         /* Validate inputs */
2673         for (i = req->rx_qid; i < req->rx_qid + req->num_rxqs; i++) {
2674                 if (!ecore_iov_validate_rxq(p_hwfn, vf, i,
2675                                             ECORE_IOV_VALIDATE_Q_ENABLE)) {
2676                         DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2677                                    "VF[%d]: Incorrect Rxqs [%04x, %02x]\n",
2678                                    vf->relative_vf_id, req->rx_qid,
2679                                    req->num_rxqs);
2680                         goto out;
2681                 }
2682         }
2683
2684         for (i = 0; i < req->num_rxqs; i++) {
2685                 struct ecore_vf_queue *p_queue;
2686                 u16 qid = req->rx_qid + i;
2687
2688                 p_queue = &vf->vf_queues[qid];
2689                 handlers[i] = ecore_iov_get_vf_rx_queue_cid(p_hwfn, vf,
2690                                                             p_queue);
2691         }
2692
2693         rc = ecore_sp_eth_rx_queues_update(p_hwfn, (void **)&handlers,
2694                                            req->num_rxqs,
2695                                            complete_cqe_flg,
2696                                            complete_event_flg,
2697                                            ECORE_SPQ_MODE_EBLOCK,
2698                                            OSAL_NULL);
2699         if (rc)
2700                 goto out;
2701
2702         status = PFVF_STATUS_SUCCESS;
2703 out:
2704         ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UPDATE_RXQ,
2705                                length, status);
2706 }
2707
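/* Walk the TLV chain of a VF request buffer and return the first TLV whose
 * type matches req_type. Returns OSAL_NULL if a zero-length TLV is met, the
 * chain overruns TLV_BUFFER_SIZE, or CHANNEL_TLV_LIST_END is reached without
 * a match.
 */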
2708 void *ecore_iov_search_list_tlvs(struct ecore_hwfn *p_hwfn,
2709                                  void *p_tlvs_list, u16 req_type)
2710 {
2711         struct channel_tlv *p_tlv = (struct channel_tlv *)p_tlvs_list;
2712         int len = 0;
2713
2714         do {
2715                 if (!p_tlv->length) {
2716                         DP_NOTICE(p_hwfn, true, "Zero length TLV found\n");
2717                         return OSAL_NULL;
2718                 }
2719
2720                 if (p_tlv->type == req_type) {
2721                         DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2722                                    "Extended tlv type %s, length %d found\n",
2723                                    ecore_channel_tlvs_string[p_tlv->type],
2724                                    p_tlv->length);
2725                         return p_tlv;
2726                 }
2727
2728                 len += p_tlv->length;
2729                 p_tlv = (struct channel_tlv *)((u8 *)p_tlv + p_tlv->length);
2730
2731                 if ((len + p_tlv->length) > TLV_BUFFER_SIZE) {
2732                         DP_NOTICE(p_hwfn, true,
2733                                   "TLVs have overrun the buffer size\n");
2734                         return OSAL_NULL;
2735                 }
2736         } while (p_tlv->type != CHANNEL_TLV_LIST_END);
2737
2738         return OSAL_NULL;
2739 }
2740
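/* Each of the ecore_iov_vp_update_*() helpers below searches the VF request
 * for one specific extended TLV; when found, the VF-provided values are
 * copied into the vport-update parameters and the matching bit is set in
 * tlvs_mask.
 */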
2741 static void
2742 ecore_iov_vp_update_act_param(struct ecore_hwfn *p_hwfn,
2743                               struct ecore_sp_vport_update_params *p_data,
2744                               struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2745 {
2746         struct vfpf_vport_update_activate_tlv *p_act_tlv;
2747         u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE;
2748
2749         p_act_tlv = (struct vfpf_vport_update_activate_tlv *)
2750             ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2751         if (!p_act_tlv)
2752                 return;
2753
2754         p_data->update_vport_active_rx_flg = p_act_tlv->update_rx;
2755         p_data->vport_active_rx_flg = p_act_tlv->active_rx;
2756         p_data->update_vport_active_tx_flg = p_act_tlv->update_tx;
2757         p_data->vport_active_tx_flg = p_act_tlv->active_tx;
2758         *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_ACTIVATE;
2759 }
2760
2761 static void
2762 ecore_iov_vp_update_vlan_param(struct ecore_hwfn *p_hwfn,
2763                                struct ecore_sp_vport_update_params *p_data,
2764                                struct ecore_vf_info *p_vf,
2765                                struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2766 {
2767         struct vfpf_vport_update_vlan_strip_tlv *p_vlan_tlv;
2768         u16 tlv = CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP;
2769
2770         p_vlan_tlv = (struct vfpf_vport_update_vlan_strip_tlv *)
2771             ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2772         if (!p_vlan_tlv)
2773                 return;
2774
2775         p_vf->shadow_config.inner_vlan_removal = p_vlan_tlv->remove_vlan;
2776
2777         /* Ignore the VF request if we're forcing a vlan */
2778         if (!(p_vf->configured_features & (1 << VLAN_ADDR_FORCED))) {
2779                 p_data->update_inner_vlan_removal_flg = 1;
2780                 p_data->inner_vlan_removal_flg = p_vlan_tlv->remove_vlan;
2781         }
2782
2783         *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_VLAN_STRIP;
2784 }
2785
2786 static void
2787 ecore_iov_vp_update_tx_switch(struct ecore_hwfn *p_hwfn,
2788                               struct ecore_sp_vport_update_params *p_data,
2789                               struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2790 {
2791         struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv;
2792         u16 tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH;
2793
2794         p_tx_switch_tlv = (struct vfpf_vport_update_tx_switch_tlv *)
2795             ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2796         if (!p_tx_switch_tlv)
2797                 return;
2798
2799 #ifndef ASIC_ONLY
2800         if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
2801                 DP_NOTICE(p_hwfn, false,
2802                           "FPGA: Ignore tx-switching configuration originating"
2803                           " from VFs\n");
2804                 return;
2805         }
2806 #endif
2807
2808         p_data->update_tx_switching_flg = 1;
2809         p_data->tx_switching_flg = p_tx_switch_tlv->tx_switching;
2810         *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_TX_SWITCH;
2811 }
2812
2813 static void
2814 ecore_iov_vp_update_mcast_bin_param(struct ecore_hwfn *p_hwfn,
2815                                     struct ecore_sp_vport_update_params *p_data,
2816                                     struct ecore_iov_vf_mbx *p_mbx,
2817                                     u16 *tlvs_mask)
2818 {
2819         struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv;
2820         u16 tlv = CHANNEL_TLV_VPORT_UPDATE_MCAST;
2821
2822         p_mcast_tlv = (struct vfpf_vport_update_mcast_bin_tlv *)
2823             ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2824         if (!p_mcast_tlv)
2825                 return;
2826
2827         p_data->update_approx_mcast_flg = 1;
2828         OSAL_MEMCPY(p_data->bins, p_mcast_tlv->bins,
2829                     sizeof(unsigned long) *
2830                     ETH_MULTICAST_MAC_BINS_IN_REGS);
2831         *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_MCAST;
2832 }
2833
2834 static void
2835 ecore_iov_vp_update_accept_flag(struct ecore_hwfn *p_hwfn,
2836                                 struct ecore_sp_vport_update_params *p_data,
2837                                 struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask)
2838 {
2839         struct ecore_filter_accept_flags *p_flags = &p_data->accept_flags;
2840         struct vfpf_vport_update_accept_param_tlv *p_accept_tlv;
2841         u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM;
2842
2843         p_accept_tlv = (struct vfpf_vport_update_accept_param_tlv *)
2844             ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2845         if (!p_accept_tlv)
2846                 return;
2847
2848         p_flags->update_rx_mode_config = p_accept_tlv->update_rx_mode;
2849         p_flags->rx_accept_filter = p_accept_tlv->rx_accept_filter;
2850         p_flags->update_tx_mode_config = p_accept_tlv->update_tx_mode;
2851         p_flags->tx_accept_filter = p_accept_tlv->tx_accept_filter;
2852         *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_ACCEPT_PARAM;
2853 }
2854
2855 static void
2856 ecore_iov_vp_update_accept_any_vlan(struct ecore_hwfn *p_hwfn,
2857                                     struct ecore_sp_vport_update_params *p_data,
2858                                     struct ecore_iov_vf_mbx *p_mbx,
2859                                     u16 *tlvs_mask)
2860 {
2861         struct vfpf_vport_update_accept_any_vlan_tlv *p_accept_any_vlan;
2862         u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN;
2863
2864         p_accept_any_vlan = (struct vfpf_vport_update_accept_any_vlan_tlv *)
2865             ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2866         if (!p_accept_any_vlan)
2867                 return;
2868
2869         p_data->accept_any_vlan = p_accept_any_vlan->accept_any_vlan;
2870         p_data->update_accept_any_vlan_flg =
2871                         p_accept_any_vlan->update_accept_any_vlan_flg;
2872         *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_ACCEPT_ANY_VLAN;
2873 }
2874
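/* Unlike the simpler extended TLVs, RSS needs per-queue validation - each
 * indirection table entry is translated from a VF queue index into an Rx
 * queue CID, and the TLV is accepted only if every referenced queue is a
 * valid, enabled Rx queue.
 */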
2875 static void
2876 ecore_iov_vp_update_rss_param(struct ecore_hwfn *p_hwfn,
2877                               struct ecore_vf_info *vf,
2878                               struct ecore_sp_vport_update_params *p_data,
2879                               struct ecore_rss_params *p_rss,
2880                               struct ecore_iov_vf_mbx *p_mbx,
2881                               u16 *tlvs_mask, u16 *tlvs_accepted)
2882 {
2883         struct vfpf_vport_update_rss_tlv *p_rss_tlv;
2884         u16 tlv = CHANNEL_TLV_VPORT_UPDATE_RSS;
2885         bool b_reject = false;
2886         u16 table_size;
2887         u16 i, q_idx;
2888
2889         p_rss_tlv = (struct vfpf_vport_update_rss_tlv *)
2890             ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2891         if (!p_rss_tlv) {
2892                 p_data->rss_params = OSAL_NULL;
2893                 return;
2894         }
2895
2896         OSAL_MEMSET(p_rss, 0, sizeof(struct ecore_rss_params));
2897
2898         p_rss->update_rss_config =
2899             !!(p_rss_tlv->update_rss_flags &
2900                 VFPF_UPDATE_RSS_CONFIG_FLAG);
2901         p_rss->update_rss_capabilities =
2902             !!(p_rss_tlv->update_rss_flags &
2903                 VFPF_UPDATE_RSS_CAPS_FLAG);
2904         p_rss->update_rss_ind_table =
2905             !!(p_rss_tlv->update_rss_flags &
2906                 VFPF_UPDATE_RSS_IND_TABLE_FLAG);
2907         p_rss->update_rss_key =
2908             !!(p_rss_tlv->update_rss_flags &
2909                 VFPF_UPDATE_RSS_KEY_FLAG);
2910
2911         p_rss->rss_enable = p_rss_tlv->rss_enable;
2912         p_rss->rss_eng_id = vf->rss_eng_id;
2913         p_rss->rss_caps = p_rss_tlv->rss_caps;
2914         p_rss->rss_table_size_log = p_rss_tlv->rss_table_size_log;
2915         OSAL_MEMCPY(p_rss->rss_key, p_rss_tlv->rss_key,
2916                     sizeof(p_rss->rss_key));
2917
2918         table_size = OSAL_MIN_T(u16, OSAL_ARRAY_SIZE(p_rss->rss_ind_table),
2919                                 (1 << p_rss_tlv->rss_table_size_log));
2920
2921         for (i = 0; i < table_size; i++) {
2922                 struct ecore_queue_cid *p_cid;
2923
2924                 q_idx = p_rss_tlv->rss_ind_table[i];
2925                 if (!ecore_iov_validate_rxq(p_hwfn, vf, q_idx,
2926                                             ECORE_IOV_VALIDATE_Q_ENABLE)) {
2927                         DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
2928                                    "VF[%d]: Omitting RSS due to wrong queue %04x\n",
2929                                    vf->relative_vf_id, q_idx);
2930                         b_reject = true;
2931                         goto out;
2932                 }
2933
2934                 p_cid = ecore_iov_get_vf_rx_queue_cid(p_hwfn, vf,
2935                                                       &vf->vf_queues[q_idx]);
2936                 p_rss->rss_ind_table[i] = p_cid;
2937         }
2938
2939         p_data->rss_params = p_rss;
2940 out:
2941         *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_RSS;
2942         if (!b_reject)
2943                 *tlvs_accepted |= 1 << ECORE_IOV_VP_UPDATE_RSS;
2944 }
2945
2946 static void
2947 ecore_iov_vp_update_sge_tpa_param(struct ecore_hwfn *p_hwfn,
2948                                   struct ecore_vf_info *vf,
2949                                   struct ecore_sp_vport_update_params *p_data,
2950                                   struct ecore_sge_tpa_params *p_sge_tpa,
2951                                   struct ecore_iov_vf_mbx *p_mbx,
2952                                   u16 *tlvs_mask)
2953 {
2954         struct vfpf_vport_update_sge_tpa_tlv *p_sge_tpa_tlv;
2955         u16 tlv = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA;
2956
2957         p_sge_tpa_tlv = (struct vfpf_vport_update_sge_tpa_tlv *)
2958             ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv);
2959
2960         if (!p_sge_tpa_tlv) {
2961                 p_data->sge_tpa_params = OSAL_NULL;
2962                 return;
2963         }
2964
2965         OSAL_MEMSET(p_sge_tpa, 0, sizeof(struct ecore_sge_tpa_params));
2966
2967         p_sge_tpa->update_tpa_en_flg =
2968             !!(p_sge_tpa_tlv->update_sge_tpa_flags & VFPF_UPDATE_TPA_EN_FLAG);
2969         p_sge_tpa->update_tpa_param_flg =
2970             !!(p_sge_tpa_tlv->update_sge_tpa_flags &
2971                 VFPF_UPDATE_TPA_PARAM_FLAG);
2972
2973         p_sge_tpa->tpa_ipv4_en_flg =
2974             !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV4_EN_FLAG);
2975         p_sge_tpa->tpa_ipv6_en_flg =
2976             !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV6_EN_FLAG);
2977         p_sge_tpa->tpa_pkt_split_flg =
2978             !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_PKT_SPLIT_FLAG);
2979         p_sge_tpa->tpa_hdr_data_split_flg =
2980             !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_HDR_DATA_SPLIT_FLAG);
2981         p_sge_tpa->tpa_gro_consistent_flg =
2982             !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_GRO_CONSIST_FLAG);
2983
2984         p_sge_tpa->tpa_max_aggs_num = p_sge_tpa_tlv->tpa_max_aggs_num;
2985         p_sge_tpa->tpa_max_size = p_sge_tpa_tlv->tpa_max_size;
2986         p_sge_tpa->tpa_min_size_to_start = p_sge_tpa_tlv->tpa_min_size_to_start;
2987         p_sge_tpa->tpa_min_size_to_cont = p_sge_tpa_tlv->tpa_min_size_to_cont;
2988         p_sge_tpa->max_buffers_per_cqe = p_sge_tpa_tlv->max_buffers_per_cqe;
2989
2990         p_data->sge_tpa_params = p_sge_tpa;
2991
2992         *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_SGE_TPA;
2993 }
2994
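/* Handler for CHANNEL_TLV_VPORT_UPDATE - collects all extended TLVs from the
 * VF request into a single ecore_sp_vport_update() call and reports per-TLV
 * acceptance back to the VF in the response.
 */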
2995 static void ecore_iov_vf_mbx_vport_update(struct ecore_hwfn *p_hwfn,
2996                                           struct ecore_ptt *p_ptt,
2997                                           struct ecore_vf_info *vf)
2998 {
2999         struct ecore_rss_params *p_rss_params = OSAL_NULL;
3000         struct ecore_sp_vport_update_params params;
3001         struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
3002         struct ecore_sge_tpa_params sge_tpa_params;
3003         u16 tlvs_mask = 0, tlvs_accepted = 0;
3004         u8 status = PFVF_STATUS_SUCCESS;
3005         u16 length;
3006         enum _ecore_status_t rc;
3007
3008         /* Validate that the VF has an active vport to update */
3009         if (!vf->vport_instance) {
3010                 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3011                            "No VPORT instance available for VF[%d],"
3012                            " failing vport update\n",
3013                            vf->abs_vf_id);
3014                 status = PFVF_STATUS_FAILURE;
3015                 goto out;
3016         }
3017
3018         p_rss_params = OSAL_VZALLOC(p_hwfn->p_dev, sizeof(*p_rss_params));
3019         if (p_rss_params == OSAL_NULL) {
3020                 status = PFVF_STATUS_FAILURE;
3021                 goto out;
3022         }
3023
3024         OSAL_MEMSET(&params, 0, sizeof(params));
3025         params.opaque_fid = vf->opaque_fid;
3026         params.vport_id = vf->vport_id;
3027         params.rss_params = OSAL_NULL;
3028
3029         /* Search for extended tlvs list and update values
3030          * from VF in struct ecore_sp_vport_update_params.
3031          */
3032         ecore_iov_vp_update_act_param(p_hwfn, &params, mbx, &tlvs_mask);
3033         ecore_iov_vp_update_vlan_param(p_hwfn, &params, vf, mbx, &tlvs_mask);
3034         ecore_iov_vp_update_tx_switch(p_hwfn, &params, mbx, &tlvs_mask);
3035         ecore_iov_vp_update_mcast_bin_param(p_hwfn, &params, mbx, &tlvs_mask);
3036         ecore_iov_vp_update_accept_flag(p_hwfn, &params, mbx, &tlvs_mask);
3037         ecore_iov_vp_update_accept_any_vlan(p_hwfn, &params, mbx, &tlvs_mask);
3038         ecore_iov_vp_update_sge_tpa_param(p_hwfn, vf, &params,
3039                                           &sge_tpa_params, mbx, &tlvs_mask);
3040
3041         tlvs_accepted = tlvs_mask;
3042
3043         /* Some of the extended TLVs need to be validated first; in that case,
3044          * they can update the mask without updating the accepted mask [so that
3045          * the PF can communicate to the VF that it has rejected the request].
3046          */
3047         ecore_iov_vp_update_rss_param(p_hwfn, vf, &params, p_rss_params,
3048                                       mbx, &tlvs_mask, &tlvs_accepted);
3049
3050         /* Just log a message if there isn't a single extended TLV in the buffer.
3051          * Once all features of the vport update ramrod are requested by the VF
3052          * as extended TLVs in the buffer, an error can be returned in the
3053          * response if no extended TLV is present in the buffer.
3054          */
3055         if (OSAL_IOV_VF_VPORT_UPDATE(p_hwfn, vf->relative_vf_id,
3056                                      &params, &tlvs_accepted) !=
3057             ECORE_SUCCESS) {
3058                 tlvs_accepted = 0;
3059                 status = PFVF_STATUS_NOT_SUPPORTED;
3060                 goto out;
3061         }
3062
3063         if (!tlvs_accepted) {
3064                 if (tlvs_mask)
3065                         DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3066                                    "Upper-layer prevents the requested VF"
3067                                    " configuration\n");
3068                 else
3069                         DP_NOTICE(p_hwfn, true,
3070                                   "No feature tlvs found for vport update\n");
3071                 status = PFVF_STATUS_NOT_SUPPORTED;
3072                 goto out;
3073         }
3074
3075         rc = ecore_sp_vport_update(p_hwfn, &params, ECORE_SPQ_MODE_EBLOCK,
3076                                    OSAL_NULL);
3077
3078         if (rc)
3079                 status = PFVF_STATUS_FAILURE;
3080
3081 out:
3082         OSAL_VFREE(p_hwfn->p_dev, p_rss_params);
3083         length = ecore_iov_prep_vp_update_resp_tlvs(p_hwfn, vf, mbx, status,
3084                                                     tlvs_mask, tlvs_accepted);
3085         ecore_iov_send_response(p_hwfn, p_ptt, vf, length, status);
3086 }
3087
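/* Maintain the PF-side shadow of the VF's VLAN filters. Removals and flushes
 * are always tracked, but while a VLAN is forced via the bulletin board no
 * new entries are added on behalf of the VF.
 */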
3088 static enum _ecore_status_t
3089 ecore_iov_vf_update_vlan_shadow(struct ecore_hwfn *p_hwfn,
3090                                 struct ecore_vf_info *p_vf,
3091                                 struct ecore_filter_ucast *p_params)
3092 {
3093         int i;
3094
3095         /* First remove entries and then add new ones */
3096         if (p_params->opcode == ECORE_FILTER_REMOVE) {
3097                 for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
3098                         if (p_vf->shadow_config.vlans[i].used &&
3099                             p_vf->shadow_config.vlans[i].vid ==
3100                             p_params->vlan) {
3101                                 p_vf->shadow_config.vlans[i].used = false;
3102                                 break;
3103                         }
3104                 if (i == ECORE_ETH_VF_NUM_VLAN_FILTERS + 1) {
3105                         DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3106                                    "VF [%d] - Tries to remove a non-existent"
3107                                    " vlan\n",
3108                                    p_vf->relative_vf_id);
3109                         return ECORE_INVAL;
3110                 }
3111         } else if (p_params->opcode == ECORE_FILTER_REPLACE ||
3112                    p_params->opcode == ECORE_FILTER_FLUSH) {
3113                 for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
3114                         p_vf->shadow_config.vlans[i].used = false;
3115         }
3116
3117         /* In forced mode, we're willing to remove entries - but we don't add
3118          * new ones.
3119          */
3120         if (p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED))
3121                 return ECORE_SUCCESS;
3122
3123         if (p_params->opcode == ECORE_FILTER_ADD ||
3124             p_params->opcode == ECORE_FILTER_REPLACE) {
3125                 for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
3126                         if (p_vf->shadow_config.vlans[i].used)
3127                                 continue;
3128
3129                         p_vf->shadow_config.vlans[i].used = true;
3130                         p_vf->shadow_config.vlans[i].vid = p_params->vlan;
3131                         break;
3132                 }
3133
3134                 if (i == ECORE_ETH_VF_NUM_VLAN_FILTERS + 1) {
3135                         DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3136                                    "VF [%d] - Tries to configure more than %d"
3137                                    " vlan filters\n",
3138                                    p_vf->relative_vf_id,
3139                                    ECORE_ETH_VF_NUM_VLAN_FILTERS + 1);
3140                         return ECORE_INVAL;
3141                 }
3142         }
3143
3144         return ECORE_SUCCESS;
3145 }
3146
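/* Maintain the PF-side shadow of the VF's unicast MAC filters. While a MAC
 * is forced via the bulletin board the shadow is left untouched; otherwise
 * entries are removed/flushed first and a new MAC is placed in the first
 * free slot.
 */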
3147 static enum _ecore_status_t
3148 ecore_iov_vf_update_mac_shadow(struct ecore_hwfn *p_hwfn,
3149                                struct ecore_vf_info *p_vf,
3150                                struct ecore_filter_ucast *p_params)
3151 {
3152         char empty_mac[ETH_ALEN];
3153         int i;
3154
3155         OSAL_MEM_ZERO(empty_mac, ETH_ALEN);
3156
3157         /* If we're in forced-mode, we don't allow any change */
3158         /* TODO - this would change if we were ever to implement logic for
3159          * removing a forced MAC altogether [in which case, like for vlans,
3160          * we should be able to re-trace the previous configuration].
3161          */
3162         if (p_vf->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED))
3163                 return ECORE_SUCCESS;
3164
3165         /* First remove entries and then add new ones */
3166         if (p_params->opcode == ECORE_FILTER_REMOVE) {
3167                 for (i = 0; i < ECORE_ETH_VF_NUM_MAC_FILTERS; i++) {
3168                         if (!OSAL_MEMCMP(p_vf->shadow_config.macs[i],
3169                                          p_params->mac, ETH_ALEN)) {
3170                                 OSAL_MEM_ZERO(p_vf->shadow_config.macs[i],
3171                                               ETH_ALEN);
3172                                 break;
3173                         }
3174                 }
3175
3176                 if (i == ECORE_ETH_VF_NUM_MAC_FILTERS) {
3177                         DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3178                                    "MAC isn't configured\n");
3179                         return ECORE_INVAL;
3180                 }
3181         } else if (p_params->opcode == ECORE_FILTER_REPLACE ||
3182                    p_params->opcode == ECORE_FILTER_FLUSH) {
3183                 for (i = 0; i < ECORE_ETH_VF_NUM_MAC_FILTERS; i++)
3184                         OSAL_MEM_ZERO(p_vf->shadow_config.macs[i], ETH_ALEN);
3185         }
3186
3187         /* List the new MAC address */
3188         if (p_params->opcode != ECORE_FILTER_ADD &&
3189             p_params->opcode != ECORE_FILTER_REPLACE)
3190                 return ECORE_SUCCESS;
3191
3192         for (i = 0; i < ECORE_ETH_VF_NUM_MAC_FILTERS; i++) {
3193                 if (!OSAL_MEMCMP(p_vf->shadow_config.macs[i],
3194                                  empty_mac, ETH_ALEN)) {
3195                         OSAL_MEMCPY(p_vf->shadow_config.macs[i],
3196                                     p_params->mac, ETH_ALEN);
3197                         DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3198                                    "Added MAC at entry %d in shadow\n", i);
3199                         break;
3200                 }
3201         }
3202
3203         if (i == ECORE_ETH_VF_NUM_MAC_FILTERS) {
3204                 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3205                            "No available place for MAC\n");
3206                 return ECORE_INVAL;
3207         }
3208
3209         return ECORE_SUCCESS;
3210 }
3211
3212 static enum _ecore_status_t
3213 ecore_iov_vf_update_unicast_shadow(struct ecore_hwfn *p_hwfn,
3214                                    struct ecore_vf_info *p_vf,
3215                                    struct ecore_filter_ucast *p_params)
3216 {
3217         enum _ecore_status_t rc = ECORE_SUCCESS;
3218
3219         if (p_params->type == ECORE_FILTER_MAC) {
3220                 rc = ecore_iov_vf_update_mac_shadow(p_hwfn, p_vf, p_params);
3221                 if (rc != ECORE_SUCCESS)
3222                         return rc;
3223         }
3224
3225         if (p_params->type == ECORE_FILTER_VLAN)
3226                 rc = ecore_iov_vf_update_vlan_shadow(p_hwfn, p_vf, p_params);
3227
3228         return rc;
3229 }
3230
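/* Handler for CHANNEL_TLV_UCAST_FILTER - updates the shadow configuration,
 * enforces the forced MAC/VLAN restrictions from the bulletin board and only
 * then sends the unicast filter ramrod on behalf of the VF.
 */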
3231 static void ecore_iov_vf_mbx_ucast_filter(struct ecore_hwfn *p_hwfn,
3232                                           struct ecore_ptt *p_ptt,
3233                                           struct ecore_vf_info *vf)
3234 {
3235         struct ecore_bulletin_content *p_bulletin = vf->bulletin.p_virt;
3236         struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
3237         struct vfpf_ucast_filter_tlv *req;
3238         u8 status = PFVF_STATUS_SUCCESS;
3239         struct ecore_filter_ucast params;
3240         enum _ecore_status_t rc;
3241
3242         /* Prepare the unicast filter params */
3243         OSAL_MEMSET(&params, 0, sizeof(struct ecore_filter_ucast));
3244         req = &mbx->req_virt->ucast_filter;
3245         params.opcode = (enum ecore_filter_opcode)req->opcode;
3246         params.type = (enum ecore_filter_ucast_type)req->type;
3247
3248         /* @@@TBD - We might need logic on HV side in determining this */
3249         params.is_rx_filter = 1;
3250         params.is_tx_filter = 1;
3251         params.vport_to_remove_from = vf->vport_id;
3252         params.vport_to_add_to = vf->vport_id;
3253         OSAL_MEMCPY(params.mac, req->mac, ETH_ALEN);
3254         params.vlan = req->vlan;
3255
3256         DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3257                    "VF[%d]: opcode 0x%02x type 0x%02x [%s %s] [vport 0x%02x]"
3258                    " MAC %02x:%02x:%02x:%02x:%02x:%02x, vlan 0x%04x\n",
3259                    vf->abs_vf_id, params.opcode, params.type,
3260                    params.is_rx_filter ? "RX" : "",
3261                    params.is_tx_filter ? "TX" : "",
3262                    params.vport_to_add_to,
3263                    params.mac[0], params.mac[1], params.mac[2],
3264                    params.mac[3], params.mac[4], params.mac[5], params.vlan);
3265
3266         if (!vf->vport_instance) {
3267                 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3268                            "No VPORT instance available for VF[%d],"
3269                            " failing ucast MAC configuration\n",
3270                            vf->abs_vf_id);
3271                 status = PFVF_STATUS_FAILURE;
3272                 goto out;
3273         }
3274
3275         /* Update shadow copy of the VF configuration */
3276         if (ecore_iov_vf_update_unicast_shadow(p_hwfn, vf, &params) !=
3277             ECORE_SUCCESS) {
3278                 status = PFVF_STATUS_FAILURE;
3279                 goto out;
3280         }
3281
3282         /* Determine if the unicast filtering is acceptable to the PF */
3283         if ((p_bulletin->valid_bitmap & (1 << VLAN_ADDR_FORCED)) &&
3284             (params.type == ECORE_FILTER_VLAN ||
3285              params.type == ECORE_FILTER_MAC_VLAN)) {
3286                 /* Once a VLAN is forced or a PVID is set, do not allow
3287                  * adding/replacing any further VLANs.
3288                  */
3289                 if (params.opcode == ECORE_FILTER_ADD ||
3290                     params.opcode == ECORE_FILTER_REPLACE)
3291                         status = PFVF_STATUS_FORCED;
3292                 goto out;
3293         }
3294
3295         if ((p_bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)) &&
3296             (params.type == ECORE_FILTER_MAC ||
3297              params.type == ECORE_FILTER_MAC_VLAN)) {
3298                 if (OSAL_MEMCMP(p_bulletin->mac, params.mac, ETH_ALEN) ||
3299                     (params.opcode != ECORE_FILTER_ADD &&
3300                      params.opcode != ECORE_FILTER_REPLACE))
3301                         status = PFVF_STATUS_FORCED;
3302                 goto out;
3303         }
3304
3305         rc = OSAL_IOV_CHK_UCAST(p_hwfn, vf->relative_vf_id, &params);
3306         if (rc == ECORE_EXISTS) {
3307                 goto out;
3308         } else if (rc == ECORE_INVAL) {
3309                 status = PFVF_STATUS_FAILURE;
3310                 goto out;
3311         }
3312
3313         rc = ecore_sp_eth_filter_ucast(p_hwfn, vf->opaque_fid, &params,
3314                                        ECORE_SPQ_MODE_CB, OSAL_NULL);
3315         if (rc)
3316                 status = PFVF_STATUS_FAILURE;
3317
3318 out:
3319         ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UCAST_FILTER,
3320                                sizeof(struct pfvf_def_resp_tlv), status);
3321 }
3322
3323 static void ecore_iov_vf_mbx_int_cleanup(struct ecore_hwfn *p_hwfn,
3324                                          struct ecore_ptt *p_ptt,
3325                                          struct ecore_vf_info *vf)
3326 {
3327         int i;
3328
3329         /* Reset the SBs */
3330         for (i = 0; i < vf->num_sbs; i++)
3331                 ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt,
3332                                                   vf->igu_sbs[i],
3333                                                   vf->opaque_fid, false);
3334
3335         ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_INT_CLEANUP,
3336                                sizeof(struct pfvf_def_resp_tlv),
3337                                PFVF_STATUS_SUCCESS);
3338 }
3339
3340 static void ecore_iov_vf_mbx_close(struct ecore_hwfn *p_hwfn,
3341                                    struct ecore_ptt *p_ptt,
3342                                    struct ecore_vf_info *vf)
3343 {
3344         u16 length = sizeof(struct pfvf_def_resp_tlv);
3345         u8 status = PFVF_STATUS_SUCCESS;
3346
3347         /* Disable Interrupts for VF */
3348         ecore_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0);
3349
3350         /* Reset Permission table */
3351         ecore_iov_config_perm_table(p_hwfn, p_ptt, vf, 0);
3352
3353         ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_CLOSE,
3354                                length, status);
3355 }
3356
3357 static void ecore_iov_vf_mbx_release(struct ecore_hwfn *p_hwfn,
3358                                      struct ecore_ptt *p_ptt,
3359                                      struct ecore_vf_info *p_vf)
3360 {
3361         u16 length = sizeof(struct pfvf_def_resp_tlv);
3362         u8 status = PFVF_STATUS_SUCCESS;
3363         enum _ecore_status_t rc = ECORE_SUCCESS;
3364
3365         ecore_iov_vf_cleanup(p_hwfn, p_vf);
3366
3367         if (p_vf->state != VF_STOPPED && p_vf->state != VF_FREE) {
3368                 /* Stopping the VF */
3369                 rc = ecore_sp_vf_stop(p_hwfn, p_vf->concrete_fid,
3370                                       p_vf->opaque_fid);
3371
3372                 if (rc != ECORE_SUCCESS) {
3373                         DP_ERR(p_hwfn, "ecore_sp_vf_stop returned error %d\n",
3374                                rc);
3375                         status = PFVF_STATUS_FAILURE;
3376                 }
3377
3378                 p_vf->state = VF_STOPPED;
3379         }
3380
3381         ecore_iov_prepare_resp(p_hwfn, p_ptt, p_vf, CHANNEL_TLV_RELEASE,
3382                                length, status);
3383 }
3384
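/* Handler for CHANNEL_TLV_COALESCE_UPDATE - applies the requested Rx/Tx
 * interrupt coalescing values to the VF queue identified by req->qid; a
 * value of zero leaves the corresponding direction unchanged.
 */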
3385 static void ecore_iov_vf_pf_set_coalesce(struct ecore_hwfn *p_hwfn,
3386                                          struct ecore_ptt *p_ptt,
3387                                          struct ecore_vf_info *vf)
3388 {
3389         struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
3390         enum _ecore_status_t rc = ECORE_SUCCESS;
3391         struct vfpf_update_coalesce *req;
3392         u8 status = PFVF_STATUS_FAILURE;
3393         struct ecore_queue_cid *p_cid;
3394         u16 rx_coal, tx_coal;
3395         u16 qid;
3396         int i;
3397
3398         req = &mbx->req_virt->update_coalesce;
3399
3400         rx_coal = req->rx_coal;
3401         tx_coal = req->tx_coal;
3402         qid = req->qid;
3403
3404         if (!ecore_iov_validate_rxq(p_hwfn, vf, qid,
3405                                     ECORE_IOV_VALIDATE_Q_ENABLE) &&
3406             rx_coal) {
3407                 DP_ERR(p_hwfn, "VF[%d]: Invalid Rx queue_id = %d\n",
3408                        vf->abs_vf_id, qid);
3409                 goto out;
3410         }
3411
3412         if (!ecore_iov_validate_txq(p_hwfn, vf, qid,
3413                                     ECORE_IOV_VALIDATE_Q_ENABLE) &&
3414             tx_coal) {
3415                 DP_ERR(p_hwfn, "VF[%d]: Invalid Tx queue_id = %d\n",
3416                        vf->abs_vf_id, qid);
3417                 goto out;
3418         }
3419
3420         DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3421                    "VF[%d]: Setting coalesce for VF rx_coal = %d, tx_coal = %d at queue = %d\n",
3422                    vf->abs_vf_id, rx_coal, tx_coal, qid);
3423
3424         if (rx_coal) {
3425                 p_cid = ecore_iov_get_vf_rx_queue_cid(p_hwfn, vf,
3426                                                       &vf->vf_queues[qid]);
3427
3428                 rc = ecore_set_rxq_coalesce(p_hwfn, p_ptt, rx_coal, p_cid);
3429                 if (rc != ECORE_SUCCESS) {
3430                         DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3431                                    "VF[%d]: Unable to set rx queue = %d coalesce\n",
3432                                    vf->abs_vf_id, vf->vf_queues[qid].fw_rx_qid);
3433                         goto out;
3434                 }
3435         }
3436
3437         /* TODO - in future, it might be possible to pass this in a per-cid
3438          * granularity. For now, do this for all Tx queues.
3439          */
3440         if (tx_coal) {
3441                 struct ecore_vf_queue *p_queue = &vf->vf_queues[qid];
3442
3443                 for (i = 0; i < MAX_QUEUES_PER_QZONE; i++) {
3444                         if (p_queue->cids[i].p_cid == OSAL_NULL)
3445                                 continue;
3446
3447                         if (!p_queue->cids[i].b_is_tx)
3448                                 continue;
3449
3450                         rc = ecore_set_txq_coalesce(p_hwfn, p_ptt, tx_coal,
3451                                                     p_queue->cids[i].p_cid);
3452                         if (rc != ECORE_SUCCESS) {
3453                                 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3454                                            "VF[%d]: Unable to set tx queue coalesce\n",
3455                                            vf->abs_vf_id);
3456                                 goto out;
3457                         }
3458                 }
3459         }
3460
3461         status = PFVF_STATUS_SUCCESS;
3462 out:
3463         ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_COALESCE_UPDATE,
3464                                sizeof(struct pfvf_def_resp_tlv), status);
3465 }
3466
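/* FLR cleanup: pretend to the VF's concrete FID and poll the DORQ usage
 * counter until all of the VF's doorbells have been consumed, waiting up to
 * roughly one second before giving up with ECORE_TIMEOUT.
 */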
3467 static enum _ecore_status_t
3468 ecore_iov_vf_flr_poll_dorq(struct ecore_hwfn *p_hwfn,
3469                            struct ecore_vf_info *p_vf, struct ecore_ptt *p_ptt)
3470 {
3471         int cnt;
3472         u32 val;
3473
3474         ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_vf->concrete_fid);
3475
3476         for (cnt = 0; cnt < 50; cnt++) {
3477                 val = ecore_rd(p_hwfn, p_ptt, DORQ_REG_VF_USAGE_CNT);
3478                 if (!val)
3479                         break;
3480                 OSAL_MSLEEP(20);
3481         }
3482         ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid);
3483
3484         if (cnt == 50) {
3485                 DP_ERR(p_hwfn,
3486                        "VF[%d] - dorq failed to cleanup [usage 0x%08x]\n",
3487                        p_vf->abs_vf_id, val);
3488                 return ECORE_TIMEOUT;
3489         }
3490
3491         return ECORE_SUCCESS;
3492 }
3493
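/* FLR cleanup: sample the per-VOQ producer/consumer counters in the PBF and
 * wait until every consumer has advanced by at least the initial
 * producer-consumer distance, i.e. all blocks in flight at FLR time have
 * drained.
 */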
3494 static enum _ecore_status_t
3495 ecore_iov_vf_flr_poll_pbf(struct ecore_hwfn *p_hwfn,
3496                           struct ecore_vf_info *p_vf, struct ecore_ptt *p_ptt)
3497 {
3498         u32 cons[MAX_NUM_VOQS], distance[MAX_NUM_VOQS];
3499         int i, cnt;
3500
3501         /* Read initial consumers & producers */
3502         for (i = 0; i < MAX_NUM_VOQS; i++) {
3503                 u32 prod;
3504
3505                 cons[i] = ecore_rd(p_hwfn, p_ptt,
3506                                    PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
3507                                    i * 0x40);
3508                 prod = ecore_rd(p_hwfn, p_ptt,
3509                                 PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0 +
3510                                 i * 0x40);
3511                 distance[i] = prod - cons[i];
3512         }
3513
3514         /* Wait for consumers to pass the producers */
3515         i = 0;
3516         for (cnt = 0; cnt < 50; cnt++) {
3517                 for (; i < MAX_NUM_VOQS; i++) {
3518                         u32 tmp;
3519
3520                         tmp = ecore_rd(p_hwfn, p_ptt,
3521                                        PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 +
3522                                        i * 0x40);
3523                         if (distance[i] > tmp - cons[i])
3524                                 break;
3525                 }
3526
3527                 if (i == MAX_NUM_VOQS)
3528                         break;
3529
3530                 OSAL_MSLEEP(20);
3531         }
3532
3533         if (cnt == 50) {
3534                 DP_ERR(p_hwfn, "VF[%d] - pbf polling failed on VOQ %d\n",
3535                        p_vf->abs_vf_id, i);
3536                 return ECORE_TIMEOUT;
3537         }
3538
3539         return ECORE_SUCCESS;
3540 }
3541
3542 static enum _ecore_status_t ecore_iov_vf_flr_poll(struct ecore_hwfn *p_hwfn,
3543                                                   struct ecore_vf_info *p_vf,
3544                                                   struct ecore_ptt *p_ptt)
3545 {
3546         enum _ecore_status_t rc;
3547
3548         /* TODO - add SRC and TM polling once we add storage IOV */
3549
3550         rc = ecore_iov_vf_flr_poll_dorq(p_hwfn, p_vf, p_ptt);
3551         if (rc)
3552                 return rc;
3553
3554         rc = ecore_iov_vf_flr_poll_pbf(p_hwfn, p_vf, p_ptt);
3555         if (rc)
3556                 return rc;
3557
3558         return ECORE_SUCCESS;
3559 }
3560
3561 static enum _ecore_status_t
3562 ecore_iov_execute_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
3563                                  struct ecore_ptt *p_ptt,
3564                                  u16 rel_vf_id, u32 *ack_vfs)
3565 {
3566         struct ecore_vf_info *p_vf;
3567         enum _ecore_status_t rc = ECORE_SUCCESS;
3568
3569         p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, false);
3570         if (!p_vf)
3571                 return ECORE_SUCCESS;
3572
3573         if (p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &
3574             (1ULL << (rel_vf_id % 64))) {
3575                 u16 vfid = p_vf->abs_vf_id;
3576
3577                 /* TODO - should we lock channel? */
3578
3579                 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3580                            "VF[%d] - Handling FLR\n", vfid);
3581
3582                 ecore_iov_vf_cleanup(p_hwfn, p_vf);
3583
3584                 /* If VF isn't active, no need for anything but SW */
3585                 if (!p_vf->b_init)
3586                         goto cleanup;
3587
3588                 /* TODO - what to do in case of failure? */
3589                 rc = ecore_iov_vf_flr_poll(p_hwfn, p_vf, p_ptt);
3590                 if (rc != ECORE_SUCCESS)
3591                         goto cleanup;
3592
3593                 rc = ecore_final_cleanup(p_hwfn, p_ptt, vfid, true);
3594                 if (rc) {
3595                         /* TODO - what's now? What a mess.... */
3596                         DP_ERR(p_hwfn, "Failed to handle FLR of VF[%d]\n", vfid);
3597                         return rc;
3598                 }
3599
3600                 /* Workaround to make VF-PF channel ready, as FW
3601                  * doesn't do that as a part of FLR.
3602                  */
3603                 REG_WR(p_hwfn,
3604                        GTT_BAR0_MAP_REG_USDM_RAM +
3605                        USTORM_VF_PF_CHANNEL_READY_OFFSET(vfid), 1);
3606
3607                 /* VF_STOPPED has to be set only after final cleanup
3608                  * but prior to re-enabling the VF.
3609                  */
3610                 p_vf->state = VF_STOPPED;
3611
3612                 rc = ecore_iov_enable_vf_access(p_hwfn, p_ptt, p_vf);
3613                 if (rc) {
3614                         /* TODO - again, a mess... */
3615                         DP_ERR(p_hwfn, "Failed to re-enable VF[%d] access\n",
3616                                vfid);
3617                         return rc;
3618                 }
3619 cleanup:
3620                 /* Mark VF for ack and clean pending state */
3621                 if (p_vf->state == VF_RESET)
3622                         p_vf->state = VF_STOPPED;
3623                 ack_vfs[vfid / 32] |= (1 << (vfid % 32));
3624                 p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &=
3625                     ~(1ULL << (rel_vf_id % 64));
3626                 p_hwfn->pf_iov_info->pending_events[rel_vf_id / 64] &=
3627                     ~(1ULL << (rel_vf_id % 64));
3628         }
3629
3630         return rc;
3631 }
3632
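/* Run FLR cleanup for every VF of this PF and ACK the handled VFs towards
 * the MFW once done.
 */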
3633 enum _ecore_status_t ecore_iov_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
3634                                               struct ecore_ptt *p_ptt)
3635 {
3636         u32 ack_vfs[VF_MAX_STATIC / 32];
3637         enum _ecore_status_t rc = ECORE_SUCCESS;
3638         u16 i;
3639
3640         OSAL_MEMSET(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32));
3641
3642         /* Since BRB <-> PRS interface can't be tested as part of the flr
3643          * polling due to HW limitations, simply sleep a bit. And since
3644          * there's no need to wait per-vf, do it before looping.
3645          */
3646         OSAL_MSLEEP(100);
3647
3648         for (i = 0; i < p_hwfn->p_dev->p_iov_info->total_vfs; i++)
3649                 ecore_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, i, ack_vfs);
3650
3651         rc = ecore_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs);
3652         return rc;
3653 }
3654
3655 enum _ecore_status_t
3656 ecore_iov_single_vf_flr_cleanup(struct ecore_hwfn *p_hwfn,
3657                                 struct ecore_ptt *p_ptt, u16 rel_vf_id)
3658 {
3659         u32 ack_vfs[VF_MAX_STATIC / 32];
3660         enum _ecore_status_t rc = ECORE_SUCCESS;
3661
3662         OSAL_MEMSET(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32));
3663
3664         /* Wait instead of polling the BRB <-> PRS interface */
3665         OSAL_MSLEEP(100);
3666
3667         ecore_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, rel_vf_id, ack_vfs);
3668
3669         rc = ecore_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs);
3670         return rc;
3671 }
3672
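/* Mark every VF listed in the p_disabled_vfs bitmap as VF_RESET and record
 * it in pending_flr for later cleanup; returns true if at least one VF of
 * this PF was marked.
 */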
3673 bool ecore_iov_mark_vf_flr(struct ecore_hwfn *p_hwfn, u32 *p_disabled_vfs)
3674 {
3675         bool found = false;
3676         u16 i;
3677
3678         DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, "Marking FLR-ed VFs\n");
3679         for (i = 0; i < (VF_MAX_STATIC / 32); i++)
3680                 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3681                            "[%08x,...,%08x]: %08x\n",
3682                            i * 32, (i + 1) * 32 - 1, p_disabled_vfs[i]);
3683
3684         if (!p_hwfn->p_dev->p_iov_info) {
3685                 DP_NOTICE(p_hwfn, true, "VF flr but no IOV\n");
3686                 return false;
3687         }
3688
3689         /* Mark VFs */
3690         for (i = 0; i < p_hwfn->p_dev->p_iov_info->total_vfs; i++) {
3691                 struct ecore_vf_info *p_vf;
3692                 u8 vfid;
3693
3694                 p_vf = ecore_iov_get_vf_info(p_hwfn, i, false);
3695                 if (!p_vf)
3696                         continue;
3697
3698                 vfid = p_vf->abs_vf_id;
3699                 if ((1 << (vfid % 32)) & p_disabled_vfs[vfid / 32]) {
3700                         u64 *p_flr = p_hwfn->pf_iov_info->pending_flr;
3701                         u16 rel_vf_id = p_vf->relative_vf_id;
3702
3703                         DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3704                                    "VF[%d] [rel %d] got FLR-ed\n",
3705                                    vfid, rel_vf_id);
3706
3707                         p_vf->state = VF_RESET;
3708
3709                         /* No need to lock here, since pending_flr should
3710                          * only change here and before ACKing the MFW. Since
3711                          * the MFW will not trigger an additional attention for
3712                          * VF FLR until the ACK, we're safe.
3713                          */
3714                         p_flr[rel_vf_id / 64] |= 1ULL << (rel_vf_id % 64);
3715                         found = true;
3716                 }
3717         }
3718
3719         return found;
3720 }
3721
3722 void ecore_iov_get_link(struct ecore_hwfn *p_hwfn,
3723                         u16 vfid,
3724                         struct ecore_mcp_link_params *p_params,
3725                         struct ecore_mcp_link_state *p_link,
3726                         struct ecore_mcp_link_capabilities *p_caps)
3727 {
3728         struct ecore_vf_info *p_vf = ecore_iov_get_vf_info(p_hwfn, vfid, false);
3729         struct ecore_bulletin_content *p_bulletin;
3730
3731         if (!p_vf)
3732                 return;
3733
3734         p_bulletin = p_vf->bulletin.p_virt;
3735
3736         if (p_params)
3737                 __ecore_vf_get_link_params(p_hwfn, p_params, p_bulletin);
3738         if (p_link)
3739                 __ecore_vf_get_link_state(p_hwfn, p_link, p_bulletin);
3740         if (p_caps)
3741                 __ecore_vf_get_link_caps(p_hwfn, p_caps, p_bulletin);
3742 }
3743
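/* Main dispatch point for VF->PF mailbox messages. The per-VF channel is
 * locked, the first TLV selects the handler, supported TLVs from malicious
 * VFs are answered with PFVF_STATUS_MALICIOUS, and unknown TLVs with
 * PFVF_STATUS_NOT_SUPPORTED when a valid reply address is available.
 */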
3744 void ecore_iov_process_mbx_req(struct ecore_hwfn *p_hwfn,
3745                                struct ecore_ptt *p_ptt, int vfid)
3746 {
3747         struct ecore_iov_vf_mbx *mbx;
3748         struct ecore_vf_info *p_vf;
3749
3750         p_vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
3751         if (!p_vf)
3752                 return;
3753
3754         mbx = &p_vf->vf_mbx;
3755
3756         /* ecore_iov_process_mbx_request */
3757         DP_VERBOSE(p_hwfn,
3758                    ECORE_MSG_IOV,
3759                    "VF[%02x]: Processing mailbox message\n", p_vf->abs_vf_id);
3760
3761         mbx->first_tlv = mbx->req_virt->first_tlv;
3762
3763         OSAL_IOV_VF_MSG_TYPE(p_hwfn,
3764                              p_vf->relative_vf_id,
3765                              mbx->first_tlv.tl.type);
3766
3767         /* Lock the per vf op mutex and note the locker's identity.
3768          * The unlock will take place in mbx response.
3769          */
3770         ecore_iov_lock_vf_pf_channel(p_hwfn,
3771                                      p_vf, mbx->first_tlv.tl.type);
3772
3773         /* check if tlv type is known */
3774         if (ecore_iov_tlv_supported(mbx->first_tlv.tl.type) &&
3775             !p_vf->b_malicious) {
3776                 /* switch on the opcode */
3777                 switch (mbx->first_tlv.tl.type) {
3778                 case CHANNEL_TLV_ACQUIRE:
3779                         ecore_iov_vf_mbx_acquire(p_hwfn, p_ptt, p_vf);
3780                         break;
3781                 case CHANNEL_TLV_VPORT_START:
3782                         ecore_iov_vf_mbx_start_vport(p_hwfn, p_ptt, p_vf);
3783                         break;
3784                 case CHANNEL_TLV_VPORT_TEARDOWN:
3785                         ecore_iov_vf_mbx_stop_vport(p_hwfn, p_ptt, p_vf);
3786                         break;
3787                 case CHANNEL_TLV_START_RXQ:
3788                         ecore_iov_vf_mbx_start_rxq(p_hwfn, p_ptt, p_vf);
3789                         break;
3790                 case CHANNEL_TLV_START_TXQ:
3791                         ecore_iov_vf_mbx_start_txq(p_hwfn, p_ptt, p_vf);
3792                         break;
3793                 case CHANNEL_TLV_STOP_RXQS:
3794                         ecore_iov_vf_mbx_stop_rxqs(p_hwfn, p_ptt, p_vf);
3795                         break;
3796                 case CHANNEL_TLV_STOP_TXQS:
3797                         ecore_iov_vf_mbx_stop_txqs(p_hwfn, p_ptt, p_vf);
3798                         break;
3799                 case CHANNEL_TLV_UPDATE_RXQ:
3800                         ecore_iov_vf_mbx_update_rxqs(p_hwfn, p_ptt, p_vf);
3801                         break;
3802                 case CHANNEL_TLV_VPORT_UPDATE:
3803                         ecore_iov_vf_mbx_vport_update(p_hwfn, p_ptt, p_vf);
3804                         break;
3805                 case CHANNEL_TLV_UCAST_FILTER:
3806                         ecore_iov_vf_mbx_ucast_filter(p_hwfn, p_ptt, p_vf);
3807                         break;
3808                 case CHANNEL_TLV_CLOSE:
3809                         ecore_iov_vf_mbx_close(p_hwfn, p_ptt, p_vf);
3810                         break;
3811                 case CHANNEL_TLV_INT_CLEANUP:
3812                         ecore_iov_vf_mbx_int_cleanup(p_hwfn, p_ptt, p_vf);
3813                         break;
3814                 case CHANNEL_TLV_RELEASE:
3815                         ecore_iov_vf_mbx_release(p_hwfn, p_ptt, p_vf);
3816                         break;
3817                 case CHANNEL_TLV_UPDATE_TUNN_PARAM:
3818                         ecore_iov_vf_mbx_update_tunn_param(p_hwfn, p_ptt, p_vf);
3819                         break;
3820                 case CHANNEL_TLV_COALESCE_UPDATE:
3821                         ecore_iov_vf_pf_set_coalesce(p_hwfn, p_ptt, p_vf);
3822                         break;
3823                 }
3824         } else if (ecore_iov_tlv_supported(mbx->first_tlv.tl.type)) {
3825                 /* If we've received a message from a VF we consider malicious,
3826                  * we ignore the message unless it's a RELEASE, in which
3827                  * case we'll give it the benefit of the doubt, allowing the
3828                  * next loaded driver to start again.
3829                  */
3830                 if (mbx->first_tlv.tl.type == CHANNEL_TLV_RELEASE) {
3831                         /* TODO - initiate FLR, remove malicious indication */
3832                         DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3833                                    "VF [%02x] - considered malicious, but wanted to RELEASE. TODO\n",
3834                                    p_vf->abs_vf_id);
3835                 } else {
3836                         DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3837                                    "VF [%02x] - considered malicious; Ignoring TLV [%04x]\n",
3838                                    p_vf->abs_vf_id, mbx->first_tlv.tl.type);
3839                 }
3840
3841                 ecore_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
3842                                        mbx->first_tlv.tl.type,
3843                                        sizeof(struct pfvf_def_resp_tlv),
3844                                        PFVF_STATUS_MALICIOUS);
3845         } else {
3846                 /* unknown TLV - this may belong to a VF driver from the future
3847                  * - a version written after this PF driver was written, which
3848                  * supports features unknown as of yet. Too bad since we don't
3849                  * support them. Or this may be because someone wrote a crappy
3850                  * VF driver and is sending garbage over the channel.
3851                  */
3852                 DP_NOTICE(p_hwfn, false,
3853                           "VF[%02x]: unknown TLV. type %04x length %04x"
3854                           " padding %08x reply address %lu\n",
3855                           p_vf->abs_vf_id,
3856                           mbx->first_tlv.tl.type,
3857                           mbx->first_tlv.tl.length,
3858                           mbx->first_tlv.padding,
3859                           (unsigned long)mbx->first_tlv.reply_address);
3860
3861                 /* Try replying in case reply address matches the acquisition's
3862                  * posted address.
3863                  */
3864                 if (p_vf->acquire.first_tlv.reply_address &&
3865                     (mbx->first_tlv.reply_address ==
3866                      p_vf->acquire.first_tlv.reply_address))
3867                         ecore_iov_prepare_resp(p_hwfn, p_ptt, p_vf,
3868                                                mbx->first_tlv.tl.type,
3869                                                sizeof(struct pfvf_def_resp_tlv),
3870                                                PFVF_STATUS_NOT_SUPPORTED);
3871                 else
3872                         DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3873                                    "VF[%02x]: Can't respond to TLV -"
3874                                    " no valid reply address\n",
3875                                    p_vf->abs_vf_id);
3876         }
3877
3878         ecore_iov_unlock_vf_pf_channel(p_hwfn, p_vf,
3879                                        mbx->first_tlv.tl.type);
3880
3881 #ifdef CONFIG_ECORE_SW_CHANNEL
3882         mbx->sw_mbx.mbx_state = VF_PF_RESPONSE_READY;
3883         mbx->sw_mbx.response_offset = 0;
3884 #endif
3885 }
3886
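/* Mark a VF->PF channel event as pending by setting the VF's bit in the
 * PF's pending_events bitmap; it is consumed later by
 * ecore_iov_pf_get_and_clear_pending_events().
 */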
3887 void ecore_iov_pf_add_pending_events(struct ecore_hwfn *p_hwfn, u8 vfid)
3888 {
3889         u64 add_bit = 1ULL << (vfid % 64);
3890
3891         /* TODO - add locking mechanisms [no atomics in ecore, so we can't
3892          * add the lock inside the ecore_pf_iov struct].
3893          */
3894         p_hwfn->pf_iov_info->pending_events[vfid / 64] |= add_bit;
3895 }
3896
3897 void ecore_iov_pf_get_and_clear_pending_events(struct ecore_hwfn *p_hwfn,
3898                                                u64 *events)
3899 {
3900         u64 *p_pending_events = p_hwfn->pf_iov_info->pending_events;
3901
3902         /* TODO - Take a lock */
3903         OSAL_MEMCPY(events, p_pending_events,
3904                     sizeof(u64) * ECORE_VF_ARRAY_LENGTH);
3905         OSAL_MEMSET(p_pending_events, 0,
3906                     sizeof(u64) * ECORE_VF_ARRAY_LENGTH);
3907 }
3908
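/* Translate an absolute VF-id taken from an event-queue element into the
 * PF's ecore_vf_info entry; returns OSAL_NULL if the id doesn't belong to
 * a VF this PF can handle.
 */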
3909 static struct ecore_vf_info *
3910 ecore_sriov_get_vf_from_absid(struct ecore_hwfn *p_hwfn, u16 abs_vfid)
3911 {
3912         u8 min = (u8)p_hwfn->p_dev->p_iov_info->first_vf_in_pf;
3913
3914         if (!_ecore_iov_pf_sanity_check(p_hwfn, (int)abs_vfid - min, false)) {
3915                 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3916                            "Got indication for VF [abs 0x%08x] that cannot be"
3917                            " handled by PF\n",
3918                            abs_vfid);
3919                 return OSAL_NULL;
3920         }
3921
3922         return &p_hwfn->pf_iov_info->vfs_array[(u8)abs_vfid - min];
3923 }
3924
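/* Handle a VF->PF channel EQE - record the DMA address of the VF's request
 * and let the OSAL layer schedule the mailbox processing.
 */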
3925 static enum _ecore_status_t ecore_sriov_vfpf_msg(struct ecore_hwfn *p_hwfn,
3926                                                  u16 abs_vfid,
3927                                                  struct regpair *vf_msg)
3928 {
3929         struct ecore_vf_info *p_vf = ecore_sriov_get_vf_from_absid(p_hwfn,
3930                                                                    abs_vfid);
3931
3932         if (!p_vf)
3933                 return ECORE_SUCCESS;
3934
3935         /* Record the physical address of the request so that the
3936          * handler can later copy the message from it.
3937          */
3938         p_vf->vf_mbx.pending_req = (((u64)vf_msg->hi) << 32) | vf_msg->lo;
3939
3940         return OSAL_PF_VF_MSG(p_hwfn, p_vf->relative_vf_id);
3941 }
3942
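/* Handle a malicious-VF EQE - mark the VF as malicious and notify the
 * upper layer.
 */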
3943 static void ecore_sriov_vfpf_malicious(struct ecore_hwfn *p_hwfn,
3944                                        struct malicious_vf_eqe_data *p_data)
3945 {
3946         struct ecore_vf_info *p_vf;
3947
3948         p_vf = ecore_sriov_get_vf_from_absid(p_hwfn, p_data->vfId);
3949
3950         if (!p_vf)
3951                 return;
3952
3953         DP_INFO(p_hwfn,
3954                 "VF [%d] - Malicious behavior [%02x]\n",
3955                 p_vf->abs_vf_id, p_data->errId);
3956
3957         p_vf->b_malicious = true;
3958
3959         OSAL_PF_VF_MALICIOUS(p_hwfn, p_vf->relative_vf_id);
3960 }
3961
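/* Dispatch SR-IOV related event-queue entries to their handlers. */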
3962 enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn *p_hwfn,
3963                                            u8 opcode,
3964                                            __le16 echo,
3965                                            union event_ring_data *data)
3966 {
3967         switch (opcode) {
3968         case COMMON_EVENT_VF_PF_CHANNEL:
3969                 return ecore_sriov_vfpf_msg(p_hwfn, OSAL_LE16_TO_CPU(echo),
3970                                             &data->vf_pf_channel.msg_addr);
3971         case COMMON_EVENT_VF_FLR:
3972                 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
3973                            "VF-FLR is still not supported\n");
3974                 return ECORE_SUCCESS;
3975         case COMMON_EVENT_MALICIOUS_VF:
3976                 ecore_sriov_vfpf_malicious(p_hwfn, &data->malicious_vf);
3977                 return ECORE_SUCCESS;
3978         default:
3979                 DP_INFO(p_hwfn->p_dev, "Unknown sriov eqe event 0x%02x\n",
3980                         opcode);
3981                 return ECORE_INVAL;
3982         }
3983 }
3984
3985 bool ecore_iov_is_vf_pending_flr(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
3986 {
3987         return !!(p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &
3988                    (1ULL << (rel_vf_id % 64)));
3989 }
3990
3991 u16 ecore_iov_get_next_active_vf(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
3992 {
3993         struct ecore_hw_sriov_info *p_iov = p_hwfn->p_dev->p_iov_info;
3994         u16 i;
3995
3996         if (!p_iov)
3997                 goto out;
3998
3999         for (i = rel_vf_id; i < p_iov->total_vfs; i++)
4000                 if (ecore_iov_is_valid_vfid(p_hwfn, i, true, false))
4001                         return i;
4002
4003 out:
4004         return E4_MAX_NUM_VFS;
4005 }
4006
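/* DMA the VF's pending request from VF memory into the PF's pre-allocated
 * mailbox request buffer. The DMAE length is given in dwords, hence the
 * division by 4.
 */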
4007 enum _ecore_status_t ecore_iov_copy_vf_msg(struct ecore_hwfn *p_hwfn,
4008                                            struct ecore_ptt *ptt, int vfid)
4009 {
4010         struct ecore_dmae_params params;
4011         struct ecore_vf_info *vf_info;
4012
4013         vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4014         if (!vf_info)
4015                 return ECORE_INVAL;
4016
4017         OSAL_MEMSET(&params, 0, sizeof(struct ecore_dmae_params));
4018         params.flags = ECORE_DMAE_FLAG_VF_SRC | ECORE_DMAE_FLAG_COMPLETION_DST;
4019         params.src_vfid = vf_info->abs_vf_id;
4020
4021         if (ecore_dmae_host2host(p_hwfn, ptt,
4022                                  vf_info->vf_mbx.pending_req,
4023                                  vf_info->vf_mbx.req_phys,
4024                                  sizeof(union vfpf_tlvs) / 4, &params)) {
4025                 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
4026                            "Failed to copy message from VF 0x%02x\n", vfid);
4027
4028                 return ECORE_IO;
4029         }
4030
4031         return ECORE_SUCCESS;
4032 }
4033
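/* Publish a PF-enforced MAC address through the VF's bulletin board.
 * A forced MAC supersedes any VF-chosen MAC, so the MAC_ADDR bit is
 * cleared, and the forced configuration is applied to the vport.
 */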
4034 void ecore_iov_bulletin_set_forced_mac(struct ecore_hwfn *p_hwfn,
4035                                        u8 *mac, int vfid)
4036 {
4037         struct ecore_vf_info *vf_info;
4038         u64 feature;
4039
4040         vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4041         if (!vf_info) {
4042                 DP_NOTICE(p_hwfn->p_dev, true,
4043                           "Can not set forced MAC, invalid vfid [%d]\n", vfid);
4044                 return;
4045         }
4046         if (vf_info->b_malicious) {
4047                 DP_NOTICE(p_hwfn->p_dev, false,
4048                           "Can't set forced MAC to malicious VF [%d]\n",
4049                           vfid);
4050                 return;
4051         }
4052
4053         feature = 1 << MAC_ADDR_FORCED;
4054         OSAL_MEMCPY(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN);
4055
4056         vf_info->bulletin.p_virt->valid_bitmap |= feature;
4057         /* Forced MAC will disable MAC_ADDR */
4058         vf_info->bulletin.p_virt->valid_bitmap &=
4059             ~(1 << VFPF_BULLETIN_MAC_ADDR);
4060
4061         ecore_iov_configure_vport_forced(p_hwfn, vf_info, feature);
4062 }
4063
4064 enum _ecore_status_t ecore_iov_bulletin_set_mac(struct ecore_hwfn *p_hwfn,
4065                                                 u8 *mac, int vfid)
4066 {
4067         struct ecore_vf_info *vf_info;
4068         u64 feature;
4069
4070         vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4071         if (!vf_info) {
4072                 DP_NOTICE(p_hwfn->p_dev, true,
4073                           "Can not set MAC, invalid vfid [%d]\n", vfid);
4074                 return ECORE_INVAL;
4075         }
4076         if (vf_info->b_malicious) {
4077                 DP_NOTICE(p_hwfn->p_dev, false,
4078                           "Can't set MAC to malicious VF [%d]\n",
4079                           vfid);
4080                 return ECORE_INVAL;
4081         }
4082
4083         if (vf_info->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED)) {
4084                 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
4085                            "Can not set MAC, Forced MAC is configured\n");
4086                 return ECORE_INVAL;
4087         }
4088
4089         feature = 1 << VFPF_BULLETIN_MAC_ADDR;
4090         OSAL_MEMCPY(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN);
4091
4092         vf_info->bulletin.p_virt->valid_bitmap |= feature;
4093
4094         return ECORE_SUCCESS;
4095 }
4096
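/* Request untagged-default behavior for a VF. The setting is only latched
 * in the bulletin here; it takes effect when the VF starts its vport, and
 * is rejected if the VF is already active.
 */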
4097 enum _ecore_status_t
4098 ecore_iov_bulletin_set_forced_untagged_default(struct ecore_hwfn *p_hwfn,
4099                                                bool b_untagged_only, int vfid)
4100 {
4101         struct ecore_vf_info *vf_info;
4102         u64 feature;
4103
4104         vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4105         if (!vf_info) {
4106                 DP_NOTICE(p_hwfn->p_dev, true,
4107                           "Can not set untagged default, invalid vfid [%d]\n",
4108                           vfid);
4109                 return ECORE_INVAL;
4110         }
4111         if (vf_info->b_malicious) {
4112                 DP_NOTICE(p_hwfn->p_dev, false,
4113                           "Can't set untagged default to malicious VF [%d]\n",
4114                           vfid);
4115                 return ECORE_INVAL;
4116         }
4117
4118         /* Since this is configurable only during vport-start, don't
4119          * accept the change if we're already past that point.
4120          */
4121         if (vf_info->state == VF_ENABLED) {
4122                 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
4123                            "Can't support untagged change for vfid[%d] -"
4124                            " VF is already active\n",
4125                            vfid);
4126                 return ECORE_INVAL;
4127         }
4128
4129         /* Set the configuration; it will be taken into account later,
4130          * during VF initialization.
4131          */
4132         feature = (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT) |
4133             (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED);
4134         vf_info->bulletin.p_virt->valid_bitmap |= feature;
4135
4136         vf_info->bulletin.p_virt->default_only_untagged = b_untagged_only ? 1
4137             : 0;
4138
4139         return ECORE_SUCCESS;
4140 }
4141
4142 void ecore_iov_get_vfs_opaque_fid(struct ecore_hwfn *p_hwfn, int vfid,
4143                                   u16 *opaque_fid)
4144 {
4145         struct ecore_vf_info *vf_info;
4146
4147         vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4148         if (!vf_info)
4149                 return;
4150
4151         *opaque_fid = vf_info->opaque_fid;
4152 }
4153
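/* Publish a PF-enforced vlan (pvid) through the VF's bulletin board, or
 * clear it when pvid is 0, and apply the forced configuration to the
 * vport.
 */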
4154 void ecore_iov_bulletin_set_forced_vlan(struct ecore_hwfn *p_hwfn,
4155                                         u16 pvid, int vfid)
4156 {
4157         struct ecore_vf_info *vf_info;
4158         u64 feature;
4159
4160         vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4161         if (!vf_info) {
4162                 DP_NOTICE(p_hwfn->p_dev, true,
4163                           "Can not set forced vlan, invalid vfid [%d]\n",
4164                           vfid);
4165                 return;
4166         }
4167         if (vf_info->b_malicious) {
4168                 DP_NOTICE(p_hwfn->p_dev, false,
4169                           "Can't set forced vlan to malicious VF [%d]\n",
4170                           vfid);
4171                 return;
4172         }
4173
4174         feature = 1 << VLAN_ADDR_FORCED;
4175         vf_info->bulletin.p_virt->pvid = pvid;
4176         if (pvid)
4177                 vf_info->bulletin.p_virt->valid_bitmap |= feature;
4178         else
4179                 vf_info->bulletin.p_virt->valid_bitmap &= ~feature;
4180
4181         ecore_iov_configure_vport_forced(p_hwfn, vf_info, feature);
4182 }
4183
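/* Publish the tunnel UDP destination ports (VXLAN/GENEVE) to the VF via
 * its bulletin board.
 */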
4184 void ecore_iov_bulletin_set_udp_ports(struct ecore_hwfn *p_hwfn,
4185                                       int vfid, u16 vxlan_port, u16 geneve_port)
4186 {
4187         struct ecore_vf_info *vf_info;
4188
4189         vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4190         if (!vf_info) {
4191                 DP_NOTICE(p_hwfn->p_dev, true,
4192                           "Can not set udp ports, invalid vfid [%d]\n", vfid);
4193                 return;
4194         }
4195
4196         if (vf_info->b_malicious) {
4197                 DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
4198                            "Can not set udp ports to malicious VF [%d]\n",
4199                            vfid);
4200                 return;
4201         }
4202
4203         vf_info->bulletin.p_virt->vxlan_udp_port = vxlan_port;
4204         vf_info->bulletin.p_virt->geneve_udp_port = geneve_port;
4205 }
4206
4207 bool ecore_iov_vf_has_vport_instance(struct ecore_hwfn *p_hwfn, int vfid)
4208 {
4209         struct ecore_vf_info *p_vf_info;
4210
4211         p_vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4212         if (!p_vf_info)
4213                 return false;
4214
4215         return !!p_vf_info->vport_instance;
4216 }
4217
4218 bool ecore_iov_is_vf_stopped(struct ecore_hwfn *p_hwfn, int vfid)
4219 {
4220         struct ecore_vf_info *p_vf_info;
4221
4222         p_vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4223         if (!p_vf_info)
4224                 return true;
4225
4226         return p_vf_info->state == VF_STOPPED;
4227 }
4228
4229 bool ecore_iov_spoofchk_get(struct ecore_hwfn *p_hwfn, int vfid)
4230 {
4231         struct ecore_vf_info *vf_info;
4232
4233         vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4234         if (!vf_info)
4235                 return false;
4236
4237         return vf_info->spoof_chk;
4238 }
4239
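/* Configure spoof-checking for a VF. If the VF hasn't started its vport
 * yet, only latch the requested value - it is applied once the vport is
 * started.
 */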
4240 enum _ecore_status_t ecore_iov_spoofchk_set(struct ecore_hwfn *p_hwfn,
4241                                             int vfid, bool val)
4242 {
4243         struct ecore_vf_info *vf;
4244         enum _ecore_status_t rc = ECORE_INVAL;
4245
4246         if (!ecore_iov_pf_sanity_check(p_hwfn, vfid)) {
4247                 DP_NOTICE(p_hwfn, true,
4248                           "SR-IOV sanity check failed, can't set spoofchk\n");
4249                 goto out;
4250         }
4251
4252         vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4253         if (!vf)
4254                 goto out;
4255
4256         if (!ecore_iov_vf_has_vport_instance(p_hwfn, vfid)) {
4257                 /* After VF VPORT start PF will configure spoof check */
4258                 vf->req_spoofchk_val = val;
4259                 rc = ECORE_SUCCESS;
4260                 goto out;
4261         }
4262
4263         rc = __ecore_iov_spoofchk_set(p_hwfn, vf, val);
4264
4265 out:
4266         return rc;
4267 }
4268
4269 u8 ecore_iov_vf_chains_per_pf(struct ecore_hwfn *p_hwfn)
4270 {
4271         u8 max_chains_per_vf = p_hwfn->hw_info.max_chains_per_vf;
4272
4273         max_chains_per_vf = (max_chains_per_vf) ? max_chains_per_vf
4274             : ECORE_MAX_VF_CHAINS_PER_PF;
4275
4276         return max_chains_per_vf;
4277 }
4278
4279 void ecore_iov_get_vf_req_virt_mbx_params(struct ecore_hwfn *p_hwfn,
4280                                           u16 rel_vf_id,
4281                                           void **pp_req_virt_addr,
4282                                           u16 *p_req_virt_size)
4283 {
4284         struct ecore_vf_info *vf_info =
4285             ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4286
4287         if (!vf_info)
4288                 return;
4289
4290         if (pp_req_virt_addr)
4291                 *pp_req_virt_addr = vf_info->vf_mbx.req_virt;
4292
4293         if (p_req_virt_size)
4294                 *p_req_virt_size = sizeof(*vf_info->vf_mbx.req_virt);
4295 }
4296
4297 void ecore_iov_get_vf_reply_virt_mbx_params(struct ecore_hwfn *p_hwfn,
4298                                             u16 rel_vf_id,
4299                                             void **pp_reply_virt_addr,
4300                                             u16 *p_reply_virt_size)
4301 {
4302         struct ecore_vf_info *vf_info =
4303             ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4304
4305         if (!vf_info)
4306                 return;
4307
4308         if (pp_reply_virt_addr)
4309                 *pp_reply_virt_addr = vf_info->vf_mbx.reply_virt;
4310
4311         if (p_reply_virt_size)
4312                 *p_reply_virt_size = sizeof(*vf_info->vf_mbx.reply_virt);
4313 }
4314
4315 #ifdef CONFIG_ECORE_SW_CHANNEL
4316 struct ecore_iov_sw_mbx *ecore_iov_get_vf_sw_mbx(struct ecore_hwfn *p_hwfn,
4317                                                  u16 rel_vf_id)
4318 {
4319         struct ecore_vf_info *vf_info =
4320             ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4321
4322         if (!vf_info)
4323                 return OSAL_NULL;
4324
4325         return &vf_info->vf_mbx.sw_mbx;
4326 }
4327 #endif
4328
4329 bool ecore_iov_is_valid_vfpf_msg_length(u32 length)
4330 {
4331         return (length >= sizeof(struct vfpf_first_tlv) &&
4332                 (length <= sizeof(union vfpf_tlvs)));
4333 }
4334
4335 u32 ecore_iov_pfvf_msg_length(void)
4336 {
4337         return sizeof(union pfvf_tlvs);
4338 }
4339
4340 u8 *ecore_iov_bulletin_get_forced_mac(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
4341 {
4342         struct ecore_vf_info *p_vf;
4343
4344         p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4345         if (!p_vf || !p_vf->bulletin.p_virt)
4346                 return OSAL_NULL;
4347
4348         if (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED)))
4349                 return OSAL_NULL;
4350
4351         return p_vf->bulletin.p_virt->mac;
4352 }
4353
4354 u16 ecore_iov_bulletin_get_forced_vlan(struct ecore_hwfn *p_hwfn,
4355                                        u16 rel_vf_id)
4356 {
4357         struct ecore_vf_info *p_vf;
4358
4359         p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4360         if (!p_vf || !p_vf->bulletin.p_virt)
4361                 return 0;
4362
4363         if (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED)))
4364                 return 0;
4365
4366         return p_vf->bulletin.p_virt->pvid;
4367 }
4368
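/* Configure a Tx rate limit for the VF by programming the rate limiter of
 * its absolute vport.
 */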
4369 enum _ecore_status_t ecore_iov_configure_tx_rate(struct ecore_hwfn *p_hwfn,
4370                                                  struct ecore_ptt *p_ptt,
4371                                                  int vfid, int val)
4372 {
4373         struct ecore_vf_info *vf;
4374         u8 abs_vp_id = 0;
4375         enum _ecore_status_t rc;
4376
4377         vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4378
4379         if (!vf)
4380                 return ECORE_INVAL;
4381
4382         rc = ecore_fw_vport(p_hwfn, vf->vport_id, &abs_vp_id);
4383         if (rc != ECORE_SUCCESS)
4384                 return rc;
4385
4386         return ecore_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val);
4387 }
4388
4389 enum _ecore_status_t ecore_iov_get_vf_stats(struct ecore_hwfn *p_hwfn,
4390                                             struct ecore_ptt *p_ptt,
4391                                             int vfid,
4392                                             struct ecore_eth_stats *p_stats)
4393 {
4394         struct ecore_vf_info *vf;
4395
4396         vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4397         if (!vf)
4398                 return ECORE_INVAL;
4399
4400         if (vf->state != VF_ENABLED)
4401                 return ECORE_INVAL;
4402
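        /* VF statistics IDs follow the PF IDs; the 0x10 offset is assumed
         * to skip the per-PF statistics counters.
         */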
4403         __ecore_get_vport_stats(p_hwfn, p_ptt, p_stats,
4404                                 vf->abs_vf_id + 0x10, false);
4405
4406         return ECORE_SUCCESS;
4407 }
4408
4409 u8 ecore_iov_get_vf_num_rxqs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
4410 {
4411         struct ecore_vf_info *p_vf;
4412
4413         p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4414         if (!p_vf)
4415                 return 0;
4416
4417         return p_vf->num_rxqs;
4418 }
4419
4420 u8 ecore_iov_get_vf_num_active_rxqs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
4421 {
4422         struct ecore_vf_info *p_vf;
4423
4424         p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4425         if (!p_vf)
4426                 return 0;
4427
4428         return p_vf->num_active_rxqs;
4429 }
4430
4431 void *ecore_iov_get_vf_ctx(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
4432 {
4433         struct ecore_vf_info *p_vf;
4434
4435         p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4436         if (!p_vf)
4437                 return OSAL_NULL;
4438
4439         return p_vf->ctx;
4440 }
4441
4442 u8 ecore_iov_get_vf_num_sbs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
4443 {
4444         struct ecore_vf_info *p_vf;
4445
4446         p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4447         if (!p_vf)
4448                 return 0;
4449
4450         return p_vf->num_sbs;
4451 }
4452
4453 bool ecore_iov_is_vf_wait_for_acquire(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
4454 {
4455         struct ecore_vf_info *p_vf;
4456
4457         p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4458         if (!p_vf)
4459                 return false;
4460
4461         return (p_vf->state == VF_FREE);
4462 }
4463
4464 bool ecore_iov_is_vf_acquired_not_initialized(struct ecore_hwfn *p_hwfn,
4465                                               u16 rel_vf_id)
4466 {
4467         struct ecore_vf_info *p_vf;
4468
4469         p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4470         if (!p_vf)
4471                 return false;
4472
4473         return (p_vf->state == VF_ACQUIRED);
4474 }
4475
4476 bool ecore_iov_is_vf_initialized(struct ecore_hwfn *p_hwfn, u16 rel_vf_id)
4477 {
4478         struct ecore_vf_info *p_vf;
4479
4480         p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4481         if (!p_vf)
4482                 return false;
4483
4484         return (p_vf->state == VF_ENABLED);
4485 }
4486
4487 bool ecore_iov_is_vf_started(struct ecore_hwfn *p_hwfn,
4488                              u16 rel_vf_id)
4489 {
4490         struct ecore_vf_info *p_vf;
4491
4492         p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true);
4493         if (!p_vf)
4494                 return false;
4495
4496         return (p_vf->state != VF_FREE && p_vf->state != VF_STOPPED);
4497 }
4498
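/* Return the configured WFQ minimum speed for the VF's vport, or 0 if no
 * minimum rate has been configured.
 */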
4499 enum _ecore_status_t
4500 ecore_iov_get_vf_min_rate(struct ecore_hwfn *p_hwfn, int vfid)
4501 {
4502         struct ecore_wfq_data *vf_vp_wfq;
4503         struct ecore_vf_info *vf_info;
4504
4505         vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true);
4506         if (!vf_info)
4507                 return 0;
4508
4509         vf_vp_wfq = &p_hwfn->qm_info.wfq_data[vf_info->vport_id];
4510
4511         if (vf_vp_wfq->configured)
4512                 return vf_vp_wfq->min_speed;
4513         else
4514                 return 0;
4515 }