/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2016 - 2018 Cavium Inc.
 * All rights reserved.
 * www.cavium.com
 */

#include "bcm_osal.h"

#include "ecore.h"
#include "ecore_status.h"
#include "ecore_hsi_eth.h"
#include "ecore_chain.h"
#include "ecore_spq.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_cxt.h"
#include "ecore_l2.h"
#include "ecore_sp_commands.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore_iro.h"
#include "reg_addr.h"
#include "ecore_int.h"
#include "ecore_hw.h"
#include "ecore_vf.h"
#include "ecore_sriov.h"
#include "ecore_mcp.h"

#define ECORE_MAX_SGES_NUM 16
#define CRC32_POLY 0x1edc6f41

struct ecore_l2_info {
        u32 queues;
        unsigned long **pp_qid_usage;

        /* The lock is meant to synchronize access to the qid usage */
        osal_mutex_t lock;
};

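/* Allocates the per-hwfn L2 info and one qid-usage bitmap per queue zone.
 * A PF takes the number of queue zones from its resource allocation; a VF
 * uses the larger of the rx/tx queue counts granted to it by its PF.
 */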
enum _ecore_status_t ecore_l2_alloc(struct ecore_hwfn *p_hwfn)
{
        struct ecore_l2_info *p_l2_info;
        unsigned long **pp_qids;
        u32 i;

        if (!ECORE_IS_L2_PERSONALITY(p_hwfn))
                return ECORE_SUCCESS;

        p_l2_info = OSAL_VZALLOC(p_hwfn->p_dev, sizeof(*p_l2_info));
        if (!p_l2_info)
                return ECORE_NOMEM;
        p_hwfn->p_l2_info = p_l2_info;

        if (IS_PF(p_hwfn->p_dev)) {
                p_l2_info->queues = RESC_NUM(p_hwfn, ECORE_L2_QUEUE);
        } else {
                u8 rx = 0, tx = 0;

                ecore_vf_get_num_rxqs(p_hwfn, &rx);
                ecore_vf_get_num_txqs(p_hwfn, &tx);

                p_l2_info->queues = (u32)OSAL_MAX_T(u8, rx, tx);
        }

        pp_qids = OSAL_VZALLOC(p_hwfn->p_dev,
                               sizeof(unsigned long *) *
                               p_l2_info->queues);
        if (pp_qids == OSAL_NULL)
                return ECORE_NOMEM;
        p_l2_info->pp_qid_usage = pp_qids;

        for (i = 0; i < p_l2_info->queues; i++) {
                pp_qids[i] = OSAL_VZALLOC(p_hwfn->p_dev,
                                          MAX_QUEUES_PER_QZONE / 8);
                if (pp_qids[i] == OSAL_NULL)
                        return ECORE_NOMEM;
        }

#ifdef CONFIG_ECORE_LOCK_ALLOC
        if (OSAL_MUTEX_ALLOC(p_hwfn, &p_l2_info->lock))
                return ECORE_NOMEM;
#endif

        return ECORE_SUCCESS;
}

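/* Second init phase for the L2 info - only the qid-usage lock needs
 * runtime initialization here.
 */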
void ecore_l2_setup(struct ecore_hwfn *p_hwfn)
{
        if (!ECORE_IS_L2_PERSONALITY(p_hwfn))
                return;

        OSAL_MUTEX_INIT(&p_hwfn->p_l2_info->lock);
}

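/* Tears down the L2 info: the per-zone qid-usage bitmaps, the lock [when
 * allocated] and the structure itself. Safe on partial initialization.
 */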
void ecore_l2_free(struct ecore_hwfn *p_hwfn)
{
        u32 i;

        if (!ECORE_IS_L2_PERSONALITY(p_hwfn))
                return;

        if (p_hwfn->p_l2_info == OSAL_NULL)
                return;

        if (p_hwfn->p_l2_info->pp_qid_usage == OSAL_NULL)
                goto out_l2_info;

        /* Free until hit first uninitialized entry */
        for (i = 0; i < p_hwfn->p_l2_info->queues; i++) {
                if (p_hwfn->p_l2_info->pp_qid_usage[i] == OSAL_NULL)
                        break;
                OSAL_VFREE(p_hwfn->p_dev,
                           p_hwfn->p_l2_info->pp_qid_usage[i]);
                p_hwfn->p_l2_info->pp_qid_usage[i] = OSAL_NULL;
        }

#ifdef CONFIG_ECORE_LOCK_ALLOC
        /* Lock is last to initialize, if everything else was */
        if (i == p_hwfn->p_l2_info->queues)
                OSAL_MUTEX_DEALLOC(&p_hwfn->p_l2_info->lock);
#endif

        OSAL_VFREE(p_hwfn->p_dev, p_hwfn->p_l2_info->pp_qid_usage);
        p_hwfn->p_l2_info->pp_qid_usage = OSAL_NULL;

out_l2_info:
        OSAL_VFREE(p_hwfn->p_dev, p_hwfn->p_l2_info);
        p_hwfn->p_l2_info = OSAL_NULL;
}

/* Finds the first free index in the queue-zone's qid usage bitmap, marks
 * it as taken and records it in the CID. Fails if the queue-zone is out
 * of range or all of its indices are already in use.
 */
static bool ecore_eth_queue_qid_usage_add(struct ecore_hwfn *p_hwfn,
                                          struct ecore_queue_cid *p_cid)
{
        struct ecore_l2_info *p_l2_info = p_hwfn->p_l2_info;
        u16 queue_id = p_cid->rel.queue_id;
        bool b_rc = true;
        u8 first;

        OSAL_MUTEX_ACQUIRE(&p_l2_info->lock);

        if (queue_id >= p_l2_info->queues) {
                DP_NOTICE(p_hwfn, true,
                          "Requested to increase usage for qzone %04x out of %08x\n",
                          queue_id, p_l2_info->queues);
                b_rc = false;
                goto out;
        }

        first = (u8)OSAL_FIND_FIRST_ZERO_BIT(p_l2_info->pp_qid_usage[queue_id],
                                             MAX_QUEUES_PER_QZONE);
        if (first >= MAX_QUEUES_PER_QZONE) {
                b_rc = false;
                goto out;
        }

        OSAL_SET_BIT(first, p_l2_info->pp_qid_usage[queue_id]);
        p_cid->qid_usage_idx = first;

out:
        OSAL_MUTEX_RELEASE(&p_l2_info->lock);
        return b_rc;
}

static void ecore_eth_queue_qid_usage_del(struct ecore_hwfn *p_hwfn,
                                          struct ecore_queue_cid *p_cid)
{
        OSAL_MUTEX_ACQUIRE(&p_hwfn->p_l2_info->lock);

        OSAL_CLEAR_BIT(p_cid->qid_usage_idx,
                       p_hwfn->p_l2_info->pp_qid_usage[p_cid->rel.queue_id]);

        OSAL_MUTEX_RELEASE(&p_hwfn->p_l2_info->lock);
}

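/* Releases a queue CID: returns the firmware CID [PF-owned, non-legacy
 * queues], clears the qid-usage bit [PF queues only] and frees the
 * structure.
 */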
void ecore_eth_queue_cid_release(struct ecore_hwfn *p_hwfn,
                                 struct ecore_queue_cid *p_cid)
{
        bool b_legacy_vf = !!(p_cid->vf_legacy &
                              ECORE_QCID_LEGACY_VF_CID);

        /* VFs' CIDs are 0-based in PF-view, and uninitialized on VF.
         * For legacy vf-queues, the CID doesn't go through here.
         */
        if (IS_PF(p_hwfn->p_dev) && !b_legacy_vf)
                _ecore_cxt_release_cid(p_hwfn, p_cid->cid, p_cid->vfid);

        /* VFs maintain the index inside queue-zone on their own */
        if (p_cid->vfid == ECORE_QUEUE_CID_PF)
                ecore_eth_queue_qid_usage_del(p_hwfn, p_cid);

        OSAL_VFREE(p_hwfn->p_dev, p_cid);
}

/* The internal variant is only meant to be called directly by PFs
 * initializing CIDs for their VFs.
 */
static struct ecore_queue_cid *
_ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn,
                        u16 opaque_fid, u32 cid,
                        struct ecore_queue_start_common_params *p_params,
                        bool b_is_rx,
                        struct ecore_queue_cid_vf_params *p_vf_params)
{
        struct ecore_queue_cid *p_cid;
        enum _ecore_status_t rc;

        p_cid = OSAL_VZALLOC(p_hwfn->p_dev, sizeof(*p_cid));
        if (p_cid == OSAL_NULL)
                return OSAL_NULL;

        p_cid->opaque_fid = opaque_fid;
        p_cid->cid = cid;
        p_cid->p_owner = p_hwfn;

        /* Fill in parameters */
        p_cid->rel.vport_id = p_params->vport_id;
        p_cid->rel.queue_id = p_params->queue_id;
        p_cid->rel.stats_id = p_params->stats_id;
        p_cid->sb_igu_id = p_params->p_sb->igu_sb_id;
        p_cid->b_is_rx = b_is_rx;
        p_cid->sb_idx = p_params->sb_idx;

        /* Fill-in bits related to VFs' queues if information was provided */
        if (p_vf_params != OSAL_NULL) {
                p_cid->vfid = p_vf_params->vfid;
                p_cid->vf_qid = p_vf_params->vf_qid;
                p_cid->vf_legacy = p_vf_params->vf_legacy;
        } else {
                p_cid->vfid = ECORE_QUEUE_CID_PF;
        }

        /* Don't try calculating the absolute indices for VFs */
        if (IS_VF(p_hwfn->p_dev)) {
                p_cid->abs = p_cid->rel;

                goto out;
        }

        /* Calculate the engine-absolute indices of the resources.
         * This would guarantee they're valid later on.
         * In some cases [SBs] we already have the right values.
         */
        rc = ecore_fw_vport(p_hwfn, p_cid->rel.vport_id, &p_cid->abs.vport_id);
        if (rc != ECORE_SUCCESS)
                goto fail;

        rc = ecore_fw_l2_queue(p_hwfn, p_cid->rel.queue_id,
                               &p_cid->abs.queue_id);
        if (rc != ECORE_SUCCESS)
                goto fail;

        /* In case of a PF configuring its VF's queues, the stats-id is already
         * absolute [since there's a single index that's suitable per-VF].
         */
        if (p_cid->vfid == ECORE_QUEUE_CID_PF) {
                rc = ecore_fw_vport(p_hwfn, p_cid->rel.stats_id,
                                    &p_cid->abs.stats_id);
                if (rc != ECORE_SUCCESS)
                        goto fail;
        } else {
                p_cid->abs.stats_id = p_cid->rel.stats_id;
        }

out:
        /* VF-images have provided the qid_usage_idx on their own.
         * Otherwise, we need to allocate a unique one.
         */
        if (!p_vf_params) {
                if (!ecore_eth_queue_qid_usage_add(p_hwfn, p_cid))
                        goto fail;
        } else {
                p_cid->qid_usage_idx = p_vf_params->qid_usage_idx;
        }

        DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                   "opaque_fid: %04x CID %08x vport %02x [%02x] qzone %04x.%02x [%04x] stats %02x [%02x] SB %04x PI %02x\n",
                   p_cid->opaque_fid, p_cid->cid,
                   p_cid->rel.vport_id, p_cid->abs.vport_id,
                   p_cid->rel.queue_id, p_cid->qid_usage_idx,
                   p_cid->abs.queue_id,
                   p_cid->rel.stats_id, p_cid->abs.stats_id,
                   p_cid->sb_igu_id, p_cid->sb_idx);

        return p_cid;

fail:
        OSAL_VFREE(p_hwfn->p_dev, p_cid);
        return OSAL_NULL;
}

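/* Acquires a firmware CID when one is needed [PF queues and non-legacy VF
 * queues] and constructs the queue CID around it; the firmware CID is
 * released again if construction fails.
 */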
struct ecore_queue_cid *
ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn, u16 opaque_fid,
                       struct ecore_queue_start_common_params *p_params,
                       bool b_is_rx,
                       struct ecore_queue_cid_vf_params *p_vf_params)
{
        struct ecore_queue_cid *p_cid;
        u8 vfid = ECORE_CXT_PF_CID;
        bool b_legacy_vf = false;
        u32 cid = 0;

        /* In case of legacy VFs, the CID can be derived from the additional
         * VF parameters - the VF assumes queue X uses CID X, so we can simply
         * use the vf_qid for this purpose as well.
         */
        if (p_vf_params) {
                vfid = p_vf_params->vfid;

                if (p_vf_params->vf_legacy &
                    ECORE_QCID_LEGACY_VF_CID) {
                        b_legacy_vf = true;
                        cid = p_vf_params->vf_qid;
                }
        }

        /* Get a unique firmware CID for this queue, in case it's a PF.
         * VFs don't need a CID as the queue configuration will be done
         * by PF.
         */
        if (IS_PF(p_hwfn->p_dev) && !b_legacy_vf) {
                if (_ecore_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH,
                                           &cid, vfid) != ECORE_SUCCESS) {
                        DP_NOTICE(p_hwfn, true, "Failed to acquire cid\n");
                        return OSAL_NULL;
                }
        }

        p_cid = _ecore_eth_queue_to_cid(p_hwfn, opaque_fid, cid,
                                        p_params, b_is_rx, p_vf_params);
        if ((p_cid == OSAL_NULL) && IS_PF(p_hwfn->p_dev) && !b_legacy_vf)
                _ecore_cxt_release_cid(p_hwfn, cid, vfid);

        return p_cid;
}

static struct ecore_queue_cid *
ecore_eth_queue_to_cid_pf(struct ecore_hwfn *p_hwfn, u16 opaque_fid,
                          bool b_is_rx,
                          struct ecore_queue_start_common_params *p_params)
{
        return ecore_eth_queue_to_cid(p_hwfn, opaque_fid, p_params, b_is_rx,
                                      OSAL_NULL);
}

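/* Sends the VPORT_START ramrod: configures MTU, vlan handling, rx-mode
 * drop-all defaults, strict tx-error behavior and TPA for the new vport.
 */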
enum _ecore_status_t
ecore_sp_eth_vport_start(struct ecore_hwfn *p_hwfn,
                         struct ecore_sp_vport_start_params *p_params)
{
        struct vport_start_ramrod_data *p_ramrod = OSAL_NULL;
        struct ecore_spq_entry *p_ent = OSAL_NULL;
        struct ecore_sp_init_data init_data;
        struct eth_vport_tpa_param *p_tpa;
        u16 rx_mode = 0, tx_err = 0;
        u8 abs_vport_id = 0;
        enum _ecore_status_t rc = ECORE_NOTIMPL;

        rc = ecore_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
        if (rc != ECORE_SUCCESS)
                return rc;

        /* Get SPQ entry */
        OSAL_MEMSET(&init_data, 0, sizeof(init_data));
        init_data.cid = ecore_spq_get_cid(p_hwfn);
        init_data.opaque_fid = p_params->opaque_fid;
        init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

        rc = ecore_sp_init_request(p_hwfn, &p_ent,
                                   ETH_RAMROD_VPORT_START,
                                   PROTOCOLID_ETH, &init_data);
        if (rc != ECORE_SUCCESS)
                return rc;

        p_ramrod = &p_ent->ramrod.vport_start;
        p_ramrod->vport_id = abs_vport_id;

        p_ramrod->mtu = OSAL_CPU_TO_LE16(p_params->mtu);
        p_ramrod->handle_ptp_pkts = p_params->handle_ptp_pkts;
        p_ramrod->inner_vlan_removal_en = p_params->remove_inner_vlan;
        p_ramrod->drop_ttl0_en = p_params->drop_ttl0;
        p_ramrod->untagged = p_params->only_untagged;
        p_ramrod->zero_placement_offset = p_params->zero_placement_offset;

        SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_UCAST_DROP_ALL, 1);
        SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_MCAST_DROP_ALL, 1);

        p_ramrod->rx_mode.state = OSAL_CPU_TO_LE16(rx_mode);

        /* Handle requests for strict behavior on transmission errors */
        SET_FIELD(tx_err, ETH_TX_ERR_VALS_ILLEGAL_VLAN_MODE,
                  p_params->b_err_illegal_vlan_mode ?
                  ETH_TX_ERR_ASSERT_MALICIOUS : 0);
        SET_FIELD(tx_err, ETH_TX_ERR_VALS_PACKET_TOO_SMALL,
                  p_params->b_err_small_pkt ?
                  ETH_TX_ERR_ASSERT_MALICIOUS : 0);
        SET_FIELD(tx_err, ETH_TX_ERR_VALS_ANTI_SPOOFING_ERR,
                  p_params->b_err_anti_spoof ?
                  ETH_TX_ERR_ASSERT_MALICIOUS : 0);
        SET_FIELD(tx_err, ETH_TX_ERR_VALS_ILLEGAL_INBAND_TAGS,
                  p_params->b_err_illegal_inband_mode ?
                  ETH_TX_ERR_ASSERT_MALICIOUS : 0);
        SET_FIELD(tx_err, ETH_TX_ERR_VALS_VLAN_INSERTION_W_INBAND_TAG,
                  p_params->b_err_vlan_insert_with_inband ?
                  ETH_TX_ERR_ASSERT_MALICIOUS : 0);
        SET_FIELD(tx_err, ETH_TX_ERR_VALS_MTU_VIOLATION,
                  p_params->b_err_big_pkt ?
                  ETH_TX_ERR_ASSERT_MALICIOUS : 0);
        SET_FIELD(tx_err, ETH_TX_ERR_VALS_ILLEGAL_CONTROL_FRAME,
                  p_params->b_err_ctrl_frame ?
                  ETH_TX_ERR_ASSERT_MALICIOUS : 0);
        p_ramrod->tx_err_behav.values = OSAL_CPU_TO_LE16(tx_err);

        /* TPA related fields */
        p_tpa = &p_ramrod->tpa_param;
        OSAL_MEMSET(p_tpa, 0, sizeof(struct eth_vport_tpa_param));
        p_tpa->max_buff_num = p_params->max_buffers_per_cqe;

        switch (p_params->tpa_mode) {
        case ECORE_TPA_MODE_GRO:
                p_tpa->tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
                p_tpa->tpa_max_size = (u16)-1;
                p_tpa->tpa_min_size_to_cont = p_params->mtu / 2;
                p_tpa->tpa_min_size_to_start = p_params->mtu / 2;
                p_tpa->tpa_ipv4_en_flg = 1;
                p_tpa->tpa_ipv6_en_flg = 1;
                p_tpa->tpa_ipv4_tunn_en_flg = 1;
                p_tpa->tpa_ipv6_tunn_en_flg = 1;
                p_tpa->tpa_pkt_split_flg = 1;
                p_tpa->tpa_gro_consistent_flg = 1;
                break;
        default:
                break;
        }

        p_ramrod->tx_switching_en = p_params->tx_switching;
#ifndef ASIC_ONLY
        if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
                p_ramrod->tx_switching_en = 0;
#endif

        p_ramrod->ctl_frame_mac_check_en = !!p_params->check_mac;
        p_ramrod->ctl_frame_ethtype_check_en = !!p_params->check_ethtype;

        /* Software Function ID in hwfn (PFs are 0 - 15, VFs are 16 - 135) */
        p_ramrod->sw_fid = ecore_concrete_to_sw_fid(p_params->concrete_fid);

        return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

enum _ecore_status_t
ecore_sp_vport_start(struct ecore_hwfn *p_hwfn,
                     struct ecore_sp_vport_start_params *p_params)
{
        if (IS_VF(p_hwfn->p_dev))
                return ecore_vf_pf_vport_start(p_hwfn, p_params->vport_id,
                                               p_params->mtu,
                                               p_params->remove_inner_vlan,
                                               p_params->tpa_mode,
                                               p_params->max_buffers_per_cqe,
                                               p_params->only_untagged);

        return ecore_sp_eth_vport_start(p_hwfn, p_params);
}

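/* Translates ecore_rss_params into the RSS section of the vport-update
 * ramrod - capabilities, indirection table [absolute queue-ids] and key.
 */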
static enum _ecore_status_t
ecore_sp_vport_update_rss(struct ecore_hwfn *p_hwfn,
                          struct vport_update_ramrod_data *p_ramrod,
                          struct ecore_rss_params *p_rss)
{
        struct eth_vport_rss_config *p_config;
        u16 capabilities = 0;
        int i, table_size;
        enum _ecore_status_t rc = ECORE_SUCCESS;

        if (!p_rss) {
                p_ramrod->common.update_rss_flg = 0;
                return rc;
        }
        p_config = &p_ramrod->rss_config;

        OSAL_BUILD_BUG_ON(ECORE_RSS_IND_TABLE_SIZE !=
                          ETH_RSS_IND_TABLE_ENTRIES_NUM);

        rc = ecore_fw_rss_eng(p_hwfn, p_rss->rss_eng_id, &p_config->rss_id);
        if (rc != ECORE_SUCCESS)
                return rc;

        p_ramrod->common.update_rss_flg = p_rss->update_rss_config;
        p_config->update_rss_capabilities = p_rss->update_rss_capabilities;
        p_config->update_rss_ind_table = p_rss->update_rss_ind_table;
        p_config->update_rss_key = p_rss->update_rss_key;

        p_config->rss_mode = p_rss->rss_enable ?
            ETH_VPORT_RSS_MODE_REGULAR : ETH_VPORT_RSS_MODE_DISABLED;

        p_config->capabilities = 0;

        SET_FIELD(capabilities,
                  ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY,
                  !!(p_rss->rss_caps & ECORE_RSS_IPV4));
        SET_FIELD(capabilities,
                  ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY,
                  !!(p_rss->rss_caps & ECORE_RSS_IPV6));
        SET_FIELD(capabilities,
                  ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY,
                  !!(p_rss->rss_caps & ECORE_RSS_IPV4_TCP));
        SET_FIELD(capabilities,
                  ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY,
                  !!(p_rss->rss_caps & ECORE_RSS_IPV6_TCP));
        SET_FIELD(capabilities,
                  ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY,
                  !!(p_rss->rss_caps & ECORE_RSS_IPV4_UDP));
        SET_FIELD(capabilities,
                  ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY,
                  !!(p_rss->rss_caps & ECORE_RSS_IPV6_UDP));
        p_config->tbl_size = p_rss->rss_table_size_log;
        p_config->capabilities = OSAL_CPU_TO_LE16(capabilities);

        DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
                   "update rss flag %d, rss_mode = %d, update_caps = %d, capabilities = %d, update_ind = %d, update_rss_key = %d\n",
                   p_ramrod->common.update_rss_flg,
                   p_config->rss_mode,
                   p_config->update_rss_capabilities,
                   p_config->capabilities,
                   p_config->update_rss_ind_table, p_config->update_rss_key);

        table_size = OSAL_MIN_T(int, ECORE_RSS_IND_TABLE_SIZE,
                                1 << p_config->tbl_size);
        for (i = 0; i < table_size; i++) {
                struct ecore_queue_cid *p_queue = p_rss->rss_ind_table[i];

                if (!p_queue)
                        return ECORE_INVAL;

                p_config->indirection_table[i] =
                                OSAL_CPU_TO_LE16(p_queue->abs.queue_id);
        }

        DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
                   "Configured RSS indirection table [%d entries]:\n",
                   table_size);
        for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i += 0x10) {
                DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
                           "%04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x\n",
                           OSAL_LE16_TO_CPU(p_config->indirection_table[i]),
                           OSAL_LE16_TO_CPU(p_config->indirection_table[i + 1]),
                           OSAL_LE16_TO_CPU(p_config->indirection_table[i + 2]),
                           OSAL_LE16_TO_CPU(p_config->indirection_table[i + 3]),
                           OSAL_LE16_TO_CPU(p_config->indirection_table[i + 4]),
                           OSAL_LE16_TO_CPU(p_config->indirection_table[i + 5]),
                           OSAL_LE16_TO_CPU(p_config->indirection_table[i + 6]),
                           OSAL_LE16_TO_CPU(p_config->indirection_table[i + 7]),
                           OSAL_LE16_TO_CPU(p_config->indirection_table[i + 8]),
                           OSAL_LE16_TO_CPU(p_config->indirection_table[i + 9]),
                           OSAL_LE16_TO_CPU(p_config->indirection_table[i + 10]),
                           OSAL_LE16_TO_CPU(p_config->indirection_table[i + 11]),
                           OSAL_LE16_TO_CPU(p_config->indirection_table[i + 12]),
                           OSAL_LE16_TO_CPU(p_config->indirection_table[i + 13]),
                           OSAL_LE16_TO_CPU(p_config->indirection_table[i + 14]),
                           OSAL_LE16_TO_CPU(p_config->indirection_table[i + 15]));
        }

        for (i = 0; i < ECORE_RSS_KEY_SIZE; i++)
                p_config->rss_key[i] = OSAL_CPU_TO_LE32(p_rss->rss_key[i]);

        return rc;
}

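/* Converts the accept_flags into the rx/tx mode state words of the ramrod.
 * A DROP_ALL bit is set only when neither the matched nor the unmatched
 * variant of that traffic class is accepted.
 */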
static void
ecore_sp_update_accept_mode(struct ecore_hwfn *p_hwfn,
                            struct vport_update_ramrod_data *p_ramrod,
                            struct ecore_filter_accept_flags accept_flags)
{
        p_ramrod->common.update_rx_mode_flg =
                                        accept_flags.update_rx_mode_config;
        p_ramrod->common.update_tx_mode_flg =
                                        accept_flags.update_tx_mode_config;

#ifndef ASIC_ONLY
        /* On B0 emulation we cannot enable Tx, since this would cause writes
         * to PVFC HW block which isn't implemented in emulation.
         */
        if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
                DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                           "Non-Asic - prevent Tx mode in vport update\n");
                p_ramrod->common.update_tx_mode_flg = 0;
        }
#endif

        /* Set Rx mode accept flags */
        if (p_ramrod->common.update_rx_mode_flg) {
                u8 accept_filter = accept_flags.rx_accept_filter;
                u16 state = 0;

                SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_DROP_ALL,
                          !(!!(accept_filter & ECORE_ACCEPT_UCAST_MATCHED) ||
                           !!(accept_filter & ECORE_ACCEPT_UCAST_UNMATCHED)));

                SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED,
                          !!(accept_filter & ECORE_ACCEPT_UCAST_UNMATCHED));

                SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_DROP_ALL,
                          !(!!(accept_filter & ECORE_ACCEPT_MCAST_MATCHED) ||
                            !!(accept_filter & ECORE_ACCEPT_MCAST_UNMATCHED)));

                SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL,
                          (!!(accept_filter & ECORE_ACCEPT_MCAST_MATCHED) &&
                           !!(accept_filter & ECORE_ACCEPT_MCAST_UNMATCHED)));

                SET_FIELD(state, ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL,
                          !!(accept_filter & ECORE_ACCEPT_BCAST));

                p_ramrod->rx_mode.state = OSAL_CPU_TO_LE16(state);
                DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                           "vport[%02x] p_ramrod->rx_mode.state = 0x%x\n",
                           p_ramrod->common.vport_id, state);
        }

        /* Set Tx mode accept flags */
        if (p_ramrod->common.update_tx_mode_flg) {
                u8 accept_filter = accept_flags.tx_accept_filter;
                u16 state = 0;

                SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_DROP_ALL,
                          !!(accept_filter & ECORE_ACCEPT_NONE));

                SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_DROP_ALL,
                          !!(accept_filter & ECORE_ACCEPT_NONE));

                SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL,
                          (!!(accept_filter & ECORE_ACCEPT_MCAST_MATCHED) &&
                           !!(accept_filter & ECORE_ACCEPT_MCAST_UNMATCHED)));

                SET_FIELD(state, ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL,
                          !!(accept_filter & ECORE_ACCEPT_BCAST));

                p_ramrod->tx_mode.state = OSAL_CPU_TO_LE16(state);
                DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                           "vport[%02x] p_ramrod->tx_mode.state = 0x%x\n",
                           p_ramrod->common.vport_id, state);
        }
}

static void
ecore_sp_vport_update_sge_tpa(struct vport_update_ramrod_data *p_ramrod,
                              struct ecore_sge_tpa_params *p_params)
{
        struct eth_vport_tpa_param *p_tpa;
        u16 val;

        if (!p_params) {
                p_ramrod->common.update_tpa_param_flg = 0;
                p_ramrod->common.update_tpa_en_flg = 0;
                return;
        }

        p_ramrod->common.update_tpa_en_flg = p_params->update_tpa_en_flg;
        p_tpa = &p_ramrod->tpa_param;
        p_tpa->tpa_ipv4_en_flg = p_params->tpa_ipv4_en_flg;
        p_tpa->tpa_ipv6_en_flg = p_params->tpa_ipv6_en_flg;
        p_tpa->tpa_ipv4_tunn_en_flg = p_params->tpa_ipv4_tunn_en_flg;
        p_tpa->tpa_ipv6_tunn_en_flg = p_params->tpa_ipv6_tunn_en_flg;

        p_ramrod->common.update_tpa_param_flg = p_params->update_tpa_param_flg;
        p_tpa->max_buff_num = p_params->max_buffers_per_cqe;
        p_tpa->tpa_pkt_split_flg = p_params->tpa_pkt_split_flg;
        p_tpa->tpa_hdr_data_split_flg = p_params->tpa_hdr_data_split_flg;
        p_tpa->tpa_gro_consistent_flg = p_params->tpa_gro_consistent_flg;
        p_tpa->tpa_max_aggs_num = p_params->tpa_max_aggs_num;
        val = p_params->tpa_max_size;
        p_tpa->tpa_max_size = OSAL_CPU_TO_LE16(val);
        val = p_params->tpa_min_size_to_start;
        p_tpa->tpa_min_size_to_start = OSAL_CPU_TO_LE16(val);
        val = p_params->tpa_min_size_to_cont;
        p_tpa->tpa_min_size_to_cont = OSAL_CPU_TO_LE16(val);
}

static void
ecore_sp_update_mcast_bin(struct vport_update_ramrod_data *p_ramrod,
                          struct ecore_sp_vport_update_params *p_params)
{
        int i;

        OSAL_MEMSET(&p_ramrod->approx_mcast.bins, 0,
                    sizeof(p_ramrod->approx_mcast.bins));

        if (!p_params->update_approx_mcast_flg)
                return;

        p_ramrod->common.update_approx_mcast_flg = 1;
        for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
                u32 *p_bins = p_params->bins;

                p_ramrod->approx_mcast.bins[i] = OSAL_CPU_TO_LE32(p_bins[i]);
        }
}

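/* Builds and posts the VPORT_UPDATE ramrod, folding in activity flags,
 * vlan configuration, tx-switching, RSS, mcast bins, accept mode and TPA.
 * A VF forwards the request to its PF over the vf-pf channel instead.
 */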
enum _ecore_status_t
ecore_sp_vport_update(struct ecore_hwfn *p_hwfn,
                      struct ecore_sp_vport_update_params *p_params,
                      enum spq_mode comp_mode,
                      struct ecore_spq_comp_cb *p_comp_data)
{
        struct ecore_rss_params *p_rss_params = p_params->rss_params;
        struct vport_update_ramrod_data_cmn *p_cmn;
        struct ecore_sp_init_data init_data;
        struct vport_update_ramrod_data *p_ramrod = OSAL_NULL;
        struct ecore_spq_entry *p_ent = OSAL_NULL;
        u8 abs_vport_id = 0, val;
        enum _ecore_status_t rc = ECORE_NOTIMPL;

        if (IS_VF(p_hwfn->p_dev)) {
                rc = ecore_vf_pf_vport_update(p_hwfn, p_params);
                return rc;
        }

        rc = ecore_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
        if (rc != ECORE_SUCCESS)
                return rc;

        /* Get SPQ entry */
        OSAL_MEMSET(&init_data, 0, sizeof(init_data));
        init_data.cid = ecore_spq_get_cid(p_hwfn);
        init_data.opaque_fid = p_params->opaque_fid;
        init_data.comp_mode = comp_mode;
        init_data.p_comp_data = p_comp_data;

        rc = ecore_sp_init_request(p_hwfn, &p_ent,
                                   ETH_RAMROD_VPORT_UPDATE,
                                   PROTOCOLID_ETH, &init_data);
        if (rc != ECORE_SUCCESS)
                return rc;

        /* Copy input params to ramrod according to FW struct */
        p_ramrod = &p_ent->ramrod.vport_update;
        p_cmn = &p_ramrod->common;

        p_cmn->vport_id = abs_vport_id;

        p_cmn->rx_active_flg = p_params->vport_active_rx_flg;
        p_cmn->update_rx_active_flg = p_params->update_vport_active_rx_flg;
        p_cmn->tx_active_flg = p_params->vport_active_tx_flg;
        p_cmn->update_tx_active_flg = p_params->update_vport_active_tx_flg;

        p_cmn->accept_any_vlan = p_params->accept_any_vlan;
        val = p_params->update_accept_any_vlan_flg;
        p_cmn->update_accept_any_vlan_flg = val;

        p_cmn->inner_vlan_removal_en = p_params->inner_vlan_removal_flg;
        val = p_params->update_inner_vlan_removal_flg;
        p_cmn->update_inner_vlan_removal_en_flg = val;

        p_cmn->default_vlan_en = p_params->default_vlan_enable_flg;
        val = p_params->update_default_vlan_enable_flg;
        p_cmn->update_default_vlan_en_flg = val;

        p_cmn->default_vlan = OSAL_CPU_TO_LE16(p_params->default_vlan);
        p_cmn->update_default_vlan_flg = p_params->update_default_vlan_flg;

        p_cmn->silent_vlan_removal_en = p_params->silent_vlan_removal_flg;

        p_ramrod->common.tx_switching_en = p_params->tx_switching_flg;

#ifndef ASIC_ONLY
        if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
                if (p_ramrod->common.tx_switching_en ||
                    p_ramrod->common.update_tx_switching_en_flg) {
                        DP_NOTICE(p_hwfn, false,
                                  "FPGA - why are we seeing tx-switching? Overriding it\n");
                        p_ramrod->common.tx_switching_en = 0;
                        p_ramrod->common.update_tx_switching_en_flg = 1;
                }
#endif
        p_cmn->update_tx_switching_en_flg = p_params->update_tx_switching_flg;

        p_cmn->anti_spoofing_en = p_params->anti_spoofing_en;
        val = p_params->update_anti_spoofing_en_flg;
        p_ramrod->common.update_anti_spoofing_en_flg = val;

        rc = ecore_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params);
        if (rc != ECORE_SUCCESS) {
                /* Return spq entry which is taken in ecore_sp_init_request()*/
                ecore_spq_return_entry(p_hwfn, p_ent);
                return rc;
        }

        /* Update mcast bins for VFs, PF doesn't use this functionality */
        ecore_sp_update_mcast_bin(p_ramrod, p_params);

        ecore_sp_update_accept_mode(p_hwfn, p_ramrod, p_params->accept_flags);
        ecore_sp_vport_update_sge_tpa(p_ramrod, p_params->sge_tpa_params);
        if (p_params->mtu) {
                p_ramrod->common.update_mtu_flg = 1;
                p_ramrod->common.mtu = OSAL_CPU_TO_LE16(p_params->mtu);
        }

        return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

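/* Sends the VPORT_STOP ramrod; a VF uses the vf-pf channel instead */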
enum _ecore_status_t ecore_sp_vport_stop(struct ecore_hwfn *p_hwfn,
                                         u16 opaque_fid, u8 vport_id)
{
        struct vport_stop_ramrod_data *p_ramrod;
        struct ecore_sp_init_data init_data;
        struct ecore_spq_entry *p_ent;
        u8 abs_vport_id = 0;
        enum _ecore_status_t rc;

        if (IS_VF(p_hwfn->p_dev))
                return ecore_vf_pf_vport_stop(p_hwfn);

        rc = ecore_fw_vport(p_hwfn, vport_id, &abs_vport_id);
        if (rc != ECORE_SUCCESS)
                return rc;

        /* Get SPQ entry */
        OSAL_MEMSET(&init_data, 0, sizeof(init_data));
        init_data.cid = ecore_spq_get_cid(p_hwfn);
        init_data.opaque_fid = opaque_fid;
        init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

        rc = ecore_sp_init_request(p_hwfn, &p_ent,
                                   ETH_RAMROD_VPORT_STOP,
                                   PROTOCOLID_ETH, &init_data);
        if (rc != ECORE_SUCCESS)
                return rc;

        p_ramrod = &p_ent->ramrod.vport_stop;
        p_ramrod->vport_id = abs_vport_id;

        return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

static enum _ecore_status_t
ecore_vf_pf_accept_flags(struct ecore_hwfn *p_hwfn,
                         struct ecore_filter_accept_flags *p_accept_flags)
{
        struct ecore_sp_vport_update_params s_params;

        OSAL_MEMSET(&s_params, 0, sizeof(s_params));
        OSAL_MEMCPY(&s_params.accept_flags, p_accept_flags,
                    sizeof(struct ecore_filter_accept_flags));

        return ecore_vf_pf_vport_update(p_hwfn, &s_params);
}

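/* Applies the accept-mode configuration on every hw-function of the
 * device by issuing one vport-update per hwfn.
 */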
enum _ecore_status_t
ecore_filter_accept_cmd(struct ecore_dev *p_dev,
                        u8 vport,
                        struct ecore_filter_accept_flags accept_flags,
                        u8 update_accept_any_vlan,
                        u8 accept_any_vlan,
                        enum spq_mode comp_mode,
                        struct ecore_spq_comp_cb *p_comp_data)
{
        struct ecore_sp_vport_update_params vport_update_params;
        enum _ecore_status_t rc;
        int i;

        /* Prepare and send the vport rx_mode change */
        OSAL_MEMSET(&vport_update_params, 0, sizeof(vport_update_params));
        vport_update_params.vport_id = vport;
        vport_update_params.accept_flags = accept_flags;
        vport_update_params.update_accept_any_vlan_flg = update_accept_any_vlan;
        vport_update_params.accept_any_vlan = accept_any_vlan;

        for_each_hwfn(p_dev, i) {
                struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];

                vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;

                if (IS_VF(p_dev)) {
                        rc = ecore_vf_pf_accept_flags(p_hwfn, &accept_flags);
                        if (rc != ECORE_SUCCESS)
                                return rc;
                        continue;
                }

                rc = ecore_sp_vport_update(p_hwfn, &vport_update_params,
                                           comp_mode, p_comp_data);
                if (rc != ECORE_SUCCESS) {
                        DP_ERR(p_dev, "Update rx_mode failed %d\n", rc);
                        return rc;
                }

                DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                           "Accept filter configured, flags = [Rx]%x [Tx]%x\n",
                           accept_flags.rx_accept_filter,
                           accept_flags.tx_accept_filter);

                if (update_accept_any_vlan)
                        DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                                   "accept_any_vlan=%d configured\n",
                                   accept_any_vlan);
        }

        return ECORE_SUCCESS;
}

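/* Posts the RX_QUEUE_START ramrod for an already-constructed queue CID.
 * The BD chain and CQE PBL addresses must already be DMA-mapped.
 */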
enum _ecore_status_t
ecore_eth_rxq_start_ramrod(struct ecore_hwfn *p_hwfn,
                           struct ecore_queue_cid *p_cid,
                           u16 bd_max_bytes,
                           dma_addr_t bd_chain_phys_addr,
                           dma_addr_t cqe_pbl_addr,
                           u16 cqe_pbl_size)
{
        struct rx_queue_start_ramrod_data *p_ramrod = OSAL_NULL;
        struct ecore_spq_entry *p_ent = OSAL_NULL;
        struct ecore_sp_init_data init_data;
        enum _ecore_status_t rc = ECORE_NOTIMPL;

        DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                   "opaque_fid=0x%x, cid=0x%x, rx_qzone=0x%x, vport_id=0x%x, sb_id=0x%x\n",
                   p_cid->opaque_fid, p_cid->cid, p_cid->abs.queue_id,
                   p_cid->abs.vport_id, p_cid->sb_igu_id);

        /* Get SPQ entry */
        OSAL_MEMSET(&init_data, 0, sizeof(init_data));
        init_data.cid = p_cid->cid;
        init_data.opaque_fid = p_cid->opaque_fid;
        init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

        rc = ecore_sp_init_request(p_hwfn, &p_ent,
                                   ETH_RAMROD_RX_QUEUE_START,
                                   PROTOCOLID_ETH, &init_data);
        if (rc != ECORE_SUCCESS)
                return rc;

        p_ramrod = &p_ent->ramrod.rx_queue_start;

        p_ramrod->sb_id = OSAL_CPU_TO_LE16(p_cid->sb_igu_id);
        p_ramrod->sb_index = p_cid->sb_idx;
        p_ramrod->vport_id = p_cid->abs.vport_id;
        p_ramrod->stats_counter_id = p_cid->abs.stats_id;
        p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id);
        p_ramrod->complete_cqe_flg = 0;
        p_ramrod->complete_event_flg = 1;

        p_ramrod->bd_max_bytes = OSAL_CPU_TO_LE16(bd_max_bytes);
        DMA_REGPAIR_LE(p_ramrod->bd_base, bd_chain_phys_addr);

        p_ramrod->num_of_pbl_pages = OSAL_CPU_TO_LE16(cqe_pbl_size);
        DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr);

        if (p_cid->vfid != ECORE_QUEUE_CID_PF) {
                bool b_legacy_vf = !!(p_cid->vf_legacy &
                                      ECORE_QCID_LEGACY_VF_RX_PROD);

                p_ramrod->vf_rx_prod_index = p_cid->vf_qid;
                DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                           "Queue%s is meant for VF rxq[%02x]\n",
                           b_legacy_vf ? " [legacy]" : "",
                           p_cid->vf_qid);
                p_ramrod->vf_rx_prod_use_zone_a = b_legacy_vf;
        }

        return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

static enum _ecore_status_t
ecore_eth_pf_rx_queue_start(struct ecore_hwfn *p_hwfn,
                            struct ecore_queue_cid *p_cid,
                            u16 bd_max_bytes,
                            dma_addr_t bd_chain_phys_addr,
                            dma_addr_t cqe_pbl_addr,
                            u16 cqe_pbl_size,
                            void OSAL_IOMEM * *pp_prod)
{
        u32 init_prod_val = 0;

        *pp_prod = (u8 OSAL_IOMEM *)
                    p_hwfn->regview +
                    GTT_BAR0_MAP_REG_MSDM_RAM +
                    MSTORM_ETH_PF_PRODS_OFFSET(p_cid->abs.queue_id);

        /* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
        __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
                          (u32 *)(&init_prod_val));

        return ecore_eth_rxq_start_ramrod(p_hwfn, p_cid,
                                          bd_max_bytes,
                                          bd_chain_phys_addr,
                                          cqe_pbl_addr, cqe_pbl_size);
}

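/* Allocates a queue CID for an Rx queue and starts the queue - directly
 * via ramrod for a PF, or through the vf-pf channel for a VF.
 */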
enum _ecore_status_t
ecore_eth_rx_queue_start(struct ecore_hwfn *p_hwfn,
                         u16 opaque_fid,
                         struct ecore_queue_start_common_params *p_params,
                         u16 bd_max_bytes,
                         dma_addr_t bd_chain_phys_addr,
                         dma_addr_t cqe_pbl_addr,
                         u16 cqe_pbl_size,
                         struct ecore_rxq_start_ret_params *p_ret_params)
{
        struct ecore_queue_cid *p_cid;
        enum _ecore_status_t rc;

        /* Allocate a CID for the queue */
        p_cid = ecore_eth_queue_to_cid_pf(p_hwfn, opaque_fid, true, p_params);
        if (p_cid == OSAL_NULL)
                return ECORE_NOMEM;

        if (IS_PF(p_hwfn->p_dev))
                rc = ecore_eth_pf_rx_queue_start(p_hwfn, p_cid,
                                                 bd_max_bytes,
                                                 bd_chain_phys_addr,
                                                 cqe_pbl_addr, cqe_pbl_size,
                                                 &p_ret_params->p_prod);
        else
                rc = ecore_vf_pf_rxq_start(p_hwfn, p_cid,
                                           bd_max_bytes,
                                           bd_chain_phys_addr,
                                           cqe_pbl_addr,
                                           cqe_pbl_size,
                                           &p_ret_params->p_prod);

        /* Provide the caller with the queue CID as an opaque handle,
         * or release it if the queue failed to start.
         */
        if (rc != ECORE_SUCCESS)
                ecore_eth_queue_cid_release(p_hwfn, p_cid);
        else
                p_ret_params->p_handle = (void *)p_cid;

        return rc;
}

enum _ecore_status_t
ecore_sp_eth_rx_queues_update(struct ecore_hwfn *p_hwfn,
                              void **pp_rxq_handles,
                              u8 num_rxqs,
                              u8 complete_cqe_flg,
                              u8 complete_event_flg,
                              enum spq_mode comp_mode,
                              struct ecore_spq_comp_cb *p_comp_data)
{
        struct rx_queue_update_ramrod_data *p_ramrod = OSAL_NULL;
        struct ecore_spq_entry *p_ent = OSAL_NULL;
        struct ecore_sp_init_data init_data;
        struct ecore_queue_cid *p_cid;
        enum _ecore_status_t rc = ECORE_NOTIMPL;
        u8 i;

        if (IS_VF(p_hwfn->p_dev))
                return ecore_vf_pf_rxqs_update(p_hwfn,
                                               (struct ecore_queue_cid **)
                                               pp_rxq_handles,
                                               num_rxqs,
                                               complete_cqe_flg,
                                               complete_event_flg);

        OSAL_MEMSET(&init_data, 0, sizeof(init_data));
        init_data.comp_mode = comp_mode;
        init_data.p_comp_data = p_comp_data;

        for (i = 0; i < num_rxqs; i++) {
                p_cid = ((struct ecore_queue_cid **)pp_rxq_handles)[i];

                /* Get SPQ entry */
                init_data.cid = p_cid->cid;
                init_data.opaque_fid = p_cid->opaque_fid;

                rc = ecore_sp_init_request(p_hwfn, &p_ent,
                                           ETH_RAMROD_RX_QUEUE_UPDATE,
                                           PROTOCOLID_ETH, &init_data);
                if (rc != ECORE_SUCCESS)
                        return rc;

                p_ramrod = &p_ent->ramrod.rx_queue_update;
                p_ramrod->vport_id = p_cid->abs.vport_id;

                p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id);
                p_ramrod->complete_cqe_flg = complete_cqe_flg;
                p_ramrod->complete_event_flg = complete_event_flg;

                rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
                if (rc != ECORE_SUCCESS)
                        return rc;
        }

        return rc;
}

static enum _ecore_status_t
ecore_eth_pf_rx_queue_stop(struct ecore_hwfn *p_hwfn,
                           struct ecore_queue_cid *p_cid,
                           bool b_eq_completion_only,
                           bool b_cqe_completion)
{
        struct rx_queue_stop_ramrod_data *p_ramrod = OSAL_NULL;
        struct ecore_spq_entry *p_ent = OSAL_NULL;
        struct ecore_sp_init_data init_data;
        enum _ecore_status_t rc;

        OSAL_MEMSET(&init_data, 0, sizeof(init_data));
        init_data.cid = p_cid->cid;
        init_data.opaque_fid = p_cid->opaque_fid;
        init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

        rc = ecore_sp_init_request(p_hwfn, &p_ent,
                                   ETH_RAMROD_RX_QUEUE_STOP,
                                   PROTOCOLID_ETH, &init_data);
        if (rc != ECORE_SUCCESS)
                return rc;

        p_ramrod = &p_ent->ramrod.rx_queue_stop;
        p_ramrod->vport_id = p_cid->abs.vport_id;
        p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id);

        /* Cleaning the queue requires the completion to arrive there.
         * In addition, VFs require the answer to come as eqe to PF.
         */
        p_ramrod->complete_cqe_flg = ((p_cid->vfid == ECORE_QUEUE_CID_PF) &&
                                      !b_eq_completion_only) ||
                                     b_cqe_completion;
        p_ramrod->complete_event_flg = (p_cid->vfid != ECORE_QUEUE_CID_PF) ||
                                       b_eq_completion_only;

        return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

enum _ecore_status_t ecore_eth_rx_queue_stop(struct ecore_hwfn *p_hwfn,
                                             void *p_rxq,
                                             bool eq_completion_only,
                                             bool cqe_completion)
{
        struct ecore_queue_cid *p_cid = (struct ecore_queue_cid *)p_rxq;
        enum _ecore_status_t rc = ECORE_NOTIMPL;

        if (IS_PF(p_hwfn->p_dev))
                rc = ecore_eth_pf_rx_queue_stop(p_hwfn, p_cid,
                                                eq_completion_only,
                                                cqe_completion);
        else
                rc = ecore_vf_pf_rxq_stop(p_hwfn, p_cid, cqe_completion);

        if (rc == ECORE_SUCCESS)
                ecore_eth_queue_cid_release(p_hwfn, p_cid);
        return rc;
}

enum _ecore_status_t
ecore_eth_txq_start_ramrod(struct ecore_hwfn *p_hwfn,
                           struct ecore_queue_cid *p_cid,
                           dma_addr_t pbl_addr, u16 pbl_size,
                           u16 pq_id)
{
        struct tx_queue_start_ramrod_data *p_ramrod = OSAL_NULL;
        struct ecore_spq_entry *p_ent = OSAL_NULL;
        struct ecore_sp_init_data init_data;
        enum _ecore_status_t rc = ECORE_NOTIMPL;

        /* Get SPQ entry */
        OSAL_MEMSET(&init_data, 0, sizeof(init_data));
        init_data.cid = p_cid->cid;
        init_data.opaque_fid = p_cid->opaque_fid;
        init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

        rc = ecore_sp_init_request(p_hwfn, &p_ent,
                                   ETH_RAMROD_TX_QUEUE_START,
                                   PROTOCOLID_ETH, &init_data);
        if (rc != ECORE_SUCCESS)
                return rc;

        p_ramrod = &p_ent->ramrod.tx_queue_start;
        p_ramrod->vport_id = p_cid->abs.vport_id;

        p_ramrod->sb_id = OSAL_CPU_TO_LE16(p_cid->sb_igu_id);
        p_ramrod->sb_index = p_cid->sb_idx;
        p_ramrod->stats_counter_id = p_cid->abs.stats_id;

        p_ramrod->queue_zone_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id);
        p_ramrod->same_as_last_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id);

        p_ramrod->pbl_size = OSAL_CPU_TO_LE16(pbl_size);
        DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, pbl_addr);

        p_ramrod->qm_pq_id = OSAL_CPU_TO_LE16(pq_id);

        return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

static enum _ecore_status_t
ecore_eth_pf_tx_queue_start(struct ecore_hwfn *p_hwfn,
                            struct ecore_queue_cid *p_cid,
                            u8 tc,
                            dma_addr_t pbl_addr, u16 pbl_size,
                            void OSAL_IOMEM * *pp_doorbell)
{
        enum _ecore_status_t rc;
        u16 pq_id;

        /* TODO - set tc in the pq_params for multi-cos.
         * If pacing is enabled then select queue according to
         * rate limiter availability otherwise select queue based
         * on multi cos.
         */
        if (IS_ECORE_PACING(p_hwfn))
                pq_id = ecore_get_cm_pq_idx_rl(p_hwfn, p_cid->rel.queue_id);
        else
                pq_id = ecore_get_cm_pq_idx_mcos(p_hwfn, tc);

        rc = ecore_eth_txq_start_ramrod(p_hwfn, p_cid, pbl_addr,
                                        pbl_size, pq_id);
        if (rc != ECORE_SUCCESS)
                return rc;

        /* Provide the caller with the necessary return values */
        *pp_doorbell = (u8 OSAL_IOMEM *)
                       p_hwfn->doorbells +
                       DB_ADDR(p_cid->cid, DQ_DEMS_LEGACY);

        return ECORE_SUCCESS;
}

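/* Allocates a queue CID for a Tx queue and starts the queue; on success
 * the caller receives the doorbell address and an opaque queue handle.
 */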
enum _ecore_status_t
ecore_eth_tx_queue_start(struct ecore_hwfn *p_hwfn, u16 opaque_fid,
                         struct ecore_queue_start_common_params *p_params,
                         u8 tc,
                         dma_addr_t pbl_addr, u16 pbl_size,
                         struct ecore_txq_start_ret_params *p_ret_params)
{
        struct ecore_queue_cid *p_cid;
        enum _ecore_status_t rc;

        p_cid = ecore_eth_queue_to_cid_pf(p_hwfn, opaque_fid, false, p_params);
        if (p_cid == OSAL_NULL)
                return ECORE_INVAL;

        if (IS_PF(p_hwfn->p_dev))
                rc = ecore_eth_pf_tx_queue_start(p_hwfn, p_cid, tc,
                                                 pbl_addr, pbl_size,
                                                 &p_ret_params->p_doorbell);
        else
                rc = ecore_vf_pf_txq_start(p_hwfn, p_cid,
                                           pbl_addr, pbl_size,
                                           &p_ret_params->p_doorbell);

        if (rc != ECORE_SUCCESS)
                ecore_eth_queue_cid_release(p_hwfn, p_cid);
        else
                p_ret_params->p_handle = (void *)p_cid;

        return rc;
}

static enum _ecore_status_t
ecore_eth_pf_tx_queue_stop(struct ecore_hwfn *p_hwfn,
                           struct ecore_queue_cid *p_cid)
{
        struct ecore_spq_entry *p_ent = OSAL_NULL;
        struct ecore_sp_init_data init_data;
        enum _ecore_status_t rc;

        OSAL_MEMSET(&init_data, 0, sizeof(init_data));
        init_data.cid = p_cid->cid;
        init_data.opaque_fid = p_cid->opaque_fid;
        init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

        rc = ecore_sp_init_request(p_hwfn, &p_ent,
                                   ETH_RAMROD_TX_QUEUE_STOP,
                                   PROTOCOLID_ETH, &init_data);
        if (rc != ECORE_SUCCESS)
                return rc;

        return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

enum _ecore_status_t ecore_eth_tx_queue_stop(struct ecore_hwfn *p_hwfn,
                                             void *p_handle)
{
        struct ecore_queue_cid *p_cid = (struct ecore_queue_cid *)p_handle;
        enum _ecore_status_t rc;

        if (IS_PF(p_hwfn->p_dev))
                rc = ecore_eth_pf_tx_queue_stop(p_hwfn, p_cid);
        else
                rc = ecore_vf_pf_txq_stop(p_hwfn, p_cid);

        if (rc == ECORE_SUCCESS)
                ecore_eth_queue_cid_release(p_hwfn, p_cid);
        return rc;
}

static enum eth_filter_action
ecore_filter_action(enum ecore_filter_opcode opcode)
{
        enum eth_filter_action action = MAX_ETH_FILTER_ACTION;

        switch (opcode) {
        case ECORE_FILTER_ADD:
                action = ETH_FILTER_ACTION_ADD;
                break;
        case ECORE_FILTER_REMOVE:
                action = ETH_FILTER_ACTION_REMOVE;
                break;
        case ECORE_FILTER_FLUSH:
                action = ETH_FILTER_ACTION_REMOVE_ALL;
                break;
        default:
                action = MAX_ETH_FILTER_ACTION;
        }

        return action;
}

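/* Common preparation for unicast filter ramrods: resolves absolute vport
 * ids, allocates the SPQ entry and fills the filter command header
 * according to the requested opcode and filter type.
 */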
1305 static enum _ecore_status_t
1306 ecore_filter_ucast_common(struct ecore_hwfn *p_hwfn,
1307                           u16 opaque_fid,
1308                           struct ecore_filter_ucast *p_filter_cmd,
1309                           struct vport_filter_update_ramrod_data **pp_ramrod,
1310                           struct ecore_spq_entry **pp_ent,
1311                           enum spq_mode comp_mode,
1312                           struct ecore_spq_comp_cb *p_comp_data)
1313 {
1314         u8 vport_to_add_to = 0, vport_to_remove_from = 0;
1315         struct vport_filter_update_ramrod_data *p_ramrod;
1316         struct eth_filter_cmd *p_first_filter;
1317         struct eth_filter_cmd *p_second_filter;
1318         struct ecore_sp_init_data init_data;
1319         enum eth_filter_action action;
1320         enum _ecore_status_t rc;
1321
1322         rc = ecore_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from,
1323                             &vport_to_remove_from);
1324         if (rc != ECORE_SUCCESS)
1325                 return rc;
1326
1327         rc = ecore_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to,
1328                             &vport_to_add_to);
1329         if (rc != ECORE_SUCCESS)
1330                 return rc;
1331
1332         /* Get SPQ entry */
1333         OSAL_MEMSET(&init_data, 0, sizeof(init_data));
1334         init_data.cid = ecore_spq_get_cid(p_hwfn);
1335         init_data.opaque_fid = opaque_fid;
1336         init_data.comp_mode = comp_mode;
1337         init_data.p_comp_data = p_comp_data;
1338
1339         rc = ecore_sp_init_request(p_hwfn, pp_ent,
1340                                    ETH_RAMROD_FILTERS_UPDATE,
1341                                    PROTOCOLID_ETH, &init_data);
1342         if (rc != ECORE_SUCCESS)
1343                 return rc;
1344
1345         *pp_ramrod = &(*pp_ent)->ramrod.vport_filter_update;
1346         p_ramrod = *pp_ramrod;
1347         p_ramrod->filter_cmd_hdr.rx = p_filter_cmd->is_rx_filter ? 1 : 0;
1348         p_ramrod->filter_cmd_hdr.tx = p_filter_cmd->is_tx_filter ? 1 : 0;
1349
1350 #ifndef ASIC_ONLY
1351         if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
1352                 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
1353                            "Non-ASIC - prevent Tx filters\n");
1354                 p_ramrod->filter_cmd_hdr.tx = 0;
1355         }
1356 #endif
1357
1358         switch (p_filter_cmd->opcode) {
1359         case ECORE_FILTER_REPLACE:
1360         case ECORE_FILTER_MOVE:
1361                 p_ramrod->filter_cmd_hdr.cmd_cnt = 2;
1362                 break;
1363         default:
1364                 p_ramrod->filter_cmd_hdr.cmd_cnt = 1;
1365                 break;
1366         }
1367
1368         p_first_filter = &p_ramrod->filter_cmds[0];
1369         p_second_filter = &p_ramrod->filter_cmds[1];
1370
1371         switch (p_filter_cmd->type) {
1372         case ECORE_FILTER_MAC:
1373                 p_first_filter->type = ETH_FILTER_TYPE_MAC;
1374                 break;
1375         case ECORE_FILTER_VLAN:
1376                 p_first_filter->type = ETH_FILTER_TYPE_VLAN;
1377                 break;
1378         case ECORE_FILTER_MAC_VLAN:
1379                 p_first_filter->type = ETH_FILTER_TYPE_PAIR;
1380                 break;
1381         case ECORE_FILTER_INNER_MAC:
1382                 p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC;
1383                 break;
1384         case ECORE_FILTER_INNER_VLAN:
1385                 p_first_filter->type = ETH_FILTER_TYPE_INNER_VLAN;
1386                 break;
1387         case ECORE_FILTER_INNER_PAIR:
1388                 p_first_filter->type = ETH_FILTER_TYPE_INNER_PAIR;
1389                 break;
1390         case ECORE_FILTER_INNER_MAC_VNI_PAIR:
1391                 p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR;
1392                 break;
1393         case ECORE_FILTER_MAC_VNI_PAIR:
1394                 p_first_filter->type = ETH_FILTER_TYPE_MAC_VNI_PAIR;
1395                 break;
1396         case ECORE_FILTER_VNI:
1397                 p_first_filter->type = ETH_FILTER_TYPE_VNI;
1398                 break;
1399         case ECORE_FILTER_UNUSED: /* @DPDK */
1400                 p_first_filter->type = MAX_ETH_FILTER_TYPE;
1401                 break;
1402         }
1403
1404         if ((p_first_filter->type == ETH_FILTER_TYPE_MAC) ||
1405             (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
1406             (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC) ||
1407             (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR) ||
1408             (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
1409             (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR))
1410                 ecore_set_fw_mac_addr(&p_first_filter->mac_msb,
1411                                       &p_first_filter->mac_mid,
1412                                       &p_first_filter->mac_lsb,
1413                                       (u8 *)p_filter_cmd->mac);
1414
1415         if ((p_first_filter->type == ETH_FILTER_TYPE_VLAN) ||
1416             (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
1417             (p_first_filter->type == ETH_FILTER_TYPE_INNER_VLAN) ||
1418             (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR))
1419                 p_first_filter->vlan_id = OSAL_CPU_TO_LE16(p_filter_cmd->vlan);
1420
1421         if ((p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
1422             (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR) ||
1423             (p_first_filter->type == ETH_FILTER_TYPE_VNI))
1424                 p_first_filter->vni = OSAL_CPU_TO_LE32(p_filter_cmd->vni);
1425
1426         if (p_filter_cmd->opcode == ECORE_FILTER_MOVE) {
1427                 p_second_filter->type = p_first_filter->type;
1428                 p_second_filter->mac_msb = p_first_filter->mac_msb;
1429                 p_second_filter->mac_mid = p_first_filter->mac_mid;
1430                 p_second_filter->mac_lsb = p_first_filter->mac_lsb;
1431                 p_second_filter->vlan_id = p_first_filter->vlan_id;
1432                 p_second_filter->vni = p_first_filter->vni;
1433
1434                 p_first_filter->action = ETH_FILTER_ACTION_REMOVE;
1435
1436                 p_first_filter->vport_id = vport_to_remove_from;
1437
1438                 p_second_filter->action = ETH_FILTER_ACTION_ADD;
1439                 p_second_filter->vport_id = vport_to_add_to;
1440         } else if (p_filter_cmd->opcode == ECORE_FILTER_REPLACE) {
1441                 p_first_filter->vport_id = vport_to_add_to;
1442                 OSAL_MEMCPY(p_second_filter, p_first_filter,
1443                             sizeof(*p_second_filter));
1444                 p_first_filter->action = ETH_FILTER_ACTION_REMOVE_ALL;
1445                 p_second_filter->action = ETH_FILTER_ACTION_ADD;
1446         } else {
1447                 action = ecore_filter_action(p_filter_cmd->opcode);
1448
1449                 if (action == MAX_ETH_FILTER_ACTION) {
1450                         DP_NOTICE(p_hwfn, true,
1451                                   "Filter opcode %d is not supported yet\n",
1452                                   p_filter_cmd->opcode);
1453                         return ECORE_NOTIMPL;
1454                 }
1455
1456                 p_first_filter->action = action;
1457                 p_first_filter->vport_id =
1458                     (p_filter_cmd->opcode == ECORE_FILTER_REMOVE) ?
1459                     vport_to_remove_from : vport_to_add_to;
1460         }
1461
1462         return ECORE_SUCCESS;
1463 }
1464
1465 enum _ecore_status_t
1466 ecore_sp_eth_filter_ucast(struct ecore_hwfn *p_hwfn,
1467                           u16 opaque_fid,
1468                           struct ecore_filter_ucast *p_filter_cmd,
1469                           enum spq_mode comp_mode,
1470                           struct ecore_spq_comp_cb *p_comp_data)
1471 {
1472         struct vport_filter_update_ramrod_data *p_ramrod = OSAL_NULL;
1473         struct ecore_spq_entry *p_ent = OSAL_NULL;
1474         struct eth_filter_cmd_header *p_header;
1475         enum _ecore_status_t rc;
1476
1477         rc = ecore_filter_ucast_common(p_hwfn, opaque_fid, p_filter_cmd,
1478                                        &p_ramrod, &p_ent,
1479                                        comp_mode, p_comp_data);
1480         if (rc != ECORE_SUCCESS) {
1481                 DP_ERR(p_hwfn, "Unicast filter command preparation failed %d\n", rc);
1482                 return rc;
1483         }
1484         p_header = &p_ramrod->filter_cmd_hdr;
1485         p_header->assert_on_error = p_filter_cmd->assert_on_error;
1486
1487         rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
1488         if (rc != ECORE_SUCCESS) {
1489                 DP_ERR(p_hwfn, "Unicast filter command failed %d\n", rc);
1490                 return rc;
1491         }
1492
1493         DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
1494                    "Unicast filter configured, opcode = %s, type = %s, cmd_cnt = %d, is_rx_filter = %d, is_tx_filter = %d\n",
1495                    (p_filter_cmd->opcode == ECORE_FILTER_ADD) ? "ADD" :
1496                    ((p_filter_cmd->opcode == ECORE_FILTER_REMOVE) ?
1497                     "REMOVE" :
1498                     ((p_filter_cmd->opcode == ECORE_FILTER_MOVE) ?
1499                      "MOVE" : "REPLACE")),
1500                    (p_filter_cmd->type == ECORE_FILTER_MAC) ? "MAC" :
1501                    ((p_filter_cmd->type == ECORE_FILTER_VLAN) ?
1502                     "VLAN" : "MAC & VLAN"),
1503                    p_ramrod->filter_cmd_hdr.cmd_cnt,
1504                    p_filter_cmd->is_rx_filter, p_filter_cmd->is_tx_filter);
1505         DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
1506                    "vport_to_add_to = %d, vport_to_remove_from = %d, mac = %02x:%02x:%02x:%02x:%02x:%02x, vlan = %d\n",
1507                    p_filter_cmd->vport_to_add_to,
1508                    p_filter_cmd->vport_to_remove_from,
1509                    p_filter_cmd->mac[0], p_filter_cmd->mac[1],
1510                    p_filter_cmd->mac[2], p_filter_cmd->mac[3],
1511                    p_filter_cmd->mac[4], p_filter_cmd->mac[5],
1512                    p_filter_cmd->vlan);
1513
1514         return ECORE_SUCCESS;
1515 }
1516
1517 /*******************************************************************************
1518  * Description:
1519  *         Calculates a bit-wise CRC32 (CRC32_POLY) over a buffer.
1520  *         Note: crc32_length MUST be a multiple of 8.
1521  * Return: The CRC32 result; on invalid input the seed is returned unchanged.
1522  ******************************************************************************/
1523 static u32 ecore_calc_crc32c(u8 *crc32_packet, u32 crc32_length, u32 crc32_seed)
1524 {
1525         u32 byte = 0, bit = 0, crc32_result = crc32_seed;
1526         u8 msb = 0, current_byte = 0;
1527
1528         if ((crc32_packet == OSAL_NULL) ||
1529             (crc32_length == 0) || ((crc32_length % 8) != 0)) {
1530                 return crc32_result;
1531         }
1532
1533         for (byte = 0; byte < crc32_length; byte++) {
1534                 current_byte = crc32_packet[byte];
1535                 for (bit = 0; bit < 8; bit++) {
1536                         msb = (u8)(crc32_result >> 31);
1537                         crc32_result = crc32_result << 1;
1538                         if (msb != (0x1 & (current_byte >> bit))) {
1539                                 crc32_result = crc32_result ^ CRC32_POLY;
1540                                 crc32_result |= 1;
1541                         }
1542                 }
1543         }
1544
1545         return crc32_result;
1546 }
1547
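/* ecore_calc_crc32c() requires a length that is a multiple of 8 bytes, so
 * the 6-byte MAC is first copied into a zeroed 8-byte buffer.
 */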
1548 static u32 ecore_crc32c_le(u32 seed, u8 *mac)
1549 {
1550         u32 packet_buf[2] = { 0 };
1551
1552         OSAL_MEMCPY((u8 *)(&packet_buf[0]), &mac[0], 6);
1553         return ecore_calc_crc32c((u8 *)packet_buf, 8, seed);
1554 }
1555
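/* Hash a multicast MAC into one of 256 approximate-match bins; only the low
 * byte of the CRC is used as the bin index.
 */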
1556 u8 ecore_mcast_bin_from_mac(u8 *mac)
1557 {
1558         u32 crc = ecore_crc32c_le(ETH_MULTICAST_BIN_FROM_MAC_SEED, mac);
1559
1560         return crc & 0xff;
1561 }
1562
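/* Program a vport's approximate multicast bin vector via a VPORT_UPDATE
 * ramrod. ADD is a full set operation: the vector is rebuilt from the
 * supplied MAC list; REMOVE simply leaves the cleared vector in place.
 */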
1563 static enum _ecore_status_t
1564 ecore_sp_eth_filter_mcast(struct ecore_hwfn *p_hwfn,
1565                           struct ecore_filter_mcast *p_filter_cmd,
1566                           enum spq_mode comp_mode,
1567                           struct ecore_spq_comp_cb *p_comp_data)
1568 {
1569         struct vport_update_ramrod_data *p_ramrod = OSAL_NULL;
1570         u32 bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
1571         struct ecore_spq_entry *p_ent = OSAL_NULL;
1572         struct ecore_sp_init_data init_data;
1573         u8 abs_vport_id = 0;
1574         enum _ecore_status_t rc;
1575         int i;
1576
1577         if (p_filter_cmd->opcode == ECORE_FILTER_ADD)
1578                 rc = ecore_fw_vport(p_hwfn,
1579                                     p_filter_cmd->vport_to_add_to,
1580                                     &abs_vport_id);
1581         else
1582                 rc = ecore_fw_vport(p_hwfn,
1583                                     p_filter_cmd->vport_to_remove_from,
1584                                     &abs_vport_id);
1585         if (rc != ECORE_SUCCESS)
1586                 return rc;
1587
1588         /* Get SPQ entry */
1589         OSAL_MEMSET(&init_data, 0, sizeof(init_data));
1590         init_data.cid = ecore_spq_get_cid(p_hwfn);
1591         init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
1592         init_data.comp_mode = comp_mode;
1593         init_data.p_comp_data = p_comp_data;
1594
1595         rc = ecore_sp_init_request(p_hwfn, &p_ent,
1596                                    ETH_RAMROD_VPORT_UPDATE,
1597                                    PROTOCOLID_ETH, &init_data);
1598         if (rc != ECORE_SUCCESS) {
1599                 DP_ERR(p_hwfn, "Multicast command failed %d\n", rc);
1600                 return rc;
1601         }
1602
1603         p_ramrod = &p_ent->ramrod.vport_update;
1604         p_ramrod->common.update_approx_mcast_flg = 1;
1605
1606         /* explicitly clear out the entire vector */
1607         OSAL_MEMSET(&p_ramrod->approx_mcast.bins,
1608                     0, sizeof(p_ramrod->approx_mcast.bins));
1609         OSAL_MEMSET(bins, 0, sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS);
1610         /* The filter ADD op is an explicit set op and removes
1611          * any existing filters for the vport.
1612          */
1613         if (p_filter_cmd->opcode == ECORE_FILTER_ADD) {
1614                 for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
1615                         u32 bit;
1616
1617                         bit = ecore_mcast_bin_from_mac(p_filter_cmd->mac[i]);
1618                         bins[bit / 32] |= 1U << (bit % 32);
1619                 }
1620
1621                 /* Convert to correct endianness */
1622                 for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
1623                         struct vport_update_ramrod_mcast *p_ramrod_bins;
1624
1625                         p_ramrod_bins = &p_ramrod->approx_mcast;
1626                         p_ramrod_bins->bins[i] = OSAL_CPU_TO_LE32(bins[i]);
1627                 }
1628         }
1629
1630         p_ramrod->common.vport_id = abs_vport_id;
1631
1632         rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
1633         if (rc != ECORE_SUCCESS)
1634                 DP_ERR(p_hwfn, "Multicast filter command failed %d\n", rc);
1635
1636         return rc;
1637 }
1638
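/* Device-level multicast filter configuration, applied on every hwfn. A VF
 * forwards the request to its PF instead of posting the ramrod itself.
 */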
1639 enum _ecore_status_t
1640 ecore_filter_mcast_cmd(struct ecore_dev *p_dev,
1641                        struct ecore_filter_mcast *p_filter_cmd,
1642                        enum spq_mode comp_mode,
1643                        struct ecore_spq_comp_cb *p_comp_data)
1644 {
1645         enum _ecore_status_t rc = ECORE_SUCCESS;
1646         int i;
1647
1648         /* only ADD and REMOVE operations are supported for multi-cast */
1649         if ((p_filter_cmd->opcode != ECORE_FILTER_ADD &&
1650              p_filter_cmd->opcode != ECORE_FILTER_REMOVE) ||
1651             (p_filter_cmd->num_mc_addrs > ECORE_MAX_MC_ADDRS)) {
1652                 return ECORE_INVAL;
1653         }
1654
1655         for_each_hwfn(p_dev, i) {
1656                 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
1657
1658                 if (IS_VF(p_dev)) {
1659                         ecore_vf_pf_filter_mcast(p_hwfn, p_filter_cmd);
1660                         continue;
1661                 }
1662
1663                 rc = ecore_sp_eth_filter_mcast(p_hwfn,
1664                                                p_filter_cmd,
1665                                                comp_mode, p_comp_data);
1666                 if (rc != ECORE_SUCCESS)
1667                         break;
1668         }
1669
1670         return rc;
1671 }
1672
1673 enum _ecore_status_t
1674 ecore_filter_ucast_cmd(struct ecore_dev *p_dev,
1675                        struct ecore_filter_ucast *p_filter_cmd,
1676                        enum spq_mode comp_mode,
1677                        struct ecore_spq_comp_cb *p_comp_data)
1678 {
1679         enum _ecore_status_t rc = ECORE_SUCCESS;
1680         int i;
1681
1682         for_each_hwfn(p_dev, i) {
1683                 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
1684                 u16 opaque_fid;
1685
1686                 if (IS_VF(p_dev)) {
1687                         rc = ecore_vf_pf_filter_ucast(p_hwfn, p_filter_cmd);
1688                         continue;
1689                 }
1690
1691                 opaque_fid = p_hwfn->hw_info.opaque_fid;
1692                 rc = ecore_sp_eth_filter_ucast(p_hwfn,
1693                                                opaque_fid,
1694                                                p_filter_cmd,
1695                                                comp_mode, p_comp_data);
1696                 if (rc != ECORE_SUCCESS)
1697                         break;
1698         }
1699
1700         return rc;
1701 }
1702
1703 /* Statistics related code */
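/* Each storm maintains its own statistics block. A PF reads the blocks
 * directly from the storm RAM via BAR0; a VF uses the addresses and lengths
 * the PF supplied in the acquire response.
 */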
1704 static void __ecore_get_vport_pstats_addrlen(struct ecore_hwfn *p_hwfn,
1705                                              u32 *p_addr, u32 *p_len,
1706                                              u16 statistics_bin)
1707 {
1708         if (IS_PF(p_hwfn->p_dev)) {
1709                 *p_addr = BAR0_MAP_REG_PSDM_RAM +
1710                     PSTORM_QUEUE_STAT_OFFSET(statistics_bin);
1711                 *p_len = sizeof(struct eth_pstorm_per_queue_stat);
1712         } else {
1713                 struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
1714                 struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
1715
1716                 *p_addr = p_resp->pfdev_info.stats_info.pstats.address;
1717                 *p_len = p_resp->pfdev_info.stats_info.pstats.len;
1718         }
1719 }
1720
1721 static void __ecore_get_vport_pstats(struct ecore_hwfn *p_hwfn,
1722                                      struct ecore_ptt *p_ptt,
1723                                      struct ecore_eth_stats *p_stats,
1724                                      u16 statistics_bin)
1725 {
1726         struct eth_pstorm_per_queue_stat pstats;
1727         u32 pstats_addr = 0, pstats_len = 0;
1728
1729         __ecore_get_vport_pstats_addrlen(p_hwfn, &pstats_addr, &pstats_len,
1730                                          statistics_bin);
1731
1732         OSAL_MEMSET(&pstats, 0, sizeof(pstats));
1733         ecore_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, pstats_len);
1734
1735         p_stats->common.tx_ucast_bytes +=
1736                 HILO_64_REGPAIR(pstats.sent_ucast_bytes);
1737         p_stats->common.tx_mcast_bytes +=
1738                 HILO_64_REGPAIR(pstats.sent_mcast_bytes);
1739         p_stats->common.tx_bcast_bytes +=
1740                 HILO_64_REGPAIR(pstats.sent_bcast_bytes);
1741         p_stats->common.tx_ucast_pkts +=
1742                 HILO_64_REGPAIR(pstats.sent_ucast_pkts);
1743         p_stats->common.tx_mcast_pkts +=
1744                 HILO_64_REGPAIR(pstats.sent_mcast_pkts);
1745         p_stats->common.tx_bcast_pkts +=
1746                 HILO_64_REGPAIR(pstats.sent_bcast_pkts);
1747         p_stats->common.tx_err_drop_pkts +=
1748                 HILO_64_REGPAIR(pstats.error_drop_pkts);
1749 }
1750
1751 static void __ecore_get_vport_tstats(struct ecore_hwfn *p_hwfn,
1752                                      struct ecore_ptt *p_ptt,
1753                                      struct ecore_eth_stats *p_stats)
1754 {
1755         struct tstorm_per_port_stat tstats;
1756         u32 tstats_addr, tstats_len;
1757
1758         if (IS_PF(p_hwfn->p_dev)) {
1759                 tstats_addr = BAR0_MAP_REG_TSDM_RAM +
1760                     TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn));
1761                 tstats_len = sizeof(struct tstorm_per_port_stat);
1762         } else {
1763                 struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
1764                 struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
1765
1766                 tstats_addr = p_resp->pfdev_info.stats_info.tstats.address;
1767                 tstats_len = p_resp->pfdev_info.stats_info.tstats.len;
1768         }
1769
1770         OSAL_MEMSET(&tstats, 0, sizeof(tstats));
1771         ecore_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, tstats_len);
1772
1773         p_stats->common.mftag_filter_discards +=
1774                 HILO_64_REGPAIR(tstats.mftag_filter_discard);
1775         p_stats->common.mac_filter_discards +=
1776                 HILO_64_REGPAIR(tstats.eth_mac_filter_discard);
1777 }
1778
1779 static void __ecore_get_vport_ustats_addrlen(struct ecore_hwfn *p_hwfn,
1780                                              u32 *p_addr, u32 *p_len,
1781                                              u16 statistics_bin)
1782 {
1783         if (IS_PF(p_hwfn->p_dev)) {
1784                 *p_addr = BAR0_MAP_REG_USDM_RAM +
1785                     USTORM_QUEUE_STAT_OFFSET(statistics_bin);
1786                 *p_len = sizeof(struct eth_ustorm_per_queue_stat);
1787         } else {
1788                 struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
1789                 struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
1790
1791                 *p_addr = p_resp->pfdev_info.stats_info.ustats.address;
1792                 *p_len = p_resp->pfdev_info.stats_info.ustats.len;
1793         }
1794 }
1795
1796 static void __ecore_get_vport_ustats(struct ecore_hwfn *p_hwfn,
1797                                      struct ecore_ptt *p_ptt,
1798                                      struct ecore_eth_stats *p_stats,
1799                                      u16 statistics_bin)
1800 {
1801         struct eth_ustorm_per_queue_stat ustats;
1802         u32 ustats_addr = 0, ustats_len = 0;
1803
1804         __ecore_get_vport_ustats_addrlen(p_hwfn, &ustats_addr, &ustats_len,
1805                                          statistics_bin);
1806
1807         OSAL_MEMSET(&ustats, 0, sizeof(ustats));
1808         ecore_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, ustats_len);
1809
1810         p_stats->common.rx_ucast_bytes +=
1811                 HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
1812         p_stats->common.rx_mcast_bytes +=
1813                 HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
1814         p_stats->common.rx_bcast_bytes +=
1815                 HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
1816         p_stats->common.rx_ucast_pkts +=
1817                 HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
1818         p_stats->common.rx_mcast_pkts +=
1819                 HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
1820         p_stats->common.rx_bcast_pkts +=
1821                 HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
1822 }
1823
1824 static void __ecore_get_vport_mstats_addrlen(struct ecore_hwfn *p_hwfn,
1825                                              u32 *p_addr, u32 *p_len,
1826                                              u16 statistics_bin)
1827 {
1828         if (IS_PF(p_hwfn->p_dev)) {
1829                 *p_addr = BAR0_MAP_REG_MSDM_RAM +
1830                     MSTORM_QUEUE_STAT_OFFSET(statistics_bin);
1831                 *p_len = sizeof(struct eth_mstorm_per_queue_stat);
1832         } else {
1833                 struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
1834                 struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
1835
1836                 *p_addr = p_resp->pfdev_info.stats_info.mstats.address;
1837                 *p_len = p_resp->pfdev_info.stats_info.mstats.len;
1838         }
1839 }
1840
1841 static void __ecore_get_vport_mstats(struct ecore_hwfn *p_hwfn,
1842                                      struct ecore_ptt *p_ptt,
1843                                      struct ecore_eth_stats *p_stats,
1844                                      u16 statistics_bin)
1845 {
1846         struct eth_mstorm_per_queue_stat mstats;
1847         u32 mstats_addr = 0, mstats_len = 0;
1848
1849         __ecore_get_vport_mstats_addrlen(p_hwfn, &mstats_addr, &mstats_len,
1850                                          statistics_bin);
1851
1852         OSAL_MEMSET(&mstats, 0, sizeof(mstats));
1853         ecore_memcpy_from(p_hwfn, p_ptt, &mstats, mstats_addr, mstats_len);
1854
1855         p_stats->common.no_buff_discards +=
1856                 HILO_64_REGPAIR(mstats.no_buff_discard);
1857         p_stats->common.packet_too_big_discard +=
1858                 HILO_64_REGPAIR(mstats.packet_too_big_discard);
1859         p_stats->common.ttl0_discard +=
1860                 HILO_64_REGPAIR(mstats.ttl0_discard);
1861         p_stats->common.tpa_coalesced_pkts +=
1862                 HILO_64_REGPAIR(mstats.tpa_coalesced_pkts);
1863         p_stats->common.tpa_coalesced_events +=
1864                 HILO_64_REGPAIR(mstats.tpa_coalesced_events);
1865         p_stats->common.tpa_aborts_num +=
1866                 HILO_64_REGPAIR(mstats.tpa_aborts_num);
1867         p_stats->common.tpa_coalesced_bytes +=
1868                 HILO_64_REGPAIR(mstats.tpa_coalesced_bytes);
1869 }
1870
1871 static void __ecore_get_vport_port_stats(struct ecore_hwfn *p_hwfn,
1872                                          struct ecore_ptt *p_ptt,
1873                                          struct ecore_eth_stats *p_stats)
1874 {
1875         struct ecore_eth_stats_common *p_common = &p_stats->common;
1876         struct port_stats port_stats;
1877         int j;
1878
1879         OSAL_MEMSET(&port_stats, 0, sizeof(port_stats));
1880
1881         ecore_memcpy_from(p_hwfn, p_ptt, &port_stats,
1882                           p_hwfn->mcp_info->port_addr +
1883                           OFFSETOF(struct public_port, stats),
1884                           sizeof(port_stats));
1885
1886         p_common->rx_64_byte_packets += port_stats.eth.r64;
1887         p_common->rx_65_to_127_byte_packets += port_stats.eth.r127;
1888         p_common->rx_128_to_255_byte_packets += port_stats.eth.r255;
1889         p_common->rx_256_to_511_byte_packets += port_stats.eth.r511;
1890         p_common->rx_512_to_1023_byte_packets += port_stats.eth.r1023;
1891         p_common->rx_1024_to_1518_byte_packets += port_stats.eth.r1518;
1892         p_common->rx_crc_errors += port_stats.eth.rfcs;
1893         p_common->rx_mac_crtl_frames += port_stats.eth.rxcf;
1894         p_common->rx_pause_frames += port_stats.eth.rxpf;
1895         p_common->rx_pfc_frames += port_stats.eth.rxpp;
1896         p_common->rx_align_errors += port_stats.eth.raln;
1897         p_common->rx_carrier_errors += port_stats.eth.rfcr;
1898         p_common->rx_oversize_packets += port_stats.eth.rovr;
1899         p_common->rx_jabbers += port_stats.eth.rjbr;
1900         p_common->rx_undersize_packets += port_stats.eth.rund;
1901         p_common->rx_fragments += port_stats.eth.rfrg;
1902         p_common->tx_64_byte_packets += port_stats.eth.t64;
1903         p_common->tx_65_to_127_byte_packets += port_stats.eth.t127;
1904         p_common->tx_128_to_255_byte_packets += port_stats.eth.t255;
1905         p_common->tx_256_to_511_byte_packets += port_stats.eth.t511;
1906         p_common->tx_512_to_1023_byte_packets += port_stats.eth.t1023;
1907         p_common->tx_1024_to_1518_byte_packets += port_stats.eth.t1518;
1908         p_common->tx_pause_frames += port_stats.eth.txpf;
1909         p_common->tx_pfc_frames += port_stats.eth.txpp;
1910         p_common->rx_mac_bytes += port_stats.eth.rbyte;
1911         p_common->rx_mac_uc_packets += port_stats.eth.rxuca;
1912         p_common->rx_mac_mc_packets += port_stats.eth.rxmca;
1913         p_common->rx_mac_bc_packets += port_stats.eth.rxbca;
1914         p_common->rx_mac_frames_ok += port_stats.eth.rxpok;
1915         p_common->tx_mac_bytes += port_stats.eth.tbyte;
1916         p_common->tx_mac_uc_packets += port_stats.eth.txuca;
1917         p_common->tx_mac_mc_packets += port_stats.eth.txmca;
1918         p_common->tx_mac_bc_packets += port_stats.eth.txbca;
1919         p_common->tx_mac_ctrl_frames += port_stats.eth.txcf;
1920         for (j = 0; j < 8; j++) {
1921                 p_common->brb_truncates += port_stats.brb.brb_truncate[j];
1922                 p_common->brb_discards += port_stats.brb.brb_discard[j];
1923         }
1924
1925         if (ECORE_IS_BB(p_hwfn->p_dev)) {
1926                 struct ecore_eth_stats_bb *p_bb = &p_stats->bb;
1927
1928                 p_bb->rx_1519_to_1522_byte_packets +=
1929                         port_stats.eth.u0.bb0.r1522;
1930                 p_bb->rx_1519_to_2047_byte_packets +=
1931                         port_stats.eth.u0.bb0.r2047;
1932                 p_bb->rx_2048_to_4095_byte_packets +=
1933                         port_stats.eth.u0.bb0.r4095;
1934                 p_bb->rx_4096_to_9216_byte_packets +=
1935                         port_stats.eth.u0.bb0.r9216;
1936                 p_bb->rx_9217_to_16383_byte_packets +=
1937                         port_stats.eth.u0.bb0.r16383;
1938                 p_bb->tx_1519_to_2047_byte_packets +=
1939                         port_stats.eth.u1.bb1.t2047;
1940                 p_bb->tx_2048_to_4095_byte_packets +=
1941                         port_stats.eth.u1.bb1.t4095;
1942                 p_bb->tx_4096_to_9216_byte_packets +=
1943                         port_stats.eth.u1.bb1.t9216;
1944                 p_bb->tx_9217_to_16383_byte_packets +=
1945                         port_stats.eth.u1.bb1.t16383;
1946                 p_bb->tx_lpi_entry_count += port_stats.eth.u2.bb2.tlpiec;
1947                 p_bb->tx_total_collisions += port_stats.eth.u2.bb2.tncl;
1948         } else {
1949                 struct ecore_eth_stats_ah *p_ah = &p_stats->ah;
1950
1951                 p_ah->rx_1519_to_max_byte_packets +=
1952                         port_stats.eth.u0.ah0.r1519_to_max;
1953                 p_ah->tx_1519_to_max_byte_packets +=
1954                         port_stats.eth.u1.ah1.t1519_to_max;
1955         }
1956
1957         p_common->link_change_count = ecore_rd(p_hwfn, p_ptt,
1958                                                p_hwfn->mcp_info->port_addr +
1959                                                OFFSETOF(struct public_port,
1960                                                         link_change_count));
1961 }
1962
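/* Collect one hwfn's vport statistics from all four storms; port-level MAC
 * statistics are added only when requested (lead hwfn of a PF).
 */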
1963 void __ecore_get_vport_stats(struct ecore_hwfn *p_hwfn,
1964                              struct ecore_ptt *p_ptt,
1965                              struct ecore_eth_stats *stats,
1966                              u16 statistics_bin, bool b_get_port_stats)
1967 {
1968         __ecore_get_vport_mstats(p_hwfn, p_ptt, stats, statistics_bin);
1969         __ecore_get_vport_ustats(p_hwfn, p_ptt, stats, statistics_bin);
1970         __ecore_get_vport_tstats(p_hwfn, p_ptt, stats);
1971         __ecore_get_vport_pstats(p_hwfn, p_ptt, stats, statistics_bin);
1972
1973 #ifndef ASIC_ONLY
1974         /* Avoid getting PORT stats for emulation. */
1975         if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
1976                 return;
1977 #endif
1978
1979         if (b_get_port_stats && p_hwfn->mcp_info)
1980                 __ecore_get_vport_port_stats(p_hwfn, p_ptt, stats);
1981 }
1982
1983 static void _ecore_get_vport_stats(struct ecore_dev *p_dev,
1984                                    struct ecore_eth_stats *stats)
1985 {
1986         u8 fw_vport = 0;
1987         int i;
1988
1989         OSAL_MEMSET(stats, 0, sizeof(*stats));
1990
1991         for_each_hwfn(p_dev, i) {
1992                 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
1993                 struct ecore_ptt *p_ptt = IS_PF(p_dev) ?
1994                     ecore_ptt_acquire(p_hwfn) : OSAL_NULL;
1995                 bool b_get_port_stats;
1996
1997                 if (IS_PF(p_dev)) {
1998                         /* The main vport is at relative index 0 */
1999                         if (ecore_fw_vport(p_hwfn, 0, &fw_vport)) {
2000                                 DP_ERR(p_hwfn, "No vport available!\n");
2001                                 goto out;
2002                         }
2003                 }
2004
2005                 if (IS_PF(p_dev) && !p_ptt) {
2006                         DP_ERR(p_hwfn, "Failed to acquire ptt\n");
2007                         continue;
2008                 }
2009
2010                 b_get_port_stats = IS_PF(p_dev) && IS_LEAD_HWFN(p_hwfn);
2011                 __ecore_get_vport_stats(p_hwfn, p_ptt, stats, fw_vport,
2012                                         b_get_port_stats);
2013
2014 out:
2015                 if (IS_PF(p_dev) && p_ptt)
2016                         ecore_ptt_release(p_hwfn, p_ptt);
2017         }
2018 }
2019
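/* Public statistics getter: collect the current counters and, if a baseline
 * snapshot exists (taken by ecore_reset_vport_stats()), subtract it so the
 * caller sees values relative to the last reset.
 */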
2020 void ecore_get_vport_stats(struct ecore_dev *p_dev,
2021                            struct ecore_eth_stats *stats)
2022 {
2023         u32 i;
2024
2025         if (!p_dev) {
2026                 OSAL_MEMSET(stats, 0, sizeof(*stats));
2027                 return;
2028         }
2029
2030         _ecore_get_vport_stats(p_dev, stats);
2031
2032         if (!p_dev->reset_stats)
2033                 return;
2034
2035         /* Subtract the baseline captured at the last statistics reset */
2036         for (i = 0; i < sizeof(struct ecore_eth_stats) / sizeof(u64); i++)
2037                 ((u64 *)stats)[i] -= ((u64 *)p_dev->reset_stats)[i];
2038 }
2039
2040 /* zeroes V-PORT specific portion of stats (Port stats remains untouched) */
2041 void ecore_reset_vport_stats(struct ecore_dev *p_dev)
2042 {
2043         int i;
2044
2045         for_each_hwfn(p_dev, i) {
2046                 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
2047                 struct eth_mstorm_per_queue_stat mstats;
2048                 struct eth_ustorm_per_queue_stat ustats;
2049                 struct eth_pstorm_per_queue_stat pstats;
2050                 struct ecore_ptt *p_ptt = IS_PF(p_dev) ?
2051                     ecore_ptt_acquire(p_hwfn) : OSAL_NULL;
2052                 u32 addr = 0, len = 0;
2053
2054                 if (IS_PF(p_dev) && !p_ptt) {
2055                         DP_ERR(p_hwfn, "Failed to acquire ptt\n");
2056                         continue;
2057                 }
2058
2059                 OSAL_MEMSET(&mstats, 0, sizeof(mstats));
2060                 __ecore_get_vport_mstats_addrlen(p_hwfn, &addr, &len, 0);
2061                 ecore_memcpy_to(p_hwfn, p_ptt, addr, &mstats, len);
2062
2063                 OSAL_MEMSET(&ustats, 0, sizeof(ustats));
2064                 __ecore_get_vport_ustats_addrlen(p_hwfn, &addr, &len, 0);
2065                 ecore_memcpy_to(p_hwfn, p_ptt, addr, &ustats, len);
2066
2067                 OSAL_MEMSET(&pstats, 0, sizeof(pstats));
2068                 __ecore_get_vport_pstats_addrlen(p_hwfn, &addr, &len, 0);
2069                 ecore_memcpy_to(p_hwfn, p_ptt, addr, &pstats, len);
2070
2071                 if (IS_PF(p_dev))
2072                         ecore_ptt_release(p_hwfn, p_ptt);
2073         }
2074
2075         /* PORT statistics are not necessarily reset, so we need to
2076          * read and create a baseline for future statistics.
2077          * Link change stat is maintained by MFW, return its value as is.
2078          */
2079         if (!p_dev->reset_stats) {
2080                 DP_INFO(p_dev, "Reset stats not allocated\n");
2081         } else {
2082                 _ecore_get_vport_stats(p_dev, p_dev->reset_stats);
2083                 p_dev->reset_stats->common.link_change_count = 0;
2084         }
2085 }
2086
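/* Enable or disable aRFS by (re)programming the GFT engine with a 4-tuple
 * profile; this is a no-op when the multi-function mode disallows ARFS.
 */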
2087 void ecore_arfs_mode_configure(struct ecore_hwfn *p_hwfn,
2088                                struct ecore_ptt *p_ptt,
2089                                struct ecore_arfs_config_params *p_cfg_params)
2090 {
2091         if (OSAL_TEST_BIT(ECORE_MF_DISABLE_ARFS, &p_hwfn->p_dev->mf_bits))
2092                 return;
2093
2094         if (p_cfg_params->arfs_enable) {
2095                 ecore_gft_config(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
2096                                  p_cfg_params->tcp,
2097                                  p_cfg_params->udp,
2098                                  p_cfg_params->ipv4,
2099                                  p_cfg_params->ipv6,
2100                                  GFT_PROFILE_TYPE_4_TUPLE);
2101                 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
2102                            "tcp = %s, udp = %s, ipv4 = %s, ipv6 = %s\n",
2103                            p_cfg_params->tcp ? "Enable" : "Disable",
2104                            p_cfg_params->udp ? "Enable" : "Disable",
2105                            p_cfg_params->ipv4 ? "Enable" : "Disable",
2106                            p_cfg_params->ipv6 ? "Enable" : "Disable");
2107         } else {
2108                 ecore_gft_disable(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
2109         }
2110         DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Configured ARFS mode: %s\n",
2111                    p_cfg_params->arfs_enable ? "Enable" : "Disable");
2112 }
2113
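/* Add or remove a single GFT (ntuple/aRFS) flow-steering filter. The flow is
 * described by a packet-header buffer of 'length' bytes at DMA address
 * 'p_addr'; matching packets are steered to Rx queue 'qid' of 'vport_id'.
 */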
2114 enum _ecore_status_t
2115 ecore_configure_rfs_ntuple_filter(struct ecore_hwfn *p_hwfn,
2116                                   struct ecore_spq_comp_cb *p_cb,
2117                                   dma_addr_t p_addr, u16 length,
2118                                   u16 qid, u8 vport_id,
2119                                   bool b_is_add)
2120 {
2121         struct rx_update_gft_filter_data *p_ramrod = OSAL_NULL;
2122         struct ecore_spq_entry *p_ent = OSAL_NULL;
2123         struct ecore_sp_init_data init_data;
2124         u16 abs_rx_q_id = 0;
2125         u8 abs_vport_id = 0;
2126         enum _ecore_status_t rc = ECORE_NOTIMPL;
2127
2128         rc = ecore_fw_vport(p_hwfn, vport_id, &abs_vport_id);
2129         if (rc != ECORE_SUCCESS)
2130                 return rc;
2131
2132         rc = ecore_fw_l2_queue(p_hwfn, qid, &abs_rx_q_id);
2133         if (rc != ECORE_SUCCESS)
2134                 return rc;
2135
2136         /* Get SPQ entry */
2137         OSAL_MEMSET(&init_data, 0, sizeof(init_data));
2138         init_data.cid = ecore_spq_get_cid(p_hwfn);
2139
2140         init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
2141
2142         if (p_cb) {
2143                 init_data.comp_mode = ECORE_SPQ_MODE_CB;
2144                 init_data.p_comp_data = p_cb;
2145         } else {
2146                 init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
2147         }
2148
2149         rc = ecore_sp_init_request(p_hwfn, &p_ent,
2150                                    ETH_RAMROD_GFT_UPDATE_FILTER,
2151                                    PROTOCOLID_ETH, &init_data);
2152         if (rc != ECORE_SUCCESS)
2153                 return rc;
2154
2155         p_ramrod = &p_ent->ramrod.rx_update_gft;
2156
2157         DMA_REGPAIR_LE(p_ramrod->pkt_hdr_addr, p_addr);
2158         p_ramrod->pkt_hdr_length = OSAL_CPU_TO_LE16(length);
2159
2160         p_ramrod->action_icid_valid = 0;
2161         p_ramrod->action_icid = 0;
2162
2163         p_ramrod->rx_qid_valid = 1;
2164         p_ramrod->rx_qid = OSAL_CPU_TO_LE16(abs_rx_q_id);
2165
2166         p_ramrod->flow_id_valid = 0;
2167         p_ramrod->flow_id = 0;
2168
2169         p_ramrod->vport_id = OSAL_CPU_TO_LE16((u16)abs_vport_id);
2170         p_ramrod->filter_action = b_is_add ? GFT_ADD_FILTER
2171                                            : GFT_DELETE_FILTER;
2172
2173         DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
2174                    "V[%02x], Q[%04x] - %s filter from 0x%lx [length %04xb]\n",
2175                    abs_vport_id, abs_rx_q_id,
2176                    b_is_add ? "Adding" : "Removing",
2177                    (unsigned long)p_addr, length);
2178
2179         return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
2180 }
2181
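/* Read back a queue's effective Rx interrupt coalescing: the timeset comes
 * from the USTORM queue zone and is scaled by the CAU SB timer resolution,
 * i.e. coalesce = timeset << timer_res. The Tx variant below does the same
 * using the XSTORM zone and TIMER_RES1.
 */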
2182 int ecore_get_rxq_coalesce(struct ecore_hwfn *p_hwfn,
2183                            struct ecore_ptt *p_ptt,
2184                            struct ecore_queue_cid *p_cid,
2185                            u16 *p_rx_coal)
2186 {
2187         u32 coalesce, address, is_valid;
2188         struct cau_sb_entry sb_entry;
2189         u8 timer_res;
2190         enum _ecore_status_t rc;
2191
2192         rc = ecore_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY +
2193                                  p_cid->sb_igu_id * sizeof(u64),
2194                                  (u64)(osal_uintptr_t)&sb_entry, 2, 0);
2195         if (rc != ECORE_SUCCESS) {
2196                 DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc);
2197                 return rc;
2198         }
2199
2200         timer_res = GET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES0);
2201
2202         address = BAR0_MAP_REG_USDM_RAM +
2203                   USTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id);
2204         coalesce = ecore_rd(p_hwfn, p_ptt, address);
2205
2206         is_valid = GET_FIELD(coalesce, COALESCING_TIMESET_VALID);
2207         if (!is_valid)
2208                 return ECORE_INVAL;
2209
2210         coalesce = GET_FIELD(coalesce, COALESCING_TIMESET_TIMESET);
2211         *p_rx_coal = (u16)(coalesce << timer_res);
2212
2213         return ECORE_SUCCESS;
2214 }
2215
2216 int ecore_get_txq_coalesce(struct ecore_hwfn *p_hwfn,
2217                            struct ecore_ptt *p_ptt,
2218                            struct ecore_queue_cid *p_cid,
2219                            u16 *p_tx_coal)
2220 {
2221         u32 coalesce, address, is_valid;
2222         struct cau_sb_entry sb_entry;
2223         u8 timer_res;
2224         enum _ecore_status_t rc;
2225
2226         rc = ecore_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY +
2227                                  p_cid->sb_igu_id * sizeof(u64),
2228                                  (u64)(osal_uintptr_t)&sb_entry, 2, 0);
2229         if (rc != ECORE_SUCCESS) {
2230                 DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc);
2231                 return rc;
2232         }
2233
2234         timer_res = GET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES1);
2235
2236         address = BAR0_MAP_REG_XSDM_RAM +
2237                   XSTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id);
2238         coalesce = ecore_rd(p_hwfn, p_ptt, address);
2239
2240         is_valid = GET_FIELD(coalesce, COALESCING_TIMESET_VALID);
2241         if (!is_valid)
2242                 return ECORE_INVAL;
2243
2244         coalesce = GET_FIELD(coalesce, COALESCING_TIMESET_TIMESET);
2245         *p_tx_coal = (u16)(coalesce << timer_res);
2246
2247         return ECORE_SUCCESS;
2248 }
2249
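/* Generic coalescing getter: a VF queries its PF over the VF->PF channel,
 * while a PF reads the CAU/storm data directly through a PTT.
 */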
2250 enum _ecore_status_t
2251 ecore_get_queue_coalesce(struct ecore_hwfn *p_hwfn, u16 *p_coal,
2252                          void *handle)
2253 {
2254         struct ecore_queue_cid *p_cid = (struct ecore_queue_cid *)handle;
2255         enum _ecore_status_t rc = ECORE_SUCCESS;
2256         struct ecore_ptt *p_ptt;
2257
2258         if (IS_VF(p_hwfn->p_dev)) {
2259                 rc = ecore_vf_pf_get_coalesce(p_hwfn, p_coal, p_cid);
2260                 if (rc != ECORE_SUCCESS)
2261                         DP_NOTICE(p_hwfn, false,
2262                                   "Unable to read queue coalescing\n");
2263
2264                 return rc;
2265         }
2266
2267         p_ptt = ecore_ptt_acquire(p_hwfn);
2268         if (!p_ptt)
2269                 return ECORE_AGAIN;
2270
2271         if (p_cid->b_is_rx) {
2272                 rc = ecore_get_rxq_coalesce(p_hwfn, p_ptt, p_cid, p_coal);
2273                 if (rc != ECORE_SUCCESS)
2274                         goto out;
2275         } else {
2276                 rc = ecore_get_txq_coalesce(p_hwfn, p_ptt, p_cid, p_coal);
2277                 if (rc != ECORE_SUCCESS)
2278                         goto out;
2279         }
2280
2281 out:
2282         ecore_ptt_release(p_hwfn, p_ptt);
2283
2284         return rc;
2285 }
2286
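/* Rate-limit a Tx queue by configuring the rate limiter of the QM vport the
 * queue is mapped to; the current link speed is needed to derive the RL
 * parameters.
 */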
2287 enum _ecore_status_t
2288 ecore_eth_tx_queue_maxrate(struct ecore_hwfn *p_hwfn,
2289                            struct ecore_ptt *p_ptt,
2290                            struct ecore_queue_cid *p_cid, u32 rate)
2291 {
2292         struct ecore_mcp_link_state *p_link;
2293         u8 vport;
2294
2295         vport = (u8)ecore_get_qm_vport_idx_rl(p_hwfn, p_cid->rel.queue_id);
2296         p_link = &ECORE_LEADING_HWFN(p_hwfn->p_dev)->mcp_info->link_output;
2297
2298         DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
2299                    "About to rate limit qm vport %d for queue %d with rate %d\n",
2300                    vport, p_cid->rel.queue_id, rate);
2301
2302         return ecore_init_vport_rl(p_hwfn, p_ptt, vport, rate,
2303                                    p_link->speed);
2304 }