/*
 * Copyright (c) 2016 QLogic Corporation.
 * All rights reserved.
 * www.qlogic.com
 *
 * See LICENSE.qede_pmd for copyright and licensing details.
 */

#include "bcm_osal.h"

#include "ecore.h"
#include "ecore_status.h"
#include "ecore_hsi_eth.h"
#include "ecore_chain.h"
#include "ecore_spq.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_cxt.h"
#include "ecore_l2.h"
#include "ecore_sp_commands.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore_iro.h"
#include "reg_addr.h"
#include "ecore_int.h"
#include "ecore_hw.h"
#include "ecore_vf.h"
#include "ecore_sriov.h"
#include "ecore_mcp.h"

#define ECORE_MAX_SGES_NUM 16
#define CRC32_POLY 0x1edc6f41

struct ecore_l2_info {
        u32 queues;
        unsigned long **pp_qid_usage;

        /* The lock is meant to synchronize access to the qid usage */
        osal_mutex_t lock;
};

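/* Allocate the hwfn's L2 info together with one qid-usage bitmap per
 * queue-zone. PFs size this by their L2-queue resource allocation, while
 * VFs use the larger of the Rx/Tx queue counts granted by their PF.
 */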
enum _ecore_status_t ecore_l2_alloc(struct ecore_hwfn *p_hwfn)
{
        struct ecore_l2_info *p_l2_info;
        unsigned long **pp_qids;
        u32 i;

        if (!ECORE_IS_L2_PERSONALITY(p_hwfn))
                return ECORE_SUCCESS;

        p_l2_info = OSAL_VZALLOC(p_hwfn->p_dev, sizeof(*p_l2_info));
        if (!p_l2_info)
                return ECORE_NOMEM;
        p_hwfn->p_l2_info = p_l2_info;

        if (IS_PF(p_hwfn->p_dev)) {
                p_l2_info->queues = RESC_NUM(p_hwfn, ECORE_L2_QUEUE);
        } else {
                u8 rx = 0, tx = 0;

                ecore_vf_get_num_rxqs(p_hwfn, &rx);
                ecore_vf_get_num_txqs(p_hwfn, &tx);

                p_l2_info->queues = (u32)OSAL_MAX_T(u8, rx, tx);
        }

        pp_qids = OSAL_VZALLOC(p_hwfn->p_dev,
                               sizeof(unsigned long *) *
                               p_l2_info->queues);
        if (pp_qids == OSAL_NULL)
                return ECORE_NOMEM;
        p_l2_info->pp_qid_usage = pp_qids;

        for (i = 0; i < p_l2_info->queues; i++) {
                pp_qids[i] = OSAL_VZALLOC(p_hwfn->p_dev,
                                          MAX_QUEUES_PER_QZONE / 8);
                if (pp_qids[i] == OSAL_NULL)
                        return ECORE_NOMEM;
        }

#ifdef CONFIG_ECORE_LOCK_ALLOC
        OSAL_MUTEX_ALLOC(p_hwfn, &p_l2_info->lock);
#endif

        return ECORE_SUCCESS;
}

void ecore_l2_setup(struct ecore_hwfn *p_hwfn)
{
        if (!ECORE_IS_L2_PERSONALITY(p_hwfn))
                return;

        OSAL_MUTEX_INIT(&p_hwfn->p_l2_info->lock);
}

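/* Release the qid-usage bitmaps and the L2 info itself. Since allocation
 * stops at the first failure, freeing stops at the first uninitialized
 * entry as well.
 */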
void ecore_l2_free(struct ecore_hwfn *p_hwfn)
{
        u32 i;

        if (!ECORE_IS_L2_PERSONALITY(p_hwfn))
                return;

        if (p_hwfn->p_l2_info == OSAL_NULL)
                return;

        if (p_hwfn->p_l2_info->pp_qid_usage == OSAL_NULL)
                goto out_l2_info;

        /* Free until hitting the first uninitialized entry */
        for (i = 0; i < p_hwfn->p_l2_info->queues; i++) {
                if (p_hwfn->p_l2_info->pp_qid_usage[i] == OSAL_NULL)
                        break;
                OSAL_VFREE(p_hwfn->p_dev,
                           p_hwfn->p_l2_info->pp_qid_usage[i]);
        }

#ifdef CONFIG_ECORE_LOCK_ALLOC
        /* The lock is allocated last, so deallocate it only if everything
         * else was allocated as well.
         */
        if (i == p_hwfn->p_l2_info->queues)
                OSAL_MUTEX_DEALLOC(&p_hwfn->p_l2_info->lock);
#endif

        OSAL_VFREE(p_hwfn->p_dev, p_hwfn->p_l2_info->pp_qid_usage);

out_l2_info:
        OSAL_VFREE(p_hwfn->p_dev, p_hwfn->p_l2_info);
        p_hwfn->p_l2_info = OSAL_NULL;
}

/* Mark a free qid inside the queue-zone's usage bitmap as taken; access to
 * the bitmaps is synchronized by the l2_info lock.
 */
static bool ecore_eth_queue_qid_usage_add(struct ecore_hwfn *p_hwfn,
                                          struct ecore_queue_cid *p_cid)
{
        struct ecore_l2_info *p_l2_info = p_hwfn->p_l2_info;
        u16 queue_id = p_cid->rel.queue_id;
        bool b_rc = true;
        u8 first;

        OSAL_MUTEX_ACQUIRE(&p_l2_info->lock);

        if (queue_id >= p_l2_info->queues) {
                DP_NOTICE(p_hwfn, true,
                          "Requested to increase usage for qzone %04x out of %08x\n",
                          queue_id, p_l2_info->queues);
                b_rc = false;
                goto out;
        }

        first = (u8)OSAL_FIND_FIRST_ZERO_BIT(p_l2_info->pp_qid_usage[queue_id],
                                             MAX_QUEUES_PER_QZONE);
        if (first >= MAX_QUEUES_PER_QZONE) {
                b_rc = false;
                goto out;
        }

        OSAL_SET_BIT(first, p_l2_info->pp_qid_usage[queue_id]);
        p_cid->qid_usage_idx = first;

out:
        OSAL_MUTEX_RELEASE(&p_l2_info->lock);
        return b_rc;
}

static void ecore_eth_queue_qid_usage_del(struct ecore_hwfn *p_hwfn,
                                          struct ecore_queue_cid *p_cid)
{
        OSAL_MUTEX_ACQUIRE(&p_hwfn->p_l2_info->lock);

        OSAL_CLEAR_BIT(p_cid->qid_usage_idx,
                       p_hwfn->p_l2_info->pp_qid_usage[p_cid->rel.queue_id]);

        OSAL_MUTEX_RELEASE(&p_hwfn->p_l2_info->lock);
}

void ecore_eth_queue_cid_release(struct ecore_hwfn *p_hwfn,
                                 struct ecore_queue_cid *p_cid)
{
        /* For VF-queues, stuff is a bit complicated as:
         *  - They always maintain the qid_usage on their own.
         *  - In legacy mode, they also maintain their CIDs.
         */

        /* VFs' CIDs are 0-based in PF-view, and uninitialized on VF */
        if (IS_PF(p_hwfn->p_dev) && !p_cid->b_legacy_vf)
                _ecore_cxt_release_cid(p_hwfn, p_cid->cid, p_cid->vfid);
        if (!p_cid->b_legacy_vf)
                ecore_eth_queue_qid_usage_del(p_hwfn, p_cid);
        OSAL_VFREE(p_hwfn->p_dev, p_cid);
}

/* This internal version is only meant to be called directly by PFs
 * initializing CIDs for their VFs.
 */
static struct ecore_queue_cid *
_ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn,
                        u16 opaque_fid, u32 cid,
                        struct ecore_queue_start_common_params *p_params,
                        struct ecore_queue_cid_vf_params *p_vf_params)
{
        struct ecore_queue_cid *p_cid;
        enum _ecore_status_t rc;

        p_cid = OSAL_VZALLOC(p_hwfn->p_dev, sizeof(*p_cid));
        if (p_cid == OSAL_NULL)
                return OSAL_NULL;

        p_cid->opaque_fid = opaque_fid;
        p_cid->cid = cid;
        p_cid->rel = *p_params;
        p_cid->p_owner = p_hwfn;

        /* Fill-in bits related to VFs' queues if information was provided */
        if (p_vf_params != OSAL_NULL) {
                p_cid->vfid = p_vf_params->vfid;
                p_cid->vf_qid = p_vf_params->vf_qid;
                p_cid->b_legacy_vf = p_vf_params->b_legacy;
        } else {
                p_cid->vfid = ECORE_QUEUE_CID_PF;
        }

        /* Don't try calculating the absolute indices for VFs */
        if (IS_VF(p_hwfn->p_dev)) {
                p_cid->abs = p_cid->rel;

                goto out;
        }

        /* Calculate the engine-absolute indices of the resources.
         * This would guarantee they're valid later on.
         * In some cases [SBs] we already have the right values.
         */
        rc = ecore_fw_vport(p_hwfn, p_cid->rel.vport_id, &p_cid->abs.vport_id);
        if (rc != ECORE_SUCCESS)
                goto fail;

        rc = ecore_fw_l2_queue(p_hwfn, p_cid->rel.queue_id,
                               &p_cid->abs.queue_id);
        if (rc != ECORE_SUCCESS)
                goto fail;

        /* In case of a PF configuring its VF's queues, the stats-id is already
         * absolute [since there's a single index that's suitable per-VF].
         */
        if (p_cid->vfid == ECORE_QUEUE_CID_PF) {
                rc = ecore_fw_vport(p_hwfn, p_cid->rel.stats_id,
                                    &p_cid->abs.stats_id);
                if (rc != ECORE_SUCCESS)
                        goto fail;
        } else {
                p_cid->abs.stats_id = p_cid->rel.stats_id;
        }

        /* SBs relevant information was already provided as absolute */
        p_cid->abs.sb = p_cid->rel.sb;
        p_cid->abs.sb_idx = p_cid->rel.sb_idx;

out:
        /* VFs have already provided the qid_usage_idx on their own.
         * Otherwise, we need to allocate a unique one.
         */
        if (!p_vf_params) {
                if (!ecore_eth_queue_qid_usage_add(p_hwfn, p_cid))
                        goto fail;
        } else {
                p_cid->qid_usage_idx = p_vf_params->qid_usage_idx;
        }

        DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                   "opaque_fid: %04x CID %08x vport %02x [%02x] qzone %04x.%02x [%04x] stats %02x [%02x] SB %04x PI %02x\n",
                   p_cid->opaque_fid, p_cid->cid,
                   p_cid->rel.vport_id, p_cid->abs.vport_id,
                   p_cid->rel.queue_id, p_cid->qid_usage_idx,
                   p_cid->abs.queue_id,
                   p_cid->rel.stats_id, p_cid->abs.stats_id,
                   p_cid->abs.sb, p_cid->abs.sb_idx);

        return p_cid;

fail:
        OSAL_VFREE(p_hwfn->p_dev, p_cid);
        return OSAL_NULL;
}

struct ecore_queue_cid *
ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn, u16 opaque_fid,
                       struct ecore_queue_start_common_params *p_params,
                       struct ecore_queue_cid_vf_params *p_vf_params)
{
        struct ecore_queue_cid *p_cid;
        u8 vfid = ECORE_CXT_PF_CID;
        bool b_legacy_vf = false;
        u32 cid = 0;

        /* In case of legacy VFs, the CID can be derived from the additional
         * VF parameters - the VF assumes queue X uses CID X, so we can simply
         * use the vf_qid for this purpose as well.
         */
        if (p_vf_params) {
                vfid = p_vf_params->vfid;

                if (p_vf_params->b_legacy) {
                        b_legacy_vf = true;
                        cid = p_vf_params->vf_qid;
                }
        }

        /* Get a unique firmware CID for this queue, in case it's a PF.
         * VFs don't need a CID as the queue configuration will be done
         * by the PF.
         */
        if (IS_PF(p_hwfn->p_dev) && !b_legacy_vf) {
                if (_ecore_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH,
                                           &cid, vfid) != ECORE_SUCCESS) {
                        DP_NOTICE(p_hwfn, true, "Failed to acquire cid\n");
                        return OSAL_NULL;
                }
        }

        p_cid = _ecore_eth_queue_to_cid(p_hwfn, opaque_fid, cid,
                                        p_params, p_vf_params);
        if ((p_cid == OSAL_NULL) && IS_PF(p_hwfn->p_dev) && !b_legacy_vf)
                _ecore_cxt_release_cid(p_hwfn, cid, vfid);

        return p_cid;
}

static struct ecore_queue_cid *
ecore_eth_queue_to_cid_pf(struct ecore_hwfn *p_hwfn, u16 opaque_fid,
                          struct ecore_queue_start_common_params *p_params)
{
        return ecore_eth_queue_to_cid(p_hwfn, opaque_fid, p_params, OSAL_NULL);
}

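/* PF-only worker for vport start - builds a VPORT_START ramrod from the
 * supplied parameters [MTU, inner-vlan removal, TPA mode, tx-error policy,
 * etc.] and posts it on the slowpath queue.
 */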
enum _ecore_status_t
ecore_sp_eth_vport_start(struct ecore_hwfn *p_hwfn,
                         struct ecore_sp_vport_start_params *p_params)
{
        struct vport_start_ramrod_data *p_ramrod = OSAL_NULL;
        struct ecore_spq_entry *p_ent = OSAL_NULL;
        struct ecore_sp_init_data init_data;
        u16 rx_mode = 0, tx_err = 0;
        u8 abs_vport_id = 0;
        enum _ecore_status_t rc = ECORE_NOTIMPL;

        rc = ecore_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
        if (rc != ECORE_SUCCESS)
                return rc;

        /* Get SPQ entry */
        OSAL_MEMSET(&init_data, 0, sizeof(init_data));
        init_data.cid = ecore_spq_get_cid(p_hwfn);
        init_data.opaque_fid = p_params->opaque_fid;
        init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

        rc = ecore_sp_init_request(p_hwfn, &p_ent,
                                   ETH_RAMROD_VPORT_START,
                                   PROTOCOLID_ETH, &init_data);
        if (rc != ECORE_SUCCESS)
                return rc;

        p_ramrod = &p_ent->ramrod.vport_start;
        p_ramrod->vport_id = abs_vport_id;

        p_ramrod->mtu = OSAL_CPU_TO_LE16(p_params->mtu);
        p_ramrod->inner_vlan_removal_en = p_params->remove_inner_vlan;
        p_ramrod->handle_ptp_pkts = p_params->handle_ptp_pkts;
        p_ramrod->drop_ttl0_en = p_params->drop_ttl0;
        p_ramrod->untagged = p_params->only_untagged;
        p_ramrod->zero_placement_offset = p_params->zero_placement_offset;

        SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_UCAST_DROP_ALL, 1);
        SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_MCAST_DROP_ALL, 1);

        p_ramrod->rx_mode.state = OSAL_CPU_TO_LE16(rx_mode);

        /* Handle requests for strict behavior on transmission errors */
        SET_FIELD(tx_err, ETH_TX_ERR_VALS_ILLEGAL_VLAN_MODE,
                  p_params->b_err_illegal_vlan_mode ?
                  ETH_TX_ERR_ASSERT_MALICIOUS : 0);
        SET_FIELD(tx_err, ETH_TX_ERR_VALS_PACKET_TOO_SMALL,
                  p_params->b_err_small_pkt ?
                  ETH_TX_ERR_ASSERT_MALICIOUS : 0);
        SET_FIELD(tx_err, ETH_TX_ERR_VALS_ANTI_SPOOFING_ERR,
                  p_params->b_err_anti_spoof ?
                  ETH_TX_ERR_ASSERT_MALICIOUS : 0);
        SET_FIELD(tx_err, ETH_TX_ERR_VALS_ILLEGAL_INBAND_TAGS,
                  p_params->b_err_illegal_inband_mode ?
                  ETH_TX_ERR_ASSERT_MALICIOUS : 0);
        SET_FIELD(tx_err, ETH_TX_ERR_VALS_VLAN_INSERTION_W_INBAND_TAG,
                  p_params->b_err_vlan_insert_with_inband ?
                  ETH_TX_ERR_ASSERT_MALICIOUS : 0);
        SET_FIELD(tx_err, ETH_TX_ERR_VALS_MTU_VIOLATION,
                  p_params->b_err_big_pkt ?
                  ETH_TX_ERR_ASSERT_MALICIOUS : 0);
        SET_FIELD(tx_err, ETH_TX_ERR_VALS_ILLEGAL_CONTROL_FRAME,
                  p_params->b_err_ctrl_frame ?
                  ETH_TX_ERR_ASSERT_MALICIOUS : 0);
        p_ramrod->tx_err_behav.values = OSAL_CPU_TO_LE16(tx_err);

        /* TPA related fields */
        OSAL_MEMSET(&p_ramrod->tpa_param, 0,
                    sizeof(struct eth_vport_tpa_param));
        p_ramrod->tpa_param.max_buff_num = p_params->max_buffers_per_cqe;

        switch (p_params->tpa_mode) {
        case ECORE_TPA_MODE_GRO:
                p_ramrod->tpa_param.tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
                p_ramrod->tpa_param.tpa_max_size = (u16)-1;
                p_ramrod->tpa_param.tpa_min_size_to_cont = p_params->mtu / 2;
                p_ramrod->tpa_param.tpa_min_size_to_start = p_params->mtu / 2;
                p_ramrod->tpa_param.tpa_ipv4_en_flg = 1;
                p_ramrod->tpa_param.tpa_ipv6_en_flg = 1;
                p_ramrod->tpa_param.tpa_ipv4_tunn_en_flg = 1;
                p_ramrod->tpa_param.tpa_ipv6_tunn_en_flg = 1;
                p_ramrod->tpa_param.tpa_pkt_split_flg = 1;
                p_ramrod->tpa_param.tpa_gro_consistent_flg = 1;
                break;
        default:
                break;
        }

        p_ramrod->tx_switching_en = p_params->tx_switching;
#ifndef ASIC_ONLY
        if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
                p_ramrod->tx_switching_en = 0;
#endif

        p_ramrod->ctl_frame_mac_check_en = !!p_params->check_mac;
        p_ramrod->ctl_frame_ethtype_check_en = !!p_params->check_ethtype;

        /* Software Function ID in hwfn (PFs are 0 - 15, VFs are 16 - 135) */
        p_ramrod->sw_fid = ecore_concrete_to_sw_fid(p_hwfn->p_dev,
                                                    p_params->concrete_fid);

        return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

enum _ecore_status_t
ecore_sp_vport_start(struct ecore_hwfn *p_hwfn,
                     struct ecore_sp_vport_start_params *p_params)
{
        if (IS_VF(p_hwfn->p_dev))
                return ecore_vf_pf_vport_start(p_hwfn, p_params->vport_id,
                                               p_params->mtu,
                                               p_params->remove_inner_vlan,
                                               p_params->tpa_mode,
                                               p_params->max_buffers_per_cqe,
                                               p_params->only_untagged);

        return ecore_sp_eth_vport_start(p_hwfn, p_params);
}

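/* Translate the RSS parameters into the vport-update ramrod's RSS config -
 * capabilities, indirection table [of engine-absolute queue ids] and key.
 */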
static enum _ecore_status_t
ecore_sp_vport_update_rss(struct ecore_hwfn *p_hwfn,
                          struct vport_update_ramrod_data *p_ramrod,
                          struct ecore_rss_params *p_rss)
{
        struct eth_vport_rss_config *p_config;
        int i, table_size;
        enum _ecore_status_t rc = ECORE_SUCCESS;

        if (!p_rss) {
                p_ramrod->common.update_rss_flg = 0;
                return rc;
        }
        p_config = &p_ramrod->rss_config;

        OSAL_BUILD_BUG_ON(ECORE_RSS_IND_TABLE_SIZE !=
                          ETH_RSS_IND_TABLE_ENTRIES_NUM);

        rc = ecore_fw_rss_eng(p_hwfn, p_rss->rss_eng_id, &p_config->rss_id);
        if (rc != ECORE_SUCCESS)
                return rc;

        p_ramrod->common.update_rss_flg = p_rss->update_rss_config;
        p_config->update_rss_capabilities = p_rss->update_rss_capabilities;
        p_config->update_rss_ind_table = p_rss->update_rss_ind_table;
        p_config->update_rss_key = p_rss->update_rss_key;

        p_config->rss_mode = p_rss->rss_enable ?
            ETH_VPORT_RSS_MODE_REGULAR : ETH_VPORT_RSS_MODE_DISABLED;

        p_config->capabilities = 0;

        SET_FIELD(p_config->capabilities,
                  ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY,
                  !!(p_rss->rss_caps & ECORE_RSS_IPV4));
        SET_FIELD(p_config->capabilities,
                  ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY,
                  !!(p_rss->rss_caps & ECORE_RSS_IPV6));
        SET_FIELD(p_config->capabilities,
                  ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY,
                  !!(p_rss->rss_caps & ECORE_RSS_IPV4_TCP));
        SET_FIELD(p_config->capabilities,
                  ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY,
                  !!(p_rss->rss_caps & ECORE_RSS_IPV6_TCP));
        SET_FIELD(p_config->capabilities,
                  ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY,
                  !!(p_rss->rss_caps & ECORE_RSS_IPV4_UDP));
        SET_FIELD(p_config->capabilities,
                  ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY,
                  !!(p_rss->rss_caps & ECORE_RSS_IPV6_UDP));
        p_config->tbl_size = p_rss->rss_table_size_log;
        p_config->capabilities = OSAL_CPU_TO_LE16(p_config->capabilities);

        DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
                   "update rss flag %d, rss_mode = %d, update_caps = %d, capabilities = %d, update_ind = %d, update_rss_key = %d\n",
                   p_ramrod->common.update_rss_flg,
                   p_config->rss_mode,
                   p_config->update_rss_capabilities,
                   p_config->capabilities,
                   p_config->update_rss_ind_table, p_config->update_rss_key);

        table_size = OSAL_MIN_T(int, ECORE_RSS_IND_TABLE_SIZE,
                                1 << p_config->tbl_size);
        for (i = 0; i < table_size; i++) {
                struct ecore_queue_cid *p_queue = p_rss->rss_ind_table[i];

                if (!p_queue)
                        return ECORE_INVAL;

                p_config->indirection_table[i] =
                                OSAL_CPU_TO_LE16(p_queue->abs.queue_id);
        }

        DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
                   "Configured RSS indirection table [%d entries]:\n",
                   table_size);
        for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i += 0x10) {
                DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
                           "%04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x\n",
                           OSAL_LE16_TO_CPU(p_config->indirection_table[i]),
                           OSAL_LE16_TO_CPU(p_config->indirection_table[i + 1]),
                           OSAL_LE16_TO_CPU(p_config->indirection_table[i + 2]),
                           OSAL_LE16_TO_CPU(p_config->indirection_table[i + 3]),
                           OSAL_LE16_TO_CPU(p_config->indirection_table[i + 4]),
                           OSAL_LE16_TO_CPU(p_config->indirection_table[i + 5]),
                           OSAL_LE16_TO_CPU(p_config->indirection_table[i + 6]),
                           OSAL_LE16_TO_CPU(p_config->indirection_table[i + 7]),
                           OSAL_LE16_TO_CPU(p_config->indirection_table[i + 8]),
                           OSAL_LE16_TO_CPU(p_config->indirection_table[i + 9]),
                          OSAL_LE16_TO_CPU(p_config->indirection_table[i + 10]),
                          OSAL_LE16_TO_CPU(p_config->indirection_table[i + 11]),
                          OSAL_LE16_TO_CPU(p_config->indirection_table[i + 12]),
                          OSAL_LE16_TO_CPU(p_config->indirection_table[i + 13]),
                          OSAL_LE16_TO_CPU(p_config->indirection_table[i + 14]),
                         OSAL_LE16_TO_CPU(p_config->indirection_table[i + 15]));
        }

        for (i = 0; i < 10; i++)
                p_config->rss_key[i] = OSAL_CPU_TO_LE32(p_rss->rss_key[i]);

        return rc;
}

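/* Translate the requested accept flags into the firmware's Rx/Tx mode
 * state bitfields within the vport-update ramrod.
 */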
static void
ecore_sp_update_accept_mode(struct ecore_hwfn *p_hwfn,
                            struct vport_update_ramrod_data *p_ramrod,
                            struct ecore_filter_accept_flags accept_flags)
{
        p_ramrod->common.update_rx_mode_flg =
                                        accept_flags.update_rx_mode_config;
        p_ramrod->common.update_tx_mode_flg =
                                        accept_flags.update_tx_mode_config;

#ifndef ASIC_ONLY
        /* On B0 emulation we cannot enable Tx, since this would cause writes
         * to PVFC HW block which isn't implemented in emulation.
         */
        if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
                DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                           "Non-Asic - prevent Tx mode in vport update\n");
                p_ramrod->common.update_tx_mode_flg = 0;
        }
#endif

        /* Set Rx mode accept flags */
        if (p_ramrod->common.update_rx_mode_flg) {
                u8 accept_filter = accept_flags.rx_accept_filter;
                u16 state = 0;

                SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_DROP_ALL,
                          !(!!(accept_filter & ECORE_ACCEPT_UCAST_MATCHED) ||
                           !!(accept_filter & ECORE_ACCEPT_UCAST_UNMATCHED)));

                SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED,
                          !!(accept_filter & ECORE_ACCEPT_UCAST_UNMATCHED));

                SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_DROP_ALL,
                          !(!!(accept_filter & ECORE_ACCEPT_MCAST_MATCHED) ||
                            !!(accept_filter & ECORE_ACCEPT_MCAST_UNMATCHED)));

                SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL,
                          (!!(accept_filter & ECORE_ACCEPT_MCAST_MATCHED) &&
                           !!(accept_filter & ECORE_ACCEPT_MCAST_UNMATCHED)));

                SET_FIELD(state, ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL,
                          !!(accept_filter & ECORE_ACCEPT_BCAST));

                p_ramrod->rx_mode.state = OSAL_CPU_TO_LE16(state);
                DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                           "vport[%02x] p_ramrod->rx_mode.state = 0x%x\n",
                           p_ramrod->common.vport_id, state);
        }

        /* Set Tx mode accept flags */
        if (p_ramrod->common.update_tx_mode_flg) {
                u8 accept_filter = accept_flags.tx_accept_filter;
                u16 state = 0;

                SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_DROP_ALL,
                          !!(accept_filter & ECORE_ACCEPT_NONE));

                SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_DROP_ALL,
                          !!(accept_filter & ECORE_ACCEPT_NONE));

                SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL,
                          (!!(accept_filter & ECORE_ACCEPT_MCAST_MATCHED) &&
                           !!(accept_filter & ECORE_ACCEPT_MCAST_UNMATCHED)));

                SET_FIELD(state, ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL,
                          !!(accept_filter & ECORE_ACCEPT_BCAST));

                p_ramrod->tx_mode.state = OSAL_CPU_TO_LE16(state);
                DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                           "vport[%02x] p_ramrod->tx_mode.state = 0x%x\n",
                           p_ramrod->common.vport_id, state);
        }
}

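/* Copy the TPA [HW LRO] aggregation parameters into the vport-update
 * ramrod, or clear the relevant update flags when none were supplied.
 */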
static void
ecore_sp_vport_update_sge_tpa(struct ecore_hwfn *p_hwfn,
                              struct vport_update_ramrod_data *p_ramrod,
                              struct ecore_sge_tpa_params *p_params)
{
        struct eth_vport_tpa_param *p_tpa;

        if (!p_params) {
                p_ramrod->common.update_tpa_param_flg = 0;
                p_ramrod->common.update_tpa_en_flg = 0;
                return;
        }

        p_ramrod->common.update_tpa_en_flg = p_params->update_tpa_en_flg;
        p_tpa = &p_ramrod->tpa_param;
        p_tpa->tpa_ipv4_en_flg = p_params->tpa_ipv4_en_flg;
        p_tpa->tpa_ipv6_en_flg = p_params->tpa_ipv6_en_flg;
        p_tpa->tpa_ipv4_tunn_en_flg = p_params->tpa_ipv4_tunn_en_flg;
        p_tpa->tpa_ipv6_tunn_en_flg = p_params->tpa_ipv6_tunn_en_flg;

        p_ramrod->common.update_tpa_param_flg = p_params->update_tpa_param_flg;
        p_tpa->max_buff_num = p_params->max_buffers_per_cqe;
        p_tpa->tpa_pkt_split_flg = p_params->tpa_pkt_split_flg;
        p_tpa->tpa_hdr_data_split_flg = p_params->tpa_hdr_data_split_flg;
        p_tpa->tpa_gro_consistent_flg = p_params->tpa_gro_consistent_flg;
        p_tpa->tpa_max_aggs_num = p_params->tpa_max_aggs_num;
        p_tpa->tpa_max_size = p_params->tpa_max_size;
        p_tpa->tpa_min_size_to_start = p_params->tpa_min_size_to_start;
        p_tpa->tpa_min_size_to_cont = p_params->tpa_min_size_to_cont;
}

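/* Copy the approximate-multicast bins into the vport-update ramrod;
 * only the VF flow makes use of this.
 */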
static void
ecore_sp_update_mcast_bin(struct ecore_hwfn *p_hwfn,
                          struct vport_update_ramrod_data *p_ramrod,
                          struct ecore_sp_vport_update_params *p_params)
{
        int i;

        OSAL_MEMSET(&p_ramrod->approx_mcast.bins, 0,
                    sizeof(p_ramrod->approx_mcast.bins));

        if (!p_params->update_approx_mcast_flg)
                return;

        p_ramrod->common.update_approx_mcast_flg = 1;
        for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
                u32 *p_bins = (u32 *)p_params->bins;

                p_ramrod->approx_mcast.bins[i] = OSAL_CPU_TO_LE32(p_bins[i]);
        }
}

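/* Build and post a VPORT_UPDATE ramrod covering activity state, vlan
 * handling, tx-switching, anti-spoofing, RSS, accept mode, TPA and MTU.
 * VFs forward the request to their PF instead.
 */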
enum _ecore_status_t
ecore_sp_vport_update(struct ecore_hwfn *p_hwfn,
                      struct ecore_sp_vport_update_params *p_params,
                      enum spq_mode comp_mode,
                      struct ecore_spq_comp_cb *p_comp_data)
{
        struct ecore_rss_params *p_rss_params = p_params->rss_params;
        struct vport_update_ramrod_data_cmn *p_cmn;
        struct ecore_sp_init_data init_data;
        struct vport_update_ramrod_data *p_ramrod = OSAL_NULL;
        struct ecore_spq_entry *p_ent = OSAL_NULL;
        u8 abs_vport_id = 0, val;
        enum _ecore_status_t rc = ECORE_NOTIMPL;

        if (IS_VF(p_hwfn->p_dev)) {
                rc = ecore_vf_pf_vport_update(p_hwfn, p_params);
                return rc;
        }

        rc = ecore_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
        if (rc != ECORE_SUCCESS)
                return rc;

        /* Get SPQ entry */
        OSAL_MEMSET(&init_data, 0, sizeof(init_data));
        init_data.cid = ecore_spq_get_cid(p_hwfn);
        init_data.opaque_fid = p_params->opaque_fid;
        init_data.comp_mode = comp_mode;
        init_data.p_comp_data = p_comp_data;

        rc = ecore_sp_init_request(p_hwfn, &p_ent,
                                   ETH_RAMROD_VPORT_UPDATE,
                                   PROTOCOLID_ETH, &init_data);
        if (rc != ECORE_SUCCESS)
                return rc;

        /* Copy input params to ramrod according to FW struct */
        p_ramrod = &p_ent->ramrod.vport_update;
        p_cmn = &p_ramrod->common;

        p_cmn->vport_id = abs_vport_id;

        p_cmn->rx_active_flg = p_params->vport_active_rx_flg;
        p_cmn->update_rx_active_flg = p_params->update_vport_active_rx_flg;
        p_cmn->tx_active_flg = p_params->vport_active_tx_flg;
        p_cmn->update_tx_active_flg = p_params->update_vport_active_tx_flg;

        p_cmn->accept_any_vlan = p_params->accept_any_vlan;
        val = p_params->update_accept_any_vlan_flg;
        p_cmn->update_accept_any_vlan_flg = val;

        p_cmn->inner_vlan_removal_en = p_params->inner_vlan_removal_flg;
        val = p_params->update_inner_vlan_removal_flg;
        p_cmn->update_inner_vlan_removal_en_flg = val;

        p_cmn->default_vlan_en = p_params->default_vlan_enable_flg;
        val = p_params->update_default_vlan_enable_flg;
        p_cmn->update_default_vlan_en_flg = val;

        p_cmn->default_vlan = OSAL_CPU_TO_LE16(p_params->default_vlan);
        p_cmn->update_default_vlan_flg = p_params->update_default_vlan_flg;

        p_cmn->silent_vlan_removal_en = p_params->silent_vlan_removal_flg;

        p_ramrod->common.tx_switching_en = p_params->tx_switching_flg;

#ifndef ASIC_ONLY
        if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
                if (p_ramrod->common.tx_switching_en ||
                    p_ramrod->common.update_tx_switching_en_flg) {
                        DP_NOTICE(p_hwfn, false,
                                  "FPGA - why are we seeing tx-switching? Overriding it\n");
                        p_ramrod->common.tx_switching_en = 0;
                        p_ramrod->common.update_tx_switching_en_flg = 1;
                }
#endif
        p_cmn->update_tx_switching_en_flg = p_params->update_tx_switching_flg;

        p_cmn->anti_spoofing_en = p_params->anti_spoofing_en;
        val = p_params->update_anti_spoofing_en_flg;
        p_ramrod->common.update_anti_spoofing_en_flg = val;

        rc = ecore_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params);
        if (rc != ECORE_SUCCESS) {
                /* Return the SPQ entry that was taken in
                 * ecore_sp_init_request().
                 */
                ecore_spq_return_entry(p_hwfn, p_ent);
                return rc;
        }

        /* Update mcast bins for VFs, PF doesn't use this functionality */
        ecore_sp_update_mcast_bin(p_hwfn, p_ramrod, p_params);

        ecore_sp_update_accept_mode(p_hwfn, p_ramrod, p_params->accept_flags);
        ecore_sp_vport_update_sge_tpa(p_hwfn, p_ramrod,
                                      p_params->sge_tpa_params);
        if (p_params->mtu) {
                p_ramrod->common.update_mtu_flg = 1;
                p_ramrod->common.mtu = OSAL_CPU_TO_LE16(p_params->mtu);
        }

        return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

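/* Post a VPORT_STOP ramrod for the given vport; VFs forward the request
 * to their PF.
 */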
enum _ecore_status_t ecore_sp_vport_stop(struct ecore_hwfn *p_hwfn,
                                         u16 opaque_fid, u8 vport_id)
{
        struct vport_stop_ramrod_data *p_ramrod;
        struct ecore_sp_init_data init_data;
        struct ecore_spq_entry *p_ent;
        u8 abs_vport_id = 0;
        enum _ecore_status_t rc;

        if (IS_VF(p_hwfn->p_dev))
                return ecore_vf_pf_vport_stop(p_hwfn);

        rc = ecore_fw_vport(p_hwfn, vport_id, &abs_vport_id);
        if (rc != ECORE_SUCCESS)
                return rc;

        /* Get SPQ entry */
        OSAL_MEMSET(&init_data, 0, sizeof(init_data));
        init_data.cid = ecore_spq_get_cid(p_hwfn);
        init_data.opaque_fid = opaque_fid;
        init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

        rc = ecore_sp_init_request(p_hwfn, &p_ent,
                                   ETH_RAMROD_VPORT_STOP,
                                   PROTOCOLID_ETH, &init_data);
        if (rc != ECORE_SUCCESS)
                return rc;

        p_ramrod = &p_ent->ramrod.vport_stop;
        p_ramrod->vport_id = abs_vport_id;

        return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

static enum _ecore_status_t
ecore_vf_pf_accept_flags(struct ecore_hwfn *p_hwfn,
                         struct ecore_filter_accept_flags *p_accept_flags)
{
        struct ecore_sp_vport_update_params s_params;

        OSAL_MEMSET(&s_params, 0, sizeof(s_params));
        OSAL_MEMCPY(&s_params.accept_flags, p_accept_flags,
                    sizeof(struct ecore_filter_accept_flags));

        return ecore_vf_pf_vport_update(p_hwfn, &s_params);
}

enum _ecore_status_t
ecore_filter_accept_cmd(struct ecore_dev *p_dev,
                        u8 vport,
                        struct ecore_filter_accept_flags accept_flags,
                        u8 update_accept_any_vlan,
                        u8 accept_any_vlan,
                        enum spq_mode comp_mode,
                        struct ecore_spq_comp_cb *p_comp_data)
{
        struct ecore_sp_vport_update_params vport_update_params;
        int i, rc;

        /* Prepare and send the vport rx_mode change */
        OSAL_MEMSET(&vport_update_params, 0, sizeof(vport_update_params));
        vport_update_params.vport_id = vport;
        vport_update_params.accept_flags = accept_flags;
        vport_update_params.update_accept_any_vlan_flg = update_accept_any_vlan;
        vport_update_params.accept_any_vlan = accept_any_vlan;

        for_each_hwfn(p_dev, i) {
                struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];

                vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;

                if (IS_VF(p_dev)) {
                        rc = ecore_vf_pf_accept_flags(p_hwfn, &accept_flags);
                        if (rc != ECORE_SUCCESS)
                                return rc;
                        continue;
                }

                rc = ecore_sp_vport_update(p_hwfn, &vport_update_params,
                                           comp_mode, p_comp_data);
                if (rc != ECORE_SUCCESS) {
                        DP_ERR(p_dev, "Update rx_mode failed %d\n", rc);
                        return rc;
                }

                DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                           "Accept filter configured, flags = [Rx]%x [Tx]%x\n",
                           accept_flags.rx_accept_filter,
                           accept_flags.tx_accept_filter);

                if (update_accept_any_vlan)
                        DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                                   "accept_any_vlan=%d configured\n",
                                   accept_any_vlan);
        }

        return 0;
}

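/* Build and post the RX_QUEUE_START ramrod for an already-acquired queue
 * cid, handing the BD chain address and CQE PBL to the firmware.
 */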
enum _ecore_status_t
ecore_eth_rxq_start_ramrod(struct ecore_hwfn *p_hwfn,
                           struct ecore_queue_cid *p_cid,
                           u16 bd_max_bytes,
                           dma_addr_t bd_chain_phys_addr,
                           dma_addr_t cqe_pbl_addr,
                           u16 cqe_pbl_size)
{
        struct rx_queue_start_ramrod_data *p_ramrod = OSAL_NULL;
        struct ecore_spq_entry *p_ent = OSAL_NULL;
        struct ecore_sp_init_data init_data;
        enum _ecore_status_t rc = ECORE_NOTIMPL;

        DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                   "opaque_fid=0x%x, cid=0x%x, rx_qzone=0x%x, vport_id=0x%x, sb_id=0x%x\n",
                   p_cid->opaque_fid, p_cid->cid, p_cid->abs.queue_id,
                   p_cid->abs.vport_id, p_cid->abs.sb);

        /* Get SPQ entry */
        OSAL_MEMSET(&init_data, 0, sizeof(init_data));
        init_data.cid = p_cid->cid;
        init_data.opaque_fid = p_cid->opaque_fid;
        init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

        rc = ecore_sp_init_request(p_hwfn, &p_ent,
                                   ETH_RAMROD_RX_QUEUE_START,
                                   PROTOCOLID_ETH, &init_data);
        if (rc != ECORE_SUCCESS)
                return rc;

        p_ramrod = &p_ent->ramrod.rx_queue_start;

        p_ramrod->sb_id = OSAL_CPU_TO_LE16(p_cid->abs.sb);
        p_ramrod->sb_index = p_cid->abs.sb_idx;
        p_ramrod->vport_id = p_cid->abs.vport_id;
        p_ramrod->stats_counter_id = p_cid->abs.stats_id;
        p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id);
        p_ramrod->complete_cqe_flg = 0;
        p_ramrod->complete_event_flg = 1;

        p_ramrod->bd_max_bytes = OSAL_CPU_TO_LE16(bd_max_bytes);
        DMA_REGPAIR_LE(p_ramrod->bd_base, bd_chain_phys_addr);

        p_ramrod->num_of_pbl_pages = OSAL_CPU_TO_LE16(cqe_pbl_size);
        DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr);

        if (p_cid->vfid != ECORE_QUEUE_CID_PF) {
                p_ramrod->vf_rx_prod_index = p_cid->vf_qid;
                DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                           "Queue%s is meant for VF rxq[%02x]\n",
                           !!p_cid->b_legacy_vf ? " [legacy]" : "",
                           p_cid->vf_qid);
                p_ramrod->vf_rx_prod_use_zone_a = !!p_cid->b_legacy_vf;
        }

        return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

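/* PF flavor of Rx-queue start - derive the queue's producer address in
 * the internal RAM, zero the producers, then post the start ramrod.
 */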
static enum _ecore_status_t
ecore_eth_pf_rx_queue_start(struct ecore_hwfn *p_hwfn,
                            struct ecore_queue_cid *p_cid,
                            u16 bd_max_bytes,
                            dma_addr_t bd_chain_phys_addr,
                            dma_addr_t cqe_pbl_addr,
                            u16 cqe_pbl_size,
                            void OSAL_IOMEM * *pp_prod)
{
        u32 init_prod_val = 0;

        *pp_prod = (u8 OSAL_IOMEM *)
                    p_hwfn->regview +
                    GTT_BAR0_MAP_REG_MSDM_RAM +
                    MSTORM_ETH_PF_PRODS_OFFSET(p_cid->abs.queue_id);

        /* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
        __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
                          (u32 *)(&init_prod_val));

        return ecore_eth_rxq_start_ramrod(p_hwfn, p_cid,
                                          bd_max_bytes,
                                          bd_chain_phys_addr,
                                          cqe_pbl_addr, cqe_pbl_size);
}

enum _ecore_status_t
ecore_eth_rx_queue_start(struct ecore_hwfn *p_hwfn,
                         u16 opaque_fid,
                         struct ecore_queue_start_common_params *p_params,
                         u16 bd_max_bytes,
                         dma_addr_t bd_chain_phys_addr,
                         dma_addr_t cqe_pbl_addr,
                         u16 cqe_pbl_size,
                         struct ecore_rxq_start_ret_params *p_ret_params)
{
        struct ecore_queue_cid *p_cid;
        enum _ecore_status_t rc;

        /* Allocate a CID for the queue */
        p_cid = ecore_eth_queue_to_cid_pf(p_hwfn, opaque_fid, p_params);
        if (p_cid == OSAL_NULL)
                return ECORE_NOMEM;

        if (IS_PF(p_hwfn->p_dev))
                rc = ecore_eth_pf_rx_queue_start(p_hwfn, p_cid,
                                                 bd_max_bytes,
                                                 bd_chain_phys_addr,
                                                 cqe_pbl_addr, cqe_pbl_size,
                                                 &p_ret_params->p_prod);
        else
                rc = ecore_vf_pf_rxq_start(p_hwfn, p_cid,
                                           bd_max_bytes,
                                           bd_chain_phys_addr,
                                           cqe_pbl_addr,
                                           cqe_pbl_size,
                                           &p_ret_params->p_prod);

        /* On success, provide the caller with the queue-cid as a handle;
         * otherwise release the CID.
         */
        if (rc != ECORE_SUCCESS)
                ecore_eth_queue_cid_release(p_hwfn, p_cid);
        else
                p_ret_params->p_handle = (void *)p_cid;

        return rc;
}

enum _ecore_status_t
ecore_sp_eth_rx_queues_update(struct ecore_hwfn *p_hwfn,
                              void **pp_rxq_handles,
                              u8 num_rxqs,
                              u8 complete_cqe_flg,
                              u8 complete_event_flg,
                              enum spq_mode comp_mode,
                              struct ecore_spq_comp_cb *p_comp_data)
{
        struct rx_queue_update_ramrod_data *p_ramrod = OSAL_NULL;
        struct ecore_spq_entry *p_ent = OSAL_NULL;
        struct ecore_sp_init_data init_data;
        struct ecore_queue_cid *p_cid;
        enum _ecore_status_t rc = ECORE_NOTIMPL;
        u8 i;

        if (IS_VF(p_hwfn->p_dev))
                return ecore_vf_pf_rxqs_update(p_hwfn,
                                               (struct ecore_queue_cid **)
                                               pp_rxq_handles,
                                               num_rxqs,
                                               complete_cqe_flg,
                                               complete_event_flg);

        OSAL_MEMSET(&init_data, 0, sizeof(init_data));
        init_data.comp_mode = comp_mode;
        init_data.p_comp_data = p_comp_data;

        for (i = 0; i < num_rxqs; i++) {
                p_cid = ((struct ecore_queue_cid **)pp_rxq_handles)[i];

                /* Get SPQ entry */
                init_data.cid = p_cid->cid;
                init_data.opaque_fid = p_cid->opaque_fid;

                rc = ecore_sp_init_request(p_hwfn, &p_ent,
                                           ETH_RAMROD_RX_QUEUE_UPDATE,
                                           PROTOCOLID_ETH, &init_data);
                if (rc != ECORE_SUCCESS)
                        return rc;

                p_ramrod = &p_ent->ramrod.rx_queue_update;
                p_ramrod->vport_id = p_cid->abs.vport_id;

                p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id);
                p_ramrod->complete_cqe_flg = complete_cqe_flg;
                p_ramrod->complete_event_flg = complete_event_flg;

                rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
                if (rc != ECORE_SUCCESS)
                        return rc;
        }

        return rc;
}

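/* Post the RX_QUEUE_STOP ramrod, selecting whether the completion should
 * arrive on the CQE and/or as an EQE [the latter is needed for VF queues].
 */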
static enum _ecore_status_t
ecore_eth_pf_rx_queue_stop(struct ecore_hwfn *p_hwfn,
                           struct ecore_queue_cid *p_cid,
                           bool b_eq_completion_only,
                           bool b_cqe_completion)
{
        struct rx_queue_stop_ramrod_data *p_ramrod = OSAL_NULL;
        struct ecore_spq_entry *p_ent = OSAL_NULL;
        struct ecore_sp_init_data init_data;
        enum _ecore_status_t rc;

        OSAL_MEMSET(&init_data, 0, sizeof(init_data));
        init_data.cid = p_cid->cid;
        init_data.opaque_fid = p_cid->opaque_fid;
        init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

        rc = ecore_sp_init_request(p_hwfn, &p_ent,
                                   ETH_RAMROD_RX_QUEUE_STOP,
                                   PROTOCOLID_ETH, &init_data);
        if (rc != ECORE_SUCCESS)
                return rc;

        p_ramrod = &p_ent->ramrod.rx_queue_stop;
        p_ramrod->vport_id = p_cid->abs.vport_id;
        p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id);

        /* Cleaning the queue requires the completion to arrive there.
         * In addition, VFs require the answer to come as an EQE to the PF.
         */
        p_ramrod->complete_cqe_flg = ((p_cid->vfid == ECORE_QUEUE_CID_PF) &&
                                      !b_eq_completion_only) ||
                                     b_cqe_completion;
        p_ramrod->complete_event_flg = (p_cid->vfid != ECORE_QUEUE_CID_PF) ||
                                       b_eq_completion_only;

        return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

enum _ecore_status_t ecore_eth_rx_queue_stop(struct ecore_hwfn *p_hwfn,
                                             void *p_rxq,
                                             bool eq_completion_only,
                                             bool cqe_completion)
{
        struct ecore_queue_cid *p_cid = (struct ecore_queue_cid *)p_rxq;
        enum _ecore_status_t rc = ECORE_NOTIMPL;

        if (IS_PF(p_hwfn->p_dev))
                rc = ecore_eth_pf_rx_queue_stop(p_hwfn, p_cid,
                                                eq_completion_only,
                                                cqe_completion);
        else
                rc = ecore_vf_pf_rxq_stop(p_hwfn, p_cid, cqe_completion);

        if (rc == ECORE_SUCCESS)
                ecore_eth_queue_cid_release(p_hwfn, p_cid);
        return rc;
}

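/* Build and post the TX_QUEUE_START ramrod for an already-acquired queue
 * cid, supplying the PBL address/size and the QM physical queue id.
 */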
enum _ecore_status_t
ecore_eth_txq_start_ramrod(struct ecore_hwfn *p_hwfn,
                           struct ecore_queue_cid *p_cid,
                           dma_addr_t pbl_addr, u16 pbl_size,
                           u16 pq_id)
{
        struct tx_queue_start_ramrod_data *p_ramrod = OSAL_NULL;
        struct ecore_spq_entry *p_ent = OSAL_NULL;
        struct ecore_sp_init_data init_data;
        enum _ecore_status_t rc = ECORE_NOTIMPL;

        /* Get SPQ entry */
        OSAL_MEMSET(&init_data, 0, sizeof(init_data));
        init_data.cid = p_cid->cid;
        init_data.opaque_fid = p_cid->opaque_fid;
        init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

        rc = ecore_sp_init_request(p_hwfn, &p_ent,
                                   ETH_RAMROD_TX_QUEUE_START,
                                   PROTOCOLID_ETH, &init_data);
        if (rc != ECORE_SUCCESS)
                return rc;

        p_ramrod = &p_ent->ramrod.tx_queue_start;
        p_ramrod->vport_id = p_cid->abs.vport_id;

        p_ramrod->sb_id = OSAL_CPU_TO_LE16(p_cid->abs.sb);
        p_ramrod->sb_index = p_cid->abs.sb_idx;
        p_ramrod->stats_counter_id = p_cid->abs.stats_id;

        p_ramrod->queue_zone_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id);
        p_ramrod->same_as_last_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id);

        p_ramrod->pbl_size = OSAL_CPU_TO_LE16(pbl_size);
        DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, pbl_addr);

        p_ramrod->qm_pq_id = OSAL_CPU_TO_LE16(pq_id);

        return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

static enum _ecore_status_t
ecore_eth_pf_tx_queue_start(struct ecore_hwfn *p_hwfn,
                            struct ecore_queue_cid *p_cid,
                            u8 tc,
                            dma_addr_t pbl_addr, u16 pbl_size,
                            void OSAL_IOMEM * *pp_doorbell)
{
        enum _ecore_status_t rc;

        /* TODO - set tc in the pq_params for multi-cos */
        rc = ecore_eth_txq_start_ramrod(p_hwfn, p_cid,
                                        pbl_addr, pbl_size,
                                        ecore_get_cm_pq_idx_mcos(p_hwfn, tc));
        if (rc != ECORE_SUCCESS)
                return rc;

        /* Provide the caller with the necessary return values */
        *pp_doorbell = (u8 OSAL_IOMEM *)
                       p_hwfn->doorbells +
                       DB_ADDR(p_cid->cid, DQ_DEMS_LEGACY);

        return ECORE_SUCCESS;
}

enum _ecore_status_t
ecore_eth_tx_queue_start(struct ecore_hwfn *p_hwfn, u16 opaque_fid,
                         struct ecore_queue_start_common_params *p_params,
                         u8 tc,
                         dma_addr_t pbl_addr, u16 pbl_size,
                         struct ecore_txq_start_ret_params *p_ret_params)
{
        struct ecore_queue_cid *p_cid;
        enum _ecore_status_t rc;

        p_cid = ecore_eth_queue_to_cid_pf(p_hwfn, opaque_fid, p_params);
        if (p_cid == OSAL_NULL)
                return ECORE_INVAL;

        if (IS_PF(p_hwfn->p_dev))
                rc = ecore_eth_pf_tx_queue_start(p_hwfn, p_cid, tc,
                                                 pbl_addr, pbl_size,
                                                 &p_ret_params->p_doorbell);
        else
                rc = ecore_vf_pf_txq_start(p_hwfn, p_cid,
                                           pbl_addr, pbl_size,
                                           &p_ret_params->p_doorbell);

        if (rc != ECORE_SUCCESS)
                ecore_eth_queue_cid_release(p_hwfn, p_cid);
        else
                p_ret_params->p_handle = (void *)p_cid;

        return rc;
}

static enum _ecore_status_t
ecore_eth_pf_tx_queue_stop(struct ecore_hwfn *p_hwfn,
                           struct ecore_queue_cid *p_cid)
{
        struct ecore_spq_entry *p_ent = OSAL_NULL;
        struct ecore_sp_init_data init_data;
        enum _ecore_status_t rc;

        OSAL_MEMSET(&init_data, 0, sizeof(init_data));
        init_data.cid = p_cid->cid;
        init_data.opaque_fid = p_cid->opaque_fid;
        init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

        rc = ecore_sp_init_request(p_hwfn, &p_ent,
                                   ETH_RAMROD_TX_QUEUE_STOP,
                                   PROTOCOLID_ETH, &init_data);
        if (rc != ECORE_SUCCESS)
                return rc;

        return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

enum _ecore_status_t ecore_eth_tx_queue_stop(struct ecore_hwfn *p_hwfn,
                                             void *p_handle)
{
        struct ecore_queue_cid *p_cid = (struct ecore_queue_cid *)p_handle;
        enum _ecore_status_t rc;

        if (IS_PF(p_hwfn->p_dev))
                rc = ecore_eth_pf_tx_queue_stop(p_hwfn, p_cid);
        else
                rc = ecore_vf_pf_txq_stop(p_hwfn, p_cid);

        if (rc == ECORE_SUCCESS)
                ecore_eth_queue_cid_release(p_hwfn, p_cid);
        return rc;
}

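/* Map an ecore unicast filter opcode onto the matching firmware action */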
static enum eth_filter_action
ecore_filter_action(enum ecore_filter_opcode opcode)
{
        enum eth_filter_action action = MAX_ETH_FILTER_ACTION;

        switch (opcode) {
        case ECORE_FILTER_ADD:
                action = ETH_FILTER_ACTION_ADD;
                break;
        case ECORE_FILTER_REMOVE:
                action = ETH_FILTER_ACTION_REMOVE;
                break;
        case ECORE_FILTER_FLUSH:
                action = ETH_FILTER_ACTION_REMOVE_ALL;
                break;
        default:
                action = MAX_ETH_FILTER_ACTION;
        }

        return action;
}

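/* Common preparation of a FILTERS_UPDATE ramrod for unicast commands -
 * resolves the absolute vports, acquires an SPQ entry and fills in the
 * filter command[s] according to the requested opcode and filter type.
 */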
static enum _ecore_status_t
ecore_filter_ucast_common(struct ecore_hwfn *p_hwfn,
                          u16 opaque_fid,
                          struct ecore_filter_ucast *p_filter_cmd,
                          struct vport_filter_update_ramrod_data **pp_ramrod,
                          struct ecore_spq_entry **pp_ent,
                          enum spq_mode comp_mode,
                          struct ecore_spq_comp_cb *p_comp_data)
{
        u8 vport_to_add_to = 0, vport_to_remove_from = 0;
        struct vport_filter_update_ramrod_data *p_ramrod;
        struct eth_filter_cmd *p_first_filter;
        struct eth_filter_cmd *p_second_filter;
        struct ecore_sp_init_data init_data;
        enum eth_filter_action action;
        enum _ecore_status_t rc;

        rc = ecore_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from,
                            &vport_to_remove_from);
        if (rc != ECORE_SUCCESS)
                return rc;

        rc = ecore_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to,
                            &vport_to_add_to);
        if (rc != ECORE_SUCCESS)
                return rc;

        /* Get SPQ entry */
        OSAL_MEMSET(&init_data, 0, sizeof(init_data));
        init_data.cid = ecore_spq_get_cid(p_hwfn);
        init_data.opaque_fid = opaque_fid;
        init_data.comp_mode = comp_mode;
        init_data.p_comp_data = p_comp_data;

        rc = ecore_sp_init_request(p_hwfn, pp_ent,
                                   ETH_RAMROD_FILTERS_UPDATE,
                                   PROTOCOLID_ETH, &init_data);
        if (rc != ECORE_SUCCESS)
                return rc;

        *pp_ramrod = &(*pp_ent)->ramrod.vport_filter_update;
        p_ramrod = *pp_ramrod;
        p_ramrod->filter_cmd_hdr.rx = p_filter_cmd->is_rx_filter ? 1 : 0;
        p_ramrod->filter_cmd_hdr.tx = p_filter_cmd->is_tx_filter ? 1 : 0;

#ifndef ASIC_ONLY
        if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
                DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                           "Non-Asic - prevent Tx filters\n");
                p_ramrod->filter_cmd_hdr.tx = 0;
        }
#endif

        switch (p_filter_cmd->opcode) {
        case ECORE_FILTER_REPLACE:
        case ECORE_FILTER_MOVE:
                p_ramrod->filter_cmd_hdr.cmd_cnt = 2;
                break;
        default:
                p_ramrod->filter_cmd_hdr.cmd_cnt = 1;
                break;
        }

        p_first_filter = &p_ramrod->filter_cmds[0];
        p_second_filter = &p_ramrod->filter_cmds[1];

        switch (p_filter_cmd->type) {
        case ECORE_FILTER_MAC:
                p_first_filter->type = ETH_FILTER_TYPE_MAC;
                break;
        case ECORE_FILTER_VLAN:
                p_first_filter->type = ETH_FILTER_TYPE_VLAN;
                break;
        case ECORE_FILTER_MAC_VLAN:
                p_first_filter->type = ETH_FILTER_TYPE_PAIR;
                break;
        case ECORE_FILTER_INNER_MAC:
                p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC;
                break;
        case ECORE_FILTER_INNER_VLAN:
                p_first_filter->type = ETH_FILTER_TYPE_INNER_VLAN;
                break;
        case ECORE_FILTER_INNER_PAIR:
                p_first_filter->type = ETH_FILTER_TYPE_INNER_PAIR;
                break;
        case ECORE_FILTER_INNER_MAC_VNI_PAIR:
                p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR;
                break;
        case ECORE_FILTER_MAC_VNI_PAIR:
                p_first_filter->type = ETH_FILTER_TYPE_MAC_VNI_PAIR;
                break;
        case ECORE_FILTER_VNI:
                p_first_filter->type = ETH_FILTER_TYPE_VNI;
                break;
        case ECORE_FILTER_UNUSED: /* @DPDK */
                p_first_filter->type = MAX_ETH_FILTER_TYPE;
                break;
        }

        if ((p_first_filter->type == ETH_FILTER_TYPE_MAC) ||
            (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
            (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC) ||
            (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR) ||
            (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
            (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR))
                ecore_set_fw_mac_addr(&p_first_filter->mac_msb,
                                      &p_first_filter->mac_mid,
                                      &p_first_filter->mac_lsb,
                                      (u8 *)p_filter_cmd->mac);

        if ((p_first_filter->type == ETH_FILTER_TYPE_VLAN) ||
            (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
            (p_first_filter->type == ETH_FILTER_TYPE_INNER_VLAN) ||
            (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR))
1393                 p_first_filter->vlan_id = OSAL_CPU_TO_LE16(p_filter_cmd->vlan);
1394
1395         if ((p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
1396             (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR) ||
1397             (p_first_filter->type == ETH_FILTER_TYPE_VNI))
1398                 p_first_filter->vni = OSAL_CPU_TO_LE32(p_filter_cmd->vni);
1399
1400         if (p_filter_cmd->opcode == ECORE_FILTER_MOVE) {
1401                 p_second_filter->type = p_first_filter->type;
1402                 p_second_filter->mac_msb = p_first_filter->mac_msb;
1403                 p_second_filter->mac_mid = p_first_filter->mac_mid;
1404                 p_second_filter->mac_lsb = p_first_filter->mac_lsb;
1405                 p_second_filter->vlan_id = p_first_filter->vlan_id;
1406                 p_second_filter->vni = p_first_filter->vni;
1407
1408                 p_first_filter->action = ETH_FILTER_ACTION_REMOVE;
1409
1410                 p_first_filter->vport_id = vport_to_remove_from;
1411
1412                 p_second_filter->action = ETH_FILTER_ACTION_ADD;
1413                 p_second_filter->vport_id = vport_to_add_to;
1414         } else if (p_filter_cmd->opcode == ECORE_FILTER_REPLACE) {
1415                 p_first_filter->vport_id = vport_to_add_to;
1416                 OSAL_MEMCPY(p_second_filter, p_first_filter,
1417                             sizeof(*p_second_filter));
1418                 p_first_filter->action = ETH_FILTER_ACTION_REMOVE_ALL;
1419                 p_second_filter->action = ETH_FILTER_ACTION_ADD;
1420         } else {
1421                 action = ecore_filter_action(p_filter_cmd->opcode);
1422
1423                 if (action == MAX_ETH_FILTER_ACTION) {
1424                         DP_NOTICE(p_hwfn, true,
1425                                   "Filter opcode %d is not supported yet\n",
1426                                   p_filter_cmd->opcode);
1427                         return ECORE_NOTIMPL;
1428                 }
1429
1430                 p_first_filter->action = action;
1431                 p_first_filter->vport_id =
1432                     (p_filter_cmd->opcode == ECORE_FILTER_REMOVE) ?
1433                     vport_to_remove_from : vport_to_add_to;
1434         }
1435
1436         return ECORE_SUCCESS;
1437 }
1438
1439 enum _ecore_status_t
1440 ecore_sp_eth_filter_ucast(struct ecore_hwfn *p_hwfn,
1441                           u16 opaque_fid,
1442                           struct ecore_filter_ucast *p_filter_cmd,
1443                           enum spq_mode comp_mode,
1444                           struct ecore_spq_comp_cb *p_comp_data)
1445 {
1446         struct vport_filter_update_ramrod_data *p_ramrod = OSAL_NULL;
1447         struct ecore_spq_entry *p_ent = OSAL_NULL;
1448         struct eth_filter_cmd_header *p_header;
1449         enum _ecore_status_t rc;
1450
1451         rc = ecore_filter_ucast_common(p_hwfn, opaque_fid, p_filter_cmd,
1452                                        &p_ramrod, &p_ent,
1453                                        comp_mode, p_comp_data);
1454         if (rc != ECORE_SUCCESS) {
1455                 DP_ERR(p_hwfn, "Unicast filter command failed %d\n", rc);
1456                 return rc;
1457         }
1458         p_header = &p_ramrod->filter_cmd_hdr;
1459         p_header->assert_on_error = p_filter_cmd->assert_on_error;
1460
1461         rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
1462         if (rc != ECORE_SUCCESS) {
1463                 DP_ERR(p_hwfn, "Unicast filter post failed %d\n", rc);
1464                 return rc;
1465         }
1466
1467         DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
1468                    "Unicast filter configured, opcode = %s, type = %s, cmd_cnt = %d, is_rx_filter = %d, is_tx_filter = %d\n",
1469                    (p_filter_cmd->opcode == ECORE_FILTER_ADD) ? "ADD" :
1470                    ((p_filter_cmd->opcode == ECORE_FILTER_REMOVE) ?
1471                     "REMOVE" :
1472                     ((p_filter_cmd->opcode == ECORE_FILTER_MOVE) ?
1473                      "MOVE" : "REPLACE")),
1474                    (p_filter_cmd->type == ECORE_FILTER_MAC) ? "MAC" :
1475                    ((p_filter_cmd->type == ECORE_FILTER_VLAN) ?
1476                     "VLAN" : "MAC & VLAN"),
1477                    p_ramrod->filter_cmd_hdr.cmd_cnt,
1478                    p_filter_cmd->is_rx_filter, p_filter_cmd->is_tx_filter);
1479         DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
1480                    "vport_to_add_to = %d, vport_to_remove_from = %d, mac = %02x:%02x:%02x:%02x:%02x:%02x, vlan = %d\n",
1481                    p_filter_cmd->vport_to_add_to,
1482                    p_filter_cmd->vport_to_remove_from,
1483                    p_filter_cmd->mac[0], p_filter_cmd->mac[1],
1484                    p_filter_cmd->mac[2], p_filter_cmd->mac[3],
1485                    p_filter_cmd->mac[4], p_filter_cmd->mac[5],
1486                    p_filter_cmd->vlan);
1487
1488         return ECORE_SUCCESS;
1489 }
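
/* Illustrative sketch (hypothetical values, not compiled in): adding a
 * unicast MAC+VLAN pair filter directly via ecore_sp_eth_filter_ucast().
 * The field names mirror the usage in ecore_filter_ucast_common() above;
 * 'addr' stands for a caller-supplied MAC address.
 *
 *     struct ecore_filter_ucast ucast;
 *
 *     OSAL_MEMSET(&ucast, 0, sizeof(ucast));
 *     ucast.opcode = ECORE_FILTER_ADD;
 *     ucast.type = ECORE_FILTER_MAC_VLAN;
 *     OSAL_MEMCPY(ucast.mac, addr, ETH_ALEN);
 *     ucast.vlan = 100;
 *     ucast.vport_to_add_to = 0;
 *     ucast.is_rx_filter = 1;
 *     ucast.is_tx_filter = 1;
 *     rc = ecore_sp_eth_filter_ucast(p_hwfn, p_hwfn->hw_info.opaque_fid,
 *                                    &ucast, ECORE_SPQ_MODE_EBLOCK,
 *                                    OSAL_NULL);
 */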
1490
1491 /*******************************************************************************
1492  * Description:
1493  *         Calculates a CRC32C over a buffer, bit by bit ('complement' is
1494  *         currently unused). Note: crc32_length MUST be a multiple of 8.
1495  * Return: the computed CRC, or crc32_seed unchanged on invalid input.
1496  ******************************************************************************/
1497 static u32 ecore_calc_crc32c(u8 *crc32_packet,
1498                              u32 crc32_length, u32 crc32_seed, u8 complement)
1499 {
1500         u32 byte = 0, bit = 0, crc32_result = crc32_seed;
1501         u8 msb = 0, current_byte = 0;
1502
1503         if ((crc32_packet == OSAL_NULL) ||
1504             (crc32_length == 0) || ((crc32_length % 8) != 0)) {
1505                 return crc32_result;
1506         }
1507
1508         for (byte = 0; byte < crc32_length; byte++) {
1509                 current_byte = crc32_packet[byte];
1510                 for (bit = 0; bit < 8; bit++) {
1511                         msb = (u8)(crc32_result >> 31);
1512                         crc32_result = crc32_result << 1;
1513                         if (msb != (0x1 & (current_byte >> bit))) {
1514                                 crc32_result = crc32_result ^ CRC32_POLY;
1515                                 crc32_result |= 1;
1516                         }
1517                 }
1518         }
1519
1520         return crc32_result;
1521 }
1522
1523 static u32 ecore_crc32c_le(u32 seed, u8 *mac, u32 len)
1524 {
1525         u32 packet_buf[2] = { 0 };
1526
1527         OSAL_MEMCPY((u8 *)(&packet_buf[0]), &mac[0], 6);
1528         return ecore_calc_crc32c((u8 *)packet_buf, 8, seed, 0);
1529 }
1530
1531 u8 ecore_mcast_bin_from_mac(u8 *mac)
1532 {
1533         u32 crc = ecore_crc32c_le(ETH_MULTICAST_BIN_FROM_MAC_SEED,
1534                                   mac, ETH_ALEN);
1535
1536         return crc & 0xff;
1537 }
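
/* Example (illustrative): the approximate multicast match hashes each MAC
 * into one of 256 bins (CRC32C of the address, truncated to 8 bits); the
 * resulting bin is the bit set in the vport's approx_mcast vector, as done
 * in ecore_sp_eth_filter_mcast() below:
 *
 *     u32 bit = ecore_mcast_bin_from_mac(p_filter_cmd->mac[i]);
 *     OSAL_SET_BIT(bit, bins);
 */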
1538
1539 static enum _ecore_status_t
1540 ecore_sp_eth_filter_mcast(struct ecore_hwfn *p_hwfn,
1541                           u16 opaque_fid,
1542                           struct ecore_filter_mcast *p_filter_cmd,
1543                           enum spq_mode comp_mode,
1544                           struct ecore_spq_comp_cb *p_comp_data)
1545 {
1546         unsigned long bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
1547         struct vport_update_ramrod_data *p_ramrod = OSAL_NULL;
1548         struct ecore_spq_entry *p_ent = OSAL_NULL;
1549         struct ecore_sp_init_data init_data;
1550         u8 abs_vport_id = 0;
1551         enum _ecore_status_t rc;
1552         int i;
1553
1554         if (p_filter_cmd->opcode == ECORE_FILTER_ADD)
1555                 rc = ecore_fw_vport(p_hwfn,
1556                                     p_filter_cmd->vport_to_add_to,
1557                                     &abs_vport_id);
1558         else
1559                 rc = ecore_fw_vport(p_hwfn,
1560                                     p_filter_cmd->vport_to_remove_from,
1561                                     &abs_vport_id);
1562         if (rc != ECORE_SUCCESS)
1563                 return rc;
1564
1565         /* Get SPQ entry */
1566         OSAL_MEMSET(&init_data, 0, sizeof(init_data));
1567         init_data.cid = ecore_spq_get_cid(p_hwfn);
1568         init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
1569         init_data.comp_mode = comp_mode;
1570         init_data.p_comp_data = p_comp_data;
1571
1572         rc = ecore_sp_init_request(p_hwfn, &p_ent,
1573                                    ETH_RAMROD_VPORT_UPDATE,
1574                                    PROTOCOLID_ETH, &init_data);
1575         if (rc != ECORE_SUCCESS) {
1576                 DP_ERR(p_hwfn, "Multicast filter init request failed %d\n", rc);
1577                 return rc;
1578         }
1579
1580         p_ramrod = &p_ent->ramrod.vport_update;
1581         p_ramrod->common.update_approx_mcast_flg = 1;
1582
1583         /* explicitly clear out the entire vector */
1584         OSAL_MEMSET(&p_ramrod->approx_mcast.bins,
1585                     0, sizeof(p_ramrod->approx_mcast.bins));
1586         OSAL_MEMSET(bins, 0, sizeof(unsigned long) *
1587                     ETH_MULTICAST_MAC_BINS_IN_REGS);
1588         /* A filter ADD op is an explicit set op and removes
1589          * any existing filters for the vport.
1590          */
1591         if (p_filter_cmd->opcode == ECORE_FILTER_ADD) {
1592                 for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
1593                         u32 bit;
1594
1595                         bit = ecore_mcast_bin_from_mac(p_filter_cmd->mac[i]);
1596                         OSAL_SET_BIT(bit, bins);
1597                 }
1598
1599                 /* Convert to the correct endianness */
1600                 for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
1601                         struct vport_update_ramrod_mcast *p_ramrod_bins;
1602                         u32 *p_bins = (u32 *)bins;
1603
1604                         p_ramrod_bins = &p_ramrod->approx_mcast;
1605                         p_ramrod_bins->bins[i] = OSAL_CPU_TO_LE32(p_bins[i]);
1606                 }
1607         }
1608
1609         p_ramrod->common.vport_id = abs_vport_id;
1610
1611         rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
1612         if (rc != ECORE_SUCCESS)
1613                 DP_ERR(p_hwfn, "Multicast filter command failed %d\n", rc);
1614
1615         return rc;
1616 }
1617
1618 enum _ecore_status_t
1619 ecore_filter_mcast_cmd(struct ecore_dev *p_dev,
1620                        struct ecore_filter_mcast *p_filter_cmd,
1621                        enum spq_mode comp_mode,
1622                        struct ecore_spq_comp_cb *p_comp_data)
1623 {
1624         enum _ecore_status_t rc = ECORE_SUCCESS;
1625         int i;
1626
1627         /* Only ADD and REMOVE operations are supported for multicast */
1628         if (((p_filter_cmd->opcode != ECORE_FILTER_ADD) &&
1629              (p_filter_cmd->opcode != ECORE_FILTER_REMOVE)) ||
1630             (p_filter_cmd->num_mc_addrs > ECORE_MAX_MC_ADDRS)) {
1631                 return ECORE_INVAL;
1632         }
1633
1634         for_each_hwfn(p_dev, i) {
1635                 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
1636                 u16 opaque_fid;
1637
1638                 if (IS_VF(p_dev)) {
1639                         ecore_vf_pf_filter_mcast(p_hwfn, p_filter_cmd);
1640                         continue;
1641                 }
1642
1643                 opaque_fid = p_hwfn->hw_info.opaque_fid;
1644                 rc = ecore_sp_eth_filter_mcast(p_hwfn,
1645                                                opaque_fid,
1646                                                p_filter_cmd,
1647                                                comp_mode, p_comp_data);
1648                 if (rc != ECORE_SUCCESS)
1649                         break;
1650         }
1651
1652         return rc;
1653 }
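
/* Illustrative sketch (hypothetical values): programming two multicast
 * addresses. Since a multicast ADD is a full-set operation, this list
 * replaces whatever was previously configured on the vport.
 *
 *     struct ecore_filter_mcast mcast;
 *
 *     OSAL_MEMSET(&mcast, 0, sizeof(mcast));
 *     mcast.opcode = ECORE_FILTER_ADD;
 *     mcast.vport_to_add_to = 0;
 *     mcast.num_mc_addrs = 2;
 *     OSAL_MEMCPY(mcast.mac[0], mc_addr0, ETH_ALEN);
 *     OSAL_MEMCPY(mcast.mac[1], mc_addr1, ETH_ALEN);
 *     rc = ecore_filter_mcast_cmd(p_dev, &mcast,
 *                                 ECORE_SPQ_MODE_CB, OSAL_NULL);
 */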
1654
1655 enum _ecore_status_t
1656 ecore_filter_ucast_cmd(struct ecore_dev *p_dev,
1657                        struct ecore_filter_ucast *p_filter_cmd,
1658                        enum spq_mode comp_mode,
1659                        struct ecore_spq_comp_cb *p_comp_data)
1660 {
1661         enum _ecore_status_t rc = ECORE_SUCCESS;
1662         int i;
1663
1664         for_each_hwfn(p_dev, i) {
1665                 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
1666                 u16 opaque_fid;
1667
1668                 if (IS_VF(p_dev)) {
1669                         rc = ecore_vf_pf_filter_ucast(p_hwfn, p_filter_cmd);
1670                         continue;
1671                 }
1672
1673                 opaque_fid = p_hwfn->hw_info.opaque_fid;
1674                 rc = ecore_sp_eth_filter_ucast(p_hwfn,
1675                                                opaque_fid,
1676                                                p_filter_cmd,
1677                                                comp_mode, p_comp_data);
1678                 if (rc != ECORE_SUCCESS)
1679                         break;
1680         }
1681
1682         return rc;
1683 }
1684
1685 /* Statistics related code */
1686 static void __ecore_get_vport_pstats_addrlen(struct ecore_hwfn *p_hwfn,
1687                                              u32 *p_addr, u32 *p_len,
1688                                              u16 statistics_bin)
1689 {
1690         if (IS_PF(p_hwfn->p_dev)) {
1691                 *p_addr = BAR0_MAP_REG_PSDM_RAM +
1692                     PSTORM_QUEUE_STAT_OFFSET(statistics_bin);
1693                 *p_len = sizeof(struct eth_pstorm_per_queue_stat);
1694         } else {
1695                 struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
1696                 struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
1697
1698                 *p_addr = p_resp->pfdev_info.stats_info.pstats.address;
1699                 *p_len = p_resp->pfdev_info.stats_info.pstats.len;
1700         }
1701 }
1702
1703 static void __ecore_get_vport_pstats(struct ecore_hwfn *p_hwfn,
1704                                      struct ecore_ptt *p_ptt,
1705                                      struct ecore_eth_stats *p_stats,
1706                                      u16 statistics_bin)
1707 {
1708         struct eth_pstorm_per_queue_stat pstats;
1709         u32 pstats_addr = 0, pstats_len = 0;
1710
1711         __ecore_get_vport_pstats_addrlen(p_hwfn, &pstats_addr, &pstats_len,
1712                                          statistics_bin);
1713
1714         OSAL_MEMSET(&pstats, 0, sizeof(pstats));
1715         ecore_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, pstats_len);
1716
1717         p_stats->tx_ucast_bytes += HILO_64_REGPAIR(pstats.sent_ucast_bytes);
1718         p_stats->tx_mcast_bytes += HILO_64_REGPAIR(pstats.sent_mcast_bytes);
1719         p_stats->tx_bcast_bytes += HILO_64_REGPAIR(pstats.sent_bcast_bytes);
1720         p_stats->tx_ucast_pkts += HILO_64_REGPAIR(pstats.sent_ucast_pkts);
1721         p_stats->tx_mcast_pkts += HILO_64_REGPAIR(pstats.sent_mcast_pkts);
1722         p_stats->tx_bcast_pkts += HILO_64_REGPAIR(pstats.sent_bcast_pkts);
1723         p_stats->tx_err_drop_pkts += HILO_64_REGPAIR(pstats.error_drop_pkts);
1724 }
1725
1726 static void __ecore_get_vport_tstats(struct ecore_hwfn *p_hwfn,
1727                                      struct ecore_ptt *p_ptt,
1728                                      struct ecore_eth_stats *p_stats,
1729                                      u16 statistics_bin)
1730 {
1731         struct tstorm_per_port_stat tstats;
1732         u32 tstats_addr, tstats_len;
1733
1734         if (IS_PF(p_hwfn->p_dev)) {
1735                 tstats_addr = BAR0_MAP_REG_TSDM_RAM +
1736                     TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn));
1737                 tstats_len = sizeof(struct tstorm_per_port_stat);
1738         } else {
1739                 struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
1740                 struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
1741
1742                 tstats_addr = p_resp->pfdev_info.stats_info.tstats.address;
1743                 tstats_len = p_resp->pfdev_info.stats_info.tstats.len;
1744         }
1745
1746         OSAL_MEMSET(&tstats, 0, sizeof(tstats));
1747         ecore_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, tstats_len);
1748
1749         p_stats->mftag_filter_discards +=
1750             HILO_64_REGPAIR(tstats.mftag_filter_discard);
1751         p_stats->mac_filter_discards +=
1752             HILO_64_REGPAIR(tstats.eth_mac_filter_discard);
1753 }
1754
1755 static void __ecore_get_vport_ustats_addrlen(struct ecore_hwfn *p_hwfn,
1756                                              u32 *p_addr, u32 *p_len,
1757                                              u16 statistics_bin)
1758 {
1759         if (IS_PF(p_hwfn->p_dev)) {
1760                 *p_addr = BAR0_MAP_REG_USDM_RAM +
1761                     USTORM_QUEUE_STAT_OFFSET(statistics_bin);
1762                 *p_len = sizeof(struct eth_ustorm_per_queue_stat);
1763         } else {
1764                 struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
1765                 struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
1766
1767                 *p_addr = p_resp->pfdev_info.stats_info.ustats.address;
1768                 *p_len = p_resp->pfdev_info.stats_info.ustats.len;
1769         }
1770 }
1771
1772 static void __ecore_get_vport_ustats(struct ecore_hwfn *p_hwfn,
1773                                      struct ecore_ptt *p_ptt,
1774                                      struct ecore_eth_stats *p_stats,
1775                                      u16 statistics_bin)
1776 {
1777         struct eth_ustorm_per_queue_stat ustats;
1778         u32 ustats_addr = 0, ustats_len = 0;
1779
1780         __ecore_get_vport_ustats_addrlen(p_hwfn, &ustats_addr, &ustats_len,
1781                                          statistics_bin);
1782
1783         OSAL_MEMSET(&ustats, 0, sizeof(ustats));
1784         ecore_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, ustats_len);
1785
1786         p_stats->rx_ucast_bytes += HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
1787         p_stats->rx_mcast_bytes += HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
1788         p_stats->rx_bcast_bytes += HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
1789         p_stats->rx_ucast_pkts += HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
1790         p_stats->rx_mcast_pkts += HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
1791         p_stats->rx_bcast_pkts += HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
1792 }
1793
1794 static void __ecore_get_vport_mstats_addrlen(struct ecore_hwfn *p_hwfn,
1795                                              u32 *p_addr, u32 *p_len,
1796                                              u16 statistics_bin)
1797 {
1798         if (IS_PF(p_hwfn->p_dev)) {
1799                 *p_addr = BAR0_MAP_REG_MSDM_RAM +
1800                     MSTORM_QUEUE_STAT_OFFSET(statistics_bin);
1801                 *p_len = sizeof(struct eth_mstorm_per_queue_stat);
1802         } else {
1803                 struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
1804                 struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
1805
1806                 *p_addr = p_resp->pfdev_info.stats_info.mstats.address;
1807                 *p_len = p_resp->pfdev_info.stats_info.mstats.len;
1808         }
1809 }
1810
1811 static void __ecore_get_vport_mstats(struct ecore_hwfn *p_hwfn,
1812                                      struct ecore_ptt *p_ptt,
1813                                      struct ecore_eth_stats *p_stats,
1814                                      u16 statistics_bin)
1815 {
1816         struct eth_mstorm_per_queue_stat mstats;
1817         u32 mstats_addr = 0, mstats_len = 0;
1818
1819         __ecore_get_vport_mstats_addrlen(p_hwfn, &mstats_addr, &mstats_len,
1820                                          statistics_bin);
1821
1822         OSAL_MEMSET(&mstats, 0, sizeof(mstats));
1823         ecore_memcpy_from(p_hwfn, p_ptt, &mstats, mstats_addr, mstats_len);
1824
1825         p_stats->no_buff_discards += HILO_64_REGPAIR(mstats.no_buff_discard);
1826         p_stats->packet_too_big_discard +=
1827             HILO_64_REGPAIR(mstats.packet_too_big_discard);
1828         p_stats->ttl0_discard += HILO_64_REGPAIR(mstats.ttl0_discard);
1829         p_stats->tpa_coalesced_pkts +=
1830             HILO_64_REGPAIR(mstats.tpa_coalesced_pkts);
1831         p_stats->tpa_coalesced_events +=
1832             HILO_64_REGPAIR(mstats.tpa_coalesced_events);
1833         p_stats->tpa_aborts_num += HILO_64_REGPAIR(mstats.tpa_aborts_num);
1834         p_stats->tpa_coalesced_bytes +=
1835             HILO_64_REGPAIR(mstats.tpa_coalesced_bytes);
1836 }
1837
1838 static void __ecore_get_vport_port_stats(struct ecore_hwfn *p_hwfn,
1839                                          struct ecore_ptt *p_ptt,
1840                                          struct ecore_eth_stats *p_stats)
1841 {
1842         struct port_stats port_stats;
1843         int j;
1844
1845         OSAL_MEMSET(&port_stats, 0, sizeof(port_stats));
1846
1847         ecore_memcpy_from(p_hwfn, p_ptt, &port_stats,
1848                           p_hwfn->mcp_info->port_addr +
1849                           OFFSETOF(struct public_port, stats),
1850                           sizeof(port_stats));
1851
1852         p_stats->rx_64_byte_packets += port_stats.eth.r64;
1853         p_stats->rx_65_to_127_byte_packets += port_stats.eth.r127;
1854         p_stats->rx_128_to_255_byte_packets += port_stats.eth.r255;
1855         p_stats->rx_256_to_511_byte_packets += port_stats.eth.r511;
1856         p_stats->rx_512_to_1023_byte_packets += port_stats.eth.r1023;
1857         p_stats->rx_1024_to_1518_byte_packets += port_stats.eth.r1518;
1858         p_stats->rx_1519_to_1522_byte_packets += port_stats.eth.r1522;
1859         p_stats->rx_1519_to_2047_byte_packets += port_stats.eth.r2047;
1860         p_stats->rx_2048_to_4095_byte_packets += port_stats.eth.r4095;
1861         p_stats->rx_4096_to_9216_byte_packets += port_stats.eth.r9216;
1862         p_stats->rx_9217_to_16383_byte_packets += port_stats.eth.r16383;
1863         p_stats->rx_crc_errors += port_stats.eth.rfcs;
1864         p_stats->rx_mac_crtl_frames += port_stats.eth.rxcf;
1865         p_stats->rx_pause_frames += port_stats.eth.rxpf;
1866         p_stats->rx_pfc_frames += port_stats.eth.rxpp;
1867         p_stats->rx_align_errors += port_stats.eth.raln;
1868         p_stats->rx_carrier_errors += port_stats.eth.rfcr;
1869         p_stats->rx_oversize_packets += port_stats.eth.rovr;
1870         p_stats->rx_jabbers += port_stats.eth.rjbr;
1871         p_stats->rx_undersize_packets += port_stats.eth.rund;
1872         p_stats->rx_fragments += port_stats.eth.rfrg;
1873         p_stats->tx_64_byte_packets += port_stats.eth.t64;
1874         p_stats->tx_65_to_127_byte_packets += port_stats.eth.t127;
1875         p_stats->tx_128_to_255_byte_packets += port_stats.eth.t255;
1876         p_stats->tx_256_to_511_byte_packets += port_stats.eth.t511;
1877         p_stats->tx_512_to_1023_byte_packets += port_stats.eth.t1023;
1878         p_stats->tx_1024_to_1518_byte_packets += port_stats.eth.t1518;
1879         p_stats->tx_1519_to_2047_byte_packets += port_stats.eth.t2047;
1880         p_stats->tx_2048_to_4095_byte_packets += port_stats.eth.t4095;
1881         p_stats->tx_4096_to_9216_byte_packets += port_stats.eth.t9216;
1882         p_stats->tx_9217_to_16383_byte_packets += port_stats.eth.t16383;
1883         p_stats->tx_pause_frames += port_stats.eth.txpf;
1884         p_stats->tx_pfc_frames += port_stats.eth.txpp;
1885         p_stats->tx_lpi_entry_count += port_stats.eth.tlpiec;
1886         p_stats->tx_total_collisions += port_stats.eth.tncl;
1887         p_stats->rx_mac_bytes += port_stats.eth.rbyte;
1888         p_stats->rx_mac_uc_packets += port_stats.eth.rxuca;
1889         p_stats->rx_mac_mc_packets += port_stats.eth.rxmca;
1890         p_stats->rx_mac_bc_packets += port_stats.eth.rxbca;
1891         p_stats->rx_mac_frames_ok += port_stats.eth.rxpok;
1892         p_stats->tx_mac_bytes += port_stats.eth.tbyte;
1893         p_stats->tx_mac_uc_packets += port_stats.eth.txuca;
1894         p_stats->tx_mac_mc_packets += port_stats.eth.txmca;
1895         p_stats->tx_mac_bc_packets += port_stats.eth.txbca;
1896         p_stats->tx_mac_ctrl_frames += port_stats.eth.txcf;
1897         for (j = 0; j < 8; j++) {
1898                 p_stats->brb_truncates += port_stats.brb.brb_truncate[j];
1899                 p_stats->brb_discards += port_stats.brb.brb_discard[j];
1900         }
1901 }
1902
1903 void __ecore_get_vport_stats(struct ecore_hwfn *p_hwfn,
1904                              struct ecore_ptt *p_ptt,
1905                              struct ecore_eth_stats *stats,
1906                              u16 statistics_bin, bool b_get_port_stats)
1907 {
1908         __ecore_get_vport_mstats(p_hwfn, p_ptt, stats, statistics_bin);
1909         __ecore_get_vport_ustats(p_hwfn, p_ptt, stats, statistics_bin);
1910         __ecore_get_vport_tstats(p_hwfn, p_ptt, stats, statistics_bin);
1911         __ecore_get_vport_pstats(p_hwfn, p_ptt, stats, statistics_bin);
1912
1913 #ifndef ASIC_ONLY
1914         /* Avoid getting PORT stats for emulation. */
1915         if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
1916                 return;
1917 #endif
1918
1919         if (b_get_port_stats && p_hwfn->mcp_info)
1920                 __ecore_get_vport_port_stats(p_hwfn, p_ptt, stats);
1921 }
1922
1923 static void _ecore_get_vport_stats(struct ecore_dev *p_dev,
1924                                    struct ecore_eth_stats *stats)
1925 {
1926         u8 fw_vport = 0;
1927         int i;
1928
1929         OSAL_MEMSET(stats, 0, sizeof(*stats));
1930
1931         for_each_hwfn(p_dev, i) {
1932                 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
1933                 struct ecore_ptt *p_ptt = IS_PF(p_dev) ?
1934                     ecore_ptt_acquire(p_hwfn) : OSAL_NULL;
1935
1936                 if (IS_PF(p_dev)) {
1937                         /* The main vport is at relative index 0 */
1938                         if (ecore_fw_vport(p_hwfn, 0, &fw_vport)) {
1939                                 DP_ERR(p_hwfn, "No vport available!\n");
1940                                 goto out;
1941                         }
1942                 }
1943
1944                 if (IS_PF(p_dev) && !p_ptt) {
1945                         DP_ERR(p_hwfn, "Failed to acquire ptt\n");
1946                         continue;
1947                 }
1948
1949                 __ecore_get_vport_stats(p_hwfn, p_ptt, stats, fw_vport,
1950                                         IS_PF(p_dev) ? true : false);
1951
1952 out:
1953                 if (IS_PF(p_dev) && p_ptt)
1954                         ecore_ptt_release(p_hwfn, p_ptt);
1955         }
1956 }
1957
1958 void ecore_get_vport_stats(struct ecore_dev *p_dev,
1959                            struct ecore_eth_stats *stats)
1960 {
1961         u32 i;
1962
1963         if (!p_dev) {
1964                 OSAL_MEMSET(stats, 0, sizeof(*stats));
1965                 return;
1966         }
1967
1968         _ecore_get_vport_stats(p_dev, stats);
1969
1970         if (!p_dev->reset_stats)
1971                 return;
1972
1973         /* Reduce the statistics baseline */
1974         for (i = 0; i < sizeof(struct ecore_eth_stats) / sizeof(u64); i++)
1975                 ((u64 *)stats)[i] -= ((u64 *)p_dev->reset_stats)[i];
1976 }
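
/* Illustrative sketch: querying the aggregated device statistics. The
 * returned counters are reduced by the reset baseline, if one was
 * captured by ecore_reset_vport_stats() below.
 *
 *     struct ecore_eth_stats stats;
 *
 *     ecore_get_vport_stats(p_dev, &stats);
 *     DP_INFO(p_dev, "rx_ucast_pkts %lu tx_ucast_pkts %lu\n",
 *             (unsigned long)stats.rx_ucast_pkts,
 *             (unsigned long)stats.tx_ucast_pkts);
 */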
1977
1978 /* Zeroes the V-PORT specific portion of stats (port stats remain untouched) */
1979 void ecore_reset_vport_stats(struct ecore_dev *p_dev)
1980 {
1981         int i;
1982
1983         for_each_hwfn(p_dev, i) {
1984                 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
1985                 struct eth_mstorm_per_queue_stat mstats;
1986                 struct eth_ustorm_per_queue_stat ustats;
1987                 struct eth_pstorm_per_queue_stat pstats;
1988                 struct ecore_ptt *p_ptt = IS_PF(p_dev) ?
1989                     ecore_ptt_acquire(p_hwfn) : OSAL_NULL;
1990                 u32 addr = 0, len = 0;
1991
1992                 if (IS_PF(p_dev) && !p_ptt) {
1993                         DP_ERR(p_hwfn, "Failed to acquire ptt\n");
1994                         continue;
1995                 }
1996
1997                 OSAL_MEMSET(&mstats, 0, sizeof(mstats));
1998                 __ecore_get_vport_mstats_addrlen(p_hwfn, &addr, &len, 0);
1999                 ecore_memcpy_to(p_hwfn, p_ptt, addr, &mstats, len);
2000
2001                 OSAL_MEMSET(&ustats, 0, sizeof(ustats));
2002                 __ecore_get_vport_ustats_addrlen(p_hwfn, &addr, &len, 0);
2003                 ecore_memcpy_to(p_hwfn, p_ptt, addr, &ustats, len);
2004
2005                 OSAL_MEMSET(&pstats, 0, sizeof(pstats));
2006                 __ecore_get_vport_pstats_addrlen(p_hwfn, &addr, &len, 0);
2007                 ecore_memcpy_to(p_hwfn, p_ptt, addr, &pstats, len);
2008
2009                 if (IS_PF(p_dev))
2010                         ecore_ptt_release(p_hwfn, p_ptt);
2011         }
2012
2013         /* PORT statistics are not necessarily reset, so we need to
2014          * read and create a baseline for future statistics.
2015          */
2016         if (!p_dev->reset_stats)
2017                 DP_INFO(p_dev, "Reset stats not allocated\n");
2018         else
2019                 _ecore_get_vport_stats(p_dev, p_dev->reset_stats);
2020 }
2021
2022 void ecore_arfs_mode_configure(struct ecore_hwfn *p_hwfn,
2023                                struct ecore_ptt *p_ptt,
2024                                struct ecore_arfs_config_params *p_cfg_params)
2025 {
2026         if (p_cfg_params->arfs_enable) {
2027                 ecore_set_rfs_mode_enable(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
2028                                           p_cfg_params->tcp,
2029                                           p_cfg_params->udp,
2030                                           p_cfg_params->ipv4,
2031                                           p_cfg_params->ipv6);
2032                 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
2033                            "tcp = %s, udp = %s, ipv4 = %s, ipv6 = %s\n",
2034                            p_cfg_params->tcp ? "Enable" : "Disable",
2035                            p_cfg_params->udp ? "Enable" : "Disable",
2036                            p_cfg_params->ipv4 ? "Enable" : "Disable",
2037                            p_cfg_params->ipv6 ? "Enable" : "Disable");
2038         } else {
2039                 ecore_set_rfs_mode_disable(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
2040         }
2041         DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Configured ARFS mode: %s\n",
2042                    p_cfg_params->arfs_enable ? "Enable" : "Disable");
2043 }
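
/* Illustrative sketch (hypothetical values): enabling aRFS classification
 * for TCP and UDP over IPv4 only.
 *
 *     struct ecore_arfs_config_params arfs;
 *
 *     OSAL_MEMSET(&arfs, 0, sizeof(arfs));
 *     arfs.arfs_enable = true;
 *     arfs.tcp = true;
 *     arfs.udp = true;
 *     arfs.ipv4 = true;
 *     arfs.ipv6 = false;
 *     ecore_arfs_mode_configure(p_hwfn, p_ptt, &arfs);
 */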
2044
2045 enum _ecore_status_t
2046 ecore_configure_rfs_ntuple_filter(struct ecore_hwfn *p_hwfn,
2047                                   struct ecore_ptt *p_ptt,
2048                                   struct ecore_spq_comp_cb *p_cb,
2049                                   dma_addr_t p_addr, u16 length,
2050                                   u16 qid, u8 vport_id,
2051                                   bool b_is_add)
2052 {
2053         struct rx_update_gft_filter_data *p_ramrod = OSAL_NULL;
2054         struct ecore_spq_entry *p_ent = OSAL_NULL;
2055         struct ecore_sp_init_data init_data;
2056         u16 abs_rx_q_id = 0;
2057         u8 abs_vport_id = 0;
2058         enum _ecore_status_t rc = ECORE_NOTIMPL;
2059
2060         rc = ecore_fw_vport(p_hwfn, vport_id, &abs_vport_id);
2061         if (rc != ECORE_SUCCESS)
2062                 return rc;
2063
2064         rc = ecore_fw_l2_queue(p_hwfn, qid, &abs_rx_q_id);
2065         if (rc != ECORE_SUCCESS)
2066                 return rc;
2067
2068         /* Get SPQ entry */
2069         OSAL_MEMSET(&init_data, 0, sizeof(init_data));
2070         init_data.cid = ecore_spq_get_cid(p_hwfn);
2071
2072         init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
2073
2074         if (p_cb) {
2075                 init_data.comp_mode = ECORE_SPQ_MODE_CB;
2076                 init_data.p_comp_data = p_cb;
2077         } else {
2078                 init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
2079         }
2080
2081         rc = ecore_sp_init_request(p_hwfn, &p_ent,
2082                                    ETH_RAMROD_GFT_UPDATE_FILTER,
2083                                    PROTOCOLID_ETH, &init_data);
2084         if (rc != ECORE_SUCCESS)
2085                 return rc;
2086
2087         p_ramrod = &p_ent->ramrod.rx_update_gft;
2088
2089         DMA_REGPAIR_LE(p_ramrod->pkt_hdr_addr, p_addr);
2090         p_ramrod->pkt_hdr_length = OSAL_CPU_TO_LE16(length);
2091         p_ramrod->rx_qid_or_action_icid = OSAL_CPU_TO_LE16(abs_rx_q_id);
2092         p_ramrod->vport_id = abs_vport_id;
2093         p_ramrod->filter_type = RFS_FILTER_TYPE;
2094         p_ramrod->filter_action = b_is_add ? GFT_ADD_FILTER
2095                                            : GFT_DELETE_FILTER;
2096
2097         DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
2098                    "V[%02x], Q[%04x] - %s filter from 0x%lx [length %04xb]\n",
2099                    abs_vport_id, abs_rx_q_id,
2100                    b_is_add ? "Adding" : "Removing",
2101                    (unsigned long)p_addr, length);
2102
2103         return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
2104 }
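
/* Illustrative sketch: installing an RFS n-tuple filter. 'hdr_phys' and
 * 'hdr_len' are assumed to describe a caller-built, DMA-able buffer
 * holding the packed packet header used for matching; 'rxq_id' and
 * 'vport_id' are relative indices translated to absolute ones above.
 * Passing OSAL_NULL for p_cb selects blocking (EBLOCK) completion.
 *
 *     rc = ecore_configure_rfs_ntuple_filter(p_hwfn, p_ptt, OSAL_NULL,
 *                                            hdr_phys, hdr_len,
 *                                            rxq_id, vport_id,
 *                                            true);
 */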