/*
 * Copyright (c) 2016 - 2018 Cavium Inc.
 * All rights reserved.
 * www.cavium.com
 *
 * See LICENSE.qede_pmd for copyright and licensing details.
 */

#include "bcm_osal.h"

#include "ecore.h"
#include "ecore_status.h"
#include "ecore_hsi_eth.h"
#include "ecore_chain.h"
#include "ecore_spq.h"
#include "ecore_init_fw_funcs.h"
#include "ecore_cxt.h"
#include "ecore_l2.h"
#include "ecore_sp_commands.h"
#include "ecore_gtt_reg_addr.h"
#include "ecore_iro.h"
#include "reg_addr.h"
#include "ecore_int.h"
#include "ecore_hw.h"
#include "ecore_vf.h"
#include "ecore_sriov.h"
#include "ecore_mcp.h"

#define ECORE_MAX_SGES_NUM 16
#define CRC32_POLY 0x1edc6f41

struct ecore_l2_info {
        u32 queues;
        unsigned long **pp_qid_usage;

        /* The lock is meant to synchronize access to the qid usage */
        osal_mutex_t lock;
};

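/* Allocate the per-hwfn L2 bookkeeping: one qid-usage bitmap per queue-zone,
 * MAX_QUEUES_PER_QZONE bits each, so queue-cids can later claim a unique
 * index inside their zone.
 */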
enum _ecore_status_t ecore_l2_alloc(struct ecore_hwfn *p_hwfn)
{
        struct ecore_l2_info *p_l2_info;
        unsigned long **pp_qids;
        u32 i;

        if (!ECORE_IS_L2_PERSONALITY(p_hwfn))
                return ECORE_SUCCESS;

        p_l2_info = OSAL_VZALLOC(p_hwfn->p_dev, sizeof(*p_l2_info));
        if (!p_l2_info)
                return ECORE_NOMEM;
        p_hwfn->p_l2_info = p_l2_info;

        if (IS_PF(p_hwfn->p_dev)) {
                p_l2_info->queues = RESC_NUM(p_hwfn, ECORE_L2_QUEUE);
        } else {
                u8 rx = 0, tx = 0;

                ecore_vf_get_num_rxqs(p_hwfn, &rx);
                ecore_vf_get_num_txqs(p_hwfn, &tx);

                p_l2_info->queues = (u32)OSAL_MAX_T(u8, rx, tx);
        }

        pp_qids = OSAL_VZALLOC(p_hwfn->p_dev,
                               sizeof(unsigned long *) *
                               p_l2_info->queues);
        if (pp_qids == OSAL_NULL)
                return ECORE_NOMEM;
        p_l2_info->pp_qid_usage = pp_qids;

        for (i = 0; i < p_l2_info->queues; i++) {
                pp_qids[i] = OSAL_VZALLOC(p_hwfn->p_dev,
                                          MAX_QUEUES_PER_QZONE / 8);
                if (pp_qids[i] == OSAL_NULL)
                        return ECORE_NOMEM;
        }

#ifdef CONFIG_ECORE_LOCK_ALLOC
        if (OSAL_MUTEX_ALLOC(p_hwfn, &p_l2_info->lock))
                return ECORE_NOMEM;
#endif

        return ECORE_SUCCESS;
}

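/* Runtime-init counterpart of ecore_l2_alloc() - only initializes the
 * qid-usage lock.
 */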
void ecore_l2_setup(struct ecore_hwfn *p_hwfn)
{
        if (!ECORE_IS_L2_PERSONALITY(p_hwfn))
                return;

        OSAL_MUTEX_INIT(&p_hwfn->p_l2_info->lock);
}

void ecore_l2_free(struct ecore_hwfn *p_hwfn)
{
        u32 i;

        if (!ECORE_IS_L2_PERSONALITY(p_hwfn))
                return;

        if (p_hwfn->p_l2_info == OSAL_NULL)
                return;

        if (p_hwfn->p_l2_info->pp_qid_usage == OSAL_NULL)
                goto out_l2_info;

        /* Free entries until the first uninitialized one is hit */
        for (i = 0; i < p_hwfn->p_l2_info->queues; i++) {
                if (p_hwfn->p_l2_info->pp_qid_usage[i] == OSAL_NULL)
                        break;
                OSAL_VFREE(p_hwfn->p_dev,
                           p_hwfn->p_l2_info->pp_qid_usage[i]);
                p_hwfn->p_l2_info->pp_qid_usage[i] = OSAL_NULL;
        }

#ifdef CONFIG_ECORE_LOCK_ALLOC
        /* The lock is last to initialize, if everything else was */
        if (i == p_hwfn->p_l2_info->queues)
                OSAL_MUTEX_DEALLOC(&p_hwfn->p_l2_info->lock);
#endif

        OSAL_VFREE(p_hwfn->p_dev, p_hwfn->p_l2_info->pp_qid_usage);
        p_hwfn->p_l2_info->pp_qid_usage = OSAL_NULL;

out_l2_info:
        OSAL_VFREE(p_hwfn->p_dev, p_hwfn->p_l2_info);
        p_hwfn->p_l2_info = OSAL_NULL;
}

/* TODO - we'll need locking around these... */
static bool ecore_eth_queue_qid_usage_add(struct ecore_hwfn *p_hwfn,
                                          struct ecore_queue_cid *p_cid)
{
        struct ecore_l2_info *p_l2_info = p_hwfn->p_l2_info;
        u16 queue_id = p_cid->rel.queue_id;
        bool b_rc = true;
        u8 first;

        OSAL_MUTEX_ACQUIRE(&p_l2_info->lock);

        if (queue_id >= p_l2_info->queues) {
                DP_NOTICE(p_hwfn, true,
                          "Requested to increase usage for qzone %04x out of %08x\n",
                          queue_id, p_l2_info->queues);
                b_rc = false;
                goto out;
        }

        first = (u8)OSAL_FIND_FIRST_ZERO_BIT(p_l2_info->pp_qid_usage[queue_id],
                                             MAX_QUEUES_PER_QZONE);
        if (first >= MAX_QUEUES_PER_QZONE) {
                b_rc = false;
                goto out;
        }

        OSAL_SET_BIT(first, p_l2_info->pp_qid_usage[queue_id]);
        p_cid->qid_usage_idx = first;

out:
        OSAL_MUTEX_RELEASE(&p_l2_info->lock);
        return b_rc;
}

static void ecore_eth_queue_qid_usage_del(struct ecore_hwfn *p_hwfn,
                                          struct ecore_queue_cid *p_cid)
{
        OSAL_MUTEX_ACQUIRE(&p_hwfn->p_l2_info->lock);

        OSAL_CLEAR_BIT(p_cid->qid_usage_idx,
                       p_hwfn->p_l2_info->pp_qid_usage[p_cid->rel.queue_id]);

        OSAL_MUTEX_RELEASE(&p_hwfn->p_l2_info->lock);
}

void ecore_eth_queue_cid_release(struct ecore_hwfn *p_hwfn,
                                 struct ecore_queue_cid *p_cid)
{
        bool b_legacy_vf = !!(p_cid->vf_legacy &
                              ECORE_QCID_LEGACY_VF_CID);

        /* VFs' CIDs are 0-based in PF-view, and uninitialized on VF.
         * For legacy vf-queues, the CID doesn't go through here.
         */
        if (IS_PF(p_hwfn->p_dev) && !b_legacy_vf)
                _ecore_cxt_release_cid(p_hwfn, p_cid->cid, p_cid->vfid);

        /* VFs maintain the index inside queue-zone on their own */
        if (p_cid->vfid == ECORE_QUEUE_CID_PF)
                ecore_eth_queue_qid_usage_del(p_hwfn, p_cid);

        OSAL_VFREE(p_hwfn->p_dev, p_cid);
}

/* The internal version is only meant to be called directly by PFs
 * initializing CIDs for their VFs.
 */
static struct ecore_queue_cid *
_ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn,
                        u16 opaque_fid, u32 cid,
                        struct ecore_queue_start_common_params *p_params,
                        bool b_is_rx,
                        struct ecore_queue_cid_vf_params *p_vf_params)
{
        struct ecore_queue_cid *p_cid;
        enum _ecore_status_t rc;

        p_cid = OSAL_VZALLOC(p_hwfn->p_dev, sizeof(*p_cid));
        if (p_cid == OSAL_NULL)
                return OSAL_NULL;

        p_cid->opaque_fid = opaque_fid;
        p_cid->cid = cid;
        p_cid->p_owner = p_hwfn;

        /* Fill in parameters */
        p_cid->rel.vport_id = p_params->vport_id;
        p_cid->rel.queue_id = p_params->queue_id;
        p_cid->rel.stats_id = p_params->stats_id;
        p_cid->sb_igu_id = p_params->p_sb->igu_sb_id;
        p_cid->b_is_rx = b_is_rx;
        p_cid->sb_idx = p_params->sb_idx;

        /* Fill in bits related to VFs' queues if information was provided */
        if (p_vf_params != OSAL_NULL) {
                p_cid->vfid = p_vf_params->vfid;
                p_cid->vf_qid = p_vf_params->vf_qid;
                p_cid->vf_legacy = p_vf_params->vf_legacy;
        } else {
                p_cid->vfid = ECORE_QUEUE_CID_PF;
        }

        /* Don't try calculating the absolute indices for VFs */
        if (IS_VF(p_hwfn->p_dev)) {
                p_cid->abs = p_cid->rel;

                goto out;
        }

        /* Calculate the engine-absolute indices of the resources.
         * This would guarantee they're valid later on.
         * In some cases [SBs] we already have the right values.
         */
        rc = ecore_fw_vport(p_hwfn, p_cid->rel.vport_id, &p_cid->abs.vport_id);
        if (rc != ECORE_SUCCESS)
                goto fail;

        rc = ecore_fw_l2_queue(p_hwfn, p_cid->rel.queue_id,
                               &p_cid->abs.queue_id);
        if (rc != ECORE_SUCCESS)
                goto fail;

        /* In case of a PF configuring its VF's queues, the stats-id is already
         * absolute [since there's a single index that's suitable per-VF].
         */
        if (p_cid->vfid == ECORE_QUEUE_CID_PF) {
                rc = ecore_fw_vport(p_hwfn, p_cid->rel.stats_id,
                                    &p_cid->abs.stats_id);
                if (rc != ECORE_SUCCESS)
                        goto fail;
        } else {
                p_cid->abs.stats_id = p_cid->rel.stats_id;
        }

out:
        /* VFs provide the qid_usage_idx on their own.
         * Otherwise, we need to allocate a unique one.
         */
        if (!p_vf_params) {
                if (!ecore_eth_queue_qid_usage_add(p_hwfn, p_cid))
                        goto fail;
        } else {
                p_cid->qid_usage_idx = p_vf_params->qid_usage_idx;
        }

        DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                   "opaque_fid: %04x CID %08x vport %02x [%02x] qzone %04x.%02x [%04x] stats %02x [%02x] SB %04x PI %02x\n",
                   p_cid->opaque_fid, p_cid->cid,
                   p_cid->rel.vport_id, p_cid->abs.vport_id,
                   p_cid->rel.queue_id, p_cid->qid_usage_idx,
                   p_cid->abs.queue_id,
                   p_cid->rel.stats_id, p_cid->abs.stats_id,
                   p_cid->sb_igu_id, p_cid->sb_idx);

        return p_cid;

fail:
        OSAL_VFREE(p_hwfn->p_dev, p_cid);
        return OSAL_NULL;
}

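/* Public wrapper - acquires a firmware CID when one is needed [PF queues and
 * non-legacy VF queues] before building the queue-cid, and releases it again
 * if the queue-cid allocation fails.
 */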
struct ecore_queue_cid *
ecore_eth_queue_to_cid(struct ecore_hwfn *p_hwfn, u16 opaque_fid,
                       struct ecore_queue_start_common_params *p_params,
                       bool b_is_rx,
                       struct ecore_queue_cid_vf_params *p_vf_params)
{
        struct ecore_queue_cid *p_cid;
        u8 vfid = ECORE_CXT_PF_CID;
        bool b_legacy_vf = false;
        u32 cid = 0;

        /* In case of legacy VFs, the CID can be derived from the additional
         * VF parameters - the VF assumes queue X uses CID X, so we can simply
         * use the vf_qid for this purpose as well.
         */
        if (p_vf_params) {
                vfid = p_vf_params->vfid;

                if (p_vf_params->vf_legacy &
                    ECORE_QCID_LEGACY_VF_CID) {
                        b_legacy_vf = true;
                        cid = p_vf_params->vf_qid;
                }
        }

        /* Get a unique firmware CID for this queue, in case it's a PF.
         * VFs don't need a CID as the queue configuration will be done
         * by the PF.
         */
        if (IS_PF(p_hwfn->p_dev) && !b_legacy_vf) {
                if (_ecore_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH,
                                           &cid, vfid) != ECORE_SUCCESS) {
                        DP_NOTICE(p_hwfn, true, "Failed to acquire cid\n");
                        return OSAL_NULL;
                }
        }

        p_cid = _ecore_eth_queue_to_cid(p_hwfn, opaque_fid, cid,
                                        p_params, b_is_rx, p_vf_params);
        if ((p_cid == OSAL_NULL) && IS_PF(p_hwfn->p_dev) && !b_legacy_vf)
                _ecore_cxt_release_cid(p_hwfn, cid, vfid);

        return p_cid;
}

static struct ecore_queue_cid *
ecore_eth_queue_to_cid_pf(struct ecore_hwfn *p_hwfn, u16 opaque_fid,
                          bool b_is_rx,
                          struct ecore_queue_start_common_params *p_params)
{
        return ecore_eth_queue_to_cid(p_hwfn, opaque_fid, p_params, b_is_rx,
                                      OSAL_NULL);
}

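/* Post a VPORT_START ramrod on behalf of a PF - configures the new vport's
 * MTU, inner-vlan removal, rx-mode, strict tx-error behavior and TPA
 * parameters.
 */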
enum _ecore_status_t
ecore_sp_eth_vport_start(struct ecore_hwfn *p_hwfn,
                         struct ecore_sp_vport_start_params *p_params)
{
        struct vport_start_ramrod_data *p_ramrod = OSAL_NULL;
        struct ecore_spq_entry *p_ent = OSAL_NULL;
        struct ecore_sp_init_data init_data;
        struct eth_vport_tpa_param *p_tpa;
        u16 rx_mode = 0, tx_err = 0;
        u8 abs_vport_id = 0;
        enum _ecore_status_t rc = ECORE_NOTIMPL;

        rc = ecore_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
        if (rc != ECORE_SUCCESS)
                return rc;

        /* Get SPQ entry */
        OSAL_MEMSET(&init_data, 0, sizeof(init_data));
        init_data.cid = ecore_spq_get_cid(p_hwfn);
        init_data.opaque_fid = p_params->opaque_fid;
        init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

        rc = ecore_sp_init_request(p_hwfn, &p_ent,
                                   ETH_RAMROD_VPORT_START,
                                   PROTOCOLID_ETH, &init_data);
        if (rc != ECORE_SUCCESS)
                return rc;

        p_ramrod = &p_ent->ramrod.vport_start;
        p_ramrod->vport_id = abs_vport_id;

        p_ramrod->mtu = OSAL_CPU_TO_LE16(p_params->mtu);
        p_ramrod->handle_ptp_pkts = p_params->handle_ptp_pkts;
        p_ramrod->inner_vlan_removal_en = p_params->remove_inner_vlan;
        p_ramrod->drop_ttl0_en = p_params->drop_ttl0;
        p_ramrod->untagged = p_params->only_untagged;
        p_ramrod->zero_placement_offset = p_params->zero_placement_offset;

        SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_UCAST_DROP_ALL, 1);
        SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_MCAST_DROP_ALL, 1);

        p_ramrod->rx_mode.state = OSAL_CPU_TO_LE16(rx_mode);

        /* Handle requests for strict behavior on transmission errors */
        SET_FIELD(tx_err, ETH_TX_ERR_VALS_ILLEGAL_VLAN_MODE,
                  p_params->b_err_illegal_vlan_mode ?
                  ETH_TX_ERR_ASSERT_MALICIOUS : 0);
        SET_FIELD(tx_err, ETH_TX_ERR_VALS_PACKET_TOO_SMALL,
                  p_params->b_err_small_pkt ?
                  ETH_TX_ERR_ASSERT_MALICIOUS : 0);
        SET_FIELD(tx_err, ETH_TX_ERR_VALS_ANTI_SPOOFING_ERR,
                  p_params->b_err_anti_spoof ?
                  ETH_TX_ERR_ASSERT_MALICIOUS : 0);
        SET_FIELD(tx_err, ETH_TX_ERR_VALS_ILLEGAL_INBAND_TAGS,
                  p_params->b_err_illegal_inband_mode ?
                  ETH_TX_ERR_ASSERT_MALICIOUS : 0);
        SET_FIELD(tx_err, ETH_TX_ERR_VALS_VLAN_INSERTION_W_INBAND_TAG,
                  p_params->b_err_vlan_insert_with_inband ?
                  ETH_TX_ERR_ASSERT_MALICIOUS : 0);
        SET_FIELD(tx_err, ETH_TX_ERR_VALS_MTU_VIOLATION,
                  p_params->b_err_big_pkt ?
                  ETH_TX_ERR_ASSERT_MALICIOUS : 0);
        SET_FIELD(tx_err, ETH_TX_ERR_VALS_ILLEGAL_CONTROL_FRAME,
                  p_params->b_err_ctrl_frame ?
                  ETH_TX_ERR_ASSERT_MALICIOUS : 0);
        p_ramrod->tx_err_behav.values = OSAL_CPU_TO_LE16(tx_err);

        /* TPA related fields */
        p_tpa = &p_ramrod->tpa_param;
        OSAL_MEMSET(p_tpa, 0, sizeof(struct eth_vport_tpa_param));
        p_tpa->max_buff_num = p_params->max_buffers_per_cqe;

        switch (p_params->tpa_mode) {
        case ECORE_TPA_MODE_GRO:
                p_tpa->tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM;
                p_tpa->tpa_max_size = (u16)-1;
                p_tpa->tpa_min_size_to_cont = p_params->mtu / 2;
                p_tpa->tpa_min_size_to_start = p_params->mtu / 2;
                p_tpa->tpa_ipv4_en_flg = 1;
                p_tpa->tpa_ipv6_en_flg = 1;
                p_tpa->tpa_ipv4_tunn_en_flg = 1;
                p_tpa->tpa_ipv6_tunn_en_flg = 1;
                p_tpa->tpa_pkt_split_flg = 1;
                p_tpa->tpa_gro_consistent_flg = 1;
                break;
        default:
                break;
        }

        p_ramrod->tx_switching_en = p_params->tx_switching;
#ifndef ASIC_ONLY
        if (CHIP_REV_IS_SLOW(p_hwfn->p_dev))
                p_ramrod->tx_switching_en = 0;
#endif

        p_ramrod->ctl_frame_mac_check_en = !!p_params->check_mac;
        p_ramrod->ctl_frame_ethtype_check_en = !!p_params->check_ethtype;

        /* Software Function ID in hwfn (PFs are 0 - 15, VFs are 16 - 135) */
        p_ramrod->sw_fid = ecore_concrete_to_sw_fid(p_params->concrete_fid);

        return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

enum _ecore_status_t
ecore_sp_vport_start(struct ecore_hwfn *p_hwfn,
                     struct ecore_sp_vport_start_params *p_params)
{
        if (IS_VF(p_hwfn->p_dev))
                return ecore_vf_pf_vport_start(p_hwfn, p_params->vport_id,
                                               p_params->mtu,
                                               p_params->remove_inner_vlan,
                                               p_params->tpa_mode,
                                               p_params->max_buffers_per_cqe,
                                               p_params->only_untagged);

        return ecore_sp_eth_vport_start(p_hwfn, p_params);
}

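/* Translate the caller's RSS parameters into the FW rss_config section of a
 * vport-update ramrod; the indirection table is built from the absolute
 * queue-ids of the referenced queue-cids.
 */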
static enum _ecore_status_t
ecore_sp_vport_update_rss(struct ecore_hwfn *p_hwfn,
                          struct vport_update_ramrod_data *p_ramrod,
                          struct ecore_rss_params *p_rss)
{
        struct eth_vport_rss_config *p_config;
        u16 capabilities = 0;
        int i, table_size;
        enum _ecore_status_t rc = ECORE_SUCCESS;

        if (!p_rss) {
                p_ramrod->common.update_rss_flg = 0;
                return rc;
        }
        p_config = &p_ramrod->rss_config;

        OSAL_BUILD_BUG_ON(ECORE_RSS_IND_TABLE_SIZE !=
                          ETH_RSS_IND_TABLE_ENTRIES_NUM);

        rc = ecore_fw_rss_eng(p_hwfn, p_rss->rss_eng_id, &p_config->rss_id);
        if (rc != ECORE_SUCCESS)
                return rc;

        p_ramrod->common.update_rss_flg = p_rss->update_rss_config;
        p_config->update_rss_capabilities = p_rss->update_rss_capabilities;
        p_config->update_rss_ind_table = p_rss->update_rss_ind_table;
        p_config->update_rss_key = p_rss->update_rss_key;

        p_config->rss_mode = p_rss->rss_enable ?
            ETH_VPORT_RSS_MODE_REGULAR : ETH_VPORT_RSS_MODE_DISABLED;

        p_config->capabilities = 0;

        SET_FIELD(capabilities,
                  ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY,
                  !!(p_rss->rss_caps & ECORE_RSS_IPV4));
        SET_FIELD(capabilities,
                  ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY,
                  !!(p_rss->rss_caps & ECORE_RSS_IPV6));
        SET_FIELD(capabilities,
                  ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY,
                  !!(p_rss->rss_caps & ECORE_RSS_IPV4_TCP));
        SET_FIELD(capabilities,
                  ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY,
                  !!(p_rss->rss_caps & ECORE_RSS_IPV6_TCP));
        SET_FIELD(capabilities,
                  ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY,
                  !!(p_rss->rss_caps & ECORE_RSS_IPV4_UDP));
        SET_FIELD(capabilities,
                  ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY,
                  !!(p_rss->rss_caps & ECORE_RSS_IPV6_UDP));
        p_config->tbl_size = p_rss->rss_table_size_log;
        p_config->capabilities = OSAL_CPU_TO_LE16(capabilities);

        DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
                   "update rss flag %d, rss_mode = %d, update_caps = %d, capabilities = %d, update_ind = %d, update_rss_key = %d\n",
                   p_ramrod->common.update_rss_flg,
                   p_config->rss_mode,
                   p_config->update_rss_capabilities,
                   p_config->capabilities,
                   p_config->update_rss_ind_table, p_config->update_rss_key);

        table_size = OSAL_MIN_T(int, ECORE_RSS_IND_TABLE_SIZE,
                                1 << p_config->tbl_size);
        for (i = 0; i < table_size; i++) {
                struct ecore_queue_cid *p_queue = p_rss->rss_ind_table[i];

                if (!p_queue)
                        return ECORE_INVAL;

                p_config->indirection_table[i] =
                                OSAL_CPU_TO_LE16(p_queue->abs.queue_id);
        }

        DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
                   "Configured RSS indirection table [%d entries]:\n",
                   table_size);
        for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i += 0x10) {
                DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP,
                           "%04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x %04x\n",
                           OSAL_LE16_TO_CPU(p_config->indirection_table[i]),
                           OSAL_LE16_TO_CPU(p_config->indirection_table[i + 1]),
                           OSAL_LE16_TO_CPU(p_config->indirection_table[i + 2]),
                           OSAL_LE16_TO_CPU(p_config->indirection_table[i + 3]),
                           OSAL_LE16_TO_CPU(p_config->indirection_table[i + 4]),
                           OSAL_LE16_TO_CPU(p_config->indirection_table[i + 5]),
                           OSAL_LE16_TO_CPU(p_config->indirection_table[i + 6]),
                           OSAL_LE16_TO_CPU(p_config->indirection_table[i + 7]),
                           OSAL_LE16_TO_CPU(p_config->indirection_table[i + 8]),
                           OSAL_LE16_TO_CPU(p_config->indirection_table[i + 9]),
                           OSAL_LE16_TO_CPU(p_config->indirection_table[i + 10]),
                           OSAL_LE16_TO_CPU(p_config->indirection_table[i + 11]),
                           OSAL_LE16_TO_CPU(p_config->indirection_table[i + 12]),
                           OSAL_LE16_TO_CPU(p_config->indirection_table[i + 13]),
                           OSAL_LE16_TO_CPU(p_config->indirection_table[i + 14]),
                           OSAL_LE16_TO_CPU(p_config->indirection_table[i + 15]));
        }

        for (i = 0; i < 10; i++)
                p_config->rss_key[i] = OSAL_CPU_TO_LE32(p_rss->rss_key[i]);

        return rc;
}

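/* Convert the abstract accept-flags into the FW rx/tx mode bit-fields of the
 * vport-update ramrod.
 */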
static void
ecore_sp_update_accept_mode(struct ecore_hwfn *p_hwfn,
                            struct vport_update_ramrod_data *p_ramrod,
                            struct ecore_filter_accept_flags accept_flags)
{
        p_ramrod->common.update_rx_mode_flg =
                                        accept_flags.update_rx_mode_config;
        p_ramrod->common.update_tx_mode_flg =
                                        accept_flags.update_tx_mode_config;

#ifndef ASIC_ONLY
        /* On B0 emulation we cannot enable Tx, since this would cause writes
         * to PVFC HW block which isn't implemented in emulation.
         */
        if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
                DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                           "Non-Asic - prevent Tx mode in vport update\n");
                p_ramrod->common.update_tx_mode_flg = 0;
        }
#endif

        /* Set Rx mode accept flags */
        if (p_ramrod->common.update_rx_mode_flg) {
                u8 accept_filter = accept_flags.rx_accept_filter;
                u16 state = 0;

                SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_DROP_ALL,
                          !(!!(accept_filter & ECORE_ACCEPT_UCAST_MATCHED) ||
                            !!(accept_filter & ECORE_ACCEPT_UCAST_UNMATCHED)));

                SET_FIELD(state, ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED,
                          !!(accept_filter & ECORE_ACCEPT_UCAST_UNMATCHED));

                SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_DROP_ALL,
                          !(!!(accept_filter & ECORE_ACCEPT_MCAST_MATCHED) ||
                            !!(accept_filter & ECORE_ACCEPT_MCAST_UNMATCHED)));

                SET_FIELD(state, ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL,
                          (!!(accept_filter & ECORE_ACCEPT_MCAST_MATCHED) &&
                           !!(accept_filter & ECORE_ACCEPT_MCAST_UNMATCHED)));

                SET_FIELD(state, ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL,
                          !!(accept_filter & ECORE_ACCEPT_BCAST));

                p_ramrod->rx_mode.state = OSAL_CPU_TO_LE16(state);
                DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                           "vport[%02x] p_ramrod->rx_mode.state = 0x%x\n",
                           p_ramrod->common.vport_id, state);
        }

        /* Set Tx mode accept flags */
        if (p_ramrod->common.update_tx_mode_flg) {
                u8 accept_filter = accept_flags.tx_accept_filter;
                u16 state = 0;

                SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_DROP_ALL,
                          !!(accept_filter & ECORE_ACCEPT_NONE));

                SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_DROP_ALL,
                          !!(accept_filter & ECORE_ACCEPT_NONE));

                SET_FIELD(state, ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL,
                          (!!(accept_filter & ECORE_ACCEPT_MCAST_MATCHED) &&
                           !!(accept_filter & ECORE_ACCEPT_MCAST_UNMATCHED)));

                SET_FIELD(state, ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL,
                          !!(accept_filter & ECORE_ACCEPT_BCAST));

                p_ramrod->tx_mode.state = OSAL_CPU_TO_LE16(state);
                DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                           "vport[%02x] p_ramrod->tx_mode.state = 0x%x\n",
                           p_ramrod->common.vport_id, state);
        }
}

static void
ecore_sp_vport_update_sge_tpa(struct vport_update_ramrod_data *p_ramrod,
                              struct ecore_sge_tpa_params *p_params)
{
        struct eth_vport_tpa_param *p_tpa;
        u16 val;

        if (!p_params) {
                p_ramrod->common.update_tpa_param_flg = 0;
                p_ramrod->common.update_tpa_en_flg = 0;
                return;
        }

        p_ramrod->common.update_tpa_en_flg = p_params->update_tpa_en_flg;
        p_tpa = &p_ramrod->tpa_param;
        p_tpa->tpa_ipv4_en_flg = p_params->tpa_ipv4_en_flg;
        p_tpa->tpa_ipv6_en_flg = p_params->tpa_ipv6_en_flg;
        p_tpa->tpa_ipv4_tunn_en_flg = p_params->tpa_ipv4_tunn_en_flg;
        p_tpa->tpa_ipv6_tunn_en_flg = p_params->tpa_ipv6_tunn_en_flg;

        p_ramrod->common.update_tpa_param_flg = p_params->update_tpa_param_flg;
        p_tpa->max_buff_num = p_params->max_buffers_per_cqe;
        p_tpa->tpa_pkt_split_flg = p_params->tpa_pkt_split_flg;
        p_tpa->tpa_hdr_data_split_flg = p_params->tpa_hdr_data_split_flg;
        p_tpa->tpa_gro_consistent_flg = p_params->tpa_gro_consistent_flg;
        p_tpa->tpa_max_aggs_num = p_params->tpa_max_aggs_num;
        val = p_params->tpa_max_size;
        p_tpa->tpa_max_size = OSAL_CPU_TO_LE16(val);
        val = p_params->tpa_min_size_to_start;
        p_tpa->tpa_min_size_to_start = OSAL_CPU_TO_LE16(val);
        val = p_params->tpa_min_size_to_cont;
        p_tpa->tpa_min_size_to_cont = OSAL_CPU_TO_LE16(val);
}

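/* Copy the approximate-multicast bins into the ramrod; as noted at the
 * call-site, only VFs make use of this.
 */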
static void
ecore_sp_update_mcast_bin(struct vport_update_ramrod_data *p_ramrod,
                          struct ecore_sp_vport_update_params *p_params)
{
        int i;

        OSAL_MEMSET(&p_ramrod->approx_mcast.bins, 0,
                    sizeof(p_ramrod->approx_mcast.bins));

        if (!p_params->update_approx_mcast_flg)
                return;

        p_ramrod->common.update_approx_mcast_flg = 1;
        for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
                u32 *p_bins = p_params->bins;

                p_ramrod->approx_mcast.bins[i] = OSAL_CPU_TO_LE32(p_bins[i]);
        }
}

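/* Post a VPORT_UPDATE ramrod - the main PF entry point for runtime vport
 * reconfiguration (activation, vlan handling, tx-switching, RSS, accept-mode,
 * TPA and MTU). VFs channel the same request to their PF instead.
 */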
enum _ecore_status_t
ecore_sp_vport_update(struct ecore_hwfn *p_hwfn,
                      struct ecore_sp_vport_update_params *p_params,
                      enum spq_mode comp_mode,
                      struct ecore_spq_comp_cb *p_comp_data)
{
        struct ecore_rss_params *p_rss_params = p_params->rss_params;
        struct vport_update_ramrod_data_cmn *p_cmn;
        struct ecore_sp_init_data init_data;
        struct vport_update_ramrod_data *p_ramrod = OSAL_NULL;
        struct ecore_spq_entry *p_ent = OSAL_NULL;
        u8 abs_vport_id = 0, val;
        enum _ecore_status_t rc = ECORE_NOTIMPL;

        if (IS_VF(p_hwfn->p_dev)) {
                rc = ecore_vf_pf_vport_update(p_hwfn, p_params);
                return rc;
        }

        rc = ecore_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
        if (rc != ECORE_SUCCESS)
                return rc;

        /* Get SPQ entry */
        OSAL_MEMSET(&init_data, 0, sizeof(init_data));
        init_data.cid = ecore_spq_get_cid(p_hwfn);
        init_data.opaque_fid = p_params->opaque_fid;
        init_data.comp_mode = comp_mode;
        init_data.p_comp_data = p_comp_data;

        rc = ecore_sp_init_request(p_hwfn, &p_ent,
                                   ETH_RAMROD_VPORT_UPDATE,
                                   PROTOCOLID_ETH, &init_data);
        if (rc != ECORE_SUCCESS)
                return rc;

        /* Copy input params to ramrod according to FW struct */
        p_ramrod = &p_ent->ramrod.vport_update;
        p_cmn = &p_ramrod->common;

        p_cmn->vport_id = abs_vport_id;

        p_cmn->rx_active_flg = p_params->vport_active_rx_flg;
        p_cmn->update_rx_active_flg = p_params->update_vport_active_rx_flg;
        p_cmn->tx_active_flg = p_params->vport_active_tx_flg;
        p_cmn->update_tx_active_flg = p_params->update_vport_active_tx_flg;

        p_cmn->accept_any_vlan = p_params->accept_any_vlan;
        val = p_params->update_accept_any_vlan_flg;
        p_cmn->update_accept_any_vlan_flg = val;

        p_cmn->inner_vlan_removal_en = p_params->inner_vlan_removal_flg;
        val = p_params->update_inner_vlan_removal_flg;
        p_cmn->update_inner_vlan_removal_en_flg = val;

        p_cmn->default_vlan_en = p_params->default_vlan_enable_flg;
        val = p_params->update_default_vlan_enable_flg;
        p_cmn->update_default_vlan_en_flg = val;

        p_cmn->default_vlan = OSAL_CPU_TO_LE16(p_params->default_vlan);
        p_cmn->update_default_vlan_flg = p_params->update_default_vlan_flg;

        p_cmn->silent_vlan_removal_en = p_params->silent_vlan_removal_flg;

        p_ramrod->common.tx_switching_en = p_params->tx_switching_flg;

#ifndef ASIC_ONLY
        if (CHIP_REV_IS_FPGA(p_hwfn->p_dev))
                if (p_ramrod->common.tx_switching_en ||
                    p_ramrod->common.update_tx_switching_en_flg) {
                        DP_NOTICE(p_hwfn, false,
                                  "FPGA - why are we seeing tx-switching? Overriding it\n");
                        p_ramrod->common.tx_switching_en = 0;
                        p_ramrod->common.update_tx_switching_en_flg = 1;
                }
#endif
        p_cmn->update_tx_switching_en_flg = p_params->update_tx_switching_flg;

        p_cmn->anti_spoofing_en = p_params->anti_spoofing_en;
        val = p_params->update_anti_spoofing_en_flg;
        p_ramrod->common.update_anti_spoofing_en_flg = val;

        rc = ecore_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params);
        if (rc != ECORE_SUCCESS) {
                /* Return the SPQ entry taken in ecore_sp_init_request() */
                ecore_spq_return_entry(p_hwfn, p_ent);
                return rc;
        }

        /* Update mcast bins for VFs, PF doesn't use this functionality */
        ecore_sp_update_mcast_bin(p_ramrod, p_params);

        ecore_sp_update_accept_mode(p_hwfn, p_ramrod, p_params->accept_flags);
        ecore_sp_vport_update_sge_tpa(p_ramrod, p_params->sge_tpa_params);
        if (p_params->mtu) {
                p_ramrod->common.update_mtu_flg = 1;
                p_ramrod->common.mtu = OSAL_CPU_TO_LE16(p_params->mtu);
        }

        return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

enum _ecore_status_t ecore_sp_vport_stop(struct ecore_hwfn *p_hwfn,
                                         u16 opaque_fid, u8 vport_id)
{
        struct vport_stop_ramrod_data *p_ramrod;
        struct ecore_sp_init_data init_data;
        struct ecore_spq_entry *p_ent;
        u8 abs_vport_id = 0;
        enum _ecore_status_t rc;

        if (IS_VF(p_hwfn->p_dev))
                return ecore_vf_pf_vport_stop(p_hwfn);

        rc = ecore_fw_vport(p_hwfn, vport_id, &abs_vport_id);
        if (rc != ECORE_SUCCESS)
                return rc;

        /* Get SPQ entry */
        OSAL_MEMSET(&init_data, 0, sizeof(init_data));
        init_data.cid = ecore_spq_get_cid(p_hwfn);
        init_data.opaque_fid = opaque_fid;
        init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

        rc = ecore_sp_init_request(p_hwfn, &p_ent,
                                   ETH_RAMROD_VPORT_STOP,
                                   PROTOCOLID_ETH, &init_data);
        if (rc != ECORE_SUCCESS)
                return rc;

        p_ramrod = &p_ent->ramrod.vport_stop;
        p_ramrod->vport_id = abs_vport_id;

        return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

static enum _ecore_status_t
ecore_vf_pf_accept_flags(struct ecore_hwfn *p_hwfn,
                         struct ecore_filter_accept_flags *p_accept_flags)
{
        struct ecore_sp_vport_update_params s_params;

        OSAL_MEMSET(&s_params, 0, sizeof(s_params));
        OSAL_MEMCPY(&s_params.accept_flags, p_accept_flags,
                    sizeof(struct ecore_filter_accept_flags));

        return ecore_vf_pf_vport_update(p_hwfn, &s_params);
}

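/* Apply rx/tx accept-mode configuration on every hwfn of the device, going
 * through the VF->PF channel where necessary.
 */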
enum _ecore_status_t
ecore_filter_accept_cmd(struct ecore_dev *p_dev,
                        u8 vport,
                        struct ecore_filter_accept_flags accept_flags,
                        u8 update_accept_any_vlan,
                        u8 accept_any_vlan,
                        enum spq_mode comp_mode,
                        struct ecore_spq_comp_cb *p_comp_data)
{
        struct ecore_sp_vport_update_params vport_update_params;
        int i, rc;

        /* Prepare and send the vport rx_mode change */
        OSAL_MEMSET(&vport_update_params, 0, sizeof(vport_update_params));
        vport_update_params.vport_id = vport;
        vport_update_params.accept_flags = accept_flags;
        vport_update_params.update_accept_any_vlan_flg = update_accept_any_vlan;
        vport_update_params.accept_any_vlan = accept_any_vlan;

        for_each_hwfn(p_dev, i) {
                struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];

                vport_update_params.opaque_fid = p_hwfn->hw_info.opaque_fid;

                if (IS_VF(p_dev)) {
                        rc = ecore_vf_pf_accept_flags(p_hwfn, &accept_flags);
                        if (rc != ECORE_SUCCESS)
                                return rc;
                        continue;
                }

                rc = ecore_sp_vport_update(p_hwfn, &vport_update_params,
                                           comp_mode, p_comp_data);
                if (rc != ECORE_SUCCESS) {
                        DP_ERR(p_dev, "Update rx_mode failed %d\n", rc);
                        return rc;
                }

                DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                           "Accept filter configured, flags = [Rx]%x [Tx]%x\n",
                           accept_flags.rx_accept_filter,
                           accept_flags.tx_accept_filter);

                if (update_accept_any_vlan)
                        DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                                   "accept_any_vlan=%d configured\n",
                                   accept_any_vlan);
        }

        return 0;
}

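/* Post a RX_QUEUE_START ramrod for an already-allocated queue-cid; the BD
 * chain address is passed directly, the CQE ring via its PBL.
 */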
enum _ecore_status_t
ecore_eth_rxq_start_ramrod(struct ecore_hwfn *p_hwfn,
                           struct ecore_queue_cid *p_cid,
                           u16 bd_max_bytes,
                           dma_addr_t bd_chain_phys_addr,
                           dma_addr_t cqe_pbl_addr,
                           u16 cqe_pbl_size)
{
        struct rx_queue_start_ramrod_data *p_ramrod = OSAL_NULL;
        struct ecore_spq_entry *p_ent = OSAL_NULL;
        struct ecore_sp_init_data init_data;
        enum _ecore_status_t rc = ECORE_NOTIMPL;

        DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                   "opaque_fid=0x%x, cid=0x%x, rx_qzone=0x%x, vport_id=0x%x, sb_id=0x%x\n",
                   p_cid->opaque_fid, p_cid->cid, p_cid->abs.queue_id,
                   p_cid->abs.vport_id, p_cid->sb_igu_id);

        /* Get SPQ entry */
        OSAL_MEMSET(&init_data, 0, sizeof(init_data));
        init_data.cid = p_cid->cid;
        init_data.opaque_fid = p_cid->opaque_fid;
        init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

        rc = ecore_sp_init_request(p_hwfn, &p_ent,
                                   ETH_RAMROD_RX_QUEUE_START,
                                   PROTOCOLID_ETH, &init_data);
        if (rc != ECORE_SUCCESS)
                return rc;

        p_ramrod = &p_ent->ramrod.rx_queue_start;

        p_ramrod->sb_id = OSAL_CPU_TO_LE16(p_cid->sb_igu_id);
        p_ramrod->sb_index = p_cid->sb_idx;
        p_ramrod->vport_id = p_cid->abs.vport_id;
        p_ramrod->stats_counter_id = p_cid->abs.stats_id;
        p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id);
        p_ramrod->complete_cqe_flg = 0;
        p_ramrod->complete_event_flg = 1;

        p_ramrod->bd_max_bytes = OSAL_CPU_TO_LE16(bd_max_bytes);
        DMA_REGPAIR_LE(p_ramrod->bd_base, bd_chain_phys_addr);

        p_ramrod->num_of_pbl_pages = OSAL_CPU_TO_LE16(cqe_pbl_size);
        DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr);

        if (p_cid->vfid != ECORE_QUEUE_CID_PF) {
                bool b_legacy_vf = !!(p_cid->vf_legacy &
                                      ECORE_QCID_LEGACY_VF_RX_PROD);

                p_ramrod->vf_rx_prod_index = p_cid->vf_qid;
                DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
                           "Queue%s is meant for VF rxq[%02x]\n",
                           b_legacy_vf ? " [legacy]" : "",
                           p_cid->vf_qid);
                p_ramrod->vf_rx_prod_use_zone_a = b_legacy_vf;
        }

        return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

static enum _ecore_status_t
ecore_eth_pf_rx_queue_start(struct ecore_hwfn *p_hwfn,
                            struct ecore_queue_cid *p_cid,
                            u16 bd_max_bytes,
                            dma_addr_t bd_chain_phys_addr,
                            dma_addr_t cqe_pbl_addr,
                            u16 cqe_pbl_size,
                            void OSAL_IOMEM * *pp_prod)
{
        u32 init_prod_val = 0;

        *pp_prod = (u8 OSAL_IOMEM *)
                    p_hwfn->regview +
                    GTT_BAR0_MAP_REG_MSDM_RAM +
                    MSTORM_ETH_PF_PRODS_OFFSET(p_cid->abs.queue_id);

        /* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
        __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
                          (u32 *)(&init_prod_val));

        return ecore_eth_rxq_start_ramrod(p_hwfn, p_cid,
                                          bd_max_bytes,
                                          bd_chain_phys_addr,
                                          cqe_pbl_addr, cqe_pbl_size);
}

enum _ecore_status_t
ecore_eth_rx_queue_start(struct ecore_hwfn *p_hwfn,
                         u16 opaque_fid,
                         struct ecore_queue_start_common_params *p_params,
                         u16 bd_max_bytes,
                         dma_addr_t bd_chain_phys_addr,
                         dma_addr_t cqe_pbl_addr,
                         u16 cqe_pbl_size,
                         struct ecore_rxq_start_ret_params *p_ret_params)
{
        struct ecore_queue_cid *p_cid;
        enum _ecore_status_t rc;

        /* Allocate a CID for the queue */
        p_cid = ecore_eth_queue_to_cid_pf(p_hwfn, opaque_fid, true, p_params);
        if (p_cid == OSAL_NULL)
                return ECORE_NOMEM;

        if (IS_PF(p_hwfn->p_dev))
                rc = ecore_eth_pf_rx_queue_start(p_hwfn, p_cid,
                                                 bd_max_bytes,
                                                 bd_chain_phys_addr,
                                                 cqe_pbl_addr, cqe_pbl_size,
                                                 &p_ret_params->p_prod);
        else
                rc = ecore_vf_pf_rxq_start(p_hwfn, p_cid,
                                           bd_max_bytes,
                                           bd_chain_phys_addr,
                                           cqe_pbl_addr,
                                           cqe_pbl_size,
                                           &p_ret_params->p_prod);

        /* Provide the caller with the queue-cid as a handle on success */
        if (rc != ECORE_SUCCESS)
                ecore_eth_queue_cid_release(p_hwfn, p_cid);
        else
                p_ret_params->p_handle = (void *)p_cid;

        return rc;
}

enum _ecore_status_t
ecore_sp_eth_rx_queues_update(struct ecore_hwfn *p_hwfn,
                              void **pp_rxq_handles,
                              u8 num_rxqs,
                              u8 complete_cqe_flg,
                              u8 complete_event_flg,
                              enum spq_mode comp_mode,
                              struct ecore_spq_comp_cb *p_comp_data)
{
        struct rx_queue_update_ramrod_data *p_ramrod = OSAL_NULL;
        struct ecore_spq_entry *p_ent = OSAL_NULL;
        struct ecore_sp_init_data init_data;
        struct ecore_queue_cid *p_cid;
        enum _ecore_status_t rc = ECORE_NOTIMPL;
        u8 i;

        if (IS_VF(p_hwfn->p_dev))
                return ecore_vf_pf_rxqs_update(p_hwfn,
                                               (struct ecore_queue_cid **)
                                               pp_rxq_handles,
                                               num_rxqs,
                                               complete_cqe_flg,
                                               complete_event_flg);

        OSAL_MEMSET(&init_data, 0, sizeof(init_data));
        init_data.comp_mode = comp_mode;
        init_data.p_comp_data = p_comp_data;

        for (i = 0; i < num_rxqs; i++) {
                p_cid = ((struct ecore_queue_cid **)pp_rxq_handles)[i];

                /* Get SPQ entry */
                init_data.cid = p_cid->cid;
                init_data.opaque_fid = p_cid->opaque_fid;

                rc = ecore_sp_init_request(p_hwfn, &p_ent,
                                           ETH_RAMROD_RX_QUEUE_UPDATE,
                                           PROTOCOLID_ETH, &init_data);
                if (rc != ECORE_SUCCESS)
                        return rc;

                p_ramrod = &p_ent->ramrod.rx_queue_update;
                p_ramrod->vport_id = p_cid->abs.vport_id;

                p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id);
                p_ramrod->complete_cqe_flg = complete_cqe_flg;
                p_ramrod->complete_event_flg = complete_event_flg;

                rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
                if (rc != ECORE_SUCCESS)
                        return rc;
        }

        return rc;
}

static enum _ecore_status_t
ecore_eth_pf_rx_queue_stop(struct ecore_hwfn *p_hwfn,
                           struct ecore_queue_cid *p_cid,
                           bool b_eq_completion_only,
                           bool b_cqe_completion)
{
        struct rx_queue_stop_ramrod_data *p_ramrod = OSAL_NULL;
        struct ecore_spq_entry *p_ent = OSAL_NULL;
        struct ecore_sp_init_data init_data;
        enum _ecore_status_t rc;

        OSAL_MEMSET(&init_data, 0, sizeof(init_data));
        init_data.cid = p_cid->cid;
        init_data.opaque_fid = p_cid->opaque_fid;
        init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

        rc = ecore_sp_init_request(p_hwfn, &p_ent,
                                   ETH_RAMROD_RX_QUEUE_STOP,
                                   PROTOCOLID_ETH, &init_data);
        if (rc != ECORE_SUCCESS)
                return rc;

        p_ramrod = &p_ent->ramrod.rx_queue_stop;
        p_ramrod->vport_id = p_cid->abs.vport_id;
        p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id);

        /* Cleaning the queue requires the completion to arrive there.
         * In addition, VFs require the answer to arrive as an EQE to the PF.
         */
        p_ramrod->complete_cqe_flg = ((p_cid->vfid == ECORE_QUEUE_CID_PF) &&
                                      !b_eq_completion_only) ||
                                     b_cqe_completion;
        p_ramrod->complete_event_flg = (p_cid->vfid != ECORE_QUEUE_CID_PF) ||
                                       b_eq_completion_only;

        return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

enum _ecore_status_t ecore_eth_rx_queue_stop(struct ecore_hwfn *p_hwfn,
                                             void *p_rxq,
                                             bool eq_completion_only,
                                             bool cqe_completion)
{
        struct ecore_queue_cid *p_cid = (struct ecore_queue_cid *)p_rxq;
        enum _ecore_status_t rc = ECORE_NOTIMPL;

        if (IS_PF(p_hwfn->p_dev))
                rc = ecore_eth_pf_rx_queue_stop(p_hwfn, p_cid,
                                                eq_completion_only,
                                                cqe_completion);
        else
                rc = ecore_vf_pf_rxq_stop(p_hwfn, p_cid, cqe_completion);

        if (rc == ECORE_SUCCESS)
                ecore_eth_queue_cid_release(p_hwfn, p_cid);
        return rc;
}

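/* Post a TX_QUEUE_START ramrod for an already-allocated queue-cid; the Tx BD
 * ring is passed via its PBL and is bound to the physical-queue [pq_id]
 * chosen by the caller.
 */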
enum _ecore_status_t
ecore_eth_txq_start_ramrod(struct ecore_hwfn *p_hwfn,
                           struct ecore_queue_cid *p_cid,
                           dma_addr_t pbl_addr, u16 pbl_size,
                           u16 pq_id)
{
        struct tx_queue_start_ramrod_data *p_ramrod = OSAL_NULL;
        struct ecore_spq_entry *p_ent = OSAL_NULL;
        struct ecore_sp_init_data init_data;
        enum _ecore_status_t rc = ECORE_NOTIMPL;

        /* Get SPQ entry */
        OSAL_MEMSET(&init_data, 0, sizeof(init_data));
        init_data.cid = p_cid->cid;
        init_data.opaque_fid = p_cid->opaque_fid;
        init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

        rc = ecore_sp_init_request(p_hwfn, &p_ent,
                                   ETH_RAMROD_TX_QUEUE_START,
                                   PROTOCOLID_ETH, &init_data);
        if (rc != ECORE_SUCCESS)
                return rc;

        p_ramrod = &p_ent->ramrod.tx_queue_start;
        p_ramrod->vport_id = p_cid->abs.vport_id;

        p_ramrod->sb_id = OSAL_CPU_TO_LE16(p_cid->sb_igu_id);
        p_ramrod->sb_index = p_cid->sb_idx;
        p_ramrod->stats_counter_id = p_cid->abs.stats_id;

        p_ramrod->queue_zone_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id);
        p_ramrod->same_as_last_id = OSAL_CPU_TO_LE16(p_cid->abs.queue_id);

        p_ramrod->pbl_size = OSAL_CPU_TO_LE16(pbl_size);
        DMA_REGPAIR_LE(p_ramrod->pbl_base_addr, pbl_addr);

        p_ramrod->qm_pq_id = OSAL_CPU_TO_LE16(pq_id);

        return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

static enum _ecore_status_t
ecore_eth_pf_tx_queue_start(struct ecore_hwfn *p_hwfn,
                            struct ecore_queue_cid *p_cid,
                            u8 tc,
                            dma_addr_t pbl_addr, u16 pbl_size,
                            void OSAL_IOMEM * *pp_doorbell)
{
        enum _ecore_status_t rc;
        u16 pq_id;

        /* TODO - set tc in the pq_params for multi-cos.
         * If pacing is enabled, select the queue according to rate-limiter
         * availability; otherwise select the queue based on multi-cos.
         */
        if (IS_ECORE_PACING(p_hwfn))
                pq_id = ecore_get_cm_pq_idx_rl(p_hwfn, p_cid->rel.queue_id);
        else
                pq_id = ecore_get_cm_pq_idx_mcos(p_hwfn, tc);

        rc = ecore_eth_txq_start_ramrod(p_hwfn, p_cid, pbl_addr,
                                        pbl_size, pq_id);
        if (rc != ECORE_SUCCESS)
                return rc;

        /* Provide the caller with the necessary return values */
        *pp_doorbell = (u8 OSAL_IOMEM *)
                       p_hwfn->doorbells +
                       DB_ADDR(p_cid->cid, DQ_DEMS_LEGACY);

        return ECORE_SUCCESS;
}

enum _ecore_status_t
ecore_eth_tx_queue_start(struct ecore_hwfn *p_hwfn, u16 opaque_fid,
                         struct ecore_queue_start_common_params *p_params,
                         u8 tc,
                         dma_addr_t pbl_addr, u16 pbl_size,
                         struct ecore_txq_start_ret_params *p_ret_params)
{
        struct ecore_queue_cid *p_cid;
        enum _ecore_status_t rc;

        p_cid = ecore_eth_queue_to_cid_pf(p_hwfn, opaque_fid, false, p_params);
        if (p_cid == OSAL_NULL)
                return ECORE_INVAL;

        if (IS_PF(p_hwfn->p_dev))
                rc = ecore_eth_pf_tx_queue_start(p_hwfn, p_cid, tc,
                                                 pbl_addr, pbl_size,
                                                 &p_ret_params->p_doorbell);
        else
                rc = ecore_vf_pf_txq_start(p_hwfn, p_cid,
                                           pbl_addr, pbl_size,
                                           &p_ret_params->p_doorbell);

        if (rc != ECORE_SUCCESS)
                ecore_eth_queue_cid_release(p_hwfn, p_cid);
        else
                p_ret_params->p_handle = (void *)p_cid;

        return rc;
}

static enum _ecore_status_t
ecore_eth_pf_tx_queue_stop(struct ecore_hwfn *p_hwfn,
                           struct ecore_queue_cid *p_cid)
{
        struct ecore_spq_entry *p_ent = OSAL_NULL;
        struct ecore_sp_init_data init_data;
        enum _ecore_status_t rc;

        OSAL_MEMSET(&init_data, 0, sizeof(init_data));
        init_data.cid = p_cid->cid;
        init_data.opaque_fid = p_cid->opaque_fid;
        init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;

        rc = ecore_sp_init_request(p_hwfn, &p_ent,
                                   ETH_RAMROD_TX_QUEUE_STOP,
                                   PROTOCOLID_ETH, &init_data);
        if (rc != ECORE_SUCCESS)
                return rc;

        return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
}

enum _ecore_status_t ecore_eth_tx_queue_stop(struct ecore_hwfn *p_hwfn,
                                             void *p_handle)
{
        struct ecore_queue_cid *p_cid = (struct ecore_queue_cid *)p_handle;
        enum _ecore_status_t rc;

        if (IS_PF(p_hwfn->p_dev))
                rc = ecore_eth_pf_tx_queue_stop(p_hwfn, p_cid);
        else
                rc = ecore_vf_pf_txq_stop(p_hwfn, p_cid);

        if (rc == ECORE_SUCCESS)
                ecore_eth_queue_cid_release(p_hwfn, p_cid);
        return rc;
}

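/* Map the abstract filter opcode into the matching FW filter action;
 * MAX_ETH_FILTER_ACTION indicates an opcode with no direct mapping.
 */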
static enum eth_filter_action
ecore_filter_action(enum ecore_filter_opcode opcode)
{
        enum eth_filter_action action = MAX_ETH_FILTER_ACTION;

        switch (opcode) {
        case ECORE_FILTER_ADD:
                action = ETH_FILTER_ACTION_ADD;
                break;
        case ECORE_FILTER_REMOVE:
                action = ETH_FILTER_ACTION_REMOVE;
                break;
        case ECORE_FILTER_FLUSH:
                action = ETH_FILTER_ACTION_REMOVE_ALL;
                break;
        default:
                action = MAX_ETH_FILTER_ACTION;
        }

        return action;
}

1307 static enum _ecore_status_t
1308 ecore_filter_ucast_common(struct ecore_hwfn *p_hwfn,
1309                           u16 opaque_fid,
1310                           struct ecore_filter_ucast *p_filter_cmd,
1311                           struct vport_filter_update_ramrod_data **pp_ramrod,
1312                           struct ecore_spq_entry **pp_ent,
1313                           enum spq_mode comp_mode,
1314                           struct ecore_spq_comp_cb *p_comp_data)
1315 {
1316         u8 vport_to_add_to = 0, vport_to_remove_from = 0;
1317         struct vport_filter_update_ramrod_data *p_ramrod;
1318         struct eth_filter_cmd *p_first_filter;
1319         struct eth_filter_cmd *p_second_filter;
1320         struct ecore_sp_init_data init_data;
1321         enum eth_filter_action action;
1322         enum _ecore_status_t rc;
1323
1324         rc = ecore_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from,
1325                             &vport_to_remove_from);
1326         if (rc != ECORE_SUCCESS)
1327                 return rc;
1328
1329         rc = ecore_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to,
1330                             &vport_to_add_to);
1331         if (rc != ECORE_SUCCESS)
1332                 return rc;
1333
1334         /* Get SPQ entry */
1335         OSAL_MEMSET(&init_data, 0, sizeof(init_data));
1336         init_data.cid = ecore_spq_get_cid(p_hwfn);
1337         init_data.opaque_fid = opaque_fid;
1338         init_data.comp_mode = comp_mode;
1339         init_data.p_comp_data = p_comp_data;
1340
1341         rc = ecore_sp_init_request(p_hwfn, pp_ent,
1342                                    ETH_RAMROD_FILTERS_UPDATE,
1343                                    PROTOCOLID_ETH, &init_data);
1344         if (rc != ECORE_SUCCESS)
1345                 return rc;
1346
1347         *pp_ramrod = &(*pp_ent)->ramrod.vport_filter_update;
1348         p_ramrod = *pp_ramrod;
1349         p_ramrod->filter_cmd_hdr.rx = p_filter_cmd->is_rx_filter ? 1 : 0;
1350         p_ramrod->filter_cmd_hdr.tx = p_filter_cmd->is_tx_filter ? 1 : 0;
1351
1352 #ifndef ASIC_ONLY
1353         if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) {
1354                 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
1355                            "Non-Asic - prevent Tx filters\n");
1356                 p_ramrod->filter_cmd_hdr.tx = 0;
1357         }
1358 #endif
1359
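        /* MOVE and REPLACE are compound operations: each consumes two filter
         * commands (a REMOVE or REMOVE_ALL followed by an ADD), as set up
         * further below; all other opcodes fit in a single command.
         */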
1360         switch (p_filter_cmd->opcode) {
1361         case ECORE_FILTER_REPLACE:
1362         case ECORE_FILTER_MOVE:
1363                 p_ramrod->filter_cmd_hdr.cmd_cnt = 2;
1364                 break;
1365         default:
1366                 p_ramrod->filter_cmd_hdr.cmd_cnt = 1;
1367                 break;
1368         }
1369
1370         p_first_filter = &p_ramrod->filter_cmds[0];
1371         p_second_filter = &p_ramrod->filter_cmds[1];
1372
1373         switch (p_filter_cmd->type) {
1374         case ECORE_FILTER_MAC:
1375                 p_first_filter->type = ETH_FILTER_TYPE_MAC;
1376                 break;
1377         case ECORE_FILTER_VLAN:
1378                 p_first_filter->type = ETH_FILTER_TYPE_VLAN;
1379                 break;
1380         case ECORE_FILTER_MAC_VLAN:
1381                 p_first_filter->type = ETH_FILTER_TYPE_PAIR;
1382                 break;
1383         case ECORE_FILTER_INNER_MAC:
1384                 p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC;
1385                 break;
1386         case ECORE_FILTER_INNER_VLAN:
1387                 p_first_filter->type = ETH_FILTER_TYPE_INNER_VLAN;
1388                 break;
1389         case ECORE_FILTER_INNER_PAIR:
1390                 p_first_filter->type = ETH_FILTER_TYPE_INNER_PAIR;
1391                 break;
1392         case ECORE_FILTER_INNER_MAC_VNI_PAIR:
1393                 p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR;
1394                 break;
1395         case ECORE_FILTER_MAC_VNI_PAIR:
1396                 p_first_filter->type = ETH_FILTER_TYPE_MAC_VNI_PAIR;
1397                 break;
1398         case ECORE_FILTER_VNI:
1399                 p_first_filter->type = ETH_FILTER_TYPE_VNI;
1400                 break;
1401         case ECORE_FILTER_UNUSED: /* @DPDK */
1402                 p_first_filter->type = MAX_ETH_FILTER_TYPE;
1403                 break;
1404         }
1405
1406         if ((p_first_filter->type == ETH_FILTER_TYPE_MAC) ||
1407             (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
1408             (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC) ||
1409             (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR) ||
1410             (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
1411             (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR))
1412                 ecore_set_fw_mac_addr(&p_first_filter->mac_msb,
1413                                       &p_first_filter->mac_mid,
1414                                       &p_first_filter->mac_lsb,
1415                                       (u8 *)p_filter_cmd->mac);
1416
1417         if ((p_first_filter->type == ETH_FILTER_TYPE_VLAN) ||
1418             (p_first_filter->type == ETH_FILTER_TYPE_PAIR) ||
1419             (p_first_filter->type == ETH_FILTER_TYPE_INNER_VLAN) ||
1420             (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR))
1421                 p_first_filter->vlan_id = OSAL_CPU_TO_LE16(p_filter_cmd->vlan);
1422
1423         if ((p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) ||
1424             (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR) ||
1425             (p_first_filter->type == ETH_FILTER_TYPE_VNI))
1426                 p_first_filter->vni = OSAL_CPU_TO_LE32(p_filter_cmd->vni);
1427
1428         if (p_filter_cmd->opcode == ECORE_FILTER_MOVE) {
1429                 p_second_filter->type = p_first_filter->type;
1430                 p_second_filter->mac_msb = p_first_filter->mac_msb;
1431                 p_second_filter->mac_mid = p_first_filter->mac_mid;
1432                 p_second_filter->mac_lsb = p_first_filter->mac_lsb;
1433                 p_second_filter->vlan_id = p_first_filter->vlan_id;
1434                 p_second_filter->vni = p_first_filter->vni;
1435
1436                 p_first_filter->action = ETH_FILTER_ACTION_REMOVE;
1437
1438                 p_first_filter->vport_id = vport_to_remove_from;
1439
1440                 p_second_filter->action = ETH_FILTER_ACTION_ADD;
1441                 p_second_filter->vport_id = vport_to_add_to;
1442         } else if (p_filter_cmd->opcode == ECORE_FILTER_REPLACE) {
1443                 p_first_filter->vport_id = vport_to_add_to;
1444                 OSAL_MEMCPY(p_second_filter, p_first_filter,
1445                             sizeof(*p_second_filter));
1446                 p_first_filter->action = ETH_FILTER_ACTION_REMOVE_ALL;
1447                 p_second_filter->action = ETH_FILTER_ACTION_ADD;
1448         } else {
1449                 action = ecore_filter_action(p_filter_cmd->opcode);
1450
1451                 if (action == MAX_ETH_FILTER_ACTION) {
1452                         DP_NOTICE(p_hwfn, true,
1453                                   "Filter opcode %d is not supported yet\n",
1454                                   p_filter_cmd->opcode);
1455                         return ECORE_NOTIMPL;
1456                 }
1457
1458                 p_first_filter->action = action;
1459                 p_first_filter->vport_id =
1460                     (p_filter_cmd->opcode == ECORE_FILTER_REMOVE) ?
1461                     vport_to_remove_from : vport_to_add_to;
1462         }
1463
1464         return ECORE_SUCCESS;
1465 }
1466
1467 enum _ecore_status_t
1468 ecore_sp_eth_filter_ucast(struct ecore_hwfn *p_hwfn,
1469                           u16 opaque_fid,
1470                           struct ecore_filter_ucast *p_filter_cmd,
1471                           enum spq_mode comp_mode,
1472                           struct ecore_spq_comp_cb *p_comp_data)
1473 {
1474         struct vport_filter_update_ramrod_data *p_ramrod = OSAL_NULL;
1475         struct ecore_spq_entry *p_ent = OSAL_NULL;
1476         struct eth_filter_cmd_header *p_header;
1477         enum _ecore_status_t rc;
1478
1479         rc = ecore_filter_ucast_common(p_hwfn, opaque_fid, p_filter_cmd,
1480                                        &p_ramrod, &p_ent,
1481                                        comp_mode, p_comp_data);
1482         if (rc != ECORE_SUCCESS) {
1483                 DP_ERR(p_hwfn, "Unicast filter command init failed %d\n", rc);
1484                 return rc;
1485         }
1486         p_header = &p_ramrod->filter_cmd_hdr;
1487         p_header->assert_on_error = p_filter_cmd->assert_on_error;
1488
1489         rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
1490         if (rc != ECORE_SUCCESS) {
1491                 DP_ERR(p_hwfn, "Unicast filter command failed %d\n", rc);
1492                 return rc;
1493         }
1494
1495         DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
1496                    "Unicast filter configured, opcode = %s, type = %s, cmd_cnt = %d, is_rx_filter = %d, is_tx_filter = %d\n",
1497                    (p_filter_cmd->opcode == ECORE_FILTER_ADD) ? "ADD" :
1498                    ((p_filter_cmd->opcode == ECORE_FILTER_REMOVE) ?
1499                     "REMOVE" :
1500                     ((p_filter_cmd->opcode == ECORE_FILTER_MOVE) ?
1501                      "MOVE" : "REPLACE")),
1502                    (p_filter_cmd->type == ECORE_FILTER_MAC) ? "MAC" :
1503                    ((p_filter_cmd->type == ECORE_FILTER_VLAN) ?
1504                     "VLAN" : "MAC & VLAN"),
1505                    p_ramrod->filter_cmd_hdr.cmd_cnt,
1506                    p_filter_cmd->is_rx_filter, p_filter_cmd->is_tx_filter);
1507         DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
1508                    "vport_to_add_to = %d, vport_to_remove_from = %d, mac = %02x:%02x:%02x:%02x:%02x:%02x, vlan = %d\n",
1509                    p_filter_cmd->vport_to_add_to,
1510                    p_filter_cmd->vport_to_remove_from,
1511                    p_filter_cmd->mac[0], p_filter_cmd->mac[1],
1512                    p_filter_cmd->mac[2], p_filter_cmd->mac[3],
1513                    p_filter_cmd->mac[4], p_filter_cmd->mac[5],
1514                    p_filter_cmd->vlan);
1515
1516         return ECORE_SUCCESS;
1517 }
1518
1519 /*******************************************************************************
1520  * Description:
1521  *         Calculates CRC-32C (polynomial CRC32_POLY) over a buffer, bit by bit
1522  *         Note: crc32_length is in bytes and MUST be a multiple of 8
1523  * Return: The CRC; on invalid input the seed is returned unchanged
1524  ******************************************************************************/
1525 static u32 ecore_calc_crc32c(u8 *crc32_packet, u32 crc32_length, u32 crc32_seed)
1526 {
1527         u32 byte = 0, bit = 0, crc32_result = crc32_seed;
1528         u8 msb = 0, current_byte = 0;
1529
1530         if ((crc32_packet == OSAL_NULL) ||
1531             (crc32_length == 0) || ((crc32_length % 8) != 0)) {
1532                 return crc32_result;
1533         }
1534
1535         for (byte = 0; byte < crc32_length; byte++) {
1536                 current_byte = crc32_packet[byte];
1537                 for (bit = 0; bit < 8; bit++) {
1538                         msb = (u8)(crc32_result >> 31);
1539                         crc32_result = crc32_result << 1;
1540                         if (msb != (0x1 & (current_byte >> bit))) {
1541                                 crc32_result = crc32_result ^ CRC32_POLY;
1542                                 crc32_result |= 1;
1543                         }
1544                 }
1545         }
1546
1547         return crc32_result;
1548 }
1549
1550 static u32 ecore_crc32c_le(u32 seed, u8 *mac)
1551 {
1552         u32 packet_buf[2] = { 0 };
1553
1554         OSAL_MEMCPY((u8 *)(&packet_buf[0]), &mac[0], 6);
1555         return ecore_calc_crc32c((u8 *)packet_buf, 8, seed);
1556 }
1557
1558 u8 ecore_mcast_bin_from_mac(u8 *mac)
1559 {
1560         u32 crc = ecore_crc32c_le(ETH_MULTICAST_BIN_FROM_MAC_SEED, mac);
1561
1562         return crc & 0xff;
1563 }
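
/* For illustration only - a self-contained sketch (not part of the driver)
 * of the multicast-bin derivation above: the 6-byte MAC is zero-padded to
 * 8 bytes, run through the same bit-wise CRC-32C (poly 0x1edc6f41), and the
 * low 8 bits of the result select one of the 256 approximate-match bins.
 * All demo_* names are hypothetical; the seed would be
 * ETH_MULTICAST_BIN_FROM_MAC_SEED, as ecore_mcast_bin_from_mac() uses.
 * Kept compiled-out.
 */
#if 0
#include <stdint.h>

static uint32_t demo_crc32c(const uint8_t *buf, uint32_t len, uint32_t seed)
{
        uint32_t crc = seed;
        uint32_t byte, bit;

        /* Same contract as ecore_calc_crc32c(): len in bytes, multiple of 8 */
        if (buf == NULL || len == 0 || (len % 8) != 0)
                return crc;

        for (byte = 0; byte < len; byte++) {
                for (bit = 0; bit < 8; bit++) {
                        uint8_t msb = (uint8_t)(crc >> 31);

                        crc <<= 1;
                        if (msb != (0x1 & (buf[byte] >> bit))) {
                                crc ^= 0x1edc6f41;      /* CRC32_POLY */
                                crc |= 1;
                        }
                }
        }
        return crc;
}

static uint8_t demo_mcast_bin(const uint8_t mac[6], uint32_t seed)
{
        uint8_t buf[8] = { 0 };  /* zero-padded, like ecore_crc32c_le() */
        int i;

        for (i = 0; i < 6; i++)
                buf[i] = mac[i];
        return (uint8_t)(demo_crc32c(buf, 8, seed) & 0xff);
}
#endif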
1564
1565 static enum _ecore_status_t
1566 ecore_sp_eth_filter_mcast(struct ecore_hwfn *p_hwfn,
1567                           struct ecore_filter_mcast *p_filter_cmd,
1568                           enum spq_mode comp_mode,
1569                           struct ecore_spq_comp_cb *p_comp_data)
1570 {
1571         struct vport_update_ramrod_data *p_ramrod = OSAL_NULL;
1572         u32 bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
1573         struct ecore_spq_entry *p_ent = OSAL_NULL;
1574         struct ecore_sp_init_data init_data;
1575         u8 abs_vport_id = 0;
1576         enum _ecore_status_t rc;
1577         int i;
1578
1579         if (p_filter_cmd->opcode == ECORE_FILTER_ADD)
1580                 rc = ecore_fw_vport(p_hwfn,
1581                                     p_filter_cmd->vport_to_add_to,
1582                                     &abs_vport_id);
1583         else
1584                 rc = ecore_fw_vport(p_hwfn,
1585                                     p_filter_cmd->vport_to_remove_from,
1586                                     &abs_vport_id);
1587         if (rc != ECORE_SUCCESS)
1588                 return rc;
1589
1590         /* Get SPQ entry */
1591         OSAL_MEMSET(&init_data, 0, sizeof(init_data));
1592         init_data.cid = ecore_spq_get_cid(p_hwfn);
1593         init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
1594         init_data.comp_mode = comp_mode;
1595         init_data.p_comp_data = p_comp_data;
1596
1597         rc = ecore_sp_init_request(p_hwfn, &p_ent,
1598                                    ETH_RAMROD_VPORT_UPDATE,
1599                                    PROTOCOLID_ETH, &init_data);
1600         if (rc != ECORE_SUCCESS) {
1601                 DP_ERR(p_hwfn, "Multicast command failed %d\n", rc);
1602                 return rc;
1603         }
1604
1605         p_ramrod = &p_ent->ramrod.vport_update;
1606         p_ramrod->common.update_approx_mcast_flg = 1;
1607
1608         /* explicitly clear out the entire vector */
1609         OSAL_MEMSET(&p_ramrod->approx_mcast.bins,
1610                     0, sizeof(p_ramrod->approx_mcast.bins));
1611         OSAL_MEMSET(bins, 0, sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS);
1612         /* The filter ADD op is an explicit set op which removes
1613          * any existing filters for the vport.
1614          */
1615         if (p_filter_cmd->opcode == ECORE_FILTER_ADD) {
1616                 for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
1617                         u32 bit;
1618
1619                         bit = ecore_mcast_bin_from_mac(p_filter_cmd->mac[i]);
1620                         bins[bit / 32] |= 1 << (bit % 32);
1621                 }
1622
1623                 /* Convert to correct endianness */
1624                 for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
1625                         struct vport_update_ramrod_mcast *p_ramrod_bins;
1626
1627                         p_ramrod_bins = &p_ramrod->approx_mcast;
1628                         p_ramrod_bins->bins[i] = OSAL_CPU_TO_LE32(bins[i]);
1629                 }
1630         }
1631
1632         p_ramrod->common.vport_id = abs_vport_id;
1633
1634         rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
1635         if (rc != ECORE_SUCCESS)
1636                 DP_ERR(p_hwfn, "Multicast filter command failed %d\n", rc);
1637
1638         return rc;
1639 }
1640
1641 enum _ecore_status_t
1642 ecore_filter_mcast_cmd(struct ecore_dev *p_dev,
1643                        struct ecore_filter_mcast *p_filter_cmd,
1644                        enum spq_mode comp_mode,
1645                        struct ecore_spq_comp_cb *p_comp_data)
1646 {
1647         enum _ecore_status_t rc = ECORE_SUCCESS;
1648         int i;
1649
1650         /* Only ADD and REMOVE operations are supported for multicast */
1651         if ((p_filter_cmd->opcode != ECORE_FILTER_ADD &&
1652              p_filter_cmd->opcode != ECORE_FILTER_REMOVE) ||
1653             (p_filter_cmd->num_mc_addrs > ECORE_MAX_MC_ADDRS)) {
1654                 return ECORE_INVAL;
1655         }
1656
1657         for_each_hwfn(p_dev, i) {
1658                 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
1659
1660                 if (IS_VF(p_dev)) {
1661                         ecore_vf_pf_filter_mcast(p_hwfn, p_filter_cmd);
1662                         continue;
1663                 }
1664
1665                 rc = ecore_sp_eth_filter_mcast(p_hwfn,
1666                                                p_filter_cmd,
1667                                                comp_mode, p_comp_data);
1668                 if (rc != ECORE_SUCCESS)
1669                         break;
1670         }
1671
1672         return rc;
1673 }
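
/* A minimal usage sketch (illustrative, not part of the driver): programming
 * an explicit multicast set on relative vport 0 with blocking completion.
 * Field names assume the struct ecore_filter_mcast declaration in
 * ecore_l2_api.h; the caller is assumed to bound 'count' by
 * ECORE_MAX_MC_ADDRS. Kept compiled-out.
 */
#if 0
static enum _ecore_status_t
demo_set_mcast(struct ecore_dev *p_dev, u8 macs[][ETH_ALEN], u8 count)
{
        struct ecore_filter_mcast mcast;
        u8 i;

        OSAL_MEMSET(&mcast, 0, sizeof(mcast));
        mcast.opcode = ECORE_FILTER_ADD;  /* explicit set; replaces all bins */
        mcast.vport_to_add_to = 0;
        mcast.num_mc_addrs = count;
        for (i = 0; i < count; i++)
                OSAL_MEMCPY(mcast.mac[i], macs[i], ETH_ALEN);

        return ecore_filter_mcast_cmd(p_dev, &mcast,
                                      ECORE_SPQ_MODE_EBLOCK, OSAL_NULL);
}
#endif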
1674
1675 enum _ecore_status_t
1676 ecore_filter_ucast_cmd(struct ecore_dev *p_dev,
1677                        struct ecore_filter_ucast *p_filter_cmd,
1678                        enum spq_mode comp_mode,
1679                        struct ecore_spq_comp_cb *p_comp_data)
1680 {
1681         enum _ecore_status_t rc = ECORE_SUCCESS;
1682         int i;
1683
1684         for_each_hwfn(p_dev, i) {
1685                 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
1686                 u16 opaque_fid;
1687
1688                 if (IS_VF(p_dev)) {
1689                         rc = ecore_vf_pf_filter_ucast(p_hwfn, p_filter_cmd);
1690                         continue;
1691                 }
1692
1693                 opaque_fid = p_hwfn->hw_info.opaque_fid;
1694                 rc = ecore_sp_eth_filter_ucast(p_hwfn,
1695                                                opaque_fid,
1696                                                p_filter_cmd,
1697                                                comp_mode, p_comp_data);
1698                 if (rc != ECORE_SUCCESS)
1699                         break;
1700         }
1701
1702         return rc;
1703 }
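
/* A minimal usage sketch (illustrative, not part of the driver): adding a
 * perfect-match unicast MAC on relative vport 0 for both RX and TX. Field
 * names assume the struct ecore_filter_ucast declaration in ecore_l2_api.h.
 * Kept compiled-out.
 */
#if 0
static enum _ecore_status_t
demo_add_ucast_mac(struct ecore_dev *p_dev, const u8 *mac)
{
        struct ecore_filter_ucast ucast;

        OSAL_MEMSET(&ucast, 0, sizeof(ucast));
        ucast.opcode = ECORE_FILTER_ADD;
        ucast.type = ECORE_FILTER_MAC;
        ucast.is_rx_filter = 1;
        ucast.is_tx_filter = 1;
        ucast.vport_to_add_to = 0;
        OSAL_MEMCPY(ucast.mac, mac, ETH_ALEN);

        return ecore_filter_ucast_cmd(p_dev, &ucast,
                                      ECORE_SPQ_MODE_EBLOCK, OSAL_NULL);
}
#endif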
1704
1705 /* Statistics related code */
1706 static void __ecore_get_vport_pstats_addrlen(struct ecore_hwfn *p_hwfn,
1707                                              u32 *p_addr, u32 *p_len,
1708                                              u16 statistics_bin)
1709 {
1710         if (IS_PF(p_hwfn->p_dev)) {
1711                 *p_addr = BAR0_MAP_REG_PSDM_RAM +
1712                     PSTORM_QUEUE_STAT_OFFSET(statistics_bin);
1713                 *p_len = sizeof(struct eth_pstorm_per_queue_stat);
1714         } else {
1715                 struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
1716                 struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
1717
1718                 *p_addr = p_resp->pfdev_info.stats_info.pstats.address;
1719                 *p_len = p_resp->pfdev_info.stats_info.pstats.len;
1720         }
1721 }
1722
1723 static void __ecore_get_vport_pstats(struct ecore_hwfn *p_hwfn,
1724                                      struct ecore_ptt *p_ptt,
1725                                      struct ecore_eth_stats *p_stats,
1726                                      u16 statistics_bin)
1727 {
1728         struct eth_pstorm_per_queue_stat pstats;
1729         u32 pstats_addr = 0, pstats_len = 0;
1730
1731         __ecore_get_vport_pstats_addrlen(p_hwfn, &pstats_addr, &pstats_len,
1732                                          statistics_bin);
1733
1734         OSAL_MEMSET(&pstats, 0, sizeof(pstats));
1735         ecore_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, pstats_len);
1736
1737         p_stats->common.tx_ucast_bytes +=
1738                 HILO_64_REGPAIR(pstats.sent_ucast_bytes);
1739         p_stats->common.tx_mcast_bytes +=
1740                 HILO_64_REGPAIR(pstats.sent_mcast_bytes);
1741         p_stats->common.tx_bcast_bytes +=
1742                 HILO_64_REGPAIR(pstats.sent_bcast_bytes);
1743         p_stats->common.tx_ucast_pkts +=
1744                 HILO_64_REGPAIR(pstats.sent_ucast_pkts);
1745         p_stats->common.tx_mcast_pkts +=
1746                 HILO_64_REGPAIR(pstats.sent_mcast_pkts);
1747         p_stats->common.tx_bcast_pkts +=
1748                 HILO_64_REGPAIR(pstats.sent_bcast_pkts);
1749         p_stats->common.tx_err_drop_pkts +=
1750                 HILO_64_REGPAIR(pstats.error_drop_pkts);
1751 }
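
/* The storm firmware keeps its 64-bit counters as {hi, lo} 32-bit register
 * pairs; HILO_64_REGPAIR() (from the ecore headers) folds such a pair into
 * a u64, conceptually:
 *
 *      val64 = ((u64)pair.hi << 32) + pair.lo;
 */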
1752
1753 static void __ecore_get_vport_tstats(struct ecore_hwfn *p_hwfn,
1754                                      struct ecore_ptt *p_ptt,
1755                                      struct ecore_eth_stats *p_stats)
1756 {
1757         struct tstorm_per_port_stat tstats;
1758         u32 tstats_addr, tstats_len;
1759
1760         if (IS_PF(p_hwfn->p_dev)) {
1761                 tstats_addr = BAR0_MAP_REG_TSDM_RAM +
1762                     TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn));
1763                 tstats_len = sizeof(struct tstorm_per_port_stat);
1764         } else {
1765                 struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
1766                 struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
1767
1768                 tstats_addr = p_resp->pfdev_info.stats_info.tstats.address;
1769                 tstats_len = p_resp->pfdev_info.stats_info.tstats.len;
1770         }
1771
1772         OSAL_MEMSET(&tstats, 0, sizeof(tstats));
1773         ecore_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, tstats_len);
1774
1775         p_stats->common.mftag_filter_discards +=
1776                 HILO_64_REGPAIR(tstats.mftag_filter_discard);
1777         p_stats->common.mac_filter_discards +=
1778                 HILO_64_REGPAIR(tstats.eth_mac_filter_discard);
1779 }
1780
1781 static void __ecore_get_vport_ustats_addrlen(struct ecore_hwfn *p_hwfn,
1782                                              u32 *p_addr, u32 *p_len,
1783                                              u16 statistics_bin)
1784 {
1785         if (IS_PF(p_hwfn->p_dev)) {
1786                 *p_addr = BAR0_MAP_REG_USDM_RAM +
1787                     USTORM_QUEUE_STAT_OFFSET(statistics_bin);
1788                 *p_len = sizeof(struct eth_ustorm_per_queue_stat);
1789         } else {
1790                 struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
1791                 struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
1792
1793                 *p_addr = p_resp->pfdev_info.stats_info.ustats.address;
1794                 *p_len = p_resp->pfdev_info.stats_info.ustats.len;
1795         }
1796 }
1797
1798 static void __ecore_get_vport_ustats(struct ecore_hwfn *p_hwfn,
1799                                      struct ecore_ptt *p_ptt,
1800                                      struct ecore_eth_stats *p_stats,
1801                                      u16 statistics_bin)
1802 {
1803         struct eth_ustorm_per_queue_stat ustats;
1804         u32 ustats_addr = 0, ustats_len = 0;
1805
1806         __ecore_get_vport_ustats_addrlen(p_hwfn, &ustats_addr, &ustats_len,
1807                                          statistics_bin);
1808
1809         OSAL_MEMSET(&ustats, 0, sizeof(ustats));
1810         ecore_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, ustats_len);
1811
1812         p_stats->common.rx_ucast_bytes +=
1813                 HILO_64_REGPAIR(ustats.rcv_ucast_bytes);
1814         p_stats->common.rx_mcast_bytes +=
1815                 HILO_64_REGPAIR(ustats.rcv_mcast_bytes);
1816         p_stats->common.rx_bcast_bytes +=
1817                 HILO_64_REGPAIR(ustats.rcv_bcast_bytes);
1818         p_stats->common.rx_ucast_pkts +=
1819                 HILO_64_REGPAIR(ustats.rcv_ucast_pkts);
1820         p_stats->common.rx_mcast_pkts +=
1821                 HILO_64_REGPAIR(ustats.rcv_mcast_pkts);
1822         p_stats->common.rx_bcast_pkts +=
1823                 HILO_64_REGPAIR(ustats.rcv_bcast_pkts);
1824 }
1825
1826 static void __ecore_get_vport_mstats_addrlen(struct ecore_hwfn *p_hwfn,
1827                                              u32 *p_addr, u32 *p_len,
1828                                              u16 statistics_bin)
1829 {
1830         if (IS_PF(p_hwfn->p_dev)) {
1831                 *p_addr = BAR0_MAP_REG_MSDM_RAM +
1832                     MSTORM_QUEUE_STAT_OFFSET(statistics_bin);
1833                 *p_len = sizeof(struct eth_mstorm_per_queue_stat);
1834         } else {
1835                 struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
1836                 struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp;
1837
1838                 *p_addr = p_resp->pfdev_info.stats_info.mstats.address;
1839                 *p_len = p_resp->pfdev_info.stats_info.mstats.len;
1840         }
1841 }
1842
1843 static void __ecore_get_vport_mstats(struct ecore_hwfn *p_hwfn,
1844                                      struct ecore_ptt *p_ptt,
1845                                      struct ecore_eth_stats *p_stats,
1846                                      u16 statistics_bin)
1847 {
1848         struct eth_mstorm_per_queue_stat mstats;
1849         u32 mstats_addr = 0, mstats_len = 0;
1850
1851         __ecore_get_vport_mstats_addrlen(p_hwfn, &mstats_addr, &mstats_len,
1852                                          statistics_bin);
1853
1854         OSAL_MEMSET(&mstats, 0, sizeof(mstats));
1855         ecore_memcpy_from(p_hwfn, p_ptt, &mstats, mstats_addr, mstats_len);
1856
1857         p_stats->common.no_buff_discards +=
1858                 HILO_64_REGPAIR(mstats.no_buff_discard);
1859         p_stats->common.packet_too_big_discard +=
1860                 HILO_64_REGPAIR(mstats.packet_too_big_discard);
1861         p_stats->common.ttl0_discard +=
1862                 HILO_64_REGPAIR(mstats.ttl0_discard);
1863         p_stats->common.tpa_coalesced_pkts +=
1864                 HILO_64_REGPAIR(mstats.tpa_coalesced_pkts);
1865         p_stats->common.tpa_coalesced_events +=
1866                 HILO_64_REGPAIR(mstats.tpa_coalesced_events);
1867         p_stats->common.tpa_aborts_num +=
1868                 HILO_64_REGPAIR(mstats.tpa_aborts_num);
1869         p_stats->common.tpa_coalesced_bytes +=
1870                 HILO_64_REGPAIR(mstats.tpa_coalesced_bytes);
1871 }
1872
1873 static void __ecore_get_vport_port_stats(struct ecore_hwfn *p_hwfn,
1874                                          struct ecore_ptt *p_ptt,
1875                                          struct ecore_eth_stats *p_stats)
1876 {
1877         struct ecore_eth_stats_common *p_common = &p_stats->common;
1878         struct port_stats port_stats;
1879         int j;
1880
1881         OSAL_MEMSET(&port_stats, 0, sizeof(port_stats));
1882
1883         ecore_memcpy_from(p_hwfn, p_ptt, &port_stats,
1884                           p_hwfn->mcp_info->port_addr +
1885                           OFFSETOF(struct public_port, stats),
1886                           sizeof(port_stats));
1887
1888         p_common->rx_64_byte_packets += port_stats.eth.r64;
1889         p_common->rx_65_to_127_byte_packets += port_stats.eth.r127;
1890         p_common->rx_128_to_255_byte_packets += port_stats.eth.r255;
1891         p_common->rx_256_to_511_byte_packets += port_stats.eth.r511;
1892         p_common->rx_512_to_1023_byte_packets += port_stats.eth.r1023;
1893         p_common->rx_1024_to_1518_byte_packets += port_stats.eth.r1518;
1894         p_common->rx_crc_errors += port_stats.eth.rfcs;
1895         p_common->rx_mac_crtl_frames += port_stats.eth.rxcf;
1896         p_common->rx_pause_frames += port_stats.eth.rxpf;
1897         p_common->rx_pfc_frames += port_stats.eth.rxpp;
1898         p_common->rx_align_errors += port_stats.eth.raln;
1899         p_common->rx_carrier_errors += port_stats.eth.rfcr;
1900         p_common->rx_oversize_packets += port_stats.eth.rovr;
1901         p_common->rx_jabbers += port_stats.eth.rjbr;
1902         p_common->rx_undersize_packets += port_stats.eth.rund;
1903         p_common->rx_fragments += port_stats.eth.rfrg;
1904         p_common->tx_64_byte_packets += port_stats.eth.t64;
1905         p_common->tx_65_to_127_byte_packets += port_stats.eth.t127;
1906         p_common->tx_128_to_255_byte_packets += port_stats.eth.t255;
1907         p_common->tx_256_to_511_byte_packets += port_stats.eth.t511;
1908         p_common->tx_512_to_1023_byte_packets += port_stats.eth.t1023;
1909         p_common->tx_1024_to_1518_byte_packets += port_stats.eth.t1518;
1910         p_common->tx_pause_frames += port_stats.eth.txpf;
1911         p_common->tx_pfc_frames += port_stats.eth.txpp;
1912         p_common->rx_mac_bytes += port_stats.eth.rbyte;
1913         p_common->rx_mac_uc_packets += port_stats.eth.rxuca;
1914         p_common->rx_mac_mc_packets += port_stats.eth.rxmca;
1915         p_common->rx_mac_bc_packets += port_stats.eth.rxbca;
1916         p_common->rx_mac_frames_ok += port_stats.eth.rxpok;
1917         p_common->tx_mac_bytes += port_stats.eth.tbyte;
1918         p_common->tx_mac_uc_packets += port_stats.eth.txuca;
1919         p_common->tx_mac_mc_packets += port_stats.eth.txmca;
1920         p_common->tx_mac_bc_packets += port_stats.eth.txbca;
1921         p_common->tx_mac_ctrl_frames += port_stats.eth.txcf;
1922         for (j = 0; j < 8; j++) {
1923                 p_common->brb_truncates += port_stats.brb.brb_truncate[j];
1924                 p_common->brb_discards += port_stats.brb.brb_discard[j];
1925         }
1926
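        /* The extended MAC counters differ per chip family: BB exposes
         * per-size-range RX/TX buckets up to 16383B plus LPI/collision
         * counters, while AH folds everything above 1518B into a single
         * 1519-to-max bucket.
         */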
1927         if (ECORE_IS_BB(p_hwfn->p_dev)) {
1928                 struct ecore_eth_stats_bb *p_bb = &p_stats->bb;
1929
1930                 p_bb->rx_1519_to_1522_byte_packets +=
1931                         port_stats.eth.u0.bb0.r1522;
1932                 p_bb->rx_1519_to_2047_byte_packets +=
1933                         port_stats.eth.u0.bb0.r2047;
1934                 p_bb->rx_2048_to_4095_byte_packets +=
1935                         port_stats.eth.u0.bb0.r4095;
1936                 p_bb->rx_4096_to_9216_byte_packets +=
1937                         port_stats.eth.u0.bb0.r9216;
1938                 p_bb->rx_9217_to_16383_byte_packets +=
1939                         port_stats.eth.u0.bb0.r16383;
1940                 p_bb->tx_1519_to_2047_byte_packets +=
1941                         port_stats.eth.u1.bb1.t2047;
1942                 p_bb->tx_2048_to_4095_byte_packets +=
1943                         port_stats.eth.u1.bb1.t4095;
1944                 p_bb->tx_4096_to_9216_byte_packets +=
1945                         port_stats.eth.u1.bb1.t9216;
1946                 p_bb->tx_9217_to_16383_byte_packets +=
1947                         port_stats.eth.u1.bb1.t16383;
1948                 p_bb->tx_lpi_entry_count += port_stats.eth.u2.bb2.tlpiec;
1949                 p_bb->tx_total_collisions += port_stats.eth.u2.bb2.tncl;
1950         } else {
1951                 struct ecore_eth_stats_ah *p_ah = &p_stats->ah;
1952
1953                 p_ah->rx_1519_to_max_byte_packets +=
1954                         port_stats.eth.u0.ah0.r1519_to_max;
1955                 p_ah->tx_1519_to_max_byte_packets +=
1956                         port_stats.eth.u1.ah1.t1519_to_max;
1957         }
1958
1959         p_common->link_change_count = ecore_rd(p_hwfn, p_ptt,
1960                                                p_hwfn->mcp_info->port_addr +
1961                                                OFFSETOF(struct public_port,
1962                                                         link_change_count));
1963 }
1964
1965 void __ecore_get_vport_stats(struct ecore_hwfn *p_hwfn,
1966                              struct ecore_ptt *p_ptt,
1967                              struct ecore_eth_stats *stats,
1968                              u16 statistics_bin, bool b_get_port_stats)
1969 {
1970         __ecore_get_vport_mstats(p_hwfn, p_ptt, stats, statistics_bin);
1971         __ecore_get_vport_ustats(p_hwfn, p_ptt, stats, statistics_bin);
1972         __ecore_get_vport_tstats(p_hwfn, p_ptt, stats);
1973         __ecore_get_vport_pstats(p_hwfn, p_ptt, stats, statistics_bin);
1974
1975 #ifndef ASIC_ONLY
1976         /* Avoid getting PORT stats for emulation. */
1977         if (CHIP_REV_IS_EMUL(p_hwfn->p_dev))
1978                 return;
1979 #endif
1980
1981         if (b_get_port_stats && p_hwfn->mcp_info)
1982                 __ecore_get_vport_port_stats(p_hwfn, p_ptt, stats);
1983 }
1984
1985 static void _ecore_get_vport_stats(struct ecore_dev *p_dev,
1986                                    struct ecore_eth_stats *stats)
1987 {
1988         u8 fw_vport = 0;
1989         int i;
1990
1991         OSAL_MEMSET(stats, 0, sizeof(*stats));
1992
1993         for_each_hwfn(p_dev, i) {
1994                 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
1995                 struct ecore_ptt *p_ptt = IS_PF(p_dev) ?
1996                     ecore_ptt_acquire(p_hwfn) : OSAL_NULL;
1997                 bool b_get_port_stats;
1998
1999                 if (IS_PF(p_dev)) {
2000                         /* The main vport is at relative index 0 */
2001                         if (ecore_fw_vport(p_hwfn, 0, &fw_vport)) {
2002                                 DP_ERR(p_hwfn, "No vport available!\n");
2003                                 goto out;
2004                         }
2005                 }
2006
2007                 if (IS_PF(p_dev) && !p_ptt) {
2008                         DP_ERR(p_hwfn, "Failed to acquire ptt\n");
2009                         continue;
2010                 }
2011
2012                 b_get_port_stats = IS_PF(p_dev) && IS_LEAD_HWFN(p_hwfn);
2013                 __ecore_get_vport_stats(p_hwfn, p_ptt, stats, fw_vport,
2014                                         b_get_port_stats);
2015
2016 out:
2017                 if (IS_PF(p_dev) && p_ptt)
2018                         ecore_ptt_release(p_hwfn, p_ptt);
2019         }
2020 }
2021
2022 void ecore_get_vport_stats(struct ecore_dev *p_dev,
2023                            struct ecore_eth_stats *stats)
2024 {
2025         u32 i;
2026
2027         if (!p_dev) {
2028                 OSAL_MEMSET(stats, 0, sizeof(*stats));
2029                 return;
2030         }
2031
2032         _ecore_get_vport_stats(p_dev, stats);
2033
2034         if (!p_dev->reset_stats)
2035                 return;
2036
2037         /* Subtract the baseline recorded at the last stats reset */
2038         for (i = 0; i < sizeof(struct ecore_eth_stats) / sizeof(u64); i++)
2039                 ((u64 *)stats)[i] -= ((u64 *)p_dev->reset_stats)[i];
2040 }
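
/* A usage sketch (illustrative, not part of the driver): once
 * ecore_reset_vport_stats() has recorded a baseline in p_dev->reset_stats,
 * ecore_get_vport_stats() returns counters relative to that point.
 * Kept compiled-out.
 */
#if 0
static u64 demo_rx_ucast_pkts_since_reset(struct ecore_dev *p_dev)
{
        struct ecore_eth_stats stats;

        ecore_get_vport_stats(p_dev, &stats); /* baseline already subtracted */
        return stats.common.rx_ucast_pkts;
}
#endif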
2041
2042 /* Zeroes the V-PORT specific portion of stats (port stats remain untouched) */
2043 void ecore_reset_vport_stats(struct ecore_dev *p_dev)
2044 {
2045         int i;
2046
2047         for_each_hwfn(p_dev, i) {
2048                 struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
2049                 struct eth_mstorm_per_queue_stat mstats;
2050                 struct eth_ustorm_per_queue_stat ustats;
2051                 struct eth_pstorm_per_queue_stat pstats;
2052                 struct ecore_ptt *p_ptt = IS_PF(p_dev) ?
2053                     ecore_ptt_acquire(p_hwfn) : OSAL_NULL;
2054                 u32 addr = 0, len = 0;
2055
2056                 if (IS_PF(p_dev) && !p_ptt) {
2057                         DP_ERR(p_hwfn, "Failed to acquire ptt\n");
2058                         continue;
2059                 }
2060
2061                 OSAL_MEMSET(&mstats, 0, sizeof(mstats));
2062                 __ecore_get_vport_mstats_addrlen(p_hwfn, &addr, &len, 0);
2063                 ecore_memcpy_to(p_hwfn, p_ptt, addr, &mstats, len);
2064
2065                 OSAL_MEMSET(&ustats, 0, sizeof(ustats));
2066                 __ecore_get_vport_ustats_addrlen(p_hwfn, &addr, &len, 0);
2067                 ecore_memcpy_to(p_hwfn, p_ptt, addr, &ustats, len);
2068
2069                 OSAL_MEMSET(&pstats, 0, sizeof(pstats));
2070                 __ecore_get_vport_pstats_addrlen(p_hwfn, &addr, &len, 0);
2071                 ecore_memcpy_to(p_hwfn, p_ptt, addr, &pstats, len);
2072
2073                 if (IS_PF(p_dev))
2074                         ecore_ptt_release(p_hwfn, p_ptt);
2075         }
2076
2077         /* PORT statistics are not necessarily reset, so we need to
2078          * read and create a baseline for future statistics. The link
2079          * change stat is maintained by the MFW; return its value as is.
2080          */
2081         if (!p_dev->reset_stats)
2082                 DP_INFO(p_dev, "Reset stats not allocated\n");
2083         else {
2084                 _ecore_get_vport_stats(p_dev, p_dev->reset_stats);
2085                 p_dev->reset_stats->common.link_change_count = 0;
2086         }
2087 }
2088
2089 void ecore_arfs_mode_configure(struct ecore_hwfn *p_hwfn,
2090                                struct ecore_ptt *p_ptt,
2091                                struct ecore_arfs_config_params *p_cfg_params)
2092 {
2093         if (OSAL_TEST_BIT(ECORE_MF_DISABLE_ARFS, &p_hwfn->p_dev->mf_bits))
2094                 return;
2095
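        /* GFT_PROFILE_TYPE_4_TUPLE classifies flows on src/dst IP address
         * and src/dst L4 port for each protocol enabled below.
         */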
2096         if (p_cfg_params->arfs_enable) {
2097                 ecore_gft_config(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
2098                                  p_cfg_params->tcp,
2099                                  p_cfg_params->udp,
2100                                  p_cfg_params->ipv4,
2101                                  p_cfg_params->ipv6,
2102                                  GFT_PROFILE_TYPE_4_TUPLE);
2103                 DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
2104                            "tcp = %s, udp = %s, ipv4 = %s, ipv6 = %s\n",
2105                            p_cfg_params->tcp ? "Enable" : "Disable",
2106                            p_cfg_params->udp ? "Enable" : "Disable",
2107                            p_cfg_params->ipv4 ? "Enable" : "Disable",
2108                            p_cfg_params->ipv6 ? "Enable" : "Disable");
2109         } else {
2110                 ecore_gft_disable(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
2111         }
2112         DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Configured ARFS mode: %s\n",
2113                    p_cfg_params->arfs_enable ? "Enable" : "Disable");
2114 }
2115
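/* Adds (b_is_add) or removes a GFT/RFS n-tuple filter. p_addr and length
 * describe a DMA-able buffer holding the packet header to match on; qid and
 * vport_id are relative indices, translated to absolute FW values below.
 */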
2116 enum _ecore_status_t
2117 ecore_configure_rfs_ntuple_filter(struct ecore_hwfn *p_hwfn,
2118                                   struct ecore_spq_comp_cb *p_cb,
2119                                   dma_addr_t p_addr, u16 length,
2120                                   u16 qid, u8 vport_id,
2121                                   bool b_is_add)
2122 {
2123         struct rx_update_gft_filter_data *p_ramrod = OSAL_NULL;
2124         struct ecore_spq_entry *p_ent = OSAL_NULL;
2125         struct ecore_sp_init_data init_data;
2126         u16 abs_rx_q_id = 0;
2127         u8 abs_vport_id = 0;
2128         enum _ecore_status_t rc = ECORE_NOTIMPL;
2129
2130         rc = ecore_fw_vport(p_hwfn, vport_id, &abs_vport_id);
2131         if (rc != ECORE_SUCCESS)
2132                 return rc;
2133
2134         rc = ecore_fw_l2_queue(p_hwfn, qid, &abs_rx_q_id);
2135         if (rc != ECORE_SUCCESS)
2136                 return rc;
2137
2138         /* Get SPQ entry */
2139         OSAL_MEMSET(&init_data, 0, sizeof(init_data));
2140         init_data.cid = ecore_spq_get_cid(p_hwfn);
2141
2142         init_data.opaque_fid = p_hwfn->hw_info.opaque_fid;
2143
2144         if (p_cb) {
2145                 init_data.comp_mode = ECORE_SPQ_MODE_CB;
2146                 init_data.p_comp_data = p_cb;
2147         } else {
2148                 init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK;
2149         }
2150
2151         rc = ecore_sp_init_request(p_hwfn, &p_ent,
2152                                    ETH_RAMROD_GFT_UPDATE_FILTER,
2153                                    PROTOCOLID_ETH, &init_data);
2154         if (rc != ECORE_SUCCESS)
2155                 return rc;
2156
2157         p_ramrod = &p_ent->ramrod.rx_update_gft;
2158
2159         DMA_REGPAIR_LE(p_ramrod->pkt_hdr_addr, p_addr);
2160         p_ramrod->pkt_hdr_length = OSAL_CPU_TO_LE16(length);
2161
2162         p_ramrod->action_icid_valid = 0;
2163         p_ramrod->action_icid = 0;
2164
2165         p_ramrod->rx_qid_valid = 1;
2166         p_ramrod->rx_qid = OSAL_CPU_TO_LE16(abs_rx_q_id);
2167
2168         p_ramrod->flow_id_valid = 0;
2169         p_ramrod->flow_id = 0;
2170
2171         p_ramrod->vport_id = OSAL_CPU_TO_LE16((u16)abs_vport_id);
2172         p_ramrod->filter_action = b_is_add ? GFT_ADD_FILTER
2173                                            : GFT_DELETE_FILTER;
2174
2175         DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
2176                    "V[%02x], Q[%04x] - %s filter from 0x%lx [length %04xb]\n",
2177                    abs_vport_id, abs_rx_q_id,
2178                    b_is_add ? "Adding" : "Removing",
2179                    (unsigned long)p_addr, length);
2180
2181         return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL);
2182 }
2183
2184 int ecore_get_rxq_coalesce(struct ecore_hwfn *p_hwfn,
2185                            struct ecore_ptt *p_ptt,
2186                            struct ecore_queue_cid *p_cid,
2187                            u16 *p_rx_coal)
2188 {
2189         u32 coalesce, address, is_valid;
2190         struct cau_sb_entry sb_entry;
2191         u8 timer_res;
2192         enum _ecore_status_t rc;
2193
2194         rc = ecore_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY +
2195                                  p_cid->sb_igu_id * sizeof(u64),
2196                                  (u64)(osal_uintptr_t)&sb_entry, 2, 0);
2197         if (rc != ECORE_SUCCESS) {
2198                 DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc);
2199                 return rc;
2200         }
2201
2202         timer_res = GET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES0);
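        /* The timeset stored per queue is scaled by the status block's timer
         * resolution; shifting it back by timer_res (below) recovers the
         * configured coalescing value.
         */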
2203
2204         address = BAR0_MAP_REG_USDM_RAM +
2205                   USTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id);
2206         coalesce = ecore_rd(p_hwfn, p_ptt, address);
2207
2208         is_valid = GET_FIELD(coalesce, COALESCING_TIMESET_VALID);
2209         if (!is_valid)
2210                 return ECORE_INVAL;
2211
2212         coalesce = GET_FIELD(coalesce, COALESCING_TIMESET_TIMESET);
2213         *p_rx_coal = (u16)(coalesce << timer_res);
2214
2215         return ECORE_SUCCESS;
2216 }
2217
2218 int ecore_get_txq_coalesce(struct ecore_hwfn *p_hwfn,
2219                            struct ecore_ptt *p_ptt,
2220                            struct ecore_queue_cid *p_cid,
2221                            u16 *p_tx_coal)
2222 {
2223         u32 coalesce, address, is_valid;
2224         struct cau_sb_entry sb_entry;
2225         u8 timer_res;
2226         enum _ecore_status_t rc;
2227
2228         rc = ecore_dmae_grc2host(p_hwfn, p_ptt, CAU_REG_SB_VAR_MEMORY +
2229                                  p_cid->sb_igu_id * sizeof(u64),
2230                                  (u64)(osal_uintptr_t)&sb_entry, 2, 0);
2231         if (rc != ECORE_SUCCESS) {
2232                 DP_ERR(p_hwfn, "dmae_grc2host failed %d\n", rc);
2233                 return rc;
2234         }
2235
2236         timer_res = GET_FIELD(sb_entry.params, CAU_SB_ENTRY_TIMER_RES1);
2237
2238         address = BAR0_MAP_REG_XSDM_RAM +
2239                   XSTORM_ETH_QUEUE_ZONE_OFFSET(p_cid->abs.queue_id);
2240         coalesce = ecore_rd(p_hwfn, p_ptt, address);
2241
2242         is_valid = GET_FIELD(coalesce, COALESCING_TIMESET_VALID);
2243         if (!is_valid)
2244                 return ECORE_INVAL;
2245
2246         coalesce = GET_FIELD(coalesce, COALESCING_TIMESET_TIMESET);
2247         *p_tx_coal = (u16)(coalesce << timer_res);
2248
2249         return ECORE_SUCCESS;
2250 }
2251
2252 enum _ecore_status_t
2253 ecore_get_queue_coalesce(struct ecore_hwfn *p_hwfn, u16 *p_coal,
2254                          void *handle)
2255 {
2256         struct ecore_queue_cid *p_cid = (struct ecore_queue_cid *)handle;
2257         enum _ecore_status_t rc = ECORE_SUCCESS;
2258         struct ecore_ptt *p_ptt;
2259
2260         if (IS_VF(p_hwfn->p_dev)) {
2261                 rc = ecore_vf_pf_get_coalesce(p_hwfn, p_coal, p_cid);
2262                 if (rc != ECORE_SUCCESS)
2263                         DP_NOTICE(p_hwfn, false,
2264                                   "Unable to read queue coalescing\n");
2265
2266                 return rc;
2267         }
2268
2269         p_ptt = ecore_ptt_acquire(p_hwfn);
2270         if (!p_ptt)
2271                 return ECORE_AGAIN;
2272
2273         if (p_cid->b_is_rx) {
2274                 rc = ecore_get_rxq_coalesce(p_hwfn, p_ptt, p_cid, p_coal);
2275                 if (rc != ECORE_SUCCESS)
2276                         goto out;
2277         } else {
2278                 rc = ecore_get_txq_coalesce(p_hwfn, p_ptt, p_cid, p_coal);
2279                 if (rc != ECORE_SUCCESS)
2280                         goto out;
2281         }
2282
2283 out:
2284         ecore_ptt_release(p_hwfn, p_ptt);
2285
2286         return rc;
2287 }
2288
2289 enum _ecore_status_t
2290 ecore_eth_tx_queue_maxrate(struct ecore_hwfn *p_hwfn,
2291                            struct ecore_ptt *p_ptt,
2292                            struct ecore_queue_cid *p_cid, u32 rate)
2293 {
2294         struct ecore_mcp_link_state *p_link;
2295         u8 vport;
2296
2297         vport = (u8)ecore_get_qm_vport_idx_rl(p_hwfn, p_cid->rel.queue_id);
2298         p_link = &ECORE_LEADING_HWFN(p_hwfn->p_dev)->mcp_info->link_output;
2299
2300         DP_VERBOSE(p_hwfn, ECORE_MSG_LINK,
2301                    "About to rate limit qm vport %d for queue %d with rate %d\n",
2302                    vport, p_cid->rel.queue_id, rate);
2303
2304         return ecore_init_vport_rl(p_hwfn, p_ptt, vport, rate,
2305                                    p_link->speed);
2306 }