Imported Upstream version 17.05.2
[deb_dpdk.git] / drivers / net / bnxt / bnxt_hwrm.c
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) Broadcom Limited.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Broadcom Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_version.h>

#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_ring.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"

#include <rte_io.h>

#define HWRM_CMD_TIMEOUT                2000

/*
 * HWRM Functions (sent to HWRM)
 * These are named bnxt_hwrm_*() and return -1 if bnxt_hwrm_send_message()
 * fails (i.e., a timeout), and a positive non-zero HWRM error code if the
 * command is rejected by the ChiMP firmware.
 */

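/*
 * Low-level request/response exchange: the request is written into the HWRM
 * channel in BAR0 (the rest of the request window is zeroed), the doorbell
 * at BAR0 offset 0x100 is rung, and the DMA response buffer is then polled
 * until its last byte carries HWRM_RESP_VALID_KEY or HWRM_CMD_TIMEOUT
 * iterations (600us apart) elapse. Callers are expected to hold
 * bp->hwrm_lock; use bnxt_hwrm_send_message() otherwise.
 */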
static int bnxt_hwrm_send_message_locked(struct bnxt *bp, void *msg,
                                        uint32_t msg_len)
{
        unsigned int i;
        struct input *req = msg;
        struct output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t *data = msg;
        uint8_t *bar;
        uint8_t *valid;

        /* Write request msg to hwrm channel */
        for (i = 0; i < msg_len; i += 4) {
                bar = (uint8_t *)bp->bar0 + i;
                rte_write32(*data, bar);
                data++;
        }

        /* Zero the rest of the request space */
        for (; i < bp->max_req_len; i += 4) {
                bar = (uint8_t *)bp->bar0 + i;
                rte_write32(0, bar);
        }

        /* Ring channel doorbell */
        bar = (uint8_t *)bp->bar0 + 0x100;
        rte_write32(1, bar);

        /* Poll for the valid bit */
        for (i = 0; i < HWRM_CMD_TIMEOUT; i++) {
                /* Sanity check on the resp->resp_len */
                rte_rmb();
                if (resp->resp_len && resp->resp_len <= bp->max_resp_len) {
                        /* Last byte of resp contains the valid key */
                        valid = (uint8_t *)resp + resp->resp_len - 1;
                        if (*valid == HWRM_RESP_VALID_KEY)
                                break;
                }
                rte_delay_us(600);
        }

        if (i >= HWRM_CMD_TIMEOUT) {
                RTE_LOG(ERR, PMD, "Error sending msg %x\n",
                        req->req_type);
                return -1;
        }
        return 0;
}

static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg, uint32_t msg_len)
{
        int rc;

        rte_spinlock_lock(&bp->hwrm_lock);
        rc = bnxt_hwrm_send_message_locked(bp, msg, msg_len);
        rte_spinlock_unlock(&bp->hwrm_lock);
        return rc;
}

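/*
 * HWRM_PREP fills the common header of an HWRM request: it clears the
 * response buffer, then sets the request type, completion ring (-1 for
 * none), a fresh sequence number, the target_id (0xffff), and the DMA
 * address the firmware writes the response to. It assumes a local "req"
 * and a "struct bnxt *bp" in scope.
 */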
#define HWRM_PREP(req, type, cr, resp) \
        memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
        req.req_type = rte_cpu_to_le_16(HWRM_##type); \
        req.cmpl_ring = rte_cpu_to_le_16(cr); \
        req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++); \
        req.target_id = rte_cpu_to_le_16(0xffff); \
        req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr)

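/*
 * HWRM_CHECK_RESULT is used right after bnxt_hwrm_send_message(): it makes
 * the enclosing function return the negative rc on a transport failure, or
 * the positive firmware error code when resp->error_code is non-zero. It
 * assumes local variables "rc" and "resp".
 */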
#define HWRM_CHECK_RESULT \
        { \
                if (rc) { \
                        RTE_LOG(ERR, PMD, "%s failed rc:%d\n", \
                                __func__, rc); \
                        return rc; \
                } \
                if (resp->error_code) { \
                        rc = rte_le_to_cpu_16(resp->error_code); \
                        RTE_LOG(ERR, PMD, "%s error %d\n", __func__, rc); \
                        return rc; \
                } \
        }

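/*
 * The command wrappers below all follow the same pattern; a minimal sketch
 * (a hypothetical command "FOO" is used purely for illustration):
 *
 *	struct hwrm_foo_input req = {.req_type = 0 };
 *	struct hwrm_foo_output *resp = bp->hwrm_cmd_resp_addr;
 *
 *	HWRM_PREP(req, FOO, -1, resp);
 *	... fill command-specific request fields ...
 *	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 *	HWRM_CHECK_RESULT;
 *	... consume command-specific response fields ...
 *	return rc;
 */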
int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, CFA_L2_SET_RX_MASK, -1, resp);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
        req.mask = 0;

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}

int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t mask = 0;

        HWRM_PREP(req, CFA_L2_SET_RX_MASK, -1, resp);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

        /* FIXME: add the multicast flag once adding multicast addresses
         * is supported by ethtool.
         */
        if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
        if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
        req.mask = rte_cpu_to_le_32(HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST |
                                    mask);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}

int bnxt_hwrm_clear_filter(struct bnxt *bp,
                           struct bnxt_filter_info *filter)
{
        int rc = 0;
        struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;

        if (filter->fw_l2_filter_id == UINT64_MAX)
                return 0;

        HWRM_PREP(req, CFA_L2_FILTER_FREE, -1, resp);

        req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        filter->fw_l2_filter_id = UINT64_MAX;

        return 0;
}

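/*
 * Allocate (or re-allocate) an L2 filter for the given VNIC. Any existing
 * firmware filter is freed first; the DST_ID enable is always set so that
 * the filter steers matching traffic to the VNIC, and the remaining fields
 * (L2 address, address mask, outer VLAN and its mask) are copied only when
 * the caller enabled them in filter->enables.
 */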
int bnxt_hwrm_set_filter(struct bnxt *bp,
                         struct bnxt_vnic_info *vnic,
                         struct bnxt_filter_info *filter)
{
        int rc = 0;
        struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t enables = 0;

        if (filter->fw_l2_filter_id != UINT64_MAX)
                bnxt_hwrm_clear_filter(bp, filter);

        HWRM_PREP(req, CFA_L2_FILTER_ALLOC, -1, resp);

        req.flags = rte_cpu_to_le_32(filter->flags);

        enables = filter->enables |
              HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
        req.dst_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
                memcpy(req.l2_addr, filter->l2_addr,
                       ETHER_ADDR_LEN);
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
                memcpy(req.l2_addr_mask, filter->l2_addr_mask,
                       ETHER_ADDR_LEN);
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
                req.l2_ovlan = filter->l2_ovlan;
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
                req.l2_ovlan_mask = filter->l2_ovlan_mask;

        req.enables = rte_cpu_to_le_32(enables);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);

        return rc;
}

int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, void *fwd_cmd)
{
        int rc;
        struct hwrm_exec_fwd_resp_input req = {.req_type = 0 };
        struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, EXEC_FWD_RESP, -1, resp);

        memcpy(req.encap_request, fwd_cmd,
               sizeof(req.encap_request));

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}

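/*
 * Query function capabilities. A fid of 0xffff asks the firmware about the
 * calling function itself. The resource maxima (rings, ring groups, RSS/COS
 * contexts, L2 contexts, VNICs, ...) are cached in bp->pf or bp->vf
 * depending on whether this port is a physical or a virtual function.
 */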
int bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_func_qcaps_input req = {.req_type = 0 };
        struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, FUNC_QCAPS, -1, resp);

        req.fid = rte_cpu_to_le_16(0xffff);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
        if (BNXT_PF(bp)) {
                struct bnxt_pf_info *pf = &bp->pf;

                pf->fw_fid = rte_le_to_cpu_32(resp->fid);
                pf->port_id = resp->port_id;
                memcpy(pf->mac_addr, resp->mac_address, ETHER_ADDR_LEN);
                pf->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
                pf->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
                pf->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
                pf->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
                pf->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
                pf->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
                pf->first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
                pf->max_vfs = rte_le_to_cpu_16(resp->max_vfs);
        } else {
                struct bnxt_vf_info *vf = &bp->vf;

                vf->fw_fid = rte_le_to_cpu_32(resp->fid);
                memcpy(vf->mac_addr, resp->mac_address, ETHER_ADDR_LEN);
                vf->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
                vf->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
                vf->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
                vf->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
                vf->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
                vf->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
        }

        return rc;
}

int bnxt_hwrm_func_reset(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_func_reset_input req = {.req_type = 0 };
        struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, FUNC_RESET, -1, resp);

        req.enables = rte_cpu_to_le_32(0);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}

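/*
 * Register this driver instance with the firmware, advertising the DPDK
 * version as the driver version and requesting forwarding of async events
 * and of the given VF requests. Idempotent: a no-op once
 * BNXT_FLAG_REGISTERED is set.
 */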
int bnxt_hwrm_func_driver_register(struct bnxt *bp, uint32_t flags,
                                   uint32_t *vf_req_fwd)
{
        int rc;
        struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
        struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;

        if (bp->flags & BNXT_FLAG_REGISTERED)
                return 0;

        HWRM_PREP(req, FUNC_DRV_RGTR, -1, resp);
        req.flags = rte_cpu_to_le_32(flags);
        req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
                        HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
        req.ver_maj = RTE_VER_YEAR;
        req.ver_min = RTE_VER_MONTH;
        req.ver_upd = RTE_VER_MINOR;

        memcpy(req.vf_req_fwd, vf_req_fwd, sizeof(req.vf_req_fwd));

        req.async_event_fwd[0] |= rte_cpu_to_le_32(0x1);   /* TODO: Use MACRO */

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        bp->flags |= BNXT_FLAG_REGISTERED;

        return rc;
}

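/*
 * Negotiate the HWRM interface version with the firmware. The major version
 * must match; a minor/update mismatch is only logged. If the firmware wants
 * a larger response buffer than currently allocated, the DMA response
 * buffer is re-allocated under bp->hwrm_lock, which is why this function
 * calls bnxt_hwrm_send_message_locked() directly.
 */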
int bnxt_hwrm_ver_get(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_ver_get_input req = {.req_type = 0 };
        struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t my_version;
        uint32_t fw_version;
        uint16_t max_resp_len;
        char type[RTE_MEMZONE_NAMESIZE];

        HWRM_PREP(req, VER_GET, -1, resp);

        req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
        req.hwrm_intf_min = HWRM_VERSION_MINOR;
        req.hwrm_intf_upd = HWRM_VERSION_UPDATE;

        /*
         * Hold the lock since we may be adjusting the response pointers.
         */
        rte_spinlock_lock(&bp->hwrm_lock);
        rc = bnxt_hwrm_send_message_locked(bp, &req, sizeof(req));

        /*
         * Check the result inline instead of via HWRM_CHECK_RESULT so that
         * the error paths release the lock before returning.
         */
        if (rc) {
                RTE_LOG(ERR, PMD, "%s failed rc:%d\n", __func__, rc);
                goto error;
        }
        if (resp->error_code) {
                rc = rte_le_to_cpu_16(resp->error_code);
                RTE_LOG(ERR, PMD, "%s error %d\n", __func__, rc);
                goto error;
        }

        RTE_LOG(INFO, PMD, "%d.%d.%d:%d.%d.%d\n",
                resp->hwrm_intf_maj, resp->hwrm_intf_min,
                resp->hwrm_intf_upd,
                resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld);
        RTE_LOG(INFO, PMD, "Driver HWRM version: %d.%d.%d\n",
                HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);

        my_version = HWRM_VERSION_MAJOR << 16;
        my_version |= HWRM_VERSION_MINOR << 8;
        my_version |= HWRM_VERSION_UPDATE;

        fw_version = resp->hwrm_intf_maj << 16;
        fw_version |= resp->hwrm_intf_min << 8;
        fw_version |= resp->hwrm_intf_upd;

        if (resp->hwrm_intf_maj != HWRM_VERSION_MAJOR) {
                RTE_LOG(ERR, PMD, "Unsupported firmware API version\n");
                rc = -EINVAL;
                goto error;
        }

        if (my_version != fw_version) {
                RTE_LOG(INFO, PMD, "BNXT Driver/HWRM API mismatch.\n");
                if (my_version < fw_version) {
                        RTE_LOG(INFO, PMD,
                                "Firmware API version is newer than driver.\n");
                        RTE_LOG(INFO, PMD,
                                "The driver may be missing features.\n");
                } else {
                        RTE_LOG(INFO, PMD,
                                "Firmware API version is older than driver.\n");
                        RTE_LOG(INFO, PMD,
                                "Not all driver features may be functional.\n");
                }
        }

        if (bp->max_req_len > resp->max_req_win_len) {
                RTE_LOG(ERR, PMD, "Unsupported request length\n");
                rc = -EINVAL;
        }
        bp->max_req_len = resp->max_req_win_len;
        max_resp_len = resp->max_resp_len;
        if (bp->max_resp_len != max_resp_len) {
                sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x",
                        bp->pdev->addr.domain, bp->pdev->addr.bus,
                        bp->pdev->addr.devid, bp->pdev->addr.function);

                rte_free(bp->hwrm_cmd_resp_addr);

                bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0);
                if (bp->hwrm_cmd_resp_addr == NULL) {
                        rc = -ENOMEM;
                        goto error;
                }
                bp->hwrm_cmd_resp_dma_addr =
                        rte_malloc_virt2phy(bp->hwrm_cmd_resp_addr);
                bp->max_resp_len = max_resp_len;
        }

error:
        rte_spinlock_unlock(&bp->hwrm_lock);
        return rc;
}

int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
{
        int rc;
        struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
        struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;

        if (!(bp->flags & BNXT_FLAG_REGISTERED))
                return 0;

        HWRM_PREP(req, FUNC_DRV_UNRGTR, -1, resp);
        req.flags = rte_cpu_to_le_32(flags);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        bp->flags &= ~BNXT_FLAG_REGISTERED;

        return rc;
}

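/*
 * Program the PHY. With conf->link_up set, either a forced speed
 * (conf->link_speed != 0) or autonegotiation (auto mode, speed mask and an
 * optional auto speed) is configured along with duplex and pause settings;
 * with conf->link_up clear, the link is forced down.
 */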
static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
{
        int rc = 0;
        struct hwrm_port_phy_cfg_input req = {0};
        struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t enables = 0;
        uint32_t link_speed_mask =
                HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;

        HWRM_PREP(req, PORT_PHY_CFG, -1, resp);

        if (conf->link_up) {
                req.flags = rte_cpu_to_le_32(conf->phy_flags);
                req.force_link_speed = rte_cpu_to_le_16(conf->link_speed);
                /*
                 * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
                 * any auto mode, even "none".
                 */
                if (!conf->link_speed) {
                        req.auto_mode = conf->auto_mode;
                        enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
                        if (conf->auto_mode ==
                            HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK) {
                                req.auto_link_speed_mask =
                                        conf->auto_link_speed_mask;
                                enables |= link_speed_mask;
                        }
                        if (bp->link_info.auto_link_speed) {
                                req.auto_link_speed =
                                        bp->link_info.auto_link_speed;
                                enables |=
                                HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED;
                        }
                }
                req.auto_duplex = conf->duplex;
                enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
                req.auto_pause = conf->auto_pause;
                req.force_pause = conf->force_pause;
                /*
                 * Advertise auto pause only when it is requested and nothing
                 * is forced; otherwise apply the forced pause setting.
                 */
                if (req.auto_pause && !req.force_pause)
                        enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
                else
                        enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;

                req.enables = rte_cpu_to_le_32(enables);
        } else {
                req.flags =
                rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DOWN);
                RTE_LOG(INFO, PMD, "Force Link Down\n");
        }

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}

static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
                                   struct bnxt_link_info *link_info)
{
        int rc = 0;
        struct hwrm_port_phy_qcfg_input req = {0};
        struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, PORT_PHY_QCFG, -1, resp);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        link_info->phy_link_status = resp->link;
        link_info->link_up =
                (link_info->phy_link_status ==
                 HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) ? 1 : 0;
        link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
        link_info->duplex = resp->duplex;
        link_info->pause = resp->pause;
        link_info->auto_pause = resp->auto_pause;
        link_info->force_pause = resp->force_pause;
        link_info->auto_mode = resp->auto_mode;

        link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
        link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
        link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
        link_info->phy_ver[0] = resp->phy_maj;
        link_info->phy_ver[1] = resp->phy_min;
        link_info->phy_ver[2] = resp->phy_bld;

        return rc;
}

int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
        struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, QUEUE_QPORTCFG, -1, resp);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

#define GET_QUEUE_INFO(x) \
        bp->cos_queue[x].id = resp->queue_id##x; \
        bp->cos_queue[x].profile = resp->queue_id##x##_service_profile

        GET_QUEUE_INFO(0);
        GET_QUEUE_INFO(1);
        GET_QUEUE_INFO(2);
        GET_QUEUE_INFO(3);
        GET_QUEUE_INFO(4);
        GET_QUEUE_INFO(5);
        GET_QUEUE_INFO(6);
        GET_QUEUE_INFO(7);

        return rc;
}

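/*
 * Allocate a firmware ring of the given type (TX, RX or completion). TX
 * rings are additionally bound to COS queue 0, and TX/RX rings reference
 * the completion ring and statistics context of their ring group. On
 * success the firmware ring ID is stored in ring->fw_ring_id.
 */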
int bnxt_hwrm_ring_alloc(struct bnxt *bp,
                         struct bnxt_ring *ring,
                         uint32_t ring_type, uint32_t map_index,
                         uint32_t stats_ctx_id)
{
        int rc = 0;
        struct hwrm_ring_alloc_input req = {.req_type = 0 };
        struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_ALLOC, -1, resp);

        req.enables = rte_cpu_to_le_32(0);

        req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
        req.fbo = rte_cpu_to_le_32(0);
        /* Association of ring index with doorbell index */
        req.logical_id = rte_cpu_to_le_16(map_index);

        switch (ring_type) {
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
                req.queue_id = bp->cos_queue[0].id;
                /* FALLTHROUGH */
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
                req.ring_type = ring_type;
                req.cmpl_ring_id =
                    rte_cpu_to_le_16(bp->grp_info[map_index].cp_fw_ring_id);
                req.length = rte_cpu_to_le_32(ring->ring_size);
                req.stat_ctx_id = rte_cpu_to_le_16(stats_ctx_id);
                req.enables = rte_cpu_to_le_32(rte_le_to_cpu_32(req.enables) |
                        HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID);
                break;
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_CMPL:
                req.ring_type = ring_type;
                /*
                 * TODO: Some HWRM versions crash with
                 * HWRM_RING_ALLOC_INPUT_INT_MODE_POLL
                 */
                req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
                req.length = rte_cpu_to_le_32(ring->ring_size);
                break;
        default:
                RTE_LOG(ERR, PMD, "hwrm alloc invalid ring type %u\n",
                        ring_type);
                return -1;
        }

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        if (rc || resp->error_code) {
                if (rc == 0 && resp->error_code)
                        rc = rte_le_to_cpu_16(resp->error_code);
                switch (ring_type) {
                case HWRM_RING_ALLOC_INPUT_RING_TYPE_CMPL:
                        RTE_LOG(ERR, PMD,
                                "hwrm_ring_alloc cp failed. rc:%d\n", rc);
                        return rc;
                case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
                        RTE_LOG(ERR, PMD,
                                "hwrm_ring_alloc rx failed. rc:%d\n", rc);
                        return rc;
                case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
                        RTE_LOG(ERR, PMD,
                                "hwrm_ring_alloc tx failed. rc:%d\n", rc);
                        return rc;
                default:
                        RTE_LOG(ERR, PMD, "Invalid ring. rc:%d\n", rc);
                        return rc;
                }
        }

        ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);
        return rc;
}

int bnxt_hwrm_ring_free(struct bnxt *bp,
                        struct bnxt_ring *ring, uint32_t ring_type)
{
        int rc;
        struct hwrm_ring_free_input req = {.req_type = 0 };
        struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_FREE, -1, resp);

        req.ring_type = ring_type;
        req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        if (rc || resp->error_code) {
                if (rc == 0 && resp->error_code)
                        rc = rte_le_to_cpu_16(resp->error_code);

                switch (ring_type) {
                case HWRM_RING_FREE_INPUT_RING_TYPE_CMPL:
                        RTE_LOG(ERR, PMD, "hwrm_ring_free cp failed. rc:%d\n",
                                rc);
                        return rc;
                case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
                        RTE_LOG(ERR, PMD, "hwrm_ring_free rx failed. rc:%d\n",
                                rc);
                        return rc;
                case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
                        RTE_LOG(ERR, PMD, "hwrm_ring_free tx failed. rc:%d\n",
                                rc);
                        return rc;
                default:
                        RTE_LOG(ERR, PMD, "Invalid ring, rc:%d\n", rc);
                        return rc;
                }
        }
        return 0;
}

int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
{
        int rc = 0;
        struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
        struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_GRP_ALLOC, -1, resp);

        req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
        req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
        req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
        req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        bp->grp_info[idx].fw_grp_id =
            rte_le_to_cpu_16(resp->ring_group_id);

        return rc;
}

int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
{
        int rc;
        struct hwrm_ring_grp_free_input req = {.req_type = 0 };
        struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_GRP_FREE, -1, resp);

        req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;
        return rc;
}

int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
{
        int rc = 0;
        struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
        struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;

        if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
                return rc;

        HWRM_PREP(req, STAT_CTX_CLR_STATS, -1, resp);

        req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}

int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp,
                             struct bnxt_cp_ring_info *cpr, unsigned int idx)
{
        int rc;
        struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
        struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, STAT_CTX_ALLOC, -1, resp);

        req.update_period_ms = rte_cpu_to_le_32(1000);

        /* HWRM_PREP already assigned the sequence ID */
        req.stats_dma_addr =
            rte_cpu_to_le_64(cpr->hw_stats_map);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        cpr->hw_stats_ctx_id = rte_le_to_cpu_16(resp->stat_ctx_id);
        bp->grp_info[idx].fw_stats_ctx = cpr->hw_stats_ctx_id;

        return rc;
}

int bnxt_hwrm_stat_ctx_free(struct bnxt *bp,
                            struct bnxt_cp_ring_info *cpr, unsigned int idx)
{
        int rc;
        struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
        struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, STAT_CTX_FREE, -1, resp);

        req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
        bp->grp_info[idx].fw_stats_ctx = cpr->hw_stats_ctx_id;

        return rc;
}

int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0, i, j;
        struct hwrm_vnic_alloc_input req = { 0 };
        struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        /* map ring groups to this vnic */
        for (i = vnic->start_grp_id, j = 0; i <= vnic->end_grp_id; i++, j++) {
                if (bp->grp_info[i].fw_grp_id == (uint16_t)HWRM_NA_SIGNATURE) {
                        RTE_LOG(ERR, PMD,
                                "Not enough ring groups avail:%x req:%x\n", j,
                                (vnic->end_grp_id - vnic->start_grp_id) + 1);
                        break;
                }
                vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
        }

        vnic->fw_rss_cos_lb_ctx = (uint16_t)HWRM_NA_SIGNATURE;
        vnic->ctx_is_rss_cos_lb = HW_CONTEXT_NONE;

        HWRM_PREP(req, VNIC_ALLOC, -1, resp);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
        return rc;
}

int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_vnic_cfg_input req = {.req_type = 0 };
        struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, VNIC_CFG, -1, resp);

        /* Only RSS is supported for now; TBD: COS & LB */
        req.enables =
            rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP |
                             HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE |
                             HWRM_VNIC_CFG_INPUT_ENABLES_MRU);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
        req.dflt_ring_grp =
                rte_cpu_to_le_16(bp->grp_info[vnic->start_grp_id].fw_grp_id);
        req.rss_rule = rte_cpu_to_le_16(vnic->fw_rss_cos_lb_ctx);
        req.cos_rule = rte_cpu_to_le_16(0xffff);
        req.lb_rule = rte_cpu_to_le_16(0xffff);
        req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
                                   ETHER_CRC_LEN + VLAN_TAG_SIZE);
        if (vnic->func_default)
                req.flags =
                    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
        if (vnic->vlan_strip)
                req.flags |=
                    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}

int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
        struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
                                                bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC, -1, resp);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        vnic->fw_rss_cos_lb_ctx = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);

        return rc;
}

int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
        struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
                                                bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE, -1, resp);

        req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(vnic->fw_rss_cos_lb_ctx);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        vnic->fw_rss_cos_lb_ctx = INVALID_HW_RING_ID;

        return rc;
}

int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_vnic_free_input req = {.req_type = 0 };
        struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;

        if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
                return rc;

        HWRM_PREP(req, VNIC_FREE, -1, resp);

        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        vnic->fw_vnic_id = INVALID_HW_RING_ID;
        return rc;
}

int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
                           struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
        struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, VNIC_RSS_CFG, -1, resp);

        req.hash_type = rte_cpu_to_le_32(vnic->hash_type);

        req.ring_grp_tbl_addr =
            rte_cpu_to_le_64(vnic->rss_table_dma_addr);
        req.hash_key_tbl_addr =
            rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
        req.rss_ctx_idx = rte_cpu_to_le_16(vnic->fw_rss_cos_lb_ctx);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}

/*
 * HWRM utility functions
 */

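/*
 * Note on indexing used by the iterators below: completion-ring index i maps
 * to group info slot i + 1, since slot 0 is reserved for the default
 * completion ring. RX queues come first (0..rx_cp_nr_rings-1), followed by
 * the TX queues.
 */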
int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
{
        unsigned int i;
        int rc = 0;

        for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
                struct bnxt_tx_queue *txq;
                struct bnxt_rx_queue *rxq;
                struct bnxt_cp_ring_info *cpr;

                if (i >= bp->rx_cp_nr_rings) {
                        txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
                        cpr = txq->cp_ring;
                } else {
                        rxq = bp->rx_queues[i];
                        cpr = rxq->cp_ring;
                }

                rc = bnxt_hwrm_stat_clear(bp, cpr);
                if (rc)
                        return rc;
        }
        return 0;
}

int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
{
        int rc;
        unsigned int i;
        struct bnxt_cp_ring_info *cpr;

        for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
                unsigned int idx = i + 1;

                if (i >= bp->rx_cp_nr_rings)
                        cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
                else
                        cpr = bp->rx_queues[i]->cp_ring;
                if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
                        rc = bnxt_hwrm_stat_ctx_free(bp, cpr, idx);
                        if (rc)
                                return rc;
                }
        }
        return 0;
}

int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
{
        unsigned int i;
        int rc = 0;

        for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
                struct bnxt_tx_queue *txq;
                struct bnxt_rx_queue *rxq;
                struct bnxt_cp_ring_info *cpr;
                unsigned int idx = i + 1;

                if (i >= bp->rx_cp_nr_rings) {
                        txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
                        cpr = txq->cp_ring;
                } else {
                        rxq = bp->rx_queues[i];
                        cpr = rxq->cp_ring;
                }

                rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, idx);

                if (rc)
                        return rc;
        }
        return rc;
}

int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
{
        uint16_t i;
        int rc = 0;

        for (i = 0; i < bp->rx_cp_nr_rings; i++) {
                unsigned int idx = i + 1;

                if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID) {
                        RTE_LOG(ERR, PMD,
                                "Attempt to free invalid ring group %u\n",
                                idx);
                        continue;
                }

                rc = bnxt_hwrm_ring_grp_free(bp, idx);

                if (rc)
                        return rc;
        }
        return rc;
}

static void bnxt_free_cp_ring(struct bnxt *bp,
                              struct bnxt_cp_ring_info *cpr, unsigned int idx)
{
        struct bnxt_ring *cp_ring = cpr->cp_ring_struct;

        bnxt_hwrm_ring_free(bp, cp_ring,
                        HWRM_RING_FREE_INPUT_RING_TYPE_CMPL);
        cp_ring->fw_ring_id = INVALID_HW_RING_ID;
        bp->grp_info[idx].cp_fw_ring_id = INVALID_HW_RING_ID;
        memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
                        sizeof(*cpr->cp_desc_ring));
        cpr->cp_raw_cons = 0;
}

int bnxt_free_all_hwrm_rings(struct bnxt *bp)
{
        unsigned int i;
        int rc = 0;

        for (i = 0; i < bp->tx_cp_nr_rings; i++) {
                struct bnxt_tx_queue *txq = bp->tx_queues[i];
                struct bnxt_tx_ring_info *txr = txq->tx_ring;
                struct bnxt_ring *ring = txr->tx_ring_struct;
                struct bnxt_cp_ring_info *cpr = txq->cp_ring;
                unsigned int idx = bp->rx_cp_nr_rings + i + 1;

                if (ring->fw_ring_id != INVALID_HW_RING_ID) {
                        bnxt_hwrm_ring_free(bp, ring,
                                        HWRM_RING_FREE_INPUT_RING_TYPE_TX);
                        ring->fw_ring_id = INVALID_HW_RING_ID;
                        memset(txr->tx_desc_ring, 0,
                                        txr->tx_ring_struct->ring_size *
                                        sizeof(*txr->tx_desc_ring));
                        memset(txr->tx_buf_ring, 0,
                                        txr->tx_ring_struct->ring_size *
                                        sizeof(*txr->tx_buf_ring));
                        txr->tx_prod = 0;
                        txr->tx_cons = 0;
                }
                if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
                        bnxt_free_cp_ring(bp, cpr, idx);
        }

        for (i = 0; i < bp->rx_cp_nr_rings; i++) {
                struct bnxt_rx_queue *rxq = bp->rx_queues[i];
                struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
                struct bnxt_ring *ring = rxr->rx_ring_struct;
                struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
                unsigned int idx = i + 1;

                if (ring->fw_ring_id != INVALID_HW_RING_ID) {
                        bnxt_hwrm_ring_free(bp, ring,
                                        HWRM_RING_FREE_INPUT_RING_TYPE_RX);
                        ring->fw_ring_id = INVALID_HW_RING_ID;
                        bp->grp_info[idx].rx_fw_ring_id = INVALID_HW_RING_ID;
                        memset(rxr->rx_desc_ring, 0,
                                        rxr->rx_ring_struct->ring_size *
                                        sizeof(*rxr->rx_desc_ring));
                        memset(rxr->rx_buf_ring, 0,
                                        rxr->rx_ring_struct->ring_size *
                                        sizeof(*rxr->rx_buf_ring));
                        rxr->rx_prod = 0;
                }
                if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
                        bnxt_free_cp_ring(bp, cpr, idx);
        }

        /* Default completion ring */
        {
                struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;

                if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
                        bnxt_free_cp_ring(bp, cpr, 0);
        }

        return rc;
}

int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
{
        uint16_t i;
        int rc = 0;

        for (i = 0; i < bp->rx_cp_nr_rings; i++) {
                unsigned int idx = i + 1;

                if (bp->grp_info[idx].cp_fw_ring_id == INVALID_HW_RING_ID ||
                    bp->grp_info[idx].rx_fw_ring_id == INVALID_HW_RING_ID)
                        continue;

                rc = bnxt_hwrm_ring_grp_alloc(bp, idx);

                if (rc)
                        return rc;
        }
        return rc;
}

void bnxt_free_hwrm_resources(struct bnxt *bp)
{
        /* Release the rte_malloc'd response buffer */
        rte_free(bp->hwrm_cmd_resp_addr);
        bp->hwrm_cmd_resp_addr = NULL;
        bp->hwrm_cmd_resp_dma_addr = 0;
}

int bnxt_alloc_hwrm_resources(struct bnxt *bp)
{
        struct rte_pci_device *pdev = bp->pdev;
        char type[RTE_MEMZONE_NAMESIZE];

        sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x", pdev->addr.domain,
                pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
        bp->max_req_len = HWRM_MAX_REQ_LEN;
        bp->max_resp_len = HWRM_MAX_RESP_LEN;
        bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
        if (bp->hwrm_cmd_resp_addr == NULL)
                return -ENOMEM;
        bp->hwrm_cmd_resp_dma_addr =
                rte_malloc_virt2phy(bp->hwrm_cmd_resp_addr);
        rte_spinlock_init(&bp->hwrm_lock);

        return 0;
}

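/*
 * A plausible initialization/teardown sequence (sketch only; error handling
 * omitted):
 *
 *	rc = bnxt_alloc_hwrm_resources(bp);  // DMA response buffer + lock
 *	rc = bnxt_hwrm_ver_get(bp);          // may grow the response buffer
 *	...
 *	bnxt_free_hwrm_resources(bp);
 */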
int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        struct bnxt_filter_info *filter;
        int rc = 0;

        STAILQ_FOREACH(filter, &vnic->filter, next) {
                rc = bnxt_hwrm_clear_filter(bp, filter);
                if (rc)
                        break;
        }
        return rc;
}

int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        struct bnxt_filter_info *filter;
        int rc = 0;

        STAILQ_FOREACH(filter, &vnic->filter, next) {
                rc = bnxt_hwrm_set_filter(bp, vnic, filter);
                if (rc)
                        break;
        }
        return rc;
}

void bnxt_free_all_hwrm_resources(struct bnxt *bp)
{
        struct bnxt_vnic_info *vnic;
        unsigned int i;

        if (bp->vnic_info == NULL)
                return;

        vnic = &bp->vnic_info[0];
        bnxt_hwrm_cfa_l2_clear_rx_mask(bp, vnic);

        /* VNIC resources */
        for (i = 0; i < bp->nr_vnics; i++) {
                struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

                bnxt_clear_hwrm_vnic_filters(bp, vnic);

                bnxt_hwrm_vnic_ctx_free(bp, vnic);
                bnxt_hwrm_vnic_free(bp, vnic);
        }
        /* Ring resources */
        bnxt_free_all_hwrm_rings(bp);
        bnxt_free_all_hwrm_ring_grps(bp);
        bnxt_free_all_hwrm_stat_ctxs(bp);
}

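/*
 * Helpers translating between the DPDK link encodings (the ETH_LINK_SPEED_*
 * bitmap and ETH_SPEED_NUM_* values) and the HWRM PORT_PHY speed/duplex
 * encodings, in both directions.
 */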
static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
{
        uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;

        if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
                return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;

        switch (conf_link_speed) {
        case ETH_LINK_SPEED_10M_HD:
        case ETH_LINK_SPEED_100M_HD:
                return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
        }
        return hw_link_duplex;
}

static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
{
        uint16_t eth_link_speed = 0;

        if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
                return ETH_LINK_SPEED_AUTONEG;

        switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
        case ETH_LINK_SPEED_100M:
        case ETH_LINK_SPEED_100M_HD:
                eth_link_speed =
                        HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
                break;
        case ETH_LINK_SPEED_1G:
                eth_link_speed =
                        HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
                break;
        case ETH_LINK_SPEED_2_5G:
                eth_link_speed =
                        HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
                break;
        case ETH_LINK_SPEED_10G:
                eth_link_speed =
                        HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
                break;
        case ETH_LINK_SPEED_20G:
                eth_link_speed =
                        HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
                break;
        case ETH_LINK_SPEED_25G:
                eth_link_speed =
                        HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
                break;
        case ETH_LINK_SPEED_40G:
                eth_link_speed =
                        HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
                break;
        case ETH_LINK_SPEED_50G:
                eth_link_speed =
                        HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
                break;
        default:
                RTE_LOG(ERR, PMD,
                        "Unsupported link speed %u; default to AUTO\n",
                        conf_link_speed);
                break;
        }
        return eth_link_speed;
}

#define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
                ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
                ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
                ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G)

static int bnxt_valid_link_speed(uint32_t link_speed, uint8_t port_id)
{
        uint32_t one_speed;

        if (link_speed == ETH_LINK_SPEED_AUTONEG)
                return 0;

        if (link_speed & ETH_LINK_SPEED_FIXED) {
                one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;

                if (one_speed & (one_speed - 1)) {
                        RTE_LOG(ERR, PMD,
                                "Invalid advertised speeds (%u) for port %u\n",
                                link_speed, port_id);
                        return -EINVAL;
                }
                if ((one_speed & BNXT_SUPPORTED_SPEEDS) != one_speed) {
                        RTE_LOG(ERR, PMD,
                                "Unsupported advertised speed (%u) for port %u\n",
                                link_speed, port_id);
                        return -EINVAL;
                }
        } else {
                if (!(link_speed & BNXT_SUPPORTED_SPEEDS)) {
                        RTE_LOG(ERR, PMD,
                                "Unsupported advertised speeds (%u) for port %u\n",
                                link_speed, port_id);
                        return -EINVAL;
                }
        }
        return 0;
}

static uint16_t
bnxt_parse_eth_link_speed_mask(struct bnxt *bp, uint32_t link_speed)
{
        uint16_t ret = 0;

        if (link_speed == ETH_LINK_SPEED_AUTONEG) {
                if (bp->link_info.support_speeds)
                        return bp->link_info.support_speeds;
                link_speed = BNXT_SUPPORTED_SPEEDS;
        }

        if (link_speed & ETH_LINK_SPEED_100M)
                ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
        if (link_speed & ETH_LINK_SPEED_100M_HD)
                ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
        if (link_speed & ETH_LINK_SPEED_1G)
                ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
        if (link_speed & ETH_LINK_SPEED_2_5G)
                ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
        if (link_speed & ETH_LINK_SPEED_10G)
                ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
        if (link_speed & ETH_LINK_SPEED_20G)
                ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
        if (link_speed & ETH_LINK_SPEED_25G)
                ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
        if (link_speed & ETH_LINK_SPEED_40G)
                ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
        if (link_speed & ETH_LINK_SPEED_50G)
                ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
        return ret;
}

static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
{
        uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;

        switch (hw_link_speed) {
        case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
                eth_link_speed = ETH_SPEED_NUM_100M;
                break;
        case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
                eth_link_speed = ETH_SPEED_NUM_1G;
                break;
        case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
                eth_link_speed = ETH_SPEED_NUM_2_5G;
                break;
        case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
                eth_link_speed = ETH_SPEED_NUM_10G;
                break;
        case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
                eth_link_speed = ETH_SPEED_NUM_20G;
                break;
        case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
                eth_link_speed = ETH_SPEED_NUM_25G;
                break;
        case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
                eth_link_speed = ETH_SPEED_NUM_40G;
                break;
        case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
                eth_link_speed = ETH_SPEED_NUM_50G;
                break;
        case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
        default:
                RTE_LOG(ERR, PMD, "HWRM link speed %d not defined\n",
                        hw_link_speed);
                break;
        }
        return eth_link_speed;
}

static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
{
        uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;

        switch (hw_link_duplex) {
        case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
        case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
                eth_link_duplex = ETH_LINK_FULL_DUPLEX;
                break;
        case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
                eth_link_duplex = ETH_LINK_HALF_DUPLEX;
                break;
        default:
                RTE_LOG(ERR, PMD, "HWRM link duplex %d not defined\n",
                        hw_link_duplex);
                break;
        }
        return eth_link_duplex;
}

int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
{
        int rc = 0;
        struct bnxt_link_info *link_info = &bp->link_info;

        rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
        if (rc) {
                RTE_LOG(ERR, PMD,
                        "Get link config failed with rc %d\n", rc);
                goto exit;
        }
        if (link_info->link_speed)
                link->link_speed =
                        bnxt_parse_hw_link_speed(link_info->link_speed);
        else
                link->link_speed = ETH_SPEED_NUM_NONE;
        link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
        link->link_status = link_info->link_up;
        link->link_autoneg = link_info->auto_mode ==
                HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
                ETH_LINK_FIXED : ETH_LINK_AUTONEG;
exit:
        return rc;
}

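/*
 * Apply the link configuration from dev_conf->link_speeds: autonegotiation
 * with a speed mask when no fixed speed is requested, a forced speed
 * otherwise, or a forced link-down when link_up is false. NPAR PFs and VFs
 * do not own the PHY, so this is a no-op for them.
 */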
int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
{
        int rc = 0;
        struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
        struct bnxt_link_info link_req;
        uint16_t speed;

        if (BNXT_NPAR_PF(bp) || BNXT_VF(bp))
                return 0;

        rc = bnxt_valid_link_speed(dev_conf->link_speeds,
                        bp->eth_dev->data->port_id);
        if (rc)
                goto error;

        memset(&link_req, 0, sizeof(link_req));
        link_req.link_up = link_up;
        if (!link_up)
                goto port_phy_cfg;

        speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
        link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
        if (speed == 0) {
                link_req.phy_flags |=
                                HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
                link_req.auto_mode =
                                HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
                link_req.auto_link_speed_mask =
                        bnxt_parse_eth_link_speed_mask(bp,
                                                       dev_conf->link_speeds);
        } else {
                link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
                link_req.link_speed = speed;
                RTE_LOG(INFO, PMD, "Set Link Speed %x\n", speed);
        }
        link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
        link_req.auto_pause = bp->link_info.auto_pause;
        link_req.force_pause = bp->link_info.force_pause;

port_phy_cfg:
        rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
        if (rc) {
                RTE_LOG(ERR, PMD,
                        "Set link config failed with rc %d\n", rc);
        }

error:
        return rc;
}

/* JIRA 22088 */
int bnxt_hwrm_func_qcfg(struct bnxt *bp)
{
        struct hwrm_func_qcfg_input req = {0};
        struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
        int rc = 0;

        HWRM_PREP(req, FUNC_QCFG, -1, resp);
        req.fid = rte_cpu_to_le_16(0xffff);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        if (BNXT_VF(bp)) {
                struct bnxt_vf_info *vf = &bp->vf;

                /* Hard-coded 0xfff VLAN ID mask */
                vf->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
        }

        switch (resp->port_partition_type) {
        case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
        case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
        case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
                bp->port_partition_type = resp->port_partition_type;
                break;
        default:
                bp->port_partition_type = 0;
                break;
        }

        return rc;
}