Imported Upstream version 16.07-rc1
[deb_dpdk.git] / drivers / net / bnxt / bnxt_hwrm.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) Broadcom Limited.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Broadcom Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <rte_byteorder.h>
35 #include <rte_common.h>
36 #include <rte_cycles.h>
37 #include <rte_malloc.h>
38 #include <rte_memzone.h>
39 #include <rte_version.h>
40
41 #include "bnxt.h"
42 #include "bnxt_cpr.h"
43 #include "bnxt_filter.h"
44 #include "bnxt_hwrm.h"
45 #include "bnxt_rxq.h"
46 #include "bnxt_rxr.h"
47 #include "bnxt_ring.h"
48 #include "bnxt_txq.h"
49 #include "bnxt_txr.h"
50 #include "bnxt_vnic.h"
51 #include "hsi_struct_def_dpdk.h"
52
53 #define HWRM_CMD_TIMEOUT                2000
54
55 /*
56  * HWRM Functions (sent to HWRM)
57  * These are named bnxt_hwrm_*() and return -1 if bnxt_hwrm_send_message()
58  * fails (ie: a timeout), and a positive non-zero HWRM error code if the HWRM
59  * command was failed by the ChiMP.
60  */
61
/*
 * Send one HWRM request through the BAR0 channel and busy-wait for the
 * response.
 *
 * Caller must hold bp->hwrm_lock: the single BAR0 channel and the shared
 * response buffer at bp->hwrm_cmd_resp_addr are reused for every command.
 *
 * Returns 0 on success, -1 if no valid response arrives within
 * HWRM_CMD_TIMEOUT polls (~600us apart).
 */
static int bnxt_hwrm_send_message_locked(struct bnxt *bp, void *msg,
					uint32_t msg_len)
{
	unsigned int i;
	struct input *req = msg;
	struct output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t *data = msg;
	uint8_t *bar;
	uint8_t *valid;

	/* Write request msg to hwrm channel (BAR0), 32 bits at a time */
	for (i = 0; i < msg_len; i += 4) {
		bar = (uint8_t *)bp->bar0 + i;
		*(volatile uint32_t *)bar = *data;
		data++;
	}

	/* Zero the rest of the request space so no stale words remain */
	for (; i < bp->max_req_len; i += 4) {
		bar = (uint8_t *)bp->bar0 + i;
		*(volatile uint32_t *)bar = 0;
	}

	/* Ring channel doorbell (offset 0x100) to notify the firmware */
	bar = (uint8_t *)bp->bar0 + 0x100;
	*(volatile uint32_t *)bar = 1;

	/*
	 * Poll for the valid byte: the last byte of the response is written
	 * by the firmware only after the rest of the response has landed.
	 * NOTE(review): resp->resp_len is read without le-to-cpu conversion
	 * here — harmless on little-endian hosts; confirm for big-endian.
	 */
	for (i = 0; i < HWRM_CMD_TIMEOUT; i++) {
		/* Sanity check on the resp->resp_len */
		rte_rmb();
		if (resp->resp_len && resp->resp_len <=
				bp->max_resp_len) {
			/* Last byte of resp contains the valid key */
			valid = (uint8_t *)resp + resp->resp_len - 1;
			if (*valid == HWRM_RESP_VALID_KEY)
				break;
		}
		rte_delay_us(600);
	}

	if (i >= HWRM_CMD_TIMEOUT) {
		RTE_LOG(ERR, PMD, "Error sending msg %x\n",
			req->req_type);
		goto err_ret;
	}
	return 0;

err_ret:
	return -1;
}
113
114 static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg, uint32_t msg_len)
115 {
116         int rc;
117
118         rte_spinlock_lock(&bp->hwrm_lock);
119         rc = bnxt_hwrm_send_message_locked(bp, msg, msg_len);
120         rte_spinlock_unlock(&bp->hwrm_lock);
121         return rc;
122 }
123
/*
 * Initialize the common header of an HWRM request and clear the shared
 * response buffer.  Wrapped in do/while(0) so the multi-statement body
 * behaves as a single statement (safe inside an unbraced if/else).
 */
#define HWRM_PREP(req, type, cr, resp) \
	do { \
		memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
		req.req_type = rte_cpu_to_le_16(HWRM_##type); \
		req.cmpl_ring = rte_cpu_to_le_16(cr); \
		req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++); \
		req.target_id = rte_cpu_to_le_16(0xffff); \
		req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr); \
	} while (0)
131
/*
 * Check the outcome of an HWRM exchange: a non-zero rc means the message
 * could not be delivered (e.g. timeout); a non-zero error_code in the
 * response means the firmware rejected the command.  Either way, log and
 * return the error from the enclosing function.  Wrapped in do/while(0)
 * so the macro is a single statement even in unbraced contexts.
 */
#define HWRM_CHECK_RESULT \
	do { \
		if (rc) { \
			RTE_LOG(ERR, PMD, "%s failed rc:%d\n", \
				__func__, rc); \
			return rc; \
		} \
		if (resp->error_code) { \
			rc = rte_le_to_cpu_16(resp->error_code); \
			RTE_LOG(ERR, PMD, "%s error %d\n", __func__, rc); \
			return rc; \
		} \
	} while (0)
145
146 int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
147 {
148         int rc = 0;
149         struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
150         struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
151
152         HWRM_PREP(req, CFA_L2_SET_RX_MASK, -1, resp);
153         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
154         req.mask = 0;
155
156         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
157
158         HWRM_CHECK_RESULT;
159
160         return rc;
161 }
162
163 int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
164 {
165         int rc = 0;
166         struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
167         struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
168         uint32_t mask = 0;
169
170         HWRM_PREP(req, CFA_L2_SET_RX_MASK, -1, resp);
171         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
172
173         /* FIXME add multicast flag, when multicast adding options is supported
174          * by ethtool.
175          */
176         if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
177                 mask = HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
178         if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI)
179                 mask = HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
180         req.mask = rte_cpu_to_le_32(HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST |
181                                     HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST |
182                                     mask);
183
184         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
185
186         HWRM_CHECK_RESULT;
187
188         return rc;
189 }
190
191 int bnxt_hwrm_clear_filter(struct bnxt *bp,
192                            struct bnxt_filter_info *filter)
193 {
194         int rc = 0;
195         struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
196         struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;
197
198         HWRM_PREP(req, CFA_L2_FILTER_FREE, -1, resp);
199
200         req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
201
202         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
203
204         HWRM_CHECK_RESULT;
205
206         filter->fw_l2_filter_id = -1;
207
208         return 0;
209 }
210
211 int bnxt_hwrm_set_filter(struct bnxt *bp,
212                          struct bnxt_vnic_info *vnic,
213                          struct bnxt_filter_info *filter)
214 {
215         int rc = 0;
216         struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
217         struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
218         uint32_t enables = 0;
219
220         HWRM_PREP(req, CFA_L2_FILTER_ALLOC, -1, resp);
221
222         req.flags = rte_cpu_to_le_32(filter->flags);
223
224         enables = filter->enables |
225               HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
226         req.dst_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
227
228         if (enables &
229             HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
230                 memcpy(req.l2_addr, filter->l2_addr,
231                        ETHER_ADDR_LEN);
232         if (enables &
233             HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
234                 memcpy(req.l2_addr_mask, filter->l2_addr_mask,
235                        ETHER_ADDR_LEN);
236         if (enables &
237             HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
238                 req.l2_ovlan = filter->l2_ovlan;
239         if (enables &
240             HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
241                 req.l2_ovlan_mask = filter->l2_ovlan_mask;
242
243         req.enables = rte_cpu_to_le_32(enables);
244
245         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
246
247         HWRM_CHECK_RESULT;
248
249         filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);
250
251         return rc;
252 }
253
254 int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, void *fwd_cmd)
255 {
256         int rc;
257         struct hwrm_exec_fwd_resp_input req = {.req_type = 0 };
258         struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
259
260         HWRM_PREP(req, EXEC_FWD_RESP, -1, resp);
261
262         memcpy(req.encap_request, fwd_cmd,
263                sizeof(req.encap_request));
264
265         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
266
267         HWRM_CHECK_RESULT;
268
269         return rc;
270 }
271
/*
 * Query this function's resource limits (rings, contexts, VNICs, VFs) and
 * permanent MAC address, caching them in bp->pf or bp->vf depending on
 * whether the device is a physical or virtual function.
 */
int bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_func_qcaps_input req = {.req_type = 0 };
	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, FUNC_QCAPS, -1, resp);

	/* 0xffff selects the function backing this driver instance */
	req.fid = rte_cpu_to_le_16(0xffff);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
	if (BNXT_PF(bp)) {
		struct bnxt_pf_info *pf = &bp->pf;

		pf->fw_fid = rte_le_to_cpu_32(resp->fid);
		pf->port_id = resp->port_id;
		memcpy(pf->mac_addr, resp->perm_mac_address, ETHER_ADDR_LEN);
		pf->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
		pf->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
		pf->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
		pf->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
		pf->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
		pf->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
		pf->first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
		pf->max_vfs = rte_le_to_cpu_16(resp->max_vfs);
	} else {
		struct bnxt_vf_info *vf = &bp->vf;

		vf->fw_fid = rte_le_to_cpu_32(resp->fid);
		/* NOTE(review): '&resp->perm_mac_address' here vs the PF
		 * branch's 'resp->perm_mac_address' — equivalent if the field
		 * is an array, otherwise suspicious; confirm in
		 * hsi_struct_def_dpdk.h. */
		memcpy(vf->mac_addr, &resp->perm_mac_address, ETHER_ADDR_LEN);
		vf->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
		vf->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
		vf->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
		vf->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
		vf->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
		vf->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
	}

	return rc;
}
316
317 int bnxt_hwrm_func_reset(struct bnxt *bp)
318 {
319         int rc = 0;
320         struct hwrm_func_reset_input req = {.req_type = 0 };
321         struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;
322
323         HWRM_PREP(req, FUNC_RESET, -1, resp);
324
325         req.enables = rte_cpu_to_le_32(0);
326
327         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
328
329         HWRM_CHECK_RESULT;
330
331         return rc;
332 }
333
334 int bnxt_hwrm_func_driver_register(struct bnxt *bp, uint32_t flags,
335                                    uint32_t *vf_req_fwd)
336 {
337         int rc;
338         struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
339         struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
340
341         if (bp->flags & BNXT_FLAG_REGISTERED)
342                 return 0;
343
344         HWRM_PREP(req, FUNC_DRV_RGTR, -1, resp);
345         req.flags = flags;
346         req.enables = HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER;
347         req.ver_maj = RTE_VER_YEAR;
348         req.ver_min = RTE_VER_MONTH;
349         req.ver_upd = RTE_VER_MINOR;
350
351         memcpy(req.vf_req_fwd, vf_req_fwd, sizeof(req.vf_req_fwd));
352
353         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
354
355         HWRM_CHECK_RESULT;
356
357         bp->flags |= BNXT_FLAG_REGISTERED;
358
359         return rc;
360 }
361
362 int bnxt_hwrm_ver_get(struct bnxt *bp)
363 {
364         int rc = 0;
365         struct hwrm_ver_get_input req = {.req_type = 0 };
366         struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
367         uint32_t my_version;
368         uint32_t fw_version;
369         uint16_t max_resp_len;
370         char type[RTE_MEMZONE_NAMESIZE];
371
372         HWRM_PREP(req, VER_GET, -1, resp);
373
374         req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
375         req.hwrm_intf_min = HWRM_VERSION_MINOR;
376         req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
377
378         /*
379          * Hold the lock since we may be adjusting the response pointers.
380          */
381         rte_spinlock_lock(&bp->hwrm_lock);
382         rc = bnxt_hwrm_send_message_locked(bp, &req, sizeof(req));
383
384         HWRM_CHECK_RESULT;
385
386         RTE_LOG(INFO, PMD, "%d.%d.%d:%d.%d.%d\n",
387                 resp->hwrm_intf_maj, resp->hwrm_intf_min,
388                 resp->hwrm_intf_upd,
389                 resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld);
390
391         my_version = HWRM_VERSION_MAJOR << 16;
392         my_version |= HWRM_VERSION_MINOR << 8;
393         my_version |= HWRM_VERSION_UPDATE;
394
395         fw_version = resp->hwrm_intf_maj << 16;
396         fw_version |= resp->hwrm_intf_min << 8;
397         fw_version |= resp->hwrm_intf_upd;
398
399         if (resp->hwrm_intf_maj != HWRM_VERSION_MAJOR) {
400                 RTE_LOG(ERR, PMD, "Unsupported firmware API version\n");
401                 rc = -EINVAL;
402                 goto error;
403         }
404
405         if (my_version != fw_version) {
406                 RTE_LOG(INFO, PMD, "BNXT Driver/HWRM API mismatch.\n");
407                 if (my_version < fw_version) {
408                         RTE_LOG(INFO, PMD,
409                                 "Firmware API version is newer than driver.\n");
410                         RTE_LOG(INFO, PMD,
411                                 "The driver may be missing features.\n");
412                 } else {
413                         RTE_LOG(INFO, PMD,
414                                 "Firmware API version is older than driver.\n");
415                         RTE_LOG(INFO, PMD,
416                                 "Not all driver features may be functional.\n");
417                 }
418         }
419
420         if (bp->max_req_len > resp->max_req_win_len) {
421                 RTE_LOG(ERR, PMD, "Unsupported request length\n");
422                 rc = -EINVAL;
423         }
424         bp->max_req_len = resp->max_req_win_len;
425         max_resp_len = resp->max_resp_len;
426         if (bp->max_resp_len != max_resp_len) {
427                 sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x",
428                         bp->pdev->addr.domain, bp->pdev->addr.bus,
429                         bp->pdev->addr.devid, bp->pdev->addr.function);
430
431                 rte_free(bp->hwrm_cmd_resp_addr);
432
433                 bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0);
434                 if (bp->hwrm_cmd_resp_addr == NULL) {
435                         rc = -ENOMEM;
436                         goto error;
437                 }
438                 bp->hwrm_cmd_resp_dma_addr =
439                         rte_malloc_virt2phy(bp->hwrm_cmd_resp_addr);
440                 bp->max_resp_len = max_resp_len;
441         }
442
443 error:
444         rte_spinlock_unlock(&bp->hwrm_lock);
445         return rc;
446 }
447
448 int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
449 {
450         int rc;
451         struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
452         struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
453
454         if (!(bp->flags & BNXT_FLAG_REGISTERED))
455                 return 0;
456
457         HWRM_PREP(req, FUNC_DRV_UNRGTR, -1, resp);
458         req.flags = flags;
459
460         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
461
462         HWRM_CHECK_RESULT;
463
464         bp->flags &= ~BNXT_FLAG_REGISTERED;
465
466         return rc;
467 }
468
/*
 * Program the PHY per @conf: either bring the link up with the requested
 * speed/duplex/pause configuration, or force the link down.
 */
static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
{
	int rc = 0;
	struct hwrm_port_phy_cfg_input req = {.req_type = 0};
	struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, PORT_PHY_CFG, -1, resp);

	req.flags = conf->phy_flags;
	if (conf->link_up) {
		req.force_link_speed = conf->link_speed;
		/*
		 * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
		 * any auto mode, even "none".
		 */
		/*
		 * NOTE(review): this tests req.auto_mode, which is still at
		 * its zero-initialized value here; if AUTO_MODE_NONE == 0 the
		 * force branch is always taken and the else branch is dead.
		 * If auto-negotiation was intended, this should presumably
		 * test conf->auto_mode — confirm against the HWRM spec before
		 * changing.
		 */
		if (req.auto_mode == HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE) {
			req.flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
		} else {
			req.auto_mode = conf->auto_mode;
			req.enables |=
				HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
			req.auto_link_speed_mask = conf->auto_link_speed_mask;
			req.enables |=
			   HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
			req.auto_link_speed = conf->auto_link_speed;
			req.enables |=
				HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED;
		}
		req.auto_duplex = conf->duplex;
		req.enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
		req.auto_pause = conf->auto_pause;
		/* Set force_pause if there is no auto or if there is a force */
		if (req.auto_pause)
			req.enables |=
				HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
		else
			req.enables |=
				HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;
		req.force_pause = conf->force_pause;
		if (req.force_pause)
			req.enables |=
				HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;
	} else {
		/* Link down: stop autoneg restart and force the link down */
		req.flags &= ~HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
		req.flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DOWN;
		req.force_link_speed = 0;
	}

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	return rc;
}
523
524 static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
525                                    struct bnxt_link_info *link_info)
526 {
527         int rc = 0;
528         struct hwrm_port_phy_qcfg_input req = {.req_type = 0};
529         struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
530
531         HWRM_PREP(req, PORT_PHY_QCFG, -1, resp);
532
533         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
534
535         HWRM_CHECK_RESULT;
536
537         link_info->phy_link_status = resp->link;
538         if (link_info->phy_link_status == HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) {
539                 link_info->link_up = 1;
540                 link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
541         } else {
542                 link_info->link_up = 0;
543                 link_info->link_speed = 0;
544         }
545         link_info->duplex = resp->duplex;
546         link_info->pause = resp->pause;
547         link_info->auto_pause = resp->auto_pause;
548         link_info->force_pause = resp->force_pause;
549         link_info->auto_mode = resp->auto_mode;
550
551         link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
552         link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
553         link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
554         link_info->phy_ver[0] = resp->phy_maj;
555         link_info->phy_ver[1] = resp->phy_min;
556         link_info->phy_ver[2] = resp->phy_bld;
557
558         return rc;
559 }
560
/*
 * Query the CoS queue configuration for this port and cache the id and
 * service profile of each of the eight queues in bp->cos_queue[].
 */
int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
	struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, QUEUE_QPORTCFG, -1, resp);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

/* Token-pasting helper: copies resp->queue_idX and its service profile
 * into bp->cos_queue[X]. */
#define GET_QUEUE_INFO(x) \
	bp->cos_queue[x].id = resp->queue_id##x; \
	bp->cos_queue[x].profile = resp->queue_id##x##_service_profile

	GET_QUEUE_INFO(0);
	GET_QUEUE_INFO(1);
	GET_QUEUE_INFO(2);
	GET_QUEUE_INFO(3);
	GET_QUEUE_INFO(4);
	GET_QUEUE_INFO(5);
	GET_QUEUE_INFO(6);
	GET_QUEUE_INFO(7);

	return rc;
}
588
/*
 * Allocate a TX, RX or completion ring in the firmware.
 * @map_index is the doorbell/ring-group index associated with the ring;
 * @stats_ctx_id is the statistics context used by TX/RX rings.
 * On success the firmware ring id is stored in ring->fw_ring_id.
 * Returns 0, -1 for an unknown ring type, or an HWRM error code.
 */
int bnxt_hwrm_ring_alloc(struct bnxt *bp,
			 struct bnxt_ring *ring,
			 uint32_t ring_type, uint32_t map_index,
			 uint32_t stats_ctx_id)
{
	int rc = 0;
	struct hwrm_ring_alloc_input req = {.req_type = 0 };
	struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_ALLOC, -1, resp);

	req.enables = rte_cpu_to_le_32(0);

	req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
	req.fbo = rte_cpu_to_le_32(0);
	/* Association of ring index with doorbell index */
	req.logical_id = rte_cpu_to_le_16(map_index);

	switch (ring_type) {
	case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
		req.queue_id = bp->cos_queue[0].id;
		/* FALLTHROUGH: TX shares the remaining RX setup below */
	case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
		req.ring_type = ring_type;
		req.cmpl_ring_id =
		    rte_cpu_to_le_16(bp->grp_info[map_index].cp_fw_ring_id);
		req.length = rte_cpu_to_le_32(ring->ring_size);
		req.stat_ctx_id = rte_cpu_to_le_16(stats_ctx_id);
		req.enables = rte_cpu_to_le_32(rte_le_to_cpu_32(req.enables) |
			HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID);
		break;
	case HWRM_RING_ALLOC_INPUT_RING_TYPE_CMPL:
		req.ring_type = ring_type;
		/*
		 * TODO: Some HWRM versions crash with
		 * HWRM_RING_ALLOC_INPUT_INT_MODE_POLL
		 */
		req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
		req.length = rte_cpu_to_le_32(ring->ring_size);
		break;
	default:
		RTE_LOG(ERR, PMD, "hwrm alloc invalid ring type %d\n",
			ring_type);
		return -1;
	}

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	if (rc || resp->error_code) {
		if (rc == 0 && resp->error_code)
			rc = rte_le_to_cpu_16(resp->error_code);
		/* NOTE(review): this error switch uses the
		 * HWRM_RING_FREE_INPUT_RING_TYPE_* constants — presumably
		 * their values match the ALLOC ones; confirm in
		 * hsi_struct_def_dpdk.h. */
		switch (ring_type) {
		case HWRM_RING_FREE_INPUT_RING_TYPE_CMPL:
			RTE_LOG(ERR, PMD,
				"hwrm_ring_alloc cp failed. rc:%d\n", rc);
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
			RTE_LOG(ERR, PMD,
				"hwrm_ring_alloc rx failed. rc:%d\n", rc);
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
			RTE_LOG(ERR, PMD,
				"hwrm_ring_alloc tx failed. rc:%d\n", rc);
			return rc;
		default:
			RTE_LOG(ERR, PMD, "Invalid ring. rc:%d\n", rc);
			return rc;
		}
	}

	ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);
	return rc;
}
661
662 int bnxt_hwrm_ring_free(struct bnxt *bp,
663                         struct bnxt_ring *ring, uint32_t ring_type)
664 {
665         int rc;
666         struct hwrm_ring_free_input req = {.req_type = 0 };
667         struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
668
669         HWRM_PREP(req, RING_FREE, -1, resp);
670
671         req.ring_type = ring_type;
672         req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);
673
674         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
675
676         if (rc || resp->error_code) {
677                 if (rc == 0 && resp->error_code)
678                         rc = rte_le_to_cpu_16(resp->error_code);
679
680                 switch (ring_type) {
681                 case HWRM_RING_FREE_INPUT_RING_TYPE_CMPL:
682                         RTE_LOG(ERR, PMD, "hwrm_ring_free cp failed. rc:%d\n",
683                                 rc);
684                         return rc;
685                 case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
686                         RTE_LOG(ERR, PMD, "hwrm_ring_free rx failed. rc:%d\n",
687                                 rc);
688                         return rc;
689                 case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
690                         RTE_LOG(ERR, PMD, "hwrm_ring_free tx failed. rc:%d\n",
691                                 rc);
692                         return rc;
693                 default:
694                         RTE_LOG(ERR, PMD, "Invalid ring, rc:%d\n", rc);
695                         return rc;
696                 }
697         }
698         return 0;
699 }
700
701 int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
702 {
703         int rc = 0;
704         struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
705         struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;
706
707         HWRM_PREP(req, RING_GRP_ALLOC, -1, resp);
708
709         req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
710         req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
711         req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
712         req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);
713
714         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
715
716         HWRM_CHECK_RESULT;
717
718         bp->grp_info[idx].fw_grp_id =
719             rte_le_to_cpu_16(resp->ring_group_id);
720
721         return rc;
722 }
723
724 int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
725 {
726         int rc;
727         struct hwrm_ring_grp_free_input req = {.req_type = 0 };
728         struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;
729
730         HWRM_PREP(req, RING_GRP_FREE, -1, resp);
731
732         req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);
733
734         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
735
736         HWRM_CHECK_RESULT;
737
738         bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;
739         return rc;
740 }
741
742 int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
743 {
744         int rc = 0;
745         struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
746         struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
747
748         HWRM_PREP(req, STAT_CTX_CLR_STATS, -1, resp);
749
750         if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
751                 return rc;
752
753         req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
754         req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++);
755
756         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
757
758         HWRM_CHECK_RESULT;
759
760         return rc;
761 }
762
763 int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp,
764                              struct bnxt_cp_ring_info *cpr, unsigned int idx)
765 {
766         int rc;
767         struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
768         struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
769
770         HWRM_PREP(req, STAT_CTX_ALLOC, -1, resp);
771
772         req.update_period_ms = rte_cpu_to_le_32(1000);
773
774         req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++);
775         req.stats_dma_addr =
776             rte_cpu_to_le_64(cpr->hw_stats_map);
777
778         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
779
780         HWRM_CHECK_RESULT;
781
782         cpr->hw_stats_ctx_id = rte_le_to_cpu_16(resp->stat_ctx_id);
783         bp->grp_info[idx].fw_stats_ctx = cpr->hw_stats_ctx_id;
784
785         return rc;
786 }
787
788 int bnxt_hwrm_stat_ctx_free(struct bnxt *bp,
789                             struct bnxt_cp_ring_info *cpr, unsigned int idx)
790 {
791         int rc;
792         struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
793         struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;
794
795         HWRM_PREP(req, STAT_CTX_FREE, -1, resp);
796
797         req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
798         req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++);
799
800         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
801
802         HWRM_CHECK_RESULT;
803
804         cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
805         bp->grp_info[idx].fw_stats_ctx = cpr->hw_stats_ctx_id;
806
807         return rc;
808 }
809
/*
 * Allocate a VNIC in firmware and map the device ring groups in
 * [start_grp_id, end_grp_id] into vnic->fw_grp_ids.
 *
 * On success vnic->fw_vnic_id holds the firmware-assigned VNIC id.  The
 * RSS/COS/LB context is reset to "none" here; it is allocated later by
 * bnxt_hwrm_vnic_ctx_alloc().
 *
 * Returns 0 on success or the error from the HWRM exchange.
 */
int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0, i, j;
        struct hwrm_vnic_alloc_input req = {.req_type = 0 };
        struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        /* map ring groups to this vnic */
        for (i = vnic->start_grp_id, j = 0; i <= vnic->end_grp_id; i++, j++) {
                if (bp->grp_info[i].fw_grp_id == (uint16_t)HWRM_NA_SIGNATURE) {
                        RTE_LOG(ERR, PMD,
                                "Not enough ring groups avail:%x req:%x\n", j,
                                (vnic->end_grp_id - vnic->start_grp_id) + 1);
                        /* NOTE(review): allocation continues with a partial
                         * group mapping instead of failing -- confirm this
                         * is the intended behavior. */
                        break;
                }
                vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
        }

        /* No RSS/COS/LB context attached yet */
        vnic->fw_rss_cos_lb_ctx = (uint16_t)HWRM_NA_SIGNATURE;
        vnic->ctx_is_rss_cos_lb = HW_CONTEXT_NONE;

        HWRM_PREP(req, VNIC_ALLOC, -1, resp);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
        return rc;
}
839
840 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
841 {
842         int rc = 0;
843         struct hwrm_vnic_cfg_input req = {.req_type = 0 };
844         struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
845
846         HWRM_PREP(req, VNIC_CFG, -1, resp);
847
848         /* Only RSS support for now TBD: COS & LB */
849         req.enables =
850             rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP |
851                              HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE |
852                              HWRM_VNIC_CFG_INPUT_ENABLES_MRU);
853         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
854         req.dflt_ring_grp =
855                 rte_cpu_to_le_16(bp->grp_info[vnic->start_grp_id].fw_grp_id);
856         req.rss_rule = rte_cpu_to_le_16(vnic->fw_rss_cos_lb_ctx);
857         req.cos_rule = rte_cpu_to_le_16(0xffff);
858         req.lb_rule = rte_cpu_to_le_16(0xffff);
859         req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
860                                    ETHER_CRC_LEN + VLAN_TAG_SIZE);
861         if (vnic->func_default)
862                 req.flags = 1;
863         if (vnic->vlan_strip)
864                 req.flags |=
865                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
866
867         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
868
869         HWRM_CHECK_RESULT;
870
871         return rc;
872 }
873
/*
 * Allocate an RSS/COS/LB context in firmware and cache its id in
 * vnic->fw_rss_cos_lb_ctx for use by bnxt_hwrm_vnic_cfg() and
 * bnxt_hwrm_vnic_rss_cfg().
 *
 * Returns 0 on success or the error from the HWRM exchange.
 */
int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
        struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
                                                bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC, -1, resp);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        /* Remember the firmware context id for later cfg/free calls */
        vnic->fw_rss_cos_lb_ctx = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);

        return rc;
}
891
/*
 * Free the VNIC's RSS/COS/LB context in firmware.
 *
 * NOTE(review): on success the cached id is reset to INVALID_HW_RING_ID,
 * while bnxt_hwrm_vnic_alloc() initializes the same field to
 * (uint16_t)HWRM_NA_SIGNATURE -- confirm the two sentinels are
 * interchangeable for fw_rss_cos_lb_ctx.
 *
 * Returns 0 on success or the error from the HWRM exchange.
 */
int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
        struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
                                                bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE, -1, resp);

        req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(vnic->fw_rss_cos_lb_ctx);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        vnic->fw_rss_cos_lb_ctx = INVALID_HW_RING_ID;

        return rc;
}
911
/*
 * Free a firmware VNIC.  A VNIC that was never allocated (or is already
 * freed) is detected via the INVALID_HW_RING_ID sentinel and skipped,
 * making the call idempotent.
 *
 * Returns 0 on success (or when there is nothing to free), otherwise
 * the error from the HWRM exchange.
 */
int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_vnic_free_input req = {.req_type = 0 };
        struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;

        /* Nothing allocated -- nothing to do */
        if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
                return rc;

        HWRM_PREP(req, VNIC_FREE, -1, resp);

        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        /* Mark as freed so a second call is a no-op */
        vnic->fw_vnic_id = INVALID_HW_RING_ID;
        return rc;
}
932
/*
 * Program the VNIC's RSS configuration: hash type plus the DMA
 * addresses of the ring-group indirection table and the hash key.
 * Both tables must already be populated at the given DMA addresses.
 *
 * Returns 0 on success or the error from the HWRM exchange.
 */
int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
                           struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
        struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, VNIC_RSS_CFG, -1, resp);

        req.hash_type = rte_cpu_to_le_32(vnic->hash_type);

        /* Firmware reads the indirection table and key via DMA */
        req.ring_grp_tbl_addr =
            rte_cpu_to_le_64(vnic->rss_table_dma_addr);
        req.hash_key_tbl_addr =
            rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
        req.rss_ctx_idx = rte_cpu_to_le_16(vnic->fw_rss_cos_lb_ctx);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}
956
957 /*
958  * HWRM utility functions
959  */
960
961 int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
962 {
963         unsigned int i;
964         int rc = 0;
965
966         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
967                 struct bnxt_tx_queue *txq;
968                 struct bnxt_rx_queue *rxq;
969                 struct bnxt_cp_ring_info *cpr;
970
971                 if (i >= bp->rx_cp_nr_rings) {
972                         txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
973                         cpr = txq->cp_ring;
974                 } else {
975                         rxq = bp->rx_queues[i];
976                         cpr = rxq->cp_ring;
977                 }
978
979                 rc = bnxt_hwrm_stat_clear(bp, cpr);
980                 if (rc)
981                         return rc;
982         }
983         return 0;
984 }
985
986 int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
987 {
988         int rc;
989         unsigned int i;
990         struct bnxt_cp_ring_info *cpr;
991
992         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
993                 unsigned int idx = i + 1;
994
995                 if (i >= bp->rx_cp_nr_rings)
996                         cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
997                 else
998                         cpr = bp->rx_queues[i]->cp_ring;
999                 if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
1000                         rc = bnxt_hwrm_stat_ctx_free(bp, cpr, idx);
1001                         if (rc)
1002                                 return rc;
1003                 }
1004         }
1005         return 0;
1006 }
1007
1008 int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
1009 {
1010         unsigned int i;
1011         int rc = 0;
1012
1013         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1014                 struct bnxt_tx_queue *txq;
1015                 struct bnxt_rx_queue *rxq;
1016                 struct bnxt_cp_ring_info *cpr;
1017                 unsigned int idx = i + 1;
1018
1019                 if (i >= bp->rx_cp_nr_rings) {
1020                         txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
1021                         cpr = txq->cp_ring;
1022                 } else {
1023                         rxq = bp->rx_queues[i];
1024                         cpr = rxq->cp_ring;
1025                 }
1026
1027                 rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, idx);
1028
1029                 if (rc)
1030                         return rc;
1031         }
1032         return rc;
1033 }
1034
1035 int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
1036 {
1037         uint16_t i;
1038         uint32_t rc = 0;
1039
1040         for (i = 0; i < bp->rx_cp_nr_rings; i++) {
1041                 unsigned int idx = i + 1;
1042
1043                 if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID) {
1044                         RTE_LOG(ERR, PMD,
1045                                 "Attempt to free invalid ring group %d\n",
1046                                 idx);
1047                         continue;
1048                 }
1049
1050                 rc = bnxt_hwrm_ring_grp_free(bp, idx);
1051
1052                 if (rc)
1053                         return rc;
1054         }
1055         return rc;
1056 }
1057
1058 static void bnxt_free_cp_ring(struct bnxt *bp,
1059                               struct bnxt_cp_ring_info *cpr, unsigned int idx)
1060 {
1061         struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
1062
1063         bnxt_hwrm_ring_free(bp, cp_ring,
1064                         HWRM_RING_FREE_INPUT_RING_TYPE_CMPL);
1065         cp_ring->fw_ring_id = INVALID_HW_RING_ID;
1066         bp->grp_info[idx].cp_fw_ring_id = INVALID_HW_RING_ID;
1067         memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
1068                         sizeof(*cpr->cp_desc_ring));
1069         cpr->cp_raw_cons = 0;
1070 }
1071
/*
 * Tear down every firmware ring: Tx rings and their completion rings,
 * then Rx rings and theirs, and finally the default completion ring
 * (grp_info slot 0).  For each ring the firmware object is freed, the
 * cached id is invalidated, the host descriptor/buffer memory is wiped
 * and the producer/consumer indices are rewound, so the rings can be
 * reallocated from a clean state.
 *
 * Always returns 0 (errors from bnxt_hwrm_ring_free are not checked
 * here).
 */
int bnxt_free_all_hwrm_rings(struct bnxt *bp)
{
        unsigned int i;
        int rc = 0;

        /* Tx rings: flat index space puts them after the Rx rings */
        for (i = 0; i < bp->tx_cp_nr_rings; i++) {
                struct bnxt_tx_queue *txq = bp->tx_queues[i];
                struct bnxt_tx_ring_info *txr = txq->tx_ring;
                struct bnxt_ring *ring = txr->tx_ring_struct;
                struct bnxt_cp_ring_info *cpr = txq->cp_ring;
                unsigned int idx = bp->rx_cp_nr_rings + i + 1;

                if (ring->fw_ring_id != INVALID_HW_RING_ID) {
                        bnxt_hwrm_ring_free(bp, ring,
                                        HWRM_RING_FREE_INPUT_RING_TYPE_TX);
                        ring->fw_ring_id = INVALID_HW_RING_ID;
                        /* Wipe descriptors and buffers, rewind indices */
                        memset(txr->tx_desc_ring, 0,
                                        txr->tx_ring_struct->ring_size *
                                        sizeof(*txr->tx_desc_ring));
                        memset(txr->tx_buf_ring, 0,
                                        txr->tx_ring_struct->ring_size *
                                        sizeof(*txr->tx_buf_ring));
                        txr->tx_prod = 0;
                        txr->tx_cons = 0;
                }
                if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
                        bnxt_free_cp_ring(bp, cpr, idx);
        }

        /* Rx rings: grp_info slots 1..rx_cp_nr_rings */
        for (i = 0; i < bp->rx_cp_nr_rings; i++) {
                struct bnxt_rx_queue *rxq = bp->rx_queues[i];
                struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
                struct bnxt_ring *ring = rxr->rx_ring_struct;
                struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
                unsigned int idx = i + 1;

                if (ring->fw_ring_id != INVALID_HW_RING_ID) {
                        bnxt_hwrm_ring_free(bp, ring,
                                        HWRM_RING_FREE_INPUT_RING_TYPE_RX);
                        ring->fw_ring_id = INVALID_HW_RING_ID;
                        bp->grp_info[idx].rx_fw_ring_id = INVALID_HW_RING_ID;
                        memset(rxr->rx_desc_ring, 0,
                                        rxr->rx_ring_struct->ring_size *
                                        sizeof(*rxr->rx_desc_ring));
                        memset(rxr->rx_buf_ring, 0,
                                        rxr->rx_ring_struct->ring_size *
                                        sizeof(*rxr->rx_buf_ring));
                        rxr->rx_prod = 0;
                }
                if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
                        bnxt_free_cp_ring(bp, cpr, idx);
        }

        /* Default completion ring */
        {
                struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;

                if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
                        bnxt_free_cp_ring(bp, cpr, 0);
        }

        return rc;
}
1135
1136 int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
1137 {
1138         uint16_t i;
1139         uint32_t rc = 0;
1140
1141         for (i = 0; i < bp->rx_cp_nr_rings; i++) {
1142                 unsigned int idx = i + 1;
1143
1144                 if (bp->grp_info[idx].cp_fw_ring_id == INVALID_HW_RING_ID ||
1145                     bp->grp_info[idx].rx_fw_ring_id == INVALID_HW_RING_ID)
1146                         continue;
1147
1148                 rc = bnxt_hwrm_ring_grp_alloc(bp, idx);
1149
1150                 if (rc)
1151                         return rc;
1152         }
1153         return rc;
1154 }
1155
/*
 * Release the HWRM response buffer allocated by
 * bnxt_alloc_hwrm_resources() and clear the cached virtual/DMA
 * addresses so stale pointers cannot be reused.
 */
void bnxt_free_hwrm_resources(struct bnxt *bp)
{
        /* Release memzone */
        rte_free(bp->hwrm_cmd_resp_addr);
        bp->hwrm_cmd_resp_addr = NULL;
        bp->hwrm_cmd_resp_dma_addr = 0;
}
1163
1164 int bnxt_alloc_hwrm_resources(struct bnxt *bp)
1165 {
1166         struct rte_pci_device *pdev = bp->pdev;
1167         char type[RTE_MEMZONE_NAMESIZE];
1168
1169         sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x", pdev->addr.domain,
1170                 pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
1171         bp->max_req_len = HWRM_MAX_REQ_LEN;
1172         bp->max_resp_len = HWRM_MAX_RESP_LEN;
1173         bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
1174         if (bp->hwrm_cmd_resp_addr == NULL)
1175                 return -ENOMEM;
1176         bp->hwrm_cmd_resp_dma_addr =
1177                 rte_malloc_virt2phy(bp->hwrm_cmd_resp_addr);
1178         rte_spinlock_init(&bp->hwrm_lock);
1179
1180         return 0;
1181 }
1182
1183 int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1184 {
1185         struct bnxt_filter_info *filter;
1186         int rc = 0;
1187
1188         STAILQ_FOREACH(filter, &vnic->filter, next) {
1189                 rc = bnxt_hwrm_clear_filter(bp, filter);
1190                 if (rc)
1191                         break;
1192         }
1193         return rc;
1194 }
1195
1196 int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1197 {
1198         struct bnxt_filter_info *filter;
1199         int rc = 0;
1200
1201         STAILQ_FOREACH(filter, &vnic->filter, next) {
1202                 rc = bnxt_hwrm_set_filter(bp, vnic, filter);
1203                 if (rc)
1204                         break;
1205         }
1206         return rc;
1207 }
1208
1209 void bnxt_free_all_hwrm_resources(struct bnxt *bp)
1210 {
1211         struct bnxt_vnic_info *vnic;
1212         unsigned int i;
1213
1214         if (bp->vnic_info == NULL)
1215                 return;
1216
1217         vnic = &bp->vnic_info[0];
1218         bnxt_hwrm_cfa_l2_clear_rx_mask(bp, vnic);
1219
1220         /* VNIC resources */
1221         for (i = 0; i < bp->nr_vnics; i++) {
1222                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
1223
1224                 bnxt_clear_hwrm_vnic_filters(bp, vnic);
1225
1226                 bnxt_hwrm_vnic_ctx_free(bp, vnic);
1227                 bnxt_hwrm_vnic_free(bp, vnic);
1228         }
1229         /* Ring resources */
1230         bnxt_free_all_hwrm_rings(bp);
1231         bnxt_free_all_hwrm_ring_grps(bp);
1232         bnxt_free_all_hwrm_stat_ctxs(bp);
1233 }
1234
1235 static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
1236 {
1237         uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
1238
1239         if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
1240                 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
1241
1242         switch (conf_link_speed) {
1243         case ETH_LINK_SPEED_10M_HD:
1244         case ETH_LINK_SPEED_100M_HD:
1245                 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
1246         }
1247         return hw_link_duplex;
1248 }
1249
1250 static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
1251 {
1252         uint16_t eth_link_speed = 0;
1253
1254         if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
1255                 return ETH_LINK_SPEED_AUTONEG;
1256
1257         switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
1258         case ETH_LINK_SPEED_100M:
1259         case ETH_LINK_SPEED_100M_HD:
1260                 eth_link_speed =
1261                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10MB;
1262                 break;
1263         case ETH_LINK_SPEED_1G:
1264                 eth_link_speed =
1265                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
1266                 break;
1267         case ETH_LINK_SPEED_2_5G:
1268                 eth_link_speed =
1269                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
1270                 break;
1271         case ETH_LINK_SPEED_10G:
1272                 eth_link_speed =
1273                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
1274                 break;
1275         case ETH_LINK_SPEED_20G:
1276                 eth_link_speed =
1277                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
1278                 break;
1279         case ETH_LINK_SPEED_25G:
1280                 eth_link_speed =
1281                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
1282                 break;
1283         case ETH_LINK_SPEED_40G:
1284                 eth_link_speed =
1285                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
1286                 break;
1287         case ETH_LINK_SPEED_50G:
1288                 eth_link_speed =
1289                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
1290                 break;
1291         default:
1292                 RTE_LOG(ERR, PMD,
1293                         "Unsupported link speed %d; default to AUTO\n",
1294                         conf_link_speed);
1295                 break;
1296         }
1297         return eth_link_speed;
1298 }
1299
1300 #define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
1301                 ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
1302                 ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
1303                 ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G)
1304
1305 static int bnxt_valid_link_speed(uint32_t link_speed, uint8_t port_id)
1306 {
1307         uint32_t one_speed;
1308
1309         if (link_speed == ETH_LINK_SPEED_AUTONEG)
1310                 return 0;
1311
1312         if (link_speed & ETH_LINK_SPEED_FIXED) {
1313                 one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;
1314
1315                 if (one_speed & (one_speed - 1)) {
1316                         RTE_LOG(ERR, PMD,
1317                                 "Invalid advertised speeds (%u) for port %u\n",
1318                                 link_speed, port_id);
1319                         return -EINVAL;
1320                 }
1321                 if ((one_speed & BNXT_SUPPORTED_SPEEDS) != one_speed) {
1322                         RTE_LOG(ERR, PMD,
1323                                 "Unsupported advertised speed (%u) for port %u\n",
1324                                 link_speed, port_id);
1325                         return -EINVAL;
1326                 }
1327         } else {
1328                 if (!(link_speed & BNXT_SUPPORTED_SPEEDS)) {
1329                         RTE_LOG(ERR, PMD,
1330                                 "Unsupported advertised speeds (%u) for port %u\n",
1331                                 link_speed, port_id);
1332                         return -EINVAL;
1333                 }
1334         }
1335         return 0;
1336 }
1337
1338 static uint16_t bnxt_parse_eth_link_speed_mask(uint32_t link_speed)
1339 {
1340         uint16_t ret = 0;
1341
1342         if (link_speed == ETH_LINK_SPEED_AUTONEG)
1343                 link_speed = BNXT_SUPPORTED_SPEEDS;
1344
1345         if (link_speed & ETH_LINK_SPEED_100M)
1346                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
1347         if (link_speed & ETH_LINK_SPEED_100M_HD)
1348                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
1349         if (link_speed & ETH_LINK_SPEED_1G)
1350                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
1351         if (link_speed & ETH_LINK_SPEED_2_5G)
1352                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
1353         if (link_speed & ETH_LINK_SPEED_10G)
1354                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
1355         if (link_speed & ETH_LINK_SPEED_20G)
1356                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
1357         if (link_speed & ETH_LINK_SPEED_25G)
1358                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
1359         if (link_speed & ETH_LINK_SPEED_40G)
1360                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
1361         if (link_speed & ETH_LINK_SPEED_50G)
1362                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
1363         return ret;
1364 }
1365
1366 static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
1367 {
1368         uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;
1369
1370         switch (hw_link_speed) {
1371         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
1372                 eth_link_speed = ETH_SPEED_NUM_100M;
1373                 break;
1374         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
1375                 eth_link_speed = ETH_SPEED_NUM_1G;
1376                 break;
1377         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
1378                 eth_link_speed = ETH_SPEED_NUM_2_5G;
1379                 break;
1380         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
1381                 eth_link_speed = ETH_SPEED_NUM_10G;
1382                 break;
1383         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
1384                 eth_link_speed = ETH_SPEED_NUM_20G;
1385                 break;
1386         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
1387                 eth_link_speed = ETH_SPEED_NUM_25G;
1388                 break;
1389         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
1390                 eth_link_speed = ETH_SPEED_NUM_40G;
1391                 break;
1392         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
1393                 eth_link_speed = ETH_SPEED_NUM_50G;
1394                 break;
1395         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
1396         default:
1397                 RTE_LOG(ERR, PMD, "HWRM link speed %d not defined\n",
1398                         hw_link_speed);
1399                 break;
1400         }
1401         return eth_link_speed;
1402 }
1403
1404 static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
1405 {
1406         uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;
1407
1408         switch (hw_link_duplex) {
1409         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
1410         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
1411                 eth_link_duplex = ETH_LINK_FULL_DUPLEX;
1412                 break;
1413         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
1414                 eth_link_duplex = ETH_LINK_HALF_DUPLEX;
1415                 break;
1416         default:
1417                 RTE_LOG(ERR, PMD, "HWRM link duplex %d not defined\n",
1418                         hw_link_duplex);
1419                 break;
1420         }
1421         return eth_link_duplex;
1422 }
1423
1424 int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
1425 {
1426         int rc = 0;
1427         struct bnxt_link_info *link_info = &bp->link_info;
1428
1429         rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
1430         if (rc) {
1431                 RTE_LOG(ERR, PMD,
1432                         "Get link config failed with rc %d\n", rc);
1433                 goto exit;
1434         }
1435         if (link_info->link_up)
1436                 link->link_speed =
1437                         bnxt_parse_hw_link_speed(link_info->link_speed);
1438         else
1439                 link->link_speed = ETH_LINK_SPEED_10M;
1440         link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
1441         link->link_status = link_info->link_up;
1442         link->link_autoneg = link_info->auto_mode ==
1443                 HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
1444                 ETH_LINK_SPEED_FIXED : ETH_LINK_SPEED_AUTONEG;
1445 exit:
1446         return rc;
1447 }
1448
/*
 * Validate the configured link speeds and push a PHY configuration to
 * firmware, either autonegotiating (speed == 0) or forcing a single
 * speed, while preserving the current pause settings.
 *
 * @bp       device private data
 * @link_up  desired administrative link state
 *
 * Returns 0 on success, -EINVAL for an invalid speed configuration, or
 * the error from the HWRM exchange.
 */
int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
{
        int rc = 0;
        struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
        struct bnxt_link_info link_req;
        uint16_t speed;

        rc = bnxt_valid_link_speed(dev_conf->link_speeds,
                        bp->eth_dev->data->port_id);
        if (rc)
                goto error;

        memset(&link_req, 0, sizeof(link_req));
        /* speed == 0 means autonegotiation was requested */
        speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
        link_req.link_up = link_up;
        if (speed == 0) {
                /* Autoneg: advertise the configured speed mask.
                 * NOTE(review): auto_link_speed is hard-coded to the
                 * 50GB value here -- confirm firmware ignores it when
                 * the mask is supplied. */
                link_req.phy_flags =
                                HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
                link_req.auto_mode =
                                HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ONE_OR_BELOW;
                link_req.auto_link_speed_mask =
                        bnxt_parse_eth_link_speed_mask(dev_conf->link_speeds);
                link_req.auto_link_speed =
                                HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_50GB;
        } else {
                /* Forced speed: disable autoneg and reset the PHY */
                link_req.auto_mode = HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE;
                link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE |
                        HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
                link_req.link_speed = speed;
        }
        link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
        /* Keep whatever pause configuration is currently in effect */
        link_req.auto_pause = bp->link_info.auto_pause;
        link_req.force_pause = bp->link_info.force_pause;

        rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
        if (rc) {
                RTE_LOG(ERR, PMD,
                        "Set link config failed with rc %d\n", rc);
        }

error:
        return rc;
}