drivers/net/bnxt/bnxt_hwrm.c
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) Broadcom Limited.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Broadcom Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_version.h>

#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_ring.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"

#define HWRM_CMD_TIMEOUT                2000
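/*
 * With the 600us delay per poll iteration in bnxt_hwrm_send_message_locked()
 * below, this budget of 2000 iterations bounds the wait for a response at
 * roughly 1.2 seconds per command.
 */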

/*
 * HWRM Functions (sent to HWRM)
 * These are named bnxt_hwrm_*() and return -1 if bnxt_hwrm_send_message()
 * fails (i.e., a timeout), or a positive non-zero HWRM error code if the
 * command is failed by the ChiMP.
 */
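
/*
 * Illustrative caller-side handling (a sketch only; bnxt_hwrm_foo() is a
 * hypothetical wrapper following the convention above):
 *
 *        rc = bnxt_hwrm_foo(bp);
 *        if (rc == -1)
 *                ; // transport failure: no valid response before the timeout
 *        else if (rc > 0)
 *                ; // HWRM error code reported by the ChiMP
 */

/*
 * BAR0 command channel, as implemented by bnxt_hwrm_send_message_locked()
 * below: the request is copied into BAR0 at offset 0, zero-padded out to
 * max_req_len, the doorbell at offset 0x100 is rung, and the DMA'd response
 * buffer is then polled until its last byte carries HWRM_RESP_VALID_KEY or
 * the timeout expires.
 */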

static int bnxt_hwrm_send_message_locked(struct bnxt *bp, void *msg,
                                        uint32_t msg_len)
{
        unsigned int i;
        struct input *req = msg;
        struct output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t *data = msg;
        uint8_t *bar;
        uint8_t *valid;

        /* Write request msg to hwrm channel */
        for (i = 0; i < msg_len; i += 4) {
                bar = (uint8_t *)bp->bar0 + i;
                *(volatile uint32_t *)bar = *data;
                data++;
        }

        /* Zero the rest of the request space */
        for (; i < bp->max_req_len; i += 4) {
                bar = (uint8_t *)bp->bar0 + i;
                *(volatile uint32_t *)bar = 0;
        }

        /* Ring channel doorbell */
        bar = (uint8_t *)bp->bar0 + 0x100;
        *(volatile uint32_t *)bar = 1;

        /* Poll for the valid bit */
        for (i = 0; i < HWRM_CMD_TIMEOUT; i++) {
                /* Sanity check on the resp->resp_len */
                rte_rmb();
                if (resp->resp_len && resp->resp_len <=
                                bp->max_resp_len) {
                        /* Last byte of resp contains the valid key */
                        valid = (uint8_t *)resp + resp->resp_len - 1;
                        if (*valid == HWRM_RESP_VALID_KEY)
                                break;
                }
                rte_delay_us(600);
        }

        if (i >= HWRM_CMD_TIMEOUT) {
                RTE_LOG(ERR, PMD, "Error sending msg %x\n",
                        req->req_type);
                goto err_ret;
        }
        return 0;

err_ret:
        return -1;
}

static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg, uint32_t msg_len)
{
        int rc;

        rte_spinlock_lock(&bp->hwrm_lock);
        rc = bnxt_hwrm_send_message_locked(bp, msg, msg_len);
        rte_spinlock_unlock(&bp->hwrm_lock);
        return rc;
}

#define HWRM_PREP(req, type, cr, resp) \
        memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
        req.req_type = rte_cpu_to_le_16(HWRM_##type); \
        req.cmpl_ring = rte_cpu_to_le_16(cr); \
        req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++); \
        req.target_id = rte_cpu_to_le_16(0xffff); \
        req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr)

#define HWRM_CHECK_RESULT \
        { \
                if (rc) { \
                        RTE_LOG(ERR, PMD, "%s failed rc:%d\n", \
                                __func__, rc); \
                        return rc; \
                } \
                if (resp->error_code) { \
                        rc = rte_le_to_cpu_16(resp->error_code); \
                        RTE_LOG(ERR, PMD, "%s error %d\n", __func__, rc); \
                        return rc; \
                } \
        }

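/*
 * Every command wrapper below follows the pattern these two macros encode.
 * A minimal sketch for a hypothetical command FOO:
 *
 *        int bnxt_hwrm_foo(struct bnxt *bp)
 *        {
 *                int rc;
 *                struct hwrm_foo_input req = {.req_type = 0 };
 *                struct hwrm_foo_output *resp = bp->hwrm_cmd_resp_addr;
 *
 *                HWRM_PREP(req, FOO, -1, resp);
 *                // fill command-specific request fields here
 *                rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 *                HWRM_CHECK_RESULT;
 *                // read command-specific response fields from *resp
 *                return rc;
 *        }
 */
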
int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, CFA_L2_SET_RX_MASK, -1, resp);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
        req.mask = 0;

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}

int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t mask = 0;

        HWRM_PREP(req, CFA_L2_SET_RX_MASK, -1, resp);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

        /* FIXME: add the multicast flag once adding multicast addresses is
         * supported by ethtool.
         */
        if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
                mask = HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
        if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI)
                mask = HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
        req.mask = rte_cpu_to_le_32(HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST |
                                    mask);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}

int bnxt_hwrm_clear_filter(struct bnxt *bp,
                           struct bnxt_filter_info *filter)
{
        int rc = 0;
        struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;

        if (filter->fw_l2_filter_id == UINT64_MAX)
                return 0;

        HWRM_PREP(req, CFA_L2_FILTER_FREE, -1, resp);

        req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        filter->fw_l2_filter_id = -1;

        return 0;
}

int bnxt_hwrm_set_filter(struct bnxt *bp,
                         struct bnxt_vnic_info *vnic,
                         struct bnxt_filter_info *filter)
{
        int rc = 0;
        struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t enables = 0;

        if (filter->fw_l2_filter_id != UINT64_MAX)
                bnxt_hwrm_clear_filter(bp, filter);

        HWRM_PREP(req, CFA_L2_FILTER_ALLOC, -1, resp);

        req.flags = rte_cpu_to_le_32(filter->flags);

        enables = filter->enables |
              HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
        req.dst_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
                memcpy(req.l2_addr, filter->l2_addr,
                       ETHER_ADDR_LEN);
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
                memcpy(req.l2_addr_mask, filter->l2_addr_mask,
                       ETHER_ADDR_LEN);
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
                req.l2_ovlan = filter->l2_ovlan;
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
                req.l2_ovlan_mask = filter->l2_ovlan_mask;

        req.enables = rte_cpu_to_le_32(enables);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);

        return rc;
}

int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, void *fwd_cmd)
{
        int rc;
        struct hwrm_exec_fwd_resp_input req = {.req_type = 0 };
        struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, EXEC_FWD_RESP, -1, resp);

        memcpy(req.encap_request, fwd_cmd,
               sizeof(req.encap_request));

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}

int bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_func_qcaps_input req = {.req_type = 0 };
        struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, FUNC_QCAPS, -1, resp);

        req.fid = rte_cpu_to_le_16(0xffff);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
        if (BNXT_PF(bp)) {
                struct bnxt_pf_info *pf = &bp->pf;

                pf->fw_fid = rte_le_to_cpu_32(resp->fid);
                pf->port_id = resp->port_id;
                memcpy(pf->mac_addr, resp->mac_address, ETHER_ADDR_LEN);
                pf->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
                pf->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
                pf->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
                pf->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
                pf->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
                pf->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
                pf->first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
                pf->max_vfs = rte_le_to_cpu_16(resp->max_vfs);
        } else {
                struct bnxt_vf_info *vf = &bp->vf;

                vf->fw_fid = rte_le_to_cpu_32(resp->fid);
                memcpy(vf->mac_addr, &resp->mac_address, ETHER_ADDR_LEN);
                vf->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
                vf->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
                vf->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
                vf->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
                vf->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
                vf->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
        }

        return rc;
}

int bnxt_hwrm_func_reset(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_func_reset_input req = {.req_type = 0 };
        struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, FUNC_RESET, -1, resp);

        req.enables = rte_cpu_to_le_32(0);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}

int bnxt_hwrm_func_driver_register(struct bnxt *bp, uint32_t flags,
                                   uint32_t *vf_req_fwd)
{
        int rc;
        struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
        struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;

        if (bp->flags & BNXT_FLAG_REGISTERED)
                return 0;

        HWRM_PREP(req, FUNC_DRV_RGTR, -1, resp);
        req.flags = flags;
        req.enables = HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
                        HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD;
        req.ver_maj = RTE_VER_YEAR;
        req.ver_min = RTE_VER_MONTH;
        req.ver_upd = RTE_VER_MINOR;

        memcpy(req.vf_req_fwd, vf_req_fwd, sizeof(req.vf_req_fwd));

        req.async_event_fwd[0] |= rte_cpu_to_le_32(0x1);   /* TODO: Use MACRO */

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        bp->flags |= BNXT_FLAG_REGISTERED;

        return rc;
}

int bnxt_hwrm_ver_get(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_ver_get_input req = {.req_type = 0 };
        struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t my_version;
        uint32_t fw_version;
        uint16_t max_resp_len;
        char type[RTE_MEMZONE_NAMESIZE];

        HWRM_PREP(req, VER_GET, -1, resp);

        req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
        req.hwrm_intf_min = HWRM_VERSION_MINOR;
        req.hwrm_intf_upd = HWRM_VERSION_UPDATE;

        /*
         * Hold the lock since we may be adjusting the response pointers.
         */
        rte_spinlock_lock(&bp->hwrm_lock);
        rc = bnxt_hwrm_send_message_locked(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        RTE_LOG(INFO, PMD, "%d.%d.%d:%d.%d.%d\n",
                resp->hwrm_intf_maj, resp->hwrm_intf_min,
                resp->hwrm_intf_upd,
                resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld);
        RTE_LOG(INFO, PMD, "Driver HWRM version: %d.%d.%d\n",
                HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);

        my_version = HWRM_VERSION_MAJOR << 16;
        my_version |= HWRM_VERSION_MINOR << 8;
        my_version |= HWRM_VERSION_UPDATE;

        fw_version = resp->hwrm_intf_maj << 16;
        fw_version |= resp->hwrm_intf_min << 8;
        fw_version |= resp->hwrm_intf_upd;
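
        /*
         * Example: interface version 1.5.1 packs to 0x010501
         * (1 << 16 | 5 << 8 | 1), so the comparisons below order
         * versions numerically.
         */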

        if (resp->hwrm_intf_maj != HWRM_VERSION_MAJOR) {
                RTE_LOG(ERR, PMD, "Unsupported firmware API version\n");
                rc = -EINVAL;
                goto error;
        }

        if (my_version != fw_version) {
                RTE_LOG(INFO, PMD, "BNXT Driver/HWRM API mismatch.\n");
                if (my_version < fw_version) {
                        RTE_LOG(INFO, PMD,
                                "Firmware API version is newer than driver.\n");
                        RTE_LOG(INFO, PMD,
                                "The driver may be missing features.\n");
                } else {
                        RTE_LOG(INFO, PMD,
                                "Firmware API version is older than driver.\n");
                        RTE_LOG(INFO, PMD,
                                "Not all driver features may be functional.\n");
                }
        }

        if (bp->max_req_len > resp->max_req_win_len) {
                RTE_LOG(ERR, PMD, "Unsupported request length\n");
                rc = -EINVAL;
        }
        bp->max_req_len = resp->max_req_win_len;
        max_resp_len = resp->max_resp_len;
        if (bp->max_resp_len != max_resp_len) {
                sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x",
                        bp->pdev->addr.domain, bp->pdev->addr.bus,
                        bp->pdev->addr.devid, bp->pdev->addr.function);

                rte_free(bp->hwrm_cmd_resp_addr);

                bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0);
                if (bp->hwrm_cmd_resp_addr == NULL) {
                        rc = -ENOMEM;
                        goto error;
                }
                bp->hwrm_cmd_resp_dma_addr =
                        rte_malloc_virt2phy(bp->hwrm_cmd_resp_addr);
                bp->max_resp_len = max_resp_len;
        }

error:
        rte_spinlock_unlock(&bp->hwrm_lock);
        return rc;
}

int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
{
        int rc;
        struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
        struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;

        if (!(bp->flags & BNXT_FLAG_REGISTERED))
                return 0;

        HWRM_PREP(req, FUNC_DRV_UNRGTR, -1, resp);
        req.flags = flags;

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        bp->flags &= ~BNXT_FLAG_REGISTERED;

        return rc;
}

static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
{
        int rc = 0;
        struct hwrm_port_phy_cfg_input req = {0};
        struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t enables = 0;
        uint32_t link_speed_mask =
                HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;

        HWRM_PREP(req, PORT_PHY_CFG, -1, resp);

        if (conf->link_up) {
                req.flags = rte_cpu_to_le_32(conf->phy_flags);
                req.force_link_speed = rte_cpu_to_le_16(conf->link_speed);
                /*
                 * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
                 * any auto mode, even "none".
                 */
                if (!conf->link_speed) {
                        req.auto_mode = conf->auto_mode;
                        enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
                        if (conf->auto_mode ==
                            HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK) {
                                req.auto_link_speed_mask =
                                        conf->auto_link_speed_mask;
                                enables |= link_speed_mask;
                        }
                        if (bp->link_info.auto_link_speed) {
                                req.auto_link_speed =
                                        bp->link_info.auto_link_speed;
                                enables |=
                                HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED;
                        }
                }
                req.auto_duplex = conf->duplex;
                enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
                req.auto_pause = conf->auto_pause;
                req.force_pause = conf->force_pause;
                /* Use AUTO_PAUSE when pause autonegotiation is requested
                 * without a forced setting; otherwise use FORCE_PAUSE.
                 */
                if (req.auto_pause && !req.force_pause)
                        enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
                else
                        enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;

                req.enables = rte_cpu_to_le_32(enables);
        } else {
                req.flags =
                rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DOWN);
                RTE_LOG(INFO, PMD, "Force Link Down\n");
        }

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}

static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
                                   struct bnxt_link_info *link_info)
{
        int rc = 0;
        struct hwrm_port_phy_qcfg_input req = {0};
        struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, PORT_PHY_QCFG, -1, resp);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        link_info->phy_link_status = resp->link;
        link_info->link_up =
                (link_info->phy_link_status ==
                 HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) ? 1 : 0;
        link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
        link_info->duplex = resp->duplex;
        link_info->pause = resp->pause;
        link_info->auto_pause = resp->auto_pause;
        link_info->force_pause = resp->force_pause;
        link_info->auto_mode = resp->auto_mode;

        link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
        link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
        link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
        link_info->phy_ver[0] = resp->phy_maj;
        link_info->phy_ver[1] = resp->phy_min;
        link_info->phy_ver[2] = resp->phy_bld;

        return rc;
}

int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
        struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, QUEUE_QPORTCFG, -1, resp);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

#define GET_QUEUE_INFO(x) \
        bp->cos_queue[x].id = resp->queue_id##x; \
        bp->cos_queue[x].profile = resp->queue_id##x##_service_profile
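/*
 * GET_QUEUE_INFO(0), for example, expands via token pasting to:
 *        bp->cos_queue[0].id = resp->queue_id0;
 *        bp->cos_queue[0].profile = resp->queue_id0_service_profile;
 */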

        GET_QUEUE_INFO(0);
        GET_QUEUE_INFO(1);
        GET_QUEUE_INFO(2);
        GET_QUEUE_INFO(3);
        GET_QUEUE_INFO(4);
        GET_QUEUE_INFO(5);
        GET_QUEUE_INFO(6);
        GET_QUEUE_INFO(7);

        return rc;
}

int bnxt_hwrm_ring_alloc(struct bnxt *bp,
                         struct bnxt_ring *ring,
                         uint32_t ring_type, uint32_t map_index,
                         uint32_t stats_ctx_id)
{
        int rc = 0;
        struct hwrm_ring_alloc_input req = {.req_type = 0 };
        struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_ALLOC, -1, resp);

        req.enables = rte_cpu_to_le_32(0);

        req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
        req.fbo = rte_cpu_to_le_32(0);
        /* Association of ring index with doorbell index */
        req.logical_id = rte_cpu_to_le_16(map_index);

        switch (ring_type) {
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
                req.queue_id = bp->cos_queue[0].id;
                /* FALLTHROUGH */
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
                req.ring_type = ring_type;
                req.cmpl_ring_id =
                    rte_cpu_to_le_16(bp->grp_info[map_index].cp_fw_ring_id);
                req.length = rte_cpu_to_le_32(ring->ring_size);
                req.stat_ctx_id = rte_cpu_to_le_16(stats_ctx_id);
                req.enables = rte_cpu_to_le_32(rte_le_to_cpu_32(req.enables) |
                        HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID);
                break;
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_CMPL:
                req.ring_type = ring_type;
                /*
                 * TODO: Some HWRM versions crash with
                 * HWRM_RING_ALLOC_INPUT_INT_MODE_POLL
                 */
                req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
                req.length = rte_cpu_to_le_32(ring->ring_size);
                break;
        default:
                RTE_LOG(ERR, PMD, "hwrm alloc invalid ring type %d\n",
                        ring_type);
                return -1;
        }

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        if (rc || resp->error_code) {
                if (rc == 0 && resp->error_code)
                        rc = rte_le_to_cpu_16(resp->error_code);
                switch (ring_type) {
                case HWRM_RING_FREE_INPUT_RING_TYPE_CMPL:
                        RTE_LOG(ERR, PMD,
                                "hwrm_ring_alloc cp failed. rc:%d\n", rc);
                        return rc;
                case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
                        RTE_LOG(ERR, PMD,
                                "hwrm_ring_alloc rx failed. rc:%d\n", rc);
                        return rc;
                case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
                        RTE_LOG(ERR, PMD,
                                "hwrm_ring_alloc tx failed. rc:%d\n", rc);
                        return rc;
                default:
                        RTE_LOG(ERR, PMD, "Invalid ring. rc:%d\n", rc);
                        return rc;
                }
        }

        ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);
        return rc;
}

int bnxt_hwrm_ring_free(struct bnxt *bp,
                        struct bnxt_ring *ring, uint32_t ring_type)
{
        int rc;
        struct hwrm_ring_free_input req = {.req_type = 0 };
        struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_FREE, -1, resp);

        req.ring_type = ring_type;
        req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        if (rc || resp->error_code) {
                if (rc == 0 && resp->error_code)
                        rc = rte_le_to_cpu_16(resp->error_code);

                switch (ring_type) {
                case HWRM_RING_FREE_INPUT_RING_TYPE_CMPL:
                        RTE_LOG(ERR, PMD, "hwrm_ring_free cp failed. rc:%d\n",
                                rc);
                        return rc;
                case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
                        RTE_LOG(ERR, PMD, "hwrm_ring_free rx failed. rc:%d\n",
                                rc);
                        return rc;
                case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
                        RTE_LOG(ERR, PMD, "hwrm_ring_free tx failed. rc:%d\n",
                                rc);
                        return rc;
                default:
                        RTE_LOG(ERR, PMD, "Invalid ring, rc:%d\n", rc);
                        return rc;
                }
        }
        return 0;
}

int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
{
        int rc = 0;
        struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
        struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_GRP_ALLOC, -1, resp);

        req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
        req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
        req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
        req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        bp->grp_info[idx].fw_grp_id =
            rte_le_to_cpu_16(resp->ring_group_id);

        return rc;
}

int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
{
        int rc;
        struct hwrm_ring_grp_free_input req = {.req_type = 0 };
        struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_GRP_FREE, -1, resp);

        req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;
        return rc;
}

int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
{
        int rc = 0;
        struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
        struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, STAT_CTX_CLR_STATS, -1, resp);

        if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
                return rc;

        req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
        req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}

int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp,
                             struct bnxt_cp_ring_info *cpr, unsigned int idx)
{
        int rc;
        struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
        struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, STAT_CTX_ALLOC, -1, resp);

        req.update_period_ms = rte_cpu_to_le_32(1000);

        req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++);
        req.stats_dma_addr =
            rte_cpu_to_le_64(cpr->hw_stats_map);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        cpr->hw_stats_ctx_id = rte_le_to_cpu_16(resp->stat_ctx_id);
        bp->grp_info[idx].fw_stats_ctx = cpr->hw_stats_ctx_id;

        return rc;
}

int bnxt_hwrm_stat_ctx_free(struct bnxt *bp,
                            struct bnxt_cp_ring_info *cpr, unsigned int idx)
{
        int rc;
        struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
        struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, STAT_CTX_FREE, -1, resp);

        req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
        req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
        bp->grp_info[idx].fw_stats_ctx = cpr->hw_stats_ctx_id;

        return rc;
}

int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0, i, j;
        struct hwrm_vnic_alloc_input req = { 0 };
        struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        /* map ring groups to this vnic */
        for (i = vnic->start_grp_id, j = 0; i <= vnic->end_grp_id; i++, j++) {
                if (bp->grp_info[i].fw_grp_id == (uint16_t)HWRM_NA_SIGNATURE) {
                        RTE_LOG(ERR, PMD,
                                "Not enough ring groups avail:%x req:%x\n", j,
                                (vnic->end_grp_id - vnic->start_grp_id) + 1);
                        break;
                }
                vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
        }

        vnic->fw_rss_cos_lb_ctx = (uint16_t)HWRM_NA_SIGNATURE;
        vnic->ctx_is_rss_cos_lb = HW_CONTEXT_NONE;

        HWRM_PREP(req, VNIC_ALLOC, -1, resp);

        if (vnic->func_default)
                req.flags = HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT;
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
        RTE_LOG(DEBUG, PMD, "VNIC ID %x\n", vnic->fw_vnic_id);
        return rc;
}

int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_vnic_cfg_input req = {.req_type = 0 };
        struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;

        if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
                RTE_LOG(DEBUG, PMD, "VNIC ID %x\n", vnic->fw_vnic_id);
                return rc;
        }

        HWRM_PREP(req, VNIC_CFG, -1, resp);

        /* Only RSS is supported for now; TBD: COS & LB */
        req.enables =
            rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP |
                             HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE |
                             HWRM_VNIC_CFG_INPUT_ENABLES_MRU);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
        req.dflt_ring_grp =
                rte_cpu_to_le_16(bp->grp_info[vnic->start_grp_id].fw_grp_id);
        req.rss_rule = rte_cpu_to_le_16(vnic->fw_rss_cos_lb_ctx);
        req.cos_rule = rte_cpu_to_le_16(0xffff);
        req.lb_rule = rte_cpu_to_le_16(0xffff);
        req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
                                   ETHER_CRC_LEN + VLAN_TAG_SIZE);
        if (vnic->func_default)
                req.flags = 1;
        if (vnic->vlan_strip)
                req.flags |=
                    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}

int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
        struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
                                                bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC, -1, resp);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        vnic->fw_rss_cos_lb_ctx = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
        RTE_LOG(DEBUG, PMD, "VNIC RSS Rule %x\n", vnic->fw_rss_cos_lb_ctx);

        return rc;
}

int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
        struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
                                                bp->hwrm_cmd_resp_addr;

        if (vnic->fw_rss_cos_lb_ctx == 0xffff) {
                RTE_LOG(DEBUG, PMD,
                        "VNIC RSS Rule %x\n", vnic->fw_rss_cos_lb_ctx);
                return rc;
        }

        HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE, -1, resp);

        req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(vnic->fw_rss_cos_lb_ctx);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        vnic->fw_rss_cos_lb_ctx = INVALID_HW_RING_ID;

        return rc;
}

int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_vnic_free_input req = {.req_type = 0 };
        struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;

        if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
                RTE_LOG(DEBUG, PMD, "VNIC FREE ID %x\n", vnic->fw_vnic_id);
                return rc;
        }

        HWRM_PREP(req, VNIC_FREE, -1, resp);

        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        vnic->fw_vnic_id = INVALID_HW_RING_ID;
        return rc;
}

int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
                           struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
        struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, VNIC_RSS_CFG, -1, resp);

        req.hash_type = rte_cpu_to_le_32(vnic->hash_type);

        req.ring_grp_tbl_addr =
            rte_cpu_to_le_64(vnic->rss_table_dma_addr);
        req.hash_key_tbl_addr =
            rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
        req.rss_ctx_idx = rte_cpu_to_le_16(vnic->fw_rss_cos_lb_ctx);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}

/*
 * HWRM utility functions
 */

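/*
 * Note on the bp->grp_info indexing used below: entry 0 tracks the default
 * completion ring, RX queue i uses entry i + 1, and TX queue i uses entry
 * rx_cp_nr_rings + i + 1.
 */
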
int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
{
        unsigned int i;
        int rc = 0;

        for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
                struct bnxt_tx_queue *txq;
                struct bnxt_rx_queue *rxq;
                struct bnxt_cp_ring_info *cpr;

                if (i >= bp->rx_cp_nr_rings) {
                        txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
                        cpr = txq->cp_ring;
                } else {
                        rxq = bp->rx_queues[i];
                        cpr = rxq->cp_ring;
                }

                rc = bnxt_hwrm_stat_clear(bp, cpr);
                if (rc)
                        return rc;
        }
        return 0;
}

int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
{
        int rc;
        unsigned int i;
        struct bnxt_cp_ring_info *cpr;

        for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
                unsigned int idx = i + 1;

                if (i >= bp->rx_cp_nr_rings)
                        cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
                else
                        cpr = bp->rx_queues[i]->cp_ring;
                if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
                        rc = bnxt_hwrm_stat_ctx_free(bp, cpr, idx);
                        if (rc)
                                return rc;
                }
        }
        return 0;
}

int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
{
        unsigned int i;
        int rc = 0;

        for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
                struct bnxt_tx_queue *txq;
                struct bnxt_rx_queue *rxq;
                struct bnxt_cp_ring_info *cpr;
                unsigned int idx = i + 1;

                if (i >= bp->rx_cp_nr_rings) {
                        txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
                        cpr = txq->cp_ring;
                } else {
                        rxq = bp->rx_queues[i];
                        cpr = rxq->cp_ring;
                }

                rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, idx);

                if (rc)
                        return rc;
        }
        return rc;
}

int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
{
        uint16_t i;
        uint32_t rc = 0;

        for (i = 0; i < bp->rx_cp_nr_rings; i++) {
                unsigned int idx = i + 1;

                if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID) {
                        RTE_LOG(ERR, PMD,
                                "Attempt to free invalid ring group %d\n",
                                idx);
                        continue;
                }

                rc = bnxt_hwrm_ring_grp_free(bp, idx);

                if (rc)
                        return rc;
        }
        return rc;
}

static void bnxt_free_cp_ring(struct bnxt *bp,
                              struct bnxt_cp_ring_info *cpr, unsigned int idx)
{
        struct bnxt_ring *cp_ring = cpr->cp_ring_struct;

        bnxt_hwrm_ring_free(bp, cp_ring,
                        HWRM_RING_FREE_INPUT_RING_TYPE_CMPL);
        cp_ring->fw_ring_id = INVALID_HW_RING_ID;
        bp->grp_info[idx].cp_fw_ring_id = INVALID_HW_RING_ID;
        memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
                        sizeof(*cpr->cp_desc_ring));
        cpr->cp_raw_cons = 0;
}

int bnxt_free_all_hwrm_rings(struct bnxt *bp)
{
        unsigned int i;
        int rc = 0;

        for (i = 0; i < bp->tx_cp_nr_rings; i++) {
                struct bnxt_tx_queue *txq = bp->tx_queues[i];
                struct bnxt_tx_ring_info *txr = txq->tx_ring;
                struct bnxt_ring *ring = txr->tx_ring_struct;
                struct bnxt_cp_ring_info *cpr = txq->cp_ring;
                unsigned int idx = bp->rx_cp_nr_rings + i + 1;

                if (ring->fw_ring_id != INVALID_HW_RING_ID) {
                        bnxt_hwrm_ring_free(bp, ring,
                                        HWRM_RING_FREE_INPUT_RING_TYPE_TX);
                        ring->fw_ring_id = INVALID_HW_RING_ID;
                        memset(txr->tx_desc_ring, 0,
                                        txr->tx_ring_struct->ring_size *
                                        sizeof(*txr->tx_desc_ring));
                        memset(txr->tx_buf_ring, 0,
                                        txr->tx_ring_struct->ring_size *
                                        sizeof(*txr->tx_buf_ring));
                        txr->tx_prod = 0;
                        txr->tx_cons = 0;
                }
                if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
                        bnxt_free_cp_ring(bp, cpr, idx);
        }

        for (i = 0; i < bp->rx_cp_nr_rings; i++) {
                struct bnxt_rx_queue *rxq = bp->rx_queues[i];
                struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
                struct bnxt_ring *ring = rxr->rx_ring_struct;
                struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
                unsigned int idx = i + 1;

                if (ring->fw_ring_id != INVALID_HW_RING_ID) {
                        bnxt_hwrm_ring_free(bp, ring,
                                        HWRM_RING_FREE_INPUT_RING_TYPE_RX);
                        ring->fw_ring_id = INVALID_HW_RING_ID;
                        bp->grp_info[idx].rx_fw_ring_id = INVALID_HW_RING_ID;
                        memset(rxr->rx_desc_ring, 0,
                                        rxr->rx_ring_struct->ring_size *
                                        sizeof(*rxr->rx_desc_ring));
                        memset(rxr->rx_buf_ring, 0,
                                        rxr->rx_ring_struct->ring_size *
                                        sizeof(*rxr->rx_buf_ring));
                        rxr->rx_prod = 0;
                }
                if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
                        bnxt_free_cp_ring(bp, cpr, idx);
        }

        /* Default completion ring */
        {
                struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;

                if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
                        bnxt_free_cp_ring(bp, cpr, 0);
        }

        return rc;
}

int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
{
        uint16_t i;
        uint32_t rc = 0;

        for (i = 0; i < bp->rx_cp_nr_rings; i++) {
                unsigned int idx = i + 1;

                if (bp->grp_info[idx].cp_fw_ring_id == INVALID_HW_RING_ID ||
                    bp->grp_info[idx].rx_fw_ring_id == INVALID_HW_RING_ID)
                        continue;

                rc = bnxt_hwrm_ring_grp_alloc(bp, idx);

                if (rc)
                        return rc;
        }
        return rc;
}

void bnxt_free_hwrm_resources(struct bnxt *bp)
{
        /* Free the rte_malloc()'d response buffer */
        rte_free(bp->hwrm_cmd_resp_addr);
        bp->hwrm_cmd_resp_addr = NULL;
        bp->hwrm_cmd_resp_dma_addr = 0;
}

int bnxt_alloc_hwrm_resources(struct bnxt *bp)
{
        struct rte_pci_device *pdev = bp->pdev;
        char type[RTE_MEMZONE_NAMESIZE];

        sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x", pdev->addr.domain,
                pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
        bp->max_req_len = HWRM_MAX_REQ_LEN;
        bp->max_resp_len = HWRM_MAX_RESP_LEN;
        bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
        if (bp->hwrm_cmd_resp_addr == NULL)
                return -ENOMEM;
        bp->hwrm_cmd_resp_dma_addr =
                rte_malloc_virt2phy(bp->hwrm_cmd_resp_addr);
        rte_spinlock_init(&bp->hwrm_lock);

        return 0;
}

int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        struct bnxt_filter_info *filter;
        int rc = 0;

        STAILQ_FOREACH(filter, &vnic->filter, next) {
                rc = bnxt_hwrm_clear_filter(bp, filter);
                if (rc)
                        break;
        }
        return rc;
}

int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        struct bnxt_filter_info *filter;
        int rc = 0;

        STAILQ_FOREACH(filter, &vnic->filter, next) {
                rc = bnxt_hwrm_set_filter(bp, vnic, filter);
                if (rc)
                        break;
        }
        return rc;
}

void bnxt_free_all_hwrm_resources(struct bnxt *bp)
{
        struct bnxt_vnic_info *vnic;
        unsigned int i;

        if (bp->vnic_info == NULL)
                return;

        vnic = &bp->vnic_info[0];
        bnxt_hwrm_cfa_l2_clear_rx_mask(bp, vnic);

        /* VNIC resources */
        for (i = 0; i < bp->nr_vnics; i++) {
                struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

                bnxt_clear_hwrm_vnic_filters(bp, vnic);

                bnxt_hwrm_vnic_ctx_free(bp, vnic);
                bnxt_hwrm_vnic_free(bp, vnic);
        }
        /* Ring resources */
        bnxt_free_all_hwrm_rings(bp);
        bnxt_free_all_hwrm_ring_grps(bp);
        bnxt_free_all_hwrm_stat_ctxs(bp);
}

static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
{
        uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;

        if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
                return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;

        switch (conf_link_speed) {
        case ETH_LINK_SPEED_10M_HD:
        case ETH_LINK_SPEED_100M_HD:
                return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
        }
        return hw_link_duplex;
}

static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
{
        uint16_t eth_link_speed = 0;

        if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
                return ETH_LINK_SPEED_AUTONEG;

        switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
        case ETH_LINK_SPEED_100M:
        case ETH_LINK_SPEED_100M_HD:
                eth_link_speed =
                        HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
                break;
        case ETH_LINK_SPEED_1G:
                eth_link_speed =
                        HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
                break;
        case ETH_LINK_SPEED_2_5G:
                eth_link_speed =
                        HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
                break;
        case ETH_LINK_SPEED_10G:
                eth_link_speed =
                        HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
                break;
        case ETH_LINK_SPEED_20G:
                eth_link_speed =
                        HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
                break;
        case ETH_LINK_SPEED_25G:
                eth_link_speed =
                        HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
                break;
        case ETH_LINK_SPEED_40G:
                eth_link_speed =
                        HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
                break;
        case ETH_LINK_SPEED_50G:
                eth_link_speed =
                        HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
                break;
        default:
                RTE_LOG(ERR, PMD,
                        "Unsupported link speed %d; default to AUTO\n",
                        conf_link_speed);
                break;
        }
        return eth_link_speed;
}

#define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
                ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
                ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
                ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G)

static int bnxt_valid_link_speed(uint32_t link_speed, uint8_t port_id)
{
        uint32_t one_speed;

        if (link_speed == ETH_LINK_SPEED_AUTONEG)
                return 0;

        if (link_speed & ETH_LINK_SPEED_FIXED) {
                one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;

                if (one_speed & (one_speed - 1)) {
                        RTE_LOG(ERR, PMD,
                                "Invalid advertised speeds (%u) for port %u\n",
                                link_speed, port_id);
                        return -EINVAL;
                }
                if ((one_speed & BNXT_SUPPORTED_SPEEDS) != one_speed) {
                        RTE_LOG(ERR, PMD,
                                "Unsupported advertised speed (%u) for port %u\n",
                                link_speed, port_id);
                        return -EINVAL;
                }
        } else {
                if (!(link_speed & BNXT_SUPPORTED_SPEEDS)) {
                        RTE_LOG(ERR, PMD,
                                "Unsupported advertised speeds (%u) for port %u\n",
                                link_speed, port_id);
                        return -EINVAL;
                }
        }
        return 0;
}

static uint16_t
bnxt_parse_eth_link_speed_mask(struct bnxt *bp, uint32_t link_speed)
{
        uint16_t ret = 0;

        if (link_speed == ETH_LINK_SPEED_AUTONEG) {
                if (bp->link_info.support_speeds)
                        return bp->link_info.support_speeds;
                link_speed = BNXT_SUPPORTED_SPEEDS;
        }

        if (link_speed & ETH_LINK_SPEED_100M)
                ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
        if (link_speed & ETH_LINK_SPEED_100M_HD)
                ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
        if (link_speed & ETH_LINK_SPEED_1G)
                ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
        if (link_speed & ETH_LINK_SPEED_2_5G)
                ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
        if (link_speed & ETH_LINK_SPEED_10G)
                ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
        if (link_speed & ETH_LINK_SPEED_20G)
                ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
        if (link_speed & ETH_LINK_SPEED_25G)
                ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
        if (link_speed & ETH_LINK_SPEED_40G)
                ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
        if (link_speed & ETH_LINK_SPEED_50G)
                ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
        return ret;
}

static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
{
        uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;

        switch (hw_link_speed) {
        case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
                eth_link_speed = ETH_SPEED_NUM_100M;
                break;
        case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
                eth_link_speed = ETH_SPEED_NUM_1G;
                break;
        case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
                eth_link_speed = ETH_SPEED_NUM_2_5G;
                break;
        case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
                eth_link_speed = ETH_SPEED_NUM_10G;
                break;
        case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
                eth_link_speed = ETH_SPEED_NUM_20G;
                break;
        case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
                eth_link_speed = ETH_SPEED_NUM_25G;
                break;
        case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
                eth_link_speed = ETH_SPEED_NUM_40G;
                break;
        case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
                eth_link_speed = ETH_SPEED_NUM_50G;
                break;
        case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
        default:
                RTE_LOG(ERR, PMD, "HWRM link speed %d not defined\n",
                        hw_link_speed);
                break;
        }
        return eth_link_speed;
}

static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
{
        uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;

        switch (hw_link_duplex) {
        case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
        case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
                eth_link_duplex = ETH_LINK_FULL_DUPLEX;
                break;
        case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
                eth_link_duplex = ETH_LINK_HALF_DUPLEX;
                break;
        default:
                RTE_LOG(ERR, PMD, "HWRM link duplex %d not defined\n",
                        hw_link_duplex);
                break;
        }
        return eth_link_duplex;
}

int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
{
        int rc = 0;
        struct bnxt_link_info *link_info = &bp->link_info;

        rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
        if (rc) {
                RTE_LOG(ERR, PMD,
                        "Get link config failed with rc %d\n", rc);
                goto exit;
        }
        if (link_info->link_speed)
                link->link_speed =
                        bnxt_parse_hw_link_speed(link_info->link_speed);
        else
                link->link_speed = ETH_SPEED_NUM_NONE;
        link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
        link->link_status = link_info->link_up;
        link->link_autoneg = link_info->auto_mode ==
                HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
                ETH_LINK_FIXED : ETH_LINK_AUTONEG;
exit:
        return rc;
}

int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
{
        int rc = 0;
        struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
        struct bnxt_link_info link_req;
        uint16_t speed;

        if (BNXT_NPAR_PF(bp) || BNXT_VF(bp))
                return 0;

        rc = bnxt_valid_link_speed(dev_conf->link_speeds,
                        bp->eth_dev->data->port_id);
        if (rc)
                goto error;

        memset(&link_req, 0, sizeof(link_req));
        link_req.link_up = link_up;
        if (!link_up)
                goto port_phy_cfg;

        speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
        link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
        if (speed == 0) {
                link_req.phy_flags |=
                                HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
                link_req.auto_mode =
                                HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
                link_req.auto_link_speed_mask =
                        bnxt_parse_eth_link_speed_mask(bp,
                                                       dev_conf->link_speeds);
        } else {
                link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
                link_req.link_speed = speed;
                RTE_LOG(INFO, PMD, "Set Link Speed %x\n", speed);
        }
        link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
        link_req.auto_pause = bp->link_info.auto_pause;
        link_req.force_pause = bp->link_info.force_pause;

port_phy_cfg:
        rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
        if (rc) {
                RTE_LOG(ERR, PMD,
                        "Set link config failed with rc %d\n", rc);
        }

error:
        return rc;
}

/* JIRA 22088 */
int bnxt_hwrm_func_qcfg(struct bnxt *bp)
{
        struct hwrm_func_qcfg_input req = {0};
        struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
        int rc = 0;

        HWRM_PREP(req, FUNC_QCFG, -1, resp);
        req.fid = rte_cpu_to_le_16(0xffff);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        if (BNXT_VF(bp)) {
                struct bnxt_vf_info *vf = &bp->vf;

                /* Hard-coded 0xfff VLAN ID mask */
                vf->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
        }

        switch (resp->port_partition_type) {
        case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
        case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
        case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
                bp->port_partition_type = resp->port_partition_type;
                break;
        default:
                bp->port_partition_type = 0;
                break;
        }

        return rc;
}