New upstream version 16.11.4
[deb_dpdk.git] / drivers / net / bnxt / bnxt_hwrm.c
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) Broadcom Limited.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Broadcom Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33
34 #include <rte_byteorder.h>
35 #include <rte_common.h>
36 #include <rte_cycles.h>
37 #include <rte_malloc.h>
38 #include <rte_memzone.h>
39 #include <rte_version.h>
40
41 #include "bnxt.h"
42 #include "bnxt_cpr.h"
43 #include "bnxt_filter.h"
44 #include "bnxt_hwrm.h"
45 #include "bnxt_rxq.h"
46 #include "bnxt_rxr.h"
47 #include "bnxt_ring.h"
48 #include "bnxt_txq.h"
49 #include "bnxt_txr.h"
50 #include "bnxt_vnic.h"
51 #include "hsi_struct_def_dpdk.h"
52
/* Max valid-bit poll iterations (each followed by rte_delay_us(600)). */
#define HWRM_CMD_TIMEOUT                2000
54
55 /*
56  * HWRM Functions (sent to HWRM)
57  * These are named bnxt_hwrm_*() and return -1 if bnxt_hwrm_send_message()
58  * fails (ie: a timeout), and a positive non-zero HWRM error code if the HWRM
59  * command was failed by the ChiMP.
60  */
61
62 static int bnxt_hwrm_send_message_locked(struct bnxt *bp, void *msg,
63                                         uint32_t msg_len)
64 {
65         unsigned int i;
66         struct input *req = msg;
67         struct output *resp = bp->hwrm_cmd_resp_addr;
68         uint32_t *data = msg;
69         uint8_t *bar;
70         uint8_t *valid;
71
72         /* Write request msg to hwrm channel */
73         for (i = 0; i < msg_len; i += 4) {
74                 bar = (uint8_t *)bp->bar0 + i;
75                 *(volatile uint32_t *)bar = *data;
76                 data++;
77         }
78
79         /* Zero the rest of the request space */
80         for (; i < bp->max_req_len; i += 4) {
81                 bar = (uint8_t *)bp->bar0 + i;
82                 *(volatile uint32_t *)bar = 0;
83         }
84
85         /* Ring channel doorbell */
86         bar = (uint8_t *)bp->bar0 + 0x100;
87         *(volatile uint32_t *)bar = 1;
88
89         /* Poll for the valid bit */
90         for (i = 0; i < HWRM_CMD_TIMEOUT; i++) {
91                 /* Sanity check on the resp->resp_len */
92                 rte_rmb();
93                 if (resp->resp_len && resp->resp_len <=
94                                 bp->max_resp_len) {
95                         /* Last byte of resp contains the valid key */
96                         valid = (uint8_t *)resp + resp->resp_len - 1;
97                         if (*valid == HWRM_RESP_VALID_KEY)
98                                 break;
99                 }
100                 rte_delay_us(600);
101         }
102
103         if (i >= HWRM_CMD_TIMEOUT) {
104                 RTE_LOG(ERR, PMD, "Error sending msg %x\n",
105                         req->req_type);
106                 goto err_ret;
107         }
108         return 0;
109
110 err_ret:
111         return -1;
112 }
113
114 static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg, uint32_t msg_len)
115 {
116         int rc;
117
118         rte_spinlock_lock(&bp->hwrm_lock);
119         rc = bnxt_hwrm_send_message_locked(bp, msg, msg_len);
120         rte_spinlock_unlock(&bp->hwrm_lock);
121         return rc;
122 }
123
/*
 * Initialize the common header of HWRM request 'req': zero the response
 * buffer and fill in command type, completion ring, sequence id, target
 * and response DMA address.  Wrapped in do { } while (0) so the expansion
 * is a single statement and is safe inside unbraced if/else bodies.
 */
#define HWRM_PREP(req, type, cr, resp) \
	do { \
		memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
		req.req_type = rte_cpu_to_le_16(HWRM_##type); \
		req.cmpl_ring = rte_cpu_to_le_16(cr); \
		req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++); \
		req.target_id = rte_cpu_to_le_16(0xffff); \
		req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr); \
	} while (0)
131
/*
 * Return from the calling function when either the transport failed
 * (rc != 0) or the firmware reported an error in the response.
 * Requires 'rc' and 'resp' in the caller's scope.  do { } while (0)
 * replaces the bare block so the macro is a single statement (safe in
 * unbraced if/else).  Note: must NOT be used while holding hwrm_lock,
 * since it returns without unlocking.
 */
#define HWRM_CHECK_RESULT \
	do { \
		if (rc) { \
			RTE_LOG(ERR, PMD, "%s failed rc:%d\n", \
				__func__, rc); \
			return rc; \
		} \
		if (resp->error_code) { \
			rc = rte_le_to_cpu_16(resp->error_code); \
			RTE_LOG(ERR, PMD, "%s error %d\n", __func__, rc); \
			return rc; \
		} \
	} while (0)
145
146 int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
147 {
148         int rc = 0;
149         struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
150         struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
151
152         HWRM_PREP(req, CFA_L2_SET_RX_MASK, -1, resp);
153         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
154         req.mask = 0;
155
156         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
157
158         HWRM_CHECK_RESULT;
159
160         return rc;
161 }
162
163 int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
164 {
165         int rc = 0;
166         struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
167         struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
168         uint32_t mask = 0;
169
170         HWRM_PREP(req, CFA_L2_SET_RX_MASK, -1, resp);
171         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
172
173         /* FIXME add multicast flag, when multicast adding options is supported
174          * by ethtool.
175          */
176         if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
177                 mask = HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
178         if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI)
179                 mask = HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
180         req.mask = rte_cpu_to_le_32(mask);
181
182         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
183
184         HWRM_CHECK_RESULT;
185
186         return rc;
187 }
188
189 int bnxt_hwrm_clear_filter(struct bnxt *bp,
190                            struct bnxt_filter_info *filter)
191 {
192         int rc = 0;
193         struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
194         struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;
195
196         if (filter->fw_l2_filter_id == UINT64_MAX)
197                 return 0;
198
199         HWRM_PREP(req, CFA_L2_FILTER_FREE, -1, resp);
200
201         req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
202
203         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
204
205         HWRM_CHECK_RESULT;
206
207         filter->fw_l2_filter_id = -1;
208
209         return 0;
210 }
211
212 int bnxt_hwrm_set_filter(struct bnxt *bp,
213                          struct bnxt_vnic_info *vnic,
214                          struct bnxt_filter_info *filter)
215 {
216         int rc = 0;
217         struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
218         struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
219         uint32_t enables = 0;
220
221         if (filter->fw_l2_filter_id != UINT64_MAX)
222                 bnxt_hwrm_clear_filter(bp, filter);
223
224         HWRM_PREP(req, CFA_L2_FILTER_ALLOC, -1, resp);
225
226         req.flags = rte_cpu_to_le_32(filter->flags);
227
228         enables = filter->enables |
229               HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
230         req.dst_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
231
232         if (enables &
233             HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
234                 memcpy(req.l2_addr, filter->l2_addr,
235                        ETHER_ADDR_LEN);
236         if (enables &
237             HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
238                 memcpy(req.l2_addr_mask, filter->l2_addr_mask,
239                        ETHER_ADDR_LEN);
240         if (enables &
241             HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
242                 req.l2_ovlan = filter->l2_ovlan;
243         if (enables &
244             HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
245                 req.l2_ovlan_mask = filter->l2_ovlan_mask;
246
247         req.enables = rte_cpu_to_le_32(enables);
248
249         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
250
251         HWRM_CHECK_RESULT;
252
253         filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);
254
255         return rc;
256 }
257
258 int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, void *fwd_cmd)
259 {
260         int rc;
261         struct hwrm_exec_fwd_resp_input req = {.req_type = 0 };
262         struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
263
264         HWRM_PREP(req, EXEC_FWD_RESP, -1, resp);
265
266         memcpy(req.encap_request, fwd_cmd,
267                sizeof(req.encap_request));
268
269         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
270
271         HWRM_CHECK_RESULT;
272
273         return rc;
274 }
275
276 int bnxt_hwrm_func_qcaps(struct bnxt *bp)
277 {
278         int rc = 0;
279         struct hwrm_func_qcaps_input req = {.req_type = 0 };
280         struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
281
282         HWRM_PREP(req, FUNC_QCAPS, -1, resp);
283
284         req.fid = rte_cpu_to_le_16(0xffff);
285
286         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
287
288         HWRM_CHECK_RESULT;
289
290         bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
291         if (BNXT_PF(bp)) {
292                 struct bnxt_pf_info *pf = &bp->pf;
293
294                 pf->fw_fid = rte_le_to_cpu_32(resp->fid);
295                 pf->port_id = resp->port_id;
296                 memcpy(pf->mac_addr, resp->mac_address, ETHER_ADDR_LEN);
297                 pf->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
298                 pf->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
299                 pf->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
300                 pf->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
301                 pf->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
302                 pf->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
303                 pf->first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
304                 pf->max_vfs = rte_le_to_cpu_16(resp->max_vfs);
305         } else {
306                 struct bnxt_vf_info *vf = &bp->vf;
307
308                 vf->fw_fid = rte_le_to_cpu_32(resp->fid);
309                 memcpy(vf->mac_addr, &resp->mac_address, ETHER_ADDR_LEN);
310                 vf->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
311                 vf->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
312                 vf->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
313                 vf->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
314                 vf->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
315                 vf->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
316         }
317
318         return rc;
319 }
320
321 int bnxt_hwrm_func_reset(struct bnxt *bp)
322 {
323         int rc = 0;
324         struct hwrm_func_reset_input req = {.req_type = 0 };
325         struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;
326
327         HWRM_PREP(req, FUNC_RESET, -1, resp);
328
329         req.enables = rte_cpu_to_le_32(0);
330
331         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
332
333         HWRM_CHECK_RESULT;
334
335         return rc;
336 }
337
338 int bnxt_hwrm_func_driver_register(struct bnxt *bp, uint32_t flags,
339                                    uint32_t *vf_req_fwd)
340 {
341         int rc;
342         struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
343         struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
344
345         if (bp->flags & BNXT_FLAG_REGISTERED)
346                 return 0;
347
348         HWRM_PREP(req, FUNC_DRV_RGTR, -1, resp);
349         req.flags = flags;
350         req.enables = HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
351                         HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD;
352         req.ver_maj = RTE_VER_YEAR;
353         req.ver_min = RTE_VER_MONTH;
354         req.ver_upd = RTE_VER_MINOR;
355
356         memcpy(req.vf_req_fwd, vf_req_fwd, sizeof(req.vf_req_fwd));
357
358         req.async_event_fwd[0] |= rte_cpu_to_le_32(0x1);   /* TODO: Use MACRO */
359
360         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
361
362         HWRM_CHECK_RESULT;
363
364         bp->flags |= BNXT_FLAG_REGISTERED;
365
366         return rc;
367 }
368
369 int bnxt_hwrm_ver_get(struct bnxt *bp)
370 {
371         int rc = 0;
372         struct hwrm_ver_get_input req = {.req_type = 0 };
373         struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
374         uint32_t my_version;
375         uint32_t fw_version;
376         uint16_t max_resp_len;
377         char type[RTE_MEMZONE_NAMESIZE];
378
379         HWRM_PREP(req, VER_GET, -1, resp);
380
381         req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
382         req.hwrm_intf_min = HWRM_VERSION_MINOR;
383         req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
384
385         /*
386          * Hold the lock since we may be adjusting the response pointers.
387          */
388         rte_spinlock_lock(&bp->hwrm_lock);
389         rc = bnxt_hwrm_send_message_locked(bp, &req, sizeof(req));
390
391         HWRM_CHECK_RESULT;
392
393         RTE_LOG(INFO, PMD, "%d.%d.%d:%d.%d.%d\n",
394                 resp->hwrm_intf_maj, resp->hwrm_intf_min,
395                 resp->hwrm_intf_upd,
396                 resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld);
397         RTE_LOG(INFO, PMD, "Driver HWRM version: %d.%d.%d\n",
398                 HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);
399
400         my_version = HWRM_VERSION_MAJOR << 16;
401         my_version |= HWRM_VERSION_MINOR << 8;
402         my_version |= HWRM_VERSION_UPDATE;
403
404         fw_version = resp->hwrm_intf_maj << 16;
405         fw_version |= resp->hwrm_intf_min << 8;
406         fw_version |= resp->hwrm_intf_upd;
407
408         if (resp->hwrm_intf_maj != HWRM_VERSION_MAJOR) {
409                 RTE_LOG(ERR, PMD, "Unsupported firmware API version\n");
410                 rc = -EINVAL;
411                 goto error;
412         }
413
414         if (my_version != fw_version) {
415                 RTE_LOG(INFO, PMD, "BNXT Driver/HWRM API mismatch.\n");
416                 if (my_version < fw_version) {
417                         RTE_LOG(INFO, PMD,
418                                 "Firmware API version is newer than driver.\n");
419                         RTE_LOG(INFO, PMD,
420                                 "The driver may be missing features.\n");
421                 } else {
422                         RTE_LOG(INFO, PMD,
423                                 "Firmware API version is older than driver.\n");
424                         RTE_LOG(INFO, PMD,
425                                 "Not all driver features may be functional.\n");
426                 }
427         }
428
429         if (bp->max_req_len > resp->max_req_win_len) {
430                 RTE_LOG(ERR, PMD, "Unsupported request length\n");
431                 rc = -EINVAL;
432         }
433         bp->max_req_len = resp->max_req_win_len;
434         max_resp_len = resp->max_resp_len;
435         if (bp->max_resp_len != max_resp_len) {
436                 sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x",
437                         bp->pdev->addr.domain, bp->pdev->addr.bus,
438                         bp->pdev->addr.devid, bp->pdev->addr.function);
439
440                 rte_free(bp->hwrm_cmd_resp_addr);
441
442                 bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0);
443                 if (bp->hwrm_cmd_resp_addr == NULL) {
444                         rc = -ENOMEM;
445                         goto error;
446                 }
447                 bp->hwrm_cmd_resp_dma_addr =
448                         rte_malloc_virt2phy(bp->hwrm_cmd_resp_addr);
449                 bp->max_resp_len = max_resp_len;
450         }
451
452 error:
453         rte_spinlock_unlock(&bp->hwrm_lock);
454         return rc;
455 }
456
457 int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
458 {
459         int rc;
460         struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
461         struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
462
463         if (!(bp->flags & BNXT_FLAG_REGISTERED))
464                 return 0;
465
466         HWRM_PREP(req, FUNC_DRV_UNRGTR, -1, resp);
467         req.flags = flags;
468
469         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
470
471         HWRM_CHECK_RESULT;
472
473         bp->flags &= ~BNXT_FLAG_REGISTERED;
474
475         return rc;
476 }
477
/*
 * Apply the link configuration in 'conf' via HWRM_PORT_PHY_CFG, or force
 * the link down when conf->link_up is clear.
 *
 * NOTE(review): the auto_mode assignments below are order-dependent --
 * a later branch deliberately overrides an earlier choice (fixed speed
 * -> NONE, no speed -> ALL_SPEEDS, advertise mask -> SPEED_MASK).  Do
 * not reorder them.
 */
static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
{
	int rc = 0;
	struct hwrm_port_phy_cfg_input req = {0};
	struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t enables = 0;

	HWRM_PREP(req, PORT_PHY_CFG, -1, resp);

	if (conf->link_up) {
		/* Setting Fixed Speed. But AutoNeg is ON, So disable it */
		if (bp->link_info.auto_mode && conf->link_speed) {
			req.auto_mode = HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE;
			RTE_LOG(DEBUG, PMD, "Disabling AutoNeg\n");
		}

		req.flags = rte_cpu_to_le_32(conf->phy_flags);
		req.force_link_speed = rte_cpu_to_le_16(conf->link_speed);
		enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
		/*
		 * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
		 * any auto mode, even "none".
		 */
		if (!conf->link_speed) {
			/* No speeds specified. Enable AutoNeg - all speeds */
			req.auto_mode =
				HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS;
		}
		/* AutoNeg - Advertise speeds specified. */
		if (conf->auto_link_speed_mask) {
			req.auto_mode =
				HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
			req.auto_link_speed_mask =
				conf->auto_link_speed_mask;
			enables |=
			HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
		}

		req.auto_duplex = conf->duplex;
		enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
		req.auto_pause = conf->auto_pause;
		req.force_pause = conf->force_pause;
		/* Set force_pause if there is no auto or if there is a force */
		if (req.auto_pause && !req.force_pause)
			enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
		else
			enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;

		req.enables = rte_cpu_to_le_32(enables);
	} else {
		/* Link down requested: only the FORCE_LINK_DOWN flag is sent. */
		req.flags =
		rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DOWN);
		RTE_LOG(INFO, PMD, "Force Link Down\n");
	}

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	return rc;
}
539
540 static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
541                                    struct bnxt_link_info *link_info)
542 {
543         int rc = 0;
544         struct hwrm_port_phy_qcfg_input req = {0};
545         struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
546
547         HWRM_PREP(req, PORT_PHY_QCFG, -1, resp);
548
549         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
550
551         HWRM_CHECK_RESULT;
552
553         link_info->phy_link_status = resp->link;
554         link_info->link_up =
555                 (link_info->phy_link_status ==
556                  HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) ? 1 : 0;
557         link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
558         link_info->duplex = resp->duplex;
559         link_info->pause = resp->pause;
560         link_info->auto_pause = resp->auto_pause;
561         link_info->force_pause = resp->force_pause;
562         link_info->auto_mode = resp->auto_mode;
563         link_info->phy_type = resp->phy_type;
564         link_info->media_type = resp->media_type;
565
566         link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
567         link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
568         link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
569         link_info->phy_ver[0] = resp->phy_maj;
570         link_info->phy_ver[1] = resp->phy_min;
571         link_info->phy_ver[2] = resp->phy_bld;
572
573         return rc;
574 }
575
/*
 * Query the CoS queue configuration for this port and cache the id and
 * service profile of all 8 queues in bp->cos_queue[].
 */
int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
	struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, QUEUE_QPORTCFG, -1, resp);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

/* Token-pastes the index to select the matching resp->queue_idN and
 * resp->queue_idN_service_profile response fields.
 */
#define GET_QUEUE_INFO(x) \
	bp->cos_queue[x].id = resp->queue_id##x; \
	bp->cos_queue[x].profile = resp->queue_id##x##_service_profile

	GET_QUEUE_INFO(0);
	GET_QUEUE_INFO(1);
	GET_QUEUE_INFO(2);
	GET_QUEUE_INFO(3);
	GET_QUEUE_INFO(4);
	GET_QUEUE_INFO(5);
	GET_QUEUE_INFO(6);
	GET_QUEUE_INFO(7);

	return rc;
}
603
604 int bnxt_hwrm_ring_alloc(struct bnxt *bp,
605                          struct bnxt_ring *ring,
606                          uint32_t ring_type, uint32_t map_index,
607                          uint32_t stats_ctx_id)
608 {
609         int rc = 0;
610         struct hwrm_ring_alloc_input req = {.req_type = 0 };
611         struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
612
613         HWRM_PREP(req, RING_ALLOC, -1, resp);
614
615         req.enables = rte_cpu_to_le_32(0);
616
617         req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
618         req.fbo = rte_cpu_to_le_32(0);
619         /* Association of ring index with doorbell index */
620         req.logical_id = rte_cpu_to_le_16(map_index);
621
622         switch (ring_type) {
623         case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
624                 req.queue_id = bp->cos_queue[0].id;
625                 /* FALLTHROUGH */
626         case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
627                 req.ring_type = ring_type;
628                 req.cmpl_ring_id =
629                     rte_cpu_to_le_16(bp->grp_info[map_index].cp_fw_ring_id);
630                 req.length = rte_cpu_to_le_32(ring->ring_size);
631                 req.stat_ctx_id = rte_cpu_to_le_16(stats_ctx_id);
632                 req.enables = rte_cpu_to_le_32(rte_le_to_cpu_32(req.enables) |
633                         HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID);
634                 break;
635         case HWRM_RING_ALLOC_INPUT_RING_TYPE_CMPL:
636                 req.ring_type = ring_type;
637                 /*
638                  * TODO: Some HWRM versions crash with
639                  * HWRM_RING_ALLOC_INPUT_INT_MODE_POLL
640                  */
641                 req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
642                 req.length = rte_cpu_to_le_32(ring->ring_size);
643                 break;
644         default:
645                 RTE_LOG(ERR, PMD, "hwrm alloc invalid ring type %d\n",
646                         ring_type);
647                 return -1;
648         }
649
650         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
651
652         if (rc || resp->error_code) {
653                 if (rc == 0 && resp->error_code)
654                         rc = rte_le_to_cpu_16(resp->error_code);
655                 switch (ring_type) {
656                 case HWRM_RING_FREE_INPUT_RING_TYPE_CMPL:
657                         RTE_LOG(ERR, PMD,
658                                 "hwrm_ring_alloc cp failed. rc:%d\n", rc);
659                         return rc;
660                 case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
661                         RTE_LOG(ERR, PMD,
662                                 "hwrm_ring_alloc rx failed. rc:%d\n", rc);
663                         return rc;
664                 case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
665                         RTE_LOG(ERR, PMD,
666                                 "hwrm_ring_alloc tx failed. rc:%d\n", rc);
667                         return rc;
668                 default:
669                         RTE_LOG(ERR, PMD, "Invalid ring. rc:%d\n", rc);
670                         return rc;
671                 }
672         }
673
674         ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);
675         return rc;
676 }
677
678 int bnxt_hwrm_ring_free(struct bnxt *bp,
679                         struct bnxt_ring *ring, uint32_t ring_type)
680 {
681         int rc;
682         struct hwrm_ring_free_input req = {.req_type = 0 };
683         struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
684
685         HWRM_PREP(req, RING_FREE, -1, resp);
686
687         req.ring_type = ring_type;
688         req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);
689
690         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
691
692         if (rc || resp->error_code) {
693                 if (rc == 0 && resp->error_code)
694                         rc = rte_le_to_cpu_16(resp->error_code);
695
696                 switch (ring_type) {
697                 case HWRM_RING_FREE_INPUT_RING_TYPE_CMPL:
698                         RTE_LOG(ERR, PMD, "hwrm_ring_free cp failed. rc:%d\n",
699                                 rc);
700                         return rc;
701                 case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
702                         RTE_LOG(ERR, PMD, "hwrm_ring_free rx failed. rc:%d\n",
703                                 rc);
704                         return rc;
705                 case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
706                         RTE_LOG(ERR, PMD, "hwrm_ring_free tx failed. rc:%d\n",
707                                 rc);
708                         return rc;
709                 default:
710                         RTE_LOG(ERR, PMD, "Invalid ring, rc:%d\n", rc);
711                         return rc;
712                 }
713         }
714         return 0;
715 }
716
717 int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
718 {
719         int rc = 0;
720         struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
721         struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;
722
723         HWRM_PREP(req, RING_GRP_ALLOC, -1, resp);
724
725         req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
726         req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
727         req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
728         req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);
729
730         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
731
732         HWRM_CHECK_RESULT;
733
734         bp->grp_info[idx].fw_grp_id =
735             rte_le_to_cpu_16(resp->ring_group_id);
736
737         return rc;
738 }
739
740 int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
741 {
742         int rc;
743         struct hwrm_ring_grp_free_input req = {.req_type = 0 };
744         struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;
745
746         HWRM_PREP(req, RING_GRP_FREE, -1, resp);
747
748         req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);
749
750         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
751
752         HWRM_CHECK_RESULT;
753
754         bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;
755         return rc;
756 }
757
758 int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
759 {
760         int rc = 0;
761         struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
762         struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
763
764         HWRM_PREP(req, STAT_CTX_CLR_STATS, -1, resp);
765
766         if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
767                 return rc;
768
769         req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
770         req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++);
771
772         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
773
774         HWRM_CHECK_RESULT;
775
776         return rc;
777 }
778
779 int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp,
780                              struct bnxt_cp_ring_info *cpr, unsigned int idx)
781 {
782         int rc;
783         struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
784         struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
785
786         HWRM_PREP(req, STAT_CTX_ALLOC, -1, resp);
787
788         req.update_period_ms = rte_cpu_to_le_32(1000);
789
790         req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++);
791         req.stats_dma_addr =
792             rte_cpu_to_le_64(cpr->hw_stats_map);
793
794         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
795
796         HWRM_CHECK_RESULT;
797
798         cpr->hw_stats_ctx_id = rte_le_to_cpu_16(resp->stat_ctx_id);
799         bp->grp_info[idx].fw_stats_ctx = cpr->hw_stats_ctx_id;
800
801         return rc;
802 }
803
804 int bnxt_hwrm_stat_ctx_free(struct bnxt *bp,
805                             struct bnxt_cp_ring_info *cpr, unsigned int idx)
806 {
807         int rc;
808         struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
809         struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;
810
811         HWRM_PREP(req, STAT_CTX_FREE, -1, resp);
812
813         req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
814         req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++);
815
816         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
817
818         HWRM_CHECK_RESULT;
819
820         cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
821         bp->grp_info[idx].fw_stats_ctx = cpr->hw_stats_ctx_id;
822
823         return rc;
824 }
825
826 int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
827 {
828         int rc = 0, i, j;
829         struct hwrm_vnic_alloc_input req = { 0 };
830         struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
831
832         /* map ring groups to this vnic */
833         for (i = vnic->start_grp_id, j = 0; i <= vnic->end_grp_id; i++, j++) {
834                 if (bp->grp_info[i].fw_grp_id == (uint16_t)HWRM_NA_SIGNATURE) {
835                         RTE_LOG(ERR, PMD,
836                                 "Not enough ring groups avail:%x req:%x\n", j,
837                                 (vnic->end_grp_id - vnic->start_grp_id) + 1);
838                         break;
839                 }
840                 vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
841         }
842
843         vnic->fw_rss_cos_lb_ctx = (uint16_t)HWRM_NA_SIGNATURE;
844         vnic->ctx_is_rss_cos_lb = HW_CONTEXT_NONE;
845
846         HWRM_PREP(req, VNIC_ALLOC, -1, resp);
847
848         if (vnic->func_default)
849                 req.flags = HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT;
850         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
851
852         HWRM_CHECK_RESULT;
853
854         vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
855         RTE_LOG(DEBUG, PMD, "VNIC ID %x\n", vnic->fw_vnic_id);
856         return rc;
857 }
858
859 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
860 {
861         int rc = 0;
862         struct hwrm_vnic_cfg_input req = {.req_type = 0 };
863         struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
864
865         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
866                 RTE_LOG(DEBUG, PMD, "VNIC ID %x\n", vnic->fw_vnic_id);
867                 return rc;
868         }
869
870         HWRM_PREP(req, VNIC_CFG, -1, resp);
871
872         /* Only RSS support for now TBD: COS & LB */
873         req.enables =
874             rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP |
875                              HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE |
876                              HWRM_VNIC_CFG_INPUT_ENABLES_MRU);
877         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
878         req.dflt_ring_grp =
879                 rte_cpu_to_le_16(bp->grp_info[vnic->start_grp_id].fw_grp_id);
880         req.rss_rule = rte_cpu_to_le_16(vnic->fw_rss_cos_lb_ctx);
881         req.cos_rule = rte_cpu_to_le_16(0xffff);
882         req.lb_rule = rte_cpu_to_le_16(0xffff);
883         req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
884                                    ETHER_CRC_LEN + VLAN_TAG_SIZE);
885         if (vnic->func_default)
886                 req.flags = 1;
887         if (vnic->vlan_strip)
888                 req.flags |=
889                     rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
890
891         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
892
893         HWRM_CHECK_RESULT;
894
895         return rc;
896 }
897
898 int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
899 {
900         int rc = 0;
901         struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
902         struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
903                                                 bp->hwrm_cmd_resp_addr;
904
905         HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC, -1, resp);
906
907         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
908
909         HWRM_CHECK_RESULT;
910
911         vnic->fw_rss_cos_lb_ctx = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
912         RTE_LOG(DEBUG, PMD, "VNIC RSS Rule %x\n", vnic->fw_rss_cos_lb_ctx);
913
914         return rc;
915 }
916
917 int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
918 {
919         int rc = 0;
920         struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
921         struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
922                                                 bp->hwrm_cmd_resp_addr;
923
924         if (vnic->fw_rss_cos_lb_ctx == 0xffff) {
925                 RTE_LOG(DEBUG, PMD,
926                         "VNIC RSS Rule %x\n", vnic->fw_rss_cos_lb_ctx);
927                 return rc;
928         }
929
930         HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE, -1, resp);
931
932         req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(vnic->fw_rss_cos_lb_ctx);
933
934         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
935
936         HWRM_CHECK_RESULT;
937
938         vnic->fw_rss_cos_lb_ctx = INVALID_HW_RING_ID;
939
940         return rc;
941 }
942
943 int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
944 {
945         int rc = 0;
946         struct hwrm_vnic_free_input req = {.req_type = 0 };
947         struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;
948
949         if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
950                 RTE_LOG(DEBUG, PMD, "VNIC FREE ID %x\n", vnic->fw_vnic_id);
951                 return rc;
952         }
953
954         HWRM_PREP(req, VNIC_FREE, -1, resp);
955
956         req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
957
958         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
959
960         HWRM_CHECK_RESULT;
961
962         vnic->fw_vnic_id = INVALID_HW_RING_ID;
963         return rc;
964 }
965
966 int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
967                            struct bnxt_vnic_info *vnic)
968 {
969         int rc = 0;
970         struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
971         struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;
972
973         HWRM_PREP(req, VNIC_RSS_CFG, -1, resp);
974
975         req.hash_type = rte_cpu_to_le_32(vnic->hash_type);
976
977         req.ring_grp_tbl_addr =
978             rte_cpu_to_le_64(vnic->rss_table_dma_addr);
979         req.hash_key_tbl_addr =
980             rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
981         req.rss_ctx_idx = rte_cpu_to_le_16(vnic->fw_rss_cos_lb_ctx);
982
983         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
984
985         HWRM_CHECK_RESULT;
986
987         return rc;
988 }
989
990 /*
991  * HWRM utility functions
992  */
993
994 int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
995 {
996         unsigned int i;
997         int rc = 0;
998
999         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1000                 struct bnxt_tx_queue *txq;
1001                 struct bnxt_rx_queue *rxq;
1002                 struct bnxt_cp_ring_info *cpr;
1003
1004                 if (i >= bp->rx_cp_nr_rings) {
1005                         txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
1006                         cpr = txq->cp_ring;
1007                 } else {
1008                         rxq = bp->rx_queues[i];
1009                         cpr = rxq->cp_ring;
1010                 }
1011
1012                 rc = bnxt_hwrm_stat_clear(bp, cpr);
1013                 if (rc)
1014                         return rc;
1015         }
1016         return 0;
1017 }
1018
1019 int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
1020 {
1021         int rc;
1022         unsigned int i;
1023         struct bnxt_cp_ring_info *cpr;
1024
1025         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1026                 unsigned int idx = i + 1;
1027
1028                 if (i >= bp->rx_cp_nr_rings)
1029                         cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
1030                 else
1031                         cpr = bp->rx_queues[i]->cp_ring;
1032                 if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
1033                         rc = bnxt_hwrm_stat_ctx_free(bp, cpr, idx);
1034                         if (rc)
1035                                 return rc;
1036                 }
1037         }
1038         return 0;
1039 }
1040
1041 int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
1042 {
1043         unsigned int i;
1044         int rc = 0;
1045
1046         for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
1047                 struct bnxt_tx_queue *txq;
1048                 struct bnxt_rx_queue *rxq;
1049                 struct bnxt_cp_ring_info *cpr;
1050                 unsigned int idx = i + 1;
1051
1052                 if (i >= bp->rx_cp_nr_rings) {
1053                         txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
1054                         cpr = txq->cp_ring;
1055                 } else {
1056                         rxq = bp->rx_queues[i];
1057                         cpr = rxq->cp_ring;
1058                 }
1059
1060                 rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, idx);
1061
1062                 if (rc)
1063                         return rc;
1064         }
1065         return rc;
1066 }
1067
1068 int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
1069 {
1070         uint16_t i;
1071         uint32_t rc = 0;
1072
1073         for (i = 0; i < bp->rx_cp_nr_rings; i++) {
1074                 unsigned int idx = i + 1;
1075
1076                 if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID) {
1077                         RTE_LOG(ERR, PMD,
1078                                 "Attempt to free invalid ring group %d\n",
1079                                 idx);
1080                         continue;
1081                 }
1082
1083                 rc = bnxt_hwrm_ring_grp_free(bp, idx);
1084
1085                 if (rc)
1086                         return rc;
1087         }
1088         return rc;
1089 }
1090
1091 static void bnxt_free_cp_ring(struct bnxt *bp,
1092                               struct bnxt_cp_ring_info *cpr, unsigned int idx)
1093 {
1094         struct bnxt_ring *cp_ring = cpr->cp_ring_struct;
1095
1096         bnxt_hwrm_ring_free(bp, cp_ring,
1097                         HWRM_RING_FREE_INPUT_RING_TYPE_CMPL);
1098         cp_ring->fw_ring_id = INVALID_HW_RING_ID;
1099         bp->grp_info[idx].cp_fw_ring_id = INVALID_HW_RING_ID;
1100         memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
1101                         sizeof(*cpr->cp_desc_ring));
1102         cpr->cp_raw_cons = 0;
1103 }
1104
/*
 * Tear down every hardware ring owned by the port: all Tx rings and their
 * completion rings, all Rx rings and their completion rings, and finally
 * the default completion ring (group slot 0).  Descriptor and buffer
 * memory is zeroed and producer/consumer indices reset so the rings can
 * be re-created.  Always returns 0.
 */
int bnxt_free_all_hwrm_rings(struct bnxt *bp)
{
	unsigned int i;
	int rc = 0;

	/* Tx rings occupy group slots after all Rx rings (offset +1 because
	 * slot 0 is the default ring).
	 */
	for (i = 0; i < bp->tx_cp_nr_rings; i++) {
		struct bnxt_tx_queue *txq = bp->tx_queues[i];
		struct bnxt_tx_ring_info *txr = txq->tx_ring;
		struct bnxt_ring *ring = txr->tx_ring_struct;
		struct bnxt_cp_ring_info *cpr = txq->cp_ring;
		unsigned int idx = bp->rx_cp_nr_rings + i + 1;

		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
			bnxt_hwrm_ring_free(bp, ring,
					HWRM_RING_FREE_INPUT_RING_TYPE_TX);
			ring->fw_ring_id = INVALID_HW_RING_ID;
			/* Clear descriptors and software buffer ring, then
			 * reset the ring indices for a future re-alloc.
			 */
			memset(txr->tx_desc_ring, 0,
					txr->tx_ring_struct->ring_size *
					sizeof(*txr->tx_desc_ring));
			memset(txr->tx_buf_ring, 0,
					txr->tx_ring_struct->ring_size *
					sizeof(*txr->tx_buf_ring));
			txr->tx_prod = 0;
			txr->tx_cons = 0;
		}
		/* The Tx ring must be freed before its completion ring. */
		if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
			bnxt_free_cp_ring(bp, cpr, idx);
	}

	/* Rx rings use group slots 1..rx_cp_nr_rings. */
	for (i = 0; i < bp->rx_cp_nr_rings; i++) {
		struct bnxt_rx_queue *rxq = bp->rx_queues[i];
		struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
		struct bnxt_ring *ring = rxr->rx_ring_struct;
		struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
		unsigned int idx = i + 1;

		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
			bnxt_hwrm_ring_free(bp, ring,
					HWRM_RING_FREE_INPUT_RING_TYPE_RX);
			ring->fw_ring_id = INVALID_HW_RING_ID;
			bp->grp_info[idx].rx_fw_ring_id = INVALID_HW_RING_ID;
			memset(rxr->rx_desc_ring, 0,
					rxr->rx_ring_struct->ring_size *
					sizeof(*rxr->rx_desc_ring));
			memset(rxr->rx_buf_ring, 0,
					rxr->rx_ring_struct->ring_size *
					sizeof(*rxr->rx_buf_ring));
			rxr->rx_prod = 0;
		}
		if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
			bnxt_free_cp_ring(bp, cpr, idx);
	}

	/* Default completion ring */
	{
		struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;

		if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
			bnxt_free_cp_ring(bp, cpr, 0);
	}

	return rc;
}
1168
1169 int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
1170 {
1171         uint16_t i;
1172         uint32_t rc = 0;
1173
1174         for (i = 0; i < bp->rx_cp_nr_rings; i++) {
1175                 unsigned int idx = i + 1;
1176
1177                 if (bp->grp_info[idx].cp_fw_ring_id == INVALID_HW_RING_ID ||
1178                     bp->grp_info[idx].rx_fw_ring_id == INVALID_HW_RING_ID)
1179                         continue;
1180
1181                 rc = bnxt_hwrm_ring_grp_alloc(bp, idx);
1182
1183                 if (rc)
1184                         return rc;
1185         }
1186         return rc;
1187 }
1188
/*
 * Free the rte_malloc'ed HWRM response buffer and forget its DMA address.
 * Safe to call repeatedly or before allocation: rte_free(NULL) is a no-op.
 * (The previous "Release memzone" comment was inaccurate — the buffer is
 * allocated with rte_malloc() in bnxt_alloc_hwrm_resources().)
 */
void bnxt_free_hwrm_resources(struct bnxt *bp)
{
	rte_free(bp->hwrm_cmd_resp_addr);
	bp->hwrm_cmd_resp_addr = NULL;
	bp->hwrm_cmd_resp_dma_addr = 0;
}
1196
1197 int bnxt_alloc_hwrm_resources(struct bnxt *bp)
1198 {
1199         struct rte_pci_device *pdev = bp->pdev;
1200         char type[RTE_MEMZONE_NAMESIZE];
1201
1202         sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x", pdev->addr.domain,
1203                 pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
1204         bp->max_req_len = HWRM_MAX_REQ_LEN;
1205         bp->max_resp_len = HWRM_MAX_RESP_LEN;
1206         bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
1207         if (bp->hwrm_cmd_resp_addr == NULL)
1208                 return -ENOMEM;
1209         bp->hwrm_cmd_resp_dma_addr =
1210                 rte_malloc_virt2phy(bp->hwrm_cmd_resp_addr);
1211         rte_spinlock_init(&bp->hwrm_lock);
1212
1213         return 0;
1214 }
1215
1216 int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1217 {
1218         struct bnxt_filter_info *filter;
1219         int rc = 0;
1220
1221         STAILQ_FOREACH(filter, &vnic->filter, next) {
1222                 rc = bnxt_hwrm_clear_filter(bp, filter);
1223                 if (rc)
1224                         break;
1225         }
1226         return rc;
1227 }
1228
1229 int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
1230 {
1231         struct bnxt_filter_info *filter;
1232         int rc = 0;
1233
1234         STAILQ_FOREACH(filter, &vnic->filter, next) {
1235                 rc = bnxt_hwrm_set_filter(bp, vnic, filter);
1236                 if (rc)
1237                         break;
1238         }
1239         return rc;
1240 }
1241
1242 void bnxt_free_all_hwrm_resources(struct bnxt *bp)
1243 {
1244         struct bnxt_vnic_info *vnic;
1245         unsigned int i;
1246
1247         if (bp->vnic_info == NULL)
1248                 return;
1249
1250         vnic = &bp->vnic_info[0];
1251         bnxt_hwrm_cfa_l2_clear_rx_mask(bp, vnic);
1252
1253         /* VNIC resources */
1254         for (i = 0; i < bp->nr_vnics; i++) {
1255                 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
1256
1257                 bnxt_clear_hwrm_vnic_filters(bp, vnic);
1258
1259                 bnxt_hwrm_vnic_ctx_free(bp, vnic);
1260                 bnxt_hwrm_vnic_free(bp, vnic);
1261         }
1262         /* Ring resources */
1263         bnxt_free_all_hwrm_rings(bp);
1264         bnxt_free_all_hwrm_ring_grps(bp);
1265         bnxt_free_all_hwrm_stat_ctxs(bp);
1266 }
1267
1268 static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
1269 {
1270         uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
1271
1272         if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
1273                 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;
1274
1275         switch (conf_link_speed) {
1276         case ETH_LINK_SPEED_10M_HD:
1277         case ETH_LINK_SPEED_100M_HD:
1278                 return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
1279         }
1280         return hw_link_duplex;
1281 }
1282
1283 static uint16_t bnxt_check_eth_link_autoneg(uint32_t conf_link)
1284 {
1285         return (conf_link & ETH_LINK_SPEED_FIXED) ? 0 : 1;
1286 }
1287
1288 static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
1289 {
1290         uint16_t eth_link_speed = 0;
1291
1292         if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
1293                 return ETH_LINK_SPEED_AUTONEG;
1294
1295         switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
1296         case ETH_LINK_SPEED_100M:
1297         case ETH_LINK_SPEED_100M_HD:
1298                 eth_link_speed =
1299                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
1300                 break;
1301         case ETH_LINK_SPEED_1G:
1302                 eth_link_speed =
1303                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
1304                 break;
1305         case ETH_LINK_SPEED_2_5G:
1306                 eth_link_speed =
1307                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
1308                 break;
1309         case ETH_LINK_SPEED_10G:
1310                 eth_link_speed =
1311                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
1312                 break;
1313         case ETH_LINK_SPEED_20G:
1314                 eth_link_speed =
1315                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
1316                 break;
1317         case ETH_LINK_SPEED_25G:
1318                 eth_link_speed =
1319                         HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
1320                 break;
1321         case ETH_LINK_SPEED_40G:
1322                 eth_link_speed =
1323                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
1324                 break;
1325         case ETH_LINK_SPEED_50G:
1326                 eth_link_speed =
1327                         HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
1328                 break;
1329         default:
1330                 RTE_LOG(ERR, PMD,
1331                         "Unsupported link speed %d; default to AUTO\n",
1332                         conf_link_speed);
1333                 break;
1334         }
1335         return eth_link_speed;
1336 }
1337
/* All rte_eth link speeds this PMD can request from the firmware. */
#define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
		ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
		ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
		ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G)
1342
1343 static int bnxt_valid_link_speed(uint32_t link_speed, uint8_t port_id)
1344 {
1345         uint32_t one_speed;
1346
1347         if (link_speed == ETH_LINK_SPEED_AUTONEG)
1348                 return 0;
1349
1350         if (link_speed & ETH_LINK_SPEED_FIXED) {
1351                 one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;
1352
1353                 if (one_speed & (one_speed - 1)) {
1354                         RTE_LOG(ERR, PMD,
1355                                 "Invalid advertised speeds (%u) for port %u\n",
1356                                 link_speed, port_id);
1357                         return -EINVAL;
1358                 }
1359                 if ((one_speed & BNXT_SUPPORTED_SPEEDS) != one_speed) {
1360                         RTE_LOG(ERR, PMD,
1361                                 "Unsupported advertised speed (%u) for port %u\n",
1362                                 link_speed, port_id);
1363                         return -EINVAL;
1364                 }
1365         } else {
1366                 if (!(link_speed & BNXT_SUPPORTED_SPEEDS)) {
1367                         RTE_LOG(ERR, PMD,
1368                                 "Unsupported advertised speeds (%u) for port %u\n",
1369                                 link_speed, port_id);
1370                         return -EINVAL;
1371                 }
1372         }
1373         return 0;
1374 }
1375
1376 static uint16_t
1377 bnxt_parse_eth_link_speed_mask(struct bnxt *bp, uint32_t link_speed)
1378 {
1379         uint16_t ret = 0;
1380
1381         if (link_speed == ETH_LINK_SPEED_AUTONEG) {
1382                 if (bp->link_info.support_speeds)
1383                         return bp->link_info.support_speeds;
1384                 link_speed = BNXT_SUPPORTED_SPEEDS;
1385         }
1386
1387         if (link_speed & ETH_LINK_SPEED_100M)
1388                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
1389         if (link_speed & ETH_LINK_SPEED_100M_HD)
1390                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
1391         if (link_speed & ETH_LINK_SPEED_1G)
1392                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
1393         if (link_speed & ETH_LINK_SPEED_2_5G)
1394                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
1395         if (link_speed & ETH_LINK_SPEED_10G)
1396                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
1397         if (link_speed & ETH_LINK_SPEED_20G)
1398                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
1399         if (link_speed & ETH_LINK_SPEED_25G)
1400                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
1401         if (link_speed & ETH_LINK_SPEED_40G)
1402                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
1403         if (link_speed & ETH_LINK_SPEED_50G)
1404                 ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
1405         return ret;
1406 }
1407
1408 static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
1409 {
1410         uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;
1411
1412         switch (hw_link_speed) {
1413         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
1414                 eth_link_speed = ETH_SPEED_NUM_100M;
1415                 break;
1416         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
1417                 eth_link_speed = ETH_SPEED_NUM_1G;
1418                 break;
1419         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
1420                 eth_link_speed = ETH_SPEED_NUM_2_5G;
1421                 break;
1422         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
1423                 eth_link_speed = ETH_SPEED_NUM_10G;
1424                 break;
1425         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
1426                 eth_link_speed = ETH_SPEED_NUM_20G;
1427                 break;
1428         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
1429                 eth_link_speed = ETH_SPEED_NUM_25G;
1430                 break;
1431         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
1432                 eth_link_speed = ETH_SPEED_NUM_40G;
1433                 break;
1434         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
1435                 eth_link_speed = ETH_SPEED_NUM_50G;
1436                 break;
1437         case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
1438         default:
1439                 RTE_LOG(ERR, PMD, "HWRM link speed %d not defined\n",
1440                         hw_link_speed);
1441                 break;
1442         }
1443         return eth_link_speed;
1444 }
1445
1446 static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
1447 {
1448         uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;
1449
1450         switch (hw_link_duplex) {
1451         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
1452         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
1453                 eth_link_duplex = ETH_LINK_FULL_DUPLEX;
1454                 break;
1455         case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
1456                 eth_link_duplex = ETH_LINK_HALF_DUPLEX;
1457                 break;
1458         default:
1459                 RTE_LOG(ERR, PMD, "HWRM link duplex %d not defined\n",
1460                         hw_link_duplex);
1461                 break;
1462         }
1463         return eth_link_duplex;
1464 }
1465
1466 int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
1467 {
1468         int rc = 0;
1469         struct bnxt_link_info *link_info = &bp->link_info;
1470
1471         rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
1472         if (rc) {
1473                 RTE_LOG(ERR, PMD,
1474                         "Get link config failed with rc %d\n", rc);
1475                 goto exit;
1476         }
1477         if (link_info->link_speed)
1478                 link->link_speed =
1479                         bnxt_parse_hw_link_speed(link_info->link_speed);
1480         else
1481                 link->link_speed = ETH_SPEED_NUM_NONE;
1482         link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
1483         link->link_status = link_info->link_up;
1484         link->link_autoneg = link_info->auto_mode ==
1485                 HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
1486                 ETH_LINK_FIXED : ETH_LINK_AUTONEG;
1487 exit:
1488         return rc;
1489 }
1490
/*
 * Apply the device's link configuration to the PHY, or bring the link
 * down when @link_up is false.  NPAR PFs and VFs do not own the PHY and
 * return 0 without touching it.  Returns 0 on success, -EINVAL for an
 * invalid speed config or forced speed on a 10GBase-T PHY, or the HWRM
 * error from the phy_cfg call.
 */
int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
{
	int rc = 0;
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	struct bnxt_link_info link_req;
	uint16_t speed, autoneg;

	if (BNXT_NPAR_PF(bp) || BNXT_VF(bp))
		return 0;

	rc = bnxt_valid_link_speed(dev_conf->link_speeds,
			bp->eth_dev->data->port_id);
	if (rc)
		goto error;

	memset(&link_req, 0, sizeof(link_req));
	link_req.link_up = link_up;
	/* Bringing the link down needs no speed/duplex setup. */
	if (!link_up)
		goto port_phy_cfg;

	autoneg = bnxt_check_eth_link_autoneg(dev_conf->link_speeds);
	speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
	link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
	if (autoneg == 1) {
		/* Autoneg: restart negotiation with the advertised mask. */
		link_req.phy_flags |=
				HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
		link_req.auto_link_speed_mask =
			bnxt_parse_eth_link_speed_mask(bp,
						       dev_conf->link_speeds);
	} else {
		/* BaseT/TP media cannot run with a forced speed. */
		if (bp->link_info.phy_type ==
		    HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET ||
		    bp->link_info.phy_type ==
		    HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE ||
		    bp->link_info.media_type ==
		    HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP) {
			RTE_LOG(ERR, PMD, "10GBase-T devices must autoneg\n");
			return -EINVAL;
		}

		link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
		link_req.link_speed = speed;
	}
	link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
	/* Carry over the currently negotiated pause settings. */
	link_req.auto_pause = bp->link_info.auto_pause;
	link_req.force_pause = bp->link_info.force_pause;

port_phy_cfg:
	rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
	if (rc) {
		RTE_LOG(ERR, PMD,
			"Set link config failed with rc %d\n", rc);
	}

error:
	return rc;
}
1548
1549 /* JIRA 22088 */
1550 int bnxt_hwrm_func_qcfg(struct bnxt *bp)
1551 {
1552         struct hwrm_func_qcfg_input req = {0};
1553         struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
1554         int rc = 0;
1555
1556         HWRM_PREP(req, FUNC_QCFG, -1, resp);
1557         req.fid = rte_cpu_to_le_16(0xffff);
1558
1559         rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
1560
1561         HWRM_CHECK_RESULT;
1562
1563         if (BNXT_VF(bp)) {
1564                 struct bnxt_vf_info *vf = &bp->vf;
1565
1566                 /* Hard Coded.. 0xfff VLAN ID mask */
1567                 vf->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
1568         }
1569
1570         switch (resp->port_partition_type) {
1571         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
1572         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
1573         case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
1574                 bp->port_partition_type = resp->port_partition_type;
1575                 break;
1576         default:
1577                 bp->port_partition_type = 0;
1578                 break;
1579         }
1580
1581         return rc;
1582 }