/*
 * Copyright(c) Broadcom Limited.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Broadcom Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_version.h>

#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_rxq.h"
#include "bnxt_ring.h"
#include "bnxt_txq.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"
#define HWRM_CMD_TIMEOUT		2000

/*
 * HWRM Functions (sent to HWRM)
 * These are named bnxt_hwrm_*() and return -1 if bnxt_hwrm_send_message()
 * fails (i.e. a timeout), and a positive non-zero HWRM error code if the
 * HWRM command was rejected by the ChiMP firmware.
 */
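/*
 * Transport note (derived from the code below): a request is written
 * 32 bits at a time into the function's BAR0 communication channel, the
 * remainder of the request window is zeroed, and a doorbell is written at
 * BAR0 + 0x100.  Completion is detected by polling the response buffer
 * until its last byte equals HWRM_RESP_VALID_KEY, for at most
 * HWRM_CMD_TIMEOUT iterations.
 */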
static int bnxt_hwrm_send_message_locked(struct bnxt *bp, void *msg,
					 uint32_t msg_len)
{
	unsigned int i;
	struct input *req = msg;
	struct output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t *data = msg;
	uint8_t *bar, *valid;

	/* Write request msg to hwrm channel */
	for (i = 0; i < msg_len; i += 4) {
		bar = (uint8_t *)bp->bar0 + i;
		rte_write32(*data, bar);
		data++;
	}
	/* Zero the rest of the request space */
	for (; i < bp->max_req_len; i += 4) {
		bar = (uint8_t *)bp->bar0 + i;
		rte_write32(0, bar);
	}
	/* Ring channel doorbell */
	bar = (uint8_t *)bp->bar0 + 0x100;
	rte_write32(1, bar);
	/* Poll for the valid bit */
	for (i = 0; i < HWRM_CMD_TIMEOUT; i++) {
		/* Sanity check on the resp->resp_len */
		if (resp->resp_len && resp->resp_len <=
		    bp->max_resp_len) {
			/* Last byte of resp contains the valid key */
			valid = (uint8_t *)resp + resp->resp_len - 1;
			if (*valid == HWRM_RESP_VALID_KEY)
				break;
		}
	}
	if (i >= HWRM_CMD_TIMEOUT) {
		RTE_LOG(ERR, PMD, "Error sending msg %x\n",
			req->req_type);
		return -1;
	}
	return 0;
}
static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg, uint32_t msg_len)
{
	int rc;

	rte_spinlock_lock(&bp->hwrm_lock);
	rc = bnxt_hwrm_send_message_locked(bp, msg, msg_len);
	rte_spinlock_unlock(&bp->hwrm_lock);
	return rc;
}
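/*
 * Note: hwrm_lock serializes use of the single request/response buffer.
 * Most commands go through bnxt_hwrm_send_message(); bnxt_hwrm_ver_get()
 * takes the lock itself and calls the _locked variant directly because it
 * may reallocate the response buffer while holding the lock.
 */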
#define HWRM_PREP(req, type, cr, resp) \
	memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
	req.req_type = rte_cpu_to_le_16(HWRM_##type); \
	req.cmpl_ring = rte_cpu_to_le_16(cr); \
	req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++); \
	req.target_id = rte_cpu_to_le_16(0xffff); \
	req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr)

#define HWRM_CHECK_RESULT \
	{ \
		if (rc) { \
			RTE_LOG(ERR, PMD, "%s failed rc:%d\n", \
				__func__, rc); \
			return rc; \
		} \
		if (resp->error_code) { \
			rc = rte_le_to_cpu_16(resp->error_code); \
			RTE_LOG(ERR, PMD, "%s error %d\n", __func__, rc); \
			return rc; \
		} \
	}
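/*
 * Illustrative sketch only (not a function in this file): a typical command
 * wrapper combines the pieces above roughly as
 *
 *	struct hwrm_xyz_input req = {.req_type = 0 };
 *	struct hwrm_xyz_output *resp = bp->hwrm_cmd_resp_addr;
 *	int rc;
 *
 *	HWRM_PREP(req, XYZ, -1, resp);
 *	// fill request fields
 *	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 *	HWRM_CHECK_RESULT;
 *	// read fields out of *resp
 *	return rc;
 *
 * where "xyz"/"XYZ" stands in for a real HWRM command name.
 */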
int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
	struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, CFA_L2_SET_RX_MASK, -1, resp);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
	struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, CFA_L2_SET_RX_MASK, -1, resp);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

	/* FIXME: add the multicast flag once multicast address
	 * configuration is supported.
	 */
	if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
	if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI)
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
	req.mask = rte_cpu_to_le_32(HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST |
				    mask);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
int bnxt_hwrm_clear_filter(struct bnxt *bp,
			   struct bnxt_filter_info *filter)
	struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;

	if (filter->fw_l2_filter_id == UINT64_MAX)
		return 0;

	HWRM_PREP(req, CFA_L2_FILTER_FREE, -1, resp);

	req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	filter->fw_l2_filter_id = -1;
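	/*
	 * Storing -1 into the unsigned 64-bit id leaves it at UINT64_MAX,
	 * the "no L2 filter allocated" sentinel tested at the top of this
	 * function and in bnxt_hwrm_set_filter().
	 */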
int bnxt_hwrm_set_filter(struct bnxt *bp,
			 struct bnxt_vnic_info *vnic,
			 struct bnxt_filter_info *filter)
	struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t enables = 0;

	if (filter->fw_l2_filter_id != UINT64_MAX)
		bnxt_hwrm_clear_filter(bp, filter);

	HWRM_PREP(req, CFA_L2_FILTER_ALLOC, -1, resp);

	req.flags = rte_cpu_to_le_32(filter->flags);

	enables = filter->enables |
		HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
	req.dst_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
		memcpy(req.l2_addr, filter->l2_addr,
		       ETHER_ADDR_LEN);
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
		memcpy(req.l2_addr_mask, filter->l2_addr_mask,
		       ETHER_ADDR_LEN);
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
		req.l2_ovlan = filter->l2_ovlan;
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
		req.l2_ovlan_mask = filter->l2_ovlan_mask;

	req.enables = rte_cpu_to_le_32(enables);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);

int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, void *fwd_cmd)
	struct hwrm_exec_fwd_resp_input req = {.req_type = 0 };
	struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, EXEC_FWD_RESP, -1, resp);

	memcpy(req.encap_request, fwd_cmd,
	       sizeof(req.encap_request));

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
int bnxt_hwrm_func_qcaps(struct bnxt *bp)
	struct hwrm_func_qcaps_input req = {.req_type = 0 };
	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, FUNC_QCAPS, -1, resp);

	req.fid = rte_cpu_to_le_16(0xffff);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);

		struct bnxt_pf_info *pf = &bp->pf;

		pf->fw_fid = rte_le_to_cpu_32(resp->fid);
		pf->port_id = resp->port_id;
		memcpy(pf->mac_addr, resp->mac_address, ETHER_ADDR_LEN);
		pf->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
		pf->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
		pf->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
		pf->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
		pf->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
		pf->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
		pf->first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
		pf->max_vfs = rte_le_to_cpu_16(resp->max_vfs);

		struct bnxt_vf_info *vf = &bp->vf;

		vf->fw_fid = rte_le_to_cpu_32(resp->fid);
		memcpy(vf->mac_addr, &resp->mac_address, ETHER_ADDR_LEN);
		vf->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
		vf->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
		vf->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
		vf->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
		vf->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
		vf->max_vnics = rte_le_to_cpu_16(resp->max_vnics);

int bnxt_hwrm_func_reset(struct bnxt *bp)
	struct hwrm_func_reset_input req = {.req_type = 0 };
	struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, FUNC_RESET, -1, resp);

	req.enables = rte_cpu_to_le_32(0);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

int bnxt_hwrm_func_driver_register(struct bnxt *bp, uint32_t flags,
				   uint32_t *vf_req_fwd)
	struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
	struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;

	if (bp->flags & BNXT_FLAG_REGISTERED)
		return 0;

	HWRM_PREP(req, FUNC_DRV_RGTR, -1, resp);

	req.enables = HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
		      HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD;
	req.ver_maj = RTE_VER_YEAR;
	req.ver_min = RTE_VER_MONTH;
	req.ver_upd = RTE_VER_MINOR;

	memcpy(req.vf_req_fwd, vf_req_fwd, sizeof(req.vf_req_fwd));

	req.async_event_fwd[0] |= rte_cpu_to_le_32(0x1); /* TODO: Use MACRO */

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	bp->flags |= BNXT_FLAG_REGISTERED;
int bnxt_hwrm_ver_get(struct bnxt *bp)
	struct hwrm_ver_get_input req = {.req_type = 0 };
	struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
	uint16_t max_resp_len;
	char type[RTE_MEMZONE_NAMESIZE];

	HWRM_PREP(req, VER_GET, -1, resp);

	req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
	req.hwrm_intf_min = HWRM_VERSION_MINOR;
	req.hwrm_intf_upd = HWRM_VERSION_UPDATE;

	/*
	 * Hold the lock since we may be adjusting the response pointers.
	 */
	rte_spinlock_lock(&bp->hwrm_lock);
	rc = bnxt_hwrm_send_message_locked(bp, &req, sizeof(req));

	RTE_LOG(INFO, PMD, "%d.%d.%d:%d.%d.%d\n",
		resp->hwrm_intf_maj, resp->hwrm_intf_min,
		resp->hwrm_intf_upd,
		resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld);
	RTE_LOG(INFO, PMD, "Driver HWRM version: %d.%d.%d\n",
		HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);

	my_version = HWRM_VERSION_MAJOR << 16;
	my_version |= HWRM_VERSION_MINOR << 8;
	my_version |= HWRM_VERSION_UPDATE;

	fw_version = resp->hwrm_intf_maj << 16;
	fw_version |= resp->hwrm_intf_min << 8;
	fw_version |= resp->hwrm_intf_upd;

	if (resp->hwrm_intf_maj != HWRM_VERSION_MAJOR) {
		RTE_LOG(ERR, PMD, "Unsupported firmware API version\n");

	if (my_version != fw_version) {
		RTE_LOG(INFO, PMD, "BNXT Driver/HWRM API mismatch.\n");
		if (my_version < fw_version) {
				"Firmware API version is newer than driver.\n");
				"The driver may be missing features.\n");
				"Firmware API version is older than driver.\n");
				"Not all driver features may be functional.\n");

	if (bp->max_req_len > resp->max_req_win_len) {
		RTE_LOG(ERR, PMD, "Unsupported request length\n");
	bp->max_req_len = resp->max_req_win_len;
	max_resp_len = resp->max_resp_len;
	if (bp->max_resp_len != max_resp_len) {
		sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x",
			bp->pdev->addr.domain, bp->pdev->addr.bus,
			bp->pdev->addr.devid, bp->pdev->addr.function);

		rte_free(bp->hwrm_cmd_resp_addr);

		bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0);
		if (bp->hwrm_cmd_resp_addr == NULL) {
		bp->hwrm_cmd_resp_dma_addr =
			rte_malloc_virt2phy(bp->hwrm_cmd_resp_addr);
		bp->max_resp_len = max_resp_len;

	rte_spinlock_unlock(&bp->hwrm_lock);
int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
	struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
	struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;

	if (!(bp->flags & BNXT_FLAG_REGISTERED))
		return 0;

	HWRM_PREP(req, FUNC_DRV_UNRGTR, -1, resp);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	bp->flags &= ~BNXT_FLAG_REGISTERED;

static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
	struct hwrm_port_phy_cfg_input req = {0};
	struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t enables = 0;
	uint32_t link_speed_mask =
		HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;

	HWRM_PREP(req, PORT_PHY_CFG, -1, resp);

	req.flags = rte_cpu_to_le_32(conf->phy_flags);
	req.force_link_speed = rte_cpu_to_le_16(conf->link_speed);
	/*
	 * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
	 * any auto mode, even "none".
	 */
	if (!conf->link_speed) {
		req.auto_mode = conf->auto_mode;
		enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
		if (conf->auto_mode ==
		    HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK) {
			req.auto_link_speed_mask =
				conf->auto_link_speed_mask;
			enables |= link_speed_mask;
		if (bp->link_info.auto_link_speed) {
			req.auto_link_speed =
				bp->link_info.auto_link_speed;
			enables |=
				HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED;
	req.auto_duplex = conf->duplex;
	enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
	req.auto_pause = conf->auto_pause;
	req.force_pause = conf->force_pause;
	/* Set force_pause if there is no auto or if there is a force */
	if (req.auto_pause && !req.force_pause)
		enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
		enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;

	req.enables = rte_cpu_to_le_32(enables);
			rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DOWN);
		RTE_LOG(INFO, PMD, "Force Link Down\n");

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
				   struct bnxt_link_info *link_info)
	struct hwrm_port_phy_qcfg_input req = {0};
	struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, PORT_PHY_QCFG, -1, resp);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	link_info->phy_link_status = resp->link;
	link_info->link_up =
		(link_info->phy_link_status ==
		 HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) ? 1 : 0;
	link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
	link_info->duplex = resp->duplex;
	link_info->pause = resp->pause;
	link_info->auto_pause = resp->auto_pause;
	link_info->force_pause = resp->force_pause;
	link_info->auto_mode = resp->auto_mode;

	link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
	link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
	link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
	link_info->phy_ver[0] = resp->phy_maj;
	link_info->phy_ver[1] = resp->phy_min;
	link_info->phy_ver[2] = resp->phy_bld;

int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
	struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
	struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, QUEUE_QPORTCFG, -1, resp);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

#define GET_QUEUE_INFO(x) \
	bp->cos_queue[x].id = resp->queue_id##x; \
	bp->cos_queue[x].profile = resp->queue_id##x##_service_profile
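/*
 * For illustration: GET_QUEUE_INFO(0) expands (via token pasting) to
 *
 *	bp->cos_queue[0].id = resp->queue_id0;
 *	bp->cos_queue[0].profile = resp->queue_id0_service_profile;
 *
 * The (elided) code following the macro invokes it once per CoS queue
 * index reported in the query response.
 */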
int bnxt_hwrm_ring_alloc(struct bnxt *bp,
			 struct bnxt_ring *ring,
			 uint32_t ring_type, uint32_t map_index,
			 uint32_t stats_ctx_id)
	struct hwrm_ring_alloc_input req = {.req_type = 0 };
	struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_ALLOC, -1, resp);

	req.enables = rte_cpu_to_le_32(0);

	req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
	req.fbo = rte_cpu_to_le_32(0);
	/* Association of ring index with doorbell index */
	req.logical_id = rte_cpu_to_le_16(map_index);

	case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
		req.queue_id = bp->cos_queue[0].id;
	case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
		req.ring_type = ring_type;
			rte_cpu_to_le_16(bp->grp_info[map_index].cp_fw_ring_id);
		req.length = rte_cpu_to_le_32(ring->ring_size);
		req.stat_ctx_id = rte_cpu_to_le_16(stats_ctx_id);
		req.enables = rte_cpu_to_le_32(rte_le_to_cpu_32(req.enables) |
			HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID);
	case HWRM_RING_ALLOC_INPUT_RING_TYPE_CMPL:
		req.ring_type = ring_type;
		/*
		 * TODO: Some HWRM versions crash with
		 * HWRM_RING_ALLOC_INPUT_INT_MODE_POLL
		 */
		req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
		req.length = rte_cpu_to_le_32(ring->ring_size);
	default:
		RTE_LOG(ERR, PMD, "hwrm alloc invalid ring type %d\n",
			ring_type);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	if (rc || resp->error_code) {
		if (rc == 0 && resp->error_code)
			rc = rte_le_to_cpu_16(resp->error_code);
		case HWRM_RING_FREE_INPUT_RING_TYPE_CMPL:
			RTE_LOG(ERR, PMD,
				"hwrm_ring_alloc cp failed. rc:%d\n", rc);
		case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
			RTE_LOG(ERR, PMD,
				"hwrm_ring_alloc rx failed. rc:%d\n", rc);
		case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
			RTE_LOG(ERR, PMD,
				"hwrm_ring_alloc tx failed. rc:%d\n", rc);
			RTE_LOG(ERR, PMD, "Invalid ring. rc:%d\n", rc);

	ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);

int bnxt_hwrm_ring_free(struct bnxt *bp,
			struct bnxt_ring *ring, uint32_t ring_type)
	struct hwrm_ring_free_input req = {.req_type = 0 };
	struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_FREE, -1, resp);

	req.ring_type = ring_type;
	req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	if (rc || resp->error_code) {
		if (rc == 0 && resp->error_code)
			rc = rte_le_to_cpu_16(resp->error_code);

		case HWRM_RING_FREE_INPUT_RING_TYPE_CMPL:
			RTE_LOG(ERR, PMD, "hwrm_ring_free cp failed. rc:%d\n",
				rc);
		case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
			RTE_LOG(ERR, PMD, "hwrm_ring_free rx failed. rc:%d\n",
				rc);
		case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
			RTE_LOG(ERR, PMD, "hwrm_ring_free tx failed. rc:%d\n",
				rc);
			RTE_LOG(ERR, PMD, "Invalid ring, rc:%d\n", rc);
int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
	struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
	struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_GRP_ALLOC, -1, resp);

	req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
	req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
	req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
	req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	bp->grp_info[idx].fw_grp_id =
		rte_le_to_cpu_16(resp->ring_group_id);

int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
	struct hwrm_ring_grp_free_input req = {.req_type = 0 };
	struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_GRP_FREE, -1, resp);

	req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;

int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
	struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
	struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, STAT_CTX_CLR_STATS, -1, resp);

	if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
		return 0;

	req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
	req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp,
			     struct bnxt_cp_ring_info *cpr, unsigned int idx)
	struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
	struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, STAT_CTX_ALLOC, -1, resp);

	req.update_period_ms = rte_cpu_to_le_32(1000);

	req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++);
	req.stats_dma_addr =
		rte_cpu_to_le_64(cpr->hw_stats_map);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	cpr->hw_stats_ctx_id = rte_le_to_cpu_16(resp->stat_ctx_id);
	bp->grp_info[idx].fw_stats_ctx = cpr->hw_stats_ctx_id;

int bnxt_hwrm_stat_ctx_free(struct bnxt *bp,
			    struct bnxt_cp_ring_info *cpr, unsigned int idx)
	struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
	struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, STAT_CTX_FREE, -1, resp);

	req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
	req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
	bp->grp_info[idx].fw_stats_ctx = cpr->hw_stats_ctx_id;
int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
	struct hwrm_vnic_alloc_input req = { 0 };
	struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	/* Map ring groups to this VNIC */
	for (i = vnic->start_grp_id, j = 0; i <= vnic->end_grp_id; i++, j++) {
		if (bp->grp_info[i].fw_grp_id == (uint16_t)HWRM_NA_SIGNATURE) {
			RTE_LOG(ERR, PMD,
				"Not enough ring groups avail:%x req:%x\n", j,
				(vnic->end_grp_id - vnic->start_grp_id) + 1);
		vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;

	vnic->fw_rss_cos_lb_ctx = (uint16_t)HWRM_NA_SIGNATURE;
	vnic->ctx_is_rss_cos_lb = HW_CONTEXT_NONE;

	HWRM_PREP(req, VNIC_ALLOC, -1, resp);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);

int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
	struct hwrm_vnic_cfg_input req = {.req_type = 0 };
	struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_CFG, -1, resp);

	/* Only RSS is supported for now; TBD: COS & LB */
	req.enables =
		rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP |
				 HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE |
				 HWRM_VNIC_CFG_INPUT_ENABLES_MRU);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
	req.dflt_ring_grp =
		rte_cpu_to_le_16(bp->grp_info[vnic->start_grp_id].fw_grp_id);
	req.rss_rule = rte_cpu_to_le_16(vnic->fw_rss_cos_lb_ctx);
	req.cos_rule = rte_cpu_to_le_16(0xffff);
	req.lb_rule = rte_cpu_to_le_16(0xffff);
	req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
				   ETHER_CRC_LEN + VLAN_TAG_SIZE);
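	/*
	 * The MRU programmed above is the configured MTU plus Ethernet
	 * header, CRC and one VLAN tag, so maximum-sized frames for that MTU
	 * are accepted by this VNIC.
	 */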
	if (vnic->func_default)
		req.flags = rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
	if (vnic->vlan_strip)
		req.flags |=
			rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
	struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
	struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
		bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC, -1, resp);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	vnic->fw_rss_cos_lb_ctx = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);

int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
	struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
	struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
		bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE, -1, resp);

	req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(vnic->fw_rss_cos_lb_ctx);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	vnic->fw_rss_cos_lb_ctx = INVALID_HW_RING_ID;

int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
	struct hwrm_vnic_free_input req = {.req_type = 0 };
	struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;

	if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
		return 0;

	HWRM_PREP(req, VNIC_FREE, -1, resp);

	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	vnic->fw_vnic_id = INVALID_HW_RING_ID;

int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
			   struct bnxt_vnic_info *vnic)
	struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
	struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_RSS_CFG, -1, resp);

	req.hash_type = rte_cpu_to_le_32(vnic->hash_type);

	req.ring_grp_tbl_addr =
		rte_cpu_to_le_64(vnic->rss_table_dma_addr);
	req.hash_key_tbl_addr =
		rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
	req.rss_ctx_idx = rte_cpu_to_le_16(vnic->fw_rss_cos_lb_ctx);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

/*
 * HWRM utility functions
 */
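/*
 * Indexing note for the helpers below: completion rings are walked with the
 * Rx queues first and the Tx queues after them (i < rx_cp_nr_rings selects an
 * Rx queue), and per-ring bookkeeping in bp->grp_info uses idx = i + 1, with
 * entry 0 left for the default completion ring.
 */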
int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
	for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
		struct bnxt_tx_queue *txq;
		struct bnxt_rx_queue *rxq;
		struct bnxt_cp_ring_info *cpr;

		if (i >= bp->rx_cp_nr_rings) {
			txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
			cpr = txq->cp_ring;
		} else {
			rxq = bp->rx_queues[i];
			cpr = rxq->cp_ring;
		}
		rc = bnxt_hwrm_stat_clear(bp, cpr);

int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
	struct bnxt_cp_ring_info *cpr;

	for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
		unsigned int idx = i + 1;

		if (i >= bp->rx_cp_nr_rings)
			cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
		else
			cpr = bp->rx_queues[i]->cp_ring;
		if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
			rc = bnxt_hwrm_stat_ctx_free(bp, cpr, idx);

int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
	for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
		struct bnxt_tx_queue *txq;
		struct bnxt_rx_queue *rxq;
		struct bnxt_cp_ring_info *cpr;
		unsigned int idx = i + 1;

		if (i >= bp->rx_cp_nr_rings) {
			txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
			cpr = txq->cp_ring;
		} else {
			rxq = bp->rx_queues[i];
			cpr = rxq->cp_ring;
		}
		rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, idx);

int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
	for (i = 0; i < bp->rx_cp_nr_rings; i++) {
		unsigned int idx = i + 1;

		if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID) {
			RTE_LOG(ERR, PMD,
				"Attempt to free invalid ring group %d\n",
				idx);

		rc = bnxt_hwrm_ring_grp_free(bp, idx);

static void bnxt_free_cp_ring(struct bnxt *bp,
			      struct bnxt_cp_ring_info *cpr, unsigned int idx)
	struct bnxt_ring *cp_ring = cpr->cp_ring_struct;

	bnxt_hwrm_ring_free(bp, cp_ring,
			    HWRM_RING_FREE_INPUT_RING_TYPE_CMPL);
	cp_ring->fw_ring_id = INVALID_HW_RING_ID;
	bp->grp_info[idx].cp_fw_ring_id = INVALID_HW_RING_ID;
	memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
	       sizeof(*cpr->cp_desc_ring));
	cpr->cp_raw_cons = 0;
int bnxt_free_all_hwrm_rings(struct bnxt *bp)
	for (i = 0; i < bp->tx_cp_nr_rings; i++) {
		struct bnxt_tx_queue *txq = bp->tx_queues[i];
		struct bnxt_tx_ring_info *txr = txq->tx_ring;
		struct bnxt_ring *ring = txr->tx_ring_struct;
		struct bnxt_cp_ring_info *cpr = txq->cp_ring;
		unsigned int idx = bp->rx_cp_nr_rings + i + 1;

		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
			bnxt_hwrm_ring_free(bp, ring,
					    HWRM_RING_FREE_INPUT_RING_TYPE_TX);
			ring->fw_ring_id = INVALID_HW_RING_ID;
			memset(txr->tx_desc_ring, 0,
			       txr->tx_ring_struct->ring_size *
			       sizeof(*txr->tx_desc_ring));
			memset(txr->tx_buf_ring, 0,
			       txr->tx_ring_struct->ring_size *
			       sizeof(*txr->tx_buf_ring));

		if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
			bnxt_free_cp_ring(bp, cpr, idx);

	for (i = 0; i < bp->rx_cp_nr_rings; i++) {
		struct bnxt_rx_queue *rxq = bp->rx_queues[i];
		struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
		struct bnxt_ring *ring = rxr->rx_ring_struct;
		struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
		unsigned int idx = i + 1;

		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
			bnxt_hwrm_ring_free(bp, ring,
					    HWRM_RING_FREE_INPUT_RING_TYPE_RX);
			ring->fw_ring_id = INVALID_HW_RING_ID;
			bp->grp_info[idx].rx_fw_ring_id = INVALID_HW_RING_ID;
			memset(rxr->rx_desc_ring, 0,
			       rxr->rx_ring_struct->ring_size *
			       sizeof(*rxr->rx_desc_ring));
			memset(rxr->rx_buf_ring, 0,
			       rxr->rx_ring_struct->ring_size *
			       sizeof(*rxr->rx_buf_ring));

		if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
			bnxt_free_cp_ring(bp, cpr, idx);

	/* Default completion ring */
		struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;

		if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
			bnxt_free_cp_ring(bp, cpr, 0);
int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
	for (i = 0; i < bp->rx_cp_nr_rings; i++) {
		unsigned int idx = i + 1;

		if (bp->grp_info[idx].cp_fw_ring_id == INVALID_HW_RING_ID ||
		    bp->grp_info[idx].rx_fw_ring_id == INVALID_HW_RING_ID)
			continue;

		rc = bnxt_hwrm_ring_grp_alloc(bp, idx);

void bnxt_free_hwrm_resources(struct bnxt *bp)
{
	/* Release the HWRM response buffer */
	rte_free(bp->hwrm_cmd_resp_addr);
	bp->hwrm_cmd_resp_addr = NULL;
	bp->hwrm_cmd_resp_dma_addr = 0;
}
int bnxt_alloc_hwrm_resources(struct bnxt *bp)
	struct rte_pci_device *pdev = bp->pdev;
	char type[RTE_MEMZONE_NAMESIZE];

	sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x", pdev->addr.domain,
		pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
	bp->max_req_len = HWRM_MAX_REQ_LEN;
	bp->max_resp_len = HWRM_MAX_RESP_LEN;
	bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
	if (bp->hwrm_cmd_resp_addr == NULL)
		return -ENOMEM;
	bp->hwrm_cmd_resp_dma_addr =
		rte_malloc_virt2phy(bp->hwrm_cmd_resp_addr);
	rte_spinlock_init(&bp->hwrm_lock);
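	/*
	 * The response buffer allocated here uses the compiled-in
	 * HWRM_MAX_RESP_LEN; bnxt_hwrm_ver_get() may later replace it with a
	 * larger buffer if the firmware reports a bigger max_resp_len.
	 */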
int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
	struct bnxt_filter_info *filter;

	STAILQ_FOREACH(filter, &vnic->filter, next) {
		rc = bnxt_hwrm_clear_filter(bp, filter);

int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
	struct bnxt_filter_info *filter;

	STAILQ_FOREACH(filter, &vnic->filter, next) {
		rc = bnxt_hwrm_set_filter(bp, vnic, filter);

void bnxt_free_all_hwrm_resources(struct bnxt *bp)
	struct bnxt_vnic_info *vnic;

	if (bp->vnic_info == NULL)
		return;

	vnic = &bp->vnic_info[0];
	bnxt_hwrm_cfa_l2_clear_rx_mask(bp, vnic);

	/* VNIC resources */
	for (i = 0; i < bp->nr_vnics; i++) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

		bnxt_clear_hwrm_vnic_filters(bp, vnic);

		bnxt_hwrm_vnic_ctx_free(bp, vnic);
		bnxt_hwrm_vnic_free(bp, vnic);

	/* Ring resources */
	bnxt_free_all_hwrm_rings(bp);
	bnxt_free_all_hwrm_ring_grps(bp);
	bnxt_free_all_hwrm_stat_ctxs(bp);

static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
	uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;

	if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
		return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;

	switch (conf_link_speed) {
	case ETH_LINK_SPEED_10M_HD:
	case ETH_LINK_SPEED_100M_HD:
		return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
	return hw_link_duplex;

static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
	uint16_t eth_link_speed = 0;

	if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
		return ETH_LINK_SPEED_AUTONEG;

	switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
	case ETH_LINK_SPEED_100M:
	case ETH_LINK_SPEED_100M_HD:
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
	case ETH_LINK_SPEED_1G:
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
	case ETH_LINK_SPEED_2_5G:
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
	case ETH_LINK_SPEED_10G:
			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
	case ETH_LINK_SPEED_20G:
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
	case ETH_LINK_SPEED_25G:
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
	case ETH_LINK_SPEED_40G:
			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
	case ETH_LINK_SPEED_50G:
			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
			"Unsupported link speed %d; default to AUTO\n",
	return eth_link_speed;

#define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
	ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
	ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
	ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G)
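/*
 * BNXT_SUPPORTED_SPEEDS is the full set of rte_ethdev speed flags this PMD
 * will accept; bnxt_valid_link_speed() below rejects any advertised speed
 * outside this set.
 */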
static int bnxt_valid_link_speed(uint32_t link_speed, uint8_t port_id)
	if (link_speed == ETH_LINK_SPEED_AUTONEG)
		return 0;

	if (link_speed & ETH_LINK_SPEED_FIXED) {
		one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;

		if (one_speed & (one_speed - 1)) {
			RTE_LOG(ERR, PMD,
				"Invalid advertised speeds (%u) for port %u\n",
				link_speed, port_id);
		if ((one_speed & BNXT_SUPPORTED_SPEEDS) != one_speed) {
			RTE_LOG(ERR, PMD,
				"Unsupported advertised speed (%u) for port %u\n",
				link_speed, port_id);
	if (!(link_speed & BNXT_SUPPORTED_SPEEDS)) {
		RTE_LOG(ERR, PMD,
			"Unsupported advertised speeds (%u) for port %u\n",
			link_speed, port_id);

static uint16_t
bnxt_parse_eth_link_speed_mask(struct bnxt *bp, uint32_t link_speed)
	uint16_t ret = 0;

	if (link_speed == ETH_LINK_SPEED_AUTONEG) {
		if (bp->link_info.support_speeds)
			return bp->link_info.support_speeds;
		link_speed = BNXT_SUPPORTED_SPEEDS;
	}

	if (link_speed & ETH_LINK_SPEED_100M)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
	if (link_speed & ETH_LINK_SPEED_100M_HD)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
	if (link_speed & ETH_LINK_SPEED_1G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
	if (link_speed & ETH_LINK_SPEED_2_5G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
	if (link_speed & ETH_LINK_SPEED_10G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
	if (link_speed & ETH_LINK_SPEED_20G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
	if (link_speed & ETH_LINK_SPEED_25G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
	if (link_speed & ETH_LINK_SPEED_40G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
	if (link_speed & ETH_LINK_SPEED_50G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
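	/*
	 * Note that ETH_LINK_SPEED_100M and ETH_LINK_SPEED_100M_HD above map
	 * to the same HWRM 100MB mask bit: the auto link-speed mask carries
	 * no duplex information, duplex is negotiated separately via
	 * bnxt_parse_eth_link_duplex().
	 */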
static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
	uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;

	switch (hw_link_speed) {
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
		eth_link_speed = ETH_SPEED_NUM_100M;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
		eth_link_speed = ETH_SPEED_NUM_1G;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
		eth_link_speed = ETH_SPEED_NUM_2_5G;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
		eth_link_speed = ETH_SPEED_NUM_10G;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
		eth_link_speed = ETH_SPEED_NUM_20G;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
		eth_link_speed = ETH_SPEED_NUM_25G;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
		eth_link_speed = ETH_SPEED_NUM_40G;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
		eth_link_speed = ETH_SPEED_NUM_50G;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
		RTE_LOG(ERR, PMD, "HWRM link speed %d not defined\n",
			hw_link_speed);

	return eth_link_speed;

static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
	uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;

	switch (hw_link_duplex) {
	case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
	case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
		eth_link_duplex = ETH_LINK_FULL_DUPLEX;
	case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
		eth_link_duplex = ETH_LINK_HALF_DUPLEX;
		RTE_LOG(ERR, PMD, "HWRM link duplex %d not defined\n",
			hw_link_duplex);

	return eth_link_duplex;

int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
	struct bnxt_link_info *link_info = &bp->link_info;

	rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
			"Get link config failed with rc %d\n", rc);

	if (link_info->link_speed)
		link->link_speed =
			bnxt_parse_hw_link_speed(link_info->link_speed);
	else
		link->link_speed = ETH_SPEED_NUM_NONE;
	link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
	link->link_status = link_info->link_up;
	link->link_autoneg = link_info->auto_mode ==
		HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
		ETH_LINK_FIXED : ETH_LINK_AUTONEG;

int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	struct bnxt_link_info link_req;

	if (BNXT_NPAR_PF(bp) || BNXT_VF(bp))
		return 0;

	rc = bnxt_valid_link_speed(dev_conf->link_speeds,
				   bp->eth_dev->data->port_id);

	memset(&link_req, 0, sizeof(link_req));
	link_req.link_up = link_up;

	speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
	link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
		link_req.phy_flags |=
			HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
		link_req.auto_mode =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
		link_req.auto_link_speed_mask =
			bnxt_parse_eth_link_speed_mask(bp,
						       dev_conf->link_speeds);
		link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
		link_req.link_speed = speed;
		RTE_LOG(INFO, PMD, "Set Link Speed %x\n", speed);
	link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
	link_req.auto_pause = bp->link_info.auto_pause;
	link_req.force_pause = bp->link_info.force_pause;

	rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
			"Set link config failed with rc %d\n", rc);
int bnxt_hwrm_func_qcfg(struct bnxt *bp)
	struct hwrm_func_qcfg_input req = {0};
	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, FUNC_QCFG, -1, resp);
	req.fid = rte_cpu_to_le_16(0xffff);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

		struct bnxt_vf_info *vf = &bp->vf;

		/* Hard-coded 0xfff VLAN ID mask */
		vf->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;

	switch (resp->port_partition_type) {
	case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
	case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
	case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
		bp->port_partition_type = resp->port_partition_type;
		break;
	default:
		bp->port_partition_type = 0;
		break;
	}