/*-
 *   BSD LICENSE
 *
 *   Copyright(c) Broadcom Limited.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Broadcom Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/queue.h>

#include <rte_atomic.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_spinlock.h>
#include <rte_version.h>

#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_ring.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"

#define HWRM_CMD_TIMEOUT		2000

/*
 * HWRM Functions (sent to HWRM)
 * These are named bnxt_hwrm_*() and return -1 if bnxt_hwrm_send_message()
 * fails (i.e. a timeout), and a positive non-zero HWRM error code if the
 * command failed inside the ChiMP firmware.
 */

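/*
 * Low-level HWRM transport: copy the request into the channel at BAR0
 * offset 0, zero-fill the rest of the request window, ring the doorbell
 * at offset 0x100, then poll the response buffer until the firmware
 * writes the valid key into its last byte. The caller must hold
 * bp->hwrm_lock; returns -1 on timeout.
 */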
static int bnxt_hwrm_send_message_locked(struct bnxt *bp, void *msg,
					 uint32_t msg_len)
{
	unsigned int i;
	struct input *req = msg;
	struct output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t *data = msg;
	uint8_t *bar;
	uint8_t *valid;

	/* Write request msg to hwrm channel */
	for (i = 0; i < msg_len; i += 4) {
		bar = (uint8_t *)bp->bar0 + i;
		*(volatile uint32_t *)bar = *data;
		data++;
	}

	/* Zero the rest of the request space */
	for (; i < bp->max_req_len; i += 4) {
		bar = (uint8_t *)bp->bar0 + i;
		*(volatile uint32_t *)bar = 0;
	}

	/* Ring channel doorbell */
	bar = (uint8_t *)bp->bar0 + 0x100;
	*(volatile uint32_t *)bar = 1;

	/* Poll for the valid bit */
	for (i = 0; i < HWRM_CMD_TIMEOUT; i++) {
		/* Sanity check on the resp->resp_len */
		rte_rmb();
		if (resp->resp_len && resp->resp_len <=
				bp->max_resp_len) {
			/* Last byte of resp contains the valid key */
			valid = (uint8_t *)resp + resp->resp_len - 1;
			if (*valid == HWRM_RESP_VALID_KEY)
				break;
		}
		rte_delay_us(600);
	}

	if (i >= HWRM_CMD_TIMEOUT) {
		RTE_LOG(ERR, PMD, "Error sending msg %x\n",
			req->req_type);
		return -1;
	}
	return 0;
}

static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg, uint32_t msg_len)
{
	int rc;

	rte_spinlock_lock(&bp->hwrm_lock);
	rc = bnxt_hwrm_send_message_locked(bp, msg, msg_len);
	rte_spinlock_unlock(&bp->hwrm_lock);
	return rc;
}

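/*
 * HWRM_PREP() clears the shared response buffer and fills in the common
 * request header fields (type, completion ring, sequence id, target and
 * response DMA address) before the request-specific body is built.
 */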
#define HWRM_PREP(req, type, cr, resp) \
	memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
	req.req_type = rte_cpu_to_le_16(HWRM_##type); \
	req.cmpl_ring = rte_cpu_to_le_16(cr); \
	req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++); \
	req.target_id = rte_cpu_to_le_16(0xffff); \
	req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr)

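/*
 * HWRM_CHECK_RESULT expects local variables 'rc' and 'resp' in the
 * calling function and returns from it on either a transport failure
 * (rc != 0) or a firmware error code in the response.
 */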
#define HWRM_CHECK_RESULT \
	{ \
		if (rc) { \
			RTE_LOG(ERR, PMD, "%s failed rc:%d\n", \
				__func__, rc); \
			return rc; \
		} \
		if (resp->error_code) { \
			rc = rte_le_to_cpu_16(resp->error_code); \
			RTE_LOG(ERR, PMD, "%s error %d\n", __func__, rc); \
			return rc; \
		} \
	}

int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, CFA_L2_SET_RX_MASK, -1, resp);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
	req.mask = 0;

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	return rc;
}

int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t mask = 0;

	HWRM_PREP(req, CFA_L2_SET_RX_MASK, -1, resp);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

	/* FIXME add multicast flag, when multicast adding options is
	 * supported.
	 */
	if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
	if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI)
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
	req.mask = rte_cpu_to_le_32(HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST |
				    mask);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	return rc;
}

int bnxt_hwrm_clear_filter(struct bnxt *bp,
			   struct bnxt_filter_info *filter)
{
	int rc = 0;
	struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;

	if (filter->fw_l2_filter_id == UINT64_MAX)
		return 0;

	HWRM_PREP(req, CFA_L2_FILTER_FREE, -1, resp);

	req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	filter->fw_l2_filter_id = UINT64_MAX;

	return 0;
}

int bnxt_hwrm_set_filter(struct bnxt *bp,
			 struct bnxt_vnic_info *vnic,
			 struct bnxt_filter_info *filter)
{
	int rc = 0;
	struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t enables = 0;

	if (filter->fw_l2_filter_id != UINT64_MAX)
		bnxt_hwrm_clear_filter(bp, filter);

	HWRM_PREP(req, CFA_L2_FILTER_ALLOC, -1, resp);

	req.flags = rte_cpu_to_le_32(filter->flags);

	enables = filter->enables |
		HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
	req.dst_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
		memcpy(req.l2_addr, filter->l2_addr,
		       ETHER_ADDR_LEN);
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
		memcpy(req.l2_addr_mask, filter->l2_addr_mask,
		       ETHER_ADDR_LEN);
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
		req.l2_ovlan = filter->l2_ovlan;
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
		req.l2_ovlan_mask = filter->l2_ovlan_mask;

	req.enables = rte_cpu_to_le_32(enables);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);

	return rc;
}

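/*
 * Hand a request back to the firmware for execution, e.g. a command
 * that was queued for forwarding on behalf of a VF.
 */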
int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, void *fwd_cmd)
{
	int rc = 0;
	struct hwrm_exec_fwd_resp_input req = {.req_type = 0 };
	struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, EXEC_FWD_RESP, -1, resp);

	memcpy(req.encap_request, fwd_cmd,
	       sizeof(req.encap_request));

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	return rc;
}

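/*
 * Query the resource limits (rings, contexts, VNICs, VFs) of this
 * function and cache them in the PF or VF info structure.
 */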
int bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_func_qcaps_input req = {.req_type = 0 };
	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, FUNC_QCAPS, -1, resp);

	req.fid = rte_cpu_to_le_16(0xffff);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
	if (BNXT_PF(bp)) {
		struct bnxt_pf_info *pf = &bp->pf;

		pf->fw_fid = rte_le_to_cpu_32(resp->fid);
		pf->port_id = resp->port_id;
		memcpy(pf->mac_addr, resp->mac_address, ETHER_ADDR_LEN);
		pf->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
		pf->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
		pf->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
		pf->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
		pf->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
		pf->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
		pf->first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
		pf->max_vfs = rte_le_to_cpu_16(resp->max_vfs);
	} else {
		struct bnxt_vf_info *vf = &bp->vf;

		vf->fw_fid = rte_le_to_cpu_32(resp->fid);
		memcpy(vf->mac_addr, resp->mac_address, ETHER_ADDR_LEN);
		vf->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
		vf->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
		vf->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
		vf->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
		vf->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
		vf->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
	}

	return rc;
}

int bnxt_hwrm_func_reset(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_func_reset_input req = {.req_type = 0 };
	struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, FUNC_RESET, -1, resp);

	req.enables = rte_cpu_to_le_32(0);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	return rc;
}

int bnxt_hwrm_func_driver_register(struct bnxt *bp, uint32_t flags,
				   uint32_t *vf_req_fwd)
{
	int rc;
	struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
	struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;

	if (bp->flags & BNXT_FLAG_REGISTERED)
		return 0;

	HWRM_PREP(req, FUNC_DRV_RGTR, -1, resp);
	req.flags = flags;
	req.enables = HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
			HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD;
	req.ver_maj = RTE_VER_YEAR;
	req.ver_min = RTE_VER_MONTH;
	req.ver_upd = RTE_VER_MINOR;

	memcpy(req.vf_req_fwd, vf_req_fwd, sizeof(req.vf_req_fwd));

	req.async_event_fwd[0] |= rte_cpu_to_le_32(0x1); /* TODO: Use MACRO */

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	bp->flags |= BNXT_FLAG_REGISTERED;

	return rc;
}

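/*
 * Negotiate the HWRM interface version with the firmware and, if the
 * firmware advertises different buffer limits, reallocate the response
 * buffer to match. Runs under hwrm_lock because it swaps the response
 * pointers used by every other command.
 */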
int bnxt_hwrm_ver_get(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_ver_get_input req = {.req_type = 0 };
	struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t my_version;
	uint32_t fw_version;
	uint16_t max_resp_len;
	char type[RTE_MEMZONE_NAMESIZE];

	HWRM_PREP(req, VER_GET, -1, resp);

	req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
	req.hwrm_intf_min = HWRM_VERSION_MINOR;
	req.hwrm_intf_upd = HWRM_VERSION_UPDATE;

	/*
	 * Hold the lock since we may be adjusting the response pointers.
	 */
	rte_spinlock_lock(&bp->hwrm_lock);
	rc = bnxt_hwrm_send_message_locked(bp, &req, sizeof(req));

	/*
	 * Don't use HWRM_CHECK_RESULT here: it returns from the function
	 * directly, which would leave hwrm_lock held.
	 */
	if (rc)
		goto error;
	if (resp->error_code) {
		rc = rte_le_to_cpu_16(resp->error_code);
		RTE_LOG(ERR, PMD, "%s error %d\n", __func__, rc);
		goto error;
	}

	RTE_LOG(INFO, PMD, "%d.%d.%d:%d.%d.%d\n",
		resp->hwrm_intf_maj, resp->hwrm_intf_min,
		resp->hwrm_intf_upd,
		resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld);
	RTE_LOG(INFO, PMD, "Driver HWRM version: %d.%d.%d\n",
		HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);

	my_version = HWRM_VERSION_MAJOR << 16;
	my_version |= HWRM_VERSION_MINOR << 8;
	my_version |= HWRM_VERSION_UPDATE;

	fw_version = resp->hwrm_intf_maj << 16;
	fw_version |= resp->hwrm_intf_min << 8;
	fw_version |= resp->hwrm_intf_upd;

	if (resp->hwrm_intf_maj != HWRM_VERSION_MAJOR) {
		RTE_LOG(ERR, PMD, "Unsupported firmware API version\n");
		rc = -EINVAL;
		goto error;
	}

	if (my_version != fw_version) {
		RTE_LOG(INFO, PMD, "BNXT Driver/HWRM API mismatch.\n");
		if (my_version < fw_version) {
			RTE_LOG(INFO, PMD,
				"Firmware API version is newer than driver.\n");
			RTE_LOG(INFO, PMD,
				"The driver may be missing features.\n");
		} else {
			RTE_LOG(INFO, PMD,
				"Firmware API version is older than driver.\n");
			RTE_LOG(INFO, PMD,
				"Not all driver features may be functional.\n");
		}
	}

	if (bp->max_req_len > resp->max_req_win_len) {
		RTE_LOG(ERR, PMD, "Unsupported request length\n");
		rc = -EINVAL;
	}
	bp->max_req_len = resp->max_req_win_len;
	max_resp_len = resp->max_resp_len;
	if (bp->max_resp_len != max_resp_len) {
		sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x",
			bp->pdev->addr.domain, bp->pdev->addr.bus,
			bp->pdev->addr.devid, bp->pdev->addr.function);

		rte_free(bp->hwrm_cmd_resp_addr);

		bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0);
		if (bp->hwrm_cmd_resp_addr == NULL) {
			rc = -ENOMEM;
			goto error;
		}
		bp->hwrm_cmd_resp_dma_addr =
			rte_malloc_virt2phy(bp->hwrm_cmd_resp_addr);
		bp->max_resp_len = max_resp_len;
	}

error:
	rte_spinlock_unlock(&bp->hwrm_lock);
	return rc;
}

int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
{
	int rc;
	struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
	struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;

	if (!(bp->flags & BNXT_FLAG_REGISTERED))
		return 0;

	HWRM_PREP(req, FUNC_DRV_UNRGTR, -1, resp);
	req.flags = flags;

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	bp->flags &= ~BNXT_FLAG_REGISTERED;

	return rc;
}

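/*
 * Apply link settings: either program forced/auto speed, duplex and
 * pause parameters, or force the link down when conf->link_up is clear.
 */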
static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
{
	int rc = 0;
	struct hwrm_port_phy_cfg_input req = {0};
	struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t enables = 0;
	uint32_t link_speed_mask =
		HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;

	HWRM_PREP(req, PORT_PHY_CFG, -1, resp);

	if (conf->link_up) {
		req.flags = rte_cpu_to_le_32(conf->phy_flags);
		req.force_link_speed = rte_cpu_to_le_16(conf->link_speed);
		/*
		 * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
		 * any auto mode, even "none".
		 */
		if (!conf->link_speed) {
			req.auto_mode = conf->auto_mode;
			enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
			if (conf->auto_mode ==
			    HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK) {
				req.auto_link_speed_mask =
					conf->auto_link_speed_mask;
				enables |= link_speed_mask;
			}
			if (bp->link_info.auto_link_speed) {
				req.auto_link_speed =
					bp->link_info.auto_link_speed;
				enables |=
				HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED;
			}
		}
		req.auto_duplex = conf->duplex;
		enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
		req.auto_pause = conf->auto_pause;
		req.force_pause = conf->force_pause;
		/* Set force_pause if there is no auto or if there is a force */
		if (req.auto_pause && !req.force_pause)
			enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
		else
			enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;

		req.enables = rte_cpu_to_le_32(enables);
	} else {
		req.flags =
		rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DOWN);
		RTE_LOG(INFO, PMD, "Force Link Down\n");
	}

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	return rc;
}

static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
				   struct bnxt_link_info *link_info)
{
	int rc = 0;
	struct hwrm_port_phy_qcfg_input req = {0};
	struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, PORT_PHY_QCFG, -1, resp);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	link_info->phy_link_status = resp->link;
	link_info->link_up =
		(link_info->phy_link_status ==
		 HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) ? 1 : 0;
	link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
	link_info->duplex = resp->duplex;
	link_info->pause = resp->pause;
	link_info->auto_pause = resp->auto_pause;
	link_info->force_pause = resp->force_pause;
	link_info->auto_mode = resp->auto_mode;

	link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
	link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
	link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
	link_info->phy_ver[0] = resp->phy_maj;
	link_info->phy_ver[1] = resp->phy_min;
	link_info->phy_ver[2] = resp->phy_bld;

	return rc;
}

int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
	struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, QUEUE_QPORTCFG, -1, resp);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

#define GET_QUEUE_INFO(x) \
	bp->cos_queue[x].id = resp->queue_id##x; \
	bp->cos_queue[x].profile = resp->queue_id##x##_service_profile

	GET_QUEUE_INFO(0);
	GET_QUEUE_INFO(1);
	GET_QUEUE_INFO(2);
	GET_QUEUE_INFO(3);
	GET_QUEUE_INFO(4);
	GET_QUEUE_INFO(5);
	GET_QUEUE_INFO(6);
	GET_QUEUE_INFO(7);

	return rc;
}

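/*
 * Allocate a TX, RX or completion ring in firmware. TX and RX rings are
 * bound to a completion ring and statistics context; map_index ties the
 * ring to its doorbell and ring group.
 */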
int bnxt_hwrm_ring_alloc(struct bnxt *bp,
			 struct bnxt_ring *ring,
			 uint32_t ring_type, uint32_t map_index,
			 uint32_t stats_ctx_id)
{
	int rc = 0;
	struct hwrm_ring_alloc_input req = {.req_type = 0 };
	struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_ALLOC, -1, resp);

	req.enables = rte_cpu_to_le_32(0);

	req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
	req.fbo = rte_cpu_to_le_32(0);
	/* Association of ring index with doorbell index */
	req.logical_id = rte_cpu_to_le_16(map_index);

	switch (ring_type) {
	case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
		req.queue_id = bp->cos_queue[0].id;
		/* FALLTHROUGH */
	case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
		req.ring_type = ring_type;
		req.cmpl_ring_id =
		    rte_cpu_to_le_16(bp->grp_info[map_index].cp_fw_ring_id);
		req.length = rte_cpu_to_le_32(ring->ring_size);
		req.stat_ctx_id = rte_cpu_to_le_16(stats_ctx_id);
		req.enables = rte_cpu_to_le_32(rte_le_to_cpu_32(req.enables) |
			HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID);
		break;
	case HWRM_RING_ALLOC_INPUT_RING_TYPE_CMPL:
		req.ring_type = ring_type;
		/*
		 * TODO: Some HWRM versions crash with
		 * HWRM_RING_ALLOC_INPUT_INT_MODE_POLL
		 */
		req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
		req.length = rte_cpu_to_le_32(ring->ring_size);
		break;
	default:
		RTE_LOG(ERR, PMD, "hwrm alloc invalid ring type %d\n",
			ring_type);
		return -1;
	}

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	if (rc || resp->error_code) {
		if (rc == 0 && resp->error_code)
			rc = rte_le_to_cpu_16(resp->error_code);

		switch (ring_type) {
		case HWRM_RING_FREE_INPUT_RING_TYPE_CMPL:
			RTE_LOG(ERR, PMD,
				"hwrm_ring_alloc cp failed. rc:%d\n", rc);
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
			RTE_LOG(ERR, PMD,
				"hwrm_ring_alloc rx failed. rc:%d\n", rc);
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
			RTE_LOG(ERR, PMD,
				"hwrm_ring_alloc tx failed. rc:%d\n", rc);
			return rc;
		default:
			RTE_LOG(ERR, PMD, "Invalid ring. rc:%d\n", rc);
			return rc;
		}
	}

	ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);
	return rc;
}

int bnxt_hwrm_ring_free(struct bnxt *bp,
			struct bnxt_ring *ring, uint32_t ring_type)
{
	int rc;
	struct hwrm_ring_free_input req = {.req_type = 0 };
	struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_FREE, -1, resp);

	req.ring_type = ring_type;
	req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	if (rc || resp->error_code) {
		if (rc == 0 && resp->error_code)
			rc = rte_le_to_cpu_16(resp->error_code);

		switch (ring_type) {
		case HWRM_RING_FREE_INPUT_RING_TYPE_CMPL:
			RTE_LOG(ERR, PMD, "hwrm_ring_free cp failed. rc:%d\n",
				rc);
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
			RTE_LOG(ERR, PMD, "hwrm_ring_free rx failed. rc:%d\n",
				rc);
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
			RTE_LOG(ERR, PMD, "hwrm_ring_free tx failed. rc:%d\n",
				rc);
			return rc;
		default:
			RTE_LOG(ERR, PMD, "Invalid ring, rc:%d\n", rc);
			return rc;
		}
	}
	return 0;
}

int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
{
	int rc = 0;
	struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
	struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_GRP_ALLOC, -1, resp);

	req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
	req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
	req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
	req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	bp->grp_info[idx].fw_grp_id =
	    rte_le_to_cpu_16(resp->ring_group_id);

	return rc;
}

int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
{
	int rc;
	struct hwrm_ring_grp_free_input req = {.req_type = 0 };
	struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_GRP_FREE, -1, resp);

	req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;

	return rc;
}

int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
{
	int rc = 0;
	struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
	struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, STAT_CTX_CLR_STATS, -1, resp);

	if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
		return rc;

	req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
	req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	return rc;
}

int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp,
			     struct bnxt_cp_ring_info *cpr, unsigned int idx)
{
	int rc;
	struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
	struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, STAT_CTX_ALLOC, -1, resp);

	req.update_period_ms = rte_cpu_to_le_32(1000);

	req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++);
	req.stats_dma_addr =
	    rte_cpu_to_le_64(cpr->hw_stats_map);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	cpr->hw_stats_ctx_id = rte_le_to_cpu_16(resp->stat_ctx_id);
	bp->grp_info[idx].fw_stats_ctx = cpr->hw_stats_ctx_id;

	return rc;
}

int bnxt_hwrm_stat_ctx_free(struct bnxt *bp,
			    struct bnxt_cp_ring_info *cpr, unsigned int idx)
{
	int rc;
	struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
	struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, STAT_CTX_FREE, -1, resp);

	req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
	req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
	bp->grp_info[idx].fw_stats_ctx = cpr->hw_stats_ctx_id;

	return rc;
}

int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_alloc_input req = { 0 };
	struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
	unsigned int i, j;

	/* map ring groups to this vnic */
	for (i = vnic->start_grp_id, j = 0; i <= vnic->end_grp_id; i++, j++) {
		if (bp->grp_info[i].fw_grp_id == (uint16_t)HWRM_NA_SIGNATURE) {
			RTE_LOG(ERR, PMD,
				"Not enough ring groups avail:%x req:%x\n", j,
				(vnic->end_grp_id - vnic->start_grp_id) + 1);
			break;
		}
		vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
	}

	vnic->fw_rss_cos_lb_ctx = (uint16_t)HWRM_NA_SIGNATURE;
	vnic->ctx_is_rss_cos_lb = HW_CONTEXT_NONE;

	HWRM_PREP(req, VNIC_ALLOC, -1, resp);

	if (vnic->func_default)
		req.flags = HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT;

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
	RTE_LOG(DEBUG, PMD, "VNIC ID %x\n", vnic->fw_vnic_id);

	return rc;
}

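/*
 * Configure an allocated VNIC: default ring group, RSS/COS/LB rules and
 * an MRU sized from the current MTU plus L2 overhead.
 */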
int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_cfg_input req = {.req_type = 0 };
	struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;

	if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
		RTE_LOG(DEBUG, PMD, "VNIC ID %x\n", vnic->fw_vnic_id);
		return rc;
	}

	HWRM_PREP(req, VNIC_CFG, -1, resp);

	/* Only RSS support for now TBD: COS & LB */
	req.enables =
	    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP |
			     HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE |
			     HWRM_VNIC_CFG_INPUT_ENABLES_MRU);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
	req.dflt_ring_grp =
		rte_cpu_to_le_16(bp->grp_info[vnic->start_grp_id].fw_grp_id);
	req.rss_rule = rte_cpu_to_le_16(vnic->fw_rss_cos_lb_ctx);
	req.cos_rule = rte_cpu_to_le_16(0xffff);
	req.lb_rule = rte_cpu_to_le_16(0xffff);
	req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
				   ETHER_CRC_LEN + VLAN_TAG_SIZE);
	if (vnic->func_default)
		req.flags = rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
	if (vnic->vlan_strip)
		req.flags |=
		    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	return rc;
}

int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
	struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
						bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC, -1, resp);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	vnic->fw_rss_cos_lb_ctx = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
	RTE_LOG(DEBUG, PMD, "VNIC RSS Rule %x\n", vnic->fw_rss_cos_lb_ctx);

	return rc;
}

int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
	struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
						bp->hwrm_cmd_resp_addr;

	if (vnic->fw_rss_cos_lb_ctx == INVALID_HW_RING_ID) {
		RTE_LOG(DEBUG, PMD,
			"VNIC RSS Rule %x\n", vnic->fw_rss_cos_lb_ctx);
		return rc;
	}

	HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE, -1, resp);

	req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(vnic->fw_rss_cos_lb_ctx);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	vnic->fw_rss_cos_lb_ctx = INVALID_HW_RING_ID;

	return rc;
}

int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_free_input req = {.req_type = 0 };
	struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;

	if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
		RTE_LOG(DEBUG, PMD, "VNIC FREE ID %x\n", vnic->fw_vnic_id);
		return rc;
	}

	HWRM_PREP(req, VNIC_FREE, -1, resp);

	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	vnic->fw_vnic_id = INVALID_HW_RING_ID;

	return rc;
}

int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
			   struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
	struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_RSS_CFG, -1, resp);

	req.hash_type = rte_cpu_to_le_32(vnic->hash_type);

	req.ring_grp_tbl_addr =
	    rte_cpu_to_le_64(vnic->rss_table_dma_addr);
	req.hash_key_tbl_addr =
	    rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
	req.rss_ctx_idx = rte_cpu_to_le_16(vnic->fw_rss_cos_lb_ctx);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	return rc;
}

/*
 * HWRM utility functions
 */

int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
{
	unsigned int i;
	int rc = 0;

	for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
		struct bnxt_tx_queue *txq;
		struct bnxt_rx_queue *rxq;
		struct bnxt_cp_ring_info *cpr;

		if (i >= bp->rx_cp_nr_rings) {
			txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
			cpr = txq->cp_ring;
		} else {
			rxq = bp->rx_queues[i];
			cpr = rxq->cp_ring;
		}

		rc = bnxt_hwrm_stat_clear(bp, cpr);
		if (rc)
			return rc;
	}
	return 0;
}

int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
{
	int rc;
	unsigned int i;
	struct bnxt_cp_ring_info *cpr;

	for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
		unsigned int idx = i + 1;

		if (i >= bp->rx_cp_nr_rings)
			cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
		else
			cpr = bp->rx_queues[i]->cp_ring;
		if (cpr->hw_stats_ctx_id != (uint32_t)HWRM_NA_SIGNATURE) {
			rc = bnxt_hwrm_stat_ctx_free(bp, cpr, idx);
			if (rc)
				return rc;
		}
	}
	return 0;
}

int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
{
	unsigned int i;
	int rc = 0;

	for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
		struct bnxt_tx_queue *txq;
		struct bnxt_rx_queue *rxq;
		struct bnxt_cp_ring_info *cpr;
		unsigned int idx = i + 1;

		if (i >= bp->rx_cp_nr_rings) {
			txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
			cpr = txq->cp_ring;
		} else {
			rxq = bp->rx_queues[i];
			cpr = rxq->cp_ring;
		}

		rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, idx);
		if (rc)
			return rc;
	}
	return 0;
}

int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
{
	int rc;
	unsigned int i;

	for (i = 0; i < bp->rx_cp_nr_rings; i++) {
		unsigned int idx = i + 1;

		if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID) {
			RTE_LOG(ERR, PMD,
				"Attempt to free invalid ring group %d\n",
				idx);
			continue;
		}

		rc = bnxt_hwrm_ring_grp_free(bp, idx);
		if (rc)
			return rc;
	}
	return 0;
}

static void bnxt_free_cp_ring(struct bnxt *bp,
			      struct bnxt_cp_ring_info *cpr, unsigned int idx)
{
	struct bnxt_ring *cp_ring = cpr->cp_ring_struct;

	bnxt_hwrm_ring_free(bp, cp_ring,
			HWRM_RING_FREE_INPUT_RING_TYPE_CMPL);
	cp_ring->fw_ring_id = INVALID_HW_RING_ID;
	bp->grp_info[idx].cp_fw_ring_id = INVALID_HW_RING_ID;
	memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
			sizeof(*cpr->cp_desc_ring));
	cpr->cp_raw_cons = 0;
}

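/*
 * Tear down all TX, RX and completion rings, clearing the host-side
 * descriptor memory so the rings can be re-allocated cleanly.
 */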
int bnxt_free_all_hwrm_rings(struct bnxt *bp)
{
	unsigned int i;
	int rc = 0;

	for (i = 0; i < bp->tx_cp_nr_rings; i++) {
		struct bnxt_tx_queue *txq = bp->tx_queues[i];
		struct bnxt_tx_ring_info *txr = txq->tx_ring;
		struct bnxt_ring *ring = txr->tx_ring_struct;
		struct bnxt_cp_ring_info *cpr = txq->cp_ring;
		unsigned int idx = bp->rx_cp_nr_rings + i + 1;

		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
			bnxt_hwrm_ring_free(bp, ring,
					HWRM_RING_FREE_INPUT_RING_TYPE_TX);
			ring->fw_ring_id = INVALID_HW_RING_ID;
			memset(txr->tx_desc_ring, 0,
					txr->tx_ring_struct->ring_size *
					sizeof(*txr->tx_desc_ring));
			memset(txr->tx_buf_ring, 0,
					txr->tx_ring_struct->ring_size *
					sizeof(*txr->tx_buf_ring));
		}
		if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
			bnxt_free_cp_ring(bp, cpr, idx);
	}

	for (i = 0; i < bp->rx_cp_nr_rings; i++) {
		struct bnxt_rx_queue *rxq = bp->rx_queues[i];
		struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
		struct bnxt_ring *ring = rxr->rx_ring_struct;
		struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
		unsigned int idx = i + 1;

		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
			bnxt_hwrm_ring_free(bp, ring,
					HWRM_RING_FREE_INPUT_RING_TYPE_RX);
			ring->fw_ring_id = INVALID_HW_RING_ID;
			bp->grp_info[idx].rx_fw_ring_id = INVALID_HW_RING_ID;
			memset(rxr->rx_desc_ring, 0,
					rxr->rx_ring_struct->ring_size *
					sizeof(*rxr->rx_desc_ring));
			memset(rxr->rx_buf_ring, 0,
					rxr->rx_ring_struct->ring_size *
					sizeof(*rxr->rx_buf_ring));
		}
		if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
			bnxt_free_cp_ring(bp, cpr, idx);
	}

	/* Default completion ring */
	{
		struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;

		if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
			bnxt_free_cp_ring(bp, cpr, 0);
	}

	return rc;
}

int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
{
	unsigned int i;
	int rc = 0;

	for (i = 0; i < bp->rx_cp_nr_rings; i++) {
		unsigned int idx = i + 1;

		if (bp->grp_info[idx].cp_fw_ring_id == INVALID_HW_RING_ID ||
		    bp->grp_info[idx].rx_fw_ring_id == INVALID_HW_RING_ID)
			continue;

		rc = bnxt_hwrm_ring_grp_alloc(bp, idx);
		if (rc)
			return rc;
	}
	return rc;
}

void bnxt_free_hwrm_resources(struct bnxt *bp)
{
	/* Release the response buffer allocated in bnxt_alloc_hwrm_resources() */
	rte_free(bp->hwrm_cmd_resp_addr);
	bp->hwrm_cmd_resp_addr = NULL;
	bp->hwrm_cmd_resp_dma_addr = 0;
}

int bnxt_alloc_hwrm_resources(struct bnxt *bp)
{
	struct rte_pci_device *pdev = bp->pdev;
	char type[RTE_MEMZONE_NAMESIZE];

	sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x", pdev->addr.domain,
		pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
	bp->max_req_len = HWRM_MAX_REQ_LEN;
	bp->max_resp_len = HWRM_MAX_RESP_LEN;
	bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
	if (bp->hwrm_cmd_resp_addr == NULL)
		return -ENOMEM;
	bp->hwrm_cmd_resp_dma_addr =
		rte_malloc_virt2phy(bp->hwrm_cmd_resp_addr);
	rte_spinlock_init(&bp->hwrm_lock);

	return 0;
}

int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct bnxt_filter_info *filter;

	STAILQ_FOREACH(filter, &vnic->filter, next) {
		rc = bnxt_hwrm_clear_filter(bp, filter);
		if (rc)
			break;
	}
	return rc;
}

int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct bnxt_filter_info *filter;

	STAILQ_FOREACH(filter, &vnic->filter, next) {
		rc = bnxt_hwrm_set_filter(bp, vnic, filter);
		if (rc)
			break;
	}
	return rc;
}

void bnxt_free_all_hwrm_resources(struct bnxt *bp)
{
	struct bnxt_vnic_info *vnic;
	unsigned int i;

	if (bp->vnic_info == NULL)
		return;

	vnic = &bp->vnic_info[0];
	bnxt_hwrm_cfa_l2_clear_rx_mask(bp, vnic);

	/* VNIC resources */
	for (i = 0; i < bp->nr_vnics; i++) {
		vnic = &bp->vnic_info[i];

		bnxt_clear_hwrm_vnic_filters(bp, vnic);

		bnxt_hwrm_vnic_ctx_free(bp, vnic);
		bnxt_hwrm_vnic_free(bp, vnic);
	}

	/* Ring resources */
	bnxt_free_all_hwrm_rings(bp);
	bnxt_free_all_hwrm_ring_grps(bp);
	bnxt_free_all_hwrm_stat_ctxs(bp);
}

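/*
 * Helpers translating between rte_eth link configuration values and the
 * HWRM PORT_PHY_CFG/QCFG encodings.
 */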
static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
{
	uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;

	if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
		return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;

	switch (conf_link_speed) {
	case ETH_LINK_SPEED_10M_HD:
	case ETH_LINK_SPEED_100M_HD:
		return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
	}
	return hw_link_duplex;
}

static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
{
	uint16_t eth_link_speed = 0;

	if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
		return ETH_LINK_SPEED_AUTONEG;

	switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
	case ETH_LINK_SPEED_100M:
	case ETH_LINK_SPEED_100M_HD:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
		break;
	case ETH_LINK_SPEED_1G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
		break;
	case ETH_LINK_SPEED_2_5G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
		break;
	case ETH_LINK_SPEED_10G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
		break;
	case ETH_LINK_SPEED_20G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
		break;
	case ETH_LINK_SPEED_25G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
		break;
	case ETH_LINK_SPEED_40G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
		break;
	case ETH_LINK_SPEED_50G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
		break;
	default:
		RTE_LOG(ERR, PMD,
			"Unsupported link speed %d; default to AUTO\n",
			conf_link_speed);
		break;
	}
	return eth_link_speed;
}

#define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
	ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
	ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
	ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G)

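/*
 * Validate a rte_eth link_speeds value: a fixed speed must be a single
 * supported bit; an auto-negotiated mask must intersect the supported
 * speeds.
 */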
static int bnxt_valid_link_speed(uint32_t link_speed, uint8_t port_id)
{
	uint32_t one_speed;

	if (link_speed == ETH_LINK_SPEED_AUTONEG)
		return 0;

	if (link_speed & ETH_LINK_SPEED_FIXED) {
		one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;

		if (one_speed & (one_speed - 1)) {
			RTE_LOG(ERR, PMD,
				"Invalid advertised speeds (%u) for port %u\n",
				link_speed, port_id);
			return -EINVAL;
		}
		if ((one_speed & BNXT_SUPPORTED_SPEEDS) != one_speed) {
			RTE_LOG(ERR, PMD,
				"Unsupported advertised speed (%u) for port %u\n",
				link_speed, port_id);
			return -EINVAL;
		}
	} else {
		if (!(link_speed & BNXT_SUPPORTED_SPEEDS)) {
			RTE_LOG(ERR, PMD,
				"Unsupported advertised speeds (%u) for port %u\n",
				link_speed, port_id);
			return -EINVAL;
		}
	}
	return 0;
}

static uint16_t
bnxt_parse_eth_link_speed_mask(struct bnxt *bp, uint32_t link_speed)
{
	uint16_t ret = 0;

	if (link_speed == ETH_LINK_SPEED_AUTONEG) {
		if (bp->link_info.support_speeds)
			return bp->link_info.support_speeds;
		link_speed = BNXT_SUPPORTED_SPEEDS;
	}

	if (link_speed & ETH_LINK_SPEED_100M)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
	if (link_speed & ETH_LINK_SPEED_100M_HD)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
	if (link_speed & ETH_LINK_SPEED_1G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
	if (link_speed & ETH_LINK_SPEED_2_5G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
	if (link_speed & ETH_LINK_SPEED_10G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
	if (link_speed & ETH_LINK_SPEED_20G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
	if (link_speed & ETH_LINK_SPEED_25G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
	if (link_speed & ETH_LINK_SPEED_40G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
	if (link_speed & ETH_LINK_SPEED_50G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
	return ret;
}

static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
{
	uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;

	switch (hw_link_speed) {
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
		eth_link_speed = ETH_SPEED_NUM_100M;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
		eth_link_speed = ETH_SPEED_NUM_1G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
		eth_link_speed = ETH_SPEED_NUM_2_5G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
		eth_link_speed = ETH_SPEED_NUM_10G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
		eth_link_speed = ETH_SPEED_NUM_20G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
		eth_link_speed = ETH_SPEED_NUM_25G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
		eth_link_speed = ETH_SPEED_NUM_40G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
		eth_link_speed = ETH_SPEED_NUM_50G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
	default:
		RTE_LOG(ERR, PMD, "HWRM link speed %d not defined\n",
			hw_link_speed);
		break;
	}
	return eth_link_speed;
}

static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
{
	uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;

	switch (hw_link_duplex) {
	case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
	case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
		eth_link_duplex = ETH_LINK_FULL_DUPLEX;
		break;
	case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
		eth_link_duplex = ETH_LINK_HALF_DUPLEX;
		break;
	default:
		RTE_LOG(ERR, PMD, "HWRM link duplex %d not defined\n",
			hw_link_duplex);
		break;
	}
	return eth_link_duplex;
}

int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
{
	int rc = 0;
	struct bnxt_link_info *link_info = &bp->link_info;

	rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
	if (rc) {
		RTE_LOG(ERR, PMD,
			"Get link config failed with rc %d\n", rc);
		goto exit;
	}
	if (link_info->link_speed)
		link->link_speed =
			bnxt_parse_hw_link_speed(link_info->link_speed);
	else
		link->link_speed = ETH_SPEED_NUM_NONE;
	link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
	link->link_status = link_info->link_up;
	link->link_autoneg = link_info->auto_mode ==
		HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
		ETH_LINK_FIXED : ETH_LINK_AUTONEG;
exit:
	return rc;
}

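/*
 * Translate the device's configured link_speeds into a PORT_PHY_CFG
 * request: autoneg over a speed mask when no fixed speed is given,
 * otherwise a forced speed. NPAR and VF functions cannot change PHY
 * settings, so this is a no-op for them.
 */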
int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
{
	int rc = 0;
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	struct bnxt_link_info link_req;
	uint16_t speed;

	if (BNXT_NPAR_PF(bp) || BNXT_VF(bp))
		return 0;

	rc = bnxt_valid_link_speed(dev_conf->link_speeds,
			bp->eth_dev->data->port_id);
	if (rc)
		goto error;

	memset(&link_req, 0, sizeof(link_req));
	link_req.link_up = link_up;
	if (!link_up)
		goto port_phy_cfg;

	speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
	link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
	if (speed == 0) {
		link_req.phy_flags |=
				HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
		link_req.auto_mode =
				HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
		link_req.auto_link_speed_mask =
			bnxt_parse_eth_link_speed_mask(bp,
						       dev_conf->link_speeds);
	} else {
		link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
		link_req.link_speed = speed;
		RTE_LOG(INFO, PMD, "Set Link Speed %x\n", speed);
	}
	link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
	link_req.auto_pause = bp->link_info.auto_pause;
	link_req.force_pause = bp->link_info.force_pause;

port_phy_cfg:
	rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
	if (rc) {
		RTE_LOG(ERR, PMD,
			"Set link config failed with rc %d\n", rc);
	}

error:
	return rc;
}

int bnxt_hwrm_func_qcfg(struct bnxt *bp)
{
	struct hwrm_func_qcfg_input req = {0};
	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	int rc = 0;

	HWRM_PREP(req, FUNC_QCFG, -1, resp);
	req.fid = rte_cpu_to_le_16(0xffff);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	if (BNXT_VF(bp)) {
		struct bnxt_vf_info *vf = &bp->vf;

		/* Hard-coded 0xfff VLAN ID mask */
		vf->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
	}

	switch (resp->port_partition_type) {
	case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
	case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
	case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
		bp->port_partition_type = resp->port_partition_type;
		break;
	default:
		bp->port_partition_type = 0;
		break;
	}

	return rc;
}