/*-
 *   BSD LICENSE
 *
 *   Copyright(c) Broadcom Limited.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Broadcom Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_version.h>

#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_ring.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"

#define HWRM_CMD_TIMEOUT        2000
/*
 * HWRM Functions (sent to HWRM)
 * These are named bnxt_hwrm_*() and return -1 if bnxt_hwrm_send_message()
 * fails (i.e. a timeout), and a positive non-zero HWRM error code if the
 * ChiMP failed the command.
 */
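/*
 * Caller's view of that convention (illustrative sketch only, not part of
 * the driver): rc < 0 means the send itself failed (e.g. a timeout on the
 * channel), rc > 0 is an error code reported by the firmware, and rc == 0
 * is success:
 *
 *	int rc = bnxt_hwrm_func_reset(bp);
 *
 *	if (rc < 0)
 *		RTE_LOG(ERR, PMD, "HWRM channel failure\n");
 *	else if (rc > 0)
 *		RTE_LOG(ERR, PMD, "HWRM error code %d\n", rc);
 */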
static int bnxt_hwrm_send_message_locked(struct bnxt *bp, void *msg,
                                         uint32_t msg_len)
{
        unsigned int i;
        struct input *req = msg;
        struct output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t *data = msg;
        uint8_t *bar;
        uint8_t *valid;

        /* Write request msg to hwrm channel */
        for (i = 0; i < msg_len; i += 4) {
                bar = (uint8_t *)bp->bar0 + i;
                *(volatile uint32_t *)bar = *data;
                data++;
        }

        /* Zero the rest of the request space */
        for (; i < bp->max_req_len; i += 4) {
                bar = (uint8_t *)bp->bar0 + i;
                *(volatile uint32_t *)bar = 0;
        }

        /* Ring channel doorbell */
        bar = (uint8_t *)bp->bar0 + 0x100;
        *(volatile uint32_t *)bar = 1;

        /* Poll for the valid bit */
        for (i = 0; i < HWRM_CMD_TIMEOUT; i++) {
                /* Sanity check on the resp->resp_len */
                rte_rmb();
                if (resp->resp_len && resp->resp_len <=
                                bp->max_resp_len) {
                        /* Last byte of resp contains the valid key */
                        valid = (uint8_t *)resp + resp->resp_len - 1;
                        if (*valid == HWRM_RESP_VALID_KEY)
                                break;
                }
                rte_delay_us(600);
        }

        if (i >= HWRM_CMD_TIMEOUT) {
                RTE_LOG(ERR, PMD, "Error sending msg %x\n",
                        req->req_type);
                return -1;
        }
        return 0;
}
static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg, uint32_t msg_len)
{
        int rc;

        rte_spinlock_lock(&bp->hwrm_lock);
        rc = bnxt_hwrm_send_message_locked(bp, msg, msg_len);
        rte_spinlock_unlock(&bp->hwrm_lock);
        return rc;
}
#define HWRM_PREP(req, type, cr, resp) \
        memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
        req.req_type = rte_cpu_to_le_16(HWRM_##type); \
        req.cmpl_ring = rte_cpu_to_le_16(cr); \
        req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++); \
        req.target_id = rte_cpu_to_le_16(0xffff); \
        req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr)
#define HWRM_CHECK_RESULT \
        { \
                if (rc) { \
                        RTE_LOG(ERR, PMD, "%s failed rc:%d\n", \
                                __func__, rc); \
                        if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \
                                rc = -EACCES; \
                        return rc; \
                } \
                if (resp->error_code) { \
                        rc = rte_le_to_cpu_16(resp->error_code); \
                        RTE_LOG(ERR, PMD, "%s error %d\n", __func__, rc); \
                        if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \
                                rc = -EACCES; \
                        return rc; \
                } \
        }
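/*
 * Illustrative sketch of the pattern every wrapper below follows; the
 * "EXAMPLE" command name is a hypothetical placeholder:
 *
 *	int rc = 0;
 *	struct hwrm_example_input req = {.req_type = 0 };
 *	struct hwrm_example_output *resp = bp->hwrm_cmd_resp_addr;
 *
 *	HWRM_PREP(req, EXAMPLE, -1, resp);	// fill the common header
 *	// ...set command-specific request fields...
 *	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 *	HWRM_CHECK_RESULT;			// returns early on any error
 *	// ...consume command-specific response fields...
 *	return rc;
 */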
int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, CFA_L2_SET_RX_MASK, -1, resp);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
        req.mask = 0;

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}
int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t mask = 0;

        HWRM_PREP(req, CFA_L2_SET_RX_MASK, -1, resp);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

        /* FIXME: add the multicast flag once adding multicast addresses is
         * supported.
         */
        if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
        if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI)
                mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
        req.mask = rte_cpu_to_le_32(mask);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}
int bnxt_hwrm_clear_filter(struct bnxt *bp,
                           struct bnxt_filter_info *filter)
{
        int rc = 0;
        struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;

        if (filter->fw_l2_filter_id == UINT64_MAX)
                return 0;

        HWRM_PREP(req, CFA_L2_FILTER_FREE, -1, resp);

        req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        filter->fw_l2_filter_id = -1;

        return 0;
}
int bnxt_hwrm_set_filter(struct bnxt *bp,
                         struct bnxt_vnic_info *vnic,
                         struct bnxt_filter_info *filter)
{
        int rc = 0;
        struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
        struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t enables = 0;

        if (filter->fw_l2_filter_id != UINT64_MAX)
                bnxt_hwrm_clear_filter(bp, filter);

        HWRM_PREP(req, CFA_L2_FILTER_ALLOC, -1, resp);

        req.flags = rte_cpu_to_le_32(filter->flags);

        enables = filter->enables |
                  HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
        req.dst_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
                memcpy(req.l2_addr, filter->l2_addr,
                       ETHER_ADDR_LEN);
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
                memcpy(req.l2_addr_mask, filter->l2_addr_mask,
                       ETHER_ADDR_LEN);
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
                req.l2_ovlan = filter->l2_ovlan;
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
                req.l2_ovlan_mask = filter->l2_ovlan_mask;

        req.enables = rte_cpu_to_le_32(enables);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);

        return rc;
}
int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, void *fwd_cmd)
{
        int rc = 0;
        struct hwrm_exec_fwd_resp_input req = {.req_type = 0 };
        struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, EXEC_FWD_RESP, -1, resp);

        memcpy(req.encap_request, fwd_cmd,
               sizeof(req.encap_request));

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}
int bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_func_qcaps_input req = {.req_type = 0 };
        struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, FUNC_QCAPS, -1, resp);

        req.fid = rte_cpu_to_le_16(0xffff);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
        if (BNXT_PF(bp)) {
                struct bnxt_pf_info *pf = &bp->pf;

                pf->fw_fid = rte_le_to_cpu_32(resp->fid);
                pf->port_id = resp->port_id;
                memcpy(pf->mac_addr, resp->mac_address, ETHER_ADDR_LEN);
                pf->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
                pf->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
                pf->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
                pf->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
                pf->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
                pf->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
                pf->first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
                pf->max_vfs = rte_le_to_cpu_16(resp->max_vfs);
        } else {
                struct bnxt_vf_info *vf = &bp->vf;

                vf->fw_fid = rte_le_to_cpu_32(resp->fid);
                memcpy(vf->mac_addr, &resp->mac_address, ETHER_ADDR_LEN);
                vf->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
                vf->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
                vf->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
                vf->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
                vf->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
                vf->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
        }

        return rc;
}
int bnxt_hwrm_func_reset(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_func_reset_input req = {.req_type = 0 };
        struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, FUNC_RESET, -1, resp);

        req.enables = rte_cpu_to_le_32(0);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}
int bnxt_hwrm_func_driver_register(struct bnxt *bp, uint32_t flags,
                                   uint32_t *vf_req_fwd)
{
        int rc;
        struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
        struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;

        if (bp->flags & BNXT_FLAG_REGISTERED)
                return 0;

        HWRM_PREP(req, FUNC_DRV_RGTR, -1, resp);
        req.flags = flags;
        req.enables = HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
                      HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD;
        req.ver_maj = RTE_VER_YEAR;
        req.ver_min = RTE_VER_MONTH;
        req.ver_upd = RTE_VER_MINOR;

        memcpy(req.vf_req_fwd, vf_req_fwd, sizeof(req.vf_req_fwd));

        req.async_event_fwd[0] |= rte_cpu_to_le_32(0x1); /* TODO: Use MACRO */

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        bp->flags |= BNXT_FLAG_REGISTERED;

        return rc;
}
int bnxt_hwrm_ver_get(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_ver_get_input req = {.req_type = 0 };
        struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t my_version;
        uint32_t fw_version;
        uint16_t max_resp_len;
        char type[RTE_MEMZONE_NAMESIZE];

        HWRM_PREP(req, VER_GET, -1, resp);

        req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
        req.hwrm_intf_min = HWRM_VERSION_MINOR;
        req.hwrm_intf_upd = HWRM_VERSION_UPDATE;

        /*
         * Hold the lock since we may be adjusting the response pointers.
         */
        rte_spinlock_lock(&bp->hwrm_lock);
        rc = bnxt_hwrm_send_message_locked(bp, &req, sizeof(req));
        if (rc)
                goto error;

        RTE_LOG(INFO, PMD, "%d.%d.%d:%d.%d.%d\n",
                resp->hwrm_intf_maj, resp->hwrm_intf_min,
                resp->hwrm_intf_upd,
                resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld);
        RTE_LOG(INFO, PMD, "Driver HWRM version: %d.%d.%d\n",
                HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);

        my_version = HWRM_VERSION_MAJOR << 16;
        my_version |= HWRM_VERSION_MINOR << 8;
        my_version |= HWRM_VERSION_UPDATE;

        fw_version = resp->hwrm_intf_maj << 16;
        fw_version |= resp->hwrm_intf_min << 8;
        fw_version |= resp->hwrm_intf_upd;

        if (resp->hwrm_intf_maj != HWRM_VERSION_MAJOR) {
                RTE_LOG(ERR, PMD, "Unsupported firmware API version\n");
                rc = -EINVAL;
                goto error;
        }

        if (my_version != fw_version) {
                RTE_LOG(INFO, PMD, "BNXT Driver/HWRM API mismatch.\n");
                if (my_version < fw_version) {
                        RTE_LOG(INFO, PMD,
                                "Firmware API version is newer than driver.\n");
                        RTE_LOG(INFO, PMD,
                                "The driver may be missing features.\n");
                } else {
                        RTE_LOG(INFO, PMD,
                                "Firmware API version is older than driver.\n");
                        RTE_LOG(INFO, PMD,
                                "Not all driver features may be functional.\n");
                }
        }

        if (bp->max_req_len > resp->max_req_win_len) {
                RTE_LOG(ERR, PMD, "Unsupported request length\n");
                rc = -EINVAL;
        }
        bp->max_req_len = resp->max_req_win_len;
        max_resp_len = resp->max_resp_len;
        if (bp->max_resp_len != max_resp_len) {
                sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x",
                        bp->pdev->addr.domain, bp->pdev->addr.bus,
                        bp->pdev->addr.devid, bp->pdev->addr.function);

                rte_free(bp->hwrm_cmd_resp_addr);

                bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0);
                if (bp->hwrm_cmd_resp_addr == NULL) {
                        rc = -ENOMEM;
                        goto error;
                }
                bp->hwrm_cmd_resp_dma_addr =
                        rte_malloc_virt2phy(bp->hwrm_cmd_resp_addr);
                bp->max_resp_len = max_resp_len;
        }

error:
        rte_spinlock_unlock(&bp->hwrm_lock);
        return rc;
}
int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
{
        int rc;
        struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
        struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;

        if (!(bp->flags & BNXT_FLAG_REGISTERED))
                return 0;

        HWRM_PREP(req, FUNC_DRV_UNRGTR, -1, resp);
        req.flags = flags;

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        bp->flags &= ~BNXT_FLAG_REGISTERED;

        return rc;
}
static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
{
        int rc = 0;
        struct hwrm_port_phy_cfg_input req = {0};
        struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t enables = 0;

        HWRM_PREP(req, PORT_PHY_CFG, -1, resp);

        if (conf->link_up) {
                /* Setting a fixed speed while AutoNeg is on, so disable it */
                if (bp->link_info.auto_mode && conf->link_speed) {
                        req.auto_mode = HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE;
                        RTE_LOG(DEBUG, PMD, "Disabling AutoNeg\n");
                }

                req.flags = rte_cpu_to_le_32(conf->phy_flags);
                req.force_link_speed = rte_cpu_to_le_16(conf->link_speed);
                enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
                /*
                 * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
                 * any auto mode, even "none".
                 */
                if (!conf->link_speed) {
                        /* No speeds specified. Enable AutoNeg - all speeds */
                        req.auto_mode =
                                HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS;
                }
                /* AutoNeg - Advertise the speeds specified. */
                if (conf->auto_link_speed_mask &&
                    !(conf->phy_flags & HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE)) {
                        req.auto_mode =
                                HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
                        req.auto_link_speed_mask =
                                conf->auto_link_speed_mask;
                        enables |=
                        HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
                }

                req.auto_duplex = conf->duplex;
                enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
                req.auto_pause = conf->auto_pause;
                req.force_pause = conf->force_pause;
                /* Set force_pause if there is no auto or if there is a force */
                if (req.auto_pause && !req.force_pause)
                        enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
                else
                        enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;

                req.enables = rte_cpu_to_le_32(enables);
        } else {
                req.flags =
                rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DOWN);
                RTE_LOG(INFO, PMD, "Force Link Down\n");
        }

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}
static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
                                   struct bnxt_link_info *link_info)
{
        int rc = 0;
        struct hwrm_port_phy_qcfg_input req = {0};
        struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, PORT_PHY_QCFG, -1, resp);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        link_info->phy_link_status = resp->link;
        link_info->link_up =
                (link_info->phy_link_status ==
                 HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) ? 1 : 0;
        link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
        link_info->duplex = resp->duplex;
        link_info->pause = resp->pause;
        link_info->auto_pause = resp->auto_pause;
        link_info->force_pause = resp->force_pause;
        link_info->auto_mode = resp->auto_mode;
        link_info->phy_type = resp->phy_type;
        link_info->media_type = resp->media_type;

        link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
        link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
        link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
        link_info->force_link_speed = rte_le_to_cpu_16(resp->force_link_speed);
        link_info->phy_ver[0] = resp->phy_maj;
        link_info->phy_ver[1] = resp->phy_min;
        link_info->phy_ver[2] = resp->phy_bld;

        return rc;
}
int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
{
        int rc = 0;
        struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
        struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, QUEUE_QPORTCFG, -1, resp);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

#define GET_QUEUE_INFO(x) \
        bp->cos_queue[x].id = resp->queue_id##x; \
        bp->cos_queue[x].profile = resp->queue_id##x##_service_profile

        GET_QUEUE_INFO(0);
        GET_QUEUE_INFO(1);
        GET_QUEUE_INFO(2);
        GET_QUEUE_INFO(3);
        GET_QUEUE_INFO(4);
        GET_QUEUE_INFO(5);
        GET_QUEUE_INFO(6);
        GET_QUEUE_INFO(7);

        return rc;
}
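/*
 * For reference (illustration only): the ## token pasting above turns
 * GET_QUEUE_INFO(0) into:
 *
 *	bp->cos_queue[0].id = resp->queue_id0;
 *	bp->cos_queue[0].profile = resp->queue_id0_service_profile;
 */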
int bnxt_hwrm_ring_alloc(struct bnxt *bp,
                         struct bnxt_ring *ring,
                         uint32_t ring_type, uint32_t map_index,
                         uint32_t stats_ctx_id, uint32_t cmpl_ring_id)
{
        int rc = 0;
        struct hwrm_ring_alloc_input req = {.req_type = 0 };
        struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_ALLOC, -1, resp);

        req.enables = rte_cpu_to_le_32(0);

        req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
        req.fbo = rte_cpu_to_le_32(0);
        /* Association of ring index with doorbell index */
        req.logical_id = rte_cpu_to_le_16(map_index);

        switch (ring_type) {
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
                req.queue_id = bp->cos_queue[0].id;
                /* FALLTHROUGH */
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
                req.ring_type = ring_type;
                req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
                req.length = rte_cpu_to_le_32(ring->ring_size);
                req.stat_ctx_id = rte_cpu_to_le_16(stats_ctx_id);
                if (stats_ctx_id != INVALID_STATS_CTX_ID)
                        req.enables =
                        rte_cpu_to_le_32(rte_le_to_cpu_32(req.enables) |
                        HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID);
                break;
        case HWRM_RING_ALLOC_INPUT_RING_TYPE_CMPL:
                req.ring_type = ring_type;
                /*
                 * TODO: Some HWRM versions crash with
                 * HWRM_RING_ALLOC_INPUT_INT_MODE_POLL
                 */
                req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
                req.length = rte_cpu_to_le_32(ring->ring_size);
                break;
        default:
                RTE_LOG(ERR, PMD, "hwrm alloc invalid ring type %d\n",
                        ring_type);
                return -1;
        }

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        if (rc || resp->error_code) {
                if (rc == 0 && resp->error_code)
                        rc = rte_le_to_cpu_16(resp->error_code);
                switch (ring_type) {
                case HWRM_RING_FREE_INPUT_RING_TYPE_CMPL:
                        RTE_LOG(ERR, PMD,
                                "hwrm_ring_alloc cp failed. rc:%d\n", rc);
                        return rc;
                case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
                        RTE_LOG(ERR, PMD,
                                "hwrm_ring_alloc rx failed. rc:%d\n", rc);
                        return rc;
                case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
                        RTE_LOG(ERR, PMD,
                                "hwrm_ring_alloc tx failed. rc:%d\n", rc);
                        return rc;
                default:
                        RTE_LOG(ERR, PMD, "Invalid ring. rc:%d\n", rc);
                        return rc;
                }
        }

        ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);
        return rc;
}
int bnxt_hwrm_ring_free(struct bnxt *bp,
                        struct bnxt_ring *ring, uint32_t ring_type)
{
        int rc;
        struct hwrm_ring_free_input req = {.req_type = 0 };
        struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_FREE, -1, resp);

        req.ring_type = ring_type;
        req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        if (rc || resp->error_code) {
                if (rc == 0 && resp->error_code)
                        rc = rte_le_to_cpu_16(resp->error_code);

                switch (ring_type) {
                case HWRM_RING_FREE_INPUT_RING_TYPE_CMPL:
                        RTE_LOG(ERR, PMD, "hwrm_ring_free cp failed. rc:%d\n",
                                rc);
                        return rc;
                case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
                        RTE_LOG(ERR, PMD, "hwrm_ring_free rx failed. rc:%d\n",
                                rc);
                        return rc;
                case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
                        RTE_LOG(ERR, PMD, "hwrm_ring_free tx failed. rc:%d\n",
                                rc);
                        return rc;
                default:
                        RTE_LOG(ERR, PMD, "Invalid ring, rc:%d\n", rc);
                        return rc;
                }
        }
        return 0;
}
int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
{
        int rc = 0;
        struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
        struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_GRP_ALLOC, -1, resp);

        req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
        req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
        req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
        req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        bp->grp_info[idx].fw_grp_id =
                rte_le_to_cpu_16(resp->ring_group_id);

        return rc;
}
int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
{
        int rc;
        struct hwrm_ring_grp_free_input req = {.req_type = 0 };
        struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, RING_GRP_FREE, -1, resp);

        req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;
        return rc;
}
int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
{
        int rc = 0;
        struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
        struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, STAT_CTX_CLR_STATS, -1, resp);

        if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
                return rc;

        req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
        req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}
int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp,
                             struct bnxt_cp_ring_info *cpr, unsigned int idx)
{
        int rc;
        struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
        struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, STAT_CTX_ALLOC, -1, resp);

        req.update_period_ms = rte_cpu_to_le_32(1000);

        req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++);
        req.stats_dma_addr =
                rte_cpu_to_le_64(cpr->hw_stats_map);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        cpr->hw_stats_ctx_id = rte_le_to_cpu_16(resp->stat_ctx_id);
        // Tx rings don't need a grp_info entry. It is an Rx-only attribute.
        if (idx)
                bp->grp_info[idx].fw_stats_ctx = cpr->hw_stats_ctx_id;

        return rc;
}
int bnxt_hwrm_stat_ctx_free(struct bnxt *bp,
                            struct bnxt_cp_ring_info *cpr, unsigned int idx)
{
        int rc;
        struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
        struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, STAT_CTX_FREE, -1, resp);

        req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
        req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
        // Tx rings don't have a grp_info entry. It is an Rx-only attribute.
        if (idx)
                bp->grp_info[idx].fw_stats_ctx = cpr->hw_stats_ctx_id;

        return rc;
}
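/*
 * Note on the idx parameter above (assumed convention, per the "Tx rings"
 * comments): bp->grp_info[] entries are an Rx-only attribute, so Rx
 * completion rings pass their ring group index while Tx rings pass
 * idx == 0 and the grp_info[] update is skipped.
 */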
int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0, i, j;
        struct hwrm_vnic_alloc_input req = { 0 };
        struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;

        /* map ring groups to this vnic */
        for (i = vnic->start_grp_id, j = 0; i < vnic->end_grp_id; i++, j++) {
                if (bp->grp_info[i].fw_grp_id == (uint16_t)HWRM_NA_SIGNATURE) {
                        RTE_LOG(ERR, PMD,
                                "Not enough ring groups avail:%x req:%x\n", j,
                                (vnic->end_grp_id - vnic->start_grp_id) + 1);
                        break;
                }
                vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
        }

        vnic->fw_rss_cos_lb_ctx = (uint16_t)HWRM_NA_SIGNATURE;
        vnic->ctx_is_rss_cos_lb = HW_CONTEXT_NONE;

        HWRM_PREP(req, VNIC_ALLOC, -1, resp);

        if (vnic->func_default)
                req.flags =
                        rte_cpu_to_le_32(HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT);
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
        RTE_LOG(DEBUG, PMD, "VNIC ID %x\n", vnic->fw_vnic_id);
        return rc;
}
int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_vnic_cfg_input req = {.req_type = 0 };
        struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;

        if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
                RTE_LOG(DEBUG, PMD, "VNIC ID %x\n", vnic->fw_vnic_id);
                return rc;
        }

        HWRM_PREP(req, VNIC_CFG, -1, resp);

        /* Only RSS support for now TBD: COS & LB */
        req.enables =
                rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP |
                                 HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE |
                                 HWRM_VNIC_CFG_INPUT_ENABLES_MRU);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
        req.dflt_ring_grp =
                rte_cpu_to_le_16(bp->grp_info[vnic->start_grp_id].fw_grp_id);
        req.rss_rule = rte_cpu_to_le_16(vnic->fw_rss_cos_lb_ctx);
        req.cos_rule = rte_cpu_to_le_16(0xffff);
        req.lb_rule = rte_cpu_to_le_16(0xffff);
        req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
                                   ETHER_CRC_LEN + VLAN_TAG_SIZE);
        if (vnic->func_default)
                req.flags = rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
        if (vnic->vlan_strip)
                req.flags |=
                    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}
int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
        struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
                                                bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC, -1, resp);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        vnic->fw_rss_cos_lb_ctx = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
        RTE_LOG(DEBUG, PMD, "VNIC RSS Rule %x\n", vnic->fw_rss_cos_lb_ctx);

        return rc;
}
int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
        struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
                                                bp->hwrm_cmd_resp_addr;

        if (vnic->fw_rss_cos_lb_ctx == 0xffff) {
                RTE_LOG(DEBUG, PMD,
                        "VNIC RSS Rule %x\n", vnic->fw_rss_cos_lb_ctx);
                return rc;
        }

        HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE, -1, resp);

        req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(vnic->fw_rss_cos_lb_ctx);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        vnic->fw_rss_cos_lb_ctx = INVALID_HW_RING_ID;

        return rc;
}
int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_vnic_free_input req = {.req_type = 0 };
        struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;

        if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
                RTE_LOG(DEBUG, PMD, "VNIC FREE ID %x\n", vnic->fw_vnic_id);
                return rc;
        }

        HWRM_PREP(req, VNIC_FREE, -1, resp);

        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        vnic->fw_vnic_id = INVALID_HW_RING_ID;

        return rc;
}
int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
                           struct bnxt_vnic_info *vnic)
{
        int rc = 0;
        struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
        struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;

        HWRM_PREP(req, VNIC_RSS_CFG, -1, resp);

        req.hash_type = rte_cpu_to_le_32(vnic->hash_type);

        req.ring_grp_tbl_addr =
                rte_cpu_to_le_64(vnic->rss_table_dma_addr);
        req.hash_key_tbl_addr =
                rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
        req.rss_ctx_idx = rte_cpu_to_le_16(vnic->fw_rss_cos_lb_ctx);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        return rc;
}
/*
 * HWRM utility functions
 */
int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
{
        int rc = 0;
        unsigned int i;

        for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
                struct bnxt_tx_queue *txq;
                struct bnxt_rx_queue *rxq;
                struct bnxt_cp_ring_info *cpr;

                if (i >= bp->rx_cp_nr_rings) {
                        txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
                        cpr = txq->cp_ring;
                } else {
                        rxq = bp->rx_queues[i];
                        cpr = rxq->cp_ring;
                }

                rc = bnxt_hwrm_stat_clear(bp, cpr);
                if (rc)
                        return rc;
        }
        return 0;
}
int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
{
        int rc = 0;
        unsigned int i;
        struct bnxt_cp_ring_info *cpr;

        for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
                unsigned int idx = i + 1;

                if (i >= bp->rx_cp_nr_rings) {
                        cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
                        // Tx rings don't have a grp_info entry.
                        idx = 0;
                } else {
                        cpr = bp->rx_queues[i]->cp_ring;
                }
                if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
                        rc = bnxt_hwrm_stat_ctx_free(bp, cpr, idx);
                        if (rc)
                                return rc;
                }
        }
        return 0;
}
int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
{
        int rc;
        unsigned int i;

        for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
                struct bnxt_tx_queue *txq;
                struct bnxt_rx_queue *rxq;
                struct bnxt_cp_ring_info *cpr;
                unsigned int idx = i + 1;

                if (i >= bp->rx_cp_nr_rings) {
                        txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
                        cpr = txq->cp_ring;
                        // Tx rings don't need a grp_info entry.
                        idx = 0;
                } else {
                        rxq = bp->rx_queues[i];
                        cpr = rxq->cp_ring;
                }

                rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, idx);
                if (rc)
                        return rc;
        }
        return 0;
}
int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
{
        int rc = 0;
        unsigned int i;

        for (i = 0; i < bp->rx_cp_nr_rings; i++) {
                unsigned int idx = i + 1;

                if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID) {
                        RTE_LOG(ERR, PMD,
                                "Attempt to free invalid ring group %d\n",
                                idx);
                        continue;
                }

                rc = bnxt_hwrm_ring_grp_free(bp, idx);
                if (rc)
                        return rc;
        }
        return rc;
}
static void bnxt_free_cp_ring(struct bnxt *bp,
                              struct bnxt_cp_ring_info *cpr)
{
        struct bnxt_ring *cp_ring = cpr->cp_ring_struct;

        bnxt_hwrm_ring_free(bp, cp_ring,
                            HWRM_RING_FREE_INPUT_RING_TYPE_CMPL);
        cp_ring->fw_ring_id = INVALID_HW_RING_ID;
        memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
                        sizeof(*cpr->cp_desc_ring));
        cpr->cp_raw_cons = 0;
}
int bnxt_free_all_hwrm_rings(struct bnxt *bp)
{
        unsigned int i;
        int rc = 0;

        for (i = 0; i < bp->tx_cp_nr_rings; i++) {
                struct bnxt_tx_queue *txq = bp->tx_queues[i];
                struct bnxt_tx_ring_info *txr = txq->tx_ring;
                struct bnxt_ring *ring = txr->tx_ring_struct;
                struct bnxt_cp_ring_info *cpr = txq->cp_ring;

                if (ring->fw_ring_id != INVALID_HW_RING_ID) {
                        bnxt_hwrm_ring_free(bp, ring,
                                        HWRM_RING_FREE_INPUT_RING_TYPE_TX);
                        ring->fw_ring_id = INVALID_HW_RING_ID;
                        memset(txr->tx_desc_ring, 0,
                                        txr->tx_ring_struct->ring_size *
                                        sizeof(*txr->tx_desc_ring));
                        memset(txr->tx_buf_ring, 0,
                                        txr->tx_ring_struct->ring_size *
                                        sizeof(*txr->tx_buf_ring));
                        txr->tx_prod = 0;
                        txr->tx_cons = 0;
                }
                if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
                        bnxt_free_cp_ring(bp, cpr);
        }

        for (i = 0; i < bp->rx_cp_nr_rings; i++) {
                struct bnxt_rx_queue *rxq = bp->rx_queues[i];
                struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
                struct bnxt_ring *ring = rxr->rx_ring_struct;
                struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
                unsigned int idx = i + 1;

                if (ring->fw_ring_id != INVALID_HW_RING_ID) {
                        bnxt_hwrm_ring_free(bp, ring,
                                        HWRM_RING_FREE_INPUT_RING_TYPE_RX);
                        ring->fw_ring_id = INVALID_HW_RING_ID;
                        bp->grp_info[idx].rx_fw_ring_id = INVALID_HW_RING_ID;
                        memset(rxr->rx_desc_ring, 0,
                                        rxr->rx_ring_struct->ring_size *
                                        sizeof(*rxr->rx_desc_ring));
                        memset(rxr->rx_buf_ring, 0,
                                        rxr->rx_ring_struct->ring_size *
                                        sizeof(*rxr->rx_buf_ring));
                        rxr->rx_prod = 0;
                }
                if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
                        bnxt_free_cp_ring(bp, cpr);
                bp->grp_info[idx].cp_fw_ring_id = INVALID_HW_RING_ID;
        }

        /* Default completion ring */
        {
                struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;

                if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
                        bnxt_free_cp_ring(bp, cpr);
                bp->grp_info[0].cp_fw_ring_id = INVALID_HW_RING_ID;
        }

        return rc;
}
int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
{
        int rc;
        unsigned int i;

        for (i = 0; i < bp->rx_cp_nr_rings; i++) {
                unsigned int idx = i + 1;

                if (bp->grp_info[idx].cp_fw_ring_id == INVALID_HW_RING_ID ||
                    bp->grp_info[idx].rx_fw_ring_id == INVALID_HW_RING_ID)
                        continue;

                rc = bnxt_hwrm_ring_grp_alloc(bp, idx);
                if (rc)
                        return rc;
        }
        return 0;
}
void bnxt_free_hwrm_resources(struct bnxt *bp)
{
        /* Release the rte_malloc'd response buffer */
        rte_free(bp->hwrm_cmd_resp_addr);
        bp->hwrm_cmd_resp_addr = NULL;
        bp->hwrm_cmd_resp_dma_addr = 0;
}
int bnxt_alloc_hwrm_resources(struct bnxt *bp)
{
        struct rte_pci_device *pdev = bp->pdev;
        char type[RTE_MEMZONE_NAMESIZE];

        sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x", pdev->addr.domain,
                pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
        bp->max_req_len = HWRM_MAX_REQ_LEN;
        bp->max_resp_len = HWRM_MAX_RESP_LEN;
        bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
        if (bp->hwrm_cmd_resp_addr == NULL)
                return -ENOMEM;
        bp->hwrm_cmd_resp_dma_addr =
                rte_malloc_virt2phy(bp->hwrm_cmd_resp_addr);
        rte_spinlock_init(&bp->hwrm_lock);

        return 0;
}
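/*
 * Hypothetical bring-up order (illustration only; the caller and the
 * vf_req_fwd table are assumptions, not code from this file).  The response
 * buffer must exist before the first command is sent, and
 * bnxt_hwrm_ver_get() may replace it under bp->hwrm_lock:
 *
 *	rc = bnxt_alloc_hwrm_resources(bp);
 *	if (!rc)
 *		rc = bnxt_hwrm_ver_get(bp);
 *	if (!rc)
 *		rc = bnxt_hwrm_func_driver_register(bp, 0, vf_req_fwd);
 */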
int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        struct bnxt_filter_info *filter;
        int rc = 0;

        STAILQ_FOREACH(filter, &vnic->filter, next) {
                rc = bnxt_hwrm_clear_filter(bp, filter);
                if (rc)
                        break;
        }
        return rc;
}

int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
        struct bnxt_filter_info *filter;
        int rc = 0;

        STAILQ_FOREACH(filter, &vnic->filter, next) {
                rc = bnxt_hwrm_set_filter(bp, vnic, filter);
                if (rc)
                        break;
        }
        return rc;
}
void bnxt_free_all_hwrm_resources(struct bnxt *bp)
{
        struct bnxt_vnic_info *vnic;
        unsigned int i;

        if (bp->vnic_info == NULL)
                return;

        vnic = &bp->vnic_info[0];
        bnxt_hwrm_cfa_l2_clear_rx_mask(bp, vnic);

        /* VNIC resources */
        for (i = 0; i < bp->nr_vnics; i++) {
                struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

                bnxt_clear_hwrm_vnic_filters(bp, vnic);

                bnxt_hwrm_vnic_ctx_free(bp, vnic);
                bnxt_hwrm_vnic_free(bp, vnic);

                rte_free(vnic->fw_grp_ids);
        }
        /* Ring resources */
        bnxt_free_all_hwrm_rings(bp);
        bnxt_free_all_hwrm_ring_grps(bp);
        bnxt_free_all_hwrm_stat_ctxs(bp);
}
static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
{
        uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;

        if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
                return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;

        switch (conf_link_speed) {
        case ETH_LINK_SPEED_10M_HD:
        case ETH_LINK_SPEED_100M_HD:
                return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
        }
        return hw_link_duplex;
}

static uint16_t bnxt_check_eth_link_autoneg(uint32_t conf_link)
{
        return (conf_link & ETH_LINK_SPEED_FIXED) ? 0 : 1;
}
static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
{
        uint16_t eth_link_speed = 0;

        if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
                return ETH_LINK_SPEED_AUTONEG;

        switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
        case ETH_LINK_SPEED_100M:
        case ETH_LINK_SPEED_100M_HD:
                eth_link_speed =
                        HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
                break;
        case ETH_LINK_SPEED_1G:
                eth_link_speed =
                        HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
                break;
        case ETH_LINK_SPEED_2_5G:
                eth_link_speed =
                        HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
                break;
        case ETH_LINK_SPEED_10G:
                eth_link_speed =
                        HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
                break;
        case ETH_LINK_SPEED_20G:
                eth_link_speed =
                        HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
                break;
        case ETH_LINK_SPEED_25G:
                eth_link_speed =
                        HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
                break;
        case ETH_LINK_SPEED_40G:
                eth_link_speed =
                        HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
                break;
        case ETH_LINK_SPEED_50G:
                eth_link_speed =
                        HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
                break;
        default:
                RTE_LOG(ERR, PMD,
                        "Unsupported link speed %d; default to AUTO\n",
                        conf_link_speed);
                break;
        }
        return eth_link_speed;
}
#define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
        ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
        ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
        ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G)
static int bnxt_valid_link_speed(uint32_t link_speed, uint8_t port_id)
{
        uint32_t one_speed;

        if (link_speed == ETH_LINK_SPEED_AUTONEG)
                return 0;

        if (link_speed & ETH_LINK_SPEED_FIXED) {
                one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;

                if (one_speed & (one_speed - 1)) {
                        RTE_LOG(ERR, PMD,
                                "Invalid advertised speeds (%u) for port %u\n",
                                link_speed, port_id);
                        return -EINVAL;
                }
                if ((one_speed & BNXT_SUPPORTED_SPEEDS) != one_speed) {
                        RTE_LOG(ERR, PMD,
                                "Unsupported advertised speed (%u) for port %u\n",
                                link_speed, port_id);
                        return -EINVAL;
                }
        } else {
                if (!(link_speed & BNXT_SUPPORTED_SPEEDS)) {
                        RTE_LOG(ERR, PMD,
                                "Unsupported advertised speeds (%u) for port %u\n",
                                link_speed, port_id);
                        return -EINVAL;
                }
        }
        return 0;
}
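/*
 * Worked example for the fixed-speed check above (illustration only):
 * ETH_LINK_SPEED_FIXED requires exactly one advertised speed bit, and
 * "one_speed & (one_speed - 1)" is non-zero exactly when more than one
 * bit is set, so
 *
 *	ETH_LINK_SPEED_FIXED | ETH_LINK_SPEED_10G		// accepted
 *	ETH_LINK_SPEED_FIXED | ETH_LINK_SPEED_10G
 *			     | ETH_LINK_SPEED_1G		// rejected
 */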
static uint16_t
bnxt_parse_eth_link_speed_mask(struct bnxt *bp, uint32_t link_speed)
{
        uint16_t ret = 0;

        if (link_speed == ETH_LINK_SPEED_AUTONEG) {
                if (bp->link_info.support_speeds)
                        return bp->link_info.support_speeds;
                link_speed = BNXT_SUPPORTED_SPEEDS;
        }

        if (link_speed & ETH_LINK_SPEED_100M)
                ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
        if (link_speed & ETH_LINK_SPEED_100M_HD)
                ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
        if (link_speed & ETH_LINK_SPEED_1G)
                ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
        if (link_speed & ETH_LINK_SPEED_2_5G)
                ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
        if (link_speed & ETH_LINK_SPEED_10G)
                ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
        if (link_speed & ETH_LINK_SPEED_20G)
                ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
        if (link_speed & ETH_LINK_SPEED_25G)
                ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
        if (link_speed & ETH_LINK_SPEED_40G)
                ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
        if (link_speed & ETH_LINK_SPEED_50G)
                ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
        return ret;
}
static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
{
        uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;

        switch (hw_link_speed) {
        case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
                eth_link_speed = ETH_SPEED_NUM_100M;
                break;
        case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
                eth_link_speed = ETH_SPEED_NUM_1G;
                break;
        case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
                eth_link_speed = ETH_SPEED_NUM_2_5G;
                break;
        case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
                eth_link_speed = ETH_SPEED_NUM_10G;
                break;
        case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
                eth_link_speed = ETH_SPEED_NUM_20G;
                break;
        case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
                eth_link_speed = ETH_SPEED_NUM_25G;
                break;
        case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
                eth_link_speed = ETH_SPEED_NUM_40G;
                break;
        case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
                eth_link_speed = ETH_SPEED_NUM_50G;
                break;
        case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
        default:
                RTE_LOG(ERR, PMD, "HWRM link speed %d not defined\n",
                        hw_link_speed);
                break;
        }
        return eth_link_speed;
}
static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
{
        uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;

        switch (hw_link_duplex) {
        case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
        case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
                eth_link_duplex = ETH_LINK_FULL_DUPLEX;
                break;
        case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
                eth_link_duplex = ETH_LINK_HALF_DUPLEX;
                break;
        default:
                RTE_LOG(ERR, PMD, "HWRM link duplex %d not defined\n",
                        hw_link_duplex);
                break;
        }
        return eth_link_duplex;
}
int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
{
        int rc = 0;
        struct bnxt_link_info *link_info = &bp->link_info;

        rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
        if (rc) {
                RTE_LOG(ERR, PMD,
                        "Get link config failed with rc %d\n", rc);
                goto exit;
        }
        if (link_info->link_speed)
                link->link_speed =
                        bnxt_parse_hw_link_speed(link_info->link_speed);
        else
                link->link_speed = ETH_SPEED_NUM_NONE;
        link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
        link->link_status = link_info->link_up;
        link->link_autoneg = link_info->auto_mode ==
                HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
                ETH_LINK_FIXED : ETH_LINK_AUTONEG;
exit:
        return rc;
}
int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
{
        int rc = 0;
        struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
        struct bnxt_link_info link_req;
        uint16_t speed, autoneg;

        if (BNXT_NPAR_PF(bp) || BNXT_VF(bp))
                return 0;

        rc = bnxt_valid_link_speed(dev_conf->link_speeds,
                        bp->eth_dev->data->port_id);
        if (rc)
                goto error;

        memset(&link_req, 0, sizeof(link_req));
        link_req.link_up = link_up;
        if (!link_up)
                goto port_phy_cfg;

        autoneg = bnxt_check_eth_link_autoneg(dev_conf->link_speeds);
        speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
        link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
        /* Autoneg can be done only when the FW allows it */
        if (autoneg == 1 && !(bp->link_info.auto_link_speed ||
                              bp->link_info.force_link_speed)) {
                link_req.phy_flags |=
                                HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
                link_req.auto_link_speed_mask =
                        bnxt_parse_eth_link_speed_mask(bp,
                                                       dev_conf->link_speeds);
        } else {
                if (bp->link_info.phy_type ==
                    HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET ||
                    bp->link_info.phy_type ==
                    HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE ||
                    bp->link_info.media_type ==
                    HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP) {
                        RTE_LOG(ERR, PMD, "10GBase-T devices must autoneg\n");
                        return -EINVAL;
                }

                link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
                /* If the user wants a particular speed, try that first. */
                if (speed)
                        link_req.link_speed = speed;
                else if (bp->link_info.force_link_speed)
                        link_req.link_speed = bp->link_info.force_link_speed;
                else
                        link_req.link_speed = bp->link_info.auto_link_speed;
        }
        link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
        link_req.auto_pause = bp->link_info.auto_pause;
        link_req.force_pause = bp->link_info.force_pause;

port_phy_cfg:
        rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
        if (rc) {
                RTE_LOG(ERR, PMD,
                        "Set link config failed with rc %d\n", rc);
        }

error:
        return rc;
}
int bnxt_hwrm_func_qcfg(struct bnxt *bp)
{
        struct hwrm_func_qcfg_input req = {0};
        struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
        int rc = 0;

        HWRM_PREP(req, FUNC_QCFG, -1, resp);
        req.fid = rte_cpu_to_le_16(0xffff);

        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

        HWRM_CHECK_RESULT;

        if (BNXT_VF(bp)) {
                struct bnxt_vf_info *vf = &bp->vf;

                /* Hardcoded 0xfff VLAN ID mask */
                vf->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
        }

        switch (resp->port_partition_type) {
        case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
        case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
        case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
                bp->port_partition_type = resp->port_partition_type;
                break;
        default:
                bp->port_partition_type = 0;