/*-
 *   BSD LICENSE
 *
 *   Copyright(c) Broadcom Limited.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Broadcom Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_version.h>

#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_ring.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"

#define HWRM_CMD_TIMEOUT		6000000

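/*
 * HWRM_CMD_TIMEOUT is in poll-loop iterations; with the one-microsecond
 * delay per iteration used in bnxt_hwrm_send_message_locked() below, this
 * is roughly a six second limit per command.
 */
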
/*
 * HWRM Functions (sent to HWRM)
 * These are named bnxt_hwrm_*() and return -1 if bnxt_hwrm_send_message()
 * fails (i.e. a timeout), and a positive non-zero HWRM error code if the
 * HWRM command is rejected by the ChiMP firmware.
 */

static int bnxt_hwrm_send_message_locked(struct bnxt *bp, void *msg,
					 uint32_t msg_len)
{
	unsigned int i;
	struct input *req = msg;
	struct output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t *data = msg;
	uint8_t *bar;
	uint8_t *valid;

	/* Write request msg to hwrm channel */
	for (i = 0; i < msg_len; i += 4) {
		bar = (uint8_t *)bp->bar0 + i;
		*(volatile uint32_t *)bar = *data;
		data++;
	}

	/* Zero the rest of the request space */
	for (; i < bp->max_req_len; i += 4) {
		bar = (uint8_t *)bp->bar0 + i;
		*(volatile uint32_t *)bar = 0;
	}

	/* Ring channel doorbell */
	bar = (uint8_t *)bp->bar0 + 0x100;
	*(volatile uint32_t *)bar = 1;

	/* Poll for the valid bit */
	for (i = 0; i < HWRM_CMD_TIMEOUT; i++) {
		/* Sanity check on the resp->resp_len */
		rte_rmb();
		if (resp->resp_len && resp->resp_len <=
				bp->max_resp_len) {
			/* Last byte of resp contains the valid key */
			valid = (uint8_t *)resp + resp->resp_len - 1;
			if (*valid == HWRM_RESP_VALID_KEY)
				break;
		}
		rte_delay_us(1);
	}

	if (i >= HWRM_CMD_TIMEOUT) {
		RTE_LOG(ERR, PMD, "Error sending msg %x\n",
			req->req_type);
		goto err_ret;
	}
	return 0;

err_ret:
	return -1;
}

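/*
 * Summary of the command channel as implemented above: the request is
 * written as 32-bit words starting at BAR0 offset 0, the remainder of the
 * maximum request window is zeroed, and a write of 1 to BAR0 offset 0x100
 * rings the doorbell.  The firmware DMAs its reply into
 * bp->hwrm_cmd_resp_addr; the final byte of the reply holds
 * HWRM_RESP_VALID_KEY once the response is complete, which is what the
 * polling loop waits for.
 */
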
static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg, uint32_t msg_len)
{
	int rc;

	rte_spinlock_lock(&bp->hwrm_lock);
	rc = bnxt_hwrm_send_message_locked(bp, msg, msg_len);
	rte_spinlock_unlock(&bp->hwrm_lock);
	return rc;
}

#define HWRM_PREP(req, type, cr, resp) \
	memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
	req.req_type = rte_cpu_to_le_16(HWRM_##type); \
	req.cmpl_ring = rte_cpu_to_le_16(cr); \
	req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++); \
	req.target_id = rte_cpu_to_le_16(0xffff); \
	req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr)

#define HWRM_CHECK_RESULT \
	{ \
		if (rc) { \
			RTE_LOG(ERR, PMD, "%s failed rc:%d\n", \
				__func__, rc); \
			if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \
				rc = -EACCES; \
			else if (rc > 0) \
				rc = -EINVAL; \
			return rc; \
		} \
		if (resp->error_code) { \
			rc = rte_le_to_cpu_16(resp->error_code); \
			RTE_LOG(ERR, PMD, "%s error %d\n", __func__, rc); \
			if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \
				rc = -EACCES; \
			else if (rc > 0) \
				rc = -EINVAL; \
			return rc; \
		} \
	}

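/*
 * Illustrative sketch only (not a real HWRM command): every wrapper below
 * follows this shape, with hwrm_xxx_* standing in for the actual
 * command-specific request/response types from hsi_struct_def_dpdk.h.
 *
 *	int bnxt_hwrm_xxx(struct bnxt *bp)
 *	{
 *		int rc = 0;
 *		struct hwrm_xxx_input req = {.req_type = 0 };
 *		struct hwrm_xxx_output *resp = bp->hwrm_cmd_resp_addr;
 *
 *		HWRM_PREP(req, XXX, -1, resp);
 *		// ...fill command-specific req fields...
 *		rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 *		HWRM_CHECK_RESULT;
 *		// ...read command-specific resp fields...
 *		return rc;
 *	}
 */
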
int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, CFA_L2_SET_RX_MASK, -1, resp);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
	req.mask = 0;

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	return rc;
}

int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t mask = 0;

	HWRM_PREP(req, CFA_L2_SET_RX_MASK, -1, resp);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

	/*
	 * FIXME: add the multicast flag once adding multicast addresses
	 * is supported.
	 */
	if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
	if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI)
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
	req.mask = rte_cpu_to_le_32(mask);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	return rc;
}

int bnxt_hwrm_clear_filter(struct bnxt *bp,
			   struct bnxt_filter_info *filter)
{
	int rc = 0;
	struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;

	if (filter->fw_l2_filter_id == UINT64_MAX)
		return 0;

	HWRM_PREP(req, CFA_L2_FILTER_FREE, -1, resp);

	req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	filter->fw_l2_filter_id = -1;

	return 0;
}

int bnxt_hwrm_set_filter(struct bnxt *bp,
			 struct bnxt_vnic_info *vnic,
			 struct bnxt_filter_info *filter)
{
	int rc = 0;
	struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t enables = 0;

	if (filter->fw_l2_filter_id != UINT64_MAX)
		bnxt_hwrm_clear_filter(bp, filter);

	HWRM_PREP(req, CFA_L2_FILTER_ALLOC, -1, resp);

	req.flags = rte_cpu_to_le_32(filter->flags);
	req.flags |=
	rte_cpu_to_le_32(HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST);

	enables = filter->enables |
	      HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
	req.dst_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
		memcpy(req.l2_addr, filter->l2_addr,
		       ETHER_ADDR_LEN);
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
		memcpy(req.l2_addr_mask, filter->l2_addr_mask,
		       ETHER_ADDR_LEN);
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
		req.l2_ovlan = filter->l2_ovlan;
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
		req.l2_ovlan_mask = filter->l2_ovlan_mask;

	req.enables = rte_cpu_to_le_32(enables);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);

	return rc;
}

int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, void *fwd_cmd)
{
	int rc = 0;
	struct hwrm_exec_fwd_resp_input req = {.req_type = 0 };
	struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, EXEC_FWD_RESP, -1, resp);

	memcpy(req.encap_request, fwd_cmd,
	       sizeof(req.encap_request));

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	return rc;
}

int bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_func_qcaps_input req = {.req_type = 0 };
	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, FUNC_QCAPS, -1, resp);

	req.fid = rte_cpu_to_le_16(0xffff);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
	if (BNXT_PF(bp)) {
		struct bnxt_pf_info *pf = &bp->pf;

		pf->fw_fid = rte_le_to_cpu_32(resp->fid);
		pf->port_id = resp->port_id;
		memcpy(pf->mac_addr, resp->mac_address, ETHER_ADDR_LEN);
		pf->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
		pf->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
		pf->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
		pf->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
		pf->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
		pf->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
		pf->first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
		pf->max_vfs = rte_le_to_cpu_16(resp->max_vfs);
	} else {
		struct bnxt_vf_info *vf = &bp->vf;

		vf->fw_fid = rte_le_to_cpu_32(resp->fid);
		memcpy(vf->mac_addr, &resp->mac_address, ETHER_ADDR_LEN);
		vf->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
		vf->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
		vf->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
		vf->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
		vf->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
		vf->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
	}

	return rc;
}

int bnxt_hwrm_func_reset(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_func_reset_input req = {.req_type = 0 };
	struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, FUNC_RESET, -1, resp);

	req.enables = rte_cpu_to_le_32(0);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	return rc;
}

int bnxt_hwrm_func_driver_register(struct bnxt *bp, uint32_t flags,
				   uint32_t *vf_req_fwd)
{
	int rc;
	struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
	struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;

	if (bp->flags & BNXT_FLAG_REGISTERED)
		return 0;

	HWRM_PREP(req, FUNC_DRV_RGTR, -1, resp);
	req.flags = flags;
	req.enables = HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
			HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD;
	req.ver_maj = RTE_VER_YEAR;
	req.ver_min = RTE_VER_MONTH;
	req.ver_upd = RTE_VER_MINOR;

	memcpy(req.vf_req_fwd, vf_req_fwd, sizeof(req.vf_req_fwd));

	req.async_event_fwd[0] |= rte_cpu_to_le_32(0x1); /* TODO: Use MACRO */

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	bp->flags |= BNXT_FLAG_REGISTERED;

	return rc;
}

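/*
 * Note on the magic 0x1 above: async_event_fwd is a bitmap of HWRM async
 * event IDs the driver wants forwarded, so bit 0 requests event ID 0
 * (the link status change event in the HWRM spec).  As the TODO says, a
 * named constant from hsi_struct_def_dpdk.h would be clearer here.
 */
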
int bnxt_hwrm_ver_get(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_ver_get_input req = {.req_type = 0 };
	struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t fw_version;
	uint16_t max_resp_len;
	char type[RTE_MEMZONE_NAMESIZE];

	HWRM_PREP(req, VER_GET, -1, resp);

	req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
	req.hwrm_intf_min = HWRM_VERSION_MINOR;
	req.hwrm_intf_upd = HWRM_VERSION_UPDATE;

	/*
	 * Hold the lock since we may be adjusting the response pointers.
	 */
	rte_spinlock_lock(&bp->hwrm_lock);
	rc = bnxt_hwrm_send_message_locked(bp, &req, sizeof(req));
	if (rc)
		goto error;

	RTE_LOG(INFO, PMD, "%d.%d.%d:%d.%d.%d\n",
		resp->hwrm_intf_maj, resp->hwrm_intf_min,
		resp->hwrm_intf_upd,
		resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld);
	RTE_LOG(INFO, PMD, "Driver HWRM version: %d.%d.%d\n",
		HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);

	fw_version = resp->hwrm_intf_maj << 16;
	fw_version |= resp->hwrm_intf_min << 8;
	fw_version |= resp->hwrm_intf_upd;

	if (resp->hwrm_intf_maj != HWRM_VERSION_MAJOR) {
		RTE_LOG(ERR, PMD, "Unsupported firmware API version\n");
		rc = -EINVAL;
		goto error;
	}

	if (bp->max_req_len > resp->max_req_win_len) {
		RTE_LOG(ERR, PMD, "Unsupported request length\n");
		rc = -EINVAL;
	}
	bp->max_req_len = resp->max_req_win_len;
	max_resp_len = resp->max_resp_len;
	if (bp->max_resp_len != max_resp_len) {
		sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x",
			bp->pdev->addr.domain, bp->pdev->addr.bus,
			bp->pdev->addr.devid, bp->pdev->addr.function);

		rte_free(bp->hwrm_cmd_resp_addr);

		bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0);
		if (bp->hwrm_cmd_resp_addr == NULL) {
			rc = -ENOMEM;
			goto error;
		}
		bp->hwrm_cmd_resp_dma_addr =
			rte_malloc_virt2phy(bp->hwrm_cmd_resp_addr);
		bp->max_resp_len = max_resp_len;
	}

error:
	rte_spinlock_unlock(&bp->hwrm_lock);
	return rc;
}

int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
{
	int rc;
	struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
	struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;

	if (!(bp->flags & BNXT_FLAG_REGISTERED))
		return 0;

	HWRM_PREP(req, FUNC_DRV_UNRGTR, -1, resp);
	req.flags = flags;

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	bp->flags &= ~BNXT_FLAG_REGISTERED;

	return rc;
}

static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
{
	int rc = 0;
	struct hwrm_port_phy_cfg_input req = {0};
	struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t enables = 0;

	HWRM_PREP(req, PORT_PHY_CFG, -1, resp);

	if (conf->link_up) {
		/* Setting Fixed Speed. But AutoNeg is ON, So disable it */
		if (bp->link_info.auto_mode && conf->link_speed) {
			req.auto_mode = HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE;
			RTE_LOG(DEBUG, PMD, "Disabling AutoNeg\n");
		}

		req.flags = rte_cpu_to_le_32(conf->phy_flags);
		req.force_link_speed = rte_cpu_to_le_16(conf->link_speed);
		enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
		/*
		 * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
		 * any auto mode, even "none".
		 */
		if (!conf->link_speed) {
			/* No speeds specified. Enable AutoNeg - all speeds */
			req.auto_mode =
				HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS;
		}
		/* AutoNeg - Advertise speeds specified. */
		if (conf->auto_link_speed_mask &&
		    !(conf->phy_flags & HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE)) {
			req.auto_mode =
				HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
			req.auto_link_speed_mask =
				conf->auto_link_speed_mask;
			enables |=
			HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
		}

		req.auto_duplex = conf->duplex;
		enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
		req.auto_pause = conf->auto_pause;
		req.force_pause = conf->force_pause;
		/* Set force_pause if there is no auto or if there is a force */
		if (req.auto_pause && !req.force_pause)
			enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
		else
			enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;

		req.enables = rte_cpu_to_le_32(enables);
	} else {
		req.flags =
		rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DOWN);
		RTE_LOG(INFO, PMD, "Force Link Down\n");
	}

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	return rc;
}

static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
				   struct bnxt_link_info *link_info)
{
	int rc = 0;
	struct hwrm_port_phy_qcfg_input req = {0};
	struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, PORT_PHY_QCFG, -1, resp);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	link_info->phy_link_status = resp->link;
	link_info->link_up =
		(link_info->phy_link_status ==
		 HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) ? 1 : 0;
	link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
	link_info->duplex = resp->duplex;
	link_info->pause = resp->pause;
	link_info->auto_pause = resp->auto_pause;
	link_info->force_pause = resp->force_pause;
	link_info->auto_mode = resp->auto_mode;
	link_info->phy_type = resp->phy_type;
	link_info->media_type = resp->media_type;

	link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
	link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
	link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
	link_info->force_link_speed = rte_le_to_cpu_16(resp->force_link_speed);
	link_info->phy_ver[0] = resp->phy_maj;
	link_info->phy_ver[1] = resp->phy_min;
	link_info->phy_ver[2] = resp->phy_bld;

	return rc;
}

int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
	struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, QUEUE_QPORTCFG, -1, resp);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

#define GET_QUEUE_INFO(x) \
	bp->cos_queue[x].id = resp->queue_id##x; \
	bp->cos_queue[x].profile = resp->queue_id##x##_service_profile

	GET_QUEUE_INFO(0);
	GET_QUEUE_INFO(1);
	GET_QUEUE_INFO(2);
	GET_QUEUE_INFO(3);
	GET_QUEUE_INFO(4);
	GET_QUEUE_INFO(5);
	GET_QUEUE_INFO(6);
	GET_QUEUE_INFO(7);

	return rc;
}

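/*
 * For reference, the token-pasting macro above expands GET_QUEUE_INFO(0) to:
 *
 *	bp->cos_queue[0].id = resp->queue_id0;
 *	bp->cos_queue[0].profile = resp->queue_id0_service_profile;
 */
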
int bnxt_hwrm_ring_alloc(struct bnxt *bp,
			 struct bnxt_ring *ring,
			 uint32_t ring_type, uint32_t map_index,
			 uint32_t stats_ctx_id, uint32_t cmpl_ring_id)
{
	int rc = 0;
	struct hwrm_ring_alloc_input req = {.req_type = 0 };
	struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_ALLOC, -1, resp);

	req.enables = rte_cpu_to_le_32(0);

	req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
	req.fbo = rte_cpu_to_le_32(0);
	/* Association of ring index with doorbell index */
	req.logical_id = rte_cpu_to_le_16(map_index);

	switch (ring_type) {
	case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
		req.queue_id = bp->cos_queue[0].id;
		/* FALLTHROUGH */
	case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
		req.ring_type = ring_type;
		req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
		req.length = rte_cpu_to_le_32(ring->ring_size);
		req.stat_ctx_id = rte_cpu_to_le_16(stats_ctx_id);
		if (stats_ctx_id != INVALID_STATS_CTX_ID)
			req.enables =
			rte_cpu_to_le_32(rte_le_to_cpu_32(req.enables) |
			HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID);
		break;
	case HWRM_RING_ALLOC_INPUT_RING_TYPE_CMPL:
		req.ring_type = ring_type;
		/*
		 * TODO: Some HWRM versions crash with
		 * HWRM_RING_ALLOC_INPUT_INT_MODE_POLL
		 */
		req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
		req.length = rte_cpu_to_le_32(ring->ring_size);
		break;
	default:
		RTE_LOG(ERR, PMD, "hwrm alloc invalid ring type %d\n",
			ring_type);
		return -1;
	}

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	if (rc || resp->error_code) {
		if (rc == 0 && resp->error_code)
			rc = rte_le_to_cpu_16(resp->error_code);
		switch (ring_type) {
		case HWRM_RING_FREE_INPUT_RING_TYPE_CMPL:
			RTE_LOG(ERR, PMD,
				"hwrm_ring_alloc cp failed. rc:%d\n", rc);
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
			RTE_LOG(ERR, PMD,
				"hwrm_ring_alloc rx failed. rc:%d\n", rc);
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
			RTE_LOG(ERR, PMD,
				"hwrm_ring_alloc tx failed. rc:%d\n", rc);
			return rc;
		default:
			RTE_LOG(ERR, PMD, "Invalid ring. rc:%d\n", rc);
			return rc;
		}
	}

	ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);
	return rc;
}

int bnxt_hwrm_ring_free(struct bnxt *bp,
			struct bnxt_ring *ring, uint32_t ring_type)
{
	int rc;
	struct hwrm_ring_free_input req = {.req_type = 0 };
	struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_FREE, -1, resp);

	req.ring_type = ring_type;
	req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	if (rc || resp->error_code) {
		if (rc == 0 && resp->error_code)
			rc = rte_le_to_cpu_16(resp->error_code);

		switch (ring_type) {
		case HWRM_RING_FREE_INPUT_RING_TYPE_CMPL:
			RTE_LOG(ERR, PMD, "hwrm_ring_free cp failed. rc:%d\n",
				rc);
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
			RTE_LOG(ERR, PMD, "hwrm_ring_free rx failed. rc:%d\n",
				rc);
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
			RTE_LOG(ERR, PMD, "hwrm_ring_free tx failed. rc:%d\n",
				rc);
			return rc;
		default:
			RTE_LOG(ERR, PMD, "Invalid ring, rc:%d\n", rc);
			return rc;
		}
	}
	return 0;
}

int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
{
	int rc;
	struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
	struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_GRP_ALLOC, -1, resp);

	req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
	req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
	req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
	req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	bp->grp_info[idx].fw_grp_id =
	    rte_le_to_cpu_16(resp->ring_group_id);

	return rc;
}

int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
{
	int rc;
	struct hwrm_ring_grp_free_input req = {.req_type = 0 };
	struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_GRP_FREE, -1, resp);

	req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;

	return rc;
}

int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
{
	int rc = 0;
	struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
	struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, STAT_CTX_CLR_STATS, -1, resp);

	if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
		return rc;

	req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
	req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	return rc;
}

int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp,
			     struct bnxt_cp_ring_info *cpr, unsigned int idx)
{
	int rc;
	struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
	struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, STAT_CTX_ALLOC, -1, resp);

	req.update_period_ms = rte_cpu_to_le_32(1000);

	req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++);
	req.stats_dma_addr =
	    rte_cpu_to_le_64(cpr->hw_stats_map);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	cpr->hw_stats_ctx_id = rte_le_to_cpu_16(resp->stat_ctx_id);
	/* Tx rings don't need a grp_info entry; it is an Rx-only attribute. */
	if (idx)
		bp->grp_info[idx].fw_stats_ctx = cpr->hw_stats_ctx_id;

	return rc;
}

int bnxt_hwrm_stat_ctx_free(struct bnxt *bp,
			    struct bnxt_cp_ring_info *cpr, unsigned int idx)
{
	int rc;
	struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
	struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, STAT_CTX_FREE, -1, resp);

	req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);
	req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
	/* Tx rings don't have a grp_info entry; it is an Rx-only attribute. */
	if (idx)
		bp->grp_info[idx].fw_stats_ctx = cpr->hw_stats_ctx_id;

	return rc;
}

int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0, i, j;
	struct hwrm_vnic_alloc_input req = { 0 };
	struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	/* map ring groups to this vnic */
	for (i = vnic->start_grp_id, j = 0; i < vnic->end_grp_id; i++, j++) {
		if (bp->grp_info[i].fw_grp_id == (uint16_t)HWRM_NA_SIGNATURE) {
			RTE_LOG(ERR, PMD,
				"Not enough ring groups avail:%x req:%x\n", j,
				(vnic->end_grp_id - vnic->start_grp_id) + 1);
			break;
		}
		vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
	}

	vnic->fw_rss_cos_lb_ctx = (uint16_t)HWRM_NA_SIGNATURE;
	vnic->ctx_is_rss_cos_lb = HW_CONTEXT_NONE;

	HWRM_PREP(req, VNIC_ALLOC, -1, resp);

	if (vnic->func_default)
		req.flags =
			rte_cpu_to_le_32(HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
	RTE_LOG(DEBUG, PMD, "VNIC ID %x\n", vnic->fw_vnic_id);
	return rc;
}

int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_cfg_input req = {.req_type = 0 };
	struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;

	if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
		RTE_LOG(DEBUG, PMD, "VNIC ID %x\n", vnic->fw_vnic_id);
		return rc;
	}

	HWRM_PREP(req, VNIC_CFG, -1, resp);

	/* Only RSS support for now TBD: COS & LB */
	req.enables =
	    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP |
			     HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE |
			     HWRM_VNIC_CFG_INPUT_ENABLES_MRU);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
	req.dflt_ring_grp =
		rte_cpu_to_le_16(bp->grp_info[vnic->start_grp_id].fw_grp_id);
	req.rss_rule = rte_cpu_to_le_16(vnic->fw_rss_cos_lb_ctx);
	req.cos_rule = rte_cpu_to_le_16(0xffff);
	req.lb_rule = rte_cpu_to_le_16(0xffff);
	req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
				   ETHER_CRC_LEN + VLAN_TAG_SIZE);
	/* Configure default VNIC only once. */
	if (vnic->func_default && !(bp->flags & BNXT_FLAG_DFLT_VNIC_SET)) {
		req.flags |=
		    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
		bp->flags |= BNXT_FLAG_DFLT_VNIC_SET;
	}
	if (vnic->vlan_strip)
		req.flags |=
		    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	return rc;
}

int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
	struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
						bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC, -1, resp);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	vnic->fw_rss_cos_lb_ctx = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
	RTE_LOG(DEBUG, PMD, "VNIC RSS Rule %x\n", vnic->fw_rss_cos_lb_ctx);

	return rc;
}

int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
	struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
						bp->hwrm_cmd_resp_addr;

	if (vnic->fw_rss_cos_lb_ctx == 0xffff) {
		RTE_LOG(DEBUG, PMD,
			"VNIC RSS Rule %x\n", vnic->fw_rss_cos_lb_ctx);
		return rc;
	}

	HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE, -1, resp);

	req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(vnic->fw_rss_cos_lb_ctx);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	vnic->fw_rss_cos_lb_ctx = INVALID_HW_RING_ID;

	return rc;
}

int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_free_input req = {.req_type = 0 };
	struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;

	if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
		RTE_LOG(DEBUG, PMD, "VNIC FREE ID %x\n", vnic->fw_vnic_id);
		return rc;
	}

	HWRM_PREP(req, VNIC_FREE, -1, resp);

	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	vnic->fw_vnic_id = INVALID_HW_RING_ID;
	/* Configure default VNIC again if necessary. */
	if (vnic->func_default && (bp->flags & BNXT_FLAG_DFLT_VNIC_SET))
		bp->flags &= ~BNXT_FLAG_DFLT_VNIC_SET;

	return rc;
}

int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
			   struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
	struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_RSS_CFG, -1, resp);

	req.hash_type = rte_cpu_to_le_32(vnic->hash_type);

	req.ring_grp_tbl_addr =
	    rte_cpu_to_le_64(vnic->rss_table_dma_addr);
	req.hash_key_tbl_addr =
	    rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
	req.rss_ctx_idx = rte_cpu_to_le_16(vnic->fw_rss_cos_lb_ctx);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	return rc;
}

/*
 * HWRM utility functions
 */

int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
{
	unsigned int i;
	int rc = 0;

	for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
		struct bnxt_tx_queue *txq;
		struct bnxt_rx_queue *rxq;
		struct bnxt_cp_ring_info *cpr;

		if (i >= bp->rx_cp_nr_rings) {
			txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
			cpr = txq->cp_ring;
		} else {
			rxq = bp->rx_queues[i];
			cpr = rxq->cp_ring;
		}

		rc = bnxt_hwrm_stat_clear(bp, cpr);
		if (rc)
			return rc;
	}
	return 0;
}

int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
{
	int rc;
	unsigned int i;
	struct bnxt_cp_ring_info *cpr;

	for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
		unsigned int idx = i + 1;

		if (i >= bp->rx_cp_nr_rings) {
			cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
			/* Tx rings don't have a grp_info entry. */
			idx = 0;
		} else {
			cpr = bp->rx_queues[i]->cp_ring;
		}
		if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
			rc = bnxt_hwrm_stat_ctx_free(bp, cpr, idx);
			if (rc)
				return rc;
		}
	}
	return 0;
}

int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
{
	unsigned int i;
	int rc = 0;

	for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
		struct bnxt_tx_queue *txq;
		struct bnxt_rx_queue *rxq;
		struct bnxt_cp_ring_info *cpr;
		unsigned int idx = i + 1;

		if (i >= bp->rx_cp_nr_rings) {
			txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
			cpr = txq->cp_ring;
			/* Tx rings don't need a grp_info entry. */
			idx = 0;
		} else {
			rxq = bp->rx_queues[i];
			cpr = rxq->cp_ring;
		}

		rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, idx);
		if (rc)
			return rc;
	}
	return 0;
}

int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
{
	unsigned int i;
	int rc = 0;

	for (i = 0; i < bp->rx_cp_nr_rings; i++) {
		unsigned int idx = i + 1;

		if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID) {
			RTE_LOG(ERR, PMD,
				"Attempt to free invalid ring group %d\n",
				idx);
			continue;
		}

		rc = bnxt_hwrm_ring_grp_free(bp, idx);
		if (rc)
			return rc;
	}
	return rc;
}

static void bnxt_free_cp_ring(struct bnxt *bp,
			      struct bnxt_cp_ring_info *cpr)
{
	struct bnxt_ring *cp_ring = cpr->cp_ring_struct;

	bnxt_hwrm_ring_free(bp, cp_ring,
			    HWRM_RING_FREE_INPUT_RING_TYPE_CMPL);
	cp_ring->fw_ring_id = INVALID_HW_RING_ID;
	memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
			sizeof(*cpr->cp_desc_ring));
	cpr->cp_raw_cons = 0;
}

int bnxt_free_all_hwrm_rings(struct bnxt *bp)
{
	unsigned int i;

	for (i = 0; i < bp->tx_cp_nr_rings; i++) {
		struct bnxt_tx_queue *txq = bp->tx_queues[i];
		struct bnxt_tx_ring_info *txr = txq->tx_ring;
		struct bnxt_ring *ring = txr->tx_ring_struct;
		struct bnxt_cp_ring_info *cpr = txq->cp_ring;

		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
			bnxt_hwrm_ring_free(bp, ring,
					HWRM_RING_FREE_INPUT_RING_TYPE_TX);
			ring->fw_ring_id = INVALID_HW_RING_ID;
			memset(txr->tx_desc_ring, 0,
					txr->tx_ring_struct->ring_size *
					sizeof(*txr->tx_desc_ring));
			memset(txr->tx_buf_ring, 0,
					txr->tx_ring_struct->ring_size *
					sizeof(*txr->tx_buf_ring));
		}
		if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
			bnxt_free_cp_ring(bp, cpr);
	}

	for (i = 0; i < bp->rx_cp_nr_rings; i++) {
		struct bnxt_rx_queue *rxq = bp->rx_queues[i];
		struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
		struct bnxt_ring *ring = rxr->rx_ring_struct;
		struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
		unsigned int idx = i + 1;

		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
			bnxt_hwrm_ring_free(bp, ring,
					HWRM_RING_FREE_INPUT_RING_TYPE_RX);
			ring->fw_ring_id = INVALID_HW_RING_ID;
			bp->grp_info[idx].rx_fw_ring_id = INVALID_HW_RING_ID;
			memset(rxr->rx_desc_ring, 0,
					rxr->rx_ring_struct->ring_size *
					sizeof(*rxr->rx_desc_ring));
			memset(rxr->rx_buf_ring, 0,
					rxr->rx_ring_struct->ring_size *
					sizeof(*rxr->rx_buf_ring));
		}
		if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
			bnxt_free_cp_ring(bp, cpr);
		bp->grp_info[idx].cp_fw_ring_id = INVALID_HW_RING_ID;
	}

	/* Default completion ring */
	{
		struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;

		if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID)
			bnxt_free_cp_ring(bp, cpr);
		bp->grp_info[0].cp_fw_ring_id = INVALID_HW_RING_ID;
	}

	return 0;
}

int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
{
	unsigned int i;
	int rc = 0;

	for (i = 0; i < bp->rx_cp_nr_rings; i++) {
		unsigned int idx = i + 1;

		if (bp->grp_info[idx].cp_fw_ring_id == INVALID_HW_RING_ID ||
		    bp->grp_info[idx].rx_fw_ring_id == INVALID_HW_RING_ID)
			continue;

		rc = bnxt_hwrm_ring_grp_alloc(bp, idx);
		if (rc)
			return rc;
	}
	return rc;
}

void bnxt_free_hwrm_resources(struct bnxt *bp)
{
	/* Release the HWRM response buffer */
	rte_free(bp->hwrm_cmd_resp_addr);
	bp->hwrm_cmd_resp_addr = NULL;
	bp->hwrm_cmd_resp_dma_addr = 0;
}

int bnxt_alloc_hwrm_resources(struct bnxt *bp)
{
	struct rte_pci_device *pdev = bp->pdev;
	char type[RTE_MEMZONE_NAMESIZE];

	sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x", pdev->addr.domain,
		pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
	bp->max_req_len = HWRM_MAX_REQ_LEN;
	bp->max_resp_len = HWRM_MAX_RESP_LEN;
	bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
	if (bp->hwrm_cmd_resp_addr == NULL)
		return -ENOMEM;
	bp->hwrm_cmd_resp_dma_addr =
		rte_malloc_virt2phy(bp->hwrm_cmd_resp_addr);
	rte_spinlock_init(&bp->hwrm_lock);

	return 0;
}

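/*
 * Callers are expected to allocate the response buffer above before issuing
 * any command, then use bnxt_hwrm_ver_get() to (re)size it to what the
 * firmware reports; bnxt_free_hwrm_resources() releases it on teardown.
 */
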
int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	struct bnxt_filter_info *filter;
	int rc = 0;

	STAILQ_FOREACH(filter, &vnic->filter, next) {
		rc = bnxt_hwrm_clear_filter(bp, filter);
		if (rc)
			break;
	}
	return rc;
}

int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	struct bnxt_filter_info *filter;
	int rc = 0;

	STAILQ_FOREACH(filter, &vnic->filter, next) {
		rc = bnxt_hwrm_set_filter(bp, vnic, filter);
		if (rc)
			break;
	}
	return rc;
}

void bnxt_free_all_hwrm_resources(struct bnxt *bp)
{
	struct bnxt_vnic_info *vnic;
	unsigned int i;

	if (bp->vnic_info == NULL)
		return;

	vnic = &bp->vnic_info[0];
	bnxt_hwrm_cfa_l2_clear_rx_mask(bp, vnic);

	/* VNIC resources */
	for (i = 0; i < bp->nr_vnics; i++) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

		bnxt_clear_hwrm_vnic_filters(bp, vnic);

		bnxt_hwrm_vnic_ctx_free(bp, vnic);
		bnxt_hwrm_vnic_free(bp, vnic);

		rte_free(vnic->fw_grp_ids);
	}
	/* Ring resources */
	bnxt_free_all_hwrm_rings(bp);
	bnxt_free_all_hwrm_ring_grps(bp);
	bnxt_free_all_hwrm_stat_ctxs(bp);
}

static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
{
	uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;

	if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
		return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;

	switch (conf_link_speed) {
	case ETH_LINK_SPEED_10M_HD:
	case ETH_LINK_SPEED_100M_HD:
		return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
	}
	return hw_link_duplex;
}

static uint16_t bnxt_check_eth_link_autoneg(uint32_t conf_link)
{
	return (conf_link & ETH_LINK_SPEED_FIXED) ? 0 : 1;
}

static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
{
	uint16_t eth_link_speed = 0;

	if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
		return ETH_LINK_SPEED_AUTONEG;

	switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
	case ETH_LINK_SPEED_100M:
	case ETH_LINK_SPEED_100M_HD:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
		break;
	case ETH_LINK_SPEED_1G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
		break;
	case ETH_LINK_SPEED_2_5G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
		break;
	case ETH_LINK_SPEED_10G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
		break;
	case ETH_LINK_SPEED_20G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
		break;
	case ETH_LINK_SPEED_25G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
		break;
	case ETH_LINK_SPEED_40G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
		break;
	case ETH_LINK_SPEED_50G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
		break;
	default:
		RTE_LOG(ERR, PMD,
			"Unsupported link speed %d; default to AUTO\n",
			conf_link_speed);
		break;
	}
	return eth_link_speed;
}

#define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
	ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
	ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
	ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G)

static int bnxt_valid_link_speed(uint32_t link_speed, uint8_t port_id)
{
	uint32_t one_speed;

	if (link_speed == ETH_LINK_SPEED_AUTONEG)
		return 0;

	if (link_speed & ETH_LINK_SPEED_FIXED) {
		one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;

		if (one_speed & (one_speed - 1)) {
			RTE_LOG(ERR, PMD,
				"Invalid advertised speeds (%u) for port %u\n",
				link_speed, port_id);
			return -EINVAL;
		}
		if ((one_speed & BNXT_SUPPORTED_SPEEDS) != one_speed) {
			RTE_LOG(ERR, PMD,
				"Unsupported advertised speed (%u) for port %u\n",
				link_speed, port_id);
			return -EINVAL;
		}
	} else {
		if (!(link_speed & BNXT_SUPPORTED_SPEEDS)) {
			RTE_LOG(ERR, PMD,
				"Unsupported advertised speeds (%u) for port %u\n",
				link_speed, port_id);
			return -EINVAL;
		}
	}
	return 0;
}

static uint16_t
bnxt_parse_eth_link_speed_mask(struct bnxt *bp, uint32_t link_speed)
{
	uint16_t ret = 0;

	if (link_speed == ETH_LINK_SPEED_AUTONEG) {
		if (bp->link_info.support_speeds)
			return bp->link_info.support_speeds;
		link_speed = BNXT_SUPPORTED_SPEEDS;
	}

	if (link_speed & ETH_LINK_SPEED_100M)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
	if (link_speed & ETH_LINK_SPEED_100M_HD)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
	if (link_speed & ETH_LINK_SPEED_1G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
	if (link_speed & ETH_LINK_SPEED_2_5G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
	if (link_speed & ETH_LINK_SPEED_10G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
	if (link_speed & ETH_LINK_SPEED_20G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
	if (link_speed & ETH_LINK_SPEED_25G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
	if (link_speed & ETH_LINK_SPEED_40G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
	if (link_speed & ETH_LINK_SPEED_50G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
	return ret;
}

static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
{
	uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;

	switch (hw_link_speed) {
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
		eth_link_speed = ETH_SPEED_NUM_100M;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
		eth_link_speed = ETH_SPEED_NUM_1G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
		eth_link_speed = ETH_SPEED_NUM_2_5G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
		eth_link_speed = ETH_SPEED_NUM_10G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
		eth_link_speed = ETH_SPEED_NUM_20G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
		eth_link_speed = ETH_SPEED_NUM_25G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
		eth_link_speed = ETH_SPEED_NUM_40G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
		eth_link_speed = ETH_SPEED_NUM_50G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
	default:
		RTE_LOG(ERR, PMD, "HWRM link speed %d not defined\n",
			hw_link_speed);
		break;
	}
	return eth_link_speed;
}

static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
{
	uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;

	switch (hw_link_duplex) {
	case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
	case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
		eth_link_duplex = ETH_LINK_FULL_DUPLEX;
		break;
	case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
		eth_link_duplex = ETH_LINK_HALF_DUPLEX;
		break;
	default:
		RTE_LOG(ERR, PMD, "HWRM link duplex %d not defined\n",
			hw_link_duplex);
		break;
	}
	return eth_link_duplex;
}

int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
{
	int rc = 0;
	struct bnxt_link_info *link_info = &bp->link_info;

	rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
	if (rc) {
		RTE_LOG(ERR, PMD,
			"Get link config failed with rc %d\n", rc);
		goto exit;
	}
	if (link_info->link_speed)
		link->link_speed =
			bnxt_parse_hw_link_speed(link_info->link_speed);
	else
		link->link_speed = ETH_SPEED_NUM_NONE;
	link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
	link->link_status = link_info->link_up;
	link->link_autoneg = link_info->auto_mode ==
		HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
		ETH_LINK_FIXED : ETH_LINK_AUTONEG;
exit:
	return rc;
}

int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
{
	int rc = 0;
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	struct bnxt_link_info link_req;
	uint16_t speed, autoneg;

	if (BNXT_NPAR_PF(bp) || BNXT_VF(bp))
		return 0;

	rc = bnxt_valid_link_speed(dev_conf->link_speeds,
			bp->eth_dev->data->port_id);
	if (rc)
		goto error;

	memset(&link_req, 0, sizeof(link_req));
	link_req.link_up = link_up;
	if (!link_up)
		goto port_phy_cfg;

	autoneg = bnxt_check_eth_link_autoneg(dev_conf->link_speeds);
	speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
	link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
	/* Autoneg can be done only when the FW allows */
	if (autoneg == 1 && !(bp->link_info.auto_link_speed ||
			      bp->link_info.force_link_speed)) {
		link_req.phy_flags |=
				HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
		link_req.auto_link_speed_mask =
			bnxt_parse_eth_link_speed_mask(bp,
						       dev_conf->link_speeds);
	} else {
		if (bp->link_info.phy_type ==
		    HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET ||
		    bp->link_info.phy_type ==
		    HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE ||
		    bp->link_info.media_type ==
		    HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP) {
			RTE_LOG(ERR, PMD, "10GBase-T devices must autoneg\n");
			return -EINVAL;
		}

		link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
		/* If the user wants a particular speed try that first. */
		if (speed)
			link_req.link_speed = speed;
		else if (bp->link_info.force_link_speed)
			link_req.link_speed = bp->link_info.force_link_speed;
		else
			link_req.link_speed = bp->link_info.auto_link_speed;
	}
	link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
	link_req.auto_pause = bp->link_info.auto_pause;
	link_req.force_pause = bp->link_info.force_pause;

port_phy_cfg:
	rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
	if (rc) {
		RTE_LOG(ERR, PMD,
			"Set link config failed with rc %d\n", rc);
	}

error:
	return rc;
}

int bnxt_hwrm_func_qcfg(struct bnxt *bp)
{
	struct hwrm_func_qcfg_input req = {0};
	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	int rc = 0;

	HWRM_PREP(req, FUNC_QCFG, -1, resp);
	req.fid = rte_cpu_to_le_16(0xffff);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT;

	if (BNXT_VF(bp)) {
		struct bnxt_vf_info *vf = &bp->vf;

		/* Hard-coded 0xfff VLAN ID mask */
		vf->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
	}

	switch (resp->port_partition_type) {
	case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
	case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
	case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
		bp->port_partition_type = resp->port_partition_type;
		break;
	default:
		bp->port_partition_type = 0;