/*-
 *   BSD LICENSE
 *
 * Copyright(c) Broadcom Limited.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Broadcom Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_version.h>

#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_ring.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"
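
/*
 * Maximum number of polling iterations for an HWRM command completion;
 * see the poll loop in bnxt_hwrm_send_message() below.
 */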
#define HWRM_CMD_TIMEOUT		10000
struct bnxt_plcmodes_cfg {
	uint32_t	flags;
	uint16_t	jumbo_thresh;
	uint16_t	hds_offset;
	uint16_t	hds_threshold;
};
static int page_getenum(size_t size)
{
	PMD_DRV_LOG(ERR, "Page size %zu out of range\n", size);
	return sizeof(void *) * 8 - 1;
}

static int page_roundup(size_t size)
{
	return 1 << page_getenum(size);
}
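
/*
 * Illustrative note (not in the original source): page_getenum() yields the
 * exponent of the next supported power-of-two size, so page_roundup(4000)
 * evaluates to 1 << page_getenum(4000) == 4096, a value usable directly as
 * a ring/DMA page size.
 */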

/*
 * HWRM Functions (sent to HWRM)
 * These are named bnxt_hwrm_*() and return -1 if bnxt_hwrm_send_message()
 * fails (i.e., a timeout), and a positive non-zero HWRM error code if the
 * HWRM command is failed by the ChiMP.
 */
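
/*
 * Caller-side sketch of that convention (illustrative only;
 * bnxt_hwrm_func_reset() stands in for any bnxt_hwrm_*() command, and the
 * handle_*() helpers are hypothetical):
 *
 *	rc = bnxt_hwrm_func_reset(bp);
 *	if (rc < 0)
 *		handle_transport_failure();	// e.g. a timeout
 *	else if (rc > 0)
 *		handle_hwrm_error(rc);		// error code from the ChiMP
 */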

static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
				  uint32_t msg_len)
{
	unsigned int i;
	struct input *req = msg;
	struct output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t *data = msg;
	uint8_t *bar;
	uint8_t *valid;
	uint16_t max_req_len = bp->max_req_len;
	struct hwrm_short_input short_input = { 0 };

	if (bp->flags & BNXT_FLAG_SHORT_CMD) {
		void *short_cmd_req = bp->hwrm_short_cmd_req_addr;

		memset(short_cmd_req, 0, bp->max_req_len);
		memcpy(short_cmd_req, req, msg_len);

		short_input.req_type = rte_cpu_to_le_16(req->req_type);
		short_input.signature = rte_cpu_to_le_16(
					HWRM_SHORT_REQ_SIGNATURE_SHORT_CMD);
		short_input.size = rte_cpu_to_le_16(msg_len);
		short_input.req_addr =
			rte_cpu_to_le_64(bp->hwrm_short_cmd_req_dma_addr);

		data = (uint32_t *)&short_input;
		msg_len = sizeof(short_input);

		/* Sync memory write before updating doorbell */
		rte_wmb();

		max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
	}
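
	/*
	 * Illustrative note: from here on, the bytes written to BAR0 are only
	 * the 16-byte hwrm_short_input descriptor; the firmware fetches the
	 * full command via DMA from hwrm_short_cmd_req_dma_addr.
	 */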
	/* Write request msg to hwrm channel */
	for (i = 0; i < msg_len; i += 4) {
		bar = (uint8_t *)bp->bar0 + i;
		rte_write32(*data, bar);
		data++;
	}

	/* Zero the rest of the request space */
	for (; i < max_req_len; i += 4) {
		bar = (uint8_t *)bp->bar0 + i;
		rte_write32(0, bar);
	}

	/* Ring channel doorbell */
	bar = (uint8_t *)bp->bar0 + 0x100;
	rte_write32(1, bar);

	/* Poll for the valid bit */
	for (i = 0; i < HWRM_CMD_TIMEOUT; i++) {
		/* Sanity check on the resp->resp_len */
		if (resp->resp_len && resp->resp_len <=
		    bp->max_resp_len) {
			/* Last byte of resp contains the valid key */
			valid = (uint8_t *)resp + resp->resp_len - 1;
			if (*valid == HWRM_RESP_VALID_KEY)
				break;
		}
		rte_delay_us(600); /* poll interval is an assumption */
	}
	if (i >= HWRM_CMD_TIMEOUT) {
		PMD_DRV_LOG(ERR, "Error sending msg 0x%04x\n",
			    req->req_type);
		return -1;
	}
	return 0;
}

/*
 * HWRM_PREP() should be used to prepare *ALL* HWRM commands. It grabs the
 * spinlock and does the initial processing.
 *
 * HWRM_CHECK_RESULT() returns from the enclosing function on failure, so it
 * may not fall through; it releases the spinlock only on that error path.
 * If the regular int return codes are not used by the function,
 * HWRM_CHECK_RESULT() should not be used directly; rather, it should be
 * copied and modified to suit the function.
 *
 * HWRM_UNLOCK() must be called after all response processing is completed.
 */
#define HWRM_PREP(req, type) do { \
	rte_spinlock_lock(&bp->hwrm_lock); \
	memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
	req.req_type = rte_cpu_to_le_16(HWRM_##type); \
	req.cmpl_ring = rte_cpu_to_le_16(-1); \
	req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++); \
	req.target_id = rte_cpu_to_le_16(0xffff); \
	req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr); \
} while (0)
#define HWRM_CHECK_RESULT() do { \
	if (rc) { \
		PMD_DRV_LOG(ERR, "failed rc:%d\n", rc); \
		rte_spinlock_unlock(&bp->hwrm_lock); \
		return rc; \
	} \
	if (resp->error_code) { \
		rc = rte_le_to_cpu_16(resp->error_code); \
		if (resp->resp_len >= 16) { \
			struct hwrm_err_output *tmp_hwrm_err_op = \
						(void *)resp; \
			PMD_DRV_LOG(ERR, \
				"error %d:%d:%08x:%04x\n", \
				rc, tmp_hwrm_err_op->cmd_err, \
				rte_le_to_cpu_32( \
					tmp_hwrm_err_op->opaque_0), \
				rte_le_to_cpu_16( \
					tmp_hwrm_err_op->opaque_1)); \
		} else { \
			PMD_DRV_LOG(ERR, "error %d\n", rc); \
		} \
		rte_spinlock_unlock(&bp->hwrm_lock); \
		return rc; \
	} \
} while (0)

#define HWRM_UNLOCK()		rte_spinlock_unlock(&bp->hwrm_lock)
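
/*
 * Canonical usage of the three macros above (illustrative sketch):
 *
 *	HWRM_PREP(req, FUNC_RESET);
 *	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 *	HWRM_CHECK_RESULT();
 *	... read fields out of *resp while the lock is still held ...
 *	HWRM_UNLOCK();
 *	return rc;
 */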

int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
	struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, CFA_L2_SET_RX_MASK);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
				 struct bnxt_vnic_info *vnic,
				 uint16_t vlan_count,
				 struct bnxt_vlan_table_entry *vlan_table)
	struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, CFA_L2_SET_RX_MASK);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
	/* FIXME: add the multicast flag once multicast add options are
	 * supported.
	 */
	if (vnic->flags & BNXT_VNIC_INFO_BCAST)
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST;
	if (vnic->flags & BNXT_VNIC_INFO_UNTAGGED)
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN;
	if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
	if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI)
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
	if (vnic->flags & BNXT_VNIC_INFO_MCAST)
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
	if (vnic->mc_addr_cnt) {
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
		req.num_mc_entries = rte_cpu_to_le_32(vnic->mc_addr_cnt);
		req.mc_tbl_addr = rte_cpu_to_le_64(vnic->mc_list_dma_addr);
	}
	if (!(mask & HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN))
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY;
	req.vlan_tag_tbl_addr = rte_cpu_to_le_64(
		rte_mem_virt2iova(vlan_table));
	req.num_vlan_tags = rte_cpu_to_le_32((uint32_t)vlan_count);

	req.mask = rte_cpu_to_le_32(mask);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid,
			uint16_t vlan_count,
			struct bnxt_vlan_antispoof_table_entry *vlan_table)
	struct hwrm_cfa_vlan_antispoof_cfg_input req = {.req_type = 0 };
	struct hwrm_cfa_vlan_antispoof_cfg_output *resp =
						bp->hwrm_cmd_resp_addr;
	/*
	 * Older HWRM versions did not support this command, and the
	 * set_rx_mask list was used for anti-spoofing. In 1.8.0, the TX path
	 * configuration was removed from the set_rx_mask call, and this
	 * command was added.
	 *
	 * This command is also present from 1.7.8.11 and higher.
	 */
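	/*
	 * Worked example (derived from the packing in bnxt_hwrm_ver_get()):
	 * bp->fw_ver holds (maj << 24) | (min << 16) | (bld << 8) | rsvd, so
	 * 1.8.0 is 0x01080000 and 1.7.8.11 is 0x0107080b; the comparisons
	 * below are plain integer checks against those values.
	 */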
	if (bp->fw_ver < ((1 << 24) | (8 << 16)) &&
	    bp->fw_ver != ((1 << 24) | (7 << 16) | (8 << 8)) &&
	    bp->fw_ver < ((1 << 24) | (7 << 16) | (8 << 8) | 11))
		return 0;

	HWRM_PREP(req, CFA_VLAN_ANTISPOOF_CFG);
	req.fid = rte_cpu_to_le_16(fid);

	req.vlan_tag_mask_tbl_addr =
		rte_cpu_to_le_64(rte_mem_virt2iova(vlan_table));
	req.num_vlan_entries = rte_cpu_to_le_32((uint32_t)vlan_count);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

int bnxt_hwrm_clear_l2_filter(struct bnxt *bp,
			      struct bnxt_filter_info *filter)
	struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;

	if (filter->fw_l2_filter_id == UINT64_MAX)
		return 0;

	HWRM_PREP(req, CFA_L2_FILTER_FREE);

	req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	filter->fw_l2_filter_id = -1;

int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
			    uint16_t dst_id,
			    struct bnxt_filter_info *filter)
	struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	const struct rte_eth_vmdq_rx_conf *conf =
		&dev_conf->rx_adv_conf.vmdq_rx_conf;
	uint32_t enables = 0;
	uint16_t j = dst_id - 1;

	/* TODO: Is there a better way to add VLANs to each VNIC in case of
	 * VMDq?
	 */
	if ((dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) &&
	    conf->pool_map[j].pools & (1UL << j)) {
		PMD_DRV_LOG(DEBUG,
			    "Add vlan %u to vmdq pool %u\n",
			    conf->pool_map[j].vlan_id, j);

		filter->l2_ivlan = conf->pool_map[j].vlan_id;
		filter->enables |=
			HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN |
			HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK;
	}

	if (filter->fw_l2_filter_id != UINT64_MAX)
		bnxt_hwrm_clear_l2_filter(bp, filter);

	HWRM_PREP(req, CFA_L2_FILTER_ALLOC);

	req.flags = rte_cpu_to_le_32(filter->flags);

	enables = filter->enables |
		HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
	req.dst_id = rte_cpu_to_le_16(dst_id);
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
		memcpy(req.l2_addr, filter->l2_addr,
		       ETHER_ADDR_LEN);
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
		memcpy(req.l2_addr_mask, filter->l2_addr_mask,
		       ETHER_ADDR_LEN);
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
		req.l2_ovlan = filter->l2_ovlan;
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN)
		req.l2_ivlan = filter->l2_ivlan;
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
		req.l2_ovlan_mask = filter->l2_ovlan_mask;
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK)
		req.l2_ivlan_mask = filter->l2_ivlan_mask;
	if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID)
		req.src_id = rte_cpu_to_le_32(filter->src_id);
	if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE)
		req.src_type = filter->src_type;

	req.enables = rte_cpu_to_le_32(enables);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);

int bnxt_hwrm_ptp_cfg(struct bnxt *bp)
	struct hwrm_port_mac_cfg_input req = {.req_type = 0};
	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;

	HWRM_PREP(req, PORT_MAC_CFG);
	if (ptp->rx_tstamp_en)
		flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_ENABLE;
	else
		flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_DISABLE;
	if (ptp->tx_tstamp_en)
		flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_ENABLE;
	else
		flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_DISABLE;
	req.flags = rte_cpu_to_le_32(flags);
	req.enables =
	    rte_cpu_to_le_32(PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE);
	req.rx_ts_capture_ptp_msg_type = rte_cpu_to_le_16(ptp->rxctl);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

static int bnxt_hwrm_ptp_qcfg(struct bnxt *bp)
	struct hwrm_port_mac_ptp_qcfg_input req = {.req_type = 0};
	struct hwrm_port_mac_ptp_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct bnxt_ptp_cfg *ptp = bp->ptp_cfg;

	/* if (bp->hwrm_spec_code < 0x10801 || ptp) TBD */

	HWRM_PREP(req, PORT_MAC_PTP_QCFG);

	req.port_id = rte_cpu_to_le_16(bp->pf.port_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
	if (!(resp->flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_DIRECT_ACCESS))
		return 0;

	ptp = rte_zmalloc("ptp_cfg", sizeof(*ptp), 0);

	ptp->rx_regs[BNXT_PTP_RX_TS_L] =
		rte_le_to_cpu_32(resp->rx_ts_reg_off_lower);
	ptp->rx_regs[BNXT_PTP_RX_TS_H] =
		rte_le_to_cpu_32(resp->rx_ts_reg_off_upper);
	ptp->rx_regs[BNXT_PTP_RX_SEQ] =
		rte_le_to_cpu_32(resp->rx_ts_reg_off_seq_id);
	ptp->rx_regs[BNXT_PTP_RX_FIFO] =
		rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo);
	ptp->rx_regs[BNXT_PTP_RX_FIFO_ADV] =
		rte_le_to_cpu_32(resp->rx_ts_reg_off_fifo_adv);
	ptp->tx_regs[BNXT_PTP_TX_TS_L] =
		rte_le_to_cpu_32(resp->tx_ts_reg_off_lower);
	ptp->tx_regs[BNXT_PTP_TX_TS_H] =
		rte_le_to_cpu_32(resp->tx_ts_reg_off_upper);
	ptp->tx_regs[BNXT_PTP_TX_SEQ] =
		rte_le_to_cpu_32(resp->tx_ts_reg_off_seq_id);
	ptp->tx_regs[BNXT_PTP_TX_FIFO] =
		rte_le_to_cpu_32(resp->tx_ts_reg_off_fifo);

int bnxt_hwrm_func_qcaps(struct bnxt *bp)
	struct hwrm_func_qcaps_input req = {.req_type = 0 };
	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
	uint16_t new_max_vfs;

	HWRM_PREP(req, FUNC_QCAPS);

	req.fid = rte_cpu_to_le_16(0xffff);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
	flags = rte_le_to_cpu_32(resp->flags);

	bp->pf.port_id = resp->port_id;
	bp->pf.first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
	new_max_vfs = bp->pdev->max_vfs;
	if (new_max_vfs != bp->pf.max_vfs) {
		rte_free(bp->pf.vf_info);
		bp->pf.vf_info = rte_malloc("bnxt_vf_info",
		    sizeof(bp->pf.vf_info[0]) * new_max_vfs, 0);
		bp->pf.max_vfs = new_max_vfs;
		for (i = 0; i < new_max_vfs; i++) {
			bp->pf.vf_info[i].fid = bp->pf.first_vf_id + i;
			bp->pf.vf_info[i].vlan_table =
				rte_zmalloc("VF VLAN table",
					    getpagesize(), getpagesize());
			if (bp->pf.vf_info[i].vlan_table == NULL)
				PMD_DRV_LOG(ERR,
					"Failed to alloc VLAN table for VF %d\n",
					i);
			else
				rte_mem_lock_page(
					bp->pf.vf_info[i].vlan_table);
			bp->pf.vf_info[i].vlan_as_table =
				rte_zmalloc("VF VLAN AS table",
					    getpagesize(), getpagesize());
			if (bp->pf.vf_info[i].vlan_as_table == NULL)
				PMD_DRV_LOG(ERR,
					"Failed to alloc VLAN AS table for VF %d\n",
					i);
			else
				rte_mem_lock_page(
					bp->pf.vf_info[i].vlan_as_table);
			STAILQ_INIT(&bp->pf.vf_info[i].filter);
		}
	}

	bp->fw_fid = rte_le_to_cpu_32(resp->fid);
	memcpy(bp->dflt_mac_addr, &resp->mac_address, ETHER_ADDR_LEN);
	bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
	bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
	bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
	bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
	bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
	/* TODO: For now, do not support VMDq/RFS on VFs. */
	bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
	bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
	bp->pf.total_vnics = rte_le_to_cpu_16(resp->max_vnics);
	if (flags & HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PTP_SUPPORTED) {
		bp->flags |= BNXT_FLAG_PTP_SUPPORTED;
		PMD_DRV_LOG(INFO, "PTP SUPPORTED\n");
		bnxt_hwrm_ptp_qcfg(bp);
	}

int bnxt_hwrm_func_reset(struct bnxt *bp)
	struct hwrm_func_reset_input req = {.req_type = 0 };
	struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, FUNC_RESET);

	req.enables = rte_cpu_to_le_32(0);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

int bnxt_hwrm_func_driver_register(struct bnxt *bp)
	struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
	struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;

	if (bp->flags & BNXT_FLAG_REGISTERED)
		return 0;

	HWRM_PREP(req, FUNC_DRV_RGTR);
	req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
			HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
	req.ver_maj = RTE_VER_YEAR;
	req.ver_min = RTE_VER_MONTH;
	req.ver_upd = RTE_VER_MINOR;

	if (BNXT_PF(bp)) {
		req.enables |= rte_cpu_to_le_32(
			HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_REQ_FWD);
		memcpy(req.vf_req_fwd, bp->pf.vf_req_fwd,
		       RTE_MIN(sizeof(req.vf_req_fwd),
			       sizeof(bp->pf.vf_req_fwd)));
	}
	req.async_event_fwd[0] |=
		rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_LINK_STATUS_CHANGE |
				 ASYNC_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED |
				 ASYNC_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE);
	req.async_event_fwd[1] |=
		rte_cpu_to_le_32(ASYNC_CMPL_EVENT_ID_PF_DRVR_UNLOAD |
				 ASYNC_CMPL_EVENT_ID_VF_CFG_CHANGE);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	bp->flags |= BNXT_FLAG_REGISTERED;

int bnxt_hwrm_ver_get(struct bnxt *bp)
	struct hwrm_ver_get_input req = {.req_type = 0 };
	struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
	uint16_t max_resp_len;
	char type[RTE_MEMZONE_NAMESIZE];
	uint32_t dev_caps_cfg;

	bp->max_req_len = HWRM_MAX_REQ_LEN;
	HWRM_PREP(req, VER_GET);

	req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
	req.hwrm_intf_min = HWRM_VERSION_MINOR;
	req.hwrm_intf_upd = HWRM_VERSION_UPDATE;

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	PMD_DRV_LOG(INFO, "%d.%d.%d:%d.%d.%d\n",
		resp->hwrm_intf_maj, resp->hwrm_intf_min,
		resp->hwrm_intf_upd,
		resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld);
	bp->fw_ver = (resp->hwrm_fw_maj << 24) | (resp->hwrm_fw_min << 16) |
		     (resp->hwrm_fw_bld << 8) | resp->hwrm_fw_rsvd;
	PMD_DRV_LOG(INFO, "Driver HWRM version: %d.%d.%d\n",
		HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);

	my_version = HWRM_VERSION_MAJOR << 16;
	my_version |= HWRM_VERSION_MINOR << 8;
	my_version |= HWRM_VERSION_UPDATE;

	fw_version = resp->hwrm_intf_maj << 16;
	fw_version |= resp->hwrm_intf_min << 8;
	fw_version |= resp->hwrm_intf_upd;
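
	/*
	 * Example: interface version 1.7.8 packs to 0x010708 on both sides,
	 * so the plain integer comparisons below order the driver and
	 * firmware API versions correctly.
	 */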
	if (resp->hwrm_intf_maj != HWRM_VERSION_MAJOR)
		PMD_DRV_LOG(ERR, "Unsupported firmware API version\n");

	if (my_version != fw_version) {
		PMD_DRV_LOG(INFO, "BNXT Driver/HWRM API mismatch.\n");
		if (my_version < fw_version) {
			PMD_DRV_LOG(INFO,
				"Firmware API version is newer than driver.\n");
			PMD_DRV_LOG(INFO,
				"The driver may be missing features.\n");
		} else {
			PMD_DRV_LOG(INFO,
				"Firmware API version is older than driver.\n");
			PMD_DRV_LOG(INFO,
				"Not all driver features may be functional.\n");
		}
	}

	if (bp->max_req_len > resp->max_req_win_len)
		PMD_DRV_LOG(ERR, "Unsupported request length\n");
	bp->max_req_len = rte_le_to_cpu_16(resp->max_req_win_len);
	max_resp_len = rte_le_to_cpu_16(resp->max_resp_len);
	dev_caps_cfg = rte_le_to_cpu_32(resp->dev_caps_cfg);

	if (bp->max_resp_len != max_resp_len) {
		snprintf(type, sizeof(type), "bnxt_hwrm_%04x:%02x:%02x:%02x",
			 bp->pdev->addr.domain, bp->pdev->addr.bus,
			 bp->pdev->addr.devid, bp->pdev->addr.function);

		rte_free(bp->hwrm_cmd_resp_addr);

		bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0);
		if (bp->hwrm_cmd_resp_addr == NULL) {

		rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
		bp->hwrm_cmd_resp_dma_addr =
			rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
		if (bp->hwrm_cmd_resp_dma_addr == 0) {
			PMD_DRV_LOG(ERR,
			"Unable to map response buffer to physical memory.\n");
		}
		bp->max_resp_len = max_resp_len;
	}

	if ((dev_caps_cfg &
	     HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
	    (dev_caps_cfg &
	     HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) {
		PMD_DRV_LOG(DEBUG, "Short command supported\n");

		rte_free(bp->hwrm_short_cmd_req_addr);

		bp->hwrm_short_cmd_req_addr = rte_malloc(type,
							 bp->max_req_len, 0);
		if (bp->hwrm_short_cmd_req_addr == NULL) {

		rte_mem_lock_page(bp->hwrm_short_cmd_req_addr);
		bp->hwrm_short_cmd_req_dma_addr =
			rte_mem_virt2iova(bp->hwrm_short_cmd_req_addr);
		if (bp->hwrm_short_cmd_req_dma_addr == 0) {
			rte_free(bp->hwrm_short_cmd_req_addr);
			PMD_DRV_LOG(ERR,
				"Unable to map buffer to physical memory.\n");
		}

		bp->flags |= BNXT_FLAG_SHORT_CMD;
	}

int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
	struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
	struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;

	if (!(bp->flags & BNXT_FLAG_REGISTERED))
		return 0;

	HWRM_PREP(req, FUNC_DRV_UNRGTR);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	bp->flags &= ~BNXT_FLAG_REGISTERED;

static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
	struct hwrm_port_phy_cfg_input req = {0};
	struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t enables = 0;

	HWRM_PREP(req, PORT_PHY_CFG);

	if (conf->link_up) {
		/* Fixed speed requested while autoneg is on; disable it. */
		if (bp->link_info.auto_mode && conf->link_speed) {
			req.auto_mode = HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE;
			PMD_DRV_LOG(DEBUG, "Disabling AutoNeg\n");
		}

		req.flags = rte_cpu_to_le_32(conf->phy_flags);
		req.force_link_speed = rte_cpu_to_le_16(conf->link_speed);
		enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
		/*
		 * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
		 * any auto mode, even "none".
		 */
		if (!conf->link_speed) {
			/* No speeds specified. Enable AutoNeg - all speeds */
			req.auto_mode =
				HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS;
		}
		/* AutoNeg - Advertise speeds specified. */
		if (conf->auto_link_speed_mask &&
		    !(conf->phy_flags & HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE)) {
			req.auto_mode =
				HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
			req.auto_link_speed_mask =
				conf->auto_link_speed_mask;
			enables |=
			HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
		}

		req.auto_duplex = conf->duplex;
		enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
		req.auto_pause = conf->auto_pause;
		req.force_pause = conf->force_pause;
		/* Set force_pause if there is no auto or if there is a force */
		if (req.auto_pause && !req.force_pause)
			enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
		else
			enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;

		req.enables = rte_cpu_to_le_32(enables);
	} else {
		req.flags =
		rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN);
		PMD_DRV_LOG(INFO, "Force Link Down\n");
	}

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
				   struct bnxt_link_info *link_info)
	struct hwrm_port_phy_qcfg_input req = {0};
	struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, PORT_PHY_QCFG);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	link_info->phy_link_status = resp->link;
	link_info->link_up =
		(link_info->phy_link_status ==
		 HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) ? 1 : 0;
	link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
	link_info->duplex = resp->duplex_cfg;
	link_info->pause = resp->pause;
	link_info->auto_pause = resp->auto_pause;
	link_info->force_pause = resp->force_pause;
	link_info->auto_mode = resp->auto_mode;
	link_info->phy_type = resp->phy_type;
	link_info->media_type = resp->media_type;

	link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
	link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
	link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
	link_info->force_link_speed = rte_le_to_cpu_16(resp->force_link_speed);
	link_info->phy_ver[0] = resp->phy_maj;
	link_info->phy_ver[1] = resp->phy_min;
	link_info->phy_ver[2] = resp->phy_bld;

	PMD_DRV_LOG(DEBUG, "Link Speed %d\n", link_info->link_speed);
	PMD_DRV_LOG(DEBUG, "Auto Mode %d\n", link_info->auto_mode);
	PMD_DRV_LOG(DEBUG, "Support Speeds %x\n", link_info->support_speeds);
	PMD_DRV_LOG(DEBUG, "Auto Link Speed %x\n", link_info->auto_link_speed);
	PMD_DRV_LOG(DEBUG, "Auto Link Speed Mask %x\n",
		    link_info->auto_link_speed_mask);
	PMD_DRV_LOG(DEBUG, "Forced Link Speed %x\n",
		    link_info->force_link_speed);

int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
	struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
	struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, QUEUE_QPORTCFG);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

#define GET_QUEUE_INFO(x) \
	bp->cos_queue[x].id = resp->queue_id##x; \
	bp->cos_queue[x].profile = resp->queue_id##x##_service_profile
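
/*
 * Token pasting makes GET_QUEUE_INFO(0) expand to:
 *
 *	bp->cos_queue[0].id = resp->queue_id0;
 *	bp->cos_queue[0].profile = resp->queue_id0_service_profile;
 */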

int bnxt_hwrm_ring_alloc(struct bnxt *bp,
			 struct bnxt_ring *ring,
			 uint32_t ring_type, uint32_t map_index,
			 uint32_t stats_ctx_id, uint32_t cmpl_ring_id)
	uint32_t enables = 0;
	struct hwrm_ring_alloc_input req = {.req_type = 0 };
	struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_ALLOC);

	req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
	req.fbo = rte_cpu_to_le_32(0);
	/* Association of ring index with doorbell index */
	req.logical_id = rte_cpu_to_le_16(map_index);
	req.length = rte_cpu_to_le_32(ring->ring_size);

	switch (ring_type) {
	case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
		req.queue_id = bp->cos_queue[0].id;
		/* FALLTHROUGH */
	case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
		req.ring_type = ring_type;
		req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
		req.stat_ctx_id = rte_cpu_to_le_16(stats_ctx_id);
		if (stats_ctx_id != INVALID_STATS_CTX_ID)
			enables |=
			HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
		break;
	case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
		req.ring_type = ring_type;
		/*
		 * TODO: Some HWRM versions crash with
		 * HWRM_RING_ALLOC_INPUT_INT_MODE_POLL
		 */
		req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
		break;
	default:
		PMD_DRV_LOG(ERR, "hwrm alloc invalid ring type %d\n",
			    ring_type);
		return -1;
	}
	req.enables = rte_cpu_to_le_32(enables);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
	if (rc || resp->error_code) {
		if (rc == 0 && resp->error_code)
			rc = rte_le_to_cpu_16(resp->error_code);
		switch (ring_type) {
		case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
			PMD_DRV_LOG(ERR,
				"hwrm_ring_alloc cp failed. rc:%d\n", rc);
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
			PMD_DRV_LOG(ERR,
				"hwrm_ring_alloc rx failed. rc:%d\n", rc);
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
			PMD_DRV_LOG(ERR,
				"hwrm_ring_alloc tx failed. rc:%d\n", rc);
			return rc;
		default:
			PMD_DRV_LOG(ERR, "Invalid ring. rc:%d\n", rc);
			return rc;
		}
	}
	ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);

int bnxt_hwrm_ring_free(struct bnxt *bp,
			struct bnxt_ring *ring, uint32_t ring_type)
	struct hwrm_ring_free_input req = {.req_type = 0 };
	struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_FREE);

	req.ring_type = ring_type;
	req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	if (rc || resp->error_code) {
		if (rc == 0 && resp->error_code)
			rc = rte_le_to_cpu_16(resp->error_code);
		HWRM_UNLOCK();

		switch (ring_type) {
		case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
			PMD_DRV_LOG(ERR, "hwrm_ring_free cp failed. rc:%d\n",
				rc);
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
			PMD_DRV_LOG(ERR, "hwrm_ring_free rx failed. rc:%d\n",
				rc);
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
			PMD_DRV_LOG(ERR, "hwrm_ring_free tx failed. rc:%d\n",
				rc);
			return rc;
		default:
			PMD_DRV_LOG(ERR, "Invalid ring, rc:%d\n", rc);
			return rc;
		}
	}

int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
	struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
	struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_GRP_ALLOC);

	req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
	req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
	req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
	req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	bp->grp_info[idx].fw_grp_id =
		rte_le_to_cpu_16(resp->ring_group_id);

int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
	struct hwrm_ring_grp_free_input req = {.req_type = 0 };
	struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_GRP_FREE);

	req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;

int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
	struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
	struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;

	if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
		return rc;

	HWRM_PREP(req, STAT_CTX_CLR_STATS);

	req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			     unsigned int idx __rte_unused)
	struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
	struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, STAT_CTX_ALLOC);

	req.update_period_ms = rte_cpu_to_le_32(0);

	req.stats_dma_addr =
		rte_cpu_to_le_64(cpr->hw_stats_map);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	cpr->hw_stats_ctx_id = rte_le_to_cpu_16(resp->stat_ctx_id);

int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			    unsigned int idx __rte_unused)
	struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
	struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, STAT_CTX_FREE);

	req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
	struct hwrm_vnic_alloc_input req = { 0 };
	struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	/* map ring groups to this vnic */
	PMD_DRV_LOG(DEBUG, "Alloc VNIC. Start %x, End %x\n",
		vnic->start_grp_id, vnic->end_grp_id);
	for (i = vnic->start_grp_id, j = 0; i <= vnic->end_grp_id; i++, j++)
		vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
	vnic->dflt_ring_grp = bp->grp_info[vnic->start_grp_id].fw_grp_id;
	vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
	vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
	vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
	vnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN +
		    ETHER_CRC_LEN + VLAN_TAG_SIZE;
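	/*
	 * Example: with a 1500-byte MTU the MRU becomes
	 * 1500 + 14 (Ethernet header) + 4 (CRC) + 4 (VLAN tag) = 1522.
	 */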
	HWRM_PREP(req, VNIC_ALLOC);

	if (vnic->func_default)
		req.flags = HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT;
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);

	PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);

static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp,
					struct bnxt_vnic_info *vnic,
					struct bnxt_plcmodes_cfg *pmode)
	struct hwrm_vnic_plcmodes_qcfg_input req = {.req_type = 0 };
	struct hwrm_vnic_plcmodes_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_PLCMODES_QCFG);

	req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	pmode->flags = rte_le_to_cpu_32(resp->flags);
	/* dflt_vnic bit doesn't exist in the _cfg command */
	pmode->flags &= ~(HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_DFLT_VNIC);
	pmode->jumbo_thresh = rte_le_to_cpu_16(resp->jumbo_thresh);
	pmode->hds_offset = rte_le_to_cpu_16(resp->hds_offset);
	pmode->hds_threshold = rte_le_to_cpu_16(resp->hds_threshold);

static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp,
				       struct bnxt_vnic_info *vnic,
				       struct bnxt_plcmodes_cfg *pmode)
	struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
	struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_PLCMODES_CFG);

	req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
	req.flags = rte_cpu_to_le_32(pmode->flags);
	req.jumbo_thresh = rte_cpu_to_le_16(pmode->jumbo_thresh);
	req.hds_offset = rte_cpu_to_le_16(pmode->hds_offset);
	req.hds_threshold = rte_cpu_to_le_16(pmode->hds_threshold);
	req.enables = rte_cpu_to_le_32(
	    HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID |
	    HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_OFFSET_VALID |
	    HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID
	);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
	struct hwrm_vnic_cfg_input req = {.req_type = 0 };
	struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t ctx_enable_flag = 0;
	struct bnxt_plcmodes_cfg pmodes;

	if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
		PMD_DRV_LOG(DEBUG, "VNIC ID %x\n", vnic->fw_vnic_id);
		return rc;
	}

	rc = bnxt_hwrm_vnic_plcmodes_qcfg(bp, vnic, &pmodes);

	HWRM_PREP(req, VNIC_CFG);
	/* Only RSS is supported for now; TBD: COS & LB */
	req.enables =
		rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP);
	if (vnic->lb_rule != 0xffff)
		ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE;
	if (vnic->cos_rule != 0xffff)
		ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE;
	if (vnic->rss_rule != 0xffff) {
		ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_MRU;
		ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
	}
	req.enables |= rte_cpu_to_le_32(ctx_enable_flag);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
	req.dflt_ring_grp = rte_cpu_to_le_16(vnic->dflt_ring_grp);
	req.rss_rule = rte_cpu_to_le_16(vnic->rss_rule);
	req.cos_rule = rte_cpu_to_le_16(vnic->cos_rule);
	req.lb_rule = rte_cpu_to_le_16(vnic->lb_rule);
	req.mru = rte_cpu_to_le_16(vnic->mru);
	if (vnic->func_default)
		req.flags |=
			rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
	if (vnic->vlan_strip)
		req.flags |=
		    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
	if (vnic->bd_stall)
		req.flags |=
		    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE);
	if (vnic->roce_dual)
		req.flags |= rte_cpu_to_le_32(
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE);
	if (vnic->roce_only)
		req.flags |= rte_cpu_to_le_32(
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE);
	if (vnic->rss_dflt_cr)
		req.flags |= rte_cpu_to_le_32(
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	rc = bnxt_hwrm_vnic_plcmodes_cfg(bp, vnic, &pmodes);

int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
			int16_t fw_vf_id)
	struct hwrm_vnic_qcfg_input req = {.req_type = 0 };
	struct hwrm_vnic_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

	if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
		PMD_DRV_LOG(DEBUG, "VNIC QCFG ID %d\n", vnic->fw_vnic_id);
		return rc;
	}
	HWRM_PREP(req, VNIC_QCFG);

	req.enables =
		rte_cpu_to_le_32(HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
	req.vf_id = rte_cpu_to_le_16(fw_vf_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	vnic->dflt_ring_grp = rte_le_to_cpu_16(resp->dflt_ring_grp);
	vnic->rss_rule = rte_le_to_cpu_16(resp->rss_rule);
	vnic->cos_rule = rte_le_to_cpu_16(resp->cos_rule);
	vnic->lb_rule = rte_le_to_cpu_16(resp->lb_rule);
	vnic->mru = rte_le_to_cpu_16(resp->mru);
	vnic->func_default = rte_le_to_cpu_32(
			resp->flags) & HWRM_VNIC_QCFG_OUTPUT_FLAGS_DEFAULT;
	vnic->vlan_strip = rte_le_to_cpu_32(resp->flags) &
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE;
	vnic->bd_stall = rte_le_to_cpu_32(resp->flags) &
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE;
	vnic->roce_dual = rte_le_to_cpu_32(resp->flags) &
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE;
	vnic->roce_only = rte_le_to_cpu_32(resp->flags) &
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE;
	vnic->rss_dflt_cr = rte_le_to_cpu_32(resp->flags) &
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE;

int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
	struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
	struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
						bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	vnic->rss_rule = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);

	PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);

int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
	struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
	struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
						bp->hwrm_cmd_resp_addr;

	if (vnic->rss_rule == 0xffff) {
		PMD_DRV_LOG(DEBUG, "VNIC RSS Rule %x\n", vnic->rss_rule);
		return rc;
	}
	HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE);

	req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(vnic->rss_rule);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	vnic->rss_rule = INVALID_HW_RING_ID;

int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
	struct hwrm_vnic_free_input req = {.req_type = 0 };
	struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;

	if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
		PMD_DRV_LOG(DEBUG, "VNIC FREE ID %x\n", vnic->fw_vnic_id);
		return rc;
	}

	HWRM_PREP(req, VNIC_FREE);

	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	vnic->fw_vnic_id = INVALID_HW_RING_ID;

int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
			   struct bnxt_vnic_info *vnic)
	struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
	struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_RSS_CFG);

	req.hash_type = rte_cpu_to_le_32(vnic->hash_type);

	req.ring_grp_tbl_addr =
	    rte_cpu_to_le_64(vnic->rss_table_dma_addr);
	req.hash_key_tbl_addr =
	    rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
	req.rss_ctx_idx = rte_cpu_to_le_16(vnic->rss_rule);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
			       struct bnxt_vnic_info *vnic)
	struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
	struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_PLCMODES_CFG);

	req.flags = rte_cpu_to_le_32(
			HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);

	req.enables = rte_cpu_to_le_32(
		HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID);

	size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
	size -= RTE_PKTMBUF_HEADROOM;
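	/*
	 * Example (assuming default mempool sizing): a 2176-byte data room
	 * minus the 128-byte RTE_PKTMBUF_HEADROOM leaves a 2048-byte jumbo
	 * threshold, i.e. packets larger than one mbuf's usable data area
	 * are treated as jumbo.
	 */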
	req.jumbo_thresh = rte_cpu_to_le_16(size);
	req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
			   struct bnxt_vnic_info *vnic, bool enable)
	struct hwrm_vnic_tpa_cfg_input req = {.req_type = 0 };
	struct hwrm_vnic_tpa_cfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_TPA_CFG);

	if (enable) {
		req.enables = rte_cpu_to_le_32(
				HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS |
				HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS |
				HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN);
		req.flags = rte_cpu_to_le_32(
				HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA |
				HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA |
				HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE |
				HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO |
				HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN |
			HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ);
		req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
		req.max_agg_segs = rte_cpu_to_le_16(5);
		req.max_aggs =
			rte_cpu_to_le_16(HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX);
		req.min_agg_len = rte_cpu_to_le_32(512);
	}

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr)
	struct hwrm_func_cfg_input req = {0};
	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;

	req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
	req.enables = rte_cpu_to_le_32(
			HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
	memcpy(req.dflt_mac_addr, mac_addr, sizeof(req.dflt_mac_addr));
	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);

	HWRM_PREP(req, FUNC_CFG);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
	HWRM_CHECK_RESULT();

	bp->pf.vf_info[vf].random_mac = false;

int bnxt_hwrm_func_qstats_tx_drop(struct bnxt *bp, uint16_t fid,
				  uint64_t *dropped)
	struct hwrm_func_qstats_input req = {.req_type = 0};
	struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, FUNC_QSTATS);

	req.fid = rte_cpu_to_le_16(fid);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	if (dropped)
		*dropped = rte_le_to_cpu_64(resp->tx_drop_pkts);

int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid,
			  struct rte_eth_stats *stats)
	struct hwrm_func_qstats_input req = {.req_type = 0};
	struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, FUNC_QSTATS);

	req.fid = rte_cpu_to_le_16(fid);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	stats->ipackets = rte_le_to_cpu_64(resp->rx_ucast_pkts);
	stats->ipackets += rte_le_to_cpu_64(resp->rx_mcast_pkts);
	stats->ipackets += rte_le_to_cpu_64(resp->rx_bcast_pkts);
	stats->ibytes = rte_le_to_cpu_64(resp->rx_ucast_bytes);
	stats->ibytes += rte_le_to_cpu_64(resp->rx_mcast_bytes);
	stats->ibytes += rte_le_to_cpu_64(resp->rx_bcast_bytes);

	stats->opackets = rte_le_to_cpu_64(resp->tx_ucast_pkts);
	stats->opackets += rte_le_to_cpu_64(resp->tx_mcast_pkts);
	stats->opackets += rte_le_to_cpu_64(resp->tx_bcast_pkts);
	stats->obytes = rte_le_to_cpu_64(resp->tx_ucast_bytes);
	stats->obytes += rte_le_to_cpu_64(resp->tx_mcast_bytes);
	stats->obytes += rte_le_to_cpu_64(resp->tx_bcast_bytes);

	stats->ierrors = rte_le_to_cpu_64(resp->rx_err_pkts);
	stats->oerrors = rte_le_to_cpu_64(resp->tx_err_pkts);

	stats->imissed = rte_le_to_cpu_64(resp->rx_drop_pkts);

int bnxt_hwrm_func_clr_stats(struct bnxt *bp, uint16_t fid)
	struct hwrm_func_clr_stats_input req = {.req_type = 0};
	struct hwrm_func_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, FUNC_CLR_STATS);

	req.fid = rte_cpu_to_le_16(fid);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

/*
 * HWRM utility functions
 */

int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
	for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
		struct bnxt_tx_queue *txq;
		struct bnxt_rx_queue *rxq;
		struct bnxt_cp_ring_info *cpr;

		if (i >= bp->rx_cp_nr_rings) {
			txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
			cpr = txq->cp_ring;
		} else {
			rxq = bp->rx_queues[i];
			cpr = rxq->cp_ring;
		}

		rc = bnxt_hwrm_stat_clear(bp, cpr);
	}

int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
	struct bnxt_cp_ring_info *cpr;

	for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {

		if (i >= bp->rx_cp_nr_rings) {
			cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
		} else {
			cpr = bp->rx_queues[i]->cp_ring;
			bp->grp_info[i].fw_stats_ctx = -1;
		}
		if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
			rc = bnxt_hwrm_stat_ctx_free(bp, cpr, i);
			cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
		}
	}

int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
	for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
		struct bnxt_tx_queue *txq;
		struct bnxt_rx_queue *rxq;
		struct bnxt_cp_ring_info *cpr;

		if (i >= bp->rx_cp_nr_rings) {
			txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
			cpr = txq->cp_ring;
		} else {
			rxq = bp->rx_queues[i];
			cpr = rxq->cp_ring;
		}

		rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, i);
	}

int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
	for (idx = 0; idx < bp->rx_cp_nr_rings; idx++) {

		if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID)
			continue;

		rc = bnxt_hwrm_ring_grp_free(bp, idx);
	}

static void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			      unsigned int idx __rte_unused)
	struct bnxt_ring *cp_ring = cpr->cp_ring_struct;

	bnxt_hwrm_ring_free(bp, cp_ring,
			    HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL);
	cp_ring->fw_ring_id = INVALID_HW_RING_ID;
	memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
			sizeof(*cpr->cp_desc_ring));
	cpr->cp_raw_cons = 0;

int bnxt_free_all_hwrm_rings(struct bnxt *bp)
	for (i = 0; i < bp->tx_cp_nr_rings; i++) {
		struct bnxt_tx_queue *txq = bp->tx_queues[i];
		struct bnxt_tx_ring_info *txr = txq->tx_ring;
		struct bnxt_ring *ring = txr->tx_ring_struct;
		struct bnxt_cp_ring_info *cpr = txq->cp_ring;
		unsigned int idx = bp->rx_cp_nr_rings + i + 1;

		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
			bnxt_hwrm_ring_free(bp, ring,
					HWRM_RING_FREE_INPUT_RING_TYPE_TX);
			ring->fw_ring_id = INVALID_HW_RING_ID;
			memset(txr->tx_desc_ring, 0,
					txr->tx_ring_struct->ring_size *
					sizeof(*txr->tx_desc_ring));
			memset(txr->tx_buf_ring, 0,
					txr->tx_ring_struct->ring_size *
					sizeof(*txr->tx_buf_ring));
		}
		if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
			bnxt_free_cp_ring(bp, cpr, idx);
			cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
		}
	}

	for (i = 0; i < bp->rx_cp_nr_rings; i++) {
		struct bnxt_rx_queue *rxq = bp->rx_queues[i];
		struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
		struct bnxt_ring *ring = rxr->rx_ring_struct;
		struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
		unsigned int idx = i + 1;

		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
			bnxt_hwrm_ring_free(bp, ring,
					HWRM_RING_FREE_INPUT_RING_TYPE_RX);
			ring->fw_ring_id = INVALID_HW_RING_ID;
			bp->grp_info[idx].rx_fw_ring_id = INVALID_HW_RING_ID;
			memset(rxr->rx_desc_ring, 0,
					rxr->rx_ring_struct->ring_size *
					sizeof(*rxr->rx_desc_ring));
			memset(rxr->rx_buf_ring, 0,
					rxr->rx_ring_struct->ring_size *
					sizeof(*rxr->rx_buf_ring));
		}

		ring = rxr->ag_ring_struct;
		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
			bnxt_hwrm_ring_free(bp, ring,
					    HWRM_RING_FREE_INPUT_RING_TYPE_RX);
			ring->fw_ring_id = INVALID_HW_RING_ID;
			memset(rxr->ag_buf_ring, 0,
			       rxr->ag_ring_struct->ring_size *
			       sizeof(*rxr->ag_buf_ring));
		}
		bp->grp_info[i].ag_fw_ring_id = INVALID_HW_RING_ID;

		if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
			bnxt_free_cp_ring(bp, cpr, idx);
			bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
			cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
		}
	}

	/* Default completion ring */
	{
		struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;

		if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
			bnxt_free_cp_ring(bp, cpr, 0);
			cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
		}
	}

int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
	for (i = 0; i < bp->rx_cp_nr_rings; i++) {
		rc = bnxt_hwrm_ring_grp_alloc(bp, i);
	}

void bnxt_free_hwrm_resources(struct bnxt *bp)
	/* Release the buffers allocated in bnxt_alloc_hwrm_resources() */
	rte_free(bp->hwrm_cmd_resp_addr);
	rte_free(bp->hwrm_short_cmd_req_addr);
	bp->hwrm_cmd_resp_addr = NULL;
	bp->hwrm_short_cmd_req_addr = NULL;
	bp->hwrm_cmd_resp_dma_addr = 0;
	bp->hwrm_short_cmd_req_dma_addr = 0;

int bnxt_alloc_hwrm_resources(struct bnxt *bp)
	struct rte_pci_device *pdev = bp->pdev;
	char type[RTE_MEMZONE_NAMESIZE];
	snprintf(type, sizeof(type), "bnxt_hwrm_%04x:%02x:%02x:%02x",
		 pdev->addr.domain, pdev->addr.bus, pdev->addr.devid,
		 pdev->addr.function);
	bp->max_resp_len = HWRM_MAX_RESP_LEN;
	bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
	rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
	if (bp->hwrm_cmd_resp_addr == NULL)
		return -ENOMEM;
	bp->hwrm_cmd_resp_dma_addr =
		rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
	if (bp->hwrm_cmd_resp_dma_addr == 0) {
		PMD_DRV_LOG(ERR,
			"unable to map response address to physical memory\n");
		return -ENOMEM;
	}
	rte_spinlock_init(&bp->hwrm_lock);

int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
	struct bnxt_filter_info *filter;

	STAILQ_FOREACH(filter, &vnic->filter, next) {
		if (filter->filter_type == HWRM_CFA_EM_FILTER)
			rc = bnxt_hwrm_clear_em_filter(bp, filter);
		else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
			rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
		else
			rc = bnxt_hwrm_clear_l2_filter(bp, filter);
	}

static int
bnxt_clear_hwrm_vnic_flows(struct bnxt *bp, struct bnxt_vnic_info *vnic)
	struct bnxt_filter_info *filter;
	struct rte_flow *flow;

	STAILQ_FOREACH(flow, &vnic->flow_list, next) {
		filter = flow->filter;
		PMD_DRV_LOG(ERR, "filter type %d\n", filter->filter_type);
		if (filter->filter_type == HWRM_CFA_EM_FILTER)
			rc = bnxt_hwrm_clear_em_filter(bp, filter);
		else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
			rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
		else
			rc = bnxt_hwrm_clear_l2_filter(bp, filter);

		STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
	}

int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
	struct bnxt_filter_info *filter;

	STAILQ_FOREACH(filter, &vnic->filter, next) {
		if (filter->filter_type == HWRM_CFA_EM_FILTER)
			rc = bnxt_hwrm_set_em_filter(bp, filter->dst_id,
						     filter);
		else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
			rc = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id,
							 filter);
		else
			rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
						     filter);
	}

void bnxt_free_tunnel_ports(struct bnxt *bp)
	if (bp->vxlan_port_cnt)
		bnxt_hwrm_tunnel_dst_port_free(bp, bp->vxlan_fw_dst_port_id,
			HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN);
	bp->vxlan_port = 0;
	if (bp->geneve_port_cnt)
		bnxt_hwrm_tunnel_dst_port_free(bp, bp->geneve_fw_dst_port_id,
			HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE);
	bp->geneve_port = 0;

void bnxt_free_all_hwrm_resources(struct bnxt *bp)
	if (bp->vnic_info == NULL)
		return;

	/*
	 * Clean up VNICs in reverse order, to make sure the L2 filter
	 * from vnic0 is the last to be cleaned up.
	 */
	for (i = bp->nr_vnics - 1; i >= 0; i--) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

		bnxt_clear_hwrm_vnic_flows(bp, vnic);

		bnxt_clear_hwrm_vnic_filters(bp, vnic);

		bnxt_hwrm_vnic_ctx_free(bp, vnic);

		bnxt_hwrm_vnic_tpa_cfg(bp, vnic, false);

		bnxt_hwrm_vnic_free(bp, vnic);
	}
	/* Ring resources */
	bnxt_free_all_hwrm_rings(bp);
	bnxt_free_all_hwrm_ring_grps(bp);
	bnxt_free_all_hwrm_stat_ctxs(bp);
	bnxt_free_tunnel_ports(bp);

static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
	uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;

	if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
		return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;

	switch (conf_link_speed) {
	case ETH_LINK_SPEED_10M_HD:
	case ETH_LINK_SPEED_100M_HD:
		return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
	}
	return hw_link_duplex;

static uint16_t bnxt_check_eth_link_autoneg(uint32_t conf_link)
	return (conf_link & ETH_LINK_SPEED_FIXED) ? 0 : 1;

static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
	uint16_t eth_link_speed = 0;

	if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
		return ETH_LINK_SPEED_AUTONEG;

	switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
	case ETH_LINK_SPEED_100M:
	case ETH_LINK_SPEED_100M_HD:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
		break;
	case ETH_LINK_SPEED_1G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
		break;
	case ETH_LINK_SPEED_2_5G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
		break;
	case ETH_LINK_SPEED_10G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
		break;
	case ETH_LINK_SPEED_20G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
		break;
	case ETH_LINK_SPEED_25G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
		break;
	case ETH_LINK_SPEED_40G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
		break;
	case ETH_LINK_SPEED_50G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
		break;
	case ETH_LINK_SPEED_100G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB;
		break;
	default:
		PMD_DRV_LOG(ERR,
			"Unsupported link speed %d; defaulting to AUTO\n",
			conf_link_speed);
		break;
	}
	return eth_link_speed;

#define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
	ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
	ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
	ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G | ETH_LINK_SPEED_100G)

static int bnxt_valid_link_speed(uint32_t link_speed, uint16_t port_id)
	if (link_speed == ETH_LINK_SPEED_AUTONEG)
		return 0;
	if (link_speed & ETH_LINK_SPEED_FIXED) {
		one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;
		if (one_speed & (one_speed - 1)) {
			PMD_DRV_LOG(ERR,
				"Invalid advertised speeds (%u) for port %u\n",
				link_speed, port_id);
			return -EINVAL;
		}
		if ((one_speed & BNXT_SUPPORTED_SPEEDS) != one_speed) {
			PMD_DRV_LOG(ERR,
				"Unsupported advertised speed (%u) for port %u\n",
				link_speed, port_id);
			return -EINVAL;
		}
	} else {
		if (!(link_speed & BNXT_SUPPORTED_SPEEDS)) {
			PMD_DRV_LOG(ERR,
				"Unsupported advertised speeds (%u) for port %u\n",
				link_speed, port_id);
			return -EINVAL;
		}
	}
	return 0;

static uint16_t
bnxt_parse_eth_link_speed_mask(struct bnxt *bp, uint32_t link_speed)
	uint16_t ret = 0;

	if (link_speed == ETH_LINK_SPEED_AUTONEG) {
		if (bp->link_info.support_speeds)
			return bp->link_info.support_speeds;
		link_speed = BNXT_SUPPORTED_SPEEDS;
	}

	if (link_speed & ETH_LINK_SPEED_100M)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
	if (link_speed & ETH_LINK_SPEED_100M_HD)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
	if (link_speed & ETH_LINK_SPEED_1G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
	if (link_speed & ETH_LINK_SPEED_2_5G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
	if (link_speed & ETH_LINK_SPEED_10G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
	if (link_speed & ETH_LINK_SPEED_20G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
	if (link_speed & ETH_LINK_SPEED_25G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
	if (link_speed & ETH_LINK_SPEED_40G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
	if (link_speed & ETH_LINK_SPEED_50G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
	if (link_speed & ETH_LINK_SPEED_100G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100GB;
	return ret;

static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
	uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;

	switch (hw_link_speed) {
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
		eth_link_speed = ETH_SPEED_NUM_100M;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
		eth_link_speed = ETH_SPEED_NUM_1G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
		eth_link_speed = ETH_SPEED_NUM_2_5G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
		eth_link_speed = ETH_SPEED_NUM_10G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
		eth_link_speed = ETH_SPEED_NUM_20G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
		eth_link_speed = ETH_SPEED_NUM_25G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
		eth_link_speed = ETH_SPEED_NUM_40G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
		eth_link_speed = ETH_SPEED_NUM_50G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB:
		eth_link_speed = ETH_SPEED_NUM_100G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
	default:
		PMD_DRV_LOG(ERR, "HWRM link speed %d not defined\n",
			hw_link_speed);
		break;
	}
	return eth_link_speed;

static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
	uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;

	switch (hw_link_duplex) {
	case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
	case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
		eth_link_duplex = ETH_LINK_FULL_DUPLEX;
		break;
	case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
		eth_link_duplex = ETH_LINK_HALF_DUPLEX;
		break;
	default:
		PMD_DRV_LOG(ERR, "HWRM link duplex %d not defined\n",
			hw_link_duplex);
		break;
	}
	return eth_link_duplex;

int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
	struct bnxt_link_info *link_info = &bp->link_info;

	rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
	if (rc)
		PMD_DRV_LOG(ERR,
			"Get link config failed with rc %d\n", rc);
	if (link_info->link_speed)
		link->link_speed =
			bnxt_parse_hw_link_speed(link_info->link_speed);
	else
		link->link_speed = ETH_SPEED_NUM_NONE;
	link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
	link->link_status = link_info->link_up;
	link->link_autoneg = link_info->auto_mode ==
		HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
		ETH_LINK_FIXED : ETH_LINK_AUTONEG;
2217 int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
2220 struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
2221 struct bnxt_link_info link_req;
2222 uint16_t speed, autoneg;
2224 if (!BNXT_SINGLE_PF(bp) || BNXT_VF(bp))
2227 rc = bnxt_valid_link_speed(dev_conf->link_speeds,
2228 bp->eth_dev->data->port_id);
2232 memset(&link_req, 0, sizeof(link_req));
2233 link_req.link_up = link_up;
2237 autoneg = bnxt_check_eth_link_autoneg(dev_conf->link_speeds);
2238 speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
2239 link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
2240 /* Autoneg can be done only when the FW allows it */
2241 if (autoneg == 1 && !(bp->link_info.auto_link_speed ||
2242 bp->link_info.force_link_speed)) {
2243 link_req.phy_flags |=
2244 HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
2245 link_req.auto_link_speed_mask =
2246 bnxt_parse_eth_link_speed_mask(bp,
2247 dev_conf->link_speeds);
2249 if (bp->link_info.phy_type ==
2250 HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET ||
2251 bp->link_info.phy_type ==
2252 HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE ||
2253 bp->link_info.media_type ==
2254 HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP) {
2255 PMD_DRV_LOG(ERR, "10GBase-T devices must autoneg\n");
2259 link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
2260 /* If the user wants a particular speed, try that first. */
2261 if (speed)
2262 link_req.link_speed = speed;
2263 else if (bp->link_info.force_link_speed)
2264 link_req.link_speed = bp->link_info.force_link_speed;
2265 else
2266 link_req.link_speed = bp->link_info.auto_link_speed;
2268 link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
2269 link_req.auto_pause = bp->link_info.auto_pause;
2270 link_req.force_pause = bp->link_info.force_pause;
2273 rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
2276 "Set link config failed with rc %d\n", rc);
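/* Query this function's own configuration (FID 0xffff) and cache the
 * default VLAN, the multi-host flag and the NPAR partition type.
 */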
2284 int bnxt_hwrm_func_qcfg(struct bnxt *bp)
2286 struct hwrm_func_qcfg_input req = {0};
2287 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2291 HWRM_PREP(req, FUNC_QCFG);
2292 req.fid = rte_cpu_to_le_16(0xffff);
2294 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2296 HWRM_CHECK_RESULT();
2298 /* Hard-coded 0xfff VLAN ID mask */
2299 bp->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
2300 flags = rte_le_to_cpu_16(resp->flags);
2301 if (BNXT_PF(bp) && (flags & HWRM_FUNC_QCFG_OUTPUT_FLAGS_MULTI_HOST))
2302 bp->flags |= BNXT_FLAG_MULTI_HOST;
2304 switch (resp->port_partition_type) {
2305 case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
2306 case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
2307 case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
2308 bp->port_partition_type = resp->port_partition_type;
2309 break;
2310 default:
2311 bp->port_partition_type = 0;
2312 break;
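/* Synthesize a FUNC_QCAPS response from a FUNC_CFG request, so a failed
 * VF query below can fall back to the values that were asked for.
 */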
2320 static void copy_func_cfg_to_qcaps(struct hwrm_func_cfg_input *fcfg,
2321 struct hwrm_func_qcaps_output *qcaps)
2323 qcaps->max_rsscos_ctx = fcfg->num_rsscos_ctxs;
2324 memcpy(qcaps->mac_address, fcfg->dflt_mac_addr,
2325 sizeof(qcaps->mac_address));
2326 qcaps->max_l2_ctxs = fcfg->num_l2_ctxs;
2327 qcaps->max_rx_rings = fcfg->num_rx_rings;
2328 qcaps->max_tx_rings = fcfg->num_tx_rings;
2329 qcaps->max_cmpl_rings = fcfg->num_cmpl_rings;
2330 qcaps->max_stat_ctx = fcfg->num_stat_ctxs;
2332 qcaps->first_vf_id = 0;
2333 qcaps->max_vnics = fcfg->num_vnics;
2334 qcaps->max_decap_records = 0;
2335 qcaps->max_encap_records = 0;
2336 qcaps->max_tx_wm_flows = 0;
2337 qcaps->max_tx_em_flows = 0;
2338 qcaps->max_rx_wm_flows = 0;
2339 qcaps->max_rx_em_flows = 0;
2340 qcaps->max_flow_id = 0;
2341 qcaps->max_mcast_filters = fcfg->num_mcast_filters;
2342 qcaps->max_sp_tx_rings = 0;
2343 qcaps->max_hw_ring_grps = fcfg->num_hw_ring_grps;
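/* Program the PF's resource allotment (rings, contexts, VNICs, ring
 * groups, MTU/MRU) with the given TX ring count via HWRM_FUNC_CFG.
 */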
2346 static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
2348 struct hwrm_func_cfg_input req = {0};
2349 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2352 req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
2353 HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
2354 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
2355 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
2356 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
2357 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
2358 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
2359 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
2360 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
2361 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
2362 req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
2363 req.mtu = rte_cpu_to_le_16(BNXT_MAX_MTU);
2364 req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2365 ETHER_CRC_LEN + VLAN_TAG_SIZE);
2366 req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
2367 req.num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx);
2368 req.num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings);
2369 req.num_tx_rings = rte_cpu_to_le_16(tx_rings);
2370 req.num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings);
2371 req.num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx);
2372 req.num_vnics = rte_cpu_to_le_16(bp->max_vnics);
2373 req.num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps);
2374 req.fid = rte_cpu_to_le_16(0xffff);
2376 HWRM_PREP(req, FUNC_CFG);
2378 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2380 HWRM_CHECK_RESULT();
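/* Build a FUNC_CFG request that splits the PF's resources evenly
 * across the PF and its VFs, i.e. into (num_vfs + 1) shares.
 */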
2386 static void populate_vf_func_cfg_req(struct bnxt *bp,
2387 struct hwrm_func_cfg_input *req,
2390 req->enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
2391 HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
2392 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
2393 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
2394 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
2395 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
2396 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
2397 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
2398 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
2399 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
2401 req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2402 ETHER_CRC_LEN + VLAN_TAG_SIZE);
2403 req->mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2404 ETHER_CRC_LEN + VLAN_TAG_SIZE);
2405 req->num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx / (num_vfs + 1));
2407 req->num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
2408 req->num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings / (num_vfs + 1));
2410 req->num_tx_rings = rte_cpu_to_le_16(bp->max_tx_rings / (num_vfs + 1));
2411 req->num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings / (num_vfs + 1));
2412 req->num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));
2413 /* TODO: For now, do not support VMDq/RFS on VFs. */
2414 req->num_vnics = rte_cpu_to_le_16(1);
2415 req->num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps / (num_vfs + 1));
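/* If the VF's default MAC address is all zeroes, generate a random one
 * and remember in vf_info that it was host-assigned.
 */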
2419 static void add_random_mac_if_needed(struct bnxt *bp,
2420 struct hwrm_func_cfg_input *cfg_req,
2423 struct ether_addr mac;
2425 if (bnxt_hwrm_func_qcfg_vf_default_mac(bp, vf, &mac))
2428 if (memcmp(mac.addr_bytes, "\x00\x00\x00\x00\x00\x00", 6) == 0) {
2430 rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
2431 eth_random_addr(cfg_req->dflt_mac_addr);
2432 bp->pf.vf_info[vf].random_mac = true;
2434 memcpy(cfg_req->dflt_mac_addr, mac.addr_bytes, ETHER_ADDR_LEN);
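/* Subtract the resources a VF actually received (per FUNC_QCAPS) from
 * the PF's running totals; if the query fails, assume the VF got
 * exactly what was requested.
 */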
2438 static void reserve_resources_from_vf(struct bnxt *bp,
2439 struct hwrm_func_cfg_input *cfg_req,
2442 struct hwrm_func_qcaps_input req = {0};
2443 struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
2446 /* Get the actual allocated values now */
2447 HWRM_PREP(req, FUNC_QCAPS);
2448 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2449 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2452 PMD_DRV_LOG(ERR, "hwrm_func_qcaps failed rc:%d\n", rc);
2453 copy_func_cfg_to_qcaps(cfg_req, resp);
2454 } else if (resp->error_code) {
2455 rc = rte_le_to_cpu_16(resp->error_code);
2456 PMD_DRV_LOG(ERR, "hwrm_func_qcaps error %d\n", rc);
2457 copy_func_cfg_to_qcaps(cfg_req, resp);
2460 bp->max_rsscos_ctx -= rte_le_to_cpu_16(resp->max_rsscos_ctx);
2461 bp->max_stat_ctx -= rte_le_to_cpu_16(resp->max_stat_ctx);
2462 bp->max_cp_rings -= rte_le_to_cpu_16(resp->max_cmpl_rings);
2463 bp->max_tx_rings -= rte_le_to_cpu_16(resp->max_tx_rings);
2464 bp->max_rx_rings -= rte_le_to_cpu_16(resp->max_rx_rings);
2465 bp->max_l2_ctx -= rte_le_to_cpu_16(resp->max_l2_ctxs);
2467 * TODO: VMDq is not supported with VFs, so max_vnics is always
2468 * forced to 1 in this case and is not reserved from the PF here.
2470 /* bp->max_vnics -= rte_le_to_cpu_16(resp->max_vnics); */
2471 bp->max_ring_grps -= rte_le_to_cpu_16(resp->max_hw_ring_grps);
2476 int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf)
2478 struct hwrm_func_qcfg_input req = {0};
2479 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2482 /* Query the VF's currently configured default VLAN */
2483 HWRM_PREP(req, FUNC_QCFG);
2484 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2485 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2487 PMD_DRV_LOG(ERR, "hwrm_func_qcfg failed rc:%d\n", rc);
2489 } else if (resp->error_code) {
2490 rc = rte_le_to_cpu_16(resp->error_code);
2491 PMD_DRV_LOG(ERR, "hwrm_func_qcfg error %d\n", rc);
2494 rc = rte_le_to_cpu_16(resp->vlan);
2501 static int update_pf_resource_max(struct bnxt *bp)
2503 struct hwrm_func_qcfg_input req = {0};
2504 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2507 /* Copy the allocated resource counts into the PF struct */
2508 HWRM_PREP(req, FUNC_QCFG);
2509 req.fid = rte_cpu_to_le_16(0xffff);
2510 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2511 HWRM_CHECK_RESULT();
2513 /* Only TX ring value reflects actual allocation? TODO */
2514 bp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
2515 bp->pf.evb_mode = resp->evb_mode;
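/* Configure the function as a PF with no VFs: all resources stay with
 * the PF and standard TX ring mode is disabled.
 */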
2522 int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
2527 PMD_DRV_LOG(ERR, "Attempt to allocate VFs on a VF!\n");
2531 rc = bnxt_hwrm_func_qcaps(bp);
2535 bp->pf.func_cfg_flags &=
2536 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
2537 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
2538 bp->pf.func_cfg_flags |=
2539 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE;
2540 rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
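/* Carve the function's resources into per-VF shares, register the VF
 * request-forwarding buffer, then configure each VF in turn. Expected
 * to run once during PF initialization when VFs are requested.
 */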
2544 int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
2546 struct hwrm_func_cfg_input req = {0};
2547 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2554 PMD_DRV_LOG(ERR, "Attempt to allocate VFs on a VF!\n");
2558 rc = bnxt_hwrm_func_qcaps(bp);
2563 bp->pf.active_vfs = num_vfs;
2566 * First, configure the PF to only use one TX ring. This ensures that
2567 * there are enough rings for all VFs.
2569 * If we don't do this, when we call func_alloc() later, we will lock
2570 * extra rings to the PF that won't be available during func_cfg() of the VFs.
2573 * This has been fixed in firmware versions above 20.6.54.
2575 bp->pf.func_cfg_flags &=
2576 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
2577 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
2578 bp->pf.func_cfg_flags |=
2579 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE;
2580 rc = bnxt_hwrm_pf_func_cfg(bp, 1);
2585 * Now, create and register a buffer to hold forwarded VF requests
2587 req_buf_sz = num_vfs * HWRM_MAX_REQ_LEN;
2588 bp->pf.vf_req_buf = rte_malloc("bnxt_vf_fwd", req_buf_sz,
2589 page_roundup(req_buf_sz));
2590 if (bp->pf.vf_req_buf == NULL) {
2594 for (sz = 0; sz < req_buf_sz; sz += getpagesize())
2595 rte_mem_lock_page(((char *)bp->pf.vf_req_buf) + sz);
2596 for (i = 0; i < num_vfs; i++)
2597 bp->pf.vf_info[i].req_buf = ((char *)bp->pf.vf_req_buf) +
2598 (i * HWRM_MAX_REQ_LEN);
2600 rc = bnxt_hwrm_func_buf_rgtr(bp);
2604 populate_vf_func_cfg_req(bp, &req, num_vfs);
2606 bp->pf.active_vfs = 0;
2607 for (i = 0; i < num_vfs; i++) {
2608 add_random_mac_if_needed(bp, &req, i);
2610 HWRM_PREP(req, FUNC_CFG);
2611 req.flags = rte_cpu_to_le_32(bp->pf.vf_info[i].func_cfg_flags);
2612 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[i].fid);
2613 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2615 /* Clear enable flag for next pass */
2616 req.enables &= ~rte_cpu_to_le_32(
2617 HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
2619 if (rc || resp->error_code) {
2621 "Failed to initialize VF %d\n", i);
2623 "Not all VFs available. (%d, %d)\n",
2624 rc, resp->error_code);
2631 reserve_resources_from_vf(bp, &req, i);
2632 bp->pf.active_vfs++;
2633 bnxt_hwrm_func_clr_stats(bp, bp->pf.vf_info[i].fid);
2637 * Now configure the PF to use "the rest" of the resources.
2638 * Note that STD_TX_RING_MODE is set here, which limits the number of
2639 * TX rings but allows QoS to function properly. Without it, the PF
2640 * rings would break the bandwidth settings.
2642 rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
2646 rc = update_pf_resource_max(bp);
2653 bnxt_hwrm_func_buf_unrgtr(bp);
2657 int bnxt_hwrm_pf_evb_mode(struct bnxt *bp)
2659 struct hwrm_func_cfg_input req = {0};
2660 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2663 HWRM_PREP(req, FUNC_CFG);
2665 req.fid = rte_cpu_to_le_16(0xffff);
2666 req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_EVB_MODE);
2667 req.evb_mode = bp->pf.evb_mode;
2669 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2670 HWRM_CHECK_RESULT();
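/* Tell the firmware which UDP destination port carries VXLAN or Geneve
 * traffic, caching the returned firmware port id for a later free.
 * Illustrative call (udp_port is a hypothetical host-order value, not
 * taken from this file):
 *
 *   rc = bnxt_hwrm_tunnel_dst_port_alloc(bp, udp_port,
 *       HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN);
 */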
2676 int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port,
2677 uint8_t tunnel_type)
2679 struct hwrm_tunnel_dst_port_alloc_input req = {0};
2680 struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
2683 HWRM_PREP(req, TUNNEL_DST_PORT_ALLOC);
2684 req.tunnel_type = tunnel_type;
2685 req.tunnel_dst_port_val = port;
2686 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2687 HWRM_CHECK_RESULT();
2689 switch (tunnel_type) {
2690 case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN:
2691 bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
2692 bp->vxlan_port = port;
2693 break;
2694 case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE:
2695 bp->geneve_fw_dst_port_id = resp->tunnel_dst_port_id;
2696 bp->geneve_port = port;
2697 break;
2698 default:
2699 break;
2700 }
2707 int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, uint16_t port,
2708 uint8_t tunnel_type)
2710 struct hwrm_tunnel_dst_port_free_input req = {0};
2711 struct hwrm_tunnel_dst_port_free_output *resp = bp->hwrm_cmd_resp_addr;
2714 HWRM_PREP(req, TUNNEL_DST_PORT_FREE);
2716 req.tunnel_type = tunnel_type;
2717 req.tunnel_dst_port_id = rte_cpu_to_be_16(port);
2718 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2720 HWRM_CHECK_RESULT();
2726 int bnxt_hwrm_func_cfg_vf_set_flags(struct bnxt *bp, uint16_t vf,
2729 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2730 struct hwrm_func_cfg_input req = {0};
2733 HWRM_PREP(req, FUNC_CFG);
2735 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2736 req.flags = rte_cpu_to_le_32(flags);
2737 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2739 HWRM_CHECK_RESULT();
2745 void vf_vnic_set_rxmask_cb(struct bnxt_vnic_info *vnic, void *flagp)
2747 uint32_t *flag = flagp;
2749 vnic->flags = *flag;
2752 int bnxt_set_rx_mask_no_vlan(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2754 return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
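/* Register the VF request-forwarding buffer with the firmware so that
 * HWRM requests originating from VFs can be placed there for the PF
 * driver to examine and forward, reject or execute.
 */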
2757 int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
2760 struct hwrm_func_buf_rgtr_input req = {.req_type = 0 };
2761 struct hwrm_func_buf_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
2763 HWRM_PREP(req, FUNC_BUF_RGTR);
2765 req.req_buf_num_pages = rte_cpu_to_le_16(1);
2766 req.req_buf_page_size = rte_cpu_to_le_16(
2767 page_getenum(bp->pf.active_vfs * HWRM_MAX_REQ_LEN));
2768 req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
2769 req.req_buf_page_addr[0] =
2770 rte_cpu_to_le_64(rte_mem_virt2iova(bp->pf.vf_req_buf));
2771 if (req.req_buf_page_addr[0] == 0) {
2773 "unable to map buffer address to physical memory\n");
2777 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2779 HWRM_CHECK_RESULT();
2785 int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp)
2788 struct hwrm_func_buf_unrgtr_input req = {.req_type = 0 };
2789 struct hwrm_func_buf_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
2791 HWRM_PREP(req, FUNC_BUF_UNRGTR);
2793 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2795 HWRM_CHECK_RESULT();
2801 int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp)
2803 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2804 struct hwrm_func_cfg_input req = {0};
2807 HWRM_PREP(req, FUNC_CFG);
2809 req.fid = rte_cpu_to_le_16(0xffff);
2810 req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
2811 req.enables = rte_cpu_to_le_32(
2812 HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
2813 req.async_event_cr = rte_cpu_to_le_16(
2814 bp->def_cp_ring->cp_ring_struct->fw_ring_id);
2815 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2817 HWRM_CHECK_RESULT();
2823 int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp)
2825 struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2826 struct hwrm_func_vf_cfg_input req = {0};
2829 HWRM_PREP(req, FUNC_VF_CFG);
2831 req.enables = rte_cpu_to_le_32(
2832 HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
2833 req.async_event_cr = rte_cpu_to_le_16(
2834 bp->def_cp_ring->cp_ring_struct->fw_ring_id);
2835 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2837 HWRM_CHECK_RESULT();
2843 int bnxt_hwrm_set_default_vlan(struct bnxt *bp, int vf, uint8_t is_vf)
2845 struct hwrm_func_cfg_input req = {0};
2846 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2847 uint16_t dflt_vlan, fid;
2848 uint32_t func_cfg_flags;
2851 HWRM_PREP(req, FUNC_CFG);
2854 dflt_vlan = bp->pf.vf_info[vf].dflt_vlan;
2855 fid = bp->pf.vf_info[vf].fid;
2856 func_cfg_flags = bp->pf.vf_info[vf].func_cfg_flags;
2858 fid = rte_cpu_to_le_16(0xffff);
2859 func_cfg_flags = bp->pf.func_cfg_flags;
2860 dflt_vlan = bp->vlan;
2863 req.flags = rte_cpu_to_le_32(func_cfg_flags);
2864 req.fid = rte_cpu_to_le_16(fid);
2865 req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
2866 req.dflt_vlan = rte_cpu_to_le_16(dflt_vlan);
2868 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2870 HWRM_CHECK_RESULT();
2876 int bnxt_hwrm_func_bw_cfg(struct bnxt *bp, uint16_t vf,
2877 uint16_t max_bw, uint16_t enables)
2879 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2880 struct hwrm_func_cfg_input req = {0};
2883 HWRM_PREP(req, FUNC_CFG);
2885 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2886 req.enables |= rte_cpu_to_le_32(enables);
2887 req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
2888 req.max_bw = rte_cpu_to_le_32(max_bw);
2889 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2891 HWRM_CHECK_RESULT();
2897 int bnxt_hwrm_set_vf_vlan(struct bnxt *bp, int vf)
2899 struct hwrm_func_cfg_input req = {0};
2900 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2903 HWRM_PREP(req, FUNC_CFG);
2905 req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
2906 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2907 req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
2908 req.dflt_vlan = rte_cpu_to_le_16(bp->pf.vf_info[vf].dflt_vlan);
2910 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2912 HWRM_CHECK_RESULT();
2918 int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,
2919 void *encaped, size_t ec_size)
2922 struct hwrm_reject_fwd_resp_input req = {.req_type = 0};
2923 struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
2925 if (ec_size > sizeof(req.encap_request))
2928 HWRM_PREP(req, REJECT_FWD_RESP);
2930 req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
2931 memcpy(req.encap_request, encaped, ec_size);
2933 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2935 HWRM_CHECK_RESULT();
2941 int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf,
2942 struct ether_addr *mac)
2944 struct hwrm_func_qcfg_input req = {0};
2945 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2948 HWRM_PREP(req, FUNC_QCFG);
2950 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2951 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2953 HWRM_CHECK_RESULT();
2955 memcpy(mac->addr_bytes, resp->mac_address, ETHER_ADDR_LEN);
2962 int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
2963 void *encaped, size_t ec_size)
2966 struct hwrm_exec_fwd_resp_input req = {.req_type = 0};
2967 struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
2969 if (ec_size > sizeof(req.encap_request))
2972 HWRM_PREP(req, EXEC_FWD_RESP);
2974 req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
2975 memcpy(req.encap_request, encaped, ec_size);
2977 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2979 HWRM_CHECK_RESULT();
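/* Read one statistics context and accumulate it into the per-queue
 * counters of *stats; rx selects the RX or TX half of the response.
 * A caller would typically loop over its rings, roughly (illustrative
 * sketch only, cpr being a ring's completion-ring info):
 *
 *   for (i = 0; i < bp->rx_cp_nr_rings; i++)
 *       bnxt_hwrm_ctx_qstats(bp, cpr->hw_stats_ctx_id, i, stats, 1);
 */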
2985 int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx,
2986 struct rte_eth_stats *stats, uint8_t rx)
2989 struct hwrm_stat_ctx_query_input req = {.req_type = 0};
2990 struct hwrm_stat_ctx_query_output *resp = bp->hwrm_cmd_resp_addr;
2992 HWRM_PREP(req, STAT_CTX_QUERY);
2994 req.stat_ctx_id = rte_cpu_to_le_32(cid);
2996 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2998 HWRM_CHECK_RESULT();
3000 if (rx) {
3001 stats->q_ipackets[idx] = rte_le_to_cpu_64(resp->rx_ucast_pkts);
3002 stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_mcast_pkts);
3003 stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_bcast_pkts);
3004 stats->q_ibytes[idx] = rte_le_to_cpu_64(resp->rx_ucast_bytes);
3005 stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_mcast_bytes);
3006 stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_bcast_bytes);
3007 stats->q_errors[idx] = rte_le_to_cpu_64(resp->rx_err_pkts);
3008 stats->q_errors[idx] += rte_le_to_cpu_64(resp->rx_drop_pkts);
3009 } else {
3010 stats->q_opackets[idx] = rte_le_to_cpu_64(resp->tx_ucast_pkts);
3011 stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_mcast_pkts);
3012 stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_bcast_pkts);
3013 stats->q_obytes[idx] = rte_le_to_cpu_64(resp->tx_ucast_bytes);
3014 stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_mcast_bytes);
3015 stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_bcast_bytes);
3016 stats->q_errors[idx] += rte_le_to_cpu_64(resp->tx_err_pkts);
3017 }
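/* DMA the port-level statistics into the hw_tx/hw_rx_port_stats
 * buffers mapped earlier; only meaningful when BNXT_FLAG_PORT_STATS
 * is set.
 */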
3025 int bnxt_hwrm_port_qstats(struct bnxt *bp)
3027 struct hwrm_port_qstats_input req = {0};
3028 struct hwrm_port_qstats_output *resp = bp->hwrm_cmd_resp_addr;
3029 struct bnxt_pf_info *pf = &bp->pf;
3032 if (!(bp->flags & BNXT_FLAG_PORT_STATS))
3035 HWRM_PREP(req, PORT_QSTATS);
3037 req.port_id = rte_cpu_to_le_16(pf->port_id);
3038 req.tx_stat_host_addr = rte_cpu_to_le_64(bp->hw_tx_port_stats_map);
3039 req.rx_stat_host_addr = rte_cpu_to_le_64(bp->hw_rx_port_stats_map);
3040 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3042 HWRM_CHECK_RESULT();
3048 int bnxt_hwrm_port_clr_stats(struct bnxt *bp)
3050 struct hwrm_port_clr_stats_input req = {0};
3051 struct hwrm_port_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
3052 struct bnxt_pf_info *pf = &bp->pf;
3055 if (!(bp->flags & BNXT_FLAG_PORT_STATS))
3058 HWRM_PREP(req, PORT_CLR_STATS);
3060 req.port_id = rte_cpu_to_le_16(pf->port_id);
3061 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3063 HWRM_CHECK_RESULT();
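/* Query the port's LED capabilities and cache in bp->leds the LEDs
 * usable for identify-blinking (those with a group id and
 * alternate-blink support).
 */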
3069 int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
3071 struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
3072 struct hwrm_port_led_qcaps_input req = {0};
3078 HWRM_PREP(req, PORT_LED_QCAPS);
3079 req.port_id = bp->pf.port_id;
3080 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3082 HWRM_CHECK_RESULT();
3084 if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
3087 bp->num_leds = resp->num_leds;
3088 memcpy(bp->leds, &resp->led0_id,
3089 sizeof(bp->leds[0]) * bp->num_leds);
3090 for (i = 0; i < bp->num_leds; i++) {
3091 struct bnxt_led_info *led = &bp->leds[i];
3093 uint16_t caps = led->led_state_caps;
3095 if (!led->led_group_id ||
3096 !BNXT_LED_ALT_BLINK_CAP(caps)) {
3108 int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on)
3110 struct hwrm_port_led_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3111 struct hwrm_port_led_cfg_input req = {0};
3112 struct bnxt_led_cfg *led_cfg;
3113 uint8_t led_state = HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_DEFAULT;
3114 uint16_t duration = 0;
3117 if (!bp->num_leds || BNXT_VF(bp))
3120 HWRM_PREP(req, PORT_LED_CFG);
3123 led_state = HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT;
3124 duration = rte_cpu_to_le_16(500);
3126 req.port_id = bp->pf.port_id;
3127 req.num_leds = bp->num_leds;
3128 led_cfg = (struct bnxt_led_cfg *)&req.led0_id;
3129 for (i = 0; i < bp->num_leds; i++, led_cfg++) {
3130 req.enables |= BNXT_LED_DFLT_ENABLES(i);
3131 led_cfg->led_id = bp->leds[i].led_id;
3132 led_cfg->led_state = led_state;
3133 led_cfg->led_blink_on = duration;
3134 led_cfg->led_blink_off = duration;
3135 led_cfg->led_group_id = bp->leds[i].led_group_id;
3138 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3140 HWRM_CHECK_RESULT();
3146 int bnxt_hwrm_nvm_get_dir_info(struct bnxt *bp, uint32_t *entries,
3150 struct hwrm_nvm_get_dir_info_input req = {0};
3151 struct hwrm_nvm_get_dir_info_output *resp = bp->hwrm_cmd_resp_addr;
3153 HWRM_PREP(req, NVM_GET_DIR_INFO);
3155 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3157 HWRM_CHECK_RESULT();
3161 *entries = rte_le_to_cpu_32(resp->entries);
3162 *length = rte_le_to_cpu_32(resp->entry_length);
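/* Copy the NVM directory into the caller's buffer: two leading bytes
 * holding the entry count and entry length, followed by the raw
 * directory entries fetched through a DMA bounce buffer.
 */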
3167 int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data)
3170 uint32_t dir_entries;
3171 uint32_t entry_length;
3174 rte_iova_t dma_handle;
3175 struct hwrm_nvm_get_dir_entries_input req = {0};
3176 struct hwrm_nvm_get_dir_entries_output *resp = bp->hwrm_cmd_resp_addr;
3178 rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length);
3182 *data++ = dir_entries;
3183 *data++ = entry_length;
3184 len -= 2;
3185 memset(data, 0xff, len);
3187 buflen = dir_entries * entry_length;
3188 buf = rte_malloc("nvm_dir", buflen, 0);
3189 rte_mem_lock_page(buf);
3192 dma_handle = rte_mem_virt2iova(buf);
3193 if (dma_handle == 0) {
3195 "unable to map response address to physical memory\n");
3198 HWRM_PREP(req, NVM_GET_DIR_ENTRIES);
3199 req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
3200 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3202 HWRM_CHECK_RESULT();
3206 memcpy(data, buf, len > buflen ? buflen : len);
3213 int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index,
3214 uint32_t offset, uint32_t length,
3219 rte_iova_t dma_handle;
3220 struct hwrm_nvm_read_input req = {0};
3221 struct hwrm_nvm_read_output *resp = bp->hwrm_cmd_resp_addr;
3223 buf = rte_malloc("nvm_item", length, 0);
3224 rte_mem_lock_page(buf);
3228 dma_handle = rte_mem_virt2iova(buf);
3229 if (dma_handle == 0) {
3231 "unable to map response address to physical memory\n");
3234 HWRM_PREP(req, NVM_READ);
3235 req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
3236 req.dir_idx = rte_cpu_to_le_16(index);
3237 req.offset = rte_cpu_to_le_32(offset);
3238 req.len = rte_cpu_to_le_32(length);
3239 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3240 HWRM_CHECK_RESULT();
3243 memcpy(data, buf, length);
3249 int bnxt_hwrm_erase_nvram_directory(struct bnxt *bp, uint8_t index)
3252 struct hwrm_nvm_erase_dir_entry_input req = {0};
3253 struct hwrm_nvm_erase_dir_entry_output *resp = bp->hwrm_cmd_resp_addr;
3255 HWRM_PREP(req, NVM_ERASE_DIR_ENTRY);
3256 req.dir_idx = rte_cpu_to_le_16(index);
3257 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3258 HWRM_CHECK_RESULT();
3265 int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
3266 uint16_t dir_ordinal, uint16_t dir_ext,
3267 uint16_t dir_attr, const uint8_t *data,
3271 struct hwrm_nvm_write_input req = {0};
3272 struct hwrm_nvm_write_output *resp = bp->hwrm_cmd_resp_addr;
3273 rte_iova_t dma_handle;
3276 HWRM_PREP(req, NVM_WRITE);
3278 req.dir_type = rte_cpu_to_le_16(dir_type);
3279 req.dir_ordinal = rte_cpu_to_le_16(dir_ordinal);
3280 req.dir_ext = rte_cpu_to_le_16(dir_ext);
3281 req.dir_attr = rte_cpu_to_le_16(dir_attr);
3282 req.dir_data_length = rte_cpu_to_le_32(data_len);
3284 buf = rte_malloc("nvm_write", data_len, 0);
3285 rte_mem_lock_page(buf);
3289 dma_handle = rte_mem_virt2iova(buf);
3290 if (dma_handle == 0) {
3292 "unable to map response address to physical memory\n");
3295 memcpy(buf, data, data_len);
3296 req.host_src_addr = rte_cpu_to_le_64(dma_handle);
3298 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3300 HWRM_CHECK_RESULT();
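/* Callback pair for bnxt_hwrm_func_vf_vnic_query_and_config(): count
 * a VF's active VNICs without reprogramming them.
 */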
3308 bnxt_vnic_count(struct bnxt_vnic_info *vnic __rte_unused, void *cbdata)
3310 uint32_t *count = cbdata;
3312 *count = *count + 1;
3315 static int bnxt_vnic_count_hwrm_stub(struct bnxt *bp __rte_unused,
3316 struct bnxt_vnic_info *vnic __rte_unused)
3321 int bnxt_vf_vnic_count(struct bnxt *bp, uint16_t vf)
3325 bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf, bnxt_vnic_count,
3326 &count, bnxt_vnic_count_hwrm_stub);
3331 static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,
3334 struct hwrm_func_vf_vnic_ids_query_input req = {0};
3335 struct hwrm_func_vf_vnic_ids_query_output *resp =
3336 bp->hwrm_cmd_resp_addr;
3339 /* First query all VNIC ids */
3340 HWRM_PREP(req, FUNC_VF_VNIC_IDS_QUERY);
3342 req.vf_id = rte_cpu_to_le_16(bp->pf.first_vf_id + vf);
3343 req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf.total_vnics);
3344 req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_mem_virt2iova(vnic_ids));
3346 if (req.vnic_id_tbl_addr == 0) {
3349 "unable to map VNIC ID table address to physical memory\n");
3352 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3355 PMD_DRV_LOG(ERR, "hwrm_func_vf_vnic_query failed rc:%d\n", rc);
3357 } else if (resp->error_code) {
3358 rc = rte_le_to_cpu_16(resp->error_code);
3360 PMD_DRV_LOG(ERR, "hwrm_func_vf_vnic_query error %d\n", rc);
3363 rc = rte_le_to_cpu_32(resp->vnic_id_cnt);
3371 * This function queries the VNIC IDs for a specified VF. It then calls
3372 * the vnic_cb to update the necessary field in vnic_info with cbdata.
3373 * Then it calls the hwrm_cb function to program this new vnic configuration.
3375 int bnxt_hwrm_func_vf_vnic_query_and_config(struct bnxt *bp, uint16_t vf,
3376 void (*vnic_cb)(struct bnxt_vnic_info *, void *), void *cbdata,
3377 int (*hwrm_cb)(struct bnxt *bp, struct bnxt_vnic_info *vnic))
3379 struct bnxt_vnic_info vnic;
3381 int i, num_vnic_ids;
3386 /* First query all VNIC ids */
3387 vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
3388 vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
3389 RTE_CACHE_LINE_SIZE);
3390 if (vnic_ids == NULL) {
3394 for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
3395 rte_mem_lock_page(((char *)vnic_ids) + sz);
3397 num_vnic_ids = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
3399 if (num_vnic_ids < 0)
3400 return num_vnic_ids;
3402 /* Retrieve each VNIC, apply vnic_cb to it, then reprogram it via hwrm_cb */
3404 for (i = 0; i < num_vnic_ids; i++) {
3405 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
3406 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
3407 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic, bp->pf.first_vf_id + vf);
3410 if (vnic.mru <= 4) /* Indicates unallocated */
3413 vnic_cb(&vnic, cbdata);
3415 rc = hwrm_cb(bp, &vnic);
3425 int bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(struct bnxt *bp, uint16_t vf,
3428 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3429 struct hwrm_func_cfg_input req = {0};
3432 HWRM_PREP(req, FUNC_CFG);
3434 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3435 req.enables |= rte_cpu_to_le_32(
3436 HWRM_FUNC_CFG_INPUT_ENABLES_VLAN_ANTISPOOF_MODE);
3437 req.vlan_antispoof_mode = on ?
3438 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN :
3439 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_NOCHECK;
3440 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3442 HWRM_CHECK_RESULT();
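/* Find a VF's default VNIC by scanning its VNIC list; returns the
 * firmware VNIC id, or a negative value if none is marked default.
 */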
3448 int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf)
3450 struct bnxt_vnic_info vnic;
3453 int num_vnic_ids, i;
3457 vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
3458 vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
3459 RTE_CACHE_LINE_SIZE);
3460 if (vnic_ids == NULL) {
3465 for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
3466 rte_mem_lock_page(((char *)vnic_ids) + sz);
3468 rc = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
3474 * Loop through to find the default VNIC ID.
3475 * TODO: The easier way would be to obtain the resp->dflt_vnic_id
3476 * by sending the hwrm_func_qcfg command to the firmware.
3478 for (i = 0; i < num_vnic_ids; i++) {
3479 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
3480 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
3481 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic,
3482 bp->pf.first_vf_id + vf);
3485 if (vnic.func_default) {
3487 return vnic.fw_vnic_id;
3490 /* Could not find a default VNIC. */
3491 PMD_DRV_LOG(ERR, "No default VNIC\n");
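/* Install an exact-match (EM) flow using the fields selected by
 * filter->enables; any EM filter previously installed for this
 * bnxt_filter_info is cleared first.
 */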
3497 int bnxt_hwrm_set_em_filter(struct bnxt *bp,
3499 struct bnxt_filter_info *filter)
3502 struct hwrm_cfa_em_flow_alloc_input req = {.req_type = 0 };
3503 struct hwrm_cfa_em_flow_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3504 uint32_t enables = 0;
3506 if (filter->fw_em_filter_id != UINT64_MAX)
3507 bnxt_hwrm_clear_em_filter(bp, filter);
3509 HWRM_PREP(req, CFA_EM_FLOW_ALLOC);
3511 req.flags = rte_cpu_to_le_32(filter->flags);
3513 enables = filter->enables |
3514 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_ID;
3515 req.dst_id = rte_cpu_to_le_16(dst_id);
3517 if (filter->ip_addr_type) {
3518 req.ip_addr_type = filter->ip_addr_type;
3519 enables |= HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
3520 }
3522 if (enables & HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
3523 req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
3525 if (enables & HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_MACADDR)
3526 memcpy(req.src_macaddr, filter->src_macaddr, ETHER_ADDR_LEN);
3529 if (enables & HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_MACADDR)
3530 memcpy(req.dst_macaddr, filter->dst_macaddr, ETHER_ADDR_LEN);
3533 if (enables & HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_OVLAN_VID)
3534 req.ovlan_vid = filter->l2_ovlan;
3536 if (enables & HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IVLAN_VID)
3537 req.ivlan_vid = filter->l2_ivlan;
3539 if (enables & HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_ETHERTYPE)
3540 req.ethertype = rte_cpu_to_be_16(filter->ethertype);
3542 if (enables & HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
3543 req.ip_protocol = filter->ip_protocol;
3545 if (enables & HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_IPADDR)
3546 req.src_ipaddr[0] = rte_cpu_to_be_32(filter->src_ipaddr[0]);
3548 if (enables & HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_IPADDR)
3549 req.dst_ipaddr[0] = rte_cpu_to_be_32(filter->dst_ipaddr[0]);
3551 if (enables & HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_PORT)
3552 req.src_port = rte_cpu_to_be_16(filter->src_port);
3554 if (enables & HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_PORT)
3555 req.dst_port = rte_cpu_to_be_16(filter->dst_port);
3557 if (enables & HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
3558 req.mirror_vnic_id = filter->mirror_vnic_id;
3560 req.enables = rte_cpu_to_le_32(enables);
3562 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3564 HWRM_CHECK_RESULT();
3566 filter->fw_em_filter_id = rte_le_to_cpu_64(resp->em_filter_id);
3572 int bnxt_hwrm_clear_em_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
3575 struct hwrm_cfa_em_flow_free_input req = {.req_type = 0 };
3576 struct hwrm_cfa_em_flow_free_output *resp = bp->hwrm_cmd_resp_addr;
3578 if (filter->fw_em_filter_id == UINT64_MAX)
3581 PMD_DRV_LOG(DEBUG, "Clear EM filter\n");
3582 HWRM_PREP(req, CFA_EM_FLOW_FREE);
3584 req.em_filter_id = rte_cpu_to_le_64(filter->fw_em_filter_id);
3586 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3588 HWRM_CHECK_RESULT();
3591 filter->fw_em_filter_id = -1;
3592 filter->fw_l2_filter_id = -1;
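/* Install an n-tuple flow; as with EM filters, a previous instance for
 * this bnxt_filter_info is cleared before the new one is allocated.
 */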
3597 int bnxt_hwrm_set_ntuple_filter(struct bnxt *bp,
3599 struct bnxt_filter_info *filter)
3602 struct hwrm_cfa_ntuple_filter_alloc_input req = {.req_type = 0 };
3603 struct hwrm_cfa_ntuple_filter_alloc_output *resp =
3604 bp->hwrm_cmd_resp_addr;
3605 uint32_t enables = 0;
3607 if (filter->fw_ntuple_filter_id != UINT64_MAX)
3608 bnxt_hwrm_clear_ntuple_filter(bp, filter);
3610 HWRM_PREP(req, CFA_NTUPLE_FILTER_ALLOC);
3612 req.flags = rte_cpu_to_le_32(filter->flags);
3614 enables = filter->enables |
3615 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
3616 req.dst_id = rte_cpu_to_le_16(dst_id);
3619 if (filter->ip_addr_type) {
3620 req.ip_addr_type = filter->ip_addr_type;
3622 enables |= HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
3623 }
3625 if (enables & HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
3626 req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
3628 if (enables & HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR)
3629 memcpy(req.src_macaddr, filter->src_macaddr, ETHER_ADDR_LEN);
3632 /* if (enables & HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_MACADDR) */
3633 /* memcpy(req.dst_macaddr, filter->dst_macaddr, ETHER_ADDR_LEN); */
3636 if (enables & HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE)
3637 req.ethertype = rte_cpu_to_be_16(filter->ethertype);
3639 if (enables & HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
3640 req.ip_protocol = filter->ip_protocol;
3642 if (enables & HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR)
3643 req.src_ipaddr[0] = rte_cpu_to_le_32(filter->src_ipaddr[0]);
3645 if (enables & HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR_MASK)
3646 req.src_ipaddr_mask[0] =
3647 rte_cpu_to_le_32(filter->src_ipaddr_mask[0]);
3649 if (enables & HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR)
3650 req.dst_ipaddr[0] = rte_cpu_to_le_32(filter->dst_ipaddr[0]);
3652 if (enables & HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR_MASK)
3653 req.dst_ipaddr_mask[0] =
3654 rte_cpu_to_le_32(filter->dst_ipaddr_mask[0]);
3656 if (enables & HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT)
3657 req.src_port = rte_cpu_to_le_16(filter->src_port);
3659 if (enables & HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT_MASK)
3660 req.src_port_mask = rte_cpu_to_le_16(filter->src_port_mask);
3662 if (enables & HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT)
3663 req.dst_port = rte_cpu_to_le_16(filter->dst_port);
3665 if (enables & HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT_MASK)
3666 req.dst_port_mask = rte_cpu_to_le_16(filter->dst_port_mask);
3668 if (enables & HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
3669 req.mirror_vnic_id = filter->mirror_vnic_id;
3671 req.enables = rte_cpu_to_le_32(enables);
3673 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3675 HWRM_CHECK_RESULT();
3677 filter->fw_ntuple_filter_id = rte_le_to_cpu_64(resp->ntuple_filter_id);
3683 int bnxt_hwrm_clear_ntuple_filter(struct bnxt *bp,
3684 struct bnxt_filter_info *filter)
3687 struct hwrm_cfa_ntuple_filter_free_input req = {.req_type = 0 };
3688 struct hwrm_cfa_ntuple_filter_free_output *resp =
3689 bp->hwrm_cmd_resp_addr;
3691 if (filter->fw_ntuple_filter_id == UINT64_MAX)
3694 HWRM_PREP(req, CFA_NTUPLE_FILTER_FREE);
3696 req.ntuple_filter_id = rte_cpu_to_le_64(filter->fw_ntuple_filter_id);
3698 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3700 HWRM_CHECK_RESULT();
3703 filter->fw_ntuple_filter_id = -1;
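/* Spread the VNIC's RSS indirection table across its valid ring groups
 * in round-robin order, then push the table to the firmware.
 */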
3708 int bnxt_vnic_rss_configure(struct bnxt *bp, struct bnxt_vnic_info *vnic)
3710 unsigned int rss_idx, fw_idx, i;
3712 if (vnic->rss_table && vnic->hash_type) {
3714 * Fill the RSS hash & redirection table with
3715 * ring group ids for all VNICs
3717 for (rss_idx = 0, fw_idx = 0; rss_idx < HW_HASH_INDEX_SIZE;
3718 rss_idx++, fw_idx++) {
3719 for (i = 0; i < bp->rx_cp_nr_rings; i++) {
3720 fw_idx %= bp->rx_cp_nr_rings;
3721 if (vnic->fw_grp_ids[fw_idx] != INVALID_HW_RING_ID)
3722 break;
3723 fw_idx++;
3724 }
3726 if (i == bp->rx_cp_nr_rings)
3727 return 0;
3728 vnic->rss_table[rss_idx] =
3729 vnic->fw_grp_ids[fw_idx];
3731 return bnxt_hwrm_vnic_rss_cfg(bp, vnic);