/*-
 *   BSD LICENSE
 *
 *   Copyright(c) Broadcom Limited.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Broadcom Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <unistd.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_version.h>

#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_ring.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"

#include <rte_io.h>

#define HWRM_CMD_TIMEOUT		10000
struct bnxt_plcmodes_cfg {
	uint32_t	flags;
	uint16_t	jumbo_thresh;
	uint16_t	hds_offset;
	uint16_t	hds_threshold;
};

static int page_getenum(size_t size)
{
	if (size <= 1 << 4)
		return 4;
	if (size <= 1 << 12)
		return 12;
	if (size <= 1 << 13)
		return 13;
	if (size <= 1 << 16)
		return 16;
	if (size <= 1 << 21)
		return 21;
	if (size <= 1 << 22)
		return 22;
	if (size <= 1 << 30)
		return 30;
	RTE_LOG(ERR, PMD, "Page size %zu out of range\n", size);
	return sizeof(void *) * 8 - 1;
}

static int page_roundup(size_t size)
{
	return 1 << page_getenum(size);
}
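/*
 * Example (illustrative): page_getenum() maps a size to the enum of the
 * smallest supported page size that covers it, so with the table above a
 * 6000-byte buffer gives page_getenum(6000) == 13 and therefore
 * page_roundup(6000) == 8192, i.e. the size is rounded up to the next
 * supported page size.
 */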
/*
 * HWRM Functions (sent to HWRM)
 * These are named bnxt_hwrm_*() and return -1 if bnxt_hwrm_send_message()
 * fails (i.e. a timeout), and a positive non-zero HWRM error code if the
 * HWRM command was failed by the ChiMP.
 */
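/*
 * For example (illustrative only), a caller interprets the result per the
 * convention above:
 *
 *	rc = bnxt_hwrm_func_reset(bp);
 *	if (rc < 0)
 *		; // transport failure: bnxt_hwrm_send_message() timed out
 *	else if (rc > 0)
 *		; // the ChiMP failed the command with an HWRM error code
 *	else
 *		; // success
 */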
static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
					uint32_t msg_len)
{
	unsigned int i;
	struct input *req = msg;
	struct output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t *data = msg;
	uint8_t *bar;
	uint8_t *valid;
	uint16_t max_req_len = bp->max_req_len;
	struct hwrm_short_input short_input = { 0 };

	if (bp->flags & BNXT_FLAG_SHORT_CMD) {
		void *short_cmd_req = bp->hwrm_short_cmd_req_addr;

		memset(short_cmd_req, 0, bp->max_req_len);
		memcpy(short_cmd_req, req, msg_len);

		short_input.req_type = rte_cpu_to_le_16(req->req_type);
		short_input.signature = rte_cpu_to_le_16(
					HWRM_SHORT_REQ_SIGNATURE_SHORT_CMD);
		short_input.size = rte_cpu_to_le_16(msg_len);
		short_input.req_addr =
			rte_cpu_to_le_64(bp->hwrm_short_cmd_req_dma_addr);

		data = (uint32_t *)&short_input;
		msg_len = sizeof(short_input);

		/* Sync memory write before updating doorbell */
		rte_wmb();

		max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
	}

	/* Write request msg to hwrm channel */
	for (i = 0; i < msg_len; i += 4) {
		bar = (uint8_t *)bp->bar0 + i;
		rte_write32(*data, bar);
		data++;
	}

	/* Zero the rest of the request space */
	for (; i < max_req_len; i += 4) {
		bar = (uint8_t *)bp->bar0 + i;
		rte_write32(0, bar);
	}

	/* Ring channel doorbell */
	bar = (uint8_t *)bp->bar0 + 0x100;
	rte_write32(1, bar);

	/* Poll for the valid bit */
	for (i = 0; i < HWRM_CMD_TIMEOUT; i++) {
		/* Sanity check on the resp->resp_len */
		rte_rmb();
		if (resp->resp_len && resp->resp_len <=
				bp->max_resp_len) {
			/* Last byte of resp contains the valid key */
			valid = (uint8_t *)resp + resp->resp_len - 1;
			if (*valid == HWRM_RESP_VALID_KEY)
				break;
		}
		rte_delay_us(600);
	}

	if (i >= HWRM_CMD_TIMEOUT) {
		RTE_LOG(ERR, PMD, "Error sending msg 0x%04x\n",
			req->req_type);
		return -1;
	}
	return 0;
}
/*
 * HWRM_PREP() should be used to prepare *ALL* HWRM commands. It grabs the
 * spinlock, and does initial processing.
 *
 * HWRM_CHECK_RESULT() checks for errors in the response. On failure it
 * releases the spinlock and returns an error; on success it leaves the
 * spinlock held. If the regular int return codes are not used by the
 * function, HWRM_CHECK_RESULT() should not be used directly; rather it
 * should be copied and modified to suit the function.
 *
 * HWRM_UNLOCK() must be called after all response processing is completed.
 */
#define HWRM_PREP(req, type) do { \
	rte_spinlock_lock(&bp->hwrm_lock); \
	memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
	req.req_type = rte_cpu_to_le_16(HWRM_##type); \
	req.cmpl_ring = rte_cpu_to_le_16(-1); \
	req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++); \
	req.target_id = rte_cpu_to_le_16(0xffff); \
	req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr); \
} while (0)

#define HWRM_CHECK_RESULT() do {\
	if (rc) { \
		RTE_LOG(ERR, PMD, "%s failed rc:%d\n", \
			__func__, rc); \
		rte_spinlock_unlock(&bp->hwrm_lock); \
		if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \
			rc = -EACCES; \
		else if (rc > 0) \
			rc = -EINVAL; \
		return rc; \
	} \
	if (resp->error_code) { \
		rc = rte_le_to_cpu_16(resp->error_code); \
		if (resp->resp_len >= 16) { \
			struct hwrm_err_output *tmp_hwrm_err_op = \
						(void *)resp; \
			RTE_LOG(ERR, PMD, \
				"%s error %d:%d:%08x:%04x\n", \
				__func__, \
				rc, tmp_hwrm_err_op->cmd_err, \
				rte_le_to_cpu_32(\
					tmp_hwrm_err_op->opaque_0), \
				rte_le_to_cpu_16(\
					tmp_hwrm_err_op->opaque_1)); \
		} else { \
			RTE_LOG(ERR, PMD, \
				"%s error %d\n", __func__, rc); \
		} \
		rte_spinlock_unlock(&bp->hwrm_lock); \
		if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \
			rc = -EACCES; \
		else if (rc > 0) \
			rc = -EINVAL; \
		return rc; \
	} \
} while (0)

#define HWRM_UNLOCK()		rte_spinlock_unlock(&bp->hwrm_lock)
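/*
 * Illustrative sketch (not part of the driver): the canonical shape of a
 * bnxt_hwrm_*() command built from the macros above, with FUNC_RESET used
 * purely as a placeholder request type:
 *
 *	int bnxt_hwrm_example(struct bnxt *bp)
 *	{
 *		int rc = 0;
 *		struct hwrm_func_reset_input req = {.req_type = 0 };
 *		struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;
 *
 *		HWRM_PREP(req, FUNC_RESET);	// take lock, fill header
 *		// ...set command-specific request fields here...
 *		rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 *		HWRM_CHECK_RESULT();	// unlocks and returns early on error
 *		// ...read command-specific fields from resp here...
 *		HWRM_UNLOCK();
 *		return rc;
 *	}
 */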
int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, CFA_L2_SET_RX_MASK);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
	req.mask = 0;

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
				 struct bnxt_vnic_info *vnic,
				 uint16_t vlan_count,
				 struct bnxt_vlan_table_entry *vlan_table)
{
	int rc = 0;
	struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t mask = 0;

	if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
		return rc;

	HWRM_PREP(req, CFA_L2_SET_RX_MASK);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

	/*
	 * FIXME: add the multicast flag once multicast-add options are
	 * supported.
	 */
	if (vnic->flags & BNXT_VNIC_INFO_BCAST)
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST;
	if (vnic->flags & BNXT_VNIC_INFO_UNTAGGED)
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN;
	if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
	if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI)
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
	if (vnic->flags & BNXT_VNIC_INFO_MCAST)
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
	if (vnic->mc_addr_cnt) {
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
		req.num_mc_entries = rte_cpu_to_le_32(vnic->mc_addr_cnt);
		req.mc_tbl_addr = rte_cpu_to_le_64(vnic->mc_list_dma_addr);
	}
	if (vlan_table) {
		if (!(mask & HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN))
			mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY;
		req.vlan_tag_tbl_addr = rte_cpu_to_le_64(
			rte_mem_virt2iova(vlan_table));
		req.num_vlan_tags = rte_cpu_to_le_32((uint32_t)vlan_count);
	}
	req.mask = rte_cpu_to_le_32(mask);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid,
			uint16_t vlan_count,
			struct bnxt_vlan_antispoof_table_entry *vlan_table)
{
	int rc = 0;
	struct hwrm_cfa_vlan_antispoof_cfg_input req = {.req_type = 0 };
	struct hwrm_cfa_vlan_antispoof_cfg_output *resp =
						bp->hwrm_cmd_resp_addr;

	/*
	 * Older HWRM versions did not support this command, and the set_rx_mask
	 * list was used for anti-spoof. In 1.8.0, the TX path configuration was
	 * removed from set_rx_mask call, and this command was added.
	 *
	 * This command is also present from 1.7.8.11 and higher,
	 * as well as 1.7.8.0
	 */
	if (bp->fw_ver < ((1 << 24) | (8 << 16))) {
		if (bp->fw_ver != ((1 << 24) | (7 << 16) | (8 << 8))) {
			if (bp->fw_ver < ((1 << 24) | (7 << 16) | (8 << 8) |
					(11)))
				return 0;
		}
	}
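	/*
	 * bp->fw_ver packs the firmware version one byte per component:
	 * (maj << 24) | (min << 16) | (bld << 8) | rsvd. For example,
	 * 1.8.0.0 encodes as 0x01080000 and 1.7.8.11 as 0x0107080b, so the
	 * checks above admit exactly 1.7.8.0, 1.7.8.11 and newer, and 1.8.0
	 * and newer.
	 */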
	HWRM_PREP(req, CFA_VLAN_ANTISPOOF_CFG);
	req.fid = rte_cpu_to_le_16(fid);

	req.vlan_tag_mask_tbl_addr =
		rte_cpu_to_le_64(rte_mem_virt2iova(vlan_table));
	req.num_vlan_entries = rte_cpu_to_le_32((uint32_t)vlan_count);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_clear_l2_filter(struct bnxt *bp,
			   struct bnxt_filter_info *filter)
{
	int rc = 0;
	struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;

	if (filter->fw_l2_filter_id == UINT64_MAX)
		return 0;

	HWRM_PREP(req, CFA_L2_FILTER_FREE);

	req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	filter->fw_l2_filter_id = -1;

	return 0;
}
int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
			 uint16_t dst_id,
			 struct bnxt_filter_info *filter)
{
	int rc = 0;
	struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	const struct rte_eth_vmdq_rx_conf *conf =
		    &dev_conf->rx_adv_conf.vmdq_rx_conf;
	uint32_t enables = 0;
	uint16_t j = dst_id - 1;

	//TODO: Is there a better way to add VLANs to each VNIC in case of VMDQ
	if ((dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) &&
	    conf->pool_map[j].pools & (1UL << j)) {
		RTE_LOG(DEBUG, PMD,
			"Add vlan %u to vmdq pool %u\n",
			conf->pool_map[j].vlan_id, j);

		filter->l2_ivlan = conf->pool_map[j].vlan_id;
		filter->enables |=
			HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN |
			HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK;
	}

	if (filter->fw_l2_filter_id != UINT64_MAX)
		bnxt_hwrm_clear_l2_filter(bp, filter);

	HWRM_PREP(req, CFA_L2_FILTER_ALLOC);

	req.flags = rte_cpu_to_le_32(filter->flags);

	enables = filter->enables |
	      HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
	req.dst_id = rte_cpu_to_le_16(dst_id);

	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
		memcpy(req.l2_addr, filter->l2_addr,
		       ETHER_ADDR_LEN);
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
		memcpy(req.l2_addr_mask, filter->l2_addr_mask,
		       ETHER_ADDR_LEN);
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
		req.l2_ovlan = filter->l2_ovlan;
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN)
		req.l2_ivlan = filter->l2_ivlan;
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
		req.l2_ovlan_mask = filter->l2_ovlan_mask;
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK)
		req.l2_ivlan_mask = filter->l2_ivlan_mask;
	if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID)
		req.src_id = rte_cpu_to_le_32(filter->src_id);
	if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE)
		req.src_type = filter->src_type;

	req.enables = rte_cpu_to_le_32(enables);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);
	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_func_qcaps_input req = {.req_type = 0 };
	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
	uint16_t new_max_vfs;
	int i;

	HWRM_PREP(req, FUNC_QCAPS);

	req.fid = rte_cpu_to_le_16(0xffff);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
	if (BNXT_PF(bp)) {
		bp->pf.port_id = resp->port_id;
		bp->pf.first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
		new_max_vfs = bp->pdev->max_vfs;
		if (new_max_vfs != bp->pf.max_vfs) {
			if (bp->pf.vf_info)
				rte_free(bp->pf.vf_info);
			bp->pf.vf_info = rte_malloc("bnxt_vf_info",
			    sizeof(bp->pf.vf_info[0]) * new_max_vfs, 0);
			bp->pf.max_vfs = new_max_vfs;
			for (i = 0; i < new_max_vfs; i++) {
				bp->pf.vf_info[i].fid = bp->pf.first_vf_id + i;
				bp->pf.vf_info[i].vlan_table =
					rte_zmalloc("VF VLAN table",
						    getpagesize(),
						    getpagesize());
				if (bp->pf.vf_info[i].vlan_table == NULL)
					RTE_LOG(ERR, PMD,
					"Failed to alloc VLAN table for VF %d\n",
					i);
				else
					rte_mem_lock_page(
						bp->pf.vf_info[i].vlan_table);
				bp->pf.vf_info[i].vlan_as_table =
					rte_zmalloc("VF VLAN AS table",
						    getpagesize(),
						    getpagesize());
				if (bp->pf.vf_info[i].vlan_as_table == NULL)
					RTE_LOG(ERR, PMD,
					"Failed to alloc VLAN AS table for VF %d\n",
					i);
				else
					rte_mem_lock_page(
					       bp->pf.vf_info[i].vlan_as_table);
				STAILQ_INIT(&bp->pf.vf_info[i].filter);
			}
		}
	}

	bp->fw_fid = rte_le_to_cpu_32(resp->fid);
	memcpy(bp->dflt_mac_addr, &resp->mac_address, ETHER_ADDR_LEN);
	bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
	bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
	bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
	bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
	bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
	/* TODO: For now, do not support VMDq/RFS on VFs. */
	if (BNXT_PF(bp)) {
		if (bp->pf.max_vfs)
			bp->max_vnics = 1;
		else
			bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
	} else {
		bp->max_vnics = 1;
	}
	bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
	if (BNXT_PF(bp))
		bp->pf.total_vnics = rte_le_to_cpu_16(resp->max_vnics);
	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_func_reset(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_func_reset_input req = {.req_type = 0 };
	struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, FUNC_RESET);

	req.enables = rte_cpu_to_le_32(0);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_func_driver_register(struct bnxt *bp)
{
	int rc;
	struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
	struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;

	if (bp->flags & BNXT_FLAG_REGISTERED)
		return 0;

	HWRM_PREP(req, FUNC_DRV_RGTR);
	req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
			HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
	req.ver_maj = RTE_VER_YEAR;
	req.ver_min = RTE_VER_MONTH;
	req.ver_upd = RTE_VER_MINOR;

	if (BNXT_PF(bp)) {
		req.enables |= rte_cpu_to_le_32(
			HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_REQ_FWD);
		memcpy(req.vf_req_fwd, bp->pf.vf_req_fwd,
		       RTE_MIN(sizeof(req.vf_req_fwd),
			       sizeof(bp->pf.vf_req_fwd)));
	}

	req.async_event_fwd[0] |= rte_cpu_to_le_32(0x1);   /* TODO: Use MACRO */
	//memset(req.async_event_fwd, 0xff, sizeof(req.async_event_fwd));

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	bp->flags |= BNXT_FLAG_REGISTERED;

	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_ver_get(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_ver_get_input req = {.req_type = 0 };
	struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t my_version;
	uint32_t fw_version;
	uint16_t max_resp_len;
	char type[RTE_MEMZONE_NAMESIZE];
	uint32_t dev_caps_cfg;

	bp->max_req_len = HWRM_MAX_REQ_LEN;
	HWRM_PREP(req, VER_GET);

	req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
	req.hwrm_intf_min = HWRM_VERSION_MINOR;
	req.hwrm_intf_upd = HWRM_VERSION_UPDATE;

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	RTE_LOG(INFO, PMD, "%d.%d.%d:%d.%d.%d\n",
		resp->hwrm_intf_maj, resp->hwrm_intf_min,
		resp->hwrm_intf_upd,
		resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld);
	bp->fw_ver = (resp->hwrm_fw_maj << 24) | (resp->hwrm_fw_min << 16) |
			(resp->hwrm_fw_bld << 8) | resp->hwrm_fw_rsvd;
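	/*
	 * Example: firmware 20.6.143.0 is stored as
	 * (20 << 24) | (6 << 16) | (143 << 8) | 0 == 0x14068f00.
	 */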
	RTE_LOG(INFO, PMD, "Driver HWRM version: %d.%d.%d\n",
		HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);

	my_version = HWRM_VERSION_MAJOR << 16;
	my_version |= HWRM_VERSION_MINOR << 8;
	my_version |= HWRM_VERSION_UPDATE;

	fw_version = resp->hwrm_intf_maj << 16;
	fw_version |= resp->hwrm_intf_min << 8;
	fw_version |= resp->hwrm_intf_upd;
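	/*
	 * Example: an HWRM interface version of 1.7.7 packs to
	 * (1 << 16) | (7 << 8) | 7 == 0x010707 in this scheme.
	 */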
	if (resp->hwrm_intf_maj != HWRM_VERSION_MAJOR) {
		RTE_LOG(ERR, PMD, "Unsupported firmware API version\n");
		rc = -EINVAL;
		goto error;
	}

	if (my_version != fw_version) {
		RTE_LOG(INFO, PMD, "BNXT Driver/HWRM API mismatch.\n");
		if (my_version < fw_version) {
			RTE_LOG(INFO, PMD,
				"Firmware API version is newer than driver.\n");
			RTE_LOG(INFO, PMD,
				"The driver may be missing features.\n");
		} else {
			RTE_LOG(INFO, PMD,
				"Firmware API version is older than driver.\n");
			RTE_LOG(INFO, PMD,
				"Not all driver features may be functional.\n");
		}
	}

	if (bp->max_req_len > resp->max_req_win_len) {
		RTE_LOG(ERR, PMD, "Unsupported request length\n");
		rc = -EINVAL;
	}
	bp->max_req_len = rte_le_to_cpu_16(resp->max_req_win_len);
	max_resp_len = resp->max_resp_len;
	dev_caps_cfg = rte_le_to_cpu_32(resp->dev_caps_cfg);

	if (bp->max_resp_len != max_resp_len) {
		sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x",
			bp->pdev->addr.domain, bp->pdev->addr.bus,
			bp->pdev->addr.devid, bp->pdev->addr.function);

		rte_free(bp->hwrm_cmd_resp_addr);

		bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0);
		if (bp->hwrm_cmd_resp_addr == NULL) {
			rc = -ENOMEM;
			goto error;
		}
		rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
		bp->hwrm_cmd_resp_dma_addr =
			rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
		if (bp->hwrm_cmd_resp_dma_addr == 0) {
			RTE_LOG(ERR, PMD,
			"Unable to map response buffer to physical memory.\n");
			rc = -ENOMEM;
			goto error;
		}
		bp->max_resp_len = max_resp_len;
	}

	if ((dev_caps_cfg &
		HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
	    (dev_caps_cfg &
	     HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) {
		RTE_LOG(DEBUG, PMD, "Short command supported\n");

		rte_free(bp->hwrm_short_cmd_req_addr);

		bp->hwrm_short_cmd_req_addr = rte_malloc(type,
							 bp->max_req_len, 0);
		if (bp->hwrm_short_cmd_req_addr == NULL) {
			rc = -ENOMEM;
			goto error;
		}
		rte_mem_lock_page(bp->hwrm_short_cmd_req_addr);
		bp->hwrm_short_cmd_req_dma_addr =
			rte_mem_virt2iova(bp->hwrm_short_cmd_req_addr);
		if (bp->hwrm_short_cmd_req_dma_addr == 0) {
			rte_free(bp->hwrm_short_cmd_req_addr);
			RTE_LOG(ERR, PMD,
				"Unable to map buffer to physical memory.\n");
			rc = -ENOMEM;
			goto error;
		}

		bp->flags |= BNXT_FLAG_SHORT_CMD;
	}

error:
	HWRM_UNLOCK();
	return rc;
}
int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
{
	int rc;
	struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
	struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;

	if (!(bp->flags & BNXT_FLAG_REGISTERED))
		return 0;

	HWRM_PREP(req, FUNC_DRV_UNRGTR);
	req.flags = flags;

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	bp->flags &= ~BNXT_FLAG_REGISTERED;

	return rc;
}
static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
{
	int rc = 0;
	struct hwrm_port_phy_cfg_input req = {0};
	struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t enables = 0;

	HWRM_PREP(req, PORT_PHY_CFG);

	if (conf->link_up) {
		/* Setting a fixed speed while autoneg is on, so disable autoneg */
		if (bp->link_info.auto_mode && conf->link_speed) {
			req.auto_mode = HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE;
			RTE_LOG(DEBUG, PMD, "Disabling AutoNeg\n");
		}

		req.flags = rte_cpu_to_le_32(conf->phy_flags);
		req.force_link_speed = rte_cpu_to_le_16(conf->link_speed);
		enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
		/*
		 * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
		 * any auto mode, even "none".
		 */
		if (!conf->link_speed) {
			/* No speeds specified. Enable AutoNeg - all speeds */
			req.auto_mode =
				HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS;
		}
		/* AutoNeg - Advertise speeds specified. */
		if (conf->auto_link_speed_mask &&
		    !(conf->phy_flags & HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE)) {
			req.auto_mode =
				HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
			req.auto_link_speed_mask =
				conf->auto_link_speed_mask;
			enables |=
			HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
		}

		req.auto_duplex = conf->duplex;
		enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
		req.auto_pause = conf->auto_pause;
		req.force_pause = conf->force_pause;
		/* Set force_pause if there is no auto or if there is a force */
		if (req.auto_pause && !req.force_pause)
			enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
		else
			enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;

		req.enables = rte_cpu_to_le_32(enables);
	} else {
		req.flags =
		rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN);
		RTE_LOG(INFO, PMD, "Force Link Down\n");
	}

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
				   struct bnxt_link_info *link_info)
{
	int rc = 0;
	struct hwrm_port_phy_qcfg_input req = {0};
	struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, PORT_PHY_QCFG);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	link_info->phy_link_status = resp->link;
	link_info->link_up =
		(link_info->phy_link_status ==
		 HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) ? 1 : 0;
	link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
	link_info->duplex = resp->duplex_cfg;
	link_info->pause = resp->pause;
	link_info->auto_pause = resp->auto_pause;
	link_info->force_pause = resp->force_pause;
	link_info->auto_mode = resp->auto_mode;
	link_info->phy_type = resp->phy_type;
	link_info->media_type = resp->media_type;

	link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
	link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
	link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
	link_info->force_link_speed = rte_le_to_cpu_16(resp->force_link_speed);
	link_info->phy_ver[0] = resp->phy_maj;
	link_info->phy_ver[1] = resp->phy_min;
	link_info->phy_ver[2] = resp->phy_bld;

	HWRM_UNLOCK();

	RTE_LOG(DEBUG, PMD, "Link Speed %d\n", link_info->link_speed);
	RTE_LOG(DEBUG, PMD, "Auto Mode %d\n", link_info->auto_mode);
	RTE_LOG(DEBUG, PMD, "Support Speeds %x\n", link_info->support_speeds);
	RTE_LOG(DEBUG, PMD, "Auto Link Speed %x\n", link_info->auto_link_speed);
	RTE_LOG(DEBUG, PMD, "Auto Link Speed Mask %x\n",
		link_info->auto_link_speed_mask);
	RTE_LOG(DEBUG, PMD, "Forced Link Speed %x\n",
		link_info->force_link_speed);

	return rc;
}
int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
	struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, QUEUE_QPORTCFG);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

#define GET_QUEUE_INFO(x) \
	bp->cos_queue[x].id = resp->queue_id##x; \
	bp->cos_queue[x].profile = resp->queue_id##x##_service_profile

	GET_QUEUE_INFO(0);
	GET_QUEUE_INFO(1);
	GET_QUEUE_INFO(2);
	GET_QUEUE_INFO(3);
	GET_QUEUE_INFO(4);
	GET_QUEUE_INFO(5);
	GET_QUEUE_INFO(6);
	GET_QUEUE_INFO(7);

	HWRM_UNLOCK();

	return rc;
}
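/*
 * The GET_QUEUE_INFO() macro above relies on token pasting; for example,
 * GET_QUEUE_INFO(0) expands to
 *
 *	bp->cos_queue[0].id = resp->queue_id0;
 *	bp->cos_queue[0].profile = resp->queue_id0_service_profile;
 *
 * which is why the numbered response fields can be copied without an array.
 */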
int bnxt_hwrm_ring_alloc(struct bnxt *bp,
			 struct bnxt_ring *ring,
			 uint32_t ring_type, uint32_t map_index,
			 uint32_t stats_ctx_id, uint32_t cmpl_ring_id)
{
	int rc = 0;
	uint32_t enables = 0;
	struct hwrm_ring_alloc_input req = {.req_type = 0 };
	struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_ALLOC);

	req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
	req.fbo = rte_cpu_to_le_32(0);
	/* Association of ring index with doorbell index */
	req.logical_id = rte_cpu_to_le_16(map_index);
	req.length = rte_cpu_to_le_32(ring->ring_size);

	switch (ring_type) {
	case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
		req.queue_id = bp->cos_queue[0].id;
		/* FALLTHROUGH */
	case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
		req.ring_type = ring_type;
		req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
		req.stat_ctx_id = rte_cpu_to_le_16(stats_ctx_id);
		if (stats_ctx_id != INVALID_STATS_CTX_ID)
			enables |=
			HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
		break;
	case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
		req.ring_type = ring_type;
		/*
		 * TODO: Some HWRM versions crash with
		 * HWRM_RING_ALLOC_INPUT_INT_MODE_POLL
		 */
		req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
		break;
	default:
		RTE_LOG(ERR, PMD, "hwrm alloc invalid ring type %d\n",
			ring_type);
		HWRM_UNLOCK();
		return -1;
	}
	req.enables = rte_cpu_to_le_32(enables);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	if (rc || resp->error_code) {
		if (rc == 0 && resp->error_code)
			rc = rte_le_to_cpu_16(resp->error_code);
		switch (ring_type) {
		case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
			RTE_LOG(ERR, PMD,
				"hwrm_ring_alloc cp failed. rc:%d\n", rc);
			HWRM_UNLOCK();
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
			RTE_LOG(ERR, PMD,
				"hwrm_ring_alloc rx failed. rc:%d\n", rc);
			HWRM_UNLOCK();
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
			RTE_LOG(ERR, PMD,
				"hwrm_ring_alloc tx failed. rc:%d\n", rc);
			HWRM_UNLOCK();
			return rc;
		default:
			RTE_LOG(ERR, PMD, "Invalid ring. rc:%d\n", rc);
			HWRM_UNLOCK();
			return rc;
		}
	}

	ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);
	HWRM_UNLOCK();
	return rc;
}
int bnxt_hwrm_ring_free(struct bnxt *bp,
			struct bnxt_ring *ring, uint32_t ring_type)
{
	int rc;
	struct hwrm_ring_free_input req = {.req_type = 0 };
	struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_FREE);

	req.ring_type = ring_type;
	req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	if (rc || resp->error_code) {
		if (rc == 0 && resp->error_code)
			rc = rte_le_to_cpu_16(resp->error_code);
		HWRM_UNLOCK();

		switch (ring_type) {
		case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
			RTE_LOG(ERR, PMD, "hwrm_ring_free cp failed. rc:%d\n",
				rc);
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
			RTE_LOG(ERR, PMD, "hwrm_ring_free rx failed. rc:%d\n",
				rc);
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
			RTE_LOG(ERR, PMD, "hwrm_ring_free tx failed. rc:%d\n",
				rc);
			return rc;
		default:
			RTE_LOG(ERR, PMD, "Invalid ring, rc:%d\n", rc);
			return rc;
		}
	}
	HWRM_UNLOCK();
	return 0;
}
int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
{
	int rc = 0;
	struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
	struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_GRP_ALLOC);

	req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
	req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
	req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
	req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	bp->grp_info[idx].fw_grp_id =
	    rte_le_to_cpu_16(resp->ring_group_id);

	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
{
	int rc;
	struct hwrm_ring_grp_free_input req = {.req_type = 0 };
	struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_GRP_FREE);

	req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;
	return rc;
}
int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
{
	int rc = 0;
	struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
	struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;

	if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
		return rc;

	HWRM_PREP(req, STAT_CTX_CLR_STATS);

	req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
				unsigned int idx __rte_unused)
{
	int rc;
	struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
	struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, STAT_CTX_ALLOC);

	req.update_period_ms = rte_cpu_to_le_32(0);

	req.stats_dma_addr =
	    rte_cpu_to_le_64(cpr->hw_stats_map);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	cpr->hw_stats_ctx_id = rte_le_to_cpu_16(resp->stat_ctx_id);

	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
				unsigned int idx __rte_unused)
{
	int rc;
	struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
	struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, STAT_CTX_FREE);

	req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0, i, j;
	struct hwrm_vnic_alloc_input req = { 0 };
	struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	/* map ring groups to this vnic */
	RTE_LOG(DEBUG, PMD, "Alloc VNIC. Start %x, End %x\n",
		vnic->start_grp_id, vnic->end_grp_id);
	for (i = vnic->start_grp_id, j = 0; i < vnic->end_grp_id; i++, j++)
		vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;

	vnic->dflt_ring_grp = bp->grp_info[vnic->start_grp_id].fw_grp_id;
	vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
	vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
	vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
	vnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN +
				ETHER_CRC_LEN + VLAN_TAG_SIZE;
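	/*
	 * Example: with the default MTU of 1500 the MRU becomes
	 * 1500 + 14 (Ethernet header) + 4 (CRC) + 4 (VLAN tag) = 1522.
	 */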
	HWRM_PREP(req, VNIC_ALLOC);

	if (vnic->func_default)
		req.flags =
			rte_cpu_to_le_32(HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
	HWRM_UNLOCK();
	RTE_LOG(DEBUG, PMD, "VNIC ID %x\n", vnic->fw_vnic_id);
	return rc;
}
static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp,
					struct bnxt_vnic_info *vnic,
					struct bnxt_plcmodes_cfg *pmode)
{
	int rc = 0;
	struct hwrm_vnic_plcmodes_qcfg_input req = {.req_type = 0 };
	struct hwrm_vnic_plcmodes_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_PLCMODES_QCFG);

	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	pmode->flags = rte_le_to_cpu_32(resp->flags);
	/* dflt_vnic bit doesn't exist in the _cfg command */
	pmode->flags &= ~(HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_DFLT_VNIC);
	pmode->jumbo_thresh = rte_le_to_cpu_16(resp->jumbo_thresh);
	pmode->hds_offset = rte_le_to_cpu_16(resp->hds_offset);
	pmode->hds_threshold = rte_le_to_cpu_16(resp->hds_threshold);

	HWRM_UNLOCK();

	return rc;
}

static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp,
				       struct bnxt_vnic_info *vnic,
				       struct bnxt_plcmodes_cfg *pmode)
{
	int rc = 0;
	struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
	struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_PLCMODES_CFG);

	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
	req.flags = rte_cpu_to_le_32(pmode->flags);
	req.jumbo_thresh = rte_cpu_to_le_16(pmode->jumbo_thresh);
	req.hds_offset = rte_cpu_to_le_16(pmode->hds_offset);
	req.hds_threshold = rte_cpu_to_le_16(pmode->hds_threshold);
	req.enables = rte_cpu_to_le_32(
	    HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID |
	    HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_OFFSET_VALID |
	    HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID
	);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_cfg_input req = {.req_type = 0 };
	struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t ctx_enable_flag = 0;
	struct bnxt_plcmodes_cfg pmodes;

	if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
		RTE_LOG(DEBUG, PMD, "VNIC ID %x\n", vnic->fw_vnic_id);
		return rc;
	}

	rc = bnxt_hwrm_vnic_plcmodes_qcfg(bp, vnic, &pmodes);
	if (rc)
		return rc;

	HWRM_PREP(req, VNIC_CFG);

	/* Only RSS supported for now; TBD: COS & LB */
	req.enables =
	    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP);
	if (vnic->lb_rule != 0xffff)
		ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE;
	if (vnic->cos_rule != 0xffff)
		ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE;
	if (vnic->rss_rule != 0xffff) {
		ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_MRU;
		ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
	}
	req.enables |= rte_cpu_to_le_32(ctx_enable_flag);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
	req.dflt_ring_grp = rte_cpu_to_le_16(vnic->dflt_ring_grp);
	req.rss_rule = rte_cpu_to_le_16(vnic->rss_rule);
	req.cos_rule = rte_cpu_to_le_16(vnic->cos_rule);
	req.lb_rule = rte_cpu_to_le_16(vnic->lb_rule);
	req.mru = rte_cpu_to_le_16(vnic->mru);
	if (vnic->func_default)
		req.flags |=
		    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
	if (vnic->vlan_strip)
		req.flags |=
		    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
	if (vnic->bd_stall)
		req.flags |=
		    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE);
	if (vnic->roce_dual)
		req.flags |= rte_cpu_to_le_32(
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE);
	if (vnic->roce_only)
		req.flags |= rte_cpu_to_le_32(
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE);
	if (vnic->rss_dflt_cr)
		req.flags |= rte_cpu_to_le_32(
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	rc = bnxt_hwrm_vnic_plcmodes_cfg(bp, vnic, &pmodes);

	return rc;
}
int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
		int16_t fw_vf_id)
{
	int rc = 0;
	struct hwrm_vnic_qcfg_input req = {.req_type = 0 };
	struct hwrm_vnic_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

	if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
		RTE_LOG(DEBUG, PMD, "VNIC QCFG ID %d\n", vnic->fw_vnic_id);
		return rc;
	}
	HWRM_PREP(req, VNIC_QCFG);

	req.enables =
		rte_cpu_to_le_32(HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
	req.vf_id = rte_cpu_to_le_16(fw_vf_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	vnic->dflt_ring_grp = rte_le_to_cpu_16(resp->dflt_ring_grp);
	vnic->rss_rule = rte_le_to_cpu_16(resp->rss_rule);
	vnic->cos_rule = rte_le_to_cpu_16(resp->cos_rule);
	vnic->lb_rule = rte_le_to_cpu_16(resp->lb_rule);
	vnic->mru = rte_le_to_cpu_16(resp->mru);
	vnic->func_default = rte_le_to_cpu_32(
			resp->flags) & HWRM_VNIC_QCFG_OUTPUT_FLAGS_DEFAULT;
	vnic->vlan_strip = rte_le_to_cpu_32(resp->flags) &
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE;
	vnic->bd_stall = rte_le_to_cpu_32(resp->flags) &
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE;
	vnic->roce_dual = rte_le_to_cpu_32(resp->flags) &
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE;
	vnic->roce_only = rte_le_to_cpu_32(resp->flags) &
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE;
	vnic->rss_dflt_cr = rte_le_to_cpu_32(resp->flags) &
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE;

	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
	struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
						bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	vnic->rss_rule = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
	HWRM_UNLOCK();
	RTE_LOG(DEBUG, PMD, "VNIC RSS Rule %x\n", vnic->rss_rule);

	return rc;
}

int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
	struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
						bp->hwrm_cmd_resp_addr;

	if (vnic->rss_rule == 0xffff) {
		RTE_LOG(DEBUG, PMD, "VNIC RSS Rule %x\n", vnic->rss_rule);
		return rc;
	}
	HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE);

	req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(vnic->rss_rule);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	vnic->rss_rule = INVALID_HW_RING_ID;

	return rc;
}

int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_free_input req = {.req_type = 0 };
	struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;

	if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
		RTE_LOG(DEBUG, PMD, "VNIC FREE ID %x\n", vnic->fw_vnic_id);
		return rc;
	}

	HWRM_PREP(req, VNIC_FREE);

	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	vnic->fw_vnic_id = INVALID_HW_RING_ID;
	return rc;
}
int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
			   struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
	struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_RSS_CFG);

	req.hash_type = rte_cpu_to_le_32(vnic->hash_type);

	req.ring_grp_tbl_addr =
	    rte_cpu_to_le_64(vnic->rss_table_dma_addr);
	req.hash_key_tbl_addr =
	    rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
	req.rss_ctx_idx = rte_cpu_to_le_16(vnic->rss_rule);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
			struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
	struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	uint16_t size;

	if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
		RTE_LOG(DEBUG, PMD, "VNIC ID %x\n", vnic->fw_vnic_id);
		return rc;
	}

	HWRM_PREP(req, VNIC_PLCMODES_CFG);

	req.flags = rte_cpu_to_le_32(
			HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);

	req.enables = rte_cpu_to_le_32(
		HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID);

	size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
	size -= RTE_PKTMBUF_HEADROOM;

	req.jumbo_thresh = rte_cpu_to_le_16(size);
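	/*
	 * Example (typical defaults): a pool created with
	 * RTE_MBUF_DEFAULT_BUF_SIZE has a 2176-byte data room; after
	 * subtracting the 128-byte RTE_PKTMBUF_HEADROOM, jumbo_thresh is
	 * 2048, so any packet that does not fit one mbuf is placed as jumbo.
	 */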
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
			struct bnxt_vnic_info *vnic, bool enable)
{
	int rc = 0;
	struct hwrm_vnic_tpa_cfg_input req = {.req_type = 0 };
	struct hwrm_vnic_tpa_cfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_TPA_CFG);

	if (enable) {
		req.enables = rte_cpu_to_le_32(
				HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS |
				HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS |
				HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN);
		req.flags = rte_cpu_to_le_32(
				HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA |
				HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA |
				HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE |
				HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO |
				HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN |
			HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ);
		req.max_agg_segs = rte_cpu_to_le_16(5);
		req.max_aggs =
			rte_cpu_to_le_16(HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX);
		req.min_agg_len = rte_cpu_to_le_32(512);
	}
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr)
{
	struct hwrm_func_cfg_input req = {0};
	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	int rc;

	req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
	req.enables = rte_cpu_to_le_32(
			HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
	memcpy(req.dflt_mac_addr, mac_addr, sizeof(req.dflt_mac_addr));
	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);

	HWRM_PREP(req, FUNC_CFG);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	bp->pf.vf_info[vf].random_mac = false;

	return rc;
}
int bnxt_hwrm_func_qstats_tx_drop(struct bnxt *bp, uint16_t fid,
				  uint64_t *dropped)
{
	int rc = 0;
	struct hwrm_func_qstats_input req = {.req_type = 0};
	struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, FUNC_QSTATS);

	req.fid = rte_cpu_to_le_16(fid);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	if (dropped)
		*dropped = rte_le_to_cpu_64(resp->tx_drop_pkts);

	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid,
			  struct rte_eth_stats *stats)
{
	int rc = 0;
	struct hwrm_func_qstats_input req = {.req_type = 0};
	struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, FUNC_QSTATS);

	req.fid = rte_cpu_to_le_16(fid);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	stats->ipackets = rte_le_to_cpu_64(resp->rx_ucast_pkts);
	stats->ipackets += rte_le_to_cpu_64(resp->rx_mcast_pkts);
	stats->ipackets += rte_le_to_cpu_64(resp->rx_bcast_pkts);
	stats->ibytes = rte_le_to_cpu_64(resp->rx_ucast_bytes);
	stats->ibytes += rte_le_to_cpu_64(resp->rx_mcast_bytes);
	stats->ibytes += rte_le_to_cpu_64(resp->rx_bcast_bytes);

	stats->opackets = rte_le_to_cpu_64(resp->tx_ucast_pkts);
	stats->opackets += rte_le_to_cpu_64(resp->tx_mcast_pkts);
	stats->opackets += rte_le_to_cpu_64(resp->tx_bcast_pkts);
	stats->obytes = rte_le_to_cpu_64(resp->tx_ucast_bytes);
	stats->obytes += rte_le_to_cpu_64(resp->tx_mcast_bytes);
	stats->obytes += rte_le_to_cpu_64(resp->tx_bcast_bytes);

	stats->ierrors = rte_le_to_cpu_64(resp->rx_err_pkts);
	stats->oerrors = rte_le_to_cpu_64(resp->tx_err_pkts);

	stats->imissed = rte_le_to_cpu_64(resp->rx_drop_pkts);

	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_func_clr_stats(struct bnxt *bp, uint16_t fid)
{
	int rc = 0;
	struct hwrm_func_clr_stats_input req = {.req_type = 0};
	struct hwrm_func_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, FUNC_CLR_STATS);

	req.fid = rte_cpu_to_le_16(fid);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
/*
 * HWRM utility functions
 */

int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
{
	unsigned int i;
	int rc = 0;

	for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
		struct bnxt_tx_queue *txq;
		struct bnxt_rx_queue *rxq;
		struct bnxt_cp_ring_info *cpr;

		if (i >= bp->rx_cp_nr_rings) {
			txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
			cpr = txq->cp_ring;
		} else {
			rxq = bp->rx_queues[i];
			cpr = rxq->cp_ring;
		}

		rc = bnxt_hwrm_stat_clear(bp, cpr);
		if (rc)
			return rc;
	}
	return 0;
}

int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
{
	int rc;
	unsigned int i;
	struct bnxt_cp_ring_info *cpr;

	for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {

		if (i >= bp->rx_cp_nr_rings) {
			cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
		} else {
			cpr = bp->rx_queues[i]->cp_ring;
			bp->grp_info[i].fw_stats_ctx = -1;
		}
		if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
			rc = bnxt_hwrm_stat_ctx_free(bp, cpr, i);
			cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
			if (rc)
				return rc;
		}
	}
	return 0;
}

int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
{
	unsigned int i;
	int rc = 0;

	for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
		struct bnxt_tx_queue *txq;
		struct bnxt_rx_queue *rxq;
		struct bnxt_cp_ring_info *cpr;

		if (i >= bp->rx_cp_nr_rings) {
			txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
			cpr = txq->cp_ring;
		} else {
			rxq = bp->rx_queues[i];
			cpr = rxq->cp_ring;
		}

		rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, i);

		if (rc)
			return rc;
	}
	return rc;
}

int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
{
	uint16_t idx;
	uint32_t rc = 0;

	for (idx = 0; idx < bp->rx_cp_nr_rings; idx++) {

		if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID)
			continue;

		rc = bnxt_hwrm_ring_grp_free(bp, idx);

		if (rc)
			return rc;
	}
	return rc;
}

static void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
				unsigned int idx __rte_unused)
{
	struct bnxt_ring *cp_ring = cpr->cp_ring_struct;

	bnxt_hwrm_ring_free(bp, cp_ring,
			HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL);
	cp_ring->fw_ring_id = INVALID_HW_RING_ID;
	memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
			sizeof(*cpr->cp_desc_ring));
	cpr->cp_raw_cons = 0;
}
int bnxt_free_all_hwrm_rings(struct bnxt *bp)
{
	unsigned int i;
	int rc = 0;

	for (i = 0; i < bp->tx_cp_nr_rings; i++) {
		struct bnxt_tx_queue *txq = bp->tx_queues[i];
		struct bnxt_tx_ring_info *txr = txq->tx_ring;
		struct bnxt_ring *ring = txr->tx_ring_struct;
		struct bnxt_cp_ring_info *cpr = txq->cp_ring;
		unsigned int idx = bp->rx_cp_nr_rings + i + 1;

		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
			bnxt_hwrm_ring_free(bp, ring,
					HWRM_RING_FREE_INPUT_RING_TYPE_TX);
			ring->fw_ring_id = INVALID_HW_RING_ID;
			memset(txr->tx_desc_ring, 0,
					txr->tx_ring_struct->ring_size *
					sizeof(*txr->tx_desc_ring));
			memset(txr->tx_buf_ring, 0,
					txr->tx_ring_struct->ring_size *
					sizeof(*txr->tx_buf_ring));
			txr->tx_prod = 0;
			txr->tx_cons = 0;
		}
		if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
			bnxt_free_cp_ring(bp, cpr, idx);
			cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
		}
	}

	for (i = 0; i < bp->rx_cp_nr_rings; i++) {
		struct bnxt_rx_queue *rxq = bp->rx_queues[i];
		struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
		struct bnxt_ring *ring = rxr->rx_ring_struct;
		struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
		unsigned int idx = i + 1;

		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
			bnxt_hwrm_ring_free(bp, ring,
					HWRM_RING_FREE_INPUT_RING_TYPE_RX);
			ring->fw_ring_id = INVALID_HW_RING_ID;
			bp->grp_info[idx].rx_fw_ring_id = INVALID_HW_RING_ID;
			memset(rxr->rx_desc_ring, 0,
					rxr->rx_ring_struct->ring_size *
					sizeof(*rxr->rx_desc_ring));
			memset(rxr->rx_buf_ring, 0,
					rxr->rx_ring_struct->ring_size *
					sizeof(*rxr->rx_buf_ring));
			rxr->rx_prod = 0;
		}
		ring = rxr->ag_ring_struct;
		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
			bnxt_hwrm_ring_free(bp, ring,
					    HWRM_RING_FREE_INPUT_RING_TYPE_RX);
			ring->fw_ring_id = INVALID_HW_RING_ID;
			memset(rxr->ag_buf_ring, 0,
			       rxr->ag_ring_struct->ring_size *
			       sizeof(*rxr->ag_buf_ring));
			rxr->ag_prod = 0;
			bp->grp_info[i].ag_fw_ring_id = INVALID_HW_RING_ID;
		}
		if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
			bnxt_free_cp_ring(bp, cpr, idx);
			bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
			cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
		}
	}

	/* Default completion ring */
	{
		struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;

		if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
			bnxt_free_cp_ring(bp, cpr, 0);
			cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
		}
	}

	return rc;
}
int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
{
	uint16_t i;
	uint32_t rc = 0;

	for (i = 0; i < bp->rx_cp_nr_rings; i++) {
		rc = bnxt_hwrm_ring_grp_alloc(bp, i);

		if (rc)
			return rc;
	}
	return rc;
}

void bnxt_free_hwrm_resources(struct bnxt *bp)
{
	/* Release memzone */
	rte_free(bp->hwrm_cmd_resp_addr);
	rte_free(bp->hwrm_short_cmd_req_addr);
	bp->hwrm_cmd_resp_addr = NULL;
	bp->hwrm_short_cmd_req_addr = NULL;
	bp->hwrm_cmd_resp_dma_addr = 0;
	bp->hwrm_short_cmd_req_dma_addr = 0;
}

int bnxt_alloc_hwrm_resources(struct bnxt *bp)
{
	struct rte_pci_device *pdev = bp->pdev;
	char type[RTE_MEMZONE_NAMESIZE];

	sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x", pdev->addr.domain,
		pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
	bp->max_resp_len = HWRM_MAX_RESP_LEN;
	bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
	rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
	if (bp->hwrm_cmd_resp_addr == NULL)
		return -ENOMEM;
	bp->hwrm_cmd_resp_dma_addr =
		rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
	if (bp->hwrm_cmd_resp_dma_addr == 0) {
		RTE_LOG(ERR, PMD,
			"unable to map response address to physical memory\n");
		return -ENOMEM;
	}
	rte_spinlock_init(&bp->hwrm_lock);

	return 0;
}
int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	struct bnxt_filter_info *filter;
	int rc = 0;

	STAILQ_FOREACH(filter, &vnic->filter, next) {
		if (filter->filter_type == HWRM_CFA_EM_FILTER)
			rc = bnxt_hwrm_clear_em_filter(bp, filter);
		else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
			rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
		else
			rc = bnxt_hwrm_clear_l2_filter(bp, filter);
		STAILQ_REMOVE(&vnic->filter, filter, bnxt_filter_info, next);
	}
	return rc;
}

static int
bnxt_clear_hwrm_vnic_flows(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	struct bnxt_filter_info *filter;
	struct rte_flow *flow;
	int rc = 0;

	STAILQ_FOREACH(flow, &vnic->flow_list, next) {
		filter = flow->filter;
		RTE_LOG(ERR, PMD, "filter type %d\n", filter->filter_type);
		if (filter->filter_type == HWRM_CFA_EM_FILTER)
			rc = bnxt_hwrm_clear_em_filter(bp, filter);
		else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
			rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
		else
			rc = bnxt_hwrm_clear_l2_filter(bp, filter);

		STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
		rte_free(flow);
	}
	return rc;
}

int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	struct bnxt_filter_info *filter;
	int rc = 0;

	STAILQ_FOREACH(filter, &vnic->filter, next) {
		if (filter->filter_type == HWRM_CFA_EM_FILTER)
			rc = bnxt_hwrm_set_em_filter(bp, filter->dst_id,
						     filter);
		else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
			rc = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id,
							 filter);
		else
			rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
						     filter);
		if (rc)
			break;
	}
	return rc;
}

void bnxt_free_tunnel_ports(struct bnxt *bp)
{
	if (bp->vxlan_port_cnt)
		bnxt_hwrm_tunnel_dst_port_free(bp, bp->vxlan_fw_dst_port_id,
			HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN);
	bp->vxlan_port = 0;
	if (bp->geneve_port_cnt)
		bnxt_hwrm_tunnel_dst_port_free(bp, bp->geneve_fw_dst_port_id,
			HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE);
	bp->geneve_port = 0;
}

void bnxt_free_all_hwrm_resources(struct bnxt *bp)
{
	int i;

	if (bp->vnic_info == NULL)
		return;

	/*
	 * Cleanup VNICs in reverse order, to make sure the L2 filter
	 * from vnic0 is last to be cleaned up.
	 */
	for (i = bp->nr_vnics - 1; i >= 0; i--) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

		bnxt_clear_hwrm_vnic_flows(bp, vnic);

		bnxt_clear_hwrm_vnic_filters(bp, vnic);

		bnxt_hwrm_vnic_ctx_free(bp, vnic);

		bnxt_hwrm_vnic_tpa_cfg(bp, vnic, false);

		bnxt_hwrm_vnic_free(bp, vnic);

		rte_free(vnic->fw_grp_ids);
	}
	/* Ring resources */
	bnxt_free_all_hwrm_rings(bp);
	bnxt_free_all_hwrm_ring_grps(bp);
	bnxt_free_all_hwrm_stat_ctxs(bp);
	bnxt_free_tunnel_ports(bp);
}

static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
{
	uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;

	if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
		return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;

	switch (conf_link_speed) {
	case ETH_LINK_SPEED_10M_HD:
	case ETH_LINK_SPEED_100M_HD:
		return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
	}
	return hw_link_duplex;
}

static uint16_t bnxt_check_eth_link_autoneg(uint32_t conf_link)
{
	return (conf_link & ETH_LINK_SPEED_FIXED) ? 0 : 1;
}

static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
{
	uint16_t eth_link_speed = 0;

	if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
		return ETH_LINK_SPEED_AUTONEG;

	switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
	case ETH_LINK_SPEED_100M:
	case ETH_LINK_SPEED_100M_HD:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
		break;
	case ETH_LINK_SPEED_1G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
		break;
	case ETH_LINK_SPEED_2_5G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
		break;
	case ETH_LINK_SPEED_10G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
		break;
	case ETH_LINK_SPEED_20G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
		break;
	case ETH_LINK_SPEED_25G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
		break;
	case ETH_LINK_SPEED_40G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
		break;
	case ETH_LINK_SPEED_50G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
		break;
	default:
		RTE_LOG(ERR, PMD,
			"Unsupported link speed %d; default to AUTO\n",
			conf_link_speed);
		break;
	}
	return eth_link_speed;
}

#define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
		ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
		ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
		ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G)

static int bnxt_valid_link_speed(uint32_t link_speed, uint16_t port_id)
{
	uint32_t one_speed;

	if (link_speed == ETH_LINK_SPEED_AUTONEG)
		return 0;

	if (link_speed & ETH_LINK_SPEED_FIXED) {
		one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;
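		/* x & (x - 1) is nonzero iff more than one speed bit is set */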
		if (one_speed & (one_speed - 1)) {
			RTE_LOG(ERR, PMD,
				"Invalid advertised speeds (%u) for port %u\n",
				link_speed, port_id);
			return -EINVAL;
		}
		if ((one_speed & BNXT_SUPPORTED_SPEEDS) != one_speed) {
			RTE_LOG(ERR, PMD,
				"Unsupported advertised speed (%u) for port %u\n",
				link_speed, port_id);
			return -EINVAL;
		}
	} else {
		if (!(link_speed & BNXT_SUPPORTED_SPEEDS)) {
			RTE_LOG(ERR, PMD,
				"Unsupported advertised speeds (%u) for port %u\n",
				link_speed, port_id);
			return -EINVAL;
		}
	}
	return 0;
}

static uint32_t
bnxt_parse_eth_link_speed_mask(struct bnxt *bp, uint32_t link_speed)
{
	uint16_t ret = 0;

	if (link_speed == ETH_LINK_SPEED_AUTONEG) {
		if (bp->link_info.support_speeds)
			return bp->link_info.support_speeds;
		link_speed = BNXT_SUPPORTED_SPEEDS;
	}

	if (link_speed & ETH_LINK_SPEED_100M)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
	if (link_speed & ETH_LINK_SPEED_100M_HD)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
	if (link_speed & ETH_LINK_SPEED_1G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
	if (link_speed & ETH_LINK_SPEED_2_5G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
	if (link_speed & ETH_LINK_SPEED_10G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
	if (link_speed & ETH_LINK_SPEED_20G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
	if (link_speed & ETH_LINK_SPEED_25G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
	if (link_speed & ETH_LINK_SPEED_40G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
	if (link_speed & ETH_LINK_SPEED_50G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
	return ret;
}
static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
{
	uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;

	switch (hw_link_speed) {
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
		eth_link_speed = ETH_SPEED_NUM_100M;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
		eth_link_speed = ETH_SPEED_NUM_1G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
		eth_link_speed = ETH_SPEED_NUM_2_5G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
		eth_link_speed = ETH_SPEED_NUM_10G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
		eth_link_speed = ETH_SPEED_NUM_20G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
		eth_link_speed = ETH_SPEED_NUM_25G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
		eth_link_speed = ETH_SPEED_NUM_40G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
		eth_link_speed = ETH_SPEED_NUM_50G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
	default:
		RTE_LOG(ERR, PMD, "HWRM link speed %d not defined\n",
			hw_link_speed);
		break;
	}
	return eth_link_speed;
}

static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
{
	uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;

	switch (hw_link_duplex) {
	case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
	case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
		eth_link_duplex = ETH_LINK_FULL_DUPLEX;
		break;
	case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
		eth_link_duplex = ETH_LINK_HALF_DUPLEX;
		break;
	default:
		RTE_LOG(ERR, PMD, "HWRM link duplex %d not defined\n",
			hw_link_duplex);
		break;
	}
	return eth_link_duplex;
}

int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
{
	int rc = 0;
	struct bnxt_link_info *link_info = &bp->link_info;

	rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
	if (rc) {
		RTE_LOG(ERR, PMD,
			"Get link config failed with rc %d\n", rc);
		goto exit;
	}
	if (link_info->link_speed)
		link->link_speed =
			bnxt_parse_hw_link_speed(link_info->link_speed);
	else
		link->link_speed = ETH_SPEED_NUM_NONE;
	link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
	link->link_status = link_info->link_up;
	link->link_autoneg = link_info->auto_mode ==
		HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
		ETH_LINK_FIXED : ETH_LINK_AUTONEG;
exit:
	return rc;
}
int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
{
	int rc = 0;
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	struct bnxt_link_info link_req;
	uint16_t speed, autoneg;

	if (BNXT_NPAR_PF(bp) || BNXT_VF(bp))
		return 0;

	rc = bnxt_valid_link_speed(dev_conf->link_speeds,
			bp->eth_dev->data->port_id);
	if (rc)
		goto error;

	memset(&link_req, 0, sizeof(link_req));
	link_req.link_up = link_up;
	if (!link_up)
		goto port_phy_cfg;

	autoneg = bnxt_check_eth_link_autoneg(dev_conf->link_speeds);
	speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
	link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
	/* Autoneg can be done only when the FW allows */
	if (autoneg == 1 && !(bp->link_info.auto_link_speed ||
			bp->link_info.force_link_speed)) {
		link_req.phy_flags |=
				HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
		link_req.auto_link_speed_mask =
			bnxt_parse_eth_link_speed_mask(bp,
						       dev_conf->link_speeds);
	} else {
		if (bp->link_info.phy_type ==
		    HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET ||
		    bp->link_info.phy_type ==
		    HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE ||
		    bp->link_info.media_type ==
		    HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP) {
			RTE_LOG(ERR, PMD, "10GBase-T devices must autoneg\n");
			return -EINVAL;
		}

		link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
		/* If user wants a particular speed try that first. */
		if (speed)
			link_req.link_speed = speed;
		else if (bp->link_info.force_link_speed)
			link_req.link_speed = bp->link_info.force_link_speed;
		else
			link_req.link_speed = bp->link_info.auto_link_speed;
	}
	link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
	link_req.auto_pause = bp->link_info.auto_pause;
	link_req.force_pause = bp->link_info.force_pause;

port_phy_cfg:
	rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
	if (rc) {
		RTE_LOG(ERR, PMD,
			"Set link config failed with rc %d\n", rc);
	}

error:
	return rc;
}
int bnxt_hwrm_func_qcfg(struct bnxt *bp)
{
	struct hwrm_func_qcfg_input req = {0};
	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	int rc = 0;

	HWRM_PREP(req, FUNC_QCFG);
	req.fid = rte_cpu_to_le_16(0xffff);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	/* Hardcoded 0xfff VLAN ID mask */
	bp->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;

	switch (resp->port_partition_type) {
	case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
	case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
	case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
		bp->port_partition_type = resp->port_partition_type;
		break;
	default:
		bp->port_partition_type = 0;
		break;
	}

	HWRM_UNLOCK();

	return rc;
}
static void copy_func_cfg_to_qcaps(struct hwrm_func_cfg_input *fcfg,
				   struct hwrm_func_qcaps_output *qcaps)
{
	qcaps->max_rsscos_ctx = fcfg->num_rsscos_ctxs;
	memcpy(qcaps->mac_address, fcfg->dflt_mac_addr,
	       sizeof(qcaps->mac_address));
	qcaps->max_l2_ctxs = fcfg->num_l2_ctxs;
	qcaps->max_rx_rings = fcfg->num_rx_rings;
	qcaps->max_tx_rings = fcfg->num_tx_rings;
	qcaps->max_cmpl_rings = fcfg->num_cmpl_rings;
	qcaps->max_stat_ctx = fcfg->num_stat_ctxs;
	qcaps->max_vfs = 0;
	qcaps->first_vf_id = 0;
	qcaps->max_vnics = fcfg->num_vnics;
	qcaps->max_decap_records = 0;
	qcaps->max_encap_records = 0;
	qcaps->max_tx_wm_flows = 0;
	qcaps->max_tx_em_flows = 0;
	qcaps->max_rx_wm_flows = 0;
	qcaps->max_rx_em_flows = 0;
	qcaps->max_flow_id = 0;
	qcaps->max_mcast_filters = fcfg->num_mcast_filters;
	qcaps->max_sp_tx_rings = 0;
	qcaps->max_hw_ring_grps = fcfg->num_hw_ring_grps;
}
2261 static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
2263 struct hwrm_func_cfg_input req = {0};
2264 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2267 req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
2268 HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
2269 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
2270 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
2271 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
2272 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
2273 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
2274 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
2275 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
2276 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
2277 req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
2278 req.mtu = rte_cpu_to_le_16(BNXT_MAX_MTU);
2279 req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2280 ETHER_CRC_LEN + VLAN_TAG_SIZE);
2281 req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
2282 req.num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx);
2283 req.num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings);
2284 req.num_tx_rings = rte_cpu_to_le_16(tx_rings);
2285 req.num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings);
2286 req.num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx);
2287 req.num_vnics = rte_cpu_to_le_16(bp->max_vnics);
2288 req.num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps);
2289 req.fid = rte_cpu_to_le_16(0xffff);
2291 HWRM_PREP(req, FUNC_CFG);
2293 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2295 HWRM_CHECK_RESULT();
2301 static void populate_vf_func_cfg_req(struct bnxt *bp,
2302 struct hwrm_func_cfg_input *req,
2305 req->enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
2306 HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
2307 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
2308 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
2309 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
2310 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
2311 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
2312 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
2313 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
2314 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
2316 req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2317 ETHER_CRC_LEN + VLAN_TAG_SIZE);
2318 req->mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2319 ETHER_CRC_LEN + VLAN_TAG_SIZE);
2320 req->num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx /
2322 req->num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
2323 req->num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings /
2325 req->num_tx_rings = rte_cpu_to_le_16(bp->max_tx_rings / (num_vfs + 1));
2326 req->num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings / (num_vfs + 1));
2327 req->num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));
2328 /* TODO: For now, do not support VMDq/RFS on VFs. */
2329 req->num_vnics = rte_cpu_to_le_16(1);
2330 req->num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps /
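/*
 * Worked example of the even split above: with bp->max_tx_rings = 64 and
 * num_vfs = 7, each of the eight functions (PF + 7 VFs) is offered
 * 64 / (7 + 1) = 8 TX rings. The same division applies to the completion
 * rings, stat contexts, and ring groups, while num_vnics stays pinned at
 * 1 per the TODO above.
 */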
2334 static void add_random_mac_if_needed(struct bnxt *bp,
2335 struct hwrm_func_cfg_input *cfg_req,
2338 struct ether_addr mac;
2340 if (bnxt_hwrm_func_qcfg_vf_default_mac(bp, vf, &mac))
2343 if (memcmp(mac.addr_bytes, "\x00\x00\x00\x00\x00\x00", 6) == 0) {
2345 rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
2346 eth_random_addr(cfg_req->dflt_mac_addr);
2347 bp->pf.vf_info[vf].random_mac = true;
2349 memcpy(cfg_req->dflt_mac_addr, mac.addr_bytes, ETHER_ADDR_LEN);
2353 static void reserve_resources_from_vf(struct bnxt *bp,
2354 struct hwrm_func_cfg_input *cfg_req,
2357 struct hwrm_func_qcaps_input req = {0};
2358 struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
2361 /* Get the actual allocated values now */
2362 HWRM_PREP(req, FUNC_QCAPS);
2363 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2364 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2367 RTE_LOG(ERR, PMD, "hwrm_func_qcaps failed rc:%d\n", rc);
2368 copy_func_cfg_to_qcaps(cfg_req, resp);
2369 } else if (resp->error_code) {
2370 rc = rte_le_to_cpu_16(resp->error_code);
2371 RTE_LOG(ERR, PMD, "hwrm_func_qcaps error %d\n", rc);
2372 copy_func_cfg_to_qcaps(cfg_req, resp);
2375 bp->max_rsscos_ctx -= rte_le_to_cpu_16(resp->max_rsscos_ctx);
2376 bp->max_stat_ctx -= rte_le_to_cpu_16(resp->max_stat_ctx);
2377 bp->max_cp_rings -= rte_le_to_cpu_16(resp->max_cmpl_rings);
2378 bp->max_tx_rings -= rte_le_to_cpu_16(resp->max_tx_rings);
2379 bp->max_rx_rings -= rte_le_to_cpu_16(resp->max_rx_rings);
2380 bp->max_l2_ctx -= rte_le_to_cpu_16(resp->max_l2_ctxs);
2382 * TODO: While VMDq is not supported with VFs, max_vnics is always
2383 * forced to 1 in this case.
2385 //bp->max_vnics -= rte_le_to_cpu_16(resp->max_vnics);
2386 bp->max_ring_grps -= rte_le_to_cpu_16(resp->max_hw_ring_grps);
2391 int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf)
2393 struct hwrm_func_qcfg_input req = {0};
2394 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2397 /* Query the VF's current default VLAN */
2398 HWRM_PREP(req, FUNC_QCFG);
2399 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2400 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2402 RTE_LOG(ERR, PMD, "hwrm_func_qcfg failed rc:%d\n", rc);
2404 } else if (resp->error_code) {
2405 rc = rte_le_to_cpu_16(resp->error_code);
2406 RTE_LOG(ERR, PMD, "hwrm_func_qcfg error %d\n", rc);
2409 rc = rte_le_to_cpu_16(resp->vlan);
2416 static int update_pf_resource_max(struct bnxt *bp)
2418 struct hwrm_func_qcfg_input req = {0};
2419 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2422 /* And copy the allocated numbers into the pf struct */
2423 HWRM_PREP(req, FUNC_QCFG);
2424 req.fid = rte_cpu_to_le_16(0xffff);
2425 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2426 HWRM_CHECK_RESULT();
2428 /* Only TX ring value reflects actual allocation? TODO */
2429 bp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
2430 bp->pf.evb_mode = resp->evb_mode;
2437 int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
2442 RTE_LOG(ERR, PMD, "Attempt to allocate VFs on a VF!\n");
2446 rc = bnxt_hwrm_func_qcaps(bp);
2450 bp->pf.func_cfg_flags &=
2451 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
2452 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
2453 bp->pf.func_cfg_flags |=
2454 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE;
2455 rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
2459 int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
2461 struct hwrm_func_cfg_input req = {0};
2462 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2469 RTE_LOG(ERR, PMD, "Attempt to allocate VFs on a VF!\n");
2473 rc = bnxt_hwrm_func_qcaps(bp);
2478 bp->pf.active_vfs = num_vfs;
2481 * First, configure the PF to only use one TX ring. This ensures that
2482 * there are enough rings for all VFs.
2484 * If we don't do this, when we call func_alloc() later, we will lock
2485 * extra rings to the PF that won't be available during func_cfg() of the VFs.
2488 * This has been fixed with firmware versions above 20.6.54
2490 bp->pf.func_cfg_flags &=
2491 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
2492 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
2493 bp->pf.func_cfg_flags |=
2494 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE;
2495 rc = bnxt_hwrm_pf_func_cfg(bp, 1);
2500 * Now, create and register a buffer to hold forwarded VF requests
2502 req_buf_sz = num_vfs * HWRM_MAX_REQ_LEN;
2503 bp->pf.vf_req_buf = rte_malloc("bnxt_vf_fwd", req_buf_sz,
2504 page_roundup(req_buf_sz));
2505 if (bp->pf.vf_req_buf == NULL) {
2509 for (sz = 0; sz < req_buf_sz; sz += getpagesize())
2510 rte_mem_lock_page(((char *)bp->pf.vf_req_buf) + sz);
2511 for (i = 0; i < num_vfs; i++)
2512 bp->pf.vf_info[i].req_buf = ((char *)bp->pf.vf_req_buf) +
2513 (i * HWRM_MAX_REQ_LEN);
2515 rc = bnxt_hwrm_func_buf_rgtr(bp);
2519 populate_vf_func_cfg_req(bp, &req, num_vfs);
2521 bp->pf.active_vfs = 0;
2522 for (i = 0; i < num_vfs; i++) {
2523 add_random_mac_if_needed(bp, &req, i);
2525 HWRM_PREP(req, FUNC_CFG);
2526 req.flags = rte_cpu_to_le_32(bp->pf.vf_info[i].func_cfg_flags);
2527 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[i].fid);
2528 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2530 /* Clear enable flag for next pass */
2531 req.enables &= ~rte_cpu_to_le_32(
2532 HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
2534 if (rc || resp->error_code) {
2536 "Failed to initizlie VF %d\n", i);
2538 "Not all VFs available. (%d, %d)\n",
2539 rc, resp->error_code);
2546 reserve_resources_from_vf(bp, &req, i);
2547 bp->pf.active_vfs++;
2548 bnxt_hwrm_func_clr_stats(bp, bp->pf.vf_info[i].fid);
2552 * Now configure the PF to use "the rest" of the resources.
2553 * We're using STD_TX_RING_MODE here, which will limit the number of
2554 * TX rings; this allows QoS to function properly. Not setting it
2555 * will cause the PF rings to break bandwidth settings.
2557 rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
2561 rc = update_pf_resource_max(bp);
2568 bnxt_hwrm_func_buf_unrgtr(bp);
2572 int bnxt_hwrm_pf_evb_mode(struct bnxt *bp)
2574 struct hwrm_func_cfg_input req = {0};
2575 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2578 HWRM_PREP(req, FUNC_CFG);
2580 req.fid = rte_cpu_to_le_16(0xffff);
2581 req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_EVB_MODE);
2582 req.evb_mode = bp->pf.evb_mode;
2584 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2585 HWRM_CHECK_RESULT();
2591 int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port,
2592 uint8_t tunnel_type)
2594 struct hwrm_tunnel_dst_port_alloc_input req = {0};
2595 struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
2598 HWRM_PREP(req, TUNNEL_DST_PORT_ALLOC);
2599 req.tunnel_type = tunnel_type;
2600 req.tunnel_dst_port_val = port;
2601 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2602 HWRM_CHECK_RESULT();
2604 switch (tunnel_type) {
2605 case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN:
2606 bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
2607 bp->vxlan_port = port;
2609 case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE:
2610 bp->geneve_fw_dst_port_id = resp->tunnel_dst_port_id;
2611 bp->geneve_port = port;
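/*
 * Usage sketch (hypothetical caller, e.g. a udp_tunnel_port_add handler).
 * Unlike bnxt_hwrm_tunnel_dst_port_free() below, this function stores the
 * port value exactly as passed, so the caller is assumed to supply it in
 * the byte order the firmware expects:
 *
 *	rc = bnxt_hwrm_tunnel_dst_port_alloc(bp, rte_cpu_to_be_16(4789),
 *		HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN);
 */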
2622 int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, uint16_t port,
2623 uint8_t tunnel_type)
2625 struct hwrm_tunnel_dst_port_free_input req = {0};
2626 struct hwrm_tunnel_dst_port_free_output *resp = bp->hwrm_cmd_resp_addr;
2629 HWRM_PREP(req, TUNNEL_DST_PORT_FREE);
2631 req.tunnel_type = tunnel_type;
2632 req.tunnel_dst_port_id = rte_cpu_to_be_16(port);
2633 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2635 HWRM_CHECK_RESULT();
2641 int bnxt_hwrm_func_cfg_vf_set_flags(struct bnxt *bp, uint16_t vf,
2644 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2645 struct hwrm_func_cfg_input req = {0};
2648 HWRM_PREP(req, FUNC_CFG);
2650 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2651 req.flags = rte_cpu_to_le_32(flags);
2652 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2654 HWRM_CHECK_RESULT();
2660 void vf_vnic_set_rxmask_cb(struct bnxt_vnic_info *vnic, void *flagp)
2662 uint32_t *flag = flagp;
2664 vnic->flags = *flag;
2667 int bnxt_set_rx_mask_no_vlan(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2669 return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
2672 int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
2675 struct hwrm_func_buf_rgtr_input req = {.req_type = 0 };
2676 struct hwrm_func_buf_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
2678 HWRM_PREP(req, FUNC_BUF_RGTR);
2680 req.req_buf_num_pages = rte_cpu_to_le_16(1);
2681 req.req_buf_page_size = rte_cpu_to_le_16(
2682 page_getenum(bp->pf.active_vfs * HWRM_MAX_REQ_LEN));
2683 req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
2684 req.req_buf_page_addr[0] =
2685 rte_cpu_to_le_64(rte_mem_virt2iova(bp->pf.vf_req_buf));
2686 if (req.req_buf_page_addr[0] == 0) {
2688 "unable to map buffer address to physical memory\n");
2692 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2694 HWRM_CHECK_RESULT();
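/*
 * Note: req_buf_page_size carries a power-of-two exponent computed by
 * page_getenum(), e.g. a 16 KiB request buffer is encoded as
 * page_getenum(16384) == 14, i.e. 1 << 14 bytes.
 */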
2700 int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp)
2703 struct hwrm_func_buf_unrgtr_input req = {.req_type = 0 };
2704 struct hwrm_func_buf_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
2706 HWRM_PREP(req, FUNC_BUF_UNRGTR);
2708 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2710 HWRM_CHECK_RESULT();
2716 int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp)
2718 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2719 struct hwrm_func_cfg_input req = {0};
2722 HWRM_PREP(req, FUNC_CFG);
2724 req.fid = rte_cpu_to_le_16(0xffff);
2725 req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
2726 req.enables = rte_cpu_to_le_32(
2727 HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
2728 req.async_event_cr = rte_cpu_to_le_16(
2729 bp->def_cp_ring->cp_ring_struct->fw_ring_id);
2730 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2732 HWRM_CHECK_RESULT();
2738 int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp)
2740 struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2741 struct hwrm_func_vf_cfg_input req = {0};
2744 HWRM_PREP(req, FUNC_VF_CFG);
2746 req.enables = rte_cpu_to_le_32(
2747 HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
2748 req.async_event_cr = rte_cpu_to_le_16(
2749 bp->def_cp_ring->cp_ring_struct->fw_ring_id);
2750 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2752 HWRM_CHECK_RESULT();
2758 int bnxt_hwrm_set_default_vlan(struct bnxt *bp, int vf, uint8_t is_vf)
2760 struct hwrm_func_cfg_input req = {0};
2761 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2762 uint16_t dflt_vlan, fid;
2763 uint32_t func_cfg_flags;
2766 HWRM_PREP(req, FUNC_CFG);
2769 dflt_vlan = bp->pf.vf_info[vf].dflt_vlan;
2770 fid = bp->pf.vf_info[vf].fid;
2771 func_cfg_flags = bp->pf.vf_info[vf].func_cfg_flags;
2773 fid = rte_cpu_to_le_16(0xffff);
2774 func_cfg_flags = bp->pf.func_cfg_flags;
2775 dflt_vlan = bp->vlan;
2778 req.flags = rte_cpu_to_le_32(func_cfg_flags);
2779 req.fid = rte_cpu_to_le_16(fid);
2780 req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
2781 req.dflt_vlan = rte_cpu_to_le_16(dflt_vlan);
2783 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2785 HWRM_CHECK_RESULT();
2791 int bnxt_hwrm_func_bw_cfg(struct bnxt *bp, uint16_t vf,
2792 uint16_t max_bw, uint16_t enables)
2794 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2795 struct hwrm_func_cfg_input req = {0};
2798 HWRM_PREP(req, FUNC_CFG);
2800 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2801 req.enables |= rte_cpu_to_le_32(enables);
2802 req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
2803 req.max_bw = rte_cpu_to_le_32(max_bw);
2804 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2806 HWRM_CHECK_RESULT();
2812 int bnxt_hwrm_set_vf_vlan(struct bnxt *bp, int vf)
2814 struct hwrm_func_cfg_input req = {0};
2815 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2818 HWRM_PREP(req, FUNC_CFG);
2820 req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
2821 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2822 req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
2823 req.dflt_vlan = rte_cpu_to_le_16(bp->pf.vf_info[vf].dflt_vlan);
2825 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2827 HWRM_CHECK_RESULT();
2833 int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,
2834 void *encaped, size_t ec_size)
2837 struct hwrm_reject_fwd_resp_input req = {.req_type = 0};
2838 struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
2840 if (ec_size > sizeof(req.encap_request))
2843 HWRM_PREP(req, REJECT_FWD_RESP);
2845 req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
2846 memcpy(req.encap_request, encaped, ec_size);
2848 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2850 HWRM_CHECK_RESULT();
2856 int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf,
2857 struct ether_addr *mac)
2859 struct hwrm_func_qcfg_input req = {0};
2860 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2863 HWRM_PREP(req, FUNC_QCFG);
2865 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2866 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2868 HWRM_CHECK_RESULT();
2870 memcpy(mac->addr_bytes, resp->mac_address, ETHER_ADDR_LEN);
2877 int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
2878 void *encaped, size_t ec_size)
2881 struct hwrm_exec_fwd_resp_input req = {.req_type = 0};
2882 struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
2884 if (ec_size > sizeof(req.encap_request))
2887 HWRM_PREP(req, EXEC_FWD_RESP);
2889 req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
2890 memcpy(req.encap_request, encaped, ec_size);
2892 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2894 HWRM_CHECK_RESULT();
2900 int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx,
2901 struct rte_eth_stats *stats, uint8_t rx)
2904 struct hwrm_stat_ctx_query_input req = {.req_type = 0};
2905 struct hwrm_stat_ctx_query_output *resp = bp->hwrm_cmd_resp_addr;
2907 HWRM_PREP(req, STAT_CTX_QUERY);
2909 req.stat_ctx_id = rte_cpu_to_le_32(cid);
2911 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2913 HWRM_CHECK_RESULT();
2916 stats->q_ipackets[idx] = rte_le_to_cpu_64(resp->rx_ucast_pkts);
2917 stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_mcast_pkts);
2918 stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_bcast_pkts);
2919 stats->q_ibytes[idx] = rte_le_to_cpu_64(resp->rx_ucast_bytes);
2920 stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_mcast_bytes);
2921 stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_bcast_bytes);
2922 stats->q_errors[idx] = rte_le_to_cpu_64(resp->rx_err_pkts);
2923 stats->q_errors[idx] += rte_le_to_cpu_64(resp->rx_drop_pkts);
2925 stats->q_opackets[idx] = rte_le_to_cpu_64(resp->tx_ucast_pkts);
2926 stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_mcast_pkts);
2927 stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_bcast_pkts);
2928 stats->q_obytes[idx] = rte_le_to_cpu_64(resp->tx_ucast_bytes);
2929 stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_mcast_bytes);
2930 stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_bcast_bytes);
2931 stats->q_errors[idx] += rte_le_to_cpu_64(resp->tx_err_pkts);
2940 int bnxt_hwrm_port_qstats(struct bnxt *bp)
2942 struct hwrm_port_qstats_input req = {0};
2943 struct hwrm_port_qstats_output *resp = bp->hwrm_cmd_resp_addr;
2944 struct bnxt_pf_info *pf = &bp->pf;
2947 if (!(bp->flags & BNXT_FLAG_PORT_STATS))
2950 HWRM_PREP(req, PORT_QSTATS);
2952 req.port_id = rte_cpu_to_le_16(pf->port_id);
2953 req.tx_stat_host_addr = rte_cpu_to_le_64(bp->hw_tx_port_stats_map);
2954 req.rx_stat_host_addr = rte_cpu_to_le_64(bp->hw_rx_port_stats_map);
2955 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2957 HWRM_CHECK_RESULT();
2963 int bnxt_hwrm_port_clr_stats(struct bnxt *bp)
2965 struct hwrm_port_clr_stats_input req = {0};
2966 struct hwrm_port_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
2967 struct bnxt_pf_info *pf = &bp->pf;
2970 if (!(bp->flags & BNXT_FLAG_PORT_STATS))
2973 HWRM_PREP(req, PORT_CLR_STATS);
2975 req.port_id = rte_cpu_to_le_16(pf->port_id);
2976 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2978 HWRM_CHECK_RESULT();
2984 int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
2986 struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
2987 struct hwrm_port_led_qcaps_input req = {0};
2993 HWRM_PREP(req, PORT_LED_QCAPS);
2994 req.port_id = bp->pf.port_id;
2995 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2997 HWRM_CHECK_RESULT();
2999 if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
3002 bp->num_leds = resp->num_leds;
3003 memcpy(bp->leds, &resp->led0_id,
3004 sizeof(bp->leds[0]) * bp->num_leds);
3005 for (i = 0; i < bp->num_leds; i++) {
3006 struct bnxt_led_info *led = &bp->leds[i];
3008 uint16_t caps = led->led_state_caps;
3010 if (!led->led_group_id ||
3011 !BNXT_LED_ALT_BLINK_CAP(caps)) {
3023 int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on)
3025 struct hwrm_port_led_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3026 struct hwrm_port_led_cfg_input req = {0};
3027 struct bnxt_led_cfg *led_cfg;
3028 uint8_t led_state = HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_DEFAULT;
3029 uint16_t duration = 0;
3032 if (!bp->num_leds || BNXT_VF(bp))
3035 HWRM_PREP(req, PORT_LED_CFG);
3038 led_state = HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT;
3039 duration = rte_cpu_to_le_16(500);
3041 req.port_id = bp->pf.port_id;
3042 req.num_leds = bp->num_leds;
3043 led_cfg = (struct bnxt_led_cfg *)&req.led0_id;
3044 for (i = 0; i < bp->num_leds; i++, led_cfg++) {
3045 req.enables |= BNXT_LED_DFLT_ENABLES(i);
3046 led_cfg->led_id = bp->leds[i].led_id;
3047 led_cfg->led_state = led_state;
3048 led_cfg->led_blink_on = duration;
3049 led_cfg->led_blink_off = duration;
3050 led_cfg->led_group_id = bp->leds[i].led_group_id;
3053 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3055 HWRM_CHECK_RESULT();
3061 int bnxt_hwrm_nvm_get_dir_info(struct bnxt *bp, uint32_t *entries,
3065 struct hwrm_nvm_get_dir_info_input req = {0};
3066 struct hwrm_nvm_get_dir_info_output *resp = bp->hwrm_cmd_resp_addr;
3068 HWRM_PREP(req, NVM_GET_DIR_INFO);
3070 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3072 HWRM_CHECK_RESULT();
3076 *entries = rte_le_to_cpu_32(resp->entries);
3077 *length = rte_le_to_cpu_32(resp->entry_length);
3082 int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data)
3085 uint32_t dir_entries;
3086 uint32_t entry_length;
3089 rte_iova_t dma_handle;
3090 struct hwrm_nvm_get_dir_entries_input req = {0};
3091 struct hwrm_nvm_get_dir_entries_output *resp = bp->hwrm_cmd_resp_addr;
3093 rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length);
3097 *data++ = dir_entries;
3098 *data++ = entry_length;
3100 memset(data, 0xff, len);
3102 buflen = dir_entries * entry_length;
3103 buf = rte_malloc("nvm_dir", buflen, 0);
3104 rte_mem_lock_page(buf);
3107 dma_handle = rte_mem_virt2iova(buf);
3108 if (dma_handle == 0) {
3110 "unable to map response address to physical memory\n");
3113 HWRM_PREP(req, NVM_GET_DIR_ENTRIES);
3114 req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
3115 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3118 memcpy(data, buf, len > buflen ? buflen : len);
3121 HWRM_CHECK_RESULT();
3127 int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index,
3128 uint32_t offset, uint32_t length,
3133 rte_iova_t dma_handle;
3134 struct hwrm_nvm_read_input req = {0};
3135 struct hwrm_nvm_read_output *resp = bp->hwrm_cmd_resp_addr;
3137 buf = rte_malloc("nvm_item", length, 0);
3138 rte_mem_lock_page(buf);
3142 dma_handle = rte_mem_virt2iova(buf);
3143 if (dma_handle == 0) {
3145 "unable to map response address to physical memory\n");
3148 HWRM_PREP(req, NVM_READ);
3149 req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
3150 req.dir_idx = rte_cpu_to_le_16(index);
3151 req.offset = rte_cpu_to_le_32(offset);
3152 req.len = rte_cpu_to_le_32(length);
3153 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3155 memcpy(data, buf, length);
3158 HWRM_CHECK_RESULT();
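/*
 * The NVM helpers above all share one DMA-buffer pattern: allocate with
 * rte_malloc(), fault the pages in with rte_mem_lock_page(), translate
 * with rte_mem_virt2iova(), then hand the IOVA to firmware. A minimal
 * sketch of the pattern (error handling elided; "dma_buf" and length are
 * placeholders):
 *
 *	uint8_t *buf = rte_malloc("dma_buf", length, 0);
 *	rte_mem_lock_page(buf);
 *	rte_iova_t iova = rte_mem_virt2iova(buf);
 *	req.host_dest_addr = rte_cpu_to_le_64(iova);
 */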
3164 int bnxt_hwrm_erase_nvram_directory(struct bnxt *bp, uint8_t index)
3167 struct hwrm_nvm_erase_dir_entry_input req = {0};
3168 struct hwrm_nvm_erase_dir_entry_output *resp = bp->hwrm_cmd_resp_addr;
3170 HWRM_PREP(req, NVM_ERASE_DIR_ENTRY);
3171 req.dir_idx = rte_cpu_to_le_16(index);
3172 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3173 HWRM_CHECK_RESULT();
3180 int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
3181 uint16_t dir_ordinal, uint16_t dir_ext,
3182 uint16_t dir_attr, const uint8_t *data,
3186 struct hwrm_nvm_write_input req = {0};
3187 struct hwrm_nvm_write_output *resp = bp->hwrm_cmd_resp_addr;
3188 rte_iova_t dma_handle;
3191 buf = rte_malloc("nvm_write", data_len, 0);
3192 rte_mem_lock_page(buf);
3196 dma_handle = rte_mem_virt2iova(buf);
3197 if (dma_handle == 0) {
3199 "unable to map response address to physical memory\n");
3202 memcpy(buf, data, data_len);
3204 HWRM_PREP(req, NVM_WRITE);
3206 req.dir_type = rte_cpu_to_le_16(dir_type);
3207 req.dir_ordinal = rte_cpu_to_le_16(dir_ordinal);
3208 req.dir_ext = rte_cpu_to_le_16(dir_ext);
3209 req.dir_attr = rte_cpu_to_le_16(dir_attr);
3210 req.dir_data_length = rte_cpu_to_le_32(data_len);
3211 req.host_src_addr = rte_cpu_to_le_64(dma_handle);
3213 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3216 HWRM_CHECK_RESULT();
3223 bnxt_vnic_count(struct bnxt_vnic_info *vnic __rte_unused, void *cbdata)
3225 uint32_t *count = cbdata;
3227 *count = *count + 1;
3230 static int bnxt_vnic_count_hwrm_stub(struct bnxt *bp __rte_unused,
3231 struct bnxt_vnic_info *vnic __rte_unused)
3236 int bnxt_vf_vnic_count(struct bnxt *bp, uint16_t vf)
3240 bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf, bnxt_vnic_count,
3241 &count, bnxt_vnic_count_hwrm_stub);
3246 static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,
3249 struct hwrm_func_vf_vnic_ids_query_input req = {0};
3250 struct hwrm_func_vf_vnic_ids_query_output *resp =
3251 bp->hwrm_cmd_resp_addr;
3254 /* First query all VNIC ids */
3255 HWRM_PREP(req, FUNC_VF_VNIC_IDS_QUERY);
3257 req.vf_id = rte_cpu_to_le_16(bp->pf.first_vf_id + vf);
3258 req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf.total_vnics);
3259 req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_mem_virt2iova(vnic_ids));
3261 if (req.vnic_id_tbl_addr == 0) {
3264 "unable to map VNIC ID table address to physical memory\n");
3267 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3270 RTE_LOG(ERR, PMD, "hwrm_func_vf_vnic_query failed rc:%d\n", rc);
3272 } else if (resp->error_code) {
3273 rc = rte_le_to_cpu_16(resp->error_code);
3275 RTE_LOG(ERR, PMD, "hwrm_func_vf_vnic_query error %d\n", rc);
3278 rc = rte_le_to_cpu_32(resp->vnic_id_cnt);
3286 * This function queries the VNIC IDs for a specified VF. It then calls
3287 * the vnic_cb to update the necessary field in vnic_info with cbdata.
3288 * Then it calls the hwrm_cb function to program this new vnic configuration.
3290 int bnxt_hwrm_func_vf_vnic_query_and_config(struct bnxt *bp, uint16_t vf,
3291 void (*vnic_cb)(struct bnxt_vnic_info *, void *), void *cbdata,
3292 int (*hwrm_cb)(struct bnxt *bp, struct bnxt_vnic_info *vnic))
3294 struct bnxt_vnic_info vnic;
3296 int i, num_vnic_ids;
3301 /* First query all VNIC ids */
3302 vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
3303 vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
3304 RTE_CACHE_LINE_SIZE);
3305 if (vnic_ids == NULL) {
3309 for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
3310 rte_mem_lock_page(((char *)vnic_ids) + sz);
3312 num_vnic_ids = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
3314 if (num_vnic_ids < 0)
3315 return num_vnic_ids;
3317 /* For each VNIC: query its config, apply vnic_cb, then program it via hwrm_cb */
3319 for (i = 0; i < num_vnic_ids; i++) {
3320 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
3321 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
3322 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic, bp->pf.first_vf_id + vf);
3325 if (vnic.mru <= 4) /* Indicates unallocated */
3328 vnic_cb(&vnic, cbdata);
3330 rc = hwrm_cb(bp, &vnic);
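/*
 * Usage sketch: the callback pair defined earlier in this file can drive
 * this function, e.g. to push a new rx mask flag value onto every VNIC of
 * a VF (the flag value here is illustrative):
 *
 *	uint32_t flag = 0;
 *	rc = bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf,
 *		vf_vnic_set_rxmask_cb, &flag, bnxt_set_rx_mask_no_vlan);
 */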
3340 int bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(struct bnxt *bp, uint16_t vf,
3343 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3344 struct hwrm_func_cfg_input req = {0};
3347 HWRM_PREP(req, FUNC_CFG);
3349 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3350 req.enables |= rte_cpu_to_le_32(
3351 HWRM_FUNC_CFG_INPUT_ENABLES_VLAN_ANTISPOOF_MODE);
3352 req.vlan_antispoof_mode = on ?
3353 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN :
3354 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_NOCHECK;
3355 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3357 HWRM_CHECK_RESULT();
3363 int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf)
3365 struct bnxt_vnic_info vnic;
3368 int num_vnic_ids, i;
3372 vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
3373 vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
3374 RTE_CACHE_LINE_SIZE);
3375 if (vnic_ids == NULL) {
3380 for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
3381 rte_mem_lock_page(((char *)vnic_ids) + sz);
3383 rc = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
3389 * Loop through to find the default VNIC ID.
3390 * TODO: The easier way would be to obtain the resp->dflt_vnic_id
3391 * by sending the hwrm_func_qcfg command to the firmware.
3393 for (i = 0; i < num_vnic_ids; i++) {
3394 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
3395 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
3396 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic,
3397 bp->pf.first_vf_id + vf);
3400 if (vnic.func_default) {
3402 return vnic.fw_vnic_id;
3405 /* Could not find a default VNIC. */
3406 RTE_LOG(ERR, PMD, "No default VNIC\n");
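/*
 * Sketch of the simpler approach suggested by the TODO above (assumes
 * hwrm_func_qcfg_output exposes dflt_vnic_id, per that comment):
 *
 *	struct hwrm_func_qcfg_input req = {0};
 *	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
 *
 *	HWRM_PREP(req, FUNC_QCFG);
 *	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
 *	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 *	HWRM_CHECK_RESULT();
 *	return rte_le_to_cpu_16(resp->dflt_vnic_id);
 */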
3412 int bnxt_hwrm_set_em_filter(struct bnxt *bp,
3414 struct bnxt_filter_info *filter)
3417 struct hwrm_cfa_em_flow_alloc_input req = {.req_type = 0 };
3418 struct hwrm_cfa_em_flow_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3419 uint32_t enables = 0;
3421 if (filter->fw_em_filter_id != UINT64_MAX)
3422 bnxt_hwrm_clear_em_filter(bp, filter);
3424 HWRM_PREP(req, CFA_EM_FLOW_ALLOC);
3426 req.flags = rte_cpu_to_le_32(filter->flags);
3428 enables = filter->enables |
3429 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_ID;
3430 req.dst_id = rte_cpu_to_le_16(dst_id);
3432 if (filter->ip_addr_type) {
3433 req.ip_addr_type = filter->ip_addr_type;
3434 enables |= HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
3437 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
3438 req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
3440 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_MACADDR)
3441 memcpy(req.src_macaddr, filter->src_macaddr,
3444 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_MACADDR)
3445 memcpy(req.dst_macaddr, filter->dst_macaddr,
3448 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_OVLAN_VID)
3449 req.ovlan_vid = filter->l2_ovlan;
3451 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IVLAN_VID)
3452 req.ivlan_vid = filter->l2_ivlan;
3454 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_ETHERTYPE)
3455 req.ethertype = rte_cpu_to_be_16(filter->ethertype);
3457 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
3458 req.ip_protocol = filter->ip_protocol;
3460 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_IPADDR)
3461 req.src_ipaddr[0] = rte_cpu_to_be_32(filter->src_ipaddr[0]);
3463 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_IPADDR)
3464 req.dst_ipaddr[0] = rte_cpu_to_be_32(filter->dst_ipaddr[0]);
3466 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_PORT)
3467 req.src_port = rte_cpu_to_be_16(filter->src_port);
3469 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_PORT)
3470 req.dst_port = rte_cpu_to_be_16(filter->dst_port);
3472 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
3473 req.mirror_vnic_id = filter->mirror_vnic_id;
3475 req.enables = rte_cpu_to_le_32(enables);
3477 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3479 HWRM_CHECK_RESULT();
3481 filter->fw_em_filter_id = rte_le_to_cpu_64(resp->em_filter_id);
3487 int bnxt_hwrm_clear_em_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
3490 struct hwrm_cfa_em_flow_free_input req = {.req_type = 0 };
3491 struct hwrm_cfa_em_flow_free_output *resp = bp->hwrm_cmd_resp_addr;
3493 if (filter->fw_em_filter_id == UINT64_MAX)
3496 RTE_LOG(DEBUG, PMD, "Clear EM filter\n");
3497 HWRM_PREP(req, CFA_EM_FLOW_FREE);
3499 req.em_filter_id = rte_cpu_to_le_64(filter->fw_em_filter_id);
3501 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3503 HWRM_CHECK_RESULT();
3506 filter->fw_em_filter_id = -1;
3507 filter->fw_l2_filter_id = -1;
3512 int bnxt_hwrm_set_ntuple_filter(struct bnxt *bp,
3514 struct bnxt_filter_info *filter)
3517 struct hwrm_cfa_ntuple_filter_alloc_input req = {.req_type = 0 };
3518 struct hwrm_cfa_ntuple_filter_alloc_output *resp =
3519 bp->hwrm_cmd_resp_addr;
3520 uint32_t enables = 0;
3522 if (filter->fw_ntuple_filter_id != UINT64_MAX)
3523 bnxt_hwrm_clear_ntuple_filter(bp, filter);
3525 HWRM_PREP(req, CFA_NTUPLE_FILTER_ALLOC);
3527 req.flags = rte_cpu_to_le_32(filter->flags);
3529 enables = filter->enables |
3530 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
3531 req.dst_id = rte_cpu_to_le_16(dst_id);
3534 if (filter->ip_addr_type) {
3535 req.ip_addr_type = filter->ip_addr_type;
3537 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
3540 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
3541 req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
3543 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR)
3544 memcpy(req.src_macaddr, filter->src_macaddr,
3547 //HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_MACADDR)
3548 //memcpy(req.dst_macaddr, filter->dst_macaddr,
3551 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE)
3552 req.ethertype = rte_cpu_to_be_16(filter->ethertype);
3554 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
3555 req.ip_protocol = filter->ip_protocol;
3557 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR)
3558 req.src_ipaddr[0] = rte_cpu_to_le_32(filter->src_ipaddr[0]);
3560 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR_MASK)
3561 req.src_ipaddr_mask[0] =
3562 rte_cpu_to_le_32(filter->src_ipaddr_mask[0]);
3564 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR)
3565 req.dst_ipaddr[0] = rte_cpu_to_le_32(filter->dst_ipaddr[0]);
3567 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR_MASK)
3568 req.dst_ipaddr_mask[0] =
3569 rte_cpu_to_le_32(filter->dst_ipaddr_mask[0]);
3571 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT)
3572 req.src_port = rte_cpu_to_le_16(filter->src_port);
3574 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT_MASK)
3575 req.src_port_mask = rte_cpu_to_le_16(filter->src_port_mask);
3577 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT)
3578 req.dst_port = rte_cpu_to_le_16(filter->dst_port);
3580 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT_MASK)
3581 req.dst_port_mask = rte_cpu_to_le_16(filter->dst_port_mask);
3583 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
3584 req.mirror_vnic_id = filter->mirror_vnic_id;
3586 req.enables = rte_cpu_to_le_32(enables);
3588 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3590 HWRM_CHECK_RESULT();
3592 filter->fw_ntuple_filter_id = rte_le_to_cpu_64(resp->ntuple_filter_id);
3598 int bnxt_hwrm_clear_ntuple_filter(struct bnxt *bp,
3599 struct bnxt_filter_info *filter)
3602 struct hwrm_cfa_ntuple_filter_free_input req = {.req_type = 0 };
3603 struct hwrm_cfa_ntuple_filter_free_output *resp =
3604 bp->hwrm_cmd_resp_addr;
3606 if (filter->fw_ntuple_filter_id == UINT64_MAX)
3609 HWRM_PREP(req, CFA_NTUPLE_FILTER_FREE);
3611 req.ntuple_filter_id = rte_cpu_to_le_64(filter->fw_ntuple_filter_id);
3613 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3615 HWRM_CHECK_RESULT();
3618 filter->fw_ntuple_filter_id = -1;