/*-
 *   BSD LICENSE
 *
 *   Copyright(c) Broadcom Limited.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Broadcom Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <unistd.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_io.h>
#include <rte_malloc.h>
#include <rte_memzone.h>
#include <rte_version.h>

#include "bnxt.h"
#include "bnxt_cpr.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_ring.h"
#include "bnxt_rxq.h"
#include "bnxt_rxr.h"
#include "bnxt_txq.h"
#include "bnxt_txr.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"

#define HWRM_CMD_TIMEOUT		10000
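/*
 * HWRM_CMD_TIMEOUT is a poll-iteration count, not a wall-clock value: the
 * send path below retries reading the response valid byte up to this many
 * times, sleeping briefly between attempts, so the effective timeout is
 * this count multiplied by the per-iteration delay in the polling loop.
 */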
struct bnxt_plcmodes_cfg {
	uint32_t	flags;
	uint16_t	jumbo_thresh;
	uint16_t	hds_offset;
	uint16_t	hds_threshold;
};

static int page_getenum(size_t size)
{
	if (size <= 1 << 4)
		return 4;
	if (size <= 1 << 12)
		return 12;
	if (size <= 1 << 13)
		return 13;
	if (size <= 1 << 16)
		return 16;
	if (size <= 1 << 21)
		return 21;
	if (size <= 1 << 22)
		return 22;
	if (size <= 1 << 30)
		return 30;
	RTE_LOG(ERR, PMD, "Page size %zu out of range\n", size);
	return sizeof(void *) * 8 - 1;
}

static int page_roundup(size_t size)
{
	return 1 << page_getenum(size);
}
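/*
 * Illustrative use of the helpers above: page_getenum() rounds a size up
 * to the next supported power-of-two exponent and page_roundup() converts
 * that back to bytes, so page_roundup(3000) yields 1 << 12 == 4096, the
 * smallest supported page size that can hold 3000 bytes.
 */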
/*
 * HWRM Functions (sent to HWRM)
 * These are named bnxt_hwrm_*() and return -1 if bnxt_hwrm_send_message()
 * fails (i.e., a timeout), and a positive non-zero HWRM error code if the
 * HWRM command was failed by the ChiMP.
 */

static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg,
				  uint32_t msg_len)
{
	unsigned int i;
	struct input *req = msg;
	struct output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t *data = msg;
	uint8_t *bar;
	uint8_t *valid;
	uint16_t max_req_len = bp->max_req_len;
	struct hwrm_short_input short_input = { 0 };

	if (bp->flags & BNXT_FLAG_SHORT_CMD) {
		void *short_cmd_req = bp->hwrm_short_cmd_req_addr;

		memset(short_cmd_req, 0, bp->max_req_len);
		memcpy(short_cmd_req, req, msg_len);

		short_input.req_type = rte_cpu_to_le_16(req->req_type);
		short_input.signature = rte_cpu_to_le_16(
					HWRM_SHORT_REQ_SIGNATURE_SHORT_CMD);
		short_input.size = rte_cpu_to_le_16(msg_len);
		short_input.req_addr =
			rte_cpu_to_le_64(bp->hwrm_short_cmd_req_dma_addr);

		data = (uint32_t *)&short_input;
		msg_len = sizeof(short_input);
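		/*
		 * In short-command mode the full request stays in the DMA
		 * buffer populated above; only the small hwrm_short_input
		 * descriptor is written through BAR0 below, and the firmware
		 * is expected to fetch the real request from req_addr by DMA.
		 */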
		/* Sync memory write before updating doorbell */
		rte_wmb();

		max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
	}

	/* Write request msg to hwrm channel */
	for (i = 0; i < msg_len; i += 4) {
		bar = (uint8_t *)bp->bar0 + i;
		rte_write32(*data, bar);
		data++;
	}

	/* Zero the rest of the request space */
	for (; i < max_req_len; i += 4) {
		bar = (uint8_t *)bp->bar0 + i;
		rte_write32(0, bar);
	}

	/* Ring channel doorbell */
	bar = (uint8_t *)bp->bar0 + 0x100;
	rte_write32(1, bar);

	/* Poll for the valid bit */
	for (i = 0; i < HWRM_CMD_TIMEOUT; i++) {
		/* Sanity check on the resp->resp_len */
		rte_rmb();
		if (resp->resp_len && resp->resp_len <=
				bp->max_resp_len) {
			/* Last byte of resp contains the valid key */
			valid = (uint8_t *)resp + resp->resp_len - 1;
			if (*valid == HWRM_RESP_VALID_KEY)
				break;
		}
		rte_delay_us(600);
	}
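	/*
	 * The polling loop above relies on the firmware DMA-writing the
	 * response into hwrm_cmd_resp_addr (passed as resp_addr by
	 * HWRM_PREP()) and on the assumption that the valid key in the last
	 * byte is written last, so seeing HWRM_RESP_VALID_KEY implies the
	 * rest of the response is already in place.
	 */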
	if (i >= HWRM_CMD_TIMEOUT) {
		RTE_LOG(ERR, PMD, "Error sending msg 0x%04x\n",
			req->req_type);
		goto err_ret;
	}
	return 0;

err_ret:
	return -1;
}

/*
 * HWRM_PREP() should be used to prepare *ALL* HWRM commands. It grabs the
 * spinlock and does the initial processing.
 *
 * HWRM_CHECK_RESULT() checks the response for errors and, on failure,
 * releases the spinlock and returns from the calling function. Because it
 * returns using the regular int convention, functions that do not use that
 * convention should not call it directly; copy and adapt it instead.
 *
 * HWRM_UNLOCK() must be called after all response processing is completed.
 */
#define HWRM_PREP(req, type) do { \
	rte_spinlock_lock(&bp->hwrm_lock); \
	memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \
	req.req_type = rte_cpu_to_le_16(HWRM_##type); \
	req.cmpl_ring = rte_cpu_to_le_16(-1); \
	req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++); \
	req.target_id = rte_cpu_to_le_16(0xffff); \
	req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr); \
} while (0)

#define HWRM_CHECK_RESULT() do {\
	if (rc) { \
		RTE_LOG(ERR, PMD, "%s failed rc:%d\n", \
			__func__, rc); \
		rte_spinlock_unlock(&bp->hwrm_lock); \
		return rc; \
	} \
	if (resp->error_code) { \
		rc = rte_le_to_cpu_16(resp->error_code); \
		if (resp->resp_len >= 16) { \
			struct hwrm_err_output *tmp_hwrm_err_op = \
						(void *)resp; \
			RTE_LOG(ERR, PMD, \
				"%s error %d:%d:%08x:%04x\n", \
				__func__, \
				rc, tmp_hwrm_err_op->cmd_err, \
				rte_le_to_cpu_32(\
					tmp_hwrm_err_op->opaque_0), \
				rte_le_to_cpu_16(\
					tmp_hwrm_err_op->opaque_1)); \
		} else { \
			RTE_LOG(ERR, PMD, \
				"%s error %d\n", __func__, rc); \
		} \
		rte_spinlock_unlock(&bp->hwrm_lock); \
		return rc; \
	} \
} while (0)

#define HWRM_UNLOCK()		rte_spinlock_unlock(&bp->hwrm_lock)
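/*
 * Typical command sequence built from these macros, a minimal sketch
 * mirroring bnxt_hwrm_func_reset() below:
 *
 *	struct hwrm_func_reset_input req = {.req_type = 0 };
 *	struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;
 *
 *	HWRM_PREP(req, FUNC_RESET);	// take the lock, fill the header
 *	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 *	HWRM_CHECK_RESULT();		// unlocks and returns on error
 *	// ... read any needed fields out of *resp here ...
 *	HWRM_UNLOCK();			// always pairs with HWRM_PREP()
 *	return rc;
 */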
int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, CFA_L2_SET_RX_MASK);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
	req.mask = 0;

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
				 struct bnxt_vnic_info *vnic,
				 uint16_t vlan_count,
				 struct bnxt_vlan_table_entry *vlan_table)
{
	int rc = 0;
	struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t mask = 0;

	if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
		return rc;

	HWRM_PREP(req, CFA_L2_SET_RX_MASK);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

	/* FIXME: add a multicast flag once adding multicast addresses
	 * is supported.
	 */
	if (vnic->flags & BNXT_VNIC_INFO_BCAST)
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST;
	if (vnic->flags & BNXT_VNIC_INFO_UNTAGGED)
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN;
	if (vnic->flags & BNXT_VNIC_INFO_PROMISC)
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;
	if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI)
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
	if (vnic->flags & BNXT_VNIC_INFO_MCAST)
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
	if (vnic->mc_addr_cnt) {
		mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;
		req.num_mc_entries = rte_cpu_to_le_32(vnic->mc_addr_cnt);
		req.mc_tbl_addr = rte_cpu_to_le_64(vnic->mc_list_dma_addr);
	}
	if (vlan_table) {
		if (!(mask & HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLAN_NONVLAN))
			mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_VLANONLY;
		req.vlan_tag_tbl_addr = rte_cpu_to_le_64(
			rte_mem_virt2iova(vlan_table));
		req.num_vlan_tags = rte_cpu_to_le_32((uint32_t)vlan_count);
	}
	req.mask = rte_cpu_to_le_32(mask);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_cfa_vlan_antispoof_cfg(struct bnxt *bp, uint16_t fid,
			uint16_t vlan_count,
			struct bnxt_vlan_antispoof_table_entry *vlan_table)
{
	int rc = 0;
	struct hwrm_cfa_vlan_antispoof_cfg_input req = {.req_type = 0 };
	struct hwrm_cfa_vlan_antispoof_cfg_output *resp =
						bp->hwrm_cmd_resp_addr;

	/*
	 * Older HWRM versions did not support this command, and the
	 * set_rx_mask list was used for anti-spoof. In 1.8.0, the TX path
	 * configuration was removed from the set_rx_mask call, and this
	 * command was added.
	 *
	 * This command is also present from 1.7.8.11 and higher.
	 */
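	/*
	 * The comparisons below work because bnxt_hwrm_ver_get() packs
	 * bp->fw_ver as (maj << 24) | (min << 16) | (bld << 8) | rsvd;
	 * firmware 1.8.0, for example, compares as (1 << 24) | (8 << 16).
	 */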
	if (bp->fw_ver < ((1 << 24) | (8 << 16))) {
		if (bp->fw_ver != ((1 << 24) | (7 << 16) | (8 << 8))) {
			if (bp->fw_ver < ((1 << 24) | (7 << 16) | (8 << 8) |
					  11))
				return 0;
		}
	}
	HWRM_PREP(req, CFA_VLAN_ANTISPOOF_CFG);
	req.fid = rte_cpu_to_le_16(fid);

	req.vlan_tag_mask_tbl_addr =
		rte_cpu_to_le_64(rte_mem_virt2iova(vlan_table));
	req.num_vlan_entries = rte_cpu_to_le_32((uint32_t)vlan_count);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_clear_l2_filter(struct bnxt *bp,
			      struct bnxt_filter_info *filter)
{
	int rc = 0;
	struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr;

	if (filter->fw_l2_filter_id == UINT64_MAX)
		return 0;

	HWRM_PREP(req, CFA_L2_FILTER_FREE);

	req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	filter->fw_l2_filter_id = -1;

	return 0;
}
int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
			    uint16_t dst_id,
			    struct bnxt_filter_info *filter)
{
	int rc = 0;
	struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 };
	struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	const struct rte_eth_vmdq_rx_conf *conf =
		    &dev_conf->rx_adv_conf.vmdq_rx_conf;
	uint32_t enables = 0;
	uint16_t j = dst_id - 1;

	//TODO: Is there a better way to add VLANs to each VNIC in case of VMDQ
	if ((dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) &&
	    conf->pool_map[j].pools & (1UL << j)) {
		RTE_LOG(DEBUG, PMD,
			"Add vlan %u to vmdq pool %u\n",
			conf->pool_map[j].vlan_id, j);

		filter->l2_ivlan = conf->pool_map[j].vlan_id;
		filter->enables |=
			HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN |
			HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK;
	}

	if (filter->fw_l2_filter_id != UINT64_MAX)
		bnxt_hwrm_clear_l2_filter(bp, filter);

	HWRM_PREP(req, CFA_L2_FILTER_ALLOC);

	req.flags = rte_cpu_to_le_32(filter->flags);

	enables = filter->enables |
		HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
	req.dst_id = rte_cpu_to_le_16(dst_id);

	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR)
		memcpy(req.l2_addr, filter->l2_addr,
		       ETHER_ADDR_LEN);
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK)
		memcpy(req.l2_addr_mask, filter->l2_addr_mask,
		       ETHER_ADDR_LEN);
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN)
		req.l2_ovlan = filter->l2_ovlan;
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN)
		req.l2_ivlan = filter->l2_ivlan;
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
		req.l2_ovlan_mask = filter->l2_ovlan_mask;
	if (enables &
	    HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK)
		req.l2_ivlan_mask = filter->l2_ivlan_mask;
	if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID)
		req.src_id = rte_cpu_to_le_32(filter->src_id);
	if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE)
		req.src_type = filter->src_type;

	req.enables = rte_cpu_to_le_32(enables);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id);
	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_func_qcaps(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_func_qcaps_input req = {.req_type = 0 };
	struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
	uint16_t new_max_vfs;
	int i;

	HWRM_PREP(req, FUNC_QCAPS);

	req.fid = rte_cpu_to_le_16(0xffff);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps);
	if (BNXT_PF(bp)) {
		bp->pf.port_id = resp->port_id;
		bp->pf.first_vf_id = rte_le_to_cpu_16(resp->first_vf_id);
		new_max_vfs = bp->pdev->max_vfs;
		if (new_max_vfs != bp->pf.max_vfs) {
			rte_free(bp->pf.vf_info);
			bp->pf.vf_info = rte_malloc("bnxt_vf_info",
			    sizeof(bp->pf.vf_info[0]) * new_max_vfs, 0);
			bp->pf.max_vfs = new_max_vfs;
			for (i = 0; i < new_max_vfs; i++) {
				bp->pf.vf_info[i].fid = bp->pf.first_vf_id + i;
				bp->pf.vf_info[i].vlan_table =
					rte_zmalloc("VF VLAN table",
						    getpagesize(),
						    getpagesize());
				if (bp->pf.vf_info[i].vlan_table == NULL)
					RTE_LOG(ERR, PMD,
					"Fail to alloc VLAN table for VF %d\n",
					i);
				else
					rte_mem_lock_page(
						bp->pf.vf_info[i].vlan_table);
				bp->pf.vf_info[i].vlan_as_table =
					rte_zmalloc("VF VLAN AS table",
						    getpagesize(),
						    getpagesize());
				if (bp->pf.vf_info[i].vlan_as_table == NULL)
					RTE_LOG(ERR, PMD,
					"Alloc VLAN AS table for VF %d fail\n",
					i);
				else
					rte_mem_lock_page(
					       bp->pf.vf_info[i].vlan_as_table);
				STAILQ_INIT(&bp->pf.vf_info[i].filter);
			}
		}
	}

	bp->fw_fid = rte_le_to_cpu_32(resp->fid);
	memcpy(bp->dflt_mac_addr, &resp->mac_address, ETHER_ADDR_LEN);
	bp->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx);
	bp->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings);
	bp->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings);
	bp->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings);
	bp->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs);
	/* TODO: For now, do not support VMDq/RFS on VFs. */
	if (BNXT_PF(bp)) {
		if (bp->pf.max_vfs)
			bp->max_vnics = 1;
		else
			bp->max_vnics = rte_le_to_cpu_16(resp->max_vnics);
	} else {
		bp->max_vnics = 1;
	}
	bp->max_stat_ctx = rte_le_to_cpu_16(resp->max_stat_ctx);
	if (BNXT_PF(bp))
		bp->pf.total_vnics = rte_le_to_cpu_16(resp->max_vnics);
	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_func_reset(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_func_reset_input req = {.req_type = 0 };
	struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, FUNC_RESET);

	req.enables = rte_cpu_to_le_32(0);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_func_driver_register(struct bnxt *bp)
{
	int rc;
	struct hwrm_func_drv_rgtr_input req = {.req_type = 0 };
	struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;

	if (bp->flags & BNXT_FLAG_REGISTERED)
		return 0;

	HWRM_PREP(req, FUNC_DRV_RGTR);
	req.enables = rte_cpu_to_le_32(HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER |
			HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD);
	req.ver_maj = RTE_VER_YEAR;
	req.ver_min = RTE_VER_MONTH;
	req.ver_upd = RTE_VER_MINOR;

	if (BNXT_PF(bp)) {
		req.enables |= rte_cpu_to_le_32(
			HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_INPUT_FWD);
		memcpy(req.vf_req_fwd, bp->pf.vf_req_fwd,
		       RTE_MIN(sizeof(req.vf_req_fwd),
			       sizeof(bp->pf.vf_req_fwd)));
	}

	req.async_event_fwd[0] |= rte_cpu_to_le_32(0x1);   /* TODO: Use MACRO */
	//memset(req.async_event_fwd, 0xff, sizeof(req.async_event_fwd));

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	bp->flags |= BNXT_FLAG_REGISTERED;

	return rc;
}
int bnxt_hwrm_ver_get(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_ver_get_input req = {.req_type = 0 };
	struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t my_version;
	uint32_t fw_version;
	uint16_t max_resp_len;
	char type[RTE_MEMZONE_NAMESIZE];
	uint32_t dev_caps_cfg;

	bp->max_req_len = HWRM_MAX_REQ_LEN;
	HWRM_PREP(req, VER_GET);

	req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
	req.hwrm_intf_min = HWRM_VERSION_MINOR;
	req.hwrm_intf_upd = HWRM_VERSION_UPDATE;

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	RTE_LOG(INFO, PMD, "%d.%d.%d:%d.%d.%d\n",
		resp->hwrm_intf_maj, resp->hwrm_intf_min,
		resp->hwrm_intf_upd,
		resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld);
	bp->fw_ver = (resp->hwrm_fw_maj << 24) | (resp->hwrm_fw_min << 16) |
			(resp->hwrm_fw_bld << 8) | resp->hwrm_fw_rsvd;
	RTE_LOG(INFO, PMD, "Driver HWRM version: %d.%d.%d\n",
		HWRM_VERSION_MAJOR, HWRM_VERSION_MINOR, HWRM_VERSION_UPDATE);

	my_version = HWRM_VERSION_MAJOR << 16;
	my_version |= HWRM_VERSION_MINOR << 8;
	my_version |= HWRM_VERSION_UPDATE;
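	/*
	 * Example of this packing (the actual values depend on the
	 * HWRM_VERSION_* macros of the build): an interface version of
	 * 1.7.7 packs to (1 << 16) | (7 << 8) | 7 == 0x010707, which makes
	 * the numeric comparisons below equivalent to ordered
	 * major/minor/update comparisons.
	 */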
	fw_version = resp->hwrm_intf_maj << 16;
	fw_version |= resp->hwrm_intf_min << 8;
	fw_version |= resp->hwrm_intf_upd;

	if (resp->hwrm_intf_maj != HWRM_VERSION_MAJOR) {
		RTE_LOG(ERR, PMD, "Unsupported firmware API version\n");
		rc = -EINVAL;
		goto error;
	}

	if (my_version != fw_version) {
		RTE_LOG(INFO, PMD, "BNXT Driver/HWRM API mismatch.\n");
		if (my_version < fw_version) {
			RTE_LOG(INFO, PMD,
				"Firmware API version is newer than driver.\n");
			RTE_LOG(INFO, PMD,
				"The driver may be missing features.\n");
		} else {
			RTE_LOG(INFO, PMD,
				"Firmware API version is older than driver.\n");
			RTE_LOG(INFO, PMD,
				"Not all driver features may be functional.\n");
		}
	}

	if (bp->max_req_len > resp->max_req_win_len) {
		RTE_LOG(ERR, PMD, "Unsupported request length\n");
		rc = -EINVAL;
	}
	bp->max_req_len = rte_le_to_cpu_16(resp->max_req_win_len);
	max_resp_len = resp->max_resp_len;
	dev_caps_cfg = rte_le_to_cpu_32(resp->dev_caps_cfg);

	if (bp->max_resp_len != max_resp_len) {
		sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x",
			bp->pdev->addr.domain, bp->pdev->addr.bus,
			bp->pdev->addr.devid, bp->pdev->addr.function);

		rte_free(bp->hwrm_cmd_resp_addr);

		bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0);
		if (bp->hwrm_cmd_resp_addr == NULL) {
			rc = -ENOMEM;
			goto error;
		}
		rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
		bp->hwrm_cmd_resp_dma_addr =
			rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
		if (bp->hwrm_cmd_resp_dma_addr == 0) {
			RTE_LOG(ERR, PMD,
			"Unable to map response buffer to physical memory.\n");
			rc = -ENOMEM;
			goto error;
		}
		bp->max_resp_len = max_resp_len;
	}

	if ((dev_caps_cfg &
		HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
	    (dev_caps_cfg &
	     HWRM_VER_GET_OUTPUT_DEV_CAPS_CFG_SHORT_CMD_INPUTUIRED)) {
		RTE_LOG(DEBUG, PMD, "Short command supported\n");

		rte_free(bp->hwrm_short_cmd_req_addr);

		bp->hwrm_short_cmd_req_addr = rte_malloc(type,
							 bp->max_req_len, 0);
		if (bp->hwrm_short_cmd_req_addr == NULL) {
			rc = -ENOMEM;
			goto error;
		}
		rte_mem_lock_page(bp->hwrm_short_cmd_req_addr);
		bp->hwrm_short_cmd_req_dma_addr =
			rte_mem_virt2iova(bp->hwrm_short_cmd_req_addr);
		if (bp->hwrm_short_cmd_req_dma_addr == 0) {
			rte_free(bp->hwrm_short_cmd_req_addr);
			RTE_LOG(ERR, PMD,
				"Unable to map buffer to physical memory.\n");
			rc = -ENOMEM;
			goto error;
		}

		bp->flags |= BNXT_FLAG_SHORT_CMD;
	}

error:
	HWRM_UNLOCK();
	return rc;
}
int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags)
{
	int rc;
	struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 };
	struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;

	if (!(bp->flags & BNXT_FLAG_REGISTERED))
		return 0;

	HWRM_PREP(req, FUNC_DRV_UNRGTR);
	req.flags = flags;

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	bp->flags &= ~BNXT_FLAG_REGISTERED;

	return rc;
}
static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
{
	int rc = 0;
	struct hwrm_port_phy_cfg_input req = {0};
	struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t enables = 0;

	HWRM_PREP(req, PORT_PHY_CFG);

	if (conf->link_up) {
		/* Fixed speed requested while AutoNeg is on, so disable it */
		if (bp->link_info.auto_mode && conf->link_speed) {
			req.auto_mode = HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE;
			RTE_LOG(DEBUG, PMD, "Disabling AutoNeg\n");
		}

		req.flags = rte_cpu_to_le_32(conf->phy_flags);
		req.force_link_speed = rte_cpu_to_le_16(conf->link_speed);
		enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
		/*
		 * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
		 * any auto mode, even "none".
		 */
		if (!conf->link_speed) {
			/* No speeds specified. Enable AutoNeg - all speeds */
			req.auto_mode =
				HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS;
		}
		/* AutoNeg - Advertise speeds specified. */
		if (conf->auto_link_speed_mask &&
		    !(conf->phy_flags & HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE)) {
			req.auto_mode =
				HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
			req.auto_link_speed_mask =
				conf->auto_link_speed_mask;
			enables |=
			HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
		}

		req.auto_duplex = conf->duplex;
		enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
		req.auto_pause = conf->auto_pause;
		req.force_pause = conf->force_pause;
		/* Set force_pause if there is no auto or if there is a force */
		if (req.auto_pause && !req.force_pause)
			enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE;
		else
			enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE;

		req.enables = rte_cpu_to_le_32(enables);
	} else {
		req.flags =
		rte_cpu_to_le_32(HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DWN);
		RTE_LOG(INFO, PMD, "Force Link Down\n");
	}

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
				   struct bnxt_link_info *link_info)
{
	int rc = 0;
	struct hwrm_port_phy_qcfg_input req = {0};
	struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, PORT_PHY_QCFG);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	link_info->phy_link_status = resp->link;
	link_info->link_up =
		(link_info->phy_link_status ==
		 HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) ? 1 : 0;
	link_info->link_speed = rte_le_to_cpu_16(resp->link_speed);
	link_info->duplex = resp->duplex_cfg;
	link_info->pause = resp->pause;
	link_info->auto_pause = resp->auto_pause;
	link_info->force_pause = resp->force_pause;
	link_info->auto_mode = resp->auto_mode;
	link_info->phy_type = resp->phy_type;
	link_info->media_type = resp->media_type;

	link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
	link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
	link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
	link_info->force_link_speed = rte_le_to_cpu_16(resp->force_link_speed);
	link_info->phy_ver[0] = resp->phy_maj;
	link_info->phy_ver[1] = resp->phy_min;
	link_info->phy_ver[2] = resp->phy_bld;

	HWRM_UNLOCK();

	RTE_LOG(DEBUG, PMD, "Link Speed %d\n", link_info->link_speed);
	RTE_LOG(DEBUG, PMD, "Auto Mode %d\n", link_info->auto_mode);
	RTE_LOG(DEBUG, PMD, "Support Speeds %x\n", link_info->support_speeds);
	RTE_LOG(DEBUG, PMD, "Auto Link Speed %x\n", link_info->auto_link_speed);
	RTE_LOG(DEBUG, PMD, "Auto Link Speed Mask %x\n",
		link_info->auto_link_speed_mask);
	RTE_LOG(DEBUG, PMD, "Forced Link Speed %x\n",
		link_info->force_link_speed);

	return rc;
}
int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
{
	int rc = 0;
	struct hwrm_queue_qportcfg_input req = {.req_type = 0 };
	struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, QUEUE_QPORTCFG);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

#define GET_QUEUE_INFO(x) \
	bp->cos_queue[x].id = resp->queue_id##x; \
	bp->cos_queue[x].profile = resp->queue_id##x##_service_profile
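/*
 * GET_QUEUE_INFO(x) is token-pasting shorthand; GET_QUEUE_INFO(0), for
 * example, expands to:
 *
 *	bp->cos_queue[0].id = resp->queue_id0;
 *	bp->cos_queue[0].profile = resp->queue_id0_service_profile;
 *
 * and the driver invokes it once per CoS queue reported by the firmware.
 */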
int bnxt_hwrm_ring_alloc(struct bnxt *bp,
			 struct bnxt_ring *ring,
			 uint32_t ring_type, uint32_t map_index,
			 uint32_t stats_ctx_id, uint32_t cmpl_ring_id)
{
	int rc = 0;
	uint32_t enables = 0;
	struct hwrm_ring_alloc_input req = {.req_type = 0 };
	struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_ALLOC);

	req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma);
	req.fbo = rte_cpu_to_le_32(0);
	/* Association of ring index with doorbell index */
	req.logical_id = rte_cpu_to_le_16(map_index);
	req.length = rte_cpu_to_le_32(ring->ring_size);

	switch (ring_type) {
	case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX:
		req.queue_id = bp->cos_queue[0].id;
		/* FALLTHROUGH */
	case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX:
		req.ring_type = ring_type;
		req.cmpl_ring_id = rte_cpu_to_le_16(cmpl_ring_id);
		req.stat_ctx_id = rte_cpu_to_le_16(stats_ctx_id);
		if (stats_ctx_id != INVALID_STATS_CTX_ID)
			enables |=
			HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID;
		break;
	case HWRM_RING_ALLOC_INPUT_RING_TYPE_L2_CMPL:
		req.ring_type = ring_type;
		/*
		 * TODO: Some HWRM versions crash with
		 * HWRM_RING_ALLOC_INPUT_INT_MODE_POLL
		 */
		req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX;
		break;
	default:
		RTE_LOG(ERR, PMD, "hwrm alloc invalid ring type %d\n",
			ring_type);
		HWRM_UNLOCK();
		return -1;
	}
	req.enables = rte_cpu_to_le_32(enables);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	if (rc || resp->error_code) {
		if (rc == 0 && resp->error_code)
			rc = rte_le_to_cpu_16(resp->error_code);
		switch (ring_type) {
		case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
			RTE_LOG(ERR, PMD,
				"hwrm_ring_alloc cp failed. rc:%d\n", rc);
			HWRM_UNLOCK();
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
			RTE_LOG(ERR, PMD,
				"hwrm_ring_alloc rx failed. rc:%d\n", rc);
			HWRM_UNLOCK();
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
			RTE_LOG(ERR, PMD,
				"hwrm_ring_alloc tx failed. rc:%d\n", rc);
			HWRM_UNLOCK();
			return rc;
		default:
			RTE_LOG(ERR, PMD, "Invalid ring. rc:%d\n", rc);
			HWRM_UNLOCK();
			return rc;
		}
	}

	ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id);
	HWRM_UNLOCK();
	return rc;
}
int bnxt_hwrm_ring_free(struct bnxt *bp,
			struct bnxt_ring *ring, uint32_t ring_type)
{
	int rc;
	struct hwrm_ring_free_input req = {.req_type = 0 };
	struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_FREE);

	req.ring_type = ring_type;
	req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	if (rc || resp->error_code) {
		if (rc == 0 && resp->error_code)
			rc = rte_le_to_cpu_16(resp->error_code);
		HWRM_UNLOCK();

		switch (ring_type) {
		case HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL:
			RTE_LOG(ERR, PMD, "hwrm_ring_free cp failed. rc:%d\n",
				rc);
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_RX:
			RTE_LOG(ERR, PMD, "hwrm_ring_free rx failed. rc:%d\n",
				rc);
			return rc;
		case HWRM_RING_FREE_INPUT_RING_TYPE_TX:
			RTE_LOG(ERR, PMD, "hwrm_ring_free tx failed. rc:%d\n",
				rc);
			return rc;
		default:
			RTE_LOG(ERR, PMD, "Invalid ring, rc:%d\n", rc);
			return rc;
		}
	}
	HWRM_UNLOCK();
	return 0;
}
int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx)
{
	int rc = 0;
	struct hwrm_ring_grp_alloc_input req = {.req_type = 0 };
	struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_GRP_ALLOC);

	req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id);
	req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id);
	req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id);
	req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	bp->grp_info[idx].fw_grp_id =
	    rte_le_to_cpu_16(resp->ring_group_id);

	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx)
{
	int rc;
	struct hwrm_ring_grp_free_input req = {.req_type = 0 };
	struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, RING_GRP_FREE);

	req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID;
	return rc;
}
int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
{
	int rc = 0;
	struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 };
	struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;

	if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE)
		return rc;

	HWRM_PREP(req, STAT_CTX_CLR_STATS);

	req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			     unsigned int idx __rte_unused)
{
	int rc;
	struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 };
	struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, STAT_CTX_ALLOC);

	req.update_period_ms = rte_cpu_to_le_32(0);

	req.stats_dma_addr =
	    rte_cpu_to_le_64(cpr->hw_stats_map);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	cpr->hw_stats_ctx_id = rte_le_to_cpu_16(resp->stat_ctx_id);

	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			    unsigned int idx __rte_unused)
{
	int rc;
	struct hwrm_stat_ctx_free_input req = {.req_type = 0 };
	struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, STAT_CTX_FREE);

	req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0, i, j;
	struct hwrm_vnic_alloc_input req = { 0 };
	struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;

	/* map ring groups to this vnic */
	RTE_LOG(DEBUG, PMD, "Alloc VNIC. Start %x, End %x\n",
		vnic->start_grp_id, vnic->end_grp_id);
	for (i = vnic->start_grp_id, j = 0; i <= vnic->end_grp_id; i++, j++)
		vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
	vnic->dflt_ring_grp = bp->grp_info[vnic->start_grp_id].fw_grp_id;
	vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
	vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
	vnic->lb_rule = (uint16_t)HWRM_NA_SIGNATURE;
	vnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN +
				ETHER_CRC_LEN + VLAN_TAG_SIZE;
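	/*
	 * The MRU is the largest receive unit the VNIC accepts: the MTU plus
	 * L2 overhead. With a 1500-byte MTU this is 1500 + 14 (Ethernet
	 * header) + 4 (CRC) + 4 (one VLAN tag) = 1522 bytes, given the usual
	 * values of ETHER_HDR_LEN, ETHER_CRC_LEN and VLAN_TAG_SIZE.
	 */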
	HWRM_PREP(req, VNIC_ALLOC);

	if (vnic->func_default)
		req.flags =
			rte_cpu_to_le_32(HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT);
	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id);
	HWRM_UNLOCK();
	RTE_LOG(DEBUG, PMD, "VNIC ID %x\n", vnic->fw_vnic_id);
	return rc;
}
static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp,
					struct bnxt_vnic_info *vnic,
					struct bnxt_plcmodes_cfg *pmode)
{
	int rc = 0;
	struct hwrm_vnic_plcmodes_qcfg_input req = {.req_type = 0 };
	struct hwrm_vnic_plcmodes_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_PLCMODES_QCFG);

	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	pmode->flags = rte_le_to_cpu_32(resp->flags);
	/* dflt_vnic bit doesn't exist in the _cfg command */
	pmode->flags &= ~(HWRM_VNIC_PLCMODES_QCFG_OUTPUT_FLAGS_DFLT_VNIC);
	pmode->jumbo_thresh = rte_le_to_cpu_16(resp->jumbo_thresh);
	pmode->hds_offset = rte_le_to_cpu_16(resp->hds_offset);
	pmode->hds_threshold = rte_le_to_cpu_16(resp->hds_threshold);

	HWRM_UNLOCK();

	return rc;
}
static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp,
				       struct bnxt_vnic_info *vnic,
				       struct bnxt_plcmodes_cfg *pmode)
{
	int rc = 0;
	struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
	struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_PLCMODES_CFG);

	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
	req.flags = rte_cpu_to_le_32(pmode->flags);
	req.jumbo_thresh = rte_cpu_to_le_16(pmode->jumbo_thresh);
	req.hds_offset = rte_cpu_to_le_16(pmode->hds_offset);
	req.hds_threshold = rte_cpu_to_le_16(pmode->hds_threshold);
	req.enables = rte_cpu_to_le_32(
	    HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_THRESHOLD_VALID |
	    HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_HDS_OFFSET_VALID |
	    HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID
	);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_cfg_input req = {.req_type = 0 };
	struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	uint32_t ctx_enable_flag = 0;
	struct bnxt_plcmodes_cfg pmodes;

	if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
		RTE_LOG(DEBUG, PMD, "VNIC ID %x\n", vnic->fw_vnic_id);
		return rc;
	}

	rc = bnxt_hwrm_vnic_plcmodes_qcfg(bp, vnic, &pmodes);
	if (rc)
		return rc;

	HWRM_PREP(req, VNIC_CFG);

	/* Only RSS support for now; TBD: COS & LB */
	req.enables =
	    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP);
	if (vnic->lb_rule != 0xffff)
		ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE;
	if (vnic->cos_rule != 0xffff)
		ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE;
	if (vnic->rss_rule != 0xffff) {
		ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_MRU;
		ctx_enable_flag |= HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE;
	}
	req.enables |= rte_cpu_to_le_32(ctx_enable_flag);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
	req.dflt_ring_grp = rte_cpu_to_le_16(vnic->dflt_ring_grp);
	req.rss_rule = rte_cpu_to_le_16(vnic->rss_rule);
	req.cos_rule = rte_cpu_to_le_16(vnic->cos_rule);
	req.lb_rule = rte_cpu_to_le_16(vnic->lb_rule);
	req.mru = rte_cpu_to_le_16(vnic->mru);
	if (vnic->func_default)
		req.flags |=
		    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT);
	if (vnic->vlan_strip)
		req.flags |=
		    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE);
	if (vnic->bd_stall)
		req.flags |=
		    rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE);
	if (vnic->roce_dual)
		req.flags |= rte_cpu_to_le_32(
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE);
	if (vnic->roce_only)
		req.flags |= rte_cpu_to_le_32(
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE);
	if (vnic->rss_dflt_cr)
		req.flags |= rte_cpu_to_le_32(
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	rc = bnxt_hwrm_vnic_plcmodes_cfg(bp, vnic, &pmodes);

	return rc;
}
int bnxt_hwrm_vnic_qcfg(struct bnxt *bp, struct bnxt_vnic_info *vnic,
			int16_t fw_vf_id)
{
	int rc = 0;
	struct hwrm_vnic_qcfg_input req = {.req_type = 0 };
	struct hwrm_vnic_qcfg_output *resp = bp->hwrm_cmd_resp_addr;

	if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
		RTE_LOG(DEBUG, PMD, "VNIC QCFG ID %d\n", vnic->fw_vnic_id);
		return rc;
	}
	HWRM_PREP(req, VNIC_QCFG);

	req.enables =
	    rte_cpu_to_le_32(HWRM_VNIC_QCFG_INPUT_ENABLES_VF_ID_VALID);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
	req.vf_id = rte_cpu_to_le_16(fw_vf_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	vnic->dflt_ring_grp = rte_le_to_cpu_16(resp->dflt_ring_grp);
	vnic->rss_rule = rte_le_to_cpu_16(resp->rss_rule);
	vnic->cos_rule = rte_le_to_cpu_16(resp->cos_rule);
	vnic->lb_rule = rte_le_to_cpu_16(resp->lb_rule);
	vnic->mru = rte_le_to_cpu_16(resp->mru);
	vnic->func_default = rte_le_to_cpu_32(
			resp->flags) & HWRM_VNIC_QCFG_OUTPUT_FLAGS_DEFAULT;
	vnic->vlan_strip = rte_le_to_cpu_32(resp->flags) &
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_VLAN_STRIP_MODE;
	vnic->bd_stall = rte_le_to_cpu_32(resp->flags) &
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_BD_STALL_MODE;
	vnic->roce_dual = rte_le_to_cpu_32(resp->flags) &
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_DUAL_VNIC_MODE;
	vnic->roce_only = rte_le_to_cpu_32(resp->flags) &
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_ROCE_ONLY_VNIC_MODE;
	vnic->rss_dflt_cr = rte_le_to_cpu_32(resp->flags) &
			HWRM_VNIC_QCFG_OUTPUT_FLAGS_RSS_DFLT_CR_MODE;

	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 };
	struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
						bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	vnic->rss_rule = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id);
	HWRM_UNLOCK();
	RTE_LOG(DEBUG, PMD, "VNIC RSS Rule %x\n", vnic->rss_rule);

	return rc;
}

int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 };
	struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp =
						bp->hwrm_cmd_resp_addr;

	if (vnic->rss_rule == 0xffff) {
		RTE_LOG(DEBUG, PMD, "VNIC RSS Rule %x\n", vnic->rss_rule);
		return rc;
	}
	HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE);

	req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(vnic->rss_rule);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	vnic->rss_rule = INVALID_HW_RING_ID;

	return rc;
}

int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_free_input req = {.req_type = 0 };
	struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr;

	if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
		RTE_LOG(DEBUG, PMD, "VNIC FREE ID %x\n", vnic->fw_vnic_id);
		return rc;
	}

	HWRM_PREP(req, VNIC_FREE);

	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	vnic->fw_vnic_id = INVALID_HW_RING_ID;
	return rc;
}
int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp,
			   struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 };
	struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_RSS_CFG);

	req.hash_type = rte_cpu_to_le_32(vnic->hash_type);

	req.ring_grp_tbl_addr =
	    rte_cpu_to_le_64(vnic->rss_table_dma_addr);
	req.hash_key_tbl_addr =
	    rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr);
	req.rss_ctx_idx = rte_cpu_to_le_16(vnic->rss_rule);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
			       struct bnxt_vnic_info *vnic)
{
	int rc = 0;
	struct hwrm_vnic_plcmodes_cfg_input req = {.req_type = 0 };
	struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	uint16_t size;

	HWRM_PREP(req, VNIC_PLCMODES_CFG);

	req.flags = rte_cpu_to_le_32(
			HWRM_VNIC_PLCMODES_CFG_INPUT_FLAGS_JUMBO_PLACEMENT);

	req.enables = rte_cpu_to_le_32(
		HWRM_VNIC_PLCMODES_CFG_INPUT_ENABLES_JUMBO_THRESH_VALID);

	size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
	size -= RTE_PKTMBUF_HEADROOM;
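	/*
	 * The jumbo threshold is the usable size of one rx mbuf (data room
	 * minus headroom); frames larger than this are placed using the
	 * jumbo/aggregation scheme rather than a single buffer.
	 */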
	req.jumbo_thresh = rte_cpu_to_le_16(size);
	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
			   struct bnxt_vnic_info *vnic, bool enable)
{
	int rc = 0;
	struct hwrm_vnic_tpa_cfg_input req = {.req_type = 0 };
	struct hwrm_vnic_tpa_cfg_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, VNIC_TPA_CFG);

	if (enable) {
		req.enables = rte_cpu_to_le_32(
				HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGG_SEGS |
				HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MAX_AGGS |
				HWRM_VNIC_TPA_CFG_INPUT_ENABLES_MIN_AGG_LEN);
		req.flags = rte_cpu_to_le_32(
				HWRM_VNIC_TPA_CFG_INPUT_FLAGS_TPA |
				HWRM_VNIC_TPA_CFG_INPUT_FLAGS_ENCAP_TPA |
				HWRM_VNIC_TPA_CFG_INPUT_FLAGS_RSC_WND_UPDATE |
				HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO |
				HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN |
			HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ);
		req.max_agg_segs = rte_cpu_to_le_16(5);
		req.max_aggs =
			rte_cpu_to_le_16(HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX);
		req.min_agg_len = rte_cpu_to_le_32(512);
	}

	req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_func_vf_mac(struct bnxt *bp, uint16_t vf, const uint8_t *mac_addr)
{
	struct hwrm_func_cfg_input req = {0};
	struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
	int rc;

	req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
	req.enables = rte_cpu_to_le_32(
			HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
	memcpy(req.dflt_mac_addr, mac_addr, sizeof(req.dflt_mac_addr));
	req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);

	HWRM_PREP(req, FUNC_CFG);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	bp->pf.vf_info[vf].random_mac = false;

	return rc;
}

int bnxt_hwrm_func_qstats_tx_drop(struct bnxt *bp, uint16_t fid,
				  uint64_t *dropped)
{
	int rc = 0;
	struct hwrm_func_qstats_input req = {.req_type = 0};
	struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, FUNC_QSTATS);

	req.fid = rte_cpu_to_le_16(fid);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	*dropped = rte_le_to_cpu_64(resp->tx_drop_pkts);

	HWRM_UNLOCK();

	return rc;
}
int bnxt_hwrm_func_qstats(struct bnxt *bp, uint16_t fid,
			  struct rte_eth_stats *stats)
{
	int rc = 0;
	struct hwrm_func_qstats_input req = {.req_type = 0};
	struct hwrm_func_qstats_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, FUNC_QSTATS);

	req.fid = rte_cpu_to_le_16(fid);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();

	stats->ipackets = rte_le_to_cpu_64(resp->rx_ucast_pkts);
	stats->ipackets += rte_le_to_cpu_64(resp->rx_mcast_pkts);
	stats->ipackets += rte_le_to_cpu_64(resp->rx_bcast_pkts);
	stats->ibytes = rte_le_to_cpu_64(resp->rx_ucast_bytes);
	stats->ibytes += rte_le_to_cpu_64(resp->rx_mcast_bytes);
	stats->ibytes += rte_le_to_cpu_64(resp->rx_bcast_bytes);

	stats->opackets = rte_le_to_cpu_64(resp->tx_ucast_pkts);
	stats->opackets += rte_le_to_cpu_64(resp->tx_mcast_pkts);
	stats->opackets += rte_le_to_cpu_64(resp->tx_bcast_pkts);
	stats->obytes = rte_le_to_cpu_64(resp->tx_ucast_bytes);
	stats->obytes += rte_le_to_cpu_64(resp->tx_mcast_bytes);
	stats->obytes += rte_le_to_cpu_64(resp->tx_bcast_bytes);

	stats->ierrors = rte_le_to_cpu_64(resp->rx_err_pkts);
	stats->oerrors = rte_le_to_cpu_64(resp->tx_err_pkts);

	stats->imissed = rte_le_to_cpu_64(resp->rx_drop_pkts);

	HWRM_UNLOCK();

	return rc;
}

int bnxt_hwrm_func_clr_stats(struct bnxt *bp, uint16_t fid)
{
	int rc = 0;
	struct hwrm_func_clr_stats_input req = {.req_type = 0};
	struct hwrm_func_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;

	HWRM_PREP(req, FUNC_CLR_STATS);

	req.fid = rte_cpu_to_le_16(fid);

	rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));

	HWRM_CHECK_RESULT();
	HWRM_UNLOCK();

	return rc;
}
/*
 * HWRM utility functions
 */

int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp)
{
	unsigned int i;
	int rc = 0;

	for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
		struct bnxt_tx_queue *txq;
		struct bnxt_rx_queue *rxq;
		struct bnxt_cp_ring_info *cpr;

		if (i >= bp->rx_cp_nr_rings) {
			txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
			cpr = txq->cp_ring;
		} else {
			rxq = bp->rx_queues[i];
			cpr = rxq->cp_ring;
		}

		rc = bnxt_hwrm_stat_clear(bp, cpr);
		if (rc)
			return rc;
	}
	return 0;
}

int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
{
	int rc;
	unsigned int i;
	struct bnxt_cp_ring_info *cpr;

	for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {

		if (i >= bp->rx_cp_nr_rings) {
			cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
		} else {
			cpr = bp->rx_queues[i]->cp_ring;
			bp->grp_info[i].fw_stats_ctx = -1;
		}
		if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
			rc = bnxt_hwrm_stat_ctx_free(bp, cpr, i);
			cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
			if (rc)
				return rc;
		}
	}
	return 0;
}

int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp)
{
	unsigned int i;
	int rc = 0;

	for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
		struct bnxt_tx_queue *txq;
		struct bnxt_rx_queue *rxq;
		struct bnxt_cp_ring_info *cpr;

		if (i >= bp->rx_cp_nr_rings) {
			txq = bp->tx_queues[i - bp->rx_cp_nr_rings];
			cpr = txq->cp_ring;
		} else {
			rxq = bp->rx_queues[i];
			cpr = rxq->cp_ring;
		}

		rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, i);

		if (rc)
			return rc;
	}
	return rc;
}
int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp)
{
	unsigned int idx;
	int rc = 0;

	for (idx = 0; idx < bp->rx_cp_nr_rings; idx++) {

		if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID)
			continue;

		rc = bnxt_hwrm_ring_grp_free(bp, idx);

		if (rc)
			return rc;
	}
	return rc;
}

static void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			      unsigned int idx __rte_unused)
{
	struct bnxt_ring *cp_ring = cpr->cp_ring_struct;

	bnxt_hwrm_ring_free(bp, cp_ring,
			HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL);
	cp_ring->fw_ring_id = INVALID_HW_RING_ID;
	memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
			sizeof(*cpr->cp_desc_ring));
	cpr->cp_raw_cons = 0;
}
int bnxt_free_all_hwrm_rings(struct bnxt *bp)
{
	int rc = 0;
	unsigned int i;

	for (i = 0; i < bp->tx_cp_nr_rings; i++) {
		struct bnxt_tx_queue *txq = bp->tx_queues[i];
		struct bnxt_tx_ring_info *txr = txq->tx_ring;
		struct bnxt_ring *ring = txr->tx_ring_struct;
		struct bnxt_cp_ring_info *cpr = txq->cp_ring;
		unsigned int idx = bp->rx_cp_nr_rings + i + 1;

		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
			bnxt_hwrm_ring_free(bp, ring,
					HWRM_RING_FREE_INPUT_RING_TYPE_TX);
			ring->fw_ring_id = INVALID_HW_RING_ID;
			memset(txr->tx_desc_ring, 0,
					txr->tx_ring_struct->ring_size *
					sizeof(*txr->tx_desc_ring));
			memset(txr->tx_buf_ring, 0,
					txr->tx_ring_struct->ring_size *
					sizeof(*txr->tx_buf_ring));
			txr->tx_prod = 0;
			txr->tx_cons = 0;
		}
		if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
			bnxt_free_cp_ring(bp, cpr, idx);
			cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
		}
	}

	for (i = 0; i < bp->rx_cp_nr_rings; i++) {
		struct bnxt_rx_queue *rxq = bp->rx_queues[i];
		struct bnxt_rx_ring_info *rxr = rxq->rx_ring;
		struct bnxt_ring *ring = rxr->rx_ring_struct;
		struct bnxt_cp_ring_info *cpr = rxq->cp_ring;
		unsigned int idx = i + 1;

		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
			bnxt_hwrm_ring_free(bp, ring,
					HWRM_RING_FREE_INPUT_RING_TYPE_RX);
			ring->fw_ring_id = INVALID_HW_RING_ID;
			bp->grp_info[idx].rx_fw_ring_id = INVALID_HW_RING_ID;
			memset(rxr->rx_desc_ring, 0,
					rxr->rx_ring_struct->ring_size *
					sizeof(*rxr->rx_desc_ring));
			memset(rxr->rx_buf_ring, 0,
					rxr->rx_ring_struct->ring_size *
					sizeof(*rxr->rx_buf_ring));
			rxr->rx_prod = 0;
		}
		ring = rxr->ag_ring_struct;
		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
			bnxt_hwrm_ring_free(bp, ring,
					    HWRM_RING_FREE_INPUT_RING_TYPE_RX);
			ring->fw_ring_id = INVALID_HW_RING_ID;
			memset(rxr->ag_buf_ring, 0,
			       rxr->ag_ring_struct->ring_size *
			       sizeof(*rxr->ag_buf_ring));
			rxr->ag_prod = 0;
			bp->grp_info[i].ag_fw_ring_id = INVALID_HW_RING_ID;
		}
		if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
			bnxt_free_cp_ring(bp, cpr, idx);
			bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
			cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
		}
	}

	/* Default completion ring */
	{
		struct bnxt_cp_ring_info *cpr = bp->def_cp_ring;

		if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
			bnxt_free_cp_ring(bp, cpr, 0);
			cpr->cp_ring_struct->fw_ring_id = INVALID_HW_RING_ID;
		}
	}

	return rc;
}
int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp)
{
	unsigned int i;
	int rc = 0;

	for (i = 0; i < bp->rx_cp_nr_rings; i++) {
		rc = bnxt_hwrm_ring_grp_alloc(bp, i);

		if (rc)
			return rc;
	}
	return rc;
}

void bnxt_free_hwrm_resources(struct bnxt *bp)
{
	/* Release the HWRM command/response and short request buffers */
	rte_free(bp->hwrm_cmd_resp_addr);
	rte_free(bp->hwrm_short_cmd_req_addr);
	bp->hwrm_cmd_resp_addr = NULL;
	bp->hwrm_short_cmd_req_addr = NULL;
	bp->hwrm_cmd_resp_dma_addr = 0;
	bp->hwrm_short_cmd_req_dma_addr = 0;
}
int bnxt_alloc_hwrm_resources(struct bnxt *bp)
{
	struct rte_pci_device *pdev = bp->pdev;
	char type[RTE_MEMZONE_NAMESIZE];

	sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x", pdev->addr.domain,
		pdev->addr.bus, pdev->addr.devid, pdev->addr.function);
	bp->max_resp_len = HWRM_MAX_RESP_LEN;
	bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0);
	rte_mem_lock_page(bp->hwrm_cmd_resp_addr);
	if (bp->hwrm_cmd_resp_addr == NULL)
		return -ENOMEM;
	bp->hwrm_cmd_resp_dma_addr =
		rte_mem_virt2iova(bp->hwrm_cmd_resp_addr);
	if (bp->hwrm_cmd_resp_dma_addr == 0) {
		RTE_LOG(ERR, PMD,
			"unable to map response address to physical memory\n");
		return -ENOMEM;
	}
	rte_spinlock_init(&bp->hwrm_lock);

	return 0;
}
int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	struct bnxt_filter_info *filter;
	int rc = 0;

	STAILQ_FOREACH(filter, &vnic->filter, next) {
		if (filter->filter_type == HWRM_CFA_EM_FILTER)
			rc = bnxt_hwrm_clear_em_filter(bp, filter);
		else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
			rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
		else
			rc = bnxt_hwrm_clear_l2_filter(bp, filter);
	}
	return rc;
}

static int
bnxt_clear_hwrm_vnic_flows(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	struct bnxt_filter_info *filter;
	struct rte_flow *flow;
	int rc = 0;

	STAILQ_FOREACH(flow, &vnic->flow_list, next) {
		filter = flow->filter;
		RTE_LOG(ERR, PMD, "filter type %d\n", filter->filter_type);
		if (filter->filter_type == HWRM_CFA_EM_FILTER)
			rc = bnxt_hwrm_clear_em_filter(bp, filter);
		else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
			rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
		else
			rc = bnxt_hwrm_clear_l2_filter(bp, filter);

		STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
		rte_free(flow);
	}
	return rc;
}
int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
{
	struct bnxt_filter_info *filter;
	int rc = 0;

	STAILQ_FOREACH(filter, &vnic->filter, next) {
		if (filter->filter_type == HWRM_CFA_EM_FILTER)
			rc = bnxt_hwrm_set_em_filter(bp, filter->dst_id,
						     filter);
		else if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
			rc = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id,
							 filter);
		else
			rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
						     filter);
		if (rc)
			break;
	}
	return rc;
}

void bnxt_free_tunnel_ports(struct bnxt *bp)
{
	if (bp->vxlan_port_cnt)
		bnxt_hwrm_tunnel_dst_port_free(bp, bp->vxlan_fw_dst_port_id,
			HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_VXLAN);
	bp->vxlan_port = 0;
	if (bp->geneve_port_cnt)
		bnxt_hwrm_tunnel_dst_port_free(bp, bp->geneve_fw_dst_port_id,
			HWRM_TUNNEL_DST_PORT_FREE_INPUT_TUNNEL_TYPE_GENEVE);
	bp->geneve_port = 0;
}
void bnxt_free_all_hwrm_resources(struct bnxt *bp)
{
	int i;

	if (bp->vnic_info == NULL)
		return;

	/*
	 * Cleanup VNICs in reverse order, to make sure the L2 filter
	 * from vnic0 is last to be cleaned up.
	 */
	for (i = bp->nr_vnics - 1; i >= 0; i--) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

		bnxt_clear_hwrm_vnic_flows(bp, vnic);

		bnxt_clear_hwrm_vnic_filters(bp, vnic);

		bnxt_hwrm_vnic_ctx_free(bp, vnic);

		bnxt_hwrm_vnic_tpa_cfg(bp, vnic, false);

		bnxt_hwrm_vnic_free(bp, vnic);
	}
	/* Ring resources */
	bnxt_free_all_hwrm_rings(bp);
	bnxt_free_all_hwrm_ring_grps(bp);
	bnxt_free_all_hwrm_stat_ctxs(bp);
	bnxt_free_tunnel_ports(bp);
}
static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
{
	uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;

	if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG)
		return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH;

	switch (conf_link_speed) {
	case ETH_LINK_SPEED_10M_HD:
	case ETH_LINK_SPEED_100M_HD:
		return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF;
	}
	return hw_link_duplex;
}

static uint16_t bnxt_check_eth_link_autoneg(uint32_t conf_link)
{
	return (conf_link & ETH_LINK_SPEED_FIXED) ? 0 : 1;
}

static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
{
	uint16_t eth_link_speed = 0;

	if (conf_link_speed == ETH_LINK_SPEED_AUTONEG)
		return ETH_LINK_SPEED_AUTONEG;

	switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) {
	case ETH_LINK_SPEED_100M:
	case ETH_LINK_SPEED_100M_HD:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB;
		break;
	case ETH_LINK_SPEED_1G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB;
		break;
	case ETH_LINK_SPEED_2_5G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB;
		break;
	case ETH_LINK_SPEED_10G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB;
		break;
	case ETH_LINK_SPEED_20G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB;
		break;
	case ETH_LINK_SPEED_25G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB;
		break;
	case ETH_LINK_SPEED_40G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB;
		break;
	case ETH_LINK_SPEED_50G:
		eth_link_speed =
			HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB;
		break;
	default:
		RTE_LOG(ERR, PMD,
			"Unsupported link speed %d; default to AUTO\n",
			conf_link_speed);
		break;
	}
	return eth_link_speed;
}

#define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \
		ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \
		ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \
		ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G)

static int bnxt_valid_link_speed(uint32_t link_speed, uint16_t port_id)
{
	uint32_t one_speed;

	if (link_speed == ETH_LINK_SPEED_AUTONEG)
		return 0;

	if (link_speed & ETH_LINK_SPEED_FIXED) {
		one_speed = link_speed & ~ETH_LINK_SPEED_FIXED;
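		/*
		 * one_speed & (one_speed - 1) clears the lowest set bit, so a
		 * non-zero result means more than one speed bit survived the
		 * FIXED-flag mask, which is invalid for a fixed-speed request.
		 */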
		if (one_speed & (one_speed - 1)) {
			RTE_LOG(ERR, PMD,
				"Invalid advertised speeds (%u) for port %u\n",
				link_speed, port_id);
			return -EINVAL;
		}
		if ((one_speed & BNXT_SUPPORTED_SPEEDS) != one_speed) {
			RTE_LOG(ERR, PMD,
				"Unsupported advertised speed (%u) for port %u\n",
				link_speed, port_id);
			return -EINVAL;
		}
	} else {
		if (!(link_speed & BNXT_SUPPORTED_SPEEDS)) {
			RTE_LOG(ERR, PMD,
				"Unsupported advertised speeds (%u) for port %u\n",
				link_speed, port_id);
			return -EINVAL;
		}
	}
	return 0;
}

static uint16_t
bnxt_parse_eth_link_speed_mask(struct bnxt *bp, uint32_t link_speed)
{
	uint16_t ret = 0;

	if (link_speed == ETH_LINK_SPEED_AUTONEG) {
		if (bp->link_info.support_speeds)
			return bp->link_info.support_speeds;
		link_speed = BNXT_SUPPORTED_SPEEDS;
	}

	if (link_speed & ETH_LINK_SPEED_100M)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
	if (link_speed & ETH_LINK_SPEED_100M_HD)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB;
	if (link_speed & ETH_LINK_SPEED_1G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB;
	if (link_speed & ETH_LINK_SPEED_2_5G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB;
	if (link_speed & ETH_LINK_SPEED_10G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB;
	if (link_speed & ETH_LINK_SPEED_20G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB;
	if (link_speed & ETH_LINK_SPEED_25G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB;
	if (link_speed & ETH_LINK_SPEED_40G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB;
	if (link_speed & ETH_LINK_SPEED_50G)
		ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB;
	return ret;
}
static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed)
{
	uint32_t eth_link_speed = ETH_SPEED_NUM_NONE;

	switch (hw_link_speed) {
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB:
		eth_link_speed = ETH_SPEED_NUM_100M;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB:
		eth_link_speed = ETH_SPEED_NUM_1G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB:
		eth_link_speed = ETH_SPEED_NUM_2_5G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB:
		eth_link_speed = ETH_SPEED_NUM_10G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB:
		eth_link_speed = ETH_SPEED_NUM_20G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB:
		eth_link_speed = ETH_SPEED_NUM_25G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB:
		eth_link_speed = ETH_SPEED_NUM_40G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB:
		eth_link_speed = ETH_SPEED_NUM_50G;
		break;
	case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB:
	default:
		RTE_LOG(ERR, PMD, "HWRM link speed %d not defined\n",
			hw_link_speed);
		break;
	}
	return eth_link_speed;
}

static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex)
{
	uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX;

	switch (hw_link_duplex) {
	case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH:
	case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL:
		eth_link_duplex = ETH_LINK_FULL_DUPLEX;
		break;
	case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF:
		eth_link_duplex = ETH_LINK_HALF_DUPLEX;
		break;
	default:
		RTE_LOG(ERR, PMD, "HWRM link duplex %d not defined\n",
			hw_link_duplex);
		break;
	}
	return eth_link_duplex;
}
int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link)
{
	int rc = 0;
	struct bnxt_link_info *link_info = &bp->link_info;

	rc = bnxt_hwrm_port_phy_qcfg(bp, link_info);
	if (rc) {
		RTE_LOG(ERR, PMD,
			"Get link config failed with rc %d\n", rc);
		goto exit;
	}
	if (link_info->link_speed)
		link->link_speed =
			bnxt_parse_hw_link_speed(link_info->link_speed);
	else
		link->link_speed = ETH_SPEED_NUM_NONE;
	link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex);
	link->link_status = link_info->link_up;
	link->link_autoneg = link_info->auto_mode ==
		HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ?
		ETH_LINK_FIXED : ETH_LINK_AUTONEG;
exit:
	return rc;
}
int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
{
	int rc = 0;
	struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
	struct bnxt_link_info link_req;
	uint16_t speed, autoneg;

	if (BNXT_NPAR_PF(bp) || BNXT_VF(bp))
		return 0;

	rc = bnxt_valid_link_speed(dev_conf->link_speeds,
			bp->eth_dev->data->port_id);
	if (rc)
		goto error;

	memset(&link_req, 0, sizeof(link_req));
	link_req.link_up = link_up;
	if (!link_up)
		goto port_phy_cfg;

	autoneg = bnxt_check_eth_link_autoneg(dev_conf->link_speeds);
	speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
	link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
	/* Autoneg can be done only when the FW allows */
	if (autoneg == 1 && !(bp->link_info.auto_link_speed ||
			      bp->link_info.force_link_speed)) {
		link_req.phy_flags |=
				HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
		link_req.auto_link_speed_mask =
			bnxt_parse_eth_link_speed_mask(bp,
						       dev_conf->link_speeds);
	} else {
		if (bp->link_info.phy_type ==
		    HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET ||
		    bp->link_info.phy_type ==
		    HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE ||
		    bp->link_info.media_type ==
		    HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP) {
			RTE_LOG(ERR, PMD, "10GBase-T devices must autoneg\n");
			return -EINVAL;
		}

		link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
		/* If user wants a particular speed try that first. */
		if (speed)
			link_req.link_speed = speed;
		else if (bp->link_info.force_link_speed)
			link_req.link_speed = bp->link_info.force_link_speed;
		else
			link_req.link_speed = bp->link_info.auto_link_speed;
	}
	link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
	link_req.auto_pause = bp->link_info.auto_pause;
	link_req.force_pause = bp->link_info.force_pause;

port_phy_cfg:
	rc = bnxt_hwrm_port_phy_cfg(bp, &link_req);
	if (rc) {
		RTE_LOG(ERR, PMD,
			"Set link config failed with rc %d\n", rc);
	}

error:
	return rc;
}
2188 struct hwrm_func_qcfg_input req = {0};
2189 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2192 HWRM_PREP(req, FUNC_QCFG);
2193 req.fid = rte_cpu_to_le_16(0xffff);
2195 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2197 HWRM_CHECK_RESULT();
2199 /* Hard Coded.. 0xfff VLAN ID mask */
2200 bp->vlan = rte_le_to_cpu_16(resp->vlan) & 0xfff;
2202 switch (resp->port_partition_type) {
2203 case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_0:
2204 case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR1_5:
2205 case HWRM_FUNC_QCFG_OUTPUT_PORT_PARTITION_TYPE_NPAR2_0:
2206 bp->port_partition_type = resp->port_partition_type;
2209 bp->port_partition_type = 0;
static void copy_func_cfg_to_qcaps(struct hwrm_func_cfg_input *fcfg,
				   struct hwrm_func_qcaps_output *qcaps)
{
	qcaps->max_rsscos_ctx = fcfg->num_rsscos_ctxs;
	memcpy(qcaps->mac_address, fcfg->dflt_mac_addr,
	       sizeof(qcaps->mac_address));
	qcaps->max_l2_ctxs = fcfg->num_l2_ctxs;
	qcaps->max_rx_rings = fcfg->num_rx_rings;
	qcaps->max_tx_rings = fcfg->num_tx_rings;
	qcaps->max_cmpl_rings = fcfg->num_cmpl_rings;
	qcaps->max_stat_ctx = fcfg->num_stat_ctxs;

	qcaps->first_vf_id = 0;
	qcaps->max_vnics = fcfg->num_vnics;
	qcaps->max_decap_records = 0;
	qcaps->max_encap_records = 0;
	qcaps->max_tx_wm_flows = 0;
	qcaps->max_tx_em_flows = 0;
	qcaps->max_rx_wm_flows = 0;
	qcaps->max_rx_em_flows = 0;
	qcaps->max_flow_id = 0;
	qcaps->max_mcast_filters = fcfg->num_mcast_filters;
	qcaps->max_sp_tx_rings = 0;
	qcaps->max_hw_ring_grps = fcfg->num_hw_ring_grps;
}
2244 static int bnxt_hwrm_pf_func_cfg(struct bnxt *bp, int tx_rings)
2246 struct hwrm_func_cfg_input req = {0};
2247 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2250 req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
2251 HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
2252 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
2253 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
2254 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
2255 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
2256 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
2257 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
2258 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
2259 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
2260 req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
2261 req.mtu = rte_cpu_to_le_16(BNXT_MAX_MTU);
2262 req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2263 ETHER_CRC_LEN + VLAN_TAG_SIZE);
2264 req.num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx);
2265 req.num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx);
2266 req.num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings);
2267 req.num_tx_rings = rte_cpu_to_le_16(tx_rings);
2268 req.num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings);
2269 req.num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx);
2270 req.num_vnics = rte_cpu_to_le_16(bp->max_vnics);
2271 req.num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps);
2272 req.fid = rte_cpu_to_le_16(0xffff);
2274 HWRM_PREP(req, FUNC_CFG);
2276 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2278 HWRM_CHECK_RESULT();
2284 static void populate_vf_func_cfg_req(struct bnxt *bp,
2285 struct hwrm_func_cfg_input *req,
2288 req->enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_MTU |
2289 HWRM_FUNC_CFG_INPUT_ENABLES_MRU |
2290 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RSSCOS_CTXS |
2291 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_STAT_CTXS |
2292 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_CMPL_RINGS |
2293 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_TX_RINGS |
2294 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_RX_RINGS |
2295 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_L2_CTXS |
2296 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_VNICS |
2297 HWRM_FUNC_CFG_INPUT_ENABLES_NUM_HW_RING_GRPS);
2299 req->mtu = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2300 ETHER_CRC_LEN + VLAN_TAG_SIZE);
2301 req->mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN +
2302 ETHER_CRC_LEN + VLAN_TAG_SIZE);
2303 req->num_rsscos_ctxs = rte_cpu_to_le_16(bp->max_rsscos_ctx / (num_vfs + 1));
2305 req->num_stat_ctxs = rte_cpu_to_le_16(bp->max_stat_ctx / (num_vfs + 1));
2306 req->num_cmpl_rings = rte_cpu_to_le_16(bp->max_cp_rings / (num_vfs + 1));
2308 req->num_tx_rings = rte_cpu_to_le_16(bp->max_tx_rings / (num_vfs + 1));
2309 req->num_rx_rings = rte_cpu_to_le_16(bp->max_rx_rings / (num_vfs + 1));
2310 req->num_l2_ctxs = rte_cpu_to_le_16(bp->max_l2_ctx / (num_vfs + 1));
2311 /* TODO: For now, do not support VMDq/RFS on VFs. */
2312 req->num_vnics = rte_cpu_to_le_16(1);
2313 req->num_hw_ring_grps = rte_cpu_to_le_16(bp->max_ring_grps / (num_vfs + 1));
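/*
 * If the VF's default MAC address is all zeroes, generate a random one
 * and flag the VF so the driver remembers the address was not
 * administratively assigned.
 */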
2317 static void add_random_mac_if_needed(struct bnxt *bp,
2318 struct hwrm_func_cfg_input *cfg_req,
2321 struct ether_addr mac;
2323 if (bnxt_hwrm_func_qcfg_vf_default_mac(bp, vf, &mac))
2326 if (memcmp(mac.addr_bytes, "\x00\x00\x00\x00\x00\x00", 6) == 0) {
2328 cfg_req->enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
2329 eth_random_addr(cfg_req->dflt_mac_addr);
2330 bp->pf.vf_info[vf].random_mac = true;
2332 memcpy(cfg_req->dflt_mac_addr, mac.addr_bytes, ETHER_ADDR_LEN);
2336 static void reserve_resources_from_vf(struct bnxt *bp,
2337 struct hwrm_func_cfg_input *cfg_req,
2340 struct hwrm_func_qcaps_input req = {0};
2341 struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
2344 /* Get the actual allocated values now */
2345 HWRM_PREP(req, FUNC_QCAPS);
2346 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2347 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2350 RTE_LOG(ERR, PMD, "hwrm_func_qcaps failed rc:%d\n", rc);
2351 copy_func_cfg_to_qcaps(cfg_req, resp);
2352 } else if (resp->error_code) {
2353 rc = rte_le_to_cpu_16(resp->error_code);
2354 RTE_LOG(ERR, PMD, "hwrm_func_qcaps error %d\n", rc);
2355 copy_func_cfg_to_qcaps(cfg_req, resp);
2358 bp->max_rsscos_ctx -= rte_le_to_cpu_16(resp->max_rsscos_ctx);
2359 bp->max_stat_ctx -= rte_le_to_cpu_16(resp->max_stat_ctx);
2360 bp->max_cp_rings -= rte_le_to_cpu_16(resp->max_cmpl_rings);
2361 bp->max_tx_rings -= rte_le_to_cpu_16(resp->max_tx_rings);
2362 bp->max_rx_rings -= rte_le_to_cpu_16(resp->max_rx_rings);
2363 bp->max_l2_ctx -= rte_le_to_cpu_16(resp->max_l2_ctxs);
2365 * TODO: While VMDq is not supported on VFs, max_vnics is always
2366 * forced to 1 in this case.
2368 //bp->max_vnics -= rte_le_to_cpu_16(resp->max_vnics);
2369 bp->max_ring_grps -= rte_le_to_cpu_16(resp->max_hw_ring_grps);
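/* Query a VF's function config and return its current default VLAN. */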
2374 int bnxt_hwrm_func_qcfg_current_vf_vlan(struct bnxt *bp, int vf)
2376 struct hwrm_func_qcfg_input req = {0};
2377 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2380 /* Query the VF's function config to read its current default VLAN */
2381 HWRM_PREP(req, FUNC_QCFG);
2382 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2383 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2385 RTE_LOG(ERR, PMD, "hwrm_func_qcfg failed rc:%d\n", rc);
2387 } else if (resp->error_code) {
2388 rc = rte_le_to_cpu_16(resp->error_code);
2389 RTE_LOG(ERR, PMD, "hwrm_func_qcfg error %d\n", rc);
2392 rc = rte_le_to_cpu_16(resp->vlan);
2399 static int update_pf_resource_max(struct bnxt *bp)
2401 struct hwrm_func_qcfg_input req = {0};
2402 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2405 /* And copy the allocated numbers into the pf struct */
2406 HWRM_PREP(req, FUNC_QCFG);
2407 req.fid = rte_cpu_to_le_16(0xffff);
2408 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2409 HWRM_CHECK_RESULT();
2411 /* Only TX ring value reflects actual allocation? TODO */
2412 bp->max_tx_rings = rte_le_to_cpu_16(resp->alloc_tx_rings);
2413 bp->pf.evb_mode = resp->evb_mode;
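/*
 * PF-only (no SR-IOV) setup: query capabilities, then configure the PF
 * with standard TX ring mode disabled so it can claim all TX rings.
 */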
2420 int bnxt_hwrm_allocate_pf_only(struct bnxt *bp)
2425 RTE_LOG(ERR, PMD, "Attempt to allcoate VFs on a VF!\n");
2429 rc = bnxt_hwrm_func_qcaps(bp);
2433 bp->pf.func_cfg_flags &=
2434 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
2435 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
2436 bp->pf.func_cfg_flags |=
2437 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE;
2438 rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
2442 int bnxt_hwrm_allocate_vfs(struct bnxt *bp, int num_vfs)
2444 struct hwrm_func_cfg_input req = {0};
2445 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2452 RTE_LOG(ERR, PMD, "Attempt to allcoate VFs on a VF!\n");
2456 rc = bnxt_hwrm_func_qcaps(bp);
2461 bp->pf.active_vfs = num_vfs;
2464 * First, configure the PF to use only one TX ring. This ensures that
2465 * there are enough rings for all VFs.
2467 * If we don't do this, then when func_cfg() runs for the VFs later,
2468 * extra rings will already be locked to the PF and unavailable to them.
2471 * This has been fixed in firmware versions above 20.6.54.
2473 bp->pf.func_cfg_flags &=
2474 ~(HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE |
2475 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_DISABLE);
2476 bp->pf.func_cfg_flags |=
2477 HWRM_FUNC_CFG_INPUT_FLAGS_STD_TX_RING_MODE_ENABLE;
2478 rc = bnxt_hwrm_pf_func_cfg(bp, 1);
2483 * Now, create and register a buffer to hold forwarded VF requests
2485 req_buf_sz = num_vfs * HWRM_MAX_REQ_LEN;
2486 bp->pf.vf_req_buf = rte_malloc("bnxt_vf_fwd", req_buf_sz,
2487 page_roundup(num_vfs * HWRM_MAX_REQ_LEN));
2488 if (bp->pf.vf_req_buf == NULL) {
2492 for (sz = 0; sz < req_buf_sz; sz += getpagesize())
2493 rte_mem_lock_page(((char *)bp->pf.vf_req_buf) + sz);
2494 for (i = 0; i < num_vfs; i++)
2495 bp->pf.vf_info[i].req_buf = ((char *)bp->pf.vf_req_buf) +
2496 (i * HWRM_MAX_REQ_LEN);
2498 rc = bnxt_hwrm_func_buf_rgtr(bp);
2502 populate_vf_func_cfg_req(bp, &req, num_vfs);
2504 bp->pf.active_vfs = 0;
2505 for (i = 0; i < num_vfs; i++) {
2506 add_random_mac_if_needed(bp, &req, i);
2508 HWRM_PREP(req, FUNC_CFG);
2509 req.flags = rte_cpu_to_le_32(bp->pf.vf_info[i].func_cfg_flags);
2510 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[i].fid);
2511 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2513 /* Clear enable flag for next pass */
2514 req.enables &= ~rte_cpu_to_le_32(
2515 HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_MAC_ADDR);
2517 if (rc || resp->error_code) {
2519 "Failed to initizlie VF %d\n", i);
2521 "Not all VFs available. (%d, %d)\n",
2522 rc, resp->error_code);
2529 reserve_resources_from_vf(bp, &req, i);
2530 bp->pf.active_vfs++;
2531 bnxt_hwrm_func_clr_stats(bp, bp->pf.vf_info[i].fid);
2535 * Now configure the PF to use "the rest" of the resources.
2536 * We use STD_TX_RING_MODE here, which limits the number of TX
2537 * rings but allows QoS to function properly. Without it, the PF
2538 * rings would break the bandwidth settings.
2540 rc = bnxt_hwrm_pf_func_cfg(bp, bp->max_tx_rings);
2544 rc = update_pf_resource_max(bp);
2551 bnxt_hwrm_func_buf_unrgtr(bp);
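/* Program the PF's edge virtual bridging (EVB) mode into the firmware. */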
2555 int bnxt_hwrm_pf_evb_mode(struct bnxt *bp)
2557 struct hwrm_func_cfg_input req = {0};
2558 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2561 HWRM_PREP(req, FUNC_CFG);
2563 req.fid = rte_cpu_to_le_16(0xffff);
2564 req.enables = rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_EVB_MODE);
2565 req.evb_mode = bp->pf.evb_mode;
2567 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2568 HWRM_CHECK_RESULT();
2574 int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, uint16_t port,
2575 uint8_t tunnel_type)
2577 struct hwrm_tunnel_dst_port_alloc_input req = {0};
2578 struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
2581 HWRM_PREP(req, TUNNEL_DST_PORT_ALLOC);
2582 req.tunnel_type = tunnel_type;
2583 req.tunnel_dst_port_val = port;
2584 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2585 HWRM_CHECK_RESULT();
2587 switch (tunnel_type) {
2588 case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN:
2589 bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
2590 bp->vxlan_port = port;
2592 case HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_GENEVE:
2593 bp->geneve_fw_dst_port_id = resp->tunnel_dst_port_id;
2594 bp->geneve_port = port;
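/* Release a previously allocated VXLAN/Geneve destination UDP port. */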
2605 int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, uint16_t port,
2606 uint8_t tunnel_type)
2608 struct hwrm_tunnel_dst_port_free_input req = {0};
2609 struct hwrm_tunnel_dst_port_free_output *resp = bp->hwrm_cmd_resp_addr;
2612 HWRM_PREP(req, TUNNEL_DST_PORT_FREE);
2614 req.tunnel_type = tunnel_type;
2615 req.tunnel_dst_port_id = rte_cpu_to_be_16(port);
2616 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2618 HWRM_CHECK_RESULT();
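/*
 * Illustrative sketch (not code from this file): a udp_tunnel_port_add
 * callback in the ethdev layer would typically pair these two helpers,
 * e.g.
 *
 *	rc = bnxt_hwrm_tunnel_dst_port_alloc(bp, udp_port,
 *		HWRM_TUNNEL_DST_PORT_ALLOC_INPUT_TUNNEL_TYPE_VXLAN);
 *
 * on port add, and call bnxt_hwrm_tunnel_dst_port_free() with the same
 * tunnel type on removal.
 */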
2624 int bnxt_hwrm_func_cfg_vf_set_flags(struct bnxt *bp, uint16_t vf,
2627 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2628 struct hwrm_func_cfg_input req = {0};
2631 HWRM_PREP(req, FUNC_CFG);
2633 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2634 req.flags = rte_cpu_to_le_32(flags);
2635 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2637 HWRM_CHECK_RESULT();
2643 void vf_vnic_set_rxmask_cb(struct bnxt_vnic_info *vnic, void *flagp)
2645 uint32_t *flag = flagp;
2647 vnic->flags = *flag;
2650 int bnxt_set_rx_mask_no_vlan(struct bnxt *bp, struct bnxt_vnic_info *vnic)
2652 return bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic, 0, NULL);
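/*
 * Register the VF forwarded-request buffer with the firmware so that
 * VF HWRM commands can be placed in PF memory for inspection before
 * being forwarded, rejected, or executed.
 */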
2655 int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp)
2658 struct hwrm_func_buf_rgtr_input req = {.req_type = 0 };
2659 struct hwrm_func_buf_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
2661 HWRM_PREP(req, FUNC_BUF_RGTR);
2663 req.req_buf_num_pages = rte_cpu_to_le_16(1);
2664 req.req_buf_page_size = rte_cpu_to_le_16(
2665 page_getenum(bp->pf.active_vfs * HWRM_MAX_REQ_LEN));
2666 req.req_buf_len = rte_cpu_to_le_16(HWRM_MAX_REQ_LEN);
2667 req.req_buf_page_addr[0] =
2668 rte_cpu_to_le_64(rte_mem_virt2iova(bp->pf.vf_req_buf));
2669 if (req.req_buf_page_addr[0] == 0) {
2671 "unable to map buffer address to physical memory\n");
2675 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2677 HWRM_CHECK_RESULT();
2683 int bnxt_hwrm_func_buf_unrgtr(struct bnxt *bp)
2686 struct hwrm_func_buf_unrgtr_input req = {.req_type = 0 };
2687 struct hwrm_func_buf_unrgtr_output *resp = bp->hwrm_cmd_resp_addr;
2689 HWRM_PREP(req, FUNC_BUF_UNRGTR);
2691 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2693 HWRM_CHECK_RESULT();
2699 int bnxt_hwrm_func_cfg_def_cp(struct bnxt *bp)
2701 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2702 struct hwrm_func_cfg_input req = {0};
2705 HWRM_PREP(req, FUNC_CFG);
2707 req.fid = rte_cpu_to_le_16(0xffff);
2708 req.flags = rte_cpu_to_le_32(bp->pf.func_cfg_flags);
2709 req.enables = rte_cpu_to_le_32(
2710 HWRM_FUNC_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
2711 req.async_event_cr = rte_cpu_to_le_16(
2712 bp->def_cp_ring->cp_ring_struct->fw_ring_id);
2713 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2715 HWRM_CHECK_RESULT();
2721 int bnxt_hwrm_vf_func_cfg_def_cp(struct bnxt *bp)
2723 struct hwrm_func_vf_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2724 struct hwrm_func_vf_cfg_input req = {0};
2727 HWRM_PREP(req, FUNC_VF_CFG);
2729 req.enables = rte_cpu_to_le_32(
2730 HWRM_FUNC_VF_CFG_INPUT_ENABLES_ASYNC_EVENT_CR);
2731 req.async_event_cr = rte_cpu_to_le_16(
2732 bp->def_cp_ring->cp_ring_struct->fw_ring_id);
2733 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2735 HWRM_CHECK_RESULT();
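/*
 * Set the default VLAN of a VF (is_vf true) or of the PF itself
 * (fid 0xffff otherwise).
 */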
2741 int bnxt_hwrm_set_default_vlan(struct bnxt *bp, int vf, uint8_t is_vf)
2743 struct hwrm_func_cfg_input req = {0};
2744 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2745 uint16_t dflt_vlan, fid;
2746 uint32_t func_cfg_flags;
2749 HWRM_PREP(req, FUNC_CFG);
2752 dflt_vlan = bp->pf.vf_info[vf].dflt_vlan;
2753 fid = bp->pf.vf_info[vf].fid;
2754 func_cfg_flags = bp->pf.vf_info[vf].func_cfg_flags;
2756 fid = rte_cpu_to_le_16(0xffff);
2757 func_cfg_flags = bp->pf.func_cfg_flags;
2758 dflt_vlan = bp->vlan;
2761 req.flags = rte_cpu_to_le_32(func_cfg_flags);
2762 req.fid = rte_cpu_to_le_16(fid);
2763 req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
2764 req.dflt_vlan = rte_cpu_to_le_16(dflt_vlan);
2766 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2768 HWRM_CHECK_RESULT();
2774 int bnxt_hwrm_func_bw_cfg(struct bnxt *bp, uint16_t vf,
2775 uint16_t max_bw, uint16_t enables)
2777 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2778 struct hwrm_func_cfg_input req = {0};
2781 HWRM_PREP(req, FUNC_CFG);
2783 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2784 req.enables |= rte_cpu_to_le_32(enables);
2785 req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
2786 req.max_bw = rte_cpu_to_le_32(max_bw);
2787 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2789 HWRM_CHECK_RESULT();
2795 int bnxt_hwrm_set_vf_vlan(struct bnxt *bp, int vf)
2797 struct hwrm_func_cfg_input req = {0};
2798 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
2801 HWRM_PREP(req, FUNC_CFG);
2803 req.flags = rte_cpu_to_le_32(bp->pf.vf_info[vf].func_cfg_flags);
2804 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2805 req.enables |= rte_cpu_to_le_32(HWRM_FUNC_CFG_INPUT_ENABLES_DFLT_VLAN);
2806 req.dflt_vlan = rte_cpu_to_le_16(bp->pf.vf_info[vf].dflt_vlan);
2808 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2810 HWRM_CHECK_RESULT();
2816 int bnxt_hwrm_reject_fwd_resp(struct bnxt *bp, uint16_t target_id,
2817 void *encaped, size_t ec_size)
2820 struct hwrm_reject_fwd_resp_input req = {.req_type = 0};
2821 struct hwrm_reject_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
2823 if (ec_size > sizeof(req.encap_request))
2826 HWRM_PREP(req, REJECT_FWD_RESP);
2828 req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
2829 memcpy(req.encap_request, encaped, ec_size);
2831 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2833 HWRM_CHECK_RESULT();
2839 int bnxt_hwrm_func_qcfg_vf_default_mac(struct bnxt *bp, uint16_t vf,
2840 struct ether_addr *mac)
2842 struct hwrm_func_qcfg_input req = {0};
2843 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
2846 HWRM_PREP(req, FUNC_QCFG);
2848 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
2849 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2851 HWRM_CHECK_RESULT();
2853 memcpy(mac->addr_bytes, resp->mac_address, ETHER_ADDR_LEN);
2860 int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, uint16_t target_id,
2861 void *encaped, size_t ec_size)
2864 struct hwrm_exec_fwd_resp_input req = {.req_type = 0};
2865 struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr;
2867 if (ec_size > sizeof(req.encap_request))
2870 HWRM_PREP(req, EXEC_FWD_RESP);
2872 req.encap_resp_target_id = rte_cpu_to_le_16(target_id);
2873 memcpy(req.encap_request, encaped, ec_size);
2875 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2877 HWRM_CHECK_RESULT();
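/*
 * Read one statistics context and accumulate it into the per-queue
 * counters of rte_eth_stats; "rx" selects RX versus TX accounting.
 */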
2883 int bnxt_hwrm_ctx_qstats(struct bnxt *bp, uint32_t cid, int idx,
2884 struct rte_eth_stats *stats, uint8_t rx)
2887 struct hwrm_stat_ctx_query_input req = {.req_type = 0};
2888 struct hwrm_stat_ctx_query_output *resp = bp->hwrm_cmd_resp_addr;
2890 HWRM_PREP(req, STAT_CTX_QUERY);
2892 req.stat_ctx_id = rte_cpu_to_le_32(cid);
2894 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2896 HWRM_CHECK_RESULT();
2899 stats->q_ipackets[idx] = rte_le_to_cpu_64(resp->rx_ucast_pkts);
2900 stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_mcast_pkts);
2901 stats->q_ipackets[idx] += rte_le_to_cpu_64(resp->rx_bcast_pkts);
2902 stats->q_ibytes[idx] = rte_le_to_cpu_64(resp->rx_ucast_bytes);
2903 stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_mcast_bytes);
2904 stats->q_ibytes[idx] += rte_le_to_cpu_64(resp->rx_bcast_bytes);
2905 stats->q_errors[idx] = rte_le_to_cpu_64(resp->rx_err_pkts);
2906 stats->q_errors[idx] += rte_le_to_cpu_64(resp->rx_drop_pkts);
2908 stats->q_opackets[idx] = rte_le_to_cpu_64(resp->tx_ucast_pkts);
2909 stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_mcast_pkts);
2910 stats->q_opackets[idx] += rte_le_to_cpu_64(resp->tx_bcast_pkts);
2911 stats->q_obytes[idx] = rte_le_to_cpu_64(resp->tx_ucast_bytes);
2912 stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_mcast_bytes);
2913 stats->q_obytes[idx] += rte_le_to_cpu_64(resp->tx_bcast_bytes);
2914 stats->q_errors[idx] += rte_le_to_cpu_64(resp->tx_err_pkts);
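/*
 * Ask the firmware to DMA the port-level TX/RX statistics into the
 * buffers mapped at hw_tx_port_stats_map/hw_rx_port_stats_map; a no-op
 * unless the device advertises port statistics support.
 */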
2923 int bnxt_hwrm_port_qstats(struct bnxt *bp)
2925 struct hwrm_port_qstats_input req = {0};
2926 struct hwrm_port_qstats_output *resp = bp->hwrm_cmd_resp_addr;
2927 struct bnxt_pf_info *pf = &bp->pf;
2930 if (!(bp->flags & BNXT_FLAG_PORT_STATS))
2933 HWRM_PREP(req, PORT_QSTATS);
2935 req.port_id = rte_cpu_to_le_16(pf->port_id);
2936 req.tx_stat_host_addr = rte_cpu_to_le_64(bp->hw_tx_port_stats_map);
2937 req.rx_stat_host_addr = rte_cpu_to_le_64(bp->hw_rx_port_stats_map);
2938 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2940 HWRM_CHECK_RESULT();
2946 int bnxt_hwrm_port_clr_stats(struct bnxt *bp)
2948 struct hwrm_port_clr_stats_input req = {0};
2949 struct hwrm_port_clr_stats_output *resp = bp->hwrm_cmd_resp_addr;
2950 struct bnxt_pf_info *pf = &bp->pf;
2953 if (!(bp->flags & BNXT_FLAG_PORT_STATS))
2956 HWRM_PREP(req, PORT_CLR_STATS);
2958 req.port_id = rte_cpu_to_le_16(pf->port_id);
2959 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2961 HWRM_CHECK_RESULT();
2967 int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
2969 struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
2970 struct hwrm_port_led_qcaps_input req = {0};
2976 HWRM_PREP(req, PORT_LED_QCAPS);
2977 req.port_id = bp->pf.port_id;
2978 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
2980 HWRM_CHECK_RESULT();
2982 if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
2985 bp->num_leds = resp->num_leds;
2986 memcpy(bp->leds, &resp->led0_id,
2987 sizeof(bp->leds[0]) * bp->num_leds);
2988 for (i = 0; i < bp->num_leds; i++) {
2989 struct bnxt_led_info *led = &bp->leds[i];
2991 uint16_t caps = led->led_state_caps;
2993 if (!led->led_group_id ||
2994 !BNXT_LED_ALT_BLINK_CAP(caps)) {
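/*
 * Drive all discovered LEDs: blink-alternate with 500 ms on/off times
 * when led_on is set, otherwise restore the default LED state.
 */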
3006 int bnxt_hwrm_port_led_cfg(struct bnxt *bp, bool led_on)
3008 struct hwrm_port_led_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3009 struct hwrm_port_led_cfg_input req = {0};
3010 struct bnxt_led_cfg *led_cfg;
3011 uint8_t led_state = HWRM_PORT_LED_QCFG_OUTPUT_LED0_STATE_DEFAULT;
3012 uint16_t duration = 0;
3015 if (!bp->num_leds || BNXT_VF(bp))
3018 HWRM_PREP(req, PORT_LED_CFG);
3021 led_state = HWRM_PORT_LED_CFG_INPUT_LED0_STATE_BLINKALT;
3022 duration = rte_cpu_to_le_16(500);
3024 req.port_id = bp->pf.port_id;
3025 req.num_leds = bp->num_leds;
3026 led_cfg = (struct bnxt_led_cfg *)&req.led0_id;
3027 for (i = 0; i < bp->num_leds; i++, led_cfg++) {
3028 req.enables |= BNXT_LED_DFLT_ENABLES(i);
3029 led_cfg->led_id = bp->leds[i].led_id;
3030 led_cfg->led_state = led_state;
3031 led_cfg->led_blink_on = duration;
3032 led_cfg->led_blink_off = duration;
3033 led_cfg->led_group_id = bp->leds[i].led_group_id;
3036 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3038 HWRM_CHECK_RESULT();
3044 int bnxt_hwrm_nvm_get_dir_info(struct bnxt *bp, uint32_t *entries,
3048 struct hwrm_nvm_get_dir_info_input req = {0};
3049 struct hwrm_nvm_get_dir_info_output *resp = bp->hwrm_cmd_resp_addr;
3051 HWRM_PREP(req, NVM_GET_DIR_INFO);
3053 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3055 HWRM_CHECK_RESULT();
3059 *entries = rte_le_to_cpu_32(resp->entries);
3060 *length = rte_le_to_cpu_32(resp->entry_length);
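/*
 * Copy the NVM directory into caller-supplied memory. Note that the
 * first two bytes of "data" receive only the low byte of the entry
 * count and of the entry length; the raw directory entries follow.
 */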
3065 int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data)
3068 uint32_t dir_entries;
3069 uint32_t entry_length;
3072 rte_iova_t dma_handle;
3073 struct hwrm_nvm_get_dir_entries_input req = {0};
3074 struct hwrm_nvm_get_dir_entries_output *resp = bp->hwrm_cmd_resp_addr;
3076 rc = bnxt_hwrm_nvm_get_dir_info(bp, &dir_entries, &entry_length);
3080 *data++ = dir_entries;
3081 *data++ = entry_length;
3083 memset(data, 0xff, len);
3085 buflen = dir_entries * entry_length;
3086 buf = rte_malloc("nvm_dir", buflen, 0);
3087 rte_mem_lock_page(buf);
3090 dma_handle = rte_mem_virt2iova(buf);
3091 if (dma_handle == 0) {
3093 "unable to map response address to physical memory\n");
3096 HWRM_PREP(req, NVM_GET_DIR_ENTRIES);
3097 req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
3098 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3100 HWRM_CHECK_RESULT();
3104 memcpy(data, buf, len > buflen ? buflen : len);
3111 int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index,
3112 uint32_t offset, uint32_t length,
3117 rte_iova_t dma_handle;
3118 struct hwrm_nvm_read_input req = {0};
3119 struct hwrm_nvm_read_output *resp = bp->hwrm_cmd_resp_addr;
3121 buf = rte_malloc("nvm_item", length, 0);
3122 rte_mem_lock_page(buf);
3126 dma_handle = rte_mem_virt2iova(buf);
3127 if (dma_handle == 0) {
3129 "unable to map response address to physical memory\n");
3132 HWRM_PREP(req, NVM_READ);
3133 req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
3134 req.dir_idx = rte_cpu_to_le_16(index);
3135 req.offset = rte_cpu_to_le_32(offset);
3136 req.len = rte_cpu_to_le_32(length);
3137 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3138 HWRM_CHECK_RESULT();
3141 memcpy(data, buf, length);
3147 int bnxt_hwrm_erase_nvram_directory(struct bnxt *bp, uint8_t index)
3150 struct hwrm_nvm_erase_dir_entry_input req = {0};
3151 struct hwrm_nvm_erase_dir_entry_output *resp = bp->hwrm_cmd_resp_addr;
3153 HWRM_PREP(req, NVM_ERASE_DIR_ENTRY);
3154 req.dir_idx = rte_cpu_to_le_16(index);
3155 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3156 HWRM_CHECK_RESULT();
3163 int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
3164 uint16_t dir_ordinal, uint16_t dir_ext,
3165 uint16_t dir_attr, const uint8_t *data,
3169 struct hwrm_nvm_write_input req = {0};
3170 struct hwrm_nvm_write_output *resp = bp->hwrm_cmd_resp_addr;
3171 rte_iova_t dma_handle;
3174 HWRM_PREP(req, NVM_WRITE);
3176 req.dir_type = rte_cpu_to_le_16(dir_type);
3177 req.dir_ordinal = rte_cpu_to_le_16(dir_ordinal);
3178 req.dir_ext = rte_cpu_to_le_16(dir_ext);
3179 req.dir_attr = rte_cpu_to_le_16(dir_attr);
3180 req.dir_data_length = rte_cpu_to_le_32(data_len);
3182 buf = rte_malloc("nvm_write", data_len, 0);
3183 rte_mem_lock_page(buf);
3187 dma_handle = rte_mem_virt2iova(buf);
3188 if (dma_handle == 0) {
3190 "unable to map response address to physical memory\n");
3193 memcpy(buf, data, data_len);
3194 req.host_src_addr = rte_cpu_to_le_64(dma_handle);
3196 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3198 HWRM_CHECK_RESULT();
3206 bnxt_vnic_count(struct bnxt_vnic_info *vnic __rte_unused, void *cbdata)
3208 uint32_t *count = cbdata;
3210 *count = *count + 1;
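/* No-op HWRM callback used when the caller only needs to count VNICs. */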
3213 static int bnxt_vnic_count_hwrm_stub(struct bnxt *bp __rte_unused,
3214 struct bnxt_vnic_info *vnic __rte_unused)
3219 int bnxt_vf_vnic_count(struct bnxt *bp, uint16_t vf)
3223 bnxt_hwrm_func_vf_vnic_query_and_config(bp, vf, bnxt_vnic_count,
3224 &count, bnxt_vnic_count_hwrm_stub);
3229 static int bnxt_hwrm_func_vf_vnic_query(struct bnxt *bp, uint16_t vf,
3232 struct hwrm_func_vf_vnic_ids_query_input req = {0};
3233 struct hwrm_func_vf_vnic_ids_query_output *resp =
3234 bp->hwrm_cmd_resp_addr;
3237 /* First query all VNIC ids */
3238 HWRM_PREP(req, FUNC_VF_VNIC_IDS_QUERY);
3240 req.vf_id = rte_cpu_to_le_16(bp->pf.first_vf_id + vf);
3241 req.max_vnic_id_cnt = rte_cpu_to_le_32(bp->pf.total_vnics);
3242 req.vnic_id_tbl_addr = rte_cpu_to_le_64(rte_mem_virt2iova(vnic_ids));
3244 if (req.vnic_id_tbl_addr == 0) {
3247 "unable to map VNIC ID table address to physical memory\n");
3250 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3253 RTE_LOG(ERR, PMD, "hwrm_func_vf_vnic_query failed rc:%d\n", rc);
3255 } else if (resp->error_code) {
3256 rc = rte_le_to_cpu_16(resp->error_code);
3258 RTE_LOG(ERR, PMD, "hwrm_func_vf_vnic_query error %d\n", rc);
3261 rc = rte_le_to_cpu_32(resp->vnic_id_cnt);
3269 * This function queries the VNIC IDs for a specified VF, calls
3270 * vnic_cb for each to update the necessary vnic_info field from cbdata,
3271 * and then calls hwrm_cb to program the new VNIC configuration.
3273 int bnxt_hwrm_func_vf_vnic_query_and_config(struct bnxt *bp, uint16_t vf,
3274 void (*vnic_cb)(struct bnxt_vnic_info *, void *), void *cbdata,
3275 int (*hwrm_cb)(struct bnxt *bp, struct bnxt_vnic_info *vnic))
3277 struct bnxt_vnic_info vnic;
3279 int i, num_vnic_ids;
3284 /* First query all VNIC ids */
3285 vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
3286 vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
3287 RTE_CACHE_LINE_SIZE);
3288 if (vnic_ids == NULL) {
3292 for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
3293 rte_mem_lock_page(((char *)vnic_ids) + sz);
3295 num_vnic_ids = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
3297 if (num_vnic_ids < 0)
3298 return num_vnic_ids;
3300 /* Retrieve each VNIC, let vnic_cb update it, then reprogram via hwrm_cb */
3302 for (i = 0; i < num_vnic_ids; i++) {
3303 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
3304 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
3305 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic, bp->pf.first_vf_id + vf);
3308 if (vnic.mru <= 4) /* Indicates unallocated */
3311 vnic_cb(&vnic, cbdata);
3313 rc = hwrm_cb(bp, &vnic);
3323 int bnxt_hwrm_func_cfg_vf_set_vlan_anti_spoof(struct bnxt *bp, uint16_t vf,
3326 struct hwrm_func_cfg_output *resp = bp->hwrm_cmd_resp_addr;
3327 struct hwrm_func_cfg_input req = {0};
3330 HWRM_PREP(req, FUNC_CFG);
3332 req.fid = rte_cpu_to_le_16(bp->pf.vf_info[vf].fid);
3333 req.enables |= rte_cpu_to_le_32(
3334 HWRM_FUNC_CFG_INPUT_ENABLES_VLAN_ANTISPOOF_MODE);
3335 req.vlan_antispoof_mode = on ?
3336 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_VALIDATE_VLAN :
3337 HWRM_FUNC_CFG_INPUT_VLAN_ANTISPOOF_MODE_NOCHECK;
3338 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3340 HWRM_CHECK_RESULT();
3346 int bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(struct bnxt *bp, int vf)
3348 struct bnxt_vnic_info vnic;
3351 int num_vnic_ids, i;
3355 vnic_id_sz = bp->pf.total_vnics * sizeof(*vnic_ids);
3356 vnic_ids = rte_malloc("bnxt_hwrm_vf_vnic_ids_query", vnic_id_sz,
3357 RTE_CACHE_LINE_SIZE);
3358 if (vnic_ids == NULL) {
3363 for (sz = 0; sz < vnic_id_sz; sz += getpagesize())
3364 rte_mem_lock_page(((char *)vnic_ids) + sz);
3366 rc = bnxt_hwrm_func_vf_vnic_query(bp, vf, vnic_ids);
3372 * Loop through to find the default VNIC ID.
3373 * TODO: The easier way would be to obtain the resp->dflt_vnic_id
3374 * by sending the hwrm_func_qcfg command to the firmware.
3376 for (i = 0; i < num_vnic_ids; i++) {
3377 memset(&vnic, 0, sizeof(struct bnxt_vnic_info));
3378 vnic.fw_vnic_id = rte_le_to_cpu_16(vnic_ids[i]);
3379 rc = bnxt_hwrm_vnic_qcfg(bp, &vnic,
3380 bp->pf.first_vf_id + vf);
3383 if (vnic.func_default) {
3385 return vnic.fw_vnic_id;
3388 /* Could not find a default VNIC. */
3389 RTE_LOG(ERR, PMD, "No default VNIC\n");
3395 int bnxt_hwrm_set_em_filter(struct bnxt *bp,
3397 struct bnxt_filter_info *filter)
3400 struct hwrm_cfa_em_flow_alloc_input req = {.req_type = 0 };
3401 struct hwrm_cfa_em_flow_alloc_output *resp = bp->hwrm_cmd_resp_addr;
3402 uint32_t enables = 0;
3404 if (filter->fw_em_filter_id != UINT64_MAX)
3405 bnxt_hwrm_clear_em_filter(bp, filter);
3407 HWRM_PREP(req, CFA_EM_FLOW_ALLOC);
3409 req.flags = rte_cpu_to_le_32(filter->flags);
3411 enables = filter->enables |
3412 HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_ID;
3413 req.dst_id = rte_cpu_to_le_16(dst_id);
3415 if (filter->ip_addr_type) {
3416 req.ip_addr_type = filter->ip_addr_type;
3417 enables |= HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
3420 if (enables & HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
3421 req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
3423 if (enables & HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_MACADDR)
3424 memcpy(req.src_macaddr, filter->src_macaddr, ETHER_ADDR_LEN);
3427 if (enables & HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_MACADDR)
3428 memcpy(req.dst_macaddr, filter->dst_macaddr, ETHER_ADDR_LEN);
3431 if (enables & HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_OVLAN_VID)
3432 req.ovlan_vid = filter->l2_ovlan;
3434 if (enables & HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IVLAN_VID)
3435 req.ivlan_vid = filter->l2_ivlan;
3437 if (enables & HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_ETHERTYPE)
3438 req.ethertype = rte_cpu_to_be_16(filter->ethertype);
3440 if (enables & HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
3441 req.ip_protocol = filter->ip_protocol;
3443 if (enables & HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_IPADDR)
3444 req.src_ipaddr[0] = rte_cpu_to_be_32(filter->src_ipaddr[0]);
3446 if (enables & HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_IPADDR)
3447 req.dst_ipaddr[0] = rte_cpu_to_be_32(filter->dst_ipaddr[0]);
3449 if (enables & HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_SRC_PORT)
3450 req.src_port = rte_cpu_to_be_16(filter->src_port);
3452 if (enables & HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_DST_PORT)
3453 req.dst_port = rte_cpu_to_be_16(filter->dst_port);
3455 if (enables & HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
3456 req.mirror_vnic_id = filter->mirror_vnic_id;
3458 req.enables = rte_cpu_to_le_32(enables);
3460 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3462 HWRM_CHECK_RESULT();
3464 filter->fw_em_filter_id = rte_le_to_cpu_64(resp->em_filter_id);
3470 int bnxt_hwrm_clear_em_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
3473 struct hwrm_cfa_em_flow_free_input req = {.req_type = 0 };
3474 struct hwrm_cfa_em_flow_free_output *resp = bp->hwrm_cmd_resp_addr;
3476 if (filter->fw_em_filter_id == UINT64_MAX)
3479 RTE_LOG(ERR, PMD, "Clear EM filter\n");
3480 HWRM_PREP(req, CFA_EM_FLOW_FREE);
3482 req.em_filter_id = rte_cpu_to_le_64(filter->fw_em_filter_id);
3484 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3486 HWRM_CHECK_RESULT();
3489 filter->fw_em_filter_id = -1;
3490 filter->fw_l2_filter_id = -1;
3495 int bnxt_hwrm_set_ntuple_filter(struct bnxt *bp,
3497 struct bnxt_filter_info *filter)
3500 struct hwrm_cfa_ntuple_filter_alloc_input req = {.req_type = 0 };
3501 struct hwrm_cfa_ntuple_filter_alloc_output *resp =
3502 bp->hwrm_cmd_resp_addr;
3503 uint32_t enables = 0;
3505 if (filter->fw_ntuple_filter_id != UINT64_MAX)
3506 bnxt_hwrm_clear_ntuple_filter(bp, filter);
3508 HWRM_PREP(req, CFA_NTUPLE_FILTER_ALLOC);
3510 req.flags = rte_cpu_to_le_32(filter->flags);
3512 enables = filter->enables |
3513 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_ID;
3514 req.dst_id = rte_cpu_to_le_16(dst_id);
3517 if (filter->ip_addr_type) {
3518 req.ip_addr_type = filter->ip_addr_type;
3520 enables |= HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IPADDR_TYPE;
3523 if (enables & HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID)
3524 req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id);
3526 if (enables & HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_MACADDR)
3527 memcpy(req.src_macaddr, filter->src_macaddr, ETHER_ADDR_LEN);
3530 //if (enables & HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_MACADDR)
3531 //memcpy(req.dst_macaddr, filter->dst_macaddr, ETHER_ADDR_LEN);
3534 if (enables & HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_ETHERTYPE)
3535 req.ethertype = rte_cpu_to_be_16(filter->ethertype);
3537 if (enables & HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_IP_PROTOCOL)
3538 req.ip_protocol = filter->ip_protocol;
3540 if (enables & HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR)
3541 req.src_ipaddr[0] = rte_cpu_to_le_32(filter->src_ipaddr[0]);
3543 if (enables & HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_IPADDR_MASK)
3544 req.src_ipaddr_mask[0] =
3545 rte_cpu_to_le_32(filter->src_ipaddr_mask[0]);
3547 if (enables & HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR)
3548 req.dst_ipaddr[0] = rte_cpu_to_le_32(filter->dst_ipaddr[0]);
3550 if (enables & HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_IPADDR_MASK)
3551 req.dst_ipaddr_mask[0] =
3552 rte_cpu_to_le_32(filter->dst_ipaddr_mask[0]);
3554 if (enables & HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT)
3555 req.src_port = rte_cpu_to_le_16(filter->src_port);
3557 if (enables & HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_SRC_PORT_MASK)
3558 req.src_port_mask = rte_cpu_to_le_16(filter->src_port_mask);
3560 if (enables & HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT)
3561 req.dst_port = rte_cpu_to_le_16(filter->dst_port);
3563 if (enables & HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_DST_PORT_MASK)
3564 req.dst_port_mask = rte_cpu_to_le_16(filter->dst_port_mask);
3566 if (enables & HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID)
3567 req.mirror_vnic_id = filter->mirror_vnic_id;
3569 req.enables = rte_cpu_to_le_32(enables);
3571 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3573 HWRM_CHECK_RESULT();
3575 filter->fw_ntuple_filter_id = rte_le_to_cpu_64(resp->ntuple_filter_id);
3581 int bnxt_hwrm_clear_ntuple_filter(struct bnxt *bp,
3582 struct bnxt_filter_info *filter)
3585 struct hwrm_cfa_ntuple_filter_free_input req = {.req_type = 0 };
3586 struct hwrm_cfa_ntuple_filter_free_output *resp =
3587 bp->hwrm_cmd_resp_addr;
3589 if (filter->fw_ntuple_filter_id == UINT64_MAX)
3592 HWRM_PREP(req, CFA_NTUPLE_FILTER_FREE);
3594 req.ntuple_filter_id = rte_cpu_to_le_64(filter->fw_ntuple_filter_id);
3596 rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
3598 HWRM_CHECK_RESULT();
3601 filter->fw_ntuple_filter_id = -1;