New upstream version 17.11.4
diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c
index bf1fb46..db3222f 100644
@@ -197,6 +197,10 @@ err_ret:
                RTE_LOG(ERR, PMD, "%s failed rc:%d\n", \
                        __func__, rc); \
                rte_spinlock_unlock(&bp->hwrm_lock); \
+               if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \
+                       rc = -EACCES; \
+               else if (rc > 0) \
+                       rc = -EINVAL; \
                return rc; \
        } \
        if (resp->error_code) { \
@@ -218,6 +222,10 @@ err_ret:
                                "%s error %d\n", __func__, rc); \
                } \
                rte_spinlock_unlock(&bp->hwrm_lock); \
+               if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) \
+                       rc = -EACCES; \
+               else if (rc > 0) \
+                       rc = -EINVAL; \
                return rc; \
        } \
 } while (0)
@@ -252,6 +260,9 @@ int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp,
        struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t mask = 0;
 
+       if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
+               return rc;
+
        HWRM_PREP(req, CFA_L2_SET_RX_MASK);
        req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
 
@@ -403,13 +414,13 @@ int bnxt_hwrm_set_l2_filter(struct bnxt *bp,
                req.l2_ovlan = filter->l2_ovlan;
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN)
-               req.l2_ovlan = filter->l2_ivlan;
+               req.l2_ivlan = filter->l2_ivlan;
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK)
                req.l2_ovlan_mask = filter->l2_ovlan_mask;
        if (enables &
            HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK)
-               req.l2_ovlan_mask = filter->l2_ivlan_mask;
+               req.l2_ivlan_mask = filter->l2_ivlan_mask;
        if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID)
                req.src_id = rte_cpu_to_le_32(filter->src_id);
        if (enables & HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE)
@@ -550,7 +561,7 @@ int bnxt_hwrm_func_driver_register(struct bnxt *bp)
        }
 
        req.async_event_fwd[0] |= rte_cpu_to_le_32(0x1);   /* TODO: Use MACRO */
-       memset(req.async_event_fwd, 0xff, sizeof(req.async_event_fwd));
+       //memset(req.async_event_fwd, 0xff, sizeof(req.async_event_fwd));
 
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 
@@ -715,34 +726,39 @@ static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf)
        struct hwrm_port_phy_cfg_input req = {0};
        struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        uint32_t enables = 0;
-       uint32_t link_speed_mask =
-               HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
 
        HWRM_PREP(req, PORT_PHY_CFG);
 
        if (conf->link_up) {
+               /* Setting a fixed speed while AutoNeg is on, so disable it */
+               if (bp->link_info.auto_mode && conf->link_speed) {
+                       req.auto_mode = HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE;
+                       RTE_LOG(DEBUG, PMD, "Disabling AutoNeg\n");
+               }
+
                req.flags = rte_cpu_to_le_32(conf->phy_flags);
                req.force_link_speed = rte_cpu_to_le_16(conf->link_speed);
+               enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
                /*
                 * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set
                 * any auto mode, even "none".
                 */
                if (!conf->link_speed) {
-                       req.auto_mode = conf->auto_mode;
-                       enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE;
-                       if (conf->auto_mode ==
-                           HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK) {
-                               req.auto_link_speed_mask =
-                                       conf->auto_link_speed_mask;
-                               enables |= link_speed_mask;
-                       }
-                       if (bp->link_info.auto_link_speed) {
-                               req.auto_link_speed =
-                                       bp->link_info.auto_link_speed;
-                               enables |=
-                               HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED;
-                       }
+                       /* No speeds specified. Enable AutoNeg - all speeds */
+                       req.auto_mode =
+                               HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS;
+               }
+               /* AutoNeg - Advertise speeds specified. */
+               if (conf->auto_link_speed_mask &&
+                   !(conf->phy_flags & HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE)) {
+                       req.auto_mode =
+                               HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
+                       req.auto_link_speed_mask =
+                               conf->auto_link_speed_mask;
+                       enables |=
+                       HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK;
                }
+
                req.auto_duplex = conf->duplex;
                enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX;
                req.auto_pause = conf->auto_pause;
@@ -791,16 +807,28 @@ static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp,
        link_info->auto_pause = resp->auto_pause;
        link_info->force_pause = resp->force_pause;
        link_info->auto_mode = resp->auto_mode;
+       link_info->phy_type = resp->phy_type;
+       link_info->media_type = resp->media_type;
 
        link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds);
        link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed);
        link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis);
+       link_info->force_link_speed = rte_le_to_cpu_16(resp->force_link_speed);
        link_info->phy_ver[0] = resp->phy_maj;
        link_info->phy_ver[1] = resp->phy_min;
        link_info->phy_ver[2] = resp->phy_bld;
 
        HWRM_UNLOCK();
 
+       RTE_LOG(DEBUG, PMD, "Link Speed %d\n", link_info->link_speed);
+       RTE_LOG(DEBUG, PMD, "Auto Mode %d\n", link_info->auto_mode);
+       RTE_LOG(DEBUG, PMD, "Support Speeds %x\n", link_info->support_speeds);
+       RTE_LOG(DEBUG, PMD, "Auto Link Speed %x\n", link_info->auto_link_speed);
+       RTE_LOG(DEBUG, PMD, "Auto Link Speed Mask %x\n",
+                   link_info->auto_link_speed_mask);
+       RTE_LOG(DEBUG, PMD, "Forced Link Speed %x\n",
+                   link_info->force_link_speed);
+
        return rc;
 }
 
@@ -1040,7 +1068,6 @@ int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
        cpr->hw_stats_ctx_id = rte_le_to_cpu_16(resp->stat_ctx_id);
 
        HWRM_UNLOCK();
-       bp->grp_info[idx].fw_stats_ctx = cpr->hw_stats_ctx_id;
 
        return rc;
 }
@@ -1073,8 +1100,9 @@ int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
        /* map ring groups to this vnic */
        RTE_LOG(DEBUG, PMD, "Alloc VNIC. Start %x, End %x\n",
                vnic->start_grp_id, vnic->end_grp_id);
-       for (i = vnic->start_grp_id, j = 0; i <= vnic->end_grp_id; i++, j++)
+       for (i = vnic->start_grp_id, j = 0; i < vnic->end_grp_id; i++, j++)
                vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id;
+
        vnic->dflt_ring_grp = bp->grp_info[vnic->start_grp_id].fw_grp_id;
        vnic->rss_rule = (uint16_t)HWRM_NA_SIGNATURE;
        vnic->cos_rule = (uint16_t)HWRM_NA_SIGNATURE;
@@ -1084,7 +1112,8 @@ int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic)
        HWRM_PREP(req, VNIC_ALLOC);
 
        if (vnic->func_default)
-               req.flags = HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT;
+               req.flags =
+                       rte_cpu_to_le_32(HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT);
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 
        HWRM_CHECK_RESULT();
@@ -1105,7 +1134,7 @@ static int bnxt_hwrm_vnic_plcmodes_qcfg(struct bnxt *bp,
 
        HWRM_PREP(req, VNIC_PLCMODES_QCFG);
 
-       req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
+       req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
 
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 
@@ -1133,7 +1162,7 @@ static int bnxt_hwrm_vnic_plcmodes_cfg(struct bnxt *bp,
 
        HWRM_PREP(req, VNIC_PLCMODES_CFG);
 
-       req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
+       req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
        req.flags = rte_cpu_to_le_32(pmode->flags);
        req.jumbo_thresh = rte_cpu_to_le_16(pmode->jumbo_thresh);
        req.hds_offset = rte_cpu_to_le_16(pmode->hds_offset);
@@ -1365,6 +1394,11 @@ int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
        struct hwrm_vnic_plcmodes_cfg_output *resp = bp->hwrm_cmd_resp_addr;
        uint16_t size;
 
+       if (vnic->fw_vnic_id == INVALID_HW_RING_ID) {
+               RTE_LOG(DEBUG, PMD, "VNIC ID %x\n", vnic->fw_vnic_id);
+               return rc;
+       }
+
        HWRM_PREP(req, VNIC_PLCMODES_CFG);
 
        req.flags = rte_cpu_to_le_32(
@@ -1377,7 +1411,7 @@ int bnxt_hwrm_vnic_plcmode_cfg(struct bnxt *bp,
        size -= RTE_PKTMBUF_HEADROOM;
 
        req.jumbo_thresh = rte_cpu_to_le_16(size);
-       req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
+       req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
 
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 
@@ -1408,12 +1442,12 @@ int bnxt_hwrm_vnic_tpa_cfg(struct bnxt *bp,
                                HWRM_VNIC_TPA_CFG_INPUT_FLAGS_GRO |
                                HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_ECN |
                        HWRM_VNIC_TPA_CFG_INPUT_FLAGS_AGG_WITH_SAME_GRE_SEQ);
-               req.vnic_id = rte_cpu_to_le_32(vnic->fw_vnic_id);
                req.max_agg_segs = rte_cpu_to_le_16(5);
                req.max_aggs =
                        rte_cpu_to_le_16(HWRM_VNIC_TPA_CFG_INPUT_MAX_AGGS_MAX);
                req.min_agg_len = rte_cpu_to_le_32(512);
        }
+       req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id);
 
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 
@@ -1563,19 +1597,15 @@ int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp)
 
        for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) {
 
-               if (i >= bp->rx_cp_nr_rings)
+               if (i >= bp->rx_cp_nr_rings) {
                        cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring;
-               else
+               } else {
                        cpr = bp->rx_queues[i]->cp_ring;
+                       bp->grp_info[i].fw_stats_ctx = -1;
+               }
                if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) {
                        rc = bnxt_hwrm_stat_ctx_free(bp, cpr, i);
                        cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE;
-                       /*
-                        * TODO. Need a better way to reset grp_info.stats_ctx
-                        * for Rx rings only. stats_ctx is not saved for Tx
-                        * in grp_info.
-                        */
-                       bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
                        if (rc)
                                return rc;
                }
@@ -1635,7 +1665,6 @@ static void bnxt_free_cp_ring(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
        bnxt_hwrm_ring_free(bp, cp_ring,
                        HWRM_RING_FREE_INPUT_RING_TYPE_L2_CMPL);
        cp_ring->fw_ring_id = INVALID_HW_RING_ID;
-       bp->grp_info[idx].cp_fw_ring_id = INVALID_HW_RING_ID;
        memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size *
                        sizeof(*cpr->cp_desc_ring));
        cpr->cp_raw_cons = 0;
@@ -1691,10 +1720,17 @@ int bnxt_free_all_hwrm_rings(struct bnxt *bp)
                                        rxr->rx_ring_struct->ring_size *
                                        sizeof(*rxr->rx_buf_ring));
                        rxr->rx_prod = 0;
+               }
+               ring = rxr->ag_ring_struct;
+               if (ring->fw_ring_id != INVALID_HW_RING_ID) {
+                       bnxt_hwrm_ring_free(bp, ring,
+                                           HWRM_RING_FREE_INPUT_RING_TYPE_RX);
+                       ring->fw_ring_id = INVALID_HW_RING_ID;
                        memset(rxr->ag_buf_ring, 0,
-                                       rxr->ag_ring_struct->ring_size *
-                                       sizeof(*rxr->ag_buf_ring));
+                              rxr->ag_ring_struct->ring_size *
+                              sizeof(*rxr->ag_buf_ring));
                        rxr->ag_prod = 0;
+                       bp->grp_info[i].ag_fw_ring_id = INVALID_HW_RING_ID;
                }
                if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) {
                        bnxt_free_cp_ring(bp, cpr, idx);
@@ -1776,6 +1812,7 @@ int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic)
                        rc = bnxt_hwrm_clear_ntuple_filter(bp, filter);
                else
                        rc = bnxt_hwrm_clear_l2_filter(bp, filter);
+               STAILQ_REMOVE(&vnic->filter, filter, bnxt_filter_info, next);
                //if (rc)
                        //break;
        }
@@ -1863,6 +1900,8 @@ void bnxt_free_all_hwrm_resources(struct bnxt *bp)
                bnxt_hwrm_vnic_tpa_cfg(bp, vnic, false);
 
                bnxt_hwrm_vnic_free(bp, vnic);
+
+               rte_free(vnic->fw_grp_ids);
        }
        /* Ring resources */
        bnxt_free_all_hwrm_rings(bp);
@@ -1886,6 +1925,11 @@ static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed)
        return hw_link_duplex;
 }
 
+static uint16_t bnxt_check_eth_link_autoneg(uint32_t conf_link)
+{
+       return (conf_link & ETH_LINK_SPEED_FIXED) ? 0 : 1;
+}
+
 static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed)
 {
        uint16_t eth_link_speed = 0;
@@ -2094,7 +2138,7 @@ int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
        int rc = 0;
        struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
        struct bnxt_link_info link_req;
-       uint16_t speed;
+       uint16_t speed, autoneg;
 
        if (BNXT_NPAR_PF(bp) || BNXT_VF(bp))
                return 0;
@@ -2109,20 +2153,36 @@ int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up)
        if (!link_up)
                goto port_phy_cfg;
 
+       autoneg = bnxt_check_eth_link_autoneg(dev_conf->link_speeds);
        speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds);
        link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
-       if (speed == 0) {
+       /* Autoneg can be done only when the FW allows it */
+       if (autoneg == 1 && !(bp->link_info.auto_link_speed ||
+                               bp->link_info.force_link_speed)) {
                link_req.phy_flags |=
                                HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG;
-               link_req.auto_mode =
-                               HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK;
                link_req.auto_link_speed_mask =
                        bnxt_parse_eth_link_speed_mask(bp,
                                                       dev_conf->link_speeds);
        } else {
+               if (bp->link_info.phy_type ==
+                   HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET ||
+                   bp->link_info.phy_type ==
+                   HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE ||
+                   bp->link_info.media_type ==
+                   HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP) {
+                       RTE_LOG(ERR, PMD, "10GBase-T devices must autoneg\n");
+                       return -EINVAL;
+               }
+
                link_req.phy_flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE;
-               link_req.link_speed = speed;
-               RTE_LOG(INFO, PMD, "Set Link Speed %x\n", speed);
+               /* If the user wants a particular speed, try that first. */
+               if (speed)
+                       link_req.link_speed = speed;
+               else if (bp->link_info.force_link_speed)
+                       link_req.link_speed = bp->link_info.force_link_speed;
+               else
+                       link_req.link_speed = bp->link_info.auto_link_speed;
        }
        link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds);
        link_req.auto_pause = bp->link_info.auto_pause;
@@ -3054,13 +3114,12 @@ int bnxt_get_nvram_directory(struct bnxt *bp, uint32_t len, uint8_t *data)
        req.host_dest_addr = rte_cpu_to_le_64(dma_handle);
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 
-       HWRM_CHECK_RESULT();
-       HWRM_UNLOCK();
-
        if (rc == 0)
                memcpy(data, buf, len > buflen ? buflen : len);
 
        rte_free(buf);
+       HWRM_CHECK_RESULT();
+       HWRM_UNLOCK();
 
        return rc;
 }
@@ -3092,12 +3151,13 @@ int bnxt_hwrm_get_nvram_item(struct bnxt *bp, uint32_t index,
        req.offset = rte_cpu_to_le_32(offset);
        req.len = rte_cpu_to_le_32(length);
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
-       HWRM_CHECK_RESULT();
-       HWRM_UNLOCK();
        if (rc == 0)
                memcpy(data, buf, length);
 
        rte_free(buf);
+       HWRM_CHECK_RESULT();
+       HWRM_UNLOCK();
+
        return rc;
 }
 
@@ -3128,14 +3188,6 @@ int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
        rte_iova_t dma_handle;
        uint8_t *buf;
 
-       HWRM_PREP(req, NVM_WRITE);
-
-       req.dir_type = rte_cpu_to_le_16(dir_type);
-       req.dir_ordinal = rte_cpu_to_le_16(dir_ordinal);
-       req.dir_ext = rte_cpu_to_le_16(dir_ext);
-       req.dir_attr = rte_cpu_to_le_16(dir_attr);
-       req.dir_data_length = rte_cpu_to_le_32(data_len);
-
        buf = rte_malloc("nvm_write", data_len, 0);
        rte_mem_lock_page(buf);
        if (!buf)
@@ -3148,14 +3200,22 @@ int bnxt_hwrm_flash_nvram(struct bnxt *bp, uint16_t dir_type,
                return -ENOMEM;
        }
        memcpy(buf, data, data_len);
+
+       HWRM_PREP(req, NVM_WRITE);
+
+       req.dir_type = rte_cpu_to_le_16(dir_type);
+       req.dir_ordinal = rte_cpu_to_le_16(dir_ordinal);
+       req.dir_ext = rte_cpu_to_le_16(dir_ext);
+       req.dir_attr = rte_cpu_to_le_16(dir_attr);
+       req.dir_data_length = rte_cpu_to_le_32(data_len);
        req.host_src_addr = rte_cpu_to_le_64(dma_handle);
 
        rc = bnxt_hwrm_send_message(bp, &req, sizeof(req));
 
+       rte_free(buf);
        HWRM_CHECK_RESULT();
        HWRM_UNLOCK();
 
-       rte_free(buf);
        return rc;
 }
 
@@ -3556,7 +3616,6 @@ int bnxt_hwrm_clear_ntuple_filter(struct bnxt *bp,
        HWRM_UNLOCK();
 
        filter->fw_ntuple_filter_id = -1;
-       filter->fw_l2_filter_id = -1;
 
        return 0;
 }