New upstream version 16.11.5
diff --git a/drivers/net/i40e/base/i40e_adminq.c b/drivers/net/i40e/base/i40e_adminq.c
index 222add4..e231582 100644
@@ -37,18 +37,6 @@ POSSIBILITY OF SUCH DAMAGE.
 #include "i40e_adminq.h"
 #include "i40e_prototype.h"
 
-#ifdef PF_DRIVER
-/**
- * i40e_is_nvm_update_op - return true if this is an NVM update operation
- * @desc: API request descriptor
- **/
-STATIC INLINE bool i40e_is_nvm_update_op(struct i40e_aq_desc *desc)
-{
-       return (desc->opcode == CPU_TO_LE16(i40e_aqc_opc_nvm_erase) ||
-               desc->opcode == CPU_TO_LE16(i40e_aqc_opc_nvm_update));
-}
-
-#endif /* PF_DRIVER */
 /**
  *  i40e_adminq_init_regs - Initialize AdminQ registers
  *  @hw: pointer to the hardware structure
@@ -584,6 +572,26 @@ shutdown_arq_out:
        i40e_release_spinlock(&hw->aq.arq_spinlock);
        return ret_code;
 }
+#ifdef PF_DRIVER
+
+/**
+ *  i40e_resume_aq - resume AQ processing from 0
+ *  @hw: pointer to the hardware structure
+ **/
+STATIC void i40e_resume_aq(struct i40e_hw *hw)
+{
+       /* Registers are reset after PF reset */
+       hw->aq.asq.next_to_use = 0;
+       hw->aq.asq.next_to_clean = 0;
+
+       i40e_config_asq_regs(hw);
+
+       hw->aq.arq.next_to_use = 0;
+       hw->aq.arq.next_to_clean = 0;
+
+       i40e_config_arq_regs(hw);
+}
+#endif /* PF_DRIVER */
 
 /**
  *  i40e_init_adminq - main initialization routine for Admin Queue
@@ -598,12 +606,15 @@ shutdown_arq_out:
  **/
 enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw)
 {
-       enum i40e_status_code ret_code;
 #ifdef PF_DRIVER
-       u16 eetrack_lo, eetrack_hi;
        u16 cfg_ptr, oem_hi, oem_lo;
+       u16 eetrack_lo, eetrack_hi;
+#endif
+       enum i40e_status_code ret_code;
+#ifdef PF_DRIVER
        int retry = 0;
 #endif
+
        /* verify input for valid configuration */
        if ((hw->aq.num_arq_entries == 0) ||
            (hw->aq.num_asq_entries == 0) ||
@@ -612,8 +623,6 @@ enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw)
                ret_code = I40E_ERR_CONFIG;
                goto init_adminq_exit;
        }
-
-       /* initialize spin locks */
        i40e_init_spinlock(&hw->aq.asq_spinlock);
        i40e_init_spinlock(&hw->aq.arq_spinlock);
 
@@ -673,6 +682,12 @@ enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw)
                           &oem_lo);
        hw->nvm.oem_ver = ((u32)oem_hi << 16) | oem_lo;
 
+       /* Newer versions of firmware require lock when reading the NVM */
+       if ((hw->aq.api_maj_ver > 1) ||
+           ((hw->aq.api_maj_ver == 1) &&
+            (hw->aq.api_min_ver >= 5)))
+               hw->flags |= I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;
+
        if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
                ret_code = I40E_ERR_FIRMWARE_API_VERSION;
                goto init_adminq_free_arq;
@@ -680,13 +695,9 @@ enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw)
 
        /* pre-emptive resource lock release */
        i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
-       hw->aq.nvm_release_on_done = false;
+       hw->nvm_release_on_done = false;
        hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
 
-       ret_code = i40e_aq_set_hmc_resource_profile(hw,
-                                                   I40E_HMC_PROFILE_DEFAULT,
-                                                   0,
-                                                   NULL);
 #endif /* PF_DRIVER */
        ret_code = I40E_SUCCESS;
 
@@ -720,8 +731,6 @@ enum i40e_status_code i40e_shutdown_adminq(struct i40e_hw *hw)
 
        i40e_shutdown_asq(hw);
        i40e_shutdown_arq(hw);
-
-       /* destroy the spinlocks */
        i40e_destroy_spinlock(&hw->aq.asq_spinlock);
        i40e_destroy_spinlock(&hw->aq.arq_spinlock);
 
@@ -747,7 +756,6 @@ u16 i40e_clean_asq(struct i40e_hw *hw)
 
        desc = I40E_ADMINQ_DESC(*asq, ntc);
        details = I40E_ADMINQ_DETAILS(*asq, ntc);
-
        while (rd32(hw, hw->aq.asq.head) != ntc) {
                i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
                           "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));
@@ -780,7 +788,11 @@ u16 i40e_clean_asq(struct i40e_hw *hw)
  *  Returns true if the firmware has processed all descriptors on the
  *  admin send queue. Returns false if there are still requests pending.
  **/
+#ifdef VF_DRIVER
 bool i40e_asq_done(struct i40e_hw *hw)
+#else
+STATIC bool i40e_asq_done(struct i40e_hw *hw)
+#endif
 {
        /* AQ designers suggest use of head for better
         * timing reliability than DD bit
@@ -938,7 +950,6 @@ enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw,
                         */
                        if (i40e_asq_done(hw))
                                break;
-                       /* ugh! delay while spin_lock */
                        i40e_msec_delay(1);
                        total_delay++;
                } while (total_delay < hw->aq.asq_cmd_timeout);
@@ -1046,22 +1057,19 @@ enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw,
        }
 
        /* set next_to_use to head */
-#ifdef PF_DRIVER
 #ifdef INTEGRATED_VF
        if (!i40e_is_vf(hw))
-               ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
+               ntu = rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK;
+       else
+               ntu = rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK;
 #else
-       ntu = (rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK);
-#endif /* INTEGRATED_VF */
+#ifdef PF_DRIVER
+       ntu = rd32(hw, hw->aq.arq.head) & I40E_PF_ARQH_ARQH_MASK;
 #endif /* PF_DRIVER */
 #ifdef VF_DRIVER
-#ifdef INTEGRATED_VF
-       if (i40e_is_vf(hw))
-               ntu = (rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK);
-#else
-       ntu = (rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK);
-#endif /* INTEGRATED_VF */
+       ntu = rd32(hw, hw->aq.arq.head) & I40E_VF_ARQH1_ARQH_MASK;
 #endif /* VF_DRIVER */
+#endif /* INTEGRATED_VF */
        if (ntu == ntc) {
                /* nothing to do - shouldn't need to update ring's values */
                ret_code = I40E_ERR_ADMIN_QUEUE_NO_WORK;
@@ -1120,27 +1128,8 @@ enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw,
        hw->aq.arq.next_to_use = ntu;
 
 #ifdef PF_DRIVER
-       if (i40e_is_nvm_update_op(&e->desc)) {
-               if (hw->aq.nvm_release_on_done) {
-                       i40e_release_nvm(hw);
-                       hw->aq.nvm_release_on_done = false;
-               }
-
-               switch (hw->nvmupd_state) {
-               case I40E_NVMUPD_STATE_INIT_WAIT:
-                       hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
-                       break;
-
-               case I40E_NVMUPD_STATE_WRITE_WAIT:
-                       hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
-                       break;
-
-               default:
-                       break;
-               }
-       }
-
-#endif
+       i40e_nvmupd_check_wait_event(hw, LE16_TO_CPU(e->desc.opcode));
+#endif /* PF_DRIVER */
 clean_arq_element_out:
        /* Set pending if needed, unlock and return */
        if (pending != NULL)
@@ -1151,16 +1140,3 @@ clean_arq_element_err:
        return ret_code;
 }
 
-void i40e_resume_aq(struct i40e_hw *hw)
-{
-       /* Registers are reset after PF reset */
-       hw->aq.asq.next_to_use = 0;
-       hw->aq.asq.next_to_clean = 0;
-
-       i40e_config_asq_regs(hw);
-
-       hw->aq.arq.next_to_use = 0;
-       hw->aq.arq.next_to_clean = 0;
-
-       i40e_config_arq_regs(hw);
-}
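
Usage note (not part of the patch above): the I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK flag set during i40e_init_adminq() is meant to be consulted by the NVM read path. The following is a minimal sketch, assuming the existing i40e_acquire_nvm()/i40e_release_nvm()/i40e_read_nvm_word() helpers from i40e_nvm.c; the wrapper name is illustrative only and does not appear in the driver.

/* Illustrative sketch only: take the shared NVM resource around a word read
 * when the firmware (API >= 1.5) requires the lock, as flagged in
 * i40e_init_adminq() in the patch above.
 * Assumes the i40e base headers (i40e_prototype.h) are included.
 */
static enum i40e_status_code
example_read_nvm_word_locked(struct i40e_hw *hw, u16 offset, u16 *data)
{
	enum i40e_status_code ret = I40E_SUCCESS;
	bool need_lock = !!(hw->flags & I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK);

	/* Older firmware does not need the resource lock for reads */
	if (need_lock)
		ret = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
	if (ret != I40E_SUCCESS)
		return ret;

	ret = i40e_read_nvm_word(hw, offset, data);

	if (need_lock)
		i40e_release_nvm(hw);

	return ret;
}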