@@ -6056,6 +6056,14 @@ void ufshcd_schedule_eh_work(struct ufs_hba *hba)
6056
6056
}
6057
6057
}
6058
6058
6059
/*
 * ufshcd_force_error_recovery - unconditionally kick off UFS error recovery
 * @hba: per-adapter instance
 *
 * Sets @hba->force_reset (presumably making the error handler take its
 * full-reset path — confirm against ufshcd_err_handler) and schedules the
 * error-handling work, both under host_lock so the flag and the scheduling
 * are seen atomically by the error handler.
 *
 * Callers in the suspend path use this when a device power-mode or link
 * state transition fails, since returning an error there would leave I/O
 * hung; forcing recovery (and returning -EBUSY) breaks the suspend instead.
 */
static void ufshcd_force_error_recovery(struct ufs_hba *hba)
{
	spin_lock_irq(hba->host->host_lock);
	hba->force_reset = true;
	ufshcd_schedule_eh_work(hba);
	spin_unlock_irq(hba->host->host_lock);
}
6066
+
6059
6067
static void ufshcd_clk_scaling_allow (struct ufs_hba * hba , bool allow )
6060
6068
{
6061
6069
down_write (& hba -> clk_scaling_lock );
@@ -9083,6 +9091,15 @@ static int __ufshcd_wl_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
9083
9091
9084
9092
if (!hba -> dev_info .b_rpm_dev_flush_capable ) {
9085
9093
ret = ufshcd_set_dev_pwr_mode (hba , req_dev_pwr_mode );
9094
+ if (ret && pm_op != UFS_SHUTDOWN_PM ) {
9095
+ /*
9096
+ * If return err in suspend flow, IO will hang.
9097
+ * Trigger error handler and break suspend for
9098
+ * error recovery.
9099
+ */
9100
+ ufshcd_force_error_recovery (hba );
9101
+ ret = - EBUSY ;
9102
+ }
9086
9103
if (ret )
9087
9104
goto enable_scaling ;
9088
9105
}
@@ -9094,6 +9111,15 @@ static int __ufshcd_wl_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
9094
9111
*/
9095
9112
check_for_bkops = !ufshcd_is_ufs_dev_deepsleep (hba );
9096
9113
ret = ufshcd_link_state_transition (hba , req_link_state , check_for_bkops );
9114
+ if (ret && pm_op != UFS_SHUTDOWN_PM ) {
9115
+ /*
9116
+ * If return err in suspend flow, IO will hang.
9117
+ * Trigger error handler and break suspend for
9118
+ * error recovery.
9119
+ */
9120
+ ufshcd_force_error_recovery (hba );
9121
+ ret = - EBUSY ;
9122
+ }
9097
9123
if (ret )
9098
9124
goto set_dev_active ;
9099
9125
0 commit comments