@@ -1076,27 +1076,31 @@ static int _opp_set_required_opps_genpd(struct device *dev,
 {
 	struct device **genpd_virt_devs =
 		opp_table->genpd_virt_devs ? opp_table->genpd_virt_devs : &dev;
-	int i, ret = 0;
+	int index, target, delta, ret = 0;
+
+	/* Scaling up? Set required OPPs in normal order, else reverse */
+	if (!scaling_down) {
+		index = 0;
+		target = opp_table->required_opp_count;
+		delta = 1;
+	} else {
+		index = opp_table->required_opp_count - 1;
+		target = -1;
+		delta = -1;
+	}
 
 	/*
 	 * Acquire genpd_virt_dev_lock to make sure we don't use a genpd_dev
 	 * after it is freed from another thread.
 	 */
 	mutex_lock(&opp_table->genpd_virt_dev_lock);
 
-	/* Scaling up? Set required OPPs in normal order, else reverse */
-	if (!scaling_down) {
-		for (i = 0; i < opp_table->required_opp_count; i++) {
-			ret = _set_performance_state(dev, genpd_virt_devs[i], opp, i);
-			if (ret)
-				break;
-		}
-	} else {
-		for (i = opp_table->required_opp_count - 1; i >= 0; i--) {
-			ret = _set_performance_state(dev, genpd_virt_devs[i], opp, i);
-			if (ret)
-				break;
-		}
+	while (index != target) {
+		ret = _set_performance_state(dev, genpd_virt_devs[index], opp, index);
+		if (ret)
+			break;
+
+		index += delta;
 	}
 
 	mutex_unlock(&opp_table->genpd_virt_dev_lock);
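For reference, a minimal standalone sketch of the loop shape this hunk introduces: the two mirrored for loops (forward when scaling up, reverse when scaling down) collapse into one while loop whose start index, end sentinel and step are chosen up front. set_state(), walk_required_opps() and count are hypothetical stand-ins for illustration only, not the kernel's _set_performance_state() path.

#include <stdio.h>
#include <stdbool.h>

/* Hypothetical stand-in for _set_performance_state(); always succeeds here. */
static int set_state(int index)
{
	printf("set index %d\n", index);
	return 0;
}

static int walk_required_opps(int count, bool scaling_down)
{
	int index, target, delta, ret = 0;

	/* Scaling up walks 0..count-1; scaling down walks count-1..0 */
	if (!scaling_down) {
		index = 0;
		target = count;
		delta = 1;
	} else {
		index = count - 1;
		target = -1;
		delta = -1;
	}

	while (index != target) {
		ret = set_state(index);
		if (ret)
			break;

		index += delta;
	}

	return ret;
}

int main(void)
{
	walk_required_opps(3, false);	/* visits 0, 1, 2 */
	walk_required_opps(3, true);	/* visits 2, 1, 0 */
	return 0;
}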