@@ -164,7 +164,6 @@ enum dpu_enc_rc_states {
  * clks and resources after IDLE_TIMEOUT time.
  * @vsync_event_work: worker to handle vsync event for autorefresh
  * @topology: topology of the display
- * @mode_set_complete: flag to indicate modeset completion
  * @idle_timeout: idle timeout duration in milliseconds
  */
 struct dpu_encoder_virt {
@@ -202,7 +201,6 @@ struct dpu_encoder_virt {
 	struct delayed_work delayed_off_work;
 	struct kthread_work vsync_event_work;
 	struct msm_display_topology topology;
-	bool mode_set_complete;
 
 	u32 idle_timeout;
 };
@@ -563,6 +561,7 @@ static int dpu_encoder_virt_atomic_check(
 	const struct drm_display_mode *mode;
 	struct drm_display_mode *adj_mode;
 	struct msm_display_topology topology;
+	struct dpu_global_state *global_state;
 	int i = 0;
 	int ret = 0;
 
@@ -579,6 +578,7 @@ static int dpu_encoder_virt_atomic_check(
 	dpu_kms = to_dpu_kms(priv->kms);
 	mode = &crtc_state->mode;
 	adj_mode = &crtc_state->adjusted_mode;
+	global_state = dpu_kms_get_existing_global_state(dpu_kms);
 	trace_dpu_enc_atomic_check(DRMID(drm_enc));
 
 	/*
@@ -610,17 +610,15 @@ static int dpu_encoder_virt_atomic_check(
 
 	topology = dpu_encoder_get_topology(dpu_enc, dpu_kms, adj_mode);
 
-	/* Reserve dynamic resources now. Indicating AtomicTest phase */
+	/* Reserve dynamic resources now. */
 	if (!ret) {
 		/*
 		 * Avoid reserving resources when mode set is pending. Topology
 		 * info may not be available to complete reservation.
 		 */
-		if (drm_atomic_crtc_needs_modeset(crtc_state)
-				&& dpu_enc->mode_set_complete) {
-			ret = dpu_rm_reserve(&dpu_kms->rm, drm_enc, crtc_state,
-					     topology, true);
-			dpu_enc->mode_set_complete = false;
+		if (drm_atomic_crtc_needs_modeset(crtc_state)) {
+			ret = dpu_rm_reserve(&dpu_kms->rm, global_state,
+					drm_enc, crtc_state, topology);
 		}
 	}
 
@@ -957,12 +955,13 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
 	struct drm_connector *conn = NULL, *conn_iter;
 	struct drm_crtc *drm_crtc;
 	struct dpu_crtc_state *cstate;
+	struct dpu_global_state *global_state;
 	struct msm_display_topology topology;
 	struct dpu_hw_blk *hw_pp[MAX_CHANNELS_PER_ENC];
 	struct dpu_hw_blk *hw_ctl[MAX_CHANNELS_PER_ENC];
 	struct dpu_hw_blk *hw_lm[MAX_CHANNELS_PER_ENC];
 	int num_lm, num_ctl, num_pp;
-	int i, j, ret;
+	int i, j;
 
 	if (!drm_enc) {
 		DPU_ERROR("invalid encoder\n");
@@ -976,6 +975,12 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
 	dpu_kms = to_dpu_kms(priv->kms);
 	connector_list = &dpu_kms->dev->mode_config.connector_list;
 
+	global_state = dpu_kms_get_existing_global_state(dpu_kms);
+	if (IS_ERR_OR_NULL(global_state)) {
+		DPU_ERROR("Failed to get global state");
+		return;
+	}
+
 	trace_dpu_enc_mode_set(DRMID(drm_enc));
 
 	list_for_each_entry(conn_iter, connector_list, head)
@@ -996,21 +1001,14 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
 
 	topology = dpu_encoder_get_topology(dpu_enc, dpu_kms, adj_mode);
 
-	/* Reserve dynamic resources now. Indicating non-AtomicTest phase */
-	ret = dpu_rm_reserve(&dpu_kms->rm, drm_enc, drm_crtc->state,
-			     topology, false);
-	if (ret) {
-		DPU_ERROR_ENC(dpu_enc,
-				"failed to reserve hw resources, %d\n", ret);
-		return;
-	}
-
-	num_pp = dpu_rm_get_assigned_resources(&dpu_kms->rm, drm_enc->base.id,
-			DPU_HW_BLK_PINGPONG, hw_pp, ARRAY_SIZE(hw_pp));
-	num_ctl = dpu_rm_get_assigned_resources(&dpu_kms->rm, drm_enc->base.id,
-			DPU_HW_BLK_CTL, hw_ctl, ARRAY_SIZE(hw_ctl));
-	num_lm = dpu_rm_get_assigned_resources(&dpu_kms->rm, drm_enc->base.id,
-			DPU_HW_BLK_LM, hw_lm, ARRAY_SIZE(hw_lm));
+	/* Query resources that have been reserved in the atomic check step. */
+	num_pp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
+			drm_enc->base.id, DPU_HW_BLK_PINGPONG, hw_pp,
+			ARRAY_SIZE(hw_pp));
+	num_ctl = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
+			drm_enc->base.id, DPU_HW_BLK_CTL, hw_ctl, ARRAY_SIZE(hw_ctl));
+	num_lm = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
+			drm_enc->base.id, DPU_HW_BLK_LM, hw_lm, ARRAY_SIZE(hw_lm));
 
 	for (i = 0; i < MAX_CHANNELS_PER_ENC; i++)
 		dpu_enc->hw_pp[i] = i < num_pp ? to_dpu_hw_pingpong(hw_pp[i])
@@ -1035,21 +1033,21 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
 		if (!dpu_enc->hw_pp[i]) {
 			DPU_ERROR_ENC(dpu_enc,
 				"no pp block assigned at idx: %d\n", i);
-			goto error;
+			return;
 		}
 
 		if (!hw_ctl[i]) {
 			DPU_ERROR_ENC(dpu_enc,
 				"no ctl block assigned at idx: %d\n", i);
-			goto error;
+			return;
 		}
 
 		phys->hw_pp = dpu_enc->hw_pp[i];
 		phys->hw_ctl = to_dpu_hw_ctl(hw_ctl[i]);
 
 		num_blk = dpu_rm_get_assigned_resources(&dpu_kms->rm,
-				drm_enc->base.id, DPU_HW_BLK_INTF, hw_blk,
-				ARRAY_SIZE(hw_blk));
+				global_state, drm_enc->base.id, DPU_HW_BLK_INTF,
+				hw_blk, ARRAY_SIZE(hw_blk));
 		for (j = 0; j < num_blk; j++) {
 			struct dpu_hw_intf *hw_intf;
 
@@ -1061,18 +1059,13 @@ static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
 		if (!phys->hw_intf) {
 			DPU_ERROR_ENC(dpu_enc,
 				"no intf block assigned at idx: %d\n", i);
-			goto error;
+			return;
 		}
 
 		phys->connector = conn->state->connector;
 		if (phys->ops.mode_set)
 			phys->ops.mode_set(phys, mode, adj_mode);
 	}
-
-	dpu_enc->mode_set_complete = true;
-
-error:
-	dpu_rm_release(&dpu_kms->rm, drm_enc);
 }
 
 static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc)
@@ -1169,6 +1162,7 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
 	struct dpu_encoder_virt *dpu_enc = NULL;
 	struct msm_drm_private *priv;
 	struct dpu_kms *dpu_kms;
+	struct dpu_global_state *global_state;
 	int i = 0;
 
 	if (!drm_enc) {
@@ -1187,6 +1181,7 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
 
 	priv = drm_enc->dev->dev_private;
 	dpu_kms = to_dpu_kms(priv->kms);
+	global_state = dpu_kms_get_existing_global_state(dpu_kms);
 
 	trace_dpu_enc_disable(DRMID(drm_enc));
 
@@ -1216,7 +1211,7 @@ static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
 
 	DPU_DEBUG_ENC(dpu_enc, "encoder disabled\n");
 
-	dpu_rm_release(&dpu_kms->rm, drm_enc);
+	dpu_rm_release(global_state, drm_enc);
 
 	mutex_unlock(&dpu_enc->enc_lock);
 }
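
Taken together, the hunks above move the DPU resource manager's bookkeeping out of per-encoder state (the removed mode_set_complete flag and the reserve/release calls in mode_set) and into a shared dpu_global_state object. Below is a minimal sketch of the resulting reserve/query/release lifecycle, condensed from the calls visible in this diff; it is not a drop-in implementation, and the surrounding driver plumbing (declarations, error handling, loops) is elided.

/*
 * Sketch only: every identifier here appears in the patch itself; the
 * three steps are spread across three different encoder callbacks.
 */

/* 1. atomic_check: reserve HW blocks against the global state while the
 *    atomic commit can still be rejected, instead of during mode_set. */
global_state = dpu_kms_get_existing_global_state(dpu_kms);
if (drm_atomic_crtc_needs_modeset(crtc_state))
	ret = dpu_rm_reserve(&dpu_kms->rm, global_state,
			drm_enc, crtc_state, topology);

/* 2. mode_set: reservation no longer happens here; the callback only
 *    queries the blocks that atomic_check already assigned. */
num_pp = dpu_rm_get_assigned_resources(&dpu_kms->rm, global_state,
		drm_enc->base.id, DPU_HW_BLK_PINGPONG, hw_pp,
		ARRAY_SIZE(hw_pp));

/* 3. disable: release this encoder's blocks back to the global pool. */
dpu_rm_release(global_state, drm_enc);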