@@ -8875,6 +8875,56 @@ static void amdgpu_dm_update_cursor(struct drm_plane *plane,
88758875 }
88768876}
88778877
/*
 * amdgpu_dm_enable_self_refresh - set up and (when safe) activate
 * self-refresh (PSR or Panel Replay) for the stream behind @acrtc_attach.
 *
 * @acrtc_attach: CRTC whose IRQ params gate self-refresh entry
 * @acrtc_state:  DM CRTC state; provides the stream, its link's
 *                psr_settings/replay_settings, and the update type
 * @current_ts:   current timestamp in nanoseconds, compared against the
 *                last dirty-rects change to debounce PSR/Replay entry
 *
 * Called with dm->dc_lock held (caller in amdgpu_dm_commit_planes takes
 * it around this call) — NOTE(review): confirm locking contract if reused
 * elsewhere.
 */
static void amdgpu_dm_enable_self_refresh(struct amdgpu_crtc *acrtc_attach,
					  const struct dm_crtc_state *acrtc_state,
					  const u64 current_ts)
{
	struct psr_settings *psr = &acrtc_state->stream->link->psr_settings;
	struct replay_settings *pr = &acrtc_state->stream->link->replay_settings;
	struct amdgpu_dm_connector *aconn =
		(struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context;

	/*
	 * On a full (non-fast) update, perform one-time feature setup:
	 * Panel Replay takes priority over PSR when the sink supports it.
	 */
	if (acrtc_state->update_type > UPDATE_TYPE_FAST) {
		if (pr->config.replay_supported && !pr->replay_feature_enabled)
			amdgpu_dm_link_setup_replay(acrtc_state->stream->link, aconn);
		else if (psr->psr_version != DC_PSR_VERSION_UNSUPPORTED &&
			 !psr->psr_feature_enabled)
			/* Respect the connector-level opt-out for eDP PSR. */
			if (!aconn->disallow_edp_enter_psr)
				amdgpu_dm_link_setup_psr(acrtc_state->stream);
	}

	/* Decrement skip count when SR is enabled and we're doing fast updates. */
	if (acrtc_state->update_type == UPDATE_TYPE_FAST &&
	    (psr->psr_feature_enabled || pr->config.replay_supported)) {
		if (aconn->sr_skip_count > 0)
			aconn->sr_skip_count--;

		/* Allow SR when skip count is 0. */
		acrtc_attach->dm_irq_params.allow_sr_entry = !aconn->sr_skip_count;

		/*
		 * If sink supports PSR SU/Panel Replay, there is no need to rely on
		 * a vblank event disable request to enable PSR/RP. PSR SU/RP
		 * can be enabled immediately once OS demonstrates an
		 * adequate number of fast atomic commits to notify KMD
		 * of update events. See `vblank_control_worker()`.
		 */
		if (acrtc_attach->dm_irq_params.allow_sr_entry &&
#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
		    /* Secure-display CRC capture is incompatible with SR entry. */
		    !amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) &&
#endif
		    /*
		     * Debounce: require 500 ms (500000000 ns) of quiet since the
		     * last dirty-rects change before entering self-refresh.
		     */
		    (current_ts - psr->psr_dirty_rects_change_timestamp_ns) > 500000000) {
			if (pr->replay_feature_enabled && !pr->replay_allow_active)
				amdgpu_dm_replay_enable(acrtc_state->stream, true);
			if (psr->psr_version >= DC_PSR_VERSION_SU_1 &&
			    !psr->psr_allow_active && !aconn->disallow_edp_enter_psr)
				amdgpu_dm_psr_enable(acrtc_state->stream);
		}
	} else {
		/* Non-fast update (or SR unsupported): block SR entry. */
		acrtc_attach->dm_irq_params.allow_sr_entry = false;
	}
}
8927+
88788928static void amdgpu_dm_commit_planes (struct drm_atomic_state * state ,
88798929 struct drm_device * dev ,
88808930 struct amdgpu_display_manager * dm ,
@@ -9203,9 +9253,12 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
92039253 bundle -> stream_update .abm_level = & acrtc_state -> abm_level ;
92049254
92059255 mutex_lock (& dm -> dc_lock );
9206- if ((acrtc_state -> update_type > UPDATE_TYPE_FAST ) &&
9207- acrtc_state -> stream -> link -> psr_settings .psr_allow_active )
9208- amdgpu_dm_psr_disable (acrtc_state -> stream );
9256+ if (acrtc_state -> update_type > UPDATE_TYPE_FAST ) {
9257+ if (acrtc_state -> stream -> link -> replay_settings .replay_allow_active )
9258+ amdgpu_dm_replay_disable (acrtc_state -> stream );
9259+ if (acrtc_state -> stream -> link -> psr_settings .psr_allow_active )
9260+ amdgpu_dm_psr_disable (acrtc_state -> stream );
9261+ }
92099262 mutex_unlock (& dm -> dc_lock );
92109263
92119264 /*
@@ -9246,57 +9299,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
92469299 dm_update_pflip_irq_state (drm_to_adev (dev ),
92479300 acrtc_attach );
92489301
9249- if (acrtc_state -> update_type > UPDATE_TYPE_FAST ) {
9250- if (acrtc_state -> stream -> link -> replay_settings .config .replay_supported &&
9251- !acrtc_state -> stream -> link -> replay_settings .replay_feature_enabled ) {
9252- struct amdgpu_dm_connector * aconn =
9253- (struct amdgpu_dm_connector * )acrtc_state -> stream -> dm_stream_context ;
9254- amdgpu_dm_link_setup_replay (acrtc_state -> stream -> link , aconn );
9255- } else if (acrtc_state -> stream -> link -> psr_settings .psr_version != DC_PSR_VERSION_UNSUPPORTED &&
9256- !acrtc_state -> stream -> link -> psr_settings .psr_feature_enabled ) {
9257-
9258- struct amdgpu_dm_connector * aconn = (struct amdgpu_dm_connector * )
9259- acrtc_state -> stream -> dm_stream_context ;
9260-
9261- if (!aconn -> disallow_edp_enter_psr )
9262- amdgpu_dm_link_setup_psr (acrtc_state -> stream );
9263- }
9264- }
9265-
9266- /* Decrement skip count when SR is enabled and we're doing fast updates. */
9267- if (acrtc_state -> update_type == UPDATE_TYPE_FAST &&
9268- acrtc_state -> stream -> link -> psr_settings .psr_feature_enabled ) {
9269- struct amdgpu_dm_connector * aconn =
9270- (struct amdgpu_dm_connector * )acrtc_state -> stream -> dm_stream_context ;
9271-
9272- if (aconn -> sr_skip_count > 0 )
9273- aconn -> sr_skip_count -- ;
9274-
9275- /* Allow SR when skip count is 0. */
9276- acrtc_attach -> dm_irq_params .allow_sr_entry = !aconn -> sr_skip_count ;
9277-
9278- /*
9279- * If sink supports PSR SU/Panel Replay, there is no need to rely on
9280- * a vblank event disable request to enable PSR/RP. PSR SU/RP
9281- * can be enabled immediately once OS demonstrates an
9282- * adequate number of fast atomic commits to notify KMD
9283- * of update events. See `vblank_control_worker()`.
9284- */
9285- if (acrtc_state -> stream -> link -> psr_settings .psr_version >= DC_PSR_VERSION_SU_1 &&
9286- acrtc_attach -> dm_irq_params .allow_sr_entry &&
9287- #ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
9288- !amdgpu_dm_crc_window_is_activated (acrtc_state -> base .crtc ) &&
9289- #endif
9290- !acrtc_state -> stream -> link -> psr_settings .psr_allow_active &&
9291- !aconn -> disallow_edp_enter_psr &&
9292- (timestamp_ns -
9293- acrtc_state -> stream -> link -> psr_settings .psr_dirty_rects_change_timestamp_ns ) >
9294- 500000000 )
9295- amdgpu_dm_psr_enable (acrtc_state -> stream );
9296- } else {
9297- acrtc_attach -> dm_irq_params .allow_sr_entry = false;
9298- }
9299-
9302+ amdgpu_dm_enable_self_refresh (acrtc_attach , acrtc_state , timestamp_ns );
93009303 mutex_unlock (& dm -> dc_lock );
93019304 }
93029305
0 commit comments