@@ -907,13 +907,13 @@ static void sdma_v4_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se
 
 
 /**
- * sdma_v4_0_gfx_stop - stop the gfx async dma engines
+ * sdma_v4_0_gfx_enable - enable the gfx async dma engines
  *
  * @adev: amdgpu_device pointer
- *
- * Stop the gfx async dma ring buffers (VEGA10).
+ * @enable: enable SDMA RB/IB
+ * control the gfx async dma ring buffers (VEGA10).
  */
-static void sdma_v4_0_gfx_stop(struct amdgpu_device *adev)
+static void sdma_v4_0_gfx_enable(struct amdgpu_device *adev, bool enable)
 {
        u32 rb_cntl, ib_cntl;
        int i;
@@ -922,10 +922,10 @@ static void sdma_v4_0_gfx_stop(struct amdgpu_device *adev)
 
        for (i = 0; i < adev->sdma.num_instances; i++) {
                rb_cntl = RREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL);
-               rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, 0);
+               rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, enable ? 1 : 0);
                WREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL, rb_cntl);
                ib_cntl = RREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL);
-               ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, 0);
+               ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, enable ? 1 : 0);
                WREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL, ib_cntl);
        }
 }
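Taken together, the two hunks above turn the old stop-only helper into a generic enable/disable helper. As a reading aid (not part of the patch text itself), the function after this change should read roughly as follows, assuming the surrounding lines are otherwise unchanged:

/**
 * sdma_v4_0_gfx_enable - enable the gfx async dma engines
 *
 * @adev: amdgpu_device pointer
 * @enable: enable SDMA RB/IB
 * control the gfx async dma ring buffers (VEGA10).
 */
static void sdma_v4_0_gfx_enable(struct amdgpu_device *adev, bool enable)
{
	u32 rb_cntl, ib_cntl;
	int i;

	for (i = 0; i < adev->sdma.num_instances; i++) {
		/* Toggle the ring buffer and indirect buffer of each SDMA instance. */
		rb_cntl = RREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL);
		rb_cntl = REG_SET_FIELD(rb_cntl, SDMA0_GFX_RB_CNTL, RB_ENABLE, enable ? 1 : 0);
		WREG32_SDMA(i, mmSDMA0_GFX_RB_CNTL, rb_cntl);
		ib_cntl = RREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL);
		ib_cntl = REG_SET_FIELD(ib_cntl, SDMA0_GFX_IB_CNTL, IB_ENABLE, enable ? 1 : 0);
		WREG32_SDMA(i, mmSDMA0_GFX_IB_CNTL, ib_cntl);
	}
}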
@@ -1044,7 +1044,7 @@ static void sdma_v4_0_enable(struct amdgpu_device *adev, bool enable)
        int i;
 
        if (!enable) {
-               sdma_v4_0_gfx_stop(adev);
+               sdma_v4_0_gfx_enable(adev, enable);
                sdma_v4_0_rlc_stop(adev);
                if (adev->sdma.has_page_queue)
                        sdma_v4_0_page_stop(adev);
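The disable path in sdma_v4_0_enable() keeps its old behaviour: with enable == false, the new helper clears RB_ENABLE and IB_ENABLE exactly as sdma_v4_0_gfx_stop() did. A hypothetical wrapper (the name sdma_v4_0_gfx_stop_compat is illustrative only, not part of the patch) makes the equivalence explicit:

/* Illustrative only: the old "stop" behaviour is now just the
 * enable == false case of the new helper.
 */
static inline void sdma_v4_0_gfx_stop_compat(struct amdgpu_device *adev)
{
	sdma_v4_0_gfx_enable(adev, false);
}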
@@ -1960,8 +1960,10 @@ static int sdma_v4_0_suspend(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
        /* SMU saves SDMA state for us */
-       if (adev->in_s0ix)
+       if (adev->in_s0ix) {
+               sdma_v4_0_gfx_enable(adev, false);
                return 0;
+       }
 
        return sdma_v4_0_hw_fini(adev);
 }
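With this hunk applied, the s0ix suspend path no longer returns early while the gfx rings are still enabled; it turns off the RB/IB first and leaves the rest of the SDMA state to the SMU. The resulting function, sketched from the hunk above and assuming no other changes in this function:

static int sdma_v4_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* SMU saves SDMA state for us */
	if (adev->in_s0ix) {
		/* Only the gfx ring/indirect buffers need to be turned off here. */
		sdma_v4_0_gfx_enable(adev, false);
		return 0;
	}

	return sdma_v4_0_hw_fini(adev);
}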
@@ -1971,8 +1973,12 @@ static int sdma_v4_0_resume(void *handle)
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
        /* SMU restores SDMA state for us */
-       if (adev->in_s0ix)
+       if (adev->in_s0ix) {
+               sdma_v4_0_enable(adev, true);
+               sdma_v4_0_gfx_enable(adev, true);
+               amdgpu_ttm_set_buffer_funcs_status(adev, true);
                return 0;
+       }
 
        return sdma_v4_0_hw_init(adev);
 }
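Resume mirrors suspend: in the s0ix case the patch re-enables the SDMA engines and their gfx ring/indirect buffers, and re-registers SDMA as the TTM buffer-funcs provider, before taking the early return. Again a sketch of the post-patch function, assuming the surrounding code is unchanged:

static int sdma_v4_0_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* SMU restores SDMA state for us */
	if (adev->in_s0ix) {
		/* Undo what the s0ix suspend path did above. */
		sdma_v4_0_enable(adev, true);
		sdma_v4_0_gfx_enable(adev, true);
		amdgpu_ttm_set_buffer_funcs_status(adev, true);
		return 0;
	}

	return sdma_v4_0_hw_init(adev);
}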