 static void hdp_v4_0_flush_hdp(struct amdgpu_device *adev,
 				struct amdgpu_ring *ring)
 {
-	if (!ring || !ring->funcs->emit_wreg)
+	if (!ring || !ring->funcs->emit_wreg) {
 		WREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
-	else
+		RREG32((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2);
+	} else {
 		amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
+	}
 }
 
 static void hdp_v4_0_invalidate_hdp(struct amdgpu_device *adev,
@@ -54,11 +56,13 @@ static void hdp_v4_0_invalidate_hdp(struct amdgpu_device *adev,
 	    amdgpu_ip_version(adev, HDP_HWIP, 0) == IP_VERSION(4, 4, 5))
 		return;
 
-	if (!ring || !ring->funcs->emit_wreg)
+	if (!ring || !ring->funcs->emit_wreg) {
 		WREG32_SOC15_NO_KIQ(HDP, 0, mmHDP_READ_CACHE_INVALIDATE, 1);
-	else
+		RREG32_SOC15_NO_KIQ(HDP, 0, mmHDP_READ_CACHE_INVALIDATE);
+	} else {
 		amdgpu_ring_emit_wreg(ring, SOC15_REG_OFFSET(
 			HDP, 0, mmHDP_READ_CACHE_INVALIDATE), 1);
+	}
 }
 
 static void hdp_v4_0_query_ras_error_count(struct amdgpu_device *adev,
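In both hunks the direct-MMIO path gains a read-back (RREG32/RREG32_SOC15_NO_KIQ) of the register that was just written. Reading a register back after writing it is the usual way to make sure a posted MMIO write has actually reached the device before the CPU continues, since the read cannot complete until earlier writes to the same device have been pushed out. The following is only a minimal sketch of that write-then-read-back pattern, not code from this patch; the pointer and offset names (mmio_base, FLUSH_CNTL_OFFSET) are hypothetical, and the kernel code above uses the WREG32()/RREG32() helpers instead of raw pointer accesses.

#include <stdint.h>

#define FLUSH_CNTL_OFFSET 0x40	/* hypothetical register offset, in bytes */

static void flush_hdp_sketch(volatile uint32_t *mmio_base)
{
	uint32_t tmp;

	/* Post the flush request by writing the control register. */
	mmio_base[FLUSH_CNTL_OFFSET >> 2] = 0;

	/*
	 * Read the same register back.  The read forces the preceding
	 * posted write out to the device, so the caller does not race
	 * ahead of the flush it just requested.
	 */
	tmp = mmio_base[FLUSH_CNTL_OFFSET >> 2];
	(void)tmp;
}

Assigning the read value to a local and discarding it keeps the volatile access explicit; the value itself is irrelevant, only the ordering effect of the read matters.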