@@ -110,6 +110,7 @@ static int vcn_v3_0_set_pg_state(struct amdgpu_vcn_inst *vinst,
110
110
enum amd_powergating_state state );
111
111
static int vcn_v3_0_pause_dpg_mode (struct amdgpu_vcn_inst * vinst ,
112
112
struct dpg_pause_state * new_state );
113
+ static int vcn_v3_0_reset (struct amdgpu_vcn_inst * vinst );
113
114
114
115
static void vcn_v3_0_dec_ring_set_wptr (struct amdgpu_ring * ring );
115
116
static void vcn_v3_0_enc_ring_set_wptr (struct amdgpu_ring * ring );
@@ -289,8 +290,14 @@ static int vcn_v3_0_sw_init(struct amdgpu_ip_block *ip_block)
289
290
290
291
if (adev -> pg_flags & AMD_PG_SUPPORT_VCN_DPG )
291
292
adev -> vcn .inst [i ].pause_dpg_mode = vcn_v3_0_pause_dpg_mode ;
293
+ adev -> vcn .inst [i ].reset = vcn_v3_0_reset ;
292
294
}
293
295
296
+ adev -> vcn .supported_reset =
297
+ amdgpu_get_soft_full_reset_mask (& adev -> vcn .inst [0 ].ring_enc [0 ]);
298
+ if (!amdgpu_sriov_vf (adev ))
299
+ adev -> vcn .supported_reset |= AMDGPU_RESET_TYPE_PER_QUEUE ;
300
+
294
301
if (amdgpu_sriov_vf (adev )) {
295
302
r = amdgpu_virt_alloc_mm_table (adev );
296
303
if (r )
@@ -306,6 +313,10 @@ static int vcn_v3_0_sw_init(struct amdgpu_ip_block *ip_block)
306
313
adev -> vcn .ip_dump = ptr ;
307
314
}
308
315
316
+ r = amdgpu_vcn_sysfs_reset_mask_init (adev );
317
+ if (r )
318
+ return r ;
319
+
309
320
return 0 ;
310
321
}
311
322
@@ -338,6 +349,8 @@ static int vcn_v3_0_sw_fini(struct amdgpu_ip_block *ip_block)
338
349
if (amdgpu_sriov_vf (adev ))
339
350
amdgpu_virt_free_mm_table (adev );
340
351
352
+ amdgpu_vcn_sysfs_reset_mask_fini (adev );
353
+
341
354
for (i = 0 ; i < adev -> vcn .num_vcn_inst ; i ++ ) {
342
355
r = amdgpu_vcn_suspend (adev , i );
343
356
if (r )
@@ -2033,6 +2046,7 @@ static const struct amdgpu_ring_funcs vcn_v3_0_dec_ring_vm_funcs = {
2033
2046
.emit_wreg = vcn_v2_0_dec_ring_emit_wreg ,
2034
2047
.emit_reg_wait = vcn_v2_0_dec_ring_emit_reg_wait ,
2035
2048
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper ,
2049
+ .reset = amdgpu_vcn_ring_reset ,
2036
2050
};
2037
2051
2038
2052
/**
@@ -2131,6 +2145,7 @@ static const struct amdgpu_ring_funcs vcn_v3_0_enc_ring_vm_funcs = {
2131
2145
.emit_wreg = vcn_v2_0_enc_ring_emit_wreg ,
2132
2146
.emit_reg_wait = vcn_v2_0_enc_ring_emit_reg_wait ,
2133
2147
.emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper ,
2148
+ .reset = amdgpu_vcn_ring_reset ,
2134
2149
};
2135
2150
2136
2151
static void vcn_v3_0_set_dec_ring_funcs (struct amdgpu_device * adev )
@@ -2164,6 +2179,18 @@ static void vcn_v3_0_set_enc_ring_funcs(struct amdgpu_device *adev)
2164
2179
}
2165
2180
}
2166
2181
2182
/*
 * vcn_v3_0_reset - reset a single VCN instance
 * @vinst: pointer to the VCN instance to reset
 *
 * Stops the instance, re-enables clock gating and static power gating,
 * then restarts it.  Returns 0 on success or the error code from the
 * stop/start path.  (NOTE(review): gating re-enable between stop and
 * start mirrors the hw_fini/hw_init sequence — confirm against the
 * other vcn_v*_reset implementations.)
 */
static int vcn_v3_0_reset(struct amdgpu_vcn_inst *vinst)
{
	int ret = vcn_v3_0_stop(vinst);

	if (ret)
		return ret;

	vcn_v3_0_enable_clock_gating(vinst);
	vcn_v3_0_enable_static_power_gating(vinst);

	return vcn_v3_0_start(vinst);
}
2193
+
2167
2194
static bool vcn_v3_0_is_idle (struct amdgpu_ip_block * ip_block )
2168
2195
{
2169
2196
struct amdgpu_device * adev = ip_block -> adev ;
/* end of diff chunk — trailing web-page residue ("0 commit comments") removed */