@@ -4694,21 +4694,6 @@ static void gfx_v12_0_ring_emit_reg_write_reg_wait(struct amdgpu_ring *ring,
 			       ref, mask, 0x20);
 }
 
-static void gfx_v12_0_ring_soft_recovery(struct amdgpu_ring *ring,
-					 unsigned vmid)
-{
-	struct amdgpu_device *adev = ring->adev;
-	uint32_t value = 0;
-
-	value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03);
-	value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
-	value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
-	value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
-	amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
-	WREG32_SOC15(GC, 0, regSQ_CMD, value);
-	amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
-}
-
 static void
 gfx_v12_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
 				      uint32_t me, uint32_t pipe,
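For context on what the dropped soft-recovery path did: it packed an SQ_CMD "kill waves" request one bitfield at a time via REG_SET_FIELD(), wrote the register while RLC safe mode was held, then exited safe mode. Below is a minimal, self-contained sketch of that field-packing pattern; SET_FIELD here is a simplified stand-in for the kernel macro, and the shift/mask values are hypothetical placeholders, not the real SQ_CMD layout.

#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative stand-in for the kernel's REG_SET_FIELD(): clear the
 * field's bits in the accumulated value, then OR in the new field
 * value shifted into position.
 */
#define FIELD_SHIFT(f)	f##__SHIFT
#define FIELD_MASK(f)	f##__MASK
#define SET_FIELD(val, f, v) \
	(((val) & ~FIELD_MASK(f)) | \
	 (((uint32_t)(v) << FIELD_SHIFT(f)) & FIELD_MASK(f)))

/* Hypothetical SQ_CMD field layout, for demonstration only. */
#define SQ_CMD_CMD__SHIFT		0
#define SQ_CMD_CMD__MASK		0x0000000FU
#define SQ_CMD_MODE__SHIFT		4
#define SQ_CMD_MODE__MASK		0x00000070U
#define SQ_CMD_CHECK_VMID__SHIFT	7
#define SQ_CMD_CHECK_VMID__MASK		0x00000080U
#define SQ_CMD_VM_ID__SHIFT		8
#define SQ_CMD_VM_ID__MASK		0x00000F00U

int main(void)
{
	uint32_t value = 0;
	unsigned int vmid = 3;

	/* Same sequence the removed function used to build its command. */
	value = SET_FIELD(value, SQ_CMD_CMD, 0x03);
	value = SET_FIELD(value, SQ_CMD_MODE, 0x01);
	value = SET_FIELD(value, SQ_CMD_CHECK_VMID, 1);
	value = SET_FIELD(value, SQ_CMD_VM_ID, vmid);
	printf("SQ_CMD = 0x%08x\n", value);
	return 0;
}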
@@ -5321,7 +5306,7 @@ static int gfx_v12_0_reset_kgq(struct amdgpu_ring *ring,
 	if (!(adev->gfx.gfx_supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE))
 		return -EOPNOTSUPP;
 
-	drm_sched_wqueue_stop(&ring->sched);
+	amdgpu_ring_reset_helper_begin(ring, timedout_fence);
 
 	r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, false);
 	if (r) {
@@ -5343,12 +5328,7 @@ static int gfx_v12_0_reset_kgq(struct amdgpu_ring *ring,
 		return r;
 	}
 
-	r = amdgpu_ring_test_ring(ring);
-	if (r)
-		return r;
-	amdgpu_fence_driver_force_completion(ring);
-	drm_sched_wqueue_start(&ring->sched);
-	return 0;
+	return amdgpu_ring_reset_helper_end(ring, timedout_fence);
 }
 
 static int gfx_v12_0_reset_compute_pipe(struct amdgpu_ring *ring)
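Both reset paths previously open-coded the same stop/test/complete/restart sequence around the MES queue reset; the new helpers factor that out and additionally take the timed-out fence, presumably so common code can single out the guilty job. A minimal sketch of what the helpers plausibly cover, inferred only from the lines removed in this diff (the real implementations in amdgpu_ring.c may do more, e.g. backing up and re-emitting unprocessed ring contents):

/*
 * Sketch inferred from the removed open-coded sequence above; not the
 * actual amdgpu_ring.c implementation. The timedout_fence argument is
 * assumed to let common code handle the guilty job specifically.
 */
static void sketch_reset_helper_begin(struct amdgpu_ring *ring,
				      struct amdgpu_fence *timedout_fence)
{
	/* Park the scheduler so nothing touches the ring during reset. */
	drm_sched_wqueue_stop(&ring->sched);
}

static int sketch_reset_helper_end(struct amdgpu_ring *ring,
				   struct amdgpu_fence *timedout_fence)
{
	int r;

	/* Verify the queue actually came back after the reset. */
	r = amdgpu_ring_test_ring(ring);
	if (r)
		return r;

	/* Signal outstanding fences so waiters of the hung job unblock. */
	amdgpu_fence_driver_force_completion(ring);

	/* Resume scheduling on the recovered ring. */
	drm_sched_wqueue_start(&ring->sched);
	return 0;
}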
@@ -5444,7 +5424,7 @@ static int gfx_v12_0_reset_kcq(struct amdgpu_ring *ring,
 	if (!(adev->gfx.compute_supported_reset & AMDGPU_RESET_TYPE_PER_QUEUE))
 		return -EOPNOTSUPP;
 
-	drm_sched_wqueue_stop(&ring->sched);
+	amdgpu_ring_reset_helper_begin(ring, timedout_fence);
 
 	r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, true);
 	if (r) {
@@ -5465,12 +5445,7 @@ static int gfx_v12_0_reset_kcq(struct amdgpu_ring *ring,
 		return r;
 	}
 
-	r = amdgpu_ring_test_ring(ring);
-	if (r)
-		return r;
-	amdgpu_fence_driver_force_completion(ring);
-	drm_sched_wqueue_start(&ring->sched);
-	return 0;
+	return amdgpu_ring_reset_helper_end(ring, timedout_fence);
 }
 
 static void gfx_v12_0_ring_begin_use(struct amdgpu_ring *ring)
@@ -5548,7 +5523,6 @@ static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_gfx = {
 	.emit_wreg = gfx_v12_0_ring_emit_wreg,
 	.emit_reg_wait = gfx_v12_0_ring_emit_reg_wait,
 	.emit_reg_write_reg_wait = gfx_v12_0_ring_emit_reg_write_reg_wait,
-	.soft_recovery = gfx_v12_0_ring_soft_recovery,
 	.emit_mem_sync = gfx_v12_0_emit_mem_sync,
 	.reset = gfx_v12_0_reset_kgq,
 	.emit_cleaner_shader = gfx_v12_0_ring_emit_cleaner_shader,
@@ -5587,7 +5561,6 @@ static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_compute = {
 	.emit_wreg = gfx_v12_0_ring_emit_wreg,
 	.emit_reg_wait = gfx_v12_0_ring_emit_reg_wait,
 	.emit_reg_write_reg_wait = gfx_v12_0_ring_emit_reg_write_reg_wait,
-	.soft_recovery = gfx_v12_0_ring_soft_recovery,
 	.emit_mem_sync = gfx_v12_0_emit_mem_sync,
 	.reset = gfx_v12_0_reset_kcq,
 	.emit_cleaner_shader = gfx_v12_0_ring_emit_cleaner_shader,
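With .soft_recovery dropped from both funcs tables, a job timeout on these rings can no longer be handled by killing the hung waves in place; it falls through to the per-queue .reset callback (gfx_v12_0_reset_kgq / gfx_v12_0_reset_kcq). A rough, self-contained sketch of that dispatch order, using hypothetical simplified types rather than the real amdgpu_job_timedout() logic:

#include <stdbool.h>

/* Hypothetical, simplified stand-ins; the real dispatch lives in
 * amdgpu_job_timedout() and is considerably more involved. */
struct ring_funcs {
	bool (*soft_recovery)(void *ring, unsigned int vmid);
	int  (*reset)(void *ring, unsigned int vmid, void *timedout_fence);
};

static int handle_timeout(const struct ring_funcs *funcs, void *ring,
			  unsigned int vmid, void *timedout_fence)
{
	/* Lightweight path: kill the hung waves in place, if the IP
	 * block provides it. After this patch, gfx12 does not. */
	if (funcs->soft_recovery && funcs->soft_recovery(ring, vmid))
		return 0;

	/* Per-queue reset path: gfx_v12_0_reset_kgq()/_kcq() above. */
	if (funcs->reset)
		return funcs->reset(ring, vmid, timedout_fence);

	return -1;	/* neither available: escalate to a full GPU reset */
}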