@@ -684,12 +684,17 @@ int amdgpu_gmc_flush_gpu_tlb_pasid(struct amdgpu_device *adev, uint16_t pasid,
 	struct amdgpu_ring *ring = &adev->gfx.kiq[inst].ring;
 	struct amdgpu_kiq *kiq = &adev->gfx.kiq[inst];
 	unsigned int ndw;
-	signed long r;
+	int r;
 	uint32_t seq;
 
-	if (!adev->gmc.flush_pasid_uses_kiq || !ring->sched.ready ||
-	    !down_read_trylock(&adev->reset_domain->sem)) {
+	/*
+	 * A GPU reset should flush all TLBs anyway, so no need to do
+	 * this while one is ongoing.
+	 */
+	if (!down_read_trylock(&adev->reset_domain->sem))
+		return 0;
 
+	if (!adev->gmc.flush_pasid_uses_kiq || !ring->sched.ready) {
 		if (adev->gmc.flush_tlb_needs_extra_type_2)
 			adev->gmc.gmc_funcs->flush_gpu_tlb_pasid(adev, pasid,
 								 2, all_hub,
@@ -703,43 +708,40 @@ int amdgpu_gmc_flush_gpu_tlb_pasid(struct amdgpu_device *adev, uint16_t pasid,
 		adev->gmc.gmc_funcs->flush_gpu_tlb_pasid(adev, pasid,
 							 flush_type, all_hub,
 							 inst);
-		return 0;
-	}
+		r = 0;
+	} else {
+		/* 2 dwords flush + 8 dwords fence */
+		ndw = kiq->pmf->invalidate_tlbs_size + 8;
 
-	/* 2 dwords flush + 8 dwords fence */
-	ndw = kiq->pmf->invalidate_tlbs_size + 8;
+		if (adev->gmc.flush_tlb_needs_extra_type_2)
+			ndw += kiq->pmf->invalidate_tlbs_size;
 
-	if (adev->gmc.flush_tlb_needs_extra_type_2)
-		ndw += kiq->pmf->invalidate_tlbs_size;
+		if (adev->gmc.flush_tlb_needs_extra_type_0)
+			ndw += kiq->pmf->invalidate_tlbs_size;
 
-	if (adev->gmc.flush_tlb_needs_extra_type_0)
-		ndw += kiq->pmf->invalidate_tlbs_size;
+		spin_lock(&adev->gfx.kiq[inst].ring_lock);
+		amdgpu_ring_alloc(ring, ndw);
+		if (adev->gmc.flush_tlb_needs_extra_type_2)
+			kiq->pmf->kiq_invalidate_tlbs(ring, pasid, 2, all_hub);
 
-	spin_lock(&adev->gfx.kiq[inst].ring_lock);
-	amdgpu_ring_alloc(ring, ndw);
-	if (adev->gmc.flush_tlb_needs_extra_type_2)
-		kiq->pmf->kiq_invalidate_tlbs(ring, pasid, 2, all_hub);
+		if (flush_type == 2 && adev->gmc.flush_tlb_needs_extra_type_0)
+			kiq->pmf->kiq_invalidate_tlbs(ring, pasid, 0, all_hub);
 
-	if (flush_type == 2 && adev->gmc.flush_tlb_needs_extra_type_0)
-		kiq->pmf->kiq_invalidate_tlbs(ring, pasid, 0, all_hub);
+		kiq->pmf->kiq_invalidate_tlbs(ring, pasid, flush_type, all_hub);
+		r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
+		if (r) {
+			amdgpu_ring_undo(ring);
+			spin_unlock(&adev->gfx.kiq[inst].ring_lock);
+			goto error_unlock_reset;
+		}
 
-	kiq->pmf->kiq_invalidate_tlbs(ring, pasid, flush_type, all_hub);
-	r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
-	if (r) {
-		amdgpu_ring_undo(ring);
+		amdgpu_ring_commit(ring);
 		spin_unlock(&adev->gfx.kiq[inst].ring_lock);
-		goto error_unlock_reset;
-	}
-
-	amdgpu_ring_commit(ring);
-	spin_unlock(&adev->gfx.kiq[inst].ring_lock);
-	r = amdgpu_fence_wait_polling(ring, seq, usec_timeout);
-	if (r < 1) {
-		dev_err(adev->dev, "wait for kiq fence error: %ld.\n", r);
-		r = -ETIME;
-		goto error_unlock_reset;
+		if (amdgpu_fence_wait_polling(ring, seq, usec_timeout) < 1) {
+			dev_err(adev->dev, "timeout waiting for kiq fence\n");
+			r = -ETIME;
+		}
 	}
-	r = 0;
 
 error_unlock_reset:
 	up_read(&adev->reset_domain->sem);
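
For context, the early-return guard introduced above follows the usual reset-domain read-lock idiom: try to take adev->reset_domain->sem for reading, bail out with success if a reset (which holds the semaphore for writing and flushes all TLBs anyway) is in flight, and release the lock on every other exit path. A minimal sketch of that idiom, using a stand-in rw_semaphore and a hypothetical do_invalidation() helper rather than the actual amdgpu structures:

#include <linux/rwsem.h>

/* Stand-in for the work normally done under the read lock. */
static int do_invalidation(void);

static int flush_unless_reset(struct rw_semaphore *reset_sem)
{
	int r;

	/*
	 * A reset holds reset_sem for writing; if the read side cannot
	 * be taken, a reset is in progress and will flush the TLBs
	 * itself, so report success without doing anything.
	 */
	if (!down_read_trylock(reset_sem))
		return 0;

	r = do_invalidation();

	up_read(reset_sem);
	return r;
}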