@@ -1957,10 +1957,16 @@ static void svm_range_drain_retry_fault(struct svm_range_list *svms)
 {
 	struct kfd_process_device *pdd;
 	struct kfd_process *p;
+	int drain;
 	uint32_t i;
 
 	p = container_of(svms, struct kfd_process, svms);
 
+restart:
+	drain = atomic_read(&svms->drain_pagefaults);
+	if (!drain)
+		return;
+
 	for_each_set_bit(i, svms->bitmap_supported, p->n_pdds) {
 		pdd = p->pdds[i];
 		if (!pdd)
@@ -1972,6 +1978,8 @@ static void svm_range_drain_retry_fault(struct svm_range_list *svms)
 						     &pdd->dev->adev->irq.ih1);
 		pr_debug("drain retry fault gpu %d svms 0x%p done\n", i, svms);
 	}
+	if (atomic_cmpxchg(&svms->drain_pagefaults, drain, 0) != drain)
+		goto restart;
 }
 
 static void svm_range_deferred_list_work(struct work_struct *work)
@@ -1997,8 +2005,7 @@ static void svm_range_deferred_list_work(struct work_struct *work)
 	/* Checking for the need to drain retry faults must be inside
 	 * mmap write lock to serialize with munmap notifiers.
 	 */
-	if (unlikely(READ_ONCE(svms->drain_pagefaults))) {
-		WRITE_ONCE(svms->drain_pagefaults, false);
+	if (unlikely(atomic_read(&svms->drain_pagefaults))) {
 		mmap_write_unlock(mm);
 		svm_range_drain_retry_fault(svms);
 		goto retry;
@@ -2045,12 +2052,6 @@ svm_range_add_list_work(struct svm_range_list *svms, struct svm_range *prange,
 		    struct mm_struct *mm, enum svm_work_list_ops op)
 {
 	spin_lock(&svms->deferred_list_lock);
-	/* Make sure pending page faults are drained in the deferred worker
-	 * before the range is freed to avoid straggler interrupts on
-	 * unmapped memory causing "phantom faults".
-	 */
-	if (op == SVM_OP_UNMAP_RANGE)
-		svms->drain_pagefaults = true;
 	/* if prange is on the deferred list */
 	if (!list_empty(&prange->deferred_list)) {
 		pr_debug("update exist prange 0x%p work op %d\n", prange, op);
@@ -2129,6 +2130,12 @@ svm_range_unmap_from_cpu(struct mm_struct *mm, struct svm_range *prange,
 	pr_debug("svms 0x%p prange 0x%p [0x%lx 0x%lx] [0x%lx 0x%lx]\n", svms,
 		 prange, prange->start, prange->last, start, last);
 
+	/* Make sure pending page faults are drained in the deferred worker
+	 * before the range is freed to avoid straggler interrupts on
+	 * unmapped memory causing "phantom faults".
+	 */
+	atomic_inc(&svms->drain_pagefaults);
+
 	unmap_parent = start <= prange->start && last >= prange->last;
 
 	list_for_each_entry(pchild, &prange->child_list, child_list) {
@@ -2594,6 +2601,11 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid,
 
 	pr_debug("restoring svms 0x%p fault address 0x%llx\n", svms, addr);
 
+	if (atomic_read(&svms->drain_pagefaults)) {
+		pr_debug("draining retry fault, drop fault 0x%llx\n", addr);
+		goto out;
+	}
+
 	/* p->lead_thread is available as kfd_process_wq_release flush the work
 	 * before releasing task ref.
 	 */
@@ -2740,6 +2752,7 @@ void svm_range_list_fini(struct kfd_process *p)
 	 * Ensure no retry fault comes in afterwards, as page fault handler will
 	 * not find kfd process and take mm lock to recover fault.
 	 */
+	atomic_inc(&p->svms.drain_pagefaults);
 	svm_range_drain_retry_fault(&p->svms);
 
 	list_for_each_entry_safe(prange, next, &p->svms.list, list) {
@@ -2763,6 +2776,7 @@ int svm_range_list_init(struct kfd_process *p)
 	mutex_init(&svms->lock);
 	INIT_LIST_HEAD(&svms->list);
 	atomic_set(&svms->evicted_ranges, 0);
+	atomic_set(&svms->drain_pagefaults, 0);
 	INIT_DELAYED_WORK(&svms->restore_work, svm_range_restore_work);
 	INIT_WORK(&svms->deferred_list_work, svm_range_deferred_list_work);
 	INIT_LIST_HEAD(&svms->deferred_range_list);