@@ -966,11 +966,12 @@ static int unreserve_bo_and_vms(struct bo_vm_reservation_context *ctx,
 	return ret;
 }
 
-static int unmap_bo_from_gpuvm(struct amdgpu_device *adev,
+static void unmap_bo_from_gpuvm(struct kgd_mem *mem,
 				struct kfd_mem_attachment *entry,
 				struct amdgpu_sync *sync)
 {
 	struct amdgpu_bo_va *bo_va = entry->bo_va;
+	struct amdgpu_device *adev = entry->adev;
 	struct amdgpu_vm *vm = bo_va->base.vm;
 
 	amdgpu_vm_bo_unmap(adev, bo_va, entry->va);
@@ -979,15 +980,20 @@ static int unmap_bo_from_gpuvm(struct amdgpu_device *adev,
 
 	amdgpu_sync_fence(sync, bo_va->last_pt_update);
 
-	return 0;
+	kfd_mem_dmaunmap_attachment(mem, entry);
 }
 
-static int update_gpuvm_pte(struct amdgpu_device *adev,
-		struct kfd_mem_attachment *entry,
-		struct amdgpu_sync *sync)
+static int update_gpuvm_pte(struct kgd_mem *mem,
+			    struct kfd_mem_attachment *entry,
+			    struct amdgpu_sync *sync)
 {
-	int ret;
 	struct amdgpu_bo_va *bo_va = entry->bo_va;
+	struct amdgpu_device *adev = entry->adev;
+	int ret;
+
+	ret = kfd_mem_dmamap_attachment(mem, entry);
+	if (ret)
+		return ret;
 
 	/* Update the page tables */
 	ret = amdgpu_vm_bo_update(adev, bo_va, false);
@@ -999,14 +1005,15 @@ static int update_gpuvm_pte(struct amdgpu_device *adev,
 	return amdgpu_sync_fence(sync, bo_va->last_pt_update);
 }
 
-static int map_bo_to_gpuvm(struct amdgpu_device *adev,
-		struct kfd_mem_attachment *entry, struct amdgpu_sync *sync,
-		bool no_update_pte)
+static int map_bo_to_gpuvm(struct kgd_mem *mem,
+			   struct kfd_mem_attachment *entry,
+			   struct amdgpu_sync *sync,
+			   bool no_update_pte)
 {
 	int ret;
 
 	/* Set virtual address for the allocation */
-	ret = amdgpu_vm_bo_map(adev, entry->bo_va, entry->va, 0,
+	ret = amdgpu_vm_bo_map(entry->adev, entry->bo_va, entry->va, 0,
 			       amdgpu_bo_size(entry->bo_va->base.bo),
 			       entry->pte_flags);
 	if (ret) {
@@ -1018,7 +1025,7 @@ static int map_bo_to_gpuvm(struct amdgpu_device *adev,
 	if (no_update_pte)
 		return 0;
 
-	ret = update_gpuvm_pte(adev, entry, sync);
+	ret = update_gpuvm_pte(mem, entry, sync);
 	if (ret) {
 		pr_err("update_gpuvm_pte() failed\n");
 		goto update_gpuvm_pte_failed;
@@ -1027,7 +1034,7 @@ static int map_bo_to_gpuvm(struct amdgpu_device *adev,
 	return 0;
 
 update_gpuvm_pte_failed:
-	unmap_bo_from_gpuvm(adev, entry, sync);
+	unmap_bo_from_gpuvm(mem, entry, sync);
 	return ret;
 }
 
@@ -1601,7 +1608,7 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
 		pr_debug("\t map VA 0x%llx - 0x%llx in entry %p\n",
 			 entry->va, entry->va + bo_size, entry);
 
-		ret = map_bo_to_gpuvm(adev, entry, ctx.sync,
+		ret = map_bo_to_gpuvm(mem, entry, ctx.sync,
 				      is_invalid_userptr);
 		if (ret) {
 			pr_err("Failed to map bo to gpuvm\n");
@@ -1640,7 +1647,6 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
 int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
 		struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv)
 {
-	struct amdgpu_device *adev = get_amdgpu_device(kgd);
 	struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
 	struct amdkfd_process_info *process_info = avm->process_info;
 	unsigned long bo_size = mem->bo->tbo.base.size;
@@ -1675,13 +1681,8 @@ int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
 		pr_debug("\t unmap VA 0x%llx - 0x%llx from entry %p\n",
 			 entry->va, entry->va + bo_size, entry);
 
-		ret = unmap_bo_from_gpuvm(adev, entry, ctx.sync);
-		if (ret == 0) {
-			entry->is_mapped = false;
-		} else {
-			pr_err("failed to unmap VA 0x%llx\n", mem->va);
-			goto unreserve_out;
-		}
+		unmap_bo_from_gpuvm(mem, entry, ctx.sync);
+		entry->is_mapped = false;
 
 		mem->mapped_to_gpu_memory--;
 		pr_debug("\t DEC mapping count %d\n",
@@ -2058,9 +2059,8 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
 		if (!attachment->is_mapped)
 			continue;
 
-		ret = update_gpuvm_pte((struct amdgpu_device *)
-				       attachment->adev,
-				       attachment, &sync);
+		kfd_mem_dmaunmap_attachment(mem, attachment);
+		ret = update_gpuvm_pte(mem, attachment, &sync);
 		if (ret) {
 			pr_err("%s: update PTE failed\n", __func__);
 			/* make sure this gets validated again */
@@ -2262,9 +2262,11 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
 			goto validate_map_fail;
 		}
 		list_for_each_entry(attachment, &mem->attachments, list) {
-			ret = update_gpuvm_pte((struct amdgpu_device *)
-					       attachment->adev, attachment,
-					       &sync_obj);
+			if (!attachment->is_mapped)
+				continue;
+
+			kfd_mem_dmaunmap_attachment(mem, attachment);
+			ret = update_gpuvm_pte(mem, attachment, &sync_obj);
 			if (ret) {
 				pr_debug("Memory eviction: update PTE failed. Try again\n");
 				goto validate_map_fail;
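
For orientation, here is what unmap_bo_from_gpuvm() looks like once the hunks above are applied, stitched together from the patch context alone. The old lines 977-978 fall between the first two hunks and are not visible in this diff, so the sketch marks that gap rather than guessing at it:

static void unmap_bo_from_gpuvm(struct kgd_mem *mem,
				struct kfd_mem_attachment *entry,
				struct amdgpu_sync *sync)
{
	struct amdgpu_bo_va *bo_va = entry->bo_va;
	struct amdgpu_device *adev = entry->adev;	/* new: taken from the attachment */
	struct amdgpu_vm *vm = bo_va->base.vm;

	amdgpu_vm_bo_unmap(adev, bo_va, entry->va);

	/* (two lines elided between the hunks -- presumably where vm is used) */

	amdgpu_sync_fence(sync, bo_va->last_pt_update);

	/* New: the DMA/SG mapping is torn down together with the GPUVM
	 * mapping; nothing on this path reports failure, which is why the
	 * return type changes from int to void. */
	kfd_mem_dmaunmap_attachment(mem, entry);
}

The same pairing explains the two restore paths at the bottom of the patch: update_gpuvm_pte() now DMA-maps the attachment itself via kfd_mem_dmamap_attachment(), so callers that need fresh DMA addresses after an eviction first call kfd_mem_dmaunmap_attachment() and then let update_gpuvm_pte() re-map the attachment before it updates the page tables.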