Skip to content

Commit b72ed8a

Browse files
fxkamd authored and alexdeucher committed
drm/amdgpu: DMA map/unmap when updating GPU mappings
DMA map kfd_mem_attachments in update_gpuvm_pte. This function is called with the BO and page tables reserved, so we can safely update the DMA mapping. DMA unmap when a BO is unmapped from a GPU and before updating mappings in restore workers. Signed-off-by: Felix Kuehling <[email protected]> Acked-by: Oak Zeng <[email protected]> Acked-by: Ramesh Errabolu <[email protected]> Signed-off-by: Alex Deucher <[email protected]>
1 parent 264fb4d commit b72ed8a

File tree

1 file changed

+29
-27
lines changed

1 file changed

+29
-27
lines changed

drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c

Lines changed: 29 additions & 27 deletions
Original file line numberDiff line numberDiff line change
@@ -966,11 +966,12 @@ static int unreserve_bo_and_vms(struct bo_vm_reservation_context *ctx,
966966
return ret;
967967
}
968968

969-
static int unmap_bo_from_gpuvm(struct amdgpu_device *adev,
969+
static void unmap_bo_from_gpuvm(struct kgd_mem *mem,
970970
struct kfd_mem_attachment *entry,
971971
struct amdgpu_sync *sync)
972972
{
973973
struct amdgpu_bo_va *bo_va = entry->bo_va;
974+
struct amdgpu_device *adev = entry->adev;
974975
struct amdgpu_vm *vm = bo_va->base.vm;
975976

976977
amdgpu_vm_bo_unmap(adev, bo_va, entry->va);
@@ -979,15 +980,20 @@ static int unmap_bo_from_gpuvm(struct amdgpu_device *adev,
979980

980981
amdgpu_sync_fence(sync, bo_va->last_pt_update);
981982

982-
return 0;
983+
kfd_mem_dmaunmap_attachment(mem, entry);
983984
}
984985

985-
static int update_gpuvm_pte(struct amdgpu_device *adev,
986-
struct kfd_mem_attachment *entry,
987-
struct amdgpu_sync *sync)
986+
static int update_gpuvm_pte(struct kgd_mem *mem,
987+
struct kfd_mem_attachment *entry,
988+
struct amdgpu_sync *sync)
988989
{
989-
int ret;
990990
struct amdgpu_bo_va *bo_va = entry->bo_va;
991+
struct amdgpu_device *adev = entry->adev;
992+
int ret;
993+
994+
ret = kfd_mem_dmamap_attachment(mem, entry);
995+
if (ret)
996+
return ret;
991997

992998
/* Update the page tables */
993999
ret = amdgpu_vm_bo_update(adev, bo_va, false);
@@ -999,14 +1005,15 @@ static int update_gpuvm_pte(struct amdgpu_device *adev,
9991005
return amdgpu_sync_fence(sync, bo_va->last_pt_update);
10001006
}
10011007

1002-
static int map_bo_to_gpuvm(struct amdgpu_device *adev,
1003-
struct kfd_mem_attachment *entry, struct amdgpu_sync *sync,
1004-
bool no_update_pte)
1008+
static int map_bo_to_gpuvm(struct kgd_mem *mem,
1009+
struct kfd_mem_attachment *entry,
1010+
struct amdgpu_sync *sync,
1011+
bool no_update_pte)
10051012
{
10061013
int ret;
10071014

10081015
/* Set virtual address for the allocation */
1009-
ret = amdgpu_vm_bo_map(adev, entry->bo_va, entry->va, 0,
1016+
ret = amdgpu_vm_bo_map(entry->adev, entry->bo_va, entry->va, 0,
10101017
amdgpu_bo_size(entry->bo_va->base.bo),
10111018
entry->pte_flags);
10121019
if (ret) {
@@ -1018,7 +1025,7 @@ static int map_bo_to_gpuvm(struct amdgpu_device *adev,
10181025
if (no_update_pte)
10191026
return 0;
10201027

1021-
ret = update_gpuvm_pte(adev, entry, sync);
1028+
ret = update_gpuvm_pte(mem, entry, sync);
10221029
if (ret) {
10231030
pr_err("update_gpuvm_pte() failed\n");
10241031
goto update_gpuvm_pte_failed;
@@ -1027,7 +1034,7 @@ static int map_bo_to_gpuvm(struct amdgpu_device *adev,
10271034
return 0;
10281035

10291036
update_gpuvm_pte_failed:
1030-
unmap_bo_from_gpuvm(adev, entry, sync);
1037+
unmap_bo_from_gpuvm(mem, entry, sync);
10311038
return ret;
10321039
}
10331040

@@ -1601,7 +1608,7 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
16011608
pr_debug("\t map VA 0x%llx - 0x%llx in entry %p\n",
16021609
entry->va, entry->va + bo_size, entry);
16031610

1604-
ret = map_bo_to_gpuvm(adev, entry, ctx.sync,
1611+
ret = map_bo_to_gpuvm(mem, entry, ctx.sync,
16051612
is_invalid_userptr);
16061613
if (ret) {
16071614
pr_err("Failed to map bo to gpuvm\n");
@@ -1640,7 +1647,6 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
16401647
int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
16411648
struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv)
16421649
{
1643-
struct amdgpu_device *adev = get_amdgpu_device(kgd);
16441650
struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
16451651
struct amdkfd_process_info *process_info = avm->process_info;
16461652
unsigned long bo_size = mem->bo->tbo.base.size;
@@ -1675,13 +1681,8 @@ int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
16751681
pr_debug("\t unmap VA 0x%llx - 0x%llx from entry %p\n",
16761682
entry->va, entry->va + bo_size, entry);
16771683

1678-
ret = unmap_bo_from_gpuvm(adev, entry, ctx.sync);
1679-
if (ret == 0) {
1680-
entry->is_mapped = false;
1681-
} else {
1682-
pr_err("failed to unmap VA 0x%llx\n", mem->va);
1683-
goto unreserve_out;
1684-
}
1684+
unmap_bo_from_gpuvm(mem, entry, ctx.sync);
1685+
entry->is_mapped = false;
16851686

16861687
mem->mapped_to_gpu_memory--;
16871688
pr_debug("\t DEC mapping count %d\n",
@@ -2058,9 +2059,8 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
20582059
if (!attachment->is_mapped)
20592060
continue;
20602061

2061-
ret = update_gpuvm_pte((struct amdgpu_device *)
2062-
attachment->adev,
2063-
attachment, &sync);
2062+
kfd_mem_dmaunmap_attachment(mem, attachment);
2063+
ret = update_gpuvm_pte(mem, attachment, &sync);
20642064
if (ret) {
20652065
pr_err("%s: update PTE failed\n", __func__);
20662066
/* make sure this gets validated again */
@@ -2262,9 +2262,11 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
22622262
goto validate_map_fail;
22632263
}
22642264
list_for_each_entry(attachment, &mem->attachments, list) {
2265-
ret = update_gpuvm_pte((struct amdgpu_device *)
2266-
attachment->adev, attachment,
2267-
&sync_obj);
2265+
if (!attachment->is_mapped)
2266+
continue;
2267+
2268+
kfd_mem_dmaunmap_attachment(mem, attachment);
2269+
ret = update_gpuvm_pte(mem, attachment, &sync_obj);
22682270
if (ret) {
22692271
pr_debug("Memory eviction: update PTE failed. Try again\n");
22702272
goto validate_map_fail;

0 commit comments

Comments (0)