
Commit a5fb8b9

Jordan Crouse authored and robclark committed
drm/msm/a6xx: Use the DMA API for GMU memory objects
The GMU has very few memory allocations and uses a flat memory space, so
there is no good reason to go out of our way to bypass the DMA APIs, which
were basically designed for this exact scenario.

v7: Check return value of dma_set_mask_and_coherent
v4: Use dma_alloc_wc()
v3: Set the dma mask correctly and use dma_addr_t for the iova type
v2: Pass force_dma false to of_dma_configure to require that the DMA
    region be set up, and return the error from of_dma_configure to fail probe.

Reviewed-by: Michael J. Ruhl <[email protected]>
Signed-off-by: Jordan Crouse <[email protected]>
Signed-off-by: Rob Clark <[email protected]>
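For readers unfamiliar with the write-combined DMA helpers, here is a minimal standalone sketch of the allocate/free pattern the commit switches to. It is not part of the patch: example_buf, example_alloc and example_free are illustrative names, while dma_alloc_wc()/dma_free_wc() are the real helpers from <linux/dma-mapping.h>.

	/*
	 * Minimal sketch, not part of the patch: the dma_alloc_wc()/dma_free_wc()
	 * pattern the commit adopts. example_* names are illustrative only.
	 */
	#include <linux/dma-mapping.h>
	#include <linux/mm.h>
	#include <linux/slab.h>

	struct example_buf {
		void *virt;		/* CPU-side, write-combined mapping */
		size_t size;
		dma_addr_t iova;	/* device-visible address (dma_addr_t, per v3) */
	};

	static struct example_buf *example_alloc(struct device *dev, size_t size)
	{
		struct example_buf *buf = kzalloc(sizeof(*buf), GFP_KERNEL);

		if (!buf)
			return NULL;

		buf->size = PAGE_ALIGN(size);

		/* One call replaces the old alloc_page() + iommu_map() + vmap() dance */
		buf->virt = dma_alloc_wc(dev, buf->size, &buf->iova, GFP_KERNEL);
		if (!buf->virt) {
			kfree(buf);
			return NULL;
		}

		return buf;
	}

	static void example_free(struct device *dev, struct example_buf *buf)
	{
		if (!buf)
			return;

		dma_free_wc(dev, buf->size, buf->virt, buf->iova);
		kfree(buf);
	}

The device-visible address comes back in buf->iova; because the GMU uses a flat memory space, no per-page iommu_map() bookkeeping is needed on top of this.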
1 parent a168b51 commit a5fb8b9

File tree

2 files changed: +14, -107 lines

drivers/gpu/drm/msm/adreno/a6xx_gmu.c
drivers/gpu/drm/msm/adreno/a6xx_gmu.h


drivers/gpu/drm/msm/adreno/a6xx_gmu.c

Lines changed: 13 additions & 102 deletions
@@ -2,6 +2,7 @@
 /* Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. */
 
 #include <linux/clk.h>
+#include <linux/dma-mapping.h>
 #include <linux/interconnect.h>
 #include <linux/pm_domain.h>
 #include <linux/pm_opp.h>
@@ -920,116 +921,32 @@ int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu)
 
 static void a6xx_gmu_memory_free(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo)
 {
-	int count, i;
-	u64 iova;
-
 	if (IS_ERR_OR_NULL(bo))
 		return;
 
-	count = bo->size >> PAGE_SHIFT;
-	iova = bo->iova;
-
-	for (i = 0; i < count; i++, iova += PAGE_SIZE) {
-		iommu_unmap(gmu->domain, iova, PAGE_SIZE);
-		__free_pages(bo->pages[i], 0);
-	}
-
-	kfree(bo->pages);
+	dma_free_wc(gmu->dev, bo->size, bo->virt, bo->iova);
 	kfree(bo);
 }
 
 static struct a6xx_gmu_bo *a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu,
 		size_t size)
 {
 	struct a6xx_gmu_bo *bo;
-	int ret, count, i;
 
 	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
 	if (!bo)
 		return ERR_PTR(-ENOMEM);
 
 	bo->size = PAGE_ALIGN(size);
 
-	count = bo->size >> PAGE_SHIFT;
+	bo->virt = dma_alloc_wc(gmu->dev, bo->size, &bo->iova, GFP_KERNEL);
 
-	bo->pages = kcalloc(count, sizeof(struct page *), GFP_KERNEL);
-	if (!bo->pages) {
+	if (!bo->virt) {
 		kfree(bo);
 		return ERR_PTR(-ENOMEM);
 	}
 
-	for (i = 0; i < count; i++) {
-		bo->pages[i] = alloc_page(GFP_KERNEL);
-		if (!bo->pages[i])
-			goto err;
-	}
-
-	bo->iova = gmu->uncached_iova_base;
-
-	for (i = 0; i < count; i++) {
-		ret = iommu_map(gmu->domain,
-			bo->iova + (PAGE_SIZE * i),
-			page_to_phys(bo->pages[i]), PAGE_SIZE,
-			IOMMU_READ | IOMMU_WRITE);
-
-		if (ret) {
-			DRM_DEV_ERROR(gmu->dev, "Unable to map GMU buffer object\n");
-
-			for (i = i - 1 ; i >= 0; i--)
-				iommu_unmap(gmu->domain,
-					bo->iova + (PAGE_SIZE * i),
-					PAGE_SIZE);
-
-			goto err;
-		}
-	}
-
-	bo->virt = vmap(bo->pages, count, VM_IOREMAP,
-		pgprot_writecombine(PAGE_KERNEL));
-	if (!bo->virt)
-		goto err;
-
-	/* Align future IOVA addresses on 1MB boundaries */
-	gmu->uncached_iova_base += ALIGN(size, SZ_1M);
-
 	return bo;
-
-err:
-	for (i = 0; i < count; i++) {
-		if (bo->pages[i])
-			__free_pages(bo->pages[i], 0);
-	}
-
-	kfree(bo->pages);
-	kfree(bo);
-
-	return ERR_PTR(-ENOMEM);
-}
-
-static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu)
-{
-	int ret;
-
-	/*
-	 * The GMU address space is hardcoded to treat the range
-	 * 0x60000000 - 0x80000000 as un-cached memory. All buffers shared
-	 * between the GMU and the CPU will live in this space
-	 */
-	gmu->uncached_iova_base = 0x60000000;
-
-
-	gmu->domain = iommu_domain_alloc(&platform_bus_type);
-	if (!gmu->domain)
-		return -ENODEV;
-
-	ret = iommu_attach_device(gmu->domain, gmu->dev);
-
-	if (ret) {
-		iommu_domain_free(gmu->domain);
-		gmu->domain = NULL;
-	}
-
-	return ret;
 }
 
 /* Return the 'arc-level' for the given frequency */
@@ -1289,10 +1206,6 @@ void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu)
 
 	a6xx_gmu_memory_free(gmu, gmu->hfi);
 
-	iommu_detach_device(gmu->domain, gmu->dev);
-
-	iommu_domain_free(gmu->domain);
-
 	free_irq(gmu->gmu_irq, gmu);
 	free_irq(gmu->hfi_irq, gmu);
 
@@ -1313,7 +1226,15 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
 
 	gmu->dev = &pdev->dev;
 
-	of_dma_configure(gmu->dev, node, true);
+	/* Pass force_dma false to require the DT to set the dma region */
+	ret = of_dma_configure(gmu->dev, node, false);
+	if (ret)
+		return ret;
+
+	/* Set the mask after the of_dma_configure() */
+	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(31));
+	if (ret)
+		return ret;
 
 	/* Fow now, don't do anything fancy until we get our feet under us */
 	gmu->idle_level = GMU_IDLE_STATE_ACTIVE;
@@ -1325,11 +1246,6 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
 	if (ret)
 		goto err_put_device;
 
-	/* Set up the IOMMU context bank */
-	ret = a6xx_gmu_memory_probe(gmu);
-	if (ret)
-		goto err_put_device;
-
 	/* Allocate memory for for the HFI queues */
 	gmu->hfi = a6xx_gmu_memory_alloc(gmu, SZ_16K);
 	if (IS_ERR(gmu->hfi))
@@ -1375,11 +1291,6 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
 err_memory:
 	a6xx_gmu_memory_free(gmu, gmu->hfi);
 
-	if (gmu->domain) {
-		iommu_detach_device(gmu->domain, gmu->dev);
-
-		iommu_domain_free(gmu->domain);
-	}
 	ret = -ENODEV;
 
 err_put_device:
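As a side note (not part of the diff above), the probe-time DMA setup added to a6xx_gmu_init() can be read as a standalone sketch under the same caveat: example_probe and its arguments are illustrative names, while of_dma_configure() and dma_set_mask_and_coherent() are the real kernel APIs being used.

	/* Sketch only: the probe-time DMA configuration pattern from this commit. */
	#include <linux/dma-mapping.h>
	#include <linux/of_device.h>
	#include <linux/platform_device.h>

	static int example_probe(struct platform_device *pdev, struct device_node *node)
	{
		int ret;

		/* force_dma = false: require the DT to describe the DMA region */
		ret = of_dma_configure(&pdev->dev, node, false);
		if (ret)
			return ret;

		/* Constrain the device to the GMU's 31-bit address window */
		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(31));
		if (ret)
			return ret;

		return 0;
	}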

drivers/gpu/drm/msm/adreno/a6xx_gmu.h

Lines changed: 1 addition & 5 deletions
@@ -12,8 +12,7 @@
 struct a6xx_gmu_bo {
 	void *virt;
 	size_t size;
-	u64 iova;
-	struct page **pages;
+	dma_addr_t iova;
 };
 
 /*
@@ -49,9 +48,6 @@ struct a6xx_gmu {
 	int hfi_irq;
 	int gmu_irq;
 
-	struct iommu_domain *domain;
-	u64 uncached_iova_base;
-
 	struct device *gxpd;
 
 	int idle_level;
