Skip to content

Commit 29ac897

Browse files
flto authored and robclark committed
drm/msm/a6xx: use msm_gem for GMU memory objects
This gives more fine-grained control over how memory is allocated over the DMA API. In particular, it allows using an address range or pinning to a fixed address.

Signed-off-by: Jonathan Marek <[email protected]>
Reviewed-by: Jordan Crouse <[email protected]>
Signed-off-by: Rob Clark <[email protected]>
1 parent 0b462d7 commit 29ac897

File tree

3 files changed

+88
-42
lines changed

3 files changed

+88
-42
lines changed

drivers/gpu/drm/msm/adreno/a6xx_gmu.c

Lines changed: 79 additions & 36 deletions
Original file line numberDiff line numberDiff line change
@@ -2,14 +2,16 @@
22
/* Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. */
33

44
#include <linux/clk.h>
5-
#include <linux/dma-mapping.h>
65
#include <linux/interconnect.h>
76
#include <linux/pm_domain.h>
87
#include <linux/pm_opp.h>
98
#include <soc/qcom/cmd-db.h>
9+
#include <drm/drm_gem.h>
1010

1111
#include "a6xx_gpu.h"
1212
#include "a6xx_gmu.xml.h"
13+
#include "msm_gem.h"
14+
#include "msm_mmu.h"
1315

1416
static void a6xx_gmu_fault(struct a6xx_gmu *gmu)
1517
{
@@ -628,7 +630,7 @@ static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
628630
gmu_write(gmu, REG_A6XX_GMU_CM3_BOOT_CONFIG, 0x02);
629631

630632
/* Write the iova of the HFI table */
631-
gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_ADDR, gmu->hfi->iova);
633+
gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_ADDR, gmu->hfi.iova);
632634
gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_INFO, 1);
633635

634636
gmu_write(gmu, REG_A6XX_GMU_AHB_FENCE_RANGE_0,
@@ -927,34 +929,77 @@ int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu)
927929
return 0;
928930
}
929931

930-
static void a6xx_gmu_memory_free(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo)
932+
static void a6xx_gmu_memory_free(struct a6xx_gmu *gmu)
931933
{
932-
if (IS_ERR_OR_NULL(bo))
933-
return;
934+
msm_gem_kernel_put(gmu->hfi.obj, gmu->aspace, false);
935+
msm_gem_kernel_put(gmu->debug.obj, gmu->aspace, false);
936+
937+
gmu->aspace->mmu->funcs->detach(gmu->aspace->mmu);
938+
msm_gem_address_space_put(gmu->aspace);
939+
}
940+
941+
static int a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo,
942+
size_t size, u64 iova)
943+
{
944+
struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
945+
struct drm_device *dev = a6xx_gpu->base.base.dev;
946+
uint32_t flags = MSM_BO_WC;
947+
u64 range_start, range_end;
948+
int ret;
949+
950+
size = PAGE_ALIGN(size);
951+
if (!iova) {
952+
/* no fixed address - use GMU's uncached range */
953+
range_start = 0x60000000;
954+
range_end = 0x80000000;
955+
} else {
956+
/* range for fixed address */
957+
range_start = iova;
958+
range_end = iova + size;
959+
}
960+
961+
bo->obj = msm_gem_new(dev, size, flags);
962+
if (IS_ERR(bo->obj))
963+
return PTR_ERR(bo->obj);
934964

935-
dma_free_wc(gmu->dev, bo->size, bo->virt, bo->iova);
936-
kfree(bo);
965+
ret = msm_gem_get_and_pin_iova_range(bo->obj, gmu->aspace, &bo->iova,
966+
range_start >> PAGE_SHIFT, range_end >> PAGE_SHIFT);
967+
if (ret) {
968+
drm_gem_object_put(bo->obj);
969+
return ret;
970+
}
971+
972+
bo->virt = msm_gem_get_vaddr(bo->obj);
973+
bo->size = size;
974+
975+
return 0;
937976
}
938977

939-
static struct a6xx_gmu_bo *a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu,
940-
size_t size)
978+
static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu)
941979
{
942-
struct a6xx_gmu_bo *bo;
980+
struct iommu_domain *domain;
981+
int ret;
943982

944-
bo = kzalloc(sizeof(*bo), GFP_KERNEL);
945-
if (!bo)
946-
return ERR_PTR(-ENOMEM);
983+
domain = iommu_domain_alloc(&platform_bus_type);
984+
if (!domain)
985+
return -ENODEV;
947986

948-
bo->size = PAGE_ALIGN(size);
987+
domain->geometry.aperture_start = 0x00000000;
988+
domain->geometry.aperture_end = 0x7fffffff;
949989

950-
bo->virt = dma_alloc_wc(gmu->dev, bo->size, &bo->iova, GFP_KERNEL);
990+
gmu->aspace = msm_gem_address_space_create(gmu->dev, domain, "gmu");
991+
if (IS_ERR(gmu->aspace)) {
992+
iommu_domain_free(domain);
993+
return PTR_ERR(gmu->aspace);
994+
}
951995

952-
if (!bo->virt) {
953-
kfree(bo);
954-
return ERR_PTR(-ENOMEM);
996+
ret = gmu->aspace->mmu->funcs->attach(gmu->aspace->mmu);
997+
if (ret) {
998+
msm_gem_address_space_put(gmu->aspace);
999+
return ret;
9551000
}
9561001

957-
return bo;
1002+
return 0;
9581003
}
9591004

9601005
/* Return the 'arc-level' for the given frequency */
@@ -1212,7 +1257,7 @@ void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu)
12121257
iounmap(gmu->mmio);
12131258
gmu->mmio = NULL;
12141259

1215-
a6xx_gmu_memory_free(gmu, gmu->hfi);
1260+
a6xx_gmu_memory_free(gmu);
12161261

12171262
free_irq(gmu->gmu_irq, gmu);
12181263
free_irq(gmu->hfi_irq, gmu);
@@ -1234,15 +1279,7 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
12341279

12351280
gmu->dev = &pdev->dev;
12361281

1237-
/* Pass force_dma false to require the DT to set the dma region */
1238-
ret = of_dma_configure(gmu->dev, node, false);
1239-
if (ret)
1240-
return ret;
1241-
1242-
/* Set the mask after the of_dma_configure() */
1243-
ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(31));
1244-
if (ret)
1245-
return ret;
1282+
of_dma_configure(gmu->dev, node, true);
12461283

12471284
/* Fow now, don't do anything fancy until we get our feet under us */
12481285
gmu->idle_level = GMU_IDLE_STATE_ACTIVE;
@@ -1254,20 +1291,26 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
12541291
if (ret)
12551292
goto err_put_device;
12561293

1294+
ret = a6xx_gmu_memory_probe(gmu);
1295+
if (ret)
1296+
goto err_put_device;
1297+
12571298
/* Allocate memory for for the HFI queues */
1258-
gmu->hfi = a6xx_gmu_memory_alloc(gmu, SZ_16K);
1259-
if (IS_ERR(gmu->hfi))
1299+
ret = a6xx_gmu_memory_alloc(gmu, &gmu->hfi, SZ_16K, 0);
1300+
if (ret)
12601301
goto err_memory;
12611302

12621303
/* Allocate memory for the GMU debug region */
1263-
gmu->debug = a6xx_gmu_memory_alloc(gmu, SZ_16K);
1264-
if (IS_ERR(gmu->debug))
1304+
ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_16K, 0);
1305+
if (ret)
12651306
goto err_memory;
12661307

12671308
/* Map the GMU registers */
12681309
gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu");
1269-
if (IS_ERR(gmu->mmio))
1310+
if (IS_ERR(gmu->mmio)) {
1311+
ret = PTR_ERR(gmu->mmio);
12701312
goto err_memory;
1313+
}
12711314

12721315
/* Get the HFI and GMU interrupts */
12731316
gmu->hfi_irq = a6xx_gmu_get_irq(gmu, pdev, "hfi", a6xx_hfi_irq);
@@ -1296,11 +1339,11 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
12961339
iounmap(gmu->mmio);
12971340
free_irq(gmu->gmu_irq, gmu);
12981341
free_irq(gmu->hfi_irq, gmu);
1299-
err_memory:
1300-
a6xx_gmu_memory_free(gmu, gmu->hfi);
13011342

13021343
ret = -ENODEV;
13031344

1345+
err_memory:
1346+
a6xx_gmu_memory_free(gmu);
13041347
err_put_device:
13051348
/* Drop reference taken in of_find_device_by_node */
13061349
put_device(gmu->dev);

drivers/gpu/drm/msm/adreno/a6xx_gmu.h

Lines changed: 6 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -10,9 +10,10 @@
1010
#include "a6xx_hfi.h"
1111

1212
struct a6xx_gmu_bo {
13+
struct drm_gem_object *obj;
1314
void *virt;
1415
size_t size;
15-
dma_addr_t iova;
16+
u64 iova;
1617
};
1718

1819
/*
@@ -43,6 +44,8 @@ struct a6xx_gmu_bo {
4344
struct a6xx_gmu {
4445
struct device *dev;
4546

47+
struct msm_gem_address_space *aspace;
48+
4649
void * __iomem mmio;
4750

4851
int hfi_irq;
@@ -52,8 +55,8 @@ struct a6xx_gmu {
5255

5356
int idle_level;
5457

55-
struct a6xx_gmu_bo *hfi;
56-
struct a6xx_gmu_bo *debug;
58+
struct a6xx_gmu_bo hfi;
59+
struct a6xx_gmu_bo debug;
5760

5861
int nr_clocks;
5962
struct clk_bulk_data *clocks;

drivers/gpu/drm/msm/adreno/a6xx_hfi.c

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -176,8 +176,8 @@ static int a6xx_hfi_send_gmu_init(struct a6xx_gmu *gmu, int boot_state)
176176
{
177177
struct a6xx_hfi_msg_gmu_init_cmd msg = { 0 };
178178

179-
msg.dbg_buffer_addr = (u32) gmu->debug->iova;
180-
msg.dbg_buffer_size = (u32) gmu->debug->size;
179+
msg.dbg_buffer_addr = (u32) gmu->debug.iova;
180+
msg.dbg_buffer_size = (u32) gmu->debug.size;
181181
msg.boot_state = boot_state;
182182

183183
return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_INIT, &msg, sizeof(msg),
@@ -385,7 +385,7 @@ static void a6xx_hfi_queue_init(struct a6xx_hfi_queue *queue,
385385

386386
void a6xx_hfi_init(struct a6xx_gmu *gmu)
387387
{
388-
struct a6xx_gmu_bo *hfi = gmu->hfi;
388+
struct a6xx_gmu_bo *hfi = &gmu->hfi;
389389
struct a6xx_hfi_queue_table_header *table = hfi->virt;
390390
struct a6xx_hfi_queue_header *headers = hfi->virt + sizeof(*table);
391391
u64 offset;

0 commit comments

Comments (0)