@@ -2,14 +2,16 @@
 /* Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. */
 
 #include <linux/clk.h>
-#include <linux/dma-mapping.h>
 #include <linux/interconnect.h>
 #include <linux/pm_domain.h>
 #include <linux/pm_opp.h>
 #include <soc/qcom/cmd-db.h>
+#include <drm/drm_gem.h>
 
 #include "a6xx_gpu.h"
 #include "a6xx_gmu.xml.h"
+#include "msm_gem.h"
+#include "msm_mmu.h"
 
 static void a6xx_gmu_fault(struct a6xx_gmu *gmu)
 {
@@ -628,7 +630,7 @@ static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
 	gmu_write(gmu, REG_A6XX_GMU_CM3_BOOT_CONFIG, 0x02);
 
 	/* Write the iova of the HFI table */
-	gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_ADDR, gmu->hfi->iova);
+	gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_ADDR, gmu->hfi.iova);
 	gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_INFO, 1);
 
 	gmu_write(gmu, REG_A6XX_GMU_AHB_FENCE_RANGE_0,
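
The pointer-to-member change above (gmu->hfi->iova becoming gmu->hfi.iova) reflects that struct a6xx_gmu now embeds its buffers by value instead of holding heap-allocated pointers. A sketch of the reworked struct a6xx_gmu_bo, consistent with the field accesses in this patch (the real definition lives in a6xx_gmu.h, which this diff does not show):

struct a6xx_gmu_bo {
	struct drm_gem_object *obj;	/* backing GEM object */
	u64 iova;			/* GMU-visible address of the pinned mapping */
	void *virt;			/* kernel mapping from msm_gem_get_vaddr() */
	size_t size;			/* page-aligned size of the allocation */
};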
@@ -927,34 +929,77 @@ int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu)
 	return 0;
 }
 
-static void a6xx_gmu_memory_free(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo)
+static void a6xx_gmu_memory_free(struct a6xx_gmu *gmu)
 {
-	if (IS_ERR_OR_NULL(bo))
-		return;
+	msm_gem_kernel_put(gmu->hfi.obj, gmu->aspace, false);
+	msm_gem_kernel_put(gmu->debug.obj, gmu->aspace, false);
+
+	gmu->aspace->mmu->funcs->detach(gmu->aspace->mmu);
+	msm_gem_address_space_put(gmu->aspace);
+}
+
+static int a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo,
+		size_t size, u64 iova)
+{
+	struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+	struct drm_device *dev = a6xx_gpu->base.base.dev;
+	uint32_t flags = MSM_BO_WC;
+	u64 range_start, range_end;
+	int ret;
+
+	size = PAGE_ALIGN(size);
+	if (!iova) {
+		/* no fixed address - use GMU's uncached range */
+		range_start = 0x60000000;
+		range_end = 0x80000000;
+	} else {
+		/* range for fixed address */
+		range_start = iova;
+		range_end = iova + size;
+	}
+
+	bo->obj = msm_gem_new(dev, size, flags);
+	if (IS_ERR(bo->obj))
+		return PTR_ERR(bo->obj);
 
-	dma_free_wc(gmu->dev, bo->size, bo->virt, bo->iova);
-	kfree(bo);
+	ret = msm_gem_get_and_pin_iova_range(bo->obj, gmu->aspace, &bo->iova,
+		range_start >> PAGE_SHIFT, range_end >> PAGE_SHIFT);
+	if (ret) {
+		drm_gem_object_put(bo->obj);
+		return ret;
+	}
+
+	bo->virt = msm_gem_get_vaddr(bo->obj);
+	bo->size = size;
+
+	return 0;
 }
 
-static struct a6xx_gmu_bo *a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu,
-		size_t size)
+static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu)
 {
-	struct a6xx_gmu_bo *bo;
+	struct iommu_domain *domain;
+	int ret;
 
-	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
-	if (!bo)
-		return ERR_PTR(-ENOMEM);
+	domain = iommu_domain_alloc(&platform_bus_type);
+	if (!domain)
+		return -ENODEV;
 
-	bo->size = PAGE_ALIGN(size);
+	domain->geometry.aperture_start = 0x00000000;
+	domain->geometry.aperture_end = 0x7fffffff;
 
-	bo->virt = dma_alloc_wc(gmu->dev, bo->size, &bo->iova, GFP_KERNEL);
+	gmu->aspace = msm_gem_address_space_create(gmu->dev, domain, "gmu");
+	if (IS_ERR(gmu->aspace)) {
+		iommu_domain_free(domain);
+		return PTR_ERR(gmu->aspace);
+	}
 
-	if (!bo->virt) {
-		kfree(bo);
-		return ERR_PTR(-ENOMEM);
+	ret = gmu->aspace->mmu->funcs->attach(gmu->aspace->mmu);
+	if (ret) {
+		msm_gem_address_space_put(gmu->aspace);
+		return ret;
 	}
 
-	return bo;
+	return 0;
 }
 
 /* Return the 'arc-level' for the given frequency */
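
Note how the allocator derives the pin range: with iova == 0 the buffer may land anywhere in the GMU's uncached 0x60000000-0x80000000 window, while a nonzero iova narrows the range to exactly [iova, iova + size). The IOMMU aperture set up in a6xx_gmu_memory_probe() (0x0-0x7fffffff, inclusive upper bound) covers this window. A hypothetical pair of callers (the second buffer name and its fixed address are illustrative, not from this patch):

/* dynamic placement somewhere in the uncached window */
ret = a6xx_gmu_memory_alloc(gmu, &gmu->hfi, SZ_16K, 0);

/* fixed placement: pinned at exactly 0x60400000 (hypothetical address) */
ret = a6xx_gmu_memory_alloc(gmu, &gmu->scratch, SZ_4K, 0x60400000);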
@@ -1212,7 +1257,7 @@ void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu)
 	iounmap(gmu->mmio);
 	gmu->mmio = NULL;
 
-	a6xx_gmu_memory_free(gmu, gmu->hfi);
+	a6xx_gmu_memory_free(gmu);
 
 	free_irq(gmu->gmu_irq, gmu);
 	free_irq(gmu->hfi_irq, gmu);
@@ -1234,15 +1279,7 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
 
 	gmu->dev = &pdev->dev;
 
-	/* Pass force_dma false to require the DT to set the dma region */
-	ret = of_dma_configure(gmu->dev, node, false);
-	if (ret)
-		return ret;
-
-	/* Set the mask after the of_dma_configure() */
-	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(31));
-	if (ret)
-		return ret;
+	of_dma_configure(gmu->dev, node, true);
 
 	/* For now, don't do anything fancy until we get our feet under us */
 	gmu->idle_level = GMU_IDLE_STATE_ACTIVE;
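
Since GMU buffers are now mapped through the driver's own IOMMU address space rather than the DMA API, the strict DT-driven DMA configuration and the 31-bit coherent mask are dropped; a plain of_dma_configure() with force_dma = true is enough. For reference, the prototype as declared in include/linux/of_device.h in kernels of this era (force_dma = true treats the device as DMA-capable even when the DT does not describe it as such):

int of_dma_configure(struct device *dev, struct device_node *np,
		     bool force_dma);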
@@ -1254,20 +1291,26 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
 	if (ret)
 		goto err_put_device;
 
+	ret = a6xx_gmu_memory_probe(gmu);
+	if (ret)
+		goto err_put_device;
+
 	/* Allocate memory for the HFI queues */
-	gmu->hfi = a6xx_gmu_memory_alloc(gmu, SZ_16K);
-	if (IS_ERR(gmu->hfi))
+	ret = a6xx_gmu_memory_alloc(gmu, &gmu->hfi, SZ_16K, 0);
+	if (ret)
 		goto err_memory;
 
 	/* Allocate memory for the GMU debug region */
-	gmu->debug = a6xx_gmu_memory_alloc(gmu, SZ_16K);
-	if (IS_ERR(gmu->debug))
+	ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_16K, 0);
+	if (ret)
 		goto err_memory;
 
 	/* Map the GMU registers */
 	gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu");
-	if (IS_ERR(gmu->mmio))
+	if (IS_ERR(gmu->mmio)) {
+		ret = PTR_ERR(gmu->mmio);
 		goto err_memory;
+	}
 
 	/* Get the HFI and GMU interrupts */
 	gmu->hfi_irq = a6xx_gmu_get_irq(gmu, pdev, "hfi", a6xx_hfi_irq);
@@ -1296,11 +1339,11 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
 	iounmap(gmu->mmio);
 	free_irq(gmu->gmu_irq, gmu);
 	free_irq(gmu->hfi_irq, gmu);
-err_memory:
-	a6xx_gmu_memory_free(gmu, gmu->hfi);
 
 	ret = -ENODEV;
 
+err_memory:
+	a6xx_gmu_memory_free(gmu);
 err_put_device:
 	/* Drop reference taken in of_find_device_by_node */
 	put_device(gmu->dev);
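
With the labels reordered, the unwind in a6xx_gmu_init() reads roughly as below (a sketch; the err_mmio label and the final return sit outside this hunk and are assumed unchanged). Two details make the single err_memory label safe: msm_gem_kernel_put() bails out early on NULL or error-valued objects, so it does not matter which of the two buffers were actually allocated, and err_memory is only reachable after a6xx_gmu_memory_probe() succeeds, so gmu->aspace is valid there.

err_mmio:
	iounmap(gmu->mmio);
	free_irq(gmu->gmu_irq, gmu);
	free_irq(gmu->hfi_irq, gmu);

	ret = -ENODEV;

err_memory:
	a6xx_gmu_memory_free(gmu);
err_put_device:
	/* Drop reference taken in of_find_device_by_node */
	put_device(gmu->dev);

	return ret;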