@@ -2,6 +2,7 @@
 /* Copyright (c) 2017-2019 The Linux Foundation. All rights reserved. */
 
 #include <linux/clk.h>
+#include <linux/dma-mapping.h>
 #include <linux/interconnect.h>
 #include <linux/pm_domain.h>
 #include <linux/pm_opp.h>
@@ -920,116 +921,32 @@ int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu)
 
 static void a6xx_gmu_memory_free(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo)
 {
-	int count, i;
-	u64 iova;
-
 	if (IS_ERR_OR_NULL(bo))
 		return;
 
-	count = bo->size >> PAGE_SHIFT;
-	iova = bo->iova;
-
-	for (i = 0; i < count; i++, iova += PAGE_SIZE) {
-		iommu_unmap(gmu->domain, iova, PAGE_SIZE);
-		__free_pages(bo->pages[i], 0);
-	}
-
-	kfree(bo->pages);
+	dma_free_wc(gmu->dev, bo->size, bo->virt, bo->iova);
 	kfree(bo);
 }
 
 static struct a6xx_gmu_bo *a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu,
 		size_t size)
 {
 	struct a6xx_gmu_bo *bo;
-	int ret, count, i;
 
 	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
 	if (!bo)
 		return ERR_PTR(-ENOMEM);
 
 	bo->size = PAGE_ALIGN(size);
 
-	count = bo->size >> PAGE_SHIFT;
+	bo->virt = dma_alloc_wc(gmu->dev, bo->size, &bo->iova, GFP_KERNEL);
 
-	bo->pages = kcalloc(count, sizeof(struct page *), GFP_KERNEL);
-	if (!bo->pages) {
+	if (!bo->virt) {
 		kfree(bo);
 		return ERR_PTR(-ENOMEM);
 	}
 
-	for (i = 0; i < count; i++) {
-		bo->pages[i] = alloc_page(GFP_KERNEL);
-		if (!bo->pages[i])
-			goto err;
-	}
-
-	bo->iova = gmu->uncached_iova_base;
-
-	for (i = 0; i < count; i++) {
-		ret = iommu_map(gmu->domain,
-			bo->iova + (PAGE_SIZE * i),
-			page_to_phys(bo->pages[i]), PAGE_SIZE,
-			IOMMU_READ | IOMMU_WRITE);
-
-		if (ret) {
-			DRM_DEV_ERROR(gmu->dev, "Unable to map GMU buffer object\n");
-
-			for (i = i - 1; i >= 0; i--)
-				iommu_unmap(gmu->domain,
-					bo->iova + (PAGE_SIZE * i),
-					PAGE_SIZE);
-
-			goto err;
-		}
-	}
-
-	bo->virt = vmap(bo->pages, count, VM_IOREMAP,
-		pgprot_writecombine(PAGE_KERNEL));
-	if (!bo->virt)
-		goto err;
-
-	/* Align future IOVA addresses on 1MB boundaries */
-	gmu->uncached_iova_base += ALIGN(size, SZ_1M);
-
 	return bo;
-
-err:
-	for (i = 0; i < count; i++) {
-		if (bo->pages[i])
-			__free_pages(bo->pages[i], 0);
-	}
-
-	kfree(bo->pages);
-	kfree(bo);
-
-	return ERR_PTR(-ENOMEM);
-}
-
-static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu)
-{
-	int ret;
-
-	/*
-	 * The GMU address space is hardcoded to treat the range
-	 * 0x60000000 - 0x80000000 as un-cached memory. All buffers shared
-	 * between the GMU and the CPU will live in this space
-	 */
-	gmu->uncached_iova_base = 0x60000000;
-
-
-	gmu->domain = iommu_domain_alloc(&platform_bus_type);
-	if (!gmu->domain)
-		return -ENODEV;
-
-	ret = iommu_attach_device(gmu->domain, gmu->dev);
-
-	if (ret) {
-		iommu_domain_free(gmu->domain);
-		gmu->domain = NULL;
-	}
-
-	return ret;
 }
 
 /* Return the 'arc-level' for the given frequency */
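Not part of the patch: the hunk above replaces the driver's hand-rolled alloc_page()/iommu_map()/vmap() sequence with a single dma_alloc_wc() call, paired with dma_free_wc() on teardown. A minimal sketch of that pairing, assuming a made-up wc_buf structure and helper names that do not exist in the driver:

```c
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/slab.h>

/* Illustrative only: a tiny wrapper around a write-combined DMA buffer. */
struct wc_buf {
	size_t size;
	void *virt;		/* kernel mapping, write-combined */
	dma_addr_t iova;	/* address as seen by the device */
};

static struct wc_buf *wc_buf_alloc(struct device *dev, size_t size)
{
	struct wc_buf *buf = kzalloc(sizeof(*buf), GFP_KERNEL);

	if (!buf)
		return NULL;

	buf->size = PAGE_ALIGN(size);

	/*
	 * One call does what the removed code did by hand: allocate pages,
	 * map them for the device (through its IOMMU if one is attached)
	 * and provide a write-combined CPU mapping.
	 */
	buf->virt = dma_alloc_wc(dev, buf->size, &buf->iova, GFP_KERNEL);
	if (!buf->virt) {
		kfree(buf);
		return NULL;
	}

	return buf;
}

static void wc_buf_free(struct device *dev, struct wc_buf *buf)
{
	if (!buf)
		return;

	dma_free_wc(dev, buf->size, buf->virt, buf->iova);
	kfree(buf);
}
```

Because dma_alloc_wc() hands back both a kernel virtual address and the device-visible address, the driver no longer has to track individual pages or manage its own IOMMU domain, which is why a6xx_gmu_memory_probe() can be deleted below.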
@@ -1289,10 +1206,6 @@ void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu)
 
 	a6xx_gmu_memory_free(gmu, gmu->hfi);
 
-	iommu_detach_device(gmu->domain, gmu->dev);
-
-	iommu_domain_free(gmu->domain);
-
 	free_irq(gmu->gmu_irq, gmu);
 	free_irq(gmu->hfi_irq, gmu);
 
@@ -1313,7 +1226,15 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
 
 	gmu->dev = &pdev->dev;
 
-	of_dma_configure(gmu->dev, node, true);
+	/* Pass force_dma false to require the DT to set the dma region */
+	ret = of_dma_configure(gmu->dev, node, false);
+	if (ret)
+		return ret;
+
+	/* Set the mask after the of_dma_configure() */
+	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(31));
+	if (ret)
+		return ret;
 
 	/* Fow now, don't do anything fancy until we get our feet under us */
 	gmu->idle_level = GMU_IDLE_STATE_ACTIVE;
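Also not part of the patch: the new probe-time sequence relies on the generic DMA-configuration helpers instead of a private IOMMU domain. A minimal sketch of the pattern, assuming a hypothetical example_setup_dma() helper:

```c
#include <linux/dma-mapping.h>
#include <linux/of_device.h>

/* Illustrative only: configure DMA for a sub-device with its own DT node. */
static int example_setup_dma(struct device *dev, struct device_node *node)
{
	int ret;

	/*
	 * force_dma == false: only succeed if the device-tree node actually
	 * describes the DMA/IOMMU setup for this device.
	 */
	ret = of_dma_configure(dev, node, false);
	if (ret)
		return ret;

	/*
	 * Narrow the streaming and coherent masks after of_dma_configure(),
	 * so the defaults it installs do not overwrite them.
	 */
	return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(31));
}
```

The 31-bit mask appears to keep device addresses below 0x80000000, the upper bound of the un-cached GMU range described in the comment removed from a6xx_gmu_memory_probe().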
@@ -1325,11 +1246,6 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
 	if (ret)
 		goto err_put_device;
 
-	/* Set up the IOMMU context bank */
-	ret = a6xx_gmu_memory_probe(gmu);
-	if (ret)
-		goto err_put_device;
-
 	/* Allocate memory for for the HFI queues */
 	gmu->hfi = a6xx_gmu_memory_alloc(gmu, SZ_16K);
 	if (IS_ERR(gmu->hfi))
@@ -1375,11 +1291,6 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
 err_memory:
 	a6xx_gmu_memory_free(gmu, gmu->hfi);
 
-	if (gmu->domain) {
-		iommu_detach_device(gmu->domain, gmu->dev);
-
-		iommu_domain_free(gmu->domain);
-	}
 	ret = -ENODEV;
 
 err_put_device: