@@ -722,7 +722,7 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
 	ret = pm_runtime_get_sync(gpu->dev);
 	if (ret < 0) {
 		dev_err(gpu->dev, "Failed to enable GPU power domain\n");
-		return ret;
+		goto pm_put;
 	}
 
 	etnaviv_hw_identify(gpu);
@@ -819,6 +819,7 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
 
 fail:
 	pm_runtime_mark_last_busy(gpu->dev);
+pm_put:
 	pm_runtime_put_autosuspend(gpu->dev);
 
 	return ret;
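
Note: pm_runtime_get_sync() increments the device usage count even when it fails, so returning early on error leaks a reference and can keep the power domain on. The two hunks above instead route the failure path through the existing pm_runtime_put_autosuspend() call via the new pm_put label; the same pattern is applied to the debugfs and hang-recovery paths below. A minimal sketch of the idea, using hypothetical names (struct my_dev, my_dev_touch_hw), not the driver's own code:

    #include <linux/device.h>
    #include <linux/pm_runtime.h>

    struct my_dev { struct device *dev; };                   /* hypothetical driver data */

    static int my_dev_touch_hw(struct my_dev *priv) { return 0; }  /* stub for illustration */

    /* Sketch: pm_runtime_get_sync() bumps the usage count even on failure,
     * so the error path must still drop the reference. */
    static int my_dev_do_work(struct my_dev *priv)
    {
    	int ret;

    	ret = pm_runtime_get_sync(priv->dev);
    	if (ret < 0)
    		goto pm_put;                /* ref was taken despite the error */

    	ret = my_dev_touch_hw(priv);

    	pm_runtime_mark_last_busy(priv->dev);
    pm_put:
    	pm_runtime_put_autosuspend(priv->dev);
    	return ret;
    }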
@@ -859,7 +860,7 @@ int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m)
 
 	ret = pm_runtime_get_sync(gpu->dev);
 	if (ret < 0)
-		return ret;
+		goto pm_put;
 
 	dma_lo = gpu_read(gpu, VIVS_FE_DMA_LOW);
 	dma_hi = gpu_read(gpu, VIVS_FE_DMA_HIGH);
@@ -1003,6 +1004,7 @@ int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m)
 	ret = 0;
 
 	pm_runtime_mark_last_busy(gpu->dev);
+pm_put:
 	pm_runtime_put_autosuspend(gpu->dev);
 
 	return ret;
@@ -1016,7 +1018,7 @@ void etnaviv_gpu_recover_hang(struct etnaviv_gpu *gpu)
 	dev_err(gpu->dev, "recover hung GPU!\n");
 
 	if (pm_runtime_get_sync(gpu->dev) < 0)
-		return;
+		goto pm_put;
 
 	mutex_lock(&gpu->lock);
 
@@ -1035,6 +1037,7 @@ void etnaviv_gpu_recover_hang(struct etnaviv_gpu *gpu)
 
 	mutex_unlock(&gpu->lock);
 	pm_runtime_mark_last_busy(gpu->dev);
+pm_put:
 	pm_runtime_put_autosuspend(gpu->dev);
 }
 
@@ -1308,8 +1311,10 @@ struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit)
 
 	if (!submit->runtime_resumed) {
 		ret = pm_runtime_get_sync(gpu->dev);
-		if (ret < 0)
+		if (ret < 0) {
+			pm_runtime_put_noidle(gpu->dev);
 			return NULL;
+		}
 		submit->runtime_resumed = true;
 	}
 
@@ -1326,6 +1331,7 @@ struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit)
 	ret = event_alloc(gpu, nr_events, event);
 	if (ret) {
 		DRM_ERROR("no free events\n");
+		pm_runtime_put_noidle(gpu->dev);
 		return NULL;
 	}
 
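
Note: the submit path has no shared cleanup label, so a failed pm_runtime_get_sync() is balanced directly with pm_runtime_put_noidle(), which drops the usage count without scheduling an idle transition. A hedged sketch of that pattern, with a hypothetical function name:

    #include <linux/device.h>
    #include <linux/pm_runtime.h>

    /* Sketch: with no common unwind label, balance a failed
     * pm_runtime_get_sync() with pm_runtime_put_noidle() before bailing out. */
    static int my_dev_resume_for_submit(struct device *dev)   /* hypothetical */
    {
    	int ret = pm_runtime_get_sync(dev);

    	if (ret < 0) {
    		pm_runtime_put_noidle(dev);   /* drop the ref taken above, skip idle */
    		return ret;
    	}
    	return 0;   /* caller later does pm_runtime_put_autosuspend(dev) */
    }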
@@ -1487,52 +1493,40 @@ static int etnaviv_gpu_clk_enable(struct etnaviv_gpu *gpu)
 {
 	int ret;
 
-	if (gpu->clk_reg) {
-		ret = clk_prepare_enable(gpu->clk_reg);
-		if (ret)
-			return ret;
-	}
+	ret = clk_prepare_enable(gpu->clk_reg);
+	if (ret)
+		return ret;
 
-	if (gpu->clk_bus) {
-		ret = clk_prepare_enable(gpu->clk_bus);
-		if (ret)
-			return ret;
-	}
+	ret = clk_prepare_enable(gpu->clk_bus);
+	if (ret)
+		goto disable_clk_reg;
 
-	if (gpu->clk_core) {
-		ret = clk_prepare_enable(gpu->clk_core);
-		if (ret)
-			goto disable_clk_bus;
-	}
+	ret = clk_prepare_enable(gpu->clk_core);
+	if (ret)
+		goto disable_clk_bus;
 
-	if (gpu->clk_shader) {
-		ret = clk_prepare_enable(gpu->clk_shader);
-		if (ret)
-			goto disable_clk_core;
-	}
+	ret = clk_prepare_enable(gpu->clk_shader);
+	if (ret)
+		goto disable_clk_core;
 
 	return 0;
 
 disable_clk_core:
-	if (gpu->clk_core)
-		clk_disable_unprepare(gpu->clk_core);
+	clk_disable_unprepare(gpu->clk_core);
 disable_clk_bus:
-	if (gpu->clk_bus)
-		clk_disable_unprepare(gpu->clk_bus);
+	clk_disable_unprepare(gpu->clk_bus);
+disable_clk_reg:
+	clk_disable_unprepare(gpu->clk_reg);
 
 	return ret;
 }
 
 static int etnaviv_gpu_clk_disable(struct etnaviv_gpu *gpu)
 {
-	if (gpu->clk_shader)
-		clk_disable_unprepare(gpu->clk_shader);
-	if (gpu->clk_core)
-		clk_disable_unprepare(gpu->clk_core);
-	if (gpu->clk_bus)
-		clk_disable_unprepare(gpu->clk_bus);
-	if (gpu->clk_reg)
-		clk_disable_unprepare(gpu->clk_reg);
+	clk_disable_unprepare(gpu->clk_shader);
+	clk_disable_unprepare(gpu->clk_core);
+	clk_disable_unprepare(gpu->clk_bus);
+	clk_disable_unprepare(gpu->clk_reg);
 
 	return 0;
 }
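
Note: the NULL checks can be dropped because the common clock framework treats a NULL struct clk pointer as a dummy clock: clk_prepare_enable() returns 0 and clk_disable_unprepare() is a no-op. Together with devm_clk_get_optional() in the probe hunk below, which yields NULL when a clock is simply absent, the enable and disable paths become straight-line code. A small illustration under that assumption, with a hypothetical helper name:

    #include <linux/clk.h>

    /* Sketch: a NULL clk is legal and acts as a dummy, so optional clocks
     * need no conditionals around prepare/enable and disable/unprepare. */
    static int my_enable_optional(struct clk *maybe_clk)   /* may be NULL */
    {
    	int ret = clk_prepare_enable(maybe_clk);   /* returns 0 if maybe_clk is NULL */

    	if (ret)
    		return ret;
    	/* ... use the hardware ... */
    	clk_disable_unprepare(maybe_clk);          /* no-op if NULL */
    	return 0;
    }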
@@ -1783,26 +1777,26 @@ static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
 	}
 
 	/* Get Clocks: */
-	gpu->clk_reg = devm_clk_get(&pdev->dev, "reg");
+	gpu->clk_reg = devm_clk_get_optional(&pdev->dev, "reg");
 	DBG("clk_reg: %p", gpu->clk_reg);
 	if (IS_ERR(gpu->clk_reg))
-		gpu->clk_reg = NULL;
+		return PTR_ERR(gpu->clk_reg);
 
-	gpu->clk_bus = devm_clk_get(&pdev->dev, "bus");
+	gpu->clk_bus = devm_clk_get_optional(&pdev->dev, "bus");
 	DBG("clk_bus: %p", gpu->clk_bus);
 	if (IS_ERR(gpu->clk_bus))
-		gpu->clk_bus = NULL;
+		return PTR_ERR(gpu->clk_bus);
 
 	gpu->clk_core = devm_clk_get(&pdev->dev, "core");
 	DBG("clk_core: %p", gpu->clk_core);
 	if (IS_ERR(gpu->clk_core))
-		gpu->clk_core = NULL;
+		return PTR_ERR(gpu->clk_core);
 	gpu->base_rate_core = clk_get_rate(gpu->clk_core);
 
-	gpu->clk_shader = devm_clk_get(&pdev->dev, "shader");
+	gpu->clk_shader = devm_clk_get_optional(&pdev->dev, "shader");
 	DBG("clk_shader: %p", gpu->clk_shader);
 	if (IS_ERR(gpu->clk_shader))
-		gpu->clk_shader = NULL;
+		return PTR_ERR(gpu->clk_shader);
 	gpu->base_rate_shader = clk_get_rate(gpu->clk_shader);
 
 	/* TODO: figure out max mapped size */
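
Note: devm_clk_get_optional() returns NULL rather than an error when the named clock is not described for the device, so probe can now propagate genuine failures such as -EPROBE_DEFER instead of silently mapping every error to "no clock". A hedged sketch of that lookup pattern, reusing the "reg" clock name from the hunk above and a hypothetical helper:

    #include <linux/clk.h>
    #include <linux/platform_device.h>

    /* Sketch: the optional lookup yields NULL when the clock is absent and
     * an ERR_PTR only for real errors (e.g. -EPROBE_DEFER). */
    static int my_probe_get_clk(struct platform_device *pdev, struct clk **out)
    {
    	struct clk *clk = devm_clk_get_optional(&pdev->dev, "reg");

    	if (IS_ERR(clk))
    		return PTR_ERR(clk);   /* real error: propagate it */

    	*out = clk;                    /* NULL here just means "not wired up" */
    	return 0;
    }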