@@ -911,6 +911,9 @@ static void a6xx_set_ubwc_config(struct msm_gpu *gpu)
 	if (adreno_is_a618(adreno_gpu))
 		return;
 
+	if (adreno_is_a619_holi(adreno_gpu))
+		hbb_lo = 0;
+
 	if (adreno_is_a640_family(adreno_gpu))
 		amsbc = 1;
 
@@ -1135,7 +1138,12 @@ static int hw_init(struct msm_gpu *gpu)
 	}
 
 	/* Clear GBIF halt in case GX domain was not collapsed */
-	if (a6xx_has_gbif(adreno_gpu)) {
+	if (adreno_is_a619_holi(adreno_gpu)) {
+		gpu_write(gpu, REG_A6XX_GBIF_HALT, 0);
+		gpu_write(gpu, REG_A6XX_RBBM_GPR0_CNTL, 0);
+		/* Let's make extra sure that the GPU can access the memory.. */
+		mb();
+	} else if (a6xx_has_gbif(adreno_gpu)) {
 		gpu_write(gpu, REG_A6XX_GBIF_HALT, 0);
 		gpu_write(gpu, REG_A6XX_RBBM_GBIF_HALT, 0);
 		/* Let's make extra sure that the GPU can access the memory.. */
@@ -1144,6 +1152,9 @@ static int hw_init(struct msm_gpu *gpu)
 
 	gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_CNTL, 0);
 
+	if (adreno_is_a619_holi(adreno_gpu))
+		a6xx_sptprac_enable(gmu);
+
 	/*
 	 * Disable the trusted memory range - we don't actually supported secure
 	 * memory rendering at this point in time and we don't want to block off
@@ -1760,12 +1771,18 @@ static void a6xx_llc_slices_init(struct platform_device *pdev,
 #define GBIF_CLIENT_HALT_MASK		BIT(0)
 #define GBIF_ARB_HALT_MASK		BIT(1)
 #define VBIF_XIN_HALT_CTRL0_MASK	GENMASK(3, 0)
+#define VBIF_RESET_ACK_MASK		0xF0
+#define GPR0_GBIF_HALT_REQUEST		0x1E0
 
 void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu, bool gx_off)
 {
 	struct msm_gpu *gpu = &adreno_gpu->base;
 
-	if (!a6xx_has_gbif(adreno_gpu)) {
+	if (adreno_is_a619_holi(adreno_gpu)) {
+		gpu_write(gpu, REG_A6XX_RBBM_GPR0_CNTL, GPR0_GBIF_HALT_REQUEST);
+		spin_until((gpu_read(gpu, REG_A6XX_RBBM_VBIF_GX_RESET_STATUS) &
+			   (VBIF_RESET_ACK_MASK)) == VBIF_RESET_ACK_MASK);
+	} else if (!a6xx_has_gbif(adreno_gpu)) {
 		gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, VBIF_XIN_HALT_CTRL0_MASK);
 		spin_until((gpu_read(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL1) &
 			(VBIF_XIN_HALT_CTRL0_MASK)) == VBIF_XIN_HALT_CTRL0_MASK);
@@ -1861,6 +1878,9 @@ static int a6xx_pm_resume(struct msm_gpu *gpu)
 	if (ret)
 		goto err_bulk_clk;
 
+	if (adreno_is_a619_holi(adreno_gpu))
+		a6xx_sptprac_enable(gmu);
+
 	/* If anything goes south, tear the GPU down piece by piece.. */
 	if (ret) {
 err_bulk_clk:
@@ -1920,6 +1940,9 @@ static int a6xx_pm_suspend(struct msm_gpu *gpu)
 	/* Drain the outstanding traffic on memory buses */
 	a6xx_bus_clear_pending_transactions(adreno_gpu, true);
 
+	if (adreno_is_a619_holi(adreno_gpu))
+		a6xx_sptprac_disable(gmu);
+
 	clk_bulk_disable_unprepare(gpu->nr_clocks, gpu->grp_clks);
 
 	pm_runtime_put_sync(gmu->gxpd);
0 commit comments