@@ -157,7 +157,7 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
 	 * GPU registers so we need to add 0x1a800 to the register value on A630
 	 * to get the right value from PM4.
 	 */
-	get_stats_counter(ring, REG_A6XX_GMU_ALWAYS_ON_COUNTER_L + 0x1a800,
+	get_stats_counter(ring, REG_A6XX_CP_ALWAYS_ON_COUNTER_LO,
 		rbmemptr_stats(ring, index, alwayson_start));

 	/* Invalidate CCU depth and color */
@@ -187,7 +187,7 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
 	get_stats_counter(ring, REG_A6XX_RBBM_PERFCTR_CP_0_LO,
 		rbmemptr_stats(ring, index, cpcycles_end));
-	get_stats_counter(ring, REG_A6XX_GMU_ALWAYS_ON_COUNTER_L + 0x1a800,
+	get_stats_counter(ring, REG_A6XX_CP_ALWAYS_ON_COUNTER_LO,
 		rbmemptr_stats(ring, index, alwayson_end));

 	/* Write the fence to the scratch register */
@@ -206,8 +206,8 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
 	OUT_RING(ring, submit->seqno);

 	trace_msm_gpu_submit_flush(submit,
-		gmu_read64(&a6xx_gpu->gmu, REG_A6XX_GMU_ALWAYS_ON_COUNTER_L,
-			REG_A6XX_GMU_ALWAYS_ON_COUNTER_H));
+		gpu_read64(gpu, REG_A6XX_CP_ALWAYS_ON_COUNTER_LO,
+			REG_A6XX_CP_ALWAYS_ON_COUNTER_HI));

 	a6xx_flush(gpu, ring);
 }
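Side note on the helper swap above: the CP always-on counter sits in the GPU's own MMIO aperture, so it can be read with the plain GPU accessors and, when sampled from PM4, needs no A630-specific 0x1a800 alias offset; the GMU counter required both. As a rough sketch of what a split lo/hi 64-bit MMIO read looks like (modeled on msm_gpu.h's gpu_read64(); the name and body here are illustrative, not the verbatim upstream helper):

	/*
	 * Sketch: read a 64-bit counter exposed as two 32-bit registers.
	 * LO is read first; for latched counters the HI half is captured
	 * on the LO read, which avoids tearing between the two halves.
	 */
	static inline u64 gpu_read64_sketch(struct msm_gpu *gpu, u32 lo, u32 hi)
	{
		u64 val;

		val = (u64) msm_readl(gpu->mmio + (lo << 2));		/* low word */
		val |= ((u64) msm_readl(gpu->mmio + (hi << 2)) << 32);	/* high word */

		return val;
	}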
@@ -462,6 +462,113 @@ static void a6xx_set_hwcg(struct msm_gpu *gpu, bool state)
 	gpu_write(gpu, REG_A6XX_RBBM_CLOCK_CNTL, state ? clock_cntl_on : 0);
 }

+/* For a615, a616, a618, a619, a630, a640 and a680 */
+static const u32 a6xx_protect[] = {
+	A6XX_PROTECT_RDONLY(0x00000, 0x04ff),
+	A6XX_PROTECT_RDONLY(0x00501, 0x0005),
+	A6XX_PROTECT_RDONLY(0x0050b, 0x02f4),
+	A6XX_PROTECT_NORDWR(0x0050e, 0x0000),
+	A6XX_PROTECT_NORDWR(0x00510, 0x0000),
+	A6XX_PROTECT_NORDWR(0x00534, 0x0000),
+	A6XX_PROTECT_NORDWR(0x00800, 0x0082),
+	A6XX_PROTECT_NORDWR(0x008a0, 0x0008),
+	A6XX_PROTECT_NORDWR(0x008ab, 0x0024),
+	A6XX_PROTECT_RDONLY(0x008de, 0x00ae),
+	A6XX_PROTECT_NORDWR(0x00900, 0x004d),
+	A6XX_PROTECT_NORDWR(0x0098d, 0x0272),
+	A6XX_PROTECT_NORDWR(0x00e00, 0x0001),
+	A6XX_PROTECT_NORDWR(0x00e03, 0x000c),
+	A6XX_PROTECT_NORDWR(0x03c00, 0x00c3),
+	A6XX_PROTECT_RDONLY(0x03cc4, 0x1fff),
+	A6XX_PROTECT_NORDWR(0x08630, 0x01cf),
+	A6XX_PROTECT_NORDWR(0x08e00, 0x0000),
+	A6XX_PROTECT_NORDWR(0x08e08, 0x0000),
+	A6XX_PROTECT_NORDWR(0x08e50, 0x001f),
+	A6XX_PROTECT_NORDWR(0x09624, 0x01db),
+	A6XX_PROTECT_NORDWR(0x09e70, 0x0001),
+	A6XX_PROTECT_NORDWR(0x09e78, 0x0187),
+	A6XX_PROTECT_NORDWR(0x0a630, 0x01cf),
+	A6XX_PROTECT_NORDWR(0x0ae02, 0x0000),
+	A6XX_PROTECT_NORDWR(0x0ae50, 0x032f),
+	A6XX_PROTECT_NORDWR(0x0b604, 0x0000),
+	A6XX_PROTECT_NORDWR(0x0be02, 0x0001),
+	A6XX_PROTECT_NORDWR(0x0be20, 0x17df),
+	A6XX_PROTECT_NORDWR(0x0f000, 0x0bff),
+	A6XX_PROTECT_RDONLY(0x0fc00, 0x1fff),
+	A6XX_PROTECT_NORDWR(0x11c00, 0x0000), /* note: infinite range */
+};
+
+/* These are for a620 and a650 */
+static const u32 a650_protect[] = {
+	A6XX_PROTECT_RDONLY(0x00000, 0x04ff),
+	A6XX_PROTECT_RDONLY(0x00501, 0x0005),
+	A6XX_PROTECT_RDONLY(0x0050b, 0x02f4),
+	A6XX_PROTECT_NORDWR(0x0050e, 0x0000),
+	A6XX_PROTECT_NORDWR(0x00510, 0x0000),
+	A6XX_PROTECT_NORDWR(0x00534, 0x0000),
+	A6XX_PROTECT_NORDWR(0x00800, 0x0082),
+	A6XX_PROTECT_NORDWR(0x008a0, 0x0008),
+	A6XX_PROTECT_NORDWR(0x008ab, 0x0024),
+	A6XX_PROTECT_RDONLY(0x008de, 0x00ae),
+	A6XX_PROTECT_NORDWR(0x00900, 0x004d),
+	A6XX_PROTECT_NORDWR(0x0098d, 0x0272),
+	A6XX_PROTECT_NORDWR(0x00e00, 0x0001),
+	A6XX_PROTECT_NORDWR(0x00e03, 0x000c),
+	A6XX_PROTECT_NORDWR(0x03c00, 0x00c3),
+	A6XX_PROTECT_RDONLY(0x03cc4, 0x1fff),
+	A6XX_PROTECT_NORDWR(0x08630, 0x01cf),
+	A6XX_PROTECT_NORDWR(0x08e00, 0x0000),
+	A6XX_PROTECT_NORDWR(0x08e08, 0x0000),
+	A6XX_PROTECT_NORDWR(0x08e50, 0x001f),
+	A6XX_PROTECT_NORDWR(0x08e80, 0x027f),
+	A6XX_PROTECT_NORDWR(0x09624, 0x01db),
+	A6XX_PROTECT_NORDWR(0x09e60, 0x0011),
+	A6XX_PROTECT_NORDWR(0x09e78, 0x0187),
+	A6XX_PROTECT_NORDWR(0x0a630, 0x01cf),
+	A6XX_PROTECT_NORDWR(0x0ae02, 0x0000),
+	A6XX_PROTECT_NORDWR(0x0ae50, 0x032f),
+	A6XX_PROTECT_NORDWR(0x0b604, 0x0000),
+	A6XX_PROTECT_NORDWR(0x0b608, 0x0007),
+	A6XX_PROTECT_NORDWR(0x0be02, 0x0001),
+	A6XX_PROTECT_NORDWR(0x0be20, 0x17df),
+	A6XX_PROTECT_NORDWR(0x0f000, 0x0bff),
+	A6XX_PROTECT_RDONLY(0x0fc00, 0x1fff),
+	A6XX_PROTECT_NORDWR(0x18400, 0x1fff),
+	A6XX_PROTECT_NORDWR(0x1a800, 0x1fff),
+	A6XX_PROTECT_NORDWR(0x1f400, 0x0443),
+	A6XX_PROTECT_RDONLY(0x1f844, 0x007b),
+	A6XX_PROTECT_NORDWR(0x1f887, 0x001b),
+	A6XX_PROTECT_NORDWR(0x1f8c0, 0x0000), /* note: infinite range */
+};
+
+static void a6xx_set_cp_protect(struct msm_gpu *gpu)
+{
+	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+	const u32 *regs = a6xx_protect;
+	unsigned i, count = ARRAY_SIZE(a6xx_protect), count_max = 32;
+
+	BUILD_BUG_ON(ARRAY_SIZE(a6xx_protect) > 32);
+	BUILD_BUG_ON(ARRAY_SIZE(a650_protect) > 48);
+
+	if (adreno_is_a650(adreno_gpu)) {
+		regs = a650_protect;
+		count = ARRAY_SIZE(a650_protect);
+		count_max = 48;
+	}
+
+	/*
+	 * Enable access protection to privileged registers, fault on an access
+	 * protect violation and select the last span to protect from the start
+	 * address all the way to the end of the register address space
+	 */
+	gpu_write(gpu, REG_A6XX_CP_PROTECT_CNTL, BIT(0) | BIT(1) | BIT(3));
+
+	for (i = 0; i < count - 1; i++)
+		gpu_write(gpu, REG_A6XX_CP_PROTECT(i), regs[i]);
+	/* last CP_PROTECT to have "infinite" length on the last entry */
+	gpu_write(gpu, REG_A6XX_CP_PROTECT(count_max - 1), regs[i]);
+}
+
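The tables above lean on the A6XX_PROTECT_RDONLY()/A6XX_PROTECT_NORDWR() helpers. A plausible sketch of that packing, assuming the usual CP_PROTECT encoding of base dword offset in the low bits, span length above it, and a read-disable flag in the top bit (the authoritative definitions belong in the driver's a6xx_gpu.h):

	/* Illustrative only: pack one protect span into a u32. The exact
	 * field widths are an assumption about the CP_PROTECT encoding. */
	#define A6XX_PROTECT_RDONLY_SKETCH(_reg, _len) \
		((((_len) & 0x3fff) << 18) | ((_reg) & 0x3ffff))
	#define A6XX_PROTECT_NORDWR_SKETCH(_reg, _len) \
		(BIT(31) | (((_len) & 0x3fff) << 18) | ((_reg) & 0x3ffff))

Note how a6xx_set_cp_protect() writes entries 0..count-2 into consecutive slots but places the final "infinite range" entry in slot count_max - 1: with the "last span" bit (BIT(3)) set in CP_PROTECT_CNTL, the hardware extends whatever occupies the last slot to the end of the register space, so that entry must land there no matter how short the table is.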
 static void a6xx_set_ubwc_config(struct msm_gpu *gpu)
 {
 	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@@ -489,7 +596,7 @@ static void a6xx_set_ubwc_config(struct msm_gpu *gpu)
 		rgb565_predicator << 11 | amsbc << 4 | lower_bit << 1);
 	gpu_write(gpu, REG_A6XX_TPL1_NC_MODE_CNTL, lower_bit << 1);
 	gpu_write(gpu, REG_A6XX_SP_NC_MODE_CNTL,
-			uavflagprd_inv >> 4 | lower_bit << 1);
+			uavflagprd_inv << 4 | lower_bit << 1);
 	gpu_write(gpu, REG_A6XX_UCHE_MODE_CNTL, lower_bit << 21);
 }
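The SP_NC_MODE_CNTL fix above is a one-character operator bug: UAVFLAGPRD_INV is a bitfield starting at bit 4, so the value must be shifted left into position. With the right shift, any small value collapses to zero and the field is never programmed. Assuming uavflagprd_inv == 2 (the value the driver picks for a650) and lower_bit == 1:

	uavflagprd_inv >> 4 | lower_bit << 1;	/* 2 >> 4 == 0, result 0x02: field lost */
	uavflagprd_inv << 4 | lower_bit << 1;	/* 2 << 4 == 0x20, result 0x22: field set */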
@@ -776,41 +883,7 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
 	}

 	/* Protect registers from the CP */
-	gpu_write(gpu, REG_A6XX_CP_PROTECT_CNTL, 0x00000003);
-
-	gpu_write(gpu, REG_A6XX_CP_PROTECT(0),
-			A6XX_PROTECT_RDONLY(0x600, 0x51));
-	gpu_write(gpu, REG_A6XX_CP_PROTECT(1), A6XX_PROTECT_RW(0xae50, 0x2));
-	gpu_write(gpu, REG_A6XX_CP_PROTECT(2), A6XX_PROTECT_RW(0x9624, 0x13));
-	gpu_write(gpu, REG_A6XX_CP_PROTECT(3), A6XX_PROTECT_RW(0x8630, 0x8));
-	gpu_write(gpu, REG_A6XX_CP_PROTECT(4), A6XX_PROTECT_RW(0x9e70, 0x1));
-	gpu_write(gpu, REG_A6XX_CP_PROTECT(5), A6XX_PROTECT_RW(0x9e78, 0x187));
-	gpu_write(gpu, REG_A6XX_CP_PROTECT(6), A6XX_PROTECT_RW(0xf000, 0x810));
-	gpu_write(gpu, REG_A6XX_CP_PROTECT(7),
-			A6XX_PROTECT_RDONLY(0xfc00, 0x3));
-	gpu_write(gpu, REG_A6XX_CP_PROTECT(8), A6XX_PROTECT_RW(0x50e, 0x0));
-	gpu_write(gpu, REG_A6XX_CP_PROTECT(9), A6XX_PROTECT_RDONLY(0x50f, 0x0));
-	gpu_write(gpu, REG_A6XX_CP_PROTECT(10), A6XX_PROTECT_RW(0x510, 0x0));
-	gpu_write(gpu, REG_A6XX_CP_PROTECT(11),
-			A6XX_PROTECT_RDONLY(0x0, 0x4f9));
-	gpu_write(gpu, REG_A6XX_CP_PROTECT(12),
-			A6XX_PROTECT_RDONLY(0x501, 0xa));
-	gpu_write(gpu, REG_A6XX_CP_PROTECT(13),
-			A6XX_PROTECT_RDONLY(0x511, 0x44));
-	gpu_write(gpu, REG_A6XX_CP_PROTECT(14), A6XX_PROTECT_RW(0xe00, 0xe));
-	gpu_write(gpu, REG_A6XX_CP_PROTECT(15), A6XX_PROTECT_RW(0x8e00, 0x0));
-	gpu_write(gpu, REG_A6XX_CP_PROTECT(16), A6XX_PROTECT_RW(0x8e50, 0xf));
-	gpu_write(gpu, REG_A6XX_CP_PROTECT(17), A6XX_PROTECT_RW(0xbe02, 0x0));
-	gpu_write(gpu, REG_A6XX_CP_PROTECT(18),
-			A6XX_PROTECT_RW(0xbe20, 0x11f3));
-	gpu_write(gpu, REG_A6XX_CP_PROTECT(19), A6XX_PROTECT_RW(0x800, 0x82));
-	gpu_write(gpu, REG_A6XX_CP_PROTECT(20), A6XX_PROTECT_RW(0x8a0, 0x8));
-	gpu_write(gpu, REG_A6XX_CP_PROTECT(21), A6XX_PROTECT_RW(0x8ab, 0x19));
-	gpu_write(gpu, REG_A6XX_CP_PROTECT(22), A6XX_PROTECT_RW(0x900, 0x4d));
-	gpu_write(gpu, REG_A6XX_CP_PROTECT(23), A6XX_PROTECT_RW(0x98d, 0x76));
-	gpu_write(gpu, REG_A6XX_CP_PROTECT(24),
-			A6XX_PROTECT_RDONLY(0x980, 0x4));
-	gpu_write(gpu, REG_A6XX_CP_PROTECT(25), A6XX_PROTECT_RW(0xa630, 0x0));
+	a6xx_set_cp_protect(gpu);

 	/* Enable expanded apriv for targets that support it */
 	if (gpu->hw_apriv) {
@@ -1211,7 +1284,7 @@ static int a6xx_pm_suspend(struct msm_gpu *gpu)
 	if (ret)
 		return ret;

-	if (adreno_gpu->base.hw_apriv || a6xx_gpu->has_whereami)
+	if (a6xx_gpu->shadow_bo)
 		for (i = 0; i < gpu->nr_rings; i++)
 			a6xx_gpu->shadow[i] = 0;
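The suspend-path change keys the zeroing on shadow_bo itself: the shadow[] array is backed by memory that exists only once the shadow buffer has actually been allocated, while hw_apriv || has_whereami merely says a shadow could be in use, and the two can disagree (for instance if suspend runs before the rings were ever set up). Testing the buffer pointer is the condition that actually guards the dereference.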