@@ -274,9 +274,7 @@ static int ufs_qcom_check_hibern8(struct ufs_hba *hba)
 
 static void ufs_qcom_select_unipro_mode(struct ufs_qcom_host *host)
 {
-        ufshcd_rmwl(host->hba, QUNIPRO_SEL,
-                    ufs_qcom_cap_qunipro(host) ? QUNIPRO_SEL : 0,
-                    REG_UFS_CFG1);
+        ufshcd_rmwl(host->hba, QUNIPRO_SEL, QUNIPRO_SEL, REG_UFS_CFG1);
 
         if (host->hw_ver.major >= 0x05)
                 ufshcd_rmwl(host->hba, QUNIPRO_G4_SEL, 0, REG_UFS_CFG0);
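For reference, ufshcd_rmwl() is the ufshcd core's read-modify-write register helper; a minimal sketch, assuming the usual shape it has in the ufshcd private header:

        /* Sketch of the read-modify-write helper used above: clear the bits
         * in @mask, then program @val within that field of register @reg.
         */
        static inline void ufshcd_rmwl(struct ufs_hba *hba, u32 mask, u32 val, u32 reg)
        {
                u32 tmp;

                tmp = ufshcd_readl(hba, reg);
                tmp &= ~mask;           /* clear the field */
                tmp |= (val & mask);    /* set the new value, bounded by the mask */
                ufshcd_writel(hba, tmp, reg);
        }

With the capability check removed, the call above now unconditionally sets QUNIPRO_SEL in REG_UFS_CFG1.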
@@ -333,18 +331,8 @@ static u32 ufs_qcom_get_hs_gear(struct ufs_hba *hba)
 {
         struct ufs_qcom_host *host = ufshcd_get_variant(hba);
 
-        if (host->hw_ver.major == 0x1) {
-                /*
-                 * HS-G3 operations may not reliably work on legacy QCOM
-                 * UFS host controller hardware even though capability
-                 * exchange during link startup phase may end up
-                 * negotiating maximum supported gear as G3.
-                 * Hence downgrade the maximum supported gear to HS-G2.
-                 */
-                return UFS_HS_G2;
-        } else if (host->hw_ver.major >= 0x4) {
+        if (host->hw_ver.major >= 0x4)
                 return UFS_QCOM_MAX_GEAR(ufshcd_readl(hba, REG_UFS_PARAM0));
-        }
 
         /* Default is HS-G3 */
         return UFS_HS_G3;
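UFS_QCOM_MAX_GEAR() extracts the maximum supported HS gear that the controller advertises in REG_UFS_PARAM0; a sketch, assuming the bits-6:4 field layout used by the ufs-qcom header:

        #include <linux/bitfield.h>

        /* Assumed REG_UFS_PARAM0 layout (per the ufs-qcom header): the
         * maximum HS gear the controller supports sits in bits 6:4.
         */
        #define MAX_HS_GEAR_MASK        GENMASK(6, 4)
        #define UFS_QCOM_MAX_GEAR(x)    FIELD_GET(MAX_HS_GEAR_MASK, (x))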
@@ -457,41 +445,16 @@ static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
 {
         struct ufs_qcom_host *host = ufshcd_get_variant(hba);
         struct ufs_clk_info *clki;
-        u32 core_clk_period_in_ns;
-        u32 tx_clk_cycles_per_us = 0;
         unsigned long core_clk_rate = 0;
         u32 core_clk_cycles_per_us;
 
-        static u32 pwm_fr_table[][2] = {
-                {UFS_PWM_G1, 0x1},
-                {UFS_PWM_G2, 0x1},
-                {UFS_PWM_G3, 0x1},
-                {UFS_PWM_G4, 0x1},
-        };
-
-        static u32 hs_fr_table_rA[][2] = {
-                {UFS_HS_G1, 0x1F},
-                {UFS_HS_G2, 0x3e},
-                {UFS_HS_G3, 0x7D},
-        };
-
-        static u32 hs_fr_table_rB[][2] = {
-                {UFS_HS_G1, 0x24},
-                {UFS_HS_G2, 0x49},
-                {UFS_HS_G3, 0x92},
-        };
-
         /*
-         * The Qunipro controller does not use following registers:
-         * SYS1CLK_1US_REG, TX_SYMBOL_CLK_1US_REG, CLK_NS_REG &
-         * UFS_REG_PA_LINK_STARTUP_TIMER.
-         * However UTP controller uses SYS1CLK_1US_REG register for Interrupt
+         * UTP controller uses SYS1CLK_1US_REG register for Interrupt
          * Aggregation logic.
          * It is mandatory to write SYS1CLK_1US_REG register on UFS host
          * controller V4.0.0 onwards.
          */
-        if (host->hw_ver.major < 4 && ufs_qcom_cap_qunipro(host) &&
-            !ufshcd_is_intr_aggr_allowed(hba))
+        if (host->hw_ver.major < 4 && !ufshcd_is_intr_aggr_allowed(hba))
                 return 0;
 
         if (gear == 0) {
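For context, the part of ufs_qcom_cfg_timers() that survives (between this hunk and the next) programs SYS1CLK_1US from the core clock; roughly, as a sketch:

        /* Sketch: derive core-clock cycles per microsecond and program
         * REG_UFS_SYS1CLK_1US, which the UTP interrupt-aggregation logic
         * relies on.
         */
        core_clk_cycles_per_us = core_clk_rate / USEC_PER_SEC;
        if (ufshcd_readl(hba, REG_UFS_SYS1CLK_1US) != core_clk_cycles_per_us) {
                ufshcd_writel(hba, core_clk_cycles_per_us, REG_UFS_SYS1CLK_1US);
                /* make sure the write lands before link startup proceeds */
                mb();
        }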
@@ -524,87 +487,13 @@ static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
                 mb();
         }
 
-        if (ufs_qcom_cap_qunipro(host))
-                return 0;
-
-        core_clk_period_in_ns = NSEC_PER_SEC / core_clk_rate;
-        core_clk_period_in_ns <<= OFFSET_CLK_NS_REG;
-        core_clk_period_in_ns &= MASK_CLK_NS_REG;
-
-        switch (hs) {
-        case FASTAUTO_MODE:
-        case FAST_MODE:
-                if (rate == PA_HS_MODE_A) {
-                        if (gear > ARRAY_SIZE(hs_fr_table_rA)) {
-                                dev_err(hba->dev,
-                                        "%s: index %d exceeds table size %zu\n",
-                                        __func__, gear,
-                                        ARRAY_SIZE(hs_fr_table_rA));
-                                return -EINVAL;
-                        }
-                        tx_clk_cycles_per_us = hs_fr_table_rA[gear - 1][1];
-                } else if (rate == PA_HS_MODE_B) {
-                        if (gear > ARRAY_SIZE(hs_fr_table_rB)) {
-                                dev_err(hba->dev,
-                                        "%s: index %d exceeds table size %zu\n",
-                                        __func__, gear,
-                                        ARRAY_SIZE(hs_fr_table_rB));
-                                return -EINVAL;
-                        }
-                        tx_clk_cycles_per_us = hs_fr_table_rB[gear - 1][1];
-                } else {
-                        dev_err(hba->dev, "%s: invalid rate = %d\n",
-                                __func__, rate);
-                        return -EINVAL;
-                }
-                break;
-        case SLOWAUTO_MODE:
-        case SLOW_MODE:
-                if (gear > ARRAY_SIZE(pwm_fr_table)) {
-                        dev_err(hba->dev,
-                                "%s: index %d exceeds table size %zu\n",
-                                __func__, gear,
-                                ARRAY_SIZE(pwm_fr_table));
-                        return -EINVAL;
-                }
-                tx_clk_cycles_per_us = pwm_fr_table[gear - 1][1];
-                break;
-        case UNCHANGED:
-        default:
-                dev_err(hba->dev, "%s: invalid mode = %d\n", __func__, hs);
-                return -EINVAL;
-        }
-
-        if (ufshcd_readl(hba, REG_UFS_TX_SYMBOL_CLK_NS_US) !=
-            (core_clk_period_in_ns | tx_clk_cycles_per_us)) {
-                /* this register 2 fields shall be written at once */
-                ufshcd_writel(hba, core_clk_period_in_ns | tx_clk_cycles_per_us,
-                              REG_UFS_TX_SYMBOL_CLK_NS_US);
-                /*
-                 * make sure above write gets applied before we return from
-                 * this function.
-                 */
-                mb();
-        }
-
-        if (update_link_startup_timer && host->hw_ver.major != 0x5) {
-                ufshcd_writel(hba, ((core_clk_rate / MSEC_PER_SEC) * 100),
-                              REG_UFS_CFG0);
-                /*
-                 * make sure that this configuration is applied before
-                 * we return
-                 */
-                mb();
-        }
-
         return 0;
 }
 
 static int ufs_qcom_link_startup_notify(struct ufs_hba *hba,
                                         enum ufs_notify_change_status status)
 {
         int err = 0;
-        struct ufs_qcom_host *host = ufshcd_get_variant(hba);
 
         switch (status) {
         case PRE_CHANGE:
@@ -615,11 +504,9 @@ static int ufs_qcom_link_startup_notify(struct ufs_hba *hba,
                         return -EINVAL;
                 }
 
-                if (ufs_qcom_cap_qunipro(host)) {
-                        err = ufs_qcom_set_core_clk_ctrl(hba, true);
-                        if (err)
-                                dev_err(hba->dev, "cfg core clk ctrl failed\n");
-                }
+                err = ufs_qcom_set_core_clk_ctrl(hba, true);
+                if (err)
+                        dev_err(hba->dev, "cfg core clk ctrl failed\n");
                 /*
                  * Some UFS devices (and may be host) have issues if LCC is
                  * enabled. So we are setting PA_Local_TX_LCC_Enable to 0
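The LCC disable that this comment introduces happens just past the visible context; in the ufshcd core it is a single DME attribute write. A sketch of that helper, as commonly defined in ufshcd.h:

        /* Disable the host's TX line-control commands (LCC) by zeroing the
         * PA_Local_TX_LCC_Enable UniPro attribute over DME.
         */
        static inline int ufshcd_disable_host_tx_lcc(struct ufs_hba *hba)
        {
                return ufshcd_dme_set(hba, UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE), 0);
        }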
@@ -918,12 +805,7 @@ static int ufs_qcom_apply_dev_quirks(struct ufs_hba *hba)
 
 static u32 ufs_qcom_get_ufs_hci_version(struct ufs_hba *hba)
 {
-        struct ufs_qcom_host *host = ufshcd_get_variant(hba);
-
-        if (host->hw_ver.major == 0x1)
-                return ufshci_version(1, 1);
-        else
-                return ufshci_version(2, 0);
+        return ufshci_version(2, 0);
 }
 
 /**
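ufshci_version() packs the (major, minor) pair in the UFSHCI VER register layout, so the controller now always reports UFSHCI 2.0; a sketch assuming the encoding from include/ufs/ufshci.h:

        /* Sketch: UFSHCI VER register layout, with the major version in
         * bits 15:8 and the minor in bits 7:4, so ufshci_version(2, 0)
         * yields 0x200.
         */
        static inline u32 ufshci_version(u32 major, u32 minor)
        {
                return (major << 8) | (minor << 4);
        }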
@@ -939,46 +821,21 @@ static void ufs_qcom_advertise_quirks(struct ufs_hba *hba)
 {
         struct ufs_qcom_host *host = ufshcd_get_variant(hba);
 
-        if (host->hw_ver.major == 0x01) {
-                hba->quirks |= UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
-                            | UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP
-                            | UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE;
-
-                if (host->hw_ver.minor == 0x0001 && host->hw_ver.step == 0x0001)
-                        hba->quirks |= UFSHCD_QUIRK_BROKEN_INTR_AGGR;
-
-                hba->quirks |= UFSHCD_QUIRK_BROKEN_LCC;
-        }
-
-        if (host->hw_ver.major == 0x2) {
+        if (host->hw_ver.major == 0x2)
                 hba->quirks |= UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION;
 
-                if (!ufs_qcom_cap_qunipro(host))
-                        /* Legacy UniPro mode still need following quirks */
-                        hba->quirks |= (UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS
-                                | UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE
-                                | UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP);
-        }
-
         if (host->hw_ver.major > 0x3)
                 hba->quirks |= UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH;
 }
 
 static void ufs_qcom_set_caps(struct ufs_hba *hba)
 {
-        struct ufs_qcom_host *host = ufshcd_get_variant(hba);
-
         hba->caps |= UFSHCD_CAP_CLK_GATING | UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
         hba->caps |= UFSHCD_CAP_CLK_SCALING | UFSHCD_CAP_WB_WITH_CLK_SCALING;
         hba->caps |= UFSHCD_CAP_AUTO_BKOPS_SUSPEND;
         hba->caps |= UFSHCD_CAP_WB_EN;
         hba->caps |= UFSHCD_CAP_AGGR_POWER_COLLAPSE;
         hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;
 }
 
 /**
@@ -1101,9 +958,7 @@ static int ufs_qcom_init(struct ufs_hba *hba)
 {
         int err;
         struct device *dev = hba->dev;
-        struct platform_device *pdev = to_platform_device(dev);
         struct ufs_qcom_host *host;
-        struct resource *res;
         struct ufs_clk_info *clki;
 
         host = devm_kzalloc(dev, sizeof(*host), GFP_KERNEL);
@@ -1154,25 +1009,8 @@ static int ufs_qcom_init(struct ufs_hba *hba)
         ufs_qcom_get_controller_revision(hba, &host->hw_ver.major,
                                          &host->hw_ver.minor, &host->hw_ver.step);
 
-        /*
-         * for newer controllers, device reference clock control bit has
-         * moved inside UFS controller register address space itself.
-         */
-        if (host->hw_ver.major >= 0x02) {
-                host->dev_ref_clk_ctrl_mmio = hba->mmio_base + REG_UFS_CFG1;
-                host->dev_ref_clk_en_mask = BIT(26);
-        } else {
-                /* "dev_ref_clk_ctrl_mem" is optional resource */
-                res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
-                                                   "dev_ref_clk_ctrl_mem");
-                if (res) {
-                        host->dev_ref_clk_ctrl_mmio =
-                                        devm_ioremap_resource(dev, res);
-                        if (IS_ERR(host->dev_ref_clk_ctrl_mmio))
-                                host->dev_ref_clk_ctrl_mmio = NULL;
-                        host->dev_ref_clk_en_mask = BIT(5);
-                }
-        }
+        host->dev_ref_clk_ctrl_mmio = hba->mmio_base + REG_UFS_CFG1;
+        host->dev_ref_clk_en_mask = BIT(26);
 
         list_for_each_entry(clki, &hba->clk_list_head, list) {
                 if (!strcmp(clki->name, "core_clk_unipro"))
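With the legacy fallback gone, the device reference clock is always toggled through bit 26 of REG_UFS_CFG1. A simplified sketch of how a helper in the style of the driver's ufs_qcom_dev_ref_clk_ctrl() uses the two fields cached above (the function name and body here are illustrative assumptions, minus the ordering barriers the real code adds):

        /* Sketch: flip the device reference clock enable bit cached in
         * host->dev_ref_clk_en_mask at the MMIO address saved above.
         */
        static void ufs_qcom_dev_ref_clk_toggle(struct ufs_qcom_host *host, bool enable)
        {
                u32 temp = readl_relaxed(host->dev_ref_clk_ctrl_mmio);

                if (enable)
                        temp |= host->dev_ref_clk_en_mask;
                else
                        temp &= ~host->dev_ref_clk_en_mask;

                writel_relaxed(temp, host->dev_ref_clk_ctrl_mmio);
        }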
@@ -1351,9 +1189,6 @@ static int ufs_qcom_clk_scale_up_pre_change(struct ufs_hba *hba)
         struct ufs_pa_layer_attr *attr = &host->dev_req_params;
         int ret;
 
-        if (!ufs_qcom_cap_qunipro(host))
-                return 0;
-
         ret = ufs_qcom_cfg_timers(hba, attr->gear_rx, attr->pwr_rx,
                                   attr->hs_rate, false, true);
         if (ret) {
@@ -1371,13 +1206,9 @@ static int ufs_qcom_clk_scale_up_post_change(struct ufs_hba *hba)
 
 static int ufs_qcom_clk_scale_down_pre_change(struct ufs_hba *hba)
 {
-        struct ufs_qcom_host *host = ufshcd_get_variant(hba);
         int err;
         u32 core_clk_ctrl_reg;
 
-        if (!ufs_qcom_cap_qunipro(host))
-                return 0;
-
         err = ufshcd_dme_get(hba,
                              UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
                              &core_clk_ctrl_reg);
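ufs_qcom_set_core_clk_ctrl(), now called on every chip, follows the same DME read-modify-write pattern that this hunk's context shows; a generic sketch (the helper name and the field mask are assumptions, not the driver's exact code):

        /* Sketch: generic read-modify-write of the vendor-specific
         * DME_VS_CORE_CLK_CTRL attribute over the DME interface.
         */
        static int example_core_clk_ctrl_rmw(struct ufs_hba *hba, u32 mask, u32 val)
        {
                u32 reg;
                int err;

                err = ufshcd_dme_get(hba, UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL), &reg);
                if (err)
                        return err;

                reg &= ~mask;
                reg |= val & mask;

                return ufshcd_dme_set(hba, UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL), reg);
        }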
@@ -1396,11 +1227,6 @@ static int ufs_qcom_clk_scale_down_pre_change(struct ufs_hba *hba)
 
 static int ufs_qcom_clk_scale_down_post_change(struct ufs_hba *hba)
 {
-        struct ufs_qcom_host *host = ufshcd_get_variant(hba);
-
-        if (!ufs_qcom_cap_qunipro(host))
-                return 0;
-
         /* set unipro core clock attributes and clear clock divider */
         return ufs_qcom_set_core_clk_ctrl(hba, false);
 }