@@ -93,8 +93,7 @@ static const struct __ufs_qcom_bw_table {
 static struct ufs_qcom_host *ufs_qcom_hosts[MAX_UFS_QCOM_HOSTS];

 static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host);
-static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
-						       u32 clk_cycles);
+static int ufs_qcom_set_core_clk_ctrl(struct ufs_hba *hba, bool is_scale_up);

 static struct ufs_qcom_host *rcdev_to_ufs_host(struct reset_controller_dev *rcd)
 {
@@ -460,7 +459,7 @@ static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
 		return ret;
 	}

-	phy_set_mode_ext(phy, PHY_MODE_UFS_HS_B, host->hs_gear);
+	phy_set_mode_ext(phy, PHY_MODE_UFS_HS_B, host->phy_gear);

 	/* power on phy - start serdes and phy's power and clocks */
 	ret = phy_power_on(phy);
@@ -528,11 +527,20 @@ static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba,
 		return err;
 	}

-/*
+/**
+ * ufs_qcom_cfg_timers - Configure ufs qcom cfg timers
+ *
+ * @hba: host controller instance
+ * @gear: Current operating gear
+ * @hs: current power mode
+ * @rate: current operating rate (A or B)
+ * @update_link_startup_timer: indicate if link_start ongoing
+ * @is_pre_scale_up: flag to check if pre scale up condition.
  * Return: zero for success and non-zero in case of a failure.
  */
 static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
-			       u32 hs, u32 rate, bool update_link_startup_timer)
+			       u32 hs, u32 rate, bool update_link_startup_timer,
+			       bool is_pre_scale_up)
 {
 	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
 	struct ufs_clk_info *clki;
@@ -563,11 +571,14 @@ static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
 	/*
 	 * The Qunipro controller does not use following registers:
 	 * SYS1CLK_1US_REG, TX_SYMBOL_CLK_1US_REG, CLK_NS_REG &
-	 * UFS_REG_PA_LINK_STARTUP_TIMER
-	 * But UTP controller uses SYS1CLK_1US_REG register for Interrupt
+	 * UFS_REG_PA_LINK_STARTUP_TIMER.
+	 * However UTP controller uses SYS1CLK_1US_REG register for Interrupt
 	 * Aggregation logic.
-	 */
-	if (ufs_qcom_cap_qunipro(host) && !ufshcd_is_intr_aggr_allowed(hba))
+	 * It is mandatory to write SYS1CLK_1US_REG register on UFS host
+	 * controller V4.0.0 onwards.
+	 */
+	if (host->hw_ver.major < 4 && ufs_qcom_cap_qunipro(host) &&
+	    !ufshcd_is_intr_aggr_allowed(hba))
 		return 0;

 	if (gear == 0) {
@@ -576,8 +587,14 @@ static int ufs_qcom_cfg_timers(struct ufs_hba *hba, u32 gear,
 	}

 	list_for_each_entry(clki, &hba->clk_list_head, list) {
-		if (!strcmp(clki->name, "core_clk"))
-			core_clk_rate = clk_get_rate(clki->clk);
+		if (!strcmp(clki->name, "core_clk")) {
+			if (is_pre_scale_up)
+				core_clk_rate = clki->max_freq;
+			else
+				core_clk_rate = clk_get_rate(clki->clk);
+			break;
+		}
+
 	}

 	/* If frequency is smaller than 1MHz, set to 1MHz */
@@ -679,20 +696,17 @@ static int ufs_qcom_link_startup_notify(struct ufs_hba *hba,
 	switch (status) {
 	case PRE_CHANGE:
 		if (ufs_qcom_cfg_timers(hba, UFS_PWM_G1, SLOWAUTO_MODE,
-					0, true)) {
+					0, true, false)) {
 			dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
 				__func__);
 			return -EINVAL;
 		}

-		if (ufs_qcom_cap_qunipro(host))
-			/*
-			 * set unipro core clock cycles to 150 & clear clock
-			 * divider
-			 */
-			err = ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba,
-									  150);
-
+		if (ufs_qcom_cap_qunipro(host)) {
+			err = ufs_qcom_set_core_clk_ctrl(hba, true);
+			if (err)
+				dev_err(hba->dev, "cfg core clk ctrl failed\n");
+		}
 		/*
 		 * Some UFS devices (and may be host) have issues if LCC is
 		 * enabled. So we are setting PA_Local_TX_LCC_Enable to 0
@@ -909,8 +923,13 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
 			return ret;
 		}

-		/* Use the agreed gear */
-		host->hs_gear = dev_req_params->gear_tx;
+		/*
+		 * Update phy_gear only when the gears are scaled to a higher value. This is
+		 * because, the PHY gear settings are backwards compatible and we only need to
+		 * change the PHY gear settings while scaling to higher gears.
+		 */
+		if (dev_req_params->gear_tx > host->phy_gear)
+			host->phy_gear = dev_req_params->gear_tx;

 		/* enable the device ref clock before changing to HS mode */
 		if (!ufshcd_is_hs_mode(&hba->pwr_info) &&
@@ -926,7 +945,7 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
 	case POST_CHANGE:
 		if (ufs_qcom_cfg_timers(hba, dev_req_params->gear_rx,
 					dev_req_params->pwr_rx,
-					dev_req_params->hs_rate, false)) {
+					dev_req_params->hs_rate, false, false)) {
 			dev_err(hba->dev, "%s: ufs_qcom_cfg_timers() failed\n",
 				__func__);
 			/*
@@ -1277,7 +1296,7 @@ static int ufs_qcom_init(struct ufs_hba *hba)
 	 * Power up the PHY using the minimum supported gear (UFS_HS_G2).
 	 * Switching to max gear will be performed during reinit if supported.
 	 */
-	host->hs_gear = UFS_HS_G2;
+	host->phy_gear = UFS_HS_G2;

 	return 0;

@@ -1296,47 +1315,155 @@ static void ufs_qcom_exit(struct ufs_hba *hba)
 	phy_exit(host->generic_phy);
 }

-static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
-						       u32 clk_cycles)
+/**
+ * ufs_qcom_set_clk_40ns_cycles - Configure 40ns clk cycles
+ *
+ * @hba: host controller instance
+ * @cycles_in_1us: No of cycles in 1us to be configured
+ *
+ * Returns error if dme get/set configuration for 40ns fails
+ * and returns zero on success.
+ */
+static int ufs_qcom_set_clk_40ns_cycles(struct ufs_hba *hba,
+					u32 cycles_in_1us)
 {
+	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+	u32 cycles_in_40ns;
+	u32 reg;
 	int err;
-	u32 core_clk_ctrl_reg;

-	if (clk_cycles > DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK)
+	/*
+	 * UFS host controller V4.0.0 onwards needs to program
+	 * PA_VS_CORE_CLK_40NS_CYCLES attribute per programmed
+	 * frequency of unipro core clk of UFS host controller.
+	 */
+	if (host->hw_ver.major < 4)
+		return 0;
+
+	/*
+	 * Generic formulae for cycles_in_40ns = (freq_unipro/25) is not
+	 * applicable for all frequencies. For ex: ceil(37.5 MHz/25) will
+	 * be 2 and ceil(403 MHZ/25) will be 17 whereas Hardware
+	 * specification expect to be 16. Hence use exact hardware spec
+	 * mandated value for cycles_in_40ns instead of calculating using
+	 * generic formulae.
+	 */
+	switch (cycles_in_1us) {
+	case UNIPRO_CORE_CLK_FREQ_403_MHZ:
+		cycles_in_40ns = 16;
+		break;
+	case UNIPRO_CORE_CLK_FREQ_300_MHZ:
+		cycles_in_40ns = 12;
+		break;
+	case UNIPRO_CORE_CLK_FREQ_201_5_MHZ:
+		cycles_in_40ns = 8;
+		break;
+	case UNIPRO_CORE_CLK_FREQ_150_MHZ:
+		cycles_in_40ns = 6;
+		break;
+	case UNIPRO_CORE_CLK_FREQ_100_MHZ:
+		cycles_in_40ns = 4;
+		break;
+	case UNIPRO_CORE_CLK_FREQ_75_MHZ:
+		cycles_in_40ns = 3;
+		break;
+	case UNIPRO_CORE_CLK_FREQ_37_5_MHZ:
+		cycles_in_40ns = 2;
+		break;
+	default:
+		dev_err(hba->dev, "UNIPRO clk freq %u MHz not supported\n",
+			cycles_in_1us);
 		return -EINVAL;
+	}
+
+	err = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_VS_CORE_CLK_40NS_CYCLES), &reg);
+	if (err)
+		return err;
+
+	reg &= ~PA_VS_CORE_CLK_40NS_CYCLES_MASK;
+	reg |= cycles_in_40ns;
+
+	return ufshcd_dme_set(hba, UIC_ARG_MIB(PA_VS_CORE_CLK_40NS_CYCLES), reg);
+}
+
+static int ufs_qcom_set_core_clk_ctrl(struct ufs_hba *hba, bool is_scale_up)
+{
+	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+	struct list_head *head = &hba->clk_list_head;
+	struct ufs_clk_info *clki;
+	u32 cycles_in_1us;
+	u32 core_clk_ctrl_reg;
+	int err;
+
+	list_for_each_entry(clki, head, list) {
+		if (!IS_ERR_OR_NULL(clki->clk) &&
+		    !strcmp(clki->name, "core_clk_unipro")) {
+			if (is_scale_up)
+				cycles_in_1us = ceil(clki->max_freq, (1000 * 1000));
+			else
+				cycles_in_1us = ceil(clk_get_rate(clki->clk), (1000 * 1000));
+			break;
+		}
+	}

 	err = ufshcd_dme_get(hba,
 			    UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
 			    &core_clk_ctrl_reg);
 	if (err)
 		return err;

-	core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_MAX_CORE_CLK_1US_CYCLES_MASK;
-	core_clk_ctrl_reg |= clk_cycles;
+	/* Bit mask is different for UFS host controller V4.0.0 onwards */
+	if (host->hw_ver.major >= 4) {
+		if (!FIELD_FIT(CLK_1US_CYCLES_MASK_V4, cycles_in_1us))
+			return -ERANGE;
+		core_clk_ctrl_reg &= ~CLK_1US_CYCLES_MASK_V4;
+		core_clk_ctrl_reg |= FIELD_PREP(CLK_1US_CYCLES_MASK_V4, cycles_in_1us);
+	} else {
+		if (!FIELD_FIT(CLK_1US_CYCLES_MASK, cycles_in_1us))
+			return -ERANGE;
+		core_clk_ctrl_reg &= ~CLK_1US_CYCLES_MASK;
+		core_clk_ctrl_reg |= FIELD_PREP(CLK_1US_CYCLES_MASK, cycles_in_1us);
+	}

 	/* Clear CORE_CLK_DIV_EN */
 	core_clk_ctrl_reg &= ~DME_VS_CORE_CLK_CTRL_CORE_CLK_DIV_EN_BIT;

-	return ufshcd_dme_set(hba,
+	err = ufshcd_dme_set(hba,
 			    UIC_ARG_MIB(DME_VS_CORE_CLK_CTRL),
 			    core_clk_ctrl_reg);
-}
+	if (err)
+		return err;

-static int ufs_qcom_clk_scale_up_pre_change(struct ufs_hba *hba)
-{
-	/* nothing to do as of now */
-	return 0;
+	/* Configure unipro core clk 40ns attribute */
+	return ufs_qcom_set_clk_40ns_cycles(hba, cycles_in_1us);
 }

-static int ufs_qcom_clk_scale_up_post_change(struct ufs_hba *hba)
+static int ufs_qcom_clk_scale_up_pre_change(struct ufs_hba *hba)
 {
 	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+	struct ufs_pa_layer_attr *attr = &host->dev_req_params;
+	int ret;

 	if (!ufs_qcom_cap_qunipro(host))
 		return 0;

-	/* set unipro core clock cycles to 150 and clear clock divider */
-	return ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 150);
+	if (attr) {
+		ret = ufs_qcom_cfg_timers(hba, attr->gear_rx,
+					  attr->pwr_rx, attr->hs_rate,
+					  false, true);
+		if (ret) {
+			dev_err(hba->dev, "%s ufs cfg timer failed\n",
+				__func__);
+			return ret;
+		}
+	}
+	/* set unipro core clock attributes and clear clock divider */
+	return ufs_qcom_set_core_clk_ctrl(hba, true);
+}
+
+static int ufs_qcom_clk_scale_up_post_change(struct ufs_hba *hba)
+{
+	return 0;
 }

 static int ufs_qcom_clk_scale_down_pre_change(struct ufs_hba *hba)
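For reference, the arithmetic that the two new helpers perform can be checked outside the kernel. The sketch below is a standalone illustration only, assuming that DIV_ROUND_UP and the literal MHz values stand in for the kernel macro and the UNIPRO_CORE_CLK_FREQ_* constants used in the switch above (with 201.5 MHz and 37.5 MHz rounded up to 202 and 38); it is not driver code.

/*
 * Standalone illustration (not part of the patch): the cycle math behind
 * ufs_qcom_set_core_clk_ctrl() and ufs_qcom_set_clk_40ns_cycles() above.
 * DIV_ROUND_UP and the literal MHz values are local stand-ins for the
 * kernel macro and the UNIPRO_CORE_CLK_FREQ_* constants.
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* Table mirrors the switch() in ufs_qcom_set_clk_40ns_cycles() */
static unsigned int cycles_in_40ns_from_table(unsigned int cycles_in_1us)
{
	switch (cycles_in_1us) {
	case 403: return 16;
	case 300: return 12;
	case 202: return 8;	/* 201.5 MHz, rounded up */
	case 150: return 6;
	case 100: return 4;
	case 75:  return 3;
	case 38:  return 2;	/* 37.5 MHz, rounded up */
	default:  return 0;	/* unsupported frequency */
	}
}

int main(void)
{
	unsigned long freqs_hz[] = { 403000000UL, 37500000UL };

	for (unsigned int i = 0; i < 2; i++) {
		/* cycles_in_1us as computed via ceil(freq, 1000 * 1000) in the patch */
		unsigned int cycles_1us = DIV_ROUND_UP(freqs_hz[i], 1000UL * 1000UL);
		/* generic freq/25 rule vs. the hardware-mandated table value */
		unsigned int generic_40ns = DIV_ROUND_UP(cycles_1us, 25U);

		printf("%lu Hz: 1us cycles = %u, generic 40ns = %u, table 40ns = %u\n",
		       freqs_hz[i], cycles_1us, generic_40ns,
		       cycles_in_40ns_from_table(cycles_1us));
	}
	return 0;
}

Running this prints 17 for the generic rule at 403 MHz against the table's 16, which is exactly the mismatch the patch comment calls out as the reason for using the hardware-mandated table.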
@@ -1371,15 +1498,14 @@ static int ufs_qcom_clk_scale_down_post_change(struct ufs_hba *hba)
 	if (!ufs_qcom_cap_qunipro(host))
 		return 0;

-	/* set unipro core clock cycles to 75 and clear clock divider */
-	return ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(hba, 75);
+	/* set unipro core clock attributes and clear clock divider */
+	return ufs_qcom_set_core_clk_ctrl(hba, false);
 }

 static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba,
 				     bool scale_up, enum ufs_notify_change_status status)
 {
 	struct ufs_qcom_host *host = ufshcd_get_variant(hba);
-	struct ufs_pa_layer_attr *dev_req_params = &host->dev_req_params;
 	int err = 0;

 	/* check the host controller state before sending hibern8 cmd */
@@ -1409,11 +1535,6 @@ static int ufs_qcom_clk_scale_notify(struct ufs_hba *hba,
 		return err;
 	}

-	ufs_qcom_cfg_timers(hba,
-			    dev_req_params->gear_rx,
-			    dev_req_params->pwr_rx,
-			    dev_req_params->hs_rate,
-			    false);
 	ufs_qcom_icc_update_bw(host);
 	ufshcd_uic_hibern8_exit(hba);
 }
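Closing note on the mask handling introduced in ufs_qcom_set_core_clk_ctrl() earlier in this diff: the read-modify-write of DME_VS_CORE_CLK_CTRL relies on the kernel's FIELD_FIT()/FIELD_PREP() helpers. The sketch below re-creates that pattern in plain C with a hypothetical mask; EXAMPLE_1US_CYCLES_MASK is illustrative only and is not the driver's CLK_1US_CYCLES_MASK or CLK_1US_CYCLES_MASK_V4 definition.

/*
 * Illustration only (not part of the patch): the FIELD_FIT()/FIELD_PREP()
 * style read-modify-write used by ufs_qcom_set_core_clk_ctrl() above.
 * EXAMPLE_1US_CYCLES_MASK is a hypothetical field mask chosen for the demo.
 */
#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_1US_CYCLES_MASK	0x0FFF0000u	/* hypothetical field, bits 27:16 */

static int pack_1us_cycles(uint32_t *reg, uint32_t cycles_in_1us)
{
	/* Field offset derived from the mask's lowest set bit (GCC/Clang builtin) */
	unsigned int shift = __builtin_ctz(EXAMPLE_1US_CYCLES_MASK);

	/* FIELD_FIT() equivalent: reject values wider than the field */
	if (cycles_in_1us > (EXAMPLE_1US_CYCLES_MASK >> shift))
		return -1;

	/* Clear the field, then FIELD_PREP() the new value into place */
	*reg &= ~EXAMPLE_1US_CYCLES_MASK;
	*reg |= cycles_in_1us << shift;
	return 0;
}

int main(void)
{
	uint32_t reg = 0xDEAD0001u;	/* pretend DME_VS_CORE_CLK_CTRL readback */

	if (!pack_1us_cycles(&reg, 403))
		printf("register after packing 403 cycles: 0x%08X\n", (unsigned int)reg);
	return 0;
}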