@@ -748,8 +748,6 @@ static int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
  */
 static inline u32 ufshcd_get_intr_mask(struct ufs_hba *hba)
 {
-	if (hba->ufs_version == ufshci_version(1, 0))
-		return INTERRUPT_MASK_ALL_VER_10;
 	if (hba->ufs_version <= ufshci_version(2, 0))
 		return INTERRUPT_MASK_ALL_VER_11;
 
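The version checks above work because ufshci_version() packs major and minor into a single ordered word; in the upstream headers the packing is roughly (major << 8) | minor, which is what makes comparisons such as "<= ufshci_version(2, 0)" meaningful. A minimal userspace sketch of that assumption:

#include <assert.h>
#include <stdint.h>

/* Minimal sketch, assuming the upstream packing scheme: the UFSHCI headers
 * pack the version roughly as (major << 8) | minor. */
static inline uint32_t ufshci_version(uint32_t major, uint32_t minor)
{
	return (major << 8) | minor;
}

int main(void)
{
	/* Packed values order exactly like the versions they encode. */
	assert(ufshci_version(1, 1) < ufshci_version(2, 0));
	assert(ufshci_version(2, 0) <= ufshci_version(2, 0));
	assert(ufshci_version(3, 0) > ufshci_version(2, 1));
	return 0;
}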
@@ -990,30 +988,6 @@ bool ufshcd_is_hba_active(struct ufs_hba *hba)
 }
 EXPORT_SYMBOL_GPL(ufshcd_is_hba_active);
 
-u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba)
-{
-	/* HCI version 1.0 and 1.1 supports UniPro 1.41 */
-	if (hba->ufs_version <= ufshci_version(1, 1))
-		return UFS_UNIPRO_VER_1_41;
-	else
-		return UFS_UNIPRO_VER_1_6;
-}
-EXPORT_SYMBOL(ufshcd_get_local_unipro_ver);
-
-static bool ufshcd_is_unipro_pa_params_tuning_req(struct ufs_hba *hba)
-{
-	/*
-	 * If both host and device support UniPro ver1.6 or later, PA layer
-	 * parameters tuning happens during link startup itself.
-	 *
-	 * We can manually tune PA layer parameters if either host or device
-	 * doesn't support UniPro ver 1.6 or later. But to keep manual tuning
-	 * logic simple, we will only do manual tuning if local unipro version
-	 * doesn't support ver1.6 or later.
-	 */
-	return ufshcd_get_local_unipro_ver(hba) < UFS_UNIPRO_VER_1_6;
-}
-
 /**
  * ufshcd_pm_qos_init - initialize PM QoS request
  * @hba: per adapter instance
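The two helpers deleted above only decided whether manual PA-layer tuning was needed, and that decision reduces to an ordered comparison of UFS_UNIPRO_VER_* constants. A small sketch of the predicate's shape, with illustrative enum values standing in for the real header definitions:

#include <assert.h>
#include <stdbool.h>

/* Illustrative stand-ins: the real UFS_UNIPRO_VER_* constants live in the
 * UFS headers; the deleted predicate only relied on them being ordered. */
enum {
	UFS_UNIPRO_VER_1_41 = 1,
	UFS_UNIPRO_VER_1_6  = 2,
};

/* Shape of the deleted ufshcd_is_unipro_pa_params_tuning_req(): manual
 * PA-parameter tuning was wanted only below UniPro 1.6. */
static bool pa_params_tuning_req(int local_unipro_ver)
{
	return local_unipro_ver < UFS_UNIPRO_VER_1_6;
}

int main(void)
{
	assert(pa_params_tuning_req(UFS_UNIPRO_VER_1_41));
	assert(!pa_params_tuning_req(UFS_UNIPRO_VER_1_6));
	return 0;
}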
@@ -2674,14 +2648,7 @@ static void ufshcd_enable_intr(struct ufs_hba *hba, u32 intrs)
 {
 	u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
 
-	if (hba->ufs_version == ufshci_version(1, 0)) {
-		u32 rw;
-		rw = set & INTERRUPT_MASK_RW_VER_10;
-		set = rw | ((set ^ intrs) & intrs);
-	} else {
-		set |= intrs;
-	}
-
+	set |= intrs;
 	ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
 }
 
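The deleted UFSHCI 1.0 branch did not simply OR the new bits in: it kept only the current read-write bits (INTERRUPT_MASK_RW_VER_10) plus the requested bits, dropping other previously-enabled bits. A small userspace sketch contrasting the two computations; the mask value here is a placeholder, not the real 1.0 register layout:

#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Placeholder mask for illustration only, not the real VER_10 layout. */
#define INTERRUPT_MASK_RW_VER_10 0x000000FFu

/* What the deleted UFSHCI 1.0 branch computed: keep the current RW bits,
 * then add the requested bits that are not already set. */
static uint32_t enable_intr_ver10(uint32_t set, uint32_t intrs)
{
	uint32_t rw = set & INTERRUPT_MASK_RW_VER_10;

	return rw | ((set ^ intrs) & intrs);
}

/* What ufshcd_enable_intr() computes now that 1.0 support is gone. */
static uint32_t enable_intr(uint32_t set, uint32_t intrs)
{
	return set | intrs;
}

int main(void)
{
	uint32_t set = 0x0000F0F0u, intrs = 0x00000F00u;

	/* For this input the 1.0 path yields 0x00000FF0, silently dropping
	 * the already-enabled non-RW bits 0x0000F000; the current path
	 * simply ORs the request in and yields 0x0000FFF0. */
	printf("ver10: 0x%08" PRIx32 "  now: 0x%08" PRIx32 "\n",
	       enable_intr_ver10(set, intrs), enable_intr(set, intrs));
	assert(enable_intr_ver10(set, intrs) == 0x00000FF0u);
	assert(enable_intr(set, intrs) == 0x0000FFF0u);
	return 0;
}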
@@ -2694,16 +2661,7 @@ static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
 {
 	u32 set = ufshcd_readl(hba, REG_INTERRUPT_ENABLE);
 
-	if (hba->ufs_version == ufshci_version(1, 0)) {
-		u32 rw;
-		rw = (set & INTERRUPT_MASK_RW_VER_10) &
-			~(intrs & INTERRUPT_MASK_RW_VER_10);
-		set = rw | ((set & intrs) & ~INTERRUPT_MASK_RW_VER_10);
-
-	} else {
-		set &= ~intrs;
-	}
-
+	set &= ~intrs;
 	ufshcd_writel(hba, set, REG_INTERRUPT_ENABLE);
 }
 
@@ -2715,21 +2673,17 @@ static void ufshcd_disable_intr(struct ufs_hba *hba, u32 intrs)
  * @upiu_flags: flags required in the header
  * @cmd_dir: requests data direction
  * @ehs_length: Total EHS Length (in 32‐bytes units of all Extra Header Segments)
- * @legacy_type: UTP_CMD_TYPE_SCSI or UTP_CMD_TYPE_DEV_MANAGE
  */
 static void
 ufshcd_prepare_req_desc_hdr(struct ufs_hba *hba, struct ufshcd_lrb *lrbp,
 			    u8 *upiu_flags, enum dma_data_direction cmd_dir,
-			    int ehs_length, enum utp_cmd_type legacy_type)
+			    int ehs_length)
 {
 	struct utp_transfer_req_desc *req_desc = lrbp->utr_descriptor_ptr;
 	struct request_desc_header *h = &req_desc->header;
 	enum utp_data_direction data_direction;
 
-	if (hba->ufs_version <= ufshci_version(1, 1))
-		lrbp->command_type = legacy_type;
-	else
-		lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
+	lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE;
 
 	*h = (typeof(*h)){ };
 
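With the 1.x path gone, lrbp->command_type is unconditionally UTP_CMD_TYPE_UFS_STORAGE, which is what lets the callers below drop their legacy_type argument. The context line "*h = (typeof(*h)){ };" is the kernel's compound-literal idiom for zeroing a struct in place; a standalone sketch with a simplified stand-in type:

#include <assert.h>

/* Simplified stand-in for struct request_desc_header, for illustration. */
struct request_desc_header {
	unsigned char command_type;
	unsigned char flags;
	unsigned int ocs;
};

int main(void)
{
	struct request_desc_header hdr = { .flags = 0x1f, .ocs = 0xf };
	struct request_desc_header *h = &hdr;

	/* Same idiom as the context line above: a zero compound literal
	 * (GNU C typeof) resets every member of *h in one assignment. */
	*h = (typeof(*h)){ };

	assert(h->command_type == 0 && h->flags == 0 && h->ocs == 0);
	return 0;
}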
@@ -2863,7 +2817,7 @@ static int ufshcd_compose_devman_upiu(struct ufs_hba *hba,
 	u8 upiu_flags;
 	int ret = 0;
 
-	ufshcd_prepare_req_desc_hdr(hba, lrbp, &upiu_flags, DMA_NONE, 0, UTP_CMD_TYPE_DEV_MANAGE);
+	ufshcd_prepare_req_desc_hdr(hba, lrbp, &upiu_flags, DMA_NONE, 0);
 
 	if (hba->dev_cmd.type == DEV_CMD_TYPE_QUERY)
 		ufshcd_prepare_utp_query_req_upiu(hba, lrbp, upiu_flags);
@@ -2887,8 +2841,7 @@ static void ufshcd_comp_scsi_upiu(struct ufs_hba *hba, struct ufshcd_lrb *lrbp)
 	unsigned int ioprio_class = IOPRIO_PRIO_CLASS(req_get_ioprio(rq));
 	u8 upiu_flags;
 
-	ufshcd_prepare_req_desc_hdr(hba, lrbp, &upiu_flags,
-				    lrbp->cmd->sc_data_direction, 0, UTP_CMD_TYPE_SCSI);
+	ufshcd_prepare_req_desc_hdr(hba, lrbp, &upiu_flags, lrbp->cmd->sc_data_direction, 0);
 	if (ioprio_class == IOPRIO_CLASS_RT)
 		upiu_flags |= UPIU_CMD_FLAGS_CP;
 	ufshcd_prepare_utp_scsi_cmd_upiu(lrbp, upiu_flags);
@@ -5559,15 +5512,12 @@ void ufshcd_compl_one_cqe(struct ufs_hba *hba, int task_tag,
 		ufshcd_release_scsi_cmd(hba, lrbp);
 		/* Do not touch lrbp after scsi done */
 		scsi_done(cmd);
-	} else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE ||
-		   lrbp->command_type == UTP_CMD_TYPE_UFS_STORAGE) {
-		if (hba->dev_cmd.complete) {
-			if (cqe) {
-				ocs = le32_to_cpu(cqe->status) & MASK_OCS;
-				lrbp->utr_descriptor_ptr->header.ocs = ocs;
-			}
-			complete(hba->dev_cmd.complete);
+	} else if (hba->dev_cmd.complete) {
+		if (cqe) {
+			ocs = le32_to_cpu(cqe->status) & MASK_OCS;
+			lrbp->utr_descriptor_ptr->header.ocs = ocs;
 		}
+		complete(hba->dev_cmd.complete);
 	}
 }
 
@@ -7220,7 +7170,7 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba,
 
 	ufshcd_setup_dev_cmd(hba, lrbp, cmd_type, 0, tag);
 
-	ufshcd_prepare_req_desc_hdr(hba, lrbp, &upiu_flags, DMA_NONE, 0, UTP_CMD_TYPE_DEV_MANAGE);
+	ufshcd_prepare_req_desc_hdr(hba, lrbp, &upiu_flags, DMA_NONE, 0);
 
 	/* update the task tag in the request upiu */
 	req_upiu->header.task_tag = tag;
@@ -7372,7 +7322,7 @@ int ufshcd_advanced_rpmb_req_handler(struct ufs_hba *hba, struct utp_upiu_req *r
 
 	ufshcd_setup_dev_cmd(hba, lrbp, DEV_CMD_TYPE_RPMB, UFS_UPIU_RPMB_WLUN, tag);
 
-	ufshcd_prepare_req_desc_hdr(hba, lrbp, &upiu_flags, DMA_NONE, ehs, UTP_CMD_TYPE_DEV_MANAGE);
+	ufshcd_prepare_req_desc_hdr(hba, lrbp, &upiu_flags, DMA_NONE, ehs);
 
 	/* update the task tag */
 	req_upiu->header.task_tag = tag;
@@ -8359,83 +8309,6 @@ static void ufs_put_device_desc(struct ufs_hba *hba)
 	dev_info->model = NULL;
 }
 
-/**
- * ufshcd_tune_pa_tactivate - Tunes PA_TActivate of local UniPro
- * @hba: per-adapter instance
- *
- * PA_TActivate parameter can be tuned manually if UniPro version is less than
- * 1.61. PA_TActivate needs to be greater than or equal to peerM-PHY's
- * RX_MIN_ACTIVATETIME_CAPABILITY attribute. This optimal value can help reduce
- * the hibern8 exit latency.
- *
- * Return: zero on success, non-zero error value on failure.
- */
-static int ufshcd_tune_pa_tactivate(struct ufs_hba *hba)
-{
-	int ret = 0;
-	u32 peer_rx_min_activatetime = 0, tuned_pa_tactivate;
-
-	ret = ufshcd_dme_peer_get(hba,
-				  UIC_ARG_MIB_SEL(
-					RX_MIN_ACTIVATETIME_CAPABILITY,
-					UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
-				  &peer_rx_min_activatetime);
-	if (ret)
-		goto out;
-
-	/* make sure proper unit conversion is applied */
-	tuned_pa_tactivate =
-		((peer_rx_min_activatetime * RX_MIN_ACTIVATETIME_UNIT_US)
-		 / PA_TACTIVATE_TIME_UNIT_US);
-	ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
-			     tuned_pa_tactivate);
-
-out:
-	return ret;
-}
-
-/**
- * ufshcd_tune_pa_hibern8time - Tunes PA_Hibern8Time of local UniPro
- * @hba: per-adapter instance
- *
- * PA_Hibern8Time parameter can be tuned manually if UniPro version is less than
- * 1.61. PA_Hibern8Time needs to be maximum of local M-PHY's
- * TX_HIBERN8TIME_CAPABILITY & peer M-PHY's RX_HIBERN8TIME_CAPABILITY.
- * This optimal value can help reduce the hibern8 exit latency.
- *
- * Return: zero on success, non-zero error value on failure.
- */
-static int ufshcd_tune_pa_hibern8time(struct ufs_hba *hba)
-{
-	int ret = 0;
-	u32 local_tx_hibern8_time_cap = 0, peer_rx_hibern8_time_cap = 0;
-	u32 max_hibern8_time, tuned_pa_hibern8time;
-
-	ret = ufshcd_dme_get(hba,
-			     UIC_ARG_MIB_SEL(TX_HIBERN8TIME_CAPABILITY,
-					UIC_ARG_MPHY_TX_GEN_SEL_INDEX(0)),
-			     &local_tx_hibern8_time_cap);
-	if (ret)
-		goto out;
-
-	ret = ufshcd_dme_peer_get(hba,
-				  UIC_ARG_MIB_SEL(RX_HIBERN8TIME_CAPABILITY,
-					UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
-				  &peer_rx_hibern8_time_cap);
-	if (ret)
-		goto out;
-
-	max_hibern8_time = max(local_tx_hibern8_time_cap,
-			       peer_rx_hibern8_time_cap);
-	/* make sure proper unit conversion is applied */
-	tuned_pa_hibern8time = ((max_hibern8_time * HIBERN8TIME_UNIT_US)
-			/ PA_HIBERN8_TIME_UNIT_US);
-	ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME),
-			     tuned_pa_hibern8time);
-out:
-	return ret;
-}
-
 /**
  * ufshcd_quirk_tune_host_pa_tactivate - Ensures that host PA_TACTIVATE is
  * less than device PA_TACTIVATE time.
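Beyond the DME accessors, the deleted helpers were mostly unit conversion between M-PHY capability units and PA-layer attribute units. A worked sketch of the PA_TActivate arithmetic; the *_UNIT_US sizes here are assumptions for illustration, the real values live in the UniPro headers:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative unit sizes, assumed for this sketch. */
#define RX_MIN_ACTIVATETIME_UNIT_US	100
#define PA_TACTIVATE_TIME_UNIT_US	10

int main(void)
{
	/* Suppose the peer reports RX_MIN_ACTIVATETIME_CAPABILITY == 2,
	 * i.e. 200 us under the assumed unit. PA_TACTIVATE counts in
	 * 10 us units here, so the deleted ufshcd_tune_pa_tactivate()
	 * arithmetic would program 20. */
	uint32_t peer_rx_min_activatetime = 2;
	uint32_t tuned_pa_tactivate =
		(peer_rx_min_activatetime * RX_MIN_ACTIVATETIME_UNIT_US) /
		PA_TACTIVATE_TIME_UNIT_US;

	printf("PA_TActivate = %" PRIu32 " units\n", tuned_pa_tactivate);
	return 0;
}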
@@ -8508,11 +8381,6 @@ static int ufshcd_quirk_tune_host_pa_tactivate(struct ufs_hba *hba)
 
 static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
 {
-	if (ufshcd_is_unipro_pa_params_tuning_req(hba)) {
-		ufshcd_tune_pa_tactivate(hba);
-		ufshcd_tune_pa_hibern8time(hba);
-	}
-
 	ufshcd_vops_apply_dev_quirks(hba);
 
 	if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_TACTIVATE)