/*
 * Convert an index into the pcie_link_speed[] table to an interconnect
 * bandwidth value. No space is allowed before the parameter list: with one,
 * this would become an object-like macro and break every call site.
 */
#define QCOM_PCIE_LINK_SPEED_TO_BW(speed) \
		Mbps_to_icc(PCIE_SPEED2MBS_ENC(pcie_link_speed[speed]))
/*
 * Resources for the IP rev 1.0.0 glue.
 *
 * Clocks are obtained with devm_clk_bulk_get_all(), so the array is
 * allocated at runtime and its length is carried in @num_clks instead of
 * a fixed-size array bounded by a MAX_CLOCKS define.
 */
struct qcom_pcie_resources_1_0_0 {
	struct clk_bulk_data *clks;	/* all clocks described in DT */
	int num_clks;			/* entries in @clks */
	struct reset_control *core;	/* "core" reset line */
	struct regulator *vdda;		/* analog supply */
};
164
- #define QCOM_PCIE_2_1_0_MAX_CLOCKS 5
165
164
#define QCOM_PCIE_2_1_0_MAX_RESETS 6
166
165
#define QCOM_PCIE_2_1_0_MAX_SUPPLY 3
167
166
struct qcom_pcie_resources_2_1_0 {
168
- struct clk_bulk_data clks [QCOM_PCIE_2_1_0_MAX_CLOCKS ];
167
+ struct clk_bulk_data * clks ;
168
+ int num_clks ;
169
169
struct reset_control_bulk_data resets [QCOM_PCIE_2_1_0_MAX_RESETS ];
170
170
int num_resets ;
171
171
struct regulator_bulk_data supplies [QCOM_PCIE_2_1_0_MAX_SUPPLY ];
172
172
};
173
173
174
- #define QCOM_PCIE_2_3_2_MAX_CLOCKS 4
175
174
#define QCOM_PCIE_2_3_2_MAX_SUPPLY 2
176
175
struct qcom_pcie_resources_2_3_2 {
177
- struct clk_bulk_data clks [QCOM_PCIE_2_3_2_MAX_CLOCKS ];
176
+ struct clk_bulk_data * clks ;
177
+ int num_clks ;
178
178
struct regulator_bulk_data supplies [QCOM_PCIE_2_3_2_MAX_SUPPLY ];
179
179
};
180
180
181
- #define QCOM_PCIE_2_3_3_MAX_CLOCKS 5
182
181
#define QCOM_PCIE_2_3_3_MAX_RESETS 7
183
182
struct qcom_pcie_resources_2_3_3 {
184
- struct clk_bulk_data clks [QCOM_PCIE_2_3_3_MAX_CLOCKS ];
183
+ struct clk_bulk_data * clks ;
184
+ int num_clks ;
185
185
struct reset_control_bulk_data rst [QCOM_PCIE_2_3_3_MAX_RESETS ];
186
186
};
187
187
188
- #define QCOM_PCIE_2_4_0_MAX_CLOCKS 4
189
188
#define QCOM_PCIE_2_4_0_MAX_RESETS 12
190
189
struct qcom_pcie_resources_2_4_0 {
191
- struct clk_bulk_data clks [ QCOM_PCIE_2_4_0_MAX_CLOCKS ] ;
190
+ struct clk_bulk_data * clks ;
192
191
int num_clks ;
193
192
struct reset_control_bulk_data resets [QCOM_PCIE_2_4_0_MAX_RESETS ];
194
193
int num_resets ;
195
194
};
196
195
197
- #define QCOM_PCIE_2_7_0_MAX_CLOCKS 15
198
196
#define QCOM_PCIE_2_7_0_MAX_SUPPLIES 2
199
197
struct qcom_pcie_resources_2_7_0 {
200
- struct clk_bulk_data clks [ QCOM_PCIE_2_7_0_MAX_CLOCKS ] ;
198
+ struct clk_bulk_data * clks ;
201
199
int num_clks ;
202
200
struct regulator_bulk_data supplies [QCOM_PCIE_2_7_0_MAX_SUPPLIES ];
203
201
struct reset_control * rst ;
204
202
};
205
/* Resources for the IP rev 2.9.0 glue; clocks come from devm_clk_bulk_get_all(). */
struct qcom_pcie_resources_2_9_0 {
	struct clk_bulk_data *clks;	/* all clocks described in DT */
	int num_clks;			/* entries in @clks */
	struct reset_control *rst;	/* aggregate reset array handle */
};
209
@@ -337,21 +335,11 @@ static int qcom_pcie_get_resources_2_1_0(struct qcom_pcie *pcie)
337
335
if (ret )
338
336
return ret ;
339
337
340
- res -> clks [0 ].id = "iface" ;
341
- res -> clks [1 ].id = "core" ;
342
- res -> clks [2 ].id = "phy" ;
343
- res -> clks [3 ].id = "aux" ;
344
- res -> clks [4 ].id = "ref" ;
345
-
346
- /* iface, core, phy are required */
347
- ret = devm_clk_bulk_get (dev , 3 , res -> clks );
348
- if (ret < 0 )
349
- return ret ;
350
-
351
- /* aux, ref are optional */
352
- ret = devm_clk_bulk_get_optional (dev , 2 , res -> clks + 3 );
353
- if (ret < 0 )
354
- return ret ;
338
+ res -> num_clks = devm_clk_bulk_get_all (dev , & res -> clks );
339
+ if (res -> num_clks < 0 ) {
340
+ dev_err (dev , "Failed to get clocks\n" );
341
+ return res -> num_clks ;
342
+ }
355
343
356
344
res -> resets [0 ].id = "pci" ;
357
345
res -> resets [1 ].id = "axi" ;
@@ -373,7 +361,7 @@ static void qcom_pcie_deinit_2_1_0(struct qcom_pcie *pcie)
373
361
{
374
362
struct qcom_pcie_resources_2_1_0 * res = & pcie -> res .v2_1_0 ;
375
363
376
- clk_bulk_disable_unprepare (ARRAY_SIZE ( res -> clks ) , res -> clks );
364
+ clk_bulk_disable_unprepare (res -> num_clks , res -> clks );
377
365
reset_control_bulk_assert (res -> num_resets , res -> resets );
378
366
379
367
writel (1 , pcie -> parf + PARF_PHY_CTRL );
@@ -425,7 +413,7 @@ static int qcom_pcie_post_init_2_1_0(struct qcom_pcie *pcie)
425
413
val &= ~PHY_TEST_PWR_DOWN ;
426
414
writel (val , pcie -> parf + PARF_PHY_CTRL );
427
415
428
- ret = clk_bulk_prepare_enable (ARRAY_SIZE ( res -> clks ) , res -> clks );
416
+ ret = clk_bulk_prepare_enable (res -> num_clks , res -> clks );
429
417
if (ret )
430
418
return ret ;
431
419
@@ -476,20 +464,16 @@ static int qcom_pcie_get_resources_1_0_0(struct qcom_pcie *pcie)
476
464
struct qcom_pcie_resources_1_0_0 * res = & pcie -> res .v1_0_0 ;
477
465
struct dw_pcie * pci = pcie -> pci ;
478
466
struct device * dev = pci -> dev ;
479
- int ret ;
480
467
481
468
res -> vdda = devm_regulator_get (dev , "vdda" );
482
469
if (IS_ERR (res -> vdda ))
483
470
return PTR_ERR (res -> vdda );
484
471
485
- res -> clks [0 ].id = "iface" ;
486
- res -> clks [1 ].id = "aux" ;
487
- res -> clks [2 ].id = "master_bus" ;
488
- res -> clks [3 ].id = "slave_bus" ;
489
-
490
- ret = devm_clk_bulk_get (dev , ARRAY_SIZE (res -> clks ), res -> clks );
491
- if (ret < 0 )
492
- return ret ;
472
+ res -> num_clks = devm_clk_bulk_get_all (dev , & res -> clks );
473
+ if (res -> num_clks < 0 ) {
474
+ dev_err (dev , "Failed to get clocks\n" );
475
+ return res -> num_clks ;
476
+ }
493
477
494
478
res -> core = devm_reset_control_get_exclusive (dev , "core" );
495
479
return PTR_ERR_OR_ZERO (res -> core );
@@ -500,7 +484,7 @@ static void qcom_pcie_deinit_1_0_0(struct qcom_pcie *pcie)
500
484
struct qcom_pcie_resources_1_0_0 * res = & pcie -> res .v1_0_0 ;
501
485
502
486
reset_control_assert (res -> core );
503
- clk_bulk_disable_unprepare (ARRAY_SIZE ( res -> clks ) , res -> clks );
487
+ clk_bulk_disable_unprepare (res -> num_clks , res -> clks );
504
488
regulator_disable (res -> vdda );
505
489
}
506
490
@@ -517,7 +501,7 @@ static int qcom_pcie_init_1_0_0(struct qcom_pcie *pcie)
517
501
return ret ;
518
502
}
519
503
520
- ret = clk_bulk_prepare_enable (ARRAY_SIZE ( res -> clks ) , res -> clks );
504
+ ret = clk_bulk_prepare_enable (res -> num_clks , res -> clks );
521
505
if (ret ) {
522
506
dev_err (dev , "cannot prepare/enable clocks\n" );
523
507
goto err_assert_reset ;
@@ -532,7 +516,7 @@ static int qcom_pcie_init_1_0_0(struct qcom_pcie *pcie)
532
516
return 0 ;
533
517
534
518
err_disable_clks :
535
- clk_bulk_disable_unprepare (ARRAY_SIZE ( res -> clks ) , res -> clks );
519
+ clk_bulk_disable_unprepare (res -> num_clks , res -> clks );
536
520
err_assert_reset :
537
521
reset_control_assert (res -> core );
538
522
@@ -580,14 +564,11 @@ static int qcom_pcie_get_resources_2_3_2(struct qcom_pcie *pcie)
580
564
if (ret )
581
565
return ret ;
582
566
583
- res -> clks [0 ].id = "aux" ;
584
- res -> clks [1 ].id = "cfg" ;
585
- res -> clks [2 ].id = "bus_master" ;
586
- res -> clks [3 ].id = "bus_slave" ;
587
-
588
- ret = devm_clk_bulk_get (dev , ARRAY_SIZE (res -> clks ), res -> clks );
589
- if (ret < 0 )
590
- return ret ;
567
+ res -> num_clks = devm_clk_bulk_get_all (dev , & res -> clks );
568
+ if (res -> num_clks < 0 ) {
569
+ dev_err (dev , "Failed to get clocks\n" );
570
+ return res -> num_clks ;
571
+ }
591
572
592
573
return 0 ;
593
574
}
@@ -596,7 +577,7 @@ static void qcom_pcie_deinit_2_3_2(struct qcom_pcie *pcie)
596
577
{
597
578
struct qcom_pcie_resources_2_3_2 * res = & pcie -> res .v2_3_2 ;
598
579
599
- clk_bulk_disable_unprepare (ARRAY_SIZE ( res -> clks ) , res -> clks );
580
+ clk_bulk_disable_unprepare (res -> num_clks , res -> clks );
600
581
regulator_bulk_disable (ARRAY_SIZE (res -> supplies ), res -> supplies );
601
582
}
602
583
@@ -613,7 +594,7 @@ static int qcom_pcie_init_2_3_2(struct qcom_pcie *pcie)
613
594
return ret ;
614
595
}
615
596
616
- ret = clk_bulk_prepare_enable (ARRAY_SIZE ( res -> clks ) , res -> clks );
597
+ ret = clk_bulk_prepare_enable (res -> num_clks , res -> clks );
617
598
if (ret ) {
618
599
dev_err (dev , "cannot prepare/enable clocks\n" );
619
600
regulator_bulk_disable (ARRAY_SIZE (res -> supplies ), res -> supplies );
@@ -661,17 +642,11 @@ static int qcom_pcie_get_resources_2_4_0(struct qcom_pcie *pcie)
661
642
bool is_ipq = of_device_is_compatible (dev -> of_node , "qcom,pcie-ipq4019" );
662
643
int ret ;
663
644
664
- res -> clks [0 ].id = "aux" ;
665
- res -> clks [1 ].id = "master_bus" ;
666
- res -> clks [2 ].id = "slave_bus" ;
667
- res -> clks [3 ].id = "iface" ;
668
-
669
- /* qcom,pcie-ipq4019 is defined without "iface" */
670
- res -> num_clks = is_ipq ? 3 : 4 ;
671
-
672
- ret = devm_clk_bulk_get (dev , res -> num_clks , res -> clks );
673
- if (ret < 0 )
674
- return ret ;
645
+ res -> num_clks = devm_clk_bulk_get_all (dev , & res -> clks );
646
+ if (res -> num_clks < 0 ) {
647
+ dev_err (dev , "Failed to get clocks\n" );
648
+ return res -> num_clks ;
649
+ }
675
650
676
651
res -> resets [0 ].id = "axi_m" ;
677
652
res -> resets [1 ].id = "axi_s" ;
@@ -742,15 +717,11 @@ static int qcom_pcie_get_resources_2_3_3(struct qcom_pcie *pcie)
742
717
struct device * dev = pci -> dev ;
743
718
int ret ;
744
719
745
- res -> clks [0 ].id = "iface" ;
746
- res -> clks [1 ].id = "axi_m" ;
747
- res -> clks [2 ].id = "axi_s" ;
748
- res -> clks [3 ].id = "ahb" ;
749
- res -> clks [4 ].id = "aux" ;
750
-
751
- ret = devm_clk_bulk_get (dev , ARRAY_SIZE (res -> clks ), res -> clks );
752
- if (ret < 0 )
753
- return ret ;
720
+ res -> num_clks = devm_clk_bulk_get_all (dev , & res -> clks );
721
+ if (res -> num_clks < 0 ) {
722
+ dev_err (dev , "Failed to get clocks\n" );
723
+ return res -> num_clks ;
724
+ }
754
725
755
726
res -> rst [0 ].id = "axi_m" ;
756
727
res -> rst [1 ].id = "axi_s" ;
@@ -771,7 +742,7 @@ static void qcom_pcie_deinit_2_3_3(struct qcom_pcie *pcie)
771
742
{
772
743
struct qcom_pcie_resources_2_3_3 * res = & pcie -> res .v2_3_3 ;
773
744
774
- clk_bulk_disable_unprepare (ARRAY_SIZE ( res -> clks ) , res -> clks );
745
+ clk_bulk_disable_unprepare (res -> num_clks , res -> clks );
775
746
}
776
747
777
748
static int qcom_pcie_init_2_3_3 (struct qcom_pcie * pcie )
@@ -801,7 +772,7 @@ static int qcom_pcie_init_2_3_3(struct qcom_pcie *pcie)
801
772
*/
802
773
usleep_range (2000 , 2500 );
803
774
804
- ret = clk_bulk_prepare_enable (ARRAY_SIZE ( res -> clks ) , res -> clks );
775
+ ret = clk_bulk_prepare_enable (res -> num_clks , res -> clks );
805
776
if (ret ) {
806
777
dev_err (dev , "cannot prepare/enable clocks\n" );
807
778
goto err_assert_resets ;
@@ -862,8 +833,6 @@ static int qcom_pcie_get_resources_2_7_0(struct qcom_pcie *pcie)
862
833
struct qcom_pcie_resources_2_7_0 * res = & pcie -> res .v2_7_0 ;
863
834
struct dw_pcie * pci = pcie -> pci ;
864
835
struct device * dev = pci -> dev ;
865
- unsigned int num_clks , num_opt_clks ;
866
- unsigned int idx ;
867
836
int ret ;
868
837
869
838
res -> rst = devm_reset_control_array_get_exclusive (dev );
@@ -877,36 +846,11 @@ static int qcom_pcie_get_resources_2_7_0(struct qcom_pcie *pcie)
877
846
if (ret )
878
847
return ret ;
879
848
880
- idx = 0 ;
881
- res -> clks [idx ++ ].id = "aux" ;
882
- res -> clks [idx ++ ].id = "cfg" ;
883
- res -> clks [idx ++ ].id = "bus_master" ;
884
- res -> clks [idx ++ ].id = "bus_slave" ;
885
- res -> clks [idx ++ ].id = "slave_q2a" ;
886
-
887
- num_clks = idx ;
888
-
889
- ret = devm_clk_bulk_get (dev , num_clks , res -> clks );
890
- if (ret < 0 )
891
- return ret ;
892
-
893
- res -> clks [idx ++ ].id = "tbu" ;
894
- res -> clks [idx ++ ].id = "ddrss_sf_tbu" ;
895
- res -> clks [idx ++ ].id = "aggre0" ;
896
- res -> clks [idx ++ ].id = "aggre1" ;
897
- res -> clks [idx ++ ].id = "noc_aggr" ;
898
- res -> clks [idx ++ ].id = "noc_aggr_4" ;
899
- res -> clks [idx ++ ].id = "noc_aggr_south_sf" ;
900
- res -> clks [idx ++ ].id = "cnoc_qx" ;
901
- res -> clks [idx ++ ].id = "sleep" ;
902
- res -> clks [idx ++ ].id = "cnoc_sf_axi" ;
903
-
904
- num_opt_clks = idx - num_clks ;
905
- res -> num_clks = idx ;
906
-
907
- ret = devm_clk_bulk_get_optional (dev , num_opt_clks , res -> clks + num_clks );
908
- if (ret < 0 )
909
- return ret ;
849
+ res -> num_clks = devm_clk_bulk_get_all (dev , & res -> clks );
850
+ if (res -> num_clks < 0 ) {
851
+ dev_err (dev , "Failed to get clocks\n" );
852
+ return res -> num_clks ;
853
+ }
910
854
911
855
return 0 ;
912
856
}
@@ -1101,17 +1045,12 @@ static int qcom_pcie_get_resources_2_9_0(struct qcom_pcie *pcie)
1101
1045
struct qcom_pcie_resources_2_9_0 * res = & pcie -> res .v2_9_0 ;
1102
1046
struct dw_pcie * pci = pcie -> pci ;
1103
1047
struct device * dev = pci -> dev ;
1104
- int ret ;
1105
-
1106
- res -> clks [0 ].id = "iface" ;
1107
- res -> clks [1 ].id = "axi_m" ;
1108
- res -> clks [2 ].id = "axi_s" ;
1109
- res -> clks [3 ].id = "axi_bridge" ;
1110
- res -> clks [4 ].id = "rchng" ;
1111
1048
1112
- ret = devm_clk_bulk_get (dev , ARRAY_SIZE (res -> clks ), res -> clks );
1113
- if (ret < 0 )
1114
- return ret ;
1049
+ res -> num_clks = devm_clk_bulk_get_all (dev , & res -> clks );
1050
+ if (res -> num_clks < 0 ) {
1051
+ dev_err (dev , "Failed to get clocks\n" );
1052
+ return res -> num_clks ;
1053
+ }
1115
1054
1116
1055
res -> rst = devm_reset_control_array_get_exclusive (dev );
1117
1056
if (IS_ERR (res -> rst ))
@@ -1124,7 +1063,7 @@ static void qcom_pcie_deinit_2_9_0(struct qcom_pcie *pcie)
1124
1063
{
1125
1064
struct qcom_pcie_resources_2_9_0 * res = & pcie -> res .v2_9_0 ;
1126
1065
1127
- clk_bulk_disable_unprepare (ARRAY_SIZE ( res -> clks ) , res -> clks );
1066
+ clk_bulk_disable_unprepare (res -> num_clks , res -> clks );
1128
1067
}
1129
1068
1130
1069
static int qcom_pcie_init_2_9_0 (struct qcom_pcie * pcie )
@@ -1153,7 +1092,7 @@ static int qcom_pcie_init_2_9_0(struct qcom_pcie *pcie)
1153
1092
1154
1093
usleep_range (2000 , 2500 );
1155
1094
1156
- return clk_bulk_prepare_enable (ARRAY_SIZE ( res -> clks ) , res -> clks );
1095
+ return clk_bulk_prepare_enable (res -> num_clks , res -> clks );
1157
1096
}
1158
1097
1159
1098
static int qcom_pcie_post_init_2_9_0 (struct qcom_pcie * pcie )
0 commit comments