@@ -267,6 +267,12 @@ struct qcom_pcie_cfg {
 	bool no_l0s;
 };
 
+struct qcom_pcie_port {
+	struct list_head list;
+	struct gpio_desc *reset;
+	struct phy *phy;
+};
+
 struct qcom_pcie {
 	struct dw_pcie *pci;
 	void __iomem *parf;			/* DT parf */
@@ -279,24 +285,37 @@ struct qcom_pcie {
 	struct icc_path *icc_cpu;
 	const struct qcom_pcie_cfg *cfg;
 	struct dentry *debugfs;
+	struct list_head ports;
 	bool suspended;
 	bool use_pm_opp;
 };
 
 #define to_qcom_pcie(x)		dev_get_drvdata((x)->dev)
 
-static void qcom_ep_reset_assert(struct qcom_pcie *pcie)
+static void qcom_perst_assert(struct qcom_pcie *pcie, bool assert)
 {
-	gpiod_set_value_cansleep(pcie->reset, 1);
+	struct qcom_pcie_port *port;
+	int val = assert ? 1 : 0;
+
+	if (list_empty(&pcie->ports))
+		gpiod_set_value_cansleep(pcie->reset, val);
+	else
+		list_for_each_entry(port, &pcie->ports, list)
+			gpiod_set_value_cansleep(port->reset, val);
+
 	usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
 }
 
+static void qcom_ep_reset_assert(struct qcom_pcie *pcie)
+{
+	qcom_perst_assert(pcie, true);
+}
+
 static void qcom_ep_reset_deassert(struct qcom_pcie *pcie)
 {
 	/* Ensure that PERST has been asserted for at least 100 ms */
 	msleep(PCIE_T_PVPERL_MS);
-	gpiod_set_value_cansleep(pcie->reset, 0);
-	usleep_range(PERST_DELAY_US, PERST_DELAY_US + 500);
+	qcom_perst_assert(pcie, false);
 }
 
 static int qcom_pcie_start_link(struct dw_pcie *pci)
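
Note: qcom_perst_assert() keeps the existing single-GPIO behaviour whenever the ports list is empty and otherwise toggles every per-port PERST# line. The same dispatch idiom can be reused for any future per-port operation; a minimal illustrative helper (hypothetical, not part of this patch) would look like:

#include <linux/list.h>

/* Hypothetical example: count the Root Ports parsed into pcie->ports. */
static unsigned int qcom_pcie_port_count(struct qcom_pcie *pcie)
{
	struct qcom_pcie_port *port;
	unsigned int count = 0;

	list_for_each_entry(port, &pcie->ports, list)
		count++;

	return count;
}
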
@@ -1234,6 +1253,59 @@ static bool qcom_pcie_link_up(struct dw_pcie *pci)
 	return val & PCI_EXP_LNKSTA_DLLLA;
 }
 
+static void qcom_pcie_phy_exit(struct qcom_pcie *pcie)
+{
+	struct qcom_pcie_port *port;
+
+	if (list_empty(&pcie->ports))
+		phy_exit(pcie->phy);
+	else
+		list_for_each_entry(port, &pcie->ports, list)
+			phy_exit(port->phy);
+}
+
+static void qcom_pcie_phy_power_off(struct qcom_pcie *pcie)
+{
+	struct qcom_pcie_port *port;
+
+	if (list_empty(&pcie->ports)) {
+		phy_power_off(pcie->phy);
+	} else {
+		list_for_each_entry(port, &pcie->ports, list)
+			phy_power_off(port->phy);
+	}
+}
+
+static int qcom_pcie_phy_power_on(struct qcom_pcie *pcie)
+{
+	struct qcom_pcie_port *port;
+	int ret = 0;
+
+	if (list_empty(&pcie->ports)) {
+		ret = phy_set_mode_ext(pcie->phy, PHY_MODE_PCIE, PHY_MODE_PCIE_RC);
+		if (ret)
+			return ret;
+
+		ret = phy_power_on(pcie->phy);
+		if (ret)
+			return ret;
+	} else {
+		list_for_each_entry(port, &pcie->ports, list) {
+			ret = phy_set_mode_ext(port->phy, PHY_MODE_PCIE, PHY_MODE_PCIE_RC);
+			if (ret)
+				return ret;
+
+			ret = phy_power_on(port->phy);
+			if (ret) {
+				qcom_pcie_phy_power_off(pcie);
+				return ret;
+			}
+		}
+	}
+
+	return ret;
+}
+
 static int qcom_pcie_host_init(struct dw_pcie_rp *pp)
 {
 	struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
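
The PHY helpers mirror the same empty-list fallback: a single legacy pcie->phy, or one PHY per parsed port. For each PHY the bring-up is the two-step sequence sketched below; this only restates what qcom_pcie_phy_power_on() does per port, with partial failure unwound by calling qcom_pcie_phy_power_off() on everything (the function name here is illustrative):

#include <linux/phy/phy.h>

/* Sketch: set RC mode first, then power the PHY on; either step may fail. */
static int example_rc_phy_power_on(struct phy *phy)
{
	int ret;

	ret = phy_set_mode_ext(phy, PHY_MODE_PCIE, PHY_MODE_PCIE_RC);
	if (ret)
		return ret;

	return phy_power_on(phy);
}
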
@@ -1246,11 +1318,7 @@ static int qcom_pcie_host_init(struct dw_pcie_rp *pp)
 	if (ret)
 		return ret;
 
-	ret = phy_set_mode_ext(pcie->phy, PHY_MODE_PCIE, PHY_MODE_PCIE_RC);
-	if (ret)
-		goto err_deinit;
-
-	ret = phy_power_on(pcie->phy);
+	ret = qcom_pcie_phy_power_on(pcie);
 	if (ret)
 		goto err_deinit;
 
@@ -1273,7 +1341,7 @@ static int qcom_pcie_host_init(struct dw_pcie_rp *pp)
 err_assert_reset:
 	qcom_ep_reset_assert(pcie);
 err_disable_phy:
-	phy_power_off(pcie->phy);
+	qcom_pcie_phy_power_off(pcie);
 err_deinit:
 	pcie->cfg->ops->deinit(pcie);
 
@@ -1286,7 +1354,7 @@ static void qcom_pcie_host_deinit(struct dw_pcie_rp *pp)
 	struct qcom_pcie *pcie = to_qcom_pcie(pci);
 
 	qcom_ep_reset_assert(pcie);
-	phy_power_off(pcie->phy);
+	qcom_pcie_phy_power_off(pcie);
 	pcie->cfg->ops->deinit(pcie);
 }
 
@@ -1631,10 +1699,85 @@ static const struct pci_ecam_ops pci_qcom_ecam_ops = {
 	}
 };
 
+static int qcom_pcie_parse_port(struct qcom_pcie *pcie, struct device_node *node)
+{
+	struct device *dev = pcie->pci->dev;
+	struct qcom_pcie_port *port;
+	struct gpio_desc *reset;
+	struct phy *phy;
+	int ret;
+
+	reset = devm_fwnode_gpiod_get(dev, of_fwnode_handle(node),
+				      "reset", GPIOD_OUT_HIGH, "PERST#");
+	if (IS_ERR(reset))
+		return PTR_ERR(reset);
+
+	phy = devm_of_phy_get(dev, node, NULL);
+	if (IS_ERR(phy))
+		return PTR_ERR(phy);
+
+	port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
+	if (!port)
+		return -ENOMEM;
+
+	ret = phy_init(phy);
+	if (ret)
+		return ret;
+
+	port->reset = reset;
+	port->phy = phy;
+	INIT_LIST_HEAD(&port->list);
+	list_add_tail(&port->list, &pcie->ports);
+
+	return 0;
+}
+
+static int qcom_pcie_parse_ports(struct qcom_pcie *pcie)
+{
+	struct device *dev = pcie->pci->dev;
+	struct qcom_pcie_port *port, *tmp;
+	int ret = -ENOENT;
+
+	for_each_available_child_of_node_scoped(dev->of_node, of_port) {
+		ret = qcom_pcie_parse_port(pcie, of_port);
+		if (ret)
+			goto err_port_del;
+	}
+
+	return ret;
+
+err_port_del:
+	list_for_each_entry_safe(port, tmp, &pcie->ports, list)
+		list_del(&port->list);
+
+	return ret;
+}
+
+static int qcom_pcie_parse_legacy_binding(struct qcom_pcie *pcie)
+{
+	struct device *dev = pcie->pci->dev;
+	int ret;
+
+	pcie->phy = devm_phy_optional_get(dev, "pciephy");
+	if (IS_ERR(pcie->phy))
+		return PTR_ERR(pcie->phy);
+
+	pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_HIGH);
+	if (IS_ERR(pcie->reset))
+		return PTR_ERR(pcie->reset);
+
+	ret = phy_init(pcie->phy);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
 static int qcom_pcie_probe(struct platform_device *pdev)
 {
 	const struct qcom_pcie_cfg *pcie_cfg;
 	unsigned long max_freq = ULONG_MAX;
+	struct qcom_pcie_port *port, *tmp;
 	struct device *dev = &pdev->dev;
 	struct dev_pm_opp *opp;
 	struct qcom_pcie *pcie;
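
qcom_pcie_parse_port() grabs the per-port "reset" GPIO and PHY from a Root Port child node, and qcom_pcie_parse_ports() walks every available child of the host bridge node; with no children it returns -ENOENT so probe can fall back to the legacy binding. The scoped child iterator drops each node reference automatically, so no of_node_put() is needed even when the loop exits early. A small illustrative use of the same iterator (hypothetical function, assuming a kernel that provides the _scoped variant, as this patch does):

#include <linux/of.h>

/* Hypothetical example: count available Root Port child nodes. */
static unsigned int example_count_root_ports(struct device_node *host_bridge)
{
	unsigned int count = 0;

	for_each_available_child_of_node_scoped(host_bridge, of_port)
		count++;

	return count;
}
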
@@ -1701,6 +1844,8 @@ static int qcom_pcie_probe(struct platform_device *pdev)
 		goto err_pm_runtime_put;
 	}
 
+	INIT_LIST_HEAD(&pcie->ports);
+
 	pci->dev = dev;
 	pci->ops = &dw_pcie_ops;
 	pp = &pci->pp;
@@ -1709,12 +1854,6 @@ static int qcom_pcie_probe(struct platform_device *pdev)
 
 	pcie->cfg = pcie_cfg;
 
-	pcie->reset = devm_gpiod_get_optional(dev, "perst", GPIOD_OUT_HIGH);
-	if (IS_ERR(pcie->reset)) {
-		ret = PTR_ERR(pcie->reset);
-		goto err_pm_runtime_put;
-	}
-
 	pcie->parf = devm_platform_ioremap_resource_byname(pdev, "parf");
 	if (IS_ERR(pcie->parf)) {
 		ret = PTR_ERR(pcie->parf);
@@ -1737,12 +1876,6 @@ static int qcom_pcie_probe(struct platform_device *pdev)
 		}
 	}
 
-	pcie->phy = devm_phy_optional_get(dev, "pciephy");
-	if (IS_ERR(pcie->phy)) {
-		ret = PTR_ERR(pcie->phy);
-		goto err_pm_runtime_put;
-	}
-
 	/* OPP table is optional */
 	ret = devm_pm_opp_of_add_table(dev);
 	if (ret && ret != -ENODEV) {
@@ -1789,9 +1922,23 @@ static int qcom_pcie_probe(struct platform_device *pdev)
 
 	pp->ops = &qcom_pcie_dw_ops;
 
-	ret = phy_init(pcie->phy);
-	if (ret)
-		goto err_pm_runtime_put;
+	ret = qcom_pcie_parse_ports(pcie);
+	if (ret) {
+		if (ret != -ENOENT) {
+			dev_err_probe(pci->dev, ret,
+				      "Failed to parse Root Port: %d\n", ret);
+			goto err_pm_runtime_put;
+		}
+
+		/*
+		 * In the case of properties not populated in Root Port node,
+		 * fallback to the legacy method of parsing the Host Bridge
+		 * node. This is to maintain DT backwards compatibility.
+		 */
+		ret = qcom_pcie_parse_legacy_binding(pcie);
+		if (ret)
+			goto err_pm_runtime_put;
+	}
 
 	platform_set_drvdata(pdev, pcie);
 
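
The probe-time selection prefers the per-Root-Port description and only falls back to the host-bridge-level "perst"/"pciephy" resources when no child node supplied one (-ENOENT), which keeps existing devicetrees working. Condensed into a hypothetical wrapper (error reporting omitted), the flow is:

/* Sketch of the probe flow above; qcom_pcie_probe() also logs hard failures. */
static int example_parse_resources(struct qcom_pcie *pcie)
{
	int ret = qcom_pcie_parse_ports(pcie);

	if (ret == -ENOENT)
		ret = qcom_pcie_parse_legacy_binding(pcie);

	return ret;
}
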
@@ -1836,7 +1983,9 @@ static int qcom_pcie_probe(struct platform_device *pdev)
 err_host_deinit:
 	dw_pcie_host_deinit(pp);
 err_phy_exit:
-	phy_exit(pcie->phy);
+	qcom_pcie_phy_exit(pcie);
+	list_for_each_entry_safe(port, tmp, &pcie->ports, list)
+		list_del(&port->list);
 err_pm_runtime_put:
 	pm_runtime_put(dev);
 	pm_runtime_disable(dev);