Commit 560d4e7

Merge tag 'clk-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/clk/linux
Pull clk fixes from Stephen Boyd:
 "A couple clk driver fixes, a build fix, and a deadlock fix:

  - Mediatek mt7988 has broken PCIe because the wrong parent is used

  - Mediatek clk drivers may deadlock when registering their clks
    because the clk provider device is repeatedly runtime PM resumed and
    suspended during probe and clk registration. Resuming the clk
    provider device deadlocks with an ABBA deadlock due to genpd_lock
    and the clk prepare_lock. The fix is to keep the device runtime
    resumed while registering clks.

  - Another runtime PM related deadlock, this time with disabling unused
    clks during late init. We get an ABBA deadlock where a device is
    runtime PM resuming (or suspending) while the disabling of unused
    clks is happening in parallel. That runtime PM action calls into the
    clk framework and tries to grab the clk prepare_lock while the
    disabling of unused clks holds the prepare_lock and is waiting for
    that runtime PM action to complete. The fix is to runtime resume all
    the clk provider devices before grabbing the clk prepare_lock during
    disable unused.

  - A build fix to provide an empty devm_clk_rate_exclusive_get()
    function when CONFIG_COMMON_CLK=n"

* tag 'clk-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/clk/linux:
  clk: mediatek: mt7988-infracfg: fix clocks for 2nd PCIe port
  clk: mediatek: Do a runtime PM get on controllers during probe
  clk: Get runtime PM before walking tree for clk_summary
  clk: Get runtime PM before walking tree during disable_unused
  clk: Initialize struct clk_core kref earlier
  clk: Don't hold prepare_lock when calling kref_put()
  clk: Remove prepare_lock hold assertion in __clk_release()
  clk: Provide !COMMON_CLK dummy for devm_clk_rate_exclusive_get()
2 parents 13a2e42 + d3e8a91 commit 560d4e7
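
Both runtime PM fixes target the same ABBA shape. A minimal sketch of the disable-unused case, assuming a clk provider whose runtime PM resume callback calls clk_prepare_enable() (the interleaving below is illustrative; only the entry points named in the commits are real):

	/*
	 * Thread A: clk_disable_unused()      Thread B: runtime PM resume
	 *
	 * clk_prepare_lock()          <- A    runtime PM transition starts <- B
	 * ...walk clk tree...                 driver resume callback runs:
	 * clk_pm_runtime_get(core)              clk_prepare_enable(clk)
	 *   pm_runtime_get_sync(dev)              clk_prepare_lock()
	 *     waits on B's transition              waits on A: ABBA deadlock
	 */

The fix, visible in the clk.c hunks below, is to runtime-resume every provider via clk_pm_runtime_get_all() before taking the prepare_lock, so lock A is never held while waiting on B.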

4 files changed: +156 -39 lines changed

drivers/clk/clk.c

Lines changed: 135 additions & 38 deletions
@@ -37,6 +37,10 @@ static HLIST_HEAD(clk_root_list);
 static HLIST_HEAD(clk_orphan_list);
 static LIST_HEAD(clk_notifier_list);
 
+/* List of registered clks that use runtime PM */
+static HLIST_HEAD(clk_rpm_list);
+static DEFINE_MUTEX(clk_rpm_list_lock);
+
 static const struct hlist_head *all_lists[] = {
 	&clk_root_list,
 	&clk_orphan_list,
@@ -59,6 +63,7 @@ struct clk_core {
 	struct clk_hw		*hw;
 	struct module		*owner;
 	struct device		*dev;
+	struct hlist_node	rpm_node;
 	struct device_node	*of_node;
 	struct clk_core		*parent;
 	struct clk_parent_map	*parents;
@@ -122,6 +127,89 @@ static void clk_pm_runtime_put(struct clk_core *core)
 	pm_runtime_put_sync(core->dev);
 }
 
+/**
+ * clk_pm_runtime_get_all() - Runtime "get" all clk provider devices
+ *
+ * Call clk_pm_runtime_get() on all runtime PM enabled clks in the clk tree so
+ * that disabling unused clks avoids a deadlock where a device is runtime PM
+ * resuming/suspending and the runtime PM callback is trying to grab the
+ * prepare_lock for something like clk_prepare_enable() while
+ * clk_disable_unused_subtree() holds the prepare_lock and is trying to runtime
+ * PM resume/suspend the device as well.
+ *
+ * Context: Acquires the 'clk_rpm_list_lock' and returns with the lock held on
+ * success. Otherwise the lock is released on failure.
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+static int clk_pm_runtime_get_all(void)
+{
+	int ret;
+	struct clk_core *core, *failed;
+
+	/*
+	 * Grab the list lock to prevent any new clks from being registered
+	 * or unregistered until clk_pm_runtime_put_all().
+	 */
+	mutex_lock(&clk_rpm_list_lock);
+
+	/*
+	 * Runtime PM "get" all the devices that are needed for the clks
+	 * currently registered. Do this without holding the prepare_lock, to
+	 * avoid the deadlock.
+	 */
+	hlist_for_each_entry(core, &clk_rpm_list, rpm_node) {
+		ret = clk_pm_runtime_get(core);
+		if (ret) {
+			failed = core;
+			pr_err("clk: Failed to runtime PM get '%s' for clk '%s'\n",
+			       dev_name(failed->dev), failed->name);
+			goto err;
+		}
+	}
+
+	return 0;
+
+err:
+	hlist_for_each_entry(core, &clk_rpm_list, rpm_node) {
+		if (core == failed)
+			break;
+
+		clk_pm_runtime_put(core);
+	}
+	mutex_unlock(&clk_rpm_list_lock);
+
+	return ret;
+}
+
+/**
+ * clk_pm_runtime_put_all() - Runtime "put" all clk provider devices
+ *
+ * Put the runtime PM references taken in clk_pm_runtime_get_all() and release
+ * the 'clk_rpm_list_lock'.
+ */
+static void clk_pm_runtime_put_all(void)
+{
+	struct clk_core *core;
+
+	hlist_for_each_entry(core, &clk_rpm_list, rpm_node)
+		clk_pm_runtime_put(core);
+	mutex_unlock(&clk_rpm_list_lock);
+}
+
+static void clk_pm_runtime_init(struct clk_core *core)
+{
+	struct device *dev = core->dev;
+
+	if (dev && pm_runtime_enabled(dev)) {
+		core->rpm_enabled = true;
+
+		mutex_lock(&clk_rpm_list_lock);
+		hlist_add_head(&core->rpm_node, &clk_rpm_list);
+		mutex_unlock(&clk_rpm_list_lock);
+	}
+}
+
 /***           locking             ***/
 static void clk_prepare_lock(void)
 {
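
These helpers impose a strict nesting that the disable_unused, clk_summary, and clk_dump hunks below all follow. A condensed sketch of that calling pattern, extracted from those hunks with the tree walk elided:

	ret = clk_pm_runtime_get_all();	/* resume providers; returns holding clk_rpm_list_lock */
	if (ret)
		return ret;

	clk_prepare_lock();		/* no runtime PM resume can now occur under prepare_lock */
	/* ... walk the clk tree ... */
	clk_prepare_unlock();

	clk_pm_runtime_put_all();	/* drop the PM references and clk_rpm_list_lock */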
@@ -1381,9 +1469,6 @@ static void __init clk_unprepare_unused_subtree(struct clk_core *core)
 	if (core->flags & CLK_IGNORE_UNUSED)
 		return;
 
-	if (clk_pm_runtime_get(core))
-		return;
-
 	if (clk_core_is_prepared(core)) {
 		trace_clk_unprepare(core);
 		if (core->ops->unprepare_unused)
@@ -1392,8 +1477,6 @@ static void __init clk_unprepare_unused_subtree(struct clk_core *core)
 			core->ops->unprepare(core->hw);
 		trace_clk_unprepare_complete(core);
 	}
-
-	clk_pm_runtime_put(core);
 }
 
 static void __init clk_disable_unused_subtree(struct clk_core *core)
@@ -1409,9 +1492,6 @@ static void __init clk_disable_unused_subtree(struct clk_core *core)
 	if (core->flags & CLK_OPS_PARENT_ENABLE)
 		clk_core_prepare_enable(core->parent);
 
-	if (clk_pm_runtime_get(core))
-		goto unprepare_out;
-
 	flags = clk_enable_lock();
 
 	if (core->enable_count)
@@ -1436,8 +1516,6 @@ static void __init clk_disable_unused_subtree(struct clk_core *core)
 
 unlock_out:
 	clk_enable_unlock(flags);
-	clk_pm_runtime_put(core);
-unprepare_out:
 	if (core->flags & CLK_OPS_PARENT_ENABLE)
 		clk_core_disable_unprepare(core->parent);
 }
@@ -1453,6 +1531,7 @@ __setup("clk_ignore_unused", clk_ignore_unused_setup);
 static int __init clk_disable_unused(void)
 {
 	struct clk_core *core;
+	int ret;
 
 	if (clk_ignore_unused) {
 		pr_warn("clk: Not disabling unused clocks\n");
@@ -1461,6 +1540,13 @@ static int __init clk_disable_unused(void)
 
 	pr_info("clk: Disabling unused clocks\n");
 
+	ret = clk_pm_runtime_get_all();
+	if (ret)
+		return ret;
+	/*
+	 * Grab the prepare lock to keep the clk topology stable while iterating
+	 * over clks.
+	 */
 	clk_prepare_lock();
 
 	hlist_for_each_entry(core, &clk_root_list, child_node)
@@ -1477,6 +1563,8 @@ static int __init clk_disable_unused(void)
 
 	clk_prepare_unlock();
 
+	clk_pm_runtime_put_all();
+
 	return 0;
 }
 late_initcall_sync(clk_disable_unused);
@@ -3252,9 +3340,7 @@ static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c,
 {
 	struct clk_core *child;
 
-	clk_pm_runtime_get(c);
 	clk_summary_show_one(s, c, level);
-	clk_pm_runtime_put(c);
 
 	hlist_for_each_entry(child, &c->children, child_node)
 		clk_summary_show_subtree(s, child, level + 1);
@@ -3264,11 +3350,15 @@ static int clk_summary_show(struct seq_file *s, void *data)
 {
 	struct clk_core *c;
 	struct hlist_head **lists = s->private;
+	int ret;
 
 	seq_puts(s, "                                 enable  prepare  protect                                duty  hardware                            connection\n");
 	seq_puts(s, "   clock                          count    count    count        rate   accuracy phase  cycle    enable   consumer                         id\n");
 	seq_puts(s, "---------------------------------------------------------------------------------------------------------------------------------------------\n");
 
+	ret = clk_pm_runtime_get_all();
+	if (ret)
+		return ret;
 
 	clk_prepare_lock();
 
@@ -3277,6 +3367,7 @@ static int clk_summary_show(struct seq_file *s, void *data)
 		clk_summary_show_subtree(s, c, 0);
 
 	clk_prepare_unlock();
+	clk_pm_runtime_put_all();
 
 	return 0;
 }
@@ -3324,8 +3415,14 @@ static int clk_dump_show(struct seq_file *s, void *data)
 	struct clk_core *c;
 	bool first_node = true;
 	struct hlist_head **lists = s->private;
+	int ret;
+
+	ret = clk_pm_runtime_get_all();
+	if (ret)
+		return ret;
 
 	seq_putc(s, '{');
+
 	clk_prepare_lock();
 
 	for (; *lists; lists++) {
@@ -3338,6 +3435,7 @@ static int clk_dump_show(struct seq_file *s, void *data)
 	}
 
 	clk_prepare_unlock();
+	clk_pm_runtime_put_all();
 
 	seq_puts(s, "}\n");
 	return 0;
@@ -3981,8 +4079,6 @@ static int __clk_core_init(struct clk_core *core)
 	}
 
 	clk_core_reparent_orphans_nolock();
-
-	kref_init(&core->ref);
 out:
 	clk_pm_runtime_put(core);
 unlock:
@@ -4211,6 +4307,22 @@ static void clk_core_free_parent_map(struct clk_core *core)
 	kfree(core->parents);
 }
 
+/* Free memory allocated for a struct clk_core */
+static void __clk_release(struct kref *ref)
+{
+	struct clk_core *core = container_of(ref, struct clk_core, ref);
+
+	if (core->rpm_enabled) {
+		mutex_lock(&clk_rpm_list_lock);
+		hlist_del(&core->rpm_node);
+		mutex_unlock(&clk_rpm_list_lock);
+	}
+
+	clk_core_free_parent_map(core);
+	kfree_const(core->name);
+	kfree(core);
+}
+
 static struct clk *
 __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
 {
@@ -4231,6 +4343,8 @@ __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
 		goto fail_out;
 	}
 
+	kref_init(&core->ref);
+
 	core->name = kstrdup_const(init->name, GFP_KERNEL);
 	if (!core->name) {
 		ret = -ENOMEM;
@@ -4243,9 +4357,8 @@ __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
 	}
 	core->ops = init->ops;
 
-	if (dev && pm_runtime_enabled(dev))
-		core->rpm_enabled = true;
 	core->dev = dev;
+	clk_pm_runtime_init(core);
 	core->of_node = np;
 	if (dev && dev->driver)
 		core->owner = dev->driver->owner;
@@ -4285,12 +4398,10 @@ __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
 	hw->clk = NULL;
 
 fail_create_clk:
-	clk_core_free_parent_map(core);
 fail_parents:
 fail_ops:
-	kfree_const(core->name);
 fail_name:
-	kfree(core);
+	kref_put(&core->ref, __clk_release);
 fail_out:
 	return ERR_PTR(ret);
 }
@@ -4370,18 +4481,6 @@ int of_clk_hw_register(struct device_node *node, struct clk_hw *hw)
 }
 EXPORT_SYMBOL_GPL(of_clk_hw_register);
 
-/* Free memory allocated for a clock. */
-static void __clk_release(struct kref *ref)
-{
-	struct clk_core *core = container_of(ref, struct clk_core, ref);
-
-	lockdep_assert_held(&prepare_lock);
-
-	clk_core_free_parent_map(core);
-	kfree_const(core->name);
-	kfree(core);
-}
-
 /*
  * Empty clk_ops for unregistered clocks. These are used temporarily
  * after clk_unregister() was called on a clock and until last clock
@@ -4472,7 +4571,8 @@ void clk_unregister(struct clk *clk)
 	if (ops == &clk_nodrv_ops) {
 		pr_err("%s: unregistered clock: %s\n", __func__,
 		       clk->core->name);
-		goto unlock;
+		clk_prepare_unlock();
+		return;
 	}
 	/*
 	 * Assign empty clock ops for consumers that might still hold
@@ -4506,11 +4606,10 @@ void clk_unregister(struct clk *clk)
 	if (clk->core->protect_count)
 		pr_warn("%s: unregistering protected clock: %s\n",
 			__func__, clk->core->name);
+	clk_prepare_unlock();
 
 	kref_put(&clk->core->ref, __clk_release);
 	free_clk(clk);
-unlock:
-	clk_prepare_unlock();
 }
 EXPORT_SYMBOL_GPL(clk_unregister);

@@ -4669,13 +4768,11 @@ void __clk_put(struct clk *clk)
 	if (clk->min_rate > 0 || clk->max_rate < ULONG_MAX)
 		clk_set_rate_range_nolock(clk, 0, ULONG_MAX);
 
-	owner = clk->core->owner;
-	kref_put(&clk->core->ref, __clk_release);
-
 	clk_prepare_unlock();
 
+	owner = clk->core->owner;
+	kref_put(&clk->core->ref, __clk_release);
 	module_put(owner);
-
 	free_clk(clk);
 }
drivers/clk/mediatek/clk-mt7988-infracfg.c

Lines changed: 1 addition & 1 deletion
@@ -156,7 +156,7 @@ static const struct mtk_gate infra_clks[] = {
 	GATE_INFRA0(CLK_INFRA_PCIE_PERI_26M_CK_P1, "infra_pcie_peri_ck_26m_ck_p1",
 		    "csw_infra_f26m_sel", 8),
 	GATE_INFRA0(CLK_INFRA_PCIE_PERI_26M_CK_P2, "infra_pcie_peri_ck_26m_ck_p2",
-		    "csw_infra_f26m_sel", 9),
+		    "infra_pcie_peri_ck_26m_ck_p3", 9),
 	GATE_INFRA0(CLK_INFRA_PCIE_PERI_26M_CK_P3, "infra_pcie_peri_ck_26m_ck_p3",
 		    "csw_infra_f26m_sel", 10),
 	/* INFRA1 */
