
Commit 5c03d96

Flavio Ceolin authored and nashif committed
pm: device_runtime: Simplify mutex usage
Assuming that pm_device_state_set is synchronous, it is possible to simplify the mutex usage. The lock is now held in two places: in the workqueue handler and in pm_device_request, to cover the synchronous path. It is no longer necessary to hold the lock in the pm_device_state_set callback, nor to wait on the condition variable after setting the state in the synchronous path.

Signed-off-by: Flavio Ceolin <[email protected]>
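For readers skimming the diff below, here is a minimal sketch of the resulting locking pattern, distilled from this patch for illustration only: the requesting thread takes pm->lock once, waits on the condition variable while a transition is still in flight, runs the now-synchronous state change, and checks the result under the same lock. pm_example_request() is a hypothetical name, types and error handling are simplified, and this is not additional code in the tree.

/*
 * Sketch only: condensed from the patch, using Zephyr's k_mutex/k_condvar
 * APIs as in subsys/pm/device_runtime.c. Not part of the commit.
 */
static int pm_example_request(struct pm_device *pm, uint32_t target_state)
{
	int ret;

	(void)k_mutex_lock(&pm->lock, K_FOREVER);

	/* Wait for any pending work item or in-flight transition to settle. */
	while (k_work_delayable_is_pending(&pm->work) ||
	       (pm->state == PM_DEVICE_STATE_SUSPENDING) ||
	       (pm->state == PM_DEVICE_STATE_RESUMING)) {
		ret = k_condvar_wait(&pm->condvar, &pm->lock, K_FOREVER);
		if (ret != 0) {
			break;
		}
	}

	/*
	 * The state change is assumed synchronous, so pm->state is already
	 * updated when this call returns; no further waiting is needed.
	 */
	pm_device_runtime_state_set(pm);

	ret = (pm->state == target_state) ? 0 : -EIO;

	(void)k_mutex_unlock(&pm->lock);

	return ret;
}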
1 parent d67a578 commit 5c03d96

File tree: 1 file changed (+13, -10 lines)

subsys/pm/device_runtime.c

Lines changed: 13 additions & 10 deletions
@@ -24,9 +24,7 @@ static void device_pm_callback(const struct device *dev,
 {
 	__ASSERT(retval == 0, "Device set power state failed");
 
-	(void)k_mutex_lock(&dev->pm->lock, K_FOREVER);
 	dev->pm->state = *state;
-	(void)k_mutex_unlock(&dev->pm->lock);
 
 	/*
 	 * This function returns the number of woken threads on success. There
@@ -40,8 +38,6 @@ static void pm_device_runtime_state_set(struct pm_device *pm)
 	const struct device *dev = pm->dev;
 	int ret = 0;
 
-	(void)k_mutex_lock(&dev->pm->lock, K_FOREVER);
-
 	switch (dev->pm->state) {
 	case PM_DEVICE_STATE_ACTIVE:
 		if ((dev->pm->usage == 0) && dev->pm->enable) {
@@ -71,24 +67,24 @@ static void pm_device_runtime_state_set(struct pm_device *pm)
 	}
 
 	__ASSERT(ret == 0, "Set Power state error");
-	goto end;
+	return;
 
 handler_out:
 	/*
 	 * This function returns the number of woken threads on success. There
 	 * is nothing we can do with this information. Just ignoring it.
 	 */
 	(void)k_condvar_broadcast(&dev->pm->condvar);
-end:
-	(void)k_mutex_unlock(&dev->pm->lock);
 }
 
 static void pm_work_handler(struct k_work *work)
 {
 	struct pm_device *pm = CONTAINER_OF(work,
 					    struct pm_device, work);
 
+	(void)k_mutex_lock(&pm->lock, K_FOREVER);
 	pm_device_runtime_state_set(pm);
+	(void)k_mutex_unlock(&pm->lock);
 }
 
 static int pm_device_request(const struct device *dev,
@@ -157,16 +153,23 @@ static int pm_device_request(const struct device *dev,
 		goto out_unlock;
 	}
 
+	while ((k_work_delayable_is_pending(&dev->pm->work)) ||
+	       (dev->pm->state == PM_DEVICE_STATE_SUSPENDING) ||
+	       (dev->pm->state == PM_DEVICE_STATE_RESUMING)) {
+		ret = k_condvar_wait(&dev->pm->condvar, &dev->pm->lock,
+				     K_FOREVER);
+		if (ret != 0) {
+			break;
+		}
+	}
+
 	pm_device_runtime_state_set(dev->pm);
-	(void)k_mutex_unlock(&dev->pm->lock);
-	pm_device_wait(dev, K_FOREVER);
 
 	/*
 	 * dev->pm->state was set in device_pm_callback(). As the device
 	 * may not have been properly changed to the target_state or another
 	 * thread we check it here before returning.
	 */
-	(void)k_mutex_lock(&dev->pm->lock, K_FOREVER);
 	ret = target_state == dev->pm->state ? 0 : -EIO;
 
 out_unlock:
