Skip to content

Commit e917746

Browse files
drivers: clock_management: add framework-wide lock
Add a framework-wide mutex that serializes access to the clock framework while any thread is executing one of the user-facing APIs. Signed-off-by: Daniel DeGrasse <[email protected]>
1 parent c3b6d7d commit e917746

File tree

2 files changed

+67
-26
lines changed

2 files changed

+67
-26
lines changed

drivers/clock_management/clock_management_common.c

Lines changed: 61 additions & 26 deletions
Original file line numberDiff line numberDiff line change
@@ -4,6 +4,7 @@
44
* SPDX-License-Identifier: Apache-2.0
55
*/
66

7+
#include <zephyr/kernel.h>
78
#include <zephyr/drivers/clock_management.h>
89
#include <zephyr/drivers/clock_management/clock_helpers.h>
910
#include <string.h>
@@ -26,6 +27,8 @@ LOG_MODULE_REGISTER(clock_management, CONFIG_CLOCK_MANAGEMENT_LOG_LEVEL);
2627
#define GET_CLK_CORE(clk) ((const struct clk *)clk)
2728
#endif
2829

30+
K_MUTEX_DEFINE(clock_management_mutex);
31+
2932
/** Calculates clock rank factor, which scales with frequency */
3033
#define CLK_RANK(clk_hw, freq) \
3134
(((clk_hw)->rank) + (((uint32_t)(clk_hw)->rank_factor) * (freq)))
@@ -958,15 +961,20 @@ static int clock_apply_state(const struct clk *clk_hw,
958961
int clock_management_get_rate(const struct clock_output *clk)
959962
{
960963
const struct clock_output_data *data;
964+
int ret;
961965

962966
if (!clk) {
963967
return -EINVAL;
964968
}
965969

966-
data = GET_CLK_CORE(clk)->hw_data;
970+
k_mutex_lock(&clock_management_mutex, K_FOREVER);
967971

972+
data = GET_CLK_CORE(clk)->hw_data;
968973
/* Read rate */
969-
return clock_management_clk_rate(data->parent);
974+
ret = clock_management_clk_rate(data->parent);
975+
976+
k_mutex_unlock(&clock_management_mutex);
977+
return ret;
970978
}
971979

972980
static int clock_management_onoff(const struct clk *clk_hw, bool on)
@@ -1034,8 +1042,14 @@ static int clock_management_onoff(const struct clk *clk_hw, bool on)
10341042
int clock_management_on(const struct clock_output *clk)
10351043
{
10361044
const struct clock_output_data *data = GET_CLK_CORE(clk)->hw_data;
1045+
int ret;
1046+
1047+
k_mutex_lock(&clock_management_mutex, K_FOREVER);
1048+
1049+
ret = clock_management_onoff(data->parent, true);
10371050

1038-
return clock_management_onoff(data->parent, true);
1051+
k_mutex_unlock(&clock_management_mutex);
1052+
return ret;
10391053
}
10401054

10411055
/**
@@ -1053,8 +1067,15 @@ int clock_management_on(const struct clock_output *clk)
10531067
int clock_management_off(const struct clock_output *clk)
10541068
{
10551069
const struct clock_output_data *data = GET_CLK_CORE(clk)->hw_data;
1070+
int ret;
1071+
1072+
k_mutex_lock(&clock_management_mutex, K_FOREVER);
1073+
1074+
ret = clock_management_onoff(data->parent, false);
1075+
1076+
k_mutex_unlock(&clock_management_mutex);
1077+
return ret;
10561078

1057-
return clock_management_onoff(data->parent, false);
10581079
}
10591080

10601081
/**
@@ -1089,6 +1110,8 @@ int clock_management_req_rate(const struct clock_output *clk,
10891110
return -EINVAL;
10901111
}
10911112

1113+
k_mutex_lock(&clock_management_mutex, K_FOREVER);
1114+
10921115
data = GET_CLK_CORE(clk)->hw_data;
10931116

10941117
#ifdef CONFIG_CLOCK_MANAGEMENT_RUNTIME
@@ -1104,7 +1127,8 @@ int clock_management_req_rate(const struct clock_output *clk,
11041127
*/
11051128
if ((new_req.min_freq > req->max_freq) ||
11061129
(new_req.max_freq < req->min_freq)) {
1107-
return -ENOENT;
1130+
ret = -ENOENT;
1131+
goto out;
11081132
}
11091133
/*
11101134
* We now know the new constraint is compatible. Now, save the
@@ -1175,16 +1199,13 @@ int clock_management_req_rate(const struct clock_output *clk,
11751199
/* No best setting was found, try runtime clock setting */
11761200
ret = clock_management_round_internal(data->parent, combined_req,
11771201
&best_rank, false);
1178-
if (ret < 0) {
1179-
return ret;
1180-
}
11811202
out:
11821203
if (ret >= 0) {
11831204
/* A frequency was returned, check if it satisfies constraints */
11841205
if ((combined_req->min_freq > ret) ||
11851206
(combined_req->max_freq < ret) ||
11861207
(best_rank > combined_req->max_rank)) {
1187-
return -ENOENT;
1208+
ret = -ENOENT;
11881209
}
11891210
}
11901211
#ifdef CONFIG_CLOCK_MANAGEMENT_SET_RATE
@@ -1195,11 +1216,15 @@ int clock_management_req_rate(const struct clock_output *clk,
11951216
}
11961217
#endif
11971218
#ifdef CONFIG_CLOCK_MANAGEMENT_RUNTIME
1198-
/* New clock state applied. Save the new combined constraint set. */
1199-
memcpy(data->combined_req, combined_req, sizeof(*data->combined_req));
1200-
/* Save the new constraint set for the consumer */
1201-
memcpy(clk->req, req, sizeof(*clk->req));
1219+
if (ret >= 0) {
1220+
/* New clock state applied. Save the new combined constraint set. */
1221+
memcpy(data->combined_req, combined_req, sizeof(*data->combined_req));
1222+
/* Save the new constraint set for the consumer */
1223+
memcpy(clk->req, req, sizeof(*clk->req));
1224+
}
12021225
#endif
1226+
1227+
k_mutex_unlock(&clock_management_mutex);
12031228
return ret;
12041229
}
12051230

@@ -1232,6 +1257,8 @@ int clock_management_req_ranked(const struct clock_output *clk,
12321257
return -EINVAL;
12331258
}
12341259

1260+
k_mutex_lock(&clock_management_mutex, K_FOREVER);
1261+
12351262
data = GET_CLK_CORE(clk)->hw_data;
12361263

12371264
#ifdef CONFIG_CLOCK_MANAGEMENT_RUNTIME
@@ -1247,7 +1274,8 @@ int clock_management_req_ranked(const struct clock_output *clk,
12471274
*/
12481275
if ((new_req.min_freq > req->max_freq) ||
12491276
(new_req.max_freq < req->min_freq)) {
1250-
return -ENOENT;
1277+
ret = -ENOENT;
1278+
goto out;
12511279
}
12521280
/*
12531281
* We now know the new constraint is compatible. Now, save the
@@ -1314,16 +1342,13 @@ int clock_management_req_ranked(const struct clock_output *clk,
13141342
/* No best setting was found, try runtime clock setting */
13151343
ret = clock_management_round_internal(data->parent, combined_req,
13161344
&best_rank, true);
1317-
if (ret < 0) {
1318-
return ret;
1319-
}
13201345
out:
13211346
if (ret >= 0) {
13221347
/* A frequency was returned, check if it satisfies constraints */
13231348
if ((combined_req->min_freq > ret) ||
13241349
(combined_req->max_freq < ret) ||
13251350
(best_rank > combined_req->max_rank)) {
1326-
return -ENOENT;
1351+
ret = -ENOENT;
13271352
}
13281353
}
13291354
#ifdef CONFIG_CLOCK_MANAGEMENT_SET_RATE
@@ -1333,11 +1358,14 @@ int clock_management_req_ranked(const struct clock_output *clk,
13331358
}
13341359
#endif
13351360
#ifdef CONFIG_CLOCK_MANAGEMENT_RUNTIME
1336-
/* New clock state applied. Save the new combined constraint set. */
1337-
memcpy(data->combined_req, combined_req, sizeof(*data->combined_req));
1338-
/* Save the new constraint set for the consumer */
1339-
memcpy(clk->req, req, sizeof(*clk->req));
1361+
if (ret >= 0) {
1362+
/* New clock state applied. Save the new combined constraint set. */
1363+
memcpy(data->combined_req, combined_req, sizeof(*data->combined_req));
1364+
/* Save the new constraint set for the consumer */
1365+
memcpy(clk->req, req, sizeof(*clk->req));
1366+
}
13401367
#endif
1368+
k_mutex_unlock(&clock_management_mutex);
13411369
return ret;
13421370
}
13431371

@@ -1365,10 +1393,13 @@ int clock_management_apply_state(const struct clock_output *clk,
13651393
return -EINVAL;
13661394
}
13671395

1396+
k_mutex_lock(&clock_management_mutex, K_FOREVER);
1397+
13681398
data = GET_CLK_CORE(clk)->hw_data;
13691399

13701400
if (state >= data->num_states) {
1371-
return -EINVAL;
1401+
ret = -EINVAL;
1402+
goto out;
13721403
}
13731404

13741405
clk_state = data->output_states[state];
@@ -1381,7 +1412,8 @@ int clock_management_apply_state(const struct clock_output *clk,
13811412
/* Make sure this state fits within other consumer's constraints */
13821413
if ((temp.min_freq > clk_state->frequency) ||
13831414
(temp.max_freq < clk_state->frequency)) {
1384-
return -EINVAL;
1415+
ret = -EINVAL;
1416+
goto out;
13851417
}
13861418

13871419
/* Save new constraint set */
@@ -1390,8 +1422,9 @@ int clock_management_apply_state(const struct clock_output *clk,
13901422

13911423
ret = clock_apply_state(GET_CLK_CORE(clk), clk_state);
13921424
if (ret < 0) {
1393-
return ret;
1425+
goto out;
13941426
}
1427+
ret = clk_state->frequency;
13951428
#ifdef CONFIG_CLOCK_MANAGEMENT_RUNTIME
13961429
if (clk_state->locking) {
13971430
/* Set a constraint based on this clock state */
@@ -1409,7 +1442,9 @@ int clock_management_apply_state(const struct clock_output *clk,
14091442
memcpy(clk->req, &constraint, sizeof(*clk->req));
14101443
}
14111444
#endif
1412-
return clk_state->frequency;
1445+
out:
1446+
k_mutex_unlock(&clock_management_mutex);
1447+
return ret;
14131448
}
14141449

14151450
#define CLOCK_STATE_NAME(node) \

include/zephyr/drivers/clock_management.h

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -20,6 +20,7 @@
2020
*/
2121

2222
#include <zephyr/drivers/clock_management/clock_driver.h>
23+
#include <zephyr/kernel.h>
2324
#include <errno.h>
2425

2526
#ifdef __cplusplus
@@ -575,8 +576,13 @@ static inline int clock_management_set_callback(const struct clock_output *clk,
575576
return -EINVAL;
576577
}
577578

579+
extern struct k_mutex clock_management_mutex;
580+
581+
k_mutex_lock(&clock_management_mutex, K_FOREVER);
582+
578583
clk->cb->clock_callback = callback;
579584
clk->cb->user_data = user_data;
585+
k_mutex_unlock(&clock_management_mutex);
580586
return 0;
581587
#else
582588
return -ENOTSUP;

0 commit comments

Comments
 (0)