Skip to content

Commit 8379d68

Browse files
committed
[nrf fromlist] soc: nrf54h: gpd: use callback to fetch nrfs async result
Busy-waiting for the result of the nrfs service calls can stall, so use a callback that flags a semaphore instead. Since the API must remain callable in pre-kernel context, fall back to busy-waiting in that scenario. Upstream PR #: 80819 Signed-off-by: Gerard Marull-Paretas <[email protected]> (cherry picked from commit 714d82d232977309e0bdc8dadd2d7a71ee914a4b)
1 parent 712f1fe commit 8379d68

File tree

1 file changed

+49
-6
lines changed
  • soc/nordic/nrf54h/gpd

1 file changed

+49
-6
lines changed

soc/nordic/nrf54h/gpd/gpd.c

Lines changed: 49 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -28,6 +28,9 @@ struct gpd_onoff_manager {
2828
struct onoff_manager mgr;
2929
onoff_notify_fn notify;
3030
uint8_t id;
31+
struct k_mutex lock;
32+
struct k_sem sem;
33+
int res;
3134
};
3235

3336
static void start(struct onoff_manager *mgr, onoff_notify_fn notify);
@@ -41,9 +44,21 @@ static void stop(struct onoff_manager *mgr, onoff_notify_fn notify);
4144
#define GPD_SERVICE_REQ_ERR BIT(3)
4245
static atomic_t gpd_service_status = ATOMIC_INIT(0);
4346

44-
static struct gpd_onoff_manager fast_active1 = {.id = NRF_GPD_FAST_ACTIVE1};
45-
static struct gpd_onoff_manager slow_active = {.id = NRF_GPD_SLOW_ACTIVE};
46-
static struct gpd_onoff_manager slow_main = {.id = NRF_GPD_SLOW_MAIN};
47+
static struct gpd_onoff_manager fast_active1 = {
48+
.id = NRF_GPD_FAST_ACTIVE1,
49+
.lock = Z_MUTEX_INITIALIZER(fast_active1.lock),
50+
.sem = Z_SEM_INITIALIZER(fast_active1.sem, 0, 1),
51+
};
52+
static struct gpd_onoff_manager slow_active = {
53+
.id = NRF_GPD_SLOW_ACTIVE,
54+
.lock = Z_MUTEX_INITIALIZER(slow_active.lock),
55+
.sem = Z_SEM_INITIALIZER(slow_active.sem, 0, 1),
56+
};
57+
static struct gpd_onoff_manager slow_main = {
58+
.id = NRF_GPD_SLOW_MAIN,
59+
.lock = Z_MUTEX_INITIALIZER(slow_main.lock),
60+
.sem = Z_SEM_INITIALIZER(slow_main.sem, 0, 1),
61+
};
4762

4863
static const struct onoff_transitions transitions =
4964
ONOFF_TRANSITIONS_INITIALIZER(start, stop, NULL);
@@ -62,6 +77,18 @@ static struct gpd_onoff_manager *get_mgr(uint8_t id)
6277
}
6378
}
6479

80+
static void request_cb(struct onoff_manager *mgr_, struct onoff_client *cli, uint32_t state,
81+
int res)
82+
{
83+
ARG_UNUSED(cli);
84+
ARG_UNUSED(state);
85+
86+
struct gpd_onoff_manager *gpd_mgr = CONTAINER_OF(mgr_, struct gpd_onoff_manager, mgr);
87+
88+
gpd_mgr->res = res;
89+
k_sem_give(&gpd_mgr->sem);
90+
}
91+
6592
static int nrf_gpd_sync(struct gpd_onoff_manager *gpd_mgr)
6693
{
6794
int64_t start;
@@ -181,11 +208,27 @@ int nrf_gpd_request(uint8_t id)
181208
return -EIO;
182209
}
183210

184-
sys_notify_init_spinwait(&client.notify);
211+
if (k_is_pre_kernel()) {
212+
sys_notify_init_spinwait(&client.notify);
213+
214+
ret = onoff_request(&gpd_mgr->mgr, &client);
215+
if (ret < 0) {
216+
return ret;
217+
}
185218

186-
onoff_request(&gpd_mgr->mgr, &client);
219+
while (sys_notify_fetch_result(&client.notify, &ret) == -EAGAIN) {
220+
}
221+
} else {
222+
sys_notify_init_callback(&client.notify, request_cb);
223+
k_mutex_lock(&gpd_mgr->lock, K_FOREVER);
224+
225+
ret = onoff_request(&gpd_mgr->mgr, &client);
226+
if (ret >= 0) {
227+
(void)k_sem_take(&gpd_mgr->sem, K_FOREVER);
228+
ret = gpd_mgr->res;
229+
}
187230

188-
while (sys_notify_fetch_result(&client.notify, &ret) == -EAGAIN) {
231+
k_mutex_unlock(&gpd_mgr->lock);
189232
}
190233

191234
return ret;

0 commit comments

Comments
 (0)