|
| 1 | +/* |
| 2 | + * Copyright (c) 2024 Nordic Semiconductor ASA |
| 3 | + * SPDX-License-Identifier: Apache-2.0 |
| 4 | + */ |
| 5 | + |
| 6 | +#include <errno.h> |
| 7 | + |
| 8 | +#include <zephyr/init.h> |
| 9 | +#include <zephyr/logging/log.h> |
| 10 | +#include <zephyr/sys/atomic.h> |
| 11 | +#include <zephyr/sys/onoff.h> |
| 12 | +#include <zephyr/spinlock.h> |
| 13 | +#include <zephyr/sys/util.h> |
| 14 | + |
| 15 | +#include <nrf/gpd.h> |
| 16 | +#include <nrfs_gdpwr.h> |
| 17 | +#include <nrfs_backend_ipc_service.h> |
| 18 | + |
LOG_MODULE_REGISTER(gpd, CONFIG_SOC_LOG_LEVEL);

/* enforce alignment between DT<->nrfs */
BUILD_ASSERT(GDPWR_POWER_DOMAIN_ACTIVE_FAST == NRF_GPD_FAST_ACTIVE1);
BUILD_ASSERT(GDPWR_POWER_DOMAIN_ACTIVE_SLOW == NRF_GPD_SLOW_ACTIVE);
BUILD_ASSERT(GDPWR_POWER_DOMAIN_MAIN_SLOW == NRF_GPD_SLOW_MAIN);
| 25 | + |
/* Binds one global power domain to a generic on/off manager instance. */
struct gpd_onoff_manager {
	struct onoff_manager mgr; /* reference-counted request/release bookkeeping */
	onoff_notify_fn notify;   /* completion callback latched by start()/stop() */
	uint8_t id;               /* NRF_GPD_* domain identifier passed to nRFs */
};
| 31 | + |
/* On/off transition callbacks, defined below. */
static void start(struct onoff_manager *mgr, onoff_notify_fn notify);
static void stop(struct onoff_manager *mgr, onoff_notify_fn notify);

/* Maximum time to wait for nRFs to confirm a GDPWR request during sync. */
#define GPD_READY_TIMEOUT_MS 1000

/* Bits of gpd_service_status, set from evt_handler()/nrf_gpd_post_init(). */
#define GPD_SERVICE_READY BIT(0)
#define GPD_SERVICE_ERROR BIT(1)
#define GPD_SERVICE_REQ_OK BIT(2)
#define GPD_SERVICE_REQ_ERR BIT(3)
static atomic_t gpd_service_status = ATOMIC_INIT(0);

/* One manager instance per supported global power domain. */
static struct gpd_onoff_manager fast_active1 = {.id = NRF_GPD_FAST_ACTIVE1};
static struct gpd_onoff_manager slow_active = {.id = NRF_GPD_SLOW_ACTIVE};
static struct gpd_onoff_manager slow_main = {.id = NRF_GPD_SLOW_MAIN};

/* No reset transition: domains only move between on and off. */
static const struct onoff_transitions transitions =
	ONOFF_TRANSITIONS_INITIALIZER(start, stop, NULL);
| 49 | + |
| 50 | +static struct gpd_onoff_manager *get_mgr(uint8_t id) |
| 51 | +{ |
| 52 | + switch (id) { |
| 53 | + case NRF_GPD_FAST_ACTIVE1: |
| 54 | + return &fast_active1; |
| 55 | + case NRF_GPD_SLOW_ACTIVE: |
| 56 | + return &slow_active; |
| 57 | + case NRF_GPD_SLOW_MAIN: |
| 58 | + return &slow_main; |
| 59 | + default: |
| 60 | + return NULL; |
| 61 | + } |
| 62 | +} |
| 63 | + |
| 64 | +static int nrf_gpd_sync(struct gpd_onoff_manager *gpd_mgr) |
| 65 | +{ |
| 66 | + int64_t start; |
| 67 | + nrfs_err_t err; |
| 68 | + gdpwr_request_type_t request; |
| 69 | + |
| 70 | + K_SPINLOCK(&gpd_mgr->mgr.lock) { |
| 71 | + if (gpd_mgr->mgr.refs == 0) { |
| 72 | + request = GDPWR_POWER_REQUEST_CLEAR; |
| 73 | + } else { |
| 74 | + request = GDPWR_POWER_REQUEST_SET; |
| 75 | + } |
| 76 | + } |
| 77 | + |
| 78 | + atomic_clear_bit(&gpd_service_status, GPD_SERVICE_REQ_ERR); |
| 79 | + atomic_clear_bit(&gpd_service_status, GPD_SERVICE_REQ_OK); |
| 80 | + |
| 81 | + err = nrfs_gdpwr_power_request(gpd_mgr->id, request, gpd_mgr); |
| 82 | + if (err != NRFS_SUCCESS) { |
| 83 | + return -EIO; |
| 84 | + } |
| 85 | + |
| 86 | + start = k_uptime_get(); |
| 87 | + while (k_uptime_get() - start < GPD_READY_TIMEOUT_MS) { |
| 88 | + if (atomic_test_bit(&gpd_service_status, GPD_SERVICE_REQ_ERR)) { |
| 89 | + return -EIO; |
| 90 | + } |
| 91 | + |
| 92 | + if (atomic_test_bit(&gpd_service_status, GPD_SERVICE_REQ_OK)) { |
| 93 | + return 0; |
| 94 | + } |
| 95 | + } |
| 96 | + |
| 97 | + LOG_ERR("nRFs GDPWR request timed out"); |
| 98 | + |
| 99 | + return -ETIMEDOUT; |
| 100 | +} |
| 101 | + |
| 102 | +static void evt_handler(nrfs_gdpwr_evt_t const *p_evt, void *context) |
| 103 | +{ |
| 104 | + if (atomic_test_bit(&gpd_service_status, GPD_SERVICE_READY)) { |
| 105 | + struct gpd_onoff_manager *gpd_mgr = context; |
| 106 | + |
| 107 | + switch (p_evt->type) { |
| 108 | + case NRFS_GDPWR_REQ_APPLIED: |
| 109 | + gpd_mgr->notify(&gpd_mgr->mgr, 0); |
| 110 | + break; |
| 111 | + default: |
| 112 | + LOG_ERR("nRFs GDPWR request not applied"); |
| 113 | + gpd_mgr->notify(&gpd_mgr->mgr, -EIO); |
| 114 | + break; |
| 115 | + } |
| 116 | + } else { |
| 117 | + switch (p_evt->type) { |
| 118 | + case NRFS_GDPWR_REQ_APPLIED: |
| 119 | + atomic_set_bit(&gpd_service_status, GPD_SERVICE_REQ_OK); |
| 120 | + break; |
| 121 | + default: |
| 122 | + LOG_ERR("nRFs GDPWR request not applied"); |
| 123 | + atomic_set_bit(&gpd_service_status, GPD_SERVICE_REQ_ERR); |
| 124 | + break; |
| 125 | + } |
| 126 | + } |
| 127 | +} |
| 128 | + |
| 129 | +static void start(struct onoff_manager *mgr, onoff_notify_fn notify) |
| 130 | +{ |
| 131 | + struct gpd_onoff_manager *gpd_mgr = CONTAINER_OF(mgr, struct gpd_onoff_manager, mgr); |
| 132 | + |
| 133 | + gpd_mgr->notify = notify; |
| 134 | + |
| 135 | + if (!atomic_test_bit(&gpd_service_status, GPD_SERVICE_READY)) { |
| 136 | + notify(mgr, 0); |
| 137 | + } else { |
| 138 | + nrfs_err_t err; |
| 139 | + |
| 140 | + err = nrfs_gdpwr_power_request(gpd_mgr->id, GDPWR_POWER_REQUEST_SET, gpd_mgr); |
| 141 | + if (err != NRFS_SUCCESS) { |
| 142 | + LOG_ERR("nRFs GDPWR request failed (%d)", err); |
| 143 | + notify(mgr, -EIO); |
| 144 | + } |
| 145 | + } |
| 146 | +} |
| 147 | + |
| 148 | +static void stop(struct onoff_manager *mgr, onoff_notify_fn notify) |
| 149 | +{ |
| 150 | + struct gpd_onoff_manager *gpd_mgr = CONTAINER_OF(mgr, struct gpd_onoff_manager, mgr); |
| 151 | + |
| 152 | + gpd_mgr->notify = notify; |
| 153 | + |
| 154 | + if (!atomic_test_bit(&gpd_service_status, GPD_SERVICE_READY)) { |
| 155 | + notify(mgr, 0); |
| 156 | + } else { |
| 157 | + nrfs_err_t err; |
| 158 | + |
| 159 | + err = nrfs_gdpwr_power_request(gpd_mgr->id, GDPWR_POWER_REQUEST_CLEAR, gpd_mgr); |
| 160 | + if (err != NRFS_SUCCESS) { |
| 161 | + LOG_ERR("nRFs GDPWR request failed (%d)", err); |
| 162 | + notify(mgr, -EIO); |
| 163 | + } |
| 164 | + } |
| 165 | +} |
| 166 | + |
| 167 | +int nrf_gpd_request(uint8_t id) |
| 168 | +{ |
| 169 | + int ret; |
| 170 | + struct onoff_client client; |
| 171 | + struct gpd_onoff_manager *gpd_mgr; |
| 172 | + |
| 173 | + gpd_mgr = get_mgr(id); |
| 174 | + if (gpd_mgr == NULL) { |
| 175 | + return -EINVAL; |
| 176 | + } |
| 177 | + |
| 178 | + if (atomic_test_bit(&gpd_service_status, GPD_SERVICE_ERROR)) { |
| 179 | + LOG_ERR("GPD service did not initialize properly"); |
| 180 | + return -EIO; |
| 181 | + } |
| 182 | + |
| 183 | + sys_notify_init_spinwait(&client.notify); |
| 184 | + |
| 185 | + onoff_request(&gpd_mgr->mgr, &client); |
| 186 | + |
| 187 | + while (sys_notify_fetch_result(&client.notify, &ret) == -EAGAIN) { |
| 188 | + } |
| 189 | + |
| 190 | + return ret; |
| 191 | +} |
| 192 | + |
| 193 | +int nrf_gpd_release(uint8_t id) |
| 194 | +{ |
| 195 | + struct gpd_onoff_manager *gpd_mgr; |
| 196 | + |
| 197 | + gpd_mgr = get_mgr(id); |
| 198 | + if (gpd_mgr == NULL) { |
| 199 | + return -EINVAL; |
| 200 | + } |
| 201 | + |
| 202 | + if (atomic_test_bit(&gpd_service_status, GPD_SERVICE_ERROR)) { |
| 203 | + LOG_ERR("GPD service did not initialize properly"); |
| 204 | + return -EIO; |
| 205 | + } |
| 206 | + |
| 207 | + return onoff_release(&gpd_mgr->mgr); |
| 208 | +} |
| 209 | + |
| 210 | +static int nrf_gpd_pre_init(void) |
| 211 | +{ |
| 212 | + int ret; |
| 213 | + |
| 214 | + ret = onoff_manager_init(&fast_active1.mgr, &transitions); |
| 215 | + if (ret < 0) { |
| 216 | + return ret; |
| 217 | + } |
| 218 | + |
| 219 | + ret = onoff_manager_init(&slow_active.mgr, &transitions); |
| 220 | + if (ret < 0) { |
| 221 | + return ret; |
| 222 | + } |
| 223 | + |
| 224 | + ret = onoff_manager_init(&slow_main.mgr, &transitions); |
| 225 | + if (ret < 0) { |
| 226 | + return ret; |
| 227 | + } |
| 228 | + |
| 229 | + return 0; |
| 230 | +} |
| 231 | + |
| 232 | +static int nrf_gpd_post_init(void) |
| 233 | +{ |
| 234 | + nrfs_err_t err; |
| 235 | + int ret; |
| 236 | + |
| 237 | + err = nrfs_backend_wait_for_connection(K_FOREVER); |
| 238 | + if (err != NRFS_SUCCESS) { |
| 239 | + ret = -EIO; |
| 240 | + goto err; |
| 241 | + } |
| 242 | + |
| 243 | + err = nrfs_gdpwr_init(evt_handler); |
| 244 | + if (err != NRFS_SUCCESS) { |
| 245 | + ret = -EIO; |
| 246 | + goto err; |
| 247 | + } |
| 248 | + |
| 249 | + /* submit GD requests now to align collected statuses */ |
| 250 | + ret = nrf_gpd_sync(&fast_active1); |
| 251 | + if (ret < 0) { |
| 252 | + goto err; |
| 253 | + } |
| 254 | + |
| 255 | + ret = nrf_gpd_sync(&slow_active); |
| 256 | + if (ret < 0) { |
| 257 | + goto err; |
| 258 | + } |
| 259 | + |
| 260 | + ret = nrf_gpd_sync(&slow_main); |
| 261 | + if (ret < 0) { |
| 262 | + goto err; |
| 263 | + } |
| 264 | + |
| 265 | + atomic_set_bit(&gpd_service_status, GPD_SERVICE_READY); |
| 266 | + |
| 267 | + return 0; |
| 268 | + |
| 269 | +err: |
| 270 | + atomic_set_bit(&gpd_service_status, GPD_SERVICE_ERROR); |
| 271 | + |
| 272 | + return ret; |
| 273 | +} |
| 274 | + |
/* Managers are initialized early so references can be counted before the
 * nRFs IPC backend exists; the service sync runs at application level.
 */
SYS_INIT(nrf_gpd_pre_init, PRE_KERNEL_1, 0);
SYS_INIT(nrf_gpd_post_init, APPLICATION, CONFIG_APPLICATION_INIT_PRIORITY);
0 commit comments