diff --git a/include/zephyr/pm/policy.h b/include/zephyr/pm/policy.h
index e1e910f3e0a..057488d722d 100644
--- a/include/zephyr/pm/policy.h
+++ b/include/zephyr/pm/policy.h
@@ -67,7 +67,7 @@ struct pm_policy_latency_request {
 struct pm_policy_event {
 	/** @cond INTERNAL_HIDDEN */
 	sys_snode_t node;
-	uint32_t value_cyc;
+	int64_t uptime_ticks;
 	/** @endcond */
 };
 
@@ -137,7 +137,6 @@ void pm_policy_state_lock_put(enum pm_state state, uint8_t substate_id);
  */
 bool pm_policy_state_lock_is_active(enum pm_state state, uint8_t substate_id);
 
-
 /**
  * @brief Register an event.
  *
@@ -145,30 +144,31 @@ bool pm_policy_state_lock_is_active(enum pm_state state, uint8_t substate_id);
  * will wake up the system at a known time in the future. By registering such
  * event, the policy manager will be able to decide whether certain power states
  * are worth entering or not.
- * CPU is woken up before the time passed in cycle to prevent the event handling
- * latency
  *
- * @note It is mandatory to unregister events once they have happened by using
- * pm_policy_event_unregister(). Not doing so is an API contract violation,
- * because the system would continue to consider them as valid events in the
- * *far* future, that is, after the cycle counter rollover.
+ * The CPU is woken up before the time passed in uptime_ticks to minimize event
+ * handling latency. Once woken up, the CPU will be kept awake until the event
+ * has been handled, which is signaled by calling pm_policy_event_unregister()
+ * or by moving the event into the future using pm_policy_event_update().
 *
  * @param evt Event.
- * @param cycle When the event will occur, in absolute time (cycles).
+ * @param uptime_ticks When the event will occur, in uptime ticks.
 *
- * @see pm_policy_event_unregister
+ * @see pm_policy_event_unregister()
  */
-void pm_policy_event_register(struct pm_policy_event *evt, uint32_t cycle);
+void pm_policy_event_register(struct pm_policy_event *evt, int64_t uptime_ticks);
 
 /**
  * @brief Update an event.
  *
+ * This shortcut allows moving the time an event will occur without the
+ * need for an unregister + register cycle.
+ *
  * @param evt Event.
- * @param cycle When the event will occur, in absolute time (cycles).
+ * @param uptime_ticks When the event will occur, in uptime ticks.
 *
  * @see pm_policy_event_register
  */
-void pm_policy_event_update(struct pm_policy_event *evt, uint32_t cycle);
+void pm_policy_event_update(struct pm_policy_event *evt, int64_t uptime_ticks);
 
 /**
  * @brief Unregister an event.
@@ -208,10 +208,14 @@ void pm_policy_device_power_lock_put(const struct device *dev);
 /**
  * @brief Returns the ticks until the next event
  *
- * If an event is registred, it will return the number of ticks until the next event as
- * a positive or zero value. Otherwise it returns -1
+ * If an event is registered, it will return the number of ticks until the next event. If the
+ * next (oldest) registered event is in the past, it will return 0. Otherwise it returns -1.
+ *
+ * @retval >0 If next registered event is in the future
+ * @retval 0 If next registered event is now or in the past
+ * @retval -1 Otherwise
  */
-int32_t pm_policy_next_event_ticks(void);
+int64_t pm_policy_next_event_ticks(void);
 #else
 static inline void pm_policy_state_lock_get(enum pm_state state, uint8_t substate_id)
 {
@@ -261,7 +265,7 @@ static inline void pm_policy_device_power_lock_put(const struct device *dev)
 	ARG_UNUSED(dev);
 }
 
-static inline int32_t pm_policy_next_event_ticks(void)
+static inline int64_t pm_policy_next_event_ticks(void)
 {
 	return -1;
 }
diff --git a/subsys/pm/policy/policy_default.c b/subsys/pm/policy/policy_default.c
index 74a0443e6cd..eaf317d24cf 100644
--- a/subsys/pm/policy/policy_default.c
+++ b/subsys/pm/policy/policy_default.c
@@ -9,7 +9,6 @@
 #include 
 #include 
 
-extern struct pm_policy_event *next_event;
 extern int32_t max_latency_cyc;
 
 const struct pm_state_info *pm_policy_next_state(uint8_t cpu, int32_t ticks)
@@ -30,27 +29,6 @@ const struct pm_state_info *pm_policy_next_state(uint8_t cpu, int32_t ticks)
 
 	num_cpu_states = pm_state_cpu_get_all(cpu, &cpu_states);
 
-	if ((next_event) && (next_event->value_cyc >= 0)) {
-		uint32_t cyc_curr = k_cycle_get_32();
-		int64_t cyc_evt = next_event->value_cyc - cyc_curr;
-
-		/* event happening after cycle counter max value, pad */
-		if (next_event->value_cyc <= cyc_curr) {
-			cyc_evt += UINT32_MAX;
-		}
-
-		if (cyc_evt > 0) {
-			/* if there's no system wakeup event always wins,
-			 * otherwise, who comes earlier wins
-			 */
-			if (cyc < 0) {
-				cyc = cyc_evt;
-			} else {
-				cyc = MIN(cyc, cyc_evt);
-			}
-		}
-	}
-
 	for (int16_t i = (int16_t)num_cpu_states - 1; i >= 0; i--) {
 		const struct pm_state_info *state = &cpu_states[i];
 		uint32_t min_residency_cyc, exit_latency_cyc;
diff --git a/subsys/pm/policy/policy_events.c b/subsys/pm/policy/policy_events.c
index dc06bffdf55..4a689d3f36c 100644
--- a/subsys/pm/policy/policy_events.c
+++ b/subsys/pm/policy/policy_events.c
@@ -20,82 +20,66 @@ static sys_slist_t events_list;
 /** Pointer to Next Event. */
 struct pm_policy_event *next_event;
 
-/** @brief Update next event. */
-static void update_next_event(uint32_t cyc)
+static void update_next_event(void)
 {
-	int64_t new_next_event_cyc = -1;
 	struct pm_policy_event *evt;
 
-	/* unset the next event pointer */
 	next_event = NULL;
 
 	SYS_SLIST_FOR_EACH_CONTAINER(&events_list, evt, node) {
-		uint64_t cyc_evt = evt->value_cyc;
-
-		/*
-		 * cyc value is a 32-bit rolling counter:
-		 *
-		 * |---------------->-----------------------|
-		 * 0              cyc                UINT32_MAX
-		 *
-		 * Values from [0, cyc) are events happening later than
-		 * [cyc, UINT32_MAX], so pad [0, cyc) with UINT32_MAX + 1 to do
-		 * the comparison.
-		 */
-		if (cyc_evt < cyc) {
-			cyc_evt += (uint64_t)UINT32_MAX + 1U;
+		if (next_event == NULL) {
+			next_event = evt;
+			continue;
 		}
 
-		if ((new_next_event_cyc < 0) || (cyc_evt < new_next_event_cyc)) {
-			new_next_event_cyc = cyc_evt;
-			next_event = evt;
+		if (next_event->uptime_ticks <= evt->uptime_ticks) {
+			continue;
 		}
+
+		next_event = evt;
 	}
 }
 
-int32_t pm_policy_next_event_ticks(void)
+int64_t pm_policy_next_event_ticks(void)
 {
-	int32_t cyc_evt = -1;
-
-	if ((next_event) && (next_event->value_cyc > 0)) {
-		cyc_evt = next_event->value_cyc - k_cycle_get_32();
-		cyc_evt = MAX(0, cyc_evt);
-		BUILD_ASSERT(CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC >= CONFIG_SYS_CLOCK_TICKS_PER_SEC,
-			     "HW Cycles per sec should be greater that ticks per sec");
-		return k_cyc_to_ticks_floor32(cyc_evt);
-	}
+	int64_t ticks = -1;
 
-	return -1;
-}
+	K_SPINLOCK(&events_lock) {
+		if (next_event == NULL) {
+			K_SPINLOCK_BREAK;
+		}
 
-void pm_policy_event_register(struct pm_policy_event *evt, uint32_t time_us)
-{
-	k_spinlock_key_t key = k_spin_lock(&events_lock);
-	uint32_t cyc = k_cycle_get_32();
+		ticks = next_event->uptime_ticks - k_uptime_ticks();
 
-	evt->value_cyc = cyc + k_us_to_cyc_ceil32(time_us);
-	sys_slist_append(&events_list, &evt->node);
-	update_next_event(cyc);
+		if (ticks < 0) {
+			ticks = 0;
+		}
+	}
 
-	k_spin_unlock(&events_lock, key);
+	return ticks;
 }
 
-void pm_policy_event_update(struct pm_policy_event *evt, uint32_t cycle)
+void pm_policy_event_register(struct pm_policy_event *evt, int64_t uptime_ticks)
 {
-	k_spinlock_key_t key = k_spin_lock(&events_lock);
-
-	evt->value_cyc = cycle;
-	update_next_event(k_cycle_get_32());
+	K_SPINLOCK(&events_lock) {
+		evt->uptime_ticks = uptime_ticks;
+		sys_slist_append(&events_list, &evt->node);
+		update_next_event();
+	}
+}
 
-	k_spin_unlock(&events_lock, key);
+void pm_policy_event_update(struct pm_policy_event *evt, int64_t uptime_ticks)
+{
+	K_SPINLOCK(&events_lock) {
+		evt->uptime_ticks = uptime_ticks;
+		update_next_event();
+	}
 }
 
 void pm_policy_event_unregister(struct pm_policy_event *evt)
 {
-	k_spinlock_key_t key = k_spin_lock(&events_lock);
-
-	(void)sys_slist_find_and_remove(&events_list, &evt->node);
-	update_next_event(k_cycle_get_32());
-
-	k_spin_unlock(&events_lock, key);
+	K_SPINLOCK(&events_lock) {
+		(void)sys_slist_find_and_remove(&events_list, &evt->node);
+		update_next_event();
+	}
 }
diff --git a/tests/subsys/pm/policy_api/src/main.c b/tests/subsys/pm/policy_api/src/main.c
index bc3564bee8a..e336b756294 100644
--- a/tests/subsys/pm/policy_api/src/main.c
+++ b/tests/subsys/pm/policy_api/src/main.c
@@ -304,68 +304,36 @@ ZTEST(policy_api, test_pm_policy_next_state_custom)
 }
 #endif /* CONFIG_PM_POLICY_CUSTOM */
 
-#ifdef CONFIG_PM_POLICY_DEFAULT
-/* note: we can't easily mock k_cycle_get_32(), so test is not ideal */
 ZTEST(policy_api, test_pm_policy_events)
 {
-	struct pm_policy_event evt1, evt2;
-	const struct pm_state_info *next;
-	uint32_t now;
-
-	now = k_cyc_to_ticks_ceil32(k_cycle_get_32());
-
-	/* events:
-	 * - 10ms from now (time < runtime idle latency)
-	 * - 200ms from now (time > runtime idle, < suspend to ram latencies)
-	 *
-	 * system wakeup:
-	 * - 2s from now (time > suspend to ram latency)
-	 *
-	 * first event wins, so we must stay active
-	 */
-	pm_policy_event_register(&evt1, k_ms_to_cyc_floor32(10) + k_cycle_get_32());
-	pm_policy_event_register(&evt2, k_ms_to_cyc_floor32(200) + k_cycle_get_32());
-	next = pm_policy_next_state(0U, now + k_sec_to_ticks_floor32(2));
-	zassert_is_null(next);
-
-	/* remove first event so second event now wins, meaning we can now enter
-	 * runtime idle
-	 */
+	struct pm_policy_event evt1;
+	struct pm_policy_event evt2;
+	int64_t now_uptime_ticks;
+	int64_t evt1_1_uptime_ticks;
+	int64_t evt1_2_uptime_ticks;
+	int64_t evt2_uptime_ticks;
+
+	now_uptime_ticks = k_uptime_ticks();
+	evt1_1_uptime_ticks = now_uptime_ticks + 100;
+	evt1_2_uptime_ticks = now_uptime_ticks + 200;
+	evt2_uptime_ticks = now_uptime_ticks + 2000;
+
+	zassert_equal(pm_policy_next_event_ticks(), -1);
+	pm_policy_event_register(&evt1, evt1_1_uptime_ticks);
+	pm_policy_event_register(&evt2, evt2_uptime_ticks);
+	zassert_within(pm_policy_next_event_ticks(), 100, 50);
 	pm_policy_event_unregister(&evt1);
-	next = pm_policy_next_state(0U, now + k_sec_to_ticks_floor32(2));
-	zassert_equal(next->state, PM_STATE_RUNTIME_IDLE);
-
-	/* remove second event, now we can enter deepest state */
+	zassert_within(pm_policy_next_event_ticks(), 2000, 50);
 	pm_policy_event_unregister(&evt2);
-	next = pm_policy_next_state(0U, now + k_sec_to_ticks_floor32(2));
-	zassert_equal(next->state, PM_STATE_SUSPEND_TO_RAM);
-
-	/* events:
-	 * - 2s from now (time > suspend to ram latency)
-	 *
-	 * system wakeup:
-	 * - 200ms from now (time > runtime idle, < suspend to ram latencies)
-	 *
-	 * system wakeup wins, so we can go up to runtime idle.
-	 */
-	pm_policy_event_register(&evt1, k_sec_to_cyc_floor32(2) + k_cycle_get_32());
-	next = pm_policy_next_state(0U, now + k_ms_to_ticks_floor32(200));
-	zassert_equal(next->state, PM_STATE_RUNTIME_IDLE);
-
-	/* modify event to occur in 10ms, so it now wins system wakeup and
-	 * requires to stay awake
-	 */
-	pm_policy_event_update(&evt1, k_ms_to_cyc_floor32(10) + k_cycle_get_32());
-	next = pm_policy_next_state(0U, now + k_ms_to_ticks_floor32(200));
-	zassert_is_null(next);
-
+	zassert_equal(pm_policy_next_event_ticks(), -1);
+	pm_policy_event_register(&evt2, evt2_uptime_ticks);
+	zassert_within(pm_policy_next_event_ticks(), 2000, 50);
+	pm_policy_event_register(&evt1, evt1_1_uptime_ticks);
+	zassert_within(pm_policy_next_event_ticks(), 100, 50);
+	pm_policy_event_update(&evt1, evt1_2_uptime_ticks);
+	zassert_within(pm_policy_next_event_ticks(), 200, 50);
 	pm_policy_event_unregister(&evt1);
+	pm_policy_event_unregister(&evt2);
 }
-#else
-ZTEST(policy_api, test_pm_policy_events)
-{
-	ztest_test_skip();
-}
-#endif /* CONFIG_PM_POLICY_CUSTOM */
 
 ZTEST_SUITE(policy_api, NULL, NULL, NULL, NULL, NULL);
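
Usage sketch (not part of the patch above): with this change, events carry an absolute deadline in uptime ticks instead of a 32-bit cycle count, so callers no longer have to reason about cycle-counter rollover. The driver context below is hypothetical; only the pm_policy_event_* calls introduced here and the k_uptime_ticks()/k_ms_to_ticks_ceil64() kernel helpers are real API.

#include <zephyr/kernel.h>
#include <zephyr/pm/policy.h>

/* Hypothetical event object owned by a driver that expects an RX interrupt
 * roughly 5 ms from now.
 */
static struct pm_policy_event rx_evt;

static void rx_expected_soon(void)
{
	/* Absolute deadline in uptime ticks, as the reworked API expects */
	int64_t deadline = k_uptime_ticks() + k_ms_to_ticks_ceil64(5);

	pm_policy_event_register(&rx_evt, deadline);
}

static void rx_isr(const void *arg)
{
	ARG_UNUSED(arg);

	/* Event handled: unregister it so the policy stops treating it as a
	 * pending wakeup and may pick a deeper sleep state again.
	 */
	pm_policy_event_unregister(&rx_evt);
}

A policy implementation can then call pm_policy_next_event_ticks() to learn how many ticks remain until the earliest registered event (0 if it is already due, -1 if none is registered), as documented in the header change above.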