From 20bc0c5e28764fa4a3c53f18e7acc639447238ab Mon Sep 17 00:00:00 2001 From: Bjarki Arge Andreasen Date: Tue, 29 Oct 2024 10:52:37 +0100 Subject: [PATCH 1/3] [nrf fromtree] pm: policy: fix pm_policy_event_register arg The pm_policy_event_register() API takes absolute cycles as the second arg, like pm_policy_event_update(), but the arg is renamed time_us and treated as a relative time in us rather than abs cycles. Fix implementation of pm_policy_event_register() to treat cycles like pm_policy_event_update() and API docs suggest. Signed-off-by: Bjarki Arge Andreasen (cherry picked from commit 145d04101d416e20af04b8a9c8bf5fe8fbb0c77b) --- subsys/pm/policy/policy_events.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/subsys/pm/policy/policy_events.c b/subsys/pm/policy/policy_events.c index dc06bffdf55..53fec3edc5d 100644 --- a/subsys/pm/policy/policy_events.c +++ b/subsys/pm/policy/policy_events.c @@ -68,14 +68,13 @@ int32_t pm_policy_next_event_ticks(void) return -1; } -void pm_policy_event_register(struct pm_policy_event *evt, uint32_t time_us) +void pm_policy_event_register(struct pm_policy_event *evt, uint32_t cycle) { k_spinlock_key_t key = k_spin_lock(&events_lock); - uint32_t cyc = k_cycle_get_32(); - evt->value_cyc = cyc + k_us_to_cyc_ceil32(time_us); + evt->value_cyc = cycle; sys_slist_append(&events_list, &evt->node); - update_next_event(cyc); + update_next_event(k_cycle_get_32()); k_spin_unlock(&events_lock, key); } From 5d6552e56b064713ac4e38ad74d8b5abb7b6e420 Mon Sep 17 00:00:00 2001 From: Bjarki Arge Andreasen Date: Tue, 29 Oct 2024 15:59:09 +0100 Subject: [PATCH 2/3] [nrf fromtree] pm: policy: separate default policy and events The default policy currently directly references the private variable next_event from policy_events.c to then convert the cycle of said event (if exists) to a kernel tick in the future, something policy_events.c already implements and exposes through pm_policy_next_event_ticks(). 
Additionally, the implementation of pm_policy_next_state() in policy_default.c already gets the nearest kernel tick, wherein the next event has already been accounted for; see implementation of pm_system_suspend(). This commit removes the redundant and layer violating computation of the tick of the next event from policy_default.c and updates the test test_pm_policy_events to not use default policy to determine if pm_policy_next_event_ticks() is correct. Signed-off-by: Bjarki Arge Andreasen (cherry picked from commit 14117b453d44cc91b74365f9c7d624b8778b8c10) --- subsys/pm/policy/policy_default.c | 22 ------- tests/subsys/pm/policy_api/src/main.c | 82 ++++++++------------------- 2 files changed, 25 insertions(+), 79 deletions(-) diff --git a/subsys/pm/policy/policy_default.c b/subsys/pm/policy/policy_default.c index 74a0443e6cd..eaf317d24cf 100644 --- a/subsys/pm/policy/policy_default.c +++ b/subsys/pm/policy/policy_default.c @@ -9,7 +9,6 @@ #include #include -extern struct pm_policy_event *next_event; extern int32_t max_latency_cyc; const struct pm_state_info *pm_policy_next_state(uint8_t cpu, int32_t ticks) @@ -30,27 +29,6 @@ const struct pm_state_info *pm_policy_next_state(uint8_t cpu, int32_t ticks) num_cpu_states = pm_state_cpu_get_all(cpu, &cpu_states); - if ((next_event) && (next_event->value_cyc >= 0)) { - uint32_t cyc_curr = k_cycle_get_32(); - int64_t cyc_evt = next_event->value_cyc - cyc_curr; - - /* event happening after cycle counter max value, pad */ - if (next_event->value_cyc <= cyc_curr) { - cyc_evt += UINT32_MAX; - } - - if (cyc_evt > 0) { - /* if there's no system wakeup event always wins, - * otherwise, who comes earlier wins - */ - if (cyc < 0) { - cyc = cyc_evt; - } else { - cyc = MIN(cyc, cyc_evt); - } - } - } - for (int16_t i = (int16_t)num_cpu_states - 1; i >= 0; i--) { const struct pm_state_info *state = &cpu_states[i]; uint32_t min_residency_cyc, exit_latency_cyc; diff --git a/tests/subsys/pm/policy_api/src/main.c 
b/tests/subsys/pm/policy_api/src/main.c index bc3564bee8a..e61f44a8b36 100644 --- a/tests/subsys/pm/policy_api/src/main.c +++ b/tests/subsys/pm/policy_api/src/main.c @@ -304,68 +304,36 @@ ZTEST(policy_api, test_pm_policy_next_state_custom) } #endif /* CONFIG_PM_POLICY_CUSTOM */ -#ifdef CONFIG_PM_POLICY_DEFAULT -/* note: we can't easily mock k_cycle_get_32(), so test is not ideal */ ZTEST(policy_api, test_pm_policy_events) { - struct pm_policy_event evt1, evt2; - const struct pm_state_info *next; - uint32_t now; - - now = k_cyc_to_ticks_ceil32(k_cycle_get_32()); - - /* events: - * - 10ms from now (time < runtime idle latency) - * - 200ms from now (time > runtime idle, < suspend to ram latencies) - * - * system wakeup: - * - 2s from now (time > suspend to ram latency) - * - * first event wins, so we must stay active - */ - pm_policy_event_register(&evt1, k_ms_to_cyc_floor32(10) + k_cycle_get_32()); - pm_policy_event_register(&evt2, k_ms_to_cyc_floor32(200) + k_cycle_get_32()); - next = pm_policy_next_state(0U, now + k_sec_to_ticks_floor32(2)); - zassert_is_null(next); - - /* remove first event so second event now wins, meaning we can now enter - * runtime idle - */ + struct pm_policy_event evt1; + struct pm_policy_event evt2; + uint32_t now_cycle; + uint32_t evt1_1_cycle; + uint32_t evt1_2_cycle; + uint32_t evt2_cycle; + + now_cycle = k_cycle_get_32(); + evt1_1_cycle = now_cycle + k_ticks_to_cyc_floor32(100); + evt1_2_cycle = now_cycle + k_ticks_to_cyc_floor32(200); + evt2_cycle = now_cycle + k_ticks_to_cyc_floor32(2000); + + zassert_equal(pm_policy_next_event_ticks(), -1); + pm_policy_event_register(&evt1, evt1_1_cycle); + pm_policy_event_register(&evt2, evt2_cycle); + zassert_within(pm_policy_next_event_ticks(), 100, 50); pm_policy_event_unregister(&evt1); - next = pm_policy_next_state(0U, now + k_sec_to_ticks_floor32(2)); - zassert_equal(next->state, PM_STATE_RUNTIME_IDLE); - - /* remove second event, now we can enter deepest state */ + 
zassert_within(pm_policy_next_event_ticks(), 2000, 50); pm_policy_event_unregister(&evt2); - next = pm_policy_next_state(0U, now + k_sec_to_ticks_floor32(2)); - zassert_equal(next->state, PM_STATE_SUSPEND_TO_RAM); - - /* events: - * - 2s from now (time > suspend to ram latency) - * - * system wakeup: - * - 200ms from now (time > runtime idle, < suspend to ram latencies) - * - * system wakeup wins, so we can go up to runtime idle. - */ - pm_policy_event_register(&evt1, k_sec_to_cyc_floor32(2) + k_cycle_get_32()); - next = pm_policy_next_state(0U, now + k_ms_to_ticks_floor32(200)); - zassert_equal(next->state, PM_STATE_RUNTIME_IDLE); - - /* modify event to occur in 10ms, so it now wins system wakeup and - * requires to stay awake - */ - pm_policy_event_update(&evt1, k_ms_to_cyc_floor32(10) + k_cycle_get_32()); - next = pm_policy_next_state(0U, now + k_ms_to_ticks_floor32(200)); - zassert_is_null(next); - + zassert_equal(pm_policy_next_event_ticks(), -1); + pm_policy_event_register(&evt2, evt2_cycle); + zassert_within(pm_policy_next_event_ticks(), 2000, 50); + pm_policy_event_register(&evt1, evt1_1_cycle); + zassert_within(pm_policy_next_event_ticks(), 100, 50); + pm_policy_event_update(&evt1, evt1_2_cycle); + zassert_within(pm_policy_next_event_ticks(), 200, 50); pm_policy_event_unregister(&evt1); + pm_policy_event_unregister(&evt2); } -#else -ZTEST(policy_api, test_pm_policy_events) -{ - ztest_test_skip(); -} -#endif /* CONFIG_PM_POLICY_CUSTOM */ ZTEST_SUITE(policy_api, NULL, NULL, NULL, NULL, NULL); From 3ab495a1f307078cbcada91cb167e687d801c0d6 Mon Sep 17 00:00:00 2001 From: Bjarki Arge Andreasen Date: Tue, 29 Oct 2024 11:49:40 +0100 Subject: [PATCH 3/3] [nrf fromtree] pm: policy: event: use uptime ticks Update events to use uptime ticks, which is a monotonic clock which is in the same resolution as kernel ticks. This makes comparisons simple and removes the complexity of dealing with wrapping counter values. 
The wrapping is particularly problematic for events since this makes it quite complex to track if an event has occurred in the past, or will occur in the future. This info is needed to know if an event has actually been handled or not. Signed-off-by: Bjarki Arge Andreasen (cherry picked from commit 59779ebe4b12d08bee7e8d15028fb0ee2152b4e3) --- include/zephyr/pm/policy.h | 38 +++++++----- subsys/pm/policy/policy_events.c | 89 +++++++++++---------------- tests/subsys/pm/policy_api/src/main.c | 26 ++++---- 3 files changed, 71 insertions(+), 82 deletions(-) diff --git a/include/zephyr/pm/policy.h b/include/zephyr/pm/policy.h index e1e910f3e0a..057488d722d 100644 --- a/include/zephyr/pm/policy.h +++ b/include/zephyr/pm/policy.h @@ -67,7 +67,7 @@ struct pm_policy_latency_request { struct pm_policy_event { /** @cond INTERNAL_HIDDEN */ sys_snode_t node; - uint32_t value_cyc; + int64_t uptime_ticks; /** @endcond */ }; @@ -137,7 +136,6 @@ void pm_policy_state_lock_put(enum pm_state state, uint8_t substate_id); */ bool pm_policy_state_lock_is_active(enum pm_state state, uint8_t substate_id); - /** * @brief Register an event. * @@ -145,30 +144,31 @@ bool pm_policy_state_lock_is_active(enum pm_state state, uint8_t substate_id); * will wake up the system at a known time in the future. By registering such * event, the policy manager will be able to decide whether certain power states * are worth entering or not. - * CPU is woken up before the time passed in cycle to prevent the event handling - * latency * - * @note It is mandatory to unregister events once they have happened by using - * pm_policy_event_unregister(). Not doing so is an API contract violation, - * because the system would continue to consider them as valid events in the - * *far* future, that is, after the cycle counter rollover. + * CPU is woken up before the time passed in cycle to minimize event handling + * latency. 
Once woken up, the CPU will be kept awake until the event has been + * handled, which is signaled by pm_policy_event_unregister() or moving event + * into the future using pm_policy_event_update(). * * @param evt Event. - * @param cycle When the event will occur, in absolute time (cycles). + * @param uptime_ticks When the event will occur, in uptime ticks. * - * @see pm_policy_event_unregister + * @see pm_policy_event_unregister() */ -void pm_policy_event_register(struct pm_policy_event *evt, uint32_t cycle); +void pm_policy_event_register(struct pm_policy_event *evt, int64_t uptime_ticks); /** * @brief Update an event. * + * This shortcut allows for moving the time an event will occur without the + * need for an unregister + register cycle. + * * @param evt Event. - * @param cycle When the event will occur, in absolute time (cycles). + * @param uptime_ticks When the event will occur, in uptime ticks. * * @see pm_policy_event_register */ -void pm_policy_event_update(struct pm_policy_event *evt, uint32_t cycle); +void pm_policy_event_update(struct pm_policy_event *evt, int64_t uptime_ticks); /** * @brief Unregister an event. @@ -208,10 +208,14 @@ void pm_policy_device_power_lock_put(const struct device *dev); /** * @brief Returns the ticks until the next event * - * If an event is registred, it will return the number of ticks until the next event as - * a positive or zero value. Otherwise it returns -1 + * If an event is registred, it will return the number of ticks until the next event, if the + * "next"/"oldest" registered event is in the past, it will return 0. Otherwise it returns -1. 
+ * + * @retval >0 If next registered event is in the future + * @retval 0 If next registered event is now or in the past + * @retval -1 Otherwise */ -int32_t pm_policy_next_event_ticks(void); +int64_t pm_policy_next_event_ticks(void); #else static inline void pm_policy_state_lock_get(enum pm_state state, uint8_t substate_id) @@ -261,7 +265,7 @@ static inline void pm_policy_device_power_lock_put(const struct device *dev) ARG_UNUSED(dev); } -static inline int32_t pm_policy_next_event_ticks(void) +static inline int64_t pm_policy_next_event_ticks(void) { return -1; } diff --git a/subsys/pm/policy/policy_events.c b/subsys/pm/policy/policy_events.c index 53fec3edc5d..4a689d3f36c 100644 --- a/subsys/pm/policy/policy_events.c +++ b/subsys/pm/policy/policy_events.c @@ -20,81 +20,66 @@ static sys_slist_t events_list; /** Pointer to Next Event. */ struct pm_policy_event *next_event; -/** @brief Update next event. */ -static void update_next_event(uint32_t cyc) +static void update_next_event(void) { - int64_t new_next_event_cyc = -1; struct pm_policy_event *evt; - /* unset the next event pointer */ next_event = NULL; SYS_SLIST_FOR_EACH_CONTAINER(&events_list, evt, node) { - uint64_t cyc_evt = evt->value_cyc; - - /* - * cyc value is a 32-bit rolling counter: - * - * |---------------->-----------------------| - * 0 cyc UINT32_MAX - * - * Values from [0, cyc) are events happening later than - * [cyc, UINT32_MAX], so pad [0, cyc) with UINT32_MAX + 1 to do - * the comparison. 
- */ - if (cyc_evt < cyc) { - cyc_evt += (uint64_t)UINT32_MAX + 1U; + if (next_event == NULL) { + next_event = evt; + continue; } - if ((new_next_event_cyc < 0) || (cyc_evt < new_next_event_cyc)) { - new_next_event_cyc = cyc_evt; - next_event = evt; + if (next_event->uptime_ticks <= evt->uptime_ticks) { + continue; } + + next_event = evt; } } -int32_t pm_policy_next_event_ticks(void) +int64_t pm_policy_next_event_ticks(void) { - int32_t cyc_evt = -1; - - if ((next_event) && (next_event->value_cyc > 0)) { - cyc_evt = next_event->value_cyc - k_cycle_get_32(); - cyc_evt = MAX(0, cyc_evt); - BUILD_ASSERT(CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC >= CONFIG_SYS_CLOCK_TICKS_PER_SEC, - "HW Cycles per sec should be greater that ticks per sec"); - return k_cyc_to_ticks_floor32(cyc_evt); - } + int64_t ticks = -1; - return -1; -} + K_SPINLOCK(&events_lock) { + if (next_event == NULL) { + K_SPINLOCK_BREAK; + } -void pm_policy_event_register(struct pm_policy_event *evt, uint32_t cycle) -{ - k_spinlock_key_t key = k_spin_lock(&events_lock); + ticks = next_event->uptime_ticks - k_uptime_ticks(); - evt->value_cyc = cycle; - sys_slist_append(&events_list, &evt->node); - update_next_event(k_cycle_get_32()); + if (ticks < 0) { + ticks = 0; + } + } - k_spin_unlock(&events_lock, key); + return ticks; } -void pm_policy_event_update(struct pm_policy_event *evt, uint32_t cycle) +void pm_policy_event_register(struct pm_policy_event *evt, int64_t uptime_ticks) { - k_spinlock_key_t key = k_spin_lock(&events_lock); - - evt->value_cyc = cycle; - update_next_event(k_cycle_get_32()); + K_SPINLOCK(&events_lock) { + evt->uptime_ticks = uptime_ticks; + sys_slist_append(&events_list, &evt->node); + update_next_event(); + } +} - k_spin_unlock(&events_lock, key); +void pm_policy_event_update(struct pm_policy_event *evt, int64_t uptime_ticks) +{ + K_SPINLOCK(&events_lock) { + evt->uptime_ticks = uptime_ticks; + update_next_event(); + } } void pm_policy_event_unregister(struct pm_policy_event *evt) { - 
k_spinlock_key_t key = k_spin_lock(&events_lock); - - (void)sys_slist_find_and_remove(&events_list, &evt->node); - update_next_event(k_cycle_get_32()); - - k_spin_unlock(&events_lock, key); + K_SPINLOCK(&events_lock) { + (void)sys_slist_find_and_remove(&events_list, &evt->node); + update_next_event(); + } } diff --git a/tests/subsys/pm/policy_api/src/main.c b/tests/subsys/pm/policy_api/src/main.c index e61f44a8b36..e336b756294 100644 --- a/tests/subsys/pm/policy_api/src/main.c +++ b/tests/subsys/pm/policy_api/src/main.c @@ -308,29 +308,29 @@ ZTEST(policy_api, test_pm_policy_events) { struct pm_policy_event evt1; struct pm_policy_event evt2; - uint32_t now_cycle; - uint32_t evt1_1_cycle; - uint32_t evt1_2_cycle; - uint32_t evt2_cycle; + int64_t now_uptime_ticks; + int64_t evt1_1_uptime_ticks; + int64_t evt1_2_uptime_ticks; + int64_t evt2_uptime_ticks; - now_cycle = k_cycle_get_32(); - evt1_1_cycle = now_cycle + k_ticks_to_cyc_floor32(100); - evt1_2_cycle = now_cycle + k_ticks_to_cyc_floor32(200); - evt2_cycle = now_cycle + k_ticks_to_cyc_floor32(2000); + now_uptime_ticks = k_uptime_ticks(); + evt1_1_uptime_ticks = now_uptime_ticks + 100; + evt1_2_uptime_ticks = now_uptime_ticks + 200; + evt2_uptime_ticks = now_uptime_ticks + 2000; zassert_equal(pm_policy_next_event_ticks(), -1); - pm_policy_event_register(&evt1, evt1_1_cycle); - pm_policy_event_register(&evt2, evt2_cycle); + pm_policy_event_register(&evt1, evt1_1_uptime_ticks); + pm_policy_event_register(&evt2, evt2_uptime_ticks); zassert_within(pm_policy_next_event_ticks(), 100, 50); pm_policy_event_unregister(&evt1); zassert_within(pm_policy_next_event_ticks(), 2000, 50); pm_policy_event_unregister(&evt2); zassert_equal(pm_policy_next_event_ticks(), -1); - pm_policy_event_register(&evt2, evt2_cycle); + pm_policy_event_register(&evt2, evt2_uptime_ticks); zassert_within(pm_policy_next_event_ticks(), 2000, 50); - pm_policy_event_register(&evt1, evt1_1_cycle); + pm_policy_event_register(&evt1, evt1_1_uptime_ticks); 
zassert_within(pm_policy_next_event_ticks(), 100, 50); - pm_policy_event_update(&evt1, evt1_2_cycle); + pm_policy_event_update(&evt1, evt1_2_uptime_ticks); zassert_within(pm_policy_next_event_ticks(), 200, 50); pm_policy_event_unregister(&evt1); pm_policy_event_unregister(&evt2);