10
10
#include <zephyr/sys/__assert.h>
#include <zephyr/sys/atomic.h>
#include <zephyr/toolchain.h>
#include <zephyr/spinlock.h>

#if DT_HAS_COMPAT_STATUS_OKAY(zephyr_power_state)

/* Initializer for one substates[] entry from a devicetree
 * zephyr,power-state node: state id, optional substate id and
 * optional exit latency (both default to 0 when absent).
 */
#define DT_SUB_LOCK_INIT(node_id)					\
	{ .state = PM_STATE_DT_INIT(node_id),				\
	  .substate_id = DT_PROP_OR(node_id, substate_id, 0),		\
	  .exit_latency_us = DT_PROP_OR(node_id, exit_latency_us, 0),	\
	},

/**
 * State and substate lock structure.
 *
 * Struct holds all power states defined in the device tree. Array with counter
 * variables is in RAM and n-th counter is used for n-th power state. Structure
 * also holds exit latency for each state. It is used to disable power states
 * based on current latency requirement.
 *
 * Operations on this array are in the order of O(n) with the number of power
 * states and this is mostly due to the random nature of the substate value
 * (that can be anything from a small integer value to a bitmask). We can
 * probably do better with an hashmap.
 */
static const struct {
	enum pm_state state;
	uint8_t substate_id;
	uint32_t exit_latency_us;
} substates[] = {
	DT_FOREACH_STATUS_OKAY(zephyr_power_state, DT_SUB_LOCK_INIT)
};

/* Per-state lock reference counters; lock_cnt[n] belongs to substates[n].
 * Written under 'lock', read either under 'lock' or via atomic_get().
 */
static atomic_t lock_cnt[ARRAY_SIZE(substates)];
/* Bit n set when substates[n] satisfies the current latency requirement. */
static atomic_t latency_mask = BIT_MASK(ARRAY_SIZE(substates));
/* Bit n set when substates[n] is not locked (lock_cnt[n] == 0). */
static atomic_t unlock_mask = BIT_MASK(ARRAY_SIZE(substates));
/* Serializes updates of lock_cnt[] and unlock_mask. */
static struct k_spinlock lock;

#endif
42
49
43
50
/**
 * Increment the lock count of every table entry matching <state, substate>.
 *
 * While an entry's count is non-zero its bit in unlock_mask is cleared,
 * marking the state unavailable to the policy.
 *
 * @param state       Power state to lock.
 * @param substate_id Substate id, or PM_ALL_SUBSTATES to match every
 *                    substate of @p state.
 */
void pm_policy_state_lock_get(enum pm_state state, uint8_t substate_id)
{
#if DT_HAS_COMPAT_STATUS_OKAY(zephyr_power_state)
	for (size_t i = 0; i < ARRAY_SIZE(substates); i++) {
		if (substates[i].state == state &&
		    (substates[i].substate_id == substate_id || substate_id == PM_ALL_SUBSTATES)) {
			k_spinlock_key_t key = k_spin_lock(&lock);

			/* First lock on this state: clear its "unlocked" bit. */
			if (lock_cnt[i] == 0) {
				unlock_mask &= ~BIT(i);
			}
			lock_cnt[i]++;
			k_spin_unlock(&lock, key);
		}
	}
#endif
}
56
68
/**
 * Decrement the lock count of every table entry matching <state, substate>.
 *
 * When a count drops back to zero the entry's bit in unlock_mask is set
 * again, re-enabling the state. Asserts on an unbalanced get/put pair.
 *
 * @param state       Power state to unlock.
 * @param substate_id Substate id, or PM_ALL_SUBSTATES to match every
 *                    substate of @p state.
 */
void pm_policy_state_lock_put(enum pm_state state, uint8_t substate_id)
{
#if DT_HAS_COMPAT_STATUS_OKAY(zephyr_power_state)
	for (size_t i = 0; i < ARRAY_SIZE(substates); i++) {
		if (substates[i].state == state &&
		    (substates[i].substate_id == substate_id || substate_id == PM_ALL_SUBSTATES)) {
			k_spinlock_key_t key = k_spin_lock(&lock);

			__ASSERT(lock_cnt[i] > 0, "Unbalanced state lock get/put");
			lock_cnt[i]--;
			/* Last lock released: mark the state unlocked again. */
			if (lock_cnt[i] == 0) {
				unlock_mask |= BIT(i);
			}
			k_spin_unlock(&lock, key);
		}
	}
#endif
}
73
87
/**
 * Check whether a <state, substate> lock is currently held.
 *
 * @param state       Power state to query.
 * @param substate_id Substate id, or PM_ALL_SUBSTATES to match the first
 *                    entry for @p state.
 *
 * @return true if the first matching entry has a non-zero lock count,
 *         false otherwise (including when no entry matches).
 */
bool pm_policy_state_lock_is_active(enum pm_state state, uint8_t substate_id)
{
#if DT_HAS_COMPAT_STATUS_OKAY(zephyr_power_state)
	for (size_t i = 0; i < ARRAY_SIZE(substates); i++) {
		if (substates[i].state == state &&
		    (substates[i].substate_id == substate_id || substate_id == PM_ALL_SUBSTATES)) {
			return atomic_get(&lock_cnt[i]) != 0;
		}
	}
#endif

	return false;
}
100
+
101
+ bool pm_policy_state_is_available (enum pm_state state , uint8_t substate_id )
102
+ {
103
+ #if DT_HAS_COMPAT_STATUS_OKAY (zephyr_power_state )
104
+ for (size_t i = 0 ; i < ARRAY_SIZE (substates ); i ++ ) {
105
+ if (substates [i ].state == state &&
106
+ (substates [i ].substate_id == substate_id || substate_id == PM_ALL_SUBSTATES )) {
107
+ return (atomic_get (& lock_cnt [i ]) == 0 ) &&
108
+ (atomic_get (& latency_mask ) & BIT (i ));
109
+ }
110
+ }
111
+ #endif
112
+
113
+ return false;
114
+ }
115
+
116
+ bool pm_policy_state_any_active (void )
117
+ {
118
+ #if DT_HAS_COMPAT_STATUS_OKAY (zephyr_power_state )
119
+ /* Check if there is any power state that is not locked and not disabled due
120
+ * to latency requirements.
121
+ */
122
+ return atomic_get (& unlock_mask ) & atomic_get (& latency_mask );
123
+ #endif
124
+ return true;
125
+ }
126
+
127
+ #if DT_HAS_COMPAT_STATUS_OKAY (zephyr_power_state )
128
+ /* Callback is called whenever latency requirement changes. It is called under lock. */
129
+ static void pm_policy_latency_update_locked (int32_t max_latency_us )
130
+ {
131
+ for (size_t i = 0 ; i < ARRAY_SIZE (substates ); i ++ ) {
132
+ if (substates [i ].exit_latency_us >= max_latency_us ) {
133
+ latency_mask &= ~BIT (i );
134
+ } else {
135
+ latency_mask |= BIT (i );
136
+ }
137
+ }
138
+ }
139
+
140
+ static int pm_policy_latency_init (void )
141
+ {
142
+ static struct pm_policy_latency_subscription sub ;
143
+
144
+ pm_policy_latency_changed_subscribe (& sub , pm_policy_latency_update_locked );
145
+
146
+ return 0 ;
147
+ }
148
+
149
+ SYS_INIT (pm_policy_latency_init , PRE_KERNEL_1 , 0 );
150
+ #endif
0 commit comments