@@ -2,7 +2,7 @@
 
 #include <linux/sched/cputime.h>
 
-static DEFINE_MUTEX(cgroup_rstat_mutex);
+static DEFINE_SPINLOCK(cgroup_rstat_lock);
 static DEFINE_PER_CPU(raw_spinlock_t, cgroup_rstat_cpu_lock);
 
 static void cgroup_base_stat_flush(struct cgroup *cgrp, int cpu);
@@ -132,21 +132,31 @@ static struct cgroup *cgroup_rstat_cpu_pop_updated(struct cgroup *pos,
 }
 
 /* see cgroup_rstat_flush() */
-static void cgroup_rstat_flush_locked(struct cgroup *cgrp)
+static void cgroup_rstat_flush_locked(struct cgroup *cgrp, bool may_sleep)
+	__releases(&cgroup_rstat_lock) __acquires(&cgroup_rstat_lock)
 {
 	int cpu;
 
-	lockdep_assert_held(&cgroup_rstat_mutex);
+	lockdep_assert_held(&cgroup_rstat_lock);
 
 	for_each_possible_cpu(cpu) {
 		raw_spinlock_t *cpu_lock = per_cpu_ptr(&cgroup_rstat_cpu_lock,
 						       cpu);
 		struct cgroup *pos = NULL;
 
-		raw_spin_lock_irq(cpu_lock);
+		raw_spin_lock(cpu_lock);
 		while ((pos = cgroup_rstat_cpu_pop_updated(pos, cgrp, cpu)))
 			cgroup_base_stat_flush(pos, cpu);
-		raw_spin_unlock_irq(cpu_lock);
+		raw_spin_unlock(cpu_lock);
+
+		/* if @may_sleep, play nice and yield if necessary */
+		if (may_sleep && (need_resched() ||
+				  spin_needbreak(&cgroup_rstat_lock))) {
+			spin_unlock_irq(&cgroup_rstat_lock);
+			if (!cond_resched())
+				cpu_relax();
+			spin_lock_irq(&cgroup_rstat_lock);
+		}
 	}
 }
 
@@ -160,12 +170,31 @@ static void cgroup_rstat_flush_locked(struct cgroup *cgrp)
  *
  * This also gets all cgroups in the subtree including @cgrp off the
  * ->updated_children lists.
+ *
+ * This function may block.
  */
 void cgroup_rstat_flush(struct cgroup *cgrp)
 {
-	mutex_lock(&cgroup_rstat_mutex);
-	cgroup_rstat_flush_locked(cgrp);
-	mutex_unlock(&cgroup_rstat_mutex);
+	might_sleep();
+
+	spin_lock_irq(&cgroup_rstat_lock);
+	cgroup_rstat_flush_locked(cgrp, true);
+	spin_unlock_irq(&cgroup_rstat_lock);
+}
+
+/**
+ * cgroup_rstat_flush_irqsafe - irqsafe version of cgroup_rstat_flush()
+ * @cgrp: target cgroup
+ *
+ * This function can be called from any context.
+ */
+void cgroup_rstat_flush_irqsafe(struct cgroup *cgrp)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&cgroup_rstat_lock, flags);
+	cgroup_rstat_flush_locked(cgrp, false);
+	spin_unlock_irqrestore(&cgroup_rstat_lock, flags);
 }
 
 /**
@@ -174,21 +203,24 @@ void cgroup_rstat_flush(struct cgroup *cgrp)
  *
  * Flush stats in @cgrp's subtree and prevent further flushes. Must be
  * paired with cgroup_rstat_flush_release().
+ *
+ * This function may block.
  */
 void cgroup_rstat_flush_hold(struct cgroup *cgrp)
-	__acquires(&cgroup_rstat_mutex)
+	__acquires(&cgroup_rstat_lock)
 {
-	mutex_lock(&cgroup_rstat_mutex);
-	cgroup_rstat_flush_locked(cgrp);
+	might_sleep();
+	spin_lock_irq(&cgroup_rstat_lock);
+	cgroup_rstat_flush_locked(cgrp, true);
 }
 
 /**
  * cgroup_rstat_flush_release - release cgroup_rstat_flush_hold()
  */
 void cgroup_rstat_flush_release(void)
-	__releases(&cgroup_rstat_mutex)
+	__releases(&cgroup_rstat_lock)
 {
-	mutex_unlock(&cgroup_rstat_mutex);
+	spin_unlock_irq(&cgroup_rstat_lock);
 }
 
 int cgroup_rstat_init(struct cgroup *cgrp)
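
Usage sketch (not part of the patch): after this change there are three flush entry points with different context rules: cgroup_rstat_flush() and the cgroup_rstat_flush_hold()/cgroup_rstat_flush_release() pair may block, while cgroup_rstat_flush_irqsafe() can be called from any context. The hypothetical example_* callers below only illustrate that split; everything except the cgroup_rstat_* calls is invented for illustration.

/* Sleepable (process) context: the flush may yield cgroup_rstat_lock midway. */
static void example_flush_blocking(struct cgroup *cgrp)
{
	cgroup_rstat_flush(cgrp);		/* calls might_sleep() */
}

/* Sleepable context that also reads counters under cgroup_rstat_lock. */
static void example_read_flushed(struct cgroup *cgrp)
{
	cgroup_rstat_flush_hold(cgrp);		/* flush, keep cgroup_rstat_lock held */
	/* ... read the freshly flushed per-cgroup counters here ... */
	cgroup_rstat_flush_release();		/* drop cgroup_rstat_lock */
}

/* Atomic context (IRQ handler, under another spinlock): never sleeps or yields. */
static void example_flush_atomic(struct cgroup *cgrp)
{
	cgroup_rstat_flush_irqsafe(cgrp);	/* flushes with may_sleep == false */
}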