@@ -2291,6 +2291,7 @@ __perf_remove_from_context(struct perf_event *event,
2291
2291
2292
2292
if (!ctx -> nr_events && ctx -> is_active ) {
2293
2293
ctx -> is_active = 0 ;
2294
+ ctx -> rotate_necessary = 0 ;
2294
2295
if (ctx -> task ) {
2295
2296
WARN_ON_ONCE (cpuctx -> task_ctx != ctx );
2296
2297
cpuctx -> task_ctx = NULL ;
@@ -3188,12 +3189,6 @@ static void ctx_sched_out(struct perf_event_context *ctx,
3188
3189
if (!ctx -> nr_active || !(is_active & EVENT_ALL ))
3189
3190
return ;
3190
3191
3191
- /*
3192
- * If we had been multiplexing, no rotations are necessary, now no events
3193
- * are active.
3194
- */
3195
- ctx -> rotate_necessary = 0 ;
3196
-
3197
3192
perf_pmu_disable (ctx -> pmu );
3198
3193
if (is_active & EVENT_PINNED ) {
3199
3194
list_for_each_entry_safe (event , tmp , & ctx -> pinned_active , active_list )
@@ -3203,6 +3198,13 @@ static void ctx_sched_out(struct perf_event_context *ctx,
3203
3198
if (is_active & EVENT_FLEXIBLE ) {
3204
3199
list_for_each_entry_safe (event , tmp , & ctx -> flexible_active , active_list )
3205
3200
group_sched_out (event , cpuctx , ctx );
3201
+
3202
+ /*
3203
+ * Since we cleared EVENT_FLEXIBLE, also clear
3204
+ * rotate_necessary, it will be reset by
3205
+ * ctx_flexible_sched_in() when needed.
3206
+ */
3207
+ ctx -> rotate_necessary = 0 ;
3206
3208
}
3207
3209
perf_pmu_enable (ctx -> pmu );
3208
3210
}
@@ -3985,6 +3987,12 @@ ctx_event_to_rotate(struct perf_event_context *ctx)
3985
3987
typeof (* event ), group_node );
3986
3988
}
3987
3989
3990
+ /*
3991
+ * Unconditionally clear rotate_necessary; if ctx_flexible_sched_in()
3992
+ * finds there are unschedulable events, it will set it again.
3993
+ */
3994
+ ctx -> rotate_necessary = 0 ;
3995
+
3988
3996
return event ;
3989
3997
}
3990
3998
0 commit comments