@@ -983,16 +983,10 @@ perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
 	event->shadow_ctx_time = now - t->timestamp;
 }
 
-/*
- * Update cpuctx->cgrp so that it is set when first cgroup event is added and
- * cleared when last cgroup event is removed.
- */
 static inline void
-list_update_cgroup_event(struct perf_event *event,
-			 struct perf_event_context *ctx, bool add)
+perf_cgroup_event_enable(struct perf_event *event, struct perf_event_context *ctx)
 {
 	struct perf_cpu_context *cpuctx;
-	struct list_head *cpuctx_entry;
 
 	if (!is_cgroup_event(event))
 		return;
@@ -1009,28 +1003,41 @@ list_update_cgroup_event(struct perf_event *event,
 	 * because if the first would mismatch, the second would not try again
 	 * and we would leave cpuctx->cgrp unset.
 	 */
-	if (add && !cpuctx->cgrp) {
+	if (ctx->is_active && !cpuctx->cgrp) {
 		struct perf_cgroup *cgrp = perf_cgroup_from_task(current, ctx);
 
 		if (cgroup_is_descendant(cgrp->css.cgroup, event->cgrp->css.cgroup))
 			cpuctx->cgrp = cgrp;
 	}
 
-	if (add && ctx->nr_cgroups++)
+	if (ctx->nr_cgroups++)
 		return;
-	else if (!add && --ctx->nr_cgroups)
+
+	list_add(&cpuctx->cgrp_cpuctx_entry,
+			per_cpu_ptr(&cgrp_cpuctx_list, event->cpu));
+}
+
+static inline void
+perf_cgroup_event_disable(struct perf_event *event, struct perf_event_context *ctx)
+{
+	struct perf_cpu_context *cpuctx;
+
+	if (!is_cgroup_event(event))
 		return;
 
-	/* no cgroup running */
-	if (!add)
+	/*
+	 * Because cgroup events are always per-cpu events,
+	 * @ctx == &cpuctx->ctx.
+	 */
+	cpuctx = container_of(ctx, struct perf_cpu_context, ctx);
+
+	if (--ctx->nr_cgroups)
+		return;
+
+	if (ctx->is_active && cpuctx->cgrp)
 		cpuctx->cgrp = NULL;
 
-	cpuctx_entry = &cpuctx->cgrp_cpuctx_entry;
-	if (add)
-		list_add(cpuctx_entry,
-			 per_cpu_ptr(&cgrp_cpuctx_list, event->cpu));
-	else
-		list_del(cpuctx_entry);
+	list_del(&cpuctx->cgrp_cpuctx_entry);
 }
 
 #else /* !CONFIG_CGROUP_PERF */
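Note: the two new helpers lean on a pair of idioms worth seeing in isolation: recovering the enclosing struct perf_cpu_context from its embedded ctx member via container_of(), and treating ctx->nr_cgroups as a first/last reference count, so only the 0 -> 1 enable links the cpuctx onto the per-CPU list and only the 1 -> 0 disable unlinks it. Below is a minimal standalone sketch of both idioms with toy stand-in types; every name here (toy_ctx, toy_cpu_ctx, toy_container_of, toy_event_enable/disable) is hypothetical, not the kernel's API.

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* toy stand-ins for perf_event_context / perf_cpu_context */
struct toy_ctx {
	int nr_cgroups;
};

struct toy_cpu_ctx {
	struct toy_ctx ctx;	/* embedded, like cpuctx->ctx */
	bool on_list;		/* stands in for cgrp_cpuctx_entry being linked */
};

/* same trick as the kernel's container_of(): member address -> outer struct */
#define toy_container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static void toy_event_enable(struct toy_ctx *ctx)
{
	struct toy_cpu_ctx *cpuctx = toy_container_of(ctx, struct toy_cpu_ctx, ctx);

	if (ctx->nr_cgroups++)	/* only the 0 -> 1 transition does the work */
		return;
	cpuctx->on_list = true;	/* first cgroup event: link onto the per-CPU list */
}

static void toy_event_disable(struct toy_ctx *ctx)
{
	struct toy_cpu_ctx *cpuctx = toy_container_of(ctx, struct toy_cpu_ctx, ctx);

	if (--ctx->nr_cgroups)	/* only the 1 -> 0 transition does the work */
		return;
	cpuctx->on_list = false;	/* last cgroup event: unlink */
}

int main(void)
{
	struct toy_cpu_ctx cpuctx = { .ctx = { 0 }, .on_list = false };

	toy_event_enable(&cpuctx.ctx);	/* first enable links */
	toy_event_enable(&cpuctx.ctx);	/* second enable only counts */
	assert(cpuctx.ctx.nr_cgroups == 2 && cpuctx.on_list);

	toy_event_disable(&cpuctx.ctx);
	assert(cpuctx.on_list);		/* one event left, still linked */
	toy_event_disable(&cpuctx.ctx);	/* last disable unlinks */
	assert(cpuctx.ctx.nr_cgroups == 0 && !cpuctx.on_list);

	puts("first/last counting kept the list state consistent");
	return 0;
}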
@@ -1096,11 +1103,14 @@ static inline u64 perf_cgroup_event_time(struct perf_event *event)
 }
 
 static inline void
-list_update_cgroup_event(struct perf_event *event,
-			 struct perf_event_context *ctx, bool add)
+perf_cgroup_event_enable(struct perf_event *event, struct perf_event_context *ctx)
 {
 }
 
+static inline void
+perf_cgroup_event_disable(struct perf_event *event, struct perf_event_context *ctx)
+{
+}
 #endif
 
 /*
@@ -1791,13 +1801,14 @@ list_add_event(struct perf_event *event, struct perf_event_context *ctx)
 		add_event_to_groups(event, ctx);
 	}
 
-	list_update_cgroup_event(event, ctx, true);
-
 	list_add_rcu(&event->event_entry, &ctx->event_list);
 	ctx->nr_events++;
 	if (event->attr.inherit_stat)
 		ctx->nr_stat++;
 
+	if (event->state > PERF_EVENT_STATE_OFF)
+		perf_cgroup_event_enable(event, ctx);
+
 	ctx->generation++;
 }
 
@@ -1976,8 +1987,6 @@ list_del_event(struct perf_event *event, struct perf_event_context *ctx)
 
 	event->attach_state &= ~PERF_ATTACH_CONTEXT;
 
-	list_update_cgroup_event(event, ctx, false);
-
 	ctx->nr_events--;
 	if (event->attr.inherit_stat)
 		ctx->nr_stat--;
@@ -1994,8 +2003,10 @@ list_del_event(struct perf_event *event, struct perf_event_context *ctx)
 	 * of error state is by explicit re-enabling
 	 * of the event
 	 */
-	if (event->state > PERF_EVENT_STATE_OFF)
+	if (event->state > PERF_EVENT_STATE_OFF) {
+		perf_cgroup_event_disable(event, ctx);
 		perf_event_set_state(event, PERF_EVENT_STATE_OFF);
+	}
 
 	ctx->generation++;
 }
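Note: taken together, the list_add_event() and list_del_event() hunks above establish an invariant: ctx->nr_cgroups counts only cgroup events whose state is above PERF_EVENT_STATE_OFF, so every transition across that boundary must pair with exactly one enable or disable call. The patch places those calls at each call site; purely as an illustration, the sketch below folds the pairing into a single hypothetical state setter (all names made up, not the kernel's).

#include <assert.h>
#include <stdio.h>

/* toy state values mirroring the ordering of the kernel's enum,
 * where ERROR < OFF < INACTIVE < ACTIVE */
enum toy_state { TOY_ERROR = -2, TOY_OFF = 0, TOY_INACTIVE = 1, TOY_ACTIVE = 2 };

struct toy_ctx { int nr_cgroups; };
struct toy_event { enum toy_state state; };

static void toy_set_state(struct toy_event *event, struct toy_ctx *ctx,
			  enum toy_state new_state)
{
	/* what the patch does at each call site, folded into one place:
	 * dropping to OFF/ERROR uncounts, rising above OFF counts */
	if (event->state > TOY_OFF && new_state <= TOY_OFF)
		ctx->nr_cgroups--;		/* like perf_cgroup_event_disable() */
	else if (event->state <= TOY_OFF && new_state > TOY_OFF)
		ctx->nr_cgroups++;		/* like perf_cgroup_event_enable() */
	event->state = new_state;
}

int main(void)
{
	struct toy_ctx ctx = { 0 };
	struct toy_event event = { .state = TOY_OFF };

	toy_set_state(&event, &ctx, TOY_INACTIVE);	/* add/enable: counted */
	toy_set_state(&event, &ctx, TOY_ACTIVE);	/* sched in: no change */
	toy_set_state(&event, &ctx, TOY_ERROR);		/* error: uncounted once */
	assert(ctx.nr_cgroups == 0);

	puts("counter balanced across every OFF-boundary crossing");
	return 0;
}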
@@ -2226,6 +2237,7 @@ event_sched_out(struct perf_event *event,
 
 	if (READ_ONCE(event->pending_disable) >= 0) {
 		WRITE_ONCE(event->pending_disable, -1);
+		perf_cgroup_event_disable(event, ctx);
 		state = PERF_EVENT_STATE_OFF;
 	}
 	perf_event_set_state(event, state);
@@ -2363,6 +2375,7 @@ static void __perf_event_disable(struct perf_event *event,
 		event_sched_out(event, cpuctx, ctx);
 
 	perf_event_set_state(event, PERF_EVENT_STATE_OFF);
+	perf_cgroup_event_disable(event, ctx);
 }
 
 /*
@@ -2746,7 +2759,7 @@ static int __perf_install_in_context(void *info)
 	}
 
 #ifdef CONFIG_CGROUP_PERF
-	if (is_cgroup_event(event)) {
+	if (event->state > PERF_EVENT_STATE_OFF && is_cgroup_event(event)) {
 		/*
 		 * If the current cgroup doesn't match the event's
 		 * cgroup, we should not try to schedule it.
@@ -2906,6 +2919,7 @@ static void __perf_event_enable(struct perf_event *event,
 		ctx_sched_out(ctx, cpuctx, EVENT_TIME);
 
 	perf_event_set_state(event, PERF_EVENT_STATE_INACTIVE);
+	perf_cgroup_event_enable(event, ctx);
 
 	if (!ctx->is_active)
 		return;
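Note: both perf_cgroup_event_enable() and the guarded install path above only publish the event's cgroup as cpuctx->cgrp when the currently running task's cgroup is a descendant of it; otherwise the event cannot count right now and cpuctx->cgrp stays unset. Below is a toy model of that ancestry walk; toy_cgroup and toy_is_descendant are hypothetical stand-ins, not the kernel's cgroup_is_descendant().

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

struct toy_cgroup {
	const char *name;
	struct toy_cgroup *parent;	/* NULL for the root */
};

/* walk up from @cur; true if @ancestor lies on the path to the root */
static int toy_is_descendant(const struct toy_cgroup *cur,
			     const struct toy_cgroup *ancestor)
{
	for (; cur; cur = cur->parent)
		if (cur == ancestor)
			return 1;
	return 0;
}

int main(void)
{
	struct toy_cgroup root = { "root", NULL };
	struct toy_cgroup a = { "a", &root };
	struct toy_cgroup ab = { "a/b", &a };
	struct toy_cgroup c = { "c", &root };

	/* a task in a/b counts toward an event attached to a ... */
	assert(toy_is_descendant(&ab, &a));
	/* ... but a task in c does not, so cpuctx->cgrp would stay unset */
	assert(!toy_is_descendant(&c, &a));

	puts("descendant check behaves as in the enable/install paths");
	return 0;
}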
@@ -3616,8 +3630,10 @@ static int merge_sched_in(struct perf_event *event, void *data)
 	}
 
 	if (event->state == PERF_EVENT_STATE_INACTIVE) {
-		if (event->attr.pinned)
+		if (event->attr.pinned) {
+			perf_cgroup_event_disable(event, ctx);
 			perf_event_set_state(event, PERF_EVENT_STATE_ERROR);
+		}
 
 		*can_add_hw = 0;
 		ctx->rotate_necessary = 1;