@@ -1219,7 +1219,7 @@ static int perf_mux_hrtimer_restart_ipi(void *arg)
 
 static __always_inline struct perf_cpu_pmu_context *this_cpc(struct pmu *pmu)
 {
-        return this_cpu_ptr(pmu->cpu_pmu_context);
+        return *this_cpu_ptr(pmu->cpu_pmu_context);
 }
 
 void perf_pmu_disable(struct pmu *pmu)
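[Note, not part of the patch: pmu->cpu_pmu_context changes from per-CPU storage of the structure itself to a per-CPU array of pointers, so this_cpu_ptr()/per_cpu_ptr() now yield a pointer to a pointer and need one extra dereference. A minimal sketch of that access pattern; struct foo, foo_slots and this_foo() are hypothetical names:]

/* Illustrative sketch only; struct foo / foo_slots are hypothetical. */
#include <linux/percpu.h>

struct foo { int val; };

static struct foo * __percpu *foo_slots;        /* alloc_percpu(struct foo *) */

static struct foo *this_foo(void)
{
        /* Each per-CPU slot holds a pointer, hence the leading '*'. */
        return *this_cpu_ptr(foo_slots);
}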
@@ -5007,11 +5007,14 @@ find_get_pmu_context(struct pmu *pmu, struct perf_event_context *ctx,
                  */
                 struct perf_cpu_pmu_context *cpc;
 
-                cpc = per_cpu_ptr(pmu->cpu_pmu_context, event->cpu);
+                cpc = *per_cpu_ptr(pmu->cpu_pmu_context, event->cpu);
                 epc = &cpc->epc;
                 raw_spin_lock_irq(&ctx->lock);
                 if (!epc->ctx) {
-                        atomic_set(&epc->refcount, 1);
+                        /*
+                         * One extra reference for the pmu; see perf_pmu_free().
+                         */
+                        atomic_set(&epc->refcount, 2);
                         epc->embedded = 1;
                         list_add(&epc->pmu_ctx_entry, &ctx->pmu_ctx_list);
                         epc->ctx = ctx;
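[Note, not part of the patch: the refcount now starts at 2 because the embedded perf_event_pmu_context has two owners, the context it is linked into via pmu_ctx_entry and the pmu itself, which drops its reference in perf_pmu_free(). A stand-alone sketch of that two-owner pattern; struct obj and its helpers are hypothetical:]

/* Illustrative sketch only; struct obj and its helpers are hypothetical. */
#include <linux/atomic.h>
#include <linux/slab.h>

struct obj {
        atomic_t refcount;
};

static void obj_put(struct obj *o)
{
        /* The last owner to drop its reference frees the object. */
        if (atomic_dec_and_test(&o->refcount))
                kfree(o);
}

static struct obj *obj_create(void)
{
        struct obj *o = kzalloc(sizeof(*o), GFP_KERNEL);

        if (o)
                atomic_set(&o->refcount, 2);    /* one reference per owner */
        return o;
}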
@@ -5087,6 +5090,15 @@ static void get_pmu_ctx(struct perf_event_pmu_context *epc)
         WARN_ON_ONCE(!atomic_inc_not_zero(&epc->refcount));
 }
 
+static void free_cpc_rcu(struct rcu_head *head)
+{
+        struct perf_cpu_pmu_context *cpc =
+                container_of(head, typeof(*cpc), epc.rcu_head);
+
+        kfree(cpc->epc.task_ctx_data);
+        kfree(cpc);
+}
+
 static void free_epc_rcu(struct rcu_head *head)
 {
         struct perf_event_pmu_context *epc = container_of(head, typeof(*epc), rcu_head);
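[Note, not part of the patch: free_cpc_rcu() walks back from the rcu_head embedded in cpc->epc to the surrounding perf_cpu_pmu_context, so the whole allocation (including task_ctx_data) is released only after an RCU grace period. A minimal sketch of that embed-and-recover pattern; struct outer and its helpers are hypothetical:]

/* Illustrative sketch only; struct outer and its helpers are hypothetical. */
#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct outer {
        int payload;
        struct rcu_head rcu;    /* embedded member handed to call_rcu() */
};

static void outer_free_rcu(struct rcu_head *head)
{
        /* Recover the containing object from the embedded rcu_head. */
        struct outer *o = container_of(head, struct outer, rcu);

        kfree(o);
}

static void outer_release(struct outer *o)
{
        call_rcu(&o->rcu, outer_free_rcu);      /* freed after a grace period */
}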
@@ -5121,8 +5133,10 @@ static void put_pmu_ctx(struct perf_event_pmu_context *epc)
 
         raw_spin_unlock_irqrestore(&ctx->lock, flags);
 
-        if (epc->embedded)
+        if (epc->embedded) {
+                call_rcu(&epc->rcu_head, free_cpc_rcu);
                 return;
+        }
 
         call_rcu(&epc->rcu_head, free_epc_rcu);
 }
@@ -11752,7 +11766,7 @@ perf_event_mux_interval_ms_store(struct device *dev,
         cpus_read_lock();
         for_each_online_cpu(cpu) {
                 struct perf_cpu_pmu_context *cpc;
-                cpc = per_cpu_ptr(pmu->cpu_pmu_context, cpu);
+                cpc = *per_cpu_ptr(pmu->cpu_pmu_context, cpu);
                 cpc->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * timer);
 
                 cpu_function_call(cpu, perf_mux_hrtimer_restart_ipi, cpc);
@@ -11925,7 +11939,25 @@ static void perf_pmu_free(struct pmu *pmu)
                 device_del(pmu->dev);
                 put_device(pmu->dev);
         }
-        free_percpu(pmu->cpu_pmu_context);
+
+        if (pmu->cpu_pmu_context) {
+                int cpu;
+
+                for_each_possible_cpu(cpu) {
+                        struct perf_cpu_pmu_context *cpc;
+
+                        cpc = *per_cpu_ptr(pmu->cpu_pmu_context, cpu);
+                        if (!cpc)
+                                continue;
+                        if (cpc->epc.embedded) {
+                                /* refcount managed */
+                                put_pmu_ctx(&cpc->epc);
+                                continue;
+                        }
+                        kfree(cpc);
+                }
+                free_percpu(pmu->cpu_pmu_context);
+        }
 }
 
 DEFINE_FREE(pmu_unregister, struct pmu *, if (_T) perf_pmu_free(_T))
@@ -11964,14 +11996,20 @@ int perf_pmu_register(struct pmu *_pmu, const char *name, int type)
                 return ret;
         }
 
-        pmu->cpu_pmu_context = alloc_percpu(struct perf_cpu_pmu_context);
+        pmu->cpu_pmu_context = alloc_percpu(struct perf_cpu_pmu_context *);
         if (!pmu->cpu_pmu_context)
                 return -ENOMEM;
 
         for_each_possible_cpu(cpu) {
-                struct perf_cpu_pmu_context *cpc;
+                struct perf_cpu_pmu_context *cpc =
+                        kmalloc_node(sizeof(struct perf_cpu_pmu_context),
+                                     GFP_KERNEL | __GFP_ZERO,
+                                     cpu_to_node(cpu));
+
+                if (!cpc)
+                        return -ENOMEM;
 
-                cpc = per_cpu_ptr(pmu->cpu_pmu_context, cpu);
+                *per_cpu_ptr(pmu->cpu_pmu_context, cpu) = cpc;
                 __perf_init_event_pmu_context(&cpc->epc, pmu);
                 __perf_mux_hrtimer_init(cpc, cpu);
         }
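[Note, not part of the patch: registration now allocates a per-CPU array of pointers and backs each slot with a node-local, zeroed allocation; perf_pmu_free() above is the matching teardown. A sketch of the same allocation shape under hypothetical foo/foo_slots names; kzalloc_node() stands in for kmalloc_node() with __GFP_ZERO:]

/* Illustrative sketch only; struct foo / foo_slots are hypothetical. */
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/cpumask.h>
#include <linux/topology.h>
#include <linux/errno.h>

struct foo { int val; };

static struct foo * __percpu *foo_slots;

static int foo_slots_alloc(void)
{
        int cpu;

        foo_slots = alloc_percpu(struct foo *); /* per-CPU array of pointers */
        if (!foo_slots)
                return -ENOMEM;

        for_each_possible_cpu(cpu) {
                struct foo *f = kzalloc_node(sizeof(*f), GFP_KERNEL,
                                             cpu_to_node(cpu));

                if (!f)
                        return -ENOMEM; /* caller unwinds, as perf_pmu_free() does */
                *per_cpu_ptr(foo_slots, cpu) = f;
        }
        return 0;
}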