@@ -1217,23 +1217,28 @@ static int perf_mux_hrtimer_restart_ipi(void *arg)
 	return perf_mux_hrtimer_restart(arg);
 }
 
+static __always_inline struct perf_cpu_pmu_context *this_cpc(struct pmu *pmu)
+{
+	return this_cpu_ptr(pmu->cpu_pmu_context);
+}
+
 void perf_pmu_disable(struct pmu *pmu)
 {
-	int *count = &this_cpu_ptr(pmu->cpu_pmu_context)->pmu_disable_count;
+	int *count = &this_cpc(pmu)->pmu_disable_count;
 	if (!(*count)++)
 		pmu->pmu_disable(pmu);
 }
 
 void perf_pmu_enable(struct pmu *pmu)
 {
-	int *count = &this_cpu_ptr(pmu->cpu_pmu_context)->pmu_disable_count;
+	int *count = &this_cpc(pmu)->pmu_disable_count;
 	if (!--(*count))
 		pmu->pmu_enable(pmu);
 }
 
 static void perf_assert_pmu_disabled(struct pmu *pmu)
 {
-	int *count = &this_cpu_ptr(pmu->cpu_pmu_context)->pmu_disable_count;
+	int *count = &this_cpc(pmu)->pmu_disable_count;
 	WARN_ON_ONCE(*count == 0);
 }
 
@@ -2355,7 +2360,7 @@ static void
 event_sched_out(struct perf_event *event, struct perf_event_context *ctx)
 {
 	struct perf_event_pmu_context *epc = event->pmu_ctx;
-	struct perf_cpu_pmu_context *cpc = this_cpu_ptr(epc->pmu->cpu_pmu_context);
+	struct perf_cpu_pmu_context *cpc = this_cpc(epc->pmu);
 	enum perf_event_state state = PERF_EVENT_STATE_INACTIVE;
 
 	// XXX cpc serialization, probably per-cpu IRQ disabled
@@ -2496,9 +2501,8 @@ __perf_remove_from_context(struct perf_event *event,
 		pmu_ctx->rotate_necessary = 0;
 
 		if (ctx->task && ctx->is_active) {
-			struct perf_cpu_pmu_context *cpc;
+			struct perf_cpu_pmu_context *cpc = this_cpc(pmu_ctx->pmu);
 
-			cpc = this_cpu_ptr(pmu_ctx->pmu->cpu_pmu_context);
 			WARN_ON_ONCE(cpc->task_epc && cpc->task_epc != pmu_ctx);
 			cpc->task_epc = NULL;
 		}
@@ -2636,7 +2640,7 @@ static int
 event_sched_in(struct perf_event *event, struct perf_event_context *ctx)
 {
 	struct perf_event_pmu_context *epc = event->pmu_ctx;
-	struct perf_cpu_pmu_context *cpc = this_cpu_ptr(epc->pmu->cpu_pmu_context);
+	struct perf_cpu_pmu_context *cpc = this_cpc(epc->pmu);
 	int ret = 0;
 
 	WARN_ON_ONCE(event->ctx != ctx);
@@ -2743,7 +2747,7 @@ group_sched_in(struct perf_event *group_event, struct perf_event_context *ctx)
 static int group_can_go_on(struct perf_event *event, int can_add_hw)
 {
 	struct perf_event_pmu_context *epc = event->pmu_ctx;
-	struct perf_cpu_pmu_context *cpc = this_cpu_ptr(epc->pmu->cpu_pmu_context);
+	struct perf_cpu_pmu_context *cpc = this_cpc(epc->pmu);
 
 	/*
 	 * Groups consisting entirely of software events can always go on.
@@ -3366,9 +3370,8 @@ static void __pmu_ctx_sched_out(struct perf_event_pmu_context *pmu_ctx,
 	struct pmu *pmu = pmu_ctx->pmu;
 
 	if (ctx->task && !(ctx->is_active & EVENT_ALL)) {
-		struct perf_cpu_pmu_context *cpc;
+		struct perf_cpu_pmu_context *cpc = this_cpc(pmu);
 
-		cpc = this_cpu_ptr(pmu->cpu_pmu_context);
 		WARN_ON_ONCE(cpc->task_epc && cpc->task_epc != pmu_ctx);
 		cpc->task_epc = NULL;
 	}
@@ -3615,7 +3618,7 @@ static void perf_ctx_sched_task_cb(struct perf_event_context *ctx, bool sched_in
 	struct perf_cpu_pmu_context *cpc;
 
 	list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry) {
-		cpc = this_cpu_ptr(pmu_ctx->pmu->cpu_pmu_context);
+		cpc = this_cpc(pmu_ctx->pmu);
 
 		if (cpc->sched_cb_usage && pmu_ctx->pmu->sched_task)
 			pmu_ctx->pmu->sched_task(pmu_ctx, sched_in);
@@ -3724,7 +3727,7 @@ static DEFINE_PER_CPU(int, perf_sched_cb_usages);
 
 void perf_sched_cb_dec(struct pmu *pmu)
 {
-	struct perf_cpu_pmu_context *cpc = this_cpu_ptr(pmu->cpu_pmu_context);
+	struct perf_cpu_pmu_context *cpc = this_cpc(pmu);
 
 	this_cpu_dec(perf_sched_cb_usages);
 	barrier();
@@ -3736,7 +3739,7 @@ void perf_sched_cb_dec(struct pmu *pmu)
 
 void perf_sched_cb_inc(struct pmu *pmu)
 {
-	struct perf_cpu_pmu_context *cpc = this_cpu_ptr(pmu->cpu_pmu_context);
+	struct perf_cpu_pmu_context *cpc = this_cpc(pmu);
 
 	if (!cpc->sched_cb_usage++)
 		list_add(&cpc->sched_cb_entry, this_cpu_ptr(&sched_cb_list));
@@ -3853,7 +3856,7 @@ static void __link_epc(struct perf_event_pmu_context *pmu_ctx)
 	if (!pmu_ctx->ctx->task)
 		return;
 
-	cpc = this_cpu_ptr(pmu_ctx->pmu->cpu_pmu_context);
+	cpc = this_cpc(pmu_ctx->pmu);
 	WARN_ON_ONCE(cpc->task_epc && cpc->task_epc != pmu_ctx);
 	cpc->task_epc = pmu_ctx;
 }
@@ -3982,10 +3985,9 @@ static int merge_sched_in(struct perf_event *event, void *data)
 			perf_cgroup_event_disable(event, ctx);
 			perf_event_set_state(event, PERF_EVENT_STATE_ERROR);
 		} else {
-			struct perf_cpu_pmu_context *cpc;
+			struct perf_cpu_pmu_context *cpc = this_cpc(event->pmu_ctx->pmu);
 
 			event->pmu_ctx->rotate_necessary = 1;
-			cpc = this_cpu_ptr(event->pmu_ctx->pmu->cpu_pmu_context);
 			perf_mux_hrtimer_restart(cpc);
 			group_update_userpage(event);
 		}
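
For readers scanning the diff, here is a minimal userspace sketch of the pattern the patch factors out: every open-coded this_cpu_ptr(pmu->cpu_pmu_context) lookup is funneled through a single this_cpc(pmu) accessor, and the nesting counter behind perf_pmu_disable()/perf_pmu_enable() stays untouched. Only the accessor-plus-counter shape mirrors the patch; the fake per-CPU array, the current_cpu() helper and the dummy callbacks below are stand-ins invented for illustration, not kernel code.

/*
 * Userspace sketch (assumptions: fake per-CPU storage, hypothetical
 * current_cpu() pinned to CPU 0, dummy PMU callbacks).
 */
#include <stdio.h>

#define NR_CPUS 4

struct perf_cpu_pmu_context {
	int pmu_disable_count;
};

struct pmu {
	/* stand-in for the kernel's per-CPU allocation */
	struct perf_cpu_pmu_context cpu_pmu_context[NR_CPUS];
	void (*pmu_disable)(struct pmu *pmu);
	void (*pmu_enable)(struct pmu *pmu);
};

/* hypothetical stand-in for the real per-CPU lookup */
static inline int current_cpu(void) { return 0; }

/* the accessor the patch introduces, modulo the fake per-CPU indexing */
static inline struct perf_cpu_pmu_context *this_cpc(struct pmu *pmu)
{
	return &pmu->cpu_pmu_context[current_cpu()];
}

/* same nesting-counter logic as perf_pmu_disable()/perf_pmu_enable() */
static void perf_pmu_disable(struct pmu *pmu)
{
	int *count = &this_cpc(pmu)->pmu_disable_count;

	if (!(*count)++)
		pmu->pmu_disable(pmu);
}

static void perf_pmu_enable(struct pmu *pmu)
{
	int *count = &this_cpc(pmu)->pmu_disable_count;

	if (!--(*count))
		pmu->pmu_enable(pmu);
}

static void dummy_disable(struct pmu *pmu) { (void)pmu; puts("pmu disabled"); }
static void dummy_enable(struct pmu *pmu)  { (void)pmu; puts("pmu enabled"); }

int main(void)
{
	struct pmu pmu = { .pmu_disable = dummy_disable, .pmu_enable = dummy_enable };

	perf_pmu_disable(&pmu);		/* count 0 -> 1: callback fires */
	perf_pmu_disable(&pmu);		/* nested, 1 -> 2: no callback */
	perf_pmu_enable(&pmu);		/* 2 -> 1: still disabled */
	perf_pmu_enable(&pmu);		/* 1 -> 0: callback fires */
	return 0;
}

The refactor itself is purely mechanical: one always-inline helper replaces eleven open-coded lookups, and the call sites that previously declared cpc and assigned it on a separate line now initialize it at declaration.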