Commit b2996f5

Authored by Peter Zijlstra, committed by Ingo Molnar

perf/core: Add this_cpc() helper

As a preparation for adding yet another indirection.

Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Signed-off-by: Ingo Molnar <[email protected]>
Reviewed-by: Ravi Bangoria <[email protected]>
Link: https://lore.kernel.org/r/[email protected]

Parent: 4baeb06
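The change itself is mechanical: every open-coded this_cpu_ptr(pmu->cpu_pmu_context) lookup below becomes a call to the new this_cpc() helper, so a follow-up patch can change how the per-CPU PMU context is reached by editing one function instead of every caller. A minimal userspace sketch of that accessor-helper pattern, with hypothetical names (cpu_ctx, this_ctx) that are not kernel code:

#include <stdio.h>

struct cpu_ctx {
        int disable_count;
};

/*
 * Today the context sits directly in thread-local storage; if an extra
 * indirection is added later, only this_ctx() has to change, never the
 * callers (hypothetical analogue of this_cpc()).
 */
static __thread struct cpu_ctx local_ctx;

static inline struct cpu_ctx *this_ctx(void)
{
        return &local_ctx;
}

int main(void)
{
        this_ctx()->disable_count++;
        printf("disable_count = %d\n", this_ctx()->disable_count);
        return 0;
}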

1 file changed: kernel/events/core.c (+18, -16)
@@ -1217,23 +1217,28 @@ static int perf_mux_hrtimer_restart_ipi(void *arg)
 	return perf_mux_hrtimer_restart(arg);
 }
 
+static __always_inline struct perf_cpu_pmu_context *this_cpc(struct pmu *pmu)
+{
+	return this_cpu_ptr(pmu->cpu_pmu_context);
+}
+
 void perf_pmu_disable(struct pmu *pmu)
 {
-	int *count = &this_cpu_ptr(pmu->cpu_pmu_context)->pmu_disable_count;
+	int *count = &this_cpc(pmu)->pmu_disable_count;
 	if (!(*count)++)
 		pmu->pmu_disable(pmu);
 }
 
 void perf_pmu_enable(struct pmu *pmu)
 {
-	int *count = &this_cpu_ptr(pmu->cpu_pmu_context)->pmu_disable_count;
+	int *count = &this_cpc(pmu)->pmu_disable_count;
 	if (!--(*count))
 		pmu->pmu_enable(pmu);
 }
 
 static void perf_assert_pmu_disabled(struct pmu *pmu)
 {
-	int *count = &this_cpu_ptr(pmu->cpu_pmu_context)->pmu_disable_count;
+	int *count = &this_cpc(pmu)->pmu_disable_count;
 	WARN_ON_ONCE(*count == 0);
 }
 
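The first hunk also shows how pmu_disable_count nests: the PMU's pmu_disable() callback runs only on the 0 -> 1 transition, pmu_enable() only on the 1 -> 0 transition, and perf_assert_pmu_disabled() warns if the count is zero. A standalone sketch of that nesting pattern, with simplified, hypothetical types (the real count lives in the per-CPU context returned by this_cpc()):

#include <assert.h>
#include <stdio.h>

struct fake_pmu {
        int disable_count;
        int hw_enabled;
};

static void fake_pmu_disable(struct fake_pmu *pmu)
{
        if (!(pmu->disable_count++))    /* only the outermost call touches the hardware */
                pmu->hw_enabled = 0;
}

static void fake_pmu_enable(struct fake_pmu *pmu)
{
        if (!--pmu->disable_count)      /* re-enable when the last nesting level exits */
                pmu->hw_enabled = 1;
}

int main(void)
{
        struct fake_pmu pmu = { .disable_count = 0, .hw_enabled = 1 };

        fake_pmu_disable(&pmu);
        fake_pmu_disable(&pmu);         /* nested: hardware stays disabled */
        fake_pmu_enable(&pmu);
        assert(!pmu.hw_enabled);
        fake_pmu_enable(&pmu);
        assert(pmu.hw_enabled);
        printf("nesting works\n");
        return 0;
}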
@@ -2355,7 +2360,7 @@ static void
 event_sched_out(struct perf_event *event, struct perf_event_context *ctx)
 {
 	struct perf_event_pmu_context *epc = event->pmu_ctx;
-	struct perf_cpu_pmu_context *cpc = this_cpu_ptr(epc->pmu->cpu_pmu_context);
+	struct perf_cpu_pmu_context *cpc = this_cpc(epc->pmu);
 	enum perf_event_state state = PERF_EVENT_STATE_INACTIVE;
 
 	// XXX cpc serialization, probably per-cpu IRQ disabled
@@ -2496,9 +2501,8 @@ __perf_remove_from_context(struct perf_event *event,
 	pmu_ctx->rotate_necessary = 0;
 
 	if (ctx->task && ctx->is_active) {
-		struct perf_cpu_pmu_context *cpc;
+		struct perf_cpu_pmu_context *cpc = this_cpc(pmu_ctx->pmu);
 
-		cpc = this_cpu_ptr(pmu_ctx->pmu->cpu_pmu_context);
 		WARN_ON_ONCE(cpc->task_epc && cpc->task_epc != pmu_ctx);
 		cpc->task_epc = NULL;
 	}
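This hunk, like the __pmu_ctx_sched_out() and __link_epc() hunks further down, preserves the same invariant: a PMU's per-CPU context points at no more than one task pmu_ctx at a time, and WARN_ON_ONCE() fires if the slot is already owned by a different context. A rough standalone sketch of that single-owner slot, with hypothetical, simplified types:

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

struct epc { int id; };         /* stands in for struct perf_event_pmu_context */

struct cpc {                    /* stands in for struct perf_cpu_pmu_context */
        struct epc *task_epc;
};

static void link_epc(struct cpc *cpc, struct epc *epc)
{
        /* assert() plays the role of WARN_ON_ONCE(): the slot must be free
         * or already point at us */
        assert(!cpc->task_epc || cpc->task_epc == epc);
        cpc->task_epc = epc;
}

static void unlink_epc(struct cpc *cpc, struct epc *epc)
{
        assert(!cpc->task_epc || cpc->task_epc == epc);
        cpc->task_epc = NULL;
}

int main(void)
{
        struct cpc cpc = { .task_epc = NULL };
        struct epc a = { .id = 1 };

        link_epc(&cpc, &a);
        unlink_epc(&cpc, &a);
        printf("slot empty: %d\n", cpc.task_epc == NULL);
        return 0;
}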
@@ -2636,7 +2640,7 @@ static int
 event_sched_in(struct perf_event *event, struct perf_event_context *ctx)
 {
 	struct perf_event_pmu_context *epc = event->pmu_ctx;
-	struct perf_cpu_pmu_context *cpc = this_cpu_ptr(epc->pmu->cpu_pmu_context);
+	struct perf_cpu_pmu_context *cpc = this_cpc(epc->pmu);
 	int ret = 0;
 
 	WARN_ON_ONCE(event->ctx != ctx);
@@ -2743,7 +2747,7 @@ group_sched_in(struct perf_event *group_event, struct perf_event_context *ctx)
 static int group_can_go_on(struct perf_event *event, int can_add_hw)
 {
 	struct perf_event_pmu_context *epc = event->pmu_ctx;
-	struct perf_cpu_pmu_context *cpc = this_cpu_ptr(epc->pmu->cpu_pmu_context);
+	struct perf_cpu_pmu_context *cpc = this_cpc(epc->pmu);
 
 	/*
 	 * Groups consisting entirely of software events can always go on.
@@ -3366,9 +3370,8 @@ static void __pmu_ctx_sched_out(struct perf_event_pmu_context *pmu_ctx,
 	struct pmu *pmu = pmu_ctx->pmu;
 
 	if (ctx->task && !(ctx->is_active & EVENT_ALL)) {
-		struct perf_cpu_pmu_context *cpc;
+		struct perf_cpu_pmu_context *cpc = this_cpc(pmu);
 
-		cpc = this_cpu_ptr(pmu->cpu_pmu_context);
 		WARN_ON_ONCE(cpc->task_epc && cpc->task_epc != pmu_ctx);
 		cpc->task_epc = NULL;
 	}
@@ -3615,7 +3618,7 @@ static void perf_ctx_sched_task_cb(struct perf_event_context *ctx, bool sched_in
 	struct perf_cpu_pmu_context *cpc;
 
 	list_for_each_entry(pmu_ctx, &ctx->pmu_ctx_list, pmu_ctx_entry) {
-		cpc = this_cpu_ptr(pmu_ctx->pmu->cpu_pmu_context);
+		cpc = this_cpc(pmu_ctx->pmu);
 
 		if (cpc->sched_cb_usage && pmu_ctx->pmu->sched_task)
 			pmu_ctx->pmu->sched_task(pmu_ctx, sched_in);
@@ -3724,7 +3727,7 @@ static DEFINE_PER_CPU(int, perf_sched_cb_usages);
 
 void perf_sched_cb_dec(struct pmu *pmu)
 {
-	struct perf_cpu_pmu_context *cpc = this_cpu_ptr(pmu->cpu_pmu_context);
+	struct perf_cpu_pmu_context *cpc = this_cpc(pmu);
 
 	this_cpu_dec(perf_sched_cb_usages);
 	barrier();
@@ -3736,7 +3739,7 @@ void perf_sched_cb_dec(struct pmu *pmu)
 
 void perf_sched_cb_inc(struct pmu *pmu)
 {
-	struct perf_cpu_pmu_context *cpc = this_cpu_ptr(pmu->cpu_pmu_context);
+	struct perf_cpu_pmu_context *cpc = this_cpc(pmu);
 
 	if (!cpc->sched_cb_usage++)
 		list_add(&cpc->sched_cb_entry, this_cpu_ptr(&sched_cb_list));
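perf_sched_cb_inc() and perf_sched_cb_dec() count users of the context-switch callback per PMU; the hunk above shows that only the first user (0 -> 1) adds the per-CPU context to the callback list. A simplified standalone sketch of that first-user/last-user registration pattern, with hypothetical types (the real code also maintains the global perf_sched_cb_usages per-CPU counter and uses kernel list primitives):

#include <stdbool.h>
#include <stdio.h>

struct cb_entry {
        int usage;
        bool on_list;   /* stands in for list_add()/list_del() on the per-CPU list */
};

static void cb_inc(struct cb_entry *e)
{
        if (!e->usage++)        /* 0 -> 1: the first user registers the callback */
                e->on_list = true;
}

static void cb_dec(struct cb_entry *e)
{
        if (!--e->usage)        /* 1 -> 0: the last user unregisters it */
                e->on_list = false;
}

int main(void)
{
        struct cb_entry e = { 0, false };

        cb_inc(&e);
        cb_inc(&e);
        cb_dec(&e);
        printf("usage=%d on_list=%d\n", e.usage, e.on_list);   /* still registered */
        cb_dec(&e);
        printf("usage=%d on_list=%d\n", e.usage, e.on_list);   /* unregistered */
        return 0;
}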
@@ -3853,7 +3856,7 @@ static void __link_epc(struct perf_event_pmu_context *pmu_ctx)
 	if (!pmu_ctx->ctx->task)
 		return;
 
-	cpc = this_cpu_ptr(pmu_ctx->pmu->cpu_pmu_context);
+	cpc = this_cpc(pmu_ctx->pmu);
 	WARN_ON_ONCE(cpc->task_epc && cpc->task_epc != pmu_ctx);
 	cpc->task_epc = pmu_ctx;
 }
@@ -3982,10 +3985,9 @@ static int merge_sched_in(struct perf_event *event, void *data)
 		perf_cgroup_event_disable(event, ctx);
 		perf_event_set_state(event, PERF_EVENT_STATE_ERROR);
 	} else {
-		struct perf_cpu_pmu_context *cpc;
+		struct perf_cpu_pmu_context *cpc = this_cpc(event->pmu_ctx->pmu);
 
 		event->pmu_ctx->rotate_necessary = 1;
-		cpc = this_cpu_ptr(event->pmu_ctx->pmu->cpu_pmu_context);
 		perf_mux_hrtimer_restart(cpc);
 		group_update_userpage(event);
 	}
