Skip to content

Commit 4c1c9de

Browse files
ubizjak authored and Peter Zijlstra committed
perf/x86: Use local64_try_cmpxchg
Use local64_try_cmpxchg instead of local64_cmpxchg (*ptr, old, new) == old. x86 CMPXCHG instruction returns success in ZF flag, so this change saves a compare after cmpxchg (and related move instruction in front of cmpxchg). Also, try_cmpxchg implicitly assigns old *ptr value to "old" when cmpxchg fails. There is no need to re-read the value in the loop. No functional change intended. Cc. "H. Peter Anvin" <[email protected]> Signed-off-by: Uros Bizjak <[email protected]> Signed-off-by: Peter Zijlstra (Intel) <[email protected]> Link: https://lkml.kernel.org/r/[email protected]
1 parent 7c21282 commit 4c1c9de

File tree

4 files changed

+13
-19
lines changed

4 files changed

+13
-19
lines changed

arch/x86/events/amd/ibs.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -156,8 +156,8 @@ perf_event_try_update(struct perf_event *event, u64 new_raw_count, int width)
156156
* count to the generic event atomically:
157157
*/
158158
prev_raw_count = local64_read(&hwc->prev_count);
159-
if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
160-
new_raw_count) != prev_raw_count)
159+
if (!local64_try_cmpxchg(&hwc->prev_count,
160+
&prev_raw_count, new_raw_count))
161161
return 0;
162162

163163
/*

arch/x86/events/core.c

Lines changed: 4 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -129,13 +129,11 @@ u64 x86_perf_event_update(struct perf_event *event)
129129
* exchange a new raw count - then add that new-prev delta
130130
* count to the generic event atomically:
131131
*/
132-
again:
133132
prev_raw_count = local64_read(&hwc->prev_count);
134-
rdpmcl(hwc->event_base_rdpmc, new_raw_count);
135-
136-
if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
137-
new_raw_count) != prev_raw_count)
138-
goto again;
133+
do {
134+
rdpmcl(hwc->event_base_rdpmc, new_raw_count);
135+
} while (!local64_try_cmpxchg(&hwc->prev_count,
136+
&prev_raw_count, new_raw_count));
139137

140138
/*
141139
* Now we have the new raw value and have updated the prev

arch/x86/events/intel/cstate.c

Lines changed: 4 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -365,13 +365,11 @@ static void cstate_pmu_event_update(struct perf_event *event)
365365
struct hw_perf_event *hwc = &event->hw;
366366
u64 prev_raw_count, new_raw_count;
367367

368-
again:
369368
prev_raw_count = local64_read(&hwc->prev_count);
370-
new_raw_count = cstate_pmu_read_counter(event);
371-
372-
if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
373-
new_raw_count) != prev_raw_count)
374-
goto again;
369+
do {
370+
new_raw_count = cstate_pmu_read_counter(event);
371+
} while (!local64_try_cmpxchg(&hwc->prev_count,
372+
&prev_raw_count, new_raw_count));
375373

376374
local64_add(new_raw_count - prev_raw_count, &event->count);
377375
}

arch/x86/events/msr.c

Lines changed: 3 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -244,12 +244,10 @@ static void msr_event_update(struct perf_event *event)
244244
s64 delta;
245245

246246
/* Careful, an NMI might modify the previous event value: */
247-
again:
248247
prev = local64_read(&event->hw.prev_count);
249-
now = msr_read_counter(event);
250-
251-
if (local64_cmpxchg(&event->hw.prev_count, prev, now) != prev)
252-
goto again;
248+
do {
249+
now = msr_read_counter(event);
250+
} while (!local64_try_cmpxchg(&event->hw.prev_count, &prev, now));
253251

254252
delta = now - prev;
255253
if (unlikely(event->hw.event_base == MSR_SMI_COUNT)) {

0 commit comments

Comments (0)