Skip to content

Commit 527ec89

Browse files
x56Jasonlanlanxiyiji
authored and committed
deepin: Fix kabi for CWF PMU support
Intel inclusion category: bugfix bugzilla: https://gitee.com/openeuler/intel-kernel/issues/ICZHEB CVE: NA -------------------------------- The following upstream commits introduced 2 fields (config1 and dyn_constraint) in struct hw_perf_event, which breaks kABI. ec980e4 ("perf/x86/intel: Support auto counter reload") 4dfe323 ("perf/x86: Add dynamic constraint") To fix this kABI breakage, we introduce struct hw_perf_event_ext, and use one KABI_RESERVE field in struct perf_event as a pointer to this struct hw_perf_event_ext. This is viable because hw_perf_event is always embedded in struct perf_event, so we can always access hw_perf_event_ext from perf_event when needed. We also create a kmem_cache for struct hw_perf_event_ext. Another kABI change is caused by the following commit: 0e102ce ("KVM: x86/pmu: Change ambiguous _mask suffix to _rsvd in kvm_pmu") But the fix is trivial. Fixes: ec980e4 ("perf/x86/intel: Support auto counter reload") Fixes: 4dfe323 ("perf/x86: Add dynamic constraint") Signed-off-by: Jason Zeng <jason.zeng@intel.com> Link: #1356 [Backport: drop the arch/x86/include/asm/kvm_host.h change, since the rename is not applied there] Signed-off-by: Wentao Guan <guanwentao@uniontech.com>
1 parent 93cecaf commit 527ec89

File tree

4 files changed

+33
-12
lines changed

4 files changed

+33
-12
lines changed

arch/x86/events/core.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -675,7 +675,7 @@ static int __x86_pmu_event_init(struct perf_event *event)
675675
event->hw.idx = -1;
676676
event->hw.last_cpu = -1;
677677
event->hw.last_tag = ~0ULL;
678-
event->hw.dyn_constraint = ~0ULL;
678+
event->hw_ext->dyn_constraint = ~0ULL;
679679

680680
/* mark unused */
681681
event->hw.extra_reg.idx = EXTRA_REG_NONE;

arch/x86/events/intel/core.c

Lines changed: 9 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -2927,6 +2927,7 @@ static void intel_pmu_config_acr(int idx, u64 mask, u32 reload)
29272927
static void intel_pmu_enable_acr(struct perf_event *event)
29282928
{
29292929
struct hw_perf_event *hwc = &event->hw;
2930+
struct hw_perf_event_ext *hw_ext = event->hw_ext;
29302931

29312932
if (!is_acr_event_group(event) || !event->attr.config2) {
29322933
/*
@@ -2937,7 +2938,7 @@ static void intel_pmu_enable_acr(struct perf_event *event)
29372938
return;
29382939
}
29392940

2940-
intel_pmu_config_acr(hwc->idx, hwc->config1, -hwc->sample_period);
2941+
intel_pmu_config_acr(hwc->idx, hw_ext->config1, -hwc->sample_period);
29412942
}
29422943

29432944
DEFINE_STATIC_CALL_NULL(intel_pmu_enable_acr_event, intel_pmu_enable_acr);
@@ -2998,7 +2999,7 @@ static void intel_pmu_acr_late_setup(struct cpu_hw_events *cpuc)
29982999
if (i + idx >= cpuc->n_events ||
29993000
!is_acr_event_group(cpuc->event_list[i + idx]))
30003001
return;
3001-
__set_bit(cpuc->assign[i + idx], (unsigned long *)&event->hw.config1);
3002+
__set_bit(cpuc->assign[i + idx], (unsigned long *)&event->hw_ext->config1);
30023003
}
30033004
}
30043005
i = j - 1;
@@ -3844,9 +3845,9 @@ intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
38443845
if (cpuc->excl_cntrs)
38453846
return intel_get_excl_constraints(cpuc, event, idx, c2);
38463847

3847-
if (event->hw.dyn_constraint != ~0ULL) {
3848+
if (event->hw_ext->dyn_constraint != ~0ULL) {
38483849
c2 = dyn_constraint(cpuc, c2, idx);
3849-
c2->idxmsk64 &= event->hw.dyn_constraint;
3850+
c2->idxmsk64 &= event->hw_ext->dyn_constraint;
38503851
c2->weight = hweight64(c2->idxmsk64);
38513852
}
38523853

@@ -4208,7 +4209,7 @@ static bool intel_pmu_is_acr_group(struct perf_event *event)
42084209
static inline void intel_pmu_set_acr_cntr_constr(struct perf_event *event,
42094210
u64 *cause_mask, int *num)
42104211
{
4211-
event->hw.dyn_constraint &= hybrid(event->pmu, acr_cntr_mask64);
4212+
event->hw_ext->dyn_constraint &= hybrid(event->pmu, acr_cntr_mask64);
42124213
*cause_mask |= event->attr.config2;
42134214
*num += 1;
42144215
}
@@ -4217,7 +4218,7 @@ static inline void intel_pmu_set_acr_caused_constr(struct perf_event *event,
42174218
int idx, u64 cause_mask)
42184219
{
42194220
if (test_bit(idx, (unsigned long *)&cause_mask))
4220-
event->hw.dyn_constraint &= hybrid(event->pmu, acr_cause_mask64);
4221+
event->hw_ext->dyn_constraint &= hybrid(event->pmu, acr_cause_mask64);
42214222
}
42224223

42234224
static int intel_pmu_hw_config(struct perf_event *event)
@@ -4283,7 +4284,7 @@ static int intel_pmu_hw_config(struct perf_event *event)
42834284
return -EINVAL;
42844285
if (branch_sample_counters(leader)) {
42854286
num++;
4286-
leader->hw.dyn_constraint &= x86_pmu.lbr_counters;
4287+
leader->hw_ext->dyn_constraint &= x86_pmu.lbr_counters;
42874288
}
42884289
leader->hw.flags |= PERF_X86_EVENT_BRANCH_COUNTERS;
42894290

@@ -4292,7 +4293,7 @@ static int intel_pmu_hw_config(struct perf_event *event)
42924293
return -EINVAL;
42934294
if (branch_sample_counters(sibling)) {
42944295
num++;
4295-
sibling->hw.dyn_constraint &= x86_pmu.lbr_counters;
4296+
sibling->hw_ext->dyn_constraint &= x86_pmu.lbr_counters;
42964297
}
42974298
}
42984299

include/linux/perf_event.h

Lines changed: 12 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -150,6 +150,17 @@ struct hw_perf_event_extra {
150150

151151
static_assert((PERF_EVENT_FLAG_USER_READ_CNT & PERF_EVENT_FLAG_ARCH) == 0);
152152

153+
struct hw_perf_event_ext {
154+
#ifdef CONFIG_PERF_EVENTS
155+
union {
156+
struct {
157+
u64 config1;
158+
u64 dyn_constraint;
159+
};
160+
};
161+
#endif
162+
};
163+
153164
/**
154165
* struct hw_perf_event - performance event hardware details:
155166
*/
@@ -158,9 +169,7 @@ struct hw_perf_event {
158169
union {
159170
struct { /* hardware */
160171
u64 config;
161-
u64 config1;
162172
u64 last_tag;
163-
u64 dyn_constraint;
164173
unsigned long config_base;
165174
unsigned long event_base;
166175
int event_base_rdpmc;
@@ -854,7 +863,7 @@ struct perf_event {
854863
*/
855864
__u32 orig_type;
856865

857-
DEEPIN_KABI_RESERVE(1)
866+
DEEPIN_KABI_USE(1, struct hw_perf_event_ext *hw_ext)
858867
DEEPIN_KABI_RESERVE(2)
859868
DEEPIN_KABI_RESERVE(3)
860869
DEEPIN_KABI_RESERVE(4)

kernel/events/core.c

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -425,6 +425,7 @@ static DEFINE_MUTEX(pmus_lock);
425425
static struct srcu_struct pmus_srcu;
426426
static cpumask_var_t perf_online_mask;
427427
static struct kmem_cache *perf_event_cache;
428+
static struct kmem_cache *perf_hw_event_cache;
428429

429430
/*
430431
* perf event paranoia level:
@@ -5012,6 +5013,7 @@ static void free_event_rcu(struct rcu_head *head)
50125013
if (event->ns)
50135014
put_pid_ns(event->ns);
50145015
perf_event_free_filter(event);
5016+
kmem_cache_free(perf_hw_event_cache, event->hw_ext);
50155017
kmem_cache_free(perf_event_cache, event);
50165018
}
50175019

@@ -12067,6 +12069,14 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
1206712069
if (!event)
1206812070
return ERR_PTR(-ENOMEM);
1206912071

12072+
event->hw_ext = kmem_cache_alloc_node(perf_hw_event_cache,
12073+
GFP_KERNEL | __GFP_ZERO,
12074+
node);
12075+
if (!event->hw_ext) {
12076+
kmem_cache_free(perf_event_cache, event);
12077+
return ERR_PTR(-ENOMEM);
12078+
}
12079+
1207012080
/*
1207112081
* Single events are their own group leaders, with an
1207212082
* empty sibling list:
@@ -13929,6 +13939,7 @@ void __init perf_event_init(void)
1392913939
WARN(ret, "hw_breakpoint initialization failed with: %d", ret);
1393013940

1393113941
perf_event_cache = KMEM_CACHE(perf_event, SLAB_PANIC);
13942+
perf_hw_event_cache = KMEM_CACHE(hw_perf_event_ext, SLAB_PANIC);
1393213943

1393313944
/*
1393413945
* Build time assertion that we keep the data_head at the intended

0 commit comments

Comments
 (0)