Skip to content

Commit a932aa0

Browse files
Kan Liang authored and Peter Zijlstra committed
perf/x86: Add Lunar Lake and Arrow Lake support
From PMU's perspective, Lunar Lake and Arrow Lake are similar to the previous generation Meteor Lake. Both are hybrid platforms, with e-core and p-core. The key differences include: - The e-core supports 3 new fixed counters - The p-core supports an updated PEBS Data Source format - More GP counters (Updated event constraint table) - New Architectural performance monitoring V6 (New Perfmon MSRs aliasing, umask2, eq). - New PEBS format V6 (Counters Snapshotting group) - New RDPMC metrics clear mode The legacy features, the 3 new fixed counters and updated event constraint table are enabled in this patch. The new PEBS data source format, the architectural performance monitoring V6, the PEBS format V6, and the new RDPMC metrics clear mode are supported in the following patches. Signed-off-by: Kan Liang <[email protected]> Signed-off-by: Peter Zijlstra (Intel) <[email protected]> Reviewed-by: Andi Kleen <[email protected]> Reviewed-by: Ian Rogers <[email protected]> Link: https://lkml.kernel.org/r/[email protected]
1 parent 722e42e commit a932aa0

File tree

4 files changed

+147
-0
lines changed

4 files changed

+147
-0
lines changed

arch/x86/events/intel/core.c

Lines changed: 117 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -220,6 +220,17 @@ static struct event_constraint intel_grt_event_constraints[] __read_mostly = {
220220
EVENT_CONSTRAINT_END
221221
};
222222

223+
/*
 * Event constraints for the Skymont e-core (Lunar Lake / Arrow Lake Atom).
 *
 * Compared with the previous-generation e-core tables, this adds the three
 * new fixed counters (4-6) for the topdown events; note there is no fixed
 * counter 3 (SLOTS) entry, and fixed counter 2 accepts both the pseudo
 * encoding 0x0300 and the GP encoding 0x013c for reference cycles.
 */
static struct event_constraint intel_skt_event_constraints[] __read_mostly = {
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* pseudo CPU_CLK_UNHALTED.REF */
	FIXED_EVENT_CONSTRAINT(0x013c, 2), /* CPU_CLK_UNHALTED.REF_TSC_P */
	FIXED_EVENT_CONSTRAINT(0x0073, 4), /* TOPDOWN_BAD_SPECULATION.ALL */
	FIXED_EVENT_CONSTRAINT(0x019c, 5), /* TOPDOWN_FE_BOUND.ALL */
	FIXED_EVENT_CONSTRAINT(0x02c2, 6), /* TOPDOWN_RETIRING.ALL */
	EVENT_CONSTRAINT_END
};
233+
223234
static struct event_constraint intel_skl_event_constraints[] = {
224235
FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
225236
FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
@@ -370,6 +381,55 @@ static struct extra_reg intel_rwc_extra_regs[] __read_mostly = {
370381
EVENT_EXTRA_END
371382
};
372383

384+
/*
 * Event constraints for the Lion Cove p-core (Lunar Lake / Arrow Lake).
 *
 * Fixed counters, topdown metric pseudo-events, and the GP-counter
 * restrictions for this core's updated (larger) GP counter set.
 */
static struct event_constraint intel_lnc_event_constraints[] = {
	FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
	FIXED_EVENT_CONSTRAINT(0x0100, 0), /* INST_RETIRED.PREC_DIST */
	FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
	FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
	FIXED_EVENT_CONSTRAINT(0x013c, 2), /* CPU_CLK_UNHALTED.REF_TSC_P */
	FIXED_EVENT_CONSTRAINT(0x0400, 3), /* SLOTS */

	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_RETIRING, 0),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BAD_SPEC, 1),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_FE_BOUND, 2),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BE_BOUND, 3),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_HEAVY_OPS, 4),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_BR_MISPREDICT, 5),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_FETCH_LAT, 6),
	METRIC_EVENT_CONSTRAINT(INTEL_TD_METRIC_MEM_BOUND, 7),

	INTEL_UEVENT_CONSTRAINT(0x0148, 0x4),
	INTEL_UEVENT_CONSTRAINT(0x0175, 0x4),

	INTEL_EVENT_CONSTRAINT(0x2e, 0x3ff),
	INTEL_EVENT_CONSTRAINT(0x3c, 0x3ff),
	/*
	 * Generally event codes < 0x90 are restricted to counters 0-3.
	 * 0x2E and 0x3C are exceptions, which have no restriction.
	 */
	INTEL_EVENT_CONSTRAINT_RANGE(0x01, 0x8f, 0xf),

	INTEL_UEVENT_CONSTRAINT(0x01a3, 0xf),
	INTEL_UEVENT_CONSTRAINT(0x02a3, 0xf),
	INTEL_UEVENT_CONSTRAINT(0x08a3, 0x4),
	INTEL_UEVENT_CONSTRAINT(0x0ca3, 0x4),
	INTEL_UEVENT_CONSTRAINT(0x04a4, 0x1),
	INTEL_UEVENT_CONSTRAINT(0x08a4, 0x1),
	INTEL_UEVENT_CONSTRAINT(0x10a4, 0x1),
	INTEL_UEVENT_CONSTRAINT(0x01b1, 0x8),
	INTEL_UEVENT_CONSTRAINT(0x02cd, 0x3),
	INTEL_EVENT_CONSTRAINT(0xce, 0x1),

	INTEL_EVENT_CONSTRAINT_RANGE(0xd0, 0xdf, 0xf),
	/*
	 * Generally event codes >= 0x90 are likely to have no restrictions.
	 * The exceptions are defined above.
	 */
	INTEL_EVENT_CONSTRAINT_RANGE(0x90, 0xfe, 0x3ff),

	EVENT_CONSTRAINT_END
};
431+
432+
373433
EVENT_ATTR_STR(mem-loads, mem_ld_nhm, "event=0x0b,umask=0x10,ldlat=3");
374434
EVENT_ATTR_STR(mem-loads, mem_ld_snb, "event=0xcd,umask=0x1,ldlat=3");
375435
EVENT_ATTR_STR(mem-stores, mem_st_snb, "event=0xcd,umask=0x2");
@@ -5790,6 +5850,23 @@ static struct attribute *adl_hybrid_events_attrs[] = {
57905850
NULL,
57915851
};
57925852

5853+
EVENT_ATTR_STR_HYBRID(topdown-retiring, td_retiring_lnl, "event=0xc2,umask=0x02;event=0x00,umask=0x80", hybrid_big_small);
5854+
EVENT_ATTR_STR_HYBRID(topdown-fe-bound, td_fe_bound_lnl, "event=0x9c,umask=0x01;event=0x00,umask=0x82", hybrid_big_small);
5855+
EVENT_ATTR_STR_HYBRID(topdown-be-bound, td_be_bound_lnl, "event=0xa4,umask=0x02;event=0x00,umask=0x83", hybrid_big_small);
5856+
5857+
static struct attribute *lnl_hybrid_events_attrs[] = {
5858+
EVENT_PTR(slots_adl),
5859+
EVENT_PTR(td_retiring_lnl),
5860+
EVENT_PTR(td_bad_spec_adl),
5861+
EVENT_PTR(td_fe_bound_lnl),
5862+
EVENT_PTR(td_be_bound_lnl),
5863+
EVENT_PTR(td_heavy_ops_adl),
5864+
EVENT_PTR(td_br_mis_adl),
5865+
EVENT_PTR(td_fetch_lat_adl),
5866+
EVENT_PTR(td_mem_bound_adl),
5867+
NULL
5868+
};
5869+
57935870
/* Must be in IDX order */
57945871
EVENT_ATTR_STR_HYBRID(mem-loads, mem_ld_adl, "event=0xd0,umask=0x5,ldlat=3;event=0xcd,umask=0x1,ldlat=3", hybrid_big_small);
57955872
EVENT_ATTR_STR_HYBRID(mem-stores, mem_st_adl, "event=0xd0,umask=0x6;event=0xcd,umask=0x2", hybrid_big_small);
@@ -6139,6 +6216,21 @@ static __always_inline void intel_pmu_init_grt(struct pmu *pmu)
61396216
intel_pmu_ref_cycles_ext();
61406217
}
61416218

6219+
static __always_inline void intel_pmu_init_lnc(struct pmu *pmu)
6220+
{
6221+
intel_pmu_init_glc(pmu);
6222+
hybrid(pmu, event_constraints) = intel_lnc_event_constraints;
6223+
hybrid(pmu, pebs_constraints) = intel_lnc_pebs_event_constraints;
6224+
hybrid(pmu, extra_regs) = intel_rwc_extra_regs;
6225+
}
6226+
6227+
static __always_inline void intel_pmu_init_skt(struct pmu *pmu)
6228+
{
6229+
intel_pmu_init_grt(pmu);
6230+
hybrid(pmu, event_constraints) = intel_skt_event_constraints;
6231+
hybrid(pmu, extra_regs) = intel_cmt_extra_regs;
6232+
}
6233+
61426234
__init int intel_pmu_init(void)
61436235
{
61446236
struct attribute **extra_skl_attr = &empty_attrs;
@@ -6864,6 +6956,31 @@ __init int intel_pmu_init(void)
68646956
name = "meteorlake_hybrid";
68656957
break;
68666958

6959+
case INTEL_LUNARLAKE_M:
6960+
case INTEL_ARROWLAKE:
6961+
intel_pmu_init_hybrid(hybrid_big_small);
6962+
6963+
x86_pmu.get_event_constraints = mtl_get_event_constraints;
6964+
x86_pmu.hw_config = adl_hw_config;
6965+
6966+
td_attr = lnl_hybrid_events_attrs;
6967+
mem_attr = mtl_hybrid_mem_attrs;
6968+
tsx_attr = adl_hybrid_tsx_attrs;
6969+
extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
6970+
mtl_hybrid_extra_attr_rtm : mtl_hybrid_extra_attr;
6971+
6972+
/* Initialize big core specific PerfMon capabilities.*/
6973+
pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX];
6974+
intel_pmu_init_lnc(&pmu->pmu);
6975+
6976+
/* Initialize Atom core specific PerfMon capabilities.*/
6977+
pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_ATOM_IDX];
6978+
intel_pmu_init_skt(&pmu->pmu);
6979+
6980+
pr_cont("Lunarlake Hybrid events, ");
6981+
name = "lunarlake_hybrid";
6982+
break;
6983+
68676984
default:
68686985
switch (x86_pmu.version) {
68696986
case 1:

arch/x86/events/intel/ds.c

Lines changed: 24 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1086,6 +1086,30 @@ struct event_constraint intel_glc_pebs_event_constraints[] = {
10861086
EVENT_CONSTRAINT_END
10871087
};
10881088

1089+
/*
 * PEBS event constraints for the Lion Cove p-core.
 * Non-static: referenced from core.c via the declaration in perf_event.h.
 */
struct event_constraint intel_lnc_pebs_event_constraints[] = {
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x100, 0x100000000ULL),	/* INST_RETIRED.PREC_DIST */
	INTEL_FLAGS_UEVENT_CONSTRAINT(0x0400, 0x800000000ULL),

	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x11d0, 0xf),	/* MEM_INST_RETIRED.STLB_MISS_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x12d0, 0xf),	/* MEM_INST_RETIRED.STLB_MISS_STORES */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x21d0, 0xf),	/* MEM_INST_RETIRED.LOCK_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x41d0, 0xf),	/* MEM_INST_RETIRED.SPLIT_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x42d0, 0xf),	/* MEM_INST_RETIRED.SPLIT_STORES */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_LD(0x81d0, 0xf),	/* MEM_INST_RETIRED.ALL_LOADS */
	INTEL_FLAGS_UEVENT_CONSTRAINT_DATALA_ST(0x82d0, 0xf),	/* MEM_INST_RETIRED.ALL_STORES */

	INTEL_FLAGS_EVENT_CONSTRAINT_DATALA_LD_RANGE(0xd1, 0xd4, 0xf),

	INTEL_FLAGS_EVENT_CONSTRAINT(0xd0, 0xf),

	/*
	 * Everything else is handled by PMU_FL_PEBS_ALL, because we
	 * need the full constraints from the main table.
	 */

	EVENT_CONSTRAINT_END
};
1112+
10891113
struct event_constraint *intel_pebs_constraints(struct perf_event *event)
10901114
{
10911115
struct event_constraint *pebs_constraints = hybrid(event->pmu, pebs_constraints);

arch/x86/events/perf_event.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1582,6 +1582,8 @@ extern struct event_constraint intel_icl_pebs_event_constraints[];
15821582

15831583
extern struct event_constraint intel_glc_pebs_event_constraints[];
15841584

1585+
extern struct event_constraint intel_lnc_pebs_event_constraints[];	/* defined in ds.c */
1586+
15851587
struct event_constraint *intel_pebs_constraints(struct perf_event *event);
15861588

15871589
void intel_pmu_pebs_add(struct perf_event *event);

arch/x86/include/asm/perf_event.h

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -307,6 +307,10 @@ struct x86_pmu_capability {
307307
#define INTEL_PMC_IDX_FIXED_SLOTS (INTEL_PMC_IDX_FIXED + 3)
308308
#define INTEL_PMC_MSK_FIXED_SLOTS (1ULL << INTEL_PMC_IDX_FIXED_SLOTS)
309309

310+
/* TOPDOWN_BAD_SPECULATION.ALL: fixed counter 4 (Atom only) */
/* TOPDOWN_FE_BOUND.ALL: fixed counter 5 (Atom only) */
/* TOPDOWN_RETIRING.ALL: fixed counter 6 (Atom only) */
313+
310314
static inline bool use_fixed_pseudo_encoding(u64 code)
311315
{
312316
return !(code & 0xff);

0 commit comments

Comments (0)