@@ -4599,6 +4599,28 @@ static inline bool erratum_hsw11(struct perf_event *event)
4599
4599
X86_CONFIG (.event = 0xc0 , .umask = 0x01 );
4600
4600
}
4601
4601
4602
+ static struct event_constraint *
4603
+ arl_h_get_event_constraints (struct cpu_hw_events * cpuc , int idx ,
4604
+ struct perf_event * event )
4605
+ {
4606
+ struct x86_hybrid_pmu * pmu = hybrid_pmu (event -> pmu );
4607
+
4608
+ if (pmu -> pmu_type == hybrid_tiny )
4609
+ return cmt_get_event_constraints (cpuc , idx , event );
4610
+
4611
+ return mtl_get_event_constraints (cpuc , idx , event );
4612
+ }
4613
+
4614
+ static int arl_h_hw_config (struct perf_event * event )
4615
+ {
4616
+ struct x86_hybrid_pmu * pmu = hybrid_pmu (event -> pmu );
4617
+
4618
+ if (pmu -> pmu_type == hybrid_tiny )
4619
+ return intel_pmu_hw_config (event );
4620
+
4621
+ return adl_hw_config (event );
4622
+ }
4623
+
4602
4624
/*
4603
4625
* The HSW11 requires a period larger than 100 which is the same as the BDM11.
4604
4626
* A minimum period of 128 is enforced as well for the INST_RETIRED.ALL.
@@ -5974,6 +5996,37 @@ static struct attribute *lnl_hybrid_events_attrs[] = {
5974
5996
NULL
5975
5997
};
5976
5998
5999
/*
 * Topdown level-1 metric events for ARL-H.  Each attribute string holds
 * one encoding per hybrid PMU, separated by ';', and MUST be in PMU IDX
 * order: big core, small core, then the low-power tiny core.
 */
EVENT_ATTR_STR_HYBRID(topdown-retiring,
		      td_retiring_arl_h,
		      "event=0xc2,umask=0x02;event=0x00,umask=0x80;event=0xc2,umask=0x0",
		      hybrid_big_small_tiny);
EVENT_ATTR_STR_HYBRID(topdown-bad-spec,
		      td_bad_spec_arl_h,
		      "event=0x73,umask=0x0;event=0x00,umask=0x81;event=0x73,umask=0x0",
		      hybrid_big_small_tiny);
EVENT_ATTR_STR_HYBRID(topdown-fe-bound,
		      td_fe_bound_arl_h,
		      "event=0x9c,umask=0x01;event=0x00,umask=0x82;event=0x71,umask=0x0",
		      hybrid_big_small_tiny);
EVENT_ATTR_STR_HYBRID(topdown-be-bound,
		      td_be_bound_arl_h,
		      "event=0xa4,umask=0x02;event=0x00,umask=0x83;event=0x74,umask=0x0",
		      hybrid_big_small_tiny);
6017
/*
 * Topdown event attributes exported for the ARL-H hybrid PMUs.
 * The level-2 metrics (heavy-ops, br-mispredict, fetch-lat, mem-bound)
 * reuse the ADL attribute strings; only the level-1 metrics needed
 * ARL-H-specific, three-PMU encodings.
 */
static struct attribute *arl_h_hybrid_events_attrs[] = {
	EVENT_PTR(slots_adl),
	EVENT_PTR(td_retiring_arl_h),
	EVENT_PTR(td_bad_spec_arl_h),
	EVENT_PTR(td_fe_bound_arl_h),
	EVENT_PTR(td_be_bound_arl_h),
	EVENT_PTR(td_heavy_ops_adl),
	EVENT_PTR(td_br_mis_adl),
	EVENT_PTR(td_fetch_lat_adl),
	EVENT_PTR(td_mem_bound_adl),
	NULL,
};
5977
6030
/* Must be in IDX order */
5978
6031
EVENT_ATTR_STR_HYBRID (mem - loads , mem_ld_adl , "event=0xd0,umask=0x5,ldlat=3;event=0xcd,umask=0x1,ldlat=3" , hybrid_big_small );
5979
6032
EVENT_ATTR_STR_HYBRID (mem - stores , mem_st_adl , "event=0xd0,umask=0x6;event=0xcd,umask=0x2" , hybrid_big_small );
@@ -5992,6 +6045,21 @@ static struct attribute *mtl_hybrid_mem_attrs[] = {
5992
6045
NULL
5993
6046
};
5994
6047
6048
/*
 * Memory-access sampling events for ARL-H.  One encoding per hybrid PMU
 * in IDX order (big;small;tiny); the tiny core reuses the big-core
 * encoding for both loads and stores.
 */
EVENT_ATTR_STR_HYBRID(mem-loads,
		      mem_ld_arl_h,
		      "event=0xd0,umask=0x5,ldlat=3;event=0xcd,umask=0x1,ldlat=3;event=0xd0,umask=0x5,ldlat=3",
		      hybrid_big_small_tiny);
EVENT_ATTR_STR_HYBRID(mem-stores,
		      mem_st_arl_h,
		      "event=0xd0,umask=0x6;event=0xcd,umask=0x2;event=0xd0,umask=0x6",
		      hybrid_big_small_tiny);
6057
/* Memory-access event attributes exported for the ARL-H hybrid PMUs. */
static struct attribute *arl_h_hybrid_mem_attrs[] = {
	EVENT_PTR(mem_ld_arl_h),
	EVENT_PTR(mem_st_arl_h),
	NULL,
};
6062
+
5995
6063
EVENT_ATTR_STR_HYBRID (tx - start , tx_start_adl , "event=0xc9,umask=0x1" , hybrid_big );
5996
6064
EVENT_ATTR_STR_HYBRID (tx - commit , tx_commit_adl , "event=0xc9,umask=0x2" , hybrid_big );
5997
6065
EVENT_ATTR_STR_HYBRID (tx - abort , tx_abort_adl , "event=0xc9,umask=0x4" , hybrid_big );
@@ -6015,8 +6083,8 @@ static struct attribute *adl_hybrid_tsx_attrs[] = {
6015
6083
6016
6084
FORMAT_ATTR_HYBRID(in_tx, hybrid_big);
FORMAT_ATTR_HYBRID(in_tx_cp, hybrid_big);
/*
 * offcore_rsp and ldlat are also supported by the low-power (tiny)
 * module on ARL-H, so expose them on all three hybrid PMU types.
 */
FORMAT_ATTR_HYBRID(offcore_rsp, hybrid_big_small_tiny);
FORMAT_ATTR_HYBRID(ldlat, hybrid_big_small_tiny);
FORMAT_ATTR_HYBRID(frontend, hybrid_big);
6021
6089
6022
6090
#define ADL_HYBRID_RTM_FORMAT_ATTR \
@@ -6039,7 +6107,7 @@ static struct attribute *adl_hybrid_extra_attr[] = {
6039
6107
NULL
6040
6108
};
6041
6109
6042
/* snoop_rsp is an Atom-side format; valid on both small and tiny cores. */
FORMAT_ATTR_HYBRID(snoop_rsp, hybrid_small_tiny);
6043
6111
6044
6112
static struct attribute * mtl_hybrid_extra_attr_rtm [] = {
6045
6113
ADL_HYBRID_RTM_FORMAT_ATTR ,
@@ -7121,6 +7189,37 @@ __init int intel_pmu_init(void)
7121
7189
name = "lunarlake_hybrid" ;
7122
7190
break ;
7123
7191
7192
	case INTEL_ARROWLAKE_H:
		/*
		 * ARL-H is a three-way hybrid part: big core, small core,
		 * and a low-power ("tiny") Atom module, each with its own
		 * logical PMU.
		 */
		intel_pmu_init_hybrid(hybrid_big_small_tiny);

		/* ARL-H-specific hooks dispatch per hybrid PMU type. */
		x86_pmu.pebs_latency_data = arl_h_latency_data;
		x86_pmu.get_event_constraints = arl_h_get_event_constraints;
		x86_pmu.hw_config = arl_h_hw_config;

		td_attr = arl_h_hybrid_events_attrs;
		mem_attr = arl_h_hybrid_mem_attrs;
		/* TSX and extra-reg formats are shared with ADL/MTL. */
		tsx_attr = adl_hybrid_tsx_attrs;
		extra_attr = boot_cpu_has(X86_FEATURE_RTM) ?
			mtl_hybrid_extra_attr_rtm : mtl_hybrid_extra_attr;

		/* Initialize big core specific PerfMon capabilities. */
		pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_CORE_IDX];
		intel_pmu_init_lnc(&pmu->pmu);

		/* Initialize Atom core specific PerfMon capabilities. */
		pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_ATOM_IDX];
		intel_pmu_init_skt(&pmu->pmu);

		/* Initialize Lower Power Atom specific PerfMon capabilities. */
		pmu = &x86_pmu.hybrid_pmu[X86_HYBRID_PMU_TINY_IDX];
		intel_pmu_init_grt(&pmu->pmu);
		/* The tiny module uses the CMT extra (offcore/snoop) regs. */
		pmu->extra_regs = intel_cmt_extra_regs;

		intel_pmu_pebs_data_source_arl_h();
		pr_cont("ArrowLake-H Hybrid events, ");
		name = "arrowlake_h_hybrid";
		break;
7124
7223
default :
7125
7224
switch (x86_pmu .version ) {
7126
7225
case 1 :
0 commit comments