@@ -61,7 +61,8 @@ static const struct class pseudo_lock_class = {
 };
 
 /**
- * get_prefetch_disable_bits - prefetch disable bits of supported platforms
+ * resctrl_arch_get_prefetch_disable_bits - prefetch disable bits of supported
+ *                                          platforms
  * @void: It takes no parameters.
  *
  * Capture the list of platforms that have been validated to support
@@ -75,13 +76,13 @@ static const struct class pseudo_lock_class = {
  * in the SDM.
  *
  * When adding a platform here also add support for its cache events to
- * measure_cycles_perf_fn()
+ * resctrl_arch_measure_l*_residency()
  *
  * Return:
  * If platform is supported, the bits to disable hardware prefetchers, 0
  * if platform is not supported.
  */
-static u64 get_prefetch_disable_bits(void)
+u64 resctrl_arch_get_prefetch_disable_bits(void)
 {
 	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
 	    boot_cpu_data.x86 != 6)
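
Note: these helpers lose their static qualifier in this patch so code outside this file can call them. A minimal sketch of the matching prototypes such callers would need (the header they actually land in is not part of this hunk and is only an assumption here):

/* Illustrative prototypes for the renamed, now non-static arch helpers. */
u64 resctrl_arch_get_prefetch_disable_bits(void);
int resctrl_arch_pseudo_lock_fn(void *_rdtgrp);
int resctrl_arch_measure_cycles_lat_fn(void *_plr);
int resctrl_arch_measure_l2_residency(void *_plr);
int resctrl_arch_measure_l3_residency(void *_plr);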
@@ -408,7 +409,7 @@ static void pseudo_lock_free(struct rdtgroup *rdtgrp)
 }
 
 /**
- * pseudo_lock_fn - Load kernel memory into cache
+ * resctrl_arch_pseudo_lock_fn - Load kernel memory into cache
  * @_rdtgrp: resource group to which pseudo-lock region belongs
  *
  * This is the core pseudo-locking flow.
@@ -426,7 +427,7 @@ static void pseudo_lock_free(struct rdtgroup *rdtgrp)
  *
  * Return: 0. Waiter on waitqueue will be woken on completion.
  */
-static int pseudo_lock_fn(void *_rdtgrp)
+int resctrl_arch_pseudo_lock_fn(void *_rdtgrp)
 {
 	struct rdtgroup *rdtgrp = _rdtgrp;
 	struct pseudo_lock_region *plr = rdtgrp->plr;
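
Note on the "Waiter on waitqueue" comment: the thread that runs this function signals completion through the pseudo-lock region, and the code that created the thread sleeps until then. A rough caller-side sketch, reusing the plr->lock_thread_wq waitqueue and plr->thread_done flag found elsewhere in this file (illustration only, not part of this hunk):

	/* Start the locking thread on the chosen CPU, then sleep until it
	 * sets plr->thread_done and wakes the waitqueue.
	 */
	thread = kthread_run_on_cpu(resctrl_arch_pseudo_lock_fn, rdtgrp,
				    plr->cpu, "pseudo_lock/%u");
	if (IS_ERR(thread))
		return PTR_ERR(thread);

	ret = wait_event_interruptible(plr->lock_thread_wq,
				       plr->thread_done == 1);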
@@ -712,7 +713,7 @@ int rdtgroup_locksetup_enter(struct rdtgroup *rdtgrp)
 	 * Not knowing the bits to disable prefetching implies that this
 	 * platform does not support Cache Pseudo-Locking.
 	 */
-	prefetch_disable_bits = get_prefetch_disable_bits();
+	prefetch_disable_bits = resctrl_arch_get_prefetch_disable_bits();
 	if (prefetch_disable_bits == 0) {
 		rdt_last_cmd_puts("Pseudo-locking not supported\n");
 		return -EINVAL;
@@ -872,7 +873,8 @@ bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_ctrl_domain *d)
 }
 
 /**
- * measure_cycles_lat_fn - Measure cycle latency to read pseudo-locked memory
+ * resctrl_arch_measure_cycles_lat_fn - Measure cycle latency to read
+ *                                      pseudo-locked memory
  * @_plr: pseudo-lock region to measure
  *
  * There is no deterministic way to test if a memory region is cached. One
@@ -885,7 +887,7 @@ bool rdtgroup_pseudo_locked_in_hierarchy(struct rdt_ctrl_domain *d)
  *
  * Return: 0. Waiter on waitqueue will be woken on completion.
  */
-static int measure_cycles_lat_fn(void *_plr)
+int resctrl_arch_measure_cycles_lat_fn(void *_plr)
 {
 	struct pseudo_lock_region *plr = _plr;
 	u32 saved_low, saved_high;
@@ -1069,7 +1071,7 @@ static int measure_residency_fn(struct perf_event_attr *miss_attr,
 	return 0;
 }
 
-static int measure_l2_residency(void *_plr)
+int resctrl_arch_measure_l2_residency(void *_plr)
 {
 	struct pseudo_lock_region *plr = _plr;
 	struct residency_counts counts = {0};
@@ -1107,7 +1109,7 @@ static int measure_l2_residency(void *_plr)
 	return 0;
 }
 
-static int measure_l3_residency(void *_plr)
+int resctrl_arch_measure_l3_residency(void *_plr)
 {
 	struct pseudo_lock_region *plr = _plr;
 	struct residency_counts counts = {0};
@@ -1205,14 +1207,14 @@ static int pseudo_lock_measure_cycles(struct rdtgroup *rdtgrp, int sel)
 	plr->cpu = cpu;
 
 	if (sel == 1)
-		thread = kthread_run_on_cpu(measure_cycles_lat_fn, plr,
-					    cpu, "pseudo_lock_measure/%u");
+		thread = kthread_run_on_cpu(resctrl_arch_measure_cycles_lat_fn,
+					    plr, cpu, "pseudo_lock_measure/%u");
 	else if (sel == 2)
-		thread = kthread_run_on_cpu(measure_l2_residency, plr,
-					    cpu, "pseudo_lock_measure/%u");
+		thread = kthread_run_on_cpu(resctrl_arch_measure_l2_residency,
+					    plr, cpu, "pseudo_lock_measure/%u");
 	else if (sel == 3)
-		thread = kthread_run_on_cpu(measure_l3_residency, plr,
-					    cpu, "pseudo_lock_measure/%u");
+		thread = kthread_run_on_cpu(resctrl_arch_measure_l3_residency,
+					    plr, cpu, "pseudo_lock_measure/%u");
 	else
 		goto out;
 
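
Note: sel here is the integer a user writes to the group's pseudo_lock_measure debugfs file, where 1 selects the latency measurement and 2/3 select the L2/L3 residency measurements. A hypothetical sketch of such a selector parse (helper name invented for illustration; not this patch's code):

/* Hypothetical helper: validate a measurement selector parsed from a
 * debugfs write. Only 1 (latency), 2 (L2) and 3 (L3) are meaningful.
 */
static int parse_measure_selector(const char *buf, int *sel)
{
	int ret = kstrtoint(buf, 10, sel);

	if (ret)
		return ret;
	if (*sel < 1 || *sel > 3)
		return -EINVAL;
	return 0;
}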
@@ -1307,7 +1309,7 @@ int rdtgroup_pseudo_lock_create(struct rdtgroup *rdtgrp)
 
 	plr->thread_done = 0;
 
-	thread = kthread_run_on_cpu(pseudo_lock_fn, rdtgrp,
+	thread = kthread_run_on_cpu(resctrl_arch_pseudo_lock_fn, rdtgrp,
 				    plr->cpu, "pseudo_lock/%u");
 	if (IS_ERR(thread)) {
 		ret = PTR_ERR(thread);