@@ -349,17 +349,102 @@ static DECLARE_WORK(disable_freq_invariance_work,
/*
 * Per-CPU frequency scale factor, initialized to full scale
 * (SCHED_CAPACITY_SCALE, i.e. no scaling). Presumably updated from
 * scale_freq_tick() and read by the scheduler's frequency-invariance
 * code — the update site is outside this hunk; confirm against the
 * rest of the file.
 */
DEFINE_PER_CPU(unsigned long, arch_freq_scale) = SCHED_CAPACITY_SCALE;
EXPORT_PER_CPU_SYMBOL_GPL(arch_freq_scale);
351351
/* Gates the hybrid CPU capacity scaling code paths below; off by default. */
static DEFINE_STATIC_KEY_FALSE(arch_hybrid_cap_scale_key);

/*
 * Per-CPU scale-invariance parameters used when hybrid capacity scaling
 * is enabled (see arch_enable_hybrid_capacity_scale() for the defaults).
 */
struct arch_hybrid_cpu_scale {
	unsigned long capacity;		/* CPU capacity; defaults to SCHED_CAPACITY_SCALE */
	unsigned long freq_ratio;	/* Frequency ratio; defaults to arch_max_freq_ratio */
};

/* Allocated by arch_enable_hybrid_capacity_scale(); NULL until then. */
static struct arch_hybrid_cpu_scale __percpu *arch_cpu_scale;
360+
361+ /**
362+ * arch_enable_hybrid_capacity_scale() - Enable hybrid CPU capacity scaling
363+ *
364+ * Allocate memory for per-CPU data used by hybrid CPU capacity scaling,
365+ * initialize it and set the static key controlling its code paths.
366+ *
367+ * Must be called before arch_set_cpu_capacity().
368+ */
369+ bool arch_enable_hybrid_capacity_scale (void )
370+ {
371+ int cpu ;
372+
373+ if (static_branch_unlikely (& arch_hybrid_cap_scale_key )) {
374+ WARN_ONCE (1 , "Hybrid CPU capacity scaling already enabled" );
375+ return true;
376+ }
377+
378+ arch_cpu_scale = alloc_percpu (struct arch_hybrid_cpu_scale );
379+ if (!arch_cpu_scale )
380+ return false;
381+
382+ for_each_possible_cpu (cpu ) {
383+ per_cpu_ptr (arch_cpu_scale , cpu )-> capacity = SCHED_CAPACITY_SCALE ;
384+ per_cpu_ptr (arch_cpu_scale , cpu )-> freq_ratio = arch_max_freq_ratio ;
385+ }
386+
387+ static_branch_enable (& arch_hybrid_cap_scale_key );
388+
389+ pr_info ("Hybrid CPU capacity scaling enabled\n" );
390+
391+ return true;
392+ }
393+
394+ /**
395+ * arch_set_cpu_capacity() - Set scale-invariance parameters for a CPU
396+ * @cpu: Target CPU.
397+ * @cap: Capacity of @cpu at its maximum frequency, relative to @max_cap.
398+ * @max_cap: System-wide maximum CPU capacity.
399+ * @cap_freq: Frequency of @cpu corresponding to @cap.
400+ * @base_freq: Frequency of @cpu at which MPERF counts.
401+ *
402+ * The units in which @cap and @max_cap are expressed do not matter, so long
403+ * as they are consistent, because the former is effectively divided by the
404+ * latter. Analogously for @cap_freq and @base_freq.
405+ *
406+ * After calling this function for all CPUs, call arch_rebuild_sched_domains()
407+ * to let the scheduler know that capacity-aware scheduling can be used going
408+ * forward.
409+ */
410+ void arch_set_cpu_capacity (int cpu , unsigned long cap , unsigned long max_cap ,
411+ unsigned long cap_freq , unsigned long base_freq )
412+ {
413+ if (static_branch_likely (& arch_hybrid_cap_scale_key )) {
414+ WRITE_ONCE (per_cpu_ptr (arch_cpu_scale , cpu )-> capacity ,
415+ div_u64 (cap << SCHED_CAPACITY_SHIFT , max_cap ));
416+ WRITE_ONCE (per_cpu_ptr (arch_cpu_scale , cpu )-> freq_ratio ,
417+ div_u64 (cap_freq << SCHED_CAPACITY_SHIFT , base_freq ));
418+ } else {
419+ WARN_ONCE (1 , "Hybrid CPU capacity scaling not enabled" );
420+ }
421+ }
422+
423+ unsigned long arch_scale_cpu_capacity (int cpu )
424+ {
425+ if (static_branch_unlikely (& arch_hybrid_cap_scale_key ))
426+ return READ_ONCE (per_cpu_ptr (arch_cpu_scale , cpu )-> capacity );
427+
428+ return SCHED_CAPACITY_SCALE ;
429+ }
430+ EXPORT_SYMBOL_GPL (arch_scale_cpu_capacity );
431+
352432static void scale_freq_tick (u64 acnt , u64 mcnt )
353433{
354- u64 freq_scale ;
434+ u64 freq_scale , freq_ratio ;
355435
356436 if (!arch_scale_freq_invariant ())
357437 return ;
358438
359439 if (check_shl_overflow (acnt , 2 * SCHED_CAPACITY_SHIFT , & acnt ))
360440 goto error ;
361441
362- if (check_mul_overflow (mcnt , arch_max_freq_ratio , & mcnt ) || !mcnt )
442+ if (static_branch_unlikely (& arch_hybrid_cap_scale_key ))
443+ freq_ratio = READ_ONCE (this_cpu_ptr (arch_cpu_scale )-> freq_ratio );
444+ else
445+ freq_ratio = arch_max_freq_ratio ;
446+
447+ if (check_mul_overflow (mcnt , freq_ratio , & mcnt ) || !mcnt )
363448 goto error ;
364449
365450 freq_scale = div64_u64 (acnt , mcnt );
0 commit comments