Skip to content

Commit 2e64066

Browse files
committed
Merge tag 'riscv-for-linus-6.1-mw1' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux
Pull RISC-V updates from Palmer Dabbelt:

 - Improvements to the CPU topology subsystem, which fix some issues
   where RISC-V would report bad topology information.

 - The default NR_CPUS has increased to XLEN, and the maximum
   configurable value is 512.

 - The CD-ROM filesystems have been enabled in the defconfig.

 - Support for THP_SWAP has been added for rv64 systems.

There are also a handful of cleanups and fixes throughout the tree.

* tag 'riscv-for-linus-6.1-mw1' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux:
  riscv: enable THP_SWAP for RV64
  RISC-V: Print SSTC in canonical order
  riscv: compat: s/failed/unsupported if compat mode isn't supported
  RISC-V: Increase range and default value of NR_CPUS
  cpuidle: riscv-sbi: Fix CPU_PM_CPU_IDLE_ENTER_xyz() macro usage
  perf: RISC-V: throttle perf events
  perf: RISC-V: exclude invalid pmu counters from SBI calls
  riscv: enable CD-ROM file systems in defconfig
  riscv: topology: fix default topology reporting
  arm64: topology: move store_cpu_topology() to shared code
2 parents 57c9272 + 87f81e6 commit 2e64066

File tree

11 files changed

+63
-62
lines changed

11 files changed

+63
-62
lines changed

arch/arm64/kernel/topology.c

Lines changed: 0 additions & 40 deletions
Original file line number | Diff line number | Diff line change
@@ -22,46 +22,6 @@
2222
#include <asm/cputype.h>
2323
#include <asm/topology.h>
2424

25-
void store_cpu_topology(unsigned int cpuid)
26-
{
27-
struct cpu_topology *cpuid_topo = &cpu_topology[cpuid];
28-
u64 mpidr;
29-
30-
if (cpuid_topo->package_id != -1)
31-
goto topology_populated;
32-
33-
mpidr = read_cpuid_mpidr();
34-
35-
/* Uniprocessor systems can rely on default topology values */
36-
if (mpidr & MPIDR_UP_BITMASK)
37-
return;
38-
39-
/*
40-
* This would be the place to create cpu topology based on MPIDR.
41-
*
42-
* However, it cannot be trusted to depict the actual topology; some
43-
* pieces of the architecture enforce an artificial cap on Aff0 values
44-
* (e.g. GICv3's ICC_SGI1R_EL1 limits it to 15), leading to an
45-
* artificial cycling of Aff1, Aff2 and Aff3 values. IOW, these end up
46-
* having absolutely no relationship to the actual underlying system
47-
* topology, and cannot be reasonably used as core / package ID.
48-
*
49-
* If the MT bit is set, Aff0 *could* be used to define a thread ID, but
50-
* we still wouldn't be able to obtain a sane core ID. This means we
51-
* need to entirely ignore MPIDR for any topology deduction.
52-
*/
53-
cpuid_topo->thread_id = -1;
54-
cpuid_topo->core_id = cpuid;
55-
cpuid_topo->package_id = cpu_to_node(cpuid);
56-
57-
pr_debug("CPU%u: cluster %d core %d thread %d mpidr %#016llx\n",
58-
cpuid, cpuid_topo->package_id, cpuid_topo->core_id,
59-
cpuid_topo->thread_id, mpidr);
60-
61-
topology_populated:
62-
update_siblings_masks(cpuid);
63-
}
64-
6525
#ifdef CONFIG_ACPI
6626
static bool __init acpi_cpu_is_threaded(int cpu)
6727
{

arch/riscv/Kconfig

Lines changed: 8 additions & 4 deletions
Original file line number | Diff line number | Diff line change
@@ -45,14 +45,15 @@ config RISCV
4545
select ARCH_WANT_FRAME_POINTERS
4646
select ARCH_WANT_GENERAL_HUGETLB
4747
select ARCH_WANT_HUGE_PMD_SHARE if 64BIT
48+
select ARCH_WANTS_THP_SWAP if HAVE_ARCH_TRANSPARENT_HUGEPAGE
4849
select BINFMT_FLAT_NO_DATA_START_OFFSET if !MMU
4950
select BUILDTIME_TABLE_SORT if MMU
5051
select CLONE_BACKWARDS
5152
select CLINT_TIMER if !MMU
5253
select COMMON_CLK
5354
select CPU_PM if CPU_IDLE
5455
select EDAC_SUPPORT
55-
select GENERIC_ARCH_TOPOLOGY if SMP
56+
select GENERIC_ARCH_TOPOLOGY
5657
select GENERIC_ATOMIC64 if !64BIT
5758
select GENERIC_CLOCKEVENTS_BROADCAST if SMP
5859
select GENERIC_EARLY_IOREMAP
@@ -309,10 +310,13 @@ config SMP
309310
If you don't know what to do here, say N.
310311

311312
config NR_CPUS
312-
int "Maximum number of CPUs (2-32)"
313-
range 2 32
313+
int "Maximum number of CPUs (2-512)"
314314
depends on SMP
315-
default "8"
315+
range 2 512 if !SBI_V01
316+
range 2 32 if SBI_V01 && 32BIT
317+
range 2 64 if SBI_V01 && 64BIT
318+
default "32" if 32BIT
319+
default "64" if 64BIT
316320

317321
config HOTPLUG_CPU
318322
bool "Support for hot-pluggable CPUs"

arch/riscv/configs/defconfig

Lines changed: 3 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -166,6 +166,9 @@ CONFIG_BTRFS_FS=m
166166
CONFIG_BTRFS_FS_POSIX_ACL=y
167167
CONFIG_AUTOFS4_FS=y
168168
CONFIG_OVERLAY_FS=m
169+
CONFIG_ISO9660_FS=y
170+
CONFIG_JOLIET=y
171+
CONFIG_ZISOFS=y
169172
CONFIG_MSDOS_FS=y
170173
CONFIG_VFAT_FS=y
171174
CONFIG_TMPFS=y

arch/riscv/kernel/cpu.c

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -92,10 +92,10 @@ int riscv_of_parent_hartid(struct device_node *node, unsigned long *hartid)
9292
*/
9393
static struct riscv_isa_ext_data isa_ext_arr[] = {
9494
__RISCV_ISA_EXT_DATA(sscofpmf, RISCV_ISA_EXT_SSCOFPMF),
95+
__RISCV_ISA_EXT_DATA(sstc, RISCV_ISA_EXT_SSTC),
9596
__RISCV_ISA_EXT_DATA(svpbmt, RISCV_ISA_EXT_SVPBMT),
9697
__RISCV_ISA_EXT_DATA(zicbom, RISCV_ISA_EXT_ZICBOM),
9798
__RISCV_ISA_EXT_DATA(zihintpause, RISCV_ISA_EXT_ZIHINTPAUSE),
98-
__RISCV_ISA_EXT_DATA(sstc, RISCV_ISA_EXT_SSTC),
9999
__RISCV_ISA_EXT_DATA("", RISCV_ISA_EXT_MAX),
100100
};
101101

arch/riscv/kernel/process.c

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -105,7 +105,7 @@ static int __init compat_mode_detect(void)
105105
csr_write(CSR_STATUS, tmp);
106106

107107
pr_info("riscv: ELF compat mode %s",
108-
compat_mode_supported ? "supported" : "failed");
108+
compat_mode_supported ? "supported" : "unsupported");
109109

110110
return 0;
111111
}

arch/riscv/kernel/smpboot.c

Lines changed: 2 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -49,6 +49,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
4949
unsigned int curr_cpuid;
5050

5151
curr_cpuid = smp_processor_id();
52+
store_cpu_topology(curr_cpuid);
5253
numa_store_cpu_info(curr_cpuid);
5354
numa_add_cpu(curr_cpuid);
5455

@@ -162,9 +163,9 @@ asmlinkage __visible void smp_callin(void)
162163
mmgrab(mm);
163164
current->active_mm = mm;
164165

166+
store_cpu_topology(curr_cpuid);
165167
notify_cpu_starting(curr_cpuid);
166168
numa_add_cpu(curr_cpuid);
167-
update_siblings_masks(curr_cpuid);
168169
set_cpu_online(curr_cpuid, 1);
169170

170171
/*

drivers/base/arch_topology.c

Lines changed: 19 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -841,4 +841,23 @@ void __init init_cpu_topology(void)
841841
return;
842842
}
843843
}
844+
845+
void store_cpu_topology(unsigned int cpuid)
846+
{
847+
struct cpu_topology *cpuid_topo = &cpu_topology[cpuid];
848+
849+
if (cpuid_topo->package_id != -1)
850+
goto topology_populated;
851+
852+
cpuid_topo->thread_id = -1;
853+
cpuid_topo->core_id = cpuid;
854+
cpuid_topo->package_id = cpu_to_node(cpuid);
855+
856+
pr_debug("CPU%u: package %d core %d thread %d\n",
857+
cpuid, cpuid_topo->package_id, cpuid_topo->core_id,
858+
cpuid_topo->thread_id);
859+
860+
topology_populated:
861+
update_siblings_masks(cpuid);
862+
}
844863
#endif

drivers/cpuidle/cpuidle-riscv-sbi.c

Lines changed: 6 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -97,8 +97,13 @@ static int sbi_cpuidle_enter_state(struct cpuidle_device *dev,
9797
struct cpuidle_driver *drv, int idx)
9898
{
9999
u32 *states = __this_cpu_read(sbi_cpuidle_data.states);
100+
u32 state = states[idx];
100101

101-
return CPU_PM_CPU_IDLE_ENTER_PARAM(sbi_suspend, idx, states[idx]);
102+
if (state & SBI_HSM_SUSP_NON_RET_BIT)
103+
return CPU_PM_CPU_IDLE_ENTER_PARAM(sbi_suspend, idx, state);
104+
else
105+
return CPU_PM_CPU_IDLE_ENTER_RETENTION_PARAM(sbi_suspend,
106+
idx, state);
102107
}
103108

104109
static int __sbi_enter_domain_idle_state(struct cpuidle_device *dev,

drivers/perf/riscv_pmu_legacy.c

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -14,7 +14,6 @@
1414

1515
#define RISCV_PMU_LEGACY_CYCLE 0
1616
#define RISCV_PMU_LEGACY_INSTRET 1
17-
#define RISCV_PMU_LEGACY_NUM_CTR 2
1817

1918
static bool pmu_init_done;
2019

@@ -83,7 +82,8 @@ static void pmu_legacy_init(struct riscv_pmu *pmu)
8382
{
8483
pr_info("Legacy PMU implementation is available\n");
8584

86-
pmu->num_counters = RISCV_PMU_LEGACY_NUM_CTR;
85+
pmu->cmask = BIT(RISCV_PMU_LEGACY_CYCLE) |
86+
BIT(RISCV_PMU_LEGACY_INSTRET);
8787
pmu->ctr_start = pmu_legacy_ctr_start;
8888
pmu->ctr_stop = NULL;
8989
pmu->event_map = pmu_legacy_event_map;

drivers/perf/riscv_pmu_sbi.c

Lines changed: 20 additions & 11 deletions
Original file line number | Diff line number | Diff line change
@@ -18,6 +18,7 @@
1818
#include <linux/of_irq.h>
1919
#include <linux/of.h>
2020
#include <linux/cpu_pm.h>
21+
#include <linux/sched/clock.h>
2122

2223
#include <asm/sbi.h>
2324
#include <asm/hwcap.h>
@@ -271,7 +272,6 @@ static int pmu_sbi_ctr_get_idx(struct perf_event *event)
271272
struct sbiret ret;
272273
int idx;
273274
uint64_t cbase = 0;
274-
uint64_t cmask = GENMASK_ULL(rvpmu->num_counters - 1, 0);
275275
unsigned long cflags = 0;
276276

277277
if (event->attr.exclude_kernel)
@@ -281,11 +281,12 @@ static int pmu_sbi_ctr_get_idx(struct perf_event *event)
281281

282282
/* retrieve the available counter index */
283283
#if defined(CONFIG_32BIT)
284-
ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_CFG_MATCH, cbase, cmask,
285-
cflags, hwc->event_base, hwc->config, hwc->config >> 32);
284+
ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_CFG_MATCH, cbase,
285+
rvpmu->cmask, cflags, hwc->event_base, hwc->config,
286+
hwc->config >> 32);
286287
#else
287-
ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_CFG_MATCH, cbase, cmask,
288-
cflags, hwc->event_base, hwc->config, 0);
288+
ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_CFG_MATCH, cbase,
289+
rvpmu->cmask, cflags, hwc->event_base, hwc->config, 0);
289290
#endif
290291
if (ret.error) {
291292
pr_debug("Not able to find a counter for event %lx config %llx\n",
@@ -294,7 +295,7 @@ static int pmu_sbi_ctr_get_idx(struct perf_event *event)
294295
}
295296

296297
idx = ret.value;
297-
if (idx >= rvpmu->num_counters || !pmu_ctr_list[idx].value)
298+
if (!test_bit(idx, &rvpmu->cmask) || !pmu_ctr_list[idx].value)
298299
return -ENOENT;
299300

300301
/* Additional sanity check for the counter id */
@@ -463,7 +464,7 @@ static int pmu_sbi_find_num_ctrs(void)
463464
return sbi_err_map_linux_errno(ret.error);
464465
}
465466

466-
static int pmu_sbi_get_ctrinfo(int nctr)
467+
static int pmu_sbi_get_ctrinfo(int nctr, unsigned long *mask)
467468
{
468469
struct sbiret ret;
469470
int i, num_hw_ctr = 0, num_fw_ctr = 0;
@@ -478,6 +479,9 @@ static int pmu_sbi_get_ctrinfo(int nctr)
478479
if (ret.error)
479480
/* The logical counter ids are not expected to be contiguous */
480481
continue;
482+
483+
*mask |= BIT(i);
484+
481485
cinfo.value = ret.value;
482486
if (cinfo.type == SBI_PMU_CTR_TYPE_FW)
483487
num_fw_ctr++;
@@ -498,7 +502,7 @@ static inline void pmu_sbi_stop_all(struct riscv_pmu *pmu)
498502
* which may include counters that are not enabled yet.
499503
*/
500504
sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP,
501-
0, GENMASK_ULL(pmu->num_counters - 1, 0), 0, 0, 0, 0);
505+
0, pmu->cmask, 0, 0, 0, 0);
502506
}
503507

504508
static inline void pmu_sbi_stop_hw_ctrs(struct riscv_pmu *pmu)
@@ -567,6 +571,7 @@ static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev)
567571
unsigned long overflow;
568572
unsigned long overflowed_ctrs = 0;
569573
struct cpu_hw_events *cpu_hw_evt = dev;
574+
u64 start_clock = sched_clock();
570575

571576
if (WARN_ON_ONCE(!cpu_hw_evt))
572577
return IRQ_NONE;
@@ -635,7 +640,9 @@ static irqreturn_t pmu_sbi_ovf_handler(int irq, void *dev)
635640
perf_event_overflow(event, &data, regs);
636641
}
637642
}
643+
638644
pmu_sbi_start_overflow_mask(pmu, overflowed_ctrs);
645+
perf_sample_event_took(sched_clock() - start_clock);
639646

640647
return IRQ_HANDLED;
641648
}
@@ -788,8 +795,9 @@ static void riscv_pmu_destroy(struct riscv_pmu *pmu)
788795
static int pmu_sbi_device_probe(struct platform_device *pdev)
789796
{
790797
struct riscv_pmu *pmu = NULL;
791-
int num_counters;
798+
unsigned long cmask = 0;
792799
int ret = -ENODEV;
800+
int num_counters;
793801

794802
pr_info("SBI PMU extension is available\n");
795803
pmu = riscv_pmu_alloc();
@@ -803,7 +811,7 @@ static int pmu_sbi_device_probe(struct platform_device *pdev)
803811
}
804812

805813
/* cache all the information about counters now */
806-
if (pmu_sbi_get_ctrinfo(num_counters))
814+
if (pmu_sbi_get_ctrinfo(num_counters, &cmask))
807815
goto out_free;
808816

809817
ret = pmu_sbi_setup_irqs(pmu, pdev);
@@ -812,8 +820,9 @@ static int pmu_sbi_device_probe(struct platform_device *pdev)
812820
pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
813821
pmu->pmu.capabilities |= PERF_PMU_CAP_NO_EXCLUDE;
814822
}
823+
815824
pmu->pmu.attr_groups = riscv_pmu_attr_groups;
816-
pmu->num_counters = num_counters;
825+
pmu->cmask = cmask;
817826
pmu->ctr_start = pmu_sbi_ctr_start;
818827
pmu->ctr_stop = pmu_sbi_ctr_stop;
819828
pmu->event_map = pmu_sbi_event_map;

0 commit comments

Comments (0)