
Commit 730a6e5

Merge remote-tracking branch 'stable/linux-6.6.y' into rpi-6.6.y
2 parents: 4895707 + ab6cc4e

110 files changed: 1,909 additions, 739 deletions

(Large commit: some file diffs are collapsed by default; only a subset of the 110 changed files is reproduced below.)

Documentation/power/runtime_pm.rst

Lines changed: 3 additions & 1 deletion

@@ -347,7 +347,9 @@ drivers/base/power/runtime.c and include/linux/pm_runtime.h:
 
   `int pm_runtime_resume_and_get(struct device *dev);`
     - run pm_runtime_resume(dev) and if successful, increment the device's
-      usage counter; return the result of pm_runtime_resume
+      usage counter; returns 0 on success (whether or not the device's
+      runtime PM status was already 'active') or the error code from
+      pm_runtime_resume() on failure.
 
   `int pm_request_idle(struct device *dev);`
     - submit a request to execute the subsystem-level idle callback for the
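
The reworded semantics are worth pinning down: unlike pm_runtime_resume(), which returns 1 when the device was already active, pm_runtime_resume_and_get() folds that case into 0, so callers only need a negative-error check. A minimal driver-side sketch (foo_transfer and the I/O step are hypothetical):

#include <linux/pm_runtime.h>

static int foo_transfer(struct device *dev)
{
	int ret;

	/* Returns 0 even if the device was already active. On failure the
	 * usage counter is not left incremented, so no pm_runtime_put()
	 * is owed on the error path. */
	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;

	/* ... access the hardware while it is powered ... */

	pm_runtime_put(dev);
	return 0;
}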

Makefile

Lines changed: 1 addition & 1 deletion

@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 6
 PATCHLEVEL = 6
-SUBLEVEL = 66
+SUBLEVEL = 67
 EXTRAVERSION =
 NAME = Pinguïn Aangedreven

arch/arm64/kvm/sys_regs.c

Lines changed: 50 additions & 2 deletions

@@ -1330,6 +1330,7 @@ static u64 __kvm_read_sanitised_id_reg(const struct kvm_vcpu *vcpu,
 			val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE);
 
 		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_SME);
+		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MPAM_frac);
 		break;
 	case SYS_ID_AA64ISAR1_EL1:
 		if (!vcpu_has_ptrauth(vcpu))
@@ -1472,6 +1473,13 @@ static u64 read_sanitised_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
 
 	val &= ~ID_AA64PFR0_EL1_AMU_MASK;
 
+	/*
+	 * MPAM is disabled by default as KVM also needs a set of PARTID to
+	 * program the MPAMVPMx_EL2 PARTID remapping registers with. But some
+	 * older kernels let the guest see the ID bit.
+	 */
+	val &= ~ID_AA64PFR0_EL1_MPAM_MASK;
+
 	return val;
 }
 
@@ -1560,6 +1568,42 @@ static int set_id_dfr0_el1(struct kvm_vcpu *vcpu,
 	return set_id_reg(vcpu, rd, val);
 }
 
+static int set_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
+			       const struct sys_reg_desc *rd, u64 user_val)
+{
+	u64 hw_val = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
+	u64 mpam_mask = ID_AA64PFR0_EL1_MPAM_MASK;
+
+	/*
+	 * Commit 011e5f5bf529f ("arm64/cpufeature: Add remaining feature bits
+	 * in ID_AA64PFR0 register") exposed the MPAM field of AA64PFR0_EL1 to
+	 * guests, but didn't add trap handling. KVM doesn't support MPAM and
+	 * always returns an UNDEF for these registers. The guest must see 0
+	 * for this field.
+	 *
+	 * But KVM must also accept values from user-space that were provided
+	 * by KVM. On CPUs that support MPAM, permit user-space to write
+	 * the sanitizied value to ID_AA64PFR0_EL1.MPAM, but ignore this field.
+	 */
+	if ((hw_val & mpam_mask) == (user_val & mpam_mask))
+		user_val &= ~ID_AA64PFR0_EL1_MPAM_MASK;
+
+	return set_id_reg(vcpu, rd, user_val);
+}
+
+static int set_id_aa64pfr1_el1(struct kvm_vcpu *vcpu,
+			       const struct sys_reg_desc *rd, u64 user_val)
+{
+	u64 hw_val = read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1);
+	u64 mpam_mask = ID_AA64PFR1_EL1_MPAM_frac_MASK;
+
+	/* See set_id_aa64pfr0_el1 for comment about MPAM */
+	if ((hw_val & mpam_mask) == (user_val & mpam_mask))
+		user_val &= ~ID_AA64PFR1_EL1_MPAM_frac_MASK;
+
+	return set_id_reg(vcpu, rd, user_val);
+}
+
 /*
  * cpufeature ID register user accessors
  *
@@ -2018,10 +2062,14 @@ static const struct sys_reg_desc sys_reg_descs[] = {
 	{ SYS_DESC(SYS_ID_AA64PFR0_EL1),
 	  .access = access_id_reg,
 	  .get_user = get_id_reg,
-	  .set_user = set_id_reg,
+	  .set_user = set_id_aa64pfr0_el1,
 	  .reset = read_sanitised_id_aa64pfr0_el1,
 	  .val = ID_AA64PFR0_EL1_CSV2_MASK | ID_AA64PFR0_EL1_CSV3_MASK, },
-	ID_SANITISED(ID_AA64PFR1_EL1),
+	{ SYS_DESC(SYS_ID_AA64PFR1_EL1),
+	  .access = access_id_reg,
+	  .get_user = get_id_reg,
+	  .set_user = set_id_aa64pfr1_el1,
+	  .reset = kvm_read_sanitised_id_reg, },
 	ID_UNALLOCATED(4,2),
 	ID_UNALLOCATED(4,3),
 	ID_SANITISED(ID_AA64ZFR0_EL1),
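
The contract these setters preserve is easiest to see from a VMM's side: reading an ID register and writing the same value back must keep succeeding across kernels that did or did not expose MPAM. A rough user-space sketch using KVM's one-reg API (the helper and read-modify-write flow are illustrative, not part of this commit):

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* ID_AA64PFR0_EL1 encodes as op0=3, op1=0, CRn=0, CRm=4, op2=0. */
#define ID_AA64PFR0_EL1_REG	ARM64_SYS_REG(3, 0, 0, 4, 0)

static int roundtrip_id_aa64pfr0(int vcpu_fd)
{
	uint64_t val;
	struct kvm_one_reg reg = {
		.id   = ID_AA64PFR0_EL1_REG,
		.addr = (uint64_t)&val,
	};

	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0)
		return -1;

	/* Patched kernels report MPAM as 0; a value matching the sanitised
	 * hardware register (as older kernels handed out) is also accepted,
	 * but the field is ignored either way. */
	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}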

arch/riscv/include/asm/kfence.h

Lines changed: 3 additions & 1 deletion

@@ -22,7 +22,9 @@ static inline bool kfence_protect_page(unsigned long addr, bool protect)
 	else
 		set_pte(pte, __pte(pte_val(ptep_get(pte)) | _PAGE_PRESENT));
 
-	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
+	preempt_disable();
+	local_flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
+	preempt_enable();
 
 	return true;
 }

arch/riscv/kernel/setup.c

Lines changed: 1 addition & 1 deletion

@@ -246,7 +246,7 @@ static void __init init_resources(void)
 static void __init parse_dtb(void)
 {
 	/* Early scan of device tree from init memory */
-	if (early_init_dt_scan(dtb_early_va, __pa(dtb_early_va))) {
+	if (early_init_dt_scan(dtb_early_va, dtb_early_pa)) {
 		const char *name = of_flat_dt_get_machine_name();
 
 		if (name) {

arch/x86/events/intel/ds.c

Lines changed: 1 addition & 1 deletion

@@ -1354,7 +1354,7 @@ void intel_pmu_pebs_enable(struct perf_event *event)
 		 * hence we need to drain when changing said
 		 * size.
 		 */
-		intel_pmu_drain_large_pebs(cpuc);
+		intel_pmu_drain_pebs_buffer();
 		adaptive_pebs_record_size_update();
 		wrmsrl(MSR_PEBS_DATA_CFG, pebs_data_cfg);
 		cpuc->active_pebs_data_cfg = pebs_data_cfg;

arch/x86/include/asm/processor.h

Lines changed: 2 additions & 0 deletions

@@ -190,6 +190,8 @@ static inline unsigned long long l1tf_pfn_limit(void)
 	return BIT_ULL(boot_cpu_data.x86_cache_bits - 1 - PAGE_SHIFT);
 }
 
+void init_cpu_devs(void);
+void get_cpu_vendor(struct cpuinfo_x86 *c);
 extern void early_cpu_init(void);
 extern void identify_secondary_cpu(struct cpuinfo_x86 *);
 extern void print_cpu_info(struct cpuinfo_x86 *);

arch/x86/include/asm/static_call.h

Lines changed: 15 additions & 0 deletions

@@ -65,4 +65,19 @@
 
 extern bool __static_call_fixup(void *tramp, u8 op, void *dest);
 
+extern void __static_call_update_early(void *tramp, void *func);
+
+#define static_call_update_early(name, _func)				\
+({									\
+	typeof(&STATIC_CALL_TRAMP(name)) __F = (_func);			\
+	if (static_call_initialized) {					\
+		__static_call_update(&STATIC_CALL_KEY(name),		\
+				     STATIC_CALL_TRAMP_ADDR(name), __F);\
+	} else {							\
+		WRITE_ONCE(STATIC_CALL_KEY(name).func, _func);		\
+		__static_call_update_early(STATIC_CALL_TRAMP_ADDR(name),\
+					   __F);			\
+	}								\
+})
+
 #endif /* _ASM_STATIC_CALL_H */
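
A minimal sketch of the intended use, with hypothetical names (my_call, my_func, my_early_impl): before static-call initialization has run, __static_call_update() cannot be used, so the early variant writes the key's function pointer and patches the trampoline text directly.

#include <linux/init.h>
#include <linux/static_call.h>

/* Header side; a matching DEFINE_STATIC_CALL(my_call, my_func) is
 * assumed in some .c file. */
void my_func(void);
DECLARE_STATIC_CALL(my_call, my_func);

void my_early_impl(void);	/* hypothetical replacement target */

void __init choose_impl_very_early(void)
{
	/* Takes the else-branch while static_call_initialized is still
	 * false: a WRITE_ONCE() of the key plus a direct trampoline patch
	 * via __static_call_update_early(). */
	static_call_update_early(my_call, my_early_impl);
}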

arch/x86/include/asm/sync_core.h

Lines changed: 3 additions & 3 deletions

@@ -8,7 +8,7 @@
 #include <asm/special_insns.h>
 
 #ifdef CONFIG_X86_32
-static inline void iret_to_self(void)
+static __always_inline void iret_to_self(void)
 {
 	asm volatile (
 		"pushfl\n\t"
@@ -19,7 +19,7 @@ static inline void iret_to_self(void)
 		: ASM_CALL_CONSTRAINT : : "memory");
 }
 #else
-static inline void iret_to_self(void)
+static __always_inline void iret_to_self(void)
 {
 	unsigned int tmp;
 
@@ -55,7 +55,7 @@ static inline void iret_to_self(void)
  * Like all of Linux's memory ordering operations, this is a
  * compiler barrier as well.
  */
-static inline void sync_core(void)
+static __always_inline void sync_core(void)
 {
 	/*
 	 * The SERIALIZE instruction is the most straightforward way to
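
These helpers were previously plain static inline, which lets the compiler emit them out of line. The likely motivation for __always_inline (my reading; the hunk itself gives no rationale) is use from code that must not call out-of-line instrumentable functions, such as the early static-call update machinery added elsewhere in this commit. A hedged sketch of such a caller (patch_and_resync is hypothetical):

static noinstr void patch_and_resync(void)
{
	/* ...rewrite code the local CPU may execute next... */

	/* With __always_inline this expands in place (SERIALIZE, or the
	 * IRET-to-self fallback) instead of becoming a callable function
	 * that instrumentation could hook. */
	sync_core();
}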

arch/x86/include/asm/xen/hypercall.h

Lines changed: 22 additions & 14 deletions

@@ -39,9 +39,11 @@
 #include <linux/string.h>
 #include <linux/types.h>
 #include <linux/pgtable.h>
+#include <linux/instrumentation.h>
 
 #include <trace/events/xen.h>
 
+#include <asm/alternative.h>
 #include <asm/page.h>
 #include <asm/smap.h>
 #include <asm/nospec-branch.h>
@@ -86,11 +88,20 @@ struct xen_dm_op_buf;
  * there aren't more than 5 arguments...)
  */
 
-extern struct { char _entry[32]; } hypercall_page[];
+void xen_hypercall_func(void);
+DECLARE_STATIC_CALL(xen_hypercall, xen_hypercall_func);
 
-#define __HYPERCALL "call hypercall_page+%c[offset]"
-#define __HYPERCALL_ENTRY(x)					\
-	[offset] "i" (__HYPERVISOR_##x * sizeof(hypercall_page[0]))
+#ifdef MODULE
+#define __ADDRESSABLE_xen_hypercall
+#else
+#define __ADDRESSABLE_xen_hypercall __ADDRESSABLE_ASM_STR(__SCK__xen_hypercall)
+#endif
+
+#define __HYPERCALL					\
+	__ADDRESSABLE_xen_hypercall			\
+	"call __SCT__xen_hypercall"
+
+#define __HYPERCALL_ENTRY(x) "a" (x)
 
 #ifdef CONFIG_X86_32
 #define __HYPERCALL_RETREG "eax"
@@ -148,7 +159,7 @@ extern struct { char _entry[32]; } hypercall_page[];
 	__HYPERCALL_0ARG();					\
 	asm volatile (__HYPERCALL				\
 		      : __HYPERCALL_0PARAM			\
-		      : __HYPERCALL_ENTRY(name)			\
+		      : __HYPERCALL_ENTRY(__HYPERVISOR_ ## name)	\
 		      : __HYPERCALL_CLOBBER0);			\
 	(type)__res;						\
 })
@@ -159,7 +170,7 @@ extern struct { char _entry[32]; } hypercall_page[];
 	__HYPERCALL_1ARG(a1);					\
 	asm volatile (__HYPERCALL				\
 		      : __HYPERCALL_1PARAM			\
-		      : __HYPERCALL_ENTRY(name)			\
+		      : __HYPERCALL_ENTRY(__HYPERVISOR_ ## name)	\
 		      : __HYPERCALL_CLOBBER1);			\
 	(type)__res;						\
 })
@@ -170,7 +181,7 @@ extern struct { char _entry[32]; } hypercall_page[];
 	__HYPERCALL_2ARG(a1, a2);				\
 	asm volatile (__HYPERCALL				\
 		      : __HYPERCALL_2PARAM			\
-		      : __HYPERCALL_ENTRY(name)			\
+		      : __HYPERCALL_ENTRY(__HYPERVISOR_ ## name)	\
 		      : __HYPERCALL_CLOBBER2);			\
 	(type)__res;						\
 })
@@ -181,7 +192,7 @@ extern struct { char _entry[32]; } hypercall_page[];
 	__HYPERCALL_3ARG(a1, a2, a3);				\
 	asm volatile (__HYPERCALL				\
 		      : __HYPERCALL_3PARAM			\
-		      : __HYPERCALL_ENTRY(name)			\
+		      : __HYPERCALL_ENTRY(__HYPERVISOR_ ## name)	\
 		      : __HYPERCALL_CLOBBER3);			\
 	(type)__res;						\
 })
@@ -192,7 +203,7 @@ extern struct { char _entry[32]; } hypercall_page[];
 	__HYPERCALL_4ARG(a1, a2, a3, a4);			\
 	asm volatile (__HYPERCALL				\
 		      : __HYPERCALL_4PARAM			\
-		      : __HYPERCALL_ENTRY(name)			\
+		      : __HYPERCALL_ENTRY(__HYPERVISOR_ ## name)	\
 		      : __HYPERCALL_CLOBBER4);			\
 	(type)__res;						\
 })
@@ -206,12 +217,9 @@ xen_single_call(unsigned int call,
 	__HYPERCALL_DECLS;
 	__HYPERCALL_5ARG(a1, a2, a3, a4, a5);
 
-	if (call >= PAGE_SIZE / sizeof(hypercall_page[0]))
-		return -EINVAL;
-
-	asm volatile(CALL_NOSPEC
+	asm volatile(__HYPERCALL
 		     : __HYPERCALL_5PARAM
-		     : [thunk_target] "a" (&hypercall_page[call])
+		     : __HYPERCALL_ENTRY(call)
 		     : __HYPERCALL_CLOBBER5);
 
 	return (long)__res;
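
Call sites are untouched by this conversion; only what __HYPERCALL expands to changes. The long-standing wrapper pattern from elsewhere in this same header still applies, shown here for orientation:

static inline int
HYPERVISOR_sched_op(int cmd, void *arg)
{
	return _hypercall2(int, sched_op, cmd, arg);
}

Previously this compiled to "call hypercall_page+%c[offset]", an immediate offset into a page of 32-byte stubs; it now loads __HYPERVISOR_sched_op into the return register (the "a" constraint from the new __HYPERCALL_ENTRY) and calls the __SCT__xen_hypercall static-call trampoline, which boot code can retarget once the hypervisor type is known.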
