Commit 098c793
Merge tag 'x86_urgent_for_5.8_rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 fixes from Borislav Petkov:

 - AMD Memory bandwidth counter width fix, by Babu Moger.

 - Use the proper length type in the 32-bit truncate() syscall variant, by Jiri Slaby.

 - Reinit IA32_FEAT_CTL during wakeup to fix the case where after resume, VMXON would #GP due to VMX not being properly enabled, by Sean Christopherson.

 - Fix a static checker warning in the resctrl code, by Dan Carpenter.

 - Add a CR4 pinning mask for bits which cannot change after boot, by Kees Cook.

 - Align the start of the loop of __clear_user() to 16 bytes, to improve performance on AMD zen1 and zen2 microarchitectures, by Matt Fleming.

* tag 'x86_urgent_for_5.8_rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/asm/64: Align start of __clear_user() loop to 16-bytes
  x86/cpu: Use pinning mask for CR4 bits needing to be 0
  x86/resctrl: Fix a NULL vs IS_ERR() static checker warning in rdt_cdp_peer_get()
  x86/cpu: Reinitialize IA32_FEAT_CTL MSR on BSP during wakeup
  syscalls: Fix offset type of ksys_ftruncate()
  x86/resctrl: Fix memory bandwidth counter width for AMD
2 parents c141b30 + bb5570a commit 098c793
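Of the fixes above, the truncate() one is a pure type-width bug: on a 32-bit build, passing a 64-bit file length through a 32-bit parameter silently drops the upper half. A minimal userspace sketch of that failure mode (the helper and types here are illustrative only, not the kernel's actual ksys_ftruncate() signature):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical 32-bit "length" parameter: a 64-bit offset loses its upper half. */
static uint32_t narrow_length(uint32_t length)
{
	return length;
}

int main(void)
{
	int64_t requested = 0x100000000LL;	/* 4 GiB */
	uint32_t truncated = narrow_length((uint32_t)requested);

	/* Prints requested=4294967296 got=0: the offset wrapped to zero
	 * on its way through the too-narrow type. */
	printf("requested=%lld got=%u\n", (long long)requested, truncated);
	return 0;
}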

File tree

11 files changed, +33 -21 lines


arch/x86/include/asm/cpu.h

Lines changed: 5 additions & 0 deletions
@@ -58,4 +58,9 @@ static inline bool handle_guest_split_lock(unsigned long ip)
 	return false;
 }
 #endif
+#ifdef CONFIG_IA32_FEAT_CTL
+void init_ia32_feat_ctl(struct cpuinfo_x86 *c);
+#else
+static inline void init_ia32_feat_ctl(struct cpuinfo_x86 *c) {}
+#endif
 #endif /* _ASM_X86_CPU_H */

arch/x86/kernel/cpu/centaur.c

Lines changed: 1 addition & 0 deletions
@@ -3,6 +3,7 @@
 #include <linux/sched.h>
 #include <linux/sched/clock.h>
 
+#include <asm/cpu.h>
 #include <asm/cpufeature.h>
 #include <asm/e820/api.h>
 #include <asm/mtrr.h>

arch/x86/kernel/cpu/common.c

Lines changed: 12 additions & 12 deletions
@@ -347,6 +347,9 @@ static __always_inline void setup_umip(struct cpuinfo_x86 *c)
 	cr4_clear_bits(X86_CR4_UMIP);
 }
 
+/* These bits should not change their value after CPU init is finished. */
+static const unsigned long cr4_pinned_mask =
+	X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_UMIP | X86_CR4_FSGSBASE;
 static DEFINE_STATIC_KEY_FALSE_RO(cr_pinning);
 static unsigned long cr4_pinned_bits __ro_after_init;
 
@@ -371,20 +374,20 @@ EXPORT_SYMBOL(native_write_cr0);
 
 void native_write_cr4(unsigned long val)
 {
-	unsigned long bits_missing = 0;
+	unsigned long bits_changed = 0;
 
 set_register:
 	asm volatile("mov %0,%%cr4": "+r" (val), "+m" (cr4_pinned_bits));
 
 	if (static_branch_likely(&cr_pinning)) {
-		if (unlikely((val & cr4_pinned_bits) != cr4_pinned_bits)) {
-			bits_missing = ~val & cr4_pinned_bits;
-			val |= bits_missing;
+		if (unlikely((val & cr4_pinned_mask) != cr4_pinned_bits)) {
+			bits_changed = (val & cr4_pinned_mask) ^ cr4_pinned_bits;
+			val = (val & ~cr4_pinned_mask) | cr4_pinned_bits;
 			goto set_register;
 		}
-		/* Warn after we've set the missing bits. */
-		WARN_ONCE(bits_missing, "CR4 bits went missing: %lx!?\n",
-			  bits_missing);
+		/* Warn after we've corrected the changed bits. */
+		WARN_ONCE(bits_changed, "pinned CR4 bits changed: 0x%lx!?\n",
+			  bits_changed);
 	}
 }
 #if IS_MODULE(CONFIG_LKDTM)
@@ -419,7 +422,7 @@ void cr4_init(void)
 	if (boot_cpu_has(X86_FEATURE_PCID))
 		cr4 |= X86_CR4_PCIDE;
 	if (static_branch_likely(&cr_pinning))
-		cr4 |= cr4_pinned_bits;
+		cr4 = (cr4 & ~cr4_pinned_mask) | cr4_pinned_bits;
 
 	__write_cr4(cr4);
 
@@ -434,10 +437,7 @@ void cr4_init(void)
  */
 static void __init setup_cr_pinning(void)
 {
-	unsigned long mask;
-
-	mask = (X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_UMIP);
-	cr4_pinned_bits = this_cpu_read(cpu_tlbstate.cr4) & mask;
+	cr4_pinned_bits = this_cpu_read(cpu_tlbstate.cr4) & cr4_pinned_mask;
 	static_key_enable(&cr_pinning.key);
 }
 
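The behavioral change is that pinning now enforces both directions: bits that were 0 at boot can no longer be smuggled in, and bits that were 1 at boot can no longer be cleared. A small userspace sketch of the corrected mask/XOR logic, using illustrative CR4 bit values and a hypothetical pin_cr4() helper rather than the kernel's asm/processor-flags.h definitions:

#include <stdio.h>

/* Illustrative CR4 bit values; the real ones live in asm/processor-flags.h. */
#define CR4_UMIP	(1UL << 11)
#define CR4_FSGSBASE	(1UL << 16)
#define CR4_SMEP	(1UL << 20)
#define CR4_SMAP	(1UL << 21)

static const unsigned long pinned_mask = CR4_SMEP | CR4_SMAP | CR4_UMIP | CR4_FSGSBASE;
static unsigned long pinned_bits;	/* snapshot of the pinned bits taken once at "boot" */

/* Mirror of the corrected write path: force every pinned bit back to its boot value. */
static unsigned long pin_cr4(unsigned long val)
{
	if ((val & pinned_mask) != pinned_bits) {
		unsigned long changed = (val & pinned_mask) ^ pinned_bits;

		fprintf(stderr, "pinned CR4 bits changed: 0x%lx\n", changed);
		val = (val & ~pinned_mask) | pinned_bits;
	}
	return val;
}

int main(void)
{
	unsigned long boot_cr4 = CR4_SMEP | CR4_SMAP | CR4_FSGSBASE;	/* no UMIP on this "CPU" */

	pinned_bits = boot_cr4 & pinned_mask;

	printf("0x%lx\n", pin_cr4(boot_cr4 & ~CR4_SMEP));	/* cleared SMEP is restored */
	printf("0x%lx\n", pin_cr4(boot_cr4 | CR4_UMIP));	/* injected UMIP is stripped again */
	return 0;
}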

arch/x86/kernel/cpu/cpu.h

Lines changed: 0 additions & 4 deletions
@@ -81,8 +81,4 @@ extern void update_srbds_msr(void);
 
 extern u64 x86_read_arch_cap_msr(void);
 
-#ifdef CONFIG_IA32_FEAT_CTL
-void init_ia32_feat_ctl(struct cpuinfo_x86 *c);
-#endif
-
 #endif /* ARCH_X86_CPU_H */

arch/x86/kernel/cpu/resctrl/core.c

Lines changed: 4 additions & 4 deletions
@@ -981,10 +981,10 @@ void resctrl_cpu_detect(struct cpuinfo_x86 *c)
 
 		c->x86_cache_max_rmid = ecx;
 		c->x86_cache_occ_scale = ebx;
-		if (c->x86_vendor == X86_VENDOR_INTEL)
-			c->x86_cache_mbm_width_offset = eax & 0xff;
-		else
-			c->x86_cache_mbm_width_offset = -1;
+		c->x86_cache_mbm_width_offset = eax & 0xff;
+
+		if (c->x86_vendor == X86_VENDOR_AMD && !c->x86_cache_mbm_width_offset)
+			c->x86_cache_mbm_width_offset = MBM_CNTR_WIDTH_OFFSET_AMD;
 	}
 }
 
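Why the offset matters: resctrl derives the MBM counter width from it and uses that width when differencing the free-running hardware counters, so an AMD system reporting 0 via CPUID would otherwise be treated as having narrow counters and produce bogus bandwidth numbers. A hedged sketch of that derivation and the wrap-safe delta; the 24-bit MBM_CNTR_WIDTH_BASE and the shift trick are modeled on the resctrl code and should be read as assumptions, not verbatim kernel source:

#include <stdint.h>
#include <stdio.h>

#define MBM_CNTR_WIDTH_BASE		24	/* assumed base width */
#define MBM_CNTR_WIDTH_OFFSET_AMD	20

/* Difference of two reads of a free-running counter that wraps at 'width' bits. */
static uint64_t mbm_delta(uint64_t prev, uint64_t now, unsigned int width)
{
	unsigned int shift = 64 - width;

	/* Shift the counter's MSB up to bit 63 so unsigned subtraction
	 * handles a single wrap-around of the hardware counter. */
	return ((now << shift) - (prev << shift)) >> shift;
}

int main(void)
{
	unsigned int amd_width = MBM_CNTR_WIDTH_BASE + MBM_CNTR_WIDTH_OFFSET_AMD;	/* 44 bits */
	uint64_t before = (1ULL << amd_width) - 100;	/* just below the wrap point */
	uint64_t after  = 50;				/* counter wrapped past zero */

	/* With the correct 44-bit width the delta is 150, not a huge bogus value. */
	printf("width=%u delta=%llu\n", amd_width,
	       (unsigned long long)mbm_delta(before, after, amd_width));
	return 0;
}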

arch/x86/kernel/cpu/resctrl/internal.h

Lines changed: 1 addition & 0 deletions
@@ -37,6 +37,7 @@
 #define MBA_IS_LINEAR			0x4
 #define MBA_MAX_MBPS			U32_MAX
 #define MAX_MBA_BW_AMD			0x800
+#define MBM_CNTR_WIDTH_OFFSET_AMD	20
 
 #define RMID_VAL_ERROR			BIT_ULL(63)
 #define RMID_VAL_UNAVAIL		BIT_ULL(62)

arch/x86/kernel/cpu/resctrl/rdtgroup.c

Lines changed: 1 addition & 0 deletions
@@ -1117,6 +1117,7 @@ static int rdt_cdp_peer_get(struct rdt_resource *r, struct rdt_domain *d,
 	_d_cdp = rdt_find_domain(_r_cdp, d->id, NULL);
 	if (WARN_ON(IS_ERR_OR_NULL(_d_cdp))) {
 		_r_cdp = NULL;
+		_d_cdp = NULL;
 		ret = -EINVAL;
 	}
 
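The underlying trap is the ERR_PTR convention: rdt_find_domain() can return an errno encoded as a non-NULL pointer, so clearing only _r_cdp leaves the caller holding a domain pointer that passes a plain NULL check. A simplified userspace re-implementation of the helpers from include/linux/err.h to show why both outputs have to be reset:

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO	4095

/* Simplified userspace versions of the kernel's ERR_PTR helpers. */
static inline void *ERR_PTR(long error)       { return (void *)error; }
static inline long PTR_ERR(const void *ptr)   { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}
static inline int IS_ERR_OR_NULL(const void *ptr)
{
	return !ptr || IS_ERR(ptr);
}

int main(void)
{
	void *domain = ERR_PTR(-ENOENT);	/* lookup failed, errno encoded in the pointer */

	/* A plain NULL check is fooled: the ERR_PTR value is non-NULL. */
	if (domain)
		printf("non-NULL, but IS_ERR=%d (errno %ld)\n",
		       IS_ERR(domain), -PTR_ERR(domain));

	/* Hence the fix: on IS_ERR_OR_NULL(), reset the output pointer to NULL too. */
	if (IS_ERR_OR_NULL(domain))
		domain = NULL;
	printf("after reset: %p\n", domain);
	return 0;
}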

arch/x86/kernel/cpu/zhaoxin.c

Lines changed: 1 addition & 0 deletions
@@ -2,6 +2,7 @@
 #include <linux/sched.h>
 #include <linux/sched/clock.h>
 
+#include <asm/cpu.h>
 #include <asm/cpufeature.h>
 
 #include "cpu.h"

arch/x86/lib/usercopy_64.c

Lines changed: 1 addition & 0 deletions
@@ -24,6 +24,7 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
 	asm volatile(
 		"	testq  %[size8],%[size8]\n"
 		"	jz     4f\n"
+		"	.align 16\n"
 		"0:	movq $0,(%[dst])\n"
 		"	addq   $8,%[dst]\n"
 		"	decl   %%ecx ; jnz   0b\n"

arch/x86/power/cpu.c

Lines changed: 6 additions & 0 deletions
@@ -193,6 +193,8 @@ static void fix_processor_context(void)
  */
 static void notrace __restore_processor_state(struct saved_context *ctxt)
 {
+	struct cpuinfo_x86 *c;
+
 	if (ctxt->misc_enable_saved)
 		wrmsrl(MSR_IA32_MISC_ENABLE, ctxt->misc_enable);
 	/*
@@ -263,6 +265,10 @@ static void notrace __restore_processor_state(struct saved_context *ctxt)
 	mtrr_bp_restore();
 	perf_restore_debug_store();
 	msr_restore_context(ctxt);
+
+	c = &cpu_data(smp_processor_id());
+	if (cpu_has(c, X86_FEATURE_MSR_IA32_FEAT_CTL))
+		init_ia32_feat_ctl(c);
 }
 
 /* Needed by apm.c */
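Background for the resume hook: IA32_FEAT_CTL (MSR 0x3a) can come back with its VMX-enable and lock bits cleared after suspend, and a VMXON executed in that state takes a #GP, so the MSR has to be reprogrammed on the boot CPU during wakeup. A hedged userspace sketch that merely inspects those two bits through the msr driver (requires root and the msr module; bit positions per the Intel SDM, simplified here):

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define MSR_IA32_FEAT_CTL			0x3a
#define FEAT_CTL_LOCKED				(1ULL << 0)
#define FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX	(1ULL << 2)

int main(void)
{
	uint64_t val;
	int fd = open("/dev/cpu/0/msr", O_RDONLY);

	/* The msr driver maps the pread offset to the MSR number. */
	if (fd < 0 || pread(fd, &val, sizeof(val), MSR_IA32_FEAT_CTL) != sizeof(val)) {
		perror("IA32_FEAT_CTL");
		return 1;
	}

	/* Without the fix, both bits could read back as 0 after a resume,
	 * which is exactly the state in which VMXON faults. */
	printf("locked=%d vmx_outside_smx=%d\n",
	       !!(val & FEAT_CTL_LOCKED),
	       !!(val & FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX));
	close(fd);
	return 0;
}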
