Commit 6173d4e

Merge tag 'v6.1.119' into 6.1-main
This is the 6.1.119 stable release

# -----BEGIN PGP SIGNATURE-----
#
# iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmdAlzgACgkQONu9yGCS
# aT42uQ//dtRiaR95GtZ741kjEp3bGCYbSvhQsnCF2O5Ft1ajd7zenog2oZ83Xvnk
# JGZwtRvLrBvYekBFHiftF0i0vCxmLPQVx+GUIqCFU/ZZdITw+n99DH/zfl9soXmh
# 0DpZqNos0TkBdPmw0vYB+mIibAjG5brMwfdrXy5U0T1lrw1EeaUxat05fMBiF5XO
# vSK450RgzGa7h+8jUyK4eR7P+aUjNoUl3XZstFK1YzbVznXomvmXGFB0Xt3KIFEZ
# 5waNFJnYkkB/W9IgrcXNtskPUwh3wJ0RltavQcwnCIeRUCousW+tFPn9OGHbOC9V
# VJGiIuzGIQYEY71Ds7O6CD/3EXWs3fn1qJUU40IvJJvhh5z8G3zJWGXolaIJqLzW
# EkfzXF+mtMzjPm+IR2UrGtVMvDEOi7CdBVGDUrxqRiSrZvBKgu4zbF1ZiDI49hcP
# GuYgcPbEF28W4DoABhbzDMvIkyuyFvBInFHnI04KFlHsGMx14Y6FaMtoOz8g8z+0
# NX43nyL2JiWsSSmiViRJlP2cIdqjZb+c0CdunTo6w/Ho4rnRbaiL3Mggt70CBMCM
# /yUCLNcx0c/sEwb0YF+JNAjqGmhVm+aUZ3CRqjPSN44k4wu6TaokhWnaB9g83KP8
# BvpGK0LWnS2whCHS0SkM96Pp5QYXDoQnRJmw/xo+oYwu4/+dK2s=
# =No10
# -----END PGP SIGNATURE-----
# gpg: Signature made Fri Nov 22 15:37:44 2024 CET
# gpg:                using RSA key 647F28654894E3BD457199BE38DBBDC86092693E
# gpg: Can't check signature: No public key
2 parents 3427489 + e4d90d6 commit 6173d4e

72 files changed: +759, -583 lines changed


Makefile

Lines changed: 1 addition & 1 deletion
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 6
 PATCHLEVEL = 1
-SUBLEVEL = 118
+SUBLEVEL = 119
 EXTRAVERSION =
 NAME = Curry Ramen

arch/arm/kernel/head.S

Lines changed: 6 additions & 2 deletions
@@ -252,27 +252,31 @@ __create_page_tables:
 	 */
 	add	r0, r4, #KERNEL_OFFSET >> (SECTION_SHIFT - PMD_ENTRY_ORDER)
 	ldr	r6, =(_end - 1)
+
+	/* For XIP, kernel_sec_start/kernel_sec_end are currently in RO memory */
+#ifndef CONFIG_XIP_KERNEL
 	adr_l	r5, kernel_sec_start	@ _pa(kernel_sec_start)
 #if defined CONFIG_CPU_ENDIAN_BE8 || defined CONFIG_CPU_ENDIAN_BE32
 	str	r8, [r5, #4]		@ Save physical start of kernel (BE)
 #else
 	str	r8, [r5]		@ Save physical start of kernel (LE)
+#endif
 #endif
 	orr	r3, r8, r7		@ Add the MMU flags
 	add	r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ENTRY_ORDER)
 1:	str	r3, [r0], #1 << PMD_ENTRY_ORDER
 	add	r3, r3, #1 << SECTION_SHIFT
 	cmp	r0, r6
 	bls	1b
+#ifndef CONFIG_XIP_KERNEL
 	eor	r3, r3, r7		@ Remove the MMU flags
 	adr_l	r5, kernel_sec_end	@ _pa(kernel_sec_end)
 #if defined CONFIG_CPU_ENDIAN_BE8 || defined CONFIG_CPU_ENDIAN_BE32
 	str	r3, [r5, #4]		@ Save physical end of kernel (BE)
 #else
 	str	r3, [r5]		@ Save physical end of kernel (LE)
 #endif
-
-#ifdef CONFIG_XIP_KERNEL
+#else
 	/*
 	 * Map the kernel image separately as it is not located in RAM.
 	 */

arch/arm/mm/mmu.c

Lines changed: 21 additions & 13 deletions
@@ -1401,18 +1401,6 @@ static void __init devicemaps_init(const struct machine_desc *mdesc)
 		create_mapping(&map);
 	}
 
-	/*
-	 * Map the kernel if it is XIP.
-	 * It is always first in the modulearea.
-	 */
-#ifdef CONFIG_XIP_KERNEL
-	map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
-	map.virtual = MODULES_VADDR;
-	map.length = ((unsigned long)_exiprom - map.virtual + ~SECTION_MASK) & SECTION_MASK;
-	map.type = MT_ROM;
-	create_mapping(&map);
-#endif
-
 	/*
 	 * Map the cache flushing regions.
 	 */
@@ -1602,12 +1590,27 @@ static void __init map_kernel(void)
 	 * This will only persist until we turn on proper memory management later on
 	 * and we remap the whole kernel with page granularity.
 	 */
+#ifdef CONFIG_XIP_KERNEL
+	phys_addr_t kernel_nx_start = kernel_sec_start;
+#else
 	phys_addr_t kernel_x_start = kernel_sec_start;
 	phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
 	phys_addr_t kernel_nx_start = kernel_x_end;
+#endif
 	phys_addr_t kernel_nx_end = kernel_sec_end;
 	struct map_desc map;
 
+	/*
+	 * Map the kernel if it is XIP.
+	 * It is always first in the modulearea.
+	 */
+#ifdef CONFIG_XIP_KERNEL
+	map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
+	map.virtual = MODULES_VADDR;
+	map.length = ((unsigned long)_exiprom - map.virtual + ~SECTION_MASK) & SECTION_MASK;
+	map.type = MT_ROM;
+	create_mapping(&map);
+#else
 	map.pfn = __phys_to_pfn(kernel_x_start);
 	map.virtual = __phys_to_virt(kernel_x_start);
 	map.length = kernel_x_end - kernel_x_start;
@@ -1617,7 +1620,7 @@ static void __init map_kernel(void)
 	/* If the nx part is small it may end up covered by the tail of the RWX section */
 	if (kernel_x_end == kernel_nx_end)
 		return;
-
+#endif
 	map.pfn = __phys_to_pfn(kernel_nx_start);
 	map.virtual = __phys_to_virt(kernel_nx_start);
 	map.length = kernel_nx_end - kernel_nx_start;
@@ -1762,6 +1765,11 @@ void __init paging_init(const struct machine_desc *mdesc)
 {
 	void *zero_page;
 
+#ifdef CONFIG_XIP_KERNEL
+	/* Store the kernel RW RAM region start/end in these variables */
+	kernel_sec_start = CONFIG_PHYS_OFFSET & SECTION_MASK;
+	kernel_sec_end = round_up(__pa(_end), SECTION_SIZE);
+#endif
 	pr_debug("physical kernel sections: 0x%08llx-0x%08llx\n",
 		 kernel_sec_start, kernel_sec_end);

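A side note on the mapping arithmetic carried into map_kernel(): SECTION_MASK is ~(SECTION_SIZE - 1), so adding ~SECTION_MASK before masking rounds the XIP image size up to a whole number of sections. A minimal standalone C sketch of that idiom (the 1 MiB section size and all addresses below are illustrative assumptions, not values from this diff):

#include <stdio.h>

/* Assumed values for illustration only: a 1 MiB ARM section. */
#define SECTION_SHIFT 20
#define SECTION_SIZE  (1UL << SECTION_SHIFT)
#define SECTION_MASK  (~(SECTION_SIZE - 1))

int main(void)
{
	/* Hypothetical XIP image starting at a stand-in for MODULES_VADDR. */
	unsigned long virt_start = 0x7f000000UL;
	unsigned long exiprom    = virt_start + 0x24c000UL; /* stand-in for _exiprom */

	/* Same expression as the diff: round the image size up to whole
	 * sections; ~SECTION_MASK == SECTION_SIZE - 1, the usual round-up. */
	unsigned long length = (exiprom - virt_start + ~SECTION_MASK) & SECTION_MASK;

	printf("image spans %lu bytes -> mapped length %lu bytes (%lu sections)\n",
	       exiprom - virt_start, length, length >> SECTION_SHIFT);
	return 0;
}

Run as written, the hypothetical 0x24c000-byte image rounds up to three 1 MiB sections.
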
arch/arm64/include/asm/mman.h

Lines changed: 7 additions & 3 deletions
@@ -3,6 +3,8 @@
 #define __ASM_MMAN_H__
 
 #include <linux/compiler.h>
+#include <linux/fs.h>
+#include <linux/shmem_fs.h>
 #include <linux/types.h>
 #include <uapi/asm/mman.h>
 
@@ -21,19 +23,21 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot,
 }
 #define arch_calc_vm_prot_bits(prot, pkey) arch_calc_vm_prot_bits(prot, pkey)
 
-static inline unsigned long arch_calc_vm_flag_bits(unsigned long flags)
+static inline unsigned long arch_calc_vm_flag_bits(struct file *file,
+						   unsigned long flags)
 {
 	/*
 	 * Only allow MTE on anonymous mappings as these are guaranteed to be
 	 * backed by tags-capable memory. The vm_flags may be overridden by a
 	 * filesystem supporting MTE (RAM-based).
 	 */
-	if (system_supports_mte() && (flags & MAP_ANONYMOUS))
+	if (system_supports_mte() &&
+	    ((flags & MAP_ANONYMOUS) || shmem_file(file)))
 		return VM_MTE_ALLOWED;
 
 	return 0;
 }
-#define arch_calc_vm_flag_bits(flags) arch_calc_vm_flag_bits(flags)
+#define arch_calc_vm_flag_bits(file, flags) arch_calc_vm_flag_bits(file, flags)
 
 static inline bool arch_validate_prot(unsigned long prot,
 				      unsigned long addr __always_unused)

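The arch_calc_vm_flag_bits() change above means shmem-backed mappings (e.g. a memfd) can now carry VM_MTE_ALLOWED, not just MAP_ANONYMOUS ones. A hedged userspace sketch of the newly-permitted case, assuming MTE-capable arm64 hardware (the PROT_MTE fallback define mirrors the arm64 uapi value; this is an illustration, not a test from the patch):

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

#ifndef PROT_MTE
#define PROT_MTE 0x20			/* arm64-specific protection bit */
#endif

int main(void)
{
	int fd = memfd_create("mte-shmem-demo", 0);	/* shmem-backed file */
	if (fd < 0 || ftruncate(fd, 4096) != 0) {
		perror("memfd_create/ftruncate");
		return 1;
	}

	/* shmem_file(file) now sets VM_MTE_ALLOWED, so PROT_MTE is accepted
	 * here just as it already was for MAP_ANONYMOUS mappings. */
	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE | PROT_MTE,
		       MAP_SHARED, fd, 0);
	if (p == MAP_FAILED) {
		perror("mmap(PROT_MTE) on shmem");	/* expected on kernels without the fix */
		return 1;
	}
	puts("PROT_MTE accepted on a shmem mapping");
	return 0;
}
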
arch/parisc/Kconfig

Lines changed: 1 addition & 0 deletions
@@ -18,6 +18,7 @@ config PARISC
 	select ARCH_SUPPORTS_HUGETLBFS if PA20
 	select ARCH_SUPPORTS_MEMORY_FAILURE
 	select ARCH_STACKWALK
+	select ARCH_HAS_CACHE_LINE_SIZE
 	select ARCH_HAS_DEBUG_VM_PGTABLE
 	select HAVE_RELIABLE_STACKTRACE
 	select DMA_OPS

arch/parisc/include/asm/cache.h

Lines changed: 10 additions & 1 deletion
@@ -20,7 +20,16 @@
 
 #define SMP_CACHE_BYTES L1_CACHE_BYTES
 
-#define ARCH_DMA_MINALIGN	L1_CACHE_BYTES
+#ifdef CONFIG_PA20
+#define ARCH_DMA_MINALIGN	128
+#else
+#define ARCH_DMA_MINALIGN	32
+#endif
+#define ARCH_KMALLOC_MINALIGN	16	/* ldcw requires 16-byte alignment */
+
+#define arch_slab_minalign()	((unsigned)dcache_stride)
+#define cache_line_size()	dcache_stride
+#define dma_get_cache_alignment cache_line_size
 
 #define __read_mostly __section(".data..read_mostly")

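The ARCH_KMALLOC_MINALIGN comment above refers to PA-RISC's ldcw (load-and-clear-word) instruction, which requires a 16-byte-aligned operand. A small standalone sketch of the usual workaround when storage isn't guaranteed to be aligned: reserve slack and round the lock address up (names here are illustrative; this is the same round-up idea, not the kernel's actual macro):

#include <stdint.h>
#include <stdio.h>

#define LDCW_ALIGNMENT 16UL

struct demo_lock {
	unsigned int words[7];	/* one lock word plus alignment slack */
};

static unsigned int *ldcw_align(struct demo_lock *l)
{
	uintptr_t a = (uintptr_t)l->words;
	return (unsigned int *)((a + LDCW_ALIGNMENT - 1) & ~(LDCW_ALIGNMENT - 1));
}

int main(void)
{
	struct demo_lock l;
	unsigned int *lock_word = ldcw_align(&l);
	printf("lock word at %p, 16-byte aligned: %s\n", (void *)lock_word,
	       ((uintptr_t)lock_word % 16) ? "no" : "yes");
	return 0;
}
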
arch/x86/kvm/lapic.c

Lines changed: 18 additions & 11 deletions
@@ -2453,19 +2453,26 @@ void kvm_apic_update_apicv(struct kvm_vcpu *vcpu)
 {
 	struct kvm_lapic *apic = vcpu->arch.apic;
 
-	if (apic->apicv_active) {
-		/* irr_pending is always true when apicv is activated. */
-		apic->irr_pending = true;
+	/*
+	 * When APICv is enabled, KVM must always search the IRR for a pending
+	 * IRQ, as other vCPUs and devices can set IRR bits even if the vCPU
+	 * isn't running.  If APICv is disabled, KVM _should_ search the IRR
+	 * for a pending IRQ.  But KVM currently doesn't ensure *all* hardware,
+	 * e.g. CPUs and IOMMUs, has seen the change in state, i.e. searching
+	 * the IRR at this time could race with IRQ delivery from hardware that
+	 * still sees APICv as being enabled.
+	 *
+	 * FIXME: Ensure other vCPUs and devices observe the change in APICv
+	 *        state prior to updating KVM's metadata caches, so that KVM
+	 *        can safely search the IRR and set irr_pending accordingly.
+	 */
+	apic->irr_pending = true;
+
+	if (apic->apicv_active)
 		apic->isr_count = 1;
-	} else {
-		/*
-		 * Don't clear irr_pending, searching the IRR can race with
-		 * updates from the CPU as APICv is still active from hardware's
-		 * perspective.  The flag will be cleared as appropriate when
-		 * KVM injects the interrupt.
-		 */
+	else
 		apic->isr_count = count_vectors(apic->regs + APIC_ISR);
-	}
+
 	apic->highest_isr_cache = -1;
 }
 EXPORT_SYMBOL_GPL(kvm_apic_update_apicv);

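For context on the count_vectors() call in the rewritten else-branch: the local APIC exposes its 256-bit ISR/IRR as eight 32-bit registers, each in its own 16-byte slot of the register page, and count_vectors() tallies the set bits across them. A standalone sketch of that walk (the slot layout and names are assumptions for illustration, not KVM's exact code):

#include <stdio.h>

#define APIC_VECTORS_PER_REG 32
#define WORDS_PER_SLOT 4		/* 16-byte register stride / sizeof(u32) */

static int count_vectors(const unsigned int *regs)
{
	int count = 0;
	/* Eight registers cover vectors 0..255; only every 4th word is live. */
	for (int reg = 0; reg < 256 / APIC_VECTORS_PER_REG; reg++)
		count += __builtin_popcount(regs[reg * WORDS_PER_SLOT]);
	return count;
}

int main(void)
{
	unsigned int regs[8 * WORDS_PER_SLOT] = {0};

	regs[0 * WORDS_PER_SLOT] = 1u << 5;	/* vector 5 pending */
	regs[3 * WORDS_PER_SLOT] = 1u << 1;	/* vector 96 + 1 = 97 pending */

	printf("pending vectors: %d\n", count_vectors(regs));	/* prints 2 */
	return 0;
}
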
arch/x86/kvm/vmx/nested.c

Lines changed: 25 additions & 5 deletions
@@ -1126,11 +1126,14 @@ static void nested_vmx_transition_tlb_flush(struct kvm_vcpu *vcpu,
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 
 	/*
-	 * If vmcs12 doesn't use VPID, L1 expects linear and combined mappings
-	 * for *all* contexts to be flushed on VM-Enter/VM-Exit, i.e. it's a
-	 * full TLB flush from the guest's perspective.  This is required even
-	 * if VPID is disabled in the host as KVM may need to synchronize the
-	 * MMU in response to the guest TLB flush.
+	 * If VPID is disabled, then guest TLB accesses use VPID=0, i.e. the
+	 * same VPID as the host, and so architecturally, linear and combined
+	 * mappings for VPID=0 must be flushed at VM-Enter and VM-Exit.  KVM
+	 * emulates L2 sharing L1's VPID=0 by using vpid01 while running L2,
+	 * and so KVM must also emulate TLB flush of VPID=0, i.e. vpid01.  This
+	 * is required if VPID is disabled in KVM, as a TLB flush (there are no
+	 * VPIDs) still occurs from L1's perspective, and KVM may need to
+	 * synchronize the MMU in response to the guest TLB flush.
 	 *
 	 * Note, using TLB_FLUSH_GUEST is correct even if nested EPT is in use.
 	 * EPT is a special snowflake, as guest-physical mappings aren't
@@ -2196,6 +2199,17 @@ static void prepare_vmcs02_early_rare(struct vcpu_vmx *vmx,
 
 	vmcs_write64(VMCS_LINK_POINTER, INVALID_GPA);
 
+	/*
+	 * If VPID is disabled, then guest TLB accesses use VPID=0, i.e. the
+	 * same VPID as the host.  Emulate this behavior by using vpid01 for L2
+	 * if VPID is disabled in vmcs12.  Note, if VPID is disabled, VM-Enter
+	 * and VM-Exit are architecturally required to flush VPID=0, but *only*
+	 * VPID=0.  I.e. using vpid02 would be ok (so long as KVM emulates the
+	 * required flushes), but doing so would cause KVM to over-flush.  E.g.
+	 * if L1 runs L2 X with VPID12=1, then runs L2 Y with VPID12 disabled,
+	 * and then runs L2 X again, then KVM can and should retain TLB entries
+	 * for VPID12=1.
+	 */
 	if (enable_vpid) {
 		if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02)
 			vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->nested.vpid02);
@@ -5758,6 +5772,12 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
 		return nested_vmx_fail(vcpu,
 			VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
 
+	/*
+	 * Always flush the effective vpid02, i.e. never flush the current VPID
+	 * and never explicitly flush vpid01.  INVVPID targets a VPID, not a
+	 * VMCS, and so whether or not the current vmcs12 has VPID enabled is
+	 * irrelevant (and there may not be a loaded vmcs12).
+	 */
 	vpid02 = nested_get_vpid02(vcpu);
 	switch (type) {
 	case VMX_VPID_EXTENT_INDIVIDUAL_ADDR:

arch/x86/kvm/vmx/vmx.c

Lines changed: 4 additions & 2 deletions
@@ -209,9 +209,11 @@ module_param(ple_window_shrink, uint, 0444);
 static unsigned int ple_window_max = KVM_VMX_DEFAULT_PLE_WINDOW_MAX;
 module_param(ple_window_max, uint, 0444);
 
-/* Default is SYSTEM mode, 1 for host-guest mode */
+/* Default is SYSTEM mode, 1 for host-guest mode (which is BROKEN) */
 int __read_mostly pt_mode = PT_MODE_SYSTEM;
+#ifdef CONFIG_BROKEN
 module_param(pt_mode, int, S_IRUGO);
+#endif
 
 static DEFINE_STATIC_KEY_FALSE(vmx_l1d_should_flush);
 static DEFINE_STATIC_KEY_FALSE(vmx_l1d_flush_cond);
@@ -3098,7 +3100,7 @@ static void vmx_flush_tlb_all(struct kvm_vcpu *vcpu)
 
 static inline int vmx_get_current_vpid(struct kvm_vcpu *vcpu)
 {
-	if (is_guest_mode(vcpu))
+	if (is_guest_mode(vcpu) && nested_cpu_has_vpid(get_vmcs12(vcpu)))
 		return nested_get_vpid02(vcpu);
 	return to_vmx(vcpu)->vpid;
 }

arch/x86/mm/ioremap.c

Lines changed: 4 additions & 2 deletions
@@ -650,7 +650,8 @@ static bool memremap_is_setup_data(resource_size_t phys_addr,
 		paddr_next = data->next;
 		len = data->len;
 
-		if ((phys_addr > paddr) && (phys_addr < (paddr + len))) {
+		if ((phys_addr > paddr) &&
+		    (phys_addr < (paddr + sizeof(struct setup_data) + len))) {
 			memunmap(data);
 			return true;
 		}
@@ -712,7 +713,8 @@ static bool __init early_memremap_is_setup_data(resource_size_t phys_addr,
 		paddr_next = data->next;
 		len = data->len;
 
-		if ((phys_addr > paddr) && (phys_addr < (paddr + len))) {
+		if ((phys_addr > paddr) &&
+		    (phys_addr < (paddr + sizeof(struct setup_data) + len))) {
 			early_memunmap(data, sizeof(*data));
 			return true;
 		}

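The ioremap.c fix widens the overlap test to cover the setup_data header as well as its payload: the blob at paddr actually spans sizeof(struct setup_data) + len bytes, and the old upper bound of paddr + len missed addresses in the tail. A standalone sketch of the corrected bound, using a trimmed stand-in struct and made-up addresses:

#include <stdio.h>
#include <stdint.h>

/* Trimmed stand-in for the boot-protocol struct setup_data header
 * (u64 next, u32 type, u32 len, followed by `len` payload bytes). */
struct setup_data_hdr {
	uint64_t next;
	uint32_t type;
	uint32_t len;
};

/* Does phys_addr fall inside the setup_data blob at paddr (header + payload)?
 * The pre-fix check used paddr + len as the upper bound, omitting the header. */
static int overlaps_setup_data(uint64_t phys_addr, uint64_t paddr, uint32_t len)
{
	return (phys_addr > paddr) &&
	       (phys_addr < paddr + sizeof(struct setup_data_hdr) + len);
}

int main(void)
{
	uint64_t paddr = 0x100000;	/* illustrative placement */
	uint32_t len   = 0x40;		/* 64-byte payload */

	/* An address in the blob's tail: the old paddr + len bound wrongly
	 * reported no overlap here, while the fixed bound catches it. */
	uint64_t probe = paddr + len + 8;
	printf("fixed check: %s\n",
	       overlaps_setup_data(probe, paddr, len) ? "overlap" : "no overlap");
	return 0;
}
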