
Commit 7cd9a11

rppt authored and Peter Zijlstra committed
Revert "mm/execmem: Unify early execmem_cache behaviour"
The commit d6d1e3e ("mm/execmem: Unify early execmem_cache behaviour") changed the early behaviour of the execmem ROX cache to allow its use by early x86 code that allocates text pages when CONFIG_MITIGATION_ITS is enabled.

The permission management of the pages allocated from execmem for the ITS mitigation is now completely contained in arch/x86/kernel/alternatives.c, so there is no need to special-case early allocations in execmem.

This reverts commit d6d1e3e.

Signed-off-by: Mike Rapoport (Microsoft) <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Cc: [email protected]
Link: https://lkml.kernel.org/r/[email protected]
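For context, the interfaces that survive this revert (declared in include/linux/execmem.h, see the diff below) already let a caller manage page permissions itself: execmem hands out ROX memory and the caller flips it writable only around its own writes. The sketch below illustrates that caller-owned pattern; it is not the actual arch/x86/kernel/alternatives.c code, and the its_write_thunk() helper name, the PAGE_SIZE-sized allocation, and the error handling are assumptions.

/*
 * Illustrative sketch only -- not the real alternatives.c ITS code.
 * Shows the caller-owned permission pattern this revert relies on,
 * using execmem_make_temp_rw()/execmem_restore_rox() from
 * include/linux/execmem.h.
 */
#include <linux/execmem.h>
#include <linux/string.h>

static void *its_write_thunk(const void *insns, size_t len)	/* hypothetical helper */
{
	void *page = execmem_alloc(EXECMEM_MODULE_TEXT, PAGE_SIZE);

	if (!page)
		return NULL;

	/* Drop ROX just long enough to write the thunk... */
	if (execmem_make_temp_rw(page, PAGE_SIZE))
		goto err_free;

	memcpy(page, insns, len);

	/* ...then restore read-only + executable before it is used. */
	if (execmem_restore_rox(page, PAGE_SIZE))
		goto err_free;

	return page;

err_free:
	execmem_free(page);
	return NULL;
}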
1 parent a82b264 commit 7cd9a11

File tree

arch/x86/mm/init_32.c
arch/x86/mm/init_64.c
include/linux/execmem.h
mm/execmem.c

4 files changed: +4 −50 lines changed

arch/x86/mm/init_32.c

Lines changed: 0 additions & 3 deletions

@@ -30,7 +30,6 @@
 #include <linux/initrd.h>
 #include <linux/cpumask.h>
 #include <linux/gfp.h>
-#include <linux/execmem.h>
 
 #include <asm/asm.h>
 #include <asm/bios_ebda.h>
@@ -749,8 +748,6 @@ void mark_rodata_ro(void)
 	pr_info("Write protecting kernel text and read-only data: %luk\n",
 		size >> 10);
 
-	execmem_cache_make_ro();
-
 	kernel_set_to_readonly = 1;
 
 #ifdef CONFIG_CPA_DEBUG

arch/x86/mm/init_64.c

Lines changed: 0 additions & 3 deletions

@@ -34,7 +34,6 @@
 #include <linux/gfp.h>
 #include <linux/kcore.h>
 #include <linux/bootmem_info.h>
-#include <linux/execmem.h>
 
 #include <asm/processor.h>
 #include <asm/bios_ebda.h>
@@ -1392,8 +1391,6 @@ void mark_rodata_ro(void)
 		(end - start) >> 10);
 	set_memory_ro(start, (end - start) >> PAGE_SHIFT);
 
-	execmem_cache_make_ro();
-
 	kernel_set_to_readonly = 1;
 
 	/*

include/linux/execmem.h

Lines changed: 1 addition & 7 deletions

@@ -54,7 +54,7 @@ enum execmem_range_flags {
 	EXECMEM_ROX_CACHE	= (1 << 1),
 };
 
-#if defined(CONFIG_ARCH_HAS_EXECMEM_ROX) && defined(CONFIG_EXECMEM)
+#ifdef CONFIG_ARCH_HAS_EXECMEM_ROX
 /**
  * execmem_fill_trapping_insns - set memory to contain instructions that
  *				 will trap
@@ -94,15 +94,9 @@ int execmem_make_temp_rw(void *ptr, size_t size);
  * Return: 0 on success or negative error code on failure.
  */
 int execmem_restore_rox(void *ptr, size_t size);
-
-/*
- * Called from mark_readonly(), where the system transitions to ROX.
- */
-void execmem_cache_make_ro(void);
 #else
 static inline int execmem_make_temp_rw(void *ptr, size_t size) { return 0; }
 static inline int execmem_restore_rox(void *ptr, size_t size) { return 0; }
-static inline void execmem_cache_make_ro(void) { }
 #endif
 
 /**

mm/execmem.c

Lines changed: 3 additions & 37 deletions

@@ -254,34 +254,6 @@ static void *__execmem_cache_alloc(struct execmem_range *range, size_t size)
 	return ptr;
 }
 
-static bool execmem_cache_rox = false;
-
-void execmem_cache_make_ro(void)
-{
-	struct maple_tree *free_areas = &execmem_cache.free_areas;
-	struct maple_tree *busy_areas = &execmem_cache.busy_areas;
-	MA_STATE(mas_free, free_areas, 0, ULONG_MAX);
-	MA_STATE(mas_busy, busy_areas, 0, ULONG_MAX);
-	struct mutex *mutex = &execmem_cache.mutex;
-	void *area;
-
-	execmem_cache_rox = true;
-
-	mutex_lock(mutex);
-
-	mas_for_each(&mas_free, area, ULONG_MAX) {
-		unsigned long pages = mas_range_len(&mas_free) >> PAGE_SHIFT;
-		set_memory_ro(mas_free.index, pages);
-	}
-
-	mas_for_each(&mas_busy, area, ULONG_MAX) {
-		unsigned long pages = mas_range_len(&mas_busy) >> PAGE_SHIFT;
-		set_memory_ro(mas_busy.index, pages);
-	}
-
-	mutex_unlock(mutex);
-}
-
 static int execmem_cache_populate(struct execmem_range *range, size_t size)
 {
 	unsigned long vm_flags = VM_ALLOW_HUGE_VMAP;
@@ -302,15 +274,9 @@ static int execmem_cache_populate(struct execmem_range *range, size_t size)
 	/* fill memory with instructions that will trap */
 	execmem_fill_trapping_insns(p, alloc_size, /* writable = */ true);
 
-	if (execmem_cache_rox) {
-		err = set_memory_rox((unsigned long)p, vm->nr_pages);
-		if (err)
-			goto err_free_mem;
-	} else {
-		err = set_memory_x((unsigned long)p, vm->nr_pages);
-		if (err)
-			goto err_free_mem;
-	}
+	err = set_memory_rox((unsigned long)p, vm->nr_pages);
+	if (err)
+		goto err_free_mem;
 
 	err = execmem_cache_add(p, alloc_size);
 	if (err)