
Commit e559489

pillo79 authored and nashif committed
llext: stricter alignment checks for MPU/MMU regions
This patch moves the alignment checks for MPU and MMU regions to the beginning of the llext_copy_region() function. This ensures that the correct region alignment and size are used even when the region is reused from the ELF file buffer, avoiding MMU/MPU configuration issues. It also relaxes the same checks for regions that are accessed by the kernel only (e.g. symbol and string tables), which do not need special MMU/MPU treatment.

This exposed an inconsistency in the MMU code, which was setting the permissions on the correct regions but later restoring the default permissions on every region, including the now-unaligned ones.

Signed-off-by: Luca Burelli <[email protected]>
1 parent 9fa6a95 commit e559489
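
For readers unfamiliar with the two code paths, here is a minimal standalone sketch of the sizing math that the patch now applies only to SHF_ALLOC regions. The helpers next_pow2(), round_up() and max_uptr() are local stand-ins for Zephyr's LOG2CEIL, ROUND_UP and MAX macros, and the 4 KiB LLEXT_PAGE_SIZE and the sample region values are assumptions chosen only for illustration.

/*
 * Illustrative sketch only: self-contained rework of the sizing rules in
 * the patch below. LLEXT_PAGE_SIZE and the sample inputs are assumed.
 */
#include <stdint.h>
#include <stdio.h>

#define LLEXT_PAGE_SIZE 4096u	/* assumed MMU page / MPU minimum block size */

static uintptr_t max_uptr(uintptr_t a, uintptr_t b)
{
	return a > b ? a : b;
}

/* Next power of two >= x; mirrors the effect of 1 << LOG2CEIL(x). */
static uintptr_t next_pow2(uintptr_t x)
{
	uintptr_t p = 1;

	while (p < x) {
		p <<= 1;
	}
	return p;
}

/* Round x up to a multiple of align; mirrors ROUND_UP(x, align). */
static uintptr_t round_up(uintptr_t x, uintptr_t align)
{
	return (x + align - 1) / align * align;
}

int main(void)
{
	uintptr_t region_alloc = 5000, region_align = 16;	/* sample region */

	/* ARM MPU path: size and alignment become the same power of two. */
	uintptr_t mpu_block = next_pow2(max_uptr(max_uptr(region_alloc, region_align),
						 LLEXT_PAGE_SIZE));

	/* MMU path: size rounds up to whole pages, alignment to at least one page. */
	uintptr_t mmu_alloc = round_up(region_alloc, LLEXT_PAGE_SIZE);
	uintptr_t mmu_align = max_uptr(region_align, LLEXT_PAGE_SIZE);

	printf("MPU: alloc=%lu align=%lu\n",
	       (unsigned long)mpu_block, (unsigned long)mpu_block);
	printf("MMU: alloc=%lu align=%lu\n",
	       (unsigned long)mmu_alloc, (unsigned long)mmu_align);
	return 0;
}

With these sample numbers, the MPU path yields an 8192-byte block sized and aligned identically, while the MMU path allocates 8192 bytes (two 4 KiB pages) but only raises the alignment to 4096; regions without SHF_ALLOC now skip both adjustments entirely.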

File tree

1 file changed: +28 -20 lines

subsys/llext/llext_mem.c

Lines changed: 28 additions & 20 deletions
@@ -71,6 +71,30 @@ static int llext_copy_region(struct llext_loader *ldr, struct llext *ext,
 	}
 	ext->mem_size[mem_idx] = region_alloc;
 
+	/*
+	 * Calculate the minimum region size and alignment that can satisfy
+	 * MMU/MPU requirements. This only applies to regions that contain
+	 * program-accessible data (not to string tables, for example).
+	 */
+	if (region->sh_flags & SHF_ALLOC) {
+		if (IS_ENABLED(CONFIG_ARM_MPU)) {
+			/* On ARM with an MPU, regions must be sized and
+			 * aligned to the same power of two (larger than 32).
+			 */
+			uintptr_t block_sz = MAX(MAX(region_alloc, region_align), LLEXT_PAGE_SIZE);
+
+			block_sz = 1 << LOG2CEIL(block_sz); /* align to next power of two */
+			region_alloc = block_sz;
+			region_align = block_sz;
+		} else if (IS_ENABLED(CONFIG_MMU)) {
+			/* MMU targets map memory in page-sized chunks. Round
+			 * the region to multiples of those.
+			 */
+			region_alloc = ROUND_UP(region_alloc, LLEXT_PAGE_SIZE);
+			region_align = MAX(region_align, LLEXT_PAGE_SIZE);
+		}
+	}
+
 	if (ldr->storage == LLEXT_STORAGE_WRITABLE) {
 		/*
 		 * Try to reuse data areas from the ELF buffer, if possible.
@@ -114,24 +138,7 @@ static int llext_copy_region(struct llext_loader *ldr, struct llext *ext,
 		return -EFAULT;
 	}
 
-	/*
-	 * Calculate the desired region size and alignment for a new allocation.
-	 */
-	if (IS_ENABLED(CONFIG_ARM_MPU)) {
-		/* On ARM with an MPU, regions must be sized and aligned to the same
-		 * power of two (larger than 32).
-		 */
-		uintptr_t block_size = MAX(MAX(region_alloc, region_align), LLEXT_PAGE_SIZE);
-
-		block_size = 1 << LOG2CEIL(block_size); /* align to next power of two */
-		region_alloc = block_size;
-		region_align = block_size;
-	} else {
-		/* Otherwise, round the region to multiples of LLEXT_PAGE_SIZE. */
-		region_alloc = ROUND_UP(region_alloc, LLEXT_PAGE_SIZE);
-		region_align = MAX(region_align, LLEXT_PAGE_SIZE);
-	}
-
+	/* Allocate a suitably aligned area for the region. */
 	ext->mem[mem_idx] = llext_aligned_alloc(region_align, region_alloc);
 	if (!ext->mem[mem_idx]) {
 		LOG_ERR("Failed allocating %zd bytes %zd-aligned for region %d",
@@ -269,8 +276,9 @@ void llext_free_regions(struct llext *ext)
 {
 	for (int i = 0; i < LLEXT_MEM_COUNT; i++) {
 #ifdef CONFIG_MMU
-		if (ext->mmu_permissions_set && ext->mem_size[i] != 0) {
-			/* restore default RAM permissions */
+		if (ext->mmu_permissions_set && ext->mem_size[i] != 0 &&
+		    (i == LLEXT_MEM_TEXT || i == LLEXT_MEM_RODATA)) {
+			/* restore default RAM permissions of changed regions */
 			k_mem_update_flags(ext->mem[i],
 					   ROUND_UP(ext->mem_size[i], LLEXT_PAGE_SIZE),
 					   K_MEM_PERM_RW);
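
The last hunk is easier to read as a filter over the region indices: only the regions whose mappings were actually tightened at load time (text and rodata) get their default RW permission restored on free. Below is a rough standalone sketch of that gating, assuming a hypothetical region enum and a stubbed restore_rw() in place of the k_mem_update_flags() call shown above.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-in for the llext memory region indices. */
enum mem_idx { MEM_TEXT, MEM_RODATA, MEM_DATA, MEM_BSS, MEM_COUNT };

/* Stub standing in for k_mem_update_flags(..., K_MEM_PERM_RW). */
static void restore_rw(int idx, size_t size)
{
	printf("restoring RW on region %d (%zu bytes)\n", idx, size);
}

static void free_regions(const size_t sizes[MEM_COUNT], bool mmu_permissions_set)
{
	for (int i = 0; i < MEM_COUNT; i++) {
		/*
		 * Only touch the regions whose permissions were changed at
		 * load time (text and rodata in this sketch); kernel-only
		 * regions were never remapped and, after this patch, may no
		 * longer be page-aligned.
		 */
		if (mmu_permissions_set && sizes[i] != 0 &&
		    (i == MEM_TEXT || i == MEM_RODATA)) {
			restore_rw(i, sizes[i]);
		}
		/* ...the region memory itself would be freed here... */
	}
}

int main(void)
{
	size_t sizes[MEM_COUNT] = { 4096, 4096, 128, 64 };

	free_regions(sizes, true);
	return 0;
}

This mirrors the point made in the commit message: restoring defaults on every region would now also hit the kernel-only regions whose size and alignment are no longer page-rounded.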
