Skip to content

Commit 9a39abb

Browse files
Vasily Gorbik
authored and committed
s390/boot: simplify and fix kernel memory layout setup
Initial KASAN shadow memory range was picked to preserve original kernel modules area position. With protected execution support, which might impose addressing limitation on vmalloc area and hence affect modules area position, current fixed KASAN shadow memory range is only making kernel memory layout setup more complex. So move it to the very end of available virtual space and simplify calculations. At the same time return to previous kernel address space split. In particular commit 0c4f262 ("s390: setup kernel memory layout early") introduced precise identity map size calculation and keeping vmemmap left most starting from a fresh region table entry. This didn't take into account additional mapping region requirement for potential DCSS mapping above available physical memory. So go back to virtual space split between 1:1 mapping & vmemmap array once vmalloc area size is subtracted. Cc: [email protected] Fixes: 0c4f262 ("s390: setup kernel memory layout early") Reported-by: Gerald Schaefer <[email protected]> Reviewed-by: Heiko Carstens <[email protected]> Reviewed-by: Alexander Gordeev <[email protected]> Signed-off-by: Vasily Gorbik <[email protected]> Signed-off-by: Heiko Carstens <[email protected]>
1 parent 6ad5f02 commit 9a39abb

File tree

2 files changed

+32
-58
lines changed

2 files changed

+32
-58
lines changed

arch/s390/Kconfig

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -47,7 +47,7 @@ config ARCH_SUPPORTS_UPROBES
4747
config KASAN_SHADOW_OFFSET
4848
hex
4949
depends on KASAN
50-
default 0x18000000000000
50+
default 0x1C000000000000
5151

5252
config S390
5353
def_bool y

arch/s390/boot/startup.c

Lines changed: 31 additions & 57 deletions
Original file line numberDiff line numberDiff line change
@@ -149,82 +149,56 @@ static void setup_ident_map_size(unsigned long max_physmem_end)
149149

150150
static void setup_kernel_memory_layout(void)
151151
{
152-
bool vmalloc_size_verified = false;
153-
unsigned long vmemmap_off;
154-
unsigned long vspace_left;
152+
unsigned long vmemmap_start;
155153
unsigned long rte_size;
156154
unsigned long pages;
157-
unsigned long vmax;
158155

159156
pages = ident_map_size / PAGE_SIZE;
160157
/* vmemmap contains a multiple of PAGES_PER_SECTION struct pages */
161158
vmemmap_size = SECTION_ALIGN_UP(pages) * sizeof(struct page);
162159

163160
/* choose kernel address space layout: 4 or 3 levels. */
164-
vmemmap_off = round_up(ident_map_size, _REGION3_SIZE);
161+
vmemmap_start = round_up(ident_map_size, _REGION3_SIZE);
165162
if (IS_ENABLED(CONFIG_KASAN) ||
166163
vmalloc_size > _REGION2_SIZE ||
167-
vmemmap_off + vmemmap_size + vmalloc_size + MODULES_LEN > _REGION2_SIZE)
168-
vmax = _REGION1_SIZE;
169-
else
170-
vmax = _REGION2_SIZE;
171-
172-
/* keep vmemmap_off aligned to a top level region table entry */
173-
rte_size = vmax == _REGION1_SIZE ? _REGION2_SIZE : _REGION3_SIZE;
174-
MODULES_END = vmax;
175-
if (is_prot_virt_host()) {
176-
/*
177-
* forcing modules and vmalloc area under the ultravisor
178-
* secure storage limit, so that any vmalloc allocation
179-
* we do could be used to back secure guest storage.
180-
*/
181-
adjust_to_uv_max(&MODULES_END);
182-
}
183-
184-
#ifdef CONFIG_KASAN
185-
if (MODULES_END < vmax) {
186-
/* force vmalloc and modules below kasan shadow */
187-
MODULES_END = min(MODULES_END, KASAN_SHADOW_START);
164+
vmemmap_start + vmemmap_size + vmalloc_size + MODULES_LEN >
165+
_REGION2_SIZE) {
166+
MODULES_END = _REGION1_SIZE;
167+
rte_size = _REGION2_SIZE;
188168
} else {
189-
/*
190-
* leave vmalloc and modules above kasan shadow but make
191-
* sure they don't overlap with it
192-
*/
193-
vmalloc_size = min(vmalloc_size, vmax - KASAN_SHADOW_END - MODULES_LEN);
194-
vmalloc_size_verified = true;
195-
vspace_left = KASAN_SHADOW_START;
169+
MODULES_END = _REGION2_SIZE;
170+
rte_size = _REGION3_SIZE;
196171
}
172+
/*
173+
* forcing modules and vmalloc area under the ultravisor
174+
* secure storage limit, so that any vmalloc allocation
175+
* we do could be used to back secure guest storage.
176+
*/
177+
adjust_to_uv_max(&MODULES_END);
178+
#ifdef CONFIG_KASAN
179+
/* force vmalloc and modules below kasan shadow */
180+
MODULES_END = min(MODULES_END, KASAN_SHADOW_START);
197181
#endif
198182
MODULES_VADDR = MODULES_END - MODULES_LEN;
199183
VMALLOC_END = MODULES_VADDR;
200184

201-
if (vmalloc_size_verified) {
202-
VMALLOC_START = VMALLOC_END - vmalloc_size;
203-
} else {
204-
vmemmap_off = round_up(ident_map_size, rte_size);
205-
206-
if (vmemmap_off + vmemmap_size > VMALLOC_END ||
207-
vmalloc_size > VMALLOC_END - vmemmap_off - vmemmap_size) {
208-
/*
209-
* allow vmalloc area to occupy up to 1/2 of
210-
* the rest virtual space left.
211-
*/
212-
vmalloc_size = min(vmalloc_size, VMALLOC_END / 2);
213-
}
214-
VMALLOC_START = VMALLOC_END - vmalloc_size;
215-
vspace_left = VMALLOC_START;
216-
}
185+
/* allow vmalloc area to occupy up to about 1/2 of the rest virtual space left */
186+
vmalloc_size = min(vmalloc_size, round_down(VMALLOC_END / 2, _REGION3_SIZE));
187+
VMALLOC_START = VMALLOC_END - vmalloc_size;
217188

218-
pages = vspace_left / (PAGE_SIZE + sizeof(struct page));
189+
/* split remaining virtual space between 1:1 mapping & vmemmap array */
190+
pages = VMALLOC_START / (PAGE_SIZE + sizeof(struct page));
219191
pages = SECTION_ALIGN_UP(pages);
220-
vmemmap_off = round_up(vspace_left - pages * sizeof(struct page), rte_size);
221-
/* keep vmemmap left most starting from a fresh region table entry */
222-
vmemmap_off = min(vmemmap_off, round_up(ident_map_size, rte_size));
223-
/* take care that identity map is lower then vmemmap */
224-
ident_map_size = min(ident_map_size, vmemmap_off);
192+
/* keep vmemmap_start aligned to a top level region table entry */
193+
vmemmap_start = round_down(VMALLOC_START - pages * sizeof(struct page), rte_size);
194+
/* vmemmap_start is the future VMEM_MAX_PHYS, make sure it is within MAX_PHYSMEM */
195+
vmemmap_start = min(vmemmap_start, 1UL << MAX_PHYSMEM_BITS);
196+
/* make sure identity map doesn't overlay with vmemmap */
197+
ident_map_size = min(ident_map_size, vmemmap_start);
225198
vmemmap_size = SECTION_ALIGN_UP(ident_map_size / PAGE_SIZE) * sizeof(struct page);
226-
VMALLOC_START = max(vmemmap_off + vmemmap_size, VMALLOC_START);
227-
vmemmap = (struct page *)vmemmap_off;
199+
/* make sure vmemmap doesn't overlay with vmalloc area */
200+
VMALLOC_START = max(vmemmap_start + vmemmap_size, VMALLOC_START);
201+
vmemmap = (struct page *)vmemmap_start;
228202
}
229203

230204
/*

0 commit comments

Comments (0)