
Commit 6aa3baa

swahlhpe authored and KAGA-KOKO committed
x86/platform/uv: Remove uv bios and efi code related to EFI_UV1_MEMMAP
With UV1 removed, EFI_UV1_MEMMAP is no longer used. Remove the code used by it and the related code in EFI.

Signed-off-by: Steve Wahl <[email protected]>
Signed-off-by: Thomas Gleixner <[email protected]>
Reviewed-by: Ard Biesheuvel <[email protected]>
Link: https://lkml.kernel.org/r/[email protected]
1 parent 66d67fe commit 6aa3baa
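
For orientation, a hedged sketch of how __uv_bios_call() reads once the EFI_UV1_MEMMAP fallback is removed, reconstructed from the bios_uv.c hunk below. The signature comes from the hunk header; the local declarations and the early uv_systab check are assumptions about unchanged context outside the hunk, not part of this diff.

static s64 __uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3,
			  u64 a4, u64 a5)
{
	struct uv_systab *tab = uv_systab;	/* assumed: unchanged setup above the hunk */
	s64 ret;				/* assumed: local for the return value */

	if (!tab || !tab->function)		/* assumed: guard implied by the hunk's context lines */
		return BIOS_STATUS_UNIMPLEMENTED;

	/*
	 * Only the virtual EFI runtime call remains; the old efi_call()
	 * path on the kernel page tables is what this commit deletes.
	 */
	ret = efi_call_virt_pointer(tab, function, (u64)which, a1, a2, a3, a4, a5);

	return ret;
}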

2 files changed, +2 -159 lines changed

arch/x86/platform/efi/efi.c

Lines changed: 1 addition & 1 deletion
@@ -496,7 +496,7 @@ void __init efi_init(void)
 		efi_print_memmap();
 }
 
-#if defined(CONFIG_X86_32) || defined(CONFIG_X86_UV)
+#if defined(CONFIG_X86_32)
 
 void __init efi_set_executable(efi_memory_desc_t *md, bool executable)
 {

arch/x86/platform/uv/bios_uv.c

Lines changed: 1 addition & 158 deletions
@@ -30,17 +30,7 @@ static s64 __uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3,
 		 */
 		return BIOS_STATUS_UNIMPLEMENTED;
 
-	/*
-	 * If EFI_UV1_MEMMAP is set, we need to fall back to using our old EFI
-	 * callback method, which uses efi_call() directly, with the kernel page tables:
-	 */
-	if (unlikely(efi_enabled(EFI_UV1_MEMMAP))) {
-		kernel_fpu_begin();
-		ret = efi_call((void *)__va(tab->function), (u64)which, a1, a2, a3, a4, a5);
-		kernel_fpu_end();
-	} else {
-		ret = efi_call_virt_pointer(tab, function, (u64)which, a1, a2, a3, a4, a5);
-	}
+	ret = efi_call_virt_pointer(tab, function, (u64)which, a1, a2, a3, a4, a5);
 
 	return ret;
 }
@@ -209,150 +199,3 @@ int uv_bios_init(void)
 	pr_info("UV: UVsystab: Revision:%x\n", uv_systab->revision);
 	return 0;
 }
-
-static void __init early_code_mapping_set_exec(int executable)
-{
-	efi_memory_desc_t *md;
-
-	if (!(__supported_pte_mask & _PAGE_NX))
-		return;
-
-	/* Make EFI service code area executable */
-	for_each_efi_memory_desc(md) {
-		if (md->type == EFI_RUNTIME_SERVICES_CODE ||
-		    md->type == EFI_BOOT_SERVICES_CODE)
-			efi_set_executable(md, executable);
-	}
-}
-
-void __init efi_uv1_memmap_phys_epilog(pgd_t *save_pgd)
-{
-	/*
-	 * After the lock is released, the original page table is restored.
-	 */
-	int pgd_idx, i;
-	int nr_pgds;
-	pgd_t *pgd;
-	p4d_t *p4d;
-	pud_t *pud;
-
-	nr_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT) , PGDIR_SIZE);
-
-	for (pgd_idx = 0; pgd_idx < nr_pgds; pgd_idx++) {
-		pgd = pgd_offset_k(pgd_idx * PGDIR_SIZE);
-		set_pgd(pgd_offset_k(pgd_idx * PGDIR_SIZE), save_pgd[pgd_idx]);
-
-		if (!pgd_present(*pgd))
-			continue;
-
-		for (i = 0; i < PTRS_PER_P4D; i++) {
-			p4d = p4d_offset(pgd,
-					 pgd_idx * PGDIR_SIZE + i * P4D_SIZE);
-
-			if (!p4d_present(*p4d))
-				continue;
-
-			pud = (pud_t *)p4d_page_vaddr(*p4d);
-			pud_free(&init_mm, pud);
-		}
-
-		p4d = (p4d_t *)pgd_page_vaddr(*pgd);
-		p4d_free(&init_mm, p4d);
-	}
-
-	kfree(save_pgd);
-
-	__flush_tlb_all();
-	early_code_mapping_set_exec(0);
-}
-
-pgd_t * __init efi_uv1_memmap_phys_prolog(void)
-{
-	unsigned long vaddr, addr_pgd, addr_p4d, addr_pud;
-	pgd_t *save_pgd, *pgd_k, *pgd_efi;
-	p4d_t *p4d, *p4d_k, *p4d_efi;
-	pud_t *pud;
-
-	int pgd;
-	int n_pgds, i, j;
-
-	early_code_mapping_set_exec(1);
-
-	n_pgds = DIV_ROUND_UP((max_pfn << PAGE_SHIFT), PGDIR_SIZE);
-	save_pgd = kmalloc_array(n_pgds, sizeof(*save_pgd), GFP_KERNEL);
-	if (!save_pgd)
-		return NULL;
-
-	/*
-	 * Build 1:1 identity mapping for UV1 memmap usage. Note that
-	 * PAGE_OFFSET is PGDIR_SIZE aligned when KASLR is disabled, while
-	 * it is PUD_SIZE ALIGNED with KASLR enabled. So for a given physical
-	 * address X, the pud_index(X) != pud_index(__va(X)), we can only copy
-	 * PUD entry of __va(X) to fill in pud entry of X to build 1:1 mapping.
-	 * This means here we can only reuse the PMD tables of the direct mapping.
-	 */
-	for (pgd = 0; pgd < n_pgds; pgd++) {
-		addr_pgd = (unsigned long)(pgd * PGDIR_SIZE);
-		vaddr = (unsigned long)__va(pgd * PGDIR_SIZE);
-		pgd_efi = pgd_offset_k(addr_pgd);
-		save_pgd[pgd] = *pgd_efi;
-
-		p4d = p4d_alloc(&init_mm, pgd_efi, addr_pgd);
-		if (!p4d) {
-			pr_err("Failed to allocate p4d table!\n");
-			goto out;
-		}
-
-		for (i = 0; i < PTRS_PER_P4D; i++) {
-			addr_p4d = addr_pgd + i * P4D_SIZE;
-			p4d_efi = p4d + p4d_index(addr_p4d);
-
-			pud = pud_alloc(&init_mm, p4d_efi, addr_p4d);
-			if (!pud) {
-				pr_err("Failed to allocate pud table!\n");
-				goto out;
-			}
-
-			for (j = 0; j < PTRS_PER_PUD; j++) {
-				addr_pud = addr_p4d + j * PUD_SIZE;
-
-				if (addr_pud > (max_pfn << PAGE_SHIFT))
-					break;
-
-				vaddr = (unsigned long)__va(addr_pud);
-
-				pgd_k = pgd_offset_k(vaddr);
-				p4d_k = p4d_offset(pgd_k, vaddr);
-				pud[j] = *pud_offset(p4d_k, vaddr);
-			}
-		}
-		pgd_offset_k(pgd * PGDIR_SIZE)->pgd &= ~_PAGE_NX;
-	}
-
-	__flush_tlb_all();
-	return save_pgd;
-out:
-	efi_uv1_memmap_phys_epilog(save_pgd);
-	return NULL;
-}
-
-void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size,
-				 u32 type, u64 attribute)
-{
-	unsigned long last_map_pfn;
-
-	if (type == EFI_MEMORY_MAPPED_IO)
-		return ioremap(phys_addr, size);
-
-	last_map_pfn = init_memory_mapping(phys_addr, phys_addr + size,
-					   PAGE_KERNEL);
-	if ((last_map_pfn << PAGE_SHIFT) < phys_addr + size) {
-		unsigned long top = last_map_pfn << PAGE_SHIFT;
-		efi_ioremap(top, size - (top - phys_addr), type, attribute);
-	}
-
-	if (!(attribute & EFI_MEMORY_WB))
-		efi_memory_uc((u64)(unsigned long)__va(phys_addr), size);
-
-	return (void __iomem *)__va(phys_addr);
-}
