1 change: 1 addition & 0 deletions .gitignore
@@ -270,6 +270,7 @@ xen/arch/x86/boot/*.bin
xen/arch/x86/boot/*.lnk
xen/arch/x86/efi.lds
xen/arch/x86/efi/check.efi
xen/arch/x86/efi/fixmlehdr
xen/arch/x86/efi/mkreloc
xen/arch/x86/include/asm/asm-macros.h
xen/arch/*/xen.lds
9 changes: 8 additions & 1 deletion xen/arch/x86/Makefile
@@ -89,6 +89,7 @@ extra-y += xen.lds

hostprogs-y += boot/mkelf32
hostprogs-y += efi/mkreloc
hostprogs-y += efi/fixmlehdr

# Allows usercopy.c to include itself
$(obj)/usercopy.o: CFLAGS-y += -iquote .
@@ -139,6 +140,10 @@ $(TARGET): $(TARGET)-syms $(efi-y) $(obj)/boot/mkelf32
CFLAGS-$(XEN_BUILD_EFI) += -DXEN_BUILD_EFI
CFLAGS-$(XEN_BUILD_EFI) += -ffile-prefix-map=$(XEN_ROOT)=.

ifeq ($(XEN_BUILD_EFI),y)
XEN_AFLAGS += -DXEN_BUILD_EFI
endif

$(TARGET)-syms: $(objtree)/prelink.o $(obj)/xen.lds
$(LD) $(XEN_LDFLAGS) -T $(obj)/xen.lds -N $< $(build_id_linker) \
$(objtree)/common/symbols-dummy.o -o $(@D)/.$(@F).0
@@ -208,7 +213,7 @@ note_file_option ?= $(note_file)

extra-$(XEN_BUILD_PE) += efi.lds
ifeq ($(XEN_BUILD_PE),y)
$(TARGET).efi: $(objtree)/prelink.o $(note_file) $(obj)/efi.lds $(obj)/efi/relocs-dummy.o $(obj)/efi/mkreloc
$(TARGET).efi: $(objtree)/prelink.o $(note_file) $(obj)/efi.lds $(obj)/efi/relocs-dummy.o $(obj)/efi/mkreloc $(obj)/efi/fixmlehdr
ifeq ($(CONFIG_DEBUG_INFO),y)
$(if $(filter --strip-debug,$(EFI_LDFLAGS)),echo,:) "Will strip debug info from $(@F)"
endif
@@ -228,6 +233,8 @@ endif
$(MAKE) $(build)=$(@D) .$(@F).1r.o .$(@F).1s.o
$(LD) $(call EFI_LDFLAGS,$(VIRT_BASE)) -T $(obj)/efi.lds -N $< \
$(@D)/.$(@F).1r.o $(@D)/.$(@F).1s.o $(orphan-handling-y) $(note_file_option) -o $@
# Take the image offset into account
$(obj)/efi/fixmlehdr $@ $(XEN_IMG_OFFSET)
$(NM) -pa --format=sysv $(@D)/$(@F) \
| $(objtree)/tools/symbols --all-symbols --xensyms --sysv --sort >$(@D)/$(@F).map
rm -f $(@D)/.$(@F).[0-9]* $(@D)/..$(@F).[0-9]*
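
The fixmlehdr host tool itself does not appear in this diff. As a rough sketch of what the "take image offset into account" step could look like, the program below rebases address fields of an MLE-style header by a given offset. The header layout, its location at the start of the file, and the set of fields that need rebasing are all assumptions made for illustration; the real tool may differ entirely.

/* fixmlehdr sketch: rebase MLE header fields by an image offset.
 * ASSUMPTIONS: the header sits at file offset 0 and uses the TXT-style
 * layout below; neither is confirmed by this diff. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct mle_header {                 /* assumed layout */
    uint8_t  uuid[16];
    uint32_t header_len;
    uint32_t version;
    uint32_t entry_point;           /* fields assumed to need rebasing */
    uint32_t first_valid_page;
    uint32_t mle_start;
    uint32_t mle_end;
};

int main(int argc, char *argv[])
{
    FILE *f;
    struct mle_header hdr;
    uint32_t off;

    if ( argc != 3 )
    {
        fprintf(stderr, "usage: %s <image> <offset>\n", argv[0]);
        return 1;
    }

    off = strtoul(argv[2], NULL, 0);
    f = fopen(argv[1], "r+b");
    if ( f == NULL || fread(&hdr, sizeof(hdr), 1, f) != 1 )
        return 1;

    /* Hypothetical: shift the RVA-like fields by the load offset. */
    hdr.entry_point += off;
    hdr.mle_start   += off;
    hdr.mle_end     += off;

    if ( fseek(f, 0, SEEK_SET) != 0 || fwrite(&hdr, sizeof(hdr), 1, f) != 1 )
        return 1;

    return fclose(f) == 0 ? 0 : 1;
}
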
126 changes: 125 additions & 1 deletion xen/arch/x86/boot/head.S
@@ -475,6 +475,13 @@ __pvh_start:
* which is supported by a given SINIT ACM
*/
slaunch_stub_entry:
#ifdef XEN_BUILD_EFI
mov %ebx, %esi
sub $sym_offs(slaunch_stub_entry), %esi
cmpb $0, sym_esi(slaunch_efi_boot)
jne slaunch_efi_jumpback
#endif

movl $SLAUNCH_BOOTLOADER_MAGIC,%eax

/* Fall through to Multiboot entry point. */
@@ -540,7 +547,6 @@ __start:
/* Push arguments to stack and call slaunch_early_tests(). */
push %esp /* pointer to output structure */
push %ebp /* Slaunch parameter on AMD */
push %ebx /* Multiboot parameter */
push $sym_offs(__2M_rwdata_end) /* end of target image */
push $sym_offs(_start) /* target base address */
push %esi /* load base address */
@@ -879,6 +885,124 @@ trampoline_setup:
/* Jump into the relocated trampoline. */
lret

#ifdef XEN_BUILD_EFI

/*
* The state matches that of slaunch_stub_entry above, but with %esi
* already initialized.
*/
slaunch_efi_jumpback:
lea STACK_SIZE - CPUINFO_sizeof + sym_esi(cpu0_stack), %esp

/* Prepare gdt and segments. */
add %esi, sym_esi(gdt_boot_base)
lgdt sym_esi(gdt_boot_descr)

mov $BOOT_DS, %ecx
mov %ecx, %ds
mov %ecx, %es
mov %ecx, %ss

push $BOOT_CS32
lea sym_esi(.Lgdt_is_set), %edx
push %edx
lret
.Lgdt_is_set:

/*
* Stash the TSC as above, because it was zeroed on the jump into the
* bootloader so as not to interfere with measurements.
*/
rdtsc
mov %eax, sym_esi(boot_tsc_stamp)
mov %edx, 4 + sym_esi(boot_tsc_stamp)

/*
* Clear the pagetables before use. We are loaded below 4GiB, which
* avoids the need to write the higher dword of each entry.
* Additionally, this ensures those dwords are actually zero and that
* the mappings aren't manipulated from outside.
*/
lea sym_esi(bootmap_start), %edi
lea sym_esi(bootmap_end), %ecx
sub %edi, %ecx
xor %eax, %eax
shr $2, %ecx
rep stosl

/* 1x L1 page, 512 entries mapping total of 2M. */
lea sym_esi(l1_bootmap), %edi
mov $512, %ecx
mov $(__PAGE_HYPERVISOR + 512 * PAGE_SIZE), %edx
.Lfill_l1_identmap:
sub $PAGE_SIZE, %edx
/* Loop runs for ecx=[512..1] for entries [511..0], hence -8. */
mov %edx, -8(%edi,%ecx,8)
loop .Lfill_l1_identmap

/* 4x L2 pages, each page mapping 1G of RAM. */
lea sym_esi(l2_bootmap), %edi
/* 1st entry points to L1. */
lea (sym_offs(l1_bootmap) + __PAGE_HYPERVISOR)(%esi), %edx
mov %edx, (%edi)
/* Other entries are 2MB pages. */
mov $(4 * 512 - 1), %ecx
/*
* The value below should be 4GB + flags, which wouldn't fit in a 32b
* register. To avoid a warning from the assembler, 4GB is skipped here.
* The subtraction in the first iteration makes the value roll over and
* point to 4GB - 2MB + flags.
*/
mov $(_PAGE_PSE + __PAGE_HYPERVISOR), %edx
.Lfill_l2_identmap:
sub $(1 << L2_PAGETABLE_SHIFT), %edx
/* Loop runs for ecx=[2047..1] for entries [2047..1]. */
mov %edx, (%edi,%ecx,8)
loop .Lfill_l2_identmap
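/*
* Worked example of the wraparound (an illustration, assuming
* __PAGE_HYPERVISOR == 0x63 and _PAGE_PSE == 0x80, i.e. %edx starts
* at 0xe3): the first subtraction of 1 << 21 wraps to 0xffe000e3, the
* 2MB page at 4GB - 2MB stored for entry 2047; the last iteration
* (ecx == 1) leaves 0x2000e3, mapping 2MB for entry 1.
*/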

/* 1x L3 page, mapping the 4x L2 pages. */
lea sym_esi(l3_bootmap), %edi
mov $4, %ecx
lea (sym_offs(l2_bootmap) + 4 * PAGE_SIZE + __PAGE_HYPERVISOR)(%esi), %edx
.Lfill_l3_identmap:
sub $PAGE_SIZE, %edx
/* Loop runs for ecx=[4..1] for entries [3..0], hence -8. */
mov %edx, -8(%edi,%ecx,8)
loop .Lfill_l3_identmap

/* 1x L4 page, mapping the L3 page. */
lea (sym_offs(l3_bootmap) + __PAGE_HYPERVISOR)(%esi), %edx
mov %edx, sym_esi(l4_bootmap)

/* Restore CR4; PAE must be enabled before IA-32e mode. */
mov %cr4, %ecx
or $X86_CR4_PAE, %ecx
mov %ecx, %cr4

/* Load PML4 table location into PT base register */
lea sym_esi(l4_bootmap), %eax
mov %eax, %cr3

/* Enable IA-32e mode and paging */
mov $MSR_EFER, %ecx
rdmsr
or $EFER_LME >> 8, %ah
wrmsr

mov %cr0, %eax
or $X86_CR0_PG | X86_CR0_NE | X86_CR0_TS | X86_CR0_MP, %eax
mov %eax, %cr0

/* Now in IA-32e compatibility mode, use lret to jump to 64b mode */
lea sym_esi(start_xen_from_efi), %ecx
push $BOOT_CS64
push %ecx
lret

.global start_xen_from_efi

#endif /* XEN_BUILD_EFI */

/*
* cmdline and reloc are written in C, and linked to be 32bit PIC with
* entrypoints at 0 and using the stdcall convention.
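
As a cross-check of the identity map built above, here is a minimal C model of the same structure (a sketch: the flag values are hypothetical stand-ins for Xen's __PAGE_HYPERVISOR and _PAGE_PSE, and the physical addresses of the tables are passed as plain integers in place of the %esi-relative symbols):

#include <stdint.h>
#include <string.h>

#define PAGE_SIZE           4096u
#define L2_PAGETABLE_SHIFT  21            /* 2MB superpages */
#define PG_FLAGS            0x63u         /* stand-in for __PAGE_HYPERVISOR */
#define PG_FLAGS_PSE        0xe3u         /* stand-in for the above + _PAGE_PSE */

static uint64_t l1[512], l2[4 * 512], l3[512], l4[512];

/* Build the same map as slaunch_efi_jumpback: 4GiB of 2MiB superpages,
 * with the first 2MiB broken down into 4KiB pages via the L1 table. */
static void build_bootmap(uint64_t l1_pa, uint64_t l2_pa, uint64_t l3_pa)
{
    unsigned int i;

    memset(l1, 0, sizeof(l1));            /* the asm clears with rep stosl */
    memset(l2, 0, sizeof(l2));
    memset(l3, 0, sizeof(l3));
    memset(l4, 0, sizeof(l4));

    /* 1x L1 page: entries [511..0] map [2MiB-4KiB..0]. */
    for ( i = 0; i < 512; i++ )
        l1[i] = i * (uint64_t)PAGE_SIZE | PG_FLAGS;

    /* 4x L2 pages: entry 0 points at the L1 page, entries [2047..1] are
     * 2MiB superpages; the asm computes these by 32-bit wraparound. */
    l2[0] = l1_pa | PG_FLAGS;
    for ( i = 1; i < 4 * 512; i++ )
        l2[i] = ((uint64_t)i << L2_PAGETABLE_SHIFT) | PG_FLAGS_PSE;

    /* 1x L3 page: entries [3..0] point at the four L2 pages. */
    for ( i = 0; i < 4; i++ )
        l3[i] = (l2_pa + i * PAGE_SIZE) | PG_FLAGS;

    /* 1x L4 page: a single entry pointing at the L3 page. */
    l4[0] = l3_pa | PG_FLAGS;
}
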
142 changes: 47 additions & 95 deletions xen/arch/x86/boot/slaunch_early.c
@@ -31,6 +31,19 @@ asm (
#include "../include/asm/slaunch.h"
#include "../include/asm/x86-vendors.h"

/*
* The AMD-defined structure layout for the SLB. The last two fields are
* SL-specific.
*/
struct skinit_sl_header
{
uint16_t skl_entry_point;
uint16_t length;
uint8_t reserved[62];
uint16_t skl_info_offset;
uint16_t bootloader_data_offset;
} __packed;
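
For reference, the offsets implied by this layout can be pinned down at compile time (a sketch; PACKED is a stand-in for Xen's __packed, and the _chk struct merely mirrors the definition above):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define PACKED __attribute__((packed))   /* stand-in for Xen's __packed */

struct skinit_sl_header_chk {
    uint16_t skl_entry_point;
    uint16_t length;
    uint8_t  reserved[62];
    uint16_t skl_info_offset;
    uint16_t bootloader_data_offset;
} PACKED;

static_assert(offsetof(struct skinit_sl_header_chk, skl_info_offset) == 66,
              "skl_info_offset follows the 62-byte reserved area");
static_assert(offsetof(struct skinit_sl_header_chk, bootloader_data_offset) == 68,
              "bootloader_data_offset is the last field");
static_assert(sizeof(struct skinit_sl_header_chk) == 70, "SLB header size");
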

struct early_tests_results
{
uint32_t mbi_pa;
@@ -47,124 +60,63 @@ static bool is_intel_cpu(void)
&& edx == X86_VENDOR_INTEL_EDX;
}

static void verify_pmr_ranges(struct txt_os_mle_data *os_mle,
struct txt_os_sinit_data *os_sinit,
uint32_t load_base_addr, uint32_t tgt_base_addr,
uint32_t xen_size)
{
int check_high_pmr = 0;

/* Verify the value of the low PMR base. It should always be 0. */
if (os_sinit->vtd_pmr_lo_base != 0)
txt_reset(SLAUNCH_ERROR_LO_PMR_BASE);

/*
* Low PMR size should not be 0 on current platforms. There is an ongoing
* transition to TPR-based DMA protection instead of PMR-based; this is not
* yet supported by the code.
*/
if (os_sinit->vtd_pmr_lo_size == 0)
txt_reset(SLAUNCH_ERROR_LO_PMR_BASE);

/*
* Check if regions overlap. Treat regions with no hole between them as
* an error.
*/
if (os_sinit->vtd_pmr_hi_size != 0 &&
os_sinit->vtd_pmr_hi_base <= os_sinit->vtd_pmr_lo_size)
txt_reset(SLAUNCH_ERROR_HI_PMR_BASE);

/* All regions accessed by 32b code must be below 4G. */
if (os_sinit->vtd_pmr_hi_base + os_sinit->vtd_pmr_hi_size <= 0x100000000ull)
check_high_pmr = 1;

/*
* The ACM checks that the TXT heap and MLE memory are protected against
* DMA. We have to check whether the MBI and the whole of Xen's memory
* are protected. The latter is done in case the bootloader failed to set
* the whole image as the MLE, and to make sure that both pre- and
* post-relocation code is protected.
*/

/* Check if all of Xen before relocation is covered by PMR. */
if (!is_in_pmr(os_sinit, load_base_addr, xen_size, check_high_pmr))
txt_reset(SLAUNCH_ERROR_LO_PMR_MLE);

/* Check if all of Xen after relocation is covered by PMR. */
if (load_base_addr != tgt_base_addr &&
!is_in_pmr(os_sinit, tgt_base_addr, xen_size, check_high_pmr))
txt_reset(SLAUNCH_ERROR_LO_PMR_MLE);

/* Check if MBI is covered by PMR. MBI starts with 'uint32_t total_size'. */
if (!is_in_pmr(os_sinit, os_mle->boot_params_addr,
*(uint32_t *)os_mle->boot_params_addr, check_high_pmr))
txt_reset(SLAUNCH_ERROR_BUFFER_BEYOND_PMR);

/* Check if TPM event log (if present) is covered by PMR. */
/*
* FIXME: currently commented out, as GRUB allocates the log in a hole
* between the PMR and reserved RAM due to the 2MB resolution of PMRs.
* There are no other easy-to-use DMA protection mechanisms that would
* allow protecting that part of memory. TPR (TXT DMA Protection Range)
* gives 1MB resolution, but it still wouldn't be enough.
*
* One possible solution would be for GRUB to allocate the log at a lower
* address, but this would further increase memory space fragmentation.
* Another option is to align the PMR up instead of down, making it cover
* part of the reserved region, but it is unclear what the consequences
* might be.
*
* In tboot this issue was resolved by reserving leftover chunks of memory
* in the e820 and/or UEFI memory map. This is also a valid solution, but
* would require more changes to GRUB than the ones listed above, as the
* event log is allocated much earlier than the PMRs.
*/
/*
if (os_mle->evtlog_addr != 0 && os_mle->evtlog_size != 0 &&
!is_in_pmr(os_sinit, os_mle->evtlog_addr, os_mle->evtlog_size,
check_high_pmr))
txt_reset(SLAUNCH_ERROR_BUFFER_BEYOND_PMR);
*/
}

void __stdcall slaunch_early_tests(uint32_t load_base_addr,
uint32_t tgt_base_addr,
uint32_t tgt_end_addr,
uint32_t multiboot_param,
uint32_t slaunch_param,
struct early_tests_results *result)
{
void *txt_heap;
struct txt_os_mle_data *os_mle;
struct slr_table *slrt;
struct txt_os_sinit_data *os_sinit;
struct slr_entry_intel_info *intel_info;
uint32_t size = tgt_end_addr - tgt_base_addr;

if ( !is_intel_cpu() )
{
/*
* Not an Intel CPU. Currently the only other option is AMD with SKINIT
* and secure-kernel-loader.
* and secure-kernel-loader (SKL).
*/
struct slr_entry_amd_info *amd_info;
const struct skinit_sl_header *sl_header = (void *)slaunch_param;

const uint16_t *sl_header = (void *)slaunch_param;
/* secure-kernel-loader passes the MBI as a parameter for a Multiboot kernel. */
result->mbi_pa = multiboot_param;
/*
* The fourth 16-bit integer of SKL's header is the offset to the
* bootloader's data, which is the SLRT.
*/
result->slrt_pa = slaunch_param + sl_header[3];
return;
}
/*
* slaunch_param holds the physical address of the SLB.
* The bootloader's data is the SLRT.
*/
result->slrt_pa = slaunch_param + sl_header->bootloader_data_offset;
result->mbi_pa = 0;

/* Clear the TXT error registers for a clean start of day */
write_txt_reg(TXTCR_ERRORCODE, 0);
slrt = (struct slr_table *)result->slrt_pa;

txt_heap = _p(read_txt_reg(TXTCR_HEAP_BASE));
amd_info = (struct slr_entry_amd_info *)
slr_next_entry_by_tag(slrt, NULL, SLR_ENTRY_AMD_INFO);
/* Basic checks only; SKL checked and consumed the rest. */
if ( amd_info == NULL || amd_info->hdr.size != sizeof(*amd_info) )
return;

if (txt_os_mle_data_size(txt_heap) < sizeof(*os_mle) ||
txt_os_sinit_data_size(txt_heap) < sizeof(*os_sinit))
txt_reset(SLAUNCH_ERROR_GENERIC);
result->mbi_pa = amd_info->boot_params_base;
return;
}

txt_heap = txt_init();
os_mle = txt_os_mle_data_start(txt_heap);
os_sinit = txt_os_sinit_data_start(txt_heap);

verify_pmr_ranges(os_mle, os_sinit, load_base_addr, tgt_base_addr, size);

result->mbi_pa = os_mle->boot_params_addr;
result->slrt_pa = os_mle->slrt;
result->mbi_pa = 0;

slrt = (struct slr_table *)result->slrt_pa;

intel_info = (struct slr_entry_intel_info *)
slr_next_entry_by_tag(slrt, NULL, SLR_ENTRY_INTEL_INFO);
if ( intel_info == NULL || intel_info->hdr.size != sizeof(*intel_info) )
return;

result->mbi_pa = intel_info->boot_params_base;

txt_verify_pmr_ranges(os_mle, os_sinit, intel_info,
load_base_addr, tgt_base_addr, size);
}
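
slr_next_entry_by_tag() is defined outside this diff. The sketch below shows one plausible shape of such a tag-based walk; the slr_table and slr_entry_hdr layouts, the terminator tag, and the bounds checks are assumptions for illustration, not Xen's actual definitions:

#include <stddef.h>
#include <stdint.h>

/* Assumed layouts, for illustration only. */
struct slr_table {
    uint32_t magic;
    uint16_t revision;
    uint16_t architecture;
    uint32_t size;        /* bytes used, including this header */
    uint32_t max_size;
    /* entries follow */
};

struct slr_entry_hdr {
    uint32_t tag;
    uint32_t size;        /* size of the whole entry, header included */
};

#define SLR_ENTRY_END 0   /* hypothetical terminator tag */

static struct slr_entry_hdr *
slr_next_entry_by_tag(struct slr_table *slrt, struct slr_entry_hdr *entry,
                      uint32_t tag)
{
    uint8_t *end = (uint8_t *)slrt + slrt->size;

    /* NULL means "start from the first entry", otherwise step past it. */
    if ( entry == NULL )
        entry = (struct slr_entry_hdr *)(slrt + 1);
    else
        entry = (struct slr_entry_hdr *)((uint8_t *)entry + entry->size);

    while ( (uint8_t *)entry + sizeof(*entry) <= end &&
            entry->tag != SLR_ENTRY_END )
    {
        if ( entry->tag == tag )
            return entry;
        entry = (struct slr_entry_hdr *)((uint8_t *)entry + entry->size);
    }

    return NULL;
}
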
14 changes: 11 additions & 3 deletions xen/arch/x86/boot/x86_64.S
@@ -229,14 +229,22 @@ GLOBAL(__page_tables_end)
.section .init.data, "aw", @progbits
.align PAGE_SIZE, 0

l1_bootmap:
bootmap_start:

l1_bootmap: /* 1x L1 page, mapping 2M of RAM. */
.fill L1_PAGETABLE_ENTRIES, 8, 0
.size l1_bootmap, . - l1_bootmap

GLOBAL(l2_bootmap)
GLOBAL(l2_bootmap) /* 4x L2 pages, each mapping 1G of RAM. */
.fill 4 * L2_PAGETABLE_ENTRIES, 8, 0
.size l2_bootmap, . - l2_bootmap

GLOBAL(l3_bootmap)
GLOBAL(l3_bootmap) /* 1x L3 page, mapping the 4x L2 pages. */
.fill L3_PAGETABLE_ENTRIES, 8, 0
.size l3_bootmap, . - l3_bootmap

l4_bootmap: /* 1x L4 page, mapping the L3 page. */
.fill L4_PAGETABLE_ENTRIES, 8, 0
.size l4_bootmap, . - l4_bootmap

bootmap_end:
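
The new bootmap_start/bootmap_end markers are what head.S uses to size its rep stosl clear. The totals implied by the layout above, as a compile-time sketch:

#include <assert.h>

#define PAGE_SIZE 4096

enum {
    L1_PAGES = 1,   /* l1_bootmap */
    L2_PAGES = 4,   /* l2_bootmap */
    L3_PAGES = 1,   /* l3_bootmap */
    L4_PAGES = 1,   /* l4_bootmap */
    BOOTMAP_PAGES = L1_PAGES + L2_PAGES + L3_PAGES + L4_PAGES,
};

/* head.S clears (bootmap_end - bootmap_start) / 4 dwords with rep stosl. */
static_assert(BOOTMAP_PAGES == 7, "seven pages between the markers");
static_assert(BOOTMAP_PAGES * PAGE_SIZE / 4 == 7168, "rep stosl count");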