diff --git a/.gitignore b/.gitignore index ea3243af9d..b344fd7b01 100644 --- a/.gitignore +++ b/.gitignore @@ -270,6 +270,7 @@ xen/arch/x86/boot/*.bin xen/arch/x86/boot/*.lnk xen/arch/x86/efi.lds xen/arch/x86/efi/check.efi +xen/arch/x86/efi/fixmlehdr xen/arch/x86/efi/mkreloc xen/arch/x86/include/asm/asm-macros.h xen/arch/*/xen.lds diff --git a/xen/arch/x86/Makefile b/xen/arch/x86/Makefile index 884929e051..ce2e80cc80 100644 --- a/xen/arch/x86/Makefile +++ b/xen/arch/x86/Makefile @@ -89,6 +89,7 @@ extra-y += xen.lds hostprogs-y += boot/mkelf32 hostprogs-y += efi/mkreloc +hostprogs-y += efi/fixmlehdr # Allows usercopy.c to include itself $(obj)/usercopy.o: CFLAGS-y += -iquote . @@ -139,6 +140,10 @@ $(TARGET): $(TARGET)-syms $(efi-y) $(obj)/boot/mkelf32 CFLAGS-$(XEN_BUILD_EFI) += -DXEN_BUILD_EFI CFLAGS-$(XEN_BUILD_EFI) += -ffile-prefix-map=$(XEN_ROOT)=. +ifeq ($(XEN_BUILD_EFI),y) +XEN_AFLAGS += -DXEN_BUILD_EFI +endif + $(TARGET)-syms: $(objtree)/prelink.o $(obj)/xen.lds $(LD) $(XEN_LDFLAGS) -T $(obj)/xen.lds -N $< $(build_id_linker) \ $(objtree)/common/symbols-dummy.o -o $(@D)/.$(@F).0 @@ -208,7 +213,7 @@ note_file_option ?= $(note_file) extra-$(XEN_BUILD_PE) += efi.lds ifeq ($(XEN_BUILD_PE),y) -$(TARGET).efi: $(objtree)/prelink.o $(note_file) $(obj)/efi.lds $(obj)/efi/relocs-dummy.o $(obj)/efi/mkreloc +$(TARGET).efi: $(objtree)/prelink.o $(note_file) $(obj)/efi.lds $(obj)/efi/relocs-dummy.o $(obj)/efi/mkreloc $(obj)/efi/fixmlehdr ifeq ($(CONFIG_DEBUG_INFO),y) $(if $(filter --strip-debug,$(EFI_LDFLAGS)),echo,:) "Will strip debug info from $(@F)" endif @@ -228,6 +233,8 @@ endif $(MAKE) $(build)=$(@D) .$(@F).1r.o .$(@F).1s.o $(LD) $(call EFI_LDFLAGS,$(VIRT_BASE)) -T $(obj)/efi.lds -N $< \ $(@D)/.$(@F).1r.o $(@D)/.$(@F).1s.o $(orphan-handling-y) $(note_file_option) -o $@ + # take image offset into account + $(obj)/efi/fixmlehdr $@ $(XEN_IMG_OFFSET) $(NM) -pa --format=sysv $(@D)/$(@F) \ | $(objtree)/tools/symbols --all-symbols --xensyms --sysv --sort >$(@D)/$(@F).map rm -f $(@D)/.$(@F).[0-9]* $(@D)/..$(@F).[0-9]* diff --git a/xen/arch/x86/boot/head.S b/xen/arch/x86/boot/head.S index ab33c7826c..925d89b313 100644 --- a/xen/arch/x86/boot/head.S +++ b/xen/arch/x86/boot/head.S @@ -475,6 +475,13 @@ __pvh_start: * which is supported by a given SINIT ACM */ slaunch_stub_entry: +#ifdef XEN_BUILD_EFI + mov %ebx, %esi + sub $sym_offs(slaunch_stub_entry), %esi + cmpb $0, sym_esi(slaunch_efi_boot) + jne slaunch_efi_jumpback +#endif + movl $SLAUNCH_BOOTLOADER_MAGIC,%eax /* Fall through to Multiboot entry point. */ @@ -540,7 +547,6 @@ __start: /* Push arguments to stack and call slaunch_early_tests(). */ push %esp /* pointer to output structure */ push %ebp /* Slaunch parameter on AMD */ - push %ebx /* Multiboot parameter */ push $sym_offs(__2M_rwdata_end) /* end of target image */ push $sym_offs(_start) /* target base address */ push %esi /* load base address */ @@ -879,6 +885,124 @@ trampoline_setup: /* Jump into the relocated trampoline. */ lret +#ifdef XEN_BUILD_EFI + + /* + * The state matches that of slaunch_stub_entry above, but with %esi + * already initialized. + */ +slaunch_efi_jumpback: + lea STACK_SIZE - CPUINFO_sizeof + sym_esi(cpu0_stack), %esp + + /* Prepare gdt and segments. 
*/
+    add %esi, sym_esi(gdt_boot_base)
+    lgdt sym_esi(gdt_boot_descr)
+
+    mov $BOOT_DS, %ecx
+    mov %ecx, %ds
+    mov %ecx, %es
+    mov %ecx, %ss
+
+    push $BOOT_CS32
+    lea sym_esi(.Lgdt_is_set),%edx
+    push %edx
+    lret
+.Lgdt_is_set:
+
+    /*
+     * Stash the TSC as above because it was zeroed when jumping into the
+     * bootloader so as not to interfere with measurements.
+     */
+    rdtsc
+    mov %eax, sym_esi(boot_tsc_stamp)
+    mov %edx, 4 + sym_esi(boot_tsc_stamp)
+
+    /*
+     * Clear the pagetables before use. We are loaded below 4GiB and
+     * this avoids the need to write the higher dword of each entry.
+     * Additionally, this ensures those dwords are actually zero and the
+     * mappings aren't manipulated from outside.
+     */
+    lea sym_esi(bootmap_start), %edi
+    lea sym_esi(bootmap_end), %ecx
+    sub %edi, %ecx
+    xor %eax, %eax
+    shr $2, %ecx
+    rep stosl
+
+    /* 1x L1 page, 512 entries mapping a total of 2M. */
+    lea sym_esi(l1_bootmap), %edi
+    mov $512, %ecx
+    mov $(__PAGE_HYPERVISOR + 512 * PAGE_SIZE), %edx
+.Lfill_l1_identmap:
+    sub $PAGE_SIZE, %edx
+    /* Loop runs for ecx=[512..1] for entries [511..0], hence -8. */
+    mov %edx, -8(%edi,%ecx,8)
+    loop .Lfill_l1_identmap
+
+    /* 4x L2 pages, each page mapping 1G of RAM. */
+    lea sym_esi(l2_bootmap), %edi
+    /* 1st entry points to L1. */
+    lea (sym_offs(l1_bootmap) + __PAGE_HYPERVISOR)(%esi), %edx
+    mov %edx, (%edi)
+    /* Other entries are 2MB pages. */
+    mov $(4 * 512 - 1), %ecx
+    /*
+     * The value below should be 4GB + flags, which wouldn't fit in a 32-bit
+     * register. To avoid a warning from the assembler, 4GB is skipped here.
+     * Subtraction in the first iteration makes the value roll over and point
+     * to 4GB - 2MB + flags.
+     */
+    mov $(_PAGE_PSE + __PAGE_HYPERVISOR), %edx
+.Lfill_l2_identmap:
+    sub $(1 << L2_PAGETABLE_SHIFT), %edx
+    /* Loop runs for ecx=[2047..1] for entries [2047..1]. */
+    mov %edx, (%edi,%ecx,8)
+    loop .Lfill_l2_identmap
+
+    /* 1x L3 page, mapping the 4x L2 pages. */
+    lea sym_esi(l3_bootmap), %edi
+    mov $4, %ecx
+    lea (sym_offs(l2_bootmap) + 4 * PAGE_SIZE + __PAGE_HYPERVISOR)(%esi), %edx
+.Lfill_l3_identmap:
+    sub $PAGE_SIZE, %edx
+    /* Loop runs for ecx=[4..1] for entries [3..0], hence -8. */
+    mov %edx, -8(%edi,%ecx,8)
+    loop .Lfill_l3_identmap
+
+    /* 1x L4 page, mapping the L3 page. */
+    lea (sym_offs(l3_bootmap) + __PAGE_HYPERVISOR)(%esi), %edx
+    mov %edx, sym_esi(l4_bootmap)
+
+    /* Restore CR4, PAE must be enabled before IA-32e mode */
+    mov %cr4, %ecx
+    or $X86_CR4_PAE, %ecx
+    mov %ecx, %cr4
+
+    /* Load PML4 table location into PT base register */
+    lea sym_esi(l4_bootmap), %eax
+    mov %eax, %cr3
+
+    /* Enable IA-32e mode and paging */
+    mov $MSR_EFER, %ecx
+    rdmsr
+    or $EFER_LME >> 8, %ah
+    wrmsr
+
+    mov %cr0, %eax
+    or $X86_CR0_PG | X86_CR0_NE | X86_CR0_TS | X86_CR0_MP, %eax
+    mov %eax, %cr0
+
+    /* Now in IA-32e compatibility mode, use lret to jump to 64-bit mode */
+    lea sym_esi(start_xen_from_efi), %ecx
+    push $BOOT_CS64
+    push %ecx
+    lret
+
+.global start_xen_from_efi
+
+#endif /* XEN_BUILD_EFI */
+
 /*
  * cmdline and reloc are written in C, and linked to be 32bit PIC with
  * entrypoints at 0 and using the stdcall convention.
diff --git a/xen/arch/x86/boot/slaunch_early.c b/xen/arch/x86/boot/slaunch_early.c
index cf55bda0d6..53dc0effcc 100644
--- a/xen/arch/x86/boot/slaunch_early.c
+++ b/xen/arch/x86/boot/slaunch_early.c
@@ -31,6 +31,19 @@ asm (
 #include "../include/asm/slaunch.h"
 #include "../include/asm/x86-vendors.h"
+/*
+ * The AMD-defined structure layout for the SLB. The last two fields are
+ * SL-specific.
+ */ +struct skinit_sl_header +{ + uint16_t skl_entry_point; + uint16_t length; + uint8_t reserved[62]; + uint16_t skl_info_offset; + uint16_t bootloader_data_offset; +} __packed; + struct early_tests_results { uint32_t mbi_pa; @@ -47,124 +60,63 @@ static bool is_intel_cpu(void) && edx == X86_VENDOR_INTEL_EDX; } -static void verify_pmr_ranges(struct txt_os_mle_data *os_mle, - struct txt_os_sinit_data *os_sinit, - uint32_t load_base_addr, uint32_t tgt_base_addr, - uint32_t xen_size) -{ - int check_high_pmr = 0; - - /* Verify the value of the low PMR base. It should always be 0. */ - if (os_sinit->vtd_pmr_lo_base != 0) - txt_reset(SLAUNCH_ERROR_LO_PMR_BASE); - - /* - * Low PMR size should not be 0 on current platforms. There is an ongoing - * transition to TPR-based DMA protection instead of PMR-based; this is not - * yet supported by the code. - */ - if (os_sinit->vtd_pmr_lo_size == 0) - txt_reset(SLAUNCH_ERROR_LO_PMR_BASE); - - /* Check if regions overlap. Treat regions with no hole between as error. */ - if (os_sinit->vtd_pmr_hi_size != 0 && - os_sinit->vtd_pmr_hi_base <= os_sinit->vtd_pmr_lo_size) - txt_reset(SLAUNCH_ERROR_HI_PMR_BASE); - - /* All regions accessed by 32b code must be below 4G. */ - if (os_sinit->vtd_pmr_hi_base + os_sinit->vtd_pmr_hi_size <= 0x100000000ull) - check_high_pmr = 1; - - /* - * ACM checks that TXT heap and MLE memory is protected against DMA. We have - * to check if MBI and whole Xen memory is protected. The latter is done in - * case bootloader failed to set whole image as MLE and to make sure that - * both pre- and post-relocation code is protected. - */ - - /* Check if all of Xen before relocation is covered by PMR. */ - if (!is_in_pmr(os_sinit, load_base_addr, xen_size, check_high_pmr)) - txt_reset(SLAUNCH_ERROR_LO_PMR_MLE); - - /* Check if all of Xen after relocation is covered by PMR. */ - if (load_base_addr != tgt_base_addr && - !is_in_pmr(os_sinit, tgt_base_addr, xen_size, check_high_pmr)) - txt_reset(SLAUNCH_ERROR_LO_PMR_MLE); - - /* Check if MBI is covered by PMR. MBI starts with 'uint32_t total_size'. */ - if (!is_in_pmr(os_sinit, os_mle->boot_params_addr, - *(uint32_t *)os_mle->boot_params_addr, check_high_pmr)) - txt_reset(SLAUNCH_ERROR_BUFFER_BEYOND_PMR); - - /* Check if TPM event log (if present) is covered by PMR. */ - /* - * FIXME: currently commented out as GRUB allocates it in a hole between - * PMR and reserved RAM, due to 2MB resolution of PMR. There are no other - * easy-to-use DMA protection mechanisms that would allow to protect that - * part of memory. TPR (TXT DMA Protection Range) gives 1MB resolution, but - * it still wouldn't be enough. - * - * One possible solution would be for GRUB to allocate log at lower address, - * but this would further increase memory space fragmentation. Another - * option is to align PMR up instead of down, making PMR cover part of - * reserved region, but it is unclear what the consequences may be. - * - * In tboot this issue was resolved by reserving leftover chunks of memory - * in e820 and/or UEFI memory map. This is also a valid solution, but would - * require more changes to GRUB than the ones listed above, as event log is - * allocated much earlier than PMRs. 
- */ - /* - if (os_mle->evtlog_addr != 0 && os_mle->evtlog_size != 0 && - !is_in_pmr(os_sinit, os_mle->evtlog_addr, os_mle->evtlog_size, - check_high_pmr)) - txt_reset(SLAUNCH_ERROR_BUFFER_BEYOND_PMR); - */ -} - void __stdcall slaunch_early_tests(uint32_t load_base_addr, uint32_t tgt_base_addr, uint32_t tgt_end_addr, - uint32_t multiboot_param, uint32_t slaunch_param, struct early_tests_results *result) { void *txt_heap; struct txt_os_mle_data *os_mle; + struct slr_table *slrt; struct txt_os_sinit_data *os_sinit; + struct slr_entry_intel_info *intel_info; uint32_t size = tgt_end_addr - tgt_base_addr; if ( !is_intel_cpu() ) { /* * Not an Intel CPU. Currently the only other option is AMD with SKINIT - * and secure-kernel-loader. + * and secure-kernel-loader (SKL). */ + struct slr_entry_amd_info *amd_info; + const struct skinit_sl_header *sl_header = (void *)slaunch_param; - const uint16_t *sl_header = (void *)slaunch_param; - /* secure-kernel-loader passes MBI as a parameter for Multiboot - * kernel. */ - result->mbi_pa = multiboot_param; - /* The forth 16-bit integer of SKL's header is an offset to - * bootloader's data, which is SLRT. */ - result->slrt_pa = slaunch_param + sl_header[3]; - return; - } + /* + * slaunch_param holds a physical address of SLB. + * Bootloader's data is SLRT. + */ + result->slrt_pa = slaunch_param + sl_header->bootloader_data_offset; + result->mbi_pa = 0; - /* Clear the TXT error registers for a clean start of day */ - write_txt_reg(TXTCR_ERRORCODE, 0); + slrt = (struct slr_table *)result->slrt_pa; - txt_heap = _p(read_txt_reg(TXTCR_HEAP_BASE)); + amd_info = (struct slr_entry_amd_info *) + slr_next_entry_by_tag (slrt, NULL, SLR_ENTRY_AMD_INFO); + /* Basic checks only, SKL checked and consumed the rest. */ + if ( amd_info == NULL || amd_info->hdr.size != sizeof(*amd_info) ) + return; - if (txt_os_mle_data_size(txt_heap) < sizeof(*os_mle) || - txt_os_sinit_data_size(txt_heap) < sizeof(*os_sinit)) - txt_reset(SLAUNCH_ERROR_GENERIC); + result->mbi_pa = amd_info->boot_params_base; + return; + } + txt_heap = txt_init(); os_mle = txt_os_mle_data_start(txt_heap); os_sinit = txt_os_sinit_data_start(txt_heap); - verify_pmr_ranges(os_mle, os_sinit, load_base_addr, tgt_base_addr, size); - - result->mbi_pa = os_mle->boot_params_addr; result->slrt_pa = os_mle->slrt; + result->mbi_pa = 0; + + slrt = (struct slr_table *)result->slrt_pa; + + intel_info = (struct slr_entry_intel_info *) + slr_next_entry_by_tag (slrt, NULL, SLR_ENTRY_INTEL_INFO); + if ( intel_info == NULL || intel_info->hdr.size != sizeof(*intel_info) ) + return; + + result->mbi_pa = intel_info->boot_params_base; + + txt_verify_pmr_ranges(os_mle, os_sinit, intel_info, + load_base_addr, tgt_base_addr, size); } diff --git a/xen/arch/x86/boot/x86_64.S b/xen/arch/x86/boot/x86_64.S index a5f1f681c9..64b9f53462 100644 --- a/xen/arch/x86/boot/x86_64.S +++ b/xen/arch/x86/boot/x86_64.S @@ -229,14 +229,22 @@ GLOBAL(__page_tables_end) .section .init.data, "aw", @progbits .align PAGE_SIZE, 0 -l1_bootmap: +bootmap_start: + +l1_bootmap: /* 1x L1 page, mapping 2M of RAM. */ .fill L1_PAGETABLE_ENTRIES, 8, 0 .size l1_bootmap, . - l1_bootmap -GLOBAL(l2_bootmap) +GLOBAL(l2_bootmap) /* 4x L2 pages, each mapping 1G of RAM. */ .fill 4 * L2_PAGETABLE_ENTRIES, 8, 0 .size l2_bootmap, . - l2_bootmap -GLOBAL(l3_bootmap) +GLOBAL(l3_bootmap) /* 1x L3 page, mapping the 4x L2 pages. */ .fill L3_PAGETABLE_ENTRIES, 8, 0 .size l3_bootmap, . - l3_bootmap + +l4_bootmap: /* 1x L4 page, mapping the L3 page. 
*/ + .fill L4_PAGETABLE_ENTRIES, 8, 0 + .size l4_bootmap, . - l4_bootmap + +bootmap_end: diff --git a/xen/arch/x86/efi/efi-boot.h b/xen/arch/x86/efi/efi-boot.h index a9a2991d64..5184a46ac0 100644 --- a/xen/arch/x86/efi/efi-boot.h +++ b/xen/arch/x86/efi/efi-boot.h @@ -9,8 +9,11 @@ #include #include #include +#include +#include static struct file __initdata ucode; +static uint64_t __initdata image_size; static multiboot_info_t __initdata mbi = { .flags = MBI_MODULES | MBI_LOADERNAME }; @@ -20,6 +23,9 @@ static multiboot_info_t __initdata mbi = { */ static module_t __initdata mb_modules[5]; +/* Indicates to head.S that it should jump back to start_xen_from_efi(). */ +bool __initdata slaunch_efi_boot; + static void __init edd_put_string(u8 *dst, size_t n, const char *src) { while ( n-- && *src ) @@ -234,10 +240,29 @@ static void __init efi_arch_pre_exit_boot(void) } } -static void __init noreturn efi_arch_post_exit_boot(void) +void __init noreturn start_xen_from_efi(void) { u64 cr4 = XEN_MINIMAL_CR4 & ~X86_CR4_PGE, efer; + if ( slaunch_active ) + { + struct slr_table *slrt = (struct slr_table *)efi.slr; + struct slr_entry_intel_info *intel_info; + + intel_info = (struct slr_entry_intel_info *) + slr_next_entry_by_tag(slrt, NULL, SLR_ENTRY_INTEL_INFO); + if ( intel_info != NULL ) + { + void *txt_heap = txt_init(); + struct txt_os_mle_data *os_mle = txt_os_mle_data_start(txt_heap); + struct txt_os_sinit_data *os_sinit = + txt_os_sinit_data_start(txt_heap); + + txt_verify_pmr_ranges(os_mle, os_sinit, intel_info, xen_phys_start, + __XEN_VIRT_START, image_size); + } + } + efi_arch_relocate_image(__XEN_VIRT_START - xen_phys_start); memcpy((void *)trampoline_phys, trampoline_start, cfg.size); @@ -278,6 +303,60 @@ static void __init noreturn efi_arch_post_exit_boot(void) unreachable(); } +static void __init attempt_secure_launch(void) +{ + struct slr_table *slrt; + struct slr_entry_dl_info *dlinfo; + dl_handler_func handler_callback; + + /* The presence of this table indicates a Secure Launch boot. */ + slrt = (struct slr_table *)efi.slr; + if ( efi.slr == EFI_INVALID_TABLE_ADDR || slrt->magic != SLR_TABLE_MAGIC || + slrt->revision != SLR_TABLE_REVISION ) + return; + + /* Avoid calls into firmware after DRTM. */ + __clear_bit(EFI_RS, &efi_flags); + + /* + * Make measurements less sensitive to hardware-specific details. + * + * Intentionally leaving efi_ct and efi_num_ct intact. + */ + efi_ih = 0; + efi_bs = NULL; + efi_bs_revision = 0; + efi_rs = NULL; + efi_version = 0; + efi_fw_vendor = NULL; + efi_fw_revision = 0; + StdOut = NULL; + StdErr = NULL; + boot_tsc_stamp = 0; + + slaunch_active = true; + slaunch_slrt = efi.slr; + slaunch_efi_boot = true; + + /* Jump through DL stub to initiate Secure Launch. */ + dlinfo = (struct slr_entry_dl_info *) + slr_next_entry_by_tag(slrt, NULL, SLR_ENTRY_DL_INFO); + + handler_callback = (dl_handler_func)dlinfo->dl_handler; + handler_callback(&dlinfo->bl_context); + + unreachable(); +} + +static void __init noreturn efi_arch_post_exit_boot(void) +{ + /* If Secure Launch happens, this doesn't return. Otherwise, + * start_xen_from_efi() is invoked after DRTM has been initiated. 
*/
+    attempt_secure_launch();
+
+    start_xen_from_efi();
+}
+
 static void __init efi_arch_cfg_file_early(const EFI_LOADED_IMAGE *image,
                                            EFI_FILE_HANDLE dir_handle,
                                            const char *section)
@@ -774,6 +853,7 @@ static void __init efi_arch_halt(void)
 static void __init efi_arch_load_addr_check(const EFI_LOADED_IMAGE *loaded_image)
 {
     xen_phys_start = (UINTN)loaded_image->ImageBase;
+    image_size = loaded_image->ImageSize;
     if ( (xen_phys_start + loaded_image->ImageSize - 1) >> 32 )
         blexit(L"Xen must be loaded below 4Gb.");
     if ( xen_phys_start & ((1 << L2_PAGETABLE_SHIFT) - 1) )
diff --git a/xen/arch/x86/efi/fixmlehdr.c b/xen/arch/x86/efi/fixmlehdr.c
new file mode 100644
index 0000000000..d443f3d75d
--- /dev/null
+++ b/xen/arch/x86/efi/fixmlehdr.c
@@ -0,0 +1,122 @@
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#define PREFIX_SIZE (4*1024)
+
+struct mle_header
+{
+    uint8_t uuid[16];
+    uint32_t header_len;
+    uint32_t version;
+    uint32_t entry_point;
+    uint32_t first_valid_page;
+    uint32_t mle_start;
+    uint32_t mle_end;
+    uint32_t capabilities;
+    uint32_t cmdline_start;
+    uint32_t cmdline_end;
+} __attribute__ ((packed));
+
+static const uint8_t MLE_HEADER_UUID[] = {
+    0x5a, 0xac, 0x82, 0x90, 0x6f, 0x47, 0xa7, 0x74,
+    0x0f, 0x5c, 0x55, 0xa2, 0xcb, 0x51, 0xb6, 0x42
+};
+
+int main(int argc, char *argv[])
+{
+    FILE *fp;
+    struct mle_header header;
+    int i;
+    char *end_ptr;
+    long long correction;
+    const char *file_path;
+
+    if ( argc != 3 )
+    {
+        fprintf(stderr, "Usage: %s <image> <correction>\n", argv[0]);
+        return 1;
+    }
+
+    correction = strtoll(argv[2], &end_ptr, 0);
+    if ( *end_ptr != '\0' )
+    {
+        fprintf(stderr, "Failed to parse '%s' as a number\n", argv[2]);
+        return 1;
+    }
+    if ( correction < INT32_MIN )
+    {
+        fprintf(stderr, "Correction '%s' is too small\n", argv[2]);
+        return 1;
+    }
+    if ( correction > INT32_MAX )
+    {
+        fprintf(stderr, "Correction '%s' is too large\n", argv[2]);
+        return 1;
+    }
+
+    file_path = argv[1];
+
+    fp = fopen(file_path, "r+");
+    if ( fp == NULL )
+    {
+        fprintf(stderr, "Failed to open %s\n", file_path);
+        return 1;
+    }
+
+    for ( i = 0; i < PREFIX_SIZE; i += 16 )
+    {
+        uint8_t bytes[16];
+
+        if ( fread(bytes, sizeof(bytes), 1, fp) != 1 )
+        {
+            fprintf(stderr, "Failed to find MLE header in %s\n", file_path);
+            goto fail;
+        }
+
+        if ( memcmp(bytes, MLE_HEADER_UUID, 16) == 0 )
+        {
+            break;
+        }
+    }
+
+    if ( i >= PREFIX_SIZE )
+    {
+        fprintf(stderr, "Failed to find MLE header in %s\n", file_path);
+        goto fail;
+    }
+
+    if ( fseek(fp, -16, SEEK_CUR) )
+    {
+        fprintf(stderr, "Failed to seek back to MLE header in %s\n", file_path);
+        goto fail;
+    }
+
+    if ( fread(&header, sizeof(header), 1, fp) != 1 )
+    {
+        fprintf(stderr, "Failed to read MLE header from %s\n", file_path);
+        goto fail;
+    }
+
+    if ( fseek(fp, -(int)sizeof(header), SEEK_CUR) )
+    {
+        fprintf(stderr, "Failed to seek back again to MLE header in %s\n",
+                file_path);
+        goto fail;
+    }
+
+    header.entry_point += correction;
+
+    if ( fwrite(&header, sizeof(header), 1, fp) != 1 )
+    {
+        fprintf(stderr, "Failed to write MLE header in %s\n", file_path);
+        goto fail;
+    }
+
+    return 0;
+
+fail:
+    fclose(fp);
+    return 1;
+}
diff --git a/xen/arch/x86/include/asm/intel_txt.h b/xen/arch/x86/include/asm/intel_txt.h
index 5d171d4e9f..f9d1e15354 100644
--- a/xen/arch/x86/include/asm/intel_txt.h
+++ b/xen/arch/x86/include/asm/intel_txt.h
@@ -59,17 +59,18 @@
 #define SLAUNCH_ERROR_HI_PMR_BASE 0xc0008014
 #define SLAUNCH_ERROR_HI_PMR_SIZE 0xc0008015
 #define SLAUNCH_ERROR_LO_PMR_BASE 0xc0008016
-#define SLAUNCH_ERROR_LO_PMR_MLE 0xc0008017
-#define 
SLAUNCH_ERROR_INITRD_TOO_BIG 0xc0008018 -#define SLAUNCH_ERROR_HEAP_ZERO_OFFSET 0xc0008019 -#define SLAUNCH_ERROR_WAKE_BLOCK_TOO_SMALL 0xc000801a -#define SLAUNCH_ERROR_MLE_BUFFER_OVERLAP 0xc000801b -#define SLAUNCH_ERROR_BUFFER_BEYOND_PMR 0xc000801c -#define SLAUNCH_ERROR_OS_SINIT_BAD_VERSION 0xc000801d -#define SLAUNCH_ERROR_EVENTLOG_MAP 0xc000801e -#define SLAUNCH_ERROR_TPM_NUMBER_ALGS 0xc000801f -#define SLAUNCH_ERROR_TPM_UNKNOWN_DIGEST 0xc0008020 -#define SLAUNCH_ERROR_TPM_INVALID_EVENT 0xc0008021 +#define SLAUNCH_ERROR_LO_PMR_SIZE 0xc0008017 +#define SLAUNCH_ERROR_LO_PMR_MLE 0xc0008018 +#define SLAUNCH_ERROR_INITRD_TOO_BIG 0xc0008019 +#define SLAUNCH_ERROR_HEAP_ZERO_OFFSET 0xc000801a +#define SLAUNCH_ERROR_WAKE_BLOCK_TOO_SMALL 0xc000801b +#define SLAUNCH_ERROR_MLE_BUFFER_OVERLAP 0xc000801c +#define SLAUNCH_ERROR_BUFFER_BEYOND_PMR 0xc000801d +#define SLAUNCH_ERROR_OS_SINIT_BAD_VERSION 0xc000801e +#define SLAUNCH_ERROR_EVENTLOG_MAP 0xc000801f +#define SLAUNCH_ERROR_TPM_NUMBER_ALGS 0xc0008020 +#define SLAUNCH_ERROR_TPM_UNKNOWN_DIGEST 0xc0008021 +#define SLAUNCH_ERROR_TPM_INVALID_EVENT 0xc0008022 #define SLAUNCH_BOOTLOADER_MAGIC 0x4c534254 @@ -86,11 +87,13 @@ extern char txt_ap_entry[]; extern uint32_t trampoline_gdt[]; +#include +#include + /* We need to differentiate between pre- and post paging enabled. */ #ifdef __BOOT_DEFS_H__ #define _txt(x) _p(x) #else -#include #include // __va() #define _txt(x) __va(x) #endif @@ -127,9 +130,9 @@ static inline void txt_reset(uint32_t error) */ struct txt_os_mle_data { uint32_t version; - uint32_t boot_params_addr; - uint32_t slrt; - uint32_t txt_info; + uint32_t reserved; + uint64_t slrt; + uint64_t txt_info; uint32_t ap_wake_block; uint32_t ap_wake_block_size; uint8_t mle_scratch[64]; @@ -327,6 +330,101 @@ static inline int is_in_pmr(struct txt_os_sinit_data *os_sinit, uint64_t base, return 0; } +static inline void *txt_init(void) +{ + void *txt_heap; + + /* Clear the TXT error registers for a clean start of day */ + write_txt_reg(TXTCR_ERRORCODE, 0); + + txt_heap = _p(read_txt_reg(TXTCR_HEAP_BASE)); + + if ( txt_os_mle_data_size(txt_heap) < sizeof(struct txt_os_mle_data) || + txt_os_sinit_data_size(txt_heap) < sizeof(struct txt_os_sinit_data) ) + txt_reset(SLAUNCH_ERROR_GENERIC); + + return txt_heap; +} + +static inline void txt_verify_pmr_ranges(struct txt_os_mle_data *os_mle, + struct txt_os_sinit_data *os_sinit, + struct slr_entry_intel_info *info, + uint32_t load_base_addr, + uint64_t tgt_base_addr, + uint32_t xen_size) +{ + int check_high_pmr = 0; + + /* Verify the value of the low PMR base. It should always be 0. */ + if ( os_sinit->vtd_pmr_lo_base != 0 ) + txt_reset(SLAUNCH_ERROR_LO_PMR_BASE); + + /* + * Low PMR size should not be 0 on current platforms. There is an ongoing + * transition to TPR-based DMA protection instead of PMR-based; this is not + * yet supported by the code. + */ + if ( os_sinit->vtd_pmr_lo_size == 0 ) + txt_reset(SLAUNCH_ERROR_LO_PMR_SIZE); + + /* Check if regions overlap. Treat regions with no hole between as error. */ + if ( os_sinit->vtd_pmr_hi_size != 0 && + os_sinit->vtd_pmr_hi_base <= os_sinit->vtd_pmr_lo_size ) + txt_reset(SLAUNCH_ERROR_HI_PMR_BASE); + + /* All regions accessed by 32b code must be below 4G. */ + if ( os_sinit->vtd_pmr_hi_base + os_sinit->vtd_pmr_hi_size <= + 0x100000000ull ) + check_high_pmr = 1; + + /* + * ACM checks that TXT heap and MLE memory is protected against DMA. We have + * to check if MBI and whole Xen memory is protected. 
The latter is done in + * case bootloader failed to set whole image as MLE and to make sure that + * both pre- and post-relocation code is protected. + */ + + /* Check if all of Xen before relocation is covered by PMR. */ + if ( !is_in_pmr(os_sinit, load_base_addr, xen_size, check_high_pmr) ) + txt_reset(SLAUNCH_ERROR_LO_PMR_MLE); + + /* Check if all of Xen after relocation is covered by PMR. */ + if ( load_base_addr != tgt_base_addr && + !is_in_pmr(os_sinit, tgt_base_addr, xen_size, check_high_pmr) ) + txt_reset(SLAUNCH_ERROR_LO_PMR_MLE); + + /* Check if MBI is covered by PMR. MBI starts with 'uint32_t total_size'. */ + if ( !is_in_pmr(os_sinit, info->boot_params_base, + *(uint32_t *)(uintptr_t)info->boot_params_base, + check_high_pmr) ) + txt_reset(SLAUNCH_ERROR_BUFFER_BEYOND_PMR); + + /* Check if TPM event log (if present) is covered by PMR. */ + /* + * FIXME: currently commented out as GRUB allocates it in a hole between + * PMR and reserved RAM, due to 2MB resolution of PMR. There are no other + * easy-to-use DMA protection mechanisms that would allow to protect that + * part of memory. TPR (TXT DMA Protection Range) gives 1MB resolution, but + * it still wouldn't be enough. + * + * One possible solution would be for GRUB to allocate log at lower address, + * but this would further increase memory space fragmentation. Another + * option is to align PMR up instead of down, making PMR cover part of + * reserved region, but it is unclear what the consequences may be. + * + * In tboot this issue was resolved by reserving leftover chunks of memory + * in e820 and/or UEFI memory map. This is also a valid solution, but would + * require more changes to GRUB than the ones listed above, as event log is + * allocated much earlier than PMRs. + */ + /* + if ( os_mle->evtlog_addr != 0 && os_mle->evtlog_size != 0 && + !is_in_pmr(os_sinit, os_mle->evtlog_addr, os_mle->evtlog_size, + check_high_pmr) ) + txt_reset(SLAUNCH_ERROR_BUFFER_BEYOND_PMR); + */ +} + extern void map_txt_mem_regions(void); extern void protect_txt_mem_regions(void); extern void txt_restore_mtrrs(bool e820_verbose); diff --git a/xen/arch/x86/include/asm/tpm.h b/xen/arch/x86/include/asm/tpm.h index 3ca5d3528d..1a7c8d33f3 100644 --- a/xen/arch/x86/include/asm/tpm.h +++ b/xen/arch/x86/include/asm/tpm.h @@ -7,8 +7,9 @@ #define TPM_TIS_BASE 0xFED40000 #define TPM_TIS_SIZE 0x00010000 -void tpm_hash_extend(unsigned loc, unsigned pcr, uint8_t *buf, unsigned size, - uint32_t type, uint8_t *log_data, unsigned log_data_size); +void tpm_hash_extend(unsigned loc, unsigned pcr, const uint8_t *buf, + unsigned size, uint32_t type, const uint8_t *log_data, + unsigned log_data_size); /* Measures essential parts of SLR table before making use of them. */ void tpm_measure_slrt(void); diff --git a/xen/arch/x86/slaunch.c b/xen/arch/x86/slaunch.c index e618181a60..91aa6311a3 100644 --- a/xen/arch/x86/slaunch.c +++ b/xen/arch/x86/slaunch.c @@ -5,6 +5,7 @@ #include #include #include +#include #include #include #include @@ -140,17 +141,42 @@ void tpm_measure_slrt(void) if ( slrt->revision == 1 ) { + /* In revision one of the SLRT, only platform-specific info table is + * measured. */ if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL ) { - /* In revision one of the SLRT, only Intel info table is - * measured. 
*/ - struct slr_entry_intel_info *intel_info = - (void *)slr_next_entry_by_tag(slrt, NULL, SLR_ENTRY_INTEL_INFO); - if ( intel_info == NULL ) + struct slr_entry_intel_info tmp; + struct slr_entry_intel_info *entry; + + entry = (struct slr_entry_intel_info *) + slr_next_entry_by_tag(slrt, NULL, SLR_ENTRY_INTEL_INFO); + if ( entry == NULL ) panic("SLRT is missing Intel-specific information!\n"); - tpm_hash_extend(DRTM_LOC, DRTM_DATA_PCR, (uint8_t *)intel_info, - sizeof(*intel_info), DLE_EVTYPE_SLAUNCH, NULL, 0); + tmp = *entry; + tmp.boot_params_base = 0; + tmp.txt_heap = 0; + + tpm_hash_extend(DRTM_LOC, DRTM_DATA_PCR, (uint8_t *)&tmp, + sizeof(tmp), DLE_EVTYPE_SLAUNCH, NULL, 0); + } + else if ( boot_cpu_data.x86_vendor == X86_VENDOR_AMD ) + { + struct slr_entry_amd_info tmp; + struct slr_entry_amd_info *entry; + + entry = (struct slr_entry_amd_info *) + slr_next_entry_by_tag(slrt, NULL, SLR_ENTRY_AMD_INFO); + if ( entry == NULL ) + panic("SLRT is missing AMD-specific information!\n"); + + tmp = *entry; + tmp.next = 0; + tmp.slrt_base = 0; + tmp.boot_params_base = 0; + + tpm_hash_extend(DRTM_LOC, DRTM_DATA_PCR, (uint8_t *)&tmp, + sizeof(tmp), DLE_EVTYPE_SLAUNCH, NULL, 0); } } else @@ -180,18 +206,45 @@ static struct slr_entry_policy *slr_get_policy(struct slr_table *slrt) return policy; } -static void check_drtm_policy(struct slr_table *slrt, - struct slr_entry_policy *policy, - struct slr_policy_entry *policy_entry, - const multiboot_info_t *mbi) +static void check_slrt_policy_entry(struct slr_policy_entry *policy_entry, + int idx, + struct slr_table *slrt) +{ + if ( policy_entry->entity_type != SLR_ET_SLRT ) + panic("Expected DRTM policy entry #%d to describe SLRT, got %#04x!\n", + idx, policy_entry->entity_type); + if ( policy_entry->pcr != DRTM_DATA_PCR ) + panic("SLRT was measured to PCR-%d instead of PCR-%d!\n", DRTM_DATA_PCR, + policy_entry->pcr); + if ( policy_entry->entity != (uint64_t)__pa(slrt) ) + panic("SLRT address (%#08lx) differs from its DRTM entry (%#08lx)\n", + __pa(slrt), policy_entry->entity); +} + +/* Returns number of policy entries that were already measured. */ +static unsigned int check_drtm_policy(struct slr_table *slrt, + struct slr_entry_policy *policy, + struct slr_policy_entry *policy_entry, + const multiboot_info_t *mbi) { uint32_t i; module_t *mods; uint32_t num_mod_entries; + int min_entries; - if ( policy->nr_entries < 2 ) - panic("DRTM policy in SLRT contains less than 2 entries (%d)!\n", - policy->nr_entries); + min_entries = efi_enabled(EFI_BOOT) ? 1 : 2; + if ( policy->nr_entries < min_entries ) + panic("DRTM policy in SLRT contains less than %d entries (%d)!\n", + min_entries, policy->nr_entries); + + if ( efi_enabled(EFI_BOOT) ) + { + check_slrt_policy_entry(&policy_entry[0], 0, slrt); + /* SLRT was measured in tpm_measure_slrt(). */ + return 1; + } + + /* This must be legacy MultiBoot2 boot. */ /* MBI policy entry must be the first one, so that measuring order matches * policy order. */ @@ -203,15 +256,7 @@ static void check_drtm_policy(struct slr_table *slrt, policy_entry[0].pcr); /* SLRT policy entry must be the second one. 
*/ - if ( policy_entry[1].entity_type != SLR_ET_SLRT ) - panic("Second entry of DRTM policy in SLRT is not SLRT: %#04x!\n", - policy_entry[1].entity_type); - if ( policy_entry[1].pcr != DRTM_DATA_PCR ) - panic("SLRT was measured to %d instead of %d PCR!\n", DRTM_DATA_PCR, - policy_entry[1].pcr); - if ( policy_entry[1].entity != (uint64_t)__pa(slrt) ) - panic("SLRT address (%#08lx) differes from its DRTM entry (%#08lx)\n", - __pa(slrt), policy_entry[1].entity); + check_slrt_policy_entry(&policy_entry[1], 1, slrt); mods = __va(mbi->mods_addr); for ( i = 0; i < mbi->mods_count; i++ ) @@ -249,6 +294,12 @@ static void check_drtm_policy(struct slr_table *slrt, panic("Unexpected number of Multiboot modules: %d instead of %d\n", (int)mbi->mods_count, (int)num_mod_entries); } + + /* + * MBI was measured in tpm_extend_mbi(). + * SLRT was measured in tpm_measure_slrt(). + */ + return 2; } void tpm_process_drtm_policy(const multiboot_info_t *mbi) @@ -257,6 +308,7 @@ void tpm_process_drtm_policy(const multiboot_info_t *mbi) struct slr_entry_policy *policy; struct slr_policy_entry *policy_entry; uint16_t i; + unsigned int measured; slrt = slr_get_table(); @@ -264,13 +316,11 @@ void tpm_process_drtm_policy(const multiboot_info_t *mbi) policy_entry = (struct slr_policy_entry *) ((uint8_t *)policy + sizeof(*policy)); - check_drtm_policy(slrt, policy, policy_entry, mbi); - /* MBI was measured in tpm_extend_mbi(). */ - policy_entry[0].flags |= SLR_POLICY_FLAG_MEASURED; - /* SLRT was measured in tpm_measure_slrt(). */ - policy_entry[1].flags |= SLR_POLICY_FLAG_MEASURED; + measured = check_drtm_policy(slrt, policy, policy_entry, mbi); + for ( i = 0; i < measured; i++ ) + policy_entry[i].flags |= SLR_POLICY_FLAG_MEASURED; - for ( i = 2; i < policy->nr_entries; i++ ) + for ( i = measured; i < policy->nr_entries; i++ ) { uint64_t start = policy_entry[i].entity; uint64_t size = policy_entry[i].size; @@ -303,8 +353,8 @@ void tpm_process_drtm_policy(const multiboot_info_t *mbi) } if ( policy_entry[i].flags & SLR_POLICY_IMPLICIT_SIZE ) - panic("Unexpected implicitly-sized DRTM entry of Secure Launch at %d\n", - i); + panic("Unexpected implicitly-sized DRTM entry of Secure Launch at %d (type %d)\n", + i, policy_entry[i].entity_type); map_l2(start, size); tpm_hash_extend(DRTM_LOC, policy_entry[i].pcr, __va(start), size, @@ -314,4 +364,49 @@ void tpm_process_drtm_policy(const multiboot_info_t *mbi) policy_entry[i].flags |= SLR_POLICY_FLAG_MEASURED; } + + /* + * On x86 EFI platforms Xen reads its command-line options and kernel/initrd + * from configuration files (several can be chained). Bootloader can't know + * contents of the configuration beforehand without parsing it, so there + * will be no corresponding policy entries. Instead, measure command-line + * and all modules here. + */ + if ( efi_enabled(EFI_BOOT) ) + { +#define LOG_DATA(str) (uint8_t *)(str), (sizeof(str) - 1) + module_t *mods; + void *cmdline = __va(mbi->cmdline); + + tpm_hash_extend(DRTM_LOC, DRTM_DATA_PCR, cmdline, strlen(cmdline), + DLE_EVTYPE_SLAUNCH, LOG_DATA("Xen's command line")); + + mods = __va(mbi->mods_addr); + + for ( i = 0; i < mbi->mods_count; i++ ) + { + const module_t *mod = &mods[i]; + + paddr_t string = mod->string; + paddr_t start = (paddr_t)mod->mod_start << PAGE_SHIFT; + size_t size = mod->mod_end; + + /* + * Measuring module's name separately because module's command-line + * parameters are appended to its name when present. + * + * 2 MiB is minimally mapped size and it should more than suffice. 
+ */ + map_l2(string, 2 * 1024 * 1024); + tpm_hash_extend(DRTM_LOC, DRTM_DATA_PCR, + __va(string), strlen(__va(string)), + DLE_EVTYPE_SLAUNCH, LOG_DATA("MB module string")); + + map_l2(start, size); + tpm_hash_extend(DRTM_LOC, DRTM_CODE_PCR, __va(start), size, + DLE_EVTYPE_SLAUNCH, LOG_DATA("MB module")); + } + +#undef LOG_DATA + } } diff --git a/xen/arch/x86/tpm.c b/xen/arch/x86/tpm.c index a713be6cd4..06fbbcfb45 100644 --- a/xen/arch/x86/tpm.c +++ b/xen/arch/x86/tpm.c @@ -317,7 +317,7 @@ union cmd_rsp { uint8_t buf[CMD_RSP_BUF_SIZE]; }; -static void tpm12_hash_extend(unsigned loc, uint8_t *buf, unsigned size, +static void tpm12_hash_extend(unsigned loc, const uint8_t *buf, unsigned size, unsigned pcr, uint8_t *out_digest) { union cmd_rsp cmd_rsp; @@ -391,7 +391,7 @@ union cmd_rsp { struct extend_rsp extend_r; }; -static void tpm12_hash_extend(unsigned loc, uint8_t *buf, unsigned size, +static void tpm12_hash_extend(unsigned loc, const uint8_t *buf, unsigned size, unsigned pcr, uint8_t *out_digest) { union cmd_rsp cmd_rsp; @@ -419,7 +419,7 @@ static void tpm12_hash_extend(unsigned loc, uint8_t *buf, unsigned size, static void *create_log_event12(struct txt_ev_log_container_12 *evt_log, uint32_t evt_log_size, uint32_t pcr, - uint32_t type, uint8_t *data, + uint32_t type, const uint8_t *data, unsigned data_size) { struct TPM12_PCREvent *new_entry; @@ -637,8 +637,8 @@ union tpm2_cmd_rsp { struct tpm2_sequence_complete_rsp finish_r; }; -static uint32_t tpm2_hash_extend(unsigned loc, uint8_t *buf, unsigned size, - unsigned pcr, +static uint32_t tpm2_hash_extend(unsigned loc, const uint8_t *buf, + unsigned size, unsigned pcr, struct tpm2_log_hashes *log_hashes) { uint32_t seq_handle; @@ -825,8 +825,8 @@ static bool tpm_supports_hash(unsigned loc, const struct tpm2_log_hash *hash) return rc == 0; } -static uint32_t tpm2_hash_extend(unsigned loc, uint8_t *buf, unsigned size, - unsigned pcr, +static uint32_t tpm2_hash_extend(unsigned loc, const uint8_t *buf, + unsigned size, unsigned pcr, const struct tpm2_log_hashes *log_hashes) { uint32_t rc; @@ -911,7 +911,7 @@ find_evt_log_ext_data(struct tpm2_spec_id_event *evt_log) static struct tpm2_log_hashes create_log_event20(struct tpm2_spec_id_event *evt_log, uint32_t evt_log_size, - uint32_t pcr, uint32_t type, uint8_t *data, + uint32_t pcr, uint32_t type, const uint8_t *data, unsigned data_size) { struct tpm2_log_hashes log_hashes = {0}; @@ -980,8 +980,9 @@ create_log_event20(struct tpm2_spec_id_event *evt_log, uint32_t evt_log_size, /************************** end of TPM2.0 specific ****************************/ -void tpm_hash_extend(unsigned loc, unsigned pcr, uint8_t *buf, unsigned size, - uint32_t type, uint8_t *log_data, unsigned log_data_size) +void tpm_hash_extend(unsigned loc, unsigned pcr, const uint8_t *buf, + unsigned size, uint32_t type, const uint8_t *log_data, + unsigned log_data_size) { void *evt_log_addr; uint32_t evt_log_size; diff --git a/xen/common/efi/boot.c b/xen/common/efi/boot.c index 21c7e99c7b..f16f9ef58b 100644 --- a/xen/common/efi/boot.c +++ b/xen/common/efi/boot.c @@ -18,6 +18,7 @@ #if EFI_PAGE_SIZE != PAGE_SIZE # error Cannot use xen/pfn.h here! 
#endif +#include #include #include #ifdef CONFIG_X86 @@ -983,6 +984,7 @@ static void __init efi_tables(void) static EFI_GUID __initdata mps_guid = MPS_TABLE_GUID; static EFI_GUID __initdata smbios_guid = SMBIOS_TABLE_GUID; static EFI_GUID __initdata smbios3_guid = SMBIOS3_TABLE_GUID; + static EFI_GUID __initdata slr_guid = UEFI_SLR_TABLE_GUID; if ( match_guid(&acpi2_guid, &efi_ct[i].VendorGuid) ) efi.acpi20 = (unsigned long)efi_ct[i].VendorTable; @@ -994,6 +996,8 @@ static void __init efi_tables(void) efi.smbios = (unsigned long)efi_ct[i].VendorTable; if ( match_guid(&smbios3_guid, &efi_ct[i].VendorGuid) ) efi.smbios3 = (unsigned long)efi_ct[i].VendorTable; + if ( match_guid(&slr_guid, &efi_ct[i].VendorGuid) ) + efi.slr = (unsigned long)efi_ct[i].VendorTable; if ( match_guid(&esrt_guid, &efi_ct[i].VendorGuid) ) esrt = (UINTN)efi_ct[i].VendorTable; } diff --git a/xen/common/efi/runtime.c b/xen/common/efi/runtime.c index 6dd799f966..3f46f11041 100644 --- a/xen/common/efi/runtime.c +++ b/xen/common/efi/runtime.c @@ -69,6 +69,7 @@ struct efi __read_mostly efi = { .mps = EFI_INVALID_TABLE_ADDR, .smbios = EFI_INVALID_TABLE_ADDR, .smbios3 = EFI_INVALID_TABLE_ADDR, + .slr = EFI_INVALID_TABLE_ADDR, }; const struct efi_pci_rom *__read_mostly efi_pci_roms; diff --git a/xen/include/xen/efi.h b/xen/include/xen/efi.h index 94a7e547f9..8500aae8f2 100644 --- a/xen/include/xen/efi.h +++ b/xen/include/xen/efi.h @@ -19,6 +19,7 @@ struct efi { unsigned long acpi20; /* ACPI table (ACPI 2.0) */ unsigned long smbios; /* SM BIOS table */ unsigned long smbios3; /* SMBIOS v3 table */ + unsigned long slr; /* SLR table */ }; extern struct efi efi; diff --git a/xen/include/xen/slr_table.h b/xen/include/xen/slr_table.h index 74dd27b2e4..a9f9e05389 100644 --- a/xen/include/xen/slr_table.h +++ b/xen/include/xen/slr_table.h @@ -83,8 +83,8 @@ struct slr_table */ struct slr_entry_hdr { - uint16_t tag; - uint16_t size; + uint32_t tag; + uint32_t size; } __packed; /* @@ -93,21 +93,25 @@ struct slr_entry_hdr struct slr_bl_context { uint16_t bootloader; - uint16_t reserved; + uint16_t reserved[3]; uint64_t context; } __packed; +typedef void (*dl_handler_func)(struct slr_bl_context *bl_context); + /* * DRTM Dynamic Launch Configuration */ struct slr_entry_dl_info { struct slr_entry_hdr hdr; - struct slr_bl_context bl_context; - uint64_t dl_handler; + uint64_t dce_size; uint64_t dce_base; - uint32_t dce_size; + uint64_t dlme_size; + uint64_t dlme_base; uint64_t dlme_entry; + struct slr_bl_context bl_context; + uint64_t dl_handler; } __packed; /* @@ -118,19 +122,8 @@ struct slr_entry_log_info struct slr_entry_hdr hdr; uint16_t format; uint16_t reserved; - uint64_t addr; uint32_t size; -} __packed; - -/* - * DRTM Measurement Policy - */ -struct slr_entry_policy -{ - struct slr_entry_hdr hdr; - uint16_t revision; - uint16_t nr_entries; - /* policy_entries[] */ + uint64_t addr; } __packed; /* @@ -142,11 +135,23 @@ struct slr_policy_entry uint16_t entity_type; uint16_t flags; uint16_t reserved; - uint64_t entity; uint64_t size; + uint64_t entity; char evt_info[TPM_EVENT_INFO_LENGTH]; } __packed; +/* + * DRTM Measurement Policy + */ +struct slr_entry_policy +{ + struct slr_entry_hdr hdr; + uint16_t reserved[2]; + uint16_t revision; + uint16_t nr_entries; + struct slr_policy_entry policy_entries[]; +} __packed; + /* * Secure Launch defined MTRR saving structures */ @@ -169,6 +174,8 @@ struct slr_txt_mtrr_state struct slr_entry_intel_info { struct slr_entry_hdr hdr; + uint64_t boot_params_base; + uint64_t txt_heap; uint64_t 
saved_misc_enable_msr; struct slr_txt_mtrr_state saved_bsp_mtrrs; } __packed; @@ -179,6 +186,14 @@ struct slr_entry_intel_info struct slr_entry_amd_info { struct slr_entry_hdr hdr; + uint64_t next; + uint32_t type; + uint32_t len; + uint64_t slrt_size; + uint64_t slrt_base; + uint64_t boot_params_base; + uint16_t psp_version; + uint16_t reserved[3]; } __packed; /* @@ -189,23 +204,27 @@ struct slr_entry_arm_info struct slr_entry_hdr hdr; } __packed; -struct slr_entry_uefi_config -{ - struct slr_entry_hdr hdr; - uint16_t revision; - uint16_t nr_entries; - /* uefi_cfg_entries[] */ -} __packed; - +/* + * UEFI config measurement entry + */ struct slr_uefi_cfg_entry { uint16_t pcr; uint16_t reserved; - uint64_t cfg; /* address or value */ uint32_t size; + uint64_t cfg; /* address or value */ char evt_info[TPM_EVENT_INFO_LENGTH]; } __packed; +struct slr_entry_uefi_config +{ + struct slr_entry_hdr hdr; + uint16_t reserved[2]; + uint16_t revision; + uint16_t nr_entries; + struct slr_uefi_cfg_entry uefi_cfg_entries[]; +} __packed; + static inline void * slr_end_of_entries(struct slr_table *table) {