Skip to content

Commit 3f17a05

Browse files
head.S: make state on entry to the kernel similar to that used by TXT
Intel TXT provides the physical address of the kernel entry point in %ebx. This is due to the fact that %ss is undefined there, so the kernel can't use the call/pop pair to obtain its load address. Even though it is possible on AMD with this implementation of SKL, keep things consistent and move the entry point address to %ebx as well. %ebp points to the base of the SLB; the kernel can use it to obtain the offset to the SLRT, and through it, the bootloader context and the payload argument saved within. Note that this commit (temporarily) breaks booting to non-Linux payloads. Signed-off-by: Krystian Hebel <[email protected]>
1 parent 1cc0a75 commit 3f17a05

File tree

2 files changed

+23
-49
lines changed

2 files changed

+23
-49
lines changed

head.S

Lines changed: 15 additions & 28 deletions
Original file line number | Diff line number | Diff line change
@@ -190,16 +190,23 @@ GLOBAL(_entry)
190190
call skl_main
191191

192192
/*
193-
* skl_main() is magic. It returns two pointers by register:
193+
* skl_main() returns a pointer to protected mode kernel entry in %eax. It
194+
* could also return the argument for the kernel (depending on kernel type,
195+
* this could be either Linux boot parameters or MBI for Multiboot2), but
196+
* for parity with what Intel TXT does, this isn't the case.
194197
*
195-
* %eax - protected mode kernel entry
196-
* %edx - argument for kernel entry point, depends on type of kernel
198+
* Intel TXT provides physical address of kernel entry point in %ebx. This
199+
* is due to the fact that %ss is undefined there, so the kernel can't use
200+
* the call/pop pair to obtain its load address. Even though it is possible
201+
* on AMD with this implementation of SKL, keep things consistent and move
202+
* the entry point address to %ebx as well.
197203
*
198-
* We stash the entry point in %edi and the argument in %esi to protect
199-
* them from clobbering during teardown.
204+
* %ebp points to base of SLB, it was set by the first instruction on SKL
205+
* entry and preserved across call to C. This is how the kernel can obtain
206+
* offset to SLRT, and through it, bootloader context and payload argument
207+
* saved within.
200208
*/
201-
mov %eax, %edi
202-
mov %edx, %esi
209+
mov %eax, %ebx
203210

204211
#ifdef __x86_64__
205212

@@ -232,28 +239,8 @@ GLOBAL(_entry)
232239
push $0
233240
popf
234241

235-
/*
236-
* Various kernels use different boot protocols, SKL supports some of
237-
* the common ones. Because of that, we are saving the same argument in
238-
* every possible place that any of the supported kernel types may look
239-
* for it. As of now, supported protocols include:
240-
*
241-
* - Linux x86 protected mode entry, not UEFI
242-
* - Multiboot2, also not UEFI
243-
* - simple payload started as 'entry(u32 arg)' function call. As we
244-
* don't expect it to return, __cdecl, __stdcall and __pascal calling
245-
* conventions work the same.
246-
*/
247-
/* Linux expects Zero Page address in %esi, it is already there */
248-
/* Multiboot2 expects MBI address in %ebx and magic number in %eax */
249-
mov %esi, %ebx
250-
mov $MULTIBOOT2_BOOTLOADER_MAGIC, %eax
251-
/* Simple payload expects argument on stack followed by return address */
252-
push %esi
253-
push $0
254-
255242
/* All set, jump to the kernel */
256-
jmp *%edi
243+
jmp *%ebx
257244
ENDFUNC(_entry)
258245

259246
.section .rodata, "a", @progbits

main.c

Lines changed: 8 additions & 21 deletions
Original file line number | Diff line number | Diff line change
@@ -231,27 +231,15 @@ static void dma_protection_setup(void)
231231
#endif
232232
}
233233

234-
/*
235-
* Function return ABI magic:
236-
*
237-
* By returning a simple object of two pointers, the SYSV ABI splits it across
238-
* %rax and %rdx rather than spilling it to the stack. This is far more
239-
* convenient for our asm caller to deal with.
240-
*/
241-
typedef struct {
242-
void *dlme_entry; /* %eax */
243-
void *dlme_arg; /* %edx */
244-
} asm_return_t;
245-
246-
asm_return_t skl_main(void)
234+
void *skl_main(void)
247235
{
248236
struct tpm *tpm;
249237
struct slr_entry_dl_info *dl_info;
250-
asm_return_t ret;
238+
void *dlme_entry;
251239
u32 entry_offset;
252240

253241
/*
254-
* Now in 64b mode, paging is setup. This is the launching point. We can
242+
* Now in 64b mode, paging is set up. This is the launching point. We can
255243
* now do what we want. At the end, trampoline to the PM entry point which
256244
* will include the Secure Launch stub.
257245
*/
@@ -298,20 +286,19 @@ asm_return_t skl_main(void)
298286
tpm_relinquish_locality(tpm);
299287
free_tpm(tpm);
300288

301-
ret.dlme_entry = _p(dl_info->dlme_base + dl_info->dlme_entry);
302-
ret.dlme_arg = _p(dl_info->bl_context.context);
289+
dlme_entry = _p(dl_info->dlme_base + dl_info->dlme_entry);
303290

304291
/* End of the line, off to the protected mode entry into the kernel */
305292
print("dlme_entry:\n");
306-
hexdump(ret.dlme_entry, 0x100);
307-
print("dlme_arg:\n");
308-
hexdump(ret.dlme_arg, 0x280);
293+
hexdump(dlme_entry, 0x100);
294+
print("bl_context:\n");
295+
hexdump(_p(dl_info->bl_context.context), 0x280);
309296
print("skl_base:\n");
310297
hexdump(_start, 0x100);
311298
print("bootloader_data:\n");
312299
hexdump(&bootloader_data, bootloader_data.size);
313300

314301
print("skl_main() is about to exit\n");
315302

316-
return ret;
303+
return dlme_entry;
317304
}

0 commit comments

Comments (0)