Skip to content

Commit ae8d5d4

Browse files
mdoucha authored and pevik committed
Add KVM helper functions for AMD SVM
Acked-by: Petr Vorel <[email protected]>
Acked-by: Cyril Hrubis <[email protected]>
Signed-off-by: Martin Doucha <[email protected]>
1 parent 6949959 commit ae8d5d4

File tree

6 files changed

+566
-1
lines changed

6 files changed

+566
-1
lines changed

doc/kvm-test-api.txt

Lines changed: 73 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -390,6 +390,79 @@ Developer's Manual, Volume 3, Chapter 4 for explanation of the fields.
390390
See Intel(R) 64 and IA-32 Architectures Software Developer's Manual
391391
for documentation of standard and model-specific x86 registers.
392392

393+
3.5 AMD SVM helper functions
394+
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
395+
396+
`#include "kvm_test.h"` +
397+
`#include "kvm_x86.h"` +
398+
`#include "kvm_x86_svm.h"`
399+
400+
The KVM guest library provides basic helper functions for creating and running
401+
nested virtual machines using the AMD SVM technology.
402+
403+
.Example code to execute nested VM
404+
[source,c]
405+
-------------------------------------------------------------------------------
406+
int guest_main(void)
407+
{
408+
...
409+
return 0;
410+
}
411+
412+
void main(void)
413+
{
414+
struct kvm_svm_vcpu *vm;
415+
416+
kvm_init_svm();
417+
vm = kvm_create_svm_vcpu(guest_main, 1);
418+
kvm_svm_vmrun(vm);
419+
}
420+
-------------------------------------------------------------------------------
421+
422+
- `int kvm_is_svm_supported(void)` - Returns non-zero value if the CPU
423+
supports AMD SVM, otherwise returns 0.
424+
425+
- `int kvm_get_svm_state(void)` - Returns non-zero value if SVM is currently
426+
enabled, otherwise returns 0.
427+
428+
- `void kvm_set_svm_state(int enabled)` - Enable or disable SVM according
429+
to argument. If SVM is disabled by host or not supported, the test will exit
430+
with `TCONF`.
431+
432+
- `void kvm_init_svm(void)` - Enable and fully initialize SVM, including
433+
allocating and setting up host save area VMCB. If SVM is disabled by host or
434+
not supported, the test will exit with `TCONF`.
435+
436+
- `struct kvm_vmcb *kvm_alloc_vmcb(void)` - Allocate new VMCB structure
437+
with correct memory alignment and fill it with zeroes.
438+
439+
- `void kvm_vmcb_set_intercept(struct kvm_vmcb *vmcb, unsigned int id, unsigned int state)` -
440+
Set SVM intercept bit `id` to given `state`.
441+
442+
- `void kvm_init_guest_vmcb(struct kvm_vmcb *vmcb, uint32_t asid, uint16_t ss, void *rsp, int (*guest_main)(void))` -
443+
Initialize new SVM virtual machine. The `asid` parameter is the nested
444+
page table ID. The `ss` and `rsp` parameters set the stack segment and stack
445+
pointer values, respectively. The `guest_main` parameter sets the code entry
446+
point of the virtual machine. All control registers, segment registers
447+
(except stack segment register), GDTR and IDTR will be copied
448+
from the current CPU state.
449+
450+
- `struct kvm_svm_vcpu *kvm_create_svm_vcpu(int (*guest_main)(void), int alloc_stack)` -
451+
Convenience function for allocating and initializing new SVM virtual CPU.
452+
The `guest_main` parameter is passed to `kvm_init_guest_vmcb()`,
453+
the `alloc_stack` parameter controls whether a new 8KB stack will be
454+
allocated and registered in GDT. Interception will be enabled for `VMSAVE`
455+
and `HLT` instructions. If you set `alloc_stack` to zero, you must configure
456+
the stack segment register and stack pointer manually.
457+
458+
- `void kvm_svm_vmrun(struct kvm_svm_vcpu *cpu)` - Start or continue execution
459+
of a nested virtual machine. Beware that FPU state is not saved. Do not use
460+
floating point types or values in nested guest code. Also do not use
461+
`tst_res()` or `tst_brk()` functions in nested guest code.
462+
463+
See AMD64 Architecture Programmer's Manual Volume 2 for documentation
464+
of the Secure Virtual Machine (SVM) technology.
465+
393466
4. KVM guest environment
394467
------------------------
395468

testcases/kernel/kvm/bootstrap_x86.S

Lines changed: 79 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -9,6 +9,8 @@
99
.set RESULT_ADDRESS, 0xfffff000
1010
.set KVM_GDT_SIZE, 32
1111

12+
.set MSR_VM_HSAVE_PA, 0xc0010117
13+
1214
/*
1315
* This section will be allocated at address 0x1000 and
1416
* jumped to from the reset stub provided by kvm_run.
@@ -351,6 +353,83 @@ kvm_yield:
351353
hlt
352354
ret
353355

356+
/*
 * Nested guest entry point. The guest main function address is expected
 * in %eax (presumably placed into the VMCB RAX field by
 * kvm_init_guest_vmcb() — see the SVM helper docs above; confirm against
 * kvm_x86_svm.c). After the guest function returns, halt forever so the
 * host regains control through the HLT intercept.
 */
.global kvm_svm_guest_entry
357+
kvm_svm_guest_entry:
358+
call *%eax
359+
/* Loop on HLT: each HLT triggers a VMEXIT back to kvm_svm_vmrun() */
1: hlt
360+
jmp 1b
361+
362+
/*
 * void kvm_svm_vmrun(struct kvm_svm_vcpu *cpu)  (32-bit variant)
 *
 * Start or resume the nested guest described by *cpu. cdecl: the cpu
 * pointer is read from 8(%esp) after the initial "push %edi".
 * Offsets used below imply the layout: VMCB pointer at +0, followed by
 * a struct kvm_regs64 register buffer at +4 (128 bytes) — inferred from
 * the offsets, confirm against struct kvm_svm_vcpu in kvm_x86_svm.h.
 */
.global kvm_svm_vmrun
363+
kvm_svm_vmrun:
364+
/* Preserve callee-saved registers per cdecl */
push %edi
365+
mov 8(%esp), %edi
366+
push %ebx
367+
push %esi
368+
push %ebp
369+
370+
/* Block global interrupts while host state is partially swapped out */
clgi
371+
372+
/* Save full host state */
373+
movl $MSR_VM_HSAVE_PA, %ecx
374+
rdmsr
375+
/* Only %eax (low 32 bits of the host save area PA) is used here */
vmsave
376+
push %eax
377+
378+
/* Load guest registers */
379+
push %edi
380+
movl (%edi), %eax
381+
/* %eax is loaded by vmrun from VMCB */
382+
movl 0x0c(%edi), %ebx
383+
movl 0x14(%edi), %ecx
384+
movl 0x1c(%edi), %edx
385+
movl 0x2c(%edi), %esi
386+
movl 0x34(%edi), %ebp
387+
/* %esp is loaded by vmrun from VMCB */
388+
movl 0x24(%edi), %edi
389+
390+
vmload
391+
vmrun
392+
vmsave
393+
394+
/*
 * Clear guest register buffer. The buffer fields are 64-bit
 * (struct kvm_regs64) but only 32-bit values are stored below, so zero
 * all 32 dwords (128 bytes) at cpu+4 first to keep the upper halves
 * zeroed. Guest %edi/%ecx are stacked because rep stosl clobbers them.
 */
/* Clear guest register buffer */
push %edi
396+
push %ecx
397+
movl 8(%esp), %edi
398+
addl $4, %edi
399+
xorl %eax, %eax
400+
mov $32, %ecx
401+
/* Preserve the direction flag around rep stosl */
pushfl
402+
cld
403+
rep stosl
404+
popfl
405+
406+
/* Save guest registers */
407+
pop %ecx
408+
/* %eax now holds the guest's %edi saved above */
pop %eax
409+
/* %edi now holds the cpu struct pointer again */
pop %edi
410+
movl %ebx, 0x0c(%edi)
411+
movl %ecx, 0x14(%edi)
412+
movl %edx, 0x1c(%edi)
413+
movl %eax, 0x24(%edi)
414+
movl %esi, 0x2c(%edi)
415+
movl %ebp, 0x34(%edi)
416+
/* Copy %eax and %esp from VMCB (save area offsets 0x5f8 and 0x5d8) */
movl (%edi), %esi
418+
movl 0x5f8(%esi), %eax
419+
movl %eax, 0x04(%edi)
420+
movl 0x5d8(%esi), %eax
421+
movl %eax, 0x3c(%edi)
422+
423+
/* Reload host state (hsave PA was pushed after the first vmsave) */
pop %eax
424+
vmload
425+
stgi
426+
427+
pop %ebp
428+
pop %esi
429+
pop %ebx
430+
pop %edi
431+
ret
432+
354433

355434
.section .bss.pgtables, "aw", @nobits
356435
.global kvm_pagetable

testcases/kernel/kvm/bootstrap_x86_64.S

Lines changed: 89 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -10,6 +10,8 @@
1010
.set RESULT_ADDRESS, 0xfffff000
1111
.set KVM_GDT_SIZE, 32
1212

13+
.set MSR_VM_HSAVE_PA, 0xc0010117
14+
1315
/*
1416
* This section will be allocated at address 0x1000 and
1517
* jumped to from the reset stub provided by kvm_run.
@@ -474,6 +476,93 @@ kvm_yield:
474476
hlt
475477
ret
476478

479+
/*
 * Nested guest entry point (64-bit). The guest main function address is
 * expected in %rax (presumably placed into the VMCB RAX field by
 * kvm_init_guest_vmcb() — confirm against kvm_x86_svm.c). After the
 * guest function returns, halt forever so the host regains control
 * through the HLT intercept.
 */
.global kvm_svm_guest_entry
480+
kvm_svm_guest_entry:
481+
call *%rax
482+
/* Loop on HLT: each HLT triggers a VMEXIT back to kvm_svm_vmrun() */
1: hlt
483+
jmp 1b
484+
485+
/*
 * void kvm_svm_vmrun(struct kvm_svm_vcpu *cpu)  (64-bit variant)
 *
 * Start or resume the nested guest described by *cpu. SysV AMD64 ABI:
 * the cpu pointer arrives in %rdi. Offsets used below imply the layout:
 * VMCB pointer at +0, then a struct kvm_regs64 buffer starting with rax
 * at +0x08 — inferred from the offsets, confirm against
 * struct kvm_svm_vcpu in kvm_x86_svm.h.
 */
.global kvm_svm_vmrun
486+
kvm_svm_vmrun:
487+
/* Preserve callee-saved registers per the SysV AMD64 ABI */
pushq %rbx
488+
pushq %rbp
489+
pushq %r12
490+
pushq %r13
491+
pushq %r14
492+
pushq %r15
493+
494+
/* Block global interrupts while host state is partially swapped out */
clgi
495+
496+
/* Save full host state */
497+
movq $MSR_VM_HSAVE_PA, %rcx
498+
rdmsr
499+
/* rdmsr returns EDX:EAX; combine into the full 64-bit hsave PA */
shlq $32, %rdx
500+
orq %rdx, %rax
501+
vmsave
502+
pushq %rax
503+
504+
/* Load guest registers */
505+
pushq %rdi
506+
movq (%rdi), %rax
507+
/* %rax is loaded by vmrun from VMCB */
508+
movq 0x10(%rdi), %rbx
509+
movq 0x18(%rdi), %rcx
510+
movq 0x20(%rdi), %rdx
511+
movq 0x30(%rdi), %rsi
512+
movq 0x38(%rdi), %rbp
513+
/* %rsp is loaded by vmrun from VMCB */
514+
movq 0x48(%rdi), %r8
515+
movq 0x50(%rdi), %r9
516+
movq 0x58(%rdi), %r10
517+
movq 0x60(%rdi), %r11
518+
movq 0x68(%rdi), %r12
519+
movq 0x70(%rdi), %r13
520+
movq 0x78(%rdi), %r14
521+
movq 0x80(%rdi), %r15
522+
/* Load guest %rdi last — it holds the cpu pointer until here */
movq 0x28(%rdi), %rdi
523+
524+
vmload
525+
vmrun
526+
vmsave
527+
528+
/* Save guest registers */
529+
/* Stash guest %rdi before restoring the cpu pointer */
movq %rdi, %rax
530+
popq %rdi
531+
movq %rbx, 0x10(%rdi)
532+
movq %rcx, 0x18(%rdi)
533+
movq %rdx, 0x20(%rdi)
534+
/* %rax contains guest %rdi */
535+
movq %rax, 0x28(%rdi)
536+
movq %rsi, 0x30(%rdi)
537+
movq %rbp, 0x38(%rdi)
538+
movq %r8, 0x48(%rdi)
539+
movq %r9, 0x50(%rdi)
540+
movq %r10, 0x58(%rdi)
541+
movq %r11, 0x60(%rdi)
542+
movq %r12, 0x68(%rdi)
543+
movq %r13, 0x70(%rdi)
544+
movq %r14, 0x78(%rdi)
545+
movq %r15, 0x80(%rdi)
546+
/* Copy guest %rax and %rsp from VMCB (save area offsets 0x5f8 and 0x5d8) */
movq (%rdi), %rsi
548+
movq 0x5f8(%rsi), %rax
549+
movq %rax, 0x08(%rdi)
550+
movq 0x5d8(%rsi), %rax
551+
movq %rax, 0x40(%rdi)
552+
553+
/* Reload host state (hsave PA was pushed after the first vmsave) */
popq %rax
555+
vmload
556+
557+
stgi
558+
559+
popq %r15
560+
popq %r14
561+
popq %r13
562+
popq %r12
563+
popq %rbp
564+
popq %rbx
565+
retq
477566

478567
.section .bss.pgtables, "aw", @nobits
479568
.global kvm_pagetable

testcases/kernel/kvm/include/kvm_x86.h

Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -64,16 +64,25 @@
6464
/* CPUID constants */
6565
#define CPUID_GET_INPUT_RANGE 0x80000000
6666
#define CPUID_GET_EXT_FEATURES 0x80000001
67+
#define CPUID_GET_SVM_FEATURES 0x8000000a
6768

6869

6970
/* Model-specific CPU register constants */
7071
#define MSR_EFER 0xc0000080
72+
#define MSR_VM_CR 0xc0010114
73+
#define MSR_VM_HSAVE_PA 0xc0010117
7174

7275
#define EFER_SCE (1 << 0) /* SYSCALL/SYSRET instructions enabled */
7376
#define EFER_LME (1 << 8) /* CPU is running in 64bit mode */
7477
#define EFER_LMA (1 << 10) /* CPU uses 64bit memory paging (read-only) */
7578
#define EFER_NXE (1 << 11) /* Execute disable bit active */
79+
#define EFER_SVME (1 << 12) /* AMD SVM instructions enabled */
7680

81+
/* VM_CR MSR (0xc0010114) flag bits — see AMD APM vol. 2, SVM chapter */
#define VM_CR_DPD (1 << 0) /* Disable the external hardware debug port */
82+
#define VM_CR_R_INIT (1 << 1) /* Redirect non-intercepted INIT to #SX */
83+
#define VM_CR_DIS_A20M (1 << 2) /* Disable A20 masking */
84+
#define VM_CR_LOCK (1 << 3) /* Lock bits 3 and 4 against changes */
85+
#define VM_CR_SVMDIS (1 << 4) /* SVM disabled (e.g. by BIOS); EFER.SVME cannot be set */
7786

7887
/* Control register constants */
7988
#define CR4_VME (1 << 0)
@@ -162,6 +171,11 @@ struct kvm_sregs {
162171
uint16_t cs, ds, es, fs, gs, ss;
163172
};
164173

174+
/*
 * General-purpose register state of a nested (SVM) guest.
 * Field order and 64-bit width are part of the ABI: kvm_svm_vmrun() in
 * bootstrap_x86.S / bootstrap_x86_64.S reads and writes these fields
 * via hard-coded byte offsets, so do not reorder or resize them.
 * The 32-bit variant only uses the low dword of each field.
 */
struct kvm_regs64 {
175+
uint64_t rax, rbx, rcx, rdx, rdi, rsi, rbp, rsp;
176+
uint64_t r8, r9, r10, r11, r12, r13, r14, r15;
177+
};
178+
165179
extern struct page_table_entry_pae kvm_pagetable[];
166180
extern struct intr_descriptor kvm_idt[X86_INTR_COUNT];
167181
extern struct segment_descriptor kvm_gdt[KVM_GDT_SIZE];

0 commit comments

Comments
 (0)