Skip to content

Commit 7006b75

Browse files
tlendacky authored and bp3tk0v committed
x86/sev: Allow for use of the early boot GHCB for PSC requests
Using a GHCB for a page stage change (as opposed to the MSR protocol) allows for multiple pages to be processed in a single request. In prep for early PSC requests in support of unaccepted memory, update the invocation of vmgexit_psc() to be able to use the early boot GHCB and not just the per-CPU GHCB structure. In order to use the proper GHCB (early boot vs per-CPU), set a flag that indicates when the per-CPU GHCBs are available and registered. For APs, the per-CPU GHCBs are created before they are started and registered upon startup, so this flag can be used globally for the BSP and APs instead of creating a per-CPU flag. This will allow for a significant reduction in the number of MSR protocol page state change requests when accepting memory. Signed-off-by: Tom Lendacky <[email protected]> Signed-off-by: Borislav Petkov (AMD) <[email protected]> Link: https://lore.kernel.org/r/d6cbb21f87f81eb8282dd3bf6c34d9698c8a4bbc.1686063086.git.thomas.lendacky@amd.com
1 parent 69dcb1e commit 7006b75

File tree

1 file changed

+38
-23
lines changed

1 file changed

+38
-23
lines changed

arch/x86/kernel/sev.c

Lines changed: 38 additions & 23 deletions
Original file line number | Diff line number | Diff line change
@@ -119,7 +119,19 @@ static DEFINE_PER_CPU(struct sev_es_save_area *, sev_vmsa);
119119

120120
struct sev_config {
121121
__u64 debug : 1,
122-
__reserved : 63;
122+
123+
/*
124+
* A flag used by __set_pages_state() that indicates when the
125+
* per-CPU GHCB has been created and registered and thus can be
126+
* used by the BSP instead of the early boot GHCB.
127+
*
128+
* For APs, the per-CPU GHCB is created before they are started
129+
* and registered upon startup, so this flag can be used globally
130+
* for the BSP and APs.
131+
*/
132+
ghcbs_initialized : 1,
133+
134+
__reserved : 62;
123135
};
124136

125137
static struct sev_config sev_cfg __read_mostly;
@@ -662,7 +674,7 @@ static void pvalidate_pages(unsigned long vaddr, unsigned long npages, bool vali
662674
}
663675
}
664676

665-
static void __init early_set_pages_state(unsigned long paddr, unsigned long npages, enum psc_op op)
677+
static void early_set_pages_state(unsigned long paddr, unsigned long npages, enum psc_op op)
666678
{
667679
unsigned long paddr_end;
668680
u64 val;
@@ -756,26 +768,13 @@ void __init snp_prep_memory(unsigned long paddr, unsigned int sz, enum psc_op op
756768
WARN(1, "invalid memory op %d\n", op);
757769
}
758770

759-
static int vmgexit_psc(struct snp_psc_desc *desc)
771+
static int vmgexit_psc(struct ghcb *ghcb, struct snp_psc_desc *desc)
760772
{
761773
int cur_entry, end_entry, ret = 0;
762774
struct snp_psc_desc *data;
763-
struct ghcb_state state;
764775
struct es_em_ctxt ctxt;
765-
unsigned long flags;
766-
struct ghcb *ghcb;
767776

768-
/*
769-
* __sev_get_ghcb() needs to run with IRQs disabled because it is using
770-
* a per-CPU GHCB.
771-
*/
772-
local_irq_save(flags);
773-
774-
ghcb = __sev_get_ghcb(&state);
775-
if (!ghcb) {
776-
ret = 1;
777-
goto out_unlock;
778-
}
777+
vc_ghcb_invalidate(ghcb);
779778

780779
/* Copy the input desc into GHCB shared buffer */
781780
data = (struct snp_psc_desc *)ghcb->shared_buffer;
@@ -832,20 +831,18 @@ static int vmgexit_psc(struct snp_psc_desc *desc)
832831
}
833832

834833
out:
835-
__sev_put_ghcb(&state);
836-
837-
out_unlock:
838-
local_irq_restore(flags);
839-
840834
return ret;
841835
}
842836

843837
static void __set_pages_state(struct snp_psc_desc *data, unsigned long vaddr,
844838
unsigned long vaddr_end, int op)
845839
{
840+
struct ghcb_state state;
846841
struct psc_hdr *hdr;
847842
struct psc_entry *e;
843+
unsigned long flags;
848844
unsigned long pfn;
845+
struct ghcb *ghcb;
849846
int i;
850847

851848
hdr = &data->hdr;
@@ -875,15 +872,31 @@ static void __set_pages_state(struct snp_psc_desc *data, unsigned long vaddr,
875872
i++;
876873
}
877874

878-
if (vmgexit_psc(data))
875+
local_irq_save(flags);
876+
877+
if (sev_cfg.ghcbs_initialized)
878+
ghcb = __sev_get_ghcb(&state);
879+
else
880+
ghcb = boot_ghcb;
881+
882+
if (!ghcb || vmgexit_psc(ghcb, data))
879883
sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PSC);
884+
885+
if (sev_cfg.ghcbs_initialized)
886+
__sev_put_ghcb(&state);
887+
888+
local_irq_restore(flags);
880889
}
881890

882891
static void set_pages_state(unsigned long vaddr, unsigned long npages, int op)
883892
{
884893
unsigned long vaddr_end, next_vaddr;
885894
struct snp_psc_desc desc;
886895

896+
/* Use the MSR protocol when a GHCB is not available. */
897+
if (!boot_ghcb)
898+
return early_set_pages_state(__pa(vaddr), npages, op);
899+
887900
vaddr = vaddr & PAGE_MASK;
888901
vaddr_end = vaddr + (npages << PAGE_SHIFT);
889902

@@ -1261,6 +1274,8 @@ void setup_ghcb(void)
12611274
if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
12621275
snp_register_per_cpu_ghcb();
12631276

1277+
sev_cfg.ghcbs_initialized = true;
1278+
12641279
return;
12651280
}
12661281

0 commit comments

Comments (0)