 #include <linux/cpumask.h>
 #include <linux/iommu.h>
 #include <linux/amd-iommu.h>
+#include <linux/nospec.h>
 
 #include <asm/sev.h>
 #include <asm/processor.h>
@@ -77,12 +78,42 @@ struct rmpentry_raw {
  */
 #define RMPTABLE_CPU_BOOKKEEPING_SZ	0x4000
 
+/*
+ * For a non-segmented RMP table, use the maximum physical addressing as the
+ * segment size in order to always arrive at index 0 in the table.
+ */
+#define RMPTABLE_NON_SEGMENTED_SHIFT	52
+
+struct rmp_segment_desc {
+	struct rmpentry_raw *rmp_entry;
+	u64 max_index;
+	u64 size;
+};
+
+/*
+ * Segmented RMP Table support.
+ *   - The segment size is used for two purposes:
+ *     - Identify the amount of memory covered by an RMP segment
+ *     - Quickly locate an RMP segment table entry for a physical address
+ *
+ *   - The RMP segment table contains pointers to an RMP table that covers
+ *     a specific portion of memory. There can be up to 512 8-byte entries,
+ *     one page's worth.
+ */
+static struct rmp_segment_desc **rmp_segment_table __ro_after_init;
+static unsigned int rst_max_index __ro_after_init = 512;
+
+static unsigned int rmp_segment_shift;
+static u64 rmp_segment_size;
+static u64 rmp_segment_mask;
+
+#define RST_ENTRY_INDEX(x)	((x) >> rmp_segment_shift)
+#define RMP_ENTRY_INDEX(x)	((u64)(PHYS_PFN((x) & rmp_segment_mask)))
+
 /* Mask to apply to a PFN to get the first PFN of a 2MB page */
 #define PFN_PMD_MASK	GENMASK_ULL(63, PMD_SHIFT - PAGE_SHIFT)
 
 static u64 probed_rmp_base, probed_rmp_size;
-static struct rmpentry_raw *rmptable __ro_after_init;
-static u64 rmptable_max_pfn __ro_after_init;
 
 static LIST_HEAD(snp_leaked_pages_list);
 static DEFINE_SPINLOCK(snp_leaked_pages_list_lock);
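
The two index macros above implement a simple address decomposition: the bits of a physical address above rmp_segment_shift select an RMP segment, and the page-frame number within the segment (the low bits, masked then shifted by PAGE_SHIFT) selects the entry inside that segment's RMP. A minimal userspace sketch of the arithmetic, using a hypothetical segment shift of 32 (4 GB segments) purely for illustration:

```c
#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the kernel definitions; all values here are assumptions */
#define PAGE_SHIFT		12
#define PHYS_PFN(x)		((x) >> PAGE_SHIFT)

static unsigned int rmp_segment_shift = 32;	/* hypothetical: 4 GB segments */
static uint64_t rmp_segment_mask = (1ULL << 32) - 1;

#define RST_ENTRY_INDEX(x)	((x) >> rmp_segment_shift)
#define RMP_ENTRY_INDEX(x)	((uint64_t)(PHYS_PFN((x) & rmp_segment_mask)))

int main(void)
{
	uint64_t paddr = 0x123456000ULL;	/* an address just past 4 GB */

	/* Expect segment 1, entry 0x23456 within that segment's RMP */
	printf("RST index: %llu, RMP entry index: 0x%llx\n",
	       (unsigned long long)RST_ENTRY_INDEX(paddr),
	       (unsigned long long)RMP_ENTRY_INDEX(paddr));
	return 0;
}
```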
@@ -190,15 +221,101 @@ static bool __init clear_rmptable_bookkeeping(void)
 	return true;
 }
 
+static bool __init alloc_rmp_segment_desc(u64 segment_pa, u64 segment_size, u64 pa)
+{
+	u64 rst_index, rmp_segment_size_max;
+	struct rmp_segment_desc *desc;
+	void *rmp_segment;
+
+	/* Calculate the maximum size an RMP can be (16 bytes/page mapped) */
+	rmp_segment_size_max = PHYS_PFN(rmp_segment_size) << 4;
+
+	/* Validate the RMP segment size */
+	if (segment_size > rmp_segment_size_max) {
+		pr_err("Invalid RMP size 0x%llx for configured segment size 0x%llx\n",
+		       segment_size, rmp_segment_size_max);
+		return false;
+	}
+
+	/* Validate the RMP segment table index */
+	rst_index = RST_ENTRY_INDEX(pa);
+	if (rst_index >= rst_max_index) {
+		pr_err("Invalid RMP segment base address 0x%llx for configured segment size 0x%llx\n",
+		       pa, rmp_segment_size);
+		return false;
+	}
+
+	if (rmp_segment_table[rst_index]) {
+		pr_err("RMP segment descriptor already exists at index %llu\n", rst_index);
+		return false;
+	}
+
+	rmp_segment = memremap(segment_pa, segment_size, MEMREMAP_WB);
+	if (!rmp_segment) {
+		pr_err("Failed to map RMP segment addr 0x%llx size 0x%llx\n",
+		       segment_pa, segment_size);
+		return false;
+	}
+
+	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+	if (!desc) {
+		memunmap(rmp_segment);
+		return false;
+	}
+
+	desc->rmp_entry = rmp_segment;
+	desc->max_index = segment_size / sizeof(*desc->rmp_entry);
+	desc->size = segment_size;
+
+	rmp_segment_table[rst_index] = desc;
+
+	return true;
+}
+
+static void __init free_rmp_segment_table(void)
+{
+	unsigned int i;
+
+	for (i = 0; i < rst_max_index; i++) {
+		struct rmp_segment_desc *desc;
+
+		desc = rmp_segment_table[i];
+		if (!desc)
+			continue;
+
+		memunmap(desc->rmp_entry);
+
+		kfree(desc);
+	}
+
+	free_page((unsigned long)rmp_segment_table);
+
+	rmp_segment_table = NULL;
+}
+
+/* Allocate the table used to index into the RMP segments */
+static bool __init alloc_rmp_segment_table(void)
+{
+	struct page *page;
+
+	page = alloc_page(__GFP_ZERO);
+	if (!page)
+		return false;
+
+	rmp_segment_table = page_address(page);
+
+	return true;
+}
+
 /*
  * Do the necessary preparations which are verified by the firmware as
  * described in the SNP_INIT_EX firmware command description in the SNP
  * firmware ABI spec.
  */
 static int __init snp_rmptable_init(void)
 {
-	u64 max_rmp_pfn, calc_rmp_sz, rmptable_size, rmp_end, val;
-	void *rmptable_start;
+	u64 max_rmp_pfn, calc_rmp_sz, rmptable_segment, rmptable_size, rmp_end, val;
+	unsigned int i;
 
 	if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP))
 		return 0;
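
The size cap enforced in alloc_rmp_segment_desc() above follows directly from the RMP entry format: each 4 KB page a segment covers needs one 16-byte RMP entry, so the RMP for a segment can be at most (pages covered) * 16 bytes. A small standalone sketch of that arithmetic (the 4 GB segment size is an assumed example value, not something the patch mandates):

```c
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PHYS_PFN(x)	((x) >> PAGE_SHIFT)

int main(void)
{
	uint64_t rmp_segment_size = 1ULL << 32;	/* hypothetical 4 GB segment */

	/* One 16-byte RMP entry per 4 KB page covered by the segment */
	uint64_t rmp_segment_size_max = PHYS_PFN(rmp_segment_size) << 4;

	/* 4 GB / 4 KB pages * 16 bytes = 16 MB of RMP entries */
	printf("max RMP size for this segment: %llu MB\n",
	       (unsigned long long)(rmp_segment_size_max >> 20));
	return 0;
}
```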
@@ -227,17 +344,18 @@ static int __init snp_rmptable_init(void)
 		goto nosnp;
 	}
 
+	if (!alloc_rmp_segment_table())
+		goto nosnp;
+
 	/* Map only the RMP entries */
-	rmptable_start = memremap(probed_rmp_base + RMPTABLE_CPU_BOOKKEEPING_SZ,
-				  probed_rmp_size - RMPTABLE_CPU_BOOKKEEPING_SZ,
-				  MEMREMAP_WB);
-	if (!rmptable_start) {
-		pr_err("Failed to map RMP table\n");
+	rmptable_segment = probed_rmp_base + RMPTABLE_CPU_BOOKKEEPING_SZ;
+	rmptable_size = probed_rmp_size - RMPTABLE_CPU_BOOKKEEPING_SZ;
+
+	if (!alloc_rmp_segment_desc(rmptable_segment, rmptable_size, 0)) {
+		free_rmp_segment_table();
 		goto nosnp;
 	}
 
-	rmptable_size = probed_rmp_size - RMPTABLE_CPU_BOOKKEEPING_SZ;
-
 	/*
 	 * Check if SEV-SNP is already enabled, this can happen in case of
 	 * kexec boot.
@@ -248,12 +366,20 @@ static int __init snp_rmptable_init(void)
 
 	/* Zero out the RMP bookkeeping area */
 	if (!clear_rmptable_bookkeeping()) {
-		memunmap(rmptable_start);
+		free_rmp_segment_table();
 		goto nosnp;
 	}
 
 	/* Zero out the RMP entries */
-	memset(rmptable_start, 0, rmptable_size);
+	for (i = 0; i < rst_max_index; i++) {
+		struct rmp_segment_desc *desc;
+
+		desc = rmp_segment_table[i];
+		if (!desc)
+			continue;
+
+		memset(desc->rmp_entry, 0, desc->size);
+	}
 
 	/* Flush the caches to ensure that data is written before SNP is enabled. */
 	wbinvd_on_all_cpus();
@@ -264,9 +390,6 @@ static int __init snp_rmptable_init(void)
 	on_each_cpu(snp_enable, NULL, 1);
 
 skip_enable:
-	rmptable = (struct rmpentry_raw *)rmptable_start;
-	rmptable_max_pfn = rmptable_size / sizeof(struct rmpentry_raw) - 1;
-
 	cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/rmptable_init:online", __snp_enable, NULL);
 
 	/*
@@ -287,6 +410,13 @@ static int __init snp_rmptable_init(void)
  */
 device_initcall(snp_rmptable_init);
 
+static void set_rmp_segment_info(unsigned int segment_shift)
+{
+	rmp_segment_shift = segment_shift;
+	rmp_segment_size = 1ULL << rmp_segment_shift;
+	rmp_segment_mask = rmp_segment_size - 1;
+}
+
 #define RMP_ADDR_MASK	GENMASK_ULL(51, 13)
 
 bool snp_probe_rmptable_info(void)
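
set_rmp_segment_info() keeps the three globals consistent by deriving both the size and the mask from the single shift value. A quick standalone check of that derivation (the shift value 32 is assumed here purely for illustration):

```c
#include <assert.h>
#include <stdint.h>

static unsigned int rmp_segment_shift;
static uint64_t rmp_segment_size;
static uint64_t rmp_segment_mask;

/* Mirrors the helper added by the patch, in userspace types */
static void set_rmp_segment_info(unsigned int segment_shift)
{
	rmp_segment_shift = segment_shift;
	rmp_segment_size = 1ULL << rmp_segment_shift;
	rmp_segment_mask = rmp_segment_size - 1;
}

int main(void)
{
	set_rmp_segment_info(32);	/* hypothetical 4 GB segments */
	assert(rmp_segment_size == 0x100000000ULL);
	assert(rmp_segment_mask == 0x0FFFFFFFFULL);
	return 0;
}
```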
@@ -308,6 +438,11 @@ bool snp_probe_rmptable_info(void)
 
 	rmp_sz = rmp_end - rmp_base + 1;
 
+	/* Treat the contiguous RMP table as a single segment */
+	rst_max_index = 1;
+
+	set_rmp_segment_info(RMPTABLE_NON_SEGMENTED_SHIFT);
+
 	probed_rmp_base = rmp_base;
 	probed_rmp_size = rmp_sz;
 
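
Using RMPTABLE_NON_SEGMENTED_SHIFT here makes the single-segment case fall out of the general lookup path: x86-64 physical addresses are at most 52 bits, so shifting any valid address right by 52 yields 0, and RST_ENTRY_INDEX() always lands on the one descriptor installed at index 0. A quick standalone check of that property (illustrative only):

```c
#include <assert.h>
#include <stdint.h>

#define RMPTABLE_NON_SEGMENTED_SHIFT	52

int main(void)
{
	/* Largest possible 52-bit physical address */
	uint64_t max_paddr = (1ULL << 52) - 1;

	/* Every valid physical address maps to segment table index 0 */
	assert((max_paddr >> RMPTABLE_NON_SEGMENTED_SHIFT) == 0);
	assert((0x123456000ULL >> RMPTABLE_NON_SEGMENTED_SHIFT) == 0);
	return 0;
}
```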
@@ -317,15 +452,41 @@ bool snp_probe_rmptable_info(void)
 	return true;
 }
 
+/*
+ * About the array_index_nospec() usage below:
+ *
+ * This function can get called by exported functions like
+ * snp_lookup_rmpentry(), which is used by the KVM #PF handler, among
+ * others, and since the @pfn passed in cannot always be trusted,
+ * speculation should be stopped as a protective measure.
+ */
 static struct rmpentry_raw *get_raw_rmpentry(u64 pfn)
 {
-	if (!rmptable)
+	u64 paddr, rst_index, segment_index;
+	struct rmp_segment_desc *desc;
+
+	if (!rmp_segment_table)
 		return ERR_PTR(-ENODEV);
 
-	if (unlikely(pfn > rmptable_max_pfn))
+	paddr = pfn << PAGE_SHIFT;
+
+	rst_index = RST_ENTRY_INDEX(paddr);
+	if (unlikely(rst_index >= rst_max_index))
 		return ERR_PTR(-EFAULT);
 
-	return rmptable + pfn;
+	rst_index = array_index_nospec(rst_index, rst_max_index);
+
+	desc = rmp_segment_table[rst_index];
+	if (unlikely(!desc))
+		return ERR_PTR(-EFAULT);
+
+	segment_index = RMP_ENTRY_INDEX(paddr);
+	if (unlikely(segment_index >= desc->max_index))
+		return ERR_PTR(-EFAULT);
+
+	segment_index = array_index_nospec(segment_index, desc->max_index);
+
+	return desc->rmp_entry + segment_index;
 }
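
For readers following the lookup logic, here is a compact userspace mock of the same two-level, bounds-checked walk. It is a sketch under stated assumptions: array_index_nospec() is kernel-only, so plain bounds checks stand in for the speculation clamps, and the entry type is a simplified stand-in for struct rmpentry_raw:

```c
#include <errno.h>
#include <stddef.h>
#include <stdint.h>

#define PAGE_SHIFT	12

struct rmp_segment_desc {
	uint64_t *rmp_entry;	/* simplified: one u64 per RMP entry */
	uint64_t max_index;
};

static struct rmp_segment_desc **rmp_segment_table;
static uint64_t rst_max_index;
static unsigned int rmp_segment_shift;
static uint64_t rmp_segment_mask;

/* Returns a pointer to the RMP entry for @pfn, or NULL with *err set */
static uint64_t *mock_get_rmpentry(uint64_t pfn, int *err)
{
	uint64_t paddr = pfn << PAGE_SHIFT;
	uint64_t rst_index = paddr >> rmp_segment_shift;
	struct rmp_segment_desc *desc;

	if (!rmp_segment_table) {
		*err = -ENODEV;
		return NULL;
	}

	/* In the kernel, array_index_nospec() would also clamp these indices */
	if (rst_index >= rst_max_index || !(desc = rmp_segment_table[rst_index])) {
		*err = -EFAULT;
		return NULL;
	}

	uint64_t segment_index = (paddr & rmp_segment_mask) >> PAGE_SHIFT;
	if (segment_index >= desc->max_index) {
		*err = -EFAULT;
		return NULL;
	}

	return &desc->rmp_entry[segment_index];
}

int main(void)
{
	static uint64_t entries[16];
	static struct rmp_segment_desc desc = { entries, 16 };
	static struct rmp_segment_desc *table[1] = { &desc };
	int err = 0;

	rmp_segment_table = table;
	rst_max_index = 1;
	rmp_segment_shift = 52;			/* non-segmented: all PFNs hit index 0 */
	rmp_segment_mask = (1ULL << 52) - 1;

	/* PFN 5 resolves to entries[5] in the single segment */
	return mock_get_rmpentry(5, &err) ? 0 : err;
}
```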
 
 static int get_rmpentry(u64 pfn, struct rmpentry *e)