 #include <linux/cpumask.h>
 #include <linux/iommu.h>
 #include <linux/amd-iommu.h>
+#include <linux/nospec.h>
 
 #include <asm/sev.h>
 #include <asm/processor.h>
@@ -77,12 +78,42 @@ struct rmpentry_raw {
  */
 #define RMPTABLE_CPU_BOOKKEEPING_SZ	0x4000
 
+/*
+ * For a non-segmented RMP table, use the maximum physical addressing as the
+ * segment size in order to always arrive at index 0 in the table.
+ */
+#define RMPTABLE_NON_SEGMENTED_SHIFT	52
+
+struct rmp_segment_desc {
+	struct rmpentry_raw *rmp_entry;
+	u64 max_index;
+	u64 size;
+};
+
+/*
+ * Segmented RMP Table support.
+ *   - The segment size is used for two purposes:
+ *     - Identify the amount of memory covered by an RMP segment
+ *     - Quickly locate an RMP segment table entry for a physical address
+ *
+ *   - The RMP segment table contains pointers to an RMP table that covers
+ *     a specific portion of memory. There can be up to 512 8-byte entries,
+ *     one page's worth.
+ */
+static struct rmp_segment_desc **rmp_segment_table __ro_after_init;
+static unsigned int rst_max_index __ro_after_init = 512;
+
+static unsigned int rmp_segment_shift;
+static u64 rmp_segment_size;
+static u64 rmp_segment_mask;
+
+#define RST_ENTRY_INDEX(x)	((x) >> rmp_segment_shift)
+#define RMP_ENTRY_INDEX(x)	((u64)(PHYS_PFN((x) & rmp_segment_mask)))
+
 /* Mask to apply to a PFN to get the first PFN of a 2MB page */
 #define PFN_PMD_MASK	GENMASK_ULL(63, PMD_SHIFT - PAGE_SHIFT)
 
 static u64 probed_rmp_base, probed_rmp_size;
-static struct rmpentry_raw *rmptable __ro_after_init;
-static u64 rmptable_max_pfn __ro_after_init;
 
 static LIST_HEAD(snp_leaked_pages_list);
 static DEFINE_SPINLOCK(snp_leaked_pages_list_lock);
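
As a quick illustration of how the two index macros above decompose a physical address, here is a small standalone userspace sketch (not part of the patch). The 40-bit segment shift, the 4 KiB page size and all "ex_"/"EX_" names are assumptions chosen purely for the example.

	/* Standalone sketch of the RST/RMP index math; values are illustrative only. */
	#include <stdint.h>
	#include <stdio.h>

	#define EX_PAGE_SHIFT	12				/* assume 4 KiB pages */
	#define EX_SEG_SHIFT	40				/* hypothetical 1 TiB RMP segment */
	#define EX_SEG_MASK	((1ULL << EX_SEG_SHIFT) - 1)

	int main(void)
	{
		uint64_t pa = 0x1ffc0123000ULL;		/* arbitrary address above 1 TiB */

		/* Mirrors RST_ENTRY_INDEX(): which segment table slot covers 'pa' (here: 1) */
		uint64_t rst_index = pa >> EX_SEG_SHIFT;

		/* Mirrors RMP_ENTRY_INDEX(): which per-page RMP entry within that segment */
		uint64_t rmp_index = (pa & EX_SEG_MASK) >> EX_PAGE_SHIFT;

		printf("segment table index: %llu\n", (unsigned long long)rst_index);
		printf("RMP entry index:     %llu\n", (unsigned long long)rmp_index);
		return 0;
	}
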
@@ -190,15 +221,101 @@ static bool __init clear_rmptable_bookkeeping(void)
 	return true;
 }
 
+static bool __init alloc_rmp_segment_desc(u64 segment_pa, u64 segment_size, u64 pa)
+{
+	u64 rst_index, rmp_segment_size_max;
+	struct rmp_segment_desc *desc;
+	void *rmp_segment;
+
+	/* Calculate the maximum size an RMP can be (16 bytes/page mapped) */
+	rmp_segment_size_max = PHYS_PFN(rmp_segment_size) << 4;
+
+	/* Validate the RMP segment size */
+	if (segment_size > rmp_segment_size_max) {
+		pr_err("Invalid RMP size 0x%llx for configured segment size 0x%llx\n",
+		       segment_size, rmp_segment_size_max);
+		return false;
+	}
+
+	/* Validate the RMP segment table index */
+	rst_index = RST_ENTRY_INDEX(pa);
+	if (rst_index >= rst_max_index) {
+		pr_err("Invalid RMP segment base address 0x%llx for configured segment size 0x%llx\n",
+		       pa, rmp_segment_size);
+		return false;
+	}
+
+	if (rmp_segment_table[rst_index]) {
+		pr_err("RMP segment descriptor already exists at index %llu\n", rst_index);
+		return false;
+	}
+
+	rmp_segment = memremap(segment_pa, segment_size, MEMREMAP_WB);
+	if (!rmp_segment) {
+		pr_err("Failed to map RMP segment addr 0x%llx size 0x%llx\n",
+		       segment_pa, segment_size);
+		return false;
+	}
+
+	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
+	if (!desc) {
+		memunmap(rmp_segment);
+		return false;
+	}
+
+	desc->rmp_entry = rmp_segment;
+	desc->max_index = segment_size / sizeof(*desc->rmp_entry);
+	desc->size = segment_size;
+
+	rmp_segment_table[rst_index] = desc;
+
+	return true;
+}
+
+static void __init free_rmp_segment_table(void)
+{
+	unsigned int i;
+
+	for (i = 0; i < rst_max_index; i++) {
+		struct rmp_segment_desc *desc;
+
+		desc = rmp_segment_table[i];
+		if (!desc)
+			continue;
+
+		memunmap(desc->rmp_entry);
+
+		kfree(desc);
+	}
+
+	free_page((unsigned long)rmp_segment_table);
+
+	rmp_segment_table = NULL;
+}
+
+/* Allocate the table used to index into the RMP segments */
+static bool __init alloc_rmp_segment_table(void)
+{
+	struct page *page;
+
+	page = alloc_page(__GFP_ZERO);
+	if (!page)
+		return false;
+
+	rmp_segment_table = page_address(page);
+
+	return true;
+}
+
 /*
  * Do the necessary preparations which are verified by the firmware as
  * described in the SNP_INIT_EX firmware command description in the SNP
  * firmware ABI spec.
  */
 static int __init snp_rmptable_init(void)
 {
-	u64 max_rmp_pfn, calc_rmp_sz, rmptable_size, rmp_end, val;
-	void *rmptable_start;
+	u64 max_rmp_pfn, calc_rmp_sz, rmptable_segment, rmptable_size, rmp_end, val;
+	unsigned int i;
 
 	if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP))
 		return 0;
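
The size check in alloc_rmp_segment_desc() above boils down to "one 16-byte struct rmpentry_raw per 4 KiB page covered by the segment". Below is a minimal sketch of that arithmetic; the 1 TiB segment size is an assumed example value, and ex_phys_pfn() is a hypothetical stand-in for the kernel's PHYS_PFN().

	#include <stdint.h>
	#include <stdio.h>

	#define EX_PAGE_SHIFT		12	/* assume 4 KiB pages */
	#define EX_RMP_ENTRY_SIZE	16	/* size of one raw RMP entry */

	/* Stand-in for PHYS_PFN(): physical address -> page frame number */
	static uint64_t ex_phys_pfn(uint64_t pa)
	{
		return pa >> EX_PAGE_SHIFT;
	}

	int main(void)
	{
		uint64_t segment_size = 1ULL << 40;	/* hypothetical 1 TiB segment */

		/* Same shape as: rmp_segment_size_max = PHYS_PFN(rmp_segment_size) << 4 */
		uint64_t rmp_max = ex_phys_pfn(segment_size) * EX_RMP_ENTRY_SIZE;

		/* 2^40 bytes / 4096 bytes-per-page * 16 bytes-per-entry = 4 GiB of RMP */
		printf("max RMP size for this segment: 0x%llx bytes\n",
		       (unsigned long long)rmp_max);
		return 0;
	}
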
@@ -227,17 +344,18 @@ static int __init snp_rmptable_init(void)
 		goto nosnp;
 	}
 
+	if (!alloc_rmp_segment_table())
+		goto nosnp;
+
 	/* Map only the RMP entries */
-	rmptable_start = memremap(probed_rmp_base + RMPTABLE_CPU_BOOKKEEPING_SZ,
-				  probed_rmp_size - RMPTABLE_CPU_BOOKKEEPING_SZ,
-				  MEMREMAP_WB);
-	if (!rmptable_start) {
-		pr_err("Failed to map RMP table\n");
+	rmptable_segment = probed_rmp_base + RMPTABLE_CPU_BOOKKEEPING_SZ;
+	rmptable_size = probed_rmp_size - RMPTABLE_CPU_BOOKKEEPING_SZ;
+
+	if (!alloc_rmp_segment_desc(rmptable_segment, rmptable_size, 0)) {
+		free_rmp_segment_table();
 		goto nosnp;
 	}
 
-	rmptable_size = probed_rmp_size - RMPTABLE_CPU_BOOKKEEPING_SZ;
-
 	/*
 	 * Check if SEV-SNP is already enabled, this can happen in case of
 	 * kexec boot.
@@ -248,12 +366,20 @@ static int __init snp_rmptable_init(void)
 
 	/* Zero out the RMP bookkeeping area */
 	if (!clear_rmptable_bookkeeping()) {
-		memunmap(rmptable_start);
+		free_rmp_segment_table();
 		goto nosnp;
 	}
 
 	/* Zero out the RMP entries */
-	memset(rmptable_start, 0, rmptable_size);
+	for (i = 0; i < rst_max_index; i++) {
+		struct rmp_segment_desc *desc;
+
+		desc = rmp_segment_table[i];
+		if (!desc)
+			continue;
+
+		memset(desc->rmp_entry, 0, desc->size);
+	}
 
 	/* Flush the caches to ensure that data is written before SNP is enabled. */
 	wbinvd_on_all_cpus();
@@ -264,9 +390,6 @@ static int __init snp_rmptable_init(void)
 	on_each_cpu(snp_enable, NULL, 1);
 
 skip_enable:
-	rmptable = (struct rmpentry_raw *)rmptable_start;
-	rmptable_max_pfn = rmptable_size / sizeof(struct rmpentry_raw) - 1;
-
 	cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "x86/rmptable_init:online", __snp_enable, NULL);
 
 	/*
@@ -287,6 +410,13 @@ static int __init snp_rmptable_init(void)
  */
 device_initcall(snp_rmptable_init);
 
+static void set_rmp_segment_info(unsigned int segment_shift)
+{
+	rmp_segment_shift = segment_shift;
+	rmp_segment_size = 1ULL << rmp_segment_shift;
+	rmp_segment_mask = rmp_segment_size - 1;
+}
+
 #define RMP_ADDR_MASK GENMASK_ULL(51, 13)
 
 bool snp_probe_rmptable_info(void)
@@ -308,6 +438,11 @@ bool snp_probe_rmptable_info(void)
 
 	rmp_sz = rmp_end - rmp_base + 1;
 
+	/* Treat the contiguous RMP table as a single segment */
+	rst_max_index = 1;
+
+	set_rmp_segment_info(RMPTABLE_NON_SEGMENTED_SHIFT);
+
 	probed_rmp_base = rmp_base;
 	probed_rmp_size = rmp_sz;
 
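
To see why a RMPTABLE_NON_SEGMENTED_SHIFT of 52 makes the whole contiguous RMP table behave as segment 0, here is a small standalone sketch of the values set_rmp_segment_info() derives and how RST_ENTRY_INDEX() then behaves (illustrative only, not part of the patch):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned int shift = 52;		/* RMPTABLE_NON_SEGMENTED_SHIFT */
		uint64_t size = 1ULL << shift;		/* corresponds to rmp_segment_size */
		uint64_t mask = size - 1;		/* corresponds to rmp_segment_mask */

		/* Any physical address below 2^52 shifts down to segment index 0 */
		uint64_t pa = 0xfffff00000ULL;		/* arbitrary example address */

		printf("segment size 0x%llx, mask 0x%llx\n",
		       (unsigned long long)size, (unsigned long long)mask);
		printf("segment index for pa: %llu\n", (unsigned long long)(pa >> shift));
		return 0;
	}
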
@@ -317,15 +452,41 @@ bool snp_probe_rmptable_info(void)
 	return true;
 }
 
+/*
+ * About the array_index_nospec() usage below:
+ *
+ * This function can get called by exported functions like
+ * snp_lookup_rmpentry(), which is used by the KVM #PF handler, among
+ * others, and since the @pfn passed in cannot always be trusted,
+ * speculation should be stopped as a protective measure.
+ */
 static struct rmpentry_raw *get_raw_rmpentry(u64 pfn)
 {
-	if (!rmptable)
+	u64 paddr, rst_index, segment_index;
+	struct rmp_segment_desc *desc;
+
+	if (!rmp_segment_table)
 		return ERR_PTR(-ENODEV);
 
-	if (unlikely(pfn > rmptable_max_pfn))
+	paddr = pfn << PAGE_SHIFT;
+
+	rst_index = RST_ENTRY_INDEX(paddr);
+	if (unlikely(rst_index >= rst_max_index))
 		return ERR_PTR(-EFAULT);
 
-	return rmptable + pfn;
+	rst_index = array_index_nospec(rst_index, rst_max_index);
+
+	desc = rmp_segment_table[rst_index];
+	if (unlikely(!desc))
+		return ERR_PTR(-EFAULT);
+
+	segment_index = RMP_ENTRY_INDEX(paddr);
+	if (unlikely(segment_index >= desc->max_index))
+		return ERR_PTR(-EFAULT);
+
+	segment_index = array_index_nospec(segment_index, desc->max_index);
+
+	return desc->rmp_entry + segment_index;
 }
 
 static int get_rmpentry(u64 pfn, struct rmpentry *e)
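
For readers following the new lookup path, here is a compact userspace model of the two-level translation get_raw_rmpentry() now performs (pfn -> segment descriptor -> entry). It is an illustrative sketch only: the structures are simplified, the "ex_" names and the 40-bit shift are assumed, and plain bounds checks stand in for the kernel's additional array_index_nospec() clamping.

	#include <stdint.h>
	#include <stdio.h>

	#define EX_PAGE_SHIFT	12
	#define EX_SEG_SHIFT	40			/* hypothetical segment shift */
	#define EX_SEG_MASK	((1ULL << EX_SEG_SHIFT) - 1)
	#define EX_RST_MAX	512			/* one page of 8-byte pointers */

	struct ex_rmp_entry { uint64_t lo, hi; };	/* 16 bytes, like a raw RMP entry */

	struct ex_seg_desc {
		struct ex_rmp_entry *entries;
		uint64_t max_index;
	};

	static struct ex_seg_desc *ex_table[EX_RST_MAX];

	static struct ex_rmp_entry *ex_lookup(uint64_t pfn)
	{
		uint64_t paddr = pfn << EX_PAGE_SHIFT;
		uint64_t rst_index = paddr >> EX_SEG_SHIFT;
		uint64_t entry_index;
		struct ex_seg_desc *desc;

		/* The kernel also clamps speculation after each of these checks */
		if (rst_index >= EX_RST_MAX)
			return NULL;

		desc = ex_table[rst_index];
		if (!desc)
			return NULL;

		entry_index = (paddr & EX_SEG_MASK) >> EX_PAGE_SHIFT;
		if (entry_index >= desc->max_index)
			return NULL;

		return &desc->entries[entry_index];
	}

	int main(void)
	{
		static struct ex_rmp_entry entries[4];
		static struct ex_seg_desc desc = { entries, 4 };

		ex_table[0] = &desc;
		printf("entry for pfn 2: %p\n", (void *)ex_lookup(2));	/* valid */
		printf("entry for pfn 9: %p\n", (void *)ex_lookup(9));	/* out of range */
		return 0;
	}
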