@@ -160,8 +160,6 @@ static inline void wbinvd(void)
 	PVOP_VCALL0(cpu.wbinvd);
 }
 
-#define get_kernel_rpl()  (pv_info.kernel_rpl)
-
 static inline u64 paravirt_read_msr(unsigned msr)
 {
 	return PVOP_CALL1(u64, cpu.read_msr, msr);
@@ -277,12 +275,10 @@ static inline void load_TLS(struct thread_struct *t, unsigned cpu)
 	PVOP_VCALL2(cpu.load_tls, t, cpu);
 }
 
-#ifdef CONFIG_X86_64
 static inline void load_gs_index(unsigned int gs)
 {
 	PVOP_VCALL1(cpu.load_gs_index, gs);
 }
-#endif
 
 static inline void write_ldt_entry(struct desc_struct *dt, int entry,
 				   const void *desc)
@@ -375,52 +371,22 @@ static inline void paravirt_release_p4d(unsigned long pfn)
 
 static inline pte_t __pte(pteval_t val)
 {
-	pteval_t ret;
-
-	if (sizeof(pteval_t) > sizeof(long))
-		ret = PVOP_CALLEE2(pteval_t, mmu.make_pte, val, (u64)val >> 32);
-	else
-		ret = PVOP_CALLEE1(pteval_t, mmu.make_pte, val);
-
-	return (pte_t) { .pte = ret };
+	return (pte_t) { PVOP_CALLEE1(pteval_t, mmu.make_pte, val) };
 }
 
 static inline pteval_t pte_val(pte_t pte)
 {
-	pteval_t ret;
-
-	if (sizeof(pteval_t) > sizeof(long))
-		ret = PVOP_CALLEE2(pteval_t, mmu.pte_val,
-				   pte.pte, (u64)pte.pte >> 32);
-	else
-		ret = PVOP_CALLEE1(pteval_t, mmu.pte_val, pte.pte);
-
-	return ret;
+	return PVOP_CALLEE1(pteval_t, mmu.pte_val, pte.pte);
 }
 
 static inline pgd_t __pgd(pgdval_t val)
 {
-	pgdval_t ret;
-
-	if (sizeof(pgdval_t) > sizeof(long))
-		ret = PVOP_CALLEE2(pgdval_t, mmu.make_pgd, val, (u64)val >> 32);
-	else
-		ret = PVOP_CALLEE1(pgdval_t, mmu.make_pgd, val);
-
-	return (pgd_t) { ret };
+	return (pgd_t) { PVOP_CALLEE1(pgdval_t, mmu.make_pgd, val) };
 }
 
 static inline pgdval_t pgd_val(pgd_t pgd)
 {
-	pgdval_t ret;
-
-	if (sizeof(pgdval_t) > sizeof(long))
-		ret = PVOP_CALLEE2(pgdval_t, mmu.pgd_val,
-				   pgd.pgd, (u64)pgd.pgd >> 32);
-	else
-		ret = PVOP_CALLEE1(pgdval_t, mmu.pgd_val, pgd.pgd);
-
-	return ret;
+	return PVOP_CALLEE1(pgdval_t, mmu.pgd_val, pgd.pgd);
 }
 
 #define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
@@ -438,78 +404,34 @@ static inline void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned
 					   pte_t *ptep, pte_t old_pte, pte_t pte)
 {
 
-	if (sizeof(pteval_t) > sizeof(long))
-		/* 5 arg words */
-		pv_ops.mmu.ptep_modify_prot_commit(vma, addr, ptep, pte);
-	else
-		PVOP_VCALL4(mmu.ptep_modify_prot_commit,
-			    vma, addr, ptep, pte.pte);
+	PVOP_VCALL4(mmu.ptep_modify_prot_commit, vma, addr, ptep, pte.pte);
 }
 
 static inline void set_pte(pte_t *ptep, pte_t pte)
 {
-	if (sizeof(pteval_t) > sizeof(long))
-		PVOP_VCALL3(mmu.set_pte, ptep, pte.pte, (u64)pte.pte >> 32);
-	else
-		PVOP_VCALL2(mmu.set_pte, ptep, pte.pte);
-}
-
-static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
-			      pte_t *ptep, pte_t pte)
-{
-	if (sizeof(pteval_t) > sizeof(long))
-		/* 5 arg words */
-		pv_ops.mmu.set_pte_at(mm, addr, ptep, pte);
-	else
-		PVOP_VCALL4(mmu.set_pte_at, mm, addr, ptep, pte.pte);
+	PVOP_VCALL2(mmu.set_pte, ptep, pte.pte);
 }
 
 static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
 {
-	pmdval_t val = native_pmd_val(pmd);
-
-	if (sizeof(pmdval_t) > sizeof(long))
-		PVOP_VCALL3(mmu.set_pmd, pmdp, val, (u64)val >> 32);
-	else
-		PVOP_VCALL2(mmu.set_pmd, pmdp, val);
+	PVOP_VCALL2(mmu.set_pmd, pmdp, native_pmd_val(pmd));
 }
 
-#if CONFIG_PGTABLE_LEVELS >= 3
 static inline pmd_t __pmd(pmdval_t val)
 {
-	pmdval_t ret;
-
-	if (sizeof(pmdval_t) > sizeof(long))
-		ret = PVOP_CALLEE2(pmdval_t, mmu.make_pmd, val, (u64)val >> 32);
-	else
-		ret = PVOP_CALLEE1(pmdval_t, mmu.make_pmd, val);
-
-	return (pmd_t) { ret };
+	return (pmd_t) { PVOP_CALLEE1(pmdval_t, mmu.make_pmd, val) };
 }
 
 static inline pmdval_t pmd_val(pmd_t pmd)
 {
-	pmdval_t ret;
-
-	if (sizeof(pmdval_t) > sizeof(long))
-		ret = PVOP_CALLEE2(pmdval_t, mmu.pmd_val,
-				   pmd.pmd, (u64)pmd.pmd >> 32);
-	else
-		ret = PVOP_CALLEE1(pmdval_t, mmu.pmd_val, pmd.pmd);
-
-	return ret;
+	return PVOP_CALLEE1(pmdval_t, mmu.pmd_val, pmd.pmd);
 }
 
 static inline void set_pud(pud_t *pudp, pud_t pud)
 {
-	pudval_t val = native_pud_val(pud);
-
-	if (sizeof(pudval_t) > sizeof(long))
-		PVOP_VCALL3(mmu.set_pud, pudp, val, (u64)val >> 32);
-	else
-		PVOP_VCALL2(mmu.set_pud, pudp, val);
+	PVOP_VCALL2(mmu.set_pud, pudp, native_pud_val(pud));
 }
-#if CONFIG_PGTABLE_LEVELS >= 4
+
 static inline pud_t __pud(pudval_t val)
 {
 	pudval_t ret;
@@ -526,7 +448,7 @@ static inline pudval_t pud_val(pud_t pud)
 
 static inline void pud_clear(pud_t *pudp)
 {
-	set_pud(pudp, __pud(0));
+	set_pud(pudp, native_make_pud(0));
 }
 
 static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
@@ -563,40 +485,17 @@ static inline void __set_pgd(pgd_t *pgdp, pgd_t pgd)
 } while (0)
 
 #define pgd_clear(pgdp) do {						\
-	if (pgtable_l5_enabled())						\
-		set_pgd(pgdp, __pgd(0));				\
+	if (pgtable_l5_enabled())					\
+		set_pgd(pgdp, native_make_pgd(0));			\
 } while (0)
 
 #endif  /* CONFIG_PGTABLE_LEVELS == 5 */
 
 static inline void p4d_clear(p4d_t *p4dp)
 {
-	set_p4d(p4dp, __p4d(0));
+	set_p4d(p4dp, native_make_p4d(0));
 }
 
-#endif	/* CONFIG_PGTABLE_LEVELS == 4 */
-
-#endif	/* CONFIG_PGTABLE_LEVELS >= 3 */
-
-#ifdef CONFIG_X86_PAE
-/* Special-case pte-setting operations for PAE, which can't update a
-   64-bit pte atomically */
-static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
-{
-	PVOP_VCALL3(mmu.set_pte_atomic, ptep, pte.pte, pte.pte >> 32);
-}
-
-static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
-			     pte_t *ptep)
-{
-	PVOP_VCALL3(mmu.pte_clear, mm, addr, ptep);
-}
-
-static inline void pmd_clear(pmd_t *pmdp)
-{
-	PVOP_VCALL1(mmu.pmd_clear, pmdp);
-}
-#else  /* !CONFIG_X86_PAE */
 static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
 {
 	set_pte(ptep, pte);
@@ -605,14 +504,13 @@ static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
 static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
 			     pte_t *ptep)
 {
-	set_pte_at(mm, addr, ptep, __pte(0));
+	set_pte(ptep, native_make_pte(0));
 }
 
 static inline void pmd_clear(pmd_t *pmdp)
 {
-	set_pmd(pmdp, __pmd(0));
+	set_pmd(pmdp, native_make_pmd(0));
 }
-#endif	/* CONFIG_X86_PAE */
 
 #define __HAVE_ARCH_START_CONTEXT_SWITCH
 static inline void arch_start_context_switch(struct task_struct *prev)
@@ -682,16 +580,9 @@ bool __raw_callee_save___native_vcpu_is_preempted(long cpu);
682
580
#endif /* SMP && PARAVIRT_SPINLOCKS */
683
581
684
582
#ifdef CONFIG_X86_32
685
- #define PV_SAVE_REGS "pushl %ecx; pushl %edx;"
686
- #define PV_RESTORE_REGS "popl %edx; popl %ecx;"
687
-
688
583
/* save and restore all caller-save registers, except return value */
689
584
#define PV_SAVE_ALL_CALLER_REGS "pushl %ecx;"
690
585
#define PV_RESTORE_ALL_CALLER_REGS "popl %ecx;"
691
-
692
- #define PV_FLAGS_ARG "0"
693
- #define PV_EXTRA_CLOBBERS
694
- #define PV_VEXTRA_CLOBBERS
695
586
#else
696
587
/* save and restore all caller-save registers, except return value */
697
588
#define PV_SAVE_ALL_CALLER_REGS \
@@ -712,14 +603,6 @@ bool __raw_callee_save___native_vcpu_is_preempted(long cpu);
 	"pop %rsi;"						\
 	"pop %rdx;"						\
 	"pop %rcx;"
-
-/* We save some registers, but all of them, that's too much. We clobber all
- * caller saved registers but the argument parameter */
-#define PV_SAVE_REGS "pushq %%rdi;"
-#define PV_RESTORE_REGS "popq %%rdi;"
-#define PV_EXTRA_CLOBBERS EXTRA_CLOBBERS, "rcx" , "rdx", "rsi"
-#define PV_VEXTRA_CLOBBERS EXTRA_CLOBBERS, "rdi", "rcx" , "rdx", "rsi"
-#define PV_FLAGS_ARG "D"
 #endif
 
 /*