
Commit ee4a925

Merge tag 'x86-paravirt-2020-10-12' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 paravirt cleanup from Ingo Molnar:
 "Clean up the paravirt code after the removal of 32-bit Xen PV support"

* tag 'x86-paravirt-2020-10-12' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/paravirt: Avoid needless paravirt step clearing page table entries
  x86/paravirt: Remove set_pte_at() pv-op
  x86/entry/32: Simplify CONFIG_XEN_PV build dependency
  x86/paravirt: Use CONFIG_PARAVIRT_XXL instead of CONFIG_PARAVIRT
  x86/paravirt: Clean up paravirt macros
  x86/paravirt: Remove 32-bit support from CONFIG_PARAVIRT_XXL
2 parents ad884ff + 7c9f80c commit ee4a925
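The whole series leans on one fact: with 32-bit Xen PV gone, CONFIG_PARAVIRT_XXL (the full CPU/MMU pv-ops interface used only by Xen PV guests) is built only on 64-bit kernels, where a page-table entry value fits in a single register. The standalone C sketch below (illustrative, not kernel code) shows why every removed "sizeof(pteval_t) > sizeof(long)" branch could only ever fire on a 32-bit PAE build.

/* Standalone illustration, not kernel code: on 32-bit PAE a PTE is 64 bits
 * wide while a native long is 32 bits, so pv-op arguments had to be split
 * into low/high halves (the PVOP_CALLEE2/PVOP_VCALL3 paths removed below).
 * On a 64-bit build the value always fits in one register. */
#include <stdio.h>
#include <stdint.h>

typedef uint64_t pteval_t;	/* PAE and 64-bit page-table entries are 64 bits */

int main(void)
{
	int split_needed = sizeof(pteval_t) > sizeof(long);

	printf("sizeof(pteval_t) = %zu, sizeof(long) = %zu\n",
	       sizeof(pteval_t), sizeof(long));
	printf("argument splitting needed: %s\n",
	       split_needed ? "yes (32-bit PAE)" : "no (64-bit only)");
	return 0;
}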

18 files changed, +27 -256 lines changed

arch/x86/entry/entry_64.S

Lines changed: 2 additions & 2 deletions
@@ -46,13 +46,13 @@
 .code64
 .section .entry.text, "ax"
 
-#ifdef CONFIG_PARAVIRT
+#ifdef CONFIG_PARAVIRT_XXL
 SYM_CODE_START(native_usergs_sysret64)
 	UNWIND_HINT_EMPTY
 	swapgs
 	sysretq
 SYM_CODE_END(native_usergs_sysret64)
-#endif /* CONFIG_PARAVIRT */
+#endif /* CONFIG_PARAVIRT_XXL */
 
 /*
  * 64-bit SYSCALL instruction entry. Up to 6 arguments in registers.

arch/x86/entry/vdso/vdso32/vclock_gettime.c

Lines changed: 1 addition & 0 deletions
@@ -14,6 +14,7 @@
 #undef CONFIG_ILLEGAL_POINTER_VALUE
 #undef CONFIG_SPARSEMEM_VMEMMAP
 #undef CONFIG_NR_CPUS
+#undef CONFIG_PARAVIRT_XXL
 
 #define CONFIG_X86_32 1
 #define CONFIG_PGTABLE_LEVELS 2

arch/x86/include/asm/fixmap.h

Lines changed: 1 addition & 1 deletion
@@ -99,7 +99,7 @@ enum fixed_addresses {
 	FIX_PCIE_MCFG,
 #endif
 #endif
-#ifdef CONFIG_PARAVIRT
+#ifdef CONFIG_PARAVIRT_XXL
 	FIX_PARAVIRT_BOOTMAP,
 #endif
 #ifdef CONFIG_X86_INTEL_MID

arch/x86/include/asm/idtentry.h

Lines changed: 2 additions & 2 deletions
@@ -547,7 +547,7 @@ DECLARE_IDTENTRY_RAW(X86_TRAP_MC, exc_machine_check);
 
 /* NMI */
 DECLARE_IDTENTRY_NMI(X86_TRAP_NMI, exc_nmi);
-#if defined(CONFIG_XEN_PV) && defined(CONFIG_X86_64)
+#ifdef CONFIG_XEN_PV
 DECLARE_IDTENTRY_RAW(X86_TRAP_NMI, xenpv_exc_nmi);
 #endif
 
@@ -557,7 +557,7 @@ DECLARE_IDTENTRY_DEBUG(X86_TRAP_DB, exc_debug);
 #else
 DECLARE_IDTENTRY_RAW(X86_TRAP_DB, exc_debug);
 #endif
-#if defined(CONFIG_XEN_PV) && defined(CONFIG_X86_64)
+#ifdef CONFIG_XEN_PV
 DECLARE_IDTENTRY_RAW(X86_TRAP_DB, xenpv_exc_debug);
 #endif
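The extra defined(CONFIG_X86_64) test can go because, after the 32-bit removal, Xen PV guest support should only ever be selectable on 64-bit builds (that Kconfig dependency is an assumption here, not quoted from the tree). A minimal compile-time check expressing the invariant these simplified #ifdefs rely on might look like:

/* Illustrative only: asserts the invariant assumed by the simplified tests.
 * Assumes CONFIG_XEN_PV now depends on X86_64 in Kconfig. */
#if defined(CONFIG_XEN_PV) && !defined(CONFIG_X86_64)
# error "Xen PV guest support is 64-bit only after the 32-bit removal"
#endif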

arch/x86/include/asm/paravirt.h

Lines changed: 17 additions & 134 deletions
@@ -160,8 +160,6 @@ static inline void wbinvd(void)
 	PVOP_VCALL0(cpu.wbinvd);
 }
 
-#define get_kernel_rpl() (pv_info.kernel_rpl)
-
 static inline u64 paravirt_read_msr(unsigned msr)
 {
 	return PVOP_CALL1(u64, cpu.read_msr, msr);
@@ -277,12 +275,10 @@ static inline void load_TLS(struct thread_struct *t, unsigned cpu)
 	PVOP_VCALL2(cpu.load_tls, t, cpu);
 }
 
-#ifdef CONFIG_X86_64
 static inline void load_gs_index(unsigned int gs)
 {
 	PVOP_VCALL1(cpu.load_gs_index, gs);
 }
-#endif
 
 static inline void write_ldt_entry(struct desc_struct *dt, int entry,
 				   const void *desc)
@@ -375,52 +371,22 @@ static inline void paravirt_release_p4d(unsigned long pfn)
 
 static inline pte_t __pte(pteval_t val)
 {
-	pteval_t ret;
-
-	if (sizeof(pteval_t) > sizeof(long))
-		ret = PVOP_CALLEE2(pteval_t, mmu.make_pte, val, (u64)val >> 32);
-	else
-		ret = PVOP_CALLEE1(pteval_t, mmu.make_pte, val);
-
-	return (pte_t) { .pte = ret };
+	return (pte_t) { PVOP_CALLEE1(pteval_t, mmu.make_pte, val) };
 }
 
 static inline pteval_t pte_val(pte_t pte)
 {
-	pteval_t ret;
-
-	if (sizeof(pteval_t) > sizeof(long))
-		ret = PVOP_CALLEE2(pteval_t, mmu.pte_val,
-				   pte.pte, (u64)pte.pte >> 32);
-	else
-		ret = PVOP_CALLEE1(pteval_t, mmu.pte_val, pte.pte);
-
-	return ret;
+	return PVOP_CALLEE1(pteval_t, mmu.pte_val, pte.pte);
 }
 
 static inline pgd_t __pgd(pgdval_t val)
 {
-	pgdval_t ret;
-
-	if (sizeof(pgdval_t) > sizeof(long))
-		ret = PVOP_CALLEE2(pgdval_t, mmu.make_pgd, val, (u64)val >> 32);
-	else
-		ret = PVOP_CALLEE1(pgdval_t, mmu.make_pgd, val);
-
-	return (pgd_t) { ret };
+	return (pgd_t) { PVOP_CALLEE1(pgdval_t, mmu.make_pgd, val) };
 }
 
 static inline pgdval_t pgd_val(pgd_t pgd)
 {
-	pgdval_t ret;
-
-	if (sizeof(pgdval_t) > sizeof(long))
-		ret = PVOP_CALLEE2(pgdval_t, mmu.pgd_val,
-				   pgd.pgd, (u64)pgd.pgd >> 32);
-	else
-		ret = PVOP_CALLEE1(pgdval_t, mmu.pgd_val, pgd.pgd);
-
-	return ret;
+	return PVOP_CALLEE1(pgdval_t, mmu.pgd_val, pgd.pgd);
 }
 
 #define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
@@ -438,78 +404,34 @@ static inline void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned
 					   pte_t *ptep, pte_t old_pte, pte_t pte)
 {
 
-	if (sizeof(pteval_t) > sizeof(long))
-		/* 5 arg words */
-		pv_ops.mmu.ptep_modify_prot_commit(vma, addr, ptep, pte);
-	else
-		PVOP_VCALL4(mmu.ptep_modify_prot_commit,
-			    vma, addr, ptep, pte.pte);
+	PVOP_VCALL4(mmu.ptep_modify_prot_commit, vma, addr, ptep, pte.pte);
 }
 
 static inline void set_pte(pte_t *ptep, pte_t pte)
 {
-	if (sizeof(pteval_t) > sizeof(long))
-		PVOP_VCALL3(mmu.set_pte, ptep, pte.pte, (u64)pte.pte >> 32);
-	else
-		PVOP_VCALL2(mmu.set_pte, ptep, pte.pte);
-}
-
-static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
-			      pte_t *ptep, pte_t pte)
-{
-	if (sizeof(pteval_t) > sizeof(long))
-		/* 5 arg words */
-		pv_ops.mmu.set_pte_at(mm, addr, ptep, pte);
-	else
-		PVOP_VCALL4(mmu.set_pte_at, mm, addr, ptep, pte.pte);
+	PVOP_VCALL2(mmu.set_pte, ptep, pte.pte);
 }
 
 static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
 {
-	pmdval_t val = native_pmd_val(pmd);
-
-	if (sizeof(pmdval_t) > sizeof(long))
-		PVOP_VCALL3(mmu.set_pmd, pmdp, val, (u64)val >> 32);
-	else
-		PVOP_VCALL2(mmu.set_pmd, pmdp, val);
+	PVOP_VCALL2(mmu.set_pmd, pmdp, native_pmd_val(pmd));
 }
 
-#if CONFIG_PGTABLE_LEVELS >= 3
 static inline pmd_t __pmd(pmdval_t val)
 {
-	pmdval_t ret;
-
-	if (sizeof(pmdval_t) > sizeof(long))
-		ret = PVOP_CALLEE2(pmdval_t, mmu.make_pmd, val, (u64)val >> 32);
-	else
-		ret = PVOP_CALLEE1(pmdval_t, mmu.make_pmd, val);
-
-	return (pmd_t) { ret };
+	return (pmd_t) { PVOP_CALLEE1(pmdval_t, mmu.make_pmd, val) };
 }
 
 static inline pmdval_t pmd_val(pmd_t pmd)
 {
-	pmdval_t ret;
-
-	if (sizeof(pmdval_t) > sizeof(long))
-		ret = PVOP_CALLEE2(pmdval_t, mmu.pmd_val,
-				   pmd.pmd, (u64)pmd.pmd >> 32);
-	else
-		ret = PVOP_CALLEE1(pmdval_t, mmu.pmd_val, pmd.pmd);
-
-	return ret;
+	return PVOP_CALLEE1(pmdval_t, mmu.pmd_val, pmd.pmd);
 }
 
 static inline void set_pud(pud_t *pudp, pud_t pud)
 {
-	pudval_t val = native_pud_val(pud);
-
-	if (sizeof(pudval_t) > sizeof(long))
-		PVOP_VCALL3(mmu.set_pud, pudp, val, (u64)val >> 32);
-	else
-		PVOP_VCALL2(mmu.set_pud, pudp, val);
+	PVOP_VCALL2(mmu.set_pud, pudp, native_pud_val(pud));
 }
-#if CONFIG_PGTABLE_LEVELS >= 4
+
 static inline pud_t __pud(pudval_t val)
 {
 	pudval_t ret;
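The set_pte_at() pv-op disappears in the hunk above, presumably because no implementation needed the extra mm/address arguments to be interposable. A plausible shape of the plain wrapper callers are left with, sketched under that assumption rather than quoted from arch/x86/include/asm/pgtable.h:

/* Sketch: with the pv-op gone, set_pte_at() can simply forward to set_pte(),
 * ignoring the mm and address arguments it no longer passes along. */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pte)
{
	set_pte(ptep, pte);
}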
@@ -526,7 +448,7 @@ static inline pudval_t pud_val(pud_t pud)
 
 static inline void pud_clear(pud_t *pudp)
 {
-	set_pud(pudp, __pud(0));
+	set_pud(pudp, native_make_pud(0));
 }
 
 static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
@@ -563,40 +485,17 @@ static inline void __set_pgd(pgd_t *pgdp, pgd_t pgd)
 } while (0)
 
 #define pgd_clear(pgdp) do {					\
-	if (pgtable_l5_enabled())				\
-		set_pgd(pgdp, __pgd(0));			\
+	if (pgtable_l5_enabled())				\
+		set_pgd(pgdp, native_make_pgd(0));		\
 } while (0)
 
 #endif  /* CONFIG_PGTABLE_LEVELS == 5 */
 
 static inline void p4d_clear(p4d_t *p4dp)
 {
-	set_p4d(p4dp, __p4d(0));
+	set_p4d(p4dp, native_make_p4d(0));
 }
 
-#endif	/* CONFIG_PGTABLE_LEVELS == 4 */
-
-#endif	/* CONFIG_PGTABLE_LEVELS >= 3 */
-
-#ifdef CONFIG_X86_PAE
-/* Special-case pte-setting operations for PAE, which can't update a
-   64-bit pte atomically */
-static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
-{
-	PVOP_VCALL3(mmu.set_pte_atomic, ptep, pte.pte, pte.pte >> 32);
-}
-
-static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
-			     pte_t *ptep)
-{
-	PVOP_VCALL3(mmu.pte_clear, mm, addr, ptep);
-}
-
-static inline void pmd_clear(pmd_t *pmdp)
-{
-	PVOP_VCALL1(mmu.pmd_clear, pmdp);
-}
-#else  /* !CONFIG_X86_PAE */
 static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
 {
 	set_pte(ptep, pte);
@@ -605,14 +504,13 @@ static inline void set_pte_atomic(pte_t *ptep, pte_t pte)
 static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
 			     pte_t *ptep)
 {
-	set_pte_at(mm, addr, ptep, __pte(0));
+	set_pte(ptep, native_make_pte(0));
 }
 
 static inline void pmd_clear(pmd_t *pmdp)
 {
-	set_pmd(pmdp, __pmd(0));
+	set_pmd(pmdp, native_make_pmd(0));
 }
-#endif	/* CONFIG_X86_PAE */
 
 #define __HAVE_ARCH_START_CONTEXT_SWITCH
 static inline void arch_start_context_switch(struct task_struct *prev)
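The switch from __pte(0)/__pmd(0) to native_make_pte(0)/native_make_pmd(0) in the hunks above is the "avoid needless paravirt step clearing page table entries" part of the series: a zero entry needs no translation by the make_pte/make_pmd pv-ops, so clearing can construct the value directly. Roughly, as a simplified sketch of the native constructors (the real definitions live in pgtable_types.h):

/* Simplified sketch of the native constructors used above: they just wrap
 * the raw value with no pv-op involved, which is all a zero entry needs. */
static inline pte_t native_make_pte(pteval_t val)
{
	return (pte_t) { .pte = val };
}

static inline pmd_t native_make_pmd(pmdval_t val)
{
	return (pmd_t) { .pmd = val };
}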
@@ -682,16 +580,9 @@ bool __raw_callee_save___native_vcpu_is_preempted(long cpu);
 #endif /* SMP && PARAVIRT_SPINLOCKS */
 
 #ifdef CONFIG_X86_32
-#define PV_SAVE_REGS "pushl %ecx; pushl %edx;"
-#define PV_RESTORE_REGS "popl %edx; popl %ecx;"
-
 /* save and restore all caller-save registers, except return value */
 #define PV_SAVE_ALL_CALLER_REGS		"pushl %ecx;"
 #define PV_RESTORE_ALL_CALLER_REGS	"popl %ecx;"
-
-#define PV_FLAGS_ARG "0"
-#define PV_EXTRA_CLOBBERS
-#define PV_VEXTRA_CLOBBERS
 #else
 /* save and restore all caller-save registers, except return value */
 #define PV_SAVE_ALL_CALLER_REGS						\
@@ -712,14 +603,6 @@ bool __raw_callee_save___native_vcpu_is_preempted(long cpu);
 	"pop %rsi;"							\
 	"pop %rdx;"							\
 	"pop %rcx;"
-
-/* We save some registers, but all of them, that's too much. We clobber all
- * caller saved registers but the argument parameter */
-#define PV_SAVE_REGS "pushq %%rdi;"
-#define PV_RESTORE_REGS "popq %%rdi;"
-#define PV_EXTRA_CLOBBERS EXTRA_CLOBBERS, "rcx" , "rdx", "rsi"
-#define PV_VEXTRA_CLOBBERS EXTRA_CLOBBERS, "rdi", "rcx" , "rdx", "rsi"
-#define PV_FLAGS_ARG "D"
 #endif
 
 /*

arch/x86/include/asm/paravirt_types.h

Lines changed: 0 additions & 23 deletions
@@ -68,12 +68,7 @@ struct paravirt_callee_save {
 /* general info */
 struct pv_info {
 #ifdef CONFIG_PARAVIRT_XXL
-	unsigned int kernel_rpl;
-	int shared_kernel_pmd;
-
-#ifdef CONFIG_X86_64
 	u16 extra_user_64bit_cs;  /* __USER_CS if none */
-#endif
 #endif
 
 	const char *name;
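pv_info.kernel_rpl existed because a 32-bit Xen PV kernel ran in ring 1 rather than ring 0, so the requested privilege level of kernel segment selectors was not a compile-time constant; shared_kernel_pmd likewise only varied for 32-bit Xen PV's PAE page tables. With 64-bit-only PARAVIRT_XXL the kernel always runs in ring 0. A standalone sketch of the selector arithmetic (made-up selector values, not kernel code):

/* Standalone illustration: the RPL is the low two bits of a segment selector.
 * A 64-bit kernel always runs in ring 0, so get_kernel_rpl() no longer needs
 * a pv_info lookup; the selector values below are invented for the demo. */
#include <stdio.h>

int main(void)
{
	unsigned short ring0_cs = (2 << 3) | 0;	/* GDT index 2, RPL 0 (native/64-bit) */
	unsigned short ring1_cs = (2 << 3) | 1;	/* RPL 1, as 32-bit Xen PV used */

	printf("RPL of ring-0 kernel CS: %d\n", ring0_cs & 3);
	printf("RPL of ring-1 kernel CS: %d\n", ring1_cs & 3);
	return 0;
}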
@@ -126,9 +121,7 @@ struct pv_cpu_ops {
 	void (*set_ldt)(const void *desc, unsigned entries);
 	unsigned long (*store_tr)(void);
 	void (*load_tls)(struct thread_struct *t, unsigned int cpu);
-#ifdef CONFIG_X86_64
 	void (*load_gs_index)(unsigned int idx);
-#endif
 	void (*write_ldt_entry)(struct desc_struct *ldt, int entrynum,
 				const void *desc);
 	void (*write_gdt_entry)(struct desc_struct *,
@@ -249,8 +242,6 @@ struct pv_mmu_ops {
 
 	/* Pagetable manipulation functions */
 	void (*set_pte)(pte_t *ptep, pte_t pteval);
-	void (*set_pte_at)(struct mm_struct *mm, unsigned long addr,
-			   pte_t *ptep, pte_t pteval);
 	void (*set_pmd)(pmd_t *pmdp, pmd_t pmdval);
 
 	pte_t (*ptep_modify_prot_start)(struct vm_area_struct *vma, unsigned long addr,
@@ -264,21 +255,11 @@ struct pv_mmu_ops {
 	struct paravirt_callee_save pgd_val;
 	struct paravirt_callee_save make_pgd;
 
-#if CONFIG_PGTABLE_LEVELS >= 3
-#ifdef CONFIG_X86_PAE
-	void (*set_pte_atomic)(pte_t *ptep, pte_t pteval);
-	void (*pte_clear)(struct mm_struct *mm, unsigned long addr,
-			  pte_t *ptep);
-	void (*pmd_clear)(pmd_t *pmdp);
-
-#endif	/* CONFIG_X86_PAE */
-
 	void (*set_pud)(pud_t *pudp, pud_t pudval);
 
 	struct paravirt_callee_save pmd_val;
 	struct paravirt_callee_save make_pmd;
 
-#if CONFIG_PGTABLE_LEVELS >= 4
 	struct paravirt_callee_save pud_val;
 	struct paravirt_callee_save make_pud;
 
@@ -291,10 +272,6 @@ struct pv_mmu_ops {
 	void (*set_pgd)(pgd_t *pgdp, pgd_t pgdval);
 #endif	/* CONFIG_PGTABLE_LEVELS >= 5 */
 
-#endif	/* CONFIG_PGTABLE_LEVELS >= 4 */
-
-#endif	/* CONFIG_PGTABLE_LEVELS >= 3 */
-
 	struct pv_lazy_ops lazy_mode;
 
 	/* dom0 ops */

arch/x86/include/asm/pgtable-3level_types.h

Lines changed: 0 additions & 5 deletions
@@ -20,12 +20,7 @@ typedef union {
 } pte_t;
 #endif	/* !__ASSEMBLY__ */
 
-#ifdef CONFIG_PARAVIRT_XXL
-#define SHARED_KERNEL_PMD	((!static_cpu_has(X86_FEATURE_PTI) &&	\
-				 (pv_info.shared_kernel_pmd)))
-#else
 #define SHARED_KERNEL_PMD	(!static_cpu_has(X86_FEATURE_PTI))
-#endif
 
 #define ARCH_PAGE_TABLE_SYNC_MASK	(SHARED_KERNEL_PMD ? 0 : PGTBL_PMD_MODIFIED)
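The pv_info.shared_kernel_pmd override removed above only mattered for 32-bit Xen PV, which did not share the kernel PMD across processes; with it gone, whether the kernel PMD is shared depends solely on page-table isolation. A standalone sketch of the resulting predicate, with a plain variable standing in for the cpu-feature check:

/* Standalone illustration of the simplified SHARED_KERNEL_PMD logic: the
 * pv_info.shared_kernel_pmd term is gone, leaving only the PTI check.
 * "pti_enabled" stands in for static_cpu_has(X86_FEATURE_PTI). */
#include <stdbool.h>
#include <stdio.h>

static bool pti_enabled = true;

static bool shared_kernel_pmd(void)
{
	return !pti_enabled;
}

int main(void)
{
	printf("SHARED_KERNEL_PMD = %d\n", shared_kernel_pmd());
	return 0;
}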
