
Commit e93d252

t-8ch authored and KAGA-KOKO committed
x86/vdso: Split virtual clock pages into dedicated mapping
The generic vdso data storage cannot handle the special pvclock and hvclock
pages. Split them into their own mapping, so the other vdso storage can be
migrated to the generic code.

Signed-off-by: Thomas Weißschuh <[email protected]>
Signed-off-by: Thomas Gleixner <[email protected]>
Link: https://lore.kernel.org/all/[email protected]
1 parent 05a6b8c · commit e93d252

3 files changed, +64 −21 lines
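The split is visible from userspace: besides "[vdso]" and "[vvar]", a process on a kernel with this commit gets a separate "[vvar_vclock]" VMA covering the pvclock/hvclock pages (the names come from the vm_special_mapping definitions in the diff below). A minimal sketch of how one might check that, assuming such a kernel:

/* Print the vDSO-related mappings of the current process. */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[512];
	FILE *f = fopen("/proc/self/maps", "r");

	if (!f)
		return 1;

	while (fgets(line, sizeof(line), f)) {
		/* Expect "[vdso]", "[vvar]" and, with this patch, "[vvar_vclock]". */
		if (strstr(line, "[vdso]") || strstr(line, "[vvar"))
			fputs(line, stdout);
	}
	fclose(f);
	return 0;
}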

arch/x86/entry/vdso/vdso-layout.lds.S

Lines changed: 6 additions & 4 deletions

@@ -17,14 +17,16 @@ SECTIONS
 	 * segment.
 	 */

-	vvar_start = . - 4 * PAGE_SIZE;
+	vvar_start = . - __VVAR_PAGES * PAGE_SIZE;
 	vvar_page = vvar_start;

 	vdso_rng_data = vvar_page + __VDSO_RND_DATA_OFFSET;

-	pvclock_page = vvar_start + PAGE_SIZE;
-	hvclock_page = vvar_start + 2 * PAGE_SIZE;
-	timens_page = vvar_start + 3 * PAGE_SIZE;
+	timens_page = vvar_start + PAGE_SIZE;
+
+	vclock_pages = vvar_start + VDSO_NR_VCLOCK_PAGES * PAGE_SIZE;
+	pvclock_page = vclock_pages + VDSO_PAGE_PVCLOCK_OFFSET * PAGE_SIZE;
+	hvclock_page = vclock_pages + VDSO_PAGE_HVCLOCK_OFFSET * PAGE_SIZE;

 	. = SIZEOF_HEADERS;
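For orientation, here are the page offsets this layout produces relative to the start of the vDSO image, as a sketch that mirrors the linker-script arithmetic. It assumes PAGE_SIZE is 4096 and uses the constants added to asm/vdso/vsyscall.h further below; it is an illustration, not kernel code:

#include <stdio.h>

#define PAGE_SIZE                4096L	/* assumption for the arithmetic */
#define __VVAR_PAGES             4
#define VDSO_NR_VCLOCK_PAGES     2
#define VDSO_PAGE_PVCLOCK_OFFSET 0
#define VDSO_PAGE_HVCLOCK_OFFSET 1

int main(void)
{
	/* "." in the linker script is the vDSO image base, taken as 0 here. */
	long vvar_start   = 0 - __VVAR_PAGES * PAGE_SIZE;
	long vvar_page    = vvar_start;
	long timens_page  = vvar_start + PAGE_SIZE;
	long vclock_pages = vvar_start + VDSO_NR_VCLOCK_PAGES * PAGE_SIZE;
	long pvclock_page = vclock_pages + VDSO_PAGE_PVCLOCK_OFFSET * PAGE_SIZE;
	long hvclock_page = vclock_pages + VDSO_PAGE_HVCLOCK_OFFSET * PAGE_SIZE;

	/* Prints -16384, -12288, -8192, -8192, -4096 respectively. */
	printf("vvar_page    %ld\ntimens_page  %ld\nvclock_pages %ld\n"
	       "pvclock_page %ld\nhvclock_page %ld\n",
	       vvar_page, timens_page, vclock_pages, pvclock_page, hvclock_page);
	return 0;
}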

arch/x86/entry/vdso/vma.c

Lines changed: 53 additions & 17 deletions

@@ -24,6 +24,7 @@
 #include <asm/page.h>
 #include <asm/desc.h>
 #include <asm/cpufeature.h>
+#include <asm/vdso/vsyscall.h>
 #include <clocksource/hyperv_timer.h>

 struct vdso_data *arch_get_vdso_data(void *vvar_page)
@@ -175,19 +176,7 @@ static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
 		}

 		return vmf_insert_pfn(vma, vmf->address, pfn);
-	} else if (sym_offset == image->sym_pvclock_page) {
-		struct pvclock_vsyscall_time_info *pvti =
-			pvclock_get_pvti_cpu0_va();
-		if (pvti && vclock_was_used(VDSO_CLOCKMODE_PVCLOCK)) {
-			return vmf_insert_pfn_prot(vma, vmf->address,
-					__pa(pvti) >> PAGE_SHIFT,
-					pgprot_decrypted(vma->vm_page_prot));
-		}
-	} else if (sym_offset == image->sym_hvclock_page) {
-		pfn = hv_get_tsc_pfn();

-		if (pfn && vclock_was_used(VDSO_CLOCKMODE_HVCLOCK))
-			return vmf_insert_pfn(vma, vmf->address, pfn);
 	} else if (sym_offset == image->sym_timens_page) {
 		struct page *timens_page = find_timens_vvar_page(vma);

@@ -201,6 +190,33 @@ static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
 	return VM_FAULT_SIGBUS;
 }

+static vm_fault_t vvar_vclock_fault(const struct vm_special_mapping *sm,
+				    struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	switch (vmf->pgoff) {
+#ifdef CONFIG_PARAVIRT_CLOCK
+	case VDSO_PAGE_PVCLOCK_OFFSET:
+		struct pvclock_vsyscall_time_info *pvti =
+			pvclock_get_pvti_cpu0_va();
+		if (pvti && vclock_was_used(VDSO_CLOCKMODE_PVCLOCK))
+			return vmf_insert_pfn_prot(vma, vmf->address,
+					__pa(pvti) >> PAGE_SHIFT,
+					pgprot_decrypted(vma->vm_page_prot));
+		break;
+#endif /* CONFIG_PARAVIRT_CLOCK */
+#ifdef CONFIG_HYPERV_TIMER
+	case VDSO_PAGE_HVCLOCK_OFFSET:
+		unsigned long pfn = hv_get_tsc_pfn();
+
+		if (pfn && vclock_was_used(VDSO_CLOCKMODE_HVCLOCK))
+			return vmf_insert_pfn(vma, vmf->address, pfn);
+		break;
+#endif /* CONFIG_HYPERV_TIMER */
+	}
+
+	return VM_FAULT_SIGBUS;
+}
+
 static const struct vm_special_mapping vdso_mapping = {
 	.name = "[vdso]",
 	.fault = vdso_fault,
@@ -210,6 +226,10 @@ static const struct vm_special_mapping vvar_mapping = {
 	.name = "[vvar]",
 	.fault = vvar_fault,
 };
+static const struct vm_special_mapping vvar_vclock_mapping = {
+	.name = "[vvar_vclock]",
+	.fault = vvar_vclock_fault,
+};

 /*
  * Add vdso and vvar mappings to current process.
@@ -252,19 +272,34 @@ static int map_vdso(const struct vdso_image *image, unsigned long addr)

 	vma = _install_special_mapping(mm,
 				       addr,
-				       -image->sym_vvar_start,
+				       (__VVAR_PAGES - VDSO_NR_VCLOCK_PAGES) * PAGE_SIZE,
 				       VM_READ|VM_MAYREAD|VM_IO|VM_DONTDUMP|
 				       VM_PFNMAP,
 				       &vvar_mapping);

 	if (IS_ERR(vma)) {
 		ret = PTR_ERR(vma);
 		do_munmap(mm, text_start, image->size, NULL);
-	} else {
-		current->mm->context.vdso = (void __user *)text_start;
-		current->mm->context.vdso_image = image;
+		goto up_fail;
 	}

+	vma = _install_special_mapping(mm,
+				       addr + (__VVAR_PAGES - VDSO_NR_VCLOCK_PAGES) * PAGE_SIZE,
+				       VDSO_NR_VCLOCK_PAGES * PAGE_SIZE,
+				       VM_READ|VM_MAYREAD|VM_IO|VM_DONTDUMP|
+				       VM_PFNMAP,
+				       &vvar_vclock_mapping);
+
+	if (IS_ERR(vma)) {
+		ret = PTR_ERR(vma);
+		do_munmap(mm, text_start, image->size, NULL);
+		do_munmap(mm, addr, image->size, NULL);
+		goto up_fail;
+	}
+
+	current->mm->context.vdso = (void __user *)text_start;
+	current->mm->context.vdso_image = image;
+
 up_fail:
 	mmap_write_unlock(mm);
 	return ret;
@@ -286,7 +321,8 @@ int map_vdso_once(const struct vdso_image *image, unsigned long addr)
 	 */
 	for_each_vma(vmi, vma) {
 		if (vma_is_special_mapping(vma, &vdso_mapping) ||
-		    vma_is_special_mapping(vma, &vvar_mapping)) {
+		    vma_is_special_mapping(vma, &vvar_mapping) ||
+		    vma_is_special_mapping(vma, &vvar_vclock_mapping)) {
 			mmap_write_unlock(mm);
 			return -EEXIST;
 		}
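The property the new handler relies on is that vmf->pgoff in a special-mapping fault is the page index relative to the start of that mapping, so the two-page "[vvar_vclock]" VMA can dispatch on VDSO_PAGE_PVCLOCK_OFFSET and VDSO_PAGE_HVCLOCK_OFFSET directly instead of comparing against vdso image symbols. A stripped-down sketch of that pattern, with a hypothetical example_vclock_pfn() lookup standing in for the pvclock/hvclock specifics (not the committed code):

#include <linux/mm.h>

/* Hypothetical lookup: PFN backing page @pgoff of the mapping, 0 if unused. */
static unsigned long example_vclock_pfn(pgoff_t pgoff)
{
	return 0;	/* real code would return the pvclock/hvclock PFN here */
}

static vm_fault_t example_vclock_fault(const struct vm_special_mapping *sm,
				       struct vm_area_struct *vma,
				       struct vm_fault *vmf)
{
	/* vmf->pgoff counts pages from the start of this special mapping. */
	unsigned long pfn = example_vclock_pfn(vmf->pgoff);

	if (pfn)
		return vmf_insert_pfn(vma, vmf->address, pfn);

	/* Clock not enabled: nothing valid backs this offset. */
	return VM_FAULT_SIGBUS;
}

static const struct vm_special_mapping example_vclock_mapping = {
	.name	= "[vvar_vclock]",
	.fault	= example_vclock_fault,
};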

arch/x86/include/asm/vdso/vsyscall.h

Lines changed: 5 additions & 0 deletions

@@ -3,6 +3,11 @@
 #define __ASM_VDSO_VSYSCALL_H

 #define __VDSO_RND_DATA_OFFSET	640
+#define __VVAR_PAGES	4
+
+#define VDSO_NR_VCLOCK_PAGES	2
+#define VDSO_PAGE_PVCLOCK_OFFSET	0
+#define VDSO_PAGE_HVCLOCK_OFFSET	1

 #ifndef __ASSEMBLY__
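Tying the constants together: map_vdso() above sizes the "[vvar]" VMA as (__VVAR_PAGES - VDSO_NR_VCLOCK_PAGES) pages and the "[vvar_vclock]" VMA as VDSO_NR_VCLOCK_PAGES pages, so together the two mappings cover the full area the linker script reserves below the vDSO text. A small illustrative check, with the constants repeated from the header above:

#define __VVAR_PAGES		4
#define VDSO_NR_VCLOCK_PAGES	2

/* "[vvar]" covers the generic vdso data page and the timens page. */
_Static_assert(__VVAR_PAGES - VDSO_NR_VCLOCK_PAGES == 2,
	       "[vvar] mapping is two pages");
/* "[vvar_vclock]" covers the pvclock and hvclock pages. */
_Static_assert(VDSO_NR_VCLOCK_PAGES == 2,
	       "[vvar_vclock] mapping is two pages");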
