@@ -24,6 +24,7 @@
 #include <asm/page.h>
 #include <asm/desc.h>
 #include <asm/cpufeature.h>
+#include <asm/vdso/vsyscall.h>
 #include <clocksource/hyperv_timer.h>
 
 struct vdso_data *arch_get_vdso_data(void *vvar_page)
@@ -175,19 +176,7 @@ static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
 		}
 
 		return vmf_insert_pfn(vma, vmf->address, pfn);
-	} else if (sym_offset == image->sym_pvclock_page) {
-		struct pvclock_vsyscall_time_info *pvti =
-			pvclock_get_pvti_cpu0_va();
-		if (pvti && vclock_was_used(VDSO_CLOCKMODE_PVCLOCK)) {
-			return vmf_insert_pfn_prot(vma, vmf->address,
-					__pa(pvti) >> PAGE_SHIFT,
-					pgprot_decrypted(vma->vm_page_prot));
-		}
-	} else if (sym_offset == image->sym_hvclock_page) {
-		pfn = hv_get_tsc_pfn();
 
-		if (pfn && vclock_was_used(VDSO_CLOCKMODE_HVCLOCK))
-			return vmf_insert_pfn(vma, vmf->address, pfn);
 	} else if (sym_offset == image->sym_timens_page) {
 		struct page *timens_page = find_timens_vvar_page(vma);
 
@@ -201,6 +190,37 @@ static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
 	return VM_FAULT_SIGBUS;
 }
 
+static vm_fault_t vvar_vclock_fault(const struct vm_special_mapping *sm,
+				    struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	switch (vmf->pgoff) {
+#ifdef CONFIG_PARAVIRT_CLOCK
+	case VDSO_PAGE_PVCLOCK_OFFSET:
+	{
+		struct pvclock_vsyscall_time_info *pvti =
+			pvclock_get_pvti_cpu0_va();
+		if (pvti && vclock_was_used(VDSO_CLOCKMODE_PVCLOCK))
+			return vmf_insert_pfn_prot(vma, vmf->address,
+					__pa(pvti) >> PAGE_SHIFT,
+					pgprot_decrypted(vma->vm_page_prot));
+		break;
+	}
+#endif /* CONFIG_PARAVIRT_CLOCK */
+#ifdef CONFIG_HYPERV_TIMER
+	case VDSO_PAGE_HVCLOCK_OFFSET:
+	{
+		unsigned long pfn = hv_get_tsc_pfn();
+
+		if (pfn && vclock_was_used(VDSO_CLOCKMODE_HVCLOCK))
+			return vmf_insert_pfn(vma, vmf->address, pfn);
+		break;
+	}
+#endif /* CONFIG_HYPERV_TIMER */
+	}
+
+	return VM_FAULT_SIGBUS;
+}
+
 static const struct vm_special_mapping vdso_mapping = {
 	.name = "[vdso]",
 	.fault = vdso_fault,
@@ -210,6 +226,10 @@ static const struct vm_special_mapping vvar_mapping = {
 	.name = "[vvar]",
 	.fault = vvar_fault,
 };
+static const struct vm_special_mapping vvar_vclock_mapping = {
+	.name = "[vvar_vclock]",
+	.fault = vvar_vclock_fault,
+};
 
 /*
  * Add vdso and vvar mappings to current process.
@@ -252,19 +272,34 @@ static int map_vdso(const struct vdso_image *image, unsigned long addr)
 
 	vma = _install_special_mapping(mm,
 				       addr,
-				       -image->sym_vvar_start,
+				       (__VVAR_PAGES - VDSO_NR_VCLOCK_PAGES) * PAGE_SIZE,
 				       VM_READ|VM_MAYREAD|VM_IO|VM_DONTDUMP|
 				       VM_PFNMAP,
 				       &vvar_mapping);
 
 	if (IS_ERR(vma)) {
 		ret = PTR_ERR(vma);
 		do_munmap(mm, text_start, image->size, NULL);
-	} else {
-		current->mm->context.vdso = (void __user *)text_start;
-		current->mm->context.vdso_image = image;
+		goto up_fail;
 	}
 
+	vma = _install_special_mapping(mm,
+				       addr + (__VVAR_PAGES - VDSO_NR_VCLOCK_PAGES) * PAGE_SIZE,
+				       VDSO_NR_VCLOCK_PAGES * PAGE_SIZE,
+				       VM_READ|VM_MAYREAD|VM_IO|VM_DONTDUMP|
+				       VM_PFNMAP,
+				       &vvar_vclock_mapping);
+
+	if (IS_ERR(vma)) {
+		ret = PTR_ERR(vma);
+		do_munmap(mm, text_start, image->size, NULL);
+		do_munmap(mm, addr, image->size, NULL);
+		goto up_fail;
+	}
+
+	current->mm->context.vdso = (void __user *)text_start;
+	current->mm->context.vdso_image = image;
+
 up_fail:
 	mmap_write_unlock(mm);
 	return ret;
@@ -286,7 +321,8 @@ int map_vdso_once(const struct vdso_image *image, unsigned long addr)
 	 */
 	for_each_vma(vmi, vma) {
 		if (vma_is_special_mapping(vma, &vdso_mapping) ||
-		    vma_is_special_mapping(vma, &vvar_mapping)) {
+		    vma_is_special_mapping(vma, &vvar_mapping) ||
+		    vma_is_special_mapping(vma, &vvar_vclock_mapping)) {
			mmap_write_unlock(mm);
			return -EEXIST;
		}
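
For orientation, here is a minimal user-space sketch (not part of the patch) of the address arithmetic the two _install_special_mapping() calls above rely on: the vvar area is split so its last VDSO_NR_VCLOCK_PAGES pages form a second VMA with its own fault handler. The page counts and base address below are hypothetical stand-ins for the kernel's __VVAR_PAGES and VDSO_NR_VCLOCK_PAGES, chosen only to make the sketch self-contained.

/*
 * Illustrative only: a user-space model of the [vvar] / [vvar_vclock]
 * split. VVAR_PAGES and NR_VCLOCK_PAGES are made-up stand-ins for the
 * kernel constants __VVAR_PAGES and VDSO_NR_VCLOCK_PAGES.
 */
#include <stdio.h>

#define PAGE_SIZE       4096UL
#define VVAR_PAGES      4UL	/* stand-in for __VVAR_PAGES */
#define NR_VCLOCK_PAGES 2UL	/* stand-in for VDSO_NR_VCLOCK_PAGES */

int main(void)
{
	unsigned long addr = 0x7fffde5f3000UL;	/* hypothetical vvar base */

	/* First VMA ("[vvar]"): plain vvar pages, served by vvar_fault(). */
	unsigned long vvar_len = (VVAR_PAGES - NR_VCLOCK_PAGES) * PAGE_SIZE;

	/* Second VMA ("[vvar_vclock]"): served by vvar_vclock_fault(). */
	unsigned long vclock_start = addr + vvar_len;
	unsigned long vclock_len = NR_VCLOCK_PAGES * PAGE_SIZE;

	printf("[vvar]        %#lx-%#lx\n", addr, addr + vvar_len);
	printf("[vvar_vclock] %#lx-%#lx\n", vclock_start, vclock_start + vclock_len);

	/*
	 * A fault in the second VMA sees vmf->pgoff counted from
	 * vclock_start (special mappings start at page offset zero),
	 * which is what lets vvar_vclock_fault() switch directly on
	 * VDSO_PAGE_PVCLOCK_OFFSET and VDSO_PAGE_HVCLOCK_OFFSET.
	 */
	return 0;
}

The payoff of the split is visible in the diff itself: the generic vvar_fault() no longer needs the image->sym_pvclock_page / sym_hvclock_page lookups, and the vclock pages get their own fixed pgoff-indexed dispatch in a dedicated mapping.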