@@ -38,8 +38,6 @@ struct vdso_abi_info {
 	const char *vdso_code_start;
 	const char *vdso_code_end;
 	unsigned long vdso_pages;
-	/* Data Mapping */
-	struct vm_special_mapping *dm;
 	/* Code Mapping */
 	struct vm_special_mapping *cm;
 };
@@ -112,6 +110,8 @@ struct vdso_data *arch_get_vdso_data(void *vvar_page)
 	return (struct vdso_data *)(vvar_page);
 }
 
+static const struct vm_special_mapping vvar_map;
+
 /*
  * The vvar mapping contains data for a specific time namespace, so when a task
  * changes namespace we must unmap its vvar data for the old namespace.
@@ -128,12 +128,8 @@ int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
 	mmap_read_lock(mm);
 
 	for_each_vma(vmi, vma) {
-		if (vma_is_special_mapping(vma, vdso_info[VDSO_ABI_AA64].dm))
-			zap_vma_pages(vma);
-#ifdef CONFIG_COMPAT_VDSO
-		if (vma_is_special_mapping(vma, vdso_info[VDSO_ABI_AA32].dm))
+		if (vma_is_special_mapping(vma, &vvar_map))
 			zap_vma_pages(vma);
-#endif
 	}
 
 	mmap_read_unlock(mm);
@@ -175,6 +171,11 @@ static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
 	return vmf_insert_pfn(vma, vmf->address, pfn);
 }
 
+static const struct vm_special_mapping vvar_map = {
+	.name = "[vvar]",
+	.fault = vvar_fault,
+};
+
 static int __setup_additional_pages(enum vdso_abi abi,
 				    struct mm_struct *mm,
 				    struct linux_binprm *bprm,
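
The hunk above is the crux of the change: because the vvar mapping is fault-driven, vvar_fault() resolves the backing pfn per VMA at fault time, so no per-ABI state is needed and one const instance can back both the AArch64 and AArch32 vvar areas. A minimal sketch of that pattern follows; the example_* names and the single-page limit are illustrative assumptions, not code from this patch:

/*
 * Illustrative sketch only: a fault-driven vm_special_mapping needs
 * no .pages array because the backing pfn is computed on each fault.
 * "example_page" is a hypothetical placeholder for the backing page.
 */
#include <linux/mm.h>

static struct page *example_page;

static vm_fault_t example_fault(const struct vm_special_mapping *sm,
				struct vm_area_struct *vma,
				struct vm_fault *vmf)
{
	/* Assume a single-page mapping for the demo. */
	if (vmf->pgoff != 0)
		return VM_FAULT_SIGBUS;

	/* Requires the VMA to be installed with VM_PFNMAP, as above. */
	return vmf_insert_pfn(vma, vmf->address, page_to_pfn(example_page));
}

static const struct vm_special_mapping example_map = {
	.name	= "[example]",
	.fault	= example_fault,
};
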
@@ -198,7 +199,7 @@ static int __setup_additional_pages(enum vdso_abi abi,
 
 	ret = _install_special_mapping(mm, vdso_base, VVAR_NR_PAGES * PAGE_SIZE,
 				       VM_READ|VM_MAYREAD|VM_PFNMAP,
-				       vdso_info[abi].dm);
+				       &vvar_map);
 	if (IS_ERR(ret))
 		goto up_fail;
 
@@ -228,7 +229,6 @@ static int __setup_additional_pages(enum vdso_abi abi,
 enum aarch32_map {
 	AA32_MAP_VECTORS, /* kuser helpers */
 	AA32_MAP_SIGPAGE,
-	AA32_MAP_VVAR,
 	AA32_MAP_VDSO,
 };
 
@@ -253,10 +253,6 @@ static struct vm_special_mapping aarch32_vdso_maps[] = {
 		.pages = &aarch32_sig_page,
 		.mremap = aarch32_sigpage_mremap,
 	},
-	[AA32_MAP_VVAR] = {
-		.name = "[vvar]",
-		.fault = vvar_fault,
-	},
 	[AA32_MAP_VDSO] = {
 		.name = "[vdso]",
 		.mremap = vdso_mremap,
@@ -306,7 +302,6 @@ static int __init __aarch32_alloc_vdso_pages(void)
 	if (!IS_ENABLED(CONFIG_COMPAT_VDSO))
 		return 0;
 
-	vdso_info[VDSO_ABI_AA32].dm = &aarch32_vdso_maps[AA32_MAP_VVAR];
 	vdso_info[VDSO_ABI_AA32].cm = &aarch32_vdso_maps[AA32_MAP_VDSO];
 
 	return __vdso_init(VDSO_ABI_AA32);
@@ -401,26 +396,14 @@ int aarch32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 }
 #endif /* CONFIG_COMPAT */
 
-enum aarch64_map {
-	AA64_MAP_VVAR,
-	AA64_MAP_VDSO,
-};
-
-static struct vm_special_mapping aarch64_vdso_maps[] __ro_after_init = {
-	[AA64_MAP_VVAR] = {
-		.name = "[vvar]",
-		.fault = vvar_fault,
-	},
-	[AA64_MAP_VDSO] = {
-		.name = "[vdso]",
-		.mremap = vdso_mremap,
-	},
+static struct vm_special_mapping aarch64_vdso_map __ro_after_init = {
+	.name = "[vdso]",
+	.mremap = vdso_mremap,
 };
 
 static int __init vdso_init(void)
 {
-	vdso_info[VDSO_ABI_AA64].dm = &aarch64_vdso_maps[AA64_MAP_VVAR];
-	vdso_info[VDSO_ABI_AA64].cm = &aarch64_vdso_maps[AA64_MAP_VDSO];
+	vdso_info[VDSO_ABI_AA64].cm = &aarch64_vdso_map;
 
 	return __vdso_init(VDSO_ABI_AA64);
 }
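
For reference, the .name strings kept above are what userspace observes: the shared "[vvar]" mapping and the per-ABI "[vdso]" mapping appear as named entries in /proc/<pid>/maps. A small, self-contained userspace check (not part of the patch) that prints those lines for the current process:

/* Hypothetical helper, not from this commit: list the vvar/vdso
 * special mappings of the current process via procfs. The printed
 * names come from vm_special_mapping::name in the kernel. */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[512];
	FILE *maps = fopen("/proc/self/maps", "r");

	if (!maps) {
		perror("fopen");
		return 1;
	}
	while (fgets(line, sizeof(line), maps)) {
		if (strstr(line, "[vvar]") || strstr(line, "[vdso]"))
			fputs(line, stdout);
	}
	fclose(maps);
	return 0;
}
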