@@ -23,11 +23,6 @@ enum vvar_pages {
 	VVAR_NR_PAGES,
 };
 
-enum rv_vdso_map {
-	RV_VDSO_MAP_VVAR,
-	RV_VDSO_MAP_VDSO,
-};
-
 #define VVAR_SIZE  (VVAR_NR_PAGES << PAGE_SHIFT)
 
 static union vdso_data_store vdso_data_store __page_aligned_data;
@@ -38,8 +33,6 @@ struct __vdso_info {
 	const char *vdso_code_start;
 	const char *vdso_code_end;
 	unsigned long vdso_pages;
-	/* Data Mapping */
-	struct vm_special_mapping *dm;
 	/* Code Mapping */
 	struct vm_special_mapping *cm;
 };
@@ -92,6 +85,8 @@ struct vdso_data *arch_get_vdso_data(void *vvar_page)
 	return (struct vdso_data *)(vvar_page);
 }
 
+static const struct vm_special_mapping rv_vvar_map;
+
 /*
  * The vvar mapping contains data for a specific time namespace, so when a task
  * changes namespace we must unmap its vvar data for the old namespace.
@@ -108,12 +103,8 @@ int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
 	mmap_read_lock(mm);
 
 	for_each_vma(vmi, vma) {
-		if (vma_is_special_mapping(vma, vdso_info.dm))
-			zap_vma_pages(vma);
-#ifdef CONFIG_COMPAT
-		if (vma_is_special_mapping(vma, compat_vdso_info.dm))
+		if (vma_is_special_mapping(vma, &rv_vvar_map))
 			zap_vma_pages(vma);
-#endif
 	}
 
 	mmap_read_unlock(mm);
@@ -155,43 +146,34 @@ static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
 	return vmf_insert_pfn(vma, vmf->address, pfn);
 }
 
-static struct vm_special_mapping rv_vdso_maps[] __ro_after_init = {
-	[RV_VDSO_MAP_VVAR] = {
-		.name   = "[vvar]",
-		.fault = vvar_fault,
-	},
-	[RV_VDSO_MAP_VDSO] = {
-		.name   = "[vdso]",
-		.mremap = vdso_mremap,
-	},
+static const struct vm_special_mapping rv_vvar_map = {
+	.name   = "[vvar]",
+	.fault = vvar_fault,
+};
+
+static struct vm_special_mapping rv_vdso_map __ro_after_init = {
+	.name   = "[vdso]",
+	.mremap = vdso_mremap,
 };
 
 static struct __vdso_info vdso_info __ro_after_init = {
 	.name = "vdso",
 	.vdso_code_start = vdso_start,
 	.vdso_code_end = vdso_end,
-	.dm = &rv_vdso_maps[RV_VDSO_MAP_VVAR],
-	.cm = &rv_vdso_maps[RV_VDSO_MAP_VDSO],
+	.cm = &rv_vdso_map,
 };
 
 #ifdef CONFIG_COMPAT
-static struct vm_special_mapping rv_compat_vdso_maps[] __ro_after_init = {
-	[RV_VDSO_MAP_VVAR] = {
-		.name   = "[vvar]",
-		.fault = vvar_fault,
-	},
-	[RV_VDSO_MAP_VDSO] = {
-		.name   = "[vdso]",
-		.mremap = vdso_mremap,
-	},
+static struct vm_special_mapping rv_compat_vdso_map __ro_after_init = {
+	.name   = "[vdso]",
+	.mremap = vdso_mremap,
 };
 
 static struct __vdso_info compat_vdso_info __ro_after_init = {
 	.name = "compat_vdso",
 	.vdso_code_start = compat_vdso_start,
 	.vdso_code_end = compat_vdso_end,
-	.dm = &rv_compat_vdso_maps[RV_VDSO_MAP_VVAR],
-	.cm = &rv_compat_vdso_maps[RV_VDSO_MAP_VDSO],
+	.cm = &rv_compat_vdso_map,
 };
 
 #endif
 
@@ -227,7 +209,7 @@ static int __setup_additional_pages(struct mm_struct *mm,
 	}
 
 	ret = _install_special_mapping(mm, vdso_base, VVAR_SIZE,
-		(VM_READ | VM_MAYREAD | VM_PFNMAP), vdso_info->dm);
+		(VM_READ | VM_MAYREAD | VM_PFNMAP), &rv_vvar_map);
 	if (IS_ERR(ret))
 		goto up_fail;
 
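In short, the diff drops the per-ABI rv_vdso_maps[]/rv_compat_vdso_maps[] arrays, the enum rv_vdso_map index, and the .dm pointer in struct __vdso_info, and instead keeps one shared, read-only "[vvar]" descriptor (rv_vvar_map) next to standalone per-ABI "[vdso]" code mappings. The snippet below is a minimal user-space sketch of that resulting layout, not kernel code; the struct and variable names (mapping, vdso_info, vvar_map, native_info, compat_info) are invented purely for illustration.

/*
 * User-space sketch only (hypothetical names): one shared const "[vvar]"
 * descriptor referenced for every ABI, with only the "[vdso]" code mapping
 * kept per ABI.
 */
#include <stdio.h>

struct mapping {
	const char *name;
};

struct vdso_info {
	const char *name;
	const struct mapping *cm;	/* code mapping, still one per ABI */
};

/* Single shared vvar descriptor, analogous to rv_vvar_map in the diff. */
static const struct mapping vvar_map = { .name = "[vvar]" };

static const struct mapping vdso_map        = { .name = "[vdso]" };
static const struct mapping compat_vdso_map = { .name = "[vdso]" };

static const struct vdso_info native_info = { .name = "vdso",        .cm = &vdso_map };
static const struct vdso_info compat_info = { .name = "compat_vdso", .cm = &compat_vdso_map };

int main(void)
{
	/* Code that needs the vvar mapping refers to the one shared
	 * descriptor, much as vdso_join_timens() in the diff now compares
	 * every VMA against &rv_vvar_map for both ABIs. */
	printf("%s -> %s, %s -> %s, shared %s\n",
	       native_info.name, native_info.cm->name,
	       compat_info.name, compat_info.cm->name,
	       vvar_map.name);
	return 0;
}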