@@ -33,15 +33,14 @@ extern char vdso_start[], vdso_end[];
 extern char vdso32_start[], vdso32_end[];
 #endif /* CONFIG_COMPAT_VDSO */
 
-/* vdso_lookup arch_index */
-enum arch_vdso_type {
-        ARM64_VDSO,
+enum vdso_abi {
+        VDSO_ABI_AA64,
 #ifdef CONFIG_COMPAT_VDSO
-        ARM64_VDSO32,
+        VDSO_ABI_AA32,
 #endif /* CONFIG_COMPAT_VDSO */
 };
 
-struct __vdso_abi {
+struct vdso_abi_info {
         const char *name;
         const char *vdso_code_start;
         const char *vdso_code_end;
@@ -52,14 +51,14 @@ struct __vdso_abi {
         struct vm_special_mapping *cm;
 };
 
-static struct __vdso_abi vdso_lookup[] __ro_after_init = {
-        [ARM64_VDSO] = {
+static struct vdso_abi_info vdso_info[] __ro_after_init = {
+        [VDSO_ABI_AA64] = {
                 .name = "vdso",
                 .vdso_code_start = vdso_start,
                 .vdso_code_end = vdso_end,
         },
 #ifdef CONFIG_COMPAT_VDSO
-        [ARM64_VDSO32] = {
+        [VDSO_ABI_AA32] = {
                 .name = "vdso32",
                 .vdso_code_start = vdso32_start,
                 .vdso_code_end = vdso32_end,
@@ -76,13 +75,13 @@ static union {
 } vdso_data_store __page_aligned_data;
 struct vdso_data *vdso_data = vdso_data_store.data;
 
-static int __vdso_remap(enum arch_vdso_type arch_index,
+static int __vdso_remap(enum vdso_abi abi,
                         const struct vm_special_mapping *sm,
                         struct vm_area_struct *new_vma)
 {
         unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
-        unsigned long vdso_size = vdso_lookup[arch_index].vdso_code_end -
-                                  vdso_lookup[arch_index].vdso_code_start;
+        unsigned long vdso_size = vdso_info[abi].vdso_code_end -
+                                  vdso_info[abi].vdso_code_start;
 
         if (vdso_size != new_size)
                 return -EINVAL;
@@ -92,24 +91,24 @@ static int __vdso_remap(enum arch_vdso_type arch_index,
         return 0;
 }
 
-static int __vdso_init(enum arch_vdso_type arch_index)
+static int __vdso_init(enum vdso_abi abi)
 {
         int i;
         struct page **vdso_pagelist;
         unsigned long pfn;
 
-        if (memcmp(vdso_lookup[arch_index].vdso_code_start, "\177ELF", 4)) {
+        if (memcmp(vdso_info[abi].vdso_code_start, "\177ELF", 4)) {
                 pr_err("vDSO is not a valid ELF object!\n");
                 return -EINVAL;
         }
 
-        vdso_lookup[arch_index].vdso_pages = (
-                vdso_lookup[arch_index].vdso_code_end -
-                vdso_lookup[arch_index].vdso_code_start) >>
+        vdso_info[abi].vdso_pages = (
+                vdso_info[abi].vdso_code_end -
+                vdso_info[abi].vdso_code_start) >>
                 PAGE_SHIFT;
 
         /* Allocate the vDSO pagelist, plus a page for the data. */
-        vdso_pagelist = kcalloc(vdso_lookup[arch_index].vdso_pages + 1,
+        vdso_pagelist = kcalloc(vdso_info[abi].vdso_pages + 1,
                                 sizeof(struct page *),
                                 GFP_KERNEL);
         if (vdso_pagelist == NULL)
@@ -120,26 +119,26 @@ static int __vdso_init(enum arch_vdso_type arch_index)
 
 
         /* Grab the vDSO code pages. */
-        pfn = sym_to_pfn(vdso_lookup[arch_index].vdso_code_start);
+        pfn = sym_to_pfn(vdso_info[abi].vdso_code_start);
 
-        for (i = 0; i < vdso_lookup[arch_index].vdso_pages; i++)
+        for (i = 0; i < vdso_info[abi].vdso_pages; i++)
                 vdso_pagelist[i + 1] = pfn_to_page(pfn + i);
 
-        vdso_lookup[arch_index].dm->pages = &vdso_pagelist[0];
-        vdso_lookup[arch_index].cm->pages = &vdso_pagelist[1];
+        vdso_info[abi].dm->pages = &vdso_pagelist[0];
+        vdso_info[abi].cm->pages = &vdso_pagelist[1];
 
         return 0;
 }
 
-static int __setup_additional_pages(enum arch_vdso_type arch_index,
+static int __setup_additional_pages(enum vdso_abi abi,
                                     struct mm_struct *mm,
                                     struct linux_binprm *bprm,
                                     int uses_interp)
 {
         unsigned long vdso_base, vdso_text_len, vdso_mapping_len;
         void *ret;
 
-        vdso_text_len = vdso_lookup[arch_index].vdso_pages << PAGE_SHIFT;
+        vdso_text_len = vdso_info[abi].vdso_pages << PAGE_SHIFT;
         /* Be sure to map the data page */
         vdso_mapping_len = vdso_text_len + PAGE_SIZE;
 
@@ -151,7 +150,7 @@ static int __setup_additional_pages(enum arch_vdso_type arch_index,
 
         ret = _install_special_mapping(mm, vdso_base, PAGE_SIZE,
                                        VM_READ|VM_MAYREAD,
-                                       vdso_lookup[arch_index].dm);
+                                       vdso_info[abi].dm);
         if (IS_ERR(ret))
                 goto up_fail;
 
@@ -160,7 +159,7 @@ static int __setup_additional_pages(enum arch_vdso_type arch_index,
         ret = _install_special_mapping(mm, vdso_base, vdso_text_len,
                                        VM_READ|VM_EXEC|
                                        VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
-                                       vdso_lookup[arch_index].cm);
+                                       vdso_info[abi].cm);
         if (IS_ERR(ret))
                 goto up_fail;
 
@@ -179,7 +178,7 @@ static int __setup_additional_pages(enum arch_vdso_type arch_index,
 static int aarch32_vdso_mremap(const struct vm_special_mapping *sm,
                                struct vm_area_struct *new_vma)
 {
-        return __vdso_remap(ARM64_VDSO32, sm, new_vma);
+        return __vdso_remap(VDSO_ABI_AA32, sm, new_vma);
 }
 #endif /* CONFIG_COMPAT_VDSO */
 
@@ -253,10 +252,10 @@ static int __aarch32_alloc_vdso_pages(void)
 {
         int ret;
 
-        vdso_lookup[ARM64_VDSO32].dm = &aarch32_vdso_spec[C_VVAR];
-        vdso_lookup[ARM64_VDSO32].cm = &aarch32_vdso_spec[C_VDSO];
+        vdso_info[VDSO_ABI_AA32].dm = &aarch32_vdso_spec[C_VVAR];
+        vdso_info[VDSO_ABI_AA32].cm = &aarch32_vdso_spec[C_VDSO];
 
-        ret = __vdso_init(ARM64_VDSO32);
+        ret = __vdso_init(VDSO_ABI_AA32);
         if (ret)
                 return ret;
 
@@ -354,7 +353,7 @@ int aarch32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
                 goto out;
 
 #ifdef CONFIG_COMPAT_VDSO
-        ret = __setup_additional_pages(ARM64_VDSO32,
+        ret = __setup_additional_pages(VDSO_ABI_AA32,
                                        mm,
                                        bprm,
                                        uses_interp);
@@ -371,7 +370,7 @@ int aarch32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 static int vdso_mremap(const struct vm_special_mapping *sm,
                        struct vm_area_struct *new_vma)
 {
-        return __vdso_remap(ARM64_VDSO, sm, new_vma);
+        return __vdso_remap(VDSO_ABI_AA64, sm, new_vma);
 }
 
 /*
@@ -394,10 +393,10 @@ static struct vm_special_mapping vdso_spec[A_PAGES] __ro_after_init = {
 
 static int __init vdso_init(void)
 {
-        vdso_lookup[ARM64_VDSO].dm = &vdso_spec[A_VVAR];
-        vdso_lookup[ARM64_VDSO].cm = &vdso_spec[A_VDSO];
+        vdso_info[VDSO_ABI_AA64].dm = &vdso_spec[A_VVAR];
+        vdso_info[VDSO_ABI_AA64].cm = &vdso_spec[A_VDSO];
 
-        return __vdso_init(ARM64_VDSO);
+        return __vdso_init(VDSO_ABI_AA64);
 }
 
 arch_initcall(vdso_init);
 
@@ -410,7 +409,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
         if (down_write_killable(&mm->mmap_sem))
                 return -EINTR;
 
-        ret = __setup_additional_pages(ARM64_VDSO,
+        ret = __setup_additional_pages(VDSO_ABI_AA64,
                                        mm,
                                        bprm,
                                        uses_interp);
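
For context, the pattern this diff converges on is a simple one: an ABI enum used as the index into a per-ABI info table, so every helper takes an enum vdso_abi and reads vdso_info[abi]. Below is a minimal userspace sketch of that shape, not kernel code: the fake_vdso64/fake_vdso32 arrays stand in for the linker-provided vdso_start/vdso32_start symbols, and vdso_size() is a hypothetical helper mirroring the size computation in __vdso_remap() and __vdso_init().

#include <stdio.h>

/* Simplified stand-ins for the kernel definitions; illustrative only. */
enum vdso_abi {
        VDSO_ABI_AA64,
        VDSO_ABI_AA32,
};

struct vdso_abi_info {
        const char *name;
        const char *vdso_code_start;
        const char *vdso_code_end;
};

/* Hypothetical stand-ins for the linker symbols vdso_start/vdso32_start. */
static char fake_vdso64[4096];
static char fake_vdso32[2048];

static struct vdso_abi_info vdso_info[] = {
        [VDSO_ABI_AA64] = {
                .name = "vdso",
                .vdso_code_start = fake_vdso64,
                .vdso_code_end = fake_vdso64 + sizeof(fake_vdso64),
        },
        [VDSO_ABI_AA32] = {
                .name = "vdso32",
                .vdso_code_start = fake_vdso32,
                .vdso_code_end = fake_vdso32 + sizeof(fake_vdso32),
        },
};

/* Hypothetical helper mirroring the post-rename shape: index the table
 * by ABI and subtract the start pointer from the end pointer. */
static long vdso_size(enum vdso_abi abi)
{
        return vdso_info[abi].vdso_code_end - vdso_info[abi].vdso_code_start;
}

int main(void)
{
        printf("%s: %ld bytes\n", vdso_info[VDSO_ABI_AA64].name,
               vdso_size(VDSO_ABI_AA64));
        printf("%s: %ld bytes\n", vdso_info[VDSO_ABI_AA32].name,
               vdso_size(VDSO_ABI_AA32));
        return 0;
}

The payoff of the rename shows in vdso_size(): vdso_info[abi] reads as "the info for this ABI", where the old vdso_lookup[arch_index] suggested a lookup keyed by architecture rather than by ABI.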