Skip to content

Commit d3418f3

Browse files
mrutland-arm authored and willdeacon committed
arm64: vdso: use consistent 'abi' nomenclature
The current code doesn't use a consistent naming scheme for structures, enums, or variables, making it harder than necessary to determine the relationship between these. Let's make this easier by consistently using 'vdso_abi' nomenclature. The 'vdso_lookup' array is renamed to 'vdso_info' to describe what it contains rather than how it is consumed. There should be no functional change as a result of this patch. Signed-off-by: Mark Rutland <[email protected]> Cc: Catalin Marinas <[email protected]> Cc: Vincenzo Frascino <[email protected]> Cc: Will Deacon <[email protected]> Link: https://lore.kernel.org/r/[email protected] Signed-off-by: Will Deacon <[email protected]>
1 parent 3ee16ff commit d3418f3

File tree

1 file changed

+34
-35
lines changed

1 file changed

+34
-35
lines changed

arch/arm64/kernel/vdso.c

Lines changed: 34 additions & 35 deletions
Original file line number | Diff line number | Diff line change
@@ -33,15 +33,14 @@ extern char vdso_start[], vdso_end[];
3333
extern char vdso32_start[], vdso32_end[];
3434
#endif /* CONFIG_COMPAT_VDSO */
3535

36-
/* vdso_lookup arch_index */
37-
enum arch_vdso_type {
38-
ARM64_VDSO,
36+
enum vdso_abi {
37+
VDSO_ABI_AA64,
3938
#ifdef CONFIG_COMPAT_VDSO
40-
ARM64_VDSO32,
39+
VDSO_ABI_AA32,
4140
#endif /* CONFIG_COMPAT_VDSO */
4241
};
4342

44-
struct __vdso_abi {
43+
struct vdso_abi_info {
4544
const char *name;
4645
const char *vdso_code_start;
4746
const char *vdso_code_end;
@@ -52,14 +51,14 @@ struct __vdso_abi {
5251
struct vm_special_mapping *cm;
5352
};
5453

55-
static struct __vdso_abi vdso_lookup[] __ro_after_init = {
56-
[ARM64_VDSO] = {
54+
static struct vdso_abi_info vdso_info[] __ro_after_init = {
55+
[VDSO_ABI_AA64] = {
5756
.name = "vdso",
5857
.vdso_code_start = vdso_start,
5958
.vdso_code_end = vdso_end,
6059
},
6160
#ifdef CONFIG_COMPAT_VDSO
62-
[ARM64_VDSO32] = {
61+
[VDSO_ABI_AA32] = {
6362
.name = "vdso32",
6463
.vdso_code_start = vdso32_start,
6564
.vdso_code_end = vdso32_end,
@@ -76,13 +75,13 @@ static union {
7675
} vdso_data_store __page_aligned_data;
7776
struct vdso_data *vdso_data = vdso_data_store.data;
7877

79-
static int __vdso_remap(enum arch_vdso_type arch_index,
78+
static int __vdso_remap(enum vdso_abi abi,
8079
const struct vm_special_mapping *sm,
8180
struct vm_area_struct *new_vma)
8281
{
8382
unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
84-
unsigned long vdso_size = vdso_lookup[arch_index].vdso_code_end -
85-
vdso_lookup[arch_index].vdso_code_start;
83+
unsigned long vdso_size = vdso_info[abi].vdso_code_end -
84+
vdso_info[abi].vdso_code_start;
8685

8786
if (vdso_size != new_size)
8887
return -EINVAL;
@@ -92,24 +91,24 @@ static int __vdso_remap(enum arch_vdso_type arch_index,
9291
return 0;
9392
}
9493

95-
static int __vdso_init(enum arch_vdso_type arch_index)
94+
static int __vdso_init(enum vdso_abi abi)
9695
{
9796
int i;
9897
struct page **vdso_pagelist;
9998
unsigned long pfn;
10099

101-
if (memcmp(vdso_lookup[arch_index].vdso_code_start, "\177ELF", 4)) {
100+
if (memcmp(vdso_info[abi].vdso_code_start, "\177ELF", 4)) {
102101
pr_err("vDSO is not a valid ELF object!\n");
103102
return -EINVAL;
104103
}
105104

106-
vdso_lookup[arch_index].vdso_pages = (
107-
vdso_lookup[arch_index].vdso_code_end -
108-
vdso_lookup[arch_index].vdso_code_start) >>
105+
vdso_info[abi].vdso_pages = (
106+
vdso_info[abi].vdso_code_end -
107+
vdso_info[abi].vdso_code_start) >>
109108
PAGE_SHIFT;
110109

111110
/* Allocate the vDSO pagelist, plus a page for the data. */
112-
vdso_pagelist = kcalloc(vdso_lookup[arch_index].vdso_pages + 1,
111+
vdso_pagelist = kcalloc(vdso_info[abi].vdso_pages + 1,
113112
sizeof(struct page *),
114113
GFP_KERNEL);
115114
if (vdso_pagelist == NULL)
@@ -120,26 +119,26 @@ static int __vdso_init(enum arch_vdso_type arch_index)
120119

121120

122121
/* Grab the vDSO code pages. */
123-
pfn = sym_to_pfn(vdso_lookup[arch_index].vdso_code_start);
122+
pfn = sym_to_pfn(vdso_info[abi].vdso_code_start);
124123

125-
for (i = 0; i < vdso_lookup[arch_index].vdso_pages; i++)
124+
for (i = 0; i < vdso_info[abi].vdso_pages; i++)
126125
vdso_pagelist[i + 1] = pfn_to_page(pfn + i);
127126

128-
vdso_lookup[arch_index].dm->pages = &vdso_pagelist[0];
129-
vdso_lookup[arch_index].cm->pages = &vdso_pagelist[1];
127+
vdso_info[abi].dm->pages = &vdso_pagelist[0];
128+
vdso_info[abi].cm->pages = &vdso_pagelist[1];
130129

131130
return 0;
132131
}
133132

134-
static int __setup_additional_pages(enum arch_vdso_type arch_index,
133+
static int __setup_additional_pages(enum vdso_abi abi,
135134
struct mm_struct *mm,
136135
struct linux_binprm *bprm,
137136
int uses_interp)
138137
{
139138
unsigned long vdso_base, vdso_text_len, vdso_mapping_len;
140139
void *ret;
141140

142-
vdso_text_len = vdso_lookup[arch_index].vdso_pages << PAGE_SHIFT;
141+
vdso_text_len = vdso_info[abi].vdso_pages << PAGE_SHIFT;
143142
/* Be sure to map the data page */
144143
vdso_mapping_len = vdso_text_len + PAGE_SIZE;
145144

@@ -151,7 +150,7 @@ static int __setup_additional_pages(enum arch_vdso_type arch_index,
151150

152151
ret = _install_special_mapping(mm, vdso_base, PAGE_SIZE,
153152
VM_READ|VM_MAYREAD,
154-
vdso_lookup[arch_index].dm);
153+
vdso_info[abi].dm);
155154
if (IS_ERR(ret))
156155
goto up_fail;
157156

@@ -160,7 +159,7 @@ static int __setup_additional_pages(enum arch_vdso_type arch_index,
160159
ret = _install_special_mapping(mm, vdso_base, vdso_text_len,
161160
VM_READ|VM_EXEC|
162161
VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
163-
vdso_lookup[arch_index].cm);
162+
vdso_info[abi].cm);
164163
if (IS_ERR(ret))
165164
goto up_fail;
166165

@@ -179,7 +178,7 @@ static int __setup_additional_pages(enum arch_vdso_type arch_index,
179178
static int aarch32_vdso_mremap(const struct vm_special_mapping *sm,
180179
struct vm_area_struct *new_vma)
181180
{
182-
return __vdso_remap(ARM64_VDSO32, sm, new_vma);
181+
return __vdso_remap(VDSO_ABI_AA32, sm, new_vma);
183182
}
184183
#endif /* CONFIG_COMPAT_VDSO */
185184

@@ -253,10 +252,10 @@ static int __aarch32_alloc_vdso_pages(void)
253252
{
254253
int ret;
255254

256-
vdso_lookup[ARM64_VDSO32].dm = &aarch32_vdso_spec[C_VVAR];
257-
vdso_lookup[ARM64_VDSO32].cm = &aarch32_vdso_spec[C_VDSO];
255+
vdso_info[VDSO_ABI_AA32].dm = &aarch32_vdso_spec[C_VVAR];
256+
vdso_info[VDSO_ABI_AA32].cm = &aarch32_vdso_spec[C_VDSO];
258257

259-
ret = __vdso_init(ARM64_VDSO32);
258+
ret = __vdso_init(VDSO_ABI_AA32);
260259
if (ret)
261260
return ret;
262261

@@ -354,7 +353,7 @@ int aarch32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
354353
goto out;
355354

356355
#ifdef CONFIG_COMPAT_VDSO
357-
ret = __setup_additional_pages(ARM64_VDSO32,
356+
ret = __setup_additional_pages(VDSO_ABI_AA32,
358357
mm,
359358
bprm,
360359
uses_interp);
@@ -371,7 +370,7 @@ int aarch32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
371370
static int vdso_mremap(const struct vm_special_mapping *sm,
372371
struct vm_area_struct *new_vma)
373372
{
374-
return __vdso_remap(ARM64_VDSO, sm, new_vma);
373+
return __vdso_remap(VDSO_ABI_AA64, sm, new_vma);
375374
}
376375

377376
/*
@@ -394,10 +393,10 @@ static struct vm_special_mapping vdso_spec[A_PAGES] __ro_after_init = {
394393

395394
static int __init vdso_init(void)
396395
{
397-
vdso_lookup[ARM64_VDSO].dm = &vdso_spec[A_VVAR];
398-
vdso_lookup[ARM64_VDSO].cm = &vdso_spec[A_VDSO];
396+
vdso_info[VDSO_ABI_AA64].dm = &vdso_spec[A_VVAR];
397+
vdso_info[VDSO_ABI_AA64].cm = &vdso_spec[A_VDSO];
399398

400-
return __vdso_init(ARM64_VDSO);
399+
return __vdso_init(VDSO_ABI_AA64);
401400
}
402401
arch_initcall(vdso_init);
403402

@@ -410,7 +409,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm,
410409
if (down_write_killable(&mm->mmap_sem))
411410
return -EINTR;
412411

413-
ret = __setup_additional_pages(ARM64_VDSO,
412+
ret = __setup_additional_pages(VDSO_ABI_AA64,
414413
mm,
415414
bprm,
416415
uses_interp);

0 commit comments

Comments (0)