Commit 1d09094a authored by Mark Rutland, committed by Will Deacon

arm64: vdso: use consistent 'map' nomenclature

The current code doesn't use a consistent naming scheme for structures,
enums, or variables, making it harder than necessary to determine the
relationship between these.

Let's make this easier by consistently using 'map' nomenclature for
mappings created in userspace, minimizing redundant comments, and
using designated array initializers to tie indices to their respective
elements.

There should be no functional change as a result of this patch.
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Vincenzo Frascino <vincenzo.frascino@arm.com>
Cc: Will Deacon <will@kernel.org>
Link: https://lore.kernel.org/r/20200428164921.41641-5-mark.rutland@arm.com
Signed-off-by: Will Deacon <will@kernel.org>
parent d3418f38
...@@ -182,45 +182,36 @@ static int aarch32_vdso_mremap(const struct vm_special_mapping *sm, ...@@ -182,45 +182,36 @@ static int aarch32_vdso_mremap(const struct vm_special_mapping *sm,
} }
#endif /* CONFIG_COMPAT_VDSO */ #endif /* CONFIG_COMPAT_VDSO */
/* enum aarch32_map {
* aarch32_vdso_pages: AA32_MAP_VECTORS, /* kuser helpers */
* 0 - kuser helpers
* 1 - sigreturn code
* or (CONFIG_COMPAT_VDSO):
* 0 - kuser helpers
* 1 - vdso data
* 2 - vdso code
*/
#define C_VECTORS 0
#ifdef CONFIG_COMPAT_VDSO #ifdef CONFIG_COMPAT_VDSO
#define C_VVAR 1 AA32_MAP_VVAR,
#define C_VDSO 2 AA32_MAP_VDSO,
#define C_PAGES (C_VDSO + 1)
#else #else
#define C_SIGPAGE 1 AA32_MAP_SIGPAGE
#define C_PAGES (C_SIGPAGE + 1) #endif
#endif /* CONFIG_COMPAT_VDSO */ };
static struct page *aarch32_vectors_page __ro_after_init; static struct page *aarch32_vectors_page __ro_after_init;
#ifndef CONFIG_COMPAT_VDSO #ifndef CONFIG_COMPAT_VDSO
static struct page *aarch32_sig_page __ro_after_init; static struct page *aarch32_sig_page __ro_after_init;
#endif #endif
static struct vm_special_mapping aarch32_vdso_spec[C_PAGES] = { static struct vm_special_mapping aarch32_vdso_maps[] = {
{ [AA32_MAP_VECTORS] = {
.name = "[vectors]", /* ABI */ .name = "[vectors]", /* ABI */
.pages = &aarch32_vectors_page, .pages = &aarch32_vectors_page,
}, },
#ifdef CONFIG_COMPAT_VDSO #ifdef CONFIG_COMPAT_VDSO
{ [AA32_MAP_VVAR] = {
.name = "[vvar]", .name = "[vvar]",
}, },
{ [AA32_MAP_VDSO] = {
.name = "[vdso]", .name = "[vdso]",
.mremap = aarch32_vdso_mremap, .mremap = aarch32_vdso_mremap,
}, },
#else #else
{ [AA32_MAP_SIGPAGE] = {
.name = "[sigpage]", /* ABI */ .name = "[sigpage]", /* ABI */
.pages = &aarch32_sig_page, .pages = &aarch32_sig_page,
}, },
...@@ -252,8 +243,8 @@ static int __aarch32_alloc_vdso_pages(void) ...@@ -252,8 +243,8 @@ static int __aarch32_alloc_vdso_pages(void)
{ {
int ret; int ret;
vdso_info[VDSO_ABI_AA32].dm = &aarch32_vdso_spec[C_VVAR]; vdso_info[VDSO_ABI_AA32].dm = &aarch32_vdso_maps[AA32_MAP_VVAR];
vdso_info[VDSO_ABI_AA32].cm = &aarch32_vdso_spec[C_VDSO]; vdso_info[VDSO_ABI_AA32].cm = &aarch32_vdso_maps[AA32_MAP_VDSO];
ret = __vdso_init(VDSO_ABI_AA32); ret = __vdso_init(VDSO_ABI_AA32);
if (ret) if (ret)
...@@ -305,7 +296,7 @@ static int aarch32_kuser_helpers_setup(struct mm_struct *mm) ...@@ -305,7 +296,7 @@ static int aarch32_kuser_helpers_setup(struct mm_struct *mm)
ret = _install_special_mapping(mm, AARCH32_VECTORS_BASE, PAGE_SIZE, ret = _install_special_mapping(mm, AARCH32_VECTORS_BASE, PAGE_SIZE,
VM_READ | VM_EXEC | VM_READ | VM_EXEC |
VM_MAYREAD | VM_MAYEXEC, VM_MAYREAD | VM_MAYEXEC,
&aarch32_vdso_spec[C_VECTORS]); &aarch32_vdso_maps[AA32_MAP_VECTORS]);
return PTR_ERR_OR_ZERO(ret); return PTR_ERR_OR_ZERO(ret);
} }
...@@ -329,7 +320,7 @@ static int aarch32_sigreturn_setup(struct mm_struct *mm) ...@@ -329,7 +320,7 @@ static int aarch32_sigreturn_setup(struct mm_struct *mm)
ret = _install_special_mapping(mm, addr, PAGE_SIZE, ret = _install_special_mapping(mm, addr, PAGE_SIZE,
VM_READ | VM_EXEC | VM_MAYREAD | VM_READ | VM_EXEC | VM_MAYREAD |
VM_MAYWRITE | VM_MAYEXEC, VM_MAYWRITE | VM_MAYEXEC,
&aarch32_vdso_spec[C_SIGPAGE]); &aarch32_vdso_maps[AA32_MAP_SIGPAGE]);
if (IS_ERR(ret)) if (IS_ERR(ret))
goto out; goto out;
...@@ -373,19 +364,16 @@ static int vdso_mremap(const struct vm_special_mapping *sm, ...@@ -373,19 +364,16 @@ static int vdso_mremap(const struct vm_special_mapping *sm,
return __vdso_remap(VDSO_ABI_AA64, sm, new_vma); return __vdso_remap(VDSO_ABI_AA64, sm, new_vma);
} }
/* enum aarch64_map {
* aarch64_vdso_pages: AA64_MAP_VVAR,
* 0 - vvar AA64_MAP_VDSO,
* 1 - vdso };
*/
#define A_VVAR 0 static struct vm_special_mapping aarch64_vdso_maps[] __ro_after_init = {
#define A_VDSO 1 [AA64_MAP_VVAR] = {
#define A_PAGES (A_VDSO + 1)
static struct vm_special_mapping vdso_spec[A_PAGES] __ro_after_init = {
{
.name = "[vvar]", .name = "[vvar]",
}, },
{ [AA64_MAP_VDSO] = {
.name = "[vdso]", .name = "[vdso]",
.mremap = vdso_mremap, .mremap = vdso_mremap,
}, },
...@@ -393,8 +381,8 @@ static struct vm_special_mapping vdso_spec[A_PAGES] __ro_after_init = { ...@@ -393,8 +381,8 @@ static struct vm_special_mapping vdso_spec[A_PAGES] __ro_after_init = {
static int __init vdso_init(void) static int __init vdso_init(void)
{ {
vdso_info[VDSO_ABI_AA64].dm = &vdso_spec[A_VVAR]; vdso_info[VDSO_ABI_AA64].dm = &aarch64_vdso_maps[AA64_MAP_VVAR];
vdso_info[VDSO_ABI_AA64].cm = &vdso_spec[A_VDSO]; vdso_info[VDSO_ABI_AA64].cm = &aarch64_vdso_maps[AA64_MAP_VDSO];
return __vdso_init(VDSO_ABI_AA64); return __vdso_init(VDSO_ABI_AA64);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment