Commit a39060b0 authored by Will Deacon

arm64: compat: Allow 32-bit vdso and sigpage to co-exist

In preparation for removing the signal trampoline from the compat vDSO,
allow the sigpage and the compat vDSO to co-exist.

For the moment the vDSO signal trampoline will still be used when built.
Subsequent patches will move to the sigpage consistently.
Acked-by: Dave Martin <Dave.Martin@arm.com>
Reviewed-by: Vincenzo Frascino <vincenzo.frascino@arm.com>
Reviewed-by: Ard Biesheuvel <ardb@kernel.org>
Reviewed-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Will Deacon <will@kernel.org>
parent 87676cfc
...@@ -19,6 +19,9 @@ ...@@ -19,6 +19,9 @@
typedef struct { typedef struct {
atomic64_t id; atomic64_t id;
#ifdef CONFIG_COMPAT
void *sigpage;
#endif
void *vdso; void *vdso;
unsigned long flags; unsigned long flags;
} mm_context_t; } mm_context_t;
......
...@@ -29,9 +29,7 @@ $(obj)/%.stub.o: $(obj)/%.o FORCE ...@@ -29,9 +29,7 @@ $(obj)/%.stub.o: $(obj)/%.o FORCE
obj-$(CONFIG_COMPAT) += sys32.o signal32.o \ obj-$(CONFIG_COMPAT) += sys32.o signal32.o \
sys_compat.o sys_compat.o
ifneq ($(CONFIG_COMPAT_VDSO), y)
obj-$(CONFIG_COMPAT) += sigreturn32.o obj-$(CONFIG_COMPAT) += sigreturn32.o
endif
obj-$(CONFIG_KUSER_HELPERS) += kuser32.o obj-$(CONFIG_KUSER_HELPERS) += kuser32.o
obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o entry-ftrace.o obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o entry-ftrace.o
obj-$(CONFIG_MODULES) += module.o obj-$(CONFIG_MODULES) += module.o
......
...@@ -371,7 +371,7 @@ static void compat_setup_return(struct pt_regs *regs, struct k_sigaction *ka, ...@@ -371,7 +371,7 @@ static void compat_setup_return(struct pt_regs *regs, struct k_sigaction *ka,
if (ka->sa.sa_flags & SA_SIGINFO) if (ka->sa.sa_flags & SA_SIGINFO)
idx += 3; idx += 3;
retcode = (unsigned long)current->mm->context.vdso + retcode = (unsigned long)current->mm->context.sigpage +
(idx << 2) + thumb; (idx << 2) + thumb;
#endif #endif
} }
......
...@@ -191,15 +191,12 @@ enum aarch32_map { ...@@ -191,15 +191,12 @@ enum aarch32_map {
#ifdef CONFIG_COMPAT_VDSO #ifdef CONFIG_COMPAT_VDSO
AA32_MAP_VVAR, AA32_MAP_VVAR,
AA32_MAP_VDSO, AA32_MAP_VDSO,
#else
AA32_MAP_SIGPAGE
#endif #endif
AA32_MAP_SIGPAGE
}; };
static struct page *aarch32_vectors_page __ro_after_init; static struct page *aarch32_vectors_page __ro_after_init;
#ifndef CONFIG_COMPAT_VDSO
static struct page *aarch32_sig_page __ro_after_init; static struct page *aarch32_sig_page __ro_after_init;
#endif
static struct vm_special_mapping aarch32_vdso_maps[] = { static struct vm_special_mapping aarch32_vdso_maps[] = {
[AA32_MAP_VECTORS] = { [AA32_MAP_VECTORS] = {
...@@ -214,12 +211,11 @@ static struct vm_special_mapping aarch32_vdso_maps[] = { ...@@ -214,12 +211,11 @@ static struct vm_special_mapping aarch32_vdso_maps[] = {
.name = "[vdso]", .name = "[vdso]",
.mremap = aarch32_vdso_mremap, .mremap = aarch32_vdso_mremap,
}, },
#else #endif /* CONFIG_COMPAT_VDSO */
[AA32_MAP_SIGPAGE] = { [AA32_MAP_SIGPAGE] = {
.name = "[sigpage]", /* ABI */ .name = "[sigpage]", /* ABI */
.pages = &aarch32_sig_page, .pages = &aarch32_sig_page,
}, },
#endif /* CONFIG_COMPAT_VDSO */
}; };
static int aarch32_alloc_kuser_vdso_page(void) static int aarch32_alloc_kuser_vdso_page(void)
...@@ -242,27 +238,11 @@ static int aarch32_alloc_kuser_vdso_page(void) ...@@ -242,27 +238,11 @@ static int aarch32_alloc_kuser_vdso_page(void)
return 0; return 0;
} }
#ifdef CONFIG_COMPAT_VDSO static int aarch32_alloc_sigpage(void)
static int __aarch32_alloc_vdso_pages(void)
{
int ret;
vdso_info[VDSO_ABI_AA32].dm = &aarch32_vdso_maps[AA32_MAP_VVAR];
vdso_info[VDSO_ABI_AA32].cm = &aarch32_vdso_maps[AA32_MAP_VDSO];
ret = __vdso_init(VDSO_ABI_AA32);
if (ret)
return ret;
return aarch32_alloc_kuser_vdso_page();
}
#else
static int __aarch32_alloc_vdso_pages(void)
{ {
extern char __aarch32_sigret_code_start[], __aarch32_sigret_code_end[]; extern char __aarch32_sigret_code_start[], __aarch32_sigret_code_end[];
int sigret_sz = __aarch32_sigret_code_end - __aarch32_sigret_code_start; int sigret_sz = __aarch32_sigret_code_end - __aarch32_sigret_code_start;
unsigned long sigpage; unsigned long sigpage;
int ret;
sigpage = get_zeroed_page(GFP_ATOMIC); sigpage = get_zeroed_page(GFP_ATOMIC);
if (!sigpage) if (!sigpage)
...@@ -271,18 +251,34 @@ static int __aarch32_alloc_vdso_pages(void) ...@@ -271,18 +251,34 @@ static int __aarch32_alloc_vdso_pages(void)
memcpy((void *)sigpage, __aarch32_sigret_code_start, sigret_sz); memcpy((void *)sigpage, __aarch32_sigret_code_start, sigret_sz);
aarch32_sig_page = virt_to_page(sigpage); aarch32_sig_page = virt_to_page(sigpage);
flush_dcache_page(aarch32_sig_page); flush_dcache_page(aarch32_sig_page);
return 0;
}
ret = aarch32_alloc_kuser_vdso_page(); #ifdef CONFIG_COMPAT_VDSO
if (ret) static int __aarch32_alloc_vdso_pages(void)
free_page(sigpage); {
vdso_info[VDSO_ABI_AA32].dm = &aarch32_vdso_maps[AA32_MAP_VVAR];
vdso_info[VDSO_ABI_AA32].cm = &aarch32_vdso_maps[AA32_MAP_VDSO];
return ret; return __vdso_init(VDSO_ABI_AA32);
} }
#endif /* CONFIG_COMPAT_VDSO */ #endif /* CONFIG_COMPAT_VDSO */
static int __init aarch32_alloc_vdso_pages(void) static int __init aarch32_alloc_vdso_pages(void)
{ {
return __aarch32_alloc_vdso_pages(); int ret;
#ifdef CONFIG_COMPAT_VDSO
ret = __aarch32_alloc_vdso_pages();
if (ret)
return ret;
#endif
ret = aarch32_alloc_sigpage();
if (ret)
return ret;
return aarch32_alloc_kuser_vdso_page();
} }
arch_initcall(aarch32_alloc_vdso_pages); arch_initcall(aarch32_alloc_vdso_pages);
...@@ -305,7 +301,6 @@ static int aarch32_kuser_helpers_setup(struct mm_struct *mm) ...@@ -305,7 +301,6 @@ static int aarch32_kuser_helpers_setup(struct mm_struct *mm)
return PTR_ERR_OR_ZERO(ret); return PTR_ERR_OR_ZERO(ret);
} }
#ifndef CONFIG_COMPAT_VDSO
static int aarch32_sigreturn_setup(struct mm_struct *mm) static int aarch32_sigreturn_setup(struct mm_struct *mm)
{ {
unsigned long addr; unsigned long addr;
...@@ -328,12 +323,11 @@ static int aarch32_sigreturn_setup(struct mm_struct *mm) ...@@ -328,12 +323,11 @@ static int aarch32_sigreturn_setup(struct mm_struct *mm)
if (IS_ERR(ret)) if (IS_ERR(ret))
goto out; goto out;
mm->context.vdso = (void *)addr; mm->context.sigpage = (void *)addr;
out: out:
return PTR_ERR_OR_ZERO(ret); return PTR_ERR_OR_ZERO(ret);
} }
#endif /* !CONFIG_COMPAT_VDSO */
int aarch32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) int aarch32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{ {
...@@ -352,10 +346,11 @@ int aarch32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) ...@@ -352,10 +346,11 @@ int aarch32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
mm, mm,
bprm, bprm,
uses_interp); uses_interp);
#else if (ret)
ret = aarch32_sigreturn_setup(mm); goto out;
#endif /* CONFIG_COMPAT_VDSO */ #endif /* CONFIG_COMPAT_VDSO */
ret = aarch32_sigreturn_setup(mm);
out: out:
mmap_write_unlock(mm); mmap_write_unlock(mm);
return ret; return ret;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment