Commit d57778fe authored by Sven Schnelle, committed by Vasily Gorbik

s390/vdso: always enable vdso

With the upcoming move of the svc sigreturn instruction from
the signal frame to the vdso, we need to have the vdso always enabled.
Signed-off-by: Sven Schnelle <svens@linux.ibm.com>
Reviewed-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
parent b9639b31
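Since ARCH_DLINFO now emits AT_SYSINFO_EHDR unconditionally, a 64-bit process can expect the vdso base address to be present in its auxiliary vector; only compat (31-bit) tasks still run without a vdso. A minimal userspace sketch (not part of this commit, shown only for illustration) that reads the entry via glibc's getauxval():

/*
 * Illustration only: query the vdso base address that the kernel
 * publishes through the auxiliary vector (AT_SYSINFO_EHDR).
 */
#include <stdio.h>
#include <sys/auxv.h>

int main(void)
{
	unsigned long vdso_base = getauxval(AT_SYSINFO_EHDR);

	if (vdso_base)
		printf("vdso mapped at 0x%lx\n", vdso_base);
	else
		printf("no vdso in auxv (e.g. compat task)\n");
	return 0;
}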
@@ -146,8 +146,6 @@ typedef s390_compat_regs compat_elf_gregset_t;
 
 #include <asm/vdso.h>
 
-extern unsigned int vdso_enabled;
-
 /*
  * This is used to ensure we don't load something for the wrong architecture.
  */
@@ -268,11 +266,10 @@ do { \
 #define STACK_RND_MASK	MMAP_RND_MASK
 
 /* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */
 #define ARCH_DLINFO						\
 do {								\
-	if (vdso_enabled)					\
-		NEW_AUX_ENT(AT_SYSINFO_EHDR,			\
-			    (unsigned long)current->mm->context.vdso_base); \
+	NEW_AUX_ENT(AT_SYSINFO_EHDR,				\
+		    (unsigned long)current->mm->context.vdso_base); \
 } while (0)
 
 struct linux_binprm;
@@ -37,18 +37,6 @@ enum vvar_pages {
 	VVAR_NR_PAGES,
 };
 
-unsigned int __read_mostly vdso_enabled = 1;
-
-static int __init vdso_setup(char *str)
-{
-	bool enabled;
-
-	if (!kstrtobool(str, &enabled))
-		vdso_enabled = enabled;
-	return 1;
-}
-__setup("vdso=", vdso_setup);
-
 #ifdef CONFIG_TIME_NS
 struct vdso_data *arch_get_vdso_data(void *vvar_page)
 {
@@ -176,7 +164,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 	int rc;
 
 	BUILD_BUG_ON(VVAR_NR_PAGES != __VVAR_PAGES);
-	if (!vdso_enabled || is_compat_task())
+	if (is_compat_task())
 		return 0;
 	if (mmap_write_lock_killable(mm))
 		return -EINTR;
@@ -218,10 +206,9 @@ static int __init vdso_init(void)
 	vdso_pages = (vdso64_end - vdso64_start) >> PAGE_SHIFT;
 
 	pages = kcalloc(vdso_pages + 1, sizeof(struct page *), GFP_KERNEL);
-	if (!pages) {
-		vdso_enabled = 0;
-		return -ENOMEM;
-	}
+	if (!pages)
+		panic("failed to allocate VDSO pages");
+
 	for (i = 0; i < vdso_pages; i++)
 		pages[i] = virt_to_page(vdso64_start + i * PAGE_SIZE);
 	pages[vdso_pages] = NULL;