Commit b67e612c authored by Andy Lutomirski, committed by H. Peter Anvin

x86: Load the 32-bit vdso in place, just like the 64-bit vdsos

This replaces a decent amount of incomprehensible and buggy code
with much more straightforward code.  It also brings the 32-bit vdso
more in line with the 64-bit vdsos, so maybe someday they can share
even more code.

This wastes a small amount of kernel .data and .text space, but it
avoids a couple of allocations on startup, so it should be more or
less a wash memory-wise.
Signed-off-by: Andy Lutomirski <luto@amacapital.net>
Cc: Stefani Seibold <stefani@seibold.net>
Link: http://lkml.kernel.org/r/b8093933fad09ce181edb08a61dcd5d2592e9814.1395352498.git.luto@amacapital.net
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
parent 4e40112c
...@@ -25,14 +25,6 @@ extern const char VDSO32_PRELINK[]; ...@@ -25,14 +25,6 @@ extern const char VDSO32_PRELINK[];
extern void __user __kernel_sigreturn; extern void __user __kernel_sigreturn;
extern void __user __kernel_rt_sigreturn; extern void __user __kernel_rt_sigreturn;
/*
* These symbols are defined by vdso32.S to mark the bounds
* of the ELF DSO images included therein.
*/
extern const char vdso32_int80_start, vdso32_int80_end;
extern const char vdso32_syscall_start, vdso32_syscall_end;
extern const char vdso32_sysenter_start, vdso32_sysenter_end;
void __init patch_vdso32(void *vdso, size_t len); void __init patch_vdso32(void *vdso, size_t len);
#endif /* _ASM_X86_VDSO_H */ #endif /* _ASM_X86_VDSO_H */
#include <asm/page_types.h> #include "vdso_image.h"
#include <linux/linkage.h>
__PAGE_ALIGNED_DATA DEFINE_VDSO_IMAGE(vdso, "arch/x86/vdso/vdso.so")
.globl vdso_start, vdso_end
.align PAGE_SIZE
vdso_start:
.incbin "arch/x86/vdso/vdso.so"
vdso_end:
.align PAGE_SIZE /* extra data here leaks to userspace. */
.previous
.globl vdso_pages
.bss
.align 8
.type vdso_pages, @object
vdso_pages:
.zero (vdso_end - vdso_start + PAGE_SIZE - 1) / PAGE_SIZE * 8
.size vdso_pages, .-vdso_pages
...@@ -29,6 +29,7 @@ ...@@ -29,6 +29,7 @@
#include <asm/fixmap.h> #include <asm/fixmap.h>
#include <asm/hpet.h> #include <asm/hpet.h>
#include <asm/vvar.h> #include <asm/vvar.h>
#include "vdso_image.h"
#ifdef CONFIG_COMPAT_VDSO #ifdef CONFIG_COMPAT_VDSO
#define VDSO_DEFAULT 0 #define VDSO_DEFAULT 0
...@@ -41,6 +42,12 @@ ...@@ -41,6 +42,12 @@
#define arch_setup_additional_pages syscall32_setup_pages #define arch_setup_additional_pages syscall32_setup_pages
#endif #endif
DECLARE_VDSO_IMAGE(vdso32_int80);
#ifdef CONFIG_COMPAT
DECLARE_VDSO_IMAGE(vdso32_syscall);
#endif
DECLARE_VDSO_IMAGE(vdso32_sysenter);
/* /*
* Should the kernel map a VDSO page into processes and pass its * Should the kernel map a VDSO page into processes and pass its
* address down to glibc upon exec()? * address down to glibc upon exec()?
...@@ -71,7 +78,7 @@ EXPORT_SYMBOL_GPL(vdso_enabled); ...@@ -71,7 +78,7 @@ EXPORT_SYMBOL_GPL(vdso_enabled);
#endif #endif
static struct page **vdso32_pages; static struct page **vdso32_pages;
static unsigned int vdso32_size; static unsigned vdso32_size;
#ifdef CONFIG_X86_64 #ifdef CONFIG_X86_64
...@@ -117,31 +124,32 @@ void enable_sep_cpu(void) ...@@ -117,31 +124,32 @@ void enable_sep_cpu(void)
int __init sysenter_setup(void) int __init sysenter_setup(void)
{ {
void *vdso_pages; char *vdso32_start, *vdso32_end;
const void *vdso; int npages, i;
size_t vdso_len;
unsigned int i;
#ifdef CONFIG_COMPAT
if (vdso32_syscall()) { if (vdso32_syscall()) {
vdso = &vdso32_syscall_start; vdso32_start = vdso32_syscall_start;
vdso_len = &vdso32_syscall_end - &vdso32_syscall_start; vdso32_end = vdso32_syscall_end;
} else if (vdso32_sysenter()){ vdso32_pages = vdso32_syscall_pages;
vdso = &vdso32_sysenter_start; } else
vdso_len = &vdso32_sysenter_end - &vdso32_sysenter_start; #endif
if (vdso32_sysenter()) {
vdso32_start = vdso32_sysenter_start;
vdso32_end = vdso32_sysenter_end;
vdso32_pages = vdso32_sysenter_pages;
} else { } else {
vdso = &vdso32_int80_start; vdso32_start = vdso32_int80_start;
vdso_len = &vdso32_int80_end - &vdso32_int80_start; vdso32_end = vdso32_int80_end;
vdso32_pages = vdso32_int80_pages;
} }
vdso32_size = (vdso_len + PAGE_SIZE - 1) / PAGE_SIZE; npages = ((vdso32_end - vdso32_start) + PAGE_SIZE - 1) / PAGE_SIZE;
vdso32_pages = kmalloc(sizeof(*vdso32_pages) * vdso32_size, GFP_ATOMIC); vdso32_size = npages << PAGE_SHIFT;
vdso_pages = kmalloc(VDSO_OFFSET(vdso32_size), GFP_ATOMIC); for (i = 0; i < npages; i++)
vdso32_pages[i] = virt_to_page(vdso32_start + i*PAGE_SIZE);
for(i = 0; i != vdso32_size; ++i)
vdso32_pages[i] = virt_to_page(vdso_pages + VDSO_OFFSET(i));
memcpy(vdso_pages, vdso, vdso_len); patch_vdso32(vdso32_start, vdso32_size);
patch_vdso32(vdso_pages, vdso_len);
return 0; return 0;
} }
...@@ -177,7 +185,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) ...@@ -177,7 +185,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
*/ */
ret = install_special_mapping(mm, ret = install_special_mapping(mm,
addr, addr,
VDSO_OFFSET(vdso32_size), vdso32_size,
VM_READ|VM_EXEC| VM_READ|VM_EXEC|
VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC, VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
vdso32_pages); vdso32_pages);
......
#include <linux/init.h> #include "vdso_image.h"
__INITDATA DEFINE_VDSO_IMAGE(vdso32_int80, "arch/x86/vdso/vdso32-int80.so")
.globl vdso32_int80_start, vdso32_int80_end
vdso32_int80_start:
.incbin "arch/x86/vdso/vdso32-int80.so"
vdso32_int80_end:
.globl vdso32_syscall_start, vdso32_syscall_end
vdso32_syscall_start:
#ifdef CONFIG_COMPAT #ifdef CONFIG_COMPAT
.incbin "arch/x86/vdso/vdso32-syscall.so" DEFINE_VDSO_IMAGE(vdso32_syscall, "arch/x86/vdso/vdso32-syscall.so")
#endif #endif
vdso32_syscall_end:
.globl vdso32_sysenter_start, vdso32_sysenter_end
vdso32_sysenter_start:
.incbin "arch/x86/vdso/vdso32-sysenter.so"
vdso32_sysenter_end:
__FINIT DEFINE_VDSO_IMAGE(vdso32_sysenter, "arch/x86/vdso/vdso32-sysenter.so")
#ifndef _VDSO_IMAGE_H
#define _VDSO_IMAGE_H
#include <asm/page_types.h>
#include <linux/linkage.h>
#define DEFINE_VDSO_IMAGE(symname, filename) \
__PAGE_ALIGNED_DATA ; \
.globl symname##_start, symname##_end ; \
.align PAGE_SIZE ; \
symname##_start: ; \
.incbin filename ; \
symname##_end: ; \
.align PAGE_SIZE /* extra data here leaks to userspace. */ ; \
\
.previous ; \
\
.globl symname##_pages ; \
.bss ; \
.align 8 ; \
.type symname##_pages, @object ; \
symname##_pages: ; \
.zero (symname##_end - symname##_start + PAGE_SIZE - 1) / PAGE_SIZE * (BITS_PER_LONG / 8) ; \
.size symname##_pages, .-symname##_pages
#define DECLARE_VDSO_IMAGE(symname) \
extern char symname##_start[], symname##_end[]; \
extern struct page *symname##_pages[]
#endif /* _VDSO_IMAGE_H */
#include <asm/page_types.h> #include "vdso_image.h"
#include <linux/linkage.h>
__PAGE_ALIGNED_DATA DEFINE_VDSO_IMAGE(vdsox32, "arch/x86/vdso/vdsox32.so")
.globl vdsox32_start, vdsox32_end
.align PAGE_SIZE
vdsox32_start:
.incbin "arch/x86/vdso/vdsox32.so"
vdsox32_end:
.align PAGE_SIZE /* extra data here leaks to userspace. */
.previous
.globl vdsox32_pages
.bss
.align 8
.type vdsox32_pages, @object
vdsox32_pages:
.zero (vdsox32_end - vdsox32_start + PAGE_SIZE - 1) / PAGE_SIZE * 8
.size vdsox32_pages, .-vdsox32_pages
...@@ -15,19 +15,17 @@ ...@@ -15,19 +15,17 @@
#include <asm/proto.h> #include <asm/proto.h>
#include <asm/vdso.h> #include <asm/vdso.h>
#include <asm/page.h> #include <asm/page.h>
#include "vdso_image.h"
#if defined(CONFIG_X86_64) #if defined(CONFIG_X86_64)
unsigned int __read_mostly vdso_enabled = 1; unsigned int __read_mostly vdso_enabled = 1;
extern char vdso_start[], vdso_end[]; DECLARE_VDSO_IMAGE(vdso);
extern unsigned short vdso_sync_cpuid; extern unsigned short vdso_sync_cpuid;
extern struct page *vdso_pages[];
static unsigned vdso_size; static unsigned vdso_size;
#ifdef CONFIG_X86_X32_ABI #ifdef CONFIG_X86_X32_ABI
extern char vdsox32_start[], vdsox32_end[]; DECLARE_VDSO_IMAGE(vdsox32);
extern struct page *vdsox32_pages[];
static unsigned vdsox32_size; static unsigned vdsox32_size;
#endif #endif
#endif #endif
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment