Commit a97a2d4d authored by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc

Pull sparc updates from David Miller:
 "Mostly VDSO cleanups and optimizations"

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc:
  sparc: Several small VDSO vclock_gettime.c improvements.
  sparc: Validate VDSO for undefined symbols.
  sparc: Really use linker with LDFLAGS.
  sparc: Improve VDSO CFLAGS.
  sparc: Set DISABLE_BRANCH_PROFILING in VDSO CFLAGS.
  sparc: Don't bother masking out TICK_PRIV_BIT in VDSO code.
  sparc: Inline VDSO gettime code aggressively.
  sparc: Improve VDSO instruction patching.
  sparc: Fix parport build warnings.
parents 44786880 8dbc450f
@@ -21,6 +21,7 @@
  */
 #define HAS_DMA

+#ifdef CONFIG_PARPORT_PC_FIFO
 static DEFINE_SPINLOCK(dma_spin_lock);

 #define claim_dma_lock() \
@@ -31,6 +32,7 @@ static DEFINE_SPINLOCK(dma_spin_lock);
 #define release_dma_lock(__flags) \
	spin_unlock_irqrestore(&dma_spin_lock, __flags);
+#endif

 static struct sparc_ebus_info {
	struct ebus_dma_info info;
......
@@ -121,8 +121,12 @@ struct thread_info {
 }

 /* how to get the thread information struct from C */
+#ifndef BUILD_VDSO
 register struct thread_info *current_thread_info_reg asm("g6");
 #define current_thread_info()	(current_thread_info_reg)
+#else
+extern struct thread_info *current_thread_info(void);
+#endif

 /* thread information allocation */
 #if PAGE_SHIFT == 13
......
@@ -8,10 +8,10 @@
 struct vdso_image {
	void *data;
	unsigned long size;	/* Always a multiple of PAGE_SIZE */

+	unsigned long tick_patch, tick_patch_len;
+
	long sym_vvar_start;	/* Negative offset to the vvar area */
-
-	long sym_vread_tick;		/* Start of vread_tick section */
-	long sym_vread_tick_patch_start;/* Start of tick read */
-	long sym_vread_tick_patch_end;	/* End of tick read */
 };

 #ifdef CONFIG_SPARC64
......
@@ -53,8 +53,6 @@
 DEFINE_SPINLOCK(rtc_lock);

-unsigned int __read_mostly vdso_fix_stick;
-
 #ifdef CONFIG_SMP
 unsigned long profile_pc(struct pt_regs *regs)
 {
@@ -838,7 +836,6 @@ void __init time_init_early(void)
		} else {
			init_tick_ops(&tick_operations);
			clocksource_tick.archdata.vclock_mode = VCLOCK_TICK;
-			vdso_fix_stick = 1;
		}
	} else {
		init_tick_ops(&stick_operations);
......
@@ -33,10 +33,8 @@ targets += $(vdso_img_sodbg) $(vdso_img-y:%=vdso%.so)
 CPPFLAGS_vdso.lds += -P -C

-VDSO_LDFLAGS_vdso.lds = -m64 -Wl,-soname=linux-vdso.so.1 \
-			-Wl,--no-undefined \
-			-Wl,-z,max-page-size=8192 -Wl,-z,common-page-size=8192 \
-			$(DISABLE_LTO)
+VDSO_LDFLAGS_vdso.lds = -m elf64_sparc -soname linux-vdso.so.1 --no-undefined \
+			-z max-page-size=8192 -z common-page-size=8192

 $(obj)/vdso64.so.dbg: $(obj)/vdso.lds $(vobjs) FORCE
	$(call if_changed,vdso)
@@ -54,13 +52,14 @@ $(obj)/vdso-image-%.c: $(obj)/vdso%.so.dbg $(obj)/vdso%.so $(obj)/vdso2c FORCE
 # Don't omit frame pointers for ease of userspace debugging, but do
 # optimize sibling calls.
 #
-CFL := $(PROFILING) -mcmodel=medlow -fPIC -O2 -fasynchronous-unwind-tables \
-       -m64 -ffixed-g2 -ffixed-g3 -fcall-used-g4 -fcall-used-g5 -ffixed-g6 \
-       -ffixed-g7 $(filter -g%,$(KBUILD_CFLAGS)) \
-       $(call cc-option, -fno-stack-protector) -fno-omit-frame-pointer \
-       -foptimize-sibling-calls -DBUILD_VDSO
+CFL := $(PROFILING) -mcmodel=medlow -fPIC -O2 -fasynchronous-unwind-tables -m64 \
+       $(filter -g%,$(KBUILD_CFLAGS)) $(call cc-option, -fno-stack-protector) \
+       -fno-omit-frame-pointer -foptimize-sibling-calls \
+       -DDISABLE_BRANCH_PROFILING -DBUILD_VDSO

-$(vobjs): KBUILD_CFLAGS += $(CFL)
+SPARC_REG_CFLAGS = -ffixed-g4 -ffixed-g5 -fcall-used-g5 -fcall-used-g7
+
+$(vobjs): KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS) $(SPARC_REG_CFLAGS),$(KBUILD_CFLAGS)) $(CFL)

 #
 # vDSO code runs in userspace and -pg doesn't help with profiling anyway.
@@ -73,7 +72,7 @@ $(obj)/%.so: $(obj)/%.so.dbg
	$(call if_changed,objcopy)

 CPPFLAGS_vdso32.lds = $(CPPFLAGS_vdso.lds)
-VDSO_LDFLAGS_vdso32.lds = -m32 -Wl,-m,elf32_sparc,-soname=linux-gate.so.1
+VDSO_LDFLAGS_vdso32.lds = -m elf32_sparc -soname linux-gate.so.1

 #This makes sure the $(obj) subdirectory exists even though vdso32/
 #is not a kbuild sub-make subdirectory
@@ -91,7 +90,8 @@ KBUILD_CFLAGS_32 := $(filter-out -m64,$(KBUILD_CFLAGS))
 KBUILD_CFLAGS_32 := $(filter-out -mcmodel=medlow,$(KBUILD_CFLAGS_32))
 KBUILD_CFLAGS_32 := $(filter-out -fno-pic,$(KBUILD_CFLAGS_32))
 KBUILD_CFLAGS_32 := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS_32))
-KBUILD_CFLAGS_32 += -m32 -msoft-float -fpic -mno-app-regs -ffixed-g7
+KBUILD_CFLAGS_32 := $(filter-out $(SPARC_REG_CFLAGS),$(KBUILD_CFLAGS_32))
+KBUILD_CFLAGS_32 += -m32 -msoft-float -fpic
 KBUILD_CFLAGS_32 += $(call cc-option, -fno-stack-protector)
 KBUILD_CFLAGS_32 += $(call cc-option, -foptimize-sibling-calls)
 KBUILD_CFLAGS_32 += -fno-omit-frame-pointer
@@ -109,12 +109,13 @@ $(obj)/vdso32.so.dbg: FORCE \
 # The DSO images are built using a special linker script.
 #
 quiet_cmd_vdso = VDSO $@
-      cmd_vdso = $(CC) -nostdlib -o $@ \
+      cmd_vdso = $(LD) -nostdlib -o $@ \
		       $(VDSO_LDFLAGS) $(VDSO_LDFLAGS_$(filter %.lds,$(^F))) \
-		       -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^)
+		       -T $(filter %.lds,$^) $(filter %.o,$^) && \
+		 sh $(srctree)/$(src)/checkundef.sh '$(OBJDUMP)' '$@'

-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) \
-	       $(call cc-ldoption, -Wl$(comma)--build-id) -Wl,-Bsymbolic
+VDSO_LDFLAGS = -shared $(call ld-option, --hash-style=both) \
+	       $(call ld-option, --build-id) -Bsymbolic
 GCOV_PROFILE := n

 #
......
#!/bin/sh
objdump="$1"
file="$2"
$objdump -t "$file" | grep '*UUND*' | grep -v '#scratch' > /dev/null 2>&1
if [ $? -eq 1 ]; then
    exit 0
else
    echo "$file: undefined symbols found" >&2
    exit 1
fi
@@ -12,11 +12,6 @@
  * Copyright (c) 2017 Oracle and/or its affiliates. All rights reserved.
  */

-/* Disable profiling for userspace code: */
-#ifndef DISABLE_BRANCH_PROFILING
-#define DISABLE_BRANCH_PROFILING
-#endif
-
 #include <linux/kernel.h>
 #include <linux/time.h>
 #include <linux/string.h>
@@ -26,13 +21,6 @@
 #include <asm/clocksource.h>
 #include <asm/vvar.h>

-#undef TICK_PRIV_BIT
-#ifdef CONFIG_SPARC64
-#define TICK_PRIV_BIT (1UL << 63)
-#else
-#define TICK_PRIV_BIT (1ULL << 63)
-#endif
-
 #ifdef CONFIG_SPARC64
 #define SYSCALL_STRING \
	"ta 0x6d;" \
@@ -60,24 +48,22 @@
  * Compute the vvar page's address in the process address space, and return it
  * as a pointer to the vvar_data.
  */
-static notrace noinline struct vvar_data *
-get_vvar_data(void)
+notrace static __always_inline struct vvar_data *get_vvar_data(void)
 {
	unsigned long ret;

	/*
-	 * vdso data page is the first vDSO page so grab the return address
+	 * vdso data page is the first vDSO page so grab the PC
	 * and move up a page to get to the data page.
	 */
-	ret = (unsigned long)__builtin_return_address(0);
+	__asm__("rd %%pc, %0" : "=r" (ret));
	ret &= ~(8192 - 1);
	ret -= 8192;

	return (struct vvar_data *) ret;
 }

-static notrace long
-vdso_fallback_gettime(long clock, struct timespec *ts)
+notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
 {
	register long num __asm__("g1") = __NR_clock_gettime;
	register long o0 __asm__("o0") = clock;
@@ -88,8 +74,7 @@ vdso_fallback_gettime(long clock, struct timespec *ts)
	return o0;
 }

-static notrace __always_inline long
-vdso_fallback_gettimeofday(struct timeval *tv, struct timezone *tz)
+notrace static long vdso_fallback_gettimeofday(struct timeval *tv, struct timezone *tz)
 {
	register long num __asm__("g1") = __NR_gettimeofday;
	register long o0 __asm__("o0") = (long) tv;
@@ -101,38 +86,43 @@ vdso_fallback_gettimeofday(struct timeval *tv, struct timezone *tz)
 }

 #ifdef CONFIG_SPARC64
-static notrace noinline u64
-vread_tick(void)
+notrace static __always_inline u64 vread_tick(void)
 {
	u64 ret;

-	__asm__ __volatile__("rd %%asr24, %0 \n"
-			     ".section .vread_tick_patch, \"ax\" \n"
-			     "rd %%tick, %0 \n"
-			     ".previous \n"
-			     : "=&r" (ret));
-	return ret & ~TICK_PRIV_BIT;
+	__asm__ __volatile__("1:\n\t"
+			     "rd %%tick, %0\n\t"
+			     ".pushsection .tick_patch, \"a\"\n\t"
+			     ".word 1b - ., 1f - .\n\t"
+			     ".popsection\n\t"
+			     ".pushsection .tick_patch_replacement, \"ax\"\n\t"
+			     "1:\n\t"
+			     "rd %%asr24, %0\n\t"
+			     ".popsection\n"
+			     : "=r" (ret));
+	return ret;
 }
 #else
-static notrace noinline u64
-vread_tick(void)
+notrace static __always_inline u64 vread_tick(void)
 {
-	unsigned int lo, hi;
+	register unsigned long long ret asm("o4");

-	__asm__ __volatile__("rd %%asr24, %%g1\n\t"
-			     "srlx %%g1, 32, %1\n\t"
-			     "srl %%g1, 0, %0\n"
-			     ".section .vread_tick_patch, \"ax\" \n"
-			     "rd %%tick, %%g1\n"
-			     ".previous \n"
-			     : "=&r" (lo), "=&r" (hi)
-			     :
-			     : "g1");
-	return lo | ((u64)hi << 32);
+	__asm__ __volatile__("1:\n\t"
+			     "rd %%tick, %L0\n\t"
+			     "srlx %L0, 32, %H0\n\t"
+			     ".pushsection .tick_patch, \"a\"\n\t"
+			     ".word 1b - ., 1f - .\n\t"
+			     ".popsection\n\t"
+			     ".pushsection .tick_patch_replacement, \"ax\"\n\t"
+			     "1:\n\t"
+			     "rd %%asr24, %L0\n\t"
+			     ".popsection\n"
+			     : "=r" (ret));
+	return ret;
 }
 #endif

-static notrace inline u64
-vgetsns(struct vvar_data *vvar)
+notrace static __always_inline u64 vgetsns(struct vvar_data *vvar)
 {
	u64 v;
	u64 cycles;
@@ -142,13 +132,12 @@ vgetsns(struct vvar_data *vvar)
	return v * vvar->clock.mult;
 }

-static notrace noinline int
-do_realtime(struct vvar_data *vvar, struct timespec *ts)
+notrace static __always_inline int do_realtime(struct vvar_data *vvar,
+					       struct timespec *ts)
 {
	unsigned long seq;
	u64 ns;

-	ts->tv_nsec = 0;
	do {
		seq = vvar_read_begin(vvar);
		ts->tv_sec = vvar->wall_time_sec;
@@ -157,18 +146,18 @@ do_realtime(struct vvar_data *vvar, struct timespec *ts)
		ns >>= vvar->clock.shift;
	} while (unlikely(vvar_read_retry(vvar, seq)));

-	timespec_add_ns(ts, ns);
+	ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
+	ts->tv_nsec = ns;

	return 0;
 }

-static notrace noinline int
-do_monotonic(struct vvar_data *vvar, struct timespec *ts)
+notrace static __always_inline int do_monotonic(struct vvar_data *vvar,
+						struct timespec *ts)
 {
	unsigned long seq;
	u64 ns;

-	ts->tv_nsec = 0;
	do {
		seq = vvar_read_begin(vvar);
		ts->tv_sec = vvar->monotonic_time_sec;
@@ -177,13 +166,14 @@ do_monotonic(struct vvar_data *vvar, struct timespec *ts)
		ns >>= vvar->clock.shift;
	} while (unlikely(vvar_read_retry(vvar, seq)));

-	timespec_add_ns(ts, ns);
+	ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
+	ts->tv_nsec = ns;

	return 0;
 }

-static notrace noinline int
-do_realtime_coarse(struct vvar_data *vvar, struct timespec *ts)
+notrace static int do_realtime_coarse(struct vvar_data *vvar,
+				      struct timespec *ts)
 {
	unsigned long seq;
@@ -195,8 +185,8 @@ do_realtime_coarse(struct vvar_data *vvar, struct timespec *ts)
	return 0;
 }

-static notrace noinline int
-do_monotonic_coarse(struct vvar_data *vvar, struct timespec *ts)
+notrace static int do_monotonic_coarse(struct vvar_data *vvar,
+				       struct timespec *ts)
 {
	unsigned long seq;
......
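Editor's note on the mechanism above: each ".word 1b - ., 1f - ." directive emits one table entry of two 32-bit self-relative offsets. Adding each word to its own address yields the address of the original "rd %tick" instruction and of its "rd %asr24" replacement; stick_patch() in the vma.c hunk further down walks that table. The following standalone sketch (plain user-space C with fabricated placeholder words, not part of the commit) models the same offset arithmetic:

#include <stdint.h>
#include <stdio.h>

/* Same layout as the tick_patch_entry introduced in vma.c below. */
struct tick_patch_entry {
	int32_t orig, repl;	/* self-relative offsets, as ".word 1b - ., 1f - ." */
};

/* A fake "image": two instruction slots plus one patch-table entry,
 * kept in one object so the offsets stay small and well-defined. */
static struct {
	uint32_t insn[2];		/* [0] original word, [1] replacement word */
	struct tick_patch_entry e;
} img = { { 0x11111111u /* placeholder "original" */,
	    0x22222222u /* placeholder "replacement" */ } };

int main(void)
{
	/* What the assembler's "1b - ." / "1f - ." expressions record: */
	img.e.orig = (int32_t)((char *)&img.insn[0] - (char *)&img.e.orig);
	img.e.repl = (int32_t)((char *)&img.insn[1] - (char *)&img.e.repl);

	/* What stick_patch() does with each entry: */
	uint32_t *instr = (uint32_t *)((char *)&img.e.orig + img.e.orig);
	uint32_t *repl  = (uint32_t *)((char *)&img.e.repl + img.e.repl);

	*instr = *repl;			/* overwrite the original with the replacement */
	printf("patched word: 0x%08x\n", (unsigned)img.insn[0]);
	return 0;
}

Because every entry is relative to its own location, the table needs no relocations and stays valid wherever the vDSO image happens to be mapped.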
@@ -73,11 +73,8 @@ SECTIONS
	.text		: { *(.text*) }			:text	=0x90909090,

-	.vread_tick_patch : {
-		vread_tick_patch_start = .;
-		*(.vread_tick_patch)
-		vread_tick_patch_end = .;
-	}
+	.tick_patch : { *(.tick_patch) } :text
+	.tick_patch_insns : { *(.tick_patch_insns) } :text

	/DISCARD/ : {
		*(.discard)
......
@@ -63,9 +63,6 @@ enum {
	sym_vvar_start,
	sym_VDSO_FAKE_SECTION_TABLE_START,
	sym_VDSO_FAKE_SECTION_TABLE_END,
-	sym_vread_tick,
-	sym_vread_tick_patch_start,
-	sym_vread_tick_patch_end
 };

 struct vdso_sym {
@@ -81,9 +78,6 @@ struct vdso_sym required_syms[] = {
	[sym_VDSO_FAKE_SECTION_TABLE_END] = {
		"VDSO_FAKE_SECTION_TABLE_END", 0
	},
-	[sym_vread_tick] = {"vread_tick", 1},
-	[sym_vread_tick_patch_start] = {"vread_tick_patch_start", 1},
-	[sym_vread_tick_patch_end] = {"vread_tick_patch_end", 1}
 };

 __attribute__((format(printf, 1, 2))) __attribute__((noreturn))
......
@@ -17,10 +17,11 @@ static void BITSFUNC(go)(void *raw_addr, size_t raw_len,
	unsigned long mapping_size;
	int i;
	unsigned long j;
-	ELF(Shdr) *symtab_hdr = NULL, *strtab_hdr;
+	ELF(Shdr) *symtab_hdr = NULL, *strtab_hdr, *secstrings_hdr,
+		*patch_sec = NULL;
	ELF(Ehdr) *hdr = (ELF(Ehdr) *)raw_addr;
	ELF(Dyn) *dyn = 0, *dyn_end = 0;
+	const char *secstrings;
	INT_BITS syms[NSYMS] = {};

	ELF(Phdr) *pt = (ELF(Phdr) *)(raw_addr + GET_BE(&hdr->e_phoff));
@@ -63,11 +64,18 @@ static void BITSFUNC(go)(void *raw_addr, size_t raw_len,
	}

	/* Walk the section table */
+	secstrings_hdr = raw_addr + GET_BE(&hdr->e_shoff) +
+		GET_BE(&hdr->e_shentsize)*GET_BE(&hdr->e_shstrndx);
+	secstrings = raw_addr + GET_BE(&secstrings_hdr->sh_offset);
	for (i = 0; i < GET_BE(&hdr->e_shnum); i++) {
		ELF(Shdr) *sh = raw_addr + GET_BE(&hdr->e_shoff) +
			GET_BE(&hdr->e_shentsize) * i;
		if (GET_BE(&sh->sh_type) == SHT_SYMTAB)
			symtab_hdr = sh;
+		if (!strcmp(secstrings + GET_BE(&sh->sh_name),
+			    ".tick_patch"))
+			patch_sec = sh;
	}

	if (!symtab_hdr)
@@ -134,6 +142,12 @@ static void BITSFUNC(go)(void *raw_addr, size_t raw_len,
	fprintf(outfile, "const struct vdso_image %s_builtin = {\n", name);
	fprintf(outfile, "\t.data = raw_data,\n");
	fprintf(outfile, "\t.size = %lu,\n", mapping_size);
+	if (patch_sec) {
+		fprintf(outfile, "\t.tick_patch = %lu,\n",
+			(unsigned long)GET_BE(&patch_sec->sh_offset));
+		fprintf(outfile, "\t.tick_patch_len = %lu,\n",
+			(unsigned long)GET_BE(&patch_sec->sh_size));
+	}
	for (i = 0; i < NSYMS; i++) {
		if (required_syms[i].export && syms[i])
			fprintf(outfile, "\t.sym_%s = %" PRIi64 ",\n",
......
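Editor's note: with the .tick_patch section located, vdso2c now records its file offset and length in the generated image descriptor. A fragment of the generated vdso-image-*.c might look like the following; the variable name and every numeric value are fabricated for illustration, and only the field names come from the fprintf() calls above:

/* Illustrative fragment of a generated vdso-image-*.c (values fabricated). */
const struct vdso_image vdso_image_64_builtin = {
	.data = raw_data,		/* the embedded vdso64.so bytes */
	.size = 16384,			/* always a multiple of PAGE_SIZE */
	.tick_patch = 3496,		/* file offset of the .tick_patch table */
	.tick_patch_len = 8,		/* one 8-byte entry per patch site */
	.sym_vvar_start = -8192,
};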
@@ -16,6 +16,8 @@
 #include <linux/linkage.h>
 #include <linux/random.h>
 #include <linux/elf.h>
+#include <asm/cacheflush.h>
+#include <asm/spitfire.h>
 #include <asm/vdso.h>
 #include <asm/vvar.h>
 #include <asm/page.h>
@@ -40,7 +42,25 @@ static struct vm_special_mapping vdso_mapping32 = {

 struct vvar_data *vvar_data;

-#define	SAVE_INSTR_SIZE	4
+struct tick_patch_entry {
+	s32 orig, repl;
+};
+
+static void stick_patch(const struct vdso_image *image)
+{
+	struct tick_patch_entry *p, *p_end;
+
+	p = image->data + image->tick_patch;
+	p_end = (void *)p + image->tick_patch_len;
+	while (p < p_end) {
+		u32 *instr = (void *)&p->orig + p->orig;
+		u32 *repl = (void *)&p->repl + p->repl;
+
+		*instr = *repl;
+		flushi(instr);
+		p++;
+	}
+}

 /*
  * Allocate pages for the vdso and vvar, and copy in the vdso text from the
@@ -68,21 +88,8 @@ int __init init_vdso_image(const struct vdso_image *image,
	if (!cpp)
		goto oom;

-	if (vdso_fix_stick) {
-		/*
-		 * If the system uses %tick instead of %stick, patch the VDSO
-		 * with instruction reading %tick instead of %stick.
-		 */
-		unsigned int j, k = SAVE_INSTR_SIZE;
-		unsigned char *data = image->data;
-
-		for (j = image->sym_vread_tick_patch_start;
-		     j < image->sym_vread_tick_patch_end; j++) {
-			data[image->sym_vread_tick + k] = data[j];
-			k++;
-		}
-	}
+	if (tlb_type != spitfire)
+		stick_patch(image);

	for (i = 0; i < cnpages; i++) {
		cp = alloc_page(GFP_KERNEL);
......