Commit 1deab8ce authored by Linus Torvalds's avatar Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc

Pull sparc updates from David Miller:

 1) Add missing cmpxchg64() for 32-bit sparc.

 2) Timer conversions from Allen Pais and Kees Cook.

 3) vDSO support, from Nagarathnam Muthusamy.

 4) Fix sparc64 huge page table walks based upon bug report by Al Viro,
    from Nitin Gupta.

 5) Optimized fls() for T4 and above, from Vijay Kumar.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc:
  sparc64: Fix page table walk for PUD hugepages
  sparc64: Convert timers to use timer_setup()
  sparc64: convert mdesc_handle.refcnt from atomic_t to refcount_t
  sparc/led: Convert timers to use timer_setup()
  sparc64: Use sparc optimized fls and __fls for T4 and above
  sparc64: SPARC optimized __fls function
  sparc64: SPARC optimized fls function
  sparc64: Define SPARC default __fls function
  sparc64: Define SPARC default fls function
  vDSO for sparc
  sparc32: Add cmpxchg64().
  sbus: char: Move D7S_MINOR to include/linux/miscdevice.h
  sparc: time: Remove unneeded linux/miscdevice.h include
  sparc64: mmu_context: Add missing include files
parents 81700247 70f3c8b7
...@@ -7,3 +7,4 @@ obj-y += mm/ ...@@ -7,3 +7,4 @@ obj-y += mm/
obj-y += math-emu/ obj-y += math-emu/
obj-y += net/ obj-y += net/
obj-y += crypto/ obj-y += crypto/
obj-$(CONFIG_SPARC64) += vdso/
...@@ -84,6 +84,8 @@ config SPARC64 ...@@ -84,6 +84,8 @@ config SPARC64
select HAVE_REGS_AND_STACK_ACCESS_API select HAVE_REGS_AND_STACK_ACCESS_API
select ARCH_USE_QUEUED_RWLOCKS select ARCH_USE_QUEUED_RWLOCKS
select ARCH_USE_QUEUED_SPINLOCKS select ARCH_USE_QUEUED_SPINLOCKS
select GENERIC_TIME_VSYSCALL
select ARCH_CLOCKSOURCE_DATA
config ARCH_DEFCONFIG config ARCH_DEFCONFIG
string string
......
...@@ -81,6 +81,10 @@ install: ...@@ -81,6 +81,10 @@ install:
archclean: archclean:
$(Q)$(MAKE) $(clean)=$(boot) $(Q)$(MAKE) $(clean)=$(boot)
PHONY += vdso_install
vdso_install:
$(Q)$(MAKE) $(build)=arch/sparc/vdso $@
# This is the image used for packaging # This is the image used for packaging
KBUILD_IMAGE := $(boot)/zImage KBUILD_IMAGE := $(boot)/zImage
......
...@@ -23,10 +23,11 @@ void set_bit(unsigned long nr, volatile unsigned long *addr); ...@@ -23,10 +23,11 @@ void set_bit(unsigned long nr, volatile unsigned long *addr);
void clear_bit(unsigned long nr, volatile unsigned long *addr); void clear_bit(unsigned long nr, volatile unsigned long *addr);
void change_bit(unsigned long nr, volatile unsigned long *addr); void change_bit(unsigned long nr, volatile unsigned long *addr);
int fls(unsigned int word);
int __fls(unsigned long word);
#include <asm-generic/bitops/non-atomic.h> #include <asm-generic/bitops/non-atomic.h>
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h> #include <asm-generic/bitops/fls64.h>
#ifdef __KERNEL__ #ifdef __KERNEL__
......
/*
 * Copyright (c) 2017 Oracle and/or its affiliates. All rights reserved.
 */
#ifndef _ASM_SPARC_CLOCKSOURCE_H
#define _ASM_SPARC_CLOCKSOURCE_H

/* VDSO clocksources */
#define VCLOCK_NONE 0 /* Nothing userspace can do. */
#define VCLOCK_TICK 1 /* Use %tick. */
#define VCLOCK_STICK 2 /* Use %stick. */

/*
 * Lives in struct clocksource as ->archdata (see clocksource_tick.archdata
 * users): tells the vDSO which, if any, VCLOCK_* read method may be used
 * from userspace for this clocksource.
 */
struct arch_clocksource_data {
	int vclock_mode;	/* one of the VCLOCK_* values above */
};

#endif /* _ASM_SPARC_CLOCKSOURCE_H */
...@@ -63,6 +63,9 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size) ...@@ -63,6 +63,9 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
(unsigned long)_n_, sizeof(*(ptr))); \ (unsigned long)_n_, sizeof(*(ptr))); \
}) })
u64 __cmpxchg_u64(u64 *ptr, u64 old, u64 new);
#define cmpxchg64(ptr, old, new) __cmpxchg_u64(ptr, old, new)
#include <asm-generic/cmpxchg-local.h> #include <asm-generic/cmpxchg-local.h>
/* /*
......
...@@ -211,4 +211,18 @@ do { if ((ex).e_ident[EI_CLASS] == ELFCLASS32) \ ...@@ -211,4 +211,18 @@ do { if ((ex).e_ident[EI_CLASS] == ELFCLASS32) \
(current->personality & (~PER_MASK))); \ (current->personality & (~PER_MASK))); \
} while (0) } while (0)
extern unsigned int vdso_enabled;
#define ARCH_DLINFO \
do { \
if (vdso_enabled) \
NEW_AUX_ENT(AT_SYSINFO_EHDR, \
(unsigned long)current->mm->context.vdso); \
} while (0)
struct linux_binprm;
#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
extern int arch_setup_additional_pages(struct linux_binprm *bprm,
int uses_interp);
#endif /* !(__ASM_SPARC64_ELF_H) */ #endif /* !(__ASM_SPARC64_ELF_H) */
...@@ -97,6 +97,7 @@ typedef struct { ...@@ -97,6 +97,7 @@ typedef struct {
unsigned long thp_pte_count; unsigned long thp_pte_count;
struct tsb_config tsb_block[MM_NUM_TSBS]; struct tsb_config tsb_block[MM_NUM_TSBS];
struct hv_tsb_descr tsb_descr[MM_NUM_TSBS]; struct hv_tsb_descr tsb_descr[MM_NUM_TSBS];
void *vdso;
} mm_context_t; } mm_context_t;
#endif /* !__ASSEMBLY__ */ #endif /* !__ASSEMBLY__ */
......
...@@ -8,9 +8,11 @@ ...@@ -8,9 +8,11 @@
#include <linux/spinlock.h> #include <linux/spinlock.h>
#include <linux/mm_types.h> #include <linux/mm_types.h>
#include <linux/smp.h>
#include <asm/spitfire.h> #include <asm/spitfire.h>
#include <asm-generic/mm_hooks.h> #include <asm-generic/mm_hooks.h>
#include <asm/percpu.h>
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{ {
......
...@@ -200,6 +200,13 @@ unsigned long get_wchan(struct task_struct *task); ...@@ -200,6 +200,13 @@ unsigned long get_wchan(struct task_struct *task);
* To make a long story short, we are trying to yield the current cpu * To make a long story short, we are trying to yield the current cpu
* strand during busy loops. * strand during busy loops.
*/ */
#ifdef BUILD_VDSO
#define cpu_relax() asm volatile("\n99:\n\t" \
"rd %%ccr, %%g0\n\t" \
"rd %%ccr, %%g0\n\t" \
"rd %%ccr, %%g0\n\t" \
::: "memory")
#else /* ! BUILD_VDSO */
#define cpu_relax() asm volatile("\n99:\n\t" \ #define cpu_relax() asm volatile("\n99:\n\t" \
"rd %%ccr, %%g0\n\t" \ "rd %%ccr, %%g0\n\t" \
"rd %%ccr, %%g0\n\t" \ "rd %%ccr, %%g0\n\t" \
...@@ -211,6 +218,7 @@ unsigned long get_wchan(struct task_struct *task); ...@@ -211,6 +218,7 @@ unsigned long get_wchan(struct task_struct *task);
"nop\n\t" \ "nop\n\t" \
".previous" \ ".previous" \
::: "memory") ::: "memory")
#endif
/* Prefetch support. This is tuned for UltraSPARC-III and later. /* Prefetch support. This is tuned for UltraSPARC-III and later.
* UltraSPARC-I will treat these as nops, and UltraSPARC-II has * UltraSPARC-I will treat these as nops, and UltraSPARC-II has
......
...@@ -217,7 +217,7 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end; ...@@ -217,7 +217,7 @@ extern struct tsb_phys_patch_entry __tsb_phys_patch, __tsb_phys_patch_end;
sllx REG2, 32, REG2; \ sllx REG2, 32, REG2; \
andcc REG1, REG2, %g0; \ andcc REG1, REG2, %g0; \
be,pt %xcc, 700f; \ be,pt %xcc, 700f; \
sethi %hi(0x1ffc0000), REG2; \ sethi %hi(0xffe00000), REG2; \
sllx REG2, 1, REG2; \ sllx REG2, 1, REG2; \
brgez,pn REG1, FAIL_LABEL; \ brgez,pn REG1, FAIL_LABEL; \
andn REG1, REG2, REG1; \ andn REG1, REG2, REG1; \
......
/*
 * Copyright (c) 2017 Oracle and/or its affiliates. All rights reserved.
 */
#ifndef _ASM_SPARC_VDSO_H
#define _ASM_SPARC_VDSO_H

/*
 * Description of a prebuilt vDSO image: the raw ELF blob plus the symbol
 * offsets the kernel needs when mapping (and possibly patching) it into a
 * process.  Generated at build time by vdso2c into vdso-image-*.c.
 */
struct vdso_image {
	void *data;		/* the ELF image itself */
	unsigned long size;	/* Always a multiple of PAGE_SIZE */

	long sym_vvar_start;	/* Negative offset to the vvar area */

	long sym_vread_tick;	/* Start of vread_tick section */
	long sym_vread_tick_patch_start;	/* Start of tick read */
	long sym_vread_tick_patch_end;		/* End of tick read */
};

#ifdef CONFIG_SPARC64
extern const struct vdso_image vdso_image_64_builtin;
#endif
#ifdef CONFIG_COMPAT
extern const struct vdso_image vdso_image_32_builtin;
#endif

#endif /* _ASM_SPARC_VDSO_H */
/*
 * Copyright (c) 2017 Oracle and/or its affiliates. All rights reserved.
 */
#ifndef _ASM_SPARC_VVAR_DATA_H
#define _ASM_SPARC_VVAR_DATA_H

#include <asm/clocksource.h>
#include <linux/seqlock.h>
#include <linux/time.h>
#include <linux/types.h>

/*
 * Kernel-maintained time data shared read-only with userspace through the
 * vvar page mapped just below the vDSO.  The kernel (writer) and the vDSO
 * (reader) synchronize through the hand-rolled sequence counter 'seq':
 * an odd value means an update is in progress.
 */
struct vvar_data {
	unsigned int seq;	/* sequence counter; odd while being updated */
	int vclock_mode;	/* VCLOCK_* method usable from userspace */
	struct { /* extract of a clocksource struct */
		u64	cycle_last;
		u64	mask;
		int	mult;
		int	shift;
	} clock;
	/* open coded 'struct timespec' */
	u64		wall_time_sec;
	u64		wall_time_snsec;	/* shifted ns: ns << clock.shift */
	u64		monotonic_time_snsec;	/* shifted ns: ns << clock.shift */
	u64		monotonic_time_sec;
	u64		monotonic_time_coarse_sec;
	u64		monotonic_time_coarse_nsec;
	u64		wall_time_coarse_sec;
	u64		wall_time_coarse_nsec;

	int		tz_minuteswest;
	int		tz_dsttime;
};

extern struct vvar_data *vvar_data;
/* nonzero: the vDSO tick read needs boot-time patching (%tick vs %stick)
 * — see the .vread_tick_patch section; TODO confirm against vma.c */
extern int vdso_fix_stick;

/*
 * Reader side of the sequence counter: spin until no update is in
 * progress, then return the (even) sequence value observed.
 */
static inline unsigned int vvar_read_begin(const struct vvar_data *s)
{
	unsigned int ret;

repeat:
	ret = READ_ONCE(s->seq);
	if (unlikely(ret & 1)) {	/* odd: writer active, wait and retry */
		cpu_relax();
		goto repeat;
	}
	smp_rmb(); /* Finish all reads before we return seq */
	return ret;
}

/*
 * Reader side: returns nonzero if a writer raced with the read section,
 * i.e. data read since vvar_read_begin() may be inconsistent.
 */
static inline int vvar_read_retry(const struct vvar_data *s,
					unsigned int start)
{
	smp_rmb(); /* Finish all reads before checking the value of seq */
	return unlikely(s->seq != start);
}

/* Writer side: make seq odd before modifying the data. */
static inline void vvar_write_begin(struct vvar_data *s)
{
	++s->seq;
	smp_wmb(); /* Make the seq increment visible before the data stores */
}

/* Writer side: publish all data stores, then make seq even again. */
static inline void vvar_write_end(struct vvar_data *s)
{
	smp_wmb(); /* Complete the data stores before the final increment */
	++s->seq;
}

#endif /* _ASM_SPARC_VVAR_DATA_H */
#ifndef __ASMSPARC_AUXVEC_H #ifndef __ASMSPARC_AUXVEC_H
#define __ASMSPARC_AUXVEC_H #define __ASMSPARC_AUXVEC_H
#define AT_SYSINFO_EHDR 33
#define AT_VECTOR_SIZE_ARCH 1
#endif /* !(__ASMSPARC_AUXVEC_H) */ #endif /* !(__ASMSPARC_AUXVEC_H) */
...@@ -43,6 +43,7 @@ obj-$(CONFIG_SPARC32) += systbls_32.o ...@@ -43,6 +43,7 @@ obj-$(CONFIG_SPARC32) += systbls_32.o
obj-y += time_$(BITS).o obj-y += time_$(BITS).o
obj-$(CONFIG_SPARC32) += windows.o obj-$(CONFIG_SPARC32) += windows.o
obj-y += cpu.o obj-y += cpu.o
obj-$(CONFIG_SPARC64) += vdso.o
obj-$(CONFIG_SPARC32) += devices.o obj-$(CONFIG_SPARC32) += devices.o
obj-y += ptrace_$(BITS).o obj-y += ptrace_$(BITS).o
obj-y += unaligned_$(BITS).o obj-y += unaligned_$(BITS).o
......
...@@ -641,6 +641,8 @@ niagara4_patch: ...@@ -641,6 +641,8 @@ niagara4_patch:
nop nop
call niagara4_patch_pageops call niagara4_patch_pageops
nop nop
call niagara4_patch_fls
nop
ba,a,pt %xcc, 80f ba,a,pt %xcc, 80f
nop nop
......
...@@ -13,6 +13,7 @@ ...@@ -13,6 +13,7 @@
#include <linux/miscdevice.h> #include <linux/miscdevice.h>
#include <linux/bootmem.h> #include <linux/bootmem.h>
#include <linux/export.h> #include <linux/export.h>
#include <linux/refcount.h>
#include <asm/cpudata.h> #include <asm/cpudata.h>
#include <asm/hypervisor.h> #include <asm/hypervisor.h>
...@@ -71,7 +72,7 @@ struct mdesc_handle { ...@@ -71,7 +72,7 @@ struct mdesc_handle {
struct list_head list; struct list_head list;
struct mdesc_mem_ops *mops; struct mdesc_mem_ops *mops;
void *self_base; void *self_base;
atomic_t refcnt; refcount_t refcnt;
unsigned int handle_size; unsigned int handle_size;
struct mdesc_hdr mdesc; struct mdesc_hdr mdesc;
}; };
...@@ -153,7 +154,7 @@ static void mdesc_handle_init(struct mdesc_handle *hp, ...@@ -153,7 +154,7 @@ static void mdesc_handle_init(struct mdesc_handle *hp,
memset(hp, 0, handle_size); memset(hp, 0, handle_size);
INIT_LIST_HEAD(&hp->list); INIT_LIST_HEAD(&hp->list);
hp->self_base = base; hp->self_base = base;
atomic_set(&hp->refcnt, 1); refcount_set(&hp->refcnt, 1);
hp->handle_size = handle_size; hp->handle_size = handle_size;
} }
...@@ -183,7 +184,7 @@ static void __init mdesc_memblock_free(struct mdesc_handle *hp) ...@@ -183,7 +184,7 @@ static void __init mdesc_memblock_free(struct mdesc_handle *hp)
unsigned int alloc_size; unsigned int alloc_size;
unsigned long start; unsigned long start;
BUG_ON(atomic_read(&hp->refcnt) != 0); BUG_ON(refcount_read(&hp->refcnt) != 0);
BUG_ON(!list_empty(&hp->list)); BUG_ON(!list_empty(&hp->list));
alloc_size = PAGE_ALIGN(hp->handle_size); alloc_size = PAGE_ALIGN(hp->handle_size);
...@@ -221,7 +222,7 @@ static struct mdesc_handle *mdesc_kmalloc(unsigned int mdesc_size) ...@@ -221,7 +222,7 @@ static struct mdesc_handle *mdesc_kmalloc(unsigned int mdesc_size)
static void mdesc_kfree(struct mdesc_handle *hp) static void mdesc_kfree(struct mdesc_handle *hp)
{ {
BUG_ON(atomic_read(&hp->refcnt) != 0); BUG_ON(refcount_read(&hp->refcnt) != 0);
BUG_ON(!list_empty(&hp->list)); BUG_ON(!list_empty(&hp->list));
kfree(hp->self_base); kfree(hp->self_base);
...@@ -260,7 +261,7 @@ struct mdesc_handle *mdesc_grab(void) ...@@ -260,7 +261,7 @@ struct mdesc_handle *mdesc_grab(void)
spin_lock_irqsave(&mdesc_lock, flags); spin_lock_irqsave(&mdesc_lock, flags);
hp = cur_mdesc; hp = cur_mdesc;
if (hp) if (hp)
atomic_inc(&hp->refcnt); refcount_inc(&hp->refcnt);
spin_unlock_irqrestore(&mdesc_lock, flags); spin_unlock_irqrestore(&mdesc_lock, flags);
return hp; return hp;
...@@ -272,7 +273,7 @@ void mdesc_release(struct mdesc_handle *hp) ...@@ -272,7 +273,7 @@ void mdesc_release(struct mdesc_handle *hp)
unsigned long flags; unsigned long flags;
spin_lock_irqsave(&mdesc_lock, flags); spin_lock_irqsave(&mdesc_lock, flags);
if (atomic_dec_and_test(&hp->refcnt)) { if (refcount_dec_and_test(&hp->refcnt)) {
list_del_init(&hp->list); list_del_init(&hp->list);
hp->mops->free(hp); hp->mops->free(hp);
} }
...@@ -514,7 +515,7 @@ void mdesc_update(void) ...@@ -514,7 +515,7 @@ void mdesc_update(void)
if (status != HV_EOK || real_len > len) { if (status != HV_EOK || real_len > len) {
printk(KERN_ERR "MD: mdesc reread fails with %lu\n", printk(KERN_ERR "MD: mdesc reread fails with %lu\n",
status); status);
atomic_dec(&hp->refcnt); refcount_dec(&hp->refcnt);
mdesc_free(hp); mdesc_free(hp);
goto out; goto out;
} }
...@@ -527,7 +528,7 @@ void mdesc_update(void) ...@@ -527,7 +528,7 @@ void mdesc_update(void)
mdesc_notify_clients(orig_hp, hp); mdesc_notify_clients(orig_hp, hp);
spin_lock_irqsave(&mdesc_lock, flags); spin_lock_irqsave(&mdesc_lock, flags);
if (atomic_dec_and_test(&orig_hp->refcnt)) if (refcount_dec_and_test(&orig_hp->refcnt))
mdesc_free(orig_hp); mdesc_free(orig_hp);
else else
list_add(&orig_hp->list, &mdesc_zombie_list); list_add(&orig_hp->list, &mdesc_zombie_list);
......
...@@ -28,7 +28,6 @@ ...@@ -28,7 +28,6 @@
#include <linux/jiffies.h> #include <linux/jiffies.h>
#include <linux/cpufreq.h> #include <linux/cpufreq.h>
#include <linux/percpu.h> #include <linux/percpu.h>
#include <linux/miscdevice.h>
#include <linux/rtc/m48t59.h> #include <linux/rtc/m48t59.h>
#include <linux/kernel_stat.h> #include <linux/kernel_stat.h>
#include <linux/clockchips.h> #include <linux/clockchips.h>
...@@ -54,6 +53,8 @@ ...@@ -54,6 +53,8 @@
DEFINE_SPINLOCK(rtc_lock); DEFINE_SPINLOCK(rtc_lock);
unsigned int __read_mostly vdso_fix_stick;
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
unsigned long profile_pc(struct pt_regs *regs) unsigned long profile_pc(struct pt_regs *regs)
{ {
...@@ -831,12 +832,17 @@ static void init_tick_ops(struct sparc64_tick_ops *ops) ...@@ -831,12 +832,17 @@ static void init_tick_ops(struct sparc64_tick_ops *ops)
void __init time_init_early(void) void __init time_init_early(void)
{ {
if (tlb_type == spitfire) { if (tlb_type == spitfire) {
if (is_hummingbird()) if (is_hummingbird()) {
init_tick_ops(&hbtick_operations); init_tick_ops(&hbtick_operations);
else clocksource_tick.archdata.vclock_mode = VCLOCK_NONE;
} else {
init_tick_ops(&tick_operations); init_tick_ops(&tick_operations);
clocksource_tick.archdata.vclock_mode = VCLOCK_TICK;
vdso_fix_stick = 1;
}
} else { } else {
init_tick_ops(&stick_operations); init_tick_ops(&stick_operations);
clocksource_tick.archdata.vclock_mode = VCLOCK_STICK;
} }
} }
......
/*
* Copyright (C) 2001 Andrea Arcangeli <andrea@suse.de> SuSE
* Copyright 2003 Andi Kleen, SuSE Labs.
*
* Thanks to hpa@transmeta.com for some useful hint.
* Special thanks to Ingo Molnar for his early experience with
* a different vsyscall implementation for Linux/IA32 and for the name.
*/
#include <linux/seqlock.h>
#include <linux/time.h>
#include <linux/timekeeper_internal.h>
#include <asm/vvar.h>
void update_vsyscall_tz(void)
{
if (unlikely(vvar_data == NULL))
return;
vvar_data->tz_minuteswest = sys_tz.tz_minuteswest;
vvar_data->tz_dsttime = sys_tz.tz_dsttime;
}
/*
 * Publish a new timekeeper snapshot into the vvar page.  All stores are
 * bracketed by vvar_write_begin()/vvar_write_end() so vDSO readers see
 * either the complete old snapshot or the complete new one, never a mix.
 */
void update_vsyscall(struct timekeeper *tk)
{
	struct vvar_data *vdata = vvar_data;

	/* vvar page not mapped/allocated yet: nothing to publish. */
	if (unlikely(vdata == NULL))
		return;

	vvar_write_begin(vdata);
	vdata->vclock_mode = tk->tkr_mono.clock->archdata.vclock_mode;
	vdata->clock.cycle_last = tk->tkr_mono.cycle_last;
	vdata->clock.mask = tk->tkr_mono.mask;
	vdata->clock.mult = tk->tkr_mono.mult;
	vdata->clock.shift = tk->tkr_mono.shift;
	vdata->wall_time_sec = tk->xtime_sec;
	/* *_snsec fields hold "shifted nanoseconds" (ns << clock.shift). */
	vdata->wall_time_snsec = tk->tkr_mono.xtime_nsec;
	vdata->monotonic_time_sec = tk->xtime_sec +
		tk->wall_to_monotonic.tv_sec;
	vdata->monotonic_time_snsec = tk->tkr_mono.xtime_nsec +
		(tk->wall_to_monotonic.tv_nsec <<
		 tk->tkr_mono.shift);
	/* Normalize: keep snsec below one second's worth of shifted ns. */
	while (vdata->monotonic_time_snsec >=
	       (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift)) {
		vdata->monotonic_time_snsec -=
			((u64)NSEC_PER_SEC) << tk->tkr_mono.shift;
		vdata->monotonic_time_sec++;
	}
	/* Coarse clocks store plain (unshifted) nanoseconds. */
	vdata->wall_time_coarse_sec = tk->xtime_sec;
	vdata->wall_time_coarse_nsec =
		(long)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);
	vdata->monotonic_time_coarse_sec =
		vdata->wall_time_coarse_sec + tk->wall_to_monotonic.tv_sec;
	vdata->monotonic_time_coarse_nsec =
		vdata->wall_time_coarse_nsec + tk->wall_to_monotonic.tv_nsec;
	while (vdata->monotonic_time_coarse_nsec >= NSEC_PER_SEC) {
		vdata->monotonic_time_coarse_nsec -= NSEC_PER_SEC;
		vdata->monotonic_time_coarse_sec++;
	}

	vvar_write_end(vdata);
}
...@@ -798,9 +798,9 @@ void vio_port_up(struct vio_driver_state *vio) ...@@ -798,9 +798,9 @@ void vio_port_up(struct vio_driver_state *vio)
} }
EXPORT_SYMBOL(vio_port_up); EXPORT_SYMBOL(vio_port_up);
static void vio_port_timer(unsigned long _arg) static void vio_port_timer(struct timer_list *t)
{ {
struct vio_driver_state *vio = (struct vio_driver_state *) _arg; struct vio_driver_state *vio = from_timer(vio, t, timer);
vio_port_up(vio); vio_port_up(vio);
} }
...@@ -849,7 +849,7 @@ int vio_driver_init(struct vio_driver_state *vio, struct vio_dev *vdev, ...@@ -849,7 +849,7 @@ int vio_driver_init(struct vio_driver_state *vio, struct vio_dev *vdev,
vio->ops = ops; vio->ops = ops;
setup_timer(&vio->timer, vio_port_timer, (unsigned long) vio); timer_setup(&vio->timer, vio_port_timer, 0);
return 0; return 0;
} }
......
...@@ -17,6 +17,9 @@ lib-$(CONFIG_SPARC64) += atomic_64.o ...@@ -17,6 +17,9 @@ lib-$(CONFIG_SPARC64) += atomic_64.o
lib-$(CONFIG_SPARC32) += lshrdi3.o ashldi3.o lib-$(CONFIG_SPARC32) += lshrdi3.o ashldi3.o
lib-$(CONFIG_SPARC32) += muldi3.o bitext.o cmpdi2.o lib-$(CONFIG_SPARC32) += muldi3.o bitext.o cmpdi2.o
lib-$(CONFIG_SPARC64) += multi3.o lib-$(CONFIG_SPARC64) += multi3.o
lib-$(CONFIG_SPARC64) += fls.o
lib-$(CONFIG_SPARC64) += fls64.o
obj-$(CONFIG_SPARC64) += NG4fls.o
lib-$(CONFIG_SPARC64) += copy_page.o clear_page.o bzero.o lib-$(CONFIG_SPARC64) += copy_page.o clear_page.o bzero.o
lib-$(CONFIG_SPARC64) += csum_copy.o csum_copy_from_user.o csum_copy_to_user.o lib-$(CONFIG_SPARC64) += csum_copy.o csum_copy_from_user.o csum_copy_to_user.o
......
/* NG4fls.S: SPARC optimized fls and __fls for T4 and above.
 *
 * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
 */

#include <linux/linkage.h>

/* Raw encoding of the T4+ "lzcnt %o0, %g2" (leading-zero count)
 * instruction, spelled as .word so older assemblers still build this.
 */
#define LZCNT_O0_G2	\
	.word	0x85b002e8

	.text
	.register	%g2, #scratch
	.register	%g3, #scratch

/* fls: 1-based index of the most significant set bit, i.e. 64 - lzcnt
 * (returns 0 for input 0 since lzcnt then yields 64).
 */
ENTRY(NG4fls)
	LZCNT_O0_G2	!lzcnt %o0, %g2
	mov	64, %g3
	retl
	 sub	%g3, %g2, %o0		! %o0 = 64 - lzcnt
ENDPROC(NG4fls)

/* __fls: 0-based bit number of the MSB, i.e. 63 - lzcnt.
 * NOTE(review): for %o0 == 0 the input is returned unchanged;
 * __fls(0) is undefined by the generic API, so that is acceptable.
 */
ENTRY(__NG4fls)
	brz,pn	%o0, 1f
	 LZCNT_O0_G2	!lzcnt %o0, %g2
	mov	63, %g3
	sub	%g3, %g2, %o0		! %o0 = 63 - lzcnt
1:
	retl
	 nop
ENDPROC(__NG4fls)
...@@ -4,6 +4,8 @@ ...@@ -4,6 +4,8 @@
* Copyright (C) 2012 David S. Miller <davem@davemloft.net> * Copyright (C) 2012 David S. Miller <davem@davemloft.net>
*/ */
#include <linux/linkage.h>
#define BRANCH_ALWAYS 0x10680000 #define BRANCH_ALWAYS 0x10680000
#define NOP 0x01000000 #define NOP 0x01000000
#define NG_DO_PATCH(OLD, NEW) \ #define NG_DO_PATCH(OLD, NEW) \
...@@ -53,3 +55,10 @@ niagara4_patch_pageops: ...@@ -53,3 +55,10 @@ niagara4_patch_pageops:
retl retl
nop nop
.size niagara4_patch_pageops,.-niagara4_patch_pageops .size niagara4_patch_pageops,.-niagara4_patch_pageops
ENTRY(niagara4_patch_fls)
NG_DO_PATCH(fls, NG4fls)
NG_DO_PATCH(__fls, __NG4fls)
retl
nop
ENDPROC(niagara4_patch_fls)
...@@ -173,6 +173,20 @@ unsigned long __cmpxchg_u32(volatile u32 *ptr, u32 old, u32 new) ...@@ -173,6 +173,20 @@ unsigned long __cmpxchg_u32(volatile u32 *ptr, u32 old, u32 new)
} }
EXPORT_SYMBOL(__cmpxchg_u32); EXPORT_SYMBOL(__cmpxchg_u32);
u64 __cmpxchg_u64(u64 *ptr, u64 old, u64 new)
{
unsigned long flags;
u64 prev;
spin_lock_irqsave(ATOMIC_HASH(ptr), flags);
if ((prev = *ptr) == old)
*ptr = new;
spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags);
return prev;
}
EXPORT_SYMBOL(__cmpxchg_u64);
unsigned long __xchg_u32(volatile u32 *ptr, u32 new) unsigned long __xchg_u32(volatile u32 *ptr, u32 new)
{ {
unsigned long flags; unsigned long flags;
......
/* fls.S: SPARC default fls definition.
 *
 * SPARC default fls definition, which follows the same algorithm as
 * in generic fls(). This function will be boot time patched on T4
 * and onward (replaced by NG4fls, see niagara4_patch_fls).
 */

#include <linux/linkage.h>
#include <asm/export.h>

	.text
	.register	%g2, #scratch
	.register	%g3, #scratch

/*
 * int fls(unsigned int word): 1-based index of the highest set bit,
 * 0 if no bit is set.  Binary search over 16/8/4/2/1-bit halves: the
 * word is shifted left in %o0 while the candidate index is tracked in
 * %o1; note the branch delay slots carry part of each step.
 */
ENTRY(fls)
	brz,pn	%o0, 6f			! word == 0 -> return %o1 == 0
	 mov	0, %o1
	sethi	%hi(0xffff0000), %g3	! any bit in the upper half?
	mov	%o0, %g2
	andcc	%o0, %g3, %g0
	be,pt	%icc, 8f
	 mov	32, %o1
	sethi	%hi(0xff000000), %g3	! narrow to 8-bit window
	andcc	%g2, %g3, %g0
	bne,pt	%icc, 3f
	 sethi	%hi(0xf0000000), %g3
	sll	%o0, 8, %o0
1:
	add	%o1, -8, %o1
	sra	%o0, 0, %o0
	mov	%o0, %g2
2:
	sethi	%hi(0xf0000000), %g3	! narrow to 4-bit window
3:
	andcc	%g2, %g3, %g0
	bne,pt	%icc, 4f
	 sethi	%hi(0xc0000000), %g3
	sll	%o0, 4, %o0
	add	%o1, -4, %o1
	sra	%o0, 0, %o0
	mov	%o0, %g2
4:
	andcc	%g2, %g3, %g0		! narrow to 2-bit window
	be,a,pt	%icc, 7f
	 sll	%o0, 2, %o0
5:
	xnor	%g0, %o0, %o0		! final 1-bit step: subtract top bit's
	srl	%o0, 31, %o0		! complement from the accumulated index
	sub	%o1, %o0, %o1
6:
	jmp	%o7 + 8			! return, result sign-extended in %o0
	 sra	%o1, 0, %o0
7:
	add	%o1, -2, %o1
	ba,pt	%xcc, 5b
	 sra	%o0, 0, %o0
8:
	sll	%o0, 16, %o0		! bit is in the lower 16: shift up and
	sethi	%hi(0xff000000), %g3	! continue the search with index 16
	sra	%o0, 0, %o0
	mov	%o0, %g2
	andcc	%g2, %g3, %g0
	bne,pt	%icc, 2b
	 mov	16, %o1
	ba,pt	%xcc, 1b
	 sll	%o0, 8, %o0
ENDPROC(fls)
EXPORT_SYMBOL(fls)
/* fls64.S: SPARC default __fls definition.
 *
 * SPARC default __fls definition, which follows the same algorithm as
 * in generic __fls(). This function will be boot time patched on T4
 * and onward (replaced by __NG4fls, see niagara4_patch_fls).
 */

#include <linux/linkage.h>
#include <asm/export.h>

	.text
	.register	%g2, #scratch
	.register	%g3, #scratch

/*
 * unsigned long __fls(unsigned long word): 0-based bit number of the
 * most significant set bit (undefined for word == 0).  Binary search
 * over 32/16/8/4/2/1-bit halves: the word is shifted left in %o0 while
 * the candidate bit number is tracked in %g1; masks are built in %g2
 * (and %g3 for the last steps).  Delay slots are part of the algorithm.
 */
ENTRY(__fls)
	mov	-1, %g2
	sllx	%g2, 32, %g2		! test the upper 32 bits
	and	%o0, %g2, %g2
	brnz,pt	%g2, 1f
	 mov	63, %g1
	sllx	%o0, 32, %o0		! MSB is in the lower half
	mov	31, %g1
1:
	mov	-1, %g2
	sllx	%g2, 48, %g2		! test the top 16 bits
	and	%o0, %g2, %g2
	brnz,pt	%g2, 2f
	 mov	-1, %g2
	sllx	%o0, 16, %o0
	add	%g1, -16, %g1
2:
	mov	-1, %g2
	sllx	%g2, 56, %g2		! test the top 8 bits
	and	%o0, %g2, %g2
	brnz,pt	%g2, 3f
	 mov	-1, %g2
	sllx	%o0, 8, %o0
	add	%g1, -8, %g1
3:
	sllx	%g2, 60, %g2		! test the top 4 bits
	and	%o0, %g2, %g2
	brnz,pt	%g2, 4f
	 mov	-1, %g2
	sllx	%o0, 4, %o0
	add	%g1, -4, %g1
4:
	sllx	%g2, 62, %g2		! test the top 2 bits
	and	%o0, %g2, %g2
	brnz,pt	%g2, 5f
	 mov	-1, %g3
	sllx	%o0, 2, %o0
	add	%g1, -2, %g1
5:
	mov	0, %g2			! final 1-bit step via movre
	sllx	%g3, 63, %g3
	and	%o0, %g3, %o0
	movre	%o0, 1, %g2		! top bit clear -> subtract one more
	sub	%g1, %g2, %g1
	jmp	%o7+8
	 sra	%g1, 0, %o0
ENDPROC(__fls)
EXPORT_SYMBOL(__fls)
vdso.lds
vdso-image-*.c
vdso2c
#
# Building vDSO images for sparc.
#

KBUILD_CFLAGS += $(DISABLE_LTO)

VDSO64-$(CONFIG_SPARC64)	:= y
VDSOCOMPAT-$(CONFIG_COMPAT)	:= y

# files to link into the vdso
vobjs-y := vdso-note.o vclock_gettime.o

# files to link into kernel
obj-y += vma.o

# vDSO images to build
vdso_img-$(VDSO64-y)		+= 64
vdso_img-$(VDSOCOMPAT-y)	+= 32

vobjs := $(foreach F,$(vobjs-y),$(obj)/$F)

$(obj)/vdso.o: $(obj)/vdso.so

targets += vdso.lds $(vobjs-y)

# Build the vDSO image C files and link them in.
vdso_img_objs := $(vdso_img-y:%=vdso-image-%.o)
vdso_img_cfiles := $(vdso_img-y:%=vdso-image-%.c)
vdso_img_sodbg := $(vdso_img-y:%=vdso%.so.dbg)
obj-y += $(vdso_img_objs)
targets += $(vdso_img_cfiles)
targets += $(vdso_img_sodbg)
# keep intermediates so 'make' does not delete the .so/.c between stages
.SECONDARY: $(vdso_img-y:%=$(obj)/vdso-image-%.c) \
	$(vdso_img-y:%=$(obj)/vdso%.so)

export CPPFLAGS_vdso.lds += -P -C

# sparc64 uses 8K pages; keep the ELF segment alignment in sync
VDSO_LDFLAGS_vdso.lds = -m64 -Wl,-soname=linux-vdso.so.1 \
			-Wl,--no-undefined \
			-Wl,-z,max-page-size=8192 -Wl,-z,common-page-size=8192 \
			$(DISABLE_LTO)

$(obj)/vdso64.so.dbg: $(src)/vdso.lds $(vobjs) FORCE
	$(call if_changed,vdso)

# vdso2c is a host tool that converts the linked shared object into a
# C image file (vdso-image-*.c) that gets compiled into the kernel.
HOST_EXTRACFLAGS += -I$(srctree)/tools/include
hostprogs-y			+= vdso2c

quiet_cmd_vdso2c = VDSO2C  $@
define cmd_vdso2c
	$(obj)/vdso2c $< $(<:%.dbg=%) $@
endef

$(obj)/vdso-image-%.c: $(obj)/vdso%.so.dbg $(obj)/vdso%.so $(obj)/vdso2c FORCE
	$(call if_changed,vdso2c)

#
# Don't omit frame pointers for ease of userspace debugging, but do
# optimize sibling calls.
#
CFL := $(PROFILING) -mcmodel=medlow -fPIC -O2 -fasynchronous-unwind-tables \
	-m64 -ffixed-g2 -ffixed-g3 -fcall-used-g4 -fcall-used-g5 -ffixed-g6 \
	-ffixed-g7 $(filter -g%,$(KBUILD_CFLAGS)) \
	$(call cc-option, -fno-stack-protector) -fno-omit-frame-pointer \
	-foptimize-sibling-calls -DBUILD_VDSO

$(vobjs): KBUILD_CFLAGS += $(CFL)

#
# vDSO code runs in userspace and -pg doesn't help with profiling anyway.
#
CFLAGS_REMOVE_vdso-note.o = -pg
CFLAGS_REMOVE_vclock_gettime.o = -pg

$(obj)/%.so: OBJCOPYFLAGS := -S
$(obj)/%.so: $(obj)/%.so.dbg
	$(call if_changed,objcopy)

CPPFLAGS_vdso32.lds = $(CPPFLAGS_vdso.lds)
VDSO_LDFLAGS_vdso32.lds = -m32 -Wl,-m,elf32_sparc,-soname=linux-gate.so.1

#This makes sure the $(obj) subdirectory exists even though vdso32/
#is not a kbuild sub-make subdirectory
override obj-dirs = $(dir $(obj)) $(obj)/vdso32/

targets += vdso32/vdso32.lds
targets += vdso32/vdso-note.o
targets += vdso32/vclock_gettime.o

# 32-bit (compat) vDSO: rebuild the same sources with -m32 flags.
KBUILD_AFLAGS_32 := $(filter-out -m64,$(KBUILD_AFLAGS)) -DBUILD_VDSO
$(obj)/vdso32.so.dbg: KBUILD_AFLAGS = $(KBUILD_AFLAGS_32)
$(obj)/vdso32.so.dbg: asflags-$(CONFIG_SPARC64) += -m32

KBUILD_CFLAGS_32 := $(filter-out -m64,$(KBUILD_CFLAGS))
KBUILD_CFLAGS_32 := $(filter-out -mcmodel=medlow,$(KBUILD_CFLAGS_32))
KBUILD_CFLAGS_32 := $(filter-out -fno-pic,$(KBUILD_CFLAGS_32))
KBUILD_CFLAGS_32 := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS_32))
KBUILD_CFLAGS_32 += -m32 -msoft-float -fpic -mno-app-regs -ffixed-g7
KBUILD_CFLAGS_32 += $(call cc-option, -fno-stack-protector)
KBUILD_CFLAGS_32 += $(call cc-option, -foptimize-sibling-calls)
KBUILD_CFLAGS_32 += -fno-omit-frame-pointer
KBUILD_CFLAGS_32 += -DDISABLE_BRANCH_PROFILING
KBUILD_CFLAGS_32 += -mv8plus
$(obj)/vdso32.so.dbg: KBUILD_CFLAGS = $(KBUILD_CFLAGS_32)

$(obj)/vdso32.so.dbg: FORCE \
		      $(obj)/vdso32/vdso32.lds \
		      $(obj)/vdso32/vclock_gettime.o \
		      $(obj)/vdso32/vdso-note.o
	$(call if_changed,vdso)

#
# The DSO images are built using a special linker script.
#
quiet_cmd_vdso = VDSO    $@
      cmd_vdso = $(CC) -nostdlib -o $@ \
		       $(VDSO_LDFLAGS) $(VDSO_LDFLAGS_$(filter %.lds,$(^F))) \
		       -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^)

VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) \
	$(call cc-ldoption, -Wl$(comma)--build-id) -Wl,-Bsymbolic
GCOV_PROFILE := n

#
# Install the unstripped copies of vdso*.so.  If our toolchain supports
# build-id, install .build-id links as well.
#
quiet_cmd_vdso_install = INSTALL $(@:install_%=%)
define cmd_vdso_install
	cp $< "$(MODLIB)/vdso/$(@:install_%=%)"; \
	if readelf -n $< |grep -q 'Build ID'; then \
	  buildid=`readelf -n $< |grep 'Build ID' |sed -e 's/^.*Build ID: \(.*\)$$/\1/'`; \
	  first=`echo $$buildid | cut -b-2`; \
	  last=`echo $$buildid | cut -b3-`; \
	  mkdir -p "$(MODLIB)/vdso/.build-id/$$first"; \
	  ln -sf "../../$(@:install_%=%)" "$(MODLIB)/vdso/.build-id/$$first/$$last.debug"; \
	fi
endef

vdso_img_insttargets := $(vdso_img_sodbg:%.dbg=install_%)

$(MODLIB)/vdso: FORCE
	@mkdir -p $(MODLIB)/vdso

$(vdso_img_insttargets): install_%: $(obj)/%.dbg $(MODLIB)/vdso FORCE
	$(call cmd,vdso_install)

PHONY += vdso_install $(vdso_img_insttargets)
vdso_install: $(vdso_img_insttargets) FORCE
/*
* Copyright 2006 Andi Kleen, SUSE Labs.
* Subject to the GNU Public License, v.2
*
* Fast user context implementation of clock_gettime, gettimeofday, and time.
*
* The code should have no internal unresolved relocations.
* Check with readelf after changing.
* Also alternative() doesn't work.
*/
/*
* Copyright (c) 2017 Oracle and/or its affiliates. All rights reserved.
*/
/* Disable profiling for userspace code: */
#ifndef DISABLE_BRANCH_PROFILING
#define DISABLE_BRANCH_PROFILING
#endif
#include <linux/kernel.h>
#include <linux/time.h>
#include <linux/string.h>
#include <asm/io.h>
#include <asm/unistd.h>
#include <asm/timex.h>
#include <asm/clocksource.h>
#include <asm/vvar.h>
#undef TICK_PRIV_BIT
#ifdef CONFIG_SPARC64
#define TICK_PRIV_BIT (1UL << 63)
#else
#define TICK_PRIV_BIT (1ULL << 63)
#endif
#define SYSCALL_STRING \
"ta 0x6d;" \
"sub %%g0, %%o0, %%o0;" \
#define SYSCALL_CLOBBERS \
"f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", \
"f8", "f9", "f10", "f11", "f12", "f13", "f14", "f15", \
"f16", "f17", "f18", "f19", "f20", "f21", "f22", "f23", \
"f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31", \
"f32", "f34", "f36", "f38", "f40", "f42", "f44", "f46", \
"f48", "f50", "f52", "f54", "f56", "f58", "f60", "f62", \
"cc", "memory"
/*
 * Compute the vvar page's address in the process address space, and return it
 * as a pointer to the vvar_data.
 */
static notrace noinline struct vvar_data *
get_vvar_data(void)
{
	unsigned long ret;

	/*
	 * vdso data page is the first vDSO page so grab the return address
	 * and move up a page to get to the data page.
	 * 8192 is the sparc64 page size (matches -z max-page-size=8192 in
	 * the vDSO Makefile).
	 */
	ret = (unsigned long)__builtin_return_address(0);
	ret &= ~(8192 - 1);	/* align down to the page containing the caller */
	ret -= 8192;		/* vvar page sits one page below the vDSO text */

	return (struct vvar_data *) ret;
}
static notrace long
vdso_fallback_gettime(long clock, struct timespec *ts)
{
register long num __asm__("g1") = __NR_clock_gettime;
register long o0 __asm__("o0") = clock;
register long o1 __asm__("o1") = (long) ts;
__asm__ __volatile__(SYSCALL_STRING : "=r" (o0) : "r" (num),
"0" (o0), "r" (o1) : SYSCALL_CLOBBERS);
return o0;
}
/*
 * gettimeofday() syscall fallback, used when the clocksource cannot be
 * read from userspace.  Syscall ABI: number in %g1, args in %o0/%o1.
 */
static notrace __always_inline long
vdso_fallback_gettimeofday(struct timeval *tv, struct timezone *tz)
{
	register long num __asm__("g1") = __NR_gettimeofday;
	register long o0 __asm__("o0") = (long) tv;
	register long o1 __asm__("o1") = (long) tz;

	__asm__ __volatile__(SYSCALL_STRING : "=r" (o0) : "r" (num),
			     "0" (o0), "r" (o1) : SYSCALL_CLOBBERS);
	return o0;
}
#ifdef CONFIG_SPARC64
/*
 * Read the cycle counter.  The default instruction reads %asr24 (%stick);
 * the alternative in the .vread_tick_patch section reads %tick instead —
 * presumably spliced in at vDSO setup time when vdso_fix_stick is set
 * (TODO confirm against the vma.c mapping code).  The privileged/NPT top
 * bit is masked off before returning.
 */
static notrace noinline u64
vread_tick(void) {
	u64	ret;

	__asm__ __volatile__("rd %%asr24, %0 \n"
			     ".section .vread_tick_patch, \"ax\" \n"
			     "rd %%tick, %0 \n"
			     ".previous \n"
			     : "=&r" (ret));
	return ret & ~TICK_PRIV_BIT;
}
#else
/*
 * 32-bit variant: the 64-bit counter is read into %g1 and split into
 * low/high halves, since a v8plus user ABI has 32-bit longs.
 */
static notrace noinline u64
vread_tick(void)
{
	unsigned int lo, hi;

	__asm__ __volatile__("rd %%asr24, %%g1\n\t"
			     "srlx %%g1, 32, %1\n\t"
			     "srl %%g1, 0, %0\n"
			     ".section .vread_tick_patch, \"ax\" \n"
			     "rd %%tick, %%g1\n"
			     ".previous \n"
			     : "=&r" (lo), "=&r" (hi)
			     :
			     : "g1");
	return lo | ((u64)hi << 32);
}
#endif
/*
 * Shifted nanoseconds elapsed since the last clocksource update:
 * (cycles delta & mask) * mult.  Caller still applies >> clock.shift.
 */
static notrace inline u64
vgetsns(struct vvar_data *vvar)
{
	u64 v;
	u64 cycles;

	cycles = vread_tick();
	v = (cycles - vvar->clock.cycle_last) & vvar->clock.mask;
	return v * vvar->clock.mult;
}
/* CLOCK_REALTIME read: seconds from the vvar snapshot plus the live
 * counter delta, retried until no concurrent vvar update is observed.
 */
static notrace noinline int
do_realtime(struct vvar_data *vvar, struct timespec *ts)
{
	unsigned long seq;
	u64 ns;

	/* tv_nsec starts at 0; timespec_add_ns() below folds 'ns' in. */
	ts->tv_nsec = 0;
	do {
		seq = vvar_read_begin(vvar);
		ts->tv_sec = vvar->wall_time_sec;
		ns = vvar->wall_time_snsec;
		ns += vgetsns(vvar);
		ns >>= vvar->clock.shift;	/* shifted ns -> ns */
	} while (unlikely(vvar_read_retry(vvar, seq)));

	timespec_add_ns(ts, ns);

	return 0;
}
/* CLOCK_MONOTONIC read: same scheme as do_realtime() but based on the
 * monotonic snapshot fields.
 */
static notrace noinline int
do_monotonic(struct vvar_data *vvar, struct timespec *ts)
{
	unsigned long seq;
	u64 ns;

	/* tv_nsec starts at 0; timespec_add_ns() below folds 'ns' in. */
	ts->tv_nsec = 0;
	do {
		seq = vvar_read_begin(vvar);
		ts->tv_sec = vvar->monotonic_time_sec;
		ns = vvar->monotonic_time_snsec;
		ns += vgetsns(vvar);
		ns >>= vvar->clock.shift;	/* shifted ns -> ns */
	} while (unlikely(vvar_read_retry(vvar, seq)));

	timespec_add_ns(ts, ns);

	return 0;
}
/*
 * CLOCK_REALTIME_COARSE: copy the kernel's last published wall time
 * verbatim (no counter read), retrying if a vvar update raced with us.
 */
static notrace noinline int
do_realtime_coarse(struct vvar_data *vvar, struct timespec *ts)
{
	unsigned long start;

	for (;;) {
		start = vvar_read_begin(vvar);
		ts->tv_sec = vvar->wall_time_coarse_sec;
		ts->tv_nsec = vvar->wall_time_coarse_nsec;
		if (likely(!vvar_read_retry(vvar, start)))
			break;
	}
	return 0;
}
/*
 * CLOCK_MONOTONIC_COARSE: copy the kernel's last published monotonic
 * time verbatim (no counter read), retrying on a racing vvar update.
 */
static notrace noinline int
do_monotonic_coarse(struct vvar_data *vvar, struct timespec *ts)
{
	unsigned long start;

	for (;;) {
		start = vvar_read_begin(vvar);
		ts->tv_sec = vvar->monotonic_time_coarse_sec;
		ts->tv_nsec = vvar->monotonic_time_coarse_nsec;
		if (likely(!vvar_read_retry(vvar, start)))
			break;
	}
	return 0;
}
/*
 * vDSO entry point for clock_gettime(2).  The coarse clocks are always
 * serviceable from the vvar page; the precise clocks additionally need
 * a vDSO-readable clocksource, otherwise we punt to the real syscall.
 */
notrace int
__vdso_clock_gettime(clockid_t clock, struct timespec *ts)
{
	struct vvar_data *vvd = get_vvar_data();

	switch (clock) {
	case CLOCK_REALTIME_COARSE:
		return do_realtime_coarse(vvd, ts);
	case CLOCK_MONOTONIC_COARSE:
		return do_monotonic_coarse(vvd, ts);
	case CLOCK_REALTIME:
		if (unlikely(vvd->vclock_mode == VCLOCK_NONE))
			break;
		return do_realtime(vvd, ts);
	case CLOCK_MONOTONIC:
		if (unlikely(vvd->vclock_mode == VCLOCK_NONE))
			break;
		return do_monotonic(vvd, ts);
	}

	/* Unknown clock ID or no usable clocksource: use the syscall. */
	return vdso_fallback_gettime(clock, ts);
}
/*
 * Weak libc-style alias: a program resolving clock_gettime() against
 * the vDSO lands directly in __vdso_clock_gettime().
 */
int
clock_gettime(clockid_t, struct timespec *)
__attribute__((weak, alias("__vdso_clock_gettime")));
/*
 * vDSO entry point for gettimeofday(2).  Computes a timespec in place
 * inside the caller's timeval (the two structs share tv_sec as their
 * first field, accessed through a union to avoid aliasing trouble) and
 * then converts nanoseconds to microseconds.
 */
notrace int
__vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
{
	struct vvar_data *vvd = get_vvar_data();

	if (likely(vvd->vclock_mode != VCLOCK_NONE)) {
		if (likely(tv != NULL)) {
			union tstv_t {
				struct timespec ts;
				struct timeval tv;
			} *tstv = (union tstv_t *) tv;
			do_realtime(vvd, &tstv->ts);
			/*
			 * Assign before dividing to ensure that the division is
			 * done in the type of tv_usec, not tv_nsec.
			 *
			 * There cannot be > 1 billion usec in a second:
			 * do_realtime() has already distributed such overflow
			 * into tv_sec. So we can assign it to an int safely.
			 */
			tstv->tv.tv_usec = tstv->ts.tv_nsec;
			tstv->tv.tv_usec /= 1000;
		}
		if (unlikely(tz != NULL)) {
			/* Avoid memcpy. Some old compilers fail to inline it */
			tz->tz_minuteswest = vvd->tz_minuteswest;
			tz->tz_dsttime = vvd->tz_dsttime;
		}
		return 0;
	}
	/* No vDSO-readable clocksource: fall back to the syscall. */
	return vdso_fallback_gettimeofday(tv, tz);
}
/*
 * Weak libc-style alias: a program resolving gettimeofday() against
 * the vDSO lands directly in __vdso_gettimeofday().
 */
int
gettimeofday(struct timeval *, struct timezone *)
__attribute__((weak, alias("__vdso_gettimeofday")));
/*
* Linker script for vDSO. This is an ELF shared object prelinked to
* its virtual address, and with only one read-only segment.
* This script controls its layout.
*/
#if defined(BUILD_VDSO64)
# define SHDR_SIZE 64	/* sizeof an Elf64 section header */
#elif defined(BUILD_VDSO32)
# define SHDR_SIZE 40	/* sizeof an Elf32 section header */
#else
# error unknown VDSO target
#endif

/* Number of entries reserved for the fake section table in .rodata. */
#define NUM_FAKE_SHDRS 7
SECTIONS
{
	/*
	 * User/kernel shared data is before the vDSO. This may be a little
	 * uglier than putting it after the vDSO, but it avoids issues with
	 * non-allocatable things that dangle past the end of the PT_LOAD
	 * segment. Page size is 8192 for both 64-bit and 32-bit vdso binaries
	 */
	vvar_start = . -8192;
	vvar_data = vvar_start;

	. = SIZEOF_HEADERS;

	/* Dynamic-linking metadata, all inside the single read-only segment. */
	.hash		: { *(.hash) }			:text
	.gnu.hash	: { *(.gnu.hash) }
	.dynsym		: { *(.dynsym) }
	.dynstr		: { *(.dynstr) }
	.gnu.version	: { *(.gnu.version) }
	.gnu.version_d	: { *(.gnu.version_d) }
	.gnu.version_r	: { *(.gnu.version_r) }

	.dynamic	: { *(.dynamic) }		:text	:dynamic

	/*
	 * Writable-looking input sections (.data, .bss, GOT) are folded into
	 * .rodata: the vDSO image is mapped read-only and must need no
	 * runtime relocation (vdso2c enforces this).
	 */
	.rodata		: {
		*(.rodata*)
		*(.data*)
		*(.sdata*)
		*(.got.plt) *(.got)
		*(.gnu.linkonce.d.*)
		*(.bss*)
		*(.dynbss*)
		*(.gnu.linkonce.b.*)

		/*
		 * Ideally this would live in a C file: kept in here for
		 * compatibility with x86-64.
		 */
		VDSO_FAKE_SECTION_TABLE_START = .;
		. = . + NUM_FAKE_SHDRS * SHDR_SIZE;
		VDSO_FAKE_SECTION_TABLE_END = .;
	} :text

	.fake_shstrtab	: { *(.fake_shstrtab) }		:text

	.note		: { *(.note.*) }		:text	:note

	.eh_frame_hdr	: { *(.eh_frame_hdr) }		:text	:eh_frame_hdr
	.eh_frame	: { KEEP (*(.eh_frame)) }	:text

	/*
	 * Text is well-separated from actual data: there's plenty of
	 * stuff that isn't used at runtime in between.
	 *
	 * NOTE(review): the 0x90909090 fill pattern was inherited from the
	 * x86 layout script (0x90 is an x86 NOP, not a sparc one) — confirm
	 * whether a sparc NOP fill is wanted.  The trailing comma is valid
	 * (optional) output-section syntax in GNU ld.
	 */
	.text		: { *(.text*) }			:text	=0x90909090,

	/* Replacement instructions copied over vread_tick() at boot
	 * (see init_vdso_image() in vma.c). */
	.vread_tick_patch : {
		vread_tick_patch_start = .;
		*(.vread_tick_patch)
		vread_tick_patch_end = .;
	}

	/DISCARD/ : {
		*(.discard)
		*(.discard.*)
		*(__bug_table)
	}
}
/*
* Very old versions of ld do not recognize this name token; use the constant.
*/
#define PT_GNU_EH_FRAME 0x6474e550
/*
* We must supply the ELF program headers explicitly to get just one
* PT_LOAD segment, and set the flags explicitly to make segments read-only.
*/
PHDRS
{
	/* A single read+execute PT_LOAD keeps the whole image in one segment. */
	text		PT_LOAD		FLAGS(5) FILEHDR PHDRS;	/* PF_R|PF_X */
	dynamic		PT_DYNAMIC	FLAGS(4);		/* PF_R */
	note		PT_NOTE		FLAGS(4);		/* PF_R */
	eh_frame_hdr	PT_GNU_EH_FRAME;
}
/*
* This supplies .note.* sections to go into the PT_NOTE inside the vDSO text.
* Here we can supply some information useful to userland.
*/
#include <linux/uts.h>
#include <linux/version.h>
#include <linux/elfnote.h>
/* Embed the kernel version in an ELF note so userland can identify the vDSO. */
ELFNOTE_START(Linux, 0, "a")
	.long LINUX_VERSION_CODE
ELFNOTE_END
/*
* Linker script for 64-bit vDSO.
* We #include the file to define the layout details.
*
* This file defines the version script giving the user-exported symbols in
* the DSO.
*/
#define BUILD_VDSO64
#include "vdso-layout.lds.S"
/*
* This controls what userland symbols we export from the vDSO.
*/
/*
 * Export only the two vDSO entry points — both the __vdso_* names and
 * the plain libc-style weak aliases; everything else stays local.
 */
VERSION {
	LINUX_2.6 {
	global:
		clock_gettime;
		__vdso_clock_gettime;
		gettimeofday;
		__vdso_gettimeofday;
	local: *;
	};
}
/*
* vdso2c - A vdso image preparation tool
* Copyright (c) 2014 Andy Lutomirski and others
* Licensed under the GPL v2
*
* vdso2c requires stripped and unstripped input. It would be trivial
* to fully strip the input in here, but, for reasons described below,
* we need to write a section table. Doing this is more or less
* equivalent to dropping all non-allocatable sections, but it's
* easier to let objcopy handle that instead of doing it ourselves.
* If we ever need to do something fancier than what objcopy provides,
* it would be straightforward to add here.
*
* We keep a section table for a few reasons:
*
* Binutils has issues debugging the vDSO: it reads the section table to
* find SHT_NOTE; it won't look at PT_NOTE for the in-memory vDSO, which
* would break build-id if we removed the section table. Binutils
* also requires that shstrndx != 0. See:
* https://sourceware.org/bugzilla/show_bug.cgi?id=17064
*
* elfutils might not look for PT_NOTE if there is a section table at
* all. I don't know whether this matters for any practical purpose.
*
* For simplicity, rather than hacking up a partial section table, we
* just write a mostly complete one. We omit non-dynamic symbols,
* though, since they're rather large.
*
* Once binutils gets fixed, we might be able to drop this for all but
* the 64-bit vdso, since build-id only works in kernel RPMs, and
* systems that update to new enough kernel RPMs will likely update
* binutils in sync. build-id has never worked for home-built kernel
* RPMs without manual symlinking, and I suspect that no one ever does
* that.
*/
/*
* Copyright (c) 2017 Oracle and/or its affiliates. All rights reserved.
*/
#include <inttypes.h>
#include <stdint.h>
#include <unistd.h>
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <err.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <tools/be_byteshift.h>
#include <linux/elf.h>
#include <linux/types.h>
#include <linux/kernel.h>
/* Name of the output file; used by fail() to unlink partial output. */
const char *outfilename;

/* Symbols that we need in vdso2c. */
/* Indices into required_syms[]. */
enum {
	sym_vvar_start,
	sym_VDSO_FAKE_SECTION_TABLE_START,
	sym_VDSO_FAKE_SECTION_TABLE_END,
	sym_vread_tick,
	sym_vread_tick_patch_start,
	sym_vread_tick_patch_end
};

/*
 * A symbol vdso2c looks up in the vDSO image.  export != 0 means its
 * value is emitted as a .sym_<name> field of the generated
 * struct vdso_image initializer (see BITSFUNC(go) in vdso2c.h).
 */
struct vdso_sym {
	const char *name;
	int export;
};

struct vdso_sym required_syms[] = {
	[sym_vvar_start] = {"vvar_start", 1},
	[sym_VDSO_FAKE_SECTION_TABLE_START] = {
		"VDSO_FAKE_SECTION_TABLE_START", 0
	},
	[sym_VDSO_FAKE_SECTION_TABLE_END] = {
		"VDSO_FAKE_SECTION_TABLE_END", 0
	},
	[sym_vread_tick] = {"vread_tick", 1},
	[sym_vread_tick_patch_start] = {"vread_tick_patch_start", 1},
	[sym_vread_tick_patch_end] = {"vread_tick_patch_end", 1}
};
/*
 * Print an error message to stderr, remove any partially written output
 * file, and terminate with status 1.  Never returns.
 *
 * Fix: va_end() used to sit after exit(1) and was therefore unreachable
 * dead code; the va_list is now cleaned up before exiting.
 */
__attribute__((format(printf, 1, 2))) __attribute__((noreturn))
static void fail(const char *format, ...)
{
	va_list ap;

	va_start(ap, format);
	fprintf(stderr, "Error: ");
	vfprintf(stderr, format, ap);
	va_end(ap);

	if (outfilename)
		unlink(outfilename);
	exit(1);
}
/*
 * Evil macros for big-endian reads and writes
 *
 * GET_BE(x) / PUT_BE(x, val) dispatch on sizeof(*x) at compile time via
 * __builtin_choose_expr and call the matching get/put_unaligned_be{16,32,64}
 * helper; single bytes are accessed directly.
 */
#define GBE(x, bits, ifnot) \
	__builtin_choose_expr( \
		(sizeof(*(x)) == bits/8), \
		(__typeof__(*(x)))get_unaligned_be##bits(x), ifnot)

#define LAST_GBE(x) \
	__builtin_choose_expr(sizeof(*(x)) == 1, *(x), (void)(0))

#define GET_BE(x) \
	GBE(x, 64, GBE(x, 32, GBE(x, 16, LAST_GBE(x))))

#define PBE(x, val, bits, ifnot) \
	__builtin_choose_expr( \
		(sizeof(*(x)) == bits/8), \
		put_unaligned_be##bits((val), (x)), ifnot)

#define LAST_PBE(x, val) \
	__builtin_choose_expr(sizeof(*(x)) == 1, *(x) = (val), (void)(0))

#define PUT_BE(x, val) \
	PBE(x, val, 64, PBE(x, val, 32, PBE(x, val, 16, LAST_PBE(x, val))))

#define NSYMS ARRAY_SIZE(required_syms)

/* BITSFUNC(name) expands to name64 or name32 depending on ELF_BITS. */
#define BITSFUNC3(name, bits, suffix) name##bits##suffix
#define BITSFUNC2(name, bits, suffix) BITSFUNC3(name, bits, suffix)
#define BITSFUNC(name) BITSFUNC2(name, ELF_BITS, )

/* Signed integer type as wide as an ELF address (see the st_value note). */
#define INT_BITS BITSFUNC2(int, ELF_BITS, _t)

/* ELF(Shdr) expands to Elf64_Shdr or Elf32_Shdr depending on ELF_BITS. */
#define ELF_BITS_XFORM2(bits, x) Elf##bits##_##x
#define ELF_BITS_XFORM(bits, x) ELF_BITS_XFORM2(bits, x)
#define ELF(x) ELF_BITS_XFORM(ELF_BITS, x)
#define ELF_BITS 64
#include "vdso2c.h"
#undef ELF_BITS
#define ELF_BITS 32
#include "vdso2c.h"
#undef ELF_BITS
/*
 * Dispatch to the 64- or 32-bit image parser based on the ELF class of
 * the (unstripped) input image.
 */
static void go(void *raw_addr, size_t raw_len,
	       void *stripped_addr, size_t stripped_len,
	       FILE *outfile, const char *name)
{
	Elf64_Ehdr *hdr = (Elf64_Ehdr *)raw_addr;

	switch (hdr->e_ident[EI_CLASS]) {
	case ELFCLASS64:
		go64(raw_addr, raw_len, stripped_addr, stripped_len,
		     outfile, name);
		break;
	case ELFCLASS32:
		go32(raw_addr, raw_len, stripped_addr, stripped_len,
		     outfile, name);
		break;
	default:
		fail("unknown ELF class\n");
	}
}
/*
 * mmap() an entire input file and return its address and length via
 * @addr/@len; @prot is passed straight to mmap().  Exits via err() on
 * any failure.
 */
static void map_input(const char *name, void **addr, size_t *len, int prot)
{
	off_t file_size;
	int fd;

	fd = open(name, O_RDONLY);
	if (fd == -1)
		err(1, "%s", name);

	file_size = lseek(fd, 0, SEEK_END);
	if (file_size == (off_t)-1)
		err(1, "lseek");
	*len = (size_t)file_size;

	*addr = mmap(NULL, file_size, prot, MAP_PRIVATE, fd, 0);
	if (*addr == MAP_FAILED)
		err(1, "mmap");

	close(fd);
}
/*
 * vdso2c entry point: vdso2c RAW_INPUT STRIPPED_INPUT OUTPUT.
 *
 * Fixes: the fopen() failure path reported argv[2] (the stripped input)
 * instead of the output file it actually failed to open; strdup() was
 * unchecked.
 */
int main(int argc, char **argv)
{
	size_t raw_len, stripped_len;
	void *raw_addr, *stripped_addr;
	FILE *outfile;
	char *name, *tmp;
	int namelen;

	if (argc != 4) {
		printf("Usage: vdso2c RAW_INPUT STRIPPED_INPUT OUTPUT\n");
		return 1;
	}

	/*
	 * Figure out the struct name.  If we're writing to a .so file,
	 * generate raw output instead (name == NULL tells go() to do that).
	 */
	name = strdup(argv[3]);
	if (!name)
		err(1, "strdup");
	namelen = strlen(name);
	if (namelen >= 3 && !strcmp(name + namelen - 3, ".so")) {
		name = NULL;
	} else {
		/* Strip directory and extension; '-' is not valid in a C name. */
		tmp = strrchr(name, '/');
		if (tmp)
			name = tmp + 1;
		tmp = strchr(name, '.');
		if (tmp)
			*tmp = '\0';
		for (tmp = name; *tmp; tmp++)
			if (*tmp == '-')
				*tmp = '_';
	}

	map_input(argv[1], &raw_addr, &raw_len, PROT_READ);
	map_input(argv[2], &stripped_addr, &stripped_len, PROT_READ);
	outfilename = argv[3];
	outfile = fopen(outfilename, "w");
	if (!outfile)
		err(1, "%s", outfilename);	/* was argv[2]: wrong file in message */

	go(raw_addr, raw_len, stripped_addr, stripped_len, outfile, name);

	munmap(raw_addr, raw_len);
	munmap(stripped_addr, stripped_len);
	fclose(outfile);
	return 0;
}
/*
* Copyright (c) 2017 Oracle and/or its affiliates. All rights reserved.
*/
/*
* This file is included up to twice from vdso2c.c. It generates code for
* 32-bit and 64-bit vDSOs. We will eventually need both for 64-bit builds,
* since 32-bit vDSOs will then be built for 32-bit userspace.
*/
/*
 * Parse one vDSO image (64- or 32-bit, selected by ELF_BITS at include
 * time) and emit a C file containing the raw image bytes plus a
 * struct vdso_image initializer describing it.
 *
 * @raw_addr/@raw_len:           unstripped link output (has the symtab)
 * @stripped_addr/@stripped_len: what actually gets embedded
 * @name:                        image name, or NULL to write the
 *                               stripped image out verbatim
 *
 * All multi-byte fields are read with GET_BE() since the sparc images
 * are big-endian regardless of the build host.
 */
static void BITSFUNC(go)(void *raw_addr, size_t raw_len,
			 void *stripped_addr, size_t stripped_len,
			 FILE *outfile, const char *name)
{
	int found_load = 0;
	unsigned long load_size = -1;  /* Work around bogus warning */
	unsigned long mapping_size;
	int i;
	unsigned long j;

	ELF(Shdr) *symtab_hdr = NULL, *strtab_hdr;
	ELF(Ehdr) *hdr = (ELF(Ehdr) *)raw_addr;
	ELF(Dyn) *dyn = 0, *dyn_end = 0;
	INT_BITS syms[NSYMS] = {};

	ELF(Phdr) *pt = (ELF(Phdr) *)(raw_addr + GET_BE(&hdr->e_phoff));

	/* Walk the segment table. */
	for (i = 0; i < GET_BE(&hdr->e_phnum); i++) {
		if (GET_BE(&pt[i].p_type) == PT_LOAD) {
			/* The layout script guarantees one PT_LOAD at vaddr 0. */
			if (found_load)
				fail("multiple PT_LOAD segs\n");

			if (GET_BE(&pt[i].p_offset) != 0 ||
			    GET_BE(&pt[i].p_vaddr) != 0)
				fail("PT_LOAD in wrong place\n");

			if (GET_BE(&pt[i].p_memsz) != GET_BE(&pt[i].p_filesz))
				fail("cannot handle memsz != filesz\n");

			load_size = GET_BE(&pt[i].p_memsz);
			found_load = 1;
		} else if (GET_BE(&pt[i].p_type) == PT_DYNAMIC) {
			dyn = raw_addr + GET_BE(&pt[i].p_offset);
			dyn_end = raw_addr + GET_BE(&pt[i].p_offset) +
				GET_BE(&pt[i].p_memsz);
		}
	}
	if (!found_load)
		fail("no PT_LOAD seg\n");

	if (stripped_len < load_size)
		fail("stripped input is too short\n");

	/* Walk the dynamic table: the image must need no runtime relocation. */
	for (i = 0; dyn + i < dyn_end &&
		     GET_BE(&dyn[i].d_tag) != DT_NULL; i++) {
		typeof(dyn[i].d_tag) tag = GET_BE(&dyn[i].d_tag);
		typeof(dyn[i].d_un.d_val) val = GET_BE(&dyn[i].d_un.d_val);

		if ((tag == DT_RELSZ || tag == DT_RELASZ) && (val != 0))
			fail("vdso image contains dynamic relocations\n");
	}

	/* Walk the section table to find the symbol table. */
	for (i = 0; i < GET_BE(&hdr->e_shnum); i++) {
		ELF(Shdr) *sh = raw_addr + GET_BE(&hdr->e_shoff) +
			GET_BE(&hdr->e_shentsize) * i;
		if (GET_BE(&sh->sh_type) == SHT_SYMTAB)
			symtab_hdr = sh;
	}

	if (!symtab_hdr)
		fail("no symbol table\n");

	/* The symtab's sh_link names its companion string table. */
	strtab_hdr = raw_addr + GET_BE(&hdr->e_shoff) +
		GET_BE(&hdr->e_shentsize) * GET_BE(&symtab_hdr->sh_link);

	/* Walk the symbol table, recording the value of each required symbol. */
	for (i = 0;
	     i < GET_BE(&symtab_hdr->sh_size) / GET_BE(&symtab_hdr->sh_entsize);
	     i++) {
		int k;

		ELF(Sym) *sym = raw_addr + GET_BE(&symtab_hdr->sh_offset) +
			GET_BE(&symtab_hdr->sh_entsize) * i;
		const char *name = raw_addr + GET_BE(&strtab_hdr->sh_offset) +
			GET_BE(&sym->st_name);

		for (k = 0; k < NSYMS; k++) {
			if (!strcmp(name, required_syms[k].name)) {
				if (syms[k]) {
					fail("duplicate symbol %s\n",
					     required_syms[k].name);
				}

				/*
				 * Careful: we use negative addresses, but
				 * st_value is unsigned, so we rely
				 * on syms[k] being a signed type of the
				 * correct width.
				 */
				syms[k] = GET_BE(&sym->st_value);
			}
		}
	}

	/* Validate mapping addresses. */
	if (syms[sym_vvar_start] % 8192)
		fail("vvar_begin must be a multiple of 8192\n");

	if (!name) {
		/* Raw (.so) output mode: just copy the stripped image out. */
		fwrite(stripped_addr, stripped_len, 1, outfile);
		return;
	}

	/* Round the embedded image up to a whole number of 8192-byte pages. */
	mapping_size = (stripped_len + 8191) / 8192 * 8192;

	fprintf(outfile, "/* AUTOMATICALLY GENERATED -- DO NOT EDIT */\n\n");
	fprintf(outfile, "#include <linux/cache.h>\n");
	fprintf(outfile, "#include <asm/vdso.h>\n");
	fprintf(outfile, "\n");
	fprintf(outfile,
		"static unsigned char raw_data[%lu] __ro_after_init __aligned(8192)= {",
		mapping_size);
	for (j = 0; j < stripped_len; j++) {
		if (j % 10 == 0)
			fprintf(outfile, "\n\t");
		fprintf(outfile, "0x%02X, ",
			(int)((unsigned char *)stripped_addr)[j]);
	}
	fprintf(outfile, "\n};\n\n");

	fprintf(outfile, "const struct vdso_image %s_builtin = {\n", name);
	fprintf(outfile, "\t.data = raw_data,\n");
	fprintf(outfile, "\t.size = %lu,\n", mapping_size);
	for (i = 0; i < NSYMS; i++) {
		/* Only exported symbols with a non-zero value are emitted. */
		if (required_syms[i].export && syms[i])
			fprintf(outfile, "\t.sym_%s = %" PRIi64 ",\n",
				required_syms[i].name, (int64_t)syms[i]);
	}
	fprintf(outfile, "};\n");
}
/*
* Copyright (c) 2017 Oracle and/or its affiliates. All rights reserved.
*/
/* Select the 32-bit code paths when compiling vclock_gettime.c below. */
#define BUILD_VDSO32

#ifndef CONFIG_CC_OPTIMIZE_FOR_SIZE
#undef CONFIG_OPTIMIZE_INLINING
#endif

#ifdef CONFIG_SPARC64

/*
 * in case of a 32 bit VDSO for a 64 bit kernel fake a 32 bit kernel
 * configuration
 */
#undef CONFIG_64BIT
#undef CONFIG_SPARC64
#define BUILD_VDSO32_64
#define CONFIG_32BIT
#undef CONFIG_QUEUED_RWLOCKS
#undef CONFIG_QUEUED_SPINLOCKS

#endif
#include "../vclock_gettime.c"
/*
* This supplies .note.* sections to go into the PT_NOTE inside the vDSO
* text. Here we can supply some information useful to userland.
*/
#include <linux/uts.h>
#include <linux/version.h>
#include <linux/elfnote.h>
/* Embed the kernel version in an ELF note so userland can identify the vDSO. */
ELFNOTE_START(Linux, 0, "a")
	.long LINUX_VERSION_CODE
ELFNOTE_END
/*
* Linker script for sparc32 vDSO
* We #include the file to define the layout details.
*
* This file defines the version script giving the user-exported symbols in
* the DSO.
*/
#define BUILD_VDSO32
#include "../vdso-layout.lds.S"
/*
* This controls what userland symbols we export from the vDSO.
*/
/*
 * Export only the two vDSO entry points — both the __vdso_* names and
 * the plain libc-style weak aliases; everything else stays local.
 */
VERSION {
	LINUX_2.6 {
	global:
		clock_gettime;
		__vdso_clock_gettime;
		gettimeofday;
		__vdso_gettimeofday;
	local: *;
	};
}
/*
* Set up the VMAs to tell the VM about the vDSO.
* Copyright 2007 Andi Kleen, SUSE Labs.
* Subject to the GPL, v.2
*/
/*
* Copyright (c) 2017 Oracle and/or its affiliates. All rights reserved.
*/
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <asm/vdso.h>
#include <asm/vvar.h>
#include <asm/page.h>
/* "vdso=" boot parameter / global switch: nonzero enables the vDSO. */
unsigned int __read_mostly vdso_enabled = 1;

/* Special mapping for the shared vvar data page. */
static struct vm_special_mapping vvar_mapping = {
	.name = "[vvar]"
};

#ifdef CONFIG_SPARC64
/* Special mapping for the 64-bit vDSO text. */
static struct vm_special_mapping vdso_mapping64 = {
	.name = "[vdso]"
};
#endif

#ifdef CONFIG_COMPAT
/* Special mapping for the 32-bit (compat) vDSO text. */
static struct vm_special_mapping vdso_mapping32 = {
	.name = "[vdso]"
};
#endif

/* Kernel-side pointer to the vvar page contents; NULL until first init. */
struct vvar_data *vvar_data;

/* Bytes skipped at the start of vread_tick() when applying the %tick
 * patch below — presumably the size of the leading save instruction;
 * confirm against the vDSO asm. */
#define	SAVE_INSTR_SIZE	4
/*
* Allocate pages for the vdso and vvar, and copy in the vdso text from the
* kernel image.
*/
int __init init_vdso_image(const struct vdso_image *image,
			   struct vm_special_mapping *vdso_mapping)
{
	int i;
	struct page *dp, **dpp = NULL;
	int dnpages = 0;
	struct page *cp, **cpp = NULL;
	int cnpages = (image->size) / PAGE_SIZE;

	/*
	 * First, the vdso text. This is initialized data, an integral number of
	 * pages long.
	 */
	if (WARN_ON(image->size % PAGE_SIZE != 0))
		goto oom;

	cpp = kcalloc(cnpages, sizeof(struct page *), GFP_KERNEL);
	vdso_mapping->pages = cpp;

	if (!cpp)
		goto oom;

	if (vdso_fix_stick) {
		/*
		 * If the system uses %tick instead of %stick, patch the VDSO
		 * with instruction reading %tick instead of %stick.
		 */
		unsigned int j, k = SAVE_INSTR_SIZE;
		unsigned char *data = image->data;

		/*
		 * Copy the replacement instructions from the
		 * .vread_tick_patch section over vread_tick()'s body,
		 * SAVE_INSTR_SIZE bytes past its entry point.
		 */
		for (j = image->sym_vread_tick_patch_start;
		     j < image->sym_vread_tick_patch_end; j++) {

			data[image->sym_vread_tick + k] = data[j];
			k++;
		}
	}

	/* Copy the (possibly patched) vDSO text into freshly allocated pages. */
	for (i = 0; i < cnpages; i++) {
		cp = alloc_page(GFP_KERNEL);
		if (!cp)
			goto oom;
		cpp[i] = cp;
		copy_page(page_address(cp), image->data + i * PAGE_SIZE);
	}

	/*
	 * Now the vvar page. This is uninitialized data.
	 * Allocated only once: a second call (e.g. for the compat image)
	 * reuses the same page.
	 */

	if (vvar_data == NULL) {
		dnpages = (sizeof(struct vvar_data) / PAGE_SIZE) + 1;
		if (WARN_ON(dnpages != 1))
			goto oom;
		dpp = kcalloc(dnpages, sizeof(struct page *), GFP_KERNEL);
		vvar_mapping.pages = dpp;

		if (!dpp)
			goto oom;

		dp = alloc_page(GFP_KERNEL);
		if (!dp)
			goto oom;

		dpp[0] = dp;
		vvar_data = page_address(dp);
		memset(vvar_data, 0, PAGE_SIZE);

		vvar_data->seq = 0;
	}

	return 0;
 oom:
	/* kcalloc() zeroed the arrays, so never-allocated slots are NULL. */
	if (cpp != NULL) {
		for (i = 0; i < cnpages; i++) {
			if (cpp[i] != NULL)
				__free_page(cpp[i]);
		}
		kfree(cpp);
		vdso_mapping->pages = NULL;
	}

	if (dpp != NULL) {
		for (i = 0; i < dnpages; i++) {
			if (dpp[i] != NULL)
				__free_page(dpp[i]);
		}
		kfree(dpp);
		vvar_mapping.pages = NULL;
	}

	pr_warn("Cannot allocate vdso\n");
	vdso_enabled = 0;
	return -ENOMEM;
}
/*
 * Boot-time initialization: build the in-kernel copy of every vDSO
 * image this kernel can serve (64-bit on sparc64, plus the 32-bit image
 * when CONFIG_COMPAT is enabled).
 */
static int __init init_vdso(void)
{
	int err = 0;
#ifdef CONFIG_SPARC64
	err = init_vdso_image(&vdso_image_64_builtin, &vdso_mapping64);
	if (err)
		return err;
#endif

#ifdef CONFIG_COMPAT
	err = init_vdso_image(&vdso_image_32_builtin, &vdso_mapping32);
#endif
	return err;

}
subsys_initcall(init_vdso);
struct linux_binprm;

/* Shuffle the vdso up a bit, randomly. */
static unsigned long vdso_addr(unsigned long start, unsigned int len)
{
	unsigned int offset;

	/* This loses some more bits than a modulo, but is cheaper */
	offset = get_random_int() & (PTRS_PER_PTE - 1);
	/*
	 * NOTE(review): @len is currently unused here; the caller
	 * (map_vdso) re-validates the randomized address with
	 * get_unmapped_area() afterwards.
	 */
	return start + (offset << PAGE_SHIFT);
}
/*
 * Map a vDSO image plus the vvar page into the current process.
 * sym_vvar_start is negative (see vdso2c), so the vvar area sits just
 * below the vDSO text.  Returns 0 on success or a negative errno.
 */
static int map_vdso(const struct vdso_image *image,
		    struct vm_special_mapping *vdso_mapping)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long text_start, addr = 0;
	int ret = 0;

	down_write(&mm->mmap_sem);

	/*
	 * First, get an unmapped region: then randomize it, and make sure that
	 * region is free.
	 */
	if (current->flags & PF_RANDOMIZE) {
		addr = get_unmapped_area(NULL, 0,
					 image->size - image->sym_vvar_start,
					 0, 0);
		if (IS_ERR_VALUE(addr)) {
			ret = addr;
			goto up_fail;
		}
		addr = vdso_addr(addr, image->size - image->sym_vvar_start);
	}
	/* Re-check: the randomized address may not be free. */
	addr = get_unmapped_area(NULL, addr,
				 image->size - image->sym_vvar_start, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = addr;
		goto up_fail;
	}

	/* sym_vvar_start is negative: the text goes above the vvar area. */
	text_start = addr - image->sym_vvar_start;
	current->mm->context.vdso = (void __user *)text_start;

	/*
	 * MAYWRITE to allow gdb to COW and set breakpoints
	 */
	vma = _install_special_mapping(mm,
				       text_start,
				       image->size,
				       VM_READ|VM_EXEC|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       vdso_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto up_fail;
	}

	vma = _install_special_mapping(mm,
				       addr,
				       -image->sym_vvar_start,
				       VM_READ|VM_MAYREAD,
				       &vvar_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		/* Undo the text mapping; don't leave a half-installed vDSO. */
		do_munmap(mm, text_start, image->size, NULL);
	}

up_fail:
	if (ret)
		current->mm->context.vdso = NULL;

	up_write(&mm->mmap_sem);
	return ret;
}
/*
 * exec-time hook: map the vDSO matching the new task's word size into
 * its address space, unless the vDSO has been disabled.
 */
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	if (!vdso_enabled)
		return 0;

#if defined CONFIG_COMPAT
	if (is_32bit_task())
		return map_vdso(&vdso_image_32_builtin, &vdso_mapping32);
	return map_vdso(&vdso_image_64_builtin, &vdso_mapping64);
#else
	return map_vdso(&vdso_image_64_builtin, &vdso_mapping64);
#endif
}
/*
 * Parse the "vdso=" kernel command line parameter.
 *
 * Fix: vdso_enabled was assigned from @val even when kstrtoul() failed,
 * i.e. from an uninitialized variable.  Only apply the value when the
 * argument parsed cleanly.
 */
static __init int vdso_setup(char *s)
{
	int err;
	unsigned long val;

	err = kstrtoul(s, 10, &val);
	if (!err)
		vdso_enabled = val;
	return err;
}
__setup("vdso=", vdso_setup);
...@@ -22,7 +22,6 @@ ...@@ -22,7 +22,6 @@
#include <asm/display7seg.h> #include <asm/display7seg.h>
#define D7S_MINOR 193
#define DRIVER_NAME "d7s" #define DRIVER_NAME "d7s"
#define PFX DRIVER_NAME ": " #define PFX DRIVER_NAME ": "
......
...@@ -36,6 +36,7 @@ ...@@ -36,6 +36,7 @@
#define HWRNG_MINOR 183 #define HWRNG_MINOR 183
#define MICROCODE_MINOR 184 #define MICROCODE_MINOR 184
#define IRNET_MINOR 187 #define IRNET_MINOR 187
#define D7S_MINOR 193
#define VFIO_MINOR 196 #define VFIO_MINOR 196
#define TUN_MINOR 200 #define TUN_MINOR 200
#define CUSE_MINOR 203 #define CUSE_MINOR 203
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment