Commit 951f22d5 authored by Martin Schwidefsky's avatar Martin Schwidefsky Committed by Linus Torvalds

[PATCH] s390: spin lock retry

Split spin lock and r/w lock implementation into a single try which is done
inline and an out of line function that repeatedly tries to get the lock
before doing the cpu_relax().  Add a system control to set the number of
retries before a cpu is yielded.

The reason for the spin lock retry is that the diagnose 0x44 that is used to
give up the virtual cpu is quite expensive.  For spin locks that are held only
for a short period of time the costs of the diagnoses outweigh the savings
for spin locks that are held for a longer time.  The default retry count is
1000.
Signed-off-by: default avatarMartin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: default avatarAndrew Morton <akpm@osdl.org>
Signed-off-by: default avatarLinus Torvalds <torvalds@osdl.org>
parent 8449d003
...@@ -658,10 +658,8 @@ startup:basr %r13,0 # get base ...@@ -658,10 +658,8 @@ startup:basr %r13,0 # get base
# #
la %r1,0f-.LPG1(%r13) # set program check address la %r1,0f-.LPG1(%r13) # set program check address
stg %r1,__LC_PGM_NEW_PSW+8 stg %r1,__LC_PGM_NEW_PSW+8
mvc __LC_DIAG44_OPCODE(8),.Lnop-.LPG1(%r13)
diag 0,0,0x44 # test diag 0x44 diag 0,0,0x44 # test diag 0x44
oi 7(%r12),32 # set diag44 flag oi 7(%r12),32 # set diag44 flag
mvc __LC_DIAG44_OPCODE(8),.Ldiag44-.LPG1(%r13)
0: 0:
# #
...@@ -702,7 +700,6 @@ startup:basr %r13,0 # get base ...@@ -702,7 +700,6 @@ startup:basr %r13,0 # get base
.L4malign:.quad 0xffffffffffc00000 .L4malign:.quad 0xffffffffffc00000
.Lscan2g:.quad 0x80000000 + 0x20000 - 8 # 2GB + 128K - 8 .Lscan2g:.quad 0x80000000 + 0x20000 - 8 # 2GB + 128K - 8
.Lnop: .long 0x07000700 .Lnop: .long 0x07000700
.Ldiag44:.long 0x83000044
.org PARMAREA-64 .org PARMAREA-64
.Lduct: .long 0,0,0,0,0,0,0,0 .Lduct: .long 0,0,0,0,0,0,0,0
......
...@@ -431,12 +431,6 @@ setup_lowcore(void) ...@@ -431,12 +431,6 @@ setup_lowcore(void)
ctl_set_bit(14, 29); ctl_set_bit(14, 29);
} }
#endif #endif
#ifdef CONFIG_ARCH_S390X
if (MACHINE_HAS_DIAG44)
lc->diag44_opcode = 0x83000044;
else
lc->diag44_opcode = 0x07000700;
#endif /* CONFIG_ARCH_S390X */
set_prefix((u32)(unsigned long) lc); set_prefix((u32)(unsigned long) lc);
} }
......
...@@ -5,5 +5,5 @@ ...@@ -5,5 +5,5 @@
EXTRA_AFLAGS := -traditional EXTRA_AFLAGS := -traditional
lib-y += delay.o string.o lib-y += delay.o string.o
lib-$(CONFIG_ARCH_S390_31) += uaccess.o lib-$(CONFIG_ARCH_S390_31) += uaccess.o spinlock.o
lib-$(CONFIG_ARCH_S390X) += uaccess64.o lib-$(CONFIG_ARCH_S390X) += uaccess64.o spinlock.o
/*
* arch/s390/lib/spinlock.c
* Out of line spinlock code.
*
* S390 version
* Copyright (C) 2004 IBM Deutschland Entwicklung GmbH, IBM Corporation
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
*/
#include <linux/types.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <asm/io.h>
/* Statistics: counts every out-of-line lock acquisition attempt. */
atomic_t spin_retry_counter;
/* Number of cas attempts before the virtual cpu is yielded with diagnose
 * 0x44; tunable via the "spin_retry=" boot parameter (and, per this
 * patch, a sysctl).  Default is 1000. */
int spin_retry = 1000;
/*
 * Parse the "spin_retry=" kernel boot parameter.
 */
static int __init spin_retry_setup(char *str)
{
	char *end = str;

	spin_retry = simple_strtoul(str, &end, 0);
	return 1;
}
__setup("spin_retry=", spin_retry_setup);
/*
 * Yield the virtual cpu with diagnose 0x44.  On 64 bit the diagnose
 * is only issued when the machine supports it; on 31 bit it is
 * issued unconditionally.
 */
static inline void
_diag44(void)
{
#ifdef __s390x__
	if (!MACHINE_HAS_DIAG44)
		return;
#endif
	asm volatile("diag 0,0,0x44");
}
/*
 * _raw_spin_lock_wait - out-of-line spin lock slow path.
 * @lp: spin lock to acquire
 * @pc: value stored into the lock word on success (presumably the
 *      caller's pc, judging by the name -- confirm against the inline
 *      fast path, which is not visible here)
 *
 * Loops until the lock word can be changed from 0 (free) to @pc.
 * Every spin_retry failed attempts the virtual cpu is yielded with
 * _diag44(); this is the cheap alternative to yielding on every miss.
 */
void
_raw_spin_lock_wait(spinlock_t *lp, unsigned int pc)
{
	int count = spin_retry;

	while (1) {
		/* Retry budget exhausted: yield the cpu, then refill. */
		if (count-- <= 0) {
			_diag44();
			count = spin_retry;
		}
		atomic_inc(&spin_retry_counter);
		/* Lock is free iff the lock word is 0. */
		if (_raw_compare_and_swap(&lp->lock, 0, pc) == 0)
			return;
	}
}
EXPORT_SYMBOL(_raw_spin_lock_wait);
/*
 * Out-of-line part of spin_trylock: make up to spin_retry cas
 * attempts, then give up without yielding the cpu.
 * Returns 1 if the lock was taken, 0 otherwise.
 */
int
_raw_spin_trylock_retry(spinlock_t *lp, unsigned int pc)
{
	int i;

	for (i = spin_retry; i > 0; i--) {
		atomic_inc(&spin_retry_counter);
		if (_raw_compare_and_swap(&lp->lock, 0, pc) == 0)
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL(_raw_spin_trylock_retry);
/*
 * _raw_read_lock_wait - out-of-line slow path for read_lock.
 * @rw: the rw lock to acquire for reading
 *
 * Increments the reader count with compare-and-swap.  The expected
 * old value always has the write-lock bit (msb) masked off, so the
 * cas can only succeed while no writer holds the lock.  Every
 * spin_retry failed attempts the virtual cpu is yielded with
 * _diag44().
 */
void
_raw_read_lock_wait(rwlock_t *rw)
{
	unsigned int old;
	int count = spin_retry;

	while (1) {
		/* Retry budget exhausted: yield the cpu, then refill. */
		if (count-- <= 0) {
			_diag44();
			count = spin_retry;
		}
		atomic_inc(&spin_retry_counter);
		/* Current reader count with the write-lock bit cleared. */
		old = rw->lock & 0x7fffffffU;
		if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old)
			return;
	}
}
EXPORT_SYMBOL(_raw_read_lock_wait);
/*
 * Out-of-line part of read_trylock: make up to spin_retry attempts
 * to bump the reader count, then give up without yielding the cpu.
 * Returns 1 if the read lock was taken, 0 otherwise.
 */
int
_raw_read_trylock_retry(rwlock_t *rw)
{
	unsigned int readers;
	int i;

	for (i = spin_retry; i > 0; i--) {
		atomic_inc(&spin_retry_counter);
		/* Expected value has the write-lock bit cleared, so the
		 * cas can only succeed while no writer holds the lock. */
		readers = rw->lock & 0x7fffffffU;
		if (_raw_compare_and_swap(&rw->lock, readers,
					  readers + 1) == readers)
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL(_raw_read_trylock_retry);
/*
 * _raw_write_lock_wait - out-of-line slow path for write_lock.
 * @rw: the rw lock to acquire for writing
 *
 * Sets the write-lock bit (0x80000000) with compare-and-swap, which
 * only succeeds while the lock word is 0 -- no readers and no writer.
 * Every spin_retry failed attempts the virtual cpu is yielded with
 * _diag44().
 */
void
_raw_write_lock_wait(rwlock_t *rw)
{
	int count = spin_retry;

	while (1) {
		/* Retry budget exhausted: yield the cpu, then refill. */
		if (count-- <= 0) {
			_diag44();
			count = spin_retry;
		}
		atomic_inc(&spin_retry_counter);
		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
			return;
	}
}
EXPORT_SYMBOL(_raw_write_lock_wait);
/*
 * Out-of-line part of write_trylock: make up to spin_retry attempts
 * to set the write-lock bit, then give up without yielding the cpu.
 * Returns 1 if the write lock was taken, 0 otherwise.
 */
int
_raw_write_trylock_retry(rwlock_t *rw)
{
	int i;

	for (i = spin_retry; i > 0; i--) {
		atomic_inc(&spin_retry_counter);
		/* Succeeds only while there are no readers and no writer. */
		if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL(_raw_write_trylock_retry);
...@@ -90,7 +90,6 @@ ...@@ -90,7 +90,6 @@
#define __LC_SYSTEM_TIMER 0x278 #define __LC_SYSTEM_TIMER 0x278
#define __LC_LAST_UPDATE_CLOCK 0x280 #define __LC_LAST_UPDATE_CLOCK 0x280
#define __LC_STEAL_CLOCK 0x288 #define __LC_STEAL_CLOCK 0x288
#define __LC_DIAG44_OPCODE 0x290
#define __LC_KERNEL_STACK 0xD40 #define __LC_KERNEL_STACK 0xD40
#define __LC_THREAD_INFO 0xD48 #define __LC_THREAD_INFO 0xD48
#define __LC_ASYNC_STACK 0xD50 #define __LC_ASYNC_STACK 0xD50
...@@ -286,8 +285,7 @@ struct _lowcore ...@@ -286,8 +285,7 @@ struct _lowcore
__u64 system_timer; /* 0x278 */ __u64 system_timer; /* 0x278 */
__u64 last_update_clock; /* 0x280 */ __u64 last_update_clock; /* 0x280 */
__u64 steal_clock; /* 0x288 */ __u64 steal_clock; /* 0x288 */
__u32 diag44_opcode; /* 0x290 */ __u8 pad8[0xc00-0x290]; /* 0x290 */
__u8 pad8[0xc00-0x294]; /* 0x294 */
/* System info area */ /* System info area */
__u64 save_area[16]; /* 0xc00 */ __u64 save_area[16]; /* 0xc00 */
__u8 pad9[0xd40-0xc80]; /* 0xc80 */ __u8 pad9[0xd40-0xc80]; /* 0xc80 */
......
...@@ -203,7 +203,10 @@ unsigned long get_wchan(struct task_struct *p); ...@@ -203,7 +203,10 @@ unsigned long get_wchan(struct task_struct *p);
# define cpu_relax() asm volatile ("diag 0,0,68" : : : "memory") # define cpu_relax() asm volatile ("diag 0,0,68" : : : "memory")
#else /* __s390x__ */ #else /* __s390x__ */
# define cpu_relax() \ # define cpu_relax() \
asm volatile ("ex 0,%0" : : "i" (__LC_DIAG44_OPCODE) : "memory") do { \
if (MACHINE_HAS_DIAG44) \
asm volatile ("diag 0,0,68" : : : "memory"); \
} while (0)
#endif /* __s390x__ */ #endif /* __s390x__ */
/* /*
......
This diff is collapsed.
...@@ -145,6 +145,7 @@ enum ...@@ -145,6 +145,7 @@ enum
KERN_BOOTLOADER_TYPE=67, /* int: boot loader type */ KERN_BOOTLOADER_TYPE=67, /* int: boot loader type */
KERN_RANDOMIZE=68, /* int: randomize virtual address space */ KERN_RANDOMIZE=68, /* int: randomize virtual address space */
KERN_SETUID_DUMPABLE=69, /* int: behaviour of dumps for setuid core */ KERN_SETUID_DUMPABLE=69, /* int: behaviour of dumps for setuid core */
KERN_SPIN_RETRY=70, /* int: number of spinlock retries */
}; };
......
...@@ -114,6 +114,7 @@ extern int unaligned_enabled; ...@@ -114,6 +114,7 @@ extern int unaligned_enabled;
extern int sysctl_ieee_emulation_warnings; extern int sysctl_ieee_emulation_warnings;
#endif #endif
extern int sysctl_userprocess_debug; extern int sysctl_userprocess_debug;
extern int spin_retry;
#endif #endif
extern int sysctl_hz_timer; extern int sysctl_hz_timer;
...@@ -647,7 +648,16 @@ static ctl_table kern_table[] = { ...@@ -647,7 +648,16 @@ static ctl_table kern_table[] = {
.mode = 0644, .mode = 0644,
.proc_handler = &proc_dointvec, .proc_handler = &proc_dointvec,
}, },
#if defined(CONFIG_ARCH_S390)
{
.ctl_name = KERN_SPIN_RETRY,
.procname = "spin_retry",
.data = &spin_retry,
.maxlen = sizeof (int),
.mode = 0644,
.proc_handler = &proc_dointvec,
},
#endif
{ .ctl_name = 0 } { .ctl_name = 0 }
}; };
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment