Commit 27f6b416 authored by Martin Schwidefsky

s390/vtimer: rework virtual timer interface

The current virtual timer interface is inherently per-cpu and hard to
use. The sole user of the interface is appldata, which uses it to execute
a function after a specific amount of cputime has been used across all
CPUs.

Rework the virtual timer interface to hook into the cputime accounting.
This makes the interface independent of the CPU timer interrupts, and
makes the virtual timers global as opposed to per-cpu.
Overall the code is greatly simplified. The downside is that the accuracy
is not as good as that of the original implementation, but it is still
good enough for appldata.
Reviewed-by: Jan Glauber <jang@linux.vnet.ibm.com>
Reviewed-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent 921486b9
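Before the diff itself, a sketch of what the rework means for a consumer of the interface: instead of one vtimer per CPU plus a hotplug notifier, a driver arms a single global timer whose expiry is measured in consumed CPU time summed over all CPUs. This is a hypothetical minimal user modeled on the appldata changes below; the my_* names, the 10-second interval, and the 0x1000 TOD-units-per-microsecond factor (appldata's TOD_MICRO) are illustrative assumptions, not part of the commit:

#include <linux/workqueue.h>
#include <asm/vtimer.h>

static struct work_struct my_work;	/* hypothetical work item */
static struct vtimer_list my_timer;	/* one timer for the whole system */

static void my_work_fn(struct work_struct *work)
{
	/* sample data, write records, ... */
}

/* runs once the requested amount of CPU time was consumed, system-wide */
static void my_timer_fn(unsigned long data)
{
	queue_work(system_wq, (struct work_struct *) data);
}

static int __init my_init(void)
{
	/* 10 seconds of cputime: ms -> us -> TOD units (0x1000 per us) */
	u64 interval = 10000ULL * 1000 * 0x1000;

	INIT_WORK(&my_work, my_work_fn);
	my_timer.function = my_timer_fn;
	my_timer.data = (unsigned long) &my_work;
	my_timer.expires = interval;
	add_virt_timer_periodic(&my_timer);	/* re-arms itself */
	return 0;
}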
@@ -27,7 +27,7 @@
 #include <linux/suspend.h>
 #include <linux/platform_device.h>
 #include <asm/appldata.h>
-#include <asm/timer.h>
+#include <asm/vtimer.h>
 #include <asm/uaccess.h>
 #include <asm/io.h>
 #include <asm/smp.h>
@@ -82,8 +82,7 @@ static struct ctl_table appldata_dir_table[] = {
 /*
  * Timer
  */
-static DEFINE_PER_CPU(struct vtimer_list, appldata_timer);
-static atomic_t appldata_expire_count = ATOMIC_INIT(0);
+static struct vtimer_list appldata_timer;
 static DEFINE_SPINLOCK(appldata_timer_lock);
 static int appldata_interval = APPLDATA_CPU_INTERVAL;
@@ -113,10 +112,7 @@ static LIST_HEAD(appldata_ops_list);
  */
 static void appldata_timer_function(unsigned long data)
 {
-	if (atomic_dec_and_test(&appldata_expire_count)) {
-		atomic_set(&appldata_expire_count, num_online_cpus());
-		queue_work(appldata_wq, (struct work_struct *) data);
-	}
+	queue_work(appldata_wq, (struct work_struct *) data);
 }
 
 /*
@@ -129,7 +125,6 @@ static void appldata_work_fn(struct work_struct *work)
 	struct list_head *lh;
 	struct appldata_ops *ops;
 
-	get_online_cpus();
 	mutex_lock(&appldata_ops_mutex);
 	list_for_each(lh, &appldata_ops_list) {
 		ops = list_entry(lh, struct appldata_ops, list);
@@ -138,7 +133,6 @@ static void appldata_work_fn(struct work_struct *work)
 		}
 	}
 	mutex_unlock(&appldata_ops_mutex);
-	put_online_cpus();
 }
 
 /*
@@ -166,20 +160,6 @@ int appldata_diag(char record_nr, u16 function, unsigned long buffer,
 
 /****************************** /proc stuff **********************************/
 
-/*
- * appldata_mod_vtimer_wrap()
- *
- * wrapper function for mod_virt_timer(), because smp_call_function_single()
- * accepts only one parameter.
- */
-static void __appldata_mod_vtimer_wrap(void *p) {
-	struct {
-		struct vtimer_list *timer;
-		u64 expires;
-	} *args = p;
-	mod_virt_timer_periodic(args->timer, args->expires);
-}
-
 #define APPLDATA_ADD_TIMER	0
 #define APPLDATA_DEL_TIMER	1
 #define APPLDATA_MOD_TIMER	2
@@ -190,49 +170,28 @@ static void __appldata_mod_vtimer_wrap(void *p) {
  * Add, delete or modify virtual timers on all online cpus.
  * The caller needs to get the appldata_timer_lock spinlock.
  */
-static void
-__appldata_vtimer_setup(int cmd)
+static void __appldata_vtimer_setup(int cmd)
 {
-	u64 per_cpu_interval;
-	int i;
+	u64 timer_interval = (u64) appldata_interval * 1000 * TOD_MICRO;
 
 	switch (cmd) {
 	case APPLDATA_ADD_TIMER:
 		if (appldata_timer_active)
 			break;
-		per_cpu_interval = (u64) (appldata_interval*1000 /
-					  num_online_cpus()) * TOD_MICRO;
-		for_each_online_cpu(i) {
-			per_cpu(appldata_timer, i).expires = per_cpu_interval;
-			smp_call_function_single(i, add_virt_timer_periodic,
-						 &per_cpu(appldata_timer, i),
-						 1);
-		}
+		appldata_timer.expires = timer_interval;
+		add_virt_timer_periodic(&appldata_timer);
 		appldata_timer_active = 1;
 		break;
 	case APPLDATA_DEL_TIMER:
-		for_each_online_cpu(i)
-			del_virt_timer(&per_cpu(appldata_timer, i));
+		del_virt_timer(&appldata_timer);
 		if (!appldata_timer_active)
 			break;
 		appldata_timer_active = 0;
-		atomic_set(&appldata_expire_count, num_online_cpus());
 		break;
 	case APPLDATA_MOD_TIMER:
-		per_cpu_interval = (u64) (appldata_interval*1000 /
-					  num_online_cpus()) * TOD_MICRO;
 		if (!appldata_timer_active)
 			break;
-		for_each_online_cpu(i) {
-			struct {
-				struct vtimer_list *timer;
-				u64 expires;
-			} args;
-			args.timer = &per_cpu(appldata_timer, i);
-			args.expires = per_cpu_interval;
-			smp_call_function_single(i, __appldata_mod_vtimer_wrap,
-						 &args, 1);
-		}
+		mod_virt_timer_periodic(&appldata_timer, timer_interval);
 	}
 }
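Worth noting in the hunk above: the old code divided the interval by num_online_cpus() because each of the N per-CPU timers consumed only its own CPU's time, whereas the single global timer now accumulates cputime from every CPU, so the full interval is programmed directly. A sketch of the unit conversion, assuming appldata's TOD_MICRO is 0x1000 (TOD clock units per microsecond):

/* appldata_interval is in milliseconds; the vtimer wants TOD units */
static inline u64 interval_ms_to_tod(u64 ms)
{
	return ms * 1000 * 0x1000ULL;	/* ms -> microseconds -> TOD units */
}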
@@ -263,14 +222,12 @@ appldata_timer_handler(ctl_table *ctl, int write,
 	len = *lenp;
 	if (copy_from_user(buf, buffer, len > sizeof(buf) ? sizeof(buf) : len))
 		return -EFAULT;
-	get_online_cpus();
 	spin_lock(&appldata_timer_lock);
 	if (buf[0] == '1')
 		__appldata_vtimer_setup(APPLDATA_ADD_TIMER);
 	else if (buf[0] == '0')
 		__appldata_vtimer_setup(APPLDATA_DEL_TIMER);
 	spin_unlock(&appldata_timer_lock);
-	put_online_cpus();
 out:
 	*lenp = len;
 	*ppos += len;
@@ -303,20 +260,17 @@ appldata_interval_handler(ctl_table *ctl, int write,
 		goto out;
 	}
 	len = *lenp;
-	if (copy_from_user(buf, buffer, len > sizeof(buf) ? sizeof(buf) : len)) {
+	if (copy_from_user(buf, buffer, len > sizeof(buf) ? sizeof(buf) : len))
 		return -EFAULT;
-	}
 	interval = 0;
 	sscanf(buf, "%i", &interval);
 	if (interval <= 0)
 		return -EINVAL;
 
-	get_online_cpus();
 	spin_lock(&appldata_timer_lock);
 	appldata_interval = interval;
 	__appldata_vtimer_setup(APPLDATA_MOD_TIMER);
 	spin_unlock(&appldata_timer_lock);
-	put_online_cpus();
 out:
 	*lenp = len;
 	*ppos += len;
@@ -483,14 +437,12 @@ static int appldata_freeze(struct device *dev)
 	int rc;
 	struct list_head *lh;
 
-	get_online_cpus();
 	spin_lock(&appldata_timer_lock);
 	if (appldata_timer_active) {
 		__appldata_vtimer_setup(APPLDATA_DEL_TIMER);
 		appldata_timer_suspended = 1;
 	}
 	spin_unlock(&appldata_timer_lock);
-	put_online_cpus();
 
 	mutex_lock(&appldata_ops_mutex);
 	list_for_each(lh, &appldata_ops_list) {
@@ -514,14 +466,12 @@ static int appldata_restore(struct device *dev)
 	int rc;
 	struct list_head *lh;
 
-	get_online_cpus();
 	spin_lock(&appldata_timer_lock);
 	if (appldata_timer_suspended) {
 		__appldata_vtimer_setup(APPLDATA_ADD_TIMER);
 		appldata_timer_suspended = 0;
 	}
 	spin_unlock(&appldata_timer_lock);
-	put_online_cpus();
 
 	mutex_lock(&appldata_ops_mutex);
 	list_for_each(lh, &appldata_ops_list) {
@@ -565,53 +515,6 @@ static struct platform_driver appldata_pdrv = {
 
 /******************************* init / exit *********************************/
 
-static void __cpuinit appldata_online_cpu(int cpu)
-{
-	init_virt_timer(&per_cpu(appldata_timer, cpu));
-	per_cpu(appldata_timer, cpu).function = appldata_timer_function;
-	per_cpu(appldata_timer, cpu).data = (unsigned long)
-		&appldata_work;
-	atomic_inc(&appldata_expire_count);
-	spin_lock(&appldata_timer_lock);
-	__appldata_vtimer_setup(APPLDATA_MOD_TIMER);
-	spin_unlock(&appldata_timer_lock);
-}
-
-static void __cpuinit appldata_offline_cpu(int cpu)
-{
-	del_virt_timer(&per_cpu(appldata_timer, cpu));
-	if (atomic_dec_and_test(&appldata_expire_count)) {
-		atomic_set(&appldata_expire_count, num_online_cpus());
-		queue_work(appldata_wq, &appldata_work);
-	}
-	spin_lock(&appldata_timer_lock);
-	__appldata_vtimer_setup(APPLDATA_MOD_TIMER);
-	spin_unlock(&appldata_timer_lock);
-}
-
-static int __cpuinit appldata_cpu_notify(struct notifier_block *self,
-					 unsigned long action,
-					 void *hcpu)
-{
-	switch (action) {
-	case CPU_ONLINE:
-	case CPU_ONLINE_FROZEN:
-		appldata_online_cpu((long) hcpu);
-		break;
-	case CPU_DEAD:
-	case CPU_DEAD_FROZEN:
-		appldata_offline_cpu((long) hcpu);
-		break;
-	default:
-		break;
-	}
-	return NOTIFY_OK;
-}
-
-static struct notifier_block __cpuinitdata appldata_nb = {
-	.notifier_call = appldata_cpu_notify,
-};
-
 /*
  * appldata_init()
  *
@@ -619,7 +522,10 @@ static struct notifier_block __cpuinitdata appldata_nb = {
  */
 static int __init appldata_init(void)
 {
-	int i, rc;
+	int rc;
+
+	appldata_timer.function = appldata_timer_function;
+	appldata_timer.data = (unsigned long) &appldata_work;
 
 	rc = platform_driver_register(&appldata_pdrv);
 	if (rc)
@@ -637,14 +543,6 @@ static int __init appldata_init(void)
 		goto out_device;
 	}
 
-	get_online_cpus();
-	for_each_online_cpu(i)
-		appldata_online_cpu(i);
-	put_online_cpus();
-
-	/* Register cpu hotplug notifier */
-	register_hotcpu_notifier(&appldata_nb);
-
 	appldata_sysctl_header = register_sysctl_table(appldata_dir_table);
 	return 0;
...
@@ -168,9 +168,11 @@ struct s390_idle_data {
 	int nohz_delay;
 	unsigned int sequence;
 	unsigned long long idle_count;
-	unsigned long long idle_enter;
-	unsigned long long idle_exit;
 	unsigned long long idle_time;
+	unsigned long long clock_idle_enter;
+	unsigned long long clock_idle_exit;
+	unsigned long long timer_idle_enter;
+	unsigned long long timer_idle_exit;
 };
 
 DECLARE_PER_CPU(struct s390_idle_data, s390_idle);
...
 /*
- * include/asm-s390/timer.h
- *
- * (C) Copyright IBM Corp. 2003,2006
+ * Copyright IBM Corp. 2003, 2012
  * Virtual CPU timer
  *
- * Author: Jan Glauber (jang@de.ibm.com)
+ * Author(s): Jan Glauber <jan.glauber@de.ibm.com>
  */
 
 #ifndef _ASM_S390_TIMER_H
 #define _ASM_S390_TIMER_H
 
-#include <linux/timer.h>
-
-#define VTIMER_MAX_SLICE (0x7ffffffffffff000LL)
+#define VTIMER_MAX_SLICE (0x7fffffffffffffffULL)
 
 struct vtimer_list {
 	struct list_head entry;
-	int cpu;
-	__u64 expires;
-	__u64 interval;
+	u64 expires;
+	u64 interval;
 	void (*function)(unsigned long);
 	unsigned long data;
 };
 
-/* the vtimer value will wrap after ca. 71 years */
-struct vtimer_queue {
-	struct list_head list;
-	spinlock_t lock;
-	__u64 timer;		/* last programmed timer */
-	__u64 elapsed;		/* elapsed time of timer expire values */
-	__u64 idle_enter;	/* cpu timer on idle enter */
-	__u64 idle_exit;	/* cpu timer on idle exit */
-};
-
 extern void init_virt_timer(struct vtimer_list *timer);
-extern void add_virt_timer(void *new);
-extern void add_virt_timer_periodic(void *new);
-extern int mod_virt_timer(struct vtimer_list *timer, __u64 expires);
-extern int mod_virt_timer_periodic(struct vtimer_list *timer, __u64 expires);
+extern void add_virt_timer(struct vtimer_list *timer);
+extern void add_virt_timer_periodic(struct vtimer_list *timer);
+extern int mod_virt_timer(struct vtimer_list *timer, u64 expires);
+extern int mod_virt_timer_periodic(struct vtimer_list *timer, u64 expires);
 extern int del_virt_timer(struct vtimer_list *timer);
 
 extern void init_cpu_vtimer(void);
 extern void vtime_init(void);
 
 extern void vtime_stop_cpu(void);
-extern void vtime_start_leave(void);
 
 #endif /* _ASM_S390_TIMER_H */
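The surviving interface above is deliberately small: a timer is armed with an expiry in TOD clock units of consumed CPU time, either one-shot (add_virt_timer) or self-re-arming (add_virt_timer_periodic), with mod_virt_timer_periodic and del_virt_timer to retune or cancel. A hypothetical caller, with my_fn and the 500 ms value as illustrative assumptions:

static void my_fn(unsigned long data)
{
	/* called after 'expires' worth of CPU time was consumed */
}

static struct vtimer_list my_vt = {
	.function = my_fn,
	.data	  = 0,
};

static void my_example(void)
{
	my_vt.expires = 500ULL * 1000 * 0x1000;	/* 500 ms of cputime */
	add_virt_timer(&my_vt);			/* fires once */

	mod_virt_timer_periodic(&my_vt, my_vt.expires);	/* make it periodic */
	del_virt_timer(&my_vt);				/* or cancel it */
}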
@@ -9,7 +9,6 @@
 #include <linux/kbuild.h>
 #include <linux/sched.h>
 #include <asm/cputime.h>
-#include <asm/timer.h>
 #include <asm/vdso.h>
 #include <asm/pgtable.h>
@@ -72,11 +71,10 @@ int main(void)
 	DEFINE(__CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC);
 	BLANK();
 	/* idle data offsets */
-	DEFINE(__IDLE_ENTER, offsetof(struct s390_idle_data, idle_enter));
-	DEFINE(__IDLE_EXIT, offsetof(struct s390_idle_data, idle_exit));
-	/* vtimer queue offsets */
-	DEFINE(__VQ_IDLE_ENTER, offsetof(struct vtimer_queue, idle_enter));
-	DEFINE(__VQ_IDLE_EXIT, offsetof(struct vtimer_queue, idle_exit));
+	DEFINE(__CLOCK_IDLE_ENTER, offsetof(struct s390_idle_data, clock_idle_enter));
+	DEFINE(__CLOCK_IDLE_EXIT, offsetof(struct s390_idle_data, clock_idle_exit));
+	DEFINE(__TIMER_IDLE_ENTER, offsetof(struct s390_idle_data, timer_idle_enter));
+	DEFINE(__TIMER_IDLE_EXIT, offsetof(struct s390_idle_data, timer_idle_exit));
 	/* lowcore offsets */
 	DEFINE(__LC_EXT_PARAMS, offsetof(struct _lowcore, ext_params));
 	DEFINE(__LC_EXT_CPU_ADDR, offsetof(struct _lowcore, ext_cpu_addr));
...
@@ -616,17 +616,13 @@ ext_skip:
 * Load idle PSW. The second "half" of this function is in cleanup_idle.
 */
 ENTRY(psw_idle)
-	st	%r4,__SF_EMPTY(%r15)
+	st	%r3,__SF_EMPTY(%r15)
 	basr	%r1,0
 	la	%r1,psw_idle_lpsw+4-.(%r1)
 	st	%r1,__SF_EMPTY+4(%r15)
 	oi	__SF_EMPTY+4(%r15),0x80
-	la	%r1,.Lvtimer_max-psw_idle_lpsw-4(%r1)
-	stck	__IDLE_ENTER(%r2)
-	ltr	%r5,%r5
-	stpt	__VQ_IDLE_ENTER(%r3)
-	jz	psw_idle_lpsw
-	spt	0(%r1)
+	stck	__CLOCK_IDLE_ENTER(%r2)
+	stpt	__TIMER_IDLE_ENTER(%r2)
 psw_idle_lpsw:
 	lpsw	__SF_EMPTY(%r15)
 	br	%r14
@@ -885,33 +881,28 @@ cleanup_io_restore_insn:
 cleanup_idle:
 	# copy interrupt clock & cpu timer
-	mvc	__IDLE_EXIT(8,%r2),__LC_INT_CLOCK
-	mvc	__VQ_IDLE_EXIT(8,%r3),__LC_ASYNC_ENTER_TIMER
+	mvc	__CLOCK_IDLE_EXIT(8,%r2),__LC_INT_CLOCK
+	mvc	__TIMER_IDLE_EXIT(8,%r2),__LC_ASYNC_ENTER_TIMER
 	chi	%r11,__LC_SAVE_AREA_ASYNC
 	je	0f
-	mvc	__IDLE_EXIT(8,%r2),__LC_MCCK_CLOCK
-	mvc	__VQ_IDLE_EXIT(8,%r3),__LC_MCCK_ENTER_TIMER
+	mvc	__CLOCK_IDLE_EXIT(8,%r2),__LC_MCCK_CLOCK
+	mvc	__TIMER_IDLE_EXIT(8,%r2),__LC_MCCK_ENTER_TIMER
 0:	# check if stck has been executed
 	cl	%r9,BASED(cleanup_idle_insn)
 	jhe	1f
-	mvc	__IDLE_ENTER(8,%r2),__IDLE_EXIT(%r2)
-	mvc	__VQ_IDLE_ENTER(8,%r3),__VQ_IDLE_EXIT(%r3)
-	j	2f
-1:	# check if the cpu timer has been reprogrammed
-	ltr	%r5,%r5
-	jz	2f
-	spt	__VQ_IDLE_ENTER(%r3)
-2:	# account system time going idle
+	mvc	__CLOCK_IDLE_ENTER(8,%r2),__CLOCK_IDLE_EXIT(%r2)
+	mvc	__TIMER_IDLE_ENTER(8,%r2),__TIMER_IDLE_EXIT(%r3)
+1:	# account system time going idle
 	lm	%r9,%r10,__LC_STEAL_TIMER
-	ADD64	%r9,%r10,__IDLE_ENTER(%r2)
+	ADD64	%r9,%r10,__CLOCK_IDLE_ENTER(%r2)
 	SUB64	%r9,%r10,__LC_LAST_UPDATE_CLOCK
 	stm	%r9,%r10,__LC_STEAL_TIMER
-	mvc	__LC_LAST_UPDATE_CLOCK(8),__IDLE_EXIT(%r2)
+	mvc	__LC_LAST_UPDATE_CLOCK(8),__CLOCK_IDLE_EXIT(%r2)
 	lm	%r9,%r10,__LC_SYSTEM_TIMER
 	ADD64	%r9,%r10,__LC_LAST_UPDATE_TIMER
-	SUB64	%r9,%r10,__VQ_IDLE_ENTER(%r3)
+	SUB64	%r9,%r10,__TIMER_IDLE_ENTER(%r2)
 	stm	%r9,%r10,__LC_SYSTEM_TIMER
-	mvc	__LC_LAST_UPDATE_TIMER(8),__VQ_IDLE_EXIT(%r3)
+	mvc	__LC_LAST_UPDATE_TIMER(8),__TIMER_IDLE_EXIT(%r2)
 	# prepare return psw
 	n	%r8,BASED(cleanup_idle_wait)	# clear wait state bit
 	l	%r9,24(%r11)			# return from psw_idle
...
@@ -5,7 +5,6 @@
 #include <linux/signal.h>
 #include <asm/ptrace.h>
 #include <asm/cputime.h>
-#include <asm/timer.h>
 
 extern void (*pgm_check_table[128])(struct pt_regs *);
 extern void *restart_stack;
@@ -17,8 +16,7 @@ void io_int_handler(void);
 void mcck_int_handler(void);
 void restart_int_handler(void);
 void restart_call_handler(void);
-void psw_idle(struct s390_idle_data *, struct vtimer_queue *,
-	      unsigned long, int);
+void psw_idle(struct s390_idle_data *, unsigned long);
 
 asmlinkage long do_syscall_trace_enter(struct pt_regs *regs);
 asmlinkage void do_syscall_trace_exit(struct pt_regs *regs);
...
@@ -642,15 +642,11 @@ ext_skip:
 * Load idle PSW. The second "half" of this function is in cleanup_idle.
 */
 ENTRY(psw_idle)
-	stg	%r4,__SF_EMPTY(%r15)
+	stg	%r3,__SF_EMPTY(%r15)
 	larl	%r1,psw_idle_lpsw+4
 	stg	%r1,__SF_EMPTY+8(%r15)
-	larl	%r1,.Lvtimer_max
-	STCK	__IDLE_ENTER(%r2)
-	ltr	%r5,%r5
-	stpt	__VQ_IDLE_ENTER(%r3)
-	jz	psw_idle_lpsw
-	spt	0(%r1)
+	STCK	__CLOCK_IDLE_ENTER(%r2)
+	stpt	__TIMER_IDLE_ENTER(%r2)
 psw_idle_lpsw:
 	lpswe	__SF_EMPTY(%r15)
 	br	%r14
@@ -918,33 +914,28 @@ cleanup_io_restore_insn:
 cleanup_idle:
 	# copy interrupt clock & cpu timer
-	mvc	__IDLE_EXIT(8,%r2),__LC_INT_CLOCK
-	mvc	__VQ_IDLE_EXIT(8,%r3),__LC_ASYNC_ENTER_TIMER
+	mvc	__CLOCK_IDLE_EXIT(8,%r2),__LC_INT_CLOCK
+	mvc	__TIMER_IDLE_EXIT(8,%r2),__LC_ASYNC_ENTER_TIMER
 	cghi	%r11,__LC_SAVE_AREA_ASYNC
 	je	0f
-	mvc	__IDLE_EXIT(8,%r2),__LC_MCCK_CLOCK
-	mvc	__VQ_IDLE_EXIT(8,%r3),__LC_MCCK_ENTER_TIMER
+	mvc	__CLOCK_IDLE_EXIT(8,%r2),__LC_MCCK_CLOCK
+	mvc	__TIMER_IDLE_EXIT(8,%r2),__LC_MCCK_ENTER_TIMER
 0:	# check if stck & stpt have been executed
 	clg	%r9,BASED(cleanup_idle_insn)
 	jhe	1f
-	mvc	__IDLE_ENTER(8,%r2),__IDLE_EXIT(%r2)
-	mvc	__VQ_IDLE_ENTER(8,%r3),__VQ_IDLE_EXIT(%r3)
-	j	2f
-1:	# check if the cpu timer has been reprogrammed
-	ltr	%r5,%r5
-	jz	2f
-	spt	__VQ_IDLE_ENTER(%r3)
-2:	# account system time going idle
+	mvc	__CLOCK_IDLE_ENTER(8,%r2),__CLOCK_IDLE_EXIT(%r2)
+	mvc	__TIMER_IDLE_ENTER(8,%r2),__TIMER_IDLE_EXIT(%r2)
+1:	# account system time going idle
 	lg	%r9,__LC_STEAL_TIMER
-	alg	%r9,__IDLE_ENTER(%r2)
+	alg	%r9,__CLOCK_IDLE_ENTER(%r2)
 	slg	%r9,__LC_LAST_UPDATE_CLOCK
 	stg	%r9,__LC_STEAL_TIMER
-	mvc	__LC_LAST_UPDATE_CLOCK(8),__IDLE_EXIT(%r2)
+	mvc	__LC_LAST_UPDATE_CLOCK(8),__CLOCK_IDLE_EXIT(%r2)
 	lg	%r9,__LC_SYSTEM_TIMER
 	alg	%r9,__LC_LAST_UPDATE_TIMER
-	slg	%r9,__VQ_IDLE_ENTER(%r3)
+	slg	%r9,__TIMER_IDLE_ENTER(%r2)
 	stg	%r9,__LC_SYSTEM_TIMER
-	mvc	__LC_LAST_UPDATE_TIMER(8),__VQ_IDLE_EXIT(%r3)
+	mvc	__LC_LAST_UPDATE_TIMER(8),__TIMER_IDLE_EXIT(%r2)
 	# prepare return psw
 	nihh	%r8,0xfffd		# clear wait state bit
 	lg	%r9,48(%r11)		# return from psw_idle
@@ -960,8 +951,6 @@ cleanup_idle_insn:
 	.quad	__critical_start
 .Lcritical_length:
 	.quad	__critical_end - __critical_start
-.Lvtimer_max:
-	.quad	0x7fffffffffffffff
 
 #if defined(CONFIG_KVM) || defined(CONFIG_KVM_MODULE)
...
@@ -25,8 +25,8 @@
 #include <linux/module.h>
 #include <asm/io.h>
 #include <asm/processor.h>
+#include <asm/vtimer.h>
 #include <asm/irq.h>
-#include <asm/timer.h>
 #include <asm/nmi.h>
 #include <asm/smp.h>
 #include <asm/switch_to.h>
...
@@ -38,7 +38,7 @@
 #include <asm/setup.h>
 #include <asm/irq.h>
 #include <asm/tlbflush.h>
-#include <asm/timer.h>
+#include <asm/vtimer.h>
 #include <asm/lowcore.h>
 #include <asm/sclp.h>
 #include <asm/vdso.h>
@@ -917,7 +917,7 @@ static ssize_t show_idle_count(struct device *dev,
 	do {
 		sequence = ACCESS_ONCE(idle->sequence);
 		idle_count = ACCESS_ONCE(idle->idle_count);
-		if (ACCESS_ONCE(idle->idle_enter))
+		if (ACCESS_ONCE(idle->clock_idle_enter))
 			idle_count++;
 	} while ((sequence & 1) || (idle->sequence != sequence));
 	return sprintf(buf, "%llu\n", idle_count);
@@ -935,8 +935,8 @@ static ssize_t show_idle_time(struct device *dev,
 		now = get_clock();
 		sequence = ACCESS_ONCE(idle->sequence);
 		idle_time = ACCESS_ONCE(idle->idle_time);
-		idle_enter = ACCESS_ONCE(idle->idle_enter);
-		idle_exit = ACCESS_ONCE(idle->idle_exit);
+		idle_enter = ACCESS_ONCE(idle->clock_idle_enter);
+		idle_exit = ACCESS_ONCE(idle->clock_idle_exit);
 	} while ((sequence & 1) || (idle->sequence != sequence));
 	idle_time += idle_enter ? ((idle_exit ? : now) - idle_enter) : 0;
 	return sprintf(buf, "%llu\n", idle_time >> 12);
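Two details of the sysfs readers above are worth spelling out: the do/while loop is a seqcount-style lockless read (the idle path increments ->sequence before and after updating the fields, so an odd or changed value means the reader raced and must retry), and the trailing >> 12 converts TOD clock units to microseconds (2^12 TOD units per microsecond). The same pattern as a hypothetical standalone helper:

/* sketch: consistent snapshot of idle time, in microseconds */
static unsigned long long idle_time_us(struct s390_idle_data *idle,
				       unsigned long long now)
{
	unsigned long long enter, exit, time;
	unsigned int seq;

	do {
		seq   = ACCESS_ONCE(idle->sequence);
		time  = ACCESS_ONCE(idle->idle_time);
		enter = ACCESS_ONCE(idle->clock_idle_enter);
		exit  = ACCESS_ONCE(idle->clock_idle_exit);
	} while ((seq & 1) || (idle->sequence != seq));

	/* currently idle: credit the still-running interval up to 'now' */
	time += enter ? ((exit ? : now) - enter) : 0;
	return time >> 12;	/* TOD clock units -> microseconds */
}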
...
@@ -44,7 +44,7 @@
 #include <asm/vdso.h>
 #include <asm/irq.h>
 #include <asm/irq_regs.h>
-#include <asm/timer.h>
+#include <asm/vtimer.h>
 #include <asm/etr.h>
 #include <asm/cio.h>
 #include "entry.h"
...
@@ -12,8 +12,8 @@
 #include <linux/module.h>
 #include <linux/irqflags.h>
 #include <linux/interrupt.h>
+#include <asm/vtimer.h>
 #include <asm/div64.h>
-#include <asm/timer.h>
 
 void __delay(unsigned long loops)
 {
...