Commit a9b011f5 authored by Linus Torvalds

Merge branch 'for-linus' of git://git390.marist.edu/pub/scm/linux-2.6

* 'for-linus' of git://git390.marist.edu/pub/scm/linux-2.6: (22 commits)
  [S390] Update default configuration.
  [S390] kprobes: defer setting of ctlblk state
  [S390] Enable tick based perf_counter on s390.
  [S390] dasd: fix refcounting in dasd_change_state
  [S390] lockless idle time accounting
  [S390] driver_data access
  [S390] pm: fix build error for !SMP
  [S390] dasd_pm: fix stop flag handling
  [S390] ap/zcrypt: Suspend/Resume ap bus and zcrypt
  [S390] qdio: Sanitize do_QDIO sanity checks
  [S390] qdio: leave inbound SBALs primed
  [S390] qdio: merge AI tasklet into interrupt handler
  [S390] qdio: extract all primed SBALs at once
  [S390] qdio: fix check for running under z/VM
  [S390] qdio: move adapter interrupt tasklet code
  [S390] Use del_timer instead of del_timer_sync
  [S390] s390: remove DEBUG_MALLOC
  [S390] vt220 console: convert from bootmem to slab
  [S390] sclp console: convert from bootmem to slab
  [S390] 3270 console: convert from bootmem to slab
  ...
parents b5bdd438 da6330fc
...@@ -94,6 +94,7 @@ config S390 ...@@ -94,6 +94,7 @@ config S390
select HAVE_KVM if 64BIT select HAVE_KVM if 64BIT
select HAVE_ARCH_TRACEHOOK select HAVE_ARCH_TRACEHOOK
select INIT_ALL_POSSIBLE select INIT_ALL_POSSIBLE
select HAVE_PERF_COUNTERS
source "init/Kconfig" source "init/Kconfig"
......
# #
# Automatically generated make config: don't edit # Automatically generated make config: don't edit
# Linux kernel version: 2.6.30-rc3 # Linux kernel version: 2.6.30
# Thu Apr 23 09:29:52 2009 # Mon Jun 22 11:08:16 2009
# #
CONFIG_SCHED_MC=y CONFIG_SCHED_MC=y
CONFIG_MMU=y CONFIG_MMU=y
...@@ -25,6 +25,7 @@ CONFIG_VIRT_CPU_ACCOUNTING=y ...@@ -25,6 +25,7 @@ CONFIG_VIRT_CPU_ACCOUNTING=y
CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y
CONFIG_S390=y CONFIG_S390=y
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
CONFIG_CONSTRUCTORS=y
# #
# General setup # General setup
...@@ -90,7 +91,6 @@ CONFIG_SYSCTL_SYSCALL=y ...@@ -90,7 +91,6 @@ CONFIG_SYSCTL_SYSCALL=y
CONFIG_KALLSYMS=y CONFIG_KALLSYMS=y
# CONFIG_KALLSYMS_ALL is not set # CONFIG_KALLSYMS_ALL is not set
# CONFIG_KALLSYMS_EXTRA_PASS is not set # CONFIG_KALLSYMS_EXTRA_PASS is not set
# CONFIG_STRIP_ASM_SYMS is not set
CONFIG_HOTPLUG=y CONFIG_HOTPLUG=y
CONFIG_PRINTK=y CONFIG_PRINTK=y
CONFIG_BUG=y CONFIG_BUG=y
...@@ -103,7 +103,14 @@ CONFIG_TIMERFD=y ...@@ -103,7 +103,14 @@ CONFIG_TIMERFD=y
CONFIG_EVENTFD=y CONFIG_EVENTFD=y
CONFIG_SHMEM=y CONFIG_SHMEM=y
CONFIG_AIO=y CONFIG_AIO=y
CONFIG_HAVE_PERF_COUNTERS=y
#
# Performance Counters
#
# CONFIG_PERF_COUNTERS is not set
CONFIG_VM_EVENT_COUNTERS=y CONFIG_VM_EVENT_COUNTERS=y
# CONFIG_STRIP_ASM_SYMS is not set
# CONFIG_COMPAT_BRK is not set # CONFIG_COMPAT_BRK is not set
CONFIG_SLAB=y CONFIG_SLAB=y
# CONFIG_SLUB is not set # CONFIG_SLUB is not set
...@@ -119,6 +126,11 @@ CONFIG_HAVE_KRETPROBES=y ...@@ -119,6 +126,11 @@ CONFIG_HAVE_KRETPROBES=y
CONFIG_HAVE_ARCH_TRACEHOOK=y CONFIG_HAVE_ARCH_TRACEHOOK=y
CONFIG_USE_GENERIC_SMP_HELPERS=y CONFIG_USE_GENERIC_SMP_HELPERS=y
CONFIG_HAVE_DEFAULT_NO_SPIN_MUTEXES=y CONFIG_HAVE_DEFAULT_NO_SPIN_MUTEXES=y
#
# GCOV-based kernel profiling
#
# CONFIG_GCOV_KERNEL is not set
# CONFIG_SLOW_WORK is not set # CONFIG_SLOW_WORK is not set
# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set # CONFIG_HAVE_GENERIC_DMA_COHERENT is not set
CONFIG_SLABINFO=y CONFIG_SLABINFO=y
...@@ -150,7 +162,7 @@ CONFIG_DEFAULT_DEADLINE=y ...@@ -150,7 +162,7 @@ CONFIG_DEFAULT_DEADLINE=y
# CONFIG_DEFAULT_NOOP is not set # CONFIG_DEFAULT_NOOP is not set
CONFIG_DEFAULT_IOSCHED="deadline" CONFIG_DEFAULT_IOSCHED="deadline"
CONFIG_PREEMPT_NOTIFIERS=y CONFIG_PREEMPT_NOTIFIERS=y
# CONFIG_FREEZER is not set CONFIG_FREEZER=y
# #
# Base setup # Base setup
...@@ -199,6 +211,7 @@ CONFIG_ARCH_SPARSEMEM_DEFAULT=y ...@@ -199,6 +211,7 @@ CONFIG_ARCH_SPARSEMEM_DEFAULT=y
CONFIG_ARCH_SELECT_MEMORY_MODEL=y CONFIG_ARCH_SELECT_MEMORY_MODEL=y
CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y
CONFIG_ARCH_HIBERNATION_POSSIBLE=y
CONFIG_SELECT_MEMORY_MODEL=y CONFIG_SELECT_MEMORY_MODEL=y
# CONFIG_FLATMEM_MANUAL is not set # CONFIG_FLATMEM_MANUAL is not set
# CONFIG_DISCONTIGMEM_MANUAL is not set # CONFIG_DISCONTIGMEM_MANUAL is not set
...@@ -218,9 +231,9 @@ CONFIG_PHYS_ADDR_T_64BIT=y ...@@ -218,9 +231,9 @@ CONFIG_PHYS_ADDR_T_64BIT=y
CONFIG_ZONE_DMA_FLAG=1 CONFIG_ZONE_DMA_FLAG=1
CONFIG_BOUNCE=y CONFIG_BOUNCE=y
CONFIG_VIRT_TO_BUS=y CONFIG_VIRT_TO_BUS=y
CONFIG_UNEVICTABLE_LRU=y
CONFIG_HAVE_MLOCK=y CONFIG_HAVE_MLOCK=y
CONFIG_HAVE_MLOCKED_PAGE_BIT=y CONFIG_HAVE_MLOCKED_PAGE_BIT=y
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
# #
# I/O subsystem configuration # I/O subsystem configuration
...@@ -257,6 +270,16 @@ CONFIG_KEXEC=y ...@@ -257,6 +270,16 @@ CONFIG_KEXEC=y
# CONFIG_ZFCPDUMP is not set # CONFIG_ZFCPDUMP is not set
CONFIG_S390_GUEST=y CONFIG_S390_GUEST=y
CONFIG_SECCOMP=y CONFIG_SECCOMP=y
#
# Power Management
#
CONFIG_PM=y
# CONFIG_PM_DEBUG is not set
CONFIG_PM_SLEEP_SMP=y
CONFIG_PM_SLEEP=y
CONFIG_HIBERNATION=y
CONFIG_PM_STD_PARTITION=""
CONFIG_NET=y CONFIG_NET=y
# #
...@@ -384,6 +407,7 @@ CONFIG_SCTP_HMAC_MD5=y ...@@ -384,6 +407,7 @@ CONFIG_SCTP_HMAC_MD5=y
# CONFIG_ECONET is not set # CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set # CONFIG_WAN_ROUTER is not set
# CONFIG_PHONET is not set # CONFIG_PHONET is not set
# CONFIG_IEEE802154 is not set
CONFIG_NET_SCHED=y CONFIG_NET_SCHED=y
# #
...@@ -446,6 +470,7 @@ CONFIG_CAN_BCM=m ...@@ -446,6 +470,7 @@ CONFIG_CAN_BCM=m
# CAN Device Drivers # CAN Device Drivers
# #
CONFIG_CAN_VCAN=m CONFIG_CAN_VCAN=m
# CONFIG_CAN_DEV is not set
# CONFIG_CAN_DEBUG_DEVICES is not set # CONFIG_CAN_DEBUG_DEVICES is not set
# CONFIG_AF_RXRPC is not set # CONFIG_AF_RXRPC is not set
# CONFIG_WIMAX is not set # CONFIG_WIMAX is not set
...@@ -524,10 +549,6 @@ CONFIG_BLK_DEV_SR=y ...@@ -524,10 +549,6 @@ CONFIG_BLK_DEV_SR=y
CONFIG_BLK_DEV_SR_VENDOR=y CONFIG_BLK_DEV_SR_VENDOR=y
CONFIG_CHR_DEV_SG=y CONFIG_CHR_DEV_SG=y
# CONFIG_CHR_DEV_SCH is not set # CONFIG_CHR_DEV_SCH is not set
#
# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
#
CONFIG_SCSI_MULTI_LUN=y CONFIG_SCSI_MULTI_LUN=y
CONFIG_SCSI_CONSTANTS=y CONFIG_SCSI_CONSTANTS=y
CONFIG_SCSI_LOGGING=y CONFIG_SCSI_LOGGING=y
...@@ -578,7 +599,6 @@ CONFIG_DM_MULTIPATH=m ...@@ -578,7 +599,6 @@ CONFIG_DM_MULTIPATH=m
# CONFIG_DM_DELAY is not set # CONFIG_DM_DELAY is not set
# CONFIG_DM_UEVENT is not set # CONFIG_DM_UEVENT is not set
CONFIG_NETDEVICES=y CONFIG_NETDEVICES=y
CONFIG_COMPAT_NET_DEV_OPS=y
# CONFIG_IFB is not set # CONFIG_IFB is not set
CONFIG_DUMMY=m CONFIG_DUMMY=m
CONFIG_BONDING=m CONFIG_BONDING=m
...@@ -595,6 +615,7 @@ CONFIG_NET_ETHERNET=y ...@@ -595,6 +615,7 @@ CONFIG_NET_ETHERNET=y
# CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set # CONFIG_IBM_NEW_EMAC_NO_FLOW_CTRL is not set
# CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set # CONFIG_IBM_NEW_EMAC_MAL_CLR_ICINTSTAT is not set
# CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set # CONFIG_IBM_NEW_EMAC_MAL_COMMON_ERR is not set
# CONFIG_KS8842 is not set
CONFIG_NETDEV_1000=y CONFIG_NETDEV_1000=y
CONFIG_NETDEV_10000=y CONFIG_NETDEV_10000=y
# CONFIG_TR is not set # CONFIG_TR is not set
...@@ -674,6 +695,11 @@ CONFIG_S390_TAPE_34XX=m ...@@ -674,6 +695,11 @@ CONFIG_S390_TAPE_34XX=m
# CONFIG_MONREADER is not set # CONFIG_MONREADER is not set
CONFIG_MONWRITER=m CONFIG_MONWRITER=m
CONFIG_S390_VMUR=m CONFIG_S390_VMUR=m
#
# PPS support
#
# CONFIG_PPS is not set
# CONFIG_POWER_SUPPLY is not set # CONFIG_POWER_SUPPLY is not set
# CONFIG_THERMAL is not set # CONFIG_THERMAL is not set
# CONFIG_THERMAL_HWMON is not set # CONFIG_THERMAL_HWMON is not set
...@@ -683,6 +709,10 @@ CONFIG_S390_VMUR=m ...@@ -683,6 +709,10 @@ CONFIG_S390_VMUR=m
# CONFIG_NEW_LEDS is not set # CONFIG_NEW_LEDS is not set
CONFIG_ACCESSIBILITY=y CONFIG_ACCESSIBILITY=y
# CONFIG_AUXDISPLAY is not set # CONFIG_AUXDISPLAY is not set
#
# TI VLYNQ
#
# CONFIG_STAGING is not set # CONFIG_STAGING is not set
# #
...@@ -703,11 +733,12 @@ CONFIG_FS_MBCACHE=y ...@@ -703,11 +733,12 @@ CONFIG_FS_MBCACHE=y
# CONFIG_REISERFS_FS is not set # CONFIG_REISERFS_FS is not set
# CONFIG_JFS_FS is not set # CONFIG_JFS_FS is not set
CONFIG_FS_POSIX_ACL=y CONFIG_FS_POSIX_ACL=y
CONFIG_FILE_LOCKING=y
# CONFIG_XFS_FS is not set # CONFIG_XFS_FS is not set
# CONFIG_GFS2_FS is not set # CONFIG_GFS2_FS is not set
# CONFIG_OCFS2_FS is not set # CONFIG_OCFS2_FS is not set
# CONFIG_BTRFS_FS is not set # CONFIG_BTRFS_FS is not set
CONFIG_FILE_LOCKING=y
CONFIG_FSNOTIFY=y
CONFIG_DNOTIFY=y CONFIG_DNOTIFY=y
CONFIG_INOTIFY=y CONFIG_INOTIFY=y
CONFIG_INOTIFY_USER=y CONFIG_INOTIFY_USER=y
...@@ -865,19 +896,23 @@ CONFIG_DEBUG_MEMORY_INIT=y ...@@ -865,19 +896,23 @@ CONFIG_DEBUG_MEMORY_INIT=y
CONFIG_SYSCTL_SYSCALL_CHECK=y CONFIG_SYSCTL_SYSCALL_CHECK=y
# CONFIG_DEBUG_PAGEALLOC is not set # CONFIG_DEBUG_PAGEALLOC is not set
CONFIG_HAVE_FUNCTION_TRACER=y CONFIG_HAVE_FUNCTION_TRACER=y
CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y
CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST=y
CONFIG_HAVE_DYNAMIC_FTRACE=y
CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y
CONFIG_HAVE_FTRACE_SYSCALLS=y
CONFIG_TRACING_SUPPORT=y CONFIG_TRACING_SUPPORT=y
CONFIG_FTRACE=y
#
# Tracers
#
# CONFIG_FUNCTION_TRACER is not set # CONFIG_FUNCTION_TRACER is not set
# CONFIG_IRQSOFF_TRACER is not set # CONFIG_IRQSOFF_TRACER is not set
# CONFIG_PREEMPT_TRACER is not set # CONFIG_PREEMPT_TRACER is not set
# CONFIG_SCHED_TRACER is not set # CONFIG_SCHED_TRACER is not set
# CONFIG_CONTEXT_SWITCH_TRACER is not set # CONFIG_ENABLE_DEFAULT_TRACERS is not set
# CONFIG_EVENT_TRACER is not set # CONFIG_FTRACE_SYSCALLS is not set
# CONFIG_BOOT_TRACER is not set # CONFIG_BOOT_TRACER is not set
# CONFIG_TRACE_BRANCH_PROFILING is not set CONFIG_BRANCH_PROFILE_NONE=y
# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set
# CONFIG_PROFILE_ALL_BRANCHES is not set
# CONFIG_STACK_TRACER is not set # CONFIG_STACK_TRACER is not set
# CONFIG_KMEMTRACE is not set # CONFIG_KMEMTRACE is not set
# CONFIG_WORKQUEUE_TRACER is not set # CONFIG_WORKQUEUE_TRACER is not set
...@@ -886,6 +921,7 @@ CONFIG_TRACING_SUPPORT=y ...@@ -886,6 +921,7 @@ CONFIG_TRACING_SUPPORT=y
CONFIG_SAMPLES=y CONFIG_SAMPLES=y
# CONFIG_SAMPLE_KOBJECT is not set # CONFIG_SAMPLE_KOBJECT is not set
# CONFIG_SAMPLE_KPROBES is not set # CONFIG_SAMPLE_KPROBES is not set
# CONFIG_KMEMCHECK is not set
# #
# Security options # Security options
......
...@@ -178,7 +178,7 @@ cputime64_to_clock_t(cputime64_t cputime) ...@@ -178,7 +178,7 @@ cputime64_to_clock_t(cputime64_t cputime)
} }
struct s390_idle_data { struct s390_idle_data {
spinlock_t lock; unsigned int sequence;
unsigned long long idle_count; unsigned long long idle_count;
unsigned long long idle_enter; unsigned long long idle_enter;
unsigned long long idle_time; unsigned long long idle_time;
......
...@@ -248,14 +248,5 @@ int debug_unregister_view(debug_info_t* id, struct debug_view* view); ...@@ -248,14 +248,5 @@ int debug_unregister_view(debug_info_t* id, struct debug_view* view);
#define PRINT_FATAL(x...) printk ( KERN_DEBUG PRINTK_HEADER x ) #define PRINT_FATAL(x...) printk ( KERN_DEBUG PRINTK_HEADER x )
#endif /* DASD_DEBUG */ #endif /* DASD_DEBUG */
#undef DEBUG_MALLOC
#ifdef DEBUG_MALLOC
void *b;
#define kmalloc(x...) (PRINT_INFO(" kmalloc %p\n",b=kmalloc(x)),b)
#define kfree(x) PRINT_INFO(" kfree %p\n",x);kfree(x)
#define get_zeroed_page(x...) (PRINT_INFO(" gfp %p\n",b=get_zeroed_page(x)),b)
#define __get_free_pages(x...) (PRINT_INFO(" gfps %p\n",b=__get_free_pages(x)),b)
#endif /* DEBUG_MALLOC */
#endif /* __KERNEL__ */ #endif /* __KERNEL__ */
#endif /* DEBUG_H */ #endif /* DEBUG_H */
/*
* Performance counter support - s390 specific definitions.
*
* Copyright 2009 Martin Schwidefsky, IBM Corporation.
*/
static inline void set_perf_counter_pending(void) {}
static inline void clear_perf_counter_pending(void) {}
...@@ -380,7 +380,7 @@ extern int qdio_establish(struct qdio_initialize *); ...@@ -380,7 +380,7 @@ extern int qdio_establish(struct qdio_initialize *);
extern int qdio_activate(struct ccw_device *); extern int qdio_activate(struct ccw_device *);
extern int do_QDIO(struct ccw_device *cdev, unsigned int callflags, extern int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
int q_nr, int bufnr, int count); int q_nr, unsigned int bufnr, unsigned int count);
extern int qdio_cleanup(struct ccw_device*, int); extern int qdio_cleanup(struct ccw_device*, int);
extern int qdio_shutdown(struct ccw_device*, int); extern int qdio_shutdown(struct ccw_device*, int);
extern int qdio_free(struct ccw_device *); extern int qdio_free(struct ccw_device *);
......
...@@ -154,39 +154,35 @@ void __kprobes get_instruction_type(struct arch_specific_insn *ainsn) ...@@ -154,39 +154,35 @@ void __kprobes get_instruction_type(struct arch_specific_insn *ainsn)
static int __kprobes swap_instruction(void *aref) static int __kprobes swap_instruction(void *aref)
{ {
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
unsigned long status = kcb->kprobe_status;
struct ins_replace_args *args = aref; struct ins_replace_args *args = aref;
int rc;
return probe_kernel_write(args->ptr, &args->new, sizeof(args->new)); kcb->kprobe_status = KPROBE_SWAP_INST;
rc = probe_kernel_write(args->ptr, &args->new, sizeof(args->new));
kcb->kprobe_status = status;
return rc;
} }
void __kprobes arch_arm_kprobe(struct kprobe *p) void __kprobes arch_arm_kprobe(struct kprobe *p)
{ {
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
unsigned long status = kcb->kprobe_status;
struct ins_replace_args args; struct ins_replace_args args;
args.ptr = p->addr; args.ptr = p->addr;
args.old = p->opcode; args.old = p->opcode;
args.new = BREAKPOINT_INSTRUCTION; args.new = BREAKPOINT_INSTRUCTION;
kcb->kprobe_status = KPROBE_SWAP_INST;
stop_machine(swap_instruction, &args, NULL); stop_machine(swap_instruction, &args, NULL);
kcb->kprobe_status = status;
} }
void __kprobes arch_disarm_kprobe(struct kprobe *p) void __kprobes arch_disarm_kprobe(struct kprobe *p)
{ {
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
unsigned long status = kcb->kprobe_status;
struct ins_replace_args args; struct ins_replace_args args;
args.ptr = p->addr; args.ptr = p->addr;
args.old = BREAKPOINT_INSTRUCTION; args.old = BREAKPOINT_INSTRUCTION;
args.new = p->opcode; args.new = p->opcode;
kcb->kprobe_status = KPROBE_SWAP_INST;
stop_machine(swap_instruction, &args, NULL); stop_machine(swap_instruction, &args, NULL);
kcb->kprobe_status = status;
} }
void __kprobes arch_remove_kprobe(struct kprobe *p) void __kprobes arch_remove_kprobe(struct kprobe *p)
......
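The kprobes hunk above defers the KPROBE_SWAP_INST bookkeeping from arch_arm_kprobe()/arch_disarm_kprobe() into swap_instruction() itself, the function that actually runs under stop_machine(), so the per-cpu kprobe control block that gets flagged belongs to the CPU performing the instruction patch. A minimal sketch of the consolidated flow (it mirrors the new code path above rather than adding anything new):

static int swap_instruction_sketch(void *aref)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	unsigned long status = kcb->kprobe_status;
	struct ins_replace_args *args = aref;
	int rc;

	/* mark this CPU's control block while the breakpoint is swapped */
	kcb->kprobe_status = KPROBE_SWAP_INST;
	rc = probe_kernel_write(args->ptr, &args->new, sizeof(args->new));
	kcb->kprobe_status = status;		/* restore the previous state */
	return rc;
}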
...@@ -856,13 +856,20 @@ static ssize_t show_idle_count(struct sys_device *dev, ...@@ -856,13 +856,20 @@ static ssize_t show_idle_count(struct sys_device *dev,
{ {
struct s390_idle_data *idle; struct s390_idle_data *idle;
unsigned long long idle_count; unsigned long long idle_count;
unsigned int sequence;
idle = &per_cpu(s390_idle, dev->id); idle = &per_cpu(s390_idle, dev->id);
spin_lock(&idle->lock); repeat:
sequence = idle->sequence;
smp_rmb();
if (sequence & 1)
goto repeat;
idle_count = idle->idle_count; idle_count = idle->idle_count;
if (idle->idle_enter) if (idle->idle_enter)
idle_count++; idle_count++;
spin_unlock(&idle->lock); smp_rmb();
if (idle->sequence != sequence)
goto repeat;
return sprintf(buf, "%llu\n", idle_count); return sprintf(buf, "%llu\n", idle_count);
} }
static SYSDEV_ATTR(idle_count, 0444, show_idle_count, NULL); static SYSDEV_ATTR(idle_count, 0444, show_idle_count, NULL);
...@@ -872,15 +879,22 @@ static ssize_t show_idle_time(struct sys_device *dev, ...@@ -872,15 +879,22 @@ static ssize_t show_idle_time(struct sys_device *dev,
{ {
struct s390_idle_data *idle; struct s390_idle_data *idle;
unsigned long long now, idle_time, idle_enter; unsigned long long now, idle_time, idle_enter;
unsigned int sequence;
idle = &per_cpu(s390_idle, dev->id); idle = &per_cpu(s390_idle, dev->id);
spin_lock(&idle->lock);
now = get_clock(); now = get_clock();
repeat:
sequence = idle->sequence;
smp_rmb();
if (sequence & 1)
goto repeat;
idle_time = idle->idle_time; idle_time = idle->idle_time;
idle_enter = idle->idle_enter; idle_enter = idle->idle_enter;
if (idle_enter != 0ULL && idle_enter < now) if (idle_enter != 0ULL && idle_enter < now)
idle_time += now - idle_enter; idle_time += now - idle_enter;
spin_unlock(&idle->lock); smp_rmb();
if (idle->sequence != sequence)
goto repeat;
return sprintf(buf, "%llu\n", idle_time >> 12); return sprintf(buf, "%llu\n", idle_time >> 12);
} }
static SYSDEV_ATTR(idle_time_us, 0444, show_idle_time, NULL); static SYSDEV_ATTR(idle_time_us, 0444, show_idle_time, NULL);
...@@ -908,11 +922,7 @@ static int __cpuinit smp_cpu_notify(struct notifier_block *self, ...@@ -908,11 +922,7 @@ static int __cpuinit smp_cpu_notify(struct notifier_block *self,
case CPU_ONLINE: case CPU_ONLINE:
case CPU_ONLINE_FROZEN: case CPU_ONLINE_FROZEN:
idle = &per_cpu(s390_idle, cpu); idle = &per_cpu(s390_idle, cpu);
spin_lock_irq(&idle->lock); memset(idle, 0, sizeof(struct s390_idle_data));
idle->idle_enter = 0;
idle->idle_time = 0;
idle->idle_count = 0;
spin_unlock_irq(&idle->lock);
if (sysfs_create_group(&s->kobj, &cpu_online_attr_group)) if (sysfs_create_group(&s->kobj, &cpu_online_attr_group))
return NOTIFY_BAD; return NOTIFY_BAD;
break; break;
......
...@@ -36,7 +36,6 @@ ...@@ -36,7 +36,6 @@
#include <linux/notifier.h> #include <linux/notifier.h>
#include <linux/clocksource.h> #include <linux/clocksource.h>
#include <linux/clockchips.h> #include <linux/clockchips.h>
#include <linux/bootmem.h>
#include <asm/uaccess.h> #include <asm/uaccess.h>
#include <asm/delay.h> #include <asm/delay.h>
#include <asm/s390_ext.h> #include <asm/s390_ext.h>
...@@ -62,9 +61,6 @@ ...@@ -62,9 +61,6 @@
u64 sched_clock_base_cc = -1; /* Force to data section. */ u64 sched_clock_base_cc = -1; /* Force to data section. */
static ext_int_info_t ext_int_info_cc;
static ext_int_info_t ext_int_etr_cc;
static DEFINE_PER_CPU(struct clock_event_device, comparators); static DEFINE_PER_CPU(struct clock_event_device, comparators);
/* /*
...@@ -255,15 +251,11 @@ void __init time_init(void) ...@@ -255,15 +251,11 @@ void __init time_init(void)
stp_reset(); stp_reset();
/* request the clock comparator external interrupt */ /* request the clock comparator external interrupt */
if (register_early_external_interrupt(0x1004, if (register_external_interrupt(0x1004, clock_comparator_interrupt))
clock_comparator_interrupt,
&ext_int_info_cc) != 0)
panic("Couldn't request external interrupt 0x1004"); panic("Couldn't request external interrupt 0x1004");
/* request the timing alert external interrupt */ /* request the timing alert external interrupt */
if (register_early_external_interrupt(0x1406, if (register_external_interrupt(0x1406, timing_alert_interrupt))
timing_alert_interrupt,
&ext_int_etr_cc) != 0)
panic("Couldn't request external interrupt 0x1406"); panic("Couldn't request external interrupt 0x1406");
if (clocksource_register(&clocksource_tod) != 0) if (clocksource_register(&clocksource_tod) != 0)
...@@ -1445,14 +1437,14 @@ static void __init stp_reset(void) ...@@ -1445,14 +1437,14 @@ static void __init stp_reset(void)
{ {
int rc; int rc;
stp_page = alloc_bootmem_pages(PAGE_SIZE); stp_page = (void *) get_zeroed_page(GFP_ATOMIC);
rc = chsc_sstpc(stp_page, STP_OP_CTRL, 0x0000); rc = chsc_sstpc(stp_page, STP_OP_CTRL, 0x0000);
if (rc == 0) if (rc == 0)
set_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags); set_bit(CLOCK_SYNC_HAS_STP, &clock_sync_flags);
else if (stp_online) { else if (stp_online) {
pr_warning("The real or virtual hardware system does " pr_warning("The real or virtual hardware system does "
"not provide an STP interface\n"); "not provide an STP interface\n");
free_bootmem((unsigned long) stp_page, PAGE_SIZE); free_page((unsigned long) stp_page);
stp_page = NULL; stp_page = NULL;
stp_online = 0; stp_online = 0;
} }
......
...@@ -25,13 +25,9 @@ ...@@ -25,13 +25,9 @@
#include <asm/irq_regs.h> #include <asm/irq_regs.h>
#include <asm/cputime.h> #include <asm/cputime.h>
static ext_int_info_t ext_int_info_timer;
static DEFINE_PER_CPU(struct vtimer_queue, virt_cpu_timer); static DEFINE_PER_CPU(struct vtimer_queue, virt_cpu_timer);
DEFINE_PER_CPU(struct s390_idle_data, s390_idle) = { DEFINE_PER_CPU(struct s390_idle_data, s390_idle);
.lock = __SPIN_LOCK_UNLOCKED(s390_idle.lock)
};
static inline __u64 get_vtimer(void) static inline __u64 get_vtimer(void)
{ {
...@@ -153,11 +149,13 @@ void vtime_start_cpu(void) ...@@ -153,11 +149,13 @@ void vtime_start_cpu(void)
vq->elapsed -= vq->idle - S390_lowcore.async_enter_timer; vq->elapsed -= vq->idle - S390_lowcore.async_enter_timer;
} }
spin_lock(&idle->lock); idle->sequence++;
smp_wmb();
idle->idle_time += idle_time; idle->idle_time += idle_time;
idle->idle_enter = 0ULL; idle->idle_enter = 0ULL;
idle->idle_count++; idle->idle_count++;
spin_unlock(&idle->lock); smp_wmb();
idle->sequence++;
} }
void vtime_stop_cpu(void) void vtime_stop_cpu(void)
...@@ -244,15 +242,23 @@ cputime64_t s390_get_idle_time(int cpu) ...@@ -244,15 +242,23 @@ cputime64_t s390_get_idle_time(int cpu)
{ {
struct s390_idle_data *idle; struct s390_idle_data *idle;
unsigned long long now, idle_time, idle_enter; unsigned long long now, idle_time, idle_enter;
unsigned int sequence;
idle = &per_cpu(s390_idle, cpu); idle = &per_cpu(s390_idle, cpu);
spin_lock(&idle->lock);
now = get_clock(); now = get_clock();
repeat:
sequence = idle->sequence;
smp_rmb();
if (sequence & 1)
goto repeat;
idle_time = 0; idle_time = 0;
idle_enter = idle->idle_enter; idle_enter = idle->idle_enter;
if (idle_enter != 0ULL && idle_enter < now) if (idle_enter != 0ULL && idle_enter < now)
idle_time = now - idle_enter; idle_time = now - idle_enter;
spin_unlock(&idle->lock); smp_rmb();
if (idle->sequence != sequence)
goto repeat;
return idle_time; return idle_time;
} }
...@@ -557,8 +563,7 @@ void init_cpu_vtimer(void) ...@@ -557,8 +563,7 @@ void init_cpu_vtimer(void)
void __init vtime_init(void) void __init vtime_init(void)
{ {
/* request the cpu timer external interrupt */ /* request the cpu timer external interrupt */
if (register_early_external_interrupt(0x1005, do_cpu_timer_interrupt, if (register_external_interrupt(0x1005, do_cpu_timer_interrupt))
&ext_int_info_timer) != 0)
panic("Couldn't request external interrupt 0x1005"); panic("Couldn't request external interrupt 0x1005");
/* Enable cpu timer interrupts on the boot cpu. */ /* Enable cpu timer interrupts on the boot cpu. */
......
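The s390_idle_data hunks above (readers in smp.c, writer in vtime.c) replace the per-cpu spinlock with a plain sequence counter: the writer makes `sequence` odd before touching the idle fields and even again afterwards, and readers retry whenever they observe an odd value or the counter changes between their two reads. A minimal, self-contained sketch of that protocol, with an illustrative `struct idle_data` instead of the kernel's real structure:

struct idle_data {
	unsigned int sequence;			/* odd while an update is in flight */
	unsigned long long idle_time;
};

static void idle_update(struct idle_data *idle, unsigned long long delta)
{
	idle->sequence++;			/* now odd: readers will retry */
	smp_wmb();
	idle->idle_time += delta;
	smp_wmb();
	idle->sequence++;			/* even again: update is complete */
}

static unsigned long long idle_read(struct idle_data *idle)
{
	unsigned int seq;
	unsigned long long val;

repeat:
	seq = idle->sequence;
	smp_rmb();
	if (seq & 1)				/* writer is active, try again */
		goto repeat;
	val = idle->idle_time;
	smp_rmb();
	if (idle->sequence != seq)		/* raced with a writer, try again */
		goto repeat;
	return val;
}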
...@@ -109,10 +109,11 @@ swsusp_arch_resume: ...@@ -109,10 +109,11 @@ swsusp_arch_resume:
aghi %r15,-STACK_FRAME_OVERHEAD aghi %r15,-STACK_FRAME_OVERHEAD
stg %r1,__SF_BACKCHAIN(%r15) stg %r1,__SF_BACKCHAIN(%r15)
#ifdef CONFIG_SMP
/* Save boot cpu number */ /* Save boot cpu number */
brasl %r14,smp_get_phys_cpu_id brasl %r14,smp_get_phys_cpu_id
lgr %r10,%r2 lgr %r10,%r2
#endif
/* Deactivate DAT */ /* Deactivate DAT */
stnsm __SF_EMPTY(%r15),0xfb stnsm __SF_EMPTY(%r15),0xfb
...@@ -177,11 +178,12 @@ swsusp_arch_resume: ...@@ -177,11 +178,12 @@ swsusp_arch_resume:
/* Pointer to save area */ /* Pointer to save area */
lghi %r13,0x1000 lghi %r13,0x1000
#ifdef CONFIG_SMP
/* Switch CPUs */ /* Switch CPUs */
lgr %r2,%r10 /* get cpu id */ lgr %r2,%r10 /* get cpu id */
llgf %r3,0x318(%r13) llgf %r3,0x318(%r13)
brasl %r14,smp_switch_boot_cpu_in_resume brasl %r14,smp_switch_boot_cpu_in_resume
#endif
/* Restore prefix register */ /* Restore prefix register */
spx 0x318(%r13) spx 0x318(%r13)
......
...@@ -479,7 +479,9 @@ static void dasd_change_state(struct dasd_device *device) ...@@ -479,7 +479,9 @@ static void dasd_change_state(struct dasd_device *device)
rc = dasd_increase_state(device); rc = dasd_increase_state(device);
else else
rc = dasd_decrease_state(device); rc = dasd_decrease_state(device);
if (rc && rc != -EAGAIN) if (rc == -EAGAIN)
return;
if (rc)
device->target = device->state; device->target = device->state;
if (device->state == device->target) { if (device->state == device->target) {
...@@ -2503,15 +2505,25 @@ int dasd_generic_restore_device(struct ccw_device *cdev) ...@@ -2503,15 +2505,25 @@ int dasd_generic_restore_device(struct ccw_device *cdev)
if (IS_ERR(device)) if (IS_ERR(device))
return PTR_ERR(device); return PTR_ERR(device);
/* allow new IO again */
device->stopped &= ~DASD_STOPPED_PM;
device->stopped &= ~DASD_UNRESUMED_PM;
dasd_schedule_device_bh(device); dasd_schedule_device_bh(device);
if (device->block) if (device->block)
dasd_schedule_block_bh(device->block); dasd_schedule_block_bh(device->block);
if (device->discipline->restore) if (device->discipline->restore)
rc = device->discipline->restore(device); rc = device->discipline->restore(device);
if (rc)
/*
* if the resume failed for the DASD we put it in
* an UNRESUMED stop state
*/
device->stopped |= DASD_UNRESUMED_PM;
dasd_put_device(device); dasd_put_device(device);
return rc; return 0;
} }
EXPORT_SYMBOL_GPL(dasd_generic_restore_device); EXPORT_SYMBOL_GPL(dasd_generic_restore_device);
......
...@@ -3243,9 +3243,6 @@ int dasd_eckd_restore_device(struct dasd_device *device) ...@@ -3243,9 +3243,6 @@ int dasd_eckd_restore_device(struct dasd_device *device)
int is_known, rc; int is_known, rc;
struct dasd_uid temp_uid; struct dasd_uid temp_uid;
/* allow new IO again */
device->stopped &= ~DASD_STOPPED_PM;
private = (struct dasd_eckd_private *) device->private; private = (struct dasd_eckd_private *) device->private;
/* Read Configuration Data */ /* Read Configuration Data */
...@@ -3295,12 +3292,7 @@ int dasd_eckd_restore_device(struct dasd_device *device) ...@@ -3295,12 +3292,7 @@ int dasd_eckd_restore_device(struct dasd_device *device)
return 0; return 0;
out_err: out_err:
/* return -1;
* if the resume failed for the DASD we put it in
* an UNRESUMED stop state
*/
device->stopped |= DASD_UNRESUMED_PM;
return 0;
} }
static struct ccw_driver dasd_eckd_driver = { static struct ccw_driver dasd_eckd_driver = {
......
...@@ -20,10 +20,7 @@ ...@@ -20,10 +20,7 @@
#include <linux/interrupt.h> #include <linux/interrupt.h>
#include <linux/err.h> #include <linux/err.h>
#include <linux/reboot.h> #include <linux/reboot.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/bootmem.h>
#include <asm/ccwdev.h> #include <asm/ccwdev.h>
#include <asm/cio.h> #include <asm/cio.h>
#include <asm/io.h> #include <asm/io.h>
...@@ -735,7 +732,7 @@ static int raw3215_pm_stop(struct ccw_device *cdev) ...@@ -735,7 +732,7 @@ static int raw3215_pm_stop(struct ccw_device *cdev)
unsigned long flags; unsigned long flags;
/* Empty the output buffer, then prevent new I/O. */ /* Empty the output buffer, then prevent new I/O. */
raw = cdev->dev.driver_data; raw = dev_get_drvdata(&cdev->dev);
spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags); spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
raw3215_make_room(raw, RAW3215_BUFFER_SIZE); raw3215_make_room(raw, RAW3215_BUFFER_SIZE);
raw->flags |= RAW3215_FROZEN; raw->flags |= RAW3215_FROZEN;
...@@ -749,7 +746,7 @@ static int raw3215_pm_start(struct ccw_device *cdev) ...@@ -749,7 +746,7 @@ static int raw3215_pm_start(struct ccw_device *cdev)
unsigned long flags; unsigned long flags;
/* Allow I/O again and flush output buffer. */ /* Allow I/O again and flush output buffer. */
raw = cdev->dev.driver_data; raw = dev_get_drvdata(&cdev->dev);
spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags); spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
raw->flags &= ~RAW3215_FROZEN; raw->flags &= ~RAW3215_FROZEN;
raw->flags |= RAW3215_FLUSHING; raw->flags |= RAW3215_FLUSHING;
...@@ -883,7 +880,7 @@ static int __init con3215_init(void) ...@@ -883,7 +880,7 @@ static int __init con3215_init(void)
raw3215_freelist = NULL; raw3215_freelist = NULL;
spin_lock_init(&raw3215_freelist_lock); spin_lock_init(&raw3215_freelist_lock);
for (i = 0; i < NR_3215_REQ; i++) { for (i = 0; i < NR_3215_REQ; i++) {
req = (struct raw3215_req *) alloc_bootmem_low(sizeof(struct raw3215_req)); req = kzalloc(sizeof(struct raw3215_req), GFP_KERNEL | GFP_DMA);
req->next = raw3215_freelist; req->next = raw3215_freelist;
raw3215_freelist = req; raw3215_freelist = req;
} }
...@@ -893,10 +890,9 @@ static int __init con3215_init(void) ...@@ -893,10 +890,9 @@ static int __init con3215_init(void)
return -ENODEV; return -ENODEV;
raw3215[0] = raw = (struct raw3215_info *) raw3215[0] = raw = (struct raw3215_info *)
alloc_bootmem_low(sizeof(struct raw3215_info)); kzalloc(sizeof(struct raw3215_info), GFP_KERNEL | GFP_DMA);
memset(raw, 0, sizeof(struct raw3215_info)); raw->buffer = kzalloc(RAW3215_BUFFER_SIZE, GFP_KERNEL | GFP_DMA);
raw->buffer = (char *) alloc_bootmem_low(RAW3215_BUFFER_SIZE); raw->inbuf = kzalloc(RAW3215_INBUF_SIZE, GFP_KERNEL | GFP_DMA);
raw->inbuf = (char *) alloc_bootmem_low(RAW3215_INBUF_SIZE);
raw->cdev = cdev; raw->cdev = cdev;
dev_set_drvdata(&cdev->dev, raw); dev_set_drvdata(&cdev->dev, raw);
cdev->handler = raw3215_irq; cdev->handler = raw3215_irq;
...@@ -906,9 +902,9 @@ static int __init con3215_init(void) ...@@ -906,9 +902,9 @@ static int __init con3215_init(void)
/* Request the console irq */ /* Request the console irq */
if (raw3215_startup(raw) != 0) { if (raw3215_startup(raw) != 0) {
free_bootmem((unsigned long) raw->inbuf, RAW3215_INBUF_SIZE); kfree(raw->inbuf);
free_bootmem((unsigned long) raw->buffer, RAW3215_BUFFER_SIZE); kfree(raw->buffer);
free_bootmem((unsigned long) raw, sizeof(struct raw3215_info)); kfree(raw);
raw3215[0] = NULL; raw3215[0] = NULL;
return -ENODEV; return -ENODEV;
} }
......
...@@ -7,7 +7,6 @@ ...@@ -7,7 +7,6 @@
* Copyright IBM Corp. 2003, 2009 * Copyright IBM Corp. 2003, 2009
*/ */
#include <linux/bootmem.h>
#include <linux/console.h> #include <linux/console.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/interrupt.h> #include <linux/interrupt.h>
...@@ -600,16 +599,14 @@ con3270_init(void) ...@@ -600,16 +599,14 @@ con3270_init(void)
if (IS_ERR(rp)) if (IS_ERR(rp))
return PTR_ERR(rp); return PTR_ERR(rp);
condev = (struct con3270 *) alloc_bootmem_low(sizeof(struct con3270)); condev = kzalloc(sizeof(struct con3270), GFP_KERNEL | GFP_DMA);
memset(condev, 0, sizeof(struct con3270));
condev->view.dev = rp; condev->view.dev = rp;
condev->read = raw3270_request_alloc_bootmem(0); condev->read = raw3270_request_alloc(0);
condev->read->callback = con3270_read_callback; condev->read->callback = con3270_read_callback;
condev->read->callback_data = condev; condev->read->callback_data = condev;
condev->write = condev->write = raw3270_request_alloc(CON3270_OUTPUT_BUFFER_SIZE);
raw3270_request_alloc_bootmem(CON3270_OUTPUT_BUFFER_SIZE); condev->kreset = raw3270_request_alloc(1);
condev->kreset = raw3270_request_alloc_bootmem(1);
INIT_LIST_HEAD(&condev->lines); INIT_LIST_HEAD(&condev->lines);
INIT_LIST_HEAD(&condev->update); INIT_LIST_HEAD(&condev->update);
...@@ -623,7 +620,7 @@ con3270_init(void) ...@@ -623,7 +620,7 @@ con3270_init(void)
INIT_LIST_HEAD(&condev->freemem); INIT_LIST_HEAD(&condev->freemem);
for (i = 0; i < CON3270_STRING_PAGES; i++) { for (i = 0; i < CON3270_STRING_PAGES; i++) {
cbuf = (void *) alloc_bootmem_low_pages(PAGE_SIZE); cbuf = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
add_string_memory(&condev->freemem, cbuf, PAGE_SIZE); add_string_memory(&condev->freemem, cbuf, PAGE_SIZE);
} }
condev->cline = alloc_string(&condev->freemem, condev->view.cols); condev->cline = alloc_string(&condev->freemem, condev->view.cols);
......
...@@ -320,7 +320,7 @@ static int mon_open(struct inode *inode, struct file *filp) ...@@ -320,7 +320,7 @@ static int mon_open(struct inode *inode, struct file *filp)
goto out_path; goto out_path;
} }
filp->private_data = monpriv; filp->private_data = monpriv;
monreader_device->driver_data = monpriv; dev_set_drvdata(monreader_device, monpriv);
unlock_kernel(); unlock_kernel();
return nonseekable_open(inode, filp); return nonseekable_open(inode, filp);
...@@ -463,7 +463,7 @@ static struct miscdevice mon_dev = { ...@@ -463,7 +463,7 @@ static struct miscdevice mon_dev = {
*****************************************************************************/ *****************************************************************************/
static int monreader_freeze(struct device *dev) static int monreader_freeze(struct device *dev)
{ {
struct mon_private *monpriv = dev->driver_data; struct mon_private *monpriv = dev_get_drvdata(dev);
int rc; int rc;
if (!monpriv) if (!monpriv)
...@@ -487,7 +487,7 @@ static int monreader_freeze(struct device *dev) ...@@ -487,7 +487,7 @@ static int monreader_freeze(struct device *dev)
static int monreader_thaw(struct device *dev) static int monreader_thaw(struct device *dev)
{ {
struct mon_private *monpriv = dev->driver_data; struct mon_private *monpriv = dev_get_drvdata(dev);
int rc; int rc;
if (!monpriv) if (!monpriv)
......
...@@ -7,7 +7,6 @@ ...@@ -7,7 +7,6 @@
* Copyright IBM Corp. 2003, 2009 * Copyright IBM Corp. 2003, 2009
*/ */
#include <linux/bootmem.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/err.h> #include <linux/err.h>
#include <linux/init.h> #include <linux/init.h>
...@@ -143,33 +142,6 @@ raw3270_request_alloc(size_t size) ...@@ -143,33 +142,6 @@ raw3270_request_alloc(size_t size)
return rq; return rq;
} }
#ifdef CONFIG_TN3270_CONSOLE
/*
* Allocate a new 3270 ccw request from bootmem. Only works very
* early in the boot process. Only con3270.c should be using this.
*/
struct raw3270_request __init *raw3270_request_alloc_bootmem(size_t size)
{
struct raw3270_request *rq;
rq = alloc_bootmem_low(sizeof(struct raw3270));
/* alloc output buffer. */
if (size > 0)
rq->buffer = alloc_bootmem_low(size);
rq->size = size;
INIT_LIST_HEAD(&rq->list);
/*
* Setup ccw.
*/
rq->ccw.cda = __pa(rq->buffer);
rq->ccw.flags = CCW_FLAG_SLI;
return rq;
}
#endif
/* /*
* Free 3270 ccw request * Free 3270 ccw request
*/ */
...@@ -846,8 +818,8 @@ struct raw3270 __init *raw3270_setup_console(struct ccw_device *cdev) ...@@ -846,8 +818,8 @@ struct raw3270 __init *raw3270_setup_console(struct ccw_device *cdev)
char *ascebc; char *ascebc;
int rc; int rc;
rp = (struct raw3270 *) alloc_bootmem_low(sizeof(struct raw3270)); rp = kzalloc(sizeof(struct raw3270), GFP_KERNEL | GFP_DMA);
ascebc = (char *) alloc_bootmem(256); ascebc = kzalloc(256, GFP_KERNEL);
rc = raw3270_setup_device(cdev, rp, ascebc); rc = raw3270_setup_device(cdev, rp, ascebc);
if (rc) if (rc)
return ERR_PTR(rc); return ERR_PTR(rc);
...@@ -1350,7 +1322,7 @@ static int raw3270_pm_stop(struct ccw_device *cdev) ...@@ -1350,7 +1322,7 @@ static int raw3270_pm_stop(struct ccw_device *cdev)
struct raw3270_view *view; struct raw3270_view *view;
unsigned long flags; unsigned long flags;
rp = cdev->dev.driver_data; rp = dev_get_drvdata(&cdev->dev);
if (!rp) if (!rp)
return 0; return 0;
spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags); spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
...@@ -1376,7 +1348,7 @@ static int raw3270_pm_start(struct ccw_device *cdev) ...@@ -1376,7 +1348,7 @@ static int raw3270_pm_start(struct ccw_device *cdev)
struct raw3270 *rp; struct raw3270 *rp;
unsigned long flags; unsigned long flags;
rp = cdev->dev.driver_data; rp = dev_get_drvdata(&cdev->dev);
if (!rp) if (!rp)
return 0; return 0;
spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags); spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags);
......
...@@ -11,7 +11,6 @@ ...@@ -11,7 +11,6 @@
#include <linux/init.h> #include <linux/init.h>
#include <linux/timer.h> #include <linux/timer.h>
#include <linux/jiffies.h> #include <linux/jiffies.h>
#include <linux/bootmem.h>
#include <linux/termios.h> #include <linux/termios.h>
#include <linux/err.h> #include <linux/err.h>
#include <linux/reboot.h> #include <linux/reboot.h>
...@@ -110,7 +109,7 @@ static void sclp_console_sync_queue(void) ...@@ -110,7 +109,7 @@ static void sclp_console_sync_queue(void)
spin_lock_irqsave(&sclp_con_lock, flags); spin_lock_irqsave(&sclp_con_lock, flags);
if (timer_pending(&sclp_con_timer)) if (timer_pending(&sclp_con_timer))
del_timer_sync(&sclp_con_timer); del_timer(&sclp_con_timer);
while (sclp_con_queue_running) { while (sclp_con_queue_running) {
spin_unlock_irqrestore(&sclp_con_lock, flags); spin_unlock_irqrestore(&sclp_con_lock, flags);
sclp_sync_wait(); sclp_sync_wait();
...@@ -298,8 +297,8 @@ sclp_console_init(void) ...@@ -298,8 +297,8 @@ sclp_console_init(void)
/* Allocate pages for output buffering */ /* Allocate pages for output buffering */
INIT_LIST_HEAD(&sclp_con_pages); INIT_LIST_HEAD(&sclp_con_pages);
for (i = 0; i < MAX_CONSOLE_PAGES; i++) { for (i = 0; i < MAX_CONSOLE_PAGES; i++) {
page = alloc_bootmem_low_pages(PAGE_SIZE); page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
list_add_tail((struct list_head *) page, &sclp_con_pages); list_add_tail(page, &sclp_con_pages);
} }
INIT_LIST_HEAD(&sclp_con_outqueue); INIT_LIST_HEAD(&sclp_con_outqueue);
spin_lock_init(&sclp_con_lock); spin_lock_init(&sclp_con_lock);
......
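sclp_console_sync_queue() above runs with sclp_con_lock held and interrupts disabled, and the timer callback presumably takes the same lock; in that situation del_timer_sync(), which waits for a running callback to finish, can deadlock, whereas del_timer() merely deactivates a pending timer and is safe. A sketch of the hazard with hypothetical names (assume setup_timer(&queue_timer, queue_timer_fn, 0) was done at init):

#include <linux/spinlock.h>
#include <linux/timer.h>

static DEFINE_SPINLOCK(queue_lock);
static struct timer_list queue_timer;

static void queue_timer_fn(unsigned long data)
{
	spin_lock(&queue_lock);			/* the callback takes the same lock */
	/* ... flush or requeue pending output ... */
	spin_unlock(&queue_lock);
}

static void queue_sync(void)
{
	unsigned long flags;

	spin_lock_irqsave(&queue_lock, flags);
	if (timer_pending(&queue_timer))
		del_timer(&queue_timer);	/* only clears a pending timer */
	/*
	 * del_timer_sync() here could deadlock: it would wait for a
	 * running queue_timer_fn(), which in turn waits for queue_lock.
	 */
	spin_unlock_irqrestore(&queue_lock, flags);
}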
...@@ -20,7 +20,6 @@ ...@@ -20,7 +20,6 @@
#include <linux/major.h> #include <linux/major.h>
#include <linux/console.h> #include <linux/console.h>
#include <linux/kdev_t.h> #include <linux/kdev_t.h>
#include <linux/bootmem.h>
#include <linux/interrupt.h> #include <linux/interrupt.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/reboot.h> #include <linux/reboot.h>
...@@ -601,10 +600,7 @@ static void __init __sclp_vt220_free_pages(void) ...@@ -601,10 +600,7 @@ static void __init __sclp_vt220_free_pages(void)
list_for_each_safe(page, p, &sclp_vt220_empty) { list_for_each_safe(page, p, &sclp_vt220_empty) {
list_del(page); list_del(page);
if (slab_is_available())
free_page((unsigned long) page); free_page((unsigned long) page);
else
free_bootmem((unsigned long) page, PAGE_SIZE);
} }
} }
...@@ -640,16 +636,12 @@ static int __init __sclp_vt220_init(int num_pages) ...@@ -640,16 +636,12 @@ static int __init __sclp_vt220_init(int num_pages)
sclp_vt220_flush_later = 0; sclp_vt220_flush_later = 0;
/* Allocate pages for output buffering */ /* Allocate pages for output buffering */
rc = -ENOMEM;
for (i = 0; i < num_pages; i++) { for (i = 0; i < num_pages; i++) {
if (slab_is_available())
page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA); page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
else if (!page)
page = alloc_bootmem_low_pages(PAGE_SIZE);
if (!page) {
rc = -ENOMEM;
goto out; goto out;
} list_add_tail(page, &sclp_vt220_empty);
list_add_tail((struct list_head *) page, &sclp_vt220_empty);
} }
rc = sclp_register(&sclp_vt220_register); rc = sclp_register(&sclp_vt220_register);
out: out:
......
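The 3215, 3270, SCLP and VT220 console hunks above all drop alloc_bootmem_low*() in favour of kzalloc()/get_zeroed_page(); this only works because these init paths now run after the slab allocator is available, which is what the "convert from bootmem to slab" patches rely on. GFP_DMA keeps the buffers 31-bit addressable, as these console device interfaces require. A minimal sketch of the replacement pattern with hypothetical names:

#include <linux/gfp.h>
#include <linux/slab.h>

struct con_req { int dummy; };			/* stand-in for the real request struct */

struct con_buffers {
	struct con_req *req;
	void *page;
};

static int con_alloc(struct con_buffers *b)
{
	b->req = kzalloc(sizeof(*b->req), GFP_KERNEL | GFP_DMA);
	b->page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!b->req || !b->page) {
		kfree(b->req);			/* kfree(NULL) and free_page(0) are no-ops */
		free_page((unsigned long) b->page);
		return -ENOMEM;
	}
	return 0;
}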
...@@ -396,7 +396,7 @@ int tape_generic_pm_suspend(struct ccw_device *cdev) ...@@ -396,7 +396,7 @@ int tape_generic_pm_suspend(struct ccw_device *cdev)
{ {
struct tape_device *device; struct tape_device *device;
device = cdev->dev.driver_data; device = dev_get_drvdata(&cdev->dev);
if (!device) { if (!device) {
return -ENODEV; return -ENODEV;
} }
......
...@@ -663,7 +663,7 @@ static struct attribute *vmlogrdr_attrs[] = { ...@@ -663,7 +663,7 @@ static struct attribute *vmlogrdr_attrs[] = {
static int vmlogrdr_pm_prepare(struct device *dev) static int vmlogrdr_pm_prepare(struct device *dev)
{ {
int rc; int rc;
struct vmlogrdr_priv_t *priv = dev->driver_data; struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev);
rc = 0; rc = 0;
if (priv) { if (priv) {
...@@ -753,7 +753,7 @@ static int vmlogrdr_register_device(struct vmlogrdr_priv_t *priv) ...@@ -753,7 +753,7 @@ static int vmlogrdr_register_device(struct vmlogrdr_priv_t *priv)
dev->bus = &iucv_bus; dev->bus = &iucv_bus;
dev->parent = iucv_root; dev->parent = iucv_root;
dev->driver = &vmlogrdr_driver; dev->driver = &vmlogrdr_driver;
dev->driver_data = priv; dev_set_drvdata(dev, priv);
/* /*
* The release function could be called after the * The release function could be called after the
* module has been unloaded. It's _only_ task is to * module has been unloaded. It's _only_ task is to
......
...@@ -170,7 +170,7 @@ static void urdev_put(struct urdev *urd) ...@@ -170,7 +170,7 @@ static void urdev_put(struct urdev *urd)
*/ */
static int ur_pm_suspend(struct ccw_device *cdev) static int ur_pm_suspend(struct ccw_device *cdev)
{ {
struct urdev *urd = cdev->dev.driver_data; struct urdev *urd = dev_get_drvdata(&cdev->dev);
TRACE("ur_pm_suspend: cdev=%p\n", cdev); TRACE("ur_pm_suspend: cdev=%p\n", cdev);
if (urd->open_flag) { if (urd->open_flag) {
......
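The driver_data hunks above (3215, 3270, monreader, tape, vmlogrdr, vmur) stop dereferencing dev->driver_data directly and use the driver-core accessors instead; for a ccw device the struct device is embedded, so the drivers pass &cdev->dev. A minimal sketch of the accessor pattern with hypothetical driver names:

#include <linux/device.h>
#include <linux/slab.h>
#include <asm/ccwdev.h>

struct my_priv {
	int open_count;
};

static int my_probe(struct ccw_device *cdev)
{
	struct my_priv *priv = kzalloc(sizeof(*priv), GFP_KERNEL);

	if (!priv)
		return -ENOMEM;
	/* replaces: cdev->dev.driver_data = priv; */
	dev_set_drvdata(&cdev->dev, priv);
	return 0;
}

static int my_pm_suspend(struct ccw_device *cdev)
{
	/* replaces: priv = cdev->dev.driver_data; */
	struct my_priv *priv = dev_get_drvdata(&cdev->dev);

	if (!priv)
		return -ENODEV;
	priv->open_count = 0;
	return 0;
}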
...@@ -351,15 +351,6 @@ static inline unsigned long long get_usecs(void) ...@@ -351,15 +351,6 @@ static inline unsigned long long get_usecs(void)
((bufnr - dec) & QDIO_MAX_BUFFERS_MASK) ((bufnr - dec) & QDIO_MAX_BUFFERS_MASK)
/* prototypes for thin interrupt */ /* prototypes for thin interrupt */
void qdio_sync_after_thinint(struct qdio_q *q);
int get_buf_state(struct qdio_q *q, unsigned int bufnr, unsigned char *state,
int auto_ack);
void qdio_check_outbound_after_thinint(struct qdio_q *q);
int qdio_inbound_q_moved(struct qdio_q *q);
void qdio_kick_handler(struct qdio_q *q);
void qdio_stop_polling(struct qdio_q *q);
int qdio_siga_sync_q(struct qdio_q *q);
void qdio_setup_thinint(struct qdio_irq *irq_ptr); void qdio_setup_thinint(struct qdio_irq *irq_ptr);
int qdio_establish_thinint(struct qdio_irq *irq_ptr); int qdio_establish_thinint(struct qdio_irq *irq_ptr);
void qdio_shutdown_thinint(struct qdio_irq *irq_ptr); void qdio_shutdown_thinint(struct qdio_irq *irq_ptr);
...@@ -392,4 +383,6 @@ void qdio_setup_destroy_sysfs(struct ccw_device *cdev); ...@@ -392,4 +383,6 @@ void qdio_setup_destroy_sysfs(struct ccw_device *cdev);
int qdio_setup_init(void); int qdio_setup_init(void);
void qdio_setup_exit(void); void qdio_setup_exit(void);
int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
unsigned char *state);
#endif /* _CIO_QDIO_H */ #endif /* _CIO_QDIO_H */
...@@ -70,9 +70,8 @@ static int qstat_show(struct seq_file *m, void *v) ...@@ -70,9 +70,8 @@ static int qstat_show(struct seq_file *m, void *v)
seq_printf(m, "slsb buffer states:\n"); seq_printf(m, "slsb buffer states:\n");
seq_printf(m, "|0 |8 |16 |24 |32 |40 |48 |56 63|\n"); seq_printf(m, "|0 |8 |16 |24 |32 |40 |48 |56 63|\n");
qdio_siga_sync_q(q);
for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++) { for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++) {
get_buf_state(q, i, &state, 0); debug_get_buf_state(q, i, &state);
switch (state) { switch (state) {
case SLSB_P_INPUT_NOT_INIT: case SLSB_P_INPUT_NOT_INIT:
case SLSB_P_OUTPUT_NOT_INIT: case SLSB_P_OUTPUT_NOT_INIT:
......
...@@ -231,7 +231,7 @@ static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr, ...@@ -231,7 +231,7 @@ static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr,
return i; return i;
} }
inline int get_buf_state(struct qdio_q *q, unsigned int bufnr, static inline int get_buf_state(struct qdio_q *q, unsigned int bufnr,
unsigned char *state, int auto_ack) unsigned char *state, int auto_ack)
{ {
return get_buf_states(q, bufnr, state, 1, auto_ack); return get_buf_states(q, bufnr, state, 1, auto_ack);
...@@ -276,7 +276,7 @@ void qdio_init_buf_states(struct qdio_irq *irq_ptr) ...@@ -276,7 +276,7 @@ void qdio_init_buf_states(struct qdio_irq *irq_ptr)
QDIO_MAX_BUFFERS_PER_Q); QDIO_MAX_BUFFERS_PER_Q);
} }
static int qdio_siga_sync(struct qdio_q *q, unsigned int output, static inline int qdio_siga_sync(struct qdio_q *q, unsigned int output,
unsigned int input) unsigned int input)
{ {
int cc; int cc;
...@@ -293,7 +293,7 @@ static int qdio_siga_sync(struct qdio_q *q, unsigned int output, ...@@ -293,7 +293,7 @@ static int qdio_siga_sync(struct qdio_q *q, unsigned int output,
return cc; return cc;
} }
inline int qdio_siga_sync_q(struct qdio_q *q) static inline int qdio_siga_sync_q(struct qdio_q *q)
{ {
if (q->is_input_q) if (q->is_input_q)
return qdio_siga_sync(q, 0, q->mask); return qdio_siga_sync(q, 0, q->mask);
...@@ -358,8 +358,7 @@ static inline int qdio_siga_input(struct qdio_q *q) ...@@ -358,8 +358,7 @@ static inline int qdio_siga_input(struct qdio_q *q)
return cc; return cc;
} }
/* called from thinint inbound handler */ static inline void qdio_sync_after_thinint(struct qdio_q *q)
void qdio_sync_after_thinint(struct qdio_q *q)
{ {
if (pci_out_supported(q)) { if (pci_out_supported(q)) {
if (need_siga_sync_thinint(q)) if (need_siga_sync_thinint(q))
...@@ -370,7 +369,14 @@ void qdio_sync_after_thinint(struct qdio_q *q) ...@@ -370,7 +369,14 @@ void qdio_sync_after_thinint(struct qdio_q *q)
qdio_siga_sync_q(q); qdio_siga_sync_q(q);
} }
inline void qdio_stop_polling(struct qdio_q *q) int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
unsigned char *state)
{
qdio_siga_sync_q(q);
return get_buf_states(q, bufnr, state, 1, 0);
}
static inline void qdio_stop_polling(struct qdio_q *q)
{ {
if (!q->u.in.polling) if (!q->u.in.polling)
return; return;
...@@ -449,13 +455,6 @@ static inline void inbound_primed(struct qdio_q *q, int count) ...@@ -449,13 +455,6 @@ static inline void inbound_primed(struct qdio_q *q, int count)
count--; count--;
if (!count) if (!count)
return; return;
/*
* Need to change all PRIMED buffers to NOT_INIT, otherwise
* we're loosing initiative in the thinint code.
*/
set_buf_states(q, q->first_to_check, SLSB_P_INPUT_NOT_INIT,
count);
} }
static int get_inbound_buffer_frontier(struct qdio_q *q) static int get_inbound_buffer_frontier(struct qdio_q *q)
...@@ -470,19 +469,13 @@ static int get_inbound_buffer_frontier(struct qdio_q *q) ...@@ -470,19 +469,13 @@ static int get_inbound_buffer_frontier(struct qdio_q *q)
count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK); count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
stop = add_buf(q->first_to_check, count); stop = add_buf(q->first_to_check, count);
/*
* No siga sync here, as a PCI or we after a thin interrupt
* will sync the queues.
*/
/* need to set count to 1 for non-qebsm */
if (!is_qebsm(q))
count = 1;
check_next:
if (q->first_to_check == stop) if (q->first_to_check == stop)
goto out; goto out;
/*
* No siga sync here, as a PCI or we after a thin interrupt
* already sync'ed the queues.
*/
count = get_buf_states(q, q->first_to_check, &state, count, 1); count = get_buf_states(q, q->first_to_check, &state, count, 1);
if (!count) if (!count)
goto out; goto out;
...@@ -490,14 +483,9 @@ static int get_inbound_buffer_frontier(struct qdio_q *q) ...@@ -490,14 +483,9 @@ static int get_inbound_buffer_frontier(struct qdio_q *q)
switch (state) { switch (state) {
case SLSB_P_INPUT_PRIMED: case SLSB_P_INPUT_PRIMED:
inbound_primed(q, count); inbound_primed(q, count);
/*
* No siga-sync needed for non-qebsm here, as the inbound queue
* will be synced on the next siga-r, resp.
* tiqdio_is_inbound_q_done will do the siga-sync.
*/
q->first_to_check = add_buf(q->first_to_check, count); q->first_to_check = add_buf(q->first_to_check, count);
atomic_sub(count, &q->nr_buf_used); atomic_sub(count, &q->nr_buf_used);
goto check_next; break;
case SLSB_P_INPUT_ERROR: case SLSB_P_INPUT_ERROR:
announce_buffer_error(q, count); announce_buffer_error(q, count);
/* process the buffer, the upper layer will take care of it */ /* process the buffer, the upper layer will take care of it */
...@@ -516,7 +504,7 @@ static int get_inbound_buffer_frontier(struct qdio_q *q) ...@@ -516,7 +504,7 @@ static int get_inbound_buffer_frontier(struct qdio_q *q)
return q->first_to_check; return q->first_to_check;
} }
int qdio_inbound_q_moved(struct qdio_q *q) static int qdio_inbound_q_moved(struct qdio_q *q)
{ {
int bufnr; int bufnr;
...@@ -524,35 +512,32 @@ int qdio_inbound_q_moved(struct qdio_q *q) ...@@ -524,35 +512,32 @@ int qdio_inbound_q_moved(struct qdio_q *q)
if ((bufnr != q->last_move) || q->qdio_error) { if ((bufnr != q->last_move) || q->qdio_error) {
q->last_move = bufnr; q->last_move = bufnr;
if (!need_siga_sync(q) && !pci_out_supported(q)) if (!is_thinint_irq(q->irq_ptr) && !MACHINE_IS_VM)
q->u.in.timestamp = get_usecs(); q->u.in.timestamp = get_usecs();
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in moved");
return 1; return 1;
} else } else
return 0; return 0;
} }
static int qdio_inbound_q_done(struct qdio_q *q) static inline int qdio_inbound_q_done(struct qdio_q *q)
{ {
unsigned char state = 0; unsigned char state = 0;
if (!atomic_read(&q->nr_buf_used)) if (!atomic_read(&q->nr_buf_used))
return 1; return 1;
/*
* We need that one for synchronization with the adapter, as it
* does a kind of PCI avoidance.
*/
qdio_siga_sync_q(q); qdio_siga_sync_q(q);
get_buf_state(q, q->first_to_check, &state, 0); get_buf_state(q, q->first_to_check, &state, 0);
if (state == SLSB_P_INPUT_PRIMED) if (state == SLSB_P_INPUT_PRIMED)
/* we got something to do */ /* more work coming */
return 0; return 0;
/* on VM, we don't poll, so the q is always done here */ if (is_thinint_irq(q->irq_ptr))
if (need_siga_sync(q) || pci_out_supported(q)) return 1;
/* don't poll under z/VM */
if (MACHINE_IS_VM)
return 1; return 1;
/* /*
...@@ -563,14 +548,11 @@ static int qdio_inbound_q_done(struct qdio_q *q) ...@@ -563,14 +548,11 @@ static int qdio_inbound_q_done(struct qdio_q *q)
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%3d", DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%3d",
q->first_to_check); q->first_to_check);
return 1; return 1;
} else { } else
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in notd:%3d",
q->first_to_check);
return 0; return 0;
}
} }
void qdio_kick_handler(struct qdio_q *q) static void qdio_kick_handler(struct qdio_q *q)
{ {
int start = q->first_to_kick; int start = q->first_to_kick;
int end = q->first_to_check; int end = q->first_to_check;
...@@ -619,7 +601,6 @@ static void __qdio_inbound_processing(struct qdio_q *q) ...@@ -619,7 +601,6 @@ static void __qdio_inbound_processing(struct qdio_q *q)
goto again; goto again;
} }
/* inbound tasklet */
void qdio_inbound_processing(unsigned long data) void qdio_inbound_processing(unsigned long data)
{ {
struct qdio_q *q = (struct qdio_q *)data; struct qdio_q *q = (struct qdio_q *)data;
...@@ -642,11 +623,6 @@ static int get_outbound_buffer_frontier(struct qdio_q *q) ...@@ -642,11 +623,6 @@ static int get_outbound_buffer_frontier(struct qdio_q *q)
count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK); count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK);
stop = add_buf(q->first_to_check, count); stop = add_buf(q->first_to_check, count);
/* need to set count to 1 for non-qebsm */
if (!is_qebsm(q))
count = 1;
check_next:
if (q->first_to_check == stop) if (q->first_to_check == stop)
return q->first_to_check; return q->first_to_check;
...@@ -661,13 +637,7 @@ static int get_outbound_buffer_frontier(struct qdio_q *q) ...@@ -661,13 +637,7 @@ static int get_outbound_buffer_frontier(struct qdio_q *q)
atomic_sub(count, &q->nr_buf_used); atomic_sub(count, &q->nr_buf_used);
q->first_to_check = add_buf(q->first_to_check, count); q->first_to_check = add_buf(q->first_to_check, count);
/*
* We fetch all buffer states at once. get_buf_states may
* return count < stop. For QEBSM we do not loop.
*/
if (is_qebsm(q))
break; break;
goto check_next;
case SLSB_P_OUTPUT_ERROR: case SLSB_P_OUTPUT_ERROR:
announce_buffer_error(q, count); announce_buffer_error(q, count);
/* process the buffer, the upper layer will take care of it */ /* process the buffer, the upper layer will take care of it */
...@@ -797,8 +767,7 @@ void qdio_outbound_timer(unsigned long data) ...@@ -797,8 +767,7 @@ void qdio_outbound_timer(unsigned long data)
tasklet_schedule(&q->tasklet); tasklet_schedule(&q->tasklet);
} }
/* called from thinint inbound tasklet */ static inline void qdio_check_outbound_after_thinint(struct qdio_q *q)
void qdio_check_outbound_after_thinint(struct qdio_q *q)
{ {
struct qdio_q *out; struct qdio_q *out;
int i; int i;
...@@ -811,6 +780,46 @@ void qdio_check_outbound_after_thinint(struct qdio_q *q) ...@@ -811,6 +780,46 @@ void qdio_check_outbound_after_thinint(struct qdio_q *q)
tasklet_schedule(&out->tasklet); tasklet_schedule(&out->tasklet);
} }
static void __tiqdio_inbound_processing(struct qdio_q *q)
{
qdio_perf_stat_inc(&perf_stats.thinint_inbound);
qdio_sync_after_thinint(q);
/*
* The interrupt could be caused by a PCI request. Check the
* PCI capable outbound queues.
*/
qdio_check_outbound_after_thinint(q);
if (!qdio_inbound_q_moved(q))
return;
qdio_kick_handler(q);
if (!qdio_inbound_q_done(q)) {
qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop);
if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
tasklet_schedule(&q->tasklet);
}
qdio_stop_polling(q);
/*
* We need to check again to not lose initiative after
* resetting the ACK state.
*/
if (!qdio_inbound_q_done(q)) {
qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop2);
if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
tasklet_schedule(&q->tasklet);
}
}
void tiqdio_inbound_processing(unsigned long data)
{
struct qdio_q *q = (struct qdio_q *)data;
__tiqdio_inbound_processing(q);
}
static inline void qdio_set_state(struct qdio_irq *irq_ptr, static inline void qdio_set_state(struct qdio_irq *irq_ptr,
enum qdio_irq_states state) enum qdio_irq_states state)
{ {
...@@ -1488,18 +1497,13 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags, ...@@ -1488,18 +1497,13 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags,
* @count: how many buffers to process * @count: how many buffers to process
*/ */
int do_QDIO(struct ccw_device *cdev, unsigned int callflags, int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
int q_nr, int bufnr, int count) int q_nr, unsigned int bufnr, unsigned int count)
{ {
struct qdio_irq *irq_ptr; struct qdio_irq *irq_ptr;
if ((bufnr > QDIO_MAX_BUFFERS_PER_Q) || if (bufnr >= QDIO_MAX_BUFFERS_PER_Q || count > QDIO_MAX_BUFFERS_PER_Q)
(count > QDIO_MAX_BUFFERS_PER_Q) ||
(q_nr >= QDIO_MAX_QUEUES_PER_IRQ))
return -EINVAL; return -EINVAL;
if (!count)
return 0;
irq_ptr = cdev->private->qdio_data; irq_ptr = cdev->private->qdio_data;
if (!irq_ptr) if (!irq_ptr)
return -ENODEV; return -ENODEV;
......
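do_QDIO() above now takes bufnr and count as unsigned ints and folds the range checks into one comparison: a buffer index must be strictly below the ring size, while a count equal to the full ring size is still allowed. A small standalone sketch of that check, assuming the usual QDIO ring size of 128 buffers (QDIO_MAX_BUFFERS_PER_Q):

#include <stdio.h>

#define MAX_BUFFERS_PER_Q 128u

static int check_args(unsigned int bufnr, unsigned int count)
{
	if (bufnr >= MAX_BUFFERS_PER_Q || count > MAX_BUFFERS_PER_Q)
		return -1;			/* -EINVAL in the kernel */
	return 0;
}

int main(void)
{
	printf("%d %d %d\n",
	       check_args(127, 128),		/* ok: last index, full ring */
	       check_args(128, 1),		/* rejected: index out of range */
	       check_args(0, 129));		/* rejected: count too large */
	return 0;
}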
...@@ -43,9 +43,6 @@ struct indicator_t { ...@@ -43,9 +43,6 @@ struct indicator_t {
}; };
static struct indicator_t *q_indicators; static struct indicator_t *q_indicators;
static void tiqdio_tasklet_fn(unsigned long data);
static DECLARE_TASKLET(tiqdio_tasklet, tiqdio_tasklet_fn, 0);
static int css_qdio_omit_svs; static int css_qdio_omit_svs;
static inline unsigned long do_clear_global_summary(void) static inline unsigned long do_clear_global_summary(void)
...@@ -103,11 +100,6 @@ void tiqdio_add_input_queues(struct qdio_irq *irq_ptr) ...@@ -103,11 +100,6 @@ void tiqdio_add_input_queues(struct qdio_irq *irq_ptr)
xchg(irq_ptr->dsci, 1); xchg(irq_ptr->dsci, 1);
} }
/*
* we cannot stop the tiqdio tasklet here since it is for all
* thinint qdio devices and it must run as long as there is a
* thinint device left
*/
void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr) void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr)
{ {
struct qdio_q *q; struct qdio_q *q;
...@@ -126,79 +118,39 @@ void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr) ...@@ -126,79 +118,39 @@ void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr)
} }
} }
static inline int tiqdio_inbound_q_done(struct qdio_q *q)
{
unsigned char state = 0;
if (!atomic_read(&q->nr_buf_used))
return 1;
qdio_siga_sync_q(q);
get_buf_state(q, q->first_to_check, &state, 0);
if (state == SLSB_P_INPUT_PRIMED)
/* more work coming */
return 0;
return 1;
}
static inline int shared_ind(struct qdio_irq *irq_ptr) static inline int shared_ind(struct qdio_irq *irq_ptr)
{ {
return irq_ptr->dsci == &q_indicators[TIQDIO_SHARED_IND].ind; return irq_ptr->dsci == &q_indicators[TIQDIO_SHARED_IND].ind;
} }
static void __tiqdio_inbound_processing(struct qdio_q *q) /**
* tiqdio_thinint_handler - thin interrupt handler for qdio
* @ind: pointer to adapter local summary indicator
* @drv_data: NULL
*/
static void tiqdio_thinint_handler(void *ind, void *drv_data)
{ {
qdio_perf_stat_inc(&perf_stats.thinint_inbound); struct qdio_q *q;
qdio_sync_after_thinint(q);
qdio_perf_stat_inc(&perf_stats.thin_int);
/* /*
* Maybe we have work on our outbound queues... at least * SVS only when needed: issue SVS to benefit from iqdio interrupt
* we have to check the PCI capable queues. * avoidance (SVS clears adapter interrupt suppression overwrite)
*/ */
qdio_check_outbound_after_thinint(q); if (!css_qdio_omit_svs)
do_clear_global_summary();
if (!qdio_inbound_q_moved(q))
return;
qdio_kick_handler(q);
if (!tiqdio_inbound_q_done(q)) {
qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop);
if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
tasklet_schedule(&q->tasklet);
}
qdio_stop_polling(q);
/* /*
* We need to check again to not lose initiative after * reset local summary indicator (tiqdio_alsi) to stop adapter
* resetting the ACK state. * interrupts for now
*/ */
if (!tiqdio_inbound_q_done(q)) { xchg((u8 *)ind, 0);
qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop2);
if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
tasklet_schedule(&q->tasklet);
}
}
void tiqdio_inbound_processing(unsigned long data)
{
struct qdio_q *q = (struct qdio_q *)data;
__tiqdio_inbound_processing(q);
}
/* check for work on all inbound thinint queues */
static void tiqdio_tasklet_fn(unsigned long data)
{
struct qdio_q *q;
qdio_perf_stat_inc(&perf_stats.tasklet_thinint);
again:
/* protect tiq_list entries, only changed in activate or shutdown */ /* protect tiq_list entries, only changed in activate or shutdown */
rcu_read_lock(); rcu_read_lock();
/* check for work on all inbound thinint queues */
list_for_each_entry_rcu(q, &tiq_list, entry) list_for_each_entry_rcu(q, &tiq_list, entry)
/* only process queues from changed sets */ /* only process queues from changed sets */
if (*q->irq_ptr->dsci) { if (*q->irq_ptr->dsci) {
...@@ -226,37 +178,6 @@ static void tiqdio_tasklet_fn(unsigned long data) ...@@ -226,37 +178,6 @@ static void tiqdio_tasklet_fn(unsigned long data)
if (*tiqdio_alsi) if (*tiqdio_alsi)
xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 1); xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 1);
} }
/* check for more work */
if (*tiqdio_alsi) {
xchg(tiqdio_alsi, 0);
qdio_perf_stat_inc(&perf_stats.tasklet_thinint_loop);
goto again;
}
}
/**
* tiqdio_thinint_handler - thin interrupt handler for qdio
* @ind: pointer to adapter local summary indicator
* @drv_data: NULL
*/
static void tiqdio_thinint_handler(void *ind, void *drv_data)
{
qdio_perf_stat_inc(&perf_stats.thin_int);
/*
* SVS only when needed: issue SVS to benefit from iqdio interrupt
* avoidance (SVS clears adapter interrupt suppression overwrite)
*/
if (!css_qdio_omit_svs)
do_clear_global_summary();
/*
* reset local summary indicator (tiqdio_alsi) to stop adapter
* interrupts for now, the tasklet will clean all dsci's
*/
xchg((u8 *)ind, 0);
tasklet_hi_schedule(&tiqdio_tasklet);
} }
static int set_subchannel_ind(struct qdio_irq *irq_ptr, int reset) static int set_subchannel_ind(struct qdio_irq *irq_ptr, int reset)
...@@ -376,5 +297,4 @@ void __exit tiqdio_unregister_thinints(void) ...@@ -376,5 +297,4 @@ void __exit tiqdio_unregister_thinints(void)
s390_unregister_adapter_interrupt(tiqdio_alsi, QDIO_AIRQ_ISC); s390_unregister_adapter_interrupt(tiqdio_alsi, QDIO_AIRQ_ISC);
isc_unregister(QDIO_AIRQ_ISC); isc_unregister(QDIO_AIRQ_ISC);
} }
tasklet_kill(&tiqdio_tasklet);
} }
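The net effect of this hunk is that the former tiqdio tasklet is gone: the adapter interrupt handler now clears the summary indicator itself and walks the RCU-protected tiq_list directly. A simplified userspace sketch of that control flow (plain pointers and flags stand in for RCU and xchg; the structures are illustrative, not the driver's):

/*
 * Simplified, userspace-only model of the merged handler's control flow.
 * Only the shape of the logic matches the driver code above.
 */
#include <stdio.h>

struct fake_queue {
	const char *name;
	unsigned char dsci;		/* per-device "state changed" indicator */
	struct fake_queue *next;
};

static unsigned char summary_indicator;	/* stands in for *tiqdio_alsi */

static void process_queue(struct fake_queue *q)
{
	printf("inbound processing on %s\n", q->name);
	q->dsci = 0;
}

static void thinint_handler(struct fake_queue *head)
{
	struct fake_queue *q;

	/* reset the summary indicator to stop adapter interrupts for now */
	summary_indicator = 0;

	/* no tasklet: walk all registered queues right here */
	for (q = head; q; q = q->next)
		if (q->dsci)
			process_queue(q);
}

int main(void)
{
	struct fake_queue q2 = { "q2", 1, NULL };
	struct fake_queue q1 = { "q1", 0, &q2 };

	summary_indicator = 1;		/* adapter signalled work */
	thinint_handler(&q1);		/* only q2 is processed */
	return 0;
}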
...@@ -54,6 +54,12 @@ static int ap_poll_thread_start(void); ...@@ -54,6 +54,12 @@ static int ap_poll_thread_start(void);
static void ap_poll_thread_stop(void); static void ap_poll_thread_stop(void);
static void ap_request_timeout(unsigned long); static void ap_request_timeout(unsigned long);
static inline void ap_schedule_poll_timer(void); static inline void ap_schedule_poll_timer(void);
static int __ap_poll_device(struct ap_device *ap_dev, unsigned long *flags);
static int ap_device_remove(struct device *dev);
static int ap_device_probe(struct device *dev);
static void ap_interrupt_handler(void *unused1, void *unused2);
static void ap_reset(struct ap_device *ap_dev);
static void ap_config_timeout(unsigned long ptr);
/* /*
* Module description. * Module description.
...@@ -101,6 +107,10 @@ static struct hrtimer ap_poll_timer; ...@@ -101,6 +107,10 @@ static struct hrtimer ap_poll_timer;
* If z/VM change to 1500000 nanoseconds to adjust to z/VM polling.*/ * If z/VM change to 1500000 nanoseconds to adjust to z/VM polling.*/
static unsigned long long poll_timeout = 250000; static unsigned long long poll_timeout = 250000;
/* Suspend flag */
static int ap_suspend_flag;
static struct bus_type ap_bus_type;
/** /**
* ap_using_interrupts() - Returns non-zero if interrupt support is * ap_using_interrupts() - Returns non-zero if interrupt support is
* available. * available.
...@@ -617,10 +627,79 @@ static int ap_uevent (struct device *dev, struct kobj_uevent_env *env) ...@@ -617,10 +627,79 @@ static int ap_uevent (struct device *dev, struct kobj_uevent_env *env)
return retval; return retval;
} }
static int ap_bus_suspend(struct device *dev, pm_message_t state)
{
struct ap_device *ap_dev = to_ap_dev(dev);
unsigned long flags;
if (!ap_suspend_flag) {
ap_suspend_flag = 1;
/* Disable device scanning, so that devices are not scanned
 * for again after they have been removed below.
 */
del_timer_sync(&ap_config_timer);
if (ap_work_queue != NULL) {
destroy_workqueue(ap_work_queue);
ap_work_queue = NULL;
}
tasklet_disable(&ap_tasklet);
}
/* Poll on the device until all requests are finished. */
do {
flags = 0;
__ap_poll_device(ap_dev, &flags);
} while ((flags & 1) || (flags & 2));
ap_device_remove(dev);
return 0;
}
static int ap_bus_resume(struct device *dev)
{
int rc = 0;
struct ap_device *ap_dev = to_ap_dev(dev);
if (ap_suspend_flag) {
ap_suspend_flag = 0;
if (!ap_interrupts_available())
ap_interrupt_indicator = NULL;
ap_device_probe(dev);
ap_reset(ap_dev);
setup_timer(&ap_dev->timeout, ap_request_timeout,
(unsigned long) ap_dev);
ap_scan_bus(NULL);
init_timer(&ap_config_timer);
ap_config_timer.function = ap_config_timeout;
ap_config_timer.data = 0;
ap_config_timer.expires = jiffies + ap_config_time * HZ;
add_timer(&ap_config_timer);
ap_work_queue = create_singlethread_workqueue("kapwork");
if (!ap_work_queue)
return -ENOMEM;
tasklet_enable(&ap_tasklet);
if (!ap_using_interrupts())
ap_schedule_poll_timer();
else
tasklet_schedule(&ap_tasklet);
if (ap_thread_flag)
rc = ap_poll_thread_start();
} else {
ap_device_probe(dev);
ap_reset(ap_dev);
setup_timer(&ap_dev->timeout, ap_request_timeout,
(unsigned long) ap_dev);
}
return rc;
}
static struct bus_type ap_bus_type = { static struct bus_type ap_bus_type = {
.name = "ap", .name = "ap",
.match = &ap_bus_match, .match = &ap_bus_match,
.uevent = &ap_uevent, .uevent = &ap_uevent,
.suspend = ap_bus_suspend,
.resume = ap_bus_resume
}; };
static int ap_device_probe(struct device *dev) static int ap_device_probe(struct device *dev)
...@@ -1066,7 +1145,7 @@ ap_config_timeout(unsigned long ptr) ...@@ -1066,7 +1145,7 @@ ap_config_timeout(unsigned long ptr)
*/ */
static inline void ap_schedule_poll_timer(void) static inline void ap_schedule_poll_timer(void)
{ {
if (ap_using_interrupts()) if (ap_using_interrupts() || ap_suspend_flag)
return; return;
if (hrtimer_is_queued(&ap_poll_timer)) if (hrtimer_is_queued(&ap_poll_timer))
return; return;
...@@ -1384,6 +1463,8 @@ static int ap_poll_thread(void *data) ...@@ -1384,6 +1463,8 @@ static int ap_poll_thread(void *data)
set_user_nice(current, 19); set_user_nice(current, 19);
while (1) { while (1) {
if (ap_suspend_flag)
return 0;
if (need_resched()) { if (need_resched()) {
schedule(); schedule();
continue; continue;
...@@ -1414,7 +1495,7 @@ static int ap_poll_thread_start(void) ...@@ -1414,7 +1495,7 @@ static int ap_poll_thread_start(void)
{ {
int rc; int rc;
if (ap_using_interrupts()) if (ap_using_interrupts() || ap_suspend_flag)
return 0; return 0;
mutex_lock(&ap_poll_thread_mutex); mutex_lock(&ap_poll_thread_mutex);
if (!ap_poll_kthread) { if (!ap_poll_kthread) {
......
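The suspend/resume support added above hinges on one global flag: ap_bus_suspend() sets it and drains outstanding requests, every polling path (poll timer, poll thread) checks it before starting new work, and ap_bus_resume() clears it and rebuilds the timer and workqueue. A userspace sketch of that gating pattern, with purely illustrative names modeled on the driver's:

/*
 * Userspace sketch of the suspend gating; nothing here is the real code.
 */
#include <stdio.h>

static int suspend_flag;			/* models ap_suspend_flag */

static void schedule_poll_timer(void)
{
	if (suspend_flag)
		return;				/* never rearm while suspended */
	printf("poll timer armed\n");
}

static int poll_thread_start(void)
{
	if (suspend_flag)
		return 0;			/* quietly skip, like the driver */
	printf("poll thread started\n");
	return 0;
}

static void bus_suspend(void)
{
	suspend_flag = 1;			/* real code then drains requests */
}

static void bus_resume(void)
{
	suspend_flag = 0;			/* real code rebuilds timer/workqueue */
	schedule_poll_timer();
	poll_thread_start();
}

int main(void)
{
	bus_suspend();
	schedule_poll_timer();			/* suppressed */
	bus_resume();				/* polling starts again */
	return 0;
}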
...@@ -1307,7 +1307,7 @@ static void netiucv_pm_complete(struct device *dev) ...@@ -1307,7 +1307,7 @@ static void netiucv_pm_complete(struct device *dev)
*/ */
static int netiucv_pm_freeze(struct device *dev) static int netiucv_pm_freeze(struct device *dev)
{ {
struct netiucv_priv *priv = dev->driver_data; struct netiucv_priv *priv = dev_get_drvdata(dev);
struct net_device *ndev = NULL; struct net_device *ndev = NULL;
int rc = 0; int rc = 0;
...@@ -1331,7 +1331,7 @@ static int netiucv_pm_freeze(struct device *dev) ...@@ -1331,7 +1331,7 @@ static int netiucv_pm_freeze(struct device *dev)
*/ */
static int netiucv_pm_restore_thaw(struct device *dev) static int netiucv_pm_restore_thaw(struct device *dev)
{ {
struct netiucv_priv *priv = dev->driver_data; struct netiucv_priv *priv = dev_get_drvdata(dev);
struct net_device *ndev = NULL; struct net_device *ndev = NULL;
int rc = 0; int rc = 0;
......
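The netiucv hunks replace direct dev->driver_data access with the dev_get_drvdata()/dev_set_drvdata() accessors. A minimal in-tree-style fragment (hypothetical private structure, not the driver's own) showing the paired usage:

/* Fragment meant to be compiled in-tree; my_priv is hypothetical. */
#include <linux/device.h>

struct my_priv {
	int frozen;
};

static void my_attach(struct device *dev, struct my_priv *priv)
{
	dev_set_drvdata(dev, priv);	/* instead of dev->driver_data = priv */
}

static int my_pm_freeze(struct device *dev)
{
	struct my_priv *priv = dev_get_drvdata(dev);	/* not dev->driver_data */

	priv->frozen = 1;
	return 0;
}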
...@@ -13,6 +13,12 @@ ...@@ -13,6 +13,12 @@
#define cpu_relax() asm volatile ("" ::: "memory"); #define cpu_relax() asm volatile ("" ::: "memory");
#endif #endif
#ifdef __s390__
#include "../../arch/s390/include/asm/unistd.h"
#define rmb() asm volatile("bcr 15,0" ::: "memory")
#define cpu_relax() asm volatile("" ::: "memory");
#endif
#include <time.h> #include <time.h>
#include <unistd.h> #include <unistd.h>
#include <sys/types.h> #include <sys/types.h>
......
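The perf.h hunk gives the perf tool an s390 read barrier ("bcr 15,0" is a serializing branch) so it can read the kernel's mmap'ed ring buffer lock-free. A simplified reader sketch follows; the structure below is an illustrative stand-in for the real mmap control page, not its actual layout:

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the mmap'ed control page -- not the real layout. */
struct ring_ctrl {
	volatile uint64_t data_head;	/* advanced by the kernel */
	volatile uint64_t data_tail;	/* advanced by the reader */
};

#if defined(__s390__)
#define rmb()	asm volatile("bcr 15,0" ::: "memory")	/* as added above */
#else
#define rmb()	__sync_synchronize()			/* generic fallback */
#endif

static uint64_t readable_bytes(struct ring_ctrl *rc)
{
	uint64_t head = rc->data_head;

	/*
	 * Order the load of data_head before any loads from the data area:
	 * only events up to the head read *before* the barrier may be used.
	 */
	rmb();
	return head - rc->data_tail;
}

int main(void)
{
	struct ring_ctrl rc = { .data_head = 4096, .data_tail = 1024 };

	printf("%llu bytes readable\n",
	       (unsigned long long)readable_bytes(&rc));
	return 0;
}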