Commit 01133148 authored by Linus Torvalds

Merge branch 'for-linus' of git://git390.osdl.marist.edu/pub/scm/linux-2.6

* 'for-linus' of git://git390.osdl.marist.edu/pub/scm/linux-2.6:
  [S390] fix s390x_newuname
  [S390] dasd: log sense for fatal errors
  [S390] cpu topology: fix locking
  [S390] cio: Fix refcount after moving devices.
  [S390] ftrace: fix kernel stack backchain walking
  [S390] ftrace: disable tracing on idle psw
  [S390] lockdep: fix compile bug
  [S390] kvm_s390: Fix oops in virtio device detection with "mem="
  [S390] sclp: emit error message if assign storage fails
  [S390] Fix range for add_active_range() in setup_memory()
parents c98114db d2f019fe
@@ -61,22 +61,25 @@ STACK_SIZE = 1 << STACK_SHIFT

 #ifdef CONFIG_TRACE_IRQFLAGS
        .macro  TRACE_IRQS_ON
-       l       %r1,BASED(.Ltrace_irq_on)
+       basr    %r2,%r0
+       l       %r1,BASED(.Ltrace_irq_on_caller)
        basr    %r14,%r1
        .endm

        .macro  TRACE_IRQS_OFF
-       l       %r1,BASED(.Ltrace_irq_off)
+       basr    %r2,%r0
+       l       %r1,BASED(.Ltrace_irq_off_caller)
        basr    %r14,%r1
        .endm

        .macro  TRACE_IRQS_CHECK
+       basr    %r2,%r0
        tm      SP_PSW(%r15),0x03       # irqs enabled?
        jz      0f
-       l       %r1,BASED(.Ltrace_irq_on)
+       l       %r1,BASED(.Ltrace_irq_on_caller)
        basr    %r14,%r1
        j       1f
-0:     l       %r1,BASED(.Ltrace_irq_off)
+0:     l       %r1,BASED(.Ltrace_irq_off_caller)
        basr    %r14,%r1
1:
        .endm
@@ -1113,9 +1116,12 @@ cleanup_io_leave_insn:
 .Lschedtail:   .long   schedule_tail
 .Lsysc_table:  .long   sys_call_table
 #ifdef CONFIG_TRACE_IRQFLAGS
-.Ltrace_irq_on:        .long   trace_hardirqs_on
-.Ltrace_irq_off:
-       .long   trace_hardirqs_off
+.Ltrace_irq_on_caller:
+       .long   trace_hardirqs_on_caller
+.Ltrace_irq_off_caller:
+       .long   trace_hardirqs_off_caller
+#endif
+#ifdef CONFIG_LOCKDEP
 .Llockdep_sys_exit:
        .long   lockdep_sys_exit
 #endif
......
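A note on the TRACE_IRQS_* change above (this hunk and the 64-bit one that follows): `basr %r2,%r0` stores the address of the next instruction in %r2 without branching (a branch target of register 0 means "no branch"), and %r2 is the first argument register in the s390 C calling convention. The macros therefore hand the exact call site to trace_hardirqs_on_caller()/trace_hardirqs_off_caller() instead of leaving the tracer to recover it by walking the kernel stack backchain, which the ftrace code could leave in a state the unwinder cannot walk. A minimal userspace sketch of the wrapper-versus-caller split; the names mirror the kernel entry points, the bodies are purely illustrative:

#include <stdio.h>

/* Illustrative stand-ins for the lockdep entry points; only the calling
 * convention matters here, not the bodies. */
static void trace_hardirqs_on_caller(unsigned long ip)
{
        printf("hardirqs enabled at %#lx\n", ip);
}

static void trace_hardirqs_on(void)
{
        /* Wrapper form: the interesting address is one frame up and has to
         * be recovered by unwinding, the step that was unreliable on s390. */
        trace_hardirqs_on_caller((unsigned long)__builtin_return_address(0));
}

int main(void)
{
        /* Old style: the tracer must unwind to find this call site. */
        trace_hardirqs_on();
        /* New style: the call site is passed in explicitly, the way the
         * "basr %r2,%r0" sequence loads it into the first argument register. */
        trace_hardirqs_on_caller((unsigned long)(void *)main);
        return 0;
}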
@@ -61,19 +61,22 @@ _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \

 #ifdef CONFIG_TRACE_IRQFLAGS
        .macro  TRACE_IRQS_ON
-       brasl   %r14,trace_hardirqs_on
+       basr    %r2,%r0
+       brasl   %r14,trace_hardirqs_on_caller
        .endm

        .macro  TRACE_IRQS_OFF
-       brasl   %r14,trace_hardirqs_off
+       basr    %r2,%r0
+       brasl   %r14,trace_hardirqs_off_caller
        .endm

        .macro  TRACE_IRQS_CHECK
+       basr    %r2,%r0
        tm      SP_PSW(%r15),0x03       # irqs enabled?
        jz      0f
-       brasl   %r14,trace_hardirqs_on
+       brasl   %r14,trace_hardirqs_on_caller
        j       1f
-0:     brasl   %r14,trace_hardirqs_off
+0:     brasl   %r14,trace_hardirqs_off_caller
1:
        .endm
 #else
......
@@ -136,9 +136,12 @@ static void default_idle(void)
                return;
        }
        trace_hardirqs_on();
+       /* Don't trace preempt off for idle. */
+       stop_critical_timings();
        /* Wait for external, I/O or machine check interrupt. */
        __load_psw_mask(psw_kernel_bits | PSW_MASK_WAIT |
                        PSW_MASK_IO | PSW_MASK_EXT);
+       start_critical_timings();
 }

 void cpu_idle(void)
......
@@ -604,13 +604,13 @@ setup_memory(void)
                if (memory_chunk[i].type != CHUNK_READ_WRITE)
                        continue;
                start_chunk = PFN_DOWN(memory_chunk[i].addr);
-               end_chunk = start_chunk + PFN_DOWN(memory_chunk[i].size) - 1;
+               end_chunk = start_chunk + PFN_DOWN(memory_chunk[i].size);
                end_chunk = min(end_chunk, end_pfn);
                if (start_chunk >= end_chunk)
                        continue;
                add_active_range(0, start_chunk, end_chunk);
                pfn = max(start_chunk, start_pfn);
-               for (; pfn <= end_chunk; pfn++)
+               for (; pfn < end_chunk; pfn++)
                        page_set_storage_key(PFN_PHYS(pfn), PAGE_DEFAULT_KEY);
        }
......
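On the hunk above: add_active_range() takes its end PFN as exclusive, i.e. the first page frame after the range, so the old "- 1" registered every memory chunk one page short; end_chunk is now kept exclusive and the storage-key loop switches to "<" so it still covers exactly the same pages. A small standalone sketch of the arithmetic, assuming the usual 4 KiB page size and a made-up 1 MiB chunk (PFN_DOWN/PFN_PHYS are re-declared here only for the demo):

#include <stdio.h>

#define PAGE_SHIFT      12                              /* assumed 4 KiB pages */
#define PFN_DOWN(x)     ((x) >> PAGE_SHIFT)
#define PFN_PHYS(x)     ((unsigned long)(x) << PAGE_SHIFT)

int main(void)
{
        /* Hypothetical 1 MiB read/write chunk starting at 16 MiB. */
        unsigned long addr = 16UL << 20, size = 1UL << 20;

        unsigned long start_chunk = PFN_DOWN(addr);                     /* 4096 */
        unsigned long end_chunk = start_chunk + PFN_DOWN(size);        /* 4352, exclusive */
        unsigned long pfn, pages = 0;

        /* Half-open interval [start_chunk, end_chunk): the last valid page
         * frame is end_chunk - 1, which the old "- 1" end value dropped from
         * the range handed to add_active_range(). */
        for (pfn = start_chunk; pfn < end_chunk; pfn++)
                pages++;        /* stands in for page_set_storage_key() */

        printf("registering [%lu, %lu) covers %lu pages, last byte at %#lx\n",
               start_chunk, end_chunk, pages, PFN_PHYS(end_chunk) - 1);
        return 0;
}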
@@ -198,7 +198,7 @@ asmlinkage long s390x_newuname(struct new_utsname __user *name)
 {
        int ret = sys_newuname(name);

-       if (current->personality == PER_LINUX32 && !ret) {
+       if (personality(current->personality) == PER_LINUX32 && !ret) {
                ret = copy_to_user(name->machine, "s390\0\0\0\0", 8);
                if (ret) ret = -EFAULT;
        }
......
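Background on the s390x_newuname fix: current->personality also carries flag bits (ADDR_NO_RANDOMIZE and friends) OR'ed in alongside the base personality, so comparing the raw field against PER_LINUX32 fails as soon as any flag is set; the personality() macro masks the value down to the base personality first. A small demo of the masking; the constants are reproduced from memory of <linux/personality.h> and are illustrative, the header is authoritative:

#include <stdio.h>

/* Values as recalled from <linux/personality.h>; illustrative only. */
#define PER_MASK                0x00ffUL
#define PER_LINUX32             0x0008UL
#define ADDR_NO_RANDOMIZE       0x0040000UL     /* one of the flag bits */

#define personality(pers)       ((pers) & PER_MASK)

int main(void)
{
        unsigned long pers = PER_LINUX32 | ADDR_NO_RANDOMIZE;

        /* The old test compared the raw value and missed PER_LINUX32 whenever
         * a flag bit was set; masking first restores the intended check. */
        printf("raw compare:    %s\n",
               pers == PER_LINUX32 ? "match" : "no match");
        printf("masked compare: %s\n",
               personality(pers) == PER_LINUX32 ? "match" : "no match");
        return 0;
}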
@@ -65,18 +65,21 @@ static int machine_has_topology_irq;
 static struct timer_list topology_timer;
 static void set_topology_timer(void);
 static DECLARE_WORK(topology_work, topology_work_fn);
+/* topology_lock protects the core linked list */
+static DEFINE_SPINLOCK(topology_lock);

 cpumask_t cpu_core_map[NR_CPUS];

 cpumask_t cpu_coregroup_map(unsigned int cpu)
 {
        struct core_info *core = &core_info;
+       unsigned long flags;
        cpumask_t mask;

        cpus_clear(mask);
        if (!machine_has_topology)
                return cpu_present_map;
-       mutex_lock(&smp_cpu_state_mutex);
+       spin_lock_irqsave(&topology_lock, flags);
        while (core) {
                if (cpu_isset(cpu, core->mask)) {
                        mask = core->mask;
@@ -84,7 +87,7 @@ cpumask_t cpu_coregroup_map(unsigned int cpu)
                }
                core = core->next;
        }
-       mutex_unlock(&smp_cpu_state_mutex);
+       spin_unlock_irqrestore(&topology_lock, flags);
        if (cpus_empty(mask))
                mask = cpumask_of_cpu(cpu);
        return mask;
@@ -133,7 +136,7 @@ static void tl_to_cores(struct tl_info *info)
        union tl_entry *tle, *end;
        struct core_info *core = &core_info;

-       mutex_lock(&smp_cpu_state_mutex);
+       spin_lock_irq(&topology_lock);
        clear_cores();
        tle = info->tle;
        end = (union tl_entry *)((unsigned long)info + info->length);
@@ -157,7 +160,7 @@ static void tl_to_cores(struct tl_info *info)
                }
                tle = next_tle(tle);
        }
-       mutex_unlock(&smp_cpu_state_mutex);
+       spin_unlock_irq(&topology_lock);
 }

 static void topology_update_polarization_simple(void)
......
@@ -1746,6 +1746,11 @@ static void __dasd_process_block_ccw_queue(struct dasd_block *block,
                        goto restart;
                }

+               /* log sense for fatal error */
+               if (cqr->status == DASD_CQR_FAILED) {
+                       dasd_log_sense(cqr, &cqr->irb);
+               }
+
                /* First of all call extended error reporting. */
                if (dasd_eer_enabled(base) &&
                    cqr->status == DASD_CQR_FAILED) {
......
@@ -324,6 +324,9 @@ static int do_assign_storage(sclp_cmdw_t cmd, u16 rn)
        case 0x0120:
                break;
        default:
+               pr_warning("assign storage failed (cmd=0x%08x, "
+                          "response=0x%04x, rn=0x%04x)\n", cmd,
+                          sccb->header.response_code, rn);
                rc = -EIO;
                break;
        }
......
@@ -874,11 +874,15 @@ void ccw_device_move_to_orphanage(struct work_struct *work)
        replacing_cdev = get_disc_ccwdev_by_dev_id(&dev_id, cdev);
        if (replacing_cdev) {
                sch_attach_disconnected_device(sch, replacing_cdev);
+               /* Release reference from get_disc_ccwdev_by_dev_id() */
+               put_device(&cdev->dev);
                return;
        }
        replacing_cdev = get_orphaned_ccwdev_by_dev_id(css, &dev_id);
        if (replacing_cdev) {
                sch_attach_orphaned_device(sch, replacing_cdev);
+               /* Release reference from get_orphaned_ccwdev_by_dev_id() */
+               put_device(&cdev->dev);
                return;
        }
        sch_create_and_recog_new_device(sch);
......
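As the commit subject ("cio: Fix refcount after moving devices") and the added comments indicate, the get_*_ccwdev_by_dev_id() lookups leave an extra device reference behind, and the two early-return paths were missing the balancing put_device(). The toy sketch below shows the general get/put pairing rule with invented names; it is not the cio code itself:

#include <stdio.h>

/* Toy refcounted object illustrating the get/put pairing rule: whoever takes
 * (or receives) a reference must drop exactly one reference when done. */
struct object {
        int refcount;
};

static struct object *object_get(struct object *obj)
{
        obj->refcount++;
        return obj;
}

static void object_put(struct object *obj)
{
        if (--obj->refcount == 0)
                printf("object released\n");    /* real code would free it here */
}

/* Stand-in for a lookup helper that hands back its match with an extra
 * reference held on behalf of the caller. */
static struct object *lookup(struct object *obj)
{
        return object_get(obj);
}

int main(void)
{
        struct object dev = { .refcount = 1 };
        struct object *found = lookup(&dev);

        if (found) {
                /* ... attach / use the device ... */
                object_put(found);      /* the balancing put the fix adds */
        }
        object_put(&dev);               /* drop the original reference */
        printf("refcount at exit: %d\n", dev.refcount);
        return 0;
}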
@@ -322,13 +322,13 @@ static int __init kvm_devices_init(void)
                return rc;
        }

-       rc = vmem_add_mapping(PFN_PHYS(max_pfn), PAGE_SIZE);
+       rc = vmem_add_mapping(real_memory_size, PAGE_SIZE);
        if (rc) {
                s390_root_dev_unregister(kvm_root);
                return rc;
        }

-       kvm_devices = (void *) PFN_PHYS(max_pfn);
+       kvm_devices = (void *) real_memory_size;

        ctl_set_bit(0, 9);
        register_external_interrupt(0x2603, kvm_extint_handler);
......
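As I understand the s390 kvm_virtio layout of that era, the host places the virtio device descriptor page immediately above the guest's detected memory, i.e. at real_memory_size. When the guest is booted with a mem= limit, max_pfn reflects the truncated size, so PFN_PHYS(max_pfn) no longer points at the descriptor page and the probe reads ordinary guest memory, hence the oops. A toy calculation under assumed sizes (1 GiB guest, mem=512M, 4 KiB pages):

#include <stdio.h>

#define PAGE_SHIFT      12
#define PFN_PHYS(x)     ((unsigned long long)(x) << PAGE_SHIFT)

int main(void)
{
        /* Hypothetical guest: 1 GiB of real memory, booted with mem=512M. */
        unsigned long long real_memory_size = 1ULL << 30;               /* detected size */
        unsigned long long max_pfn = (512ULL << 20) >> PAGE_SHIFT;      /* capped by mem= */

        printf("descriptor page expected at %#llx\n", real_memory_size);
        printf("PFN_PHYS(max_pfn) points at %#llx\n", PFN_PHYS(max_pfn));
        /* With mem= in effect the two addresses differ, so probing at
         * PFN_PHYS(max_pfn) misses the page the host actually provided. */
        return 0;
}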