Commit 4937ce87 authored by Linus Torvalds

Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6

* 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6:
  [IA64] update sn2_defconfig
  [IA64] Fix kernel hangup in kdump on INIT
  [IA64] Fix kernel panic in kdump on INIT
  [IA64] Remove vector from ia64_machine_kexec()
  [IA64] Fix race when multiple cpus go through MCA
  [IA64] Remove needless delay in MCA rendezvous
  [IA64] add driver for ACPI methods to call native firmware
  [IA64] abstract SAL_CALL wrapper to allow other firmware entry points
  [IA64] perfmon: Remove exit_pfm_fs()
  [IA64] tree-wide: Misc __cpu{initdata, init, exit} annotations
parents 4271e0f7 432a7d65
@@ -461,6 +461,16 @@ config IA64_ESI
 	  firmware extensions, such as the ability to inject memory-errors
 	  for test-purposes.  If you're unsure, say N.
 
+config IA64_HP_AML_NFW
+	bool "Support ACPI AML calls to native firmware"
+	help
+	  This driver installs a global ACPI Operation Region handler for
+	  region 0xA1.  AML methods can use this OpRegion to call arbitrary
+	  native firmware functions.  The driver installs the OpRegion
+	  handler if there is an HPQ5001 device or if the user supplies
+	  the "force" module parameter, e.g., with the "aml_nfw.force"
+	  kernel command line option.
+
 source "drivers/sn/Kconfig"
 
 config KEXEC
......
This diff is collapsed.
@@ -8,3 +8,4 @@
 obj-y				:= sba_iommu.o
 obj-$(CONFIG_IA64_HP_ZX1_SWIOTLB)	+= hwsw_iommu.o
 obj-$(CONFIG_IA64_GENERIC)		+= hwsw_iommu.o
+obj-$(CONFIG_IA64_HP_AML_NFW)		+= aml_nfw.o
/*
 * OpRegion handler to allow AML to call native firmware
 *
 * (c) Copyright 2007 Hewlett-Packard Development Company, L.P.
 *	Bjorn Helgaas <bjorn.helgaas@hp.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This driver implements HP Open Source Review Board proposal 1842,
 * which was approved on 9/20/2006.
 *
 * For technical documentation, see the HP SPPA Firmware EAS, Appendix F.
 *
 * ACPI does not define a mechanism for AML methods to call native firmware
 * interfaces such as PAL or SAL.  This OpRegion handler adds such a mechanism.
 * After the handler is installed, an AML method can call native firmware by
 * storing the arguments and firmware entry point to specific offsets in the
 * OpRegion.  When AML reads the "return value" offset from the OpRegion,
 * this handler loads up the arguments, makes the firmware call, and returns
 * the result.
 */

#include <linux/module.h>
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
#include <asm/sal.h>

MODULE_AUTHOR("Bjorn Helgaas <bjorn.helgaas@hp.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("ACPI opregion handler for native firmware calls");

static int force_register;
module_param_named(force, force_register, bool, 0);
MODULE_PARM_DESC(force, "Install opregion handler even without HPQ5001 device");

#define AML_NFW_SPACE 0xA1

struct ia64_pdesc {
	void *ip;
	void *gp;
};

/*
 * N.B.  The layout of this structure is defined in the HP SPPA FW EAS, and
 * the member offsets are embedded in AML methods.
 */
struct ia64_nfw_context {
	u64 arg[8];
	struct ia64_sal_retval ret;
	u64 ip;
	u64 gp;
	u64 pad[2];
};
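/*
 * Map a firmware-supplied physical address to a kernel virtual address.
 * By ia64 convention, a physical address with bit 63 set requests an
 * uncacheable access, so route those through the uncached identity
 * region rather than the ordinary cached mapping from __va().
 */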
static void *virt_map(u64 address)
{
	if (address & (1UL << 63))
		return (void *) (__IA64_UNCACHED_OFFSET | address);

	return __va(address);
}
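/*
 * Build an ia64 function descriptor (entry ip plus gp) around the
 * virtual mappings of the AML-supplied code and gp addresses, then
 * make the eight-argument firmware call with the staged arguments.
 */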
static void aml_nfw_execute(struct ia64_nfw_context *c)
{
	struct ia64_pdesc virt_entry;
	ia64_sal_handler entry;

	virt_entry.ip = virt_map(c->ip);
	virt_entry.gp = virt_map(c->gp);

	entry = (ia64_sal_handler) &virt_entry;

	IA64_FW_CALL(entry, c->ret,
		c->arg[0], c->arg[1], c->arg[2], c->arg[3],
		c->arg[4], c->arg[5], c->arg[6], c->arg[7]);
}

static void aml_nfw_read_arg(u8 *offset, u32 bit_width, acpi_integer *value)
{
	switch (bit_width) {
	case 8:
		*value = *(u8 *)offset;
		break;
	case 16:
		*value = *(u16 *)offset;
		break;
	case 32:
		*value = *(u32 *)offset;
		break;
	case 64:
		*value = *(u64 *)offset;
		break;
	}
}

static void aml_nfw_write_arg(u8 *offset, u32 bit_width, acpi_integer *value)
{
	switch (bit_width) {
	case 8:
		*(u8 *) offset = *value;
		break;
	case 16:
		*(u16 *) offset = *value;
		break;
	case 32:
		*(u32 *) offset = *value;
		break;
	case 64:
		*(u64 *) offset = *value;
		break;
	}
}

static acpi_status aml_nfw_handler(u32 function, acpi_physical_address address,
	u32 bit_width, acpi_integer *value, void *handler_context,
	void *region_context)
{
	struct ia64_nfw_context *context = handler_context;
	u8 *offset = (u8 *) context + address;

	if (bit_width != 8 && bit_width != 16 &&
	    bit_width != 32 && bit_width != 64)
		return AE_BAD_PARAMETER;

	if (address + (bit_width >> 3) > sizeof(struct ia64_nfw_context))
		return AE_BAD_PARAMETER;

	switch (function) {
	case ACPI_READ:
		if (address == offsetof(struct ia64_nfw_context, ret))
			aml_nfw_execute(context);
		aml_nfw_read_arg(offset, bit_width, value);
		break;
	case ACPI_WRITE:
		aml_nfw_write_arg(offset, bit_width, value);
		break;
	}

	return AE_OK;
}

static struct ia64_nfw_context global_context;
static int global_handler_registered;

static int aml_nfw_add_global_handler(void)
{
	acpi_status status;

	if (global_handler_registered)
		return 0;

	status = acpi_install_address_space_handler(ACPI_ROOT_OBJECT,
		AML_NFW_SPACE, aml_nfw_handler, NULL, &global_context);
	if (ACPI_FAILURE(status))
		return -ENODEV;

	global_handler_registered = 1;
	printk(KERN_INFO "Global 0x%02X opregion handler registered\n",
		AML_NFW_SPACE);
	return 0;
}

static int aml_nfw_remove_global_handler(void)
{
	acpi_status status;

	if (!global_handler_registered)
		return 0;

	status = acpi_remove_address_space_handler(ACPI_ROOT_OBJECT,
		AML_NFW_SPACE, aml_nfw_handler);
	if (ACPI_FAILURE(status))
		return -ENODEV;

	global_handler_registered = 0;
	printk(KERN_INFO "Global 0x%02X opregion handler removed\n",
		AML_NFW_SPACE);
	return 0;
}

static int aml_nfw_add(struct acpi_device *device)
{
	/*
	 * We would normally allocate a new context structure and install
	 * the address space handler for the specific device we found.
	 * But the HP-UX implementation shares a single global context
	 * and always puts the handler at the root, so we'll do the same.
	 */
	return aml_nfw_add_global_handler();
}

static int aml_nfw_remove(struct acpi_device *device, int type)
{
	return aml_nfw_remove_global_handler();
}

static const struct acpi_device_id aml_nfw_ids[] = {
	{"HPQ5001", 0},
	{"", 0}
};

static struct acpi_driver acpi_aml_nfw_driver = {
	.name = "native firmware",
	.ids = aml_nfw_ids,
	.ops = {
		.add = aml_nfw_add,
		.remove = aml_nfw_remove,
		},
};

static int __init aml_nfw_init(void)
{
	int result;

	if (force_register)
		aml_nfw_add_global_handler();

	result = acpi_bus_register_driver(&acpi_aml_nfw_driver);
	if (result < 0) {
		aml_nfw_remove_global_handler();
		return result;
	}

	return 0;
}

static void __exit aml_nfw_exit(void)
{
	acpi_bus_unregister_driver(&acpi_aml_nfw_driver);
	aml_nfw_remove_global_handler();
}

module_init(aml_nfw_init);
module_exit(aml_nfw_exit);
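Aside: the fixed layout of struct ia64_nfw_context above is the entire contract between this handler and the firmware's AML methods; the member byte offsets are hard-coded on the AML side (per the comment citing the HP SPPA FW EAS). A minimal stand-alone sketch, not part of the commit, that prints the offsets an AML method would use; the structures are mirrored here with stdint types, assuming the sal.h return layout of a status word plus three result words:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

typedef uint64_t u64;
typedef int64_t s64;

struct ia64_sal_retval {		/* mirrors asm/sal.h */
	s64 status;
	u64 v0, v1, v2;
};

struct ia64_nfw_context {		/* mirrors the driver */
	u64 arg[8];
	struct ia64_sal_retval ret;
	u64 ip;
	u64 gp;
	u64 pad[2];
};

int main(void)
{
	/* AML stores arguments and the entry point at these offsets,
	 * then triggers the call by reading the "ret" offset (see
	 * aml_nfw_handler above). */
	printf("arg at 0x%02zx\n", offsetof(struct ia64_nfw_context, arg));
	printf("ret at 0x%02zx\n", offsetof(struct ia64_nfw_context, ret));
	printf("ip  at 0x%02zx\n", offsetof(struct ia64_nfw_context, ip));
	printf("gp  at 0x%02zx\n", offsetof(struct ia64_nfw_context, gp));
	return 0;
}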
@@ -118,11 +118,6 @@ machine_crash_shutdown(struct pt_regs *pt)
 static void
 machine_kdump_on_init(void)
 {
-	if (!ia64_kimage) {
-		printk(KERN_NOTICE "machine_kdump_on_init(): "
-			"kdump not configured\n");
-		return;
-	}
 	local_irq_disable();
 	kexec_disable_iosapic();
 	machine_kexec(ia64_kimage);
@@ -156,6 +151,14 @@ kdump_init_notifier(struct notifier_block *self, unsigned long val, void *data)
 	if (!kdump_on_init)
 		return NOTIFY_DONE;
 
+	if (!ia64_kimage) {
+		if (val == DIE_INIT_MONARCH_LEAVE)
+			ia64_mca_printk(KERN_NOTICE
+					"%s: kdump not configured\n",
+					__FUNCTION__);
+		return NOTIFY_DONE;
+	}
+
 	if (val != DIE_INIT_MONARCH_LEAVE &&
 	    val != DIE_INIT_SLAVE_LEAVE &&
 	    val != DIE_INIT_MONARCH_PROCESS &&
......
@@ -79,7 +79,6 @@ static void ia64_machine_kexec(struct unw_frame_info *info, void *arg)
 	relocate_new_kernel_t rnk;
 	void *pal_addr = efi_get_pal_addr();
 	unsigned long code_addr = (unsigned long)page_address(image->control_code_page);
-	unsigned long vector;
 	int ii;
 
 	BUG_ON(!image);
@@ -107,11 +106,8 @@ static void ia64_machine_kexec(struct unw_frame_info *info, void *arg)
 	/* unmask TPR and clear any pending interrupts */
 	ia64_setreg(_IA64_REG_CR_TPR, 0);
 	ia64_srlz_d();
-	vector = ia64_get_ivr();
-	while (vector != IA64_SPURIOUS_INT_VECTOR) {
+	while (ia64_get_ivr() != IA64_SPURIOUS_INT_VECTOR)
 		ia64_eoi();
-		vector = ia64_get_ivr();
-	}
 	platform_kernel_launch_event();
 	rnk = (relocate_new_kernel_t)&code_addr;
 	(*rnk)(image->head, image->start, ia64_boot_param,
......
@@ -701,8 +701,7 @@ ia64_mca_cmc_vector_enable_keventd(struct work_struct *unused)
 /*
  *	ia64_mca_wakeup
  *
- *	Send an inter-cpu interrupt to wake-up a particular cpu
- *	and mark that cpu to be out of rendez.
+ *	Send an inter-cpu interrupt to wake-up a particular cpu.
  *
  *	Inputs  :	cpuid
  *	Outputs :	None
@@ -711,14 +710,12 @@ static void
 ia64_mca_wakeup(int cpu)
 {
 	platform_send_ipi(cpu, IA64_MCA_WAKEUP_VECTOR, IA64_IPI_DM_INT, 0);
-	ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
-
 }
 
 /*
  *	ia64_mca_wakeup_all
  *
- *	Wakeup all the cpus which have rendez'ed previously.
+ *	Wakeup all the slave cpus which have rendez'ed previously.
  *
  *	Inputs  :	None
  *	Outputs :	None
* Outputs : None
@@ -741,7 +738,10 @@ ia64_mca_wakeup_all(void)
  *
  *	This is handler used to put slave processors into spinloop
  *	while the monarch processor does the mca handling and later
- *	wake each slave up once the monarch is done.
+ *	wake each slave up once the monarch is done.  The state
+ *	IA64_MCA_RENDEZ_CHECKIN_DONE indicates the cpu is rendez'ed
+ *	in SAL.  The state IA64_MCA_RENDEZ_CHECKIN_NOTDONE indicates
+ *	the cpu has come out of OS rendezvous.
  *
  *	Inputs  :	None
  *	Outputs :	None
@@ -778,6 +778,7 @@ ia64_mca_rendez_int_handler(int rendez_irq, void *arg)
 			       (long)&nd, 0, 0) == NOTIFY_STOP)
 		ia64_mca_spin(__FUNCTION__);
 
+	ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
 	/* Enable all interrupts */
 	local_irq_restore(flags);
 	return IRQ_HANDLED;
@@ -1135,30 +1136,27 @@ ia64_mca_modify_original_stack(struct pt_regs *regs,
 static void
 ia64_wait_for_slaves(int monarch, const char *type)
 {
-	int c, wait = 0, missing = 0;
-	for_each_online_cpu(c) {
-		if (c == monarch)
-			continue;
-		if (ia64_mc_info.imi_rendez_checkin[c] == IA64_MCA_RENDEZ_CHECKIN_NOTDONE) {
-			udelay(1000);		/* short wait first */
-			wait = 1;
-			break;
-		}
-	}
-	if (!wait)
-		goto all_in;
-	for_each_online_cpu(c) {
-		if (c == monarch)
-			continue;
-		if (ia64_mc_info.imi_rendez_checkin[c] == IA64_MCA_RENDEZ_CHECKIN_NOTDONE) {
-			udelay(5*1000000);	/* wait 5 seconds for slaves (arbitrary) */
-			if (ia64_mc_info.imi_rendez_checkin[c] == IA64_MCA_RENDEZ_CHECKIN_NOTDONE)
-				missing = 1;
-			break;
-		}
-	}
-	if (!missing)
-		goto all_in;
+	int c, i, wait;
+
+	/*
+	 * wait 5 seconds total for slaves (arbitrary)
+	 */
+	for (i = 0; i < 5000; i++) {
+		wait = 0;
+		for_each_online_cpu(c) {
+			if (c == monarch)
+				continue;
+			if (ia64_mc_info.imi_rendez_checkin[c]
+					== IA64_MCA_RENDEZ_CHECKIN_NOTDONE) {
+				udelay(1000);		/* short wait */
+				wait = 1;
+				break;
+			}
+		}
+		if (!wait)
+			goto all_in;
+	}
+
 	/*
 	 * Maybe slave(s) dead. Print buffered messages immediately.
 	 */
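The rework above replaces one long blocking wait with a bounded poll: up to 5000 passes of a 1 ms delay, rescanning the online cpus on each pass, so the monarch proceeds as soon as the last slave checks in instead of always paying the full five seconds (hence "[IA64] Remove needless delay in MCA rendezvous"). A stand-alone sketch of the same pattern, with hypothetical names and usleep() standing in for the kernel's udelay():

#include <stdbool.h>
#include <unistd.h>

#define NR_CPUS 8

static volatile bool checked_in[NR_CPUS];	/* per-cpu rendezvous state */

/* Returns true once every cpu except the monarch has checked in,
 * false after ~5 s (5000 x 1 ms) if some cpu never arrives. */
static bool wait_for_slaves(int monarch)
{
	for (int i = 0; i < 5000; i++) {
		bool waiting = false;

		for (int c = 0; c < NR_CPUS; c++) {
			if (c == monarch || checked_in[c])
				continue;
			usleep(1000);		/* short wait, then rescan */
			waiting = true;
			break;
		}
		if (!waiting)
			return true;		/* all slaves arrived early */
	}
	return false;				/* slave(s) may be dead */
}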
@@ -1224,26 +1222,27 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
 	if (notify_die(DIE_MCA_MONARCH_ENTER, "MCA", regs, (long)&nd, 0, 0)
 			== NOTIFY_STOP)
 		ia64_mca_spin(__FUNCTION__);
 
+	ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_CONCURRENT_MCA;
 	if (sos->monarch) {
 		ia64_wait_for_slaves(cpu, "MCA");
+
+		/* Wakeup all the processors which are spinning in the
+		 * rendezvous loop.  They will leave SAL, then spin in the OS
+		 * with interrupts disabled until this monarch cpu leaves the
+		 * MCA handler.  That gets control back to the OS so we can
+		 * backtrace the other cpus, backtrace when spinning in SAL
+		 * does not work.
+		 */
+		ia64_mca_wakeup_all();
+		if (notify_die(DIE_MCA_MONARCH_PROCESS, "MCA", regs, (long)&nd, 0, 0)
+				== NOTIFY_STOP)
+			ia64_mca_spin(__FUNCTION__);
 	} else {
-		ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_CONCURRENT_MCA;
 		while (cpu_isset(cpu, mca_cpu))
 			cpu_relax();	/* spin until monarch wakes us */
-		ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
 	}
 
-	/* Wakeup all the processors which are spinning in the rendezvous loop.
-	 * They will leave SAL, then spin in the OS with interrupts disabled
-	 * until this monarch cpu leaves the MCA handler.  That gets control
-	 * back to the OS so we can backtrace the other cpus, backtrace when
-	 * spinning in SAL does not work.
-	 */
-	ia64_mca_wakeup_all();
-	if (notify_die(DIE_MCA_MONARCH_PROCESS, "MCA", regs, (long)&nd, 0, 0)
-			== NOTIFY_STOP)
-		ia64_mca_spin(__FUNCTION__);
-
 	/* Get the MCA error record and log it */
 	ia64_mca_log_sal_error_record(SAL_INFO_TYPE_MCA);
@@ -1277,21 +1276,22 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
 		/* wake up the next monarch cpu,
 		 * and put this cpu in the rendez loop.
 		 */
+		ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_CONCURRENT_MCA;
 		for_each_online_cpu(i) {
 			if (cpu_isset(i, mca_cpu)) {
 				monarch_cpu = i;
 				cpu_clear(i, mca_cpu);	/* wake next cpu */
 				while (monarch_cpu != -1)
 					cpu_relax();	/* spin until last cpu leaves */
-				ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
 				set_curr_task(cpu, previous_current);
+				ia64_mc_info.imi_rendez_checkin[cpu]
+						= IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
 				return;
 			}
 		}
 	}
 	set_curr_task(cpu, previous_current);
-	monarch_cpu = -1;
+	ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
+	monarch_cpu = -1;	/* This frees the slaves and previous monarchs */
 }
 
 static DECLARE_WORK(cmc_disable_work, ia64_mca_cmc_vector_disable_keventd);
......
@@ -118,7 +118,5 @@ struct mca_table_entry {
 extern const struct mca_table_entry *search_mca_tables (unsigned long addr);
 
 extern int mca_recover_range(unsigned long);
-extern void ia64_mca_printk(const char * fmt, ...)
-	 __attribute__ ((format (printf, 1, 2)));
 extern void ia64_mlogbuf_dump(void);
 
@@ -907,7 +907,7 @@ palinfo_read_entry(char *page, char **start, off_t off, int count, int *eof, void *data)
 	return len;
 }
 
-static void
+static void __cpuinit
 create_palinfo_proc_entries(unsigned int cpu)
 {
 #	define CPUSTR	"cpu%d"
@@ -968,7 +968,7 @@ remove_palinfo_proc_entries(unsigned int hcpu)
 	}
 }
 
-static int palinfo_cpu_callback(struct notifier_block *nfb,
+static int __cpuinit palinfo_cpu_callback(struct notifier_block *nfb,
 					unsigned long action, void *hcpu)
 {
 	unsigned int hotcpu = (unsigned long)hcpu;
@@ -986,7 +986,7 @@ static int palinfo_cpu_callback(struct notifier_block *nfb,
 	return NOTIFY_OK;
 }
 
-static struct notifier_block palinfo_cpu_notifier =
+static struct notifier_block palinfo_cpu_notifier __cpuinitdata =
 {
 	.notifier_call = palinfo_cpu_callback,
 	.priority = 0,
......
@@ -1538,13 +1538,6 @@ init_pfm_fs(void)
 	return err;
 }
 
-static void __exit
-exit_pfm_fs(void)
-{
-	unregister_filesystem(&pfm_fs_type);
-	mntput(pfmfs_mnt);
-}
-
 static ssize_t
 pfm_read(struct file *filp, char __user *buf, size_t size, loff_t *ppos)
 {
......
@@ -574,7 +574,7 @@ static const struct file_operations salinfo_data_fops = {
 	.write   = salinfo_log_write,
 };
 
-static int __devinit
+static int __cpuinit
 salinfo_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
 {
 	unsigned int i, cpu = (unsigned long)hcpu;
@@ -615,7 +615,7 @@ salinfo_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
 	return NOTIFY_OK;
 }
 
-static struct notifier_block salinfo_cpu_notifier =
+static struct notifier_block salinfo_cpu_notifier __cpuinitdata =
 {
 	.notifier_call = salinfo_cpu_callback,
 	.priority = 0,
......
@@ -118,11 +118,11 @@ struct cpu_cache_info {
 	struct kobject kobj;
 };
 
-static struct cpu_cache_info	all_cpu_cache_info[NR_CPUS];
+static struct cpu_cache_info	all_cpu_cache_info[NR_CPUS] __cpuinitdata;
 #define LEAF_KOBJECT_PTR(x,y)    (&all_cpu_cache_info[x].cache_leaves[y])
 
 #ifdef CONFIG_SMP
-static void cache_shared_cpu_map_setup( unsigned int cpu,
+static void __cpuinit cache_shared_cpu_map_setup( unsigned int cpu,
 		struct cache_info * this_leaf)
 {
 	pal_cache_shared_info_t csi;
@@ -157,7 +157,7 @@ static void cache_shared_cpu_map_setup( unsigned int cpu,
 			&csi) == PAL_STATUS_SUCCESS);
 }
 #else
-static void cache_shared_cpu_map_setup(unsigned int cpu,
+static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu,
 		struct cache_info * this_leaf)
 {
 	cpu_set(cpu, this_leaf->shared_cpu_map);
@@ -428,13 +428,13 @@ static struct notifier_block __cpuinitdata cache_cpu_notifier =
 	.notifier_call = cache_cpu_callback
 };
 
-static int __cpuinit cache_sysfs_init(void)
+static int __init cache_sysfs_init(void)
 {
 	int i;
 
 	for_each_online_cpu(i) {
-		cache_cpu_callback(&cache_cpu_notifier, CPU_ONLINE,
-				(void *)(long)i);
+		struct sys_device *sys_dev = get_cpu_sysdev((unsigned int)i);
+		cache_add_dev(sys_dev);
 	}
 
 	register_hotcpu_notifier(&cache_cpu_notifier);
......
@@ -151,6 +151,8 @@ extern void ia64_mca_cmc_vector_setup(void);
 extern int ia64_reg_MCA_extension(int (*fn)(void *, struct ia64_sal_os_state *));
 extern void ia64_unreg_MCA_extension(void);
 extern u64 ia64_get_rnat(u64 *);
+extern void ia64_mca_printk(const char * fmt, ...)
+	 __attribute__ ((format (printf, 1, 2)));
 
 struct ia64_mca_notify_die {
 	struct ia64_sal_os_state *sos;
......
@@ -46,25 +46,28 @@
 extern spinlock_t sal_lock;
 
 /* SAL spec _requires_ eight args for each call. */
-#define __SAL_CALL(result,a0,a1,a2,a3,a4,a5,a6,a7)	\
-	result = (*ia64_sal)(a0,a1,a2,a3,a4,a5,a6,a7)
+#define __IA64_FW_CALL(entry,result,a0,a1,a2,a3,a4,a5,a6,a7)	\
+	result = (*entry)(a0,a1,a2,a3,a4,a5,a6,a7)
 
-# define SAL_CALL(result,args...) do {				\
+# define IA64_FW_CALL(entry,result,args...) do {		\
 	unsigned long __ia64_sc_flags;				\
 	struct ia64_fpreg __ia64_sc_fr[6];			\
 	ia64_save_scratch_fpregs(__ia64_sc_fr);			\
 	spin_lock_irqsave(&sal_lock, __ia64_sc_flags);		\
-	__SAL_CALL(result, args);				\
+	__IA64_FW_CALL(entry, result, args);			\
 	spin_unlock_irqrestore(&sal_lock, __ia64_sc_flags);	\
 	ia64_load_scratch_fpregs(__ia64_sc_fr);			\
 } while (0)
 
+# define SAL_CALL(result,args...)				\
+	IA64_FW_CALL(ia64_sal, result, args);
+
 # define SAL_CALL_NOLOCK(result,args...) do {			\
 	unsigned long __ia64_scn_flags;				\
 	struct ia64_fpreg __ia64_scn_fr[6];			\
 	ia64_save_scratch_fpregs(__ia64_scn_fr);		\
 	local_irq_save(__ia64_scn_flags);			\
-	__SAL_CALL(result, args);				\
+	__IA64_FW_CALL(ia64_sal, result, args);			\
 	local_irq_restore(__ia64_scn_flags);			\
 	ia64_load_scratch_fpregs(__ia64_scn_fr);		\
 } while (0)
@@ -73,7 +76,7 @@ extern spinlock_t sal_lock;
 	struct ia64_fpreg __ia64_scs_fr[6];	\
 	ia64_save_scratch_fpregs(__ia64_scs_fr);	\
 	preempt_disable();			\
-	__SAL_CALL(result, args);		\
+	__IA64_FW_CALL(ia64_sal, result, args);	\
 	preempt_enable();			\
 	ia64_load_scratch_fpregs(__ia64_scs_fr);	\
 } while (0)
......
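The effect of the sal.h change: the spinlock and scratch-fpreg bookkeeping that used to be welded to the ia64_sal entry point now takes the entry point as a macro parameter, so aml_nfw.c can route AML-supplied entry points through the same wrapper while SAL_CALL becomes a one-line alias. A simplified, hypothetical illustration of that wrapper pattern (stub types, no locking), using the same GNU named-variadic macro style as the header:

#include <stdio.h>

struct retval { long status; };
typedef struct retval (*fw_entry_t)(long, long);

/* Generic wrapper: the entry point is a parameter, as in IA64_FW_CALL.
 * The real macro brackets the call with lock/irq/fpreg save-restore. */
#define FW_CALL(entry, result, args...) do {	\
	result = (*entry)(args);		\
} while (0)

static struct retval sal_stub(long a, long b)
{
	return (struct retval){ .status = a + b };
}
static fw_entry_t fw_sal = sal_stub;

/* The old fixed-entry macro becomes an alias, as SAL_CALL now is. */
#define SAL_STUB_CALL(result, args...) FW_CALL(fw_sal, result, args)

int main(void)
{
	struct retval r;

	SAL_STUB_CALL(r, 1, 2);			/* via the alias */
	printf("status = %ld\n", r.status);
	FW_CALL(fw_sal, r, 40, 2);		/* arbitrary entry point */
	printf("status = %ld\n", r.status);
	return 0;
}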