Commit 7fa2668b authored by Tony Luck, committed by David Mosberger

[PATCH] ia64: switching between CPEI & CPEP

A couple of months ago Hidetoshi Seto from Fujitsu proposed
a patch to provide switching between interrupt mode and
polling mode for corrected platform events (CPE), similar to
what we already have for processor events (CMC) [with the
obvious difference that not all platforms support an interrupt
for CPE].

I dusted it off and made a couple of very minor cleanups
(which Seto-san checked out last night on his test setup,
confirming that everything still passes).
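
The policy is: if CPE interrupts arrive faster than we can
reasonably log them (CPE_HISTORY_LENGTH of them within one
second), mask the CPE vector and fall back to the poll timer;
once a polling pass turns up nothing new (and the platform does
have a CPE vector), unmask the vector and stop polling. The
sketch below shows that policy as a small stand-alone user-space
C program; it borrows the CPE_HISTORY_LENGTH name and the
one-second window from the patch, but cpe_interrupt(), cpe_poll()
and the simulation in main() are illustrative only, not kernel code.

/*
 * Illustrative sketch of the CPEI <-> CPEP switching policy, written as a
 * small user-space program so it can be compiled and run on its own.  The
 * CPE_HISTORY_LENGTH constant and the one-second window come from the
 * patch; everything else is made up for illustration.
 */
#include <stdio.h>

#define CPE_HISTORY_LENGTH  5   /* interrupts tolerated per window */
#define WINDOW              1   /* seconds; stands in for "<= HZ" */

static long cpe_history[CPE_HISTORY_LENGTH];  /* timestamps of recent CPEs */
static int  slot;       /* next history slot to reuse */
static int  polling;    /* 0 = interrupt mode, 1 = polling mode */

/* Models the interrupt side: called for each corrected platform error. */
static void cpe_interrupt(long now)
{
        int i, count = 1;       /* the interrupt that just fired */

        if (polling)            /* vector would be masked in polling mode */
                return;

        for (i = 0; i < CPE_HISTORY_LENGTH; i++)
                if (now - cpe_history[i] <= WINDOW)
                        count++;

        if (count >= CPE_HISTORY_LENGTH) {
                /* Storm: mask the vector and let the poll timer take over. */
                polling = 1;
                printf("t=%ld: too many CPEs, switching to polling\n", now);
        } else {
                cpe_history[slot++] = now;
                if (slot == CPE_HISTORY_LENGTH)
                        slot = 0;
        }
}

/* Models the poll side: called from the poll timer. */
static void cpe_poll(long now, int new_records)
{
        if (polling && !new_records) {
                /* Quiet again: unmask the vector and stop the timer. */
                polling = 0;
                printf("t=%ld: returning to interrupt driven mode\n", now);
        }
        /*
         * (When no CPE vector exists at all, the real code instead keeps
         *  polling and backs the interval off up to MAX_CPE_POLL_INTERVAL.)
         */
}

int main(void)
{
        long t;

        for (t = 0; t < 6; t++)         /* burst: six CPEs within one second */
                cpe_interrupt(1000);
        cpe_poll(1120, 0);              /* later poll pass finds nothing new */
        return polling;
}

Note the asymmetry: the interrupt handler bails out to polling
immediately (rescheduling the timer at MIN_CPE_POLL_INTERVAL),
while the poll routine only hands control back to the interrupt
handler after a polling pass has found nothing new to log.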
Signed-off-by: Tony Luck <tony.luck@intel.com>
Signed-off-by: David Mosberger <davidm@hpl.hp.com>
parent 39659d98
@@ -108,6 +108,7 @@ struct ia64_mca_tlb_info ia64_mca_tlb_list[NR_CPUS];
#define MAX_CPE_POLL_INTERVAL (15*60*HZ) /* 15 minutes */
#define MIN_CPE_POLL_INTERVAL (2*60*HZ) /* 2 minutes */
#define CMC_POLL_INTERVAL (1*60*HZ) /* 1 minute */
#define CPE_HISTORY_LENGTH 5
#define CMC_HISTORY_LENGTH 5
static struct timer_list cpe_poll_timer;
@@ -127,6 +128,8 @@ static int cmc_polling_enabled = 1;
*/
static int cpe_poll_enabled = 1;
static int cpe_vector = -1;
extern void salinfo_log_wakeup(int type, u8 *buffer, u64 size, int irqsafe);
/*
@@ -272,14 +275,55 @@ ia64_mca_log_sal_error_record(int sal_info_type)
static irqreturn_t
ia64_mca_cpe_int_handler (int cpe_irq, void *arg, struct pt_regs *ptregs)
{
IA64_MCA_DEBUG("%s: received interrupt. CPU:%d vector = %#x\n",
__FUNCTION__, smp_processor_id(), cpe_irq);
static unsigned long cpe_history[CPE_HISTORY_LENGTH];
static int index;
static spinlock_t cpe_history_lock = SPIN_LOCK_UNLOCKED;
IA64_MCA_DEBUG("%s: received interrupt vector = %#x on CPU %d\n",
__FUNCTION__, cpe_irq, smp_processor_id());
/* SAL spec states this should run w/ interrupts enabled */
local_irq_enable();
/* Get the CMC error record and log it */
/* Get the CPE error record and log it */
ia64_mca_log_sal_error_record(SAL_INFO_TYPE_CPE);
spin_lock(&cpe_history_lock);
if (!cpe_poll_enabled && cpe_vector >= 0) {
int i, count = 1; /* we know 1 happened now */
unsigned long now = jiffies;
for (i = 0; i < CPE_HISTORY_LENGTH; i++) {
if (now - cpe_history[i] <= HZ)
count++;
}
IA64_MCA_DEBUG(KERN_INFO "CPE threshold %d/%d\n", count, CPE_HISTORY_LENGTH);
if (count >= CPE_HISTORY_LENGTH) {
cpe_poll_enabled = 1;
spin_unlock(&cpe_history_lock);
disable_irq_nosync(local_vector_to_irq(IA64_CPE_VECTOR));
/*
* Corrected errors will still be corrected, but
* make sure there's a log somewhere that indicates
* something is generating more than we can handle.
*/
printk(KERN_WARNING "WARNING: Switching to polling CPE handler; error records may be lost\n");
mod_timer(&cpe_poll_timer, jiffies + MIN_CPE_POLL_INTERVAL);
/* lock already released, get out now */
return IRQ_HANDLED;
} else {
cpe_history[index++] = now;
if (index == CPE_HISTORY_LENGTH)
index = 0;
}
}
spin_unlock(&cpe_history_lock);
return IRQ_HANDLED;
}
@@ -905,7 +949,7 @@ ia64_mca_cmc_int_handler(int cmc_irq, void *arg, struct pt_regs *ptregs)
* handled
*/
static irqreturn_t
ia64_mca_cmc_int_caller(int cpe_irq, void *arg, struct pt_regs *ptregs)
ia64_mca_cmc_int_caller(int cmc_irq, void *arg, struct pt_regs *ptregs)
{
static int start_count = -1;
unsigned int cpuid;
@@ -916,7 +960,7 @@ ia64_mca_cmc_int_caller(int cpe_irq, void *arg, struct pt_regs *ptregs)
if (start_count == -1)
start_count = IA64_LOG_COUNT(SAL_INFO_TYPE_CMC);
ia64_mca_cmc_int_handler(cpe_irq, arg, ptregs);
ia64_mca_cmc_int_handler(cmc_irq, arg, ptregs);
for (++cpuid ; cpuid < NR_CPUS && !cpu_online(cpuid) ; cpuid++);
@@ -977,7 +1021,7 @@ static irqreturn_t
ia64_mca_cpe_int_caller(int cpe_irq, void *arg, struct pt_regs *ptregs)
{
static int start_count = -1;
static int poll_time = MAX_CPE_POLL_INTERVAL;
static int poll_time = MIN_CPE_POLL_INTERVAL;
unsigned int cpuid;
cpuid = smp_processor_id();
@@ -995,15 +1039,23 @@ ia64_mca_cpe_int_caller(int cpe_irq, void *arg, struct pt_regs *ptregs)
} else {
/*
* If a log was recorded, increase our polling frequency,
* otherwise, backoff.
* otherwise, backoff or return to interrupt mode.
*/
if (start_count != IA64_LOG_COUNT(SAL_INFO_TYPE_CPE)) {
poll_time = max(MIN_CPE_POLL_INTERVAL, poll_time / 2);
} else {
} else if (cpe_vector < 0) {
poll_time = min(MAX_CPE_POLL_INTERVAL, poll_time * 2);
} else {
poll_time = MIN_CPE_POLL_INTERVAL;
printk(KERN_WARNING "Returning to interrupt driven CPE handler\n");
enable_irq(local_vector_to_irq(IA64_CPE_VECTOR));
cpe_poll_enabled = 0;
}
if (cpe_poll_enabled)
mod_timer(&cpe_poll_timer, jiffies + poll_time);
start_count = -1;
mod_timer(&cpe_poll_timer, jiffies + poll_time);
}
return IRQ_HANDLED;
@@ -1248,21 +1300,23 @@ ia64_mca_init(void)
register_percpu_irq(IA64_MCA_WAKEUP_VECTOR, &mca_wkup_irqaction);
#ifdef CONFIG_ACPI
/* Setup the CPE interrupt vector */
/* Setup the CPEI/P vector and handler */
{
irq_desc_t *desc;
unsigned int irq;
int cpev = acpi_request_vector(ACPI_INTERRUPT_CPEI);
if (cpev >= 0) {
cpe_vector = acpi_request_vector(ACPI_INTERRUPT_CPEI);
if (cpe_vector >= 0) {
for (irq = 0; irq < NR_IRQS; ++irq)
if (irq_to_vector(irq) == cpev) {
if (irq_to_vector(irq) == cpe_vector) {
desc = irq_descp(irq);
desc->status |= IRQ_PER_CPU;
setup_irq(irq, &mca_cpe_irqaction);
}
ia64_mca_register_cpev(cpev);
ia64_mca_register_cpev(cpe_vector);
}
register_percpu_irq(IA64_CPEP_VECTOR, &mca_cpep_irqaction);
}
#endif
@@ -1302,9 +1356,10 @@ ia64_mca_late_init(void)
#ifdef CONFIG_ACPI
/* If platform doesn't support CPEI, get the timer going. */
if (acpi_request_vector(ACPI_INTERRUPT_CPEI) < 0 && cpe_poll_enabled) {
register_percpu_irq(IA64_CPEP_VECTOR, &mca_cpep_irqaction);
if (cpe_vector < 0 && cpe_poll_enabled) {
ia64_mca_cpe_poll(0UL);
} else {
cpe_poll_enabled = 0;
}
#endif