Commit b1ffe92e authored by Corey Minyard's avatar Corey Minyard Committed by Linus Torvalds

[PATCH] IPMI driver version 19 release

This fixes some performance problems.  Some vendors implement firmware
updates over IPMI, and this speeds up that process quite a bit.

 * Improve the "send - wait for response - send - wait for response -
   etc" performance when using high-res timers.  Before, an ~10ms delay
   would be added to each message, because it didn't restart the timer
   if nothing was happening when a new message was started.

 * Add some checking for leaked messages.
parent 856eeb53
...@@ -61,6 +61,14 @@ ...@@ -61,6 +61,14 @@
/* Measure times between events in the driver. */ /* Measure times between events in the driver. */
#undef DEBUG_TIMING #undef DEBUG_TIMING
/* Timing parameters. Call every 10 ms when not doing anything,
otherwise call every KCS_SHORT_TIMEOUT_USEC microseconds. */
#define KCS_TIMEOUT_TIME_USEC 10000
#define KCS_USEC_PER_JIFFY (1000000/HZ)
#define KCS_TIMEOUT_JIFFIES (KCS_TIMEOUT_TIME_USEC/KCS_USEC_PER_JIFFY)
#define KCS_SHORT_TIMEOUT_USEC 250 /* .25ms when the SM requests a
short timeout */
#ifdef CONFIG_IPMI_KCS #ifdef CONFIG_IPMI_KCS
/* This forces a dependency to the config file for this option. */ /* This forces a dependency to the config file for this option. */
#endif #endif
...@@ -132,6 +140,8 @@ struct kcs_info ...@@ -132,6 +140,8 @@ struct kcs_info
int interrupt_disabled; int interrupt_disabled;
}; };
static void kcs_restart_short_timer(struct kcs_info *kcs_info);
static void deliver_recv_msg(struct kcs_info *kcs_info, struct ipmi_smi_msg *msg) static void deliver_recv_msg(struct kcs_info *kcs_info, struct ipmi_smi_msg *msg)
{ {
/* Deliver the message to the upper layer with the lock /* Deliver the message to the upper layer with the lock
...@@ -309,6 +319,9 @@ static void handle_transaction_done(struct kcs_info *kcs_info) ...@@ -309,6 +319,9 @@ static void handle_transaction_done(struct kcs_info *kcs_info)
#endif #endif
switch (kcs_info->kcs_state) { switch (kcs_info->kcs_state) {
case KCS_NORMAL: case KCS_NORMAL:
if (!kcs_info->curr_msg)
break;
kcs_info->curr_msg->rsp_size kcs_info->curr_msg->rsp_size
= kcs_get_result(kcs_info->kcs_sm, = kcs_get_result(kcs_info->kcs_sm,
kcs_info->curr_msg->rsp, kcs_info->curr_msg->rsp,
...@@ -563,8 +576,9 @@ static void sender(void *send_info, ...@@ -563,8 +576,9 @@ static void sender(void *send_info,
spin_lock_irqsave(&(kcs_info->kcs_lock), flags); spin_lock_irqsave(&(kcs_info->kcs_lock), flags);
result = kcs_event_handler(kcs_info, 0); result = kcs_event_handler(kcs_info, 0);
while (result != KCS_SM_IDLE) { while (result != KCS_SM_IDLE) {
udelay(500); udelay(KCS_SHORT_TIMEOUT_USEC);
result = kcs_event_handler(kcs_info, 500); result = kcs_event_handler(kcs_info,
KCS_SHORT_TIMEOUT_USEC);
} }
spin_unlock_irqrestore(&(kcs_info->kcs_lock), flags); spin_unlock_irqrestore(&(kcs_info->kcs_lock), flags);
return; return;
...@@ -582,6 +596,7 @@ static void sender(void *send_info, ...@@ -582,6 +596,7 @@ static void sender(void *send_info,
&& (kcs_info->curr_msg == NULL)) && (kcs_info->curr_msg == NULL))
{ {
start_next_msg(kcs_info); start_next_msg(kcs_info);
kcs_restart_short_timer(kcs_info);
} }
spin_unlock_irqrestore(&(kcs_info->kcs_lock), flags); spin_unlock_irqrestore(&(kcs_info->kcs_lock), flags);
} }
...@@ -598,8 +613,9 @@ static void set_run_to_completion(void *send_info, int i_run_to_completion) ...@@ -598,8 +613,9 @@ static void set_run_to_completion(void *send_info, int i_run_to_completion)
if (i_run_to_completion) { if (i_run_to_completion) {
result = kcs_event_handler(kcs_info, 0); result = kcs_event_handler(kcs_info, 0);
while (result != KCS_SM_IDLE) { while (result != KCS_SM_IDLE) {
udelay(500); udelay(KCS_SHORT_TIMEOUT_USEC);
result = kcs_event_handler(kcs_info, 500); result = kcs_event_handler(kcs_info,
KCS_SHORT_TIMEOUT_USEC);
} }
} }
...@@ -613,14 +629,42 @@ static void request_events(void *send_info) ...@@ -613,14 +629,42 @@ static void request_events(void *send_info)
atomic_set(&kcs_info->req_events, 1); atomic_set(&kcs_info->req_events, 1);
} }
/* Call every 10 ms. */
#define KCS_TIMEOUT_TIME_USEC 10000
#define KCS_USEC_PER_JIFFY (1000000/HZ)
#define KCS_TIMEOUT_JIFFIES (KCS_TIMEOUT_TIME_USEC/KCS_USEC_PER_JIFFY)
#define KCS_SHORT_TIMEOUT_USEC 500 /* .5ms when the SM requests a
short timeout */
static int initialized = 0; static int initialized = 0;
/* Must be called with interrupts off and with the kcs_lock held. */
/* Pull the poll timer forward so the KCS state machine is serviced
   again after the short timeout instead of waiting out the full poll
   period (KCS_TIMEOUT_TIME_USEC, ~10 ms).  Called when a new message
   transaction is started, so back-to-back request/response exchanges
   are not each penalized by a full timer tick. */
static void kcs_restart_short_timer(struct kcs_info *kcs_info)
{
	/* If we don't delete the timer, then it will go off
	   immediately, anyway.  So we only process if we
	   actually delete the timer. */
	if (del_timer(&(kcs_info->kcs_timer))) {
#ifdef CONFIG_HIGH_RES_TIMERS
		unsigned long jiffies_now;

		/* We already have irqsave on, so no need for it
		   here. */
		read_lock(&xtime_lock);
		jiffies_now = jiffies;
		kcs_info->kcs_timer.expires = jiffies_now;

		kcs_info->kcs_timer.sub_expires
			= quick_update_jiffies_sub(jiffies_now);
		read_unlock(&xtime_lock);

		/* Advance by the short timeout (converted to arch
		   cycles), then normalize sub_expires back into the
		   [0, cycles_per_jiffies) range, carrying any whole
		   jiffies into .expires. */
		kcs_info->kcs_timer.sub_expires
			+= usec_to_arch_cycles(KCS_SHORT_TIMEOUT_USEC);
		while (kcs_info->kcs_timer.sub_expires >= cycles_per_jiffies) {
			kcs_info->kcs_timer.expires++;
			kcs_info->kcs_timer.sub_expires -= cycles_per_jiffies;
		}
#else
		/* Without high-res timers the finest granularity
		   available is one jiffy. */
		kcs_info->kcs_timer.expires = jiffies + 1;
#endif
		add_timer(&(kcs_info->kcs_timer));
	}
}
static void kcs_timeout(unsigned long data) static void kcs_timeout(unsigned long data)
{ {
struct kcs_info *kcs_info = (struct kcs_info *) data; struct kcs_info *kcs_info = (struct kcs_info *) data;
...@@ -643,12 +687,11 @@ static void kcs_timeout(unsigned long data) ...@@ -643,12 +687,11 @@ static void kcs_timeout(unsigned long data)
printk("**Timer: %d.%9.9d\n", t.tv_sec, t.tv_usec); printk("**Timer: %d.%9.9d\n", t.tv_sec, t.tv_usec);
#endif #endif
jiffies_now = jiffies; jiffies_now = jiffies;
time_diff = ((jiffies_now - kcs_info->last_timeout_jiffies) time_diff = ((jiffies_now - kcs_info->last_timeout_jiffies)
* KCS_USEC_PER_JIFFY); * KCS_USEC_PER_JIFFY);
kcs_result = kcs_event_handler(kcs_info, time_diff); kcs_result = kcs_event_handler(kcs_info, time_diff);
spin_unlock_irqrestore(&(kcs_info->kcs_lock), flags);
kcs_info->last_timeout_jiffies = jiffies_now; kcs_info->last_timeout_jiffies = jiffies_now;
if ((kcs_info->irq) && (! kcs_info->interrupt_disabled)) { if ((kcs_info->irq) && (! kcs_info->interrupt_disabled)) {
...@@ -669,6 +712,7 @@ static void kcs_timeout(unsigned long data) ...@@ -669,6 +712,7 @@ static void kcs_timeout(unsigned long data)
} }
} else { } else {
kcs_info->kcs_timer.expires = jiffies + KCS_TIMEOUT_JIFFIES; kcs_info->kcs_timer.expires = jiffies + KCS_TIMEOUT_JIFFIES;
kcs_info->kcs_timer.sub_expires = 0;
} }
#else #else
/* If requested, take the shortest delay possible */ /* If requested, take the shortest delay possible */
...@@ -681,6 +725,7 @@ static void kcs_timeout(unsigned long data) ...@@ -681,6 +725,7 @@ static void kcs_timeout(unsigned long data)
do_add_timer: do_add_timer:
add_timer(&(kcs_info->kcs_timer)); add_timer(&(kcs_info->kcs_timer));
spin_unlock_irqrestore(&(kcs_info->kcs_lock), flags);
} }
static void kcs_irq_handler(int irq, void *data, struct pt_regs *regs) static void kcs_irq_handler(int irq, void *data, struct pt_regs *regs)
......
...@@ -1765,9 +1765,13 @@ static void ipmi_timeout(unsigned long data) ...@@ -1765,9 +1765,13 @@ static void ipmi_timeout(unsigned long data)
} }
static atomic_t smi_msg_inuse_count = ATOMIC_INIT(0);
static atomic_t recv_msg_inuse_count = ATOMIC_INIT(0);
/* FIXME - convert these to slabs. */ /* FIXME - convert these to slabs. */
static void free_smi_msg(struct ipmi_smi_msg *msg) static void free_smi_msg(struct ipmi_smi_msg *msg)
{ {
atomic_dec(&smi_msg_inuse_count);
kfree(msg); kfree(msg);
} }
...@@ -1775,13 +1779,16 @@ struct ipmi_smi_msg *ipmi_alloc_smi_msg(void) ...@@ -1775,13 +1779,16 @@ struct ipmi_smi_msg *ipmi_alloc_smi_msg(void)
{ {
struct ipmi_smi_msg *rv; struct ipmi_smi_msg *rv;
rv = kmalloc(sizeof(struct ipmi_smi_msg), GFP_ATOMIC); rv = kmalloc(sizeof(struct ipmi_smi_msg), GFP_ATOMIC);
if (rv) if (rv) {
rv->done = free_smi_msg; rv->done = free_smi_msg;
atomic_inc(&smi_msg_inuse_count);
}
return rv; return rv;
} }
static void free_recv_msg(struct ipmi_recv_msg *msg) static void free_recv_msg(struct ipmi_recv_msg *msg)
{ {
atomic_dec(&recv_msg_inuse_count);
kfree(msg); kfree(msg);
} }
...@@ -1790,8 +1797,10 @@ struct ipmi_recv_msg *ipmi_alloc_recv_msg(void) ...@@ -1790,8 +1797,10 @@ struct ipmi_recv_msg *ipmi_alloc_recv_msg(void)
struct ipmi_recv_msg *rv; struct ipmi_recv_msg *rv;
rv = kmalloc(sizeof(struct ipmi_recv_msg), GFP_ATOMIC); rv = kmalloc(sizeof(struct ipmi_recv_msg), GFP_ATOMIC);
if (rv) if (rv) {
rv->done = free_recv_msg; rv->done = free_recv_msg;
atomic_inc(&recv_msg_inuse_count);
}
return rv; return rv;
} }
...@@ -1924,6 +1933,8 @@ static __init int ipmi_init_msghandler(void) ...@@ -1924,6 +1933,8 @@ static __init int ipmi_init_msghandler(void)
static __exit void cleanup_ipmi(void) static __exit void cleanup_ipmi(void)
{ {
int count;
if (!initialized) if (!initialized)
return; return;
...@@ -1940,6 +1951,16 @@ static __exit void cleanup_ipmi(void) ...@@ -1940,6 +1951,16 @@ static __exit void cleanup_ipmi(void)
} }
initialized = 0; initialized = 0;
/* Check for buffer leaks. */
count = atomic_read(&smi_msg_inuse_count);
if (count != 0)
printk("ipmi_msghandler: SMI message count %d at exit\n",
count);
count = atomic_read(&recv_msg_inuse_count);
if (count != 0)
printk("ipmi_msghandler: recv message count %d at exit\n",
count);
} }
module_exit(cleanup_ipmi); module_exit(cleanup_ipmi);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment