Commit c2896def authored by Linus Torvalds

Merge branch 'ipmi' (emailed ipmi fixes)

Merge ipmi fixes from Corey Minyard:
 "Things collected since last kernel release.

  Some of these are pretty important.  The first three are bug fixes.
  The next two are to hopefully make everyone happy about allowing
  ACPI to be on all the time and not have IPMI have an effect on the
  system when not in use.  The last is a little cleanup"

* emailed patches from Corey Minyard <cminyard@mvista.com>:
  ipmi: boolify some things
  ipmi: Turn off all activity on an idle ipmi interface
  ipmi: Turn off default probing of interfaces
  ipmi: Reset the KCS timeout when starting error recovery
  ipmi: Fix a race restarting the timer
  Char: ipmi_bt_sm, fix infinite loop
parents 88764e0a 7aefac26
--- a/drivers/char/ipmi/Kconfig
+++ b/drivers/char/ipmi/Kconfig
@@ -50,6 +50,18 @@ config IPMI_SI
 	  Currently, only KCS and SMIC are supported.  If
 	  you are using IPMI, you should probably say "y" here.
 
+config IPMI_SI_PROBE_DEFAULTS
+	bool 'Probe for all possible IPMI system interfaces by default'
+	default n
+	depends on IPMI_SI
+	help
+	  Modern systems will usually expose IPMI interfaces via a discoverable
+	  firmware mechanism such as ACPI or DMI. Older systems do not, and so
+	  the driver is forced to probe hardware manually. This may cause boot
+	  delays. Say "n" here to disable this manual probing. IPMI will then
+	  only be available on older systems if the "ipmi_si_intf.trydefaults=1"
+	  boot argument is passed.
+
 config IPMI_WATCHDOG
 	tristate 'IPMI Watchdog Timer'
 	help
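Note: with this option disabled, legacy hardware can still be reached through the boot argument the help text names (ipmi_si_intf.trydefaults=1). When ipmi_si is built as a module, loading it with a trydefaults=1 module parameter should be the equivalent knob; that form is an assumption inferred from the si_trydefaults variable further down, so confirm it against modinfo for your kernel.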
--- a/drivers/char/ipmi/ipmi_bt_sm.c
+++ b/drivers/char/ipmi/ipmi_bt_sm.c
@@ -352,7 +352,7 @@ static inline void write_all_bytes(struct si_sm_data *bt)
 
 static inline int read_all_bytes(struct si_sm_data *bt)
 {
-	unsigned char i;
+	unsigned int i;
 
 	/*
 	 * length is "framing info", minimum = 4: NetFn, Seq, Cmd, cCode.
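The one-character change above is the entire infinite-loop fix: the read loop's bound can exceed 255, and an unsigned char index wraps around before ever reaching it. A minimal user-space sketch of the failure mode, with an assumed bound and a guard added so the demo terminates (not the driver's code):

    #include <stdio.h>

    int main(void)
    {
        unsigned int read_count = 300;  /* assumed bound > 255 */
        unsigned char i;                /* the old, too-narrow index */
        unsigned int guard = 0;

        for (i = 0; i < read_count; i++) {
            /* i wraps from 255 back to 0, so i < read_count never fails */
            if (++guard > 1000)
                break;                  /* demo guard; the real loop spun forever */
        }
        printf("broke out after %u iterations\n", guard);
        return 0;
    }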
--- a/drivers/char/ipmi/ipmi_kcs_sm.c
+++ b/drivers/char/ipmi/ipmi_kcs_sm.c
@@ -251,6 +251,7 @@ static inline int check_obf(struct si_sm_data *kcs, unsigned char status,
 	if (!GET_STATUS_OBF(status)) {
 		kcs->obf_timeout -= time;
 		if (kcs->obf_timeout < 0) {
+			kcs->obf_timeout = OBF_RETRY_TIMEOUT;
 			start_error_recovery(kcs, "OBF not ready in time");
 			return 1;
 		}
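The added line re-arms the OBF countdown on the way into error recovery; without it the budget stays negative, so every later wait for OBF would trip recovery on its first check. A hedged standalone sketch of the countdown pattern (the names and budget value are illustrative, not the driver's):

    #define OBF_RETRY_BUDGET 1000000    /* stand-in for OBF_RETRY_TIMEOUT */

    struct countdown {
        long remaining;
    };

    int consume(struct countdown *c, long elapsed)
    {
        c->remaining -= elapsed;
        if (c->remaining < 0) {
            /*
             * Re-arm on the way into recovery, as the hunk above now
             * does; otherwise 'remaining' stays negative and every
             * later wait expires on its first check.
             */
            c->remaining = OBF_RETRY_BUDGET;
            return -1;  /* caller starts error recovery */
        }
        return 0;
    }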
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -55,6 +55,7 @@ static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
 static int ipmi_init_msghandler(void);
 static void smi_recv_tasklet(unsigned long);
 static void handle_new_recv_msgs(ipmi_smi_t intf);
+static void need_waiter(ipmi_smi_t intf);
 
 static int initialized;
@@ -73,14 +74,28 @@ static struct proc_dir_entry *proc_ipmi_root;
  */
 #define MAX_MSG_TIMEOUT		60000
 
+/* Call every ~1000 ms. */
+#define IPMI_TIMEOUT_TIME	1000
+
+/* How many jiffies does it take to get to the timeout time. */
+#define IPMI_TIMEOUT_JIFFIES	((IPMI_TIMEOUT_TIME * HZ) / 1000)
+
+/*
+ * Request events from the queue every second (this is the number of
+ * IPMI_TIMEOUT_TIMES between event requests).  Hopefully, in the
+ * future, IPMI will add a way to know immediately if an event is in
+ * the queue and this silliness can go away.
+ */
+#define IPMI_REQUEST_EV_TIME	(1000 / (IPMI_TIMEOUT_TIME))
+
 /*
  * The main "user" data structure.
  */
 struct ipmi_user {
 	struct list_head link;
 
-	/* Set to "0" when the user is destroyed. */
-	int valid;
+	/* Set to false when the user is destroyed. */
+	bool valid;
 
 	struct kref refcount;
@@ -92,7 +107,7 @@ struct ipmi_user {
 	ipmi_smi_t intf;
 
 	/* Does this interface receive IPMI events? */
-	int gets_events;
+	bool gets_events;
 };
 
 struct cmd_rcvr {
@@ -383,6 +398,9 @@ struct ipmi_smi {
 	unsigned int waiting_events_count; /* How many events in queue? */
 	char delivering_events;
 	char event_msg_printed;
+	atomic_t event_waiters;
+	unsigned int ticks_to_req_ev;
+	int last_needs_timer;
 
 	/*
 	 * The event receiver for my BMC, only really used at panic
@@ -395,7 +413,7 @@ struct ipmi_smi {
 	/* For handling of maintenance mode. */
 	int maintenance_mode;
-	int maintenance_mode_enable;
+	bool maintenance_mode_enable;
 	int auto_maintenance_timeout;
 	spinlock_t maintenance_mode_lock; /* Used in a timer... */
@@ -451,7 +469,6 @@ static DEFINE_MUTEX(ipmi_interfaces_mutex);
 static LIST_HEAD(smi_watchers);
 static DEFINE_MUTEX(smi_watchers_mutex);
 
-
 #define ipmi_inc_stat(intf, stat) \
 	atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
 #define ipmi_get_stat(intf, stat) \
@@ -772,6 +789,7 @@ static int intf_next_seq(ipmi_smi_t intf,
 		*seq = i;
 		*seqid = intf->seq_table[i].seqid;
 		intf->curr_seq = (i+1)%IPMI_IPMB_NUM_SEQ;
+		need_waiter(intf);
 	} else {
 		rv = -EAGAIN;
 	}
@@ -941,7 +959,7 @@ int ipmi_create_user(unsigned int if_num,
 	new_user->handler = handler;
 	new_user->handler_data = handler_data;
 	new_user->intf = intf;
-	new_user->gets_events = 0;
+	new_user->gets_events = false;
 
 	if (!try_module_get(intf->handlers->owner)) {
 		rv = -ENODEV;
@@ -962,10 +980,15 @@ int ipmi_create_user(unsigned int if_num,
 	 */
 	mutex_unlock(&ipmi_interfaces_mutex);
 
-	new_user->valid = 1;
+	new_user->valid = true;
 	spin_lock_irqsave(&intf->seq_lock, flags);
 	list_add_rcu(&new_user->link, &intf->users);
 	spin_unlock_irqrestore(&intf->seq_lock, flags);
+	if (handler->ipmi_watchdog_pretimeout) {
+		/* User wants pretimeouts, so make sure to watch for them. */
+		if (atomic_inc_return(&intf->event_waiters) == 1)
+			need_waiter(intf);
+	}
 	*user = new_user;
 	return 0;
@@ -1019,7 +1042,13 @@ int ipmi_destroy_user(ipmi_user_t user)
 	struct cmd_rcvr *rcvr;
 	struct cmd_rcvr *rcvrs = NULL;
 
-	user->valid = 0;
+	user->valid = false;
+
+	if (user->handler->ipmi_watchdog_pretimeout)
+		atomic_dec(&intf->event_waiters);
+
+	if (user->gets_events)
+		atomic_dec(&intf->event_waiters);
 
 	/* Remove the user from the interface's sequence table. */
 	spin_lock_irqsave(&intf->seq_lock, flags);
@@ -1155,25 +1184,23 @@ int ipmi_set_maintenance_mode(ipmi_user_t user, int mode)
 	if (intf->maintenance_mode != mode) {
 		switch (mode) {
 		case IPMI_MAINTENANCE_MODE_AUTO:
-			intf->maintenance_mode = mode;
 			intf->maintenance_mode_enable
 				= (intf->auto_maintenance_timeout > 0);
 			break;
 
 		case IPMI_MAINTENANCE_MODE_OFF:
-			intf->maintenance_mode = mode;
-			intf->maintenance_mode_enable = 0;
+			intf->maintenance_mode_enable = false;
 			break;
 
 		case IPMI_MAINTENANCE_MODE_ON:
-			intf->maintenance_mode = mode;
-			intf->maintenance_mode_enable = 1;
+			intf->maintenance_mode_enable = true;
 			break;
 
 		default:
 			rv = -EINVAL;
 			goto out_unlock;
 		}
+		intf->maintenance_mode = mode;
+
 		maintenance_mode_update(intf);
 	}
@@ -1184,7 +1211,7 @@ int ipmi_set_maintenance_mode(ipmi_user_t user, int mode)
 }
 EXPORT_SYMBOL(ipmi_set_maintenance_mode);
 
-int ipmi_set_gets_events(ipmi_user_t user, int val)
+int ipmi_set_gets_events(ipmi_user_t user, bool val)
 {
 	unsigned long flags;
 	ipmi_smi_t intf = user->intf;
@@ -1194,8 +1221,18 @@ int ipmi_set_gets_events(ipmi_user_t user, bool val)
 	INIT_LIST_HEAD(&msgs);
 
 	spin_lock_irqsave(&intf->events_lock, flags);
+	if (user->gets_events == val)
+		goto out;
+
 	user->gets_events = val;
 
+	if (val) {
+		if (atomic_inc_return(&intf->event_waiters) == 1)
+			need_waiter(intf);
+	} else {
+		atomic_dec(&intf->event_waiters);
+	}
+
 	if (intf->delivering_events)
 		/*
 		 * Another thread is delivering events for this, so
@@ -1289,6 +1326,9 @@ int ipmi_register_for_cmd(ipmi_user_t user,
 		goto out_unlock;
 	}
 
+	if (atomic_inc_return(&intf->event_waiters) == 1)
+		need_waiter(intf);
+
 	list_add_rcu(&rcvr->link, &intf->cmd_rcvrs);
 
  out_unlock:
@@ -1330,6 +1370,7 @@ int ipmi_unregister_for_cmd(ipmi_user_t user,
 	mutex_unlock(&intf->cmd_rcvrs_mutex);
 	synchronize_rcu();
 	while (rcvrs) {
+		atomic_dec(&intf->event_waiters);
 		rcvr = rcvrs;
 		rcvrs = rcvr->next;
 		kfree(rcvr);
@@ -1535,7 +1576,7 @@ static int i_ipmi_request(ipmi_user_t user,
 					= IPMI_MAINTENANCE_MODE_TIMEOUT;
 				if (!intf->maintenance_mode
 				    && !intf->maintenance_mode_enable) {
-					intf->maintenance_mode_enable = 1;
+					intf->maintenance_mode_enable = true;
 					maintenance_mode_update(intf);
 				}
 				spin_unlock_irqrestore(&intf->maintenance_mode_lock,
@@ -2876,6 +2917,8 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
 		     (unsigned long) intf);
 	atomic_set(&intf->watchdog_pretimeouts_to_deliver, 0);
 	spin_lock_init(&intf->events_lock);
+	atomic_set(&intf->event_waiters, 0);
+	intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
 	INIT_LIST_HEAD(&intf->waiting_events);
 	intf->waiting_events_count = 0;
 	mutex_init(&intf->cmd_rcvrs_mutex);
@@ -3965,7 +4008,8 @@ smi_from_recv_msg(ipmi_smi_t intf, struct ipmi_recv_msg *recv_msg,
 
 static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
 			      struct list_head *timeouts, long timeout_period,
-			      int slot, unsigned long *flags)
+			      int slot, unsigned long *flags,
+			      unsigned int *waiting_msgs)
 {
 	struct ipmi_recv_msg *msg;
 	struct ipmi_smi_handlers *handlers;
@@ -3977,8 +4021,10 @@ static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
 		return;
 
 	ent->timeout -= timeout_period;
-	if (ent->timeout > 0)
+	if (ent->timeout > 0) {
+		(*waiting_msgs)++;
 		return;
+	}
 
 	if (ent->retries_left == 0) {
 		/* The message has used all its retries. */
@@ -3995,6 +4041,8 @@ static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
 		struct ipmi_smi_msg *smi_msg;
 		/* More retries, send again. */
 
+		(*waiting_msgs)++;
+
 		/*
 		 * Start with the max timer, set to normal timer after
 		 * the message is sent.
@@ -4040,17 +4088,13 @@ static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
 	}
 }
 
-static void ipmi_timeout_handler(long timeout_period)
+static unsigned int ipmi_timeout_handler(ipmi_smi_t intf, long timeout_period)
 {
-	ipmi_smi_t intf;
 	struct list_head timeouts;
 	struct ipmi_recv_msg *msg, *msg2;
 	unsigned long flags;
 	int i;
+	unsigned int waiting_msgs = 0;
 
-	rcu_read_lock();
-	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
-		tasklet_schedule(&intf->recv_tasklet);
-
 	/*
 	 * Go through the seq table and find any messages that
@@ -4062,7 +4106,7 @@ static void ipmi_timeout_handler(long timeout_period)
 	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++)
 		check_msg_timeout(intf, &(intf->seq_table[i]),
 				  &timeouts, timeout_period, i,
-				  &flags);
+				  &flags, &waiting_msgs);
 	spin_unlock_irqrestore(&intf->seq_lock, flags);
 
 	list_for_each_entry_safe(msg, msg2, &timeouts, link)
@@ -4083,74 +4127,79 @@ static void ipmi_timeout_handler(long timeout_period)
 				-= timeout_period;
 			if (!intf->maintenance_mode
 			    && (intf->auto_maintenance_timeout <= 0)) {
-				intf->maintenance_mode_enable = 0;
+				intf->maintenance_mode_enable = false;
 				maintenance_mode_update(intf);
 			}
 		}
 		spin_unlock_irqrestore(&intf->maintenance_mode_lock,
 				       flags);
 	}
-	}
 
-	rcu_read_unlock();
+	tasklet_schedule(&intf->recv_tasklet);
+
+	return waiting_msgs;
 }
 
-static void ipmi_request_event(void)
+static void ipmi_request_event(ipmi_smi_t intf)
 {
-	ipmi_smi_t intf;
 	struct ipmi_smi_handlers *handlers;
 
-	rcu_read_lock();
-
-	/*
-	 * Called from the timer, no need to check if handlers is
-	 * valid.
-	 */
-	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
 	/* No event requests when in maintenance mode. */
 	if (intf->maintenance_mode_enable)
-		continue;
+		return;
 
 	handlers = intf->handlers;
 	if (handlers)
 		handlers->request_events(intf->send_info);
-	}
-
-	rcu_read_unlock();
 }
 
 static struct timer_list ipmi_timer;
 
-/* Call every ~1000 ms. */
-#define IPMI_TIMEOUT_TIME	1000
-
-/* How many jiffies does it take to get to the timeout time. */
-#define IPMI_TIMEOUT_JIFFIES	((IPMI_TIMEOUT_TIME * HZ) / 1000)
-
-/*
- * Request events from the queue every second (this is the number of
- * IPMI_TIMEOUT_TIMES between event requests).  Hopefully, in the
- * future, IPMI will add a way to know immediately if an event is in
- * the queue and this silliness can go away.
- */
-#define IPMI_REQUEST_EV_TIME	(1000 / (IPMI_TIMEOUT_TIME))
-
 static atomic_t stop_operation;
-static unsigned int ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
 
 static void ipmi_timeout(unsigned long data)
 {
+	ipmi_smi_t intf;
+	int nt = 0;
+
 	if (atomic_read(&stop_operation))
 		return;
 
-	ticks_to_req_ev--;
-	if (ticks_to_req_ev == 0) {
-		ipmi_request_event();
-		ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
-	}
+	rcu_read_lock();
+	list_for_each_entry_rcu(intf, &ipmi_interfaces, link) {
+		int lnt = 0;
+
+		if (atomic_read(&intf->event_waiters)) {
+			intf->ticks_to_req_ev--;
+			if (intf->ticks_to_req_ev == 0) {
+				ipmi_request_event(intf);
+				intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
+			}
+			lnt++;
+		}
 
-	ipmi_timeout_handler(IPMI_TIMEOUT_TIME);
+		lnt += ipmi_timeout_handler(intf, IPMI_TIMEOUT_TIME);
 
-	mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
+		lnt = !!lnt;
+		if (lnt != intf->last_needs_timer &&
+		    intf->handlers->set_need_watch)
+			intf->handlers->set_need_watch(intf->send_info, lnt);
+		intf->last_needs_timer = lnt;
+
+		nt += lnt;
+	}
+	rcu_read_unlock();
+
+	if (nt)
+		mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
 }
 
+static void need_waiter(ipmi_smi_t intf)
+{
+	/* Racy, but worst case we start the timer twice. */
+	if (!timer_pending(&ipmi_timer))
+		mod_timer(&ipmi_timer, jiffies + IPMI_TIMEOUT_JIFFIES);
+}
+
 static atomic_t smi_msg_inuse_count = ATOMIC_INIT(0);
 static atomic_t recv_msg_inuse_count = ATOMIC_INIT(0);
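The msghandler changes above all feed one idle test: event_waiters counts users who want events, watchdog pretimeouts, or command reception; the per-tick handler now reports how many messages are still in flight; and the global timer re-arms only while either is nonzero, with need_waiter() reviving it when a waiter appears. A compilable user-space model of just that control flow (every name here is a stand-in, not driver code):

    #include <stdbool.h>

    static int event_waiters;  /* models intf->event_waiters */
    static int waiting_msgs;   /* models ipmi_timeout_handler()'s return */
    static bool timer_armed;   /* models timer_pending(&ipmi_timer) */

    static void arm_timer(void)
    {
        timer_armed = true;    /* stands in for mod_timer() */
    }

    /* Models ipmi_timeout(): re-arm only while there is work to watch. */
    static void tick(void)
    {
        timer_armed = false;
        if (event_waiters || waiting_msgs)
            arm_timer();
        /* otherwise the timer dies; need_waiter() revives it on demand */
    }

    /* Models need_waiter(): racy, but worst case the timer is armed twice. */
    static void need_waiter_model(void)
    {
        if (!timer_armed)
            arm_timer();
    }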
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -217,7 +217,7 @@ struct smi_info
 	unsigned char msg_flags;
 
 	/* Does the BMC have an event buffer? */
-	char has_event_buffer;
+	bool has_event_buffer;
 
 	/*
 	 * If set to true, this will request events the next time the
@@ -230,7 +230,7 @@ struct smi_info
 	 * call.  Generally used after a panic to make sure stuff goes
 	 * out.
 	 */
-	int run_to_completion;
+	bool run_to_completion;
 
 	/* The I/O port of an SI interface. */
 	int port;
@@ -248,19 +248,25 @@ struct smi_info
 	/* The timer for this si. */
 	struct timer_list si_timer;
 
+	/* This flag is set, if the timer is running (timer_pending() isn't enough) */
+	bool timer_running;
+
 	/* The time (in jiffies) the last timeout occurred at. */
 	unsigned long last_timeout_jiffies;
 
 	/* Used to gracefully stop the timer without race conditions. */
 	atomic_t stop_operation;
 
+	/* Are we waiting for the events, pretimeouts, received msgs? */
+	atomic_t need_watch;
+
 	/*
 	 * The driver will disable interrupts when it gets into a
 	 * situation where it cannot handle messages due to lack of
 	 * memory.  Once that situation clears up, it will re-enable
 	 * interrupts.
 	 */
-	int interrupt_disabled;
+	bool interrupt_disabled;
 
 	/* From the get device id response... */
 	struct ipmi_device_id device_id;
@@ -273,7 +279,7 @@ struct smi_info
 	 * True if we allocated the device, false if it came from
 	 * someplace else (like PCI).
 	 */
-	int dev_registered;
+	bool dev_registered;
 
 	/* Slave address, could be reported from DMI. */
 	unsigned char slave_addr;
@@ -297,19 +303,19 @@ struct smi_info
 static int force_kipmid[SI_MAX_PARMS];
 static int num_force_kipmid;
 #ifdef CONFIG_PCI
-static int pci_registered;
+static bool pci_registered;
 #endif
 #ifdef CONFIG_ACPI
-static int pnp_registered;
+static bool pnp_registered;
 #endif
 #ifdef CONFIG_PARISC
-static int parisc_registered;
+static bool parisc_registered;
 #endif
 
 static unsigned int kipmid_max_busy_us[SI_MAX_PARMS];
 static int num_max_busy_us;
 
-static int unload_when_empty = 1;
+static bool unload_when_empty = true;
 
 static int add_smi(struct smi_info *smi);
 static int try_smi_init(struct smi_info *smi);
@@ -434,6 +440,13 @@ static void start_clear_flags(struct smi_info *smi_info)
 	smi_info->si_state = SI_CLEARING_FLAGS;
 }
 
+static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val)
+{
+	smi_info->last_timeout_jiffies = jiffies;
+	mod_timer(&smi_info->si_timer, new_val);
+	smi_info->timer_running = true;
+}
+
 /*
 * When we have a situtaion where we run out of memory and cannot
 * allocate messages, we just leave them in the BMC and run the system
@@ -444,10 +457,9 @@ static inline void disable_si_irq(struct smi_info *smi_info)
 {
 	if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
 		start_disable_irq(smi_info);
-		smi_info->interrupt_disabled = 1;
+		smi_info->interrupt_disabled = true;
 		if (!atomic_read(&smi_info->stop_operation))
-			mod_timer(&smi_info->si_timer,
-				  jiffies + SI_TIMEOUT_JIFFIES);
+			smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);
 	}
 }
@@ -455,7 +467,7 @@ static inline void enable_si_irq(struct smi_info *smi_info)
 {
 	if ((smi_info->irq) && (smi_info->interrupt_disabled)) {
 		start_enable_irq(smi_info);
-		smi_info->interrupt_disabled = 0;
+		smi_info->interrupt_disabled = false;
 	}
 }
@@ -700,7 +712,7 @@ static void handle_transaction_done(struct smi_info *smi_info)
 			dev_warn(smi_info->dev,
 				 "Maybe ok, but ipmi might run very slowly.\n");
 		} else
-			smi_info->interrupt_disabled = 0;
+			smi_info->interrupt_disabled = false;
 		smi_info->si_state = SI_NORMAL;
 		break;
 	}
@@ -853,6 +865,19 @@ static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
 	return si_sm_result;
 }
 
+static void check_start_timer_thread(struct smi_info *smi_info)
+{
+	if (smi_info->si_state == SI_NORMAL && smi_info->curr_msg == NULL) {
+		smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);
+
+		if (smi_info->thread)
+			wake_up_process(smi_info->thread);
+
+		start_next_msg(smi_info);
+		smi_event_handler(smi_info, 0);
+	}
+}
+
 static void sender(void *send_info,
 		   struct ipmi_smi_msg *msg,
 		   int priority)
@@ -906,27 +931,11 @@ static void sender(void *send_info,
 	else
 		list_add_tail(&msg->link, &smi_info->xmit_msgs);
 
-	if (smi_info->si_state == SI_NORMAL && smi_info->curr_msg == NULL) {
-		/*
-		 * last_timeout_jiffies is updated here to avoid
-		 * smi_timeout() handler passing very large time_diff
-		 * value to smi_event_handler() that causes
-		 * the send command to abort.
-		 */
-		smi_info->last_timeout_jiffies = jiffies;
-
-		mod_timer(&smi_info->si_timer, jiffies + SI_TIMEOUT_JIFFIES);
-
-		if (smi_info->thread)
-			wake_up_process(smi_info->thread);
-
-		start_next_msg(smi_info);
-		smi_event_handler(smi_info, 0);
-	}
+	check_start_timer_thread(smi_info);
 	spin_unlock_irqrestore(&smi_info->si_lock, flags);
 }
 
-static void set_run_to_completion(void *send_info, int i_run_to_completion)
+static void set_run_to_completion(void *send_info, bool i_run_to_completion)
 {
 	struct smi_info *smi_info = send_info;
 	enum si_sm_result result;
@@ -1004,6 +1013,17 @@ static int ipmi_thread(void *data)
 		spin_lock_irqsave(&(smi_info->si_lock), flags);
 		smi_result = smi_event_handler(smi_info, 0);
+
+		/*
+		 * If the driver is doing something, there is a possible
+		 * race with the timer.  If the timer handler see idle,
+		 * and the thread here sees something else, the timer
+		 * handler won't restart the timer even though it is
+		 * required.  So start it here if necessary.
+		 */
+		if (smi_result != SI_SM_IDLE && !smi_info->timer_running)
+			smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);
+
 		spin_unlock_irqrestore(&(smi_info->si_lock), flags);
 		busy_wait = ipmi_thread_busy_wait(smi_result, smi_info,
 						  &busy_until);
@@ -1011,9 +1031,15 @@ static int ipmi_thread(void *data)
 			; /* do nothing */
 		else if (smi_result == SI_SM_CALL_WITH_DELAY && busy_wait)
 			schedule();
-		else if (smi_result == SI_SM_IDLE)
-			schedule_timeout_interruptible(100);
-		else
+		else if (smi_result == SI_SM_IDLE) {
+			if (atomic_read(&smi_info->need_watch)) {
+				schedule_timeout_interruptible(100);
+			} else {
+				/* Wait to be woken up when we are needed. */
+				__set_current_state(TASK_INTERRUPTIBLE);
+				schedule();
+			}
+		} else
 			schedule_timeout_interruptible(1);
 	}
 	return 0;
@@ -1024,7 +1050,7 @@ static void poll(void *send_info)
 {
 	struct smi_info *smi_info = send_info;
 	unsigned long flags = 0;
-	int run_to_completion = smi_info->run_to_completion;
+	bool run_to_completion = smi_info->run_to_completion;
 
 	/*
 	 * Make sure there is some delay in the poll loop so we can
@@ -1049,6 +1075,17 @@ static void request_events(void *send_info)
 	atomic_set(&smi_info->req_events, 1);
 }
 
+static void set_need_watch(void *send_info, bool enable)
+{
+	struct smi_info *smi_info = send_info;
+	unsigned long flags;
+
+	atomic_set(&smi_info->need_watch, enable);
+	spin_lock_irqsave(&smi_info->si_lock, flags);
+	check_start_timer_thread(smi_info);
+	spin_unlock_irqrestore(&smi_info->si_lock, flags);
+}
+
 static int initialized;
 
 static void smi_timeout(unsigned long data)
@@ -1073,10 +1110,6 @@ static void smi_timeout(unsigned long data)
 		     * SI_USEC_PER_JIFFY);
 	smi_result = smi_event_handler(smi_info, time_diff);
 
-	spin_unlock_irqrestore(&(smi_info->si_lock), flags);
-
-	smi_info->last_timeout_jiffies = jiffies_now;
-
 	if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
 		/* Running with interrupts, only do long timeouts. */
 		timeout = jiffies + SI_TIMEOUT_JIFFIES;
@@ -1098,7 +1131,10 @@ static void smi_timeout(unsigned long data)
  do_mod_timer:
 	if (smi_result != SI_SM_IDLE)
-		mod_timer(&(smi_info->si_timer), timeout);
+		smi_mod_timer(smi_info, timeout);
+	else
+		smi_info->timer_running = false;
+	spin_unlock_irqrestore(&(smi_info->si_lock), flags);
 }
 
 static irqreturn_t si_irq_handler(int irq, void *data)
@@ -1146,8 +1182,7 @@ static int smi_start_processing(void *send_info,
 	/* Set up the timer that drives the interface. */
 	setup_timer(&new_smi->si_timer, smi_timeout, (long)new_smi);
-	new_smi->last_timeout_jiffies = jiffies;
-	mod_timer(&new_smi->si_timer, jiffies + SI_TIMEOUT_JIFFIES);
+	smi_mod_timer(new_smi, jiffies + SI_TIMEOUT_JIFFIES);
 
 	/*
 	 * Check if the user forcefully enabled the daemon.
@@ -1188,7 +1223,7 @@ static int get_smi_info(void *send_info, struct ipmi_smi_info *data)
 	return 0;
 }
 
-static void set_maintenance_mode(void *send_info, int enable)
+static void set_maintenance_mode(void *send_info, bool enable)
 {
 	struct smi_info *smi_info = send_info;
@@ -1202,6 +1237,7 @@ static struct ipmi_smi_handlers handlers = {
 	.get_smi_info = get_smi_info,
 	.sender = sender,
 	.request_events = request_events,
+	.set_need_watch = set_need_watch,
 	.set_maintenance_mode = set_maintenance_mode,
 	.set_run_to_completion = set_run_to_completion,
 	.poll = poll,
@@ -1229,7 +1265,7 @@ static bool si_tryplatform = 1;
 #ifdef CONFIG_PCI
 static bool si_trypci = 1;
 #endif
-static bool si_trydefaults = 1;
+static bool si_trydefaults = IS_ENABLED(CONFIG_IPMI_SI_PROBE_DEFAULTS);
 static char *si_type[SI_MAX_PARMS];
 #define MAX_SI_TYPE_STR 30
 static char si_type_str[MAX_SI_TYPE_STR];
@@ -1328,7 +1364,7 @@ module_param_array(force_kipmid, int, &num_force_kipmid, 0);
 MODULE_PARM_DESC(force_kipmid, "Force the kipmi daemon to be enabled (1) or"
 		 " disabled(0).  Normally the IPMI driver auto-detects"
 		 " this, but the value may be overridden by this parm.");
-module_param(unload_when_empty, int, 0);
+module_param(unload_when_empty, bool, 0);
 MODULE_PARM_DESC(unload_when_empty, "Unload the module if no interfaces are"
 		 " specified or found, default is 1.  Setting to 0"
 		 " is useful for hot add of devices using hotmod.");
@@ -3336,18 +3372,19 @@ static int try_smi_init(struct smi_info *new_smi)
 	INIT_LIST_HEAD(&(new_smi->hp_xmit_msgs));
 	new_smi->curr_msg = NULL;
 	atomic_set(&new_smi->req_events, 0);
-	new_smi->run_to_completion = 0;
+	new_smi->run_to_completion = false;
 	for (i = 0; i < SI_NUM_STATS; i++)
 		atomic_set(&new_smi->stats[i], 0);
 
-	new_smi->interrupt_disabled = 1;
+	new_smi->interrupt_disabled = true;
 	atomic_set(&new_smi->stop_operation, 0);
+	atomic_set(&new_smi->need_watch, 0);
 	new_smi->intf_num = smi_num;
 	smi_num++;
 
 	rv = try_enable_event_buffer(new_smi);
 	if (rv == 0)
-		new_smi->has_event_buffer = 1;
+		new_smi->has_event_buffer = true;
 
 	/*
 	 * Start clearing the flags before we enable interrupts or the
@@ -3381,7 +3418,7 @@ static int try_smi_init(struct smi_info *new_smi)
 				rv);
 			goto out_err;
 		}
-		new_smi->dev_registered = 1;
+		new_smi->dev_registered = true;
 	}
 
 	rv = ipmi_register_smi(&handlers,
@@ -3430,7 +3467,7 @@ static int try_smi_init(struct smi_info *new_smi)
 	wait_for_timer_and_thread(new_smi);
 
  out_err:
-	new_smi->interrupt_disabled = 1;
+	new_smi->interrupt_disabled = true;
 
 	if (new_smi->intf) {
 		ipmi_unregister_smi(new_smi->intf);
@@ -3466,7 +3503,7 @@ static int try_smi_init(struct smi_info *new_smi)
 	if (new_smi->dev_registered) {
 		platform_device_unregister(new_smi->pdev);
-		new_smi->dev_registered = 0;
+		new_smi->dev_registered = false;
 	}
 
 	return rv;
@@ -3521,14 +3558,14 @@ static int init_ipmi_si(void)
 			printk(KERN_ERR PFX "Unable to register "
 			       "PCI driver: %d\n", rv);
 		else
-			pci_registered = 1;
+			pci_registered = true;
 	}
 #endif
 
 #ifdef CONFIG_ACPI
 	if (si_tryacpi) {
 		pnp_register_driver(&ipmi_pnp_driver);
-		pnp_registered = 1;
+		pnp_registered = true;
 	}
 #endif
@@ -3544,7 +3581,7 @@ static int init_ipmi_si(void)
 #ifdef CONFIG_PARISC
 	register_parisc_driver(&ipmi_parisc_driver);
-	parisc_registered = 1;
+	parisc_registered = true;
 
 	/* poking PC IO addresses will crash machine, don't do it */
 	si_trydefaults = 0;
 #endif
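The timer_running flag added above exists because timer_pending() cannot distinguish "the timer chose not to re-arm" from "the timer is about to fire": smi_timeout() clears the flag under si_lock when it goes idle, and the kipmid thread re-arms if it sees work while the flag is clear, which is the restart race the merge message mentions. A standalone model of that handshake (names are illustrative, not driver code):

    #include <stdbool.h>

    static bool timer_running;  /* models smi_info->timer_running */

    static void mod_timer_model(void)
    {
        /* ...the real code calls mod_timer() here... */
        timer_running = true;
    }

    /* Models smi_timeout(); the real decision is made under si_lock. */
    static void timeout_model(bool idle)
    {
        if (!idle)
            mod_timer_model();
        else
            timer_running = false;
    }

    /* Models the ipmi_thread() check that closes the race window. */
    static void thread_iteration_model(bool idle)
    {
        if (!idle && !timer_running)
            mod_timer_model();
    }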
--- a/include/linux/ipmi.h
+++ b/include/linux/ipmi.h
@@ -237,7 +237,7 @@ int ipmi_set_maintenance_mode(ipmi_user_t user, int mode);
 * The first user that sets this to TRUE will receive all events that
 * have been queued while no one was waiting for events.
 */
-int ipmi_set_gets_events(ipmi_user_t user, int val);
+int ipmi_set_gets_events(ipmi_user_t user, bool val);
 
 /*
 * Called when a new SMI is registered.  This will also be called on
--- a/include/linux/ipmi_smi.h
+++ b/include/linux/ipmi_smi.h
@@ -109,12 +109,19 @@ struct ipmi_smi_handlers {
 	   events from the BMC we are attached to. */
 	void (*request_events)(void *send_info);
 
+	/* Called by the upper layer when some user requires that the
+	   interface watch for events, received messages, watchdog
+	   pretimeouts, or not.  Used by the SMI to know if it should
+	   watch for these.  This may be NULL if the SMI does not
+	   implement it. */
+	void (*set_need_watch)(void *send_info, bool enable);
+
 	/* Called when the interface should go into "run to
 	   completion" mode.  If this call sets the value to true, the
 	   interface should make sure that all messages are flushed
 	   out and that none are pending, and any new requests are run
 	   to completion immediately. */
-	void (*set_run_to_completion)(void *send_info, int run_to_completion);
+	void (*set_run_to_completion)(void *send_info, bool run_to_completion);
 
 	/* Called to poll for work to do.  This is so upper layers can
 	   poll for operations during things like crash dumps. */
@@ -125,7 +132,7 @@ struct ipmi_smi_handlers {
 	   setting.  The message handler does the mode handling.  Note
 	   that this is called from interrupt context, so it cannot
 	   block. */
-	void (*set_maintenance_mode)(void *send_info, int enable);
+	void (*set_maintenance_mode)(void *send_info, bool enable);
 
 	/* Tell the handler that we are using it/not using it.  The
 	   message handler get the modules that this handler belongs
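set_need_watch is deliberately optional, so existing SMI drivers keep working unmodified; the message handler's call site (visible in ipmi_timeout() above) guards it with a NULL check. A minimal model of that convention, using a pruned stand-in struct rather than the real ipmi_smi_handlers:

    #include <stdbool.h>
    #include <stddef.h>

    /* Pruned stand-in for ipmi_smi_handlers, for illustration only. */
    struct smi_handlers_model {
        void (*request_events)(void *send_info);
        void (*set_need_watch)(void *send_info, bool enable); /* may be NULL */
    };

    static void watch_if_supported(const struct smi_handlers_model *h,
                                   void *send_info, bool enable)
    {
        if (h->set_need_watch)  /* the hook is optional */
            h->set_need_watch(send_info, enable);
    }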