Commit 2a4b6881 authored by NeilBrown's avatar NeilBrown Committed by Greg Kroah-Hartman

staging: lustre: lnet: remove cfs_block_allsigs calls.

Both places that cfs_block_allsigs() is used here,
the goal is to turn an interruptible wait into an
uninterruptible one.
So instead of blocking the signals, change TASK_INTERRUPTIBLE to
TASK_NOLOAD.
In each case, no other functions called while signals are blocked
will sleep - just the one that has been fixed.
In one case, an extra 'interruptible' flag needs to be passed
down so the waiting decision can be made at the right place.
Signed-off-by: NeilBrown <neilb@suse.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 1b2dad14
...@@ -169,6 +169,7 @@ int LNetEQFree(struct lnet_handle_eq eventq_in); ...@@ -169,6 +169,7 @@ int LNetEQFree(struct lnet_handle_eq eventq_in);
int LNetEQPoll(struct lnet_handle_eq *eventqs_in, int LNetEQPoll(struct lnet_handle_eq *eventqs_in,
int neq_in, int neq_in,
int timeout_ms, int timeout_ms,
int interruptible,
struct lnet_event *event_out, struct lnet_event *event_out,
int *which_eq_out); int *which_eq_out);
/** @} lnet_eq */ /** @} lnet_eq */
......
...@@ -961,19 +961,15 @@ static void ...@@ -961,19 +961,15 @@ static void
lnet_ping_md_unlink(struct lnet_ping_info *pinfo, lnet_ping_md_unlink(struct lnet_ping_info *pinfo,
struct lnet_handle_md *md_handle) struct lnet_handle_md *md_handle)
{ {
sigset_t blocked = cfs_block_allsigs();
LNetMDUnlink(*md_handle); LNetMDUnlink(*md_handle);
LNetInvalidateMDHandle(md_handle); LNetInvalidateMDHandle(md_handle);
/* NB md could be busy; this just starts the unlink */ /* NB md could be busy; this just starts the unlink */
while (pinfo->pi_features != LNET_PING_FEAT_INVAL) { while (pinfo->pi_features != LNET_PING_FEAT_INVAL) {
CDEBUG(D_NET, "Still waiting for ping MD to unlink\n"); CDEBUG(D_NET, "Still waiting for ping MD to unlink\n");
set_current_state(TASK_UNINTERRUPTIBLE); set_current_state(TASK_NOLOAD);
schedule_timeout(HZ); schedule_timeout(HZ);
} }
cfs_restore_sigs(blocked);
} }
static void static void
...@@ -2141,7 +2137,6 @@ static int lnet_ping(struct lnet_process_id id, int timeout_ms, ...@@ -2141,7 +2137,6 @@ static int lnet_ping(struct lnet_process_id id, int timeout_ms,
int nob; int nob;
int rc; int rc;
int rc2; int rc2;
sigset_t blocked;
infosz = offsetof(struct lnet_ping_info, pi_ni[n_ids]); infosz = offsetof(struct lnet_ping_info, pi_ni[n_ids]);
...@@ -2197,13 +2192,9 @@ static int lnet_ping(struct lnet_process_id id, int timeout_ms, ...@@ -2197,13 +2192,9 @@ static int lnet_ping(struct lnet_process_id id, int timeout_ms,
do { do {
/* MUST block for unlink to complete */ /* MUST block for unlink to complete */
if (unlinked)
blocked = cfs_block_allsigs();
rc2 = LNetEQPoll(&eqh, 1, timeout_ms, &event, &which);
if (unlinked) rc2 = LNetEQPoll(&eqh, 1, timeout_ms, !unlinked,
cfs_restore_sigs(blocked); &event, &which);
CDEBUG(D_NET, "poll %d(%d %d)%s\n", rc2, CDEBUG(D_NET, "poll %d(%d %d)%s\n", rc2,
(rc2 <= 0) ? -1 : event.type, (rc2 <= 0) ? -1 : event.type,
......
...@@ -308,7 +308,7 @@ lnet_eq_dequeue_event(struct lnet_eq *eq, struct lnet_event *ev) ...@@ -308,7 +308,7 @@ lnet_eq_dequeue_event(struct lnet_eq *eq, struct lnet_event *ev)
*/ */
static int static int
lnet_eq_wait_locked(int *timeout_ms) lnet_eq_wait_locked(int *timeout_ms, long state)
__must_hold(&the_lnet.ln_eq_wait_lock) __must_hold(&the_lnet.ln_eq_wait_lock)
{ {
int tms = *timeout_ms; int tms = *timeout_ms;
...@@ -320,7 +320,7 @@ __must_hold(&the_lnet.ln_eq_wait_lock) ...@@ -320,7 +320,7 @@ __must_hold(&the_lnet.ln_eq_wait_lock)
return -ENXIO; /* don't want to wait and no new event */ return -ENXIO; /* don't want to wait and no new event */
init_waitqueue_entry(&wl, current); init_waitqueue_entry(&wl, current);
set_current_state(TASK_INTERRUPTIBLE); set_current_state(state);
add_wait_queue(&the_lnet.ln_eq_waitq, &wl); add_wait_queue(&the_lnet.ln_eq_waitq, &wl);
lnet_eq_wait_unlock(); lnet_eq_wait_unlock();
...@@ -359,6 +359,7 @@ __must_hold(&the_lnet.ln_eq_wait_lock) ...@@ -359,6 +359,7 @@ __must_hold(&the_lnet.ln_eq_wait_lock)
* \param timeout_ms Time in milliseconds to wait for an event to occur on * \param timeout_ms Time in milliseconds to wait for an event to occur on
* one of the EQs. The constant LNET_TIME_FOREVER can be used to indicate an * one of the EQs. The constant LNET_TIME_FOREVER can be used to indicate an
* infinite timeout. * infinite timeout.
* \param interruptible, if true, use TASK_INTERRUPTIBLE, else TASK_NOLOAD
* \param event,which On successful return (1 or -EOVERFLOW), \a event will * \param event,which On successful return (1 or -EOVERFLOW), \a event will
* hold the next event in the EQs, and \a which will contain the index of the * hold the next event in the EQs, and \a which will contain the index of the
* EQ from which the event was taken. * EQ from which the event was taken.
...@@ -372,6 +373,7 @@ __must_hold(&the_lnet.ln_eq_wait_lock) ...@@ -372,6 +373,7 @@ __must_hold(&the_lnet.ln_eq_wait_lock)
*/ */
int int
LNetEQPoll(struct lnet_handle_eq *eventqs, int neq, int timeout_ms, LNetEQPoll(struct lnet_handle_eq *eventqs, int neq, int timeout_ms,
int interruptible,
struct lnet_event *event, int *which) struct lnet_event *event, int *which)
{ {
int wait = 1; int wait = 1;
...@@ -412,7 +414,9 @@ LNetEQPoll(struct lnet_handle_eq *eventqs, int neq, int timeout_ms, ...@@ -412,7 +414,9 @@ LNetEQPoll(struct lnet_handle_eq *eventqs, int neq, int timeout_ms,
* 0 : don't want to wait anymore, but might have new event * 0 : don't want to wait anymore, but might have new event
* so need to call dequeue again * so need to call dequeue again
*/ */
wait = lnet_eq_wait_locked(&timeout_ms); wait = lnet_eq_wait_locked(&timeout_ms,
interruptible ? TASK_INTERRUPTIBLE
: TASK_NOLOAD);
if (wait < 0) /* no new event */ if (wait < 0) /* no new event */
break; break;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment