Commit 9a278a79 authored by Lars Ellenberg, committed by Philipp Reisner

drbd: allow read requests to be retried after force-detach

Sometimes, a lower level block device turns into a tar-pit,
not completing requests at all, not even doing error completion.

We can force-detach from such a tar-pit block device,
either by disk-timeout, or by drbdadm detach --force.

Queueing for retry only from the request destruction path (kref hit 0)
makes it impossible to retry affected read requests from the peer,
until the local IO completion happened, as the locally submitted
bio holds a reference on the drbd request object.

If we can only complete READs when the local completion finally
happens, we would not need to force-detach in the first place.

Instead, queue for retry where we otherwise had done the error completion.
Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com>
parent 934722a2
...@@ -2216,12 +2216,25 @@ static void do_retry(struct work_struct *ws) ...@@ -2216,12 +2216,25 @@ static void do_retry(struct work_struct *ws)
struct drbd_conf *mdev = req->w.mdev; struct drbd_conf *mdev = req->w.mdev;
struct bio *bio = req->master_bio; struct bio *bio = req->master_bio;
unsigned long start_time = req->start_time; unsigned long start_time = req->start_time;
bool expected;
/* We have exclusive access to this request object.
* If it had not been RQ_POSTPONED, the code path which queued expected =
* it here would have completed and freed it already. expect(atomic_read(&req->completion_ref) == 0) &&
expect(req->rq_state & RQ_POSTPONED) &&
expect((req->rq_state & RQ_LOCAL_PENDING) == 0 ||
(req->rq_state & RQ_LOCAL_ABORTED) != 0);
if (!expected)
dev_err(DEV, "req=%p completion_ref=%d rq_state=%x\n",
req, atomic_read(&req->completion_ref),
req->rq_state);
/* We still need to put one kref associated with the
* "completion_ref" going zero in the code path that queued it
* here. The request object may still be referenced by a
* frozen local req->private_bio, in case we force-detached.
*/ */
mempool_free(req, drbd_request_mempool); kref_put(&req->kref, drbd_req_destroy);
/* A single suspended or otherwise blocking device may stall /* A single suspended or otherwise blocking device may stall
* all others as well. Fortunately, this code path is to * all others as well. Fortunately, this code path is to
......
...@@ -92,7 +92,7 @@ static struct drbd_request *drbd_req_new(struct drbd_conf *mdev, ...@@ -92,7 +92,7 @@ static struct drbd_request *drbd_req_new(struct drbd_conf *mdev,
return req; return req;
} }
static void drbd_req_destroy(struct kref *kref) void drbd_req_destroy(struct kref *kref)
{ {
struct drbd_request *req = container_of(kref, struct drbd_request, kref); struct drbd_request *req = container_of(kref, struct drbd_request, kref);
struct drbd_conf *mdev = req->w.mdev; struct drbd_conf *mdev = req->w.mdev;
...@@ -152,10 +152,7 @@ static void drbd_req_destroy(struct kref *kref) ...@@ -152,10 +152,7 @@ static void drbd_req_destroy(struct kref *kref)
} }
} }
if (s & RQ_POSTPONED) mempool_free(req, drbd_request_mempool);
drbd_restart_request(req);
else
mempool_free(req, drbd_request_mempool);
} }
static void wake_all_senders(struct drbd_tconn *tconn) { static void wake_all_senders(struct drbd_tconn *tconn) {
...@@ -292,10 +289,6 @@ void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m) ...@@ -292,10 +289,6 @@ void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m)
m->error = ok ? 0 : (error ?: -EIO); m->error = ok ? 0 : (error ?: -EIO);
m->bio = req->master_bio; m->bio = req->master_bio;
req->master_bio = NULL; req->master_bio = NULL;
} else {
/* Assert that this will be drbd_req_destroy()ed
* with this very invokation. */
D_ASSERT(atomic_read(&req->kref.refcount) == 1);
} }
} }
...@@ -320,6 +313,14 @@ static int drbd_req_put_completion_ref(struct drbd_request *req, struct bio_and_ ...@@ -320,6 +313,14 @@ static int drbd_req_put_completion_ref(struct drbd_request *req, struct bio_and_
/* else */ /* else */
drbd_req_complete(req, m); drbd_req_complete(req, m);
if (req->rq_state & RQ_POSTPONED) {
/* don't destroy the req object just yet,
* but queue it for retry */
drbd_restart_request(req);
return 0;
}
return 1; return 1;
} }
......
...@@ -267,6 +267,7 @@ struct bio_and_error { ...@@ -267,6 +267,7 @@ struct bio_and_error {
int error; int error;
}; };
extern void drbd_req_destroy(struct kref *kref);
extern void _req_may_be_done(struct drbd_request *req, extern void _req_may_be_done(struct drbd_request *req,
struct bio_and_error *m); struct bio_and_error *m);
extern int __req_mod(struct drbd_request *req, enum drbd_req_event what, extern int __req_mod(struct drbd_request *req, enum drbd_req_event what,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment