Commit 41f76d8b authored by Linus Torvalds's avatar Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/sage/ceph-client

Pull ceph fixes from Sage Weil:
 "There is an RBD fix for a crash due to the immutable bio changes, an
  error path fix, and a locking fix in the recent redirect support"

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/sage/ceph-client:
  libceph: do not dereference a NULL bio pointer
  libceph: take map_sem for read in handle_reply()
  libceph: factor out logic from ceph_osdc_start_request()
  libceph: fix error handling in ceph_osdc_init()
parents 42be3f35 0ec1d15e
...@@ -840,9 +840,13 @@ static bool ceph_msg_data_bio_advance(struct ceph_msg_data_cursor *cursor, ...@@ -840,9 +840,13 @@ static bool ceph_msg_data_bio_advance(struct ceph_msg_data_cursor *cursor,
if (!cursor->bvec_iter.bi_size) { if (!cursor->bvec_iter.bi_size) {
bio = bio->bi_next; bio = bio->bi_next;
cursor->bvec_iter = bio->bi_iter; cursor->bio = bio;
if (bio)
cursor->bvec_iter = bio->bi_iter;
else
memset(&cursor->bvec_iter, 0,
sizeof(cursor->bvec_iter));
} }
cursor->bio = bio;
if (!cursor->last_piece) { if (!cursor->last_piece) {
BUG_ON(!cursor->resid); BUG_ON(!cursor->resid);
......
...@@ -1426,6 +1426,40 @@ static void __send_queued(struct ceph_osd_client *osdc) ...@@ -1426,6 +1426,40 @@ static void __send_queued(struct ceph_osd_client *osdc)
__send_request(osdc, req); __send_request(osdc, req);
} }
/*
 * Register @req with the OSD client and try to map and send it.
 *
 * Caller should hold map_sem for read and request_mutex.
 *
 * Returns 0 on success or when @nofail is set (a failed mapping is
 * then left registered to be retried later); otherwise returns the
 * negative error from __map_request() with @req unregistered again.
 */
static int __ceph_osdc_start_request(struct ceph_osd_client *osdc,
				     struct ceph_osd_request *req,
				     bool nofail)
{
	int rc;

	/* Make the request visible in the osdc tid tree before mapping. */
	__register_request(osdc, req);
	req->r_sent = 0;
	req->r_got_reply = 0;
	rc = __map_request(osdc, req, 0);
	if (rc < 0) {
		if (nofail) {
			/*
			 * Caller asked us never to fail: keep the request
			 * registered so a later map change can retry it.
			 */
			dout("osdc_start_request failed map, "
				" will retry %lld\n", req->r_tid);
			rc = 0;
		} else {
			/* Hard failure: undo the registration above. */
			__unregister_request(osdc, req);
		}
		return rc;
	}
	if (req->r_osd == NULL) {
		/*
		 * Mapped, but no OSD is up for the PG; ask the monitor
		 * for the next osdmap so the request can be resent.
		 */
		dout("send_request %p no up osds in pg\n", req);
		ceph_monc_request_next_osdmap(&osdc->client->monc);
	} else {
		/* An OSD is available: flush the queued request out. */
		__send_queued(osdc);
	}

	return 0;
}
/* /*
* Timeout callback, called every N seconds when 1 or more osd * Timeout callback, called every N seconds when 1 or more osd
* requests has been active for more than N seconds. When this * requests has been active for more than N seconds. When this
...@@ -1653,6 +1687,7 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg, ...@@ -1653,6 +1687,7 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg,
osdmap_epoch = ceph_decode_32(&p); osdmap_epoch = ceph_decode_32(&p);
/* lookup */ /* lookup */
down_read(&osdc->map_sem);
mutex_lock(&osdc->request_mutex); mutex_lock(&osdc->request_mutex);
req = __lookup_request(osdc, tid); req = __lookup_request(osdc, tid);
if (req == NULL) { if (req == NULL) {
...@@ -1709,7 +1744,6 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg, ...@@ -1709,7 +1744,6 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg,
dout("redirect pool %lld\n", redir.oloc.pool); dout("redirect pool %lld\n", redir.oloc.pool);
__unregister_request(osdc, req); __unregister_request(osdc, req);
mutex_unlock(&osdc->request_mutex);
req->r_target_oloc = redir.oloc; /* struct */ req->r_target_oloc = redir.oloc; /* struct */
...@@ -1721,10 +1755,10 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg, ...@@ -1721,10 +1755,10 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg,
* successfully. In the future we might want to follow * successfully. In the future we might want to follow
* original request's nofail setting here. * original request's nofail setting here.
*/ */
err = ceph_osdc_start_request(osdc, req, true); err = __ceph_osdc_start_request(osdc, req, true);
BUG_ON(err); BUG_ON(err);
goto done; goto out_unlock;
} }
already_completed = req->r_got_reply; already_completed = req->r_got_reply;
...@@ -1742,8 +1776,7 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg, ...@@ -1742,8 +1776,7 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg,
req->r_got_reply = 1; req->r_got_reply = 1;
} else if ((flags & CEPH_OSD_FLAG_ONDISK) == 0) { } else if ((flags & CEPH_OSD_FLAG_ONDISK) == 0) {
dout("handle_reply tid %llu dup ack\n", tid); dout("handle_reply tid %llu dup ack\n", tid);
mutex_unlock(&osdc->request_mutex); goto out_unlock;
goto done;
} }
dout("handle_reply tid %llu flags %d\n", tid, flags); dout("handle_reply tid %llu flags %d\n", tid, flags);
...@@ -1758,6 +1791,7 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg, ...@@ -1758,6 +1791,7 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg,
__unregister_request(osdc, req); __unregister_request(osdc, req);
mutex_unlock(&osdc->request_mutex); mutex_unlock(&osdc->request_mutex);
up_read(&osdc->map_sem);
if (!already_completed) { if (!already_completed) {
if (req->r_unsafe_callback && if (req->r_unsafe_callback &&
...@@ -1775,10 +1809,14 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg, ...@@ -1775,10 +1809,14 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg,
complete_request(req); complete_request(req);
} }
done: out:
dout("req=%p req->r_linger=%d\n", req, req->r_linger); dout("req=%p req->r_linger=%d\n", req, req->r_linger);
ceph_osdc_put_request(req); ceph_osdc_put_request(req);
return; return;
out_unlock:
mutex_unlock(&osdc->request_mutex);
up_read(&osdc->map_sem);
goto out;
bad_put: bad_put:
req->r_result = -EIO; req->r_result = -EIO;
...@@ -1791,6 +1829,7 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg, ...@@ -1791,6 +1829,7 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg,
ceph_osdc_put_request(req); ceph_osdc_put_request(req);
bad_mutex: bad_mutex:
mutex_unlock(&osdc->request_mutex); mutex_unlock(&osdc->request_mutex);
up_read(&osdc->map_sem);
bad: bad:
pr_err("corrupt osd_op_reply got %d %d\n", pr_err("corrupt osd_op_reply got %d %d\n",
(int)msg->front.iov_len, le32_to_cpu(msg->hdr.front_len)); (int)msg->front.iov_len, le32_to_cpu(msg->hdr.front_len));
...@@ -2351,34 +2390,16 @@ int ceph_osdc_start_request(struct ceph_osd_client *osdc, ...@@ -2351,34 +2390,16 @@ int ceph_osdc_start_request(struct ceph_osd_client *osdc,
struct ceph_osd_request *req, struct ceph_osd_request *req,
bool nofail) bool nofail)
{ {
int rc = 0; int rc;
down_read(&osdc->map_sem); down_read(&osdc->map_sem);
mutex_lock(&osdc->request_mutex); mutex_lock(&osdc->request_mutex);
__register_request(osdc, req);
req->r_sent = 0; rc = __ceph_osdc_start_request(osdc, req, nofail);
req->r_got_reply = 0;
rc = __map_request(osdc, req, 0);
if (rc < 0) {
if (nofail) {
dout("osdc_start_request failed map, "
" will retry %lld\n", req->r_tid);
rc = 0;
} else {
__unregister_request(osdc, req);
}
goto out_unlock;
}
if (req->r_osd == NULL) {
dout("send_request %p no up osds in pg\n", req);
ceph_monc_request_next_osdmap(&osdc->client->monc);
} else {
__send_queued(osdc);
}
rc = 0;
out_unlock:
mutex_unlock(&osdc->request_mutex); mutex_unlock(&osdc->request_mutex);
up_read(&osdc->map_sem); up_read(&osdc->map_sem);
return rc; return rc;
} }
EXPORT_SYMBOL(ceph_osdc_start_request); EXPORT_SYMBOL(ceph_osdc_start_request);
...@@ -2504,9 +2525,12 @@ int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client) ...@@ -2504,9 +2525,12 @@ int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client)
err = -ENOMEM; err = -ENOMEM;
osdc->notify_wq = create_singlethread_workqueue("ceph-watch-notify"); osdc->notify_wq = create_singlethread_workqueue("ceph-watch-notify");
if (!osdc->notify_wq) if (!osdc->notify_wq)
goto out_msgpool; goto out_msgpool_reply;
return 0; return 0;
out_msgpool_reply:
ceph_msgpool_destroy(&osdc->msgpool_op_reply);
out_msgpool: out_msgpool:
ceph_msgpool_destroy(&osdc->msgpool_op); ceph_msgpool_destroy(&osdc->msgpool_op);
out_mempool: out_mempool:
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment