Commit a68e564a authored by Xiubo Li, committed by Ilya Dryomov

ceph: blocklist the kclient when receiving corrupted snap trace

When receiving a corrupted snap trace we don't know what exactly has
happened on the MDS side, so we shouldn't continue IO and metadata
access to the MDS, which may corrupt data or return incorrect contents.

This patch blocks all further IO and MDS requests immediately and
then evicts the kclient itself.
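
Condensed, the guard added at each entry point follows one of two
patterns (a simplified sketch of the hunks below, not additional code
in the patch; the comment assumes the usual ceph_inode_is_shutdown()
definition, which treats any mount_state at or beyond
CEPH_MOUNT_SHUTDOWN as shut down):

    /* Data path (netfs read, writepage, uninline, zero-range): the new
     * CEPH_MOUNT_FENCE_IO state is placed after CEPH_MOUNT_SHUTDOWN in
     * the mount-state enum, so ceph_inode_is_shutdown() also fires
     * while fenced.
     */
    if (ceph_inode_is_shutdown(inode))
            return -EIO;

    /* Metadata path (register_session, __open_session, __do_request) */
    if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_FENCE_IO)
            return -EIO;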

The reason we still evict the kclient right after blocking all
further IO is that it lets the MDS revoke the caps faster.
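
In the diff, the fencing and self-eviction both happen in the error
path of ceph_update_snap_trace() in fs/ceph/snap.c; in outline:

    /* Outline of the new bail-out path in ceph_update_snap_trace(). */
    WRITE_ONCE(mdsc->fsc->mount_state, CEPH_MOUNT_FENCE_IO);
    /* Ask the monitors to blocklist (evict) this client, so the MDS
     * can release the caps it holds promptly rather than waiting for
     * timeouts.
     */
    ret = ceph_monc_blocklist_add(&client->monc, &client->msgr.inst.addr);

Each caller of ceph_update_snap_trace() then defers
ceph_mdsc_close_sessions() until after it has dropped the locks it
holds (s_mutex, snap_rwsem).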

Link: https://tracker.ceph.com/issues/57686
Signed-off-by: Xiubo Li <xiubli@redhat.com>
Reviewed-by: Venky Shankar <vshankar@redhat.com>
Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
parent b38b17b6
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -305,7 +305,7 @@ static void ceph_netfs_issue_read(struct netfs_io_subrequest *subreq)
         struct inode *inode = rreq->inode;
         struct ceph_inode_info *ci = ceph_inode(inode);
         struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
-        struct ceph_osd_request *req;
+        struct ceph_osd_request *req = NULL;
         struct ceph_vino vino = ceph_vino(inode);
         struct iov_iter iter;
         struct page **pages;
@@ -313,6 +313,11 @@ static void ceph_netfs_issue_read(struct netfs_io_subrequest *subreq)
         int err = 0;
         u64 len = subreq->len;
 
+        if (ceph_inode_is_shutdown(inode)) {
+                err = -EIO;
+                goto out;
+        }
+
         if (ceph_has_inline_data(ci) && ceph_netfs_issue_op_inline(subreq))
                 return;
 
@@ -563,6 +568,9 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
 
         dout("writepage %p idx %lu\n", page, page->index);
 
+        if (ceph_inode_is_shutdown(inode))
+                return -EIO;
+
         /* verify this is a writeable snap context */
         snapc = page_snap_context(page);
         if (!snapc) {
@@ -1643,7 +1651,7 @@ int ceph_uninline_data(struct file *file)
         struct ceph_inode_info *ci = ceph_inode(inode);
         struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
         struct ceph_osd_request *req = NULL;
-        struct ceph_cap_flush *prealloc_cf;
+        struct ceph_cap_flush *prealloc_cf = NULL;
         struct folio *folio = NULL;
         u64 inline_version = CEPH_INLINE_NONE;
         struct page *pages[1];
@@ -1657,6 +1665,11 @@ int ceph_uninline_data(struct file *file)
         dout("uninline_data %p %llx.%llx inline_version %llu\n",
              inode, ceph_vinop(inode), inline_version);
 
+        if (ceph_inode_is_shutdown(inode)) {
+                err = -EIO;
+                goto out;
+        }
+
         if (inline_version == CEPH_INLINE_NONE)
                 return 0;
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -4078,6 +4078,7 @@ void ceph_handle_caps(struct ceph_mds_session *session,
         void *p, *end;
         struct cap_extra_info extra_info = {};
         bool queue_trunc;
+        bool close_sessions = false;
 
         dout("handle_caps from mds%d\n", session->s_mds);
 
@@ -4215,9 +4216,13 @@ void ceph_handle_caps(struct ceph_mds_session *session,
         realm = NULL;
         if (snaptrace_len) {
                 down_write(&mdsc->snap_rwsem);
-                ceph_update_snap_trace(mdsc, snaptrace,
-                                       snaptrace + snaptrace_len,
-                                       false, &realm);
+                if (ceph_update_snap_trace(mdsc, snaptrace,
+                                           snaptrace + snaptrace_len,
+                                           false, &realm)) {
+                        up_write(&mdsc->snap_rwsem);
+                        close_sessions = true;
+                        goto done;
+                }
                 downgrade_write(&mdsc->snap_rwsem);
         } else {
                 down_read(&mdsc->snap_rwsem);
@@ -4277,6 +4282,11 @@ void ceph_handle_caps(struct ceph_mds_session *session,
         iput(inode);
 out:
         ceph_put_string(extra_info.pool_ns);
+
+        /* Defer closing the sessions after s_mutex lock being released */
+        if (close_sessions)
+                ceph_mdsc_close_sessions(mdsc);
+
         return;
 
 flush_cap_releases:
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -2011,6 +2011,9 @@ static int ceph_zero_partial_object(struct inode *inode,
         loff_t zero = 0;
         int op;
 
+        if (ceph_inode_is_shutdown(inode))
+                return -EIO;
+
         if (!length) {
                 op = offset ? CEPH_OSD_OP_DELETE : CEPH_OSD_OP_TRUNCATE;
                 length = &zero;
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -806,6 +806,9 @@ static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc,
 {
         struct ceph_mds_session *s;
 
+        if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_FENCE_IO)
+                return ERR_PTR(-EIO);
+
         if (mds >= mdsc->mdsmap->possible_max_rank)
                 return ERR_PTR(-EINVAL);
 
@@ -1478,6 +1481,9 @@ static int __open_session(struct ceph_mds_client *mdsc,
         int mstate;
         int mds = session->s_mds;
 
+        if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_FENCE_IO)
+                return -EIO;
+
         /* wait for mds to go active? */
         mstate = ceph_mdsmap_get_state(mdsc->mdsmap, mds);
         dout("open_session to mds%d (%s)\n", mds,
@@ -2860,6 +2866,11 @@ static void __do_request(struct ceph_mds_client *mdsc,
                 return;
         }
 
+        if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_FENCE_IO) {
+                dout("do_request metadata corrupted\n");
+                err = -EIO;
+                goto finish;
+        }
         if (req->r_timeout &&
             time_after_eq(jiffies, req->r_started + req->r_timeout)) {
                 dout("do_request timed out\n");
@@ -3245,6 +3256,7 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
         u64 tid;
         int err, result;
         int mds = session->s_mds;
+        bool close_sessions = false;
 
         if (msg->front.iov_len < sizeof(*head)) {
                 pr_err("mdsc_handle_reply got corrupt (short) reply\n");
@@ -3351,10 +3363,17 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
         realm = NULL;
         if (rinfo->snapblob_len) {
                 down_write(&mdsc->snap_rwsem);
-                ceph_update_snap_trace(mdsc, rinfo->snapblob,
-                                rinfo->snapblob + rinfo->snapblob_len,
-                                le32_to_cpu(head->op) == CEPH_MDS_OP_RMSNAP,
-                                &realm);
+                err = ceph_update_snap_trace(mdsc, rinfo->snapblob,
+                                rinfo->snapblob + rinfo->snapblob_len,
+                                le32_to_cpu(head->op) == CEPH_MDS_OP_RMSNAP,
+                                &realm);
+                if (err) {
+                        up_write(&mdsc->snap_rwsem);
+                        close_sessions = true;
+                        if (err == -EIO)
+                                ceph_msg_dump(msg);
+                        goto out_err;
+                }
                 downgrade_write(&mdsc->snap_rwsem);
         } else {
                 down_read(&mdsc->snap_rwsem);
@@ -3412,6 +3431,10 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
                              req->r_end_latency, err);
 out:
         ceph_mdsc_put_request(req);
+
+        /* Defer closing the sessions after s_mutex lock being released */
+        if (close_sessions)
+                ceph_mdsc_close_sessions(mdsc);
         return;
 }
 
@@ -5011,7 +5034,7 @@ static bool done_closing_sessions(struct ceph_mds_client *mdsc, int skipped)
 }
 
 /*
- * called after sb is ro.
+ * called after sb is ro or when metadata corrupted.
  */
 void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc)
 {
@@ -5301,7 +5324,8 @@ static void mds_peer_reset(struct ceph_connection *con)
         struct ceph_mds_client *mdsc = s->s_mdsc;
 
         pr_warn("mds%d closed our session\n", s->s_mds);
-        send_mds_reconnect(mdsc, s);
+        if (READ_ONCE(mdsc->fsc->mount_state) != CEPH_MOUNT_FENCE_IO)
+                send_mds_reconnect(mdsc, s);
 }
 
 static void mds_dispatch(struct ceph_connection *con, struct ceph_msg *msg)
--- a/fs/ceph/snap.c
+++ b/fs/ceph/snap.c
@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <linux/ceph/ceph_debug.h>
 
+#include <linux/fs.h>
 #include <linux/sort.h>
 #include <linux/slab.h>
 #include <linux/iversion.h>
@@ -766,8 +767,10 @@ int ceph_update_snap_trace(struct ceph_mds_client *mdsc,
         struct ceph_snap_realm *realm;
         struct ceph_snap_realm *first_realm = NULL;
         struct ceph_snap_realm *realm_to_rebuild = NULL;
+        struct ceph_client *client = mdsc->fsc->client;
         int rebuild_snapcs;
         int err = -ENOMEM;
+        int ret;
         LIST_HEAD(dirty_realms);
 
         lockdep_assert_held_write(&mdsc->snap_rwsem);
@@ -884,6 +887,27 @@ int ceph_update_snap_trace(struct ceph_mds_client *mdsc,
         if (first_realm)
                 ceph_put_snap_realm(mdsc, first_realm);
         pr_err("%s error %d\n", __func__, err);
+
+        /*
+         * When receiving a corrupted snap trace we don't know what
+         * exactly has happened in MDS side. And we shouldn't continue
+         * writing to OSD, which may corrupt the snapshot contents.
+         *
+         * Just try to blocklist this kclient and then this kclient
+         * must be remounted to continue after the corrupted metadata
+         * fixed in the MDS side.
+         */
+        WRITE_ONCE(mdsc->fsc->mount_state, CEPH_MOUNT_FENCE_IO);
+        ret = ceph_monc_blocklist_add(&client->monc, &client->msgr.inst.addr);
+        if (ret)
+                pr_err("%s failed to blocklist %s: %d\n", __func__,
+                       ceph_pr_addr(&client->msgr.inst.addr), ret);
+
+        WARN(1, "%s: %s%sdo remount to continue%s",
+             __func__, ret ? "" : ceph_pr_addr(&client->msgr.inst.addr),
+             ret ? "" : " was blocklisted, ",
+             err == -EIO ? " after corrupted snaptrace is fixed" : "");
+
         return err;
 }
 
@@ -984,6 +1008,7 @@ void ceph_handle_snap(struct ceph_mds_client *mdsc,
         __le64 *split_inos = NULL, *split_realms = NULL;
         int i;
         int locked_rwsem = 0;
+        bool close_sessions = false;
 
         /* decode */
         if (msg->front.iov_len < sizeof(*h))
@@ -1092,8 +1117,12 @@ void ceph_handle_snap(struct ceph_mds_client *mdsc,
          * update using the provided snap trace. if we are deleting a
          * snap, we can avoid queueing cap_snaps.
          */
-        ceph_update_snap_trace(mdsc, p, e,
-                               op == CEPH_SNAP_OP_DESTROY, NULL);
+        if (ceph_update_snap_trace(mdsc, p, e,
+                                   op == CEPH_SNAP_OP_DESTROY,
+                                   NULL)) {
+                close_sessions = true;
+                goto bad;
+        }
 
         if (op == CEPH_SNAP_OP_SPLIT)
                 /* we took a reference when we created the realm, above */
@@ -1112,6 +1141,9 @@ void ceph_handle_snap(struct ceph_mds_client *mdsc,
 out:
         if (locked_rwsem)
                 up_write(&mdsc->snap_rwsem);
+
+        if (close_sessions)
+                ceph_mdsc_close_sessions(mdsc);
         return;
 }
--- a/include/linux/ceph/libceph.h
+++ b/include/linux/ceph/libceph.h
@@ -108,6 +108,7 @@ enum {
         CEPH_MOUNT_UNMOUNTED,
         CEPH_MOUNT_SHUTDOWN,
         CEPH_MOUNT_RECOVER,
+        CEPH_MOUNT_FENCE_IO,
 };
 
 #define CEPH_ASYNC_CREATE_CONFLICT_BITS 8