Commit bd383b8e authored by Linus Torvalds

Merge tag 'ceph-for-5.18-rc5' of https://github.com/ceph/ceph-client

Pull ceph client fixes from Ilya Dryomov:
 "A fix for a NULL dereference that turns out to be easily triggerable
  by fsync (marked for stable) and a false positive WARN and snap_rwsem
  locking fixups"

* tag 'ceph-for-5.18-rc5' of https://github.com/ceph/ceph-client:
  ceph: fix possible NULL pointer dereference for req->r_session
  ceph: remove incorrect session state check
  ceph: get snap_rwsem read lock in handle_cap_export for ceph_add_cap
  libceph: disambiguate cluster/pool full log message
parents 3e71713c 7acae618
...@@ -2274,6 +2274,8 @@ static int unsafe_request_wait(struct inode *inode) ...@@ -2274,6 +2274,8 @@ static int unsafe_request_wait(struct inode *inode)
list_for_each_entry(req, &ci->i_unsafe_dirops, list_for_each_entry(req, &ci->i_unsafe_dirops,
r_unsafe_dir_item) { r_unsafe_dir_item) {
s = req->r_session; s = req->r_session;
if (!s)
continue;
if (unlikely(s->s_mds >= max_sessions)) { if (unlikely(s->s_mds >= max_sessions)) {
spin_unlock(&ci->i_unsafe_lock); spin_unlock(&ci->i_unsafe_lock);
for (i = 0; i < max_sessions; i++) { for (i = 0; i < max_sessions; i++) {
...@@ -2294,6 +2296,8 @@ static int unsafe_request_wait(struct inode *inode) ...@@ -2294,6 +2296,8 @@ static int unsafe_request_wait(struct inode *inode)
list_for_each_entry(req, &ci->i_unsafe_iops, list_for_each_entry(req, &ci->i_unsafe_iops,
r_unsafe_target_item) { r_unsafe_target_item) {
s = req->r_session; s = req->r_session;
if (!s)
continue;
if (unlikely(s->s_mds >= max_sessions)) { if (unlikely(s->s_mds >= max_sessions)) {
spin_unlock(&ci->i_unsafe_lock); spin_unlock(&ci->i_unsafe_lock);
for (i = 0; i < max_sessions; i++) { for (i = 0; i < max_sessions; i++) {
...@@ -3870,6 +3874,7 @@ static void handle_cap_export(struct inode *inode, struct ceph_mds_caps *ex, ...@@ -3870,6 +3874,7 @@ static void handle_cap_export(struct inode *inode, struct ceph_mds_caps *ex,
dout("handle_cap_export inode %p ci %p mds%d mseq %d target %d\n", dout("handle_cap_export inode %p ci %p mds%d mseq %d target %d\n",
inode, ci, mds, mseq, target); inode, ci, mds, mseq, target);
retry: retry:
down_read(&mdsc->snap_rwsem);
spin_lock(&ci->i_ceph_lock); spin_lock(&ci->i_ceph_lock);
cap = __get_cap_for_mds(ci, mds); cap = __get_cap_for_mds(ci, mds);
if (!cap || cap->cap_id != le64_to_cpu(ex->cap_id)) if (!cap || cap->cap_id != le64_to_cpu(ex->cap_id))
...@@ -3933,6 +3938,7 @@ static void handle_cap_export(struct inode *inode, struct ceph_mds_caps *ex, ...@@ -3933,6 +3938,7 @@ static void handle_cap_export(struct inode *inode, struct ceph_mds_caps *ex,
} }
spin_unlock(&ci->i_ceph_lock); spin_unlock(&ci->i_ceph_lock);
up_read(&mdsc->snap_rwsem);
mutex_unlock(&session->s_mutex); mutex_unlock(&session->s_mutex);
/* open target session */ /* open target session */
...@@ -3958,6 +3964,7 @@ static void handle_cap_export(struct inode *inode, struct ceph_mds_caps *ex, ...@@ -3958,6 +3964,7 @@ static void handle_cap_export(struct inode *inode, struct ceph_mds_caps *ex,
out_unlock: out_unlock:
spin_unlock(&ci->i_ceph_lock); spin_unlock(&ci->i_ceph_lock);
up_read(&mdsc->snap_rwsem);
mutex_unlock(&session->s_mutex); mutex_unlock(&session->s_mutex);
if (tsession) { if (tsession) {
mutex_unlock(&tsession->s_mutex); mutex_unlock(&tsession->s_mutex);
......
...@@ -4434,8 +4434,6 @@ static void maybe_recover_session(struct ceph_mds_client *mdsc) ...@@ -4434,8 +4434,6 @@ static void maybe_recover_session(struct ceph_mds_client *mdsc)
bool check_session_state(struct ceph_mds_session *s) bool check_session_state(struct ceph_mds_session *s)
{ {
struct ceph_fs_client *fsc = s->s_mdsc->fsc;
switch (s->s_state) { switch (s->s_state) {
case CEPH_MDS_SESSION_OPEN: case CEPH_MDS_SESSION_OPEN:
if (s->s_ttl && time_after(jiffies, s->s_ttl)) { if (s->s_ttl && time_after(jiffies, s->s_ttl)) {
...@@ -4444,10 +4442,6 @@ bool check_session_state(struct ceph_mds_session *s) ...@@ -4444,10 +4442,6 @@ bool check_session_state(struct ceph_mds_session *s)
} }
break; break;
case CEPH_MDS_SESSION_CLOSING: case CEPH_MDS_SESSION_CLOSING:
/* Should never reach this when not force unmounting */
WARN_ON_ONCE(s->s_ttl &&
READ_ONCE(fsc->mount_state) != CEPH_MOUNT_SHUTDOWN);
fallthrough;
case CEPH_MDS_SESSION_NEW: case CEPH_MDS_SESSION_NEW:
case CEPH_MDS_SESSION_RESTARTING: case CEPH_MDS_SESSION_RESTARTING:
case CEPH_MDS_SESSION_CLOSED: case CEPH_MDS_SESSION_CLOSED:
......
...@@ -2385,7 +2385,11 @@ static void __submit_request(struct ceph_osd_request *req, bool wrlocked) ...@@ -2385,7 +2385,11 @@ static void __submit_request(struct ceph_osd_request *req, bool wrlocked)
if (ceph_test_opt(osdc->client, ABORT_ON_FULL)) { if (ceph_test_opt(osdc->client, ABORT_ON_FULL)) {
err = -ENOSPC; err = -ENOSPC;
} else { } else {
pr_warn_ratelimited("FULL or reached pool quota\n"); if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL))
pr_warn_ratelimited("cluster is full (osdmap FULL)\n");
else
pr_warn_ratelimited("pool %lld is full or reached quota\n",
req->r_t.base_oloc.pool);
req->r_t.paused = true; req->r_t.paused = true;
maybe_request_map(osdc); maybe_request_map(osdc);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment