Commit 90d3eaaf authored by Linus Torvalds

Merge tag 'ceph-for-6.9-rc4' of https://github.com/ceph/ceph-client

Pull ceph fixes from Ilya Dryomov:
 "Two CephFS fixes marked for stable and a MAINTAINERS update"

* tag 'ceph-for-6.9-rc4' of https://github.com/ceph/ceph-client:
  MAINTAINERS: remove myself as a Reviewer for Ceph
  ceph: switch to use cap_delay_lock for the unlink delay list
  ceph: redirty page before returning AOP_WRITEPAGE_ACTIVATE
parents d5cf50da d3e04693
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4869,7 +4869,6 @@ F:	drivers/power/supply/cw2015_battery.c
 CEPH COMMON CODE (LIBCEPH)
 M:	Ilya Dryomov <idryomov@gmail.com>
 M:	Xiubo Li <xiubli@redhat.com>
-R:	Jeff Layton <jlayton@kernel.org>
 L:	ceph-devel@vger.kernel.org
 S:	Supported
 W:	http://ceph.com/
@@ -4881,7 +4880,6 @@ F:	net/ceph/
 CEPH DISTRIBUTED FILE SYSTEM CLIENT (CEPH)
 M:	Xiubo Li <xiubli@redhat.com>
 M:	Ilya Dryomov <idryomov@gmail.com>
-R:	Jeff Layton <jlayton@kernel.org>
 L:	ceph-devel@vger.kernel.org
 S:	Supported
 W:	http://ceph.com/
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -795,8 +795,10 @@ static int ceph_writepage(struct page *page, struct writeback_control *wbc)
 	ihold(inode);
 
 	if (wbc->sync_mode == WB_SYNC_NONE &&
-	    ceph_inode_to_fs_client(inode)->write_congested)
+	    ceph_inode_to_fs_client(inode)->write_congested) {
+		redirty_page_for_writepage(wbc, page);
 		return AOP_WRITEPAGE_ACTIVATE;
+	}
 
 	wait_on_page_fscache(page);
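
The writeback core clears a page's dirty bit before calling ->writepage, so a writepage method that bails out with AOP_WRITEPAGE_ACTIVATE must redirty the page first; otherwise the page is re-activated while clean and its pending data is never written back. A minimal userspace sketch of that contract, using hypothetical names (item, writeback_one) rather than the kernel API:

/*
 * Sketch of the bug pattern fixed above, not kernel code: a writeback
 * callback that defers work under congestion must re-mark the item
 * dirty first, or the change is silently dropped.
 */
#include <stdbool.h>
#include <stdio.h>

enum wb_result { WB_WRITTEN, WB_ACTIVATE /* defer, retry later */ };

struct item {
	int data;
	bool dirty;	/* cleared by the caller before the callback runs */
};

static bool write_congested = true;

static enum wb_result writeback_one(struct item *it)
{
	if (write_congested) {
		/* The fix: mark the item dirty again so a later pass
		 * retries it. Returning WB_ACTIVATE while it->dirty is
		 * false would make the change unreachable forever. */
		it->dirty = true;
		return WB_ACTIVATE;
	}
	printf("wrote %d\n", it->data);
	return WB_WRITTEN;
}

int main(void)
{
	struct item it = { .data = 42, .dirty = true };

	it.dirty = false;	/* caller clears dirty before writing */
	if (writeback_one(&it) == WB_ACTIVATE && it.dirty)
		printf("deferred, still dirty, will be retried\n");
	return 0;
}

The main() driver mimics the writeback caller: it clears the dirty flag, invokes the callback, and relies on the callback having redirtied the item before deferring.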
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -4783,13 +4783,13 @@ int ceph_drop_caps_for_unlink(struct inode *inode)
 			doutc(mdsc->fsc->client, "%p %llx.%llx\n", inode,
 			      ceph_vinop(inode));
-			spin_lock(&mdsc->cap_unlink_delay_lock);
+			spin_lock(&mdsc->cap_delay_lock);
 			ci->i_ceph_flags |= CEPH_I_FLUSH;
 			if (!list_empty(&ci->i_cap_delay_list))
 				list_del_init(&ci->i_cap_delay_list);
 			list_add_tail(&ci->i_cap_delay_list,
 				      &mdsc->cap_unlink_delay_list);
-			spin_unlock(&mdsc->cap_unlink_delay_lock);
+			spin_unlock(&mdsc->cap_delay_lock);
 
 			/*
 			 * Fire the work immediately, because the MDS maybe
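
This hunk is where the single lock matters: the same ci->i_cap_delay_list linkage is deleted from whichever list it is on and then added to mdsc->cap_unlink_delay_list, so both lists must be serialized by one lock (now cap_delay_lock), or a concurrent user of cap_delay_list can race with the move. A rough userspace sketch of the shared-linkage idea, with a pthread mutex standing in for the spinlock and hypothetical names (node, move_to_unlink_list) in place of the kernel list API:

/*
 * Userspace sketch, not kernel code: a node has a single linkage
 * field, so it can sit on either of two lists, and moving it must be
 * atomic with respect to both. One lock covers both lists.
 */
#include <pthread.h>
#include <stdio.h>

struct node {
	struct node *next;	/* single linkage, shared by both lists */
	int id;
};

static struct node *delay_list;		/* analog of cap_delay_list */
static struct node *unlink_delay_list;	/* analog of cap_unlink_delay_list */
static pthread_mutex_t delay_lock = PTHREAD_MUTEX_INITIALIZER;

/* Remove n from the given list if present; caller holds delay_lock. */
static void unlink_node(struct node *n, struct node **head)
{
	for (struct node **pp = head; *pp; pp = &(*pp)->next) {
		if (*pp == n) {
			*pp = n->next;
			n->next = NULL;
			return;
		}
	}
}

/* Move n to the unlink list. With two separate locks, a concurrent
 * walker of delay_list could observe n->next mid-update; one lock for
 * both lists makes the move atomic. */
static void move_to_unlink_list(struct node *n)
{
	pthread_mutex_lock(&delay_lock);
	unlink_node(n, &delay_list);
	n->next = unlink_delay_list;
	unlink_delay_list = n;
	pthread_mutex_unlock(&delay_lock);
}

int main(void)
{
	struct node n = { .next = NULL, .id = 1 };

	delay_list = &n;
	move_to_unlink_list(&n);
	printf("node %d now on unlink list\n", unlink_delay_list->id);
	return 0;
}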
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -2504,7 +2504,7 @@ static void ceph_cap_unlink_work(struct work_struct *work)
 	struct ceph_client *cl = mdsc->fsc->client;
 
 	doutc(cl, "begin\n");
-	spin_lock(&mdsc->cap_unlink_delay_lock);
+	spin_lock(&mdsc->cap_delay_lock);
 	while (!list_empty(&mdsc->cap_unlink_delay_list)) {
 		struct ceph_inode_info *ci;
 		struct inode *inode;
@@ -2516,15 +2516,15 @@ static void ceph_cap_unlink_work(struct work_struct *work)
 
 		inode = igrab(&ci->netfs.inode);
 		if (inode) {
-			spin_unlock(&mdsc->cap_unlink_delay_lock);
+			spin_unlock(&mdsc->cap_delay_lock);
 			doutc(cl, "on %p %llx.%llx\n", inode,
 			      ceph_vinop(inode));
 			ceph_check_caps(ci, CHECK_CAPS_FLUSH);
 			iput(inode);
-			spin_lock(&mdsc->cap_unlink_delay_lock);
+			spin_lock(&mdsc->cap_delay_lock);
 		}
 	}
-	spin_unlock(&mdsc->cap_unlink_delay_lock);
+	spin_unlock(&mdsc->cap_delay_lock);
 
 	doutc(cl, "done\n");
 }
@@ -5404,7 +5404,6 @@ int ceph_mdsc_init(struct ceph_fs_client *fsc)
 	INIT_LIST_HEAD(&mdsc->cap_wait_list);
 	spin_lock_init(&mdsc->cap_delay_lock);
 	INIT_LIST_HEAD(&mdsc->cap_unlink_delay_list);
-	spin_lock_init(&mdsc->cap_unlink_delay_lock);
 	INIT_LIST_HEAD(&mdsc->snap_flush_list);
 	spin_lock_init(&mdsc->snap_flush_lock);
 	mdsc->last_cap_flush_tid = 1;
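
ceph_cap_unlink_work() also shows the standard shape for walking such a list: hold the spinlock only for list manipulation, drop it around calls that may sleep (ceph_check_caps(), iput()), then retake it and re-test the list head. A small userspace sketch of that loop, assuming hypothetical names (drain_pending, work_item) and a pthread mutex instead of a spinlock:

/*
 * Userspace sketch of the worker's locking pattern above: detach one
 * item under the lock, drop the lock for the blocking per-item work,
 * then retake it and re-check the head before the next iteration.
 */
#include <pthread.h>
#include <stdio.h>

struct work_item {
	struct work_item *next;
	int id;
};

static struct work_item *pending;
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static void process(struct work_item *it)	/* may block; lock not held */
{
	printf("processing %d\n", it->id);
}

static void drain_pending(void)
{
	pthread_mutex_lock(&list_lock);
	while (pending) {
		struct work_item *it = pending;

		pending = it->next;			/* detach under the lock */
		pthread_mutex_unlock(&list_lock);	/* drop for blocking work */
		process(it);
		pthread_mutex_lock(&list_lock);		/* retake, re-test head */
	}
	pthread_mutex_unlock(&list_lock);
}

int main(void)
{
	struct work_item b = { NULL, 2 }, a = { &b, 1 };

	pending = &a;
	drain_pending();
	return 0;
}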
--- a/fs/ceph/mds_client.h
+++ b/fs/ceph/mds_client.h
@@ -461,9 +461,8 @@ struct ceph_mds_client {
 	struct delayed_work    delayed_work;  /* delayed work */
 	unsigned long    last_renew_caps;  /* last time we renewed our caps */
 	struct list_head cap_delay_list;   /* caps with delayed release */
-	spinlock_t       cap_delay_lock;   /* protects cap_delay_list */
 	struct list_head cap_unlink_delay_list;  /* caps with delayed release for unlink */
-	spinlock_t       cap_unlink_delay_lock;  /* protects cap_unlink_delay_list */
+	spinlock_t       cap_delay_lock;   /* protects cap_delay_list and cap_unlink_delay_list */
 	struct list_head snap_flush_list;  /* cap_snaps ready to flush */
 	spinlock_t       snap_flush_lock;