Commit 33c2865f authored by Damien Le Moal, committed by Mike Snitzer

dm zoned: fix metadata block ref counting

Since the ref field of struct dmz_mblock is always used with the
spinlock of struct dmz_metadata locked, there is no need to use an
atomic_t type. Change the type of the ref field to an unsigned
integer.

Fixes: 3b1a94c8 ("dm zoned: drive-managed zoned block device target")
Cc: stable@vger.kernel.org
Signed-off-by: Damien Le Moal <damien.lemoal@wdc.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
parent d857ad75
...@@ -99,7 +99,7 @@ struct dmz_mblock { ...@@ -99,7 +99,7 @@ struct dmz_mblock {
struct rb_node node; struct rb_node node;
struct list_head link; struct list_head link;
sector_t no; sector_t no;
atomic_t ref; unsigned int ref;
unsigned long state; unsigned long state;
struct page *page; struct page *page;
void *data; void *data;
...@@ -296,7 +296,7 @@ static struct dmz_mblock *dmz_alloc_mblock(struct dmz_metadata *zmd, ...@@ -296,7 +296,7 @@ static struct dmz_mblock *dmz_alloc_mblock(struct dmz_metadata *zmd,
RB_CLEAR_NODE(&mblk->node); RB_CLEAR_NODE(&mblk->node);
INIT_LIST_HEAD(&mblk->link); INIT_LIST_HEAD(&mblk->link);
atomic_set(&mblk->ref, 0); mblk->ref = 0;
mblk->state = 0; mblk->state = 0;
mblk->no = mblk_no; mblk->no = mblk_no;
mblk->data = page_address(mblk->page); mblk->data = page_address(mblk->page);
...@@ -397,7 +397,7 @@ static struct dmz_mblock *dmz_fetch_mblock(struct dmz_metadata *zmd, ...@@ -397,7 +397,7 @@ static struct dmz_mblock *dmz_fetch_mblock(struct dmz_metadata *zmd,
return NULL; return NULL;
spin_lock(&zmd->mblk_lock); spin_lock(&zmd->mblk_lock);
atomic_inc(&mblk->ref); mblk->ref++;
set_bit(DMZ_META_READING, &mblk->state); set_bit(DMZ_META_READING, &mblk->state);
dmz_insert_mblock(zmd, mblk); dmz_insert_mblock(zmd, mblk);
spin_unlock(&zmd->mblk_lock); spin_unlock(&zmd->mblk_lock);
...@@ -484,7 +484,8 @@ static void dmz_release_mblock(struct dmz_metadata *zmd, ...@@ -484,7 +484,8 @@ static void dmz_release_mblock(struct dmz_metadata *zmd,
spin_lock(&zmd->mblk_lock); spin_lock(&zmd->mblk_lock);
if (atomic_dec_and_test(&mblk->ref)) { mblk->ref--;
if (mblk->ref == 0) {
if (test_bit(DMZ_META_ERROR, &mblk->state)) { if (test_bit(DMZ_META_ERROR, &mblk->state)) {
rb_erase(&mblk->node, &zmd->mblk_rbtree); rb_erase(&mblk->node, &zmd->mblk_rbtree);
dmz_free_mblock(zmd, mblk); dmz_free_mblock(zmd, mblk);
...@@ -511,7 +512,8 @@ static struct dmz_mblock *dmz_get_mblock(struct dmz_metadata *zmd, ...@@ -511,7 +512,8 @@ static struct dmz_mblock *dmz_get_mblock(struct dmz_metadata *zmd,
mblk = dmz_lookup_mblock(zmd, mblk_no); mblk = dmz_lookup_mblock(zmd, mblk_no);
if (mblk) { if (mblk) {
/* Cache hit: remove block from LRU list */ /* Cache hit: remove block from LRU list */
if (atomic_inc_return(&mblk->ref) == 1 && mblk->ref++;
if (mblk->ref == 1 &&
!test_bit(DMZ_META_DIRTY, &mblk->state)) !test_bit(DMZ_META_DIRTY, &mblk->state))
list_del_init(&mblk->link); list_del_init(&mblk->link);
} }
...@@ -753,7 +755,7 @@ int dmz_flush_metadata(struct dmz_metadata *zmd) ...@@ -753,7 +755,7 @@ int dmz_flush_metadata(struct dmz_metadata *zmd)
spin_lock(&zmd->mblk_lock); spin_lock(&zmd->mblk_lock);
clear_bit(DMZ_META_DIRTY, &mblk->state); clear_bit(DMZ_META_DIRTY, &mblk->state);
if (atomic_read(&mblk->ref) == 0) if (mblk->ref == 0)
list_add_tail(&mblk->link, &zmd->mblk_lru_list); list_add_tail(&mblk->link, &zmd->mblk_lru_list);
spin_unlock(&zmd->mblk_lock); spin_unlock(&zmd->mblk_lock);
} }
...@@ -2308,7 +2310,7 @@ static void dmz_cleanup_metadata(struct dmz_metadata *zmd) ...@@ -2308,7 +2310,7 @@ static void dmz_cleanup_metadata(struct dmz_metadata *zmd)
mblk = list_first_entry(&zmd->mblk_dirty_list, mblk = list_first_entry(&zmd->mblk_dirty_list,
struct dmz_mblock, link); struct dmz_mblock, link);
dmz_dev_warn(zmd->dev, "mblock %llu still in dirty list (ref %u)", dmz_dev_warn(zmd->dev, "mblock %llu still in dirty list (ref %u)",
(u64)mblk->no, atomic_read(&mblk->ref)); (u64)mblk->no, mblk->ref);
list_del_init(&mblk->link); list_del_init(&mblk->link);
rb_erase(&mblk->node, &zmd->mblk_rbtree); rb_erase(&mblk->node, &zmd->mblk_rbtree);
dmz_free_mblock(zmd, mblk); dmz_free_mblock(zmd, mblk);
...@@ -2326,8 +2328,8 @@ static void dmz_cleanup_metadata(struct dmz_metadata *zmd) ...@@ -2326,8 +2328,8 @@ static void dmz_cleanup_metadata(struct dmz_metadata *zmd)
root = &zmd->mblk_rbtree; root = &zmd->mblk_rbtree;
rbtree_postorder_for_each_entry_safe(mblk, next, root, node) { rbtree_postorder_for_each_entry_safe(mblk, next, root, node) {
dmz_dev_warn(zmd->dev, "mblock %llu ref %u still in rbtree", dmz_dev_warn(zmd->dev, "mblock %llu ref %u still in rbtree",
(u64)mblk->no, atomic_read(&mblk->ref)); (u64)mblk->no, mblk->ref);
atomic_set(&mblk->ref, 0); mblk->ref = 0;
dmz_free_mblock(zmd, mblk); dmz_free_mblock(zmd, mblk);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment