Commit e6c3948d authored by Chao Yu, committed by Jaegeuk Kim

f2fs: compress: use more readable atomic_t type for {cic,dic}.ref

A refcount_t variable should never be less than one, so it is a little
hard to understand when it is used to track the pending compressed page
count; switch to atomic_t for better readability.
Signed-off-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
parent 17d7648d
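
For readers outside f2fs, the pattern this patch settles on is a plain completion counter: the submitter sets the counter to the number of pages it puts in flight, every per-page completion decrements it, and only the decrement that reaches zero performs the finish-up work. With refcount_t this read awkwardly, since refcount_dec_not_one() returns true while the count is still above one, whereas atomic_dec_return() hitting zero says "last one out" directly. Below is a minimal userspace sketch of that counting pattern using C11 atomics; the names (demo_io_ctx, pending_pages, submit_cluster, on_page_done) are illustrative and are not part of the f2fs code.

#include <stdatomic.h>
#include <stdio.h>

/* Illustrative context: counts pages still in flight for one cluster. */
struct demo_io_ctx {
	atomic_int pending_pages;
};

/* Called once when the I/O for a cluster is submitted. */
static void submit_cluster(struct demo_io_ctx *ctx, int nr_pages)
{
	atomic_store(&ctx->pending_pages, nr_pages);
}

/* Called from each page's completion path; returns 1 for the last page. */
static int on_page_done(struct demo_io_ctx *ctx)
{
	/* atomic_fetch_sub() returns the old value, so old == 1 means this
	 * call dropped the counter to zero: we are the last completion. */
	return atomic_fetch_sub(&ctx->pending_pages, 1) == 1;
}

int main(void)
{
	struct demo_io_ctx ctx;
	int i;

	submit_cluster(&ctx, 4);
	for (i = 0; i < 4; i++) {
		if (on_page_done(&ctx))
			printf("page %d was the last in flight: finish the cluster\n", i);
	}
	return 0;
}
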
@@ -677,7 +677,7 @@ void f2fs_decompress_pages(struct bio *bio, struct page *page, bool verity)
 	if (bio->bi_status || PageError(page))
 		dic->failed = true;
 
-	if (refcount_dec_not_one(&dic->ref))
+	if (atomic_dec_return(&dic->pending_pages))
 		return;
 
 	trace_f2fs_decompress_pages_start(dic->inode, dic->cluster_idx,
@@ -746,7 +746,7 @@ void f2fs_decompress_pages(struct bio *bio, struct page *page, bool verity)
 		cops->destroy_decompress_ctx(dic);
 out_free_dic:
 	if (verity)
-		refcount_set(&dic->ref, dic->nr_cpages);
+		atomic_set(&dic->pending_pages, dic->nr_cpages);
 	if (!verity)
 		f2fs_decompress_end_io(dic->rpages, dic->cluster_size,
 								ret, false);
@@ -1161,7 +1161,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
 
 	cic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
 	cic->inode = inode;
-	refcount_set(&cic->ref, cc->nr_cpages);
+	atomic_set(&cic->pending_pages, cc->nr_cpages);
 	cic->rpages = f2fs_kzalloc(sbi, sizeof(struct page *) <<
 			cc->log_cluster_size, GFP_NOFS);
 	if (!cic->rpages)
@@ -1296,7 +1296,7 @@ void f2fs_compress_write_end_io(struct bio *bio, struct page *page)
 
 	dec_page_count(sbi, F2FS_WB_DATA);
 
-	if (refcount_dec_not_one(&cic->ref))
+	if (atomic_dec_return(&cic->pending_pages))
 		return;
 
 	for (i = 0; i < cic->nr_rpages; i++) {
@@ -1438,7 +1438,7 @@ struct decompress_io_ctx *f2fs_alloc_dic(struct compress_ctx *cc)
 
 	dic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
 	dic->inode = cc->inode;
-	refcount_set(&dic->ref, cc->nr_cpages);
+	atomic_set(&dic->pending_pages, cc->nr_cpages);
 	dic->cluster_idx = cc->cluster_idx;
 	dic->cluster_size = cc->cluster_size;
 	dic->log_cluster_size = cc->log_cluster_size;
@@ -202,7 +202,7 @@ static void f2fs_verify_bio(struct bio *bio)
 		dic = (struct decompress_io_ctx *)page_private(page);
 
 		if (dic) {
-			if (refcount_dec_not_one(&dic->ref))
+			if (atomic_dec_return(&dic->pending_pages))
 				continue;
 			f2fs_verify_pages(dic->rpages,
 					dic->cluster_size);
@@ -2272,8 +2272,8 @@ int f2fs_read_multi_pages(struct compress_ctx *cc, struct bio **bio_ret,
 		if (IS_ERR(bio)) {
 			ret = PTR_ERR(bio);
 			dic->failed = true;
-			if (refcount_sub_and_test(dic->nr_cpages - i,
-						&dic->ref)) {
+			if (!atomic_sub_return(dic->nr_cpages - i,
+					&dic->pending_pages)) {
 				f2fs_decompress_end_io(dic->rpages,
 						cc->cluster_size, true,
 						false);
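
The error path in the hunk above drops the counter by the number of compressed pages that will never be submitted, and it still has to notice when that batch decrement releases the last outstanding pages on the context (refcount_sub_and_test() before, !atomic_sub_return() after). A hedged continuation of the earlier userspace sketch, again with demo names rather than f2fs code:

/* Batch variant for an error path: 'skipped' pages will never complete.
 * Returns 1 when this decrement brings the counter to zero, mirroring
 * !atomic_sub_return(skipped, &pending_pages) in the patch. */
static int on_pages_skipped(struct demo_io_ctx *ctx, int skipped)
{
	return atomic_fetch_sub(&ctx->pending_pages, skipped) == skipped;
}
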
@@ -1376,7 +1376,7 @@ struct compress_io_ctx {
 	struct inode *inode;		/* inode the context belong to */
 	struct page **rpages;		/* pages store raw data in cluster */
 	unsigned int nr_rpages;		/* total page number in rpages */
-	refcount_t ref;			/* referrence count of raw page */
+	atomic_t pending_pages;		/* in-flight compressed page count */
 };
 
 /* decompress io context for read IO path */
@@ -1395,7 +1395,7 @@ struct decompress_io_ctx {
 	struct compress_data *cbuf;	/* virtual mapped address on cpages */
 	size_t rlen;			/* valid data length in rbuf */
 	size_t clen;			/* valid data length in cbuf */
-	refcount_t ref;			/* referrence count of compressed page */
+	atomic_t pending_pages;		/* in-flight compressed page count */
 	bool failed;			/* indicate IO error during decompression */
 	void *private;			/* payload buffer for specified decompression algorithm */
 	void *private2;			/* extra payload buffer */