Commit 3271d7eb authored by Fengnan Chang, committed by Jaegeuk Kim

f2fs: compress: reduce one page array alloc and free when write compressed page

Don't allocate a new page pointer array to replace the old one; just reuse
the old array. Introduce valid_nr_cpages to indicate the number of valid
page pointers in the array, which avoids one page array allocation and free
when writing a compressed page.
Signed-off-by: Fengnan Chang <changfengnan@vivo.com>
Reviewed-by: Chao Yu <chao@kernel.org>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
parent 8ab77458
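
For context, here is a minimal userspace sketch of the idea behind this change. The struct, the trim_tail_pages() helper, and the plain malloc()/free() buffers are simplified stand-ins invented for this sketch; the real code uses struct page, page_array_alloc() and f2fs_compress_free_page(), and lives in f2fs_compress_pages() in the hunks below. The sketch only models the pointer-array bookkeeping, not the compression itself.

/*
 * Simplified userspace model, not kernel code: after compression, unused
 * tail entries are freed in place and only valid_nr_cpages is updated;
 * the pointer array itself is never reallocated or replaced.
 */
#include <stdio.h>
#include <stdlib.h>

struct compress_ctx {
	void **cpages;                 /* pointer array sized for the worst case */
	unsigned int nr_cpages;        /* allocated length of cpages */
	unsigned int valid_nr_cpages;  /* entries that still hold compressed data */
};

/* Drop unused tail entries without allocating a second, trimmed array. */
static void trim_tail_pages(struct compress_ctx *cc, unsigned int new_nr_cpages)
{
	unsigned int i;

	for (i = new_nr_cpages; i < cc->nr_cpages; i++) {
		free(cc->cpages[i]);           /* f2fs_compress_free_page() analogue */
		cc->cpages[i] = NULL;
	}
	cc->valid_nr_cpages = new_nr_cpages;   /* nr_cpages stays untouched */
}

int main(void)
{
	struct compress_ctx cc = { .nr_cpages = 4 };
	unsigned int i;

	cc.cpages = calloc(cc.nr_cpages, sizeof(*cc.cpages));
	if (!cc.cpages)
		return 1;
	for (i = 0; i < cc.nr_cpages; i++)
		cc.cpages[i] = malloc(4096);
	cc.valid_nr_cpages = cc.nr_cpages;

	/* Suppose compression only needed two of the four pages. */
	trim_tail_pages(&cc, 2);
	printf("allocated=%u valid=%u\n", cc.nr_cpages, cc.valid_nr_cpages);

	for (i = 0; i < cc.valid_nr_cpages; i++)
		free(cc.cpages[i]);
	free(cc.cpages);
	return 0;
}

In the hunks below, nr_cpages keeps recording the allocated length of the cpages array, while the writeback path in f2fs_write_compressed_pages() switches its loops and counters to valid_nr_cpages, so the extra page array allocation and free of the old code are gone.
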
@@ -154,6 +154,7 @@ void f2fs_destroy_compress_ctx(struct compress_ctx *cc, bool reuse)
 	cc->rpages = NULL;
 	cc->nr_rpages = 0;
 	cc->nr_cpages = 0;
+	cc->valid_nr_cpages = 0;
 	if (!reuse)
 		cc->cluster_idx = NULL_CLUSTER;
 }
@@ -620,7 +621,6 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
 	const struct f2fs_compress_ops *cops =
 				f2fs_cops[fi->i_compress_algorithm];
 	unsigned int max_len, new_nr_cpages;
-	struct page **new_cpages;
 	u32 chksum = 0;
 	int i, ret;

@@ -635,6 +635,7 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
 	max_len = COMPRESS_HEADER_SIZE + cc->clen;
 	cc->nr_cpages = DIV_ROUND_UP(max_len, PAGE_SIZE);
+	cc->valid_nr_cpages = cc->nr_cpages;

 	cc->cpages = page_array_alloc(cc->inode, cc->nr_cpages);
 	if (!cc->cpages) {
@@ -685,13 +686,6 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
 	new_nr_cpages = DIV_ROUND_UP(cc->clen + COMPRESS_HEADER_SIZE, PAGE_SIZE);

-	/* Now we're going to cut unnecessary tail pages */
-	new_cpages = page_array_alloc(cc->inode, new_nr_cpages);
-	if (!new_cpages) {
-		ret = -ENOMEM;
-		goto out_vunmap_cbuf;
-	}
-
 	/* zero out any unused part of the last page */
 	memset(&cc->cbuf->cdata[cc->clen], 0,
 			(new_nr_cpages * PAGE_SIZE) -
@@ -701,10 +695,8 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
 	vm_unmap_ram(cc->rbuf, cc->cluster_size);

 	for (i = 0; i < cc->nr_cpages; i++) {
-		if (i < new_nr_cpages) {
-			new_cpages[i] = cc->cpages[i];
+		if (i < new_nr_cpages)
 			continue;
-		}
 		f2fs_compress_free_page(cc->cpages[i]);
 		cc->cpages[i] = NULL;
 	}
@@ -712,9 +704,7 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
 	if (cops->destroy_compress_ctx)
 		cops->destroy_compress_ctx(cc);

-	page_array_free(cc->inode, cc->cpages, cc->nr_cpages);
-	cc->cpages = new_cpages;
-	cc->nr_cpages = new_nr_cpages;
+	cc->valid_nr_cpages = new_nr_cpages;

 	trace_f2fs_compress_pages_end(cc->inode, cc->cluster_idx,
 							cc->clen, ret);
@@ -1308,14 +1298,14 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,

 	cic->magic = F2FS_COMPRESSED_PAGE_MAGIC;
 	cic->inode = inode;
-	atomic_set(&cic->pending_pages, cc->nr_cpages);
+	atomic_set(&cic->pending_pages, cc->valid_nr_cpages);
 	cic->rpages = page_array_alloc(cc->inode, cc->cluster_size);
 	if (!cic->rpages)
 		goto out_put_cic;

 	cic->nr_rpages = cc->cluster_size;

-	for (i = 0; i < cc->nr_cpages; i++) {
+	for (i = 0; i < cc->valid_nr_cpages; i++) {
 		f2fs_set_compressed_page(cc->cpages[i], inode,
 					cc->rpages[i + 1]->index, cic);
 		fio.compressed_page = cc->cpages[i];
@@ -1360,7 +1350,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
 		if (fio.compr_blocks && __is_valid_data_blkaddr(blkaddr))
 			fio.compr_blocks++;

-		if (i > cc->nr_cpages) {
+		if (i > cc->valid_nr_cpages) {
 			if (__is_valid_data_blkaddr(blkaddr)) {
 				f2fs_invalidate_blocks(sbi, blkaddr);
 				f2fs_update_data_blkaddr(&dn, NEW_ADDR);
@@ -1385,8 +1375,8 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,

 	if (fio.compr_blocks)
 		f2fs_i_compr_blocks_update(inode, fio.compr_blocks - 1, false);
-	f2fs_i_compr_blocks_update(inode, cc->nr_cpages, true);
-	add_compr_block_stat(inode, cc->nr_cpages);
+	f2fs_i_compr_blocks_update(inode, cc->valid_nr_cpages, true);
+	add_compr_block_stat(inode, cc->valid_nr_cpages);

 	set_inode_flag(cc->inode, FI_APPEND_WRITE);
 	if (cc->cluster_idx == 0)
@@ -1424,9 +1414,7 @@ static int f2fs_write_compressed_pages(struct compress_ctx *cc,
 	else
 		f2fs_unlock_op(sbi);
 out_free:
-	for (i = 0; i < cc->nr_cpages; i++) {
-		if (!cc->cpages[i])
-			continue;
+	for (i = 0; i < cc->valid_nr_cpages; i++) {
 		f2fs_compress_free_page(cc->cpages[i]);
 		cc->cpages[i] = NULL;
 	}
...
@@ -2987,6 +2987,7 @@ static int f2fs_write_cache_pages(struct address_space *mapping,
 		.rpages = NULL,
 		.nr_rpages = 0,
 		.cpages = NULL,
+		.valid_nr_cpages = 0,
 		.rbuf = NULL,
 		.cbuf = NULL,
 		.rlen = PAGE_SIZE * F2FS_I(inode)->i_cluster_size,
...
@@ -1486,6 +1486,7 @@ struct compress_ctx {
 	unsigned int nr_rpages;		/* total page number in rpages */
 	struct page **cpages;		/* pages store compressed data in cluster */
 	unsigned int nr_cpages;		/* total page number in cpages */
+	unsigned int valid_nr_cpages;	/* valid page number in cpages */
 	void *rbuf;			/* virtual mapped address on rpages */
 	struct compress_data *cbuf;	/* virtual mapped address on cpages */
 	size_t rlen;			/* valid data length in rbuf */
...