Commit 6fcaebac authored by Daeho Jeong, committed by Jaegeuk Kim

f2fs: change virtual mapping way for compression pages

By profiling f2fs compression workloads, I found that vmap() calls show
unexpected spikes in execution time in our test environment and are a
bottleneck of the f2fs decompression path. Replacing them with
vm_map_ram() improves f2fs decompression speed considerably.
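
For context: vmap() sets up a fresh mapping in the global vmalloc area on
every call, while vm_map_ram() can serve small mappings from per-CPU vmap
blocks and unmap them lazily, which is much cheaper on this hot path. Below
is a minimal annotated sketch of the retry pattern the patch introduces as
f2fs_vmap() in the diff; the comments are an explanatory gloss based on
general mm behavior, not text from the patch itself:

    /*
     * vm_map_ram() can fail transiently while vmap space is still held
     * by lazily-freed mappings; vm_unmap_aliases() purges those stale
     * aliases (and flushes TLBs) so a retry has room to succeed.
     */
    static void *f2fs_vmap(struct page **pages, unsigned int count)
    {
            void *buf = NULL;
            int i;

            for (i = 0; i < MAX_VMAP_RETRIES; i++) {
                    buf = vm_map_ram(pages, count, -1);  /* -1 == NUMA_NO_NODE */
                    if (buf)
                            break;
                    vm_unmap_aliases();
            }
            return buf;
    }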

[Verification]
Android Pixel 3 (ARM64, 6GB RAM, 128GB UFS)
Turned on only little cores 0-3 (at 1.785GHz)

dd if=/dev/zero of=dummy bs=1m count=1000    # write a 1000 MiB test file
echo 3 > /proc/sys/vm/drop_caches            # drop the page cache
dd if=dummy of=/dev/zero bs=512k             # sequential read of the test file

- w/o compression -
1048576000 bytes (0.9 G) copied, 2.082554 s, 480 M/s
1048576000 bytes (0.9 G) copied, 2.081634 s, 480 M/s
1048576000 bytes (0.9 G) copied, 2.090861 s, 478 M/s

- before patch -
1048576000 bytes (0.9 G) copied, 7.407527 s, 135 M/s
1048576000 bytes (0.9 G) copied, 7.283734 s, 137 M/s
1048576000 bytes (0.9 G) copied, 7.291508 s, 137 M/s

- after patch -
1048576000 bytes (0.9 G) copied, 1.998959 s, 500 M/s
1048576000 bytes (0.9 G) copied, 1.987554 s, 503 M/s
1048576000 bytes (0.9 G) copied, 1.986380 s, 503 M/s
Signed-off-by: Daeho Jeong <daehojeong@google.com>
Reviewed-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
parent 78134d03
@@ -555,6 +555,22 @@ static void f2fs_compress_free_page(struct page *page)
 	mempool_free(page, compress_page_pool);
 }
 
+#define MAX_VMAP_RETRIES	3
+
+static void *f2fs_vmap(struct page **pages, unsigned int count)
+{
+	int i;
+	void *buf = NULL;
+
+	for (i = 0; i < MAX_VMAP_RETRIES; i++) {
+		buf = vm_map_ram(pages, count, -1);
+		if (buf)
+			break;
+		vm_unmap_aliases();
+	}
+	return buf;
+}
+
 static int f2fs_compress_pages(struct compress_ctx *cc)
 {
 	struct f2fs_sb_info *sbi = F2FS_I_SB(cc->inode);
@@ -591,13 +607,13 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
 		}
 	}
 
-	cc->rbuf = vmap(cc->rpages, cc->cluster_size, VM_MAP, PAGE_KERNEL_RO);
+	cc->rbuf = f2fs_vmap(cc->rpages, cc->cluster_size);
 	if (!cc->rbuf) {
 		ret = -ENOMEM;
 		goto out_free_cpages;
 	}
 
-	cc->cbuf = vmap(cc->cpages, cc->nr_cpages, VM_MAP, PAGE_KERNEL);
+	cc->cbuf = f2fs_vmap(cc->cpages, cc->nr_cpages);
 	if (!cc->cbuf) {
 		ret = -ENOMEM;
 		goto out_vunmap_rbuf;
@@ -625,8 +641,8 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
 	memset(&cc->cbuf->cdata[cc->clen], 0,
 	       (nr_cpages * PAGE_SIZE) - (cc->clen + COMPRESS_HEADER_SIZE));
 
-	vunmap(cc->cbuf);
-	vunmap(cc->rbuf);
+	vm_unmap_ram(cc->cbuf, cc->nr_cpages);
+	vm_unmap_ram(cc->rbuf, cc->cluster_size);
 
 	for (i = nr_cpages; i < cc->nr_cpages; i++) {
 		f2fs_compress_free_page(cc->cpages[i]);
@@ -643,9 +659,9 @@ static int f2fs_compress_pages(struct compress_ctx *cc)
 	return 0;
 
 out_vunmap_cbuf:
-	vunmap(cc->cbuf);
+	vm_unmap_ram(cc->cbuf, cc->nr_cpages);
 out_vunmap_rbuf:
-	vunmap(cc->rbuf);
+	vm_unmap_ram(cc->rbuf, cc->cluster_size);
 out_free_cpages:
 	for (i = 0; i < cc->nr_cpages; i++) {
 		if (cc->cpages[i])
@@ -716,13 +732,13 @@ void f2fs_decompress_pages(struct bio *bio, struct page *page, bool verity)
 		goto out_free_dic;
 	}
 
-	dic->rbuf = vmap(dic->tpages, dic->cluster_size, VM_MAP, PAGE_KERNEL);
+	dic->rbuf = f2fs_vmap(dic->tpages, dic->cluster_size);
 	if (!dic->rbuf) {
 		ret = -ENOMEM;
 		goto destroy_decompress_ctx;
 	}
 
-	dic->cbuf = vmap(dic->cpages, dic->nr_cpages, VM_MAP, PAGE_KERNEL_RO);
+	dic->cbuf = f2fs_vmap(dic->cpages, dic->nr_cpages);
 	if (!dic->cbuf) {
 		ret = -ENOMEM;
 		goto out_vunmap_rbuf;
@@ -739,9 +755,9 @@ void f2fs_decompress_pages(struct bio *bio, struct page *page, bool verity)
 	ret = cops->decompress_pages(dic);
 
 out_vunmap_cbuf:
-	vunmap(dic->cbuf);
+	vm_unmap_ram(dic->cbuf, dic->nr_cpages);
 out_vunmap_rbuf:
-	vunmap(dic->rbuf);
+	vm_unmap_ram(dic->rbuf, dic->cluster_size);
 destroy_decompress_ctx:
 	if (cops->destroy_decompress_ctx)
 		cops->destroy_decompress_ctx(dic);
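
One detail worth noting when reading the diff: unlike vunmap(), which looks
up the mapping's size from the address, vm_unmap_ram() must be passed the
same page count that was given to vm_map_ram(). That is why cbuf is always
unmapped with nr_cpages and rbuf with cluster_size, on both the success and
error paths. A minimal caller-side sketch (illustrative only, not code from
the patch):

    unsigned int count = cc->cluster_size;      /* must match the map call */
    void *buf = f2fs_vmap(cc->rpages, count);

    if (!buf)
            return -ENOMEM;
    /* ... access the cluster through the linear mapping at buf ... */
    vm_unmap_ram(buf, count);                   /* same count as vm_map_ram() */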