Commit 0f6273ab authored by Chunhai Guo, committed by Gao Xiang

erofs: add a reserved buffer pool for lz4 decompression

This adds a special global buffer pool, placed at the end of the global
buffer array, for reserved pages.

Using a reserved pool for LZ4 decompression significantly reduces the
time spent on extra temporary page allocation in extreme low-memory
scenarios.

The table below shows the reduction in time spent on page allocation for
LZ4 decompression when using a reserved pool. The results were obtained
from multi-app launch benchmarks on ARM64 Android devices running the
5.15 kernel with an 8-core CPU and 8GB of memory. In the benchmark, we
launched 16 frequently-used apps, with the camera app launched last in
each round. The data in the table is the average page-allocation time
for the camera app in each round.

After using the reserved pool, the overall launch time of the camera app
improved by an average of 150ms, as measured from the systrace log.

+--------------+---------------+--------------+---------+
|              | w/o page pool | w/ page pool |  diff   |
+--------------+---------------+--------------+---------+
| Average (ms) |     3434      |      21      | -99.38% |
+--------------+---------------+--------------+---------+

Based on the benchmark logs, 64 pages are sufficient for 95% of
scenarios. This value can be adjusted via the `reserved_pages` module
parameter; its default is 0, which leaves the reserved pool disabled.
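For example, with erofs built into the kernel, the pool size used in the
benchmark above could be requested from the kernel command line
(assuming the usual <module>.<parameter> naming for built-in module
parameters; with a modular erofs, the same option would be passed at
module load time instead):

  erofs.reserved_pages=64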

This pool is currently only used for the LZ4 decompressor, but it can be
applied to more decompressors if needed.
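As a rough sketch (hypothetical helper, not part of this commit), a
caller in another decompressor would only need to pass tryrsv=true to
__erofs_allocpage() to try the reserved pool, mirroring the LZ4 hunk in
the diff below:

	/*
	 * Hypothetical example caller: allocate a short-lived temporary page,
	 * trying the local pagepool first, then the reserved pool, and only
	 * then falling back to alloc_page().
	 */
	static struct page *example_alloc_tmppage(struct page **pagepool, gfp_t gfp)
	{
		struct page *page = __erofs_allocpage(pagepool, gfp, true);

		if (page)
			set_page_private(page, Z_EROFS_SHORTLIVED_PAGE);
		return page;
	}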
Signed-off-by: Chunhai Guo <guochunhai@vivo.com>
Reviewed-by: Gao Xiang <hsiangkao@linux.alibaba.com>
Link: https://lore.kernel.org/r/20240402131523.2703948-1-guochunhai@vivo.com
Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
parent d6db47e5
@@ -111,7 +111,7 @@ static int z_erofs_lz4_prepare_dstpages(struct z_erofs_lz4_decompress_ctx *ctx,
 			victim = availables[--top];
 			get_page(victim);
 		} else {
-			victim = erofs_allocpage(pagepool, rq->gfp);
+			victim = __erofs_allocpage(pagepool, rq->gfp, true);
 			if (!victim)
 				return -ENOMEM;
 			set_page_private(victim, Z_EROFS_SHORTLIVED_PAGE);
@@ -438,7 +438,11 @@ void erofs_unregister_sysfs(struct super_block *sb);
 int __init erofs_init_sysfs(void);
 void erofs_exit_sysfs(void);
 
-struct page *erofs_allocpage(struct page **pagepool, gfp_t gfp);
+struct page *__erofs_allocpage(struct page **pagepool, gfp_t gfp, bool tryrsv);
+static inline struct page *erofs_allocpage(struct page **pagepool, gfp_t gfp)
+{
+	return __erofs_allocpage(pagepool, gfp, false);
+}
 static inline void erofs_pagepool_add(struct page **pagepool, struct page *page)
 {
 	set_page_private(page, (unsigned long)*pagepool);
@@ -12,10 +12,12 @@ struct z_erofs_gbuf {
 	unsigned int nrpages;
 };
 
-static struct z_erofs_gbuf *z_erofs_gbufpool;
-static unsigned int z_erofs_gbuf_count, z_erofs_gbuf_nrpages;
+static struct z_erofs_gbuf *z_erofs_gbufpool, *z_erofs_rsvbuf;
+static unsigned int z_erofs_gbuf_count, z_erofs_gbuf_nrpages,
+		z_erofs_rsv_nrpages;
 
 module_param_named(global_buffers, z_erofs_gbuf_count, uint, 0444);
+module_param_named(reserved_pages, z_erofs_rsv_nrpages, uint, 0444);
 
 static atomic_long_t erofs_global_shrink_cnt;	/* for all mounted instances */
 /* protected by 'erofs_sb_list_lock' */
@@ -116,19 +118,30 @@ int z_erofs_gbuf_growsize(unsigned int nrpages)
 
 int __init z_erofs_gbuf_init(void)
 {
-	unsigned int i = num_possible_cpus();
+	unsigned int i, total = num_possible_cpus();
 
-	if (!z_erofs_gbuf_count)
-		z_erofs_gbuf_count = i;
-	else
-		z_erofs_gbuf_count = min(z_erofs_gbuf_count, i);
+	if (z_erofs_gbuf_count)
+		total = min(z_erofs_gbuf_count, total);
+	z_erofs_gbuf_count = total;
 
-	z_erofs_gbufpool = kcalloc(z_erofs_gbuf_count,
-				   sizeof(*z_erofs_gbufpool), GFP_KERNEL);
+	/* The last (special) global buffer is the reserved buffer */
+	total += !!z_erofs_rsv_nrpages;
+
+	z_erofs_gbufpool = kcalloc(total, sizeof(*z_erofs_gbufpool),
+				   GFP_KERNEL);
 	if (!z_erofs_gbufpool)
 		return -ENOMEM;
 
-	for (i = 0; i < z_erofs_gbuf_count; ++i)
+	if (z_erofs_rsv_nrpages) {
+		z_erofs_rsvbuf = &z_erofs_gbufpool[total - 1];
+		z_erofs_rsvbuf->pages = kcalloc(z_erofs_rsv_nrpages,
+				sizeof(*z_erofs_rsvbuf->pages), GFP_KERNEL);
+		if (!z_erofs_rsvbuf->pages) {
+			z_erofs_rsvbuf = NULL;
+			z_erofs_rsv_nrpages = 0;
+		}
+	}
+	for (i = 0; i < total; ++i)
 		spin_lock_init(&z_erofs_gbufpool[i].lock);
 	return 0;
 }
@@ -137,7 +150,7 @@ void z_erofs_gbuf_exit(void)
 {
 	int i;
 
-	for (i = 0; i < z_erofs_gbuf_count; ++i) {
+	for (i = 0; i < z_erofs_gbuf_count + (!!z_erofs_rsvbuf); ++i) {
 		struct z_erofs_gbuf *gbuf = &z_erofs_gbufpool[i];
 
 		if (gbuf->ptr) {
@@ -157,16 +170,22 @@ void z_erofs_gbuf_exit(void)
 	kfree(z_erofs_gbufpool);
 }
 
-struct page *erofs_allocpage(struct page **pagepool, gfp_t gfp)
+struct page *__erofs_allocpage(struct page **pagepool, gfp_t gfp, bool tryrsv)
 {
 	struct page *page = *pagepool;
 
 	if (page) {
-		DBG_BUGON(page_ref_count(page) != 1);
 		*pagepool = (struct page *)page_private(page);
-		return page;
+	} else if (tryrsv && z_erofs_rsvbuf && z_erofs_rsvbuf->nrpages) {
+		spin_lock(&z_erofs_rsvbuf->lock);
+		if (z_erofs_rsvbuf->nrpages)
+			page = z_erofs_rsvbuf->pages[--z_erofs_rsvbuf->nrpages];
+		spin_unlock(&z_erofs_rsvbuf->lock);
 	}
-	return alloc_page(gfp);
+	if (!page)
+		page = alloc_page(gfp);
+	DBG_BUGON(page && page_ref_count(page) != 1);
+	return page;
 }
 
 void erofs_release_pages(struct page **pagepool)
@@ -175,6 +194,18 @@ void erofs_release_pages(struct page **pagepool)
 		struct page *page = *pagepool;
 
 		*pagepool = (struct page *)page_private(page);
+		/* try to fill reserved global pool first */
+		if (z_erofs_rsvbuf && z_erofs_rsvbuf->nrpages <
+				z_erofs_rsv_nrpages) {
+			spin_lock(&z_erofs_rsvbuf->lock);
+			if (z_erofs_rsvbuf->nrpages < z_erofs_rsv_nrpages) {
+				z_erofs_rsvbuf->pages[z_erofs_rsvbuf->nrpages++]
+						= page;
+				spin_unlock(&z_erofs_rsvbuf->lock);
+				continue;
+			}
+			spin_unlock(&z_erofs_rsvbuf->lock);
+		}
 		put_page(page);
 	}
 }