Commit 73d03931 authored by Gao Xiang, committed by Greg Kroah-Hartman

erofs: kill use_vmap module parameter

As Christoph said [1],
"vm_map_ram is supposed to generally behave better.  So if
it doesn't, please report that to the arch maintainer
and linux-mm so that they can look into the issue.  Having
users make choices of deep down kernel internals is just
a horrible interface.

Please talk to maintainers of other bits of the kernel
if you see issues and / or need enhancements."

Let's revisit the previous conclusion and kill the vmap
approach, together with the use_vmap module parameter.

[1] https://lore.kernel.org/r/20190830165533.GA10909@infradead.org

Reported-by: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Gao Xiang <gaoxiang25@huawei.com>
Link: https://lore.kernel.org/r/20190904020912.63925-21-gaoxiang25@huawei.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent e2c71e74
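
For context before reading the diff: vm_map_ram() can fail transiently (for small mappings it allocates from per-CPU vmap blocks, which can run out while lazily freed areas are still pending), so the caller flushes stale aliases with vm_unmap_aliases() and retries a bounded number of times. A minimal sketch of that pattern, assuming the era-appropriate four-argument vm_map_ram(); the helper name is hypothetical and not part of the commit:

#include <linux/mm.h>
#include <linux/vmalloc.h>

/* Hypothetical helper mirroring the retry pattern this patch inlines:
 * try vm_map_ram() up to three times, calling vm_unmap_aliases() between
 * attempts to purge lazily freed vmap blocks that may be hogging space.
 */
static void *map_pages_bounded_retry(struct page **pages, unsigned int count)
{
	int tries = 0;

	while (1) {
		void *addr = vm_map_ram(pages, count, -1, PAGE_KERNEL);

		if (addr || ++tries >= 3)
			return addr;	/* NULL if all three attempts fail */
		vm_unmap_aliases();
	}
}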
--- a/Documentation/filesystems/erofs.txt
+++ b/Documentation/filesystems/erofs.txt
@@ -67,10 +67,6 @@ cache_strategy=%s      Select a strategy for cached decompression from now on:
                          It still does in-place I/O decompression
                          for the rest compressed physical clusters.
 
-Module parameters
-=================
-use_vmap=[0|1]         Use vmap() instead of vm_map_ram() (default 0).
-
 On-disk details
 ===============
 
--- a/fs/erofs/decompressor.c
+++ b/fs/erofs/decompressor.c
@@ -28,10 +28,6 @@ struct z_erofs_decompressor {
 	char *name;
 };
 
-static bool use_vmap;
-module_param(use_vmap, bool, 0444);
-MODULE_PARM_DESC(use_vmap, "Use vmap() instead of vm_map_ram() (default 0)");
-
 static int z_erofs_lz4_prepare_destpages(struct z_erofs_decompress_req *rq,
 					 struct list_head *pagepool)
 {
@@ -221,32 +217,6 @@ static void copy_from_pcpubuf(struct page **out, const char *dst,
 	}
 }
 
-static void *erofs_vmap(struct page **pages, unsigned int count)
-{
-	int i = 0;
-
-	if (use_vmap)
-		return vmap(pages, count, VM_MAP, PAGE_KERNEL);
-
-	while (1) {
-		void *addr = vm_map_ram(pages, count, -1, PAGE_KERNEL);
-
-		/* retry two more times (totally 3 times) */
-		if (addr || ++i >= 3)
-			return addr;
-		vm_unmap_aliases();
-	}
-	return NULL;
-}
-
-static void erofs_vunmap(const void *mem, unsigned int count)
-{
-	if (!use_vmap)
-		vm_unmap_ram(mem, count);
-	else
-		vunmap(mem);
-}
-
 static int z_erofs_decompress_generic(struct z_erofs_decompress_req *rq,
 				      struct list_head *pagepool)
 {
@@ -255,7 +225,7 @@ static int z_erofs_decompress_generic(struct z_erofs_decompress_req *rq,
 	const struct z_erofs_decompressor *alg = decompressors + rq->alg;
 	unsigned int dst_maptype;
 	void *dst;
-	int ret;
+	int ret, i;
 
 	if (nrpages_out == 1 && !rq->inplace_io) {
 		DBG_BUGON(!*rq->out);
@@ -293,9 +263,19 @@ static int z_erofs_decompress_generic(struct z_erofs_decompress_req *rq,
 		goto dstmap_out;
 	}
 
-	dst = erofs_vmap(rq->out, nrpages_out);
+	i = 0;
+	while (1) {
+		dst = vm_map_ram(rq->out, nrpages_out, -1, PAGE_KERNEL);
+
+		/* retry two more times (totally 3 times) */
+		if (dst || ++i >= 3)
+			break;
+		vm_unmap_aliases();
+	}
+
 	if (!dst)
 		return -ENOMEM;
+
 	dst_maptype = 2;
 
 dstmap_out:
@@ -304,7 +284,7 @@ static int z_erofs_decompress_generic(struct z_erofs_decompress_req *rq,
 	if (!dst_maptype)
 		kunmap_atomic(dst);
 	else if (dst_maptype == 2)
-		erofs_vunmap(dst, nrpages_out);
+		vm_unmap_ram(dst, nrpages_out);
 
 	return ret;
 }
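
One pairing rule worth noting for the hunk above: a vm_map_ram() mapping must be torn down with vm_unmap_ram() using the same page count that was mapped, which is why the cleanup path keys off dst_maptype == 2 and passes nrpages_out again. A tiny usage sketch, with variable names that are illustrative rather than from the patch:

	struct page **pages;	/* previously gathered output pages */
	unsigned int nrpages;	/* number of entries in pages[] */
	void *virt = vm_map_ram(pages, nrpages, -1, PAGE_KERNEL);

	if (virt) {
		/* ... read/write through the virtually contiguous buffer ... */
		vm_unmap_ram(virt, nrpages);	/* count must match the map call */
	}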