Commit 10e5f6e4 authored by Gao Xiang

erofs: introduce z_erofs_fixup_insize

To prepare for the upcoming ztailpacking feature, introduce
z_erofs_fixup_insize() and pageofs_in to wrap up the process
of getting the exact compressed size via zero padding.

Link: https://lore.kernel.org/r/20211228054604.114518-3-hsiangkao@linux.alibaba.com
Reviewed-by: Chao Yu <chao@kernel.org>
Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
parent d67aee76
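
For illustration only (not part of the patch below): a minimal user-space sketch of the zero-padding fixup that z_erofs_fixup_insize() performs, assuming a plain byte buffer instead of the kernel's decompress request and a hand-rolled loop standing in for memchr_inv(); the names fixup_insize and fixup_result are made up for this sketch.

/*
 * Illustrative sketch only -- not the kernel implementation.
 * A compressed block may be preceded by zero padding; skipping the
 * leading zero bytes recovers both the exact compressed size and
 * the in-page offset of the real data.
 */
#include <stddef.h>

struct fixup_result {
	size_t inputsize;	/* exact compressed bytes after the padding */
	size_t pageofs_in;	/* offset of the first non-zero byte */
};

/* returns 0 on success, -1 if the buffer is all zeroes (corrupted) */
static int fixup_insize(const unsigned char *padbuf, size_t padbufsize,
			struct fixup_result *res)
{
	size_t i = 0;

	/* user-space stand-in for memchr_inv(): find the first non-zero byte */
	while (i < padbufsize && !padbuf[i])
		i++;
	if (i == padbufsize)
		return -1;	/* the kernel returns -EFSCORRUPTED here */

	res->pageofs_in = i;
	res->inputsize = padbufsize - i;
	return 0;
}

In the actual patch, both callers clamp padbufsize to min(rq->inputsize, EROFS_BLKSIZ - rq->pageofs_in), so the scan never runs past the compressed data or past the end of the block within the first mapped page.
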
--- a/fs/erofs/compress.h
+++ b/fs/erofs/compress.h
@@ -12,7 +12,7 @@ struct z_erofs_decompress_req {
 	struct super_block *sb;
 	struct page **in, **out;
 
-	unsigned short pageofs_out;
+	unsigned short pageofs_in, pageofs_out;
 	unsigned int inputsize, outputsize;
 
 	/* indicate the algorithm will be used for decompression */
@@ -87,6 +87,8 @@ static inline bool erofs_page_is_managed(const struct erofs_sb_info *sbi,
 	return page->mapping == MNGD_MAPPING(sbi);
 }
 
+int z_erofs_fixup_insize(struct z_erofs_decompress_req *rq, const char *padbuf,
+			 unsigned int padbufsize);
 int z_erofs_decompress(struct z_erofs_decompress_req *rq,
 		       struct page **pagepool);
 
--- a/fs/erofs/decompressor.c
+++ b/fs/erofs/decompressor.c
@@ -184,6 +184,24 @@ static void *z_erofs_lz4_handle_overlap(struct z_erofs_lz4_decompress_ctx *ctx,
 	return src;
 }
 
+/*
+ * Get the exact inputsize with zero_padding feature.
+ *  - For LZ4, it should work if zero_padding feature is on (5.3+);
+ *  - For MicroLZMA, it'd be enabled all the time.
+ */
+int z_erofs_fixup_insize(struct z_erofs_decompress_req *rq, const char *padbuf,
+			 unsigned int padbufsize)
+{
+	const char *padend;
+
+	padend = memchr_inv(padbuf, 0, padbufsize);
+	if (!padend)
+		return -EFSCORRUPTED;
+	rq->inputsize -= padend - padbuf;
+	rq->pageofs_in += padend - padbuf;
+	return 0;
+}
+
 static int z_erofs_lz4_decompress_mem(struct z_erofs_lz4_decompress_ctx *ctx,
 				      u8 *out)
 {
@@ -198,21 +216,19 @@ static int z_erofs_lz4_decompress_mem(struct z_erofs_lz4_decompress_ctx *ctx,
 	inputmargin = 0;
 	support_0padding = false;
 
-	/* decompression inplace is only safe when zero_padding is enabled */
+	/* LZ4 decompression inplace is only safe if zero_padding is enabled */
 	if (erofs_sb_has_zero_padding(EROFS_SB(rq->sb))) {
 		support_0padding = true;
-
-		while (!headpage[inputmargin & ~PAGE_MASK])
-			if (!(++inputmargin & ~PAGE_MASK))
-				break;
-
-		if (inputmargin >= rq->inputsize) {
+		ret = z_erofs_fixup_insize(rq, headpage + rq->pageofs_in,
+				min_t(unsigned int, rq->inputsize,
+				      EROFS_BLKSIZ - rq->pageofs_in));
+		if (ret) {
 			kunmap_atomic(headpage);
-			return -EIO;
+			return ret;
 		}
 	}
 
-	rq->inputsize -= inputmargin;
+	inputmargin = rq->pageofs_in;
 	src = z_erofs_lz4_handle_overlap(ctx, headpage, &inputmargin,
 					 &maptype, support_0padding);
 	if (IS_ERR(src))
--- a/fs/erofs/decompressor_lzma.c
+++ b/fs/erofs/decompressor_lzma.c
@@ -156,7 +156,7 @@ int z_erofs_lzma_decompress(struct z_erofs_decompress_req *rq,
 		PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
 	const unsigned int nrpages_in =
 		PAGE_ALIGN(rq->inputsize) >> PAGE_SHIFT;
-	unsigned int inputmargin, inlen, outlen, pageofs;
+	unsigned int inlen, outlen, pageofs;
 	struct z_erofs_lzma *strm;
 	u8 *kin;
 	bool bounced = false;
@@ -164,16 +164,13 @@ int z_erofs_lzma_decompress(struct z_erofs_decompress_req *rq,
 
 	/* 1. get the exact LZMA compressed size */
 	kin = kmap(*rq->in);
-	inputmargin = 0;
-	while (!kin[inputmargin & ~PAGE_MASK])
-		if (!(++inputmargin & ~PAGE_MASK))
-			break;
-
-	if (inputmargin >= PAGE_SIZE) {
+	err = z_erofs_fixup_insize(rq, kin + rq->pageofs_in,
+				   min_t(unsigned int, rq->inputsize,
+					 EROFS_BLKSIZ - rq->pageofs_in));
+	if (err) {
 		kunmap(*rq->in);
-		return -EFSCORRUPTED;
+		return err;
 	}
-	rq->inputsize -= inputmargin;
 
 	/* 2. get an available lzma context */
 again:
@@ -193,9 +190,9 @@ int z_erofs_lzma_decompress(struct z_erofs_decompress_req *rq,
 	xz_dec_microlzma_reset(strm->state, inlen, outlen,
 			       !rq->partial_decoding);
 	pageofs = rq->pageofs_out;
-	strm->buf.in = kin + inputmargin;
+	strm->buf.in = kin + rq->pageofs_in;
 	strm->buf.in_pos = 0;
-	strm->buf.in_size = min_t(u32, inlen, PAGE_SIZE - inputmargin);
+	strm->buf.in_size = min_t(u32, inlen, PAGE_SIZE - rq->pageofs_in);
 	inlen -= strm->buf.in_size;
 	strm->buf.out = NULL;
 	strm->buf.out_pos = 0;