Commit 4f05687f authored by Gao Xiang

erofs: introduce struct z_erofs_decompress_backend

Let's introduce struct z_erofs_decompress_backend in order to pass
the decompression backend context between helper functions more
easily and avoid too many arguments.
Acked-by: Chao Yu <chao@kernel.org>
Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
Link: https://lore.kernel.org/r/20220715154203.48093-13-hsiangkao@linux.alibaba.com
parent e7368187
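
The refactor below follows a common pattern: state that previously travelled
as separate parameters (super block, pcluster, decompressed/compressed page
arrays, page pool) is bundled into a single on-stack context that every helper
receives. The following is a rough, self-contained userspace sketch of that
pattern only; it is not the erofs code itself, and its types, helper names and
page counts are stand-ins.

/*
 * Minimal userspace sketch of the "backend context" pattern used by this
 * commit.  NOT the real erofs code: struct page and the helpers here are
 * stubs, only the shape of the refactor is shown.
 */
#include <stdio.h>
#include <string.h>

#define ONSTACK_PAGES 32			/* mirrors Z_EROFS_ONSTACK_PAGES */

struct page { int id; };			/* stub */

/* One context object replaces a long argument list. */
struct decompress_backend {
	struct page *onstack_pages[ONSTACK_PAGES];
	struct page **decompressed_pages;	/* output pages */
	struct page **compressed_pages;		/* input pages */
	unsigned int nr_pages;
};

/* Helpers now take the whole context instead of four or five pointers. */
static int parse_out_pages(struct decompress_backend *be)
{
	memset(be->decompressed_pages, 0,
	       sizeof(struct page *) * be->nr_pages);
	return 0;
}

static int decompress_one(struct decompress_backend *be)
{
	/* small requests reuse the on-stack array, as the real code does */
	if (be->nr_pages <= ONSTACK_PAGES)
		be->decompressed_pages = be->onstack_pages;
	else
		return -1;	/* the kvcalloc() fallback is omitted here */
	return parse_out_pages(be);
}

int main(void)
{
	struct decompress_backend be = { .nr_pages = 4 };

	printf("decompress_one() returned %d\n", decompress_one(&be));
	return 0;
}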
--- a/fs/erofs/zdata.c
+++ b/fs/erofs/zdata.c
@@ -847,9 +847,22 @@ static bool z_erofs_page_is_invalidated(struct page *page)
 	return !page->mapping && !z_erofs_is_shortlived_page(page);
 }
 
-static int z_erofs_parse_out_bvecs(struct z_erofs_pcluster *pcl,
-				   struct page **pages, struct page **pagepool)
+struct z_erofs_decompress_backend {
+	struct page *onstack_pages[Z_EROFS_ONSTACK_PAGES];
+	struct super_block *sb;
+	struct z_erofs_pcluster *pcl;
+
+	/* pages with the longest decompressed length for deduplication */
+	struct page **decompressed_pages;
+	/* pages to keep the compressed data */
+	struct page **compressed_pages;
+
+	struct page **pagepool;
+};
+
+static int z_erofs_parse_out_bvecs(struct z_erofs_decompress_backend *be)
 {
+	struct z_erofs_pcluster *pcl = be->pcl;
 	struct z_erofs_bvec_iter biter;
 	struct page *old_bvpage;
 	int i, err = 0;
@@ -857,39 +870,39 @@ static int z_erofs_parse_out_bvecs(struct z_erofs_pcluster *pcl,
 	z_erofs_bvec_iter_begin(&biter, &pcl->bvset, Z_EROFS_INLINE_BVECS, 0);
 	for (i = 0; i < pcl->vcnt; ++i) {
 		struct z_erofs_bvec bvec;
-		unsigned int pagenr;
+		unsigned int pgnr;
 
 		z_erofs_bvec_dequeue(&biter, &bvec, &old_bvpage);
 		if (old_bvpage)
-			z_erofs_put_shortlivedpage(pagepool, old_bvpage);
+			z_erofs_put_shortlivedpage(be->pagepool, old_bvpage);
 
-		pagenr = (bvec.offset + pcl->pageofs_out) >> PAGE_SHIFT;
-		DBG_BUGON(pagenr >= pcl->nr_pages);
+		pgnr = (bvec.offset + pcl->pageofs_out) >> PAGE_SHIFT;
+		DBG_BUGON(pgnr >= pcl->nr_pages);
 		DBG_BUGON(z_erofs_page_is_invalidated(bvec.page));
 		/*
 		 * currently EROFS doesn't support multiref(dedup),
 		 * so here erroring out one multiref page.
 		 */
-		if (pages[pagenr]) {
+		if (be->decompressed_pages[pgnr]) {
 			DBG_BUGON(1);
-			z_erofs_page_mark_eio(pages[pagenr]);
-			z_erofs_onlinepage_endio(pages[pagenr]);
+			z_erofs_page_mark_eio(be->decompressed_pages[pgnr]);
+			z_erofs_onlinepage_endio(be->decompressed_pages[pgnr]);
 			err = -EFSCORRUPTED;
 		}
-		pages[pagenr] = bvec.page;
+		be->decompressed_pages[pgnr] = bvec.page;
 	}
 
 	old_bvpage = z_erofs_bvec_iter_end(&biter);
 	if (old_bvpage)
-		z_erofs_put_shortlivedpage(pagepool, old_bvpage);
+		z_erofs_put_shortlivedpage(be->pagepool, old_bvpage);
 
 	return err;
 }
 
-static struct page **z_erofs_parse_in_bvecs(struct erofs_sb_info *sbi,
-			struct z_erofs_pcluster *pcl, struct page **pages,
-			struct page **pagepool, bool *overlapped)
+static int z_erofs_parse_in_bvecs(struct z_erofs_decompress_backend *be,
+				  bool *overlapped)
 {
+	struct z_erofs_pcluster *pcl = be->pcl;
 	unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
 	struct page **compressed_pages;
 	int i, err = 0;
@@ -919,7 +932,7 @@ static struct page **z_erofs_parse_in_bvecs(struct erofs_sb_info *sbi,
 		DBG_BUGON(z_erofs_page_is_invalidated(page));
 		if (!z_erofs_is_shortlived_page(page)) {
-			if (erofs_page_is_managed(sbi, page)) {
+			if (erofs_page_is_managed(EROFS_SB(be->sb), page)) {
 				if (!PageUptodate(page))
 					err = -EIO;
 				continue;
@@ -927,60 +940,58 @@ static struct page **z_erofs_parse_in_bvecs(struct erofs_sb_info *sbi,
 			pgnr = (bvec->offset + pcl->pageofs_out) >> PAGE_SHIFT;
 			DBG_BUGON(pgnr >= pcl->nr_pages);
-			if (pages[pgnr]) {
+			if (be->decompressed_pages[pgnr]) {
 				DBG_BUGON(1);
-				z_erofs_page_mark_eio(pages[pgnr]);
-				z_erofs_onlinepage_endio(pages[pgnr]);
+				z_erofs_page_mark_eio(
+						be->decompressed_pages[pgnr]);
+				z_erofs_onlinepage_endio(
+						be->decompressed_pages[pgnr]);
 				err = -EFSCORRUPTED;
 			}
-			pages[pgnr] = page;
+			be->decompressed_pages[pgnr] = page;
 			*overlapped = true;
 		}
 	}
 
 	if (err) {
 		kfree(compressed_pages);
-		return ERR_PTR(err);
+		return err;
 	}
-	return compressed_pages;
+	be->compressed_pages = compressed_pages;
+	return 0;
 }
 
-static int z_erofs_decompress_pcluster(struct super_block *sb,
-				       struct z_erofs_pcluster *pcl,
-				       struct page **pagepool, int err)
+static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be,
+				       int err)
 {
-	struct erofs_sb_info *const sbi = EROFS_SB(sb);
+	struct erofs_sb_info *const sbi = EROFS_SB(be->sb);
+	struct z_erofs_pcluster *pcl = be->pcl;
 	unsigned int pclusterpages = z_erofs_pclusterpages(pcl);
 	unsigned int i, inputsize, outputsize, llen, nr_pages;
-	struct page *pages_onstack[Z_EROFS_VMAP_ONSTACK_PAGES];
-	struct page **pages, **compressed_pages, *page;
+	struct page *page;
 	int err2;
 	bool overlapped, partial;
 
-	might_sleep();
 	DBG_BUGON(!READ_ONCE(pcl->nr_pages));
 
 	mutex_lock(&pcl->lock);
 	nr_pages = pcl->nr_pages;
 
-	if (nr_pages <= Z_EROFS_VMAP_ONSTACK_PAGES)
-		pages = pages_onstack;
-	else
-		pages = kvmalloc_array(nr_pages, sizeof(struct page *),
-				       GFP_KERNEL | __GFP_NOFAIL);
-
-	for (i = 0; i < nr_pages; ++i)
-		pages[i] = NULL;
-
-	err2 = z_erofs_parse_out_bvecs(pcl, pages, pagepool);
+	if (nr_pages <= Z_EROFS_ONSTACK_PAGES) {
+		be->decompressed_pages = be->onstack_pages;
+		memset(be->decompressed_pages, 0,
+		       sizeof(struct page *) * nr_pages);
+	} else {
+		be->decompressed_pages =
+			kvcalloc(nr_pages, sizeof(struct page *),
+				 GFP_KERNEL | __GFP_NOFAIL);
+	}
+
+	err2 = z_erofs_parse_out_bvecs(be);
+	if (err2)
+		err = err2;
+	err2 = z_erofs_parse_in_bvecs(be, &overlapped);
 	if (err2)
 		err = err2;
-
-	compressed_pages = z_erofs_parse_in_bvecs(sbi, pcl, pages,
-						  pagepool, &overlapped);
-	if (IS_ERR(compressed_pages)) {
-		err = PTR_ERR(compressed_pages);
-		compressed_pages = NULL;
-	}
-
 	if (err)
 		goto out;
@@ -1000,9 +1011,9 @@ static int z_erofs_decompress_pcluster(struct super_block *sb,
 		inputsize = pclusterpages * PAGE_SIZE;
 
 	err = z_erofs_decompress(&(struct z_erofs_decompress_req) {
-					.sb = sb,
-					.in = compressed_pages,
-					.out = pages,
+					.sb = be->sb,
+					.in = be->compressed_pages,
+					.out = be->decompressed_pages,
 					.pageofs_in = pcl->pageofs_in,
 					.pageofs_out = pcl->pageofs_out,
 					.inputsize = inputsize,
@@ -1010,7 +1021,7 @@ static int z_erofs_decompress_pcluster(struct super_block *sb,
 					.alg = pcl->algorithmformat,
 					.inplace_io = overlapped,
 					.partial_decoding = partial
-				 }, pagepool);
+				 }, be->pagepool);
 
 out:
 	/* must handle all compressed pages before actual file pages */
@@ -1026,29 +1037,29 @@ static int z_erofs_decompress_pcluster(struct super_block *sb,
 				continue;
 
 			/* recycle all individual short-lived pages */
-			(void)z_erofs_put_shortlivedpage(pagepool, page);
+			(void)z_erofs_put_shortlivedpage(be->pagepool, page);
 			WRITE_ONCE(pcl->compressed_bvecs[i].page, NULL);
 		}
 	}
-	kfree(compressed_pages);
+	kfree(be->compressed_pages);
 
 	for (i = 0; i < nr_pages; ++i) {
-		page = pages[i];
+		page = be->decompressed_pages[i];
 		if (!page)
 			continue;
 
 		DBG_BUGON(z_erofs_page_is_invalidated(page));
 
 		/* recycle all individual short-lived pages */
-		if (z_erofs_put_shortlivedpage(pagepool, page))
+		if (z_erofs_put_shortlivedpage(be->pagepool, page))
 			continue;
 		if (err)
 			z_erofs_page_mark_eio(page);
 		z_erofs_onlinepage_endio(page);
 	}
 
-	if (pages != pages_onstack)
-		kvfree(pages);
+	if (be->decompressed_pages != be->onstack_pages)
+		kvfree(be->decompressed_pages);
 
 	pcl->nr_pages = 0;
 	pcl->bvset.nextpage = NULL;
@@ -1063,23 +1074,23 @@ static int z_erofs_decompress_pcluster(struct super_block *sb,
 static void z_erofs_decompress_queue(const struct z_erofs_decompressqueue *io,
 				     struct page **pagepool)
 {
+	struct z_erofs_decompress_backend be = {
+		.sb = io->sb,
+		.pagepool = pagepool,
+	};
 	z_erofs_next_pcluster_t owned = io->head;
 
 	while (owned != Z_EROFS_PCLUSTER_TAIL_CLOSED) {
-		struct z_erofs_pcluster *pcl;
-
-		/* no possible that 'owned' equals Z_EROFS_WORK_TPTR_TAIL */
+		/* impossible that 'owned' equals Z_EROFS_WORK_TPTR_TAIL */
 		DBG_BUGON(owned == Z_EROFS_PCLUSTER_TAIL);
-
-		/* no possible that 'owned' equals NULL */
+		/* impossible that 'owned' equals Z_EROFS_PCLUSTER_NIL */
 		DBG_BUGON(owned == Z_EROFS_PCLUSTER_NIL);
 
-		pcl = container_of(owned, struct z_erofs_pcluster, next);
-		owned = READ_ONCE(pcl->next);
+		be.pcl = container_of(owned, struct z_erofs_pcluster, next);
+		owned = READ_ONCE(be.pcl->next);
 
-		z_erofs_decompress_pcluster(io->sb, pcl, pagepool,
-					    io->eio ? -EIO : 0);
-		erofs_workgroup_put(&pcl->obj);
+		z_erofs_decompress_pcluster(&be, io->eio ? -EIO : 0);
+		erofs_workgroup_put(&be.pcl->obj);
 	}
 }
 
@@ -1105,7 +1116,6 @@ static void z_erofs_decompress_kickoff(struct z_erofs_decompressqueue *io,
 	if (sync) {
 		if (!atomic_add_return(bios, &io->pending_bios))
 			complete(&io->u.done);
-
 		return;
 	}
--- a/fs/erofs/zdata.h
+++ b/fs/erofs/zdata.h
@@ -173,7 +173,6 @@ static inline void z_erofs_onlinepage_endio(struct page *page)
 	}
 }
 
-#define Z_EROFS_VMAP_ONSTACK_PAGES	\
-	min_t(unsigned int, THREAD_SIZE / 8 / sizeof(struct page *), 96U)
+#define Z_EROFS_ONSTACK_PAGES		32
 
 #endif