Commit aeebae9d authored by Gao Xiang

erofs: move preparation logic into z_erofs_pcluster_begin()

Some preparation logic should be part of z_erofs_pcluster_begin()
instead of z_erofs_do_read_page().  Let's move now.
Reviewed-by: Yue Hu <huyue2@coolpad.com>
Reviewed-by: Chao Yu <chao@kernel.org>
Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
Link: https://lore.kernel.org/r/20230817082813.81180-3-hsiangkao@linux.alibaba.com
parent dcba1b23
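
For orientation before the diff, here is a simplified, standalone C sketch of the control flow that z_erofs_pcluster_begin() gains from this patch: the inline-vs-cached decision, the inline metadata read, and the inplace cursor setup all move out of z_erofs_do_read_page(). The stub names below (pcluster_begin(), read_inline_metadata(), bind_cache(), MODE_FOLLOWED_NOINPLACE) are hypothetical placeholders, not the kernel symbols; the real code operates on struct z_erofs_decompress_frontend and calls erofs_read_metabuf(), z_erofs_bind_cache() and z_erofs_pclusterpages() as shown in the hunks below.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel structures the patch touches. */
struct pcluster {
        bool inline_data;          /* models z_erofs_is_inline_pcluster() */
        int nr_compressed_pages;   /* models z_erofs_pclusterpages() */
};

struct frontend {
        struct pcluster *pcl;
        int icur;                  /* inplace I/O cursor, walked in reverse */
        int mode;
};

#define MODE_FOLLOWED_NOINPLACE 2  /* placeholder for Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE */

/* Placeholder helpers; the real ones read the inline metabuf / bind the managed cache. */
static int read_inline_metadata(struct frontend *fe) { (void)fe; return 0; }
static void bind_cache(struct frontend *fe) { (void)fe; }

/*
 * Post-patch shape of the preparation step: choosing between cached
 * decompression and inline metadata, and priming the inplace cursor,
 * now both happen here rather than in the page walker.
 */
static int pcluster_begin(struct frontend *fe)
{
        if (!fe->pcl->inline_data) {
                /* bind cache first when cached decompression is preferred */
                bind_cache(fe);
        } else {
                int ret = read_inline_metadata(fe);

                if (ret)
                        return ret;  /* e.g. a failed metadata read */
                fe->mode = MODE_FOLLOWED_NOINPLACE;
        }
        /* file-backed inplace I/O pages are traversed in reverse order */
        fe->icur = fe->pcl->nr_compressed_pages;
        return 0;
}

int main(void)
{
        struct pcluster pcl = { .inline_data = true, .nr_compressed_pages = 4 };
        struct frontend fe = { .pcl = &pcl, .icur = 0, .mode = 0 };
        int ret = pcluster_begin(&fe);

        printf("pcluster_begin() = %d, icur = %d, mode = %d\n", ret, fe.icur, fe.mode);
        return 0;
}

One consequence visible in the second half of the diff: since the error path of the metadata read now lives inside the begin helper, it can simply return an errno, and z_erofs_do_read_page() no longer has to unwind it through its goto-out path.
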
@@ -852,6 +852,8 @@ static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe)
 static int z_erofs_pcluster_begin(struct z_erofs_decompress_frontend *fe)
 {
         struct erofs_map_blocks *map = &fe->map;
+        struct super_block *sb = fe->inode->i_sb;
+        erofs_blk_t blknr = erofs_blknr(sb, map->m_pa);
         struct erofs_workgroup *grp = NULL;
         int ret;

@@ -861,8 +863,7 @@ static int z_erofs_pcluster_begin(struct z_erofs_decompress_frontend *fe)
         DBG_BUGON(fe->owned_head == Z_EROFS_PCLUSTER_NIL);

         if (!(map->m_flags & EROFS_MAP_META)) {
-                grp = erofs_find_workgroup(fe->inode->i_sb,
-                                           map->m_pa >> PAGE_SHIFT);
+                grp = erofs_find_workgroup(sb, blknr);
         } else if ((map->m_pa & ~PAGE_MASK) + map->m_plen > PAGE_SIZE) {
                 DBG_BUGON(1);
                 return -EFSCORRUPTED;
@@ -881,9 +882,26 @@ static int z_erofs_pcluster_begin(struct z_erofs_decompress_frontend *fe)
         } else if (ret) {
                 return ret;
         }
+
         z_erofs_bvec_iter_begin(&fe->biter, &fe->pcl->bvset,
                                 Z_EROFS_INLINE_BVECS, fe->pcl->vcnt);
-        /* since file-backed online pages are traversed in reverse order */
+        if (!z_erofs_is_inline_pcluster(fe->pcl)) {
+                /* bind cache first when cached decompression is preferred */
+                z_erofs_bind_cache(fe);
+        } else {
+                void *mptr;
+
+                mptr = erofs_read_metabuf(&map->buf, sb, blknr, EROFS_NO_KMAP);
+                if (IS_ERR(mptr)) {
+                        ret = PTR_ERR(mptr);
+                        erofs_err(sb, "failed to get inline data %d", ret);
+                        return ret;
+                }
+                get_page(map->buf.page);
+                WRITE_ONCE(fe->pcl->compressed_bvecs[0].page, map->buf.page);
+                fe->mode = Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE;
+        }
+        /* file-backed inplace I/O pages are traversed in reverse order */
         fe->icur = z_erofs_pclusterpages(fe->pcl);
         return 0;
 }
@@ -982,39 +1000,15 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
                 err = z_erofs_map_blocks_iter(inode, map, 0);
                 if (err)
                         goto out;
-        } else {
-                if (fe->pcl)
-                        goto hitted;
-                /* didn't get a valid pcluster previously (very rare) */
-        }
-
-        if (!(map->m_flags & EROFS_MAP_MAPPED) ||
-            map->m_flags & EROFS_MAP_FRAGMENT)
+        } else if (fe->pcl) {
                 goto hitted;
+        }

-        err = z_erofs_pcluster_begin(fe);
-        if (err)
-                goto out;
-
-        if (z_erofs_is_inline_pcluster(fe->pcl)) {
-                void *mp;
-
-                mp = erofs_read_metabuf(&fe->map.buf, inode->i_sb,
-                                        erofs_blknr(inode->i_sb, map->m_pa),
-                                        EROFS_NO_KMAP);
-                if (IS_ERR(mp)) {
-                        err = PTR_ERR(mp);
-                        erofs_err(inode->i_sb,
-                                  "failed to get inline page, err %d", err);
-                        goto out;
-                }
-                get_page(fe->map.buf.page);
-                WRITE_ONCE(fe->pcl->compressed_bvecs[0].page,
-                           fe->map.buf.page);
-                fe->mode = Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE;
-        } else {
-                /* bind cache first when cached decompression is preferred */
-                z_erofs_bind_cache(fe);
-        }
+        if ((map->m_flags & EROFS_MAP_MAPPED) &&
+            !(map->m_flags & EROFS_MAP_FRAGMENT)) {
+                err = z_erofs_pcluster_begin(fe);
+                if (err)
+                        goto out;
+        }
 hitted:
         /*
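
The relocated inline path relies on the kernel's error-pointer convention: erofs_read_metabuf() returns either a usable address or a small negative errno encoded in the pointer value, so the caller checks IS_ERR() and decodes it with PTR_ERR(). Below is a minimal userspace sketch of that convention; it mirrors the idea behind include/linux/err.h (errors occupy the top MAX_ERRNO values of the address space), but read_metabuf() here is just a toy stand-in, not the erofs function.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_ERRNO 4095  /* the kernel reserves the last 4095 pointer values for errors */

/* Encode a small negative errno as a pointer value. */
static void *ERR_PTR(long error) { return (void *)error; }

/* A pointer in the top MAX_ERRNO values of the address space is an error code. */
static bool IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Decode the errno back out of an error pointer. */
static long PTR_ERR(const void *ptr) { return (long)ptr; }

/* Toy stand-in for erofs_read_metabuf(): either a buffer or an error pointer. */
static void *read_metabuf(bool fail)
{
        if (fail)
                return ERR_PTR(-EIO);
        return malloc(64);
}

int main(void)
{
        void *mptr = read_metabuf(true);

        if (IS_ERR(mptr)) {
                long ret = PTR_ERR(mptr);

                printf("failed to get inline data %ld\n", ret);
        } else {
                free(mptr);
        }
        return 0;
}
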