Commit a07eeddf authored by Gao Xiang, committed by Greg Kroah-Hartman

staging: erofs: clean up z_erofs_map_blocks_iter

This patch mainly introduces `vle_map_blocks_iter_ctx' to clean up
z_erofs_map_blocks_iter and vle_get_logical_extent_head.

It changes the return value of `vle_get_logical_extent_head' to int
for later error handling. In addition, it also renames `pcn' to
`pblk' since only `pblk' exists in the erofs compression on-disk format.
Reviewed-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Gao Xiang <gaoxiang25@huawei.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 37ec35a6
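
[Editor's note] For readers following along, the cleanup below applies a common C pattern: arguments that stay invariant across a recursive walk are gathered into a read-only context structure, and the function reports errors through an int return while results come back via out-parameters. Here is a minimal userspace sketch of that shape; all names (walk_ctx, get_extent_head, deltas) are illustrative stand-ins, not identifiers from the erofs source:

#include <stdio.h>

/* Invariant state for the walk, bundled once instead of re-passed. */
struct walk_ctx {
	const unsigned int *deltas;	/* hypothetical per-cluster delta table */
	unsigned int nr_clusters;
};

/* Returns 0 or a negative error; the result travels via *head. */
static int get_extent_head(const struct walk_ctx *ctx,
			   unsigned int lcn, unsigned int *head)
{
	if (lcn >= ctx->nr_clusters)
		return -1;	/* out of range: propagate an error */

	/* a nonzero delta points back toward the head cluster */
	if (ctx->deltas[lcn])
		return get_extent_head(ctx, lcn - ctx->deltas[lcn], head);

	*head = lcn;
	return 0;
}

int main(void)
{
	/* clusters 1..3 each point back toward head cluster 0 */
	const unsigned int deltas[] = { 0, 1, 2, 3 };
	const struct walk_ctx ctx = { deltas, 4 };
	unsigned int head;

	if (!get_extent_head(&ctx, 3, &head))
		printf("head cluster: %u\n", head);	/* prints 0 */
	return 0;
}

The patch follows the same shape: `vle_get_logical_extent_head' takes a const `vle_map_blocks_iter_ctx' pointer, recurses with `lcn - delta0' for NONHEAD clusters, and returns 0 or an error for the caller to propagate.
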
@@ -1410,6 +1410,13 @@ const struct address_space_operations z_erofs_vle_normalaccess_aops = {
 	.readpages = z_erofs_vle_normalaccess_readpages,
 };
 
+/*
+ * Variable-sized Logical Extent (Fixed Physical Cluster) Compression Mode
+ * ---
+ * VLE compression mode attempts to compress a number of logical data into
+ * a physical cluster with a fixed size.
+ * VLE compression mode uses "struct z_erofs_vle_decompressed_index".
+ */
 #define __vle_cluster_advise(x, bit, bits) \
 	((le16_to_cpu(x) >> (bit)) & ((1 << (bits)) - 1))
 
@@ -1465,90 +1472,96 @@ vle_extent_blkoff(struct inode *inode, pgoff_t index)
 	return erofs_blkoff(iloc(sbi, vi->nid) + ofs);
 }
 
-/*
- * Variable-sized Logical Extent (Fixed Physical Cluster) Compression Mode
- * ---
- * VLE compression mode attempts to compress a number of logical data into
- * a physical cluster with a fixed size.
- * VLE compression mode uses "struct z_erofs_vle_decompressed_index".
- */
-static erofs_off_t vle_get_logical_extent_head(
-	struct inode *inode,
-	struct page **page_iter,
-	void **kaddr_iter,
-	unsigned int lcn,	/* logical cluster number */
-	erofs_blk_t *pcn,
-	unsigned int *flags)
+struct vle_map_blocks_iter_ctx {
+	struct inode *inode;
+	struct super_block *sb;
+	unsigned int clusterbits;
+
+	struct page **mpage_ret;
+	void **kaddr_ret;
+};
+
+static int
+vle_get_logical_extent_head(const struct vle_map_blocks_iter_ctx *ctx,
+			    unsigned int lcn,	/* logical cluster number */
+			    unsigned long long *ofs,
+			    erofs_blk_t *pblk,
+			    unsigned int *flags)
 {
-	/* for extent meta */
-	struct page *page = *page_iter;
-	erofs_blk_t blkaddr = vle_extent_blkaddr(inode, lcn);
+	const unsigned int clustersize = 1 << ctx->clusterbits;
+	const erofs_blk_t mblk = vle_extent_blkaddr(ctx->inode, lcn);
+	struct page *mpage = *ctx->mpage_ret;	/* extent metapage */
+
 	struct z_erofs_vle_decompressed_index *di;
-	unsigned long long ofs;
-	struct super_block *const sb = inode->i_sb;
-	const unsigned int clusterbits = EROFS_SB(sb)->clusterbits;
-	const unsigned int clustersize = 1 << clusterbits;
-	unsigned int delta0;
+	unsigned int cluster_type, delta0;
 
-	if (page->index != blkaddr) {
-		kunmap_atomic(*kaddr_iter);
-		unlock_page(page);
-		put_page(page);
+	if (mpage->index != mblk) {
+		kunmap_atomic(*ctx->kaddr_ret);
+		unlock_page(mpage);
+		put_page(mpage);
 
-		page = erofs_get_meta_page_nofail(sb, blkaddr, false);
-		*page_iter = page;
-		*kaddr_iter = kmap_atomic(page);
+		mpage = erofs_get_meta_page_nofail(ctx->sb, mblk, false);
+		*ctx->mpage_ret = mpage;
+		*ctx->kaddr_ret = kmap_atomic(mpage);
 	}
 
-	di = *kaddr_iter + vle_extent_blkoff(inode, lcn);
-	switch (vle_cluster_type(di)) {
+	di = *ctx->kaddr_ret + vle_extent_blkoff(ctx->inode, lcn);
+
+	cluster_type = vle_cluster_type(di);
+	switch (cluster_type) {
 	case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
 		delta0 = le16_to_cpu(di->di_u.delta[0]);
 		DBG_BUGON(!delta0);
 		DBG_BUGON(lcn < delta0);
-		ofs = vle_get_logical_extent_head(inode,
-			page_iter, kaddr_iter,
-			lcn - delta0, pcn, flags);
-		break;
+		return vle_get_logical_extent_head(ctx,
+			lcn - delta0, ofs, pblk, flags);
 	case Z_EROFS_VLE_CLUSTER_TYPE_PLAIN:
 		*flags ^= EROFS_MAP_ZIPPED;
+		/* fallthrough */
 	case Z_EROFS_VLE_CLUSTER_TYPE_HEAD:
 		/* clustersize should be a power of two */
-		ofs = ((u64)lcn << clusterbits) +
+		*ofs = ((u64)lcn << ctx->clusterbits) +
 			(le16_to_cpu(di->di_clusterofs) & (clustersize - 1));
-		*pcn = le32_to_cpu(di->di_u.blkaddr);
+		*pblk = le32_to_cpu(di->di_u.blkaddr);
 		break;
 	default:
 		BUG_ON(1);
 	}
-	return ofs;
+	return 0;
 }
 
 int z_erofs_map_blocks_iter(struct inode *inode,
 	struct erofs_map_blocks *map,
 	struct page **mpage_ret, int flags)
 {
+	void *kaddr;
+	const struct vle_map_blocks_iter_ctx ctx = {
+		.inode = inode,
+		.sb = inode->i_sb,
+		.clusterbits = EROFS_I_SB(inode)->clusterbits,
+		.mpage_ret = mpage_ret,
+		.kaddr_ret = &kaddr
+	};
+	const unsigned int clustersize = 1 << ctx.clusterbits;
+	/* if both m_(l,p)len are 0, regularize l_lblk, l_lofs, etc... */
+	const bool initial = !map->m_llen;
+
 	/* logicial extent (start, end) offset */
 	unsigned long long ofs, end;
-	struct z_erofs_vle_decompressed_index *di;
-	erofs_blk_t e_blkaddr, pcn;
-	unsigned int lcn, logical_cluster_ofs, cluster_type;
+	unsigned int lcn;
 	u32 ofs_rem;
+
+	/* initialize `pblk' to keep gcc from printing foolish warnings */
+	erofs_blk_t mblk, pblk = 0;
 	struct page *mpage = *mpage_ret;
-	void *kaddr;
-	bool initial;
-	struct super_block *const sb = inode->i_sb;
-	const unsigned int clusterbits = EROFS_SB(sb)->clusterbits;
-	const unsigned int clustersize = 1 << clusterbits;
+	struct z_erofs_vle_decompressed_index *di;
+	unsigned int cluster_type, logical_cluster_ofs;
 	int err = 0;
 
-	/* if both m_(l,p)len are 0, regularize l_lblk, l_lofs, etc... */
-	initial = !map->m_llen;
-
 	/* when trying to read beyond EOF, leave it unmapped */
 	if (unlikely(map->m_la >= inode->i_size)) {
-		BUG_ON(!initial);
+		DBG_BUGON(!initial);
 		map->m_llen = map->m_la + 1 - inode->i_size;
 		map->m_la = inode->i_size;
 		map->m_flags = 0;
@@ -1561,16 +1574,16 @@ int z_erofs_map_blocks_iter(struct inode *inode,
 	ofs = map->m_la + map->m_llen;
 
 	/* clustersize should be power of two */
-	lcn = ofs >> clusterbits;
+	lcn = ofs >> ctx.clusterbits;
 	ofs_rem = ofs & (clustersize - 1);
 
-	e_blkaddr = vle_extent_blkaddr(inode, lcn);
+	mblk = vle_extent_blkaddr(inode, lcn);
 
-	if (mpage == NULL || mpage->index != e_blkaddr) {
+	if (!mpage || mpage->index != mblk) {
 		if (mpage != NULL)
 			put_page(mpage);
 
-		mpage = erofs_get_meta_page_nofail(sb, e_blkaddr, false);
+		mpage = erofs_get_meta_page_nofail(ctx.sb, mblk, false);
 		*mpage_ret = mpage;
 	} else {
 		lock_page(mpage);
@@ -1580,8 +1593,8 @@ int z_erofs_map_blocks_iter(struct inode *inode,
 	kaddr = kmap_atomic(mpage);
 	di = kaddr + vle_extent_blkoff(inode, lcn);
 
-	debugln("%s, lcn %u e_blkaddr %u e_blkoff %u", __func__, lcn,
-		e_blkaddr, vle_extent_blkoff(inode, lcn));
+	debugln("%s, lcn %u mblk %u e_blkoff %u", __func__, lcn,
+		mblk, vle_extent_blkoff(inode, lcn));
 
 	err = vle_decompressed_index_clusterofs(&logical_cluster_ofs,
 		clustersize, di);
@@ -1608,13 +1621,13 @@ int z_erofs_map_blocks_iter(struct inode *inode,
 		/* fallthrough */
 	case Z_EROFS_VLE_CLUSTER_TYPE_HEAD:
 		if (ofs_rem == logical_cluster_ofs) {
-			pcn = le32_to_cpu(di->di_u.blkaddr);
+			pblk = le32_to_cpu(di->di_u.blkaddr);
 			goto exact_hitted;
 		}
 
 		if (ofs_rem > logical_cluster_ofs) {
 			ofs = (u64)lcn * clustersize | logical_cluster_ofs;
-			pcn = le32_to_cpu(di->di_u.blkaddr);
+			pblk = le32_to_cpu(di->di_u.blkaddr);
 			break;
 		}
 
@@ -1629,9 +1642,12 @@ int z_erofs_map_blocks_iter(struct inode *inode,
 		/* fallthrough */
 	case Z_EROFS_VLE_CLUSTER_TYPE_NONHEAD:
 		/* get the correspoinding first chunk */
-		ofs = vle_get_logical_extent_head(inode, mpage_ret,
-			&kaddr, lcn, &pcn, &map->m_flags);
+		err = vle_get_logical_extent_head(&ctx, lcn, &ofs,
+						  &pblk, &map->m_flags);
 		mpage = *mpage_ret;
+
+		if (unlikely(err))
+			goto unmap_out;
 		break;
 	default:
 		errln("unknown cluster type %u at offset %llu of nid %llu",
@@ -1644,7 +1660,7 @@ int z_erofs_map_blocks_iter(struct inode *inode,
 exact_hitted:
 	map->m_llen = end - ofs;
 	map->m_plen = clustersize;
-	map->m_pa = blknr_to_addr(pcn);
+	map->m_pa = blknr_to_addr(pblk);
 	map->m_flags |= EROFS_MAP_MAPPED;
 unmap_out:
 	kunmap_atomic(kaddr);