Commit 87a201b4 authored by Linus Torvalds

Merge tag 'erofs-for-6.7-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs

Pull erofs updates from Gao Xiang:
 "Nothing exciting lands for this cycle, since we're still busying in
  developing support for sub-page blocks and large-folios of compressed
  data for new scenarios on Android.

  In this cycle, the MicroLZMA format is marked as stable, and there are
  minor cleanups around the documentation and codebase. In addition, it
  fixes incorrect lockref usage in erofs_insert_workgroup().

  Summary:

   - Fix inode metadata space layout documentation

   - Stop warning about the MicroLZMA format now that it is stable

   - Fix erofs_insert_workgroup() lockref usage

   - Some cleanups"

* tag 'erofs-for-6.7-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs:
  erofs: fix erofs_insert_workgroup() lockref usage
  erofs: tidy up redundant includes
  erofs: get rid of ROOT_NID()
  erofs: simplify compression configuration parser
  erofs: don't warn MicroLZMA format anymore
  erofs: fix inode metadata space layout description in documentation
parents 57aff997 1a0ac8bd
......@@ -199,7 +199,7 @@ may not. All metadatas can be now observed in two different spaces (views):
| |
|__________________| 64 bytes
Xattrs, extents, data inline are followed by the corresponding inode with
Xattrs, extents, data inline are placed after the corresponding inode with
proper alignment, and they could be optional for different data mappings.
_currently_ total 5 data layouts are supported:
......
......@@ -91,13 +91,10 @@ config EROFS_FS_ZIP_LZMA
select XZ_DEC_MICROLZMA
help
Saying Y here includes support for reading EROFS file systems
containing LZMA compressed data, specifically called microLZMA. it
gives better compression ratios than the LZ4 algorithm, at the
containing LZMA compressed data, specifically called microLZMA. It
gives better compression ratios than the default LZ4 format, at the
expense of more CPU overhead.
LZMA support is an experimental feature for now and so most file
systems will be readable without selecting this option.
If unsure, say N.
config EROFS_FS_ZIP_DEFLATE
......
......@@ -21,6 +21,8 @@ struct z_erofs_decompress_req {
};
struct z_erofs_decompressor {
int (*config)(struct super_block *sb, struct erofs_super_block *dsb,
void *data, int size);
int (*decompress)(struct z_erofs_decompress_req *rq,
struct page **pagepool);
char *name;
......@@ -92,6 +94,10 @@ int z_erofs_fixup_insize(struct z_erofs_decompress_req *rq, const char *padbuf,
extern const struct z_erofs_decompressor erofs_decompressors[];
/* prototypes for specific algorithms */
int z_erofs_load_lzma_config(struct super_block *sb,
struct erofs_super_block *dsb, void *data, int size);
int z_erofs_load_deflate_config(struct super_block *sb,
struct erofs_super_block *dsb, void *data, int size);
int z_erofs_lzma_decompress(struct z_erofs_decompress_req *rq,
struct page **pagepool);
int z_erofs_deflate_decompress(struct z_erofs_decompress_req *rq,
......
......@@ -5,9 +5,7 @@
* Copyright (C) 2021, Alibaba Cloud
*/
#include "internal.h"
#include <linux/prefetch.h>
#include <linux/sched/mm.h>
#include <linux/dax.h>
#include <trace/events/erofs.h>
void erofs_unmap_metabuf(struct erofs_buf *buf)
......
......@@ -4,7 +4,6 @@
* https://www.huawei.com/
*/
#include "compress.h"
#include <linux/module.h>
#include <linux/lz4.h>
#ifndef LZ4_DISTANCE_MAX /* history window size */
......@@ -24,11 +23,11 @@ struct z_erofs_lz4_decompress_ctx {
unsigned int oend;
};
int z_erofs_load_lz4_config(struct super_block *sb,
struct erofs_super_block *dsb,
struct z_erofs_lz4_cfgs *lz4, int size)
static int z_erofs_load_lz4_config(struct super_block *sb,
struct erofs_super_block *dsb, void *data, int size)
{
struct erofs_sb_info *sbi = EROFS_SB(sb);
struct z_erofs_lz4_cfgs *lz4 = data;
u16 distance;
if (lz4) {
......@@ -370,19 +369,75 @@ const struct z_erofs_decompressor erofs_decompressors[] = {
.name = "interlaced"
},
[Z_EROFS_COMPRESSION_LZ4] = {
.config = z_erofs_load_lz4_config,
.decompress = z_erofs_lz4_decompress,
.name = "lz4"
},
#ifdef CONFIG_EROFS_FS_ZIP_LZMA
[Z_EROFS_COMPRESSION_LZMA] = {
.config = z_erofs_load_lzma_config,
.decompress = z_erofs_lzma_decompress,
.name = "lzma"
},
#endif
#ifdef CONFIG_EROFS_FS_ZIP_DEFLATE
[Z_EROFS_COMPRESSION_DEFLATE] = {
.config = z_erofs_load_deflate_config,
.decompress = z_erofs_deflate_decompress,
.name = "deflate"
},
#endif
};
int z_erofs_parse_cfgs(struct super_block *sb, struct erofs_super_block *dsb)
{
struct erofs_sb_info *sbi = EROFS_SB(sb);
struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
unsigned int algs, alg;
erofs_off_t offset;
int size, ret = 0;
if (!erofs_sb_has_compr_cfgs(sbi)) {
sbi->available_compr_algs = Z_EROFS_COMPRESSION_LZ4;
return z_erofs_load_lz4_config(sb, dsb, NULL, 0);
}
sbi->available_compr_algs = le16_to_cpu(dsb->u1.available_compr_algs);
if (sbi->available_compr_algs & ~Z_EROFS_ALL_COMPR_ALGS) {
erofs_err(sb, "unidentified algorithms %x, please upgrade kernel",
sbi->available_compr_algs & ~Z_EROFS_ALL_COMPR_ALGS);
return -EOPNOTSUPP;
}
erofs_init_metabuf(&buf, sb);
offset = EROFS_SUPER_OFFSET + sbi->sb_size;
alg = 0;
for (algs = sbi->available_compr_algs; algs; algs >>= 1, ++alg) {
void *data;
if (!(algs & 1))
continue;
data = erofs_read_metadata(sb, &buf, &offset, &size);
if (IS_ERR(data)) {
ret = PTR_ERR(data);
break;
}
if (alg >= ARRAY_SIZE(erofs_decompressors) ||
!erofs_decompressors[alg].config) {
erofs_err(sb, "algorithm %d isn't enabled on this kernel",
alg);
ret = -EOPNOTSUPP;
} else {
ret = erofs_decompressors[alg].config(sb,
dsb, data, size);
}
kfree(data);
if (ret)
break;
}
erofs_put_metabuf(&buf);
return ret;
}
// SPDX-License-Identifier: GPL-2.0-or-later
#include <linux/module.h>
#include <linux/zlib.h>
#include "compress.h"
......@@ -77,9 +76,10 @@ int __init z_erofs_deflate_init(void)
}
int z_erofs_load_deflate_config(struct super_block *sb,
struct erofs_super_block *dsb,
struct z_erofs_deflate_cfgs *dfl, int size)
struct erofs_super_block *dsb, void *data, int size)
{
struct z_erofs_deflate_cfgs *dfl = data;
if (!dfl || size < sizeof(struct z_erofs_deflate_cfgs)) {
erofs_err(sb, "invalid deflate cfgs, size=%u", size);
return -EINVAL;
......
// SPDX-License-Identifier: GPL-2.0-or-later
#include <linux/xz.h>
#include <linux/module.h>
#include "compress.h"
struct z_erofs_lzma {
......@@ -72,10 +71,10 @@ int __init z_erofs_lzma_init(void)
}
int z_erofs_load_lzma_config(struct super_block *sb,
struct erofs_super_block *dsb,
struct z_erofs_lzma_cfgs *lzma, int size)
struct erofs_super_block *dsb, void *data, int size)
{
static DEFINE_MUTEX(lzma_resize_mutex);
struct z_erofs_lzma_cfgs *lzma = data;
unsigned int dict_size, i;
struct z_erofs_lzma *strm, *head = NULL;
int err;
......@@ -96,8 +95,6 @@ int z_erofs_load_lzma_config(struct super_block *sb,
return -EINVAL;
}
erofs_info(sb, "EXPERIMENTAL MicroLZMA in use. Use at your own risk!");
/* in case 2 z_erofs_load_lzma_config() race to avoid deadlock */
mutex_lock(&lzma_resize_mutex);
......
......@@ -8,8 +8,10 @@
#define __EROFS_INTERNAL_H
#include <linux/fs.h>
#include <linux/dax.h>
#include <linux/dcache.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/bio.h>
#include <linux/magic.h>
......@@ -228,8 +230,6 @@ struct erofs_buf {
};
#define __EROFS_BUF_INITIALIZER ((struct erofs_buf){ .page = NULL })
#define ROOT_NID(sb) ((sb)->root_nid)
#define erofs_blknr(sb, addr) ((addr) >> (sb)->s_blocksize_bits)
#define erofs_blkoff(sb, addr) ((addr) & ((sb)->s_blocksize - 1))
#define erofs_pos(sb, blk) ((erofs_off_t)(blk) << (sb)->s_blocksize_bits)
......@@ -469,9 +469,6 @@ int __init z_erofs_init_zip_subsystem(void);
void z_erofs_exit_zip_subsystem(void);
int erofs_try_to_free_all_cached_pages(struct erofs_sb_info *sbi,
struct erofs_workgroup *egrp);
int z_erofs_load_lz4_config(struct super_block *sb,
struct erofs_super_block *dsb,
struct z_erofs_lz4_cfgs *lz4, int len);
int z_erofs_map_blocks_iter(struct inode *inode, struct erofs_map_blocks *map,
int flags);
void *erofs_get_pcpubuf(unsigned int requiredpages);
......@@ -480,6 +477,7 @@ int erofs_pcpubuf_growsize(unsigned int nrpages);
void __init erofs_pcpubuf_init(void);
void erofs_pcpubuf_exit(void);
int erofs_init_managed_cache(struct super_block *sb);
int z_erofs_parse_cfgs(struct super_block *sb, struct erofs_super_block *dsb);
#else
static inline void erofs_shrinker_register(struct super_block *sb) {}
static inline void erofs_shrinker_unregister(struct super_block *sb) {}
......@@ -487,16 +485,6 @@ static inline int erofs_init_shrinker(void) { return 0; }
static inline void erofs_exit_shrinker(void) {}
static inline int z_erofs_init_zip_subsystem(void) { return 0; }
static inline void z_erofs_exit_zip_subsystem(void) {}
static inline int z_erofs_load_lz4_config(struct super_block *sb,
struct erofs_super_block *dsb,
struct z_erofs_lz4_cfgs *lz4, int len)
{
if (lz4 || dsb->u1.lz4_max_distance) {
erofs_err(sb, "lz4 algorithm isn't enabled");
return -EINVAL;
}
return 0;
}
static inline void erofs_pcpubuf_init(void) {}
static inline void erofs_pcpubuf_exit(void) {}
static inline int erofs_init_managed_cache(struct super_block *sb) { return 0; }
......@@ -505,41 +493,17 @@ static inline int erofs_init_managed_cache(struct super_block *sb) { return 0; }
#ifdef CONFIG_EROFS_FS_ZIP_LZMA
int __init z_erofs_lzma_init(void);
void z_erofs_lzma_exit(void);
int z_erofs_load_lzma_config(struct super_block *sb,
struct erofs_super_block *dsb,
struct z_erofs_lzma_cfgs *lzma, int size);
#else
static inline int z_erofs_lzma_init(void) { return 0; }
static inline int z_erofs_lzma_exit(void) { return 0; }
static inline int z_erofs_load_lzma_config(struct super_block *sb,
struct erofs_super_block *dsb,
struct z_erofs_lzma_cfgs *lzma, int size) {
if (lzma) {
erofs_err(sb, "lzma algorithm isn't enabled");
return -EINVAL;
}
return 0;
}
#endif /* !CONFIG_EROFS_FS_ZIP_LZMA */
#ifdef CONFIG_EROFS_FS_ZIP_DEFLATE
int __init z_erofs_deflate_init(void);
void z_erofs_deflate_exit(void);
int z_erofs_load_deflate_config(struct super_block *sb,
struct erofs_super_block *dsb,
struct z_erofs_deflate_cfgs *dfl, int size);
#else
static inline int z_erofs_deflate_init(void) { return 0; }
static inline int z_erofs_deflate_exit(void) { return 0; }
static inline int z_erofs_load_deflate_config(struct super_block *sb,
struct erofs_super_block *dsb,
struct z_erofs_deflate_cfgs *dfl, int size) {
if (dfl) {
erofs_err(sb, "deflate algorithm isn't enabled");
return -EINVAL;
}
return 0;
}
#endif /* !CONFIG_EROFS_FS_ZIP_DEFLATE */
#ifdef CONFIG_EROFS_FS_ONDEMAND
......
......@@ -4,14 +4,11 @@
* https://www.huawei.com/
* Copyright (C) 2021, Alibaba Cloud
*/
#include <linux/module.h>
#include <linux/statfs.h>
#include <linux/parser.h>
#include <linux/seq_file.h>
#include <linux/crc32c.h>
#include <linux/fs_context.h>
#include <linux/fs_parser.h>
#include <linux/dax.h>
#include <linux/exportfs.h>
#include "xattr.h"
......@@ -156,68 +153,15 @@ void *erofs_read_metadata(struct super_block *sb, struct erofs_buf *buf,
return buffer;
}
#ifdef CONFIG_EROFS_FS_ZIP
static int erofs_load_compr_cfgs(struct super_block *sb,
struct erofs_super_block *dsb)
#ifndef CONFIG_EROFS_FS_ZIP
static int z_erofs_parse_cfgs(struct super_block *sb,
struct erofs_super_block *dsb)
{
struct erofs_sb_info *sbi = EROFS_SB(sb);
struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
unsigned int algs, alg;
erofs_off_t offset;
int size, ret = 0;
sbi->available_compr_algs = le16_to_cpu(dsb->u1.available_compr_algs);
if (sbi->available_compr_algs & ~Z_EROFS_ALL_COMPR_ALGS) {
erofs_err(sb, "try to load compressed fs with unsupported algorithms %x",
sbi->available_compr_algs & ~Z_EROFS_ALL_COMPR_ALGS);
return -EINVAL;
}
erofs_init_metabuf(&buf, sb);
offset = EROFS_SUPER_OFFSET + sbi->sb_size;
alg = 0;
for (algs = sbi->available_compr_algs; algs; algs >>= 1, ++alg) {
void *data;
if (!(algs & 1))
continue;
data = erofs_read_metadata(sb, &buf, &offset, &size);
if (IS_ERR(data)) {
ret = PTR_ERR(data);
break;
}
if (!dsb->u1.available_compr_algs)
return 0;
switch (alg) {
case Z_EROFS_COMPRESSION_LZ4:
ret = z_erofs_load_lz4_config(sb, dsb, data, size);
break;
case Z_EROFS_COMPRESSION_LZMA:
ret = z_erofs_load_lzma_config(sb, dsb, data, size);
break;
case Z_EROFS_COMPRESSION_DEFLATE:
ret = z_erofs_load_deflate_config(sb, dsb, data, size);
break;
default:
DBG_BUGON(1);
ret = -EFAULT;
}
kfree(data);
if (ret)
break;
}
erofs_put_metabuf(&buf);
return ret;
}
#else
static int erofs_load_compr_cfgs(struct super_block *sb,
struct erofs_super_block *dsb)
{
if (dsb->u1.available_compr_algs) {
erofs_err(sb, "try to load compressed fs when compression is disabled");
return -EINVAL;
}
return 0;
erofs_err(sb, "compression disabled, unable to mount compressed EROFS");
return -EOPNOTSUPP;
}
#endif
......@@ -406,10 +350,7 @@ static int erofs_read_superblock(struct super_block *sb)
}
/* parse on-disk compression configurations */
if (erofs_sb_has_compr_cfgs(sbi))
ret = erofs_load_compr_cfgs(sb, dsb);
else
ret = z_erofs_load_lz4_config(sb, dsb, NULL, 0);
ret = z_erofs_parse_cfgs(sb, dsb);
if (ret < 0)
goto out;
......@@ -724,13 +665,13 @@ static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc)
xa_init(&sbi->managed_pslots);
#endif
inode = erofs_iget(sb, ROOT_NID(sbi));
inode = erofs_iget(sb, sbi->root_nid);
if (IS_ERR(inode))
return PTR_ERR(inode);
if (!S_ISDIR(inode->i_mode)) {
erofs_err(sb, "rootino(nid %llu) is not a directory(i_mode %o)",
ROOT_NID(sbi), inode->i_mode);
sbi->root_nid, inode->i_mode);
iput(inode);
return -EINVAL;
}
......@@ -760,7 +701,7 @@ static int erofs_fc_fill_super(struct super_block *sb, struct fs_context *fc)
if (err)
return err;
erofs_info(sb, "mounted with root inode @ nid %llu.", ROOT_NID(sbi));
erofs_info(sb, "mounted with root inode @ nid %llu.", sbi->root_nid);
return 0;
}
......
......@@ -77,12 +77,7 @@ struct erofs_workgroup *erofs_insert_workgroup(struct super_block *sb,
struct erofs_sb_info *const sbi = EROFS_SB(sb);
struct erofs_workgroup *pre;
/*
* Bump up before making this visible to others for the XArray in order
* to avoid potential UAF without serialized by xa_lock.
*/
lockref_get(&grp->lockref);
DBG_BUGON(grp->lockref.count < 1);
repeat:
xa_lock(&sbi->managed_pslots);
pre = __xa_cmpxchg(&sbi->managed_pslots, grp->index,
......@@ -96,7 +91,6 @@ struct erofs_workgroup *erofs_insert_workgroup(struct super_block *sb,
cond_resched();
goto repeat;
}
lockref_put_return(&grp->lockref);
grp = pre;
}
xa_unlock(&sbi->managed_pslots);
......
......@@ -796,6 +796,7 @@ static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe)
return PTR_ERR(pcl);
spin_lock_init(&pcl->obj.lockref.lock);
pcl->obj.lockref.count = 1; /* one ref for this request */
pcl->algorithmformat = map->m_algorithmformat;
pcl->length = 0;
pcl->partial = true;
......
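
The last two hunks above carry the erofs_insert_workgroup() lockref fix:
rather than bumping the count with lockref_get() before the XArray insert and
dropping it again with lockref_put_return() when an existing pcluster wins the
race, the pcluster is now created with lockref.count already set to 1 for the
requesting context. A small user-space sketch of that create-with-one-reference
pattern, using plain C11 atomics as an illustrative stand-in for the kernel's
lockref:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct pcluster {
	atomic_int refcount;
	unsigned long index;
};

/* Create the object with one reference already held by the creator,
 * so publishing it into a lookup structure needs no extra get/put. */
static struct pcluster *pcluster_create(unsigned long index)
{
	struct pcluster *p = malloc(sizeof(*p));

	if (!p)
		return NULL;
	atomic_init(&p->refcount, 1);	/* one ref for this request */
	p->index = index;
	return p;
}

static void pcluster_put(struct pcluster *p)
{
	/* free once the last reference is dropped */
	if (atomic_fetch_sub(&p->refcount, 1) == 1)
		free(p);
}

int main(void)
{
	struct pcluster *p = pcluster_create(42);

	if (!p)
		return 1;
	/* ... insert p into a lookup table here; no extra get needed ... */
	printf("refcount after create: %d\n", atomic_load(&p->refcount));
	pcluster_put(p);	/* the creating request drops its reference */
	return 0;
}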