Commit dcb9f486 authored by Linus Torvalds

Merge tag 'erofs-for-6.10-rc1-2' of git://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs

Pull more erofs updates from Gao Xiang:
 "The main ones are metadata API conversion to byte offsets by Al Viro.

  Another patch gets rid of unnecessary memory allocation in the DEFLATE
  decompressor. The remaining one is a trivial cleanup.

   - Convert metadata APIs to byte offsets

   - Avoid allocating DEFLATE streams unnecessarily

   - Some erofs_show_options() cleanup"

* tag 'erofs-for-6.10-rc1-2' of git://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs:
  erofs: avoid allocating DEFLATE streams before mounting
  z_erofs_pcluster_begin(): don't bother with rounding position down
  erofs: don't round offset down for erofs_read_metabuf()
  erofs: don't align offset for erofs_read_metabuf() (simple cases)
  erofs: mechanically convert erofs_read_metabuf() to offsets
  erofs: clean up erofs_show_options()
parents c40b1994 80eb4f62
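Context for the diff below: erofs_read_metabuf() now takes a byte offset (erofs_off_t) instead of a block number (erofs_blk_t) and returns a pointer at that exact position, so callers stop splitting positions into erofs_blknr()/erofs_blkoff() pairs. A stand-alone sketch of the calling-convention change, in plain user-space C with mock types (the _old/_new helpers and the fake disk buffer are hypothetical, for illustration only):

/*
 * Sketch only: mock types, not kernel code.
 */
#include <stdio.h>
#include <string.h>

static char disk[8192];				/* pretend on-disk metadata area */
struct mock_sb { unsigned int blkszbits; };

/* old convention: caller passes a block number, gets a block-aligned pointer */
static void *read_metabuf_old(struct mock_sb *sb, unsigned long blkaddr)
{
	return disk + ((unsigned long long)blkaddr << sb->blkszbits);
}

/* new convention: caller passes the byte offset and uses the pointer as-is */
static void *read_metabuf_new(struct mock_sb *sb, unsigned long long offset)
{
	(void)sb;
	return disk + offset;
}

int main(void)
{
	struct mock_sb sb = { .blkszbits = 12 };	/* 4KiB blocks */
	unsigned long long pos = 4100;			/* metadata record position */

	memcpy(disk + pos, "hello", 5);

	/* before: block number in, in-block offset re-added by every caller */
	char *p_old = (char *)read_metabuf_old(&sb, pos >> sb.blkszbits) +
		      (pos & ((1ULL << sb.blkszbits) - 1));

	/* after: the byte offset goes straight through */
	char *p_new = read_metabuf_new(&sb, pos);

	printf("%.5s %.5s\n", p_old, p_new);		/* both print "hello" */
	return 0;
}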
@@ -72,10 +72,10 @@ void erofs_init_metabuf(struct erofs_buf *buf, struct super_block *sb)
 }
 
 void *erofs_read_metabuf(struct erofs_buf *buf, struct super_block *sb,
-			 erofs_blk_t blkaddr, enum erofs_kmap_type type)
+			 erofs_off_t offset, enum erofs_kmap_type type)
 {
 	erofs_init_metabuf(buf, sb);
-	return erofs_bread(buf, erofs_pos(sb, blkaddr), type);
+	return erofs_bread(buf, offset, type);
 }
 
 static int erofs_map_blocks_flatmode(struct inode *inode,
@@ -152,7 +152,7 @@ int erofs_map_blocks(struct inode *inode, struct erofs_map_blocks *map)
 		pos = ALIGN(erofs_iloc(inode) + vi->inode_isize +
 			    vi->xattr_isize, unit) + unit * chunknr;
-		kaddr = erofs_read_metabuf(&buf, sb, erofs_blknr(sb, pos), EROFS_KMAP);
+		kaddr = erofs_read_metabuf(&buf, sb, pos, EROFS_KMAP);
 		if (IS_ERR(kaddr)) {
 			err = PTR_ERR(kaddr);
 			goto out;
@@ -163,7 +163,7 @@ int erofs_map_blocks(struct inode *inode, struct erofs_map_blocks *map)
 	/* handle block map */
 	if (!(vi->chunkformat & EROFS_CHUNK_FORMAT_INDEXES)) {
-		__le32 *blkaddr = kaddr + erofs_blkoff(sb, pos);
+		__le32 *blkaddr = kaddr;
 
 		if (le32_to_cpu(*blkaddr) == EROFS_NULL_ADDR) {
 			map->m_flags = 0;
@@ -174,7 +174,7 @@ int erofs_map_blocks(struct inode *inode, struct erofs_map_blocks *map)
 		goto out_unlock;
 	}
 	/* parse chunk indexes */
-	idx = kaddr + erofs_blkoff(sb, pos);
+	idx = kaddr;
 	switch (le32_to_cpu(idx->blkaddr)) {
 	case EROFS_NULL_ADDR:
 		map->m_flags = 0;
@@ -294,11 +294,10 @@ static int erofs_iomap_begin(struct inode *inode, loff_t offset, loff_t length,
 		struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
 
 		iomap->type = IOMAP_INLINE;
-		ptr = erofs_read_metabuf(&buf, sb,
-					 erofs_blknr(sb, mdev.m_pa), EROFS_KMAP);
+		ptr = erofs_read_metabuf(&buf, sb, mdev.m_pa, EROFS_KMAP);
 		if (IS_ERR(ptr))
 			return PTR_ERR(ptr);
-		iomap->inline_data = ptr + erofs_blkoff(sb, mdev.m_pa);
+		iomap->inline_data = ptr;
 		iomap->private = buf.base;
 	} else {
 		iomap->type = IOMAP_MAPPED;
...
@@ -46,39 +46,15 @@ int __init z_erofs_deflate_init(void)
 	/* by default, use # of possible CPUs instead */
 	if (!z_erofs_deflate_nstrms)
 		z_erofs_deflate_nstrms = num_possible_cpus();
-
-	for (; z_erofs_deflate_avail_strms < z_erofs_deflate_nstrms;
-	     ++z_erofs_deflate_avail_strms) {
-		struct z_erofs_deflate *strm;
-
-		strm = kzalloc(sizeof(*strm), GFP_KERNEL);
-		if (!strm)
-			goto out_failed;
-
-		/* XXX: in-kernel zlib cannot shrink windowbits currently */
-		strm->z.workspace = vmalloc(zlib_inflate_workspacesize());
-		if (!strm->z.workspace) {
-			kfree(strm);
-			goto out_failed;
-		}
-
-		spin_lock(&z_erofs_deflate_lock);
-		strm->next = z_erofs_deflate_head;
-		z_erofs_deflate_head = strm;
-		spin_unlock(&z_erofs_deflate_lock);
-	}
 	return 0;
-
-out_failed:
-	erofs_err(NULL, "failed to allocate zlib workspace");
-	z_erofs_deflate_exit();
-	return -ENOMEM;
 }
 
 int z_erofs_load_deflate_config(struct super_block *sb,
 				struct erofs_super_block *dsb, void *data, int size)
 {
 	struct z_erofs_deflate_cfgs *dfl = data;
+	static DEFINE_MUTEX(deflate_resize_mutex);
+	static bool inited;
 
 	if (!dfl || size < sizeof(struct z_erofs_deflate_cfgs)) {
 		erofs_err(sb, "invalid deflate cfgs, size=%u", size);
@@ -89,9 +65,36 @@ int z_erofs_load_deflate_config(struct super_block *sb,
 		erofs_err(sb, "unsupported windowbits %u", dfl->windowbits);
 		return -EOPNOTSUPP;
 	}
+
+	mutex_lock(&deflate_resize_mutex);
+	if (!inited) {
+		for (; z_erofs_deflate_avail_strms < z_erofs_deflate_nstrms;
+		     ++z_erofs_deflate_avail_strms) {
+			struct z_erofs_deflate *strm;
+
+			strm = kzalloc(sizeof(*strm), GFP_KERNEL);
+			if (!strm)
+				goto failed;
+			/* XXX: in-kernel zlib cannot customize windowbits */
+			strm->z.workspace = vmalloc(zlib_inflate_workspacesize());
+			if (!strm->z.workspace) {
+				kfree(strm);
+				goto failed;
+			}
+
+			spin_lock(&z_erofs_deflate_lock);
+			strm->next = z_erofs_deflate_head;
+			z_erofs_deflate_head = strm;
+			spin_unlock(&z_erofs_deflate_lock);
+		}
+		inited = true;
+	}
+	mutex_unlock(&deflate_resize_mutex);
+
 	erofs_info(sb, "EXPERIMENTAL DEFLATE feature in use. Use at your own risk!");
 	return 0;
+
+failed:
+	mutex_unlock(&deflate_resize_mutex);
+	z_erofs_deflate_exit();
+	return -ENOMEM;
 }
 
 int z_erofs_deflate_decompress(struct z_erofs_decompress_req *rq,
...
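The hunks above move the per-CPU zlib stream allocation out of module init and into the first on-disk DEFLATE config load, serialized by a mutex and an "inited" flag. A stand-alone sketch of that first-use allocation pattern (plain user-space C, pthreads as a stand-in for the kernel mutex; names and sizes are made up, and the real code tears everything down via z_erofs_deflate_exit() on failure):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define NSTRMS 4				/* stand-in for z_erofs_deflate_nstrms */

static void *streams[NSTRMS];

/* called from every "mount" that sees a DEFLATE config; allocates only once */
static int ensure_streams(void)
{
	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static bool inited;
	int err = 0;

	pthread_mutex_lock(&lock);
	if (!inited) {
		for (int i = 0; i < NSTRMS; i++) {
			streams[i] = malloc(64 << 10);	/* stand-in for the zlib workspace */
			if (!streams[i]) {
				err = -1;		/* the real code frees what it allocated */
				break;
			}
		}
		inited = !err;
	}
	pthread_mutex_unlock(&lock);
	return err;
}

int main(void)
{
	printf("first mount:  %d\n", ensure_streams());
	printf("second mount: %d\n", ensure_streams());	/* no further allocation */
	return 0;
}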
@@ -273,21 +273,15 @@ static int erofs_fscache_data_read_slice(struct erofs_fscache_rq *req)
 	if (map.m_flags & EROFS_MAP_META) {
 		struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
 		struct iov_iter iter;
-		erofs_blk_t blknr;
-		size_t offset, size;
+		size_t size = map.m_llen;
 		void *src;
 
-		/* For tail packing layout, the offset may be non-zero. */
-		offset = erofs_blkoff(sb, map.m_pa);
-		blknr = erofs_blknr(sb, map.m_pa);
-		size = map.m_llen;
-
-		src = erofs_read_metabuf(&buf, sb, blknr, EROFS_KMAP);
+		src = erofs_read_metabuf(&buf, sb, map.m_pa, EROFS_KMAP);
 		if (IS_ERR(src))
 			return PTR_ERR(src);
 
 		iov_iter_xarray(&iter, ITER_DEST, &mapping->i_pages, pos, PAGE_SIZE);
-		if (copy_to_iter(src + offset, size, &iter) != size) {
+		if (copy_to_iter(src, size, &iter) != size) {
 			erofs_put_metabuf(&buf);
 			return -EFAULT;
 		}
...
@@ -26,7 +26,7 @@ static void *erofs_read_inode(struct erofs_buf *buf,
 	blkaddr = erofs_blknr(sb, inode_loc);
 	*ofs = erofs_blkoff(sb, inode_loc);
 
-	kaddr = erofs_read_metabuf(buf, sb, blkaddr, EROFS_KMAP);
+	kaddr = erofs_read_metabuf(buf, sb, erofs_pos(sb, blkaddr), EROFS_KMAP);
 	if (IS_ERR(kaddr)) {
 		erofs_err(sb, "failed to get inode (nid: %llu) page, err %ld",
 			  vi->nid, PTR_ERR(kaddr));
@@ -66,7 +66,7 @@ static void *erofs_read_inode(struct erofs_buf *buf,
 			goto err_out;
 		}
 		memcpy(copied, dic, gotten);
-		kaddr = erofs_read_metabuf(buf, sb, blkaddr + 1,
+		kaddr = erofs_read_metabuf(buf, sb, erofs_pos(sb, blkaddr + 1),
 					   EROFS_KMAP);
 		if (IS_ERR(kaddr)) {
 			erofs_err(sb, "failed to get inode payload block (nid: %llu), err %ld",
...
@@ -64,15 +64,12 @@ enum {
 };
 
 struct erofs_mount_opts {
-#ifdef CONFIG_EROFS_FS_ZIP
 	/* current strategy of how to use managed cache */
 	unsigned char cache_strategy;
 	/* strategy of sync decompression (0 - auto, 1 - force on, 2 - force off) */
 	unsigned int sync_decompress;
 	/* threshold for decompression synchronously */
 	unsigned int max_sync_decompress_pages;
-#endif
 	unsigned int mount_opt;
 };
 
@@ -406,7 +403,7 @@ void *erofs_bread(struct erofs_buf *buf, erofs_off_t offset,
 		  enum erofs_kmap_type type);
 void erofs_init_metabuf(struct erofs_buf *buf, struct super_block *sb);
 void *erofs_read_metabuf(struct erofs_buf *buf, struct super_block *sb,
-			 erofs_blk_t blkaddr, enum erofs_kmap_type type);
+			 erofs_off_t offset, enum erofs_kmap_type type);
 int erofs_map_dev(struct super_block *sb, struct erofs_map_dev *dev);
 int erofs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 		 u64 start, u64 len);
...
@@ -178,12 +178,10 @@ static int erofs_init_device(struct erofs_buf *buf, struct super_block *sb,
 	struct erofs_fscache *fscache;
 	struct erofs_deviceslot *dis;
 	struct file *bdev_file;
-	void *ptr;
 
-	ptr = erofs_read_metabuf(buf, sb, erofs_blknr(sb, *pos), EROFS_KMAP);
-	if (IS_ERR(ptr))
-		return PTR_ERR(ptr);
-	dis = ptr + erofs_blkoff(sb, *pos);
+	dis = erofs_read_metabuf(buf, sb, *pos, EROFS_KMAP);
+	if (IS_ERR(dis))
+		return PTR_ERR(dis);
 
 	if (!sbi->devs->flatdev && !dif->path) {
 		if (!dis->tag[0]) {
@@ -943,26 +941,14 @@ static int erofs_show_options(struct seq_file *seq, struct dentry *root)
 	struct erofs_sb_info *sbi = EROFS_SB(root->d_sb);
 	struct erofs_mount_opts *opt = &sbi->opt;
 
-#ifdef CONFIG_EROFS_FS_XATTR
-	if (test_opt(opt, XATTR_USER))
-		seq_puts(seq, ",user_xattr");
-	else
-		seq_puts(seq, ",nouser_xattr");
-#endif
-#ifdef CONFIG_EROFS_FS_POSIX_ACL
-	if (test_opt(opt, POSIX_ACL))
-		seq_puts(seq, ",acl");
-	else
-		seq_puts(seq, ",noacl");
-#endif
-#ifdef CONFIG_EROFS_FS_ZIP
-	if (opt->cache_strategy == EROFS_ZIP_CACHE_DISABLED)
-		seq_puts(seq, ",cache_strategy=disabled");
-	else if (opt->cache_strategy == EROFS_ZIP_CACHE_READAHEAD)
-		seq_puts(seq, ",cache_strategy=readahead");
-	else if (opt->cache_strategy == EROFS_ZIP_CACHE_READAROUND)
-		seq_puts(seq, ",cache_strategy=readaround");
-#endif
+	if (IS_ENABLED(CONFIG_EROFS_FS_XATTR))
+		seq_puts(seq, test_opt(opt, XATTR_USER) ?
+				",user_xattr" : ",nouser_xattr");
+	if (IS_ENABLED(CONFIG_EROFS_FS_POSIX_ACL))
+		seq_puts(seq, test_opt(opt, POSIX_ACL) ? ",acl" : ",noacl");
+	if (IS_ENABLED(CONFIG_EROFS_FS_ZIP))
+		seq_printf(seq, ",cache_strategy=%s",
+			   erofs_param_cache_strategy[opt->cache_strategy].name);
 	if (test_opt(opt, DAX_ALWAYS))
 		seq_puts(seq, ",dax=always");
 	if (test_opt(opt, DAX_NEVER))
...
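The erofs_show_options() hunk above drops the #ifdef blocks in favour of IS_ENABLED() and prints the cache strategy by indexing the same name/value table already used for mount-option parsing. A stand-alone sketch of that table-driven lookup (user-space C; the table entries mirror the erofs cache-strategy names, and struct constant_table is re-declared here only for illustration):

#include <stdio.h>

struct constant_table {			/* mirrors the kernel's fs_parser table entry */
	const char *name;
	int value;
};

enum {
	EROFS_ZIP_CACHE_DISABLED,
	EROFS_ZIP_CACHE_READAHEAD,
	EROFS_ZIP_CACHE_READAROUND,
};

static const struct constant_table erofs_param_cache_strategy[] = {
	{ "disabled",	EROFS_ZIP_CACHE_DISABLED },
	{ "readahead",	EROFS_ZIP_CACHE_READAHEAD },
	{ "readaround",	EROFS_ZIP_CACHE_READAROUND },
};

int main(void)
{
	unsigned char cache_strategy = EROFS_ZIP_CACHE_READAROUND;

	/* one lookup replaces the old three-way if/else chain */
	printf(",cache_strategy=%s\n",
	       erofs_param_cache_strategy[cache_strategy].name);
	return 0;
}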
@@ -868,7 +868,7 @@ static int z_erofs_pcluster_begin(struct z_erofs_decompress_frontend *fe)
 	} else {
 		void *mptr;
 
-		mptr = erofs_read_metabuf(&map->buf, sb, blknr, EROFS_NO_KMAP);
+		mptr = erofs_read_metabuf(&map->buf, sb, map->m_pa, EROFS_NO_KMAP);
 		if (IS_ERR(mptr)) {
 			ret = PTR_ERR(mptr);
 			erofs_err(sb, "failed to get inline data %d", ret);
...
@@ -34,13 +34,13 @@ static int z_erofs_load_full_lcluster(struct z_erofs_maprecorder *m,
 	unsigned int advise;
 
 	m->kaddr = erofs_read_metabuf(&m->map->buf, inode->i_sb,
-				      erofs_blknr(inode->i_sb, pos), EROFS_KMAP);
+				      pos, EROFS_KMAP);
 	if (IS_ERR(m->kaddr))
 		return PTR_ERR(m->kaddr);
 
 	m->nextpackoff = pos + sizeof(struct z_erofs_lcluster_index);
 	m->lcn = lcn;
-	di = m->kaddr + erofs_blkoff(inode->i_sb, pos);
+	di = m->kaddr;
 	advise = le16_to_cpu(di->di_advise);
 	m->type = advise & Z_EROFS_LI_LCLUSTER_TYPE_MASK;
@@ -109,7 +109,7 @@ static int unpack_compacted_index(struct z_erofs_maprecorder *m,
 {
 	struct erofs_inode *const vi = EROFS_I(m->inode);
 	const unsigned int lclusterbits = vi->z_logical_clusterbits;
-	unsigned int vcnt, base, lo, lobits, encodebits, nblk, eofs;
+	unsigned int vcnt, lo, lobits, encodebits, nblk, bytes;
 	int i;
 	u8 *in, type;
 	bool big_pcluster;
@@ -127,11 +127,11 @@ static int unpack_compacted_index(struct z_erofs_maprecorder *m,
 	big_pcluster = vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1;
 	lobits = max(lclusterbits, ilog2(Z_EROFS_LI_D0_CBLKCNT) + 1U);
 	encodebits = ((vcnt << amortizedshift) - sizeof(__le32)) * 8 / vcnt;
-	eofs = erofs_blkoff(m->inode->i_sb, pos);
-	base = round_down(eofs, vcnt << amortizedshift);
-	in = m->kaddr + base;
-	i = (eofs - base) >> amortizedshift;
+	bytes = pos & ((vcnt << amortizedshift) - 1);
+	in = m->kaddr - bytes;
+	i = bytes >> amortizedshift;
 	lo = decode_compactedbits(lobits, in, encodebits * i, &type);
 	m->type = type;
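Since erofs_read_metabuf() now maps pos itself rather than the enclosing block, unpack_compacted_index() recovers the start of the compacted pack by stepping back bytes = pos mod (vcnt << amortizedshift) from the returned pointer instead of adding a block-relative base to a block-aligned pointer. A small worked example with made-up numbers (the 4KiB block size and the byte position are assumptions) showing that the old and new arithmetic land on the same pack start and entry index:

#include <stdio.h>

int main(void)
{
	unsigned int blkszbits = 12;			/* assumed 4KiB blocks */
	unsigned int vcnt = 16, amortizedshift = 2;	/* pack size = 64 bytes */
	unsigned long long pos = 4100;			/* assumed byte position of the entry */
	unsigned long long blkstart = pos >> blkszbits << blkszbits;

	/* old scheme: pointer was block-aligned, offset re-added inside the block */
	unsigned int eofs = pos - blkstart;
	unsigned int base = eofs & ~((vcnt << amortizedshift) - 1);	/* round_down */
	unsigned long long in_old = blkstart + base;
	unsigned int i_old = (eofs - base) >> amortizedshift;

	/* new scheme: pointer maps "pos" itself, step back to the pack start */
	unsigned int bytes = pos & ((vcnt << amortizedshift) - 1);
	unsigned long long in_new = pos - bytes;
	unsigned int i_new = bytes >> amortizedshift;

	printf("pack start: old=%llu new=%llu, index: old=%u new=%u\n",
	       in_old, in_new, i_old, i_new);		/* both pairs match */
	return 0;
}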
@@ -256,7 +256,7 @@ static int z_erofs_load_compact_lcluster(struct z_erofs_maprecorder *m,
 out:
 	pos += lcn * (1 << amortizedshift);
 	m->kaddr = erofs_read_metabuf(&m->map->buf, inode->i_sb,
-				      erofs_blknr(inode->i_sb, pos), EROFS_KMAP);
+				      pos, EROFS_KMAP);
 	if (IS_ERR(m->kaddr))
 		return PTR_ERR(m->kaddr);
 	return unpack_compacted_index(m, amortizedshift, pos, lookahead);
@@ -570,7 +570,6 @@ static int z_erofs_fill_inode_lazy(struct inode *inode)
 	int err, headnr;
 	erofs_off_t pos;
 	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
-	void *kaddr;
 	struct z_erofs_map_header *h;
 
 	if (test_bit(EROFS_I_Z_INITED_BIT, &vi->flags)) {
@@ -590,13 +589,12 @@ static int z_erofs_fill_inode_lazy(struct inode *inode)
 		goto out_unlock;
 
 	pos = ALIGN(erofs_iloc(inode) + vi->inode_isize + vi->xattr_isize, 8);
-	kaddr = erofs_read_metabuf(&buf, sb, erofs_blknr(sb, pos), EROFS_KMAP);
-	if (IS_ERR(kaddr)) {
-		err = PTR_ERR(kaddr);
+	h = erofs_read_metabuf(&buf, sb, pos, EROFS_KMAP);
+	if (IS_ERR(h)) {
+		err = PTR_ERR(h);
 		goto out_unlock;
 	}
-	h = kaddr + erofs_blkoff(sb, pos);
 
 	/*
 	 * if the highest bit of the 8-byte map header is set, the whole file
 	 * is stored in the packed inode. The rest bits keeps z_fragmentoff.
...