Commit 3893c202 authored by Linus Torvalds

Merge tag 'erofs-for-5.6-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs

Pull erofs updates from Gao Xiang:
 "A regression fix, several cleanups and (maybe) plus an upcoming new
  mount api convert patch as a part of vfs update are considered
  available for this cycle.

  All commits have been in linux-next and tested with no smoke out.

  Summary:

   - fix an out-of-bound read access introduced in v5.3, which could
     rarely cause data corruption

   - various cleanup patches"

* tag 'erofs-for-5.6-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs:
  erofs: clean up z_erofs_submit_queue()
  erofs: fold in postsubmit_is_all_bypassed()
  erofs: fix out-of-bound read for shifted uncompressed block
  erofs: remove void tagging/untagging of workgroup pointers
  erofs: remove unused tag argument while registering a workgroup
  erofs: remove unused tag argument while finding a workgroup
  erofs: correct indentation of an assigned structure inside a function
parents 53070406 1e4a2955
...
@@ -306,24 +306,22 @@ static int z_erofs_shifted_transform(const struct z_erofs_decompress_req *rq,
 	}
 
 	src = kmap_atomic(*rq->in);
-	if (!rq->out[0]) {
-		dst = NULL;
-	} else {
+	if (rq->out[0]) {
 		dst = kmap_atomic(rq->out[0]);
 		memcpy(dst + rq->pageofs_out, src, righthalf);
+		kunmap_atomic(dst);
 	}
 
-	if (rq->out[1] == *rq->in) {
-		memmove(src, src + righthalf, rq->pageofs_out);
-	} else if (nrpages_out == 2) {
-		if (dst)
-			kunmap_atomic(dst);
+	if (nrpages_out == 2) {
 		DBG_BUGON(!rq->out[1]);
-		dst = kmap_atomic(rq->out[1]);
-		memcpy(dst, src + righthalf, rq->pageofs_out);
+		if (rq->out[1] == *rq->in) {
+			memmove(src, src + righthalf, rq->pageofs_out);
+		} else {
+			dst = kmap_atomic(rq->out[1]);
+			memcpy(dst, src + righthalf, rq->pageofs_out);
+			kunmap_atomic(dst);
+		}
 	}
 
-	if (dst)
-		kunmap_atomic(dst);
-
 	kunmap_atomic(src);
 	return 0;
 }
...
...
@@ -401,9 +401,9 @@ static inline void *erofs_get_pcpubuf(unsigned int pagenr)
 #ifdef CONFIG_EROFS_FS_ZIP
 int erofs_workgroup_put(struct erofs_workgroup *grp);
 struct erofs_workgroup *erofs_find_workgroup(struct super_block *sb,
-					     pgoff_t index, bool *tag);
+					     pgoff_t index);
 int erofs_register_workgroup(struct super_block *sb,
-			     struct erofs_workgroup *grp, bool tag);
+			     struct erofs_workgroup *grp);
 void erofs_workgroup_free_rcu(struct erofs_workgroup *grp);
 void erofs_shrinker_register(struct super_block *sb);
 void erofs_shrinker_unregister(struct super_block *sb);
...
...
@@ -59,7 +59,7 @@ static int erofs_workgroup_get(struct erofs_workgroup *grp)
 }
 
 struct erofs_workgroup *erofs_find_workgroup(struct super_block *sb,
-					     pgoff_t index, bool *tag)
+					     pgoff_t index)
 {
 	struct erofs_sb_info *sbi = EROFS_SB(sb);
 	struct erofs_workgroup *grp;
@@ -68,9 +68,6 @@ struct erofs_workgroup *erofs_find_workgroup(struct super_block *sb,
 	rcu_read_lock();
 	grp = radix_tree_lookup(&sbi->workstn_tree, index);
 	if (grp) {
-		*tag = xa_pointer_tag(grp);
-		grp = xa_untag_pointer(grp);
-
 		if (erofs_workgroup_get(grp)) {
 			/* prefer to relax rcu read side */
 			rcu_read_unlock();
@@ -84,8 +81,7 @@ struct erofs_workgroup *erofs_find_workgroup(struct super_block *sb,
 }
 
 int erofs_register_workgroup(struct super_block *sb,
-			     struct erofs_workgroup *grp,
-			     bool tag)
+			     struct erofs_workgroup *grp)
 {
 	struct erofs_sb_info *sbi;
 	int err;
@@ -103,8 +99,6 @@ int erofs_register_workgroup(struct super_block *sb,
 	sbi = EROFS_SB(sb);
 	xa_lock(&sbi->workstn_tree);
 
-	grp = xa_tag_pointer(grp, tag);
-
 	/*
 	 * Bump up reference count before making this workgroup
 	 * visible to other users in order to avoid potential UAF
@@ -175,8 +169,7 @@ static bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi,
 	 * however in order to avoid some race conditions, add a
 	 * DBG_BUGON to observe this in advance.
	 */
-	DBG_BUGON(xa_untag_pointer(radix_tree_delete(&sbi->workstn_tree,
-						     grp->index)) != grp);
+	DBG_BUGON(radix_tree_delete(&sbi->workstn_tree, grp->index) != grp);
 
 	/*
 	 * If managed cache is on, last refcount should indicate
@@ -201,7 +194,7 @@ static unsigned long erofs_shrink_workstation(struct erofs_sb_info *sbi,
 					      batch, first_index, PAGEVEC_SIZE);
 
 	for (i = 0; i < found; ++i) {
-		struct erofs_workgroup *grp = xa_untag_pointer(batch[i]);
+		struct erofs_workgroup *grp = batch[i];
 
 		first_index = grp->index + 1;
...
...
@@ -46,18 +46,19 @@ extern const struct xattr_handler erofs_xattr_security_handler;
 static inline const struct xattr_handler *erofs_xattr_handler(unsigned int idx)
 {
 	static const struct xattr_handler *xattr_handler_map[] = {
 		[EROFS_XATTR_INDEX_USER] = &erofs_xattr_user_handler,
 #ifdef CONFIG_EROFS_FS_POSIX_ACL
-		[EROFS_XATTR_INDEX_POSIX_ACL_ACCESS] = &posix_acl_access_xattr_handler,
-		[EROFS_XATTR_INDEX_POSIX_ACL_DEFAULT] =
-			&posix_acl_default_xattr_handler,
+		[EROFS_XATTR_INDEX_POSIX_ACL_ACCESS] =
+			&posix_acl_access_xattr_handler,
+		[EROFS_XATTR_INDEX_POSIX_ACL_DEFAULT] =
+			&posix_acl_default_xattr_handler,
 #endif
 		[EROFS_XATTR_INDEX_TRUSTED] = &erofs_xattr_trusted_handler,
 #ifdef CONFIG_EROFS_FS_SECURITY
 		[EROFS_XATTR_INDEX_SECURITY] = &erofs_xattr_security_handler,
 #endif
 	};
 
 	return idx && idx < ARRAY_SIZE(xattr_handler_map) ?
 	       xattr_handler_map[idx] : NULL;
...
...
@@ -345,9 +345,8 @@ static int z_erofs_lookup_collection(struct z_erofs_collector *clt,
 	struct z_erofs_pcluster *pcl;
 	struct z_erofs_collection *cl;
 	unsigned int length;
-	bool tag;
 
-	grp = erofs_find_workgroup(inode->i_sb, map->m_pa >> PAGE_SHIFT, &tag);
+	grp = erofs_find_workgroup(inode->i_sb, map->m_pa >> PAGE_SHIFT);
 	if (!grp)
 		return -ENOENT;
@@ -438,7 +437,7 @@ static int z_erofs_register_collection(struct z_erofs_collector *clt,
 	 */
 	mutex_trylock(&cl->lock);
 
-	err = erofs_register_workgroup(inode->i_sb, &pcl->obj, 0);
+	err = erofs_register_workgroup(inode->i_sb, &pcl->obj);
 	if (err) {
 		mutex_unlock(&cl->lock);
 		kmem_cache_free(pcluster_cachep, pcl);
@@ -1149,21 +1148,7 @@ static void move_to_bypass_jobqueue(struct z_erofs_pcluster *pcl,
 	qtail[JQ_BYPASS] = &pcl->next;
 }
 
-static bool postsubmit_is_all_bypassed(struct z_erofs_decompressqueue *q[],
-				       unsigned int nr_bios, bool force_fg)
-{
-	/*
-	 * although background is preferred, no one is pending for submission.
-	 * don't issue workqueue for decompression but drop it directly instead.
-	 */
-	if (force_fg || nr_bios)
-		return false;
-
-	kvfree(q[JQ_SUBMIT]);
-	return true;
-}
-
-static bool z_erofs_submit_queue(struct super_block *sb,
+static void z_erofs_submit_queue(struct super_block *sb,
 				 z_erofs_next_pcluster_t owned_head,
 				 struct list_head *pagepool,
 				 struct z_erofs_decompressqueue *fgq,
@@ -1172,19 +1157,12 @@ static bool z_erofs_submit_queue(struct super_block *sb,
 	struct erofs_sb_info *const sbi = EROFS_SB(sb);
 	z_erofs_next_pcluster_t qtail[NR_JOBQUEUES];
 	struct z_erofs_decompressqueue *q[NR_JOBQUEUES];
-	struct bio *bio;
 	void *bi_private;
 	/* since bio will be NULL, no need to initialize last_index */
 	pgoff_t uninitialized_var(last_index);
-	bool force_submit = false;
-	unsigned int nr_bios;
-
-	if (owned_head == Z_EROFS_PCLUSTER_TAIL)
-		return false;
-
-	force_submit = false;
-	bio = NULL;
-	nr_bios = 0;
+	unsigned int nr_bios = 0;
+	struct bio *bio = NULL;
 
 	bi_private = jobqueueset_init(sb, q, fgq, force_fg);
 	qtail[JQ_BYPASS] = &q[JQ_BYPASS]->head;
 	qtail[JQ_SUBMIT] = &q[JQ_SUBMIT]->head;
@@ -1194,11 +1172,9 @@ static bool z_erofs_submit_queue(struct super_block *sb,
 	do {
 		struct z_erofs_pcluster *pcl;
-		unsigned int clusterpages;
-		pgoff_t first_index;
-		struct page *page;
-		unsigned int i = 0, bypass = 0;
-		int err;
+		pgoff_t cur, end;
+		unsigned int i = 0;
+		bool bypass = true;
 
 		/* no possible 'owned_head' equals the following */
 		DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
@@ -1206,55 +1182,50 @@ static bool z_erofs_submit_queue(struct super_block *sb,
 		pcl = container_of(owned_head, struct z_erofs_pcluster, next);
 
-		clusterpages = BIT(pcl->clusterbits);
+		cur = pcl->obj.index;
+		end = cur + BIT(pcl->clusterbits);
 
 		/* close the main owned chain at first */
 		owned_head = cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_TAIL,
 				     Z_EROFS_PCLUSTER_TAIL_CLOSED);
 
-		first_index = pcl->obj.index;
-		force_submit |= (first_index != last_index + 1);
-
-repeat:
-		page = pickup_page_for_submission(pcl, i, pagepool,
-						  MNGD_MAPPING(sbi),
-						  GFP_NOFS);
-		if (!page) {
-			force_submit = true;
-			++bypass;
-			goto skippage;
-		}
+		do {
+			struct page *page;
+			int err;
+
+			page = pickup_page_for_submission(pcl, i++, pagepool,
+							  MNGD_MAPPING(sbi),
+							  GFP_NOFS);
+			if (!page)
+				continue;
 
-		if (bio && force_submit) {
+			if (bio && cur != last_index + 1) {
 submit_bio_retry:
 				submit_bio(bio);
 				bio = NULL;
 			}
 
-		if (!bio) {
-			bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
-
-			bio->bi_end_io = z_erofs_decompressqueue_endio;
-			bio_set_dev(bio, sb->s_bdev);
-			bio->bi_iter.bi_sector = (sector_t)(first_index + i) <<
-				LOG_SECTORS_PER_BLOCK;
-			bio->bi_private = bi_private;
-			bio->bi_opf = REQ_OP_READ;
-
-			++nr_bios;
-		}
+			if (!bio) {
+				bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
+
+				bio->bi_end_io = z_erofs_decompressqueue_endio;
+				bio_set_dev(bio, sb->s_bdev);
+				bio->bi_iter.bi_sector = (sector_t)cur <<
+					LOG_SECTORS_PER_BLOCK;
+				bio->bi_private = bi_private;
+				bio->bi_opf = REQ_OP_READ;
+				++nr_bios;
+			}
 
 			err = bio_add_page(bio, page, PAGE_SIZE, 0);
 			if (err < PAGE_SIZE)
 				goto submit_bio_retry;
 
-		force_submit = false;
-		last_index = first_index + i;
-skippage:
-		if (++i < clusterpages)
-			goto repeat;
+			last_index = cur;
+			bypass = false;
+		} while (++cur < end);
 
-		if (bypass < clusterpages)
+		if (!bypass)
 			qtail[JQ_SUBMIT] = &pcl->next;
 		else
 			move_to_bypass_jobqueue(pcl, qtail, owned_head);
@@ -1263,11 +1234,15 @@ static bool z_erofs_submit_queue(struct super_block *sb,
 	if (bio)
 		submit_bio(bio);
 
-	if (postsubmit_is_all_bypassed(q, nr_bios, *force_fg))
-		return true;
+	/*
+	 * although background is preferred, no one is pending for submission.
+	 * don't issue workqueue for decompression but drop it directly instead.
+	 */
+	if (!*force_fg && !nr_bios) {
+		kvfree(q[JQ_SUBMIT]);
+		return;
+	}
 
 	z_erofs_decompress_kickoff(q[JQ_SUBMIT], *force_fg, nr_bios);
-	return true;
 }
 
 static void z_erofs_runqueue(struct super_block *sb,
@@ -1276,9 +1251,9 @@ static void z_erofs_runqueue(struct super_block *sb,
 {
 	struct z_erofs_decompressqueue io[NR_JOBQUEUES];
 
-	if (!z_erofs_submit_queue(sb, clt->owned_head,
-				  pagepool, io, &force_fg))
+	if (clt->owned_head == Z_EROFS_PCLUSTER_TAIL)
 		return;
 
+	z_erofs_submit_queue(sb, clt->owned_head, pagepool, io, &force_fg);
+
 	/* handle bypass queue (no i/o pclusters) immediately */
 	z_erofs_decompress_queue(&io[JQ_BYPASS], pagepool);
...