Commit db166fc2 authored by Gao Xiang's avatar Gao Xiang

erofs: clean up `enum z_erofs_collectmode'

`enum z_erofs_collectmode' is really ambiguous, and I'm not quite
sure if there is a better name; basically it's used to judge whether
inplace I/O can be used due to the current status of pclusters in
the chain.

Rename it to `enum z_erofs_pclustermode' instead.
Acked-by: Chao Yu <chao@kernel.org>
Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
Link: https://lore.kernel.org/r/20220715154203.48093-11-hsiangkao@linux.alibaba.com
parent 5b220b20
...@@ -227,30 +227,29 @@ int __init z_erofs_init_zip_subsystem(void) ...@@ -227,30 +227,29 @@ int __init z_erofs_init_zip_subsystem(void)
return err; return err;
} }
enum z_erofs_collectmode { enum z_erofs_pclustermode {
COLLECT_SECONDARY, Z_EROFS_PCLUSTER_INFLIGHT,
COLLECT_PRIMARY,
/* /*
* The current collection was the tail of an exist chain, in addition * The current pclusters was the tail of an exist chain, in addition
* that the previous processed chained collections are all decided to * that the previous processed chained pclusters are all decided to
* be hooked up to it. * be hooked up to it.
* A new chain will be created for the remaining collections which are * A new chain will be created for the remaining pclusters which are
* not processed yet, therefore different from COLLECT_PRIMARY_FOLLOWED, * not processed yet, so different from Z_EROFS_PCLUSTER_FOLLOWED,
* the next collection cannot reuse the whole page safely in * the next pcluster cannot reuse the whole page safely for inplace I/O
* the following scenario: * in the following scenario:
* ________________________________________________________________ * ________________________________________________________________
* | tail (partial) page | head (partial) page | * | tail (partial) page | head (partial) page |
* | (belongs to the next cl) | (belongs to the current cl) | * | (belongs to the next pcl) | (belongs to the current pcl) |
* |_______PRIMARY_FOLLOWED_______|________PRIMARY_HOOKED___________| * |_______PCLUSTER_FOLLOWED______|________PCLUSTER_HOOKED__________|
*/ */
COLLECT_PRIMARY_HOOKED, Z_EROFS_PCLUSTER_HOOKED,
/* /*
* a weak form of COLLECT_PRIMARY_FOLLOWED, the difference is that it * a weak form of Z_EROFS_PCLUSTER_FOLLOWED, the difference is that it
* could be dispatched into bypass queue later due to uptodated managed * could be dispatched into bypass queue later due to uptodated managed
* pages. All related online pages cannot be reused for inplace I/O (or * pages. All related online pages cannot be reused for inplace I/O (or
* bvpage) since it can be directly decoded without I/O submission. * bvpage) since it can be directly decoded without I/O submission.
*/ */
COLLECT_PRIMARY_FOLLOWED_NOINPLACE, Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE,
/* /*
* The current collection has been linked with the owned chain, and * The current collection has been linked with the owned chain, and
* could also be linked with the remaining collections, which means * could also be linked with the remaining collections, which means
...@@ -261,12 +260,12 @@ enum z_erofs_collectmode { ...@@ -261,12 +260,12 @@ enum z_erofs_collectmode {
* ________________________________________________________________ * ________________________________________________________________
* | tail (partial) page | head (partial) page | * | tail (partial) page | head (partial) page |
* | (of the current cl) | (of the previous collection) | * | (of the current cl) | (of the previous collection) |
* | PRIMARY_FOLLOWED or | | * | PCLUSTER_FOLLOWED or | |
* |_____PRIMARY_HOOKED___|____________PRIMARY_FOLLOWED____________| * |_____PCLUSTER_HOOKED__|___________PCLUSTER_FOLLOWED____________|
* *
* [ (*) the above page can be used as inplace I/O. ] * [ (*) the above page can be used as inplace I/O. ]
*/ */
COLLECT_PRIMARY_FOLLOWED, Z_EROFS_PCLUSTER_FOLLOWED,
}; };
struct z_erofs_decompress_frontend { struct z_erofs_decompress_frontend {
...@@ -277,7 +276,7 @@ struct z_erofs_decompress_frontend { ...@@ -277,7 +276,7 @@ struct z_erofs_decompress_frontend {
struct page *candidate_bvpage; struct page *candidate_bvpage;
struct z_erofs_pcluster *pcl, *tailpcl; struct z_erofs_pcluster *pcl, *tailpcl;
z_erofs_next_pcluster_t owned_head; z_erofs_next_pcluster_t owned_head;
enum z_erofs_collectmode mode; enum z_erofs_pclustermode mode;
bool readahead; bool readahead;
/* used for applying cache strategy on the fly */ /* used for applying cache strategy on the fly */
...@@ -290,7 +289,7 @@ struct z_erofs_decompress_frontend { ...@@ -290,7 +289,7 @@ struct z_erofs_decompress_frontend {
#define DECOMPRESS_FRONTEND_INIT(__i) { \ #define DECOMPRESS_FRONTEND_INIT(__i) { \
.inode = __i, .owned_head = Z_EROFS_PCLUSTER_TAIL, \ .inode = __i, .owned_head = Z_EROFS_PCLUSTER_TAIL, \
.mode = COLLECT_PRIMARY_FOLLOWED, .backmost = true } .mode = Z_EROFS_PCLUSTER_FOLLOWED, .backmost = true }
static struct page *z_pagemap_global[Z_EROFS_VMAP_GLOBAL_PAGES]; static struct page *z_pagemap_global[Z_EROFS_VMAP_GLOBAL_PAGES];
static DEFINE_MUTEX(z_pagemap_global_lock); static DEFINE_MUTEX(z_pagemap_global_lock);
...@@ -310,7 +309,7 @@ static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe, ...@@ -310,7 +309,7 @@ static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe,
__GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN; __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;
unsigned int i; unsigned int i;
if (fe->mode < COLLECT_PRIMARY_FOLLOWED) if (fe->mode < Z_EROFS_PCLUSTER_FOLLOWED)
return; return;
for (i = 0; i < pcl->pclusterpages; ++i) { for (i = 0; i < pcl->pclusterpages; ++i) {
...@@ -358,7 +357,7 @@ static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe, ...@@ -358,7 +357,7 @@ static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe,
* managed cache since it can be moved to the bypass queue instead. * managed cache since it can be moved to the bypass queue instead.
*/ */
if (standalone) if (standalone)
fe->mode = COLLECT_PRIMARY_FOLLOWED_NOINPLACE; fe->mode = Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE;
} }
/* called by erofs_shrinker to get rid of all compressed_pages */ /* called by erofs_shrinker to get rid of all compressed_pages */
...@@ -439,12 +438,12 @@ static int z_erofs_attach_page(struct z_erofs_decompress_frontend *fe, ...@@ -439,12 +438,12 @@ static int z_erofs_attach_page(struct z_erofs_decompress_frontend *fe,
{ {
int ret; int ret;
if (fe->mode >= COLLECT_PRIMARY && exclusive) { if (exclusive) {
/* give priority for inplaceio to use file pages first */ /* give priority for inplaceio to use file pages first */
if (z_erofs_try_inplace_io(fe, bvec)) if (z_erofs_try_inplace_io(fe, bvec))
return 0; return 0;
/* otherwise, check if it can be used as a bvpage */ /* otherwise, check if it can be used as a bvpage */
if (fe->mode >= COLLECT_PRIMARY_FOLLOWED && if (fe->mode >= Z_EROFS_PCLUSTER_FOLLOWED &&
!fe->candidate_bvpage) !fe->candidate_bvpage)
fe->candidate_bvpage = bvec->page; fe->candidate_bvpage = bvec->page;
} }
...@@ -463,7 +462,7 @@ static void z_erofs_try_to_claim_pcluster(struct z_erofs_decompress_frontend *f) ...@@ -463,7 +462,7 @@ static void z_erofs_try_to_claim_pcluster(struct z_erofs_decompress_frontend *f)
*owned_head) == Z_EROFS_PCLUSTER_NIL) { *owned_head) == Z_EROFS_PCLUSTER_NIL) {
*owned_head = &pcl->next; *owned_head = &pcl->next;
/* so we can attach this pcluster to our submission chain. */ /* so we can attach this pcluster to our submission chain. */
f->mode = COLLECT_PRIMARY_FOLLOWED; f->mode = Z_EROFS_PCLUSTER_FOLLOWED;
return; return;
} }
...@@ -474,12 +473,12 @@ static void z_erofs_try_to_claim_pcluster(struct z_erofs_decompress_frontend *f) ...@@ -474,12 +473,12 @@ static void z_erofs_try_to_claim_pcluster(struct z_erofs_decompress_frontend *f)
if (cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_TAIL, if (cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_TAIL,
*owned_head) == Z_EROFS_PCLUSTER_TAIL) { *owned_head) == Z_EROFS_PCLUSTER_TAIL) {
*owned_head = Z_EROFS_PCLUSTER_TAIL; *owned_head = Z_EROFS_PCLUSTER_TAIL;
f->mode = COLLECT_PRIMARY_HOOKED; f->mode = Z_EROFS_PCLUSTER_HOOKED;
f->tailpcl = NULL; f->tailpcl = NULL;
return; return;
} }
/* type 3, it belongs to a chain, but it isn't the end of the chain */ /* type 3, it belongs to a chain, but it isn't the end of the chain */
f->mode = COLLECT_PRIMARY; f->mode = Z_EROFS_PCLUSTER_INFLIGHT;
} }
static int z_erofs_lookup_pcluster(struct z_erofs_decompress_frontend *fe) static int z_erofs_lookup_pcluster(struct z_erofs_decompress_frontend *fe)
...@@ -554,7 +553,7 @@ static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe) ...@@ -554,7 +553,7 @@ static int z_erofs_register_pcluster(struct z_erofs_decompress_frontend *fe)
/* new pclusters should be claimed as type 1, primary and followed */ /* new pclusters should be claimed as type 1, primary and followed */
pcl->next = fe->owned_head; pcl->next = fe->owned_head;
pcl->pageofs_out = map->m_la & ~PAGE_MASK; pcl->pageofs_out = map->m_la & ~PAGE_MASK;
fe->mode = COLLECT_PRIMARY_FOLLOWED; fe->mode = Z_EROFS_PCLUSTER_FOLLOWED;
/* /*
* lock all primary followed works before visible to others * lock all primary followed works before visible to others
...@@ -676,7 +675,7 @@ static bool z_erofs_collector_end(struct z_erofs_decompress_frontend *fe) ...@@ -676,7 +675,7 @@ static bool z_erofs_collector_end(struct z_erofs_decompress_frontend *fe)
* if all pending pages are added, don't hold its reference * if all pending pages are added, don't hold its reference
* any longer if the pcluster isn't hosted by ourselves. * any longer if the pcluster isn't hosted by ourselves.
*/ */
if (fe->mode < COLLECT_PRIMARY_FOLLOWED_NOINPLACE) if (fe->mode < Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE)
erofs_workgroup_put(&pcl->obj); erofs_workgroup_put(&pcl->obj);
fe->pcl = NULL; fe->pcl = NULL;
...@@ -756,7 +755,7 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe, ...@@ -756,7 +755,7 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
get_page(fe->map.buf.page); get_page(fe->map.buf.page);
WRITE_ONCE(fe->pcl->compressed_bvecs[0].page, WRITE_ONCE(fe->pcl->compressed_bvecs[0].page,
fe->map.buf.page); fe->map.buf.page);
fe->mode = COLLECT_PRIMARY_FOLLOWED_NOINPLACE; fe->mode = Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE;
} else { } else {
/* bind cache first when cached decompression is preferred */ /* bind cache first when cached decompression is preferred */
if (should_alloc_managed_pages(fe, sbi->opt.cache_strategy, if (should_alloc_managed_pages(fe, sbi->opt.cache_strategy,
...@@ -774,8 +773,8 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe, ...@@ -774,8 +773,8 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
* those chains are handled asynchronously thus the page cannot be used * those chains are handled asynchronously thus the page cannot be used
* for inplace I/O or bvpage (should be processed in a strict order.) * for inplace I/O or bvpage (should be processed in a strict order.)
*/ */
tight &= (fe->mode >= COLLECT_PRIMARY_HOOKED && tight &= (fe->mode >= Z_EROFS_PCLUSTER_HOOKED &&
fe->mode != COLLECT_PRIMARY_FOLLOWED_NOINPLACE); fe->mode != Z_EROFS_PCLUSTER_FOLLOWED_NOINPLACE);
cur = end - min_t(unsigned int, offset + end - map->m_la, end); cur = end - min_t(unsigned int, offset + end - map->m_la, end);
if (!(map->m_flags & EROFS_MAP_MAPPED)) { if (!(map->m_flags & EROFS_MAP_MAPPED)) {
...@@ -785,7 +784,7 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe, ...@@ -785,7 +784,7 @@ static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
exclusive = (!cur && (!spiltted || tight)); exclusive = (!cur && (!spiltted || tight));
if (cur) if (cur)
tight &= (fe->mode >= COLLECT_PRIMARY_FOLLOWED); tight &= (fe->mode >= Z_EROFS_PCLUSTER_FOLLOWED);
retry: retry:
err = z_erofs_attach_page(fe, &((struct z_erofs_bvec) { err = z_erofs_attach_page(fe, &((struct z_erofs_bvec) {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment