Commit 8af7c124 authored by Tejun Heo

fscache: convert operation to use workqueue instead of slow-work

Make fscache operations use workqueues only, instead of a combination of
workqueue and slow-work.  FSCACHE_OP_SLOW is dropped and FSCACHE_OP_FAST
is renamed to FSCACHE_OP_ASYNC, which uses the newly added fscache_op_wq
workqueue to execute op->processor().  fscache_operation_init_slow() is
dropped and fscache_operation_init() now takes a @processor argument
directly.
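
For illustration, the call-site change looks like this (a minimal sketch
distilled from the __fscache_write_page() hunk below):

	/* before: two-step initialisation, slow-work based */
	fscache_operation_init(&op->op, fscache_release_write_op);
	fscache_operation_init_slow(&op->op, fscache_write_op);
	op->op.flags = FSCACHE_OP_SLOW | (1 << FSCACHE_OP_WAITING);

	/* after: the processor is passed to fscache_operation_init() directly */
	fscache_operation_init(&op->op, fscache_write_op,
			       fscache_release_write_op);
	op->op.flags = FSCACHE_OP_ASYNC | (1 << FSCACHE_OP_WAITING);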

* An unbound workqueue is used.

* fscache_retrieval_work() is no longer necessary as OP_ASYNC now does
  the equivalent thing.

* A sysctl, fscache.operation_max_active, is added to control
  concurrency.  The default value is nr_cpus clamped between 2 and
  WQ_UNBOUND_MAX_ACTIVE (see the sketch following this list).

* debugfs support is dropped for now.  A tracing-API-based debug
  facility is planned to be added.
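
For a rough feel for how the default comes out, here is a userspace
sketch (not kernel code: the clamp_val() stand-in, the
WQ_UNBOUND_MAX_ACTIVE value of 512 and the object-workqueue sizing
inherited from the preceding fscache_object_wq conversion are all
assumptions):

	#include <stdio.h>

	#define WQ_UNBOUND_MAX_ACTIVE 512	/* assumed value */

	/* userspace stand-in for the kernel's clamp_val() macro */
	static unsigned clamp_val(unsigned v, unsigned lo, unsigned hi)
	{
		return v < lo ? lo : (v > hi ? hi : v);
	}

	int main(void)
	{
		unsigned nr_cpus, object_max, op_max;

		for (nr_cpus = 1; nr_cpus <= 64; nr_cpus *= 4) {
			/* object wq default (assumed from the earlier patch) */
			object_max = clamp_val(nr_cpus, 4, WQ_UNBOUND_MAX_ACTIVE);
			/* operation wq default, as computed in fscache_init() */
			op_max = clamp_val(object_max / 2, 2,
					   WQ_UNBOUND_MAX_ACTIVE);
			printf("nr_cpus=%2u -> object_max_active=%u, "
			       "operation_max_active=%u\n",
			       nr_cpus, object_max, op_max);
		}
		return 0;
	}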
Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: David Howells <dhowells@redhat.com>
parent 8b8edefa
--- a/fs/cachefiles/rdwr.c
+++ b/fs/cachefiles/rdwr.c
@@ -422,7 +422,7 @@ int cachefiles_read_or_alloc_page(struct fscache_retrieval *op,
 	shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits;
 
 	op->op.flags &= FSCACHE_OP_KEEP_FLAGS;
-	op->op.flags |= FSCACHE_OP_FAST;
+	op->op.flags |= FSCACHE_OP_ASYNC;
 	op->op.processor = cachefiles_read_copier;
 
 	pagevec_init(&pagevec, 0);
@@ -729,7 +729,7 @@ int cachefiles_read_or_alloc_pages(struct fscache_retrieval *op,
 	pagevec_init(&pagevec, 0);
 
 	op->op.flags &= FSCACHE_OP_KEEP_FLAGS;
-	op->op.flags |= FSCACHE_OP_FAST;
+	op->op.flags |= FSCACHE_OP_ASYNC;
 	op->op.processor = cachefiles_read_copier;
 
 	INIT_LIST_HEAD(&backpages);
--- a/fs/fscache/internal.h
+++ b/fs/fscache/internal.h
@@ -83,6 +83,7 @@ extern unsigned fscache_defer_create;
 extern unsigned fscache_debug;
 extern struct kobject *fscache_root;
 extern struct workqueue_struct *fscache_object_wq;
+extern struct workqueue_struct *fscache_op_wq;
 DECLARE_PER_CPU(wait_queue_head_t, fscache_object_cong_wait);
 
 static inline bool fscache_object_congested(void)
--- a/fs/fscache/main.c
+++ b/fs/fscache/main.c
@@ -42,11 +42,13 @@ MODULE_PARM_DESC(fscache_debug,
 
 struct kobject *fscache_root;
 struct workqueue_struct *fscache_object_wq;
+struct workqueue_struct *fscache_op_wq;
 
 DEFINE_PER_CPU(wait_queue_head_t, fscache_object_cong_wait);
 
 /* these values serve as lower bounds, will be adjusted in fscache_init() */
 static unsigned fscache_object_max_active = 4;
+static unsigned fscache_op_max_active = 2;
 
 #ifdef CONFIG_SYSCTL
 static struct ctl_table_header *fscache_sysctl_header;
@@ -74,6 +76,14 @@ ctl_table fscache_sysctls[] = {
 		.proc_handler	= fscache_max_active_sysctl,
 		.extra1		= &fscache_object_wq,
 	},
+	{
+		.procname	= "operation_max_active",
+		.data		= &fscache_op_max_active,
+		.maxlen		= sizeof(unsigned),
+		.mode		= 0644,
+		.proc_handler	= fscache_max_active_sysctl,
+		.extra1		= &fscache_op_wq,
+	},
 	{}
 };
@@ -110,6 +120,16 @@ static int __init fscache_init(void)
 	if (!fscache_object_wq)
 		goto error_object_wq;
 
+	fscache_op_max_active =
+		clamp_val(fscache_object_max_active / 2,
+			  fscache_op_max_active, WQ_UNBOUND_MAX_ACTIVE);
+
+	ret = -ENOMEM;
+	fscache_op_wq = alloc_workqueue("fscache_operation", WQ_UNBOUND,
+					fscache_op_max_active);
+	if (!fscache_op_wq)
+		goto error_op_wq;
+
 	for_each_possible_cpu(cpu)
 		init_waitqueue_head(&per_cpu(fscache_object_cong_wait, cpu));
@@ -152,6 +172,8 @@ static int __init fscache_init(void)
 #endif
 	fscache_proc_cleanup();
 error_proc:
+	destroy_workqueue(fscache_op_wq);
+error_op_wq:
 	destroy_workqueue(fscache_object_wq);
 error_object_wq:
 	slow_work_unregister_user(THIS_MODULE);
@@ -172,6 +194,7 @@ static void __exit fscache_exit(void)
 	kmem_cache_destroy(fscache_cookie_jar);
 	unregister_sysctl_table(fscache_sysctl_header);
 	fscache_proc_cleanup();
+	destroy_workqueue(fscache_op_wq);
 	destroy_workqueue(fscache_object_wq);
 	slow_work_unregister_user(THIS_MODULE);
 	printk(KERN_NOTICE "FS-Cache: Unloaded\n");
--- a/fs/fscache/operation.c
+++ b/fs/fscache/operation.c
@@ -42,16 +42,12 @@ void fscache_enqueue_operation(struct fscache_operation *op)
 
 	fscache_stat(&fscache_n_op_enqueue);
 	switch (op->flags & FSCACHE_OP_TYPE) {
-	case FSCACHE_OP_FAST:
-		_debug("queue fast");
+	case FSCACHE_OP_ASYNC:
+		_debug("queue async");
 		atomic_inc(&op->usage);
-		if (!schedule_work(&op->fast_work))
+		if (!queue_work(fscache_op_wq, &op->work))
 			fscache_put_operation(op);
 		break;
-	case FSCACHE_OP_SLOW:
-		_debug("queue slow");
-		slow_work_enqueue(&op->slow_work);
-		break;
 	case FSCACHE_OP_MYTHREAD:
 		_debug("queue for caller's attention");
 		break;
@@ -455,36 +451,13 @@ void fscache_operation_gc(struct work_struct *work)
 }
 
 /*
- * allow the slow work item processor to get a ref on an operation
+ * execute an operation using fs_op_wq to provide processing context -
+ * the caller holds a ref to this object, so we don't need to hold one
  */
-static int fscache_op_get_ref(struct slow_work *work)
-{
-	struct fscache_operation *op =
-		container_of(work, struct fscache_operation, slow_work);
-
-	atomic_inc(&op->usage);
-	return 0;
-}
-
-/*
- * allow the slow work item processor to discard a ref on an operation
- */
-static void fscache_op_put_ref(struct slow_work *work)
-{
-	struct fscache_operation *op =
-		container_of(work, struct fscache_operation, slow_work);
-
-	fscache_put_operation(op);
-}
-
-/*
- * execute an operation using the slow thread pool to provide processing context
- * - the caller holds a ref to this object, so we don't need to hold one
- */
-static void fscache_op_execute(struct slow_work *work)
+void fscache_op_work_func(struct work_struct *work)
 {
 	struct fscache_operation *op =
-		container_of(work, struct fscache_operation, slow_work);
+		container_of(work, struct fscache_operation, work);
 	unsigned long start;
 
 	_enter("{OBJ%x OP%x,%d}",
@@ -494,31 +467,7 @@ static void fscache_op_execute(struct slow_work *work)
 	start = jiffies;
 	op->processor(op);
 	fscache_hist(fscache_ops_histogram, start);
+	fscache_put_operation(op);
 
 	_leave("");
 }
-
-/*
- * describe an operation for slow-work debugging
- */
-#ifdef CONFIG_SLOW_WORK_DEBUG
-static void fscache_op_desc(struct slow_work *work, struct seq_file *m)
-{
-	struct fscache_operation *op =
-		container_of(work, struct fscache_operation, slow_work);
-
-	seq_printf(m, "FSC: OBJ%x OP%x: %s/%s fl=%lx",
-		   op->object->debug_id, op->debug_id,
-		   op->name, op->state, op->flags);
-}
-#endif
-
-const struct slow_work_ops fscache_op_slow_work_ops = {
-	.owner		= THIS_MODULE,
-	.get_ref	= fscache_op_get_ref,
-	.put_ref	= fscache_op_put_ref,
-	.execute	= fscache_op_execute,
-#ifdef CONFIG_SLOW_WORK_DEBUG
-	.desc		= fscache_op_desc,
-#endif
-};
--- a/fs/fscache/page.c
+++ b/fs/fscache/page.c
@@ -105,7 +105,7 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
 page_busy:
 	/* we might want to wait here, but that could deadlock the allocator as
-	 * the slow-work threads writing to the cache may all end up sleeping
+	 * the work threads writing to the cache may all end up sleeping
	 * on memory allocation */
 	fscache_stat(&fscache_n_store_vmscan_busy);
 	return false;
@@ -188,9 +188,8 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
 		return -ENOMEM;
 	}
 
-	fscache_operation_init(op, NULL);
-	fscache_operation_init_slow(op, fscache_attr_changed_op);
-	op->flags = FSCACHE_OP_SLOW | (1 << FSCACHE_OP_EXCLUSIVE);
+	fscache_operation_init(op, fscache_attr_changed_op, NULL);
+	op->flags = FSCACHE_OP_ASYNC | (1 << FSCACHE_OP_EXCLUSIVE);
 	fscache_set_op_name(op, "Attr");
 
 	spin_lock(&cookie->lock);
@@ -217,24 +216,6 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
 }
 EXPORT_SYMBOL(__fscache_attr_changed);
 
-/*
- * handle secondary execution given to a retrieval op on behalf of the
- * cache
- */
-static void fscache_retrieval_work(struct work_struct *work)
-{
-	struct fscache_retrieval *op =
-		container_of(work, struct fscache_retrieval, op.fast_work);
-	unsigned long start;
-
-	_enter("{OP%x}", op->op.debug_id);
-
-	start = jiffies;
-	op->op.processor(&op->op);
-	fscache_hist(fscache_ops_histogram, start);
-	fscache_put_operation(&op->op);
-}
-
 /*
  * release a retrieval op reference
  */
@@ -269,13 +250,12 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
 		return NULL;
 	}
 
-	fscache_operation_init(&op->op, fscache_release_retrieval_op);
+	fscache_operation_init(&op->op, NULL, fscache_release_retrieval_op);
 	op->op.flags	= FSCACHE_OP_MYTHREAD | (1 << FSCACHE_OP_WAITING);
 	op->mapping	= mapping;
 	op->end_io_func	= end_io_func;
 	op->context	= context;
 	op->start_time	= jiffies;
-	INIT_WORK(&op->op.fast_work, fscache_retrieval_work);
 	INIT_LIST_HEAD(&op->to_do);
 	fscache_set_op_name(&op->op, "Retr");
 	return op;
@@ -795,9 +775,9 @@ int __fscache_write_page(struct fscache_cookie *cookie,
 	if (!op)
 		goto nomem;
 
-	fscache_operation_init(&op->op, fscache_release_write_op);
-	fscache_operation_init_slow(&op->op, fscache_write_op);
-	op->op.flags = FSCACHE_OP_SLOW | (1 << FSCACHE_OP_WAITING);
+	fscache_operation_init(&op->op, fscache_write_op,
+			       fscache_release_write_op);
+	op->op.flags = FSCACHE_OP_ASYNC | (1 << FSCACHE_OP_WAITING);
 	fscache_set_op_name(&op->op, "Write1");
 
 	ret = radix_tree_preload(gfp & ~__GFP_HIGHMEM);
@@ -852,7 +832,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
 	fscache_stat(&fscache_n_store_ops);
 	fscache_stat(&fscache_n_stores_ok);
 
-	/* the slow work queue now carries its own ref on the object */
+	/* the work queue now carries its own ref on the object */
 	fscache_put_operation(&op->op);
 	_leave(" = 0");
 	return 0;
--- a/include/linux/fscache-cache.h
+++ b/include/linux/fscache-cache.h
@@ -77,18 +77,14 @@ typedef void (*fscache_operation_release_t)(struct fscache_operation *op);
 typedef void (*fscache_operation_processor_t)(struct fscache_operation *op);
 
 struct fscache_operation {
-	union {
-		struct work_struct fast_work;	/* record for fast ops */
-		struct slow_work slow_work;	/* record for (very) slow ops */
-	};
+	struct work_struct	work;		/* record for async ops */
 	struct list_head	pend_link;	/* link in object->pending_ops */
 	struct fscache_object	*object;	/* object to be operated upon */
 
 	unsigned long		flags;
 #define FSCACHE_OP_TYPE		0x000f	/* operation type */
-#define FSCACHE_OP_FAST		0x0001	/* - fast op, processor may not sleep for disk */
-#define FSCACHE_OP_SLOW		0x0002	/* - (very) slow op, processor may sleep for disk */
-#define FSCACHE_OP_MYTHREAD	0x0003	/* - processing is done be issuing thread, not pool */
+#define FSCACHE_OP_ASYNC	0x0001	/* - async op, processor may sleep for disk */
+#define FSCACHE_OP_MYTHREAD	0x0002	/* - processing is done by issuing thread, not pool */
 #define FSCACHE_OP_WAITING	4	/* cleared when op is woken */
 #define FSCACHE_OP_EXCLUSIVE	5	/* exclusive op, other ops must wait */
 #define FSCACHE_OP_DEAD		6	/* op is now dead */
@@ -106,7 +102,8 @@ struct fscache_operation {
 	/* operation releaser */
 	fscache_operation_release_t release;
 
-#ifdef CONFIG_SLOW_WORK_DEBUG
+#ifdef CONFIG_WORKQUEUE_DEBUGFS
+	struct work_struct	put_work;	/* work to delay operation put */
 	const char		*name;		/* operation name */
 	const char		*state;		/* operation state */
 #define fscache_set_op_name(OP, N) do { (OP)->name = (N); } while(0)
@@ -118,7 +115,7 @@ struct fscache_operation {
 };
 
 extern atomic_t fscache_op_debug_id;
-extern const struct slow_work_ops fscache_op_slow_work_ops;
+extern void fscache_op_work_func(struct work_struct *work);
 
 extern void fscache_enqueue_operation(struct fscache_operation *);
 extern void fscache_put_operation(struct fscache_operation *);
@@ -129,33 +126,21 @@ extern void fscache_put_operation(struct fscache_operation *);
  * @release: The release function to assign
  *
  * Do basic initialisation of an operation.  The caller must still set flags,
- * object, either fast_work or slow_work if necessary, and processor if needed.
+ * object and processor if needed.
  */
 static inline void fscache_operation_init(struct fscache_operation *op,
-					  fscache_operation_release_t release)
+					fscache_operation_processor_t processor,
+					fscache_operation_release_t release)
 {
+	INIT_WORK(&op->work, fscache_op_work_func);
 	atomic_set(&op->usage, 1);
 	op->debug_id = atomic_inc_return(&fscache_op_debug_id);
+	op->processor = processor;
 	op->release = release;
 	INIT_LIST_HEAD(&op->pend_link);
 	fscache_set_op_state(op, "Init");
 }
 
-/**
- * fscache_operation_init_slow - Do additional initialisation of a slow op
- * @op: The operation to initialise
- * @processor: The processor function to assign
- *
- * Do additional initialisation of an operation as required for slow work.
- */
-static inline
-void fscache_operation_init_slow(struct fscache_operation *op,
-				 fscache_operation_processor_t processor)
-{
-	op->processor = processor;
-	slow_work_init(&op->slow_work, &fscache_op_slow_work_ops);
-}
-
 /*
  * data read operation
  */
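
Taken together, a backend that wants pool-executed processing now wires
up an operation roughly as follows (a hedged sketch: my_processor(),
my_release() and my_submit() are hypothetical names, and real callers
also set op->object and hold the appropriate locks before enqueueing):

	static void my_processor(struct fscache_operation *op)
	{
		/* runs in process context on fscache_op_wq; may sleep for disk */
	}

	static void my_release(struct fscache_operation *op)
	{
		/* called when the last reference to the op is dropped */
	}

	static void my_submit(struct fscache_operation *op)
	{
		fscache_operation_init(op, my_processor, my_release);
		op->flags = FSCACHE_OP_ASYNC | (1 << FSCACHE_OP_EXCLUSIVE);

		/* fscache_enqueue_operation() sees FSCACHE_OP_ASYNC, takes its
		 * own reference and hands the op to fscache_op_wq via
		 * queue_work(); fscache_op_work_func() runs the processor and
		 * drops that reference when it completes */
		fscache_enqueue_operation(op);
	}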