Commit bc3adfc6 authored by Linus Torvalds

Merge branch 'fixes-2.6.38' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq

* 'fixes-2.6.38' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq:
  workqueue: make sure MAYDAY_INITIAL_TIMEOUT is at least 2 jiffies long
  workqueue, freezer: unify spelling of 'freeze' + 'able' to 'freezable'
  workqueue: wake up a worker when a rescuer is leaving a gcwq
parents 3c18d4de 3233cdbd
@@ -190,9 +190,9 @@ resources, scheduled and executed.
 	* Long running CPU intensive workloads which can be better
 	  managed by the system scheduler.

-  WQ_FREEZEABLE
+  WQ_FREEZABLE

-	A freezeable wq participates in the freeze phase of the system
+	A freezable wq participates in the freeze phase of the system
 	suspend operations.  Work items on the wq are drained and no
 	new work item starts execution until thawed.
...
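As a rough illustration of the renamed flag (not part of this commit; the queue name, function and error handling below are invented for the example), a driver that wants its work parked across suspend would now allocate its queue with WQ_FREEZABLE:

#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;	/* illustrative only */

static int __init example_init(void)
{
	/*
	 * Work items on a WQ_FREEZABLE queue are drained during the freeze
	 * phase of suspend and nothing new runs until the queue is thawed.
	 */
	example_wq = alloc_workqueue("example_freezable",
				     WQ_FREEZABLE | WQ_MEM_RECLAIM, 0);
	if (!example_wq)
		return -ENOMEM;
	return 0;
}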
@@ -621,7 +621,7 @@ static int __init memstick_init(void)
 {
 	int rc;

-	workqueue = create_freezeable_workqueue("kmemstick");
+	workqueue = create_freezable_workqueue("kmemstick");
 	if (!workqueue)
 		return -ENOMEM;
...
@@ -329,7 +329,7 @@ static int __init tifm_init(void)
 {
 	int rc;

-	workqueue = create_freezeable_workqueue("tifm");
+	workqueue = create_freezable_workqueue("tifm");
 	if (!workqueue)
 		return -ENOMEM;
...
@@ -785,7 +785,7 @@ static int __init vmballoon_init(void)
 	if (x86_hyper != &x86_hyper_vmware)
 		return -ENODEV;

-	vmballoon_wq = create_freezeable_workqueue("vmmemctl");
+	vmballoon_wq = create_freezable_workqueue("vmmemctl");
 	if (!vmballoon_wq) {
 		pr_err("failed to create workqueue\n");
 		return -ENOMEM;
...
@@ -930,7 +930,7 @@ int r852_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
 	init_completion(&dev->dma_done);

-	dev->card_workqueue = create_freezeable_workqueue(DRV_NAME);
+	dev->card_workqueue = create_freezable_workqueue(DRV_NAME);
 	if (!dev->card_workqueue)
 		goto error9;
...
@@ -1258,7 +1258,7 @@ static struct mtd_blktrans_ops sm_ftl_ops = {
 static __init int sm_module_init(void)
 {
 	int error = 0;

-	cache_flush_workqueue = create_freezeable_workqueue("smflush");
+	cache_flush_workqueue = create_freezable_workqueue("smflush");
 	if (IS_ERR(cache_flush_workqueue))
 		return PTR_ERR(cache_flush_workqueue);
...
@@ -940,7 +940,7 @@ static int mcp251x_open(struct net_device *net)
 		goto open_unlock;
 	}

-	priv->wq = create_freezeable_workqueue("mcp251x_wq");
+	priv->wq = create_freezable_workqueue("mcp251x_wq");
 	INIT_WORK(&priv->tx_work, mcp251x_tx_work_handler);
 	INIT_WORK(&priv->restart_work, mcp251x_restart_work_handler);
...
@@ -601,7 +601,7 @@ static int max3100_startup(struct uart_port *port)
 	s->rts = 0;

 	sprintf(b, "max3100-%d", s->minor);
-	s->workqueue = create_freezeable_workqueue(b);
+	s->workqueue = create_freezable_workqueue(b);
 	if (!s->workqueue) {
 		dev_warn(&s->spi->dev, "cannot create workqueue\n");
 		return -EBUSY;
...
@@ -833,7 +833,7 @@ static int max3107_startup(struct uart_port *port)
 	struct max3107_port *s = container_of(port, struct max3107_port, port);

 	/* Initialize work queue */
-	s->workqueue = create_freezeable_workqueue("max3107");
+	s->workqueue = create_freezable_workqueue("max3107");
 	if (!s->workqueue) {
 		dev_err(&s->spi->dev, "Workqueue creation failed\n");
 		return -EBUSY;
...
@@ -1779,11 +1779,11 @@ int __init gfs2_glock_init(void)
 #endif

 	glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM |
-					  WQ_HIGHPRI | WQ_FREEZEABLE, 0);
+					  WQ_HIGHPRI | WQ_FREEZABLE, 0);
 	if (IS_ERR(glock_workqueue))
 		return PTR_ERR(glock_workqueue);
 	gfs2_delete_workqueue = alloc_workqueue("delete_workqueue",
-						WQ_MEM_RECLAIM | WQ_FREEZEABLE,
+						WQ_MEM_RECLAIM | WQ_FREEZABLE,
 						0);
 	if (IS_ERR(gfs2_delete_workqueue)) {
 		destroy_workqueue(glock_workqueue);
...
@@ -144,7 +144,7 @@ static int __init init_gfs2_fs(void)
 	error = -ENOMEM;
 	gfs_recovery_wq = alloc_workqueue("gfs_recovery",
-					  WQ_MEM_RECLAIM | WQ_FREEZEABLE, 0);
+					  WQ_MEM_RECLAIM | WQ_FREEZABLE, 0);
 	if (!gfs_recovery_wq)
 		goto fail_wq;
...
@@ -109,7 +109,7 @@ static inline void freezer_count(void)
 }

 /*
- * Check if the task should be counted as freezeable by the freezer
+ * Check if the task should be counted as freezable by the freezer
  */
 static inline int freezer_should_skip(struct task_struct *p)
 {
...
@@ -1744,7 +1744,7 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
 #define PF_MCE_EARLY    0x08000000	/* Early kill for mce process policy */
 #define PF_MEMPOLICY	0x10000000	/* Non-default NUMA mempolicy */
 #define PF_MUTEX_TESTER	0x20000000	/* Thread belongs to the rt mutex tester */
-#define PF_FREEZER_SKIP	0x40000000	/* Freezer should not count it as freezeable */
+#define PF_FREEZER_SKIP	0x40000000	/* Freezer should not count it as freezable */
 #define PF_FREEZER_NOSIG 0x80000000	/* Freezer won't send signals to it */

 /*
...
@@ -250,7 +250,7 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
 enum {
 	WQ_NON_REENTRANT	= 1 << 0, /* guarantee non-reentrance */
 	WQ_UNBOUND		= 1 << 1, /* not bound to any cpu */
-	WQ_FREEZEABLE		= 1 << 2, /* freeze during suspend */
+	WQ_FREEZABLE		= 1 << 2, /* freeze during suspend */
 	WQ_MEM_RECLAIM		= 1 << 3, /* may be used for memory reclaim */
 	WQ_HIGHPRI		= 1 << 4, /* high priority */
 	WQ_CPU_INTENSIVE	= 1 << 5, /* cpu instensive workqueue */
@@ -318,7 +318,7 @@ __alloc_workqueue_key(const char *name, unsigned int flags, int max_active,
 /**
  * alloc_ordered_workqueue - allocate an ordered workqueue
  * @name: name of the workqueue
- * @flags: WQ_* flags (only WQ_FREEZEABLE and WQ_MEM_RECLAIM are meaningful)
+ * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful)
  *
  * Allocate an ordered workqueue.  An ordered workqueue executes at
  * most one work item at any given time in the queued order.  They are
@@ -335,8 +335,8 @@ alloc_ordered_workqueue(const char *name, unsigned int flags)
 #define create_workqueue(name)					\
 	alloc_workqueue((name), WQ_MEM_RECLAIM, 1)
-#define create_freezeable_workqueue(name)			\
-	alloc_workqueue((name), WQ_FREEZEABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, 1)
+#define create_freezable_workqueue(name)			\
+	alloc_workqueue((name), WQ_FREEZABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, 1)
 #define create_singlethread_workqueue(name)			\
 	alloc_workqueue((name), WQ_UNBOUND | WQ_MEM_RECLAIM, 1)
...
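A hedged usage sketch for the two helpers touched above, with invented queue names and an invented init function: create_freezable_workqueue() is the renamed convenience wrapper (unbound, freezable, usable for memory reclaim, max_active of 1), and alloc_ordered_workqueue() builds a queue that executes at most one work item at a time, in queueing order.

#include <linux/workqueue.h>

static struct workqueue_struct *flush_wq;	/* names are illustrative only */
static struct workqueue_struct *ordered_wq;

static int __init example_setup(void)
{
	/* Renamed wrapper: WQ_FREEZABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, max_active 1. */
	flush_wq = create_freezable_workqueue("example_flush");
	if (!flush_wq)
		return -ENOMEM;

	/* Ordered queue; only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful here. */
	ordered_wq = alloc_ordered_workqueue("example_ordered", WQ_FREEZABLE);
	if (!ordered_wq) {
		destroy_workqueue(flush_wq);
		return -ENOMEM;
	}
	return 0;
}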
@@ -326,7 +326,7 @@ EXPORT_SYMBOL_GPL(pm_wq);

 static int __init pm_start_workqueue(void)
 {
-	pm_wq = alloc_workqueue("pm", WQ_FREEZEABLE, 0);
+	pm_wq = alloc_workqueue("pm", WQ_FREEZABLE, 0);

 	return pm_wq ? 0 : -ENOMEM;
 }
...
@@ -22,7 +22,7 @@
  */
 #define TIMEOUT	(20 * HZ)

-static inline int freezeable(struct task_struct * p)
+static inline int freezable(struct task_struct * p)
 {
 	if ((p == current) ||
 	    (p->flags & PF_NOFREEZE) ||
@@ -53,7 +53,7 @@ static int try_to_freeze_tasks(bool sig_only)
 		todo = 0;
 		read_lock(&tasklist_lock);
 		do_each_thread(g, p) {
-			if (frozen(p) || !freezeable(p))
+			if (frozen(p) || !freezable(p))
 				continue;

 			if (!freeze_task(p, sig_only))
@@ -167,7 +167,7 @@ static void thaw_tasks(bool nosig_only)

 	read_lock(&tasklist_lock);
 	do_each_thread(g, p) {
-		if (!freezeable(p))
+		if (!freezable(p))
 			continue;

 		if (nosig_only && should_send_signal(p))
...
@@ -79,7 +79,9 @@ enum {
 	MAX_IDLE_WORKERS_RATIO	= 4,		/* 1/4 of busy can be idle */
 	IDLE_WORKER_TIMEOUT	= 300 * HZ,	/* keep idle ones for 5 mins */

-	MAYDAY_INITIAL_TIMEOUT	= HZ / 100,	/* call for help after 10ms */
+	MAYDAY_INITIAL_TIMEOUT  = HZ / 100 >= 2 ? HZ / 100 : 2,
+						/* call for help after 10ms
+						   (min two ticks) */
 	MAYDAY_INTERVAL		= HZ / 10,	/* and then every 100ms */
 	CREATE_COOLDOWN		= HZ,		/* time to breath after fail */
 	TRUSTEE_COOLDOWN	= HZ / 10,	/* for trustee draining */
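The clamp exists because a one-jiffy timer can fire almost immediately: the current tick may be nearly over when the timer is armed, so at least two ticks are needed to guarantee a real delay. With HZ=100, HZ / 100 is only 1, so the expression forces 2 (20ms); with HZ=250 it stays at 2 (8ms); with HZ=1000 it evaluates to 10 (10ms). A small user-space sketch of the arithmetic, not kernel code:

#include <stdio.h>

/* Same expression as the patch: ~10ms worth of ticks, but never below 2. */
static long mayday_initial_timeout(long hz)
{
	return hz / 100 >= 2 ? hz / 100 : 2;
}

int main(void)
{
	const long hz_values[] = { 100, 250, 300, 1000 };
	size_t i;

	for (i = 0; i < sizeof(hz_values) / sizeof(hz_values[0]); i++)
		printf("HZ=%-4ld -> MAYDAY_INITIAL_TIMEOUT = %ld jiffies\n",
		       hz_values[i], mayday_initial_timeout(hz_values[i]));
	return 0;
}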
@@ -2047,6 +2049,15 @@ static int rescuer_thread(void *__wq)
 			move_linked_works(work, scheduled, &n);

 		process_scheduled_works(rescuer);

+		/*
+		 * Leave this gcwq.  If keep_working() is %true, notify a
+		 * regular worker; otherwise, we end up with 0 concurrency
+		 * and stalling the execution.
+		 */
+		if (keep_working(gcwq))
+			wake_up_worker(gcwq);
+
 		spin_unlock_irq(&gcwq->lock);
 	}
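A toy, single-threaded sketch of the pattern this hunk adds, with every type and helper invented for the illustration (they only mimic the names used above): once the rescuer has processed its batch, it must not leave the pool with work still pending and zero active workers, so it wakes a regular worker before dropping out.

#include <stdbool.h>
#include <stdio.h>

struct toy_pool {
	int pending_work;	/* items still queued */
	int running_workers;	/* workers currently executing */
};

/* Invented stand-in for keep_working(): more work and nobody running it. */
static bool toy_keep_working(const struct toy_pool *pool)
{
	return pool->pending_work > 0 && pool->running_workers == 0;
}

/* Invented stand-in for wake_up_worker(). */
static void toy_wake_up_worker(struct toy_pool *pool)
{
	printf("waking a regular worker, %d item(s) pending\n", pool->pending_work);
}

static void toy_rescuer_leave(struct toy_pool *pool)
{
	/* The fix: avoid dropping to zero concurrency when the rescuer leaves. */
	if (toy_keep_working(pool))
		toy_wake_up_worker(pool);
}

int main(void)
{
	struct toy_pool pool = { .pending_work = 3, .running_workers = 0 };

	toy_rescuer_leave(&pool);
	return 0;
}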
@@ -2956,7 +2967,7 @@ struct workqueue_struct *__alloc_workqueue_key(const char *name,
 	 */
 	spin_lock(&workqueue_lock);

-	if (workqueue_freezing && wq->flags & WQ_FREEZEABLE)
+	if (workqueue_freezing && wq->flags & WQ_FREEZABLE)
 		for_each_cwq_cpu(cpu, wq)
 			get_cwq(cpu, wq)->max_active = 0;
@@ -3068,7 +3079,7 @@ void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
 		spin_lock_irq(&gcwq->lock);

-		if (!(wq->flags & WQ_FREEZEABLE) ||
+		if (!(wq->flags & WQ_FREEZABLE) ||
 		    !(gcwq->flags & GCWQ_FREEZING))
 			get_cwq(gcwq->cpu, wq)->max_active = max_active;
@@ -3318,7 +3329,7 @@ static int __cpuinit trustee_thread(void *__gcwq)
 	 * want to get it over with ASAP - spam rescuers, wake up as
 	 * many idlers as necessary and create new ones till the
 	 * worklist is empty.  Note that if the gcwq is frozen, there
-	 * may be frozen works in freezeable cwqs.  Don't declare
+	 * may be frozen works in freezable cwqs.  Don't declare
 	 * completion while frozen.
 	 */
 	while (gcwq->nr_workers != gcwq->nr_idle ||
@@ -3576,9 +3587,9 @@ EXPORT_SYMBOL_GPL(work_on_cpu);
 /**
  * freeze_workqueues_begin - begin freezing workqueues
  *
- * Start freezing workqueues.  After this function returns, all
- * freezeable workqueues will queue new works to their frozen_works
- * list instead of gcwq->worklist.
+ * Start freezing workqueues.  After this function returns, all freezable
+ * workqueues will queue new works to their frozen_works list instead of
+ * gcwq->worklist.
  *
  * CONTEXT:
 * Grabs and releases workqueue_lock and gcwq->lock's.
@@ -3604,7 +3615,7 @@ void freeze_workqueues_begin(void)
 		list_for_each_entry(wq, &workqueues, list) {
 			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);

-			if (cwq && wq->flags & WQ_FREEZEABLE)
+			if (cwq && wq->flags & WQ_FREEZABLE)
 				cwq->max_active = 0;
 		}
@@ -3615,7 +3626,7 @@ void freeze_workqueues_begin(void)
 }

 /**
- * freeze_workqueues_busy - are freezeable workqueues still busy?
+ * freeze_workqueues_busy - are freezable workqueues still busy?
  *
  * Check whether freezing is complete.  This function must be called
 * between freeze_workqueues_begin() and thaw_workqueues().
@@ -3624,8 +3635,8 @@ void freeze_workqueues_begin(void)
  * Grabs and releases workqueue_lock.
  *
  * RETURNS:
- * %true if some freezeable workqueues are still busy.  %false if
- * freezing is complete.
+ * %true if some freezable workqueues are still busy.  %false if freezing
+ * is complete.
  */
 bool freeze_workqueues_busy(void)
 {
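The three entry points documented above form a begin / poll / thaw protocol. The sketch below is a hypothetical caller, for illustration only (the retry budget and sleep interval are invented; the real user lives in the power-management freezer path), showing the ordering the comments describe:

#include <linux/delay.h>
#include <linux/workqueue.h>

/* Hypothetical suspend-side caller, not part of this commit. */
static int example_freeze_workqueues(void)
{
	int tries = 100;	/* arbitrary retry budget for the sketch */

	freeze_workqueues_begin();	/* new work goes to frozen_works from here on */

	while (freeze_workqueues_busy()) {
		if (--tries == 0) {
			thaw_workqueues();	/* give up: restore max_active, resume */
			return -EBUSY;
		}
		msleep(10);
	}

	return 0;	/* all freezable workqueues are quiescent */
}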
@@ -3645,7 +3656,7 @@ bool freeze_workqueues_busy(void)
 		list_for_each_entry(wq, &workqueues, list) {
 			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);

-			if (!cwq || !(wq->flags & WQ_FREEZEABLE))
+			if (!cwq || !(wq->flags & WQ_FREEZABLE))
 				continue;

 			BUG_ON(cwq->nr_active < 0);
@@ -3690,7 +3701,7 @@ void thaw_workqueues(void)
 		list_for_each_entry(wq, &workqueues, list) {
 			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);

-			if (!cwq || !(wq->flags & WQ_FREEZEABLE))
+			if (!cwq || !(wq->flags & WQ_FREEZABLE))
 				continue;

 			/* restore max_active and repopulate worklist */
...