Commit 1e512b08 authored by Linus Torvalds's avatar Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:
 "Mainly sending this off now for the writeback fixes, since they fix a
  real regression introduced with the cgroup writeback changes.  The
  NVMe fix could wait for the next pull for this series, but it's simple
  enough that we might as well include it.

  This contains:

   - two cgroup writeback fixes from Tejun, fixing a user-reported issue
     with luks crypt devices hanging when being closed.

   - NVMe error cleanup fix from Jon Derrick, fixing a case where we'd
     attempt to free an unregistered IRQ"

* 'for-linus' of git://git.kernel.dk/linux-block:
  NVMe: Fix irq freeing when queue_request_irq fails
  writeback: don't drain bdi_writeback_congested on bdi destruction
  writeback: don't embed root bdi_writeback_congested in bdi_writeback
parents 1c65ae63 758dd7fd
...@@ -1474,6 +1474,7 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid, ...@@ -1474,6 +1474,7 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride]; nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
nvmeq->q_depth = depth; nvmeq->q_depth = depth;
nvmeq->qid = qid; nvmeq->qid = qid;
nvmeq->cq_vector = -1;
dev->queues[qid] = nvmeq; dev->queues[qid] = nvmeq;
/* make sure queue descriptor is set before queue count, for kthread */ /* make sure queue descriptor is set before queue count, for kthread */
...@@ -1726,8 +1727,10 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev) ...@@ -1726,8 +1727,10 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
nvmeq->cq_vector = 0; nvmeq->cq_vector = 0;
result = queue_request_irq(dev, nvmeq, nvmeq->irqname); result = queue_request_irq(dev, nvmeq, nvmeq->irqname);
if (result) if (result) {
nvmeq->cq_vector = -1;
goto free_nvmeq; goto free_nvmeq;
}
return result; return result;
...@@ -2213,8 +2216,10 @@ static int nvme_setup_io_queues(struct nvme_dev *dev) ...@@ -2213,8 +2216,10 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
dev->max_qid = nr_io_queues; dev->max_qid = nr_io_queues;
result = queue_request_irq(dev, adminq, adminq->irqname); result = queue_request_irq(dev, adminq, adminq->irqname);
if (result) if (result) {
adminq->cq_vector = -1;
goto free_queues; goto free_queues;
}
/* Free previously allocated queues that are no longer usable */ /* Free previously allocated queues that are no longer usable */
nvme_free_queues(dev, nr_io_queues + 1); nvme_free_queues(dev, nr_io_queues + 1);
......
...@@ -50,10 +50,10 @@ enum wb_stat_item { ...@@ -50,10 +50,10 @@ enum wb_stat_item {
*/ */
struct bdi_writeback_congested { struct bdi_writeback_congested {
unsigned long state; /* WB_[a]sync_congested flags */ unsigned long state; /* WB_[a]sync_congested flags */
atomic_t refcnt; /* nr of attached wb's and blkg */
#ifdef CONFIG_CGROUP_WRITEBACK #ifdef CONFIG_CGROUP_WRITEBACK
struct backing_dev_info *bdi; /* the associated bdi */ struct backing_dev_info *bdi; /* the associated bdi */
atomic_t refcnt; /* nr of attached wb's and blkg */
int blkcg_id; /* ID of the associated blkcg */ int blkcg_id; /* ID of the associated blkcg */
struct rb_node rb_node; /* on bdi->cgwb_congestion_tree */ struct rb_node rb_node; /* on bdi->cgwb_congestion_tree */
#endif #endif
...@@ -150,11 +150,12 @@ struct backing_dev_info { ...@@ -150,11 +150,12 @@ struct backing_dev_info {
atomic_long_t tot_write_bandwidth; atomic_long_t tot_write_bandwidth;
struct bdi_writeback wb; /* the root writeback info for this bdi */ struct bdi_writeback wb; /* the root writeback info for this bdi */
struct bdi_writeback_congested wb_congested; /* its congested state */
#ifdef CONFIG_CGROUP_WRITEBACK #ifdef CONFIG_CGROUP_WRITEBACK
struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */ struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */
struct rb_root cgwb_congested_tree; /* their congested states */ struct rb_root cgwb_congested_tree; /* their congested states */
atomic_t usage_cnt; /* counts both cgwbs and cgwb_congested's */ atomic_t usage_cnt; /* counts both cgwbs and cgwb_congested's */
#else
struct bdi_writeback_congested *wb_congested;
#endif #endif
wait_queue_head_t wb_waitq; wait_queue_head_t wb_waitq;
......
...@@ -15,6 +15,7 @@ ...@@ -15,6 +15,7 @@
#include <linux/writeback.h> #include <linux/writeback.h>
#include <linux/blk-cgroup.h> #include <linux/blk-cgroup.h>
#include <linux/backing-dev-defs.h> #include <linux/backing-dev-defs.h>
#include <linux/slab.h>
int __must_check bdi_init(struct backing_dev_info *bdi); int __must_check bdi_init(struct backing_dev_info *bdi);
void bdi_destroy(struct backing_dev_info *bdi); void bdi_destroy(struct backing_dev_info *bdi);
...@@ -465,11 +466,14 @@ static inline bool inode_cgwb_enabled(struct inode *inode) ...@@ -465,11 +466,14 @@ static inline bool inode_cgwb_enabled(struct inode *inode)
static inline struct bdi_writeback_congested * static inline struct bdi_writeback_congested *
wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp) wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp)
{ {
return bdi->wb.congested; atomic_inc(&bdi->wb_congested->refcnt);
return bdi->wb_congested;
} }
static inline void wb_congested_put(struct bdi_writeback_congested *congested) static inline void wb_congested_put(struct bdi_writeback_congested *congested)
{ {
if (atomic_dec_and_test(&congested->refcnt))
kfree(congested);
} }
static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi) static inline struct bdi_writeback *wb_find_current(struct backing_dev_info *bdi)
......
...@@ -287,7 +287,7 @@ void wb_wakeup_delayed(struct bdi_writeback *wb) ...@@ -287,7 +287,7 @@ void wb_wakeup_delayed(struct bdi_writeback *wb)
#define INIT_BW (100 << (20 - PAGE_SHIFT)) #define INIT_BW (100 << (20 - PAGE_SHIFT))
static int wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi, static int wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi,
gfp_t gfp) int blkcg_id, gfp_t gfp)
{ {
int i, err; int i, err;
...@@ -311,21 +311,29 @@ static int wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi, ...@@ -311,21 +311,29 @@ static int wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi,
INIT_LIST_HEAD(&wb->work_list); INIT_LIST_HEAD(&wb->work_list);
INIT_DELAYED_WORK(&wb->dwork, wb_workfn); INIT_DELAYED_WORK(&wb->dwork, wb_workfn);
wb->congested = wb_congested_get_create(bdi, blkcg_id, gfp);
if (!wb->congested)
return -ENOMEM;
err = fprop_local_init_percpu(&wb->completions, gfp); err = fprop_local_init_percpu(&wb->completions, gfp);
if (err) if (err)
return err; goto out_put_cong;
for (i = 0; i < NR_WB_STAT_ITEMS; i++) { for (i = 0; i < NR_WB_STAT_ITEMS; i++) {
err = percpu_counter_init(&wb->stat[i], 0, gfp); err = percpu_counter_init(&wb->stat[i], 0, gfp);
if (err) { if (err)
while (--i) goto out_destroy_stat;
percpu_counter_destroy(&wb->stat[i]);
fprop_local_destroy_percpu(&wb->completions);
return err;
}
} }
return 0; return 0;
out_destroy_stat:
while (--i)
percpu_counter_destroy(&wb->stat[i]);
fprop_local_destroy_percpu(&wb->completions);
out_put_cong:
wb_congested_put(wb->congested);
return err;
} }
/* /*
...@@ -361,6 +369,7 @@ static void wb_exit(struct bdi_writeback *wb) ...@@ -361,6 +369,7 @@ static void wb_exit(struct bdi_writeback *wb)
percpu_counter_destroy(&wb->stat[i]); percpu_counter_destroy(&wb->stat[i]);
fprop_local_destroy_percpu(&wb->completions); fprop_local_destroy_percpu(&wb->completions);
wb_congested_put(wb->congested);
} }
#ifdef CONFIG_CGROUP_WRITEBACK #ifdef CONFIG_CGROUP_WRITEBACK
...@@ -392,9 +401,6 @@ wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp) ...@@ -392,9 +401,6 @@ wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp)
struct bdi_writeback_congested *new_congested = NULL, *congested; struct bdi_writeback_congested *new_congested = NULL, *congested;
struct rb_node **node, *parent; struct rb_node **node, *parent;
unsigned long flags; unsigned long flags;
if (blkcg_id == 1)
return &bdi->wb_congested;
retry: retry:
spin_lock_irqsave(&cgwb_lock, flags); spin_lock_irqsave(&cgwb_lock, flags);
...@@ -419,7 +425,6 @@ wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp) ...@@ -419,7 +425,6 @@ wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp)
new_congested = NULL; new_congested = NULL;
rb_link_node(&congested->rb_node, parent, node); rb_link_node(&congested->rb_node, parent, node);
rb_insert_color(&congested->rb_node, &bdi->cgwb_congested_tree); rb_insert_color(&congested->rb_node, &bdi->cgwb_congested_tree);
atomic_inc(&bdi->usage_cnt);
goto found; goto found;
} }
...@@ -450,24 +455,23 @@ wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp) ...@@ -450,24 +455,23 @@ wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp)
*/ */
void wb_congested_put(struct bdi_writeback_congested *congested) void wb_congested_put(struct bdi_writeback_congested *congested)
{ {
struct backing_dev_info *bdi = congested->bdi;
unsigned long flags; unsigned long flags;
if (congested->blkcg_id == 1)
return;
local_irq_save(flags); local_irq_save(flags);
if (!atomic_dec_and_lock(&congested->refcnt, &cgwb_lock)) { if (!atomic_dec_and_lock(&congested->refcnt, &cgwb_lock)) {
local_irq_restore(flags); local_irq_restore(flags);
return; return;
} }
rb_erase(&congested->rb_node, &congested->bdi->cgwb_congested_tree); /* bdi might already have been destroyed leaving @congested unlinked */
if (congested->bdi) {
rb_erase(&congested->rb_node,
&congested->bdi->cgwb_congested_tree);
congested->bdi = NULL;
}
spin_unlock_irqrestore(&cgwb_lock, flags); spin_unlock_irqrestore(&cgwb_lock, flags);
kfree(congested); kfree(congested);
if (atomic_dec_and_test(&bdi->usage_cnt))
wake_up_all(&cgwb_release_wait);
} }
static void cgwb_release_workfn(struct work_struct *work) static void cgwb_release_workfn(struct work_struct *work)
...@@ -480,7 +484,6 @@ static void cgwb_release_workfn(struct work_struct *work) ...@@ -480,7 +484,6 @@ static void cgwb_release_workfn(struct work_struct *work)
css_put(wb->memcg_css); css_put(wb->memcg_css);
css_put(wb->blkcg_css); css_put(wb->blkcg_css);
wb_congested_put(wb->congested);
fprop_local_destroy_percpu(&wb->memcg_completions); fprop_local_destroy_percpu(&wb->memcg_completions);
percpu_ref_exit(&wb->refcnt); percpu_ref_exit(&wb->refcnt);
...@@ -541,7 +544,7 @@ static int cgwb_create(struct backing_dev_info *bdi, ...@@ -541,7 +544,7 @@ static int cgwb_create(struct backing_dev_info *bdi,
if (!wb) if (!wb)
return -ENOMEM; return -ENOMEM;
ret = wb_init(wb, bdi, gfp); ret = wb_init(wb, bdi, blkcg_css->id, gfp);
if (ret) if (ret)
goto err_free; goto err_free;
...@@ -553,12 +556,6 @@ static int cgwb_create(struct backing_dev_info *bdi, ...@@ -553,12 +556,6 @@ static int cgwb_create(struct backing_dev_info *bdi,
if (ret) if (ret)
goto err_ref_exit; goto err_ref_exit;
wb->congested = wb_congested_get_create(bdi, blkcg_css->id, gfp);
if (!wb->congested) {
ret = -ENOMEM;
goto err_fprop_exit;
}
wb->memcg_css = memcg_css; wb->memcg_css = memcg_css;
wb->blkcg_css = blkcg_css; wb->blkcg_css = blkcg_css;
INIT_WORK(&wb->release_work, cgwb_release_workfn); INIT_WORK(&wb->release_work, cgwb_release_workfn);
...@@ -588,12 +585,10 @@ static int cgwb_create(struct backing_dev_info *bdi, ...@@ -588,12 +585,10 @@ static int cgwb_create(struct backing_dev_info *bdi,
if (ret) { if (ret) {
if (ret == -EEXIST) if (ret == -EEXIST)
ret = 0; ret = 0;
goto err_put_congested; goto err_fprop_exit;
} }
goto out_put; goto out_put;
err_put_congested:
wb_congested_put(wb->congested);
err_fprop_exit: err_fprop_exit:
fprop_local_destroy_percpu(&wb->memcg_completions); fprop_local_destroy_percpu(&wb->memcg_completions);
err_ref_exit: err_ref_exit:
...@@ -662,26 +657,41 @@ struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi, ...@@ -662,26 +657,41 @@ struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi,
return wb; return wb;
} }
static void cgwb_bdi_init(struct backing_dev_info *bdi) static int cgwb_bdi_init(struct backing_dev_info *bdi)
{ {
bdi->wb.memcg_css = mem_cgroup_root_css; int ret;
bdi->wb.blkcg_css = blkcg_root_css;
bdi->wb_congested.blkcg_id = 1;
INIT_RADIX_TREE(&bdi->cgwb_tree, GFP_ATOMIC); INIT_RADIX_TREE(&bdi->cgwb_tree, GFP_ATOMIC);
bdi->cgwb_congested_tree = RB_ROOT; bdi->cgwb_congested_tree = RB_ROOT;
atomic_set(&bdi->usage_cnt, 1); atomic_set(&bdi->usage_cnt, 1);
ret = wb_init(&bdi->wb, bdi, 1, GFP_KERNEL);
if (!ret) {
bdi->wb.memcg_css = mem_cgroup_root_css;
bdi->wb.blkcg_css = blkcg_root_css;
}
return ret;
} }
static void cgwb_bdi_destroy(struct backing_dev_info *bdi) static void cgwb_bdi_destroy(struct backing_dev_info *bdi)
{ {
struct radix_tree_iter iter; struct radix_tree_iter iter;
struct bdi_writeback_congested *congested, *congested_n;
void **slot; void **slot;
WARN_ON(test_bit(WB_registered, &bdi->wb.state)); WARN_ON(test_bit(WB_registered, &bdi->wb.state));
spin_lock_irq(&cgwb_lock); spin_lock_irq(&cgwb_lock);
radix_tree_for_each_slot(slot, &bdi->cgwb_tree, &iter, 0) radix_tree_for_each_slot(slot, &bdi->cgwb_tree, &iter, 0)
cgwb_kill(*slot); cgwb_kill(*slot);
rbtree_postorder_for_each_entry_safe(congested, congested_n,
&bdi->cgwb_congested_tree, rb_node) {
rb_erase(&congested->rb_node, &bdi->cgwb_congested_tree);
congested->bdi = NULL; /* mark @congested unlinked */
}
spin_unlock_irq(&cgwb_lock); spin_unlock_irq(&cgwb_lock);
/* /*
...@@ -732,15 +742,28 @@ void wb_blkcg_offline(struct blkcg *blkcg) ...@@ -732,15 +742,28 @@ void wb_blkcg_offline(struct blkcg *blkcg)
#else /* CONFIG_CGROUP_WRITEBACK */ #else /* CONFIG_CGROUP_WRITEBACK */
static void cgwb_bdi_init(struct backing_dev_info *bdi) { } static int cgwb_bdi_init(struct backing_dev_info *bdi)
{
int err;
bdi->wb_congested = kzalloc(sizeof(*bdi->wb_congested), GFP_KERNEL);
if (!bdi->wb_congested)
return -ENOMEM;
err = wb_init(&bdi->wb, bdi, 1, GFP_KERNEL);
if (err) {
kfree(bdi->wb_congested);
return err;
}
return 0;
}
static void cgwb_bdi_destroy(struct backing_dev_info *bdi) { } static void cgwb_bdi_destroy(struct backing_dev_info *bdi) { }
#endif /* CONFIG_CGROUP_WRITEBACK */ #endif /* CONFIG_CGROUP_WRITEBACK */
int bdi_init(struct backing_dev_info *bdi) int bdi_init(struct backing_dev_info *bdi)
{ {
int err;
bdi->dev = NULL; bdi->dev = NULL;
bdi->min_ratio = 0; bdi->min_ratio = 0;
...@@ -749,15 +772,7 @@ int bdi_init(struct backing_dev_info *bdi) ...@@ -749,15 +772,7 @@ int bdi_init(struct backing_dev_info *bdi)
INIT_LIST_HEAD(&bdi->bdi_list); INIT_LIST_HEAD(&bdi->bdi_list);
init_waitqueue_head(&bdi->wb_waitq); init_waitqueue_head(&bdi->wb_waitq);
err = wb_init(&bdi->wb, bdi, GFP_KERNEL); return cgwb_bdi_init(bdi);
if (err)
return err;
bdi->wb_congested.state = 0;
bdi->wb.congested = &bdi->wb_congested;
cgwb_bdi_init(bdi);
return 0;
} }
EXPORT_SYMBOL(bdi_init); EXPORT_SYMBOL(bdi_init);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment