Commit f2b3420b authored by Linus Torvalds

Merge tag 'block-5.15-2021-10-17' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:
 "Bigger than usual for this point in time, the majority is fixing some
  issues around BDI lifetimes with the move from the request_queue to
  the disk in this release. In detail:

   - Series on draining fs IO for del_gendisk() (Christoph)

   - NVMe pull request via Christoph:
        - fix the abort command id (Keith Busch)
        - nvme: fix per-namespace chardev deletion (Adam Manzanares)

   - brd locking scope fix (Tetsuo)

   - BFQ fix (Paolo)"

* tag 'block-5.15-2021-10-17' of git://git.kernel.dk/linux-block:
  block, bfq: reset last_bfqq_created on group change
  block: warn when putting the final reference on a registered disk
  brd: reduce the brd_devices_mutex scope
  kyber: avoid q->disk dereferences in trace points
  block: keep q_usage_counter in atomic mode after del_gendisk
  block: drain file system I/O on del_gendisk
  block: split bio_queue_enter from blk_queue_enter
  block: factor out a blk_try_enter_queue helper
  block: call submit_bio_checks under q_usage_counter
  nvme: fix per-namespace chardev deletion
  block/rnbd-clt-sysfs: fix a couple uninitialized variable bugs
  nvme-pci: Fix abort command id
parents cc0af0a9 d29bd414
...@@ -666,6 +666,12 @@ void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq, ...@@ -666,6 +666,12 @@ void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
bfq_put_idle_entity(bfq_entity_service_tree(entity), entity); bfq_put_idle_entity(bfq_entity_service_tree(entity), entity);
bfqg_and_blkg_put(bfqq_group(bfqq)); bfqg_and_blkg_put(bfqq_group(bfqq));
if (entity->parent &&
entity->parent->last_bfqq_created == bfqq)
entity->parent->last_bfqq_created = NULL;
else if (bfqd->last_bfqq_created == bfqq)
bfqd->last_bfqq_created = NULL;
entity->parent = bfqg->my_entity; entity->parent = bfqg->my_entity;
entity->sched_data = &bfqg->sched_data; entity->sched_data = &bfqg->sched_data;
/* pin down bfqg and its associated blkg */ /* pin down bfqg and its associated blkg */
......
...@@ -49,7 +49,6 @@ ...@@ -49,7 +49,6 @@
#include "blk-mq.h" #include "blk-mq.h"
#include "blk-mq-sched.h" #include "blk-mq-sched.h"
#include "blk-pm.h" #include "blk-pm.h"
#include "blk-rq-qos.h"
struct dentry *blk_debugfs_root; struct dentry *blk_debugfs_root;
...@@ -337,23 +336,25 @@ void blk_put_queue(struct request_queue *q) ...@@ -337,23 +336,25 @@ void blk_put_queue(struct request_queue *q)
} }
EXPORT_SYMBOL(blk_put_queue); EXPORT_SYMBOL(blk_put_queue);
void blk_set_queue_dying(struct request_queue *q) void blk_queue_start_drain(struct request_queue *q)
{ {
blk_queue_flag_set(QUEUE_FLAG_DYING, q);
/* /*
* When queue DYING flag is set, we need to block new req * When queue DYING flag is set, we need to block new req
* entering queue, so we call blk_freeze_queue_start() to * entering queue, so we call blk_freeze_queue_start() to
* prevent I/O from crossing blk_queue_enter(). * prevent I/O from crossing blk_queue_enter().
*/ */
blk_freeze_queue_start(q); blk_freeze_queue_start(q);
if (queue_is_mq(q)) if (queue_is_mq(q))
blk_mq_wake_waiters(q); blk_mq_wake_waiters(q);
/* Make blk_queue_enter() reexamine the DYING flag. */ /* Make blk_queue_enter() reexamine the DYING flag. */
wake_up_all(&q->mq_freeze_wq); wake_up_all(&q->mq_freeze_wq);
} }
void blk_set_queue_dying(struct request_queue *q)
{
blk_queue_flag_set(QUEUE_FLAG_DYING, q);
blk_queue_start_drain(q);
}
EXPORT_SYMBOL_GPL(blk_set_queue_dying); EXPORT_SYMBOL_GPL(blk_set_queue_dying);
/** /**
...@@ -385,13 +386,8 @@ void blk_cleanup_queue(struct request_queue *q) ...@@ -385,13 +386,8 @@ void blk_cleanup_queue(struct request_queue *q)
*/ */
blk_freeze_queue(q); blk_freeze_queue(q);
rq_qos_exit(q);
blk_queue_flag_set(QUEUE_FLAG_DEAD, q); blk_queue_flag_set(QUEUE_FLAG_DEAD, q);
/* for synchronous bio-based driver finish in-flight integrity i/o */
blk_flush_integrity();
blk_sync_queue(q); blk_sync_queue(q);
if (queue_is_mq(q)) if (queue_is_mq(q))
blk_mq_exit_queue(q); blk_mq_exit_queue(q);
...@@ -416,6 +412,30 @@ void blk_cleanup_queue(struct request_queue *q) ...@@ -416,6 +412,30 @@ void blk_cleanup_queue(struct request_queue *q)
} }
EXPORT_SYMBOL(blk_cleanup_queue); EXPORT_SYMBOL(blk_cleanup_queue);
static bool blk_try_enter_queue(struct request_queue *q, bool pm)
{
rcu_read_lock();
if (!percpu_ref_tryget_live(&q->q_usage_counter))
goto fail;
/*
* The code that increments the pm_only counter must ensure that the
* counter is globally visible before the queue is unfrozen.
*/
if (blk_queue_pm_only(q) &&
(!pm || queue_rpm_status(q) == RPM_SUSPENDED))
goto fail_put;
rcu_read_unlock();
return true;
fail_put:
percpu_ref_put(&q->q_usage_counter);
fail:
rcu_read_unlock();
return false;
}
/** /**
* blk_queue_enter() - try to increase q->q_usage_counter * blk_queue_enter() - try to increase q->q_usage_counter
* @q: request queue pointer * @q: request queue pointer
...@@ -425,40 +445,18 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags) ...@@ -425,40 +445,18 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
{ {
const bool pm = flags & BLK_MQ_REQ_PM; const bool pm = flags & BLK_MQ_REQ_PM;
while (true) { while (!blk_try_enter_queue(q, pm)) {
bool success = false;
rcu_read_lock();
if (percpu_ref_tryget_live(&q->q_usage_counter)) {
/*
* The code that increments the pm_only counter is
* responsible for ensuring that that counter is
* globally visible before the queue is unfrozen.
*/
if ((pm && queue_rpm_status(q) != RPM_SUSPENDED) ||
!blk_queue_pm_only(q)) {
success = true;
} else {
percpu_ref_put(&q->q_usage_counter);
}
}
rcu_read_unlock();
if (success)
return 0;
if (flags & BLK_MQ_REQ_NOWAIT) if (flags & BLK_MQ_REQ_NOWAIT)
return -EBUSY; return -EBUSY;
/* /*
* read pair of barrier in blk_freeze_queue_start(), * read pair of barrier in blk_freeze_queue_start(), we need to
* we need to order reading __PERCPU_REF_DEAD flag of * order reading __PERCPU_REF_DEAD flag of .q_usage_counter and
* .q_usage_counter and reading .mq_freeze_depth or * reading .mq_freeze_depth or queue dying flag, otherwise the
* queue dying flag, otherwise the following wait may * following wait may never return if the two reads are
* never return if the two reads are reordered. * reordered.
*/ */
smp_rmb(); smp_rmb();
wait_event(q->mq_freeze_wq, wait_event(q->mq_freeze_wq,
(!q->mq_freeze_depth && (!q->mq_freeze_depth &&
blk_pm_resume_queue(pm, q)) || blk_pm_resume_queue(pm, q)) ||
...@@ -466,23 +464,43 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags) ...@@ -466,23 +464,43 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
if (blk_queue_dying(q)) if (blk_queue_dying(q))
return -ENODEV; return -ENODEV;
} }
return 0;
} }
static inline int bio_queue_enter(struct bio *bio) static inline int bio_queue_enter(struct bio *bio)
{ {
struct request_queue *q = bio->bi_bdev->bd_disk->queue; struct gendisk *disk = bio->bi_bdev->bd_disk;
bool nowait = bio->bi_opf & REQ_NOWAIT; struct request_queue *q = disk->queue;
int ret;
ret = blk_queue_enter(q, nowait ? BLK_MQ_REQ_NOWAIT : 0); while (!blk_try_enter_queue(q, false)) {
if (unlikely(ret)) { if (bio->bi_opf & REQ_NOWAIT) {
if (nowait && !blk_queue_dying(q)) if (test_bit(GD_DEAD, &disk->state))
goto dead;
bio_wouldblock_error(bio); bio_wouldblock_error(bio);
else return -EBUSY;
bio_io_error(bio); }
/*
* read pair of barrier in blk_freeze_queue_start(), we need to
* order reading __PERCPU_REF_DEAD flag of .q_usage_counter and
* reading .mq_freeze_depth or queue dying flag, otherwise the
* following wait may never return if the two reads are
* reordered.
*/
smp_rmb();
wait_event(q->mq_freeze_wq,
(!q->mq_freeze_depth &&
blk_pm_resume_queue(false, q)) ||
test_bit(GD_DEAD, &disk->state));
if (test_bit(GD_DEAD, &disk->state))
goto dead;
} }
return ret; return 0;
dead:
bio_io_error(bio);
return -ENODEV;
} }
void blk_queue_exit(struct request_queue *q) void blk_queue_exit(struct request_queue *q)
...@@ -899,11 +917,18 @@ static blk_qc_t __submit_bio(struct bio *bio) ...@@ -899,11 +917,18 @@ static blk_qc_t __submit_bio(struct bio *bio)
struct gendisk *disk = bio->bi_bdev->bd_disk; struct gendisk *disk = bio->bi_bdev->bd_disk;
blk_qc_t ret = BLK_QC_T_NONE; blk_qc_t ret = BLK_QC_T_NONE;
if (blk_crypto_bio_prep(&bio)) { if (unlikely(bio_queue_enter(bio) != 0))
if (!disk->fops->submit_bio) return BLK_QC_T_NONE;
return blk_mq_submit_bio(bio);
if (!submit_bio_checks(bio) || !blk_crypto_bio_prep(&bio))
goto queue_exit;
if (disk->fops->submit_bio) {
ret = disk->fops->submit_bio(bio); ret = disk->fops->submit_bio(bio);
goto queue_exit;
} }
return blk_mq_submit_bio(bio);
queue_exit:
blk_queue_exit(disk->queue); blk_queue_exit(disk->queue);
return ret; return ret;
} }
...@@ -941,9 +966,6 @@ static blk_qc_t __submit_bio_noacct(struct bio *bio) ...@@ -941,9 +966,6 @@ static blk_qc_t __submit_bio_noacct(struct bio *bio)
struct request_queue *q = bio->bi_bdev->bd_disk->queue; struct request_queue *q = bio->bi_bdev->bd_disk->queue;
struct bio_list lower, same; struct bio_list lower, same;
if (unlikely(bio_queue_enter(bio) != 0))
continue;
/* /*
* Create a fresh bio_list for all subordinate requests. * Create a fresh bio_list for all subordinate requests.
*/ */
...@@ -979,23 +1001,12 @@ static blk_qc_t __submit_bio_noacct(struct bio *bio) ...@@ -979,23 +1001,12 @@ static blk_qc_t __submit_bio_noacct(struct bio *bio)
static blk_qc_t __submit_bio_noacct_mq(struct bio *bio) static blk_qc_t __submit_bio_noacct_mq(struct bio *bio)
{ {
struct bio_list bio_list[2] = { }; struct bio_list bio_list[2] = { };
blk_qc_t ret = BLK_QC_T_NONE; blk_qc_t ret;
current->bio_list = bio_list; current->bio_list = bio_list;
do { do {
struct gendisk *disk = bio->bi_bdev->bd_disk; ret = __submit_bio(bio);
if (unlikely(bio_queue_enter(bio) != 0))
continue;
if (!blk_crypto_bio_prep(&bio)) {
blk_queue_exit(disk->queue);
ret = BLK_QC_T_NONE;
continue;
}
ret = blk_mq_submit_bio(bio);
} while ((bio = bio_list_pop(&bio_list[0]))); } while ((bio = bio_list_pop(&bio_list[0])));
current->bio_list = NULL; current->bio_list = NULL;
...@@ -1013,9 +1024,6 @@ static blk_qc_t __submit_bio_noacct_mq(struct bio *bio) ...@@ -1013,9 +1024,6 @@ static blk_qc_t __submit_bio_noacct_mq(struct bio *bio)
*/ */
blk_qc_t submit_bio_noacct(struct bio *bio) blk_qc_t submit_bio_noacct(struct bio *bio)
{ {
if (!submit_bio_checks(bio))
return BLK_QC_T_NONE;
/* /*
* We only want one ->submit_bio to be active at a time, else stack * We only want one ->submit_bio to be active at a time, else stack
* usage with stacked devices could be a problem. Use current->bio_list * usage with stacked devices could be a problem. Use current->bio_list
......
...@@ -188,9 +188,11 @@ void blk_mq_freeze_queue(struct request_queue *q) ...@@ -188,9 +188,11 @@ void blk_mq_freeze_queue(struct request_queue *q)
} }
EXPORT_SYMBOL_GPL(blk_mq_freeze_queue); EXPORT_SYMBOL_GPL(blk_mq_freeze_queue);
void blk_mq_unfreeze_queue(struct request_queue *q) void __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic)
{ {
mutex_lock(&q->mq_freeze_lock); mutex_lock(&q->mq_freeze_lock);
if (force_atomic)
q->q_usage_counter.data->force_atomic = true;
q->mq_freeze_depth--; q->mq_freeze_depth--;
WARN_ON_ONCE(q->mq_freeze_depth < 0); WARN_ON_ONCE(q->mq_freeze_depth < 0);
if (!q->mq_freeze_depth) { if (!q->mq_freeze_depth) {
...@@ -199,6 +201,11 @@ void blk_mq_unfreeze_queue(struct request_queue *q) ...@@ -199,6 +201,11 @@ void blk_mq_unfreeze_queue(struct request_queue *q)
} }
mutex_unlock(&q->mq_freeze_lock); mutex_unlock(&q->mq_freeze_lock);
} }
void blk_mq_unfreeze_queue(struct request_queue *q)
{
__blk_mq_unfreeze_queue(q, false);
}
EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue); EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
/* /*
......
...@@ -51,6 +51,8 @@ struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size, ...@@ -51,6 +51,8 @@ struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,
void blk_free_flush_queue(struct blk_flush_queue *q); void blk_free_flush_queue(struct blk_flush_queue *q);
void blk_freeze_queue(struct request_queue *q); void blk_freeze_queue(struct request_queue *q);
void __blk_mq_unfreeze_queue(struct request_queue *q, bool force_atomic);
void blk_queue_start_drain(struct request_queue *q);
#define BIO_INLINE_VECS 4 #define BIO_INLINE_VECS 4
struct bio_vec *bvec_alloc(mempool_t *pool, unsigned short *nr_vecs, struct bio_vec *bvec_alloc(mempool_t *pool, unsigned short *nr_vecs,
......
...@@ -26,6 +26,7 @@ ...@@ -26,6 +26,7 @@
#include <linux/badblocks.h> #include <linux/badblocks.h>
#include "blk.h" #include "blk.h"
#include "blk-rq-qos.h"
static struct kobject *block_depr; static struct kobject *block_depr;
...@@ -559,6 +560,8 @@ EXPORT_SYMBOL(device_add_disk); ...@@ -559,6 +560,8 @@ EXPORT_SYMBOL(device_add_disk);
*/ */
void del_gendisk(struct gendisk *disk) void del_gendisk(struct gendisk *disk)
{ {
struct request_queue *q = disk->queue;
might_sleep(); might_sleep();
if (WARN_ON_ONCE(!disk_live(disk) && !(disk->flags & GENHD_FL_HIDDEN))) if (WARN_ON_ONCE(!disk_live(disk) && !(disk->flags & GENHD_FL_HIDDEN)))
...@@ -575,8 +578,27 @@ void del_gendisk(struct gendisk *disk) ...@@ -575,8 +578,27 @@ void del_gendisk(struct gendisk *disk)
fsync_bdev(disk->part0); fsync_bdev(disk->part0);
__invalidate_device(disk->part0, true); __invalidate_device(disk->part0, true);
/*
* Fail any new I/O.
*/
set_bit(GD_DEAD, &disk->state);
set_capacity(disk, 0); set_capacity(disk, 0);
/*
* Prevent new I/O from crossing bio_queue_enter().
*/
blk_queue_start_drain(q);
blk_mq_freeze_queue_wait(q);
rq_qos_exit(q);
blk_sync_queue(q);
blk_flush_integrity();
/*
* Allow using passthrough request again after the queue is torn down.
*/
blk_queue_flag_clear(QUEUE_FLAG_INIT_DONE, q);
__blk_mq_unfreeze_queue(q, true);
if (!(disk->flags & GENHD_FL_HIDDEN)) { if (!(disk->flags & GENHD_FL_HIDDEN)) {
sysfs_remove_link(&disk_to_dev(disk)->kobj, "bdi"); sysfs_remove_link(&disk_to_dev(disk)->kobj, "bdi");
...@@ -1056,6 +1078,7 @@ static void disk_release(struct device *dev) ...@@ -1056,6 +1078,7 @@ static void disk_release(struct device *dev)
struct gendisk *disk = dev_to_disk(dev); struct gendisk *disk = dev_to_disk(dev);
might_sleep(); might_sleep();
WARN_ON_ONCE(disk_live(disk));
disk_release_events(disk); disk_release_events(disk);
kfree(disk->random); kfree(disk->random);
......
...@@ -151,6 +151,7 @@ struct kyber_ctx_queue { ...@@ -151,6 +151,7 @@ struct kyber_ctx_queue {
struct kyber_queue_data { struct kyber_queue_data {
struct request_queue *q; struct request_queue *q;
dev_t dev;
/* /*
* Each scheduling domain has a limited number of in-flight requests * Each scheduling domain has a limited number of in-flight requests
...@@ -257,7 +258,7 @@ static int calculate_percentile(struct kyber_queue_data *kqd, ...@@ -257,7 +258,7 @@ static int calculate_percentile(struct kyber_queue_data *kqd,
} }
memset(buckets, 0, sizeof(kqd->latency_buckets[sched_domain][type])); memset(buckets, 0, sizeof(kqd->latency_buckets[sched_domain][type]));
trace_kyber_latency(kqd->q, kyber_domain_names[sched_domain], trace_kyber_latency(kqd->dev, kyber_domain_names[sched_domain],
kyber_latency_type_names[type], percentile, kyber_latency_type_names[type], percentile,
bucket + 1, 1 << KYBER_LATENCY_SHIFT, samples); bucket + 1, 1 << KYBER_LATENCY_SHIFT, samples);
...@@ -270,7 +271,7 @@ static void kyber_resize_domain(struct kyber_queue_data *kqd, ...@@ -270,7 +271,7 @@ static void kyber_resize_domain(struct kyber_queue_data *kqd,
depth = clamp(depth, 1U, kyber_depth[sched_domain]); depth = clamp(depth, 1U, kyber_depth[sched_domain]);
if (depth != kqd->domain_tokens[sched_domain].sb.depth) { if (depth != kqd->domain_tokens[sched_domain].sb.depth) {
sbitmap_queue_resize(&kqd->domain_tokens[sched_domain], depth); sbitmap_queue_resize(&kqd->domain_tokens[sched_domain], depth);
trace_kyber_adjust(kqd->q, kyber_domain_names[sched_domain], trace_kyber_adjust(kqd->dev, kyber_domain_names[sched_domain],
depth); depth);
} }
} }
...@@ -366,6 +367,7 @@ static struct kyber_queue_data *kyber_queue_data_alloc(struct request_queue *q) ...@@ -366,6 +367,7 @@ static struct kyber_queue_data *kyber_queue_data_alloc(struct request_queue *q)
goto err; goto err;
kqd->q = q; kqd->q = q;
kqd->dev = disk_devt(q->disk);
kqd->cpu_latency = alloc_percpu_gfp(struct kyber_cpu_latency, kqd->cpu_latency = alloc_percpu_gfp(struct kyber_cpu_latency,
GFP_KERNEL | __GFP_ZERO); GFP_KERNEL | __GFP_ZERO);
...@@ -774,7 +776,7 @@ kyber_dispatch_cur_domain(struct kyber_queue_data *kqd, ...@@ -774,7 +776,7 @@ kyber_dispatch_cur_domain(struct kyber_queue_data *kqd,
list_del_init(&rq->queuelist); list_del_init(&rq->queuelist);
return rq; return rq;
} else { } else {
trace_kyber_throttled(kqd->q, trace_kyber_throttled(kqd->dev,
kyber_domain_names[khd->cur_domain]); kyber_domain_names[khd->cur_domain]);
} }
} else if (sbitmap_any_bit_set(&khd->kcq_map[khd->cur_domain])) { } else if (sbitmap_any_bit_set(&khd->kcq_map[khd->cur_domain])) {
...@@ -787,7 +789,7 @@ kyber_dispatch_cur_domain(struct kyber_queue_data *kqd, ...@@ -787,7 +789,7 @@ kyber_dispatch_cur_domain(struct kyber_queue_data *kqd,
list_del_init(&rq->queuelist); list_del_init(&rq->queuelist);
return rq; return rq;
} else { } else {
trace_kyber_throttled(kqd->q, trace_kyber_throttled(kqd->dev,
kyber_domain_names[khd->cur_domain]); kyber_domain_names[khd->cur_domain]);
} }
} }
......
...@@ -373,10 +373,22 @@ static int brd_alloc(int i) ...@@ -373,10 +373,22 @@ static int brd_alloc(int i)
struct gendisk *disk; struct gendisk *disk;
char buf[DISK_NAME_LEN]; char buf[DISK_NAME_LEN];
mutex_lock(&brd_devices_mutex);
list_for_each_entry(brd, &brd_devices, brd_list) {
if (brd->brd_number == i) {
mutex_unlock(&brd_devices_mutex);
return -EEXIST;
}
}
brd = kzalloc(sizeof(*brd), GFP_KERNEL); brd = kzalloc(sizeof(*brd), GFP_KERNEL);
if (!brd) if (!brd) {
mutex_unlock(&brd_devices_mutex);
return -ENOMEM; return -ENOMEM;
}
brd->brd_number = i; brd->brd_number = i;
list_add_tail(&brd->brd_list, &brd_devices);
mutex_unlock(&brd_devices_mutex);
spin_lock_init(&brd->brd_lock); spin_lock_init(&brd->brd_lock);
INIT_RADIX_TREE(&brd->brd_pages, GFP_ATOMIC); INIT_RADIX_TREE(&brd->brd_pages, GFP_ATOMIC);
...@@ -411,37 +423,30 @@ static int brd_alloc(int i) ...@@ -411,37 +423,30 @@ static int brd_alloc(int i)
blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue); blk_queue_flag_set(QUEUE_FLAG_NONROT, disk->queue);
blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, disk->queue); blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, disk->queue);
add_disk(disk); add_disk(disk);
list_add_tail(&brd->brd_list, &brd_devices);
return 0; return 0;
out_free_dev: out_free_dev:
mutex_lock(&brd_devices_mutex);
list_del(&brd->brd_list);
mutex_unlock(&brd_devices_mutex);
kfree(brd); kfree(brd);
return -ENOMEM; return -ENOMEM;
} }
static void brd_probe(dev_t dev) static void brd_probe(dev_t dev)
{ {
int i = MINOR(dev) / max_part; brd_alloc(MINOR(dev) / max_part);
struct brd_device *brd;
mutex_lock(&brd_devices_mutex);
list_for_each_entry(brd, &brd_devices, brd_list) {
if (brd->brd_number == i)
goto out_unlock;
}
brd_alloc(i);
out_unlock:
mutex_unlock(&brd_devices_mutex);
} }
static void brd_del_one(struct brd_device *brd) static void brd_del_one(struct brd_device *brd)
{ {
list_del(&brd->brd_list);
del_gendisk(brd->brd_disk); del_gendisk(brd->brd_disk);
blk_cleanup_disk(brd->brd_disk); blk_cleanup_disk(brd->brd_disk);
brd_free_pages(brd); brd_free_pages(brd);
mutex_lock(&brd_devices_mutex);
list_del(&brd->brd_list);
mutex_unlock(&brd_devices_mutex);
kfree(brd); kfree(brd);
} }
...@@ -491,25 +496,21 @@ static int __init brd_init(void) ...@@ -491,25 +496,21 @@ static int __init brd_init(void)
brd_debugfs_dir = debugfs_create_dir("ramdisk_pages", NULL); brd_debugfs_dir = debugfs_create_dir("ramdisk_pages", NULL);
mutex_lock(&brd_devices_mutex);
for (i = 0; i < rd_nr; i++) { for (i = 0; i < rd_nr; i++) {
err = brd_alloc(i); err = brd_alloc(i);
if (err) if (err)
goto out_free; goto out_free;
} }
mutex_unlock(&brd_devices_mutex);
pr_info("brd: module loaded\n"); pr_info("brd: module loaded\n");
return 0; return 0;
out_free: out_free:
unregister_blkdev(RAMDISK_MAJOR, "ramdisk");
debugfs_remove_recursive(brd_debugfs_dir); debugfs_remove_recursive(brd_debugfs_dir);
list_for_each_entry_safe(brd, next, &brd_devices, brd_list) list_for_each_entry_safe(brd, next, &brd_devices, brd_list)
brd_del_one(brd); brd_del_one(brd);
mutex_unlock(&brd_devices_mutex);
unregister_blkdev(RAMDISK_MAJOR, "ramdisk");
pr_info("brd: module NOT loaded !!!\n"); pr_info("brd: module NOT loaded !!!\n");
return err; return err;
...@@ -519,13 +520,12 @@ static void __exit brd_exit(void) ...@@ -519,13 +520,12 @@ static void __exit brd_exit(void)
{ {
struct brd_device *brd, *next; struct brd_device *brd, *next;
unregister_blkdev(RAMDISK_MAJOR, "ramdisk");
debugfs_remove_recursive(brd_debugfs_dir); debugfs_remove_recursive(brd_debugfs_dir);
list_for_each_entry_safe(brd, next, &brd_devices, brd_list) list_for_each_entry_safe(brd, next, &brd_devices, brd_list)
brd_del_one(brd); brd_del_one(brd);
unregister_blkdev(RAMDISK_MAJOR, "ramdisk");
pr_info("brd: module unloaded\n"); pr_info("brd: module unloaded\n");
} }
......
...@@ -71,8 +71,10 @@ static int rnbd_clt_parse_map_options(const char *buf, size_t max_path_cnt, ...@@ -71,8 +71,10 @@ static int rnbd_clt_parse_map_options(const char *buf, size_t max_path_cnt,
int opt_mask = 0; int opt_mask = 0;
int token; int token;
int ret = -EINVAL; int ret = -EINVAL;
int i, dest_port, nr_poll_queues; int nr_poll_queues = 0;
int dest_port = 0;
int p_cnt = 0; int p_cnt = 0;
int i;
options = kstrdup(buf, GFP_KERNEL); options = kstrdup(buf, GFP_KERNEL);
if (!options) if (!options)
......
...@@ -3550,10 +3550,15 @@ static int __nvme_check_ids(struct nvme_subsystem *subsys, ...@@ -3550,10 +3550,15 @@ static int __nvme_check_ids(struct nvme_subsystem *subsys,
return 0; return 0;
} }
static void nvme_cdev_rel(struct device *dev)
{
ida_simple_remove(&nvme_ns_chr_minor_ida, MINOR(dev->devt));
}
void nvme_cdev_del(struct cdev *cdev, struct device *cdev_device) void nvme_cdev_del(struct cdev *cdev, struct device *cdev_device)
{ {
cdev_device_del(cdev, cdev_device); cdev_device_del(cdev, cdev_device);
ida_simple_remove(&nvme_ns_chr_minor_ida, MINOR(cdev_device->devt)); put_device(cdev_device);
} }
int nvme_cdev_add(struct cdev *cdev, struct device *cdev_device, int nvme_cdev_add(struct cdev *cdev, struct device *cdev_device,
...@@ -3566,14 +3571,14 @@ int nvme_cdev_add(struct cdev *cdev, struct device *cdev_device, ...@@ -3566,14 +3571,14 @@ int nvme_cdev_add(struct cdev *cdev, struct device *cdev_device,
return minor; return minor;
cdev_device->devt = MKDEV(MAJOR(nvme_ns_chr_devt), minor); cdev_device->devt = MKDEV(MAJOR(nvme_ns_chr_devt), minor);
cdev_device->class = nvme_ns_chr_class; cdev_device->class = nvme_ns_chr_class;
cdev_device->release = nvme_cdev_rel;
device_initialize(cdev_device); device_initialize(cdev_device);
cdev_init(cdev, fops); cdev_init(cdev, fops);
cdev->owner = owner; cdev->owner = owner;
ret = cdev_device_add(cdev, cdev_device); ret = cdev_device_add(cdev, cdev_device);
if (ret) { if (ret)
put_device(cdev_device); put_device(cdev_device);
ida_simple_remove(&nvme_ns_chr_minor_ida, minor);
}
return ret; return ret;
} }
...@@ -3605,11 +3610,9 @@ static int nvme_add_ns_cdev(struct nvme_ns *ns) ...@@ -3605,11 +3610,9 @@ static int nvme_add_ns_cdev(struct nvme_ns *ns)
ns->ctrl->instance, ns->head->instance); ns->ctrl->instance, ns->head->instance);
if (ret) if (ret)
return ret; return ret;
ret = nvme_cdev_add(&ns->cdev, &ns->cdev_device, &nvme_ns_chr_fops,
ns->ctrl->ops->module); return nvme_cdev_add(&ns->cdev, &ns->cdev_device, &nvme_ns_chr_fops,
if (ret) ns->ctrl->ops->module);
kfree_const(ns->cdev_device.kobj.name);
return ret;
} }
static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl, static struct nvme_ns_head *nvme_alloc_ns_head(struct nvme_ctrl *ctrl,
......
...@@ -431,8 +431,6 @@ static int nvme_add_ns_head_cdev(struct nvme_ns_head *head) ...@@ -431,8 +431,6 @@ static int nvme_add_ns_head_cdev(struct nvme_ns_head *head)
return ret; return ret;
ret = nvme_cdev_add(&head->cdev, &head->cdev_device, ret = nvme_cdev_add(&head->cdev, &head->cdev_device,
&nvme_ns_head_chr_fops, THIS_MODULE); &nvme_ns_head_chr_fops, THIS_MODULE);
if (ret)
kfree_const(head->cdev_device.kobj.name);
return ret; return ret;
} }
......
...@@ -1330,7 +1330,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved) ...@@ -1330,7 +1330,7 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
iod->aborted = 1; iod->aborted = 1;
cmd.abort.opcode = nvme_admin_abort_cmd; cmd.abort.opcode = nvme_admin_abort_cmd;
cmd.abort.cid = req->tag; cmd.abort.cid = nvme_cid(req);
cmd.abort.sqid = cpu_to_le16(nvmeq->qid); cmd.abort.sqid = cpu_to_le16(nvmeq->qid);
dev_warn(nvmeq->dev->ctrl.device, dev_warn(nvmeq->dev->ctrl.device,
......
...@@ -149,6 +149,7 @@ struct gendisk { ...@@ -149,6 +149,7 @@ struct gendisk {
unsigned long state; unsigned long state;
#define GD_NEED_PART_SCAN 0 #define GD_NEED_PART_SCAN 0
#define GD_READ_ONLY 1 #define GD_READ_ONLY 1
#define GD_DEAD 2
struct mutex open_mutex; /* open/close mutex */ struct mutex open_mutex; /* open/close mutex */
unsigned open_partitions; /* number of open partitions */ unsigned open_partitions; /* number of open partitions */
......
...@@ -13,11 +13,11 @@ ...@@ -13,11 +13,11 @@
TRACE_EVENT(kyber_latency, TRACE_EVENT(kyber_latency,
TP_PROTO(struct request_queue *q, const char *domain, const char *type, TP_PROTO(dev_t dev, const char *domain, const char *type,
unsigned int percentile, unsigned int numerator, unsigned int percentile, unsigned int numerator,
unsigned int denominator, unsigned int samples), unsigned int denominator, unsigned int samples),
TP_ARGS(q, domain, type, percentile, numerator, denominator, samples), TP_ARGS(dev, domain, type, percentile, numerator, denominator, samples),
TP_STRUCT__entry( TP_STRUCT__entry(
__field( dev_t, dev ) __field( dev_t, dev )
...@@ -30,7 +30,7 @@ TRACE_EVENT(kyber_latency, ...@@ -30,7 +30,7 @@ TRACE_EVENT(kyber_latency,
), ),
TP_fast_assign( TP_fast_assign(
__entry->dev = disk_devt(q->disk); __entry->dev = dev;
strlcpy(__entry->domain, domain, sizeof(__entry->domain)); strlcpy(__entry->domain, domain, sizeof(__entry->domain));
strlcpy(__entry->type, type, sizeof(__entry->type)); strlcpy(__entry->type, type, sizeof(__entry->type));
__entry->percentile = percentile; __entry->percentile = percentile;
...@@ -47,10 +47,9 @@ TRACE_EVENT(kyber_latency, ...@@ -47,10 +47,9 @@ TRACE_EVENT(kyber_latency,
TRACE_EVENT(kyber_adjust, TRACE_EVENT(kyber_adjust,
TP_PROTO(struct request_queue *q, const char *domain, TP_PROTO(dev_t dev, const char *domain, unsigned int depth),
unsigned int depth),
TP_ARGS(q, domain, depth), TP_ARGS(dev, domain, depth),
TP_STRUCT__entry( TP_STRUCT__entry(
__field( dev_t, dev ) __field( dev_t, dev )
...@@ -59,7 +58,7 @@ TRACE_EVENT(kyber_adjust, ...@@ -59,7 +58,7 @@ TRACE_EVENT(kyber_adjust,
), ),
TP_fast_assign( TP_fast_assign(
__entry->dev = disk_devt(q->disk); __entry->dev = dev;
strlcpy(__entry->domain, domain, sizeof(__entry->domain)); strlcpy(__entry->domain, domain, sizeof(__entry->domain));
__entry->depth = depth; __entry->depth = depth;
), ),
...@@ -71,9 +70,9 @@ TRACE_EVENT(kyber_adjust, ...@@ -71,9 +70,9 @@ TRACE_EVENT(kyber_adjust,
TRACE_EVENT(kyber_throttled, TRACE_EVENT(kyber_throttled,
TP_PROTO(struct request_queue *q, const char *domain), TP_PROTO(dev_t dev, const char *domain),
TP_ARGS(q, domain), TP_ARGS(dev, domain),
TP_STRUCT__entry( TP_STRUCT__entry(
__field( dev_t, dev ) __field( dev_t, dev )
...@@ -81,7 +80,7 @@ TRACE_EVENT(kyber_throttled, ...@@ -81,7 +80,7 @@ TRACE_EVENT(kyber_throttled,
), ),
TP_fast_assign( TP_fast_assign(
__entry->dev = disk_devt(q->disk); __entry->dev = dev;
strlcpy(__entry->domain, domain, sizeof(__entry->domain)); strlcpy(__entry->domain, domain, sizeof(__entry->domain));
), ),
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment