Commit b9889768 authored by Linus Torvalds

Merge tag 'block-5.17-2022-02-17' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:

 - Surprise removal fix (Christoph)

 - Ensure that pages are zeroed before submitted for userspace IO
   (Haimin)

 - Fix blk-wbt accounting issue with BFQ (Laibin)

 - Use bsize for discard granularity in loop (Ming)

 - Fix missing zone handling in blk_complete_request() (Pankaj)

* tag 'block-5.17-2022-02-17' of git://git.kernel.dk/linux-block:
  block/wbt: fix negative inflight counter when remove scsi device
  block: fix surprise removal for drivers calling blk_set_queue_dying
  block-map: add __GFP_ZERO flag for alloc_page in function bio_copy_kern
  block: loop:use kstatfs.f_bsize of backing file to set discard granularity
  block: Add handling for zone append command in blk_complete_request
parents 2848551b e92bc4cd
...@@ -7018,6 +7018,8 @@ static void bfq_exit_queue(struct elevator_queue *e) ...@@ -7018,6 +7018,8 @@ static void bfq_exit_queue(struct elevator_queue *e)
spin_unlock_irq(&bfqd->lock); spin_unlock_irq(&bfqd->lock);
#endif #endif
wbt_enable_default(bfqd->queue);
kfree(bfqd); kfree(bfqd);
} }
......
...@@ -284,13 +284,6 @@ void blk_queue_start_drain(struct request_queue *q) ...@@ -284,13 +284,6 @@ void blk_queue_start_drain(struct request_queue *q)
wake_up_all(&q->mq_freeze_wq); wake_up_all(&q->mq_freeze_wq);
} }
void blk_set_queue_dying(struct request_queue *q)
{
blk_queue_flag_set(QUEUE_FLAG_DYING, q);
blk_queue_start_drain(q);
}
EXPORT_SYMBOL_GPL(blk_set_queue_dying);
/** /**
* blk_cleanup_queue - shutdown a request queue * blk_cleanup_queue - shutdown a request queue
* @q: request queue to shutdown * @q: request queue to shutdown
...@@ -308,7 +301,8 @@ void blk_cleanup_queue(struct request_queue *q) ...@@ -308,7 +301,8 @@ void blk_cleanup_queue(struct request_queue *q)
WARN_ON_ONCE(blk_queue_registered(q)); WARN_ON_ONCE(blk_queue_registered(q));
/* mark @q DYING, no new request or merges will be allowed afterwards */ /* mark @q DYING, no new request or merges will be allowed afterwards */
blk_set_queue_dying(q); blk_queue_flag_set(QUEUE_FLAG_DYING, q);
blk_queue_start_drain(q);
blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q); blk_queue_flag_set(QUEUE_FLAG_NOMERGES, q);
blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q); blk_queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
......
...@@ -446,7 +446,7 @@ static struct bio *bio_copy_kern(struct request_queue *q, void *data, ...@@ -446,7 +446,7 @@ static struct bio *bio_copy_kern(struct request_queue *q, void *data,
if (bytes > len) if (bytes > len)
bytes = len; bytes = len;
page = alloc_page(GFP_NOIO | gfp_mask); page = alloc_page(GFP_NOIO | __GFP_ZERO | gfp_mask);
if (!page) if (!page)
goto cleanup; goto cleanup;
......
...@@ -736,6 +736,10 @@ static void blk_complete_request(struct request *req) ...@@ -736,6 +736,10 @@ static void blk_complete_request(struct request *req)
/* Completion has already been traced */ /* Completion has already been traced */
bio_clear_flag(bio, BIO_TRACE_COMPLETION); bio_clear_flag(bio, BIO_TRACE_COMPLETION);
if (req_op(req) == REQ_OP_ZONE_APPEND)
bio->bi_iter.bi_sector = req->__sector;
if (!is_flush) if (!is_flush)
bio_endio(bio); bio_endio(bio);
bio = next; bio = next;
......
...@@ -525,8 +525,6 @@ void elv_unregister_queue(struct request_queue *q) ...@@ -525,8 +525,6 @@ void elv_unregister_queue(struct request_queue *q)
kobject_del(&e->kobj); kobject_del(&e->kobj);
e->registered = 0; e->registered = 0;
/* Re-enable throttling in case elevator disabled it */
wbt_enable_default(q);
} }
} }
......
...@@ -548,6 +548,20 @@ int __must_check device_add_disk(struct device *parent, struct gendisk *disk, ...@@ -548,6 +548,20 @@ int __must_check device_add_disk(struct device *parent, struct gendisk *disk,
} }
EXPORT_SYMBOL(device_add_disk); EXPORT_SYMBOL(device_add_disk);
/**
* blk_mark_disk_dead - mark a disk as dead
* @disk: disk to mark as dead
*
* Mark as disk as dead (e.g. surprise removed) and don't accept any new I/O
* to this disk.
*/
void blk_mark_disk_dead(struct gendisk *disk)
{
set_bit(GD_DEAD, &disk->state);
blk_queue_start_drain(disk->queue);
}
EXPORT_SYMBOL_GPL(blk_mark_disk_dead);
/** /**
* del_gendisk - remove the gendisk * del_gendisk - remove the gendisk
* @disk: the struct gendisk to remove * @disk: the struct gendisk to remove
......
...@@ -79,6 +79,7 @@ ...@@ -79,6 +79,7 @@
#include <linux/ioprio.h> #include <linux/ioprio.h>
#include <linux/blk-cgroup.h> #include <linux/blk-cgroup.h>
#include <linux/sched/mm.h> #include <linux/sched/mm.h>
#include <linux/statfs.h>
#include "loop.h" #include "loop.h"
...@@ -774,8 +775,13 @@ static void loop_config_discard(struct loop_device *lo) ...@@ -774,8 +775,13 @@ static void loop_config_discard(struct loop_device *lo)
granularity = 0; granularity = 0;
} else { } else {
struct kstatfs sbuf;
max_discard_sectors = UINT_MAX >> 9; max_discard_sectors = UINT_MAX >> 9;
granularity = inode->i_sb->s_blocksize; if (!vfs_statfs(&file->f_path, &sbuf))
granularity = sbuf.f_bsize;
else
max_discard_sectors = 0;
} }
if (max_discard_sectors) { if (max_discard_sectors) {
......
...@@ -4112,7 +4112,7 @@ static void mtip_pci_remove(struct pci_dev *pdev) ...@@ -4112,7 +4112,7 @@ static void mtip_pci_remove(struct pci_dev *pdev)
"Completion workers still active!\n"); "Completion workers still active!\n");
} }
blk_set_queue_dying(dd->queue); blk_mark_disk_dead(dd->disk);
set_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag); set_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag);
/* Clean up the block layer. */ /* Clean up the block layer. */
......
...@@ -7185,7 +7185,7 @@ static ssize_t do_rbd_remove(struct bus_type *bus, ...@@ -7185,7 +7185,7 @@ static ssize_t do_rbd_remove(struct bus_type *bus,
* IO to complete/fail. * IO to complete/fail.
*/ */
blk_mq_freeze_queue(rbd_dev->disk->queue); blk_mq_freeze_queue(rbd_dev->disk->queue);
blk_set_queue_dying(rbd_dev->disk->queue); blk_mark_disk_dead(rbd_dev->disk);
} }
del_gendisk(rbd_dev->disk); del_gendisk(rbd_dev->disk);
......
...@@ -2126,7 +2126,7 @@ static void blkfront_closing(struct blkfront_info *info) ...@@ -2126,7 +2126,7 @@ static void blkfront_closing(struct blkfront_info *info)
/* No more blkif_request(). */ /* No more blkif_request(). */
blk_mq_stop_hw_queues(info->rq); blk_mq_stop_hw_queues(info->rq);
blk_set_queue_dying(info->rq); blk_mark_disk_dead(info->gd);
set_capacity(info->gd, 0); set_capacity(info->gd, 0);
for_each_rinfo(info, rinfo, i) { for_each_rinfo(info, rinfo, i) {
......
...@@ -2077,7 +2077,7 @@ static void __dm_destroy(struct mapped_device *md, bool wait) ...@@ -2077,7 +2077,7 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
set_bit(DMF_FREEING, &md->flags); set_bit(DMF_FREEING, &md->flags);
spin_unlock(&_minor_lock); spin_unlock(&_minor_lock);
blk_set_queue_dying(md->queue); blk_mark_disk_dead(md->disk);
/* /*
* Take suspend_lock so that presuspend and postsuspend methods * Take suspend_lock so that presuspend and postsuspend methods
......
...@@ -4574,7 +4574,7 @@ static void nvme_set_queue_dying(struct nvme_ns *ns) ...@@ -4574,7 +4574,7 @@ static void nvme_set_queue_dying(struct nvme_ns *ns)
if (test_and_set_bit(NVME_NS_DEAD, &ns->flags)) if (test_and_set_bit(NVME_NS_DEAD, &ns->flags))
return; return;
blk_set_queue_dying(ns->queue); blk_mark_disk_dead(ns->disk);
nvme_start_ns_queue(ns); nvme_start_ns_queue(ns);
set_capacity_and_notify(ns->disk, 0); set_capacity_and_notify(ns->disk, 0);
......
...@@ -848,7 +848,7 @@ void nvme_mpath_remove_disk(struct nvme_ns_head *head) ...@@ -848,7 +848,7 @@ void nvme_mpath_remove_disk(struct nvme_ns_head *head)
{ {
if (!head->disk) if (!head->disk)
return; return;
blk_set_queue_dying(head->disk->queue); blk_mark_disk_dead(head->disk);
/* make sure all pending bios are cleaned up */ /* make sure all pending bios are cleaned up */
kblockd_schedule_work(&head->requeue_work); kblockd_schedule_work(&head->requeue_work);
flush_work(&head->requeue_work); flush_work(&head->requeue_work);
......
...@@ -748,7 +748,8 @@ extern bool blk_queue_can_use_dma_map_merging(struct request_queue *q, ...@@ -748,7 +748,8 @@ extern bool blk_queue_can_use_dma_map_merging(struct request_queue *q,
bool __must_check blk_get_queue(struct request_queue *); bool __must_check blk_get_queue(struct request_queue *);
extern void blk_put_queue(struct request_queue *); extern void blk_put_queue(struct request_queue *);
extern void blk_set_queue_dying(struct request_queue *);
void blk_mark_disk_dead(struct gendisk *disk);
#ifdef CONFIG_BLOCK #ifdef CONFIG_BLOCK
/* /*
......