Commit 3d29cb17 authored by Linus Torvalds

Merge tag 'block-5.7-2020-04-24' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:
 "A few fixes/changes that should go into this release:

   - null_blk zoned fixes (Damien)

   - blkdev_close() sync improvement (Douglas)

   - Fix regression in blk-iocost that impacted (at least) systemtap
     (Waiman)

   - Comment fix, header removal (Zhiqiang, Jianpeng)"

* tag 'block-5.7-2020-04-24' of git://git.kernel.dk/linux-block:
  null_blk: Cleanup zoned device initialization
  null_blk: Fix zoned command handling
  block: remove unused header
  blk-iocost: Fix error on iocost_ioc_vrate_adj
  bdev: Reduce time holding bd_mutex in sync in blkdev_close()
  buffer: remove useless comment and WB_REASON_FREE_MORE_MEM, reason.
parents da5de55d d205bde7
@@ -1591,7 +1591,7 @@ static void ioc_timer_fn(struct timer_list *timer)
 			 vrate_min, vrate_max);
 	}
 
-	trace_iocost_ioc_vrate_adj(ioc, vrate, &missed_ppm, rq_wait_pct,
+	trace_iocost_ioc_vrate_adj(ioc, vrate, missed_ppm, rq_wait_pct,
 				   nr_lagging, nr_shortages,
 				   nr_surpluses);
@@ -1600,7 +1600,7 @@ static void ioc_timer_fn(struct timer_list *timer)
 			ioc->period_us * vrate * INUSE_MARGIN_PCT, 100);
 	} else if (ioc->busy_level != prev_busy_level || nr_lagging) {
 		trace_iocost_ioc_vrate_adj(ioc, atomic64_read(&ioc->vtime_rate),
-					   &missed_ppm, rq_wait_pct, nr_lagging,
+					   missed_ppm, rq_wait_pct, nr_lagging,
 					   nr_shortages, nr_surpluses);
 	}
...
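The two hunks above pass missed_ppm, a two-element u32 array local to ioc_timer_fn(), directly instead of by address, matching the tracepoint's new u32 * prototype (see the trace-header hunks further below). A minimal userspace reduction of the same signature change, as a sketch: the function and values are invented for illustration, and READ/WRITE here are stand-in indices rather than the kernel macros.

    /* Userspace sketch of the tracepoint signature change; nothing below
     * is kernel code. */
    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t u32;
    enum { READ = 0, WRITE = 1 };	/* stand-ins for the kernel macros */

    /* New style: a plain pointer, as in the patched TP_PROTO(). */
    static void trace_vrate_adj(u32 *missed_ppm)
    {
            printf("read %u ppm, write %u ppm\n",
                   missed_ppm[READ], missed_ppm[WRITE]);
    }

    int main(void)
    {
            u32 missed_ppm[2] = { 1000, 2000 };

            /* The old prototype was "u32 (*missed_ppm)[2]" and callers had
             * to pass &missed_ppm; the array name alone now suffices. */
            trace_vrate_adj(missed_ppm);
            return 0;
    }

An array argument decays to a pointer to its first element, so the callee's indexing is unchanged; it was the pointer-to-array type u32 (*)[2] that, per the pull message, broke (at least) systemtap.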
@@ -85,26 +85,35 @@ struct nullb {
 	char disk_name[DISK_NAME_LEN];
 };
 
+blk_status_t null_process_cmd(struct nullb_cmd *cmd,
+			      enum req_opf op, sector_t sector,
+			      unsigned int nr_sectors);
+
 #ifdef CONFIG_BLK_DEV_ZONED
-int null_zone_init(struct nullb_device *dev);
-void null_zone_exit(struct nullb_device *dev);
+int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q);
+int null_register_zoned_dev(struct nullb *nullb);
+void null_free_zoned_dev(struct nullb_device *dev);
 int null_report_zones(struct gendisk *disk, sector_t sector,
 		      unsigned int nr_zones, report_zones_cb cb, void *data);
-blk_status_t null_handle_zoned(struct nullb_cmd *cmd,
-			       enum req_opf op, sector_t sector,
-			       sector_t nr_sectors);
+blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd,
+				    enum req_opf op, sector_t sector,
+				    sector_t nr_sectors);
 size_t null_zone_valid_read_len(struct nullb *nullb,
 				sector_t sector, unsigned int len);
 #else
-static inline int null_zone_init(struct nullb_device *dev)
+static inline int null_init_zoned_dev(struct nullb_device *dev,
+				      struct request_queue *q)
 {
 	pr_err("CONFIG_BLK_DEV_ZONED not enabled\n");
 	return -EINVAL;
 }
-static inline void null_zone_exit(struct nullb_device *dev) {}
-static inline blk_status_t null_handle_zoned(struct nullb_cmd *cmd,
-					     enum req_opf op, sector_t sector,
-					     sector_t nr_sectors)
+static inline int null_register_zoned_dev(struct nullb *nullb)
+{
+	return -ENODEV;
+}
+static inline void null_free_zoned_dev(struct nullb_device *dev) {}
+static inline blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd,
+			enum req_opf op, sector_t sector, sector_t nr_sectors)
 {
 	return BLK_STS_NOTSUPP;
 }
...
@@ -580,7 +580,7 @@ static void null_free_dev(struct nullb_device *dev)
 	if (!dev)
 		return;
 
-	null_zone_exit(dev);
+	null_free_zoned_dev(dev);
 	badblocks_exit(&dev->badblocks);
 	kfree(dev);
 }
@@ -1276,6 +1276,25 @@ static inline void nullb_complete_cmd(struct nullb_cmd *cmd)
 	}
 }
 
+blk_status_t null_process_cmd(struct nullb_cmd *cmd,
+			      enum req_opf op, sector_t sector,
+			      unsigned int nr_sectors)
+{
+	struct nullb_device *dev = cmd->nq->dev;
+	blk_status_t ret;
+
+	if (dev->badblocks.shift != -1) {
+		ret = null_handle_badblocks(cmd, sector, nr_sectors);
+		if (ret != BLK_STS_OK)
+			return ret;
+	}
+
+	if (dev->memory_backed)
+		return null_handle_memory_backed(cmd, op);
+
+	return BLK_STS_OK;
+}
+
 static blk_status_t null_handle_cmd(struct nullb_cmd *cmd, sector_t sector,
 				    sector_t nr_sectors, enum req_opf op)
 {
@@ -1294,17 +1313,11 @@ static blk_status_t null_handle_cmd(struct nullb_cmd *cmd, sector_t sector,
 			goto out;
 	}
 
-	if (nullb->dev->badblocks.shift != -1) {
-		cmd->error = null_handle_badblocks(cmd, sector, nr_sectors);
-		if (cmd->error != BLK_STS_OK)
-			goto out;
-	}
-
-	if (dev->memory_backed)
-		cmd->error = null_handle_memory_backed(cmd, op);
-
-	if (!cmd->error && dev->zoned)
-		cmd->error = null_handle_zoned(cmd, op, sector, nr_sectors);
+	if (dev->zoned)
+		cmd->error = null_process_zoned_cmd(cmd, op,
+						    sector, nr_sectors);
+	else
+		cmd->error = null_process_cmd(cmd, op, sector, nr_sectors);
 
 out:
 	nullb_complete_cmd(cmd);
@@ -1605,19 +1618,12 @@ static int null_gendisk_register(struct nullb *nullb)
 	disk->queue = nullb->q;
 	strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN);
 
-#ifdef CONFIG_BLK_DEV_ZONED
 	if (nullb->dev->zoned) {
-		if (queue_is_mq(nullb->q)) {
-			int ret = blk_revalidate_disk_zones(disk);
-
-			if (ret)
-				return ret;
-		} else {
-			blk_queue_chunk_sectors(nullb->q,
-					nullb->dev->zone_size_sects);
-			nullb->q->nr_zones = blkdev_nr_zones(disk);
-		}
+		int ret = null_register_zoned_dev(nullb);
+
+		if (ret)
+			return ret;
 	}
-#endif
 
 	add_disk(disk);
 	return 0;
@@ -1773,14 +1779,9 @@ static int null_add_dev(struct nullb_device *dev)
 	}
 
 	if (dev->zoned) {
-		rv = null_zone_init(dev);
+		rv = null_init_zoned_dev(dev, nullb->q);
 		if (rv)
 			goto out_cleanup_blk_queue;
-
-		nullb->q->limits.zoned = BLK_ZONED_HM;
-		blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, nullb->q);
-		blk_queue_required_elevator_features(nullb->q,
-						ELEVATOR_F_ZBD_SEQ_WRITE);
 	}
 
 	nullb->q->queuedata = nullb;
@@ -1809,8 +1810,7 @@ static int null_add_dev(struct nullb_device *dev)
 	return 0;
 
 out_cleanup_zone:
-	if (dev->zoned)
-		null_zone_exit(dev);
+	null_free_zoned_dev(dev);
 out_cleanup_blk_queue:
 	blk_cleanup_queue(nullb->q);
 out_cleanup_tags:
...
@@ -13,7 +13,7 @@ static inline unsigned int null_zone_no(struct nullb_device *dev, sector_t sect)
 	return sect >> ilog2(dev->zone_size_sects);
 }
 
-int null_zone_init(struct nullb_device *dev)
+int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q)
 {
 	sector_t dev_size = (sector_t)dev->size * 1024 * 1024;
 	sector_t sector = 0;
@@ -61,10 +61,27 @@ int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q)
 		sector += dev->zone_size_sects;
 	}
 
+	q->limits.zoned = BLK_ZONED_HM;
+	blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);
+	blk_queue_required_elevator_features(q, ELEVATOR_F_ZBD_SEQ_WRITE);
+
+	return 0;
+}
+
+int null_register_zoned_dev(struct nullb *nullb)
+{
+	struct request_queue *q = nullb->q;
+
+	if (queue_is_mq(q))
+		return blk_revalidate_disk_zones(nullb->disk);
+
+	blk_queue_chunk_sectors(q, nullb->dev->zone_size_sects);
+	q->nr_zones = blkdev_nr_zones(nullb->disk);
+
 	return 0;
 }
 
-void null_zone_exit(struct nullb_device *dev)
+void null_free_zoned_dev(struct nullb_device *dev)
 {
 	kvfree(dev->zones);
 }
@@ -126,11 +143,16 @@ static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
 	struct nullb_device *dev = cmd->nq->dev;
 	unsigned int zno = null_zone_no(dev, sector);
 	struct blk_zone *zone = &dev->zones[zno];
+	blk_status_t ret;
+
+	trace_nullb_zone_op(cmd, zno, zone->cond);
+
+	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
+		return null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);
 
 	switch (zone->cond) {
 	case BLK_ZONE_COND_FULL:
 		/* Cannot write to a full zone */
-		cmd->error = BLK_STS_IOERR;
 		return BLK_STS_IOERR;
 	case BLK_ZONE_COND_EMPTY:
 	case BLK_ZONE_COND_IMP_OPEN:
@@ -143,19 +165,18 @@ static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
 		if (zone->cond != BLK_ZONE_COND_EXP_OPEN)
 			zone->cond = BLK_ZONE_COND_IMP_OPEN;
 
+		ret = null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);
+		if (ret != BLK_STS_OK)
+			return ret;
+
 		zone->wp += nr_sectors;
 		if (zone->wp == zone->start + zone->len)
 			zone->cond = BLK_ZONE_COND_FULL;
-		break;
-	case BLK_ZONE_COND_NOT_WP:
-		break;
+		return BLK_STS_OK;
 	default:
 		/* Invalid zone condition */
 		return BLK_STS_IOERR;
 	}
-
-	trace_nullb_zone_op(cmd, zno, zone->cond);
-
-	return BLK_STS_OK;
 }
 
 static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_opf op,
@@ -216,8 +237,8 @@ static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_opf op,
 	return BLK_STS_OK;
 }
 
-blk_status_t null_handle_zoned(struct nullb_cmd *cmd, enum req_opf op,
-			       sector_t sector, sector_t nr_sectors)
+blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd, enum req_opf op,
+				    sector_t sector, sector_t nr_sectors)
 {
 	switch (op) {
 	case REQ_OP_WRITE:
@@ -229,6 +250,6 @@ blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd, enum req_opf op,
 	case REQ_OP_ZONE_FINISH:
 		return null_zone_mgmt(cmd, op, sector);
 	default:
-		return BLK_STS_OK;
+		return null_process_cmd(cmd, op, sector, nr_sectors);
 	}
 }
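After this change, null_process_zoned_cmd() funnels reads, conventional-zone writes, and the data phase of sequential writes through the common null_process_cmd() path, and null_zone_write() advances the write pointer only after that phase succeeds. A toy standalone model of the write-pointer bookkeeping, as a sketch: the struct, conditions, and return convention are invented for illustration, not the kernel's types.

    /* Toy model of a sequential zone's write-pointer rules; everything
     * here is invented for illustration and is not kernel code. */
    #include <stdbool.h>
    #include <stdio.h>

    enum zone_cond { ZONE_EMPTY, ZONE_OPEN, ZONE_FULL };

    struct toy_zone {
            unsigned long long start, len, wp;	/* in sectors */
            enum zone_cond cond;
    };

    /* Fails on full zones and on writes that do not start at the write
     * pointer; advances wp only after the (elided) data phase succeeds,
     * mirroring the ordering the patch introduces. */
    static bool toy_zone_write(struct toy_zone *z,
                               unsigned long long sector,
                               unsigned long long nr_sectors)
    {
            if (z->cond == ZONE_FULL || sector != z->wp)
                    return false;

            /* ... data transfer would happen here ... */

            z->wp += nr_sectors;
            z->cond = (z->wp == z->start + z->len) ? ZONE_FULL : ZONE_OPEN;
            return true;
    }

    int main(void)
    {
            struct toy_zone z = { .start = 0, .len = 8, .wp = 0,
                                  .cond = ZONE_EMPTY };

            printf("%d\n", toy_zone_write(&z, 0, 4));	/* 1: append at wp */
            printf("%d\n", toy_zone_write(&z, 0, 4));	/* 0: wp moved to 4 */
            printf("%d\n", toy_zone_write(&z, 4, 4));	/* 1: zone now full */
            printf("%d\n", z.cond == ZONE_FULL);	/* 1 */
            return 0;
    }

This append-at-wp rule is also why null_init_zoned_dev() requests ELEVATOR_F_ZBD_SEQ_WRITE above: the scheduler must keep writes ordered so each one lands exactly at the write pointer.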
@@ -19,7 +19,6 @@
 #include <linux/module.h>
 #include <linux/blkpg.h>
 #include <linux/magic.h>
-#include <linux/dax.h>
 #include <linux/buffer_head.h>
 #include <linux/swap.h>
 #include <linux/pagevec.h>
@@ -1893,6 +1892,16 @@ static void __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part)
 	struct gendisk *disk = bdev->bd_disk;
 	struct block_device *victim = NULL;
 
+	/*
+	 * Sync early if it looks like we're the last one. If someone else
+	 * opens the block device between now and the decrement of bd_openers
+	 * then we did a sync that we didn't need to, but that's not the end
+	 * of the world and we want to avoid long (could be several minute)
+	 * syncs while holding the mutex.
+	 */
+	if (bdev->bd_openers == 1)
+		sync_blockdev(bdev);
+
 	mutex_lock_nested(&bdev->bd_mutex, for_part);
 	if (for_part)
 		bdev->bd_part_count--;
...
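The hunk above is an instance of a general locking pattern: do the potentially slow work optimistically before taking the lock, and tolerate one redundant pass in the rare race rather than a minutes-long mutex hold. A hedged userspace sketch of the same idea, where the counter, flush_cache(), and the pthread setting are invented stand-ins, not the kernel's primitives:

    /* Sketch of the "expensive work before the lock" pattern; nothing
     * below is kernel code. */
    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static int openers = 1;

    static void flush_cache(void)	/* stands in for sync_blockdev() */
    {
            puts("flushing (possibly slow)...");
    }

    static void release(void)
    {
            /*
             * Deliberately optimistic unlocked peek: if we look like the
             * last opener, flush now so the slow part runs without the
             * mutex held. A racing open() just means one extra flush.
             */
            if (openers == 1)
                    flush_cache();

            pthread_mutex_lock(&lock);
            if (--openers == 0)
                    flush_cache();	/* short: heavy lifting happened above */
            pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
            release();
            return 0;
    }

As in __blkdev_put(), the unlocked peek can only cause an extra flush, never a missed one, because the locked path still flushes when the count actually reaches zero.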
@@ -967,7 +967,7 @@ grow_dev_page(struct block_device *bdev, sector_t block,
 	struct page *page;
 	struct buffer_head *bh;
 	sector_t end_block;
-	int ret = 0;		/* Will call free_more_memory() */
+	int ret = 0;
 	gfp_t gfp_mask;
 
 	gfp_mask = mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS) | gfp;
...
@@ -54,7 +54,6 @@ enum wb_reason {
 	WB_REASON_SYNC,
 	WB_REASON_PERIODIC,
 	WB_REASON_LAPTOP_TIMER,
-	WB_REASON_FREE_MORE_MEM,
 	WB_REASON_FS_FREE_SPACE,
 	/*
 	 * There is no bdi forker thread any more and works are done
...
@@ -130,7 +130,7 @@ DEFINE_EVENT(iocg_inuse_update, iocost_inuse_reset,
 TRACE_EVENT(iocost_ioc_vrate_adj,
 
-	TP_PROTO(struct ioc *ioc, u64 new_vrate, u32 (*missed_ppm)[2],
+	TP_PROTO(struct ioc *ioc, u64 new_vrate, u32 *missed_ppm,
 		 u32 rq_wait_pct, int nr_lagging, int nr_shortages,
 		 int nr_surpluses),
@@ -155,8 +155,8 @@ TRACE_EVENT(iocost_ioc_vrate_adj,
 		__entry->old_vrate = atomic64_read(&ioc->vtime_rate);;
 		__entry->new_vrate = new_vrate;
 		__entry->busy_level = ioc->busy_level;
-		__entry->read_missed_ppm = (*missed_ppm)[READ];
-		__entry->write_missed_ppm = (*missed_ppm)[WRITE];
+		__entry->read_missed_ppm = missed_ppm[READ];
+		__entry->write_missed_ppm = missed_ppm[WRITE];
 		__entry->rq_wait_pct = rq_wait_pct;
 		__entry->nr_lagging = nr_lagging;
 		__entry->nr_shortages = nr_shortages;
...
@@ -36,7 +36,6 @@
 	EM( WB_REASON_SYNC,			"sync")			\
 	EM( WB_REASON_PERIODIC,			"periodic")		\
 	EM( WB_REASON_LAPTOP_TIMER,		"laptop_timer")		\
-	EM( WB_REASON_FREE_MORE_MEM,		"free_more_memory")	\
 	EM( WB_REASON_FS_FREE_SPACE,		"fs_free_space")	\
 	EMe(WB_REASON_FORKER_THREAD,		"forker_thread")
...