Commit ee472d83 authored by Christoph Hellwig, committed by Jens Axboe

block: add a flags argument to (__)blkdev_issue_zeroout

Turn the existing discard flag into a new BLKDEV_ZERO_UNMAP flag with
similar semantics, but without referring to discard.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Reviewed-by: Hannes Reinecke <hare@suse.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
parent c20cfc27
...@@ -282,14 +282,18 @@ static int __blkdev_issue_write_zeroes(struct block_device *bdev, ...@@ -282,14 +282,18 @@ static int __blkdev_issue_write_zeroes(struct block_device *bdev,
* @nr_sects: number of sectors to write * @nr_sects: number of sectors to write
* @gfp_mask: memory allocation flags (for bio_alloc) * @gfp_mask: memory allocation flags (for bio_alloc)
* @biop: pointer to anchor bio * @biop: pointer to anchor bio
* @discard: discard flag * @flags: controls detailed behavior
* *
* Description: * Description:
* Generate and issue number of bios with zerofiled pages. * Zero-fill a block range, either using hardware offload or by explicitly
* writing zeroes to the device.
*
* If a device is using logical block provisioning, the underlying space will
* not be released if %flags contains BLKDEV_ZERO_NOUNMAP.
*/ */
int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
sector_t nr_sects, gfp_t gfp_mask, struct bio **biop, sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
bool discard) unsigned flags)
{ {
int ret; int ret;
int bi_size = 0; int bi_size = 0;
...@@ -337,28 +341,21 @@ EXPORT_SYMBOL(__blkdev_issue_zeroout); ...@@ -337,28 +341,21 @@ EXPORT_SYMBOL(__blkdev_issue_zeroout);
* @sector: start sector * @sector: start sector
* @nr_sects: number of sectors to write * @nr_sects: number of sectors to write
* @gfp_mask: memory allocation flags (for bio_alloc) * @gfp_mask: memory allocation flags (for bio_alloc)
* @discard: whether to discard the block range * @flags: controls detailed behavior
* *
* Description: * Description:
* Zero-fill a block range. If the discard flag is set and the block * Zero-fill a block range, either using hardware offload or by explicitly
* device guarantees that subsequent READ operations to the block range * writing zeroes to the device. See __blkdev_issue_zeroout() for the
* in question will return zeroes, the blocks will be discarded. Should * valid values for %flags.
* the discard request fail, if the discard flag is not set, or if
* discard_zeroes_data is not supported, this function will resort to
* zeroing the blocks manually, thus provisioning (allocating,
* anchoring) them. If the block device supports WRITE ZEROES or WRITE SAME
* command(s), blkdev_issue_zeroout() will use it to optimize the process of
* clearing the block range. Otherwise the zeroing will be performed
* using regular WRITE calls.
*/ */
int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
sector_t nr_sects, gfp_t gfp_mask, bool discard) sector_t nr_sects, gfp_t gfp_mask, unsigned flags)
{ {
int ret; int ret;
struct bio *bio = NULL; struct bio *bio = NULL;
struct blk_plug plug; struct blk_plug plug;
if (discard) { if (!(flags & BLKDEV_ZERO_NOUNMAP)) {
if (!blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask, if (!blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask,
BLKDEV_DISCARD_ZERO)) BLKDEV_DISCARD_ZERO))
return 0; return 0;
...@@ -366,7 +363,7 @@ int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, ...@@ -366,7 +363,7 @@ int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
blk_start_plug(&plug); blk_start_plug(&plug);
ret = __blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask, ret = __blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask,
&bio, discard); &bio, flags);
if (ret == 0 && bio) { if (ret == 0 && bio) {
ret = submit_bio_wait(bio); ret = submit_bio_wait(bio);
bio_put(bio); bio_put(bio);
......
...@@ -255,7 +255,7 @@ static int blk_ioctl_zeroout(struct block_device *bdev, fmode_t mode, ...@@ -255,7 +255,7 @@ static int blk_ioctl_zeroout(struct block_device *bdev, fmode_t mode,
truncate_inode_pages_range(mapping, start, end); truncate_inode_pages_range(mapping, start, end);
return blkdev_issue_zeroout(bdev, start >> 9, len >> 9, GFP_KERNEL, return blkdev_issue_zeroout(bdev, start >> 9, len >> 9, GFP_KERNEL,
false); BLKDEV_ZERO_NOUNMAP);
} }
static int put_ushort(unsigned long arg, unsigned short val) static int put_ushort(unsigned long arg, unsigned short val)
......
...@@ -1499,19 +1499,22 @@ int drbd_issue_discard_or_zero_out(struct drbd_device *device, sector_t start, u ...@@ -1499,19 +1499,22 @@ int drbd_issue_discard_or_zero_out(struct drbd_device *device, sector_t start, u
tmp = start + granularity - sector_div(tmp, granularity); tmp = start + granularity - sector_div(tmp, granularity);
nr = tmp - start; nr = tmp - start;
err |= blkdev_issue_zeroout(bdev, start, nr, GFP_NOIO, 0); err |= blkdev_issue_zeroout(bdev, start, nr, GFP_NOIO,
BLKDEV_ZERO_NOUNMAP);
nr_sectors -= nr; nr_sectors -= nr;
start = tmp; start = tmp;
} }
while (nr_sectors >= granularity) { while (nr_sectors >= granularity) {
nr = min_t(sector_t, nr_sectors, max_discard_sectors); nr = min_t(sector_t, nr_sectors, max_discard_sectors);
err |= blkdev_issue_discard(bdev, start, nr, GFP_NOIO, 0); err |= blkdev_issue_discard(bdev, start, nr, GFP_NOIO,
BLKDEV_ZERO_NOUNMAP);
nr_sectors -= nr; nr_sectors -= nr;
start += nr; start += nr;
} }
zero_out: zero_out:
if (nr_sectors) { if (nr_sectors) {
err |= blkdev_issue_zeroout(bdev, start, nr_sectors, GFP_NOIO, 0); err |= blkdev_issue_zeroout(bdev, start, nr_sectors, GFP_NOIO,
BLKDEV_ZERO_NOUNMAP);
} }
return err != 0; return err != 0;
} }
......
...@@ -184,7 +184,7 @@ static void nvmet_execute_write_zeroes(struct nvmet_req *req) ...@@ -184,7 +184,7 @@ static void nvmet_execute_write_zeroes(struct nvmet_req *req)
(req->ns->blksize_shift - 9)) + 1; (req->ns->blksize_shift - 9)) + 1;
if (__blkdev_issue_zeroout(req->ns->bdev, sector, nr_sector, if (__blkdev_issue_zeroout(req->ns->bdev, sector, nr_sector,
GFP_KERNEL, &bio, true)) GFP_KERNEL, &bio, 0))
status = NVME_SC_INTERNAL | NVME_SC_DNR; status = NVME_SC_INTERNAL | NVME_SC_DNR;
if (bio) { if (bio) {
......
...@@ -2105,7 +2105,7 @@ static long blkdev_fallocate(struct file *file, int mode, loff_t start, ...@@ -2105,7 +2105,7 @@ static long blkdev_fallocate(struct file *file, int mode, loff_t start,
case FALLOC_FL_ZERO_RANGE: case FALLOC_FL_ZERO_RANGE:
case FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE: case FALLOC_FL_ZERO_RANGE | FALLOC_FL_KEEP_SIZE:
error = blkdev_issue_zeroout(bdev, start >> 9, len >> 9, error = blkdev_issue_zeroout(bdev, start >> 9, len >> 9,
GFP_KERNEL, false); GFP_KERNEL, BLKDEV_ZERO_NOUNMAP);
break; break;
case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE: case FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE:
/* Only punch if the device can do zeroing discard. */ /* Only punch if the device can do zeroing discard. */
......
...@@ -982,7 +982,7 @@ int __dax_zero_page_range(struct block_device *bdev, sector_t sector, ...@@ -982,7 +982,7 @@ int __dax_zero_page_range(struct block_device *bdev, sector_t sector,
sector_t start_sector = dax.sector + (offset >> 9); sector_t start_sector = dax.sector + (offset >> 9);
return blkdev_issue_zeroout(bdev, start_sector, return blkdev_issue_zeroout(bdev, start_sector,
length >> 9, GFP_NOFS, true); length >> 9, GFP_NOFS, 0);
} else { } else {
if (dax_map_atomic(bdev, &dax) < 0) if (dax_map_atomic(bdev, &dax) < 0)
return PTR_ERR(dax.addr); return PTR_ERR(dax.addr);
......
...@@ -81,7 +81,7 @@ xfs_zero_extent( ...@@ -81,7 +81,7 @@ xfs_zero_extent(
return blkdev_issue_zeroout(xfs_find_bdev_for_inode(VFS_I(ip)), return blkdev_issue_zeroout(xfs_find_bdev_for_inode(VFS_I(ip)),
block << (mp->m_super->s_blocksize_bits - 9), block << (mp->m_super->s_blocksize_bits - 9),
count_fsb << (mp->m_super->s_blocksize_bits - 9), count_fsb << (mp->m_super->s_blocksize_bits - 9),
GFP_NOFS, true); GFP_NOFS, 0);
} }
int int
......
...@@ -1336,23 +1336,27 @@ static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt, ...@@ -1336,23 +1336,27 @@ static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
return bqt->tag_index[tag]; return bqt->tag_index[tag];
} }
extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *);
extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
sector_t nr_sects, gfp_t gfp_mask, struct page *page);
#define BLKDEV_DISCARD_SECURE (1 << 0) /* issue a secure erase */ #define BLKDEV_DISCARD_SECURE (1 << 0) /* issue a secure erase */
#define BLKDEV_DISCARD_ZERO (1 << 1) /* must reliably zero data */ #define BLKDEV_DISCARD_ZERO (1 << 1) /* must reliably zero data */
extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *);
extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector, extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
sector_t nr_sects, gfp_t gfp_mask, unsigned long flags); sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
extern int __blkdev_issue_discard(struct block_device *bdev, sector_t sector, extern int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
sector_t nr_sects, gfp_t gfp_mask, int flags, sector_t nr_sects, gfp_t gfp_mask, int flags,
struct bio **biop); struct bio **biop);
extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
sector_t nr_sects, gfp_t gfp_mask, struct page *page); #define BLKDEV_ZERO_NOUNMAP (1 << 0) /* do not free blocks */
extern int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, extern int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
sector_t nr_sects, gfp_t gfp_mask, struct bio **biop, sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
bool discard); unsigned flags);
extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
sector_t nr_sects, gfp_t gfp_mask, bool discard); sector_t nr_sects, gfp_t gfp_mask, unsigned flags);
static inline int sb_issue_discard(struct super_block *sb, sector_t block, static inline int sb_issue_discard(struct super_block *sb, sector_t block,
sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags) sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags)
{ {
...@@ -1366,7 +1370,7 @@ static inline int sb_issue_zeroout(struct super_block *sb, sector_t block, ...@@ -1366,7 +1370,7 @@ static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
return blkdev_issue_zeroout(sb->s_bdev, return blkdev_issue_zeroout(sb->s_bdev,
block << (sb->s_blocksize_bits - 9), block << (sb->s_blocksize_bits - 9),
nr_blocks << (sb->s_blocksize_bits - 9), nr_blocks << (sb->s_blocksize_bits - 9),
gfp_mask, true); gfp_mask, 0);
} }
extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm); extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment