Commit b7c33571 authored by Nick Piggin, committed by Jens Axboe

brd: support discard

Support discard requests in brd by deleting or zeroing the underlying backing
pages. This is mainly to help with testing, and to keep the brd code useful as
documentation/example code.

Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
parent 0e3c9a22
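
For reference, the new path can be exercised from userspace with the BLKDISCARD ioctl, which the block layer turns into discard bios that brd_make_request now handles. This is a minimal test sketch, not part of the patch; the device path /dev/ram0 and the 1 MiB range are assumptions for illustration only.

```c
/* Minimal sketch: issue a discard to a brd device (assumed /dev/ram0). */
#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>		/* BLKDISCARD */

int main(void)
{
	uint64_t range[2] = { 0, 1 << 20 };	/* byte offset 0, length 1 MiB */
	int fd = open("/dev/ram0", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* The kernel splits this into discard bios; brd zeroes the pages. */
	if (ioctl(fd, BLKDISCARD, &range) < 0)
		perror("BLKDISCARD");
	close(fd);
	return 0;
}
```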
@@ -133,6 +133,28 @@ static struct page *brd_insert_page(struct brd_device *brd, sector_t sector)
 	return page;
 }
 
+static void brd_free_page(struct brd_device *brd, sector_t sector)
+{
+	struct page *page;
+	pgoff_t idx;
+
+	spin_lock(&brd->brd_lock);
+	idx = sector >> PAGE_SECTORS_SHIFT;
+	page = radix_tree_delete(&brd->brd_pages, idx);
+	spin_unlock(&brd->brd_lock);
+	if (page)
+		__free_page(page);
+}
+
+static void brd_zero_page(struct brd_device *brd, sector_t sector)
+{
+	struct page *page;
+
+	page = brd_lookup_page(brd, sector);
+	if (page)
+		clear_highpage(page);
+}
+
 /*
  * Free all backing store pages and radix tree. This must only be called when
  * there are no other users of the device.
@@ -189,6 +211,24 @@ static int copy_to_brd_setup(struct brd_device *brd, sector_t sector, size_t n)
 	return 0;
 }
 
+static void discard_from_brd(struct brd_device *brd,
+			sector_t sector, size_t n)
+{
+	while (n >= PAGE_SIZE) {
+		/*
+		 * Don't want to actually discard pages here because
+		 * re-allocating the pages can result in writeback
+		 * deadlocks under heavy load.
+		 */
+		if (0)
+			brd_free_page(brd, sector);
+		else
+			brd_zero_page(brd, sector);
+		sector += PAGE_SIZE >> SECTOR_SHIFT;
+		n -= PAGE_SIZE;
+	}
+}
+
 /*
  * Copy n bytes from src to the brd starting at sector. Does not sleep.
  */
@@ -300,6 +340,12 @@ static int brd_make_request(struct request_queue *q, struct bio *bio)
 					get_capacity(bdev->bd_disk))
 		goto out;
 
+	if (unlikely(bio_rw_flagged(bio, BIO_RW_DISCARD))) {
+		err = 0;
+		discard_from_brd(brd, sector, bio->bi_size);
+		goto out;
+	}
+
 	rw = bio_rw(bio);
 	if (rw == READA)
 		rw = READ;
@@ -320,7 +366,7 @@ static int brd_make_request(struct request_queue *q, struct bio *bio)
 }
 
 #ifdef CONFIG_BLK_DEV_XIP
-static int brd_direct_access (struct block_device *bdev, sector_t sector,
+static int brd_direct_access(struct block_device *bdev, sector_t sector,
 			void **kaddr, unsigned long *pfn)
 {
 	struct brd_device *brd = bdev->bd_disk->private_data;
@@ -437,6 +483,11 @@ static struct brd_device *brd_alloc(int i)
 	blk_queue_max_hw_sectors(brd->brd_queue, 1024);
 	blk_queue_bounce_limit(brd->brd_queue, BLK_BOUNCE_ANY);
 
+	brd->brd_queue->limits.discard_granularity = PAGE_SIZE;
+	brd->brd_queue->limits.max_discard_sectors = UINT_MAX;
+	brd->brd_queue->limits.discard_zeroes_data = 1;
+	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, brd->brd_queue);
+
 	disk = brd->brd_disk = alloc_disk(1 << part_shift);
 	if (!disk)
 		goto out_free_queue;