Commit af22941a authored by Linus Torvalds's avatar Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.dk/linux-block

Pull block layer fixes from Jens Axboe:
 "Just a set of small fixes that have either been queued up after the
  original pull for this merge window, or just missed the original pull
  request.

   - a few bcache fixes/changes from Eric and Kent

 - add WRITE_SAME to the command filter whitelist from Mauricio

   - kill an unused struct member from Ritesh

   - partition IO alignment fix from Stefan

   - nvme sysfs printf fix from Stephen"

* 'for-linus' of git://git.kernel.dk/linux-block:
  block: check partition alignment
  nvme : Use correct scnprintf in cmb show
  block: allow WRITE_SAME commands with the SG_IO ioctl
  block: Remove unused member (busy) from struct blk_queue_tag
  bcache: partition support: add 16 minors per bcacheN device
  bcache: Make gc wakeup sane, remove set_task_state()
parents 9be962d5 633395b6
...@@ -45,6 +45,9 @@ static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user ...@@ -45,6 +45,9 @@ static int blkpg_ioctl(struct block_device *bdev, struct blkpg_ioctl_arg __user
|| pstart < 0 || plength < 0 || partno > 65535) || pstart < 0 || plength < 0 || partno > 65535)
return -EINVAL; return -EINVAL;
} }
/* check if partition is aligned to blocksize */
if (p.start & (bdev_logical_block_size(bdev) - 1))
return -EINVAL;
mutex_lock(&bdev->bd_mutex); mutex_lock(&bdev->bd_mutex);
......
...@@ -182,6 +182,9 @@ static void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter) ...@@ -182,6 +182,9 @@ static void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter)
__set_bit(WRITE_16, filter->write_ok); __set_bit(WRITE_16, filter->write_ok);
__set_bit(WRITE_LONG, filter->write_ok); __set_bit(WRITE_LONG, filter->write_ok);
__set_bit(WRITE_LONG_2, filter->write_ok); __set_bit(WRITE_LONG_2, filter->write_ok);
__set_bit(WRITE_SAME, filter->write_ok);
__set_bit(WRITE_SAME_16, filter->write_ok);
__set_bit(WRITE_SAME_32, filter->write_ok);
__set_bit(ERASE, filter->write_ok); __set_bit(ERASE, filter->write_ok);
__set_bit(GPCMD_MODE_SELECT_10, filter->write_ok); __set_bit(GPCMD_MODE_SELECT_10, filter->write_ok);
__set_bit(MODE_SELECT, filter->write_ok); __set_bit(MODE_SELECT, filter->write_ok);
......
...@@ -425,7 +425,7 @@ struct cache { ...@@ -425,7 +425,7 @@ struct cache {
* until a gc finishes - otherwise we could pointlessly burn a ton of * until a gc finishes - otherwise we could pointlessly burn a ton of
* cpu * cpu
*/ */
unsigned invalidate_needs_gc:1; unsigned invalidate_needs_gc;
bool discard; /* Get rid of? */ bool discard; /* Get rid of? */
...@@ -593,8 +593,8 @@ struct cache_set { ...@@ -593,8 +593,8 @@ struct cache_set {
/* Counts how many sectors bio_insert has added to the cache */ /* Counts how many sectors bio_insert has added to the cache */
atomic_t sectors_to_gc; atomic_t sectors_to_gc;
wait_queue_head_t gc_wait;
wait_queue_head_t moving_gc_wait;
struct keybuf moving_gc_keys; struct keybuf moving_gc_keys;
/* Number of moving GC bios in flight */ /* Number of moving GC bios in flight */
struct semaphore moving_in_flight; struct semaphore moving_in_flight;
......
...@@ -1757,32 +1757,34 @@ static void bch_btree_gc(struct cache_set *c) ...@@ -1757,32 +1757,34 @@ static void bch_btree_gc(struct cache_set *c)
bch_moving_gc(c); bch_moving_gc(c);
} }
static int bch_gc_thread(void *arg) static bool gc_should_run(struct cache_set *c)
{ {
struct cache_set *c = arg;
struct cache *ca; struct cache *ca;
unsigned i; unsigned i;
while (1) { for_each_cache(ca, c, i)
again: if (ca->invalidate_needs_gc)
bch_btree_gc(c); return true;
set_current_state(TASK_INTERRUPTIBLE); if (atomic_read(&c->sectors_to_gc) < 0)
if (kthread_should_stop()) return true;
break;
mutex_lock(&c->bucket_lock); return false;
}
for_each_cache(ca, c, i) static int bch_gc_thread(void *arg)
if (ca->invalidate_needs_gc) { {
mutex_unlock(&c->bucket_lock); struct cache_set *c = arg;
set_current_state(TASK_RUNNING);
goto again;
}
mutex_unlock(&c->bucket_lock); while (1) {
wait_event_interruptible(c->gc_wait,
kthread_should_stop() || gc_should_run(c));
schedule(); if (kthread_should_stop())
break;
set_gc_sectors(c);
bch_btree_gc(c);
} }
return 0; return 0;
...@@ -1790,11 +1792,10 @@ static int bch_gc_thread(void *arg) ...@@ -1790,11 +1792,10 @@ static int bch_gc_thread(void *arg)
int bch_gc_thread_start(struct cache_set *c) int bch_gc_thread_start(struct cache_set *c)
{ {
c->gc_thread = kthread_create(bch_gc_thread, c, "bcache_gc"); c->gc_thread = kthread_run(bch_gc_thread, c, "bcache_gc");
if (IS_ERR(c->gc_thread)) if (IS_ERR(c->gc_thread))
return PTR_ERR(c->gc_thread); return PTR_ERR(c->gc_thread);
set_task_state(c->gc_thread, TASK_INTERRUPTIBLE);
return 0; return 0;
} }
......
...@@ -260,8 +260,7 @@ void bch_initial_mark_key(struct cache_set *, int, struct bkey *); ...@@ -260,8 +260,7 @@ void bch_initial_mark_key(struct cache_set *, int, struct bkey *);
static inline void wake_up_gc(struct cache_set *c) static inline void wake_up_gc(struct cache_set *c)
{ {
if (c->gc_thread) wake_up(&c->gc_wait);
wake_up_process(c->gc_thread);
} }
#define MAP_DONE 0 #define MAP_DONE 0
......
...@@ -196,10 +196,8 @@ static void bch_data_insert_start(struct closure *cl) ...@@ -196,10 +196,8 @@ static void bch_data_insert_start(struct closure *cl)
struct data_insert_op *op = container_of(cl, struct data_insert_op, cl); struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
struct bio *bio = op->bio, *n; struct bio *bio = op->bio, *n;
if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0) { if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0)
set_gc_sectors(op->c);
wake_up_gc(op->c); wake_up_gc(op->c);
}
if (op->bypass) if (op->bypass)
return bch_data_invalidate(cl); return bch_data_invalidate(cl);
......
...@@ -58,6 +58,7 @@ static wait_queue_head_t unregister_wait; ...@@ -58,6 +58,7 @@ static wait_queue_head_t unregister_wait;
struct workqueue_struct *bcache_wq; struct workqueue_struct *bcache_wq;
#define BTREE_MAX_PAGES (256 * 1024 / PAGE_SIZE) #define BTREE_MAX_PAGES (256 * 1024 / PAGE_SIZE)
#define BCACHE_MINORS 16 /* partition support */
/* Superblock */ /* Superblock */
...@@ -783,8 +784,10 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size, ...@@ -783,8 +784,10 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size,
if (minor < 0) if (minor < 0)
return minor; return minor;
minor *= BCACHE_MINORS;
if (!(d->bio_split = bioset_create(4, offsetof(struct bbio, bio))) || if (!(d->bio_split = bioset_create(4, offsetof(struct bbio, bio))) ||
!(d->disk = alloc_disk(1))) { !(d->disk = alloc_disk(BCACHE_MINORS))) {
ida_simple_remove(&bcache_minor, minor); ida_simple_remove(&bcache_minor, minor);
return -ENOMEM; return -ENOMEM;
} }
...@@ -1489,6 +1492,7 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb) ...@@ -1489,6 +1492,7 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
mutex_init(&c->bucket_lock); mutex_init(&c->bucket_lock);
init_waitqueue_head(&c->btree_cache_wait); init_waitqueue_head(&c->btree_cache_wait);
init_waitqueue_head(&c->bucket_wait); init_waitqueue_head(&c->bucket_wait);
init_waitqueue_head(&c->gc_wait);
sema_init(&c->uuid_write_mutex, 1); sema_init(&c->uuid_write_mutex, 1);
spin_lock_init(&c->btree_gc_time.lock); spin_lock_init(&c->btree_gc_time.lock);
...@@ -1548,6 +1552,7 @@ static void run_cache_set(struct cache_set *c) ...@@ -1548,6 +1552,7 @@ static void run_cache_set(struct cache_set *c)
for_each_cache(ca, c, i) for_each_cache(ca, c, i)
c->nbuckets += ca->sb.nbuckets; c->nbuckets += ca->sb.nbuckets;
set_gc_sectors(c);
if (CACHE_SYNC(&c->sb)) { if (CACHE_SYNC(&c->sb)) {
LIST_HEAD(journal); LIST_HEAD(journal);
......
...@@ -50,7 +50,7 @@ ...@@ -50,7 +50,7 @@
#define NVME_AQ_DEPTH 256 #define NVME_AQ_DEPTH 256
#define SQ_SIZE(depth) (depth * sizeof(struct nvme_command)) #define SQ_SIZE(depth) (depth * sizeof(struct nvme_command))
#define CQ_SIZE(depth) (depth * sizeof(struct nvme_completion)) #define CQ_SIZE(depth) (depth * sizeof(struct nvme_completion))
/* /*
* We handle AEN commands ourselves and don't even let the * We handle AEN commands ourselves and don't even let the
* block layer know about them. * block layer know about them.
...@@ -1349,7 +1349,7 @@ static ssize_t nvme_cmb_show(struct device *dev, ...@@ -1349,7 +1349,7 @@ static ssize_t nvme_cmb_show(struct device *dev,
{ {
struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev)); struct nvme_dev *ndev = to_nvme_dev(dev_get_drvdata(dev));
return snprintf(buf, PAGE_SIZE, "cmbloc : x%08x\ncmbsz : x%08x\n", return scnprintf(buf, PAGE_SIZE, "cmbloc : x%08x\ncmbsz : x%08x\n",
ndev->cmbloc, ndev->cmbsz); ndev->cmbloc, ndev->cmbsz);
} }
static DEVICE_ATTR(cmb, S_IRUGO, nvme_cmb_show, NULL); static DEVICE_ATTR(cmb, S_IRUGO, nvme_cmb_show, NULL);
......
...@@ -288,7 +288,6 @@ enum blk_queue_state { ...@@ -288,7 +288,6 @@ enum blk_queue_state {
struct blk_queue_tag { struct blk_queue_tag {
struct request **tag_index; /* map of busy tags */ struct request **tag_index; /* map of busy tags */
unsigned long *tag_map; /* bit map of free/busy tags */ unsigned long *tag_map; /* bit map of free/busy tags */
int busy; /* current depth */
int max_depth; /* what we will send to device */ int max_depth; /* what we will send to device */
int real_max_depth; /* what the array can hold */ int real_max_depth; /* what the array can hold */
atomic_t refcnt; /* map can be shared */ atomic_t refcnt; /* map can be shared */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment