Commit 64b4fc45 authored by Linus Torvalds

Merge tag 'block-5.14-2021-08-27' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:

 - Revert the mq-deadline priority handling, as it's causing serious
   performance regressions. While experimental patches exist to fix
   this up, it's too late to do so now. Revert it and re-do it properly
   for 5.15 instead.

 - Fix a NULL vs IS_ERR() regression in this release (Dan)

 - Fix a mq-deadline accounting regression in this release (Bart)

 - Mark cryptoloop as deprecated. It's broken, dm-crypt fully covers
   the same use cases, and it's actively interfering with loop. Plan on
   removal for 5.16 (Christoph)

* tag 'block-5.14-2021-08-27' of git://git.kernel.dk/linux-block:
  cryptoloop: add a deprecation warning
  pd: fix a NULL vs IS_ERR() check
  Revert "block/mq-deadline: Prioritize high-priority requests"
  mq-deadline: Fix request accounting
parents 6f18b82b 222013f9
--- a/block/mq-deadline.c
+++ b/block/mq-deadline.c
@@ -31,11 +31,6 @@
  */
 static const int read_expire = HZ / 2;  /* max time before a read is submitted. */
 static const int write_expire = 5 * HZ; /* ditto for writes, these limits are SOFT! */
-/*
- * Time after which to dispatch lower priority requests even if higher
- * priority requests are pending.
- */
-static const int aging_expire = 10 * HZ;
 static const int writes_starved = 2;    /* max times reads can starve a write */
 static const int fifo_batch = 16;       /* # of sequential requests treated as one
					    by the above parameters. For throughput. */
@@ -103,7 +98,6 @@ struct deadline_data {
 	int writes_starved;
 	int front_merges;
 	u32 async_depth;
-	int aging_expire;
 
 	spinlock_t lock;
 	spinlock_t zone_lock;
@@ -369,11 +363,10 @@ deadline_next_request(struct deadline_data *dd, struct dd_per_prio *per_prio,
 
 /*
  * deadline_dispatch_requests selects the best request according to
- * read/write expire, fifo_batch, etc and with a start time <= @latest.
+ * read/write expire, fifo_batch, etc
  */
 static struct request *__dd_dispatch_request(struct deadline_data *dd,
-					     struct dd_per_prio *per_prio,
-					     u64 latest_start_ns)
+					     struct dd_per_prio *per_prio)
 {
 	struct request *rq, *next_rq;
 	enum dd_data_dir data_dir;
@@ -385,8 +378,6 @@ static struct request *__dd_dispatch_request(struct deadline_data *dd,
 	if (!list_empty(&per_prio->dispatch)) {
 		rq = list_first_entry(&per_prio->dispatch, struct request,
 				      queuelist);
-		if (rq->start_time_ns > latest_start_ns)
-			return NULL;
 		list_del_init(&rq->queuelist);
 		goto done;
 	}
@@ -464,8 +455,6 @@ static struct request *__dd_dispatch_request(struct deadline_data *dd,
 	dd->batching = 0;
 
 dispatch_request:
-	if (rq->start_time_ns > latest_start_ns)
-		return NULL;
 	/*
	 * rq is the selected appropriate request.
	 */
@@ -494,32 +483,15 @@ static struct request *__dd_dispatch_request(struct deadline_data *dd,
 static struct request *dd_dispatch_request(struct blk_mq_hw_ctx *hctx)
 {
 	struct deadline_data *dd = hctx->queue->elevator->elevator_data;
-	const u64 now_ns = ktime_get_ns();
-	struct request *rq = NULL;
+	struct request *rq;
 	enum dd_prio prio;
 
 	spin_lock(&dd->lock);
-	/*
-	 * Start with dispatching requests whose deadline expired more than
-	 * aging_expire jiffies ago.
-	 */
-	for (prio = DD_BE_PRIO; prio <= DD_PRIO_MAX; prio++) {
-		rq = __dd_dispatch_request(dd, &dd->per_prio[prio], now_ns -
-					   jiffies_to_nsecs(dd->aging_expire));
-		if (rq)
-			goto unlock;
-	}
-	/*
-	 * Next, dispatch requests in priority order. Ignore lower priority
-	 * requests if any higher priority requests are pending.
-	 */
 	for (prio = 0; prio <= DD_PRIO_MAX; prio++) {
-		rq = __dd_dispatch_request(dd, &dd->per_prio[prio], now_ns);
-		if (rq || dd_queued(dd, prio))
+		rq = __dd_dispatch_request(dd, &dd->per_prio[prio]);
+		if (rq)
 			break;
 	}
-
-unlock:
 	spin_unlock(&dd->lock);
 
 	return rq;
@@ -620,7 +592,6 @@ static int dd_init_sched(struct request_queue *q, struct elevator_type *e)
 	dd->front_merges = 1;
 	dd->last_dir = DD_WRITE;
 	dd->fifo_batch = fifo_batch;
-	dd->aging_expire = aging_expire;
 	spin_lock_init(&dd->lock);
 	spin_lock_init(&dd->zone_lock);
@@ -711,6 +682,7 @@ static void dd_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
 
 	prio = ioprio_class_to_prio[ioprio_class];
 	dd_count(dd, inserted, prio);
+	rq->elv.priv[0] = (void *)(uintptr_t)1;
 
 	if (blk_mq_sched_try_insert_merge(q, rq, &free)) {
 		blk_mq_free_requests(&free);
@@ -759,12 +731,10 @@ static void dd_insert_requests(struct blk_mq_hw_ctx *hctx,
 	spin_unlock(&dd->lock);
 }
 
-/*
- * Nothing to do here. This is defined only to ensure that .finish_request
- * method is called upon request completion.
- */
+/* Callback from inside blk_mq_rq_ctx_init(). */
 static void dd_prepare_request(struct request *rq)
 {
+	rq->elv.priv[0] = NULL;
 }
 
 /*
@@ -791,7 +761,14 @@ static void dd_finish_request(struct request *rq)
 	const enum dd_prio prio = ioprio_class_to_prio[ioprio_class];
 	struct dd_per_prio *per_prio = &dd->per_prio[prio];
 
-	dd_count(dd, completed, prio);
+	/*
+	 * The block layer core may call dd_finish_request() without having
+	 * called dd_insert_requests(). Hence only update statistics for
+	 * requests for which dd_insert_requests() has been called. See also
+	 * blk_mq_request_bypass_insert().
+	 */
+	if (rq->elv.priv[0])
+		dd_count(dd, completed, prio);
 
 	if (blk_queue_is_zoned(q)) {
 		unsigned long flags;
@@ -836,7 +813,6 @@ static ssize_t __FUNC(struct elevator_queue *e, char *page)	\
 #define SHOW_JIFFIES(__FUNC, __VAR) SHOW_INT(__FUNC, jiffies_to_msecs(__VAR))
 SHOW_JIFFIES(deadline_read_expire_show, dd->fifo_expire[DD_READ]);
 SHOW_JIFFIES(deadline_write_expire_show, dd->fifo_expire[DD_WRITE]);
-SHOW_JIFFIES(deadline_aging_expire_show, dd->aging_expire);
 SHOW_INT(deadline_writes_starved_show, dd->writes_starved);
 SHOW_INT(deadline_front_merges_show, dd->front_merges);
 SHOW_INT(deadline_async_depth_show, dd->front_merges);
@@ -866,7 +842,6 @@ static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count)	\
 	STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, msecs_to_jiffies)
 STORE_JIFFIES(deadline_read_expire_store, &dd->fifo_expire[DD_READ], 0, INT_MAX);
 STORE_JIFFIES(deadline_write_expire_store, &dd->fifo_expire[DD_WRITE], 0, INT_MAX);
-STORE_JIFFIES(deadline_aging_expire_store, &dd->aging_expire, 0, INT_MAX);
 STORE_INT(deadline_writes_starved_store, &dd->writes_starved, INT_MIN, INT_MAX);
 STORE_INT(deadline_front_merges_store, &dd->front_merges, 0, 1);
 STORE_INT(deadline_async_depth_store, &dd->front_merges, 1, INT_MAX);
@@ -885,7 +860,6 @@ static struct elv_fs_entry deadline_attrs[] = {
 	DD_ATTR(front_merges),
 	DD_ATTR(async_depth),
 	DD_ATTR(fifo_batch),
-	DD_ATTR(aging_expire),
 	__ATTR_NULL
 };
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -213,7 +213,7 @@ config BLK_DEV_LOOP_MIN_COUNT
	  dynamically allocated with the /dev/loop-control interface.
 
 config BLK_DEV_CRYPTOLOOP
-	tristate "Cryptoloop Support"
+	tristate "Cryptoloop Support (DEPRECATED)"
 	select CRYPTO
 	select CRYPTO_CBC
 	depends on BLK_DEV_LOOP
@@ -225,7 +225,7 @@ config BLK_DEV_CRYPTOLOOP
	  WARNING: This device is not safe for journaled file systems like
	  ext3 or Reiserfs. Please use the Device Mapper crypto module
	  instead, which can be configured to be on-disk compatible with the
-	  cryptoloop device.
+	  cryptoloop device. cryptoloop support will be removed in Linux 5.16.
 
 source "drivers/block/drbd/Kconfig"
--- a/drivers/block/cryptoloop.c
+++ b/drivers/block/cryptoloop.c
@@ -189,6 +189,8 @@ init_cryptoloop(void)
 
 	if (rc)
 		printk(KERN_ERR "cryptoloop: loop_register_transfer failed\n");
+	else
+		pr_warn("the cryptoloop driver has been deprecated and will be removed in Linux 5.16\n");
 	return rc;
 }
--- a/drivers/block/paride/pd.c
+++ b/drivers/block/paride/pd.c
@@ -892,7 +892,7 @@ static void pd_probe_drive(struct pd_unit *disk)
 		return;
 
 	p = blk_mq_alloc_disk(&disk->tag_set, disk);
-	if (!p) {
+	if (IS_ERR(p)) {
 		blk_mq_free_tag_set(&disk->tag_set);
 		return;
 	}
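For context on the pd fix: blk_mq_alloc_disk() reports failure as an ERR_PTR()-encoded errno, never as NULL, so the old "if (!p)" test could not catch a failed allocation. A minimal sketch of the idiom; probe_example() is a hypothetical caller, not from the patch:

#include <linux/blk-mq.h>
#include <linux/err.h>

/* Hypothetical caller, not from the patch: shows the ERR_PTR idiom. */
static int probe_example(struct blk_mq_tag_set *set, void *queuedata)
{
	struct gendisk *p = blk_mq_alloc_disk(set, queuedata);

	if (IS_ERR(p))			/* correct: the failure is ERR_PTR(-errno) */
		return PTR_ERR(p);	/* e.g. -ENOMEM */

	/* A NULL check here would never fire, silently accepting the error. */
	return 0;
}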