Commit c8864cb7 authored by Linus Torvalds

Merge tag 'for-linus-20190202' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:
 "A few fixes that should go into this release. This contains:

   - MD pull request from Song, fixing a recovery OOM issue (Alexei)

   - Fix for a sync related stall (Jianchao)

   - Dummy callback for timeouts (Tetsuo)

   - IDE atapi sense ordering fix (me)"

* tag 'for-linus-20190202' of git://git.kernel.dk/linux-block:
  ide: ensure atapi sense request aren't preempted
  blk-mq: fix a hung issue when fsync
  block: pass no-op callback to INIT_WORK().
  md/raid5: fix 'out of memory' during raid cache recovery
parents 3cde55ee 9a6d5488
@@ -462,6 +462,10 @@ static void blk_rq_timed_out_timer(struct timer_list *t)
         kblockd_schedule_work(&q->timeout_work);
 }
 
+static void blk_timeout_work(struct work_struct *work)
+{
+}
+
 /**
  * blk_alloc_queue_node - allocate a request queue
  * @gfp_mask: memory allocation flags
@@ -505,7 +509,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
         timer_setup(&q->backing_dev_info->laptop_mode_wb_timer,
                     laptop_mode_timer_fn, 0);
         timer_setup(&q->timeout, blk_rq_timed_out_timer, 0);
-        INIT_WORK(&q->timeout_work, NULL);
+        INIT_WORK(&q->timeout_work, blk_timeout_work);
         INIT_LIST_HEAD(&q->icq_list);
 #ifdef CONFIG_BLK_CGROUP
         INIT_LIST_HEAD(&q->blkg_list);
...
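The workqueue core assumes every initialized work item carries a usable handler, so leaving q->timeout_work's callback NULL invites trouble the first time anything schedules or flushes it; the fix installs an empty function for queues that have no real timeout handling to do. Below is a minimal userspace sketch of the same defensive pattern; the names (work_item, run_work, noop_work) are invented for illustration and are not kernel APIs.

/*
 * Illustrative userspace sketch, not kernel code: a dispatcher that calls a
 * stored callback unconditionally, and a no-op handler installed where there
 * is nothing to do.
 */
#include <stdio.h>

struct work_item {
        void (*func)(struct work_item *w);      /* assumed valid, never checked */
};

static void noop_work(struct work_item *w)
{
        (void)w;        /* intentionally empty: nothing to do on timeout */
}

static void run_work(struct work_item *w)
{
        w->func(w);     /* no NULL check, like a typical dispatch loop */
}

int main(void)
{
        /* initializing with a real (if empty) handler keeps dispatch safe */
        struct work_item timeout_work = { .func = noop_work };

        run_work(&timeout_work);
        puts("timeout work dispatched safely");
        return 0;
}

Installing a do-nothing handler once at init time keeps every dispatch and flush site simple, rather than sprinkling NULL checks in front of each one.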
@@ -335,7 +335,7 @@ static void mq_flush_data_end_io(struct request *rq, blk_status_t error)
         blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error);
         spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
 
-        blk_mq_run_hw_queue(hctx, true);
+        blk_mq_sched_restart(hctx);
 }
 
 /**
...
@@ -235,21 +235,28 @@ EXPORT_SYMBOL_GPL(ide_prep_sense);
 
 int ide_queue_sense_rq(ide_drive_t *drive, void *special)
 {
-        struct request *sense_rq = drive->sense_rq;
+        ide_hwif_t *hwif = drive->hwif;
+        struct request *sense_rq;
+        unsigned long flags;
+
+        spin_lock_irqsave(&hwif->lock, flags);
 
         /* deferred failure from ide_prep_sense() */
         if (!drive->sense_rq_armed) {
                 printk(KERN_WARNING PFX "%s: error queuing a sense request\n",
                        drive->name);
+                spin_unlock_irqrestore(&hwif->lock, flags);
                 return -ENOMEM;
         }
 
+        sense_rq = drive->sense_rq;
         ide_req(sense_rq)->special = special;
         drive->sense_rq_armed = false;
 
         drive->hwif->rq = NULL;
 
         ide_insert_request_head(drive, sense_rq);
+        spin_unlock_irqrestore(&hwif->lock, flags);
         return 0;
 }
 EXPORT_SYMBOL_GPL(ide_queue_sense_rq);
...
@@ -68,8 +68,10 @@ int ide_end_rq(ide_drive_t *drive, struct request *rq, blk_status_t error,
         }
 
         if (!blk_update_request(rq, error, nr_bytes)) {
-                if (rq == drive->sense_rq)
+                if (rq == drive->sense_rq) {
                         drive->sense_rq = NULL;
+                        drive->sense_rq_active = false;
+                }
 
                 __blk_mq_end_request(rq, error);
                 return 0;
@@ -451,16 +453,11 @@ void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq)
                 blk_mq_delay_run_hw_queue(q->queue_hw_ctx[0], 3);
 }
 
-/*
- * Issue a new request to a device.
- */
-blk_status_t ide_queue_rq(struct blk_mq_hw_ctx *hctx,
-                          const struct blk_mq_queue_data *bd)
+blk_status_t ide_issue_rq(ide_drive_t *drive, struct request *rq,
+                          bool local_requeue)
 {
-        ide_drive_t *drive = hctx->queue->queuedata;
         ide_hwif_t *hwif = drive->hwif;
         struct ide_host *host = hwif->host;
-        struct request *rq = bd->rq;
         ide_startstop_t startstop;
 
         if (!blk_rq_is_passthrough(rq) && !(rq->rq_flags & RQF_DONTPREP)) {
@@ -474,8 +471,6 @@ blk_status_t ide_queue_rq(struct blk_mq_hw_ctx *hctx,
         if (ide_lock_host(host, hwif))
                 return BLK_STS_DEV_RESOURCE;
 
-        blk_mq_start_request(rq);
-
         spin_lock_irq(&hwif->lock);
 
         if (!ide_lock_port(hwif)) {
@@ -510,18 +505,6 @@ blk_status_t ide_queue_rq(struct blk_mq_hw_ctx *hctx,
                 hwif->cur_dev = drive;
                 drive->dev_flags &= ~(IDE_DFLAG_SLEEPING | IDE_DFLAG_PARKED);
 
-                /*
-                 * we know that the queue isn't empty, but this can happen
-                 * if ->prep_rq() decides to kill a request
-                 */
-                if (!rq) {
-                        rq = bd->rq;
-                        if (!rq) {
-                                ide_unlock_port(hwif);
-                                goto out;
-                        }
-                }
-
                 /*
                  * Sanity: don't accept a request that isn't a PM request
                  * if we are currently power managed. This is very important as
@@ -560,9 +543,12 @@ blk_status_t ide_queue_rq(struct blk_mq_hw_ctx *hctx,
                 }
         } else {
 plug_device:
+                if (local_requeue)
+                        list_add(&rq->queuelist, &drive->rq_list);
                 spin_unlock_irq(&hwif->lock);
                 ide_unlock_host(host);
-                ide_requeue_and_plug(drive, rq);
+                if (!local_requeue)
+                        ide_requeue_and_plug(drive, rq);
                 return BLK_STS_OK;
         }
 
@@ -573,6 +559,26 @@ blk_status_t ide_queue_rq(struct blk_mq_hw_ctx *hctx,
         return BLK_STS_OK;
 }
 
+/*
+ * Issue a new request to a device.
+ */
+blk_status_t ide_queue_rq(struct blk_mq_hw_ctx *hctx,
+                          const struct blk_mq_queue_data *bd)
+{
+        ide_drive_t *drive = hctx->queue->queuedata;
+        ide_hwif_t *hwif = drive->hwif;
+
+        spin_lock_irq(&hwif->lock);
+        if (drive->sense_rq_active) {
+                spin_unlock_irq(&hwif->lock);
+                return BLK_STS_DEV_RESOURCE;
+        }
+        spin_unlock_irq(&hwif->lock);
+
+        blk_mq_start_request(bd->rq);
+        return ide_issue_rq(drive, bd->rq, false);
+}
+
 static int drive_is_ready(ide_drive_t *drive)
 {
         ide_hwif_t *hwif = drive->hwif;
@@ -893,13 +899,8 @@ EXPORT_SYMBOL_GPL(ide_pad_transfer);
 
 void ide_insert_request_head(ide_drive_t *drive, struct request *rq)
 {
-        ide_hwif_t *hwif = drive->hwif;
-        unsigned long flags;
-
-        spin_lock_irqsave(&hwif->lock, flags);
+        drive->sense_rq_active = true;
         list_add_tail(&rq->queuelist, &drive->rq_list);
-        spin_unlock_irqrestore(&hwif->lock, flags);
-
         kblockd_schedule_work(&drive->rq_work);
 }
 EXPORT_SYMBOL_GPL(ide_insert_request_head);
@@ -54,7 +54,9 @@ static void issue_park_cmd(ide_drive_t *drive, unsigned long timeout)
         scsi_req(rq)->cmd[0] = REQ_UNPARK_HEADS;
         scsi_req(rq)->cmd_len = 1;
         ide_req(rq)->type = ATA_PRIV_MISC;
+        spin_lock_irq(&hwif->lock);
         ide_insert_request_head(drive, rq);
+        spin_unlock_irq(&hwif->lock);
 
 out:
         return;
...
@@ -1159,18 +1159,27 @@ static void drive_rq_insert_work(struct work_struct *work)
         ide_drive_t *drive = container_of(work, ide_drive_t, rq_work);
         ide_hwif_t *hwif = drive->hwif;
         struct request *rq;
+        blk_status_t ret;
         LIST_HEAD(list);
 
-        spin_lock_irq(&hwif->lock);
-        if (!list_empty(&drive->rq_list))
-                list_splice_init(&drive->rq_list, &list);
-        spin_unlock_irq(&hwif->lock);
+        blk_mq_quiesce_queue(drive->queue);
 
-        while (!list_empty(&list)) {
-                rq = list_first_entry(&list, struct request, queuelist);
+        ret = BLK_STS_OK;
+        spin_lock_irq(&hwif->lock);
+        while (!list_empty(&drive->rq_list)) {
+                rq = list_first_entry(&drive->rq_list, struct request, queuelist);
                 list_del_init(&rq->queuelist);
-                blk_execute_rq_nowait(drive->queue, rq->rq_disk, rq, true, NULL);
+
+                spin_unlock_irq(&hwif->lock);
+                ret = ide_issue_rq(drive, rq, true);
+                spin_lock_irq(&hwif->lock);
         }
+        spin_unlock_irq(&hwif->lock);
+
+        blk_mq_unquiesce_queue(drive->queue);
+
+        if (ret != BLK_STS_OK)
+                kblockd_schedule_work(&drive->rq_work);
 }
 
 static const u8 ide_hwif_to_major[] =
...
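Taken together, the IDE hunks above make arming the sense request, clearing hwif->rq and inserting the sense request at the head of drive->rq_list one atomic step under hwif->lock, and ide_queue_rq() now turns away new work with BLK_STS_DEV_RESOURCE while sense_rq_active is set, so ordinary requests can no longer preempt a pending sense request. A rough userspace sketch of that gating idea follows; a pthread mutex stands in for the spinlock, and the names are invented rather than the driver's API.

/*
 * Userspace sketch, illustrative names only: while a "sense" request is
 * armed, normal submissions are deferred so it cannot be preempted.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool sense_active;               /* mirrors drive->sense_rq_active */

/* normal submission path: back off while the sense request is pending */
static int queue_normal_rq(const char *name)
{
        pthread_mutex_lock(&lock);
        if (sense_active) {
                pthread_mutex_unlock(&lock);
                printf("%s: deferred, sense request pending\n", name);
                return -1;              /* caller retries later */
        }
        pthread_mutex_unlock(&lock);
        printf("%s: dispatched\n", name);
        return 0;
}

/* sense path: arm the flag and (conceptually) insert at the queue head */
static void queue_sense_rq(void)
{
        pthread_mutex_lock(&lock);
        sense_active = true;
        pthread_mutex_unlock(&lock);
}

static void complete_sense_rq(void)
{
        pthread_mutex_lock(&lock);
        sense_active = false;
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        queue_sense_rq();
        queue_normal_rq("read");        /* turned away */
        complete_sense_rq();
        queue_normal_rq("read");        /* dispatched */
        return 0;
}

In the driver, deferred submissions are eventually retried from drive_rq_insert_work(); in this toy version the caller would simply resubmit later.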
@@ -1935,12 +1935,14 @@ r5l_recovery_replay_one_stripe(struct r5conf *conf,
 }
 
 static struct stripe_head *
-r5c_recovery_alloc_stripe(struct r5conf *conf,
-                          sector_t stripe_sect)
+r5c_recovery_alloc_stripe(
+                struct r5conf *conf,
+                sector_t stripe_sect,
+                int noblock)
 {
         struct stripe_head *sh;
 
-        sh = raid5_get_active_stripe(conf, stripe_sect, 0, 1, 0);
+        sh = raid5_get_active_stripe(conf, stripe_sect, 0, noblock, 0);
         if (!sh)
                 return NULL;  /* no more stripe available */
 
@@ -2150,7 +2152,7 @@ r5c_recovery_analyze_meta_block(struct r5l_log *log,
                                       stripe_sect);
 
                 if (!sh) {
-                        sh = r5c_recovery_alloc_stripe(conf, stripe_sect);
+                        sh = r5c_recovery_alloc_stripe(conf, stripe_sect, 1);
                         /*
                          * cannot get stripe from raid5_get_active_stripe
                          * try replay some stripes
@@ -2159,20 +2161,29 @@ r5c_recovery_analyze_meta_block(struct r5l_log *log,
                                 r5c_recovery_replay_stripes(
                                         cached_stripe_list, ctx);
                                 sh = r5c_recovery_alloc_stripe(
-                                        conf, stripe_sect);
+                                        conf, stripe_sect, 1);
                         }
                         if (!sh) {
+                                int new_size = conf->min_nr_stripes * 2;
                                 pr_debug("md/raid:%s: Increasing stripe cache size to %d to recovery data on journal.\n",
                                         mdname(mddev),
-                                        conf->min_nr_stripes * 2);
-                                raid5_set_cache_size(mddev,
-                                                     conf->min_nr_stripes * 2);
-                                sh = r5c_recovery_alloc_stripe(conf,
-                                                               stripe_sect);
+                                        new_size);
+                                ret = raid5_set_cache_size(mddev, new_size);
+                                if (conf->min_nr_stripes <= new_size / 2) {
+                                        pr_err("md/raid:%s: Cannot increase cache size, ret=%d, new_size=%d, min_nr_stripes=%d, max_nr_stripes=%d\n",
+                                                mdname(mddev),
+                                                ret,
+                                                new_size,
+                                                conf->min_nr_stripes,
+                                                conf->max_nr_stripes);
+                                        return -ENOMEM;
+                                }
+                                sh = r5c_recovery_alloc_stripe(
+                                        conf, stripe_sect, 0);
                         }
                         if (!sh) {
                                 pr_err("md/raid:%s: Cannot get enough stripes due to memory pressure. Recovery failed.\n",
                                         mdname(mddev));
                                 return -ENOMEM;
                         }
                         list_add_tail(&sh->lru, cached_stripe_list);
...
@@ -6369,6 +6369,7 @@ raid5_show_stripe_cache_size(struct mddev *mddev, char *page)
 int
 raid5_set_cache_size(struct mddev *mddev, int size)
 {
+        int result = 0;
         struct r5conf *conf = mddev->private;
 
         if (size <= 16 || size > 32768)
@@ -6385,11 +6386,14 @@ raid5_set_cache_size(struct mddev *mddev, int size)
 
         mutex_lock(&conf->cache_size_mutex);
         while (size > conf->max_nr_stripes)
-                if (!grow_one_stripe(conf, GFP_KERNEL))
+                if (!grow_one_stripe(conf, GFP_KERNEL)) {
+                        conf->min_nr_stripes = conf->max_nr_stripes;
+                        result = -ENOMEM;
                         break;
+                }
         mutex_unlock(&conf->cache_size_mutex);
 
-        return 0;
+        return result;
 }
 EXPORT_SYMBOL(raid5_set_cache_size);
...
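The raid5 fix has two halves: raid5_set_cache_size() now reports -ENOMEM when it cannot grow the stripe cache all the way (clamping min_nr_stripes to what was actually allocated), and the journal-recovery path checks whether the cache grew past half of the requested size before deciding between retrying a blocking allocation and giving up. A simplified standalone sketch of that decision, with plain integers standing in for the stripe cache and invented names rather than md functions:

/*
 * Simplified sketch, not md code: grow a cache toward a target, report
 * -ENOMEM on partial failure, and let the caller decide whether the partial
 * growth is still enough to make progress.  Numbers are illustrative.
 */
#include <errno.h>
#include <stdio.h>

static int max_stripes = 256;   /* currently allocated stripes */
static int min_stripes = 256;   /* lower bound the code promises to keep */

/* grow one "stripe" at a time; fail past an arbitrary simulated limit */
static int set_cache_size(int size)
{
        min_stripes = size;
        while (max_stripes < size) {
                if (max_stripes >= 300) {               /* simulated allocation failure */
                        min_stripes = max_stripes;      /* clamp to what we really have */
                        return -ENOMEM;
                }
                max_stripes++;
        }
        return 0;
}

int main(void)
{
        int new_size = min_stripes * 2;
        int ret = set_cache_size(new_size);

        if (min_stripes <= new_size / 2) {
                /* cache did not grow past its old size: give up */
                printf("cannot increase cache size, ret=%d\n", ret);
                return 1;
        }

        /* partial growth (here 300 of 512) is still enough to keep going */
        printf("cache grew to %d stripes (ret=%d), continuing\n", max_stripes, ret);
        return 0;
}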
@@ -615,6 +615,7 @@ struct ide_drive_s {
 
         /* current sense rq and buffer */
         bool sense_rq_armed;
+        bool sense_rq_active;
         struct request *sense_rq;
         struct request_sense sense_data;
 
@@ -1219,6 +1220,7 @@ extern void ide_stall_queue(ide_drive_t *drive, unsigned long timeout);
 extern void ide_timer_expiry(struct timer_list *t);
 extern irqreturn_t ide_intr(int irq, void *dev_id);
 extern blk_status_t ide_queue_rq(struct blk_mq_hw_ctx *, const struct blk_mq_queue_data *);
+extern blk_status_t ide_issue_rq(ide_drive_t *, struct request *, bool);
 extern void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq);
 
 void ide_init_disk(struct gendisk *, ide_drive_t *);
...