Commit dc5db218 authored by Linus Torvalds

Merge tag 'for-linus-20181109' of git://git.kernel.dk/linux-block

Pull block layer fixes from Jens Axboe:

 - Two fixes for an ubd regression, one for missing locking, and one for
   a missing initialization of a field. The latter was an old latent
   bug, but it's now visible and triggers (Me, Anton Ivanov)

 - Set of NVMe fixes via Christoph, but applied manually due to a git
   tree mixup (Christoph, Sagi)

 - Fix for a discard split regression, in three patches (Ming)

 - Update stale libata git tree pointers in MAINTAINERS (Geert)

 - SPDX identifier for sata_rcar (Kuninori Morimoto)

 - Virtual boundary merge fix (Johannes)

 - Preemptively clear memory we are going to pass to userspace, in case
   the driver does a short read (Keith)

* tag 'for-linus-20181109' of git://git.kernel.dk/linux-block:
  block: make sure writesame bio is aligned with logical block size
  block: cleanup __blkdev_issue_discard()
  block: make sure discard bio is aligned with logical block size
  Revert "nvmet-rdma: use a private workqueue for delete"
  nvme: make sure ns head inherits underlying device limits
  nvmet: don't try to add ns to p2p map unless it actually uses it
  sata_rcar: convert to SPDX identifiers
  ubd: fix missing initialization of io_req
  block: Clear kernel memory before copying to user
  MAINTAINERS: Fix remaining pointers to obsolete libata.git
  ubd: fix missing lock around request issue
  block: respect virtual boundary mask in bvecs
parents d757a3b0 34ffec60
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -8367,7 +8367,7 @@ F:	drivers/media/dvb-frontends/lgdt3305.*
 LIBATA PATA ARASAN COMPACT FLASH CONTROLLER
 M:	Viresh Kumar <vireshk@kernel.org>
 L:	linux-ide@vger.kernel.org
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git
 S:	Maintained
 F:	include/linux/pata_arasan_cf_data.h
 F:	drivers/ata/pata_arasan_cf.c
@@ -8384,7 +8384,7 @@ F:	drivers/ata/ata_generic.c
 LIBATA PATA FARADAY FTIDE010 AND GEMINI SATA BRIDGE DRIVERS
 M:	Linus Walleij <linus.walleij@linaro.org>
 L:	linux-ide@vger.kernel.org
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git
 S:	Maintained
 F:	drivers/ata/pata_ftide010.c
 F:	drivers/ata/sata_gemini.c
@@ -8403,7 +8403,7 @@ F:	include/linux/ahci_platform.h
 LIBATA SATA PROMISE TX2/TX4 CONTROLLER DRIVER
 M:	Mikael Pettersson <mikpelinux@gmail.com>
 L:	linux-ide@vger.kernel.org
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/axboe/linux-block.git
 S:	Maintained
 F:	drivers/ata/sata_promise.*
--- a/arch/um/drivers/ubd_kern.c
+++ b/arch/um/drivers/ubd_kern.c
@@ -1305,6 +1305,7 @@ static int ubd_queue_one_vec(struct blk_mq_hw_ctx *hctx, struct request *req,
 		io_req->fds[0] = dev->cow.fd;
 	else
 		io_req->fds[0] = dev->fd;
+	io_req->error = 0;
 
 	if (req_op(req) == REQ_OP_FLUSH) {
 		io_req->op = UBD_FLUSH;
@@ -1313,9 +1314,7 @@ static int ubd_queue_one_vec(struct blk_mq_hw_ctx *hctx, struct request *req,
 		io_req->cow_offset = -1;
 		io_req->offset = off;
 		io_req->length = bvec->bv_len;
-		io_req->error = 0;
 		io_req->sector_mask = 0;
-
 		io_req->op = rq_data_dir(req) == READ ? UBD_READ : UBD_WRITE;
 		io_req->offsets[0] = 0;
 		io_req->offsets[1] = dev->cow.data_offset;
@@ -1341,11 +1340,14 @@ static int ubd_queue_one_vec(struct blk_mq_hw_ctx *hctx, struct request *req,
 static blk_status_t ubd_queue_rq(struct blk_mq_hw_ctx *hctx,
 				 const struct blk_mq_queue_data *bd)
 {
+	struct ubd *ubd_dev = hctx->queue->queuedata;
 	struct request *req = bd->rq;
 	int ret = 0;
 
 	blk_mq_start_request(req);
 
+	spin_lock_irq(&ubd_dev->lock);
+
 	if (req_op(req) == REQ_OP_FLUSH) {
 		ret = ubd_queue_one_vec(hctx, req, 0, NULL);
 	} else {
@@ -1361,9 +1363,11 @@ static blk_status_t ubd_queue_rq(struct blk_mq_hw_ctx *hctx,
 		}
 	}
 out:
-	if (ret < 0) {
+	spin_unlock_irq(&ubd_dev->lock);
+
+	if (ret < 0)
 		blk_mq_requeue_request(req, true);
-	}
+
 	return BLK_STS_OK;
 }
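Two things happen in the ubd hunks above: io_req->error is now initialized on every path (the old placement skipped FLUSH requests, the latent bug mentioned in the pull message), and request submission is serialized with the device lock, since blk-mq can invoke ->queue_rq() concurrently for the same device. A minimal userspace sketch of why that serialization matters, with a pthread mutex standing in for the kernel spinlock (the ring and every name below are illustrative, not from the driver):

/* Illustrative userspace analogue of the ubd_queue_rq() locking fix:
 * two submitters push requests into one shared ring, so the push must
 * be serialized. All names (req, ring, submit) are hypothetical. */
#include <pthread.h>
#include <stdio.h>

#define RING_SIZE 64

struct req { int op; unsigned long sector; };

static struct req ring[RING_SIZE];
static int head;                      /* next free slot */
static pthread_mutex_t ring_lock = PTHREAD_MUTEX_INITIALIZER;

static void submit(struct req r)
{
	/* Equivalent of spin_lock_irq(&ubd_dev->lock): without it, two
	 * threads can claim the same slot and corrupt a request. */
	pthread_mutex_lock(&ring_lock);
	ring[head % RING_SIZE] = r;
	head++;
	pthread_mutex_unlock(&ring_lock);
}

static void *worker(void *arg)
{
	for (int i = 0; i < 1000; i++)
		submit((struct req){ .op = 1, .sector = i });
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, worker, NULL);
	pthread_create(&b, NULL, worker, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	printf("submitted %d requests\n", head);
	return 0;
}

Dropping the lock/unlock pair makes the final count come up short or corrupts slots; that is the same class of race the spin_lock_irq() fix closes.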
--- a/block/bio.c
+++ b/block/bio.c
@@ -1260,6 +1260,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
 		if (ret)
 			goto cleanup;
 	} else {
+		zero_fill_bio(bio);
 		iov_iter_advance(iter, bio->bi_iter.bi_size);
 	}
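zero_fill_bio() pre-zeroes the bounce buffer, so a driver that completes a read short cannot leak whatever the allocator left in the tail back to userspace on copy-out. A userspace model of the same defensive pattern (driver_read and the buffer sizes are invented for the demo):

/* Userspace model of the bio_copy_user_iov() fix: the "driver" may
 * fill fewer bytes than requested; zeroing the bounce buffer first
 * keeps stale memory from leaking back on the copy-out. */
#include <stdio.h>
#include <string.h>

static size_t driver_read(char *buf, size_t len)
{
	(void)len;
	/* short read: the device returns only 3 of the requested bytes */
	memcpy(buf, "abc", 3);
	return 3;
}

int main(void)
{
	char bounce[16];

	/* equivalent of zero_fill_bio(bio): the tail beyond a short
	 * read is now zeroes, not leftover heap contents */
	memset(bounce, 0, sizeof(bounce));

	size_t got = driver_read(bounce, sizeof(bounce));
	printf("driver filled %zu bytes; byte 10 is %d\n", got, bounce[10]);
	return 0;
}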
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -51,16 +51,12 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 	if ((sector | nr_sects) & bs_mask)
 		return -EINVAL;
 
-	while (nr_sects) {
-		unsigned int req_sects = nr_sects;
-		sector_t end_sect;
-
-		if (!req_sects)
-			goto fail;
-		if (req_sects > UINT_MAX >> 9)
-			req_sects = UINT_MAX >> 9;
+	if (!nr_sects)
+		return -EINVAL;
 
-		end_sect = sector + req_sects;
+	while (nr_sects) {
+		unsigned int req_sects = min_t(unsigned int, nr_sects,
+				bio_allowed_max_sectors(q));
 
 		bio = blk_next_bio(bio, 0, gfp_mask);
 		bio->bi_iter.bi_sector = sector;
@@ -68,8 +64,8 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 		bio_set_op_attrs(bio, op, 0);
 
 		bio->bi_iter.bi_size = req_sects << 9;
+		sector += req_sects;
 		nr_sects -= req_sects;
-		sector = end_sect;
 
 		/*
 		 * We can loop for a long time in here, if someone does
@@ -82,14 +78,6 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 
 	*biop = bio;
 	return 0;
-
-fail:
-	if (bio) {
-		submit_bio_wait(bio);
-		bio_put(bio);
-	}
-	*biop = NULL;
-	return -EOPNOTSUPP;
 }
 EXPORT_SYMBOL(__blkdev_issue_discard);
 
@@ -161,7 +149,7 @@ static int __blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
 		return -EOPNOTSUPP;
 
 	/* Ensure that max_write_same_sectors doesn't overflow bi_size */
-	max_write_same_sectors = UINT_MAX >> 9;
+	max_write_same_sectors = bio_allowed_max_sectors(q);
 
 	while (nr_sects) {
 		bio = blk_next_bio(bio, 1, gfp_mask);
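The rewritten loop is plain bounded chunking: carve nr_sects into bios of at most bio_allowed_max_sectors(q) each and advance the sector cursor, with no fail label needed because the error check moved up front. A standalone sketch of the split (the helper is re-derived inline, and the 4096-byte logical block size is an assumption of the demo):

/* Standalone sketch of the __blkdev_issue_discard() splitting loop:
 * chunk a sector range into bios capped at an lbs-aligned maximum. */
#include <assert.h>
#include <limits.h>
#include <stdint.h>
#include <stdio.h>

static unsigned int bio_allowed_max_sectors(unsigned int lbs)
{
	/* round_down(UINT_MAX, lbs) >> 9, for power-of-two lbs */
	return (UINT_MAX & ~(lbs - 1)) >> 9;
}

int main(void)
{
	unsigned int lbs = 4096;
	uint64_t sector = 0, nr_sects = 3ULL << 24;	/* ~24 GiB of sectors */
	int bios = 0;

	while (nr_sects) {
		unsigned int req_sects = nr_sects < bio_allowed_max_sectors(lbs)
				? (unsigned int)nr_sects
				: bio_allowed_max_sectors(lbs);

		/* each bio's byte size stays logical-block aligned,
		 * given the caller passed an aligned (sector, nr_sects) */
		assert(((uint64_t)req_sects << 9) % lbs == 0);

		sector += req_sects;
		nr_sects -= req_sects;
		bios++;
	}
	printf("split into %d bios, ending at sector %llu\n",
	       bios, (unsigned long long)sector);
	return 0;
}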
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -46,7 +46,7 @@ static inline bool bio_will_gap(struct request_queue *q,
 		bio_get_first_bvec(prev_rq->bio, &pb);
 	else
 		bio_get_first_bvec(prev, &pb);
-	if (pb.bv_offset)
+	if (pb.bv_offset & queue_virt_boundary(q))
 		return true;
 
 	/*
@@ -90,7 +90,8 @@ static struct bio *blk_bio_discard_split(struct request_queue *q,
 	/* Zero-sector (unknown) and one-sector granularities are the same. */
 	granularity = max(q->limits.discard_granularity >> 9, 1U);
 
-	max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
+	max_discard_sectors = min(q->limits.max_discard_sectors,
+				  bio_allowed_max_sectors(q));
 	max_discard_sectors -= max_discard_sectors % granularity;
 
 	if (unlikely(!max_discard_sectors)) {
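The bio_will_gap() change means a first-bvec offset only counts as a gap when it actually crosses the queue's virtual boundary mask, rather than whenever it is non-zero, avoiding needless splits on devices whose boundary is smaller than PAGE_SIZE - 1. A small standalone comparison of the two predicates (the 4k mask is an example value):

/* Sketch of the virtual-boundary check change: an offset only matters
 * if it intersects the device's virt boundary mask. */
#include <stdbool.h>
#include <stdio.h>

static unsigned long queue_virt_boundary_mask = 4096 - 1;

static bool gap_old(unsigned int bv_offset)
{
	return bv_offset;			/* any offset => gap */
}

static bool gap_new(unsigned int bv_offset)
{
	return bv_offset & queue_virt_boundary_mask;
}

int main(void)
{
	unsigned int offsets[] = { 0, 512, 4096, 8192 };

	/* boundary-aligned offsets (4096, 8192) no longer flag a gap */
	for (int i = 0; i < 4; i++)
		printf("offset %5u: old=%d new=%d\n", offsets[i],
		       gap_old(offsets[i]), gap_new(offsets[i]));
	return 0;
}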
--- a/block/blk.h
+++ b/block/blk.h
@@ -169,7 +169,7 @@ static inline bool biovec_phys_mergeable(struct request_queue *q,
 static inline bool __bvec_gap_to_prev(struct request_queue *q,
 		struct bio_vec *bprv, unsigned int offset)
 {
-	return offset ||
+	return (offset & queue_virt_boundary(q)) ||
 		((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
 }
 
@@ -395,6 +395,16 @@ static inline unsigned long blk_rq_deadline(struct request *rq)
 	return rq->__deadline & ~0x1UL;
 }
 
+/*
+ * The max size one bio can handle is UINT_MAX because bvec_iter.bi_size
+ * is defined as 'unsigned int', and it has to be aligned to the logical
+ * block size, which is the minimum unit accepted by the hardware.
+ */
+static inline unsigned int bio_allowed_max_sectors(struct request_queue *q)
+{
+	return round_down(UINT_MAX, queue_logical_block_size(q)) >> 9;
+}
+
 /*
  * Internal io_context interface
  */
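The helper's effect is easiest to verify numerically: UINT_MAX >> 9 sectors is not a multiple of a 4k logical block, so the old cap could produce a final bio the device must reject, while the rounded-down cap stays aligned. A standalone check of that arithmetic (round_down is re-derived here for power-of-two sizes, and 4096 is an assumed logical block size):

/* Why capping at UINT_MAX >> 9 sectors broke 4k-logical-block devices,
 * and what bio_allowed_max_sectors() yields instead. */
#include <limits.h>
#include <stdio.h>

/* round_down as in the kernel, for power-of-two steps */
#define round_down(x, y) ((x) & ~((__typeof__(x))((y) - 1)))

int main(void)
{
	unsigned int lbs = 4096;	/* logical block size */

	unsigned int old_cap = UINT_MAX >> 9;			/* sectors */
	unsigned int new_cap = round_down(UINT_MAX, lbs) >> 9;	/* sectors */

	printf("old cap: %u sectors = %llu bytes (%% 4096 = %llu)\n",
	       old_cap, (unsigned long long)old_cap << 9,
	       ((unsigned long long)old_cap << 9) % lbs);
	printf("new cap: %u sectors = %llu bytes (%% 4096 = %llu)\n",
	       new_cap, (unsigned long long)new_cap << 9,
	       ((unsigned long long)new_cap << 9) % lbs);
	return 0;
}

The old cap leaves a 3584-byte remainder against a 4096-byte block; the new one divides evenly.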
--- a/drivers/ata/sata_rcar.c
+++ b/drivers/ata/sata_rcar.c
@@ -1,14 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0+
 /*
  * Renesas R-Car SATA driver
  *
  * Author: Vladimir Barinov <source@cogentembedded.com>
  * Copyright (C) 2013-2015 Cogent Embedded, Inc.
  * Copyright (C) 2013-2015 Renesas Solutions Corp.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
  */
 
 #include <linux/kernel.h>
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1519,8 +1519,10 @@ static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
 	if (ns->ndev)
 		nvme_nvm_update_nvm_info(ns);
 
 #ifdef CONFIG_NVME_MULTIPATH
-	if (ns->head->disk)
+	if (ns->head->disk) {
 		nvme_update_disk_info(ns->head->disk, ns, id);
+		blk_queue_stack_limits(ns->head->disk->queue, ns->queue);
+	}
 #endif
 }
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -285,6 +285,7 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
 	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
 	/* set to a default value for 512 until disk is validated */
 	blk_queue_logical_block_size(q, 512);
+	blk_set_stacking_limits(&q->limits);
 
 	/* we need to propagate up the VMC settings */
 	if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
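Both NVMe host hunks serve one goal: the multipath head's queue must start from permissive stacking defaults (blk_set_stacking_limits()) and then inherit each path's limits (blk_queue_stack_limits()), otherwise the head can build bios the underlying namespace cannot accept. A deliberately simplified model of that stacking, with only two limit fields where the kernel tracks many (all names below are illustrative):

/* Toy model of stacked queue limits: the head starts wide open and
 * each underlying path can only tighten it. */
#include <limits.h>
#include <stdio.h>

struct limits {
	unsigned int max_sectors;
	unsigned int logical_block_size;
};

/* analogous to blk_set_stacking_limits(): start permissive */
static void set_stacking_defaults(struct limits *l)
{
	l->max_sectors = UINT_MAX;
	l->logical_block_size = 512;
}

/* analogous to blk_queue_stack_limits(): inherit the bottom device */
static void stack(struct limits *top, const struct limits *bottom)
{
	if (bottom->max_sectors < top->max_sectors)
		top->max_sectors = bottom->max_sectors;
	if (bottom->logical_block_size > top->logical_block_size)
		top->logical_block_size = bottom->logical_block_size;
}

int main(void)
{
	struct limits head, path = { .max_sectors = 2048,
				     .logical_block_size = 4096 };

	set_stacking_defaults(&head);
	stack(&head, &path);

	printf("head: max_sectors=%u lbs=%u\n",
	       head.max_sectors, head.logical_block_size);
	return 0;
}

Starting from restrictive defaults instead would make the min() clamp the head below what the path supports, which is the inheritance bug these hunks avoid.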
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -420,7 +420,7 @@ static void nvmet_p2pmem_ns_add_p2p(struct nvmet_ctrl *ctrl,
 	struct pci_dev *p2p_dev;
 	int ret;
 
-	if (!ctrl->p2p_client)
+	if (!ctrl->p2p_client || !ns->use_p2pmem)
 		return;
 
 	if (ns->p2p_dev) {
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -122,7 +122,6 @@ struct nvmet_rdma_device {
 	int			inline_page_count;
 };
 
-static struct workqueue_struct *nvmet_rdma_delete_wq;
 static bool nvmet_rdma_use_srq;
 module_param_named(use_srq, nvmet_rdma_use_srq, bool, 0444);
 MODULE_PARM_DESC(use_srq, "Use shared receive queue.");
@@ -1274,12 +1273,12 @@ static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id,
 
 	if (queue->host_qid == 0) {
 		/* Let inflight controller teardown complete */
-		flush_workqueue(nvmet_rdma_delete_wq);
+		flush_scheduled_work();
 	}
 
 	ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn);
 	if (ret) {
-		queue_work(nvmet_rdma_delete_wq, &queue->release_work);
+		schedule_work(&queue->release_work);
 		/* Destroying rdma_cm id is not needed here */
 		return 0;
 	}
@@ -1344,7 +1343,7 @@ static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
 
 	if (disconnect) {
 		rdma_disconnect(queue->cm_id);
-		queue_work(nvmet_rdma_delete_wq, &queue->release_work);
+		schedule_work(&queue->release_work);
 	}
 }
@@ -1374,7 +1373,7 @@ static void nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id,
 	mutex_unlock(&nvmet_rdma_queue_mutex);
 
 	pr_err("failed to connect queue %d\n", queue->idx);
-	queue_work(nvmet_rdma_delete_wq, &queue->release_work);
+	schedule_work(&queue->release_work);
 }
 
 /**
@@ -1656,17 +1655,8 @@ static int __init nvmet_rdma_init(void)
 	if (ret)
 		goto err_ib_client;
 
-	nvmet_rdma_delete_wq = alloc_workqueue("nvmet-rdma-delete-wq",
-			WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
-	if (!nvmet_rdma_delete_wq) {
-		ret = -ENOMEM;
-		goto err_unreg_transport;
-	}
-
 	return 0;
 
-err_unreg_transport:
-	nvmet_unregister_transport(&nvmet_rdma_ops);
 err_ib_client:
 	ib_unregister_client(&nvmet_rdma_ib_client);
 	return ret;
@@ -1674,7 +1664,6 @@ static int __init nvmet_rdma_init(void)
 
 static void __exit nvmet_rdma_exit(void)
 {
-	destroy_workqueue(nvmet_rdma_delete_wq);
 	nvmet_unregister_transport(&nvmet_rdma_ops);
 	ib_unregister_client(&nvmet_rdma_ib_client);
 	WARN_ON_ONCE(!list_empty(&nvmet_rdma_queue_list));
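The revert trades the private delete workqueue back for the system workqueue; the two patterns differ mainly in lifecycle, since a private queue must be allocated and destroyed while schedule_work() rides on system_wq. A minimal module-style sketch contrasting the two, using only stock workqueue APIs (the handler and names are placeholders, unrelated to nvmet):

/* Minimal module sketch contrasting the two workqueue patterns in the
 * revert: a private workqueue with an explicit lifecycle versus the
 * system workqueue via schedule_work(). */
#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_wq;
static struct work_struct demo_work;

static void demo_handler(struct work_struct *w)
{
	pr_info("demo work ran\n");
}

static int __init demo_init(void)
{
	INIT_WORK(&demo_work, demo_handler);

	/* private-queue variant (what the reverted patch had added) */
	demo_wq = alloc_workqueue("demo-wq",
			WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
	if (!demo_wq)
		return -ENOMEM;
	queue_work(demo_wq, &demo_work);
	flush_workqueue(demo_wq);

	/* system-queue variant (what the revert restores) */
	schedule_work(&demo_work);
	flush_scheduled_work();
	return 0;
}

static void __exit demo_exit(void)
{
	destroy_workqueue(demo_wq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");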