Commit 512c781f authored by Gioh Kim's avatar Gioh Kim Committed by Jens Axboe

block/rnbd: Set write-back cache and fua same to the target device

The rnbd-client always sets the write-back cache and fua attributes
of the rnbd device queue regardless of the target device on the server.
That generates an IO hang issue when the target device does not
support both write-back cache and fua.

This patch adds more fields for the cache policy and fua into the
device opening message. The rnbd-server sends the information
if the target device supports the write-back cache and fua
and the rnbd-client receives it and sets the device queue accordingly.
Signed-off-by: default avatarGioh Kim <gi-oh.kim@cloud.ionos.com>
[jwang: some minor change, rename a few variables, remove unrelated comments.]
Signed-off-by: default avatarJack Wang <jinpu.wang@cloud.ionos.com>
Signed-off-by: default avatarJens Axboe <axboe@kernel.dk>
parent 3877ece0
...@@ -88,6 +88,8 @@ static int rnbd_clt_set_dev_attr(struct rnbd_clt_dev *dev, ...@@ -88,6 +88,8 @@ static int rnbd_clt_set_dev_attr(struct rnbd_clt_dev *dev,
dev->discard_alignment = le32_to_cpu(rsp->discard_alignment); dev->discard_alignment = le32_to_cpu(rsp->discard_alignment);
dev->secure_discard = le16_to_cpu(rsp->secure_discard); dev->secure_discard = le16_to_cpu(rsp->secure_discard);
dev->rotational = rsp->rotational; dev->rotational = rsp->rotational;
dev->wc = !!(rsp->cache_policy & RNBD_WRITEBACK);
dev->fua = !!(rsp->cache_policy & RNBD_FUA);
dev->max_hw_sectors = sess->max_io_size / SECTOR_SIZE; dev->max_hw_sectors = sess->max_io_size / SECTOR_SIZE;
dev->max_segments = BMAX_SEGMENTS; dev->max_segments = BMAX_SEGMENTS;
...@@ -1305,7 +1307,7 @@ static void setup_request_queue(struct rnbd_clt_dev *dev) ...@@ -1305,7 +1307,7 @@ static void setup_request_queue(struct rnbd_clt_dev *dev)
blk_queue_max_segments(dev->queue, dev->max_segments); blk_queue_max_segments(dev->queue, dev->max_segments);
blk_queue_io_opt(dev->queue, dev->sess->max_io_size); blk_queue_io_opt(dev->queue, dev->sess->max_io_size);
blk_queue_virt_boundary(dev->queue, SZ_4K - 1); blk_queue_virt_boundary(dev->queue, SZ_4K - 1);
blk_queue_write_cache(dev->queue, true, true); blk_queue_write_cache(dev->queue, dev->wc, dev->fua);
dev->queue->queuedata = dev; dev->queue->queuedata = dev;
} }
...@@ -1528,13 +1530,13 @@ struct rnbd_clt_dev *rnbd_clt_map_device(const char *sessname, ...@@ -1528,13 +1530,13 @@ struct rnbd_clt_dev *rnbd_clt_map_device(const char *sessname,
} }
rnbd_clt_info(dev, rnbd_clt_info(dev,
"map_device: Device mapped as %s (nsectors: %zu, logical_block_size: %d, physical_block_size: %d, max_write_same_sectors: %d, max_discard_sectors: %d, discard_granularity: %d, discard_alignment: %d, secure_discard: %d, max_segments: %d, max_hw_sectors: %d, rotational: %d)\n", "map_device: Device mapped as %s (nsectors: %zu, logical_block_size: %d, physical_block_size: %d, max_write_same_sectors: %d, max_discard_sectors: %d, discard_granularity: %d, discard_alignment: %d, secure_discard: %d, max_segments: %d, max_hw_sectors: %d, rotational: %d, wc: %d, fua: %d)\n",
dev->gd->disk_name, dev->nsectors, dev->gd->disk_name, dev->nsectors,
dev->logical_block_size, dev->physical_block_size, dev->logical_block_size, dev->physical_block_size,
dev->max_write_same_sectors, dev->max_discard_sectors, dev->max_write_same_sectors, dev->max_discard_sectors,
dev->discard_granularity, dev->discard_alignment, dev->discard_granularity, dev->discard_alignment,
dev->secure_discard, dev->max_segments, dev->secure_discard, dev->max_segments,
dev->max_hw_sectors, dev->rotational); dev->max_hw_sectors, dev->rotational, dev->wc, dev->fua);
mutex_unlock(&dev->lock); mutex_unlock(&dev->lock);
......
...@@ -112,6 +112,8 @@ struct rnbd_clt_dev { ...@@ -112,6 +112,8 @@ struct rnbd_clt_dev {
enum rnbd_access_mode access_mode; enum rnbd_access_mode access_mode;
bool read_only; bool read_only;
bool rotational; bool rotational;
bool wc;
bool fua;
u32 max_hw_sectors; u32 max_hw_sectors;
u32 max_write_same_sectors; u32 max_write_same_sectors;
u32 max_discard_sectors; u32 max_discard_sectors;
......
...@@ -108,6 +108,11 @@ struct rnbd_msg_close { ...@@ -108,6 +108,11 @@ struct rnbd_msg_close {
__le32 device_id; __le32 device_id;
}; };
enum rnbd_cache_policy {
RNBD_FUA = 1 << 0,
RNBD_WRITEBACK = 1 << 1,
};
/** /**
* struct rnbd_msg_open_rsp - response message to RNBD_MSG_OPEN * struct rnbd_msg_open_rsp - response message to RNBD_MSG_OPEN
* @hdr: message header * @hdr: message header
...@@ -124,6 +129,7 @@ struct rnbd_msg_close { ...@@ -124,6 +129,7 @@ struct rnbd_msg_close {
* @max_segments: max segments hardware support in one transfer * @max_segments: max segments hardware support in one transfer
* @secure_discard: supports secure discard * @secure_discard: supports secure discard
* @rotation: is a rotational disc? * @rotation: is a rotational disc?
* @cache_policy: support write-back caching or FUA?
*/ */
struct rnbd_msg_open_rsp { struct rnbd_msg_open_rsp {
struct rnbd_msg_hdr hdr; struct rnbd_msg_hdr hdr;
...@@ -139,7 +145,8 @@ struct rnbd_msg_open_rsp { ...@@ -139,7 +145,8 @@ struct rnbd_msg_open_rsp {
__le16 max_segments; __le16 max_segments;
__le16 secure_discard; __le16 secure_discard;
u8 rotational; u8 rotational;
u8 reserved[11]; u8 cache_policy;
u8 reserved[10];
}; };
/** /**
......
...@@ -550,6 +550,7 @@ static void rnbd_srv_fill_msg_open_rsp(struct rnbd_msg_open_rsp *rsp, ...@@ -550,6 +550,7 @@ static void rnbd_srv_fill_msg_open_rsp(struct rnbd_msg_open_rsp *rsp,
struct rnbd_srv_sess_dev *sess_dev) struct rnbd_srv_sess_dev *sess_dev)
{ {
struct rnbd_dev *rnbd_dev = sess_dev->rnbd_dev; struct rnbd_dev *rnbd_dev = sess_dev->rnbd_dev;
struct request_queue *q = bdev_get_queue(rnbd_dev->bdev);
rsp->hdr.type = cpu_to_le16(RNBD_MSG_OPEN_RSP); rsp->hdr.type = cpu_to_le16(RNBD_MSG_OPEN_RSP);
rsp->device_id = rsp->device_id =
...@@ -574,8 +575,12 @@ static void rnbd_srv_fill_msg_open_rsp(struct rnbd_msg_open_rsp *rsp, ...@@ -574,8 +575,12 @@ static void rnbd_srv_fill_msg_open_rsp(struct rnbd_msg_open_rsp *rsp,
cpu_to_le32(rnbd_dev_get_discard_alignment(rnbd_dev)); cpu_to_le32(rnbd_dev_get_discard_alignment(rnbd_dev));
rsp->secure_discard = rsp->secure_discard =
cpu_to_le16(rnbd_dev_get_secure_discard(rnbd_dev)); cpu_to_le16(rnbd_dev_get_secure_discard(rnbd_dev));
rsp->rotational = rsp->rotational = !blk_queue_nonrot(q);
!blk_queue_nonrot(bdev_get_queue(rnbd_dev->bdev)); rsp->cache_policy = 0;
if (test_bit(QUEUE_FLAG_WC, &q->queue_flags))
rsp->cache_policy |= RNBD_WRITEBACK;
if (blk_queue_fua(q))
rsp->cache_policy |= RNBD_FUA;
} }
static struct rnbd_srv_sess_dev * static struct rnbd_srv_sess_dev *
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment