Commit 02e031cb authored by Christoph Hellwig, committed by Jens Axboe

block: remove REQ_HARDBARRIER

REQ_HARDBARRIER is dead now, so remove the leftovers.  What's left
at this point is:

 - various checks inside the block layer.
 - sanity checks in bio based drivers.
 - now unused bio_empty_barrier helper.
 - Xen blockfront's use of BLKIF_OP_WRITE_BARRIER - it has been dead for a
   while, but Xen really needs to sort out its barrier situation.
 - setting of ordered tags in uas - dead code copied from old scsi
   drivers.
 - the SCSI layer's different retry handling for barriers - it's dead and
   should have been removed when flushes were converted to FS requests.
 - blktrace handling of barriers - removed.  Someone who knows blktrace
   better should add support for REQ_FLUSH and REQ_FUA, though.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
parent 00e375e7
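
A minimal sketch of the replacement idiom, not part of this commit and assuming the 2.6.37-era bio interface (bio_alloc/submit_bio and the WRITE_FLUSH flag combination from linux/fs.h); the function name and completion callback are hypothetical. Where a caller once tagged a write REQ_HARDBARRIER, it now issues an explicit cache flush (an empty REQ_FLUSH bio) and/or marks the data write REQ_FUA:

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/fs.h>

/*
 * Illustrative only: flush a device's write cache with an empty
 * REQ_FLUSH bio instead of relying on the removed hard barrier.
 */
static void example_issue_flush(struct block_device *bdev,
				bio_end_io_t *end_io)
{
	struct bio *bio = bio_alloc(GFP_NOIO, 0);	/* no data pages needed */

	if (!bio)
		return;
	bio->bi_bdev = bdev;
	bio->bi_end_io = end_io;		/* caller's completion callback */
	submit_bio(WRITE_FLUSH, bio);		/* carries REQ_FLUSH */
}
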
block/blk-core.c
@@ -1194,13 +1194,6 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 	int where = ELEVATOR_INSERT_SORT;
 	int rw_flags;
-	/* REQ_HARDBARRIER is no more */
-	if (WARN_ONCE(bio->bi_rw & REQ_HARDBARRIER,
-		"block: HARDBARRIER is deprecated, use FLUSH/FUA instead\n")) {
-		bio_endio(bio, -EOPNOTSUPP);
-		return 0;
-	}
 	/*
 	 * low level driver can indicate that it wants pages above a
 	 * certain limit bounced to low memory (ie for highmem, or even
...
block/elevator.c
@@ -429,7 +429,7 @@ void elv_dispatch_sort(struct request_queue *q, struct request *rq)
 	q->nr_sorted--;
 	boundary = q->end_sector;
-	stop_flags = REQ_SOFTBARRIER | REQ_HARDBARRIER | REQ_STARTED;
+	stop_flags = REQ_SOFTBARRIER | REQ_STARTED;
 	list_for_each_prev(entry, &q->queue_head) {
 		struct request *pos = list_entry_rq(entry);
@@ -691,7 +691,7 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
 void __elv_add_request(struct request_queue *q, struct request *rq, int where,
 		       int plug)
 {
-	if (rq->cmd_flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
+	if (rq->cmd_flags & REQ_SOFTBARRIER) {
 		/* barriers are scheduling boundary, update end_sector */
 		if (rq->cmd_type == REQ_TYPE_FS ||
 		    (rq->cmd_flags & REQ_DISCARD)) {
...
drivers/block/aoe/aoeblk.c
@@ -180,9 +180,6 @@ aoeblk_make_request(struct request_queue *q, struct bio *bio)
 		BUG();
 		bio_endio(bio, -ENXIO);
 		return 0;
-	} else if (bio->bi_rw & REQ_HARDBARRIER) {
-		bio_endio(bio, -EOPNOTSUPP);
-		return 0;
 	} else if (bio->bi_io_vec == NULL) {
 		printk(KERN_ERR "aoe: bi_io_vec is NULL\n");
 		BUG();
...
drivers/block/loop.c
@@ -481,12 +481,6 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
 	if (bio_rw(bio) == WRITE) {
 		struct file *file = lo->lo_backing_file;
-		/* REQ_HARDBARRIER is deprecated */
-		if (bio->bi_rw & REQ_HARDBARRIER) {
-			ret = -EOPNOTSUPP;
-			goto out;
-		}
 		if (bio->bi_rw & REQ_FLUSH) {
 			ret = vfs_fsync(file, 0);
 			if (unlikely(ret && ret != -EINVAL)) {
...
drivers/block/xen-blkfront.c
@@ -289,8 +289,6 @@ static int blkif_queue_request(struct request *req)
 	ring_req->operation = rq_data_dir(req) ?
 		BLKIF_OP_WRITE : BLKIF_OP_READ;
-	if (req->cmd_flags & REQ_HARDBARRIER)
-		ring_req->operation = BLKIF_OP_WRITE_BARRIER;
 	ring_req->nr_segments = blk_rq_map_sg(req->q, req, info->sg);
 	BUG_ON(ring_req->nr_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST);
...
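
The commit message notes that blockfront's BLKIF_OP_WRITE_BARRIER path is dead and that Xen still needs to sort out its barrier situation. As a hedged sketch of one possible direction, not part of this commit and assuming the blkif interface's BLKIF_OP_FLUSH_DISKCACHE operation is available and advertised by the backend, a REQ_FLUSH request could be mapped to a cache flush in blkif_queue_request:

	/* Hypothetical follow-up, not implemented by this commit. */
	if (req->cmd_flags & REQ_FLUSH)
		ring_req->operation = BLKIF_OP_FLUSH_DISKCACHE;
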
drivers/scsi/scsi_error.c
@@ -320,17 +320,9 @@ static int scsi_check_sense(struct scsi_cmnd *scmd)
 			    "changed. The Linux SCSI layer does not "
 			    "automatically adjust these parameters.\n");
-		if (scmd->request->cmd_flags & REQ_HARDBARRIER)
-			/*
-			 * barrier requests should always retry on UA
-			 * otherwise block will get a spurious error
-			 */
-			return NEEDS_RETRY;
-		else
-			/*
-			 * for normal (non barrier) commands, pass the
-			 * UA upwards for a determination in the
-			 * completion functions
-			 */
-			return SUCCESS;
+		/*
+		 * Pass the UA upwards for a determination in the completion
+		 * functions.
+		 */
+		return SUCCESS;
...
drivers/usb/storage/uas.c
@@ -331,9 +331,6 @@ static struct urb *uas_alloc_cmd_urb(struct uas_dev_info *devinfo, gfp_t gfp,
 	iu->iu_id = IU_ID_COMMAND;
 	iu->tag = cpu_to_be16(stream_id);
-	if (sdev->ordered_tags && (cmnd->request->cmd_flags & REQ_HARDBARRIER))
-		iu->prio_attr = UAS_ORDERED_TAG;
-	else
-		iu->prio_attr = UAS_SIMPLE_TAG;
+	iu->prio_attr = UAS_SIMPLE_TAG;
 	iu->len = len;
 	int_to_scsilun(sdev->lun, &iu->lun);
...
include/linux/bio.h
@@ -66,10 +66,6 @@
 #define bio_offset(bio)		bio_iovec((bio))->bv_offset
 #define bio_segments(bio)	((bio)->bi_vcnt - (bio)->bi_idx)
 #define bio_sectors(bio)	((bio)->bi_size >> 9)
-#define bio_empty_barrier(bio) \
-	((bio->bi_rw & REQ_HARDBARRIER) && \
-	 !bio_has_data(bio) && \
-	 !(bio->bi_rw & REQ_DISCARD))
 static inline unsigned int bio_cur_bytes(struct bio *bio)
 {
...
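
With bio_empty_barrier() gone, the analogous notion in the flush machinery is an empty bio carrying REQ_FLUSH. A hedged sketch of such a test, mirroring the deleted macro (the helper name is hypothetical; this commit does not add it anywhere):

/* Hypothetical helper, for illustration only; mirrors the removed macro. */
static inline bool bio_is_empty_flush(struct bio *bio)
{
	return (bio->bi_rw & REQ_FLUSH) &&
	       !bio_has_data(bio) &&
	       !(bio->bi_rw & REQ_DISCARD);
}
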
include/linux/blk_types.h
@@ -122,7 +122,6 @@ enum rq_flag_bits {
 	__REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
 	__REQ_FAILFAST_DRIVER,	/* no driver retries of driver errors */
-	__REQ_HARDBARRIER,	/* may not be passed by drive either */
 	__REQ_SYNC,		/* request is sync (sync write or read) */
 	__REQ_META,		/* metadata io request */
 	__REQ_DISCARD,		/* request to discard sectors */
@@ -159,7 +158,6 @@ enum rq_flag_bits {
 #define REQ_FAILFAST_DEV	(1 << __REQ_FAILFAST_DEV)
 #define REQ_FAILFAST_TRANSPORT	(1 << __REQ_FAILFAST_TRANSPORT)
 #define REQ_FAILFAST_DRIVER	(1 << __REQ_FAILFAST_DRIVER)
-#define REQ_HARDBARRIER		(1 << __REQ_HARDBARRIER)
 #define REQ_SYNC		(1 << __REQ_SYNC)
 #define REQ_META		(1 << __REQ_META)
 #define REQ_DISCARD		(1 << __REQ_DISCARD)
@@ -168,8 +166,8 @@ enum rq_flag_bits {
 #define REQ_FAILFAST_MASK \
 	(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)
 #define REQ_COMMON_MASK \
-	(REQ_WRITE | REQ_FAILFAST_MASK | REQ_HARDBARRIER | REQ_SYNC | \
-	 REQ_META | REQ_DISCARD | REQ_NOIDLE | REQ_FLUSH | REQ_FUA)
+	(REQ_WRITE | REQ_FAILFAST_MASK | REQ_SYNC | REQ_META | REQ_DISCARD | \
+	 REQ_NOIDLE | REQ_FLUSH | REQ_FUA)
 #define REQ_CLONE_MASK		REQ_COMMON_MASK
 #define REQ_UNPLUG		(1 << __REQ_UNPLUG)
...
include/linux/blkdev.h
@@ -552,8 +552,7 @@ static inline void blk_clear_queue_full(struct request_queue *q, int sync)
  * it already be started by driver.
  */
 #define RQ_NOMERGE_FLAGS	\
-	(REQ_NOMERGE | REQ_STARTED | REQ_HARDBARRIER | REQ_SOFTBARRIER | \
-	 REQ_FLUSH | REQ_FUA)
+	(REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_FLUSH | REQ_FUA)
 #define rq_mergeable(rq)	\
 	(!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && \
 	 (((rq)->cmd_flags & REQ_DISCARD) || \
...
kernel/trace/blktrace.c
@@ -168,7 +168,6 @@ static int act_log_check(struct blk_trace *bt, u32 what, sector_t sector,
 static const u32 ddir_act[2] = { BLK_TC_ACT(BLK_TC_READ),
 				 BLK_TC_ACT(BLK_TC_WRITE) };
-#define BLK_TC_HARDBARRIER	BLK_TC_BARRIER
 #define BLK_TC_RAHEAD		BLK_TC_AHEAD
 /* The ilog2() calls fall out because they're constant */
@@ -196,7 +195,6 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
 		return;
 	what |= ddir_act[rw & WRITE];
-	what |= MASK_TC_BIT(rw, HARDBARRIER);
 	what |= MASK_TC_BIT(rw, SYNC);
 	what |= MASK_TC_BIT(rw, RAHEAD);
 	what |= MASK_TC_BIT(rw, META);
@@ -1807,8 +1805,6 @@ void blk_fill_rwbs(char *rwbs, u32 rw, int bytes)
 	if (rw & REQ_RAHEAD)
 		rwbs[i++] = 'A';
-	if (rw & REQ_HARDBARRIER)
-		rwbs[i++] = 'B';
 	if (rw & REQ_SYNC)
 		rwbs[i++] = 'S';
 	if (rw & REQ_META)
...
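
The commit message asks for follow-up blktrace support for REQ_FLUSH and REQ_FUA. A hedged sketch of what that could look like in blk_fill_rwbs, with arbitrary letter choices (illustrative only; neither this commit nor any specific later patch is claimed to implement exactly this):

	if (rw & REQ_FLUSH)
		rwbs[i++] = 'F';	/* hypothetical: preflush requested */
	if (rw & REQ_FUA)
		rwbs[i++] = 'U';	/* hypothetical: forced unit access */
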