Commit a3b05e8f authored by Jens Axboe, committed by Jens Axboe

[PATCH] Kill various deprecated/unused block layer defines/functions

Signed-off-by: Jens Axboe <axboe@suse.de>
parent 1ea25ecb
@@ -1229,7 +1229,6 @@ static inline void complete_buffers(struct bio *bio, int status)
 		int nr_sectors = bio_sectors(bio);
 
 		bio->bi_next = NULL;
-		blk_finished_io(len);
 		bio_endio(bio, nr_sectors << 9, status ? 0 : -EIO);
 		bio = xbh;
 	}
...
@@ -989,7 +989,6 @@ static inline void complete_buffers(struct bio *bio, int ok)
 		xbh = bio->bi_next;
 		bio->bi_next = NULL;
 
-		blk_finished_io(nr_sectors);
 		bio_endio(bio, nr_sectors << 9, ok ? 0 : -EIO);
 
 		bio = xbh;
...
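Worth noting while reading the two driver hunks above: blk_finished_io() and blk_started_io() were already empty macros (their definitions are deleted in a header hunk further down), so dropping the call sites is purely mechanical. A stand-alone sketch illustrating why, with the macro bodies copied from that hunk and a hypothetical main() added only for demonstration:

#include <stdio.h>

/* Macro bodies copied from the header hunk below: both expand to nothing. */
#define blk_finished_io(nsects)	do { } while (0)
#define blk_started_io(nsects)	do { } while (0)

int main(void)
{
	int nr_sectors = 8;

	blk_started_io(nr_sectors);	/* compiles away entirely */
	blk_finished_io(nr_sectors);	/* the argument is never even evaluated */
	printf("accounting hooks had no effect on %d sectors\n", nr_sectors);
	return 0;
}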
@@ -578,12 +578,6 @@ static inline void blk_clear_queue_full(struct request_queue *q, int rw)
 #define rq_mergeable(rq)	\
 	(!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && blk_fs_request((rq)))
 
-/*
- * noop, requests are automagically marked as active/inactive by I/O
- * scheduler -- see elv_next_request
- */
-#define blk_queue_headactive(q, head_active)
-
 /*
  * q->prep_rq_fn return values
  */
@@ -621,11 +615,6 @@ static inline void blk_queue_bounce(request_queue_t *q, struct bio **bio)
 	if ((rq->bio))			\
 		for (_bio = (rq)->bio; _bio; _bio = _bio->bi_next)
 
-struct sec_size {
-	unsigned block_size;
-	unsigned block_size_bits;
-};
-
 extern int blk_register_queue(struct gendisk *disk);
 extern void blk_unregister_queue(struct gendisk *disk);
 extern void register_disk(struct gendisk *dev);
@@ -690,16 +679,6 @@ extern void end_that_request_last(struct request *, int);
 extern void end_request(struct request *req, int uptodate);
 extern void blk_complete_request(struct request *);
 
-static inline int rq_all_done(struct request *rq, unsigned int nr_bytes)
-{
-	if (blk_fs_request(rq))
-		return (nr_bytes >= (rq->hard_nr_sectors << 9));
-	else if (blk_pc_request(rq))
-		return nr_bytes >= rq->data_len;
-
-	return 0;
-}
-
 /*
  * end_that_request_first/chunk() takes an uptodate argument. we account
  * any value <= as an io error. 0 means -EIO for compatability reasons,
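Any out-of-tree code that still called the deleted rq_all_done() helper can simply open-code it. The following is a sketch reconstructed from the removed lines above; the my_ prefix is illustrative, while blk_fs_request(), blk_pc_request() and the struct request fields are the kernel's own definitions from this era:

#include <linux/blkdev.h>

/*
 * Reconstruction of the removed rq_all_done(): a request is fully
 * completed once nr_bytes covers either its sector payload (fs
 * requests) or its raw data length (packet commands).
 */
static inline int my_rq_all_done(struct request *rq, unsigned int nr_bytes)
{
	if (blk_fs_request(rq))
		return nr_bytes >= (rq->hard_nr_sectors << 9);
	else if (blk_pc_request(rq))
		return nr_bytes >= rq->data_len;

	return 0;
}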
@@ -807,14 +786,6 @@ static inline int queue_dma_alignment(request_queue_t *q)
 	return retval;
 }
 
-static inline int bdev_dma_aligment(struct block_device *bdev)
-{
-	return queue_dma_alignment(bdev_get_queue(bdev));
-}
-
-#define blk_finished_io(nsects)	do { } while (0)
-#define blk_started_io(nsects)	do { } while (0)
-
 /* assumes size > 256 */
 static inline unsigned int blksize_bits(unsigned int size)
 {
...
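The one caller-facing helper removed in this hunk, the misspelled bdev_dma_aligment(), was a thin wrapper, so its body can be inlined at any remaining call site. A sketch assuming the surviving queue_dma_alignment() and bdev_get_queue() helpers (the my_ name is illustrative):

#include <linux/blkdev.h>

/*
 * Inline replacement for the removed wrapper: look up the request
 * queue behind the block device and return its DMA alignment mask.
 */
static inline int my_bdev_dma_alignment(struct block_device *bdev)
{
	return queue_dma_alignment(bdev_get_queue(bdev));
}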
@@ -79,7 +79,6 @@ extern int dir_notify_enable;
 #define WRITE 1
 #define READA 2		/* read-ahead - don't block if no resources */
 #define SWRITE 3	/* for ll_rw_block() - wait for buffer lock */
-#define SPECIAL 4	/* For non-blockdevice requests in request queue */
 #define READ_SYNC	(READ | (1 << BIO_RW_SYNC))
 #define WRITE_SYNC	(WRITE | (1 << BIO_RW_SYNC))
 #define WRITE_BARRIER	((1 << BIO_RW) | (1 << BIO_RW_BARRIER))
...