Commit b4f42e28 authored by Jens Axboe

block: remove struct request buffer member

This was used in the olden days, back when onions were proper
yellow. Basically it mapped to the current buffer to be
transferred. With highmem being added more than a decade ago,
most drivers map pages out of a bio, and rq->buffer isn't
pointing at anything valid.

Convert old-style drivers to just use bio_data().

For the discard payload use case, just reference the page
in the bio.
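
As an illustration of the conversion pattern (a minimal sketch with a
hypothetical helper, not code from this patch), a driver that used to
read the current segment through rq->buffer can get the same kernel
address from the request's first bio:

	#include <linux/blkdev.h>
	#include <linux/bio.h>
	#include <linux/string.h>

	/* Hypothetical example: copy out the current segment of a request. */
	static void example_copy_cur_segment(struct request *rq, void *dst)
	{
		/* old style: memcpy(dst, rq->buffer, blk_rq_cur_bytes(rq)); */
		memcpy(dst, bio_data(rq->bio), blk_rq_cur_bytes(rq));
	}

For the discard payload case, the page added with blk_add_request_payload()
is reachable through the bio's first bvec, so freeing it (as the skd and sd
hunks below do) becomes:

	struct bio *bio = rq->bio;

	/* old style: free_page((unsigned long)rq->buffer); */
	__free_page(bio->bi_io_vec->bv_page);

Like rq->buffer, bio_data() is only meaningful for low-memory pages covered
by the kernel's linear mapping; highmem-aware drivers still have to iterate
and map the bio segments themselves.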
Signed-off-by: Jens Axboe <axboe@fb.com>
parent f89e0dd9
......@@ -146,8 +146,8 @@ void blk_dump_rq_flags(struct request *rq, char *msg)
printk(KERN_INFO " sector %llu, nr/cnr %u/%u\n",
(unsigned long long)blk_rq_pos(rq),
blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
printk(KERN_INFO " bio %p, biotail %p, buffer %p, len %u\n",
rq->bio, rq->biotail, rq->buffer, blk_rq_bytes(rq));
printk(KERN_INFO " bio %p, biotail %p, len %u\n",
rq->bio, rq->biotail, blk_rq_bytes(rq));
if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
printk(KERN_INFO " cdb: ");
......@@ -1360,7 +1360,6 @@ void blk_add_request_payload(struct request *rq, struct page *page,
rq->__data_len = rq->resid_len = len;
rq->nr_phys_segments = 1;
rq->buffer = bio_data(bio);
}
EXPORT_SYMBOL_GPL(blk_add_request_payload);
......@@ -1402,12 +1401,6 @@ bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
bio->bi_next = req->bio;
req->bio = bio;
/*
* may not be valid. if the low level driver said
* it didn't need a bounce buffer then it better
* not touch req->buffer either...
*/
req->buffer = bio_data(bio);
req->__sector = bio->bi_iter.bi_sector;
req->__data_len += bio->bi_iter.bi_size;
req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
......@@ -2434,7 +2427,6 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
}
req->__data_len -= total_bytes;
req->buffer = bio_data(req->bio);
/* update sector only for requests with clear definition of sector */
if (req->cmd_type == REQ_TYPE_FS)
......@@ -2752,10 +2744,9 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
/* Bit 0 (R/W) is identical in rq->cmd_flags and bio->bi_rw */
rq->cmd_flags |= bio->bi_rw & REQ_WRITE;
if (bio_has_data(bio)) {
if (bio_has_data(bio))
rq->nr_phys_segments = bio_phys_segments(q, bio);
rq->buffer = bio_data(bio);
}
rq->__data_len = bio->bi_iter.bi_size;
rq->bio = rq->biotail = bio;
......@@ -2831,7 +2822,7 @@ EXPORT_SYMBOL_GPL(blk_rq_unprep_clone);
/*
* Copy attributes of the original request to the clone request.
* The actual data parts (e.g. ->cmd, ->buffer, ->sense) are not copied.
* The actual data parts (e.g. ->cmd, ->sense) are not copied.
*/
static void __blk_rq_prep_clone(struct request *dst, struct request *src)
{
......@@ -2857,7 +2848,7 @@ static void __blk_rq_prep_clone(struct request *dst, struct request *src)
*
* Description:
* Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq.
* The actual data parts of @rq_src (e.g. ->cmd, ->buffer, ->sense)
* The actual data parts of @rq_src (e.g. ->cmd, ->sense)
* are not copied, and copying such parts is the caller's responsibility.
* Also, pages which the original bios are pointing to are not copied
* and the cloned bios just point same pages.
......
......@@ -155,7 +155,6 @@ int blk_rq_map_user(struct request_queue *q, struct request *rq,
if (!bio_flagged(bio, BIO_USER_MAPPED))
rq->cmd_flags |= REQ_COPY_USER;
rq->buffer = NULL;
return 0;
unmap_rq:
blk_rq_unmap_user(bio);
......@@ -238,7 +237,6 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
blk_queue_bounce(q, &bio);
bio_get(bio);
blk_rq_bio_prep(q, rq, bio);
rq->buffer = NULL;
return 0;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);
......@@ -325,7 +323,6 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
}
blk_queue_bounce(q, &rq->bio);
rq->buffer = NULL;
return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);
......@@ -1406,7 +1406,7 @@ static void redo_fd_request(void)
track = block / (floppy->dtype->sects * floppy->type->sect_mult);
sector = block % (floppy->dtype->sects * floppy->type->sect_mult);
data = rq->buffer + 512 * cnt;
data = bio_data(rq->bio) + 512 * cnt;
#ifdef DEBUG
printk("access to track %d, sector %d, with buffer at "
"0x%08lx\n", track, sector, data);
......
......@@ -1484,7 +1484,7 @@ static void redo_fd_request(void)
ReqCnt = 0;
ReqCmd = rq_data_dir(fd_request);
ReqBlock = blk_rq_pos(fd_request);
ReqBuffer = fd_request->buffer;
ReqBuffer = bio_data(fd_request->bio);
setup_req_params( drive );
do_fd_action( drive );
......
......@@ -2351,7 +2351,7 @@ static void rw_interrupt(void)
}
if (CT(COMMAND) != FD_READ ||
raw_cmd->kernel_data == current_req->buffer) {
raw_cmd->kernel_data == bio_data(current_req->bio)) {
/* transfer directly from buffer */
cont->done(1);
} else if (CT(COMMAND) == FD_READ) {
......@@ -2640,7 +2640,7 @@ static int make_raw_rw_request(void)
raw_cmd->flags &= ~FD_RAW_WRITE;
raw_cmd->flags |= FD_RAW_READ;
COMMAND = FM_MODE(_floppy, FD_READ);
} else if ((unsigned long)current_req->buffer < MAX_DMA_ADDRESS) {
} else if ((unsigned long)bio_data(current_req->bio) < MAX_DMA_ADDRESS) {
unsigned long dma_limit;
int direct, indirect;
......@@ -2654,13 +2654,13 @@ static int make_raw_rw_request(void)
*/
max_size = buffer_chain_size();
dma_limit = (MAX_DMA_ADDRESS -
((unsigned long)current_req->buffer)) >> 9;
((unsigned long)bio_data(current_req->bio))) >> 9;
if ((unsigned long)max_size > dma_limit)
max_size = dma_limit;
/* 64 kb boundaries */
if (CROSS_64KB(current_req->buffer, max_size << 9))
if (CROSS_64KB(bio_data(current_req->bio), max_size << 9))
max_size = (K_64 -
((unsigned long)current_req->buffer) %
((unsigned long)bio_data(current_req->bio)) %
K_64) >> 9;
direct = transfer_size(ssize, max_sector, max_size) - fsector_t;
/*
......@@ -2677,7 +2677,7 @@ static int make_raw_rw_request(void)
(DP->read_track & (1 << DRS->probed_format)))))) {
max_size = blk_rq_sectors(current_req);
} else {
raw_cmd->kernel_data = current_req->buffer;
raw_cmd->kernel_data = bio_data(current_req->bio);
raw_cmd->length = current_count_sectors << 9;
if (raw_cmd->length == 0) {
DPRINT("%s: zero dma transfer attempted\n", __func__);
......@@ -2731,7 +2731,7 @@ static int make_raw_rw_request(void)
raw_cmd->length = ((raw_cmd->length - 1) | (ssize - 1)) + 1;
raw_cmd->length <<= 9;
if ((raw_cmd->length < current_count_sectors << 9) ||
(raw_cmd->kernel_data != current_req->buffer &&
(raw_cmd->kernel_data != bio_data(current_req->bio) &&
CT(COMMAND) == FD_WRITE &&
(aligned_sector_t + (raw_cmd->length >> 9) > buffer_max ||
aligned_sector_t < buffer_min)) ||
......@@ -2739,7 +2739,7 @@ static int make_raw_rw_request(void)
raw_cmd->length <= 0 || current_count_sectors <= 0) {
DPRINT("fractionary current count b=%lx s=%lx\n",
raw_cmd->length, current_count_sectors);
if (raw_cmd->kernel_data != current_req->buffer)
if (raw_cmd->kernel_data != bio_data(current_req->bio))
pr_info("addr=%d, length=%ld\n",
(int)((raw_cmd->kernel_data -
floppy_track_buffer) >> 9),
......@@ -2756,7 +2756,7 @@ static int make_raw_rw_request(void)
return 0;
}
if (raw_cmd->kernel_data != current_req->buffer) {
if (raw_cmd->kernel_data != bio_data(current_req->bio)) {
if (raw_cmd->kernel_data < floppy_track_buffer ||
current_count_sectors < 0 ||
raw_cmd->length < 0 ||
......
......@@ -464,11 +464,11 @@ static void read_intr(void)
ok_to_read:
req = hd_req;
insw(HD_DATA, req->buffer, 256);
insw(HD_DATA, bio_data(req->bio), 256);
#ifdef DEBUG
printk("%s: read: sector %ld, remaining = %u, buffer=%p\n",
req->rq_disk->disk_name, blk_rq_pos(req) + 1,
blk_rq_sectors(req) - 1, req->buffer+512);
blk_rq_sectors(req) - 1, bio_data(req->bio)+512);
#endif
if (hd_end_request(0, 512)) {
SET_HANDLER(&read_intr);
......@@ -505,7 +505,7 @@ static void write_intr(void)
ok_to_write:
if (hd_end_request(0, 512)) {
SET_HANDLER(&write_intr);
outsw(HD_DATA, req->buffer, 256);
outsw(HD_DATA, bio_data(req->bio), 256);
return;
}
......@@ -624,7 +624,7 @@ static void hd_request(void)
printk("%s: %sing: CHS=%d/%d/%d, sectors=%d, buffer=%p\n",
req->rq_disk->disk_name,
req_data_dir(req) == READ ? "read" : "writ",
cyl, head, sec, nsect, req->buffer);
cyl, head, sec, nsect, bio_data(req->bio));
#endif
if (req->cmd_type == REQ_TYPE_FS) {
switch (rq_data_dir(req)) {
......@@ -643,7 +643,7 @@ static void hd_request(void)
bad_rw_intr();
goto repeat;
}
outsw(HD_DATA, req->buffer, 256);
outsw(HD_DATA, bio_data(req->bio), 256);
break;
default:
printk("unknown hd-command\n");
......
......@@ -479,7 +479,7 @@ static unsigned int mg_out(struct mg_host *host,
static void mg_read_one(struct mg_host *host, struct request *req)
{
u16 *buff = (u16 *)req->buffer;
u16 *buff = (u16 *)bio_data(req->bio);
u32 i;
for (i = 0; i < MG_SECTOR_SIZE >> 1; i++)
......@@ -496,7 +496,7 @@ static void mg_read(struct request *req)
mg_bad_rw_intr(host);
MG_DBG("requested %d sects (from %ld), buffer=0x%p\n",
blk_rq_sectors(req), blk_rq_pos(req), req->buffer);
blk_rq_sectors(req), blk_rq_pos(req), bio_data(req->bio));
do {
if (mg_wait(host, ATA_DRQ,
......@@ -514,7 +514,7 @@ static void mg_read(struct request *req)
static void mg_write_one(struct mg_host *host, struct request *req)
{
u16 *buff = (u16 *)req->buffer;
u16 *buff = (u16 *)bio_data(req->bio);
u32 i;
for (i = 0; i < MG_SECTOR_SIZE >> 1; i++)
......@@ -534,7 +534,7 @@ static void mg_write(struct request *req)
}
MG_DBG("requested %d sects (from %ld), buffer=0x%p\n",
rem, blk_rq_pos(req), req->buffer);
rem, blk_rq_pos(req), bio_data(req->bio));
if (mg_wait(host, ATA_DRQ,
MG_TMAX_WAIT_WR_DRQ) != MG_ERR_NONE) {
......@@ -585,7 +585,7 @@ static void mg_read_intr(struct mg_host *host)
mg_read_one(host, req);
MG_DBG("sector %ld, remaining=%ld, buffer=0x%p\n",
blk_rq_pos(req), blk_rq_sectors(req) - 1, req->buffer);
blk_rq_pos(req), blk_rq_sectors(req) - 1, bio_data(req->bio));
/* send read confirm */
outb(MG_CMD_RD_CONF, (unsigned long)host->dev_base + MG_REG_COMMAND);
......@@ -624,7 +624,7 @@ static void mg_write_intr(struct mg_host *host)
/* write 1 sector and set handler if remains */
mg_write_one(host, req);
MG_DBG("sector %ld, remaining=%ld, buffer=0x%p\n",
blk_rq_pos(req), blk_rq_sectors(req), req->buffer);
blk_rq_pos(req), blk_rq_sectors(req), bio_data(req->bio));
host->mg_do_intr = mg_write_intr;
mod_timer(&host->timer, jiffies + 3 * HZ);
}
......
......@@ -747,7 +747,7 @@ static void do_pcd_request(struct request_queue * q)
pcd_current = cd;
pcd_sector = blk_rq_pos(pcd_req);
pcd_count = blk_rq_cur_sectors(pcd_req);
pcd_buf = pcd_req->buffer;
pcd_buf = bio_data(pcd_req->bio);
pcd_busy = 1;
ps_set_intr(do_pcd_read, NULL, 0, nice);
return;
......
......@@ -454,7 +454,7 @@ static enum action do_pd_io_start(void)
if (pd_block + pd_count > get_capacity(pd_req->rq_disk))
return Fail;
pd_run = blk_rq_sectors(pd_req);
pd_buf = pd_req->buffer;
pd_buf = bio_data(pd_req->bio);
pd_retries = 0;
if (pd_cmd == READ)
return do_pd_read_start();
......@@ -485,7 +485,7 @@ static int pd_next_buf(void)
spin_lock_irqsave(&pd_lock, saved_flags);
__blk_end_request_cur(pd_req, 0);
pd_count = blk_rq_cur_sectors(pd_req);
pd_buf = pd_req->buffer;
pd_buf = bio_data(pd_req->bio);
spin_unlock_irqrestore(&pd_lock, saved_flags);
return 0;
}
......
......@@ -795,7 +795,7 @@ static void do_pf_request(struct request_queue * q)
}
pf_cmd = rq_data_dir(pf_req);
pf_buf = pf_req->buffer;
pf_buf = bio_data(pf_req->bio);
pf_retries = 0;
pf_busy = 1;
......@@ -827,7 +827,7 @@ static int pf_next_buf(void)
if (!pf_req)
return 1;
pf_count = blk_rq_cur_sectors(pf_req);
pf_buf = pf_req->buffer;
pf_buf = bio_data(pf_req->bio);
}
return 0;
}
......
......@@ -563,7 +563,6 @@ skd_prep_discard_cdb(struct skd_scsi_request *scsi_req,
req = skreq->req;
blk_add_request_payload(req, page, len);
req->buffer = buf;
}
static void skd_request_fn_not_online(struct request_queue *q);
......@@ -856,10 +855,10 @@ static void skd_end_request(struct skd_device *skdev,
if ((io_flags & REQ_DISCARD) &&
(skreq->discard_page == 1)) {
struct bio *bio = req->bio;
pr_debug("%s:%s:%d, free the page!",
skdev->name, __func__, __LINE__);
free_page((unsigned long)req->buffer);
req->buffer = NULL;
__free_page(bio->bi_io_vec->bv_page);
}
if (unlikely(error)) {
......
......@@ -549,7 +549,7 @@ static void redo_fd_request(struct request_queue *q)
case READ:
err = floppy_read_sectors(fs, blk_rq_pos(req),
blk_rq_cur_sectors(req),
req->buffer);
bio_data(req->bio));
break;
}
done:
......
......@@ -342,7 +342,7 @@ static void start_request(struct floppy_state *fs)
swim3_dbg("do_fd_req: dev=%s cmd=%d sec=%ld nr_sec=%u buf=%p\n",
req->rq_disk->disk_name, req->cmd,
(long)blk_rq_pos(req), blk_rq_sectors(req),
req->buffer);
bio_data(req->bio));
swim3_dbg(" errors=%d current_nr_sectors=%u\n",
req->errors, blk_rq_cur_sectors(req));
#endif
......@@ -479,11 +479,11 @@ static inline void setup_transfer(struct floppy_state *fs)
/* Set up 3 dma commands: write preamble, data, postamble */
init_dma(cp, OUTPUT_MORE, write_preamble, sizeof(write_preamble));
++cp;
init_dma(cp, OUTPUT_MORE, req->buffer, 512);
init_dma(cp, OUTPUT_MORE, bio_data(req->bio), 512);
++cp;
init_dma(cp, OUTPUT_LAST, write_postamble, sizeof(write_postamble));
} else {
init_dma(cp, INPUT_LAST, req->buffer, n * 512);
init_dma(cp, INPUT_LAST, bio_data(req->bio), n * 512);
}
++cp;
out_le16(&cp->command, DBDMA_STOP);
......
......@@ -612,10 +612,10 @@ static void do_blkif_request(struct request_queue *rq)
}
pr_debug("do_blk_req %p: cmd %p, sec %lx, "
"(%u/%u) buffer:%p [%s]\n",
"(%u/%u) [%s]\n",
req, req->cmd, (unsigned long)blk_rq_pos(req),
blk_rq_cur_sectors(req), blk_rq_sectors(req),
req->buffer, rq_data_dir(req) ? "write" : "read");
rq_data_dir(req) ? "write" : "read");
if (blkif_queue_request(req)) {
blk_requeue_request(rq, req);
......
......@@ -661,7 +661,7 @@ static void ace_fsm_dostate(struct ace_device *ace)
rq_data_dir(req));
ace->req = req;
ace->data_ptr = req->buffer;
ace->data_ptr = bio_data(req->bio);
ace->data_count = blk_rq_cur_sectors(req) * ACE_BUF_PER_SECTOR;
ace_out32(ace, ACE_MPULBA, blk_rq_pos(req) & 0x0FFFFFFF);
......@@ -733,7 +733,7 @@ static void ace_fsm_dostate(struct ace_device *ace)
* blk_rq_sectors(ace->req),
* blk_rq_cur_sectors(ace->req));
*/
ace->data_ptr = ace->req->buffer;
ace->data_ptr = bio_data(ace->req->bio);
ace->data_count = blk_rq_cur_sectors(ace->req) * 16;
ace_fsm_yieldirq(ace);
break;
......
......@@ -87,13 +87,15 @@ static void do_z2_request(struct request_queue *q)
while (len) {
unsigned long addr = start & Z2RAM_CHUNKMASK;
unsigned long size = Z2RAM_CHUNKSIZE - addr;
void *buffer = bio_data(req->bio);
if (len < size)
size = len;
addr += z2ram_map[ start >> Z2RAM_CHUNKSHIFT ];
if (rq_data_dir(req) == READ)
memcpy(req->buffer, (char *)addr, size);
memcpy(buffer, (char *)addr, size);
else
memcpy((char *)addr, req->buffer, size);
memcpy((char *)addr, buffer, size);
start += size;
len -= size;
}
......
......@@ -188,10 +188,9 @@ static ide_startstop_t ide_do_rw_disk(ide_drive_t *drive, struct request *rq,
ledtrig_ide_activity();
pr_debug("%s: %sing: block=%llu, sectors=%u, buffer=0x%08lx\n",
pr_debug("%s: %sing: block=%llu, sectors=%u\n",
drive->name, rq_data_dir(rq) == READ ? "read" : "writ",
(unsigned long long)block, blk_rq_sectors(rq),
(unsigned long)rq->buffer);
(unsigned long long)block, blk_rq_sectors(rq));
if (hwif->rw_disk)
hwif->rw_disk(drive, rq);
......
......@@ -1544,7 +1544,6 @@ static int setup_clone(struct request *clone, struct request *rq,
clone->cmd = rq->cmd;
clone->cmd_len = rq->cmd_len;
clone->sense = rq->sense;
clone->buffer = rq->buffer;
clone->end_io = end_clone_request;
clone->end_io_data = tio;
......
......@@ -82,8 +82,7 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr,
block = blk_rq_pos(req) << 9 >> tr->blkshift;
nsect = blk_rq_cur_bytes(req) >> tr->blkshift;
buf = req->buffer;
buf = bio_data(req->bio);
if (req->cmd_type != REQ_TYPE_FS)
return -EIO;
......
......@@ -253,7 +253,7 @@ static int do_ubiblock_request(struct ubiblock *dev, struct request *req)
* flash access anyway.
*/
mutex_lock(&dev->dev_mutex);
ret = ubiblock_read(dev, req->buffer, sec, len);
ret = ubiblock_read(dev, bio_data(req->bio), sec, len);
mutex_unlock(&dev->dev_mutex);
return ret;
......
......@@ -1018,8 +1018,6 @@ static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb,
return BLKPREP_DEFER;
}
req->buffer = NULL;
/*
* Next, walk the list, and fill in the addresses and sizes of
* each segment.
......@@ -1156,7 +1154,6 @@ int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
BUG_ON(blk_rq_bytes(req));
memset(&cmd->sdb, 0, sizeof(cmd->sdb));
req->buffer = NULL;
}
cmd->cmd_len = req->cmd_len;
......
......@@ -739,14 +739,11 @@ static int sd_setup_discard_cmnd(struct scsi_device *sdp, struct request *rq)
blk_add_request_payload(rq, page, len);
ret = scsi_setup_blk_pc_cmnd(sdp, rq);
rq->buffer = page_address(page);
rq->__data_len = nr_bytes;
out:
if (ret != BLKPREP_OK) {
if (ret != BLKPREP_OK)
__free_page(page);
rq->buffer = NULL;
}
return ret;
}
......@@ -843,8 +840,9 @@ static void sd_unprep_fn(struct request_queue *q, struct request *rq)
struct scsi_cmnd *SCpnt = rq->special;
if (rq->cmd_flags & REQ_DISCARD) {
free_page((unsigned long)rq->buffer);
rq->buffer = NULL;
struct bio *bio = rq->bio;
__free_page(bio->bi_io_vec->bv_page);
}
if (SCpnt->cmnd != rq->cmd) {
mempool_free(SCpnt->cmnd, sd_cdb_pool);
......
......@@ -178,7 +178,6 @@ struct request {
unsigned short ioprio;
void *special; /* opaque pointer available for LLD use */
char *buffer; /* kaddr of the current segment if available */
int tag;
int errors;
......