Commit 83bc4198 authored by Russell King's avatar Russell King

[MMC] Fix end of request handling.

We were mixing end_request with end_that_request_chunk, which
is apparently bad news.  Also, the handicapped pxamci driver
was telling us that it had transferred all data successfully
on error, which is obviously wrong.
parent e77c6276
...@@ -166,10 +166,9 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req) ...@@ -166,10 +166,9 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
{ {
struct mmc_blk_data *md = mq->data; struct mmc_blk_data *md = mq->data;
struct mmc_card *card = md->queue.card; struct mmc_card *card = md->queue.card;
int err, sz = 0; int ret;
err = mmc_card_claim_host(card); if (mmc_card_claim_host(card))
if (err)
goto cmd_err; goto cmd_err;
do { do {
...@@ -204,33 +203,26 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req) ...@@ -204,33 +203,26 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
mmc_wait_for_req(card->host, &brq.mrq); mmc_wait_for_req(card->host, &brq.mrq);
if (brq.cmd.error) { if (brq.cmd.error) {
err = brq.cmd.error;
printk(KERN_ERR "%s: error %d sending read/write command\n", printk(KERN_ERR "%s: error %d sending read/write command\n",
req->rq_disk->disk_name, err); req->rq_disk->disk_name, brq.cmd.error);
goto cmd_err; goto cmd_err;
} }
if (rq_data_dir(req) == READ) {
sz = brq.data.bytes_xfered;
} else {
sz = 0;
}
if (brq.data.error) { if (brq.data.error) {
err = brq.data.error;
printk(KERN_ERR "%s: error %d transferring data\n", printk(KERN_ERR "%s: error %d transferring data\n",
req->rq_disk->disk_name, err); req->rq_disk->disk_name, brq.data.error);
goto cmd_err; goto cmd_err;
} }
if (brq.stop.error) { if (brq.stop.error) {
err = brq.stop.error;
printk(KERN_ERR "%s: error %d sending stop command\n", printk(KERN_ERR "%s: error %d sending stop command\n",
req->rq_disk->disk_name, err); req->rq_disk->disk_name, brq.stop.error);
goto cmd_err; goto cmd_err;
} }
do { do {
int err;
cmd.opcode = MMC_SEND_STATUS; cmd.opcode = MMC_SEND_STATUS;
cmd.arg = card->rca << 16; cmd.arg = card->rca << 16;
cmd.flags = MMC_RSP_SHORT | MMC_RSP_CRC; cmd.flags = MMC_RSP_SHORT | MMC_RSP_CRC;
...@@ -246,13 +238,25 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req) ...@@ -246,13 +238,25 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
if (cmd.resp[0] & ~0x00000900) if (cmd.resp[0] & ~0x00000900)
printk(KERN_ERR "%s: status = %08x\n", printk(KERN_ERR "%s: status = %08x\n",
req->rq_disk->disk_name, cmd.resp[0]); req->rq_disk->disk_name, cmd.resp[0]);
err = mmc_decode_status(cmd.resp); if (mmc_decode_status(cmd.resp))
if (err)
goto cmd_err; goto cmd_err;
#endif #endif
sz = brq.data.bytes_xfered; /*
} while (end_that_request_chunk(req, 1, sz)); * A block was successfully transferred.
*/
spin_lock_irq(&md->lock);
ret = end_that_request_chunk(req, 1, brq.data.bytes_xfered);
if (!ret) {
/*
* The whole request completed successfully.
*/
add_disk_randomness(req->rq_disk);
blkdev_dequeue_request(req);
end_that_request_last(req);
}
spin_unlock_irq(&md->lock);
} while (ret);
mmc_card_release_host(card); mmc_card_release_host(card);
...@@ -261,8 +265,22 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req) ...@@ -261,8 +265,22 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
cmd_err: cmd_err:
mmc_card_release_host(card); mmc_card_release_host(card);
end_that_request_chunk(req, 1, sz); /*
req->errors = err; * This is a little draconian, but until we get proper
* error handling sorted out here, its the best we can
* do - especially as some hosts have no idea how much
* data was transferred before the error occurred.
*/
spin_lock_irq(&md->lock);
do {
ret = end_that_request_chunk(req, 0,
req->current_nr_sectors << 9);
} while (ret);
add_disk_randomness(req->rq_disk);
blkdev_dequeue_request(req);
end_that_request_last(req);
spin_unlock_irq(&md->lock);
return 0; return 0;
} }
......
...@@ -57,7 +57,6 @@ static int mmc_queue_thread(void *d) ...@@ -57,7 +57,6 @@ static int mmc_queue_thread(void *d)
struct mmc_queue *mq = d; struct mmc_queue *mq = d;
struct request_queue *q = mq->queue; struct request_queue *q = mq->queue;
DECLARE_WAITQUEUE(wait, current); DECLARE_WAITQUEUE(wait, current);
int ret;
/* /*
* Set iothread to ensure that we aren't put to sleep by * Set iothread to ensure that we aren't put to sleep by
...@@ -76,10 +75,10 @@ static int mmc_queue_thread(void *d) ...@@ -76,10 +75,10 @@ static int mmc_queue_thread(void *d)
complete(&mq->thread_complete); complete(&mq->thread_complete);
add_wait_queue(&mq->thread_wq, &wait); add_wait_queue(&mq->thread_wq, &wait);
spin_lock_irq(q->queue_lock);
do { do {
struct request *req = NULL; struct request *req = NULL;
spin_lock_irq(q->queue_lock);
set_current_state(TASK_INTERRUPTIBLE); set_current_state(TASK_INTERRUPTIBLE);
if (!blk_queue_plugged(q)) if (!blk_queue_plugged(q))
mq->req = req = elv_next_request(q); mq->req = req = elv_next_request(q);
...@@ -93,10 +92,7 @@ static int mmc_queue_thread(void *d) ...@@ -93,10 +92,7 @@ static int mmc_queue_thread(void *d)
} }
set_current_state(TASK_RUNNING); set_current_state(TASK_RUNNING);
ret = mq->issue_fn(mq, req); mq->issue_fn(mq, req);
spin_lock_irq(q->queue_lock);
end_request(req, ret);
} while (1); } while (1);
remove_wait_queue(&mq->thread_wq, &wait); remove_wait_queue(&mq->thread_wq, &wait);
......
...@@ -286,7 +286,10 @@ static int pxamci_data_done(struct pxamci_host *host, unsigned int stat) ...@@ -286,7 +286,10 @@ static int pxamci_data_done(struct pxamci_host *host, unsigned int stat)
* This means that if there was an error on any block, we mark all * This means that if there was an error on any block, we mark all
* data blocks as being in error. * data blocks as being in error.
*/ */
data->bytes_xfered = data->blocks << data->blksz_bits; if (data->error == MMC_ERR_NONE)
data->bytes_xfered = data->blocks << data->blksz_bits;
else
data->bytes_xfered = 0;
pxamci_disable_irq(host, DATA_TRAN_DONE); pxamci_disable_irq(host, DATA_TRAN_DONE);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment